blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
12796d6139e34a41b337ae4b8c348723147b0807 | bff6f874ddadce8109260ac9c36a8e1f76bc5536 | /ndnd_code/text_runs_on temperature.R | 2e1c2842af27c164e4cece9cec4b6820d089225a | [] | no_license | ElliotSivel/NDND | b45d06e5b8f0ea9b81e8793ab6f203426b7e4b75 | 88ef12fe46ffb7ac3ead95dfbe4ab1c50aa7dacb | refs/heads/master | 2021-06-22T00:16:58.066969 | 2021-01-12T17:03:31 | 2021-01-12T17:03:31 | 171,261,910 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 696 | r | text_runs_on temperature.R | # small test script to run 3 simulations with 3 temperatures
load("~/Documents/Work/NDND/NDND_in_R/NDND/ndnd_data/NDNDData_2019_04_09_10_22_33.RData") # my reference data file
save(NDNDData,file = './NDNDData.Rdata')
source('./ndnd_code/NDND_main.r')
NDNDData=TEOMR(NDNDData,c(1,1,1,1,1,1,0,0),1)
NDNDData$Data.tag=Sys.time()
NDNDData$comment="temperature increased by 1 degree from original file"
save(NDNDData,file = './NDNDData.Rdata')
source('./ndnd_code/NDND_main.r')
NDNDData=TEOMR(NDNDData,c(1,1,1,1,1,1,0,0),1)
NDNDData$Data.tag=Sys.time()
NDNDData$comment="temperature increased by 2 degree from original file"
save(NDNDData,file = './NDNDData.Rdata')
source('./ndnd_code/NDND_main.r')
|
4550e560d2f65981e54adc24a829b5bb25390ee8 | eb64e76a208dd608c8a753af25e649415e0f2325 | /R/binToDec.R | cb9ed0dd74992fc9272ebc8ff6e3269f908aec5c | [] | no_license | cran/bayesloglin | b8a34d75ef71c6d486fbd1b31719bbee80f29fbe | f748df55cd8cc958a761b270b82cecf2746b9838 | refs/heads/master | 2021-01-12T06:50:29.768481 | 2016-12-19T15:13:03 | 2016-12-19T15:13:03 | 76,839,534 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 91 | r | binToDec.R | binToDec <-
function (x) {
dec <- sum(x * 2^(rev(seq_along(x)) - 1))
return(dec)
}
|
02c17b2c458d1e69a591cd96449bd5bc0bacf0c8 | cfaf00159de526c80f44376ce0defd82051ac3c4 | /tests/testthat/test-pkg_examples.R | 6e0c5b152b78917205a764b38942736b1ff3673d | [] | no_license | cran/RcppProgress | 3a5023e9f354dc5329873ba49c4d10b4b2950fa7 | f63f3f9b9ba4d01c45c44e778607c641fc884c54 | refs/heads/master | 2020-12-25T17:57:15.728649 | 2020-02-06T11:10:08 | 2020-02-06T11:10:08 | 17,693,210 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 821 | r | test-pkg_examples.R | source("wrap_examples.R")
context('RcppProgressExample sequential\n')
.test_sequential <- function() {
expect_error(test_sequential(nb = 500), NA)
}
test_that("test_sequential", .test_sequential())
context('RcppProgressExample multithreaded\n')
.test_multithreaded <- function() {
expect_error(test_multithreaded(nb = 1000, threads = 4), NA)
}
test_that("test_multithreaded", .test_multithreaded())
context('RcppProgressArmadillo multithreaded\n')
.amardillo_multithreaded <- function() {
expect_error(amardillo_multithreaded(nb = 1000, threads = 4), NA)
}
test_that("amardillo_multithreaded", .amardillo_multithreaded())
context('RcppProgressETA:custom progress bar\n')
.eta_progress_bar <- function() {
expect_error(eta_progress_bar(nb = 1000), NA)
}
test_that("eta_progress_bar", .eta_progress_bar())
|
3e69df87176370810d30f33d7dd72a798778a4a9 | 6484698359c33ec731d7448cecd48fb9beaba341 | /man/innerAUC_fct.Rd | 3d72dfb63c598c6d7bfb802a351d44ccd22496ab | [] | no_license | krumsieklab/SurvRank | af88aa83f1b7ac5adb0d4bac8f8c2ef164e33a90 | 2666b006321d0fb3e84bfee9c15a43f35219af30 | refs/heads/master | 2020-05-07T21:29:11.332431 | 2019-04-12T16:25:07 | 2019-04-12T16:25:07 | 180,906,378 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,713 | rd | innerAUC_fct.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/innerAUC_fct.R
\name{innerAUC_fct}
\alias{innerAUC_fct}
\title{Calculates inner and outer survival AUCs of training and testset}
\usage{
innerAUC_fct(f, data, t, cv.out, cv.in, i, fold, data.out, nr.var, out.s, sd1,
c.time, ranking, used.rank, used.rank1se, pred.in, pred.out, pred.out1,
auc.out, auc.out1)
}
\arguments{
\item{f}{ranking function}
\item{data}{data input}
\item{t}{current t.times argument}
\item{cv.out}{number of outer CV}
\item{cv.in}{number of inner CV}
\item{i}{current counter index}
\item{fold}{current fold}
\item{data.out}{training data of outer CV}
\item{nr.var}{maximum number of variables into model}
\item{out.s}{predefined output matrix for inner tAUC evaluations}
\item{sd1}{factor to which sparser solutions should be chosen. Not maximum Survival AUC in inner loop is used in stepwise selection, instead \code{max(survAUC)*sd1} leading to sparser solutions}
\item{c.time}{as defined in package \code{survAUC} time; a positive number restricting the upper limit of the time range under consideration.}
\item{ranking}{predefined ranking list}
\item{used.rank}{predefined list}
\item{used.rank1se}{predefined list}
\item{pred.in}{predefined list for inner predictions}
\item{pred.out}{predefined list for outer predictions}
\item{pred.out1}{predefined list for outer predictions with \code{sd1} factor}
\item{auc.out}{vector of survival AUCs}
\item{auc.out1}{vector of survival AUCs with \code{sd1} factor}
}
\description{
work-horse function for all ranking methods with inner and outer CV loops
}
\keyword{internal}
|
bb9d6e9669cac395f67200937d9e13884f6adf27 | cfc4a7b37657114bb93c7130eff4fc2458381a4f | /doc-ja/sample-quotientfield04.rb.v.rd | 47062f2ff55e22e75a7284c19e235744ae7207db | [
"MIT"
] | permissive | kunishi/algebra-ruby2 | 5bc3fae343505de879f7a8ae631f9397a5060f6b | ab8e3dce503bf59477b18bfc93d7cdf103507037 | refs/heads/master | 2021-11-11T16:54:52.502856 | 2021-11-04T02:18:45 | 2021-11-04T02:18:45 | 28,221,289 | 6 | 0 | null | 2016-05-05T16:11:38 | 2014-12-19T08:36:45 | Ruby | UTF-8 | R | false | false | 381 | rd | sample-quotientfield04.rb.v.rd | =begin
# sample-quotientfield04.rb
require "algebra"
F13 = ResidueClassRing(Integer, 13)
F = RationalFunctionField(F13, "x")
x = F.var
AF = AlgebraicExtensionField(F, "a") {|a| a**2 - 2*x}
a = AF.var
p( (a/4*x + AF.unity/2)/(x**2 + a*x + 1) +
(-a/4*x + AF.unity/2)/(x**2 - a*x + 1) )
#=> (-x^3 + x^2 + 1)/(x^4 + 11x^3 + 2x^2 + 1)
((<_|CONTENTS>))
=end
|
d76fbac184840de9663a491bc7502b6d8c93f5c0 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/6606_5/rinput.R | a81bb71dba2e57c3decd3e3500cc2997e6c19f0c | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
testtree <- read.tree("6606_5.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6606_5_unrooted.txt") |
daadf46d7f215c41fb6866074dfa9a811996f786 | 7e46e285f29c527bb8c7a55b89411e13641c0bd9 | /Source/pomp-novac-fitting.R | e4152bd6c88f6226299aebf4b478c9f2314e4ed0 | [] | no_license | HopkinsIDD/singledose-ocv | d5a2f8eea95d1267b02190593f2db5ad9e075b4a | ec516ba0ca908076eb4114c3d58515c6130e2ea7 | refs/heads/master | 2021-01-20T07:48:33.219390 | 2015-12-07T08:40:48 | 2015-12-07T08:40:48 | 29,299,386 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 43,010 | r | pomp-novac-fitting.R | ###################################################################
## An attempt to really nail down unvac fits for zim and conakry ##
###################################################################
source("Source/leakyvac-pomp-model-inC-novac.R")
library(pomp)
palette(brewer.pal(8,"Set1"))
set.seed(243947892)
## for some parallel mif runs
library(foreach)
#library(multicore)
library(doMC)
registerDoMC(2)
## ------------------------- ##
## Let's start wiht zimbabwe ##
## ------------------------- ##
## zim popualtion
pop.zim <- 13.4e6
## zimdat
zim.dat <- get.zim.data()[-50,-1]
zim.dat$week <- 1:49
colnames(zim.dat)
## build pomp model object
zim.mod <- build.leaky.model.C(pop=pop.zim,
dat = zim.dat,
model.name = "zimmod")
## specify starting parameters
## remember these are in units of weeks
E0 <- 10/pop.zim
I0 <- 10/pop.zim
A0 <- 1e-11
R0 <- 0.36
S0 <- 1- R0-I0-E0-A0
guess.params <- c(gamma=1.78,
sigma=5,
theta=10,
beta1=3.30,
beta2=6,
rho=0.03,
theta0=0,
S.0=S0,
E.0=E0,
I.0=I0,
A.0=A0,
R.0=R0)
#zim.mod.win <- window(zim.mod,start=14,end=49)
#zim.mod.win@t0 <- 13
## first let's start with Trajectory matching
set.seed(19821)
tm.zim <- traj.match(zim.mod,
start=guess.params,
est=c('beta1','R.0','I.0'),
method="subplex",
transform=TRUE)
summary(tm.zim)
sim.zim.tm <- simulate(tm.zim,
nsim=500,
seed=1914679109L,
transform=TRUE)
## let's plot
plot(zim.dat[,1],ylim=c(0,20000))
for (i in 1:500) {
lines(sim.zim.tm[[i]]@data[1,],lty=2,col=AddAlpha(5,.05))
}
## run a pfilter to look at likelihood
zim.pf <- pfilter(tm.zim,
Np=1000,
save.params=TRUE)
logLik(zim.pf) #-377
## let's mif
set.seed(19822)
mif.zim <- mif(tm.zim,
start=coef(tm.zim),
Nmif=50,
pars=c('beta1','gamma'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(beta1=0.12,
I.0=0.12,
S.0=0.12,
gamma=0.12),
Np=2000,
ic.lag=length(zim.mod@data)/2,
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2")
saveRDS(mif.zim,file="GeneratedData/mif-zim-REV.rds")
mif.zim <- readRDS("GeneratedData/mif-zim-REV.rds")
mif.zim <- continue(mif.zim,Nmif=50)
logLik(mif.zim)
logLik(pfilter(mif.zim,Np=5000))
## now let's explore the space around to make sure we aren't stuck in
## a local maximum
estpars <- c('beta1','gamma')
set.seed(19823)
mf.zim <- foreach(i=1:10,
.inorder=FALSE,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
## let's saple our parameters
theta.guess <- coef(tm.zim)
theta.guess[estpars] <- rlnorm(
n=length(estpars),
meanlog=log(theta.guess[estpars]),
sdlog=0.2
)
# now sample from I.0
I.0.count <- runif(1,1,1e4)/pop.zim # people
theta.guess['S.0'] <- theta.guess['S.0'] - I.0.count
theta.guess['I.0'] <- I.0.count
theta.guess['S.0'] <- theta.guess['S.0']*runif(1,.8,1.2)
theta.guess['R.0'] <- max(0,1-sum(theta.guess[c('S.0','I.0','E.0','A.0')]))
m1 <- mif(
tm.zim,
Nmif=100,
start=theta.guess,
pars=c('beta1','gamma'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(beta1=0.1,gamma=0.1,I.0=0.1,S.0=0.1),
Np=3000,
ic.lag=length(zim.mod@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2"
)
ll <- replicate(n=3,logLik(pfilter(m1,Np=10000)))
list(mif=m1,ll=ll)
}
sapply(mf.zim,function(x) x[[2]])
## compare.mif now depreciated now using mifList object
zim.mflist <- do.call(c,lapply(mf.zim,function(x) x[[1]]))
saveRDS(mf.zim,file="GeneratedData/parallel-mif-zim-REV.rds")
to.pdf(plot(zim.mflist),"Plots/unvac-zimmif-REV.pdf")
## now explore the best mifs a little further
mf.zim <- readRDS("GeneratedData/parallel-mif-zim-REV.rds")
#mf.zim <- readRDS("GeneratedData/parallel-mif-zim.rds")
mult.pomps <- sapply(mf.zim,function(x) x[[1]])
plot(do.call(c,lapply(mf.zim,function(x) x[[1]])))
## compare.mif(mult.pomps)
## let's look at which ones are best
best.pomps <- order(colMeans(sapply(mf.zim,function(x) x[[2]])),decreasing=TRUE)
set.seed(19823)
better.mif.zim <- mif(mult.pomps[[best.pomps[1]]],
Nmif=100,
pars=c('beta1','gamma'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(beta1=0.12,I.0=0.12,S.0=0.12,gamma=0.12),
Np=2000,
ic.lag=length(zim.mod@data)/2,
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2")
logLik(pfilter(better.mif.zim,Np=10000))
to.pdf(plot(better.mif.zim),"Plots/mif-zim-final-diag.pdf")
## and plot
set.seed(4278435)
sim.zim.mif <- simulate(better.mif.zim,
nsim=500,
transform=TRUE)
## wrapper for plotting simulations and zim data with PIs
make.zim.sim.plot <- function(run,dat,nlines=500){
zim.mat <- sapply(run,function(x) x@data[1,])
zim.means <- apply(zim.mat,1,mean)
zim.ci <- apply(zim.mat,1,function(x) quantile(x,c(.025,.975)))
plot(dat[,1],ylim=c(0,14000),xlab="epidemic week",ylab="cases per week",pch=4)
for (i in 1:nlines) {
lines(sim.zim.mif[[i]]@data[1,],lty=2,col=AddAlpha(4,.05))
}
lines(zim.means,col=4)
lines(zim.ci[1,],col=4,lty=2)
lines(zim.ci[2,],col=4,lty=2)
legend("topright",c("simulated epidemic",
"mean simulated epidemic",
"95% Prediction Interval",
"data"),
col=c(AddAlpha(4,0.1),4,4,"black"),lty=c(1,1,2,-1),pch=c(-1,-1,-1,4),bty="n")
}
## make the pdf
to.pdf(make.zim.sim.plot(sim.zim.mif,zim.dat),"Plots/mif-zim-unvac-REV.pdf")
pdf("Plots/hist-finalsize-uncon-zim-REV.pdf")
hist(colSums(sapply(sim.zim.mif,function(x) x@data[1,])),
col="grey",border="white",breaks="fd",
xlab="Final Epidemic Size of Simulation",
main="Final Size of Zimbabwe Simulations",
xlim=c(80000,140000))
abline(v=98591,col="orange",lwd=2,lty=2)
text(97000,50,"Reported Epidemic \n Size = 98,591",cex=.9)
dev.off()
## ------------------------------------------------------ ##
## NOTE: we are going to use this fit for our projections ##
## ------------------------------------------------------ ##
saveRDS(better.mif.zim,file="GeneratedData/mif-zim-REV.rds")
better.mif.zim <- readRDS("GeneratedData/mif-zim-REV.rds")
## and save the final states from our particle filter
pf.zim <- pfilter(better.mif.zim,Np=5000,save.states=TRUE)
est.states.zim <- sapply(pf.zim@saved.states,rowMeans)
saveRDS(est.states.zim,file="GeneratedData/mif-zim-states-REV.rds")
## --------------------------------------------------------------- ##
## Let's do a little profiling of our beta and gamma parameters to ##
## see how peaky they look ##
## --------------------------------------------------------------- ##
## first for beta
beta.range <- seq(coef(better.mif.zim)['beta1']*.6,coef(better.mif.zim)['beta1']*1.4,length=15)
mf.zim.beta.prof <- foreach(i=1:length(beta.range),
.inorder=FALSE,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
theta.guess <- coef(better.mif.zim)
theta.guess['beta1'] <- beta.range[i]
m1 <- mif(
better.mif.zim,
Nmif=50,
start=theta.guess,
pars=c('gamma'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(gamma=0.1,I.0=0.1,S.0=0.1),
Np=2000,
ic.lag=length(zim.mod@data)/2,
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2"
)
ll <- replicate(n=3,logLik(pfilter(m1,Np=10000)))
list(mif=m1,ll=ll)
}
beta.logliks <- colMeans(sapply(mf.zim.beta.prof,function(x) x[[2]]))
cis <- max(beta.logliks) - qchisq(.95,1)/2
pdf("Plots/proflik-beta-zim-REV.pdf")
plot(beta.range,beta.logliks,xlab="beta",ylab="log-likelihood",main="Profile Likelihood of Beta (Zimbabwe)")
abline(h=cis,lty=2,col=AddAlpha("orange",.75),lwd=2)
abline(v=approx(beta.logliks[2:6],beta.range[2:6],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
abline(v=approx(beta.logliks[8:13],beta.range[8:13],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
text(4,-336,sprintf("95%% CI %.2f-%.2f",
approx(beta.logliks[2:6],beta.range[2:6],xout=cis)$y,
approx(beta.logliks[8:12],beta.range[8:12],xout=cis)$y))
dev.off()
## approximate 95% CI
# gamma.range <- seq(coef(better.mif.zim)['gamma']*.6,coef(better.mif.zim)['gamma']*2,length=25)
gamma.range <- seq(2.2,5,length=25)
mf.zim.gamma.prof <- foreach(i=1:length(gamma.range),
.inorder=FALSE,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
theta.guess <- coef(better.mif.zim)
theta.guess['gamma'] <- gamma.range[i]
m1 <- mif(
better.mif.zim,
Nmif=50,
start=theta.guess,
pars=c('beta1'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(beta1=0.1,I.0=0.1,S.0=0.1),
Np=2000,
ic.lag=length(zim.mod@data)/2,
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2"
)
ll <- replicate(n=3,logLik(pfilter(m1,Np=10000)))
list(mif=m1,ll=ll)
}
saveRDS(gamma.logliks,file="GeneratedData/gamma_proflik.rds")
plot(gamma.range,colMeans(sapply(mf.zim.gamma.prof,function(x) x[[2]])))
gamma.logliks <- colMeans(sapply(mf.zim.gamma.prof,function(x) x[[2]]))
cis <- max(gamma.logliks) - qchisq(.95,1)/2
pdf("Plots/proflik-gamma-zim-REV.pdf")
plot(gamma.range,gamma.logliks,xlab="gamma",ylab="log-likelihood",main="Profile Likelihood of Gamma (Zimbabwe)")
abline(h=cis,lty=2,col=AddAlpha("orange",.75),lwd=2)
abline(v=approx(gamma.logliks[5:7],gamma.range[5:7],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
abline(v=approx(gamma.logliks[20:25],gamma.range[20:25],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
text(3.5,-337.5,sprintf("95%% CI %.2f-%.2f",
approx(gamma.logliks[5:7],gamma.range[5:7],xout=cis)$y,
approx(gamma.logliks[20:25],gamma.range[20:25],xout=cis)$y))
dev.off()
## NOW Let's calculate the joint profile for beta and gamma to get
## R
beta.seq <- seq(2.5,6,length=30)
gamma.seq <- seq(2,5,length=30)
r.seq <- expand.grid(beta.seq,gamma.seq)
R.prof <- foreach(i=1:nrow(r.seq),
.inorder=FALSE,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
theta.guess <- coef(better.mif.zim)
theta.guess['gamma'] <- r.seq[i,2]
theta.guess['beta1'] <- r.seq[i,1]
m1 <- mif(
better.mif.zim,
Nmif=50,
start=theta.guess,
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(I.0=0.1,S.0=0.1),
Np=2000,
ic.lag=length(zim.mod@data)/2,
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2"
)
ll <- replicate(n=3,logLik(pfilter(m1,Np=10000)))
list(mif=m1,ll=ll)
}
saveRDS(R.logliks,file="GeneratedData/R_proflik.rds")
## get CI for R
prof.lik <- R.logliks
prof.mat <- matrix(prof.lik[,3],nrow=30)
colnames(prof.mat) <- beta.seq
rownames(prof.mat) <- gamma.seq
ci.lines <- contourLines(prof.mat*2,levels=max(prof.mat)*2 - 3.814/2)
## we will only take the middle
range(approx(seq(0,1,length=30),beta.seq,ci.lines[[2]]$x)$y/approx(seq(0,1,length=30),gamma.seq,ci.lines[[2]]$y)$y)
## now let's look at the initial state of susceptibles
## first for beta
S0.range <- seq(0.89,
0.93,length=15)
mf.zim.S0.prof <- foreach(i=1:length(S0.range),
.inorder=FALSE,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
theta.guess <- coef(better2.mif.zim)
theta.guess['S.0'] <- S0.range[i]
## need to reduce I.0 so init cond. sum to 1
theta.guess['R.0'] <- max(0,1- sum(theta.guess[c('E.0','S.0','A.0','I.0')]))
m1 <- mif(
better2.mif.zim,
Nmif=50,
start=theta.guess,
pars=c('beta1','gamma'),
ivps=c('I.0'),
transform=TRUE,
rw.sd=c(beta1=0.1,gamma=0.1,I.0=0.1),
Np=2000,
ic.lag=length(zim.mod@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2"
)
ll <- replicate(n=3,logLik(pfilter(m1,Np=10000)))
list(mif=m1,ll=ll)
}
S0.logliks <- colMeans(sapply(mf.zim.S0.prof,function(x) x[[2]]))
cis <- max(S0.logliks) - qchisq(.95,1)/2
pdf("Plots/proflik-S0-zim.pdf")
plot(S0.range,S0.logliks,xlab="S0",ylab="log-likelihood",main="Profile Likelihood of Transmission Parameter (S0)")
abline(h=cis,lty=2,col=AddAlpha("orange",.75),lwd=2)
abline(v=approx(S0.logliks[5:7],S0.range[5:7],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
abline(v=approx(S0.logliks[11:13],S0.range[11:13],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
text(3.45,-338,sprintf("95%% CI %.2f-%.2f",
approx(S0.logliks[2:6],S0.range[2:6],xout=cis)$y,
approx(S0.logliks[7:11],S0.range[7:11],xout=cis)$y))
dev.off()
##################################################################
## For Harare only using data extracted from Fernandez et al. ##
## Ultiumatley decided not to use this since the data are a bit ##
## suspect and pretty challenging to fit well with an SIR model ##
##################################################################
pop.harare <- 1606000
har.dat <- get.zim.data(harare.only = TRUE)
har.dat$week <- 1:nrow(har.dat)
## build pomp model object
har.mod <- build.leaky.model.C(pop=pop.harare,
dat=har.dat,
model.name = "harmod")
## specify starting parameters
## remember these are in units of weeks
E0 <- 10/0.04/pop.harare/3
I0 <- 10/0.04/pop.harare/3
A0 <- 100/pop.harare
R0 <- 0.5
S0 <- 1- R0-I0-E0-A0
guess.params <- c(gamma=7/3,
sigma=5,
theta=10,
beta1=3.9,
beta2=3.0,
rho=0.04,
theta0=0.0001,
S.0=S0,
E.0=E0,
I.0=I0,
A.0=A0,
R.0=R0)
#har.mod.win <- window(har.mod,start=16,end=44)
#har.mod.win@t0 <- 15
## first let's start with Trajector matching
tm.har <- traj.match(har.mod,
start=guess.params,
est=c('beta1','I.0','E.0','S.0'),
method="subplex",
maxit=15000,
transform=TRUE
)
sim.har.tm <- simulate(tm.har,
nsim=500,
seed=1914679109L,
transform=TRUE)
#pdf("Plots/harare-fernandezextract.pdf")
plot(har.dat[,2],ylim=c(0,2000),type="h")
#dev.off()
for (i in 1:500) {
lines(sim.har.tm[[i]]@data[1,],lty=2,col=AddAlpha(4,.05))
}
mif.har <- mif(tm.har,
Nmif=100,
pars=c('beta1'),
ivps=c('I.0','E.0','S.0'),
transform=TRUE,
rw.sd=c(beta1=0.1,S.0=0.1,E.0=0.1,I.0=0.1),
Np=2000,
ic.lag=length(har.mod@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2",
verbose=TRUE)
#mif.zim.mif1 <- mif(mif.zim.mif1,Nmif=50,cooling.fraction=0.80)
#mif3.zim <- mif(mif2.zim,Nmif=50,cooling.fraction=0.80)
sim.zim.mif <- simulate(mif.zim,
nsim=500,
seed=1914679109L,
transform=TRUE)
#dev.off()
for (i in 1:500) {
lines(sim.zim.mif[[i]]@data[1,],lty=2,col=AddAlpha(2,.05))
}
#####################
## now for conakry ##
#####################
## 2010 populatino estimates from Institut National de la Statistique de Guinée
pop.con <- 1656300
## zimdat
con.dat <- get.conakry.data()
## build pomp model object
con.mod <- build.leaky.model.C(pop=pop.con,
dat=con.dat,
my.times="day",
my.t0=0,
model.name="conakrymodel")
## specify starting parameters
## remember these are in units of weeks
E0 <- 1/0.04/pop.con
I0 <- 1/0.04/pop.con
A0 <- 1/pop.con
R0 <- 0.5
S0 <- 1- R0-I0-E0-A0
guess.params.con <- c(gamma=7/3,
sigma=5,
theta=20,
beta1=3.9,
beta2=3.0,
rho=0.04,
theta0=0.0001,
S.0=S0,
E.0=E0,
I.0=I0,
A.0=A0,
R.0=R0)
# start and stops refer to indices not days (t0=0)
con.mod.win <- window(con.mod,start=33,end=153)
con.mod.win@t0 <- 32
tm.con <- traj.match(con.mod.win,
start=guess.params.con,
est=c('beta1','E.0','S.0','I.0','gamma'),
method="subplex",
maxit=15000,
transform=TRUE
)
sim.con.tm <- simulate(tm.con,
nsim=500,
seed=1914679109L,
transform=TRUE)
plot(con.dat[33:153,2],ylim=c(0,500))
for (i in 1:500) {
lines(sim.con.tm[[i]]@data[1,],lty=2,col=AddAlpha(5,.05))
}
## not a great fit but it gets up somewhere
logLik(pfilter(tm.con,Np=1000))
mif.con <- mif(tm.con,
start=coef(tm.con),
Nmif=100,
pars=c('beta1','gamma'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(beta1=0.15,gamma=0.15,I.0=0.15,S.0=0.15),
Np=3000,
ic.lag=length(con.mod.win@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2",
verbose=TRUE)
## run a particle filter
pf.con <- pfilter(mif.con,Np=5000,save.states=TRUE)
## get the average state at each time
#est.states.con <- sapply(pf.con@saved.states,rowMeans)
#saveRDS(est.states.con,file="GeneratedData/mif-con-states.rds")
sim.con.mif <- simulate(mif.con,
nsim=500,
seed=1914679109L,
transform=TRUE)
estpars <- c("beta1","gamma")
mf.con <- foreach(i=1:10,
.inorder=FALSE,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
theta.guess <- coef(mif.con)
theta.guess[estpars] <- rlnorm(
n=length(estpars),
meanlog=log(theta.guess[estpars]),
sdlog=0.1
)
## now sample from I.0
I.0.count <- runif(1,1,1e4)/pop.con # people
theta.guess['S.0'] <- theta.guess['S.0'] - I.0.count
theta.guess['I.0'] <- I.0.count
theta.guess['S.0'] <- theta.guess['S.0']*runif(1,.8,1.2)
theta.guess['R.0'] <- max(0,1-sum(theta.guess[c('S.0','I.0','E.0','A.0')]))
m1 <- mif(
tm.con,
Nmif=100,
start=theta.guess,
pars=c('beta1','gamma'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(beta1=0.1,gamma=0.15,I.0=0.15,S.0=0.15),
Np=2000,
ic.lag=length(con.mod@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2"
)
ll <- replicate(n=10,logLik(pfilter(m1,Np=10000)))
list(mif=m1,ll=ll)
}
saveRDS(mf.con,file="GeneratedData/parallel-mif-con-REV.rds")
#mf.con <- readRDS("GeneratedData/parallel-mif-con.rds")
mif.cons <- sapply(mf.con,function(v) v[[1]])
mif.cons.ll <- sapply(mf.con,function(v) v[[2]])
compare.mif(mif.cons)
best.pomps <- order(colMeans(mif.cons.ll),decreasing=TRUE)
better.mif.con <- mif(mf.con[[best.pomps[1]]][[1]],
Nmif=200,
pars=c('beta1','gamma'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(beta1=0.15,I.0=0.15,S.0=0.15,gamma=0.15),
Np=4000,
ic.lag=length(con.mod.win@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2")
logLik(pfilter(better.mif.con,Np=10000))
better.mif.con2 <- mif(better.mif.con,
Nmif=100,
pars=c('beta1','gamma'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(beta1=0.15,I.0=0.15,S.0=0.15,gamma=0.15),
Np=4000,
ic.lag=length(con.mod.win@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2")
logLik(pfilter(better.mif.con2,Np=10000))
## now let's save! this is going to be the object we use in
## future simulations!
#saveRDS(better.mif.con2,file="GeneratedData/mif-con.rds")
better.mif.con2 <- readRDS("GeneratedData/mif-con.rds")
## run a particle filter
pf.con <- pfilter(better.mif.con2,Np=10000,save.states=TRUE)
## get the average state at each time and save it for vac simulations
est.states.con <- sapply(pf.con@saved.states,rowMeans)
saveRDS(est.states.con,file="GeneratedData/mif-con-states.rds")
sim.con.mif <- simulate(better.mif.con2,
nsim=500,
seed=1914679109L,
transform=TRUE)
plot(con.dat[,2],ylim=c(0,250),xlab="epidemic day",ylab="cases per day",pch=4)
for (i in 1:500) {
lines(33:153,sim.con.mif[[i]]@data[1,],lty=2,col=AddAlpha(3,.05))
}
pdf("Plots/mif-con-unvac.pdf")
con.mat <- sapply(sim.con.mif,function(x) x@data[1,])
con.means <- apply(con.mat,1,mean)
con.ci <- apply(con.mat,1,function(x) quantile(x,c(.025,.975)))
plot(con.dat[,2],ylim=c(0,250),xlab="epidemic day",ylab="cases per day",pch=4)
for (i in 1:500) {
lines(33:153,sim.con.mif[[i]]@data[1,],lty=2,col=AddAlpha(4,.05))
}
lines(33:153,con.means,col=4)
lines(33:153,con.ci[1,],col=4,lty=2)
lines(33:153,con.ci[2,],col=4,lty=2)
legend("topright",c("simulated epidemic",
"mean simulated epidemic",
"95% Prediction Interval",
"data"),
col=c(AddAlpha(4,0.1),4,4,"black"),lty=c(1,1,2,-1),pch=c(-1,-1,-1,4),bty="n")
dev.off()
pdf("Plots/hist-finalsize-uncon-con.pdf")
hist(colSums(sapply(sim.con.mif,function(x) x@data[1,])),
col="grey",border="white",breaks="fd",
xlab="Final Epidemic Size of Simulation",
main="Final Size of Conakry Simulations")
abline(v=4566,col="orange",lwd=2,lty=2)
text(4750,50,"Reported Epidemic \n Size = 4,566",cex=.9)
dev.off()
## --------------------------------------------------------------- ##
## Let's do a little profiling of our beta and gamma parameters to ##
## see how peaky they look ##
## --------------------------------------------------------------- ##
## first for beta
beta.range <- seq(coef(better.mif.con2)['beta1']*.6,coef(better.mif.con2)['beta1']*1.4,length=15)
mf.con.beta.prof <- foreach(i=1:length(beta.range),
.inorder=FALSE,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
theta.guess <- coef(better.mif.con2)
theta.guess['beta1'] <- beta.range[i]
m1 <- mif(
better.mif.con2,
Nmif=70,
start=theta.guess,
pars=c('gamma'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(gamma=0.15,I.0=0.15,S.0=0.15),
Np=2000,
ic.lag=length(con.mod@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2"
)
ll <- replicate(n=3,logLik(pfilter(m1,Np=10000)))
list(mif=m1,ll=ll)
}
## a few clearly didn't coverge
theta.guess <- coef(better.mif.con2)
theta.guess['beta1'] <- beta.range[11]
redo.m1 <- mif(
better.mif.con2,
Nmif=120,
start=theta.guess,
pars=c('gamma'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(gamma=0.15,I.0=0.15,S.0=0.15),
Np=3000,
ic.lag=length(con.mod.win@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2"
)
ll.redo1 <- replicate(n=3,logLik(pfilter(redo.m1,Np=10000)))
beta.logliks <- colMeans(sapply(mf.con.beta.prof,function(x) x[[2]]))
## swap out the mean of the redo
beta.logliks[11] <- mean(ll.redo1)
cis <- max(beta.logliks) - qchisq(.95,1)/2
pdf("Plots/proflik-beta-con.pdf")
plot(beta.range,beta.logliks,
ylim=c(-500,-400),
xlim=c(beta.range[1],beta.range[11]),
xlab="beta",
ylab="log-likelihood",
main="Profile Likelihood of Transmission Parameter (Beta)")
abline(h=cis,lty=2,col=AddAlpha("orange",.75),lwd=2)
abline(v=approx(beta.logliks[5:7],beta.range[5:7],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
abline(v=approx(beta.logliks[8:10],beta.range[8:10],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
text(2.25,-418,sprintf("95%% CI %.2f-%.2f",approx(beta.logliks[5:7],beta.range[5:7],xout=cis)$y,
approx(beta.logliks[8:10],beta.range[8:10],xout=cis)$y))
dev.off()
## approximate 95% CI
gamma.range <- seq(0.58,0.7,length=20)
mf.con.gamma.prof <- foreach(i=1:length(gamma.range),
.inorder=FALSE,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
theta.guess <- coef(better.mif.con2)
theta.guess['gamma'] <- gamma.range[i]
m1 <- mif(
better.mif.con2,
Nmif=200,
start=theta.guess,
pars=c('beta1'),
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(beta1=0.15,I.0=0.15,S.0=0.15),
Np=10000,
ic.lag=length(con.mod@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2"
)
ll <- replicate(n=3,logLik(pfilter(m1,Np=10000)))
list(mif=m1,ll=ll)
}
plot(gamma.range,colMeans(sapply(mf.con.gamma.prof,function(x) x[[2]])))
gamma.logliks <- colMeans(sapply(mf.con.gamma.prof,function(x) x[[2]]))
plot(gamma.range,gamma.logliks,ylim=c(-425,-410))
cis <- max(gamma.logliks) -qchisq(.95,1)/2
pdf("Plots/proflik-gamma-con.pdf")
plot(gamma.range,gamma.logliks,xlab="gamma",ylab="log-likelihood",main="Profile Likelihood of Gamma")
abline(h=cis,lty=2,col=AddAlpha("orange",.75),lwd=2)
abline(v=approx(gamma.logliks[5:7],gamma.range[5:7],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
abline(v=approx(gamma.logliks[14:17],gamma.range[14:17],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
text(2,-337.5,sprintf("95%% CI %.2f-%.2f",approx(gamma.logliks[5:7],gamma.range[5:7],xout=cis)$y,
approx(gamma.logliks[14:17],gamma.range[14:17],xout=cis)$y))
dev.off()
beta.seq <- seq(2.5,6,length=30)
gamma.seq <- seq(2,5,length=30)
r.seq <- expand.grid(beta.seq,gamma.seq)
R.prof <- foreach(i=1:nrow(r.seq),
.inorder=FALSE,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
theta.guess <- coef(better.mif.zim)
theta.guess['gamma'] <- r.seq[i,2]
theta.guess['beta1'] <- r.seq[i,1]
m1 <- mif(
better.mif.zim,
Nmif=50,
start=theta.guess,
ivps=c('I.0','S.0'),
transform=TRUE,
rw.sd=c(I.0=0.1,S.0=0.1),
Np=2000,
ic.lag=length(zim.mod@data)/2,
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2"
)
ll <- replicate(n=3,logLik(pfilter(m1,Np=10000)))
list(mif=m1,ll=ll)
}
saveRDS(R.logliks,file="GeneratedData/R_proflik.rds")
## get CI for R
prof.lik <- R.logliks
prof.mat <- matrix(prof.lik[,3],nrow=30)
colnames(prof.mat) <- beta.seq
rownames(prof.mat) <- gamma.seq
ci.lines <- contourLines(prof.mat*2,levels=max(prof.mat)*2 - 3.814/2)
## we will only take the middle
range(approx(seq(0,1,length=30),beta.seq,ci.lines[[2]]$x)$y/approx(seq(0,1,length=30),gamma.seq,ci.lines[[2]]$y)$y)
## -------------------------------------- ##
## Now Port au Prince ##
## we will start with the first wave only ##
## -------------------------------------- ##
## Load the (unvaccinated, seasonal) C-coded pomp model constructors
source("Source/leakyvac-pomp-model-inC-novac-seasonal.R")
## need to get a better population estimate
## this is likely to be tricky given the IDP
## population at the time
pop.portap <- 2.1e6
## Daily case data for Port-au-Prince.
## NOTE(review): first.wave.only=F loads the FULL series, which contradicts
## the banner comment above ("first wave only") -- confirm which is intended.
portap.dat <- get.haiti.data(first.wave.only=F)
## Seasonal covariate table: B-spline basis over the observation window.
## Earlier (rejected) basis choices kept for reference:
#covartab <- make.covartab(0,nrow(portap.dat)+1,byt=1,degree=3,nbasis=4)
#covartab <- make.covartab(0,nrow(portap.dat)+1,byt=1,degree=5,nbasis=5)
#covartab <- make.covartab(0,nrow(portap.dat)+1,byt=1,degree=4,nbasis=4)
covartab <- make.covartab(0,nrow(portap.dat)+1,byt=1,degree=6,nbasis=6)
## build pomp model object
portap.mod <- build.leaky.model.C.seas(pop=pop.portap,
dat=portap.dat,
my.times="day",
my.t0=0,
covar=covartab,
model.name="papmodel")
#portap.mod.win <- window(portap.mod,start=1,end=297)
## specify starting parameters
## remember these are in units of weeks
## Initial-condition fractions: 10 exposed and 10 infectious individuals
E0 <- 10/pop.portap
I0 <- 10/pop.portap
A0 <- 0.0/pop.portap
R0 <- 0.000
S0 <- 1- R0-I0-E0-A0
## Starting parameter vector: beta1..beta6 are the seasonal spline
## coefficients of transmission; rho is reporting probability; iota is
## the import rate; *.0 entries are initial state fractions.
guess.params.portap <- c(gamma=1/2,
sigma=1/1.4,
theta=10,
beta1=1.1,
beta2=.05,
beta3=.5,
beta4=.2,
beta5=.1,
beta6=1,
iota=1e-10,
rho=0.9,#.15
theta0=0.0,
S.0=S0,
E.0=E0,
I.0=I0,
A.0=A0,
R.0=R0)
## Deterministic trajectory matching as a cheap starting fit.
## NOTE(review): start= references mif.portap.best, which is only created
## further down in this file -- this script was evidently run iteratively;
## on a fresh session use the commented guess.params.portap instead.
tm.portap <- traj.match(portap.mod,
start=coef(mif.portap.best),#guess.params.portap,
est=c('beta1',
'beta2',
'beta3',
'beta4',
'beta5',
'beta6',
'rho',
'iota',
'I.0',
'E.0'),
method="Nelder-Mead",
maxit=15000,
transform=TRUE
)
summary(tm.portap)
## Particle-filter log-likelihood at the trajectory-matched parameters
logLik(pfilter(portap.mod,params=coef(tm.portap),Np=10000))
## Visual check: 500 stochastic simulations against the data
sim.portap.tm <- simulate(tm.portap,
params=coef(tm.portap),
nsim=500,
seed=1914679109L,
transform=TRUE)
plot(portap.dat[,2])
for (i in 1:200) {
lines(sim.portap.tm[[i]]@data[1,],lty=2,col=AddAlpha(3,.05))
}
## Iterated-filtering refinement of the 6-df seasonal model, run as a chain
## of mif2 calls with successive continue()s.
## NOTE(review): start=coef(mif.portap.best) is again a forward reference
## (mif.portap.best is defined later) -- a fresh run needs a different start.
mif.portap.6df <- mif(tm.portap,
start=coef(mif.portap.best),
Nmif=100,
ivps = c('E.0','I.0'),
transform=TRUE,
rw.sd=c(
beta1=0.1,
beta2=0.1,
beta3=0.1,
beta4=0.1,
beta5=0.1,
beta6=0.1,
iota=0.1,
rho=0.1,
E.0=.12,
I.0=0.12),
Np=5000,
ic.lag=length(portap.mod@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.03,
method="mif2",
verbose=FALSE)
mif.portap.6df.cont <- continue(mif.portap.6df,Nmif=50)
logLik(pfilter(mif.portap.6df.cont,Np=10000))
## Second stage: same perturbations plus R.0, with slower cooling.
## NOTE(review): rw.sd includes R.0 although ivps lists only E.0/I.0 --
## confirm R.0 is meant to be perturbed here.
mif.portap.6df.next2 <- mif(mif.portap.6df.cont,
Nmif=50,
ivps = c('E.0','I.0'),
transform=TRUE,
rw.sd=c(
beta1=0.1,
beta2=0.1,
beta3=0.1,
beta4=0.1,
beta5=0.1,
beta6=0.1,
iota=0.1,
rho=0.1,
E.0=.12,
I.0=0.12,
R.0=0.12),
Np=5000,
ic.lag=length(portap.mod@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.05,
method="mif2",
verbose=FALSE)
mif.portap.6df.next3 <- continue(mif.portap.6df.next2,Nmif=50)
mif.portap.6df.next4 <- continue(mif.portap.6df.next3,Nmif=50)
## Compare first-stage vs final-stage log-likelihoods
logLik( pfilter(mif.portap.6df,Np=10000))
logLik( pfilter(mif.portap.6df.next4,Np=10000))
#mif.portap.5df.2 <- continue(mif.portap.5df.2,Nmif=50)
#mif.portap <- mif(mif.portap2,Nmif=50)
## run a particle filter
## Particle filter with saved filtered states for the first-stage fit
pf.portap <- pfilter(mif.portap.6df,Np=5000,save.states=TRUE)
logLik(pf.portap)
## Persist the final-stage fit (and immediately reload it, which also
## verifies the file round-trips)
saveRDS(mif.portap.6df.next4,file="GeneratedData/mif-haiti.rds")
mif.portap.6df.next4 <- readRDS("GeneratedData/mif-haiti.rds")
sim.mif.portap <- simulate(mif.portap.6df.next4,
# params=tmp,
nsim=500,
transform=TRUE)
## Data with simulated trajectories overlaid
plot(portap.dat[,2],ylim=c(0,2000))
for (i in 1:500) {
lines(sim.mif.portap[[i]]@data[1,],lty=2,col=AddAlpha(3,.05))
}
par(new=T)
## Overlay the fitted seasonal transmission spline divided by gamma --
## presumably a seasonal R0-like quantity (transmission/recovery); the
## first two covariate rows are dropped to align with the data window.
plot((covartab[,2]*coef(mif.portap.6df.cont)["beta1"] +
covartab[,3]*coef(mif.portap.6df.cont)["beta2"] +
covartab[,4]*coef(mif.portap.6df.cont)["beta3"] +
covartab[,5]*coef(mif.portap.6df.cont)["beta4"] +
covartab[,6]*coef(mif.portap.6df.cont)["beta5"] +
covartab[,7]*coef(mif.portap.6df.cont)["beta6"])[-c(1:2)]/coef(mif.portap.6df.cont)["gamma"],
ylab="",type="l",col="red")
## Multi-start iterated filtering: 10 parallel chains, each from a
## log-normally jittered copy of the current best parameters plus freshly
## sampled initial conditions.
estpars <- c("beta1","beta2","beta3","beta4","beta5","beta6","rho","iota")
mf.portap <- foreach(i=1:10,
.inorder=FALSE,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
theta.guess <- coef(mif.portap.6df.cont)
## Jitter the estimated parameters on the log scale (sd 0.1)
theta.guess[estpars] <- rlnorm(
n=length(estpars),
meanlog=log(theta.guess[estpars]),
sdlog=0.1
)
## now sample from I.0
## NOTE(review): despite the "# people" comments, these are counts divided
## by population, i.e. initial-state FRACTIONS of 1-100 people.
I.0.count <- runif(1,1,100)/pop.portap # people
E.0.count <- runif(1,1,100)/pop.portap # people
theta.guess['E.0'] <- E.0.count
theta.guess['I.0'] <- I.0.count
## Keep the state fractions summing to 1: shrink S.0, put remainder in R.0
theta.guess['S.0'] <- theta.guess['S.0'] - I.0.count - E.0.count
theta.guess['R.0'] <- max(0,1-sum(theta.guess[c('S.0','I.0','E.0','A.0')]))
m1 <- mif(
tm.portap,
Nmif=100,
start=theta.guess,
ivps=c('I.0','E.0'),
transform=TRUE,
rw.sd=c(
beta1=0.1,
beta2=0.1,
beta3=0.1,
beta4=0.1,
beta5=0.1,
beta6=0.1,
iota=0.1,
rho=0.1,
I.0=0.1,
E.0=0.1,
R.0=0.1),
Np=5000,
ic.lag=length(portap.mod@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.03,
method="mif2"
)
## 10 replicate log-likelihood evaluations per chain
ll <- replicate(n=10,logLik(pfilter(m1,Np=10000)))
list(mif=m1,ll=ll)
}
## look at logliks: index of the chain with the best mean log-likelihood
which.max(
colMeans(sapply(mf.portap,function(x) x[[2]]))
)
## Evaluate and simulate the (manually chosen) 4th multi-start chain
pf.best <- pfilter(mf.portap[[4]][[1]],Np=20000,save.states=TRUE)
logLik(pf.best)
sim.portap.mif <- simulate(mf.portap[[4]][[1]],
nsim=500,
seed=1914679109L,
transform=TRUE)
## NOTE(review): `test` is not defined anywhere in this file -- this call
## will fail on a fresh session; its result is overwritten immediately
## below in any case, so this block appears to be interactive leftovers.
mif.portap.best <- mif(test,
Nmif=50,
ivps = c('E.0','I.0'),
transform=TRUE,
rw.sd=c(
beta1=0.1,
beta2=0.1,
beta3=0.1,
beta4=0.1,
beta5=0.1,
beta6=0.1,
rho=0.1,
iota=0.1,
E.0=0.12,
I.0=0.12),
Np=6000,
ic.lag=length(portap.mod@data),
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.01,
method="mif2",
verbose=FALSE)
## The final-stage chained fit is declared the best model
mif.portap.best <- mif.portap.6df.next4
pf.best <- pfilter(mif.portap.best,Np=20000,save.states=TRUE)
logLik(pf.best)
## Filtered state means (per time point) from the saved particle states
est.states.portaup <- sapply(pf.best@saved.states,rowMeans)
saveRDS(est.states.portaup,file="GeneratedData/mif-haiti-states.rds")
## ---------------------- ##
## Bring in parallel runs ##
## ---------------------- ##
## mf.portap <- readRDS(file="GeneratedData/parallel-mif-portaup-6df-fitrho.rds")
## mif.portap.best <- mf.portap[[5]][[1]]
##saveRDS(mif.portap.best,file="GeneratedData/mif-haiti-REV.rds")
## NOTE(review): the reload below assigns to mf.portap.best ("mf."), but the
## simulate() call uses mif.portap.best ("mif.") from the previous section;
## the names differ by one letter -- confirm which object is intended.
mf.portap.best <- readRDS(file="GeneratedData/mif-haiti.rds")
sim.portap.mif <- simulate(mif.portap.best,
#params=coef(mif.portap.best),
nsim=500,
seed=1914679109L,
transform=TRUE)
## Final Port-au-Prince figure: data, simulated epidemics with mean and
## 95% prediction band, plus the seasonal forcing function on a 2nd axis.
pdf("Plots/mif-pap-unvac-6df-best-seas-R0.pdf")
## Matrix of simulated case series: rows = days, columns = simulations
pap.mat <- sapply(sim.portap.mif,function(x) x@data[1,])
pap.means <- apply(pap.mat,1,mean)
pap.ci <- apply(pap.mat,1,function(x) quantile(x,c(.025,.975)))
plot(portap.dat[,2],ylim=c(0,2200),xlab="epidemic day",ylab="cases per day",col=4,pch=4)
for (i in 1:300) {
lines(sim.portap.mif[[i]]@data[1,],lty=2,col=AddAlpha(4,.02))
}
lines(pap.means,col=4,lwd=2)
lines(pap.ci[1,],col=4,lty=2)
lines(pap.ci[2,],col=4,lty=2)
legend("topright",c("simulated epidemic",
"mean simulated epidemic",
"95% prediction interval",
"seasonal forcing function",
"data"),
col=c(AddAlpha(4,0.1),4,4,3,"black"),lty=c(1,1,2,4,-1),pch=c(-1,-1,-1,-1,4),bty="n")
#dev.off()
par(new=T)
## Overlay the fitted seasonal transmission spline / gamma (see earlier
## overlay for the same construction), first two covariate rows dropped
plot(
# pf.best@states["S",]/colSums(pf.best@states[1:5,])*
((covartab[,2]*coef(mif.portap.best)["beta1"] +
covartab[,3]*coef(mif.portap.best)["beta2"] +
covartab[,4]*coef(mif.portap.best)["beta3"] +
covartab[,5]*coef(mif.portap.best)["beta4"] +
covartab[,6]*coef(mif.portap.best)["beta5"] +
covartab[,7]*coef(mif.portap.best)["beta6"]
))
[-c(1:2)]/coef(mif.portap.best)["gamma"]
,ylab="",axes=F,xlab="",type="l",col=3,lty=4)
axis(4)
dev.off()
## Histogram of simulated final epidemic sizes (first 297 days) with the
## reported total marked by a vertical line.
pdf("Plots/hist-finalsize-uncon-pap-6df-full.pdf")
hist(colSums(sapply(sim.portap.mif,function(x) x@data[1,1:297])),
col="grey",border="white",breaks="fd",
xlab="Final Epidemic Size of Simulation",
main="Final Size of Port au Prince Simulations")
abline(v=sum(mif.portap.best@data),col="orange",lwd=2,lty=2)
text(129000,70,"Reported Epidemic \n Size = 119,902",cex=.9)
dev.off()
## compare.mif(
## sapply(mf.portap,function(x) x[[1]])
## )
## mif.portap.cont <- mif(mf.portap[[8]][[1]],Nmif=50)
## saveRDS(mf.portap,file="GeneratedData/parallel-mif-portaup-6df-fitrho.rds")
## tmp <- readRDS("GeneratedData/parallel-mif-portaup.rds")
## Let's do some profiling of parameters to get CIs
## Profile likelihood for the reporting probability rho: fix rho at each of
## 50 values, re-estimate the transmission spline coefficients and iota
## with mif2, and score with 3 replicate particle filters.
rho.range <- seq(0.9,.95,length=50)
mf.pap.rho.prof <- foreach(i=1:length(rho.range),
.inorder=FALSE,
.options.multicore=list(set.seed=TRUE)
) %dopar%
{
## Current best parameters with rho pinned at the profile value
theta.guess <- coef(mf.portap.best)
theta.guess['rho'] <- rho.range[i]
m1 <- mif(
mf.portap.best,
Nmif=50,
start=theta.guess,
transform=TRUE,
pars=c("beta1","beta2","beta3","beta4","beta5","beta6","iota"),
rw.sd=c(
beta1=0.1,
beta2=0.1,
beta3=0.1,
beta4=0.1,
beta5=0.1,
beta6=0.1,
iota=0.1),
Np=5000,
var.factor=1,
cooling.type="hyperbolic",
cooling.fraction=0.03,
method="mif2",
verbose=T
)
ll <- replicate(n=3,logLik(pfilter(m1,Np=20000)))
list(mif=m1,ll=ll)
}
## Load the saved rho profile and summarise the replicate log-likelihoods.
#saveRDS(mf.pap.rho.prof,file="GeneratedData/rho_proflik.rds")
mf.pap.rho.prof <- readRDS(file="GeneratedData/rho_proflik3.rds")
## Worst (minimum) replicate log-likelihood at each rho value; vapply
## guarantees a numeric vector (sapply could silently change return type).
rho.logliks <- vapply(mf.pap.rho.prof, function(x) min(x[[2]]), numeric(1))
## 95% CI cutoff from the chi-square(1) profile-likelihood criterion
cis <- max(rho.logliks) - qchisq(.95, 1)/2
## Best (circles) and worst (crosses) replicate log-likelihoods vs rho
plot(rho.range, vapply(mf.pap.rho.prof, function(x) max(x[[2]]), numeric(1)),
     ylim = c(-2100, -2000))
points(rho.range, rho.logliks, pch = 3)
## BUG FIX: a dangling ",ylim=c(-2100,-2030))" fragment followed the
## points() call in the original -- an editing leftover that was a syntax
## error and aborted source() here; it has been removed.
## Zimbabwe gamma profile figure (revised).
## NOTE(review): `cis` was just recomputed from the rho profile above, but
## this plot shows the gamma profile (gamma.range/gamma.logliks from much
## earlier) -- recompute cis from gamma.logliks before regenerating this
## figure, otherwise the cutoff line is wrong.
pdf("Plots/proflik-gamma-zim-REV.pdf")
plot(gamma.range,gamma.logliks,xlab="gamma",ylab="log-likelihood",main="Profile Likelihood of Gamma (Zimbabwe)")
abline(h=cis,lty=2,col=AddAlpha("orange",.75),lwd=2)
## CI endpoints by interpolation on the rising (5:7) and falling (20:25) limbs
abline(v=approx(gamma.logliks[5:7],gamma.range[5:7],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
abline(v=approx(gamma.logliks[20:25],gamma.range[20:25],xout=cis),lty=2,col=AddAlpha("orange",.75),lwd=2)
text(3.5,-337.5,sprintf("95%% CI %.2f-%.2f",
approx(gamma.logliks[5:7],gamma.range[5:7],xout=cis)$y,
approx(gamma.logliks[20:25],gamma.range[20:25],xout=cis)$y))
dev.off()
## exploring alternative fits for Haiti
haiti.para <- readRDS(file="GeneratedData/parallel-mif-portaup-6df-fitrho.rds")
## Mean replicate log-likelihood per chain, ranked best-first
logliks <- colMeans(sapply(haiti.para,function(x) x[[2]]))
order(logliks,decreasing = T)
|
fef1c1008d821c9915eb0a3dad90062bed5a7598 | f530b7e7b0de3d3083ebdf9cd507e35b61227664 | /R/utils.R | 031ed41e8af80b559a9e270c9c95e1f7739bfd69 | [] | no_license | shbrief/GenomicSuperSignature | cc0eb4dff165477b3aae981c8c6cf800ee3c38b2 | ca168ab8d5de2908416b477ebcc0f3f42eb80a04 | refs/heads/master | 2023-05-10T19:50:08.389840 | 2023-05-02T03:17:58 | 2023-05-02T03:17:58 | 278,696,963 | 12 | 5 | null | 2022-08-23T19:06:23 | 2020-07-10T17:41:12 | R | UTF-8 | R | false | false | 4,294 | r | utils.R | ### Extract expression matrix from different classes of input datasets
### Pull the raw expression matrix out of a supported container.
### Accepts an ExpressionSet, a SummarizedExperiment, or a plain matrix;
### anything else is an error.
.extractExprsMatrix <- function(dataset) {
    ## Guard-clause dispatch on container class
    if (is(dataset, "ExpressionSet")) return(Biobase::exprs(dataset))
    if (is(dataset, "SummarizedExperiment")) return(SummarizedExperiment::assay(dataset))
    if (is.matrix(dataset)) return(dataset)
    stop("'dataset' should be one of the following objects: ExpressionSet,
SummarizedExperiment, and matrix.")
}
### Check ind validity
### Check ind validity: stop with an informative error if any requested RAV
### index is not present in the model.
###
### RAVmodel : a RAVmodel object (RAV names like "RAV12" in colData)
### ind      : numeric vector of RAV indices to validate
###
### Returns nothing useful; called for its side effect (stop on failure).
.availableRAV <- function(RAVmodel, ind) {
    availableRAV <- as.numeric(gsub("RAV", "", colData(RAVmodel)$RAV))
    ## Vectorized membership test replaces the original element-wise loop
    missingInd <- ind[!ind %in% availableRAV]
    ## Print error message if any of the ind doesn't exist.
    if (length(missingInd) > 0) {
        y <- paste(paste0("RAV", missingInd), collapse = ", ") # combine non-existing ind
        msg <- paste0("Selected ind (", y, ") doesn't exist.")
        stop(msg)
    }
}
## Restructure RAVmodel metadata slot
## Return the cluster membership vector of a RAVmodel, accounting for the
## storage-location change across model versions: from ">= 0.0.7" it lives
## in the metadata slot, before that in colData.
.RAVmodelVersion <- function(RAVmodel) {
    if (version(RAVmodel) == ">= 0.0.7") {
        cluster <- S4Vectors::metadata(RAVmodel)$cluster
    } else {
        cluster <- colData(RAVmodel)$cluster
    }
    ## BUG FIX: the original ended on the assignments above, so the value
    ## was returned invisibly (the value of an assignment); return it
    ## explicitly and visibly.
    return(cluster)
}
## Extract variance explained by PCs in a given cluster
## Summarise the variance explained by each PC that belongs to cluster
## `ind`: one row per component with the study accession, PC number, and
## percent variance explained (rounded to 2 decimals).
.varByPCsInCluster <- function(RAVmodel, ind) {
    # components in clusters (names like "<study>.PC<j>")
    cl_membership <- metadata(RAVmodel)$cluster
    components <- names(which(cl_membership == ind))
    # PCA summary, one entry per training study
    pcaSummary <- trainingData(RAVmodel)$PCAsummary
    # Study accession = text before ".PC"; vapply guarantees an unnamed
    # character vector (replaces the original lapply/%>%/unlist chain)
    Projs <- vapply(components,
                    function(x) unlist(strsplit(x, "\\.PC"))[1],
                    character(1), USE.NAMES = FALSE)
    data <- pcaSummary[Projs]
    # Extract variance explained
    input_summary <- as.data.frame(matrix(ncol = 3, nrow = length(data)))
    colnames(input_summary) <- c("studyName", "PC", "Variance explained (%)")
    for (i in seq_along(data)) {
        # PC number = text after ".PC"
        j <- as.numeric(unlist(strsplit(components[i], "\\.PC"))[2])
        input_summary[i, 1] <- Projs[i]
        input_summary[i, 2] <- j
        # "Variance" row of this study's PCA summary, as a percentage
        input_summary[i, 3] <- round(data[[i]]["Variance", j] * 100, digits = 2)
    }
    return(input_summary)
}
## Message for low-quality RAVs
## Emit a message() for each requested RAV index that appears in any of the
## package's low-quality filter lists (small cluster size, poor GSEA
## annotation, redundancy). Purely informational; returns nothing useful.
##
## RAVmodel      : a RAVmodel object
## ind           : numeric vector of RAV indices to check
## filterMessage : set FALSE to skip all checking/messaging
.lowQualityRAVs <- function(RAVmodel, ind, filterMessage = TRUE) {
    if (isTRUE(filterMessage)) {
        ## Load filterList into a private environment so data() does not
        ## pollute the caller's workspace
        local_data_store <- new.env(parent = emptyenv())
        data("filterList", envir = local_data_store, package = "GenomicSuperSignature")
        filterList <- local_data_store[["filterList"]]
        ## Select the filters applicable to this RAVmodel, keyed on the
        ## number of training datasets (536) and the gene-set collection used
        filterListNames <- c("Cluster_Size_filter", "GSEA_C2_filter",
                             "GSEA_PLIERpriors_filter", "Redundancy_filter")
        c2 <- "MSigDB C2 version 7.1"
        plier_priors <- "Three priors from PLIER (bloodCellMarkersIRISDMAP, svmMarkers, and canonicalPathways)"
        if (nrow(trainingData(RAVmodel)) == 536 & geneSets(RAVmodel) == c2) {
            filterList <- filterList[filterListNames[c(1,2,4)]]
        } else if ((nrow(trainingData(RAVmodel)) == 536 &
                    geneSets(RAVmodel) == plier_priors)) {
            filterList <- filterList[filterListNames[c(1,3,4)]]
        }
        ## Check whether each index belongs to any of the selected filter
        ## lists; message all matching filter names (suffix "_filter" removed)
        for (i in ind) {
            res <- vapply(filterList, function(x) {i %in% x}, logical(1))
            if (any(res)) {
                filtered <- paste(names(res)[which(res == TRUE)], collapse = ", ") %>%
                    gsub("_filter", "", .)
                msg <- paste(paste0("RAV", i), "can be filtered based on", filtered)
                message(msg)
            }
        }
        ## More information on GenomicSuperSignaturePaper GitHub page.
        ## NOTE(review): if re-enabled, this would use `res` from the LAST
        ## loop iteration only -- rework before uncommenting.
        # if (any(res)) {message("Information on filtering : bit.ly/rav_filtering")}
    }
}
## Study metadata for different RAVmodels
## Load the study-level metadata table matching a given RAVmodel.
## The model is identified by a sentinel accession among its training
## datasets: "DRP000987" marks the 536-dataset refine.bio model,
## "GSE13294" marks the curated CRC/OV model.
.getStudyMeta <- function(RAVmodel) {
    trainedOn <- rownames(trainingData(RAVmodel))
    extdataDir <- system.file("extdata", package = "GenomicSuperSignature")
    if ("DRP000987" %in% trainedOn) {
        ## 536 datasets from refine.bio
        studyMeta <- utils::read.table(file.path(extdataDir, "studyMeta.tsv.gz"))
    } else if ("GSE13294" %in% trainedOn) {
        ## 8 CRC and 10 OV datasets from curated data packages
        studyMeta <- utils::read.table(file.path(extdataDir, "studyMeta_CRCOV.tsv"))
    }
    return(studyMeta)
}
|
c40629b36f40e8b479ca14459eb9a325dfeb4752 | f44f88f39935e2879ebb3ff7f2abb11258e5d46f | /beast_scripts/v2.5_pipeline/v2.5_GetSeq.R | a957f6db5496974f18caf19a45e6c30450836d6b | [] | no_license | oncoapop/data_reporting | d5d98b9bf11781be5506d70855e18cf28dbc2f29 | 7bb63516a4bc4caf3c92e31ccd6bcd99a755322b | refs/heads/master | 2022-08-23T20:21:18.094496 | 2020-05-22T00:51:51 | 2020-05-22T00:51:51 | 261,604,041 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,566 | r | v2.5_GetSeq.R | ##################################################
## Script to get sequence (not SNV/SNP masked)
## around SNV or indels to design primers for
## Targeted resequencing on the MiSeq
## Aparicio Lab WSOP 2013-001 developed by
## Dr Damian Yap , Research Associate
## dyap@bccrc.ca Version 3.0 (Sep 2013)
## Pipeline use gets parse args from html form
##################################################
# These commands must be specifed in order for this script to work
# source("http://www.bioconductor.org/biocLite.R");
# source("http://www.bioconductor.org/biocLite.R"); biocLite("BSgenome");
# biocLite("BSgenome.Hsapiens.UCSC.hg19"); library('BSgenome.Hsapiens.UCSC.hg19')
## Provides Hsapiens (hg19) and getSeq() used in the main loop below
library('BSgenome.Hsapiens.UCSC.hg19')
# if run directly uncomment the sample name
# Command line `Rscript v2.5_GetSeq.R --no-save --no-restore --args $dir/$sample/$file`
# This takes the 4th argument (see str above) which is sample name
args <- commandArgs(trailingOnly = TRUE)
input <- args[4]
# To test this programme in R using source
# commandArgs <- function() "TEST/123/20130926214630"
# source(file="~/Scripts/v2.5_pipeline/v2.5_GetSeq.R")
# For testing only uncomment for production
# input <- "TEST/123/20130926214630"
## The single argument is "<Project>/<sample>/<posfile>" -- split it apart
Project <- strsplit(input, split="/")[[1]][1]
sample <- strsplit(input, split="/")[[1]][2]
posfile <- strsplit(input, split="/")[[1]][3]
## Echo the parsed pieces for the pipeline log
print("Directory")
print(Project)
print("Sample_ID")
print(sample)
print("File")
print(posfile)
## Working directories --------------------------------------------------
## BUG FIX: the original path lacked the leading "/" ("home/dyap/..."),
## which made setwd() resolve relative to the current directory and fail
## unless the script happened to be launched from "/".
homebase <- "/home/dyap/Projects/PrimerDesign"
setwd(homebase)
# all files from this point should be hg19
hg19file <- paste(posfile, "hg19", sep="-")
# commented lines are Done by v2.5_primerdesign.cgi
# projdir=paste("mkdir", Project, sep=" ")
# system(projdir)
# setwd(paste(homebase,Project,sep="/"))
# samdir=paste("mkdir", sample, sep=" ")
# system(samdir)
## Per-sample working directory: <homebase>/<Project>/<sample>
wd <- file.path(homebase, Project, sample)
setwd(wd)
#system('mkdir positions')
## Output subdirectories; dir.create is portable and silent if the
## directory already exists (the original shelled out to mkdir)
dir.create("Annotate", showWarnings = FALSE)
dir.create("primer3", showWarnings = FALSE)
#############################################
# Save input files under $homebase/positions#
#############################################
##############################################
###### User defined variables ######
# Directory and file references
basedir=wd
sourcedir=paste(basedir,"positions", sep="/")
p3dir=paste(basedir,"primer3", sep="/")
annpath=paste(basedir,"Annotate", sep="/")
######################
# These are the input files
snvfile=paste(posfile, "hg19.csv", sep="-")
input=paste(sourcedir,snvfile,sep="/")
#######################################
# This is the name of the primer3 design file
p3file=paste(posfile,"p3_design.txt",sep="_")
outfile=paste(p3dir,p3file,sep="/")
###############################################
# Annotation output (ANNOVAR-style) file
file1 = paste(annpath, paste(posfile, "Annotate.csv", sep="_") ,sep="/")
###############################################
# Positions output file
file2 = paste(sourcedir, paste(posfile, "positions.txt", sep="_") ,sep="/")
# offsets (sequences on either side of SNV,indel for matching only)
# NOTE(review): WToffset is never used below (the loop uses `offset`,
# defined after the data-frame skeletons) -- confirm it is dead code.
WToffset=5
## Input table: chr, position, <unused>, sample id[, masked sequence]
snpdf <- read.csv(file=input, stringsAsFactors = FALSE, header= FALSE)
# For positions (one row per input variant, filled in the loop below)
posdf <- data.frame(Chr = rep("", nrow(snpdf)),
Pos1 = rep(0, nrow(snpdf)),
ID = rep("", nrow(snpdf)),
stringsAsFactors = FALSE)
# For annotation files
andf <- data.frame(Chr = rep("", nrow(snpdf)),
Pos1 = rep(0, nrow(snpdf)),
Pos2 = rep(0, nrow(snpdf)),
WT = rep("", nrow(snpdf)),
SNV = rep("", nrow(snpdf)),
stringsAsFactors = FALSE)
# For SNV matching
outdf <- data.frame(ID = rep("", nrow(snpdf)),
Chr = rep("", nrow(snpdf)),
Pos1 = rep(0, nrow(snpdf)),
Pos2 = rep(0, nrow(snpdf)),
SNV = rep("", nrow(snpdf)),
Cxt = rep("", nrow(snpdf)),
Seq = rep("", nrow(snpdf)),
stringsAsFactors = FALSE)
## Flank length used for the sequence context in the main loop
offset <- 5
## Main per-variant loop: look up the reference (wild-type) base and a
## short flanking context from hg19, then fill the design (outdf),
## position (posdf) and annotation (andf) tables in parallel.
## BUG FIXES vs. the original:
##   * the bare line "IF masked sequence is provided" was not commented
##     out and was a syntax error that prevented the script from parsing;
##   * `sequence` was never assigned (its assignment was commented out),
##     so `outdf$Seq[ri] <- sequence` captured the base-R sequence()
##     FUNCTION and errored; it now reads column 5 when present, else NA;
##   * seq_len() replaces seq() so a zero-row input does no iterations.
for (ri in seq_len(nrow(snpdf))) {
    chr <- snpdf[ri, 1]
    position1 <- as.numeric(snpdf[ri, 2])
    # for SNV the position is the same for both
    position2 <- as.numeric(snpdf[ri, 2])
    # NOTE: this overwrites the sample ID parsed from the command line
    sample <- snpdf[ri, 4]
    # IF a masked sequence is provided it is taken from column 5
    sequence <- if (ncol(snpdf) >= 5) as.character(snpdf[ri, 5]) else NA_character_
    # Reference base at the variant position
    wt <- as.character(getSeq(Hsapiens, chr, position1, position1))
    # Flanking context: `offset` bases upstream (including the variant
    # base) plus `offset` bases downstream
    cxt <- as.character(paste(getSeq(Hsapiens, chr, position1 - offset, position1),
                              getSeq(Hsapiens, chr, position2 + 1, position2 + offset),
                              sep = ''))
    outdf$ID[ri] <- paste(paste(sample, chr, sep = "_"), position1, sep = "_")
    outdf$Chr[ri] <- chr
    outdf$Pos1[ri] <- position1
    outdf$Pos2[ri] <- position2
    outdf$SNV[ri] <- wt
    outdf$Cxt[ri] <- cxt
    outdf$Seq[ri] <- sequence
    # Progress output for the pipeline log
    print(outdf$ID[ri])
    posdf$ID[ri] <- outdf$ID[ri]
    posdf$Chr[ri] <- outdf$Chr[ri]
    posdf$Pos1[ri] <- outdf$Pos1[ri]
    # Fake the SNV to be just the complement of WT position (as SNV allele is not known)
    if (wt == "A") snv <- "T"
    if (wt == "C") snv <- "G"
    if (wt == "G") snv <- "C"
    if (wt == "T") snv <- "A"
    # ANNOVAR-style annotation row: chromosome without the "chr" prefix
    andf$Chr[ri] <- gsub("chr", "", outdf$Chr[ri])
    andf$Pos1[ri] <- outdf$Pos1[ri]
    andf$Pos2[ri] <- outdf$Pos2[ri]
    andf$WT[ri] <- outdf$SNV[ri]
    andf$SNV[ri] <- snv
}
# Output file design.csv: primer3 design table
print(outdf)
write.csv(outdf, file = outfile )
# Output file positions.txt
print(posdf)
write.csv(posdf, file = file2 )
# Format for ANNOVAR <15 43762161 43762161 T C>
print(andf)
write.csv(andf, file = file1)
print("v2.5_GetSeq.R complete...")
|
fb4988d74274ed15612a048b399d05bf621d461b | 1fc02d5293e23639d667acc9c228b761478206e2 | /man/bonfInfinite.Rd | 7ea225165a90d43a9d55161c0ebed36998e42531 | [] | no_license | dsrobertson/onlineFDR | caf7fa9d6f52531170b3d5caa505a15c87d6db11 | 2e5a3eaf9cf85d2c04a587ad3dd8783f66435159 | refs/heads/master | 2023-04-29T11:25:12.532739 | 2023-04-12T10:30:23 | 2023-04-12T10:30:23 | 129,420,795 | 14 | 4 | null | 2023-04-12T10:33:39 | 2018-04-13T15:27:02 | R | UTF-8 | R | false | true | 2,559 | rd | bonfInfinite.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bonfInfinite.R
\name{bonfInfinite}
\alias{bonfInfinite}
\title{Online FDR control based on a Bonferroni-like test}
\usage{
bonfInfinite(
d,
alpha = 0.05,
alphai,
random = TRUE,
date.format = "\%Y-\%m-\%d"
)
}
\arguments{
\item{d}{Either a vector of p-values, or a dataframe with three columns: an
identifier (`id'), date (`date') and p-value (`pval'). If no column of dates
is provided, then the p-values are treated as being ordered in sequence,
arriving one at a time.}
\item{alpha}{Overall significance level of the FDR procedure, the default is
0.05.}
\item{alphai}{Optional vector of \eqn{\alpha_i}, where hypothesis \eqn{i} is
rejected if the \eqn{i}-th p-value is less than or equal to \eqn{\alpha_i}.
A default is provided as proposed by Javanmard and Montanari (2018),
equation 31.}
\item{random}{Logical. If \code{TRUE} (the default), then the order of the
p-values in each batch (i.e. those that have exactly the same date) is
randomised.}
\item{date.format}{Optional string giving the format that is used for dates.}
}
\value{
\item{d.out}{ A dataframe with the original data \code{d} (which
will be reordered if there are batches and \code{random = TRUE}), the
adjusted signifcance thresholds \code{alphai} and the indicator function of
discoveries \code{R}, where \code{R[i] = 1} corresponds to hypothesis
\eqn{i} being rejected (otherwise \code{R[i] = 0}).}
}
\description{
This funcion is deprecated, please use \code{\link{Alpha_spending}} instead.
}
\details{
Implements online FDR control using a Bonferroni-like test.
The function takes as its input either a vector of p-values, or a dataframe
with three columns: an identifier (`id'), date (`date') and p-value (`pval').
The case where p-values arrive in batches corresponds to multiple instances of
the same date. If no column of dates is provided, then the p-values are
treated as being ordered in sequence, arriving one at a time.
The procedure controls FDR for a potentially infinite stream of p-values by
using a Bonferroni-like test. Given an overall significance level
\eqn{\alpha}, we choose a (potentially infinite) sequence of non-negative
numbers \eqn{\alpha_i} such that they sum to \eqn{\alpha}. Hypothesis \eqn{i}
is rejected if the \eqn{i}-th p-value is less than or equal to \eqn{\alpha_i}.
}
\references{
Javanmard, A. and Montanari, A. (2018) Online Rules for Control of
False Discovery Rate and False Discovery Exceedance. \emph{Annals of
Statistics}, 46(2):526-554.
}
|
e2eebd99e92839705d9e3b51196089ce5393cf78 | 73fca71f8407e428d3b891289d758778c03b7bec | /man/track.info.Rd | e8f99990034f77585c666bed6b3a4f8c1e35c87a | [] | no_license | cran/trackObjs | 7ada054878bf3de45d793988f7979cc6f464afef | 4c37bd207a34ead6d7fc6b347de1a7261b87e39b | refs/heads/master | 2020-05-30T08:59:57.519980 | 2012-09-22T00:00:00 | 2012-09-22T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,935 | rd | track.info.Rd | \name{track.info}
\alias{track.filename}
\alias{track.datadir}
\alias{track.info}
\alias{env.is.tracked}
\alias{tracked.envs}
\title{Return filenames and directories for tracked variables.}
\description{Return filenames and directories for tracked variables.}
\usage{
track.filename(expr, list = character(0), pos = 1, envir = as.environment(pos), suffix = FALSE)
track.datadir(pos = 1, envir = as.environment(pos), relative = TRUE)
track.info(pos = 1, envir = as.environment(pos), all=TRUE)
env.is.tracked(pos = 1, envir = as.environment(pos))
tracked.envs(envirs=search())
}
\arguments{
\item{expr}{ An unquoted variable name }
\item{list}{ A character vector of variable names }
\item{pos}{ The search path position of the environment being tracked
(default is 1 for the global environment)}
\item{envir}{ The environment being tracked. This is an alternate way
(to the use of \code{pos=})
of specifying the environment being tracked, but should be rarely needed.}
\item{suffix}{: Return the filename with the RData suffix (extension)
(taken from \code{track.options("RDataSuffix")})}
\item{relative}{: Return a path relative to the current working
directory, or an absolute path?}
\item{all}{ Return info about all tracked environments?}
\item{envirs}{A list or vector of objects that can be interpreted as
environments by \code{as.environment}}
}
\value{
\describe{
\item{track.filename()}{ returns the filenames for tracked variables.
These names are guaranteed to be distinct for distinct variables.}
\item{track.datadir()}{ returns the directory in which RData files
for tracked variables are stored.}
\item{\code{track.info}:}{ returns a dataframe of information about
environments currently tracked.}
\item{env.is.tracked:}{returns \code{TRUE} or \code{FALSE}}
\item{tracked.envs:}{with no arguments, it returns the names of
tracked environment that are on the search list. If given an
argument that is a vector of environments (or environment names),
it returns the subset of that vector that are tracked environments.}
}
}
\note{
The \code{track} package stores RData files in the directory returned
by \code{track.datadir()}. It is not advisable to write other RData
files to that directory. Filenames for variables may change when an
object is deleted and then recreated.
A warning message like "env R_GlobalEnv (pos 1 on search list) appears
to be an inactive tracked environment, saved from another session and
loaded here inappropriately" indicates that the environment has some
but not all of the structure of a tracked environment. In particular,
the variable \code{.trackingEnv} exists in it, but does not seem to be
connected properly. Some of the bindings may be active bindings, but
they may have come disconnected from the tracking environment. The
most common way that this kind of situation can arise is from doing
\code{save.image()} before \code{track.stop()}, and then reloading the
saved image (e.g., when restarting R).
To fix this situation, do the following:
\enumerate{
\item \code{rm(.trackingEnv, pos=1)}
\item \code{names(which(!sapply(ls(pos=1), bindingIsActive,
as.environment(1))))} # to see which variables have active bindings
\item \code{x1 <- x} # for each variable x that has an active binding
and that you want to save
\item \code{rm(x, pos=1)}
\item \code{save.image()} # to overwrite the old saved .RData file
(only works with position 1)
}
If the inactive tracked environment is at a position other than 1 on the
search list, substitute the appropriate position for 1 in the above.
}
\author{Tony Plate \email{tplate@acm.org}}
\seealso{ \link[=track-package]{Overview} and \link[=track.design]{design} of the \code{track} package. }
\examples{
##############################################################
# Warning: running this example will cause variables currently
# in the R global environment to be written to .RData files
# in a tracking database on the filesystem under R's temporary
# directory, and will cause the variables to be removed temporarily
# from the R global environment.
# It is recommended to run this example with a fresh R session
# with no important variables in the global environment.
##############################################################
library(trackObjs)
track.start(dir=file.path(tempdir(), 'rdatadir4'))
x <- 33
X <- array(1:24, dim=2:4)
Y <- list(a=1:3,b=2)
X[2] <- -1
track.datadir(relative=TRUE)
track.datadir(relative=FALSE)
track.filename(list=c("x", "X"))
env.is.tracked(pos=1)
env.is.tracked(pos=2)
# Would normally not call track.stop(), but do so here to clean up after
# running this example.
track.stop(pos=1, keepVars=TRUE)
}
\keyword{ data }
|
41e2ef84d7abe423a1a6ce4be067ae1ca570c650 | f61ee31916b71a31aca66cc1149ff5526f95b758 | /tests/testthat/test-logreg.R | 1ae798327bfce7e6a43eb32b4e3df518c81a27c3 | [
"MIT"
] | permissive | Ryksmith/blblm | 7346675923fb7b2b6bc01b859ce387020ed939e2 | feabe2014f11d5f6cab6811d19f4988324acf0d7 | refs/heads/master | 2022-10-06T08:00:50.176335 | 2020-06-11T11:36:28 | 2020-06-11T11:36:28 | 270,088,216 | 0 | 0 | null | 2020-06-06T19:54:18 | 2020-06-06T19:54:18 | null | UTF-8 | R | false | false | 238 | r | test-logreg.R | test_that("Logistic regression works", {
data <- iris
labels <- rep(0:1,75)
data$Species <- labels
fit <- blb_logreg(Species ~ Petal.Length * Sepal.Length, data = data, m = 2, B = 100)
expect_equal(length(coef(fit)), 4)
})
|
1a3ed264eeef2d4c62fa49131cce54e70d03cb31 | 0a70bf8f5c7511edb5b0c03f31e12bcabeae92b2 | /R/sigma.R | d927cef9a3561d8c45904b21e011ee7a09e36733 | [
"MIT"
] | permissive | iankloo/sigma | 38505a5111f2108536dc6698cb8bbf56da46b329 | 46e15dc03a7d0d2a008e6f0a99b373c2e5b18851 | refs/heads/master | 2021-01-15T17:40:38.922050 | 2017-06-28T14:17:31 | 2017-06-28T14:17:31 | 40,320,730 | 0 | 0 | null | 2015-08-06T18:30:04 | 2015-08-06T18:30:04 | null | UTF-8 | R | false | false | 902 | r | sigma.R |
#' @import htmlwidgets
#' @export
sigma <- function(gexf, drawEdges = TRUE, drawNodes = TRUE,
                  width = NULL, height = NULL) {
  # Slurp the GEXF graph file into one newline-joined string
  gexf_xml <- paste(readLines(gexf), collapse = "\n")
  # Bundle the graph data with the renderer settings; this list is
  # forwarded to the JavaScript side as the widget's `x` payload
  payload <- list(
    data = gexf_xml,
    settings = list(
      drawEdges = drawEdges,
      drawNodes = drawNodes
    )
  )
  # Hand off to htmlwidgets to construct the widget object
  htmlwidgets::createWidget("sigma", payload, width = width, height = height)
}
#' @export
sigmaOutput <- function(outputId, width = "100%", height = "400px") {
  # Shiny UI placeholder for a sigma widget (htmlwidgets boilerplate)
  shinyWidgetOutput(outputId, "sigma", width = width, height = height,
                    package = "sigma")
}
#' @export
# Server-side render function for sigma widgets (htmlwidgets boilerplate).
# expr   : a (possibly unquoted) expression producing a sigma widget
# env    : environment in which to evaluate expr
# quoted : TRUE if expr is already a quoted expression
renderSigma <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the unevaluated expression unless the caller already quoted it;
  # substitute() must run here, in this frame, for NSE to work correctly
  if (!quoted) { expr <- substitute(expr) } # force quoted
  # Delegate to htmlwidgets' shiny render helper, always passing quoted=TRUE
  shinyRenderWidget(expr, sigmaOutput, env, quoted = TRUE)
}
85fda23ddf7d850892dcc8ee86121eccbcbabf8f | 05a5a1f17f5df9fe295b616fb8d3c2427b2430ac | /man/dat_ckid.Rd | 0bac2ab8845ceb8a611910e8346f1d53b8fac6f2 | [] | no_license | AntiportaD/hrcomprisk | 20a0961bdf986414b7c1f2a8a2a23f9c93573d8d | c72ae62e96d05a585575a7ae8ea8c4952f03fce5 | refs/heads/master | 2020-09-27T04:53:26.499249 | 2020-01-23T14:35:03 | 2020-01-23T14:35:03 | 226,434,561 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,338 | rd | dat_ckid.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{dat_ckid}
\alias{dat_ckid}
\title{CKID dataset}
\format{A data frame with 626 rows and 13 variables:
\describe{
\item{b1nb0}{Binary indicator for race: black=1, non-black=0}
\item{entry}{Years since onset of chronic kidney disease at entry into study}
\item{event}{Renal replacement therapy indicator: 0=none, 1=dialysis, 2=transplant}
\item{exit}{Years since onset of chronic kidney disease at event/censoring time}
\item{foodassist}{Binary indicator for use of food assistance}
\item{inckd}{Years in study (=exit-entry)}
\item{incomegt75}{Household income > $75,000 per year}
\item{incomelt30}{Household income < $30,000 per year}
\item{lps}{Binary indicator of low birth weight, premature birth, or small for gestational age}
\item{male1fe0}{Binary indicator for sex: male=1, female=0}
\item{matedultcoll}{Maternal education less than college}
\item{privatemd}{Binary indicator for private doctor}
\item{public}{Binary indicator for public insurance}s
}}
\source{
\url{https://statepi.jhsph.edu/ckid/ckid.html}
}
\usage{
dat_ckid
}
\description{
A dataset containing time, socieconomic and outcome variables of 626 subjects from the
Chronic Kidney Disease in Children (CKiD) Study.
}
\keyword{datasets}
|
7f41e19f90ddfc5812dfb3753c1e4ab28ce2f52f | b74e35f81dbda954c2187384c2a42f7d8035100b | /plot6.R | a24c2168fbe8445f5cb00ddfe9d81f7e309c8a20 | [] | no_license | ez3804/ExData_Prj2 | 1c37ca5bbe0c808b01a84a2124ddbe13a592f996 | b79f9550dcdcc6e3d2782ab92eb2da3cf96f5064 | refs/heads/master | 2020-04-20T06:46:47.844441 | 2015-01-24T17:12:06 | 2015-01-24T17:12:06 | 29,783,118 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 960 | r | plot6.R | # Check if both data exist. If not, load the data.
# Plot 6: compare total motor-vehicle PM2.5 emissions in Baltimore City
# (fips 24510) and Los Angeles County (fips 06037), 1999-2008.
library(ggplot2)

# Check if each data set already exists in the workspace; if not, load it.
# BUG FIX: the original tested for a variable literally named "data", so
# the cached pmData/classData objects were never detected and both files
# were re-read on every run.
if (!"pmData" %in% ls()) {
  pmData <- readRDS("./data/summarySCC_PM25.rds")
}
if (!"classData" %in% ls()) {
  classData <- readRDS("./data/Source_Classification_Code.rds")
}

# Keep only the Baltimore City and Los Angeles County records
# (renamed from `subset`, which shadowed base::subset)
cityData <- pmData[pmData$fips == "24510" | pmData$fips == "06037", ]

# Open the device first, then set margins: par() only affects the device
# current at call time (the original called par() before png(), so the
# margin setting never reached the plot file).
png(filename = "plot6.png",
    width = 480, height = 480,
    units = "px")
par("mar" = c(5.1, 4.5, 4.1, 2.1))

# SCC codes whose short name mentions "motor", then their emission records
motor <- grep("motor", classData$Short.Name, ignore.case = TRUE)
motor <- classData[motor, ]
motor <- cityData[cityData$SCC %in% motor$SCC, ]

g <- ggplot(motor, aes(year, Emissions, color = fips))
# print() is required so the plot renders when the script is source()d
print(
  g + geom_line(stat = "summary", fun.y = "sum") +
    ylab(expression('Total PM'[2.5]*" Emissions")) +
    ggtitle("Comparison of Total Emissions From Motor\n Vehicle Sources in Baltimore City\n and Los Angeles County from 1999 to 2008") +
    scale_colour_discrete(name = "Group", label = c("Los Angeles","Baltimore"))
)
dev.off()
9c1a55b3d86313760fcd48bb14242350aea5b2ed | 53d7e351e21cc70ae0f2b746dbfbd8e2eec22566 | /man/xmuTwinSuper_Continuous.Rd | 9f98a12be0239e87e8bb1a7af12dc502a7c2e428 | [] | no_license | tbates/umx | eaa122285241fc00444846581225756be319299d | 12b1d8a43c84cc810b24244fda1a681f7a3eb813 | refs/heads/master | 2023-08-31T14:58:18.941189 | 2023-08-31T09:52:02 | 2023-08-31T09:52:02 | 5,418,108 | 38 | 25 | null | 2023-09-12T21:09:45 | 2012-08-14T20:18:01 | R | UTF-8 | R | false | true | 4,213 | rd | xmuTwinSuper_Continuous.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xmu_make_top_twin_models.R
\name{xmuTwinSuper_Continuous}
\alias{xmuTwinSuper_Continuous}
\title{Create core of twin model for all-continuous data.}
\usage{
xmuTwinSuper_Continuous(
name = NULL,
fullVars,
fullCovs = NULL,
sep,
mzData,
dzData,
equateMeans,
type,
allContinuousMethod,
nSib
)
}
\arguments{
\item{name}{The name of the supermodel}
\item{fullVars}{Full Variable names (wt_T1)}
\item{fullCovs}{Full Covariate names (age_T1)}
\item{sep}{default "_T"}
\item{mzData}{An mxData object containing the MZ data}
\item{dzData}{An mxData object containing the DZ data}
\item{equateMeans}{Whether to equate the means across twins (default TRUE)}
\item{type}{type}
\item{allContinuousMethod}{allContinuousMethod}
\item{nSib}{nSib}
}
\value{
\itemize{
\item A twin model
}
}
\description{
Sets up top, MZ and DZ submodels with a means model, data, and expectation for all-continuous data.
called by \code{\link[=xmu_make_TwinSuperModel]{xmu_make_TwinSuperModel()}}.
}
\examples{
\dontrun{
xmuTwinSuper_Continuous(name="twin_super", selVars = selVars, selCovs = selCovs,
mzData = mzData, dzData = dzData, equateMeans = TRUE, type = type,
allContinuousMethod = allContinuousMethod, nSib= nSib, sep = "_T" )
}
}
\seealso{
\itemize{
\item \code{\link[=xmu_make_TwinSuperModel]{xmu_make_TwinSuperModel()}}
}
Other xmu internal not for end user:
\code{\link{umxModel}()},
\code{\link{umxRenameMatrix}()},
\code{\link{umx_APA_pval}()},
\code{\link{umx_fun_mean_sd}()},
\code{\link{umx_get_bracket_addresses}()},
\code{\link{umx_make}()},
\code{\link{umx_standardize}()},
\code{\link{umx_string_to_algebra}()},
\code{\link{xmuHasSquareBrackets}()},
\code{\link{xmuLabel_MATRIX_Model}()},
\code{\link{xmuLabel_Matrix}()},
\code{\link{xmuLabel_RAM_Model}()},
\code{\link{xmuMI}()},
\code{\link{xmuMakeDeviationThresholdsMatrices}()},
\code{\link{xmuMakeOneHeadedPathsFromPathList}()},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}()},
\code{\link{xmuMaxLevels}()},
\code{\link{xmuMinLevels}()},
\code{\link{xmuPropagateLabels}()},
\code{\link{xmuRAM2Ordinal}()},
\code{\link{xmuTwinSuper_NoBinary}()},
\code{\link{xmuTwinUpgradeMeansToCovariateModel}()},
\code{\link{xmu_CI_merge}()},
\code{\link{xmu_CI_stash}()},
\code{\link{xmu_DF_to_mxData_TypeCov}()},
\code{\link{xmu_PadAndPruneForDefVars}()},
\code{\link{xmu_bracket_address2rclabel}()},
\code{\link{xmu_cell_is_on}()},
\code{\link{xmu_check_levels_identical}()},
\code{\link{xmu_check_needs_means}()},
\code{\link{xmu_check_variance}()},
\code{\link{xmu_clean_label}()},
\code{\link{xmu_data_missing}()},
\code{\link{xmu_data_swap_a_block}()},
\code{\link{xmu_describe_data_WLS}()},
\code{\link{xmu_dot_make_paths}()},
\code{\link{xmu_dot_make_residuals}()},
\code{\link{xmu_dot_maker}()},
\code{\link{xmu_dot_move_ranks}()},
\code{\link{xmu_dot_rank_str}()},
\code{\link{xmu_extract_column}()},
\code{\link{xmu_get_CI}()},
\code{\link{xmu_lavaan_process_group}()},
\code{\link{xmu_make_TwinSuperModel}()},
\code{\link{xmu_make_bin_cont_pair_data}()},
\code{\link{xmu_make_mxData}()},
\code{\link{xmu_match.arg}()},
\code{\link{xmu_name_from_lavaan_str}()},
\code{\link{xmu_path2twin}()},
\code{\link{xmu_path_regex}()},
\code{\link{xmu_print_algebras}()},
\code{\link{xmu_rclabel_2_bracket_address}()},
\code{\link{xmu_safe_run_summary}()},
\code{\link{xmu_set_sep_from_suffix}()},
\code{\link{xmu_show_fit_or_comparison}()},
\code{\link{xmu_simplex_corner}()},
\code{\link{xmu_standardize_ACEcov}()},
\code{\link{xmu_standardize_ACEv}()},
\code{\link{xmu_standardize_ACE}()},
\code{\link{xmu_standardize_CP}()},
\code{\link{xmu_standardize_IP}()},
\code{\link{xmu_standardize_RAM}()},
\code{\link{xmu_standardize_SexLim}()},
\code{\link{xmu_standardize_Simplex}()},
\code{\link{xmu_start_value_list}()},
\code{\link{xmu_starts}()},
\code{\link{xmu_summary_RAM_group_parameters}()},
\code{\link{xmu_twin_add_WeightMatrices}()},
\code{\link{xmu_twin_check}()},
\code{\link{xmu_twin_get_var_names}()},
\code{\link{xmu_twin_make_def_means_mats_and_alg}()},
\code{\link{xmu_twin_upgrade_selDvs2SelVars}()}
}
\concept{xmu internal not for end user}
|
b371fd7e7eb187ff85c123b33bc29cf6e2b27e91 | 5c6e8f322dc82416fd43e03fea5ddb5342d3a5b7 | /man/proportion_df.Rd | 793d61a899b0c89704af03da8e5333f729b1ead4 | [] | no_license | ArnaudDroitLab/GenomicOperations | a847efea620adad0690f820572efe48062ee05e5 | 0d7ce960f5c18545d9e170fd605f0a22deba6342 | refs/heads/master | 2020-06-05T21:26:53.338085 | 2019-10-18T20:02:28 | 2019-10-18T20:02:28 | 192,550,540 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 594 | rd | proportion_df.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GenomicEnrichment.R
\name{proportion_df}
\alias{proportion_df}
\title{Returns a data-frame giving the coverage proportions for all elements of a
\linkS4class{GenomicEnrichment} object.}
\usage{
proportion_df(x)
}
\arguments{
\item{x}{A \linkS4class{GenomicEnrichment} object.}
}
\value{
A data-frame giving genome-wide and per-element coverage proportions
in nucleotides.
}
\description{
Returns a data-frame giving the coverage proportions for all elements of a
\linkS4class{GenomicEnrichment} object.
}
|
75d9e9fc95889f81a6d230b8b929b1e1b5b9e737 | 9d484077026b7fcf26188d77281f573eaec1f1d3 | /R/external_packages/ssgsea.GBM.classification/man/MSIG.apply.model.Rd | 879be19cb245ae814524871ca1c6d05dd8eab98e | [] | no_license | gaberosser/qmul-bioinf | 603d0fe1ed07d7233f752e9d8fe7b02c7cf505fe | 3cb6fa0e763ddc0a375fcd99a55eab5f9df26fe3 | refs/heads/master | 2022-02-22T06:40:29.539333 | 2022-02-12T00:44:04 | 2022-02-12T00:44:04 | 202,544,760 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 19,245 | rd | MSIG.apply.model.Rd | \name{MSIG.apply.model}
\alias{MSIG.apply.model}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
MSIG.apply.model
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
MSIG.apply.model(gct.file, cls.file, phen.annot.file = NULL, output.dir, database.dir, identifiers, column.subset = "ALL", column.sel.type = "samples", thres = "NULL", ceil = "NULL", shift = "NULL", fold = 1, delta = 0, norm = 6, no.call.range.max = NULL, no.call.range.min = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{gct.file}{
%% ~~Describe \code{gct.file} here~~
}
\item{cls.file}{
%% ~~Describe \code{cls.file} here~~
}
\item{phen.annot.file}{
%% ~~Describe \code{phen.annot.file} here~~
}
\item{output.dir}{
%% ~~Describe \code{output.dir} here~~
}
\item{database.dir}{
%% ~~Describe \code{database.dir} here~~
}
\item{identifiers}{
%% ~~Describe \code{identifiers} here~~
}
\item{column.subset}{
%% ~~Describe \code{column.subset} here~~
}
\item{column.sel.type}{
%% ~~Describe \code{column.sel.type} here~~
}
\item{thres}{
%% ~~Describe \code{thres} here~~
}
\item{ceil}{
%% ~~Describe \code{ceil} here~~
}
\item{shift}{
%% ~~Describe \code{shift} here~~
}
\item{fold}{
%% ~~Describe \code{fold} here~~
}
\item{delta}{
%% ~~Describe \code{delta} here~~
}
\item{norm}{
%% ~~Describe \code{norm} here~~
}
\item{no.call.range.max}{
%% ~~Describe \code{no.call.range.max} here~~
}
\item{no.call.range.min}{
%% ~~Describe \code{no.call.range.min} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (gct.file, cls.file, phen.annot.file = NULL, output.dir,
database.dir, identifiers, column.subset = "ALL", column.sel.type = "samples",
thres = "NULL", ceil = "NULL", shift = "NULL", fold = 1,
delta = 0, norm = 6, no.call.range.max = NULL, no.call.range.min = NULL)
{
print(c("Processing test file: ", gct.file))
O <- MSIG.Subset.Dataset(input.ds = gct.file, input.cls = cls.file,
column.subset = column.subset, column.sel.type = column.sel.type,
row.subset = "ALL", output.ds = paste(output.dir, "temp2.gct",
sep = ""), output.cls = paste(output.dir, "temp2.cls",
sep = ""))
O <- MSIG.Preprocess.Dataset(input.ds = paste(output.dir,
"temp2.gct", sep = ""), output.ds = paste(output.dir,
"temp3.gct", sep = ""), thres = thres, ceil = ceil, normalization = "NULL")
dataset <- MSIG.Gct2Frame(filename = paste(output.dir, "temp3.gct",
sep = ""))
m.test <- data.matrix(dataset$ds)
gs.names.test <- dataset$row.names
gs.descs.test <- dataset$descs
sample.names.test <- dataset$names
Ns.test <- length(m.test[1, ])
Ng.test <- length(m.test[, 1])
CLS <- MSIG.ReadClsFile(file = paste(output.dir, "temp2.cls",
sep = ""))
class.labels.test <- CLS$class.v
class.phen.test <- CLS$phen
class.list.test <- CLS$class.list
for (sig in identifiers) {
filename <- paste(database.dir, sig, ".msig.params",
sep = "")
temp <- readLines(filename)
seed <- as.numeric(noquote(unlist(strsplit(temp[[1]],
"\t")))[2])
topgs <- as.numeric(noquote(unlist(strsplit(temp[[2]],
"\t")))[2])
link.function <- unlist(strsplit(temp[[3]], "\t"))[2]
model.type <- unlist(strsplit(temp[[4]], "\t"))[2]
burnin.iter <- as.numeric(noquote(unlist(strsplit(temp[[5]],
"\t")))[2])
mcmc.iter <- as.numeric(noquote(unlist(strsplit(temp[[6]],
"\t")))[2])
col.target <- unlist(strsplit(temp[[7]], "\t"))[2]
col.control <- unlist(strsplit(temp[[8]], "\t"))[2]
no.call.r.max <- as.numeric(noquote(unlist(strsplit(temp[[9]],
"\t")))[2])
no.call.r.min <- as.numeric(noquote(unlist(strsplit(temp[[10]],
"\t")))[2])
beta0.train <- as.numeric(noquote(unlist(strsplit(temp[[11]],
"\t")))[2])
beta1.train <- as.numeric(noquote(unlist(strsplit(temp[[12]],
"\t")))[2])
target.class <- unlist(strsplit(temp[[13]], "\t"))[2]
c1 <- c(col.target, col.control)
if (is.null(no.call.range.max)) {
no.call.range.max <- no.call.r.max
}
if (is.null(no.call.range.min)) {
no.call.range.min <- no.call.r.min
}
filename <- paste(database.dir, sig, ".msig.gct", sep = "")
dataset <- MSIG.Gct2Frame(filename = filename)
sample.molsig.sorted.subset <- dataset$ds
Ns <- length(sample.molsig.sorted.subset[1, ])
msize.all <- length(sample.molsig.sorted.subset[, 1])
sample.molsig.sorted.subset.gs <- dataset$row.names
sample.names <- dataset$names
filename <- paste(database.dir, sig, ".msig.gct", sep = "")
dataset <- MSIG.Gct2Frame(filename = filename)
sample.molsig.sorted.subset <- dataset$ds
Ns <- length(sample.molsig.sorted.subset[1, ])
msize.all <- length(sample.molsig.sorted.subset[, 1])
sample.molsig.sorted.subset.gs <- dataset$row.names
sample.names <- dataset$names
filename <- paste(database.dir, sig, ".msig.cls", sep = "")
CLS <- MSIG.ReadClsFile(file = filename)
class.labels <- CLS$class.v
class.phen <- CLS$phen
class.list <- CLS$class.list
for (i in 1:length(class.list)) {
if (class.list[i] == target.class) {
class.labels[i] <- 1
}
else {
class.list[i] <- "CNTL"
class.labels[i] <- 0
}
}
print(c("Target class:", target.class))
print(c("Class labels:", class.labels))
col.index <- order(class.labels, decreasing = T)
for (j in 1:msize.all) {
sample.molsig.sorted.subset[j, ] <- sample.molsig.sorted.subset[j,
col.index]
}
sample.names <- sample.names[col.index]
class.labels <- class.labels[col.index]
class.list <- class.list[col.index]
class.phen <- c(target.class, "CNTL")
control.class <- "CNTL"
gs.names2 <- intersect(sample.molsig.sorted.subset.gs,
gs.names.test)
locations <- match(gs.names2, gs.names.test, nomatch = 0)
m.test2 <- m.test[locations, ]
locations2 <- match(gs.names2, sample.molsig.sorted.subset.gs)
m.train <- sample.molsig.sorted.subset[locations2, ]
print(c("Matched signature and test set: overlap=", length(gs.names2),
" Total original signature size= ", length(sample.molsig.sorted.subset.gs)))
msize <- length(locations)
sig.matrix <- array(0, dim = c(msize, Ns))
sig.matrix.test <- array(0, dim = c(msize, Ns.test))
for (k in 1:Ns) {
sig.matrix[, k] <- rank(m.train[, k], ties.method = "average")
}
for (k in 1:Ns.test) {
sig.matrix.test[, k] <- rank(m.test2[, k], ties.method = "average")
}
sig.matrix.all <- cbind(sig.matrix, sig.matrix.test)
sample.names.all <- c(sample.names, sample.names.test)
MSIG.HeatMapPlot.5(V = t(sig.matrix.all), row.names = sample.names.all,
col.labels = rep(1, msize), col.classes = "C", col.names = gs.names2,
main = paste(sig, gct.file, sep = " / "), xlab = " ",
ylab = " ", row.norm = F, cmap.type = 2)
t.class.point <- apply(sig.matrix[, class.list == target.class],
MARGIN = 1, FUN = mean)
c.class.point <- apply(sig.matrix[, class.list == control.class],
MARGIN = 1, FUN = mean)
d.t.class <- vector(length = Ns, mode = "numeric")
d.c.class <- vector(length = Ns, mode = "numeric")
d.c.t.class <- sum(abs(t.class.point - c.class.point))
x <- vector(length = Ns, mode = "numeric")
y <- vector(length = Ns, mode = "numeric")
d.t.class.test <- vector(length = Ns.test, mode = "numeric")
d.c.class.test <- vector(length = Ns.test, mode = "numeric")
x.test <- vector(length = Ns.test, mode = "numeric")
y.test <- vector(length = Ns.test, mode = "numeric")
for (i in 1:Ns) {
d.t.class[i] <- sum(abs(t.class.point - sig.matrix[,
i]))/d.c.t.class
d.c.class[i] <- sum(abs(c.class.point - sig.matrix[,
i]))/d.c.t.class
x[i] <- (d.t.class[i]^2 - d.c.class[i]^2 - 1)/(-2)
y[i] <- sqrt(d.c.class[i]^2 - x[i]^2)
}
print(c("Creating regression signature model using overlap..."))
target.var <- ifelse(class.list == target.class, 1, 0)
if (model.type == "Bayesian") {
if (link.function == "logit") {
reg.model <- MCMClogit(target.var ~ x, burnin = burnin.iter,
mcmc = mcmc.iter, bayes.resid = T)
}
else if (link.function == "probit") {
reg.model <- MCMCprobit(target.var ~ x, burnin = burnin.iter,
mcmc = mcmc.iter, bayes.resid = T)
}
else {
stop("Unknown link function")
}
}
else if (model.type == "Classic") {
if (link.function == "logit") {
reg.model <- glm(target.var ~ x, family = binomial("logit"))
}
else if (link.function == "probit") {
reg.model <- glm(target.var ~ x, family = binomial("probit"))
}
else {
stop("Unknown link function")
}
}
else {
stop("Unknown model type")
}
if (model.type == "Bayesian") {
beta0 <- reg.model[, 1]
beta1 <- reg.model[, 2]
print(c("beta0=", beta0, " beta1=", beta1))
prob.i <- matrix(0, nrow = Ns, ncol = 3)
}
else if (model.type == "Classic") {
beta0 <- reg.model[[1]][1]
beta1 <- reg.model[[1]][2]
print(c("beta0=", beta0, " beta1=", beta1))
prob.i <- matrix(0, nrow = Ns, ncol = 3)
}
else {
stop("Unknown model type")
}
print(c("beta0 train=", beta0.train, " beta0=", beta0))
print(c("beta1 train=", beta1.train, " beta1=", beta1))
xmin <- min(x)
xmax <- max(x)
range.x <- xmax - xmin
prob.m <- matrix(0, nrow = 1000, ncol = 3)
x.m <- vector(length = 1000, mode = "numeric")
for (k in 1:1000) {
x.m[k] <- xmin + k * (range.x/1000)
if (link.function == "logit") {
p.vec <- (exp(beta0 + beta1 * x.m[k])/(1 + exp(beta0 +
beta1 * x.m[k])))
}
else if (link.function == "probit") {
p.vec <- (erf(beta0 + beta1 * x.m[k]) + 1)/2
}
else {
                stop("Unknown link function")
}
prob.m[k, 1] <- quantile(p.vec, probs = 0.5)
prob.m[k, 2] <- quantile(p.vec, probs = 0.05)
prob.m[k, 3] <- quantile(p.vec, probs = 0.95)
}
istar <- which.min(abs(0.5 - prob.m[, 1]))
istar <- xmin + istar * (range.x/1000)
for (i in 1:Ns.test) {
d.t.class.test[i] <- sum(abs(t.class.point - sig.matrix.test[,
i]))/d.c.t.class
d.c.class.test[i] <- sum(abs(c.class.point - sig.matrix.test[,
i]))/d.c.t.class
x.test[i] <- (d.t.class.test[i]^2 - d.c.class.test[i]^2 -
1)/(-2)
y.test[i] <- sqrt(d.c.class.test[i]^2 - x.test[i]^2)
}
x.range <- range(c(x, x.test, 0, 1))
y.range <- range(c(y, y.test, 0))
x11(height = 24, width = 30)
plot(x, y, xlim = x.range, ylim = y.range, type = "n",
main = sig, sub = gct.file)
points(0, 0, cex = 2, pch = 21, col = 1, bg = 3)
points(1, 0, cex = 2, pch = 21, col = 1, bg = 2)
points(x[class.list == control.class], y[class.list ==
control.class], cex = 1, pch = 21, col = 1, bg = 3)
points(x[class.list == target.class], y[class.list ==
target.class], cex = 1, pch = 21, col = 1, bg = 2)
k <- 1
for (i in class.list.test) {
points(x.test[class.list.test == i], y.test[class.list.test ==
i], cex = 1, pch = 22, col = 1, bg = k\%\%5)
k <- k + 1
}
prob.i.test <- matrix(0, nrow = Ns.test, ncol = 3)
for (i in 1:Ns.test) {
if (link.function == "logit") {
p.vec.test <- (exp(beta0 + beta1 * x.test[i])/(1 +
exp(beta0 + beta1 * x.test[i])))
}
else if (link.function == "probit") {
p.vec.test <- (erf(beta0 + beta1 * x.test[i]) +
1)/2
}
else {
stop("Unknown link function")
}
prob.i.test[i, 1] <- quantile(p.vec.test, probs = 0.5)
prob.i.test[i, 2] <- quantile(p.vec.test, probs = 0.05)
prob.i.test[i, 3] <- quantile(p.vec.test, probs = 0.95)
}
x.index <- order(x.test, decreasing = F)
x.order.test <- x.test[x.index]
prob.i.order.test <- prob.i.test[x.index, ]
class.list.test.order <- class.list.test[x.index]
x11(height = 7, width = 9.5)
nf <- layout(matrix(c(1, 2), 1, 2, byrow = T), widths = c(3.75,
1), heights = 1, respect = FALSE)
plot(x.order.test, prob.i.order.test[, 1], sub = gct.file,
pch = 20, ylim = c(-0.05, 1.07), main = sig, xlim = c(-0.1,
1.1), col = 0, cex.axis = 1.35, cex = 3, cex.lab = 1.35,
xlab = "Activation Index", ylab = "Probability")
points(x.m, prob.m[, 1], type = "l", lwd = 2, col = 1,
lty = 1, cex = 1)
points(x.m, prob.m[, 2], type = "l", col = 4, lty = 1,
cex = 1)
points(x.m, prob.m[, 3], type = "l", col = 4, lty = 1,
cex = 1)
arrows(x.order.test, prob.i.order.test[, 2], x.order.test,
prob.i.order.test[, 3], col = 4, angle = 90, code = 3,
length = 0)
range.x <- range(x.order.test)
points(range.x, c(0.5, 0.5), type = "l", lty = 3, col = 1,
lwd = 2)
points(c(istar, istar), c(-0.07, 1.07), type = "l", lty = 3,
col = 1, lwd = 2)
k <- 1
for (i in class.list.test) {
points(x.order.test[class.list.test.order == i],
prob.i.order.test[class.list.test.order == i,
1], pch = 21, bg = k\%\%5, col = 1, cex = 2)
k <- k + 1
}
leg.txt <- unique(class.list.test.order)
p.vec <- rep(21, length(unique(class.list.test.order)))
c.vec <- rep(seq(1, 5), length(unique(class.list.test.order)))
par(mar = c(0, 0, 0, 0))
plot(c(0, 0), c(1, 1), xlim = c(0, 1), ylim = c(0, 1),
axes = F, type = "n", xlab = "", ylab = "")
legend(x = 0, y = 0.8, legend = leg.txt, bty = "n", xjust = 0,
yjust = 1, pch = p.vec, pt.bg = c.vec, col = "black",
cex = 1.2, pt.cex = 2)
activation.indicator <- ifelse(prob.i.test[, 1] >= 0.5,
1, 0)
activation.indicator <- ifelse((prob.i.test[, 1] >= no.call.range.max) |
(prob.i.test[, 1] <= no.call.range.min), activation.indicator,
0.5)
if (!is.null(phen.annot.file)) {
filename <- phen.annot.file
dataset <- MSIG.Gct2Frame(filename = filename)
phen.annot <- data.matrix(dataset$ds)
phen.annot.gs <- dataset$row.names
for (i in 1:length(phen.annot[, 1])) {
phen.annot[i, ] <- (phen.annot[i, ] - min(phen.annot[i,
]))/(max(phen.annot[i, ]) - min(phen.annot[i,
]))
}
z <- rbind(prob.i.test[, 1], activation.indicator,
phen.annot)
p.lab <- c(paste("P(", sig, ")", sep = ""), paste("A(",
sig, ")", sep = ""), phen.annot.gs)
}
else {
z <- rbind(prob.i.test[, 1], activation.indicator)
p.lab <- c(paste("P(", sig, ")", sep = ""), paste("A(",
sig, ")", sep = ""))
}
MSIG.HeatMapPlot.5(V = z, row.names = p.lab, col.labels = class.labels.test,
col.classes = class.phen.test, col.names = sample.names.test,
main = paste(sig, " Activation on Test", sep = ""),
xlab = " ", ylab = " ", sub = gct.file, row.norm = F,
cmap.type = 3, rotated.col.labels = T)
if (sig == identifiers[[1]]) {
z.all <- prob.i.test[, 1]
z.act.all <- activation.indicator
if (!is.null(phen.annot.file)) {
phen.annot.all <- phen.annot
phen.annot.gs.all <- phen.annot.gs
}
p.lab.all <- paste("P(", sig, ")", sep = "")
p.act.lab.all <- paste("A(", sig, ")", sep = "")
}
else {
z.all <- rbind(z.all, prob.i.test[, 1])
z.act.all <- rbind(z.act.all, activation.indicator)
p.lab.all <- c(p.lab.all, paste("P(", sig, ")", sep = ""))
p.act.lab.all <- c(p.act.lab.all, paste("A(", sig,
")", sep = ""))
}
}
if (!is.null(phen.annot.file)) {
z.all <- rbind(z.all, phen.annot)
z.act.all <- rbind(z.act.all, phen.annot)
p.lab.all <- c(p.lab.all, phen.annot.gs)
p.act.lab.all <- c(p.act.lab.all, phen.annot.gs)
}
print(c("dim z.all=", dim(z.all)))
MSIG.HeatMapPlot.5(V = z.all, row.names = p.lab.all, col.labels = class.labels.test,
col.classes = class.phen.test, col.names = sample.names.test,
main = " ", xlab = " ", ylab = " ", sub = gct.file, row.norm = F,
cmap.type = 2, rotated.col.labels = T)
MSIG.HeatMapPlot.5(V = z.act.all, row.names = p.act.lab.all,
col.labels = class.labels.test, col.classes = class.phen.test,
col.names = sample.names.test, main = " ", xlab = " ",
ylab = " ", sub = gct.file, row.norm = F, cmap.type = 2,
rotated.col.labels = T)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
f8adf2035303a519ef2cd937be1f7556959575c7 | d202cab73a83bafc80fc6a90ff9dee786d5f5970 | /submission_form_shiny/defunced_app.R | f4fecd2f9222df000d80efd9e56cafbe55b4843e | [] | no_license | eeb-bipoc-db/EEB_POC | 28844e6c8d2304326dc716f9a672bab0a1b1f063 | 2185d837448256c3020f9f6ca1afc379a01c1334 | refs/heads/master | 2023-02-16T20:04:37.998877 | 2021-01-15T22:34:15 | 2021-01-15T22:34:15 | 274,485,337 | 1 | 2 | null | 2021-01-15T22:34:17 | 2020-06-23T18:56:12 | JavaScript | UTF-8 | R | false | false | 13,156 | r | defunced_app.R | # eeb_poc Shiny server
# Written by Mairin Deith (mdeith@zoology.ubc.ca)
# First created June, 2020
# https://deanattali.com/2015/06/14/mimicking-google-form-shiny/
# Load libraries ----------------------------------------------------------
# devtools::install_github('rstudio/DT')
library(DT)
library(shiny)
library(shinybusy)
library(shinyjs)
library(digest)
library(tidyr)
library(rorcid)
library(googledrive)
library(googlesheets4)
library(shinythemes)
### To do - ORCID to first page - lookups there?
### Link to Google sheet/database here
### SET UP AUTHENTICATION
# Designate project-specific cache
# To be hosted in shinyapps.io designated folder
# options(gargle_oauth_cache = ".cache")
# Run once in an interactive session to create the auth cache.
# drive_auth()
# Authorize Google Sheets to use this token
# gs4_auth(token = drive_token())
# In subsequent runs, use this cache
# Authenticate to Google Drive non-interactively using the OAuth token
# cached in ".cache" for the project account, then reuse that same token
# for the Google Sheets API so both packages act as one identity.
drive_auth(cache = ".cache", email = "eebpocdatabase@gmail.com")
gs4_auth(token = drive_token())
# UI ----------------------------------------------------------------------
shinyApp(
ui <- fluidPage(theme=shinytheme("yeti"),
shinyjs::useShinyjs(),
tags$div(class = "h1",
"POC Authors in BEES* - Submission portal"),
tags$div(class = "h2", "*Behavioural, ecological, evolutionary, and social sciences"),
sidebarLayout(
sidebarPanel(
helpText("The Graduate Diversity Council in the Department of Environmental Science, Policy, & Management at UC Berkeley and a group of collaborators from the Zoology Department at the University of British Columbia are seeking to increase visibility of scholars with underrepresented racial backgrounds in our seminar series, course syllabuses, and citation practices. To that end, we are assembling a list of BIPOC (Black, Indigenous, Person of Color) scholars in fields related to environmental sciences (including natural, social, and/or physical sciences)."),
br(),
helpText("If you identify as a scholar in environmental sciences from an underrepresented racial or ethnic background, we would love to include you on a list that will be used for future seminar series and revising course syllabuses. Please take a few minutes to fill out this form and share it with others in your network!"),
br(),
helpText("All fields except your name are optional - please only fill in what you are comfortable being accessible online.")
),
mainPanel(
tags$h3("Scholar information"),
column(4,
textInput("name", label = "Name (required)", value=""),
textInput("email", label = "Email address", value=""),
textInput("country", label = "Country of current residence", value=""),
textInput("institution", label = "Affiliated institution", value=""),
selectizeInput("careerstage", label = "Career stage", choices = c("", "Graduate student", "Post-doctoral Scholar", "Research Scientist", "Pre-Tenure Faculty", "Post-Tenure Faculty", "Emeritus Faculty")),
textInput("twitter", label = "Twitter handle", value=""),
helpText("We are also interested in highlighting some of your research contributions associated with your ORCID. See below if you would like to also contribute to this database."),
textInput("orcid_form", label = "ORCID (format: xxxx-xxxx-xxxx-xxxx)", value=""),
),
column(4,
textInput("site", label = "Affiliated website (including lab/department webpages or personal webpages)", value=""),
textInput("scholar", label = "Google Scholar or other research page", value=""),
tags$hr(),
selectizeInput("gender", label = "Gender", choices = c("", "Nonbinary", "Female", "Male", "Prefer not to say", "Prefer another identity (indicate below)")),
textInput("gender_openended", label = "Preferred identity", value=""),
selectInput("bipoc", label = "Do you identify as a BIPOC (Black, Indigenous, Person of Color) scholar?", choices = c("", "Yes", "No")),
textInput("bipoc_specify", label = "Underpresented racial/ethnic minotirty identity", value=""),
selectInput("disability", label = "Do you identify as a person with a disability?", choices = c("", "Yes", "No")),
selectInput("other_underrep", label = "Do you identify as an other underrepresented group not listed above? (e.g. LGBTQ+, First Generation College, or others)", choices = c("", "Yes", "No")),
textInput("other_specify", label = "Feel free to specify here:", value="")
),
column(4,
selectInput("subdisc", label = "Subdiscipline", choices = c("", "Biogeochemistry","Entomology","Evolutionary Biology","Food Systems & Agroecology","Forestry","Freshwater Ecology","Political Ecology","Sustainability Studies","Wildlife Ecology","Conservation Science","Environmental Social Sciences","Other...")),
textInput("disc_specify", label = "Please specify your subdiscipline", value=""),
textInput("keywords", label = "Please provide keywords for your research, separated with a semicolon (;)", value=""),
helpText("One of the purposes of this database is to connect those looking for more representative speakers at academic events. Our intention is that your time will be compensated for these events; however we cannot ensure that your contact information will be used exclusively by paying hosts."),
selectInput("speaking_ops", label = "Are you open to being contacted for speaking opportunities?", choices = c("", "Yes", "No")),
textInput("refers", label = "Please provide the names of other BIPOC scholars in your field that you would recommend we reach out to.")
),
# Continue in the main panel
actionButton("submitauth", label = "Submit author information to our database", icon = icon("archive"), class = "btn-success", width = "100%"),
tags$hr(),
tags$h4("Use your ORCID (if provided) to lookup research works"),
column(6,
actionButton("orcid_lookup", "Find works associated with your ORCID", icon = icon("search"), class = "btn-primary", width = "100%"),
uiOutput("orcid_search_error"),
uiOutput("orcid_search_restart")
),
column(6,
actionButton("submitselected", "Submit selected works", icon = icon("archive"), class = "btn-success", width = "100%"),
checkboxInput("dt_sel", "Select/deselect all")
),
DT::DTOutput("works_dt")
) # end main panel
) # sidebar layout
),
server = function(input, output, session){
workstable <<- data.frame()
show_modal_spinner(spin = "spring", text = "Connecting to database...")
# Setup Google Sheets and global ID parameter
# Initially disable/hide some buttons
### shinyjs::hide("input_type")
shinyjs::hide("gender_openended")
shinyjs::hide("bipoc_specify")
shinyjs::hide("other_specify")
shinyjs::hide("disc_specify")
shinyjs::hide("orcid_lookup")
shinyjs::hide("submitselected")
shinyjs::hide("dt_sel")
wb <<- googledrive::drive_get("nov10_shinytest_authors")
# Get a unique fid for that author - first column
newid <<- max(
na.omit(range_speedread(ss=wb, sheet = 1, range = "Sheet1!A:A")
), 0) + 1
remove_modal_spinner()
message(paste0("ID: ", newid))
# "Other" boxes appearances controlled here
# If authors choose any field with "indicate below" options, have those
# appear
observeEvent(input$gender, {
if(input$gender == "Prefer another identity (indicate below)"){
shinyjs::show("gender_openended")
} else {
shinyjs::hide("gender_openended")
}
})
observeEvent(input$bipoc, {
if(input$bipoc == "Yes"){
shinyjs::show("bipoc_specify")
} else {
shinyjs::hide("bipoc_specify")
}
})
observeEvent(input$other_underrep, {
if(input$other_underrep == "Yes"){
shinyjs::show("other_specify")
} else {
shinyjs::hide("other_specify")
}
})
observeEvent(input$subdisc, {
if(input$subdisc == "Other..."){
shinyjs::show("disc_specify")
} else {
shinyjs::hide("disc_specify")
}
})
# Submit author data to GSheet
observeEvent(input$submitauth, {
show_modal_spinner(spin = "spring", "Submitting to Google Sheet database...")
# Create a dataframe based on user inputs - this will be saved to the GSheet
author_df <- reactive({data.frame(
submitter_unique_id = newid,
name = input$name,
institution = input$institution,
email = gsub("@", "[at]", input$email),
site = input$site,
country = input$country,
scholar = input$scholar,
orcid = input$orcid_form,
twitter = input$twitter,
careerstage = input$careerstage,
gender = ifelse(input$gender=="Prefer another identity (indicate below)", input$gender_openended, input$gender),
bipoc = input$bipoc,
bipoc_specify = input$bipoc_specify,
disability = input$disability,
other_underrep_minority = input$other_underrep,
other_underrep_minority_specify = input$other_specify,
subdisc = input$subdisc,
disc_specify = input$disc_specify,
keywords = input$keywords,
refers = input$refers,
speaking_ops = input$speaking_ops,
upload_date = strptime(Sys.time(), "%m/%d/%y %H:%M:%S")
)
})
googlesheets4::sheet_append(ss=wb, data=author_df(), sheet=1)
remove_modal_spinner()
}, ignoreInit=T) # once = T)
# Only show the "search ORCID" button If an ORCID is provided
# otherwise disable (greyed out)
observeEvent(input$orcid_form, {
  if(input$orcid_form != ""){
    shinyjs::show("orcid_lookup")
    shinyjs::show("dt_sel")
    shinyjs::show("submitselected")
    # NOTE(review): registering an observeEvent inside another observer adds a
    # fresh handler every time input$orcid_form changes, so one click on
    # "lookup" can fire the search several times in a long session. Consider
    # hoisting this to a top-level observer that reads input$orcid_form.
    observeEvent(input$orcid_lookup, {
      show_modal_spinner(spin = "spring", text = "Looking up works...")
      message(paste0("...searching for ORCID: ", input$orcid_form))
      # Fetch the works record; on error render a message and return NULL so
      # the table-building step below is skipped.
      q0 <- tryCatch({
        rorcid::orcid_works(orcid = input$orcid_form, warn = FALSE)
      }, error=function(cond){
        output$orcid_search_error <- renderUI(HTML(paste0(
          h2(paste0("ORCID Lookup Error: '", cond, "'. Please try again."))
        )))
        remove_modal_spinner()
        return(NULL)
      }, warning=function(cond){
        message(paste0("Warning with lookup: ",cond))
        remove_modal_spinner()
        return(NULL)
      })
      if(!is.null(q0)){
        shinyjs::hide("orcid_search_error")
        # FIX: extract the works table by name with `[[` instead of
        # eval(parse(text=...)), which executed pasted-together user input
        # as R code (injection hazard and fragile quoting).
        q <- q0[[input$orcid_form]]$works
        doi_fetcher <- q$`external.ids.external.id`
        doi_vec <- c()
        html_vec <- c()
        # Pull the DOI (if any) out of each work's external-id table.
        for(d in doi_fetcher){
          rowidx <- which(d$`external-id-type` == "doi")
          doi_tmp <- d$`external-id-value`[rowidx]
          if(!identical(rowidx, integer(0))){
            doi_vec <- c(doi_vec, doi_tmp)
            html_vec <- c(html_vec, paste0("https://doi.org/", doi_tmp))
          } else {
            doi_vec <- c(doi_vec, "No DOI found")
            # FIX: c(html_vec, NULL) appended nothing, so doi_vec and
            # html_vec drifted out of sync whenever a work lacked a DOI,
            # breaking the DOI = html_vec column below. Append NA to keep
            # the two vectors the same length.
            html_vec <- c(html_vec, NA_character_)
          }
        }
        # message(colnames(q))
        # Build the display table (Title / Journal / Year / Date / path / DOI).
        workstable <<- q %>%
          dplyr::select("title.title.value",
                        "publication-date.year.value",
                        "publication-date.day.value",
                        "publication-date.month.value",
                        "journal-title.value",
                        "path") %>%
          dplyr::transmute(Title = `title.title.value`,
                          Journal = `journal-title.value`,
                          Year = `publication-date.year.value`,
                          Date = paste0(tidyr::replace_na(month.abb[as.numeric(`publication-date.month.value`)], ""), " ", tidyr::replace_na(`publication-date.day.value`, "")),
                          ORCID.Path = path,
                          DOI = html_vec)
        remove_modal_spinner()
        message("Search complete")
      }
    }, ignoreInit=TRUE)
  } else {
    shinyjs::hide("orcid_lookup")
    shinyjs::hide("dt_sel")
    shinyjs::hide("submitselected")
  }
})
# Render the ORCID works table and keep a proxy for programmatic row selection.
# NOTE(review): `<<-` writes prettytable/dt_proxy (and workstable above) into
# the enclosing — likely global — environment, so they would be shared across
# concurrent sessions; confirm this app is effectively single-user or move
# these into session scope.
prettytable <<- reactive({DT::datatable(workstable)})
output$works_dt <- DT::renderDT(
  prettytable())
dt_proxy <<- DT::dataTableProxy("works_dt")
# "Select all" toggle: ticking selects every row in the works table,
# unticking clears the selection via the table proxy.
observeEvent(input$dt_sel, {
  select_all <- isTRUE(input$dt_sel)
  if (select_all) message("...select all")
  target_rows <- if (select_all) input$works_dt_rows_all else NULL
  DT::selectRows(dt_proxy, target_rows)
})
# observeEvent(input$works_dt_rows_selected, {
# if(is.null(input$works_dt_rows_selected) || length(input$works_dt_rows_selected) == 0){
# } else {
# shinyjs::enable("submitselected")
# }
# })
# Append the user's selected works, tagged with their submitter id,
# to the works Google Sheet.
observeEvent(input$submitselected, {
  show_modal_spinner(spin = "spring", text = "Submitting to works database...")
  works_out <- isolate(save_df())
  works_out$submitter_unique_id <- newid
  target_sheet <- googledrive::drive_get("nov10_shinytest_works")
  googlesheets4::sheet_append(ss = target_sheet, data = works_out, sheet = 1)
  remove_modal_spinner()
})
}
)
|
84eb4b53d1c82cdeb5eae80ad3ffde67188eebc1 | 1b141d6887525dd038885603ba0525a4799fb297 | /R/E_CODE.R | b5e13b2bc8c33fd8a1c7000d8e7b519281f40fc2 | [
"MIT"
] | permissive | mjkarlsen/traumaR | c7b032ad24f5511d891348cf40b764e14a4d784b | dd52deec08282e8955c5fea6ad1fb7b2a80e0a9f | refs/heads/master | 2022-09-17T04:17:13.452037 | 2020-06-06T18:47:08 | 2020-06-06T18:47:08 | 260,229,827 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 106,572 | r | E_CODE.R | #' Cause of Injury
#'
#' @param col A column in PTOS data that typically starts with E_ACT_1
#'
#' @return A character vector translating each E-code into a human-friendly description.
#' @export
e_code <- function(col) {
col_value <- case.(col == 800.0, 'Railway Collision w/ Rolling Stock - Railway Employee',
col == 800.1, 'Railway Collision w/ Rolling Stock - Railway Passenger',
col == 800.2, 'Railway Collision w/ Rolling Stock - Pedestrian',
col == 800.3, 'Railway Collision w/ Rolling Stock - Pedal Cyclist',
col == 800.8, 'Railway Collision w/ Rolling Stock - Oth Person',
col == 800.9, 'Railway Collision w/ Rolling Stock - Unspec Person',
col == 801.0, 'Railway Collision w/ Oth Object - Railway Employee',
col == 801.1, 'Railway Collision w/ Oth Object - Railway Passenger',
col == 801.2, 'Railway Collision w/ Oth Object - Pedestrian',
col == 801.3, 'Railway Collision w/ Oth Object - Pedal Cyclist',
col == 801.8, 'Railway Collision w/ Oth Object - Oth Person',
col == 801.9, 'Railway Collision w/ Oth Object - Unspec Person',
col == 802.0, 'Railway Derailment w/o Prior Collision - Railway Employee',
col == 802.1, 'Railway Derailment w/o Prior Collision - Railway Passenger',
col == 802.2, 'Railway Derailment w/o Prior Collision - Pedestrian',
col == 802.3, 'Railway Derailment w/o Prior Collision - Pedal Cyclist',
col == 802.8, 'Railway Derailment w/o Prior Collision - Oth Person',
col == 802.9, 'Railway Derailment w/o Prior Collision - Unspec Person',
col == 803.0, 'Railway Explosion, Fire, or Burning - Railway Employee',
col == 803.1, 'Railway Explosion, Fire, or Burning - Railway Passenger',
col == 803.2, 'Railway Explosion, Fire, or Burning - Pedestrian',
col == 803.3, 'Railway Explosion, Fire, or Burning - Pedal Cyclist',
col == 803.8, 'Railway Explosion, Fire, or Burning - Oth Person',
col == 803.9, 'Railway Explosion, Fire, or Burning - Unspec Person',
col == 804.0, 'Fall In, On, or From Railway Train - Railway Employee',
col == 804.1, 'Fall In, On, or From Railway Train - Railway Passenger',
col == 804.2, 'Fall In, On, or From Railway Train - Pedestrian',
col == 804.3, 'Fall In, On, or From Railway Train - Pedal Cyclist',
col == 804.8, 'Fall In, On, or From Railway Train - Oth Person',
col == 804.9, 'Fall In, On, or From Railway Train - Unspec Person',
col == 805.0, 'Railway, Hit by Rolling Stock - Railway Employee',
col == 805.1, 'Railway, Hit by Rolling Stock - Railway Passenger',
col == 805.2, 'Railway, Hit by Rolling Stock - Pedestrian',
col == 805.3, 'Railway, Hit by Rolling Stock - Pedal Cyclist',
col == 805.8, 'Railway, Hit by Rolling Stock - Oth Person',
col == 805.9, 'Railway, Hit by Rolling Stock - Unspec Person',
col == 806.0, 'Oth Spec Railway Accident - Railway Employee',
col == 806.1, 'Oth Spec Railway Accident - Railway Passenger',
col == 806.2, 'Oth Spec Railway Accident - Pedestrian',
col == 806.3, 'Oth Spec Railway Accident - Pedal Cyclist',
col == 806.8, 'Oth Spec Railway Accident - Oth Person',
col == 806.9, 'Oth Spec Railway Accident - Unspec Person',
col == 807.0, 'Railway, Unspec Nature - Railway Employee',
col == 807.1, 'Railway, Unspec Nature - Railway Passenger',
col == 807.2, 'Railway, Unspec Nature - Pedestrian',
col == 807.3, 'Railway, Unspec Nature - Pedal Cyclist',
col == 807.8, 'Railway, Unspec Nature - Oth Person',
col == 807.9, 'Railway, Unspec Nature - Unspec Person',
col == 810.0, 'MVA Traffic, Collision w/ Train - Driver of MV, Non MC',
col == 810.1, 'MVA Traffic, Collision w/ Train - Passenger in MV, Non MC',
col == 810.2, 'MVA Traffic, Collision w/ Train - Motorcyclist',
col == 810.3, 'MVA Traffic, Collision w/ Train - Passenger on Motorcycle',
col == 810.4, 'MVA Traffic, Collision w/ Train - Occupant of Streetcar',
col == 810.5, 'MVA Traffic, Collision w/ Train - Occupant of Animal Veh',
col == 810.6, 'MVA Traffic, Collision w/ Train - Pedal Cyclist',
col == 810.7, 'MVA Traffic, Collision w/ Train - Pedestrian',
col == 810.8, 'MVA Traffic, Collision w/ Train - Oth Person',
col == 810.9, 'MVA Traffic, Collision w/ Train - Unspec Person',
col == 811.0, 'MVA Traffic, Re-entr Collision w/ MV - Driver of MV, Non MC',
col == 811.1, 'MVA Traffic, Re-entr Collision w/ MV - Passenger in MV, Non MC',
col == 811.2, 'MVA Traffic, Re-entr Collision w/ MV - Motorcyclist',
col == 811.3, 'MVA Traffic, Re-entr Collision w/ MV - Passenger on Motorcycle',
col == 811.4, 'MVA Traffic, Re-entr Collision w/ MV - Occupant of Streetcar',
col == 811.5, 'MVA Traffic, Re-entr Collision w/ MV - Occupant of Animal Veh',
col == 811.6, 'MVA Traffic, Re-entr Collision w/ MV - Pedal Cyclist',
col == 811.7, 'MVA Traffic, Re-entr Collision w/ MV - Pedestrian',
col == 811.8, 'MVA Traffic, Re-entr Collision w/ MV - Oth Person',
col == 811.9, 'MVA Traffic, Re-entr Collision w/ MV - Unspec Person',
col == 812.0, 'Oth MVA Traffic, Collision w/ MV - Driver of MV, Non MC',
col == 812.1, 'Oth MVA Traffic, Collision w/ MV - Passenger in MV, Non MC',
col == 812.2, 'Oth MVA Traffic, Collision w/ MV - Motorcyclist',
col == 812.3, 'Oth MVA Traffic, Collision w/ MV - Passenger on Motorcycle',
col == 812.4, 'Oth MVA Traffic, Collision w/ MV - Occupant of Streetcar',
col == 812.5, 'Oth MVA Traffic, Collision w/ MV - Occupant of Animal Veh',
col == 812.6, 'Oth MVA Traffic, Collision w/ MV - Pedal Cyclist',
col == 812.7, 'Oth MVA Traffic, Collision w/ MV - Pedestrian',
col == 812.8, 'Oth MVA Traffic, Collision w/ MV - Oth Person',
col == 812.9, 'Oth MVA Traffic, Collision w/ MV - Unspec Person',
col == 813.0, 'MVA Traffic, Collision w/ Oth Veh - Driver of MV, Non MC',
col == 813.1, 'MVA Traffic, Collision w/ Oth Veh - Passenger in MV, Non MC',
col == 813.2, 'MVA Traffic, Collision w/ Oth Veh - Motorcyclist',
col == 813.3, 'MVA Traffic, Collision w/ Oth Veh - Passenger on Motorcycle',
col == 813.4, 'MVA Traffic, Collision w/ Oth Veh - Occupant of Streetcar',
col == 813.5, 'MVA Traffic, Collision w/ Oth Veh - Occupant of Animal Veh',
col == 813.6, 'MVA Traffic, Collision w/ Oth Veh - Pedal Cyclist',
col == 813.7, 'MVA Traffic, Collision w/ Oth Veh - Pedestrian',
col == 813.8, 'MVA Traffic, Collision w/ Oth Veh - Oth Person',
col == 813.9, 'MVA Traffic, Collision w/ Oth Veh - Unspec Person',
col == 814.0, 'MVA Traffic, Collision w/ Pedestrian - Driver of MV, Non MC',
col == 814.1, 'MVA Traffic, Collision w/ Pedestrian - Passenger in MV, Non MC',
col == 814.2, 'MVA Traffic, Collision w/ Pedestrian - Motorcyclist',
col == 814.3, 'MVA Traffic, Collision w/ Pedestrian - Passenger on Motorcycle',
col == 814.4, 'MVA Traffic, Collision w/ Pedestrian - Occupant of Streetcar',
col == 814.5, 'MVA Traffic, Collision w/ Pedestrian - Occupant of Animal Veh',
col == 814.6, 'MVA Traffic, Collision w/ Pedestrian - Pedal Cyclist',
col == 814.7, 'MVA Traffic, Collision w/ Pedestrian - Pedestrian',
col == 814.8, 'MVA Traffic, Collision w/ Pedestrian - Oth Person',
col == 814.9, 'MVA Traffic, Collision w/ Pedestrian - Unspec Person',
col == 815.0, 'Oth MVA Traffic, Highway Collision - Driver of MV, Non MC',
col == 815.1, 'Oth MVA Traffic, Highway Collision - Passenger in MV, Non MC',
col == 815.2, 'Oth MVA Traffic, Highway Collision - Motorcyclist',
col == 815.3, 'Oth MVA Traffic, Highway Collision - Passenger on Motorcycle',
col == 815.4, 'Oth MVA Traffic, Highway Collision - Occupant of Streetcar',
col == 815.5, 'Oth MVA Traffic, Highway Collision - Occupant of Animal Veh',
col == 815.6, 'Oth MVA Traffic, Highway Collision - Pedal Cyclist',
col == 815.7, 'Oth MVA Traffic, Highway Collision - Pedestrian',
col == 815.8, 'Oth MVA Traffic, Highway Collision - Oth Person',
col == 815.9, 'Oth MVA Traffic, Highway Collision - Unspec Person',
col == 816.0, 'MVA Traffic, Loss Control-No Collision - Driver of MV, Non MC',
col == 816.1, 'MVA Traffic, Loss Control-No Collision - Passenger in MV, Non MC',
col == 816.2, 'MVA Traffic, Loss Control-No Collision - Motorcyclist',
col == 816.3, 'MVA Traffic, Loss Control-No Collision - Passenger on Motorcycle',
col == 816.4, 'MVA Traffic, Loss Control-No Collision - Occupant of Streetcar',
col == 816.5, 'MVA Traffic, Loss Control-No Collision - Occupant of Animal Veh',
col == 816.6, 'MVA Traffic, Loss Control-No Collision - Pedal Cyclist',
col == 816.7, 'MVA Traffic, Loss Control-No Collision - Pedestrian',
col == 816.8, 'MVA Traffic, Loss Control-No Collision - Oth Person',
col == 816.9, 'MVA Traffic, Loss Control-No Collision - Unspec Person',
col == 817.0, 'Noncollision MVA Traffic, Board/Alight - Driver of MV, Non MC',
col == 817.1, 'Noncollision MVA Traffic, Board/Alight - Passenger in MV, Non MC',
col == 817.2, 'Noncollision MVA Traffic, Board/Alight - Motorcyclist',
col == 817.3, 'Noncollision MVA Traffic, Board/Alight - Passenger on Motorcycle',
col == 817.4, 'Noncollision MVA Traffic, Board/Alight - Occupant of Streetcar',
col == 817.5, 'Noncollision MVA Traffic, Board/Alight - Occupant of Animal Veh',
col == 817.6, 'Noncollision MVA Traffic, Board/Alight - Pedal Cyclist',
col == 817.7, 'Noncollision MVA Traffic, Board/Alight - Pedestrian',
col == 817.8, 'Noncollision MVA Traffic, Board/Alight - Oth Person',
col == 817.9, 'Noncollision MVA Traffic, Board/Alight - Unspec Person',
col == 818.0, 'Oth Noncollision MVA Traffic - Driver of MV, Non MC',
col == 818.1, 'Oth Noncollision MVA Traffic - Passenger in MV, Non MC',
col == 818.2, 'Oth Noncollision MVA Traffic - Motorcyclist',
col == 818.3, 'Oth Noncollision MVA Traffic - Passenger on Motorcycle',
col == 818.4, 'Oth Noncollision MVA Traffic - Occupant of Streetcar',
col == 818.5, 'Oth Noncollision MVA Traffic - Occupant of Animal Veh',
col == 818.6, 'Oth Noncollision MVA Traffic - Pedal Cyclist',
col == 818.7, 'Oth Noncollision MVA Traffic - Pedestrian',
col == 818.8, 'Oth Noncollision MVA Traffic - Oth Person',
col == 818.9, 'Oth Noncollision MVA Traffic - Unspec Person',
col == 819.0, 'MVA Traffic, Unspec Nature - Driver of MV, Non MC',
col == 819.1, 'MVA Traffic, Unspec Nature - Passenger in MV, Non MC',
col == 819.2, 'MVA Traffic, Unspec Nature - Motorcyclist',
col == 819.3, 'MVA Traffic, Unspec Nature - Passenger on Motorcycle',
col == 819.4, 'MVA Traffic, Unspec Nature - Occupant of Streetcar',
col == 819.5, 'MVA Traffic, Unspec Nature - Occupant of Animal Veh',
col == 819.6, 'MVA Traffic, Unspec Nature - Pedal Cyclist',
col == 819.7, 'MVA Traffic, Unspec Nature - Pedestrian',
col == 819.8, 'MVA Traffic, Unspec Nature - Oth Person',
col == 819.9, 'MVA Traffic, Unspec Nature - Unspec Person',
col == 820.0, 'N-traffic Accident, Snow MV - Driver of MV, Non MC',
col == 820.1, 'N-traffic Accident, Snow MV - Passenger in MV, Non MC',
col == 820.2, 'N-traffic Accident, Snow MV - Motorcyclist',
col == 820.3, 'N-traffic Accident, Snow MV - Passenger on Motorcycle',
col == 820.4, 'N-traffic Accident, Snow MV - Occupant of Streetcar',
col == 820.5, 'N-traffic Accident, Snow MV - Occupant of Animal Veh',
col == 820.6, 'N-traffic Accident, Snow MV - Pedal Cyclist',
col == 820.7, 'N-traffic Accident, Snow MV - Pedestrian',
col == 820.8, 'N-traffic Accident, Snow MV - Oth Person',
col == 820.9, 'N-traffic Accident, Snow MV - Unspec Person',
col == 821.0, 'N-traffic Accident, Oth Off-Road MV - Driver of MV, Non MC',
col == 821.1, 'N-traffic Accident, Oth Off-Road MV - Passenger in MV, Non MC',
col == 821.2, 'N-traffic Accident, Oth Off-Road MV - Motorcyclist',
col == 821.3, 'N-traffic Accident, Oth Off-Road MV - Passenger on Motorcycle',
col == 821.4, 'N-traffic Accident, Oth Off-Road MV - Occupant of Streetcar',
col == 821.5, 'N-traffic Accident, Oth Off-Road MV - Occupant of Animal Veh',
col == 821.6, 'N-traffic Accident, Oth Off-Road MV - Pedal Cyclist',
col == 821.7, 'N-traffic Accident, Oth Off-Road MV - Pedestrian',
col == 821.8, 'N-traffic Accident, Oth Off-Road MV - Oth Person',
col == 821.9, 'N-traffic Accident, Oth Off-Road MV - Unspec Person',
col == 822.0, 'Oth MVA N-traffic Collision,Move Object - Driver of MV, Non MC',
col == 822.1, 'Oth MVA N-traffic Collision,Move Object - Passenger in MV, Non MC',
col == 822.2, 'Oth MVA N-traffic Collision,Move Object - Motorcyclist',
col == 822.3, 'Oth MVA N-traffic Collision,Move Object - Passenger on Motorcycle',
col == 822.4, 'Oth MVA N-traffic Collision,Move Object - Occupant of Streetcar',
col == 822.5, 'Oth MVA N-traffic Collision,Move Object - Occupant of Animal Veh',
col == 822.6, 'Oth MVA N-traffic Collision,Move Object - Pedal Cyclist',
col == 822.7, 'Oth MVA N-traffic Collision,Move Object - Pedestrian',
col == 822.8, 'Oth MVA N-traffic Collision,Move Object - Oth Person',
col == 822.9, 'Oth MVA N-traffic Collision,Move Object - Unspec Person',
col == 823.0, 'Oth MVA N-Traffic Collision,Stat Object - Driver of MV, Non MC',
col == 823.1, 'Oth MVA N-Traffic Collision,Stat Object - Passenger in MV, Non MC',
col == 823.2, 'Oth MVA N-Traffic Collision,Stat Object - Motorcyclist',
col == 823.3, 'Oth MVA N-Traffic Collision,Stat Object - Passenger on Motorcycle',
col == 823.4, 'Oth MVA N-Traffic Collision,Stat Object - Occupant of Streetcar',
col == 823.5, 'Oth MVA N-Traffic Collision,Stat Object - Occupant of Animal Veh',
col == 823.6, 'Oth MVA N-Traffic Collision,Stat Object - Pedal Cyclist',
col == 823.7, 'Oth MVA N-Traffic Collision,Stat Object - Pedestrian',
col == 823.8, 'Oth MVA N-Traffic Collision,Stat Object - Oth Person',
col == 823.9, 'Oth MVA N-Traffic Collision,Stat Object - Unspec Person',
col == 824.0, 'Oth MVA N-Traffic, Board/Alight - Driver of MV, Non MC',
col == 824.1, 'Oth MVA N-Traffic, Board/Alight - Passenger in MV, Non MC',
col == 824.2, 'Oth MVA N-Traffic, Board/Alight - Motorcyclist',
col == 824.3, 'Oth MVA N-Traffic, Board/Alight - Passenger on Motorcycle',
col == 824.4, 'Oth MVA N-Traffic, Board/Alight - Occupant of Streetcar',
col == 824.5, 'Oth MVA N-Traffic, Board/Alight - Occupant of Animal Veh',
col == 824.6, 'Oth MVA N-Traffic, Board/Alight - Pedal Cyclist',
col == 824.7, 'Oth MVA N-Traffic, Board/Alight - Pedestrian',
col == 824.8, 'Oth MVA N-Traffic, Board/Alight - Oth Person',
col == 824.9, 'Oth MVA N-Traffic, Board/Alight - Unspec Person',
col == 825.0, 'Oth MVA N-Traffic, Oth & Unspec Nature - Driver of MV, Non MC',
col == 825.1, 'Oth MVA N-Traffic, Oth & Unspec Nature - Passenger in MV, Non MC',
col == 825.2, 'Oth MVA N-Traffic, Oth & Unspec Nature - Motorcyclist',
col == 825.3, 'Oth MVA N-Traffic, Oth & Unspec Nature - Passenger on Motorcycle',
col == 825.4, 'Oth MVA N-Traffic, Oth & Unspec Nature - Occupant of Streetcar',
col == 825.5, 'Oth MVA N-Traffic, Oth & Unspec Nature - Occupant of Animal Veh',
col == 825.6, 'Oth MVA N-Traffic, Oth & Unspec Nature - Pedal Cyclist',
col == 825.7, 'Oth MVA N-Traffic, Oth & Unspec Nature - Pedestrian',
col == 825.8, 'Oth MVA N-Traffic, Oth & Unspec Nature - Oth Person',
col == 825.9, 'Oth MVA N-Traffic, Oth & Unspec Nature - Unspec Person',
col == 826.0, 'Pedal Cycle Accident - Pedestrian',
col == 826.1, 'Pedal Cycle Accident - Pedal Cyclist',
col == 826.2, 'Pedal Cycle Accident - Rider of Animal',
col == 826.3, 'Pedal Cycle Accident - Occupant of Animal-Drawn Veh',
col == 826.4, 'Pedal Cycle Accident - Occupant of Streetcar',
col == 826.8, 'Pedal Cycle Accident - Oth Person',
col == 826.9, 'Pedal Cycle Accident - Unspec Person',
col == 827.0, 'Animal-Drawn Veh Accident - Pedestrian',
col == 827.2, 'Animal-Drawn Veh Accident - Rider of Animal',
col == 827.3, 'Animal-Drawn Veh Accident - Occupant of Animal-Drawn Veh',
col == 827.4, 'Animal-Drawn Veh Accident - Occupant of Streetcar',
col == 827.8, 'Animal-Drawn Veh Accident - Oth Person',
col == 827.9, 'Animal-Drawn Veh Accident - Unspec Person',
col == 828.0, 'Accident, Ridden Animal - Pedestrian',
col == 828.2, 'Accident, Ridden Animal - Rider of Animal',
col == 828.3, 'Accident, Ridden Animal - Occupant of Animal-Drawn Veh',
col == 828.4, 'Accident, Ridden Animal - Occupant of Streetcar',
col == 828.8, 'Accident, Ridden Animal - Oth Person',
col == 828.9, 'Accident, Ridden Animal - Unspec Person',
col == 829.0, 'Oth Road Veh Accidents - Pedestrian',
col == 829.4, 'Oth Road Veh Accidents - Occupant of Streetcar',
col == 829.8, 'Oth Road Veh Accidents - Oth Person',
col == 829.9, 'Oth Road Veh Accidents - Unspec Person',
col == 830.0, 'H2OCraft Accident, Submersion - Small Boater (Unpowered)',
col == 830.1, 'H2OCraft Accident, Submersion - Small Boater (Powered)',
col == 830.2, 'H2OCraft Accident, Submersion - Crew of Oth H2OCraft',
col == 830.3, 'H2OCraft Accident, Submersion - Pass of Oth H2OCraft',
col == 830.4, 'H2OCraft Accident, Submersion - H2O Skier',
col == 830.5, 'H2OCraft Accident, Submersion - Swimmer',
col == 830.6, 'H2OCraft Accident, Submersion - Dockers/Stevedores',
col == 830.7, 'H2OCraft Accident, Submersion - Military watercraft, any type',
col == 830.8, 'H2OCraft Accident, Submersion - Oth Person',
col == 830.9, 'H2OCraft Accident, Submersion - Unspec Person',
col == 831.0, 'H2OCraft Accident, Oth Injury - Small Boater (Unpowered)',
col == 831.1, 'H2OCraft Accident, Oth Injury - Small Boater (Powered)',
col == 831.2, 'H2OCraft Accident, Oth Injury - Crew of Oth H2OCraft',
col == 831.3, 'H2OCraft Accident, Oth Injury - Pass of Oth H2OCraft',
col == 831.4, 'H2OCraft Accident, Oth Injury - H2O Skier',
col == 831.5, 'H2OCraft Accident, Oth Injury - Swimmer',
col == 831.6, 'H2OCraft Accident, Oth Injury - Dockers/Stevedores',
col == 831.7, 'H2OCraft Accident, Oth Injury - Military watercraft, any type',
col == 831.8, 'H2OCraft Accident, Oth Injury - Oth Person',
col == 831.9, 'H2OCraft Accident, Oth Injury - Unspec Person',
col == 832.0, 'H2O Transport, Oth Submersion/Drown - Small Boater (Unpowered)',
col == 832.1, 'H2O Transport, Oth Submersion/Drown - Small Boater (Powered)',
col == 832.2, 'H2O Transport, Oth Submersion/Drown - Crew of Oth H2OCraft',
col == 832.3, 'H2O Transport, Oth Submersion/Drown - Pass of Oth H2OCraft',
col == 832.4, 'H2O Transport, Oth Submersion/Drown - H2O Skier',
col == 832.5, 'H2O Transport, Oth Submersion/Drown - Swimmer',
col == 832.6, 'H2O Transport, Oth Submersion/Drown - Dockers/Stevedores',
col == 832.7, 'H2O Transport, Oth Submersion/Drown - Military watercraft, any type',
col == 832.8, 'H2O Transport, Oth Submersion/Drown - Oth Person',
col == 832.9, 'H2O Transport, Oth Submersion/Drown - Unspec Person',
col == 833.0, 'H2O Transport, Stairs/Ladders Fall - Small Boater (Unpowered)',
col == 833.1, 'H2O Transport, Stairs/Ladders Fall - Small Boater (Powered)',
col == 833.2, 'H2O Transport, Stairs/Ladders Fall - Crew of Oth H2OCraft',
col == 833.3, 'H2O Transport, Stairs/Ladders Fall - Pass of Oth H2OCraft',
col == 833.4, 'H2O Transport, Stairs/Ladders Fall - H2O Skier',
col == 833.5, 'H2O Transport, Stairs/Ladders Fall - Swimmer',
col == 833.6, 'H2O Transport, Stairs/Ladders Fall - Dockers/Stevedores',
col == 833.7, 'H2O Transport, Stairs/Ladders Fall - Military watercraft, any type',
col == 833.8, 'H2O Transport, Stairs/Ladders Fall - Oth Person',
col == 833.9, 'H2O Transport, Stairs/Ladders Fall - Unspec Person',
col == 834.0, 'H2O Transport, Oth Multi-level Fall - Small Boater (Unpowered)',
col == 834.1, 'H2O Transport, Oth Multi-level Fall - Small Boater (Powered)',
col == 834.2, 'H2O Transport, Oth Multi-level Fall - Crew of Oth H2OCraft',
col == 834.3, 'H2O Transport, Oth Multi-level Fall - Pass of Oth H2OCraft',
col == 834.4, 'H2O Transport, Oth Multi-level Fall - H2O Skier',
col == 834.5, 'H2O Transport, Oth Multi-level Fall - Swimmer',
col == 834.6, 'H2O Transport, Oth Multi-level Fall - Dockers/Stevedores',
col == 834.7, 'H2O Transport, Oth Multi-level Fall - Military watercraft, any type',
col == 834.8, 'H2O Transport, Oth Multi-level Fall - Oth Person',
col == 834.9, 'H2O Transport, Oth Multi-level Fall - Unspec Person',
col == 835.0, 'H2O Transport, Oth & Unspec Fall - Small Boater (Unpowered)',
col == 835.1, 'H2O Transport, Oth & Unspec Fall - Small Boater (Powered)',
col == 835.2, 'H2O Transport, Oth & Unspec Fall - Crew of Oth H2OCraft',
col == 835.3, 'H2O Transport, Oth & Unspec Fall - Pass of Oth H2OCraft',
col == 835.4, 'H2O Transport, Oth & Unspec Fall - H2O Skier',
col == 835.5, 'H2O Transport, Oth & Unspec Fall - Swimmer',
col == 835.6, 'H2O Transport, Oth & Unspec Fall - Dockers/Stevedores',
col == 835.7, 'H2O Transport, Oth & Unspec Fall - Military watercraft, any type',
col == 835.8, 'H2O Transport, Oth & Unspec Fall - Oth Person',
col == 835.9, 'H2O Transport, Oth & Unspec Fall - Unspec Person',
col == 836.0, 'H2O Transport, Machinery Accident - Small Boater (Unpowered)',
col == 836.1, 'H2O Transport, Machinery Accident - Small Boater (Powered)',
col == 836.2, 'H2O Transport, Machinery Accident - Crew of Oth H2OCraft',
col == 836.3, 'H2O Transport, Machinery Accident - Pass of Oth H2OCraft',
col == 836.4, 'H2O Transport, Machinery Accident - H2O Skier',
col == 836.5, 'H2O Transport, Machinery Accident - Swimmer',
col == 836.6, 'H2O Transport, Machinery Accident - Dockers/Stevedores',
col == 836.7, 'H2O Transport, Machinery Accident - Military watercraft, any type',
col == 836.8, 'H2O Transport, Machinery Accident - Oth Person',
col == 836.9, 'H2O Transport, Machinery Accident - Unspec Person',
col == 837.0, 'H2OCraft Explosion, Fire, or Burning - Small Boater (Unpowered)',
col == 837.1, 'H2OCraft Explosion, Fire, or Burning - Small Boater (Powered)',
col == 837.2, 'H2OCraft Explosion, Fire, or Burning - Crew of Oth H2OCraft',
col == 837.3, 'H2OCraft Explosion, Fire, or Burning - Pass of Oth H2OCraft',
col == 837.4, 'H2OCraft Explosion, Fire, or Burning - H2O Skier',
col == 837.5, 'H2OCraft Explosion, Fire, or Burning - Swimmer',
col == 837.6, 'H2OCraft Explosion, Fire, or Burning - Dockers/Stevedores',
col == 837.7, 'H2OCraft Explosion, Fire, or Burning - Military watercraft, any type',
col == 837.8, 'H2OCraft Explosion, Fire, or Burning - Oth Person',
col == 837.9, 'H2OCraft Explosion, Fire, or Burning - Unspec Person',
col == 838.0, 'Oth & Unspec H2O Transport Accident - Small Boater (Unpowered)',
col == 838.1, 'Oth & Unspec H2O Transport Accident - Small Boater (Powered)',
col == 838.2, 'Oth & Unspec H2O Transport Accident - Crew of Oth H2OCraft',
col == 838.3, 'Oth & Unspec H2O Transport Accident - Pass of Oth H2OCraft',
col == 838.4, 'Oth & Unspec H2O Transport Accident - H2O Skier',
col == 838.5, 'Oth & Unspec H2O Transport Accident - Swimmer',
col == 838.6, 'Oth & Unspec H2O Transport Accident - Dockers/Stevedores',
col == 838.7, 'Oth & Unspec H2O Transport Accident - Military watercraft, any type',
col == 838.8, 'Oth & Unspec H2O Transport Accident - Oth Person',
col == 838.9, 'Oth & Unspec H2O Transport Accident - Unspec Person',
col == 840.0, 'Powered Aircraft, Tkoff/Land - Spacecraft Occupant',
col == 840.1, 'Powered Aircraft, Tkoff/Land - Military Aircraft Occupant',
col == 840.2, 'Powered Aircraft, Tkoff/Land - Ground-Ground Commercial Crew',
col == 840.3, 'Powered Aircraft, Tkoff/Land - Ground-Ground Commercial Occupant',
col == 840.4, 'Powered Aircraft, Tkoff/Land - Ground-Air Commercial Occupant',
col == 840.5, 'Powered Aircraft, Tkoff/Land - Oth Powered Aircraft Occupant',
col == 840.6, 'Powered Aircraft, Tkoff/Land - Unpowered Aircraft Occupant',
col == 840.7, 'Powered Aircraft, Tkoff/Land - Parachutist',
col == 840.8, 'Powered Aircraft, Tkoff/Land - Ground Crew/Airline Employee',
col == 840.9, 'Powered Aircraft, Tkoff/Land - Oth Person',
col == 841.0, 'Oth & Unspec Powered Aircraft - Spacecraft Occupant',
col == 841.1, 'Oth & Unspec Powered Aircraft - Military Aircraft Occupant',
col == 841.2, 'Oth & Unspec Powered Aircraft - Ground-Ground Commercial Crew',
col == 841.3, 'Oth & Unspec Powered Aircraft - Ground-Ground Commercial Occupant',
col == 841.4, 'Oth & Unspec Powered Aircraft - Ground-Air Commercial Occupant',
col == 841.5, 'Oth & Unspec Powered Aircraft - Oth Powered Aircraft Occupant',
col == 841.6, 'Oth & Unspec Powered Aircraft - Unpowered Aircraft Occupant',
col == 841.7, 'Oth & Unspec Powered Aircraft - Parachutist',
col == 841.8, 'Oth & Unspec Powered Aircraft - Ground Crew/Airline Employee',
col == 841.9, 'Oth & Unspec Powered Aircraft - Oth Person',
col == 842.6, 'Unpowered Aircraft - Unpowered Aircraft Occupant',
col == 842.7, 'Unpowered Aircraft - Parachutist',
col == 842.8, 'Unpowered Aircraft - Ground Crew/Airline Employee',
col == 842.9, 'Unpowered Aircraft - Oth Person',
col == 843.0, 'Fall In/ On/ From Aircraft - Spacecraft Occupant',
col == 843.1, 'Fall In/ On/ From Aircraft - Military Aircraft Occupant',
col == 843.2, 'Fall In/ On/ From Aircraft - Ground-Ground Commercial Crew',
col == 843.3, 'Fall In/ On/ From Aircraft - Ground-Ground Commercial Occupant',
col == 843.4, 'Fall In/ On/ From Aircraft - Ground-Air Commercial Occupant',
col == 843.5, 'Fall In/ On/ From Aircraft - Oth Powered Aircraft Occupant',
col == 843.6, 'Fall In/ On/ From Aircraft - Unpowered Aircraft Occupant',
col == 843.7, 'Fall In/ On/ From Aircraft - Parachutist',
col == 843.8, 'Fall In/ On/ From Aircraft - Ground Crew/Airline Employee',
col == 843.9, 'Fall In/ On/ From Aircraft - Oth Person',
col == 844.0, 'Oth Spec Air Transport - Spacecraft Occupant',
col == 844.1, 'Oth Spec Air Transport - Military Aircraft Occupant',
col == 844.2, 'Oth Spec Air Transport - Ground-Ground Commercial Crew',
col == 844.3, 'Oth Spec Air Transport - Ground-Ground Commercial Occupant',
col == 844.4, 'Oth Spec Air Transport - Ground-Air Commercial Occupant',
col == 844.5, 'Oth Spec Air Transport - Oth Powered Aircraft Occupant',
col == 844.6, 'Oth Spec Air Transport - Unpowered Aircraft Occupant',
col == 844.7, 'Oth Spec Air Transport - Parachutist',
col == 844.8, 'Oth Spec Air Transport - Ground Crew/Airline Employee',
col == 844.9, 'Oth Spec Air Transport - Oth Person',
col == 845.0, 'Spacecraft Accident - Spacecraft Occupant',
col == 845.8, 'Spacecraft Accident - Ground Crew/Airline Employee',
col == 845.9, 'Spacecraft Accident - Oth Person',
col == 846.0, 'Powered Veh w/in Premises of Industrial/Commercial Establishment',
col == 847.0, 'Accidents Involving Cable Cars Not Running on Rails',
col == 848.0, 'Accidents Involving Oth Veh, NEC',
col == 850.0, 'Acc Poison - Heroin',
col == 850.1, 'Acc Poison - Methadone',
col == 850.2, 'Acc Poison - Oth Opiates and Related Narcotics',
col == 850.3, 'Acc Poison - Salicylates',
col == 850.4, 'Acc Poison - Aromatic Analgesics, NEC',
col == 850.5, 'Acc Poison - Pyrazole Derivatives',
col == 850.6, 'Acc Poison - Antirheumatics [antiphlogistics]',
col == 850.7, 'Acc Poison - Oth Non-Narcotic Analgesics',
col == 850.8, 'Acc Poison - Oth Spec Analgesics and Antipyretics',
col == 850.9, 'Acc Poison - Unspec Analgesic or Antipyretic',
col == 851.0, 'Acc Poison - Barbiturates',
col == 852.0, 'Acc Poison - Chloral Hydrate Group',
col == 852.1, 'Acc Poison - Paraldehyde',
col == 852.2, 'Acc Poison - Bromine Compounds',
col == 852.3, 'Acc Poison - Methaqualone Compounds',
col == 852.4, 'Acc Poison - Glutethimide Group',
col == 852.5, 'Acc Poison - Mixed Sedatives, NEC',
col == 852.8, 'Acc Poison - Oth Spec Sedatives and Hypnotics',
col == 852.9, 'Acc Poison - Unspec Sedative or Hypnotic',
col == 853.0, 'Acc Poison - Phenothiazine-based Tranquilizers',
col == 853.1, 'Acc Poison - Butyrophenone-based Tranquilizers',
col == 853.2, 'Acc Poison - Benzodiazepine-based Tranquilizers',
col == 853.8, 'Acc Poison - Oth Spec Tranquilizers',
col == 853.9, 'Acc Poison - Unspec Tranquilizer',
col == 854.0, 'Acc Poison - Antidepressants',
col == 854.1, 'Acc Poison - Psychodysleptics [hallucinogens]',
col == 854.2, 'Acc Poison - Psychostimulants',
col == 854.3, 'Acc Poison - Central Nervous System Stimulants',
col == 854.8, 'Acc Poison - Oth Psychotropic Agents',
col == 855.0, 'Acc Poison - Anticonvulsant & Anti-Parkinsonism Drugs',
col == 855.1, 'Acc Poison - Oth Central Nervous System Depressants',
col == 855.2, 'Acc Poison - Local Anesthetics',
col == 855.3, 'Acc Poison - Parasympathomimetics [cholinergics]',
col == 855.4, 'Acc Poison - Parasympatholytics/Spasmolytics',
col == 855.5, 'Acc Poison - Sympathomimetics [adrenergics]',
col == 855.6, 'Acc Poison - Sympatholytics [antiadrenergics]',
col == 855.8, 'Acc Poison - Oth Spec Drugs on Central/Autonomic Nervous System',
col == 855.9, 'Acc Poison - Unspec Drugs on Central/Autonomic Nervous System',
col == 856.0, 'Acc Poison - Antibiotics',
col == 857.0, 'Acc Poison - Oth Anti-Infectives',
col == 858.0, 'Acc Poison - Hormones and Synthetic Substitutes',
col == 858.1, 'Acc Poison - Primarily Systemic Agents',
col == 858.2, 'Acc Poison - Agents Mainly Affecting Blood Constituents',
col == 858.3, 'Acc Poison - Agents Mainly Affecting Cardiovascular System',
col == 858.4, 'Acc Poison - Agents Mainly Affecting Gastrointestinal System',
col == 858.5, 'Acc Poison - H2O/Mineral/Uric Acid Metabolism Drugs',
col == 858.6, 'Acc Poison - Agents act on Smooth,Skeletal Muscles & Respiratory',
col == 858.7, 'Acc Poison - Skin/Ophthalmological/Otorhinolaryngological/Dental',
col == 858.8, 'Acc Poison - Oth Spec Drugs',
col == 858.9, 'Acc Poison - Unspec Drug',
col == 860.0, 'Acc Poison - Alcoholic Beverages',
col == 860.1, 'Acc Poison - Oth/Unspec Ethyl Alcohol and Its Products',
col == 860.2, 'Acc Poison - Methyl Alcohol',
col == 860.3, 'Acc Poison - Isopropyl Alcohol',
col == 860.4, 'Acc Poison - Fusel Oil',
col == 860.8, 'Acc Poison - Oth Spec Alcohols',
col == 860.9, 'Acc Poison - Unspec Alcohol',
col == 861.0, 'Acc Poison - Synthetic Detergents and Shampoos',
col == 861.1, 'Acc Poison - Soap Products',
col == 861.2, 'Acc Poison - Polishes',
col == 861.3, 'Acc Poison - Oth Cleansing and Polishing Agents',
col == 861.4, 'Acc Poison - Disinfectants',
col == 861.5, 'Acc Poison - Lead Paints',
col == 861.6, 'Acc Poison - Oth Paints and Varnishes',
col == 861.9, 'Acc Poison - Unspec',
col == 862.0, 'Acc Poison - Petroleum Solvents',
col == 862.1, 'Acc Poison - Petroleum Fuels and Cleaners',
col == 862.2, 'Acc Poison - Lubricating Oils',
col == 862.3, 'Acc Poison - Petroleum Solids',
col == 862.4, 'Acc Poison - Oth Spec Solvents',
col == 862.9, 'Acc Poison - Unspec Solvent',
col == 863.0, 'Acc Poison - Insecticides of Organochlorine Compounds',
col == 863.1, 'Acc Poison - Insecticides of Organophosphorus Compounds',
col == 863.2, 'Acc Poison - Carbamates',
col == 863.3, 'Acc Poison - Mixtures of Insecticides',
col == 863.4, 'Acc Poison - Oth and Unspec Insecticides',
col == 863.5, 'Acc Poison - Herbicides',
col == 863.6, 'Acc Poison - Fungicides',
col == 863.7, 'Acc Poison - Rodenticides',
col == 863.8, 'Acc Poison - Fumigants',
col == 863.9, 'Acc Poison - Oth and Unspec',
col == 864.0, 'Acc Poison - Corrosive Aromatics',
col == 864.1, 'Acc Poison - Acids',
col == 864.2, 'Acc Poison - Caustic Alkalis',
col == 864.3, 'Acc Poison - Oth Spec Corrosives and Caustics',
col == 864.4, 'Acc Poison - Unspec Corrosives and Caustics',
col == 865.0, 'Acc Poison - Meat',
col == 865.1, 'Acc Poison - Shellfish',
col == 865.2, 'Acc Poison - Oth Fish',
col == 865.3, 'Acc Poison - Berries and Seeds',
col == 865.4, 'Acc Poison - Oth Spec Plants',
col == 865.5, 'Acc Poison - Mushrooms and Oth Fungi',
col == 865.8, 'Acc Poison - Oth Spec Foods',
col == 865.9, 'Acc Poison - Unspec Foodstuff or Poisonous Plant',
col == 866.0, 'Acc Poison - Lead and Its Compounds and Fumes',
col == 866.1, 'Acc Poison - Mercury and Its Compounds and Fumes',
col == 866.2, 'Acc Poison - Antimony and Its Compounds and Fumes',
col == 866.3, 'Acc Poison - Arsenic and Its Compounds and Fumes',
col == 866.4, 'Acc Poison - Oth Metals and Their Compounds and Fumes',
col == 866.5, 'Acc Poison - Plant Foods and Fertilizers',
col == 866.6, 'Acc Poison - Glues and Adhesives',
col == 866.7, 'Acc Poison - Cosmetics',
col == 866.8, 'Acc Poison - Oth Spec Solid or Liquid Substances',
col == 866.9, 'Acc Poison - Unspec Solid or Liquid Substance',
col == 867.0, 'Acc Poison by Gas Distributed by Pipeline',
col == 868.0, 'Acc Poison - Liquid Petroleum Gas in Mobile Containers',
col == 868.1, 'Acc Poison - Oth and Unspec Utility Gas',
col == 868.2, 'Acc Poison - Motor Veh Exhaust Gas',
col == 868.3, 'Acc Poison - Carbon Monoxide-Incomplete Combustion Domestic Fuel',
col == 868.8, 'Acc Poison - Carbon Monoxide From Oth Sources',
col == 868.9, 'Acc Poison - Unspec Carbon Monoxide',
col == 869.0, 'Acc Poison - Nitrogen Oxides',
col == 869.1, 'Acc Poison - Sulfur Dioxide',
col == 869.2, 'Acc Poison - Freon',
col == 869.3, 'Acc Poison - Lacrimogenic Gas [tear gas]',
col == 869.4, 'Acc Poison - Second Hand Tobacco Smoke',
col == 869.8, 'Acc Poison - Oth Spec Gases and Vapors',
col == 869.9, 'Acc Poison - Unspec Gases and Vapors',
col == 870.0, 'Cut/Hemorrhage During - Surgical Operation',
col == 870.1, 'Cut/Hemorrhage During - Infusion/Transfusion',
col == 870.2, 'Cut/Hemorrhage During - Kidney Dialysis/Oth Perfusion',
col == 870.3, 'Cut/Hemorrhage During - Injection/Vaccination',
col == 870.4, 'Cut/Hemorrhage During - Endoscopic Examination',
col == 870.5, 'Cut/Hemorrhage During - Aspiration/Puncture/Catheterization',
col == 870.6, 'Cut/Hemorrhage During - Heart Catheterization',
col == 870.7, 'Cut/Hemorrhage During - Administration of Enema',
col == 870.8, 'Cut/Hemorrhage During - Oth Spec Medical Care',
col == 870.9, 'Cut/Hemorrhage During - Unspec Medical Care',
col == 871.0, 'Foreign Object Left In Body- Surgical Operation',
col == 871.1, 'Foreign Object Left In Body- Infusion/Transfusion',
col == 871.2, 'Foreign Object Left In Body- Kidney Dialysis/Oth Perfusion',
col == 871.3, 'Foreign Object Left In Body- Injection/Vaccination',
col == 871.4, 'Foreign Object Left In Body- Endoscopic Examination',
col == 871.5, 'Foreign Object Left In Body- Aspiration/Puncture/Catheterization',
col == 871.6, 'Foreign Object Left In Body- Heart Catheterization',
col == 871.7, 'Foreign Object Left In Body- Removal of Catheter or Packing',
col == 871.8, 'Foreign Object Left In Body- Oth Spec Procedures',
col == 871.9, 'Foreign Object Left In Body- Unspec Procedure',
col == 872.0, 'Sterile Precautions Fail - Surgical Operation',
col == 872.1, 'Sterile Precautions Fail - Infusion/Transfusion',
col == 872.2, 'Sterile Precautions Fail - Kidney Dialysis/Oth Perfusion',
col == 872.3, 'Sterile Precautions Fail - Injection/Vaccination',
col == 872.4, 'Sterile Precautions Fail - Endoscopic Examination',
col == 872.5, 'Sterile Precautions Fail - Aspiration/Puncture/Catheterization',
col == 872.6, 'Sterile Precautions Fail - Heart Catheterization',
col == 872.8, 'Sterile Precautions Fail - Oth Spec Procedures',
col == 872.9, 'Sterile Precautions Fail - Unspec Procedure',
col == 873.0, 'Dosage Fail - Excessive Blood/Fluid During (Trans/In)Fusion',
col == 873.1, 'Dosage Fail - Incorrect Dilution of Fluid During Infusion',
col == 873.2, 'Dosage Fail - Overdose of Radiation in Therapy',
col == 873.3, 'Dosage Fail - Accidental Radiation Exposure During Care',
col == 873.4, 'Dosage Fail - Dosage Fail in Electroshock/Insulin-Shock Therapy',
col == 873.5, 'Dosage Fail - Inappropriate Temperature in Application/Packing',
col == 873.6, 'Dosage Fail - Nonadministration of Necessary Drug/Medicine',
col == 873.8, 'Dosage Fail - Oth Spec Dosage Fail',
col == 873.9, 'Dosage Fail - Unspec Dosage Fail',
col == 874.0, 'Instrument Mechanical Fail - Surgical Operation',
col == 874.1, 'Instrument Mechanical Fail - Infusion/Transfusion',
col == 874.2, 'Instrument Mechanical Fail - Kidney Dialysis/Oth Perfusion',
col == 874.3, 'Instrument Mechanical Fail - Endoscopic Examination',
col == 874.4, 'Instrument Mechanical Fail - Aspiration/Puncture/Catheterization',
col == 874.5, 'Instrument Mechanical Fail - Heart Catheterization',
col == 874.8, 'Instrument Mechanical Fail - Oth Spec Procedures',
col == 874.9, 'Instrument Mechanical Fail - Unspec Procedure',
col == 875.0, 'Contaminated Blood/Fluid/Drug/Bio Matter- Transfused/Infused',
col == 875.1, 'Contaminated Blood/Fluid/Drug/Bio Matter- Injected/Vaccination',
col == 875.2, 'Contaminated Blood/Fluid/Drug/Bio Matter- Administered,Oth Means',
col == 875.8, 'Contaminated Blood/Fluid/Drug/Bio Matter- Oth',
col == 875.9, 'Contaminated Blood/Fluid/Drug/Bio Matter- Unspec',
col == 876.0, 'Oth Misadventures During - Mismatched Blood in Transfusion',
col == 876.1, 'Oth Misadventures During - Wrong Fluid in Infusion',
col == 876.2, 'Oth Misadventures During - Surgery Suture/Ligature Failure',
col == 876.3, 'Oth Misadventures During - Endotracheal Tube Wrongly Placed',
col == 876.4, 'Oth Misadventures During - Failure, Intro/Remove Oth Instrument',
col == 876.5, 'Oth Misadventures During - Inappropriate Operation Performance',
col == 876.6, 'Oth Misadventures During - Patient not scheduled for surgery',
col == 876.7, 'Oth Misadventures During - Correct Procedure on Wrong Side',
col == 876.8, 'Oth Misadventures - Oth Spec Misadventures During Care',
col == 876.9, 'Oth Misadventures - Unspec Misadventures During Care',
col == 878.0, 'Surgery w/o Mention of Mishap - Transplant of Whole Organ',
col == 878.1, 'Surgery w/o Mention of Mishap - Implant of Artificial Device',
col == 878.2, 'Surgery w/o Mention of Mishap - Anastomosis/Bypass/Graft-Tissue',
col == 878.3, 'Surgery w/o Mention of Mishap - Formation of External Stoma',
col == 878.4, 'Surgery w/o Mention of Mishap - Oth Restorative Surgery',
col == 878.5, 'Surgery w/o Mention of Mishap - Amputation of Limb(s)',
col == 878.6, 'Surgery w/o Mention of Mishap - Removal of Oth Organ, Part/Total',
col == 878.8, 'Surgery w/o Mention of Mishap - Oth Spec Surgery & Procedures',
col == 878.9, 'Surgery w/o Mention of Mishap - Unspec Surgery & Procedures',
col == 879.0, 'Oth Proc w/o Mention of Mishap - Cardiac Catheterization',
col == 879.1, 'Oth Proc w/o Mention of Mishap - Kidney Dialysis',
col == 879.2, 'Oth Proc w/o Mention of Mishap - Radiology/Radiotherapy',
col == 879.3, 'Oth Proc w/o Mention of Mishap - Shock Therapy',
col == 879.4, 'Oth Proc w/o Mention of Mishap - Aspiration of Fluid',
col == 879.5, 'Oth Proc w/o Mention of Mishap - Insert Gastric/Duodenal Sound',
col == 879.6, 'Oth Proc w/o Mention of Mishap - Urinary Catheterization',
col == 879.7, 'Oth Proc w/o Mention of Mishap - Blood Sampling',
col == 879.8, 'Oth Proc w/o Mention of Mishap - Oth Spec Procedures',
col == 879.9, 'Oth Proc w/o Mention of Mishap - Unspec Procedure',
col == 880.0, 'Fall On or From Stairs/Steps - Escalator',
col == 880.1, 'Fall On or From Stairs/Steps - Sidewalk Curb',
col == 880.9, 'Fall On or From Stairs/Steps - Oth Stairs or Steps',
col == 881.0, 'Fall On or From Ladders/Scaffolding - Ladder',
col == 881.1, 'Fall On or From Ladders/Scaffolding - Scaffolding',
col == 882.0, 'Fall From or Out of Building/Other Structure',
col == 883.0, 'Fall into Hole/Oth Surface Opening - Jump/Dive into H2O [pool]',
col == 883.1, 'Fall into Hole/Oth Surface Opening - Well',
col == 883.2, 'Fall into Hole/Oth Surface Opening - Storm Drain/Manhole',
col == 883.9, 'Fall into Hole/Oth Surface Opening - Oth Hole/Surface Opening',
col == 884.0, 'Oth Multi-level Fall - Playground Equipment',
col == 884.1, 'Oth Multi-level Fall - Cliff',
col == 884.2, 'Oth Multi-level Fall - Chair',
col == 884.3, 'Oth Multi-level Fall - Wheelchair',
col == 884.4, 'Oth Multi-level Fall - Bed',
col == 884.5, 'Oth Multi-level Fall - Other Furniture',
col == 884.6, 'Oth Multi-level Fall - Commode Toilet',
col == 884.9, 'Oth Multi-level Fall - Oth Multi-Level Fall',
col == 885.0, 'Fall on Same Level - Nonmotorized Scooter (10/2002)',
col == 885.1, 'Fall on Same Level - Roller/In-Line Skates',
col == 885.2, 'Fall on Same Level - Skateboard',
col == 885.3, 'Fall on Same Level - Skis',
col == 885.4, 'Fall on Same Level - Snowboard',
col == 885.9, 'Fall on Same Level - Other',
col == 886.0, 'Fall From Collision/Push/Shoving By, W/ Oth Person - In Sports',
col == 886.9, 'Fall From Collision/Push/Shoving By, W/ Oth Person - Oth/Unspec',
col == 887.0, 'Fracture, Cause Unspec',
col == 888.0, 'Oth and Unspec Fall - Resulting in Striking Sharp Object',
col == 888.1, 'Oth and Unspec Fall - Resulting in Striking Other Object',
col == 888.8, 'Oth and Unspec Fall - Oth',
col == 888.9, 'Oth and Unspec Fall - Unspec',
col == 890.0, 'Private Dwelling Conflagration - Conflagration Explosion',
col == 890.1, 'Private Dwelling Conflagration - Fumes from PVC Combustion',
col == 890.2, 'Private Dwelling Conflagration - Oth Smoke and Fumes',
col == 890.3, 'Private Dwelling Conflagration - Conflagration Burning',
col == 890.8, 'Private Dwelling Conflagration - Oth Conflagration Accident',
col == 890.9, 'Private Dwelling Conflagration - Unspec Conflagration Accident',
col == 891.0, 'Oth/Unspec Building Conflagration- Conflagration Explosion',
col == 891.1, 'Oth/Unspec Building Conflagration- Fumes from PVC Combustion',
col == 891.2, 'Oth/Unspec Building Conflagration- Oth Smoke and Fumes',
col == 891.3, 'Oth/Unspec Building Conflagration- Conflagration Burning',
col == 891.8, 'Oth/Unspec Building Conflagration- Oth Conflagration Accident',
col == 891.9, 'Oth/Unspec Building Conflagration- Unspec Conflagration Accident',
col == 892.0, 'Conflagration Not in Building or Structure',
col == 893.0, 'Clothing Ignition - Controlled Fire in Private Dwelling',
col == 893.1, 'Clothing Ignition - Controlled Fire in Oth Building/Structure',
col == 893.2, 'Clothing Ignition - Controlled Fire Not in Building/Structure',
col == 893.8, 'Clothing Ignition - Oth Spec Sources',
col == 893.9, 'Clothing Ignition - Unspec Source',
col == 894.0, 'Ignition of Highly Inflammable Material',
col == 895.0, 'Accident by Controlled Fire in Private Dwelling',
col == 896.0, 'Accident by Controlled Fire in Oth/Unspec Building/Structure',
col == 897.0, 'Accident by Controlled Fire Not in Building/Structure',
col == 898.0, 'Accident by Oth Spec Fire and Flames - Burning Bedclothes',
col == 898.1, 'Accident by Oth Spec Fire and Flames - Oth',
col == 899.0, 'Accident by Unspec Fire',
col == 900.0, 'Excessive Heat - Due to Weather Conditions',
col == 900.1, 'Excessive Heat - Of Man-Made Origin',
col == 900.9, 'Excessive Heat - Of Unspec Origin',
col == 901.0, 'Excessive Cold - Due to Weather Conditions',
col == 901.1, 'Excessive Cold - Of Man-Made Origin',
col == 901.8, 'Excessive Cold - Oth Spec Origin',
col == 901.9, 'Excessive Cold - Of Unspec Origin',
col == 902.0, 'High/Low/Changing Air Pressure - High Altitude Residence/Visit',
col == 902.1, 'High/Low/Changing Air Pressure - In Aircraft',
col == 902.2, 'High/Low/Changing Air Pressure - Due to Diving',
col == 902.8, 'High/Low/Changing Air Pressure - Due to Oth Spec Causes',
col == 902.9, 'High/Low/Changing Air Pressure - Unspec Cause',
col == 903.0, 'Travel and Motion',
col == 904.0, 'Hunger/Thirst/Exposure/Neglect - Infant/Helpless Persons',
col == 904.1, 'Hunger/Thirst/Exposure/Neglect - Lack of Food',
col == 904.2, 'Hunger/Thirst/Exposure/Neglect - Lack of H2O',
col == 904.3, 'Hunger/Thirst/Exposure/Neglect - Exposure(to Weather), NEC',
col == 904.9, 'Hunger/Thirst/Exposure/Neglect - Privation, Unqualified',
col == 905.0, 'Poison/Toxic Reactions - Venomous Snakes/Lizards',
col == 905.1, 'Poison/Toxic Reactions - Venomous Spiders',
col == 905.2, 'Poison/Toxic Reactions - Scorpion',
col == 905.3, 'Poison/Toxic Reactions - Hornets, Wasps, Bees',
col == 905.4, 'Poison/Toxic Reactions - Centipede/Venomous Millipede (tropical)',
col == 905.5, 'Poison/Toxic Reactions - Oth Venomous Arthropods',
col == 905.6, 'Poison/Toxic Reactions - Venomous H2O Animals/Plants',
col == 905.7, 'Poison/Toxic Reactions - Oth Plants',
col == 905.8, 'Poison/Toxic Reactions - Oth Spec',
col == 905.9, 'Poison/Toxic Reactions - Unspec',
col == 906.0, 'Oth Injury by Animal - Dog Bite',
col == 906.1, 'Oth Injury by Animal - Rat Bite',
col == 906.2, 'Oth Injury by Animal - Bite of Nonvenomous Snakes/Lizards',
col == 906.3, 'Oth Injury by Animal - Oth Animal Bite (Except Arthropod)',
col == 906.4, 'Oth Injury by Animal - Bite of Nonvenomous Arthropod',
col == 906.5, 'Oth Injury by Animal - Bite of Unspec Animal/Animal Bite NOS',
col == 906.8, 'Oth Injury by Animal - Oth Spec Injury Caused by Animal',
col == 906.9, 'Oth Injury by Animal - Unspec Injury Caused by Animal',
col == 907.0, 'Lightning',
col == 908.0, 'Cataclysmic Storms - Hurricane, Storm Surge, Tidal Wave, Typhoon',
col == 908.1, 'Cataclysmic Storms - Tornado, Cyclone, Twisters',
col == 908.2, 'Cataclysmic Storms - Floods, Torrential Rainfall, Flash Flood',
col == 908.3, 'Cataclysmic Storms - Blizzard (snow/ice)',
col == 908.4, 'Cataclysmic Storms - Dust Storm',
col == 908.8, 'Cataclysmic Storms - Oth Cataclysmic Storms',
col == 908.9, 'Cataclysmic Storms - Unspec Cataclysmic Storms/Storm NOS',
col == 909.0, 'Cataclysmic Earth - Earthquakes',
col == 909.1, 'Cataclysmic Earth - Volcanic Eruption, Burns from Lava/Ash Inhale',
col == 909.2, 'Cataclysmic Earth - Avalanche, Landslide, Mudslide',
col == 909.3, 'Cataclysmic Earth - Collapse of Dam or Man-made Structure',
col == 909.4, 'Cataclysmic Earth - Tidal Wave, Tidal Wave NOS, Tsunami',
col == 909.8, 'Cataclysmic Earth - Oth Cataclysmic Earth Movements/Eruptions',
col == 909.9, 'Cataclysmic Earth - Unspec Cataclysmic Earth Movements/Eruptions',
col == 910.0, 'Accidental Drown/Submersion - While H2O-Skiing',
col == 910.1, 'Accidental Drown/Submersion - Oth Sport w/ Diving Equipment',
col == 910.2, 'Accidental Drown/Submersion - Oth Sport w/out Diving Equipment',
col == 910.3, 'Accidental Drown/Submersion - Swim/Diving for Non-Sport Purposes',
col == 910.4, 'Accidental Drown/Submersion - In Bathtub',
col == 910.8, 'Accidental Drown/Submersion - Oth Accidental Drown/Submersion',
col == 910.9, 'Accidental Drown/Submersion - Unspec Accidental Drown/Submersion',
col == 911.0, 'Inhalation & Ingestion of Food Causing Choking/Suffocation',
col == 912.0, 'Inhalation & Ingestion of Oth Object Causing Choking/Suffocation',
col == 913.0, 'Accidental Mechanical Suffocate- In Bed or Cradle',
col == 913.1, 'Accidental Mechanical Suffocate- By Plastic Bag',
col == 913.2, 'Accidental Mechanical Suffocate- Lack of Air (In Closed Place)',
col == 913.3, 'Accidental Mechanical Suffocate- By Falling Earth/Oth Substance',
col == 913.8, 'Accidental Mechanical Suffocate- Oth Spec Means',
col == 913.9, 'Accidental Mechanical Suffocate- Unspec Means',
col == 914.0, 'Foreign Body Accidentally Entering Eye and Adnexa',
col == 915.0, 'Foreign Body Accidentally Entering Oth Orifice',
col == 916.0, 'Struck Accidentally by Falling Object',
col == 917.0, 'Striking Against/Struck Accidentally - In Sports w/o Subseq Fall',
col == 917.1, 'Striking Against/Struck Accidentally - Crowd Fear/Panic w/o Subseq Fall',
col == 917.2, 'Striking Against/Struck Accidentally - In Running H2O w/o Subseq Fall',
col == 917.3, 'Striking Against/Struck Accidentally - Furniture w/o Subseq Fall',
col == 917.4, 'Striking Against/Struck Accidentally - Oth Stationary Object w/o Subseq Fall',
col == 917.5, 'Striking Against/Struck Accidentally - In Sports w/ Subseq Fall',
col == 917.6, 'Striking Against/Struck Accidentally - Crowd,Collective Fear/Panic w/ Subseq Fall',
col == 917.7, 'Striking Against/Struck Accidentally - Furniture w/ Subseq Fall',
col == 917.8, 'Striking Against/Struck Accidentally - Oth Stationary Object w/ Subseq Fall',
col == 917.9, 'Striking Against/Struck Accidentally - Oth w/ or w/o Subseq Fall',
col == 918.0, 'Caught Accidentally In or Between Objects',
col == 919.0, 'Machinery Accident - Agricultural Machines',
col == 919.1, 'Machinery Accident - Mining and Earth-Drilling Machinery',
col == 919.2, 'Machinery Accident - Lifting Machines and Appliances',
col == 919.3, 'Machinery Accident - Metalworking Machines',
col == 919.4, 'Machinery Accident - Woodworking and Forming Machines',
col == 919.5, 'Machinery Accident - Prime Movers, Except Electrical Motors',
col == 919.6, 'Machinery Accident - Transmission Machinery',
col == 919.7, 'Machinery Accident - Earth Moving/Scraping/Oth Excavating Machine',
col == 919.8, 'Machinery Accident - Oth Spec Machinery',
col == 919.9, 'Machinery Accident - Unspec Machinery',
col == 920.0, 'Cutting Object Accident - Powered Lawn Mower',
col == 920.1, 'Cutting Object Accident - Oth Powered Hand Tools',
col == 920.2, 'Cutting Object Accident - Powered Household Appliances/Implements',
col == 920.3, 'Cutting Object Accident - Knives, Swords, and Daggers',
col == 920.4, 'Cutting Object Accident - Oth Hand Tools and Implements',
col == 920.5, 'Cutting Object Accident - Hypodermic Needle, Contaminated Needle',
col == 920.8, 'Cutting Object Accident - Oth Spec Cut/Piercing Instrument/Object',
col == 920.9, 'Cutting Object Accident - Unspec Cut/Piercing Instrument/Object',
col == 921.0, 'Pressure Vessel Explosion Accident - Boilers',
col == 921.1, 'Pressure Vessel Explosion Accident - Gas Cylinders',
col == 921.8, 'Pressure Vessel Explosion Accident - Oth Spec Pressure Vessels',
col == 921.9, 'Pressure Vessel Explosion Accident - Unspec Pressure Vessel',
col == 922.0, 'Firearm Missile Accident - Handgun',
col == 922.1, 'Firearm Missile Accident - Shotgun (Automatic)',
col == 922.2, 'Firearm Missile Accident - Hunting Rifle',
col == 922.3, 'Firearm Missile Accident - Military Firearms',
col == 922.4, 'Firearm Missile Accident - Air Gun',
col == 922.5, 'Firearm Missile Accident - Paintball Gun',
col == 922.8, 'Firearm Missile Accident - Oth Spec Firearm Missile',
col == 922.9, 'Firearm Missile Accident - Unspec Firearm Missile',
col == 923.0, 'Explosive Material Accident - Fireworks',
col == 923.1, 'Explosive Material Accident - Blasting Materials',
col == 923.2, 'Explosive Material Accident - Explosive Gases',
col == 923.8, 'Explosive Material Accident - Oth Explosive Materials',
col == 923.9, 'Explosive Material Accident - Unspec Explosive Material',
col == 924.0, 'Accident, Hot/Corrosive Material - Hot Liquids/Vapors/Steam',
col == 924.1, 'Accident, Hot/Corrosive Material - Caustic/Corrosive Substances',
col == 924.2, 'Accident, Hot/Corrosive Material - Hot (Boiling) Tap Water',
col == 924.8, 'Accident, Hot/Corrosive Material - Oth',
col == 924.9, 'Accident, Hot/Corrosive Material - Unspec',
col == 925.0, 'Accident, Electric Current - Domestic Wiring and Appliances',
col == 925.1, 'Accident, Electric Current - Electric Power Plants/Stations/Lines',
col == 925.2, 'Accident, Electric Current - Industrial Wires/Appliance/Machinery',
col == 925.8, 'Accident, Electric Current - Oth Electric Current',
col == 925.9, 'Accident, Electric Current - Unspec Electric Current',
col == 926.0, 'Radiation Exposure - Radiofrequency Radiation',
col == 926.1, 'Radiation Exposure - Infra-red Heaters and Lamps',
col == 926.2, 'Radiation Exposure - Visible/Ultraviolet Light Sources',
col == 926.3, 'Radiation Exposure - X-ray/Oth Electromagnetic Ionize Radiation',
col == 926.4, 'Radiation Exposure - Lasers',
col == 926.5, 'Radiation Exposure - Radioactive Isotopes',
col == 926.8, 'Radiation Exposure - Oth Spec Radiation',
col == 926.9, 'Radiation Exposure - Unspec Radiation',
col == 927.0, 'Overexertion from sudden strenuous movement',
col == 927.1, 'Overexertion from prolonged static position',
col == 927.2, 'Excessive physical exertion from prolonged activity',
col == 927.3, 'Cumulative trauma from repetitive motion',
col == 927.4, 'Cumulative trauma from repetitive impact',
col == 927.8, 'Other overexertion and strenuous and repetitive movements or loads',
col == 927.9, 'Unspecified overexertion and strenuous and repetitive movements or loads',
col == 928.0, 'Oth/Unspec Environmental/Accidental - Stay in Weightless Environment',
col == 928.1, 'Oth/Unspec Environmental/Accidental - Exposure to Noise',
col == 928.2, 'Oth/Unspec Environmental/Accidental - Vibration',
col == 928.3, 'Oth/Unspec Environmental/Accidental - Human Being Bite',
col == 928.4, 'Oth/Unspec Environmental/Accidental - External Constriction Caused by Hair',
col == 928.5, 'Oth/Unspec Environmental/Accidental - External Constriction Caused by Other Obj',
col == 928.6, 'Oth/Unspec Environmental/Accidental - Exposure to Algae/Toxin',
col == 928.7, 'Oth/Unspec Environmental/Accidental - Component of Firearm or Gun',
col == 928.8, 'Oth/Unspec Environmental/Accidental - Oth',
col == 928.9, 'Oth/Unspec Environmental/Accidental - Unspec Accident',
col == 929.0, 'Late Effects of Injury - MVA',
col == 929.1, 'Late Effects of Injury - Oth Transport Accident',
col == 929.2, 'Late Effects of Injury - Accidental Poison',
col == 929.3, 'Late Effects of Injury - Accidental Fall',
col == 929.4, 'Late Effects of Injury - Accident Caused by Fire',
col == 929.5, 'Late Effects of Injury - Accident by Natural/Environment Factors',
col == 929.8, 'Late Effects of Injury - Oth Accidents',
col == 929.9, 'Late Effects of Injury - Unspec Accident',
col == 930.0, 'Adverse Effects - Penicillins',
col == 930.1, 'Adverse Effects - Antifungal Antibiotics',
col == 930.2, 'Adverse Effects - Chloramphenicol Group',
col == 930.3, 'Adverse Effects - Erythromycin and Oth Macrolides',
col == 930.4, 'Adverse Effects - Tetracycline Group',
col == 930.5, 'Adverse Effects - Cephalosporin Group',
col == 930.6, 'Adverse Effects - Antimycobacterial Antibiotics',
col == 930.7, 'Adverse Effects - Antineoplastic Antibiotics',
col == 930.8, 'Adverse Effects - Oth Spec Antibiotics',
col == 930.9, 'Adverse Effects - Unspec Antibiotic',
col == 931.0, 'Adverse Effects - Sulfonamides',
col == 931.1, 'Adverse Effects - Arsenical Anti-Infectives',
col == 931.2, 'Adverse Effects - Heavy Metal Anti-Infectives',
col == 931.3, 'Adverse Effects - Quinoline/Hydroxyquinoline Derivatives',
col == 931.4, 'Adverse Effects - Antimalarial/Drug Act on Oth Blood Protozoa',
col == 931.5, 'Adverse Effects - Oth Antiprotozoal Drugs',
col == 931.6, 'Adverse Effects - Anthelmintics',
col == 931.7, 'Adverse Effects - Antiviral Drugs',
col == 931.8, 'Adverse Effects - Oth Antimycobacterial Drugs',
col == 931.9, 'Adverse Effects - Oth and Unspec Anti-Infectives',
col == 932.0, 'Adverse Effects - Adrenal Cortical Steroids',
col == 932.1, 'Adverse Effects - Androgens/Anabolic Cogeners',
col == 932.2, 'Adverse Effects - Ovarian Hormone/Synthetic Substitutes',
col == 932.3, 'Adverse Effects - Insulins/Antidiabetic Agents',
col == 932.4, 'Adverse Effects - Anterior Pituitary Hormones',
col == 932.5, 'Adverse Effects - Posterior Pituitary Hormones',
col == 932.6, 'Adverse Effects - Parathyroid/Parathyroid Derivatives',
col == 932.7, 'Adverse Effects - Thyroid/Thyroid Derivatives',
col == 932.8, 'Adverse Effects - Antithyroid Agents',
col == 932.9, 'Adverse Effects - Oth/Unspec Hormones/Synthetic Substitutes',
col == 933.0, 'Adverse Effects - Antiallergic/Antiemetic Drugs',
col == 933.1, 'Adverse Effects - Antineoplastic/Immunosuppressive Drugs',
col == 933.2, 'Adverse Effects - Acidifying Agents',
col == 933.3, 'Adverse Effects - Alkalizing Agents',
col == 933.4, 'Adverse Effects - Enzymes, NEC',
col == 933.5, 'Adverse Effects - Vitamins, NEC',
col == 933.6, 'Adverse Effects - Oral Bisphosphonate',
col == 933.7, 'Adverse Effects - IV Bisphosphonate',
col == 933.8, 'Adverse Effects - Oth Systemic Agents, NEC',
col == 933.9, 'Adverse Effects - Unspec Systemic Agent',
col == 934.0, 'Adverse Effects - Iron and its Compounds',
col == 934.1, 'Adverse Effects - Liver Preparations/Oth Antianemic Agent',
col == 934.2, 'Adverse Effects - Anticoagulants',
col == 934.3, 'Adverse Effects - Vitamin K [Phytonadione]',
col == 934.4, 'Adverse Effects - Fibrinolysis-Affecting Drugs',
col == 934.5, 'Adverse Effects - Anticoagulant Antagonists & Oth Coagulants',
col == 934.6, 'Adverse Effects - Gamma Globulin',
col == 934.7, 'Adverse Effects - Natural Blood/Blood Products',
col == 934.8, 'Adverse Effects - Oth Agents Affecting Blood Constituents',
col == 934.9, 'Adverse Effects - Unspec Agent Affecting Blood Constituents',
col == 935.0, 'Adverse Effects - Heroin',
col == 935.1, 'Adverse Effects - Methadone',
col == 935.2, 'Adverse Effects - Oth Opiates & Related Narcotics',
col == 935.3, 'Adverse Effects - Salicylates',
col == 935.4, 'Adverse Effects - Aromatic Analgesics, NEC',
col == 935.5, 'Adverse Effects - Pyrazole Derivatives',
col == 935.6, 'Adverse Effects - Antirheumatics [Antiphlogistics]',
col == 935.7, 'Adverse Effects - Oth Non-Narcotic Analgesics',
col == 935.8, 'Adverse Effects - Oth Spec Analgesics/Antipyretics',
col == 935.9, 'Adverse Effects - Unspec Analgesic/Antipyretic',
col == 936.0, 'Adverse Effects - Oxazolidine Derivatives',
col == 936.1, 'Adverse Effects - Hydantoin Derivatives',
col == 936.2, 'Adverse Effects - Succinimides',
col == 936.3, 'Adverse Effects - Oth/Unspec Anticonvulsants',
col == 936.4, 'Adverse Effects - Anti-Parkinsonism Drugs',
col == 937.0, 'Adverse Effects - Barbiturates',
col == 937.1, 'Adverse Effects - Chloral Hydrate Group',
col == 937.2, 'Adverse Effects - Paraldehyde',
col == 937.3, 'Adverse Effects - Bromine Compounds',
col == 937.4, 'Adverse Effects - Methaqualone Compounds',
col == 937.5, 'Adverse Effects - Glutethimide Group',
col == 937.6, 'Adverse Effects - Mixed Sedatives, NEC',
col == 937.8, 'Adverse Effects - Oth Sedatives/Hypnotics',
col == 937.9, 'Adverse Effects - Unspec',
col == 938.0, 'Adverse Effects - Central Nervous System Muscle-Tone Depressants',
col == 938.1, 'Adverse Effects - Halothane',
col == 938.2, 'Adverse Effects - Oth Gaseous Anesthetics',
col == 938.3, 'Adverse Effects - Intravenous Anesthetics',
col == 938.4, 'Adverse Effects - Oth/Unspec General Anesthetics',
col == 938.5, 'Adverse Effects - Surface/Infiltration Anesthetics',
col == 938.6, 'Adverse Effects - Peripheral Nerve & Plexus-Blocking Anesthetics',
col == 938.7, 'Adverse Effects - Spinal Anesthetics',
col == 938.9, 'Adverse Effects - Oth/Unspec Local Anesthetics',
col == 939.0, 'Adverse Effects - Antidepressants',
col == 939.1, 'Adverse Effects - Phenothiazine-Based Tranquilizers',
col == 939.2, 'Adverse Effects - Butyrophenone-Based Tranquilizers',
col == 939.3, 'Adverse Effects - Oth Antipsychotic/Neuroleptic/Maj Tranquilizer',
col == 939.4, 'Adverse Effects - Benzodiazepine-Based Tranquilizers',
col == 939.5, 'Adverse Effects - Oth Tranquilizers',
col == 939.6, 'Adverse Effects - Psychodysleptics [hallucinogens]',
col == 939.7, 'Adverse Effects - Psychostimulants',
col == 939.8, 'Adverse Effects - Oth Psychotropic Agents',
col == 939.9, 'Adverse Effects - Unspec Psychotropic Agent',
col == 940.0, 'Adverse Effects - Analeptics',
col == 940.1, 'Adverse Effects - Opiate Antagonists',
col == 940.8, 'Adverse Effects - Oth Spec Central Nervous System Stimulants',
col == 940.9, 'Adverse Effects - Unspec Central Nervous System Stimulant',
col == 941.0, 'Adverse Effects - Parasympathomimetics [cholinergics]',
col == 941.1, 'Adverse Effects - Parasympatholytics/Spasmolytics',
col == 941.2, 'Adverse Effects - Sympathomimetics [adrenergics]',
col == 941.3, 'Adverse Effects - Sympatholytics [antiadrenergics]',
col == 941.9, 'Adverse Effects - Unspec Drug Affecting Autonomic Nervous System',
col == 942.0, 'Adverse Effects - Cardiac Rhythm Regulators',
col == 942.1, 'Adverse Effects - Cardiotonic Glycosides/Similar Drugs',
col == 942.2, 'Adverse Effects - Antilipemic/Antiarteriosclerotic Drugs',
col == 942.3, 'Adverse Effects - Ganglion-Blocking Agents',
col == 942.4, 'Adverse Effects - Coronary Vasodilators',
col == 942.5, 'Adverse Effects - Oth Vasodilators',
col == 942.6, 'Adverse Effects - Oth Antihypertensive Agents',
col == 942.7, 'Adverse Effects - Antivaricose Drugs/Sclerosing Agents',
col == 942.8, 'Adverse Effects - Capillary-Active Drugs',
col == 942.9, 'Adverse Effects - Oth & Unspec Agents on Cardiovascular System',
col == 943.0, 'Adverse Effects - Antacids/Antigastric Secretion Drugs',
col == 943.1, 'Adverse Effects - Irritant Cathartics',
col == 943.2, 'Adverse Effects - Emollient Cathartics',
col == 943.3, 'Adverse Effects - Oth Cathartic/Intestinal Atonia Drugs',
col == 943.4, 'Adverse Effects - Digestants',
col == 943.5, 'Adverse Effects - Antidiarrheal Drugs',
col == 943.6, 'Adverse Effects - Emetics',
col == 943.8, 'Adverse Effects - Oth Spec Agents on Gastrointestinal System',
col == 943.9, 'Adverse Effects - Unspec Agent on Gastrointestinal System',
col == 944.0, 'Adverse Effects - Mercurial Diuretics',
col == 944.1, 'Adverse Effects - Purine Derivative Diuretics',
col == 944.2, 'Adverse Effects - Carbon Acid Anhydrase Inhibitors',
col == 944.3, 'Adverse Effects - Saluretics',
col == 944.4, 'Adverse Effects - Oth Diuretics',
col == 944.5, 'Adverse Effects - Electrolytic, Caloric, H2O-Balance Agents',
col == 944.6, 'Adverse Effects - Oth Mineral Salts, NEC',
col == 944.7, 'Adverse Effects - Uric Acid Metabolism Drugs',
col == 945.0, 'Adverse Effects - Oxytocic Agents',
col == 945.1, 'Adverse Effects - Smooth Muscle Relaxants',
col == 945.2, 'Adverse Effects - Skeletal Muscle Relaxants',
col == 945.3, 'Adverse Effects - Oth & Unspec Drugs Acting on Muscles',
col == 945.4, 'Adverse Effects - Antitussives',
col == 945.5, 'Adverse Effects - Expectorants',
col == 945.6, 'Adverse Effects - Anti-Common Cold Drugs',
col == 945.7, 'Adverse Effects - Antiasthmatics',
col == 945.8, 'Adverse Effects - Oth & Unspec Respiratory Drugs',
col == 946.0, 'Adverse Effects - Local Anti-Infectives & Anti-Inflammatory Drug',
col == 946.1, 'Adverse Effects - Antipruritics',
col == 946.2, 'Adverse Effects - Local Astringents & Local Detergents',
col == 946.3, 'Adverse Effects - Emollients, Demulcents, and Protectants',
col == 946.4, 'Adverse Effects - Keratolytics, Keratoplastics, Hair Treatments',
col == 946.5, 'Adverse Effects - Eye Anti-Infectives and Oth Eye Drugs',
col == 946.6, 'Adverse Effects - Anti-Infectives/Oth Drugs for Ear/Nose/Throat',
col == 946.7, 'Adverse Effects - Dental Drugs Topically Applied',
col == 946.8, 'Adverse Effects - Oth Agents Affecting Skin & Mucous Membrane',
col == 946.9, 'Adverse Effects - Unspec Agent Affecting Skin & Mucous Membrane',
col == 947.0, 'Adverse Effects - Dietetics',
col == 947.1, 'Adverse Effects - Lipotropic Drugs',
col == 947.2, 'Adverse Effects - Antidotes & Chelating Agents, NEC',
col == 947.3, 'Adverse Effects - Alcohol Deterrents',
col == 947.4, 'Adverse Effects - Pharmaceutical Excipients',
col == 947.8, 'Adverse Effects - Oth Drugs & Medicinal Substances',
col == 947.9, 'Adverse Effects - Unspec Drug or Medicinal Substance',
col == 948.0, 'Adverse Effects - BCG Vaccine',
col == 948.1, 'Adverse Effects - Typhoid and Paratyphoid',
col == 948.2, 'Adverse Effects - Cholera',
col == 948.3, 'Adverse Effects - Plague',
col == 948.4, 'Adverse Effects - Tetanus',
col == 948.5, 'Adverse Effects - Diphtheria',
col == 948.6, 'Adverse Effects - Pertussis Vaccine, Pertussis Component Combo',
col == 948.8, 'Adverse Effects - Oth and Unspec Bacterial Vaccines',
col == 948.9, 'Adverse Effects - Mixed Bacterial Vaccines,No Pertusis Component',
col == 949.0, 'Adverse Effects - Smallpox Vaccine',
col == 949.1, 'Adverse Effects - Rabies Vaccine',
col == 949.2, 'Adverse Effects - Typhus Vaccine',
col == 949.3, 'Adverse Effects - Yellow Fever Vaccine',
col == 949.4, 'Adverse Effects - Measles Vaccine',
col == 949.5, 'Adverse Effects - Poliomyelitis Vaccine',
col == 949.6, 'Adverse Effects - Oth & Unspec Viral & Rickettsial Vaccines',
col == 949.7, 'Adverse Effects - Mixed Viral-Rickettsial & Bacterial Vaccines',
col == 949.9, 'Adverse Effects - Oth & Unspec Vaccines & Biological Substances',
col == 950.0, 'Suicide/Self Poison- Analgesics, Antipyretics & Antirheumatics',
col == 950.1, 'Suicide/Self Poison- Barbiturates',
col == 950.2, 'Suicide/Self Poison- Oth Sedatives & Hypnotics',
col == 950.3, 'Suicide/Self Poison- Tranquilizers/Oth Psychotropic Agents',
col == 950.4, 'Suicide/Self Poison- Oth Spec Drugs/Medicinal Substances',
col == 950.5, 'Suicide/Self Poison- Unspec Drug/Medicinal Substance',
col == 950.6, 'Suicide/Self Poison- (Agri/Horti)Cultural Chemical/Pharmaceutical',
col == 950.7, 'Suicide/Self Poison- Corrosive/Caustic Substances',
col == 950.8, 'Suicide/Self Poison- Arsenic and its Compounds',
col == 950.9, 'Suicide/Self Poison- Oth & Unspec Solid/Liquid Substances',
col == 951.0, 'Suicide/Self Poison - Gas Distributed by Pipeline',
col == 951.1, 'Suicide/Self Poison - Liquid Petroleum Gas (Mobile Containers)',
col == 951.8, 'Suicide/Self Poison - Oth Utility Gas',
col == 952.0, 'Suicide/Self Poison - Motor Vehicle Exhaust Gas',
col == 952.1, 'Suicide/Self Poison - Oth Carbon Monoxide',
col == 952.8, 'Suicide/Self Poison - Oth Spec Gases and Vapors',
col == 952.9, 'Suicide/Self Poison - Unspec Gases and Vapors',
col == 953.0, 'Suicide/Self Injury - Hanging',
col == 953.1, 'Suicide/Self Injury - Suffocation by Plastic Bag',
col == 953.8, 'Suicide/Self Injury - Oth Spec Means',
col == 953.9, 'Suicide/Self Injury - Unspec Means',
col == 954.0, 'Suicide and Self-Inflicted Injury by Submersion [Drowning]',
col == 955.0, 'Suicide/Self Injury - Handgun',
col == 955.1, 'Suicide/Self Injury - Shotgun',
col == 955.2, 'Suicide/Self Injury - Hunting Rifle',
col == 955.3, 'Suicide/Self Injury - Military Firearms',
col == 955.4, 'Suicide/Self Injury - Oth and Unspec Firearm',
col == 955.5, 'Suicide/Self Injury - Explosives',
col == 955.6, 'Suicide/Self Injury - Air Gun',
col == 955.7, 'Suicide/Self Injury - Paintball Gun',
col == 955.9, 'Suicide/Self Injury - Unspec',
col == 956.0, 'Suicide and Self-Inflicted Injury by Cut/Piercing Instrument',
col == 957.0, 'Suicide/Self Injury, Jump,High Place - Residential Premises',
col == 957.1, 'Suicide/Self Injury, Jump,High Place - Oth Man-Made Structures',
col == 957.2, 'Suicide/Self Injury, Jump,High Place - Natural Sites',
col == 957.9, 'Suicide/Self Injury, Jump,High Place - Unspec',
col == 958.0, 'Suicide/Self Injury - Jumping or Lying Before Moving Object',
col == 958.1, 'Suicide/Self Injury - Burns, Fire',
col == 958.2, 'Suicide/Self Injury - Scald',
col == 958.3, 'Suicide/Self Injury - Extremes of Cold',
col == 958.4, 'Suicide/Self Injury - Electrocution',
col == 958.5, 'Suicide/Self Injury - Crashing of Motor Vehicle',
col == 958.6, 'Suicide/Self Injury - Crashing of Aircraft',
col == 958.7, 'Suicide/Self Injury - Caustic Substances, Except Poisoning',
col == 958.8, 'Suicide/Self Injury - Oth Spec Means',
col == 958.9, 'Suicide/Self Injury - Unspec Means',
col == 959.0, 'Late Effects of Self-Inflicted Injury',
col == 960.0, 'Fight/Brawl/Rape - Unarmed Fight or Brawl',
col == 960.1, 'Fight/Brawl/Rape - Rape',
col == 961.0, 'Assault by Corrosive or Caustic Substance, Except Poisoning',
col == 962.0, 'Assault by Poison - Drugs and Medicinal Substances',
col == 962.1, 'Assault by Poison - Oth Solid and Liquid Substances',
col == 962.2, 'Assault by Poison - Oth Gases and Vapors',
col == 962.9, 'Assault by Poison - Unspec Poisoning',
col == 963.0, 'Assault by Hanging and Strangulation',
col == 964.0, 'Assault by Submersion [Drowning]',
col == 965.0, 'Assault by Firearms/Explosives - Handgun',
col == 965.1, 'Assault by Firearms/Explosives - Shotgun',
col == 965.2, 'Assault by Firearms/Explosives - Hunting Rifle',
col == 965.3, 'Assault by Firearms/Explosives - Military Firearms',
col == 965.4, 'Assault by Firearms/Explosives - Oth and Unspec Firearm',
col == 965.5, 'Assault by Firearms/Explosives - Antipersonnel Bomb',
col == 965.6, 'Assault by Firearms/Explosives - Gasoline Bomb',
col == 965.7, 'Assault by Firearms/Explosives - Letter Bomb',
col == 965.8, 'Assault by Firearms/Explosives - Oth Spec Explosive',
col == 965.9, 'Assault by Firearms/Explosives - Unspec Explosive',
col == 966.0, 'Assault by Cutting and Piercing Instrument',
col == 967.0, 'Child/Adult Abuse by Father/Stepfather/Male Partner',
col == 967.1, 'Child/Adult Abuse by Oth Spec Person',
col == 967.2, 'Child/Adult Abuse by Mother/Stepmother/Female Partner',
col == 967.3, 'Child/Adult Abuse by Spouse/Partner/Ex-Spouse/Ex-Partner',
col == 967.4, 'Child/Adult Abuse by Child',
col == 967.5, 'Child/Adult Abuse by Sibling',
col == 967.6, 'Child/Adult Abuse by Grandparent',
col == 967.7, 'Child/Adult Abuse by Other Relative',
col == 967.8, 'Child/Adult Abuse by Non-related Caregiver',
col == 967.9, 'Child/Adult Abuse by Unspec Person',
col == 968.0, 'Assault by Oth/Unspec Means - Fire',
col == 968.1, 'Assault by Oth/Unspec Means - Pushing from a High Place',
col == 968.2, 'Assault by Oth/Unspec Means - Striking by Blunt/Thrown Object',
col == 968.3, 'Assault by Oth/Unspec Means - Hot Liquid',
col == 968.4, 'Assault by Oth/Unspec Means - Criminal Neglect',
col == 968.5, 'Assault by Oth/Unspec Means - Vehicular Assault',
col == 968.6, 'Assault by Oth/Unspec Means - Air Gun',
col == 968.7, 'Assault by Oth/Unspec Means - Human Being Bite',
col == 968.8, 'Assault by Oth/Unspec Means - Oth Spec Means',
col == 968.9, 'Assault by Oth/Unspec Means - Unspec Means',
col == 969.0, 'Late Effects of Injury Purposely Inflicted by Oth Person',
col == 970.0, 'Injury Due to Legal Intervention by Firearms',
col == 971.0, 'Injury Due to Legal Intervention by Explosives',
col == 972.0, 'Injury Due to Legal Intervention by Gas',
col == 973.0, 'Injury Due to Legal Intervention by Blunt Object',
col == 974.0, 'Injury Due to Legal Intervention by Cut/Piercing Instrument',
col == 975.0, 'Injury Due to Legal Intervention by Oth Spec Means',
col == 976.0, 'Injury Due to Legal Intervention by Unspec Means',
col == 977.0, 'Late Effects of Injuries Due to Legal Intervention',
col == 978.0, 'Legal Execution',
col == 979.0, 'Terrorism - Explosion of Marine Weapons',
col == 979.1, 'Terrorism - Destruction of Aircraft',
col == 979.2, 'Terrorism - Other Explosions and Fragments',
col == 979.3, 'Terrorism - Fires, Conflagrations, and Hot Substances',
col == 979.4, 'Terrorism - Firearms',
col == 979.5, 'Terrorism - Nuclear Weapons',
col == 979.6, 'Terrorism - Biological Weapons',
col == 979.7, 'Terrorism - Chemical Weapons',
col == 979.8, 'Terrorism - Other Weapons',
col == 979.9, 'Terrorism - Secondary Effects',
col == 980.0, 'Poison,Un/Intentional- Analgesic/Anti(Pyretic/Rheumatic)',
col == 980.1, 'Poison,Un/Intentional- Barbiturates',
col == 980.2, 'Poison,Un/Intentional- Oth Sedatives and Hypnotics',
col == 980.3, 'Poison,Un/Intentional- Tranquilizers/Psychotropic Agents',
col == 980.4, 'Poison,Un/Intentional- Oth Spec Drugs/Medicines',
col == 980.5, 'Poison,Un/Intentional- Unspec Drug/Medicine',
col == 980.6, 'Poison,Un/Intentional- Corrosive/Caustic Substances',
col == 980.7, 'Poison,Un/Intentional- (Agri/Horti)Cultural Chemical/Pharmaceutic',
col == 980.8, 'Poison,Un/Intentional- Arsenic and its Compounds',
col == 980.9, 'Poison,Un/Intentional- Oth/Unspec Solids/Liquids',
col == 981.0, 'Poison, Un/Intentional - Gas Distributed by Pipeline',
col == 981.1, 'Poison, Un/Intentional - Liquid Petroleum Gas (Mobile Containers)',
col == 981.8, 'Poison, Un/Intentional - Oth Utility Gas',
col == 982.0, 'Poison, Un/Intentional - Motor Vehicle Exhaust Gas',
col == 982.1, 'Poison, Un/Intentional - Oth Carbon Monoxide',
col == 982.8, 'Poison, Un/Intentional - Oth Spec Gases and Vapors',
col == 982.9, 'Poison, Un/Intentional - Unspec Gases and Vapors',
col == 983.0, 'Hang/Strangle/Suffocate, Un/Intentional- Hanging',
col == 983.1, 'Hang/Strangle/Suffocate, Un/Intentional- Suffocate by Plastic Bag',
col == 983.8, 'Hang/Strangle/Suffocate, Un/Intentional- Oth Spec Means',
col == 983.9, 'Hang/Strangle/Suffocate, Un/Intentional- Unspec Means',
col == 984.0, 'Submersion [Drowning], Undetermined Un/Intentional',
col == 985.0, 'Firearms/Explosives, Un/Intentional - Handgun',
col == 985.1, 'Firearms/Explosives, Un/Intentional - Shotgun',
col == 985.2, 'Firearms/Explosives, Un/Intentional - Hunting Rifle',
col == 985.3, 'Firearms/Explosives, Un/Intentional - Military Firearms',
col == 985.4, 'Firearms/Explosives, Un/Intentional - Oth/Unspec Firearm',
col == 985.5, 'Firearms/Explosives, Un/Intentional - Explosives',
col == 985.6, 'Firearms/Explosives, Un/Intentional - Air Gun',
col == 985.7, 'Firearms/Explosives, Un/Intentional - Paintball Gun',
col == 986.0, 'Injury by Cut/Piercing Instruments, Undetermined Un/Intentional',
col == 987.0, 'Fall From High Place, Un/Intentional - Residential Premises',
col == 987.1, 'Fall From High Place, Un/Intentional - Oth Man-Made Structures',
col == 987.2, 'Fall From High Place, Un/Intentional - Natural Sites',
col == 987.9, 'Fall From High Place, Un/Intentional - Unspec Site',
col == 988.0, 'Oth/Unspec Injury, Un/Intentional - Jump/Lie Before Moving Object',
col == 988.1, 'Oth/Unspec Injury, Un/Intentional - Burns/Fire',
col == 988.2, 'Oth/Unspec Injury, Un/Intentional - Scald',
col == 988.3, 'Oth/Unspec Injury, Un/Intentional - Extremes of Cold',
col == 988.4, 'Oth/Unspec Injury, Un/Intentional - Electrocution',
col == 988.5, 'Oth/Unspec Injury, Un/Intentional - Crashing of Motor Vehicle',
col == 988.6, 'Oth/Unspec Injury, Un/Intentional - Crashing of Aircraft',
col == 988.7, 'Oth/Unspec Injury, Un/Intentional - Caustic Substances,Not Poison',
col == 988.8, 'Oth/Unspec Injury, Un/Intentional - Oth Spec Means',
col == 988.9, 'Oth/Unspec Injury, Un/Intentional - Unspec Means',
col == 989.0, 'Late Effects of Injury, Undetermined Un/Intentional',
col == 990.0, 'War Operations Injury - From Gasoline Bomb',
col == 990.1, 'War Operations Injury - From Flamethrower',
col == 990.2, 'War Operations Injury - From Incendiary Bullet',
col == 990.3, 'War Operations Injury - From Fire Casued by Conventional Weapon',
col == 990.9, 'War Operations Injury - From Oth/Unspec Source',
col == 991.0, 'War Operations Injury - Rubber Bullets (Rifle)',
col == 991.1, 'War Operations Injury - Pellets (Rifle)',
col == 991.2, 'War Operations Injury - Oth Bullets',
col == 991.3, 'War Operations Injury - Antipersonnel Bomb (Fragments)',
col == 991.4, 'War Operations Injury - From Munition Fragments',
col == 991.5, 'War Operations Injury - From Person IED',
col == 991.6, 'War Operations Injury - From Vehicle IED',
col == 991.7, 'War Operations Injury - From Other IED',
col == 991.8, 'War Operations Injury - From Weapon Fragments',
col == 991.9, 'War Operations Injury - Oth/Unspec Fragments',
col == 992.0, 'Injury Due to War Operations by Torpedo',
col == 992.1, 'Injury Due to War Operations by Depth Charge',
col == 992.2, 'Injury Due to War Operations by Marine Mines',
col == 992.3, 'Injury Due to War Operations by Sea Based Artillery Shells',
col == 992.8, 'Injury Due to War Operations by Other Marine Weapons',
col == 992.9, 'Injury Due to War Operations by Unspec Marine Weapons',
col == 993.0, 'Injury Due to War Operations by Areal Bomb',
col == 993.1, 'Injury Due to War Operations by Guided Missle',
col == 993.2, 'Injury Due to War Operations by Mortar',
col == 993.3, 'Injury Due to War Operations by Person IED',
col == 993.4, 'Injury Due to War Operations by Vehicle IED',
col == 993.5, 'Injury Due to War Operations by Other IED',
col == 993.6, 'Injury Due to War Operations by Unintentional Detonation Own Munitions',
col == 993.7, 'Injury Due to War Operations by Unintentional Discharge Own Launch Device',
col == 993.8, 'Injury Due to War Operations by Other Specified Explosion',
col == 993.9, 'Injury Due to War Operations by Unspec Explosion',
col == 994.0, 'Injury Due to War Destruction Aircraft - Enemy Fire/Explosives',
col == 994.1, 'Injury Due to War Destruction Aircraft - Unintentional Own Explosives',
col == 994.2, 'Injury Due to War Destruction Aircraft - Collision Other Aircraft',
col == 994.3, 'Injury Due to War Destruction Aircraft - Onboard Fire',
col == 994.8, 'Injury Due to War Destruction Aircraft - Other',
col == 994.9, 'Injury Due to War Destruction Aircraft - Unspecified',
col == 995.0, 'Injury Due to War Operations by Unarmed Hand-to-hand Combat',
col == 995.1, 'Injury Due to War Operations by Struck by Blunt Object',
col == 995.2, 'Injury Due to War Operations by Piercing Object',
col == 995.3, 'Injury Due to War Operations by Intentional Restriction of Airway',
col == 995.4, 'Injury Due to War Operations by Unintentional Drowning',
col == 995.8, 'Injury Due to War Operations by Other Conventional Warfare',
col == 995.9, 'Injury Due to War Operations by Unspecified Conventional Warfare',
col == 996.0, 'Injury Due to War Operations by Nuclear Weapons - Direct Blast',
col == 996.1, 'Injury Due to War Operations by Nuclear Weapons - Indirect Blast',
col == 996.2, 'Injury Due to War Operations by Nuclear Weapons - Thermal Radiation',
col == 996.3, 'Injury Due to War Operations by Nuclear Weapons - Nuclear Radiation',
col == 996.8, 'Injury Due to War Operations by Nuclear Weapons - Other',
col == 996.9, 'Injury Due to War Operations by Nuclear Weapons - Unspecified',
col == 997.0, 'War Operations Injury - Lasers',
col == 997.1, 'War Operations Injury - Biological Warfare',
col == 997.2, 'War Operations Injury - Gases, Fumes, and Chemicals',
col == 997.3, 'War Operations Injury - Weapons of Mass Destruction, NFS',
col == 997.8, 'War Operations Injury - Oth Spec Unconventional Warfare',
col == 997.9, 'War Operations Injury - Unspec Unconventional Warfare',
col == 998.0, 'Injury Due to War Occur After Hostile Cessation - Mines',
col == 998.1, 'Injury Due to War Occur After Hostile Cessation - Bombs',
col == 998.8, 'Injury Due to War Occur After Hostile Cessation - Other',
col == 998.9, 'Injury Due to War Occur After Hostile Cessation - Unspecified',
col == 999.0, 'Late Effect of Injury Due to War Operations',
col == 999.1, 'Late Effect of Injury Due to Terrorism',
default = "Unknown")
return(col_value)
}
|
# Getting & Cleaning Data course project: build a tidy summary of the UCI
# HAR dataset -- the mean of every mean()/std() feature per subject and
# activity -- and write it to tidy_data.txt.

# --- Obtain the raw data ------------------------------------------------
if (!file.exists("./data")) {
  dir.create("data")
}
if (!file.exists("./data/activity.zip")) {
  data_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(data_url, destfile = "./data/activity.zip")
}
if (!file.exists("./data/UCI HAR Dataset")) {
  unzip(zipfile = "./data/activity.zip", exdir = "data")
}

# --- Load the individual pieces of the dataset --------------------------
dataset_dir <- "./data/UCI HAR Dataset"
features_test   <- read.table(file.path(dataset_dir, "test", "X_test.txt"))
features_train  <- read.table(file.path(dataset_dir, "train", "X_train.txt"))
labels_test     <- read.table(file.path(dataset_dir, "test", "Y_test.txt"))
labels_train    <- read.table(file.path(dataset_dir, "train", "Y_train.txt"))
subjects_test   <- read.table(file.path(dataset_dir, "test", "subject_test.txt"))
subjects_train  <- read.table(file.path(dataset_dir, "train", "subject_train.txt"))
feature_names   <- read.table(file.path(dataset_dir, "features.txt"))
activity_labels <- read.table(file.path(dataset_dir, "activity_labels.txt"))

# --- Merge the test and train halves (test rows first) ------------------
features_all <- rbind(features_test, features_train)
labels_all   <- rbind(labels_test, labels_train)
subjects_all <- rbind(subjects_test, subjects_train)

# Name the feature columns after the second column of features.txt.
colnames(features_all) <- feature_names[, 2]

# Keep only columns whose name matches mean() or std() (mean columns first,
# then std columns -- same selection and order as grep() produces).
keep_cols <- c(grep("mean()", colnames(features_all)),
               grep("std()", colnames(features_all)))
features_kept <- features_all[, keep_cols]

# Attach the activity ids, translate them to descriptive names via merge
# on the id column, then attach the subject ids in front.
combined <- cbind(labels_all, features_kept)
combined <- merge(activity_labels, combined, by.x = 1, by.y = 1)
combined <- cbind(subjects_all, combined)

# Drop the now-redundant numeric activity id (column 2) since the activity
# name column remains, then label the two identifier columns.
combined <- combined[, -c(2)]
colnames(combined)[1:2] <- c("SubjectId", "Activity")

# --- Average every variable per subject/activity pair -------------------
library(reshape2)
tidy_data <- melt(combined, id = c("SubjectId", "Activity"))
tidy_data <- dcast(tidy_data, SubjectId + Activity ~ variable, mean)
write.table(tidy_data, file = "tidy_data.txt")
|
# Draw a Google map of my neighbourhood centred on the home address and
# save it to mymap.png. The map style rotates with the current clock second.
# Requires ggmap/ggplot2 to be attached (geocode, get_googlemap, ggmap).
today <- Sys.time()
# Current second (0-59). format() already returns character, so a single
# as.numeric() suffices (the original wrapped it in a redundant as.character()).
sec <- as.numeric(format(today, "%S"))
# Geocode the home address; "&language=ko" requests Korean-language results.
mh <- geocode(enc2utf8('경기도 하남시 덕풍남로 11&language=ko'), source = 'google', output = 'latlona')
cen <- c(mh$lon, mh$lat)
myhome <- data.frame(mh$lon, mh$lat)
# Map style by 15-second window: 0-14 terrain, 15-29 satellite,
# 30-44 roadmap, 45-59 hybrid (lookup replaces the nested scalar ifelse chain).
mt <- c('terrain', 'satellite', 'roadmap', 'hybrid')[findInterval(sec, c(0, 15, 30, 45))]
map <- get_googlemap(center = cen, maptype = mt, zoom = 16, marker = myhome)
# NOTE(review): '위도' (latitude) labels the x axis while x carries the
# longitude value -- the axis labels look swapped; confirm intent.
ggmap(map) + labs(x = '위도', y = '경도', title = '우리 동네') +
  geom_text(aes(x = mh$lon, y = mh$lat, label = "우리집", vjust = 0, hjust = 0))
ggsave('mymap.png')
|
a5ccf7e540885576bfb7bccc314a89f58e92f53b | f36b2ad1dc17ec05278f13c7fa72a1fd8343ee19 | /man/chk_flag.Rd | fe00c420a7f3fd7842394595bb120fbc8f842972 | [
"MIT"
] | permissive | poissonconsulting/chk | 45f5d81df8a967aad6e148f0bff9a9f5b89a51ac | c2545f04b23e918444d4758e4362d20dfaa8350b | refs/heads/main | 2023-06-14T19:32:17.452025 | 2023-05-27T23:53:25 | 2023-05-27T23:53:25 | 199,894,184 | 43 | 3 | NOASSERTION | 2023-01-05T18:50:23 | 2019-07-31T16:42:59 | R | UTF-8 | R | false | true | 1,088 | rd | chk_flag.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chk-flag.R
\name{chk_flag}
\alias{chk_flag}
\alias{vld_flag}
\title{Check Flag}
\usage{
chk_flag(x, x_name = NULL)
vld_flag(x)
}
\arguments{
\item{x}{The object to check.}
\item{x_name}{A string of the name of object x or NULL.}
}
\value{
The \code{chk_} function throws an informative error if the test fails or
returns the original object if successful so it can be used in pipes.
The \code{vld_} function returns a flag indicating whether the test was met.
}
\description{
Checks if non-missing logical scalar using
\code{is.logical(x) && length(x) == 1L && !anyNA(x)}
\strong{Pass}: \code{TRUE}, \code{FALSE}.
\strong{Fail}: \code{logical(0)}, \code{c(TRUE, TRUE)}, \code{"TRUE"}, \code{1}, \code{NA}.
}
\section{Functions}{
\itemize{
\item \code{vld_flag()}: Validate Flag
}}
\examples{
# chk_flag
chk_flag(TRUE)
try(vld_flag(1))
# vld_flag
vld_flag(TRUE)
vld_flag(1)
}
\seealso{
Other chk_logical:
\code{\link{chk_false}()},
\code{\link{chk_lgl}()},
\code{\link{chk_true}()}
}
\concept{chk_logical}
|
d73f477a3a2bbc8aecb647a1d2335c586f39e26c | d5df1809218923be5eeb23c97ffebcaafd1c156e | /importance_sampling.R | dcb42fde816cb3cac208522f1a8a2dc0e510dc13 | [] | no_license | rogersguo/bayesian_melding | e3a48dafa8bd2e658fd868fe2ec37ad83f16b288 | 8c6e46c2b278389fefa6130713e8e6839e08dd65 | refs/heads/master | 2020-12-03T09:11:00.384028 | 2014-10-20T14:27:33 | 2014-10-20T14:27:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,434 | r | importance_sampling.R | #### importance sampling example
library(ggplot2)
# Method follows this paper:
# http://iopscience.iop.org/0143-0807/22/4/315/pdf/0143-0807_22_4_315.pdf
## First simulate draws from the exponential distribution with rate 1.
# How many random samples to generate (10e5 = 1,000,000).
number_samples<-10e5
# Uniform(0, 1) variates; transformed into exponential samples by
# inversion sampling below.
random_numbers<-runif(number_samples)
# Inverse CDF (quantile function) of the Exponential(rate = 1) distribution.
# Maps a Uniform(0, 1) draw to an exponential variate via inversion sampling;
# a transparent, hand-rolled equivalent of qexp().
quantile_function <- function(x) {
  -log(1 - x)
}
# Transform the uniform draws into Exponential(1) samples by inversion.
exp_samples<-quantile_function(random_numbers)
#hist(exp_samples, freq=F)   # base-graphics alternative to the ggplot below
exp_hist<-qplot(exp_samples, geom="histogram")
## Compare this to the theoretical distribution:
# grid of 10e3 = 10000 x values on [0, 15] for evaluating the density.
xs<-seq(0,15,length.out=10e3)
# Probability density of the Exponential(rate = 1) distribution,
# f(x) = exp(-x). A simplified, transparent stand-in for the (probably
# much faster) built-in density.
pdf_exp <- function(x) {
  exp(-x)
}
# Theoretical density evaluated on the grid.
p_xs<-pdf_exp(xs)
# NOTE(review): points() adds to an existing base-graphics plot, but the
# hist() call above is commented out (qplot builds a ggplot object instead),
# so running the script top-to-bottom may error here -- confirm intent.
points(xs, p_xs, type="l")
# Consider the rare event that a sample exceeds a threshold T: its
# probability is the integral of the pdf from T to infinity, or
# equivalently the proportion of our random samples greater than T.
Ts<-seq(1,8,1)
# Log of the empirical tail proportion for each threshold. lapply()
# returns a list with one element per threshold; plot() coerces it.
log_proportions<-lapply(Ts, function(T) {log(sum(exp_samples>T)/number_samples)})
plot(Ts, log_proportions)
# As T increases we have fewer and fewer samples above T, so the relative
# error of these naive Monte Carlo estimates grows.
# Importance sampling instead: draw from g(x) (not p(x)) and reweight each
# new draw by the weight it would have under p.
# Scale parameter of the importance-sampling proposal g(x) = exp(-x/a)/a
# for threshold Tee: a = (1 + Tee + sqrt(1 + Tee^2)) / 2 -- presumably the
# variance-minimising choice from the paper linked at the top of the script.
calculate_a <- function(Tee) {
  # "^" is R's documented power operator; the original used the
  # undocumented "**" alias.
  0.5 * (1 + Tee + sqrt(1 + Tee^2))
}
# Build the proposal density g(x) = (1/a) * exp(-x/a), an exponential
# distribution whose scale a = calculate_a(Tee) is tuned to the threshold.
# Returns a closure over the computed scale.
g_pdf_creator <- function(Tee) {
  scale_a <- calculate_a(Tee)
  density_fn <- function(x) {
    (1 / scale_a) * exp(-x / scale_a)
  }
  density_fn
}
# Threshold of interest and the corresponding proposal density g.
Tee<-8
g_pdf<-g_pdf_creator(Tee)
# Build the quantile (inverse CDF) function of the exponential distribution
# with scale a: u -> -a * log(1 - u). Used to draw proposal samples from g
# by inversion sampling.
g_quantile_function_creator <- function(a) {
  inverse_cdf <- function(x) {
    -a * log(1 - x)
  }
  inverse_cdf
}
# Quantile function of the proposal g for the chosen threshold.
g_quantile_function<- g_quantile_function_creator(calculate_a(Tee))
# Visual check: histogram of proposal draws with the proposal density overlaid.
hist(g_quantile_function(random_numbers), freq=F)
us<-seq(0,120,length.out=100)
points(us, g_pdf(us),type="l")
# Fresh uniform draws for the actual estimate.
random_numbers<-runif(number_samples)
# sample from g instead of from p
g_samples<- g_quantile_function(random_numbers)
# weight each draw by the likelihood ratio p/g (the importance weight)
Iks<- pdf_exp(g_samples)/g_pdf(g_samples)
# keep only draws above the threshold (indicator of the rare event)
Iks<-Iks * (g_samples>Tee)
# average to get the integral estimate of P(X > Tee)
# (for Exponential(1) the true value is exp(-Tee))
sum(Iks)/number_samples
|
e2a4110d0afef87e467bcb55dd4f84f3989dd177 | 65406a7fa042037846277385df804308a4eb16dd | /statisticalAnalysis/dataAnalysis/historicalObservation/Archive/Rainfall_Frequency_Analysis_MultModels.R | 32c22330199a159901ffeaaf57ea3b722cb212b8 | [
"MIT"
] | permissive | uva-hydroinformatics/vtrc-climate | d4353abe559b1ca42f8fdd3a00eeccd183922920 | 51a1db1d8edb04bede2a19cb7c342fe628c7edaf | refs/heads/master | 2020-04-26T20:10:54.731154 | 2019-03-29T20:58:41 | 2019-03-29T20:58:41 | 173,800,949 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,831 | r | Rainfall_Frequency_Analysis_MultModels.R | # This script is to analysis daily rainfall projection data to get 24-hour rainfall
# intensity for 2050 and 2100
# data from 2035 to 2065 is used to get IDF for 2050
# data from 2071 to 2100 is used to get IDF for 2100
library(nsRFA)
library(gsubfn)
######################################################################################################
#####calculate 24-hour rainfall intensity for different return periods based on given distribution####
rain_24hrs <- function(MSC, Max_DailyRain, criteria ){
non_exc_prob <- c(0.99, 0.98, 0.9, 0.5, 0.01)
if(MSC[paste0(criteria, "dist")] == "P3"){
parms <- ML_estimation(Max_DailyRain, dist="P3")
returns <- invF.gamma(non_exc_prob, parms[1],parms[2], parms[3])
}
if(MSC[paste0(criteria, "dist")] == "LP3"){
parms <- ML_estimation(log(Max_DailyRain), dist="P3")
returns <- exp(invF.gamma(non_exc_prob, parms[1],parms[2], parms[3]))
}
if(MSC[paste0(criteria, "dist")] == "NORM"){
parms <- ML_estimation(Max_DailyRain, dist="NORM")
returns <- qnorm(non_exc_prob, mean=parms[1], sd=parms[2])
}
if(MSC[paste0(criteria, "dist")] == "LN"){
parms <- ML_estimation(log(Max_DailyRain), dist="NORM")
returns <- exp(qnorm(non_exc_prob, mean=parms[1], sd=parms[2]))
}
if(MSC[paste0(criteria, "dist")] == "EV1" || MSC[paste0(criteria, "dist")] == "GUMBEL"){
parms <- ML_estimation(Max_DailyRain, dist="EV1")
returns <- parms[1] - parms[2]*log(-log(non_exc_prob))
}
if(MSC[paste0(criteria, "dist")] == "EV2"){
parms <- ML_estimation(log(Max_DailyRain), dist="EV1")
returns <- exp(parms[1] - parms[2]*log(-log(non_exc_prob)))
}
if(MSC[paste0(criteria, "dist")] == "GEV"){
parms <- ML_estimation(Max_DailyRain, dist="GEV")
returns <- invF.GEV(non_exc_prob, parms[1],parms[2], parms[3])
}
return(returns)
}
#####################################################################################################
#####define function to calculate the 24-hour rainfall for each given GCM############################
# Percent change in 24-hour design rainfall, relative to the baseline
# period, for one GCM's daily precipitation projection.
#
# Args:
#   file_dir: directory holding pr_day.csv (one daily precipitation value
#             per row) and time_day.csv (index, year, month, day rows
#             aligned with pr_day.csv).
#
# Returns: numeric vector of length 10 -- the five return-period percent
#   changes for the 2050 window, followed by the five for the 2100 window
#   (ordering matches rain_24hrs: YR100, YR50, YR10, YR2, YR1).
returns_rain <- function(file_dir){
  pr <- read.csv(paste0(file_dir, "/pr_day.csv"), header = FALSE, sep=",", col.names = c("Rain"), colClasses = c("double"))
  # "col.names" spelled out: the original's "col.name" only worked via
  # partial argument matching.
  time <- read.csv(paste0(file_dir, "/time_day.csv"), header = FALSE, sep=",",
                   col.names = c("Index", "Year", "Mon", "Day"), colClasses = c("NULL", "integer", "integer", "integer"))
  time["Rain"] <- pr["Rain"]
  rain_proj <- time
  # 31-year baseline window and two 30-year future windows.
  YearBaseline <- seq(from=1985, to=2015, by=1)
  Year2050 <- seq(from=2021, to=2050, by=1)
  Year2100 <- seq(from=2071, to=2100, by=1)
  # Alternative windows used in earlier runs:
  #YearBaseline <- seq(from=1986, to=2005, by=1)
  #Year2050 <- seq(from=2041, to=2060, by=1)
  #Year2100 <- seq(from=2081, to=2100, by=1)
  #YearBaseline <- seq(from=1976, to=2015, by=1)
  #Year2050 <- seq(from=2031, to=2070, by=1)
  #Year2100 <- seq(from=2061, to=2100, by=1)
  # Annual maximum daily rainfall for every year in Years.
  # BUGFIX: the original stored results at list index yr - min(Years) - 1,
  # which silently dropped the first two years of every window; vapply()
  # now covers all years and returns a plain numeric vector (so the old
  # unlist() step is no longer needed).
  MaxRain <- function(Years){
    vapply(Years,
           function(yr) max(rain_proj$Rain[rain_proj$Year == yr]),
           numeric(1))
  }
  Max_DailyRain_Baseline <- sort(MaxRain(YearBaseline), decreasing = FALSE)
  Max_DailyRain_2050 <- sort(MaxRain(Year2050), decreasing = FALSE)
  Max_DailyRain_2100 <- sort(MaxRain(Year2100), decreasing = FALSE)
  # Select the best-fit distribution per window under the chosen criterion,
  # then compute the return-period rainfall depths.
  criteria <- "AIC"
  MSCBaseline <- MSClaio2008(Max_DailyRain_Baseline, crit=criteria)
  MSC2050 <- MSClaio2008(Max_DailyRain_2050, crit=criteria)
  MSC2100 <- MSClaio2008(Max_DailyRain_2100, crit=criteria)
  returns_Baseline <- rain_24hrs(MSCBaseline, Max_DailyRain_Baseline, criteria)
  returns_2050 <- rain_24hrs(MSC2050, Max_DailyRain_2050, criteria)
  returns_2100 <- rain_24hrs(MSC2100, Max_DailyRain_2100, criteria)
  # Change of each return-period depth relative to baseline, in percent.
  returns_change_2050 <- (returns_2050 - returns_Baseline)/returns_Baseline*100
  returns_change_2100 <- (returns_2100 - returns_Baseline)/returns_Baseline*100
  return(c(returns_change_2050, returns_change_2100))
}
##########################Climate Change Scenario is the only variable to change####################
# Root directory of one emissions scenario (RCP2.6); each GCM subdirectory
# holds an output/ folder with pr_day.csv and time_day.csv.
file_dir <- "C:/Users/Yawen Shen/Desktop/ThirdPaper/Climate Change/Rainfall Projection/RCP26/"
###################################################################################################
# One GCM name per row, no header.
GCMs <- read.csv(paste0(file_dir, "GCMs.csv"), header=FALSE)
# Accumulators: one row per model with the five return-period changes;
# a "Models" name column is appended after the loop.
# NOTE(review): ncol = 6 here, but each row appended below carries only 5
# values and the 6th column is only added after the loop -- rbind() may
# warn or error on the mismatch; confirm.
DF_output_2050 <- data.frame(matrix(ncol = 6, nrow = 0))
DF_output_2100 <- data.frame(matrix(ncol = 6, nrow = 0))
# NOTE(review): growing a data frame with rbind() inside a loop is O(n^2);
# collecting rows in a list and binding once would be faster.
for(model in unlist(GCMs)){
  print(model)
  # First five values are the 2050-window changes, last five the 2100-window.
  output <- returns_rain(paste0(file_dir, model, "/output/"))
  returns_change_2050 <- output[1:5]
  returns_change_2100 <- output[6:10]
  DF_output_2050 <- rbind(DF_output_2050, returns_change_2050)
  DF_output_2100 <- rbind(DF_output_2100, returns_change_2100)
  print(returns_change_2050)
  print(returns_change_2100)
}
# Column names match rain_24hrs' non-exceedance probabilities
# c(0.99, 0.98, 0.9, 0.5, 0.01).
colnames(DF_output_2050) <- c("YR100", "YR50", "YR10", "YR2", "YR1")
colnames(DF_output_2100) <- c("YR100", "YR50", "YR10", "YR2", "YR1")
DF_output_2050["Models"] <- unlist(GCMs)
DF_output_2100["Models"] <- unlist(GCMs)
# Write one CSV of percent changes per target period.
write.csv(DF_output_2050, file=paste0(file_dir, "24Hour_Rainfall_2050_30yr.csv"))
write.csv(DF_output_2100, file=paste0(file_dir, "24Hour_Rainfall_2100_30yr.csv"))
|
18576dbbb3c1b79b1638379fd0015c34f4c29dcd | e1308e1b4707debc25e3660249b727d91d6298c8 | /R/RMS-unbalanced.R | bb9c7dfc32291354ba0b499042ef979ee64f9b32 | [] | no_license | by1919/SPprm | 49836f6efacc4632bc57d7cc7c4f28cb887ad929 | ab7d862c8c56d8b23c7ff36af1d462206aad330d | refs/heads/master | 2022-12-18T10:57:05.901336 | 2020-09-29T20:27:20 | 2020-09-29T20:27:20 | 299,730,354 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,161 | r | RMS-unbalanced.R | #' Conduct estimation and RMS test for one-way random-effects ANOVA model
#'
#' We are conducting hypothesis test for a composite parameter, the RMS, defined as \eqn{\sqrt{\mu^2+\sigma_b^2+\sigma_w^2}:=\sqrt{\rho} }, where
#' \eqn{\mu} is the overall mean, and \eqn{(\sigma_b^2,\sigma_w^2)} are the between/within-subject variances in the
#' one-way random-effects ANOVA model, \eqn{y_{ij}=\mu+u_i+\epsilon_{ij}}, where \eqn{u_i\sim N(0,\sigma_b^2) } and
#' \eqn{\epsilon_{ij}\sim N(0,\sigma_w^2) }. We want to test \eqn{H_0: \rho\ge \rho_0}.
#' We implement a parametric Bootstrap based test with ``exact'' p-value calculation, obviating the need for Bootstrap Monte Carlo simulation.
#' See the reference of Bai et. al (2018). The score and Wald Z-tests, both large-sample normal approximation tests, are also implemented.
#'
#' @param Y vector of outcomes
#' @param subj subject id (factors). Observations with the same id are coming from the same individual.
#' @param rho null threshold of acceptable squared RMS value.
#' @param REML using REML instead of MLE. Default to TRUE.
#' @return
#' \describe{
#' \item{p.value}{ test p-values for: QMS test, score Z-test, Wald Z-test }
#' \item{pars0}{ estimated null parameter values }
#' \item{pars}{ estimated MLE parameter values }
#' }
#' @export
#' @references
#' Bai,Y., Wang,Z., Lystig,T.C., and Wu,B. (2018) Statistical test with sample size and power calculation for paired repeated measures designs of method comparison studies.
#' @examples
#' s2w=1.4^2; s2b=1.7^2; mu0=-0.4
#' ng = c(10,2,10,10,5,7,9,10)
#' A = rep(1:8, times=ng)
#' Y = mu0 + (rnorm(8)*sqrt(s2b))[A] + rnorm(sum(ng))*sqrt(s2w)
#' RMSt(Y,A)
RMSt <- function(Y, subj, rho=9, REML=TRUE){
  # Per-subject group sizes, total sample size and number of subjects.
  ng = as.vector( table(subj) ); N = sum(ng); K = length(ng)
  # Per-subject means and the pooled within-subject sum of squares.
  mus = tapply(Y, subj, mean)
  sse = sum( tapply(Y, subj, function(yi) sum((yi-mean(yi))^2) ) )
  ## est: objective proportional to -2 * log-likelihood of the one-way
  ## random-effects model (up to additive constants), parameterised as
  ## xpar = (mu, log(s2b), log(s2w)) so the variances stay positive.
  ## The last term is the REML adjustment; REML (TRUE/FALSE) enters
  ## arithmetically as 1/0.
  lfn = function(xpar){
    mu = xpar[1]; s2b = exp(xpar[2]); s2w = exp(xpar[3])
    ll1 = sum( log(s2w+ng*s2b) ) + (N-K)*xpar[3] + sse/s2w + sum( ng/(s2w+ng*s2b)*(mus-mu)^2 )
    ans = ll1 + REML*log( sum(ng/(s2w+ng*s2b)) )
  }
  # Inequality constraint mu^2 + s2b + s2w >= rho defining the null
  # parameter space (cobyla enforces hin(xpar) >= 0).
  cfn = function(xpar){
    mu = xpar[1]; s2b = exp(xpar[2]); s2w = exp(xpar[3])
    mu^2+s2b+s2w - rho
  }
  # Null (constrained) parameter estimates via derivative-free optimisation.
  xpar = nloptr::cobyla(c(mean(Y),log(rho/2),log(rho/2)), lfn, hin=cfn)$par
  mu = xpar[1]; s2b = exp(xpar[2]); s2w = exp(xpar[3])
  ## RMS test: build, per subject, the weights (lam), degrees of freedom (h)
  ## and noncentrality parameters (dta) of the chi-square components whose
  ## weighted sum gives the null distribution of sum(Y^2).
  lam = h = dta = NULL
  for(i in 1:K){
    lam = c(lam, s2w+ng[i]*s2b, s2w); h = c(h, 1,ng[i]-1)
    eta0 = ng[i]*mu^2/(s2w+ng[i]*s2b)
    dta = c(dta,eta0, 0)
  }
  # "Exact" p-value from the CDF of the weighted chi-square sum.
  # NOTE(review): pchisum() is not defined in this file; it must be
  # provided elsewhere in the package.
  pvalt = 1-pchisum(sum(Y^2),lam,h,dta)
  ## score Z: large-sample normal approximation using the null estimates.
  tau2 = ( 2*(s2w+ng*s2b)^2+2*(ng-1)*s2w^2 + 4*ng*(s2w+ng*s2b)*mu^2 )/ng^2
  Zs = (mean(Y^2)-rho)/sqrt(sum(tau2))
  pvals = pnorm(Zs)
  ## Wald Z: same statistic but with the unconstrained (ML/REML) estimates.
  xpar = nloptr::newuoa(c(0,0,0), lfn)$par
  mu1 = xpar[1]; s2b1 = exp(xpar[2]); s2w1 = exp(xpar[3])
  tau2 = ( 2*(s2w1+ng*s2b1)^2+2*(ng-1)*s2w1^2 + 4*ng*(s2w1+ng*s2b1)*mu1^2 )/ng^2
  Zw = (mean(Y^2)-rho)/sqrt(sum(tau2))
  pvalw = pnorm(Zw)
  ## Collect the three p-values plus the null and unconstrained fits.
  pval = c(pvalt,pvals,pvalw)
  names(pval) = c('QMS', 'Z-score', 'Z-Wald')
  return( list(p.value=pval, pars0=c(s2w=s2w,s2b=s2b,mu=mu), pars=c(s2w=s2w1,s2b=s2b1,mu=mu1)) )
}
|
c32ffd6be602abd03eb70b9c9bd07d16b41fc759 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/PathSelectMP/examples/NumEndFile.Rd.R | 89be6077d505b42498a33ccfc24a541dcdf2c774 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 587 | r | NumEndFile.Rd.R | library(PathSelectMP)
### Name: NumEndFile
### Title: Extract Number From INP and OUT Files
### Aliases: NumEndFile
### Keywords: Parse helper
### ** Examples
## Don't show:
# Extract the numeric prefix of the element of NameoFile that matches
# pattern1, taking everything up to two characters before the first match
# of pattern2 (e.g. the "12" in "12.out" when pattern2 = "o").
#
# Args:
#   NameoFile: character vector of name fragments (e.g. strsplit() output).
#   pattern1:  regex identifying which element holds the number (e.g. ".out").
#   pattern2:  regex whose first match marks where the number ends.
#
# Returns: the extracted number as numeric (NA with a warning if pattern2
#   does not occur in the matched element).
NumEndFile <- function(NameoFile, pattern1, pattern2) {
  #print(NameoFile[2])
  # Index of the element containing pattern1 (grep's x is given positionally).
  LocUse <- grep(NameoFile, pattern = pattern1)
  # Position of the first pattern2 match within that element.
  Loc <- gregexpr(pattern = pattern2, NameoFile[LocUse])[[1]][1]
  # Digits run from the start up to two characters before the match.
  num <- substr(NameoFile[LocUse], 1, Loc - 2)
  #print(num)
  as.numeric(num)
}
## End(Don't show)
files=c("new_1.out","new_10.out","new_11.out","new_12.out")
hh=lapply(strsplit(files,"_"),NumEndFile,pattern1=".out",pattern2="o")
|
f4af53e930c67c3867692263c7eb037267668e00 | fbe0d74f9b11925c098c25f810e1c10a2ee0556f | /R/class_bill.R | 19ffb320b20660439c1c991a0a136ba2a451e91b | [] | no_license | takewiki/tsdm | 6848bee07680d04c0781dc098043bf2c44c5006e | c5133cddfbf3c59f1b770c5d056b621e5ebccd05 | refs/heads/master | 2021-08-06T06:20:31.271092 | 2020-05-23T13:05:30 | 2020-05-23T13:05:30 | 179,942,457 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 357 | r | class_bill.R | #' 定义一个单据的基本类型
#'
#' @slot FInterId integer. 单据内码
#' @slot FNumber character. 单据代码
#' @slot FName character. 单据名称
#'
#' @return 没有返回值
#' @export
#'
#' @examples 不需要示例
setClass('bill',slots = c(FInterId = 'integer',FNumber='character',FName='character'),
contains = 'VIRTUAL');
|
cc0d0fd76a41c3f7460964644224e0173827614e | 3653a5e85dca41ca724b03c83fad08e92c433244 | /GoogleVis.R | 361aecf1862e57fab41ade0692abaef0931705f0 | [] | no_license | ChanningC12/Developing-Data-Products | 3742e035ddd05403673c4e329cb44ebe175422ec | a3165c3ebf12a6bbd8e207dafbded7c216dfffad | refs/heads/master | 2020-07-02T09:53:23.507313 | 2016-11-21T01:44:09 | 2016-11-21T01:44:09 | 74,312,179 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,267 | r | GoogleVis.R | # Google Vis API
# Walkthrough of the googleVis API: motion charts, geo charts, tables and
# merged charts, using the demo datasets (Fruits, Exports) shipped with the
# googleVis package.  Each gvis* call builds an HTML/JS chart object;
# plot() renders it in the browser.
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run -- usually better kept outside the script.
install.packages("googleVis")
library(googleVis)
## Example
suppressPackageStartupMessages(library(googleVis))
# assign gvis chart to Fruit
M = gvisMotionChart(Fruits,"Fruit","Year",options=list(width=600,height=400))
plot(M)
print(M,"chart") # give you the relevant html page
# Motion chart: givsMotionChart
# Interactive maps: givsGeochart
# Interactive tables: gvisTable
# Line charts: gvisLineChart
# Bar charts: gvisColumnChart
# Tree maps: gvisTreeMap
G = gvisGeoChart(Exports, locationvar="Country",colorvar = "Profit", options=list(width=600, height=400))
plot(G)
# merge multiple plots
T1 = gvisTable(Exports, options=list(width=200,height=270))
# gvisMerge stacks two charts on one page (horizontal = FALSE -> vertically).
GT = gvisMerge(G,T1,horizontal=F)
plot(GT)
GTM = gvisMerge(GT,M,horizontal = T)
plot(GTM)
##### Exports example. specify a region, region = 150 will zoom in WE region
G2 = gvisGeoChart(Exports, locationvar="Country",colorvar = "Profit",
                  options=list(width=600, height=400, region="150"))
plot(G2)
##### Line chart
df = data.frame(label=c("Full Pay","RBO","NFO"), val1=c(0.85,0.10,0.05), val2=c(1000,150,100))
Line = gvisLineChart(df, xvar="label",yvar=c("val1","val2"),
                     options = list(title="Genworth", legend="bottom"))
plot(Line)
c1dbe21298fbc2014a26a1e08af214c12d1eeb02 | 1259fcec2ee9fc09eefdac558358a2e202cc32f2 | /testing-scripts/case_study_data_munging.R | 27fd73864a3ea97d0165c2c883a7a8eb65e7b43b | [
"MIT"
] | permissive | bertrand-lab/cobia | 912a6f75966396a84e321da8744eb7fd5303ce21 | c9170b662c9ee3d2cac66cb4effdf9ad74af5a84 | refs/heads/master | 2021-06-24T19:30:23.483139 | 2020-11-24T22:27:08 | 2020-11-24T22:27:08 | 154,718,223 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,712 | r | case_study_data_munging.R | library(ggplot2)
library(dplyr)
library(readxl)
library(seqinr)
library(reshape2)
library(cleaver)
library(Peptides)
library(Biostrings)
# Count how many times `char` occurs in string `s` (vectorized over `s`).
#
# Implemented as the length difference after deleting every occurrence of
# `char`.  `fixed = TRUE` makes `char` match literally: the original
# gsub() call interpreted it as a regex, so metacharacters such as "."
# or "(" were miscounted (e.g. char = "." deleted every character).
#
# @param char single character to count (matched literally)
# @param s    character vector to count within
# @return integer count of `char` in each element of `s`
countCharOccurrences <- function(char, s) {
  s2 <- gsub(char, "", s, fixed = TRUE)
  nchar(s) - nchar(s2)
}
# Negated %in%: TRUE for elements of x that are absent from y.
`%!in%` <- function(x, y) {
  !(x %in% y)
}
# read in file with ORF ids and annotations
# annot_contigs <- read_excel("data/bertrand_tfg_data/annotation_allTFG.mmetsp_fc_pn_reclassified.edgeR.xlsx")
annot_contigs <- read_excel("data/bertrand_data/antarctica_2013_MCM_FeVit_annotations.xlsx", skip = 1)
# Keywords identifying the B12-independent methionine synthase annotations.
vit_keywords <- c('vitamin-B12 independent', 'Cobalamin-independent')
# Tryptic peptides with their parent ORF (V1 = peptide sequence, V2 = orf id;
# columns are renamed to pep_seq/orf_id downstream in find_tax_peps()).
all_tryptic_peps <- read.table(file = 'data/bertrand_data/orfs.filtered.pep.trypsin_wcontigs.txt', sep = ',')
# Collect the ORF ids whose annotation matches any of the supplied keywords.
#
# Each keyword is grepped (as a regex) against the five annotation
# description columns (KEGG, KOG, KO, best-hit, PFam); any ORF matching in
# any column for any keyword is kept once.
#
# Fixes over the original:
#  * the `annotation_file` argument is now actually used -- the original
#    referenced the global `annot_contigs` regardless of what was passed;
#  * invalid input now raises an error -- the original's bare `stop` (no
#    parentheses) evaluated the function object and did nothing.
#
# @param key_word        character vector of keywords (regex patterns)
# @param annotation_file data.frame/tibble with columns kegg_desc, KOG_desc,
#                        KO_desc, best_hit_annotation, PFams_desc, orf_id
# @return vector of unique matching orf_id values
get_contigs <- function(key_word, annotation_file) {
  if (!is.character(key_word)) {
    stop("key_word must be a character vector")
  }
  desc_cols <- c("kegg_desc", "KOG_desc", "KO_desc",
                 "best_hit_annotation", "PFams_desc")
  orf_hits <- character(0)
  for (kw in key_word) {
    for (col in desc_cols) {
      matched <- grepl(pattern = kw, x = annotation_file[[col]])
      orf_hits <- c(orf_hits, annotation_file$orf_id[matched])
    }
  }
  unique(orf_hits)
}
# Return the proteotypic peptides for a set of target contigs: peptides
# (column V1) whose parent ORF (column V2) is in `contig_list` and that
# never occur under any non-target ORF.
#
# @param contig_list          character vector of target ORF/contig ids
# @param tryptic_peptide_file data.frame with columns V1 (peptide) and V2 (orf id)
# @return list of two elements: [[1]] the informative peptide sequences as
#         character, [[2]] the corresponding rows of `tryptic_peptide_file`
get_peps <- function(contig_list, tryptic_peptide_file) {
  in_target <- tryptic_peptide_file$V2 %in% contig_list
  target_df <- tryptic_peptide_file[in_target, ]
  nontarget_df <- tryptic_peptide_file[!in_target, ]
  # A peptide is informative only if it is absent from all non-target ORFs.
  informative <- !(target_df$V1 %in% nontarget_df$V1)
  informative_df <- target_df[informative, ]
  list(as.character(informative_df$V1), informative_df)
}
# Prepare a peptide table for targeted cobia analysis: annotate each peptide
# with an approximate doubly-charged m/z (including per-residue modification
# mass shifts) and drop peptides that are too heavy or too short.
#
# @param get_peps_out data.frame with a `pep_seq` column (e.g. the second
#        element returned by get_peps(), after renaming to pep_seq/orf_id)
# @return the input with added columns mz_nomod, len_prot, num_m, num_c, mz,
#         filtered to mz < 2000 and peptide length > 4
# Depends on Peptides::mw(), stringr::str_count() and dplyr::filter().
write_targeted_cobia <- function(get_peps_out){
  # write file for targeted cobia
  pep_targeted <- get_peps_out
  # mw()/2 is used as the 2+ m/z.
  # NOTE(review): a true 2+ m/z would be (M + 2*1.00728)/2 -- presumably the
  # approximation is consistent with the cobia pipeline; confirm.
  pep_targeted$mz_nomod <- mw(pep_targeted$pep_seq)/2
  pep_targeted$len_prot <- nchar(pep_targeted$pep_seq %>% as.character())
  # Count methionines and cysteines to add per-residue modification masses.
  pep_targeted$num_m <- str_count(pep_targeted$pep_seq %>% as.character(), pattern = "M")
  pep_targeted$num_c <- str_count(pep_targeted$pep_seq %>% as.character(), pattern = "C")
  # 28.5 per C and 8 per M: modification mass shifts already divided by the
  # charge of 2 (e.g. carbamidomethyl C = 57.02/2, oxidised M = 16/2) --
  # TODO confirm these are the intended modifications.
  pep_targeted$mz <- pep_targeted$mz_nomod + pep_targeted$num_c*28.5 + pep_targeted$num_m*8
  # Keep peptides within instrument range and of informative length.
  pep_targeted2 <- pep_targeted %>% filter(mz < 2000, len_prot > 4)
  return(pep_targeted2)
}
# Keep peptides that are specific to one taxon: present among the
# proteotypic peptides whose ORF is annotated to `taxonomy_id`
# (best_LPI_species) and absent from every other taxon's peptides.
#
# @param contig_annot_file annotation table; columns 1:37 must include
#        orf_id and best_LPI_species
# @param proteotypic_peps  data.frame with columns pep_seq and orf_id
# @param taxonomy_id       target species name
# @return character vector of taxon-specific peptide sequences
# Depends on dplyr::inner_join() and the file-level %!in% operator.
get_tax_specific_peps <- function(contig_annot_file, proteotypic_peps, taxonomy_id){
  # names(proteotypic_informative_tryp_pep_df) <- c('pep_seq', 'orf_id')
  good_peptide_candidates <- inner_join(proteotypic_peps, contig_annot_file[, c(1:37)], by = 'orf_id')
  # subset good peptide candidates by taxonomy
  tax_specific_peps <- good_peptide_candidates[good_peptide_candidates$best_LPI_species == taxonomy_id, ]$pep_seq %>% as.character()
  # tax specific proteins
  not_tax_specific_peps <- good_peptide_candidates[good_peptide_candidates$best_LPI_species != taxonomy_id, ]$pep_seq %>% as.character()
  # Drop any sequence that also occurs under a different taxonomy.
  good_tax_peps <- tax_specific_peps[tax_specific_peps %!in% not_tax_specific_peps] %>% as.character()
  return(good_tax_peps)
}
# End-to-end helper: find taxon-specific, proteotypic tryptic peptides for
# annotations matching a set of keywords.
#
# Pipeline: get_contigs() selects ORFs whose annotation matches `key_word`;
# get_peps() keeps peptides unique to those ORFs; get_tax_specific_peps()
# then restricts to peptides seen only under `target_tax`.
#
# @param tryptic_peptide_file data.frame with columns V1 (peptide), V2 (orf id)
# @param key_word             character vector of annotation keywords
# @param annotation_file      annotation table (see get_contigs)
# @param target_tax           species name, e.g. "Fragilariopsis cylindrus"
# @return character vector of candidate peptide sequences
find_tax_peps <- function(tryptic_peptide_file, key_word, annotation_file, target_tax){
  # tryptic_peptide_file <- all_tryptic_peps
  # key_word <- vit_keywords
  # annotation_file <- annot_contigs
  # target_tax <- "Fragilariopsis cylindrus"
  target_contigs <- get_contigs(key_word = key_word, annotation_file = annotation_file)
  candidate_peps <- get_peps(contig_list = target_contigs,
                             tryptic_peptide_file = tryptic_peptide_file)
  # Rename the raw V1/V2 columns for the downstream join by orf_id.
  names(candidate_peps[[2]]) <- c('pep_seq', 'orf_id')
  tax_specific_peps <- get_tax_specific_peps(contig_annot_file = annotation_file, 
                                             proteotypic_peps = candidate_peps[[2]],
                                             taxonomy_id = target_tax)
  return(tax_specific_peps)
}
# Run the pipeline for Fragilariopsis cylindrus metE peptides, write the
# candidates out, then merge in cofragmentation and CONSeQuence scores.
good_frag_peps <- find_tax_peps(tryptic_peptide_file = all_tryptic_peps, 
                                key_word = vit_keywords, 
                                annotation_file = annot_contigs, 
                                target_tax = "Fragilariopsis cylindrus")
write.fasta(as.list(good_frag_peps), names = seq(from = 1, to = length(good_frag_peps)), file.out = 'data/bertrand_data/good_frag_peps.fasta')
write.csv(data.frame(pep_seq = good_frag_peps),
          row.names = FALSE,
          file = "data/bertrand_data/frag_cyl_peps_metE.csv")
# reading in cofragmentation data
targ <- read.csv("data/bertrand_data/orfs.filtered.pep.trypsin_targeted_frag_cyl_metE_mi-0.00833333_ipw-0.725_para-15_co-sim.csv")
# joining target with cofragmentation scores
consequence_file <- read.csv("data/bertrand_data/output2018_11_14_18_50_50_499.csv")
targ_con <- inner_join(x = targ, y = consequence_file, by = c("pep_seq" = "Peptide"))
final_pep_file <- data.frame(Peptide = targ_con$pep_seq, `Cofragmentation Score` = targ_con$mean_cofrag_score, `CONSeQuence Score` = targ_con$CONS)
# three peptides had a CONSeQuence score of 0, so they are not actually within the con file. They are "LLPLYK" "DEFISK" "FVGADK". DEFISK was not found in all Frag genomes, so it's removed. The others are manually added in.
additional_peps <- data.frame(Peptide = c("LLPLYK", "FVGADK"), `Cofragmentation Score` = c(13.55429, 195.13714), `CONSeQuence Score` = c(0, 0))
final_pep_file_appened <- rbind(final_pep_file, additional_peps)
# removing peptides that were not found in Fragilariopsis genomes from Mock et al (manually searched for each peptide)
bad_peptides <- c('EIQIHEPALVFDESSK', 'SPANLTDYLANVK', 'IDSIPVGEHFYYDGVLSWAEWLGIVPK')
final_table_for_paper <- final_pep_file_appened %>% filter(Peptide %!in% bad_peptides)
write.csv(final_table_for_paper, file = 'data/bertrand_data/frag_cyl_metE_peps_consequence.csv')
# subset the CONSEQUENCE scores of four
really_good_peps <- c("HSTFAQTEGSIDVQR", "AQAVEELGWSLQLADDK", "WFTTNYHYLPSEVDTK")
# LC retention times / masses and the DDA instrument parameters used below.
pep_lc_file <- read.csv("data/bertrand_data/orfs.filtered.pep.trypsin_lc-retention-times.csv")
dda_params_file <- read.csv("data/broberg_data/dda_params_broberg.csv")
# look for peptides of similar mass and retention time as above
# Find "co-fragmentation buddies": all peptides in the LC file whose
# retention time falls within one ion peak width, and whose 2+ m/z falls
# within half the precursor selection window, of the target peptide.
#
# @param pep_seq_cofrag  target peptide sequence (without the '-OH' suffix
#                        the LC file appends to its sequences)
# @param lc_file         data.frame with peptide_sequence, rts, mass, contig
# @param dda_params_file data.frame whose first row holds ion_peak_width and
#                        precursor_selection_window
# @return list: [[1]] buddy sequences, [[2]] buddy contigs (row-paired with
#         [[1]]), [[3]] unique sequences, [[4]] unique contigs
cofrag_buddies <- function(pep_seq_cofrag, lc_file, dda_params_file){
  # lc_file <- pep_lc_file
  # pep_seq_cofrag <- "HSTFAQTEGSIDVQR"
  # Retention time of the target (LC file stores sequences with '-OH').
  rt_pep <- lc_file[lc_file$peptide_sequence == paste0(pep_seq_cofrag, '-OH'), ]$rts
  rt_upper_bound <- rt_pep + dda_params_file[1, c('ion_peak_width')]
  rt_lower_bound <- rt_pep - dda_params_file[1, c('ion_peak_width')]
  # mass/2 used as the doubly charged m/z (proton mass not added).
  mz_pep <- lc_file[lc_file$peptide_sequence == paste0(pep_seq_cofrag, '-OH'), ]$mass/2
  mz_upper_pep <- mz_pep + 0.5*dda_params_file[1, c('precursor_selection_window')]
  mz_lower_pep <- mz_pep - 0.5*dda_params_file[1, c('precursor_selection_window')]
  lc_file$mz <- lc_file$mass/2
  # Peptides inside both windows (strict inequalities; note the target
  # itself satisfies these bounds and is therefore included in the output).
  other_peps <- lc_file %>% dplyr::filter(rts > rt_lower_bound,
                                          rts < rt_upper_bound,
                                          mz > mz_lower_pep,
                                          mz < mz_upper_pep)
  other_peps_seqs_unique <- unique(other_peps$peptide_sequence) %>% as.character()
  other_peps_seqs <- other_peps$peptide_sequence %>% as.character()
  other_peps_contigs_unique <- unique(other_peps$contig) %>% as.character()
  other_peps_contigs <- other_peps$contig %>% as.character()
  finale_list <- list(other_peps_seqs, other_peps_contigs, other_peps_seqs_unique, other_peps_contigs_unique)
  return(finale_list)
}
# Inspect the co-fragmentation buddies of each of the three top candidates.
test <- cofrag_buddies(pep_seq_cofrag = 'HSTFAQTEGSIDVQR', 
                       lc_file = pep_lc_file, 
                       dda_params_file = dda_params_file)
cofrag_buddies(pep_seq_cofrag = 'AQAVEELGWSLQLADDK', 
               lc_file = pep_lc_file, 
               dda_params_file = dda_params_file)
cofrag_buddies(pep_seq_cofrag = 'WFTTNYHYLPSEVDTK', 
               lc_file = pep_lc_file, 
               dda_params_file = dda_params_file)
# Attach annotation columns to the co-fragmentation buddies returned by
# cofrag_buddies().
#
# @param cofrag_buddy_output list from cofrag_buddies(); element [[1]] are
#        peptide sequences and [[2]] their ORF ids (paired per row)
# @param annot_file annotation table with orf_id plus description columns
# @return data.frame of pep_seq, orf_id and the selected annotation columns
# Depends on dplyr::inner_join().
cofrag_buddy_annot <- function(cofrag_buddy_output, annot_file){
  # cofrag_buddy_output <- test
  # annot_file <- annot_contigs
  cofrag_buddy_output_peps <- data.frame(pep_seq = cofrag_buddy_output[[1]], orf_id = cofrag_buddy_output[[2]])
  # Annotation rows for the buddy ORFs, restricted to descriptive columns.
  annot_sub <- annot_file[annot_file$orf_id %in% cofrag_buddy_output_peps[[2]], c('best_hit_annotation',
                                                                                 'kegg_desc',
                                                                                 'KOG_desc',
                                                                                 'KO_desc',
                                                                                 'best_LPI_species', 'orf_id')]
  annot_sub_finale <- inner_join(cofrag_buddy_output_peps, annot_sub, by = 'orf_id')
  # finale_df <- cbind(cofrag_buddy_output[[1]],
  #                    annot_sub$best_hit_annotation,
  #                    annot_sub$kegg_desc,
  #                    annot_sub$KOG_desc,
  #                    annot_sub$KO_desc,
  #                    annot_sub$best_LPI_species)
  return(annot_sub_finale)
}
# Convenience wrapper: find the co-fragmentation buddies for one peptide
# and annotate them in a single call.
#
# @param pep_seq target peptide sequence
# @param lc_file_master, dda_params_file_master, annot_file default to the
#        globals loaded earlier in this script (pep_lc_file, dda_params_file,
#        annot_contigs)
# @return annotated data.frame from cofrag_buddy_annot()
cofrag_proc <- function(pep_seq, lc_file_master = pep_lc_file, dda_params_file_master = dda_params_file, annot_file = annot_contigs){
  # pep_seq <- 'HSTFAQTEGSIDVQR'
  co_buddies <- cofrag_buddies(pep_seq_cofrag = pep_seq, lc_file = lc_file_master, dda_params_file = dda_params_file_master)
  co_buddies_annot <- cofrag_buddy_annot(cofrag_buddy_output = co_buddies, annot_file = annot_file)
  return(co_buddies_annot)
}
# Annotated co-fragmentation candidates per target peptide; the bracketed
# indices pull out individual rows inspected by hand.
cofrag_proc(pep_seq = 'HSTFAQTEGSIDVQR')
# lots of unknown proteins. even three separately identified proteins from frag
cofrag_proc(pep_seq = 'AQAVEELGWSLQLADDK')[13,]
cofrag_proc(pep_seq = 'AQAVEELGWSLQLADDK')[7,]
cofrag_proc(pep_seq = 'AQAVEELGWSLQLADDK')[64,]
cofrag_proc(pep_seq = 'WFTTNYHYLPSEVDTK')[15,]
cofrag_proc(pep_seq = 'WFTTNYHYLPSEVDTK')[22,]
# determine which of the contigs are also in that bin
# figure out what they do biologically, and see what would happen if expression patterns changed
|
76dc90222ecee1ab436deb4d1eccd2d9e7c5c880 | 5febc1e3f2dd766ff664f8e0ae79002072359bde | /R/mgraph.r | 462dc9e3ec1a64ec4035d6515652e2042827a7ce | [
"MIT"
] | permissive | tanaylab/metacell | 0eff965982c9dcf27d545b4097e413c8f3ae051c | ff482b0827cc48e5a7ddfb9c48d6c6417f438031 | refs/heads/master | 2023-08-04T05:16:09.473351 | 2023-07-25T13:37:46 | 2023-07-25T13:37:46 | 196,806,305 | 89 | 30 | NOASSERTION | 2023-07-25T13:38:07 | 2019-07-14T07:20:34 | R | UTF-8 | R | false | false | 1,480 | r | mgraph.r | #' manifold graph structure over a metacell object
#'
#' Splitting metacells over a discrete time axis, defining manifold connections and estimated flows over them
#'
#' @slot mc_id id of the metacell object we represent as a network
#' @slot mgraph a data frame defining triplets mc1, mc2, distance
#'
#' NOTE(review): the class defines only the mc_id and mgraph slots; the
#' previously documented slots times_nms, mc_t and mc_manifold do not exist
#' here (mc_manifold was presumably renamed to mgraph -- confirm).
#'
#' @export tgMCManifGraph
#' @exportClass tgMCManifGraph
tgMCManifGraph <- setClass(
   "tgMCManifGraph",
	slots = c(
	  mc_id = "character",
	  mgraph = "data.frame"
	)
)
#' Construct a meta cell manifold graph
#'
#' S4 initializer: stores the metacell id and the manifold edge list, and
#' validates that mc_id refers to an existing metacell object in scdb.
#'
#' @param mc_id metacell object id (must exist in scdb)
#' @param mgraph data frame defining mc1, mc2, distance
#' @export
setMethod(
  "initialize",
  signature = "tgMCManifGraph",
  definition =
    function(.Object, mc_id, mgraph) {
      .Object@mc_id = mc_id
      .Object@mgraph = mgraph
      # Fail fast if the referenced metacell object is not in the database.
      mc = scdb_mc(mc_id)
      if(is.null(mc)) {
        stop("MC-ERR unkown mc_id ", mc_id, " when building mc mgraph")
      }
      return(.Object)
      }
)
#' Generate a new metacell manifold graph object
#'
#' This constructs a meta cell manifold graph object - only encapsulating an
#' edge list data frame - and registers it in scdb under mgraph_id.
#'
#' @param mgraph_id id under which the new object is stored in scdb
#' @param mc_id id of the scdb meta cell object the mgraph refers to
#' @param mgraph the mgraph data frame containing fields mc1, mc2, distance
#' @export
mcell_new_mc_mgraph = function(mgraph_id, mc_id, mgraph)
{
	# NOTE(review): the object is registered via scdb_add_mc(); if scdb has a
	# dedicated mgraph registry (scdb_add_mgraph), that may be the intended
	# call -- confirm against the scdb API.
	scdb_add_mc(mgraph_id, tgMCManifGraph(mc_id, mgraph))
}
|
fe202d2a5e3a0b3ad1cffb2e4685833651c597ab | 0f104ea64886750d6c5f7051810b4ee39fa91ba9 | /man/redcap_variables.Rd | 1dd8c30e85e6cfe2abc74b620af6dba0aec641cf | [
"MIT"
] | permissive | OuhscBbmc/REDCapR | 3ca0c106e93b14d55e2c3e678f7178f0e925a83a | 34f2154852fb52fb99bccd8e8295df8171eb1c18 | refs/heads/main | 2023-07-24T02:44:12.211484 | 2023-07-15T23:03:31 | 2023-07-15T23:03:31 | 14,738,204 | 108 | 43 | NOASSERTION | 2023-09-04T23:07:30 | 2013-11-27T05:27:58 | R | UTF-8 | R | false | true | 2,943 | rd | redcap_variables.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redcap-variables.R
\name{redcap_variables}
\alias{redcap_variables}
\title{Enumerate the exported variables}
\usage{
redcap_variables(
redcap_uri,
token,
verbose = TRUE,
config_options = NULL,
handle_httr = NULL
)
}
\arguments{
\item{redcap_uri}{The
\href{https://en.wikipedia.org/wiki/Uniform_Resource_Identifier}{uri}/url
of the REDCap server
typically formatted as "https://server.org/apps/redcap/api/".
Required.}
\item{token}{The user-specific string that serves as the password for a
project. Required.}
\item{verbose}{A boolean value indicating if \code{message}s should be printed
to the R console during the operation. The verbose output might contain
sensitive information (\emph{e.g.} PHI), so turn this off if the output might
be visible somewhere public. Optional.}
\item{config_options}{A list of options passed to \code{\link[httr:POST]{httr::POST()}}.
See details at \code{\link[httr:httr_options]{httr::httr_options()}}. Optional.}
\item{handle_httr}{The value passed to the \code{handle} parameter of
\code{\link[httr:POST]{httr::POST()}}.
This is useful for only unconventional authentication approaches. It
should be \code{NULL} for most institutions. Optional.}
}
\value{
Currently, a list is returned with the following elements,
\itemize{
\item \code{data}: A \code{\link[tibble:tibble]{tibble::tibble()}} where each row represents one column
in the REDCap dataset.
\item \code{success}: A boolean value indicating if the operation was apparently
successful.
\item \code{status_code}: The
\href{https://en.wikipedia.org/wiki/List_of_HTTP_status_codes}{http status code}
of the operation.
\item \code{outcome_message}: A human readable string indicating the operation's
outcome.
\item \code{elapsed_seconds}: The duration of the function.
\item \code{raw_text}: If an operation is NOT successful, the text returned by
REDCap. If an operation is successful, the \code{raw_text} is returned as an
empty string to save RAM.
}
}
\description{
This function calls the 'exportFieldNames' function of the
REDCap API.
}
\details{
As of REDCap version 6.14.2, three variable types are \emph{not} returned in
this call: calculated, file, and descriptive. All variables returned are
writable/uploadable.
}
\examples{
\dontrun{
uri <- "https://bbmc.ouhsc.edu/redcap/api/"
token <- "9A81268476645C4E5F03428B8AC3AA7B"
ds_variable <- REDCapR::redcap_variables(redcap_uri=uri, token=token)$data
}
}
\references{
The official documentation can be found on the 'API Help Page'
and 'API Examples' pages on the REDCap wiki (\emph{i.e.},
https://community.projectredcap.org/articles/456/api-documentation.html and
https://community.projectredcap.org/articles/462/api-examples.html).
If you do not have an account for the wiki, please ask your campus REDCap
administrator to send you the static material.
}
\author{
Will Beasley
}
|
5e5a8b5de4ced9d8b450d6fa71365c6f74127add | e125045bcb852ee63d02515a0cfeee4d77a3a79d | /TimeSeriesJieYue.R | f9f634161073283bf2a1aebedc4b9f3ff13b6bf4 | [] | no_license | lchen22643/DataCleaningToolsR | 6af5c13d288963b8078969d9543b8bc3e7de33a2 | 3db03a6eb8b5ac4795b9ba42ba435fd4b433e4d4 | refs/heads/master | 2020-04-04T09:39:00.135247 | 2018-11-02T07:28:18 | 2018-11-02T07:28:18 | 155,826,174 | 0 | 0 | null | null | null | null | GB18030 | R | false | false | 4,688 | r | TimeSeriesJieYue.R | library(dplyr)
library(tidyr)
library(TTR)
library(forecast)
library(MatrixModels)
# Loan-repayment time-series prep: load the repayment table, parse dates,
# flag delinquent records, and aggregate the daily shortfall amounts.
repay = readRDS(file="repay.rds")
# Extract the date part of PAY_DATE (from the first digit up to the space).
repay$paytime=substr(repay$PAY_DATE,regexpr("[0-9]",repay$PAY_DATE),regexpr("\\s",repay$PAY_DATE))
#repay$repaytime=substr(repay$REPAY_DATE,regexpr("[0-9]",repay$REPAY_DATE),regexpr("\\s",repay$REPAY_DATE))
repay=select(repay,-UPDATE_TIME,-CREATE_TIME,-PAY_DATE)
x = na.omit(repay)
repay$paytime=as.Date(repay$paytime,format = "%Y/%m/%d")
repay1 = filter(repay,paytime<"2018/7/26" )
# Behavioural data
repay$qiancha = (repay$MUST_BASE+repay$MUST_INST)-(repay$REAL_BASE+repay$REAL_INST)# the two components of non-repayment (shortfall = amount owed - amount paid)
# Delinquent records: any penalty, default, or shortfall.
laolai = filter(repay,(MUST_PENALTY!=0|MUST_DEFAULT!=0|qiancha!=0))
laolai=na.omit(laolai)
# NOTE(review): sum(qian) below references a column `qian` that is never
# created in this script (only `qiancha` is) -- presumably it exists inside
# repay.rds, or `qiancha` was intended; confirm.
laolai%>%
  group_by(paytime)%>%
  arrange(paytime)%>%
  summarise(money = sum(qian))->repaid
rep = filter(repaid,paytime>"2017/1/1")
plot(rep$paytime, rep$money, main = "TIme series",
     xlab = "time",
     ylab = "Count(missrepay)perday ",type="l")
#############################################################################
rep$yearmonth=strftime(rep$paytime, format = "%y-%m")
rep$monthDate=strftime(rep$paytime, format = "%d")
#############################################################################
# Monthly totals and each day's share ("weight") of its month.
rep%>%
  arrange(desc(paytime))%>%
  group_by(yearmonth)%>%
  mutate(sumbymonth=sum(money))%>%
  arrange(paytime)%>%
  mutate(weight=money/sumbymonth)->ques
#############################################################################
rep0 = filter(rep,paytime>'2017/1/30')
repp = select(rep0,-paytime)
ques1 =spread(repp,monthDate,money)# time series of the raw repayment amounts
ques0=filter(ques,paytime>'2017/1/30')
repp1 = select(ques0,yearmonth,monthDate, weight)
# Time series of each day's repayment as a share of the monthly total; the
# goal is to flatten out the growth-over-time trend.
ques11 =spread(repp1,monthDate,weight)
names(ques11)
ques11[is.na(ques11)]<-0
ques1[is.na(ques1)]<-0
############time series######################################################
ts(ques$weight)
plot(ts(ques$money))
abline(lm(ts(ques$money)~time(ques$paytime)))
plot(ts(ques$weight))
abline(lm(ts(ques$weight)~time(ques$paytime)))
a = lm(ts(ques$money)~time(ques$paytime))
b=lm(ts(ques$weight)~time(ques$paytime))
summary(a)
summary(b)
# From this it can be seen that the weights (shares), rather than the raw
# amounts, are the better basis for the time-series analysis.
############################################################################
ques[is.na(ques)]<-0
ques = filter(ques,paytime>'2017/1/30')
ques$yearmonth
#acf(tsSMA)
# Seasonal ARIMA fit on the weight series (frequency 11 = payment days/month here).
aa=ts(ques$weight,frequency=11)
auto.arima(aa,trace=T)
data.fit=arima(aa,order=c(3,0,1),seasonal=list(order=c(1,0,0),period=1),method="ML")
airforecast <- forecast::forecast(data.fit,h=13,level=c(.1))
airforecast
plot(airforecast)
#######################################################################noswat
#acf(tsSMA0)
aab=ts(ques$money,frequency=11)
auto.arima(aab,trace=T)
data.fit0=arima(aab,order=c(4,0,4),seasonal=list(order=c(1,0,1),period=11),method="ML")
# NOTE(review): ??forecast.arima opens the interactive help search -- leftover
# from an interactive session; harmless but should not be in a script.
??forecast.arima
airforecast0 <- forecast::forecast(data.fit0,h=11,level=c(0.1))
airforecast0
plot(airforecast0)
##############forecast validation/ and show the plot and details of transform
weighttrans=gather(ques11,`01`, `05`, `06` ,`08` ,`09`, `10`,`11`, `14`,`16`,`26`,`27`,`28`,key=monthDate,value = 'weight')
moneytrans=gather(ques1,`01`, `05`, `06` ,`08` ,`09`, `10`,`11`, `14`,`16`,`26`,`27`,`28`,key=monthDate,value = 'money')
weighttrans=arrange(weighttrans,yearmonth,monthDate)
moneytrans=arrange(moneytrans,yearmonth,monthDate)
# Train/test split: first 168 rows train, last 16 rows held out.
trainw=weighttrans[1:168,]
testw=weighttrans[169:184,]
aab=ts(trainw$weight,frequency=12)
auto.arima(aab,trace=T)
data.fit0=arima(aab,order=c(1,0,0),seasonal=list(order=c(0,1,0),period=12),method="ML")
forecast0 <- forecast::forecast(data.fit0,h=16,level=c(0.1))
forecast0$mean
testw$weight
# NOTE(review): `b` is the forecast pasted in as a hard-coded vector; it goes
# stale if the model or data change -- better to use forecast0$mean directly.
b= c(0.42307069 ,0.02126879, 0.04722411, 0.02137607 ,0.02988654 ,0.03889301, 0.03150717 ,0.00355773, 0.24555460, 0.03332303, 0.05149679,
     0.05287017, 0.42307069, 0.02126879, 0.04722411, 0.02137607)
mae(b,testw$weight)
###############################
trainm=moneytrans[1:168,]
testm=moneytrans[169:184,]
XXB=ts(trainm$money,frequency=12)
auto.arima(XXB,trace=T)
data.fit1=arima(XXB,order=c(1,0,0),seasonal=list(order=c(0,1,0),period=12),method="ML")
??forecast.arima
forecast1 <- forecast::forecast(data.fit1,h=16,level=c(0.1))
# NOTE(review): `summary` here shadows base::summary for the rest of the script.
summary = summary(forecast1)
a = summary$`Point Forecast`
a
testm$money
# Hard-coded copy of the money forecast (same staleness caveat as `b`).
a<-c(5203118.1,261983.7,580230.3,262614.9,367157.9,477801.7,387066.4,43706.8,3016644.4,409374.2,632639.4,
     649511.3,5203118.1,261983.7,580230.3,262614.9)
mae(a,testm$money)
forecastfuture <- forecast::forecast(data.fit1,h=24,level=c(0.1))
plot(forecastfuture)
357b1fd0fbfaf6efc991a3e54f76548c33cb09aa | f58a7f3646fbd25d0ef8ebecc7003c785e6e42da | /R/run_random_bias_experiments.R | 160a10c7abe1b9c7a81beda11d379dc2d27c529c | [] | no_license | schnee/big-data-big-math | 0bc01ee641e936f260ccf7ffad56d30bd3599a50 | 9d055dddf21eb7ce996857f6b6df4430f39d4b3d | refs/heads/master | 2023-02-09T10:54:37.192899 | 2023-02-02T15:47:45 | 2023-02-02T15:47:45 | 185,220,994 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,782 | r | run_random_bias_experiments.R |
library(keras)
library(ggplot2)
library(purrr)
library(readr)
library(tibble)
library(dplyr)
library(magrittr)
devtools::load_all(here::here("packages/testbench"))
# Random-label-noise experiment on Fashion-MNIST: sweep a range of damage
# fractions through run_random_damage_exp() (from the local testbench
# package loaded above) and plot accuracy / AUC against the share of
# correctly labeled training data.
mnist <- keras::dataset_fashion_mnist()
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y
#damage_tib <- c(0.01) %>%
# Damage fractions: fine 1% steps near 0 and 1, coarse 10% steps in between.
damage_tib <- c(0:9 / 100, 1:9 / 10, 91:99 / 100) %>%
  sort() %>%
  map_dfr(run_random_damage_exp, x_train, y_train, x_test, y_test)
damage_tib %>% write_csv("fashion-mnist-damage-results.csv")
# Reload from disk and derive the share of correctly labeled observations.
damage_tib <- read_csv("fashion-mnist-damage-results.csv") %>%
  mutate(unbiased = 1-frac)
# Accuracy vs. fraction of correct labels, one line per model architecture.
ggplot(damage_tib, aes(x=unbiased, y=acc, color=exp_name)) +
  geom_line(size=1) + geom_point(color="white", size = 0.2) +
  ggthemes::scale_color_few("Model Type", palette = "Dark") +
  ggthemes::theme_few() +
  scale_x_continuous(labels = scales::percent) +
  labs(
    title = "Model Architectures and Random Bias",
    subtitle = "Fashion MNIST Dataset",
    x = "Correctly labeled training data\n(percent of 60,000 obs)",
    y = "Accuracy (OVA)"
  )
ggsave(filename=here::here("plot/acc-rand-bias.png"),
       width = 16 * (1/3),
       height = 9 * (1/3),
       dpi = 300)
# Same plot with AUC on the y axis.
ggplot(damage_tib, aes(x=unbiased, y=auc, color=exp_name)) +
  geom_line(size=1) + geom_point(color="white", size = 0.2) +
  ggthemes::scale_color_few("Model Type", palette = "Dark") +
  ggthemes::theme_few() +
  scale_x_continuous(labels = scales::percent) +
  labs(
    title = "Model Architectures and Random Bias",
    subtitle = "Fashion MNIST Dataset",
    x = "Correctly labeled training data\n(percent of 60,000 obs)",
    y = "AUC (OVA)"
  )
ggsave(filename=here::here("plot/auc-rand-bias.png"),
       width = 16 * (1/3),
       height = 9 * (1/3),
       dpi = 300)
8b831474891268d76acb4544f7f55617c2dc679d | 1700d8d60853c7ca7420bee4c5216c2dc379cd1c | /R/createGWCoGAPSSets.R | 69b4eb0b5418e5acf0d9877c6856668046f31bff | [] | no_license | genesofeve/GWCoGAPS | f7465cd97389e9b387c807a9209f756114881d7b | 5607ee8d104ec4fa453b255b8e98515fe05db1cc | refs/heads/master | 2021-01-20T16:44:44.424777 | 2016-10-04T15:36:58 | 2016-10-04T15:36:58 | 67,257,211 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,196 | r | createGWCoGAPSSets.R | #' createGWCoGAPSSets
#'
#'\code{createGWCoGAPSSets} factors whole genome data into randomly generated sets for indexing;
#'
#'@param data data matrix with unique rownames (gene identifiers)
#'@param nSets number of sets for parallelization
#'@param outRDA name of output file
#'@param keep logical indicating whether or not to save gene set list. Default is TRUE.
#'@export
#'@return list with randomly generated sets of genes from whole genome data
#'@examples \dontrun{
#'createGWCoGAPSSets(D,nSets=nSets)
#'}
#'
createGWCoGAPSSets <- function(data = D, # data matrix with unique rownames
                               nSets,    # number of sets for parallelization
                               # (the original default `nSets = nSets` was
                               # self-referential and could never evaluate)
                               outRDA = "GenesInCoGAPSSets.Rda", # name of output file
                               keep = TRUE # save gene set list to outRDA?
                               ){
  genes <- rownames(data)
  if (is.null(genes)) {
    stop("data must have rownames (gene identifiers)", call. = FALSE)
  }
  setSize <- floor(length(genes) / nSets)
  genesInSets <- vector("list", nSets)
  for (set in seq_len(nSets)) {
    # All but the last set are random samples of size setSize; the last set
    # takes every remaining gene, so nothing is lost to rounding.
    if (set != nSets) {
      genesInSets[[set]] <- sample(genes, setSize)
    } else {
      genesInSets[[set]] <- genes
    }
    # Remove assigned genes so the sets are disjoint.
    genes <- genes[!genes %in% genesInSets[[set]]]
  }
  # With duplicated rownames the %in% removal above drops all copies at once,
  # so the union of the sets will no longer match the input rownames.
  if (!identical(sort(unlist(genesInSets)), sort(rownames(data)))) {
    warning("Gene identifiers not unique!", call. = FALSE)
  }
  if (isTRUE(keep)) {
    save(list = "genesInSets", file = outRDA)
  }
  genesInSets
}
|
bc617ce89101f0c0824bd19b186d42d922a03bf4 | 552ef1b37b1689c0347071a4ac10f542cb47543f | /R/plot_ranked_facs.R | f9ef916f42ec767c84b3294003544e30770ff724 | [] | no_license | lhenneman/hyspdisp | d49fb29a3944ca0c50398c70ff21459fee247358 | 1763245269211f48da803d282720e6d818a2e619 | refs/heads/master | 2021-05-05T06:37:39.251922 | 2019-10-16T19:41:36 | 2019-10-16T19:41:36 | 118,811,581 | 5 | 3 | null | 2019-06-05T13:45:14 | 2018-01-24T19:27:40 | R | UTF-8 | R | false | false | 3,945 | r | plot_ranked_facs.R | plot_ranked_facs <- function( ranks.dt,
size.var,
size.name,
size.legend.range = NULL,
plot.title = NULL,
xlims = NULL,
ylims = NULL,
dist.scalebar = 400){
# -- limit data table to units under the rank -- #
ranks.dt.trim <- copy( ranks.dt)
# -- set name of variable size variable -- #
setnames( ranks.dt.trim, size.var, 'size.var')
# -- link with PP data if not already -- #
if( !( 'Longitude' %in% names( ranks.dt.trim) & 'Latitude' %in% names( ranks.dt.trim)))
stop( "Latitude and Longitude must be included in ranks.dt")
# -- find lat/lon range -- #
if( is.null( xlims) & is.null( ylims)){
latlonrange <- data.table( xlim = c( min( ranks.dt.trim$Longitude) - .1,
max( ranks.dt.trim$Longitude) + .1),
ylim = c( min( ranks.dt.trim$Latitude - .5),
max( ranks.dt.trim$Latitude + .1)))
} else
latlonrange <- data.table( xlim = xlims,
ylim = ylims)
# -- find size legend range -- #
if( is.null( size.legend.range) ){
size.legend.range <- c( 0, signif( max( ranks.dt.trim$size.var), 2))
}
# -- download states -- #
states <- data.table( map_data("state"))
# -- make the plot -- #
gg_coal <- ggplot() +
theme_bw() +
labs(title = plot.title) +
geom_polygon(data = states,
aes(x = long, y = lat, group = group),
fill = 'white',
color = "black",
size = .25) +
coord_sf(
xlim = latlonrange$xlim,
ylim = latlonrange$ylim,
datum = NA
) +
geom_point(data = ranks.dt.trim,
aes(x = Longitude,
y = Latitude,
size = size.var),
color = '#479ddd') +
scale_size_area(guide = guide_legend(title.position = "top"),
name = size.name,
max_size = 5,
limits = size.legend.range,
oob = squish
) +
theme(
plot.title = element_text(size = 16, hjust = 0.5), #element_blank(), #
axis.title = element_text(size = 24),
axis.text = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
legend.title = element_text(size = 10),
legend.title.align = 0.5,
legend.position = "bottom", #c(.22, .15),
legend.text = element_text(size = 8, angle = 0),
legend.background = element_rect(fill = 'transparent'),
legend.key.size = unit(.05, 'npc'),
legend.direction = 'horizontal',
# rect = element_blank(), #( fill = 'transparent'),
strip.text = element_text( size = 14),
strip.background = element_rect( fill = 'white')
) +
geom_rect( data = latlonrange,
aes(xmin = xlim[1] - 5,
xmax = xlim[1] + (xlim[2] - xlim[1]) / 2,
ymin = ylim[1] - 5,
ymax = ylim[1] + .5),
fill = 'white',
color = NA) +
ggsn::scalebar( location = 'bottomleft',
anchor = c( x = latlonrange$xlim[1] + .2, y = latlonrange$ylim[1] + .2),
x.min = latlonrange$xlim[1],
y.min = latlonrange$ylim[1],
x.max = latlonrange$xlim[2],
y.max = latlonrange$ylim[2],
dist = dist.scalebar / 2,
height = 0.02,
st.dist = 0.04,
st.size = 3,
dd2km = TRUE,
model = 'WGS84')
print( gg_coal)
return( list( plot = gg_coal,
latlonrange = copy( latlonrange)))
}
|
4ee0c464502a46ff3c8ff4295278968ceeb1c9f6 | 9e6a08f6dc509964994a90a7fef83b26959001be | /Code/feature_tree.R | 10ce32c3ba3d7cfc22f3a344fe9f934da48665b0 | [] | no_license | julianzaugg/mine_waste | d9b04c0ca5ba74f9018809a586b5ef07d7929734 | 257e8b0aca3b5206a8834057f09a7de414311666 | refs/heads/master | 2022-07-03T14:31:00.932032 | 2020-05-12T05:21:45 | 2020-05-12T05:21:45 | 206,080,124 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,318 | r | feature_tree.R | # Construct tree for abundant features and collapse
# See https://bioconductor.org/help/course-materials/2017/BioC2017/Day1/Workshops/Microbiome/MicrobiomeWorkflowII.html
# or paper "Bioconductor Workflow for Microbiome Data Analysis: from raw reads to community analyses"
# Detach every attached, non-base package from the search path.
#
# Fix over the original: attached packages are now found with a
# `^package:` regex on search(). The original used
# unlist(gregexpr("package:", search())) == 1, which can mis-align with
# search() whenever an entry contains "package:" more than once, and was
# needlessly convoluted besides.
#
# Base packages (stats, graphics, ...) are left attached. Called for its
# side effect; invisibly returns NULL.
detachAllPackages <- function() {
  basic.packages <- c("package:stats","package:graphics","package:grDevices","package:utils","package:datasets","package:methods","package:base")
  package.list <- grep("^package:", search(), value = TRUE)
  package.list <- setdiff(package.list, basic.packages)
  for (package in package.list) {
    detach(package, character.only = TRUE)
  }
  invisible(NULL)
}
# library("knitr")
# library("BiocStyle")
.cran_packages <- c("ggplot2", "gridExtra")
.bioc_packages <- c("dada2", "phyloseq", "DECIPHER", "phangorn")
# .inst <- .cran_packages %in% installed.packages()
# if(any(!.inst)) {
# install.packages(.cran_packages[!.inst])
# }
# .inst <- .bioc_packages %in% installed.packages()
# if(any(!.inst)) {
# source("http://bioconductor.org/biocLite.R")
# biocLite(.bioc_packages[!.inst], ask = F)
# }
#
# sapply(c(.cran_packages, .bioc_packages), require, character.only = TRUE)
library(phyloseq)
library(phangorn)
library(DECIPHER)
library(dada2)
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
# devtools::install_github("GuangchuangYu/treeio")
# BiocManager::install("treeio")
library(treeio); packageVersion("treeio")
# devtools::install_github("GuangchuangYu/ggtree")
# BiocManager::install("ggtree")
library(ggtree); packageVersion("ggtree")
# install.packages("ips")
library(ips)
# Set the working directory
# NOTE(review): hard-coded absolute path — this script only runs as-is on the
# author's machine; consider here::here() or a relative path.
setwd("/Users/julianzaugg/Desktop/ACE/major_projects/mine_waste/analysis/")
# Project helpers: provides generate_taxa_summary(), filter_summary_to_top_n(),
# df2matrix(), m2df(), make_heatmap() used below.
source("Code/helper_functions.R")
# Load the processed metadata
metadata.df <- read.csv("Result_tables/combined/other/combined_processed_metadata.csv", sep =",", header = T)
# Remove unknown commodity samples from metadata
metadata.df <- subset(metadata.df, Commodity != "Unknown")
# Set the Index to be the rowname
rownames(metadata.df) <- metadata.df$Index
# Load the OTU - taxonomy mapping file
otu_taxonomy_map.df <- read.csv("Result_tables/combined/other/combined_otu_taxonomy_map.csv", header = T)
# Since it takes a long time to calculate, and since it was already calculated, load the sequences for most abundant features per sample across all projects
# This should be the unique set of sequences for the top 10 features by relative abundance for each sample across all projects
# In the following steps, we will filter these features further as we don't want to build a tree on all of them as
# many are going to be very low abundance, from Unknown commoditity and, if we decide to filter by region, from projects targetting different regions
seqs <- getSequences("Result_other/combined/sequences/combined_most_abundant_assigned_features.fasta")
# names(seqs) <- seqs
# Load all the OTU metadata + abundance data
otu_data.df <- read.csv("Result_tables/combined/combined_counts_abundances_and_metadata_tables/combined_OTU_counts_abundances_and_metadata.csv",header = T)
# And load the genus data. We load the genus data as we may want to filter to those features that are only in the most abundant genera.
# This may correspond to other results we have generated that are limited to the most abundant genera
genus_data.df <- read.csv("Result_tables/combined/combined_counts_abundances_and_metadata_tables/combined_Genus_counts_abundances_and_metadata.csv",header = T)
# Remove unknown commodities
otu_data.df <- subset(otu_data.df, Commodity != "Unknown")
genus_data.df <- subset(genus_data.df, Commodity != "Unknown")
# Summarise the genus data for each study_accession
genus_taxa_summary.df <- generate_taxa_summary(mydata = genus_data.df, taxa_column = "taxonomy_genus", group_by_columns = c("Commodity", "study_accession"))
# Get the top 10 genera for each study_accession
genus_taxa_summary_filtered.df <- filter_summary_to_top_n(taxa_summary = genus_taxa_summary.df, grouping_variables = c("Commodity", "study_accession"),
abundance_column = "Mean_relative_abundance", my_top_n = 10)
# Union of top-10 genera over all (Commodity, study_accession) groups
top_10_genera.df <- melt(unique(genus_taxa_summary_filtered.df$taxonomy_genus))
names(top_10_genera.df) <- "Genus"
top_10_genera.df$Genus_silva_format <- top_10_genera.df$Genus
# Convert the QIIME/Greengenes-style rank prefixes (d__, p__, ..., g__) into
# SILVA-style numbered prefixes (D_0__ ... D_5__), one rank at a time.
top_10_genera.df$Genus_silva_format <- gsub("d__", "D_0__", top_10_genera.df$Genus_silva_format)
top_10_genera.df$Genus_silva_format <- gsub("p__", "D_1__", top_10_genera.df$Genus_silva_format)
top_10_genera.df$Genus_silva_format <- gsub("c__", "D_2__", top_10_genera.df$Genus_silva_format)
top_10_genera.df$Genus_silva_format <- gsub("o__", "D_3__", top_10_genera.df$Genus_silva_format)
top_10_genera.df$Genus_silva_format <- gsub("f__", "D_4__", top_10_genera.df$Genus_silva_format)
top_10_genera.df$Genus_silva_format <- gsub("g__", "D_5__", top_10_genera.df$Genus_silva_format)
# Persist the top-genera list for use by other scripts.
write.csv(top_10_genera.df,
file = "Result_tables/combined/other/combined_study_accession_top_10_genera.csv",
row.names = F,
quote = F)
# ------------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------------
## Generate iTOL table for tree visualisation for genera
# # TODO - make a separate script to do this for the silva entries rather than here.
# # This is because leaf nodes will be the silva IDs rather than genera (ideally unique)
#
# # Genus colour_for_each_commodity
# tree_summary_table.df <- subset(genus_data.df, taxonomy_genus %in% genus_taxa_summary_filtered.df$taxonomy_genus)
#
# # Since projects are processed separately, colours are not in the abundance + metadata table. They need to be added back.
# # To do this, we need to collect the samples that each OTU.ID is found in and the corresponding Commodities etc.
#
# tree_summary_table.df <- unique(tree_summary_table.df[c("Domain", "Phylum", "Class", "Order", "Family", "Genus",
# "taxonomy_phylum", "taxonomy_class", "taxonomy_order", "taxonomy_family", "taxonomy_genus",
# "Commodity", "Sample_type", "Sample_treatment")])
#
# # process commodity
# otu_commodity.df <- df2matrix(dcast(data = tree_summary_table.df, OTU.ID~Commodity,fill = 0))
# for (name in colnames(otu_commodity.df)){
# assigned_colour <- as.character(subset(unique(metadata.df[c("Commodity", "Commodity_colour")]), Commodity == name)$Commodity_colour)
# otu_commodity.df[,name][otu_commodity.df[,name] > 0] <- assigned_colour
# otu_commodity.df[,name][otu_commodity.df[,name] == 0] <- "#ffffff"
# }
#
# ------------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------------
# Interactive sanity check: dimensions before filtering.
dim(otu_data.df)
# Filter the feature data to those features that are in the most abundant genera for each study_accession
otu_data_top_features.df <- subset(otu_data.df, taxonomy_genus %in% unique(genus_taxa_summary_filtered.df$taxonomy_genus))
dim(otu_data_top_features.df)
# Filter the feature data to those features that are at least 0.1% abundance
otu_data_top_features.df <- subset(otu_data_top_features.df, Relative_abundance >= 0.001)
dim(otu_data_top_features.df)
# How many of the top genera are still represented after abundance filtering?
summary(unique(genus_taxa_summary_filtered.df$taxonomy_genus) %in% unique(otu_data_top_features.df$taxonomy_genus))
# Filter the feature data to those most abundant features that remain
# otu_data_top_features.df <- subset(otu_data_top_features.df, OTU.ID %in% names(seqs))
# dim(otu_data_top_features.df)
# (optional) Filter to features that are only in projects that targetted just the V4 region
# otu_data_top_features.df <- subset(otu_data_top_features.df, Final_16S_region == "V4")
# Calculate the prevalence of the remaining features in the full data (prior to filtering)
N_samples_per_project <- otu_data.df %>% group_by(study_accession) %>% summarise(N_samples = n_distinct(Sample))
N_samples_per_feature <- otu_data_top_features.df %>% group_by(study_accession, OTU.ID) %>% summarise(In_N_samples = n_distinct(Sample))
# head(N_samples_per_feature)
# head(N_samples_per_project)
# Per-project, per-feature prevalence = samples containing the feature / samples in project.
prevelances.df <- left_join(N_samples_per_feature, N_samples_per_project, by = "study_accession")
prevelances.df$Prevalence <- with(prevelances.df, In_N_samples/N_samples)
# subset(otu_data.df, OTU.ID == "9016f374255e870578c2fbb416ac42e6")
# unique(subset(otu_data.df, study_accession == "PRJNA339895")$Sample)
# subset(prevelances.df, OTU.ID == "9016f374255e870578c2fbb416ac42e6")
summary(unique(genus_taxa_summary_filtered.df$taxonomy_genus) %in% unique(otu_data_top_features.df$taxonomy_genus))
# Filter to those features that are in at least 20% of samples for a study_accession
otu_data_top_features.df <- otu_data_top_features.df %>% filter(OTU.ID %in% unique(prevelances.df[prevelances.df$Prevalence >= 0.2,]$OTU.ID))
# A number of the most abundant genera will likely no longer be represented by the remaining features at this point
# This is primarily due to the filtering by : sequenced region, features in the top 10 per sample
summary(unique(genus_taxa_summary_filtered.df$taxonomy_genus) %in% unique(otu_data_top_features.df$taxonomy_genus))
# NOTE(review): in the four length() calls below, the row index
# `unique(otu_data_top_features.df$taxonomy_genus) %in% ...` has length equal to
# the number of *unique* genera, not the number of rows, so the logical vector is
# recycled when subsetting rows. Possibly the outer unique() was not intended —
# verify against the analysis these counts feed into.
length(unique(otu_data_top_features.df[unique(otu_data_top_features.df$taxonomy_genus) %in% unique(genus_taxa_summary_filtered.df$taxonomy_genus),]$taxonomy_family))
length(unique(otu_data_top_features.df[unique(otu_data_top_features.df$taxonomy_genus) %in% unique(genus_taxa_summary_filtered.df$taxonomy_genus),]$taxonomy_order))
length(unique(otu_data_top_features.df[unique(otu_data_top_features.df$taxonomy_genus) %in% unique(genus_taxa_summary_filtered.df$taxonomy_genus),]$taxonomy_class))
length(unique(otu_data_top_features.df[unique(otu_data_top_features.df$taxonomy_genus) %in% unique(genus_taxa_summary_filtered.df$taxonomy_genus),]$taxonomy_phylum))
# Persist the filtered per-feature table used when annotating the tree.
write.csv(otu_data_top_features.df, "Result_tables/combined/other/combined_otu_metadata_for_tree.csv",row.names = F, quote = F)
# Filter the sequences to the final list of features
seqs_filtered <- seqs[names(seqs) %in% unique(otu_data_top_features.df$OTU.ID)]
print(paste0("Number of feature sequences remaining from top-per-sample set: ", length(seqs_filtered), "/", length(seqs)))
# Write the filtered feature sequences to file
writeXStringSet(DNAStringSet(seqs_filtered), file="Result_other/combined/sequences/combined_most_abundant_assigned_features_filtered.fasta")
# ------------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------------
## Generate iTOL table for tree visualisation for features
tree_summary_table.df <- subset(otu_data_top_features.df, OTU.ID %in% names(seqs_filtered))
# Since projects are processed separately, colours are not in the abundance + metadata table. They need to be added back.
# To do this, we need to collect the samples that each OTU.ID is found in and the corresponding Commodities etc.
tree_summary_table.df <- unique(tree_summary_table.df[c("OTU.ID", "Domain", "Phylum", "Class", "Order", "Family", "Genus", "Species",
"taxonomy_phylum", "taxonomy_class", "taxonomy_order", "taxonomy_family", "taxonomy_genus", "taxonomy_species",
"Commodity", "Sample_type", "Sample_treatment")])

# Build a per-OTU presence table for one metadata variable, encoded for iTOL:
# a cell holds the variable level's assigned colour (taken from metadata.df's
# "<variable>_colour" column) when the OTU occurs with that level, and white
# ("#ffffff") otherwise. Returns a data.frame with an OTU.ID column.
# This replaces three copy-pasted loops (Commodity / Sample_type / Sample_treatment).
make_presence_colour_table <- function(summary_df, variable, colour_column) {
  presence.m <- df2matrix(dcast(data = summary_df, formula = as.formula(paste0("OTU.ID~", variable)), fill = 0))
  colour_map.df <- unique(metadata.df[c(variable, colour_column)])
  for (name in colnames(presence.m)) {
    assigned_colour <- as.character(colour_map.df[colour_map.df[[variable]] == name, colour_column])
    presence.m[, name][presence.m[, name] > 0] <- assigned_colour
    presence.m[, name][presence.m[, name] == 0] <- "#ffffff"
  }
  m2df(presence.m, "OTU.ID")
}

# process commodity
otu_commodity.df <- make_presence_colour_table(tree_summary_table.df, "Commodity", "Commodity_colour")
# process sample type
otu_sample_type.df <- make_presence_colour_table(tree_summary_table.df, "Sample_type", "Sample_type_colour")
# process sample treatment
otu_sample_treatment.df <- make_presence_colour_table(tree_summary_table.df, "Sample_treatment", "Sample_treatment_colour")

# Merge all together
itol_data.df <- left_join(left_join(otu_commodity.df, otu_sample_type.df, by = "OTU.ID"), otu_sample_treatment.df, by = "OTU.ID")

# Add taxonomy data
temp <- unique(tree_summary_table.df[,!names(tree_summary_table.df) %in% c("Commodity", "Sample_treatment", "Sample_type")])
itol_data.df <- left_join(itol_data.df, temp, by = "OTU.ID")
# Create additional labels
# itol_data.df$Label <-
# Assign colours for each taxa level
my_colour_palette_15 <- c("#77b642","#7166d9","#cfa240","#b351bb","#4fac7f","#d44891","#79843a","#c68ad4","#d15a2c","#5ba7d9","#ce4355","#6570ba","#b67249","#9b4a6f","#df8398")

# Build a named palette for one taxonomy column: the 15-colour base palette is
# interpolated out to the number of unique values, named by those values (in
# order of first appearance). Deduplicates six copy-pasted constructions.
build_taxa_palette <- function(values) {
  uniq <- unique(values)
  setNames(colorRampPalette(my_colour_palette_15)(length(uniq)), uniq)
}

domain_palette <- build_taxa_palette(itol_data.df$Domain)
phylum_palette <- build_taxa_palette(itol_data.df$taxonomy_phylum)
class_palette <- build_taxa_palette(itol_data.df$taxonomy_class)
order_palette <- build_taxa_palette(itol_data.df$taxonomy_order)
family_palette <- build_taxa_palette(itol_data.df$taxonomy_family)
genus_palette <- build_taxa_palette(itol_data.df$taxonomy_genus)

# Vectorised name lookup replaces the per-element lapply of the original;
# as.character() drops the names carried over from the palette.
itol_data.df$Domain_colour <- as.character(domain_palette[as.character(itol_data.df$Domain)])
itol_data.df$Phylum_colour <- as.character(phylum_palette[as.character(itol_data.df$taxonomy_phylum)])
itol_data.df$Class_colour <- as.character(class_palette[as.character(itol_data.df$taxonomy_class)])
itol_data.df$Order_colour <- as.character(order_palette[as.character(itol_data.df$taxonomy_order)])
itol_data.df$Family_colour <- as.character(family_palette[as.character(itol_data.df$taxonomy_family)])
itol_data.df$Genus_colour <- as.character(genus_palette[as.character(itol_data.df$taxonomy_genus)])

write.csv(itol_data.df, "Result_tables/combined/other/itol_metadata.csv", quote = F, row.names = F)
# ------------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------------
# Align the feature sequences using the DECIPHER aligner
# Note - I have tried this and the alignment with default parameters was terrible (tree had long branches for some clades)
# alignment <- AlignSeqs(DNAStringSet(seqs_filtered), anchor=NA)
# Write the aligned feature sequences to file
# writeXStringSet(alignment, file="Result_other/combined/sequences/combined_most_abundant_assigned_features_filtered_aligned.fasta")
# alignment <- DNAStringSet(readDNAMultipleAlignment("Result_other/combined/sequences/combined_most_abundant_assigned_features_filtered_aligned.fasta"))
# alignment <- DNAStringSet(readDNAMultipleAlignment("Result_other/combined/sequences/combined_most_abundant_assigned_features_filtered_aligned_MAFFT.fasta"))
# ------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------
# Build tree with RAxML, also slow
# Requires input in different format, hence read.dna
# rax_alignment <- read.dna("Result_other/combined/sequences/combined_most_abundant_assigned_features_filtered_aligned.fasta",format="fasta",as.matrix=TRUE)
# alignment.rax.gtr <- raxml(rax_alignment,
# m="GTRGAMMAIX", # model
# f="a", # best tree and bootstrap
# p=1234, # random number seed
# x=2345, # random seed for rapid bootstrapping
# N=100, # number of bootstrap replicates
# file="alignment", # name of output files
# #exec="raxmlHPC-PTHREADS-SSE3", # name of executable
# exec = "/Applications/miniconda3/envs/raxml_8.2.12/bin/raxmlHPC-PTHREADS-SSE3",
# threads=2
# )
# Align with phangorn (can be slower) if optimising with optim.pml, e.g. ~3-4 hours for ~3500 features !
# phangAlign <- phyDat(as(alignment, "matrix"), type="DNA")
# dm <- dist.ml(phangAlign)
# treeNJ <- NJ(dm) # Note, tip order != sequence order
# fit = pml(treeNJ, data=phangAlign)
# fitGTR <- update(fit, k=4, inv=0.2)
# write.tree(fitGTR$tree, file = "Result_other/combined/trees/fitted_GTR.newick")
# write.tree(fitGTR$tree, file = "Result_other/combined/trees/fitted_GTR_MAFFT.newick")
# fitGTR <- optim.pml(fitGTR, model="GTR", optInv=TRUE, optGamma=TRUE,
# rearrangement = "stochastic", control = pml.control(trace = 0))
# write.tree(fitGTR$tree, file = "Result_other/combined/trees/fitted_GTR_optim_pml.newick")
detach("package:phangorn", unload=TRUE)
# Load alignment built externally
alignment <- DNAStringSet(readDNAStringSet("Additional_results/sina_aligner/sina_alignment_cleaned_man_filtered.fasta",format = "fasta"))
# Load tree built externally
# mytree <- read_tree("Additional_results/raxml/RAxML_bestTree.alignment")
mytree <- read_tree("Additional_results/sina_aligner/sina_tree_10col_10seq.newick")

# BUG FIX: mytree_relabeled was previously *used* here (write.tree of the
# ladderized tree) but only *defined* after the phyloseq object below, so this
# section errored with "object 'mytree_relabeled' not found". Define the
# genus-labelled copy of the tree first, then write both newick outputs.
mytree_relabeled <- mytree
mytree_relabeled$tip.label <- as.character(unlist(lapply(mytree_relabeled$tip.label, function(x) subset(otu_taxonomy_map.df, OTU.ID == x)[,"Genus"])))
write.tree(ladderize(mytree_relabeled),file = "Additional_results/sina_aligner/sina_tree_10col_10seq_ladderized.newick")
write.tree(ladderize(mytree_relabeled),file = "test.newick")

# Now that we have the tree, we want to collapse the tips to the genus level
# phyloseq has a function, tax_glom, that can do this.
# First we need to create a phyloseq object, which requires the following:
# otu_table - matrix, features can be either rows or columns (needs to be specified)
# sample_data - data.frame, rownames are the sample names in the otu_table
# tax_table - matrix, rownames must match the OTU names
# tree

# Create the OTU table (features x samples relative abundances; absent = 0)
my_otu_data.m <- subset(otu_data.df[c("OTU.ID","Sample", "Relative_abundance")], OTU.ID %in% names(seqs_filtered))
my_otu_data.m <- my_otu_data.m %>% spread(Sample,Relative_abundance,fill = 0)
my_otu_data.m <- df2matrix(my_otu_data.m)

# Sample data table (rows ordered to match the OTU table's sample columns)
my_sample_data.df <- metadata.df[colnames(my_otu_data.m),]

# Taxonomy table (rownames = OTU IDs, as phyloseq requires)
my_tax_data.m <- subset(otu_taxonomy_map.df, OTU.ID %in% names(seqs_filtered))[c("Domain", "Phylum", "Class", "Order", "Family", "Genus", "Species", "taxonomy_phylum", "taxonomy_class", "taxonomy_order", "taxonomy_family", "taxonomy_genus", "taxonomy_species", "OTU.ID")]
rownames(my_tax_data.m) <- my_tax_data.m$OTU.ID
my_tax_data.m$OTU.ID <- NULL
my_tax_data.m <- as.matrix(my_tax_data.m)

# Create the phyloseq object
ps <- phyloseq(otu_table(my_otu_data.m, taxa_are_rows = T),
               sample_data(my_sample_data.df),
               tax_table(my_tax_data.m),
               phy_tree(mytree))
# Interactive check: available taxonomy ranks in the phyloseq object.
rank_names(ps)
# table(tax_table(ps)[, "taxonomy_phylum"], exclude = NULL)
# Collapse the tree down to the genus level
phyloseq_genus_tree <- tax_glom(ps, taxrank = "taxonomy_genus", NArm = TRUE)
genus_tree <- phy_tree(phyloseq_genus_tree)
# genus_tree$tip.label <- as.character(unlist(lapply(genus_tree$tip.label, function(x) subset(otu_taxonomy_map.df, OTU.ID == x)[,"Genus"])))
# Build a per-OTU x Commodity presence/absence table for plotting next to the tree.
ggtree_data.df <- otu_data_top_features.df
names(ggtree_data.df)
ggtree_data.df <- ggtree_data.df[c("OTU.ID", "Commodity")]
ggtree_data.df <- as.data.frame(+(table(ggtree_data.df)!=0)) # binarise (presence / absence)
p <- ggtree(genus_tree, layout = "circular") + geom_tiplab(size=3, align=F, linesize=.5)
p
gheatmap(p, data = ggtree_data.df)
ggtree_data.df <- dcast(ggtree_data.df, formula =OTU.ID~ Commodity)
ggtree_data.df <- df2matrix(ggtree_data.df)
ggtree_data.df[ggtree_data.df > 0] <- 1
# NOTE(review): after df2matrix() ggtree_data.df is a matrix, so the `$` removal
# and `$OTU.ID` rowname access below will error ($ is invalid on atomic matrices)
# — confirm whether these two lines predate the df2matrix() call.
ggtree_data.df$run_accession <- NULL
rownames(ggtree_data.df) <- ggtree_data.df$OTU.ID
# layout one of 'rectangular', 'slanted', 'fan', 'circular', 'radial', 'equal_angle' or 'daylight'
p <- ggtree(genus_tree, layout = "rectangular", branch.length='rate')
# NOTE(review): `data = ` is left empty here (call is incomplete and will fail at
# runtime) and `?gheatmap` is an interactive help lookup — both look like
# leftover exploratory code.
gheatmap(p, data = )
?gheatmap
# seqs["27975adba200137bab8ad346917aee84"]
# length(genus_tree$tip.label) == length(unique(subset(otu_taxonomy_map.df, OTU.ID %in% genus_tree$tip.label)$taxonomy_genus))
# genus_tree$tip.label <- unlist(lapply(genus_tree$tip.label, function(x) subset(otu_taxonomy_map.df, OTU.ID == x)[,"Genus"]))
# Create metadata table for tree. Need columns (tracks) for each variable value. need
x <- data.frame(label = genus_tree$tip.label,as.data.frame(my_tax_data.m)[genus_tree$tip.label,])
## convert the phylo object to a treeio::treedata object
genus_tree <- treedata(phylo = genus_tree)
## add the annotation
genus_tree <- full_join(genus_tree, x, by="label")
# NOTE(review): `ps2` is not defined anywhere in this script as far as visible —
# possibly `phyloseq_genus_tree` was intended. These three plot calls and the
# write.tree() below will fail until that is resolved.
ggtree(ps2) +geom_text(aes(x=branch, label=Class, color = Phylum)) + coord_polar(theta="y") #geom_tiplab()
plot_tree(ps2,shape = NULL, ladderize = "left",label.tips = "Genus") #+ coord_polar(theta="y")
plot_tree(ps, ladderize = "left",label.tips = "Genus") #+ coord_polar(theta="y")
# Write the collapse genus tree to file
write.tree(phy_tree(ps2), file = "Result_other/combined/trees/genus_tree.newick")
# ------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------
# Build a genus x study_accession heatmap of mean relative abundances,
# restricted to genera present as tips in the collapsed tree.
genus_tree <- phy_tree(phyloseq_genus_tree)
# NOTE(review): relabelling tips with taxonomy_genus may produce duplicate tip
# labels if two representative OTUs map to the same genus — verify downstream
# consumers tolerate that.
genus_tree$tip.label <- as.character(unlist(lapply(genus_tree$tip.label, function(x) subset(otu_taxonomy_map.df, OTU.ID == x)[,"taxonomy_genus"])))
# Interactive lookups of specific OTU IDs (exploratory).
otu_taxonomy_map.df[with(otu_taxonomy_map.df, grepl("36e33cb85", OTU.ID)),]
otu_taxonomy_map.df[with(otu_taxonomy_map.df, grepl("48c2df70d", OTU.ID)),]
otu_taxonomy_map.df[with(otu_taxonomy_map.df, grepl("4b1f67b", OTU.ID)),]
# Wide matrix: rows = genus, columns = study_accession, values = mean relative abundance.
heatmap.m <- genus_taxa_summary.df[c("study_accession", "taxonomy_genus","Mean_relative_abundance")]
heatmap.m <- heatmap.m[heatmap.m$taxonomy_genus %in% genus_tree$tip.label ,]
heatmap.m <- heatmap.m %>% spread(study_accession, Mean_relative_abundance,fill = 0)
heatmap.m <- df2matrix(heatmap.m)
# Sanity checks: tree tips vs heatmap rows should correspond.
length(genus_tree$tip.label)
dim(heatmap.m)
genus_tree$tip.label %in% rownames(heatmap.m)
# Per-study annotation columns (commodity, sample type/treatment, 16S region info).
heatmap_metadata.df <- unique(metadata.df[,c("Commodity", "study_accession","Sample_type","Sample_treatment","Final_16S_region",
"Primers_for_16S_samples_from_manually_checking_database_or_publication",
"Top_region_from_BLAST_raw_combined",
grep("colour", names(metadata.df), value =T)), drop = F])
names(heatmap_metadata.df)[names(heatmap_metadata.df) == "Primers_for_16S_samples_from_manually_checking_database_or_publication"] <- "Published_16S_region"
names(heatmap_metadata.df)[names(heatmap_metadata.df) == "Top_region_from_BLAST_raw_combined"] <- "Inferred_16S_region"
heatmap_metadata.df <- subset(heatmap_metadata.df, Commodity != "Unknown")
rownames(heatmap_metadata.df) <- heatmap_metadata.df$study_accession
# Render the annotated heatmap (project helper). Abundances are scaled to percent.
make_heatmap(heatmap.m*100,
mymetadata = heatmap_metadata.df,
filename = paste0("test_heatmap.pdf"),
variables = c("Commodity","Sample_type","Sample_treatment", "Published_16S_region", "Inferred_16S_region", "Final_16S_region"),
column_title = "Study accession",
row_title = "Genus",
plot_height = 30,
plot_width = 15,
cluster_columns = T,
cluster_rows = T,
column_title_size = 10,
row_title_size = 10,
my_annotation_palette = my_colour_palette_15,
legend_labels = c(c(0, 0.001, 0.005,0.05, seq(.1,.5,.1))*100, "> 60"),
my_breaks = c(0, 0.001, 0.005,0.05, seq(.1,.6,.1))*100,
legend_title = "Mean relative abundance %",
discrete_legend = T,
palette_choice = 'purple',
show_row_dend = F,
row_dend_width = unit(25, "cm")
)
|
8999b52fd1b9a721fb5e9d95ff6c909033b353de | 9347791c6ee1d84399f3cc8be94041e3193b5439 | /aphrc/hh/idVars.R | cab1999c1d4791cc6e3f3a330a4bb8e774a3e1ff | [] | no_license | CYGUBICKO/projects | 6362bac0051208fda143630de27086ecd3210b19 | 5b819097f490ec6f919cdcfaeeff8e5ed41846c6 | refs/heads/master | 2020-04-16T19:50:46.393690 | 2019-07-16T17:32:49 | 2019-07-16T17:32:49 | 165,876,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 923 | r | idVars.R | library(dplyr)
library(tibble)
library(DT)
# Duplicate IDs
# For every ID-like column of working_df, count how many entries are duplicates
# (excluding missing and empty-string values) and express the count as a
# percentage of rows.
id_vars <- grep("id", names(working_df), value = TRUE, ignore.case = TRUE)
id_df <- (working_df
# all_of() makes the external-vector selection explicit (bare `select(id_vars)`
# is ambiguous and deprecated in tidyselect).
%>% select(all_of(id_vars))
# BUG FIX: the original condition (!is.na(x)|x!="") evaluated to NA for NA
# entries — so any column with duplicated NAs summed to NA — and never actually
# excluded empty strings. The intended filter was !(is.na(x) | x == ""),
# i.e. !is.na(x) & x != "".
%>% sapply(function(x)sum(duplicated(x) & !is.na(x) & x != ""))
%>% enframe(name = "variable")
%>% mutate(prop_dup = round(value/nrow(working_df), digits = 3) * 100)
%>% rename(dup_count = value)
)
# Human-readable labels, then an interactive DT table for the report.
id_dup_dis <- (id_df
%>% varLabs()
%>% as.data.frame()
)
id_dup_dis <- datatable(id_dup_dis)
## Objects to report
# id_dup_dis
# Keep necessary files only
# rdnosave()
# Persist all objects needed downstream (helper functions, summary tables and
# display objects) into the .RData file named by `rdaname`, which is defined
# upstream of this script — TODO confirm where rdaname is set.
save(file=rdaname
, working_df
, codebook
, missPropFunc
# Global functions
, saveXlsx
, varLabs
, extractLabs
, propFunc
, tabsFunc
, recodeLabs
, extractIssues
, file_prefix
# Working df chunk
, miss_prop_df
, miss_prop_df_html
, no_vars_droped
# Missing values chunk
, miss_dist_plot
# ID variables
, id_dup_dis
)
|
eaf6f29b8f63c80835c2f55c467dc6c83a7ed697 | 401a068f6d221792df986b560085bd45f42a62df | /R/merge-tidy_cpgs.R | 0d1cb1ca6a366b13c6bce6fbe649261eee2d7525 | [] | no_license | tanaylab/gpatterns | 33f8e7740e0407ec4be02e342866350c66d97ff7 | aa792f4a2ee9d53be7da75943904049514d100fb | refs/heads/master | 2023-05-25T05:08:06.078673 | 2023-05-14T13:39:10 | 2023-05-14T13:39:10 | 205,788,385 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,389 | r | merge-tidy_cpgs.R | #' merge tidy cpgs directories
#'
#' @param dirs tidy_cpgs directories
#' @param out_dir output directory
#' @param nbins number of genomic bins for output tidy_cpgs
#' @param paired_end is the run paired end
#' @param stats_dir directory for tidy_cpgs stats
#' @param filter_dups_bin binary for filter_dups_cpgs python script
#'
#' @return NULL#'
#'
#' @export
gpatterns.merge_tidy_cpgs <- function(dirs, out_dir=tempdir(), nbins=nrow(gintervals.all()), paired_end = TRUE, stats_dir = paste0(out_dir, "/stats"), filter_dups_bin=system.file("import", "filter_dups_cpgs.py", package="gpatterns")){
genomic_bins <- gbin_intervals(intervals = gintervals.all(), nbins)
fn_df <- map_dfr(dirs, ~ tibble(fn = list.files(.x, pattern="tcpgs.gz", full.names=TRUE)) %>% mutate(name = gsub(".tcpgs.gz$", "", basename(fn))) %>% separate(name, c("chrom", "start", "end"), sep="_") %>% mutate(start = as.numeric(start), end = as.numeric(end))) %>% select(chrom, start, end, everything())
bins_df <- genomic_bins %>% gintervals.neighbors1(fn_df, maxneighbors=nrow(fn_df), mindist=0, maxdist=0) %>% filter(dist == 0) %>% distinct(chrom, start, end, chrom1, start1, end1, fn)
dir.create(out_dir, showWarnings = FALSE)
dir.create(stats_dir, showWarnings = FALSE)
bins_df_cmd <- bins_df %>%
group_by(chrom, start, end) %>%
summarise(
end_cond = glue("$4 != \"-\" && $4 <= $3 && $4 <= {end[1]} && $4 >= {start[1]}"),
start_cond = glue("$3 != \"-\" && ($3 < $4 || $4 == \"-\") && $3 <= {end[1]} && $3 >= {start[1]}"),
awk_cmd = glue("awk -F',' 'NR==1 || ({start_cond}) || ({end_cond})'"),
sort_cmd = glue("awk 'NR==1; NR > 1 {{print $0 | \"sort --field-separator=, -k2,7 -k1 -k9\"}}'"),
stats_fn = glue("{stats_dir}/{chrom[1]}_{start[1]}_{end[1]}.stats"),
out_fn = glue("{out_dir}/{chrom[1]}_{start[1]}_{end[1]}.tcpgs.gz"),
fns = paste(fn, collapse=" "),
filter_dups_cmd = glue("{filter_dups_bin} -i - -s {stats_fn} --sorted"),
filter_dups_cmd = ifelse(paired_end, filter_dups_cmd, paste(filter_dups_cmd, "--only_R1")),
cmd = glue("gzip -d -c {fns} | {awk_cmd} | {sort_cmd} | {filter_dups_cmd} | gzip -c > {out_fn}"))
plyr::l_ply(bins_df_cmd$cmd, function(x) system(x), .parallel=TRUE)
} |
790eca70a1fd0127145de5b493822fac5cdcd320 | d3d0c8fc2af05f1ba3ed6d29b9abaaad80b0ca33 | /man/randomLetter.Rd | 857136f05fe688fd00f9c56a26f99c0863ad80ce | [] | no_license | natehawk2/NateUtils | 2e5e16cb244f52e8ebf53b9ee751bb33550cc20d | 5e4991c35e5ee5d9479b23763183a2aafc026b8b | refs/heads/main | 2023-01-13T10:06:18.617234 | 2020-11-13T16:23:43 | 2020-11-13T16:23:43 | 309,730,938 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 671 | rd | randomLetter.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/randomLetter.R
\name{randomLetter}
\alias{randomLetter}
\title{Random Letter Generator}
\usage{
randomLetter(n, letter_probs = rep(1/26, 26), wreplacement = TRUE)
}
\arguments{
\item{n}{number of desired random letters}
\item{letter_probs}{a vector of probabilities for each of the 26 letters (optional)}
\item{wreplacement}{if you want multiple letters, logical TRUE/FALSE for sampling with replacement}
}
\value{
a random letter based on probabilities
}
\description{
Random Letter Generator
}
\examples{
randomLetter(1) #select 1 random letter
randomLetter(2) #select 2 random letters
}
|
a749537a65cb8546eb11a5cee05ecf4454587524 | d50108e8915254f9e1ddb5642dc57ba522559755 | /R/pull_normalised_data_comparison.R | 66e6129111b87281c7f4df598cea966cee78a578 | [] | no_license | craigpoku/normalistioncomparison | 6800f803c0282e7bfd4c4d5cb5df1b7b0b4b4af6 | f935ec112e6a04c309db59b5777831c3dc9160f7 | refs/heads/main | 2023-06-18T23:16:41.865144 | 2021-07-15T08:49:55 | 2021-07-15T08:49:55 | 386,079,125 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,295 | r | pull_normalised_data_comparison.R | #' Pull normalised data comparison function
#' Should you want to run either the deweather or rmweather function, this code will allow you create a dataframe with
#' the required sites and prepare it to be normalised for a single pollutant choice
#'
#' @param df_de Output of normalised pollutants with deweather module
#' @param df_rm Output of normalised pollutants with rmweather module
#' @param df3_raw Output of raw pollutants
#' @param pollutant_type Input for pollutant to test code - only been tested for one pollutant (tested for nox)
#' @param site List of sites used in normalisation code
#' @keywords pull_normalised_statistics
#' @export
#' @examples
#' pull_normalised_data_comparison()
pull_normalised_data_comparison = function(df_de, df2_rm, df3_raw, pollutant_type, site){
site_data_de = df[[site]]
site_data_rm = df2[[site]]
site_data_de_pre = site_data_de %>%
mutate(site = site)
site_data_rm_pre = site_data_rm$normalised %>%
mutate(site = site)
df3 = df3 %>%
filter(code == site) %>%
select(code, get(pollutant_type), date) %>%
rename(site = code)
left_join(site_data_de_pre, site_data_rm_pre, by = c("date", "site")) %>%
left_join(., df3, by = c("date", "site")) %>%
rename(rmweather_predict = value_predict)
} |
30e12aec7d742383ec0d2ad6d4179e6c93a7db7e | e840bfee518f7764773ab79c700fa8d883db1148 | /inst/shiny/examples/1_simple_addition/Addition.R | 75cdc2374b7c07355af24f9897b7636edaae40b1 | [
"MIT"
] | permissive | jonas-hag/tidymodules | 642101ddefd310a13eafd7e58d2f944c064fc90b | 988b567228e5ebdfb03181448f0cdb8e98594e78 | refs/heads/master | 2020-12-26T12:15:54.222048 | 2020-01-29T11:03:16 | 2020-01-29T11:03:16 | 237,506,269 | 0 | 0 | null | 2020-01-31T19:59:19 | 2020-01-31T19:59:18 | null | UTF-8 | R | false | false | 1,471 | r | Addition.R |
# Addition: a tidymodules Shiny module that adds a number received on its input
# port to a number chosen by the user with a slider, displaying and exposing the
# sum on its "total" output port.
Addition <- R6::R6Class(
  "Addition",
  inherit = tidymodules::TidyModule,
  public = list(
    # Declare the module's ports: one input ("left", the number supplied by an
    # upstream module) and one output ("total", the computed sum).
    initialize = function(...){
      # mandatory
      super$initialize(...)
      self$definePort({
        self$addInputPort(
          name = "left",
          description = "input value to add to the user selected number",
          sample = 5)
        self$addOutputPort(
          name = "total",
          description = "Sum of the two numbers",
          sample = 6)
      })
    },
    # Module UI: shows the incoming value, a slider for the number to add, and
    # the resulting sum. IDs are namespaced via self$ns() so multiple instances
    # of the module can coexist.
    ui = function() {
      div(style="width:30%;background:lightgrey;border: solid;border-color: grey;padding: 20px;",
        "Module input : ",textOutput(self$ns("left")),
        " + ",sliderInput(self$ns("right"),label = "Number to add",min = 1,max = 100,value = 1),
        " = ",textOutput(self$ns("total"))
      )
    },
    # Module server: computes the sum reactively and publishes it on the
    # "total" output port.
    server = function(input, output, session){
      # Mandatory
      super$server(input, output, session)
      # Sum of the first input port's value and the slider; req() guards until
      # both the slider and the upstream connection are available.
      sum_numbers <- reactive({
        req(input$right)
        req(self$getInput(1))
        # getInput(1) returns a reactive; call it to get the current value.
        as.numeric(self$getInput(1)())+as.numeric(input$right)
      })
      # Echo the upstream input value.
      output$left <- renderText({
        req(self$getInput(1))
        self$getInput(1)()
      })
      output$total <- renderText({
        sum_numbers()
      })
      # Expose the reactive sum on the "total" output port for downstream modules.
      self$assignPort({
        self$updateOutputPort(
          id = "total",
          output = sum_numbers)
      })
    }
  )
)
5674dc3bc7d48a0f315b43fd73168e099720ef1a | daeee1f6fa2191038550e6dde443d6554bce2c61 | /R/methods.R | 046a1541196ecd801497ded4449fc078d3e37ebd | [
"MIT"
] | permissive | nfultz/distributions3 | a58f88146c81a70ab09e43c3f2762e8dcd42c52a | 945dcecd6488329127bc5585b6042cf9ec4dba81 | refs/heads/master | 2020-08-24T20:17:53.012731 | 2020-06-26T03:09:45 | 2020-06-26T03:09:45 | 216,898,661 | 0 | 0 | NOASSERTION | 2019-10-22T19:57:28 | 2019-10-22T19:57:27 | null | UTF-8 | R | false | false | 4,984 | r | methods.R | # things to sort out with the generics
# - can i get stats::generics() to use ellipsis::check_dots_used()?
# - pdf() conflict with grDevices::pdf()
#' Draw a random sample from a probability distribution
#'
#' @param d A probability distribution object such as those created by
#' a call to [Bernoulli()], [Beta()], or [Binomial()].
#' @param n The number of samples to draw. Should be a positive
#' integer. Defaults to `1L`.
#' @param ... Unused. Unevaluated arguments will generate a warning to
#' catch mispellings or other possible errors.
#'
#' @examples
#'
#' X <- Normal()
#'
#' random(X, 10)
#' @export
random <- function(d, n = 1L, ...) {
ellipsis::check_dots_used()
UseMethod("random")
}
#' Evaluate the probability density of a probability distribution
#'
#' For discrete distributions, the probabilty mass function. `pmf()`
#' is an alias.
#'
#' @inheritParams random
#'
#' @param x A vector of elements whose probabilities you would like to
#' determine given the distribution `d`.
#'
#' @return A vector of probabilities, one for each element of `x`.
#'
#' @examples
#'
#' X <- Normal()
#'
#' pdf(X, c(1, 2, 3, 4, 5))
#' pmf(X, c(1, 2, 3, 4, 5))
#'
#' log_pdf(X, c(1, 2, 3, 4, 5))
#' @export
pdf <- function(d, x, ...) {
ellipsis::check_dots_used()
UseMethod("pdf")
}
#' @rdname pdf
#' @export
log_pdf <- function(d, x, ...) {
ellipsis::check_dots_used()
UseMethod("log_pdf")
}
#' @rdname pdf
#' @export
pmf <- function(d, x, ...) {
pdf(d, x, ...)
}
#' Evaluate the probability density of a probability distribution
#'
#' For discrete distributions, the probabilty mass function.
#'
#' @inheritParams random
#'
#' @param x A vector of elements whose cumulative probabilities you would
#' like to determine given the distribution `d`.
#'
#' @return A vector of probabilities, one for each element of `x`.
#'
#' @examples
#'
#' X <- Normal()
#'
#' cdf(X, c(1, 2, 3, 4, 5))
#' @export
cdf <- function(d, x, ...) {
ellipsis::check_dots_used()
UseMethod("cdf")
}
#' Find the quantile of a probability distribution
#'
#' TODO: Note that this current masks the [stats::quantile()] generic
#' to allow for consistent argument names and warnings when arguments
#' disappear into `...`.
#'
#' @inheritParams random
#'
#' @param p A vector of probabilites.
#'
#' @return A vector of quantiles, one for each element of `p`.
#'
#' @examples
#'
#' X <- Normal()
#'
#' cdf(X, c(0.2, 0.4, 0.6, 0.8))
#' @export
quantile <- function(d, p, ...) {
ellipsis::check_dots_used()
UseMethod("quantile")
}
#' Compute the moments of a probability distribution
#'
#' @param d A probability distribution object such as those created by
#' a call to [Bernoulli()], [Beta()], or [Binomial()].
#'
#' @return A numeric scalar
#' @export
#'
variance <- function(d, ...) {
ellipsis::check_dots_used()
UseMethod("variance")
}
#' @rdname variance
#' @export
skewness <- function(d, ...) {
ellipsis::check_dots_used()
UseMethod("skewness")
}
#' @rdname variance
kurtosis <- function(d, ...) {
ellipsis::check_dots_used()
UseMethod("kurtosis")
}
#' Compute the likelihood of a probability distribution given data
#'
#' @param d A probability distribution object such as those created by
#' a call to [Bernoulli()], [Beta()], or [Binomial()].
#' @param x A vector of data to compute the likelihood.
#' @param ... Unused. Unevaluated arguments will generate a warning to
#' catch mispellings or other possible errors.
#'
#' @return the likelihood
#'
#' @examples
#'
#' X <- Normal()
#'
#' likelihood(X, c(-1, 0, 0, 0, 3))
#' @export
likelihood <- function(d, x, ...) {
exp(log_likelihood(d, x, ...))
}
#' Compute the log-likelihood of a probability distribution given data
#'
#' @inheritParams likelihood
#'
#' @return the log-likelihood
#'
#' @examples
#'
#' X <- Normal()
#'
#' log_likelihood(X, c(-1, 0, 0, 0, 3))
#' @export
log_likelihood <- function(d, x, ...) {
sum(log_pdf(d, x, ...))
}
#' Fit a distribution to data
#'
#' Approximates an empirical distribution with a theoretical one
#'
#' @inheritParams likelihood
#'
#' @return A distribution (the same kind as `d`) where the parameters
#' are the MLE estimates based on `x`.
#'
#' @examples
#'
#' X <- Normal()
#'
#' fit_mle(X, c(-1, 0, 0, 0, 3))
#' @export
fit_mle <- function(d, x, ...) {
ellipsis::check_dots_used()
UseMethod("fit_mle")
}
#' Compute the sufficient statistics of a distribution from data
#'
#' @inheritParams fit_mle
#'
#' @return a named list of sufficient statistics
suff_stat <- function(d, x, ...) {
ellipsis::check_dots_used()
UseMethod("suff_stat")
}
#' Return the support of a distribution
#'
#' @param d A probability distribution object such as those created by
#' a call to [Bernoulli()], [Beta()], or [Binomial()].
#' @return A vector with two elements indicating the range of the support.
#'
#' @export
support <- function(d){
if(!is_distribution(d))
stop("d must be a supported distribution object")
UseMethod("support")
}
|
9aa8af1a06a07bace254fe504ac61f2df3ed8e53 | b7c24dc504a1807b046065a08e3248cb6a26ba48 | /examples/examples.R | d3dd0f02f80fafd4fbd7c519469b2bdbd08ef57a | [] | no_license | csv/ddr | 0b9a4404198812b8fc9e7e4b6fc0301bdd925dd7 | 94e13b4e1cad30b198936d6eae1f30244fbd9bca | refs/heads/master | 2020-12-24T13:53:26.841085 | 2013-06-12T15:57:27 | 2013-06-12T15:57:27 | 9,332,032 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,891 | r | examples.R | library("ddr")
ddr_init()
#==================================================================================#
# basics
play(piano$C3)
play(chop(piano$C3, bpm=100, count=1/8))
play(reverse(piano$C3))
play(pitch(piano$C3, -36))
play(loop(chop(piano$C3, bpm=100, count=1/8), 16))
play(chord(C3, piano, "maj", bpm=100, count=4))
#==================================================================================#
# sound sequencing -- call me maybe
# sounds
c1 <- chord(A4, sweeplow, "maj", bpm=119, count=1)
c2 <- chord(E4, sweeplow, "maj", bpm=119, count=1)
c3 <- chord(B4, sweeplow, "maj", bpm=119, count=1)
c4 <- chord(C.4, sweeplow, "min", bpm=119, count=1)
wavs <- list(c1, c2, c3, c4, roland$HHC, roland$TAM, roland$HHO, roland$BD1, roland$SD1)
# sequences
A <- c(1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0)
E <- c(0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0)
B <- c(0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0)
C.m<-c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
H <- c(0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,1,1)
T <- c(0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0)
O <- c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1)
K <- c(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0)
S <- c(0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0)
seqs <- list(A, E, B, C.m, H, T, O, K, S)
callmemaybe <- sequence(wavs, seqs, bpm=59.5, count=1/16)
play(loop(callmemaybe, 4))
#==================================================================================#
# random drum loops
wavs <- list(roland$HHC, roland$TAM, roland$HHO, roland$BD1, roland$SD1)
# sequences
H <- rnorm(32, mean=0.5, sd=0.15)
T <- rbinom(32, 1, prob=0.05)
O <- rbinom(32, 1, prob=0.075)
K <- rbinom(32, 1, prob=0.2)
S <- rbinom(32, 1, prob=0.3)
seqs <- list(H, T, O, K, S)
drum_loop <- sequence(wavs, seqs, bpm=59.5, count=1/16)
play(loop(drum_loop, 4))
#==================================================================================#
# data sonfication
data('ChickWeight')
cw <- ChickWeight
# arpeggi
chicks <- arpeggidata(sqrt(cw$weight),
blip,
scale="Emajor",
bpm=150,
count=1/32)
play(chicks)
#==================================================================================#
bpm <- 280
ct <- 1/4
rate <- arpeggidata(fms_data$rate,
sinewave,
low_note="",
high_note="",
descending = FALSE,
scale="Cmajor",
remove=NULL,
bpm=bpm,
count=ct)
writeWave(rate, "rate.wav")
ceil <- arpeggidata(fms_data$dist_to_ceiling,
sinewave,
low_note="",
high_note="",
descending = TRUE,
scale="Emajor",
remove=NULL,
bpm=bpm,
count=ct)
writeWave(ceil, "ceiling.wav")
gen_chords <- function(z) {
if (z < 0) {
if (z <= -0.5) {
c <- chord(A3, sinewave,
"min", bpm=bpm,
count=ct)
} else {
c <- chord(A4, sinewave,
"min", bpm=bpm,
count=ct)
}
} else {
if (z >= 0.5) {
c <- chord(C4, sinewave,
"maj", bpm=bpm,
count=ct)
} else {
c <- chord(C3, sinewave,
"maj", bpm=bpm,
count=ct)
}
}
return(c)
}
chords <- llply(fms_data$z_change, gen_chords, .progress="text")
bind_list_of_waves <- function(x, y) {
bind(x, y)
}
reduce_waves <- function(list_of_waves) {
Reduce(bind_list_of_waves, list_of_waves)
}
chords <- reduce_waves(chords)
writeWave(chords, "chords.wav")
|
3da9271f8d7975663fc263092cae5c360392d3f1 | d3c7ad01ca0a6461c2520babbebcd562f49c49bf | /plot3.R | 89fc3587f6f05943881b3527482719b51dfb1a7f | [] | no_license | gancedo/ExData_Plotting1 | 6f4e2f329a9df06c0f8490431fd1874b7bd3638a | aa8e2b020c28ce1796a2c82ed195dfde1c436f39 | refs/heads/master | 2021-01-16T00:47:44.438107 | 2015-02-08T12:28:34 | 2015-02-08T12:28:35 | 30,202,285 | 0 | 0 | null | 2015-02-02T18:48:36 | 2015-02-02T18:48:36 | null | UTF-8 | R | false | false | 1,617 | r | plot3.R |
# Type this if you need to clear your workspace.
# rm(list=ls())
# Read the data; the file 'household_power_consumption.txt'
# should be in your working directory.
data <- read.table(file= "household_power_consumption.txt", header=TRUE,
colClasses=c("character","character",
"numeric","numeric","numeric","numeric",
"numeric","numeric","numeric"),
dec=".",
sep = ";", quote = "",
na.strings = "?",
strip.white=TRUE,
stringsAsFactors = FALSE)
# We only need the data for 1 and 2 Feb 2007.
data <- data[data$Date=="1/2/2007" | data$Date=="2/2/2007",]
#data <- data %>%
# mutate(dateTime = paste(Date,Time))
dateTime <- as.POSIXlt(strptime(paste(data$Date,data$Time), "%d/%m/%Y %H:%M:%S"))
# Plot3
# Stricltly speaking, it is not necessary to
# add the mfrow parameter, but this way we make
# sure that we have one single graph.
par(mfrow=c(1,1), cex=.75)
plot(dateTime, data$Sub_metering_1,
type="l", ann=FALSE)
lines(dateTime, data$Sub_metering_2, col="red")
lines(dateTime, data$Sub_metering_3, col="blue")
title(ylab="Energy sub metering")
legend("topright",
c("Sub_metering_1 ", "Sub_metering_2 ", "Sub_metering_3 "),
y.intersp=.8,
lty=c(1,1),
col=c("black","red","blue"))
# Write the graph to a file.
# The width and length are 480 pixels by default.
dev.copy(png, file="plot3.png")
dev.off() |
d01ad6aee2ee4f6b11e1f75f1da674ccb33aee2e | 86997936c51093b9b7e39d52e83b11accd03c1f8 | /Week10/Homework 5 Solutions-2.R | e7917d9eb7213e7caacfe451e3077063e57861cf | [] | no_license | kempernb/Data-Mining | ad2815e3d9f3fd701f4883b9e86c06de2bc07959 | 013abc8e0b841749b9e795b9ec962a5a73b5e881 | refs/heads/main | 2023-02-13T06:14:49.488635 | 2021-01-03T02:27:44 | 2021-01-03T02:27:44 | 326,308,711 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,266 | r | Homework 5 Solutions-2.R | spam.df <- read.csv("spambase.csv", header = TRUE, stringsAsFactors = TRUE)
par(mfcol = c(1,1))
# convert Spam to a factor
spam.df$Spam <- as.factor(spam.df$Spam)
summary(spam.df)
# rename variables
library(dplyr)
spam.df <- rename(spam.df, "re" = re., "C_semicolon" = C., "C_parenthesis" = C..1, "C_bracket" = C..2, "C_exclamation" = C..3,
"C_dollar" = C..4, "C_pound" = C..5)
t(t(names(spam.df)))
# partition the data
set.seed(7)
train.rows <- sample(nrow(spam.df), nrow(spam.df)*0.6)
train.data <- spam.df[train.rows, ]
valid.data <- spam.df[-train.rows, ]
# create the full classification tree
library(rpart)
library(rpart.plot)
spam.ct <- rpart(Spam ~ ., data = train.data, method = "class",
cp = 0, minsplit = 1)
# prp(spam.ct, type = 1, extra = 1, varlen = -10,
# box.col = ifelse(spam.ct$frame$var == "<leaf>", 'gray', 'white'))
spam.ct
length(spam.ct$frame$var[spam.ct$frame$var == "<leaf>"])
spam.ct.pred.train <- predict(spam.ct, train.data, type = "class")
# generate confusion matrix for training data
library(caret)
confusionMatrix(spam.ct.pred.train,
train.data$Spam,
positive = "1")
# classify records in the validation data
spam.ct.pred.valid <- predict(spam.ct, valid.data, type = "class")
confusionMatrix(spam.ct.pred.valid,
as.factor(valid.data$Spam),
positive = "1")
# perform cross-validation
cv.ct <- rpart(Spam ~ ., data = train.data, method = "class",
cp = 0, minsplit = 1, xval = 10)
# use printcp() to print the table
options(scipen = 999)
printcp(cv.ct)
0.22098+0.013693
0.234673
# create the best pruned tree
pruned.ct <- prune(cv.ct, cp = 0.00464253)
prp(pruned.ct, type = 1, extra = 1, varlen = -10,
box.col = ifelse(pruned.ct$frame$var == "<leaf>", 'gray', 'white'))
pruned.ct
length(pruned.ct$frame$var[pruned.ct$frame$var == "<leaf>"])
prp(pruned.ct, type = 1, extra = 1, varlen = -10,
box.col = ifelse(spam.ct$frame$var == "<leaf>", 'gray', 'white'))
# classify records in the validation data based on best pruned tree
best.pred.valid <- predict(pruned.ct, valid.data, type = "class")
confusionMatrix(best.pred.valid,
as.factor(valid.data$Spam),
positive = "1")
# create a random forest to predict spam
library(randomForest)
spam.rf <- randomForest(Spam ~ ., data = train.data, ntree = 500,
mtry = 4, nodesize = 5, importance = TRUE)
# variable importance plots
varImpPlot(spam.rf, type = 1)
#confusion matrix
rf.pred <- predict(spam.rf, valid.data)
confusionMatrix(rf.pred, valid.data$Spam, positive = "1")
######## pre-processing for neural nets ###############
str(spam.df)
t(t(names(spam.df)))
# neural net with one hidden layer containing 3 nodes
library(neuralnet)
spam.nn3 <- neuralnet(Spam ~ ., data = train.data,
linear.output = FALSE, hidden = 3)
# plot network
plot(spam.nn3, rep = "best")
## confusion matrix
library(caret)
predict.valid <- neuralnet::compute(spam.nn3, valid.data[,-58])
predicted.class.valid <- apply(predict.valid$net.result, 1, which.max) - 1
confusionMatrix(as.factor(ifelse(predicted.class.valid == 1, "1", "0")),
valid.data$Spam, positive = "1")
# neural net with one hidden layer containing 28 nodes
spam.nn28 <- neuralnet(Spam ~ ., data = train.data,
linear.output = FALSE, hidden = 28)
# plot network
plot(spam.nn28, rep = "best")
## confusion matrix
predict.valid <- neuralnet::compute(spam.nn28, valid.data[,-58])
predicted.class.valid <- apply(predict.valid$net.result, 1, which.max) - 1
confusionMatrix(as.factor(ifelse(predicted.class.valid == 1, "1", "0")),
valid.data$Spam, positive = "1")
# neural net with two hidden layers containing 12 nodes each
spam.nn12.12 <- neuralnet(Spam ~ ., data = train.data,
linear.output = FALSE, hidden = c(12,12))
# plot network
plot(spam.nn12.12, rep = "best")
## confusion matrix
predict.valid <- neuralnet::compute(spam.nn12.12, valid.data[,-58])
predicted.class.valid <- apply(predict.valid$net.result, 1, which.max) - 1
confusionMatrix(as.factor(ifelse(predicted.class.valid == 1, "1", "0")),
valid.data$Spam, positive = "1")
|
2fd137f4a9058b7a2cdf79e146bf834c8fe4fb1a | acdb497aa8a47599d3b7bd9438be2101b6ef415a | /man/APPENC12.Rd | fdbededd4fdd608f36985215c535e8060090cdd6 | [] | no_license | bryangoodrich/ALSM | 106ce1ab43806ec7c74fc72f9a26a094bf1f61d1 | 6fe1a413f996d26755638e9b2c81ae0aafd1a509 | refs/heads/main | 2022-07-15T15:55:23.708741 | 2022-07-03T19:55:04 | 2022-07-03T19:55:04 | 39,878,127 | 16 | 9 | null | null | null | null | UTF-8 | R | false | false | 567 | rd | APPENC12.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{APPENC12}
\alias{APPENC12}
\title{APPENC12}
\format{\preformatted{'data.frame': 192 obs. of 7 variables:
$ V1: int 1 2 3 4 5 6 7 8 9 10 ...
$ V2: int 1 1 1 1 1 1 1 1 1 1 ...
$ V3: int 1 1 1 1 2 2 2 2 3 3 ...
$ V4: int 1 1 1 1 1 1 1 1 1 1 ...
$ V5: int 1 2 3 4 1 2 3 4 1 2 ...
$ V6: int 1 1 1 1 1 1 1 1 1 1 ...
$ V7: num 0.81 0.8 0.82 0.5 0.77 0.78 0.79 0.51 0.8 0.82 ...
}}
\usage{
APPENC12
}
\description{
APPENC12
}
\keyword{datasets}
|
f7ef18299351ccc49f7321d13a40b82f2c98ae5a | c5d9392545f15a5bbd2c9b08c8c44fa6d98e6117 | /program/ui.R | e03a5139ad6df4f960818b91b910aebd79919201 | [] | no_license | dansum/delivery | 41088c5e1f48d4f7f69c9628fefd667ea59d377b | 9bdbd91b0e8b728d0b64676524134586645fcf36 | refs/heads/master | 2021-01-01T19:20:44.782407 | 2014-06-21T23:48:29 | 2014-06-21T23:48:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,926 | r | ui.R | library(shiny)
shinyUI(pageWithSidebar(
headerPanel("Please input a YoutubeId from our pre-selection to see viewing data about that video."),
sidebarPanel(
h4("The videos are from Khan Academy's translation in Bulgarian and this analysis will help you gauge interest in different videos, as measured by how often people actually watch them"),
selectInput(inputId="videoid",label = "We have pre-selected video IDs for you",
choices = c("IrdMDufjFvg" = "IrdMDufjFvg",
"00fv7xEGbv8" = "00fv7xEGbv8",
"cA19Bjtk4T8" = "cA19Bjtk4T8",
"K0UOZyG1_gw" = "K0UOZyG1_gw",
"Goi_ucJwHWc" = "Goi_ucJwHWc")
), # need a dynamic drop-down; if not dynamic, then choose 10 videos manually; or radio button)
selectInput(inputId="measure", label = "Choose your favorite statistic to see",
choices = c("view" = "view",
"like" = "like",
"favorite" = "favorite")
), # need a cynamic drop-down; if not dynamic, then choose 3 measures manually; or radio button)
numericInput(inputId="benchmark",
label = "Provide a number that you want this measure to reach. This will be our benchmark",
0, min = 0, max = 1000, step=1),
actionButton("goButton","Show me the Data")
),
mainPanel(
p('You selected this video ID:'),
textOutput('videoid_out'),
p('You selected this measure:'),
textOutput('measure_out'),
p('Current data:'),
textOutput('stat_out'),
p('This is how many more of the measure we need to reach your benchmark (negative if we beat your benchmark; error if you did not provide one):'),
textOutput('bench_diff_out')
# p('Testing Testing Testing'),
# textOutput('videoid_index_test') # video_measure
)
))
|
d44205044a1e6b0034153f72846417bee95467b9 | d4bbec7817b1704c40de6aca499625cf9fa2cb04 | /src/lib/special/beta/__test__/fixture-generation/beta-postive-grid.R | 27bb1d48910c27115d33d69d1af1e41b9ed9b08f | [
"MIT"
] | permissive | R-js/libRmath.js | ac9f21c0a255271814bdc161b378aa07d14b2736 | 9462e581da4968938bf4bcea2c716eb372016450 | refs/heads/main | 2023-07-24T15:00:08.372576 | 2023-07-16T16:59:32 | 2023-07-16T16:59:32 | 79,675,609 | 108 | 15 | MIT | 2023-02-08T15:23:17 | 2017-01-21T22:01:44 | TypeScript | UTF-8 | R | false | false | 295 | r | beta-postive-grid.R |
#options(digits=20);
#df <- data.frame(x=c(1),y=c(1), z=c(1));
#b <- function(a, b) {
# a_1 <- gamma(a+b);
# a_2 <- gamma(a);
# a_3 <- gamma(b);
# a_2*a_3/a_1;
# }
#
#for (j in seq(0.1, 2, 0.1)) {
# for (i in seq(0.1,2,0.1)) {
# df[nrow(df)+1,] = c(i,j,b(i,j));
# }
#}
#df
|
7e27a27ebe3a407e1747a11fbc24d2952d5c874d | efaaaefd321d8a665bb131cb5bb04b85f0d382bc | /man/c_fun.Rd | d58db2743934e04031ca6dc22a977ab6d6bbea98 | [] | no_license | cgaillac/RationalExp | 23589beabb373b2fdbfac158de28c0119f687a14 | 2fe24006907c3782d6a75064cbde121f02bff246 | refs/heads/master | 2020-04-03T15:26:22.038906 | 2019-02-07T15:27:24 | 2019-02-07T15:27:24 | 155,362,447 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 510 | rd | c_fun.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/c_fun.R
\name{c_fun}
\alias{c_fun}
\title{Compute the difference between mean of subvectors of two vectors}
\usage{
c_fun(i, i_t, y, z)
}
\arguments{
\item{i}{starting index}
\item{i_t}{final index}
\item{y}{first vector of elements}
\item{z}{second vector of elements}
}
\value{
a real, the difference between means of subvectors of two vectors
}
\description{
Compute the difference between mean of subvectors of two vectors
}
|
e0773f74e91d4a47dc5d7713b1a6b5ca32c51526 | 04e794e6bdb8b3de778a338dff08fad061bd314b | /Shared/misc/cli/experiment-stats.R | 7b40aa14320e14286c352c186bc6eccd1373493b | [] | no_license | tectronics/socialgossip | b5404e56a7ea1d785cca90be82b371edb67a8a5c | d9575a1f097e02746e849a34557633061dfa3f61 | refs/heads/master | 2018-01-11T15:03:00.938360 | 2014-06-14T16:13:46 | 2014-06-14T16:13:46 | 46,855,976 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,883 | r | experiment-stats.R | #!/usr/bin/Rscript --vanilla
# Imports.
library(getopt)
# Sources in our common files.
rlibhome <- Sys.getenv("RLIB_HOME", unset=NA)
if (is.na(rlibhome)) {
stop("Environment variable RLIB_HOME was not set.")
} else {
source(file.path(rlibhome, "common.R"))
}
opt_spec <- matrix(c(
'verbose', 'v', 0, "logical", 'verbose mode',
'help', 'h', 0, "logical", 'prints this help message',
'input', 'i', 1, "character", 'input file (mandatory)',
'output', 'o', 1, "character", 'output file',
'metric', 'm', 1, "character", 'plotted metric (mandatory)',
'algorithm', 'a', 1, "character", 'algorithm name (mandatory)',
'logplot', 'l', 1, "character", 'logplot axis (x, y, or xy)'
), ncol=5, byrow=TRUE)
# Parses the command line.
opt = getopt(opt_spec)
if (!is.null(opt$help)) {
# Help request.
message(getopt(opt_spec, usage=TRUE))
q(save="no", status=0)
}
# Checks that the "mandatory options" are not null.
chkmandatory(opt, c("input", "metric", "algorithm"))
# Set the defaults for stuff that wasn't set.
if (is.null(opt$output)) { opt$output = "./output.eps" }
if (is.null(opt$logplot)) { opt$logplot = "" }
if (is.null(opt$verbose)) { opt$verbose = FALSE }
# Reads the file.
the_data <- read.table(file=opt$input, header=FALSE, sep=" ")
# Plots the data. Assumes the stuff we want is in the last column.
the_data = the_data[[dim(the_data)[2]]]
metric_hist(the_data, algorithm=opt$algorithm, measure=opt$metric, file_name=opt$output, log=opt$logplot, real_zero=TRUE)
# Prints minimum, maximum, avg, std. dev and 90th percentile.
s <- std_stats(the_data)
s <- paste(s["minimum"], s["maximum"], s["average"], s["standard deviation"], s["90th percentile"])
cat(s)
|
ed33e12bb3c98b2d535cef329a75d62bf9d387fb | 0be0c7d71fca454f77e59fae473330b2c157c653 | /README.rd | f41ab000afe0a327a414825af2eedc85c3fdc39e | [] | no_license | dipbd1/django_practice | 6dfdaee7c19e4310953afcd7385b8f61978ad436 | a7b0b15abbde383dac8365efd20ba86f74d83f4a | refs/heads/master | 2022-12-27T21:10:13.627827 | 2019-02-13T14:44:19 | 2019-02-13T14:44:19 | 170,489,978 | 0 | 1 | null | 2022-12-15T23:27:16 | 2019-02-13T10:40:22 | Python | UTF-8 | R | false | false | 68 | rd | README.rd | Initially, I was trying to test both Django and Git so I can learn.
|
962ba6071bb406c44cdaba9b0cec62cd96625e12 | 1f4366b5fa0da91bcc91518c87b81fd6818ea278 | /man/ffl_info.Rd | 9253d1e85e6f5704201fa00a0e4bf5ff979e1a79 | [
"MIT"
] | permissive | kiernann/fflr | d9a98c4fee465e43c388d6a116393538ee7914c9 | b44d595d677c140ab388d3601be765034fc0ea4e | refs/heads/master | 2022-09-24T09:49:14.845411 | 2022-09-19T01:13:43 | 2022-09-19T01:13:43 | 209,177,645 | 18 | 5 | NOASSERTION | 2023-09-12T15:47:00 | 2019-09-17T23:53:07 | R | UTF-8 | R | false | true | 738 | rd | ffl_info.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/info.R
\name{ffl_info}
\alias{ffl_info}
\alias{ffl_year}
\alias{ffl_week}
\title{Get fantasy football information}
\usage{
ffl_info()
ffl_year(offset = 0)
ffl_week(offset = 0)
}
\arguments{
\item{offset}{Add negative or positive values.}
}
\value{
A list of season information.
}
\description{
Information on the current fantasy football season, with functions to quickly
access and modify certain information (like the current \code{seasonId} or
\code{scoringPeriodId}).
}
\examples{
str(ffl_info())
Sys.time()
ffl_year()
ffl_week(-1)
}
\seealso{
Other Game information:
\code{\link{espn_games}()},
\code{\link{ffl_seasons}()}
}
\concept{Game information}
|
5899df008ce49f34d4510ca7503570f22d8c4730 | a7d0294b1056888b29bf802dcc87411201947bd6 | /get_data_NCMS.R | 20d81619784c00fa5f1812bb54964545373da75a | [] | no_license | karafede/Masdar_Interactive_ts_daily_sat_PM | ddf1c3ce9b3853ea9db15e099a05651f4fab4a8e | 4204541f0e3289333fb23dbd6c8d5e0f61fc8f6f | refs/heads/master | 2021-01-20T11:05:33.940055 | 2017-03-05T07:00:31 | 2017-03-05T07:00:31 | 83,913,465 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,002 | r | get_data_NCMS.R |
library(readr)
library(dplyr)
library(threadr)
library(tidyr)
library(dygraphs)
# function to generate time-series based on data for each year in the UAE
#setwd("Z:/_SHARED_FOLDERS/Air Quality/Phase 1/Pathflow of Phase I_DG/Interactive_plots_R")
# setwd("Z:/_SHARED_FOLDERS/Air Quality/Phase 1/Pathflow of Phase I_DG/dawit Data/Hourly Database format CSV")
# ------------------------------------------------------------------------------
# Load the hourly, box-filtered station data (EAD, DM, NCMS; 2013-2016), keep
# one record per day (the row whose DateTime matches a fixed hour), combine
# everything into AQ_data, export the PM2.5/PM10 subset and load the
# satellite-derived PM2.5 estimates.
# ------------------------------------------------------------------------------

# All hourly, box-filtered exports live in one directory; the spaces around
# the year are part of the actual file names on disk.
hourly_dir <- "Z:/_SHARED_FOLDERS/Air Quality/Phase 1/Pathflow of Phase I_DG/dawit Data/Hourly Database format CSV/Arranged dates/R files/filtered_4_box"

# Read one agency/year export and keep only the rows whose DateTime matches
# `hour_pattern` (the single hour used as the daily snapshot).
read_hourly_at <- function(agency, year, hour_pattern) {
  path <- paste0(hourly_dir, "/database_", agency, "_ ", year,
                 " _hourly_filtered.csv")
  df <- read_csv(path)
  # Harmonize an inconsistently spelled site label in the DM 2016 export
  if (agency == "DM" && year == 2016) {
    df$Site <- ifelse(grepl("DUBAIAIRPORT", df$Site, ignore.case = TRUE),
                      "DUBAI AIR PORT", df$Site)
  }
  filter(df, grepl(hour_pattern, DateTime))
}

# 2013-2015 use the 12:00 record, 2016 the 11:00 record
years    <- c(2013, 2014, 2015, 2016)
patterns <- c("12:", "12:", "12:", "11:")

# One data frame per agency/year (EAD first, then DM, then NCMS — same order
# as the original rbind() call), then stack them
per_file <- unlist(
  lapply(c("EAD", "DM", "NCMS"),
         function(agency) Map(read_hourly_at, agency, years, patterns)),
  recursive = FALSE
)
AQ_data <- do.call(rbind, per_file)

AQ_data_PM25 <- AQ_data %>%
  filter(Pollutant == "PM2.5")
AQ_data_PM10 <- AQ_data %>%
  filter(Pollutant == "PM10")
AQ_data_PM <- rbind(AQ_data_PM25,
                    AQ_data_PM10)

dir <- hourly_dir
write_csv(AQ_data_PM, paste0(dir, "/","PM25_PM10_data_filtered_4_box.csv"))

# Parse DateTime and derive the calendar date. Namespaced on purpose:
# lubridate is not attached by library(tidyverse) in this script, and
# base::date() does not accept an argument, so the unqualified calls would
# only work if lubridate happened to be attached in the session.
AQ_data <- AQ_data %>%
  mutate(DateTime = lubridate::ymd_hms(DateTime, tz = "UTC"),
         Date = lubridate::date(DateTime))

# satellite data (PM2.5 estimated from MODIS AOD)
Sat_data <- read_csv("Z:/_SHARED_FOLDERS/Air Quality/Phase 1/Pathflow of Phase I_DG/Sat_AOD_Correlation/PM25_from_AOD_MODIS.csv")
# Unique monitoring sites (name + coordinates) reporting pollutant `var`.
# Relies on the global AQ_data assembled above.
get_sites <- function(var) {
  matching <- filter(AQ_data, Pollutant == var)
  distinct(matching, Site, Latitude, Longitude)
}
# Build the time series for one station: the station's own measurements (one
# column per pollutant after spread()) plus the satellite-derived PM2.5
# (AOD_PM25). Relies on the globals AQ_data and Sat_data created above and on
# threadr::data_frame_to_timeseries().
# NOTE(review): `ymd()` below is a lubridate function that is never attached
# via library() in this script — assumes lubridate is loaded in the session;
# TODO confirm.
# Returns the result of cbind()-ing the two time-series lists; downstream
# code (interactive_plot) indexes it as a list-matrix via ts[pollu, ].
get_measurement_time_series <- function(station, pollutant) {
  # NOTE(review): the `pollutant` argument is accepted but never used here;
  # all pollutants measured at `station` are returned.
  # Import hourly data from several years
  # NCMS[sapply(NCMS,is.na)] = NA
  # NCMS <- AQ_data %>%
  #   # mutate(date = mdy_hms(DateTime, tz = "UTC")) %>%
  #   mutate(date = ymd(Date)) %>%
  #   dplyr:: select(date,
  #                  Site,
  #                  Pollutant,
  #                  Daily_mean) %>%
  #   filter(Site == station)
  # Station measurements for this station, one row per date/pollutant
  NCMS <- AQ_data %>%
    # mutate(date = mdy_hms(DateTime, tz = "UTC")) %>%
    mutate(date = ymd(Date)) %>%
    dplyr:: select(date,
                   Site,
                   Pollutant,
                   Value) %>%
    filter(Site == station)
  # replace NaN (not a Number with NA that is a missing value)
  # NCMS[sapply(NCMS,is.na)] = NA
  # Satellite-derived PM2.5 for the same station
  NCMS_filtered <- Sat_data %>%
    # mutate(date = mdy_hms(DateTime, tz = "UTC")) %>%
    mutate(date = ymd(Date)) %>%
    dplyr:: select(date,
                   Site,
                   AOD_PM25) %>%
    filter(Site == station)
  # Wide format: one column per pollutant
  data_time <- NCMS %>%
    spread(Pollutant, Value)
  data_time_filtered <- NCMS_filtered %>%
    select(-Site)
  # data_time <- data_time %>%
  #   left_join(data_time_filtered, by = "date")
  # Build timeseries for plots (threadr: one ts object per value column)
  time_series <- data_frame_to_timeseries(data_time, tz = "UTC")
  time_series_filtered <- data_frame_to_timeseries(data_time_filtered, tz = "UTC")
  # Return
  #time_series
  #time_series_filtered
  # bind two time series together: column 1 = station series, column 2 =
  # satellite series; rows keep the pollutant names from time_series
  All_data <- cbind(time_series,time_series_filtered)
  #return
  All_data
}
# All_data
# data_both<- All_data[pollu,]
# data_both_ts<-as.ts(data_both[1])
# data_both_ts_2<-as.ts(data_both[2])
# data_both_ts$time_series
# data_both_ts_2$time_series_filtered
############################################################################
############################################################################
# to create grouped interactive dygraphs
# pollutant <- "PM<sub>2.5</sub>"
# station<-"Deira"
#
# ts<-All_data
# station = "Zabeel"
# group = pollutant
# pollu= "PM2.5"
# da<- is.na(data_time$AOD_PM25 )
# dada <- which(da , arr.ind = T,useNames = TRUE)
#
# station<- "Al Ain Street"
# pollutant <- "PM<sub>2.5</sub>"
#
# All_data<-get_measurement_time_series(station, pollutant)
# ts<-All_data
# pollu<-"PM2.5"
# group = pollutant
#
#
# station = "Al Ain Islamic Ins"
# group = pollutant
# pollu="PM2.5"
# data_both<- All_data[pollu,]
# data_both_ts<-as.ts(data_both[1])
# data_both_ts_2<-as.ts(data_both[2])
#ts_xxx <- cbind(data_both_ts$time_series, data_both_ts_2$time_series_filtered)
#dawit<-interactive_plot(time_series_BainAljesrain, station, group, pollu)
# Interactive dygraph comparing a station's measurements with the
# satellite-derived series for one pollutant.
#
# ts      output of get_measurement_time_series(); indexed by pollutant name
#         via ts[pollu, ] (column 1 = station series, column 2 = satellite)
# station station name, used in the plot title
# group   dygraph synchronisation group; also the pollutant's HTML label,
#         either "PM<sub>10</sub>" or "PM<sub>2.5</sub>"
# pollu   pollutant row name in ts ("PM10" or "PM2.5")
#
# Returns a dygraph htmlwidget, or NULL when `ts` is NULL, `pollu` is not a
# row of `ts`, or `group` is not one of the two supported labels.
#
# Fixes vs the original: the branches compared against the *global* variable
# `pollutant` instead of the `group` argument (it only worked because callers
# happened to set both to the same value), the whole dygraph pipeline was
# duplicated per pollutant with only the y-axis label differing, and an
# unused `colour_vector` was computed on every call.
interactive_plot <- function(ts, station, group, pollu) {
  if (is.null(ts) || !is.element(pollu, row.names(ts))) {
    return(NULL)
  }
  # Only the y-axis label depends on the pollutant
  y_label <- switch(group,
                    "PM<sub>10</sub>" = "Hourly PM<sub>10</sub> (μg m<sup>-3</sup>)",
                    "PM<sub>2.5</sub>" = "Hourly PM<sub>2.5</sub> (μg m<sup>-3</sup>)")
  if (is.null(y_label)) {
    return(NULL)
  }
  data_both <- ts[pollu, ]
  data_both_ts <- as.ts(data_both[1])
  data_both_ts_2 <- as.ts(data_both[2])
  ts_xxx <- cbind(data_both_ts$time_series, data_both_ts_2$time_series_filtered)
  dygraph(ts_xxx, group = group, main = paste(station, " - ", group)) %>%
    dySeries("..1", label = "Station", color = "red") %>%
    dySeries("..2", label = "SAT.", color = "blue") %>%
    dyAxis("y", label = y_label) %>%
    dyRangeSelector()
}
# pollutant <- "NO2"
# plot <- interactive_plot(time_series_Ghalilah$NO2, station = "Ghalilah", group = pollutant)
# plot
# Leaflet map of monitoring sites with the UAE emirate polygons overlaid.
# `df` needs Longitude, Latitude and Site columns; the polygon layer uses the
# global shp_UAE shapefile object.
interactive_map_index <- function(df) {
  # One colour per emirate ID, same fixed palette as before
  emirate_palette <- colorNumeric(
    c("#a56e6e", "#7a7acc", "#FFFF00", "#ff0000", "#be68be", "#7fbf7f",
      "#008000", "#0000ff"),
    shp_UAE$ID_1
  )
  leaflet() %>%
    addTiles(group = "OpenStreetMap") %>%
    addProviderTiles("Stamen.Toner", group = "Toner") %>%
    addProviderTiles("Esri.WorldImagery", group = "Images") %>%
    addMarkers(data = df, lng = ~ Longitude, lat = ~ Latitude,
               popup = ~ Site, group = "Sites") %>%
    addPolygons(stroke = TRUE, data = shp_UAE,
                weight = 1.5, color = ~ emirate_palette(ID_1),
                fillOpacity = 0.5,
                group = "shape_UAE") %>%
    addLayersControl(baseGroups = c("OpenStreetMap", "Toner", "Images"),
                     overlayGroups = c("Sites"))
}
#
# Leaflet map of monitoring sites (no polygon overlay), centred on the UAE.
# `df` needs Longitude, Latitude and Site columns.
interactive_map <- function(df) {
  base_layers <- leaflet() %>%
    setView(lng = 55.9971, lat = 25.3302, zoom = 9) %>%
    addTiles(group = "OpenStreetMap") %>%
    addProviderTiles("Stamen.Toner", group = "Toner") %>%
    addProviderTiles("Esri.WorldImagery", group = "Images")
  base_layers %>%
    addMarkers(data = df, lng = ~ Longitude, lat = ~ Latitude,
               popup = ~ Site, group = "Sites") %>%
    addLayersControl(baseGroups = c("OpenStreetMap", "Toner", "Images"),
                     overlayGroups = c("Sites"))
}
|
f1415139cb4a68aa68b56e9b3b61381692299e26 | 67615957a9f5d2f74817db4ce219fe10644c0ae0 | /courses/stat486/slides/10-control/10-control.R | 808252950be543093668e0313b3ac98479102ede | [] | no_license | jarad/jarad.github.com | 29ed8dc5583a52a57cd26bac252d071a0ff623a9 | 00a2bada3de6d6aa89b4795f52d5b134dd3edfe7 | refs/heads/master | 2023-08-09T10:30:19.203097 | 2023-07-30T14:54:31 | 2023-07-30T14:54:31 | 6,108,556 | 9 | 21 | null | null | null | null | UTF-8 | R | false | false | 5,138 | r | 10-control.R | ## -------------------------------------------------------------------------------------
library("tidyverse")
# ---- expressions ----
# First expression
1+2
# Second expression (several statements on one line; value is the last one)
a <- 1; b <- 2; a+b
# Third expression: a braced block evaluates to its last expression
{
  a <- 1
  b <- 2
  a + b
}
# ---- help page ----
?expression
# ---- if: condition must be a single TRUE/FALSE ----
if (TRUE) {
  print("This was true!")
}
# ---- the condition can be any scalar logical ----
this <- TRUE
if (this) {
  print("`this` was true!")
}
# ---- comparisons as conditions ----
if (1<2) {
  print("one is less than two!")
}
if (1>2) {
  print("one is greater than two!")
}
# ---- conditions built from variables ----
a <- 1
b <- 2
if (a < 2) {
  print("`a` is less than 2!")
}
if (a < b) {
  print("`a` is less than `b`!")
}
# ---- if/else ----
if (a < b) {
  print("`a` is less than `b`!")
} else {
  print("`b` is not less than `a`!")
}
# ---- else branch taken ----
if (a > b) {
  print("`a` is greater than `b`!")
} else {
  print("`a` is not greater than `b`!")
}
# ---- if / else if / else chain ----
if (a > b) {
  print("`a` is greater than `b`!")
} else if (dplyr::near(a,b)) {
  print("`a` is near `b`!")
} else {
  # NOTE(review): this branch is reached when a < b, so the message text
  # below looks inverted ("less than" was probably intended)
  print("`a` must be greater than b")
}
# ---- braces are optional for a one-line body ----
if (a < b)
  print("`a` is less than `b`!")
# ---- vectorized conditional: ifelse() ----
ifelse(c(TRUE, FALSE, TRUE), "this was true", "this was false")
# ---- switch() dispatches on a string; the unnamed argument is the default ----
this <- "a"
switch(this,
       a = "`this` is `a`",
       b = "`this` is `b`",
       "`this` is not `a` or `b`")
this <- "b"
switch(this,
       a = "`this` is `a`",
       b = "`this` is `b`",
       "`this` is not `a` or `b`")
this <- "c"
switch(this,
       a = "`this` is `a`",
       b = "`this` is `b`",
       "`this` is not `a` or `b`")
# ---- for loop over an integer sequence ----
for (i in 1:10) {
  print(i)
}
# ---- loop body with a condition ----
for (i in 1:10) {
  if (i > 5)
    print(i)
}
# ---- looping over doubles ----
for (d in c(2.3, 3.5, 4.6)) {
  print(d)
}
# ---- looping over character vectors ----
for (c in c("my","char","vector")) {
  print(c)
}
# ---- the 1:length(x) trap: for NULL input this iterates over c(1, 0) ----
this <- NULL
for (i in 1:length(this)) {
  print(i)
}
# ---- seq_along() is safe: zero iterations for empty input ----
for (i in seq_along(this)) {
  print(i)
}
# ---- indexing into the vector inside the loop ----
my_chars <- c("my","char","vector")
for (i in seq_along(my_chars)) {
  print(paste(i, ":", my_chars[i]))
}
# ---- row-wise filtering of a data frame with a loop ----
for (i in seq_len(nrow(ToothGrowth))) {
  if (ToothGrowth$supp[i] == "OJ" &
      near(ToothGrowth$dose[i], 2) &
      ToothGrowth$len[i] > 25) {
    print(ToothGrowth[i,])
  }
}
# ---- braces optional for a one-line for body too ----
for (i in 1:10)
  print(i)
# ---- while loop ----
a <- TRUE
while (a) {
  print(a)
  a <- FALSE
}
# ---- counting with while ----
i <- 0
while (i < 10) {
  print(i)
  i <- i + 1
}
# ---- the condition is checked before every iteration ----
x <- 2
while (x < 1) { # Evaluated before the loop
  print("We entered the loop.")
}
while (x < 100) { # Evaluated after each loop
  x <- x*x
  print(x)
}
# ---- an intentionally infinite loop (eval=FALSE chunk in the slides) ----
## while (TRUE) {
##   # do something
## }
# ---- guard an open-ended loop with a maximum iteration count ----
max_iterations <- 1000
i <- 1
while (TRUE & (i < max_iterations) ) {
  i <- i + 1
  # Do something
}
print(i)
# ---- repeat runs until an explicit break ----
i <- 10
repeat {
  print(i)
  i <- i + 1
  if (i > 13)
    break
}
# ---- next skips to the following iteration ----
i <- 1
repeat {
  print(i)
  i <- i + 1
  if (i %% 2) { # %% is the mod function, 0 is FALSE and 1 is TRUE
    next # skips to next iteration of repeat
  }
  if (i > 14)
    break
}
|
eb0859c578a4be7c805c0c600bbda4c112941f32 | 0d38cc682ba9aab8eff18b0862bdde0fc266e8e5 | /r4ds_chp16_vectors.R | 884430f48968381371319f2bed0b161e6f3c598a | [] | no_license | ShadeWilson/r4ds_notes | 5888b974ebe1db9737bb68959f525209b93cdba1 | 3ec75146ba26664643fa1c8a6144632caf4dc126 | refs/heads/master | 2021-07-18T17:43:10.540724 | 2017-10-27T05:40:54 | 2017-10-27T05:40:54 | 108,501,051 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,863 | r | r4ds_chp16_vectors.R | # Notes from R FOR DATA SCIENCE,
# an O'Reilly guide by Hadley Wickham and Garrett Grolemund
# Available online at http://r4ds.had.co.nz/
# PART THREE: Program
# Chapter 16: Vectors
library(tidyverse)
# Two types of vectors:
# ATOMIC vectors (six): logical, integer, double, character, complex, raw
# int and dbl are known together as numeric vectors
# LISTS aka recursive vectors bc lists can contain other lists
# main difference b/w atomic vectors and lists is that atomic vectors are
# homogeneous while lists can be heterogeneous
# Also NULL represents the absence of a vector while NA represents the abs of
# a VALUE of a vector
# every vector has two key properties
# TYPE, can determine with typeof():
typeof(letters)
typeof(1:10)
# LENGTH, can determine with length()
x <- list("a", "b", 1:10)
length(x)
# vectors can also have arbitrary metadata in the form of attributes: augmented vectors
# Four types of augmented vectors:
# 1) factors are built on top of integer values
# 2) dates and date-times are built on top of numeric vectors
# 3) df's and tibbles are built on top of lists
############### Important Types of Atomic Vector ###############
# LOGICAL
# can only take three values: TRUE, FALSE, NA
# constructed with comparison operators, can also create by hand with c()
1:10 %% 3 == 0
c(TRUE, T, FALSE, NA)
# NUMERIC
# ints, doubles == numeric
# in R, numbers are doubles by default. Place L after num to make int
typeof(1)
typeof(1L)
1.5L
# Note: doubles are approximations. use near() instead of == for comparison
# ints have one special value, NA, while doubles have four: NA, NaN, Inf, and -Inf
# all can arise during division
c(-1, 0, 1) / 0
# avoid using == to check for these other special values. Instead use is.finite(),
# is.infinite(), and is.nan()
# CHARACTER
# each element of a char vect is a string, and a string can have an arbitrary amt
# of data
# R uses a global string pool, meaning that each unique string is only stored
# in memory once, and every use of the string points to that representation
# reduces the amt of memory needed by duplicate strings. see w/ pryr::object_size()
x <- "This is a reasonably long string"
pryr::object_size(x)
y <- rep(x, 1000)
pryr::object_size(y) # pointers are only 8 bytes each
# MISSING VALUES
# each type of atomic vector has its own missing value
NA
NA_integer_
NA_real_
NA_character_
# Exercises
near
readr::parse_logical()  # NOTE(review): called without arguments this errors
#################### Using Atomic Vectors ####################
# COERCION ----------------------------------------------------------------
# Two ways to convert/coerce one type of vector to another
# 1) explicit coercion happens when you call a function like as.logical(),
#    as.integer(), etc. Always check if you can avoid this upstream if using this
#    ex: tweak readr col_types specification
# 2) implicit coercion happens when you use a vector in a specific context that
#    expects a certain type of vector. EX: when you use a logical vector
#    with a numeric sum function
# sum of logical vector is number of trues
x <- sample(20, 100, replace = TRUE)
y <- x > 10
sum(y) # how many are greater than 10?
mean(y) # what proportion are greater than 10?
# when trying to create a vector containing multiple types with c(),
# the most complex one wins
typeof(c(TRUE, 1L))
typeof(c(1L, 1.5))
typeof(c(1.5, "a"))
# TEST FUNCTIONS: better to use purrr::is_* test functions than base
# all versions come with is_scalar_* to test if length is one
############## Scalars and Recycling Rules ##############
# R will implicitly coerce the length of vectors, called vector recycling
# shorter vector is repeated (or recycled) to the same length as the longer vector
# most useful when you are mixing vectors and "scalars" (aka single num, length 1)
# most built in functions are vectorized, will operate on a vector of numbers
# why this works:
sample(10) + 100
runif(10) > 0.5
# in R, basic mathematical operations work with vectors. Means you should never need
# to perform explicit iteration when performing simple mathematical computation
# what happens if you add two vectors of different length?
1:10 + 1:2
# here R expands the shortest vector to the same length as the longest
# this is silent except when the length of the longer is not an integer multiple
# of the length of the shorter
1:10 + 1:3
# tidyverse functions will throw errors if you try and use this property
# if want it, need to use rep()
tibble(x = 1:4, y = 1:2)
tibble(x = 1:4, y = rep(1:2, 2))
tibble(x = 1:4, y = rep(1:2, each = 2))
# NAMING VECTORS
# during creation
c(x = 1, y = 2, z = 4)
# after the fact w/ purrr
set_names(1:3, c("a", "b", "c"))
# SUBSETTING --------------------------------------------------------------
# filter() only works with tibble, need [, the subsetting function
# four things you can subset a vector with
# 1) a numeric vector containing only integers (must be all positive, all neg,
#    or zero)
x <- c("one", "two", "three", "four", "five")
x[c(3, 2, 5)]
# by repeating a position, you can make a longer output than input
x[c(1, 1, 5, 5, 5, 2)]
# negative values drop elements at the specified positions
x[c(-1, -3, -5)]
# it's an error to mix positive and negative values
x[c(1, -1)]
x[0] # not very useful outside of testing functions
# 2) subsetting with a logical vector keeps all values corresponding to a TRUE value
# useful with comparison functions
x <- c(10, 3, NA, 5, 8, 1, NA)
# all non-missing values of x
x[!is.na(x)]
# all even (or missing) values of x
x[x %% 2 == 0]
# 3) if you have a named vector, you can subset it with a character vector
x <- c(abc = 1, def = 2, xyz = 5)
x[c("xyz", "def")]
# can duplicate single entries this way similar to positive integers
# 4) simplest type of subsetting is nothing: x[], returns complete x
# not useful for vectors, but useful for subsetting matrices bc lets you select
# all the rows or all the columns by leaving index blank
# ex: x[1, ]
# important variation of [ called [[. It only ever extracts a single element
# and always drops names. Most impt for lists
# Exercises
x <- c(2:20, NA)
# 4 a
# Return the final element of `x` ([[ drops names and, for lists, the
# surrounding list). Errors on empty input, as before.
last_val <- function(x) {
  x[[length(x)]]
}
last_val(x)  # x ends in NA, so this returns NA
# b
y <- c("one", "two", "three", "four", "five", NA, 7, 8, 9, 10)
# NOTE(review): y is a character vector (the numbers are coerced to strings),
# so the sum() below errors with "invalid 'type' (character)";
# sum(is.na(y)) was probably intended.
sum(y[is.na(y)])
# Return the elements of `x` at even positions (2, 4, ...).
#
# Bug fix: the original built the index vector with a while loop bounded by
# `length(x) + sum(is.na(x))` — NAs were counted twice, and for vectors
# without NAs the final even position was silently dropped (e.g. position 10
# of a length-10 vector). Positions are now selected with a vectorised mask.
# `na.rm` is kept for backward compatibility; as before, selection is purely
# positional and does not look at the values.
even_pos <- function(x, na.rm = TRUE) {
  x[seq_along(x) %% 2 == 0]
}
even_pos(y)  # elements of y at the even positions 2, 4, ...
# c
# Drop the final element: everything but the last value of `x`.
except_last <- function(x) {
  head(x, -1)
}
except_last(x)  # x with its final element (the NA) dropped
# d
# Keep only the even, non-missing values of `x`.
only_even <- function(x) {
  is_even <- !is.na(x) & x %% 2 == 0
  x[is_even]
}
only_even(x)  # even values of x, with the NA dropped
################### Recursive Vectors (Lists) ###################
# lists can contain other lists. Good for representing hierarchical or tree-like
# structures. create with list()
x <- list(1,2,3)
x
# str() is a useful tool with lists bc it focuses on the structure, not the contents
str(x)
x_named <- list(a = 1, b = 2, c = 3)
str(x_named)
# lists can contain a mix of objects, unlike atomic vectors
y <- list("a", 1L, 1.5, TRUE)
str(y)
# can even contain other lists
z <- list(list(1, 2), list(3, 4))
str(z)
# VISUALIZING LISTS
x1 <- list(c(1, 2), c(3, 4))
x2 <- list(list(1, 2), list(3, 4))
x3 <- list(1, list(2, list(3)))
# SUBSETTING
# three ways:
a <- list(a = 1:3, b = "a string", c = pi, d = list(-1, -5))
# 1) [ extracts a sublist. result will always be a list
str(a[1:2])
str(a[4])
# can subset same as with other vectors
# 2) [[ extracts a single component from a list. removes a level of hierarchy
#    from the list
str(a[[1]])
str(a[[4]])
str(a[[4]][[1]])
# 3) $ is a shorthand for extracting named elements of a list
a$a
a[["a"]]
# diff b/w [ and [[ really impt for lists because [[ drills down into the list
# while [ returns a new, smaller list
##################### Attributes #####################
# any vector can carry an arbitrary amt of metadata thru its attributes
# get and set with attr() or see them all at once with attributes()
x <- 1:10
attr(x, "greeting")  # NULL: no such attribute yet
attr(x, "greeting") <- "Hi!"
attr(x, "farewell") <- "Bye!"
attributes(x)
# Three v impt attributes used to implement fundamental parts of R:
# 1) Names are used to name the elements of a vector
# 2) Dimensions (dims) make a vector behave like a matrix or array
# 3) Class is used to implement the S3 object-oriented system
# generic function looks like:
as.Date
# can see all methods for a generic with methods()
methods("as.Date")
# see implementation of a method with getS3method()
getS3method("as.Date", "default")
getS3method("as.Date", "numeric")
# most impt S3 generic is print(): controls how obj is printed when you type its name
# AUGMENTED VECTORS
# vectors with additional attributes like factors and date-times, times, tibbles
# FACTORS
# designed to represent categorical data that can take a fixed set of possible values
# built on top of ints, have a levels attribute
x <- factor(c("ab", "cd", "ab"), levels = c("ab", "cd", "ef"))
x
typeof(x)
attributes(x)
# DATES AND DATE-TIMES
# dates in R are numeric vectors that rep the num of days since 1 Jan 1970
x <- as.Date("1970-01-01")
unclass(x)
typeof(x)
attributes(x)
# date-times are numeric vectors with class POSIXct that rep numbers of seconds
# since Jan 1 1970 (portable operating system interface, calendar time)
x <- lubridate::ymd_hm("1970-01-01 01:00")
unclass(x)
typeof(x)
attributes(x)
# tzone attribute is optional: controls how it is printed, not the abs time
attr(x, "tzone") <- "US/Pacific"
x
attr(x, "tzone") <- "US/Eastern"
x
# other type of date-times called POSIXlt built on top of named lists
y <- as.POSIXlt(x)
typeof(y)
attributes(y)
# TIBBLES
# augmented lists. 3 classes: tbl_df, tbl, and data.frame
# two attributes: (column) names and row.names
tb <- tibble::tibble(x = 1:5, y = 5:1)
typeof(tb)
attributes(tb)
# traditional data.frames have a very similar structure
254cfd997f189b9f525a14cc8ff67f68c8e00769 | 093dacede7c431ab1cbef672830f76920942b801 | /man/MDA4.Rd | 8fc4897e3ac3af0a49deb42d107afbca16dfe591 | [
"Apache-2.0"
] | permissive | bhklab/MetaGxBreast | a30cee29007ededf0fbeb64524f18b3a3b8128b8 | 3ba8f39928a20dffb799c338622a1461d2e9ef98 | refs/heads/master | 2021-06-03T09:54:44.555453 | 2021-04-23T18:54:53 | 2021-04-23T18:54:53 | 100,535,452 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,169 | rd | MDA4.Rd | \name{ MDA4 }
\alias{ MDA4 }
\docType{data}
\title{ MDA4 }
\description{ ExpressionSet for the MDA4 Dataset}
\format{
\preformatted{
experimentData(eset):
Experiment data
Experimenter name:
Laboratory:
Contact information: http://www.ncbi.nlm.nih.gov/pubmed/?term=16896004
Title:
URL: http://bioinformatics.mdanderson.org/pubdata.html
PMIDs: 16896004
No abstract available.
notes:
summary:
    The developed 30-probe set has high sensitivity and negative predictive
    value, accurately identifying 12 out of 13 patients with pCR and 27 out
    of 28 patients with residual disease.
mapping.method:
maxRowVariance
mapping.group:
EntrezGene.ID
preprocessing:
As published by original author.
featureData(eset):
An object of class 'AnnotatedDataFrame'
featureNames: 1007_s_at 1053_at ... AFFX-HUMISGF3A/M97935_MB_at
(21169 total)
varLabels: probeset gene EntrezGene.ID best_probe
varMetadata: labelDescription
}}
\details{
\preformatted{
assayData: 21169 features, 129 samples
Platform type:
---------------------------
Available sample meta-data:
---------------------------
sample_name:
Length Class Mode
129 character character
unique_patient_ID:
Length Class Mode
129 character character
sample_type:
tumor
129
er:
negative positive NA's
48 79 2
pgr:
negative positive NA's
73 54 2
her2:
negative positive
114 15
tumor_size:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
0.000 0.500 1.800 2.162 3.000 10.000 8
N:
0 1 NA's
59 62 8
age_at_initial_pathologic_diagnosis:
Min. 1st Qu. Median Mean 3rd Qu. Max.
28.00 43.00 51.00 51.43 61.00 73.00
treatment:
chemotherapy
129
batch:
MDA4
129
uncurated_author_metadata:
Length Class Mode
129 character character
duplicates:
MDA4.MDA4_M207 MDA4.MDA4_M400 NA's
1 1 127
}}
\source{ http://bioinformatics.mdanderson.org/pubdata.html }
\keyword{datasets}
|
23089d5000dbc49768e5bf41dbfce3c35071c4b8 | 0ea14eab0e669da89b8ba2703b3cdca86f390ed4 | /eqtl_sentinel_snp/R/run_matrixEQTL.R | b3c679e8414d0057e339544d5d703f7f90e44325 | [] | no_license | heiniglab/hawe2021_meQTL_analyses | bb1a835baac74bcfd7c3a3c996feec12b59f9e4e | cb32d188a80c2a4fa55eafc6bd24a21a0bd4889a | refs/heads/main | 2023-04-18T16:28:39.623160 | 2022-01-14T08:21:08 | 2022-01-14T08:21:08 | 373,108,082 | 6 | 5 | null | null | null | null | UTF-8 | R | false | false | 5,058 | r | run_matrixEQTL.R | #' -----------------------------------------------------------------------------
#' Run matrix eQTL with no consideration for cis/trans or covariates.
#'
#' @author Johann Hawe <johann.hawe@helmholtz-muenchen.de>
#'
#' @date Tue Dec 10 16:52:45 2019
#' -----------------------------------------------------------------------------
log <- file(snakemake@log[[1]], open="wt")
sink(log)
sink(log, type="message")
# ------------------------------------------------------------------------------
print("Load libraries and source scripts")
# ------------------------------------------------------------------------------
library(tidyverse)
library(MatrixEQTL)
# debug
print("Num Threads:")
print(RhpcBLASctl::omp_get_num_procs())
print("BLAS Num Threads:")
print(RhpcBLASctl::blas_get_num_procs())
# ------------------------------------------------------------------------------
print("Get snakemake params.")
# ------------------------------------------------------------------------------
# Pull all file paths and tuning parameters from the snakemake object so the
# script stays declarative with respect to the workflow definition.
# output
fout_associations <- snakemake@output$associations
# input files
fdependent <- snakemake@input$dependent
findependent <- snakemake@input$independent
findependent_subset <- snakemake@input$subset
# params
threads <- snakemake@threads
# report every association (no p-value filtering at this stage)
pv_threshold <- 1
# Optional flag: restrict independent entities to the provided subset file.
# as.logical() yields NA for missing/unparseable params; each NA falls back
# to a documented default with a warning.
use_subset <- as.logical(snakemake@params$use_subset)
if(is.na(use_subset)) {
  warning("'use_subset' not specified. Using provided subset of independent entities.")
  use_subset <- TRUE  # was `T`; use the non-reassignable literal
}
# Optional flag: keep the statistic/pvalue/FDR columns in the final output.
keep_non_beta <- as.logical(snakemake@params$keep_non_beta)
if(is.na(keep_non_beta)) {
  warning("'keep_non_beta' not specified. Setting default to FALSE.")
  keep_non_beta <- FALSE  # was `F`
}
# Optional flag: skip FDR computation (MatrixEQTL then streams results to disk).
calculate_no_fdr <- as.logical(snakemake@params$calculate_no_fdr)
if(is.na(calculate_no_fdr)) {
  warning("'calculate_no_fdr' not specified. Setting default to FALSE.")
  calculate_no_fdr <- FALSE  # was `F`, with missing space after `<-`
}
# set openBLAS number of threads accordingly
RhpcBLASctl::blas_set_num_threads(threads)
RhpcBLASctl::omp_set_num_threads(threads)
# ------------------------------------------------------------------------------
print(paste0("Prepare sliced data: ", date()))
# ------------------------------------------------------------------------------
# Load the dependent data (e.g. expression) directly into a MatrixEQTL
# SlicedData object, streaming the file in 30,000-row slices.
print("dependent data.")
dep <- SlicedData$new()
dep$fileDelimiter <- "\t"
dep$fileOmitCharacters <- "NA"
dep$fileSkipRows <- 1
dep$fileSkipColumns <- 1
dep$fileSliceSize <- 30000
dep$LoadFile(fdependent)
# we first load the data manually, subset to the necessary entities and
# convert it to asliced dataset
print("independent data.")
indep <- read_tsv(findependent, col_names=F, skip = 1)
ids <- indep %>% pull(X1)  # first column holds the entity identifiers
indep <- indep %>% select(-X1) %>% data.matrix
rownames(indep) <- ids
if(use_subset) {
  # Subset file: assumed to list entity ids in columns X1 and X3 -- TODO confirm
  # against the upstream rule that produces it.
  samp <- read_tsv(findependent_subset, col_names=F, skip=1)
  entity_subset <- samp %>% pull(X3)
  entity_subset <- unique(c(entity_subset, samp %>% pull(X1)))
  entity_subset <- setdiff(entity_subset, NA)
  # Keep only entities actually present in the independent data matrix.
  entity_subset_avail <- entity_subset[entity_subset %in% ids]
  indep <- indep[entity_subset_avail,,drop=F]
  if(length(entity_subset_avail) < length(entity_subset)) {
    warning("Not all entities available in data (eg CpGs with too many NAs?")
  }
}
# Wrap the (possibly subset) matrix in a SlicedData object for MatrixEQTL.
indep_sliced <- SlicedData$new()
indep_sliced$fileOmitCharacters <- "NA" # denote missing values;
indep_sliced$fileSliceSize = 30000      # read file in pieces of 30,000 rows
indep_sliced$CreateFromMatrix(indep)
# ------------------------------------------------------------------------------
print(paste0("Compute QTLs: ", date()))
# ------------------------------------------------------------------------------
# run analysis: simple linear model per (independent, dependent) pair,
# no covariates, no cis/trans distinction. With noFDRsaveMemory = TRUE the
# results are streamed to `output_file_name` and NOT kept in `result`.
result <- Matrix_eQTL_main(
  snps = indep_sliced,
  gene = dep,
  output_file_name = fout_associations,
  pvOutputThreshold = pv_threshold,
  useModel = modelLINEAR,
  verbose = TRUE,
  errorCovariance = numeric(),
  pvalue.hist = FALSE,
  noFDRsaveMemory = calculate_no_fdr)
# ------------------------------------------------------------------------------
print(paste0("Analysis done: ", date()))
# ------------------------------------------------------------------------------
print("Time in seconds:")
print(result$time.in.sec)
print("Finalizing results, setting SEs.")
# if no FDR values are calculated the results are written out on disk directly and
# and not available in the output object (the following code is not working anymore)
if(! calculate_no_fdr){
  # set standard errors and remove unnecessary stuff if requested
  # SE derived from beta and t-statistic: se = beta / t
  result$all$eqtls$beta_se  =  result$all$eqtls$beta / result$all$eqtls$statistic
  if(!keep_non_beta) {
    result$all$eqtls$statistic <- NULL
    result$all$eqtls$pvalue <- NULL
    result$all$eqtls$FDR <- NULL
  }
  # ------------------------------------------------------------------------------
  print("Save results.")
  # ------------------------------------------------------------------------------
  # NOTE(review): `path =` is deprecated in readr >= 1.4 in favour of `file =`;
  # still works with a warning.
  write_tsv(result$all$eqtls, path = fout_associations)
}
# ------------------------------------------------------------------------------
print("SessionInfo:")
# ------------------------------------------------------------------------------
sessionInfo()
|
79d53f8a57c5c990b34f1d6dc78833f22331aae7 | e1dd1d9eca961779828f9f3be289ba78785b15da | /Code.R | 820ec02245d8cb7e80aaa444452bdb44a014adf0 | [] | no_license | emieldelange/Social-Influence-Information-flow | a2a1217b1bc7966e3030684eafe438d032f7fb88 | 6bf67294687622cc2f186877b15748a6ef5bb50a | refs/heads/main | 2023-03-20T06:08:10.990990 | 2021-03-08T08:32:14 | 2021-03-08T08:32:14 | 337,731,725 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 105,411 | r | Code.R | ### Linear modelling of raw data ###
### Linear modelling of raw (non-imputed) behaviour data ###
# Loads the wide survey file and reshapes it so that each row is one
# respondent-by-wave observation ("base", "follow", "final"), with one column
# per TPB construct (attitudes, control, norms, intention, knowledge, ...).
library(car)
library(lme4)
library(ggplot2)
library(tidyverse)
totaldata <- read.csv("Raw behavior data for linear modelling.csv")
totaldata <- as_tibble(totaldata, rownames = "ID") %>%
  # Columns are named "<period>.<construct>"; lengthen, split the name on the
  # dot, then widen so each construct becomes its own column per wave.
  pivot_longer(cols = matches("attitudes|control|dnorms|innorms|intention|Knowledge|pledge|story|hotline"),
               names_to = "names", values_to = "values") %>%
  separate(col = names, into = c("period", "variable"), sep = "\\.",
           fill = "left") %>%
  pivot_wider(names_from = variable, values_from = values) %>%
  mutate(period = factor(period, levels = c("base", "follow", "final")))
#normalise some variables (scale by their standard deviation so coefficients
#are per-SD effects)
totaldata$agemod <- totaldata$prelim.age/sd(totaldata$prelim.age, na.rm=TRUE)
totaldata$knowledgemod <- totaldata$Knowledge/sd(totaldata$Knowledge, na.rm=TRUE)
# Linear mixed models on the raw (listwise) data, one TPB construct at a time.
# Model 1 per construct: wave x event-attendance interaction (did attendees
# change more than non-attendees?). Model 2: knowledge as predictor.
# Model 3: recall of the three campaign elements (pledge/story/hotline).
# Random intercept per respondent (1|ID) accounts for repeated measures.
##### Intention lmer
lmintention1 <- lmer(intention ~ period * follow.event_attendance + prelim.gender +agemod + 
                       hh.SMP + hh.wealth1 +Base+ hh.Pesticide + (1|ID), data=totaldata)
summary(lmintention1)
# Readable parameter names for deltaMethod() linear combinations below;
# order must match the fixed-effect order of model 1.
parnames <- c("intercept","periodfollow", "periodfinal", "attendance", "gender", "age", "SMP", "wealth", "base", "pest", "interaction1", "interaction2")
# Test the TOTAL change for attendees (main wave effect + interaction).
linearHypothesis(lmintention1, "periodfollow+periodfollow:follow.event_attendance=0")
linearHypothesis(lmintention1, "periodfinal+periodfinal:follow.event_attendance=0")
deltaMethod(lmintention1, g="periodfollow+interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(lmintention1, g="periodfinal+interaction2", parameterNames=parnames) #Attendees increase short term?
lmintention2 <- lmer(intention ~ period + knowledgemod + prelim.gender +agemod + hh.SMP +
                       hh.wealth1 +Base+ hh.Pesticide+ (1|ID), data=totaldata)
summary(lmintention2)
lmintention3 <- lmer(intention ~ period + pledge + story + hotline + prelim.gender +agemod + hh.SMP +
                       hh.wealth1 +Base+ hh.Pesticide+ (1|ID), data=totaldata)
summary(lmintention3)
##### attitudes lmer (same three-model structure as intention)
lmattitudes1 <- lmer(attitudes ~ period * follow.event_attendance + prelim.gender +agemod + 
                       hh.SMP + hh.wealth1 +Base+ hh.Pesticide + (1|ID), data=totaldata)
summary(lmattitudes1)
linearHypothesis(lmattitudes1, "periodfollow+periodfollow:follow.event_attendance=0")
linearHypothesis(lmattitudes1, "periodfinal+periodfinal:follow.event_attendance=0")
deltaMethod(lmattitudes1, g="periodfollow+interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(lmattitudes1, g="periodfinal+interaction2", parameterNames=parnames) #Attendees increase short term?
lmattitudes2 <- lmer(attitudes ~ period + knowledgemod + prelim.gender +agemod + hh.SMP +
                       hh.wealth1 +Base+ hh.Pesticide+ (1|ID), data=totaldata)
summary(lmattitudes2)
lmattitudes3 <- lmer(attitudes ~ period + pledge + story + hotline + prelim.gender +agemod + hh.SMP +
                       hh.wealth1 +Base+ hh.Pesticide+ (1|ID), data=totaldata)
summary(lmattitudes3)
##### control lmer (perceived behavioural control)
lmcontrol1 <- lmer(control ~ period * follow.event_attendance + prelim.gender +agemod + 
                     hh.SMP + hh.wealth1 +Base+ hh.Pesticide + (1|ID), data=totaldata)
summary(lmcontrol1)
linearHypothesis(lmcontrol1, "periodfollow+periodfollow:follow.event_attendance=0")
linearHypothesis(lmcontrol1, "periodfinal+periodfinal:follow.event_attendance=0")
deltaMethod(lmcontrol1, g="periodfollow+interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(lmcontrol1, g="periodfinal+interaction2", parameterNames=parnames) #Attendees increase short term?
lmcontrol2 <- lmer(control ~ period + knowledgemod + prelim.gender +agemod + hh.SMP +
                     hh.wealth1 +Base+ hh.Pesticide+ (1|ID), data=totaldata)
summary(lmcontrol2)
lmcontrol3 <- lmer(control ~ period + pledge + story + hotline + prelim.gender +agemod + hh.SMP +
                     hh.wealth1 +Base+ hh.Pesticide+ (1|ID), data=totaldata)
summary(lmcontrol3)
##### dnorms lmer (perceived descriptive norms)
lmdnorms1 <- lmer(dnorms ~ period * follow.event_attendance + prelim.gender +agemod + 
                    hh.SMP + hh.wealth1 +Base+ hh.Pesticide + (1|ID), data=totaldata)
summary(lmdnorms1)
linearHypothesis(lmdnorms1, "periodfollow+periodfollow:follow.event_attendance=0")
linearHypothesis(lmdnorms1, "periodfinal+periodfinal:follow.event_attendance=0")
deltaMethod(lmdnorms1, g="periodfollow+interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(lmdnorms1, g="periodfinal+interaction2", parameterNames=parnames) #Attendees increase short term?
lmdnorms2 <- lmer(dnorms ~ period + knowledgemod + prelim.gender +agemod + hh.SMP +
                    hh.wealth1 +Base+ hh.Pesticide+ (1|ID), data=totaldata)
summary(lmdnorms2)
lmdnorms3 <- lmer(dnorms ~ period + pledge + story + hotline + prelim.gender +agemod + hh.SMP +
                    hh.wealth1 +Base+ hh.Pesticide+ (1|ID), data=totaldata)
summary(lmdnorms3)
##### innorms lmer (perceived injunctive norms)
lminnorms1 <- lmer(innorms ~ period * follow.event_attendance + prelim.gender +agemod + 
                     hh.SMP + hh.wealth1 +Base+ hh.Pesticide + (1|ID), data=totaldata)
summary(lminnorms1)
linearHypothesis(lminnorms1, "periodfollow+periodfollow:follow.event_attendance=0")
linearHypothesis(lminnorms1, "periodfinal+periodfinal:follow.event_attendance=0")
deltaMethod(lminnorms1, g="periodfollow+interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(lminnorms1, g="periodfinal+interaction2", parameterNames=parnames) #Attendees increase short term?
lminnorms2 <- lmer(innorms ~ period + knowledgemod + prelim.gender +agemod + hh.SMP +
                     hh.wealth1 +Base+ hh.Pesticide+ (1|ID), data=totaldata)
summary(lminnorms2)
lminnorms3 <- lmer(innorms ~ period + pledge + story + hotline + prelim.gender +agemod + hh.SMP +
                     hh.wealth1 +Base+ hh.Pesticide+ (1|ID), data=totaldata)
summary(lminnorms3)
##### Knowledge lmer
# NOTE(review): uses `TPBPlots2` and lowercase `knowledge`, neither of which is
# created anywhere in this file (elsewhere the data frame is `totaldata` and the
# column is `Knowledge`) -- presumably created interactively; verify before rerun.
lmknow <- lmer(knowledge ~ period * follow.event_attendance + prelim.gender +agemod + hh.SMP + hh.wealth1+Base+ hh.Pesticide + (1|ID), data=TPBPlots2)
summary(lmknow)
linearHypothesis(lmknow, "periodfollow+periodfollow:follow.event_attendance=0")
linearHypothesis(lmknow, "periodfinal+periodfinal:follow.event_attendance=0")
# Did attendees' gain differ between the two follow-up waves?
linearHypothesis(lmknow, "periodfinal+periodfinal:follow.event_attendance=periodfollow+periodfollow:follow.event_attendance")
linearHypothesis(lmknow, "periodfinal=periodfollow")
deltaMethod(lmknow, g="periodfollow + interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(lmknow, g="periodfinal + interaction2", parameterNames=parnames) #Attendees increase short term?
deltaMethod(lmknow, g="periodfinal+interaction2-(periodfollow+interaction1)", parameterNames=parnames) #Attendees increase short term?
deltaMethod(lmknow, g="periodfinal-periodfollow", parameterNames=parnames) #Attendees increase short term?
#Visualising changes: violin + box plots of each TPB construct per wave.
# NOTE(review): `TPBPlots2` is not created in this file -- presumably the
# reshaped per-wave data frame (same layout as `totaldata`); confirm before rerun.
distplots <- TPBPlots2
# Rescale each construct by its maximum possible score so all lie in [0, 1]
# (divisors are the scale maxima -- TODO confirm against the questionnaire).
distplots$intention<-distplots$intention/10
distplots$attitudes<-distplots$attitudes/20
distplots$control<-distplots$control/15
distplots$dnorms<-distplots$dnorms/10
distplots$innorms<-distplots$innorms/20
distplots <- distplots %>%
  as_tibble() %>%
  pivot_longer(cols = matches("intention|attitudes|dnorms|innorms|control"),names_to = "names", values_to = "values")
# Order the constructs and give them presentation-ready labels.
distplots$names <- as.factor(distplots$names)
distplots$names <- fct_relevel(distplots$names,"intention", "attitudes","control","dnorms","innorms")
levels(distplots$names) <- c("Intention", "Attitudes", "Perceived control", "Perceived descriptive norm", "Perceived injunctive norm")
levels(distplots$period) <- c("Wave 1", "Wave 2", "Wave 3")
# Violin (distribution shape) with an inset boxplot per construct and wave.
ggplot(distplots, aes(x=names, y=values, fill=period))+
  geom_violin(width=1,position=position_dodge(0.8)) +
  geom_boxplot(width=0.1, color="black", position=position_dodge(0.8)) +
  labs(x = "TPB Construct", y="Scaled value") +
  scale_fill_grey(start=0.8, end=0.8) +
  theme_bw() +
  ylim(0, 1) +
  theme(legend.position="none")
# TPB GLMs: regress intention on the four TPB constructs separately within
# each survey wave, then plot the coefficients with 95% intervals.
# Period 1 (baseline)
BaselineIntentionGLM <- glm(intention ~ control + attitudes + dnorms + innorms, data=totaldata[which(totaldata$period=="base"),])
summary(BaselineIntentionGLM)
# Period 2 -- BUGFIX: previously subset on period=="base", which refit wave 1
FollowIntentionGLM <- glm(intention ~ control + attitudes + dnorms + innorms, data=totaldata[which(totaldata$period=="follow"),])
summary(FollowIntentionGLM)
# Period 3 -- BUGFIX: previously subset on period=="base", which refit wave 1
FinalIntentionGLM <- glm(intention ~ control + attitudes + dnorms + innorms, data=totaldata[which(totaldata$period=="final"),])
summary(FinalIntentionGLM)
# Collect the four slope estimates (coefficient rows 2:5) and their SEs
# from each wave into one long data frame for plotting.
TPBGLM <- rbind(as.data.frame(summary(BaselineIntentionGLM)$coefficients[2:5,1:2]),
                as.data.frame(summary(FollowIntentionGLM)$coefficients[2:5,1:2]),
                as.data.frame(summary(FinalIntentionGLM)$coefficients[2:5,1:2]))
# Keep the waves in chronological order for the facet panels.
TPBGLM$period <- factor(c(rep("base",4), rep("follow", 4), rep("final",4)),
                        levels = c("base", "follow", "final"))
# BUGFIX: `variable` is mapped in the plot below but was never created.
# Order matches the coefficient rows extracted above.
TPBGLM$variable <- rep(c("control", "attitudes", "dnorms", "innorms"), 3)
TPBGLM$SE <- TPBGLM$`Std. Error`
TPBGLM$CI <- TPBGLM$SE*1.96  # approximate 95% Wald half-width
TPBGLM$min <- TPBGLM$Estimate-TPBGLM$CI
TPBGLM$max <- TPBGLM$Estimate+TPBGLM$CI
# Dot-and-whisker plot of the coefficients, one panel per wave.
ggplot(TPBGLM, aes(y=variable, x=Estimate, xmin=min, xmax=max))+
  geom_dotplot(binaxis='y',stackdir='center',dotsize=0.4)+
  geom_errorbarh(height=0) +
  facet_grid(.~period) +
  geom_vline(xintercept=0, linetype="dotted")
#############################################################################
######### Imputation using Mice and subsequent linear modelling ##########
library(ggplot2)
library(tidyverse)
library(mice)
library(reshape2)
library(car)
library(carEx)
library(lme4)
library(broom.mixed)
micedata <- read.csv("Raw behavior data for SNA.csv")
D <- 50 #Number of imputations
#Conduct multiple imputation by predictive mean matching; fixed seed for
#reproducibility
miceImp <- mice(micedata, m=D, meth='pmm', seed=503, maxit=20)
plot(miceImp)  # trace plots to check chain convergence
#diagnostics: compare imputed vs observed distributions
mice::bwplot(miceImp, story2)
densityplot(miceImp)
xyplot(miceImp, hotline1 ~ base.intention, pch=18,cex=1)
#lengthen temporarily to calculate a total knowledge score
long.data <- complete(miceImp, action="long", include=TRUE)
# Sum the three knowledge items per wave into a total score.
# NOTE(review): positional column indices 24:26 / 27:29 assume a fixed column
# order in the CSV -- fragile; verify if the input file changes.
long.data$follow.Knowledge <- apply(long.data[,24:26], 1, sum)
long.data$final.Knowledge <- apply(long.data[,27:29], 1, sum)
long.data$base.hotline <- 0 #before the intervention all knowledge is 0
long.data$base.story <- 0
long.data$base.pledge <- 0
long.data$base.Knowledge <- 0
wide.data <- as.mids(long.data)  # back to a mids object with derived columns
id <- list() # Create a list for storing completed imputed data sets
# One list per outcome/model variant; filled by the loop below and pooled later.
intention.m1 <- list() # Create a list for storing fitted models
intention.m2 <- list()
attitudes.m1 <- list()
attitudes.m2 <- list()
control.m1 <- list()
control.m2 <- list()
dnorms.m1 <- list()
dnorms.m2 <- list()
innorms.m1 <- list()
innorms.m2 <- list()
knowledge.m <- list()
# For each of the D imputed datasets: reshape wide -> respondent-by-wave long
# format, then fit the same two lmer model variants per TPB construct
# (m1: wave x attendance interaction; m2: campaign-element recall).
for(i in 1:D){
  # Complete the data
  id[[i]] <- complete(wide.data, action=i) %>%
    # Reshape: "<period>.<construct>" columns -> one row per person-wave
    as_tibble(rownames = "id") %>%
    pivot_longer(cols = matches("attitudes|control|dnorms|innorms|intention|knowledge|pledge|story|hotline"),
                 names_to = "names", values_to = "values") %>%
    separate(col = names, into = c("period", "variable"), sep = "\\.",
             fill = "left") %>%
    pivot_wider(names_from = variable, values_from = values) %>%
    mutate(period = factor(period, levels = c("base", "follow", "final")))
  # Scale knowledge by its SD so its coefficient is a per-SD effect.
  id[[i]]$Knowledgemod <- id[[i]]$Knowledge/sd(id[[i]]$Knowledge)
  # Fit models (in this case, a model for intention as a function of event attendance)
  intention.m1[[i]] <- lmer(intention ~ period*follow.event_attendance + prelim.gender +
                              hh.SMP + agemod + hh.wealth1 + Base + hh.Pesticide + (1|id), data=id[[i]])
  intention.m2[[i]] <- lmer(intention ~ period + pledge + hotline +story + prelim.gender +
                              hh.SMP + agemod + hh.wealth1 + Base + hh.Pesticide + (1|id), data=id[[i]])
  #attitudes
  attitudes.m1[[i]] <- lmer(attitudes ~ period*follow.event_attendance + prelim.gender +
                              hh.SMP + agemod + hh.wealth1 + Base + hh.Pesticide + (1|id), data=id[[i]])
  attitudes.m2[[i]] <- lmer(attitudes ~ period + pledge + hotline +story + prelim.gender +
                              hh.SMP + agemod + hh.wealth1 + Base + hh.Pesticide + (1|id), data=id[[i]])
  #control
  control.m1[[i]] <- lmer(control ~ period*follow.event_attendance + prelim.gender +
                            hh.SMP + agemod + hh.wealth1 + Base + hh.Pesticide + (1|id), data=id[[i]])
  control.m2[[i]] <- lmer(control ~ period + pledge + hotline +story + prelim.gender +
                            hh.SMP + agemod + hh.wealth1 + Base + hh.Pesticide + (1|id), data=id[[i]])
  #dnorms
  dnorms.m1[[i]] <- lmer(dnorms ~ period*follow.event_attendance + prelim.gender +
                           hh.SMP + agemod + hh.wealth1 + Base + hh.Pesticide + (1|id), data=id[[i]])
  dnorms.m2[[i]] <- lmer(dnorms ~ period + pledge + hotline +story + prelim.gender +
                           hh.SMP + agemod + hh.wealth1 + Base + hh.Pesticide + (1|id), data=id[[i]])
  #innorms
  innorms.m1[[i]] <- lmer(innorms ~ period*follow.event_attendance + prelim.gender +
                            hh.SMP + agemod + hh.wealth1 + Base + hh.Pesticide + (1|id), data=id[[i]])
  innorms.m2[[i]] <- lmer(innorms ~ period +pledge + hotline +story + prelim.gender +
                            hh.SMP + agemod + hh.wealth1 + Base + hh.Pesticide + (1|id), data=id[[i]])
  #knowledge (attendance model only)
  knowledge.m[[i]] <- lmer(Knowledge ~ period*follow.event_attendance + prelim.gender +
                             hh.SMP + agemod + hh.wealth1 + Base + hh.Pesticide + (1|id), data=id[[i]])
}
# Pool each model list across the D imputations with Rubin's rules (mice::pool),
# then test the same attendee-change hypotheses as in the raw-data analysis.
#intention
intention.rep1 <- as.mira(intention.m1) # Convert model list to a mira object so that it works with pool()
intention.pooled1 <- pool(intention.rep1) # Pool results across model list (e.g. pooled effect sizes and variances)
intention.rep2 <- as.mira(intention.m2)
intention.pooled2 <- pool(intention.rep2)
summary(intention.pooled1)
summary(intention.pooled2)
# Parameter names for deltaMethod(); first 10 taken from the pooled summary,
# interactions renamed for readable linear-combination strings.
parnames <- c(levels(summary(intention.pooled1)$term)[1:10], "interaction1", "interaction2")
linearHypothesis(intention.rep1, "periodfollow+periodfollow:follow.event_attendance = 0") #Attendees increase short term?
linearHypothesis(intention.rep1, "periodfinal+periodfinal:follow.event_attendance = 0") #Attendees increase long term?
deltaMethod(intention.rep1, g="periodfollow + interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(intention.rep1, g="periodfinal + interaction2", parameterNames=parnames) #Attendees increase short term?
#attitudes
attitudes.rep1 <- as.mira(attitudes.m1) # Convert model list to a mira object so that it works with pool()
attitudes.pooled1 <- pool(attitudes.rep1) # Pool results across model list (e.g. pooled effect sizes and variances)
attitudes.rep2 <- as.mira(attitudes.m2)
attitudes.pooled2 <- pool(attitudes.rep2)
summary(attitudes.pooled1)
summary(attitudes.pooled2)
linearHypothesis(attitudes.rep1, "periodfollow+periodfollow:follow.event_attendance = 0") #Attendees increase short term?
linearHypothesis(attitudes.rep1, "periodfinal+periodfinal:follow.event_attendance = 0") #Attendees increase long term?
deltaMethod(attitudes.rep1, g="periodfollow + interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(attitudes.rep1, g="periodfinal + interaction2", parameterNames=parnames) #Attendees increase short term?
#control
control.rep1 <- as.mira(control.m1) # Convert model list to a mira object so that it works with pool()
control.pooled1 <- pool(control.rep1) # Pool results across model list (e.g. pooled effect sizes and variances)
control.rep2 <- as.mira(control.m2)
control.pooled2 <- pool(control.rep2)
summary(control.pooled1)
summary(control.pooled2)
linearHypothesis(control.rep1, "periodfollow+periodfollow:follow.event_attendance = 0") #Attendees increase short term?
linearHypothesis(control.rep1, "periodfinal+periodfinal:follow.event_attendance = 0") #Attendees increase long term?
deltaMethod(control.rep1, g="periodfollow + interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(control.rep1, g="periodfinal + interaction2", parameterNames=parnames) #Attendees increase short term?
#dnorms
dnorms.rep1 <- as.mira(dnorms.m1) # Convert model list to a mira object so that it works with pool()
dnorms.pooled1 <- pool(dnorms.rep1) # Pool results across model list (e.g. pooled effect sizes and variances)
dnorms.rep2 <- as.mira(dnorms.m2)
dnorms.pooled2 <- pool(dnorms.rep2)
summary(dnorms.pooled1)
summary(dnorms.pooled2)
linearHypothesis(dnorms.rep1, "periodfollow+periodfollow:follow.event_attendance = 0") #Attendees increase short term?
linearHypothesis(dnorms.rep1, "periodfinal+periodfinal:follow.event_attendance = 0") #Attendees increase long term?
deltaMethod(dnorms.rep1, g="periodfollow + interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(dnorms.rep1, g="periodfinal + interaction2", parameterNames=parnames) #Attendees increase short term?
#innorms
innorms.rep1 <- as.mira(innorms.m1) # Convert model list to a mira object so that it works with pool()
innorms.pooled1 <- pool(innorms.rep1) # Pool results across model list (e.g. pooled effect sizes and variances)
innorms.rep2 <- as.mira(innorms.m2)
innorms.pooled2 <- pool(innorms.rep2)
summary(innorms.pooled1)
summary(innorms.pooled2)
linearHypothesis(innorms.rep1, "periodfollow+periodfollow:follow.event_attendance = 0") #Attendees increase short term?
linearHypothesis(innorms.rep1, "periodfinal+periodfinal:follow.event_attendance = 0") #Attendees increase long term?
deltaMethod(innorms.rep1, g="periodfollow + interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(innorms.rep1, g="periodfinal + interaction2", parameterNames=parnames) #Attendees increase short term?
#knowledge (extra contrasts compare gains between the two follow-up waves)
knowledge.rep <- as.mira(knowledge.m) # Convert model list to a mira object so that it works with pool()
knowledge.pooled <- pool(knowledge.rep) # Pool results across model list (e.g. pooled effect sizes and variances)
summary(knowledge.pooled)
linearHypothesis(knowledge.rep, "periodfollow+periodfollow:follow.event_attendance = 0") #Attendees increase short term?
linearHypothesis(knowledge.rep, "periodfinal+periodfinal:follow.event_attendance = 0") #Attendees increase long term?
linearHypothesis(knowledge.rep, "periodfinal+periodfinal:follow.event_attendance = periodfollow+ periodfollow:follow.event_attendance")
linearHypothesis(knowledge.rep, "periodfinal = periodfollow")
deltaMethod(knowledge.rep, g="periodfollow + interaction1", parameterNames=parnames) #Attendees increase short term?
deltaMethod(knowledge.rep, g="periodfinal + interaction2", parameterNames=parnames) #Attendees increase short term?
deltaMethod(knowledge.rep, g="periodfinal+interaction2-(periodfollow+interaction1)", parameterNames=parnames) #Attendees increase short term?
deltaMethod(knowledge.rep, g="periodfinal-periodfollow", parameterNames=parnames) #Attendees increase short term?
#TPB GLMs on the imputed data: one GLM per wave per imputation (using the
#wide per-wave columns), then pooled across imputations with Rubin's rules.
Baseline = list()
Follow = list()
Final = list()
for(i in 1:D){
  glm.data <- complete(wide.data, action=i)
  Baseline[[i]] <- glm(base.intention ~ base.control + base.attitudes + base.dnorms + base.innorms, data=glm.data)
  Follow[[i]] <- glm(follow.intention ~ follow.control + follow.attitudes + follow.dnorms + follow.innorms, data=glm.data)
  Final[[i]] <- glm(final.intention ~ final.control + final.attitudes + final.dnorms + final.innorms, data=glm.data)
}
base.rep <- as.mira(Baseline) # Convert model list to a mira object so that it works with pool()
follow.rep <- as.mira(Follow)
final.rep <- as.mira(Final)
base.pooled <- pool(base.rep) # Pool results across model list (e.g. pooled effect sizes and variances)
follow.pooled <- pool(follow.rep)
final.pooled <- pool(final.rep)
summary(base.pooled)
summary(follow.pooled)
summary(final.pooled)
# Persist the mids object for the SAOM section, plus the full workspace.
saveRDS(wide.data, file="miceImp.rds")
save.image("Imp.data.RData")
# NOTE(review): clears the entire workspace -- the histogram section below
# references `long.data`, so it can only run after reloading Imp.data.RData.
rm(list=ls())
############## Knowledge histograms
# Descriptives and plots of campaign-knowledge scores by attendance and wave.
# NOTE(review): depends on `long.data`, which was removed by rm(list=ls())
# above -- reload Imp.data.RData before running this section.
#average knowledge in wave 2
mean(long.data$follow.Knowledge[which(long.data$follow.event_attendance==1)], na.rm=T) #attendees
sd(long.data$follow.Knowledge[which(long.data$follow.event_attendance==1)], na.rm=T) #attendees
mean(long.data$follow.Knowledge[which(long.data$follow.event_attendance==0 & long.data$follow.Knowledge>0)], na.rm=T) #non-attendees
sd(long.data$follow.Knowledge[which(long.data$follow.event_attendance==0 & long.data$follow.Knowledge>0)], na.rm=T) #non-attendees
#average knowledge in wave 2
mean(long.data$final.Knowledge[which(long.data$follow.event_attendance==1)], na.rm=T) #attendees
sd(long.data$final.Knowledge[which(long.data$follow.event_attendance==1)], na.rm=T) #attendees
mean(long.data$final.Knowledge[which(long.data$follow.event_attendance==0 & long.data$final.Knowledge>0)], na.rm=T) #non-attendees
sd(long.data$final.Knowledge[which(long.data$follow.event_attendance==0 & long.data$final.Knowledge>0)], na.rm=T) #non-attendees
#how many recall? Count non-attendees with any recall, per imputation.
# NOTE(review): loops are hardcoded to 20 imputations while D was 50 earlier
# in this file -- presumably from an older run with m=20; verify before rerun.
mean.wave2 = rep(0,20)
mean.wave3 = rep(0,20)
mean.pledge= rep(0,20)
mean.hotline= rep(0,20)
mean.story= rep(0,20)
for(i in 1:20){
  mean.wave2[i] <- length(long.data$follow.Knowledge[which(long.data$follow.event_attendance==0 & long.data$follow.Knowledge>0 & long.data$.imp==i)])
  mean.wave3[i] <- length(long.data$final.Knowledge[which(long.data$follow.event_attendance==0 & long.data$final.Knowledge>0 & long.data$.imp==i)])
  mean.pledge[i] <- length(long.data$final.pledge[which(long.data$follow.event_attendance==0 & long.data$final.pledge>0 & long.data$.imp==i)])
  mean.story[i] <- length(long.data$final.story[which(long.data$follow.event_attendance==0 & long.data$final.story>0 & long.data$.imp==i)])
  mean.hotline[i] <- length(long.data$final.hotline[which(long.data$follow.event_attendance==0 & long.data$final.hotline>0 & long.data$.imp==i)])
}
# Mean/SD of the per-imputation counts.
mean(mean.wave2)
sd(mean.wave2)
mean(mean.wave3)
sd(mean.wave3)
mean(mean.pledge)
sd(mean.pledge)
mean(mean.hotline)
sd(mean.hotline)
mean(mean.story)
sd(mean.story)
#plot: knowledge-score distribution per wave and attendance status.
# Keep only the imputed datasets (.imp > 0 excludes the original data).
plot.data <- long.data %>%
  filter(.imp>0) %>%
  select(.id, follow.Knowledge, final.Knowledge, follow.event_attendance, .imp) %>%
  as_tibble(rownames=".id") %>%
  pivot_longer(cols = matches("Knowledge"), names_to = "names", values_to = "values") %>%
  separate(col = names, into = c("period", "variable"), sep = "\\.",
           fill = "left") %>%
  pivot_wider(names_from = variable, values_from = values)
plot.data$period <- fct_relevel(plot.data$period, "follow")
levels(plot.data$period) <-c("Wave 2", "Wave 3")
# Tabulate counts of each knowledge score (1..12) per attendance/wave/imputation.
plot.data2 <- data.frame(knowledge=rep(c(0:12),4),attendance=rep(c(rep(0,13),rep(1,13)),2), wave=c(rep("Wave 2",26),rep("Wave 3",26)))
for(i in 1:12){
  for(x in 0:1){
    for(y in 1:2){
      for(imp in 1:20){
        z <- c("Wave 2", "Wave 3")[y]
        # try() guards against empty strata; one count column per imputation
        try(plot.data2[which(plot.data2$knowledge==i & plot.data2$attendance==x & plot.data2$wave==z), imp+3] <-
              length(plot.data$.id[which(plot.data$Knowledge==i & plot.data$follow.event_attendance==x & plot.data$period==z & plot.data$.imp==imp)]), silent=T)
      }
    }
  }
}
# Mean count across imputations with +/- 1 SD error bars.
plot.data2$mean <- rowMeans(plot.data2[,4:23], na.rm=T)
plot.data2$sd <- apply(plot.data2[,4:23],1, sd, na.rm=T)
plot.data2$max <- plot.data2$mean+plot.data2$sd
plot.data2$min <- plot.data2$mean-plot.data2$sd
plot.data2$attendance <- as.factor(plot.data2$attendance)
plot.data2$knowledge <- as.factor(plot.data2$knowledge)
ggplot(plot.data2[which(plot.data2$knowledge>0),], aes(x=knowledge, y=mean, fill=attendance,ymin=min, ymax=max)) +
  geom_bar(stat="identity", position=position_dodge()) +
  geom_errorbar(width=.2, position=position_dodge(.9)) +
  facet_wrap(.~wave) +
  scale_fill_grey(name="Attendee", labels=c("No", "Yes")) +
  theme_bw() +
  labs(y="Mean number of individuals", x="Amount of knowledge")
###############################################################################################
############ Preparing to impute and estimate the SAOMs ################################
#This code is adapted from Krause et.al.
#https://www.stats.ox.ac.uk/~snijders/siena/MultipleImputationNetworkAndBehavior.html#imputing-the-behavior-with-mice
library(RSiena)
library(mice)
library(igraph)
D <- 50 #set number of imputations
N <- 365 #set number of actors
set.seed(1325798)
# Social network adjacency matrix; drop the first row/column (header/id column).
network <- as.matrix(read.csv("Network matrix.csv")[-1,-1])
#For robustness check using the updated network use:
# network <- as.matrix(read.csv("Updated matrix.csv")[-1,-1])
miceImp <- readRDS("miceImp.rds")  # mids object saved by the imputation section
networkdata <- read.csv("Raw behavior data for SNA.csv")
#Define some functions
# Repeatedly run siena07 until the overall maximum convergence ratio drops
# below `threshold`, restarting from the previous answer and enlarging the
# phase-2/3 iteration counts on each retry.
siena07ToConvergence <- function(alg, dat, eff, ans0=NULL, threshold, nodes=10,
                                 cluster = TRUE, n2startPrev = 1000, ...) {
  # parameters are:
  # alg, dat, eff: Arguments for siena07: algorithm, data, effects object.
  # ans0: previous answer, if available; used as prevAns in siena07.
  # threshold: largest satisfactory value
  # for overall maximum convergence ratio (indicating convergence).
  # nodes: number of processes for parallel processing.
  numr <- 0
  if (is.null(ans0)) {
    ans <- siena07(alg, data = dat, effects = eff, prevAns = ans0,nbrNodes = nodes,
                   returnDeps = TRUE, useCluster = cluster, ...) # the first run
  } else {
    # Warm start: shorten phase 2 when continuing from a previous answer.
    alg$nsub <- 1
    alg$n2start <- n2startPrev
    ans <- siena07(alg, data = dat, effects = eff, prevAns = ans0,nbrNodes = nodes,
                   returnDeps = TRUE, useCluster = cluster, ...)
  }
  repeat {
    #save(ans, file = paste("ans",numr,".RData",sep = "")) # to be safe
    numr <- numr + 1 # count number of repeated runs
    tm <- ans$tconv.max # convergence indicator
    cat(numr,"tconv max:", round(tm,3),"\n") # report how far we are
    if (tm < threshold) {break} # success
    if (tm > 10) {stop()} # divergence without much hope
    # of returning to good parameter values
    if (numr > 100) {stop()} # now it has lasted too long
    # Grow the iteration budget with each retry before re-estimating.
    alg$nsub <- 1
    alg$n2start <- 1000 + numr * 1000
    alg$n3 <- 2000 + numr * 1000
    ans <- siena07(alg, data = dat,effects = eff,prevAns = ans, nbrNodes = nodes,
                   returnDeps = TRUE, useCluster = cluster, ...)
  }
  if (tm > threshold) {
    stop("Warning: convergence inadequate.\n")
  }
  ans
}
#define some general covariates (constant actor covariates for RSiena)
Age <- coCovar(networkdata$prelim.age)
Wealth <- coCovar(networkdata$hh.wealth1)
SMP <- coCovar(networkdata$hh.SMP)
pesticide <- coCovar(networkdata$hh.Pesticide)
Gender <- coCovar(networkdata$prelim.gender)
# Time dummy: 0 for the first period, 1 for the second.
Dummy <-varCovar(cbind(rep(0,365), rep(1,365)))
###################################################################################################
########### Imputation and estimation of the SAOM for intention #################################
network1 <- network
#####Stationary SAOM
visits <- sienaDependent(array(c(network1, network1), dim = c(N,N, 2)) ,
allowOnly = FALSE)
a2 <- coCovar(networkdata$follow.intention) # the 2nd wave incomplete behavior as covariate
stationaryDataList <- list()
for (d in 1:D) {
intention <- sienaDependent(cbind(complete(miceImp,d)$base.intention,
complete(miceImp,d)$base.intention),
type = "behavior", allowOnly = FALSE)
stationaryDataList[[d]] <- sienaDataCreate(visits,intention,a2, Age, Wealth, Gender, pesticide, SMP)
}
Data.stationary <- sienaGroupCreate(stationaryDataList)
effects.stationary <- getEffects(Data.stationary)
effects.stationary[effects.stationary$shortName == 'recip',]$include <- FALSE
# 2nd wave as covariate
effects.stationary <- includeEffects(effects.stationary, effFrom,
name = "intention", interaction1 = "a2")
#beh control
effects.stationary <- includeEffects(effects.stationary, name = "intention",
indeg, interaction1 = "visits")
effects.stationary <- includeEffects(effects.stationary, name = "intention",
outdeg, interaction1 = "visits")
# influence
effects.stationary <- includeEffects(effects.stationary, name = "intention",
avSim, interaction1 = "visits")
#Control
effects.stationary <- includeEffects(effects.stationary, name = "intention",
effFrom, interaction1 = "SMP")
effects.stationary <- includeEffects(effects.stationary, name = "intention",
effFrom, interaction1 = "Gender")
effects.stationary <- includeEffects(effects.stationary, name = "intention",
effFrom, interaction1 = "Wealth")
effects.stationary <- includeEffects(effects.stationary, name = "intention",
effFrom, interaction1 = "Age")
effects.stationary <- includeEffects(effects.stationary, name = "intention",
effFrom, interaction1 = "pesticide")
for (d in 1:D) { #fix the rate function
effects.stationary <- setEffect(effects.stationary, Rate, initialValue = 0.01,
name = "visits",fix = TRUE,
group = d,type = "rate",test = FALSE)
effects.stationary <- setEffect(effects.stationary, Rate, initialValue = 8,
name = "intention",fix = TRUE,
group = d,type = "rate",test = FALSE)
}
# ---- Algorithm settings and estimation of the stationary SAOM ----
# Method-of-moments estimation (maxlike = FALSE) with a long phase 3
# (n3 = 3000) for reliable convergence diagnostics.
estimation.options.st <- sienaAlgorithmCreate(useStdInits = FALSE,
                                              seed = 1325798,
                                              n3 = 3000, maxlike = FALSE,
                                              cond = FALSE, diagonalize = 0.6,
                                              firstg = 0.02,
                                              behModelType = c(intention = 2),
                                              lessMem = TRUE)
#estimate the SAOM
# NOTE(review): siena07ToConvergence is a helper defined earlier in this file
# (re-runs siena07 until the convergence ratio drops below `threshold`).
period0saom <- siena07ToConvergence(alg = estimation.options.st,
                                    dat = Data.stationary, nodes=10,
                                    eff = effects.stationary, threshold = 0.2)
save.image('./intention/main/conmi.RData')
# Separate algorithm object for imputation: maximum-likelihood simulation
# only (simOnly = TRUE, nsub = 0), keeping the last of n3 = 10 chains.
imputation.options <- sienaAlgorithmCreate(useStdInits = FALSE,
                                           seed = 1325798,
                                           cond = FALSE,
                                           behModelType = c(intention = 2),
                                           maxlike = TRUE,
                                           nsub = 0,
                                           simOnly = TRUE,
                                           n3 = 10)
# ---- Build imputation data and impute the first wave (intention) ----
stationaryImpDataList <- list()
for (d in 1:D) {
  # Adding 10 converts observed 0/1 ties into RSiena structural codes
  # (10 = structural zero, 11 = structural one), so the network cannot
  # change during the maximum-likelihood simulation.
  n1 <- network1
  n1 <- n1 + 10
  n1 <- ifelse(n1>11, 11, n1)
  diag(n1) <- 0
  n2 <- n1
  # Toggle a single (structurally present) tie between the two copies so
  # the ML algorithm has at least one change to simulate.
  tieList <- c(1:(nrow(n1)**2))[c(n1 == 11)]
  tieList <- tieList[!is.na(tieList)]
  changedTie <- sample(tieList,1)
  n1[changedTie] <- 0
  n2[changedTie] <- 1
  visits <- sienaDependent(array(c(n1,n2), dim = c(N,N, 2)),
                           allowOnly = FALSE )
  # Perturb one observed mid-scale score (8 -> 7 or 9) so that the two
  # behavior columns are not identical.
  i1 <- networkdata$base.intention
  i1.3s <- c(1:N)[i1 == 8 & !is.na(i1)]
  int <- sample(i1.3s,1)
  i1change <- complete(miceImp,d)$base.intention
  i1change[int] <- sample(c(7,9),1)
  intention <- sienaDependent(cbind(i1change,i1), type = "behavior",
                              allowOnly = FALSE)
  # NOTE(review): `Atd` is defined earlier in the file and was NOT part of
  # Data.stationary above — confirm effects.stationary still matches this
  # data set when passed to siena07 below.
  stationaryImpDataList[[d]] <- sienaDataCreate(visits, intention,a2,Atd, Age,Gender, SMP, pesticide, Wealth)
}
Data.stationary.imp <- sienaGroupCreate(stationaryImpDataList)
#impute first wave
sims <- siena07(imputation.options, data = Data.stationary.imp,
                effects = effects.stationary,
                prevAns = period0saom,
                returnDeps = TRUE)$sims[[10]]
# Extract the simulated behavior as the wave-1 imputation for each d;
# sims[[d]][[1]][[2]] is presumably the behavior variable of group d in the
# 10th (last) simulated chain — verify against the RSiena sims structure.
int1imp <- matrix(NA,N,D)
for (d in 1:D) {
  int1imp[,d] = sims[[d]][[1]][[2]]
}
save.image('./intention/main/conmi.RData')
##########################################################################
################### Imputing Later Waves #################################
##########################################################################
# Matrices collecting the D imputed behavior vectors for waves 2 and 3.
int2imp <- matrix(NA,N,D)
int3imp <- matrix(NA,N,D)
# Create two copies of the observed network that differ by exactly one tie,
# so RSiena sees a (minimal) change between the two panel waves.
n1 <- network1
diag(n1) <- 0
n2 <- n1
tieList <- c(1:(nrow(n1)**2))[c(n1 == 1)]
tieList <- tieList[!is.na(tieList)]
changedTie <- sample(tieList,1)
# BUGFIX: the original did `n1[changedTie] <- 1`, a no-op because tieList
# only contains cells where n1 == 1. The parallel dnorms/innorms sections
# drop the tie from n1 and keep it in n2; do the same here for consistency
# (either direction yields the required one-tie difference).
n1[changedTie] <- 0
n2[changedTie] <- 1
# Method-of-moments algorithm used for the period models below.
estimation.options <- sienaAlgorithmCreate(useStdInits = FALSE,
                                           seed = 1325798,
                                           n3 = 3000, maxlike = FALSE,
                                           cond = FALSE, diagonalize = 0.3,
                                           firstg = 0.02,
                                           behModelType = c(intention = 2),
                                           lessMem = TRUE)
# ---- Impute waves 2 and 3 of intention, once per mice imputation d ----
# For each d: fit a two-wave SAOM from the (imputed) previous wave to the
# observed next wave, then draw one ML simulation as the imputation.
for (d in 1:D) {
  cat('imputation',d,'\n')
  # now impute wave2
  visits <- sienaDependent(array(c(n1,n2),
                                 dim = c(N,N,2)))
  # Wave 1 = imputed values from the stationary model; wave 2 = observed
  # (incomplete) follow-up intention.
  intention <- sienaDependent(cbind(int1imp[,d], networkdata$follow.intention), type = "behavior")
  # Knowledge standardized by its SD within imputation d.
  Know <- coCovar(complete(miceImp, d)$Knowledge1/sd(complete(miceImp, d)$Knowledge1,na.rm=T))
  # Observed wave-3 intention enters as a covariate (a3) to condition on
  # the future observation, as in the stationary model's a2.
  a3 <- coCovar(networkdata$final.intention)
  Data.w2 <- sienaDataCreate(visits, intention, Age, Wealth, Know, a3, Gender, SMP, pesticide)
  effects.twoWaves <- getEffects(Data.w2)
  effects.twoWaves[effects.twoWaves$shortName == 'recip',]$include <- FALSE
  #influence
  effects.twoWaves <- includeEffects(effects.twoWaves, avSim,
                                     name = 'intention',
                                     interaction1 = "visits")
  #Knowledge
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="intention", interaction1="Know")
  #Control
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="intention", interaction1="Gender")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="intention", interaction1="Age")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="intention", interaction1="Wealth")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="intention", interaction1="a3")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "intention",effFrom, interaction1 = "SMP")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "intention",effFrom, interaction1 = "pesticide")
  #beh control
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "intention",
                                     indeg, interaction1 = "visits")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "intention",
                                     outdeg, interaction1 = "visits")
  #fix the rate function
  effects.twoWaves <- setEffect(effects.twoWaves, Rate, initialValue = 0.01,
                                name = "visits",fix = TRUE,
                                type = "rate",test = FALSE)
  # Warm-start from the previous imputation's estimate (ans0) for d > 1.
  if (d == 1) {
    period1saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w2,nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2)
  } else {
    period1saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w2, nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2,
                                        ans0 = period1saom)
  }
  # ML simulation conditional on the fitted model; keep the last chain.
  sims <- siena07(imputation.options, data = Data.w2,
                  effects = effects.twoWaves,
                  prevAns = period1saom,
                  returnDeps = TRUE)$sims[[10]]
  # sims[[2]] is presumably the simulated behavior variable — verify
  # against the RSiena sims structure.
  int2imp[,d] <- sims[[2]]
  # impute wave 3
  visits <- sienaDependent(array( c(n1,n2),
                                  dim = c(N,N, 2)))
  intention <- sienaDependent(cbind(int2imp[,d],networkdata$final.intention), type = "behavior")
  Know <- coCovar(complete(miceImp, d)$Knowledge2/sd(complete(miceImp, d)$Knowledge2,na.rm=T))
  Data.w3 <- sienaDataCreate(visits, intention, Age, Wealth, Atd, Know, Gender, SMP, pesticide)
  effects.twoWaves <- getEffects(Data.w3)
  effects.twoWaves[effects.twoWaves$shortName == 'recip',]$include <- FALSE
  #influence
  effects.twoWaves <- includeEffects(effects.twoWaves, avSim,
                                     name = 'intention',
                                     interaction1 = "visits")
  #Knowledge
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="intention", interaction1="Know")
  #Control
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="intention", interaction1="Gender")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="intention", interaction1="Age")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="intention", interaction1="Wealth")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "intention",effFrom, interaction1 = "SMP")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "intention",effFrom, interaction1 = "pesticide")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "intention",
                                     indeg, interaction1 = "visits")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "intention",
                                     outdeg, interaction1 = "visits")
  #fix the rate function
  effects.twoWaves <- setEffect(effects.twoWaves, Rate, initialValue = 0.01,
                                name = "visits",fix = TRUE,
                                type = "rate",test = FALSE)
  if (d == 1) {
    period2saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w3,nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2)
  } else {
    period2saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w3,nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2,
                                        ans0 = period2saom)
  }
  sims <- siena07(imputation.options, data = Data.w3,
                  effects = effects.twoWaves,
                  prevAns = period2saom,
                  returnDeps = TRUE)$sims[[10]]
  int3imp[,d] <- sims[[2]]
  # Checkpoint after each imputation (the loop is expensive).
  save.image('./intention/main/conmi.RData')
}
##############################################################################
############################# 4. Estimating the models ######################
##############################################################################
#modify the network slightly in each wave
# Mark one (absent) tie as missing (NA) in wave 2, and a second one in
# wave 3, so the three network waves are not byte-identical.
n1 <- network1
tieList <- c(1:(nrow(n1)**2))[c(n1 == 0)]
tieList <- tieList[!is.na(tieList)]
changedTie <- sample(tieList,1)
n2 <- n1
n2[changedTie] <- NA
n3 <- n2
changedTie <- sample(tieList,1)
n3[changedTie] <- NA
# Containers for the D final model fits (constantDataList appears unused
# downstream in this section — kept for parity with the other sections).
consaomResults <- list()
constantDataList <- list()
# ---- Final analysis model on the D completed data sets (intention) ----
# Three-wave SAOM with a constant (fixed-rate) network; estimated once per
# imputation d, warm-starting each fit from the previous one.
for (d in 1:D) {
  cat('Imputation',d,'\n')
  visits <- sienaDependent(array(c(n1, n2, n3), dim = c(N,N, 3)) ,
                           allowOnly = FALSE)
  # All three behavior waves are now the imputed values.
  intention <- sienaDependent(cbind(int1imp[,d], int2imp[,d], int3imp[,d]),
                              type = "behavior", allowOnly = FALSE)
  # Time-varying knowledge covariate, SD-standardized per imputation.
  Know <- varCovar(cbind((complete(miceImp, d)$Knowledge1/sd(complete(miceImp, d)$Knowledge1,na.rm=T)),
                         (complete(miceImp, d)$Knowledge2/sd(complete(miceImp, d)$Knowledge2,na.rm=T))))
  # NOTE(review): `Dummy` and `Atd` are defined earlier in the file.
  Data <- sienaDataCreate(visits,intention,Dummy,Age,Wealth,Know, Atd, Gender, SMP, pesticide)
  effects.constant <- getEffects(Data)
  effects.constant[effects.constant$shortName == 'recip',]$include <- FALSE
  #Dummy
  effects.constant <- includeEffects(effects.constant, effFrom, name="intention", interaction1="Dummy")
  #Controls
  effects.constant <- includeEffects(effects.constant, name = "intention",effFrom, interaction1 = "SMP")
  effects.constant <- includeEffects(effects.constant, effFrom, name="intention", interaction1="Age")
  effects.constant <- includeEffects(effects.constant, name = "intention",effFrom, interaction1 = "Wealth")
  effects.constant <- includeEffects(effects.constant, effFrom, name="intention", interaction1="pesticide")
  effects.constant <- includeEffects(effects.constant, name = "intention",effFrom, interaction1 = "Gender")
  effects.constant <- includeEffects(effects.constant, name = "intention",
                                     indeg, interaction1 = "visits")
  effects.constant <- includeEffects(effects.constant, name = "intention",
                                     outdeg, interaction1 = "visits")
  #Knowledge
  effects.constant <- includeEffects(effects.constant, effFrom, name="intention", interaction1="Know")
  # influence
  effects.constant <- includeEffects(effects.constant, name = "intention",
                                     avSim, interaction1 = "visits")
  # Moderation of social influence (avSim) by knowledge and by the dummy.
  effects.constant <- includeInteraction(effects.constant, avSim, effFrom,
                                         name="intention", interaction1=c("visits","Know"))
  effects.constant <- includeInteraction(effects.constant, avSim, effFrom,
                                         name="intention", interaction1=c("visits","Dummy"))
  #fix the rate function
  effects.constant <- setEffect(effects.constant, Rate, initialValue = 0.01,
                                name = "visits",fix = TRUE,
                                type = "rate",test = FALSE)
  effects.constant <- setEffect(effects.constant, Rate, initialValue = 0.01,
                                name = "visits",fix = TRUE,
                                type = "rate",test = FALSE, period=2)
  estimation.options <- sienaAlgorithmCreate(useStdInits = FALSE,
                                             seed = 1325798,
                                             n3 = 3000, maxlike = FALSE,
                                             behModelType = c(intention = 2),
                                             lessMem = FALSE, cond=F)
  if (d == 1) {
    consaomResults[[d]] <- siena07ToConvergence(alg = estimation.options,nodes=10,
                                                dat = Data, eff = effects.constant,
                                                threshold = 0.2)
  } else {
    consaomResults[[d]] <- siena07ToConvergence(alg = estimation.options,nodes=10,
                                                dat = Data, eff = effects.constant,
                                                threshold = 0.2,
                                                ans0 = consaomResults[[d - 1]])
  }
  save.image('./intention/main/conmi.RData')
}
# Persist the per-imputation fits plus the exact imputed behaviors and
# perturbed networks, so the pooled results can be reproduced later.
saveRDS(consaomResults, file="./intention/main/Constant network fit final=5.rds")
write.csv(int1imp, "./intention/main/Constant network-Int1Imp-f20.csv")
write.csv(int2imp, "./intention/main/Constant network-Int2Imp-f20.csv")
write.csv(int3imp, "./intention/main/Constant network-Int3Imp-f20.csv")
write.csv(n1, "./intention/main/Constant network-net1.csv")
write.csv(n2, "./intention/main/Constant network-net2.csv")
write.csv(n3, "./intention/main/Constant network-net3.csv")
##Combining results
# Row-wise sample variance (denominator n - 1) of a numeric matrix or
# data frame: the between-imputation variance component in Rubin's rules.
#
# x: numeric matrix/data frame with one column per imputation (>= 2 columns).
# Returns a numeric vector with one variance per row.
rowVar <- function(x) {
  # ncol(x) is the idiomatic equivalent of dim(x)[2].
  rowSums((x - rowMeans(x))^2)/(ncol(x) - 1)
}
# ---- Pool the D estimates with Rubin's rules (intention) ----
npar <- sum(effects.constant$include)
# One mean/SE column pair per imputation.
conMIResults <- as.data.frame(matrix(,npar,(2 * D)))
for (d in 1:D) {
  names(conMIResults)[d * 2 - 1] <- paste("imp" , "mean", sep = as.character(d))
  names(conMIResults)[d * 2] <- paste("imp" , "se", sep = as.character(d))
  conMIResults[,d * 2 - 1] <- consaomResults[[d]]$theta
  conMIResults[,d * 2] <- sqrt(diag(consaomResults[[d]]$covtheta))
}
# Within-imputation variance: average of the D covariance matrices.
WDMIs <- matrix(0,npar,npar)
for (d in 1:D) {
  WDMIs <- WDMIs + consaomResults[[d]]$covtheta
}
WDMIs <- (1/D) * WDMIs
confinalResults <- as.data.frame(matrix(,npar,2))
names(confinalResults) <- c("combinedEstimate", "combinedSE")
rownames(confinalResults) <- consaomResults[[1]]$effects$effectName
# Rubin's rules: pooled estimate = mean of estimates; pooled variance =
# within variance + (1 + 1/D) * between variance.
confinalResults$combinedEstimate <- rowMeans(conMIResults[,seq(1,2*D,2)])
confinalResults$combinedSE <- sqrt(diag(WDMIs) + ((D + 1)/D) *
                                     rowVar(conMIResults[,seq(1,2*D,2)]))
write.csv(confinalResults, "./intention/main/Constant network results final.csv")
###################################################################################################
########### Imputation and estimation of the SAOM for descriptive norms ###########################
# Same pipeline as for intention, now for `dnorms`; clear everything except
# the shared inputs. (rm(list=setdiff(ls(), ...)) is a blunt instrument —
# deliberate here to free memory between the three analyses.)
rm(list=setdiff(ls(), c("miceImp", "networkdata", "D", "N", "network")))
network1 <- network
#####Stationary SAOM
# Two identical network waves: the stationary model holds the network fixed.
visits <- sienaDependent(array(c(network1, network1), dim = c(N,N, 2)) ,
                         allowOnly = FALSE)
a2 <- coCovar(networkdata$follow.dnorms) # the 2nd wave incomplete behavior as covariate
stationaryDataList <- list()
for (d in 1:D) {
  # Behavior repeated in both waves; intention enters as a covariate here.
  dnorms <- sienaDependent(cbind(complete(miceImp,d)$base.dnorms,
                                 complete(miceImp,d)$base.dnorms),
                           type = "behavior", allowOnly = FALSE)
  intention <- coCovar(complete(miceImp,d)$base.intention)
  stationaryDataList[[d]] <- sienaDataCreate(visits,dnorms,a2, Age, Gender, Wealth, pesticide, SMP, intention)
}
Data.stationary <- sienaGroupCreate(stationaryDataList)
effects.stationary <- getEffects(Data.stationary)
effects.stationary[effects.stationary$shortName == 'recip',]$include <- FALSE
# 2nd wave as covariate
effects.stationary <- includeEffects(effects.stationary, effFrom,
                                     name = "dnorms", interaction1 = "a2")
# influence
# avXAlt: effect of alters' intention (via visits ties) on ego's dnorms.
effects.stationary <- includeEffects(effects.stationary, name = "dnorms",
                                     avXAlt, interaction1 = "intention", interaction2="visits")
effects.stationary <- includeEffects(effects.stationary, name = "dnorms",
                                     indeg, interaction1 = "visits")
effects.stationary <- includeEffects(effects.stationary, name = "dnorms",
                                     outdeg, interaction1 = "visits")
#effect from attendance
effects.stationary <- includeEffects(effects.stationary, name = "dnorms",
                                     effFrom, interaction1 = "SMP")
effects.stationary <- includeEffects(effects.stationary, name = "dnorms",
                                     effFrom, interaction1 = "pesticide")
effects.stationary <- includeEffects(effects.stationary, name = "dnorms",
                                     effFrom, interaction1 = "Age")
effects.stationary <- includeEffects(effects.stationary, name = "dnorms",
                                     effFrom, interaction1 = "Wealth")
effects.stationary <- includeEffects(effects.stationary, name = "dnorms",
                                     effFrom, interaction1 = "Gender")
for (d in 1:D) { #fix the rate function
  # Same fixed rates as in the intention section: near-zero network rate,
  # high behavior rate.
  effects.stationary <- setEffect(effects.stationary, Rate, initialValue = 0.01,
                                  name = "visits",fix = TRUE,
                                  group = d,type = "rate",test = FALSE)
  effects.stationary <- setEffect(effects.stationary, Rate, initialValue = 8,
                                  name = "dnorms",fix = TRUE,
                                  group = d,type = "rate",test = FALSE)
}
# ---- Algorithm settings and estimation of the stationary SAOM (dnorms) ----
estimation.options.st <- sienaAlgorithmCreate(useStdInits = FALSE,
                                              seed = 1325798,
                                              n3 = 3000, maxlike = FALSE,
                                              cond = FALSE, diagonalize = 0.6,
                                              firstg = 0.02,
                                              behModelType = c(dnorms = 2),
                                              lessMem = TRUE)
#estimate the SAOM
period0saom <- siena07ToConvergence(alg = estimation.options.st,
                                    dat = Data.stationary, nodes=10,
                                    eff = effects.stationary, threshold = 0.2)
# ML simulation-only algorithm used for drawing imputations.
imputation.options <- sienaAlgorithmCreate(useStdInits = FALSE,
                                           seed = 1325798,
                                           cond = FALSE,
                                           behModelType = c(dnorms = 2),
                                           maxlike = TRUE,
                                           nsub = 0,
                                           simOnly = TRUE,
                                           n3 = 10)
# ---- Build imputation data and impute the first wave (dnorms) ----
stationaryImpDataList <- list()
for (d in 1:D) {
  # Structural coding of the network (10/11) so ties cannot change during
  # ML simulation; one structural tie toggled between the two copies.
  n1 <- network1
  n1 <- n1 + 10
  n1 <- ifelse(n1>11, 11, n1)
  diag(n1) <- 0
  n2 <- n1
  tieList <- c(1:(nrow(n1)**2))[c(n1 == 11)]
  tieList <- tieList[!is.na(tieList)]
  changedTie <- sample(tieList,1)
  n1[changedTie] <- 0
  n2[changedTie] <- 1
  visits <- sienaDependent(array(c(n1,n2), dim = c(N,N, 2)),
                           allowOnly = FALSE )
  # Perturb one observed mid-scale score (8 -> 7 or 9).
  i1 <- networkdata$base.dnorms
  i1.3s <- c(1:N)[i1 == 8 & !is.na(i1)]
  int <- sample(i1.3s,1)
  i1change <- complete(miceImp,d)$base.dnorms
  i1change[int] <- sample(c(7,9),1)
  dnorms <- sienaDependent(cbind(i1change,i1), type = "behavior",
                           allowOnly = FALSE)
  # NOTE(review): `intention` here is the coCovar left over from iteration
  # d = D of the stationaryDataList loop above, so every imputation data set
  # gets the SAME intention covariate instead of imputation d's — presumably
  # it should be rebuilt from complete(miceImp,d) inside this loop; verify.
  # `Atd` is likewise defined earlier in the file and absent from
  # Data.stationary.
  stationaryImpDataList[[d]] <- sienaDataCreate(visits,dnorms,a2,Atd,Age,Gender,Wealth,SMP,pesticide,intention)
}
Data.stationary.imp <- sienaGroupCreate(stationaryImpDataList)
#impute first wave
sims <- siena07(imputation.options, data = Data.stationary.imp,
                effects = effects.stationary,
                prevAns = period0saom,
                returnDeps = TRUE)$sims[[10]]
# Collect the simulated behavior as the wave-1 imputation per d.
int1imp <- matrix(NA,N,D)
for (d in 1:D) {
  int1imp[,d] = sims[[d]][[1]][[2]]
}
save.image('./dnorms/main/conmi.RData')
##########################################################################
################### Imputing Later Waves #################################
##########################################################################
# Containers for the imputed behaviors of waves 2 and 3.
int2imp <- matrix(NA,N,D)
int3imp <- matrix(NA,N,D)
# Two network copies differing by exactly one tie (dropped from n1, kept in
# n2) so RSiena sees a minimal change between the two panel waves.
n1 <- network1
diag(n1) <- 0
n2 <- n1
tieList <- c(1:(nrow(n1)**2))[c(n1 == 1)]
tieList <- tieList[!is.na(tieList)]
changedTie <- sample(tieList,1)
n1[changedTie] <- 0
n2[changedTie] <- 1
estimation.options <- sienaAlgorithmCreate(useStdInits = FALSE,
                                           seed = 1325798,
                                           n3 = 3000, maxlike = FALSE,
                                           cond = FALSE, diagonalize = 0.3,
                                           firstg = 0.02,
                                           behModelType = c(dnorms = 2),
                                           lessMem = TRUE)
# ---- Impute waves 2 and 3 of dnorms, once per mice imputation d ----
# Same scheme as the intention section: two-wave SAOM from the imputed
# previous wave to the observed next wave, then one ML draw as imputation.
for (d in 1:D) {
  cat('imputation',d,'\n')
  # now impute wave2
  visits <- sienaDependent(array(c(n1,n2),
                                 dim = c(N,N,2)))
  dnorms <- sienaDependent(cbind(int1imp[,d], networkdata$follow.dnorms), type = "behavior")
  Know <- coCovar(complete(miceImp, d)$Knowledge1/sd(complete(miceImp, d)$Knowledge1,na.rm=T))
  # Observed wave-3 dnorms as covariate; wave-2 intention as covariate.
  a3 <- coCovar(networkdata$final.dnorms)
  intention <- coCovar(complete(miceImp,d)$follow.intention)
  Data.w2 <- sienaDataCreate(visits, dnorms, Age, Gender, Wealth, Atd, Know, pesticide, SMP,
                             intention, a3)
  effects.twoWaves <- getEffects(Data.w2)
  effects.twoWaves[effects.twoWaves$shortName == 'recip',]$include <- FALSE
  # Influence of alters' intention on ego's dnorms via visits ties.
  effects.twoWaves <- includeEffects(effects.twoWaves, avXAlt,
                                     name = 'dnorms',
                                     interaction1 = "intention", interaction2="visits")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="Know")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="a3")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="Wealth")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="Gender")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="Age")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="SMP")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="pesticide")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "dnorms",
                                     indeg, interaction1 = "visits")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "dnorms",
                                     outdeg, interaction1 = "visits")
  #fix the rate function
  effects.twoWaves <- setEffect(effects.twoWaves, Rate, initialValue = 0.01,
                                name = "visits",fix = TRUE,
                                type = "rate",test = FALSE)
  # Warm-start from the previous imputation's estimate for d > 1.
  if (d == 1) {
    period1saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w2,nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2)
  } else {
    period1saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w2, nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2,
                                        ans0 = period1saom)
  }
  sims <- siena07(imputation.options, data = Data.w2,
                  effects = effects.twoWaves,
                  prevAns = period1saom,
                  returnDeps = TRUE)$sims[[10]]
  int2imp[,d] <- sims[[2]]
  # impute wave 3
  visits <- sienaDependent(array( c(n1,n2),
                                  dim = c(N,N, 2)))
  dnorms <- sienaDependent(cbind(int2imp[,d],networkdata$final.dnorms), type = "behavior")
  Know <- coCovar(complete(miceImp, d)$Knowledge2/sd(complete(miceImp, d)$Knowledge2,na.rm=T))
  intention <- coCovar(complete(miceImp,d)$final.intention)
  Data.w3 <- sienaDataCreate(visits, dnorms, Age, Wealth, Atd, Know, Gender, SMP, intention, pesticide)
  effects.twoWaves <- getEffects(Data.w3)
  effects.twoWaves[effects.twoWaves$shortName == 'recip',]$include <- FALSE
  effects.twoWaves <- includeEffects(effects.twoWaves, avXAlt,
                                     name = 'dnorms',
                                     interaction1 = "intention", interaction2="visits")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="Know")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="Wealth")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="Gender")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="Age")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="SMP")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="dnorms", interaction1="pesticide")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "dnorms",
                                     indeg, interaction1 = "visits")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "dnorms",
                                     outdeg, interaction1 = "visits")
  #fix the rate function
  effects.twoWaves <- setEffect(effects.twoWaves, Rate, initialValue = 0.01,
                                name = "visits",fix = TRUE,
                                type = "rate",test = FALSE)
  if (d == 1) {
    period2saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w3,nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2)
  } else {
    period2saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w3,nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2,
                                        ans0 = period2saom)
  }
  sims <- siena07(imputation.options, data = Data.w3,
                  effects = effects.twoWaves,
                  prevAns = period2saom,
                  returnDeps = TRUE)$sims[[10]]
  int3imp[,d] <- sims[[2]]
  # Checkpoint after each imputation.
  save.image('./dnorms/main/conmi.RData')
}
##############################################################################
############################# 4. Estimating the models ######################
##############################################################################
#modify the network slightly in each wave
# One absent tie set to NA in wave 2, a second in wave 3, so the three
# network waves differ minimally.
n1 <- network1
tieList <- c(1:(nrow(n1)**2))[c(n1 == 0)]
tieList <- tieList[!is.na(tieList)]
changedTie <- sample(tieList,1)
n2 <- n1
n2[changedTie] <- NA
n3 <- n2
changedTie <- sample(tieList,1)
n3[changedTie] <- NA
consaomResults <- list()
constantDataList <- list()
# ---- Final analysis model on the D completed data sets (dnorms) ----
for (d in 1:D) {
  cat('Imputation',d,'\n')
  visits <- sienaDependent(array(c(n1, n2, n3), dim = c(N,N, 3)) ,
                           allowOnly = FALSE)
  dnorms <- sienaDependent(cbind(int1imp[,d], int2imp[,d], int3imp[,d]),
                           type = "behavior", allowOnly = FALSE)
  # Time-varying covariates per imputation: standardized knowledge and the
  # three intention waves.
  Know <- varCovar(cbind((complete(miceImp, d)$Knowledge1/sd(complete(miceImp, d)$Knowledge1,na.rm=T)),
                         (complete(miceImp, d)$Knowledge2/sd(complete(miceImp, d)$Knowledge2,na.rm=T))))
  intention <-varCovar(cbind(complete(miceImp,d)$base.intention,
                             complete(miceImp,d)$follow.intention, complete(miceImp,d)$final.intention))
  Data <- sienaDataCreate(visits,dnorms,Dummy, Age,Wealth,Gender,SMP,pesticide,intention,Know, Atd)
  effects.constant <- getEffects(Data)
  effects.constant[effects.constant$shortName == 'recip',]$include <- FALSE
  effects.constant <- includeEffects(effects.constant, effFrom, name="dnorms", interaction1="Know")
  effects.constant <- includeEffects(effects.constant, name = "dnorms",effFrom, interaction1 = "SMP")
  effects.constant <- includeEffects(effects.constant, effFrom, name="dnorms", interaction1="Gender")
  effects.constant <- includeEffects(effects.constant, effFrom, name="dnorms", interaction1="Age")
  effects.constant <- includeEffects(effects.constant, name = "dnorms",effFrom, interaction1 = "Wealth")
  effects.constant <- includeEffects(effects.constant, name = "dnorms",effFrom, interaction1 = "pesticide")
  effects.constant <- includeEffects(effects.constant, effFrom, name="dnorms", interaction1="Dummy")
  effects.constant <- includeEffects(effects.constant, name = "dnorms",
                                     indeg, interaction1 = "visits")
  effects.constant <- includeEffects(effects.constant, name = "dnorms",
                                     outdeg, interaction1 = "visits")
  # Influence from alters' intention plus its moderation by Know and Dummy.
  effects.constant <- includeEffects(effects.constant, name = "dnorms",
                                     avXAlt, interaction1 = "intention", interaction2="visits")
  effects.constant <- includeInteraction(effects.constant, avXAlt, effFrom,
                                         name="dnorms", interaction1=c("intention","Know"),interaction2=c("visits",""))
  effects.constant <- includeInteraction(effects.constant, avXAlt, effFrom,
                                         name="dnorms", interaction1=c("intention","Dummy"), interaction2=c("visits",""))
  #fix the rate function
  effects.constant <- setEffect(effects.constant, Rate, initialValue = 0.01,
                                name = "visits",fix = TRUE,
                                type = "rate",test = FALSE)
  effects.constant <- setEffect(effects.constant, Rate, initialValue = 0.01,
                                name = "visits",fix = TRUE,
                                type = "rate",test = FALSE, period=2)
  estimation.options <- sienaAlgorithmCreate(useStdInits = FALSE,
                                             seed = 1325798,
                                             n3 = 3000, maxlike = FALSE,
                                             behModelType = c(dnorms = 2),
                                             lessMem = FALSE, cond=F)
  # Warm-start each fit from the previous imputation's result.
  if (d == 1) {
    consaomResults[[d]] <- siena07ToConvergence(alg = estimation.options,nodes=10,
                                                dat = Data, eff = effects.constant,
                                                threshold = 0.2)
  } else {
    consaomResults[[d]] <- siena07ToConvergence(alg = estimation.options,nodes=10,
                                                dat = Data, eff = effects.constant,
                                                threshold = 0.2,
                                                ans0 = consaomResults[[d - 1]])
  }
  save.image('./dnorms/main/conmi.RData')
}
# Persist fits, imputed behaviors, and perturbed networks for dnorms.
saveRDS(consaomResults, file="./dnorms/main/Constant network dnorms.rds")
write.csv(int1imp, "./dnorms/main/Constant network-Int1Imp-3.csv")
write.csv(int2imp, "./dnorms/main/Constant network-Int2Imp-3.csv")
write.csv(int3imp, "./dnorms/main/Constant network-Int3Imp-3.csv")
write.csv(n1, "./dnorms/main/Constant network-net1.csv")
write.csv(n2, "./dnorms/main/Constant network-net2.csv")
write.csv(n3, "./dnorms/main/Constant network-net3.csv")
##Combining results
# Row-wise sample variance (denominator n - 1): the between-imputation
# variance component used in Rubin's rules below.
#
# x: numeric matrix/data frame with one column per imputation.
# Returns a numeric vector with one variance per row.
rowVar <- function(x) {
  centered <- x - rowMeans(x)
  sum.sq.dev <- rowSums(centered^2)
  sum.sq.dev/(dim(x)[2] - 1)
}
# ---- Pool the D estimates with Rubin's rules (dnorms) ----
npar <- sum(effects.constant$include)
conMIResults <- as.data.frame(matrix(,npar,(2 * D)))
for (d in 1:D) {
  names(conMIResults)[d * 2 - 1] <- paste("imp" , "mean", sep = as.character(d))
  names(conMIResults)[d * 2] <- paste("imp" , "se", sep = as.character(d))
  conMIResults[,d * 2 - 1] <- consaomResults[[d]]$theta
  conMIResults[,d * 2] <- sqrt(diag(consaomResults[[d]]$covtheta))
}
# Within-imputation variance: average of the D covariance matrices.
WDMIs <- matrix(0,npar,npar)
for (d in 1:D) {
  WDMIs <- WDMIs + consaomResults[[d]]$covtheta
}
WDMIs <- (1/D) * WDMIs
confinalResults <- as.data.frame(matrix(,npar,2))
names(confinalResults) <- c("combinedEstimate", "combinedSE")
rownames(confinalResults) <- consaomResults[[1]]$effects$effectName
# Rubin's rules: mean of estimates; within + (1 + 1/D) * between variance.
confinalResults$combinedEstimate <- rowMeans(conMIResults[,seq(1,2*D,2)])
confinalResults$combinedSE <- sqrt(diag(WDMIs) + ((D + 1)/D) *
                                     rowVar(conMIResults[,seq(1,2*D,2)]))
write.csv(confinalResults, "./dnorms/main/Constant network results dnorms D=20.csv")
###################################################################################################
########### Imputation and estimation of the SAOM for injunctive norms #################################
# Third pass of the pipeline, now for `innorms`, with attitudes replacing
# intention as the influence covariate; clear all but the shared inputs.
rm(list=setdiff(ls(), c("miceImp", "networkdata", "D", "N", "network")))
network1 <- network
#####Stationary SAOM
visits <- sienaDependent(array(c(network1, network1), dim = c(N,N, 2)) ,
                         allowOnly = FALSE)
a2 <- coCovar(networkdata$follow.innorms) # the 2nd wave incomplete behavior as covariate
stationaryDataList <- list()
for (d in 1:D) {
  innorms <- sienaDependent(cbind(complete(miceImp,d)$base.innorms,
                                  complete(miceImp,d)$base.innorms),
                            type = "behavior", allowOnly = FALSE)
  attitudes <- coCovar(complete(miceImp,d)$base.attitudes)
  stationaryDataList[[d]] <- sienaDataCreate(visits,innorms,a2, Age, Gender, Wealth, pesticide, SMP, attitudes)
}
Data.stationary <- sienaGroupCreate(stationaryDataList)
effects.stationary <- getEffects(Data.stationary)
effects.stationary[effects.stationary$shortName == 'recip',]$include <- FALSE
# 2nd wave as covariate
effects.stationary <- includeEffects(effects.stationary, effFrom,
                                     name = "innorms", interaction1 = "a2")
# influence
# avXAlt: effect of alters' attitudes (via visits ties) on ego's innorms.
effects.stationary <- includeEffects(effects.stationary, name = "innorms",
                                     avXAlt, interaction1 = "attitudes", interaction2="visits")
effects.stationary <- includeEffects(effects.stationary, name = "innorms",
                                     indeg, interaction1 = "visits")
effects.stationary <- includeEffects(effects.stationary, name = "innorms",
                                     outdeg, interaction1 = "visits")
#effect from attendance
# NOTE(review): fewer controls here than in the intention/dnorms sections
# (only SMP) — confirm this asymmetry is intentional.
effects.stationary <- includeEffects(effects.stationary, name = "innorms",
                                     effFrom, interaction1 = "SMP")
for (d in 1:D) { #fix the rate function
  # Behavior rate fixed at 16 here (vs 8 in the other sections).
  effects.stationary <- setEffect(effects.stationary, Rate, initialValue = 0.01,
                                  name = "visits",fix = TRUE,
                                  group = d,type = "rate",test = FALSE)
  effects.stationary <- setEffect(effects.stationary, Rate, initialValue = 16,
                                  name = "innorms",fix = TRUE,
                                  group = d,type = "rate",test = FALSE)
}
# ---- Algorithm settings and estimation of the stationary SAOM (innorms) ----
estimation.options.st <- sienaAlgorithmCreate(useStdInits = FALSE,
                                              seed = 1325798,
                                              n3 = 3000, maxlike = FALSE,
                                              cond = FALSE, diagonalize = 0.6,
                                              firstg = 0.02,
                                              behModelType = c(innorms = 2),
                                              lessMem = TRUE)
#estimate the SAOM
period0saom <- siena07ToConvergence(alg = estimation.options.st,
                                    dat = Data.stationary, nodes=10,
                                    eff = effects.stationary, threshold = 0.2)
# ML simulation-only algorithm for drawing imputations.
imputation.options <- sienaAlgorithmCreate(useStdInits = FALSE,
                                           seed = 1325798,
                                           cond = FALSE,
                                           behModelType = c(innorms = 2),
                                           maxlike = TRUE,
                                           nsub = 0,
                                           simOnly = TRUE,
                                           n3 = 10)
# ---- Build imputation data and impute the first wave (innorms) ----
stationaryImpDataList <- list()
for (d in 1:D) {
  # Structural coding of the network (10/11) to freeze ties during ML
  # simulation; one structural tie toggled between the two copies.
  n1 <- network1
  n1 <- n1 + 10
  n1 <- ifelse(n1>11, 11, n1)
  diag(n1) <- 0
  n2 <- n1
  tieList <- c(1:(nrow(n1)**2))[c(n1 == 11)]
  tieList <- tieList[!is.na(tieList)]
  changedTie <- sample(tieList,1)
  n1[changedTie] <- 0
  n2[changedTie] <- 1
  visits <- sienaDependent(array(c(n1,n2), dim = c(N,N, 2)),
                           allowOnly = FALSE )
  # Perturb one observed mid-scale score (8 -> 7 or 9).
  i1 <- networkdata$base.innorms
  i1.3s <- c(1:N)[i1 == 8 & !is.na(i1)]
  int <- sample(i1.3s,1)
  i1change <- complete(miceImp,d)$base.innorms
  i1change[int] <- sample(c(7,9),1)
  innorms <- sienaDependent(cbind(i1change,i1), type = "behavior",
                            allowOnly = FALSE)
  # NOTE(review): `attitudes` here is the coCovar from iteration d = D of
  # the stationaryDataList loop above — every imputation data set gets the
  # same attitudes covariate; presumably it should be rebuilt per d. `Atd`
  # is defined earlier in the file and absent from Data.stationary. Verify
  # both.
  stationaryImpDataList[[d]] <- sienaDataCreate(visits,innorms,a2,Atd,Age,Gender,Wealth,SMP,pesticide,attitudes)
}
Data.stationary.imp <- sienaGroupCreate(stationaryImpDataList)
#impute first wave
sims <- siena07(imputation.options, data = Data.stationary.imp,
                effects = effects.stationary,
                prevAns = period0saom,
                returnDeps = TRUE)$sims[[10]]
# Collect the simulated behavior as the wave-1 imputation per d.
int1imp <- matrix(NA,N,D)
for (d in 1:D) {
  int1imp[,d] = sims[[d]][[1]][[2]]
}
save.image('./innorms/main/conmi.RData')
##########################################################################
################### Imputing Later Waves #################################
##########################################################################
# Two network copies differing by exactly one tie (dropped from n1, kept
# in n2), as in the dnorms section.
n1 <- network1
diag(n1) <- 0
n2 <- n1
tieList <- c(1:(nrow(n1)**2))[c(n1 == 1)]
tieList <- tieList[!is.na(tieList)]
changedTie <- sample(tieList,1)
n1[changedTie] <- 0
n2[changedTie] <- 1
# Containers for the imputed behaviors of waves 2 and 3.
int2imp <- matrix(NA,N,D)
int3imp <- matrix(NA,N,D)
estimation.options <- sienaAlgorithmCreate(useStdInits = FALSE,
                                           seed = 1325798,
                                           n3 = 3000, maxlike = FALSE,
                                           cond = FALSE, diagonalize = 0.3,
                                           firstg = 0.02,
                                           behModelType = c(innorms = 2),
                                           lessMem = TRUE)
# ---- Impute waves 2 and 3 of innorms, once per mice imputation d ----
for (d in 1:D) {
  cat('imputation',d,'\n')
  # now impute wave2
  visits <- sienaDependent(array(c(n1,n2),
                                 dim = c(N,N,2)))
  innorms <- sienaDependent(cbind(int1imp[,d], networkdata$follow.innorms), type = "behavior")
  Know <- coCovar(complete(miceImp, d)$Knowledge1/sd(complete(miceImp, d)$Knowledge1,na.rm=T))
  # Observed wave-3 innorms as covariate; wave-2 attitudes as covariate.
  a3 <- coCovar(networkdata$final.innorms)
  attitudes <- coCovar(complete(miceImp,d)$follow.attitudes)
  Data.w2 <- sienaDataCreate(visits, innorms, Age, Gender, Wealth, Atd, Know, pesticide, SMP,
                             attitudes, a3)
  effects.twoWaves <- getEffects(Data.w2)
  effects.twoWaves[effects.twoWaves$shortName == 'recip',]$include <- FALSE
  # Influence of alters' attitudes on ego's innorms via visits ties.
  effects.twoWaves <- includeEffects(effects.twoWaves, avXAlt,
                                     name = 'innorms',
                                     interaction1 = "attitudes", interaction2="visits")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="innorms", interaction1="Know")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="innorms", interaction1="a3")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="innorms", interaction1="SMP")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "innorms",
                                     indeg, interaction1 = "visits")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "innorms",
                                     outdeg, interaction1 = "visits")
  #fix the rate function
  effects.twoWaves <- setEffect(effects.twoWaves, Rate, initialValue = 0.01,
                                name = "visits",fix = TRUE,
                                type = "rate",test = FALSE)
  # Warm-start from the previous imputation's estimate for d > 1.
  if (d == 1) {
    period1saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w2,nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2)
  } else {
    period1saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w2, nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2,
                                        ans0 = period1saom)
  }
  sims <- siena07(imputation.options, data = Data.w2,
                  effects = effects.twoWaves,
                  prevAns = period1saom,
                  returnDeps = TRUE)$sims[[10]]
  int2imp[,d] <- sims[[2]]
  # impute wave 3
  visits <- sienaDependent(array( c(n1,n2),
                                  dim = c(N,N, 2)))
  innorms <- sienaDependent(cbind(int2imp[,d],networkdata$final.innorms), type = "behavior")
  Know <- coCovar(complete(miceImp, d)$Knowledge2/sd(complete(miceImp, d)$Knowledge2,na.rm=T))
  attitudes <- coCovar(complete(miceImp,d)$final.attitudes)
  Data.w3 <- sienaDataCreate(visits, innorms, Age, Wealth, Atd, Know, Gender, SMP, attitudes, pesticide)
  effects.twoWaves <- getEffects(Data.w3)
  effects.twoWaves[effects.twoWaves$shortName == 'recip',]$include <- FALSE
  effects.twoWaves <- includeEffects(effects.twoWaves, avXAlt,
                                     name = 'innorms',
                                     interaction1 = "attitudes", interaction2="visits")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="innorms", interaction1="Know")
  effects.twoWaves <- includeEffects(effects.twoWaves, effFrom, name="innorms", interaction1="SMP")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "innorms",
                                     indeg, interaction1 = "visits")
  effects.twoWaves <- includeEffects(effects.twoWaves, name = "innorms",
                                     outdeg, interaction1 = "visits")
  #fix the rate function
  effects.twoWaves <- setEffect(effects.twoWaves, Rate, initialValue = 0.01,
                                name = "visits",fix = TRUE,
                                type = "rate",test = FALSE)
  if (d == 1) {
    period2saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w3,nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2)
  } else {
    period2saom <- siena07ToConvergence(alg = estimation.options,
                                        dat = Data.w3,nodes=10,
                                        eff = effects.twoWaves,
                                        threshold = 0.2,
                                        ans0 = period2saom)
  }
  sims <- siena07(imputation.options, data = Data.w3,
                  effects = effects.twoWaves,
                  prevAns = period2saom,
                  returnDeps = TRUE)$sims[[10]]
  int3imp[,d] <- sims[[2]]
  # Checkpoint after each imputation.
  save.image('./innorms/main/conmi.RData')
}
##############################################################################
############################# 4. Estimating the models ######################
##############################################################################
#modify the network slightly in each wave
# One absent tie is set to NA in waves 2 and 3 so the three network waves are
# not byte-identical -- presumably so the "constant" network still shows some
# change per period for RSiena; confirm against the RSiena manual.
n1 <- network1
tieList <- c(1:(nrow(n1)**2))[c(n1 == 0)]
tieList <- tieList[!is.na(tieList)]
changedTie <- sample(tieList,1)
n2 <- n1
n2[changedTie] <- NA
n3 <- n2
changedTie <- sample(tieList,1)
n3[changedTie] <- NA
# Containers: one fitted model per imputation.
consaomResults <- list()
constantDataList <- list()
# One estimation run per imputed data set: rebuild the Siena objects from the
# d-th mice completion and the imputed behaviour columns, estimate to
# convergence, and store the fit.
for (d in 1:D) {
cat('Imputation',d,'\n')
# Three-wave network (with the minimal per-wave change introduced above).
visits <- sienaDependent(array(c(n1, n2, n3), dim = c(N,N, 3)) ,
allowOnly = FALSE)
# Behaviour variable: the three imputed norm vectors for imputation d.
innorms <- sienaDependent(cbind(int1imp[,d], int2imp[,d], int3imp[,d]),
type = "behavior", allowOnly = FALSE)
# Varying covariates: per-wave knowledge scores scaled by their SD, attitudes.
Know <- varCovar(cbind((complete(miceImp, d)$Knowledge1/sd(complete(miceImp, d)$Knowledge1,na.rm=T)),
(complete(miceImp, d)$Knowledge2/sd(complete(miceImp, d)$Knowledge2,na.rm=T))))
attitudes <-varCovar(cbind(complete(miceImp,d)$base.attitudes,
complete(miceImp,d)$follow.attitudes, complete(miceImp,d)$final.attitudes))
Data <- sienaDataCreate(visits,innorms,Dummy,Age,Wealth,Gender,SMP,pesticide,attitudes,Know)
effects.constant <- getEffects(Data)
# Drop the default reciprocity effect from the effects table.
effects.constant[effects.constant$shortName == 'recip',]$include <- FALSE
effects.constant <- includeEffects(effects.constant, effFrom, name="innorms", interaction1="Know")
effects.constant <- includeEffects(effects.constant, name = "innorms",effFrom, interaction1 = "SMP")
effects.constant <- includeEffects(effects.constant, effFrom, name="innorms", interaction1="Dummy")
effects.constant <- includeEffects(effects.constant, name = "innorms",
indeg, interaction1 = "visits")
effects.constant <- includeEffects(effects.constant, name = "innorms",
outdeg, interaction1 = "visits")
effects.constant <- includeEffects(effects.constant, name = "innorms",
avXAlt, interaction1 = "attitudes", interaction2="visits")
effects.constant <- includeInteraction(effects.constant, avXAlt, effFrom,
name="innorms", interaction1=c("attitudes","Know"),interaction2=c("visits",""))
effects.constant <- includeInteraction(effects.constant, avXAlt, effFrom,
name="innorms", interaction1=c("attitudes","Dummy"), interaction2=c("visits",""))
#fix the rate function
# Near-zero fixed rates for both periods keep the network effectively constant.
effects.constant <- setEffect(effects.constant, Rate, initialValue = 0.01,
name = "visits",fix = TRUE,
type = "rate",test = FALSE)
effects.constant <- setEffect(effects.constant, Rate, initialValue = 0.01,
name = "visits",fix = TRUE,
type = "rate",test = FALSE, period=2)
estimation.options <- sienaAlgorithmCreate(useStdInits = FALSE,
seed = 1325798,
n3 = 3000, maxlike = FALSE,
behModelType = c(innorms = 2),
lessMem = FALSE, cond=F)
# Imputations after the first start from the previous fit (ans0) to speed up
# convergence.
if (d == 1) {
consaomResults[[d]] <- siena07ToConvergence(alg = estimation.options,nodes=10,
dat = Data, eff = effects.constant,
threshold = 0.2)
} else {
consaomResults[[d]] <- siena07ToConvergence(alg = estimation.options,nodes=10,
dat = Data, eff = effects.constant,
threshold = 0.2,
ans0 = consaomResults[[d - 1]])
}
# Checkpoint the whole workspace after every imputation.
save.image('./innorms/main/conmi.RData')
}
# Persist the fitted models and every matrix needed to reproduce the run.
saveRDS(consaomResults, file = "./innorms/main/Constant network innorms D=20.rds")
csv_outputs <- list(
  "./innorms/main/Constant network-Int1Imp-3-20.csv" = int1imp,
  "./innorms/main/Constant network-Int2Imp-3-20.csv" = int2imp,
  "./innorms/main/Constant network-Int3Imp-3-20.csv" = int3imp,
  "./innorms/main/Constant network-net1.csv" = n1,
  "./innorms/main/Constant network-net2.csv" = n2,
  "./innorms/main/Constant network-net3.csv" = n3
)
for (fname in names(csv_outputs)) {
  write.csv(csv_outputs[[fname]], fname)
}
##Combining results
# Row-wise sample variance (denominator n - 1) of a numeric matrix or data
# frame; used below as the between-imputation variance in Rubin's rules.
rowVar <- function(x) {
  deviations <- x - rowMeans(x)
  rowSums(deviations^2) / (ncol(x) - 1)
}
# Pool the D imputation-specific fits with Rubin's rules: combined estimate is
# the mean of the D point estimates; combined variance is the mean
# within-imputation variance plus (D + 1)/D times the between-imputation
# variance.
npar <- sum(effects.constant$include)
conMIResults <- as.data.frame(matrix(,npar,(2 * D)))
for (d in 1:D) {
names(conMIResults)[d * 2 - 1] <- paste("imp" , "mean", sep = as.character(d))
names(conMIResults)[d * 2] <- paste("imp" , "se", sep = as.character(d))
conMIResults[,d * 2 - 1] <- consaomResults[[d]]$theta
conMIResults[,d * 2] <- sqrt(diag(consaomResults[[d]]$covtheta))
}
# Average covariance matrix across imputations (within-imputation variance).
WDMIs <- matrix(0,npar,npar)
for (d in 1:D) {
WDMIs <- WDMIs + consaomResults[[d]]$covtheta
}
WDMIs <- (1/D) * WDMIs
confinalResults <- as.data.frame(matrix(,npar,2))
names(confinalResults) <- c("combinedEstimate", "combinedSE")
rownames(confinalResults) <- consaomResults[[1]]$effects$effectName
confinalResults$combinedEstimate <- rowMeans(conMIResults[,seq(1,2*D,2)])
confinalResults$combinedSE <- sqrt(diag(WDMIs) + ((D + 1)/D) *
rowVar(conMIResults[,seq(1,2*D,2)]))
# NOTE(review): table() on a two-column data frame cross-tabulates the columns;
# print(round(confinalResults, 3)) may have been intended -- confirm.
table(round(confinalResults, 3))
write.csv(confinalResults, "./innorms/main/Constant network results innorms D=20.csv")
###########################################################################################
###################### Estimation of SAOM for information flow ###########################
rm(list=setdiff(ls(), c("miceImp", "networkdata", "D", "N", "network")))
# Build the knowledge (campaign-exposure) indicator as a binary, non-decreasing
# variable over the three waves.
# Per-wave counts of campaign items recalled; rowSums(..., na.rm = TRUE) never
# yields NA, so these counts are complete.
Knowledge.follow <- rowSums(cbind(networkdata$follow.hotline, networkdata$follow.pledge, networkdata$follow.story), na.rm=T)
Knowledge.final <- rowSums(cbind(networkdata$final.hotline, networkdata$final.pledge, networkdata$final.story), na.rm=T)
# Nobody is informed at baseline (generalised from the hard-coded 365 rows).
Knowledge.base <- rep(0, length(Knowledge.follow))
Knowledge <- as.data.frame(cbind(Knowledge.base, Knowledge.follow, Knowledge.final))
# Event attendance counts as wave-1 exposure; missing attendance = 0.
Attendance <- networkdata$follow.event_attendance
Attendance[is.na(Attendance)] <- 0
BinKnowledge <- as.data.frame(Knowledge)
BinKnowledge$one <- Attendance
# Vectorised recode, replacing the former element-wise loop. The loop's
# is.na() fallbacks were dead code because the rowSums counts are never NA.
# Informed at wave 2 if any item recalled; wave 3 carries wave 2 forward and
# adds new recall, making the indicator non-decreasing from wave 2 to 3.
BinKnowledge$two <- ifelse(BinKnowledge$Knowledge.follow > 0, 1, 0)
BinKnowledge$three <- ifelse(BinKnowledge$two == 1 | BinKnowledge$Knowledge.final > 0, 1, 0)
BinKnowledge <- dplyr::select(BinKnowledge, one, two, three) #BinKnowledge is complete (imputed by us) and non-decreasing
colnames(BinKnowledge) <- c("base.know","follow.know","final.know")
Infomat <- as.matrix(BinKnowledge)
#Modify the network by one tie in each wave
# As above: flip one absent tie to NA in waves 2 and 3 so the "constant"
# network is not byte-identical across waves.
n1 <- network
tieList <- c(1:(nrow(n1)**2))[c(n1 == 0)]
tieList <- tieList[!is.na(tieList)]
changedTie <- sample(tieList,1)
n2 <- n1
n2[changedTie] <- NA
n3 <- n2
changedTie <- sample(tieList,1)
n3[changedTie] <- NA
# Siena objects: three-wave network plus the binary knowledge indicator.
Sienanet <- sienaDependent(array(c(n1, n2, n3), dim = c(N,N, 3)) ,
allowOnly = FALSE)
SienaBinKnow <- sienaDependent(Infomat, type="behavior")
InfoData <- sienaDataCreate(Sienanet,SienaBinKnow)
diffusion.effects <- getEffects(InfoData)
diffusion.effects[diffusion.effects$shortName == 'recip',]$include <- FALSE
# Exposure effect: adoption rate depends on the number of informed contacts.
diffusion.effects <- includeEffects(diffusion.effects, totExposure, name="SienaBinKnow",
interaction1 = "Sienanet", type="rate")
#fix the rate function
# Near-zero fixed network rates in both periods keep the network effectively
# constant while the behaviour dynamics are estimated.
# BUG FIX: the dependent network in this data set is called "Sienanet" (see
# sienaDataCreate above); the previous name = "network" matched no dependent
# variable, so these rate parameters were never fixed.
diffusion.effects <- setEffect(diffusion.effects, Rate, initialValue = 0.01,
name = "Sienanet",fix = TRUE,
type = "rate",test = FALSE)
diffusion.effects <- setEffect(diffusion.effects, Rate, initialValue = 0.01,
name = "Sienanet",fix = TRUE,
type = "rate",test = FALSE, period=2)
# Unconditional MoM estimation (cond = F) with a fixed seed; n3 simulation runs
# are used for the phase-3 checks and the GOF below.
estimation.options <- sienaAlgorithmCreate(useStdInits = FALSE,
seed = 1325798, lessMem=FALSE,
n3 = 3000, maxlike = FALSE, cond=F)
DOI.saomResults <- siena07ToConvergence(alg = estimation.options,nodes=10,
dat = InfoData, eff = diffusion.effects,
threshold = 0.2)
# Goodness of fit on the behaviour distribution of the knowledge variable.
DOIGOF <- sienaGOF(DOI.saomResults, varName = "SienaBinKnow",
BehaviorDistribution)
plot(DOIGOF)
summary(DOIGOF)
saveRDS(DOI.saomResults, file="./Info/main/Diffusion of Innovations combined.rds")
## Now repeat with the networks split up
# Read the three tie types separately; swap in the commented lines to use the
# updated network files instead.
Visits <- as.matrix(read.csv("visits network.csv"))
#Visits <- as.matrix(read.csv("updated visits network.csv")) #for updated network
Visitors <- as.matrix(read.csv("Visitors network.csv"))
#Visitors <- as.matrix(read.csv("updated visitors network.csv")) #for updated network
HHnet <- as.matrix(read.csv("coresidence network.csv"))
# NOTE(review): the "updated" alternative below names the same file as the
# default -- probably a paste slip; confirm the intended updated filename.
#HHnet <- as.matrix(read.csv("coresidence network.csv")) #for updated network
#modify each network by one tie for each wave
# Visits network: set one absent tie to NA in waves 2 and 3.
n1 <- Visits
tieList <- c(1:(nrow(n1)**2))[c(n1 == 0)]
tieList <- tieList[!is.na(tieList)]
changedTie <- sample(tieList,1)
n2 <- n1
n2[changedTie] <- NA
n3 <- n2
changedTie <- sample(tieList,1)
n3[changedTie] <- NA
#coresidence
# Same one-tie perturbation for the coresidence network.
h1 <- HHnet
tieList <- c(1:(nrow(h1)**2))[c(h1 == 0)]
tieList <- tieList[!is.na(tieList)]
changedTie <- sample(tieList,1)
h2 <- h1
h2[changedTie] <- NA
h3 <- h2
changedTie <- sample(tieList,1)
h3[changedTie] <- NA
#visitors
# Same one-tie perturbation for the visitors network.
v1 <- Visitors
tieList <- c(1:(nrow(v1)**2))[c(v1 == 0)]
tieList <- tieList[!is.na(tieList)]
# BUG FIX: this was previously assigned to "cvangedTie" (typo), so the next
# line reused the stale changedTie index from the household network above.
changedTie <- sample(tieList,1)
v2 <- v1
v2[changedTie] <- NA
v3 <- v2
changedTie <- sample(tieList,1)
v3[changedTie] <- NA
# Multi-network diffusion model: visits, visitors and household ties are
# separate dependent networks, each with its own exposure effect on knowledge.
visits <- sienaDependent(array(c(n1, n2, n3), dim = c(N,N, 3)) ,
allowOnly = FALSE)
visitors <- sienaDependent(array(c(v1, v2, v3), dim = c(N,N, 3)) ,
allowOnly = FALSE)
household <- sienaDependent(array(c(h1, h2, h3), dim = c(N,N, 3)) ,
allowOnly = FALSE)
SienaBinKnow <- sienaDependent(Infomat, type="behavior")
InfoData <- sienaDataCreate(visits,visitors,household,SienaBinKnow)
diffusion.effects <- getEffects(InfoData)
diffusion.effects[diffusion.effects$shortName == 'recip',]$include <- FALSE
# One total-exposure rate effect per tie type.
diffusion.effects <- includeEffects(diffusion.effects, totExposure, name="SienaBinKnow",
interaction1 = "visits", type="rate")
diffusion.effects <- includeEffects(diffusion.effects, totExposure, name="SienaBinKnow",
interaction1 = "visitors", type="rate")
diffusion.effects <- includeEffects(diffusion.effects, totExposure, name="SienaBinKnow",
interaction1 = "household", type="rate")
#fix the rate function
# Near-zero fixed rates for all three networks in both periods.
diffusion.effects <- setEffect(diffusion.effects, Rate, initialValue = 0.01,
name = "visits",fix = TRUE,
type = "rate",test = FALSE)
diffusion.effects <- setEffect(diffusion.effects, Rate, initialValue = 0.01,
name = "visits",fix = TRUE,
type = "rate",test = FALSE, period=2)
diffusion.effects <- setEffect(diffusion.effects, Rate, initialValue = 0.01,
name = "visitors",fix = TRUE,
type = "rate",test = FALSE)
diffusion.effects <- setEffect(diffusion.effects, Rate, initialValue = 0.01,
name = "visitors",fix = TRUE,
type = "rate",test = FALSE, period=2)
diffusion.effects <- setEffect(diffusion.effects, Rate, initialValue = 0.01,
name = "household",fix = TRUE,
type = "rate",test = FALSE)
diffusion.effects <- setEffect(diffusion.effects, Rate, initialValue = 0.01,
name = "household",fix = TRUE,
type = "rate",test = FALSE, period=2)
estimation.options <- sienaAlgorithmCreate(useStdInits = FALSE,
seed = 1325798, lessMem=FALSE,
n3 = 3000, maxlike = FALSE, cond=F)
DOI.saomResults <- siena07ToConvergence(alg = estimation.options,nodes=10,
dat = InfoData, eff = diffusion.effects,
threshold = 0.2)
saveRDS(DOI.saomResults, file="./Info/main/Diffusion of Innovations multinet.rds")
##################################################################################################
##################### Goodness of fit checking #################################################
#This script is adapted from a script provided by Chen Wang
# As written, the script can be applied to the models for 'intention'
# Some modification of the script is required to fit it to the other models, this is indicated
library(RSiena)
library(lattice)
library(MASS)
library(Matrix)
library(igraph)
library(gridExtra)
#read the sienafit object
fitlist <- readRDS("Constant network fit final=20.rds")
D <- length(fitlist) # number of imputations
for(imp in 1:D){
behaviour <- cbind(int1imp[,imp], int2imp[,imp], int3imp[,imp])
finalnet <- n3 ###the network used in wave 3
finalnet[which(finalnet==11)] <- 1
ans <- fitlist[[imp]]
### GOF testing
### the original "BehaviorDistribution" in RSiena has some issues
### but it provides a perfect strawman object for GOF testing
gofb <- sienaGOF(ans, BehaviorDistribution, varName = "intention",verbose=TRUE) # <- change intention for other dependent variable
plot(gofb)
save(gofb,file="gofb.Rdata")
### behavior distribution
k <- table(factor(behaviour[,3], levels = (min(behaviour):max(behaviour))))
observed <- matrix(k,nrow=1)
observations <- nrow(observed)
n <- length(k)
simulated <- matrix(rep(0,1000*n),nrow=1000)
for (z in 1:1000) {
for(x in min(behaviour):max(behaviour)){
simulated[z,x-1] <- length(ans$sims[[z]][[1]][[2]][[2]][which(ans$sims[[z]][[1]][[2]][[2]]==x)]) # <- check max and min for the dependent var
}
}
variates <- ncol(simulated)
a <- cov(simulated)
ainv <- ginv(a)
expectation <- colMeans(simulated)
centeredSimulations <- scale(simulated, scale=FALSE)
centeredObservations <- observed - expectation
# Mahalanobis-type distance of a centred statistic vector, using the
# generalised inverse `ainv` of the simulated covariance matrix (a free
# variable resolved in the enclosing environment at call time).
mhd <- function(stat) {
stat %*% ainv %*% stat
}
simTestStat <- apply(centeredSimulations, 1, mhd)
obsTestStat <- apply(centeredObservations, 1, mhd)
#if (twoTailed)
#{
# p <- sapply(1:observations, function (i)
# 1 - abs(1 - 2 * sum(obsTestStat[i] <=
# simTestStat)/length(simTestStat)) )
#}
#else
#{
p <- sapply(1:observations, function (i)
sum(obsTestStat[i] <= simTestStat) /length(simTestStat))
#}
load("gofb.Rdata")
gofb$Joint$Simulations <- simulated
gofb$Joint$Observations <- observed
gofb$Joint$p <- p
attr(gofb,"auxiliaryStatisticName")<-"Behavior Distribution"
plot1<-plot(gofb, main="Distribution of behaviour levels", xlab="Intention") # <- alter title
##########################################################################################
### behavior transition
a <- behaviour[,2]
b <- behaviour[,3]
k <- c(t(table(factor(a, levels=min(behaviour):max(behaviour)),factor(b, levels =min(behaviour):max(behaviour))))) # <- check max and min
observed <- matrix(k,nrow=1)
observations <- nrow(observed)
n <- length(k)
simulated <- matrix(rep(0,1000*n),nrow=1000)
for (z in 1:1000) {
g <- cbind(a,ans$sims[[z]][[1]][[2]][[2]])
for (i in min(behaviour):max(behaviour)) for (j in min(behaviour):max(behaviour)) {
p <- (i-2)*9+(j-1) # <- check these values depending on the max and min. i.e. for injunctive norm, it should be "(i-5)*15+(j-4)", for info "(i+1)*2+(j-1)"
simulated[z,p] <- length(which(g[,1]==i & g[,2]==j))
}
#simulated[z,] <- c(t(table(a,g)))
}
variates <- ncol(simulated)
a <- cov(simulated)
ainv <- ginv(a)
expectation <- colMeans(simulated)
centeredSimulations <- scale(simulated, scale=FALSE)
centeredObservations <- observed - expectation
mhd <- function(x)
{
x %*% ainv %*% x
}
simTestStat <- apply(centeredSimulations, 1, mhd)
obsTestStat <- apply(centeredObservations, 1, mhd)
#if (twoTailed)
#{
# p <- sapply(1:observations, function (i)
# 1 - abs(1 - 2 * sum(obsTestStat[i] <=
# simTestStat)/length(simTestStat)) )
#}
#else
#{
p <- sapply(1:observations, function (i)
sum(obsTestStat[i] <= simTestStat) /length(simTestStat))
#}
load("gofb.Rdata")
gofb$Joint$Simulations <- simulated
gofb$Joint$Observations <- observed
gofb$Joint$p <- p
attr(gofb,"auxiliaryStatisticName")<-"Behavior Transition"
attr(gofb[[1]], "key")=c("2:",rep("_",8), "3:", rep("_",8), "4:",
rep("_",8), "5:", rep("_",8), "6:", rep("_",8), "7:",
rep("_",8), "8:", rep("_",8), "9:", rep("_",8), "10:", rep("_",8)) # <- alter key
plot2 <- plot(gofb, main="Behavioral transitions", xlab="Transition", xaxt="n")
##########################################################################################
### behavior change
b <- behaviour[,3]-behaviour[,2]
values <- min(b):max(b)
k <- table(factor(b, levels=values))
observed <- matrix(k,nrow=1)
observations <- nrow(observed)
n <- length(k)
simulated <- matrix(rep(0,1000*n),nrow=1000)
for (z in 1:1000) {
g <- ans$sims[[z]][[1]][[2]][[2]]-behaviour[,2]
for(x in 1:n){
simulated[z,x] <- length(g[which(g==values[x])])
}
}
variates <- ncol(simulated)
a <- cov(simulated)
ainv <- ginv(a)
expectation <- colMeans(simulated)
centeredSimulations <- scale(simulated, scale=FALSE)
centeredObservations <- observed - expectation
mhd <- function(x)
{
x %*% ainv %*% x
}
simTestStat <- apply(centeredSimulations, 1, mhd)
obsTestStat <- apply(centeredObservations, 1, mhd)
#if (twoTailed)
#{
# p <- sapply(1:observations, function (i)
# 1 - abs(1 - 2 * sum(obsTestStat[i] <=
# simTestStat)/length(simTestStat)) )
#}
#else
#{
p <- sapply(1:observations, function (i)
sum(obsTestStat[i] <= simTestStat) /length(simTestStat))
#}
load("gofb.Rdata")
gofb$Joint$Simulations <- simulated
gofb$Joint$Observations <- observed
gofb$Joint$p <- p
attr(gofb,"auxiliaryStatisticName")<-"Behavior Change Values"
attr(gofb[[1]],"key")<-c(-7:7) #<- alter key
plot3 <- plot(gofb, main="Behavior change values", xlab="Change magnitude", xaxt="n")
##########################################################################################
### out-degree & in-degree by behavior
f3 <- finalnet
f3[which(f3==10)] <- NA
a <- igraph::degree(graph.adjacency(f3),mode="out")
b <- igraph::degree(graph.adjacency(f3),mode="in")
values <- min(behaviour):max(behaviour)
n <- length(values)
c <- d <- rep(0,n)
for(i in 1:n){
c[i] <- mean(a[which(behaviour[,3]==values[i])])
d[i] <- mean(b[which(behaviour[,3]==values[i])])
}
k <- c
observed <- matrix(k,nrow=1)
observed[is.na(observed)==TRUE] <-0
observations <- nrow(observed)
n <- length(k)
simulated <- matrix(rep(0,1000*n),nrow=1000)
for (z in 1:1000) {
f <- graph_from_edgelist(ans$sims[[z]][[1]][[1]][[2]][,1:2], directed=TRUE)
h <- ans$sims[[z]][[1]][[2]][[2]]
j <- igraph::degree(f,mode="out")
for(i in 1:n){
simulated[z,i] <- mean(j[which(h==values[i])],na.rm=TRUE)
}
}
simulated[is.na(simulated)==TRUE] <- 0
variates <- ncol(simulated)
a <- cov(simulated)
ainv <- ginv(a)
expectation <- colMeans(simulated)
centeredSimulations <- scale(simulated, scale=FALSE)
centeredObservations <- observed - expectation
mhd <- function(x)
{
x %*% ainv %*% x
}
simTestStat <- apply(centeredSimulations, 1, mhd)
obsTestStat <- apply(centeredObservations, 1, mhd)
#if (twoTailed)
#{
# p <- sapply(1:observations, function (i)
# 1 - abs(1 - 2 * sum(obsTestStat[i] <=
# simTestStat)/length(simTestStat)) )
#}
#else
#{
p <- sapply(1:observations, function (i)
sum(obsTestStat[i] <= simTestStat) /length(simTestStat))
#}
load("gofb.Rdata")
gofb$Joint$Simulations <- simulated
gofb$Joint$Observations <- observed
gofb$Joint$p <- p
attr(gofb,"auxiliaryStatisticName")<-"Average Out-degree by Behavior"
plot4 <- plot(gofb, main="Average Out-degree by Behavior", xlab="Intention") #<- alter title
k <- d
observed <- matrix(k,nrow=1)
observed[is.na(observed)==TRUE] <- 0
observations <- nrow(observed)
n <- length(k)
simulated <- matrix(rep(0,1000*n),nrow=1000)
for (z in 1:1000) {
f <- graph_from_edgelist(ans$sims[[z]][[1]][[1]][[2]][,1:2], directed=TRUE)
h <- ans$sims[[z]][[1]][[2]][[2]]
j <- igraph::degree(f,mode="in")
for(i in 1:n){
simulated[z,i] <- mean(j[which(h==values[i])],na.rm=TRUE)
}
}
simulated[is.na(simulated)==TRUE] <- 0
variates <- ncol(simulated)
a <- cov(simulated)
ainv <- ginv(a)
expectation <- colMeans(simulated)
centeredSimulations <- scale(simulated, scale=FALSE)
centeredObservations <- observed - expectation
mhd <- function(x)
{
x %*% ainv %*% x
}
simTestStat <- apply(centeredSimulations, 1, mhd)
obsTestStat <- apply(centeredObservations, 1, mhd)
#if (twoTailed)
#{
# p <- sapply(1:observations, function (i)
# 1 - abs(1 - 2 * sum(obsTestStat[i] <=
# simTestStat)/length(simTestStat)) )
#}
#else
#{
p <- sapply(1:observations, function (i)
sum(obsTestStat[i] <= simTestStat) /length(simTestStat))
#}
load("gofb.Rdata")
gofb$Joint$Simulations <- simulated
gofb$Joint$Observations <- observed
gofb$Joint$p <- p
attr(gofb,"auxiliaryStatisticName")<-"Average In-degree by Behavior"
plot5 <- plot(gofb, main="Average In-degree by Behavior", xlab="Intention") #<- alter title
##########################################################################################
### edgewise homophily
net <- finalnet
net[which(net==10)] <- NA
net <- graph.adjacency(net, mode="directed")
edges <- as_edgelist(net)
colnames(edges) <- c("i","j")
beh <- behaviour[,3]
p2 <- p1 <- cbind(beh,c(1:length(beh))) # two auxiliary matrices for merging depression
colnames(p1)<-c("si","i")
colnames(p2)<-c("sj","j")
w <- merge(merge(edges,p1),p2)
l <- k <- 0
for (a in 1:nrow(edges)){
if (is.na(w[a,3])==FALSE & is.na(w[a,4])==FALSE) {
k <- k+(1-abs(w[a,3]-w[a,4])/2)
l <- l+1
}
}
k <- k/l
rm(net,edges,beh,p1,p2,w)
observed <- matrix(k,nrow=1)
observations <- nrow(observed)
n <- length(k)
simulated <- matrix(rep(0,1000*n),nrow=1000)
for (z in 1:1000) {
edges <- ans$sims[[z]][[1]][[1]][[2]]
colnames(edges) <- c("i","j","xij")
beh <- ans$sims[[z]][[1]][[2]][[2]]
p2 <- p1 <- cbind(beh,c(1:length(beh))) # two auxiliary matrices for merging depression
colnames(p1)<-c("si","i")
colnames(p2)<-c("sj","j")
w <- merge(merge(edges,p1),p2)
l <- k <- 0
for (a in 1:nrow(edges)){
k <- k+(1-abs(w[a,4]-w[a,5])/2)
l <- l+1
}
simulated[z] <- k/l
}
variates <- ncol(simulated)
a <- cov(simulated)
ainv <- ginv(a)
expectation <- colMeans(simulated)
centeredSimulations <- scale(simulated, scale=FALSE)
centeredObservations <- observed - expectation
mhd <- function(x)
{
x %*% ainv %*% x
}
simTestStat <- apply(centeredSimulations, 1, mhd)
obsTestStat <- apply(centeredObservations, 1, mhd)
#if (twoTailed)
#{
# p <- sapply(1:observations, function (i)
# 1 - abs(1 - 2 * sum(obsTestStat[i] <=
# simTestStat)/length(simTestStat)) )
#}
#else
#{
p <- sapply(1:observations, function (i)
sum(obsTestStat[i] <= simTestStat) /length(simTestStat))
#}
load("gofb.Rdata")
gofb$Joint$Simulations <- simulated
gofb$Joint$Observations <- observed
gofb$Joint$p <- p
attr(gofb,"auxiliaryStatisticName")<-"Edgewise Homophily"
#plot(gofb,key=c("Edgewise Homophily"))
k1<-observed
s1<-simulated
mhp1<-p
##########################################################################################
### autocorrelation
net <- finalnet
net[which(net==10)] <- NA
net <- graph.adjacency(net, mode="directed")
edges <- as_edgelist(net)
colnames(edges)<-c("i","j")
beh <- behaviour[,3]
y <- which(is.na(beh)==FALSE)
nv <- length(y)
p_bar <- mean(beh[y])
denominator <- sum((beh[y]-p_bar)^2)/nv
p2 <- p1 <- cbind(beh,c(1:length(beh))) # two auxiliary matrices for merging depression
colnames(p1)<-c("pi","i")
colnames(p2)<-c("pj","j")
w <- merge(merge(edges,p1),p2)
ne <- nrow(edges)
numerator <- 0
for (a in 1:ne)
{
if (is.na(w[a,3])==FALSE & is.na(w[a,4])==FALSE) {
numerator <- numerator + (w[a,3]-p_bar)*(w[a,4]-p_bar)
}
}
numerator <- numerator/ne
moranw2 <- numerator/denominator
moranw2
denominator <- 2*sum((beh[y]-p_bar)^2)/(nv-1)
numerator <- 0
for (a in 1:ne)
{
if (is.na(w[a,3])==FALSE & is.na(w[a,4])==FALSE) {
numerator <- numerator + (w[a,3]-w[a,4])^2
}
}
numerator <- numerator/ne
gearyw2 <- numerator/denominator
gearyw2
k <- moranw2
observed <- matrix(k,nrow=1)
observations <- nrow(observed)
n <- length(k)
simulated <- matrix(rep(0,1000*n),nrow=1000)
for (z in 1:1000) {
edges <- ans$sims[[z]][[1]][[1]][[2]]
ne <- nrow(edges)
colnames(edges)<-c("i","j","xij")
nv <- length(ans$sims[[z]][[1]][[2]][[2]])
t <- matrix(ans$sims[[z]][[1]][[2]][[2]],ncol=1)
s_bar <- mean(t)
denominator <- sum((t-s_bar)^2)/nv
t2 <- t1 <- cbind(t,c(1:nv))
colnames(t1)<-c("si","i")
colnames(t2)<-c("sj","j")
w <- merge(merge(edges,t1),t2)
numerator <- sum(w[,3]*(w[,4]-s_bar)*(w[,5]-s_bar))/ne
simulated[z] <- numerator/denominator
}
variates <- ncol(simulated)
a <- cov(simulated)
ainv <- ginv(a)
expectation <- colMeans(simulated)
centeredSimulations <- scale(simulated, scale=FALSE)
centeredObservations <- observed - expectation
mhd <- function(x)
{
x %*% ainv %*% x
}
simTestStat <- apply(centeredSimulations, 1, mhd)
obsTestStat <- apply(centeredObservations, 1, mhd)
#if (twoTailed)
#{
# p <- sapply(1:observations, function (i)
# 1 - abs(1 - 2 * sum(obsTestStat[i] <=
# simTestStat)/length(simTestStat)) )
#}
#else
#{
p <- sapply(1:observations, function (i)
sum(obsTestStat[i] <= simTestStat) /length(simTestStat))
#}
load("gofb.Rdata")
gofb$Joint$Simulations <- simulated
gofb$Joint$Observations <- observed
gofb$Joint$p <- p
attr(gofb,"auxiliaryStatisticName")<-"Moran's I"
#plot(gofb,key=c("Moran's I"))
k2<-observed
s2<-simulated
mhp2<-p
k <- gearyw2
observed <- matrix(k,nrow=1)
observations <- nrow(observed)
n <- length(k)
simulated <- matrix(rep(0,1000*n),nrow=1000)
for (z in 1:1000) {
edges <- ans$sims[[z]][[1]][[1]][[2]]
ne <- nrow(edges)
colnames(edges)<-c("i","j","xij")
nv <- length(ans$sims[[z]][[1]][[2]][[2]])
t <- matrix(ans$sims[[z]][[1]][[2]][[2]],ncol=1)
s_bar <- mean(t)
denominator <- 2*sum((t-s_bar)^2)/(nv-1)
t2 <- t1 <- cbind(t,c(1:nv))
colnames(t1)<-c("si","i")
colnames(t2)<-c("sj","j")
w <- merge(merge(edges,t1),t2)
numerator <- sum(w[,3]*(w[,4]-w[,5])^2)/ne
simulated[z] <- numerator/denominator
}
variates <- ncol(simulated)
a <- cov(simulated)
ainv <- ginv(a)
expectation <- colMeans(simulated)
centeredSimulations <- scale(simulated, scale=FALSE)
centeredObservations <- observed - expectation
mhd <- function(x)
{
x %*% ainv %*% x
}
simTestStat <- apply(centeredSimulations, 1, mhd)
obsTestStat <- apply(centeredObservations, 1, mhd)
#if (twoTailed)
#{
# p <- sapply(1:observations, function (i)
# 1 - abs(1 - 2 * sum(obsTestStat[i] <=
# simTestStat)/length(simTestStat)) )
#}
#else
#{
p <- sapply(1:observations, function (i)
sum(obsTestStat[i] <= simTestStat) /length(simTestStat))
#}
load("gofb.Rdata")
gofb$Joint$Simulations <- simulated
gofb$Joint$Observations <- observed
gofb$Joint$p <- p
attr(gofb,"auxiliaryStatisticName")<-"Geary's C"
#plot(gofb,key=c("Geary's c"))
k3<-observed
s3<-simulated
mhp3<-p
k <- c(k1,k2,k3)
observed <- matrix(k,nrow=1)
observations <- nrow(observed)
n <- length(k)
simulated <- cbind(s1,s2,s3)
load("gofb.Rdata")
gofb$Joint$Simulations <- simulated
gofb$Joint$Observations <- observed
gofb$Joint$p <- c(mhp1,mhp2,mhp3)
attr(gofb,"auxiliaryStatisticName")<-"Behavior Similarity"
plot6 <- plot(gofb,key=c("Edgewise Homophily","Moran's I","Geary's c"), main="Behavior similarity")
jpeg(paste('Imputation',imp,'.jpeg'), width=297, height=210, units="mm", res=1080)
grid.arrange(plot1, plot2, plot3, plot4, plot5, plot6, ncol=3)
dev.off()
}
|
0d5e0079fc7dfa0b5044393838b297232c994099 | fd9b6834fc9574f2329bbe73a3d743940d60ff18 | /Combine_results.R | 266458d3049f1182a56d541c021d557ca43b565c | [] | no_license | egalimov/C.elegans_length_measurements | 7365555f9a5da601c5f6b7d165df46beaeba4d1e | 06a5698d21e2d589050da0922e554cc106534a0c | refs/heads/master | 2020-03-22T15:46:50.669160 | 2018-07-09T11:50:21 | 2018-07-09T11:50:21 | 140,277,820 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,748 | r | Combine_results.R | ###R script to combine results of analyses into a table
# Close any graphics devices left open by a previous run.
graphics.off()
# One line colour per plotted series (index offset by 1 for the time column);
# groups of consecutive input files share a colour.
colors <- c("brown","black","black","black","black","black","black",
"green","green","green","green",
"red","red","red","red",
"blue","blue","blue","blue","blue","blue"
)
# Time grid from -29.9 to +130.1 min in 0.5-min steps; column V2 holds the
# rounded time used as the join key when aligning each worm's series.
# BUG FIX: a duplicated, half-pasted copy of this block (ending in a stray "t"
# and ")" that made the script un-parseable) was removed.
bf<-data.frame(V1=seq(-29.9,130.1, by =0.5),stringsAsFactors=F)
temp1<-data.frame(bf,V2=rep(NA,length(bf[,1]),stringsAsFactors=F))
temp1$V2<-round(temp1$V1, 1)
head<-data.frame(bf,V2=rep(NA,length(bf[,1]),stringsAsFactors=F))
head$V2<-round(head$V1, 1)
tail<-data.frame(bf,V2=rep(NA,length(bf[,1]),stringsAsFactors=F))
tail$V2<-round(tail$V1, 1)
temp2<-data.frame(bf,V2=rep(NA,length(bf[,1]),stringsAsFactors=F))
temp2$V2<-round(temp2$V1, 1)
# NOTE(review): "head"/"tail" shadow the base R functions; consider renaming.
# Result containers: one row per analysis in `analyses`, and one column per
# input file (after the time column) in the comb* tables.
analyses<-data.frame(V1=seq(1,30, by =1),stringsAsFactors=F)
range1<-data.frame(V1=seq(0.1,180.1, by =0.5),stringsAsFactors=F)
combLength<-data.frame(range1,V2=rep(NA,length(range1[,1]),stringsAsFactors=F))
combHead<-data.frame(range1,V2=rep(NA, length(range1[,1]),stringsAsFactors=F))
combTail<-data.frame(range1,V2=rep(NA,length(range1[,1]),stringsAsFactors=F))
# (Removed a stray bare "combHead" line that only auto-printed the table.)
# Slope tables kept only for the disabled bookkeeping further below.
aDeclineSlope<-data.frame(stringsAsFactors=F)
aRaiseSlope<-data.frame(stringsAsFactors=F)
# Per-worm input files produced by the measurement pipeline.
my.path <- list("z1.txt","z2.txt","z3.txt","z4.txt","z5.txt","z6.txt","z7.txt","z8.txt","z9.txt","z10.txt",
"z11.txt","z12.txt","z13.txt","z14.txt","z15.txt","z16.txt","z17.txt","z18.txt","z19.txt","z20.txt")
# Read every per-worm results file and collect:
#  * combLength/combHead/combTail: row-aligned normalised length series
#    (one column per input file, offset by 1 for the time column);
#  * analyses: one row of scalar summary statistics per file;
#  * temp1/head/tail: series re-aligned to the blue-fluorescence-normalised
#    time key (input column 13), temp2: min-length-normalised key (column 12).
my.data <- list()
for (z in seq_along(my.path)) {
  my.data[[z]] <- read.delim(my.path[[z]], header = F, stringsAsFactors = F)
  kk1 <- my.data[[z]]
  s <- z + 1   # destination column in the comb* tables
  s1 <- z + 2  # destination column in temp1/head/tail/temp2 (after V1, V2)
  # Raw series are already row-aligned with range1, so copy positionally.
  for (i in seq_len(nrow(range1))) {
    combLength[i, s] <- kk1[i, 7]
    combHead[i, s] <- kk1[i, 10]
    combTail[i, s] <- kk1[i, 11]
  }
  # Scalar summaries (the former bare string "labels" are now real comments).
  analyses[z, 2] <- kk1[10, 9]   # relative minLength
  analyses[z, 3] <- kk1[23, 9]   # A
  analyses[z, 4] <- kk1[24, 9]   # B
  analyses[z, 5] <- kk1[25, 9]   # C
  analyses[z, 6] <- kk1[26, 9]   # D
  analyses[z, 7] <- kk1[12, 9]   # initial length in pixels
  analyses[z, 8] <- kk1[7, 9]    # blue fluorescence index
  analyses[z, 9] <- kk1[8, 9]    # min length index
  analyses[z, 10] <- kk1[28, 9]  # head index
  analyses[z, 11] <- kk1[29, 9]  # tail index
  analyses[z, 12] <- kk1[32, 9]  # maximal decrease over 3 min
  analyses[z, 13] <- kk1[39, 9]  # maximal increase over 3 min
  # Blue-fluorescence-normalised length: match rows on the rounded time key in
  # input column 13 (as before, the last matching input row wins).
  for (i in seq_len(nrow(temp1))) {
    for (k in seq_len(nrow(kk1))) {
      if (temp1[i, 2] == kk1[k, 13]) {
        temp1[i, s1] <- kk1[k, 7]
      }
    }
  }
  # Blue-fluorescence-normalised head series.
  for (i in seq_len(nrow(head))) {
    for (k in seq_len(nrow(kk1))) {
      if (head[i, 2] == kk1[k, 13]) {
        head[i, s1] <- kk1[k, 10]
      }
    }
  }
  # Blue-fluorescence-normalised tail series.
  for (i in seq_len(nrow(tail))) {
    for (k in seq_len(nrow(kk1))) {
      if (tail[i, 2] == kk1[k, 13]) {
        tail[i, s1] <- kk1[k, 11]
      }
    }
  }
  # Min-length-normalised series (time key in input column 12).
  for (i in seq_len(nrow(temp2))) {
    for (k in seq_len(nrow(kk1))) {
      if (temp2[i, 2] == kk1[k, 12]) {
        temp2[i, s1] <- kk1[k, 7]
      }
    }
  }
  # Disabled slope bookkeeping, kept for reference (was "commented out" with a
  # multi-line string literal in the original):
  # for (h in 1:15){
  #   p = h; n = h + 17; m = h + 32
  #   aDeclineSlope[n,z] = kk1[h,13]; aRaiseSlope[n,z] = kk1[h,16]
  #   aDeclineSlope[m,z] = kk1[h,14]; aRaiseSlope[m,z] = kk1[h,17]
  #   aDeclineSlope[p,z] = kk1[h,15]; aRaiseSlope[p,z] = kk1[h,18]
  # }
}
# Write the collected tables as headerless tab-separated files.
write.table(analyses, "analyses.txt", sep = "\t", eol = "\n", row.names = FALSE, col.names = FALSE)
write.table(combLength, "combLength.txt", sep = "\t", eol = "\n", row.names = FALSE, col.names = FALSE)
write.table(combHead, "combHead.txt", sep = "\t", eol = "\n", row.names = FALSE, col.names = FALSE)
write.table(combTail, "combTail.txt", sep = "\t", eol = "\n", row.names = FALSE, col.names = FALSE)
write.table(temp1, "bf.txt", sep = "\t", eol = "\n", row.names = FALSE, col.names = FALSE)
write.table(temp2, "mn.txt", sep = "\t", eol = "\n", row.names = FALSE, col.names = FALSE)
# Disabled slope exports (previously "commented out" via a bare string
# literal, which R still evaluated and auto-printed at top level):
# write.table(aDeclineSlope, "aDeclineSlope.txt", sep = "\t", eol = "\n", row.names = FALSE, col.names = FALSE)
# write.table(aRaiseSlope, "aRaiseSlope.txt", sep = "\t", eol = "\n", row.names = FALSE, col.names = FALSE)
png("combLength.png")
plot(combLength[,1],combLength[,2], type="n", xlab = "Time, min", ylab = "%, initial length", xlim=c(0, 160), ylim=c(0, 1.3))
for (aaa in 1:length(my.path)){
t=aaa+1
lines(combLength[,1],combLength[,t], col = colors[t])
title(main = "Length")
}
png(file = "combLength.png")
dev.off()
graphics.off()
png("combHead.png")
plot(combHead[,1],combHead[,2], type="n", xlab = "Time, min", ylab = "%, initial length", xlim=c(0, 160), ylim=c(0, 1.3))
for (aaa in 1:length(my.path)){
t=aaa+1
lines(combHead[,1],combHead[,t], col = colors[t])
title(main = "Head")
}
png(file = "combHead.png")
dev.off()
graphics.off()
png("combTail.png")
plot(combTail[,1],combTail[,2], type="n", xlab = "Time, min", ylab = "%, initial length", xlim=c(0, 160), ylim=c(0, 1.3))
for (aaa in 1:length(my.path)){
t=aaa+1
lines(combHead[,1],combHead[,t], col = colors[t])
title(main = "Tail")
}
png(file = "combTail.png")
dev.off()
graphics.off()
png("bf.png")
plot(temp1[,2],temp1[,3], type="n", xlab = "Time, min", ylab = "%, initial length", xlim=c(-30, 160), ylim=c(0, 1.3))
for (aaa in 1:length(my.path)){
t=aaa+2
t1=t-1
lines(temp1[,1],temp1[,t], col = colors[t1])
title(main = "BF start normalized")
}
png(file = "bf.png")
dev.off()
graphics.off()
png("bf_head.png")
plot(head[,2],head[,3], type="n", xlab = "Time, min", ylab = "%, initial length", xlim=c(-30, 160), ylim=c(0, 1.3))
for (aaa in 1:length(my.path)){
t=aaa+2
t1=t-1
lines(head[,1],head[,t], col = colors[t1])
title(main = "BF start normalized _ head")
}
png(file = "bf_head.png")
dev.off()
graphics.off()
png("bf_tail.png")
plot(tail[,2],tail[,3], type="n", xlab = "Time, min", ylab = "%, initial length", xlim=c(-30, 160), ylim=c(0, 1.3))
for (aaa in 1:length(my.path)){
t=aaa+2
t1=t-1
lines(tail[,1],tail[,t], col = colors[t1])
title(main = "BF start normalized _ tail")
}
png(file = "bf_tail.png")
dev.off()
graphics.off()
png("ml.png")
plot(temp2[,2],temp2[,3], type="n", xlab = "Time, min", ylab = "%, initial length", xlim=c(-30, 160), ylim=c(0, 1.3))
for (aaa in 1:length(my.path)){
t=aaa+2
t1=t-1
lines(temp2[,1],temp2[,t], col = colors[t1])
title(main = "min Length start normalized")
}
png(file = "ml.png")
dev.off()
graphics.off() |
dab3f4fc1738fb1081ce85520dada33b4e428593 | 200463671864174ede1ca8c55e2cfae8f4e4b2f9 | /基本数据管理.R | f9a01765d21804c6f81da0a52080691a5e03aaff | [] | no_license | ChiJiuJiu/R | 6420404002a1c5eb9400fdfb3f2923fa865f1039 | bc648c6410a92a74f5231f076b17c2656b7b62d2 | refs/heads/master | 2020-05-31T14:59:59.871791 | 2019-06-08T12:22:30 | 2019-06-08T12:22:30 | 190,345,698 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,985 | r | 基本数据管理.R | #创建新变量
# Creating new variables -----------------------------------------------------
mydata <- data.frame(x1 = c(2, 2, 6, 4), x2 = c(3, 4, 2, 8))
mydata
sumx <- mydata$x1 + mydata$x2

# Adding new variables to a data frame.
# Method 1: direct column assignment.
mydata$sumx <- mydata$x1 + mydata$x2
mydata$meanx <- (mydata$x1 + mydata$x2) / 2
mydata

# Method 2: attach()/detach() so columns can be referenced by bare name.
# (attach() is discouraged in scripts; shown here for completeness.)
attach(mydata)
mydata$sumx <- x1 + x2
mydata$meanx <- (x1 + x2) / 2
detach(mydata)
mydata
rm(mydata)

# Method 3: transform() builds all derived columns in one call.
# BUG FIX: the data frame must be re-created first -- the original code
# called transform() on the object that was just removed, which errors
# with "object 'mydata' not found".
mydata <- data.frame(x1 = c(2, 2, 6, 4), x2 = c(3, 4, 2, 8))
mydata <- transform(mydata, sumx = x1 + x2, meanx = (x1 + x2) / 2)
mydata
# Recoding variables ----------------------------------------------------------
# Build the example "leadership" survey data frame (age 99 is used as a
# missing-value code in the raw data).
manager <- c(1, 2, 3, 4, 5)
data <- c("10/24/08", "10/28/08", "10/1/08", "10/12/08", "5/1/09")
country <- c("US", "US", "UK", "UK", "UK")
gender <- c("M", "F", "F", "M", "F")
age <- c(32, 45, 25, 39, 99)
q1 <- c(5, 3, 3 ,3, 2)
q2 <- c(4, 5, 5, 3, 2)
q3 <- c(5, 2, 5, 4, 1)
q4 <- c(5, 5, 5, NA, 2)
q5 <- c(5, 5, 2, NA, 1)
leadership <- data.frame(manager, data, country, gender, age, q1, q2, q3, q4, q5, stringsAsFactors = FALSE)
# Recode age into categories.
# Method 1: element-wise logical assignment on the data frame columns.
leadership$age[leadership$age == 99] <- NA
leadership$agecat[leadership$age > 65] <- "Elder"
leadership$agecat[leadership$age <= 65 & leadership$age > 39] <- "Middle age"
leadership$agecat[leadership$age <= 39] <- "Young"
# Method 2: within() evaluates the block with the columns in scope and
# returns the modified copy. The last line is a no-op here because 99 was
# already recoded to NA by method 1 above.
leadership <- within(leadership, {
  agecat <- NA
  agecat[age > 65] <- "Elder"
  agecat[age <= 65 & age >39] <- "Middle age"
  agecat[age <= 39] <- "Young"
  agecat[age == 99] <- NA
})
# Renaming variables.
# Method 1: fix() opens an interactive spreadsheet editor
# (only usable in an interactive session).
fix(leadership)
# Method 2: plyr::rename().
# NOTE(review): installing packages from inside a script is discouraged;
# prefer guarding with requireNamespace().
install.packages("plyr")
# Load the package.
library(plyr)
# Rename the 'data' column to 'testdate'.
leadership <- rename(leadership, c(data = "testdate"))
# Print all variable names.
names(leadership)
# Method 3: assign into names() directly.
names(leadership)[2] <- "date"
names(leadership)[6:10] <- c("item1", "item2", "item3", "item4", "item5")
# Missing values.
y <- c(1, 2, 3, NA)
is.na(y) # returns: FALSE FALSE FALSE TRUE
is.na(leadership[, 6:10])
# Sums involving NA propagate NA.
x <- c(1, 2, NA, 3)
z <- sum(x)
# Drop missing values before summing.
y <- sum(x, na.rm = TRUE)
# Remove every row that contains any missing value.
newdata <- na.omit(leadership)
8f94bcb313091fea12c3ec25fd85a92350c767f7 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#118.A#48.c#.w#9.s#41.asp/ctrl.e#1.a#3.E#118.A#48.c#.w#9.s#41.asp.R | facde5940b30e98f221d1d3440ae92e6a18ef7e9 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 91 | r | ctrl.e#1.a#3.E#118.A#48.c#.w#9.s#41.asp.R | c9a42cce73612673bce4c6aaa5b9ee09 ctrl.e#1.a#3.E#118.A#48.c#.w#9.s#41.asp.qdimacs 8661 25486 |
53c7c4077e5686ea42346e7a9a34fcbc670c1683 | ca93f195a1bc06f75c2bf9a40fc5490261f20870 | /ui.R | f44ea9764e17563e4f11364bfe00c1c586e71306 | [] | no_license | vanderq/CourseraDevelopingDataProducts | 04d1ebb57d977c226a7a1aea58d6749e8aa0b85f | 91154c1526169bfae6720872f2f86445a68f60a8 | refs/heads/master | 2020-04-17T09:21:38.383895 | 2019-01-18T18:49:40 | 2019-01-18T18:49:40 | 166,451,693 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,697 | r | ui.R | #
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
# Read the NCHS leading-causes-of-death data once at UI build time so the
# drop-down choices below can be populated from it.
nchsData2 <- read.csv("NCHS_-_Leading_Causes_of_Death__United_States.csv")
# Unique causes of death, sorted for the select box.
causes <- unique(nchsData2$Cause.Name)
causes <- sort(causes)
# Unique states, sorted, with an "All States" option prepended.
# as.vector() guards against a factor column (pre-R 4.0 read.csv default),
# so c() concatenates plain strings rather than factor codes.
states <- unique(nchsData2$State)
states <- sort(states)
states <- as.vector(states)
states <- c("All States", states)
# UI layout: sidebar with filter controls, main panel with the plot
# and summary text.
shinyUI(fluidPage(
  # Application title
  titlePanel("Death Causes in the US between 1999 and 2016"),
  # Sidebar with a year-range slider and cause/state selectors.
  sidebarLayout(
    sidebarPanel(
      sliderInput("sliderYear", "For which years do you want to see the Data?", min = 1999, max = 2016, value = c(1999, 2016)),
      checkboxInput("suppressLargestState", "Suppress Data for whole Country", value=FALSE),
      selectInput(
        "selectCause",
        label = h5("Select Cause of Death"),
        choices = causes
      ),
      selectInput(
        "selectState",
        label = h5("State"),
        choices = states
      )
      # Inputs update reactively; a submit button is intentionally disabled.
      #submitButton("Submit")
    ),
    # Main panel: echo of the selections, the deaths-over-time plot, and the
    # fitted linear-model coefficients (rendered by server.R).
    mainPanel(
      textOutput("SelectedYearMin"),
      textOutput("SelectedYearMax"),
      textOutput("SelectedCause"),
      textOutput("SelectedState"),
      plotOutput("deathPlot"),
      textOutput("LinearModelIntercept"),
      textOutput("LinearModelSlope")
    )
  )
))
|
4cd9b6644d2f3952e1ca518e4a071b309020eede | b61c793564f2197ea1f076cabc990f81baccec8f | /man/grepf.Rd | a6cf2b32b9579d51e9ceedf2b16d2f5e6597d9cc | [
"MIT"
] | permissive | tkonopka/shrt | 46fabfcbfd3819a9016b412f1a7b91f4ba88c28b | eeef8bf50aee0412b5feff427c12ba2eec17332d | refs/heads/master | 2020-05-21T17:48:37.016989 | 2020-02-28T06:26:33 | 2020-02-28T06:26:33 | 60,825,097 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 581 | rd | grepf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{grepf}
\alias{grepf}
\title{Pattern matching inside text files}
\usage{
grepf(pattern, path = ".", file.pattern = NULL, ...)
}
\arguments{
\item{pattern}{character, pattern to look for}
\item{path}{directory to look in}
\item{file.pattern}{pattern to consider among the files}
\item{...}{other parameters passed on to list.files()}
}
\description{
Reads contents of all files in a directory and matches a pattern line-by-line.
}
\details{
The name is short for: (grep) inside (f)iles
}
|
41a998b06180b1e32d3ce65a5a10cf8713afcff7 | 5c7616c0498df84d91c80ff41a01865c4abb8eaa | /R_Dates.r | 56d15a1768848ef368eec8f476a10fad2f8d945c | [] | no_license | nkuhta/R-Basics | f68e8e131b4caf1d4c4c75e4865859dc61ed7898 | 0d4209193972db6e930fdc21ca831dea03ca7681 | refs/heads/master | 2020-05-21T20:13:11.273532 | 2017-07-08T00:45:46 | 2017-07-08T00:45:46 | 64,035,089 | 0 | 0 | null | 2016-10-14T03:36:15 | 2016-07-23T20:34:03 | R | UTF-8 | R | false | false | 2,586 | r | R_Dates.r | ## Dates in R
## Tutorial notes on dates and times in R.
## Refer to Coursera R Programming Course from John Hopkins

###############################################
###############  Dates in R  #################
###############################################

# R counts days (Date) and seconds (POSIXct) since 1970-01-01.
x <- as.Date("1970-01-01")

# output prints like a character
# > x
# [1] "1970-01-01"

# unclass gives the days since 1970-01-01
# > unclass(x)
# [1] 0
# > unclass(as.Date("1970-01-02"))
# [1] 1

###############################################
###############  Times in R  #################
###############################################

# POSIXct - stores very large integer (seconds since the epoch)
# POSIXlt - stores as a list with lots of useful information

t <- Sys.time()
# > t
# [1] "2016-09-01 11:22:35 PDT"

p <- as.POSIXlt(t)
# > names(unclass(p))
#  [1] "sec"    "min"    "hour"   "mday"   "mon"    "year"
#  [7] "wday"   "yday"   "isdst"  "zone"   "gmtoff"
# > p$sec
# [1] 29.13939

# Sys.time() already returns POSIXct format.
t1 <- Sys.time()
# > t1
# [1] "2016-09-01 11:49:26 PDT"
# > unclass(t1)
# [1] 1472755767   # number of seconds since 1970
# > t1$sec
# Error in t1$sec : $ operator is invalid for atomic vectors
# > print(as.POSIXlt(t1)$sec)
# [1] 26.93587

###############################################
#################  strptime  ##################
###############################################

# strptime() parses character dates with an explicit format string.
datestring <- c("January 10, 2012 10:40","December 9, 2011 9:10")
d1 <- strptime(datestring,"%B %d, %Y %H:%M") # "%B %d, %Y %H:%M" = "%Month %day, %Year %Hour:%Minute"
# > d1
# [1] "2012-01-10 10:40:00 PST" "2011-12-09 09:10:00 PST"
# > class(d1)
# [1] "POSIXlt" "POSIXt"

###############################################
############  Time Arithmetic  ################
###############################################

m <- as.Date("2012-01-01")
# NOTE: %b formally matches the abbreviated month name; accepting the full
# name "January" works on common platforms but is locale/implementation
# dependent.
n <- strptime("9 January 2011 11:34:21","%d %b %Y %H:%M:%S")
# Mixing Date and POSIXlt in arithmetic errors -- convert first.
# > m-n
# Error in m - n : non-numeric argument to binary operator
# In addition: Warning message:
# Incompatible methods ("-.Date", "-.POSIXt") for "-"
m <- as.POSIXlt(m)
# > m-n
# Time difference of 356.1845 days

# Even leap year, leap seconds, daylight savings, and time zones are tracked.
f <- as.Date("2012-03-01")
u <- as.Date("2012-02-28")
# > f-u
# Time difference of 2 days (2012 is a leap year, so Feb 29 is counted)

g <- as.POSIXct("2012-10-25 01:00:00")
q <- as.POSIXct("2012-10-25 06:00:00",tz="GMT")
# > g-q
# Time difference of 2 hours (time-zone offsets are applied automatically)
e0e742b2594a1fdd3488e7d0ad48c5518de96393 | e47dedfe6e24ec0302bc0465c01d0999f439770f | /coursera3/run_analysis.R | 85cb731264a8b4be1861ee253f7f2d01d3a4d2bd | [] | no_license | ryentes/dscoursework | a871b3f793451c5253a92165b28067445b8ece25 | 69b9a7e37e246619c9e04a813da5674e39393eef | refs/heads/master | 2021-01-10T03:54:22.832545 | 2015-12-04T15:41:14 | 2015-12-04T15:41:14 | 44,930,096 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,686 | r | run_analysis.R | library("plyr")
library("dplyr")
library("Hmisc")

# Helper that reads and prepares one of the HAR data sets ("test"/"train");
# defined in getData.R alongside this script.
source("getData.R")

# Activity labels (id -> name), lower-cased for readability.
y <- read.table("activity_labels.txt", header=FALSE)
activity_labels <- tolower(as.vector(y[,2]))

# Feature (column) names for the measurement files.
x <- read.table("features.txt", header=FALSE)
features <- as.vector(x[,2])

# Read the data sets and prep them for merging
test <- getData("test", features, activity_labels)
train <- getData("train", features, activity_labels)

# 1- Merge the data sets
all <- rbind(test, train)

# 2- Drop all the variables but subject id, activity id,
# and means and std deviations
names <- colnames(all)
isin <- grepl("std()", names, fixed=TRUE) | grepl("mean()", names, fixed=TRUE)
# Always keep the first two columns (presumably subjectID and activityID,
# which are referenced by name below).
isin[1:2]=TRUE
all <- all[,isin]

# 3 Labels were applied to the factor in the function earlier
# 4 Clean up the variable names: collapse the "-mean()"/"-std()" markers into
# camel-cased "Mean"/"Std" (e.g. "tBodyAcc-mean()-X" becomes "tBodyAccMeanX").
# The dash-suffixed forms are listed first so they are consumed before their
# shorter prefixes.
nm <- colnames(all)
replacements <- c(
  "-mean()-" = "Mean",
  "-mean()"  = "Mean",
  "-std()-"  = "Std",
  "-std()"   = "Std"
)
for (pattern in names(replacements)) {
  nm <- gsub(pattern, replacements[[pattern]], nm, fixed = TRUE)
}
colnames(all) <- nm

# 5 Calculate means for each variable for each subject and activity
final <- ddply(all, .(subjectID, activityID), summarise_each, funs(mean), -subjectID, -activityID)

# Write out the tidy dataset
write.table(final, "output.txt", row.name=FALSE)
251a92ae89af0d425e782675ac025f52fe9b67fb | 2f789a7d8f28fe6be900ad6d18cc7ca3de18fd51 | /seq_analysis/utils/common_aesthetics.R | bc20c823d1811aac106ce2a18a283a8bad4d38a7 | [
"MIT"
] | permissive | bradleycolquitt/deaf_gex | da2c951a30e40237ce1dc8d2809d59b11e193d81 | 6ab0f9bc8317191cb78852bf40212e51e2027ae0 | refs/heads/main | 2023-05-25T01:03:51.119168 | 2023-05-01T20:31:20 | 2023-05-01T20:31:20 | 478,714,099 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,154 | r | common_aesthetics.R | library(ggsci)
# Shared plotting aesthetics: canonical factor orderings and named color
# palettes for brain regions and experimental groups.

# Region codes in canonical plotting order, with their display labels.
position_levels <- c("ra", "arco", "hvc", "ncl", "lman", "nido", "x", "stri",
                     "fieldl", "ov", "meso")
position_pretty_levels <- c("RA", "Arco", "HVC", "NCL", "LMAN", "Nido",
                            "Area X", "Striatum", "Field L", "Ovoid", "Meso")

# Region -> color mapping, keyed by short code and by display label.
position_colors <- setNames(
  c("#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c",
    "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a", "#ffff99", "#b15928"),
  c("nido", "lman", "stri", "x", "arco", "ra", "ncl", "hvc", "fieldl", "ov",
    "dummy", "meso")
)
position_pretty_colors <- setNames(
  position_colors,
  c("Nido", "LMAN", "Striatum", "Area X", "Arco", "RA", "NCL", "HVC",
    "Field L", "Ovoid", "dummy", "Meso")
)

# Variant with the LMAN/Nido and RA/Arco color assignments swapped.
position_colors2 <- setNames(
  c("#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c",
    "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a", "#ffff99", "#b15928"),
  c("arco", "ra", "stri", "x", "nido", "lman", "ncl", "hvc", "fieldl", "ov",
    "dummy", "meso")
)
position_pretty_colors2 <- setNames(
  position_colors2,
  c("Arco", "RA", "Striatum", "Area X", "Nido", "LMAN", "NCL", "HVC",
    "Field L", "Ovoid", "dummy", "Meso")
)

# Lookup table linking codes, labels and colors, with factor levels fixed to
# the canonical plotting order.
position_table <- data.frame(
  position = names(position_colors),
  position_pretty = names(position_pretty_colors),
  colors = position_pretty_colors
)
position_table$position_pretty <- factor(position_table$position_pretty,
                                         levels = position_pretty_levels)
position_table$position <- factor(position_table$position,
                                  levels = position_levels)

# Deafened vs intact condition colors.
deaf_colors <- setNames(c("firebrick3", "grey50"), c("deaf", "intact"))

# Greyscale ramp keyed by experiment duration.
duration.of.experiment_colors <- setNames(c("grey80", "grey50", "grey10"),
                                          c("4", "9", "14"))

# Condition x lesion-status palettes.
lesion_group_colors <- setNames(
  c("#1f78b4", "#33a02c", "#e31a1c", "#6a3d9a"),
  c("intact-FALSE", "intact-TRUE", "deaf-FALSE", "deaf-TRUE")
)
lesion_group_colors2 <- setNames(
  c("#1f78b4", "#a6cee3", "#e31a1c", "#fb9a99"),
  c("hearing-contra", "hearing-ipsi", "deaf-contra", "deaf-ipsi")
)
# Alternative palette drawn from ggsci's JAMA scale.
lesion_group_colors2_alt <- setNames(
  pal_jama()(7)[c(1, 2, 3, 4)],
  c("hearing-contra", "hearing-ipsi", "deaf-contra", "deaf-ipsi")
)
63ad0974c42d3a6befe7a84d79829a4fa33703ce | 052f14fcd54d3674073a7a0aff2e012b15b0f395 | /immuno_graphical/code/covid_corr_plot_functions.R | 27a3a64c2dd9c0b1f3444a9481f1a90067c91b5c | [] | no_license | brborate/correlates_covpn_bb_archived | e98a4b5317a6fd10949518e15ac7a5ff214ab12b | 79b1a9e5bbd5c4857ab39f381350e8f23cf7da72 | refs/heads/main | 2023-03-12T18:30:24.026841 | 2021-03-01T21:54:26 | 2021-03-01T21:54:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,080 | r | covid_corr_plot_functions.R | #' Pairplots of assay readouts
#' Pairplots of assay readouts
#'
#' Produce the pairplots of assay readouts. The correlation is calculated by
#' the resampling-based strata adjusted Spearman rank correlation
#' (via \code{ggally_cor_resample}, defined elsewhere in this project).
#'
#' @param plot_dat: data frame: data for plotting.
#' @param time: string: one of "D1", "D29", "D57", "Delta29overB" or
#'  "Delta57overB".
#' @param assays: vector of strings: the assay names for plotting.
#' @param strata: string: the column name in plot_dat that indicates the
#'  strata.
#' @param weight: string: the column name in plot_dat that indicates the
#'  individual sampling weights.
#' @param plot_title: string: title of the plot.
#' @param column_labels: vector of strings: titles of each column.
#' @param height: scalar: plot height.
#' @param width: scalar: plot width.
#' @param units: string: the unit of plot height and width.
#' @param corr_size: scalar: font size of the correlation labels.
#' @param point_size: scalar: point size in the scatter plots.
#' @param loess_lwd: scalar: loess line width in the scatter plots.
#' @param plot_title_size: scalar: font size of the plot title.
#' @param column_label_size: scalar: font size of the column labels.
#' @param axis_label_size: scalar: font size of the axis labels.
#' @param filename: string: output file name.
#'
#' @return pairplots: a ggplot object of the pairplot
covid_corr_pairplots <- function(plot_dat, ## data for plotting
                                 time,
                                 assays,
                                 strata,
                                 weight,
                                 plot_title,
                                 column_labels,
                                 height = 5.1,
                                 width = 5.05,
                                 units = "in",
                                 corr_size = 5,
                                 point_size = 0.5,
                                 loess_lwd = 1,
                                 plot_title_size = 10,
                                 column_label_size = 6.5,
                                 axis_label_size = 9,
                                 filename) {
  dat.tmp <- plot_dat[, paste0(time, assays)]

  ## Common axis range across all panels, widened to the enclosing whole
  ## log10 units when the observed range is very narrow.
  rr <- range(dat.tmp, na.rm = TRUE)
  if (rr[2] - rr[1] < 2) {
    ## BUG FIX: was floor(rr[1]):ceiling(rr[2]), which can yield a vector
    ## of length > 2; scale limits must have length 2.
    rr <- c(floor(rr[1]), ceiling(rr[2]))
  }
  ## Integer tick positions inside the range; fall back to the enclosing
  ## integers when the range contains no interior integer.
  if (rr[2] > ceiling(rr[1])) {
    breaks <- ceiling(rr[1]):floor(rr[2])
  } else {
    breaks <- floor(rr[1]):ceiling(rr[2])
  }
  ## Thin to even ticks when the axis spans 6 or more units.
  if (max(breaks) - min(breaks) >= 6) {
    breaks <- breaks[breaks %% 2 == 0]
  }

  pairplots <- ggpairs(
    data = dat.tmp, title = plot_title,
    columnLabels = column_labels,
    upper = list(
      continuous =
        wrap(ggally_cor_resample,
          stars = FALSE,
          size = corr_size,
          ## BUG FIX: strata/weight were previously taken from a global
          ## `subdat` object instead of the `plot_dat` argument.
          strata = plot_dat[, strata],
          weight = plot_dat[, weight]
        )
    ),
    lower = list(
      continuous =
        wrap("points", size = point_size)
    )
  ) +
    theme_bw() +
    theme(
      plot.title = element_text(hjust = 0.5, size = plot_title_size),
      strip.text = element_text(size = column_label_size, face = "bold"),
      axis.text = element_text(size = axis_label_size),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank()
    )

  ## Diagonal (density) panels keep a fixed 0-1.2 y range; off-diagonal
  ## scatter panels share the common log10 axes and get a loess smoother.
  pairplots[1, 1] <- pairplots[1, 1] +
    scale_x_continuous(limits = rr, breaks = breaks) + ylim(0, 1.2)
  for (j in 2:pairplots$nrow) {
    for (k in 1:(j - 1)) {
      pairplots[j, k] <- pairplots[j, k] +
        stat_smooth(
          method = "loess", color = "red", se = FALSE,
          lwd = loess_lwd
        ) +
        scale_x_continuous(
          limits = rr, breaks = breaks,
          labels = label_math(10^.x)
        ) +
        scale_y_continuous(
          limits = rr, breaks = breaks,
          labels = label_math(10^.x)
        )
    }
    pairplots[j, j] <- pairplots[j, j] +
      scale_x_continuous(
        limits = rr, breaks = breaks,
        labels = label_math(10^.x)
      ) + ylim(0, 1.2)
  }
  ggsave(
    filename = filename, plot = pairplots, width = width, height = height,
    units = units
  )
  return(pairplots)
}
###############################################################################
#' Weighted RCDF plots, grouped by a categorical variable
#'
#' Produce the weighted RCDF plots, one panel per level of the faceting
#' variable, with one curve per level of the color variable.
#'
#' @param plot_dat: data frame: data for plotting.
#' @param x: string: column name in the plot_dat for plotting the value.
#' @param facet_by: string: column name in the plot_dat for deciding the
#'  panels.
#' @param color: string: column name in plot_dat distinguishing the
#'  separate RCDF curves.
#' @param weight: string: the column name in plot_dat that indicates the
#'  individual sampling weights.
#' @param lwd: scalar: RCDF line width.
#' @param xlim: numeric vector of length two: range of the x-axis.
#' @param xbreaks: numeric vector: locations of where to plot axis ticks.
#' @param palette: string vector: palette that decides the colors of the RCDF
#'  curves.
#' @param legend: string vector of length nlevels(plot_dat[, color]):
#'  legend labels.
#' @param legend_size: scalar: font size of the legend labels.
#' @param legend_nrow: integer: number of rows to arrange the legend labels.
#' @param panel_titles: string vector: subtitles of each panel.
#' @param panel_title_size: scalar: font size of the panel titles.
#' @param axis_size: scalar: font size of the axis labels.
#' @param axis_titles: string vector: axis titles for the panels.
#' @param axis_title_size: scalar: font size of the axis title.
#' @param arrange_nrow: integer: number of rows to arrange the panels.
#' @param arrange_ncol: integer: number of columns to arrange the panels.
#' @param height: scalar: plot height.
#' @param width: scalar: plot width.
#' @param units: string: the unit of plot height and width.
#' @param filename: string: output file name.
#'
#' @return output_plot: a ggplot object of the RCDF plots
covid_corr_rcdf_facets <- function(plot_dat,
                                   x,
                                   facet_by,
                                   color,
                                   weight,
                                   lwd = 1,
                                   xlim = c(-2, 10),
                                   xbreaks = seq(-2, 10, 2),
                                   palette = c(
                                     "#1749FF", "#D92321", "#0AB7C9",
                                     "#FF6F1B", "#810094", "#378252",
                                     "#FF5EBF", "#3700A5", "#8F8F8F",
                                     "#787873"
                                   ),
                                   legend = levels(plot_dat[, color]),
                                   legend_size = 10,
                                   legend_nrow = ceiling(length(legend) / 2),
                                   panel_titles,
                                   panel_title_size = 10,
                                   axis_size = 10,
                                   axis_titles,
                                   axis_title_size = 9,
                                   arrange_nrow =
                                     ceiling(nlevels(plot_dat[, facet_by]) / 2),
                                   arrange_ncol = 2,
                                   height = 6.5,
                                   width = 6.5,
                                   units = "in",
                                   filename) {
  ## One RCDF panel per level of the faceting variable.
  rcdf_list <- vector("list", nlevels(plot_dat[, facet_by]))
  for (aa in 1:nlevels(plot_dat[, facet_by])) {
    rcdf_list[[aa]] <- ggplot(
      subset(plot_dat, plot_dat[, facet_by] ==
        levels(plot_dat[, facet_by])[aa]),
      aes_string(x = x, color = color, weight = weight)
    ) +
      ## 1 - ecdf gives the reverse CDF.
      geom_line(aes(y = 1 - ..y..), stat = "ecdf", lwd = lwd) +
      theme_pubr(legend = "none") +
      ylab("Reverse ECDF") +
      xlab(axis_titles[aa]) +
      scale_x_continuous(
        labels = label_math(10^.x), limits = xlim,
        breaks = xbreaks
      ) +
      scale_color_manual(
        values = palette,
        labels = legend
      ) +
      ggtitle(panel_titles[aa]) +
      guides(color = guide_legend(nrow = legend_nrow, byrow = TRUE)) +
      theme(
        plot.title = element_text(hjust = 0.5, size = panel_title_size),
        legend.title = element_blank(),
        legend.text = element_text(size = legend_size, face = "bold"),
        panel.grid.minor.y = element_line(),
        panel.grid.major.y = element_line(),
        axis.title = element_text(size = axis_title_size),
        axis.text = element_text(size = axis_size)
      )
  }
  ## BUG FIX: the arrangement previously hard-coded ncol = 2, nrow = 2,
  ## silently ignoring the arrange_ncol/arrange_nrow arguments.
  output_plot <- ggarrange(
    plotlist = rcdf_list, ncol = arrange_ncol, nrow = arrange_nrow,
    common.legend = TRUE, legend = "bottom",
    align = "h"
  )
  ggsave(
    filename = filename, plot = output_plot, width = width,
    height = height, units = units
  )
  return(output_plot)
}
###############################################################################
#' Weighted RCDF plot
#'
#' Produce a single weighted RCDF (reverse empirical CDF) plot of assay
#' readouts, with one curve per combination of the color and linetype
#' grouping variables.
#'
#' @param plot_dat: data frame: data for plotting.
#' @param x: string: column name in the plot_dat for plotting the value.
#' @param color: string: column name in plot_dat that distinguishes the
#'  RCDF curves by color.
#' @param lty: string: column name in plot_dat that distinguishes the
#'  RCDF curves by line type.
#' @param weight: string: the column name in plot_dat that indicates the
#'  individual sampling weights.
#' @param palette: string vector: palette that decides the colors of the RCDF
#'  curves.
#' @param xlab: string: x-axis title.
#' @param lwd: scalar: RCDF line width.
#' @param xlim: numeric vector of length two: range of the x-axis.
#' @param xbreaks: numeric vector: locations of where to plot axis ticks.
#' @param plot_title_size: scalar: font size of the plot title.
#' @param legend_position: position of the legend in the plot.
#' @param legend_size: scalar: font size of the legend labels.
#' @param axis_title_size: scalar: font size of the axis titles.
#' @param axis_size: scalar: font size of the axis labels.
#' @param height: scalar: plot height.
#' @param width: scalar: plot width.
#' @param units: string: the unit of plot height and width.
#' @param filename: string: output file name.
#'
#' @return output_plot: a ggplot object of the RCDF plot
covid_corr_rcdf <- function(plot_dat,
                            x,
                            color,
                            lty,
                            weight,
                            palette = c(
                              "#1749FF", "#D92321", "#0AB7C9",
                              "#FF6F1B", "#810094", "#378252",
                              "#FF5EBF", "#3700A5", "#8F8F8F",
                              "#787873"
                            ),
                            xlab,
                            lwd = 1,
                            xlim = c(-2, 10),
                            xbreaks = seq(-2, 10, by = 2),
                            plot_title_size = 10,
                            legend_position = "right",
                            legend_size = 10,
                            axis_title_size = 9,
                            axis_size = 10,
                            height = 5,
                            width = 8,
                            units = "in",
                            filename) {
  ## 1 - ecdf gives the reverse CDF; x-axis is labelled in powers of 10.
  output_plot <- ggplot(
    plot_dat,
    aes_string(
      x = x, color = color, lty = lty,
      weight = weight
    )
  ) +
    geom_line(aes(y = 1 - ..y..), stat = "ecdf", lwd = lwd) +
    theme_pubr() +
    scale_x_continuous(
      limits = xlim, labels = label_math(10^.x),
      breaks = xbreaks
    ) +
    scale_color_manual(values = palette) +
    ylab("Reverse ECDF") +
    xlab(xlab) +
    theme(
      plot.title = element_text(hjust = 0.5, size = plot_title_size),
      legend.position = legend_position,
      legend.title = element_blank(),
      legend.text = element_text(size = legend_size),
      panel.grid.minor.y = element_line(),
      panel.grid.major.y = element_line(),
      axis.title = element_text(size = axis_title_size),
      axis.text = element_text(size = axis_size)
    )
  ggsave(
    filename = filename, plot = output_plot, width = width,
    height = height, units = units
  )
  return(output_plot)
}
###############################################################################
#' Scatter plots showing correlation of two variables, plots grouped by a
#' third variable, with correlation computed by resampling-based baseline
#' strata adjusted Spearman correlation (via \code{spearman_resample},
#' defined elsewhere in this project)
#'
#' @param plot_dat: data frame: data for plotting.
#' @param x: string: column name in plot_dat for the x-axis value.
#' @param y: string: column name in plot_dat for the y-axis value.
#' @param facet_by: string: column name of plot_dat, grouping variable for the
#'  panels.
#' @param strata: string: the column name in plot_dat that indicates the
#'  sampling stratum.
#' @param weight: string: the column name in plot_dat that indicates the
#'  individual sampling weights.
#' @param nboot: integer: number of resamples.
#' @param lwd: scalar: loess line width.
#' @param lim: numeric vector of length two: range of the x- and y-axis.
#'  Computed from the data when NULL.
#' @param breaks: numeric vector: locations of where to plot axis ticks.
#'  Computed from the data when NULL.
#' @param point_size: scalar: point size in the scatter point.
#' @param corr_size: font size of the correlation labels.
#' @param panel_titles: string vector: subtitles of each panel.
#' @param panel_title_size: scalar: font size of the panel titles.
#' @param axis_size: scalar: font size of the axis labels.
#' @param x_axis_titles: string vector: x-axis titles for the panels.
#' @param y_axis_titles: string vector: y-axis titles for the panels.
#' @param axis_title_size: scalar: font size of the axis title.
#' @param arrange_nrow: integer: number of rows to arrange the panels.
#' @param arrange_ncol: integer: number of columns to arrange the panels.
#' @param height: scalar: plot height.
#' @param width: scalar: plot width.
#' @param units: string: the unit of plot height and width.
#' @param filename: string: output file name.
#'
#' @return output_plot: a ggplot object of the scatter plots
covid_corr_scatter_facets <- function(plot_dat,
                                      x,
                                      y,
                                      facet_by,
                                      strata,
                                      weight,
                                      nboot = 200,
                                      lwd = 1,
                                      lim = NULL,
                                      breaks = NULL,
                                      point_size = 0.5,
                                      corr_size = 4.5,
                                      panel_titles,
                                      panel_title_size = 10,
                                      axis_size = 10,
                                      x_axis_titles,
                                      y_axis_titles,
                                      axis_title_size = 10,
                                      arrange_nrow = ceiling(
                                        nlevels(plot_dat[, facet_by]) / 2
                                      ),
                                      arrange_ncol = 2,
                                      height = 7,
                                      width = 7,
                                      units = "in",
                                      filename) {
  ## BUG FIX: the list length previously came from a global `assays` object;
  ## one panel is produced per level of the faceting variable.
  scatterplot_list <- vector("list", nlevels(plot_dat[, facet_by]))

  ## Make the plot axis limits adaptive to the data range when the caller
  ## does not supply them. (|| instead of | : scalar condition.)
  if (is.null(lim) || is.null(breaks)) {
    lim <- range(plot_dat[, c(x, y)], na.rm = TRUE)
    if (lim[2] - lim[1] < 2) {
      ## BUG FIX: was floor(lim[1]):ceiling(lim[2]), which can yield a
      ## vector of length > 2; scale limits must have length 2.
      lim <- c(floor(lim[1]), ceiling(lim[2]))
    }
    ## Integer tick positions inside the range; fall back to the enclosing
    ## integers when the range contains no interior integer.
    if (lim[2] > ceiling(lim[1])) {
      breaks <- ceiling(lim[1]):floor(lim[2])
    } else {
      breaks <- floor(lim[1]):ceiling(lim[2])
    }
    if (max(breaks) - min(breaks) >= 6) {
      breaks <- breaks[breaks %% 2 == 0]
    }
  }
  for (aa in 1:nlevels(plot_dat[, facet_by])) {
    ## Resampling-based, strata-adjusted Spearman correlation on the
    ## complete cases of this panel's subset.
    ss <- plot_dat[plot_dat[, facet_by] ==
      levels(plot_dat[, facet_by])[aa], ] %>%
      dplyr::filter(complete.cases(.))
    marker_corr <- round(spearman_resample(
      x = ss[, x], y = ss[, y],
      strata = ss[, strata],
      weight = ss[, weight],
      B = nboot
    ), 2)

    scatterplot_list[[aa]] <- ggplot(
      data = plot_dat[plot_dat[, facet_by] ==
        levels(plot_dat[, facet_by])[aa], ],
      aes_string(x = x, y = y)
    ) +
      geom_point(size = point_size) +
      xlab(x_axis_titles[aa]) +
      ylab(y_axis_titles[aa]) +
      ggtitle(panel_titles[aa]) +
      stat_smooth(method = "loess", color = "red", se = FALSE, lwd = lwd) +
      scale_x_continuous(
        labels = label_math(10^.x), limits = lim,
        breaks = breaks
      ) +
      scale_y_continuous(
        labels = label_math(10^.x), limits = lim,
        breaks = breaks
      ) +
      ## Print the correlation near the upper-right corner of the panel.
      geom_text(
        x = 0.85 * lim[2] + 0.15 * lim[1], y = 0.93 * lim[2] + 0.07 *
          lim[1],
        label = paste0("Cor: ", marker_corr), size = corr_size
      ) +
      theme_pubr() +
      theme(
        plot.title = element_text(hjust = 0.5, size = panel_title_size),
        panel.border = element_rect(fill = NA),
        panel.grid.minor.y = element_line(),
        panel.grid.major.y = element_line(),
        axis.title = element_text(size = axis_title_size),
        axis.text = element_text(size = axis_size),
        legend.title = element_blank()
      )
  }
  output_plot <- ggarrange(
    plotlist = scatterplot_list, ncol = arrange_ncol,
    nrow = arrange_nrow,
    legend = "none", common.legend = FALSE,
    align = "h"
  )
  ggsave(
    filename = filename, plot = output_plot, width = width,
    height = height, units = units
  )
  return(output_plot)
}
###############################################################################
#' Weighted boxplots, grouped by a categorical variable
#'
#' Produce the box plots, one panel per level of the faceting variable,
#' with jittered sample points overlaid.
#'
#' @param plot_dat: data frame: data for plotting.
#' @param x: string: column name in the plot_dat for grouping the boxplots.
#' @param y: string: column name in the plot_dat for the value of the boxplots.
#' @param facet_by: string: column name in the plot_dat for deciding the
#'  panels.
#' @param color: string: column name in plot_dat deciding the boxplot colors
#'  (defaults to the grouping variable x).
#' @param palette: string vector: palette that decides the boxplot colors.
#' @param plot_LLOQ: logical: whether to plot LLOQ lines. LLOQ must be
#'  supplied for the lines to be drawn.
#' @param LLOQ: numeric vector: values of the LLOQ lines, one per panel.
#' @param LLOQ_label_size: numeric: font size of the LLOQ labels.
#' @param LLOW_lwd: LLOQ line width.
#' @param lwd: scalar: boxplot border line width.
#' @param point_size: scalar: jitter point size.
#' @param box_width: scalar: boxplot width.
#' @param errorbar_width: scalar: error bar width.
#' @param jitter_width: scalar: jitter point area width.
#' @param njitter: integer: maximum number of jitter points per subgroup.
#' @param legend: string vector of length nlevels(plot_dat[, x]): legend
#'  labels.
#' @param legend_position: position of the legend in the plot.
#' @param legend_nrow: integer: number of rows to arrange the legend labels.
#' @param legend_size: scalar: font size of the legend labels.
#' @param axis_size: scalar: font size of the axis labels.
#' @param axis_title_size: scalar: font size of the axis title.
#' @param axis_titles_y: string vector: y-axis titles for the panels.
#' @param xlab_use_letters: logical: abbreviate x tick labels to letters
#'  (A, B, ...) and spell them out in the legend.
#' @param ylim: numeric vector of length 2: limits of the y-axis.
#' @param ybreaks: positions of y-axis ticks.
#' @param arrange_nrow: integer: number of rows to arrange the panels.
#' @param arrange_ncol: integer: number of columns to arrange the panels.
#' @param panel_titles: string vector: subtitles of each panel.
#' @param panel_title_size: scalar: font size of the panel titles.
#' @param height: scalar: plot height.
#' @param width: scalar: plot width.
#' @param units: string: the unit of plot height and width.
#' @param filename: string: output file name.
#'
#' @return output_plot: a ggplot object of the box plots
covid_corr_boxplot_facets <- function(plot_dat,
                                      x,
                                      y,
                                      facet_by,
                                      color = x,
                                      palette = c(
                                        "#1749FF", "#D92321",
                                        "#0AB7C9", "#FF6F1B",
                                        "#810094", "#378252",
                                        "#FF5EBF", "#3700A5",
                                        "#8F8F8F", "#787873"
                                      ),
                                      plot_LLOQ = TRUE,
                                      LLOQ = NULL,
                                      LLOQ_label_size = 3.5,
                                      LLOW_lwd = 1,
                                      lwd = 1,
                                      point_size = 1.4,
                                      box_width = 0.6,
                                      errorbar_width = 0.45,
                                      jitter_width = 0.15,
                                      njitter = 30,
                                      legend = levels(plot_dat[, x]),
                                      legend_position = "bottom",
                                      legend_nrow = ceiling(
                                        nlevels(plot_dat[, x]) / 2
                                      ),
                                      legend_size = 10,
                                      axis_size = 10,
                                      axis_title_size = 9,
                                      axis_titles_y,
                                      xlab_use_letters =
                                        (nlevels(plot_dat[, x]) > 2),
                                      ylim = c(-2, 10),
                                      ybreaks = seq(-2, 10, by = 2),
                                      arrange_nrow = ceiling(
                                        nlevels(plot_dat[, facet_by]) / 2
                                      ),
                                      arrange_ncol = 2,
                                      panel_titles,
                                      panel_title_size = 10,
                                      height = 6.5,
                                      width = 6.5,
                                      units = "in",
                                      filename) {
  ## When there are many groups, abbreviate the x tick labels to letters and
  ## spell out the letter -> group mapping in the legend.
  if (xlab_use_letters) {
    legend <- paste0(
      LETTERS[1:nlevels(plot_dat[, x])],
      ": ",
      legend
    )
    xlabels <- LETTERS[1:nlevels(plot_dat[, x])]
  } else {
    xlabels <- levels(plot_dat[, x])
  }

  ## Subsample at most `njitter` points per facet-by-group cell so the
  ## jitter overlay stays readable.
  boxplot_jitter_points <- plot_dat[, c(x, y, facet_by)] %>%
    dplyr::filter(., complete.cases(.)) %>%
    split(., list(.[, facet_by], .[, x])) %>%
    lapply(., function(xx) {
      if (nrow(xx) <= njitter) {
        return(xx)
      } else {
        return(xx[sample(1:nrow(xx), size = njitter), ])
      }
    }) %>%
    bind_rows()

  ## One boxplot panel per level of the faceting variable.
  boxplot_list <- vector("list", nlevels(plot_dat[, facet_by]))
  for (aa in 1:nlevels(plot_dat[, facet_by])) {
    boxplot_list[[aa]] <- ggplot(
      subset(plot_dat, plot_dat[, facet_by] ==
        levels(plot_dat[, facet_by])[aa]),
      aes_string(x = x, y = y, color = color)
    ) +
      geom_boxplot(width = box_width, lwd = lwd) +
      stat_boxplot(geom = "errorbar", width = errorbar_width, lwd = lwd) +
      guides(
        alpha = "none", fill = "none",
        color = guide_legend(nrow = legend_nrow, byrow = TRUE)
      ) +
      geom_jitter(
        data = subset(
          boxplot_jitter_points,
          boxplot_jitter_points[, facet_by] ==
            levels(boxplot_jitter_points[, facet_by])[aa]
        ),
        width = jitter_width, size = point_size
      ) +
      scale_x_discrete(labels = xlabels) +
      scale_y_continuous(
        limits = ylim, labels = label_math(10^.x),
        breaks = ybreaks
      ) +
      theme_pubr(legend = "none") +
      ylab(axis_titles_y[aa]) +
      xlab("") +
      scale_fill_manual(values = palette) +
      scale_color_manual(values = palette, labels = legend) +
      ggtitle(panel_titles[aa]) +
      theme(
        plot.title = element_text(hjust = 0.5, size = panel_title_size),
        panel.border = element_rect(fill = NA),
        panel.grid.minor.y = element_line(),
        panel.grid.major.y = element_line(),
        axis.title = element_text(size = axis_title_size),
        axis.text = element_text(size = axis_size),
        legend.title = element_blank(),
        legend.text = element_text(size = legend_size, face = "bold")
      )
    ## BUG FIX: guard against the default LLOQ = NULL -- plot_LLOQ defaults
    ## to TRUE, and drawing a line at a NULL intercept misbehaves.
    if (plot_LLOQ && !is.null(LLOQ)) {
      boxplot_list[[aa]] <- boxplot_list[[aa]] +
        geom_hline(
          yintercept = LLOQ[aa], linetype = 2, color = "black",
          lwd = LLOW_lwd
        ) +
        geom_text(
          x = 0.65 + 0.025 * nlevels(plot_dat[, x]), vjust = "right",
          y = LLOQ[aa] - 0.5, label = "LLOQ", size = LLOQ_label_size,
          color = "black", show.legend = FALSE
        )
    }
  }
  output_plot <- ggarrange(
    plotlist = boxplot_list, ncol = arrange_ncol,
    nrow = arrange_nrow, common.legend = TRUE,
    legend = "bottom", align = "h"
  )
  ggsave(
    filename = filename, plot = output_plot, width = width,
    height = height, units = units
  )
  return(output_plot)
}
|
a3a03b1e183ac8422ad1711ec45cad32f4387d99 | 56bdfca7f784ba7c0ec9c4f493d8f9ea821b36de | /man/setOMLConfig.Rd | 91c7da037c0708f813299cb15aa9ad7b3daad7bd | [] | no_license | cran/OpenML | d3980a158f8f6e941567b0eed91bbaf3c6c684f9 | 376ad995b891a6be3ce723d24c03bba99b0a27df | refs/heads/master | 2022-11-13T05:55:02.983709 | 2022-10-19T19:27:50 | 2022-10-19T19:27:50 | 73,572,297 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,354 | rd | setOMLConfig.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setOMLConfig.R
\name{setOMLConfig}
\alias{setOMLConfig}
\title{Setter for configuration settings.}
\usage{
setOMLConfig(
server = NULL,
verbosity = NULL,
apikey = NULL,
cachedir = NULL,
arff.reader = NULL,
confirm.upload = NULL
)
}
\arguments{
\item{server}{[\code{character(1)}]\cr
URL of the XML API endpoint.}
\item{verbosity}{[\code{integer(1)}]\cr
Verbosity level. Possible values are 0 (normal output), 1 (info output),
2 (debug output).}
\item{apikey}{[\code{character(1)}]\cr
Your OpenML API key. Log in to OpenML, move to your profile to get it.}
\item{cachedir}{[\code{character(1)}]\cr
Path to the cache directory.}
\item{arff.reader}{[\code{character(1)}]\cr
Name of the package which should be used to parse arff files. Possible are
\dQuote{RWeka}, which is the default and \dQuote{farff}.}
\item{confirm.upload}{[\code{logical(1)}]\cr
Should the user be asked for confirmation before upload of OML objects?}
}
\value{
Invisibly returns a list of configuration settings.
}
\description{
Set and overwrite configuration settings.
}
\seealso{
Other config:
\code{\link{configuration}},
\code{\link{getOMLConfig}()},
\code{\link{loadOMLConfig}()},
\code{\link{saveOMLConfig}()}
}
\concept{config}
|
132f63f4680174cb464105dbd9a8c408fb9a6ce4 | 76e47464f4313b79f95fecf01067aa3a6b713d8b | /man/landseamask_generic.Rd | 5a6ba51b287a6e222686126179947aa28ceb797f | [
"MIT"
] | permissive | zejiang-unsw/rISIMIP | 460116d1f6e23826bb9de57e8ee731d29f379730 | 9f9af06dd51d936932795c4cf2a99216fbfcea23 | refs/heads/master | 2021-01-04T09:46:43.105805 | 2019-12-20T11:36:35 | 2019-12-20T11:36:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 415 | rd | landseamask_generic.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rISIMIP-package.R
\docType{data}
\name{landseamask_generic}
\alias{landseamask_generic}
\title{Land sea mask of ISIMIP data}
\format{\code{RasterLayer}}
\description{
RasterLayer with land sea mask of ISIMIP data
}
\details{
This RasterLayer depicts the global land sea mask used for gridded ISIMIP data at a resolution of 0.5 degree.
}
|
9f56e1ba6b0f41cf30c5e73686b66136ec87b9bd | 88ff3a5e9f9d7f5355d80741d621bbd99f9232a9 | /3.R | 2b5c352d391bd9d2c0419416107600ba0ace6fe5 | [] | no_license | hlc123xyz/2014-_R_Practice | 0218e562aff1d24596c1eada6c7c1860127e3669 | d165fd5f0a82ae350cb01c1625bfec94f5f40035 | refs/heads/master | 2021-01-20T22:59:18.940786 | 2015-01-21T02:20:12 | 2015-01-21T02:20:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,668 | r | 3.R | install.packages("dplyr")
library(dplyr)
??intesect
mtcars$model <- rownames(mtcars)
first <- mtcars[1:20, ]
first
second <- mtcars[10:32, ]
second
intersect(first, second)
union(first, second)
setdiff(first, second)
setdiff(second, first)
setequal(mtcars, mtcars[32:1, ])
mtcars
slice(mtcars, 1L)
?slice
??slice
??nse
??arrange
??summarise_
head(mtcars)
summarise(group_by(mtcars, cyl), m = mean(disp), sd = sd(disp))
by_species <- iris %>% group_by(Species)
by_species %>% summarise_each(funs(length))
?chain
??equal_data_frame
methods('all.equal')
all.equal.default
all.equal.language
?mode
requireNamespace
call.
requireNamespace
eval_tbls
?bench_tbls
?seq_along
??compare
?paste0
install.packages("testthat")
library(testthat)
??testthat::expect_true
??invisible
stop
?as.call
cbind_list
??cbind_list__impl
chain
chain_q
parent.frame()
?eval
new.env
?%.%
%>%
?inherits
?mode
x <- 1
x
mode(X)
storage.mode(x)
mode(x)
typeof(x)
?inherits
?invisible
stopifnot
?deparse
?stopifnot
?trunc
?format
library(plyr)
?as.quoted
(X <- as.quoted(c("a", "b", "log(d)")))
X
as.quoted(a ~ b + log(d))
?colwise
head(baseball)
head(baseball, n = 100)
count(baseball[1:100,], vars = "id")
?create_progress_bar
(l_ply(1:100, identity, .progress = "none"))
(l_ply(1:100, identity, .progress = "tk"))
(l_ply(1:100, identity, .progress = "text"))
(l_ply(1:10000, identity, .progress = progress_text(char = ".")))
?ddply
each(min, max)(1, 10,100)
?liply
l_ply(1:100, identity, .progress = "text")
l_ply(1:100, function(x) Sys.sleep(.01), .progress = "time")
round_any(135, 10)
round_any(Sys.time() + 1:10, 5)
??splitter_d
?split
library(reshape2)
install.packages("reshape")
library(reshape)
?sweep
?melt
head(airquality)
(names(airquality) <- tolower(names(airquality)))
(melt(airquality, id=c("month", "day")))
?nulldefault
?mapply
mapply(rep, 1:4, 4:1)
?merge_recurse
?namerows
?by
?reshape
??stats
??guess_value
?deparse
reshape
?attr
a <- as.list(1:4)
length(a)
a
melt(a)
?varname
attr(a, "varname") <- "ID"
a
melt(a)
attr(a,"t") <-"ddd"
melt(a)
attr(a, 't')
?mapply
mapply(rep, times = 1:4, MoreArgs = list(x = 42))
mapply(rep, times = 1:4, x = 4:1)
(mapply(function(x, y) seq_len(x) + y,
c(a = 1, b = 2, c = 3), # names from first
c(A = 10, B = 0, C = -10))
)
(x <- c(a = 1, b = 2, c = 3))
length(x)
length(c(a = 1))
x
seq_len(x)
seq_len(c(a = 1, b = 2, c = 3))
?preserve.na
?melt_check
?data
head(airquality)
airquality[, 'month', drop = FALSE]
?data.frame
library(reshape2)
parse_formula
library(Rwordseg)
library(tm)
library(TSA)
?stats:::acf
?acf
??xaxp
??mapply
??base
|
928a9d8badabe80c7638d8ab510c07a8edf2d1da | 48705854e259262e4860d36f9ec805044641ca0a | /fun/initParamList.r | f9f48456d080df7f1b19645c2e186f4963ac27fb | [] | no_license | fxi/LebaMod | 9042b37a1c8762f21a5b3860c19ec05aae533c0d | c7933bb6b83b6c1f6b5c649ca95583404e4dbf0d | refs/heads/master | 2021-01-01T18:37:27.540506 | 2014-07-29T08:59:33 | 2014-07-29T08:59:33 | 22,042,033 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,018 | r | initParamList.r |
initParamList <- function(varSelect = NULL,
                          groupSelect = NULL,
                          speciesSelect = NULL,
                          methodSelect = NULL,
                          corrCutoff = NULL,
                          #corrAutoRemove=NULL,
                          hexProbExtract = NULL,
                          probHexRadius = NULL,
                          pseudoAbsType = NULL,
                          pseudoAbsNum = NULL,
                          pseudoAbsMult = NULL,
                          pseudoAbsRuns = 1,
                          pseudoAbsMap = NULL,
                          avoidDuplicatedRuns = TRUE,
                          sendEmail = TRUE,
                          email = "-") {
  # Collect and validate the modelling-job parameters.
  #
  # Returns NULL when the parameter set is incomplete (any argument still
  # NULL after validation), otherwise a named list ready for initJobTable().
  #
  # A model needs at least 3 predictors, so a shorter varSelect is
  # treated as "not provided".
  if (length(varSelect) < 3) varSelect <- NULL
  # snapshot of all arguments *after* the varSelect check
  checkEnv <- as.list(environment())
  # vapply() gives a typed logical vector; the original `TRUE %in% lapply(...)`
  # relied on implicit list coercion
  if (any(vapply(checkEnv, is.null, logical(1)))) {
    message('initParamList found nulls in args.')
    return(NULL)
  }
  message('initParamList ok to set a new job. Content of input list: ')
  # str() already prints; the original print(str(...)) also printed "NULL"
  str(checkEnv)
  list(
    varSelect = as.list(varSelect[order(varSelect)]),  # predictors, sorted
    groupSelect = as.list(groupSelect),
    speciesSelect = as.list(speciesSelect),
    methodSelect = as.list(methodSelect),
    corrCutoff = corrCutoff,
    #corrAutoRemove=corrAutoRemove,
    hexProbExtract = hexProbExtract,
    probHexRadius = probHexRadius,
    pseudoAbsType = pseudoAbsType,
    pseudoAbsNum = pseudoAbsNum,
    pseudoAbsMult = pseudoAbsMult,
    pseudoAbsRuns = pseudoAbsRuns,
    pseudoAbsMap = pseudoAbsMap,
    avoidDuplicatedRuns = avoidDuplicatedRuns,
    # keep the address only when notifications were requested
    email = if (isTRUE(sendEmail)) email else ""
  )
}
setId <- function(dbOut, table) {
  # Reserve and return the next sequential id for `table` in the SQLite
  # data base at path `dbOut`.
  # value : max(id) + 1, or 0 when the table does not exist or is empty.
  # Side effect: appends a row (id, timestamp) recording the reservation.
  require(RSQLite)
  dbCon <- dbConnect(SQLite(), dbOut)
  # close the connection on every exit path (the original leaked it on error)
  on.exit(dbDisconnect(dbCon), add = TRUE)
  idMax <- 0L
  if (table %in% dbListTables(dbCon)) {
    # [[1]] extracts the scalar from the one-cell query result
    nRow <- dbGetQuery(dbCon, paste("SELECT COUNT(*) FROM", table))[[1]]
    if (nRow > 0) {
      idMax <- dbGetQuery(dbCon, paste("SELECT max(id) FROM", table))[[1]] + 1
    }
  }
  idMax <- as.integer(idMax)
  # `table` comes from internal callers ('idJobs' / 'idModels'), so pasting
  # it into the SQL text is acceptable here
  dbWriteTable(dbCon, table, data.frame(id = idMax, time = Sys.time()),
               row.names = FALSE, append = TRUE)
  return(idMax)
}
getPaNum <- function(sp, dbInfo, nPa, mPa, paType) {
  # Number of pseudo-absences for species `sp`: the fixed count `nPa`, or,
  # when paType == "mPa", `mPa` times the species' distinct-site count.
  speciesDt <- data.table(dbInfo$speciesList)
  setkey(speciesDt, sp)
  distinctSites <- speciesDt[sp]$nDistinctSite
  result <- if (paType == "mPa") distinctSites * mPa else nPa
  as.integer(result)
}
getPrNum <- function(sp, dbInfo) {
  # Number of distinct presence sites recorded for species `sp`.
  speciesDt <- data.table(dbInfo$speciesList)
  setkey(speciesDt, sp)
  as.integer(speciesDt[sp]$nDistinctSite)
}
initJobTable <- function(paramList, dbInfo, computeModels = FALSE) {
  # Expand a validated parameter list (see initParamList) into a data.table
  # of model jobs: one row per species x method x group combination,
  # replicated once per pseudo-absence run.
  # paramList     : list as returned by initParamList()
  # dbInfo        : project info list; must provide $speciesList and
  #                 $pathList$models
  # computeModels : kept for interface compatibility (currently unused)
  # value : data.table of jobs, keyed on s, m, g
  require(data.table)
  require(RSQLite)
  require(foreach)   # kept attached for backward compatibility
  require(digest)
  lMet <- unlist(paramList$methodSelect)
  lSp <- unlist(paramList$speciesSelect)
  lSpDb <- unlist(dbInfo$speciesList)
  lGroup <- unlist(paramList$groupSelect)
  # predictors are carried as one comma-separated string column
  predictors <- paste0(unlist(paramList$varSelect), collapse = ",")
  nRuns <- paramList$pseudoAbsRuns
  nPa <- paramList$pseudoAbsNum
  mPa <- paramList$pseudoAbsMult
  paType <- paramList$pseudoAbsType
  # every requested species must exist in the project species list
  if (!all(lSp %in% lSpDb)) {
    stop("Error in set job. Selected species in parameters do not exist in the data base.")
  }
  # one row per combination of species (s), method (m) and group (g)
  jobTable <- data.table(expand.grid(s = lSp, m = lMet, g = lGroup,
                                     stringsAsFactors = FALSE),
                         p = predictors)
  jobTable[, nPa := getPaNum(s, dbInfo, nPa, mPa, paType), by = s]
  jobTable[, nPr := getPrNum(s, dbInfo), by = s]
  # reproducible run id: digest of the full parameter combination
  jobTable[, idRun := paste0("R", digest(c(s, m, g, p, nPa, nPr))),
           by = names(jobTable)]
  jobTable[, "corrCutoff" := paramList$corrCutoff]
  jobTable[, probHexRadius := paramList$probHexRadius]
  jobTable[, email := paramList$email]
  # Replicate the table once per pseudo-absence run; only column r differs.
  # copy() is essential here: := modifies data.tables by reference, so the
  # previous foreach/assign version handed the *same* mutated table to the
  # combiner nRuns times, and every row could end up with r == nRuns.
  jobTable <- rbindlist(lapply(seq_len(nRuns), function(rn) {
    runTable <- copy(jobTable)
    runTable[, r := rn]
    runTable
  }))
  # make sure the model output directory exists
  #stopifnot(getwd()==dbInfo$projectLocal | getwd()==dbInfo$projectRemote)
  dir.create(dbInfo$pathList$models, recursive = TRUE, showWarnings = FALSE)
  if (paramList$avoidDuplicatedRuns) {
    message('Duplicated job will be deleted')
  }
  setkey(jobTable, s, m, g)
  return(jobTable)
}
writeJobTableDb <- function(jobTable, dbInfo) {
  # Append a job table (from initJobTable) to the 'jobsPending' table of the
  # output SQLite data base, after removing jobs whose idRun is already
  # pending ('jobsPending') or already finished ('models').
  require(RSQLite)
  require(data.table)
  dbOut <- dbInfo$pathList$dbOut
  dbCon <- dbConnect(SQLite(), dbOut)
  # close the connection even when a query or write fails
  on.exit(dbDisconnect(dbCon), add = TRUE)
  # quoted, comma-separated id list for the SQL IN clause; idRun values are
  # internally generated digests, so building the query by pasting is safe
  idRunJob <- paste(unique(jobTable$idRun), collapse = "','")
  # drop rows of jt whose idRun already exists in data-base table tbl
  dropKnownRuns <- function(jt, tbl) {
    if (!tbl %in% dbListTables(dbCon)) return(jt)
    sqlCmd <- paste("SELECT idRun FROM", tbl,
                    "WHERE idRun in", paste0("('", idRunJob, "')"))
    idRunDup <- unique(dbGetQuery(dbCon, sqlCmd)$idRun)
    subset(jt, !idRun %in% idRunDup)
  }
  jobTable <- dropKnownRuns(jobTable, "jobsPending")
  jobTable <- dropKnownRuns(jobTable, "models")
  # one batch id for the whole submission ...
  idJob <- setId(dbOut, "idJobs")
  jobTable[, "idJob" := idJob]
  # ... and one fresh model id per row (setId also records each reservation)
  jobTable[, id := setId(dbOut, "idModels"), by = list(rownames(jobTable))]
  if (nrow(jobTable) > 0) {
    dbWriteTable(dbCon, "jobsPending", jobTable, append = TRUE, row.names = FALSE)
  } else {
    message("After removing duplicates, no job remains.")
  }
  NULL
}
|
641139996ea208e049dc9b7b940585f65d3a096c | 9fde6a8ae668629dc497d9dca947f58ab46cb843 | /dynamic_stats_dependencies/unit2perc.R | d0328e9abb0dceb088feb8c2460d327b3f8f7a7e | [] | no_license | BenjaminVigreux/BECCA | 532613297deabc64d033ef02b0d3a8f0f86e5778 | f067bcd1aa034b93430f269b04e0452032b7dc66 | refs/heads/master | 2021-01-01T19:17:10.155506 | 2017-08-02T19:53:29 | 2017-08-02T19:53:29 | 98,553,644 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,530 | r | unit2perc.R | unit2perc <- function(x,y){
  ## Arguments
  # x <- Dataframe from which variables contained in 'y' were originally drawn.
  # y <- Descriptive Statistics matrix to be converted to proportional values.
  ## Dependencies
  # namegetter() is a project-local helper sourced from the working
  # directory; it must return the factor-name lists used below
  source("namegetter.R")
  ## Identify rows relating to single factor
  n <- namegetter(x)
  factornames <- c(n[[1]],n[[2]])
  for (f in 1:length(factornames)){
    # columns of y that belong to the levels of this factor
    fac_levels <- grep(factornames[f],colnames(y))
    if (length(fac_levels) != 0) {
      ## Average levels
      totvec <- rep(0,dim(y)[1])
      for (r in 1:dim(y)[1]){
        # row total across this factor's level columns
        tot <- sum(y[r,fac_levels], na.rm = TRUE)
        # matching raw columns in x, restricted to rows where the row
        # variable (rownames(y)[r]) is observed
        z <- x[,c(rownames(y)[r],grep(factornames[f],colnames(x),value = TRUE))]
        z <- z[!is.na(z[,1]),2:dim(z)[2]]
        # percentage this row's total represents of the raw-data total
        totvec[r] <- tot/sum(z,na.rm = TRUE) * 100
        for (i in 1:length(fac_levels)){
          # rescale each level to a within-row percentage
          y[r,fac_levels[i]] <- (y[r,fac_levels[i]]/tot) * 100
          ## Add total column
          # after the last level of the last row, splice a "<factor> Total"
          # column in directly after this factor's level columns
          if ((i == length(fac_levels)) && (r == dim(y)[1])) {
            if (fac_levels[i] < dim(y)[2]) {y <- data.frame(y[,1:fac_levels[i]],totvec,y[,(fac_levels[i]+1):dim(y)[2]])} else {
              y <- data.frame(y,totvec)
            }
            nm <- strsplit(names(y)[fac_levels[i]],"_")[[1]]
            colnames(y)[fac_levels[i]+1] <- paste0(nm[1]," Total")
          }
        }
      }
    }
  }
  # format to 2 decimals and append "%" to every cell
  # (the result is a character matrix, not a data.frame)
  y <- format(round(y,2),nsmall = 2)
  y <- apply(y,c(1,2), function(y) paste0(y,"%"))
  return(y)
}
7c25ba6e5e0e4008fd90730fa0f3c85b5398e32e | d4b9eb3ab2c23e4cb10719190516f23eea4afe95 | /R/processFile.R | 82235d40e84f52f4cb7636ea3a6af85f1fa274df | [] | no_license | SHoeks/BandcampR | 7cc16be98e308a947bcfaadf421c7b374a7fa7b2 | 19bcaa89a02941ef32074799bdf5c767d5251039 | refs/heads/master | 2020-12-10T12:45:26.742690 | 2020-02-05T15:06:20 | 2020-02-05T15:06:20 | 233,598,332 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 333 | r | processFile.R | # read html correctly
processFile <- function(filepath) {
  # Read a text (HTML) file and return its lines as a character vector,
  # or NULL for an empty file (matching the previous behaviour).
  #
  # Fixes vs. the original implementation:
  # * reads the whole file at once instead of growing a vector line by line
  # * readLines(warn = FALSE) replaces the global options(warn = -1) hack,
  #   which clobbered the caller's warning setting and never restored it
  #   when an error occurred mid-read
  # * on.exit() closes the connection even if reading fails
  con <- file(filepath, "r")
  on.exit(close(con), add = TRUE)
  lines <- readLines(con, encoding = "UTF-8", warn = FALSE)
  if (length(lines) == 0) NULL else lines
}
|
74a93d6cf176aa490cc60185e0e51389120c4281 | 29585dff702209dd446c0ab52ceea046c58e384e | /bdots/R/findBase2.r | 8d5855fcc1b37b0d0ffbe89a14e09af91bfbf4eb | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 183 | r | findBase2.r | find.base2 <- function(time, fixations, conc) {
if(conc) {
min(fixations[time > find.mu(time, fixations, conc)])
} else max(fixations[time > find.mu(time, fixations, conc)])
} |
a5d5118999e130d54896c1e953728bb7d03f9748 | 628f7369f75ddfad3437ac7c0fc253d77813aebf | /plot1.R | 2b049d0b3536e02441f24947eb3eaae7bf6af259 | [] | no_license | dalexander61/ExData_Plotting1 | 43219633e7f9ee96c981adb48bb8f370b53ee6b1 | b382955e7b246a6bb5403c3474d2e5e49dec5cf9 | refs/heads/master | 2021-07-11T12:30:42.223819 | 2017-10-14T18:20:07 | 2017-10-14T18:20:07 | 106,827,008 | 0 | 0 | null | 2017-10-13T13:20:17 | 2017-10-13T13:20:17 | null | UTF-8 | R | false | false | 624 | r | plot1.R | ## assumes you've downloaded and unzipped the file in your working directory.
## read, subset and clean data
## na.strings = "?" lets the power readings parse as numeric instead of the
## factor they became in the original read (the *.002 factor-code hack that
## used to follow is then unnecessary)
pdata <- read.table("household_power_consumption.txt", header=TRUE, sep=";",
                    na.strings="?", stringsAsFactors=FALSE)
## the raw file has a "Date" column; pdata$powerDate did not exist yet, so
## the original as.Date(pdata$powerDate, ...) produced a zero-length result
pdata$powerDate <- as.Date(pdata$Date, format="%d/%m/%Y") ## convert dates
pdata <- subset(pdata, powerDate=="2007-02-01" | powerDate=="2007-02-02") ## subset data
pdata$Time <- as.character(pdata$Time) ## convert times.
## Plot of Graph 1 and write to png file
png(filename="plot1.png")
hist(as.numeric(pdata$Global_active_power), col="red",
     main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
db4529cda803575a6af41bbdc8906d456649d02c | d64bdea79f597e0b64c918864a252cd05e99abeb | /ui.R | bf34bb71fde195f8566d917dc9b448b10a08aa5a | [] | no_license | kdobrien/shinyproject | 89cff553750a86459b64ce268805abd372906ae5 | ba238db3a70c450e45c44ceb98d439670a2250b3 | refs/heads/master | 2021-01-10T08:53:21.406273 | 2015-10-25T05:07:30 | 2015-10-25T05:07:30 | 44,897,430 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,589 | r | ui.R | #setwd("/Users/kobrien/DataScientist/DataProducts/shinyproject")
library(shiny)
# UI definition for the "Linear Model Explorer" shiny app.
# Layout: two fluidRows, each with a narrow control column (width 2) and a
# wide output column (width 10).
# NOTE(review): the output ids ("dataset", "cbFeatures", "pairplot", ...)
# must match render* assignments in the accompanying server.R (not shown).
shinyUI(fluidPage(
  fluidRow(
    # inline CSS rules backing the classes referenced below
    tags$style(type="text/css","h2 { color: darkblue; }"),
    tags$style(type="text/css",".divspace { margin-top: 80px; }"),
    tags$style(type="text/css",".col1 { width: 150px; font-size: 90%;}"),
    tags$style(type="text/css",".smalltext { font-size: 90%; }"),
    # left column: data-set picker and feature checkboxes (server-rendered)
    column(2,"",class="col1",
           div(class="divspace"),
           uiOutput("dataset"),
           uiOutput("cbFeatures")
    ),
    # right column: title, usage instructions and the pairs plot
    column(10,
           h2("Linear Model Explorer"),
           tags$div(class="smalltext",
                    HTML("This application makes it easy to choose features to use for a linear regression and provides"),
                    HTML("information on the effect of the choices. Interactively change which features are used for"),
                    HTML("the model fit and immediately view the effects on the Fitted Model Information.")
           ),
           tags$div(class="smalltext", style="padding-left: 20px;",
                    HTML("1) Select a <em style='color:blue'>Data Source</em>"),
                    HTML("<br/>2) Choose which <em style='color:blue'>Features</em> to include in the model"),
                    HTML("<br/>3) View <em style='color:blue'>Fitted Model Information</em> to see the effects of the features chosen. "),
                    HTML("Repeat steps 2 and 3 as desired.")
           ),
           plotOutput("pairplot")
    )
  ),
  # second row: model-information selector and the fitted-model display
  fluidRow(
    column(2,"",class="col1",
           uiOutput("rbModelInfo")
    ),
    column(10,
           h6(textOutput("currModel")),
           uiOutput("modelinfo"),
           plotOutput("modelplot")
    )
  )
))
e30d12800d59d09cf30ebcf6a71e580202c95c92 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/extraTrees/examples/prepareForSave.Rd.R | 70d427f72abb326dd5eabd1272e8f4103c5ad5c8 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 424 | r | prepareForSave.Rd.R | library(extraTrees)
### Name: prepareForSave
### Title: Prepares ExtraTrees object for save() function
### Aliases: prepareForSave
### Keywords: save,load,extraTrees
### ** Examples
# Auto-extracted example: round-trips an extraTrees model through
# save()/load(). prepareForSave() appears to serialise the model's
# external (Java-side) state so the object survives save() -- TODO confirm
# against the extraTrees package documentation.
et <- extraTrees(iris[,1:4], iris$Species)
prepareForSave(et)
## saving to a file
save(et, file="temp.Rdata")
## testing: remove et and load it back from file
rm(list = "et")
load("temp.Rdata")
predict(et, iris[,1:4])
|
81f5809fc04da723fe4b46406bcb89a7825a7eda | 58ed380e48045a368c06701b61aac8fac41419e7 | /man/predict_goalmodel.Rd | ec4bb81a62529e0de6fa863945156f6a3397c3f8 | [
"MIT"
] | permissive | systats/deeplyr | 5c6419316ce23eb1569b0189a18816f81bb91b94 | 3248e73a24527a7717a01e0e5c8e3021d5b8b823 | refs/heads/master | 2021-07-05T04:40:23.502291 | 2020-10-02T14:49:12 | 2020-10-02T14:49:12 | 185,884,771 | 11 | 0 | null | null | null | null | UTF-8 | R | false | true | 249 | rd | predict_goalmodel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api_goalmodel.R
\name{predict_goalmodel}
\alias{predict_goalmodel}
\title{predict_goalmodel}
\usage{
predict_goalmodel(self, new_data)
}
\description{
predict_goalmodel
}
|
5c89e39efdbe740f7feb8e3299b37cdffe525576 | 00daf46a1286c20caa103a95b111a815ea539d73 | /explorations/OpaquePointers/opaqueExplore.R | 04dc87e8cc9fd515764716bcb137de1de3ddff00 | [] | no_license | duncantl/Rllvm | 5e24ec5ef50641535895de4464252d6b8430e191 | 27ae840015619c03b2cc6713bde71367edb1486d | refs/heads/master | 2023-01-10T15:12:40.759998 | 2023-01-02T18:05:26 | 2023-01-02T18:05:26 | 3,893,906 | 65 | 14 | null | 2017-03-09T07:59:25 | 2012-04-01T16:57:16 | R | UTF-8 | R | false | false | 358 | r | opaqueExplore.R | library(Rllvm)
ctxt = getGlobalContext(TRUE)
m = parseIR("dnormLoop.ir", context = ctxt)

# first parameter of the module's v_dnorm function, and its users
p = getParameters(m$v_dnorm)[[1]]
u = getAllUsers(p)
# Seg fault if u[[1]][[1]]
#ty = .Call("R_Value_getLoadStoreType", u[[1]][[2]]) # HalfTyID ??
# walk the user chain to reach a getelementptr instruction, then query its
# source element type -- exploration of opaque-pointer handling; the exact
# chain depth is specific to dnormLoop.ir
gep = getAllUsers(getAllUsers(u[[1]][[2]])[[2]])[[1]]
ty = .Call("R_GetElementPtrInst_getSourceElementType", gep)
ea420b3b2def37eaddfd3db53bfdb003afbba868 | f5a2fad8fc599c24c83ca2d44147a50ab962f18e | /R/dbxml_handle.R | baa7bed07b24d8026dfa6e87d0ac005a3457c66b | [] | no_license | Shicheng-Guo/drugbankR | 8b0ed6eccba3720f72121ec4a280208a261c519e | 000a84db4c9b491e49c3b34dd7f4ae22a0a157c6 | refs/heads/master | 2023-06-26T20:15:14.375107 | 2021-07-30T21:54:10 | 2021-07-30T21:54:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,128 | r | dbxml_handle.R | ########################################
## Import of DrugBank Annotation Data ##
########################################
## Function to import DrugBank xml to data.frame and store in SQLite database.
## Note: this function needs some major speed improvements. Ideally,
## (1) Download
## - download DrugBank xml file (https://www.drugbank.ca/releases/latest)
## - name uncompressed file 'drugbank.xml'
## (2) Function to convert xml into dataframe and store in SQLite database.
#' @export
#' @importFrom XML xmlParse
#' @importFrom XML xmlRoot
#' @importFrom XML xmlSize
#' @importFrom XML xmlToDataFrame
#'
#' @title Convert drugbank database (xml file) into dataframe.
#'
#' @description Download the original drugbank database \url{http://www.drugbank.ca/releases/latest} (xml file) into your current directory and rename as drugbank.xml
#' then run: drugbank_dataframe = dbxml2df(xmlfile="drugbank.xml", version="5.0.10").
#'
#' @param xmlfile Character, file path to xml file of drugbank database.
#' @param version Character, drugbank version of the xml file (kept for a
#'   consistent interface with \code{\link{df2SQLite}}).
#' @return Dataframe of drugbank xml database, one row per drug record.
#' @references \url{http://www.drugbank.ca/releases/latest}
#' @author Yuzhu Duan \url{yduan004@ucr.edu}
#' @note This process will take about 20 minutes.
#' @seealso \code{\link{df2SQLite}}
#' @aliases dbxml2df
#' @examples
#' \dontrun{
#' ## download the original drugbank database \url{http://www.drugbank.ca/releases/latest} (xml file)
#'
#' ## into your current directory and rename as drugbank.xml
#'
#' ## convert drugbank database (xml file) into dataframe:
#'
#' drugbank_dataframe <- dbxml2df(xmlfile="drugbank.xml", version="5.0.10")
#' }
dbxml2df <- function(xmlfile, version) {
  myxml <- xmlParse(file = xmlfile)
  rootnode <- xmlRoot(myxml)
  rootsize <- xmlSize(rootnode)  # number of drug records in the document
  ## fixed set of output columns
  mycol <- c("drugbank-id", "name", "description", "cas-number", "unii",
             "state", "groups", "general-references", "synthesis-reference",
             "indication", "pharmacodynamics", "mechanism-of-action", "toxicity",
             "metabolism", "absorption", "half-life", "protein-binding",
             "route-of-elimination", "volume-of-distribution", "clearance",
             "classification", "salts", "synonyms", "products", "international-brands",
             "mixtures", "packagers", "manufacturers", "prices", "categories",
             "affected-organisms", "dosages", "atc-codes", "ahfs-codes", "pdb-entries",
             "fda-label", "msds", "patents", "food-interactions", "drug-interactions",
             "sequences", "experimental-properties", "external-identifiers", "external-links",
             "pathways", "reactions", "snp-effects", "snp-adverse-drug-reactions", "targets",
             "enzymes", "carriers", "transporters", "average-mass", "monoisotopic-mass",
             "calculated-properties")
  ## (b) Extract each record in a loop and inject it into the preallocated
  ## data.frame (preallocation avoids re-allocating the result each row)
  message("Extracting drug records. This may take 20 minutes.")
  ## seq_len() keeps this correct for an empty document, unlike 1:rootsize
  df <- as.data.frame(matrix(NA, nrow = rootsize, ncol = length(mycol),
                             dimnames = list(seq_len(rootsize), mycol)))
  for (i in seq_len(rootsize)) {
    tmp <- xmlToDataFrame(rootnode[i], stringsAsFactors = FALSE, collectNames = FALSE)
    v <- as.character(tmp[1, ])
    names(v) <- colnames(tmp)
    df[i, ] <- v[mycol]  # fields absent from this record become NA
  }
  message("Successfully converted DrugBank database (xml file) into dataframe.")
  return(df)
}
#' @importFrom RSQLite SQLite
#' @importFrom RSQLite dbConnect
#' @importFrom RSQLite dbWriteTable
#' @importFrom RSQLite dbDisconnect
#' @importFrom utils read.csv
#' @importFrom utils unzip
#'
#' @title Store drugbank dataframe into an SQLite database
#' @description Store specific version of drugbank dataframe into an SQLite database
#' under user's present working directory of R session
#' @param dbdf Drugbank dataframe generated by \code{\link{dbxml2df}} function.
#' @param version Character(1), version of the input drugbank dataframe generated
#' by \code{\link{dbxml2df}} function
#' @return SQLite database (drugbank_versionNumber.db) stored under user's
#' present working directory of R session
#' @author Yuzhu Duan \url{yduan004@ucr.edu}
#' @seealso \code{\link{dbxml2df}}
#' @aliases df2SQLite
#' @examples
#' \dontrun{
#'
#' # download the original drugbank database (http://www.drugbank.ca/releases/latest) (xml file)
#' # to your current R working directory, and rename as drugbank.xml.
#' # Read in the xml file and convert to a data.frame in R
#'
#' drugbank_dataframe = dbxml2df(xmlfile="drugbank.xml", version="5.1.3")
#'
#' # store the converted drugbank dataframe into SQLite database under user's
#' # present R working directory
#'
#' df2SQLite(dbdf=drugbank_dataframe, version="5.1.3") # set version as version of xml file
#' }
#' @export
df2SQLite <- function(dbdf, version) {
  mydb <- dbConnect(SQLite(), paste0("./drugbank_", version, ".db"))
  # guarantee the connection is released even if the write fails
  on.exit(dbDisconnect(mydb), add = TRUE)
  RSQLite::dbWriteTable(mydb, "dbdf", dbdf)
  message("Successfully stored drugbank dataframe into SQLite database ",
          paste0("`drugbank_", version, ".db`"),
          " and it is under your present R working directory.")
}
|
a9f3dff28ecd8c9c6f67652bb030f7f78ab38bb3 | deadec09c49c903bb7721a36d39ae5552167cbd0 | /R/BTp.R | 14eed6aca9697b38e9d913c85a26904602f6f62c | [
"MIT"
] | permissive | csoneson/ccostr | 916b8d2fc255a8bdb12fa05050ab839a4733f8fa | c71433513ed207c98dd70bd84b37a74a050f9492 | refs/heads/master | 2020-07-18T00:48:08.437632 | 2019-09-03T17:53:46 | 2019-09-03T17:53:46 | 206,137,326 | 0 | 0 | null | 2019-09-03T17:44:35 | 2019-09-03T17:44:34 | null | UTF-8 | R | false | false | 1,118 | r | BTp.R | #' @description Not ready for use... still experimental
#' @details Not ready for use... still experimental.
#' Appears to implement a partitioned (interval-by-interval) mean-cost
#' estimator with inverse-probability-of-censoring weighting over 10 unit
#' intervals -- TODO confirm the intended estimator and add a reference.
#'
#' @param x A dataframe with columns: id, cost, delta, surv, start and stop
#'   (start/stop delimit the cost-history intervals; delta is the event
#'   indicator and surv the follow-up time).
#'
#' @return A single numeric score.
#'
#' @examples
#' BTp(simCostData(100)$censoredCostHistory)
#'
#'
#' @importFrom Rdpack reprompt
#' @importFrom rlang .data
#' @import dplyr survival knitr tibble
BTp <- function(x) {
  # BTp
  # one row per subject: event indicator and follow-up time
  xf <- x %>%
    group_by(.data$id) %>%
    summarize(delta = first(.data$delta),
              surv = first(.data$surv))
  # Kaplan-Meier of the *censoring* distribution (event = delta == 0),
  # evaluated at interval ends 1..10 and at every follow-up time
  scf <- summary(survfit(Surv(xf$surv, xf$delta == 0) ~ 1), times = c(1:10, xf$surv))
  scf <- data.frame(scf$time, scf$surv)
  BTp <- NULL
  # loop over the 10 unit-time intervals starting at i - 1
  for (i in 1:10) {
    sx <- subset(x, x$start == i - 1)
    # records spanning the whole interval are treated as complete
    sx$delta <- ifelse(sx$stop == i, 1, sx$delta)
    sx$surv <- pmin(sx$stop, sx$surv)
    # attach the censoring-survival probability at each record's time
    sx <- left_join(sx, scf, by = c("surv" = "scf.time"))
    # inverse-probability-weighted cost contribution of this interval
    BTp[i] <- sum((sx$cost * sx$delta) / sx$scf.surv)
  }
  # average the summed weighted costs over the number of subjects
  estimate <- sum(BTp)/nrow(xf)
  estimate
}
|
c24dd5cddc08f18d473cf1d48ed6f1a2f8625d8f | 92830b47e3806f2cf65e77c49a95458b1df2aaf5 | /scripts/data_for_even.R | 09c55269e5d8c46fe9e9ad250a22841bcb509933 | [] | no_license | Helen-R/get_ga_data | 0919d98c9e9b0bdc91ff54d08098b53e28ea291b | 9146ff32bc6328cd8b9acacf45530263cedeb8f4 | refs/heads/master | 2021-09-07T01:44:45.479403 | 2018-02-15T11:09:35 | 2018-02-15T11:09:35 | 121,612,756 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,133 | r | data_for_even.R | if (!"slackme" %in% ls()) source("../auxiliary/slackme.R")
# Load the project helper loader and the Google Analytics / Sheets packages.
if (!"mylib" %in% ls()) source("../auxiliary/mylib.R")
mylib(c("googlesheets", "googleAuthR", "RGoogleAnalytics", "RJSONIO", "data.table", "dplyr", "RODBC"))
# # [ref] http://thinktostart.com/using-google-analytics-r/
# 1 token & authentification
# service-account token used for the Google Sheets access below
token <- gar_auth_service(json_file="cid/cid_s_ga0k.json")
# fetch the google sheet that lists the shops and date ranges to track
gs <- gs_title("商品頁改版_觀察指標")
# worksheets 3..12 are the per-shop tabs; each tab name starts with a ShopId
wsls <- gs_ws_ls(gs)[3:12]
target.ids <- as.integer(sapply(strsplit(wsls, ".", fixed = T), "[", 1))
# row 6 of this tab holds the observation date ranges (month/day pairs
# separated by "-"), converted to character dates below
date.range <- gs_read(gs, ws="14.糖罐子_服飾", range = cell_rows(6), col_names = FALSE)
date.range <- unlist(date.range[-length(date.range)])
date.range <- strsplit(date.range, "-", fixed = T)
date.range <- lapply(date.range, strptime, format="%m/%e")
date.range <- lapply(date.range, as.character)
# 2 view.id list
# source("get.gaid.R")
load("cid/gaid.RData")
# keep one open official-shop GA profile per ProfileId
gaid <- gaid[Status=="Open"&Type=="OfficialShop"] %>%
  unique(by = "ProfileId")
condi <- quote(ShopId %in% target.ids)
ga.tab <- gaid[eval(condi), .(ShopId, ProfileId, Owner, Type)]
view.ids <- ga.tab[, .(ProfileId)] %>%
  unlist()
shop.ids <- ga.tab[, .(ShopId)] %>%
  unlist()
names(view.ids) <- paste0("ShopId.", shop.ids)
# service-account key name per shop, derived from the owner e-mail by
# stripping the "pd_" prefix and the "@nine-yi.com" domain
nks <- ga.tab[,.(Owner)] %>%
  unlist() %>%
  gsub(pattern = "pd_", replacement = "") %>%
  gsub(pattern = "@nine-yi.com", replacement = "")
names(nks) <- paste0("ShopId.", shop.ids)
# # Authorize the Google Analytics account
# # This need not be executed in every session once the token object is created
# # and saved
# x <- fromJSON("cid_ga1k.json")
# token <- Auth(client.id = x$installed$client_id, client.secret = x$installed$client_secret)
#
# # Save the token object for future sessions
# save(token, file="./token_ga1k_file")
# # In future sessions it can be loaded by running load("./token_file")
get.ga.data <- function (shop.id, st.dt, ed.dt) {
  # Pull bounce rate / average session duration for SalePage paths of one
  # shop over [st.dt, ed.dt], authenticating with the shop's own
  # service-account key.
  # Relies on globals built above: `nks` (key name per shop) and
  # `view.ids` (GA profile id per shop).
  idx <- paste0("ShopId.", shop.id)
  # shops 360 and 815 are special-cased to the shared "ga0k" key
  # (reason not documented -- presumably no dedicated key exists; verify)
  if (shop.id %in% c(360, 815)) {
    nk <- "ga0k"
  } else {
    nk <- nks[idx]
  }
  cat(paste(shop.id, nk, sep="_"))
  token <- gar_auth_service(json_file=sprintf("cid/cid_s_%s.json", nk))
  ValidateToken(token)
  # st.dt <- "2017-03-09"
  # # ed.dt <- (as.Date(st.dt) + 6) %>% as.character()
  # ed.dt <- "2017-03-19"
  view.id <- view.ids[idx]
  # Build a list of all the Query Parameters
  query.list <- Init(start.date = st.dt,
                     end.date = ed.dt,
                     metrics = "ga:bounceRate,ga:avgSessionDuration",
                     filters = "ga:pagePath =~/SalePage/",
                     # max.results = 10000,
                     # sort = "-ga:date",
                     table.id = sprintf("ga:%s", view.id))
  # Create the Query Builder object so that the query parameters are validated
  ga.query <- QueryBuilder(query.list)
  GetReportData(ga.query, token, split_daywise = F)
}
# Pull GA reports for every shop over every date range, then stack them.
d <- vector("list", length(date.range))  # preallocate one slot per range
for (i in seq_along(date.range)) {
  st.dt <- date.range[[i]][1]
  cat(st.dt)
  ed.dt <- date.range[[i]][2]
  # `[[i]] <-` stores the whole per-shop result; the previous `d[i] <-`
  # single-bracket assignment silently kept only the first element
  d[[i]] <- sapply(shop.ids, get.ga.data, st.dt, ed.dt)
}

dd <- data.frame()
for (x in d) {
  dd <- rbind(dd, x)
}
c6324b0cc1ab1cd0bbca44809a2e7bcfc7282837 | 14826cb84f7e5e39d7f68e09be173ffbdbda78ba | /Season watch/Ma saison en anime.R | dae953f040e1e22765d5a4c7c8c8549cd257594a | [] | no_license | Reinaldodos/Anime | b1503667ee125610f33379da4aa4991ea8d328f7 | 9a342cf57fe3172b8e0ebdb72820545eaf210cde | refs/heads/master | 2023-04-16T23:43:50.441313 | 2023-04-06T09:03:16 | 2023-04-06T09:03:16 | 81,234,924 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,592 | r | Ma saison en anime.R | pacman::p_load(rvest, tidyverse, data.table)
# Download the MyAnimeList "current season" index page.
Liste_Anime =
  "https://myanimelist.net/anime/season" %>%
  read_html()
# Project helpers: ANIMATION, PREQUEL_DESUKA, SOURCING, SCORE_SOURCE are
# defined in FONCTIONS.R (not visible here).
source("Season watch/FONCTIONS.R")
# read_html wrapped so a failed request returns list(result=, error=)
# instead of aborting the whole run.
safe_read = safely(read_html)
# SCRIPT ------------------------------------------------------------------
print("FETCHEZ LA VACHE!")
# Parse the season page into a table of shows (presumably with URL/Title
# columns -- see ANIMATION() in FONCTIONS.R).
ANIME =
  Liste_Anime %>%
  ANIMATION()
# Download every anime page, retrying only the URLs that failed until all
# succeed. safe_read returns list(result=, error=) per URL; transposing and
# compacting splits them into $result (pages) and $error (failures).
#
# Bug fix: the original retry loop never re-transposed the safely() output,
# so `output$error` was always NULL after one pass, and the nested
# `list(input, output$result)` lost the retried pages when flattened.
fetched = ANIME$URL %>% as.character %>%
  set_names %>%
  map(safe_read) %>%
  purrr::transpose() %>% map(compact)
input = fetched$result
failed = names(fetched$error)
while (length(failed) > 0) {
  fetched = failed %>% set_names() %>% map(safe_read) %>%
    purrr::transpose() %>% map(compact)
  input = c(input, fetched$result)
  failed = names(fetched$error)
}
# Per page: flag whether the show is a sequel (PREQUEL_DESUKA) and extract
# the relative link to its source manga, dropping shows without one.
Sequels = input %>% map(PREQUEL_DESUKA)
Mangas = input %>% map(SOURCING) %>% compact
# Fetch the score of each source manga; safely() keeps one failure from
# aborting the whole map.
Scores =
  Mangas %>% map(.f = ~ str_c("https://myanimelist.net", .)) %>%
  map(safely(SCORE_SOURCE))
# Keep only the successful results (failures become NULL entries).
Scores = Scores %>% purrr::transpose() %>% .$result
# Assemble one row per show (URL, source score, sequel flag), join back the
# season metadata from ANIME, and coerce the columns scraped as text.
output =
  list(URL = ANIME$URL %>% as.character %>% set_names,
       output = Scores,
       Sequel = Sequels) %>%
  purrr::transpose() %>%
  map_df(as.data.table) %>%
  mutate_all(.funs = trimws) %>%
  left_join(x = ANIME, by = "URL") %>%
  mutate(output.Score = output.Score %>% as.numeric,
         Sequel = Sequel %>% as.logical)
# Sequels -----------------------------------------------------------------
Sequels = output %>% filter(Sequel)
# Write the detected sequel titles out, then read the file straight back in.
# NOTE(review): the round-trip looks intentional -- it allows hand-editing
# "Season sequels.csv" between the write and the read -- but confirm.
Sequels %>% pull(Title) %>% write.table(file = "Season watch/Season sequels.csv", row.names = F, quote = F)
Sequels = "Season watch/Season sequels.csv" %>% readLines()
# Remove every title on the sequel list from the final output
# (anti_join(x = output, y = <sequel rows>)).
output =
  output %>%
  filter(Title %in% Sequels) %>%
  anti_join(x = output)
|
33e52093a8b08473b40cd971f5c45600edcfbf6b | 41e2fe7a1402664daf67f19a7ab9ea539a97fa8e | /Apriori.R | 0274a8380edcfa4ddcda302dae3bba7d252688ac | [] | no_license | limetestlife/R | 4f544422266d3f2f9bca55adab707fb0abfc5ff1 | 2218d8ad3775b4c81d88e0dd6458b6562fd4722b | refs/heads/master | 2021-01-22T04:28:51.973688 | 2017-06-22T13:42:46 | 2017-06-22T13:42:46 | 92,468,815 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 177 | r | Apriori.R | library(arules)
# Load the grocery baskets as an arules transactions object (one line of the
# CSV = one basket). NOTE(review): the path is machine-specific; adjust
# before running elsewhere.
groceries <- read.transactions("E:/0-R与数据挖掘/Machine-Learning-with-R-datasets-master/groceries.csv",sep=",")
# Transaction counts, density, and most frequent items.
summary(groceries)
# Show the first five baskets.
inspect(groceries[1:5])
|
e3826eb32361ec781c2ce2c519db2485e6a72966 | 6a74a4e677fa5bdd2a1536b5b383fc8aef4465df | /R/createDiffSummary.r | 0861cd0488101c11a361605d8875c137727a2c46 | [
"Apache-2.0"
] | permissive | OHDSI/Tantalus | c7187a2a8d5572de84f8a61ca04326d083c0f094 | aad9dc93779be31cd85746df5ff67b32ece3a2cd | refs/heads/master | 2020-03-18T19:49:32.597757 | 2018-12-04T19:59:24 | 2018-12-04T19:59:24 | 135,179,897 | 3 | 3 | null | 2018-06-24T17:40:10 | 2018-05-28T15:37:33 | R | UTF-8 | R | false | false | 5,711 | r | createDiffSummary.r | # Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of Tantalus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' @title
#' Generate a (numeric) summary of differences between two specified vocabularies.
#'
#' @description
#' This function finds high level differences between two specified vocabularies (essentially sql COUNT comparison queries).
#' The results of the queries are written to a JSON file and converted to html via inst/reports/GenerateDiffReport.Rmd.
#' The summary report, diffSummary.html will be created in \code{JSONFileLoc} unless otherwise specified.
#'
#' @details In an effort to assess the vocabulary proper (rather than the entire CDM), only the following
#' tables are considered:
#' 1. CONCEPT
#' 2. CONCEPT_SYNONYM
#' 3. CONCEPT_ANCESTOR
#' 4. CONCEPT_RELATIONSHIP
#' 5. CONCEPT_CLASS
#' 6. DOMAIN
#' 7. RELATIONSHIP
#'
#' @param connectionDetails An R object of type\cr\code{connectionDetails} created using
#' the function \code{createConnectionDetails} in the
#' \code{DatabaseConnector} package.
#' @param oldVocabularyDatabaseSchema The name of the database schema that contains the old
#' vocabulary instance. Requires read permissions to this
#' database. On SQL Server, this should specify both the database
#' and the schema, so for example 'cdm_vocab.dbo'.
#' @param newVocabularyDatabaseSchema The name of the database schema that contains the new
#' vocabulary instance. Requires read permissions to this
#' database. On SQL Server, this should specify both the database
#' and the schema, so for example 'cdm_vocab.dbo'.
#' @param JSONFileLoc Location of the JSON file created by the function.
#' @param reportFileLoc Location of the html report, defaults to JSONFileLoc.
#' @param oracleTempSchema For Oracle only: the name of the database schema where you want
#' all temporary tables to be managed. Requires create/insert
#' permissions to this database.
#'
#' @export
createDiffSummary <- function(connectionDetails,
                              oldVocabularyDatabaseSchema,
                              newVocabularyDatabaseSchema,
                              JSONFileLoc,
                              reportFileLoc = JSONFileLoc,
                              oracleTempSchema = NULL) {
  # One result data frame per executed query, keyed by query name, e.g.
  # ResultSets$CountSummaryDiff, ResultSets$CountConceptDomainChanges, ...
  ResultSets <- list()
  # The "Count*" queries produce the numeric summaries; GetVocabVersion.sql
  # is appended last so the report can name the two vocabulary versions.
  pathToSql <- system.file("sql/sql_server", package = "Tantalus")
  sqlFiles <- c(list.files(pathToSql, pattern = "Count.*.sql"),
                "GetVocabVersion.sql")
  invisible(capture.output({
    conn <- DatabaseConnector::connect(connectionDetails)
  }))
  # Release the connection even if a query errors part-way through; the
  # original leaked it in that case.
  on.exit(DatabaseConnector::disconnect(conn), add = TRUE)
  # Execute queries:
  for (sqlFile in sqlFiles) {
    sql <- SqlRender::loadRenderTranslateSql(sqlFilename = sqlFile,
                                             packageName = "Tantalus",
                                             dbms = connectionDetails$dbms,
                                             oracleTempSchema = oracleTempSchema,
                                             old_vocabulary_database_schema = oldVocabularyDatabaseSchema,
                                             new_vocabulary_database_schema = newVocabularyDatabaseSchema)
    # Strip the ".sql" extension to obtain the result-set name.
    queryName <- sub("\\.sql$", "", sqlFile)
    print(paste0("Processing query: ", queryName))
    ResultSets[[queryName]] <- DatabaseConnector::querySql(conn, sql)
  }
  # Serialize the results *before* adding the file name below, so the file
  # path itself is not part of the JSON payload. JSON keeps the results in a
  # format that can be passed around independently of this R session.
  ResultSetJSON <- jsonlite::toJSON(ResultSets, pretty = TRUE)
  # Embed both vocabulary versions in the file name.
  JSONFileName <- paste0(JSONFileLoc,
                         "diffSummary-",
                         ResultSets$GetVocabVersion$CURRENT_VOCAB,
                         "-",
                         ResultSets$GetVocabVersion$PRIOR_VOCAB,
                         ".json")
  ResultSets$JSONFile <- JSONFileName
  write(ResultSetJSON, ResultSets$JSONFile)
  # NOTE(review): this relative path only resolves when run from the package
  # source tree; an installed package would need system.file().
  rmarkdown::render(
    input = "inst/reports/GenerateDiffReport.Rmd",
    output_dir = reportFileLoc,
    output_file = "diffSummary.html",
    params = list(JSONFile = ResultSets$JSONFile)
  )
}
|
d782a5bcdf926aaf0f14753f132fc20ee67f77e5 | eb8a0799d3db66039912288b7fa7de0a9c81ad1d | /R/RcppExports.R | 03c06573200a3fe325a7cff8acfc76263d578edf | [] | no_license | cran/ibs | 124fd1b2163dd721ea68277b292a61544333d6cc | b65eac79c8e1d93320eeb6de6d568b6df524c985 | refs/heads/master | 2021-05-04T11:49:42.506210 | 2018-11-09T14:10:03 | 2018-11-09T14:10:03 | 18,805,162 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,150 | r | RcppExports.R | bsbases <-
function(x, knots, ord) {
    ## Evaluate every B-spline basis function of order `ord` on the knot
    ## sequence `knots` at the points `x`. Returns a length(x) by
    ## (length(knots) - ord) matrix, one basis function per column.
    ord <- as.integer(ord)
    if (length(knots) <= ord) stop("length of knots <= ord!\n")
    ## No evaluation points: an empty matrix with the correct width.
    if (length(as.double(x)) == 0) {
        return(matrix(nrow = 0, ncol = length(knots) - ord))
    }
    ## The compiled routine returns the basis values in row-major order,
    ## hence byrow = TRUE when reshaping.
    basis_values <- .Call("_ibs_bsbasesCpp", as.double(x), as.double(knots), as.integer(ord), PACKAGE = "ibs")
    matrix(basis_values, nrow = length(x), byrow = TRUE)
}
## Evaluate the B-spline with coefficient vector `coef` on the knot sequence
## `knots` at the points `x`; returns a numeric vector of the same length
## as `x`.
bspline <-
function(x,knots,ord=4,coef=rep(1,length(knots)-ord)){
    ## Coerce the order first; the lazily evaluated `coef` default then
    ## sees the integer value.
    ord <- as.integer(ord)
    ## One coefficient per basis function is required.
    if (length(coef) != length(knots) - ord) {
        stop("length(knots)-ord!=length(coef)!")
    }
    ## Nothing to evaluate.
    if (length(as.double(x)) == 0) {
        return(numeric(0))
    }
    ## Knots are sorted here, so callers may pass them in any order.
    .Call("_ibs_bsplineCpp",
          as.double(x),
          as.integer(ord),
          as.double(sort(knots)),
          as.double(coef),
          PACKAGE = "ibs")
}
## Integral of the B-spline defined by `knots`/`coef` evaluated at `x`.
ibs <-
function(x,knots,ord=4,coef=rep(1,length(knots)-ord)){
    ## One coefficient per basis function is required.
    if (length(coef) != length(knots) - ord) {
        stop("length(knots)-ord!=length(coef)!")
    }
    ## Nothing to evaluate.
    if (length(as.double(x)) == 0) {
        return(numeric(0))
    }
    knots <- sort(knots)
    ## The integral is only defined on [knots[1], knots[length(knots)-ord+1]].
    outside <- x < knots[1] | x > knots[length(knots) - ord + 1]
    if (any(outside)) {
        stop("Some x value(s) are out of the range from the smallest to the ord-th largest knots!\n")
    }
    .Call("_ibs_ibsCpp", as.double(x), as.integer(ord),
          as.double(knots),
          as.double(coef), PACKAGE = "ibs")
}
|
3c6862bd4e5cb073c1b36c459a39b695fa433dc0 | 31d2d467030565c44f4d28d42c0e4d225dececaa | /R/vcov.rasch.R | dfd963dc0e218dd12dd971aff8e72670860d8108 | [] | no_license | cran/ltm | 84fd858915db9fe1506a40628f61e6500a21ed1c | dbbabfa99fa09ad94113856a6a5ae1535e7b817f | refs/heads/master | 2022-02-25T01:10:01.747125 | 2022-02-18T08:40:02 | 2022-02-18T08:40:02 | 17,697,218 | 2 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,680 | r | vcov.rasch.R | vcov.rasch <-
function (object, robust = FALSE, ...) {
    ## Variance-covariance matrix of the estimated Rasch-model parameters.
    ## With robust = TRUE, returns the sandwich estimator H %*% S %*% H,
    ## where H is the inverse Hessian and S the sum of per-subject score
    ## outer products; otherwise the plain inverse of the observed Hessian.
    if (!inherits(object, "rasch"))
        stop("Use only with 'rasch' objects.\n")
    inv.hes <- if (robust) {
        ## Score vector of the marginal log-likelihood for one response
        ## pattern X, approximated by Gauss-Hermite quadrature (GH holds
        ## the nodes Z and weights GHw).
        ## NOTE(review): probs() and matSums() are ltm-internal helpers;
        ## verify their semantics against the package source.
        score.vec <- function (betas, X, constraint, GH) {
            p <- nrow(betas)
            pr <- probs(GH$Z %*% t(betas))
            ## p.xz: pattern likelihood at each quadrature point;
            ## p.x: marginal likelihood; p.zx: posterior weights.
            p.xz <- exp(X %*% t(log(pr)) + (1 - X) %*% t(log(1 - pr)))
            p.x <- c(p.xz %*% GH$GHw)
            p.zx <- p.xz / p.x
            Nt <- GH$GHw * colSums(p.zx)
            scores <- matrix(0, p, 2)
            for (i in 1:p) {
                rit <- GH$GHw * colSums(p.zx * X[, i])
                scores[i, ] <- c(crossprod(rit - pr[, i] * Nt, GH$Z))
            }
            ## Drop the entries for any constrained (fixed) parameters.
            if (!is.null(constraint))
                c(scores[, 1], sum(scores[, 2]))[-constraint[, 1]]
            else
                c(scores[, 1], sum(scores[, 2]))
        }
        X <- object$X
        if (any(is.na(X)))
            stop("currently the robust estimation of standard errors does not allow for missing values")
        H <- solve(object$hessian)
        n <- nrow(X)
        nb <- nrow(H)
        ## Preallocate one nb x nb matrix per subject, then fill each with
        ## the outer product of that subject's score vector.
        S <- lapply(1:n, array, data = 0, dim = c(nb, nb))
        for (m in 1:n) {
            sc <- score.vec(object$coef, X[m, , drop = FALSE], object$constraint, object$GH)
            S[[m]] <- outer(sc, sc)
        }
        ## Elementwise sum over subjects, then the sandwich product.
        S <- matSums(S)
        H %*% S %*% H
    } else
        solve(object$hessian)
    ## Label rows/columns beta.1..beta.p plus the common discrimination
    ## parameter "beta", dropping any constrained entries.
    p <- nrow(object$coef)
    nams <- c(paste("beta.", 1:p, sep = ""), "beta")
    if (!is.null(constraint <- object$constraint))
        nams <- nams[-constraint[, 1]]
    dimnames(inv.hes) <- list(nams, nams)
    inv.hes
}
|
aa76e87f737f8fdcc2d8b0f2ce18e3266c397a83 | ed98cb0cd2f0f2ec8df466e9f24579920a2e8ae4 | /cachematrix.R | d1dddb69faa18b83d659480f799499ddb654e9ac | [] | no_license | aishwarya1802/ProgrammingAssignment2-1 | ffd76292112e6c8edde57aa6abf246bc4906c9bc | 4f250df7381e869a15748ff920655342a48e8f15 | refs/heads/master | 2021-01-18T07:32:56.572690 | 2016-03-15T06:12:39 | 2016-03-15T06:12:39 | 53,918,398 | 0 | 0 | null | 2016-03-15T05:49:16 | 2016-03-15T05:49:15 | null | UTF-8 | R | false | false | 1,073 | r | cachematrix.R | ## makeCacheMatrix function is defined initially.
## This function is defined to take the value of a matrix and compute it's Inverse using 'Solve'.
## setmatrix is used to set the Inverse
## getmatrix is used to get the Inverse. It can be used by the user to check the computed Inverse of the matrix.
## Construct a matrix wrapper that can cache its inverse.
## Returns a list of four accessors:
##   set(y)       - store a new matrix and invalidate any cached inverse
##   get()        - retrieve the stored matrix
##   setmatrix(v) - cache a computed inverse
##   getmatrix()  - return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(b = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    b <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    b
  }
  setmatrix <- function(solve) {
    cached_inverse <<- solve
  }
  getmatrix <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## cacheSolve is a function which calculated the Inverse of a matrix provided the inverse is not calculated before.
## For the matrix whose inverse is calculated, it will get the inverse from CacheMatrix and would skip the computation.
## Else, it would calculate the Inverse using the 'solve'.
## Return the inverse of the matrix wrapped by a makeCacheMatrix object `b`.
## If the inverse has already been computed it is fetched from the cache and
## the computation is skipped; otherwise it is computed with solve() and
## stored for later calls. Extra arguments are forwarded to solve().
cacheSolve <- function(b=matrix(), ...) {
  m <- b$getmatrix()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  matrix <- b$get()
  m <- solve(matrix, ...)
  # Bug fix: the original called x$setmatrix(m) on an undefined `x`, so the
  # inverse was computed but never cached (and the call errored).
  b$setmatrix(m)
  m
}
##END
|
e718c0d001a39832586a84f95fd6bab90324e6e2 | 9c5a7859c5d73cbadf6582ca7262e05d91faf145 | /Labs/Lab 4/Lab 4.R | 1f8fc7da1ab37167f5593fed398ee5a43a9cb799 | [] | no_license | wesleywchang/STAT-147 | 077a961e65151e330dcd7c31789305a8f2c67a0a | 30379aac9fc1bed16ade940127fe1c9866e44b9d | refs/heads/master | 2022-11-30T05:43:39.789606 | 2020-08-15T06:33:30 | 2020-08-15T06:33:30 | 287,691,034 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,839 | r | Lab 4.R | # Statistics 147 Lab #4 Summer 2020
# Wesley Chang
# Read file plant.dat into variable plant_data.
# NOTE(review): the path is machine-specific; adjust before running elsewhere.
plant_data = read.table(file = "C:/Users/wesle/iCloudDrive/Summer 2020 (UCR)/STAT 147 (Session A)/Labs/Lab 4/plant.dat",header = TRUE)
plant_data
# use attach() function to make each column individually accessible
# use the names() function to obtain the column names
# NOTE(review): attach() puts the columns on the search path; bare names
# like PlA below refer to plant_data's columns.
attach(plant_data)
names(plant_data)
PlA
PlB
PlC
PlD
# R Question 2: descriptive statistics for Plant A
# sample mean
mean_PlA = mean(PlA)
mean_PlA
# sample median
median_PlA = median(PlA)
median_PlA
# sample variance
variance_PlA = var(PlA)
variance_PlA
# sample standard deviation
sd_PlA = sd(PlA)
sd_PlA
# R Question 3
# generate default descriptive statistics for Plant B (PlB)
summary_PlB = summary(PlB)
summary_PlB
# generate mean, median, variance, std dev for Plant C (PlC)
summary_PlC = summary(PlC)
variance_PlC = var(PlC)
sd_PlC = sd(PlC)
summary_PlC
variance_PlC
sd_PlC
# R Question 4
# Generate a 98% CI for Plant A
# Use t.test
# Format: t.test(name_of_variable,alternative = appropriate option,
# conf.level = confidence-level-in-decimal-format)
t.test(PlA,alternative="two.sided",conf.level=0.98)
# find and interpret a 96% confidence interval for the true mean discharge for Plant B
t.test(PlB,alternative="two.sided",conf.level=0.96)
# One-sided test of H1: mu(PlA) < 1.50
# use t.test
# Format: t.test(name_of_variable,alternative = appropriate option,
# conf.level = confidence-level-in-decimal-format)
t.test(PlA,alternative="less",mu=1.5,conf.level=0.95)
# using R to complete the calculations, test the hypothesis that the true mean discharge
# effluent (uB) for Plant B is significantly different from 1.75 pounds/gallon
# NOTE(review): no mu= is supplied below, so t.test defaults to mu = 0 --
# the intended mu = 1.75 should be confirmed and passed explicitly.
# two-sided test
t.test(PlB,alternative="two.sided",conf.level=0.95)
|
0be258c74198e1c5787755bf623272050d4fb72b | 994915cd470f20cc042d272182cd72433711a142 | /Scripts/Analysis_IN_maturation.R | 2ef61463a30e0532c00f83b769dd4c81b7ab7dc7 | [
"MIT"
] | permissive | OliverEichmueller/TSC_Science2021 | d3802561211c9aa578cc703fcd20dd7672cd6b14 | 02ac11d15c74a6cea8af2b8fe57b9668149bcdc8 | refs/heads/main | 2023-04-13T14:24:37.291447 | 2021-11-30T08:07:18 | 2021-11-30T08:07:18 | 412,109,853 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,121 | r | Analysis_IN_maturation.R | ## Analysis of interneuron maturation
##------ Tue Sep 28 18:43:43 2021 ------##
## Oliver Eichmueller
library(zoo)
library(dplyr)
library(monocle3)
library(ggplot2)
library(ggpubr)
library(pheatmap)
library(grid)
# wrapper for heatmap plotting ----------
# Redraw a pheatmap with only the row labels in `kept.labels` visible, each
# connected to its original heatmap row by a short "flag" segment. This
# edits the gtable stored inside the pheatmap object, then draws the result
# on the current graphics device and returns the modified gtable invisibly.
#
#   pheatmap     - object returned by pheatmap::pheatmap()
#   kept.labels  - character vector of row names to keep visible
#   repel.degree - number within [0, 1] controlling label spread (see below)
add.flag <- function(pheatmap,
                     kept.labels,
                     repel.degree) {
  # repel.degree = number within [0, 1], which controls how much
  # space to allocate for repelling labels.
  ## repel.degree = 0: spread out labels over existing range of kept labels
  ## repel.degree = 1: spread out labels over the full y-axis
  heatmap <- pheatmap$gtable
  # Grab the row-name grob so its labels and positions can be rewritten.
  new.label <- heatmap$grobs[[which(heatmap$layout$name == "row_names")]]
  # keep only labels in kept.labels, replace the rest with ""
  new.label$label <- ifelse(new.label$label %in% kept.labels,
                            new.label$label, "")
  # calculate evenly spaced out y-axis positions
  repelled.y <- function(d, d.select, k = repel.degree){
    # d = vector of distances for labels
    # d.select = vector of T/F for which labels are significant
    # recursive function to get current label positions
    # (note the unit is "npc" for all components of each distance)
    strip.npc <- function(dd){
      if(!"unit.arithmetic" %in% class(dd)) {
        return(as.numeric(dd))
      }
      # Unit arithmetic objects hold two operands and an operator name;
      # evaluate them recursively to plain numbers.
      d1 <- strip.npc(dd$arg1)
      d2 <- strip.npc(dd$arg2)
      fn <- dd$fname
      return(lazyeval::lazy_eval(paste(d1, fn, d2)))
    }
    full.range <- sapply(seq_along(d), function(i) strip.npc(d[i]))
    selected.range <- sapply(seq_along(d[d.select]), function(i) strip.npc(d[d.select][i]))
    # Evenly space the kept labels between the (k-widened) extremes.
    return(unit(seq(from = max(selected.range) + k*(max(full.range) - max(selected.range)),
                    to = min(selected.range) - k*(min(selected.range) - min(full.range)),
                    length.out = sum(d.select)),
                "npc"))
  }
  new.y.positions <- repelled.y(new.label$y,
                                d.select = new.label$label != "")
  # Segments linking each kept label's old row position to its new slot.
  new.flag <- segmentsGrob(x0 = new.label$x,
                           x1 = new.label$x + unit(0.15, "npc"),
                           y0 = new.label$y[new.label$label != ""],
                           y1 = new.y.positions)
  # shift position for selected labels
  new.label$x <- new.label$x + unit(0.2, "npc")
  new.label$y[new.label$label != ""] <- new.y.positions
  # add flag to heatmap
  # NOTE(review): t = 4, l = 4 assume the row-name panel sits in that gtable
  # cell; this matches the default pheatmap layout but is not guaranteed.
  heatmap <- gtable::gtable_add_grob(x = heatmap,
                                     grobs = new.flag,
                                     t = 4,
                                     l = 4
  )
  # replace label positions in heatmap
  heatmap$grobs[[which(heatmap$layout$name == "row_names")]] <- new.label
  # plot result
  grid.newpage()
  grid.draw(heatmap)
  # return a copy of the heatmap invisibly
  invisible(heatmap)
}
# set Output Directory ---------------------------------------------------------
OutDir <- 'path_to/OutDir'
# plot statistics for all organoid integration ---------------------------------
stat_all_orgs <- readRDS(file = "path_to/statistics_AllOrgs.Rds")
# filter for only d110 organoids
# Keep only the interneuron clusters (6, 8, 9, 16) and drop the Old.Tumor set.
stat_all_orgs_filter <- stat_all_orgs %>%
  filter(media_genotype != "Old.Tumor", clusters_low %in% c(6,8,9,16)) %>%
  mutate(clusters_low = factor(clusters_low),
         media = factor(as.vector(media)))
# create media annotation (facet strip labels keyed by the media column)
media_label <- c("High Nutrient d110", "Low Nutrient d110")
names(media_label) <- c("HN", "LN")
# Dodged bar chart of cluster percentages, TSC2 labels nudged right and
# Ctrl labels nudged left so they sit over their own bars.
pl1 <- ggplot(stat_all_orgs_filter
       , aes(x = clusters_low, y = relative, fill = media_genotype)) +
  ggnewscale::new_scale_fill() +
  geom_col( aes(x = clusters_low, y = relative, fill = media_genotype),
            position = "dodge", width = 0.8) +
  geom_text(data = stat_all_orgs_filter[
    stat_all_orgs_filter$media_genotype %in% c("HN.TSC2", "LN.TSC2"),]
    , aes(x = clusters_low, y = relative +2, label = round(relative)),
    color = "black", nudge_x = .2) +
  geom_text(data = stat_all_orgs_filter[
    stat_all_orgs_filter$media_genotype %in% c("HN.Ctrl", "LN.Ctrl"),]
    , aes(x = clusters_low, y = relative +2, label = round(relative)),
    color = "black", nudge_x = -.2) +
  scale_fill_manual("Dataset",
                    values = c(rgb(102, 153, 102, maxColorValue = 255),
                               rgb(51, 102, 153, maxColorValue = 255),
                               rgb(102, 153, 102, maxColorValue = 255),
                               rgb(51, 102, 153, maxColorValue = 255),
                               rgb(51, 102, 153, maxColorValue = 255))) +
  theme_light() +
  theme(strip.text.y = element_text(face = "bold", size = 10, color = "black")
        , strip.background.y = element_rect(fill =
                                              c(alpha("lightgrey", alpha = .5),
                                                alpha("blue", alpha = .5),
                                                alpha("green", alpha = .5)))) +
  xlab("Clusters") + ylab("Percentage of Dataset")+
  scale_y_continuous(limits = c(0,15))+
  facet_grid(rows = vars(media), labeller = labeller(media = media_label)) +
  ggtitle("Percentage of Interneuron Clusters of whole dataset")
ggsave(paste0(OutDir, '/Perc_IN_AllClustering.pdf'),
       device = "pdf", plot = pl1)
# load dataset of all organoids with pseudotime --------------------------------
cds.integration <- readRDS(file = 'path_to/tsc_paper_integration_all_pseudotime.Rds')
# choose immature to mature IN (interactive selection on the trajectory graph)
clip_cds_IN <- monocle3::choose_graph_segments(cds.integration)
# NOTE(review): `clip_cds` is not defined anywhere in this file -- it
# presumably exists in the interactive session (or should read
# `cds.integration`); verify before rerunning this script top to bottom.
clip_cds_IN <- clip_cds[,clip_cds_IN@colData$barcode]
plot_cells(clip_cds_IN, color_cells_by = "media_genotype", show_trajectory_graph = F
           , label_groups_by_cluster = F, labels_per_group = 0)
# downsample barcodes based on smallest d110 dataset
# NOTE(review): sample_n() is random and no seed is set, so the downsampling
# is not reproducible across runs.
barcodes_downsampled <- cds.integration@colData %>% as.data.frame() %>%
  filter(media_genotype != "Old.Tumor") %>%
  dplyr::group_by(media_genotype) %>%
  dplyr::sample_n(4816) %>%
  magrittr::use_series(barcode)
# subset IN lineage dataset for downsampled barcodes
barcodes_downsampled_found <- intersect(barcodes_downsampled,
                                        colnames(clip_cds_IN))
clip_cds_IN_downsampled <- clip_cds_IN[,barcodes_downsampled_found]
clip_cds_IN_downsampled@colData$orig.ident <-
  as.vector(clip_cds_IN_downsampled@colData$orig.ident)
# re-calculate UMAP
clip_cds_IN_downsampled <- reduce_dimension(clip_cds_IN_downsampled,
                                            max_components = 2)
# re-cluster IN lineage dataset
clip_cds_IN_downsampled <- cluster_cells(clip_cds_IN_downsampled, k = 8)
clip_cds_IN_downsampled@colData$clusters_new <- clip_cds_IN_downsampled@clusters$UMAP$clusters
# plot UMAPs
plot_cells(clip_cds_IN_downsampled, color_cells_by = "cluster", cell_size = 2,
           alpha = .5, show_trajectory_graph = FALSE, label_cell_groups = FALSE)
ggsave(paste0(OutDir, '/UMAP_IN_SubClustering.png'),
       device = "png")
plot_cells(clip_cds_IN_downsampled, color_cells_by = "media_genotype", cell_size = 2,
           alpha = .5, show_trajectory_graph = FALSE, label_cell_groups = FALSE)+
  scale_color_manual(values = pals::brewer.set1(4))
# NOTE(review): this ggsave path has no "/" after OutDir, unlike the one
# above -- confirm whether OutDir ends with a slash.
ggsave(paste0(OutDir, 'UMAP_IN__media_genotype_SubClustering.png'),
       device = "png")
# learn graph (principal graph over the whole partition set)
clip_cds_IN_downsampled <-
  learn_graph(clip_cds_IN_downsampled, use_partition = F,
              learn_graph_control = list(minimal_branch_len = 12,
                                         rann.k = NULL,
                                         orthogonal_proj_tip = FALSE,
                                         geodesic_distance_ratio = 1/3,
                                         euclidean_distance_ratio = 1))
# order cells in pseudotime (interactive root selection)
clip_cds_IN_downsampled <- order_cells(clip_cds_IN_downsampled)
# UMAP of pseudotime
plot_cells(clip_cds_IN_downsampled, color_cells_by = "pseudotime",
           cell_size = 2, show_trajectory_graph = T)
ggsave(paste0(OutDir, 'UMAP_IN_Pseudotime_SubClustering.png'),
       device = "png")
# subset only tumor to tuber IN (interactive selection)
clip_cds_IN_downsampled_subset <- choose_cells(clip_cds_IN_downsampled)
# graph test on tumor to tuber IN (spatial autocorrelation of expression)
graph_test_newIN_subset <- graph_test(clip_cds_IN_downsampled_subset)
write.csv(graph_test_newIN_subset, file = paste0(OutDir, 'graph_test_pseudotime_IN.csv'))
# filter gois (genes of interest: highly significant graph-test hits)
goi <- graph_test_newIN_subset %>%
  filter(status %in% "OK", q_value <1e-20|q_value==0) %>%
  magrittr::use_series(gene_short_name)
# Order cells from LN to HN cluster (interactive re-rooting)
clip_cds_IN_downsampled_subset_neworder <-
  order_cells(clip_cds_IN_downsampled_subset)
# UMAPs of re-ordered subset
plot_cells(clip_cds_IN_downsampled_subset_neworder,
           color_cells_by = "pseudotime",
           show_trajectory_graph = F, cell_size = 2)
ggsave(paste0(OutDir, 'UMAP_INsubset_pseudotime_SubClustering.png'),
       device = "png")
plot_cells(clip_cds_IN_downsampled_subset_neworder, color_cells_by = "cluster",
           show_trajectory_graph = F, cell_size = 2)
ggsave(paste0(OutDir, 'UMAP_INsubset_cluster_SubClustering.png'),
       device = "png")
# bin along pseudotime (integer bins via floor of each cell's pseudotime)
pseudotime_bin <- pseudotime(clip_cds_IN_downsampled_subset_neworder) %>%
  data.frame(pseudotime_bin = .) %>%
  mutate(barcode = row.names(.), pseudotime_bin = floor(pseudotime_bin)) %>%
  select(barcode, pseudotime_bin)
# add pseudotime bin to coldata
clip_cds_IN_downsampled_subset_neworder@colData <-
  clip_cds_IN_downsampled_subset_neworder@colData %>% as.data.frame() %>%
  left_join(pseudotime_bin, by = "barcode") %>%
  DataFrame(row.names = .$barcode)
# convert pseudotime bin to factor (and keep only the columns listed)
clip_cds_IN_downsampled_subset_neworder@colData <-
  clip_cds_IN_downsampled_subset_neworder@colData %>% as.data.frame() %>%
  select(barcode, Size_Factor, orig.ident, clusters_low, media_genotype,
         clusters_new, pseudotime_bin) %>%
  mutate(pseudotime_bin = factor(pseudotime_bin)) %>%
  DataFrame(row.names = .$barcode)
# perform sliding average and plot from tumor to tuber IN
# set window and step (bins per rolling-mean window, step between windows)
window <- 2
step <- 1
# aggregate gois of graph test per pseudotime bin (scaled, capped at 1)
dat_IN_subset <- aggregate_gene_expression(clip_cds_IN_downsampled_subset_neworder[goi,]
                                        , cell_group_df = pseudotime_bin
                                        , scale_agg_values = T
                                        , max_agg_value = 1)
dat_IN_subset_backup <- dat_IN_subset
dat_IN_subset <- as.matrix(dat_IN_subset_backup)
# order using sliding average: rows (genes) sorted by the bin where their
# rolling mean peaks (rollapply is zoo's sliding-window apply)
dat_IN_subset_ordered <-
  dat_IN_subset[order(apply(t(rollapply(t(dat_IN_subset), width=window, by=step, FUN=mean)), 1, which.max)), ]
# generate annotation for groups (gene -> set assignments from clusterProfiler)
groupings <-
  read.delim('path_to/Overrepresentation.clusterProfiler3.18.1.graph_test.splitByConsecutiveMaxc.gene2set.tsv')
# NOTE(review): str_replace replaces only the FIRST "-" in each gene_id;
# use str_replace_all if ids can contain more than one dash.
groupings$gene_id <- stringr::str_replace(groupings$gene_id, pattern = "-", "\\.")
groupings_sub <- groupings[1:360,]
row.names(groupings_sub) <- groupings_sub$gene_id
groupings_sub <- groupings_sub %>% select(-gene_id)
# NOTE(review): the first assignment below is dead code -- it is immediately
# overwritten by the color list on the next statement.
groupings_color <- groupings_sub
groupings_color <- list(set = c(`1` = pals::brewer.set2(3)[1],
                                `2` = pals::brewer.set2(3)[2],
                                `3` = pals::brewer.set2(3)[3]))
# select genes for annotation (the only row labels kept by add.flag below)
goi_plot <- c("GRIA1", "GRIA2", "DPP10", "GABRA2", "GRIP1", "ARX", "STMN1", "SNAP25", "GRIN2B",
              "GABARAPL2", "RPL22", "RPS8", "RPS23", "LAPTM4B")
# plot heatmap (rows pre-ordered above, so clustering is disabled)
ph1 <- pheatmap(dat_IN_subset_ordered
         , cluster_rows = F, cluster_cols = F
         , color = rev(pals::brewer.rdbu(100))
         , annotation_row = groupings_sub
         , annotation_colors = groupings_color
         , scale = "row", show_rownames = T, fontsize_row = 15, angle_col = 0, fontsize_col = 20)
# annotated selected genes (add.flag is defined at the top of this file)
pl1 <- add.flag(ph1, goi_plot, repel.degree = .1)
# save plot
ggsave(paste0(OutDir, 'Heatmap_pseudotime_ordered.pdf'),
       device = "pdf", plot = pl1)
ggsave(paste0(OutDir, 'Heatmap_pseudotime_ordered.png'),
       device = "png", plot = pl1)
# calculate statistics and perform plotting
# Per-dataset cluster counts as a percentage of the 4816 downsampled cells.
# NOTE(review): 4816 is the hard-coded downsample size from above; keep the
# two in sync if the downsampling changes.
clip_cds_IN_downsampled_stat <- clip_cds_IN_downsampled@colData %>%
  as.data.frame() %>%
  dplyr::group_by(media_genotype) %>%
  dplyr:: count(clusters_new) %>%
  mutate(relative = round(n/4816*100, 2),
         media = stringr::str_extract(media_genotype, "LN|HN"))
# Same dodged bar-chart layout as the whole-dataset plot above.
pl1 <- ggplot(clip_cds_IN_downsampled_stat
              , aes(x = clusters_new, y = relative, fill = media_genotype)) +
  ggnewscale::new_scale_fill() +
  geom_col( aes(x = clusters_new, y = relative, fill = media_genotype),
            position = "dodge", width = 0.8) +
  geom_text(data = clip_cds_IN_downsampled_stat[
    clip_cds_IN_downsampled_stat$media_genotype %in% c("HN.TSC2", "LN.TSC2"),]
    , aes(x = clusters_new, y = relative +2, label = relative),
    color = "black", nudge_x = .2) +
  geom_text(data = clip_cds_IN_downsampled_stat[
    clip_cds_IN_downsampled_stat$media_genotype %in% c("HN.Ctrl", "LN.Ctrl"),]
    , aes(x = clusters_new, y = relative +2, label = relative),
    color = "black", nudge_x = -.2) +
  scale_fill_manual("Dataset",
                    values = c(rgb(102, 153, 102, maxColorValue = 255),
                               rgb(51, 102, 153, maxColorValue = 255),
                               rgb(102, 153, 102, maxColorValue = 255),
                               rgb(51, 102, 153, maxColorValue = 255),
                               rgb(51, 102, 153, maxColorValue = 255))) +
  theme_light() +
  theme(strip.text.y = element_text(face = "bold", size = 10, color = "black")
        , strip.background.y =
          element_rect(fill = c(alpha("lightgrey", alpha = .5),
                                alpha("blue", alpha = .5),
                                alpha("green", alpha = .5)))) +
  xlab("Clusters") + ylab("Percentage of Dataset")+
  scale_y_continuous(limits = c(0,15))+
  facet_grid(rows = vars(media), labeller = labeller(media = media_label)) +
  ggtitle("Percentage of Interneuron Subclusters of whole dataset")
ggsave(paste0(OutDir, 'Perc_IN_SubClustering.pdf'),
       device = "pdf", plot = pl1)
write.csv(clip_cds_IN_downsampled_stat,
          paste0(OutDir, 'clip_cds_IN_downsampled_stat.csv'))
# plot UMAPs for individual genes (size-factor normalized, split by dataset)
pl1 <- plot_cells(clip_cds_IN_downsampled, genes = "LAPTM4B",
           show_trajectory_graph = F, cell_size = 2, norm_method = "size_only") +
  facet_wrap(~media_genotype)
ggsave(paste0(OutDir, 'UMAP_LAPTM4B_IN_downsampled.png'),
       device = "png", plot = pl1)
pl1 <- plot_cells(clip_cds_IN_downsampled, genes = "GABARAPL2",
                  show_trajectory_graph = F, cell_size = 2, norm_method = "size_only") +
  facet_wrap(~media_genotype)
ggsave(paste0(OutDir, 'UMAP_GABARAPL2_IN_downsampled.png'),
       device = "png", plot = pl1)
pl1 <- plot_cells(clip_cds_IN_downsampled, genes = "RPL22",
                  show_trajectory_graph = F, cell_size = 2, norm_method = "size_only") +
  facet_wrap(~media_genotype)
ggsave(paste0(OutDir, 'UMAP_RPL22_IN_downsampled.png'),
       device = "png", plot = pl1)
pl1 <- plot_cells(clip_cds_IN_downsampled, genes = "RPS10",
                  show_trajectory_graph = F, cell_size = 2, norm_method = "size_only") +
  facet_wrap(~media_genotype)
ggsave(paste0(OutDir, 'UMAP_RPS10_IN_downsampled.png'),
       device = "png", plot = pl1)
# save files and objects
# NOTE(review): only the first saveRDS path includes a "/" after OutDir;
# confirm whether OutDir ends with a slash.
saveRDS(clip_cds_IN, file = paste0(OutDir, "/CLIP_cds_IN.Rds"))
saveRDS(clip_cds_IN_downsampled,
        file = paste0(OutDir, "CLIP_cds_in_downsampled.Rds"))
saveRDS(clip_cds_IN_downsampled_subset,
        file = paste0(OutDir, "CLIP_cds_in_downsampled_subset.Rds"))
saveRDS(clip_cds_IN_downsampled_subset_neworder,
        file = paste0(OutDir, "CLIP_cds_in_downsampled_subset_neworder.Rds"))
write.csv(dat_IN_subset_ordered %>% as.data.frame()%>%
            mutate(gene = row.names(.)),
          paste0(OutDir, "dat_IN_subset_ordered.csv"))
|
80edf7944a46d0eac0237198af265d860330c5f1 | c81e596c811e31acae9fed5e48971d331b202afd | /man/LaplaceConvolution.Rd | 854129b9afe1bf58730cd17b2c774118623dd932 | [] | no_license | cran/LaplaceDeconv | bd5fd1cc53a606de6b4df3974f6e96e89dae88f6 | 8bc0c564c2d8ed4eddfadb79d8c18a156bb7d048 | refs/heads/master | 2021-01-10T13:14:51.931387 | 2016-01-27T20:39:49 | 2016-01-27T20:39:49 | 48,082,452 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,129 | rd | LaplaceConvolution.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/LagLaplaceDeconvolution.R
\name{LaplaceConvolution}
\alias{LaplaceConvolution}
\title{function LaplaceConvolution}
\usage{
LaplaceConvolution(t, g, f)
}
\arguments{
\item{t}{numeric vector, the observation times}
\item{g}{numeric vector, the observed values of the known Laplace convolution kernel at the observation times}
\item{f}{numeric vector, the values of the function f to convolve with g}
}
\value{
Returns the Laplace convolution of f and g, computed with the trapezoidal formula and a spline approximation of f.
}
\description{
Computes the Laplace convolution of two functions f and g observed at discrete times t, using the trapezoidal formula and a spline approximation of f.
}
\examples{
\dontrun{
library(LaplaceDeconv)
t = seq(0,10,l=100)
g = exp(-5*t)
f = t^2*exp(-t)
# compute the Laplace convolution from functions computed at times t : f and g
fg = LaplaceConvolution(t,g,f)
matplot(t,cbind(f,g,fg),lty=1,type='l')
legend('topright',lty=1,legend=c('f','g','fxg'),col=1:3)
}
}
\author{
Y. Rozenholc and M. Pensky
}
|
897685f46768c09e303d4ede6ae62bda64c73f11 | 2f74b6fa3057fcb98ad562247ea055ea63446146 | /man/g.rank.Rd | 7b5cb4c37edf000e077fb5b5d1fb97e36c9efd05 | [] | no_license | strayMat/warpDE | 977e0f0b2d99d3ef1e7bdef9e2cad1a3ff6d8275 | 92e50beba7c54581173925aeff14ab02233980b5 | refs/heads/master | 2021-01-01T16:38:04.340919 | 2017-12-07T13:41:45 | 2017-12-07T13:41:45 | 97,879,353 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 319 | rd | g.rank.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R
\name{g.rank}
\alias{g.rank}
\title{Returns the rank and distance of some genes}
\usage{
g.rank(x, g.list)
}
\arguments{
\item{x}{a \code{rankingDE} object.}

\item{g.list}{the genes of interest whose ranks and criterion values are returned.}
}
\value{
the ranks and criteria values for the genes of interest.
}
|
f2eeb99cc282610476ddd3d0bae8c4ef817be114 | d481adea8b5f993c766a7842c2cb9babcef3deef | /UI/03_compare.R | e82f83daea0e3c1da0fcf3034707514fc92cc644 | [] | no_license | AurelieFrechet/world_happiness | 991e1be543a2789267fc9a35685a957ab5e960ff | 7ae1bd22325b2c21815ff30eac08b351b79be1cf | refs/heads/master | 2021-06-24T04:27:50.677840 | 2021-01-19T16:08:01 | 2021-01-19T16:08:01 | 192,379,296 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 785 | r | 03_compare.R | body_content[[length(body_content) + 1]] <-
  # "Compare" tab: a multi-country picker, a stacked-bar panel with a year
  # slider (left), and a line-chart panel with indicator switches (right).
  # `countries_list`, `years` and switchbuttons_indicator() come from other
  # files of this Shiny app.
  tabItem(
    "compare",
    # Searchable multi-select of countries; drives both panels below.
    pickerInput(
      inputId = "compare_select",
      label = "",
      choices = countries_list,
      multiple = TRUE,
      width = "100%",
      options = list(
        title = "Select multiple countries",
        `live-search` = TRUE)
    ),
    br(),
    # Left half: score composition per indicator for one selected year.
    column(width = 6,
           h2("Composition of score order by indicator"),
           plotlyOutput("compare_stakedbar"),
           sliderTextInput(
             inputId = "compare_years",
             label = "Pick a year:",
             choices = years,
             width = "100%"
           )),
    # Right half: indicator time series with a server-rendered title.
    column(width = 6,
           htmlOutput("compare_lines_title"),
           plotlyOutput("compare_lines"),
           switchbuttons_indicator(
             inputId = "compare_indicators"))
) |
b17a212b751dc3246f3d7909f576846d1e308333 | fca7d4c6ca3ff0ce8a6f5ed717d642c163b1e0ec | /Scripts/R Scripts/1.R | fb9ee0976d3a2963284b79a79e7ec06b6e99eb38 | [] | no_license | gustavo95/vulnerability-detection-tool | 41776763203798ae5bd0bcf64e5670ffd5580330 | e285afa69e7ac841314e9874f6cea925e5da1fd1 | refs/heads/master | 2021-01-20T20:15:32.460512 | 2016-08-11T03:21:56 | 2016-08-11T03:21:56 | 65,434,617 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,356 | r | 1.R | library(DBI)
library(lattice)
library(Hmisc)
library(dplyr)
library(RMySQL)
library(plotrix)
library(reshape2)
library(graphics)
# NOTE(review): credentials are hard-coded in source; move user/password to a
# config file or environment variables before sharing this script.
con <- dbConnect(MySQL(), user = 'root', password = 'admin', host = 'localhost', dbname='changehistory')
#vetorPaths <- c("dom","javascript","javascript_extras","javascript_xpconnect","layout_rendering","libraries","kernel","network","webpage_structure","widget")
#for(i in vetorPaths){
# Build and run the aggregation query for the kernelClassify change table.
querryTable <- "kernelClassify"
querryBefore <- "SELECT func,cveID, module, vulnerability, SUM(NCEC),SUM(NCMC),SUM(NFCEC),SUM(NFCMC),SUM(NMEC),SUM(NMMC),SUM(NVEC),SUM(NVMC),dateT FROM "
querryAfter <- " GROUP BY func,file_path,CVEID,module ORDER BY dateT;"
kernelclassify <- dbGetQuery(con,paste(querryBefore,querryTable,querryAfter,sep=""))
# Split rows by the vulnerability flag.
kernel_vulnerabilities <- subset(kernelclassify,vulnerability == 1)
kernel_without_vulnerabilities <- subset(kernelclassify,vulnerability == 0)
# Exploratory time-series views of the 5th aggregated column.
ts(kernel_vulnerabilities[5], frequency = 12, start = c(1990, 2))
print( ts(1:10, frequency = 7, start = c(12, 2)), calendar = TRUE)
# Bug fix: the original line `z <- ts(matrix(kernel_vulnerabilities[5]), 200, 8), ...)`
# had unbalanced parentheses and did not parse. `[[5]]` extracts the column
# as a vector so matrix() can shape it into 200 rows x 8 series
# (values are recycled if the column is shorter -- verify that is intended).
z <- ts(matrix(kernel_vulnerabilities[[5]], 200, 8), start = c(2005, 2), frequency = 12)
plot(z, yax.flip = TRUE)
ts(kernel_vulnerabilities[5], frequency = 12, start = c(1990, 2))
plot(ts(kernel_vulnerabilities[5], frequency = 7, start = c(1990, 1)))
dbDisconnect(con)
|
ebbe1b9241b38cd02126c2d47d2a1c08f3a7fabb | 6a6ec6c149757b7addb61c82df2a175bb4448e00 | /plot4.R | 041cd11b4e393697ff78844a0adeab60eb937319 | [] | no_license | tawabd/Exploratory_data_analysis-coursera--Project1 | 1da773fa8d9c0931cd2c1e36702e6d7391be6450 | d8be4c8cfaee0d2db2c021adfbc17f4938ac746b | refs/heads/master | 2021-05-04T08:53:44.088070 | 2016-10-09T04:53:56 | 2016-10-09T04:53:56 | 70,377,250 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,121 | r | plot4.R | > proj<-read.table("C:\\Coursera_exploratory\\household_power_consumption.txt", header=T, sep=";", stringsAsFactors=FALSE, dec=".")
# BUG FIX: the original lines were pasted from an interactive session and kept
# the "> " console prompts, so the file did not parse as a script; prompts
# removed. `globalActivePower` is now defined before it is plotted, and the
# duplicate png() call (which discarded the first panel) is gone.
# Subset the two days of interest (1-2 Feb 2007) from `proj` (read above).
powerdata <- proj[proj$Date %in% c("1/2/2007","2/2/2007"),]
# Build POSIXlt timestamps from the separate Date and Time columns.
time <- strptime(paste(powerdata$Date, powerdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(powerdata$Global_active_power)
subMetering1 <- as.numeric(powerdata$Sub_metering_1)
subMetering2 <- as.numeric(powerdata$Sub_metering_2)
subMetering3 <- as.numeric(powerdata$Sub_metering_3)
globalReactivePower <- as.numeric(powerdata$Global_reactive_power)
# One device, 2x2 panel layout; all panels rendered before dev.off().
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(time, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(time, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(time, subMetering2, type="l", col="red")
lines(time, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
plot(time, globalReactivePower, type="l", xlab="time", ylab="Global_reactive_power")
dev.off()
7bc89e136de9436c9f85a65112986f7d39f78d49 | 5c21757fb60ca9fa2232f87cc05ade4e34de6466 | /man/rand.Rd | d08f60b4a4e16faa4b449537e4b7e855f31698a2 | [] | no_license | cran/DBGSA | d2f0c59b50ce4568d98c8acb6aeff7542ed8d5b0 | 5ca177761739df5c3910876059d1b4a7f8fb1183 | refs/heads/master | 2016-09-06T20:06:04.407631 | 2011-12-29T00:00:00 | 2011-12-29T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 581 | rd | rand.Rd | \name{rand}
\alias{rand}
\docType{data}
\title{distances of gene expression obtained by gene resampling}
\description{
An array of data giving the distances of the gene set expression profile obtained by gene resampling; each row represents the expression profile of a specific gene label, each column represents a sample.
}
\usage{data(rand)}
\format{
Distances of gene expression profiles: each row represents a gene label, each column represents a sample.
}
\source{
They are derived using \code{randdis}.
}
\examples{
data(rand)
}
\keyword{datasets}
|
c10aaf6fd68ade22fa6cff08dd71660434fd3596 | e219e19cd3bef5cd39551fe8c03f8f5e371b029c | /plot2.R | 8215ad385ea2acd0133a488bffd50ef5d2978ed7 | [] | no_license | manni-truong/ExData_Plotting1 | 39d80ab19209a22a4a0fe149f7bd7c9805efb5bb | 82ee73078577ada52601fc83639a8a888b66ddb1 | refs/heads/master | 2021-01-15T11:23:55.564758 | 2015-10-10T15:13:54 | 2015-10-10T15:13:54 | 44,004,297 | 0 | 0 | null | 2015-10-10T09:31:17 | 2015-10-10T09:31:16 | null | UTF-8 | R | false | false | 1,169 | r | plot2.R |
# Manni Truong, 2015
# Exploratory Data
library(data.table)
library(dplyr)
# set current working dir to where script lives
current_dir <- dirname(parent.frame(2)$ofile)
setwd(current_dir)
# get data
if (!file.exists("household_power_consumption.txt")) {
tmp <- tempfile()
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, tmp, mode = "wb")
unzip(tmp, "household_power_consumption.txt")
}
# load, process and filter dataset
dt <- fread("household_power_consumption.txt", header = TRUE, stringsAsFactors = TRUE, sep = ";", na.strings = c("?", ""))
dt$Date <- as.Date(dt$Date, format = "%d/%m/%Y")
subset_dt <- filter(dt, Date >= "2007-02-01" & Date <= "2007-02-02")
subset_dt$time_tmp <- paste(subset_dt$Date, subset_dt$Time)
subset_dt <- as.data.frame(subset_dt)
subset_dt$Time <- strptime(subset_dt$time_tmp, format = "%Y-%m-%d %H:%M:%S")
# plotting
png("plot2.png", width = 480, height = 480)
plot(subset_dt$Time, subset_dt$Global_active_power, type = "n", xlab = "", ylab = "Global Active Power (kilowatts)")
lines(subset_dt$Time, subset_dt$Global_active_power)
dev.off()
|
22df246d34ff0f2cf01c35235fbfc186d75c846d | 70774dcbaa6219464131aaf9f0d5b3628c9dcaf8 | /assignment.r | 3dffa7ae908a5ba9f1c92c62bfbaa813e80130b8 | [] | no_license | BabisK/M36102P-Assignment2-8 | 20c2c7cbb5988ededca3b350ffd546efaa212eb2 | 9f2df3966a4f12b159d63b6366fc3383867125bc | refs/heads/master | 2021-01-13T00:56:40.961331 | 2016-02-07T15:51:32 | 2016-02-07T15:51:32 | 51,107,882 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,937 | r | assignment.r | library(moments)
my_column <- 5 + 1
W <- read.csv(file = "w.csv")[[my_column]]
X <- read.csv(file = "x.csv")[[my_column]]
Y <- read.csv(file = "y.csv")[[my_column]]
Z <- read.csv(file = "z.csv")[[my_column]]
par(mfrow = c(2,2))
plot(sort(W), main = "W", ylab = "W")
plot(sort(X), main = "X", ylab = "X")
plot(sort(Y), main = "Y", ylab = "Y")
plot(sort(Z), main = "Z", ylab = "Z")
par(mfrow = c(1,3))
plot(sort(Y)[0:999], main = "Y without highest sample", ylab = "Y")
plot(sort(Y)[0:900], main = "Y without highest 100 samples", ylab = "Y ")
plot(sort(Y)[0:500], main = "Y without highest 500 samples", ylab = "Y")
par(mfrow = c(2,2))
hist(W, col=terrain.colors(15))
hist(X, col=terrain.colors(15), breaks=c(0:8), right = FALSE, include.lowest = FALSE)
hist(Y, col=terrain.colors(15))
hist(Z, col=terrain.colors(15))
par(mfrow = c(2,2))
boxplot(W, col=terrain.colors(15), ylab = "W")
boxplot(X, col=terrain.colors(15), ylab = "X")
boxplot(Y, col=terrain.colors(15), ylab = "Y")
boxplot(Z, col=terrain.colors(15), ylab = "Z")
class(W)
head(W)
summary(W)
var(W)
sd(W)
skewness(W)
kurtosis(W)
class(X)
head(X)
summary(X)
var(X)
sd(X)
skewness(X)
kurtosis(X)
class(Y)
head(Y)
summary(Y)
var(Y)
sd(Y)
skewness(Y)
kurtosis(Y)
class(Z)
head(Z)
summary(Z)
var(Z)
sd(Z)
skewness(Z)
kurtosis(Z)
test.binom <- function(x, size, prob){
  # Grid-search candidate binomial parameters for a distribution that fits
  # the observed counts of `x`, using a chi-squared goodness-of-fit test.
  #
  # x:    integer-valued sample to test.
  # size: vector of candidate binomial `size` parameters.
  # prob: vector of candidate binomial `prob` parameters.
  #
  # Returns c(<chisq.test result>, size, prob) for the first combination
  # whose p-value exceeds 0.05, or for the last combination tried.
  t <- table(x)
  q <- seq(min(x), max(x))
  for (s in size) {
    for (p in prob) {
      d <- dbinom(q, s, p)
      # Pad the observed counts with zeros when the theoretical support is
      # longer than the observed one. (Note: the padding persists across
      # candidate parameters, as in the original implementation.)
      if(length(d) > length(t)) {
        t <- c(t, rep(0, times = length(d)-length(t)))
      }
      # If the candidate density does not cover the full probability mass,
      # add a catch-all cell so the probabilities sum to 1.
      if(sum(d) < 1) {
        res <- chisq.test(c(t,0), p = c(d, 1-sum(d)))
      }
      else {
        res <- chisq.test(t, p = d)
      }
      pval <- res$p.value
      if (!is.na(pval) && pval > 0.05) {
        # BUG FIX: the original `break` only exited the inner prob loop, so a
        # successful fit could be overwritten by later `size` candidates.
        # Return the accepted fit immediately instead.
        return(c(res, s, p))
      }
    }
  }
  # No candidate passed: report the last test performed (original behavior).
  c(res, s, p)
}
test.nbinom <- function(x, size, prob){
  # Grid-search candidate negative-binomial parameters for a distribution
  # that fits the observed counts of `x`, via a chi-squared goodness-of-fit test.
  #
  # x:    integer-valued sample to test.
  # size: vector of candidate `size` parameters.
  # prob: vector of candidate `prob` parameters.
  #
  # Returns c(<chisq.test result>, size, prob) for the first combination
  # whose p-value exceeds 0.05, or for the last combination tried.
  t <- table(x)
  q <- seq(min(x), max(x))
  for (s in size) {
    for (p in prob) {
      d <- dnbinom(q, s, p)
      # Pad the observed counts with zeros when the theoretical support is
      # longer than the observed one (padding persists across candidates,
      # as in the original implementation).
      if(length(d) > length(t)) {
        t <- c(t, rep(0, times = length(d)-length(t)))
      }
      # Add a catch-all cell when the candidate density does not cover the
      # full probability mass, so the probabilities sum to 1.
      if(sum(d) < 1) {
        res <- chisq.test(c(t,0), p = c(d, 1-sum(d)))
      }
      else {
        res <- chisq.test(t, p = d)
      }
      pval <- res$p.value
      if (!is.na(pval) && pval > 0.05) {
        # BUG FIX: the original `break` only exited the inner prob loop, so a
        # successful fit could be overwritten by later `size` candidates.
        return(c(res, s, p))
      }
    }
  }
  # No candidate passed: report the last test performed (original behavior).
  c(res, s, p)
}
test.geom <- function(x, prob){
  # Search the candidate probabilities in `prob` for a geometric parameter
  # whose fit to the observed counts of `x` passes a chi-squared
  # goodness-of-fit test (p-value > 0.05).
  # Returns the last chi-squared result concatenated with the last
  # probability tried (the accepted one, if any candidate passed).
  counts <- table(x)
  support <- seq(min(x), max(x))
  for (p in prob) {
    dens <- dgeom(support, p)
    # Zero-pad the observed counts when the theoretical support is longer.
    pad <- length(dens) - length(counts)
    if (pad > 0) {
      counts <- c(counts, rep(0, times = pad))
    }
    # Add a catch-all cell whenever the density leaves probability mass
    # uncovered, so the cell probabilities sum to 1.
    leftover <- 1 - sum(dens)
    if (leftover > 0) {
      fit <- chisq.test(c(counts, 0), p = c(dens, leftover))
    } else {
      fit <- chisq.test(counts, p = dens)
    }
    if (!is.na(fit$p.value) && fit$p.value > 0.05) break
  }
  c(fit, p)
}
test.pois <- function(x, lamda){
  # Search the candidate means in `lamda` for a Poisson parameter whose fit
  # to the observed counts of `x` passes a chi-squared goodness-of-fit test
  # (p-value > 0.05).
  # Returns the last chi-squared result concatenated with the last mean
  # tried (the accepted one, if any candidate passed).
  counts <- table(x)
  support <- seq(min(x), max(x))
  for (l in lamda) {
    dens <- dpois(support, l)
    # Zero-pad the observed counts when the theoretical support is longer.
    pad <- length(dens) - length(counts)
    if (pad > 0) {
      counts <- c(counts, rep(0, times = pad))
    }
    # Add a catch-all cell whenever the density leaves probability mass
    # uncovered, so the cell probabilities sum to 1.
    leftover <- 1 - sum(dens)
    if (leftover > 0) {
      fit <- chisq.test(c(counts, 0), p = c(dens, leftover))
    } else {
      fit <- chisq.test(counts, p = dens)
    }
    if (!is.na(fit$p.value) && fit$p.value > 0.05) break
  }
  c(fit, l)
}
# ---- Goodness-of-fit checks on W and X ----
test.pois(W, 5.7)
quantiles <- seq(min(X),max(X))
# BUG FIX: the original referenced an undefined object `quant`; the support
# vector defined on the previous line is called `quantiles`.
distribution <- dnbinom(quantiles, size = 1, prob = 0.7)
# Extra zero cell absorbs the probability mass beyond max(X).
chisq.test(x = c(table(X),0), p = c(distribution, 1-sum(distribution)))
# ---- Log-transform Y and inspect/test for normality ----
logY <- log(Y)
class(logY)
head(logY)
summary(logY)
var(logY)
sd(logY)
skewness(logY)
kurtosis(logY)
par(mfrow = c(1,3))
plot(sort(logY), main = "log(Y)", ylab = "log(Y)")
hist(logY, col=terrain.colors(15), freq = F)
curve(dnorm(x, mean=mean(logY), sd=sd(logY)), add=TRUE, lwd=2)
boxplot(logY, col=terrain.colors(15), ylab = "log(Y)")
shapiro.test(logY)
ks.test(logY, "pnorm", mean(logY), sd(logY))
# ---- Normality checks for Z ----
par(mfrow = c(1,1))
hist(Z, col=terrain.colors(15), freq = F)
curve(dnorm(x, mean=mean(Z), sd=sd(Z)), add=TRUE, lwd=2)
ks.test(Z, "pnorm", mean(Z), sd(Z))
shapiro.test(Z)
f1c52d624f9c1e68d7d794337db0f068362abc18 | 5ddab239d1f5727351e43f10fd37024259038179 | /overview_function.R | 0f25faab67bce6b8bfbf955a431bda0ed2279bf8 | [] | no_license | supersambo/r_functions | 9c79c6d8d193d40c716a77226c8405e3461989c1 | 82db13e85f6de84150c5f6076c3645c899d5bf56 | refs/heads/master | 2021-01-10T21:52:02.783602 | 2015-07-02T07:56:39 | 2015-07-02T07:56:39 | 14,520,392 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,330 | r | overview_function.R | overview <- function(input){ #defining domain-function
library(plyr)
input <- as.data.frame(gsub("^https*://","",input,perl=TRUE),stringsAsFactors=FALSE) #delete https:// or http:// an convert to data frame
names(input) <- "url"
input$overview <- TRUE
#initialize progress bar
print("Trying to identify overview pages")
pb <- txtProgressBar(min=0,max=nrow(input),style=3)
pbi=0
for (i in as.numeric(row.names(input))){
pbi <- pbi+1
setTxtProgressBar(pb,pbi) #progress bar
splitted <- strsplit(input$url[i],split="/")
#forward if its just the domain
if(length(splitted[[1]])<2){
next
}
#check if the lastterm matches certain conditions
lastterm <- paste(splitted[[1]][2:length(splitted[[1]])],collapse="/")
lastterm <- paste("/",lastterm,sep="")
check <- vector()
check <- c(check,grepl("[a-zA-Z]+-[a-zA-Z]+-[a-zA-Z]+",lastterm)) #words seperated by - indicitate articletitles
check <- c(check,grepl("[a-zA-Z]+_[a-zA-Z]+_[a-zA-Z]+",lastterm)) #words seperated by _ indicitate articletitles
check <- c(check,grepl("[0-9]+-[a-zA-Z]{3,}-[a-zA-Z]{3,}",lastterm))
check <- c(check,grepl("[0-9]{5,}",lastterm)) #more than 4 numbers indicate article ids
check <- c(check,grepl("p=[0-9]+",lastterm)) #used in weblogs indicates post ids
check <- c(check,grepl("id=[0-9]{2,}",tolower(lastterm))) #article ids
check <- c(check,grepl("detail=[0-9]{2,}",tolower(lastterm)))
check <- c(check,grepl("[0-9]{3,}\\.html$",lastterm))
check <- c(check,grepl("[0-9]{3,}\\.htm$",lastterm))
check <- c(check,grepl("[0-9]{3,}\\.php$",lastterm))
check <- c(check,grepl("[0-9]{3,}\\.php4$",lastterm))
check <- c(check,grepl("[0-9]{3,}\\.aspx$",lastterm))
check <- c(check,grepl("\\.pdf$",lastterm))
check <- c(check,grepl("/[0-9]{4}/[0-9]{2}/",lastterm)) #dates such as 2014/06/
#change overview if at least one condition matched
input$overview[i] <- !TRUE %in% check
}
return(input$overview)
}
#input <- c("derstandard.at/bruederle_baut_mist","stern.de/energiewende/","diezeit.de/energiewende/987654/","sueddeutsche.de/energiewende/die_energiewende_wird_teurer/","bild.de/")
#overview(input)
#input="http://www.schornsteinfeger-crovisier.de/Energienachrichten/Uebersicht.html"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.