blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ec6640ef492ea31d32b7a23d95621ca2bb149de2
|
98599accc936a1e3327d011603a5eea4bbc28258
|
/hw2 b03801053.R
|
1d895a50ff562f452ffe86d38c1053cd2c81d24e
|
[] |
no_license
|
janeru/b03801053
|
1a059f87314ce5533e348108eb6cac8806dac239
|
93c19d5989436dea7a1e98a5e54c710185f3adb7
|
refs/heads/master
| 2020-03-21T16:24:40.323327
| 2018-05-10T03:35:33
| 2018-05-10T03:35:33
| 103,477,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,884
|
r
|
hw2 b03801053.R
|
# Titanic survival analysis (homework script).
# Loads the data via an interactive file picker, cleans it, then answers
# three questions about survival by class, job, and fare.
Titanic <- read.csv(file.choose())
library(dplyr)
dd <- Titanic
# Drop rows with missing values.
dd <- dd[!is.na(dd$age), ]
dd <- dd[!is.na(dd$fare), ]  # also removes records whose fare was coded as missing (9999)
# Data typing: gender/survival as factors, fare binned, age kept numeric.
gender <- factor(dd$gender)
table(dd$fare)
dd$fare <- cut(dd$fare, c(0, 150, 600))
# Q1: among surviving first-class businessmen, where did they board?
temp <- select(dd, survival, gender, joined, class, job) %>%
  filter(survival == 1, class == 1, job == 'Businessman', gender == 1)
# No female passengers were businessmen.
temp1 <- select(dd, survival, gender, joined, class, job, age) %>%
  filter(survival == 1, class == 1, job == 'Businessman', gender == 0)
group_by(temp, joined)        # printed for inspection; result is not stored
summarise(temp1, mean(age))
# All businessmen were male, mean surviving age 41.875; most boarded at
# Southampton and Cherbourg.
# Q2: compare death rates between class 1 and class 3 passengers.
temp2 <- select(dd, class, survival) %>%
  filter(class == 1)
# FIX: the original used `temp2[-which(temp2$survival == 1), ]`; when which()
# matches nothing it returns integer(0) and negative indexing then drops
# EVERY row. Logical subsetting avoids that footgun.
aa <- temp2[temp2$survival != 1, ]
dr <- nrow(aa) / nrow(temp2)      # death rate, class 1
temp3 <- select(dd, class, survival) %>%
  filter(class == 3)
aa1 <- temp3[temp3$survival != 1, ]
dr1 <- nrow(aa1) / nrow(temp3)    # death rate, class 3
# Q3: within class-1 businessmen, fare-band proportions and survival rate.
temp4 <- select(dd, survival, fare, class, job) %>%
  filter(class == 1, job == 'Businessman')
a <- sum(temp4$fare == "(0,150]") / length(temp4$fare)
b <- sum(temp4$fare == "(150,600]") / length(temp4$fare)
aa2 <- temp4[temp4$survival != 0, ]
dr3 <- nrow(aa2) / nrow(temp4)    # survival rate among class-1 businessmen
|
fcbeb62b8949199e831a014f19a0701fe60c9f30
|
f69bcd76b3308c3847135442719c49688b03fed3
|
/man/compareGCMMfit.Rd
|
8dc760cb82de31ad2786b645d1e430fc3352cb75
|
[] |
no_license
|
cran/activityGCMM
|
8922e39b4542cedcbe0a1d117d7cf8291e76dc82
|
db777426190dd415c6ddd485844189d183395ab6
|
refs/heads/master
| 2023-06-02T21:47:56.691319
| 2021-06-14T18:20:02
| 2021-06-14T18:20:02
| 348,029,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,410
|
rd
|
compareGCMMfit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ModelFitFunctions.R
\name{compareGCMMfit}
\alias{compareGCMMfit}
\title{Compare fit of GCMM models based on circular residuals}
\usage{
compareGCMMfit(model1, model2, sample = 10000)
}
\arguments{
\item{model1}{Object of class \code{GCMM} with output from \code{\link{GCMM}} function to compare with model2; residuals must be saved when running the \code{\link{GCMM}} function using \code{saveResids=TRUE}}
\item{model2}{Object of class \code{GCMM} with output from \code{\link{GCMM}} function to compare with model1; residuals must be saved when running the \code{\link{GCMM}} function using \code{saveResids=TRUE}}
\item{sample}{Number of posterior samples; default=10000}
}
\value{
Returns object of class \code{GCMMestimate} with list of output
}
\description{
Compare fit of GCMM models by comparing posterior distributions of the summed circular residuals
}
\examples{
\donttest{
FoxVMGCMM<-GCMM(data=redfoxsample$Radians, RE1=redfoxsample$CameraTrapID, family="vonmises",
saveResids=TRUE, scale=c("2pi"), autorun=FALSE, adapt=0, sample=1000, burnin=500, thin=1)
FoxWCGCMM<-GCMM(data=redfoxsample$Radians, RE1=redfoxsample$CameraTrapID, family="wrappedcauchy",
saveResids=TRUE, scale=c("2pi"), autorun=FALSE, adapt=0, sample=1000, burnin=500, thin=1)
FoxModelCompare<-compareGCMMfit(FoxVMGCMM, FoxWCGCMM) }
}
|
3fb362252152a0b470b1244f3ab68944cd6b3b5d
|
447b1e30413599ff5306408b9383937f5c1bef36
|
/man/Wald.Rd
|
c731922f18cfdf10e3f72db6a9491f495eb4e394
|
[] |
no_license
|
twolodzko/extraDistr
|
874768df1d0c1af75924be8f2cc872e222c3bb6d
|
6cdbe85a98c3a34d8360b8c0ffe6eb78517e0fc3
|
refs/heads/master
| 2022-11-11T03:35:25.808964
| 2022-11-08T10:41:21
| 2022-11-08T10:41:21
| 55,365,786
| 41
| 10
| null | 2022-06-25T20:24:44
| 2016-04-03T19:51:19
|
C++
|
UTF-8
|
R
| false
| true
| 1,998
|
rd
|
Wald.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wald-distribution.R
\name{Wald}
\alias{Wald}
\alias{dwald}
\alias{pwald}
\alias{rwald}
\title{Wald (inverse Gaussian) distribution}
\usage{
dwald(x, mu, lambda, log = FALSE)
pwald(q, mu, lambda, lower.tail = TRUE, log.p = FALSE)
rwald(n, mu, lambda)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{mu, lambda}{location and shape parameters. Scale must be positive.}
\item{log, log.p}{logical; if TRUE, probabilities p are given as log(p).}
\item{lower.tail}{logical; if TRUE (default), probabilities are \eqn{P[X \le x]}
otherwise, \eqn{P[X > x]}.}
\item{n}{number of observations. If \code{length(n) > 1},
the length is taken to be the number required.}
\item{p}{vector of probabilities.}
}
\description{
Density, distribution function and random generation
for the Wald distribution.
}
\details{
Probability density function
\deqn{
f(x) = \sqrt{\frac{\lambda}{2\pi x^3}} \exp\left( \frac{-\lambda(x-\mu)^2}{2\mu^2 x} \right)
}{
f(x) = sqrt(\lambda/(2*\pi*x^3)) * exp((-\lambda*(x-\mu)^2)/(2*\mu^2*x))
}
Cumulative distribution function
\deqn{
F(x) = \Phi\left(\sqrt{\frac{\lambda}{x}} \left(\frac{x}{\mu}-1 \right) \right) +
\exp\left(\frac{2\lambda}{\mu} \right) \Phi\left(-\sqrt{\frac{\lambda}{x}}
\left(\frac{x}{\mu}+1 \right) \right)
}{
F(x) = \Phi(sqrt(\lambda/x)*(x/\mu-1)) + exp((2*\lambda)/\mu) *
\Phi(-sqrt(\lambda/x)*(x/\mu+1))
}
Random generation is done using the algorithm described by Michael, Schucany and Haas (1976).
}
\examples{
x <- rwald(1e5, 5, 16)
hist(x, 100, freq = FALSE)
curve(dwald(x, 5, 16), 0, 50, col = "red", add = TRUE)
hist(pwald(x, 5, 16))
plot(ecdf(x))
curve(pwald(x, 5, 16), 0, 50, col = "red", lwd = 2, add = TRUE)
}
\references{
Michael, J.R., Schucany, W.R., and Haas, R.W. (1976).
Generating Random Variates Using Transformations with Multiple Roots.
The American Statistician, 30(2): 88-90.
}
\concept{Continuous}
\concept{Univariate}
\keyword{distribution}
|
cdb161a4cd82a8b67602c2448bc86d69841ed477
|
55a755507faa59696cce65542a62a9a90ce2b472
|
/man/simulateSpectra-class.Rd
|
03f0c026efca61f91cedffe412d5a4daafcfc754
|
[] |
no_license
|
asmitapoddar/BayesSentinel
|
ac9cf60d1276e41ad595217908488d25dc5ae925
|
f823d184d82c1d8eb6323f81eea0d873ebb69085
|
refs/heads/master
| 2020-03-22T17:42:45.140743
| 2019-04-01T08:10:14
| 2019-04-01T08:10:14
| 140,410,874
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,066
|
rd
|
simulateSpectra-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulateSpectra.R
\docType{class}
\name{simulateSpectra}
\alias{simulateSpectra}
\alias{simulateSpectra-class}
\title{Create a list with a simulated data set of spectra}
\usage{
simulateSpectra(...)
}
\arguments{
\item{...}{any parameters to be input into the function}
}
\description{
Simulate one or more Gaussian spectra at regularly sampling time
}
\section{Slots}{
\describe{
\item{\code{nbPixel}}{number of pixels belonging to class k}
\item{\code{nbCluster}}{number of cluster}
\item{\code{nbSpectrum}}{number of spectra}
\item{\code{simulationType}}{type of simulation. Available options are "gaussian" and
"tstudent". Default is "gaussian".}
\item{\code{modelname}}{type of model to be used to build covariance matrix.
Available options are "full" and "parsimonious". Default is "full".}
\item{\code{kernelSpectra}}{type of kernel to be used to simulate spectra. Available options
are "diag", "epanechnikov", "gaussian", "exponential", "uniform", "quadratic"
, "circular", "triangular", "rational quadratic", "inverse multiquadratic".
Default is "gaussian".}
\item{\code{kernelTime}}{type of kernel to be used for simulating time. Available options are
"diag", "epanechnikov", "gaussian", "exponential", "uniform", "quadratic",
"circular", "triangular", "rational quadratic", "inverse multiquadratic".
Default is "gaussian".}
\item{\code{sigma}}{a vector of size nbSpectrum giving the variance level of
the spectrum}
\item{\code{nbSampling}}{number of time intervals of the simulation}
\item{\code{times}}{time intervals of the simulation}
\item{\code{width}}{the width of the kernel to use for "gaussian" simulation. Default is 50.}
\item{\code{gamma}}{degrees of freedom used for simulating "tstudent" distribution of data.
Default is 3.}
\item{\code{labels}}{class labels of the data}
\item{\code{result}}{return a list of simulated data}
}}
\examples{
m = new("simulateSpectra")
res = simulate(m)
}
\author{
Serge Iovleff, Asmita Poddar & Florent Latimier
}
|
64687872557638197c9d55eda8a32ef221df4067
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#138.A#48.c#.w#7.s#44.asp/ctrl.e#1.a#3.E#138.A#48.c#.w#7.s#44.asp.R
|
de9c44b91a53c3edf83561f2cc2d25d64a705fe2
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91
|
r
|
ctrl.e#1.a#3.E#138.A#48.c#.w#7.s#44.asp.R
|
7d5c47952c33a2e0f5cab863954a4195 ctrl.e#1.a#3.E#138.A#48.c#.w#7.s#44.asp.qdimacs 7913 23182
|
4bacd425fd7c1b46eb65dc8f40689796fdfb5c2b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MarginalMediation/examples/mma.Rd.R
|
246e22a25248cb66eb678673875607958f002219
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 611
|
r
|
mma.Rd.R
|
# Auto-extracted example for MarginalMediation::mma() (from mma.Rd).
# Fits an outcome model and a mediator model on the NHANES 2010 sample,
# then estimates marginal indirect effects with 10 bootstrap resamples.
library(MarginalMediation)
### Name: mma
### Title: Marginal Mediation
### Aliases: mma
### ** Examples
## A minimal example:
library(furniture)
data(nhanes_2010)
# outcome model (b and c paths): marijuana use ~ meals at home + covariates
bcpath = glm(marijuana ~ home_meals + gender + age + asthma,
data = nhanes_2010,
family = "binomial")
# mediator model (a path): meals at home ~ covariates
apath = glm(home_meals ~ gender + age + asthma,
data = nhanes_2010,
family = "gaussian")
# indirect effects of each covariate through home_meals; printed via ( )
(fit = mma(bcpath, apath,
ind_effects = c("genderFemale-home_meals",
"age-home_meals",
"asthmaNo-home_meals"),
boot = 10))
|
1e9496eb999d51e64f41713e2ee67c594742e714
|
43e1c5ff7d04c3e048bb8b29ad3a4af86a519f91
|
/MCEM_Stopping.R
|
8c5a7a74b38a0fc5a395e0b7f2baa166ab3df8bb
|
[] |
no_license
|
hometownjlu/mcem-drug-safety
|
84f27f1597e4076813e202e3c64bfe37c7a2920a
|
53f373c260450d1e5aea50503ab0069aed969a1c
|
refs/heads/master
| 2022-01-05T00:59:06.638811
| 2019-05-10T23:23:40
| 2019-05-10T23:23:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,367
|
r
|
MCEM_Stopping.R
|
# MCEM stopping-rule experiment for drug-safety (pharmacovigilance) signals.
# NOTE(review): setwd() to a user-specific path and rm(list = ls()) at the
# top of a script are fragile; consider removing when run elsewhere.
setwd("~/data/off_label/assignDrug2012")
rm(list = ls())
# function as.PhViD
# Convert a drug/event/count data.frame into the list structure expected by
# the PhViD-style signal-detection functions below.
#
# DATA.FRAME: three columns -- drug label, event label, observed count n11.
# MARGIN.THRES: minimum row/column margin; sparse rows/cols are pruned.
# Returns a list with $L (label pairs), $data (n11 with n1./n.1 margins),
# and $N (grand total of counts).
as.PhViD = function (DATA.FRAME, MARGIN.THRES = 1)
{
data <- DATA.FRAME
# coerce label columns to factors and the count column to double
data[, 1] <- as.factor(DATA.FRAME[, 1])
data[, 2] <- as.factor(DATA.FRAME[, 2])
data[, 3] <- as.double(DATA.FRAME[, 3])
coln <- names(data)
names(data)[3] <- "n11"
# full drug x event contingency table
data_cont <- xtabs(n11 ~ ., data = data)
n1._mat <- apply(data_cont, 1, sum)
n.1_mat <- apply(data_cont, 2, sum)
if (MARGIN.THRES > 1) {
# iteratively prune rows/columns until every margin reaches the threshold
while (sum(n1._mat < MARGIN.THRES) > 0 | sum(n.1_mat <
MARGIN.THRES) > 0) {
data_cont <- data_cont[n1._mat >= MARGIN.THRES, ]
data_cont <- data_cont[, n.1_mat >= MARGIN.THRES]
n1._mat <- apply(data_cont, 1, sum)
n.1_mat <- apply(data_cont, 2, sum)
}
}
# keep only non-empty cells, ordered by row index
coord <- which(data_cont != 0, arr.ind = TRUE)
coord <- coord[order(coord[, 1]), ]
Nb_n1. <- length(n1._mat)
Nb_n.1 <- length(n.1_mat)
libel.medoc <- rownames(data_cont)[coord[, 1]]
libel.effet <- colnames(data_cont)[coord[, 2]]
n11 <- data_cont[coord]
N <- sum(n11)
# marginal totals aligned with each retained cell
n1. <- n1._mat[coord[, 1]]
n.1 <- n.1_mat[coord[, 2]]
RES <- vector(mode = "list")
RES$L <- data.frame(libel.medoc, libel.effet)
colnames(RES$L) <- coln[1:2]
RES$data <- cbind(n11, n1., n.1)
# row key used later as a lookup index: paste(drug, event)
rownames(RES$data) <- paste(libel.medoc, libel.effet)
RES$N <- N
return(RES)
}
##formula 12 in the table
# Negative log-likelihood of a two-component negative-binomial mixture
# (marginal likelihood of the DuMouchel gamma-Poisson model).
# p = (alpha1, beta1, alpha2, beta2, w); n11 = observed counts; E = expected.
.lik2NB <- function(p, n11, E) {
  comp1 <- dnbinom(n11, size = p[1], prob = p[2] / (p[2] + E))
  comp2 <- dnbinom(n11, size = p[3], prob = p[4] / (p[4] + E))
  mixture <- p[5] * comp1 + (1 - p[5]) * comp2
  -sum(log(mixture))
}
########add by ying #############
##likelihood function##
# Negative log-likelihood of the left-truncated two-component NB mixture:
# each component density is renormalised over counts greater than `tronc`.
.likTronc2NB <- function(p, n11, E, tronc) {
  dens_trunc <- function(size, beta) {
    prob <- beta / (beta + E)
    dnbinom(n11, size = size, prob = prob) /
      (1 - pnbinom(tronc, size = size, prob = prob))
  }
  mixture <- p[5] * dens_trunc(p[1], p[2]) +
    (1 - p[5]) * dens_trunc(p[3], p[4])
  -sum(log(mixture))
}
########add by ying#################
# LB <- .QuantileDuMouchel(0.05, Q, PRIOR.PARAM[1] + n11, PRIOR.PARAM[2] +
# E, PRIOR.PARAM[3] + n11, PRIOR.PARAM[4] + E)
#Seuil <- 0.05
#a1 <- PRIOR.PARAM[1] + n11
#b1 <- PRIOR.PARAM[2] + E
#a2 <- PRIOR.PARAM[3] + n11
#b2 <- PRIOR.PARAM[4] + E
# Posterior quantile solver (DuMouchel): for each cell, finds x such that the
# mixture-of-gammas posterior CDF equals Seuil (e.g. 0.05 for the 5% lower
# bound). Vectorised bisection between -1e5 and 1e5, stopping once every
# residual rounds to zero at 1e-4 precision.
.QuantileDuMouchel<-function(Seuil,Q,a1,b1,a2,b2) {
m<-rep(-100000,length(Q))
M<-rep(100000,length(Q))
x<-rep(1,length(Q))
Cout<- .FCoutQuantileDuMouchel(x,Seuil,Q,a1,b1,a2,b2)
while (max(round(Cout*1e4))!=0) {
S<-sign(Cout)
# bisection step: where the residual is positive move into the lower half
# [m, x], where it is negative move into the upper half [x, M]
xnew<-(1+S)/2*((x+m)/2)+(1-S)/2*((M+x)/2)
M<-(1+S)/2*x+(1-S)/2*M
m<-(1+S)/2*m+(1-S)/2*x
x<-xnew
Cout<-.FCoutQuantileDuMouchel(x,Seuil,Q,a1,b1,a2,b2)
}
x
}
# Residual of the mixture-posterior CDF at p: returns CDF(p) - Seuil, where
# the posterior is a Q-weighted mixture of Gamma(a1, b1) and Gamma(a2, b2).
.FCoutQuantileDuMouchel <- function(p, Seuil, Q, a1, b1, a2, b2) {
  cdf1 <- pgamma(p, shape = a1, rate = b1)
  cdf2 <- pgamma(p, shape = a2, rate = b2)
  Q * cdf1 + (1 - Q) * cdf2 - Seuil
}
# function GPS
# GPS / MGPS (Multi-item Gamma Poisson Shrinker) disproportionality scoring.
#
# DATABASE: list from as.PhViD() with $data (n11, n1., n.1), $N and $L labels.
# RR0: relative-risk threshold defining H0: lambda <= RR0.
# MIN.n11: drop cells with observed count below this value.
# DECISION / DECISION.THRES: signal-selection rule (1 = FDR cut,
#   2 = fixed number of signals, 3 = threshold on the ranking statistic).
# RANKSTAT: 1 = posterior P(H0), 2 = 5% posterior quantile of lambda,
#   3 = posterior E[log2(lambda)].
# TRONC / TRONC.THRES: fit the prior on a left-truncated likelihood instead.
# PRIOR.INIT: starting values for the 5 hyperparameters.
# PRIOR.PARAM: fixed hyperparameters; when supplied, the nlm fit is skipped.
# Returns a list: INPUT.PARAM, STOPPING (Q and E[log2 lambda], consumed by
# the MCEM stopping rule below), PARAM, ALLSIGNALS, SIGNALS, NB.SIGNALS.
GPS = function (DATABASE, RR0 = 1, MIN.n11 = 1, DECISION = 1, DECISION.THRES = 0.05,
RANKSTAT = 1, TRONC = FALSE, TRONC.THRES = 1, PRIOR.INIT = c(alpha1 = 0.2, beta1 = 0.06, alpha2 = 1.4, beta2 = 1.8, w = 0.1), PRIOR.PARAM = NULL)
{
DATA <- DATABASE$data
N <- DATABASE$N
L <- DATABASE$L
n11 <- DATA[, 1]
n1. <- DATA[, 2]
n.1 <- DATA[, 3]
# expected count under independence of the drug and event margins
E <- DATA[, 2] * DATA[, 3]/N
P_OUT <- TRUE
if (is.null(PRIOR.PARAM)) {
# estimate the 5 prior hyperparameters by maximum likelihood via nlm
P_OUT <- FALSE
if (TRONC == FALSE) {
# fit over the FULL contingency table, including empty cells
data_cont <- xtabs(DATA[, 1] ~ L[, 1] + L[, 2])
n1._mat <- apply(data_cont, 1, sum)
n.1_mat <- apply(data_cont, 2, sum)
n1._c <- rep(n1._mat, times = length(n.1_mat))
n.1_c <- rep(n.1_mat, each = length(n1._mat))
E_c <- n1._c * n.1_c/N
n11_c <- as.vector(data_cont)
p_out <- suppressWarnings(nlm(.lik2NB, p = PRIOR.INIT,
n11 = n11_c, E = E_c, iterlim = 500))
}
if (TRONC == TRUE) {
# fit only on counts >= TRONC.THRES with the truncated likelihood
tronc <- TRONC.THRES - 1
p_out <- suppressWarnings(nlm(.likTronc2NB, p = PRIOR.INIT,
n11 = n11[n11 >= TRONC.THRES], E = E[n11 >= TRONC.THRES],
tronc, iterlim = 500))
}
PRIOR.PARAM <- p_out$estimate # fitted hyperparameters replace the defaults
code.convergence <- p_out$code
}
if (MIN.n11 > 1) {
# restrict all per-cell vectors to cells with n11 >= MIN.n11
E <- E[n11 >= MIN.n11]
n1. <- n1.[n11 >= MIN.n11]
n.1 <- n.1[n11 >= MIN.n11]
LL <- data.frame(drugs = L[, 1], events = L[, 2], n11)
LL1 <- LL[, 1][n11 >= MIN.n11]
LL2 <- LL[, 2][n11 >= MIN.n11]
rm(list = "L")
L <- data.frame(LL1, LL2)
n11 <- n11[n11 >= MIN.n11]
}
Nb.Cell <- length(n11)
post.H0 <- vector(length = Nb.Cell)
# Q: posterior probability each cell belongs to the first mixture component
Q <- PRIOR.PARAM[5] * dnbinom(n11, size = PRIOR.PARAM[1],
prob = PRIOR.PARAM[2]/(PRIOR.PARAM[2] + E))/(PRIOR.PARAM[5] *
dnbinom(n11, size = PRIOR.PARAM[1], prob = PRIOR.PARAM[2]/(PRIOR.PARAM[2] +
E)) + (1 - PRIOR.PARAM[5]) * dnbinom(n11, size = PRIOR.PARAM[3],
prob = PRIOR.PARAM[4]/(PRIOR.PARAM[4] + E)))
# posterior probability of H0: lambda <= RR0 (mixture of gamma CDFs)
post.H0 <- Q * pgamma(RR0, PRIOR.PARAM[1] + n11, PRIOR.PARAM[2] +
E) + (1 - Q) * pgamma(RR0, PRIOR.PARAM[3] + n11, PRIOR.PARAM[4] +
E)
# posterior expectation of log2(lambda) (EBGM exponent; formula 10)
postE <- log(2)^(-1) * (Q * (digamma(PRIOR.PARAM[1] + n11) -
log(PRIOR.PARAM[2] + E)) + (1 - Q) * (digamma(PRIOR.PARAM[3] +
n11) - log(PRIOR.PARAM[4] + E)))
# 5% posterior quantile of lambda (EB05-style lower bound)
LB <- .QuantileDuMouchel(0.05, Q, PRIOR.PARAM[1] + n11, PRIOR.PARAM[2] +
E, PRIOR.PARAM[3] + n11, PRIOR.PARAM[4] + E)
alpha1 <- PRIOR.PARAM[1] + n11
beta1 <- PRIOR.PARAM[2] + E
alpha2 <- PRIOR.PARAM[3] + n11
beta2 <- PRIOR.PARAM[4] + E
# posterior variance of lambda under the two-component mixture
var <- Q*(1-Q)*(alpha1/beta1-alpha2/beta2)^2+Q*alpha1/(beta1^2)+(1-Q)*alpha2/(beta2^2)
var <- var*1.0/N # scaled by N (used downstream as a standard-error proxy)
if (RANKSTAT == 1)
RankStat <- post.H0
if (RANKSTAT == 2)
RankStat <- LB
if (RANKSTAT == 3)
RankStat <- postE
# FDR/FNR/Se/Sp along the ranking (ascending for postH0, descending otherwise)
if (RANKSTAT == 1) {
FDR <- (cumsum(post.H0[order(RankStat)])/(1:length(post.H0)))
FNR <- rev(cumsum((1 - post.H0)[order(1 - RankStat)]))/(Nb.Cell -
1:length(post.H0))
Se <- cumsum((1 - post.H0)[order(RankStat)])/(sum(1 -
post.H0))
Sp <- rev(cumsum(post.H0[order(1 - RankStat)]))/(Nb.Cell -
sum(1 - post.H0))
}
if (RANKSTAT == 2 | RANKSTAT == 3) {
FDR <- (cumsum(post.H0[order(RankStat, decreasing = TRUE)])/(1:length(post.H0)))
FNR <- rev(cumsum((1 - post.H0)[order(1 - RankStat, decreasing = TRUE)]))/(Nb.Cell -
1:length(post.H0))
Se <- cumsum((1 - post.H0)[order(RankStat, decreasing = TRUE)])/(sum(1 -
post.H0))
Sp <- rev(cumsum(post.H0[order(1 - RankStat, decreasing = TRUE)]))/(Nb.Cell -
sum(1 - post.H0))
}
# number of signals to report, per the chosen decision rule
if (DECISION == 1)
Nb.signaux <- sum(FDR <= DECISION.THRES)
if (DECISION == 2)
Nb.signaux <- min(DECISION.THRES, Nb.Cell)
if (DECISION == 3) {
if (RANKSTAT == 1)
Nb.signaux <- sum(RankStat <= DECISION.THRES, na.rm = TRUE)
if (RANKSTAT == 2 | RANKSTAT == 3)
Nb.signaux <- sum(RankStat >= DECISION.THRES, na.rm = TRUE)
}
Q_func <- Q
Expect_Q <- postE
RES <- vector(mode = "list")
RES$INPUT.PARAM <- data.frame(RR0, MIN.n11, DECISION, DECISION.THRES,
RANKSTAT, TRONC, TRONC.THRES)
# per-cell Q and E[log2 lambda]; consumed by the MCEM stopping rule
RES$STOPPING <- data.frame(Q_func, Expect_Q)
RES$PARAM <- vector(mode = "list")
if (P_OUT == TRUE)
RES$PARAM$PRIOR.PARAM <- data.frame(PRIOR.PARAM)
if (P_OUT == FALSE) {
RES$PARAM$PRIOR.INIT <- data.frame(PRIOR.INIT)
RES$PARAM$PRIOR.PARAM <- PRIOR.PARAM
RES$PARAM$CONVERGENCE <- code.convergence
}
if (RANKSTAT == 1) {
RES$ALLSIGNALS <- data.frame(L[, 1][order(RankStat)],
L[, 2][order(RankStat)], n11[order(RankStat)], E[order(RankStat)],
RankStat[order(RankStat)], (n11/E)[order(RankStat)],
n1.[order(RankStat)], n.1[order(RankStat)], FDR,
FNR, Se, Sp,var)
colnames(RES$ALLSIGNALS) <- c("drug", "event", "count",
"expected count", "postH0", "n11/E", "drug margin",
"event margin", "FDR", "FNR", "Se", "Sp","var")
}
if (RANKSTAT == 2 | RANKSTAT == 3) {
RES$ALLSIGNALS <- data.frame(L[, 1][order(RankStat, decreasing = TRUE)],
L[, 2][order(RankStat, decreasing = TRUE)], n11[order(RankStat,
decreasing = TRUE)], E[order(RankStat, decreasing = TRUE)],
RankStat[order(RankStat, decreasing = TRUE)], (n11/E)[order(RankStat,
decreasing = TRUE)], n1.[order(RankStat, decreasing = TRUE)],
n.1[order(RankStat, decreasing = TRUE)], FDR, FNR,
Se, Sp, post.H0[order(RankStat, decreasing = TRUE)], var)
if (RANKSTAT == 2)
colnames(RES$ALLSIGNALS) <- c("drug", "event", "count",
"expected count", "Q_0.05(lambda)", "n11/E",
"drug margin", "event margin", "FDR", "FNR",
"Se", "Sp", "postH0", "var")
if (RANKSTAT == 3)
colnames(RES$ALLSIGNALS) <- c("drug", "event", "count",
"expected count", "post E(Lambda)", "n11/E",
"drug margin", "event margin", "FDR", "FNR",
"Se", "Sp", "postH0", "var")
}
RES$SIGNALS <- RES$ALLSIGNALS[1:Nb.signaux, ]
RES$NB.SIGNALS <- Nb.signaux
return(RES)
}
# Fit the baseline MGPS (GPS) model on the raw reports and cache it to disk.
# Returns FALSE without recomputing when the dump file already exists,
# TRUE after fitting and saving (sample + mgps) to baseline.dump.
InitMgps <- function(sample, baseline.dump) {
if (file_test("-f", baseline.dump)) {
return(FALSE)
}
# build the drug x ADR contingency table and keep only non-empty cells
testTwoWayTable = base::table(sample$drug_id,sample$adr_id) # side effect of spark
testDataFrame = as.data.frame(testTwoWayTable)
colnames(testDataFrame) = c("drug_id","adr_id","freq")
testDataFrame1 = testDataFrame[testDataFrame$freq>0,]
# write.table(testDataFrame1,"omop_adr_related_random_contigency_table.tsv",row.names = FALSE,sep="\t")
# load contingency table
# testDataFrame1 = read.table("omop_adr_related_random_contigency_table.tsv",sep="\t",comment="",header=TRUE)
# score disproportionality: RANKSTAT = 2 ranks by the 5% posterior
# quantile of lambda
PhViDdata <- as.PhViD(testDataFrame1)
mgps <- GPS(PhViDdata, DECISION = 3, DECISION.THRES = 2, RANKSTAT = 2)
# peek at the top 5 signals (evaluated but not printed inside a function)
mgps$ALLSIGNALS[1:5, c('drug', 'event', 'Q_0.05(lambda)', 'Se')]
save(sample, mgps, file = baseline.dump)
return(TRUE)
}
# Load the de-duplicated FAERS reports and fit/restore the baseline model.
faers.omop.random.report <- "faers.tsv"
#"faers2011.tsv"
# load raw data
#raw.data = read.table(faers.omop.random.report,header=TRUE, sep="\t", comment="")
#colnames(raw.data) = c("report_id","drug_id","role_cod","adr_id","report_year","age","gender","occu_cod")
#unique.data = unique(raw.data[,c("report_id","drug_id","adr_id")])
unique.data = read.table(faers.omop.random.report,header=TRUE, sep="\t", comment="",quote="",stringsAsFactors=FALSE)
unique.data = as.data.frame(unique.data)
baseline.faers.omop.dump <- "faers.RData"
# initialize risk score (cached); load() restores `sample` and `mgps`
InitMgps(unique.data, baseline.faers.omop.dump)
load(baseline.faers.omop.dump)
#save.image("./unittestdump.RData")
#######################################################################
# the MCEM procedure
library(dplyr)
# subset sample and prepare the score lookup table `dict`
# sample <- unique.data[1:800,]
sample <- unique.data
# dict is keyed by GPS's rownames: paste(drug_id, adr_id)
dict <- mgps$ALLSIGNALS[, c('Q_0.05(lambda)', 'Se')] # row key: paste(1367268, 35104756)
# TODO debug on what stat to use
#rank.stat <- 'post E(Lambda)'
rank.stat <- 'Q_0.05(lambda)'
colnames(dict) = c(rank.stat, 'Se')
# Floor a numeric vector at 0.01, mapping NA entries to 0.01 as well.
# (Despite the name, values are clamped to 1e-2, not zero; this keeps the
# multinomial sampling weights strictly positive.)
na.zero <- function (x) {
  floor_mask <- is.na(x) | x < 1e-2
  x[floor_mask] <- 1e-2
  x
}
# Monte-Carlo (E) step of MCEM: for every report/ADR pair, draw one
# "primary cause" drug with probability proportional to each candidate
# drug's current signal score (lambda). `dict` must be keyed by
# paste(drug_id, adr_id) and carry rank.stat + 'Se' columns; `rank.stat`
# is a script-level global.
McStep <- function(sample, dict) {
# lookup by dict, signal goes to 'lambda' (na.zero keeps weights positive)
sample['index'] <- paste(sample$drug_id, sample$adr_id)
result <- sample %>% mutate(lambda = na.zero(dict[index, rank.stat]), Se = dict[index, 'Se'])
# determine primary cause by a single multinomial draw per group;
# the selected set is a subset of the overall sample, so drug-ADR pairs in
# the OMOP reference can only decrease
selected <- result %>%
group_by(report_id, adr_id) %>%
summarise(drug_id = drug_id[which(rmultinom(1, 1, lambda)[,1]>0, arr.ind=TRUE)])
return(as.data.frame(selected)[, c('report_id', 'drug_id', 'adr_id')])
}
# Maximisation (M) step of MCEM: rebuild the contingency table from the
# current completed sample and refit GPS starting from the supplied prior.
MaxStep <- function(sample, prior) {
# calculate contingency table, keeping non-empty cells only
testTwoWayTable = base::table(sample$drug_id,sample$adr_id)
testDataFrame = as.data.frame(testTwoWayTable)
colnames(testDataFrame) = c("drug_id","adr_id","freq")
testDataFrame1 = testDataFrame[testDataFrame$freq>0,]
PhViDdata <- as.PhViD(testDataFrame1)
mgps <- GPS(PhViDdata, DECISION = 3, DECISION.THRES = 2, RANKSTAT = 2, PRIOR.INIT = prior)
# RANKSTAT = 2 ranks by the 5% posterior quantile of lambda
#prior <- mgps$PARAM$PRIOR.PARAM
#var <- get.lambda.Variance(PhViDdata, prior)
print(mgps$ALLSIGNALS[1:5, c('drug', 'event', rank.stat, 'Se')])
#return_list <- list("score"=mgps, "variance"=var)
return(mgps)
}
# MCEM main loop: alternate McStep (stochastic E) and MaxStep (M) until the
# mean absolute change in the per-cell mixture probabilities Q falls
# below 1e-3, or max.iters is reached.
# iteration 1
next.sample <- McStep(sample, dict)
# save sampling results - compare with PS
#write.csv(as.data.frame(next.sample),"sample1.csv",row.names=FALSE)
next.sample <- rbind(sample, next.sample)
next.mgps <- MaxStep(next.sample, mgps$PARAM$PRIOR.PARAM)
#next.mgps <- return_list$score
#next.var <- return_list$variance
print("First iteration is done")
#save(next.sample,next.mgps, file = paste(1,".RData",sep=""))
# continue to iterate
max.iters <- 500
for (i in 2:max.iters) {
message(sprintf("Iteration: %d\n", i))
prev.mgps = next.mgps
# refresh the lookup table from the latest fit
dict <- next.mgps$ALLSIGNALS[, c(rank.stat, 'Se')]
#Running time
ptm <- proc.time()
prev.sample <- next.sample
next.sample <- McStep(sample, dict)
solo.sample <- next.sample
#write.csv(as.data.frame(next.sample),paste("sample",i,".csv",sep=""),row.names=FALSE)
# accumulate samples across iterations before refitting
next.sample <- rbind(prev.sample, next.sample)
print(proc.time() - ptm)
#next.mgps <- MaxStep(next.sample, mgps$PARAM$PRIOR.PARAM) # TODO should be next.mgps$PARAM$PRIOR.PARAM?
#error handle
ptm <- proc.time()
tryCatch({
# for also solo next sample
next.mgps <- MaxStep(next.sample, mgps$PARAM$PRIOR.PARAM) # TODO should be next.mgps$PARAM$PRIOR.PARAM?
#next.mgps <- return_list$score
#next.var <- return_list$variance
}, warning = function(war) {
# warning handler picks up where the warning was generated
print('warning')
}, error = function(err) {
# error handler; NOTE(review): the error is swallowed and the loop keeps
# the previous next.mgps -- confirm this best-effort behaviour is intended
print('error')
}, finally = {
}) # END tryCatch
print(proc.time() - ptm)
if (i %% 1 == 0) {
# stopping rule: mean absolute change in Q between successive fits
# TODO evaluate accuracy
#save(next.sample,next.mgps, file = paste(i,".RData",sep=""))
Q.current <- next.mgps$STOPPING$Q_func
Q.prev <- prev.mgps$STOPPING$Q_func
diff <- mapply('-', Q.current, Q.prev)
diff <- mapply('abs', diff)
val <- Reduce("+", diff)/length(diff)
message(sprintf("Difference: %f\n", val))
if (val < 1e-3){
print('Algorithm converged!')
break
}
}
}
# persist the accumulated-sample fit and a fit on the last solo sample
save(next.sample,next.mgps, file = paste(i,"_accum.RData",sep=""))
solo.mgps <- MaxStep(solo.sample, mgps$PARAM$PRIOR.PARAM)
save(solo.sample,solo.mgps, file = paste(i,"_solo.RData",sep=""))
|
95f86aa1fa3a1834559c2908d9435a6e007ce5f7
|
cd34c9b4a9e7e0994d177013c204f46d63445ba4
|
/time_series_analysis_r/Time Series Analysis.R
|
e16ef3b10928144a838f89a1bc1d8e81344c6337
|
[] |
no_license
|
Muthukumarantce/datacamp_python
|
3d84859932a37d624a1eeda2fdb2c0f7cffc2582
|
a8c53296b07e3fcb0e713fa8313e2c947095825b
|
refs/heads/master
| 2020-09-17T05:14:26.401118
| 2020-03-28T01:24:47
| 2020-03-28T01:24:47
| 224,001,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,501
|
r
|
Time Series Analysis.R
|
# TIME SERIES - SEQUENCE OF DATA IN CHRONOLOGICAL ORDER
# It is an object which has time as index, represented on the x axis.
#
# FIX: R has no triple-quoted docstrings; the original Python-style """
# blocks were evaluated as stray multi-line string literals, so they are
# rewritten as comments here.
#
# Basic models:
#   White noise
#   Random walk
#   Auto regression
#   Simple moving average

# EXPLORING RAW TIME SERIES
# Print the Nile dataset
print(Nile)
# List the number of observations in the Nile dataset
length(Nile)
# Display the first 10 elements of the Nile dataset
head(Nile, n = 10)
# Display the last 12 elements of the Nile dataset
tail(Nile, n = 12)
# Plot the Nile data
plot(Nile)
frequency(Nile)
# Plot the Nile data with xlab and ylab arguments
plot(Nile, xlab = "Year", ylab = "River Volume (1e9 m^{3})")
# Plot the Nile data with xlab, ylab, main, and type arguments
# type = "b" draws both lines and points
plot(Nile, xlab = "Year", ylab = "River Volume (1e9 m^{3})",
     main = "Annual River Nile Volume at Aswan, 1871-1970", type = "b")
# par() sets or queries graphical parameters; here: 2 stacked panels
par(mfrow = c(2, 1))
plot(continuous_time_index, continuous_series, type = "b")

# Varieties of time series data:
#   equally spaced / approximately spaced / missing values.
# Basic assumptions:
#   consecutive observations are equally spaced, with a discrete time
#   observation index (may hold only approximately).
# Sampling-frequency helpers:
#   start()/end()  - time index of the first/last observation
#   time()         - vector of time indices, one per observation
#   deltat()       - fixed time interval between observations
#   frequency()    - number of observations per unit time
#   cycle()        - position in the cycle of each observation

# Practice on AirPassengers: monthly total international airline passengers
# (in thousands) from 1949 to 1960.
# Plot AirPassengers
plot(AirPassengers)
# View the start and end dates of AirPassengers
start(AirPassengers)
end(AirPassengers)
# Use time(), deltat(), frequency(), and cycle() with AirPassengers
time(AirPassengers)
deltat(AirPassengers)
frequency(AirPassengers)
cycle(AirPassengers)
str(AirPassengers)

# Missing values
# Impute mean values to NA in AirPassengers
AirPassengers[85:96] <- mean(AirPassengers, na.rm = TRUE)
# Add the complete AirPassengers data to your plot
# (rm() drops the locally modified copy, so points() below plots the
# original dataset from the datasets package)
rm(AirPassengers)
points(AirPassengers, type = "l", col = 2, lty = 3)

# Time series objects:
#   start with a vector of data and apply ts().
#   Why ts()? Improved plotting and access to time-index information.
#   is.ts() checks whether an object is a time series.
data_vector <- runif(100, 1, 100)
plot(data_vector)
time_series <- ts(data_vector, start = 2004, frequency = 4)
plot(time_series)
print(time_series)
is.ts(data_vector)
is.ts(AirPassengers)
is.ts(EuStockMarkets)
# View the start, end, and frequency of eu_stocks
start(EuStockMarkets)
end(EuStockMarkets)
frequency(EuStockMarkets)
plot(EuStockMarkets)
str(EuStockMarkets)
# FIX: ts.plot() returns NULL, so chaining legend() onto it with `+` raised
# "non-numeric argument to binary operator"; draw the legend separately.
ts.plot(EuStockMarkets, col = 1:4, xlab = "Year", ylab = "Index value",
        main = "Major European Stock Indices,1991-1998")
legend("topleft", colnames(EuStockMarkets), lty = 1, col = 1:4, bty = "n")

# Spotting trends: periodic, linear, variance, rapid growth.
# Sample transformations:
#   log()      -> linearizes rapid-growth trends; stabilizes a series whose
#                 variability increases over time (shrinks large values most)
#   diff()     -> removes a linear trend
#   diff(x, s) -> seasonal difference; removes periodic trends (e.g. s = 4)

# Generate the first difference of z
dz <- diff(z)
# Plot dz
ts.plot(dz)
# View the length of z and dz, respectively
length(z)
length(dz)
|
11b34aceacc33752a8ea4d1d17b935ea59f95aee
|
43a4e09bce1dd2da2c9f79db4096109a85e0731b
|
/tests/testthat/test-services.R
|
016630f1e45604da73d756b8bccba777c72130ab
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fabianazioti/rwlts
|
5668dfaa15465e6f2abb14d608ff189b0d567d9d
|
fc39cd579ddb17214dc8ecc61d4e9790ecf75ddc
|
refs/heads/main
| 2023-07-16T02:44:00.755220
| 2021-09-03T12:05:12
| 2021-09-03T12:05:12
| 319,329,913
| 0
| 0
|
MIT
| 2020-12-07T13:33:58
| 2020-12-07T13:33:58
| null |
UTF-8
|
R
| false
| false
| 3,105
|
r
|
test-services.R
|
# Integration tests for the rwlts service wrappers (list_collections,
# describe_collection, get_trajectory) against the Brazil Data Cube WLTS
# service. HTTP interactions are replayed from a vcr cassette, so the
# order of requests below must match the recorded cassette.
context("test_services")
testthat::test_that("services api", {
vcr::use_cassette("services_api", {
# skip cran check test
testthat::skip_on_cran()
wlts_bdc <- "https://brazildatacube.dpi.inpe.br/wlts/"
#---- list collections test
# /list_collections - OK: returns a character vector of collection names
testthat::expect_equal(
object = class(list_collections(wlts_bdc)),
expected = "character")
# /list_collections - Error: NULL URL is rejected
testthat::expect_error(
object = list_collections(NULL))
# /list_collections - Error: malformed URL is rejected
testthat::expect_error(
object = list_collections("."))
#---- describe collection test
# /describe_collection (structure test): metadata comes back as a list
testthat::expect_equal(
object = class(describe_collection(wlts_bdc, "deter_amazonia_legal")),
expected = "list"
)
# /describe_collection (signature test): both arguments are required
testthat::expect_error(
object = describe_collection(NULL)
)
testthat::expect_error(
object = describe_collection(wlts_bdc, NULL)
)
#---- get trajectory test
# get trajectory test (simple trajectory request returns a "wlts" object)
testthat::expect_s3_class(
object = get_trajectory(
URL = wlts_bdc,
latitude = -12,
longitude = -54,
start_date = "2015-01-01",
end_date = "2017-01-01",
collections = "mapbiomas5_amazonia"),
class = "wlts"
)
# get trajectory test (signature test: URL is required)
testthat::expect_error(
object = get_trajectory(
URL = NULL,
latitude = -12,
longitude = -54,
collections = "mapbiomas5_amazonia")
)
# get trajectory test (semantic point request test: NULL drops an element,
# so latitude/longitude lengths no longer match)
testthat::expect_error(
object = get_trajectory(
URL = wlts_bdc,
latitude = c(-12, NULL),
longitude = c(-54, -55),
collections = "mapbiomas5_amazonia")
)
## latitude test: out of the valid [-90, 90] range
testthat::expect_error(
object = get_trajectory(
URL = wlts_bdc,
latitude = -95,
longitude = -54,
collections = "mapbiomas5_amazonia")
)
## longitude test: out of the valid [-180, 180] range
testthat::expect_error(
object = get_trajectory(
URL = wlts_bdc,
latitude = -12,
longitude = 185,
collections = "mapbiomas5_amazonia")
)
## time interval request: malformed date formats are rejected
testthat::expect_error(
object = get_trajectory(
URL = wlts_bdc,
latitude = -12,
longitude = -54,
start_date = "2015/01/01",
collections = "mapbiomas5_amazonia")
)
testthat::expect_error(
object = get_trajectory(
URL = wlts_bdc,
latitude = -12,
longitude = -54,
end_date = "2015-01",
collections = "mapbiomas5_amazonia")
)
# start_date after end_date is rejected
testthat::expect_error(
object = get_trajectory(
URL = wlts_bdc,
latitude = -12,
longitude = -54,
start_date = "2017-01-01",
end_date = "2015-01-01",
collections = "mapbiomas5_amazonia")
)
})
})
|
345114718691612c4d3907992ca87edb810e2d74
|
8b66fb29a36f182f2907f3d87967f41854d576bb
|
/analysis_norm.R
|
246c43fe82015426c635b90bae625684afe35fb0
|
[] |
no_license
|
kischacht/doping-olympia
|
3697655fe1005ef8abb1fb7a045b2af7646dfdf0
|
fe622e6966d08f3939fce8a7bbf822a230d3a70b
|
refs/heads/master
| 2020-12-07T00:31:41.792177
| 2016-08-17T12:53:31
| 2016-08-17T12:53:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,162
|
r
|
analysis_norm.R
|
# analysis_norm.R
# Doping cases in Olympic athletics: case counts per year, per country and
# per sex, normalised by the number of participating athletes.
# German terms used in the data: Jahr = year, Land = country,
# Geschlecht = sex, "faelle" = cases, Sonstige = others.
#
# NOTE(review): one setwd() per machine the author uses; the call whose path
# does not exist errors (harmless when run interactively). Relative paths
# would be more robust.
setwd("/home/kira/ownCloud/BR Data/doping olympische spiele")
setwd("/Users/administrator/Desktop/doping olympische spiele")
library(dplyr)
library(ggplot2)
cases <- read.csv("IAAF-doping-cases-clean-deutsch.csv", stringsAsFactors = F, encoding="utf-8")
#athl <- read.csv("participants-by-event.csv", quote="", encoding="utf-8", sep=";")
#names(athl) = c("Jahr","Ort","Name","Land","Geschlecht","Sport","Disziplin","Medal")
library(tidyr)
# Participant counts: wide columns named like "X<year>.<sex>" are reshaped
# long, then year and sex are recovered from the column label.
athl <- read.csv("iaaf-teilnehmer.csv",sep=";",stringsAsFactors = F)
athl <- gather(athl, Geschlecht, count, 2:length(athl))
athl$Jahr <- as.numeric(substr(athl$Geschlecht,2,5))
athl$Geschlecht <- gsub("^X.*\\.","",athl$Geschlecht)
names(athl)[1] <- "Land"
write.csv(athl, "iaaf-teilnehmer.csv", row.names = F)
athl <- read.csv("iaaf-teilnehmer.csv",stringsAsFactors = F)
# Cases per year (unique athlete names), normalised by participant count
cpy <- cases %>% group_by(Jahr) %>% summarize(count=length(unique(Name)))
# BUG FIX: the next line was a pipe continuation whose leading "%>%" had been
# commented out, so it executed as a broken stand-alone expression and errored
# at runtime; it is redundant (cpy is already one row per Jahr) and is now
# commented out entirely.
# group_by(Jahr) %>% summarize(count=sum(count)) # %>% arrange(-count)
tmp <- athl %>% group_by(Jahr) %>% summarize(sum = sum(count))
cpy <- left_join(cpy, tmp, by=c("Jahr")) %>% mutate(norm = count/sum)
write.csv(cpy, "fälleprojahr.csv", row.names = F)
# Cases per country (from 1996 on), normalised by participant count
cpc <- cases %>% group_by(COUNTRY, Land, Jahr) %>% filter(Jahr >= 1996) %>%
summarize(count=length(unique(Name))) %>%
group_by(COUNTRY, Land) %>% summarize(count=sum(count)) %>% arrange(-count)
tmp <- athl %>% filter(Jahr >= 1996) %>% group_by(Land) %>% summarize(sum = sum(count))
cpc <- left_join(cpc, tmp, by=c("COUNTRY" = "Land")) %>% mutate(norm = count/sum)
write.csv(cpc, "fälleproland.csv", row.names = F)
# Cases by sex
cpg <- cases %>% group_by(Geschlecht, Jahr) %>% summarize(count=length(unique(Name))) %>%
group_by(Geschlecht) %>% summarize(count=sum(count)) %>% arrange(-count)
tmp <- athl %>% group_by(Geschlecht) %>% summarize(sum=sum(count))
#spread(Geschlecht, count) %>% mutate(wshare = Women/(Women+Men))
cpg <- left_join(cpg, tmp, by="Geschlecht") %>% mutate(norm = count/sum)
write.csv(cpg, "fällenachgeschlecht.csv", row.names = F)
# Cases by sex and year
cpgy <- cases %>% group_by(Geschlecht, Jahr) %>% summarize(count=length(unique(Name))) %>% arrange(-count)
tmp <- athl %>% group_by(Geschlecht, Jahr) %>% summarize(sum=sum(count))
cpgy <- left_join(cpgy, tmp, by=c("Geschlecht","Jahr")) %>% mutate(norm = count/sum)
write.csv(cpgy, "fällenachgeschlechtundjahr.csv", row.names = F)
# Number of doping tests at the Olympic Games per year
tests <- read.csv("testnrs.csv")[c(1,3)]
tests$Number.of.tests <- as.numeric(gsub(",","",tests$Number.of.tests))
save.image(file="datasets.Rdata")
##################
## visualisation #
##################
library(ggplot2)
pdf("results/alle-normiert.pdf", width=14)
# Cases per year
ggplot(cpy, aes(x=Jahr, y=count)) + theme_light() + scale_x_continuous(breaks = rev(unique(cpy$Jahr))) +
ggtitle("Doping Violations in Athletics at Olympic Games") + geom_line(colour="orange") + geom_point() +
geom_text(aes(x=Jahr, label=paste0(count)), vjust=-1)
# IDIOM FIX: the two plots below had been disabled by wrapping them in a bare
# string literal ('...'); they are kept as real comments instead, which is
# behaviourally equivalent but cannot silently break parsing.
# # Cases per year, normalised
# ggplot(cpy, aes(x=Jahr, y=norm)) + theme_light() + scale_x_continuous(breaks = rev(unique(cpgy$Jahr))) +
#   ggtitle("Anteil überführter Athleten über die Jahre") + geom_line(colour="orange") + geom_point() +
#   geom_text(aes(x=Jahr, label=paste0(round(norm*100, 2),"%")), vjust=-1) + scale_y_continuous(labels=scales::percent)
# # Tests per year
# ggplot(tests, aes(x=Year, y=Number.of.tests)) + theme_light() + scale_x_continuous(breaks = rev(unique(tests$Year))) +
#   ggtitle("Gesamtzahl an Doping-Tests bei den olympischen Spielen") + geom_line(colour="orange") + geom_point() +
#   geom_text(aes(x=Year, label=paste0(Number.of.tests)), vjust=-1)
# Cases per country: keep the top 5 countries, lump the rest into "Sonstige"
cpc <- cpc %>% ungroup %>% arrange(-count)
x <- cpc %>% mutate(Land = c(cpc$Land[1:5], rep("Sonstige",length(cpc$Land)-5)))
x <- mutate(x, Land = factor(x$Land, levels=c("Türkei","Belarus","Russland","Ukraine","USA","Sonstige"))) %>%
group_by(Land) %>% summarize(count=sum(count), sum=sum(sum), norm=count/sum)
#x$Land <- factor(c("Turkey","Belarus","Russia","Ukraine","USA","Others"), levels=c("Turkey","Belarus","Ukraine","Russia","USA","Others"))
# Recompute participant total and rate for the "Sonstige" bucket (row 6)
tmp <- athl %>% filter(Jahr >= 1996) %>% group_by(Land) %>% summarize(sum = sum(count))
tmp2 <- filter(tmp, Land != "TUR" & Land != "RUS" & Land != "BLR" & Land != "UKR" & Land != "USA")
x$sum[6] <- sum(tmp2$sum); x$norm[6] <- x$count[6] / x$sum[6]
# Overall mean rate across all countries
cmean <- sum(cpc$count)/sum(tmp$sum) #0.00514601
ggplot(x, aes(x=Land, y=norm, fill=Land)) + theme_minimal() + scale_y_continuous(labels=scales::percent) +
geom_bar(stat="identity") + ggtitle("Anteil überführter Leichtathleten seit 1996") +
geom_hline(yintercept = cmean) + theme(legend.position="none") +
geom_text(aes(x=1, y=cmean, label=paste("Total mean =", paste0(round(cmean*100,2),"%"))), hjust=0,vjust=-0.5)+
geom_text(position= position_dodge(width=0.9), aes(x=Land, label=paste0(count,"/",sum)), vjust=-0.5)
# Cases by sex
cpg <- cpg %>% ungroup
ggplot(cpg, aes(x=Geschlecht, y=count, fill=Geschlecht)) + theme_minimal() + #scale_y_continuous(labels=scales::percent) +
geom_bar(stat="identity") + ggtitle("Anzahl überführter Leichtathleten nach Geschlecht seit 1996") +
theme(legend.position="none", axis.text.x = element_text(angle=45,hjust=1), panel.background = element_rect(fill="white")) +
geom_text(position= position_dodge(width=0.9), aes(x=Geschlecht, label=paste0(count,"/",sum)), vjust=-0.5)
# Disabled plot, also converted from a string-literal block to comments
# # Cases by sex and year
# cpgy <- cpgy %>% ungroup %>% arrange(-norm)
# ggplot(cpgy, aes(x=Jahr, y=norm, fill=Geschlecht)) + scale_x_continuous(breaks = rev(unique(cpgy$Jahr))) + theme_minimal() +
#   geom_bar(stat="identity",position="dodge") + ggtitle("Doping-Fälle nach Geschlecht und Jahr") +
#   theme(legend.position="bottom", panel.background = element_rect(fill="white")) +
#   geom_text(position= position_dodge(width=3.5), aes(x=Jahr, label=paste0(count,"/",sum)), vjust=-0.5)
dev.off()
write.csv(x, "results/fälleproland-reduziert.csv",row.names = F)
# Quick sanity checks printed to the console
length(unique(cases$Name))
sort(unique(cases$Name))
sum(cpy$count)
|
48e4f365eec64ce28bf44404ba9c782213c578ef
|
38088096f84050aece0bc1d73b33b7d845286bba
|
/man/wrapTTest.Rd
|
bd17fbdd5b4b770c6bcd57fc12fc3415c7b51723
|
[] |
no_license
|
Sandy4321/sigr
|
2ea94d62e82cd89eaeed3bf570546a07d03b27cf
|
2129bede56acf7673710893eaf400d4a6dc891ec
|
refs/heads/master
| 2021-01-14T02:35:47.657148
| 2017-02-10T01:21:54
| 2017-02-10T01:21:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 427
|
rd
|
wrapTTest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TTest.R
\name{wrapTTest}
\alias{wrapTTest}
\title{Wrap t.test (difference in means by group).}
\usage{
wrapTTest(x, ...)
}
\arguments{
\item{x}{numeric, data.frame or test.}
\item{...}{extra arguments}
}
\description{
Wrap t.test (difference in means by group).
}
\seealso{
\code{\link{wrapTTest.htest}}, and \code{\link{wrapTTest.data.frame}}
}
|
3adc5e5e1facbdd2fd2e12ea9381821cba466181
|
eb4e032c9e4e6f281cc58efbe2f12cd9f6242b70
|
/MyFirstRscript.R
|
2b928b1ec42f3bad7a55c3863e00231f5bf8080e
|
[
"MIT"
] |
permissive
|
oliverphillpott/INSEADAnalytics
|
a5e70d9a90e77748c07f6bd0e7788c7c68325c79
|
339ebe8205a3bf4374fdcb1b556dd34e5f500a8c
|
refs/heads/master
| 2023-02-28T14:15:44.786830
| 2021-02-05T07:05:06
| 2021-02-05T07:05:06
| 327,044,181
| 0
| 0
|
MIT
| 2021-02-02T20:47:53
| 2021-01-05T15:44:00
|
HTML
|
UTF-8
|
R
| false
| false
| 35
|
r
|
MyFirstRscript.R
|
# My R script -- minimal smoke test: print a greeting to the console.
greeting <- "Hello World"
print(greeting)
|
0c56d4c7aab50bbdf4939559d59fa68c9d6987ef
|
6ff1395ddeedf486f7af75c21cbf546e0622b6c1
|
/man/select_race_first.Rd
|
65f6211c60817f294dd7f98433ff11c1f0fbda88
|
[] |
no_license
|
demographer/censocdev
|
3add321053ba8b4de2595a34ddb8c51519962234
|
6387f4b8f63d047835929cb59c03144f463db90e
|
refs/heads/master
| 2022-07-18T11:03:06.062343
| 2020-05-13T22:37:12
| 2020-05-13T22:37:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 386
|
rd
|
select_race_first.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/select_race_first.R
\name{select_race_first}
\alias{select_race_first}
\title{Select race}
\usage{
select_race_first(data = numapp)
}
\arguments{
\item{data}{NUMAPP data (defaults to the \code{numapp} object)}
}
\value{
data.frame with first race for persons with more than one race
}
\description{
Select race
}
\keyword{internal}
|
1495b2c3ed808705a0ac2f5ee57ea720606d9cd7
|
65328e35d3153a3b139c4be85f3881bfa5960002
|
/stepFilter/lasso_plot.R
|
cb2381028af52714a52a84308eee8adca1368d43
|
[] |
no_license
|
qingyao/CNV_evaluation
|
685fb62ee5409daa7fc1a9eff1dec627c573e21b
|
136d83e15f58a1af3a80df7b6e01e0e1c36a146f
|
refs/heads/master
| 2021-09-04T03:57:14.465576
| 2018-01-15T15:45:01
| 2018-01-15T15:45:01
| 108,848,178
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,623
|
r
|
lasso_plot.R
|
# lasso_plot.R
# Compare segment counts before vs after 1-D lasso smoothing across a grid of
# lambda / gap-size settings, and plot the per-sample reduction.
setwd('/Users/pgweb/arraydata/aroma/EvaluationPack/')
library(R.utils)
library(reshape2)
library(ggplot2)
# Parameter grid: lasso lambdas and gap sizes (as encoded in the file names)
lmds <- c(0.3,0.5,1)
gps <- c('1e4','5e4','1e5')
# NOTE(review): `cids` (the vector of sample IDs iterated below) is never
# defined in this file -- it must exist in the workspace before running.
# BUG FIX: was length(cid), which referenced the not-yet-defined loop
# variable instead of the vector of sample IDs.
conds <- length(cids)*length(lmds)*length(gps)
# One row per (sample, lambda, gap size); rows are gathered in a
# pre-allocated list instead of growing a data.frame with rbind() in the loop.
rows <- vector("list", conds)
i <- 0
for (cid in cids){
  for (lmd in lmds){
    for (gp in gps){
      before_lasso <- countLines(sprintf('test_data/combine_series/%s/segments,cn,5_sdundo_1.tsv',cid))[1]
      # BUG FIX: [1] added for consistency with before_lasso (countLines()
      # returns a value carrying extra attributes).
      after_lasso <- countLines(sprintf('test_data/combine_series/%s/segments,cn,5_sdundo_1,lasso%s_%s.tsv',cid,lmd,gp))[1]
      i <- i + 1
      rows[[i]] <- data.frame(cid,lmd,gp,before_lasso,after_lasso)
    }
  }
}
segCompare <- do.call(rbind, rows)
# Human-readable gap-size labels
gp_dict <- c('1e4'='10kb','5e4'='50kb','1e5'='100kb')
segCompare$gp <- gp_dict[segCompare$gp]
segCompare$lmd_gp <- paste(segCompare$lmd,segCompare$gp,sep=',')
# Look up the array platform for each sample (3rd column of the lookup table)
plf_search <- read.table('select_3_sample_platform.txt',header=T,stringsAsFactors = F)
platforms <- sapply(cids,function(x) plf_search[plf_search$sample==x,3])
segCompare$platform <- platforms[segCompare$cid]
# Long format for plotting: before/after counts become one "variable" column
pdata <- segCompare[,c(1,4:7)]
pdata <- melt(pdata)
pdata$lmd_gp <- factor(pdata$lmd_gp,levels=unique(pdata$lmd_gp))
ggplot(pdata, aes(x=variable, y=value, group=cid)) +
  geom_point(aes(colour=platform), size=3, position=position_dodge(width=0.1)) +
  geom_line(size=0.5, alpha=0.5, position=position_dodge(width=0.1)) +
  xlab('lambda, gapSize') +
  ylab('Number of segments') +
  scale_colour_manual(values=c("#009E73", "#D55E00")) +
  scale_y_log10(limits=c(10,10000),breaks=c(10,31,100,316,1000,3162,1e4)) +
  theme_bw() +
  facet_wrap(~lmd_gp)
ggsave('test_data/1dlasso_seg_reduction.pdf')
|
d131b8c6605e91931fb5a7f01a78ce901223aa81
|
af0185f7ca10731e24ef5979f996059ff6018ed6
|
/man/vartrack_samplesize_detect.Rd
|
6cf9bc4dac60eda44d161c2385fcf42d7467d300
|
[] |
no_license
|
HopkinsIDD/phylosamp
|
d5d81a4e6df0615c82bccf2a9ac8ddd278ddf77f
|
d43c73bfa108ed03cb5e6330518fd5b3fc1d74c2
|
refs/heads/master
| 2023-05-25T05:04:12.240279
| 2023-05-23T22:02:12
| 2023-05-23T22:02:12
| 266,897,409
| 12
| 2
| null | 2023-05-23T14:23:44
| 2020-05-25T23:07:33
|
R
|
UTF-8
|
R
| false
| true
| 2,642
|
rd
|
vartrack_samplesize_detect.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vartrack_samplesize_detect.R
\name{vartrack_samplesize_detect}
\alias{vartrack_samplesize_detect}
\title{Calculate sample size needed for variant detection given a desired probability of detection}
\usage{
vartrack_samplesize_detect(
prob,
t = NA,
p_v1 = NA,
omega,
p0_v1 = NA,
r_v1 = NA,
c_ratio = 1,
sampling_freq
)
}
\arguments{
\item{prob}{desired probability of detection}
\item{t}{time step number (e.g., days) at which variant should be detected by. Default = NA (either \code{'t'} or \code{'p_v1'} should be provided, not both)}
\item{p_v1}{the desired prevalence to detect a variant by. Default = NA (either \code{'t'} or \code{'p_v1'} should be provided, not both)}
\item{omega}{probability of sequencing (or other characterization) success}
\item{p0_v1}{initial variant prevalence (# introductions / infected population size)}
\item{r_v1}{logistic growth rate}
\item{c_ratio}{coefficient of detection ratio, calculated as the ratio of the coefficients of variant 1 to variant 2. Default = 1 (no bias)}
\item{sampling_freq}{the sampling frequency (must be either 'xsect' or 'cont')}
}
\value{
scalar of expected sample size
}
\description{
This function calculates the sample size needed for detecting the presence of a variant
given a desired probability of detection and sampling strategy.
}
\examples{
# Cross-sectional sampling
vartrack_samplesize_detect(p_v1 = 0.1, prob = 0.95, omega = 0.8,
c_ratio = 1, sampling_freq = 'xsect')
# Periodic sampling
vartrack_samplesize_detect(prob = 0.95, t = 30, omega = 0.8, p0_v1 = 1/10000,
r_v1 = 0.1, c_ratio = 1, sampling_freq = 'cont')
}
\seealso{
Other variant detection functions:
\code{\link{vartrack_prob_detect_cont}()},
\code{\link{vartrack_prob_detect_xsect}()},
\code{\link{vartrack_prob_detect}()},
\code{\link{vartrack_samplesize_detect_cont}()},
\code{\link{vartrack_samplesize_detect_xsect}()}
Other variant tracking functions:
\code{\link{vartrack_cod_ratio}()},
\code{\link{vartrack_prob_detect_cont}()},
\code{\link{vartrack_prob_detect_xsect}()},
\code{\link{vartrack_prob_detect}()},
\code{\link{vartrack_prob_prev_xsect}()},
\code{\link{vartrack_prob_prev}()},
\code{\link{vartrack_samplesize_detect_cont}()},
\code{\link{vartrack_samplesize_detect_xsect}()},
\code{\link{vartrack_samplesize_prev_xsect}()},
\code{\link{vartrack_samplesize_prev}()}
}
\author{
Shirlee Wohl, Elizabeth C. Lee, Bethany L. DiPrete, and Justin Lessler
}
\concept{variant detection functions}
\concept{variant tracking functions}
|
bf0646bfc3e8711f9da9463922c31f7459f9a688
|
7606bf764d87cad9e50287dafbb87dbf7b8e99fe
|
/R/bemanian_beyer.R
|
727b6984ac588accd538bb6d62def76c2a67ef02
|
[
"Apache-2.0"
] |
permissive
|
idblr/ndi
|
50958411b3d767b2991ef50ac4cf1dd4ceace097
|
39cd672c79c674bea3a113649299af69da614685
|
refs/heads/main
| 2023-04-18T07:27:54.630066
| 2023-02-01T15:57:23
| 2023-02-01T15:57:23
| 521,439,746
| 15
| 1
|
NOASSERTION
| 2022-12-15T21:06:20
| 2022-08-04T23:07:13
|
HTML
|
UTF-8
|
R
| false
| false
| 14,811
|
r
|
bemanian_beyer.R
|
#' Local Exposure and Isolation metric based on Bemanian & Beyer (2017)
#'
#' Compute the aspatial Local Exposure and Isolation (Bemanian & Beyer) metric of a selected racial/ethnic subgroup(s) and U.S. geographies.
#'
#' @param geo_large Character string specifying the larger geographical unit of the data. The default is counties \code{geo_large = "county"}.
#' @param geo_small Character string specifying the smaller geographical unit of the data. The default is census tracts \code{geo_small = "tract"}.
#' @param year Numeric. The year to compute the estimate. The default is 2020, and the years 2009 onward are currently available.
#' @param subgroup Character string specifying the racial/ethnic subgroup(s). See Details for available choices.
#' @param subgroup_ixn Character string specifying the racial/ethnic subgroup(s) as the interaction population. If the same as \code{subgroup}, will compute the simple isolation of the group. See Details for available choices.
#' @param omit_NAs Logical. If FALSE, will compute index for a larger geographical unit only if all of its smaller geographical units have values. The default is TRUE.
#' @param quiet Logical. If TRUE, will display messages about potential missing census information. The default is FALSE.
#' @param ... Arguments passed to \code{\link[tidycensus]{get_acs}} to select state, county, and other arguments for census characteristics
#'
#' @details This function will compute the aspatial Local Exposure and Isolation (LEx/Is) metric of selected racial/ethnic subgroups and U.S. geographies for a specified geographical extent (e.g., the entire U.S. or a single state) based on Bemanian & Beyer (2017) \doi{10.1158/1055-9965.EPI-16-0926}. This function provides the computation of LEx/Is for any of the U.S. Census Bureau race/ethnicity subgroups (including Hispanic and non-Hispanic individuals).
#'
#' The function uses the \code{\link[tidycensus]{get_acs}} function to obtain U.S. Census Bureau 5-year American Community Survey characteristics used for the aspatial computation. The yearly estimates are available for 2009 onward when ACS-5 data are available but are available from other U.S. Census Bureau surveys. The twenty racial/ethnic subgroups (U.S. Census Bureau definitions) are:
#' \itemize{
#' \item{B03002_002: }{not Hispanic or Latino "NHoL"}
#' \item{B03002_003: }{not Hispanic or Latino, white alone "NHoLW"}
#' \item{B03002_004: }{not Hispanic or Latino, Black or African American alone "NHoLB"}
#' \item{B03002_005: }{not Hispanic or Latino, American Indian and Alaska Native alone "NHoLAIAN"}
#' \item{B03002_006: }{not Hispanic or Latino, Asian alone "NHoLA"}
#' \item{B03002_007: }{not Hispanic or Latino, Native Hawaiian and Other Pacific Islander alone "NHoLNHOPI"}
#' \item{B03002_008: }{not Hispanic or Latino, Some other race alone "NHoLSOR"}
#' \item{B03002_009: }{not Hispanic or Latino, Two or more races "NHoLTOMR"}
#' \item{B03002_010: }{not Hispanic or Latino, Two races including Some other race "NHoLTRiSOR"}
#' \item{B03002_011: }{not Hispanic or Latino, Two races excluding Some other race, and three or more races "NHoLTReSOR"}
#' \item{B03002_012: }{Hispanic or Latino "HoL"}
#' \item{B03002_013: }{Hispanic or Latino, white alone "HoLW"}
#' \item{B03002_014: }{Hispanic or Latino, Black or African American alone "HoLB"}
#' \item{B03002_015: }{Hispanic or Latino, American Indian and Alaska Native alone "HoLAIAN"}
#' \item{B03002_016: }{Hispanic or Latino, Asian alone "HoLA"}
#' \item{B03002_017: }{Hispanic or Latino, Native Hawaiian and Other Pacific Islander alone "HoLNHOPI"}
#' \item{B03002_018: }{Hispanic or Latino, Some other race alone "HoLSOR"}
#' \item{B03002_019: }{Hispanic or Latino, Two or more races "HoLTOMR"}
#' \item{B03002_020: }{Hispanic or Latino, Two races including Some other race "HoLTRiSOR"}
#' \item{B03002_021: }{Hispanic or Latino, Two races excluding Some other race, and three or more races "HoLTReSOR"}
#' }
#'
#' Use the internal \code{state} and \code{county} arguments within the \code{\link[tidycensus]{get_acs}} function to specify geographic extent of the data output.
#'
#' LEx/Is is a measure of the probability that two individuals living within a specific smaller geography (e.g., census tract) of either different (i.e., exposure) or the same (i.e., isolation) racial/ethnic subgroup(s) will interact, assuming that individuals within a smaller geography are randomly mixed. LEx/Is is standardized with a logit transformation and centered against an expected case that all races/ethnicities are evenly distributed across a larger geography. (Note: will adjust data by 0.025 if probabilities are zero, one, or undefined. The output will include a warning if adjusted. See \code{\link[car]{logit}} for additional details.)
#'
#' LEx/Is can range from negative infinity to infinity. If LEx/Is is zero then the estimated probability of the interaction between two people of the given subgroup(s) within a smaller geography is equal to the expected probability if the subgroup(s) were perfectly mixed in the larger geography. If LEx/Is is greater than zero then the interaction is more likely to occur within the smaller geography than in the larger geography, and if LEx/Is is less than zero then the interaction is less likely to occur within the smaller geography than in the larger geography. Note: the exponentiation of each LEx/Is metric results in the odds ratio of the specific exposure or isolation of interest in a smaller geography relative to the larger geography.
#'
#' Larger geographies available include state \code{geo_large = "state"}, county \code{geo_large = "county"}, and census tract \code{geo_large = "tract"} levels. Smaller geographies available include, county \code{geo_small = "county"}, census tract \code{geo_small = "tract"}, and census block group \code{geo_small = "block group"} levels. If a larger geographical area is comprised of only one smaller geographical area (e.g., a U.S county contains only one census tract), then the LEx/Is value returned is NA.
#'
#' @return An object of class 'list'. This is a named list with the following components:
#'
#' \describe{
#' \item{\code{lexis}}{An object of class 'tbl' for the GEOID, name, and LEx/Is at specified smaller census geographies.}
#' \item{\code{lexis_data}}{An object of class 'tbl' for the raw census values at specified smaller census geographies.}
#' \item{\code{missing}}{An object of class 'tbl' of the count and proportion of missingness for each census variable used to compute LEx/Is}
#' }
#'
#' @import dplyr
#' @importFrom car logit
#' @importFrom sf st_drop_geometry
#' @importFrom stats complete.cases
#' @importFrom tidycensus get_acs
#' @importFrom tidyr pivot_longer separate
#' @importFrom utils stack
#' @export
#'
#' @seealso \code{\link[tidycensus]{get_acs}} for additional arguments for geographic extent selection (i.e., \code{state} and \code{county}).
#'
#' @examples
#' \dontrun{
#' # Wrapped in \dontrun{} because these examples require a Census API key.
#'
#' # Isolation of non-Hispanic Black vs. non-Hispanic white populations
#' ## of census tracts within Georgia, U.S.A., counties (2020)
#' bemanian_beyer(geo_large = "county", geo_small = "tract", state = "GA",
#' year = 2020, subgroup = "NHoLB", subgroup_ixn = "NHoLW")
#'
#' }
#'
bemanian_beyer <- function(geo_large = "county", geo_small = "tract", year = 2020, subgroup, subgroup_ixn, omit_NAs = TRUE, quiet = FALSE, ...) {
# Check arguments
# match.arg() is used purely for validation here; its normalized return
# value is deliberately discarded.
match.arg(geo_large, choices = c("state", "county", "tract"))
match.arg(geo_small, choices = c("county", "tract", "block group"))
stopifnot(is.numeric(year), year >= 2009) # all variables available 2009 onward
match.arg(subgroup, several.ok = TRUE,
choices = c("NHoL", "NHoLW", "NHoLB", "NHoLAIAN", "NHoLA", "NHoLNHOPI",
"NHoLSOR", "NHoLTOMR", "NHoLTRiSOR", "NHoLTReSOR",
"HoL", "HoLW", "HoLB", "HoLAIAN", "HoLA", "HoLNHOPI",
"HoLSOR", "HoLTOMR", "HoLTRiSOR", "HoLTReSOR"))
match.arg(subgroup_ixn, several.ok = TRUE,
choices = c("NHoL", "NHoLW", "NHoLB", "NHoLAIAN", "NHoLA", "NHoLNHOPI",
"NHoLSOR", "NHoLTOMR", "NHoLTRiSOR", "NHoLTReSOR",
"HoL", "HoLW", "HoLB", "HoLAIAN", "HoLA", "HoLNHOPI",
"HoLSOR", "HoLTOMR", "HoLTRiSOR", "HoLTReSOR"))
# Select census variables
# ACS table B03002 (Hispanic or Latino origin by race); codes documented in
# the roxygen header of this file.
vars <- c(TotalPop = "B03002_001",
NHoL = "B03002_002",
NHoLW = "B03002_003",
NHoLB = "B03002_004",
NHoLAIAN = "B03002_005",
NHoLA = "B03002_006",
NHoLNHOPI = "B03002_007",
NHoLSOR = "B03002_008",
NHoLTOMR = "B03002_009",
NHoLTRiSOR = "B03002_010",
NHoLTReSOR = "B03002_011",
HoL = "B03002_012",
HoLW = "B03002_013",
HoLB = "B03002_014",
HoLAIAN = "B03002_015",
HoLA = "B03002_016",
HoLNHOPI = "B03002_017",
HoLSOR = "B03002_018",
HoLTOMR = "B03002_019",
HoLTRiSOR = "B03002_020",
HoLTReSOR = "B03002_021")
selected_vars <- vars[c("TotalPop", subgroup, subgroup_ixn)]
out_names <- names(selected_vars) # save for output
# tidycensus wide output suffixes estimate columns with "E".
in_subgroup <- paste(subgroup, "E", sep = "")
in_subgroup_ixn <- paste(subgroup_ixn, "E", sep = "")
# Acquire LEx/Is variables and sf geometries
lexis_data <- suppressMessages(suppressWarnings(tidycensus::get_acs(geography = geo_small,
year = year,
output = "wide",
variables = selected_vars,
geometry = TRUE,
keep_geo_vars = TRUE, ...)))
# Format output
# Split the combined NAME.y field into its administrative components and
# strip non-numeric characters from tract / block-group labels.
if (geo_small == "county") {
lexis_data <- sf::st_drop_geometry(lexis_data) %>%
tidyr::separate(NAME.y, into = c("county", "state"), sep = ",")
}
if (geo_small == "tract") {
lexis_data <- sf::st_drop_geometry(lexis_data) %>%
tidyr::separate(NAME.y, into = c("tract", "county", "state"), sep = ",") %>%
dplyr::mutate(tract = gsub("[^0-9\\.]", "", tract))
}
if (geo_small == "block group") {
lexis_data <- sf::st_drop_geometry(lexis_data) %>%
tidyr::separate(NAME.y, into = c("block.group", "tract", "county", "state"), sep = ",") %>%
dplyr::mutate(tract = gsub("[^0-9\\.]", "", tract),
block.group = gsub("[^0-9\\.]", "", block.group))
}
# Grouping IDs for LEx/Is computation
# oid = concatenated FIPS codes identifying the *larger* geography that each
# smaller unit belongs to.
# NOTE(review): stringr is called via :: below but is not listed in the
# roxygen @importFrom tags above -- confirm it is declared in DESCRIPTION.
if (geo_large == "tract") {
lexis_data <- lexis_data %>%
dplyr::mutate(oid = paste(.$STATEFP, .$COUNTYFP, .$TRACTCE, sep = ""),
state = stringr::str_trim(state),
county = stringr::str_trim(county))
}
if (geo_large == "county") {
lexis_data <- lexis_data %>%
dplyr::mutate(oid = paste(.$STATEFP, .$COUNTYFP, sep = ""),
state = stringr::str_trim(state),
county = stringr::str_trim(county))
}
if (geo_large == "state") {
lexis_data <- lexis_data %>%
dplyr::mutate(oid = .$STATEFP,
state = stringr::str_trim(state))
}
# Count of racial/ethnic subgroup populations
## Count of racial/ethnic comparison subgroup population
# When several subgroups are requested, their estimate columns are summed
# row-wise into a single count.
if (length(in_subgroup) == 1) {
lexis_data <- lexis_data %>%
dplyr::mutate(subgroup = .[ , in_subgroup])
} else {
lexis_data <- lexis_data %>%
dplyr::mutate(subgroup = rowSums(.[ , in_subgroup]))
}
## Count of racial/ethnic interaction subgroup population
if (length(in_subgroup_ixn) == 1) {
lexis_data <- lexis_data %>%
dplyr::mutate(subgroup_ixn = .[ , in_subgroup_ixn])
} else {
lexis_data <- lexis_data %>%
dplyr::mutate(subgroup_ixn = rowSums(.[ , in_subgroup_ixn]))
}
# Compute LEx/Is
## From Bemanian & Beyer (2017) https://doi.org/10.1158/1055-9965.EPI-16-0926
## E^*_{m,n}(i) = log\left(\frac{p_{im} \times p_{in}}{1 - p_{im} \times p_{in}}\right) - log\left(\frac{P_{m} \times P_{n}}{1 - P_{m} \times P_{n}}\right)
## Where for smaller geographical unit i:
## p_{im} denotes the number of subgroup population m in smaller geographical unit i
## p_{in} denotes the number of subgroup population n in smaller geographical unit i
## P_{m} denotes the number of subgroup population m in larger geographical unit within which the smaller geographic unit i is located
## P_{n} denotes the number of subgroup population n in larger geographical unit within which the smaller geographic unit i is located
## If m \ne n, then computes the exposure of members of subgroup populations m and n
## If m = n, then computes the simple isolation experienced by members of subgroup population m
## Compute
# lexis_fun is a package-internal helper (defined elsewhere) applied to each
# larger-geography group separately.
LExIstmp <- lexis_data %>%
split(., f = list(lexis_data$oid)) %>%
lapply(., FUN = lexis_fun, omit_NAs = omit_NAs) %>%
do.call("rbind", .)
# Warning for missingness of census characteristics
missingYN <- lexis_data[ , c("TotalPopE", in_subgroup, in_subgroup_ixn)]
names(missingYN) <- out_names
missingYN <- missingYN %>%
tidyr::pivot_longer(cols = dplyr::everything(),
names_to = "variable",
values_to = "val") %>%
dplyr::group_by(variable) %>%
dplyr::summarise(total = dplyr::n(),
n_missing = sum(is.na(val)),
percent_missing = paste0(round(mean(is.na(val)) * 100, 2), " %"))
if (quiet == FALSE) {
# Warning for missing census data
if (sum(missingYN$n_missing) > 0) {
message("Warning: Missing census data")
}
}
# Format output
lexis <- merge(lexis_data, LExIstmp)
# NOTE(review): geo_small can never be "state" given the match.arg() check at
# the top of this function, so this first branch is unreachable.
if (geo_small == "state") {
lexis <- lexis %>%
dplyr::select(GEOID, state, LExIs)
}
if (geo_small == "county") {
lexis <- lexis %>%
dplyr::select(GEOID, state, county, LExIs)
}
if (geo_small == "tract") {
lexis <- lexis %>%
dplyr::select(GEOID, state, county, tract, LExIs)
}
if (geo_small == "block group") {
lexis <- lexis %>%
dplyr::select(GEOID, state, county, tract, block.group, LExIs)
}
# Drop duplicate rows and rows whose GEOID is the literal "NANA"
# (presumably both identifier parts missing -- TODO confirm), then sort.
lexis <- lexis %>%
unique(.) %>%
.[.$GEOID != "NANA", ] %>%
dplyr::arrange(GEOID) %>%
dplyr::as_tibble()
lexis_data <- lexis_data %>%
dplyr::arrange(GEOID) %>%
dplyr::as_tibble()
# Named list: LEx/Is estimates, raw census data, and missingness summary.
out <- list(lexis = lexis,
lexis_data = lexis_data,
missing = missingYN)
return(out)
}
|
1bbde0a850c315ed971d28178f59f6118bee9f53
|
d96e1db65f62acb0a82aa2a51d4b06ead9d3f75f
|
/vdrs_sudors_2019.R
|
dfdc3a1503071f4555296fcedf725b2118510b3a
|
[] |
no_license
|
injuryepi/vdrs_misc
|
a7485ba2ac184720765b1e8150d7dd7495cf94d3
|
686a435ba890c9220fa98519ce7091eb7ccc893e
|
refs/heads/master
| 2020-05-21T00:25:20.599320
| 2019-06-17T23:42:06
| 2019-06-17T23:42:06
| 185,826,227
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,980
|
r
|
vdrs_sudors_2019.R
|
# R packages to load ------------------------------------------------------
# Store(objects())
# SOAR::Attach()
library(SOAR)
suppressMessages(library(tidyverse))
suppressMessages(library(Hmisc))
suppressMessages(library(lubridate))
library(injuryepi)
library(overdoser)
library(rvdrs)
# Reading the data --------------------------------------------------------
# prelim_dth (path to the preliminary death files) comes from the cached
# workspace object loaded below.
load(".R_Cache/prelim_dth@.RData")
# Print the source file's last-modified time (provenance check).
fs::file_info(
file.path(prelim_dth, "death2019.csv")) %>%
pull(modification_time)
d2019pre <- read_csv(file.path(prelim_dth, "death2019.csv"))
load(".R_Cache/clean_var_names@.RData")
load(".R_Cache/sams_vars_sel@.RData")
load(".R_Cache/import_width@.RData")
# Replace the raw column names with the cached cleaned names.
d2019pre <- d2019pre %>%
set_names(clean_var_names)
d2019pre <- d2019pre %>%
mutate(certno = state_file_number)
# Find VDRS cases -------------------------------------------------------
# NOTE(review): ICD-10 underlying-cause codes flagged as NVDRS cases --
# confirm this regex against the current NVDRS case definition.
nvdrs_regex_ <- "X[6-9]|Y[012]|Y3[0-4]|Y35[0-467]|Y87[012]|Y89[09]|W3[2-4]|U0[123]"
u_col <- grep("underly", names(d2019pre), value = F)
d2019pre <- d2019pre %>%
mutate(nvdrs_case = create_diag(., expr = nvdrs_regex_, colvec = u_col))
# Find Y86 ----------------------------------------------------------------
# Flag underlying-cause code Y86 separately.
d2019pre <- d2019pre %>%
mutate(y86_case = create_diag(., expr = "Y86", colvec = u_col))
# Add intent and mechanism
d2019pre <- d2019pre %>%
add_ice_intent_mech(uid = certno, underlying_cause = underlying_cod_code)
# Add overdose ------------------------------------------------------------
## functions to capture unintentional
od_ui_fatal_opioid <- function(data, underly_col, mult_col) {
  # Flag unintentional drug-poisoning deaths (underlying cause X40-X44), then
  # flag opioid involvement (multiple-cause codes T40.0-T40.4 / T40.6) among
  # those deaths. Adds a ui_opioid indicator column; the intermediate drug
  # flag is dropped before returning.
  with_drug_flag <- mutate(
    data,
    ui_drug = od_create_diag(data, expr = "X4[0-4]", colvec = underly_col)
  )
  with_opioid_flag <- mutate(
    with_drug_flag,
    ui_opioid = od_create_cond_diag(
      with_drug_flag,
      expr = "T40[0-46]",
      colvec = mult_col,
      cond.var = ui_drug
    )
  )
  select(with_opioid_flag, -ui_drug)
}
od_und_fatal_opioid <- function(data, underly_col, mult_col) {
  # Flag drug-poisoning deaths of undetermined intent (underlying cause
  # Y10-Y14), then flag opioid involvement (multiple-cause codes
  # T40.0-T40.4 / T40.6) among those deaths. Adds an und_intent_opioid
  # indicator column; the intermediate drug flag is dropped before returning.
  with_drug_flag <- mutate(
    data,
    und_drug = od_create_diag(data, expr = "Y1[0-4]", colvec = underly_col)
  )
  with_opioid_flag <- mutate(
    with_drug_flag,
    und_intent_opioid = od_create_cond_diag(
      with_drug_flag,
      expr = "T40[0-46]",
      colvec = mult_col,
      cond.var = und_drug
    )
  )
  select(with_opioid_flag, -und_drug)
}
####
# Column indices of the underlying-cause and multiple-cause (record axis)
# code columns, passed to the od_* helpers defined above.
col_mult <- grep("underlying_cod_code$|record_axis_code", names(d2019pre), value = F)
col_under <- grep("underlying_cod_code$", names(d2019pre), value = F)
d2019pre <- d2019pre %>%
od_fatal_drug_list(underly_col = col_under, mult_col = col_mult)
d2019pre <- d2019pre %>%
od_ui_fatal_opioid(underly_col = col_under, mult_col = col_mult)
# pregnancy "0" doesn't exist
# Recode the invalid pregnancy value 0 to 9.
d2019pre <- d2019pre %>%
mutate(pregnancy = ifelse(pregnancy == 0, 9, pregnancy))
# Cases to consider SUDORS or NVDRS -------------------------------------
# include all counties
# ESOOS/SUDORS case = unintentional opioid-involved overdose death.
d2019pre <- d2019pre %>%
mutate(esoos_case = ifelse(ui_opioid == 1, 1, 0))
# Cases to import
d2019pre_vdod <- d2019pre %>%
filter(nvdrs_case == 1 | esoos_case == 1)
# Adding new variables ----------------------------------------------------
# Placeholder/derived fields for the downstream import file (presumably the
# NVDRS/SUDORS web-system layout -- TODO confirm): mostly blank or
# unknown-coded constants plus a few identifiers derived from the record.
d2019pre_vdod <- d2019pre_vdod %>% mutate(
dob = lubridate::mdy(date_of_birth),
ForceNewRecord = rep("Y", nrow(.)),
OverwriteConflicts = rep(" ", nrow(.)),
IncidentYear = rep(" ", nrow(.)),
IncidentNumber = rep(" ", nrow(.)),
VictimNumber = rep(" ", nrow(.)),
DCNumberLastFour = stringr::str_sub(string = certno, -4),
CMENumberLastFour = stringr::str_sub(
gsub("\\W|_", "", me_coroner_case_num, perl = T),
-4
),
LastNameFirstInitial = stringr::str_sub(decedent_last_name, , 1),
BirthDayofMonth = lubridate::day(dob),
RACE_MVR = rep(" ", nrow(.)),
OCCUPC = vector(mode = "numeric", length = nrow(.)),
INDUSTC = vector(mode = "numeric", length = nrow(.)),
TOI_HR = str_pad(
gsub("\\W", "", time_of_injury), # to remove ":" between hr and min
width = 6, side = "left", pad = 0
),
INACT = substr(underlying_cod_code, 5, 5),
INJPL = rep(" ", nrow(.)),
INJPL2 = rep(" ", nrow(.)),
OLDEDUC = rep(" ", nrow(.)),
PRNCDDT_MO = rep(99, nrow(.)),
PRNCDDT_DY = rep(99, nrow(.)),
PRNCDDT_YR = rep(9999, nrow(.)),
DC_CnsBlk = rep(" ", nrow(.)),
DC_CnsTrt = rep(" ", nrow(.)),
Dc_surviv = rep(999, nrow(.)),
dc_Sunit = rep(9, nrow(.)),
Empty_Indust = rep(" ", nrow(.)),
DeathMannerAbstractor = rep(" ", nrow(.))
)
d2019pre_vdod <- d2019pre_vdod %>% set_names(tolower)
# import_id = last 4 digits of the certificate number + first initial of the
# last name + zero-padded birth day of month.
d2019pre_vdod <- d2019pre_vdod %>% mutate(
import_id = paste0(dcnumberlastfour,
lastnamefirstinitial, stringr::str_pad(birthdayofmonth, w = 2, side = "left", pad = "0")))
d2019pre_vdod <- d2019pre_vdod %>%
mutate(
death_state_code = death_state_nchs_code,
birthplace_country_code = birthplace_country_fips_code,
residence_state_code = residence_state_nchs_code,
residence_country_code = residence_country_fips_code,
marital = marital_status,
injury_date_month = date_of_injury_month,
injury_date_day = date_of_injury_day,
injury_date_year = date_of_injury_year,
residence_zip = residence_zip_code
)
# To review for the nvdrs cases
d2019pre_vdod <- d2019pre_vdod %>%
mutate(birthplace_state_nchs_code =
str_pad(birthplace_state_nchs_code,
width = 2, side = "left", pad = 0) )
d2019pre_vdod <- d2019pre_vdod %>%
left_join(select(state_codes, wa_codes, target) %>%
mutate(wa_codes = as.character(wa_codes)) %>%
rename(birthplace_state_nchs_code = wa_codes)) %>%
mutate(birthplace_state_nchs_code = target) %>%
select(-target)
# residence_county_wa_code ResidenceCounty COUNTYC
d2019pre_vdod <- d2019pre_vdod %>%
left_join(wa_counties_fips %>%
rename(target = county_fips) %>%
select(county, target) %>%
mutate(county = toupper(county)) %>%
rename(residence_county = county)) %>%
mutate(residence_county = target) %>%
select(-target)
d2019pre_vdod <- d2019pre_vdod %>%
mutate(residence_county =
str_pad(residence_county,
width = 3, side = "left", pad = 0) )
# injury_county_wa_code County INJCOUNTY
d2019pre_vdod <- d2019pre_vdod %>%
left_join(wa_counties_fips %>%
rename(target = county_fips) %>%
mutate(county = toupper(county)) %>%
rename(injury_county = county)) %>%
mutate(injury_county = target) %>%
select(-target)
d2019pre_vdod <- d2019pre_vdod %>%
mutate(injury_county =
str_pad(injury_county,
width = 3, side = "left", pad = 0) )
# describe(d2019pre_vdod$injury_county)
# residence_state_code ResidenceState STATEC
# describe(d2019pre_vdod$residence_state_code)
d2019pre_vdod <- d2019pre_vdod %>%
mutate(residence_state_code =
str_pad(residence_state_code,
width = 2, side = "left", pad = 0) )
d2019pre_vdod <- d2019pre_vdod %>%
left_join(select(state_codes, wa_codes, target) %>%
mutate(wa_codes = as.character(wa_codes)) %>%
rename(residence_state_code = wa_codes)) %>%
mutate(residence_state_code = target) %>%
select(-target)
table(d2019pre_vdod$residence_state_code)
# residence_city_fips_code ResidenceCity CITYC City of residence
d2019pre_vdod <- d2019pre_vdod %>%
mutate(residence_city_fips_code =
str_pad(residence_city_fips_code,
width = 5, side = "left", pad = 0) )
#death_state_nchs_code DeathState DSTATE
d2019pre_vdod <- d2019pre_vdod %>%
mutate(death_state_code =
str_pad(death_state_code,
width = 2, side = "left", pad = 0) )
d2019pre_vdod <- d2019pre_vdod %>%
left_join(select(state_codes, wa_codes, target) %>%
mutate(wa_codes = as.character(wa_codes)) %>%
rename(death_state_code = wa_codes)) %>%
mutate(death_state_code = target) %>%
select(-target)
# marital MaritalStatus MARITAL Marital status
d2019pre_vdod <- d2019pre_vdod %>%
mutate(marital = ifelse(marital == "P", "M", marital))
# place_of_death_type DeathPlace DPLACE Place of Death
d2019pre_vdod <- d2019pre_vdod %>%
mutate(place_of_death_type = as.character(place_of_death_type)) %>%
left_join(dplace_conv %>%
rename(place_of_death_type = dplace)) %>%
mutate(place_of_death_type = tgt) %>%
select(-tgt)
#residence_zip ResidenceZip RESZIP Zip code of residence
d2019pre_vdod <- d2019pre_vdod %>%
mutate(residence_zip = substr(residence_zip, 1, 5))
#injury_zip_code InjuryZip NA ZIP Code where injury occurred
d2019pre_vdod <- d2019pre_vdod %>%
mutate(injury_zip_code = substr(injury_zip_code, 1, 5))
# injury_stateInjuryState INJSTATE State or territory where injury occurred
d2019pre_vdod <- d2019pre_vdod %>%
mutate(injury_state =
str_pad(injury_state,
width = 2, side = "left", pad = 0) )
d2019pre_vdod <- d2019pre_vdod %>%
left_join(select(state_codes, state, target) %>%
rename(injury_state = state)) %>%
mutate(injury_state = target) %>%
select(-target)
# armed_forces Military DC_Vetran Current or former military personnel
d2019pre_vdod <- d2019pre_vdod %>%
left_join(vet_conv) %>%
mutate(armed_forces = tgt) %>%
select(-tgt)
#injury_city InjuryCity DC_InjPlace City where injury occurred
injcity <- wa_cities_codes %>% select(city, placefp) %>%
mutate(city = str_trim(toupper(city))) %>%
rename(injury_city = city,
tgt = placefp)
d2019pre_vdod <- d2019pre_vdod %>%
mutate(injury_city = str_trim(toupper(injury_city))) %>%
left_join(injcity)
d2019pre_vdod <- d2019pre_vdod %>%
mutate(injury_city = tgt) %>%
select(-tgt)
#
d2019pre_vdod <- d2019pre_vdod %>%
select(-county_wa_code)
d2019pre_vdod <- d2019pre_vdod[vars_to_import_x]
# identical(names(d2019pre_vdod), vars_to_import_x)
vars_to_import_x2 <- c("certno", "import_id", sams_vars_sel$nchs_vars)
names(d2019pre_vdod) <- vars_to_import_x2
# Formatting ICD-10 codes -------------------------------------------------
# Formatter for ICD-10 cause-of-death codes. purrr::compose() applies its
# functions right-to-left (default .dir = "backward"), so the steps run
# bottom-up: NA -> " ", insert "." after the first 3 characters, strip a
# trailing ".", then right-pad to width 5 and wrap in single spaces
# (fixed-width layout expected by the SAMS import).
format_icd10 <- compose(
  function(x) paste0(" ", stringi::stri_pad_right(x, w = 5), " "),
  function(x) gsub("\\.$", "", x),
  function(x) gsub("(?<=^(.{3}))", "\\.", x, perl = TRUE),
  function(x) ifelse(is.na(x), " ", x)
)
# Apply to every axis/entity cause column (ACME* / EAC* prefixes).
var_causes <- grep("^ACME|^EAC",
                   names(d2019pre_vdod), value = T, ignore.case = T)
d2019pre_vdod[var_causes] <- sapply(d2019pre_vdod[var_causes], format_icd10)
# Race and Ethnicity ------------------------------------------------------
# race
var_race <- grep("RACE",
names(d2019pre_vdod), value = T, ignore.case = T)
d2019pre_vdod[var_race] <- sapply(d2019pre_vdod[var_race], function(x) gsub("U", "", x ))
# ethnicity
var_eth <- grep("^dethnic", names(d2019pre_vdod), value = T, ignore.case = T)
f_eth <- function(x) gsub("U", "N", x, perl = T)
d2019pre_vdod[var_eth] <- d2019pre_vdod[var_eth] %>%
map_df(f_eth)
# Other Formatting --------------------------------------------------------
d2019pre_vdod <- d2019pre_vdod %>%
mutate(toi_hr = gsub("\\D+|NA", "", toi_hr),
toi_hr = str_pad(toi_hr,width = 6, side = "left", pad = 0),
toi_hr = if_else(nchar(toi_hr) < 6, " ", toi_hr),
toi_hr = str_sub(toi_hr, ,4),
dc_censst = str_trim(dc_censst),
dc_censst = stringr::str_sub(dc_censst, 1, 7),
dc_censst = as.character(ifelse(is.na(dc_censst), "9999.99", dc_censst)),
dc_censst = str_pad(dc_censst,width = 7, side = "left", pad = 0),
dc_cnstrt = rep("9999.99", nrow(.)),
dc_cnsblk = rep(" ", nrow(.)),
dc_pdthtx = substr(dc_pdthtx , 1, 30),
dc_pdthtx = ifelse(dplace != 7, " ", dc_pdthtx),
# country of birth
dc_bthtxt = ifelse(bplace_cnt != "88", " ", dc_bthtxt),
# country of residence
dc_countr = ifelse(countryc != "88", " ", dc_countr))
# replace missing with space
d2019pre_vdod[] <- sapply(d2019pre_vdod[], function(x) ifelse(is.na(x), " ", x))
# replace country ZZ with " "
d2019pre_vdod[] <- sapply(d2019pre_vdod[],
function(x) gsub("^ZZ$", " ", x, perl = T))
# Selected Corrections to minimize import errors --------------------------
ctc <- c("88","99","US", "RQ","VQ","GQ","CA","CU","MX","AQ","CQ")
d2019pre_vdod <- d2019pre_vdod %>%
mutate(bplace_cnt = ifelse(bplace_cnt %in% ctc, bplace_cnt, ""))
# replace "U" in toi_u
d2019pre_vdod <- d2019pre_vdod %>%
mutate(toi_u = ifelse(toi_u == "U", "", toi_u))
# not city fips
# City FIPS values that are not valid place codes; recode them to the
# "99999" unknown sentinel. (Fixed: the original listed '09425' twice.)
notc <- c("00000", "09425", "58845", "99033", "99041", "17250")
d2019pre_vdod <- d2019pre_vdod %>%
  mutate(cityc = ifelse(cityc %in% notc, "99999", cityc),
         # blank city/county when the state itself is blank or unknown
         cityc = ifelse(statec == " ", " ", cityc),
         countyc = ifelse(statec == "99", " ", countyc),
         # census tract/block are not imported; use SAMS placeholders
         dc_censst = rep("9999.99", nrow(.)),
         dc_censbl = rep(" ", nrow(.)),
         # ZIPs: keep digits only and blank anything shorter than 5 digits
         reszip = gsub("\\D+", "", reszip),
         reszip = ifelse(nchar(reszip) < 5, " ", reszip),
         injuryzip = ifelse(nchar(injuryzip) < 5, " ", injuryzip),
         # missing injury year -> 9999 sentinel
         doi_yr = ifelse(doi_yr == " ", "9999", doi_yr)
  )
# Check victims already in SAMS to avoid duplications ---------------------
# Existing SAMS victim export; used to exclude already-entered records.
victim <- readr::read_csv("from_sams/temp/Victim.zip")
victim <- victim %>%
  filter(IncidentYear == 2019)
# Same composite key built for d2019pre_vdod above: last-4 of certificate
# number + first initial + zero-padded birth day-of-month.
# NOTE(review): column is spelled BirthDayOfMonth here vs BirthDayofMonth in
# the locally built data — presumably the SAMS export's own casing; confirm.
victim <- victim %>% mutate(
  import_id = paste0(DCNumberLastFour, LastNameFirstInitial, stringr::str_pad(BirthDayOfMonth, w = 2, side = "left", pad = "0")))
# NVDRS Cases -------------------------------------------------------------
d2019pre_nvdrs <- d2019pre %>%
  filter(nvdrs_case == 1)
# Keep Washington injuries (state 53) flagged as NVDRS and not yet in SAMS.
d2019pre_nvdrs_sams <- d2019pre_vdod %>%
  filter(injstate == "53",
         certno %in% d2019pre_nvdrs$certno,
         !(import_id %in% victim$import_id ))
# SUDORS cases ----------------------------------------------------------
d2019pre_sudors <- d2019pre %>%
filter(esoos_case == 1)
sudors_13counties_ <- "Kitsap|Thurston|Spokane|Skagit|Grays Harbor|King|Snohomish|Yakima|Pierce|Island|Clark|Clallam|Whatcom"
counties13 <- wa_counties_fips %>%
filter(grepl(sudors_13counties_, county)) %>%
pull(county_fips) %>% unlist
d2019pre_sudors_sams <- d2019pre_vdod %>%
filter(
injstate == "53",
certno %in% d2019pre_sudors$certno,
!(import_id %in% victim$import_id),
str_pad(injcounty, 3, "left", 0) %in% counties13
)
# Save NVDRS file to load into SAMS ---------------------------------------
gdata::write.fwf(d2019pre_nvdrs_sams %>%
select(-certno, -import_id) %>%
as.data.frame(),
glue::glue("to_sams/d2019pre_nvdrs_{today()}.txt"), width = import_width, colnames = F, sep="", na = " ")
# Save SUDORS file to load into SAMS --------------------------------------
gdata::write.fwf(d2019pre_sudors_sams %>%
select(-certno, -import_id) %>%
as.data.frame(),
glue::glue("to_sams/d2019pre_sudors_{today()}.txt"), width = import_width, colnames = F, sep="", na = " ")
# Copies to Data folder ---------------------------------------------------
write_rds(d2019pre_nvdrs_sams, glue::glue("data/pre_sams/d2019pre_nvdrs_{today()}.rds"))
write_rds(d2019pre_nvdrs, "Data/pre_sams/d2019pre_nvdrs.rds", compress = "xz")
write_rds(d2019pre_sudors_sams, glue::glue("data/pre_sams/d2019pre_sudors_{today()}.rds"))
write_rds(d2019pre_sudors, "Data/pre_sams/d2019pre_sudors.rds", compress = "xz")
|
5bd3da3ee1eb5c513f27135463ee65269ea5209a
|
9c9522758187c1df573b50f974f63657d982b9b3
|
/Scripts/Processing/Testing out tidycensus.R
|
045acabbe4a4e796fa8043c6e6b6c64af1a00ac1
|
[] |
no_license
|
ajkirkpatrick/Low-income-energy
|
97040000f9ce47500c45b111d81df2939136e691
|
205f92c30b6796a5ececc3ea8268dc22e59ac2f6
|
refs/heads/master
| 2023-05-02T16:56:17.209269
| 2021-06-03T12:36:54
| 2021-06-03T12:36:54
| 156,229,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,657
|
r
|
Testing out tidycensus.R
|
###### Explore tidycensus #####
## At the moment, must install from gitHub
library(devtools)
dev_mode(TRUE)
# install_github("hadley/ggplot2")
library("ggplot2", lib.loc="~/R-dev")
# dev_mode(FALSE)
require(tidyverse)
require(sf)
library(viridis)
library(rvest)
require(tidycensus)
options(tigris_use_cache = TRUE)
# Resolve the shared-drive base path per machine: Windows ("EI" hosts) mount
# it at Z:/, macOS mounts it under /Volumes.
if (grepl("EI", Sys.info()["nodename"])) {
  BASE <- file.path("Z:/user/")
} else {
  # Fixed: the original omitted the leading slash ("Volumes/...") which
  # produced a relative path on macOS.
  BASE <- file.path("/Volumes/ei-edl01/user/")
}
WORK.OUT <- file.path(BASE, "ajk41/Solar Incentive NBER/Data/OUTPUT", Sys.Date())
dir.create(WORK.OUT, recursive = TRUE)
GIS.files <- file.path(BASE, "ajk41/GIS files")
# SECURITY: a Census API key is hard-coded and committed to the repository.
# Prefer mykey <- Sys.getenv("CENSUS_API_KEY"), and revoke/rotate this key.
mykey <- "ff90006e6d5f1960ef3bbb37579766bb563b1121"
census_api_key(mykey)
###########
## https://walkerke.github.io/tidycensus/articles/basic-usage.html
## http://strimas.com/r/tidy-sf/
# get_decennial
# get_acs
test = get_decennial(geography = c('state'),variables = "H043A001", year = c(1990)) #--> single year at a time
test %>% #--> huh. tidy frames can go straight into ggplots
ggplot(aes(x = value, y = reorder(NAME, value))) +
geom_point()
## ACS with geometry
v15 <- load_variables(2015, "acs5", cache = TRUE)
#--> note: no year??
test = get_acs(state = "CA", county='Orange', geography = "tract", variables='B07013_001', geometry=F) # drop the 'E' off the end - tidycensus gets E and MOE
geotest = get_acs(state = "CA", county='Orange', geography = "tract", variables='B07013_001', geometry=T) # drop the 'E' off the end - tidycensus gets E and MOE
geotest = get_acs(state = "CA", county='Orange', geography = "tract", variables='B07013_001', geometry=T, cb=F) # cb=F gives you TIGERlines instead of census cartographic. No idea difference.
geotest %>%
ggplot(aes(fill = estimate, color = estimate)) + #--> fill is fill, color is border (make same for no-border)
geom_sf() +
coord_sf(crs = 26911) +
scale_fill_viridis(option = "magma") +
scale_color_viridis(option = "magma")
gt.2015 = get_acs(state='CA', county='Orange', geography = 'block group', variables = 'B25003_001', geometry=T)
# congressional district, etc. don't seem to work. (see ?get_acs "geometry" for what works: state, county, tract, block group, block and zcta (lowercase))
# Geographies have to line-up with the ORDER of the >'s at http://api.census.gov/data/2015/acs5/geography.html
# That is, if it says "state>combined statistical area", then I can only supply a state, not a county.
# Though the website says "block group" requires state and county and tract; only needs state and county. So...not perfect?
# get_acs() and get_decennial() tell you a bit more.
# use endyear=201X for acs year
gt.2015 = get_acs(state='CA', county='Orange', geography = 'block group', variables = 'B25003_001', geometry=T)
gt.2012 = get_acs(state='CA', county='Orange', endyear=2013, geography = 'block group', variables = 'B25003_001', geometry=T)
range(gt.2012$estimate)
range(gt.2015$estimate) #--> they're different. endyear works!
gt.2015 %>%
ggplot(aes(fill = estimate, color = estimate)) + #--> fill is fill, color is border (make same for no-border)
geom_sf() +
# coord_sf(crs = 26911) +
scale_fill_viridis(option = "magma") +
scale_color_viridis(option = "magma")
## let's try multiple subvariables
v15 <- load_variables(2015, "acs5", cache = TRUE)
View(v15) #-> filter on "tenure"
## in B2500X (see v15), the total households is in B25009_001; the total owner-occupied is B25009_002; and the breakdown by # of people in household is B25009_003:B25009_9.
### so let's test to see if they sum correctly within B25009_002 (owner-occupied) using summary_var
households = get_acs(state='CA', county='Orange', endyear=2015, geography = 'block group', variables = 'B25009_002', summary_var = 'B25009_001', geometry=T)
usevars = paste0('B25009_00', 3:9)
households = get_acs(state='CA', county='Orange', endyear=2015, geography = 'block group', variables = usevars, summary_var = 'B25009_002', geometry=T)
xx = households %>% group_by(NAME) %>% summarize(summaryvar = summary_est[1], actual_sum = sum(estimate)) # Yup!
## OK - so it takes some manual thinking, but summary_var is perfect for the "denominator" in shares!
############
## Now, how do we keep the full FIPS? I want to be able to ID a state>county>census block group
### It may be that NAME is unique, or I can pull out Census Tract with a string exp.
### Or, use 'keep_geo_vars'
households = get_acs(state='CA', county='Orange', endyear=2015, geography = 'block group',
variables = usevars, summary_var = 'B25009_002',
geometry=T, keep_geo_vars = T)
head(households)
## Awesome - STATEFP, COUNTYFP, TRACTCE, and BLKGRPCE form the full FIPS.
## AFFGEOID starts with the geography hierarchy (e.g. block gropu is '150'), then four 0's (not sure), then US, then the concatenated STATEFP(2)/COUNTYFP(3)/TRACTCE(6)/BLKGRPCE(1)
## Note that they're all characters!
#### Plotting with these multiple variables
## either subset the data using variable=='B25009_00X'
## or facet over ~variable:
households %>%
filter(variable=='B25009_005') %>%
ggplot(aes(fill = estimate, color = estimate)) + #--> fill is fill, color is border (make same for no-border)
geom_sf() +
# coord_sf(crs = 26911) +
scale_fill_viridis(option = "magma") +
scale_color_viridis(option = "magma")
households %>%
ggplot(aes(fill = estimate, color = estimate)) + #--> fill is fill, color is border (make same for no-border)
geom_sf() +
facet_wrap(~variable) +
# coord_sf(crs = 26911) +
scale_fill_viridis(option = "inferno") +
scale_color_viridis(option = "inferno")
|
2233d71de71a630ac96c571241d58a4fe50d1129
|
1b3cfee1a5f45617b075e590e6facac87dc489ce
|
/man/land.surface.Rd
|
d57ace097a7c969a8f6693a5d488bc517c35c624
|
[
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
jfisher-usgs/wrv
|
30b5de6a1ac22dafff21eb762b7d4a5f894e2e7a
|
32249a78f5dd9ad978da08f8b4bd2e28c53cd0d5
|
refs/heads/main
| 2022-03-09T20:02:46.180583
| 2020-06-30T17:48:16
| 2020-06-30T17:48:16
| 18,356,349
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,349
|
rd
|
land.surface.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{land.surface}
\alias{land.surface}
\title{Topography of Land Surface}
\format{
An object of SpatialGridDataFrame class.
Each cell on the surface grid represents an elevation in meters above the
North American Vertical Datum of 1988 (NAVD 88).
Geographic coordinates are in units of meters, in conformance with the
North American Datum of 1983 (NAD 83), and placed in the
Idaho Transverse Mercator projection (\href{https://www.idwr.idaho.gov/GIS/IDTM/}{IDTM}).
The spatial grid is composed of 565 rows and 429 columns,
and has cell sizes that are constant at 100 meters by 100 meters.
}
\source{
The National Map (\href{https://nationalmap.gov/elevation.html}{TNM})
1/3-arc-second raster (Gesch, 2007; Gesch and others, 2002),
accessed on December 1, 2015.
This dataset can be downloaded in a Esri ArcGRID format using the
\href{https://viewer.nationalmap.gov/viewer/}{The National Map Viewer}.
Elevation datasets are distributed in geographic coordinates in units of decimal degrees,
and in conformance with the NAD 83.
Elevation values are in meters above the NAVD 88.
The west, east, south, and north bounding coordinates for this dataset are
-115, -114, 43, and 44 decimal degrees, respectively.
Post-processing includes:
(1) project the values of the elevation dataset into the \code{\link{alluvium.thickness}}
spatial grid using bilinear interpolation, and
(2) set values in cells where the elevation of the alluvium bottom is missing to NA.
}
\usage{
land.surface
}
\description{
The Wood River Valley (WRV) is a geologic feature located in south-central Idaho.
This dataset gives the topography of the land surface in the WRV and vicinity.
}
\examples{
raster::image(land.surface)
summary(land.surface)
}
\references{
Gesch, D.B., 2007, The National Elevation Dataset, in Maune, D., ed.,
Digital Elevation Model Technologies and Applications: The DEM Users Manual,
2nd Edition: Bethesda, Maryland, American Society for Photogrammetry and Remote Sensing,
p. 99-118.
Gesch, D., Oimoen, M., Greenlee, S., Nelson, C., Steuck, M., and Tyler, D., 2002,
The National Elevation Dataset: Photogrammetric Engineering and Remote Sensing,
v. 68, no. 1, p. 5-11.
}
\keyword{datasets}
|
030574a1d7f152a5fa1cbb837c39f7f1f5306dbf
|
7d036925bfec97d40e0763440fcb44bb30d068ae
|
/preprocessing_functions.R
|
b8b7e040dfa083c3001678cdd90d855a87218822
|
[] |
no_license
|
dpjmullins/CourseraDataScienceCapstone
|
8466651cf34611efb403e32840aa57dcbc404385
|
e0541dcc110c1ad63199c555ec69280cb90a8730
|
refs/heads/master
| 2022-02-12T07:08:51.245704
| 2022-01-27T10:14:00
| 2022-01-27T10:14:00
| 205,106,305
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,063
|
r
|
preprocessing_functions.R
|
## Functions
library(readr)
library(tm)
badwords <- read_lines(file = 'unsavoury_language.txt')
word_contractions <- read_csv(file = 'word_contractions.txt')
### Functions to chance specific patterns to blanks or spaces
toSpace <- content_transformer(function (x, pattern) gsub(pattern, " ", x))
toNothing <- content_transformer(function (x, pattern) gsub(pattern, "", x))
# Remove email addresses from document text.
# Fixed: the original called stringr::str_replace_all(), but stringr is never
# loaded in this file (only readr and tm are); base gsub() performs the same
# substitution with the identical pattern.
RemoveEmail <- content_transformer(function(x) {
  gsub("[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+", "", x)
})
## Modify word contractions to be one word for more predictive power
pattern_to_pattern <- content_transformer(function (x, pattern1, pattern2) gsub(pattern1, pattern2, x))
### Function to perform numerous transformations over a text corpus
# Apply the full cleaning pipeline to a tm text corpus and return it.
# Uses the transformers defined above (RemoveEmail, toSpace, toNothing,
# pattern_to_pattern) plus `badwords` and `word_contractions` from this file.
text_transformer <- function(text_corpus) {
  ## Remove emails
  text_corpus <- tm_map(text_corpus, RemoveEmail)
  ## Remove punctuation (deleted outright — the original comment said
  ## "convert to whitespace", but toNothing substitutes the empty string)
  text_corpus <- tm_map(text_corpus, toNothing, "[[:punct:]]")
  ## Convert to lower case
  text_corpus <- tm_map(text_corpus, content_transformer(tolower))
  ## Remove numbers
  text_corpus <- tm_map(text_corpus, removeNumbers)
  ### Remove common English stopwords
  #text_corpus <- tm_map(text_corpus, removeWords, stopwords("english"))
  ## Remove words with 3 or more repeated letters
  text_corpus <- tm_map(text_corpus, toSpace, "(.)\\1{2,}")
  ## Fix word contractions so each becomes a single predictive token.
  ## Fixed: `[[...]][i]` extracts plain character scalars; the original's
  ## `word_contractions[i, 1]` yields a 1x1 tibble (readr) rather than a
  ## string, and `1:nrow()` misfires on an empty table.
  for (i in seq_len(nrow(word_contractions))) {
    text_corpus <- tm_map(text_corpus, pattern_to_pattern,
                          word_contractions[[1]][i], word_contractions[[2]][i])
  }
  ### Remove any remaining single letter words
  #text_corpus <- tm_map(text_corpus, toSpace, "\\b(.)\\b")
  ## Remove additional stopwords and badwords
  text_corpus <- tm_map(text_corpus, removeWords, c("rt", badwords))
  ## Remove long words that may be invalid - >15 letters chosen as cut-off here
  text_corpus <- tm_map(text_corpus, toSpace, "[[:alpha:]]{15,}")
  ## Remove extra whitespace
  text_corpus <- tm_map(text_corpus, stripWhitespace)
  text_corpus
}
|
bda666f42d334c01c28f097c46ce65e9d3cbf132
|
422821c4d4c0d89c9ea90ac66ae7750699dcec2c
|
/R/compute_field_sun_angle.R
|
be6ab471d749a66bbc5f3a811b32ea1dfb1420b0
|
[] |
no_license
|
han-tun/fvcom.tbx
|
64cb71f2f92b5e86df469f4e87162d9c9f306870
|
e371b060f1438b56b56b4bfa46a5080fd9672d98
|
refs/heads/master
| 2022-12-25T05:46:39.322115
| 2020-10-14T14:31:08
| 2020-10-14T14:31:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,147
|
r
|
compute_field_sun_angle.R
|
#' @title Compute sun angle across an FVCOM mesh
#' @description This function computes the sun angle across a spatial mesh on the dates/times specified. To calculate sun angle, the user must specify a dataframe containing node IDs and associated coordinates across which sun angle is evaluated. (Sun angle, a scalar variable, is computed at nodes for consistency with other FVCOM outputs.) Next, the user must specify the date and hours on that date for which to calculate sun angle. Finally, the user needs to specify whether or not sun angle should be returns in degrees or radians, the directory to save files (not required) and whether or not to print messages/progress to the console.
#'
#' @param nodexy A dataframe containing node ids and decimal coordinates (in latitude/longitude). The dataframe should have three columns: 'node_id', 'x' and 'y'. See \code{\link[fvcom.tbx]{dat_nodexy}} for an example.
#' @param date A vector of dates (see \code{\link[base]{Date}}) for which sun angle is to be calculated.
#' @param tz A character vector specifying the time zone. The default is \code{"UTC"}.
#' @param sink_file (optional) A character specifying the name of sun angle fields, if saved as files (see \code{dir2save}, below). If \code{dir2save = TRUE} and \code{sink_file = NULL}, \code{\link[fvcom.tbx]{date_name}} is used to define file names from inputted dates.
#' @param hours A integer vector specifying the hours at which you want to calculate sun angle.
#' @param units A character input defining the units (\code{"degrees"} or \code{"radians"} of sun angle.
#' @param dir2save (optional) A string specifying the directory in which to save sun angle files.
#' @param verbose A logical input specifying whether or not messages and a progress bar should be printed to the console. The default is TRUE.
#'
#' @return For each date, the function creates a matrix of hours x mesh cells containing sun angles. Matrices are either returned as a list or saved as .rds files, with one file per day (if \code{dir2save = TRUE}).
#'
#' @examples
#'
#' #### (1) Compute sun angle across a sample of WeStCOMS nodes
#' sun_angle <-
#' compute_field_sun_angle(
#' nodexy = dat_nodexy,
#' date = as.character("2016-01-01"),
#' tz = "UTC",
#' hours = 0:23,
#' units = "degrees",
#' dir2save = NULL,
#' verbose = TRUE
#' )
#'
#' @author Edward Lavender
#' @source This function is a wrapper for \code{\link[suncalc]{getSunlightPosition}} function.
#' @export
################################################
################################################
#### compute_field_sun_angle
compute_field_sun_angle <-
  function(nodexy,
           date,
           tz = "UTC",
           hours = 0:23,
           units = "degrees",
           sink_file = NULL,
           dir2save = NULL,
           verbose = TRUE
  ){

    #### Checks
    # Normalise the save directory (check_dir enforces a trailing slash).
    if(!is.null(dir2save)) dir2save <- check_dir(input = dir2save, check_slash = TRUE)

    #### Define dataframe to calculate sun_angle:
    if(verbose) cat("fvcom.tbx::compute_field_sun_angle() called...\n")
    # Column position of each node in the output matrices.
    nodexy$index <- 1:nrow(nodexy)
    date <- as.POSIXct(date, tz = tz)
    # Expand each date into one timestamp per requested hour.
    secs <- hours*60*60
    timestamp <- lapply(date, function(day) day + secs)
    timestamp <- sort(do.call(c, timestamp))
    # do.call(c, ...) can drop the time-zone attribute; restore it.
    lubridate::tz(timestamp) <- tz
    # One row per timestamp x node combination.
    dat <- expand.grid(timestamp, nodexy$node_id)
    colnames(dat) <- c("date", "mesh_ID")
    dat$nodexy_index <- nodexy$index[match(dat$mesh_ID, nodexy$node_id)]
    dat$lat <- nodexy$y[match(dat$mesh_ID, nodexy$node_id)]
    dat$lon <- nodexy$x[match(dat$mesh_ID, nodexy$node_id)]
    dat$hour <- lubridate::hour(dat$date)
    # Sort by date then node: the matrix fill below relies on this order.
    dat <- dat[order(dat$date, dat$mesh_ID), ]

    #### Compute sun angle
    if(verbose) cat("Computing sun angle...\n")
    # suncalc returns the sun altitude in radians.
    dat$altitude <- suncalc::getSunlightPosition(data = dat[, c("date", "lon", "lat")], keep = c("altitude"))$altitude
    # NOTE(review): check_value()'s return value is not assigned, so an
    # invalid `units` is warned about but not corrected before the test
    # below — confirm whether that is intended.
    check_value(arg = "units", input = units, supp = c("degrees", "radians"), warn = TRUE, default = "degrees")
    if(units == "degrees") dat$altitude <- dat$altitude * (180/pi)

    #### Define matrices
    if(verbose) cat("Defining sun angle arrays...\n")
    # One matrix per day: rows = hours, columns = nodes, filled row-wise
    # (valid because dat is sorted by date then node above).
    dat_by_date <- split(dat, f = as.Date(dat$date))
    nrw <- length(hours)
    ncl <- max(nodexy$index)
    sun_angle_mat_ls <- lapply(dat_by_date, function(d){
      mat <- matrix(d$altitude, nrow = nrw, ncol = ncl, byrow = TRUE)
      colnames(mat) <- nodexy$node_id
      rownames(mat) <- hours
      return(mat)
    })

    #### Save file for specified date in appropriate location, if specified:
    # If the user has supplied a dir2save...
    if(!is.null(dir2save)){
      if(verbose) cat("Saving sun angle arrays... \n")
      # Define file names, if not provided
      if(is.null(sink_file)) sink_file <- date_name(date, define = "date_name")
      # Save each daily matrix as <dir2save><name>.rds (nothing is returned).
      out <- mapply(sun_angle_mat_ls, sink_file, FUN = function(sun_angle_mat, file){
        saveRDS(sun_angle_mat, paste0(dir2save, file, ".rds"))
      })
    } else{
      return(sun_angle_mat_ls)
    }
  }
#### End of code.
################################################
################################################
|
bfc6391d83ef9b0d4343a62007661e15de6b9a09
|
3a7dc9233fbf64759b5234a18f45a66f8e1cfd96
|
/man/add_census_slot.Rd
|
2c3d239ac48f5169887cbec4f07904cd26e4a34f
|
[
"MIT"
] |
permissive
|
whtns/seuratTools
|
2b3328ce9cf7f3dcdddd03786a8baf87d8e2d646
|
39b6cf4e73f9fa8a3f1a85330cc0bcbf9c302297
|
refs/heads/master
| 2023-06-23T04:33:46.584459
| 2023-06-22T22:50:58
| 2023-06-22T22:50:58
| 179,151,711
| 9
| 0
|
NOASSERTION
| 2021-08-17T20:20:43
| 2019-04-02T20:14:56
|
R
|
UTF-8
|
R
| false
| true
| 324
|
rd
|
add_census_slot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monocle2.R
\name{add_census_slot}
\alias{add_census_slot}
\title{add census assay to a seurat object}
\usage{
add_census_slot(seu, assay = "gene", slot = "counts")
}
\arguments{
\item{seu}{A Seurat object.}

\item{assay}{Name of the assay to read from (default \code{"gene"}).}

\item{slot}{Name of the assay slot to read from (default \code{"counts"}).}
}
\description{
add census assay to a seurat object
}
|
bef94af656f3d665b9ca2363920324fa02f492c7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/oro.nifti/examples/audit_trail.Rd.R
|
e69166b76d3aeab8785b99b763577907fd5a2f64
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,685
|
r
|
audit_trail.Rd.R
|
library(oro.nifti)
### Name: Audit Trails
### Title: Facilitate the Creation and Modification of Audit Trails
### Aliases: 'Audit Trails' oro.nifti.info enableAuditTrail newAuditTrail
### niftiExtensionToAuditTrail niftiAuditTrailToExtension
### niftiAuditTrailSystemNode niftiAuditTrailSystemNodeEvent
### niftiAuditTrailCreated niftiAuditTrailEvent getLastCallWithName
### ** Examples
## A good example of the use of these functions is shown by this
## wrapper function which takes a function fun(nim, ...) returning
## lists of arrays which are nifti-ized using as(...)
options("niftiAuditTrail"=TRUE)
enableAuditTrail()
# Run `functionToWrap(nim, ...)` while maintaining the nifti audit trail:
# record a "processing" event attributed to `nameOfCallingFunction`, then
# nifti-ize the raw result using `nim` as the template via the as(...)<-
# replacement form.
wrapper <- function(functionToWrap, nameOfCallingFunction, nim, ...) {
  # Coerce non-nifti inputs (e.g. plain arrays) so the slots below exist.
  if (!is(nim, "nifti"))
    nim <- as(nim, "nifti")
  if (is(nim, "niftiAuditTrail")) {
    ## This will force as(...) to set the call which created the
    ## results to the calling function's call rather than
    ## as(result, nifti) as it would otherwise do
    slot(nim, "trail") <- niftiAuditTrailEvent(slot(nim, "trail"), "processing",
                                               nameOfCallingFunction)
  }
  result <- functionToWrap(nim, ...)
  # Replacement form: copies nim's header (and trail) onto result.
  as(result, "nifti") <- nim
  return(result)
}
## An example of how wrapper is used follows:
# Toy payload for the wrapper example: ignores its first argument and
# returns an x-by-y array filled with 1s.
functionToWrap <- function(ignored, x, y) {
  array(1, dim = c(x, y))
}
## The nifti-ized form
# Convenience form: run functionToWrap through the audit-trail wrapper,
# recording "niftiizedForm" as the name of the calling function.
niftiizedForm <- function(nim, ...) {
  wrapper(functionToWrap, "niftiizedForm", nim, ...)
}
## Not run:
##D if (isTRUE(getOption("niftiAuditTrail"))) {
##D print(slot(as.nifti(functionToWrap(nifti(), 4, 4), nifti()), "trail"))
##D print(slot(niftiizedForm(nifti(), 4, 4), "trail"))
##D }
## End(Not run)
|
9f5afd583f1290881ba589ae5d126359506106c7
|
e0a2289118030bf9600d5684bdf648442cc5f208
|
/2X/2.14/ReactomePA/R/viewPathway.R
|
fe55822d781c0525f22153f56278f1b83a4c5f51
|
[] |
no_license
|
GuangchuangYu/bioc-release
|
af05d6d7fa9c05ab98006cd06ea8df39455e8bae
|
886c0ccb1cd2d3911c3d363f7a0cd790e72329b7
|
refs/heads/master
| 2021-01-11T04:21:25.872653
| 2017-08-03T05:11:41
| 2017-08-03T05:11:41
| 71,201,332
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,541
|
r
|
viewPathway.R
|
##' view reactome pathway
##'
##' plotting reactome pathway
##' @title viewPathway
##' @param pathName pathway Name
##' @param organism supported organism (currently only "human")
##' @param readable logical; convert gene IDs to symbols
##' @param foldChange named numeric vector of fold changes (names are gene IDs)
##' @param ... additional parameters passed to \code{netplot}
##' @importFrom graphite convertIdentifiers
##' @importFrom graphite pathwayGraph
##' @importFrom igraph igraph.from.graphNEL
##' @importFrom DOSE scaleNodeColor
##' @importFrom DOSE netplot
##' @importFrom DOSE EXTID2NAME
##' @importFrom DOSE setting.graph.attributes
##' @return plot
##' @export
##' @author Yu Guangchuang
viewPathway <- function(pathName,
                        organism="human",
                        readable=TRUE,
                        foldChange=NULL, ...){
    ## graphite supplies the pathway data; fail loudly if it is missing.
    ## (Fixed: bare require() returns FALSE silently, deferring the failure
    ## to a confusing error further down.)
    pkg <- "graphite"
    if (!require(pkg, character.only=TRUE)) {
        stop("package 'graphite' is required but not installed", call. = FALSE)
    }
    ## Look up the 'reactome' pathway list attached by graphite.
    ## (Fixed: replaces the eval(parse(text = ...)) anti-pattern.)
    reactome <- get("reactome")
    p <- reactome[[pathName]]
    if (organism != "human") {
        stop("the specific organism is not supported yet...")
        ## p@species
    }
    if (readable) {
        ## map IDs to gene symbols, in both the pathway and foldChange names
        p <- convertIdentifiers(p, "symbol")
        if (!is.null(foldChange)){
            gn <- EXTID2NAME(names(foldChange),organism)
            names(foldChange) <- gn
        }
    } else {
        if (!is.null(foldChange)) {
            p <- convertIdentifiers(p, "entrez")
        }
    }
    ## pathway -> graphNEL -> igraph, then style and plot
    g <- pathwayGraph(p)
    gg <- igraph.from.graphNEL(g)
    gg <- setting.graph.attributes(gg)
    if (!is.null(foldChange)) {
        gg <- scaleNodeColor(gg, foldChange)
    }
    netplot(gg, foldChange=foldChange, ...)
}
|
e813e32f5640e7fa78b1abd4b45754ed95de75fb
|
c94550c3d9a84c7073c51db924db909181498100
|
/ui.R
|
ad364796d8148422ff9e9311251b7f3bdcf2e4b0
|
[] |
no_license
|
aleruete/thisislp
|
682c5d61f48145a6cc9b65bfb226e601b22879bc
|
3ac51df56106bc8fa7061d53d73d202ea58f5fbe
|
refs/heads/master
| 2021-02-17T16:00:00.155031
| 2020-03-05T00:57:34
| 2020-03-05T00:57:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,648
|
r
|
ui.R
|
# Per-album analytics layout: ten highcharter outputs arranged as two
# half-width rows, one full-width, four half-width, one full-width, and two
# half-width charts. Output ids follow "<prefix>_analytics_<i>" and are
# rendered in server.R. Factored out because four album tabs previously
# repeated the same ten column() calls verbatim.
albumAnalyticsColumn <- function(prefix) {
  widths <- c(6, 6, 12, 6, 6, 6, 6, 12, 6, 6)
  column(9,
         lapply(seq_along(widths), function(i) {
           column(widths[i],
                  highchartOutput(paste0(prefix, "_analytics_", i)))
         }))
}

shinyUI(
  birdPage("This is LP", color = "#ff963b",
  tabPanel("Home",
    frontPage(
      title = "This is LP",
      subtitle = "LP (born 1981) is an American artist. In her words: \"When I get on the mic and start doing melodies, I can feel that direct line from my heart to my mouth\".",
      background = "L.P._180814-0254+copy.jpg"
    )
  ),
  tabPanelWithTitle(
    "Heart to Mouth",
    "Heart to Mouth (2018) is the fifth studio album by LP.
    It was preceded by the singles \"Girls Go Wild\" and \"Recovery\".",
    get_album(),
    albumAnalyticsColumn("htm")
  ),
  tabPanelWithTitle(
    "Lost on You",
    "Lost on You (2016) is the fourth studio album by LP.
    It was preceded by the singles \"Muddy Waters\", \"Lost on You\" and
    \"Other People\".",
    get_album("loy_input", names(loy), "Muddy Waters", "loy_songs"),
    albumAnalyticsColumn("loy")
  ),
  tabPanelWithTitle(
    "Forever For Now",
    "Forever for Now (2014) is the third studio album by LP.
    It was preceded by the singles \"Night Like This\" and \"Someday\".",
    get_album("ffn_input", names(ffn), "Heavenly Light", "ffn_songs"),
    albumAnalyticsColumn("ffn")
  ),
  tabPanelWithTitle(
    "Suburban Sprawl and Alcohol",
    "Suburban Sprawl and Alcohol (2004) is the second studio album by LP.
    It was edited independently.",
    get_album("ssa_input", names(ssa), "Wasted", "ssa_songs"),
    # This album has no Spotify analytics; show an embedded video instead.
    column(9,
      HTML("This album is not available in Spotify, so here's an Easter Egg :-)<br><br>
      <iframe width='100%' height='550' src='https://www.youtube.com/embed/0vyUlO3qgLg' frameborder='0' allow='accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture' allowfullscreen></iframe>"))
  ),
  tabPanelWithTitle(
    "Heart-Shaped Scar",
    "Heart-Shaped Scar (2001) is the first studio album by LP.
    It was edited independently.",
    get_album("hss_input", names(hss), "Perfect", "hss_songs"),
    albumAnalyticsColumn("hss")
  ),
  tabPanelWithTitle(
    "How I made this",
    column(12,
      HTML("<p>I used the packages Rspotify, tidyverse, and highcharter.
      In addition I created the <a target='_blank' href='https://github.com/pachamaltese/shinybird'>shinybird package</a>
      for the Shiny layout and the <a target='_blank' href='https://github.com/pachamaltese/lp'>lp package</a>
      to have the lyrics in R.<p>")
    )
  )
  )
)
|
7a7e1f4f7403f5455a77d22bee9d467e315e44f3
|
e3259d8f489b093b246fe2fd0c4fb6999d6466bf
|
/CampR-master/man/binom.Rd
|
8fde107d110a9b78d5aa95c6a2c66ca45a1384b5
|
[] |
no_license
|
Franvgls/CampR
|
7baf0e8213993db85004b95d009bec33570c0407
|
48987b9f49ea492c043a5c5ec3b85eb76605b137
|
refs/heads/master
| 2023-09-04T00:13:54.220440
| 2023-08-22T14:20:40
| 2023-08-22T14:20:40
| 93,841,088
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 896
|
rd
|
binom.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binom.R
\name{binom}
\alias{binom}
\title{Distribucion binomial: exitos necesarios para ser significativo}
\usage{
binom(n, p.teorica = 0.5)
}
\arguments{
\item{n}{Número de eventos a tener en cuenta}
\item{p.teorica}{probabilidad teórica de la distribución que se quiere comprobar.}
}
\value{
Devuelve una matriz con tres columnas: la probabilidad en función del número de casos que se consideren como un éxito, y los intervalos de confianza inferior y superior. A partir de que p baja de 1 y llega a 0.05, se puede considerar que el resultado no se debe al azar.
}
\description{
Función para la MSFD y cálculo del número de especies que hacen que unos resultados estén fuera de lo que cabría esperar al azar, para ver mejoras en la biodiversidad por especie.
}
\examples{
#binom(24)
}
|
2d374bf89ec495ff7460d3fd348376ae35318a25
|
4b55d05caca0e379c862ca7ae242d9d112c160d0
|
/PFCscript.R
|
24595dae83aeaeedfbdd317f477bb7b34c13b651
|
[] |
no_license
|
prooplal/PFC_CIND820
|
995c8b104d0df69ef344d72130c6d8ff5d10c2eb
|
1ae6489071e901adf1425a9cd2eadf18055bab8a
|
refs/heads/main
| 2023-06-29T23:34:06.392930
| 2021-07-31T04:44:52
| 2021-07-31T04:44:52
| 391,256,219
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,168
|
r
|
PFCscript.R
|
# Merge the Global Crises data set with the Global Debt database, impute
# missing values with mice, and fit caret random-forest classifiers for
# currency (CC) and inflation (IC) crises.
# NOTE(review): GDB and Gcrisis must already exist in the workspace; this
# script does not load them.
#merge Global Crises data set with Global Debt database
# NOTE(review): the inner `by=` only names an element of the c() vector --
# equivalent to by = c("Country", "Year"), but probably a typo.
MergedD <- merge(GDB, Gcrisis, by= c("Country", by= "Year"))
str(MergedD)
summary(MergedD)
# Data frame with columns to be used
Model1 <- MergedD[, c(-3,-4,-5,-12,-13,-17,-18)]
# Move dependent variables to end columns
Model1$BC <- Model1$`Banking Crisis`
Model1$SC <- Model1$`Systemic Crisis`
Model1a <- Model1[, c(-12,-13)]
# Impute missing values
library(mice)
library(VIM)
# Simplify model column names
names(Model1a) <- c("Cty", "Yr", "TPDall", "TPDlds", "HDall", "HDlds", "NFCDall", "NFCDlds", "Ggd", "Cgd", "GDPbils", "DDDef", "GDPwDef", "In", "CC", "IC", "BC","SC")
# Assess missing data
aggr_plot <- aggr(Model1a, col=c('navyblue','red'), numbers=TRUE, sortVars=TRUE, labels=names(Model1a), cex.axis=.7, gap=3, ylab=c("Histogram of missing data","Pattern"))
# Percentage of missing values in a vector.
pMiss <- function(x){sum(is.na(x))/length(x)*100}
apply(Model1a,2,pMiss) # % missing per column
apply(Model1a,1,pMiss) # % missing per row
md.pattern(Model1a)
# Use Mice "cart"(Classification and Regression Trees) method to impute missing values
Model1a.imp <- mice(Model1a, m=5, method = 'cart', seed = 101)
Imp.Model <- complete(Model1a.imp)
# Replace lower case "n/a" with Zero *****
Imp.Model$DDDef[Imp.Model$DDDef == "n/a"] <- 0
library(caret)
library(doSNOW)
library(caTools)
# CURRENCY CRISIS PREDICTION
# Remove Country and other crisis predictors
Imp.Model.cc <- Imp.Model[c(-1,-16,-17,-18)]
# Collapse the rare "2" code into 1 so CC is binary before factoring.
Imp.Model.cc$CC[Imp.Model.cc$CC == "2"] <- 1
Imp.Model.cc$CC <- as.factor(Imp.Model.cc$CC)
Imp.Model.cc$DDDef <- as.numeric(Imp.Model.cc$DDDef)
# Set response variable to Yes/No
levels(Imp.Model.cc$CC) <- c("No", "Yes")
# Create train and test Data sets
ind <- createDataPartition(Imp.Model.cc$CC, p = .80, list = FALSE)
train.cc <- Imp.Model.cc[ind,]
test.cc <- Imp.Model.cc[-ind,]
# setting seed to generate a
# reproducible random sampling
set.seed(12345)
# mtry values to try during tuning (features sampled at each tree split).
parameterGrid <- expand.grid(mtry = c(2,3,4))
# defining training control
# as cross-validation and
# value of K equal to 10
train_control <- trainControl(method = "cv",
                              number = 10,savePredictions = TRUE, classProbs = TRUE)
# training the model by assigning Currency Crisis(cc) column
# as target variable and rest other column
# as independent variable
model.cc <- train(CC ~., data = train.cc, method = "rf",
                  trControl = train_control, tuneGrid = parameterGrid)
print(model.cc)
# Run model on test set
predictions.model.cc <- predict(model.cc, test.cc)
# Create Confusion matrix
tpredictions.cc <- table(predictions.model.cc, actual = test.cc$CC)
tpredictions.cc
# INFLATION CRISIS PREDICTION
# Remove Country and other crisis predictors
Imp.Model.ic <- Imp.Model[c(-1,-15,-17,-18)]
table(Imp.Model.ic$IC)
#Convert 3 n/a and 5 NA's to 0
Imp.Model.ic$IC[Imp.Model.ic$IC == "n/a"] <- 0
Imp.Model.ic$IC[is.na(Imp.Model.ic$IC)] <- 0
Imp.Model.ic$IC <- as.factor(Imp.Model.ic$IC)
Imp.Model.ic$DDDef <- as.numeric(Imp.Model.ic$DDDef)
# Set response variable to Yes/No
levels(Imp.Model.ic$IC) <- c("No", "Yes")
str(Imp.Model.ic)
# Create train and test Data sets
ind <- createDataPartition(Imp.Model.ic$IC, p = .80, list = FALSE)
train.ic <- Imp.Model.ic[ind,]
test.ic <- Imp.Model.ic[-ind,]
table(train.ic$IC)
summary(train.ic$IC)
# setting seed to generate a
# reproducible random sampling
set.seed(12345)
parameterGrid <- expand.grid(mtry = c(2,3,4))
# defining training control
# as cross-validation and
# value of K equal to 10
train_control <- trainControl(method = "cv",
                              number = 10,savePredictions = TRUE, classProbs = TRUE)
# training the model by assigning Inflation Crisis (IC) column
# as target variable and rest other column
# as independent variable
model.ic <- train(IC ~., data = train.ic, method = "rf",
                  trControl = train_control, tuneGrid = parameterGrid)
print(model.ic)
# Run model on test set
predictions.model.ic <- predict(model.ic, test.ic)
# Create Confusion matrix
tpredictions.ic <- table(predictions.model.ic, actual = test.ic$IC)
tpredictions.ic
|
3c566689da98b961fbe73138285c2137fa4afca6
|
df1722213bed9cf7e95f5bd48b4251b551577100
|
/bcd_emp_3_a_forecast_MINLP.r
|
a3f7fa44a80be3957cc287a0334a56588d577388
|
[] |
no_license
|
stefanrameseder/BiddingCurves
|
eac27cf0075ad2edcfb27600a97796e615e6abf5
|
4ef423d20b3a51b671d64436244490f19699a46c
|
refs/heads/master
| 2020-07-02T19:40:56.637369
| 2019-11-01T10:08:17
| 2019-11-01T10:08:17
| 201,641,250
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,999
|
r
|
bcd_emp_3_a_forecast_MINLP.r
|
# Rolling forecast of bidding-curve quantiles per product (PMHTNT) using a
# MINLP solved via simulated annealing (GenSA), followed by evaluation and
# plotting of the forecasts against the realised maximum accepted prices.
# NOTE(review): relies on many globals created upstream (ver, H, Bootstrap,
# K, model, PMHTNT, T, bdQuan, bdMax, bdMW, bdDem, dates, forecastInd,
# loadedFunctions, lossFunction, rho, B, maxit, temp, readKey,
# summarizeFC, ...). `T` here is a sample-size global that shadows the TRUE
# shorthand -- confirm it is set before this script runs.
# Version for Saving the .RData
print(version <- paste0(ver,"_H", H, "_Bootstrap", Bootstrap, "_K", K, "_Mod_", model))
# make the cluster
cl <- makeCluster(8)
# load required libraries
clusterEvalQ(cl, {
  library(forecast)
  library(parallel)
  library(GenSA)
  library(timeSeries)
})
## export required objects
clusterExport(cl , varlist = c("H", "Bootstrap", "K", "forecastInd", "model",
                               "dates", "bdQuan", "bdMax","bdMW","bdDem",
                               loadedFunctions, "biddata", # "bidcurves","bidcurves_smoothed",
                               "maxit", "temp") )
set.seed(1)
#### Methodology: forecastCP in t means data up to t for a forecast for t+1
pmhtntFC <- pblapply(PMHTNT, function(pmhtnt){ # pmhtnt <- "POS_HT"
  # Define (lagged) matrices for all horizons:
  y <- bdQuan[[pmhtnt]][ , c("min", "gavg", "max")] # y[1:2,]
  y1 <- rbind(0, y[-T, ]) # y1[1:2,]
  y2 <- rbind(0, y1[-T, ]) # y2[1:3,]
  y3 <- rbind(0, y2[-T, ]) # y2[1:3,]
  y4 <- rbind(0, y3[-T, ]) # y2[1:3,]
  y5 <- rbind(0, y4[-T, ]) # y2[1:3,]
  y6 <- rbind(0, y5[-T, ]) # y2[1:3,]
  # Choose the Matrix for the bidding function
  #yMAT <- as.matrix(cbind(y[ ,c("gavg", "max")], y1[ ,c("gavg", "max")], y2[ ,c("max")], y3[ ,c("max")], y4[ ,c("max")], y5[ ,c("max")]))
  yMAT <- as.matrix(cbind(y[ ,c("gavg", "max")], y1[ ,"max"], y2[ ,"max"], y3[ ,"max"]))
  yMAX <- y[ , "max"]
  # One coefficient per regressor column; box constraints for the optimiser.
  nPar <- dim(yMAT)[2]
  low <- rep(-10, times = nPar)
  up <- rep(10, times = nPar)
  ## export again
  clusterExport(cl , varlist = c("yMAT", "yMAX", "nPar", "low", "up", "maxit",
                                 "temp") )
  # NOTE(review): the parallel version is commented out -- the cluster is
  # created and exported to, but the actual fitting runs serially via lapply.
  #sys.time <- system.time(fcs <- parLapply(cl, forecastInd, function(t){
  sys.time <- system.time(fcs <- lapply(forecastInd, get(model),
                                        K = K, nPar = nPar, lossFunction = lossFunction,
                                        rho = rho, yMAT = yMAT, yMAX = yMAX,
                                        low = low, up = up, maxit=maxit, temp = temp))
  names(fcs) <- dates[forecastInd+1]
  print(file <- paste0("R_Data/bcd_",pmhtnt, "_", version,".RData"))
  #save(fcs, file = file)
  print(sys.time)
  return(fcs)
})
print(file <- paste0("R_Data/bcd_fc_minlp_", version,".RData"))
names(pmhtntFC) <- PMHTNT
save(pmhtntFC, file = file)
stopCluster(cl)
load(file)
#### Evaluate methodology
sumUp <- summarizeFC(pmhtntFC, B, bdQuan, PMHTNT)
#### plot methodology
str(pmhtntFC[[pmhtnt]], max = 1)
# Extract forecasts, realised max/gavg prices, and non-acceptance flags.
fcPMHTNT <- lapply(PMHTNT, function(pmhtnt) sapply(pmhtntFC[[pmhtnt]], function(fcs) fcs["fc"]))
names(fcPMHTNT) <- PMHTNT
maxPMHTNT <- lapply(PMHTNT, function(pmhtnt) bdMax[[pmhtnt]][forecastInd+1])
names(maxPMHTNT) <- PMHTNT
gavgPMHTNT <- lapply(PMHTNT, function(pmhtnt) bdQuan[[pmhtnt]][forecastInd+1, "gavg"])
names(gavgPMHTNT) <- PMHTNT
NotAccPMHTNT <- lapply(PMHTNT, function(pmhtnt) sapply(pmhtntFC[[pmhtnt]], function(fcs) fcs["acc"]==0))
names(NotAccPMHTNT) <- PMHTNT
# Per-product diagnostic plot; readKey() pauses between products.
for(pmhtnt in PMHTNT){
  plot(forecastInd, fcPMHTNT[[pmhtnt]], col = "blue", type = "l")
  lines(forecastInd, gavgPMHTNT[[pmhtnt]], lty = 2)
  lines(forecastInd, maxPMHTNT[[pmhtnt]], lty = 1, col = "darkred", lwd = 2)
  points(x= forecastInd[NotAccPMHTNT[[pmhtnt]]], y= rep(0, times = length(forecastInd[NotAccPMHTNT[[pmhtnt]]])) , col = "red", pch = 16, cex =1)
  abline(v= forecastInd[NotAccPMHTNT[[pmhtnt]]], col = "darkred", lwd =0.5)
  abline(h=0, col ="black", lwd = 2)
  accAperfRate <- round(sumUp[pmhtnt, ],2)*100
  legend( "topleft", col = c("blue", "darkred", "red", NA), inset = 0.01,
          legend = c(paste0("Forecast (Performance: ", accAperfRate[2], "%)"), "Highest accepted price", paste0("# Non-accepted (", 100-accAperfRate[1], "%)")),
          lwd=c(2,1.5,2, NA), lty = "solid", bg = "white")
  readKey()
}
# Final time-series figure for the last product iterated above.
df <- cbind(fc = fcPMHTNT[[pmhtnt]], max = maxPMHTNT[[pmhtnt]], gavg = gavgPMHTNT[[pmhtnt]])
mult_ts <- timeSeries( data = df,
                       charvec = dates[forecastInd],
                       format = "%Y-%m-%d")
#pdf(paste0("../latexGraphics/",pm,"_",htnt,".pdf"), width = 10, height = 8)
plot(mult_ts, plot.type="s", ylab = paste0(pmhtnt, " (EUR/MW)"),
     #at = "pretty",
     format = "%d\n%m\n%Y", at = seq(range(dates[forecastInd])[1], range(dates[forecastInd])[2], length.out=10), xlab = "Date", cex.axis = 1.25,
     col=c("blue", "black", "black"), lwd = c(2,1.5,2), lty = c(1,1,2))
names(NotAccPMHTNT[[pmhtnt]]) <- dates[forecastInd]
points(NotAccPMHTNT[[pmhtnt]]*10, col = "darkred", pch = 16, cex = 5)
points(fcPMHTNT[[pmhtnt]], col = "darkred", pch = 1, cex = 5)
# NOTE(review): `v=` below has no value -- this call errors at run time;
# the intended vertical-line position appears to be missing.
abline(v=, col = "red")
points(dates[forecastInd][1], 800, col = "red")
accAperfRate <- round(sumUp[pmhtnt, ],2)
legend( "topleft", col = c("blue", "black", "darkred", NA), inset = 0.01,
        legend = c("Forecast", paste0("MaxAcc (", accAperfRate[2], "%)"), paste0("Acc = -2 (", accAperfRate[1], "%)")),
        lwd=c(2,1.5,2, NA), lty = "solid", bg = "white")
abline(h=0, col ="darkgrey")
|
3bc36c1faf2d107b727ec7b3c502d211fe482b49
|
6c7783c0da4511ea88f1d334849a41f288d157b7
|
/03_scripts/07_create_categorical_variables.R
|
2665ea26acfb808b2fdf2942254fb6449fec53e6
|
[] |
no_license
|
skraftarcher/LA_FW_Sponges
|
fd59296b245edbd92a1aedfe326692e5ce41e30e
|
39c6d325144bf9af40152fe6a41b7f291559e1fd
|
refs/heads/main
| 2023-07-11T13:39:14.195450
| 2021-08-17T18:31:23
| 2021-08-17T18:31:23
| 311,411,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 942
|
r
|
07_create_categorical_variables.R
|
# Load tidyverse, installing it on first run.
if(!require(tidyverse))install.packages("tidyverse");library(tidyverse)

# Site-level freshwater sponge observations.
sp1 <- read.csv("01_odata/Miller_sponge data.csv")

# Bin pH into ordered categories. case_when() takes the first matching
# branch, so using only upper bounds leaves no gaps -- the original
# open intervals (pH > 6 & pH < 7, etc.) silently dropped pH == 6 and
# pH == 7 to NA.
sp2 <- sp1 %>%
  mutate(pH = as.numeric(pH),
         ph.cat = case_when(
           pH < 6 ~ "acidic",
           pH < 7 ~ "slightly.acidic",
           pH < 8 ~ "slightly alkaline",
           pH >= 8 ~ "alkaline"))

# look at how many sites each sponge was found at within each category
sp2 %>%
  mutate(efr2 = ifelse(Efr == 0, 0, 1), # 1 if Efr was found at the site, else 0
         tl2 = ifelse(Tl == 0, 0, 1),
         th2 = ifelse(Th == 0, 0, 1)) %>%
  group_by(ph.cat) %>%
  summarize(n.sites = n(),
            # na.rm (not "rm.na"): the original misspelling was swallowed by
            # sum()'s ..., summing TRUE as an extra element and inflating
            # every count by one.
            n.efr = sum(efr2, na.rm = TRUE), # sites where Efr was found per pH category
            n.tl = sum(tl2, na.rm = TRUE),
            th = sum(th2, na.rm = TRUE))
|
18d2a5b098114ef1df5457eaba74bfc2465f0a46
|
192b80fff0c455f97402a6f5eaf303a0313ec192
|
/ui.R
|
bc19117f9374f7c32db93ef81ed59c453a20216f
|
[] |
no_license
|
pradnyaalc/Visualization_with_Rshiny
|
2c85b9c0019a1ce5d7b32a42806c662ad18fbdc8
|
cc9db03b5f2335b54b5020a0686cf2855ac0b79c
|
refs/heads/master
| 2022-03-01T00:36:07.164580
| 2019-11-16T07:59:36
| 2019-11-16T07:59:36
| 222,057,902
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,405
|
r
|
ui.R
|
#Name: Pradnya Alchetti
#Student Id - 29595916
#Visualization Project
#ui.R
# Two-tab dashboard: a full-screen leaflet map of NYC airport taxi trips
# with tip/payment charts in draggable panels, and a fare-estimator page
# combining a chord diagram, a map, and a line chart.
library(shiny)
library(leaflet)
library(plotly)
library(chorddiag)
library(shinythemes)
#Main Page Panel
fluidPage(theme=shinytheme("cosmo"),
  navbarPage("Taxi Statistics", id="nav",
    # Tab 1 for airport insights
    tabPanel("NYC AirPort Insights",
      div(class="outer",
        tags$head(
          # Include our custom CSS
          includeCSS("styles.css")
        ),
        # If not using custom CSS, set height of leafletOutput to a number instead of percent
        #render leaflet with new york as focus view
        leafletOutput("siteMap", width="100%", height="100%"),
        # Shiny versions prior to 0.11 should use class = "modal" instead.
        #render leaft panel on the leaflet
        absolutePanel(id = "controls_1", class = "panel panel-default", fixed = TRUE,
                      draggable = TRUE, top = 60, left = 20, right = 600, bottom = "auto",
                      width = 200, height = "auto",
                      h3("Trips Explorer"),
                      #Side panel with controls to select airport
                      checkboxGroupInput("airport", "Airports",
                                         c("JFK" = "JFK",
                                           "LGA" = "LGA",
                                           "EWR" = "EWR")),
                      #dropdown for month
                      selectInput("month", "Month of the year 2016",
                                  c("October" = "10",
                                    "November" = "11",
                                    "December" = "12"))
        ),
        # render right panel on the leaflet
        absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
                      draggable = TRUE, top = 60, left = "auto", right = 20, bottom = "auto",
                      width = 500, height = "auto",
                      # render heatmap for tip
                      plotlyOutput("tip"),
                      # render donut chart for payment
                      plotlyOutput("payment",height = 200)
        )
      )
    ),
    # Tab 2 for Fare Estimator
    tabPanel("NYC Fare Estimator",
      fluidRow(
        # dropdown for borough to be placed vertically
        div(style="display: inline-block;vertical-align:top; width: 150px;",
            selectInput("borough", "NY Borough",
                        c("Bronx" = "Bronx",
                          "Brooklyn" = "Brooklyn",
                          "Manhattan" = "Manhattan",
                          "Queens" = "Queens"))),
        # dropdown for pickupZone
        # NOTE(review): "Dummy" looks like a placeholder replaced
        # server-side (e.g. updateSelectInput) -- confirm in server.R.
        div(style="display: inline-block;vertical-align:top; width: 150px;",
            selectInput("pickupZone", "NY Pickup Zone",
                        c("Dummy"))),
        #dropdown for dropoff zone
        div(style="display: inline-block;vertical-align:top; width: 150px;",
            selectInput("dropZone", "NY Dropoff Zone",
                        c("Dummy"))),
        #dropdown for hour
        div(style="display: inline-block;vertical-align:top; width: 100px;",
            selectInput("time", "Hour",
                        c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23)))
      ),
      #render chord diagram
      div(class="outer2",
        chorddiagOutput("zoneCount", width = 600, height = 400)
      ),
      #render leaflet with map of New York focussed
      div(class="outer1",
        leafletOutput("map", height = 400, width = 700)
      ),
      #render line chart
      div(class="outer3",
        plotOutput("line", height = 200, width = 500)
      )
    )
  )
)
|
4f6b18f7f18ee6e843206ef1391f44c9cbb3c11e
|
fb9f62088b0df02e1f86b779011fd445e27f7e5a
|
/tcga.edit.load.R
|
516ffd28ab25416552392d552aa37025c927b808
|
[
"MIT"
] |
permissive
|
michaelsharpnack/RNA_edits
|
d6f17b0379ad245a14c17c7b2221c0b76274abf6
|
fabb6c509ffed97d3ebe94a9e8e6ebc274a143dc
|
refs/heads/master
| 2021-05-15T12:43:41.284400
| 2017-10-27T00:30:49
| 2017-10-27T00:30:49
| 108,479,984
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,590
|
r
|
tcga.edit.load.R
|
# Load matched tumour/normal RNA-seq and RNA-editing matrices for one TCGA
# cancer type (indexed into `cancers`), align the samples by patient
# barcode, and return the pieces as a list:
#   list(cancer, tumour RNA, normal RNA, tumour edits, normal edits,
#        patients present in all four matrices).
# NOTE(review): requires data.table (fread) in scope; hard-codes absolute
# data paths, and the setwd() side effect persists after the call.
tcga.loader <- function(cancer){
  cancers <- c('BLCA','BRCA','CESC','CRCC','GBM','HNSC','KICH','KIRC','KIRP','LGG','LIHC','LUAD','LUSC','PRAD','STAD','THCA','UCEC')
  setwd('/Users/michaelsharpnack/Desktop/David/Metastasis/editing_query/tcga_edits/')
  files.edit <- dir()
  setwd('/Users/michaelsharpnack/Desktop/David/Metastasis/editing_query/tcga.rnaseq/')
  files.rna <- dir()
  setwd('/Users/michaelsharpnack/Desktop/David/Metastasis/editing_query')
  # Gene identifiers come as "SYMBOL|ID"; keep the symbol, after dropping
  # the first 30 non-gene rows.
  temp.rna.geneid <- read.csv(paste('tcga.rnaseq/',files.rna[1],sep=""),stringsAsFactors=FALSE,sep='\t')[,1]
  temp.names <- strsplit(temp.rna.geneid,"\\|")
  temp.names <- temp.names[-c(1:30)]
  rna.geneid <- vapply(temp.names, function(nm) nm[1], character(1))
  rm(temp.names,temp.rna.geneid)
  # RNA-editing matrix: first column holds row names; last column dropped.
  temp.edit <- fread(paste('tcga_edits/',files.edit[cancer],sep=""))
  temp.edit.rownames <- temp.edit[[1]]
  temp.edit <- as.matrix(temp.edit[,-1])
  temp.edit <- temp.edit[,-dim(temp.edit)[2]]
  class(temp.edit) <- 'numeric'
  rownames(temp.edit) <- temp.edit.rownames
  temp.edit.normal <- temp.edit[,grep('Normal',colnames(temp.edit))]
  temp.edit <- temp.edit[,grep('Tumor',colnames(temp.edit))]
  # Trim column names down to the patient barcode. The offset depends on
  # this cancer's code length; the original hard-coded cancers[12] ("LUAD",
  # 4 characters) here, which silently mis-trimmed the 3-letter cohorts
  # (GBM, LGG) -- fixed to cancers[cancer], matching the if() condition.
  if(nchar(cancers[cancer]) == 4){
    colnames(temp.edit) <- substr(colnames(temp.edit),8+nchar(cancers[cancer]),23)
    colnames(temp.edit.normal) <- substr(colnames(temp.edit.normal),9+nchar(cancers[cancer]),24)
  } else {
    colnames(temp.edit) <- substr(colnames(temp.edit),7+nchar(cancers[cancer]),23)
    colnames(temp.edit.normal) <- substr(colnames(temp.edit.normal),8+nchar(cancers[cancer]),24)
  }
  print(paste("Reading in RNAseq file for ",cancers[cancer]))
  # RNA-seq matrix: same first-column/row-name handling and 30-row drop.
  temp.rna <- fread(paste('tcga.rnaseq/',files.rna[cancer],sep=""))
  temp.rna <- as.matrix(temp.rna[,-1])
  temp.rna <- temp.rna[-c(1:30),]
  class(temp.rna) <- 'numeric'
  rownames(temp.rna) <- rna.geneid
  # Barcode position 14 encodes sample type: '1' = normal, '0' = tumour.
  temp.rna.normal <- temp.rna[,substr(colnames(temp.rna),14,14) == '1']
  temp.rna <- temp.rna[,substr(colnames(temp.rna),14,14) == '0']
  colnames(temp.rna.normal) <- substr(colnames(temp.rna.normal),1,12)
  colnames(temp.rna) <- substr(colnames(temp.rna),1,12)
  # Keep only patients present in both tumour matrices.
  tcga.rna <- temp.rna[,intersect(colnames(temp.rna),colnames(temp.edit))]
  tcga.edit <- temp.edit[,intersect(colnames(temp.rna),colnames(temp.edit))]
  # Patients with all four matrices (tumour + normal, RNA + editing).
  tcga.names <- intersect(intersect(intersect(colnames(tcga.rna),colnames(temp.rna.normal)),colnames(tcga.edit)),colnames(temp.edit.normal))
  return(list(cancer,tcga.rna,temp.rna.normal,tcga.edit,temp.edit.normal,tcga.names))
}
|
25d008de82858cefa399d528170dddfabd850e3e
|
9728006767999c2554b86bc41e8183d3465010aa
|
/codes/data_retrieval.R
|
476e43ad08f73910519e913a3136e68aae5fa35e
|
[] |
no_license
|
Salfo/econ_blogs
|
46a7824db8c412c77847407a95b38a50676a08fa
|
7492d6d4a2a0f963daf86076d46167e888aa51cd
|
refs/heads/master
| 2020-03-14T08:24:10.523813
| 2018-04-30T16:38:09
| 2018-04-30T16:38:09
| 131,523,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 387
|
r
|
data_retrieval.R
|
# Scrape the table of the top-200 most influential economics blogs.
library(rvest)
library(dplyr)

blog_urls <- "http://www.onalytica.com/blog/posts/top-200-most-influential-economics-blogs/"
blogs_webpage <- read_html(blog_urls)

# Parse the first <table> on the page, treating its first row as a header.
blogs_tabl <- html_table(html_nodes(blogs_webpage, "table")[[1]], header = 1)

# Drop the leading rank/index column scraped from the page.
blogs_tabl <- blogs_tabl[, -1]
#write.csv(x = blogs_tabl, file = "data/blogs_list.csv", row.names = FALSE)
|
0ce5de1d77401064b90a157dc54b74cc90fc6db9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/h2o/examples/h2o.table.Rd.R
|
de11934aed4c71a3200031028f339d34cf2fa609
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 567
|
r
|
h2o.table.Rd.R
|
# Purled example from the h2o package documentation for h2o.table():
# cross-tabulation on H2OFrames. Requires a local H2O cluster (h2o.init()).
library(h2o)
### Name: h2o.table
### Title: Cross Tabulation and Table Creation in H2O
### Aliases: h2o.table table.H2OFrame
### ** Examples
## No test:
library(h2o)
h2o.init()
# Locate the prostate demo CSV shipped with the h2o package and upload it
# into the running H2O cluster.
prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
prostate <- h2o.uploadFile(path = prostate_path)
summary(prostate)
# Counts of the ages of all patients
head(h2o.table(prostate[, 3]))
h2o.table(prostate[, 3])
# Two-way table of ages (rows) and race (cols) of all patients
head(h2o.table(prostate[, c(3, 4)]))
h2o.table(prostate[, c(3, 4)])
## End(No test)
|
696dadc25eaeb206c350110e959f9ce12e4a9c2b
|
21b539d13bc89cd86792d5c4f74d595dc2ef8605
|
/005.PreLab.R
|
fedefb60546625b777e22a9c19fb6fad6ff63535
|
[] |
no_license
|
ozkuran/UTA_FoundDataAnalysis
|
b3b1ff51df08910f4a7cac893575c54a7228dd40
|
e32912bf4010ac3243926fe1015c35462333ca3a
|
refs/heads/master
| 2021-01-20T02:16:24.111753
| 2015-02-09T13:54:41
| 2015-02-09T13:54:41
| 28,847,396
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 782
|
r
|
005.PreLab.R
|
# Pre-lab exercises on the SDSFoundations WorldRecords data: subset
# world-record progressions, then plot and linearly fit shotput records.
library(SDSFoundations)
WR <- WorldRecords
UniqueEvents <- c(unique(WR$Event))
# Usain Bolt's rows for the men's 100m record.
UsainsFirst100mRecord <- WR[which(WR$Event =="Mens 100m" & WR$Athlete == "Usain Bolt"),]
# Women's mile records faster than 260 seconds.
WomensMileUnder260s <- WR[which(WR$Event == "Womens Mile" & WR$Record < 260),]
#Invoke the SDSFoundataions package
# NOTE(review): SDSFoundations was already attached above; this second
# library() call is redundant but harmless.
library(SDSFoundations)
#Subset the data
menshot <- WR[WR$Event=='Mens Shotput',]
womenshot <- WR[WR$Event=='Womens Shotput',]
#Create scatterplots
plot(menshot$Year,menshot$Record,main='Mens Shotput World Records',xlab='Year',ylab='World Record Distance (m)',pch=16)
plot(womenshot$Year,womenshot$Record,main='Womens Shotput World Records',xlab='Year',ylab='World Record Distance (m)',pch=16)
#Run linear models
linFit(menshot$Year, menshot$Record)
linFit(womenshot$Year,womenshot$Record)
|
944d8cba963a1b3ccdcb843d06d5f0858cab3385
|
c85471f60e9d5c462de6c60c880d05898ec81411
|
/cache/jasonmstevensphd|TidyTuesday_JMS|Tidy_Tuesday_2018_08_21.R
|
9c414f4c1c0a7f243b82228ff45c557c63f7297b
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
a-rosenberg/github-content-scraper
|
2416d644ea58403beacba33349ee127e4eb42afe
|
ed3340610a20bb3bd569f5e19db56008365e7ffa
|
refs/heads/master
| 2020-09-06T08:34:58.186945
| 2019-11-15T05:14:37
| 2019-11-15T05:14:37
| 220,376,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,807
|
r
|
jasonmstevensphd|TidyTuesday_JMS|Tidy_Tuesday_2018_08_21.R
|
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----Libraries, message=FALSE, warning=FALSE, EVAL=FALSE, include=FALSE----
# This is where we import our libraries and files. We'll also add some information to be included on our plots
library(knitr)
library(tidyverse)
library(RColorBrewer)
library(lubridate)
# Load the CalFire FRAP data, parse alarm/containment dates, and derive a
# simplified three-level cause.
Cal_Fires <- read_csv("week21_calfire_frap.csv") %>%
  mutate(Alarm_Date = ymd(alarm_date)) %>%
  mutate(Contained_Date = ymd(cont_date)) %>%
  mutate(Year = year(Alarm_Date)) %>%
  rename(Fire_Name = fire_name) %>%
  # case_when() takes the first matching branch: natural causes (codes 1
  # and 17) first, unknown (14 or missing) next, everything else is human.
  # The original fallback `cause != 1 | cause != 14 | cause != 17` is a
  # tautology for the remaining rows, so TRUE is equivalent and clearer.
  mutate(cause2 = case_when(cause == 1 | cause == 17 ~ "Natural",
                            cause == 14 | is.na(cause) ~ "Unknown",
                            TRUE ~ "Human"))
Time_of_Analysis <- now(tz = "America/New_York")
Analyst <- "@jasonmstevens"
# Shared ggplot theme for every figure below.
# NOTE(review): naming this `plot` shadows base::plot(); kept because the
# later chunks add it to ggplots under this name.
plot <- theme(plot.background = element_rect(fill = "white"))+
  theme(panel.background = element_rect(fill = "white",
                                        colour="grey50"))+
  theme(plot.title = element_text(face = "bold",
                                  size = 18,
                                  color = "navy"))+
  theme(axis.title = element_text(face = "bold", size = 16))+
  theme(aspect.ratio = 3.5/5)
## ----Barplot of California Wildfires, echo=FALSE, message=FALSE, warning=FALSE, tidy=TRUE----
# Acres burned per year. na.rm belongs inside sum(): the original
# `sum(gis_acres), na.rm = TRUE` passed na.rm to summarize(), which only
# created a constant na.rm column and let NA acreage poison the totals.
Cal_Fires_Bar <- Cal_Fires %>%
  group_by(Year) %>%
  summarize(Burned_Acres = sum(gis_acres, na.rm = TRUE)) %>%
  ggplot(aes(Year, Burned_Acres))+
  geom_smooth()+
  geom_bar(stat = "identity")+
  ggtitle("Acres Burned for California Wildfires")+
  labs(x = "Year", y = "Acres Burned",
       subtitle = paste("Generated by", Analyst, "on", Time_of_Analysis))+
  plot
Cal_Fires_Bar
## ----California Wildfires by Month, echo=FALSE, message=FALSE, warning=FALSE, tidy=TRUE----
# Seasonality: acres burned by calendar month across all years.
Cal_Fires_Month <- Cal_Fires %>%
  mutate(Month = month(Alarm_Date, label = TRUE)) %>%
  group_by(Month) %>%
  summarize(Burned_Acres = sum(gis_acres, na.rm = TRUE)) %>%
  ggplot(aes(Month, Burned_Acres))+
  geom_smooth()+
  geom_bar(stat = "identity")+
  ggtitle("Acres Burned for California Wildfires\nby Month Since 1950")+
  labs(x = "Month", y = "Acres Burned",
       subtitle = paste("Generated by", Analyst, "on", Time_of_Analysis))+
  plot
Cal_Fires_Month
## ----Active Season California Wildfires, echo=FALSE, message=FALSE, warning=FALSE, tidy=TRUE----
# Acres burned per year during the active season (Aug-Oct).
Active_Season <- c("Aug", "Sep", "Oct")
Cal_Fires_Active <- Cal_Fires %>%
  mutate(Month = month(Alarm_Date, label = TRUE)) %>%
  filter(Month %in% Active_Season) %>%
  group_by(Year) %>%
  summarize(Burned_Acres = sum(gis_acres, na.rm = TRUE)) %>%
  ggplot(aes(Year, Burned_Acres))+
  geom_smooth()+
  geom_bar(stat = "identity")+
  ggtitle("Acres Burned for California Wildfires\n During Active Season by Year")+
  labs(x = "Year", y = "Acres Burned",
       subtitle = paste("Generated by", Analyst, "on", Time_of_Analysis))+
  plot
Cal_Fires_Active
## ----Quiet Season California Wildfires, echo=FALSE, message=FALSE, warning=FALSE, tidy=TRUE----
# Complement of the active season (Nov-Jul).
Active_Season <- c("Aug", "Sep", "Oct")
Cal_Fires_Quiet <- Cal_Fires %>%
  mutate(Month = month(Alarm_Date, label = TRUE)) %>%
  filter(!(Month %in% Active_Season)) %>%
  group_by(Year) %>%
  summarize(Burned_Acres = sum(gis_acres, na.rm = TRUE)) %>%
  ggplot(aes(Year, Burned_Acres))+
  geom_smooth()+
  geom_bar(stat = "identity")+
  ggtitle("Acres Burned for California Wildfires\n During Quiet Season by Year")+
  labs(x = "Year", y = "Acres Burned",
       subtitle = paste("Generated by", Analyst, "on", Time_of_Analysis))+
  plot
Cal_Fires_Quiet
ggsave(Cal_Fires_Quiet, filename = "Cal_Fire_Quiet.png")
|
f86d676da1b5597af6b18b47f7efa4286c4a892b
|
e9341aa34f2d07a636ae8e600cd881712d2bee9d
|
/R/ex.R
|
4374dece50f5252434e05ee1e9eb3a49b0f4c30c
|
[] |
no_license
|
jihwanK/data_science
|
816f0b1c7d6a833520d79eb36136c8d4dd394795
|
26c344864e924f81f4754170729bfb7a7d6df203
|
refs/heads/master
| 2022-07-27T02:18:50.220994
| 2022-07-15T13:33:24
| 2022-07-15T13:33:24
| 201,589,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 143
|
r
|
ex.R
|
# Location of the "Practical Statistics for Data Scientists" data files.
home <- '~/git_repository/practical-statistics-for-data-scientists/data'
# Read the state-level statistics and plot the murder-rate distribution.
# file.path() avoids mutating the session's working directory via setwd(),
# and `<-` replaces the `=` assignment.
state <- read.csv(file.path(home, 'state.csv'))
hist(state[['Murder.Rate']])
|
250d191dc48a04bde811e9e1ce15a0b34df4dcd5
|
799ee4573b1244cced7102ed2a942e52579c03b9
|
/R/CapExModel.R
|
23f6f781ae3d9bc61c24f4d7c39cd6df8c099d62
|
[
"MIT"
] |
permissive
|
UrbanMatrixOne/rumo
|
88476ec79a66b0248570386a8b1c6c81a347e4a3
|
5554a4088723c717dc97847edb56c4c682ec31fa
|
refs/heads/master
| 2021-09-28T21:17:18.033028
| 2018-11-20T18:08:57
| 2018-11-20T18:08:57
| 154,221,360
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,777
|
r
|
CapExModel.R
|
#cap ex projection
# Fits an rpart regression tree that models NPV per kW from start date, fuel
# type and capacity, prints/plots diagnostics, and saves the fitted model to
# 'CapExModel.rds'.
# NOTE(review): depends on global `clean_IGES_data`, and the final ggplot uses
# `clean_data`, which is never defined in this file — presumably it should be
# `clean_IGES_data`; confirm before running. Also needs dplyr, rpart and
# ggplot2 attached by the caller.
fitTreeModel <- function(){
# random 10% holdout; the remaining rows form the training set
test_set <- clean_IGES_data%>% sample_frac(.1)
train_set <- clean_IGES_data %>% setdiff(test_set)
# fit only on rows with NPVperkw < .5 (outlier cut); 'anova' = regression tree
fit<- tree.model <- rpart::rpart(data = train_set %>% filter(NPVperkw <.5 ), NPVperkw ~ (start_date) + fuel_type + capacity , method = 'anova')
printcp(fit) # display the results
plotcp(fit) # visualize cross-validation results
summary(fit) # detailed summary of splits
par(mfrow=c(1,2)) # two plots on one page
rsq.rpart(fit) # visualize cross-validation results
plot(fit, uniform=TRUE,
main="Classification Tree for IGES_CDM")
text(fit, use.n=TRUE, all=TRUE, cex=.8)
# prune at the complexity parameter minimizing cross-validated error
fit.2 <- prune(tree.model, cp= fit$cptable[which.min(fit$cptable[,"xerror"]),"CP"])
plot(fit.2, uniform=TRUE,
main="Classification Tree for IGES_CDM")
text(fit.2, use.n=TRUE, all=TRUE, cex=.8)
# predicted vs. actual scatter, colored by fuel type
ggplot(data.frame(predict = predict(fit, clean_data), actual = clean_data$NPVperkw, type= clean_data$fuel_type), aes(x = actual, y = predict, color = type)) + geom_point( ) + theme_bw()
#Save Model
saveRDS(fit, 'CapExModel.rds')
}
#' getCapEx
#' This function estimates the cost of a grid power plant with a given capacity
#' @param fuel_type e.g. "Wind power" , "Hydro power" or "PV"
#' @param capacity the capacity of the project to be estimated (currently
#'   unused by the lookup; kept for interface stability)
#' @param country the country the project is built in; 'China' applies a 1.2
#'   cost factor, everything else a factor of 1
#' @export
#' @examples
#' getCapEx()
getCapEx <- function(fuel_type, capacity, country = 'generic')
{
  # Look up the base cost by fuel type (vectorized, like the original
  # nested ifelse): wind / hydro have fixed costs, anything else is solar.
  base_cost <- ifelse(fuel_type == 'Wind power', 1385706,
                      ifelse(fuel_type == 'Hydro power', 1240639*2,
                             1000000 # solar / default
                      ))
  # Country adjustment factor.
  country_factor <- ifelse(country == 'China', 1.2, 1)
  base_cost * country_factor
}
|
9be4824fdcd67698c5404689767a21812d33134c
|
5cb06f96db4a22fcd245880c5435e7c37a7a7d18
|
/mapasComparados/loc/code/tmp.r
|
df9ac43ffd579ae4b5aa5d0508f407d2d0ae7aa7
|
[
"MIT"
] |
permissive
|
emagar/mxDistritos
|
c063484aef576d0f1ea79f44e78cb1c18859c6e5
|
f1f163f7e834c53338e9a5525ae829ab48179c1b
|
refs/heads/master
| 2023-09-01T22:19:05.448103
| 2023-08-29T04:51:38
| 2023-08-29T04:51:38
| 106,619,312
| 5
| 14
|
MIT
| 2018-01-05T14:32:44
| 2017-10-11T23:17:30
|
R
|
UTF-8
|
R
| false
| false
| 4,825
|
r
|
tmp.r
|
############
# 20 oax #
############
# 2010 map eric re-did
# Read the 2010 Oaxaca seccion-level file and clean the nominal list counts.
d <- read.csv(file = "fuenteAlumnos/oaxLoc2010.csv", stringsAsFactors = FALSE)
# empty / dash entries in lisnom mean zero voters on the list
sel <- which(d$lisnom==""|d$lisnom=="-")
d$lisnom[sel] <- 0; rm(sel)
d$lisnom <- as.numeric(d$lisnom)
# R equivalent of Stata's by-group sum: egen tmp = sum(invested), by(...)
d$lisnom <- ave(d$lisnom, as.factor(d$seccion), FUN=sum, na.rm=TRUE)
d <- d[duplicated(d$seccion)==FALSE,] # drop redundant obs
# drop columns not needed downstream
d$ord <- NULL; d$loc <- NULL; d$casilla <- NULL
d$cab2010 <- d$cab; d$cab <- NULL
d$ife <- 20000 + d$ife # as in aymu
dim(d)
d10 <- d; rm(d) #rename
#
# read what claudia prepared
d <- read.csv(file = "fuenteAlumnos/oaxLoc.csv", stringsAsFactors = FALSE)
dim(d)
# merge the 2010 map onto the prepared file by seccion, keep all rows of both
d <- merge(x = d, y = d10, by = "seccion", all = TRUE)
# keep/reorder the columns of interest
d <- d[,c("seccion","munn","ife","mun","edon","lisnom","disfed1979","disfed1997","disfed2006","disfed2018","disloc2010","cab2010","disloc2012","disloc2018","nota")]
write.csv(d, file = "oaxLoc.csv", row.names = FALSE)
## ## READ HISTORICAL MAP (MISSING SECCIONES POSSIBLE)
## d <- read.csv(file = "fuenteAlumnos/coaLoc.csv", stringsAsFactors = FALSE)
##
## # handy function to rename one data.frame's column
## rename.col <- function(old=NA, new=NA, what=NA){
## old <- old; new <- new; what <- what;
## colnames(what)[which(colnames(what)==old)] <- new
## return(what)
## }
## d <- rename.col(old="disn2005", new="disloc2005", what=d)
## d <- rename.col(old="disn2011", new="disloc2011", what=d)
## d <- rename.col(old="disn2017", new="disloc2017", what=d)
## #
## # ---> NOTE: <--- #
## # ---> open useEqPrep2fillMissSeccionesLocalMaps.r and run manually to spot errors <--- #
## # ---> will generate new eq object with full map (incl. state and federal districts) <--- #
##
## write.csv(eq, file = "coaLoc.csv", row.names = FALSE)
## get functions to include population
# NOTE(review): `dd` is a global path prefix defined elsewhere; getPop.r must
# provide get2005()/get2010().
source(paste(dd, "code/getPop.r", sep = ""))
pob05 <- get2005(edon=5)
pob10 <- get2010(edon=5)
head(pob05)
head(pob10)
head(d)
# add 2005 pop
d <- merge(x = d, y = pob05[,c("seccion","ptot")], by = "seccion", all.x = TRUE, all.y = FALSE)
# NOTE(review): `son` is not defined in the active code (it only appears in
# the commented-out dsi block below), so this ave() will fail as written.
# The grouping variable is presumably a district/municipality column of `d`
# (cf. as.factor(d$seccion) used above) — confirm intended grouping.
d$pob05 <- ave(d$ptot, as.factor(son), FUN = sum, na.rm = TRUE)
d$ptot <- NULL
# add 2010 pop
d <- merge(x = d, y = pob10[,c("seccion","ptot")], by = "seccion", all.x = TRUE, all.y = FALSE)
# NOTE(review): same undefined `son` issue as above.
d$pob10 <- ave(d$ptot, as.factor(son), FUN=sum, na.rm=TRUE)
d$ptot <- NULL
## # dsi seen from offspring perspective
## # new district's "father" and district similarity index, cf. Cox & Katz
## ## READ HISTORICAL MAPS
## d <- read.csv(file = "coaLoc.csv", stringsAsFactors = FALSE)
## head(d)
## son <- d$disloc2017
## father <- d$disloc2011
## N <- max(son, na.rm = TRUE)
## d$father <- NA
## d$dsi <- 0
## for (i in 1:N){
## #i <- 1 # debug
## sel.n <- which(son==i) # secciones in new district
## tmp <- table(father[sel.n])
## target <- as.numeric(names(tmp)[tmp==max(tmp)][1]) # takes first instance in case of tie (dual fathers)
## d$father[sel.n] <- target
## sel.f <- which(father==target) # secciones in father district
## sel.c <- intersect(sel.n, sel.f) # secciones common to father and new districts
## d$dsi[sel.n] <- round( length(sel.c) / (length(sel.f) + length(sel.n) - length(sel.c)) , 3 )
## }
## dsi <- d[duplicated(son)==FALSE,]
## dsi <- dsi[,c("edon","disloc2017","father","dsi")]
## head(dsi)
## dsi <- dsi[order(dsi$disloc2017),]
## dsi$cab2017 <- c("Acuña", "Piedras Negras", "Sabinas", "San Pedro", "Monclova", "Frontera", "Matamoros", "Torreón", "Torreón", "Torreón", "Torreón", "Ramos Arizpe", "Saltillo", "Saltillo", "Saltillo", "Saltillo")
## dsi <- dsi[order(dsi$dsi),]
## write.csv(dsi, file = "simIndex/dist_coa.csv", row.names = FALSE)
#
## # dsi seen from parent perspective
## # new district's "father" and district similarity index, cf. Cox & Katz
## d$son17 <- NA
## d$dsi <- 0
## for (i in 1:16){
## #i <- 16 # debug
## sel.o <- which(d$disn14==i) # secciones in original district
## tmp <- table(d$disn17[sel.o])
## target <- as.numeric(names(tmp)[tmp==max(tmp)])
## d$son2017[sel.o] <- target
## sel.s <- which(d$disn17==target) # secciones in son district
## sel.c <- intersect(sel.o, sel.s) # secciones common to original and son districts
## d$dsi[sel.o] <- round( length(sel.c) / (length(sel.o) + length(sel.s) - length(sel.c)) , 3 )
## }
## dsi <- d[duplicated(d$disn14)==FALSE, c("disn14","son2017","dsi")]
## dsi <- dsi[order(dsi$disn14),]
## dsi$cab14 <- c("Saltillo", "Saltillo", "Saltillo", "Saltillo", "Ramos Arizpe", "Torreón", "Torreón", "Torreón", "Torreón", "San Pedro", "Frontera", "Monclova", "Múzquiz", "Sabinas", "Acuña", "Piedras Negras")
## dsi <- dsi[order(dsi$dsi),]
## summary(dsi$dsi)
|
576c0b1040659ef57221d4aec97fbfe79645d504
|
79457aaae83a0b3914a38874c10907440e0dfc61
|
/man/raster_cube.Rd
|
326dc12c8fed6bf68b7c05ad6fea558ae97b9c49
|
[] |
permissive
|
appelmar/gdalcubes
|
be9786b36fbe4e25a5c0245968634f57a40752ad
|
2134f769454e147660e7a73c61afa14219de20b4
|
refs/heads/master
| 2023-08-07T20:56:02.442579
| 2023-07-25T06:36:46
| 2023-07-25T06:36:46
| 148,130,790
| 74
| 7
|
MIT
| 2023-03-23T19:56:08
| 2018-09-10T09:25:01
|
C++
|
UTF-8
|
R
| false
| true
| 2,577
|
rd
|
raster_cube.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cube.R
\name{raster_cube}
\alias{raster_cube}
\title{Create a data cube from an image collection}
\usage{
raster_cube(
image_collection,
view,
mask = NULL,
chunking = .pkgenv$default_chunksize
)
}
\arguments{
\item{image_collection}{Source image collection as from \code{image_collection} or \code{create_image_collection}}
\item{view}{A data cube view defining the shape (spatiotemporal extent, resolution, and spatial reference), if missing, a default overview is used}
\item{mask}{mask pixels of images based on band values, see \code{\link{image_mask}}}
\item{chunking}{length-3 vector or a function returning a vector of length 3, defining the size of data cube chunks in the order time, y, x.}
}
\value{
A proxy data cube object
}
\description{
Create a proxy data cube, which loads data from a given image collection according to a data cube view
}
\details{
The following steps will be performed when the data cube is requested to read data of a chunk:
1. Find images from the input collection that intersect with the spatiotemporal extent of the chunk
2. For all resulting images, apply gdalwarp to reproject, resize, and resample to an in-memory GDAL dataset
3. Read the resulting data to the chunk buffer and optionally apply a mask on the result
4. Update pixel-wise aggregator (as defined in the data cube view) to combine values of multiple images within the same data cube pixels
If chunking is provided as a function, it must accept exactly three arguments for the total size of the cube in t, y, and x axes (in this order).
}
\note{
This function returns a proxy object, i.e., it will not start any computations besides deriving the shape of the result.
}
\examples{
# create image collection from example Landsat data only
# if not already done in other examples
if (!file.exists(file.path(tempdir(), "L8.db"))) {
L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
".TIF", recursive = TRUE, full.names = TRUE)
create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"), quiet = TRUE)
}
L8.col = image_collection(file.path(tempdir(), "L8.db"))
v = cube_view(extent=list(left=388941.2, right=766552.4,
bottom=4345299, top=4744931, t0="2018-01", t1="2018-12"),
srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
raster_cube(L8.col, v)
# using a mask on the Landsat quality bit band to filter out clouds
raster_cube(L8.col, v, mask=image_mask("BQA", bits=4, values=16))
}
|
ffa2d0ddb5e45b02e8739f393e9bb4e48109a6d8
|
cd0612d2e4149d22f8a208caacbd69b88afc5869
|
/R/plot_repressor.R
|
2bcc310d918828bffda646cc5891fec0af1d5b5b
|
[] |
no_license
|
josschavezf/erba
|
d69c7a78f4f4cc43af5e4d09afa7d902f070cf11
|
0c653c477e3799f82c860ce303d17eee8ad966c7
|
refs/heads/master
| 2021-07-08T19:49:34.146575
| 2021-04-27T20:13:01
| 2021-04-27T20:13:01
| 172,123,138
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,109
|
r
|
plot_repressor.R
|
#' plot_repressor
#'
#' @description plot repressor and non_repressor transcription factors versus ORFs per genome
#'
#' @param data A data.frame object, needs to have the columns repressor and non_repressor for transcription factors
#' @param filename file name with .tiff extension
#' @param title plot name inside " "
#' @param ymax ylim max
#' @param ylab ylab name inside " "
#'
#' @examples
#' plot_repressor(tf_repressor_non_repressor,
#'                filename = "tf_repressor.tiff",
#'                ylab = "Transcription factors",
#'                title = "Transcription factors per genome",
#'                ymax = 120)
#' @export
plot_repressor <- function(data, filename, title, ylab, ymax = 150) {
  tiff(filename = filename, width = 1234, height = 880, units = 'px', res = 100)
  # FIX: close the device even if plotting fails; the original only called
  # dev.off() at the end, leaving a dangling tiff device on error.
  on.exit(dev.off(), add = TRUE)
  # NOTE(review): relies on globals `colors_represor_activator` and the
  # helper functions `slope()` / `intercept()` defined elsewhere in the package.
  myplot <- ggplot(data) +
    geom_point(aes(x = ORFs, y = repressor, colour = "repressor")) +
    geom_point(aes(x = ORFs, y = non_repressor, colour = "non_repressor")) +
    ylim(0, ymax) +
    xlim(0,100) +
    geom_abline(aes(intercept = intercept(ORFs, repressor, slope(ORFs,repressor)), slope = slope(ORFs,repressor), color = "repressor")) +
    geom_abline(aes(intercept = intercept(ORFs, non_repressor, slope(ORFs,non_repressor)), slope = slope(ORFs,non_repressor), color = "non_repressor")) +
    ggtitle(title) +
    labs(x= "ORFs (x 100)", y = ylab) +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          plot.title = element_text(hjust = 0.5, size = 28, face = "bold"),
          axis.title = element_text(size = 26, face="bold"),
          axis.text = element_text(size = 22, face="bold"),
          axis.line = element_line(colour = "black", size=1.5),
          axis.ticks = element_line(size = 1.5, lineend = 2),
          legend.title = element_blank(),
          legend.position = c(0.13,0.85),
          legend.text = element_text(size = 20),
          legend.key.size = unit(0.45, "in")) +
    scale_color_manual(values = colors_represor_activator, aesthetics = "colour")
  print(myplot)
}
|
8d4bd5f1f17449c50a2ae73f5de1d9464363b7ea
|
6a90438fff56a060f2ea7794fd242fd3de825d05
|
/DataCleanSubject/project/run_Analysis.R
|
5dc631f3497962092ab46304f5826f73168b49b7
|
[] |
no_license
|
RujutaJ1/RepData_PeerAssessment1
|
01db3ee17de85a5d6ba38534613a3cf5f588002b
|
8ef4459f8ffa44cc1adbc4cf6a4cb72f5e6e7457
|
refs/heads/master
| 2021-01-17T07:56:20.140865
| 2016-01-30T21:53:32
| 2016-01-30T21:53:32
| 34,197,591
| 0
| 0
| null | 2015-04-19T07:35:52
| 2015-04-19T07:35:52
| null |
UTF-8
|
R
| false
| false
| 3,160
|
r
|
run_Analysis.R
|
## Step 1 : First Download the Data, and Unzip it.
# FIX: the script uses data.table (data.table(), fread(), setnames()) and
# dplyr (select()) throughout but never attached them; load them explicitly.
library(data.table)
library(dplyr)
#first download the data- its a heavy download,
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
getwd()
setwd("F:/Rujuta/Coursera/DataCleanSubject")
download.file(fileurl, destfile="./Data.zip")
unzip("./Data.zip")
## Step 2
# In this step we read the relevant files. X_train is the main data. Subject_trainData is the subjectID
# features are the names of the features and activity is the name of the activity for which data was collected.
# So we read the data, give the names of the features as column names
setwd("./UCI HAR Dataset")
list.files()
features <- read.table("./features.txt")
setwd("./train")
X_traindata <- read.table("./X_train.txt")
# FIX: this table was created as XTrainDataSet but referenced below as
# TrainData (setnames/cbind); use one consistent name.
TrainData <- data.table(X_traindata)
y_traindata <- read.table("./y_train.txt")
YTrainData <- data.table(y_traindata)
subject_traindata <- read.table("./subject_train.txt")
SubjectTrainData <- data.table(subject_traindata)
FeatureData <- data.table(features)
names <- as.character(features$V2)
setnames(TrainData, colnames(TrainData), names)
## Step 3
#merge subjectID and Activity name with the test data.
dim(YTrainData)
setnames(YTrainData, "Activity")
dim(SubjectTrainData)
setnames(SubjectTrainData, "SubjectID")
TrainData1 <- cbind(SubjectTrainData,YTrainData, TrainData)
dim(TrainData1)
## Step 4
## Do the same activity with test data.
# FIX: the working directory is ".../UCI HAR Dataset/train" at this point,
# so the test folder is a sibling, not a child, directory.
setwd("../test")
list.files()
X_testdata <- read.table("./X_test.txt")
dim(X_testdata)
TestData <- data.table(X_testdata)
setnames(TestData, colnames(TestData), names)
y_testdata <- read.table("./y_test.txt")
YTestData <- data.table(y_testdata)
dim(YTestData)
setnames(YTestData, "Activity")
SUbjectTestData <- fread("./subject_test.txt")
dim(SUbjectTestData)
setnames(SUbjectTestData, "SubjectID")
TestData1 <- cbind(SUbjectTestData,YTestData,TestData)
dim(TestData1)
## Step5
## combine Test and Train Data.
MainData <- rbind(TestData1, TrainData1)
dim(MainData)
# So this is the final Data.
#dim(MainDataTogether)
## Step6
# FIX: "Step6" was a bare identifier (syntax error at run time); it is a
# section header and must be a comment.
#select only the mean and std variables from the data.
MainDataslice1 <- select(MainData, contains("mean"))
MainDataslice2 <- select(MainData, contains("std"))
MainDataSlice3 <- select(MainData, SubjectID,Activity)
MainDataTogether <- cbind(MainDataSlice3, MainDataslice1, MainDataslice2)
# now to give the activity Lables
ActivityLabels <- c("WALKING","WALKING_UPSTAIRS","WALKING DOWNSTAIRS","SITTING","STANDING","LAYING")
MainDataTogether$Activity <- ordered( MainDataTogether$Activity,levels=c(1,2,3,4,5,6),labels=ActivityLabels)
# Preparing the tidy data set
dim(MainDataTogether)
p <- MainDataTogether[,lapply(.SD, mean), by=SubjectID]
dim(p)
q <- MainDataTogether[, lapply(.SD, mean), by=Activity]
AverageBySubject <- select(p, -Activity)
AverageByActivity <- select(q, -SubjectID)
View(AverageBySubject)
View(AverageByActivity)
write.table(AverageByActivity, file="./DataCleanProjectSubmission_Rujuta.txt", append=FALSE, row.names=FALSE)
write.table(AverageBySubject, file="./DataCleanProjectSubmission_Rujuta.txt", append=TRUE, row.names=FALSE)
# so this is the file that gets uploaded on the github.
|
ed29ccc5bf39ca8b271c99b9797792035897be1d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/epr/examples/epr-package.Rd.R
|
bc54fc2354c602f7a4a97cd6491448525ab4afa7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 457
|
r
|
epr-package.Rd.R
|
# Worked examples for the epr (Easy Polynomial Regression) package.
library(epr)
### Name: epr-package
### Title: Easy Polynomial Regression
### Aliases: epr-package epr
### ** Examples
# analysis in completely randomized design
data(data1)
r1 <- pr2(data1)
names(r1)
r1
r1[1]
pr1(data1)
# analysis in randomized block design
data(data2)
r2 <- pr2(data2, design = 2)
r2
# analysis in latin square design
data(data3)
r3 <- pr2(data3, design = 3)
r3
# analysis in several latin squares
data(data4)
r4 <- pr2(data4, design = 4)
r4
|
ffdd47941c90339c2d0748844d01cc5e06f5932d
|
755c9d8af86f33c64bb0532f46a6d875e9728e13
|
/scripts/STEP04_PCA.R
|
2433a3e53d7669229e6303f42794f6ce0c69b035
|
[] |
no_license
|
jmzhang1911/CRC_lncRNA_backup5.0
|
f2a08eb81b9806115a1053c690433706ef066845
|
56a48cac477461b5f236afe35bf5a8f79f68c6f6
|
refs/heads/main
| 2023-04-02T01:53:14.521092
| 2020-12-18T01:07:44
| 2020-12-18T01:07:44
| 320,638,764
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,453
|
r
|
STEP04_PCA.R
|
# Script setup: clear the workspace and load helpers plus the precomputed
# count matrices produced by earlier pipeline steps.
# NOTE(review): rm(list = ls()) in a script is an anti-pattern (it wipes the
# caller's environment); kept as-is since downstream steps may assume it.
rm(list = ls())
source('Utils.R')
# mkdir() is presumably defined in Utils.R — confirm.
mkdir('outcomes/pca_heatmap')
load('outcomes/inputdata/input.RData')
mRNA_count <- input_matrix_count$mRNA_count
lncRNA_count <- input_matrix_count$lncRNA_count
all_count <- input_matrix_count$mRNA_lncRNA_count
###########=====> do it in jobs >>>>>>###########
#===> saved as kendall_calculate.R in jobs
###########=====> do it in jobs >>>>>>###########
#===> plot heatmap
# loads mRNA_df / lncRNA_df / all_df / sample_infor used below
load('outcomes/pca_heatmap//pheatmap_input.RData')
# Render a row-scaled, annotated heatmap of `df` (no row/column clustering,
# labels hidden) and convert it to a ggplot object via ggplotify::as.ggplot
# so it can be arranged together with other panels.
myplotheatmap <- function(df, sample_infor, title){
  heat_palette <- colorRampPalette(c("navy", "white", "firebrick3"))(50)
  heatmap_obj <- pheatmap(df,
                          scale = 'row',
                          show_rownames = FALSE,
                          show_colnames = FALSE,
                          cluster_cols = FALSE,
                          cluster_rows = FALSE,
                          color = heat_palette,
                          annotation_col = sample_infor,
                          annotation_row = sample_infor,
                          main = title)
  as.ggplot(heatmap_obj)
}
# Build the three heatmap panels and combine them side by side (patchwork),
# sharing one legend, then save the composite figure.
plot_mRNA <- myplotheatmap(mRNA_df, sample_infor, 'heatmap of mRNA')
plot_lncRNA <- myplotheatmap(lncRNA_df, sample_infor, 'heatmap of lncRNA')
plot_all <- myplotheatmap(all_df, sample_infor, 'heatmap of all')
p <- ((plot_mRNA | plot_lncRNA | plot_all )) +
plot_layout(guides = 'collect',nrow = 1) +
plot_annotation(tag_levels = "A")
# NOTE(review): ggsave() is called without a plot argument, so it saves the
# last plot ggplot2 tracked — presumably `p`; pass it explicitly to be safe.
ggsave('outcomes/pca_heatmap/heatmap.png', width = 60, height = 20, units = "cm")
#===> PCA analysis and plot
# Run PCA (PCAtools::pca) on expression matrix `x` whose 15 columns are
# 3 control, 6 inflammation and 6 cancer samples (in that order), and return
# a data frame of rotated sample scores joined with the group labels.
# If pic = TRUE, return the biplot instead so the variance-explained axis
# labels can be read off.
# NOTE(review): biplot() is also called unconditionally for its side effect
# before the return — it draws even when pic = FALSE.
mygetpacdata <- function(x, pic=F){
# use pic to check the xlabs and ylabs
# sample names: control_1..3, then inflammation/cancer interleaved 1..6
time <- c(paste('control', 1:3, sep = '_'),
paste(rep(c('inflammation','cancer'), each = 6), 1:6, sep = '_'))
colnames(x) <- time
time2 <- c(rep('control',3), rep('inflammation', 6), rep('cancer', 6))
sample_infor <- data.frame(time = factor(time2))
rownames(sample_infor) <- colnames(x)
pca <- pca(x, metadata = sample_infor)
biplot(pca, x = 'PC1', y = 'PC2')
# join PC scores with the group labels by sample name
pca_rlt <- rownames_to_column(pca$rotated, var = "sample_name")
pca_sample <- rownames_to_column(sample_infor, var = "sample_name")
pca_plot_data <- full_join(pca_rlt, pca_sample, by = 'sample_name')
if(pic == F){
return(pca_plot_data)
}else{
return(biplot(pca, x = 'PC1', y = 'PC2'))
}
}
# Build a PC1-vs-PC2 scatter plot (with group ellipses) for one expression
# matrix.
#
# Args:
#   df: expression matrix handed to mygetpacdata() (samples in columns).
#   title: plot title.
#   x, y: variance explained by PC1/PC2 as strings, shown in the axis labels
#         (precomputed by the caller from the biplot output).
# Returns:
#   A ggplot object.
myplotpca <- function(df, title, x, y){
  pcadata <- mygetpacdata(df, pic = F)
  pca <- ggplot(data = pcadata, aes(x = PC1, y = PC2)) +
    geom_point(size = 5,
               aes(color = time)) +
    stat_ellipse(aes(color = time)) +
    scale_shape_manual(values = range(c(22, 24))) +
    scale_color_manual(values = c("#00008B", "#708090", "#8B0000")) +
    labs(title = title,
         x = str_c('PCA1 (',x,'% variance explained)'),
         y = str_c('PCA2 (',y,'% variance explained)')) +
    theme_half_open() +
    scale_fill_brewer(palette = 'Set3') +
    theme(#legend.position = c(0.85, 0.2),
      plot.title = element_text(size = 18, hjust = 0.5)) +
    guides(fill = guide_legend(override.aes = list(shape = 21)))
  # FIX: return the plot explicitly; the function previously ended on an
  # assignment, which only returns the value invisibly.
  pca
}
# Build the three PCA panels and combine them (patchwork), then save.
# NOTE(review): the variance-explained percentages are hard-coded strings
# read off earlier biplot runs — they will be wrong if the input data change.
pca_mRNA <- myplotpca(mRNA_expr,title ='PCA of mRNA',x='60.59',y='22.23')
pca_lncRNA <- myplotpca(lncRNA_expr,title='PCA of lncRNA',x='91.24',y='3.16')
pca_all <- myplotpca(all_expr,title='PCA of all genes',x='60.64',y='22.25')
p2 <- ((pca_mRNA | pca_lncRNA | pca_all)) +
plot_layout(guides = 'collect',nrow = 1) +
plot_annotation(tag_levels = "A")
# NOTE(review): as above, ggsave() relies on the last tracked plot; pass p2
# explicitly to be safe. Filename "pac.png" looks like a typo for "pca.png".
ggsave('outcomes/pca_heatmap/pac.png', width = 60, height = 20, units = "cm")
|
82cb13f12356dde383d5330c9a97952beab97e18
|
e95bc21ea6b681c5f4a6b1864f12ca6f7c811d45
|
/cachematrix.R
|
d91b375b6049cfa18cd5446cbb46f232d47ff5c4
|
[] |
no_license
|
fabiolucasmsb/ProgrammingAssignment2
|
15cc0873002fedf5e7be4ca8e5f947ced53155cd
|
95ac5ad455b1db2b6e25b335a9833ba3861e2d2e
|
refs/heads/master
| 2020-12-03T04:00:58.282386
| 2015-05-23T14:46:17
| 2015-05-23T14:46:17
| 36,103,938
| 0
| 0
| null | 2015-05-23T02:27:08
| 2015-05-23T02:27:07
| null |
UTF-8
|
R
| false
| false
| 1,012
|
r
|
cachematrix.R
|
## Cache wrapper for a matrix and its (lazily computed) inverse.
## Returns a list of closures: set/get for the matrix, setInverse/getInverse
## for the cached inverse. Setting a new matrix invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # replace the stored matrix and drop the stale inverse
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) cached_inverse <<- inverse
  getInverse <- function() cached_inverse
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Return the (generalized) inverse of the matrix held by a makeCacheMatrix
## object `x`, computing it with MASS::ginv on the first call and serving the
## cached result on subsequent calls.
cacheSolve <- function(x, ...) {
        library(MASS)
        inversedMatrix <- x$getInverse()
        if(!is.null(inversedMatrix)) {
                # cache hit: return the stored inverse without recomputing
                return(inversedMatrix)
        }
        data <- x$get()
        # FIX: the original called ginv(matX), but matX is undefined — the
        # matrix fetched from the cache object is `data`.
        inversedMatrix <- ginv(data)
        x$setInverse(inversedMatrix)
        inversedMatrix
}
|
b340a83f3e2d43bb69fc0430e0a1287a1dcb9513
|
bae0af3bec95ee9123dd74a3cd42a3792f65e25d
|
/Chapter02/02__04__anovamechanics.R
|
52de07a9c92522d098ecb975bbb558e73d8225df
|
[
"MIT"
] |
permissive
|
PacktPublishing/R-Statistics-Cookbook
|
f521ead1a05104b68663521374861dfced4c1bab
|
74eb6057e47df5d43a981c44a52148bd3930c7e1
|
refs/heads/master
| 2023-02-04T14:18:10.374693
| 2023-01-30T09:26:43
| 2023-01-30T09:26:43
| 179,272,388
| 9
| 18
| null | null | null | null |
UTF-8
|
R
| false
| false
| 707
|
r
|
02__04__anovamechanics.R
|
# Two-way ANOVA mechanics "by hand": derive sums of squares for Lot and
# Food.Type from nested linear models, form F statistics against the residual
# SS (56 df), and cross-check with R's anova() table.
data <- read.csv("./anova__lot_type.csv")
lot_model <- lm(Result ~ Lot, data = data)
SS_LOT <- sum((predict(lot_model) - mean(data$Result))^2)
result <- lm(Result ~ Lot + Food.Type, data = data)
# Food.Type SS is the extra explained SS beyond the Lot-only model.
SS_FOODTYPE <- sum((predict(result) - mean(data$Result))^2) - SS_LOT
SS_ERROR <- sum((predict(result) - data$Result)^2)
FF_LOT <- (SS_LOT / 1) / (SS_ERROR / 56)
FF_FOODTYPE <- (SS_FOODTYPE / 2) / (SS_ERROR / 56)
pval_LOT <- 1 - pf(FF_LOT, 1, 56)
pval_FOODTYPE <- 1 - pf(FF_FOODTYPE, 2, 56)
print(paste("SS(ERROR) = ",SS_ERROR))
print(paste("SS(LOT) =",SS_LOT,"/F(LOT) = ",FF_LOT,"pvalue = ",pval_LOT))
print(paste("SS(FOODTYPE) =",SS_FOODTYPE,"/F(FOODTYPE) = ",FF_FOODTYPE,"pvalue = ",pval_FOODTYPE))
anova(result)
|
ca754b30cd1b19d8877715f87f970e60060cd581
|
257ffc3438528729b62bc3e7abc24eea2be6193e
|
/man/readgeol.Rd
|
cbf8c923a013cbdaf008b55d450b75bebf82637f
|
[
"MIT"
] |
permissive
|
SHUD-System/rSHUD
|
91e1ae7f077cf5efa52575a32ed4e692ed8034b9
|
1915a9cf2b241a1368b9768251b2f140454bd94e
|
refs/heads/master
| 2023-07-06T11:07:18.335307
| 2023-07-01T15:08:11
| 2023-07-01T15:08:11
| 224,737,854
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 332
|
rd
|
readgeol.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readinput.R
\name{readgeol}
\alias{readgeol}
\title{Read the .geol file
\code{readgeol}}
\usage{
readgeol(file = shud.filein()["md.geol"])
}
\arguments{
\item{file}{full path of file}
}
\value{
.geol
}
\description{
Read the .geol file
\code{readgeol}
}
|
c08d6be29b542af53b9575b3615c4916816dcf17
|
ad87af36b4919b941ac2bca11e201557f77be145
|
/man/PredictDriversSpecifications.Rd
|
502d1bf3d1749ef9e87ec643b4b2d6a9d8ffe179
|
[
"Apache-2.0"
] |
permissive
|
cities/VETravelDemand
|
da7dd61aa183c0d9741a3a733d91227237873145
|
7c10bb3e5e9c28ee5ddccdbf0c98da1831a38897
|
refs/heads/master
| 2020-04-06T12:52:52.601231
| 2019-01-22T20:33:35
| 2019-01-22T20:55:42
| 157,474,604
| 1
| 0
|
NOASSERTION
| 2019-01-22T21:08:24
| 2018-11-14T01:56:59
|
R
|
UTF-8
|
R
| false
| true
| 609
|
rd
|
PredictDriversSpecifications.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PredictDrivers.R
\docType{data}
\name{PredictDriversSpecifications}
\alias{PredictDriversSpecifications}
\title{Specifications list for PredictDrivers module}
\format{A list containing 3 components:
\describe{
\item{RunBy}{the level of geography that the module is run at}
\item{Get}{module inputs to be read from the datastore}
\item{Set}{module outputs to be written to the datastore}
}}
\usage{
PredictDriversSpecifications
}
\description{
A list containing specifications for the PredictDrivers module.
}
\keyword{datasets}
|
489538d66841eaff75a66478cb0bd9c69d1904fa
|
02021efd3d955cae251f89d3b1688bc419174788
|
/Functions/FSO_functions.R
|
529468c71c32ad6d6da2d6a2cdddbb05e438332b
|
[
"Apache-2.0"
] |
permissive
|
MoritzFeigl/FSO_paper
|
52c2e1c0901361cf24781c3916c4a8e8d95eb426
|
cfe0c5c878391cf0142e259d2b441b946389d74b
|
refs/heads/master
| 2022-12-25T17:09:15.018568
| 2020-09-18T09:33:50
| 2020-09-18T09:33:50
| 235,850,171
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 82,989
|
r
|
FSO_functions.R
|
# Functions for d-GR4J Function Space optimization case study
# Moritz Feigl, 2019
#
# Function to load spatial predictors
# Loads the Mur-catchment level-0 predictor rasters (soil, terrain, EVI,
# noise, basin id `nb` and zone id `nz`) into one data frame, optionally
# subsets to training basins, optionally fills NA cells, and optionally
# rescales the physically bounded predictors to [0, 1].
#
# Args:
#   scale: rescale clay/sand (0-100), bdim (0-2), slope (0-90),
#          elevation/hand (0-6000) to [0, 1]; nb/nz/noise/evi are left as-is.
#   na.approx: replace each NA with the mean of the entries directly above
#              and below it in the data frame (raster-row neighbours).
#   only_training_basins: keep only cells whose basin id is in
#                         `training_basins`.
#   full_dataset: if TRUE, keep every raster cell (no masking at all).
#   training_basins: basin ids used when only_training_basins = TRUE.
#     NOTE(review): defaults to NULL, which makes the subset empty when
#     only_training_basins = TRUE — callers must supply it; confirm intent.
# Returns:
#   tibble of predictors; exact zeros are replaced by 0.0001 (presumably to
#   keep downstream division/log transfer functions finite — confirm).
# NOTE(review): tibble::as.tibble() is deprecated; prefer as_tibble().
load_sp_mur <- function(scale = TRUE,
na.approx = FALSE, only_training_basins = TRUE,
full_dataset = FALSE, training_basins = NULL){
path <- "Data/spatial_predictors_mur/"
clay <- raster::raster(paste0(path, "l0_clay_mur.asc"))
bdim <- raster::raster(paste0(path, "l0_bdim_mur.asc"))
elevation <- raster::raster(paste0(path, "l0_dem_mur.asc"))
evi <- raster::raster(paste0(path, "l0_evi_mur.asc"))
hand <- raster::raster(paste0(path, "l0_hand_mur.asc"))
noise <- raster::raster(paste0(path, "l0_noise_mur.asc"))
sand <- raster::raster(paste0(path, "l0_sand_mur.asc"))
slope <- raster::raster(paste0(path, "l0_slope_mur.asc"))
nz <- raster::raster(paste0(path, "l0_nz2000_mur.asc"))
hshade <- raster::raster(paste0(path, "l0_hshade_mur.asc"))
nb <- raster::raster(paste0(path, "l0_nb2000_mur.asc"))
n <- length(raster::values(nb))
# spatial predictor list
spatial_predictors <- data.frame(nb = raster::values(nb), nz = raster::values(nz), clay = raster::values(clay),
elevation = raster::values(elevation), evi = raster::values(evi),
hand = raster::values(hand), noise = raster::values(noise),
sand = raster::values(sand), slope = raster::values(slope),
bdim = raster::values(bdim))
# use mask for NZ = subset
if(!full_dataset){
if(only_training_basins){
nb_subset <- training_basins
spatial_predictors <- spatial_predictors[spatial_predictors$nb %in% nb_subset, ]
} else {
# keep any cell that belongs to some basin
spatial_predictors <- spatial_predictors[!is.na(spatial_predictors$nb), ]
}
}
# scale data to [0, 1]
# note: extra args (the fixed physical range) are appended to x before
# taking min/max, so scaling uses the physical bounds, not the sample range
range01 <- function(x, ...){(x - min(x, ...)) / (max(x, ...) - min(x, ...))}
if(na.approx){
# fill each NA with the mean of its vertical neighbours in the table
nas <- which(is.na(spatial_predictors), arr.ind = TRUE)
for(i in 1:nrow(nas)){
spatial_predictors[nas[i,1], nas[i, 2]] <- mean(c(
spatial_predictors[nas[i,1]-1, nas[i,2]],
spatial_predictors[nas[i,1]+1, nas[i,2]]), na.rm = TRUE)
}
}
if(scale){
# standardize spatial predictors, except for NB, NZ, noise, evi
spatial_predictors[, c("clay", "sand")] <- range01(spatial_predictors[, c("clay", "sand")], c(0, 100), na.rm = TRUE)
spatial_predictors$bdim <- range01(spatial_predictors$bdim, c(0, 2), na.rm = TRUE)
spatial_predictors$slope <- range01(spatial_predictors$slope, c(0, 90), na.rm = TRUE)
spatial_predictors$elevation <- range01(spatial_predictors$elevation, c(0, 6000), na.rm = TRUE)
spatial_predictors$hand <- range01(spatial_predictors$hand, c(0, 6000), na.rm = TRUE)
}
spatial_predictors <- tibble::as.tibble(spatial_predictors)
# avoid exact zeros (division-by-zero safety in transfer functions)
spatial_predictors[which(spatial_predictors == 0, arr.ind = TRUE)] <- 0.0001
return(spatial_predictors)
}
# Nash-Sutcliffe efficiency: 1 - SSE / sum of squared deviations of the
# observations from their mean. 1 is a perfect fit, 0 matches the mean model.
NSE <- function(observations, predictions){
  squared_errors <- sum((predictions - observations)^2, na.rm = TRUE)
  obs_variability <- sum((mean(observations, na.rm = TRUE) - observations)^2, na.rm = TRUE)
  1 - squared_errors / obs_variability
}
evaluate_function_from_string <- function(string, l0){
# Evaluate a function from a string
# evaluates a function given as a string for all relevant l0 layer
# Input:
# string: transfer function as string
# l0: data frame with l0 layer
# Output:
# vector with all evaluated function outputs
# NOTE(review): built on eval(parse(text = ...)); safe only because the
# strings come from the project's own function-space generator — never feed
# it untrusted input. Predictor names must not contain the operator
# characters / ^ ( ) * + - or the tokenizer below will split them.
# tokenize the formula on arithmetic operators to find which l0 columns it uses
tf <- unlist(strsplit(string, c("[/^()*+-]")))
tf <- gsub(" ", "", tf, fixed = TRUE)
relevant_predictors <- l0[which(names(l0) %in% tf)]
if(ncol(relevant_predictors) == 0){
# constant expression: evaluate once and replicate for every l0 row
args <- ""
eval(parse(text = paste('f <- function(', args, ') { return(' , string , ')}', sep='')))
f_evaluated <- rep(f(), nrow(l0))
} else {
# build f(<predictors>) and apply it row-wise via mapply over the columns
args <- paste(names(relevant_predictors), collapse = ', ')
eval(parse(text = paste('f <- function(', args, ') { return(' , string , ')}', sep='')))
f_evaluated <- eval(parse(text = paste('mapply(f, ',
paste0('relevant_predictors$',
names(relevant_predictors), collapse = ', '),
')')))
}
# map +/-Inf (e.g. division by ~0) to NA so callers can screen bad functions
f_evaluated[is.infinite(f_evaluated)] <- NA
return(f_evaluated)
}
# Rescale function: linearly map `x` from interval `from` (default c(-11, 11))
# onto interval `to`. Constant vectors (zero sd) are returned unchanged.
# `...` is accepted for call-compatibility but unused beyond that.
rescale <- function(x, to, from = c(-11, 11), ...) { #-11, 11
  if (sd(x, na.rm = TRUE) == 0) {
    return(x)
  }
  (x - from[1]) / diff(from) * diff(to) + to[1]
}
# Generalized (power) mean of a positive vector.
#   p == 1      -> arithmetic mean
#   |p| < 0.001 -> geometric mean (log-space for numerical stability)
#   otherwise   -> power mean computed relative to max(x) to avoid overflow;
#                  if the scaled power sum still overflows, the mean
#                  degenerates to min(x) (p < 0) or max(x) (p > 0)
# Assumes strictly positive data for p != 1 (negative values raise an error).
# NOTE(review): the x[x != x_max] trick drops ALL copies of the maximum, so
# duplicated maxima interact with the formula — behavior kept exactly as-is.
gmean <- function(x, p, ...){
  if (p == 1) {
    return(mean(x, ...))
  }
  # generalized mean (p != 1) is only defined here for positive values
  if (sum(x < 0) > 0) {
    stop("Generalized mean with p != 1 is only defined for positive values. Check parameter values!")
  }
  if (abs(p) < 0.001) {
    # geometric mean via logs, ignoring non-finite log values
    log_x <- log(x)
    return(exp(mean(log_x[is.finite(log_x)])))
  }
  # numerically stable power mean: factor the maximum out of the sum
  x_max <- max(x, ...)
  scaled_power_sum <- sum((x[x != x_max] / x_max)^p, ...)
  n <- length(x)
  if (is.infinite(scaled_power_sum)) {
    # extreme p: the power mean collapses to the min / max
    if (p < 0) {
      return(min(x, ...))
    }
    return(max(x, ...))
  }
  x_max * exp((log(1 + scaled_power_sum) - log(n)) / p)
}
# Create GR4J parameter from given tf and spatial predictors
# Evaluates each transfer-function string on the l0 predictor grid, rescales
# the result into the model's parameter bounds, and aggregates cell values to
# basin/zone (NB/NZ) level with the generalized mean.
# Input:
#   transfer_functions: named list with transfer strings for GR4Jx1..GR4Jx4
#   l0: data frame with l0 layer (must contain nb, nz and the predictors the
#       transfer functions reference)
#   parameter_bounds: named list with bounds for GR4Jx1..GR4Jx4
#   gmean_parameter: power p of the generalized mean used for aggregation
#   km1: if TRUE, re-map cells to the 1 km NB/NZ rasters before aggregating
#        (reads Data/km1_NB.asc and Data/km1_NZ.asc; needs raster attached)
# Output:
#   data frame with NB_, NZ_ and the aggregated GR4Jx1..GR4Jx4 parameters,
#   ordered by NZ_
# NOTE(review): rescale() maps from the fixed interval c(-11, 11), so the
# transfer-function outputs are implicitly assumed to lie in that range —
# confirm against the function-space generator.
create_GR4J_para <- function(transfer_functions, l0, parameter_bounds, gmean_parameter,
km1 = FALSE){
# create GR4J parameter from a list with transferfunctions
# Input:
# transfer_functions: named list with transfer strings for GR4Jx1, GR4Jx2, GR4Jx3, GR4Jx4
# l0: data frame with l0 layer
# parameter_bounds: named list with parameter bounds for GR4Jx1, GR4Jx2, GR4Jx3, GR4Jx4
# Output:
# data frame with new GR4J parameters
new_gr4j_para <- data.frame(NB_ = l0$nb, NZ_ = l0$nz,
GR4Jx1 = NA, GR4Jx2 = NA, GR4Jx3 = NA, GR4Jx4 = NA)
# evaluate each transfer function on every grid cell; warnings (e.g. NaNs
# from invalid math) are suppressed and surface as NA values instead
new_gr4j_para$GR4Jx1 <- suppressWarnings(
evaluate_function_from_string(transfer_functions$GR4Jx1, l0 = l0))
new_gr4j_para$GR4Jx2 <- suppressWarnings(
evaluate_function_from_string(transfer_functions$GR4Jx2, l0 = l0))
new_gr4j_para$GR4Jx3 <- suppressWarnings(
evaluate_function_from_string(transfer_functions$GR4Jx3, l0 = l0))
new_gr4j_para$GR4Jx4 <- suppressWarnings(
evaluate_function_from_string(transfer_functions$GR4Jx4, l0 = l0))
# scale parameter to parameter bounds
new_gr4j_para$GR4Jx1 <- round(rescale(new_gr4j_para$GR4Jx1, to = parameter_bounds$GR4Jx1), 2)
new_gr4j_para$GR4Jx2 <- round(rescale(new_gr4j_para$GR4Jx2, to = parameter_bounds$GR4Jx2), 2)
new_gr4j_para$GR4Jx3 <- round(rescale(new_gr4j_para$GR4Jx3, to = parameter_bounds$GR4Jx3), 2)
new_gr4j_para$GR4Jx4 <- round(rescale(new_gr4j_para$GR4Jx4, to = parameter_bounds$GR4Jx4), 2)
if(km1){
# re-attach the 1 km basin/zone ids, dropping basins not present in the
# evaluated cells, then aggregate per NZ_/NB_ with the generalized mean
NB_1km <- raster("Data/km1_NB.asc")
NZ_1km <- raster("Data/km1_NZ.asc")
values(NB_1km)[!values(NB_1km) %in% new_gr4j_para$NB_] <- NA
km1_df <- data.frame(new_gr4j_para[, c("GR4Jx1", "GR4Jx2", "GR4Jx3", "GR4Jx4")],
"NB_" = values(NB_1km)[!is.na(values(NB_1km))],
"NZ_" = values(NZ_1km)[!is.na(values(NB_1km))])
new_gr4j_para <- aggregate(. ~ NZ_ + NB_, km1_df,
function(x) round(gmean(x, p = gmean_parameter, na.rm = TRUE), 2))
new_gr4j_para <- new_gr4j_para[order(new_gr4j_para$NZ_), ]
} else {
# aggregate to 2km scale
new_gr4j_para <- aggregate(. ~ NZ_ + NB_, new_gr4j_para,
function(x) round(gmean(x, p = gmean_parameter, na.rm = TRUE), 2))
new_gr4j_para <- new_gr4j_para[order(new_gr4j_para$NZ_), ]
}
return(new_gr4j_para)
}
# Split a transfer-function string into its individual terms
function_splitter <- function(point_tf){
  # Splits on the arithmetic operators / ^ ( ) * + - and removes all
  # whitespace, leaving only the variable names and numeric constants.
  pieces <- unlist(strsplit(point_tf, "[/^()*+-]"))
  pieces <- gsub(" ", "", pieces)
  # drop the empty strings produced by adjacent operators
  Filter(nzchar, pieces)
}
# Model size loss: penalty proportional to the number of terms
size_loss <- function(functions_splitted){
  # 0.001/3 loss units per term, counted over all transfer functions
  n_terms <- length(unlist(functions_splitted))
  n_terms * 0.001 / 3
}
# Load true parameter field depending on the Test_number
true_para_field <- function(Test_number){
  # Reads the "true" synthetic 1 km parameter raster of the given virtual
  # experiment and joins it with the 2 km zone (nz) and basin (nb) rasters.
  # Input:
  #   Test_number: 2.1 (GR4Jx1), 2.2 (GR4Jx3) or 2.3 (GR4Jx4)
  # Output:
  #   data frame with columns nz, nb, true_para (one row per unique zone)
  path <- "Data/spatial_predictors_mur/"
  # raster() / values() are used unqualified below, so attach the package
  library(raster)
  # map the test number to the raster that stores the true parameter
  # (the original duplicated the whole pipeline once per test number and
  # failed with an obscure "object not found" error for other numbers)
  para_file <- switch(as.character(Test_number),
                      "2.1" = "x1_1km.asc",
                      "2.2" = "x3_1km.asc",
                      "2.3" = "x4_1km.asc",
                      stop("No true parameter field defined for Test_number ",
                           Test_number, call. = FALSE))
  true_para_raster <- raster::raster(paste0("True parameters/", para_file))
  nz <- raster(paste0(path, "l0_nz2000_mur.asc"))
  nb <- raster(paste0(path, "l0_nb2000_mur.asc"))
  true_para_field <- data.frame(nz = values(nz), nb = values(nb),
                                true_para = values(true_para_raster))
  # keep only cells inside the catchment and drop duplicated zone rows
  true_para_field <- true_para_field[!is.na(true_para_field$nz), ]
  true_para_field <- true_para_field[!duplicated(true_para_field), ]
  return(true_para_field)
}
# Create a raster object from a transfer function
raster_from_tf <- function(tf, tf_bounds, only_catchment = TRUE,
                           aggregate = FALSE, gmean_parameter, km1 = FALSE){
  # Evaluate a transfer-function string on the spatial predictors of the Mur
  # catchment and return the resulting parameter field as a raster object.
  # Input:
  #   tf: a transfer function as a string
  #   tf_bounds: numeric bounds the evaluated values are rescaled to
  #   only_catchment: mask cells outside the catchment (non-aggregate case only)
  #   aggregate: aggregate the 250 m field over the zones (nz) of the model grid?
  #   gmean_parameter: exponent p of the generalized mean gmean() used when
  #                    aggregating
  #   km1: if TRUE aggregate to the 1 km grid, otherwise to the 2 km grid
  # Output: a raster object with the parameter field
  path <- "Data/spatial_predictors_mur/"
  # get raster objects of catchment; sand doubles as the template grid that
  # the parameter values are written into
  sand <- raster::raster(paste0(path, "l0_sand_mur.asc"))
  nb <- raster::raster(paste0(path, "l0_nb2000_mur.asc"))
  nz <- raster::raster(paste0(path, "l0_nz2000_mur.asc"))
  if(aggregate){
    # 0. Load necessary data (scaled predictors of the full model domain)
    l0_all <- load_sp_mur(na.approx = TRUE, scale = TRUE,
                          only_training_basins = FALSE,
                          full_dataset = FALSE)
    if(km1){
      # 1. Create 250m parameter field: evaluate tf and rescale to tf_bounds,
      #    writing only into catchment cells (nb not NA)
      paraf <- sand
      raster::values(paraf)[is.na(raster::values(nb))] <- NA
      raster::values(paraf)[!is.na(raster::values(nb))] <- rescale(
        evaluate_function_from_string(tf, l0 = l0_all), to = tf_bounds)
      # 2. get 1km NB and IZ information together with 1km values in a df
      NB_1km <- raster::raster("Data/km1_NB.asc")
      IZ_1km <- raster::raster("Data/km1_IZ.asc")
      NZ_1km <- raster::raster("Data/km1_NZ.asc")
      km1_df <- data.frame("para" = raster::values(paraf)[!is.na(raster::values(nb))],
                           "nb" = raster::values(NB_1km)[!is.na(raster::values(NB_1km))],
                           "iz" = raster::values(IZ_1km)[!is.na(raster::values(IZ_1km))],
                           "nz" = raster::values(NZ_1km)[!is.na(raster::values(NZ_1km))])
      # remember the cell order so it can be restored after merge()
      km1_df$unique_id <- 1:nrow(km1_df)
      # 3. Aggregate to 1km raster: one generalized mean per 1 km zone (nz);
      #    km1_df[, -1] drops the raw "para" column, merge() brings the
      #    aggregated one back in
      km1_values <- aggregate(para ~ nz, km1_df, FUN = gmean,
                              p = gmean_parameter, na.rm = TRUE)
      km1_df_all <- merge(km1_df[, -1], km1_values, by = "nz",
                          all.x = TRUE)
      # merge() reorders rows; restore the original cell order
      km1_df_all <- km1_df_all[order(km1_df_all$unique_id), ]
      km1_paraf <- sand
      raster::values(km1_paraf)[is.na(raster::values(nb))] <- NA
      raster::values(km1_paraf)[!is.na(raster::values(nb))] <- km1_df_all$para
      tf_grid <- km1_paraf
    } else {
      # 4. Aggregate to 2km raster
      km2_paraf <- sand
      # define values depending on tf (catchment cells only)
      raster::values(km2_paraf)[is.na(raster::values(nb))] <- NA
      raster::values(km2_paraf)[!is.na(raster::values(nb))] <- rescale(
        evaluate_function_from_string(tf, l0 = l0_all),
        to = tf_bounds)
      # create df with parameter values and nb & nz
      grid_values <- data.frame("para" = raster::values(km2_paraf)[!is.na(raster::values(nb))],
                                "nb" = raster::values(nb)[!is.na(raster::values(nb))],
                                "nz" = raster::values(nz)[!is.na(raster::values(nb))])
      # remember the cell order so it can be restored after merge()
      grid_values$unique_id <- 1:nrow(grid_values)
      # aggregate parameter field: one generalized mean per 2 km zone (nz)
      grid_values_agg <- aggregate(para ~ nz, grid_values, FUN = gmean,
                                   p = gmean_parameter, na.rm = TRUE)
      # add unique id again (merge() reorders rows)
      grid_values_all <- merge(grid_values[, -1], grid_values_agg, by = "nz",
                               all.x = TRUE)
      grid_values_all <- grid_values_all[order(grid_values_all$unique_id), ]
      raster::values(km2_paraf)[!is.na(raster::values(nb))] <- grid_values_all$para
      tf_grid <- km2_paraf
    }
  } else {
    # no aggregation: evaluate on the full (unapproximated) 250 m dataset
    l0_all <- load_sp_mur(na.approx = FALSE, scale = TRUE,
                          full_dataset = TRUE)
    tf_grid <- sand
    # define values depending on tf
    raster::values(tf_grid) <- rescale(
      evaluate_function_from_string(tf, l0 = l0_all),
      to = tf_bounds)
    # subset for the catchment area if wanted
    if(only_catchment){
      # NOTE(review): values(nb) is unqualified here (raster::values is used
      # everywhere else) — relies on the raster package being attached; confirm
      raster::values(tf_grid)[is.na(values(nb))] <- NA
    }
  }
  return(tf_grid)
}
# DDS Function
dds_fs <- function(xBounds.df, numIter, OBJFUN, search_dim, Test_number,
                   true_para_field_df, spatial_predictors, parameter_bounds,
                   para, para_1km, training_basins){
  # Dynamically Dimensioned Search (Tolson & Shoemaker, 2007), maximizing
  # the loss value returned by OBJFUN.
  #
  # INPUTS:
  # xBounds.df: data frame, 1st column minimum, 2nd column maximum (per dimension)
  # numIter: integer, number of iterations
  # OBJFUN: function returning a list with elements
  #         "Current point in function space" (the possibly adjusted x) and
  #         "loss" (scalar value to be maximized)
  # search_dim: integer, dimensionality of the search space
  # the remaining arguments are passed through to OBJFUN unchanged
  #
  # OUTPUTS:
  # two entry list with x_best (matrix, transposed) and y_best (numeric),
  # as they evolve over numIter iterations
  # Format xBounds.df colnames
  colnames(xBounds.df) <- c("min", "max")
  # Generate initial first guess and evaluate it
  x_init <- rnorm(search_dim)
  x_evaluated <- OBJFUN(x_init, Test_number = Test_number,
                        true_para_field_df = true_para_field_df,
                        spatial_predictors = spatial_predictors,
                        parameter_bounds = parameter_bounds,
                        para = para, para_1km = para_1km,
                        training_basins = training_basins)
  # BUGFIX: keep the (possibly adjusted) point returned by OBJFUN; the
  # original assigned it to a dead variable "x_ini" and seeded x_best with
  # the raw guess, inconsistent with the main loop below
  x_init <- x_evaluated$`Current point in function space`
  x_best <- matrix(x_init, nrow = 1)
  # a failed evaluation gets a strongly negative loss so it is never "best"
  if(!is.na(x_evaluated$loss)){
    y_init <- x_evaluated$loss
  } else {
    y_init <- -999
  }
  y_best <- y_init
  # DDS neighbourhood size parameter
  r <- 0.2
  # Select which entry to peturb at each iteration
  peturbIdx <- probPeturb(xBounds.df, numIter)
  # Peturb each entry by N(0,1)*r*(x_max - x_min), reflecting at boundaries
  sigma <- xBounds.df$max - xBounds.df$min
  for (i in 2:numIter){
    # Start from the current best point
    x_test <- x_best[i-1, ]
    # Get entries we will peturb
    idx <- peturbIdx[[i]]
    # Perturbation vector: zero everywhere except the selected entries
    peturbVec <- rep(0, length(x_test))
    N <- rnorm(length(x_test), mean = 0, sd = 1)
    peturbVec[idx] <- r*N[idx]*sigma[idx]
    # Candidate point and its boundary violations
    testPeturb <- x_test + peturbVec
    boundaryViolationIdx <- which(testPeturb < xBounds.df$min | testPeturb > xBounds.df$max)
    # Reflect violated entries to the opposite peturbation direction
    peturbVec[boundaryViolationIdx] <- (-1*r*N[boundaryViolationIdx]*sigma[boundaryViolationIdx])
    # Entries still outside the bounds are clamped to min/max
    testPeturb <- x_test + peturbVec
    minViolationIdx <- which(testPeturb < xBounds.df$min)
    maxViolationIdx <- which(testPeturb > xBounds.df$max)
    testPeturb[minViolationIdx] <- xBounds.df$min[minViolationIdx]
    testPeturb[maxViolationIdx] <- xBounds.df$max[maxViolationIdx]
    # BUGFIX: use the reflected AND clamped candidate. The original computed
    # "x_test <- x_test + peturbVec" here, discarding the clamping above and
    # letting candidates leave the parameter bounds.
    x_test <- testPeturb
    # Evaluate objective function
    x_evaluated <- OBJFUN(x_test, Test_number = Test_number,
                          true_para_field_df = true_para_field_df,
                          spatial_predictors = spatial_predictors,
                          parameter_bounds = parameter_bounds,
                          para = para, para_1km = para_1km,
                          training_basins = training_basins)
    x_test <- x_evaluated$`Current point in function space`
    y_test <- x_evaluated$loss
    # Greedy update: keep the better of candidate and previous best
    if(!is.na(y_test)) {
      y_best[i] <- max(c(y_test, y_best[i-1]))
      bestIdx <- which.max(c(y_test, y_best[i-1]))
    } else {
      y_best[i] <- y_best[i-1]
      bestIdx <- 2
    }
    x_choices <- cbind(x_test, x_best[i-1, ])
    x_best <- rbind(x_best, x_choices[, bestIdx])
  }
  output.list <- list(t(x_best), y_best)
  return(output.list)
}
probPeturb <- function(x, numIter){
  # DDS perturbation schedule: for each of the numIter iterations, decide
  # which of the nrow(x) decision variables is peturbed.
  # Input: x = bounds data frame (one row per variable), numIter = iterations.
  # Output: numIter-entry list with the indices to peturb per iteration;
  #         the inclusion probability decays as 1 - log(i)/log(numIter).
  n_dims <- nrow(x)
  include_prob <- 1 - log(seq_len(numIter)) / log(numIter)
  # one Bernoulli draw per variable and iteration (same RNG order as a
  # row-wise fill of an iterations-by-variables flag matrix)
  draws <- lapply(include_prob, function(p) as.logical(rbinom(n_dims, 1, p)))
  flag_matrix <- matrix(unlist(draws), byrow = TRUE, ncol = n_dims)
  peturbIdx <- apply(flag_matrix, 1, which)
  return(peturbIdx)
}
SPAEF <- function(observations, predictions){
  # SPAtial EFficiency (SPAEF) metric:
  # SPAEF = 1 - sqrt((alpha-1)^2 + (beta-1)^2 + (gamma-1)^2) with
  #   alpha: Pearson correlation between predictions and observations
  #   beta:  ratio of the coefficients of variation (obs over pred)
  #   gamma: histogram intersection of the z-scored fields (100 bins)
  # A perfect match yields 1.
  # Inputs: numeric vectors of observations and predictions (same length)
  # Output: numeric scalar, or NA if the computation fails
  #         (e.g. non-numeric input or zero variance)
  spaef_try <- try({
    alpha <- cor(predictions, observations)
    beta <- (sd(observations)/mean(observations)) / (sd(predictions)/mean(predictions))
    # scale (z-score) both fields so the histograms share a common range
    observations <- scale(observations)
    predictions <- scale(predictions)
    range_true <- max(observations) - min(observations)
    breaks <- seq(min(observations), max(observations), range_true/100)
    c1 <- as.integer(table(cut(observations, breaks = breaks)))
    c2 <- as.integer(table(cut(predictions, breaks = breaks)))
    # histogram intersection, vectorized (replaces a growing c() loop)
    gamma <- sum(pmin(c1, c2))/sum(c1)
    spaef <- 1 - sqrt((alpha-1)^2 + (beta-1)^2 + (gamma-1)^2)
  }, silent = TRUE)
  # inherits() is the robust test for a failed try(); class(x) == "try-error"
  # breaks when an object carries more than one class
  if(inherits(spaef_try, "try-error")) spaef <- NA
  return(spaef)
}
# GR4J model quality
GR4J_model_quality <- function(statistics, Test_number, true_para_field_df,
model_size_loss, new_gr4j_para, relevant_basins = NULL,
statistics_1km = NULL){
# Test 1.1 mean NSE
if(Test_number == 1.1){
output <- list()
# subset relevant basins
if(!is.null(relevant_basins)) statistics <- statistics[statistics$sb %in% relevant_basins, ] # calculate mean loss for all basins
mean_NSE <- mean(statistics$NSE)
wmean_NSE <- weighted.mean(statistics$NSE, w = 1.01-statistics$NSE)
if(is.infinite(mean_NSE)) mean_NSE <- NA
if(is.nan(mean_NSE)) mean_NSE <- NA
# calculate overall loss
full_loss <- mean_NSE - model_size_loss
model_loss <- mean_NSE - model_size_loss
output["mean_NSE"] <- mean_NSE
output["wmean_NSE"] <- wmean_NSE
output["model_loss"] <- model_loss
output["full_loss"] <- full_loss
}
# test 1.2/1.3 weighted mean NSE
if(Test_number == 1.2){
output <- list()
# subset relevant basins
if(!is.null(relevant_basins)) statistics <- statistics[statistics$sb %in% relevant_basins, ]
# calculate mean loss for all basins
mean_NSE <- mean(statistics$NSE)
wmean_NSE <- weighted.mean(statistics$NSE, w = 1.01-statistics$NSE)
if(is.infinite(mean_NSE)) mean_NSE <- NA
if(is.nan(mean_NSE)) mean_NSE <- NA
# calculate overall loss
full_loss <- wmean_NSE - model_size_loss
model_loss <- wmean_NSE - model_size_loss
output["mean_NSE"] <- mean_NSE
output["wmean_NSE"] <- wmean_NSE
output["model_loss"] <- model_loss
output["full_loss"] <- full_loss
}
# Test 2.1-2.3 multi-objective weighted mean NSE using parameter fields
if(Test_number %in% c(2.1, 2.2, 2.3)){
output <- list()
# subset relevant basins
if(!is.null(relevant_basins)) statistics <- statistics[statistics$sb %in% relevant_basins, ]
# calculate mean loss for all basins
mean_NSE <- mean(statistics$NSE)
wmean_NSE <- weighted.mean(statistics$NSE, w = 1.01-statistics$NSE)
# Calculate loss by comparison with observed storage parameter field
if(Test_number == 2.1) estimated_para_field <- new_gr4j_para[, c("NZ_", "NB_", "GR4Jx1")]
if(Test_number == 2.2) estimated_para_field <- new_gr4j_para[, c("NZ_", "NB_", "GR4Jx3")]
if(Test_number == 2.3) estimated_para_field <- new_gr4j_para[, c("NZ_", "NB_", "GR4Jx4")]
# in case relevant basins were chosen (testing)
if(!is.null(relevant_basins)){
true_para_field_df <- true_para_field_df[true_para_field_df$nb %in% relevant_basins, ]
estimated_para_field <- estimated_para_field[estimated_para_field$NB_ %in% relevant_basins, ]
}
merged_paras <- merge(estimated_para_field, true_para_field_df, by.x = c("NZ_", "NB_"),
by.y = c("nz", "nb"))
para_field_loss <- NSE(observations = merged_paras$true_para, predictions = merged_paras$GR4Jx3)
# calculate overall loss
model_loss <- wmean_NSE - model_size_loss
full_loss <- mean(c(para_field_loss, model_loss))
output["mean_NSE"] <- mean_NSE
output["wmean_NSE"] <- wmean_NSE
output["model_loss"] <- model_loss
output["full_loss"] <- full_loss
}
# Test 2.4-2.6 multi-objective weighted mean NSE using states
if(Test_number %in% c(2.4, 2.5)){
output <- list()
# subset relevant basins
if(!is.null(relevant_basins)) statistics <- statistics[statistics$sb %in% relevant_basins, ]
if(Test_number == 2.4) {
gr4j_parameter <- "BW0GEB"
gr4j_state <- "GR4JState1"
true_states <- feather::read_feather("True parameters/GR4J_state1.feather")
}
if(Test_number == 2.5) {
gr4j_parameter <- "BW3GEB"
gr4j_state <- "GR4JState2"
true_states <- feather::read_feather("True parameters/GR4J_state2.feather")
}
# Calculate loss by comparison with observed storage states
# read and format state results
state_names <- read.table("GR4J_distributed/output/COSERO.plus1",
header=FALSE, fill=TRUE, skip = 1, nrows = 1,
stringsAsFactors = FALSE)
state_ind <- grep(state_names, pattern = gr4j_parameter)
state_classes <- rep("NULL", length(state_names))
state_classes[state_ind] <- "numeric"
state_classes[1:3] <- "integer"
states <- read.table("GR4J_distributed/output/COSERO.plus1",
header = FALSE, fill = FALSE, skip = 243,
colClasses = state_classes)
state_classes[c(1:3, state_ind)] <- "character"
names(states) <- read.table("GR4J_distributed/output/COSERO.plus1",
header=FALSE, fill=FALSE, skip = 1,nrows = 1,
colClasses = state_classes)
# get correct names
state_ind2 <- grep(names(states), pattern = gr4j_parameter)
names(states)[state_ind2] <- gsub(names(states)[state_ind2],
pattern = gr4j_parameter,
replacement = gr4j_state)
# in case relevant basins were chosen (testing)
if(!is.null(relevant_basins)){
relevant_columns <- integer()
for(basins in seq_along(relevant_basins)){
current_basin <- paste0("000", relevant_basins[basins])
current_basin <- substring(current_basin,
first = nchar(current_basin) - 3)
relevant_columns <- c(relevant_columns, grep(names(states), pattern = current_basin))
}
relevant_columns <- relevant_columns[relevant_columns != 0]
states <- states[, c(1:3, relevant_columns)]
}
# get only the modelled catchments
true_states <- true_states[, names(true_states) %in% names(states)]
# get only the modelled timesteps
true_states <- as.data.frame(true_states[
paste0(true_states$yyyy, "-", true_states$mm, "-", true_states$dd) %in%
paste0(states$yyyy, "-" ,states$mm, "-", states$dd), ])
# create a list for each catchment and calculate state loss
state_loss <- vector(mode = "list")
for(i in 4:ncol(states)){
state_loss[[i-3]] <- data.frame("true" = true_states[, i], "model" = states[, i])
}
state_loss_nse <- sapply(state_loss,
function(x) {NSE(observations = x$true, predictions = x$model)})
# calculate mean total loss for all basins
mean_NSE <- mean(statistics$NSE)
wmean_NSE <- weighted.mean(statistics$NSE, w = 1.01-statistics$NSE)
full_loss <- numeric(length(state_loss_nse))
for(i in 1:length(state_loss_nse)){
full_loss[i] <- mean(c(statistics$NSE[i] - model_size_loss, state_loss_nse[i]))
}
full_loss <- weighted.mean(full_loss, w = 1.01-full_loss)
# The mean NSE minus the model size loss -> to be consistent with the other tests
model_loss <- wmean_NSE - model_size_loss
# calculate overall loss
output["mean_NSE"] <- mean_NSE
output["wmean_NSE"] <- wmean_NSE
output["model_loss"] <- model_loss
output["full_loss"] <- full_loss
}
if(Test_number == 2.6){
output <- list()
# subset relevant basins
if(!is.null(relevant_basins)) statistics <- statistics[statistics$sb %in% relevant_basins, ]
state_loss_nse <- list()
for(states_qualtiy in 1:2){
if(states_qualtiy == 1) {
gr4j_parameter <- "BW0GEB"
gr4j_state <- "GR4JState1"
true_states <- feather::read_feather("True parameters/GR4J_state1.feather")
}
if(states_qualtiy == 2) {
gr4j_parameter <- "BW3GEB"
gr4j_state <- "GR4JState2"
true_states <- feather::read_feather("True parameters/GR4J_state2.feather")
}
# Calculate loss by comparison with observed storage states
# read and format state results
state_names <- read.table("GR4J_distributed/output/COSERO.plus1",
header=FALSE, fill=TRUE, skip = 1, nrows = 1,
stringsAsFactors = FALSE)
state_ind <- grep(state_names, pattern = gr4j_parameter)
state_classes <- rep("NULL", length(state_names))
state_classes[state_ind] <- "numeric"
state_classes[1:3] <- "integer"
states <- read.table("GR4J_distributed/output/COSERO.plus1",
header = FALSE, fill = FALSE, skip = 243,
colClasses = state_classes)
state_classes[c(1:3, state_ind)] <- "character"
names(states) <- read.table("GR4J_distributed/output/COSERO.plus1",
header=FALSE, fill=FALSE, skip = 1,nrows = 1,
colClasses = state_classes)
# get correct names
state_ind2 <- grep(names(states), pattern = gr4j_parameter)
names(states)[state_ind2] <- gsub(names(states)[state_ind2],
pattern = gr4j_parameter,
replacement = gr4j_state)
# in case relevant basins were chosen (testing)
if(!is.null(relevant_basins)){
relevant_columns <- integer()
for(basins in seq_along(relevant_basins)){
current_basin <- paste0("000", relevant_basins[basins])
current_basin <- substring(current_basin,
first = nchar(current_basin) - 3)
relevant_columns <- c(relevant_columns, grep(names(states), pattern = current_basin))
}
relevant_columns <- relevant_columns[relevant_columns != 0]
states <- states[, c(1:3, relevant_columns)]
}
# get only the modelled catchments
true_states <- true_states[, names(true_states) %in% names(states)]
# get only the modelled timesteps
true_states <- as.data.frame(true_states[
paste0(true_states$yyyy, "-", true_states$mm, "-", true_states$dd) %in%
paste0(states$yyyy, "-" ,states$mm, "-", states$dd), ])
# create a list for each catchment and calculate state loss
state_loss <- vector(mode = "list")
for(i in 4:ncol(states)){
state_loss[[i-3]] <- data.frame("true" = true_states[, i], "model" = states[, i])
}
state_loss_nse[[states_qualtiy]] <- sapply(state_loss,
function(x) {
NSE(observations = x$true,
predictions = x$model)})
}
# calculate mean total loss for all basins
mean_NSE <- mean(statistics$NSE)
wmean_NSE <- weighted.mean(statistics$NSE, w = 1.01-statistics$NSE)
full_loss <- numeric(length(state_loss_nse[[1]]))
for(i in 1:length(full_loss)){
full_loss[i] <- mean(c(statistics$NSE[i] - model_size_loss,
state_loss_nse[[1]][i],
state_loss_nse[[2]][i]))
}
full_loss <- weighted.mean(full_loss, w = 1.01-full_loss)
# The mean NSE minus the model size loss -> to be consistent with the other tests
model_loss <- wmean_NSE - model_size_loss
# calculate overall loss
output["mean_NSE"] <- mean_NSE
output["wmean_NSE"] <- wmean_NSE
output["model_loss"] <- model_loss
output["full_loss"] <- full_loss
}
# Test 3.1-3.3 multi-objective weighted mean NSE using states for both 1km and 2km d-GR4J
if(Test_number == 3.1){
output <- list()
# subset relevant basins
if(!is.null(relevant_basins)) {
statistics <- statistics[statistics$sb %in% relevant_basins, ]
statistics_1km <- statistics_1km[statistics_1km$sb %in% relevant_basins, ]
}
# 2 KM NSE
mean_NSE_2km <- mean(statistics$NSE)
wmean_NSE_2km <- weighted.mean(statistics$NSE, w = 1.01-statistics$NSE)
# 1 KM NSE
mean_NSE_1km <- mean(statistics_1km$NSE)
wmean_NSE_1km <- weighted.mean(statistics_1km$NSE, w = 1.01-statistics_1km$NSE)
# overall NSE
mean_NSE <- mean(c(mean_NSE_1km, mean_NSE_2km))
wmean_NSE <- mean(c(wmean_NSE_1km, wmean_NSE_2km))
if(is.infinite(mean_NSE_2km)) mean_NSE_2km <- NA
if(is.nan(mean_NSE_2km)) mean_NSE_2km <- NA
model_loss <- wmean_NSE - model_size_loss
# calculate overall loss
full_loss <- mean_NSE - model_size_loss
output["mean_NSE"] <- mean_NSE_2km
output["wmean_NSE"] <- wmean_NSE_2km
output["model_loss"] <- model_loss
output["full_loss"] <- full_loss
}
if(Test_number %in% c(3.2, 3.3)){
output <- list()
# subset relevant basins
if(!is.null(relevant_basins)) {
statistics <- statistics[statistics$sb %in% relevant_basins, ]
statistics_1km <- statistics_1km[statistics_1km$sb %in% relevant_basins, ]
}
# 2 KM NSE
mean_NSE_2km <- mean(statistics$NSE)
wmean_NSE_2km <- weighted.mean(statistics$NSE, w = 1.01-statistics$NSE)
# 1 KM NSE
mean_NSE_1km <- mean(statistics_1km$NSE)
wmean_NSE_1km <- weighted.mean(statistics_1km$NSE, w = 1.01-statistics_1km$NSE)
# Losses from states
if(Test_number == 3.2) {
gr4j_parameter <- "BW0GEB"
gr4j_state <- "GR4JState1"
true_states <- feather::read_feather("True parameters/GR4J_state1.feather")
true_states_1km <- feather::read_feather("True parameters/GR4J_state1_1km.feather")
}
if(Test_number == 3.3) {
gr4j_parameter <- "BW3GEB"
gr4j_state <- "GR4JState2"
true_states <- feather::read_feather("True parameters/GR4J_state2.feather")
true_states_1km <- feather::read_feather("True parameters/GR4J_state2_1km.feather")
}
# Calculate state loss
state_loss_nse <- list("1km" = NA, "2km" = NA)
scale_names <- list("1km" = "_1km", "2km" = "")
for(scale in c("1km", "2km")){
scale_name <- scale_names[[scale]]
# Calculate loss by comparison with observed storage states
# read and format state results
state_names <- read.table(paste0("GR4J_distributed", scale_name, "/output/COSERO.plus1"),
header = FALSE, fill = TRUE, skip = 1, nrows = 1,
stringsAsFactors = FALSE)
state_ind <- grep(state_names, pattern = gr4j_parameter)
state_classes <- rep("NULL", length(state_names))
state_classes[state_ind] <- "numeric"
state_classes[1:3] <- "integer"
states <- read.table(paste0("GR4J_distributed", scale_name, "/output/COSERO.plus1"),
header = FALSE, fill = FALSE, skip = 243,
colClasses = state_classes)
state_classes[c(1:3, state_ind)] <- "character"
names(states) <- read.table(paste0("GR4J_distributed", scale_name, "/output/COSERO.plus1"),
header = FALSE, fill = FALSE, skip = 1,nrows = 1,
colClasses = state_classes)
# get correct names
state_ind2 <- grep(names(states), pattern = gr4j_parameter)
names(states)[state_ind2] <- gsub(names(states)[state_ind2],
pattern = gr4j_parameter,
replacement = gr4j_state)
# in case relevant basins were chosen
if(!is.null(relevant_basins)){
relevant_columns <- integer()
for(basins in seq_along(relevant_basins)){
current_basin <- paste0("000", relevant_basins[basins])
current_basin <- substring(current_basin,
first = nchar(current_basin) - 3)
relevant_columns <- c(relevant_columns, grep(names(states), pattern = current_basin))
}
relevant_columns <- relevant_columns[relevant_columns != 0]
states <- states[, c(1:3, relevant_columns)]
}
true_states_scale <- get(paste0("true_states", scale_name))
# get only the modelled catchments
true_states_scale <- true_states_scale[, names(true_states) %in% names(states)]
# get only the modelled timesteps
true_states_scale <- as.data.frame(true_states_scale[
paste0(true_states_scale$yyyy, "-", true_states_scale$mm, "-", true_states_scale$dd) %in%
paste0(states$yyyy, "-" ,states$mm, "-", states$dd), ])
# create a list for each catchment and calculate state loss
state_loss <- vector(mode = "list")
for(i in 4:ncol(states)){
state_loss[[i-3]] <- data.frame("true" = true_states_scale[, i], "model" = states[, i])
}
state_loss_nse[[scale]] <- sapply(state_loss,
function(x) {NSE(observations = x$true, predictions = x$model)})
}
# calculate overall mean NSE
mean_NSE <- mean(c(mean_NSE_1km, mean_NSE_2km))
wmean_NSE <- mean(c(wmean_NSE_1km, wmean_NSE_2km))
if(is.infinite(mean_NSE_2km)) mean_NSE_2km <- NA
if(is.nan(mean_NSE_2km)) mean_NSE_2km <- NA
# calculate full loss
full_loss <- numeric(length(state_loss_nse))
for(i in 1:length(state_loss_nse)){
full_loss_i_2km <- mean(c(statistics$NSE[i] - model_size_loss, state_loss_nse[["2km"]][i]))
full_loss_i_1km <- mean(c(statistics_1km$NSE[i] - model_size_loss, state_loss_nse[["1km"]][i]))
full_loss[i] <- mean(full_loss_i_2km, full_loss_i_1km)
}
full_loss <- weighted.mean(full_loss, w = 1.01-full_loss)
model_loss <- wmean_NSE - model_size_loss
output["mean_NSE"] <- mean_NSE_2km
output["wmean_NSE"] <- wmean_NSE_2km
output["model_loss"] <- model_loss
output["full_loss"] <- full_loss
}
# Test 4.1-4.6 multi-objective optimization with NSE for Q and SPAEF for states
# Test 4.1-4.3 using SPAEF only for the last maps of states
if(Test_number %in% c(4.1, 4.2)){
output <- list()
# subset relevant basins
if(!is.null(relevant_basins)) statistics <- statistics[statistics$sb %in% relevant_basins, ]
if(Test_number == 4.1) {
gr4j_state <- "GR4JSt1"
true_last_states <- as.data.frame(
feather::read_feather(
paste0("True parameters/", training, "_last_GR4J_state1.feather")))
}
if(Test_number == 4.2) {
gr4j_state <- "GR4JSt2"
true_last_states <- as.data.frame(
feather::read_feather(
paste0("True parameters/", training, "_last_GR4J_state2.feather")))
}
#SPAEF
spaef_try <- try({
# Calculate loss by comparison with observed storage states
last_state_names <- read.table("GR4J_distributed/output/statevar_GR4J.dmp",
header=FALSE, fill=TRUE, skip = 2, nrows = 1,
stringsAsFactors = FALSE)
last_states <- read.table("GR4J_distributed/output/statevar_GR4J.dmp",
header = FALSE, fill = FALSE, skip = 3, nrows = 2859)
colnames(last_states) <- last_state_names
last_states <-last_states[last_states$NB %in% relevant_basins,
c("NB", "IZ", gr4j_state)]
true_last_states <- true_last_states[true_last_states$NB %in% relevant_basins, ]
a <- true_last_states[, gr4j_state]
a <- a[is.finite(a)]
b <- last_states[, gr4j_state]
b <- b[is.finite(b)]
alpha <- cor(b, a)
beta <- (sd(a)/mean(a)) / (sd(b)/mean(b))
range_true <- max(a) - min(a)
breaks <- seq(min(a), max(a), range_true/100)
c1 <- as.integer(table(cut(a, breaks = breaks)))
c2 <- as.integer(table(cut(b, breaks = breaks)))
c_min <- numeric()
for(i in seq_along(c1)) c_min <- c(c_min, min(c1[i], c2[i]))
gamma <- sum(c_min)/sum(c1)
spaef <- 1 - sqrt((alpha-1)^2 + (beta-1)^2 + (gamma-1)^2)
})
if(class(spaef_try) == "try-error") spaef <- NA
# calculate mean total loss for all basins
mean_NSE <- mean(statistics$NSE)
wmean_NSE <- weighted.mean(statistics$NSE, w = 1.01-statistics$NSE)
full_loss <- wmean_NSE + spaef - model_size_loss
# The mean NSE minus the model size loss -> to be consistent with the other tests
model_loss <- wmean_NSE - model_size_loss
# calculate overall loss
output["mean_NSE"] <- mean_NSE
output["wmean_NSE"] <- wmean_NSE
output["model_loss"] <- spaef
output["full_loss"] <- full_loss
}
if(Test_number == 4.3){
output <- list()
# subset relevant basins
if(!is.null(relevant_basins)) statistics <- statistics[statistics$sb %in% relevant_basins, ]
# GR4J States 1
gr4j_state <- "GR4JSt1"
true_last_states <- as.data.frame(
feather::read_feather(
paste0("True parameters/", training, "_last_GR4J_state1.feather")))
#SPAEF
spaef_try1 <- try({
# Calculate loss by comparison with observed storage states
last_state_names <- read.table("GR4J_distributed/output/statevar_GR4J.dmp",
header=FALSE, fill=TRUE, skip = 2, nrows = 1,
stringsAsFactors = FALSE)
last_states <- read.table("GR4J_distributed/output/statevar_GR4J.dmp",
header = FALSE, fill = FALSE, skip = 3, nrows = 2859)
colnames(last_states) <- last_state_names
last_states <-last_states[last_states$NB %in% relevant_basins,
c("NB", "IZ", gr4j_state)]
true_last_states <- true_last_states[true_last_states$NB %in% relevant_basins, ]
a <- true_last_states[, gr4j_state]
a <- a[is.finite(a)]
b <- last_states[, gr4j_state]
b <- b[is.finite(b)]
alpha <- cor(b, a)
beta <- (sd(a)/mean(a)) / (sd(b)/mean(b))
range_true <- max(a) - min(a)
breaks <- seq(min(a), max(a), range_true/100)
c1 <- as.integer(table(cut(a, breaks = breaks)))
c2 <- as.integer(table(cut(b, breaks = breaks)))
c_min <- numeric()
for(i in seq_along(c1)) c_min <- c(c_min, min(c1[i], c2[i]))
gamma <- sum(c_min)/sum(c1)
spaef1 <- 1 - sqrt((alpha-1)^2 + (beta-1)^2 + (gamma-1)^2)
})
if(class(spaef_try1) == "try-error") spaef1 <- NA
# GR4J States 2
gr4j_state <- "GR4JSt2"
true_last_states <- as.data.frame(
feather::read_feather(
paste0("True parameters/", training, "_last_GR4J_state2.feather")))
# Calculate loss by comparison with observed storage states
last_state_names <- read.table("GR4J_distributed/output/statevar_GR4J.dmp",
header=FALSE, fill=TRUE, skip = 2, nrows = 1,
stringsAsFactors = FALSE)
last_states <- read.table("GR4J_distributed/output/statevar_GR4J.dmp",
header = FALSE, fill = FALSE, skip = 3, nrows = 2859)
colnames(last_states) <- last_state_names
last_states <-last_states[last_states$NB %in% relevant_basins,
c("NB", "IZ", gr4j_state)]
true_last_states <- true_last_states[true_last_states$NB %in% relevant_basins, ]
#SPAEF
spaef_try2 <- try({
# Calculate loss by comparison with observed storage states
last_state_names <- read.table("GR4J_distributed/output/statevar_GR4J.dmp",
header=FALSE, fill=TRUE, skip = 2, nrows = 1,
stringsAsFactors = FALSE)
last_states <- read.table("GR4J_distributed/output/statevar_GR4J.dmp",
header = FALSE, fill = FALSE, skip = 3, nrows = 2859)
colnames(last_states) <- last_state_names
last_states <-last_states[last_states$NB %in% relevant_basins,
c("NB", "IZ", gr4j_state)]
true_last_states <- true_last_states[true_last_states$NB %in% relevant_basins, ]
a <- true_last_states[, gr4j_state]
a <- a[is.finite(a)]
b <- last_states[, gr4j_state]
b <- b[is.finite(b)]
alpha <- cor(b, a)
beta <- (sd(a)/mean(a)) / (sd(b)/mean(b))
range_true <- max(a) - min(a)
breaks <- seq(min(a), max(a), range_true/100)
c1 <- as.integer(table(cut(a, breaks = breaks)))
c2 <- as.integer(table(cut(b, breaks = breaks)))
c_min <- numeric()
for(i in seq_along(c1)) c_min <- c(c_min, min(c1[i], c2[i]))
gamma <- sum(c_min)/sum(c1)
spaef2 <- 1 - sqrt((alpha-1)^2 + (beta-1)^2 + (gamma-1)^2)
})
if(class(spaef_try2) == "try-error") spaef2 <- NA
if(sum(is.na(c(spaef1, spaef2))) == 0){
spaef <- mean(spaef1, spaef2)
} else spaef <- NA
# calculate mean total loss for all basins
mean_NSE <- mean(statistics$NSE)
wmean_NSE <- weighted.mean(statistics$NSE, w = 1.01-statistics$NSE)
full_loss <- wmean_NSE + spaef - model_size_loss
# The mean NSE minus the model size loss -> to be consistent with the other tests
model_loss <- wmean_NSE - model_size_loss
# calculate overall loss
output["mean_NSE"] <- mean_NSE
output["wmean_NSE"] <- wmean_NSE
output["model_loss"] <- spaef
output["full_loss"] <- full_loss
}
# Test 4.4-4.6 using SPAEF for the time series of state maps
if(Test_number %in% c(4.4, 4.5)){
output <- list()
# subset relevant basins
if(!is.null(relevant_basins)) statistics <- statistics[statistics$sb %in% relevant_basins, ]
gr4j_state <- ifelse(Test_number == 4.4, "GR4JSt1", "GR4JSt2")
# Calculate loss by comparison with observed storage states
# read and format state results
state_files <- list.files("GR4J_distributed/cdr/output", full.names = TRUE)
state_list <- lapply(state_files, function(x) read.table(x, header=TRUE, sep = ","))
if(!is.null(relevant_basins)){
if(sum(relevant_basins %in% test_basins) == length(relevant_basins)){
training_nz <- feather::read_feather("True parameters/training_basins_nz.feather")
state_list <- lapply(state_list, function(x){
x[!(x$NZ %in% training_nz$NZ), ]
})
}
if(sum(relevant_basins %in% training_basins) == length(relevant_basins)){
training_nz <- feather::read_feather("True parameters/training_basins_nz.feather")
state_list <- lapply(state_list, function(x){
x[x$NZ %in% training_nz$NZ, ]
})
}
}
# standardize states
# get relevant state and standardize
state_list <- lapply(state_list, function(x) {
data.frame(NZ = x$NZ, gr4j_state = x[, gr4j_state])})
all_state_list <- Map(merge,
state_list,
true_state_list[1:length(state_list)],
by="NZ", all.y = FALSE)
# For testing use only the testing time steps
if(training == "testing") all_state_list <- all_state_list[2434:length(all_state_list)]
if(training == "training") all_state_list <- all_state_list[241:length(all_state_list)]
# compute spaef for every timestep
all_spaef <- sapply(all_state_list, function(x) {
SPAEF(observations = x[, 2], prediction = x[, 3])
})
spaef <- mean(all_spaef)
# calculate loss
mean_NSE <- mean(statistics$NSE)
wmean_NSE <- weighted.mean(statistics$NSE, w = 1.01-statistics$NSE)
if(is.na(wmean_NSE) | is.na(spaef)){
full_loss <- NA
} else full_loss <- mean(c(wmean_NSE, spaef)) - model_size_loss
# output
output["mean_NSE"] <- mean_NSE
output["wmean_NSE"] <- wmean_NSE
output["model_loss"] <- spaef
output["full_loss"] <- full_loss
}
if(Test_number == 4.6){
output <- list()
# subset relevant basins
if(!is.null(relevant_basins)) statistics <- statistics[statistics$sb %in% relevant_basins, ]
# Calculate loss by comparison with observed storage states
# read and format state results
state_files <- list.files("GR4J_distributed/cdr/output", full.names = TRUE)
state_list <- lapply(state_files, function(x) read.table(x, header=TRUE, sep = ","))
if(!is.null(relevant_basins)){
if(sum(relevant_basins %in% test_basins) == length(relevant_basins)){
training_nz <- feather::read_feather("True parameters/training_basins_nz.feather")
state_list <- lapply(state_list, function(x){
x[!(x$NZ %in% training_nz$NZ), ]
})
}
if(sum(relevant_basins %in% training_basins) == length(relevant_basins)){
training_nz <- feather::read_feather("True parameters/training_basins_nz.feather")
state_list <- lapply(state_list, function(x){
x[x$NZ %in% training_nz$NZ, ]
})
}
}
spaef_list <- list("GR4JSt1" = NA,
"GR4JSt2" = NA)
for(gr4j_state in c("GR4JSt1", "GR4JSt2")){
# get relevant state and standardize
state_list_sub <- lapply(state_list, function(x) {
data.frame(NZ = x$NZ, gr4j_state = x[, gr4j_state])})
true_state_list_sub <- lapply(true_state_list[1:length(state_list)],
function(x) {
data.frame(NZ = x$NZ, gr4j_state = x[, gr4j_state])})
all_state_list <- Map(merge,
state_list_sub,
true_state_list_sub,
by="NZ", all.y = FALSE)
# For testing use only the testing time steps
if(training == "testing") all_state_list <- all_state_list[2434:length(all_state_list)]
if(training == "training") all_state_list <- all_state_list[241:length(all_state_list)]
# compute spaef for every timestep
all_spaef <- sapply(all_state_list, function(x) {
SPAEF(observations = x[, 2], prediction = x[, 3])
})
spaef_list[[gr4j_state]] <- mean(all_spaef)
}
spaef <- mean(unlist(spaef_list))
# calculate loss
mean_NSE <- mean(statistics$NSE)
wmean_NSE <- weighted.mean(statistics$NSE, w = 1.01-statistics$NSE)
if(is.na(wmean_NSE) | is.na(spaef)){
full_loss <- NA
} else full_loss <- mean(c(wmean_NSE, spaef)) - model_size_loss
# output
output["mean_NSE"] <- mean_NSE
output["wmean_NSE"] <- wmean_NSE
output["model_loss"] <- spaef
output["full_loss"] <- full_loss
}
return(output)
}
# Evaluate test basin and test time period performance
#
# Runs the optimized transfer functions over the full Mur catchment for the
# testing time period, computes NSE/loss statistics for all basins, the
# training subset and the test subset, and writes a formatted result file to
# "Test <x>/Test <x.y>/testing/".
#
# Args:
#   test_functions:  named list of transfer function strings
#                    (GR4Jx1, GR4Jx3, GR4Jx4; GR4Jx2 is forced to "0")
#   Optimizer:       optimizer name (used for file naming)
#   Test_number:     numeric test identifier (1.1 ... 4.6)
#   run:             run index (used for file naming)
#   para, para_1km:  GR4J parameter tables of the 2 km and 1 km models
#   training_basins, test_basins: basin id vectors
#   true_state_list: list of true GR4J state data frames (only evaluated for
#                    tests 4.4-4.6 thanks to R's lazy argument evaluation)
#
# Returns: list of summary data frames (general, all basins, training basins,
#          test basins) plus the per-basin NSE statistics.
#
# NOTE(review): relies on objects from the surrounding workspace
# (parameter_bounds, gmean_parameter, load_sp_mur, create_GR4J_para,
# GR4J_model_quality, true_para_field, size_loss, function_splitter).
evaluate_test_basins <- function(test_functions, Optimizer, Test_number, run,
                                 para, para_1km, training_basins, test_basins,
                                 true_state_list){
  true_para_field_df <- true_para_field(Test_number)
  # Tests 4.4/4.5 compare the time series of one true GR4J state (S or R)
  if(Test_number %in% c(4.4, 4.5)){
    if(!exists("true_state_list")) {
      true_state_list <- readRDS("True parameters/true_states_list")
      gr4j_state <- ifelse(Test_number == 4.4, "GR4JSt1", "GR4JSt2")
      true_state_list <- lapply(true_state_list, function(x) {
        data.frame(NZ = x$NZ, gr4j_state = x[, gr4j_state])})
    }
  }
  # Test 4.6 uses both states, so the full state data frames are kept
  if(Test_number == 4.6){
    if(!exists("true_state_list")) {
      true_state_list <- readRDS("True parameters/true_states_list")
    }
  }
  l0 <- load_sp_mur(na.approx = TRUE, scale = TRUE,
                    only_training_basins = FALSE,
                    full_dataset = FALSE)
  test_functions$GR4Jx2 <- "0"  # x2 is fixed to zero in all experiments
  new_gr4j_para <- create_GR4J_para(transfer_functions = test_functions,
                                    l0 = l0,
                                    parameter_bounds = parameter_bounds,
                                    gmean_parameter = gmean_parameter)
  # merge new parameters with parameter file
  para_new <- merge(para, new_gr4j_para, by = c("NB_", "NZ_"), suffixes = c(".old", ""),
                    all.x = TRUE)
  para_new[which(is.na(para_new), arr.ind = TRUE)] <- 0 # parameter of unused basins -> 0
  para_new <- para_new[, -grep(".old", names(para_new))]
  # move the initial state columns to the end of the table
  states <- para_new[, grep("iniSt", names(para_new))]
  para_new <- para_new[, -grep("iniSt", names(para_new))]
  para_new <- cbind(para_new, states, stringsAsFactors = FALSE)
  para_new <- para_new[order(para_new$NZ_), ]
  # swap columns 2 and 3 so the file has the NB_/IZ_/NZ_ order the model expects
  para_new[, 1:3] <- para_new[, c(1, 3, 2)]
  names(para_new)[2:3] <- c("IZ_", "NZ_")
  cat("\n", file = "GR4J_distributed/input/para_Mur_GR4J_fsOptim.txt")
  suppressWarnings(write.table(para_new, "GR4J_distributed/input/para_Mur_GR4J_fsOptim.txt",
                               append = TRUE, row.names = FALSE, quote = FALSE))
  # run the 2 km model
  setwd("GR4J_distributed/")
  sys::exec_wait("start_GR4J_case_study_full_run.bat",
                 std_out = "GR4J_output.txt")
  setwd("..")
  # 2 KM statistics
  statistics <- read.table("GR4J_distributed/output/statistics_gr4j_Mur.txt",
                           skip = 21, header = TRUE)
  if(Test_number %in% c(3.1, 3.2, 3.3)){
    # 1 KM Model parameters
    new_gr4j_para_1km <- create_GR4J_para(transfer_functions = test_functions,
                                          l0 = l0,
                                          parameter_bounds = parameter_bounds,
                                          gmean_parameter = gmean_parameter,
                                          km1 = TRUE)
    para_new_1km <- merge(para_1km, new_gr4j_para_1km, by = c("NB_", "NZ_"), suffixes = c(".old", ""),
                          all.x = TRUE)
    para_new_1km[which(is.na(para_new_1km), arr.ind = TRUE)] <- 0 # parameter of unused basins -> 0
    para_new_1km <- para_new_1km[, -grep(".old", names(para_new_1km))]
    states_1km <- para_new_1km[, grep("iniSt", names(para_new_1km))]
    para_new_1km <- para_new_1km[, -grep("iniSt", names(para_new_1km))]
    para_new_1km <- cbind(para_new_1km, states_1km, stringsAsFactors = FALSE)
    para_new_1km <- para_new_1km[order(para_new_1km$NZ_), ]
    para_new_1km[, 1:3] <- para_new_1km[, c(1, 3, 2)]
    names(para_new_1km)[2:3] <- c("IZ_", "NZ_")
    cat("\n", file = "GR4J_distributed_1km/input/para_Mur_GR4J_fsOptim.txt")
    # BUG FIX: the 1 km input file was written from para_new (the 2 km table);
    # it must contain the freshly built 1 km table para_new_1km
    suppressWarnings(write.table(para_new_1km, "GR4J_distributed_1km/input/para_Mur_GR4J_fsOptim.txt",
                                 append = TRUE, row.names = FALSE, quote = FALSE))
    # 1 KM Model run
    setwd("GR4J_distributed_1km/")
    sys::exec_wait("start_GR4J_case_study_full_run.bat",
                   std_out = "GR4J_output.txt")
    setwd("..")
    # Get statistics and calculate losses
    statistics_1km <- read.table("GR4J_distributed_1km/output/statistics_gr4j_Mur.txt",
                                 skip = 21, header = TRUE)
  } else {
    statistics_1km <- NULL
  }
  # calculate mean loss for all basins
  mean_NSE <- mean(statistics$NSE)
  mean_train_NSE <- mean(statistics$NSE[statistics$sb %in% training_basins])
  mean_test_NSE <- mean(statistics$NSE[statistics$sb %in% test_basins])
  # Model size loss (penalty for overly complex transfer functions)
  functions_splitted <- lapply(test_functions, function_splitter)
  model_size_loss <- size_loss(functions_splitted)
  # All losses for all basins
  evaluation <- GR4J_model_quality(statistics = statistics, Test_number = Test_number,
                                   true_para_field_df = true_para_field_df,
                                   model_size_loss = model_size_loss,
                                   new_gr4j_para = new_gr4j_para,
                                   statistics_1km = statistics_1km,
                                   relevant_basins = c(training_basins, test_basins),
                                   training = FALSE, true_state_list = true_state_list)
  # test losses
  test_evaluation <- GR4J_model_quality(statistics = statistics, Test_number = Test_number,
                                        true_para_field_df = true_para_field_df,
                                        model_size_loss = model_size_loss,
                                        new_gr4j_para = new_gr4j_para,
                                        relevant_basins = test_basins,
                                        statistics_1km = statistics_1km,
                                        training = FALSE, true_state_list = true_state_list)
  # train losses
  train_evaluation <- GR4J_model_quality(statistics = statistics, Test_number = Test_number,
                                         true_para_field_df = true_para_field_df,
                                         model_size_loss = model_size_loss,
                                         new_gr4j_para = new_gr4j_para,
                                         relevant_basins = training_basins,
                                         statistics_1km = statistics_1km,
                                         training = FALSE, true_state_list = true_state_list)
  names(statistics)[1] <- "Basin"
  results <- list(data.frame("mean NSE" = round(mean_NSE, 3),
                             "mean training NSE" = round(mean_train_NSE, 3),
                             "mean test NSE" = round(mean_test_NSE, 3), check.names = FALSE),
                  # overall model results
                  data.frame(
                    "mean NSE" = round(evaluation[["mean_NSE"]], 3),
                    "weighted mean NSE" = round(evaluation[["wmean_NSE"]], 3),
                    "SPAEF or model loss" = round(evaluation[["model_loss"]], 3),
                    "full loss" = round(evaluation[["full_loss"]], 3), check.names = FALSE),
                  # training model results
                  data.frame(
                    "mean NSE" = round(train_evaluation[["mean_NSE"]], 3),
                    "weighted mean NSE" = round(train_evaluation[["wmean_NSE"]], 3),
                    "SPAEF or model loss" = round(train_evaluation[["model_loss"]], 3),
                    "full loss" = round(train_evaluation[["full_loss"]], 3), check.names = FALSE),
                  # test model results
                  data.frame(
                    "mean NSE" = round(test_evaluation[["mean_NSE"]], 3),
                    "weighted mean NSE" = round(test_evaluation[["wmean_NSE"]], 3),
                    "SPAEF or model loss" = round(test_evaluation[["model_loss"]], 3),
                    "full loss" = round(test_evaluation[["full_loss"]], 3), check.names = FALSE),
                  statistics[, 1:2])
  cat("\nSaved testing results of test nr.", Test_number, "in corresponding folder\n")
  file <- paste0("Test ", substr(Test_number, 1, 1),"/",
                 "Test ", Test_number, "/testing/",
                 Optimizer, "_testing_", Test_number,
                 "_run", run, ".txt")
  cat("Testing results for test number", Test_number, ":", Optimizer,
      "- run", run, "\n\n", file = file)
  cat("General results:\n", file = file, append = TRUE)
  suppressWarnings(write.table(t(results[[1]]), file = file, append = TRUE,
                               col.names = FALSE, quote = FALSE))
  cat("\n", file = file, append = TRUE)
  # all basins
  cat("All basins:\n", file = file, append = TRUE)
  suppressWarnings(write.table(t(results[[2]]), file = file, append = TRUE,
                               col.names = FALSE, quote = FALSE))
  cat("\n", file = file, append = TRUE)
  # training basins
  cat("Training basins:\n", file = file, append = TRUE)
  suppressWarnings(write.table(t(results[[3]]), file = file, append = TRUE,
                               col.names = FALSE, quote = FALSE))
  cat("\n", file = file, append = TRUE)
  # Test basins
  cat("Test basins:\n", file = file, append = TRUE)
  suppressWarnings(write.table(t(results[[4]]), file = file, append = TRUE,
                               col.names = FALSE, quote = FALSE))
  cat("\n", file = file, append = TRUE)
  cat("All basins NSE:\n", file = file, append = TRUE)
  suppressWarnings(write.table(results[[5]], file = file, append = TRUE,
                               row.names = FALSE, quote = FALSE))
  cat("\n\nThe tested functions are:\n", file = file, append = TRUE)
  cat("x1 = ", test_functions[["GR4Jx1"]], "\n", file = file, append = TRUE)
  cat("x2 = 0\n", file = file, append = TRUE)
  cat("x3 = ", test_functions[["GR4Jx3"]], "\n", file = file, append = TRUE)
  cat("x4 = ", test_functions[["GR4Jx4"]], "\n", file = file, append = TRUE)
  return(results)
}
# Evaluate training basins and training time period performance
#
# Applies the best transfer functions found by the optimizer, re-runs the
# model(s) for the training period, evaluates the model quality on the
# training basins and writes a formatted result file to
# "Test <x>/Test <x.y>/training/".
#
# Args:
#   end_results:        optimization result with best_x1/best_x3/best_x4
#   run:                run index (used for file naming)
#   Test_number:        numeric test identifier (1.1 ... 4.6)
#   spatial_predictors: l0 predictor data for create_GR4J_para()
#   Optimizer:          optimizer name (used for file naming)
#   para, para_1km:     GR4J parameter tables of the 2 km and 1 km models
#   training_basins, test_basins: basin id vectors
#
# NOTE(review): for tests 4.4-4.6 a true_state_list is loaded if absent; for
# other tests the true_state_list argument passed on to GR4J_model_quality()
# is never evaluated (lazy evaluation), so a missing object does not error.
evaluate_training_basins <- function(end_results, run, Test_number,
                                     spatial_predictors, Optimizer, para, para_1km = NULL,
                                     training_basins, test_basins){
  best_tfs <- list(GR4Jx1 = end_results$best_x1,
                   GR4Jx2 = "0",
                   GR4Jx3 = end_results$best_x3,
                   GR4Jx4 = end_results$best_x4)
  new_gr4j_para <- try(create_GR4J_para(transfer_functions = best_tfs,
                                        l0 = spatial_predictors,
                                        parameter_bounds = parameter_bounds,
                                        gmean_parameter = gmean_parameter),
                       silent = TRUE)
  # inherits() is the robust check for try() failures (class() can be length > 1)
  if(inherits(new_gr4j_para, "try-error")) {
    stop("Failed evaluating training results. No valid transfer function was found.")
  }
  # merge new parameters with parameter file
  para_new <- merge(para, new_gr4j_para, by = c("NB_", "NZ_"), suffixes = c(".old", ""),
                    all.x = TRUE)
  para_new[which(is.na(para_new), arr.ind = TRUE)] <- 0 # parameter of unused basins -> 0
  para_new <- para_new[, -grep(".old", names(para_new))]
  # move the initial state columns to the end of the table
  states <- para_new[, grep("iniSt", names(para_new))]
  para_new <- para_new[, -grep("iniSt", names(para_new))]
  para_new <- cbind(para_new, states, stringsAsFactors = FALSE)
  para_new <- para_new[order(para_new$NZ_), ]
  # swap columns 2 and 3 so the file has the NB_/IZ_/NZ_ order the model expects
  para_new[, 1:3] <- para_new[, c(1, 3, 2)]
  names(para_new)[2:3] <- c("IZ_", "NZ_")
  cat("\n", file = "GR4J_distributed/input/para_Mur_GR4J_fsOptim.txt")
  suppressWarnings(write.table(para_new, "GR4J_distributed/input/para_Mur_GR4J_fsOptim.txt",
                               append = TRUE, row.names = FALSE, quote = FALSE))
  # start model run
  setwd("GR4J_distributed/")
  sys::exec_wait("start_GR4J_case_study.bat",
                 std_out = "GR4J_output.txt")
  setwd("..")
  if(Test_number %in% c(3.1, 3.2, 3.3)){
    # 1 KM Model parameters
    new_gr4j_para_1km <- create_GR4J_para(transfer_functions = best_tfs,
                                          l0 = spatial_predictors,
                                          parameter_bounds = parameter_bounds,
                                          gmean_parameter = gmean_parameter,
                                          km1 = TRUE)
    para_new_1km <- merge(para_1km, new_gr4j_para_1km, by = c("NB_", "NZ_"), suffixes = c(".old", ""),
                          all.x = TRUE)
    para_new_1km[which(is.na(para_new_1km), arr.ind = TRUE)] <- 0 # parameter of unused basins -> 0
    para_new_1km <- para_new_1km[, -grep(".old", names(para_new_1km))]
    states_1km <- para_new_1km[, grep("iniSt", names(para_new_1km))]
    para_new_1km <- para_new_1km[, -grep("iniSt", names(para_new_1km))]
    para_new_1km <- cbind(para_new_1km, states_1km, stringsAsFactors = FALSE)
    para_new_1km <- para_new_1km[order(para_new_1km$NZ_), ]
    para_new_1km[, 1:3] <- para_new_1km[, c(1, 3, 2)]
    names(para_new_1km)[2:3] <- c("IZ_", "NZ_")
    cat("\n", file = "GR4J_distributed_1km/input/para_Mur_GR4J_fsOptim.txt")
    # BUG FIX: the 1 km input file was written from para_new (the 2 km table);
    # it must contain the freshly built 1 km table para_new_1km
    suppressWarnings(write.table(para_new_1km, "GR4J_distributed_1km/input/para_Mur_GR4J_fsOptim.txt",
                                 append = TRUE, row.names = FALSE, quote = FALSE))
    # 1 KM Model run
    setwd("GR4J_distributed_1km/")
    sys::exec_wait("start_GR4J_case_study.bat",
                   std_out = "GR4J_output.txt")
    setwd("..")
    # Get statistics and calculate losses
    statistics_1km <- read.table("GR4J_distributed_1km/output/statistics_gr4j_Mur.txt",
                                 skip = 21, header = TRUE)
  } else {
    statistics_1km <- NULL
  }
  # Get statistics and calculate losses
  statistics <- read.table("GR4J_distributed/output/statistics_gr4j_Mur.txt",
                           skip = 21, header = TRUE)
  # model size loss (penalty for overly complex transfer functions)
  functions_splitted <- lapply(best_tfs, function_splitter)
  model_size_loss <- size_loss(functions_splitted)
  true_para_field_df <- true_para_field(Test_number)
  # Evaluate model quality; tests 4.4/4.5 need one true state time series ...
  if(Test_number %in% c(4.4, 4.5)){
    if(!exists("true_state_list")) {
      true_state_list <- readRDS("True parameters/true_states_list")
      gr4j_state <- ifelse(Test_number == 4.4, "GR4JSt1", "GR4JSt2")
      true_state_list <- lapply(true_state_list, function(x) {
        data.frame(NZ = x$NZ, gr4j_state = x[, gr4j_state])})
    }
  }
  # ... and test 4.6 needs both states
  if(Test_number == 4.6){
    if(!exists("true_state_list")) {
      true_state_list <- readRDS("True parameters/true_states_list")
    }
  }
  evaluation <- GR4J_model_quality(statistics = statistics, Test_number = Test_number,
                                   true_para_field_df = true_para_field_df,
                                   model_size_loss = model_size_loss,
                                   new_gr4j_para = new_gr4j_para,
                                   statistics_1km = statistics_1km,
                                   relevant_basins = training_basins,
                                   true_state_list = true_state_list)
  # define training result df
  names(statistics)[1] <- "Basin"
  train_results <- list(data.frame("mean_NSE" = round(evaluation[["mean_NSE"]], 3),
                                   "weighted_mean_NSE" = round(evaluation[["wmean_NSE"]], 3),
                                   "SPAEF/model_loss" = round(evaluation[["model_loss"]], 3),
                                   "full_loss" = round(evaluation[["full_loss"]], 3)),
                        statistics[, 1:2])
  # save and cat
  cat("\nSaved best training results of test nr.", Test_number, "in corresponding folder\n")
  file <- paste0("Test ", substr(Test_number, 1, 1),"/",
                 "Test ", Test_number, "/training/",
                 Optimizer, "_training_", Test_number,
                 "_run", run, ".txt")
  cat("Training results for test number", Test_number, ":", Optimizer,
      "- run", run, "\n\n", file = file)
  suppressWarnings(write.table(t(train_results[[1]]), file = file, append = TRUE,
                               col.names = FALSE, quote = FALSE))
  cat("\n", file = file, append = TRUE)
  suppressWarnings(write.table(train_results[[2]], file = file,
                               append = TRUE, row.names = FALSE, quote = FALSE))
  cat("\n\nThe optimized functions are:\n", file = file, append = TRUE)
  cat("x1 = ", best_tfs[["GR4Jx1"]], "\n", file = file, append = TRUE)
  cat("x2 = 0\n", file = file, append = TRUE)
  cat("x3 = ", best_tfs[["GR4Jx3"]], "\n", file = file, append = TRUE)
  cat("x4 = ", best_tfs[["GR4Jx4"]], "\n", file = file, append = TRUE)
}
# Function Space Optimization
#
# Runs one complete FSO experiment: prints the test description, prepares
# the result folder structure, edits the 2 km / 1 km model default files for
# the training period, sources the optimizer-specific training script,
# re-configures the default files for the testing period, evaluates the
# optimized transfer functions via evaluate_test_basins() and finally writes
# parameter rasters plus diagnostic plots.
#
# Args:
#   Optimizer:    name of the optimizer script to source
#                 ("Functions/<Optimizer>_optimization_GR4J.R")
#   Test_number:  numeric test identifier (1.1 ... 4.6)
#   run:          run index used for file naming
#   iterations:   number of optimization iterations (used by the sourced script)
#   training_basins, test_basins: basin id vectors
#
# NOTE(review): relies on workspace objects (para, para_1km, parameter_bounds,
# gmean_parameter, true_state_list, raster_from_tf, load_sp_mur, ...).
FSO <- function(Optimizer, Test_number, run, iterations,
                training_basins, test_basins){
  # 1. Setup
  cat("\n***", "Test number", Test_number, "-", Optimizer, "optimization run", run, "***\n")
  if(Test_number == 1.1) cat("Test info: optimization of mean NSE\n")
  if(Test_number == 1.2) cat("Test info: optimization of weighted mean NSE\n")
  if(Test_number == 2.1) cat("Test info: optimization of weighted mean NSE and X1 parameter field\n")
  if(Test_number == 2.2) cat("Test info: optimization of weighted mean NSE and X3 parameter field\n")
  if(Test_number == 2.3) cat("Test info: optimization of weighted mean NSE and X4 parameter field\n")
  if(Test_number == 2.4) cat("Test info: optimization of weighted mean NSE and GR4J state S NSE\n")
  if(Test_number == 2.5) cat("Test info: optimization of weighted mean NSE and GR4J state R NSE\n")
  # BUG FIX: condition was a duplicated "== 2.5", making this message unreachable
  if(Test_number == 2.6) cat("Test info: optimization of weighted mean NSE and GR4J states S & R NSE\n")
  if(Test_number == 3.1) cat("Test info: optimization of weighted mean NSE of 2 km and 1 km model")
  if(Test_number == 3.2) cat("Test info: optimization of weighted mean NSE of 2 km and 1 km model
                             and GR4J state S NSE\n")
  if(Test_number == 3.3) cat("Test info: optimization of weighted mean NSE of 2 km and 1 km model
                             and GR4J state R NSE\n")
  if(Test_number == 4.1) cat("Test info: optimization of weighted mean NSE and the last time step GR4J state S SPAEF\n")
  if(Test_number == 4.2) cat("Test info: optimization of weighted mean NSE and the last time step GR4J state R SPAEF\n")
  if(Test_number == 4.3) cat("Test info: optimization of weighted mean NSE and the last time step GR4J states S & R SPAEF\n")
  if(Test_number == 4.4) cat("Test info: optimization of weighted mean NSE and GR4J state S SPAEF\n")
  # BUG FIX: the next two conditions were duplicated "== 4.4", making these
  # messages unreachable (the code uses 4.4/4.5/4.6 everywhere else)
  if(Test_number == 4.5) cat("Test info: optimization of weighted mean NSE and GR4J state R SPAEF\n")
  if(Test_number == 4.6) cat("Test info: optimization of weighted mean NSE and GR4J states S & R SPAEF\n")
  # Generate Test specific folders in directory
  general_test_folder <- paste0("Test ", substr(Test_number, 1, 1))
  subtest_folder <- paste0("Test ", substr(Test_number, 1, 1),"/",
                           "Test ", Test_number)
  training_folder <- paste0("Test ", substr(Test_number, 1, 1),"/",
                            "Test ", Test_number, "/training")
  testing_folder <- paste0("Test ", substr(Test_number, 1, 1),"/",
                           "Test ", Test_number, "/testing")
  if (!dir.exists(general_test_folder)){
    dir.create(general_test_folder)
  }
  if (!dir.exists(subtest_folder)){
    dir.create(subtest_folder)
  }
  if (!dir.exists(training_folder)){
    dir.create(training_folder)
  }
  if (!dir.exists(testing_folder)){
    dir.create(testing_folder)
  }
  # Plot paths
  # path for saving rasters
  para_fields <- paste0("Test ", substr(Test_number, 1, 1),"/",
                        "Test ", Test_number, "/parameter fields/")
  if (!dir.exists(para_fields)){
    dir.create(para_fields)
  }
  # paths for run specific para fields
  para_fields2 <- paste0(para_fields, "run_", run, "/")
  if (!dir.exists(para_fields2)){
    dir.create(para_fields2)
  }
  if (!dir.exists(paste0(para_fields2, "Plots/"))){
    dir.create(paste0(para_fields2, "Plots/"))
  }
  # Path for plots
  diag_path1 <- paste0(para_fields, "../diagnostic plots/")
  diag_path2 <- paste0(para_fields, "../diagnostic plots/run_", run, "/")
  if (!dir.exists(diag_path1)){
    dir.create(diag_path1)
  }
  if (!dir.exists(diag_path2)){
    dir.create(diag_path2)
  }
  diag_path3 <- paste0(para_fields, "../diagnostic plots/run_", run, "/", Optimizer, "/")
  if (!dir.exists(diag_path3)){
    dir.create(diag_path3)
  }
  # Load spatial information about storage parameter
  true_para_field_df <- true_para_field(Test_number)
  # remove old states if existent
  tmp <- do.call(file.remove, list(
    list.files("GR4J_distributed/cdr/output", full.names = TRUE)))
  # 2. Training
  # CHANGE 2 KM DEFAULT FILE:
  default <- readLines("GR4J_distributed/input/defaults.txt")
  # set para file
  default[14] <- "para_Mur_GR4J_fsOptim.txt"
  default[15] <- "para_Mur_GR4J_true.txt"
  # set dates
  default[32] <- "2003 01 02 00 00" # start date
  default[33] <- "2009 08 31 00 00" # backup
  default[36] <- "2009 08 31 00 00"# end date
  default[37] <- "2012 12 31 00 00" # backup
  # set spin-up
  default[40] <- "241"
  default[41] <- "2433"
  # set Datafile
  default[7] <- "QObs_24h_synth_GR4J.txt" # used
  default[8] <- "QObs_24h.txt" # backup
  # set output type (state maps only needed for the SPAEF time series tests)
  default[26] <- ifelse(Test_number %in% c(4.4, 4.5), "1", "0")
  writeLines(default, con = "GR4J_distributed/input/defaults.txt")
  # CHANGE 1 KM DEFAULT FILE:
  default <- readLines("GR4J_distributed_1km/input/defaults.txt")
  # set para file
  default[16] <- "para_Mur_GR4J_fsOptim.txt"
  default[17] <- "para_Mur_GR4J_true.txt"
  # set dates
  default[31] <- "2003 01 02 00 00" # start date
  default[32] <- "2009 08 31 00 00" # backup
  default[35] <- "2009 08 31 00 00" # end date
  default[36] <- "2012 12 31 00 00" # backup
  # set spin-up time
  default[39] <- "241"
  default[40] <- "2433"
  # set Datafile
  default[7] <- "QObs_24h_synth_GR4J.txt" # used
  default[8] <- "QObs_24h.txt" # backup
  # set output type
  default[25] <- "1"
  # write new default file
  writeLines(default, con = "GR4J_distributed_1km/input/defaults.txt")
  # FSO optimization
  source(paste0("Functions/", Optimizer, "_optimization_GR4J.R"), local = TRUE)
  results <- feather::read_feather(paste0("Test ", substr(Test_number, 1, 1),"/",
                                          "Test ", Test_number, "/", Optimizer,
                                          "_GR4J_optimization_",
                                          Test_number, "_run", run, ".feather"))
  # define optimized functions (last row = best functions found)
  result_functions <- list(GR4Jx1 = tail(results$best_x1, 1),
                           GR4Jx3 = tail(results$best_x3, 1),
                           GR4Jx4 = tail(results$best_x4, 1))
  # BUG FIX: was c("...") which built and discarded the string; cat() prints it
  cat("\n------------------------------------------\n")
  cat("Finished Function Space Optimization\nOptimized function:\n")
  for(i in 1:3) cat(names(result_functions)[i], ":", result_functions[[i]], "\n")
  cat("\nCreating and saving diagnostic plots in",
      paste0("Test ", substr(Test_number, 1, 1),"/",
             "Test ", Test_number, "/run_", run, "/", Optimizer))
  # 3. Testing
  # CHANGE 2 KM DEFAULT FILE:
  default <- readLines("GR4J_distributed/input/defaults.txt")
  # set para file
  default[14] <- "para_Mur_GR4J_fsOptim.txt"
  default[15] <- "para_Mur_GR4J_true.txt"
  # set dates
  default[32] <- "2003 01 02 00 00" # start date
  default[33] <- "2009 08 31 00 00" # backup -> never used because spin-up is covering that
  default[36] <- "2012 12 31 00 00" # end date
  default[37] <- "2009 08 31 00 00" # backup
  # set spin-up
  default[40] <- "2433"
  default[41] <- "241"
  # set Datafile
  default[7] <- "QObs_24h_synth_GR4J.txt" # used
  default[8] <- "QObs_24h.txt" # backup
  writeLines(default, con = "GR4J_distributed/input/defaults.txt")
  # CHANGE 1 KM DEFAULT FILE:
  default <- readLines("GR4J_distributed_1km/input/defaults.txt")
  # set para file
  default[16] <- "para_Mur_GR4J_fsOptim.txt"
  default[17] <- "para_Mur_GR4J_true.txt"
  # set dates
  default[31] <- "2003 01 02 00 00" # start date
  default[32] <- "2009 08 31 00 00" # backup
  default[35] <- "2012 12 31 00 00" # end date
  default[36] <- "2009 08 31 00 00" # backup
  # set spin-up time
  default[39] <- "2433"
  default[40] <- "241"
  # set Datafile
  default[7] <- "QObs_24h_synth_GR4J.txt" # used
  default[8] <- "QObs_24h.txt" # backup
  # set output type
  default[25] <- "1"
  # write new default file
  writeLines(default, con = "GR4J_distributed_1km/input/defaults.txt")
  # Start test evaluation
  test_results <- evaluate_test_basins(test_functions = result_functions,
                                       Optimizer = Optimizer,
                                       Test_number = Test_number,
                                       run = run,
                                       training_basins = training_basins,
                                       test_basins = test_basins,
                                       para = para, para_1km = para_1km,
                                       true_state_list = true_state_list)
  # 4. Diagnostic Plots & rasters
  library(ggplot2, quietly = TRUE)
  library(ggpubr, quietly = TRUE)
  for(parameter in c("x1", "x3", "x4")){
    plot_parameter <- list("x1" = "X1", "x3" = "X3", "x4" = "X4")
    # eval() on the string literal was a no-op -> use the name directly
    raster_250m <- raster_from_tf(tf = result_functions[[paste0("GR4J", parameter)]],
                                  tf_bounds = parameter_bounds[[paste0("GR4J", parameter)]])
    raster_1km <- raster_from_tf(tf = result_functions[[paste0("GR4J", parameter)]],
                                 tf_bounds = parameter_bounds[[paste0("GR4J", parameter)]],
                                 aggregate = TRUE, km1 = TRUE, gmean_parameter = gmean_parameter)
    raster_2km <- raster_from_tf(tf = result_functions[[paste0("GR4J", parameter)]],
                                 tf_bounds = parameter_bounds[[paste0("GR4J", parameter)]],
                                 aggregate = TRUE, km1 = FALSE, gmean_parameter = gmean_parameter)
    # save each resolution as ascii raster + png overview plot
    for(rasters in c("250m", "1km", "2km")){
      writeRaster(get(paste0("raster_", rasters)),
                  paste0(para_fields2, Optimizer, "_", Test_number, "_", parameter, "_", rasters, ".asc"),
                  format = "ascii", overwrite = TRUE)
      png(paste0(para_fields2,"/Plots/", Optimizer, "_", Test_number, "_", parameter, "_", rasters, ".png"),
          width = 1200, height = 800)
      plot(get(paste0("raster_", rasters)),
           main = paste0("Parameter", plot_parameter[[parameter]], " - ", Optimizer))
      dev.off()
    }
    # get true parameter field
    true_parameter <- raster::raster(paste0("True parameters/", parameter, "_2km.asc"))
    # Plot True vs. predicted parameters
    plot_df <- data.frame("Observation" = values(true_parameter),
                          "Prediction" = values(raster_2km))
    max_val <- max(plot_df, na.rm = TRUE)
    min_val <- min(plot_df, na.rm = TRUE)
    plot_df <- plot_df[!is.na(plot_df$Observation), ]
    correlation <- round(cor(plot_df, use = "pairwise.complete.obs")[2, 1], 2)
    plot_df <- plot_df[sample(nrow(plot_df), 500), ]
    ggplot(plot_df, aes(Observation, Prediction)) + geom_point(col = "cornsilk4") +
      geom_smooth(method='lm', col = "darkgoldenrod2") +
      labs(x = paste0("True ", plot_parameter[[parameter]]),
           y = paste0("Predicted ", plot_parameter[[parameter]])
      ) + annotate(geom = "text",
                   x = max_val, y = max_val,
                   label = paste0("R = ", correlation),
                   size = 4, hjust = 1, vjust = 2) +
      ylim(min_val, max_val) + xlim(min_val, max_val) +
      theme_bw()
    ggsave(paste0(diag_path2, Optimizer, "/4_", Optimizer, "_", parameter, "_vs_true.png"),
           width = 7, height = 7, units = "in")
    plot_df_melt <- suppressWarnings(reshape2::melt(plot_df))
    ggplot(plot_df_melt, aes(value, fill = variable)) +
      geom_density(alpha = 0.4) +
      labs(
        x = plot_parameter[[parameter]],
        title = paste0("GR4J parameter ", plot_parameter[[parameter]]),
        subtitle = paste0(Optimizer, " optimization, ", "density estimation")
      ) + scale_fill_discrete(labels = c("true values",
                                         "predicted values"),
                              name= "")
    ggsave(paste0(diag_path2, Optimizer, "/4_", Optimizer, "_", parameter, "_vs_true_density.png"),
           width = 7, height = 7, units = "in")
  }
  # Plot map with good and bad results
  if(Sys.info()[1] == "Linux"){
    path <- "/media/cfgrammar/Data/Dropbox/Diss/CF_Grammar/Data/spatial_predictors_mur/"
  } else {
    path <- "D:/Dropbox/Diss/CF_Grammar/Data/spatial_predictors_mur/"
  }
  # get raster objects of catchment
  nb <- raster::raster(paste0(path, "l0_nb2000_mur.asc"))
  # "nrow" relied on partial argument matching -> spell out read.table's "nrows"
  test_result <- read.table(paste0(para_fields, "../testing/",
                                   Optimizer, "_testing_", Test_number, "_run", run, ".txt"),
                            skip = 26, header = TRUE, nrows = 112)
  # 1. Plot: Good vs. Bad
  good_results <- test_result[test_result$NSE >= 0.8,]
  bad_results <- test_result[test_result$NSE < 0.8,]
  # define good/bad/training as factor values
  good_bad <- nb
  values(good_bad)[values(nb) %in% good_results$Basin] <- 1
  values(good_bad)[values(nb) %in% bad_results$Basin] <- 2
  values(good_bad)[values(nb) %in% training_basins] <- 3
  values(good_bad)[is.na(values(nb))] <- NA
  # Make plotting data frame
  good_bad.p <- raster::rasterToPoints(good_bad)
  df <- data.frame(good_bad.p)
  # Make appropriate column headings
  colnames(df) <- c("Longitude", "Latitude", "Quality")
  df$Quality <- factor(df$Quality, levels = c(1, 2, 3), labels = c("good", "bad", "training"))
  # Now make the map
  ggplot(data = df, aes(y = Latitude, x = Longitude)) +
    geom_raster(aes(fill = Quality)) +
    theme_bw() +
    coord_equal() +
    scale_fill_manual(values = c("chartreuse2", "brown1", "grey"),
                      labels = c("NSE >= 0.8", "NSE < 0.8", "Training"),
                      name = "", drop = FALSE) +
    labs(
      title = paste0(Optimizer, " optimization"),
      subtitle = "Testing time period 2009 - 2012"
    ) +
    ggsave(paste0(diag_path2, Optimizer, "/1_result_map_", Optimizer, ".png"),
           width = 7, height = 7, units = "in")
  # 2. plot: NSE map
  nse_basins <- nb
  for (i in unique(nb)){
    values(nse_basins)[values(nb) == i] <- test_result$NSE[test_result$Basin == i]
  }
  nse_basins.p <- raster::rasterToPoints(nse_basins)
  df2 <- data.frame(nse_basins.p)
  #Make appropriate column headings
  colnames(df2) <- c("Longitude", "Latitude", "NSE")
  df2$NSE <- round(df2$NSE, 4)
  #Now make the map
  min_NSE <- min(df2$NSE)
  if(min_NSE < -1){
    breaks <- c(1, 0, -1, min_NSE)
    cols <- c("black", "brown1", "chartreuse3")
  } else {
    breaks <- c(1, 0, -1)
    cols <- c("brown1", "chartreuse3")
  }
  if(min_NSE > 0){
    nse_lim <- c(0, 1)
  } else {
    nse_lim <- c(min_NSE, 1)
  }
  ggplot(data = df2, aes(y = Latitude, x = Longitude)) +
    geom_raster(aes(fill=NSE)) +
    theme_bw() +
    coord_equal() +
    scale_fill_gradientn("NSE", limits = nse_lim, breaks = breaks,
                         colors = cols) +
    labs(
      title = paste0(Optimizer, " optimization"),
      subtitle = "Testing time period 2009 - 2012"
    ) +
    ggsave(paste0(diag_path2, Optimizer, "/2_NSE_map_", Optimizer, ".png"),
           width = 7, height = 7, units = "in")
  # 3. plot: Elevation vs. good and bad predictions
  compare_results_df <- load_sp_mur(scale = FALSE, na.approx = FALSE,
                                    only_training_basins = FALSE, full_dataset = FALSE)
  #compare_results_df <- aggregate(elevation ~ nb, compare_results_df, mean)
  compare_results_df <- merge(compare_results_df, test_result, by.x = "nb", by.y= "Basin")
  compare_results_df$good_bad <- "NSE > 0.8"
  compare_results_df$good_bad[compare_results_df$nb %in% bad_results$Basin] <- "NSE <= 0.8"
  compare_results_df$good_bad[compare_results_df$nb %in% training_basins] <- "Training"
  compare_results_df$good_bad <- factor(compare_results_df$good_bad,
                                        levels = c("NSE <= 0.8", "NSE > 0.8", "Training"))
  ggplot(compare_results_df, aes(good_bad, elevation)) + geom_boxplot() +
    scale_x_discrete("good_bad", drop = FALSE) +
    labs(
      x = "", y = "mean zone elevation",
      title = paste0(Optimizer, " optimization"),
      subtitle = "Testing time period 2009 - 2012"
    ) +
    ggsave(paste0(diag_path2, Optimizer, "/3_elevation_vs_prediction_",
                  Optimizer, ".png"),
           width = 7, height = 7, units = "in")
  # 4. Compare Training/test NSE for test time period
  test_result$train_test <- "Test"
  test_result$train_test[test_result$Basin %in% training_basins] <- "Training"
  ggplot(test_result, aes(factor(train_test), NSE)) + geom_boxplot() +
    labs(
      x = "", y = "NSE of testing time period",
      title = paste0(Optimizer, " optimization NSE"),
      subtitle = "Testing time period 2009 - 2012"
    ) +
    ggsave(paste0(diag_path2, Optimizer, "/4_NSE_distributions_",
                  Optimizer, ".png"),
           width = 7, height = 7, units = "in")
  cat("\nDone!\n")
}
# Load FSO setup
# Prepare the FSO environment: loads the case-study configuration and the
# function-space VAE generator into the global environment via source().
# Side effects only; called for its sourcing and status messages.
FSO_setup <- function() {
  cat("\nCase study is setup as defined in Functions/case_study_setup.\n")
  setup_scripts <- c(
    "Functions/case_study_setup.R",  # case study configuration
    "Functions/FSO_VAE_generator.R"  # function-space VAE
  )
  for (script in setup_scripts) {
    source(script)
  }
  cat("\nFSO setup complete!\n")
}
|
3477570e0ab4cb51ebeccf3e973bdb6be3d713dd
|
34f3fd536b2f9a5be12d13e3aaedef7631f45e98
|
/plot4.R
|
af6559d0f71caa190a762f9c0776fc20a16b8525
|
[] |
no_license
|
ChetanaTarakhala/ExData_Plotting1
|
5aaf1de225757f29cabd7a912ef711487986912a
|
a16b22c63401970963894d3d86fcd736f9865663
|
refs/heads/master
| 2021-01-21T13:25:27.289146
| 2017-09-01T14:28:28
| 2017-09-01T14:28:28
| 101,838,538
| 0
| 0
| null | 2017-08-30T04:42:05
| 2017-08-30T04:42:05
| null |
UTF-8
|
R
| false
| false
| 1,504
|
r
|
plot4.R
|
# Plot 4 (Exploratory Data Analysis course): a 2x2 panel of household
# power-consumption time series for 2007-02-01 and 2007-02-02, copied to
# plot4.png.
library(datasets)
# Read everything as character so the date/time columns and any non-numeric
# missing markers survive; measurement columns are coerced per column below.
electric_data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, colClasses = "character")
# Parse the Date column and keep only the two target days.
electric_data$Date <- as.Date(electric_data$Date, "%d/%m/%Y")
subset_data <- subset(electric_data, Date == "2007-02-01" | Date == "2007-02-02")
# Combine Date and Time into POSIXlt timestamps for the x axis.
datetime <- paste(subset_data$Date, subset_data$Time)
datetime <- strptime(datetime, "%Y-%m-%d %H:%M:%S")
# Coerce measurement columns to numeric; non-numeric entries (presumably "?"
# missing markers -- verify against the raw file) become NA with a warning.
# NOTE(review): column 6 is skipped -- it is not used in the plots below.
subset_data[,3] <- as.numeric(subset_data[,3])
subset_data[,4] <- as.numeric(subset_data[,4])
subset_data[,5] <- as.numeric(subset_data[,5])
subset_data[,7] <- as.numeric(subset_data[,7])
subset_data[,8] <- as.numeric(subset_data[,8])
subset_data[,9] <- as.numeric(subset_data[,9])
# Arrange four base-graphics panels in a 2x2 grid.
par(mfrow = c(2,2))
with(subset_data, {
plot(datetime, Global_active_power, type = "l", xlab= "", ylab="Global Active Power")
plot(datetime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# Third panel overlays the three sub-metering series with a legend.
plot(datetime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(datetime, Sub_metering_2, col = "red")
lines(datetime, Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
plot(datetime, Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
# Copy the on-screen device to a 480x480 PNG, then close the PNG device.
dev.copy(png, "plot4.png", width = 480, height = 480, units = "px")
dev.off()
|
a977687df0aa8dcbf9569dba2b06f5ad49d7383a
|
9a5e7f18f0b0146471de36113fba3128741c6245
|
/churn1.R
|
0c4b7b6ecf9caa4b3f42ab5e944e40acf8f87859
|
[] |
no_license
|
radhikaluvani/churn-redunction
|
54fdd506d9cc8569bb6e29e799fe64607e1dad43
|
7931531789252de62c68933b3855a37196d5090b
|
refs/heads/master
| 2020-04-15T16:02:33.244011
| 2019-01-09T08:24:20
| 2019-01-09T08:24:20
| 164,816,618
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,190
|
r
|
churn1.R
|
rm(list=ls(all=T))
setwd("C:/Users/Neelam/Desktop/Workradhika/project")
getwd()
#loading libararies
library(caret)
library(rpart)
library(C50)
library(rattle)
library(partykit)
library(randomForest)
library(ROCR)
library(ggplot2)
library(reshape2)
library(car)
library(corrplot)
library(e1071)
library(scales)
library(psych)
library(gplots)
library(corrgram)
library(DMwR)
library(unbalanced)
library(dummies)
library(Information)
library(MASS)
library(rpart)
library(gbm)
library(ROSE)
library(sampling)
library(DataCombine)
library(inTrees)
# ---- Load and recode the churn data --------------------------------------
# Read the raw churn data set from the working directory.
data <- read.csv("churndata.csv")

# Coerce the (presumably two-level factor) columns to their integer level
# codes. NOTE(review): this assumes read.csv produced factors whose level
# order maps 1 -> "no", 2 -> "yes" -- confirm against the raw file.
data$Churn <- as.integer(data$Churn)
data$international.plan <- as.integer(data$international.plan)
data$voice.mail.plan <- as.integer(data$voice.mail.plan)

# Re-map the 1/2 level codes to conventional 0/1 indicators.
data$Churn[data$Churn == 1] <- 0
data$Churn[data$Churn == 2] <- 1
data$international.plan[data$international.plan == 1] <- 0
data$international.plan[data$international.plan == 2] <- 1
data$voice.mail.plan[data$voice.mail.plan == 1] <- 0
data$voice.mail.plan[data$voice.mail.plan == 2] <- 1

######################### missing value analysis ##########################
# BUG FIX: the original called na.omit(data) without assigning the result,
# so rows containing NAs were never actually removed from `data`.
data <- na.omit(data)
summary(data)

# Per-column standard deviation (non-numeric columns yield NA with a warning).
sapply(data, sd)

# Drop identifier-like / unused variables before modelling.
data$state <- NULL
data$area.code <- NULL
data$phone.number <- NULL
data$number.customer.service.calls <- NULL
cormtrix = round(cor(data) , digits = 2)
#histogram
ggplot(data , aes(x = data$total.day.minutes ))+
geom_histogram(binwidth = 1 , fill = "white" , colour = "blue")+
ggtitle("Histogram Analysis") + theme(text=element_text(size=15))
ggplot(data , aes(x = data$total.day.charge ))+
geom_histogram(binwidth = 1 , fill = "white" , colour = "blue")+
ggtitle("Histogram Analysis") + theme(text=element_text(size=15))
ggplot(data , aes(x = data$total.day.calls ))+
geom_histogram(binwidth = 1 , fill = "white" , colour = "blue")+
ggtitle("Histogram Analysis") + theme(text=element_text(size=15))
ggplot(data , aes(x = data$total.eve.minutes ))+
geom_histogram(binwidth = 1 , fill = "white" , colour = "purple")+
ggtitle("Histogram Analysis") + theme(text=element_text(size=15))
ggplot(data , aes(x = data$total.eve.calls ))+
geom_histogram(binwidth = 1 , fill = "white" , colour = "purple")+
ggtitle("Histogram Analysis") + theme(text=element_text(size=15))
ggplot(data , aes(x = data$total.eve.charge ))+
geom_histogram(binwidth = 1 , fill = "white" , colour = "purple")+
ggtitle("Histogram Analysis") + theme(text=element_text(size=15))
ggplot(data , aes(x = data$total.intl.minutes))+
geom_histogram(binwidth = 1 , fill = "white" , colour = "navyblue")+
ggtitle("Histogram Analysis") + theme(text=element_text(size=15))
ggplot(data , aes(x = data$total.intl.charge))+
geom_histogram(binwidth = 1 , fill = "white" , colour = "navyblue")+
ggtitle("Histogram Analysis") + theme(text=element_text(size=15))
ggplot(data , aes(x = data$total.intl.calls))+
geom_histogram(binwidth = 1 , fill = "white" , colour = "navyblue")+
ggtitle("Histogram Analysis") + theme(text=element_text(size=15))
ggplot(data , aes(x = data$account.length))+
geom_histogram(binwidth = 1 , fill = "white" , colour = "orange")+
ggtitle("Histogram Analysis") + theme(text=element_text(size=15))
ggplot(data , aes(x = data$voice.mail.plan))+
geom_histogram(binwidth = 1 , fill = "white" , colour = "orange")+
ggtitle("Histogram Analysis") + theme(text=element_text(size=15))
############################ Outlier Analysis ##############################
# BoxPlots - distribution and outlier check on the numeric columns.
numeric_index <- sapply(data, is.numeric)  # flag the numeric columns
numeric_data <- data[, numeric_index]
cnames <- colnames(numeric_data)

# Build one boxplot object (gn1, gn2, ...) per numeric column, grouped by
# Churn. seq_along() replaces 1:length() so empty cnames is handled safely.
for (i in seq_along(cnames)) {
  assign(paste0("gn", i),
         ggplot(aes_string(y = (cnames[i]), x = "Churn"), data = subset(data)) +
           stat_boxplot(geom = "errorbar", width = 0.5) +
           geom_boxplot(outlier.colour = "red", fill = "blue", outlier.shape = 18,
                        outlier.size = 0.5, notch = FALSE) +
           theme(legend.position = "bottom") +
           labs(y = cnames[i], x = "Churn") +
           ggtitle(paste("Boxplot churn for", cnames[i])))
}

# Arrange the per-column boxplots in grids of three.
# NOTE(review): the hard-coded gn* names assume at least 16 numeric
# columns -- verify against length(cnames) after the earlier column drops.
gridExtra::grid.arrange(gn1, gn2, gn4, ncol = 3)
gridExtra::grid.arrange(gn5, gn6, gn7, ncol = 3)
gridExtra::grid.arrange(gn8, gn9, gn10, ncol = 3)
gridExtra::grid.arrange(gn11, gn12, gn13, ncol = 3)
gridExtra::grid.arrange(gn14, gn15, gn16, ncol = 3)

# Remove boxplot outliers from every numeric variable.
for (i in cnames) {
  print(i)
  val <- data[, i][data[, i] %in% boxplot.stats(data[, i])$out]
  print(length(val))
  # BUG FIX: the original subset used `train[, i]`, but `train` is not
  # defined until later in the script; the filter must be taken on `data`.
  data <- data[which(!data[, i] %in% val), ]
}

# Re-draw the grids after outlier removal.
# NOTE(review): the gn* objects captured `data` when they were built, so
# these re-draws show the PRE-removal distributions -- confirm intended.
gridExtra::grid.arrange(gn1, gn2, gn4, ncol = 3)
gridExtra::grid.arrange(gn5, gn6, gn7, ncol = 3)
gridExtra::grid.arrange(gn8, gn9, gn10, ncol = 3)
gridExtra::grid.arrange(gn11, gn12, gn13, ncol = 3)
gridExtra::grid.arrange(gn14, gn15, gn16, ncol = 3)
############################### Sampling ###################################
# ROBUSTNESS FIX: the original hard-coded 5000 rows, but the outlier-removal
# step above changes nrow(data); size everything from the actual row count.
n_rows <- nrow(data)

# Simple random sample without replacement (capped at 5000 rows as before).
data_sample <- data[sample(n_rows, min(5000, n_rows), replace = FALSE), ]

# (Stratified sampling via sampling::strata was sketched in the original
# but left disabled; see version history if needed.)

# Systematic sampling: pick every k-th row starting from a random offset.
#
# N -- population size; n -- desired sample size.
# Returns the n selected row indices: r, r + k, ..., r + k * (n - 1),
# where k = ceiling(N / n) and r is drawn uniformly from 1..k.
sys.sample <- function(N, n) {
  k <- ceiling(N / n)
  r <- sample(1:k, 1)
  seq(r, r + k * (n - 1), k)
}

# Select the systematic-sample row indices over the current data.
lis <- sys.sample(n_rows, 1000)

# Row-index helper column (was a hard-coded 1:5000, which errors once the
# row count differs from 5000).
# NOTE(review): this `index` column is never removed afterwards and will
# enter the models as a predictor -- confirm intended.
data$index <- seq_len(n_rows)

# Extract the systematic subset and inspect its class balance.
systematic_data <- data[which(data$index %in% lis), ]
table(systematic_data$Churn)

# Clean the environment, keeping only the objects still needed downstream.
rmExcept("data", "data_sample")

# Divide data into train (~70%) and test (~30%) by random assignment.
set.seed(1234)
index <- sample(2, nrow(data), replace = TRUE, prob = c(0.7, 0.3))
train <- data[index == 1, ]
test <- data[index == 2, ]
############### Model 1: logistic regression #################
# Fit a logistic regression of Churn on all remaining predictors.
# NOTE(review): the `index` helper column created earlier is still present
# in `train`/`test` and therefore enters the model -- confirm intended.
logit_model = glm(Churn ~ ., data = train, family = "binomial")
# Coefficients and deviance summary.
summary(logit_model)
# Predicted churn probabilities on the hold-out set.
logit_Predictions = predict(logit_model, newdata = test, type = "response")
# Threshold the probabilities at 0.5 into hard 0/1 predictions.
logit_Predictions = ifelse(logit_Predictions > 0.5, 1, 0)
# Confusion matrix: observed vs predicted.
ConfMatrix_RF = table(test$Churn, logit_Predictions)
# Accuracy: (TP + TN) / N
# False negative rate: FN / (FN + TP)
# Author-reported: Accuracy 86.23, FNR 87.32
###################### Model 2: random forest #######################
# NOTE(review): Churn is numeric at this point, so randomForest fits a
# *regression* forest; a factor response is needed for classification --
# confirm which was intended.
# NOTE(review): `ntry` is not a randomForest argument (it is silently
# absorbed by `...`); presumably `mtry` was intended -- confirm.
RF_model = randomForest(Churn ~ . ,data= train, importance = TRUE, ntree = 500 , ntry = 500)
print(RF_model)
importance(RF_model)
summary(RF_model)
# Extract decision rules from the forest via the inTrees package:
# first transform the rf object into inTrees' list format.
treeList = RF2List(RF_model)
# Extract R-executable rule conditions.
# NOTE(review): train[,-17] presumably drops the response column -- verify
# that column 17 really is Churn after the earlier column drops.
exec = extractRules(treeList, train[,-17]) # R-executable conditions
write(capture.output(summary(RF_model)), "Rules.txt")
# Inspect a few raw rules.
exec[1:10,]
# Make the rules human-readable.
readableRules = presentRules(exec, colnames(train))
readableRules[1:10,]
# Rule metrics (frequency, error, length).
ruleMetric = getRuleMetric(exec, train[,-17], train$Churn) # get rule metrics
# Evaluate a few rules.
ruleMetric[1:10,]
# Predict test data using the random forest model.
#RF_Predictions1 = predict(RF_model, test[,-17])
RF_Predictions = predict(RF_model, newdata = test, type = "response")
# Confusion matrix for the forest.
# NOTE(review): with a regression forest these predictions are continuous,
# so this table would get one column per distinct value -- confirm.
ConfMatrix_RF = table(test$Churn , RF_Predictions)
confusionMatrix(ConfMatrix_RF)
# False negative rate: FN / (FN + TP)
# Author-reported: Accuracy 96.28, FNR 26.33
# Variable-importance plot by %IncMSE (type = 1).
plot.new()
varImpPlot(RF_model , type = 1, pch = 19 , col = 1, cex=1.0, main = " " )
abline(v=45, col = "purple")
# Variable-importance plot by IncNodePurity (type = 2).
plot.new()
varImpPlot(RF_model , type = 2, pch = 19 , col = 1, cex=1.0, main = " " )
abline(v=45, col = "blue")
################## ROC curves for both models ##################
# Continuous scores on the test set from each model.
test$result1 = predict(logit_model, newdata = test, type = "response")
test$result2 = predict(RF_model, newdata = test, type = "response")
# ROCR prediction/performance objects (TPR vs FPR).
pred1 = prediction(test$result1 , test$Churn)
pred2 = prediction(test$result2 , test$Churn)
pref1 = performance(pred1 , "tpr" , "fpr")
pref2 = performance(pred2 , "tpr" , "fpr")
plot.new()
plot(pref1 , col = "purple" , lwd = 2.5)
plot(pref2, col = "red" , add = TRUE , lwd = 2.5)
# Diagonal reference line (random classifier).
abline(0,1, col = "green" , lwd = 2.5, lty= 2)
title('Curve')
# NOTE(review): three colors are supplied for two legend labels; the
# trailing "blue" is unused -- confirm intended.
legend(0.8,0.4,c("Logistic" , "RF"),
lty=c(1,1),
lwd=c(1.4,1.4), col = c("purple" ,"red" , "blue"))
####### Save the fitted forest to disk #######
# Author-reported results:
#   Logistic:      Accuracy 86.23, FNR 87.32
#   Random forest: Accuracy 96.28, FNR 26.33
save(RF_model , file = "churnmodel.rda")
|
c65ecf89aa27fb8408ac33f793b6d29241fc3e5c
|
4ba3aaae72316a6c81c65c3eebe6fc87eae51427
|
/ambtest/amb_exp_rhipe.R
|
807df8f4a774e4c448030073c3fc3ea18fef1858
|
[] |
no_license
|
minasel/Seismic_Interferometry
|
bcc01c9920932367d664000e8746461c81b911e3
|
98c7761b8498520f08fe53dd3c30d2a6121eaebf
|
refs/heads/master
| 2020-05-16T17:05:13.711950
| 2018-05-07T17:50:31
| 2018-05-07T17:50:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,812
|
r
|
amb_exp_rhipe.R
|
# con <- file("hadoop_output_log.txt")
# sink(con, append=TRUE)
# sink(con, append=TRUE, type="message")
#
# # This will echo all input and not truncate 150+ character lines...
# source("amb_exp_rhipe.R", echo=TRUE, max.deparse.length=10000)
#
# # Restore output to console
# sink()
# sink(type="message")
library(RSEIS)
library(RPMG)
library(Rwave)
library(datadr)
library(signal)
library(pracma)
user <- 'azehady/'
#data_file_path = './data_1d/'
#project_dir_name = 'interferometry_1d'
data_file_path = './data2/'
project_dir_name = 'data2'
# get the file names from the directory in the server
fnames = list.files(path=data_file_path,
pattern=NULL, full.names=TRUE)
# get the number of the stations
num_station = length(fnames)
Amp <- list()
# create the user directory and move there
user_dir <- paste('/user/', user, sep="")
hdfs.setwd(user_dir)
# remove tmp files
tmp_dir <- paste(user_dir, 'tmp/*', sep="")
rhdel(tmp_dir)
# Create the project directory
project_dir <- paste(user_dir, project_dir_name, sep="")
if (rhexists(project_dir))
rhdel(project_dir)
rhmkdir(project_dir)
# change to the project directory
hdfs.setwd(project_dir)
data_dir <- "data"
if (!rhexists(data_dir))
rhmkdir(data_dir)
# change to the data directory
hdfs.setwd(data_dir)
# remove all data
rhdel("*")
# Read the SAC formatted seismic data for each station from the data directory
# and write them as key value pair file on hadoop
n = 8000
# Divide each station into 2 files because otherwise Hadoop 64 MB block size exceeds
D <- NULL # or 16, needs to divide N-1
for (i in 1:num_station) {
fn = fnames[i]
st_num <- as.character(paste('100', i, sep=""))
station <- read1sac(fn , Iendian = 1 , HEADONLY=FALSE, BIGLONG=FALSE)
N <- length(station$amp)
NN <- n * as.integer(N/n)
NN <- NN - 1
#print(fn)
#print(NN)
station$station <- rep(st_num, N)
stationDF <- data.frame(station=station['station'], amp=station['amp'])
stationDF <- stationDF[1:(N-2), ]
start_idx <- 1
k <- 10
nn <- (NN + 1) / n
nnn <- as.integer(nn / k)
if (nnn != 0)
num_rows <- nnn * n
else
num_rows <- n
end_idx <- as.integer(num_rows)
j <- 1
breakout = FALSE
while(1) {
#print(start_idx)
#print(end_idx)
#print("")
if (end_idx > NN) {
end_idx = NN
}
jN <- length(start_idx:end_idx)
stationKV <- list(list(st_num, stationDF[start_idx:end_idx, ]))
stationDataFile <- paste(i, "_", j, "station", "_Data", sep="")
j <- j + 1
print(paste("Writing ", stationDataFile, " in HDFS."))
if (!rhexists(stationDataFile))
rhwrite(stationKV, file=stationDataFile)
if (end_idx == NN)
break
start_idx <- end_idx + 1
end_idx <- start_idx + num_rows - 1
}
}
hdfs_dir <- paste(user_dir, project_dir_name,"/", data_dir, sep="")
seismHDFSconn <- hdfsConn(hdfs_dir, autoYes = TRUE)
datakvDdf <- ddf(seismHDFSconn)
datakvDdf <- updateAttributes(datakvDdf)
byamp <- divide(datakvDdf,
by ="station",
spill = n,
#output = hdfsConn(dirs, autoYes=TRUE),
update=TRUE)
dt = 0.004
fs = 1/dt
b1= butter(2, c(0.01/(fs/2), 3/(fs/2)))
# One-bit (sign) normalization of a numeric vector: maps negatives to -1,
# positives to +1 and zeros to 0.
#
# data -- numeric vector (any length).
# Returns a numeric vector of the same length (and names, if any).
#
# FIXED/GENERALIZED: the original element-wise loop errored on NA values
# (if (NA) ...) and on empty input (seq(1, 0) indexes out of bounds).
# base::sign() is the vectorized equivalent: it returns -1/0/1 element-wise,
# propagates NA, and handles length-0 input.
signbit <- function(data){
  sign(data)
}
# Time axis for one n-sample window, and the autocorrelation length (n/2).
time=(0:(n-1))*dt
l = n/2
# Per-chunk transform applied (distributed, via datadr::addTransform) to each
# station's amplitude subset: clean -> band-pass -> one-bit normalize ->
# autocorrelate -> spectral whitening -> final band-pass.
proccc <- addTransform(byamp, function(v) {
#print(length(v$amp))
# Replace missing samples with zero, then demean and detrend the trace.
v$amp[is.na(v$amp)] <- 0
a = v$amp - mean(v$amp)
a = detrend(a)
# Zero-phase band-pass using the Butterworth filter b1 built above.
a = filtfilt(b1, a, type="pass")
# One-bit normalization (values become -1/0/1) to suppress amplitude bias.
b = signbit(a)
# Autocorrelation of the one-bit trace up to lag l-1.
au_sta_22 = acf(b,lag.max = l-1, type = c("correlation"))
# #print(length(au_sta_22$acf))
# Zero out autocorrelation values inside the 99% white-noise confidence
# band |r| <= vcrit / sqrt(n).
vcrit = sqrt(2)*erfinv(0.99)
lconf = -vcrit/sqrt(n);
upconf = vcrit/sqrt(n);
ind_22 = (au_sta_22$acf >=lconf & au_sta_22$acf <= upconf)
au_sta_22$acf[ind_22=="TRUE"] = 0
# LOESS-smooth the autocorrelation as a function of lag time.
fit.loess22 <- loess(au_sta_22$acf ~ time[1:l], span=0.15, degree=2)
predict.loess22 <- predict(fit.loess22, time[1:l], se=TRUE)
# Periodograms of the raw and the smoothed autocorrelations.
a_22 <- ts(au_sta_22$acf, frequency = fs) # tell R the sampling frequency
a_22_spec <- spec.pgram(a_22, demean = FALSE, detrend = TRUE,plot = TRUE)
s_22 <- ts(predict.loess22$fit, frequency = fs) # tell R the sampling frequency
s_22_spec <- spec.pgram(s_22, demean = FALSE, detrend = TRUE,plot = TRUE)
# Spectral whitening: divide the raw power spectrum by the smoothed one;
# the 0.00001 term damps the denominator to avoid division blow-ups.
wh_sta_22 = a_22_spec$spec / (s_22_spec$spec + 0.00001)
# Back to the time domain (magnitude of the inverse FFT).
wh_sta_22_time = abs(ifft((wh_sta_22)))
# Final 6-12 Hz band-pass of the whitened trace; this value is the
# per-chunk result collected by recombine() below.
b2= butter(2, c(6/(fs/2), 12/(fs/2)))
result_station_22 <- filtfilt(b2, wh_sta_22_time, type="pass")
})
# Gather all chunk results into a single data frame (rbind across chunks).
last = recombine(proccc, combRbind)
hdfs.setwd(project_dir)
cor_threshhold <- 0.60
percent_subset_match <- 0.50
station = list()
station_m = list()
station_corm = list()
selected_subsets_per_station = list()
n_selected_subsets_per_station = c()
for (k in 1:num_station) {
st_number = as.numeric(paste("100", k, sep=""))
station[[k]] = subset(last, last$station == st_number)
m = n/4
v = station[[k]]$val
station_m[[k]] = matrix(v, nrow=m, byrow=FALSE)
station_m[[k]] = station_m[[k]][1:(m/2 - 1), ]
station_corm[[k]] = cor(station_m[[k]])
nn <- nrow(station_corm[[k]]) # number of subsets in each station
#print(station_corm[[i]])
# rgb.palette <- colorRampPalette(c("blue", "yellow"), space = "rgb")
# # levelplot(station_corm[[i]],
# # main=paste("Station ", i, sep=""),
# # xlab="", ylab="",
# # col.regions=rgb.palette(120),
# # cuts=100, at=seq(0,1,0.01))
selected_stations = list() # selected subsets for each station
n_selected_stations = c()
for(i in 1:nn) {
selecteds = c()
for(j in 1:nn) {
if (i==j)
next
if(abs(station_corm[[k]][i,j]) > cor_threshhold)
selecteds = c(selecteds, j)
}
selected_stations[[i]] = selecteds #selected for station i
n_selected_stations = c(n_selected_stations, length(selecteds))
}
selected_subsets = rep(0, nn)
ns <- length(n_selected_stations)
for (i in 1:ns) {
c = n_selected_stations[i]
if (c > as.integer(percent_subset_match * nn)) {
#print(c)
selected_subsets[[i]] = 1
}
}
selected_subsets_per_station[[k]] = selected_subsets
SS <- selected_subsets_per_station[[k]]
nSS <- length(SS[SS == 1])
n_selected_subsets_per_station = c(n_selected_subsets_per_station, nSS)
}
#
#
# for (i in 1:num_station) {
# t = paste("Amplitude of noise from station", i)
# plot(station[[i]]$val[1:l], type='l', col=i, ylab='Amp', main=t)
# }
#
m = n/4
mm = (m/2)-1
st_sum = list()
for(j in 1:num_station) {
st = station[[j]]
cur_st_sum = rep(0, m)
nV <- length(st$val)
for (i in 1:nV) {
stv <- st$val[i]
#print(i)
subset_idx = as.integer((i-1)/m) + 1
#print(subset_idx)
if (selected_subsets_per_station[[j]][subset_idx] == 1) {
idx = i %% m + 1
cur_st_sum[[idx]] = cur_st_sum[[idx]] + stv
}
}
st_sum[[j]] = cur_st_sum
}
pdf()
#par(mar=c(4,4,4,4))
#par(mfrow=c(num_stations,1))
time = (0:(n/4 - 1)) * dt
for (i in 1:num_station) {
t = paste("Summed Amplitude of noise from station", i)
plot(rev(st_sum[[i]]), time, type='l', col=i, ylab='Time(s)', xlab='Amp', main=t, xlim=c(-max(st_sum[[i]]), max(st_sum[[i]])))
abline(v=0, lty=2)
}
dev.off()
# # Now each station data will be saved as chunk
# m <- n/4
# for (i in 1:num_station) {
# fn = fnames[i]
# st_num <- as.character(paste('100', i, sep=""))
# station = subset(last, last$station == st_num)
#
# NN <- length(station$val)
# stationDF <- data.frame(station = station$station, amp = station$val)
#
# output_dir <- paste('Station_', i, '_Output', sep="")
# if (rhexists(output_dir))
# rhdel(output_dir)
# rhmkdir(output_dir)
#
# hdfs.setwd(output_dir)
#
# fD <- as.integer(NN / m)
#
# start_idx <- 1
# end_idx <- as.integer(m)
# for (j in 1:fD) {
# if (j == fD)
# end_idx <- NN
#
# jN <- length(start_idx:end_idx)
# stationKV <- list(list(key=st_num, value=stationDF[start_idx:end_idx, ]))
#
# stationDataFile <- paste(j, "_station_output_", "Data", sep="")
# print(paste("Writing ", stationDataFile, " output file in HDFS."))
# rhwrite(stationKV, file=stationDataFile)
#
# start_idx <- end_idx + 1
# end_idx <- start_idx + as.integer(m) - 1
#
# }
#
# hdfs.setwd(project_dir)
# }
#
# st_sum = list()
#
# for (i in 1:num_station) {
# print(paste("Processing station output", i))
# tryCatch({
# output_dir <- paste(user_dir, dir, "/", 'Station_', i, '_Output', sep="")
# #print(rhls(output_dir))
# outHDFSconn <- hdfsConn(output_dir, autoYes = TRUE)
# outputkvDdf <- ddf(outHDFSconn)
# outputkvDdf <- updateAttributes(outputkvDdf)
#
# byStation <- divide(outputkvDdf,
# by = 'station',
# spill = m
# )
# byStationModified <- ddf(byStation[1:fD])
# sumReduce <- addTransform(byStationModified, function(v) {
# v$amp
# })
#
# st_sum[[i]] = recombine(sumReduce, combMean)
# },
# error = function(err){
# print(err)
# })
# }
# hdfs.setwd(project_dir)
#
#
# for (i in 1:num_station) {
# st_sum[[i]] <- st_sum[[i]] * fD
# }
#
# pdf()
# par(mar=c(4,4,4,4))
# #par(mfrow=c(num_stations,1))
# time = (0:(n/4 - 1)) * dt
# for (i in 1:num_station) {
# t = paste("Summed Amplitude of noise from station", i)
# plot(rev(st_sum[[i]]), time, type='l', col=i, ylab='Time(s)', xlab='Amp', main=t, xlim=c(-max(st_sum[[i]]), max(st_sum[[i]])))
# abline(v=0, lty=2)
# }
# dev.off()
|
798ea98a266a80a0689f72bcff73297d0e9efeae
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/hetong007/CCCP/data.R
|
2e318f047b62379da5f78c1f9ef1c3237a5a9633
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,811
|
r
|
data.R
|
source('lassoshooting.R')
### helper function to generate data matrices X and Y
get.ARX<-function(n, p)
{
X = matrix(0, nrow = n, ncol = p)
for (j in 1:p)
{
if (j==1)
X[,j] = rnorm(n)
else{
X[,j] = X[,j-1]*0.6 + 0.8*rnorm(n)
}
}
return(X)
}
# Generate a linear response Y = X %*% beta + eps, where eps is N(0, 2^2)
# noise of length n, returned as a plain (dimensionless) numeric vector.
get.linearY <- function(X, n, beta)
{
  noise <- rnorm(n, sd = 2)
  fitted <- X %*% beta
  as.vector(fitted + noise)
}
# Standardize each column of X to zero mean and unit *population* variance
# (divides by sqrt(E[x^2] - E[x]^2), i.e. the n-denominator SD, not the
# n-1 version that base::scale()/sd() would use).
scale.X = function(X) {
  centers <- colMeans(X)
  spreads <- sqrt(colMeans(X^2) - centers^2)
  standardized <- (t(X) - centers) / spreads
  t(standardized)
}
# Store Result
cccp.res = matrix(0, 100, 10)
colnames(cccp.res) = c("Lambda.hbic","HBIC.hbic","MSE.hbic","TP.hbic","FP.hbic",
"Lambda.mse","HBIC.mse","MSE.mse","TP.mse","FP.mse")
ncv.res = cccp.res
lasso.res = cccp.res
colnames(lasso.res) = c("Lambda.rmse","RMSE.rmse","MSE.rmse","TP.rmse","FP.rmse",
"Lambda.mse","RMSE.mse","MSE.mse","TP.mse","FP.mse")
lambda.vec.cccp = c(0, 0.1, 0.2, 0.3, 0.4, 0.8, 1,2,3,4,5)
lambda.vec.ncv = c(0, 0.1, 0.2, 0.3, 0.4, 0.8, 1,2,3,4,5)
lambda.vec.lasso = c(0, 0.1, 0.2, 0.3, 0.4, 0.8, 1, 2)
for (i in 1:100) {
# Data generation
cat('Dataset No.', i, '\n')
set.seed(i)
n = 100
p = 3000
beta = c(3, 1.5, 0, 0, 2, rep(0, p-5))
X = get.ARX(n, p)
X = scale.X(X)
Y = get.linearY(X, n, beta)
# Select lambda with HBIC for cccp, 2013
res_cccp = HBIC(X, Y, lambda.vec.cccp, method="cccp", beta)
ind_hbic = which.min(res_cccp$hbic.val)
ind_mse = which.min(res_cccp$mse.val)
cccp.res[i,] = c(lambda.vec.cccp[ind_hbic], res_cccp$hbic.val[ind_hbic],
res_cccp$mse.val[ind_hbic],
res_cccp$tp.val[ind_hbic], res_cccp$fp.val[ind_hbic],
lambda.vec.cccp[ind_mse], res_cccp$hbic.val[ind_mse],
res_cccp$mse.val[ind_mse],
res_cccp$tp.val[ind_mse], res_cccp$fp.val[ind_mse])
# Select lambda with HBIC for ncv, 2011
res_ncv = HBIC(X, Y, lambda.vec.ncv, method="ncv", beta)
ind_hbic = which.min(res_ncv$hbic.val)
ind_mse = which.min(res_ncv$mse.val)
ncv.res[i,] = c(lambda.vec.ncv[ind_hbic], res_ncv$hbic.val[ind_hbic],
res_ncv$mse.val[ind_hbic],
res_ncv$tp.val[ind_hbic], res_ncv$fp.val[ind_hbic],
lambda.vec.ncv[ind_mse], res_ncv$hbic.val[ind_mse],
res_ncv$mse.val[ind_mse],
res_ncv$tp.val[ind_mse], res_ncv$fp.val[ind_mse])
# Select lambda with cross validation for vanilla lasso
res_lasso = lasso.cv(X, Y, lambda.vec.lasso, beta, 5)
ind_rmse = which.min(res_lasso$rmse)
ind_mse = which.min(res_lasso$mse)
lasso.res[i,] = c(res_lasso$lambda[ind_rmse], res_lasso$rmse[ind_rmse],
res_lasso$mse[ind_rmse],
res_lasso$tp.val[ind_rmse], res_lasso$fp.val[ind_rmse],
res_lasso$lambda[ind_mse], res_lasso$rmse[ind_mse],
res_lasso$mse[ind_mse],
res_lasso$tp.val[ind_rmse], res_lasso$fp.val[ind_rmse])
}
save(cccp.res, ncv.res, lasso.res, file = 'simulation.100.rda')
## Summarize results
result = matrix(0, 3, 4)
result[1,] = colMeans(cccp.res[,2:5])
result[2,] = colMeans(ncv.res[,2:5])
result[3,] = colMeans(lasso.res[,2:5])
colnames(result) = c("HBIC/RMSE", "MSE.bhat", "TP", "FP")
rownames(result) = c("CCCP", "NCV", "Lasso")
result.mse = matrix(0, 3, 4)
result.mse[1,] = colMeans(cccp.res[,7:10])
result.mse[2,] = colMeans(ncv.res[,7:10])
result.mse[3,] = colMeans(lasso.res[,7:10])
colnames(result.mse) = c("HBIC/RMSE", "MSE.bhat", "TP", "FP")
rownames(result.mse) = c("CCCP", "NCV", "Lasso")
save(result, result.mse, file = 'summarized.ressult.rda')
|
4b7af984b5aeac31dff97f118ad34abb56a19495
|
ef99e639789c259a1f36bc5a399c7eede5498f1b
|
/gapminder_analysis.R
|
55b6c55d6f81992f612f0fdd185bb0439fe1bc4b
|
[] |
no_license
|
ejsher/swc_workshop
|
277e84cd3f8cefca1e443411ff447b33455a1ac1
|
7a7937dba6e786e1d6cdc86d1aa0234479337ef1
|
refs/heads/master
| 2021-01-11T19:25:26.950997
| 2017-01-18T17:01:20
| 2017-01-18T17:01:20
| 79,361,656
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 944
|
r
|
gapminder_analysis.R
|
# Author: Emily Sherman
# Gapminder exploratory plots (Software Carpentry exercise).

# BUG FIX: the original called ggplot() before library(ggplot2) was loaded,
# so the first plot failed in a fresh session; attach the package first.
library(ggplot2)

# Download the data once and read it in (the original downloaded and read
# the same file twice).
download.file("https://raw.githubusercontent.com/swcarpentry/r-novice-gapminder/gh-pages/_episodes_rmd/data/gapminder-FiveYearData.csv", destfile = "gapminder-FiveYearData.csv")
gapminder <- read.csv("gapminder-FiveYearData.csv")

# Plot 1: GDP per capita vs life expectancy, coloured by continent,
# point size scaled by population. print() makes it render when sourced.
print(
  ggplot(data = gapminder, aes(x = gdpPercap, y = lifeExp, color = continent)) +
    geom_point(aes(size = pop))
)

# Plot 2: year vs life expectancy, one facet per continent.
print(
  ggplot(data = gapminder, aes(x = year, y = lifeExp, color = continent)) +
    geom_point() +
    facet_grid(. ~ continent)
)

# Save the most recently displayed plot (plot 2) to PNG.
ggsave(filename = "year_vs_lifeexp_percent.png", width = 5, height = 4, units = "in")
|
f66c0f072973c82966d7a32e2ac23ca24edfa39c
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.security.identity/man/fms_put_policy.Rd
|
e8bc4dd45c4e211f0c21d4ab84a39e9fc60d867d
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 3,223
|
rd
|
fms_put_policy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fms_operations.R
\name{fms_put_policy}
\alias{fms_put_policy}
\title{Creates an AWS Firewall Manager policy}
\usage{
fms_put_policy(Policy, TagList)
}
\arguments{
\item{Policy}{[required] The details of the AWS Firewall Manager policy to be created.}
\item{TagList}{The tags to add to the AWS resource.}
}
\value{
A list with the following syntax:\preformatted{list(
Policy = list(
PolicyId = "string",
PolicyName = "string",
PolicyUpdateToken = "string",
SecurityServicePolicyData = list(
Type = "WAF"|"WAFV2"|"SHIELD_ADVANCED"|"SECURITY_GROUPS_COMMON"|"SECURITY_GROUPS_CONTENT_AUDIT"|"SECURITY_GROUPS_USAGE_AUDIT"|"NETWORK_FIREWALL",
ManagedServiceData = "string"
),
ResourceType = "string",
ResourceTypeList = list(
"string"
),
ResourceTags = list(
list(
Key = "string",
Value = "string"
)
),
ExcludeResourceTags = TRUE|FALSE,
RemediationEnabled = TRUE|FALSE,
IncludeMap = list(
list(
"string"
)
),
ExcludeMap = list(
list(
"string"
)
)
),
PolicyArn = "string"
)
}
}
\description{
Creates an AWS Firewall Manager policy.
Firewall Manager provides the following types of policies:
\itemize{
\item An AWS WAF policy (type WAFV2), which defines rule groups to run
first in the corresponding AWS WAF web ACL and rule groups to run
last in the web ACL.
\item An AWS WAF Classic policy (type WAF), which defines a rule group.
\item A Shield Advanced policy, which applies Shield Advanced protection
to specified accounts and resources.
\item A security group policy, which manages VPC security groups across
your AWS organization.
\item An AWS Network Firewall policy, which provides firewall rules to
filter network traffic in specified Amazon VPCs.
}
Each policy is specific to one of the types. If you want to enforce more
than one policy type across accounts, create multiple policies. You can
create multiple policies for each type.
You must be subscribed to Shield Advanced to create a Shield Advanced
policy. For more information about subscribing to Shield Advanced, see
\href{https://docs.aws.amazon.com/waf/latest/DDOSAPIReference/API_CreateSubscription.html}{CreateSubscription}.
}
\section{Request syntax}{
\preformatted{svc$put_policy(
Policy = list(
PolicyId = "string",
PolicyName = "string",
PolicyUpdateToken = "string",
SecurityServicePolicyData = list(
Type = "WAF"|"WAFV2"|"SHIELD_ADVANCED"|"SECURITY_GROUPS_COMMON"|"SECURITY_GROUPS_CONTENT_AUDIT"|"SECURITY_GROUPS_USAGE_AUDIT"|"NETWORK_FIREWALL",
ManagedServiceData = "string"
),
ResourceType = "string",
ResourceTypeList = list(
"string"
),
ResourceTags = list(
list(
Key = "string",
Value = "string"
)
),
ExcludeResourceTags = TRUE|FALSE,
RemediationEnabled = TRUE|FALSE,
IncludeMap = list(
list(
"string"
)
),
ExcludeMap = list(
list(
"string"
)
)
),
TagList = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
05e47ddff0c1b6e6818a69c2b6e9ef83be484762
|
f599f81f5fb0351d7f496b43cd6dc07df04d6ced
|
/man/pow.Rd
|
61d1a828935d6350f9af0880fd1fae215c7c91ab
|
[] |
no_license
|
santiagodr/powers
|
a054086cfcae41fbd82e30077ff6331302b4edbf
|
50b87f65e74dbcf3d884b5143de254dc49afb4ba
|
refs/heads/master
| 2021-08-22T04:57:11.237005
| 2017-11-29T10:41:08
| 2017-11-29T10:41:08
| 112,156,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 491
|
rd
|
pow.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pow.R
\name{pow}
\alias{pow}
\title{Generic Power Function}
\usage{
pow(x, a, na.rm = FALSE)
}
\arguments{
\item{x}{Vector to raise to some power.}
\item{a}{Power to raise \code{x} by.}
\item{na.rm}{Logical; whether \code{NA} values should be removed before the computation. Defaults to \code{FALSE}.}
}
\value{
The vector \code{x}, raised to the power of \code{a}.
}
\description{
This function is internal and is used by the other functions
}
|
844d9c7c0c63cdd0025f290134eadcbad9711c79
|
20a9435ef4586a43a4e55502d0f0ac40aa185821
|
/man/hmi_pool.Rd
|
8b51d2c192226a127114a27b9451e255eb0c5118
|
[] |
no_license
|
cran/hmi
|
a9df9353e459bfe45d9952370a962fa879c8f5a1
|
6d1edb0d025c182cedb325fa9826f4ba00e988d1
|
refs/heads/master
| 2021-01-23T06:20:51.264453
| 2020-10-01T22:20:02
| 2020-10-01T22:20:02
| 86,358,162
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,450
|
rd
|
hmi_pool.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hmi_smallfunctions.R
\name{hmi_pool}
\alias{hmi_pool}
\title{Averages the results of the imputation function \code{hmi}.}
\usage{
hmi_pool(mids, analysis_function)
}
\arguments{
\item{mids}{A \code{mids} (multiply imputed data set) object.
Either from the \code{hmi} imputation function or \code{mice}.}
\item{analysis_function}{A user-written function that takes a completed data set,
runs the analysis model, and returns the model parameters
of interest as a vector. See the examples below.}
}
\value{
A vector with all averaged results.
}
\description{
This function applies the analysis the user wants to run on every imputed dataset.
The results from every dataset are pooled by simple averaging, so the user has to
make sure that averaging the analysis results is meaningful. Variance estimates
for the averaged results are currently not implemented.
}
\examples{
\dontrun{
data(Gcsemv, package = "hmi")
model_formula <- written ~ 1 + gender + coursework + (1 + gender|school)
set.seed(123)
dat_imputed <- hmi(data = Gcsemv, model_formula = model_formula, m = 2, maxit = 2)
my_analysis <- function(complete_data){
# In this list, you can write all the parameters you are interested in.
# Those will be averaged.
# So make sure that averaging makes sense and that you only put in single numeric values.
parameters_of_interest <- list()
# ---- write in the following lines, what you are interetest in to do with your complete_data
# the following lines are an example where the analyst is interested in the fixed intercept
# and fixed slope and the random intercepts variance,
# the random slopes variance and their covariance
my_model <- lme4::lmer(model_formula, data = complete_data)
parameters_of_interest[[1]] <- lme4::fixef(my_model)
parameters_of_interest[[2]] <- lme4::VarCorr(my_model)[[1]][,]
ret <- unlist(parameters_of_interest)# This line is essential if the elements of interest
#should be labeled in the following line.
names(ret) <-
c("beta_intercept", "beta_gender", "beta_coursework", "sigma0", "sigma01", "sigma10", "sigma1")
return(ret)
}
hmi_pool(mids = dat_imputed, analysis_function = my_analysis)
#if you are interested in fixed effects only, consider pool from mice:
mice::pool(with(data = dat_imputed,
expr = lme4::lmer(written ~ 1 + gender + coursework + (1 + gender|school))))
}
}
|
07808a37b95754732c4718f2ed45f3f20d4841ee
|
b1446cdd1b8344bea94237bce8ab08178247fb6b
|
/Code for chapter5.R
|
6437ac7cb2fba3e55c83339263aa928229fdfb7a
|
[] |
no_license
|
abhisaw/R-Deep-Learning-Essentials
|
f36e23d8a67f734d9a507ec0de74725ff14b9ac8
|
29d58db97f6fb7088fcada21b7ccb8013fac5f9e
|
refs/heads/master
| 2022-05-01T20:09:53.882473
| 2016-10-11T18:06:21
| 2016-10-11T18:06:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,321
|
r
|
Code for chapter5.R
|
# picking hyperparameters ----
# Compares two fixed learning rates (0.001 vs 0.01) for a single-hidden-layer
# network on the MNIST digits data, using H2O's deep learning implementation.
library(ggplot2)
library(h2o)
# Start a local H2O cluster (12 GB heap, 3 threads).
local_h2o <- h2o.init(max_mem_size = "12G", nthreads = 3)
# load mnist data directly via h2o. h2o.importFile imports in parallel ----
h2o_digits <- h2o.importFile("Data/train.csv", destination_frame = "h2odigits")
h2o_digits$label <- as.factor(h2o_digits$label)
# First 32,000 rows for training, the remaining 10,000 for validation.
i <- 1:32000
h2o_digits_train <- h2o_digits[i, ]
itest <- 32001:42000
h2o_digits_test <- h2o_digits[itest, ]
# Predictors: every column except the label.
xnames <- colnames(h2o_digits_train)[-1]
# run models ----
# ex1: 100 hidden rectifier units, 20% hidden dropout, learning rate 0.001.
starttime <- Sys.time()
ex1 <- h2o.deeplearning(
  x = xnames,
  y = "label",
  training_frame = h2o_digits_train,
  validation_frame = h2o_digits_test,
  model_id = "ex1",
  activation = "RectifierWithDropout",
  hidden = c(100),
  epochs = 10,
  adaptive_rate = FALSE,
  rate = 0.001,
  input_dropout_ratio = 0,
  hidden_dropout_ratios = c(0.2)
)
endtime <- Sys.time()
endtime - starttime
# 27.90629 secs
# ex2: identical architecture, but a 10x larger learning rate (0.01).
starttime <- Sys.time()
ex2 <- h2o.deeplearning(
  x = xnames,
  y = "label",
  training_frame = h2o_digits_train,
  validation_frame = h2o_digits_test,
  model_id = "ex2",
  activation = "RectifierWithDropout",
  hidden = c(100),
  epochs = 10,
  adaptive_rate = FALSE,
  rate = 0.01,
  input_dropout_ratio = 0,
  hidden_dropout_ratios = c(0.2)
)
endtime <- Sys.time()
endtime - starttime
# 18.69458 secs
# conclusions picking hyperparameters ------
# difference: ex1 takes longer to train than ex2
ex1
ex2
# other difference: ex1 performs better on the validation set.
## Training and predicting new data from a deep neural network ------
# load HAR dataset ------
# UCI Human Activity Recognition data: 561 accelerometer features (X files)
# and an activity class label 1-6 (y files), with predefined train/test splits.
har_train_x <- readr::read_table("Data/UCI HAR Dataset/train/X_train.txt", col_names = FALSE)
har_train_y <- readr::read_table("Data/UCI HAR Dataset/train/y_train.txt", col_names = FALSE)
# unlist readr reads in _y as a list and doesn't work with factor
har_train <- cbind(har_train_x, outcome = factor(unlist(har_train_y)))
har_test_x <- readr::read_table("Data/UCI HAR Dataset/test/X_test.txt", col_names = FALSE)
har_test_y <- readr::read_table("Data/UCI HAR Dataset/test/y_test.txt", col_names = FALSE)
# unlist readr reads in _y as a list and doesn't work with factor
har_test <- cbind(har_test_x, outcome = factor(unlist(har_test_y)))
# Mapping from class code (1-6) to human-readable activity name.
har_labels <- readr::read_table("Data/UCI HAR Dataset/activity_labels.txt", col_names = FALSE)
# Upload both splits into the H2O cluster.
h2oactivity_train <- as.h2o(
  har_train,
  destination_frame = "h2oactivitytrain")
h2oactivity_test <- as.h2o(
  har_test,
  destination_frame = "h2oactivitytest")
# build model ------
# activation function is a linear rectifier with dropout
# input dropout ratio = 20%
# hidden dropout ratio = 50%
# 50 hidden neurons
# 10 training iterations
# 561 columns * 50 neurons = 28050 weights between input and layer 1
# 50 * 6 = 300 weights between layer 1 and output
starttime <- Sys.time()
mt1 <- h2o.deeplearning(
  x = colnames(har_train_x),
  y = "outcome",
  model_id = "mt1",
  training_frame= h2oactivity_train,
  activation = "RectifierWithDropout",
  hidden = c(50),
  epochs = 10,
  loss = "CrossEntropy",
  input_dropout_ratio = .2,
  hidden_dropout_ratios = c(.5),
  # Needed later to recompute the forward pass by hand.
  export_weights_and_biases = TRUE
)
endtime <- Sys.time()
endtime - starttime
# 5.266154 secs
# look at the features of the model using the h2o.deepfeatures() function
# model, data, and layer
features1 <- as.data.frame(h2o.deepfeatures(mt1, h2oactivity_train, 1))
features1[1:10, 1:5]
# extract weights from each layer
weights1 <- as.matrix(h2o.weights(mt1, 1))
## plot heatmap of the weights
# Rows = input variables, columns = hidden neurons; melt to long format
# so ggplot can draw one tile per (input, neuron) weight.
tmp <- as.data.frame(t(weights1))
tmp$Row <- 1:nrow(tmp)
tmp <- reshape2::melt(tmp, id.vars = c("Row"))
p.heat <- ggplot(tmp, aes(variable, Row, fill = value)) +
  geom_tile() +
  scale_fill_gradientn(colours = c("black", "white", "blue")) +
  theme_classic() +
  theme(axis.text = element_blank()) +
  xlab("Hidden Neuron") +
  ylab("Input Variable") +
  ggtitle("Heatmap of Weights for Layer 1")
print(p.heat)
# Steps for calculating the first layer manually
# (re-derives H2O's layer-1 features from the exported weights and biases
# to verify we understand exactly what the network computes)
# input data
d <- as.matrix(har_train[, -562])
# biases for hidden layer 1 neurons
b1 <- as.matrix(h2o.biases(mt1, 1))
# Replicate the bias row once per observation so it can be added elementwise.
b12 <- do.call(rbind, rep(list(t(b1)), nrow(d)))
# construct the features for layer 1
# step 1: standardize each column of input data
d_scaled <- apply(d, 2, scale)
# step 2: multiply scaled data by weights
# step 3: add the bias matrix
d_weighted <- d_scaled %*% t(weights1) + b12
# step 4: adjust for drop out ratio
# (scale activations by the keep probability, 1 - 0.5)
d_weighted <- d_weighted * (1 - .5)
# step 5: linear rectifier => take values 0 or higher
d_weighted_rectifier <- apply(d_weighted, 2, pmax, 0)
# Manual layer-1 feature should match h2o.deepfeatures() output (to tolerance).
all.equal(
  as.numeric(features1[, 1]),
  d_weighted_rectifier[, 1],
  check.attributes = FALSE,
  use.names = FALSE,
  tolerance = 1e-04)
# steps for the second layer
weights2 <- as.matrix(h2o.weights(mt1, 2))
b2 <- as.matrix(h2o.biases(mt1, 2))
b22 <- do.call(rbind, rep(list(t(b2)), nrow(d)))
yhat <- d_weighted_rectifier %*% t(weights2) + b22
# steps for softmax function.
yhat <- exp(yhat)
normalizer <- do.call(cbind, rep(list(rowSums(yhat)), ncol(yhat)))
yhat <- yhat / normalizer
# final step: derive predicted classification
yhat <- cbind(Outcome = apply(yhat, 1, which.max), yhat)
h2o_yhat <- as.data.frame(h2o.predict(mt1, newdata = h2oactivity_train))
# manual process matches that of H2O exactly.
xtabs(~ yhat[, 1] + h2o_yhat[, 1])
h2o.shutdown(prompt = FALSE)
|
a415b8b9b659a58b97cc2ff2b5dcef457ec83e81
|
e2332ea78bced2ddd65ab7b2ae19a24346c26267
|
/solarD.R
|
a516d4cc2a9498c8a2fcb9e83d8577eb49ffec52
|
[] |
no_license
|
shammun/aquaculture_code
|
cd23cdc4fc5c8d2559fddd4d9473f0b33bb5f59e
|
afde27e6031807b0529601dee2407c1098d9e444
|
refs/heads/main
| 2023-01-21T04:48:11.197184
| 2020-11-25T20:41:14
| 2020-11-25T20:41:14
| 316,047,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,166
|
r
|
solarD.R
|
# install.packages("solrad")
# Solar geometry calculations (via the solrad package) for a site in
# north-east Bangladesh (Lat 24.30, Lon 91.68), standard meridian 90E.
library(solrad)

# Solar altitude angle over two consecutive days (DOY in fractional days).
DOY <- seq(0, 2, .05)
alpha <- Altitude(DOY, Lat = 24.303856, Lon = 91.678491, SLon = 90, DS = 0)
# Note: only the difference between Lon and SLon matters, not each value.
plot(DOY, alpha)

# Apparent solar time (in minutes) for the same fractional days.
# Bug fix: the original called AST(1, ...), producing a single value that
# cannot be plotted against the length-41 DOY vector.
DOY <- seq(0, 2, .05)
ast <- AST(DOY, Lon = 91.678491, SLon = 90, DS = 0)
plot(DOY, ast)

# Day length (in hours) for every day of the year at this latitude.
DOY <- 1:365
Lat <- 24.303856
dl <- DayLength(DOY, Lat)
plot(DOY, dl)

# Sunrise time (continuous hour values) across the year.
# Bug fix: the original called Sunset() here, so "sunrise" actually held
# sunset times; Sunrise() is the intended function.
sunrise <- Sunrise(DOY, Lat)
plot(DOY, sunrise)

# Sunset time (continuous hour values) across the year.
sunset <- Sunset(DOY, Lat)
plot(DOY, sunset)

# Print every third hour of the day (0, 3, ..., 21).
time_of_the_day <- seq(0, 21, by = 3)
for (i in seq_along(time_of_the_day)) {
  print(time_of_the_day[i])
}
|
73e5f7e032802fc373d1d50d4109168c8c6df886
|
9a5b97cfbeb454c3cb4da897c6b37abada6839fb
|
/man/hiv.mortmod.Rd
|
fa66066b77dfa99a183747a182c3d0a66952222f
|
[] |
no_license
|
cran/HIV.LifeTables
|
f0278c0c336ca12ed4fd16ff4ce05070ae3278a5
|
134582513820e99750c35bd10aa989cd4d837887
|
refs/heads/master
| 2016-09-06T08:54:38.764901
| 2012-12-04T00:00:00
| 2012-12-04T00:00:00
| 17,679,715
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,484
|
rd
|
hiv.mortmod.Rd
|
\name{hiv.mortmod}
\alias{hiv.mortmod}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
HIV calibrated model life tables for countries with generalized HIV epidemics
}
\description{
This function produces a complete set of mortality rates for ages 0, 1-4, 5-9, 10-14, ...,100+ and life table given a region, sex, and set of inputs which must include HIV prevalence and one of the following mortality indicators: life expectancy at birth (e0), child mortality alone (5q0), or child mortality with adult mortality (45q15)
}
\usage{
hiv.mortmod(prev=NULL, e0=NULL, child.mort = NULL, adult.mort = NULL, model = 1,
region = 1, sex = 1, lt = FALSE, nax=NULL, opt=TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{prev}{HIV prevalence expressed as a percentage
}
\item{e0}{Life expectancy at birth (used only if model=1)
}
\item{child.mort}{Probability of death between age 0 and 5, 5q0 (used only if model=2 or model=3)
}
\item{adult.mort}{Probability of death between age 15 and 60, 45q15 (used only if model=3)
}
\item{model}{An integer to indicate which inputs will be used in the model. 1 for life expectancy, 2 for child mortality alone, 3 for child mortality with adult mortality
}
\item{region}{An integer to indicate which regional model to use. 1 for Africa, 0 for Caribbean or Latin American
}
\item{sex}{An integer to indicate the sex of the desired life table. 1 for female, 0 for male
}
\item{lt}{Logical. If TRUE a life table is calculated based on the estimated mortality rates. The user can supply nax values. Otherwise, the assumption of half the length of the age interval is used for nax values.
}
\item{nax}{If lt=TRUE, the user can supply a set of nax values
}
\item{opt}{If model=1, opt=TRUE will use a value for the weight for the first SVD component that produces a set of age-specific mortality rates yielding a life expectancy at birth that matches the input life expectancy.
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
\item{nmx}{The estimated nmx values produced by the model}
\item{lt}{If \code{lt=TRUE} A life table with age intervals matching those of the nmx schedule on which the table is built and with columns for age, nax, nmx, nqx, npx, ndx, lx, nLx, Tx, and ex.}
\item{lt.5q0}{If \code{lt=TRUE} The probability of death between birth and age 5}
\item{lt.45q15}{If \code{lt=TRUE} The probability of death between age 15 and 60}
}
% \references{
% %% ~put references to the literature/web site here ~
% }
\author{
David J Sharrow
}
\note{
If \code{lt=TRUE}, it is possible that the life table contains fewer age groups than the 22 in the estimated mortality rates if the mortality rates are high enough to kill off all people before the final age group, 100+.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{mortmod.e0}}, \code{\link{mortmod.5q0}}, \code{\link{mortmod.45q15}}
}
\examples{
## a life table at 1.5 percent prevalence, life expectancy of 60, for Africa Females
hiv.mortmod(prev=1.5, e0=60, model=1, region=1, sex=1, lt=TRUE)
## a set of mortality rates at 2.5 percent prevalence, life expectancy of 53,
## for Caribbean Males
hiv.mortmod(prev=2.5, e0=53, model=1, region=0, sex=0, lt=FALSE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ models }
\keyword{ misc }% __ONLY ONE__ keyword per line
|
d5e68162935a48f5e85ab0203f3585a45047240e
|
f4fe4e5b855f9715f0dee2ab2016c6a48e35ad64
|
/ros_analysis/bak_ros_12_10_19_outliers/bak_ploting_final_figures.R
|
d569e1add22a02518a75969c43dae6dc3de3d01e
|
[] |
no_license
|
zhangshaiolo/NatVar_proj
|
eca9ee6d161053a0f5da5eaa9c6a409262f0e89e
|
f29b39e61b0ea44f1316c5eaa1defde08f430169
|
refs/heads/master
| 2023-03-13T10:47:41.139995
| 2021-03-03T15:22:24
| 2021-03-03T15:22:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,657
|
r
|
bak_ploting_final_figures.R
|
# This script analyzes the antagonist ROS (reactive oxygen species) burst data:
# per-peptide time courses and AUC comparisons across Arabidopsis genotypes.
library(ggplot2)
library(tidyr)
library(patchwork)
# Summary files: mean photocount ("measurement") with standard error ("se")
# per time point and treatment, one file per peptide (1471, 1391, 1857).
data_1471 <- read.csv("/Users/nicholascolaianni/Documents/dangl_lab/nat_variants_proj/ros_analysis/bak_ros_12_10_19_outliers/150756.summary_info.csv",stringsAsFactors = F)
data_1391 <- read.csv("/Users/nicholascolaianni/Documents/dangl_lab/nat_variants_proj/ros_analysis/bak_ros_12_10_19_outliers/130527_final.summary_info.csv",stringsAsFactors = F)
data_1857 <- read.csv("/Users/nicholascolaianni/Documents/dangl_lab/nat_variants_proj/ros_analysis/bak_ros_12_10_19_outliers/134034.summary_info.csv",stringsAsFactors = F)
# Colorblind-friendly palette used for all plots.
cbPalette <- c("#E69F00", "#56B4E9", "#009E73", "#662D91", "#D55E00", "#CC79A7", "Black", "grey")
# Convert time from seconds to minutes and keep the first 30 minutes only.
data_1471$Time <- data_1471$Time/60
data_1857$Time <- data_1857$Time/60
data_1391$Time <- data_1391$Time/60
data_1471 <- data_1471[data_1471$Time <= 30,]
data_1857 <- data_1857[data_1857$Time <= 30,]
data_1391 <- data_1391[data_1391$Time <= 30,]
# Time-course plot per peptide: mean photocount with SE error bars.
ggplot(data_1471, aes(x=Time, y=measurement, color=Name))+
  geom_line(size=1.5)+
  geom_errorbar(aes(ymin=measurement-se, ymax=measurement+se))+
  ylab("Photocount")+
  xlab("Minutes")+
  theme(text = element_text(size=20))+
  labs(color='Peptide') +
  theme_classic()+
  scale_color_manual(values = cbPalette)+
  scale_x_continuous(breaks=c(0,4,10,20,30))
ggplot(data_1391, aes(x=Time, y=measurement, color=Name))+
  geom_line(size=1.5)+
  geom_errorbar(aes(ymin=measurement-se, ymax=measurement+se))+
  ylab("Photocount")+
  xlab("Minutes")+
  theme(text = element_text(size=20))+
  labs(color='Peptide') +
  scale_color_manual(values = cbPalette)+
  theme_classic()+
  scale_x_continuous(breaks=c(0,4,10,20,30))
ggplot(data_1857, aes(x=Time, y=measurement, color=Name))+
  geom_line(size=1.5)+
  geom_errorbar(aes(ymin=measurement-se, ymax=measurement+se))+
  ylab("Photocount")+
  xlab("Minutes")+
  theme(text = element_text(size=20))+
  labs(color='Peptide') +
  scale_color_manual(values = cbPalette)+
  theme_classic()+
  scale_x_continuous(breaks=c(0,4,10,20,30))
# AUC analysis: reload per-replicate area-under-curve values for each peptide,
# log-transform, run one-way ANOVA + Tukey HSD, and plot points with group letters.
data_1471 <- read.csv("/Users/nicholascolaianni/Documents/dangl_lab/nat_variants_proj/ros_analysis/bak_ros_12_10_19_outliers/150756.auc_data.csv",stringsAsFactors = F)
data_1391 <- read.csv("/Users/nicholascolaianni/Documents/dangl_lab/nat_variants_proj/ros_analysis/bak_ros_12_10_19_outliers/130527.auc_data.csv",stringsAsFactors = F)
data_1857 <- read.csv("/Users/nicholascolaianni/Documents/dangl_lab/nat_variants_proj/ros_analysis/bak_ros_12_10_19_outliers/134034.auc_data.csv",stringsAsFactors = F)
# Log10-transform AUC to stabilize variance before ANOVA.
data_1391$log_auc <- log10(data_1391$AUC)
data_1471$log_auc <- log10(data_1471$AUC)
data_1857$log_auc <- log10(data_1857$AUC)
# Per-treatment mean and standard error of log AUC (peptide 1391).
auc_sum <- data_1391 %>%
  dplyr::group_by(Name) %>%
  dplyr::summarise(total = n(),
                   se_d = sd(log_auc)/sqrt(total),
                   mean_d = mean(log_auc))
set.seed(1994)
library(agricolae)
# ANOVA + Tukey HSD; anova_test$groups carries the compact letter display.
simple_anova <- aov(log_auc ~ Name, data_1391)
summary(simple_anova)
anova_test <- HSD.test(simple_anova, trt = 'Name')
plot(anova_test)
box_auc <- ggplot(data_1391, aes(x=Name, y=log_auc))+
  geom_errorbar(data=auc_sum, aes(x=Name, y=mean_d, ymin=mean_d-se_d, ymax=mean_d+se_d), width=.3, size=1.1)+
  geom_point(size=1.5)+
  geom_text(data = anova_test$groups, aes(x=row.names(anova_test$groups), label=groups, y=7.7), size=5)+
  labs(x="", y="Log10(RLU AUC)")+
  theme_bw(base_size = 16)+
  theme(axis.text.x = element_text(angle = 45, vjust = 1.0, hjust=1))
box_auc
# Same analysis for peptide 1471.
auc_sum <- data_1471 %>%
  dplyr::group_by(Name) %>%
  dplyr::summarise(total = n(),
                   se_d = sd(log_auc)/sqrt(total),
                   mean_d = mean(log_auc))
simple_anova <- aov(log_auc ~ Name, data_1471)
summary(simple_anova)
anova_test <- HSD.test(simple_anova, trt = 'Name')
plot(anova_test)
box_auc <- ggplot(data_1471, aes(x=Name, y=log_auc))+
  geom_errorbar(data=auc_sum, aes(x=Name, y=mean_d, ymin=mean_d-se_d, ymax=mean_d+se_d), width=.3, size=1.1)+
  geom_point(size=1.5)+
  geom_text(data = anova_test$groups, aes(x=row.names(anova_test$groups), label=groups, y=7.7), size=5)+
  labs(x="", y="Log10(RLU AUC)")+
  theme_bw(base_size = 16)+
  theme(axis.text.x = element_text(angle = 45, vjust = 1.0, hjust=1))
box_auc
# Same analysis for peptide 1857.
auc_sum <- data_1857 %>%
  dplyr::group_by(Name) %>%
  dplyr::summarise(total = n(),
                   se_d = sd(log_auc)/sqrt(total),
                   mean_d = mean(log_auc))
simple_anova <- aov(log_auc ~ Name, data_1857)
summary(simple_anova)
anova_test <- HSD.test(simple_anova, trt = 'Name')
plot(anova_test)
box_auc <- ggplot(data_1857, aes(x=Name, y=log_auc))+
  geom_errorbar(data=auc_sum, aes(x=Name, y=mean_d, ymin=mean_d-se_d, ymax=mean_d+se_d), width=.3, size=1.1)+
  geom_point(size=1.5)+
  geom_text(data = anova_test$groups, aes(x=row.names(anova_test$groups), label=groups, y=7.7), size=5)+
  labs(x="", y="Log10(RLU AUC)")+
  theme_bw(base_size = 16)+
  theme(axis.text.x = element_text(angle = 45, vjust = 1.0, hjust=1))
box_auc
# Final publication plots: split the combined treatment label ("genotype-peptide")
# into separate plant and peptide columns, baseline-subtract the photocounts,
# and draw one panel per peptide with a shared y-axis; panels combined via patchwork.
#breaks for y-axis
y_breaks <- c(0,5000,10000,15000,20000,25000)
#Alternative black plots
# Derive the plant genotype from the combined Name column (peptide 1471).
data_1471$plant[data_1471$Name %in% c("bak1-4-1471","bak1-4-Pa")] <- "bak1-4"
data_1471$plant[data_1471$Name %in% c("bak1-5-1471","bak1-5-Pa")] <- "bak1-5"
data_1471$plant[data_1471$Name %in% c("Col-1471","Col-Pa")] <- "Col-0"
data_1471$plant[data_1471$Name %in% c("fls2/efr-1471","fls2/efr-Pa")] <- "fls2/efr"
#Peptide
data_1471$peptide[data_1471$Name %in% c("bak1-4-1471","bak1-5-1471","Col-1471", "fls2/efr-1471" )] <- "1471"
data_1471$peptide[data_1471$Name %in% c("bak1-4-Pa","bak1-5-Pa","Col-Pa", "fls2/efr-Pa" )] <- "Pa"
# Shift so the minimum measurement is zero (baseline subtraction).
data_1471$measurement <- data_1471$measurement - min(data_1471$measurement)
p1471 <- ggplot(data_1471[data_1471$Time/60 < 30,], aes(x=Time/60, y=measurement, color=plant, shape=peptide))+
  geom_errorbar(aes(ymin=measurement-se, ymax=measurement+se), size=.3, alpha=.75)+
  geom_line(size=.3)+
  geom_point(size=4, alpha = .75)+
  ylab("Photocount")+
  xlab("Minutes")+
  theme(text = element_text(size=20))+
  labs(color='Plant', shape="Peptide") +
  #scale_color_grey()+
  scale_color_manual(values = cbPalette)+
  theme_classic()+
  scale_x_continuous(breaks=c(0,4,10,12,20,30,40,50))+
  scale_y_continuous(breaks = y_breaks, limits = c(0,27500))
####
# Same derivation for peptide 1391.
data_1391$plant[data_1391$Name %in% c("bak1-4-1391","bak1-4-Pa")] <- "bak1-4"
data_1391$plant[data_1391$Name %in% c("bak1-5-1391","bak1-5-Pa")] <- "bak1-5"
data_1391$plant[data_1391$Name %in% c("Col-1391","Col-Pa")] <- "Col-0"
data_1391$plant[data_1391$Name %in% c("fls2/efr-1391","fls2/efr-Pa")] <- "fls2/efr"
#Peptide
data_1391$peptide[data_1391$Name %in% c("bak1-4-1391","bak1-5-1391","Col-1391", "fls2/efr-1391" )] <- "1391"
data_1391$peptide[data_1391$Name %in% c("bak1-4-Pa","bak1-5-Pa","Col-Pa", "fls2/efr-Pa" )] <- "Pa"
data_1391$measurement <- data_1391$measurement - min(data_1391$measurement)
p1391 <- ggplot(data_1391[data_1391$Time/60 < 30,], aes(x=Time/60, y=measurement, color=plant, shape=peptide))+
  geom_errorbar(aes(ymin=measurement-se, ymax=measurement+se), size=.3, alpha=.75)+
  geom_line(size=.3)+
  geom_point(size=4, alpha = .75)+
  ylab("Photocount")+
  xlab("Minutes")+
  theme(text = element_text(size=20))+
  labs(color='Plant', shape="Peptide") +
  #scale_color_grey()+
  scale_color_manual(values = cbPalette)+
  theme_classic()+
  scale_x_continuous(breaks=c(0,4,10,12,20,30,40,50))+
  scale_y_continuous(breaks = y_breaks, limits = c(0,27500))
####
# Same derivation for peptide 1857.
data_1857$plant[data_1857$Name %in% c("bak1-4-1857","bak1-4-Pa")] <- "bak1-4"
data_1857$plant[data_1857$Name %in% c("bak1-5-1857","bak1-5-Pa")] <- "bak1-5"
data_1857$plant[data_1857$Name %in% c("Col-1857","Col-Pa")] <- "Col-0"
data_1857$plant[data_1857$Name %in% c("fls2/efr-1857","fls2/efr-Pa")] <- "fls2/efr"
#Peptide
data_1857$peptide[data_1857$Name %in% c("bak1-4-1857","bak1-5-1857","Col-1857", "fls2/efr-1857" )] <- "1857"
data_1857$peptide[data_1857$Name %in% c("bak1-4-Pa","bak1-5-Pa","Col-Pa", "fls2/efr-Pa" )] <- "Pa"
data_1857$measurement <- data_1857$measurement - min(data_1857$measurement)
p1857 <- ggplot(data_1857[data_1857$Time/60 < 30,], aes(x=Time/60, y=measurement, color=plant, shape=peptide))+
  geom_errorbar(aes(ymin=measurement-se, ymax=measurement+se), size=.3, alpha=.75)+
  geom_line(size=.3)+
  geom_point(size=4, alpha = .75)+
  ylab("Photocount")+
  xlab("Minutes")+
  theme(text = element_text(size=20))+
  labs(color='Plant', shape= "Peptide") +
  #scale_color_grey()+
  scale_color_manual(values = cbPalette)+
  theme_classic()+
  scale_x_continuous(breaks=c(0,4,10,12,20,30,40,50))+
  scale_y_continuous(breaks = y_breaks, limits = c(0,27500))
# Combine the three panels side by side (patchwork).
(p1391 | p1857 | p1471)
|
822bfe31ac4f2678db153a794643470eb39078b1
|
0fb0fc3a1807687109254aca77b063374fe18194
|
/R/package.R
|
34a5b61570795a4f88c6c55cd87911d6b02d8e92
|
[] |
no_license
|
KopfLab/dpos
|
bcb5b378f7b0773b3e6a94d81458906d79bc50d5
|
0eff08e090211cb20493291a5d45f33ee5e61294
|
refs/heads/master
| 2021-01-17T16:47:54.499281
| 2016-08-09T03:18:21
| 2016-08-09T03:18:21
| 63,631,909
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 779
|
r
|
package.R
|
#' @title MAT253 testing
#' @description This is a test
#' @name dpos
#' @docType package
#' @author Sebastian Kopf
#' @import dplyr ggplot2
#' @importFrom tidyr gather
#' @importFrom isoread isoread
#' @importFrom methods is
#' @importFrom stats setNames
#' @importFrom stringr str_match_all
#'
#' @include loading.R
#' @include plotting.R
#' @include gui.R
NULL
#' Strip the RangeScanFile class, returning the underlying data.frame.
#'
#' @param x A RangeScanFile object (a data.frame carrying an extra class).
#' @param ... Ignored; present for S3 method compatibility.
#' @export
as.data.frame.RangeScanFile <- function(x, ...) {
  # Demote to a plain data.frame so base methods dispatch from here on;
  # the underlying data and attributes are otherwise untouched.
  class(x) <- "data.frame"
  x
}
#' Convert a RangeScanFile to a tibble.
#'
#' @param x A RangeScanFile object.
#' @param ... Ignored; present for S3 method compatibility.
#' @export
as_data_frame.RangeScanFile <- function(x, ...) {
  # Drop the custom class first, then hand off to the tibble coercion.
  as_data_frame(as.data.frame(x))
}
#' Print method for RangeScanFile objects: a short provenance header
#' (file name and path stored as the "filename"/"filepath" attributes)
#' followed by the data printed as a tibble.
#'
#' @param x A RangeScanFile object.
#' @param ... Passed on to the tibble print method.
#' @export
print.RangeScanFile <- function(x, ...) {
  cat("RangeScanFile")
  cat("\nFile:", attr(x, "filename"))
  cat("\nLocation:", attr(x, "filepath"))
  cat("\nData:\n")
  print(as_data_frame(x), ...)
}
|
a1a27eb81de41862c4ea4d366fc74dcd9ad2dddb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/xpose4/examples/compute.cwres.Rd.R
|
4c09d27ef6d04f49eb4e55fddb239092cae4817f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 960
|
r
|
compute.cwres.Rd.R
|
library(xpose4)
### Name: compute.cwres
### Title: Compute the Conditional Weighted Residuals
### Aliases: compute.cwres ind.cwres read.cwres.data is.cwres.readable.file
### sqrtm xpose.calculate.cwres xpose.calculate.cwres
### Keywords: methods
### ** Examples
## Not run:
##D ## Capture CWRES from cwtab5.est and cwtab5.deriv
##D cwres <- compute.cwres(5)
##D mean(cwres)
##D var(cwres)
##D
##D ## Capture CWRES from cwtab1.est and cwtab1.deriv, do not print out, allow zeroes
##D cwres <- compute.cwres("1", printToOutFile = FALSE,
##D onlyNonZero = FALSE)
##D
##D ## Capture CWRES for ID==1
##D cwres.1 <- compute.cwres("1", id=1)
##D
##D ## xpdb5 is an Xpose data object
##D ## We expect to find the required NONMEM run and table files for run
##D ## 5 in the current working directory
##D xpdb5 <- xpose.data(5)
##D
##D ## Compare WRES, CWRES
##D xpdb5 <- xpose.calculate.cwres(xpdb5)
##D cwres.wres.vs.idv(xpdb5)
##D
## End(Not run)
|
ef39f5f1cb15e172abf4b9d57b26f88f324fc151
|
68f28f7f8dba9751386623ec47a691f7bc48400b
|
/TwitterAPI.R
|
87ae7e842f6206ba1fc6e6949a040d3c97a81ba3
|
[] |
no_license
|
alychow/nbahackathon2017
|
aa4c9f97cb346e0e80364134c3bb70574d5e6f67
|
8e87a6f8bdf34c8ffba8d6f37efbf39d4891a282
|
refs/heads/master
| 2021-07-02T13:26:36.277056
| 2017-09-24T12:09:07
| 2017-09-24T12:09:07
| 104,576,316
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,368
|
r
|
TwitterAPI.R
|
library(twitteR)
library(RCurl)
library(RJSONIO)
library(stringr)
library(streamR)
library(dplyr)
# Declare Twitter API Credentials
# SECURITY NOTE(review): live API credentials are hard-coded below. They
# should be revoked and loaded from environment variables or a config file
# excluded from version control instead of being committed.
api_key <- "bNcpnkZVRB8fXRqb7XVWdVMOA" # From dev.twitter.com
api_secret <- "OgIqQwfIExmDZm76nxlbffN2UYH2iDTopPbbTvBLdXI948j9F8" # From dev.twitter.com
token <- "1444647248-8fnJMQ8yEwvWj60wdGIw202S0xEmNgeAjwP0XoH" # From dev.twitter.com
token_secret <- "YvuucT2r8Y6jymMZWZRxSHtXEZhFAagXz0djwOtUFohaR" # From dev.twitter.com
# Create Twitter Connection
setup_twitter_oauth(api_key, api_secret, token, token_secret)
# Game schedule: one row per game with HOME, AWAY, DATE, TIME..GMT. columns.
gametime <- read.csv(file = ("/Users/alisonchow/Downloads/NBA_Schedule2016.csv"), stringsAsFactors=FALSE)
# Column 1: Time, Column 2: NumberOfTweets
final.plot.tweets <- read.csv("/Users/alisonchow/Documents/finaldata.csv")
# Count tweets per one-minute window over the course of one NBA game.
#
# Args:
#   team1, team2: home/away team names used as search terms.
#   date:         game date string; the search window covers that whole day.
#   startTime:    tip-off time (GMT); game window is 150 minutes (9000 s).
#
# Returns the per-minute tweet-count table. (The original version assigned
# into a function-local copy of `final.plot.tweets` and returned nothing,
# silently discarding every count.)
runTwitterSearch <- function(team1, team2, date, startTime) {
  endDate <- toString(as.POSIXct(date) + 86400)
  # Bug fix: the original searched for team1 only, ignoring team2 entirely.
  tweets <- searchTwitter(paste(team1, "OR", team2), n = 100, lang = "en",
                          since = date, until = endDate)
  tweets.df <- twListToDF(tweets)
  tweets.df$created <- as.POSIXct(tweets.df$created)
  # Convert BEFORE arithmetic: the original computed startTime + 9000 on a
  # character string, which errors.
  startTime <- as.POSIXct(startTime, tz = "GMT")
  endTime <- startTime + 9000  # average game length is ~150 minutes
  # Bug fix: the original's two filter() calls each started from tweets.df,
  # so the second overwrote the first; combine both conditions instead.
  final.tweets <- filter(tweets.df, created > startTime, created <= endTime)
  # Count tweets in each one-minute window of the game.
  currTime <- startTime
  initial <- 1
  while (currTime <= endTime) {
    # Bug fix: the original filtered up to endTime every iteration instead of
    # the current one-minute window.
    window <- filter(final.tweets, created > currTime, created <= currTime + 60)
    final.plot.tweets$NumberOfTweets[initial] <- nrow(window)
    currTime <- currTime + 60
    initial <- initial + 1
  }
  # Return the filled-in table so the caller can capture the counts.
  final.plot.tweets
}
# Drive the per-game tweet count for the first two games on the schedule.
# NOTE(review): runTwitterSearch's return value is discarded here, so any
# counts it computes are lost — capture the result if it is needed.
i <- 1
while (i <= 2) {
  # Set input variables to search through Twitter during game
  team1 <- gametime$HOME[i] # Home Team as a String for Search Term
  team2 <- gametime$AWAY[i] # Away Team as a String for Search Term
  date <- gametime$DATE[i] # Date of Game
  startTime <- gametime$TIME..GMT.[i] # Start Time of Game as a String
  runTwitterSearch(team1, team2, date, startTime)
  i <- i + 1
}
|
a5b7d964a8879a499acebeea999d32e8eca649b1
|
6f499ce5aaf92b44b7331f19e9224a7d672bfd57
|
/R/functions.R
|
472ab3d9d8f5b2dc98b723331e752349d483b15e
|
[] |
no_license
|
vpellissier/packing
|
14c04f6925ce51c71d5faf9a311b4b1979855bd8
|
3ecdcee54f1ad8a0ce7237932a1d1c0103fe763e
|
refs/heads/master
| 2021-07-04T13:37:21.022139
| 2017-09-27T11:31:25
| 2017-09-27T11:31:25
| 104,924,141
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 6,025
|
r
|
functions.R
|
# packing(): quantifies functional "packing" between a species-poor (site.min)
# and species-rich (site.max) assemblage. Species unique to the rich site are
# removed one at a time — always the one whose removal shrinks the convex-hull
# trait volume the most — until the depauperate hull volume drops to (or below)
# the poor site's volume. The fewer removals needed, the more the extra
# species were "packed" inside the existing trait space.
#
# Args:
#   site.min, site.max: species identifiers (row indices/names into `traits`)
#                       for the poor and rich assemblage respectively.
#   traits: species-by-trait matrix used for convex hull volumes (convhulln).
#   mat:    co-occurrence matrix; accepted but not used in the function body
#           (kept for call-site compatibility — TODO confirm).
#   ncores: if non-NULL, hull volumes are computed in parallel via snowfall
#           (sfExport/sfSapply); the cluster must already be initialized.
#
# Returns a list: pourc.pack (% of the rich site's species that packed in),
# nb.esp.expansion (species removed before reaching the poor volume),
# nb.esp.com and the two richness counts.
packing<-function(site.min, site.max, traits, mat, ncores=NULL) #here, site is the number/id of the cell
{
# Convex-hull trait volumes of the two assemblages.
convhulln(traits[site.min,], "FA")$vol->vol.min
convhulln(traits[site.max,], "FA")$vol->vol.max
# Species only present in the rich site, and species shared by both.
setdiff(site.max, site.min)->esp.unique.max
length(esp.unique.max)->nbsp.unique.max
intersect(site.min, site.max)->esp.comm
esp.rem<-NULL
volume.depaup<-rich.depaup<-vector()
# Candidate depauperate assemblages: drop one unique-to-max species each.
l.a.max.depaup<-lapply(esp.unique.max, function(x) c(setdiff(esp.unique.max,x), esp.comm))
for (i in seq(length(esp.unique.max)+length(esp.comm)))#length(esp.unique.max))
{
# Remove the species chosen on the previous iteration, then rebuild candidates.
setdiff(esp.unique.max, esp.rem)->esp.unique.max
l.a.max.depaup<-lapply(esp.unique.max, function(x) c(setdiff(esp.unique.max,x), esp.comm))
if(!is.null(ncores))
{
# Parallel hull volumes via snowfall ("QJ" joggles input for degenerate hulls).
force(l.a.max.depaup)
sfExport("l.a.max.depaup")
sfSapply(l.a.max.depaup, function(x) convhulln(traits[x,], options=c("FA", "QJ"))$vol)->fric.obs
}
if(is.null(ncores))
{
sapply(l.a.max.depaup, function(x) convhulln(traits[x,], options=c("FA", "QJ"))$vol)->fric.obs
}
# Greedy step: remove the species whose loss shrinks the hull volume the most.
esp.unique.max[which.max(vol.max-fric.obs)]->esp.rem #largest difference
fric.obs[which.max(vol.max-fric.obs)]->volume.depaup[i]
length(l.a.max.depaup[[which.max(vol.max-fric.obs)]])->rich.depaup[i]
# Stop once the depauperate volume no longer exceeds the poor site's volume.
if(volume.depaup[i]<=vol.min) break
}
return(list(pourc.pack=(1-((i-1)/length(site.max)))*100, nb.esp.expansion=i-1, nb.esp.com=length(esp.comm),
nb.esp.min=length(site.min), nb.esp.max=length(site.max)))
}
#################################
# selection of 3 assemblages per biome:
# 1/ most diverse assemblage at high NPP (A1 = richest assemblage among the cells having high NPP, as defined in 9 classes)
# 2/ selection of an assemblage at medium NPP (A2 = assemblage with 50% richness of A1, in the middle NPP class)
# 3/ selection of an assemblage at low NPP (A3 = assemblage with 50% richness of A2, in NPP class 1)
#
# pack.biomes(): for one realm-x-biome `m`, repeats (`niter` times) the
# selection of a low/medium/high-NPP cell triplet from `tab` and computes
# functional packing (see packing()) for each of the three cell pairs.
# Relies on a global co-occurrence matrix `cooc` (cells x species) —
# assumed to be defined in the calling environment; TODO confirm.
# Returns a data.frame with one row per iteration.
pack.biomes<-function(m, niter=10, ncores=NULL, tab, traits)
{
# Result accumulator: 20 columns = ids, NPP-class labels and the 5 packing
# statistics for each of the three comparisons (lh, mh, lm).
packs<-data.frame(matrix(ncol=20, nrow=0))
names(packs)<-c("RxB", "Sample", "NPP low", "NPP medium", "NPP high",
paste(c("pourc.pack", "nb.exp.expansion", "nb.esp.com", "nb.esp.min", "nb.esp.max"),
rep(c("lh", "mh", "lm"), each=5), sep="."))
for (i in seq(niter))
{
# Subset the cell table and co-occurrence matrix to the focal realm x biome.
tab[tab$realms_x_biomes==m,]->rs.tab.region
if(m=="R1.B2")
rs.tab.region<-rs.tab.region[rs.tab.region$nppt<400,]
rs.tab.region$cell<-as.character(rs.tab.region$cell)
cooc[is.element(rownames(cooc), rs.tab.region$cell),]->cooc_region
# NOTE(review): the Hmisc::cut2 assignment is immediately overwritten by
# cut() on the next line — dead code kept for reference?
rs.tab.region$class.npp<-Hmisc::cut2(rs.tab.region$nppt, g=9)
rs.tab.region$class.npp<-cut(rs.tab.region$nppt, breaks=9, labels=F)
rs.tab.region$labels.class.npp<-cut(rs.tab.region$nppt, breaks=9)
# Bail out (returning what was accumulated) if some NPP class is empty.
if(length(unique(rs.tab.region$class.npp))!=9)
return(packs)
# NOTE(review): medium uses class 5 here, but the header comment and the
# npp.m label below refer to class 4 — confirm which is intended.
rs.tab.region.high<-rs.tab.region[rs.tab.region$class.npp==9,]
rs.tab.region.medium<-rs.tab.region[rs.tab.region$class.npp==5,]
rs.tab.region.low<-rs.tab.region[rs.tab.region$class.npp==1,]
rs.tab.region.high<-rs.tab.region.high[rs.tab.region.high$nbsp>max(rs.tab.region.high$nbsp)*.90,] #high cell sampled in cells within 90% of max nbsp
cell.high.npp<-sample(rs.tab.region.high$cell,1)
nbsp.high.npp<-rs.tab.region.high$nbsp[rs.tab.region.high$cell==cell.high.npp]
# Medium cell: closest to half the richness of the high cell, then resampled
# among cells within ±5% of that richness.
cell.medium<-rs.tab.region.medium[which.min(abs(rs.tab.region.medium$nbsp-nbsp.high.npp/2)), "cell"] #cell closest to 50% of the high npp cell
nbsp.medium<-rs.tab.region.medium[rs.tab.region.medium$cell==cell.medium, "nbsp"]
cell.medium.npp<-sample(rs.tab.region.medium[rs.tab.region.medium$nbsp>nbsp.medium*0.95 & rs.tab.region.medium$nbsp<nbsp.medium*1.05, "cell"],1) #sampling one within all cells having ±5% species than the selected cell
nbsp.medium.npp<-rs.tab.region.medium[rs.tab.region.medium$cell==cell.medium.npp, "nbsp"]
rm(list=c("cell.medium", "nbsp.medium"))
# Low cell: same scheme relative to the medium cell.
cell.low<-rs.tab.region.low[which.min(abs(rs.tab.region.low$nbsp-nbsp.medium.npp/2)), "cell"] #cell closest to 50% of the medium npp cell
nbsp.low<-rs.tab.region.low[rs.tab.region.low$cell==cell.low, "nbsp"]
cell.low.npp<-sample(rs.tab.region.low[rs.tab.region.low$nbsp>nbsp.low*0.95 & rs.tab.region.low$nbsp<nbsp.low*1.05, "cell"],1) #sampling one within all cells having ±5% species than the selected cell
nbsp.low.npp<-rs.tab.region.low[rs.tab.region.low$cell==cell.low.npp, "nbsp"]
rm(list=c("cell.low", "nbsp.low"))
if(!is.null(ncores))
{
# Spin up a snowfall cluster for packing()'s parallel hull computations.
sfInit(parallel = TRUE, cpus=40)
sfExport("traits")
sfLibrary(geometry)
}
# Convert each selected cell to its species list (columns with presence > 0).
cell.low.npp<-cooc_region[cell.low.npp,]
cell.medium.npp<-cooc_region[cell.medium.npp,]
cell.high.npp<-cooc_region[cell.high.npp,]
cell.low.npp<-names(cell.low.npp[cell.low.npp>0])
cell.medium.npp<-names(cell.medium.npp[cell.medium.npp>0])
cell.high.npp<-names(cell.high.npp[cell.high.npp>0])
# Packing for the three pairwise comparisons: low-high, medium-high, low-medium.
packing(cell.low.npp, cell.high.npp, traits=traits, mat=cooc_region, ncores=ncores)->pack.LH
packing(cell.medium.npp, cell.high.npp, traits=traits, mat=cooc_region, ncores=ncores)->pack.MH
packing(cell.low.npp, cell.medium.npp, traits=traits, mat=cooc_region, ncores=ncores)->pack.LM
if(!is.null(ncores)) sfStop()
# NPP-interval labels for the three classes used in the output row.
as.character(unique(rs.tab.region$labels.class.npp[rs.tab.region$class.npp==1]))->npp.l
as.character(unique(rs.tab.region$labels.class.npp[rs.tab.region$class.npp==4]))->npp.m
as.character(unique(rs.tab.region$labels.class.npp[rs.tab.region$class.npp==9]))->npp.h
data.frame(t(c(m, i, npp.l, npp.m, npp.h, unlist(pack.LH), unlist(pack.MH), unlist(pack.LM))))->pack.region
names(packs)->names(pack.region)
rbind(packs, pack.region)->packs
print(i)
}
print(paste(m, "Try n°", i))
return(packs)
}
|
73acc0e3712f0ec27b5cbf9d271ddbaee01f298c
|
e1f3ba49206c456e39b87d5c0635fbaaa16e4715
|
/tests/testthat/test_multcomp.R
|
572dec0521c6d548be797ce3b0ea49e73802744c
|
[] |
no_license
|
chjackson/voi
|
3a1c8c5832a7713005b48c9056369e7d28a20169
|
bc80f3b63e3b5ca60595866b0aede65246296c01
|
refs/heads/master
| 2023-06-08T00:08:22.251359
| 2023-06-07T09:43:03
| 2023-06-07T09:43:03
| 227,181,181
| 7
| 5
| null | 2022-09-27T08:49:19
| 2019-12-10T17:44:17
|
R
|
UTF-8
|
R
| false
| false
| 3,557
|
r
|
test_multcomp.R
|
## Create a fake decision model output with three interventions
# Extends the packaged two-option chemotherapy example (chemo_cea, chemo_pars)
# with a third option whose costs/effects are the novel treatment's scaled by
# a random relative risk rr3_sim, so multi-comparison VOI code can be tested.
pi2 <- "p_side_effects_t2"
set.seed(1)
nsim <- nrow(chemo_cea$e)
rr3_sim <- rgamma(nsim, 100, 100)
multcomp_cea <- chemo_cea
multcomp_cea$c <- as.data.frame(multcomp_cea$c)
multcomp_cea$e <- as.data.frame(multcomp_cea$e)
multcomp_cea$c$trt3 <- multcomp_cea$c$Novel * 1.2 * rr3_sim
multcomp_cea$e$trt3 <- multcomp_cea$e$Novel * 0.8 * rr3_sim # new trt is worse
# Net benefit at a willingness-to-pay of 20,000 per unit of effect.
multcomp_nb <- multcomp_cea$e*20000 - multcomp_cea$c
multcomp_pars <- cbind(chemo_pars, rr3_sim)
# Checks EVPI/EVPPI against reference values for both the net-benefit and
# cost-effectiveness (cea) input formats with three decision options.
test_that("EVPPI for decision models with three decision options",{
expect_equal(evpi(multcomp_nb), 417, tol=10)
expect_equal(evppi(multcomp_nb, multcomp_pars, par=pi2)$evppi, 262, tol=1)
expect_equal(evpi(multcomp_cea)$evpi[2], 417, tol=10)
expect_equal(evppi(multcomp_cea, multcomp_pars, par=pi2)$evppi[2], 262, tol=1)
})
## Test the moment matching EVSI method
## Adapt the model function to three interventions
# Cost-effectiveness model with a third decision option: delegates to the
# two-option chemo_model_cea() and appends a "trt3" column whose values are
# the novel treatment's (column 2) scaled by the relative risk rr3.
multcomp_model_cea <- function(p_side_effects_t1, p_side_effects_t2,
                               p_hospitalised_total, p_died,
                               lambda_home, lambda_hosp,
                               c_home_care, c_hospital, c_death,
                               u_recovery, u_home_care, u_hospital,
                               rate_longterm, rr3){
  base_ce <- chemo_model_cea(p_side_effects_t1 = p_side_effects_t1,
                             p_side_effects_t2 = p_side_effects_t2,
                             p_hospitalised_total = p_hospitalised_total,
                             p_died = p_died,
                             lambda_home = lambda_home,
                             lambda_hosp = lambda_hosp,
                             c_home_care = c_home_care,
                             c_hospital = c_hospital,
                             c_death = c_death,
                             u_recovery = u_recovery,
                             u_home_care = u_home_care,
                             u_hospital = u_hospital,
                             rate_longterm = rate_longterm)
  # Third option derived from the novel treatment's column.
  cbind(base_ce, trt3 = base_ce[, 2] * rr3)
}
# Parameter sampler for the three-option model: the standard chemotherapy
# parameters plus n draws of the third option's relative risk rr3.
# chemo_pars_fn(n) is evaluated BEFORE rgamma to keep the RNG stream
# identical to the original implementation.
multcomp_pars_fn <- function(n){
  base_pars <- chemo_pars_fn(n)
  cbind(base_pars, rr3 = 1.2*rgamma(n, 100, 100))
}
# Moment-matching EVSI: first checks that mismatched numbers of decision
# options between outputs and model function raise errors, then checks the
# mm and gam EVSI estimates against reference values.
test_that("moment matching method",{
set.seed(100)
# Two-option outputs with a two-option model: option counts must agree.
expect_error(
evsi(outputs=multcomp_nb, inputs=chemo_pars,
pars="p_side_effects_t1",
method = "mm", study = "binary", n = c(100), Q = 5,
analysis_args = list(a=53, b=60, n=100),
model_fn = chemo_model_nb, par_fn = chemo_pars_fn),
"Number of decision options")
# Two-option outputs with a three-option model function: also an error.
expect_error(
evm <- evsi(outputs=chemo_cea, inputs=chemo_pars,
pars="p_side_effects_t1",
method = "mm",
study = "binary",
n = c(100), Q = 5,
analysis_args = list(a=53, b=60, n=100),
model_fn = multcomp_model_cea, par_fn = multcomp_pars_fn),
"Number of decision options")
# Consistent three-option inputs: mm estimate matches the reference value.
set.seed(1)
evm2 <- evsi(outputs=multcomp_cea, inputs=multcomp_pars,
pars="p_side_effects_t2",
method = "mm", study = "binary",
n = c(100), Q = 5,
analysis_args = list(a=53, b=60, n=100),
model_fn = multcomp_model_cea, par_fn = multcomp_pars_fn)
evm2
expect_equal(evm2$evsi[2], 167, tol=1)
# GAM-based estimate on the same problem, compared to its own reference.
evm3 <- evsi(outputs=multcomp_cea, inputs=multcomp_pars,
pars="p_side_effects_t2",
method = "gam", study = "binary",
analysis_args = list(a=53, b=60, n=100))
expect_equal(evm3$evsi[2], 221, tol=1)
})
|
0c30482b0d45f47d8f258fd01f30bec256094b8e
|
d940ef653cc4bba510c541344422bd56e7561845
|
/R/mantel_pertables.R
|
e66d71b7d6bd7ea65bb1f2dcf0d586725f9c040e
|
[] |
no_license
|
cran/betaper
|
522bef31bc04e7c825fed92280e01c1f8f39a166
|
9a12466326caf27f7e54af35ac20ba8498255c3f
|
refs/heads/master
| 2021-06-24T14:38:45.339965
| 2020-11-24T16:00:11
| 2020-11-24T16:00:11
| 17,694,699
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,675
|
r
|
mantel_pertables.R
|
# mantel_pertables(): Mantel test between a community dissimilarity matrix and
# environmental distances, propagated over a set of permuted/perturbed
# community tables (`pertab$pertables`) to quantify taxonomic uncertainty.
#
# Args:
#   pertab: object with $pertables (list of community matrices) and $raw
#           (the observed community matrix), as produced elsewhere in betaper.
#   env: environmental variable(s); distances computed with dist().
#   dist.method, binary: passed to vegan::vegdist for community distances.
#   cor.method, permutations: passed to vegan::mantel.
#
# Returns a list with the raw Mantel test plus a "taxonomic" p-value (ptax:
# rank of the observed statistic within the perturbed distribution), the
# per-table statistics and their quantiles, and similarity/distance vectors
# for plotting.
`mantel_pertables` <-
function (pertab, env, dist.method = "bray", binary = FALSE,
cor.method = "pearson", permutations = 100)
{
#require(vegan)
# Mantel test for one community table; returns c(statistic, p-value).
# NOTE(review): the statistic's sign is flipped here (and for the raw test
# below) — presumably so it corresponds to similarities (1 - vegdist) used
# in the output; confirm against the plotting code.
mantel.test <- function(z, env) {
mantel.st <- mantel(vegdist(z, method = dist.method,
binary = binary), dist(env), method = cor.method,
permutations = permutations)
mantel.r <- -mantel.st$statistic
mantel.p <- mantel.st$signif
return(c(mantel.r, mantel.p))
}
# Mantel statistic and p-value for every perturbed table.
results <- sapply(pertab$pertables, function(x) mantel.test(x,
env))
row.names(results) <- c("r", "p-value")
# Quantiles of the perturbed statistics (incl. 95% and 99% envelopes).
mantel.quant <- apply(results, 1, quantile, c(0, 0.005, 0.025,
0.5, 0.975, 0.995, 1))
# Similarities (1 - distance) per perturbed table, for plotting.
vdper <- lapply(pertab$pertables, function(x) 1 - vegdist(x,
method = dist.method, binary = binary))
# Mantel test on the observed (raw) table.
z <- pertab$raw
mantel.raw <- mantel(vegdist(z, method = dist.method, binary = binary),
dist(env), method = cor.method, permutations = permutations)
mantel.r <- -mantel.raw$statistic
# Taxonomic p-value: two-sided rank of the observed statistic within the
# distribution of perturbed statistics.
ptax <- ((rank(c(mantel.r, results[1, ])))/(length(results[1,
]) + 1))[1]
ptax <- ifelse(ptax <= 0.5, ptax, 1 - ptax)
vd <- 1 - vegdist(pertab$raw, method = dist.method, binary = binary)
env.dist <- as.vector(dist(env))
mantel.output <- list(mantel = list(mantel.raw = mantel.raw,
ptax = ptax), simulation = list(results = results, mantel.quant = mantel.quant,
vegdist = vdper), raw = list(vegdist = vd, env.dist = env.dist))
#class(mantel.output) <- c("mantel.pertables", class(mantel.output))
class(mantel.output) <- c("mantel_pertables", class(mantel.output))
return(mantel.output)
}
|
4dc6601b9c6b0d48354c35d475ea9e80bd26c6c9
|
6dde5e79e31f29db901c81e4286fea4fa6adbc48
|
/R/argcheck.R
|
02a7580189333ec0b17072110c4c1314b4df2b92
|
[] |
no_license
|
cran/fda
|
21b10e67f4edd97731a37848d103ccc0ef015f5a
|
68dfa29e2575fb45f84eb34497bb0e2bb795540f
|
refs/heads/master
| 2023-06-08T07:08:07.321404
| 2023-05-23T22:32:07
| 2023-05-23T22:32:07
| 17,696,014
| 23
| 19
| null | 2022-03-13T17:58:28
| 2014-03-13T04:40:29
|
R
|
UTF-8
|
R
| false
| false
| 262
|
r
|
argcheck.R
|
# Validate an argument-values vector: must be numeric with at least two
# values. Returns the input coerced to a plain vector (dims/attrs dropped);
# stops with an informative error otherwise.
argcheck <- function(argvals) {
  if (!is.numeric(argvals)) {
    stop("ARGVALS is not numeric.")
  }
  vals <- as.vector(argvals)
  if (length(vals) < 2) {
    stop("ARGVALS does not contain at least two values.")
  }
  vals
}
|
f1e91157ab18326d8d64d112ab91b65ccfcfdb40
|
b12113098b08f72cab9c8fac505921fa6f3b5bb1
|
/R/read_control_files.R
|
68e142aeca1da8dce5f7fd3959a63d8716a1d1ee
|
[] |
no_license
|
dquang/sobekioNHWSP
|
ff6d7406b12c31673ae2dd33b680bf906c731363
|
f9fa651e820c61109fccb72bb1172387bd5e3bc4
|
refs/heads/master
| 2023-01-12T11:03:48.448961
| 2020-07-01T10:13:42
| 2020-07-01T10:13:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,629
|
r
|
read_control_files.R
|
#---- Read the SOBEK trigger definition file (trigger.def) ----
# The file is read line-by-line into a single column V1 (original line order
# preserved); trigger attributes are then pulled out of each line with
# regular expressions, and the trigger id is filled down over each block so
# every line knows which trigger it belongs to.
.get_trigger_def <- function(trigger.def.f = NULL){
trig_def <- fread(trigger.def.f, sep = "\n",
strip.white = FALSE,
encoding = 'Latin-1',
header = FALSE)
# id and name from the "TRGR id '...' nm '...'" header line of each block
st_mtx <- str_match(trig_def$V1,
"TRGR id '([^']*)' nm '([^']*)'")
trig_def$id <- st_mtx[, 2]
trig_def$nm <- st_mtx[, 3]
# type of trigger (code; see .get_tg_type for the labels)
trig_def$ty <- str_match(trig_def$V1, "ty ([0-9]{1}) ")[, 2]
# trigger parameter (code; see .get_tg_param for the labels)
trig_def$tp <- str_match(trig_def$V1, "tp ([0-9]{1}) ")[, 2]
# measurement location id
trig_def$ml <- str_match(trig_def$V1, " ml '([^']*)' ")[, 2]
# structure id (for hydraulic/combined triggers only)
trig_def$struct <- str_match(trig_def$V1, " ts '([^']*)' ")[, 2]
# check on (only relevant if trigger parameter: 3, 4, 5)
trig_def$chk <- str_match(trig_def$V1, " ch (\\d) ")[, 2]
# fill id downward: cumsum(!is.na(id)) groups each header line with the
# lines that follow it, so id[1] of each group is the block's id
trig_def[, id := id[1], by = .(cumsum(!is.na(id)))]
return(trig_def)
}
#---- Read the SOBEK structure data file (struct.dat) ----
# Each structure record carries an id, a display name, a definition id (dd)
# linking into struct.def, controller-active flags (ca) and controller ids
# (cj). orig_line_nr is kept so edits can be written back to the exact line.
.get_struct_dat <- function(struct.dat.f = NULL){
str_tbl <- fread(struct.dat.f ,
strip.white = FALSE,
encoding = 'Latin-1',
sep = "\n", header = FALSE)
str_tbl[, orig_line_nr := .I]
# get id, name, definitionID
st_mtx <- str_match(
str_tbl$V1,
" id '([^']*)' nm '([^']*)' dd '([^']*)' ")
str_tbl$id <- st_mtx[, 2]
str_tbl$name <- st_mtx[, 3]
str_tbl$def_ID <- st_mtx[, 4]
# four-controller form: ca = four digit flags, cj = four quoted ids
st_mtx <- str_match(
str_tbl$V1,
" id .* ca (\\d \\d \\d \\d) cj ('[^']*' '[^']*' '[^']*' '[^']*') ")
str_tbl$ca <- st_mtx[, 2]
str_tbl$cj <- st_mtx[, 3]
# fall back to the single-controller form where the 4-controller match failed
str_tbl[is.na(ca), ca := str_match(V1, " ca (\\d) ")[, 2]]
str_tbl[is.na(cj), cj := str_match(V1, " cj ('[^']*') ")[, 2]]
return(str_tbl)
}
#---- Read the SOBEK structure definition file (struct.def) ----
# Returns the file line-by-line with definition attributes (def_ID, name,
# type, crest level/width, flow direction) attached to the "STDS id" header
# lines, then merged back onto all lines with def_ID filled down per block.
.get_struct_def <- function(struct.def.f = NULL){
st_def <- fread(struct.def.f ,
strip.white = FALSE,
sep = "\n", header = FALSE, encoding = 'Latin-1')
st_def[, orig_line_nr := .I]
# get the description lines only
st_def_tbl <- st_def[grepl("^STDS id", V1)]
# get def_ID, name, type (type codes labelled by .get_str_type)
st_mtx <- str_match(
st_def_tbl$V1,
"STDS id '([^']*)' nm '([^']*)' ty (\\d{1,2}).*")
st_def_tbl$def_ID <- st_mtx[, 2]
st_def_tbl$def_name <- st_mtx[, 3]
st_def_tbl$def_ty <- st_mtx[, 4]
# crest level (cl) and crest/sill width (cw or sw field)
st_def_tbl[, cl := as.double(str_match(V1, ' cl (\\d*\\.*\\d*) ')[, 2])]
st_def_tbl[, cw := as.double(str_match(V1, ' [cs]w (\\d*\\.*\\d*) ')[, 2])]
# possible flow direction; pumps (def_ty == '9') use the "dn" field instead
st_def_tbl[, rt := str_match(V1, ' rt (\\d*\\.*\\d*) ')[, 2]]
st_def_tbl[def_ty == '9', rt := str_match(V1, ' (dn -*\\d) ')[, 2]]
st_def_tbl$V1 <- NULL
# re-attach the attributes to the full line table, fill def_ID downward
st_def <- merge(st_def, st_def_tbl, by = 'orig_line_nr', all.x = TRUE)
st_def[, def_ID := def_ID[1], .(cumsum(!is.na(def_ID)))]
return(st_def)
}
#---- Read the SOBEK controller definition file (control.def) ----
# Returns the file line-by-line with controller attributes parsed from the
# "CNTL id" header lines and the controller id filled down over each block.
.get_control_def <- function(control.def.f = NULL){
ct_def <- fread(control.def.f, sep = "\n", header = FALSE,
strip.white = FALSE,
encoding = 'Latin-1')
ct_def[, orig_line_nr := .I]
ct_tbl <- ct_def[grepl('^CNTL id .*', V1)]
# id of the controller
ct_tbl[, id := str_match(V1, "CNTL id '([^']*)'")[,2]]
# name of the controller
ct_tbl[, name := str_match(V1, " nm '([^']*)'")[,2]]
# controller type (code; see .get_ct_type for the labels)
ct_tbl[, ct := str_match(V1, " ct (\\d) ")[,2]]
# controlled parameter (code; see .get_ct_param_type for the labels)
ct_tbl[, ca := str_match(V1, " ca (\\d) ")[,2]]
# controller active yes/no
ct_tbl[, ac := str_match(V1, " ac (\\d) ")[,2]]
# update frequency
ct_tbl[, cf := str_match(V1, " cf (\\d{1,}) ")[,2]]
# trigger-active flags: four-flag form first, single-flag form as fallback
ct_tbl[, ta := str_match(V1, " ta (\\d \\d \\d \\d) ")[,2]]
ct_tbl[is.na(ta), ta := str_match(V1, " ta (\\d) ")[,2]]
# ids of the (up to four) attached triggers
ct_tbl[, gi :=
str_match(V1, " gi ('[^ ]*' '[^ ]*' '[^ ]*' '[^ ]*') ")[,2]]
ct_tbl[is.na(gi), gi :=
str_match(V1, " gi ('[^ ]*') ")[,2]]
# and (=1) or (=0) relation when using more triggers
ct_tbl[, ao := str_match(V1, " ao (\\d \\d \\d \\d)")[,2]]
# dValue / dt
ct_tbl[, mc := str_match(V1, " mc ([^\\ ]*) ")[,2]]
# interpolation method
ct_tbl[, bl := str_match(V1, " bl (\\d) ")[,2]]
# type of measured parameter (code; see .get_cp_type for the labels)
ct_tbl[, cp := str_match(V1, " cp (\\d) ")[,2]]
# time lag between controlling parameter and controller parameter
ct_tbl[, mp := str_match(V1, " mp (\\d) ")[,2]]
# id of measurement node
ct_tbl[, ml := str_match(V1, " ml '([^']*)' ")[,2]]
ct_tbl$V1 <- NULL
# re-attach the attributes to all lines and fill id downward over each block
ct_def <- merge(ct_def, ct_tbl, by = 'orig_line_nr', all.x = TRUE)
ct_def[, id := id[1], .(cumsum(!is.na(id)))]
return(ct_def)
}
# Sobek code-type -------------------------------------------------------
# Translate a SOBEK structure-type code (character) into a readable label.
# NULL, NA and unknown codes all map to NA.
.get_str_type <- function(x) {
  if (is.null(x) || is.na(x)) {
    return(NA)
  }
  switch(x,
         "0" = "River weir",
         "1" = "River advanced weir",
         "2" = "General structure",
         "3" = "River pump",
         "4" = "Database structure",
         "5" = "Unknown",
         "6" = "Weir",
         "7" = "Orifice",
         "8" = "Unknown",
         "9" = "Pump",
         "10" = "Culvert/Siphon",
         "11" = "Universal weir",
         "12" = "Bridge",
         "13" = "Branch growth 1D Dam break node",
         "112" = "Branch growth 2D Dam break node",
         NA)
}
# Translate a SOBEK flow-direction code into a readable label.
# Plain digit codes apply to weirs; "dn ..." codes apply to pumps, where a
# negative value marks flow opposite to the branch direction.
# NULL, NA and unknown codes all map to NA.
.get_rt_type <- function(x) {
  if (is.null(x) || is.na(x)) {
    return(NA)
  }
  switch(x,
         "0" = "Both",
         "1" = "Positive",
         "2" = "Negative",
         "3" = "No flow",
         "dn 1" = "Upward",
         "dn 2" = "Downward",
         "dn 3" = "Both",
         "dn -1" = "Upward (flow >< branch)",
         "dn -2" = "Downward (flow >< branch)",
         "dn -3" = "Both (flow >< branch)",
         NA)
}
# Translate a SOBEK controller-type code into a readable label.
# NULL, NA and unknown codes all map to NA.
.get_ct_type <- function(x) {
  if (is.null(x) || is.na(x)) {
    return(NA)
  }
  switch(x,
         "0" = "Time controller",
         "1" = "Hydraulic controller",
         "2" = "Interval controller",
         "3" = "PID controller",
         "4" = "Relative time controller",
         "5" = "Relative from value controller",
         NA)
}
# Translate a SOBEK controlled-parameter code into a readable label.
# NULL, NA and unknown codes all map to NA.
# NOTE(review): code "4" maps to an empty string in the original source;
# its meaning is unclear here - confirm against the SOBEK documentation.
.get_ct_param_type <- function(x) {
  if (is.null(x) || is.na(x)) {
    return(NA)
  }
  switch(x,
         "0" = "Crest level",
         "1" = "Crest width",
         "2" = "Gate height",
         "3" = "Pump capacity",
         "4" = "",
         "5" = "Bottom level of 2D grid cell",
         NA)
}
# Translate a SOBEK measured-parameter code into a readable label.
# NULL, NA and unknown codes all map to NA.
.get_cp_type <- function(x) {
  if (is.null(x) || is.na(x)) {
    return(NA)
  }
  switch(x,
         "0" = "Water level",
         "1" = "Discharge",
         "2" = "Head difference",
         "3" = "Velocity",
         "4" = "Flow direction",
         "5" = "Pressure difference",
         NA)
}
# Translate a SOBEK trigger-type code into a readable label.
# NULL, NA and unknown codes all map to NA.
.get_tg_type <- function(x) {
  if (is.null(x) || is.na(x)) {
    return(NA)
  }
  switch(x,
         "0" = "Time",
         "1" = "Hydraulic",
         "2" = "Combined",
         NA)
}
# Translate a SOBEK trigger-parameter code into a readable label.
# NULL, NA and unknown codes all map to NA.
# Fix: the original listed 'Crest width' under code '6' in addition to
# 'Waterlevel in retention area'; R's switch() matches the first duplicate
# name, so the retention-area label was unreachable. Code '5' was missing
# from the sequence, and 'Crest width' belongs there.
.get_tg_param <- function(x) {
  if (is.null(x)) return(NA)
  if (is.na(x)) return(NA)
  tg_param <- switch(x,
                     '0' = 'Waterlevel at branch location',
                     '1' = 'Head difference over structure',
                     '2' = 'Discharge at branch location',
                     '3' = 'Gate lower edge level',
                     '4' = 'Crest level',
                     '5' = 'Crest width',
                     '6' = 'Waterlevel in retention area',
                     '7' = 'Pressure difference over structure',
                     NA)
  return(tg_param)
}
# modified from kableExtra::spec_popover(), adding an html = TRUE switch so
# the popover body may contain markup (Bootstrap's data-html attribute).
# Builds the data-* attribute string for a Bootstrap popover and tags it
# with class "ke_popover".
#' @export
spec_popover2 <-
  function(content = NULL,
           title = NULL,
           trigger = "hover",
           html = TRUE,
           position = "right")
  {
    trigger <- match.arg(trigger, c("hover", "click", "focus", "manual"),
                         several.ok = TRUE)
    # emit the quoted literal Bootstrap expects: "true" / "false"
    html_flag <- if (html) '"true"' else '"false"'
    position <- match.arg(position,
                          c("bottom", "top", "left", "right", "auto"),
                          several.ok = TRUE)
    # title attribute is optional; an empty string keeps the original
    # output layout (including the resulting double space) when absent
    title_attr <- if (is.null(title)) "" else paste0("title=\"", title, "\"")
    popover_options <- paste(
      "data-toggle=\"popover\"",
      paste0("data-trigger=\"", trigger, "\""),
      paste0("data-placement=\"", position, "\""),
      paste0("data-html=", html_flag),
      title_attr,
      paste0("data-content=\"", content, "\"")
    )
    class(popover_options) <- "ke_popover"
    return(popover_options)
  }
|
d7558ccaadb8a8c73a4e3dbcdd8d7a622189f673
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lmeNB/examples/rNBME_R.Rd.R
|
502092bf29fa6839c4ede98b8a4ac4cf030a1286
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 336
|
r
|
rNBME_R.Rd.R
|
library(lmeNB)
### Name: rNBME.R
### Title: Simulate a dataset from the negative binomial mixed-effect
### independent/AR(1) model
### Aliases: rNBME.R
### Keywords: ~kwd1 ~kwd2
### ** Examples
## Not run:
##D ## See the examples in help files of fitParaIND, fitParaAR1, fitSemiIND, fitSemiAR1 and lmeNB
##D
## End(Not run)
|
6fa69e694eff2247a9ffe4afaef6a36ac82709a6
|
809d5d299026e52cebcc301860717194b191ab1f
|
/R/read_image.R
|
a003f7375f99ae80abb8991860eddd7703d0f5b7
|
[] |
no_license
|
drighelli/spatialLIBD
|
5145b6ad07a01319b798c2de6857b27aea414b46
|
8900f1f41e28ef6bdc604cedc15c184cff08f80a
|
refs/heads/master
| 2023-01-02T09:48:28.224346
| 2020-10-26T22:33:31
| 2020-10-26T22:33:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,249
|
r
|
read_image.R
|
#' Read image
#'
#' Helper function for `geom_plot()` that was needed in order to complete
#' `sce_to_ve()`. It generates the `grid::rasterGrob()` data needed by
#' `geom_plot()` that was previously included in the output from
#' `fetch_data("sce")`.
#'
#' @param ve A
#' [VisiumExperiment-class][SpatialExperiment::VisiumExperiment-class] object,
#' such as one created by `sce_to_ve()`.
#' @param sample_id A `character(1)` specifying the sample ID to work with.
#' Defaults to the first sample listed in `imagePaths(ve)`.
#'
#' @importFrom readbitmap read.bitmap
#' @importFrom grid rasterGrob
#' @importFrom tibble tibble
#' @export
#' @family VisiumExperiment-related functions
#' @return A `tibble::tibble()` with a `grob` column that is a `list()` with a
#' `grid::rasterGrob()` object.
#' @author
#' Brenda Pardo, Leonardo Collado-Torres
read_image <- function(ve, sample_id = names(imagePaths(ve))[1]) {
## Check inputs: sample_id must match one of the stored image paths
stopifnot(sample_id %in% names(imagePaths(ve)))
## Read the tissue image from disk (format auto-detected by readbitmap)
img <- readbitmap::read.bitmap(imagePaths(ve)[as.character(sample_id)])
## Create a raster grob that spans the full plotting area (npc units)
grob <- grid::rasterGrob(img, width = unit(1, "npc"), height = unit(1, "npc"))
## Wrap in a one-row tibble so ggplot2 layers (geom_plot) can consume it
tibble_image <- tibble::tibble(grob = list(grob))
return(tibble_image)
}
|
e0c95803b0f8d1a5fb1d294bcae4129a00c2bbc8
|
dc83f2c0b574499c3bc7116d90d5dcc5694d0665
|
/ALY6020_Arvind_Pawar_HW03.R
|
52bf0c318a43ff67f80a364ac0eb31d370769b2e
|
[] |
no_license
|
ArvindPawar08/Machine-Learning-Projects
|
7d8933424eaddaea0d44f115ad9640e50a7e6b03
|
48686f9d0a95b2da7672a44a64cf68ba5ae54fad
|
refs/heads/master
| 2022-01-17T20:42:23.492846
| 2019-07-17T13:52:38
| 2019-07-17T13:52:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,324
|
r
|
ALY6020_Arvind_Pawar_HW03.R
|
library(caret) # an aggregator package for performing many machine learning models
library(rpart)

# ---- Generate the target function on x in [-10, 10] ----
x <- seq(-10, 10, 0.01)
y <- sin(x) + 5*cos(2*x) - 3*sin(3*x) + (1 - exp(-x/3) + 25)
df <- as.data.frame(x)
df[, 2] <- as.data.frame(y)
head(df)
# inspect the generated values
df
dim(df)
tail(df)

# Plot the original function.
# Fix: plot's `type` argument takes one-character codes; "line" only worked
# via truncation to "l" and raised a warning.
plot(df$x, df$y, type = "l", col = "red")

# ---- Gradient boosting with regression trees ----
s <- c()                               # per-iteration absolute-error trace
alpha <- 0.1                           # learning rate
tree <- rpart(df$y ~ df$x, data = df)  # stage-0 tree fit to the raw target
df$pred <- predict(tree, data = df$x)  # stage-0 prediction
abs_res <- sum(abs(df$y - df$pred))    # total absolute residual
s <- c(s, abs_res)
p2 <- data.frame()                     # predictions saved at each iteration

# Boost until the total absolute residual drops below 10: fit each new tree
# to the current residuals and add an alpha-damped correction to the model.
while (abs_res > 10) {
  df$res <- df$y - df$pred
  tree1 <- rpart(df$res ~ df$x, data = df,
                 control = rpart.control(cp = 0.00000001))
  df$pred1 <- predict(tree1, data = df$x)
  df$pred <- df$pred + alpha * df$pred1
  abs_res <- sum(abs(df$y - df$pred))
  s <- c(s, abs_res)         # record the error after this iteration
  p2 <- rbind(p2, df$pred)   # record this iteration's predictions
}
s

# ---- Visualise fits at different boosting stages ----
# NOTE(review): the row indices 5 and 550 assume the loop ran at least 550
# iterations; p2[550, ] errors otherwise - confirm before reuse.
plot(df$x, df$y, type = "l", col = "red", lwd = 4, xlab = "x", ylab = "y")
lines(df$x, p2[5, ], col = "purple", lwd = 3)   # early-stage fit
lines(df$x, p2[550, ], col = "black", lwd = 2)  # late-stage fit
# Fix: legend colours/widths now match the lines actually drawn above
# (the original legend claimed green for a line that was drawn purple).
legend("bottomright",
       legend = c("Original Function", "First Accuracy",
                  "Accuracy at Different Level"),
       lwd = c(4, 3, 2), col = c("red", "purple", "black"), cex = 0.60)
title(main = 'Original Function and Different Levels of Accuracies',
      cex.main = 1, font.main = 4, col.main = "black", outer = FALSE)
|
a5cc301c9f7fdeb91e653ca299aa1e055387c43a
|
94e33777e055bf71f867bd158d25f76d75e01bcc
|
/R/read_DetectorPositionData.R
|
a0a2d4d170530dc4698908298f3413aeaa0e3f11
|
[] |
no_license
|
norgardp/psd4
|
305bd8094815dca6cf55f276e5b6d37f29ecb82c
|
5b8a291c6b4e072d463876b4e88d771256e98783
|
refs/heads/master
| 2022-09-24T15:13:36.762092
| 2020-06-03T20:25:59
| 2020-06-03T20:25:59
| 269,189,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 199
|
r
|
read_DetectorPositionData.R
|
# Read a detector-position export file: the first 9 lines are header and are
# skipped, the remaining rows are whitespace-separated columns. Returns a
# list with a single element "data": a two-column matrix pairing column 1
# with column 5 of the file.
read_DetectorPositionData <- function(dvalue)
{
  raw <- read.table(dvalue, header = FALSE, skip = 9)
  list(data = cbind(raw[, 1], raw[, 5]))
}
|
07ec57ca604cad1ea5a770d5fdb90920cd86f374
|
af60ff0521de2e108fb12d7a236b22c3d9d500bb
|
/kormaps.R
|
a620ee8156c6652ffa7d1725013bc0e9802588aa
|
[
"MIT"
] |
permissive
|
gusdyd98/parm_crawl
|
3c856e90cd85b45ef2e38f1ac36fbd96ea61351d
|
a76a8e14a35353bb780acd193adb1688e21c6a02
|
refs/heads/master
| 2020-09-04T22:26:32.736262
| 2019-11-06T14:01:09
| 2019-11-06T14:01:09
| 219,908,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
kormaps.R
|
#install.packages("devtools")
#devtools::install_github("cardiomoon/kormaps2014")
#devtools::install_github("cardiomoon/moonBook2")
library(kormaps2014)
library(moonBook2)
tt<-kormap1
head(tt)
gg<-kormap2
head(gg)
dd<-kormap3
head(dd)
str(dd)
write.csv(dd,"kormap3.csv")
|
c188419f7c733b8aabc8bd03cfd8da8f78f9656e
|
7e817804f78f35c6fb72a43f22a012b84b6077a5
|
/data/Wk1_Quiz1_Codes.R
|
72b9eaf28d7a9fe26c1058a1dfd70f7ac7558a5f
|
[] |
no_license
|
sebollin/GettingAndCleaningData2015
|
27c09144a596bb6c2e9786fd8c0feefc7a029763
|
2c66ac3015d642ac8b4fac898fb45d5e1025e47f
|
refs/heads/master
| 2021-05-14T13:47:37.382888
| 2015-01-15T18:21:52
| 2015-01-15T18:21:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,003
|
r
|
Wk1_Quiz1_Codes.R
|
WK1DT=read.table("getdata-data-ss06hid.csv", sep=",", header=T)
VALVECTOR=WK1DT$VAL
names(VALVECTOR)
head(VALVECTOR)
table(VALVECTOR)
library(xlsx)
DT=read.xlsx("getdata-data-DATA.gov_NGAP.xlsx", sheetIndex=1, header=T)
head(DT)
colIndex=7:15
rowIndex=18:23
dat=read.xlsx("getdata-data-DATA.gov_NGAP.xlsx", sheetIndex=1, colIndex=colIndex, rowIndex=rowIndex)
sum(dat$Zip*dat$Ext,na.rm=T)
library(XML)
library(RCurl)
file_url="https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml"
xData <- getURL(file_url, ssl.verifyPeer=FALSE)
document=xmlTreeParse(xData,useInternal=T)
root_node=xmlRoot(document)
names(root_node)
root_node[[1]]
root_node[[1]][[1]]
V=xpathSApply(root_node, "//zipcode", xmlValue)
head(V)
V2=V[V=="21231"]
str(V2)
summary(V2)
DT=read.table("getdata-data-ss06pid.csv", sep=",", header=T)
mean(DT$pwgtp15,by=DT$SEX)
system.time(mean(DT$pwgtp15,by=DT$SEX))
rowMeans(DT)[DT$SEX==1]; rowMeans(DT)[DT$SEX==2]
sapply(split(DT$pwgtp15,DT$SEX),mean)
mean(sapply)
|
09a5406c323e394ea77e888d7a83ac23a0852905
|
a399eaebe915b3c0b024bdaa3beb7404c50d39ff
|
/plot5.R
|
058533d01fdaa0c30ff16a216f55e467bfd32c32
|
[] |
no_license
|
VoytechM/ExData_Plotting1
|
1f302cd34de996329d8c2bc1290713b9f3d9a68d
|
05882d4fb38ee7ee7a267418e1676e6c63eaa190
|
refs/heads/master
| 2021-05-29T00:59:47.249707
| 2014-07-22T21:45:40
| 2014-07-22T21:45:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 702
|
r
|
plot5.R
|
# Plot total PM2.5 emissions per year (summed over all sources in the NEI
# data set) to plot5.png, overlaying a straight red segment from the first
# year's total to the last year's total.
plot5 <- function() {
# download and unpack the NEI data only on first run
if(!file.exists("summarySCC_PM25.rds")) {
url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip'
filename <- 'exdata%2Fdata%2FNEI_data.zip'
download.file(url, filename, mode ='wb')
# extract files
unzip(filename)
}
NEI <- readRDS("summarySCC_PM25.rds")
# NOTE(review): SCC is loaded but never used below
SCC <- readRDS("Source_Classification_Code.rds")
NEI$year = as.factor(NEI$year)
# total emissions per year (one row per year factor level)
y = aggregate(NEI$Emissions, by=list(Category=NEI$year), FUN=sum)
png(filename="plot5.png", width = 480, height = 480, bg = "transparent")
plot(y, ylab="Pollution Levels", xlab="United States")
yr = nrow(y)
# trend segment: first year's total to last year's total
segments(1, y[1,2], x1=yr, y1=y[yr,2], col = "red", lwd=2)
dev.off()
}
|
9d2009a6abbb9fb1a048a42d06d9438b4bd88c0e
|
158461b7ef6b7bf09132aa8e438a314a976d00af
|
/man/plot.CAvariants.Rd
|
f8a0a969cf59916453b20a0401a537ea126818a4
|
[] |
no_license
|
jennycombariza/CAvariants
|
f14cb7fb9004a31704fd726ce2a453884b5362a7
|
b9127086aba03b33ffda39cfb99217ab9f5343ae
|
refs/heads/master
| 2020-03-27T20:53:25.817067
| 2017-02-27T08:58:06
| 2017-02-27T08:58:06
| 147,100,440
| 0
| 1
| null | 2018-09-02T16:01:10
| 2018-09-02T16:01:10
| null |
UTF-8
|
R
| false
| false
| 6,972
|
rd
|
plot.CAvariants.Rd
|
\name{plot.CAvariants}
\alias{plot.CAvariants}
\title{
Main plot function
}
\description{
This function allows the analyst to produce the suitable graphical displays with respect to the six variants of correspondence analysis.
In particular when \code{plottype = "classic"}, it produces classical graphical displays for \code{catype = "CA"} and \code{catype = "NSCA"},
where the row and column variables are graphically depicted in principal coordinates.\cr
When we set \code{plottype = "biplot"}, it produces biplot graphical displays, or polynomial biplots in case of ordered analysis.
Notice that for ordered analysis only polynomial biplots are suitable. In particular, for the singly ordered variants only row isometric polynomial biplots make sense,
as we assume that the ordered variable is the column variable (the column coordinates are standard polynomial coordinates
and the row coordinates are principal polynomial coordinates).
When the input parameter \code{catype} is equal to \code{catype = "SOCA"},
\code{catype = "DOCA"}, \code{catype = "SONSCA"} or \code{catype = "DONSCA"}, then the input parameter
\code{plottype} should be equal to \code{plottype = "biplot"}, if \code{biptype = "row"} , it will give back a row isometric polynomial biplot.
}
\usage{
\S3method{plot}{CAvariants}(x, firstaxis = 1, lastaxis = 2, cex = 0.8, cex.lab = 0.8,
prop = 1, plottype = "biplot", biptype = "row",
scaleplot = 1, posleg = "topleft", pos = 2, ell = FALSE, Mell = x$Mell, alpha = 0.05,\dots)
}
\arguments{
\item{x}{
The name of the output object, for example say \code{res}, used with the main function \code{CAvariants}.
}
\item{firstaxis}{
The horizontal polynomial or principal axis, \code{firstaxis}. By default, \code{firstaxis = 1}.
}
\item{lastaxis}{
The vertical polynomial or principal axis, \code{lastaxis}. By default, \code{lastaxis = 2}.
}
\item{cex}{
The size of characters, \code{cex}, displayed on the correspondence plot or biplot. By default, \code{cex = 0.8}.}
\item{cex.lab}{
The parameter \code{cex.lab} that specifies the size of character labels of axes in graphical displays. By default, \code{cex.lab = 0.8}.
}
\item{prop}{
The scaling parameter for specifying the limits of the plotting area. By default, \code{prop = 1}.
}
\item{plottype}{
The type of graphical display required (either a classical correspondence plot or a biplot).
The user can look at a classical correspondence plot by defining the input
parameter \code{plottype = "classic"}.
When \code{plottype = "biplot"}, it produces biplot graphical displays, or polynomial biplots in case of an ordered analysis.
Note that for ordered analysis only polynomial biplots are suitable. In particular for the singly ordered variants,
only row isometric polynomial biplots make sense, as we assume that the ordered variable is the column variable
(the column coordinates are standard polynomial coordinates and the row coordinates are principal polynomial coordinates).
By default, \code{plottype = "biplot"}.
}
\item{biptype}{
For a biplot, one may specify that it be a row-isometric biplot (\code{biptype = "row"}) or a column-isometric biplot (\code{biptype = "column"}).
This feature is available for the nominal symmetrical and the non symmetrical correspondence analyses.
By default, a row-isometric biplot, \code{biptype = "row"}, is produced.
}
\item{scaleplot}{
The parameter for scaling the biplot coordinates, \code{scaleplot}, originally proposed in Section 2.3.1 of Gower et al. (2011) and
described on page 135 of Beh and Lombardo (2014). By default, \code{scaleplot = 1}.
}
\item{posleg}{
The parameter \code{posleg} for specifying the position of the legend when portraying trends of ordered categories
in ordered variants of correspondence analysis.
By default, \code{posleg = "topleft"}. }
\item{pos}{
The parameter for specifying the position of point symbols in the graphical displays.
By default, \code{pos = 2}.}
\item{ell}{ The logical parameter, \code{ell} which specifies whether algebraic confidence ellipses are to be included in the plot or not.
Setting the input parameter to \code{ell = TRUE} will allow the user to assess the statistical significance of each category to
the association between the variables. The ellipses will be included when the plot is constructed using principal coordinates
(being either row and column principal coordinates or row and column principal polynomial coordinates).
By default, this input parameter is set to \code{ell = FALSE}. See also the input parameter \code{ellcomp}
of the function \code{CAvariants}
for a description of the numeric characteristics of the confidence ellipses (eccentricity, area, etc.), as well as the
input parameter \code{ellprint} of the function \code{print.CAvariants} for getting a print of these parameters.
}
\item{Mell}{
The number of axes \code{Mell} considered when portraying the elliptical confidence regions. \cr
By default, it is equal to \code{Mell = min(nrow(Xtable), ncol(Xtable)) - 1}, i.e. the rank of the data matrix.
This parameter is identical to the input parameter \code{Mell} of the function \code{CAvariants}.
}
\item{alpha}{
The confidence level of the elliptical regions. By default, \code{alpha = 0.05}.}
\item{\dots}{Further arguments passed to or from other methods.}
}
\details{
It produces classical and biplot graphical displays. Further when \code{catype} is equal to \cr
\code{"DOCA", "SOCA", "DONSCA"} or \code{"SONSCA"},
the trend of row and column variables after the reconstruction of column profiles by polynomials is portrayed.\cr
For classical biplot displays, it superimposes on it algebraic ellipses of confidence. It uses the secondary plot functions \code{caellipse} or
\code{nscaellipse}, depending on the input parameter \code{catype}. }
\references{
Beh EJ and Lombardo R 2014 Correspondence Analysis: Theory, Practice and New Strategies. John Wiley & Sons.
Gower J, Lubbe S, and le Roux, N 2011 Understanding Biplots. John Wiley & Sons.
Lombardo R Beh EJ Variants of Simple Correspondence Analysis. The R Journal (accepted).
}
\author{
Rosaria Lombardo and Eric J Beh
}
\note{
For classical graphical displays, both sets of coordinates are defined using principal coordinates (see Greenacre's terminology).
In biplot graphical displays, one set of coordinates is standard and the other is principal.
When the analysis is ordered, only a biplot makes sense. One set of coordinates consists of standard polynomial coordinates and
the other one is of principal polynomial coordinates.
}
\examples{
data(asbestos)
risasbestos<-CAvariants(asbestos, catype = "DOCA", firstaxis=1, lastaxis=2)
plot(risasbestos, plottype = "biplot", biptype = "row")
plot(risasbestos, plottype = "biplot", biptype = "row", ell = TRUE)
}
\keyword{multivariate}% __ONLY ONE__ keyword per line
|
24ebb0f73e5405037f1ca0e61d90e101f5c5f028
|
ed150ec4c87748ef3bbbf995e325cfdb900ec2e6
|
/server.R
|
d30bb2897b280a55f09f686cdad1da338f283660
|
[] |
no_license
|
jaiganeshp/GARPFRMFixedIncome
|
07684baafde612b8c56b16e01a52b61e5a219289
|
a8e7e6c5721edd3e6fef815f31ac4d58a7353184
|
refs/heads/master
| 2021-01-01T19:16:32.706915
| 2014-11-09T01:59:01
| 2014-11-09T01:59:01
| 23,296,178
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,692
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
#server.R for fixedIncome part of GARPFRM package
#Loading the required library files
library(xlsx)
library(shiny)
library(xts)
source("discountFactorArbitrage.R")
source("riskMetricsAndHedges.R")
# Modified version of GARPFRM's bondFullPrice.
# Prices a coupon bond between coupon dates using the partial-period
# (street) convention: discounts all remaining cash flows to the next coupon
# date t1, then discounts back by the day-count fraction to currentDate.
#
# bond        - list with m (coupons per year), face, couponRate
# yield       - annual yield, compounded m times per year
# cashFlowPd  - number of coupon payments remaining (including the one at t1)
# t0 / t1     - previous / next coupon dates
# currentDate - settlement date, between t0 and t1
#
# Returns list(dirty, clean, accruedInterest) with dirty = clean + accrued.
bondFullPrice <- function(bond, yield, cashFlowPd, t0, t1, currentDate){
  m <- bond$m
  face <- bond$face
  couponRate <- bond$couponRate
  perCoupon <- couponRate / m * face          # cash amount of one coupon
  daysToNext <- as.numeric(t1 - currentDate)  # settlement -> next coupon
  daysInPeriod <- as.numeric(t1 - t0)         # full coupon period length
  # present value (at t1) of the coupons after the one paid at t1
  laterCoupons <- 0
  if (cashFlowPd > 1) {
    for (k in seq_len(cashFlowPd - 1)) {
      laterCoupons <- laterCoupons + perCoupon / (1 + yield / m)^k
    }
  }
  # value at t1 of all remaining cash flows, discounted back the partial
  # period to the settlement date -> dirty (full) price
  valueAtNext <- perCoupon + laterCoupons +
    face / (1 + yield / m)^(cashFlowPd - 1)
  dirtyP <- valueAtNext / (1 + yield / m)^(daysToNext / daysInPeriod)
  # accrued interest: coupon prorated by elapsed days in the current period
  accrDays <- as.numeric(currentDate - t0)
  ai <- perCoupon * accrDays / daysInPeriod
  cleanP <- dirtyP - ai
  return(list(dirty = dirtyP, clean = cleanP, accruedInterest = ai))
}
# Load the bundled example data sets used by the Shiny server.
# NOTE(review): the .rds contents are not visible here; presumably bond
# market data and treasury-note quotes - confirm against the data/ folder.
bond.data <- readRDS("data/bond_Data.rds")
tnotes <-readRDS("data/tnotes.rds")
###Main Function####
#We can write all the functionalities of the application under the shinyServer function
shinyServer(function(input, output) {
#Function to calculate the discount factor
output$discount.factor<-renderPrint({
# Take a dependency on input$discount.factor_button
if(input$discount.factor_button == 0)
return("Input paramerters are initialized with default values. Please Press the Run Button to Start the Program")
# Use isolate() to avoid dependency on other inputs
isolate({
#Reading Values once the Run button is clicked
bondprice<-as.numeric(unlist(strsplit(input$bondprice,",")))
cr<-as.numeric(unlist(strsplit(input$cr,",")))
ttm<-as.numeric(unlist(strsplit(input$ttm,",")))
#Checking if all the inputs are numeric
if(all(is.na(c(cr,bondprice,ttm))==FALSE)==FALSE)
return("All the inputs must be numeric")
#Checking if the coupon rate is less than 100%
if(!all(cr<100))
return("Please enter a coupon rate less than 100%")
interval<-max(ttm)/length(ttm)
#Checking if the bonds have equally spaced maturity
if(!all(diff(ttm) == interval))
return("Bonds must have equally spaced maturity dates")
#Checking if the Arguments entered are all of equal length
if(length(cr)!=length(bondprice)){
return("Arguments must be of equal length") } else if(length(cr)!=length(ttm)){
return("Arguments must be of equal length")}
cr <-cr/100
#Generating the cash flow matrix
cashflow<-matrix(0,nrow=length(ttm),ncol=length(ttm))
for(i in 1:nrow(cashflow))
{
for(j in 1:ncol(cashflow))
{
if(ttm[i]-j*interval==0)
{
cashflow[i,j]= 100 * (1+ cr[i]/2)
break
}
else
cashflow[i,j]=100*cr[i]/2
}
}
bondprice<-matrix(bondprice,ncol=1)
#Getting the discount curve using Function defined in GARPFRM
DF<-discountFactor(bondprice,cashflow)
#Getting the spot rates curve using Function defined in GARPFRM
spotrates <- spotForwardRates(ttm,DF)
#Returning the output as List
list(BondPrice=round(bondprice,input$digits),CashFlow=round(cashflow,input$digits),
DiscountFactor=round(DF,input$digits),"Spot Rates"=round(spotrates,input$digits))
})
}
)
##Function to Calculate the full bond price####
output$bond.price<-renderPrint({
# Take a dependency on input$price_button
if (input$bond.price_button == 0)
return("Input paramerters are initialized with default values.Please Press the Run Button to Start the Program")
# Use isolate() to avoid dependency on other inputs
isolate({
#Reading input values
#next coupon date
t1<-input$t1
#previous coupon date
t0<-input$t0
#Maturity Date
tm<-input$tm
#Current Date
tn<-input$tn
#Coupon Rate
cr<-input$bcr
#Yield
y<-input$yield
#Checking if Yield and Coupon rate is greated than 100%
if(!all(c(cr,y)<100))
return("Please enter a percentage less than 100")
cr<-cr/100
y<-y/100
#Calculating the cash flow periods
n <- as.numeric(round((tm-t0)/365*2))
bond = bondSpec(face=100, m=2, couponRate = cr)
#Getting the Full Bond Price using function defined in GARPFRM
bp.tn<-bondFullPrice(bond, y, n, t0, t1, tn)
list(dirty=round(bp.tn$dirty,input$digits),clean=round(bp.tn$clean,input$digits),
accruedInterest=round(bp.tn$accruedInterest,input$digits))
})
}
)
#Function to Plot the Bond Price at various times with a constant discout curve####
# Renders a plot of the bond's dirty and clean (flat) prices evaluated at the
# previous coupon date, the settlement date and around the next coupon date,
# holding the yield (discount curve) fixed.  Uses the same inputs as
# output$bond.price plus bondSpec()/bondFullPrice() from GARPFRM.
output$bond.price_plot<-renderPlot({
  # Take a dependency on input$goButton
  if(input$bond.price_button == 0)
    return("")
  # Use isolate() to avoid dependency on other inputs
  isolate({
    #Reading input values
    #next coupon date
    t1<-input$t1
    #previous coupon date
    t0<-input$t0
    #Maturity Date
    tm<-input$tm
    #Current Date
    tn<-input$tn
    #Reading Bond Coupon Rate and Yield
    cr<-input$bcr
    y<-input$yield
    #Checking if Yield and Coupon rate is greated than 100%
    if(!all(c(cr,y)<100))
      return("Please enter a percentage less than 100")
    cr<-cr/100
    y<-y/100
    #Calculating the cash flow periods
    n <- as.numeric(round((tm-t0)/365*2))
    bond = bondSpec(face=100, m=2, couponRate = cr)
    # Date of the coupon after t1 (one 6-month step forward)
    t1.add<- seq(t1,length=2, by = "6 months")[2]
    #Getting the Full Bond Price at different times using function defined in GARPFRM
    bp.tn<-bondFullPrice(bond, y, n, t0, t1, tn)
    bp.t0<-bondFullPrice(bond, y, n, t0, t1, t0)
    bp.t1<-bondFullPrice(bond, y, n, t0, t1, t1)
    # Prices just after the t1 coupon is paid (one fewer cash flow remains)
    bp.t1.clean<-bondFullPrice(bond, y, n-1, t1, t1.add, t1)
    bp.t1.new<-bondFullPrice(bond, y, n-1, t1, t1.add, t1.add)
    #Merging all the prices
    # NOTE(review): bondFullPrice() appears to return a list, so `price` is a
    # list-matrix; rows 1 and 2 are assumed to hold the dirty and clean
    # components respectively -- confirm against GARPFRM's documentation.
    price<-cbind(bp.t0,bp.tn,bp.t1,bp.t1.clean,bp.t1.new)
    #Getting the clean and dirty prices from the price
    dirtyp <- price[1,]
    cleanp <- price[2,]
    #According to the time to maturity, changing the date vector
    # If the coupon after t1 falls past maturity, only three dates are shown;
    # dirtyp/cleanp are later truncated to length(date) to match.
    if ( t1.add > tm)
    {date <- c(t0,tn,t1)} else
    {date<-c(t0,tn,t1,t1,t1.add)}
    #Changing the y-axis limits
    ymin<- min(as.numeric(dirtyp[1:length(date)]),as.numeric(cleanp[1:length(date)]))
    ymax<- max(as.numeric(dirtyp[1:length(date)]),as.numeric(cleanp[1:length(date)]))
    #Plotting the data
    plot(x=date,y=dirtyp[1:length(date)],type="b",xaxt="n", xlab='Settlement Date', ylab="Price"
         ,ylim = c(ymin, ymax), col= 3, lty = 1, main = "Plot Showing Variation in Price with Constant Discount Curve")
    # Custom x-axis labelled with the actual settlement dates
    axis(side=1, at=date, labels=format(as.Date(date), '%Y-%m-%d'))
    lines(as.Date(date),cleanp[1:length(date)],type="l",lty=2, col = 4)
    legend("bottomleft",c("Dirty Price", "Flat Price"),lty=c(1,2),col=c(3,4), bty="n")
  })
}
)
### Function to calculate and display bond parameters ####
# Renders the price, yield to maturity, Macaulay/modified duration and
# convexity of a semi-annual coupon bond from a user-supplied discount curve.
output$bond.parameters<-renderPrint({
  # Wait for the Run button before computing anything
  if (input$present.value_button == 0)
    return("Input paramerters are initialized with default values. Please Press the Run Button to Start the Program")
  isolate({
    # Years to maturity, comma-separated discount curve, and coupon rate (%)
    yrs <- input$t
    disc <- as.numeric(unlist(strsplit(input$df, ",")))
    coupon <- input$pcr
    # Semi-annual cash-flow times out to maturity
    flow_times <- seq(from = 0.5, to = yrs, by = 0.5)
    # Non-numeric curve entries become NA after as.numeric()
    if (any(is.na(disc)))
      return("All the inputs must be numeric")
    # Coupon rate is entered as a percentage
    if (coupon > 100)
      return("Coupon Rate must be less than 100%")
    # The discount curve must cover every cash-flow date
    if (length(disc) < length(flow_times))
      return("Discount Curve Should be longer than time to maturiy")
    disc <- disc[seq_along(flow_times)]
    coupon <- coupon / 100
    # Bond analytics via the GARPFRM package
    bond <- bondSpec(flow_times, face = 100, m = 2, couponRate = coupon)
    price <- bondPrice(bond, disc)
    ytm <- bondYTM(bond, disc)
    duration <- bondDuration(bond, disc)
    convexity <- bondConvexity(bond, disc)
    # Modified duration under semi-annual compounding
    mduration <- duration / (1 + ytm / 2)
    # Return everything rounded to the requested precision
    list(BondPrice = round(price, input$digits),
         YTM = round(ytm, input$digits),
         MacaulayDuration = round(duration, input$digits),
         ModifiedDuration = round(mduration, input$digits),
         BondConvexity = round(convexity, input$digits))
  })
}
)
###Function to get the data for spot rates and discount curve rates####
# Reactive helper shared by the spot-rate table, the spot-rate plot and the
# data-table output.  It either validates and reads a user-uploaded bond
# dataset (Excel/CSV/Text/RData) or uses one of the bundled datasets, builds
# the semi-annual cash-flow matrix, and returns a list with the raw data,
# discount factors, spot/forward rates and the maturity time grid.  On any
# validation failure it returns an error string instead of a list (callers
# test the result with is.list()).
getDFSpotrate<-reactive(
  {
    #Taking the dependencies
    input$spot.rate_button
    isolate({
      #Validating the uploaded dataset
      if(input$userip == 'Upload a Dataset')
      {
        # NOTE(review): the extension checks below take element [2] after
        # splitting the file name on '.', which breaks for names containing
        # extra dots (e.g. "my.data.csv") -- confirm this is acceptable.
        if(input$filetype == "Excel"){
          format<-unlist(strsplit(input$file.excel$name,'[.]'))[2]
          if(format == 'xlsx' || format == 'xlsm'){
            dat<- read.xlsx(file=input$file.excel$datapath, sheetName=input$shname,
                            header=input$header, as.is=TRUE)
          } else {
            return ("Incorrect File Format. Please Upload an Excel File.")
          }
        } else if (input$filetype=="CSV"){
          format<-unlist(strsplit(input$file.csv$name,'[.]'))[2]
          if(format == 'csv'){
            dat <- read.table(file=input$file.csv$datapath, sep=",", header=input$header, as.is=TRUE)
          } else {
            return ("Incorrect File Format. Please Upload a CSV File.")
          }
        } else if (input$filetype == "Text"){
          format<-unlist(strsplit(input$file.txt$name,'[.]'))[2]
          if(format == 'txt'){
            dat <- read.table(file=input$file.txt$datapath, sep=input$sep, header=input$header, as.is=TRUE)
          } else {
            return ("Incorrect File Format. Please Upload a Text File.")
          }
        } else if (input$filetype=="RData"){
          format<-unlist(strsplit(input$file.rdata$name,'[.]'))[2]
          if(format == 'RData'){
            # Load into a fresh environment and grab the (single) object inside
            load(input$file.rdata$datapath,newEnv <- new.env())
            dat<-get(unlist(ls(newEnv)),envir=newEnv)
          } else {
            return ("Incorrect File Format. Please Upload a RData File.")
          }
        }
        #Taking the column names
        dat.names<- colnames(dat)
        #Checking if the header names are present in the files
        if(!is.element("IssueDate",dat.names) || !is.element("MaturityDate",dat.names)
           || !is.element("Coupon",dat.names) || !is.element("Ask",dat.names)
           || !is.element("Bid",dat.names) )
          return ("Please Check the header names In the file.")
        #Checking the maturity date format
        if(any(is.na(as.Date(as.character(dat[,"MaturityDate"])))))
          return ("Maturity Date Column is not properly formatted")
        dat[,"MaturityDate"]<-as.Date(dat[,"MaturityDate"])
        # Spacing (in years, to 1 dp) between the first pair of maturities
        step.size = as.numeric(round(diff(dat[,"MaturityDate"])/365,1))[1]
        #Checking the length bewtween maturity date
        if (!all(as.numeric(round(diff(dat[,"MaturityDate"])/365,1))==step.size )){
          return ("Maturity Dates must be equall spaced")}
      } else{
        # Bundled example datasets (bond.data / tnotes loaded elsewhere)
        switch(input$dataset,"T-Notes & Bonds" = dat<-bond.data,
               "T-Notes" = dat<-tnotes)
      }
      #Taking the number of rows of the data and generating the cash flow matrix
      # Row i holds bond i's semi-annual coupons up to its maturity, with the
      # face value (100) added on the diagonal (the maturity payment).
      n = nrow(dat)
      CF = matrix(0, nrow = n, ncol = n)
      for(i in 1:n)
      {
        CF[i, 1:i] = dat[i,"Coupon"]/2
      }
      diag(CF) = rep(100, n) + diag(CF)
      #Extracting th discount factor
      # Mid price = (Bid + Ask) / 2.
      # NOTE(review): dat[,"Bid"] is a vector while dat["Ask"] is a one-column
      # data frame; the sum still works element-wise, but the mixed indexing
      # looks accidental -- confirm.
      DF<-discountFactor(as.matrix((dat[,"Bid"]+dat["Ask"])/2),CF)
      #Merging a discount factor of 1 to to account for discount factor at present time
      DF <- c(1,DF)
      step.size = as.numeric(round(diff(dat[,"MaturityDate"])/365,1))[1]
      time = seq(from=0,by=step.size,length=n+1)
      #Calculating the spot and forward rates
      rates = spotForwardRates(time,DF)
      #Giving output as a list
      data<-list(dat=dat,DF=DF,rates=rates,time=time)
    })
  })
#### Function to display discount factors, spot rates and forward rates ####
output$spot.rate<-renderPrint({
  # Nothing to show before the first button press
  if (input$spot.rate_button == 0)
    return("Please select Initial dataset or upload your own dataset. Then Press the Run Button to Start the Program")
  isolate({
    # getDFSpotrate() yields a list on success, an error string otherwise
    res <- getDFSpotrate()
    if (is.list(res)) {
      # Round all quantities to the requested number of digits
      # ("DsicountFactor" spelling is the name shown in the app and is kept)
      list(DsicountFactor = round(as.vector(res$DF), input$digits),
           SpotRates = round(res$rates[, 1], input$digits),
           ForwardRates = round(res$rates[, 2], input$digits))
    } else {
      # Pass the validation error message straight through to the UI
      res
    }
  })
}
)
## Function to plot the spot rate and the discount curve ####
output$spot.rate_plot<-renderPlot({
  # Nothing to draw before the first button press
  if (input$spot.rate_button == 0)
    return("")
  isolate({
    # Shared reactive: list on success, error string on failure
    res <- getDFSpotrate()
    if (is.list(res))
    {
      # Both panels share the same x-axis label
      x_lab <- "Maturity (In Years)"
      # Draw the two curves side by side, then restore the layout
      par(mfrow = c(1, 2))
      plot(res$time, res$rates[, 1], type = "b", xlab = x_lab, ylab = "Rate", main = "Spot Rates")
      plot(res$time, res$DF, type = "b", xlab = x_lab, ylab = "Rate", main = "Discount Factors")
      par(mfrow = c(1, 1))
    }
  })
}
)
# Function to render the underlying bond dataset as a paginated table
output$data.table<-renderDataTable({
  # Nothing to show before the first button press
  if (input$spot.rate_button == 0)
    return("")
  isolate({
    # Only render when the shared helper succeeded (it returns a string on error)
    res <- getDFSpotrate()
    if (is.list(res)) {
      res$dat
    }
  })
}, options = list(lengthMenu = c(10,15,20,25), pageLength = 10)
)
})#shinyserver
|
064a0230c4ae2d40e3133ad114f45d4c1937c1e4
|
7cfc0077f680bfaf3f059dea77b8318c84404d22
|
/R/helpFunctions.R
|
60fee48d5e6a7b3b5ffaf4db105e8d3151842202
|
[] |
no_license
|
fischuu/hoardeR
|
0bd32b223aa9187e0eacae67eb894f51cf15865b
|
5f2f57edb871f4f69de0ae780a274079b23fe22d
|
refs/heads/master
| 2023-04-28T17:55:40.880308
| 2023-04-14T05:55:47
| 2023-04-14T05:55:47
| 23,508,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 871
|
r
|
helpFunctions.R
|
timeToSec <- function(x){
  # Convert a single "HH:MM:SS" string to the total number of seconds.
  # hms[1] = hours, hms[2] = minutes, hms[3] = seconds.
  hms <- as.numeric(strsplit(x, ":", fixed = TRUE)[[1]])
  hms[3] + 60 * hms[2] + 3600 * hms[1]
}
secToTime <- function(x){
  # Convert a number of seconds into a zero-padded "HH:MM:SS" string.
  # Fractional seconds are truncated via floor(), matching timeToSec()'s
  # inverse for whole-second inputs.
  h <- x %/% 3600
  rem <- x - h * 3600
  m <- rem %/% 60
  s <- floor(rem - m * 60)
  # %02d pads single digits with a leading zero (wider values pass through)
  sprintf("%02d:%02d:%02d", as.integer(h), as.integer(m), as.integer(s))
}
timeStat <- function(x){
  # Average a vector of "HH:MM:SS" strings and return the mean as "HH:MM:SS".
  #
  # Args:
  #   x: character vector of durations in "HH:MM:SS" format.
  # Returns:
  #   A single "HH:MM:SS" string holding the rounded mean duration.
  #
  # vapply() replaces the original grow-inside-a-loop pattern and, unlike the
  # old `for (i in 1:length(x))` loop, does not misbehave for length-zero x.
  timeInSec <- vapply(x, timeToSec, numeric(1), USE.NAMES = FALSE)
  # sum/length == mean; round() matches the original behavior exactly
  avgTime <- round(mean(timeInSec))
  secToTime(avgTime)
}
|
d8cfe591d48601923a271abe78da4078befadc9b
|
cfb14029f442727b0e94e773a4262dfd1966da61
|
/v7_mers_model_stoch_exp.R
|
5908f46bc08066ea5af2ea00914739fa35c079b6
|
[] |
no_license
|
karissawhiting/mers_model
|
4465c6606d92d2af4280d01673fe54051097a993
|
e69b980bad4b036d7517499db2992a64b6c4b18c
|
refs/heads/master
| 2021-05-03T12:08:32.601631
| 2018-06-20T18:05:53
| 2018-06-20T18:05:53
| 120,495,377
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,259
|
r
|
v7_mers_model_stoch_exp.R
|
library(tidyverse)
library('fitR')
library(reshape2)
library('coda')
library(lattice)
library(MCMCvis)
library(wesanderson)
library(deSolve)
library(adaptivetau)
#Data Clean -----------------------------
# Build a daily epidemic-curve data frame (`epi`) from the raw MERS case
# timing data: per-day counts of exposures, symptom onsets and confirmations.
load("./data-raw/mers_times.RData")
# Melt the three date columns (exposure, onset, confirmation) into long form
mers_times2<- melt(mers_times[,2:4])
# Cross-tabulate: one row per day value, one count column per event type
epi <- dcast(mers_times2, value ~ variable, length) %>%
  rename("times" = value, "exp" = exp_date, "onset" = onset_date, "conf" = conf_date) %>%
  mutate(times = as.numeric(times))
# Complete the 1..56 day grid so days with no events appear with zero counts
times = data.frame(times = 1:56)
epi = full_join(times, epi, by = "times")
epi[is.na(epi)] <- 0
epi<- epi[1:56,]
rm(mers_times, mers_times2, times)
save(epi, file = "./data/epi.RData")
# Coding Model Expanded -----------------------------
# Redefine the fitR example `SIR` fitmodel as an S-E-I-C model with extra
# cumulative counters (Exp/Inc/Con) for exposures, incidence and confirmations.
# NOTE(review): `SIR` must already exist (e.g. from fitR's example models)
# before these assignments run -- confirm it is loaded earlier in the workflow.
SIR$name <- c("SEIC model")
SIR$state.names <- c("S", "E", "I", "C", "Exp", "Inc", "Con")
SIR$theta.names <- c("beta", "L", "D1")
# Stochastic simulator using fitR's simulateModelStochastic() (adaptivetau).
SIR$simulate <- function (theta, init.state, times)
{
  # Transitions: S->E (exposure), E->I (incidence), I->C (confirmation);
  # Exp/Inc/Con accumulate the corresponding event counts.
  SIR_transitions <- list(c(S = -1, E = 1, Exp = 1), c(E = -1, I = 1, Inc = 1),
                          c(I = -1, C = 1, Con = 1))
  # Per-transition rates: beta*S*I/N (infection), E/L (latency exit),
  # I/D1 (confirmation delay)
  SIR_rateFunc <- function(state, theta, t) {
    beta <- theta[["beta"]]
    L <- theta[["L"]]
    D1<- theta[["D1"]]
    S <- state[["S"]]
    E <- state[["E"]]
    I <- state[["I"]]
    C <- state[["C"]]
    Exp <- state[["Exp"]]
    Inc <- state[["Inc"]]
    Con <- state[["Con"]]
    N <- S + E + I + C
    return(c(beta * S * I/N, E/L, I/D1))
  }
  # put incidence at 0 in init.state
  # NOTE(review): the comment above says 0 but the counters are initialised
  # to 1 -- confirm which is intended.
  init.state["Exp"] <- 1
  init.state["Inc"] <- 1
  init.state["Con"] <- 1
  traj <- simulateModelStochastic(theta, init.state, times,
                                  SIR_transitions, SIR_rateFunc)
  # Convert cumulative incidence into per-step incidence
  traj$Inc <- c(0, diff(traj$Inc))
  return(traj)
}
# Example simulation run.  `w` appears in theta but is not read by the model
# defined above.
theta<- c(beta = .99, L = 8.19 , D1 = 4.05, w = .09)
init.state <- c(S = 10000, E = 0, I = 1, C = 0)
times <- 1:100
sim<- SIR$simulate(theta, init.state, times)
# NOTE(review): everything from here to the end of this file appears to be
# console output from printing a fitR "fitmodel" object ($rPointObs, $dprior,
# $dPointObs and the trailing attr(,"class") line) that was pasted into the
# script.  It is NOT valid R syntax and will error if this file is sourced --
# confirm, and either delete it or assign these functions to the model object
# explicitly (e.g. SIR$rPointObs <- function(...) {...}).
$rPointObs
function (model.point, theta)
{
  obs.point <- rpois(n = 1, lambda = theta[["rho"]] * model.point[["Inc"]])
  return(c(obs = obs.point))
}
$dprior
function (theta, log = FALSE)
{
  log.prior.R0 <- dunif(theta[["R0"]], min = 1, max = 50, log = TRUE)
  log.prior.latent.period <- dunif(theta[["D_lat"]], min = 0,
                                   max = 10, log = TRUE)
  log.prior.infectious.period <- dunif(theta[["D_inf"]], min = 0,
                                       max = 15, log = TRUE)
  log.prior.temporary.immune.period <- dunif(theta[["D_imm"]],
                                             min = 0, max = 50, log = TRUE)
  log.prior.probability.long.term.immunity <- dunif(theta[["alpha"]],
                                                    min = 0, max = 1, log = TRUE)
  log.prior.reporting.rate <- dunif(theta[["rho"]], min = 0,
                                    max = 1, log = TRUE)
  log.sum = log.prior.R0 + log.prior.latent.period + log.prior.infectious.period +
    log.prior.temporary.immune.period + log.prior.probability.long.term.immunity +
    log.prior.reporting.rate
  return(ifelse(log, log.sum, exp(log.sum)))
}
$dPointObs
function (data.point, model.point, theta, log = FALSE)
{
  return(dpois(x = data.point[["obs"]], lambda = theta[["rho"]] *
                 model.point[["Inc"]], log = log))
}
attr(,"class")
[1] "fitmodel"
|
c45560e4eca04eb8f83ac95c88e33e8269cbd874
|
519e300a10436ea199ca199dfb91c844e2432ede
|
/man/genomatrix.Rd
|
2208e4c2363464d48c26a6be2f321e6f98afa435
|
[] |
no_license
|
pappewaio/AllelicImbalance
|
6c558fce11f0cb041d8f0402260fb21b51e7cab4
|
bbf1df1c789b46894b92181c434e8320bd10e2b1
|
refs/heads/master
| 2020-04-10T21:12:47.139342
| 2020-04-06T20:24:23
| 2020-04-06T20:24:23
| 16,422,421
| 3
| 2
| null | 2015-03-10T21:58:16
| 2014-01-31T22:18:27
|
R
|
UTF-8
|
R
| false
| true
| 393
|
rd
|
genomatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllelicImbalance-package.R
\docType{data}
\name{genomatrix}
\alias{genomatrix}
\title{genomatrix object}
\description{
genomatrix is an example of a matrix with genotypes
}
\examples{
##load eample data (Not Run)
#data(genomatrix)
}
\author{
Jesper R. Gadin, Lasse Folkersen
}
\keyword{data}
\keyword{genotype}
|
d1e804919bf9293b905ed4e5b800db73b426720f
|
2f8b37ecd5dfefd44304002bde4c9aa996a35442
|
/magisterskie/5_rok/2_semestr/warsztaty_jaroszewicz/weterani/kody/ostatecznie.R
|
5358adb84cf95651a4b8e1bb388e9bc19a6d7771
|
[] |
no_license
|
sommermarta/studia
|
7def3caec6badab276a400022eb332e1d81d195e
|
6bf26b27d4130a672a9413a6aab0f1b3631f1ac3
|
refs/heads/master
| 2020-12-31T03:42:00.440497
| 2016-02-27T13:06:37
| 2016-02-27T13:06:37
| 33,592,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,482
|
r
|
ostatecznie.R
|
# ostatecznie ("final": score the hold-out competition dataset)
ostat <- read.csv("dane\\cup98test_noclass.txt")
# Insert two NA placeholder columns after column 470 so the test data matches
# the column layout the training pipeline expects (names are placeholders).
ostat2 <- cbind(ostat[,1:470], cos1=NA, cos2=NA, ostat[471:ncol(ostat)])
modyfikacja5 <- function(dane){
  # Drop a fixed set of columns (by position) that the final model ignores.
  usun <- c(2, 4, 232:234, 236:237, 241:242, 244:247, 248:252)
  dane[, -usun]
}
# Apply the same cleaning pipeline used on the training data.
# NOTE(review): modyfikacja/modyfikacja3/modyfikacja2 are defined elsewhere
# (presumably the training script) -- they must be in the workspace.
ostat3 <- modyfikacja(ostat2)
ostat4 <- modyfikacja3(ostat3)
ostat5 <- modyfikacja2(ostat4)
ostat5 <- modyfikacja5(ostat5)
# Commented-out exploratory helpers and checks kept for reference below.
# hist_lub_bar <- function(i){
# if(is.numeric(ostat5[,i])){
# hist(ostat5[,i])
# } else barplot(table(ostat5[,i]))
# }
#
# num_char_fac <- function(i){
# if(is.numeric(ostat5[,i])){
# "numeric"
# } else if(is.character(ostat5[,i])){
# "character"
# } else "factor"
# }
#
# i <- 18
# hist_lub_bar(i)
# names(ostat5)[i]
# num_char_fac(i)
# head(ostat5[,i])
# table(ostat5[,i])
# sum(is.na(ostat5[,i]))
# mean(ostat5[,i], na.rm=TRUE)
#
# levostat <- numeric(ncol(ostat5))
# for(i in 1:ncol(ostat5)){
# levostat[i] <- length(levels(ostat5[,i]))
# }
#
# levtest <- numeric(ncol(ostat5))
# for(i in 1:ncol(ostat5)){
# levtest[i] <- length(levels(testowy3[,i]))
# }
#
# names(testowy3)[which(levostat!=levtest)]
# testowy3$RFA_3
# ostat5$RFA_3
# Score the cleaned hold-out data with the previously fitted naive Bayes
# model (`bayes`, created elsewhere) and collect the indices predicted as 1.
bayes_pred <- predict(bayes,
                      newdata=ostat5,
                      type="class")
zzz <- which(bayes_pred==1)
# g <- file("komu_wyslac_ostat.txt", "w")
# writeLines(as.character(zzz), g)
# close(g)
#
#
# predict(drzewo, newdata=ostat4, type="class")
#
# dim(ostat5)
# length(levels(testowy3[,3]))
|
e9917da43be01afb4bbd7ffa5c1dbdbd826022cd
|
f8e13f883b565f1e7c0ec35dca6d5fd7e2753b19
|
/code/sexual_debut_survival_model.R
|
4dcd6f377161a642534a28f6104c3a3f76b0a1db
|
[] |
no_license
|
kklot/KPsize
|
27d6963b21eeb8ff0d5696d5fd4ac30534c9a0e2
|
a9759c3b2cc059553fa0f428d6a19064e0b84fea
|
refs/heads/master
| 2023-04-23T06:24:40.124999
| 2021-05-10T10:43:16
| 2021-05-10T10:43:16
| 276,559,871
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,718
|
r
|
sexual_debut_survival_model.R
|
rm(list=ls())
# NOTE(review): rm(list = ls()) at the top of a script is discouraged (clears
# the user's workspace) -- confirm it is required by the run conventions.
require(INLA)
require(R.utils)
require(data.table)
# data
# -------------------------------------------------------------------------
# Age-at-first-sex (AFS) survey records plus the country neighbourhood matrix
mydat <- fread('data/AFS_full.csv.bz2')
nbmat <- readRDS('data/nbmat.rds')
# included missing data countries to predict
mydat[, .N, ISO_A2]
# survey weight scaled by sample size
mydat[, .(svy_weight = sum(scaled_w), .N), ISO_A2]
# INLA options
#-----------------------------------------------------------------------------
inla.setOption("enable.inla.argument.weights", TRUE)
# Enable INLA's experimental log-logistic survival likelihood
assign("enable.model.likelihood.loglogisticsurv", TRUE,
       envir=INLA:::inla.get.inlaEnv())
c_inla <- list(strategy = "gaussian", int.strategy='eb')
# Model and priors
#-----------------------------------------------------------------------------
c_fami <- list(variant=1, hyper=list(theta=list(param=c(18, 1), prior='loggamma')))
# Survival time (AFS) with an iid sub-region effect and a Besag spatial
# effect over the country neighbour graph
fm <- inla.surv(afs, event) ~ 1 +
  f(subrgid, model='iid', hyper=list(prec = list(prior = "pc.prec"))) +
  f(cid, model="besag", graph=nbmat, constr=TRUE, adjust.for.con.comp = FALSE, scale.model=TRUE)
inla.setOption('smtp', 'tauc')
# Fit separate model for male and female, each with 10 threads and assumed log
# logistic distribution for the survival time
# NOTE(review): `c_comp` is passed to control.compute below but is never
# defined in this script -- it must come from elsewhere or these calls error.
fit_all_female = inla(fm,
                      family='loglogisticsurv',
                      data = mydat[sex==2],
                      weight = mydat[sex==2, scaled_w],
                      control.inla = c_inla,
                      control.family = c_fami,
                      control.compute = c_comp,
                      num.threads = 10, verbose=TRUE)
fit_all_male = inla(fm,
                    family='loglogisticsurv',
                    data = mydat[sex==1],
                    weight = mydat[sex==1, scaled_w],
                    control.inla = c_inla,
                    control.family = c_fami,
                    control.compute = c_comp,
                    num.threads = 10, verbose=TRUE)
|
011c88aee10ece7c76ddbae45c028040fc57f2f2
|
a8c0a97feee7951a66ddc206c9d40a2b05e24e90
|
/quiz1.R
|
62fac4cb981570f3489abd49a74803d17781cbed
|
[] |
no_license
|
vedantmane/SwiftKey-PredictiveTextAnalyis
|
f90d6ed6ae46f0901fd5350dbf82c176b7366753
|
f6963eb2adb55daf4dcca7cec17d45622a322001
|
refs/heads/master
| 2022-11-24T22:16:15.913161
| 2020-07-27T05:11:32
| 2020-07-27T05:11:32
| 279,011,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,931
|
r
|
quiz1.R
|
#Data Science Capstone - Week 1 Quiz
# Answers the week-1 quiz questions by reading the three en_US corpus files
# (blogs, news, twitter) and computing simple counts.  All paths are relative
# to the working directory; the files are large, so reads are slow.
#Q1 File Size of en_US.blogs.txt
# Size reported in megabytes
print(file.size("./final/en_US/en_US.blogs.txt") / (1024 * 1024))
#Q2 Number of Lines in en_US.twitter.txt
read <- readLines("./final/en_US/en_US.twitter.txt", skipNul = TRUE, encoding = "UTF-8")
length(read)
#Q3 Length of longest line in any of the three en_US data sets
readBlogs <- readLines("./final/en_US/en_US.blogs.txt", skipNul = TRUE, encoding = "UTF-8", warn = FALSE)
readNews <- readLines("./final/en_US/en_US.news.txt", skipNul = TRUE, encoding = "UTF-8", warn = FALSE)
readTwitter <- readLines("./final/en_US/en_US.twitter.txt", skipNul = TRUE, encoding = "UTF-8", warn = FALSE)
# Character count of every line in each corpus
charsBlogs <- sapply(readBlogs, nchar)
charsNews <- sapply(readNews, nchar)
charsTwitter <- sapply(readTwitter, nchar)
maxBlogs <- max(charsBlogs)
maxNews <- max(charsNews)
maxTwitter <- max(charsTwitter)
# Table of per-source maxima; report the source holding the overall maximum
data <- data.frame(c("Blogs", "News", "Twitter"), c(maxBlogs, maxNews, maxTwitter))
names(data) <- c("Source", "Max")
row <- which.max(data[,"Max"])
data[row,]
#Q4 Number of lines where the word "love" (all lowercase) occurs
# by the number of lines the word "hate" (all lowercase) occurs
# in the en_US Twitter dataset
loveLines <- sum(grepl(pattern = "love", fixed = TRUE, ignore.case = FALSE, x = readTwitter[1:length(readTwitter)]))
hateLines <- sum(grepl(pattern = "hate", fixed = TRUE, ignore.case = FALSE, x = readTwitter[1:length(readTwitter)]))
loveLines/hateLines
#Q5 Tweet about biostats in the dataset
readTwitter[which(grepl(pattern = "biostats", fixed = TRUE,
                        ignore.case = FALSE, x = readTwitter[1:length(readTwitter)]))]
#Q6 "A computer once beat me at chess, but it was no match for me at kickboxing" phrase in dataset
phrase <- "A computer once beat me at chess, but it was no match for me at kickboxing"
sum(grepl(pattern = phrase, fixed = TRUE, ignore.case = FALSE, x = readTwitter[1:length(readTwitter)]))
|
18573f32438e5042018d67027bf9f01618492ff0
|
5b345b8a1c60a40853dc67543b4b23635ca52af8
|
/vignettes/Generowanie_danych.R
|
5a76a562d063b347a9221600ceb5b5d96636423a
|
[] |
no_license
|
tzoltak/MLAKdane
|
9dd280e628a1434ef3e0433a7adab8ee6653e258
|
3ff0567b98729648cd54cbb118d55d6bcd5d7bd3
|
refs/heads/master
| 2021-01-12T08:24:27.554590
| 2016-11-14T15:39:47
| 2016-11-14T15:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,667
|
r
|
Generowanie_danych.R
|
##########
# Load packages and set constants.
# The variables "okienkaMin", "okienkaMax" and "okienkaSufiksy" define the
# possible time windows, e.g. combining -11 from okienkaMin, 0 from okienkaMax
# and "_m1" from okienkaSufiksy means a window from 11 months before the
# diploma up to the diploma month (inclusive), with variables computed for
# that window receiving the suffix "_m1".
# "okienkaIter" lists which of the possible windows are actually computed.
devtools::load_all(".")
library(dplyr)
dataMin = '2014-01-01'
dataMax = '2015-09-30' # 2015-09-30/2015-03-31 for new/old data
okienkaMin = c(-11, 1, 13, 1)
okienkaMax = c(0, 12, 24, 1000)
okienkaSufiksy = c('_m1', '_p1', '_p2', '')
okienkaIter = c(2, 4)
plikZapisu = 'dane/nowe/nowe'
probka = 0
##########
# Prepare ZUS data, BDL statistics mapped to postal codes (PNA), OPI data
# (the ZDAU dataset), etc.
# pnaPowiaty = polacz_pna_powiaty(przygotuj_pna(), przygotuj_powiaty(), dataMin, dataMax)
pnaPowiaty = przygotuj_pna_powiaty_mb(dataMin, dataMax)
jednostki = przygotuj_jednostki()
zdau = przygotuj_zdau()
# Optional subsampling for test runs (probka = sample size, 0 = full data)
if (probka > 0) {
  zdau = zdau %>%
    sample_n(probka)
}
# Graduates only (typ == 'A')
zdauAbs = zdau %>%
  filter_(~typ %in% 'A') %>%
  select_('id_zdau')
zus = przygotuj_zus(dataMin, dataMax)
save(zus, file = 'cache/ZUS.RData', compress = TRUE)
# load('cache/ZUS.RData')
##########
# Compute the auxiliary job-loss dataset utrataEtatu
utrataEtatu = przygotuj_utrata_pracy(zus, dataMax)
save(utrataEtatu, file = 'cache/utrataEtatu.RData', compress = TRUE)
# load('cache/utrataEtatu.RData')
##########
# Merge ZUS data with OPI data and county statistics from BDL
baza = polacz_zus_zdau(zus, zdau, pnaPowiaty, dataMin, dataMax)
save(baza, file = 'cache/baza.RData', compress = TRUE)
# load('cache/baza.RData')
miesieczne = agreguj_do_miesiecy(baza, zdau)
save(miesieczne, file = 'cache/miesieczne.RData', compress = TRUE)
# load('cache/miesieczne.RData')
##########
# Compute variables independent of the time window (STUDYP*, TP_*, *_K, *_R, etc.)
studyp = oblicz_studyp(zdau)
czas = oblicz_zmienne_czasowe(baza, utrataEtatu)
stale = oblicz_stale(baza, zdau)
##########
# Compute variables within each selected time window
for (i in okienkaIter) {
  okienkoMin = okienkaMin[i]
  okienkoMax = okienkaMax[i]
  cat(okienkoMin, '-', okienkoMax)
  okienkoMies = oblicz_okienko(miesieczne, okienkoMin, okienkoMax, dataMin, dataMax)
  okienkoBaza = oblicz_okienko(baza, okienkoMin, okienkoMax, dataMin, dataMax)
  abs = agreguj_do_okresu(okienkoMies)
  np = oblicz_pracodawcy(okienkoBaza)
  up = oblicz_utrata_etatu(okienkoMies, utrataEtatu)
  razem = zdauAbs %>%
    full_join(abs) %>%
    full_join(np) %>%
    full_join(up) %>%
    mutate_(len = ~coalesce(as.integer(len), 0L))
  # Sanity checks: no rows gained/lost, ids remain unique
  stopifnot(
    nrow(zdauAbs) == nrow(razem),
    length(unique(razem$id_zdau)) == nrow(razem)
  )
  rm(abs, np, up);gc()
  # Attach the window suffix to every column except the id
  colnames(razem) = sub('^id_zdau.*$', 'id_zdau', paste0(colnames(razem), okienkaSufiksy[i]))
  # For the open-ended window (max = 1000) also add residence variables
  if (okienkoMax == 1000) {
    zam0 = oblicz_zamieszkanie(okienkoBaza, jednostki, TRUE)
    colnames(zam0) = sub('^id_zdau.*$', 'id_zdau', paste0(colnames(zam0), '0'))
    zam1 = oblicz_zamieszkanie(okienkoBaza, jednostki, FALSE)
    colnames(zam1) = sub('^id_zdau.*$', 'id_zdau', paste0(colnames(zam1), '1'))
    razem = razem %>%
      inner_join(zam0) %>%
      inner_join(zam1)
    rm(zam0, zam1); gc()
  }
  save(razem, file = paste0('cache/razem', i, '.RData'), compress = TRUE)
  rm(razem);gc()
}
##########
# Join everything computed above and save the final dataset
wszystko = oblicz_stale_czasowe(zdau, dataMax) %>%
  filter_(~typ %in% 'A') %>%
  full_join(studyp) %>%
  full_join(czas) %>%
  full_join(stale) %>%
  left_join(przygotuj_kierunki()) %>%
  left_join(jednostki %>% select(jednostka_id, jednostka, uczelnianazwa)) %>%
  rename_(kierunek = 'kierunek_id', uczelnia = 'uczelnia_id')
for (i in okienkaIter) {
  load(paste0('cache/razem', i, '.RData'))
  wszystko = full_join(wszystko, razem)
}
stopifnot(
  nrow(wszystko) == nrow(zdau %>% filter_(~typ %in% 'A'))
)
colnames(wszystko) = toupper(colnames(wszystko))
save(wszystko, file = paste0(plikZapisu, '.RData'), compress = TRUE)
##########
# Monthly dataset (per-month indicators within a wide -60..+60 month window)
okienkoMies = oblicz_okienko(miesieczne, -60, 60, dataMin, dataMax) %>%
  filter_(~okres >= okres_min & okres <= okres_max) %>%
  select_('id_zdau', 'okres', 'if_x_s', 'if_x_stprg', 'wzg_ez_e', 'wzg_ez_z', 'wzg_ryzbez', 'ez_z', 'ez_e', 'if_p', 'if_e', 'if_s') %>%
  mutate_(okres = ~okres2data(okres))
names(okienkoMies) = toupper(paste0(names(okienkoMies), '_M'))
save(okienkoMies, file = paste0(plikZapisu, '_mies.RData'), compress = TRUE)
Sys.time()
|
72086fc145865a6838c02c535338eab606d944b3
|
dc197f99632f5d12b1fdb4e9770436eab21f4c00
|
/diff.vs.fatalities.R
|
5a51a6de979a33cb1c48f1f473eb82206a3325ef
|
[] |
no_license
|
caluchko/hurricane-baby-names
|
443d349407ec5eec51e5559f0bcbce3971c4a8d5
|
574c248505cac4b47461e5585e5639193b93a159
|
refs/heads/master
| 2020-05-27T17:50:01.546947
| 2014-06-10T14:08:46
| 2014-06-10T14:08:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,052
|
r
|
diff.vs.fatalities.R
|
# plot hurricane name diff vs fatalities
# excludes hurricane katrina
# to include katrina use 'hurricanes' in place of 'hurricanes2'
# need to source 'babycanes.R' before running this script
# NOTE(review): `hurricanes` and dplyr's filter() are expected to be provided
# by sourcing babycanes.R -- confirm before running this file standalone.
library(ggplot2)
hurricanes2 <- filter(hurricanes, Name != "Katrina")
# Scatter of hurricane fatalities vs. post-hurricane change in baby-name
# usage, with marginal rugs, a linear fit, and labels for notable storms.
p1 <- ggplot(hurricanes2, aes(x = alldeaths, y = dif))+
  geom_point(colour = "blue", alpha = .5, size = 3)+
  geom_rug(size = .3)+
  stat_smooth(method = lm, colour = "steelblue")+
  xlab("Hurricane fatalities")+
  ylab("Percent change in baby name usage after hurricane")+
  ggtitle("Do changes in baby name popularity correlate with hurricane fatalities?")+
  annotate("text", x = 256, y = -45, label = "Camille", size = 4)+
  annotate("text", x = 200, y = -15, label = "Dianne", size = 4)+
  annotate("text", x = 159, y = -36, label = "Sandy", size = 4)+
  #annotate("text", x = 1833, y = -55, label = "Katrina", size = 4)+
  geom_hline(yintercept = 0, color = "darkgray")+
  theme_bw()
# makes the same plot with a different facet for M and F
p2 <- p1 + facet_wrap(~ Gender_MF)
|
904f31b3a20e3ff30eee1ebca2184c6bed669814
|
b6dfcb8110de60ba4f48e35c97b0e21ac11f43b9
|
/build/0059_build_goyal_data.R
|
9e856896cd13408b220868e483e9199eadec4cbe
|
[] |
no_license
|
wlsnhng/of-dollars-and-data
|
715c10af032f668daadbd604106f43d920357637
|
6a8b052dc54348b868429f8767baef5d36073966
|
refs/heads/master
| 2020-06-13T20:28:56.632529
| 2019-07-01T12:37:58
| 2019-07-01T12:37:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,258
|
r
|
0059_build_goyal_data.R
|
cat("\014") # Clear your console
rm(list = ls()) #clear your environment
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged; they
# appear to be this repo's standard build-script preamble -- confirm before
# changing.
########################## Load in header file ######################## #
setwd("~/git/of_dollars_and_data")
# header.R presumably defines importdir/localdir used below
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(dplyr)
########################## Start Program Here ######################### #
# Read Goyal's monthly stock/bond dataset, parse yyyymm into a Date, and
# convert each return series to real (inflation-adjusted) terms by
# subtracting monthly CPI inflation; then save the tidied series.
df <- readRDS(paste0(importdir, "59-goyal-data/58-goyal-stock-bond-data.Rds")) %>%
  mutate(char_date = as.character(yyyymm),
         date = as.Date(paste0(substring(char_date, 1, 4),
                               "-",
                               substring(char_date, 5, 6),
                               "-01"),
                        format = "%Y-%m-%d"),
         cpi = as.numeric(infl),
         stock = (Index/lag(Index) - 1) - cpi,
         corp_bond = as.numeric(corpr) - cpi,
         rf = (as.numeric(Rfree) - cpi),
         lt_bond = as.numeric(ltr) - cpi) %>%
  filter(!is.nan(cpi)) %>%
  select(date, stock, lt_bond, corp_bond, rf, cpi)
saveRDS(df, paste0(localdir, "59-goyal-stock-bond-data.Rds"))
# ############################ End ################################## #
|
0e994d00f837f22f7d3f31a5c6c3069a4b95ff46
|
c9654746619bee160987af3fb537c15918c3cf14
|
/volume of the sphere.R
|
0e5483575feeeb190e7edda510003752bb318dea
|
[] |
no_license
|
lakhyaraj/DA_LAB_ASSIGNMENT
|
197f033584b3cd5b05ce802448233facf7a9b4cb
|
7897a9948d4465bc65ed5116f0cd0ee18e281413
|
refs/heads/main
| 2023-07-02T19:49:35.425187
| 2021-08-02T05:25:59
| 2021-08-02T05:25:59
| 391,824,961
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 138
|
r
|
volume of the sphere.R
|
# Compute and print the volume of a sphere for a user-supplied radius.
# Interactive script: reads the radius from the console.
Radius <- readline(prompt = "Enter any radius : ")
Radius <- as.double(Radius)
# V = (4/3) * pi * r^3.  Use R's built-in `pi` constant instead of the 3.14
# approximation, and exponentiation instead of repeated multiplication.
volume <- 4 / 3 * pi * Radius^3
print(paste(volume))
|
964856625bdb837cddca2b023c0a71d0598171cb
|
9e3969324cab013b3e6d6f3b91d592de58c30196
|
/scripts/bmediatR_moderated_mediation.R
|
24d3f2edc37a0ef8aa34bd811301dc6f5cb31464
|
[] |
no_license
|
MadeleineGastonguay/svenson_hf_DO
|
157b04716ebc94d3a8fb8c7da2cba7dad3d1778b
|
6308e51558e3aaf4a651806962e66b921e255bc5
|
refs/heads/main
| 2023-08-22T17:35:05.552279
| 2021-10-18T18:12:45
| 2021-10-18T18:12:45
| 418,558,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 68,260
|
r
|
bmediatR_moderated_mediation.R
|
#' Definitions and encodings for models included in the moderated mediation analysis through Bayesian model selection
#'
#' This function prints out a table describing the models considered in the moderated mediation analysis.
#'
#' @return Called for its side effect of printing the model table via
#'   \code{writeLines()}; returns \code{NULL} invisibly.
#' @export
#' @examples moderation_model_info()
moderation_model_info <- function(){
  # The table below is a fixed reference text: each hypothesis (H1-H16) and
  # case (c1-c32) is encoded by presence (1) / absence (0) of the DAG edges
  # 'X->m, y->m, X->y', with (*) for reverse causation and (i) for moderation.
  writeLines(c("likelihood models for all hypothesis",
               "hypotheses encoded by presence (1) or absence (0) of 'X->m, y->m, X->y' edges on the DAG",
               "(*) denotes reverse causation 'm<-y', i denotes moderation",
               "H1: '-,0,0' / y does not depend on X or m",
               "H2: '-,1,0' / y depends on m but not X",
               "H3: '-,0,1' / y depends on X but not m",
               "H4: '-,1,1' / y depends on X and m",
               "H5: '0,-,-' / m does not depend on X",
               "H6: '1,-,-' / m depends on X",
               "H7: '0,*,-' / m depends on y but not X",
               "H8: '1,*,-' / m depends on X and y",
               "H9: 'i,-,-' / m depends on X and is moderated by t",
               "H10: 'i,*,-' / m depends on X and y and relation between X and m is moderated by t",
               "H11: '0,*i,-' / m depends on y but not X and is moderated by t",
               "H12: '1,*i,-' / m depends on X and y and y -> m is moderated by t",
               "H13: '-,0,i' / y depends on X but not m and is moderated by t",
               "H14: '-,1,i' / y depends on X and m and X->y is moderated by t",
               "H15: '-,i,0' / y depends on m but not X and is moderated by t",
               "H16: '-,i,1' / y depends on X and m and m -> y is moderated by t",
               "all include covariates Z and t",
               "",
               "combinations of hypotheses for all cases",
               "cases encoded by presence (1) or absence (0) of 'X->m, m->y, X->y' edges on the DAG",
               "(*) denotes reverse causation 'm<-y' and (i) denotes moderation",
               "c1: '0,0,0' / H1 and H5",
               "c2: '0,1,0' / H2 and H5",
               "c3: '1,0,0' / H1 and H6",
               "c4: '1,1,0' / H2 and H6 - complete mediation",
               "c5: '0,0,1' / H3 and H5",
               "c6: '0,1,1' / H4 and H5",
               "c7: '1,0,1' / H3 and H6 - colocalization",
               "c8: '1,1,1' / H4 and H6 - partial mediation",
               "c9: '0,*,0' / H1 and H7",
               "c10: '1,*,0' / H1 and H8",
               "c11: '0,*,1' / H3 and H7 - complete med (reactive)",
               "c12: '1,*,1' / H3 and H8",
               "c13: '0,i,0' / H5 and H15",
               "c14: 'i,0,0' / H1 and H9",
               "c15: 'i,1,0' / H2 and H9 - moderated mediation A",
               "c16: '1,i,0' / H15 and H6 - moderated mediation B",
               "c17: '0,0,i' / H13 and H5",
               "c18: '0,1,i' / H14 and H5",
               "c19: '0,i,1' / H16 and H5",
               "c20: '1,0,i' / H13 and H6",
               "c21: 'i,0,1' / H3 and H9",
               "c22: '1,1,i' / H14 and H6",
               "c23: 'i,1,1' / H4 and H9",
               "c24: '1,i,1' / H16 and H6",
               "c25: '0,*i,0' / H1 and H11",
               "c26: 'i,*,0' / H1 and H10",
               "c27: '1,*i,0' / H1 and H12",
               "c28: '0,*,i' / H13 and H7",
               "c29: '0,*i,1' / H3 and H11",
               "c30: '1,*,i' / H13 and H8",
               "c31: 'i,*,1' / H3 and H10",
               "c32: '1,*i,1' / H3 and H12"))
}
#' Summaries of posterior model probabilities function
#'
#' This function takes the log posterior probability of the data (posterior likelihood) for the various models, the log prior model probabilities, and
#' returns log posterior odds
#'
#' @param ln_prob_data Log posterior likelihoods under the various models, returned by bmediatR().
#' @param ln_prior_c Log prior model probabilities. If posterior_summary() is being used for a non-default posterior odds
#' summary, the log prior model probabilities used with bmediatR() are stored in its output.
#' @param c_numerator The index of models to be summed in the numerator of the posterior odds. Models and their order provided with
#' model_summary().
#' @export
#' @examples posterior_summary()
posterior_summary_moderation <- function(ln_prob_data,
                                         ln_prior_c,
                                         c_numerator){
  #helper: log odds of the summed numerator cases against all remaining cases
  ln_odds <- function(ln_p, numerator){
    ln_odds_numerator <- apply(ln_p[, numerator, drop = FALSE], 1, matrixStats::logSumExp)
    ln_odds_denominator <- apply(ln_p[, -numerator, drop = FALSE], 1, matrixStats::logSumExp)
    ln_odds_numerator - ln_odds_denominator
  }
  #ensure c_numerator is a list (a bare vector is treated as a single odds summary)
  if (!is.list(c_numerator)){
    c_numerator <- list(c_numerator)
  }
  #presets for ln_prior_c
  if (ln_prior_c[1] == "complete"){
    ln_prior_c <- c(rep(0, 8), rep(-Inf, 4), rep(0, 12), rep(-Inf, 8))
  } else if (ln_prior_c[1] == "partial"){
    ln_prior_c <- c(rep(-Inf, 4), rep(0, 4), rep(-Inf, 4), rep(-Inf, 4), rep(0, 8), rep(-Inf, 8))
  } else if (ln_prior_c[1] == "reactive"){
    ln_prior_c <- rep(0, 32)
  }
  #ensure ln_prior_c sums to 1 on the probability scale and that it is a matrix
  #(one row per mediator, recycled from a vector when a single prior is given)
  if (is.matrix(ln_prior_c)){
    ln_prior_c <- t(apply(ln_prior_c, 1, function(x){x - matrixStats::logSumExp(x)}))
  } else {
    ln_prior_c <- ln_prior_c - matrixStats::logSumExp(ln_prior_c)
    ln_prior_c <- matrix(ln_prior_c, nrow(ln_prob_data), length(ln_prior_c), byrow = TRUE)
  }
  #compute posterior probabilities for all cases
  #cases encoded by presence (1) or absence (0) of 'X->m, m->y, X->y' edges on the DAG
  #(*) denotes reverse causation 'm<-y' and (i) denotes moderation
  #c1: '0,0,0' / H1 and H5
  #c2: '0,1,0' / H2 and H5
  #c3: '1,0,0' / H1 and H6
  #c4: '1,1,0' / H2 and H6 - complete mediation
  #c5: '0,0,1' / H3 and H5
  #c6: '0,1,1' / H4 and H5
  #c7: '1,0,1' / H3 and H6 - colocalization
  #c8: '1,1,1' / H4 and H6 - partial mediation
  #c9: '0,*,0' / H1 and H7
  #c10: '1,*,0' / H1 and H8
  #c11: '0,*,1' / H3 and H7 - Reactive
  #c12: '1,*,1' / H3 and H8
  #c13: '0,i,0' / H5 and H15
  #c14: 'i,0,0' / H1 and H9
  #c15: 'i,1,0' / H2 and H9
  #c16: '1,i,0' / H15 and H6
  #c17: '0,0,i' / H13 and H5
  #c18: '0,1,i' / H14 and H5
  #c19: '0,i,1' / H16 and H5
  #c20: '1,0,i' / H13 and H6
  #c21: 'i,0,1' / H3 and H9
  #c22: '1,1,i' / H14 and H6
  #c23: 'i,1,1' / H4 and H9
  #c24: '1,i,1' / H16 and H6
  #c25: '0,*i,0' / H1 and H11
  #c26: 'i,*,0' / H1 and H10
  #c27: '1,*i,0' / H1 and H12
  #c28: '0,*,i' / H13 and H7
  #c29: '0,*i,1' / H3 and H11
  #c30: '1,*,i' / H13 and H8
  #c31: 'i,*,1' / H3 and H10
  #c32: '1,*i,1' / H3 and H12
  ln_post_c <- cbind(ln_prob_data[,1] + ln_prob_data[,5] + ln_prior_c[,1],
                     ln_prob_data[,2] + ln_prob_data[,5] + ln_prior_c[,2],
                     ln_prob_data[,1] + ln_prob_data[,6] + ln_prior_c[,3],
                     ln_prob_data[,2] + ln_prob_data[,6] + ln_prior_c[,4],
                     ln_prob_data[,3] + ln_prob_data[,5] + ln_prior_c[,5],
                     ln_prob_data[,4] + ln_prob_data[,5] + ln_prior_c[,6],
                     ln_prob_data[,3] + ln_prob_data[,6] + ln_prior_c[,7],
                     ln_prob_data[,4] + ln_prob_data[,6] + ln_prior_c[,8],
                     ln_prob_data[,1] + ln_prob_data[,7] + ln_prior_c[,9],
                     ln_prob_data[,1] + ln_prob_data[,8] + ln_prior_c[,10],
                     ln_prob_data[,3] + ln_prob_data[,7] + ln_prior_c[,11],
                     ln_prob_data[,3] + ln_prob_data[,8] + ln_prior_c[,12],
                     ln_prob_data[,5] + ln_prob_data[,15] + ln_prior_c[,13],
                     ln_prob_data[,1] + ln_prob_data[,9] + ln_prior_c[,14],
                     ln_prob_data[,2] + ln_prob_data[,9] + ln_prior_c[,15],
                     ln_prob_data[,15] + ln_prob_data[,6] + ln_prior_c[,16],
                     ln_prob_data[,13] + ln_prob_data[,5] + ln_prior_c[,17],
                     ln_prob_data[,14] + ln_prob_data[,5] + ln_prior_c[,18],
                     ln_prob_data[,16] + ln_prob_data[,5] + ln_prior_c[,19],
                     ln_prob_data[,13] + ln_prob_data[,6] + ln_prior_c[,20],
                     ln_prob_data[,3] + ln_prob_data[,9] + ln_prior_c[,21],
                     ln_prob_data[,14] + ln_prob_data[,6] + ln_prior_c[,22],
                     ln_prob_data[,4] + ln_prob_data[,9] + ln_prior_c[,23],
                     ln_prob_data[,16] + ln_prob_data[,6] + ln_prior_c[,24],
                     ln_prob_data[,1] + ln_prob_data[,11] + ln_prior_c[,25],
                     ln_prob_data[,1] + ln_prob_data[,10] + ln_prior_c[,26],
                     ln_prob_data[,1] + ln_prob_data[,12] + ln_prior_c[,27],
                     ln_prob_data[,13] + ln_prob_data[,7] + ln_prior_c[,28],
                     ln_prob_data[,3] + ln_prob_data[,11] + ln_prior_c[,29],
                     ln_prob_data[,13] + ln_prob_data[,8] + ln_prior_c[,30],
                     ln_prob_data[,3] + ln_prob_data[,10] + ln_prior_c[,31],
                     ln_prob_data[,3] + ln_prob_data[,12] + ln_prior_c[,32]
  )
  #normalize by the log marginal likelihood of each mediator
  ln_ml <- apply(ln_post_c, 1, matrixStats::logSumExp)
  ln_post_c <- ln_post_c - ln_ml
  colnames(ln_post_c) <- c("0,0,0",
                           "0,1,0",
                           "1,0,0",
                           "1,1,0",
                           "0,0,1",
                           "0,1,1",
                           "1,0,1",
                           "1,1,1",
                           "0,*,0",
                           "1,*,0",
                           "0,*,1",
                           "1,*,1",
                           "0,i,0",
                           "i,0,0",
                           "i,1,0",
                           "1,i,0",
                           "0,0,i",
                           "0,1,i",
                           "0,i,1",
                           "1,0,i",
                           "i,0,1",
                           "1,1,i",
                           "i,1,1",
                           "1,i,1",
                           "0,*i,0",
                           "i,*,0",
                           "1,*i,0",
                           "0,*,i",
                           "0,*i,1",
                           "1,*,i",
                           "i,*,1",
                           "1,*i,1")
  rownames(ln_post_c) <- rownames(ln_prob_data)
  #compute prior odds for each combination of cases
  ln_prior_odds <- sapply(c_numerator, ln_odds, ln_p = ln_prior_c)
  ln_prior_odds <- matrix(ln_prior_odds, ncol = length(c_numerator))
  rownames(ln_prior_odds) <- rownames(ln_post_c)
  #compute posterior odds for each combination of cases
  ln_post_odds <- sapply(c_numerator, ln_odds, ln_p = ln_post_c)
  ln_post_odds <- matrix(ln_post_odds, ncol = length(c_numerator))
  rownames(ln_post_odds) <- rownames(ln_post_c)
  #BUGFIX: previously tested is.null(c_numerator), which never labeled unnamed
  #input; use the list names when present, otherwise collapse the case indices
  if (is.null(names(c_numerator))) {
    colnames(ln_post_odds) <- colnames(ln_prior_odds) <- sapply(c_numerator, paste, collapse = ",")
  } else {
    colnames(ln_post_odds) <- colnames(ln_prior_odds) <- names(c_numerator)
  }
  #return results
  list(ln_post_c = ln_post_c, ln_post_odds = ln_post_odds, ln_prior_odds = ln_prior_odds, ln_ml = ln_ml)
}
#' Column indeces for commonly used posterior odds
#'
#' This helper function returns the columns of the log posterior model probabilities to be summed for
#' commonly desired log posterior odds summaries.
#'
#' @param odds_type The desired posterior odds.
#' @export
#' @examples return_preset_odds_index_moderation()
return_preset_odds_index_moderation <- function(odds_type = c("mediation",
                                                              "partial",
                                                              "complete",
                                                              "colocal",
                                                              "y_depends_x",
                                                              "reactive",
                                                              "moderation",
                                                              "moderation_a",
                                                              "moderation_b",
                                                              "moderation_b_reactive",
                                                              "moderation_c")) {
  # Lookup table: each preset summary maps to the posterior case columns
  # (see model_summary()) whose probabilities are summed in the odds numerator.
  case_columns <- list(
    mediation = c(4, 8),
    partial = 8,
    complete = 4,
    colocal = 7,
    y_depends_x = c(4:8, 11, 12),
    reactive = 9:12,
    moderation = 13:32,
    moderation_a = c(14, 15, 21, 23, 26, 31),
    moderation_b = c(13, 16, 19, 24),
    moderation_b_reactive = c(25, 27, 29, 32),
    moderation_c = c(17, 18, 20, 22, 28, 30)
  )
  # Subset preserves the order of the requested odds types.
  case_columns[odds_type]
}
## Function to process data and optionally align them
process_data_moderation <- function(y, M, X, t,
                                    Z = NULL, Z_y = NULL, Z_M = NULL,
                                    w = NULL, w_y = NULL, w_M = NULL,
                                    align_data = TRUE,
                                    verbose = TRUE) {
  # Ensure y is a vector
  if (is.matrix(y)) { y <- y[,1] }
  # Ensure X, M, t, Z, Z_y, and Z_M are matrices
  X <- as.matrix(X)
  M <- as.matrix(M)
  t <- as.matrix(t)
  if (!is.null(Z)) { Z <- as.matrix(Z) }
  if (!is.null(Z_y)) { Z_y <- as.matrix(Z_y) }
  if (!is.null(Z_M)) { Z_M <- as.matrix(Z_M) }
  # Process covariate matrices; zero-column placeholders keep downstream
  # indexing uniform when no covariates are supplied
  if (is.null(Z_y)) { Z_y <- matrix(NA, length(y), 0); rownames(Z_y) <- names(y) }
  if (is.null(Z_M)) { Z_M <- matrix(NA, nrow(M), 0); rownames(Z_M) <- rownames(M) }
  if (!is.null(Z)) {
    # Shared covariates Z are prepended to the outcome- and mediator-specific sets
    if (align_data) {
      Z_y <- cbind(Z, Z_y[rownames(Z),])
      Z_M <- cbind(Z, Z_M[rownames(Z),])
    } else {
      Z_y <- cbind(Z, Z_y)
      Z_M <- cbind(Z, Z_M)
    }
  }
  # Process weight vectors; default is equal weights of 1
  if (is.null(w)) {
    if (is.null(w_y)) { w_y <- rep(1, length(y)); names(w_y) <- names(y) }
    if (is.null(w_M)) { w_M <- rep(1, nrow(M)); names(w_M) <- rownames(M) }
  } else {
    # A shared w supercedes w_y and w_M
    w_y <- w_M <- w
  }
  if (align_data) {
    # M and Z_M can have NAs
    overlapping_samples <- Reduce(f = intersect, x = list(names(y),
                                                          rownames(X),
                                                          rownames(Z_y),
                                                          names(w_y),
                                                          rownames(t)))
    if (length(overlapping_samples) == 0 | !any(overlapping_samples %in% unique(c(rownames(M), rownames(Z_M), names(w_M))))) {
      stop("No samples overlap. Check rownames of M, X, t, Z (or Z_y and Z_M) and names of y and w (or w_y and w_M).", call. = FALSE)
    } else if (verbose) {
      writeLines(text = c("Number of overlapping samples:", length(overlapping_samples)))
    }
    # Ordering: restrict every input to the overlapping samples, in one order
    y <- y[overlapping_samples]
    M <- M[overlapping_samples,, drop = FALSE]
    X <- X[overlapping_samples,, drop = FALSE]
    t <- t[overlapping_samples,, drop = FALSE]
    Z_y <- Z_y[overlapping_samples,, drop = FALSE]
    Z_M <- Z_M[overlapping_samples,, drop = FALSE]
    w_y <- w_y[overlapping_samples]
    w_M <- w_M[overlapping_samples]
  }
  # Drop observations with missing y, X, or t and update n
  complete_y <- !is.na(y)
  complete_X <- !apply(is.na(X), 1, any)
  # BUGFIX: previously re-checked is.na(X), so rows with a missing moderator
  # value were retained; check the moderator matrix t instead
  complete_t <- !apply(is.na(t), 1, any)
  complete <- complete_y & complete_X & complete_t
  y <- y[complete]
  M <- M[complete,, drop = FALSE]
  X <- X[complete,, drop = FALSE]
  t <- t[complete,, drop = FALSE]
  Z_y <- Z_y[complete,, drop = FALSE]
  Z_M <- Z_M[complete,, drop = FALSE]
  w_y <- w_y[complete]
  w_M <- w_M[complete]
  # Drop columns of Z_y and Z_M that are invariant (zero variance after subsetting)
  Z_y_drop <- which(apply(Z_y, 2, function(x) var(x)) == 0)
  Z_M_drop <- which(apply(Z_M, 2, function(x) var(x)) == 0)
  if (length(Z_y_drop) > 0) {
    if (verbose) {
      writeLines(paste("Dropping invariants columns from Z_y:", colnames(Z_y)[Z_y_drop]))
    }
    Z_y <- Z_y[,-Z_y_drop, drop = FALSE]
  }
  if (length(Z_M_drop) > 0) {
    if (verbose) {
      writeLines(paste("Dropping invariants columns from Z_M:", colnames(Z_M)[Z_M_drop]))
    }
    Z_M <- Z_M[,-Z_M_drop, drop = FALSE]
  }
  # Return processed data
  list(y = y,
       M = M,
       X = X,
       t = t,
       Z_y = Z_y, Z_M = Z_M,
       w_y = w_y, w_M = w_M)
}
#' Convert preset options to log prior case probabilities function
#'
#' This function takes the log prior case probabilities, and if a preset is provided, converts it into the formal log prior case
#' probability.
#'
#' @param ln_prior_c Log prior case probabilities. If posterior_summary_moderation() is being used for a non-default posterior odds
#' summary, the log prior case probabilities used with bmediatR() are stored in its output.
#' @export
#' @examples return_ln_prior_c_from_presets_moderation()
return_ln_prior_c_from_presets_moderation <- function(ln_prior_c) {
  # Map a preset keyword onto an explicit 32-case vector of log prior
  # probabilities; anything else (e.g. a fully specified numeric vector of
  # log priors) is returned unchanged.
  if (ln_prior_c[1] == "complete") {
    return(c(rep(0, 8), rep(-Inf, 4), rep(0, 12), rep(-Inf, 8)))
  }
  if (ln_prior_c[1] == "partial") {
    return(c(rep(-Inf, 4), rep(0, 4), rep(-Inf, 8), rep(0, 8), rep(-Inf, 8)))
  }
  if (ln_prior_c[1] == "reactive") {
    return(rep(0, 32))
  }
  ln_prior_c
}
#' Bayesian model selection for moderated mediation analysis function
#'
#' This function takes an outcome (y), candidate mediators (M), a candidate moderator (t), and a driver as a design matrix (X) to perform a
#' Bayesian model selection analysis for mediation.
#'
#' @param y Vector or single column matrix of an outcome variable. Single outcome variable expected.
#' Names or rownames must match across M, X, t, Z, Z_y, Z_M, w, w_y, and w_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs.
#' @param M Vector or matrix of mediator variables. Multiple mediator variables are supported.
#' Names or rownames must match across y, X, t, Z, Z_y, Z_M, w, w_y, and w_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs.
#' @param X Design matrix of the driver. Names or rownames must match across y, M, t, Z, Z_y, Z_M, w, w_y, and w_M (if provided) when align_data = TRUE.
#' If align_data = FALSE, dimensions and order must match across inputs. One common application is for X to represent genetic information at a QTL,
#' either as founder strain haplotypes or variant genotypes, though X is generalizable to other types of variables.
#' @param t Design matrix of the moderator for the outcome and mediator. Names or rownames must match across y, M, Z, Z_y, Z_M, w, w_y, and w_M (if provided) when align_data = TRUE.
#' If align_data = FALSE, dimensions and order must match across inputs.
#' @param Z DEFAULT: NULL. Design matrix of covariates that influence the outcome and mediator variables.
#' Names or rownames must match to those of y, M, X, t, w, w_y, and w_M (if provided) when align_data = TRUE. If align_data=FALSE,
#' dimensions and order must match across inputs. If Z is provided, it supercedes Z_y and Z_M.
#' @param Z_y DEFAULT: NULL. Design matrix of covariates that influence the outcome variable.
#' Names or rownames must match to those of y, M, X, t, Z_M, w, w_y, and w_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs. If Z is provided, it supercedes Z_y and Z_M.
#' @param Z_M DEFAULT: NULL. Design matrix of covariates that influence the mediator variables.
#' Names or rownames must match across y, M, X, t, Z_y, w, w_y, and w_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs. If Z is provided, it supercedes Z_y and Z_M.
#' @param w DEFAULT: NULL. Vector or single column matrix of weights for individuals in analysis that applies to both
#' y and M. Names must match across y, M, X, t, Z, Z_y, and Z_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs. A common use would be for an analysis of strain means, where w
#' is a vector of the number of individuals per strain. If no w, w_y, or w_M is given, observations are equally weighted as 1s for y and M.
#' If w is provided, it supercedes w_y and w_M.
#' @param w_y DEFAULT: NULL. Vector or single column matrix of weights for individuals in analysis, specific to the measurement
#' of y. Names must match across y, M, X, t, Z, Z_y, Z_M, and w_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs. A common use would be for an analysis of strain means, where y and M
#' are summarized from a different number of individuals per strain. w_y is a vector of the number of individuals per strain used to
#' measure y. If no w_y (or w) is given, observations are equally weighted as 1s for y.
#' @param w_M DEFAULT: NULL. Vector or single column matrix of weights for individuals in analysis, specific to the measurement
#' of M. Names must match across y, M, X, t, Z, Z_y, Z_M, and w_y (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs. A common use would be for an analysis of strain means, where y and M
#' are summarized from a different number of individuals per strain. w_M is a vector of the number of individuals per strain used to
#' measure M. If no w_M (or w) is given, observations are equally weighted as 1s for M.
#' @param tau_sq_mu DEFAULT: 1000. Variance component for the intercept. The DEFAULT represents a diffuse prior, analogous to
#' a fixed effect term.
#' @param tau_sq_Z DEFAULT: 1000. Variance component for the covariates encoded in Z. The DEFAULT represents a diffuse prior, analogous
#' to fixed effect terms.
#' @param phi_sq DEFAULT: c(1, 1, 1). Each element of (a, b, c) represents one of the relationships being evaluated for mediation,
#' specifically the ratio of signal to noise. a is the effect of X on M, b is the effect of M on y, and c is the effect of X on y.
#' The DEFAULT represents relationships that explain 50% of the variation in the outcome variable.
#' @param ln_prior_c DEFAULT: "complete". The prior log case probabilities. See model_info() for description of likelihoods and their
#' combinations into cases. Simplified pre-set options are available, including "complete", "partial", and "reactive".
#' @param align_data DEFAULT: TRUE. If TRUE, expect vector and matrix inputes to have names and rownames, respectively. The overlapping data
#' will then be aligned, allowing the user to not have to reduce data to overlapping samples and order them.
#' @export
#' @examples bmediatR_moderation()
bmediatR_moderation <- function(y, M, X, t,
                                Z = NULL, Z_y = NULL, Z_M = NULL,
                                w = NULL, w_y = NULL, w_M = NULL,
                                kappa = 0.001, lambda = 0.001,
                                tau_sq_mu = 1000,
                                tau_sq_Z = 1000,
                                tau_sq_t = 1000, # Prior for the marginal effect of t
                                phi_sq = c(1, 1, 1, 1, 1, 1), # prior for edges a, b, c, and the interaction term for edge a, b, and c
                                ln_prior_c = "complete",
                                options_X = list(sum_to_zero = TRUE, center = FALSE, scale = FALSE),
                                options_t = list(sum_to_zero = TRUE, center = FALSE, scale = FALSE),
                                align_data = TRUE,
                                verbose = TRUE) {
  #convert preset keywords ("complete", "partial", "reactive") into explicit
  #log prior case probabilities for the 32 cases
  ln_prior_c <- return_ln_prior_c_from_presets_moderation(ln_prior_c = ln_prior_c)
  #ensure ln_prior_c sums to 1 on the probability scale
  if (is.matrix(ln_prior_c)){
    ln_prior_c <- t(apply(ln_prior_c, 1, function(x){x - matrixStats::logSumExp(x)}))
  } else {
    ln_prior_c <- ln_prior_c - matrixStats::logSumExp(ln_prior_c)
  }
  #optionally align data
  #BUGFIX: Z and w are documented to supercede Z_y/Z_M and w_y/w_M but were
  #previously not forwarded to process_data_moderation (silently ignored)
  processed_data <- process_data_moderation(y = y, M = M, X = X, t = t,
                                            Z = Z, Z_y = Z_y, Z_M = Z_M,
                                            w = w, w_y = w_y, w_M = w_M,
                                            align_data = align_data,
                                            verbose = verbose)
  y <- processed_data$y
  M <- processed_data$M
  X <- processed_data$X
  t <- processed_data$t
  Z_y <- processed_data$Z_y
  Z_M <- processed_data$Z_M
  w_y <- processed_data$w_y
  w_M <- processed_data$w_M
  #dimension of y
  n <- length(y)
  #dimension of Z's
  p_y <- ncol(Z_y)
  p_M <- ncol(Z_M)
  #scale y, M, and Z
  y <- c(scale(y))
  M <- apply(M, 2, scale)
  if (p_y > 0) { Z_y <- apply(Z_y, 2, scale) }
  if (p_M > 0) { Z_M <- apply(Z_M, 2, scale) }
  #optionally use sum-to-zero contrast for X
  #recommended when X is a matrix of factors, with a column for every factor level
  if (options_X$sum_to_zero == TRUE) {
    C <- sumtozero_contrast(ncol(X))
    X <- X %*% C
  }
  #optionally use sum-to-zero contrast for t
  #recommended when t is a matrix of factors, with a column for every factor level
  if (options_t$sum_to_zero == TRUE) {
    C <- sumtozero_contrast(ncol(t))
    t <- t %*% C
  }
  #optionally center and scale X
  X <- apply(X, 2, scale, center = options_X$center, scale = options_X$scale)
  #optionally center and scale t
  t <- apply(t, 2, scale, center = options_t$center, scale = options_t$scale)
  #dimension of X and t
  d <- ncol(X)
  k <- ncol(t)
  #interaction between X and moderator
  tX <- model.matrix(~ 0 + X:t)
  #dimension of interaction
  ix <- ncol(tX)
  #interaction between y and moderator (for reactive models); uses scaled y
  ty <- model.matrix(~ 0 + y:t)
  #dimension of interaction
  iy <- ncol(ty)
  #column design matrix for mu
  ones <- matrix(1, n)
  #begin Bayesian calculations
  if (verbose) { print("Initializing", quote = FALSE) }
  #reformat priors: one entry per hypothesis H1-H16
  kappa <- rep(kappa, 16)
  lambda <- rep(lambda, 16)
  tau_sq_mu <- rep(tau_sq_mu, 16)
  tau_sq_Z <- rep(tau_sq_Z, 16)
  tau_sq_t <- rep(tau_sq_t, 16)
  phi_sq_X <- c(NA, NA, phi_sq[3], phi_sq[3], NA, phi_sq[1], NA, phi_sq[1],
                phi_sq[1], phi_sq[1], NA, phi_sq[1], phi_sq[3], phi_sq[3], NA, phi_sq[3])
  phi_sq_m <- c(NA, phi_sq[2], NA, phi_sq[2], NA, NA, NA, NA,
                NA, NA, NA, NA, NA, phi_sq[2], phi_sq[2], phi_sq[2])
  phi_sq_y <- c(NA, NA, NA, NA, NA, NA, phi_sq[2], phi_sq[2],
                NA, phi_sq[2], phi_sq[2], phi_sq[2], NA, NA, NA, NA)
  #effect size prior for interaction terms
  #NOTE(review): the assignment of phi_sq[4:6] across H9-H16 below does not
  #obviously follow the "interaction for edges a, b, c" ordering described in
  #the phi_sq argument comment - confirm against the intended model; values
  #left exactly as originally specified
  phi_sq_int <- c(rep(NA, 8),
                  phi_sq[5], phi_sq[5], phi_sq[6], phi_sq[6], phi_sq[4], phi_sq[4], phi_sq[6], phi_sq[6])
  #identify likelihoods that are not supported by the prior
  #will not compute cholesky or likelihood for these
  #helper: TRUE when any sample places non-zero prior mass on any of the cases
  #requiring the hypothesis; handles vector and per-mediator matrix priors
  #(the matrix case was previously vector-indexed, i.e. read column-major)
  prior_supported <- function(case_index) {
    if (is.matrix(ln_prior_c)) {
      any(!is.infinite(ln_prior_c[, case_index]))
    } else {
      any(!is.infinite(ln_prior_c[case_index]))
    }
  }
  #BUGFIX: the original case lists omitted the moderation cases (e.g. H13 is
  #required by c17 and c20, not only c28 and c30), so hypotheses supported by
  #the default "complete" prior could be skipped and left at -Inf
  calc_ln_prob_data <- rep(NA, 16)
  calc_ln_prob_data[1] <- prior_supported(c(1, 3, 9, 10, 14, 25, 26, 27))
  calc_ln_prob_data[2] <- prior_supported(c(2, 4, 15))
  calc_ln_prob_data[3] <- prior_supported(c(5, 7, 11, 12, 21, 29, 31, 32))
  calc_ln_prob_data[4] <- prior_supported(c(6, 8, 23))
  calc_ln_prob_data[5] <- prior_supported(c(1, 2, 5, 6, 13, 17, 18, 19))
  calc_ln_prob_data[6] <- prior_supported(c(3, 4, 7, 8, 16, 20, 22, 24))
  calc_ln_prob_data[7] <- prior_supported(c(9, 11, 28))
  calc_ln_prob_data[8] <- prior_supported(c(10, 12, 30))
  calc_ln_prob_data[9] <- prior_supported(c(14, 15, 21, 23))
  calc_ln_prob_data[10] <- prior_supported(c(26, 31))
  calc_ln_prob_data[11] <- prior_supported(c(25, 29))
  calc_ln_prob_data[12] <- prior_supported(c(27, 32))
  calc_ln_prob_data[13] <- prior_supported(c(17, 20, 28, 30))
  calc_ln_prob_data[14] <- prior_supported(c(18, 22))
  calc_ln_prob_data[15] <- prior_supported(c(13, 16))
  calc_ln_prob_data[16] <- prior_supported(c(19, 24))
  #likelihood models for all hypotheses
  #hypotheses encoded by presence (1) or absence (0) of 'X->m, y->m, X->y' edges on the DAG
  #(*) denotes reverse causation 'm<-y', i denotes moderation
  #H1: '-,0,0' / y does not depend on X or m
  #H2: '-,1,0' / y depends on m but not X
  #H3: '-,0,1' / y depends on X but not m
  #H4: '-,1,1' / y depends on X and m
  #H5: '0,-,-' / m does not depend on X
  #H6: '1,-,-' / m depends on X
  #H7: '0,*,-' / m depends on y but not X
  #H8: '1,*,-' / m depends on X and y
  #H9: 'i,-,-' / m depends on X and is moderated by t
  #H10: 'i,*,-' / m depends on X and y and relation between X and M is moderated by t
  #H11: '0,*i,-' / m depends on y but not X and is moderated by t
  #H12: '1,*i,-' / m depends on X and y and y -> M is moderated by t
  #H13: '-,0,i' / y depends on X but not m and is moderated by t
  #H14: '-,1,i' / y depends on X and m and X->y is moderated by t
  #H15: '-,i,0' / y depends on m but not X and is moderated by t
  #H16: '-,i,1' / y depends on X and m and m -> y is moderated by t
  #all include covariates Z and t
  #design matrices for H1,H3,H5-H13 complete cases (do not depend on m)
  X1 <- cbind(ones, Z_y, t)
  X3 <- cbind(ones, X, Z_y, t)
  X5 <- cbind(ones, Z_M, t)
  X6 <- cbind(ones, X, Z_M, t)
  X7 <- cbind(X5, y)
  X8 <- cbind(X6, y)
  X9 <- cbind(X6, tX)
  X10 <- cbind(X8, tX)
  X11 <- cbind(X7, ty)
  X12 <- cbind(X8, ty)
  X13 <- cbind(X3, tX)
  #check if all scale hyperparameters are identical for H1 and H5
  #implies sigma1 and sigma5 identical, used to reduce computations
  sigma5_equal_sigma1 <- all(lambda[1] == lambda[5],
                             tau_sq_mu[1] == tau_sq_mu[5],
                             tau_sq_Z[1] == tau_sq_Z[5],
                             identical(Z_y, Z_M),
                             identical(w_y, w_M))
  #check if all scale hyperparameters are identical for H3 and H6
  #implies sigma3 and sigma6 identical, used to reduce computations
  sigma6_equal_sigma3 <- all(lambda[3] == lambda[6],
                             tau_sq_mu[3] == tau_sq_mu[6],
                             tau_sq_Z[3] == tau_sq_Z[6],
                             identical(Z_y, Z_M),
                             identical(w_y, w_M))
  #check if all scale hyperparameters are identical for H9 and H13
  #implies sigma9 and sigma13 identical, used to reduce computations
  sigma13_equal_sigma9 <- all(lambda[9] == lambda[13],
                              tau_sq_mu[9] == tau_sq_mu[13],
                              tau_sq_Z[9] == tau_sq_Z[13],
                              identical(Z_y, Z_M),
                              identical(w_y, w_M),
                              tau_sq_t[9] == tau_sq_t[13])
  #prior variance vectors (diagonals) for H1-H16, matching design matrix columns
  v1 <- c(tau_sq_mu[1], rep(tau_sq_Z[1], p_y), rep(tau_sq_t[1], k))
  v2 <- c(tau_sq_mu[2], rep(tau_sq_Z[2], p_y), rep(tau_sq_t[2], k), phi_sq_m[2])
  v3 <- c(tau_sq_mu[3], rep(phi_sq_X[3], d), rep(tau_sq_Z[3], p_y), rep(tau_sq_t[3], k))
  v4 <- c(tau_sq_mu[4], rep(phi_sq_X[4], d), rep(tau_sq_Z[4], p_y), rep(tau_sq_t[4], k), phi_sq_m[4])
  v7 <- c(tau_sq_mu[7], rep(tau_sq_Z[7], p_M), rep(tau_sq_t[7], k), phi_sq_y[7])
  v8 <- c(tau_sq_mu[8], rep(phi_sq_X[8], d), rep(tau_sq_Z[8], p_M), rep(tau_sq_t[8], k), phi_sq_y[8])
  v9 <- c(tau_sq_mu[9], rep(phi_sq_X[9], d), rep(tau_sq_Z[9], p_M), rep(tau_sq_t[9], k), rep(phi_sq_int[9], ix))
  #BUGFIX: v10 and v12 previously omitted the phi_sq_y entry even though their
  #design matrices (X10 = [X8, tX], X12 = [X8, ty]) include the y column,
  #leaving the prior variance vector one element shorter than the design
  v10 <- c(tau_sq_mu[10], rep(phi_sq_X[10], d), rep(tau_sq_Z[10], p_M), rep(tau_sq_t[10], k), phi_sq_y[10], rep(phi_sq_int[10], ix))
  v11 <- c(tau_sq_mu[11], rep(tau_sq_Z[11], p_M), rep(tau_sq_t[11], k), phi_sq_y[11], rep(phi_sq_int[11], iy))
  v12 <- c(tau_sq_mu[12], rep(phi_sq_X[12], d), rep(tau_sq_Z[12], p_M), rep(tau_sq_t[12], k), phi_sq_y[12], rep(phi_sq_int[12], iy))
  v14 <- c(tau_sq_mu[14], rep(phi_sq_X[14], d), rep(tau_sq_Z[14], p_y), rep(tau_sq_t[14], k), phi_sq_m[14], rep(phi_sq_int[14], ix))
  if (!sigma5_equal_sigma1 | !calc_ln_prob_data[1]){
    v5 <- c(tau_sq_mu[5], rep(tau_sq_Z[5], p_M), rep(tau_sq_t[5], k))
  }
  if (!sigma6_equal_sigma3 | !calc_ln_prob_data[3]){
    #BUGFIX: tau_sq_t was previously indexed with [5] (copy-paste) instead of [6]
    v6 <- c(tau_sq_mu[6], rep(phi_sq_X[6], d), rep(tau_sq_Z[6], p_M), rep(tau_sq_t[6], k))
  }
  if (!sigma13_equal_sigma9 | !calc_ln_prob_data[9]){
    v13 <- c(tau_sq_mu[13], rep(phi_sq_X[13], d), rep(tau_sq_Z[13], p_y), rep(tau_sq_t[13], k), rep(phi_sq_int[13], ix))
  }
  #scale matrices for H1,H3,H5-H13 complete cases (do not depend on m)
  sigma1 <- crossprod(sqrt(lambda[1]*v1)*t(X1))
  sigma3 <- crossprod(sqrt(lambda[3]*v3)*t(X3))
  sigma7 <- crossprod(sqrt(lambda[7]*v7)*t(X7))
  sigma8 <- crossprod(sqrt(lambda[8]*v8)*t(X8))
  sigma9 <- crossprod(sqrt(lambda[9]*v9)*t(X9))
  sigma10 <- crossprod(sqrt(lambda[10]*v10)*t(X10))
  sigma11 <- crossprod(sqrt(lambda[11]*v11)*t(X11))
  sigma12 <- crossprod(sqrt(lambda[12]*v12)*t(X12))
  #add observation-level noise, inversely weighted by w
  diag(sigma1) <- diag(sigma1) + lambda[1]/w_y
  diag(sigma3) <- diag(sigma3) + lambda[3]/w_y
  diag(sigma7) <- diag(sigma7) + lambda[7]/w_M
  diag(sigma8) <- diag(sigma8) + lambda[8]/w_M
  diag(sigma9) <- diag(sigma9) + lambda[9]/w_M
  diag(sigma10) <- diag(sigma10) + lambda[10]/w_M
  diag(sigma11) <- diag(sigma11) + lambda[11]/w_M
  diag(sigma12) <- diag(sigma12) + lambda[12]/w_M
  if (!sigma5_equal_sigma1 | !calc_ln_prob_data[1]){
    sigma5 <- crossprod(sqrt(lambda[5]*v5)*t(X5))
    diag(sigma5) <- diag(sigma5) + lambda[5]/w_M
  }
  if (!sigma6_equal_sigma3 | !calc_ln_prob_data[3]){
    sigma6 <- crossprod(sqrt(lambda[6]*v6)*t(X6))
    diag(sigma6) <- diag(sigma6) + lambda[6]/w_M
  }
  if (!sigma13_equal_sigma9 | !calc_ln_prob_data[9]){
    sigma13 <- crossprod(sqrt(lambda[13]*v13)*t(X13))
    diag(sigma13) <- diag(sigma13) + lambda[13]/w_y
  }
  #object to store log likelihoods: one row per mediator, one column per H1-H16
  ln_prob_data <- matrix(-Inf, ncol(M), 16)
  rownames(ln_prob_data) <- colnames(M)
  colnames(ln_prob_data) <- c("-,0,0",
                              "-,1,0",
                              "-,0,1",
                              "-,1,1",
                              "0,-,-",
                              "1,-,-",
                              "0,*,-",
                              "1,*,-",
                              "i,-,-",
                              "i,*,-",
                              "0,*i,-",
                              "1,*i,-",
                              "-,0,i",
                              "-,1,i",
                              "-,i,0",
                              "-,i,1")
  #identify batches of M that have the same pattern of missing values
  missing_m <- bmediatR:::batch_cols(M)
  #iterate over batches of M with same pattern of missing values
  if (verbose) { print("Iterating", quote = FALSE) }
  counter <- 0
  for (b in seq_along(missing_m)){
    #subset to non-missing observations
    index <- rep(TRUE, length(y))
    index[missing_m[[b]]$omit] <- FALSE
    if (any(index)){
      y_subset <- y[index]
      w_y_subset <- w_y[index]
      w_M_subset <- w_M[index]
      t_subset <- t[index,,drop = FALSE]
      #cholesky matrices for H1,H3,H5-H13 non-missing observations (do not depend on m)
      if (calc_ln_prob_data[1]){sigma1_chol_subset <- chol(sigma1[index,index])}
      if (calc_ln_prob_data[3]){sigma3_chol_subset <- chol(sigma3[index,index])}
      if (calc_ln_prob_data[7]){sigma7_chol_subset <- chol(sigma7[index,index])}
      if (calc_ln_prob_data[8]){sigma8_chol_subset <- chol(sigma8[index,index])}
      if (calc_ln_prob_data[9]){sigma9_chol_subset <- chol(sigma9[index,index])}
      if (calc_ln_prob_data[10]){sigma10_chol_subset <- chol(sigma10[index,index])}
      if (calc_ln_prob_data[11]){sigma11_chol_subset <- chol(sigma11[index,index])}
      if (calc_ln_prob_data[12]){sigma12_chol_subset <- chol(sigma12[index,index])}
      if (sigma5_equal_sigma1 && calc_ln_prob_data[1]) {
        sigma5_chol_subset <- sigma1_chol_subset
      } else if (calc_ln_prob_data[5]) {
        sigma5_chol_subset <- chol(sigma5[index,index])
      }
      if (sigma6_equal_sigma3 && calc_ln_prob_data[3]) {
        sigma6_chol_subset <- sigma3_chol_subset
      } else if (calc_ln_prob_data[6]) {
        sigma6_chol_subset <- chol(sigma6[index,index])
      }
      if (sigma13_equal_sigma9 && calc_ln_prob_data[9]){
        sigma13_chol_subset <- sigma9_chol_subset
      } else if (calc_ln_prob_data[13]){
        sigma13_chol_subset <- chol(sigma13[index,index])
      }
      #compute H1, H3, and H13 outside of the mediator loop (invariant to m)
      if (calc_ln_prob_data[1]) { ln_prob_data1 <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma1_chol_subset, df = kappa[1]) }
      if (calc_ln_prob_data[3]) { ln_prob_data3 <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma3_chol_subset, df = kappa[3]) }
      if (calc_ln_prob_data[13]) { ln_prob_data13 <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma13_chol_subset, df = kappa[13]) }
      #iterate over mediators
      for (i in missing_m[[b]]$cols) {
        counter <- counter + 1
        if (counter %% 1000 == 0 && verbose) { print(paste(counter, "of", ncol(M)), quote = FALSE) }
        #set current mediator non-missing observations
        m_subset <- M[index,i]
        #design matrix for the interaction of m and t
        tM <- model.matrix(~ 0 + m_subset:t_subset)
        im <- ncol(tM)
        #design matrices for H2, H4, H14-H16 non-missing observations
        X2_subset <- cbind(X1[index,,drop = FALSE], m_subset)
        X4_subset <- cbind(X3[index,,drop = FALSE], m_subset)
        X14_subset <- cbind(X4_subset, tX[index,,drop = FALSE])
        X15_subset <- cbind(X2_subset, tM)
        X16_subset <- cbind(X4_subset, tM)
        v15 <- c(tau_sq_mu[15], rep(tau_sq_Z[15], p_y), rep(tau_sq_t[15], k), phi_sq_m[15], rep(phi_sq_int[15], im))
        v16 <- c(tau_sq_mu[16], rep(phi_sq_X[16], d), rep(tau_sq_Z[16], p_y), rep(tau_sq_t[16], k), phi_sq_m[16], rep(phi_sq_int[16], im))
        #scale and cholesky matrices for H2, H4, H14-H16 non-missing observations
        sigma2_subset <- crossprod(sqrt(lambda[2]*v2)*t(X2_subset))
        sigma4_subset <- crossprod(sqrt(lambda[4]*v4)*t(X4_subset))
        sigma14_subset <- crossprod(sqrt(lambda[14]*v14)*t(X14_subset))
        sigma15_subset <- crossprod(sqrt(lambda[15]*v15)*t(X15_subset))
        sigma16_subset <- crossprod(sqrt(lambda[16]*v16)*t(X16_subset))
        diag(sigma2_subset) <- diag(sigma2_subset) + lambda[2]/w_y_subset
        diag(sigma4_subset) <- diag(sigma4_subset) + lambda[4]/w_y_subset
        diag(sigma14_subset) <- diag(sigma14_subset) + lambda[14]/w_y_subset
        diag(sigma15_subset) <- diag(sigma15_subset) + lambda[15]/w_y_subset
        diag(sigma16_subset) <- diag(sigma16_subset) + lambda[16]/w_y_subset
        if (calc_ln_prob_data[2]){sigma2_chol_subset <- chol(sigma2_subset)}
        if (calc_ln_prob_data[4]){sigma4_chol_subset <- chol(sigma4_subset)}
        if (calc_ln_prob_data[14]){sigma14_chol_subset <- chol(sigma14_subset)}
        if (calc_ln_prob_data[15]){sigma15_chol_subset <- chol(sigma15_subset)}
        if (calc_ln_prob_data[16]){sigma16_chol_subset <- chol(sigma16_subset)}
        #compute likelihoods for H1-H16 (y-models score y, m-models score m)
        if (calc_ln_prob_data[1]){ln_prob_data[i,1] <- ln_prob_data1}
        if (calc_ln_prob_data[3]){ln_prob_data[i,3] <- ln_prob_data3}
        if (calc_ln_prob_data[13]){ln_prob_data[i,13] <- ln_prob_data13}
        if (calc_ln_prob_data[2]){ln_prob_data[i,2] <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma2_chol_subset, df = kappa[2])}
        if (calc_ln_prob_data[4]){ln_prob_data[i,4] <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma4_chol_subset, df = kappa[4])}
        if (calc_ln_prob_data[5]){ln_prob_data[i,5] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma5_chol_subset, df = kappa[5])}
        if (calc_ln_prob_data[6]){ln_prob_data[i,6] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma6_chol_subset, df = kappa[6])}
        if (calc_ln_prob_data[7]){ln_prob_data[i,7] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma7_chol_subset, df = kappa[7])}
        if (calc_ln_prob_data[8]){ln_prob_data[i,8] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma8_chol_subset, df = kappa[8])}
        if (calc_ln_prob_data[9]){ln_prob_data[i,9] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma9_chol_subset, df = kappa[9])}
        if (calc_ln_prob_data[10]){ln_prob_data[i,10] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma10_chol_subset, df = kappa[10])}
        if (calc_ln_prob_data[11]){ln_prob_data[i,11] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma11_chol_subset, df = kappa[11])}
        if (calc_ln_prob_data[12]){ln_prob_data[i,12] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma12_chol_subset, df = kappa[12])}
        if (calc_ln_prob_data[14]){ln_prob_data[i,14] <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma14_chol_subset, df = kappa[14])}
        if (calc_ln_prob_data[15]){ln_prob_data[i,15] <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma15_chol_subset, df = kappa[15])}
        if (calc_ln_prob_data[16]){ln_prob_data[i,16] <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma16_chol_subset, df = kappa[16])}
      }
    }
  }
  #compute posterior probabilities for all cases
  #cases encoded by presence (1) or absence (0) of 'X->m, m->y, X->y' edges on the DAG
  #(*) denotes reverse causation 'm<-y' and (i) denotes moderation
  #c1: '0,0,0' / H1 and H5
  #c2: '0,1,0' / H2 and H5
  #c3: '1,0,0' / H1 and H6
  #c4: '1,1,0' / H2 and H6 - complete mediation
  #c5: '0,0,1' / H3 and H5
  #c6: '0,1,1' / H4 and H5
  #c7: '1,0,1' / H3 and H6 - colocalization
  #c8: '1,1,1' / H4 and H6 - partial mediation
  #c9: '0,*,0' / H1 and H7
  #c10: '1,*,0' / H1 and H8
  #c11: '0,*,1' / H3 and H7 - Reactive
  #c12: '1,*,1' / H3 and H8
  #c13: '0,i,0' / H5 and H15
  #c14: 'i,0,0' / H1 and H9
  #c15: 'i,1,0' / H2 and H9
  #c16: '1,i,0' / H15 and H6
  #c17: '0,0,i' / H13 and H5
  #c18: '0,1,i' / H14 and H5
  #c19: '0,i,1' / H16 and H5
  #c20: '1,0,i' / H13 and H6
  #c21: 'i,0,1' / H3 and H9
  #c22: '1,1,i' / H14 and H6
  #c23: 'i,1,1' / H4 and H9
  #c24: '1,i,1' / H16 and H6
  #c25: '0,*i,0' / H1 and H11
  #c26: 'i,*,0' / H1 and H10
  #c27: '1,*i,0' / H1 and H12
  #c28: '0,*,i' / H13 and H7
  #c29: '0,*i,1' / H3 and H11
  #c30: '1,*,i' / H13 and H8
  #c31: 'i,*,1' / H3 and H10
  #c32: '1,*i,1' / H3 and H12
  preset_odds_index <- return_preset_odds_index_moderation()
  output <- posterior_summary_moderation(ln_prob_data, ln_prior_c, preset_odds_index)
  #return results (posterior_summary_moderation already labels both odds
  #matrices, so the previous self-assignment of colnames was removed)
  output$ln_prior_c <- if (is.matrix(ln_prior_c)) ln_prior_c else matrix(ln_prior_c, nrow = 1)
  colnames(output$ln_prior_c) <- colnames(output$ln_post_c)
  output$ln_prob_data <- ln_prob_data
  output <- output[c("ln_prob_data", "ln_post_c", "ln_post_odds", "ln_prior_c", "ln_prior_odds", "ln_ml")]
  if (verbose) { print("Done", quote = FALSE) }
  output
}
#' Bayesian model selection for moderated mediation analysis function, version 0
#'
#' This function takes an outcome (y), candidate mediators (M), a candidate moderator (t), and a driver as a design matrix (X) to perform a
#' Bayesian model selection analysis for mediation.
#'
#' @param y Vector or single column matrix of an outcome variable. Single outcome variable expected.
#' Names or rownames must match across M, X, t, Z, Z_y, Z_M, w, w_y, and w_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs.
#' @param M Vector or matrix of mediator variables. Multiple mediator variables are supported.
#' Names or rownames must match across y, X, t, Z, Z_y, Z_M, w, w_y, and w_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs.
#' @param X Design matrix of the driver. Names or rownames must match across y, M, t, Z, Z_y, Z_M, w, w_y, and w_M (if provided) when align_data = TRUE.
#' If align_data = FALSE, dimensions and order must match across inputs. One common application is for X to represent genetic information at a QTL,
#' either as founder strain haplotypes or variant genotypes, though X is generalizable to other types of variables.
#' @param t Design matrix of the moderator for the outcome and mediator. Names or rownames must match across y, M, Z, Z_y, Z_M, w, w_y, and w_M (if provided) when align_data = TRUE.
#' If align_data = FALSE, dimensions and order must match across inputs.
#' @param Z DEFAULT: NULL. Design matrix of covariates that influence the outcome and mediator variables.
#' Names or rownames must match to those of y, M, X, t, w, w_y, and w_M (if provided) when align_data = TRUE. If align_data=FALSE,
#' dimensions and order must match across inputs. If Z is provided, it supercedes Z_y and Z_M.
#' @param Z_y DEFAULT: NULL. Design matrix of covariates that influence the outcome variable.
#' Names or rownames must match to those of y, M, X, t, Z_M, w, w_y, and w_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs. If Z is provided, it supercedes Z_y and Z_M.
#' @param Z_M DEFAULT: NULL. Design matrix of covariates that influence the mediator variables.
#' Names or rownames must match across y, M, X, t, Z_y, w, w_y, and w_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs. If Z is provided, it supercedes Z_y and Z_M.
#' @param w DEFAULT: NULL. Vector or single column matrix of weights for individuals in analysis that applies to both
#' y and M. Names must match across y, M, X, t, Z, Z_y, and Z_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs. A common use would be for an analysis of strain means, where w
#' is a vector of the number of individuals per strain. If no w, w_y, or w_M is given, observations are equally weighted as 1s for y and M.
#' If w is provided, it supercedes w_y and w_M.
#' @param w_y DEFAULT: NULL. Vector or single column matrix of weights for individuals in analysis, specific to the measurement
#' of y. Names must match across y, M, X, t, Z, Z_y, Z_M, and w_M (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs. A common use would be for an analysis of strain means, where y and M
#' are summarized from a different number of individuals per strain. w_y is a vector of the number of individuals per strain used to
#' measure y. If no w_y (or w) is given, observations are equally weighted as 1s for y.
#' @param w_M DEFAULT: NULL. Vector or single column matrix of weights for individuals in analysis, specific to the measurement
#' of M. Names must match across y, M, X, t, Z, Z_y, Z_M, and w_y (if provided) when align_data = TRUE. If align_data = FALSE,
#' dimensions and order must match across inputs. A common use would be for an analysis of strain means, where y and M
#' are summarized from a different number of individuals per strain. w_M is a vector of the number of individuals per strain use to
#' measure M. If no w_M (or w) is given, observations are equally weighted as 1s for M.
#' @param tau_sq_mu DEFAULT: 1000. Variance component for the intercept. The DEFAULT represents a diffuse prior, analogous to
#' a fixed effect term.
#' @param tau_sq_Z DEFAULT: 1000. Variance component for the covariates encoded in Z. The DEFAULT represents a diffuse prior, analogous
#' to fixed effect terms.
#' @param phi_sq_X DEFAULT: see signature. Vector of 16 per-likelihood (H1-H16) prior variance scales for the effect of X;
#' NA marks likelihoods in which X has no effect.
#' @param phi_sq_m DEFAULT: see signature. Vector of 16 per-likelihood prior variance scales for the effect of the mediator on y;
#' NA marks likelihoods in which the mediator has no effect.
#' @param phi_sq_y DEFAULT: see signature. Vector of 16 per-likelihood prior variance scales for the effect of y on the mediator
#' (reverse causation); NA marks likelihoods in which it is absent.
#' @param phi_sq_int DEFAULT: see signature. Vector of 16 per-likelihood prior variance scales for the moderation (interaction)
#' terms; NA marks likelihoods in which no interaction appears.
#' @param ln_prior_c DEFAULT: "complete". The prior log case probabilities. See model_info() for description of likelihoods and their
#' combinations into cases. Simplified pre-set options are available, including "complete", "partial", and "reactive".
#' @param align_data DEFAULT: TRUE. If TRUE, expect vector and matrix inputs to have names and rownames, respectively. The overlapping data
#' will then be aligned, allowing the user to not have to reduce data to overlapping samples and order them.
#' @export
#' @examples bmediatR_v0_moderation()
bmediatR_v0_moderation <- function(y, M, X, t,
                                   Z = NULL, Z_y = NULL, Z_M = NULL,
                                   w = NULL, w_y = NULL, w_M = NULL,
                                   kappa = 0.001, lambda = 0.001,
                                   tau_sq_mu = 1000,
                                   tau_sq_Z = 1000,
                                   tau_sq_t = 1000, # Prior for the marginal effect of t
                                   phi_sq_X = c(NA, NA, 1, 1, NA, 1, NA, 1,
                                                1, 1, NA, 1, 1, 1, NA, 1),
                                   phi_sq_m = c(NA, 1, NA, 1, NA, NA, NA, NA,
                                                NA, NA, NA, NA, NA, 1, 1, 1),
                                   phi_sq_y = c(NA, NA, NA, NA, NA, NA, 1, 1,
                                                NA, 1, 1, 1, NA, NA, NA, NA),
                                   phi_sq_int = c(rep(NA, 8), # effect size prior for interaction term
                                                  1, 1, 1, 1, 1, 1, 1, 1),
                                   ln_prior_c = "complete",
                                   options_X = list(sum_to_zero = TRUE, center = FALSE, scale = FALSE),
                                   options_t = list(sum_to_zero = TRUE, center = FALSE, scale = FALSE),
                                   align_data = TRUE,
                                   verbose = TRUE) {
  # Resolve preset strings (e.g. "complete") into log prior case probabilities
  ln_prior_c <- return_ln_prior_c_from_presets_moderation(ln_prior_c = ln_prior_c)
  # Ensure ln_prior_c sums to 1 on the probability scale.
  # NOTE: `t` is also a data argument here; R resolves t() in call position
  # to the base transpose function, skipping the non-function binding.
  if (is.matrix(ln_prior_c)) {
    ln_prior_c <- t(apply(ln_prior_c, 1, function(x) { x - matrixStats::logSumExp(x) }))
  } else {
    ln_prior_c <- ln_prior_c - matrixStats::logSumExp(ln_prior_c)
  }
  # BUGFIX: Z and w are documented as superseding the outcome-/mediator-specific
  # covariates and weights, but were previously ignored by this version.
  if (!is.null(Z)) { Z_y <- Z_M <- Z }
  if (!is.null(w)) { w_y <- w_M <- w }
  # Optionally align data across inputs by names/rownames
  processed_data <- process_data_moderation(y = y, M = M, X = X, t = t,
                                            Z_y = Z_y, Z_M = Z_M,
                                            w_y = w_y, w_M = w_M,
                                            align_data = align_data,
                                            verbose = verbose)
  y <- processed_data$y
  M <- processed_data$M
  X <- processed_data$X
  t <- processed_data$t
  Z_y <- processed_data$Z_y
  Z_M <- processed_data$Z_M
  w_y <- processed_data$w_y
  w_M <- processed_data$w_M
  # Dimension of y
  n <- length(y)
  # Dimension of Z's
  p_y <- ncol(Z_y)
  p_M <- ncol(Z_M)
  # Scale y, M, and Z (mean 0, sd 1)
  y <- c(scale(y))
  M <- apply(M, 2, scale)
  if (p_y > 0) { Z_y <- apply(Z_y, 2, scale) }
  if (p_M > 0) { Z_M <- apply(Z_M, 2, scale) }
  # Optionally use sum-to-zero contrast for X;
  # recommended when X is a matrix of factors, with a column for every factor level
  if (options_X$sum_to_zero == TRUE) {
    C <- sumtozero_contrast(ncol(X))
    X <- X %*% C
  }
  # Optionally use sum-to-zero contrast for t;
  # recommended when t is a matrix of factors, with a column for every factor level
  if (options_t$sum_to_zero == TRUE) {
    C <- sumtozero_contrast(ncol(t))
    t <- t %*% C
  }
  # Optionally center and scale X and t
  X <- apply(X, 2, scale, center = options_X$center, scale = options_X$scale)
  t <- apply(t, 2, scale, center = options_t$center, scale = options_t$scale)
  # Dimension of X and t
  d <- ncol(X)
  k <- ncol(t)
  # Interaction between X and moderator
  tX <- model.matrix(~ 0 + X:t)
  ix <- ncol(tX)
  # Interaction between y and moderator
  ty <- model.matrix(~ 0 + y:t)
  iy <- ncol(ty)
  # Column design matrix for the intercept mu
  ones <- matrix(1, n)
  # Begin Bayesian calculations
  if (verbose) { print("Initializing", quote = FALSE) }
  # Expand scalar hyperparameters to one entry per likelihood H1-H16
  kappa <- rep(kappa, 16)
  lambda <- rep(lambda, 16)
  tau_sq_mu <- rep(tau_sq_mu, 16)
  tau_sq_Z <- rep(tau_sq_Z, 16)
  tau_sq_t <- rep(tau_sq_t, 16)
  # Identify likelihoods that are not supported by the prior;
  # will not compute cholesky or likelihood for these
  calc_ln_prob_data <- rep(NA, 16)
  calc_ln_prob_data[1] <- any(!is.infinite(ln_prior_c[c(1,3,9,10)]))
  calc_ln_prob_data[2] <- any(!is.infinite(ln_prior_c[c(2,4)]))
  calc_ln_prob_data[3] <- any(!is.infinite(ln_prior_c[c(5,7,11,12)]))
  calc_ln_prob_data[4] <- any(!is.infinite(ln_prior_c[c(6,8)]))
  calc_ln_prob_data[5] <- any(!is.infinite(ln_prior_c[c(1,2,5,6)]))
  calc_ln_prob_data[6] <- any(!is.infinite(ln_prior_c[c(3,4,7,8)]))
  calc_ln_prob_data[7] <- any(!is.infinite(ln_prior_c[c(9,11)]))
  calc_ln_prob_data[8] <- any(!is.infinite(ln_prior_c[c(10,12)]))
  calc_ln_prob_data[9] <- any(!is.infinite(ln_prior_c[c(14,15,21,23)]))
  calc_ln_prob_data[10] <- any(!is.infinite(ln_prior_c[c(26,31)]))
  calc_ln_prob_data[11] <- any(!is.infinite(ln_prior_c[c(25,29)]))
  calc_ln_prob_data[12] <- any(!is.infinite(ln_prior_c[c(27,32)]))
  calc_ln_prob_data[13] <- any(!is.infinite(ln_prior_c[c(28,30)]))
  calc_ln_prob_data[14] <- any(!is.infinite(ln_prior_c[c(18,22)]))
  calc_ln_prob_data[15] <- any(!is.infinite(ln_prior_c[c(13,16)]))
  calc_ln_prob_data[16] <- any(!is.infinite(ln_prior_c[c(19,24)]))
  # Likelihood models for all hypotheses.
  # Hypotheses encoded by presence (1) or absence (0) of 'X->m, y->m, X->y' edges on the DAG
  # (*) denotes reverse causation 'm<-y', i denotes moderation
  # H1: '-,0,0' / y does not depend on X or m
  # H2: '-,1,0' / y depends on m but not X
  # H3: '-,0,1' / y depends on X but not m
  # H4: '-,1,1' / y depends on X and m
  # H5: '0,-,-' / m does not depend on X
  # H6: '1,-,-' / m depends on X
  # H7: '0,*,-' / m depends on y but not X
  # H8: '1,*,-' / m depends on X and y
  # H9: 'i,-,-' / m depends on X and is moderated by t
  # H10: 'i,*,-' / m depends on X and y and relation between X and M is moderated by t
  # H11: '0,*i,-' / m depends on y but not X and is moderated by t
  # H12: '1,*i,-' / m depends on X and y and y -> M is moderated by t
  # H13: '-,0,i' / y depends on X but not m and is moderated by t
  # H14: '-,1,i' / y depends on X and m and X->y is moderated by t
  # H15: '-,i,0' / y depends on m but not X and is moderated by t
  # H16: '-,i,1' / y depends on X and m and m -> y is moderated by t
  # All include covariates Z and t.
  # Design matrices for H1,H3,H5-H13 complete cases (do not depend on m)
  X1 <- cbind(ones, Z_y, t)
  X3 <- cbind(ones, X, Z_y, t)
  X5 <- cbind(ones, Z_M, t)
  X6 <- cbind(ones, X, Z_M, t)
  X7 <- cbind(X5, y)
  X8 <- cbind(X6, y)
  X9 <- cbind(X6, tX)
  X10 <- cbind(X8, tX)
  X11 <- cbind(X7, ty)
  X12 <- cbind(X8, ty)
  X13 <- cbind(X3, tX)
  # Check if all scale hyperparameters are identical for H1 and H5;
  # implies sigma1 and sigma5 identical, used to reduce computations
  sigma5_equal_sigma1 <- all(lambda[1] == lambda[5],
                             tau_sq_mu[1] == tau_sq_mu[5],
                             tau_sq_Z[1] == tau_sq_Z[5],
                             identical(Z_y, Z_M),
                             identical(w_y, w_M))
  # Check if all scale hyperparameters are identical for H3 and H6;
  # implies sigma3 and sigma6 identical, used to reduce computations
  sigma6_equal_sigma3 <- all(lambda[3] == lambda[6],
                             tau_sq_mu[3] == tau_sq_mu[6],
                             tau_sq_Z[3] == tau_sq_Z[6],
                             identical(Z_y, Z_M),
                             identical(w_y, w_M))
  # Check if all scale hyperparameters are identical for H9 and H13;
  # implies sigma9 and sigma13 identical, used to reduce computations
  sigma13_equal_sigma9 <- all(lambda[9] == lambda[13],
                              tau_sq_mu[9] == tau_sq_mu[13],
                              tau_sq_Z[9] == tau_sq_Z[13],
                              identical(Z_y, Z_M),
                              identical(w_y, w_M),
                              tau_sq_t[9] == tau_sq_t[13])
  # Prior variance matrices (diagonal) for H1-H16
  v1 <- c(tau_sq_mu[1], rep(tau_sq_Z[1], p_y), rep(tau_sq_t[1], k))
  v2 <- c(tau_sq_mu[2], rep(tau_sq_Z[2], p_y), rep(tau_sq_t[2], k), phi_sq_m[2])
  v3 <- c(tau_sq_mu[3], rep(phi_sq_X[3], d), rep(tau_sq_Z[3], p_y), rep(tau_sq_t[3], k))
  v4 <- c(tau_sq_mu[4], rep(phi_sq_X[4], d), rep(tau_sq_Z[4], p_y), rep(tau_sq_t[4], k), phi_sq_m[4])
  v7 <- c(tau_sq_mu[7], rep(tau_sq_Z[7], p_M), rep(tau_sq_t[7], k), phi_sq_y[7])
  v8 <- c(tau_sq_mu[8], rep(phi_sq_X[8], d), rep(tau_sq_Z[8], p_M), rep(tau_sq_t[8], k), phi_sq_y[8])
  v9 <- c(tau_sq_mu[9], rep(phi_sq_X[9], d), rep(tau_sq_Z[9], p_M), rep(tau_sq_t[9], k), rep(phi_sq_int[9], ix))
  v10 <- c(tau_sq_mu[10], rep(phi_sq_X[10], d), rep(tau_sq_Z[10], p_M), rep(tau_sq_t[10], k), rep(phi_sq_int[10], ix))
  v11 <- c(tau_sq_mu[11], rep(tau_sq_Z[11], p_M), rep(tau_sq_t[11], k), phi_sq_y[11], rep(phi_sq_int[11], iy))
  v12 <- c(tau_sq_mu[12], rep(phi_sq_X[12], d), rep(tau_sq_Z[12], p_M), rep(tau_sq_t[12], k), rep(phi_sq_int[12], iy))
  v14 <- c(tau_sq_mu[14], rep(phi_sq_X[14], d), rep(tau_sq_Z[14], p_y), rep(tau_sq_t[14], k), phi_sq_m[14], rep(phi_sq_int[14], ix))
  if (!sigma5_equal_sigma1 | !calc_ln_prob_data[1]) {
    v5 <- c(tau_sq_mu[5], rep(tau_sq_Z[5], p_M), rep(tau_sq_t[5], k))
  }
  if (!sigma6_equal_sigma3 | !calc_ln_prob_data[3]) {
    # BUGFIX: tau_sq_t was previously indexed with [5] instead of [6]
    v6 <- c(tau_sq_mu[6], rep(phi_sq_X[6], d), rep(tau_sq_Z[6], p_M), rep(tau_sq_t[6], k))
  }
  if (!sigma13_equal_sigma9 | !calc_ln_prob_data[9]) {
    v13 <- c(tau_sq_mu[13], rep(phi_sq_X[13], d), rep(tau_sq_Z[13], p_y), rep(tau_sq_t[13], k), rep(phi_sq_int[13], ix))
  }
  # Scale matrices for H1,H3,H5-H8 complete cases, H9-H13 (do not depend on m)
  sigma1 <- crossprod(sqrt(lambda[1]*v1)*t(X1))
  sigma3 <- crossprod(sqrt(lambda[3]*v3)*t(X3))
  sigma7 <- crossprod(sqrt(lambda[7]*v7)*t(X7))
  sigma8 <- crossprod(sqrt(lambda[8]*v8)*t(X8))
  sigma9 <- crossprod(sqrt(lambda[9]*v9)*t(X9))
  sigma10 <- crossprod(sqrt(lambda[10]*v10)*t(X10))
  sigma11 <- crossprod(sqrt(lambda[11]*v11)*t(X11))
  sigma12 <- crossprod(sqrt(lambda[12]*v12)*t(X12))
  # Residual variance contribution, weighted per observation
  diag(sigma1) <- diag(sigma1) + lambda[1]/w_y
  diag(sigma3) <- diag(sigma3) + lambda[3]/w_y
  diag(sigma7) <- diag(sigma7) + lambda[7]/w_M
  diag(sigma8) <- diag(sigma8) + lambda[8]/w_M
  diag(sigma9) <- diag(sigma9) + lambda[9]/w_M
  diag(sigma10) <- diag(sigma10) + lambda[10]/w_M
  diag(sigma11) <- diag(sigma11) + lambda[11]/w_M
  diag(sigma12) <- diag(sigma12) + lambda[12]/w_M
  if (!sigma5_equal_sigma1 | !calc_ln_prob_data[1]) {
    sigma5 <- crossprod(sqrt(lambda[5]*v5)*t(X5))
    diag(sigma5) <- diag(sigma5) + lambda[5]/w_M
  }
  if (!sigma6_equal_sigma3 | !calc_ln_prob_data[3]) {
    sigma6 <- crossprod(sqrt(lambda[6]*v6)*t(X6))
    diag(sigma6) <- diag(sigma6) + lambda[6]/w_M
  }
  if (!sigma13_equal_sigma9 | !calc_ln_prob_data[9]) {
    sigma13 <- crossprod(sqrt(lambda[13]*v13)*t(X13))
    diag(sigma13) <- diag(sigma13) + lambda[13]/w_y
  }
  # Object to store likelihoods
  ln_prob_data <- matrix(-Inf, ncol(M), 16)
  rownames(ln_prob_data) <- colnames(M)
  colnames(ln_prob_data) <- c("-,0,0",
                              "-,1,0",
                              "-,0,1",
                              "-,1,1",
                              "0,-,-",
                              "1,-,-",
                              "0,*,-",
                              "1,*,-",
                              "i,-,-",
                              "i,*,-",
                              "0,*i,-",
                              "1,*i,-",
                              "-,0,i",
                              "-,1,i",
                              "-,i,0",
                              "-,i,1")
  # Identify batches of M that have the same pattern of missing values,
  # so cholesky factors can be shared within a batch
  missing_m <- bmediatR:::batch_cols(M)
  # Iterate over batches of M with same pattern of missing values
  if (verbose) { print("Iterating", quote = FALSE) }
  counter <- 0
  for (b in 1:length(missing_m)) {
    # Subset to non-missing observations
    index <- rep(TRUE, length(y))
    index[missing_m[[b]]$omit] <- FALSE
    if (any(index)) {
      y_subset <- y[index]
      w_y_subset <- w_y[index]
      w_M_subset <- w_M[index]
      # Cholesky matrices for H1,H3,H5-H13 non-missing observations (do not depend on m)
      if (calc_ln_prob_data[1]) { sigma1_chol_subset <- chol(sigma1[index,index]) }
      if (calc_ln_prob_data[3]) { sigma3_chol_subset <- chol(sigma3[index,index]) }
      if (calc_ln_prob_data[7]) { sigma7_chol_subset <- chol(sigma7[index,index]) }
      if (calc_ln_prob_data[8]) { sigma8_chol_subset <- chol(sigma8[index,index]) }
      if (calc_ln_prob_data[9]) { sigma9_chol_subset <- chol(sigma9[index,index]) }
      if (calc_ln_prob_data[10]) { sigma10_chol_subset <- chol(sigma10[index,index]) }
      if (calc_ln_prob_data[11]) { sigma11_chol_subset <- chol(sigma11[index,index]) }
      if (calc_ln_prob_data[12]) { sigma12_chol_subset <- chol(sigma12[index,index]) }
      if (sigma5_equal_sigma1 & calc_ln_prob_data[1]) {
        sigma5_chol_subset <- sigma1_chol_subset
      } else if (calc_ln_prob_data[5]) {
        sigma5_chol_subset <- chol(sigma5[index,index])
      }
      if (sigma6_equal_sigma3 & calc_ln_prob_data[3]) {
        sigma6_chol_subset <- sigma3_chol_subset
      } else if (calc_ln_prob_data[6]) {
        sigma6_chol_subset <- chol(sigma6[index,index])
      }
      if (sigma13_equal_sigma9 & calc_ln_prob_data[9]) {
        sigma13_chol_subset <- sigma9_chol_subset
      } else if (calc_ln_prob_data[13]) {
        sigma13_chol_subset <- chol(sigma13[index,index])
      }
      # Compute H1, H3, and H13 outside of the mediator loop (invariant in m)
      if (calc_ln_prob_data[1]) { ln_prob_data1 <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma1_chol_subset, df = kappa[1]) }
      if (calc_ln_prob_data[3]) { ln_prob_data3 <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma3_chol_subset, df = kappa[3]) }
      if (calc_ln_prob_data[13]) { ln_prob_data13 <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma13_chol_subset, df = kappa[13]) }
      # Iterate over mediators
      for (i in missing_m[[b]]$cols) {
        counter <- counter + 1
        if (counter %% 1000 == 0 && verbose) { print(paste(counter, "of", ncol(M)), quote = FALSE) }
        # Set current mediator non-missing observations
        m_subset <- M[index,i]
        t_subset <- t[index,,drop = FALSE]
        # Calculate design matrix for interaction of M and t
        tM <- model.matrix(~ 0 + m_subset:t_subset)
        im <- ncol(tM)
        # Design matrices for H2,H4,H14-H16 non-missing observations
        X2_subset <- cbind(X1[index,,drop = FALSE], m_subset)
        X4_subset <- cbind(X3[index,,drop = FALSE], m_subset)
        X14_subset <- cbind(X4_subset, tX[index,,drop = FALSE])
        X15_subset <- cbind(X2_subset, tM)
        X16_subset <- cbind(X4_subset, tM)
        v15 <- c(tau_sq_mu[15], rep(tau_sq_Z[15], p_y), rep(tau_sq_t[15], k), phi_sq_m[15], rep(phi_sq_int[15], im))
        # BUGFIX: the interaction prior was previously indexed with phi_sq_int[15] instead of [16]
        v16 <- c(tau_sq_mu[16], rep(phi_sq_X[16], d), rep(tau_sq_Z[16], p_y), rep(tau_sq_t[16], k), phi_sq_m[16], rep(phi_sq_int[16], im))
        # Scale and cholesky matrices for H2,H4,H14-H16 non-missing observations
        sigma2_subset <- crossprod(sqrt(lambda[2]*v2)*t(X2_subset))
        sigma4_subset <- crossprod(sqrt(lambda[4]*v4)*t(X4_subset))
        sigma14_subset <- crossprod(sqrt(lambda[14]*v14)*t(X14_subset))
        sigma15_subset <- crossprod(sqrt(lambda[15]*v15)*t(X15_subset))
        sigma16_subset <- crossprod(sqrt(lambda[16]*v16)*t(X16_subset))
        diag(sigma2_subset) <- diag(sigma2_subset) + lambda[2]/w_y_subset
        diag(sigma4_subset) <- diag(sigma4_subset) + lambda[4]/w_y_subset
        diag(sigma14_subset) <- diag(sigma14_subset) + lambda[14]/w_y_subset
        diag(sigma15_subset) <- diag(sigma15_subset) + lambda[15]/w_y_subset
        diag(sigma16_subset) <- diag(sigma16_subset) + lambda[16]/w_y_subset
        if (calc_ln_prob_data[2]) { sigma2_chol_subset <- chol(sigma2_subset) }
        if (calc_ln_prob_data[4]) { sigma4_chol_subset <- chol(sigma4_subset) }
        if (calc_ln_prob_data[14]) { sigma14_chol_subset <- chol(sigma14_subset) }
        if (calc_ln_prob_data[15]) { sigma15_chol_subset <- chol(sigma15_subset) }
        if (calc_ln_prob_data[16]) { sigma16_chol_subset <- chol(sigma16_subset) }
        # Compute likelihoods for H1-H16
        if (calc_ln_prob_data[1]) { ln_prob_data[i,1] <- ln_prob_data1 }
        if (calc_ln_prob_data[3]) { ln_prob_data[i,3] <- ln_prob_data3 }
        if (calc_ln_prob_data[13]) { ln_prob_data[i,13] <- ln_prob_data13 }
        if (calc_ln_prob_data[2]) { ln_prob_data[i,2] <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma2_chol_subset, df = kappa[2]) }
        if (calc_ln_prob_data[4]) { ln_prob_data[i,4] <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma4_chol_subset, df = kappa[4]) }
        if (calc_ln_prob_data[5]) { ln_prob_data[i,5] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma5_chol_subset, df = kappa[5]) }
        if (calc_ln_prob_data[6]) { ln_prob_data[i,6] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma6_chol_subset, df = kappa[6]) }
        if (calc_ln_prob_data[7]) { ln_prob_data[i,7] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma7_chol_subset, df = kappa[7]) }
        if (calc_ln_prob_data[8]) { ln_prob_data[i,8] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma8_chol_subset, df = kappa[8]) }
        if (calc_ln_prob_data[9]) { ln_prob_data[i,9] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma9_chol_subset, df = kappa[9]) }
        if (calc_ln_prob_data[10]) { ln_prob_data[i,10] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma10_chol_subset, df = kappa[10]) }
        if (calc_ln_prob_data[11]) { ln_prob_data[i,11] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma11_chol_subset, df = kappa[11]) }
        if (calc_ln_prob_data[12]) { ln_prob_data[i,12] <- bmediatR:::dmvt_chol(m_subset, sigma_chol = sigma12_chol_subset, df = kappa[12]) }
        if (calc_ln_prob_data[14]) { ln_prob_data[i,14] <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma14_chol_subset, df = kappa[14]) }
        if (calc_ln_prob_data[15]) { ln_prob_data[i,15] <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma15_chol_subset, df = kappa[15]) }
        if (calc_ln_prob_data[16]) { ln_prob_data[i,16] <- bmediatR:::dmvt_chol(y_subset, sigma_chol = sigma16_chol_subset, df = kappa[16]) }
      }
    }
  }
  # Compute posterior probabilities for all cases.
  # Cases encoded by presence (1) or absence (0) of 'X->m, m->y, X->y' edges on the DAG
  # (*) denotes reverse causation 'm<-y' and (i) denotes moderation
  # c1: '0,0,0' / H1 and H5
  # c2: '0,1,0' / H2 and H5
  # c3: '1,0,0' / H1 and H6
  # c4: '1,1,0' / H2 and H6 - complete mediation
  # c5: '0,0,1' / H3 and H5
  # c6: '0,1,1' / H4 and H5
  # c7: '1,0,1' / H3 and H6 - colocalization
  # c8: '1,1,1' / H4 and H6 - partial mediation
  # c9: '0,*,0' / H1 and H7
  # c10: '1,*,0' / H1 and H8
  # c11: '0,*,1' / H3 and H7 - Reactive
  # c12: '1,*,1' / H3 and H8
  # c13: '0,i,0' / H5 and H15
  # c14: 'i,0,0' / H1 and H9
  # c15: 'i,1,0' / H2 and H9
  # c16: '1,i,0' / H15 and H6
  # c17: '0,0,i' / H13 and H5
  # c18: '0,1,i' / H14 and H5
  # c19: '0,i,1' / H16 and H5
  # c20: '1,0,i' / H13 and H6
  # c21: 'i,0,1' / H3 and H9
  # c22: '1,1,i' / H14 and H6
  # c23: '1,i,1' / H4 and H9
  # c24: '1,i,1' / H16 and H6
  # c25: '0,*i,0' / H1 and H11
  # c26: 'i,*,0' / H1 and H10
  # c27: '1,*i,0' / H1 and H12
  # c28: '0,*,i' / H13 and H7
  # c29: '0,*i,1' / H3 and H11
  # c30: '1,*,i' / H13 and H8
  # c31: 'i,*,1' / H3 and H10
  # c32: '1,*i,1' / H3 and H12
  preset_odds_index <- return_preset_odds_index_moderation()
  output <- posterior_summary_moderation(ln_prob_data, ln_prior_c, preset_odds_index)
  # BUGFIX: previously a three-way chain re-assigned colnames(output$ln_post_odds)
  # to itself; only propagating the names to the prior odds is meaningful.
  colnames(output$ln_prior_odds) <- colnames(output$ln_post_odds)
  # Return results
  output$ln_prior_c <- matrix(ln_prior_c, nrow = 1)
  colnames(output$ln_prior_c) <- colnames(output$ln_post_c)
  output$ln_prob_data <- ln_prob_data
  output <- output[c("ln_prob_data", "ln_post_c", "ln_post_odds", "ln_prior_c", "ln_prior_odds", "ln_ml")]
  if (verbose) { print("Done", quote = FALSE) }
  output
}
|
b4dccb2937489e77f467e2852595edf2d4bcd426
|
0dad68bd3a28894180f18ea147026c8438912a73
|
/man/dna2codons.Rd
|
5d9820496f672661ebdad14e4a6e4e02d6c4ff8a
|
[] |
no_license
|
sherrillmix/dnar
|
1bcc2fac63d8af059215dea6fd3e5cdc7200e81f
|
dead343faebda27057a1e7a5789e853b5b73316d
|
refs/heads/master
| 2022-08-12T14:04:05.052121
| 2022-07-13T18:59:18
| 2022-07-13T18:59:18
| 54,718,599
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 620
|
rd
|
dna2codons.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dna.R
\name{dna2codons}
\alias{dna2codons}
\title{Convert DNA string into separate codons}
\usage{
dna2codons(dna, frame = 0)
}
\arguments{
\item{dna}{Vector of DNA strings}
\item{frame}{Starting frame for codons (0=start on first base, 1=on second, 2=on third)}
}
\value{
List with a vector of 3 base codons for each input DNA string
}
\description{
Convert DNA string into separate codons
}
\examples{
dna2codons('ACATTTGGG')
dna2codons('ACATTTGGG',1)
dna2codons(c('ACATTTGGG','AAATTAGGC'))
dna2codons(c('ACATTTGGG','AAATTAGGC'),c(1,2))
}
|
cef325d7df962034e931b53e654aec190c746ddd
|
db77509d0a6c1763e19dd514db90124d1fe6a71d
|
/visual.R
|
2299d854468b6e25cb21d8971e9e76d8965d3096
|
[] |
no_license
|
yangdaqiang/guizhou-real-assets-scraping
|
b1b341534f01b21bfd632197da18c48b3bd8c356
|
ac56c96e70c77c3c355c11b31795b7393d873fce
|
refs/heads/main
| 2023-01-01T04:54:49.957374
| 2020-10-14T10:02:37
| 2020-10-14T10:02:37
| 302,786,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 601
|
r
|
visual.R
|
# Scatter plots of presale count vs. average price for the Anshun
# real-estate scrape. Requires ggplot2 and dplyr, loaded explicitly so the
# script runs in a fresh session.
library(ggplot2)
library(dplyr)

text <- read.csv("D:\\CloudStation-ZS\\R\\guizhou-real-assets-scraping\\anshun_asset2020-10-10.csv",
                 header = TRUE, encoding = "UTF-8")

# All listings, colored by developer
ggplot(text, aes(x = presale, y = average_price, color = developer)) +
  geom_point()

# Restrict to listings with a positive price and moderate presale counts
text_small <- text %>% filter(average_price > 0, presale < 500)
ggplot(text_small, aes(x = presale, y = average_price, color = developer)) +
  geom_point()

# One facet per developer
ggplot(text_small, aes(x = presale, y = average_price)) +
  geom_point() +
  facet_wrap(~developer)

# BUGFIX: `color = developer` was previously passed outside aes(), where it
# referenced an undefined object and errored; mapping a column to an
# aesthetic must happen inside aes().
ggplot(data = text) +
  geom_point(mapping = aes(x = presale, y = average_price, color = developer))
|
15de9db732ddc076d23ee2c421c664698c3829dc
|
3e59a908d8bb7b461696f01ad318eee75e316a43
|
/Pollutantmean.R
|
f86c94119cd13bda46592eb8fa7ca941eccd2118
|
[] |
no_license
|
anishraj1123/R-Programming
|
864aa983bbd0acefe73b30b32bea3de647375bea
|
3825ea264ab368b0276cc288662f3e45c2ec03f8
|
refs/heads/master
| 2020-04-09T09:32:25.966324
| 2018-12-10T18:37:26
| 2018-12-10T18:37:26
| 160,237,327
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 339
|
r
|
Pollutantmean.R
|
#' Mean of a pollutant across monitor CSV files
#'
#' Reads the monitor files (alphabetically ordered CSVs) in `directory`,
#' selected by position via `id`, and returns the mean of the `pollutant`
#' column pooled over all of their observations, ignoring missing values.
#'
#' @param directory Path to the folder containing the monitor CSV files.
#'   (BUGFIX: previously ignored in favor of a hard-coded "specdata".)
#' @param pollutant Name of the column to average, e.g. "sulfate".
#' @param id Integer vector of monitor file positions to include.
#' @return Single numeric value: mean of the pooled observations, NA removed.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  filelist <- list.files(directory, pattern = "\\.csv$", full.names = TRUE)
  # Pool all observations first, then take one global mean; lapply/unlist
  # avoids growing a vector with c() inside a loop.
  values <- unlist(lapply(filelist[id], function(path) {
    read.csv(path)[[pollutant]]
  }))
  mean(values, na.rm = TRUE)
}
|
1e47d1fb41653f453784556dcfd8ee99f6b8caeb
|
72d2e1952b4273bb3d3352c2242fe3cae2efa9d9
|
/man/interpolsvd_em.Rd
|
6b2ea7d18043ce72fe611966220ad129fb0e6fea
|
[] |
no_license
|
proto4426/ValUSunSSN
|
94a39508848d11d719b45fae9ea2a4d616a19749
|
932bc247467d154aa419f8f709c0dd00fe7fcde6
|
refs/heads/master
| 2020-12-24T20:09:47.701982
| 2017-10-01T14:32:16
| 2017-10-01T14:32:16
| 86,236,891
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,905
|
rd
|
interpolsvd_em.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpolsvd_em.R
\name{interpolsvd_em}
\alias{interpolsvd_em}
\title{interpolated SVD EM algorithm to fill missing values}
\usage{
interpolsvd_em(y, nembed = 1, nsmo = 0, ncomp = 0, threshold1 = 1e-05,
niter = 30, displ = F)
}
\arguments{
\item{y}{a numeric data.frame or matrix of data with gaps}
\item{nembed}{integer value controlling
embedding dimension (must be > 1 for monovariate data)}
\item{nsmo}{integer value controlling cutoff time scale
in number of samples. Set it to 0 if only one single time scale is desired.}
\item{ncomp}{controls the number of significant components.
It has to be specified for running in automatic mode.
Default (0) leads to manual selection during the algorithm}
\item{threshold1}{numeric value controlling the stop of the iterations after
the relative energy change is < threshold}
\item{niter}{numeric value controlling the maximum number of iterations}
\item{displ}{boolean controlling the display of some information in
the console during the algorithm}
}
\value{
A list with the following elements:
\describe{
\item{\code{y.filled}}{The same dataset as y but with gaps filled}
\item{\code{w.distSVD}}{The distribution of the weights of the initial SVD}
}
But only the first one really affects the outcome. A separation into
two scales only (with a threshold between 50–100 days) is enough to properly
capture both short- and long-term evolutions, and embedding dimensions of
D = 2−5 are usually adequate for reconstructing daily averages. The
determination of the optimum parameters and validation of the results is
preferably made by cross-validation.
}
\description{
This main function fills gaps in monovariate or multivariate data
by SVD-imputation which is closely related to
expectation-maximization (EM) algorithm.
}
\details{
The method decomposes the data into two time scales, which are processed
separately and then merged at the end. The cutoff time scale (nsmo) is
expressed in number of samples. A gaussian filter is used for filtering.
Monovariate data must be embedded first (nembed>1).
In the initial data set, gaps are supposed to be filled in with NA !!.
The three tuneable (hyper)parameters are :
\describe{
\item{\code{ncomp}}
\item{\code{nsmo}}
\item{\code{nembed}}
}
}
\examples{
# Take this for input, as advised in the test.m file
y <- sqrt(data.mat2.fin+1) # Selected randomly here, for testing
options(mc.cores=parallel::detectCores()) # all available cores
z <- interpolsvd_em(y, nembed = 2, nsmo = 81, ncomp = 4,
niter = 30, displ = F)
# 393 sec for the whole dataset (with some stations discarded)
# Then do the inverse transformation to obtain final dataset with filled values
z <- z$y.filled
z_final = z*z - 1
z_final[z_final<0] <- 0
}
\author{
Antoine Pissoort, \email{antoine.pissoort@student.uclouvain.be}
}
|
70ed3aa5cbcfe444c1cf323ce1c2c661ee3fe95b
|
c55296385a0c05dbc27eaf3e2395c95c9a8649f8
|
/read HLA.R
|
964eb27a0b5a33f4303b2f11e675e50f8c4e1c51
|
[] |
no_license
|
Dmdickson/TEST-repo
|
eacd1ef482b53627b1643ac38dbce7f4dbabe67a
|
56245e82a058b7ad88c4b15b97a15b98bb04af10
|
refs/heads/master
| 2021-01-24T09:57:21.047571
| 2018-02-26T23:05:09
| 2018-02-26T23:05:09
| 123,032,175
| 0
| 0
| null | 2018-02-26T23:05:10
| 2018-02-26T21:21:50
|
R
|
UTF-8
|
R
| false
| false
| 5,354
|
r
|
read HLA.R
|
# Purpose: to read into R the data files for the IDEA correlates project: HLA data
###############################################
# Original Author: Dorothy Dickson            #
# Edited by                                   #
# Last updated: 19 January 2018               #
###############################################

# NOTE(review): setwd() in scripts is generally discouraged; it is kept for
# compatibility, but all file access below already uses the absolute `path`.
setwd("C:/Users/dmdickso/IDEA")
path <- "C:/Users/dmdickso/IDEA/"
###### change the above path once if needed to fit the current location of datafiles ########
hlafile <- "UVM_GATES_CHALLENGE_HLA.csv"
# Idiom fix: `collapse` has no effect on scalar inputs; plain paste0() suffices.
pathhla <- paste0(path, hlafile)

library(tidyr)
library(knitr)
library(plyr)
library(dplyr)
library(reshape2)

# HLA typing data: one row per subject; two columns per locus (allele 1 / allele 2).
hla <- read.csv(pathhla, header = TRUE, stringsAsFactors = TRUE,
                col.names = c("subject",
                              "altid",
                              "liai",
                              "hla",
                              "a1", "a2",
                              "b1", "b2",
                              "c1", "c2",
                              "dpa1", "dpa2",
                              "dpb11", "dpb12",
                              "dqa11", "dqa12",
                              "dqb11", "dqb12",
                              "drb11", "drb12",
                              "drb31", "drb32",
                              "drb41", "drb42",
                              "drb51", "drb52"))
### tabulate allele frequencies
### default options ignore NA missing
#HLAtable<-table(hla$a1)
#############################################################################
#flip the dataset HLA to make occurring alleles the variables and 0/1 response per subject depending on whether the allele was present for HLA_A
#HLA_a1<-dcast(hla, subject~a1,length)
#HLA_a2<-dcast(hla, subject~a2,length)
### alleles in common within A_allele1 and A_allele2
#common<-intersect (names(HLA_a1), names(HLA_a2))
## drop subject and Var.2 from the list of common variables names (interested in allele names only)
# common<-common [-c(1:2)]
# HLA_A<-merge(HLA_a1, HLA_a2, by=c("subject"))
# for (variable in common){
# # Create a summed variable
# HLA_A[[variable]] <- HLA_A %>% select(starts_with(paste0(variable,"."))) %>% rowSums()
# # Delete columns with .x and .y suffixes
# HLA_A <- HLA_A %>% select(-one_of(c(paste0(variable,".x"), paste0(variable,".y"))))
# }
# ### drop var.2.x and var.2.y
# drops <- c("Var.2.x","Var.2.y")
# HLA_A<-HLA_A[ , !(names(HLA_A) %in% drops)]
#####################################################################################
####### two functions = one to accommmodate blank alleles for HLA_A, _B, _C
####### one for alleles that have no subjects with blanks for allele1 and allele2
#### function for no blanks
# Reshape one HLA locus from two allele columns (var1, var2) into a wide
# subject-by-allele count table: for each allele observed at the locus the
# result records, per subject, how many of the two columns carry it.
# Intended for loci whose allele columns contain no blank entries.
flipandsum <- function(dframe, var1, var2) {
  # One indicator table per allele column: subject ~ allele, cell = count.
  wide1 <- dcast(dframe, subject ~ var1, length)
  wide2 <- dcast(dframe, subject ~ var2, length)

  # Allele names present in both tables; the first shared name is "subject".
  shared <- intersect(names(wide1), names(wide2))[-1]

  out <- merge(wide1, wide2, by = "subject")
  for (allele in shared) {
    # merge() suffixed the duplicated allele columns with .x/.y; collapse
    # every "<allele>."-prefixed column into one summed column, then drop
    # the suffixed pair.
    prefix_cols <- names(out)[startsWith(names(out), paste0(allele, "."))]
    out[[allele]] <- rowSums(out[prefix_cols])
    suffixed <- paste0(allele, c(".x", ".y"))
    out <- out[, !(names(out) %in% suffixed), drop = FALSE]
  }
  out
}
# Variant of flipandsum() for loci where some subjects have blank allele
# entries: dcast() then emits an extra "Var.2" column for the blank level,
# so we skip it when collecting shared allele names and strip its .x/.y
# remnants from the merged result.
flipandsum2 <- function(dframe, var1, var2) {
  wide1 <- dcast(dframe, subject ~ var1, length)
  wide2 <- dcast(dframe, subject ~ var2, length)

  # Drop the first two shared names: "subject" and the blank-level "Var.2".
  shared <- intersect(names(wide1), names(wide2))[-(1:2)]

  out <- merge(wide1, wide2, by = "subject")
  for (allele in shared) {
    # Collapse the merge-suffixed allele columns into one summed column.
    prefix_cols <- names(out)[startsWith(names(out), paste0(allele, "."))]
    out[[allele]] <- rowSums(out[prefix_cols])
    suffixed <- paste0(allele, c(".x", ".y"))
    out <- out[, !(names(out) %in% suffixed), drop = FALSE]
  }

  # Finally remove the merged blank-level columns entirely.
  out[, !(names(out) %in% c("Var.2.x", "Var.2.y")), drop = FALSE]
}
#################################################################################
# Build one wide subject-by-allele table per locus.  Loci whose allele
# columns can contain blanks use flipandsum2(); the rest use flipandsum().
HLA_A<-flipandsum2(hla, hla$a1, hla$a2)
HLA_B<-flipandsum2(hla, hla$b1, hla$b2)
HLA_C<-flipandsum(hla, hla$c1, hla$c2)
HLA_DPB1<-flipandsum(hla, hla$dpb11, hla$dpb12)
HLA_DRB1<-flipandsum(hla, hla$drb11, hla$drb12)
HLA_DQA1<-flipandsum2(hla, hla$dqa11, hla$dqa12)
HLA_DQB1<-flipandsum2(hla, hla$dqb11, hla$dqb12)
HLA_DRB3<-flipandsum2(hla, hla$drb31, hla$drb32)
HLA_DRB4<-flipandsum2(hla, hla$drb41, hla$drb42)
HLA_DRB5<-flipandsum2(hla, hla$drb51, hla$drb52)
# Merge all per-locus tables on subject (inner merge keeps only subjects
# present in every locus table), then drop the individual tables.
HLAcomplete<-Reduce(function(x,y) merge(x,y, by="subject"), list(HLA_A,HLA_B, HLA_C, HLA_DPB1, HLA_DQA1, HLA_DQB1, HLA_DRB1, HLA_DRB3, HLA_DRB4, HLA_DRB5))
rm(HLA_A, HLA_B, HLA_C,HLA_DPB1, HLA_DRB1, HLA_DQA1, HLA_DQB1, HLA_DRB3,HLA_DRB4, HLA_DRB5)
|
275141da4985995ee453e99c378143d481fe342a
|
3563022b6230e672b97e7cfdbfab8a8ec75b76fe
|
/hart_island_analysis.R
|
5761f0250faf71ed850c683d131d390c2f120205
|
[] |
no_license
|
NewYorkCityCouncil/hart_island_geocoder
|
dd0e1a182f0b91c5b9f588c23efd0d08608651a0
|
93883e71381549b7bb174a6306c4789838aa3f6f
|
refs/heads/master
| 2021-06-10T14:08:57.074354
| 2021-05-03T17:43:58
| 2021-05-03T17:43:58
| 182,321,439
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,756
|
r
|
hart_island_analysis.R
|
library(tidyverse)
library(janitor)
library(ggmap)
library(leaflet)
library(sf)
library(plotly)
# Load the geocoded burial records; rows without a longitude cannot be
# mapped, so drop them before converting to an sf point layer (WGS84).
df2 <- read_csv('hart_island_burial_records_geocoded.csv')
df2 <- df2[!is.na(df2$lon),] %>%
  st_as_sf(coords = c("lon", "lat")) %>% st_set_crs(4326)
# NYC borough boundaries, reprojected to match the points' CRS.
nyc_shape <- read_sf('Borough Boundaries/geo_export_f1dab35b-7e61-450a-9e6d-05e61a66f3ff.shp') %>%
  st_transform(st_crs(df2))
# Keep only points inside the 5 boroughs. This removes approximately 2%
# of the data (badly geocoded points outside the city).
df3 <- df2 %>%
  filter(st_contains(st_union(st_geometry(nyc_shape)), df2, sparse = FALSE))
# Parse the death date (stored as m/d/Y text) for temporal analysis.
df3$death_date <- as.Date(df3$death_date, "%m/%d/%Y")
# Bring in the roster of all PUBLIC hospitals/facilities (HHC list).
public <- read_csv('~/Documents/hart_island_geocode/Health_and_Hospitals_Corporation__HHC__Facilities.csv') %>%
  clean_names()
# Keep only the columns needed downstream (facility name and type,
# columns 1 and 3 of the cleaned file).
public_simplified <- dplyr::select(public, 1,3)
#create a new column that simply states public
col_public <- rep("public",length(public$facility_type))
#attach new column to public dataframe
public_simplified <- public_simplified %>%
add_column(col_public)
#change facility name to uppercase to match on Hart Island dataset
public_simplified$facility_name <- toupper(public_simplified$facility_name)
#map public simplified across HI df3
df3$fac_type <- with(df3, public_simplified$col_public[match(place_of_death, public_simplified$facility_name)])
#fill NA's with 'private' label
df3$fac_type[is.na(df3$fac_type)] <- 'private'
#add year column
df3$year <- format(df3$death_date, '%Y')
#understand distribution of nulls and of data as a whole
aggregate(age ~ year, data=df3, function(x) {sum(is.na(x))}, na.action = NULL)
table(df3$year)
#remove data before 1978 - too inconsistently reported
#remove unrealistic ages
df3 <- df3 %>%
filter(year >= 1978 & year <= 2016) %>%
filter(age < 115)
write_csv(df3, 'hart_island_updated_data.csv')
#new dataframe where names are corrected so that there's no overlap of points
#that have the same location but different names
df4 <- df3 %>%
mutate(group_var = paste(round(st_coordinates(geometry)[,1], 5), round(st_coordinates(geometry)[,2], 5))) %>%
group_by(group_var) %>%
count(place_of_death) %>%
arrange(desc(n)) %>%
filter(row_number() == 1) %>%
dplyr::select(-n, -group_var) %>%
st_join(x = df3,y=., left = TRUE, suffix = c("_new", "_old"))
#remove extras, rename _new
df4 <- df4[ , -which(names(df4) %in% c("group_var","place_of_death_old"))]
names(df4)[names(df4) == 'place_of_death_new'] <- 'place_of_death'
#geometry to coordinates, to eliminate issues when pulling distinct
# latlon <- st_coordinates(df4)
# df4 <- cbind(df4,latlon)
# df4$geometry <- NULL
#
# #distinct
# df4 <- distinct(df4) %>%
# drop_na(place_of_death, year)
write.csv(df4, 'full_data_hi.csv')
#make new dataframe with unique location and count of occurences per location
#df5 <- as.data.frame(table(df4$place_of_death, df4$year), stringsAsFactors = FALSE)
df5 <- df4 %>%
# st_as_sf(coords = c("Y", "X")) %>%
count(place_of_death, year) %>%
drop_na(place_of_death, year)
names(df5)[names(df5) == 'n'] <- 'count'
#add range
df5$range <- cut(as.numeric(df5$year), (0:11*4)+1977)
# df5 <- df5 %>% left_join(df4, by = c(""))
df5$range <- gsub(".*([0-9]{4}).*([0-9]{4}).*", '\\1-\\2', df5$range)
#join public hospitals df to the HI df so we can get a sense of which of those facilities are public
df5_combined <- df5 %>%
left_join(distinct(public_simplified, facility_name, .keep_all = TRUE), by = c('place_of_death' = 'facility_name'))
#if the facility type column is NA, it's a private hospital, facility or residence
# df5_combined$facility_type[is.na(df5_combined$facility_type)] <- 'private facility'
df5_combined$col_public[is.na(df5_combined$col_public)] <- 'Private Hospital'
df5_combined$facility_type <- NULL
#replace private with residential/other, nursing facility based on strings
df5_combined$col_public[!str_detect(tolower(df5_combined$place_of_death), 'hospital')] <- 'Residential/Other'
df5_combined$col_public[str_detect(tolower(df5_combined$place_of_death), 'nurs')] <- 'Nursing Facility'
df5_combined$col_public[str_detect(tolower(df5_combined$place_of_death), 'medical')] <- 'Private Hospital'
df5_combined$col_public[str_detect(tolower(df5_combined$col_public), 'public')] <- 'Public Hospital'
#grab only distinct columns, so there's only one instance per place of death and year
df5_combined <- distinct(df5_combined) %>%
drop_na(place_of_death, year)
#convert back to sf
# map_data <- st_as_sf(df5_combined, coords = c('X','Y'), crs =4326)
map_data <- st_as_sf(df5_combined) %>%
mutate(place_of_death = case_when(place_of_death == "BRONX MUNICIPAL HOSPITAL CENTER" ~ "JACOBI MEDICAL CENTER",
TRUE ~ place_of_death)) %>%
group_by(range, place_of_death, col_public) %>%
summarize(count = sum(count))
library(htmltools)
library(htmlwidgets)
map_data <- map_data[order(map_data$range),]
# pal <- colorFactor(c('#12B886', "#BE4BDB", '#228AE6', "#F59F00"), domain = unique(map_data$col_public))
pal <- colorFactor(c('#12B886', "#FF0000", '#BE4BDB', "#000080"), domain = unique(map_data$col_public))
#pal <- colorFactor(councildown::nycc_pal()(4), domain = unique(map_data$col_public))
#c("Residential/Other", "Private Hospital", "Nursing Facility", "Public Hospital"
map <- map_data %>%
leaflet() %>%
addProviderTiles("CartoDB.Positron") %>%
addCircleMarkers(fill = TRUE, fillOpacity = .6, stroke = FALSE,
popup = ~place_of_death, radius = ~sqrt(count),
color = ~pal(col_public),
group = ~range) %>%
addLayersControl(baseGroups = ~unique(range), position = 'topright',
options = layersControlOptions(collapsed = FALSE)) %>%
addLegend(values = ~col_public, pal = pal, position = 'bottomright', title = "Hart Island: Place of Death") %>%
setView(-73.88099670410158,40.72540497175607, zoom = 10.5)
map
tst <- map_data %>%
group_by(range, place_of_death, col_public) %>%
summarize(count = sum(count)) %>%
filter(str_detect(place_of_death, "JACOBI"), range == "1977-1981") %>%
st_geometry()
tst2 <- map_data %>%
group_by(range, place_of_death, col_public) %>%
summarize(count = sum(count))
tst2 %>%
ungroup() %>%
mutate(dist = st_distance(tst2, tst, sparse = FALSE) %>% as.numeric()) %>%
arrange(dist)
tst2[st_nearest_points(tst, tst2),]
map <-leaflet(map_data) %>%
addProviderTiles("CartoDB.Positron") %>%
#addMarkers(
#clusterOptions = markerClusterOptions())
addCircleMarkers(
fill = TRUE, fillOpacity = .45, stroke = FALSE,
popup = ~place_of_death, radius = ~sqrt(count),
color = ~pal(col_public),
group = ~range)
map
mapno2 <- leaflet() %>%
addTiles()
# %>%
# addLayersControl(~range)
# add leaflet-timeline as a dependency
# to get the js and css
mapno2$dependencies[[length(mapno2$dependencies)+1]] <- htmlDependency(
name = "leaflet-timeline",
version = "1.0.0",
src = c("href" = "http://skeate.github.io/Leaflet.timeline/"),
script = "javascripts/leaflet.timeline.js",
stylesheet = "stylesheets/leaflet.timeline.css"
)
mapno2 %>%
onRender(sprintf(
'
function(el,x){
var power_data = %s;
var timeline = L.timeline(power_data, {
pointToLayer: function(data, latlng){
var hue_min = 120;
var hue_max = 0;
var hue = hue_min;
return L.circleMarker(latlng, {
radius: 10,
color: "hsl("+hue+", 100%%, 50%%)",
fillColor: "hsl("+hue+", 100%%, 50%%)"
});
},
steps: 1000,
duration: 10000,
showTicks: true
});
timeline.addTo(this);
}
',
map_data
))
class(df5$count)
g <- list(
scope = 'new york',
projection = list(type = 'albers usa'),
showland = TRUE,
landcolor = toRGB("gray83"),
subunitwidth = 1,
countrywidth = 1,
subunitcolor = toRGB("white"),
countrycolor = toRGB("white")
)
p <- plot_geo(df5, locationmode = 'new york', sizes = c(1, 250), zoom = 20) %>%
add_markers(
x = ~X, y = ~Y,
size = ~count, color = ~col_public, hoverinfo = "text",
text = ~paste(df5$place_of_death, "<br />", df5$count, 'deaths')
) %>%
layout(title = '2014 US city populations<br>(Click legend to toggle)', geo = g)
p
# Extract to look exclusively at deaths since the beginning of 2007.
ten_years <- df3 %>% filter(death_date >= as.Date("2007-01-01"))
# NOTE(review): `df5_2` is never defined anywhere in this script, so the
# next line errors as written -- presumably it should reference df5 or
# df5_combined; confirm the intended comparison before running.
nrow(distinct(df5_2[c('place_of_death','year')])) - nrow(distinct(df4[c('place_of_death','year')]))
view(df4[!(duplicated(df4[c('place_of_death','year')]) | duplicated(df4[c('place_of_death','year')], fromLast = TRUE)), ])
|
8fe357c2e78f6e8bec5cd3aef3e391a1c43ba1ea
|
c6a9f08425ce0bb8220559b2633718fc1d515480
|
/man/dot-segments2df.Rd
|
388c765ce5d1d092b72e7dcceffe31e2c096f82d
|
[
"MIT"
] |
permissive
|
Pentchaff/auritus
|
bdd2d55f5c292e27b631e252d094865191551fb5
|
b743f27c09f59987cd15b83d619bec5ea462e4c5
|
refs/heads/master
| 2023-01-28T11:58:17.251265
| 2020-03-09T12:37:02
| 2020-03-09T12:37:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 321
|
rd
|
dot-segments2df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{.segments2df}
\alias{.segments2df}
\title{Converts segments list into a data.frame}
\usage{
.segments2df(settings)
}
\arguments{
\item{settings}{raw \code{yml} list}
}
\description{
Converts segments list into a data.frame
}
|
7d64dd68033fbf0ed98c48310340b34542778f6b
|
a98dda9f3fa5b60200d4b07d985753b737c38ed8
|
/plot1.R
|
f84cb9352afa90f6c7bb8726584a3f0a8556e8de
|
[] |
no_license
|
mccajr2/ExData_Plotting1
|
1329560cdf923c8a3bc67830f9c7412c4dda9971
|
4ab30fbc3f3534d5b1224d3748c7042c6e558340
|
refs/heads/master
| 2021-01-12T21:37:31.453155
| 2015-10-07T20:33:22
| 2015-10-07T20:33:22
| 43,839,420
| 0
| 0
| null | 2015-10-07T19:19:53
| 2015-10-07T19:19:52
| null |
UTF-8
|
R
| false
| false
| 785
|
r
|
plot1.R
|
# Exploratory Data Analysis (exdata-033) Course Project 1
# Plot 1: histogram of household Global_active_power readings,
# written straight to plot1.png (480x480).
plot1 <- function() {
  # Obtain the two-day data subset via the shared helper script.
  source("subsetdata.R")
  power <- subsetdata()

  # Open the PNG device, draw the red histogram, then close the device.
  png(filename = "plot1.png", height = 480, width = 480)
  hist(power$Global_active_power,
       col = "red",                               # histogram column fill color
       main = "Global Active Power",              # main title
       xlab = "Global Active Power (kilowatts)",  # x-axis label
       ylab = "Frequency")                        # y-axis label
  dev.off()
}
|
73996da160676bd4de090502555f1524e050bb59
|
1a737bd57821c38604c320f9bae5939944e9e312
|
/analysis.R
|
5aa40ea886aa20e37354a353eca9774a3c9d32ae
|
[] |
no_license
|
mdingemanse/huh
|
98fd7675a12c34b7970f0ab39d7347a9b4fad319
|
5d3d016c3473389082e13cbc2b2b4797d4422978
|
refs/heads/master
| 2021-01-18T12:56:37.638296
| 2017-08-16T13:49:40
| 2017-08-16T13:49:40
| 100,368,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,187
|
r
|
analysis.R
|
# Huh project analysis and plots
# Mark Dingemanse 2012-13
# ------------------------------
# Preliminaries -----------------------------------------------------------
# check for /in/ and /out/ directories (create them if needed)
# Ensure a subdirectory `x` exists under the current working directory:
# report its path on the console when found, otherwise create it and
# report the creation.
add_working_dir <- function(x) {
  if (file.exists(x)) {
    cat(x, "dir:", paste0(getwd(), "/", x, "/"))
  } else {
    dir.create(paste0(getwd(), "/", x))
    cat("subdirectory", x, "created in", getwd())
  }
}
add_working_dir("in")
add_working_dir("out")
# Packages and useful functions
list.of.packages <- c("tidyverse","ggthemes","GGally","directlabels","reshape2")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)>0) install.packages(new.packages)
lapply(list.of.packages, require, character.only=T)
rm(list.of.packages,new.packages)
# Load data ---------------------------------------------------------------
d = read.csv('in/data.txt', sep=" ")
# Select languages for which we have =<10 data points
numcases <- plyr::count(d,'language')
plus10 <- as.vector(numcases[which(numcases$freq > 9),]$language)
plus15 <- as.vector(numcases[which(numcases$freq > 14),]$language)
d.s <- droplevels(d[d$language %in% plus10,])
cases <- plyr::count(d.s,'language')
# Standard deviation of cases/language
sd(cases[,2])
# Visualisations ----------------------------------------------------------
# Build a ggplot theme and set it
windowsFonts(GillSans=windowsFont("Gill Sans MT"))
# Custom ggplot theme: grey base in the given family/size, with tightened
# axis-title placement, trimmed plot margins, a muted legend title, and
# optionally suppressed axis ticks.
theme_md <- function(ticks = TRUE, base_family = "GillSans", base_size = 18) {
  out <- theme_grey(base_family = base_family, base_size = base_size) +
    theme(
      axis.title.x = element_text(vjust = 0),
      axis.title.y = element_text(vjust = 0.2),
      plot.margin  = unit(c(0, 0, 0.5, 0.5), "lines"),
      legend.title = element_text(colour = "grey", face = "plain")
    )
  if (!ticks) {
    out <- out + theme(axis.ticks = element_blank())
  }
  out
}
theme_set(theme_md(base_size=18))
# Vowel quality -----------------------------------------------------------
# Create table of vowels by language
v = d.s[ , c("language", "height", "front")]
# Get rid of entries with NA values
v <- v[!is.na(v$height), ]
# draw every language at its mean front/height
v.langs <- v %>%
group_by(language) %>%
summarise(height=mean(height),front=mean(front)) %>%
mutate(height = 3-height)
# v.langs$height = 3 - v.langs$height # invert value so that axis doesn't need to be reversed
languages <- levels(v.langs$language)
# abbreviate the languages to their first letters
languages <- substr(levels(v.langs$language),1,3)
# break height into Low/Mid/Heigh to match individual Lg plots
ggplot(v.langs, aes(height, front, label=languages)) +
ylab("Height") +
xlab("Backness") +
coord_cartesian(ylim=c(-.5,6.5),xlim=c(-.5,6.5)) +
scale_y_continuous(breaks=seq(0,6,3), labels=c("Low","Mid","High")) +
scale_x_continuous(breaks=seq(0,6,3), labels=c("Front","Central","Back")) +
scale_colour_manual(values=brewer.pal(10,"Paired")) +
geom_dl(aes(label=languages),method=list("smart.grid",fontface="bold",cex=1))
ggsave(file="out/Figure_2_Languages_in_vowel_space.png", width=6.5,height=6)
# front/height plots by language with some jitter and alpha
cases <- sum(unlist(table(v$height)))
ggplot(v, aes(height, front)) +
geom_point(alpha=5/10,size=5,position = position_jitter(width = .2, height = .2)) +
ylab("Height") +
xlab("Backness") +
coord_cartesian(ylim=c(-.5,3.5),xlim=c(-.5,3.5)) +
scale_y_discrete(breaks=seq(0,3,1), labels=c("Low","","","Mid")) +
scale_x_reverse(breaks=seq(0,3,1), labels=c("Central","","","Front")) +
theme(panel.grid.minor=element_blank(),legend.position = "none") +
facet_wrap(~language)
ggsave(file=paste("out/Figure_3_Vowel_quality_n",cases,".png", sep=""), width=9,height=6)
# Formants ----------------------------------------------------------------
# Formant data for Spanish and Cha'palaa
formants = read.delim('in/formantData.txt')
formants$lg <- substr(formants$language, 1,1)
ggplot(formants, aes(f2, f1)) +
geom_point(size=4,alpha=0) +
coord_cartesian(xlim=c(2500,900),ylim=c(1000,250)) +
scale_x_reverse() +
scale_y_reverse() +
ylab("F1 (Hz)") +
xlab("F2 (Hz)") +
geom_dl(aes(label=formants$lg),method=list(fontface="bold",cex=1))
ggsave(file="out/Figure_4_Formants.png", width=6.5,height=6)
# Intonation --------------------------------------------------------------
# Create table of intonation by language
i = d.s[ , c("language", "int")]
# order data by relative frequency of a level of a factor
i.o <- transform(i, language = ordered(language, levels = names( sort(prop.table(table(i$int, i$language),2)[1,]))))
# remake product plot using ggplot / ggmosaic
i.f <- i %>%
drop_na(int) %>%
group_by(language,int) %>%
mutate(freq=n()) %>%
group_by(language) %>%
mutate(prop=freq/n()) %>%
mutate(intonation = int) %>%
distinct()
ggplot(i.f,aes(language,intonation,size=prop)) +
ylab("Intonation") +
xlab("") +
scale_size(range=c(1,12)) +
geom_point(shape=15) + guides(size=F) +
scale_y_continuous(minor_breaks=NULL,breaks=seq(-3,3,1), labels=c("Falling","","","Level","","","Rising")) +
theme(axis.text.x = element_text(angle=45, hjust=1))
ggsave(file=paste("out/Figure_5_Intonation_n",cases,".png", sep=""), width=6,height=6)
# Intonation: pitch tracks ------------------------------------------------
# load files into list, melt list
files <- list.files(path="in/pitch_data",pattern=".pitch")
filenames <- paste0("in/pitch_data/",files)
pitch <- lapply(filenames, read.delim, header = TRUE, comment = "A")
names(pitch) <- gsub(".pitch","",files)
pitch <- melt(pitch, id=c('time','st'))
pitch$L1 <- as.factor(pitch$L1)
# Add language column
pitch$language <- NA
pitch[grep("Sp",pitch$L1),]$language <- "Spanish"
pitch[grep("Ch",pitch$L1),]$language <- "Chapalaa"
pitch$language <- as.factor(pitch$language)
# Compute means and normalised semitones
d.m <- pitch %>%
group_by(L1) %>%
mutate(mean_st = mean(st)) %>%
mutate(norm_st = st - mean_st)
# Plot
ggplot(d.m, aes(time,norm_st,fill=L1)) +
geom_line(size = 0.6) +
xlab("Normalised time") +
ylab("Centered pitch (semitones)") +
coord_cartesian(ylim=c(-10,10)) +
scale_x_continuous(breaks=c(0,.2,.4,.6,.8,1.0)) +
facet_grid(. ~ language)
ggsave(file="out/Figure_6_Pitch_tracks.png", width=6,height=3.5)
# Onset -------------------------------------------------------------------
# Combined onset data, from glottal stop to zero to aspiration
onset = d.s[ , c("language", "onset")]
cases <- sum(unlist(table(d.s$onset)))
o.f <- onset %>%
drop_na(onset) %>%
group_by(language,onset) %>%
mutate(freq=n()) %>%
group_by(language) %>%
mutate(prop=freq/n()) %>%
distinct()
ggplot(o.f,aes(language,onset,size=prop)) +
scale_size(range=c(1,12)) +
geom_point(shape=15) + guides(size=F) +
ylab("Onset") +
xlab("") +
scale_y_continuous(minor_breaks=NULL,breaks=seq(-3,3,1), labels=c("Glottal stop","","","Ø","","","Aspiration")) +
theme(axis.text.x = element_text(angle=45, hjust=1))
ggsave(file=paste("out/Figure_7_Onset_n",cases,".png", sep=""), width=9,height=6)
# Nasality ----------------------------------------------------------------
nasal = d.s[ , c("language","nasal")]
cases <- sum(unlist(table(d.s$nasal)))
n.f <- nasal %>%
drop_na(nasal) %>%
group_by(language,nasal) %>%
mutate(freq=n()) %>%
group_by(language) %>%
mutate(prop=freq/n()) %>%
distinct()
ggplot(n.f,aes(language,nasal,size=prop)) +
scale_size(range=c(1,12)) +
geom_point(shape=15) + guides(size=F) +
ylab("Nasalisation") +
xlab("") +
scale_y_continuous(minor_breaks=NULL,breaks=seq(1,3,1), labels=c("Oral","","Nasal")) +
theme(axis.text.x = element_text(angle=45, hjust=1))
ggsave(file=paste("out/Figure_S1_Nasality_n",cases,".png", sep=""), width=9,height=6)
# Mouth closure (aperture) ------------------------------------------------
mouth = d.s[ , c("language", "mouth")]
cases <- sum(unlist(table(d.s$mouth)))
m.f <- mouth %>%
drop_na(mouth) %>%
group_by(language,mouth) %>%
mutate(freq=n()) %>%
group_by(language) %>%
mutate(prop=freq/n()) %>%
distinct()
ggplot(m.f,aes(language,mouth,size=prop)) +
scale_size(range=c(1,12)) +
geom_point(shape=15) + guides(size=F) +
ylab("Mouth closure") +
xlab("") +
scale_y_continuous(minor_breaks=NULL,breaks=seq(-4,2), labels=c("Closed","","","Intermediate", "","", "Open")) +
theme(axis.text.x = element_text(angle=45, hjust=1))
ggsave(file=paste("out/Figure_S2_Closure_n",cases,".png", sep=""), width=9,height=6)
# Onset aspiration --------------------------------------------------------
# N.B. zero in this plot means coded as zero, not coded as "g" (which is also "not asp").
asp = d.s[ , c("language", "asp")]
cases <- sum(unlist(table(d.s$asp)))
a.f <- asp %>%
drop_na(asp) %>%
group_by(language,asp) %>%
mutate(freq=n()) %>%
group_by(language) %>%
mutate(prop=freq/n()) %>%
distinct()
ggplot(a.f,aes(language,asp,size=prop)) +
scale_size(range=c(1,12)) +
geom_point(shape=15) + guides(size=F) +
ylab("Aspiration at onset") +
xlab("") +
scale_y_continuous(minor_breaks=NULL,breaks=c(0:3), labels=c("Ø","","","h")) +
theme(axis.text.x = element_text(angle=45, hjust=1))
#ggsave(file=paste("out/Onset aspiration - n",cases,".png", sep=""), width=9,height=6)
# Onset: glottis action ---------------------------------------------------
# N.B. zero in this plot means coded as zero, not coded as "h" (which is also "not glot").
glot = d.s[ , c("language", "glot")]
cases <- sum(unlist(table(d.s$glot)))
g.f <- glot %>%
drop_na(glot) %>%
group_by(language,glot) %>%
mutate(freq=n()) %>%
group_by(language) %>%
mutate(prop=freq/n()) %>%
distinct()
ggplot(g.f,aes(language,glot,size=prop)) +
scale_size(range=c(1,12)) +
geom_point(shape=15) + guides(size=F) + ylab("Glottal stop onset") +
xlab("") +
theme(axis.text.y = element_text(family="Times New Roman")) +
scale_y_continuous(minor_breaks=NULL,breaks=c(0:3), labels=c("Ø", "","", "ʔ")) +
theme(axis.text.x = element_text(angle=45, hjust=1))
#ggsave(file=paste("out/Onset glottal - n",cases,".png", sep=""), width=9,height=6)
|
9b20226c7411f0323a33616a865a42a420ae68b3
|
10867272feb49d6de65b0ff8ee29c07994268e2a
|
/Q15.R
|
ecaa37fce4b7864cf0f508b6cd06e527d87f80f1
|
[] |
no_license
|
umairhanif00/R-Assignment-2
|
8d47e93176c5187c43857741bb3ed3ec793efd5a
|
36d78223433f50c57fb2724270144e4ee307c3d5
|
refs/heads/master
| 2021-01-23T04:39:44.261903
| 2017-03-27T11:33:47
| 2017-03-27T11:33:47
| 86,233,545
| 0
| 0
| null | 2017-03-26T13:12:26
| 2017-03-26T13:12:26
| null |
UTF-8
|
R
| false
| false
| 276
|
r
|
Q15.R
|
# Question 15: total charges for Consultation procedures.
# TotalCharges arrives as a factor; convert via character first so factor
# levels are not silently recoded as their integer codes.
dataf$TotalCharges <- as.numeric(as.character(dataf$TotalCharges))
consult_amount <- dataf %>%
  select(Procedure, TotalCharges) %>%
  filter(Procedure == 'Consultation') %>%
  group_by(Procedure) %>%
  # Name the summary column (the bare sum() call produced an awkward
  # auto-generated name); NAs from the coercion above are dropped.
  summarise(total_charges = sum(TotalCharges, na.rm = TRUE)) %>%
  print()
|
061a30a30b25fa52073efd8fbacb4fa6c4f24d81
|
1475700f8fe61cb8b8ebaad5788e67ad75a15ce5
|
/man/e_mapa_3D.Rd
|
020a7af4c7e62dd7c79d8143a9266a5778fc28e9
|
[] |
no_license
|
PROMiDAT/discoveR
|
ffc0607935867b124a7c4d724e18513353f3a9e2
|
7689cee7fa2f5425171c8c9e5e714462b17c810f
|
refs/heads/master
| 2023-03-21T01:49:33.226457
| 2023-02-28T20:50:13
| 2023-02-28T20:50:13
| 160,723,382
| 3
| 1
| null | 2021-01-12T19:50:46
| 2018-12-06T19:42:38
|
R
|
UTF-8
|
R
| false
| true
| 934
|
rd
|
e_mapa_3D.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_cluster_utils.R
\name{e_mapa_3D}
\alias{e_mapa_3D}
\title{PCA plot of individuals colored by clusters}
\usage{
e_mapa_3D(pca.model, clusters, colores = NULL, ejes = c(1, 2, 3), etq = F)
}
\arguments{
\item{pca.model}{an object of class PCA [FactoMineR].}
\item{clusters}{a vector specifying the cluster of each individual.}
\item{colores}{a vector of color for each cluster.}
\item{ejes}{a numeric vector of length 3 specifying the dimensions to be plotted.}
\item{etq}{a boolean, whether to add label to graph or not.}
}
\value{
echarts4r plot
}
\description{
PCA plot of individuals colored by clusters
}
\examples{
p <- FactoMineR::PCA(iris[, -5], graph = FALSE)
clusters <- factor(kmeans(iris[, -5], 3)$cluster)
e_mapa_3D(p, clusters, c("steelblue", "pink", "forestgreen"), etq = FALSE)
}
\author{
Diego Jimenez <diego.jimenez@promidat.com>
}
|
30160f4a6259db9c51118d7b0bfa6829c621b046
|
0b4c74473a3e93685f1edd5616fb656755e6ad51
|
/R/Analysis/revLetterExtra.R
|
35524ceb656a2fa19edfc3b9061dcb5bb5689abc
|
[] |
no_license
|
s7minhas/Rhodium
|
f6dd805ce99d7af50dd10c2d7f6a0eeae51f3239
|
0627acabab9e5302e13844de2c117325cdf0f7ea
|
refs/heads/master
| 2020-04-10T03:59:46.069362
| 2016-11-02T20:21:57
| 2016-11-02T20:21:57
| 8,080,455
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,920
|
r
|
revLetterExtra.R
|
if(Sys.info()["user"]=="janus829" | Sys.info()["user"]=="s7m"){
source('~/Research/Rhodium/R/setup.R')}
if(Sys.info()["user"]=="Ben"){source('/Users/Ben/Github/Rhodium/R/setup.R')}
# Load conflict country year data
setwd(pathData)
# load('fullData.rda')
# modData = fullData # For full panel extensions
load('combinedData.rda')
modData=yData # For country-conflict-year extensions
# Gen tikz?
genTikz=FALSE
# CREATE APPROPRIATE VARIABLES FOR REGRESSIONS
###################################################################
# Log transforming DVs
modData$lngdp_l0 = log(modData$gdp_l0)
modData$lngdp = log(modData$gdp)
modData$lngdpGr_l0 = (modData$lngdp_l0-modData$lngdp)/modData$lngdp_l0
modData$lngdpGr_l0 = modData$gdpGr_l0
# Transformations for other controls
modData$lngdpCap = log(modData$gdpCap)
modData$lninflation_l1 = logTrans(modData$inflation_l1)
modData$democ = as.numeric(modData$polity2>=6)
modData$polity2 = modData$polity2 + 11
###################################################################
###################################################################
# R1: low GDP correlateds with periods of no conflict
# T-test between GDP and civwar
## we use logged measure of GDP to deal with outliers
# Across full panel
with(modData, t.test(lngdp~civwar, alternative='two.sided') )
###################################################################
###################################################################
# Interaction wont work
set.seed(6886)
fakeData = data.frame(y = rnorm(100), bin=rbinom(100,1,0.5), x=0)
fakeData$x[fakeData$bin==1] = rnorm(sum(fakeData$bin), .23, .25)
fakeData$i = fakeData$x*fakeData$bin
###################################################################
###################################################################
# Number of t observations by country
# Transformations for conflict variables
modData$lnminDist.min <- log(modData$minDist.min+1)
modData$lncapDist.min <- log(modData$capDist.min+1)
modData$lnminDist.mean <- log(modData$minDist.mean+1)
modData$lncapDist.mean <- log(modData$capDist.mean+1)
modData$lnminDistACLED.min <- log(modData$minDistACLED.min+1)
modData$lnminDistACLED.mean <- log(modData$minDistACLED.mean+1)
modData$lncapDistACLED.min <- log(modData$acledCapDist.min+1)
modData$lncapDistACLED.mean <- log(modData$acledCapDist.mean+1)
modData$Int.max <- modData$Int.max-1
dv = 'lngdpGr_l0'
kivs = c('lnminDist.mean', 'lncapDist.mean')
cntrls = c(
'Int.max',
'durSt1max', 'confAreaProp', 'nconf',
'upperincome', 'lninflation_l1', 'polity2', 'resourceGDP', 'gdpGr.mean_l0')
vars = c(dv, kivs, cntrls, 'ccode', 'year')
slice = na.omit( modData[,vars] )
cntryCnts = table(slice$ccode)
length(cntryCnts[cntryCnts>5]) / length(cntryCnts)
length(cntryCnts[cntryCnts<=5]) / length(cntryCnts)
# ~53% of data has fewer than five observations per unit
###################################################################
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.