blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e43ca99100576ab8d917564a3155c0070a25b8bf
|
e995763e6f8d64e3c3af92da06922adacac96d3a
|
/funding_ratio_v2.R
|
efe80b2176ab6e974b3de9f5a618a940372d679b
|
[] |
no_license
|
bmpriya389/Pension_Simulator
|
718a735e05a81da0603e6a789ca663fb906e3aed
|
b05df231a78e1ea7b9c08053d8a67031563107c6
|
refs/heads/master
| 2021-01-18T21:44:52.505126
| 2016-05-26T21:50:22
| 2016-05-26T21:50:22
| 58,401,189
| 0
| 0
| null | 2016-05-09T19:07:19
| 2016-05-09T19:07:19
| null |
UTF-8
|
R
| false
| false
| 3,073
|
r
|
funding_ratio_v2.R
|
# funding_ratio_v2.R -- driver script for the pension simulator.
# Exercises the actuarial helper functions defined in functions.R (ARC,
# funding ratio, normal cost, AAL, term/vesting costs, annuity factors,
# salary scales, mortality probabilities and population generation).
#
# Fixes relative to the original draft:
#   * `population` and `age_limit` were used before being assigned; the
#     assignments now precede the calls that consume them.
#   * Exact duplicate re-assignments of afc, pop and ea were removed
#     (nothing modified them in between).
library(BayesBridge)
rm(list=ls())
source("functions.R")
###################### Load winklevoss and mortality tables #################################
load('E://SUNYALBANY/Pension_Simulation_Project/Code/Data/winklevossdata.RData')
load('E://SUNYALBANY/Pension_Simulation_Project/Code/Data/Mortality.RData')
######################### Parameters constant but likely to change ##########################
ea<-30                # entry age
a_sgr<-0.05           # actual salary growth rate
sal=60000             # starting salary
sgr<-0.0568           # assumed salary growth rate
mort<-2               # mortality table selector
afc<-5                # years averaged for final compensation
bf<-0.02              # benefit factor
i<-0.08               # discount/interest rate
cola<-0.01            # cost-of-living adjustment
amortization<-30      # amortization period (years)
vesting<-5            # vesting period (years)
cm<-'EAN'             # cost method: entry age normal
median_p<-0.854
median_dr<-0.08
median_sgr<-0.0568
pop<-rep(1,71)        # one member at every age in the 71-age span
retire<-65            # retirement age
get_qxt(ea,retire)
pop_type='Uniform'
pop_size=100000
ca=37
median=45
inflation=2
active<-seq(30,65)
pgr<-sgr-inflation    # NOTE(review): negative here (0.0568 - 2) -- confirm units of `inflation`
retirees<-seq(66,100)
sum(get_ARC(pop,ea,retire,median_p,median_dr,median_sgr,i,a_sgr,sgr,pgr,cola,afc,bf,cm,mort,vesting,amortization))
get_FR(pop,ea,retire,median_p,median_dr,median_sgr,i,a_sgr,sgr,cola,afc,bf,cm,mort,vesting)
get_median_asset(ea,retire,median_p,median_dr,median_sgr,a_sgr,cola,afc,bf,cm,mort,vesting)
get_nc_pop(pop,ea,retire,i,a_sgr,sgr,cola,afc,bf,cm,mort,vesting)
get_NC(ea,retire,i,a_sgr,sgr,cola,afc,bf,cm,mort,vesting)
active<-rep(1,36)
retirees<-rep(1,71)
pgr<-3.68
get_stat(ea,retire,active,retirees,i,a_sgr,sgr,cola,afc,bf,cm,mort,vesting)
get_aal_pop(pop,ea,retire,i,a_sgr,sgr,cola,afc,bf,cm,mort,vesting)
get_AAL(ea,retire,i,a_sgr,sgr,cola,afc,bf,cm,mort,vesting)
get_PVTC_t(ea,retire,i,a_sgr,sgr,cola,afc,bf,mort,vesting)
# NOTE(review): `a` is never assigned anywhere in this script before the two
# calls below -- TODO confirm what get_PVTC()/get_tla() expect as their last
# argument (they will error on evaluation as written).
get_PVTC(ea,retire,i,a_sgr,sgr,cola,afc,bf,mort,vesting,a)
get_term_cost(ea,retire,i,a_sgr,sgr,cola,afc,bf,mort,vesting)
get_vesting_cost(ea,retire,i,a_sgr,cola,afc,bf,mort,vesting)
get_gxv(ea,retire,vesting)
get_xPVFBr(ea,retire,i,a_sgr,sgr,cola,afc,bf,mort)
get_rPVFBx(ea,retire,i,a_sgr,sgr,cola,afc,bf,mort)
get_a_after_r(ea,retire,i,cola,mort)
get_tla_t(ea,retire,i,sgr,mort)
get_tla(ea,retire,i,sgr,mort,a)
get_am(ea,retire,i,amortization)
get_ar(ea,retire,i,cola,mort)
get_replacement_rate(ea,retire,a_sgr,sgr,afc,bf)
get_sal_ca(ea,retire,ca,sal,inflation,sgr)
get_sal_vector_ea(ea,retire,sal,inflation,sgr)
get_rxpxT(ea,retire,mort)
get_xpxT(ea,retire,mort)
get_vxr(ea,retire,i)
get_vrx(ea,retire,i)
get_acc_benefit_65(ea,retire,a_sgr,sgr,afc,bf)
afc<-10               # switch to a 10-year AFC for the accrued-benefit checks below
get_acc_benefit_r(ea,retire,a_sgr,sgr,afc,bf)
get_acc_benefits(ea,retire,a_sgr,afc,bf)
get_xpmr(ea,retire,mort)
get_rpmx(ea,retire,mort)
get_xpmx(ea,retire,mort)
get_mort(ea,retire,mort)
get_exp_sal_r(ea,retire,a_sgr,sgr)
get_acc_sal(ea,retire,a_sgr)
get_exp_sal(ea,retire,sgr)
get_act_sal(ea,retire,a_sgr)
3.5105/5.516
get_a_after_r(30,65,0.08,0.02,2)
get_act_sal(30,65,0.056)[length(get_act_sal(30,65,0.056))]
# `population` must exist before population_retirees() can consume it
# (the original called population_retirees() one line too early).
population=generate_pop(ea,retire,pop_type,pop_size,median)
population_retirees(ea,retire,mort,population)
# `age_limit` must exist before age_grp_labels() is called.
age_limit=70
age_grp_labels(ea,retire,age_limit)
age_information(ea,retire)
|
9c2781c79468b04c3d057037d9a03816e98063d8
|
8ad7ba2d480b741f6dc0fbc8bfa1cd0852f92571
|
/frailty_20160823.R
|
1b24f7ffddcfa69ba4b3ad092dd3a2c7c72f1f66
|
[] |
no_license
|
ElisabethDahlqwist/Frailty
|
8cc2161f1a62b86ed4a1af7f978b5de30f51f773
|
a8ed550b26a7bf172435bd9ee69e7155b7cb693f
|
refs/heads/master
| 2020-04-06T07:09:09.803119
| 2016-08-25T12:05:42
| 2016-08-25T12:05:42
| 65,536,161
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,789
|
r
|
frailty_20160823.R
|
rm(list=ls())
library(data.table)
#library(tictoc)
library(numDeriv)
library(survival)
#---DATA GENERATION---
# Simulate clustered Weibull survival data with a shared Gamma frailty:
# n clusters of m subjects each, with right-censoring and cluster-level
# ("strong") left-truncation.  True parameter values below.
n=20
m=2
eta.true=1
alpha0.true=1.8
theta.true=0.4
beta1.true=1.3
beta2.true=2
beta3.true=0.5
par.true=c(alpha0.true,eta.true,theta.true,beta1.true, beta2.true, beta3.true)
id=rep(1:n,each=m)
# Cluster-shared Gamma frailty (mean 1, variance theta.true), repeated for
# every subject within a cluster.
u=rep(rgamma(n,shape=1/theta.true,scale=theta.true),each=m)
x=rnorm(n*m)
z=rnorm(n*m)
# Individual Weibull scale absorbing the frailty and the covariate effects
# (x, z and their interaction).
alpha=alpha0.true/(u*exp(beta1.true*x + beta2.true*z + beta3.true*z*x))^(1/eta.true)
t=rweibull(n*m,shape=eta.true,scale=alpha)
#right-censoring
#c <- rep(Inf,n*m)
c <- runif(n*m,0,10)
delta <- as.numeric(t<c)
t <- pmin(t,c)
#strong left-truncation
#tstar <- rep(0,n*m)
tstar <- runif(n*m,0,2)
incl <- t>tstar
# "Strong" truncation: a cluster is kept only if EVERY member survives past
# its truncation time.
incl <- ave(x=incl,id,FUN=sum)==m
id <- id[incl]
x <- x[incl]
z <- z[incl]
t <- t[incl]
delta <- delta[incl]
tstar <- tstar[incl]
data <- data.frame(t, tstar, delta, x, z, id)
d <- as.vector(tapply(delta,id,FUN=sum))
D <- max(d)
# NOTE(review): `j` is read inside frailty()'s likelihood/gradient/Hessian
# as a free (global) variable -- keep this assignment before calling frailty().
j <- 0:(D-1)
#---LIKELIHOOD FUNCTION---
# Starting values on the optimizer's scale: log for alpha/eta/theta,
# identity for the regression coefficients.
logp <- c(log(alpha0.true),log(eta.true),log(theta.true),beta1.true, beta2.true, beta3.true)
formula=Surv(tstar, t, delta) ~ x + z + z*x
clusterid<-"id"
# Fit a shared Gamma-frailty Weibull survival model by maximum likelihood,
# supporting right-censoring and cluster-level left-truncation.
#
# Arguments:
#   formula   -- Surv(tstar, t, delta) ~ covariates, where tstar is the
#                truncation time, t the event/censoring time and delta the
#                event indicator.
#   data      -- data.frame holding every variable in `formula` plus the
#                cluster id column.
#   logp      -- starting values c(log alpha, log eta, log theta, betas);
#                defaults to all zeros when missing.
#   clusterid -- name (string) of the cluster id column in `data`.
#
# Returns a list of class "frailty" with the optim() fit, parameter vector,
# per-cluster score matrix, hessian and bookkeeping fields.
#
# NOTE(review): the inner functions read `j` (= 0:(max(d)-1)) as a free
# variable; it is resolved in the environment where frailty() was defined
# (the global environment in this script) -- TODO pass it explicitly.
frailty <- function(formula, data, logp, clusterid){
call <- match.call()
## Defining input arguments
X <- as.matrix(model.matrix(formula, data)[, -1])
clusterid <- data[, clusterid]
n <- nrow(X)
ncluster <- length(unique(clusterid))
nbeta <- length(attr(terms(formula), "term.labels"))
npar <- 3 + length(attr(terms(formula), "term.labels"))
if(missing(logp)) logp <- c(rep(0, npar))
# Pull tstar / t / delta out of the Surv(tstar, t, delta) response by
# positional indexing into the formula's LHS call.
tstar <- data[, as.character(terms(formula)[[2]][2])]
t <- data[, as.character(terms(formula)[[2]][3])]
delta <- data[, as.character(terms(formula)[[2]][4])]
d <- as.vector(tapply(delta,clusterid,FUN=sum))
#### Likelihood ####
# Negative mean log-likelihood, minimized by optim() below.
like <- function(logp){
# Defining parameter names
alpha <- exp(logp[1])
eta <- exp(logp[2])
theta <- exp(logp[3])
beta <- as.matrix(logp[4:npar])
# Constructing elements in the log-likelihood
B <- as.vector(X%*%beta)
h <- delta*log(eta*t^(eta-1)/alpha^eta*exp(B))
H <- as.vector((t/alpha)^eta*exp(B))
Hstar <- (tstar/alpha)^eta*exp(B)
# Aggregate h, H, Hstar to per-cluster sums via data.table.
temp <- data.table(h,H,Hstar)
temp <- temp[, j = lapply(.SD, sum), by = clusterid]
h <- temp$h
H <- temp$H
Hstar <- temp$Hstar
# `j` below is the free (global) variable 0:(D-1), not data.table's j.
G <- d*log(theta)+cumsum(c(0,log(1/theta+j)))[d+1]
# Log-likelihood
ll <- -mean(G+h+1/theta*log(1+theta*Hstar)-(1/theta+d)*log(1+theta*H))
return(ll)
}
# The function "temp" aggregate the data table x by cluster id
# (returns per-cluster sums with the id column dropped).
temp <- function(x){
temp <- data.table(x)
temp <- as.matrix(temp[, j = lapply(.SD, sum), by = clusterid])[, -1]
}
#### Gradient ####
# Negative gradient of the mean log-likelihood; with score=TRUE it instead
# returns the per-cluster score matrix (one row per cluster).
gradientfunc <- function(logp, score=F){
# NOTE(review): dead code -- `score` has a default, so missing(score) is
# always FALSE here.
if(missing(score)) score = FALSE
alpha <- exp(logp[1])
eta <- exp(logp[2])
theta <- exp(logp[3])
beta <- as.matrix(logp[4:npar])
# Constructing elements for gradient
B <- as.vector(X%*%beta)
h.eta <- delta*(1+eta*(log(t)-log(alpha)))
h.beta <- X*delta
H <- (t/alpha)^eta*exp(B)
Hstar <- (tstar/alpha)^eta*exp(B)
H.eta <- eta*log(t/alpha)*H
Hstar.eta <- eta*log(tstar/alpha)*Hstar
# log(0) would give -Inf * 0 = NaN for untruncated subjects; force 0.
Hstar.eta[tstar==0] <- 0
H.beta <- X * H
Hstar.beta <- X* Hstar
# Aggregate all elements that are sums over cluster id
h.alpha <- -d*eta
h.eta <- temp(h.eta)
h.beta <- temp(h.beta)
H <- temp(H)
Hstar <- temp(Hstar)
H.alpha <- -eta*H
H.eta <- temp(H.eta)
Hstar.eta <- temp(Hstar.eta)
H.beta <- temp(H.beta)
Hstar.beta <- temp(Hstar.beta)
Hstar.alpha <- -eta*Hstar
K <- H/(1+theta*H)
Kstar <- Hstar/(1+theta*Hstar)
G.theta <- d-cumsum(c(0,1/(1+theta*j)))[d+1]
# Second derivatives of the log-likelihood
dl.dalpha <- h.alpha+Hstar.alpha/(1+theta*Hstar)-(1+theta*d)*H.alpha/(1+theta*H)
dl.deta <- h.eta+Hstar.eta/(1+theta*Hstar)-(1+theta*d)*H.eta/(1+theta*H)
dl.dtheta <- G.theta+1/theta*(log(1+theta*H)-log(1+theta*Hstar))+Kstar-(1+d*theta)*K
dl.dbeta <- as.matrix(h.beta+Hstar.beta/(1+theta*Hstar)-(1+theta*d)*H.beta/(1+theta*H))
# Gradient for all parameters
gradient <- -c(mean(dl.dalpha), mean(dl.deta), mean(dl.dtheta), colMeans(dl.dbeta))
# Score function for all parameters and individuals
if(score == TRUE){
score <- -cbind(dl.dalpha, dl.deta, dl.dtheta, dl.dbeta)
return(score=score)
}
else {return(gradient)}
#names(gradient) <- c("logalpha","logeta","logtheta","beta")
}
#### Hessian ####
# Hessian of the negative mean log-likelihood, assembled blockwise
# (alpha/eta/theta rows first, then the beta block).
hessianfunc <- function(logp){
alpha <- exp(logp[1])
eta <- exp(logp[2])
theta <- exp(logp[3])
beta <- as.matrix(logp[4:npar])
B <- as.vector(X%*%beta)
XX <- c(X)*X[rep(1:nrow(X), nbeta), ]
h.eta <- delta*(1+eta*(log(t)-log(alpha)))
H <- (t/alpha)^eta*exp(B)
Hstar <- (tstar/alpha)^eta*exp(B)
H.eta <- eta*log(t/alpha)*H
Hstar.eta <- eta*log(tstar/alpha)*Hstar
Hstar.eta[tstar==0] <- 0
H.eta.eta <- H.eta+eta^2*(log(t/alpha))^2*H
Hstar.eta.eta <- Hstar.eta+eta^2*(log(tstar/alpha))^2*Hstar
Hstar.eta.eta[tstar==0] <- 0
H.eta.beta <- eta*log(t/alpha)*(H*X)
Hstar.eta.beta <- eta*log(tstar/alpha)*(Hstar*X)
Hstar.eta.beta[tstar==0] <- 0
H.beta <- cbind(H[rep(1:length(H), nbeta)]*XX, clusterid)
Hstar.beta <- cbind(Hstar[rep(1:length(H), nbeta)]*XX, clusterid)
# Aggregate
h.eta <- temp(h.eta)
H <- temp(H)
Hstar <- temp(Hstar)
H.eta <- temp(H.eta)
Hstar.eta <- temp(Hstar.eta)
H.eta.eta <- temp(H.eta.eta)
Hstar.eta.eta <- temp(Hstar.eta.eta)
H.eta.beta <- temp(H.eta.beta)
Hstar.eta.beta <- temp(Hstar.eta.beta)
h.alpha.alpha <- 0
h.alpha.eta <- -d*eta
h.eta.eta <- h.eta-d
H.alpha <- -eta*H
Hstar.alpha <- -eta*Hstar
H.alpha.alpha <- eta^2*H
Hstar.alpha.alpha <- eta^2*Hstar
H.alpha.eta <- -eta*(H+H.eta)
Hstar.alpha.eta <- -eta*(Hstar+Hstar.eta)
K <- H/(1+theta*H)
Kstar <- Hstar/(1+theta*Hstar)
G.theta.theta <- cumsum(c(0,theta*j/(1+theta*j)^2))[d+1]
# Hessian, derivative of gradient of alpha with respect to all parameters except beta
dl.dalpha.dalpha <- -mean(h.alpha.alpha+Hstar.alpha.alpha/(1+theta*Hstar)-
theta*(Hstar.alpha/(1+theta*Hstar))^2-
(1+theta*d)*(H.alpha.alpha/(1+theta*H)-
theta*(H.alpha/(1+theta*H))^2))
dl.dalpha.deta <- -mean(h.alpha.eta+Hstar.alpha.eta/(1+theta*Hstar)-
theta*Hstar.alpha*Hstar.eta/(1+theta*Hstar)^2-
(1+theta*d)*(H.alpha.eta/(1+theta*H)-
theta*H.alpha*H.eta/(1+theta*H)^2))
dl.dalpha.dtheta <- -mean(theta*(-Hstar.alpha*Hstar/(1+theta*Hstar)^2+
H.alpha*H/(1+theta*H)^2-d*(H.alpha/(1+theta*H)-theta*H.alpha*H/(1+theta*H)^2)))
dl.dalpha <- cbind(dl.dalpha.dalpha, dl.dalpha.deta, dl.dalpha.dtheta)
# Hessian, derivative of gradient of eta with respect to all parameters except beta
dl.deta.deta <- -mean(h.eta.eta+Hstar.eta.eta/(1+theta*Hstar)-
theta*(Hstar.eta/(1+theta*Hstar))^2-(1+theta*d)*
(H.eta.eta/(1+theta*H)-theta*(H.eta/(1+theta*H))^2))
dl.deta.dtheta <- -mean(theta*(-Hstar.eta*Hstar/(1+theta*Hstar)^2+
H.eta*H/(1+theta*H)^2-d*(H.eta/(1+theta*H)-
theta*H.eta*H/(1+theta*H)^2)))
dl.deta <- cbind(dl.dalpha.deta, dl.deta.deta, dl.deta.dtheta)
# Hessian, derivative of gradient of theta with respect to all parameters except beta
dl.dtheta.dtheta <- -mean(G.theta.theta+1/theta*(log(1+theta*Hstar)-log(1+theta*H))+
K-Kstar+theta*(K^2-Kstar^2)+d*theta*K*(theta*K-1))
dl.dtheta <- cbind(dl.dalpha.dtheta, dl.deta.dtheta, dl.dtheta.dtheta)
# Hessian, derivative of gradient of beta with respect to all parameters
# (H, Hstar, XX are rebuilt at subject level because they were overwritten
# by per-cluster aggregates above).
H <- (t/alpha)^eta*exp(B)
Hstar <- (tstar/alpha)^eta*exp(B)
XX <- c(X)*X[rep(1:nrow(X), nbeta), ]
nbeta_rep <- rep(1:nbeta, each = nrow(X))
### Creating squared cluster sums of H.beta and Hstar.beta
H.beta <- as.matrix(temp(H * X))
H.beta2 <- H.beta[rep(1:nrow(H.beta), nbeta), ] * c(H.beta)
Hstar.beta <- as.matrix(temp(Hstar * X))
Hstar.beta2 <- Hstar.beta[rep(1:nrow(Hstar.beta), nbeta), ] * c(Hstar.beta)
### Creating Cross products of covariates multiplied with H and Hstar
Hstar.beta.beta <- data.table(nbeta_rep, clusterid, Hstar * XX)
H.beta.beta <- data.table(nbeta_rep, clusterid, H * XX)
### Aggregate H and Hstar over clusters
H <- temp(H)
Hstar <- temp(Hstar)
### Calculating Hstar2 <- theta*(sum(H*X))^2/(1+theta*sum(H))^2 and H2 <- (1+d*theta)*(sum(H*X))^2/(1+theta*H)^2
Hstar2 <- theta * Hstar.beta2 / (1 + theta * Hstar)^2
H2 <- theta * (1+d*theta) * H.beta2 / (1 + theta * H)^2
### Aggregate Hstar.beta.beta and H.beta.beta over cluster
# NOTE(review): this wraps data.tables that already contain nbeta_rep and
# clusterid columns, so those columns appear twice before [, -1:-2] drops
# the outer pair -- TODO confirm this duplication is intentional.
Hstar.beta.beta <- data.table(clusterid, nbeta_rep, Hstar.beta.beta)
Hstar.beta.beta <- as.matrix(Hstar.beta.beta[, j = lapply(.SD, sum), by = .(nbeta_rep, clusterid)])[, -1:-2, drop=FALSE] # because columns are droped it is no longer a matrix
H.beta.beta <- data.table(clusterid, nbeta_rep, H.beta.beta)
H.beta.beta <- as.matrix(H.beta.beta[, j = lapply(.SD, sum), by = .(nbeta_rep, clusterid)])[, -1:-2, drop=FALSE]
### Calculate Hstar1 <- Hstar.beta.beta/(1+theta*Hstar) and H1 <- theta * (1 + d * theta)*H.beta.beta/(1+theta*H)
# NOTE(review): Hstar1/H1 (and Hstar2/H2 above) are computed but never used;
# dl.dbeta.dbeta below recomputes the same quantities inline.
Hstar1 <- Hstar.beta.beta / (1 + theta * Hstar)
H1 <- (1 + d * theta) * H.beta.beta / (1 + theta * H)
dl.dbeta.dbeta <- -(Hstar.beta.beta/(1+theta*Hstar)-
theta*Hstar.beta2/(1+theta*Hstar)^2-(1+theta*d)*
(H.beta.beta/(1+theta*H)-theta*H.beta2/(1+theta*H)^2))
## Aggregate over clusters
nbeta_rep2 <- rep(1:nbeta, each = length(H))
dl.dbeta.dbeta <- data.table(nbeta_rep2, dl.dbeta.dbeta)
dl.dbeta.dbeta <- as.matrix(dl.dbeta.dbeta[, j = lapply(.SD, mean), by = .(nbeta_rep2)])[, -1]
# Derivative of gradient of alpha with respect to beta
H.alpha.beta <- -eta*H.beta
Hstar.alpha.beta <- -eta*Hstar.beta
dl.dalpha.dbeta <- -(colMeans(as.matrix(Hstar.alpha.beta/(1+theta*Hstar)-
theta*Hstar.alpha*Hstar.beta/(1+theta*Hstar)^2-
(1+theta*d)*(H.alpha.beta/(1+theta*H)-
theta*H.alpha*H.beta/(1+theta*H)^2))))
dl.dalpha <- cbind(dl.dalpha, t(dl.dalpha.dbeta))
# Derivative of gradient of eta with respect to beta
dl.deta.dbeta <- -t(colMeans(as.matrix(Hstar.eta.beta/(1+theta*Hstar)-
theta*Hstar.eta*Hstar.beta/(1+theta*Hstar)^2-
(1+theta*d)*(H.eta.beta/(1+theta*H)-
theta*H.eta*H.beta/(1+theta*H)^2))))
dl.deta <- cbind(dl.deta, dl.deta.dbeta)
# Derivative of gradient of theta with respect to beta
dl.dtheta.dbeta <- -t(colMeans(as.matrix(theta*(-Hstar.beta*Hstar/(1+theta*Hstar)^2+ H.beta*H/(1+theta*H)^2
-d*(H.beta/(1+theta*H)-theta*H.beta*H/(1+theta*H)^2)))))
dl.dtheta <- cbind(dl.dtheta, dl.dtheta.dbeta)
### Derivative of gradient of beta with respect to all parameters
dl.dbeta <- rbind(dl.dalpha.dbeta, dl.deta.dbeta, dl.dtheta.dbeta, dl.dbeta.dbeta)
hessian <- cbind(t(dl.dalpha), t(dl.deta), t(dl.dtheta), dl.dbeta)
#colnames(hessian) <- c("logalpha","logeta","logtheta","beta")
#rownames(hessian) <- colnames(hessian)
return(hessian)
}
########################################
### Optimize the likelihood function
fit <- optim(par=logp, fn=like, gr=gradientfunc, method="BFGS", hessian=FALSE)
par <- fit$par
### Get the score function
score <- gradientfunc(par, score=TRUE)
### Get the hessian
hessian <- hessianfunc(par)
#### Output ####
out <- c(list(X=X, fit = fit, par = par, score = score, hessian = hessian, call = call,
formula = formula, data = data, logp = logp, clusterid =clusterid,
ncluster = ncluster, n = n))
class(out) <- "frailty"
return(out)
}
# Report the data-generating ("true") parameter vector, then fit the
# Gamma-Weibull frailty model on the simulated data and show the result.
print("true")
print(logp)
estimates <- frailty(formula, data, logp, clusterid = "id")
print(estimates)
# S3 print method for "frailty" objects: shows the original call, the
# estimated parameters (log scale for alpha/eta/theta, identity for the
# betas) and basic sample information.
print.frailty <- function(x, ...) {
  cat("Call:", "\n")
  print.default(x$call)
  cat("\nEstimated parameters in the Gamma-Weibull frailty model:", "\n")
  cat("\n")
  # One-row matrix of estimates; Greek letters via Unicode escapes.
  est <- t(as.matrix(x$par))
  colnames(est) <- c("log(\U003B1)", "log(\U003B7)", "log(\U003B8)",
                     names(as.data.frame(x$X)))
  rownames(est) <- ""
  print.default(est)
  cat("\n")
  cat("Number of observations:", x$n, "\n")
  cat("Number of clusters:", x$ncluster)
}
# Summary method stub for "frailty" objects.
#
# BUG FIX: in the original, the function body closed immediately after
# `ans <- list(...)`, stranding `class(ans) <- ...` and `return(ans)` at top
# level and leaving an unmatched `}`, so the file did not parse.  Those two
# statements now live inside the function and the stray brace is gone.
#
# NOTE(review): the body still references names that are defined nowhere in
# this file (AF, CI.transform, confidence.level, confidence.interval,
# modelcall, method, fit) -- it appears copy-pasted from the AF package's
# summary method and will error if called.  TODO: replace the list contents
# (and probably the "summary.AF" class tag) with a real summary of the
# frailty fit.
summary.frailty <- function(object, digits = max(3L, getOption("digits") - 3L), ...){
  ans <- list(AF = AF, CI.transform = CI.transform, confidence.level = confidence.level,
              confidence.interval = confidence.interval, n.obs = object$n, n.cases = object$n.cases,
              n.cluster = object$n.cluster, modelcall = modelcall, call = object$call, method = method,
              formula = object$formula, exposure = object$exposure, outcome = object$outcome,
              fit = fit, sandwich = object$sandwich)
  class(ans) <- "summary.AF"  # TODO confirm: presumably should be "summary.frailty"
  return(ans)
}
|
f8663ad60237f56bc27c8915b78c36bc1a6ee319
|
590d47d3af4c20a1173974554b6440704f547fe1
|
/plot2.R
|
519f7ee8bb1b4674faad384d2920ae03c5cd1fd8
|
[] |
no_license
|
jlehn/ExData_Plotting1
|
4544cd441add47d88904f7437f3b8a8e0af927ae
|
2ef656d80cddb980c7895a6944e0c82e6369bbc8
|
refs/heads/master
| 2021-01-18T15:48:21.329467
| 2014-05-11T21:15:03
| 2014-05-11T21:15:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
r
|
plot2.R
|
## plot2.R -- line plot of Global Active Power across 1-2 Feb 2007.
## Run from the directory that contains household_power_consumption.txt.

## Read the raw data: semicolon-separated, with "?" marking missing values.
rawdata <- read.table("household_power_consumption.txt",
                      header = TRUE, sep = ";", na.strings = c("?"),
                      stringsAsFactors = F)

## Restrict to the two target dates, then build a POSIX datetime column
## from the separate Date and Time character columns.
keep <- (rawdata$Date == "1/2/2007") | (rawdata$Date == "2/2/2007")
data <- rawdata[keep, ]
data$DateTime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")

## Render the plot straight to a 480x480 PNG device.
png(filename = "plot2.png", width = 480, height = 480, units = "px")
plot(data$DateTime,
     data$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
089f5df89ee942323de6d90c0aecbb53b256275f
|
f2a2726e65f5d1f47466eb54628e5f90fc6c524e
|
/man/summary.bbcor.Rd
|
8a161aa8f81a942cc9186a4f3375f805750a9c56
|
[] |
no_license
|
cran/BBcor
|
4f03daec2e576eb360bc88d4e4e4696e0c711483
|
f5c4b12f1b4de84aef0c1035b76e6cd91555a95d
|
refs/heads/master
| 2023-08-27T06:05:52.041642
| 2021-10-14T10:20:01
| 2021-10-14T10:20:01
| 284,279,283
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 705
|
rd
|
summary.bbcor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{summary.bbcor}
\alias{summary.bbcor}
\title{Summarize posterior samples from bbcor object}
\usage{
\method{summary}{bbcor}(object, ci = 0.9, decimals = 2, ...)
}
\arguments{
\item{object}{An object of class \code{bbcor}}
\item{ci}{The desired credible interval}
\item{decimals}{The number of decimal places to which estimates should be rounded}
\item{...}{Currently ignored}
}
\value{
A \code{data.frame} summarizing the relations
}
\description{
Summarize posterior samples from bbcor object
}
\examples{
Y <- mtcars[, 1:5]
bb <- bbcor(Y, method = "spearman")
summary(bb)
}
|
54f62505f1bd011814a6136a46074cc936b3c8b8
|
70412a43e78946f75c14a05d79e23a08636ba625
|
/Classes/Day_04/stevens_law.R
|
8ffe23da309db99b3ecdfb62bc6be2baa9796aed
|
[] |
no_license
|
danieljwilson/CMMC-2018
|
94de25ec725b331477d6d38349de3db540d51354
|
8450f092c81f25a056e0de607f05cd79421271e8
|
refs/heads/master
| 2020-03-22T19:45:07.769122
| 2018-08-04T16:57:08
| 2018-08-04T16:57:08
| 140,547,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,309
|
r
|
stevens_law.R
|
# Steven's power law: perceived (subjective) intensity = a * stimulus^b.
# Vectorized over `stimuli`; `a` scales and `b` is the power exponent.
stevens_law <- function(stimuli, a, b) {
  a * stimuli^b
}
stimuli <- c(2,4,6,8,10,12)
predictions <- stevens_law(stimuli, 1, 0.7)
# Simpler (one-line) way to write the function.
# BUG FIX: the original left this definition without a body, so R silently
# consumed the next expression -- the `observations` assignment -- as the
# function body, and `observations` was never created for the plots below.
stevens_law2 <- function(x,a,b) a * x^b
# Example observations
observations = c(1.1, 1.5, 2.1, 2.5, 2.7, 3.2)
# Plot observations against predictions
plot(stimuli, predictions, type = 'l', las=1, ylim=c(0, 7))
points(stimuli, observations, type='p', las=1, ylab="")
# Root-mean-squared deviation between the observations and the Steven's-law
# predictions generated from params = c(a, b).  Shaped for optim(): the
# parameter vector comes first, the data follow as extra arguments.
rmsd = function (params, stimuli, observations){
  predicted <- stevens_law(stimuli, params[1], params[2])
  sqrt(mean((observations - predicted)^2))
}
# Sanity-check the error function at an arbitrary parameter guess.
rmsd(c(0.5,.9), stimuli, observations)

# Minimize the RMSD over (a, b) starting from c(2, 1).  The named arguments
# stimuli= and observations= are forwarded by optim() to rmsd(); they bind
# our local data to the corresponding rmsd() parameters.
results <- optim(c(2,1), rmsd, stimuli=stimuli, observations=observations)

# Predictions at the best-fitting parameters.
best_predictions <- stevens_law(stimuli, results$par[1], results$par[2])

# Plot the fitted curve (red line) against the observations (points).
plot(stimuli, best_predictions, type = 'l', las=1, ylim=c(0, 4), col='red')
points(stimuli, observations, type='p', las=1, ylab="")
|
d05d9a253d2f8c42cff197a76ab93c85775d23d7
|
23592d75a70dac72352f0bed803de9e3e95fa37e
|
/ui.R
|
172ef3234deea3ac1b0ffce4cf4afb72b6928eb2
|
[] |
no_license
|
SatK-ds2020/DS-capstone-project-2020
|
ed436adf7f2114720a7dfc16f3169a8ce8840048
|
aa45e26ab59d9742c7d1035bd74d6a6a96922bdf
|
refs/heads/master
| 2022-12-01T19:03:45.022651
| 2020-08-14T02:50:02
| 2020-08-14T02:50:02
| 273,111,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,343
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(wordcloud)
# Define the app ####
# Shiny UI for the next-word prediction app: a navbarPage with three tabs --
# an interactive Prediction tab, a Summary of the backoff model, and a
# References tab with external links.
ui <- shinyUI(navbarPage("Next-Word Prediction App",
# --- Tab 1: interactive prediction -------------------------------------
tabPanel( "Prediction",
# Instructions block with author info and report/repo links.
fluidRow(
column(12,align = "center",
h3("Instructions",style = "color:darkred"),
HTML("
<p><strong>Dated: 13 Aug, 2020; Author: Satindra Kathania,Ph.D, mail_to:
<a href='mailto:satindrak@gmail.com'>satindrak@gmail.com</a></p></strong>"),
"The app need 5-10 sec to initialize, then start by entering your text in the input box below.
Once you click submit, the App will display a barplot with Top 10 predicted words according to backoff model score.
This App also display the wordcloud for most predicted words. Click on 'Summary' for more details.
This App also display the wordcloud for most predicted words.
Note, currently this app only supports 'English' language.",
br(),
# Link to report
a('More information on the project',
href='https://rpubs.com/SatKat_2020/648814',
target = '_blank'),
br(),
# Link to repo
a('Link to the GitHub Repository',
href='https://github.com/SatK-ds2020/DS-capstone-project-2020',
target = '_blank'),
br(),
)
),
# Text-entry row: CSS-styled textarea plus a submit button.
fluidRow(
column(12,align = "center",
# Text input
tags$style(type="text/css", "textarea {width:80%}") ,
tags$textarea(id="text", rows=3, placeholder = "Please enter your text...", ""),
tags$head(tags$style("#text{color: red;
font-size: 20px;
font-style: bold;
}")),
# Submit button for making the prediction
submitButton("Submit")
)
),
# Example phrases (left column) and the top predicted word (right column);
# 'text1' is rendered by the server side.
fluidRow(
column(12,
fluidRow(
column(6,
h3("Phrases to try",style = "color:darkred"),
"[1] loved nine lives last night you cannot cancel ............. (the)",
br(),
" [2] fun i often wonder why..............(i) ",
br(),
"[3]time chitchat continued commentators i suppose i................. (should) ",
br(),
"[4] card for you today made with digital image............... (from)" ,
br(),
"[5] to use flowers woohoo i am so...............(excited) ",
br(),
"[6] winner will be..........(announced) ",
br(),
"[7] grueling hour boot camps that cost per head has.............. (begun) ",
br(),
"[8] the front page philip orlando the new.............(york) " ,
br(),
"[9] miles from downtown cleveland but look how................(far) ",
br(),
"[10] developed fever while the..................(rest) "
),
column(6,
h3("Top suggested Next Word:" ),
textOutput('text1'),
tags$head(tags$style("#text1{color: red;
font-size: 30px;
font-style: italic;
}"))
))
)),
# Output row: 'barchart' and 'wordcloud' plots come from the server side.
fluidRow(
column(12,
fluidRow(
column(6,
h3("Top 10 Predicted Words",alignment="center"),
plotOutput("barchart")),
column(6,
h3("Wordcloud for the Predicted Words"),
plotOutput('wordcloud'))
))
)
), #tabpanel
# --- Tab 2: static HTML describing the stupid-backoff model ------------
tabPanel("Summary",
h3("Information about the App", style = 'color:darkred'),
HTML("<footer>
<p>Author: Satindra Kathania,Ph.D, mail_to:
<a href='mailto:satindrak@gmail.com'>satindrak@gmail.com</a></p>
</footer>
<p style='color:darkblue'> 1. This is a NextWord Prediction App and is a part of Coursera Data Science Specialization-Capstone Project offered by John Hopkins University.
This web app is inspired by Swiftkey, a predictive keyboard on smart phones.</p>
<p style='color:darkblue'> 2. This app uses a text-prediction model based on N-gram frequency with 'stupid backoff algorithm'.
The text you entered in the inputbox, it's last four words will be used to predict the next word, for example,
if the sentence is <strong>'it happened for the first time'</strong> the words <strong>'for the first time'</strong> will be used
to predict next words.</p>
<p style='color:darkblue'> 3. This prediction model uses 1-5 ngrams as dictionary to search,with a precalculated back-off score from highest to lowest frequency.
The probability of next word is depends on the backoff score which is calculated by dividing the counts of matches found in ngram/n-1 gram
multiplied by **backoff factor= 0.4** with each droping n-grams</p>
- First the search started with 5-grams model to match last 4 words of User Input and the most likely next word is predicted with highest score.<br>
- If no suitable match is found in the 5-gram, the model search 'back's off' to a 4-gram, i.e. using the last three words, from the user provided phrase,
to predict the next best match.<br>
- If no 4-gram match is found, the model 'back's off' to a 3-gram, i.e. using the last two words from the user provided phrase, to predict a suitable match.<br>
- If no bigram word is found, the model default 'back's off' to the highest frequency unigram word available.<br><br>
<p style='color:darkblue'> 4. The general strategy for Building this predicitive language model is as followed:-</p>
- Creating the large corpus with sampling and pooling the blogs, news, and twitter data with stop words.<br>
- Cleaning the corpus and Building n-grams frequency tables, up to 5-grams. These frequency tables serve as frequency
dictionaries for the predictive model to search for the match.<br>
- Probability of a term is modeled based on the Markov chain assumption that the occurrence of a term is dependent on the preceding terms.<br>
- Remove sparse terms, threshold is determined.<br>
- Use a('backoff strategy', href='https://en.wikipedia.org/wiki/Katz%27s_back-off_model') to predict so that if the
probability a quad-gram is very low, use tri-gram to predict, and so on.<br><br>
<p style='color:darkblue'> 5. For more information about the project App and its code, please click on the hyperlinks available on previous page.However,
for more details about NLP, prediction algorithms and its techniques, follow the below references: </p>
")
),
# --- Tab 3: external reference links (static HTML) ---------------------
tabPanel("References",
h3("More information on linguistic models", style = 'color:darkred'),
HTML("
<h3><p style='color:darkred'> References </p></h3>
<strong>1.Large language models in machine translation</strong>, http://www.aclweb.org/anthology/D07-1090.pdf'<br>
<strong>2.Real Time Word Prediction Using N-Grams Model</strong>, IJITEE,ISSN: 2278-3075, Vol.8 Issue-5, 2019'<br>
<strong>3.Tokenizer: Introduction to the tokenizers R Package</strong>, https://cran.r-project.org/web/packages/tokenizers/'<br>
<strong>4.Smoothing and Backoff</strong>,'http://www.cs.cornell.edu/courses/cs4740/2014sp/lectures/smoothing+backoff.pdf'<br>
<strong>5.N-gram Language Models</strong>,'https://web.stanford.edu/~jurafsky/slp3/ed3book.pdf'<br>
<strong>6.Speech and Language Processing</strong>,'https://web.stanford.edu/class/cs124/lec/languagemodeling.pdf'<br>
<strong>7.Katz’s back-off model, Wikipedia</strong>,https://en.wikipedia.org/wiki/Katz%27s_back-off_model'<br>
<strong>8.Good-Turing frequency estimation, Wikipedia</strong>,'https://en.wikipedia.org/wiki/Good%E2%80%93Turing_frequency_estimation'<br>
<strong>9.NLP Lunch Tutorial: Smoothing</strong>,'https://nlp.stanford.edu/~wcmac/papers/20050421-smoothing-tutorial.pdf'<br>
<strong>10.Word Prediction Using Stupid Backoff With a 5-gram Language Model</strong>,' https://rpubs.com/pferriere/dscapreport'<br>
<strong>11.Katz’s Backoff Model Implementation in R</strong>,'https://thachtranerc.wordpress.com/2016/04/12/katzs-backoff-model-implementation-in-r'<br>
<strong>12.NLP: Language Models</strong>,'https://www.csd.uwo.ca/courses/CS4442b/L9-NLP-LangModels.pdf'<br>
")
)
)
)
|
7f8833bb120883f70290986fcdbbb7b199775cd0
|
3f36e3afc25870cf6e9429de4a5b0604d52dc03a
|
/man/filterByLift.Rd
|
43710bcfd2b2035f9a9bc8529ff62bd3c183bf8a
|
[] |
no_license
|
Patricklomp/VisualisingHealthTrajectories
|
4077a62b7da7b92ad2c7aa99a918aaf15585788e
|
98e69c50d354a693f0e9e8a3d76c81e3e5088a7a
|
refs/heads/master
| 2023-05-31T16:21:50.435675
| 2021-06-04T07:43:42
| 2021-06-04T07:43:42
| 317,466,951
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 340
|
rd
|
filterByLift.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filterByLift.R
\name{filterByLift}
\alias{filterByLift}
\title{Filtering edges by lift}
\usage{
filterByLift(g, lift.threshold = 2)
}
\arguments{
\item{g}{igraph object}
\item{lift.threshold}{Lift threshold used when filtering edges (default \code{2})}
}
\value{
}
\description{
Filtering edges by lift
}
|
046f62a54bab679da847f59ae90b1d4254f68047
|
5509acef91ed779def4836408c94fa2e2c2190ac
|
/Sim 2 code.r
|
cf2f858256bbae9cbb2da47034eeff0a5fd22a45
|
[] |
no_license
|
usystech/TreacherousPathToGeneralizableResults
|
769c9a6843f8172fbc0aa7851a29be3b9336d8b8
|
69b8e7bb42d882256a46924a30ac101634c933f7
|
refs/heads/master
| 2021-01-22T19:54:07.726906
| 2017-03-17T00:40:09
| 2017-03-17T00:40:09
| 85,254,191
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,700
|
r
|
Sim 2 code.r
|
treatEffect<-3
for(){
for(q in 1:100){
treatData <- matrix(rnorm(400), nrow = 20, ncol = 20)
contData <- matrix(rnorm(400), nrow = 20, ncol = 20)
# we have filled in our 2 20x20 arrays with N(0,1) obs.
for(b in 1:20){
treatData[b,b] <-'+'(treatData[b,b],treatEffect) }
# added treatment effect to diagonal
tSubjMeans<-seq(0,0,length.out=20)
tItemMeans<-seq(0,0,length.out=20)
cSubjMeans<-seq(0,0,length.out=20)
cItemMeans<-seq(0,0,length.out=20)
for(w in 1:20){
tSubjMeans[w] <- mean(treatData[w,(1:20)])
cSubjMeans[w] <- mean(contData[w,(1:20)])
tItemMeans[w] <- mean(treatData[(1:20),w])
cItemMeans[w] <- mean(contData[(1:20),w]) }
subjDiff<-seq(0,0,length.out=20)
itemDiff<-seq(0,0,length.out=20)
for(s in 1:20){
subjDiff[s]<-tSubjMeans[s]-cSubjMeans[s]
itemDiff[s]<-tItemMeans[s]-cItemMeans[s] }
SubjDiffSD<-sd(subjDiff)
ItemDiffSD<-sd(itemDiff)
Treat<-c(seq(0,0,length.out=20),seq(1,1,length.out=20))
# VVV calculates subject ANOVA VVV
subjData<-c(cSubjMeans,tSubjMeans)
subjLM<-lm(subjData ~ Treat)
subjAnova<-aov(subjLM)
subjAnovaPVal<-summary(subjAnova)[[1]][["Pr(>F)"]][[1]]
# VVV calculates item ANOVA VVV
itemData<-c(cItemMeans,tItemMeans)
itemLM <-lm(itemData ~ Treat)
itemAnova<-aov(itemLM)
subjAnovaPVal<-summary(itemAnova)[[1]][["Pr(>F)"]][[1]]
******code for effect sizes... PLUG IN subject std error term******************
SubjEffectSize <- (mean(tSubjMeans)-mean(cSubjMeans)/.2289 )
ItemEffectSize <- (mean(tItemMeans)-mean(cItemMeans)/.2289 )
ItemEffectSize
SubjEffectSize
# VVV begins code for quasi-F and min-F calculations VVV
# VVV generates coding for treatment
tTreat<- matrix(1, nrow = 20, ncol = 20)
cTreat<- matrix(0, nrow = 20, ncol = 20)
# VVV generate coding for subject factor
tSubj<- matrix(0, nrow = 20, ncol = 20)
cSubj<- matrix(0, nrow = 20, ncol = 20)
for(a in 1:20){
tSubj[a,]<-a
cSubj[a,]<-a }
# VVV generate coding for item factor
tWord<- matrix(0, nrow = 20, ncol = 20)
cWord<- matrix(0, nrow = 20, ncol = 20)
for(b in 1:20){
for(d in 1:20){
tWord[b,d]<-d
cWord[b,d]<-d } }
Data<-c(cData,tData)
trt<-c(cTreat,tTreat)
sub<-c(cSubj,tSubj)
wrd<-c(cWord,tWord)
Treat<- factor(trt)
Subj<- factor(sub)
Word <- factor(wrd)
aov(Data ~ Treat + Treat:Subj + Treat:Word + Subj:Treat:Word)
#Mean Squares for Quasi F can be calculated
# Calculate the quasi-F, min-F, and F critical value using the standard formulas
quasiF<-(MSt + MStsw)/(MSts + MStw)
minF<-MSt/(MSts + MStw)
i<-(MSt + MStsw)^2/(MSt^2 + (MStsw^2/722))
j<-(MSts + MStw)^2/(MSts^2/38 + MStw^2/38)
Fcrit <-qf(.95, df1 = i , df2 = j)
Fcrit
quasiF
minF
i
j
}
}
|
7c0527dfbe809456534d11004aa63c422669d33f
|
b02ffd482888293bbfa776dec19539916fcc0fbe
|
/man/inject.Rd
|
f9099fd9b45a25cf341cd1a92e92539412bce2a0
|
[] |
no_license
|
cran/daewr
|
46dcd75aec929bf6626abef2bd40677fea75a0ee
|
19e2a3286b8e8fef5155e7f39fe3c77a1623b359
|
refs/heads/master
| 2023-05-13T17:31:27.375923
| 2023-04-28T21:40:02
| 2023-04-28T21:40:02
| 17,695,372
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 810
|
rd
|
inject.Rd
|
\name{inject}
\alias{inject}
\docType{data}
\title{Single array for injection molding experiment}
\description{
Data from the single array for injection molding experiment in chapter 12 of Design and Analysis
of Experiments with R
}
\usage{data(inject)}
\format{
A data frame with 20 observations on the following 8 variables.
\describe{
\item{\code{A}}{a numeric vector }
\item{\code{B}}{a numeric vector }
\item{\code{C}}{a numeric vector }
\item{\code{D}}{a numeric vector }
\item{\code{E}}{a numeric vector }
\item{\code{F}}{a numeric vector }
\item{\code{G}}{a numeric vector }
\item{\code{shrinkage}}{a numeric vector }
}
}
\source{
Design and Analysis of Experiments with R, by John Lawson, CRC/Chapman Hall
}
\examples{
data(inject)
}
\keyword{datasets}
|
722afbe7b00978939ae908af6568d3b77143af83
|
6d0dad9419cea35d2c5f8472455efd6448c99e67
|
/R/kappalambda.R
|
8ba3de1ffa93f43f7684319a7f0fb3891586b23d
|
[
"MIT"
] |
permissive
|
ralmond/CPTtools
|
224f0d720adf4003bd3a3cf76ca5ba8d1f315305
|
8ecbacd3997dd8a566048ade135b6c836fcc54a2
|
refs/heads/master
| 2023-08-07T18:45:07.408649
| 2023-07-18T15:56:35
| 2023-07-18T15:56:35
| 239,853,879
| 2
| 1
|
MIT
| 2023-07-18T15:56:38
| 2020-02-11T20:07:01
|
R
|
UTF-8
|
R
| false
| false
| 1,915
|
r
|
kappalambda.R
|
gkLambda <- function (tab, weights=c("None","Linear","Quadratic"),
                      W = diag(nrow(tab))) {
  ## Goodman--Kruskal lambda agreement coefficient for a square rating
  ## table `tab`, optionally weighted by `W` (or a named weighting scheme).
  k <- nrow(tab)
  if (k != ncol(tab)) stop("Expected a square matrix.")
  ## Build the weight matrix from the `weights` keyword only when the
  ## caller did not supply W explicitly.
  if (missing(W)) {
    if (is.character(weights)) weights <- match.arg(weights)
    d <- outer(seq_len(k), seq_len(k), "-")
    W <- switch(weights,
                None = W,
                Linear = W - abs(d)/(k - 1),
                Quadratic = W - (d/(k - 1))^2)
  }
  total <- sum(tab)
  rowp <- rowSums(tab)/total
  ## `W * rowp` scales row i of W by rowp[i] (column-wise recycling),
  ## matching sweep(W, 1, rowp, "*") in the original formulation.
  chance <- max(colSums(W * rowp))
  (sum(W * tab)/total - chance)/(1 - chance)
}
fcKappa <- function (tab, weights=c("None","Linear","Quadratic"),
                     W = diag(nrow(tab))) {
  ## Fleiss--Cohen (weighted) kappa for a square agreement table `tab`.
  k <- nrow(tab)
  if (k != ncol(tab)) stop("Expected a square matrix.")
  ## Derive the weight matrix from the `weights` keyword unless the
  ## caller passed W explicitly.
  if (missing(W)) {
    if (is.character(weights)) weights <- match.arg(weights)
    d <- outer(seq_len(k), seq_len(k), "-")
    W <- switch(weights,
                None = W,
                Linear = W - abs(d)/(k - 1),
                Quadratic = W - (d/(k - 1))^2)
  }
  total <- sum(tab)
  observed <- sum(W * tab)/total
  ## Chance agreement from the product of the marginal distributions.
  marg <- outer(rowSums(tab)/total, colSums(tab)/total)
  expected <- sum(W * marg)
  (observed - expected)/(1 - expected)
}
gkGamma <- function (tab) {
  ## Goodman--Kruskal gamma for an ordered square table:
  ## (P(concordant) - P(discordant)) / (P(concordant) + P(discordant)),
  ## where tied pairs are excluded from the denominator.
  tab <- as.matrix(tab)
  N <- sum(tab)
  rtab <- tab/N
  nr <- nrow(rtab)
  nc <- ncol(rtab)
  PIs <- PId <- 0
  ## seq_len() keeps the outer loop empty when nr == 1; the original
  ## 1:(nr-1) expanded to c(1, 0) and indexed out of range.
  for (a in seq_len(nr - 1)) {
    for (aa in (a + 1):nr) {
      for (b in seq_len(nc)) {
        if (b < nc) {
          ## Concordant pairs: second observation higher on both margins.
          for (bb in (b + 1):nc) {
            PIs <- PIs + rtab[a, b]*rtab[aa, bb]
          }
        }
        if (b > 1) {
          ## Discordant pairs: second observation higher on rows, lower on columns.
          for (bb in seq_len(b - 1)) {
            PId <- PId + rtab[a, b]*rtab[aa, bb]
          }
        }
      }
    }
  }
  ## Each unordered pair was counted once above; double to cover both orders.
  PIs <- 2*PIs
  PId <- 2*PId
  ## The original divided by 1 - PIt with PIt = 1 - PIs - PId, which is
  ## algebraically identical to PIs + PId.
  (PIs - PId)/(PIs + PId)
}
|
00048d15f0c17b54034da1ef1298a1dd510e72b1
|
89a6e5f03ae75952f940b8656dbb682df69e662e
|
/R/output_output_html.R
|
c0f58cdb61807debfa6da950ef04cfe437ed7157
|
[] |
no_license
|
arturochian/raconteur
|
ee84ad4b3a352f669b91d54532753810c31d3dfa
|
b783be5606db06c971a6893af5608b67aea17faa
|
refs/heads/master
| 2021-01-18T06:58:49.069903
| 2011-01-24T15:54:06
| 2011-01-24T15:54:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 399
|
r
|
output_output_html.R
|
make_view_output_html <- function(path) {
  ## Write a skeleton XHTML page containing <%= %> template placeholders
  ## for the header and output, to "output.html" under `path`, using the
  ## package's print_line() helper (overwriting any existing file).
  template <- "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">
<html xmlns=\"http://www.w3.org/1999/xhtml\">
<head>
<title><%= header %></title>
</head>
<body>
<%= output %>
</body>
</html>
"
  print_line(template, file_path = file.path(path, "output.html"),
             append = FALSE)
}
|
68db1d049f268f71a63c2624596c49bd172d4e07
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.database/man/dynamodb_transact_write_items.Rd
|
737cd1ef7ff4f4717e9ae2515752d193ecd402ba
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 3,083
|
rd
|
dynamodb_transact_write_items.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dynamodb_operations.R
\name{dynamodb_transact_write_items}
\alias{dynamodb_transact_write_items}
\title{TransactWriteItems is a synchronous write operation that groups up to
100 action requests}
\usage{
dynamodb_transact_write_items(
TransactItems,
ReturnConsumedCapacity = NULL,
ReturnItemCollectionMetrics = NULL,
ClientRequestToken = NULL
)
}
\arguments{
\item{TransactItems}{[required] An ordered array of up to 100 \code{TransactWriteItem} objects, each of which
contains a \code{ConditionCheck}, \code{Put}, \code{Update}, or \code{Delete} object. These
can operate on items in different tables, but the tables must reside in
the same Amazon Web Services account and Region, and no two of them can
operate on the same item.}
\item{ReturnConsumedCapacity}{}
\item{ReturnItemCollectionMetrics}{Determines whether item collection metrics are returned. If set to
\code{SIZE}, the response includes statistics about item collections (if
any), that were modified during the operation and are returned in the
response. If set to \code{NONE} (the default), no statistics are returned.}
\item{ClientRequestToken}{Providing a \code{ClientRequestToken} makes the call to
\code{\link[=dynamodb_transact_write_items]{transact_write_items}} idempotent,
meaning that multiple identical calls have the same effect as one single
call.
Although multiple identical calls using the same client request token
produce the same result on the server (no side effects), the responses
to the calls might not be the same. If the \code{ReturnConsumedCapacity}
parameter is set, then the initial
\code{\link[=dynamodb_transact_write_items]{transact_write_items}} call returns the
amount of write capacity units consumed in making the changes.
Subsequent \code{\link[=dynamodb_transact_write_items]{transact_write_items}} calls
with the same client token return the number of read capacity units
consumed in reading the item.
A client request token is valid for 10 minutes after the first request
that uses it is completed. After 10 minutes, any request with the same
client token is treated as a new request. Do not resubmit the same
request with the same client token for more than 10 minutes, or the
result might not be idempotent.
If you submit a request with the same client token but a change in other
parameters within the 10-minute idempotency window, DynamoDB returns an
\code{IdempotentParameterMismatch} exception.}
}
\description{
\code{\link[=dynamodb_transact_write_items]{transact_write_items}} is a synchronous write operation that groups up to 100 action requests. These actions can target items in different tables, but not in different Amazon Web Services accounts or Regions, and no two actions can target the same item. For example, you cannot both \code{ConditionCheck} and \code{Update} the same item. The aggregate size of the items in the transaction cannot exceed 4 MB.
See \url{https://www.paws-r-sdk.com/docs/dynamodb_transact_write_items/} for full documentation.
}
\keyword{internal}
|
02b305feb972114f84e821c2a64449ba95e5d844
|
2764167b5743be62adadc491ec7dfde210e0703d
|
/man/Lxyz2ll.Rd
|
9c3db60d1be273b59d1050105b71b05e1b83aaa3
|
[] |
no_license
|
cran/GEOmap
|
528a4cbe293211d324405037eb280b415e65f62e
|
0149894022496cee8237868b0bb693d00ef01e41
|
refs/heads/master
| 2023-08-18T14:47:52.021469
| 2023-08-13T12:40:21
| 2023-08-13T13:30:31
| 17,713,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
rd
|
Lxyz2ll.Rd
|
\name{Lxyz2ll}
\Rdversion{1.1}
\alias{Lxyz2ll}
\title{Cartesian to Lat-Lon
}
\description{Cartesian vector to Lat-Lon List
}
\usage{
Lxyz2ll(X)
}
\arguments{
\item{X}{list, x,y,z}
}
\value{list of lat and lon
}
\author{
Jonathan M. Lees<jonathan.lees@unc.edu>
}
\seealso{xyz2ll}
\examples{
Lxyz2ll(Lll2xyz(23, 157))
}
\keyword{misc}
|
561f086d214b745734088f714cfc49ce76af05f9
|
eb09acc1e170228d123eb713c79563c2f10d6f8d
|
/TwitterSentiment/man/capitalization.Rd
|
a33ba62b1a18e8eb0b452e65215bdce8b7463cdb
|
[] |
no_license
|
PHP-2560/final-project-twittersentiment
|
16f6fd3c423d3fcc5f5ad38272766e7d84063308
|
0244f0a00880b62ae0f68078fdcdd152892df336
|
refs/heads/master
| 2020-04-11T04:47:53.675453
| 2018-12-18T03:50:57
| 2018-12-18T03:50:57
| 161,526,369
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 348
|
rd
|
capitalization.Rd
|
\name{capitalize_first_letter}
\alias{capitalize_first_letter}
\title{Capitalization of First Letter}
\description{
Capitalizes the first letter of the word.
}
\usage{
capitalize_first_letter(x)
}
\arguments{
\item{x}{A character vector.}
}
\value{It outputs a character vector of the same length.}
\examples{
capitalize_first_letter("word")
}
|
1b97754b2f973cfba4b8d620c62f978da3d7be12
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/valgrind_test_dir/lnorm_C-test.R
|
cf06e5588030a3cf196f848d53a93b845eed485a
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
lnorm_C-test.R
|
# Recording wrapper around the compiled mixR lognormal-mixture routine:
# before forwarding to the C entry point it appends this call's full
# argument list to the global `data.env` environment (under "lnorm_C"),
# so a test harness can replay the exact inputs seen at runtime.
function (x, pi, mulog, sdlog, max_iter = 500L, tol = 1e-06)
{
    # Environment that accumulates the argument sets of every call.
    e <- get("data.env", .GlobalEnv)
    # Append this call's arguments as a new list element.
    e[["lnorm_C"]][[length(e[["lnorm_C"]]) + 1]] <- list(x = x,
        pi = pi, mulog = mulog, sdlog = sdlog, max_iter = max_iter,
        tol = tol)
    # Delegate to the compiled routine and return its result.
    .Call("_mixR_lnorm_C", x, pi, mulog, sdlog, max_iter, tol)
}
|
567cab276cc7081ddca78327c8f81425ce622a25
|
e33a1a6dddbe6ec074d339ed4e82b3fad975e23f
|
/tests/testthat/test_vst.R
|
e5e5ffa658db5cc11d290a5ddcdf304b5aad32e5
|
[] |
no_license
|
zihua/DESeq2
|
c5ae93c4263f0e3c414233c32bfa832311cf07e2
|
eefb97f4a8cd3fa7ecfbb8969edb4d304d3746b8
|
refs/heads/master
| 2020-12-03T05:17:28.377255
| 2015-10-15T20:58:48
| 2015-10-15T20:58:48
| 45,465,791
| 3
| 0
| null | 2015-11-03T12:43:52
| 2015-11-03T12:43:52
| null |
UTF-8
|
R
| false
| false
| 699
|
r
|
test_vst.R
|
# Smoke tests for the variance stabilizing transformation (VST).
# Build a small simulated data set with an intercept-only design and
# the dispersion estimates the VST requires.
dds <- makeExampleDESeqDataSet(n=100, m=4)
design(dds) <- ~ 1
dds <- estimateSizeFactors(dds)
dds <- estimateDispersionsGeneEst(dds)
# The VST is exercised under each of the three dispersion fit types.
dds <- estimateDispersionsFit(dds, fitType="parametric")
vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
dds <- estimateDispersionsFit(dds, fitType="local")
vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
dds <- estimateDispersionsFit(dds, fitType="mean")
vsd <- varianceStabilizingTransformation(dds, blind=FALSE)
# test VST basics/errors
# A fresh data set with no column names: the transformation is called
# both on the data set object and on its raw count matrix.
dds <- makeExampleDESeqDataSet(n=20, m=4)
colnames(dds) <- NULL
varianceStabilizingTransformation(dds)
head(varianceStabilizingTransformation(assay(dds)))
# NOTE(review): expected to error -- presumably because no variance
# stabilized data has been stored on this dds yet; confirm against the
# getVarianceStabilizedData() documentation.
expect_error(getVarianceStabilizedData(dds))
|
6044e2e79e747fd2d523686e8ff7c81edbd12d8d
|
2ae02c5ab707a50623436d73946444a89673c687
|
/R/SS_executivesummaryMK.R
|
097306cb3e5b63b6c182736ab9ab3dbbd70caf5b
|
[] |
no_license
|
mkapur/kaputils
|
2e1b075fb5641fbbd2c122b6c26a254add6bb6b0
|
7d734acf9697af9c153b5228fe99f03dfd017238
|
refs/heads/master
| 2021-11-24T23:31:28.820396
| 2021-11-10T20:43:51
| 2021-11-10T20:43:51
| 125,903,739
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,243
|
r
|
SS_executivesummaryMK.R
|
#' A function to create a executive summary tables from an SS Report.sso file
#'
#' Reads the Report.sso within the directory and creates executive summary
#' tables as required by the current Terms of Reference for West Coast
#' groundfish. Works with Stock Synthesis versions 3.24U and later.
#' Additionally, historical catch and numbers at ages tables are created.
#'
#' @param dir Locates the directory of the files to be read in, double
#' backslashes (or forwardslashes) and quotes necessary.
#' @param plotdir Directory where the table will be saved. The default
#' saves the table to the dir location where the Report.sso file is located.
#' @param quant to calculate confidence intervals, default is set at 0.95
#' @param es.only = only the executive summary tables will be produced, default is false which
#' will return all executive summary tables, historical catches, and numbers-at-ages
#' @param nsex This will allow the user to calculate single sex values based on the new sex
#' specification (-1) in SS for single sex models. Default value is FALSE. TRUE will not divide by 2.
#' @return CSV files containing the executive summary tables.
#' @author Chantel Wetzel
#' @export
SS_executivesummaryMK <- function(dir, plotdir = 'default', quant = 0.95, es.only = FALSE, nsex = FALSE){
basemod1 <- SS_output(dir, covar = TRUE)
# Check to make sure dir is a dir
if(!is.character(dir) || !file.info(dir)$isdir){
stop("Input 'dir' should be a directory")
}
# Make sure dir contains the report file
repfile <- file.path(dir, "Report.sso")
if(is.na(file.info(repfile)$size)){
stop("Report.sso not found in 'dir': ", dir)
}
#Read in the base model using r4ss
wd <- paste(dir, "/Report.sso", sep="")
base <- readLines(wd)
rawrep <- read.table(file= wd , col.names = 1:400, fill = TRUE, quote = "",
colClasses = "character", nrows = -1, comment.char = "")
if (plotdir == 'default') { csv.dir = paste0(dir,"/tables/") }
if (plotdir != 'default') { csv.dir = paste0(plotdir,"/tables/")}
dir.create(csv.dir, showWarnings = FALSE)
SS_versionCode <- base[grep("#V",base)]
SS_version <- base[grep("Stock_Synthesis",base)]
SS_version <- SS_version[substring(SS_version,1,2)!="#C"] # remove any version numbering in the comments
SS_versionshort <- toupper(substr(SS_version,1,8))
SS_versionNumeric <- as.numeric(substring(SS_versionshort,5))
#=================================================================================================
# Function Sections
#=================================================================================================
# Function to force R to print correct decimal place
print.numeric <- function(x, digits) { formatC(x, digits = digits, format = "f") }
comma <- function(x, digits=0) { formatC(x, big.mark=",", digits, format = "f") }
emptytest <- function(x){ sum(!is.na(x) & x=="")/length(x) }
# Funtion to calculate confidence intervals
getDerivedQuant.fn <- function(dat, label, yrs, quant, divisor=1) {
# modify old header to new value
names(dat)[names(dat)=="LABEL"] <- "Label"
allYrs <- suppressWarnings(as.numeric(substring(dat$Label[substring(dat$Label,1,3)=="SSB"],5,8)))
allYrs <- allYrs[!is.na(allYrs)]
finalYr <- as.numeric(substring(dat$Label[substring(dat$Label,1,8)=="OFLCatch"],10,13))[1]
if(is.null(yrs)) {
yrs <- allYrs
}
if(yrs[1]<0) {
yrs <- (finalYr+yrs):finalYr
}
out <- dat[dat$Label%in%paste(label,yrs,sep="_"),]
out.value <- out$Value <- out$Value/divisor
out$StdDev <- out$StdDev/divisor
if(label=="Recr") { #use lognormal
out.lower <- exp(log(out.value)-qnorm(1-(1-quant)/2)*sqrt(log((out$StdDev/out.value)^2+1)))
out.upper <- exp(log(out.value)+qnorm(1-(1-quant)/2)*sqrt(log((out$StdDev/out.value)^2+1)))
}
else {
out.lower <- out.value-qnorm(1-(1-quant)/2)*out$StdDev
out.upper <- out.value+qnorm(1-(1-quant)/2)*out$StdDev
}
return(data.frame(Year=yrs,Value=out.value,LowerCI=out.lower,UpperCI=out.upper))
}
# Function to pull values from the read in report file and calculate the confidence intervals
Get.Values <- function(dat, label, yrs, quant, single = FALSE){
if (label == "Recr") { label = " Recr"}
if(!single){
dq = mapply(function(x) out = as.numeric(strsplit(dat[grep(paste(label,"_",x,sep=""),dat)]," ")[[1]][3]), x = yrs)
sd = mapply(function(x) out = as.numeric(strsplit(dat[grep(paste(label,"_",x,sep=""),dat)]," ")[[1]][4]), x = yrs)
if (label == "Main_RecrDev" || label == "Late_RecrDev" || label == "ForeRecr") {
zz = ifelse(SS_versionNumeric < 3.3, 10, 11)
#sd = mapply(function(x) out = as.numeric(strsplit(dat[grep(paste(label,"_",x,sep=""),dat)]," ")[[1]][zz]), x = yrs)
sd = mapply(function(x) out = strsplit(dat[grep(paste(label,"_",x,sep=""),dat)]," ")[[1]][zz], x = yrs)
if (sd[1] == "NA") { sd = rep(0, length(dq)) }
sd = as.numeric(sd)
}
}
if(single){
dq = as.numeric(strsplit(dat[grep(label,dat)]," ")[[1]][3])
sd = as.numeric(strsplit(dat[grep(label,dat)]," ")[[1]][4])
}
if(label == " Recr" || label == "Recr_virgin"){
low = exp(log(dq) - qnorm(1-(1-quant)/2) * sqrt(log(1 + (sd/dq) * (sd/dq))))
high= exp(log(dq) + qnorm(1-(1-quant)/2) * sqrt(log(1 + (sd/dq) * (sd/dq))))
}
if(label != " Recr" && label != "Recr_virgin"){
low = dq - qnorm(1-(1-quant)/2)*sd
high= dq + qnorm(1-(1-quant)/2)*sd
}
if (!single) { return(data.frame(yrs, dq, low, high)) }
if (single) { return(data.frame(dq, low, high)) }
}
matchfun <- function(string, obj=rawrep[,1], substr1=TRUE)
{
# return a line number from the report file (or other file)
# sstr controls whether to compare subsets or the whole line
match(string, if(substr1){substring(obj,1,nchar(string))}else{obj} )
}
matchfun2 <- function(string1,adjust1,string2,adjust2,cols="nonblank",matchcol1=1,matchcol2=1,
objmatch=rawrep,objsubset=rawrep,substr1=TRUE,substr2=TRUE,header=FALSE)
{
# return a subset of values from the report file (or other file)
# subset is defined by character strings at the start and end, with integer
# adjustments of the number of lines to above/below the two strings
line1 <- match(string1,
if(substr1){
substring(objmatch[,matchcol1],1,nchar(string1))
}else{
objmatch[,matchcol1]
})
line2 <- match(string2,
if(substr2){
substring(objmatch[,matchcol2],1,nchar(string2))
}else{
objmatch[,matchcol2]
})
if(is.na(line1) | is.na(line2)) return("absent")
if(is.numeric(cols)) out <- objsubset[(line1+adjust1):(line2+adjust2),cols]
if(cols[1]=="all") out <- objsubset[(line1+adjust1):(line2+adjust2),]
if(cols[1]=="nonblank"){
# returns only columns that contain at least one non-empty value
out <- objsubset[(line1+adjust1):(line2+adjust2),]
out <- out[,apply(out,2,emptytest) < 1]
}
if(header && nrow(out)>0){
out[1,out[1,]==""] <- "NoName"
names(out) <- out[1,]
out <- out[-1,]
}
return(out)
}
#=================================================================================================
# Determine the model version and dimensions of the model
#=================================================================================================
rawdefs <- matchfun2("DEFINITIONS",1,"LIKELIHOOD",-1)
# Determine the number of fishing fleets
if (SS_versionNumeric >= 3.3){
# version 3.30
defs <- rawdefs[-(1:3),apply(rawdefs[-(1:3),],2,emptytest)<1]
defs[defs==""] <- NA
FleetNames <- as.character(defs[grep("Fleet_name",defs$X1),-1])
FleetNames <- FleetNames[!is.na(FleetNames)]
fleet_ID <- 1: length(FleetNames)
fleet_type <- as.numeric(defs[grep("Fleet_type",defs$X1),-1])# as.numeric(defs[4:(3+length(fleet_ID)),1])
nfleets <- sum(fleet_type[!is.na(fleet_type)] <= 2 )
}
if (SS_versionNumeric < 3.3){
# version 3.20 - 3.24
defs <- rawdefs[-(1:3),apply(rawdefs[-(1:3),],2,emptytest)<1]
defs[defs==""] <- NA
lab <- defs$X1
catch_units <- as.numeric(defs[grep("Catch_units",lab),-1])
IsFishFleet <- !is.na(catch_units)
nfleets <- sum(IsFishFleet)
}
begin <- matchfun(string = "TIME_SERIES", obj = rawrep[,1])+2
end <- matchfun(string = "SPR_series", obj = rawrep[,1])-1
temptime <- rawrep[begin:end,2:3]
endyr <- max(as.numeric(temptime[temptime[,2]=="TIME",1]))
startyr <- min(as.numeric(rawrep[begin:end,2]))+2
foreyr <- max(as.numeric(temptime[temptime[,2]=="FORE",1]))
hist <- (endyr - 11):(endyr + 1)
fore <- (endyr+1):foreyr
all <- startyr:foreyr
#======================================================================
# Determine number of areas in the model
#======================================================================
nareas <- max(as.numeric(rawrep[begin:end,1]))
#======================================================================
# Determine the fleet name and number for fisherie with catch
#======================================================================
begin <- matchfun(string = "CATCH", obj = rawrep[,1])+2
end <- matchfun(string = "TIME_SERIES", obj = rawrep[,1])-1
temp <- rawrep[begin:end, 1:18]
names <- unique(temp[,2]) # This is a list of fishery names with catch associated with them
fleet.num <- unique(temp[,1])
#======================================================================
# Find summary age
#======================================================================
ts <- matchfun2("TIME_SERIES", -1,"Area", -1)
smry.age <- as.numeric(toupper(substr(ts[2,2],14,15)))
#======================================================================
# Two-sex or single-sex model
#======================================================================
selex <- matchfun2("LEN_SELEX",6,"AGE_SELEX",-1,header=TRUE)
nsexes <- ifelse(SS_versionNumeric < 3.3,
length(unique(as.numeric(selex$gender))),
length(unique(as.numeric(selex$Sex))))
sexfactor = 2
if (nsex) {sexfactor = 1}
#======================================================================
# Determine the number of growth patterns
#======================================================================
find.morph <- matchfun2("MORPH_INDEXING", 1, "MOVEMENT", -2, header=TRUE)
nmorphs <- dim(find.morph)[1] / nsexes
#======================================================================
#ES Table a Catches from the fisheries
#======================================================================
xx = ifelse(SS_versionNumeric < 3.3, 12, 15)
catch = NULL; total.catch = total.dead = 0
ind = hist[1:(length(hist)-1)]
for(a in 1:nareas){
for (i in 1:nfleets){
killed = mapply(function(x) killed = as.numeric(strsplit(base[grep(paste(fleet.num[i], names[i], x, sep=" "),base)]," ")[[1]][xx]), x = ind)
# killed = mapply(function(x) killed = as.numeric(strsplit(base[grep(paste(fleet.num[i], names[i], nareas[a], x, sep=" "),base)]," ")[[1]][xx]), x = ind)
input.catch = mapply(function(x) input.catch = as.numeric(strsplit(base[grep(paste(fleet.num[i], names[i], x, sep=" "),base)]," ")[[1]][xx+1]), x = ind)
# input.catch = mapply(function(x) input.catch = as.numeric(strsplit(base[grep(paste(fleet.num[i], names[i], nareas[a], x, sep=" "),base)]," ")[[1]][xx+1]), x = ind)
total.dead = total.dead + killed
total.catch = total.catch + input.catch
catch = cbind(catch, input.catch)
}
es.a = data.frame(ind, comma(catch, digits = 2), comma(total.catch, digits = 2), comma(total.dead, digits = 2))
colnames(es.a) = c("Years", names, "Total Catch", "Total Dead")
write.csv(es.a, paste0(csv.dir, "/a_Catches_Area", nareas[a], "_ExecutiveSummary.csv"), row.names = F)
}
# SS_readforecastMK(paste0(dir,"/forecast.ss"))
fore <- readLines(paste0(dir,"/forecast.ss"))[44:(length( readLines(paste0(dir,"/forecast.ss")))-2)] %>%
strsplit(.," ")
df <- data.frame()
for(e in 1:length(fore)){
df[e,"Years"] <- as.numeric(fore[[e]][2])
df[e,"Fleet"] <- as.numeric(fore[[e]][4])
df[e,"Total_Catch"] <- as.numeric(fore[[e]][9])
# df[e,"Total_Dead"] <- NA
}
df %>% select(Years, Fleet, Total_Catch) %>% spread(Fleet,Total_Catch)
# fore <-
df2 <- read.csv(paste0(dir,"/tempForeCatch.csv")) %>% select(X.Year, Fleet, dead.B.) %>% spread(Fleet, dead.B.)
#======================================================================
#ES Table b Spawning Biomass and Depletion
#======================================================================
hist <- (endyr - 11):(endyr + 5)
ssb = Get.Values(dat = base, label = "SPB", hist, quant )
# ssb = Get.Values(dat = base, label = "SSB", hist, quant )
if (nsexes == 1) { ssb$dq = ssb$dq / sexfactor ; ssb$low = ssb$low / sexfactor ; ssb$high = ssb$high / sexfactor }
depl = Get.Values(dat = base, label = "Bratio" , hist, quant )
for (i in 1:length(hist)){ dig = ifelse(ssb[i,2] < 100, 1, 0)}
es.b = data.frame(hist,
comma(ssb$dq,digits = dig), paste0(comma(ssb$low,digits = dig), "\u2013", comma(ssb$high,digits = dig)),
print(100*depl$dq, digits = 1), paste0(print(100*depl$low,digits = 1), "\u2013", print(100*depl$high,digits = 1)))
colnames(es.b) = c("Years", "Spawning Output", "95% Asymptotic Interval", "Estimated Depletion (%)", "95% Asymptotic Interval")
write.csv(es.b, paste0(csv.dir, "/b_SSB_ExecutiveSummary.csv"), row.names = F)
hist <- (endyr - 11):(endyr + 1)
#======================================================================
#ES Table c Recruitment
#======================================================================
parameters <- matchfun2("PARAMETERS",1,"DERIVED_QUANTITIES", -1, header=TRUE)
recdevMain <- parameters[substring(parameters$Label,1,12)=="Main_RecrDev",]
recdevLate <- parameters[substring(parameters$Label,1,12)=="Late_RecrDev",]
temp <- toupper(substr(recdevLate$Label,14,17))
late.yrs <- as.numeric(temp)
recdevFore <- parameters[substring(parameters$Label,1, 8)=="ForeRecr",]
temp <- toupper(substr(recdevFore$Label,10,13))
fore.yrs <- as.numeric(temp)
ind <- fore.yrs <= max(hist)
fore.yrs <- 2015:2019 #fore.yrs[ind]
end <- ifelse(length(late.yrs) == 0, fore.yrs - 1, late.yrs - 1)
#
recruits = Get.Values(dat = base, label = "Recr" , (endyr - 11):(endyr + 5), quant )
if (dim(recdevMain)[1] != 0){
recdevs = Get.Values(dat = base, label = "Main_RecrDev", yrs = 1971:end, quant )
# recdevs = Get.Values(dat = base, label = "Main_RecrDev", yrs = hist[1]:end, quant )
devs = cbind(recdevs$dq, recdevs$low, recdevs$high)
if (length(late.yrs) > 0 ){
late.recdevs = Get.Values(dat = base, label = "Late_RecrDev", yrs = late.yrs, quant )
devs = cbind(c(recdevs$dq, late.recdevs$dq), c(recdevs$low, late.recdevs$low), c(recdevs$high, late.recdevs$high))
}
if(length(fore.yrs) > 0){
fore.recdevs = Get.Values(dat = base, label = "ForeRecr", yrs = fore.yrs, quant )
if (length(late.yrs) > 0){
devs = cbind(c(recdevs$dq, late.recdevs$dq, fore.recdevs$dq),
c(recdevs$low, late.recdevs$low, fore.recdevs$low),
c(recdevs$high, late.recdevs$high, fore.recdevs$high))
}
if (length(late.yrs) == 0){
devs = cbind(c(recdevs$dq, fore.recdevs$dq),
c(recdevs$low, fore.recdevs$low),
c(recdevs$high, fore.recdevs$high))
}
}
# Zero out the sd for years where devs were not estimated
devs.out = data.frame(print(devs[,1], digits = 3), paste0(print(devs[,2],digits = 3), "\u2013", print(devs[,3], digits = 3)))
}
if (dim(recdevMain)[1] == 0) { devs.out = data.frame(rep(0, length(hist)), rep(0, length(hist))) }
for (i in 1:length(hist)){ dig = ifelse(recruits[i,2] < 100, 1, 0)}
es.c = data.frame(recruits$yrs,
comma(recruits$dq, dig), paste0(comma(recruits$low, dig),
"\u2013", comma(recruits$high, dig)))#,
#devs.out )
colnames(es.c) = c("Years", "Recruitment", "95% Asymptotic Interval") #, "Recruitment Deviations", "95% Asymptotic Interval")
write.csv(es.c, paste0(csv.dir, "/c_Recr_ExecutiveSummary.csv"), row.names = F)
#======================================================================
#ES Table d 1-SPR (%)
#======================================================================
hist <- (endyr - 11):(endyr + 5)
spr.name = ifelse(SS_versionNumeric >= 3.30, "SPR_report_basis", "SPR_ratio_basis")
spr_type = strsplit(base[grep(spr.name,base)]," ")[[1]][3]
#if (spr_type != "1-SPR") {
# print(":::::::::::::::::::::::::::::::::::WARNING:::::::::::::::::::::::::::::::::::::::")
# print(paste("The SPR is being reported as", spr_type, "."))
# print("West coast groundfish assessments typically report 1-SPR in the executive summary")
# print(":::::::::::::::::::::::::::::::::::WARNING:::::::::::::::::::::::::::::::::::::::") }
adj.spr = Get.Values(dat = base, label = "SPRratio" , hist, quant)
f.value = Get.Values(dat = base, label = "F" , hist, quant)
es.d = data.frame(hist,
print(adj.spr$dq*100,2), paste0(print(adj.spr$low*100,2), "\u2013", print(adj.spr$high*100,2)),
print(f.value$dq,3), paste0(print(f.value$low,3), "\u2013", print(f.value$high,3)))
colnames(es.d) = c("Years", paste0("Estimated ", spr_type, " (%)"), "95% Asymptotic Interval", "Harvest Rate (proportion)", "95% Asymptotic Interval")
write.csv(es.d, paste0(csv.dir, "/d_SPR_ExecutiveSummary.csv"), row.names = F)
hist <- (endyr - 11):(endyr + 1)
#======================================================================
#ES Table e Reference Point Table
#======================================================================
# Find the values within the forecast file
rawforecast <- readLines(paste0(dir, "/forecast.ss"))
rawstarter <- readLines(paste0(dir, "/starter.ss"))
spr <- as.numeric(strsplit(rawforecast[grep("SPRtarget",rawforecast)]," ")[[1]][1])
ssb.virgin = Get.Values(dat = base, label = "SPB_Virgin", hist, quant, single = TRUE)
# ssb.virgin = Get.Values(dat = base, label = "SSB_unfished", hist, quant, single = TRUE)
smry.virgin = data.frame("dq" = basemod1$derived_quants[grep("SmryBio_Unfished", basemod1$derived_quants$Label),"Value"],
"low" = basemod1$derived_quants[grep("SmryBio_Unfished", basemod1$derived_quants$Label),"Value"] - 1.96*basemod1$derived_quants[grep("SmryBio_Unfished", basemod1$derived_quants$Label),"StdDev"],
"high" = basemod1$derived_quants[grep("SmryBio_Unfished", basemod1$derived_quants$Label),"Value"] + 1.96*basemod1$derived_quants[grep("SmryBio_Unfished", basemod1$derived_quants$Label),"StdDev"])
rec.virgin = Get.Values(dat = base, label = "Recr_Virgin", hist, quant, single = TRUE)
# rec.virgin = Get.Values(dat = base, label = "Recr_unfished", hist, quant, single = TRUE)
final.depl = 100*depl[dim(depl)[1],2:4]
b.target = Get.Values(dat = base, label = "SSB_Btgt", hist, quant, single = TRUE)
spr.btarg = Get.Values(dat = base, label = "SPR_Btgt", hist, quant, single = TRUE)
f.btarg = Get.Values(dat = base, label = "Fstd_Btgt", hist, quant, single = TRUE)
yield.btarg= Get.Values(dat = base, label = "TotYield_Btgt", hist, quant, single = TRUE)
b.spr = Get.Values(dat = base, label = "SSB_SPR", hist, quant, single = TRUE)
f.spr = Get.Values(dat = base, label = "Fstd_SPR", hist, quant, single = TRUE)
yield.spr = Get.Values(dat = base, label = "TotYield_SPRtgt", hist, quant, single = TRUE)
b.msy = Get.Values(dat = base, label = "SSB_MSY", hist, quant, single = TRUE)
spr.msy = Get.Values(dat = base, label = "SPR_MSY", hist, quant, single = TRUE)
f.msy = Get.Values(dat = base, label = "Fstd_MSY", hist, quant, single = TRUE)
msy = Get.Values(dat = base, label = "TotYield_MSY", hist, quant, single = TRUE)
# Convert spawning quantities for single-sex models
if (nsexes == 1){
ssb.virgin = ssb.virgin / sexfactor
b.target = b.target / sexfactor
b.spr = b.spr / sexfactor
b.msy = b.msy / sexfactor
}
es.e = matrix(c(
comma(ssb.virgin$dq, dig), paste0(comma(ssb.virgin$low, dig), "\u2013", comma(ssb.virgin$high, dig)),
comma(smry.virgin$dq, dig), paste0(comma(smry.virgin$low, dig), "\u2013", comma(smry.virgin$high, dig)),
comma(ssb$dq[dim(ssb)[1]], dig), paste0(comma(ssb$low[dim(ssb)[1]],dig), "\u2013", comma(ssb$high[dim(ssb)[1]],dig)),
comma(rec.virgin$dq, dig), paste0(comma(rec.virgin$low, dig), "\u2013", comma(rec.virgin$high, dig)),
print(final.depl$dq, 2), paste0(print(final.depl$low, 2), "\u2013", print(final.depl$high, 2)),
"", "",
comma(b.target$dq, dig), paste0(comma(b.target$low, dig), "\u2013", comma(b.target$high, dig)),
print(spr.btarg$dq, 3), paste0(print(spr.btarg$low, 3), "\u2013", print(spr.btarg$high, 3)),
print(f.btarg$dq, 3), paste0(print(f.btarg$low, 3), "\u2013", print(f.btarg$high, 3)),
comma(yield.btarg$dq, dig), paste0(comma(yield.btarg$low, dig), "\u2013", comma(yield.btarg$high, dig)),
"", "",
comma(b.spr$dq, dig), paste0(comma(b.spr$low, dig), "\u2013", comma(b.spr$high, dig)),
print(spr, 3), " NA ",
print(f.spr$dq, 3), paste0(print(f.spr$low, 3), "\u2013", print(f.spr$high, 3)),
comma(yield.spr$dq, dig), paste0(comma(yield.spr$low, dig), "\u2013", comma(yield.spr$high, dig)),
"", "",
comma(b.msy$dq, dig), paste0(comma(b.msy$low, dig), "\u2013", comma(b.msy$high, dig)),
print(spr.msy$dq, 3), paste0(print(spr.msy$low, 3), "\u2013", print(spr.msy$high, 3)),
print(f.msy$dq, 3), paste0(print(f.msy$low, 3), "\u2013", print(f.msy$high, 3)),
comma(msy$dq, dig), paste0(comma(msy$low, dig), "\u2013", comma(msy$high, dig))
), ncol=2, byrow=T )
es.e = noquote(es.e)
colnames(es.e) = c("Estimate", "95% Asymptotic Interval")
rownames(es.e) = c("Unfished Spawning Biomass (mt)",
paste0("Unfished Age ", smry.age, "+ Biomass (mt)"),
paste0("Spawning Biomass", " (", hist[length(hist)], ")"),
"Unfished Recruitment (R0)",
paste0("Depletion ", "(", hist[length(hist)], ")"),
"Reference Points Based SB40%",
"Proxy Spawning Biomass (SB40%)",
"SPR resulting in SB40%",
"Exploitation Rate Resulting in SB40%",
"Yield with SPR Based On SB40% (mt)",
"Reference Points based on SPR proxy for MSY",
"Proxy spawning biomass (SPR50)",
"SPR50",
"Exploitation rate corresponding to SPR50",
"Yield with SPR50 at SBSPR (mt)",
"Reference points based on estimated MSY values",
"Spawning biomass at MSY (SBMSY)",
"SPRMSY",
"Exploitation rate corresponding to SPRMSY",
"MSY (mt)")
write.csv(es.e, paste0(csv.dir, "/e_ReferencePoints_ExecutiveSummary.csv"))
# #======================================================================
# # ES Table f is the historical harvest
# #======================================================================
ind = hist
ofl = rep("fill_in", length(ind))
abc = rep("fill_in", length(ind))
acl = rep("fill_in", length(ind))
catch = c(comma(total.catch, digits = 2), "NA")
dead = c(comma(total.dead, digits = 2), "NA")
es.f = data.frame(ind, ofl, abc, acl, catch, dead)
colnames(es.f) = c("Years", "OFL", "ABC", "ACL", "Landings", "Total Dead")
write.csv(es.f, paste0(csv.dir, "/f_Manage_ExecutiveSummary.csv"), row.names = F)
#
# #======================================================================
# #ES Table g Predicted Quantities
# #======================================================================
ofl.fore = Get.Values(dat = base, label = "OFLCatch" , yrs = fore, quant)
abc.fore = Get.Values(dat = base, label = "ForeCatch" , yrs = fore, quant)
## ssb for all forecast yrs
ssb.fore = data.frame("yrs" = fore,
"dq" = basemod1$derived_quants[grep(paste0("SSB_",fore,collapse = "|"), basemod1$derived_quants$Label),"Value"],
"low" = basemod1$derived_quants[grep(paste0("SSB_",fore,collapse = "|"), basemod1$derived_quants$Label),"Value"] - 1.96*basemod1$derived_quants[grep("SmryBio_Unfished", basemod1$derived_quants$Label),"StdDev"],
"high" = basemod1$derived_quants[grep(paste0("SSB_",fore,collapse = "|"), basemod1$derived_quants$Label),"Value"] + 1.96*basemod1$derived_quants[grep("SmryBio_Unfished", basemod1$derived_quants$Label),"StdDev"])
#Get.Values(dat = base, label = "SSB" , yrs = fore, quant)
depl.fore = Get.Values(dat = base, label = "Bratio", yrs = fore, quant)
if (nsexes == 1) {
ssb.fore$dq = ssb.fore$dq / sexfactor; ssb.fore$low = ssb.fore$low / sexfactor; ssb.fore$high = ssb.fore$high / sexfactor}
smry.fore = 0
for(a in 1:nareas){
temp = mapply(function(x) temp = as.numeric(strsplit(base[grep(paste(a, x,"FORE",sep=" "),base)]," ")[[1]][6]),
x = fore)
smry.fore = smry.fore + temp
}
es.g = data.frame(fore,
comma(ofl.fore$dq, 2),
comma(abc.fore$dq, 2),
comma(smry.fore, 2),
comma(ssb.fore$dq, 2),
print(depl.fore$dq*100,2))
colnames(es.g) = c("Year", "Predicted OFL (mt)", "ABC Catch (mt)", paste0("Age ", smry.age, "+ Biomass (mt)"), "Spawning Biomass (mt)", "Depletion (%)")
write.csv(es.g, paste0(csv.dir, "/g_Projections_ExecutiveSummary.csv"), row.names = F)
#
# #======================================================================
# #ES Table h decision table
# #======================================================================
# # To be done later
#
#
# #======================================================================
# #ES Table i the summary table
# #======================================================================
# hist <- (endyr - 11):(endyr + 5)
# ind = length(hist)-1
# smry = 0
# for(a in 1:nareas){
# temp = mapply(function(x) temp = as.numeric(strsplit(base[grep(paste(a, x,"TIME",sep=" "),base)]," ")[[1]][6]), x = hist[1:ind])
# smry = smry + temp
# }
#
# smry = c(smry, smry.fore[1])
#
# es.i = matrix(c(hist,
# c(print(adj.spr$dq[1:(length(hist)-1)],2), "NA"),
# c(print(f.value$dq[1:(length(hist)-1)],2), "NA"),
# comma(smry, dig),
# comma(ssb$dq[1:length(hist)], dig),
# paste0(comma(ssb$low[1:length(hist)], dig), "\u2013", comma(ssb$high[1:length(hist)], dig)),
# comma(recruits$dq, dig),
# paste0(comma(recruits$low, dig), "\u2013", comma(recruits$high, dig)),
# print(depl$dq[1:length(hist)]*100, 1),
# paste0(print(depl$low[1:length(hist)]*100,1), "\u2013", print(depl$high[1:length(hist)]*100,1))),
# ncol = length(hist), byrow = T)
#
# es.i = noquote(es.i)
#
# rownames(es.i) = c(" Years",
# "1-SPR",
# "Exploitation_Rate",
# paste0("Age ", smry.age, "+ Biomass (mt)"),
# "Spawning Biomass (mt)",
# "95% Confidence Interval",
# "Recruitment",
# "95% Confidence Interval",
# "Depletion (%)",
# "95% Confidence Interval")
#
# write.csv(es.i, paste0(csv.dir, "/i_Summary_ExecutiveSummary.csv"))
# hist <- (endyr - 11):(endyr +1)
#
# if (es.only == FALSE){
# #======================================================================
# # Total Catch when discards are estimated
# #======================================================================
# xx = ifelse(SS_versionNumeric < 3.3, 12, 15)
# total.dead = total.catch = 0
# catch = NULL
# ind = startyr:endyr
# for(a in 1:nareas){
# for (i in 1:nfleets){
# killed = mapply(function(x) killed = as.numeric(strsplit(base[grep(paste(fleet.num[i], names[i], nareas[a], x, sep=" "),base)]," ")[[1]][xx]), x = ind)
# input.catch = mapply(function(x) input.catch = as.numeric(strsplit(base[grep(paste(fleet.num[i], names[i], nareas[a], x, sep=" "),base)]," ")[[1]][xx+1]), x = ind)
# total.dead = total.dead + killed
# total.catch = total.catch + input.catch
# catch = cbind(catch, input.catch)
# }
# mortality = data.frame(ind, comma(catch, 2), comma(total.catch,2), comma(total.dead,2))
# colnames(mortality) = c("Year",names, "Total Catch", "Total Dead")
#
# write.csv(mortality, paste0(csv.dir, "/_CatchesAllYrs_Area", nareas[a], ".csv"), row.names = F)
# }
#
# #======================================================================
# #Numbers at age
# #======================================================================
# if ( nareas > 1) { print(paste0("Patience: There are ", nareas, " areas that are being pulled and combined to create the numbers-at-age tables.")) }
#
# if(SS_versionNumeric < 3.30) {
# maxAge = length(strsplit(base[grep(paste("1 1 1 1 1 1", startyr,sep=" "),base)]," ")[[1]]) - 14
#
# if (nsexes == 1) {
# natage.f = natage.m = 0
# for(a in 1:nareas){
# temp = mapply(function(x) temp = as.numeric(strsplit(base[grep(paste(a,"1 1 1 1", x,sep=" "),base)]," ")[[1]][14:(14+maxAge)]), x = startyr:endyr)
# natage.f = natage.f + t(temp)
# }
#
# colnames(natage.f) = 0:maxAge
# rownames(natage.f) <- startyr:endyr
#
# write.csv(natage.f, paste0(csv.dir, "/_natage.csv"))
# }
#
# if (nsexes == 2) {
# natage.f = natage.m = 0
# for(a in 1:nareas){
# for (b in 1:nmorphs){
# n = b
# temp = mapply(function(x) temp = as.numeric(strsplit(base[grep(paste(a, b, "1 1 1", n, x,sep=" "),base)]," ")[[1]][14:(14+maxAge)]), x = startyr:endyr)
# natage.f = natage.f + t(temp)
# n = ifelse(nmorphs ==1, nsexes, b + nsexes)
# temp = mapply(function(x) temp = as.numeric(strsplit(base[grep(paste(a, b, "2 1 1", n, x,sep=" "),base)]," ")[[1]][14:(14+maxAge)]), x = startyr:endyr)
# natage.m = natage.m + t(temp)
# }
# }
#
# colnames(natage.f) = 0:maxAge; colnames(natage.m) = 0:maxAge
# rownames(natage.f) <- startyr:endyr ; rownames(natage.m) <- startyr:endyr
#
# write.csv(natage.f, paste0(csv.dir, "/_natage_f.csv"))
# write.csv(natage.m, paste0(csv.dir, "/_natage_m.csv"))
# }
# } # SS v3.24 verions loop
#
# # Check to see if numbers-at-age is calculated
# if(SS_versionNumeric >= 3.30) {
# check = as.numeric(strsplit(rawstarter[grep("detailed output", rawstarter)]," ")[[1]][1])
# if (check == 2) { "Detailed age-structure set in starter file set = 2 which does not create numbers-at-age table."}
#
# if (check != 2){
# maxAge = length(strsplit(base[grep(paste("1 1 1 1 1 1 1", startyr,sep=" "),base)]," ")[[1]]) - 14
#
# if (nsexes == 1) {
# natage.f = natage.m = 0
# for(a in 1:nareas){
# temp = mapply(function(x) temp = as.numeric(strsplit(base[grep(paste(a,"1 1 1 1 1 1", x,sep=" "),base)]," ")[[1]][14:(14+maxAge)]), x = startyr:endyr)
# natage.f = natage.f + t(temp)
# }
#
# colnames(natage.f) = 0:maxAge
# rownames(natage.f) <- startyr:endyr
#
# write.csv(natage.f, paste0(csv.dir, "/_natage.csv"))
# }
#
# if (nsexes == 2) {
# natage.f = natage.m = 0
# for(a in 1:nareas){
# for (b in 1:nmorphs){
# n = b
# temp = mapply(function(x) temp = as.numeric(strsplit(base[grep(paste(a, b, "1 1 1 1", n, x,sep=" "),base)]," ")[[1]][14:(14+maxAge)]), x = startyr:endyr)
# natage.f = natage.f + t(temp)
# n = ifelse(nmorphs ==1, nsexes, b + nsexes)
# temp = mapply(function(x) temp = as.numeric(strsplit(base[grep(paste(a, b, "2 1 1 1", n, x,sep=" "),base)]," ")[[1]][14:(14+maxAge)]), x = startyr:endyr)
# natage.m = natage.m + t(temp)
# }
# }
#
# colnames(natage.f) = 0:maxAge; colnames(natage.m) = 0:maxAge
# rownames(natage.f) <- startyr:endyr ; rownames(natage.m) <- startyr:endyr
#
# write.csv(natage.f, paste0(csv.dir, "/_natage_f.csv"))
# write.csv(natage.m, paste0(csv.dir, "/_natage_m.csv"))
# }
# } #check loop
# } # SS version 3.30
#
# } #nareas
}
## not run::
# SS_executivesummaryMK(dir = "C:/Users/Maia Kapur/Dropbox/UW/assessments/china_2019_update/chinarock-update-2019/crCen/base2015")
|
c06404c60d22a3e540d328bfdc96aa4444de6a50
|
6fa24cca5ba1dc15f9f206beb97c260786efe732
|
/script/old/20191210-GLMM.R
|
868783ff0599c75cd2af2021518e5e02ed3b75d1
|
[] |
no_license
|
wetinhsu/Macaca-population-trend
|
fe14e5dfa6290b4a982175feae290053c60446b9
|
a0e738ec817ae70b8ea042eeb7b64df4c9b5cb10
|
refs/heads/master
| 2023-07-19T12:38:45.198058
| 2023-07-11T01:13:59
| 2023-07-11T01:13:59
| 190,343,813
| 0
| 0
| null | 2019-06-05T07:07:49
| 2019-06-05T07:07:49
| null |
UTF-8
|
R
| false
| false
| 7,640
|
r
|
20191210-GLMM.R
|
# data analysis
library(data.table)
library(lme4)
library(car)
library(magrittr)
library(multcomp)
library(ggplot2)
library(psych)
library(readxl)
library(MuMIn)
#------------------------------------------------
#Original data----
# Read the raw macaque survey sheet and derive the analysis columns:
#  - DATE / julian.D : survey date and day-of-year
#  - TypeName.n      : forest type translated from the Chinese labels
#                      (mixed / Bamboo / broad-leaved / coniferous)
#  - TypeName.1      : forest type, or "Not forest" when Distance > 20
#  - County          : ordered factor (both Traditional-Chinese spellings of
#                      each county are listed so either form is recognized)
#  - Region          : North / Center / South / East, grouped from County
#  - Altitude_c      : altitude class = first character of the site code
M.data.o <- read_excel("./data/clean/for analysis.xlsx",
                     sheet=1) %>% setDT %>%
  .[, DATE := as.IDate(paste(Year, Month, Day, sep = "/"))] %>%
  .[TypeName %like% "混", TypeName.n := "mixed"] %>%
  .[TypeName %like% "竹林", TypeName.n := "Bamboo"] %>%
  .[TypeName %like% "闊葉", TypeName.n := "broad-leaved"] %>%
  .[TypeName %like% "針葉", TypeName.n := "coniferous"] %>%
  .[, TypeName.1 := ifelse(Distance>20, "Not forest", TypeName.n)] %>%
  .[, County := ordered(County,
                        c("宜蘭縣","基隆市","台北市","臺北市",
                          "新北市","台北縣","臺北縣",
                          "桃園縣","桃園市","新竹市",
                          "新竹縣","苗栗縣",
                          "台中市","臺中市",
                          "台中縣","臺中縣",
                          "彰化縣","南投縣","南投市",
                          "雲林縣","嘉義縣","嘉義市",
                          "台南市","臺南市",
                          "台南縣","臺南縣",
                          "高雄縣","高雄市",
                          "屏東縣", "花蓮縣",
                          "台東縣","臺東縣"))] %>%
  .[County %in% list("宜蘭縣","基隆市","台北市","臺北市",
                     "新北市","台北縣","臺北縣",
                     "桃園縣","桃園市","新竹市",
                     "新竹縣","苗栗縣"), Region := "North"] %>%
  .[County %in% list("台中市","臺中市",
                     "台中縣","臺中縣",
                     "彰化縣","南投縣","南投市",
                     "雲林縣","嘉義縣","嘉義市"), Region := "Center"] %>%
  .[County %in% list("台南市","臺南市",
                     "台南縣","臺南縣",
                     "高雄縣","高雄市",
                     "屏東縣"), Region := "South"]%>%
  .[County %in% list("花蓮縣",
                     "台東縣","臺東縣"), Region := "East"] %>%
  .[, julian.D := yday(DATE)] %>%
  .[, Altitude_c := substr(Site_N,1,1)] %>% setDT
# Coerce column classes explicitly (readxl may import these as character)
M.data.o$Year %<>% as.numeric
M.data.o$Survey %<>% as.numeric
M.data.o$Point %<>% as.numeric
M.data.o$Macaca_sur %<>% as.numeric
M.data.o$Month %<>% as.numeric
M.data.o$Day %<>% as.numeric
M.data.o$Distance %<>% as.numeric
M.data.o$julian.D %<>% as.numeric
M.data.o$Region %<>% as.factor
M.data.o$TypeName.1 %<>% as.factor
M.data.o$Site_N %<>% as.factor
#Remove duplicate data-------------------------------------------
# Blank out (Macaca_sur := NA) survey records previously identified as
# duplicates — one clause per offending Year/Survey/Site/Point combination.
# Work on a copy() so the raw table M.data.o is left untouched
# (data.table `:=` would otherwise modify it by reference).
M.data <- M.data.o %>% copy(.) %>%
  .[Year %in% 2015 & Survey %in% 2 & Site_N %in% "A29-17" & Point %in% 7,
    Macaca_sur := NA] %>%
  .[Year %in% 2015 & Survey %in% 2 & Site_N %in% "A33-28" & Point %in% 7,
    Macaca_sur := NA] %>%
  .[Year %in% 2016 & Survey %in% 1 & Site_N %in% "B33-01" & Point %in% 4,
    Macaca_sur := NA] %>%
  .[Year %in% 2017 & Survey %in% 1 & Site_N %in% "B38-07" & Point %in% 7,
    Macaca_sur := NA] %>%
  .[Year %in% 2018 & Survey %in% 1 & Site_N %in% "A35-15" & Point %in% 5,
    Macaca_sur := NA] %>%
  .[Year %in% 2018 & Survey %in% 2 & Site_N %in% "A28-10" & Point %in% 6,
    Macaca_sur := NA] %>%
  .[Year %in% 2019 & Survey %in% 1 & Site_N %in% "B14-02" & Point %in% 6,
    Macaca_sur := NA] %>%
  .[Year %in% 2019 & Survey %in% 1 & Site_N %in% "B38-08" & Point %in% 5,
    Macaca_sur := NA] %>%
  .[Year %in% 2019 & Survey %in% 2 & Site_N %in% "A20-02" & Point %in% 3,
    Macaca_sur := NA] %>%
  .[Year %in% 2019 & Survey %in% 2 & Site_N %in% "A33-32" & Point %in% 6,
    Macaca_sur := NA]
#---------------------------------------------------------------------
# Treat blanked duplicates as "no detection" (0) and re-base the survey
# year so the first year becomes 1 (Year.re).
M.data <- M.data %>%
  .[is.na(Macaca_sur), Macaca_sur := 0] %>%
  .[, Year := as.numeric(Year)] %>%
  .[, Year.re := Year - min(Year) + 1]
# Cross-tabulate detections (Macaca_sur == 1) by forest type x distance class
M.data %>%
  .[Macaca_sur %in% 1, .N, list(TypeName.1, Macaca_dist)] %>%
  dcast(.,Macaca_dist ~TypeName.1, value.var="N")
#==============================================
# Analysis subset: drop year 2019 and non-forest points
df <-
  M.data %>%
  .[Year < 2019,] %>%
  .[!(TypeName.1 %in% "Not forest"), ]
#-------------------------------------------
allFit(glmer(Macaca_sur ~ TypeName.1 + Year.re + Altitude + julian.D + Region + (1|Site_N),
             family = binomial, data = df)) # try refitting the glmer model with a range of optimizers
# Standardize the continuous covariates before fitting
df$Altitude.1 <- scale(df$Altitude,scale =T)
df$julian.D.1 <- scale(df$julian.D,scale =T)
# Binomial GLMM: detection ~ year + forest type + altitude + day-of-year +
# region, with a random intercept per survey site
m1 <- glmer(Macaca_sur ~ Year.re + TypeName.1 + Altitude.1 + julian.D.1 + Region + (1|Site_N),
            family = binomial, data = df,
            control = glmerControl(optimizer = "bobyqa"))
summary(m1)
#anova table==============================================
Anova(m1)
# Tukey post-hoc contrasts for the factor terms.
# NOTE(review): m1 contains no `Survey` term and `Altitude.1` is numeric, so
# the two middle glht() calls below likely fail — confirm the intended model.
summary(glht(m1, linfct = mcp(TypeName.1 = "Tukey")))
summary(glht(m1, linfct = mcp(Survey = "Tukey")))
summary(glht(m1, linfct = mcp(Altitude.1 = "Tukey")))
summary(glht(m1, linfct = mcp(Region = "Tukey")))
#AICc==============================================
# All-subsets model selection; dredge() requires na.action = "na.fail"
options(na.action = "na.fail")
d1<- dredge(
  glmer(Macaca_sur ~ TypeName.1 + Year.re + Altitude.1 + julian.D.1 + Region + (1|Site_N),
        family = binomial, data = df,
        control = glmerControl(optimizer = "bobyqa")),
  trace = T)
# Model-averaged coefficients: over all models, and over the delta-AICc < 2 set
summary(model.avg(d1))
summary(model.avg(d1, subset = delta < 2))
importance(d1)
sw(model.avg(d1, subset = delta < 2))
#Estimate2==============================================
# Mean detection rate and a pooled SE, stratified by
# forest type x survey round x region.
# A  = detection within distance class "A" (<25 m)
# AB = detection within classes "A" or "B" (<100 m)
# NOTE(review): the sum(N*(N-n)*SD^2/n)/N^2 expression looks like a
# finite-population stratified variance formula — confirm the source.
##<25
aa<- df %>% setDT %>%
  .[, A := ifelse(Macaca_dist %in% "A", Macaca_sur,0)] %>%
  .[, AB := ifelse(Macaca_dist %in% c("A","B"), Macaca_sur,0)] %>%
  .[, .(Mean = mean(A, na.rm=T),
        SD = sd(A, na.rm=T)/sqrt(length(A)),
        n = .N),
    by = list(TypeName.1,Survey, Region)] %>%
  .[, N:= sum(n)]
# Pooled variance of the stratified mean, then the overall mean rate
sum(aa$N*(aa$N-aa$n)*(aa$SD)^2/aa$n, na.rm=T)/(unique(aa$N)^2)
mean(aa$Mean)
##<100
bb<- df %>% setDT %>%
  .[, A := ifelse(Macaca_dist %in% "A", Macaca_sur,0)] %>%
  .[, AB := ifelse(Macaca_dist %in% c("A","B"), Macaca_sur,0)] %>%
  .[, .(Mean = mean(AB, na.rm=T),
        SD = sd(AB, na.rm=T)/sqrt(length(AB)),
        n = .N),
    by = list(TypeName.1,Survey, Region)] %>%
  .[, N:= sum(n)]
sum(bb$N*(bb$N-bb$n)*(bb$SD)^2/bb$n, na.rm=T)/(unique(bb$N)^2)
mean(bb$Mean)
#Estimate3==============================================
# Same stratified mean/SE as Estimate2, but stratified by
# forest type x 15-day period (julian.D_f) x region.
# A  = detection within distance class "A" (<25 m)
# AB = detection within classes "A" or "B" (<100 m)
##<25
aa<- df %>% setDT %>%
  .[, A := ifelse(Macaca_dist %in% "A", Macaca_sur,0)] %>%
  .[, AB := ifelse(Macaca_dist %in% c("A","B"), Macaca_sur,0)] %>%
  .[, julian.D_f := cut(julian.D,
                        breaks = c(seq(0,210,15)),
                        include.lowest = T)] %>%
  .[, .(Mean = mean(A, na.rm=T),
        SD = sd(A, na.rm=T)/sqrt(length(A)),
        n = .N),
    by = list(TypeName.1,julian.D_f, Region)] %>%
  .[, N:= sum(n)]
(se.25 <- sum(aa$N*(aa$N-aa$n)*(aa$SD)^2/aa$n, na.rm=T)/(unique(aa$N)^2))
mean(aa$Mean)
se.25^0.5*1.28 #80%CI=se*1.28
##<100
bb<- df %>% setDT %>%
  .[, A := ifelse(Macaca_dist %in% "A", Macaca_sur,0)] %>%
  .[, AB := ifelse(Macaca_dist %in% c("A","B"), Macaca_sur,0)] %>%
  # BUG FIX: was `julian.D_F :=` (capital F), which created an unused column
  # and made the grouping below depend on `julian.D_f` existing only as a
  # by-reference side effect of the `aa` chain above. Use the lowercase name
  # consistently so this chunk stands on its own.
  .[, julian.D_f := cut(julian.D,
                        breaks = c(seq(0,210,15)),
                        include.lowest = T)] %>%
  .[, .(Mean = mean(AB, na.rm=T),
        SD = sd(AB, na.rm=T)/sqrt(length(AB)),
        n = .N),
    by = list(TypeName.1,julian.D_f, Region)] %>%
  .[, N:= sum(n)]
(se.100 <- sum(bb$N*(bb$N-bb$n)*(bb$SD)^2/bb$n, na.rm=T)/(unique(bb$N)^2))
mean(bb$Mean)
se.100^0.5*1.28
|
e1253584a16014f6686a8302a66d5f20470edf41
|
ce3bc493274116150497e73aa7539fef1c07442a
|
/R/other_plots.R
|
68741f22d9137bdfaed6c336ae6e692e114d2b08
|
[] |
no_license
|
laresbernardo/lares
|
6c67ff84a60efd53be98d05784a697357bd66626
|
8883d6ef3c3f41d092599ffbdd4c9c352a9becef
|
refs/heads/main
| 2023-08-10T06:26:45.114342
| 2023-07-27T23:47:30
| 2023-07-27T23:48:57
| 141,465,288
| 235
| 61
| null | 2023-07-27T15:58:31
| 2018-07-18T17:04:39
|
R
|
UTF-8
|
R
| false
| false
| 10,426
|
r
|
other_plots.R
|
####################################################################
#' Plot Result with Nothing to Plot
#'
#' This function lets the user print a plot without plot, with a
#' customizable message. It is quite useful for Shiny renderPlot when
#' using filters and no data is returned.
#'
#' @family Visualization
#' @param message Character. What message do you wish to show?
#' @param size Numeric. Font size for \code{message} input.
#' @param ... Additional parameters passed to \code{theme_lares()}.
#' @return Empty ggplot2 object (with a \code{message} if set).
#' @examples
#' Sys.unsetenv("LARES_FONT") # Temporal
#' noPlot(message = "No plot to show!")
#' noPlot(background = "#FF5500", size = 7)
#' @export
noPlot <- function(message = "Nothing to show here!",
                   size = 4.5, ...) {
  # Empty canvas whose only layer is the centered message text; extra
  # arguments are forwarded to theme_lares().
  empty <- data.frame()
  canvas <- ggplot(empty, aes(x = 0, y = 0, label = message))
  canvas +
    theme_lares(clean = TRUE, ...) +
    geom_text(size = size)
}
####################################################################
#' Export ggplot2, gridExtra, or any plot object into rendered file
#'
#' Export any \code{ggplot2}, \code{gridExtra}, or any plot object
#' created with R into rendered \code{png} or \code{jpg} file.
#'
#' @family Tools
#' @param p Plot object. Plot to render and export.
#' @param name Character. File's name or suffix if vars is not \code{NA}. No need
#' to include file format on file name.
#' @param vars Vector. Variable names to identify by filename.
#' @param sep Character. Separator for \code{vars}.
#' @param format Character. One of: \code{png} or \code{jpeg}.
#' @param width,height,res Numeric. Plot's width, height, and res (for grids).
#' @param dir,subdir Character. In which directory/subdirectory do you
#' wish to save the plot? Working directory as default \code{dir}.
#' @param quiet Boolean. Display successful message with filename when saved?
#' @return No return value, called for side effects.
#' @examples
#' p <- noPlot()
#' export_plot(p, name = "noplot", width = 10, height = 8, res = 300, dir = tempdir())
#' export_plot(p, name = "noplot2", subdir = "newplots", dir = tempdir())
#' @export
export_plot <- function(p,
                        name = "plot", vars = NA, sep = ".vs.",
                        width = 8, height = 6,
                        format = "png", res = 300,
                        dir = getwd(), subdir = NA,
                        quiet = FALSE) {
  # Render any plot object into a png/jpeg file on disk.
  check_opts(format, c("png", "jpeg"))
  # File name: strip any extension the caller may have included
  name <- sub("\\..[^\\.]*$", "", name)
  end <- paste0(".", format)
  # BUG FIX: `vars` is documented as a vector, but `if (!is.na(vars))`
  # errors with "condition has length > 1" (R >= 4.2) whenever more than
  # one variable name is passed. Treat any non-all-NA input as "use vars".
  if (!all(is.na(vars))) {
    names <- v2t(cleanText(as.character(vars), spaces = FALSE), sep = sep, quotes = FALSE)
    file_name <- paste0(name, "_", names, end)
  } else {
    file_name <- paste0(name, end)
  }
  # Create output directory if needed (recursive so nested subdirs work)
  if (!is.na(subdir)) {
    dir <- file.path(dir, subdir)
    if (!dir.exists(dir)) dir.create(dir, recursive = TRUE)
  }
  file_name <- paste(dir, file_name, sep = "/")
  # Open the graphics device matching `format`, draw the plot, close it
  export_fx <- base::get(format)
  export_fx(file_name, height = height * res, width = width * res, res = res)
  plot(p)
  dev.off()
  if (!quiet) message(paste("Plot saved as", file_name))
}
####################################################################
#' Plot timeline as Gantt Plot
#'
#' This function plots groups of observartions with timelines in a
#' Gantt Plot way. Only works if start and end are date format values.
#'
#' @family Visualization
#' @param event Vector. Event, role, label, or row.
#' @param start Vector. Start date.
#' @param end Vector. End date. Only one day be default if not defined
#' @param label Vector. Place, institution, or label.
#' @param group Vector. Academic, Work, Extracurricular... Pass as factor
#' to keep a specific order
#' @param title Character. Title for the plot
#' @param subtitle Character. Subtitle for the plot
#' @param interactive Boolean. Run with plotly?
#' @param save Boolean. Save the output plot in our working directory
#' @param subdir Character. Into which subdirectory do you wish to save the plot to?
#' @return ggplot2 object
#' @examples
#' Sys.unsetenv("LARES_FONT") # Temporal
#' cols <- c("Role", "Place", "Type", "Start", "End")
#' today <- as.character(Sys.Date())
#' cv <- data.frame(rbind(
#' c("Marketing Science Partner", "Facebook", "Work Experience", "2019-12-09", today),
#' c("Data Scientist Consultant", "MatrixDS", "Work Experience", "2018-09-01", today),
#' c("R Community Contributor", "lares library", "Extra", "2018-07-18", today),
#' c("Lead Data Scientist", "MEG", "Work Experience", "2019-01-15", "2019-12-09"),
#' c("Head of Analytics", "Comparamejor/R5", "Work Experience", "2016-08-01", "2019-01-15"),
#' c("Big Data & Data Science Programme", "UdC", "Academic", "2017-09-01", "2018-02-28"),
#' c("Project Engineer", "Polytex", "Work Experience", "2016-05-15", "2016-09-01"),
#' c("Big Data Analyst", "MEG", "Work Experience", "2016-01-01", "2016-04-30"),
#' c("Advanced Excel Instructor", "ARTS", "Work Experience", "2015-11-01", "2016-04-30"),
#' c("Continuous Improvement Intern", "PAVCO", "Work Experience", "2015-04-01", "2015-08-30"),
#' c("Mechanical Design Intern", "SIGALCA", "Work Experience", "2013-07-01", "2013-09-30"),
#' c("DJs Online Community Owner", "LaresDJ.com / SoloParaDJs", "Extra", "2010-01-05", "2020-05-20"),
#' c("Mechanical Engineer Degree", "USB", "Academic", "2009-09-15", "2015-11-20"),
#' c("DJ and Composer/Producer", "Legacy Discplay", "Extra", "2009-05-01", "2015-04-30")
#' ))
#' colnames(cv) <- cols
#' plot_timeline(
#' event = cv$Role,
#' start = cv$Start,
#' end = cv$End,
#' label = cv$Place,
#' # Simple trick to re-arrange the grids
#' group = factor(cv$Type, levels = c("Work Experience", "Academic", "Extra"))
#' )
#' @export
plot_timeline <- function(event,
                          start, end = start + 1,
                          label = NA, group = NA,
                          title = "Curriculum Vitae Timeline",
                          subtitle = "Bernardo Lares",
                          interactive = FALSE,
                          save = FALSE,
                          subdir = NA) {
  # Coerce `start` to Date *before* the lazily-evaluated default for `end`
  # is forced, so `end = start + 1` also works when `start` is supplied as
  # character (previously character + 1 errored whenever `end` was omitted).
  start <- as.Date(as.character(start))
  # Let's gather all the data
  df <- data.frame(
    Role = as.character(event),
    Place = as.character(label),
    Start = start,
    End = as.Date(as.character(end)),
    Type = group
  )
  # Duplicate rows so each event contributes both endpoints of a geom_line
  cvlong <- data.frame(
    pos = rep(as.numeric(rownames(df)), 2),
    name = rep(as.character(df$Role), 2),
    type = rep(factor(df$Type, ordered = TRUE), 2),
    where = rep(as.character(df$Place), 2),
    value = c(df$Start, df$End),
    label_pos = rep(df$Start + floor((df$End - df$Start) / 2), 2)
  )
  # Plot timeline; vertical reference line at the latest end date
  maxdate <- max(df$End)
  p <- ggplot(cvlong, aes(
    x = .data$value,
    y = reorder(.data$name, -.data$pos),
    label = .data$where,
    group = .data$pos
  )) +
    geom_vline(xintercept = maxdate, alpha = 0.2) +
    labs(title = title, subtitle = subtitle, x = NULL, y = NULL, colour = NULL) +
    theme(
      panel.background = element_rect(fill = "white", colour = NA),
      axis.ticks = element_blank(),
      panel.grid.major.x = element_line(size = 0.25, colour = "grey80")
    )
  # Facet/colour by group only when a grouping vector was actually supplied
  if (!is.na(cvlong$type)[1] || length(unique(cvlong$type)) > 1) {
    p <- p + geom_line(aes(colour = .data$type), size = 7) +
      facet_grid(.data$type ~ ., scales = "free", space = "free") +
      guides(colour = "none")
  }
  p <- p +
    geom_label(aes(x = .data$label_pos), colour = "black", size = 2, alpha = 0.7) +
    theme_lares(pal = 2, legend = "none")
  # Export file name and folder for plot
  if (save) {
    file_name <- "cv_timeline.png"
    if (!is.na(subdir)) {
      file_name <- paste(subdir, file_name, sep = "/")
    }
    # BUG FIX: was `p <- p + ggsave(...)`. ggsave() returns a file path, not
    # a ggplot layer, so adding it to the plot errors. Save as a side effect.
    ggsave(file_name, plot = p, width = 8, height = 6)
    message(paste("Saved plot as", file_name))
  }
  if (interactive) {
    try_require("plotly")
    p <- ggplotly(p)
  }
  return(p)
}
####################################################################
#' Chords Plot
#'
#' This auxiliary function plots discrete and continuous values results
#'
#' @family Visualization
#' @param origin,dest Vectors. Origin and destination vectors
#' @param weight Vector. Weight for each chord.
#' @param mg Numeric. Margin adjust for plot in case of need
#' @param title Character. Title for the plot
#' @param subtitle Character. Subtitle for the plot
#' @param pal Vector. Colour palette. Order matters.
#' @return chordDiagram object
#' @examples
#' # You must have "circlize" library to use this auxiliary function:
#' \dontrun{
#' df <- data.frame(from = c(1, 1, 2, 3, 4, 1, 6), to = c(4, 4, 4, 2, 2, NA, NA))
#' plot_chord(df$from, df$to)
#' }
#' @export
plot_chord <- function(origin, dest,
                       weight = 1, mg = 3,
                       title = "Chord Diagram",
                       subtitle = "", pal = NA) {
  try_require("circlize")
  # Origin and destination must pair up one-to-one
  if (length(origin) != length(dest)) {
    stop("The origin and dest vectors should have the same length!")
  }
  # Replace empty strings / NA with printable placeholders
  df <- data.frame(origin, dest, weight) %>%
    mutate(
      origin = ifelse(.data$origin == "", " ", as.character(.data$origin)),
      dest = ifelse(.data$dest == "", " ", as.character(.data$dest))
    ) %>%
    replaceall(NA, "NA")
  colnames(df) <- c("orig_reg", "dest_reg", "flow")
  uniq <- unique(c(as.character(df$orig_reg), as.character(df$dest_reg)))
  # BUG FIX: `if (is.na(pal))` errors with "condition has length > 1"
  # (R >= 4.2) whenever a custom palette vector is passed. Only fall back
  # to the default palette when `pal` was left as the single NA default.
  if (all(is.na(pal))) pal <- names(lares_pal()$palette)
  if (length(unique(origin)) > length(pal)) {
    stop("Too many chords to plot and not enough colours :(")
  }
  # One colour per distinct origin; remaining sectors (pure destinations)
  # are drawn in grey
  col <- c(
    pal[seq_along(unique(origin))],
    rep("darkgrey", length(unique(uniq)) - length(unique(origin)))
  )
  chordDiagram(
    x = df,
    grid.col = col,
    transparency = 0.2, directional = 1,
    preAllocateTracks = list(
      track.height = uh(mg, "mm"),
      track.margin = c(uh(mg, "mm"), 0)
    ),
    direction.type = c("arrows", "diffHeight"), diffHeight = -0.04,
    annotationTrack = c("grid", "axis"), annotationTrackHeight = c(0.05, 0.1),
    link.arr.type = "big.arrow", link.sort = TRUE, link.largest.ontop = TRUE
  )
  title(main = title, line = -1, sub = subtitle, font.sub = 3, family = "Arial Narrow")
  legend("bottomright",
    pch = 15, col = col, legend = unique(origin),
    bg = "transparent", box.lty = 0, cex = 0.8
  )
}
|
a601d496251e65016f3f4147c8d409ca182430c3
|
8b066c2e74151c9e39f5c69912b2786c2a3d40c3
|
/man/hs_folder.Rd
|
015bb1183a5bebdb722811b31d82c82e3ce121c1
|
[
"MIT"
] |
permissive
|
program--/HSClientR
|
eb074192e6ea9bf8a807d293df09c2794f9b31df
|
d054bb7a6f839e7f5388a830b44c88a0e94a06ca
|
refs/heads/master
| 2023-03-30T20:34:46.696394
| 2021-04-08T09:59:02
| 2021-04-08T09:59:02
| 341,659,373
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 393
|
rd
|
hs_folder.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hs_resource.R
\name{hs_folder}
\alias{hs_folder}
\title{Get Resource Folder}
\usage{
hs_folder(id, pathname, ...)
}
\arguments{
\item{id}{Resource ID}
\item{pathname}{Path to folder in resource's contents}
\item{...}{Unused}
}
\value{
A \link[tibble:tibble-package]{tibble}.
}
\description{
Get Resource Folder
}
|
03bb4e96bcc6bfdad955974b4d5b3dcc808cb244
|
0f4f297985d383c0b2f22642fae1e126ffc04eb1
|
/plot4.R
|
777437f1f2c4fa6bb985b482600b1cfef0e264a3
|
[] |
no_license
|
scng/ExData_Plotting1
|
5ab848c98f6d8eff2d8ef66107948841e1779466
|
e7d68f004a526166e30ef8afb340b4a8fd4f6c99
|
refs/heads/master
| 2020-12-26T04:27:34.703705
| 2015-04-07T16:53:37
| 2015-04-07T16:53:37
| 33,536,899
| 0
| 0
| null | 2015-04-07T10:31:03
| 2015-04-07T10:31:02
| null |
UTF-8
|
R
| false
| false
| 2,248
|
r
|
plot4.R
|
plot4 <- function(){
  # Write plot4.png: a 2x2 grid of household power-consumption time series
  # (global active power, voltage, energy sub metering 1-3, and global
  # reactive power) for the dates 2007-02-01 and 2007-02-02.
  # Load the two-day dataset once and cache it in the global variable d1
  # so repeated plotting calls skip the expensive read.
  if(!exists("d1")){
    source("loadData.R")
    d1 <<- loadData()
  }
  # Extract the series once for readability.
  # BUG FIX: the previous version removed NA values from each y-vector
  # independently while plotting against the full Time vector, so plot()
  # and lines() failed with a length mismatch whenever NAs were present.
  # NAs are kept instead: lines() simply leaves a gap at missing values.
  ap  <- d1$Global_active_power
  v1  <- d1$Voltage
  m1  <- d1$Sub_metering_1
  m2  <- d1$Sub_metering_2
  m3  <- d1$Sub_metering_3
  rp  <- d1$Global_reactive_power
  dow <- d1$Time
  # Open png device
  png(filename="plot4.png", width=480, height=480)
  # Arrange the four plots in a 2x2 grid
  par(mfrow=c(2,2))
  # Top-left plot: Global_active_power vs Time
  plot(dow, ap, type="n",
       xlab="", ylab="Global Active Power (kilowatts)")
  lines(dow, ap)
  # Top-right plot: Voltage vs Time
  plot(dow, v1, type="n",
       xlab="datetime", ylab="Voltage")
  lines(dow, v1)
  # Bottom-left plot: the three sub-metering series vs Time, with legend
  plot(dow, m1, type="n",
       xlab="", ylab="Energy sub metering")
  lines(dow, m1)
  lines(dow, m2, col="red")
  lines(dow, m3, col="blue")
  legend("topright",
         c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         col=c("black", "red", "blue"), lty=c(1,1,1))
  # Bottom-right plot: Global_reactive_power vs Time
  plot(dow, rp, type="n",
       xlab="datetime", ylab="Global_reactive_power")
  lines(dow, rp)
  # Close png device
  dev.off()
}
|
f440a07bd736f1d6ba7856dfc9497c79a6fabfb6
|
62ecec9e0aa75428f3718706197af0284569f5c9
|
/man/generateOutput.Rd
|
91a2b53e4d1e1bb370726f4f0a7a1e17cc1ffb6a
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Mu-Sigma/analysis-pipelines
|
9e4c83d4ba63bfa2422fd3a4a88c601837e920c4
|
a7bfb1a0d5d251a42309b2430c11535be817dea9
|
refs/heads/master
| 2021-06-24T11:05:41.659135
| 2020-11-23T10:42:06
| 2020-11-23T10:42:06
| 161,642,328
| 29
| 1
| null | 2020-06-12T07:37:23
| 2018-12-13T13:20:53
|
R
|
UTF-8
|
R
| false
| true
| 2,157
|
rd
|
generateOutput.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/core-functions.R, R/core-functions-batch.R,
% R/core-streaming-functions.R
\docType{methods}
\name{generateOutput}
\alias{generateOutput}
\alias{generateOutput,AnalysisPipeline-method}
\alias{generateOutput,StreamingAnalysisPipeline-method}
\title{Generate a list of outputs from Pipeline objects}
\usage{
generateOutput(object)
\S4method{generateOutput}{AnalysisPipeline}(object)
\S4method{generateOutput}{StreamingAnalysisPipeline}(object)
}
\arguments{
\item{object}{object that contains input, pipeline, registry and output}
}
\value{
Updated Pipeline object with the outputs at each step stored in the \code{output} slot.
Specific outputs can be obtained by using the \link{getOutputById} function
}
\description{
Generate a list of outputs from Pipeline objects
}
\details{
\code{generateOutput} is a generic function that is implemented for various types of pipeline objects
such as \code{AnalysisPipeline} and \code{StreamingAnalysisPipeline}
The sequence of operations stored in the pipeline object
are run and outputs generated, stored in a list
}
\seealso{
Other Package core functions: \code{\link{BaseAnalysisPipeline-class}},
\code{\link{MetaAnalysisPipeline-class}},
\code{\link{assessEngineSetUp}},
\code{\link{checkSchemaMatch}},
\code{\link{createPipelineInstance}},
\code{\link{exportAsMetaPipeline}},
\code{\link{genericPipelineException}},
\code{\link{getInput}}, \code{\link{getLoggerDetails}},
\code{\link{getOutputById}},
\code{\link{getPipelinePrototype}},
\code{\link{getPipeline}}, \code{\link{getRegistry}},
\code{\link{initDfBasedOnType}},
\code{\link{initialize,BaseAnalysisPipeline-method}},
\code{\link{loadMetaPipeline}},
\code{\link{loadPipeline}},
\code{\link{loadPredefinedFunctionRegistry}},
\code{\link{loadRegistry}}, \code{\link{prepExecution}},
\code{\link{registerFunction}},
\code{\link{savePipeline}}, \code{\link{saveRegistry}},
\code{\link{setInput}}, \code{\link{setLoggerDetails}},
\code{\link{updateObject}},
\code{\link{visualizePipeline}}
}
\concept{Package core functions}
|
6de946cce62a4c6e8514545e81e256a7687cb659
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/devFunc/examples/checkValues.Rd.R
|
5686d1d2fd1fca8c2928ffaef90cb1ff36910dc6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 721
|
r
|
checkValues.Rd.R
|
# Extracted example script for devFunc::checkValues.
# Demonstrates validating that scalar arguments take only authorized values;
# the later calls deliberately trigger validation errors.
library(devFunc)
### Name: checkValues
### Title: Checking if the value of vectors (of length 1) is authorized.
### Aliases: checkValues
### ** Examples
lossType <- 'absolute'
# valid value: no error expected
checkValues(list(lossType), list(c('absolute', 'quadratic')))
## No test:
checkValues(list(lossType), list(c('absolute', 'quadratic'), c('test', 'test2')))
## End(No test)
#The next error message is weird, since it does not return the real name of the listObject
#that found to be wrong.
lossType <- 'absolute55'
listObjects <- list(lossType)
listValues <- list(c('absolute', 'quadratic'))
## No test:
checkValues(listObjects, listValues)
#Now it is ok...
checkValues(list(lossType), list(c('absolute', 'quadratic')))
## End(No test)
|
53866eb6923470ef034c2ac1b2292de457469225
|
4538b9bf03a944a1e08653c728cd179db3d59f6c
|
/estimate_r.R
|
4b5c15dc5ce83a2288009b86d5417e496eb6ab5d
|
[] |
no_license
|
amo105/chemicalkinetics
|
bdf7b05bd133c265e5387b6744b716f5554ba274
|
265b5f87815cb161c2041d731b3c58d6223ae269
|
refs/heads/master
| 2020-04-02T06:15:03.920603
| 2018-10-22T12:31:43
| 2018-10-22T12:31:43
| 154,137,840
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,596
|
r
|
estimate_r.R
|
# estimate_r5: estimates log correlation-range parameters of a separable
# Gaussian-process emulator by maximizing the marginal posterior with
# L-BFGS-B (analytic gradient supplied).
#
# Args:
#   X.arrayA2, X.arrayB2 : 3-d arrays of squared pairwise input distances for
#                          two 5-dimensional input blocks (first dim = design size).
#   X.arrayC2            : matrix of squared pairwise time distances.
#   Z                    : n x k matrix of outputs (k = 3 given the hard-coded
#                          three-column kron.prod calls below).
#   ini, lower, upper    : start value and box bounds for optim on the
#                          log-scale parameters (11 in total: 5 + 5 + 1).
# Returns: the full optim() result list.
#
# NOTE(review): relies on project helpers makeA5.cpp and kron.prod, which are
# not visible here -- their exact semantics (correlation construction and
# Kronecker-structured solves) are assumed from usage.
estimate_r5<-function(X.arrayA2,X.arrayB2,X.arrayC2,Z,ini,lower=rep(c(-5,-16,-5),c(5,5,1)),upper=rep(8,length(ini))){
# center the outputs and whiten them via the eigendecomposition of their
# sample covariance, so the k output columns can be treated independently
drt<-apply(Z,2,mean)
Zr<-Z-matrix(rep(drt,dim(Z)[1]),ncol=length(drt),byrow=TRUE)
SIG<-var(Zr)
EIG<-eigen(x=SIG,symmetric=TRUE)
UUU<-EIG$vectors
LAM<-EIG$values
CCC<-UUU%*%diag(1/sqrt(LAM))
Zr<-Zr%*%CCC
n_MO<-dim(Z)[1]
lut<-dim(X.arrayC2)[1]
m_MO<-1
kk<-dim(Z)[2]
ddd<-dim(X.arrayA2)[1]
eee<-dim(X.arrayB2)[1]
# log marginal posterior of the range parameters (objective for optim);
# paras are on the log scale, exponentiated to positive ranges r
marg.post<-function(paras){
#fff<<-rbind(fff,paras)
r<-exp(paras)
aaA<-exp(makeA5.cpp(D1=X.arrayA2[,1,],D2=X.arrayA2[,2,],D3=X.arrayA2[,3,],D4=X.arrayA2[,4,],D5=X.arrayA2[,5,],R=r[1:5]))
aaB<-exp(makeA5.cpp(D1=X.arrayB2[,1,],D2=X.arrayB2[,2,],D3=X.arrayB2[,3,],D4=X.arrayB2[,4,],D5=X.arrayB2[,5,],R=r[6:10]))
aaC<-exp(-r[11]*(X.arrayC2))
# iaaA<-solve.cpp(aaA)
# iaaB<-solve.cpp(aaB)
# iaaC<-solve.cpp(aaC)
iaaA<-solve(aaA)
iaaB<-solve(aaB)
iaaC<-solve(aaC)
iaaZ<-cbind(kron.prod(y=Zr[,1],matrices=list(iaaC,iaaB,iaaA)),kron.prod(y=Zr[,2],matrices=list(iaaC,iaaB,iaaA)),kron.prod(y=Zr[,3],matrices=list(iaaC,iaaB,iaaA)))
detaa<-n_MO*((1/ddd)*determinant(aaA)$modulus[1]+(1/eee)*determinant(aaB)$modulus[1]+(1/lut)*determinant(aaC)$modulus[1])
ret<--0.5*kk*detaa-0.5*sum(iaaZ*Zr)
ret}
# analytic gradient of marg.post w.r.t. the 11 log-parameters; each entry
# uses the standard d/dr log|A| and quadratic-form derivative identities
dmarg.post<-function(paras){
#ggg<<-rbind(ggg,paras)
r<-exp(paras)
aaA<-exp(makeA5.cpp(D1=X.arrayA2[,1,],D2=X.arrayA2[,2,],D3=X.arrayA2[,3,],D4=X.arrayA2[,4,],D5=X.arrayA2[,5,],R=r[1:5]))
aaB<-exp(makeA5.cpp(D1=X.arrayB2[,1,],D2=X.arrayB2[,2,],D3=X.arrayB2[,3,],D4=X.arrayB2[,4,],D5=X.arrayB2[,5,],R=r[6:10]))
aaC<-exp(-r[11]*(X.arrayC2))
# iaaA<-solve.cpp(aaA)
# iaaB<-solve.cpp(aaB)
# iaaC<-solve.cpp(aaC)
iaaA<-solve(aaA)
iaaB<-solve(aaB)
iaaC<-solve(aaC)
der<-c()
# derivatives w.r.t. the 5 ranges of block A
for(i in 1:5){
dA<--r[i]*X.arrayA2[,i,]*aaA
iAdA<-iaaA%*%dA
iAdAiA<-iAdA%*%iaaA
iaaZ<-cbind(kron.prod(y=Zr[,1],matrices=list(iaaC,iaaB,iAdAiA)),kron.prod(y=Zr[,2],matrices=list(iaaC,iaaB,iAdAiA)),kron.prod(y=Zr[,3],matrices=list(iaaC,iaaB,iAdAiA)))
der[i]<-0.5*sum(iaaZ*Zr)-0.5*kk*lut*eee*sum(diag(iAdA))}
# derivatives w.r.t. the 5 ranges of block B
for(i in 1:5){
dB<--r[i+5]*X.arrayB2[,i,]*aaB
iBdB<-iaaB%*%dB
iBdBiB<-iBdB%*%iaaB
iaaZ<-cbind(kron.prod(y=Zr[,1],matrices=list(iaaC,iBdBiB,iaaA)),kron.prod(y=Zr[,2],matrices=list(iaaC,iBdBiB,iaaA)),kron.prod(y=Zr[,3],matrices=list(iaaC,iBdBiB,iaaA)))
der[i+5]<-0.5*sum(iaaZ*Zr)-0.5*kk*lut*ddd*sum(diag(iBdB))}
# derivative w.r.t. the single time-range parameter
dC<--r[11]*X.arrayC2*aaC
iCdC<-iaaC%*%dC
iCdCiC<-iCdC%*%iaaC
iaaZ<-cbind(kron.prod(y=Zr[,1],matrices=list(iCdCiC,iaaB,iaaA)),kron.prod(y=Zr[,2],matrices=list(iCdCiC,iaaB,iaaA)),kron.prod(y=Zr[,3],matrices=list(iCdCiC,iaaB,iaaA)))
der[11]<-0.5*sum(iaaZ*Zr)-0.5*kk*ddd*eee*sum(diag(iCdC))
der}
# maximize (fnscale=-1) the marginal posterior within the box [lower, upper]
opt<-optim(fn=marg.post,gr=dmarg.post,par=ini,control=list(fnscale=-1,maxit=200),method="L-BFGS-B",lower=lower,upper=upper)
return(opt)}
# GP_functions5: given fitted log-range parameters (opt.logr, typically from
# estimate_r5), precomputes the correlation matrices and their inverses and
# returns a closure bundle of GP predictors: posterior means (Mhat/mhat,
# mu2hat, mu2hatB), predictive scale (sigtilde), and first/second derivatives
# of the mean (dMhat/dmhat, d2Mhat/d2mhat), plus the fitted scale matrix s.hat.
#
# NOTE(review): depends on project helpers (makeA5.cpp, makeaa.cpp,
# solve.cpp, kron.prod, kron3cpp, diags.cpp, Mhat5.cpp, dMhat5.cpp,
# d2Mhat5.cpp) and on GLOBAL variables `times`, `what.times` and `mm` that
# are not defined in this file -- the returned closures will fail if those
# globals are absent. The constants 31/62/93/124/155 hard-code 31 time
# points per series and 5 series; confirm before reusing on other data.
GP_functions5<-function(X.arrayA2,X.arrayB2,X.arrayC2,Z,opt.logr,DESIGN,design,uni.times){
# center outputs; drt is added back in every mean predictor below
drt<-apply(Z,2,mean)
Zr<-Z-matrix(rep(drt,dim(Z)[1]),ncol=length(drt),byrow=TRUE)
n_MO<-dim(Z)[1]
lut<-dim(X.arrayC2)[1]
m_MO<-1
kk<-dim(Z)[2]
ddd<-dim(X.arrayA2)[1]
eee<-dim(X.arrayB2)[1]
# back-transform log-ranges and build the three correlation factors and
# their inverses once; all closures below capture these
r<-exp(opt.logr)
aaA<-exp(makeA5.cpp(D1=X.arrayA2[,1,],D2=X.arrayA2[,2,],D3=X.arrayA2[,3,],D4=X.arrayA2[,4,],D5=X.arrayA2[,5,],R=r[1:5]))
aaB<-exp(makeA5.cpp(D1=X.arrayB2[,1,],D2=X.arrayB2[,2,],D3=X.arrayB2[,3,],D4=X.arrayB2[,4,],D5=X.arrayB2[,5,],R=r[6:10]))
aaC<-exp(-r[11]*(X.arrayC2))
iaaA<-solve.cpp(aaA)
iaaB<-solve.cpp(aaB)
iaaC<-solve.cpp(aaC)
iaaZ<-cbind(kron.prod(y=Zr[,1],matrices=list(iaaC,iaaB,iaaA)),kron.prod(y=Zr[,2],matrices=list(iaaC,iaaB,iaaA)),kron.prod(y=Zr[,3],matrices=list(iaaC,iaaB,iaaA)))
residualv<-as.vector(Zr)
s.hat<-t(Zr)%*%iaaZ
# map observation times of each of the 5 series (globals `times`,
# `what.times`) to indices into uni.times, then build the combined
# row-selection index SRT used by the predictors
srt1<-c();srt2<-c();srt3<-c();srt4<-c();srt5<-c()
for(bob in times[what.times==1]){
srt1<-c(srt1,(1:length(uni.times))[uni.times==bob])}
for(bob in times[what.times==2]){
srt2<-c(srt2,(1:length(uni.times))[uni.times==bob])}
for(bob in times[what.times==3]){
srt3<-c(srt3,(1:length(uni.times))[uni.times==bob])}
for(bob in times[what.times==4]){
srt4<-c(srt4,(1:length(uni.times))[uni.times==bob])}
for(bob in times[what.times==5]){
srt5<-c(srt5,(1:length(uni.times))[uni.times==bob])}
srt<-list(srt1,srt2,srt3,srt4,srt5)
SRT<-c(srt[[1]],31+srt[[2]],62+srt[[3]],93+srt[[4]],124+srt[[5]])
SRT<-c(SRT,155+SRT,2*155+SRT)
#######################################################################################################
# predictive standard-deviation factor at new inputs (THETA, INI_X, TIM)
sigtilde<-function(THETA,INI_X,TIM){
aA<-makeaa.cpp(DESIGN=DESIGN,THETA=THETA,R=r[1:5])
aB<-makeaa.cpp(DESIGN=design,THETA=INI_X,R=r[6:10])
aC<-makeaa.cpp(DESIGN=matrix(uni.times,ncol=1),THETA=matrix(TIM,ncol=1),R=r[11])
ata<-aA%*%iaaA
btb<-aB%*%iaaB
ctc<-aC%*%iaaC
tata<-t(ata)
1-diags.cpp(X=aA,Y=tata)*as.vector(aB%*%t(btb))*as.vector(aC%*%t(ctc))}
# posterior mean at a single (thet, x) pair over a vector of times `tim`
mu2hat<-function(thet,x,tim){
aA<-matrix(exp(-rowSums.cpp(matrix(rep(r[1:5],ddd),ncol=5,byrow=TRUE)*(matrix(rep(thet,ddd),ncol=5,byrow=TRUE)-DESIGN)^2)),nrow=1)
aB<-matrix(exp(-rowSums.cpp(matrix(rep(r[6:10],eee),ncol=5,byrow=TRUE)*(matrix(rep(x,eee),ncol=5,byrow=TRUE)-design)^2)),nrow=1)
aC<-matrix(exp(-r[11]*(rep(tim,each=length(uni.times))-rep(uni.times,length(tim)))^2),nrow=length(tim),byrow=TRUE)
ata<-aA%*%iaaA
btb<-aB%*%iaaB
ctc<-aC%*%iaaC
out<-cbind(kron3cpp(aa0=ata,aa1=btb,aa2=ctc,yy=Zr[,1]),kron3cpp(aa0=ata,aa1=btb,aa2=ctc,yy=Zr[,2]),kron3cpp(aa0=ata,aa1=btb,aa2=ctc,yy=Zr[,3]))
matrix(rep(drt,length(tim)),ncol=kk,byrow=TRUE)+out}
# batched variant of mu2hat: THETA/INI_X/TIM are design matrices of new points
mu2hatB<-function(THETA,INI_X,TIM){
aA<-makeaa.cpp(DESIGN=DESIGN,THETA=THETA,R=r[1:5])
aB<-makeaa.cpp(DESIGN=design,THETA=INI_X,R=r[6:10])
aC<-makeaa.cpp(DESIGN=matrix(uni.times,ncol=1),THETA=matrix(TIM,ncol=1),R=r[11])
ata<-aA%*%iaaA
btb<-aB%*%iaaB
ctc<-aC%*%iaaC
out<-cbind(kron3cpp(aa0=ata,aa1=btb,aa2=ctc,yy=Zr[,1]),kron3cpp(aa0=ata,aa1=btb,aa2=ctc,yy=Zr[,2]),kron3cpp(aa0=ata,aa1=btb,aa2=ctc,yy=Zr[,3]))
matrix(rep(drt,dim(out)[1]),ncol=kk,byrow=TRUE)+out}
# posterior mean at theta, restricted to the observed rows (global `mm`)
Mhat<-function(theta){
nnn<-length(uni.times)
ddd<-dim(DESIGN)
diffs<-matrix(rep(theta,ddd[1]),ncol=ddd[2],byrow=TRUE)-DESIGN
AAA<-Mhat5.cpp(RESIDUAL=Zr,D2=diffs^2,R=r[1:5],iaaA=iaaA)
matrix(rep(drt,mm),ncol=kk,byrow=TRUE)+AAA[SRT[1:mm],]}
# vectorized form of Mhat (stacks the kk output columns)
mhat<-function(theta){
as.vector(Mhat(theta))}
#########################################################################################################
# Jacobian of Mhat w.r.t. the 5 components of theta
dMhat<-function(theta){
nnn<-length(uni.times)
ddd<-dim(DESIGN)
diffs<-matrix(rep(theta,dim(DESIGN)[1]),ncol=dim(DESIGN)[2],byrow=TRUE)-DESIGN
out<-dMhat5.cpp(RESIDUAL=Zr,iaaA=iaaA,D=diffs,D2=diffs^2,R=r[1:5])
out2<-array(0,dim=c(nnn*5,5,kk))
for(j in 1:5){
out2[,j,]<-out[((j-1)*nnn*5+1):(j*nnn*5),]}
out2[SRT[1:mm],,]}
# Jacobian reshaped to match mhat's stacked layout
dmhat<-function(theta){
out<-dMhat(theta)
out2<-cbind(as.vector(out[,1,]),as.vector(out[,2,]),as.vector(out[,3,]),as.vector(out[,4,]),as.vector(out[,5,]))
out2}
############################################################################################################
# Hessian (second derivatives) of Mhat w.r.t. theta
d2Mhat<-function(theta){
nnn<-length(uni.times)
ddd<-dim(DESIGN)
diffs<-matrix(rep(theta,ddd[1]),ncol=ddd[2],byrow=TRUE)-DESIGN
out<-d2Mhat5.cpp(RESIDUAL=Zr,iaaA=iaaA,D=diffs,D2=diffs^2,R=r[1:5])
output<-array(0,dim=c(nnn*5,5,5,kk))
for(ii in 1:5){
for(jj in 1:5){
output[,ii,jj,]<-out[((ii-1)*5+jj-1)*155+(1:155),]}}
output<-output[SRT[1:mm],,,]
output}
# Hessian reshaped to match mhat's stacked layout
d2mhat<-function(theta){
out<-d2Mhat(theta)
out2<-array(0,dim=c(5,mm*kk,5))
for(j in 1:5){
out2[j,,]<-cbind(as.vector(out[,j,1,]),as.vector(out[,j,2,]),as.vector(out[,j,3,]),as.vector(out[,j,4,]),as.vector(out[,j,5,]))}
out2}
###############################################################################################################
list(s.hat=s.hat,sigtilde=sigtilde,Mhat=Mhat,mhat=mhat,dMhat=dMhat,dmhat=dmhat,d2Mhat=d2Mhat,d2mhat=d2mhat,mu2hat=mu2hat,mu2hatB=mu2hatB)
}
|
ca3497d7217f073b0d12f2ec9c266d5f7a658771
|
8d8c704954ecb160ce3e68e0c2fc9631f49f4827
|
/UMD_DMD_plot.R
|
86b4c18046669062ddefbfb84c8f579872113a6f
|
[] |
no_license
|
leklab/DMD
|
cbce7fa038d7a65d086974865ebc5e4c5787048f
|
0b4547c45dedb400459d11ee510cf90edc4a562f
|
refs/heads/master
| 2020-09-20T13:15:22.132684
| 2019-11-29T19:23:51
| 2019-11-29T19:23:51
| 224,492,404
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,220
|
r
|
UMD_DMD_plot.R
|
library(rvest)
library(tidyverse)
#functions
# Expand an exon range into one entry per affected exon per patient record:
# exons start..end are repeated once for each of the n records carrying the
# rearrangement. start/end may arrive as character and are coerced to integer.
getexons <- function(start, end, n) {
  exon_range <- as.integer(start):as.integer(end)
  rep(exon_range, times = n)
}
##Deletions
# Scrape the UMD DMD deletion table, collapse whitespace/semicolon noise from
# the raw HTML rows, keep only header/protein rows, and parse into a tibble.
#get the data
dels <- read_html("http://www.umd.be/DMD/4DACTION/W_DMDT1/3") %>%
  html_nodes('tr') %>%
  html_text() %>%
  gsub("\\r", ";", .) %>%
  gsub("\\s+;", ";", .) %>%
  gsub(";\\s+", ";", .) %>%
  gsub("\\#", "n", .) %>%
  gsub(";$", "", .) %>%
  trimws() %>%
  .[grepl("^p|^Protein",.)] %>%
  read_delim(., delim = ";", trim_ws = T) %>%
  rename(Records = `n records`)
dels$Records <- as.integer(dels$Records)
# strip the fixed prefix so only "start to end" exon numbers remain
dels$exons <- gsub("Large rearrangementDeletion from exon ", "", dels$Rearrangement)
dels <- dels %>% separate(exons, c('start','end'), sep = ' to ')
dels <- dels %>% mutate(type = 'Deletion')
n_patients_dels <- sum(dels$Records)
# one entry per deleted exon per patient record (see getexons above)
allexons_dels <- mapply(getexons, dels$start, dels$end, dels$Records)
# NOTE(review): tibble(exon = 1:79, count = table(...)) assumes every one of
# the 79 exons appears at least once in the scraped data; table() drops
# absent values, so the lengths would disagree otherwise -- confirm.
exoncounts_dels <- tibble(exon = 1:79, count = as.integer(table(unlist(allexons_dels))), pct_patients = as.integer(table(unlist(allexons_dels))) / n_patients_dels * 100, type = 'Deletion')
#add frameshift info
fr_del <- dels %>% filter(`Mutation type` == 'Fr.')
allexons_dels_fr <- mapply(getexons, fr_del$start, fr_del$end, fr_del$Records)
fr.table <- table(unlist(allexons_dels_fr))
fr_del_counts <- tibble(exon = names(fr.table), count = fr.table, frame = 'Frameshift', type = 'Deletion', type_frame = 'Frameshift Deletion')
infr_del <- dels %>% filter(`Mutation type` == 'InF')
allexons_dels_infr <- mapply(getexons, infr_del$start, infr_del$end, infr_del$Records)
infr.table <- table(unlist(allexons_dels_infr))
infr_del_counts <- tibble(exon = names(infr.table), count = infr.table, frame = 'In-frame', type = 'Deletion', type_frame = 'In-Frame Deletion')
##Duplications
# Same scraping/cleaning pipeline as the deletions section, but for the
# UMD DMD duplication table (endpoint /4).
dups <- read_html("http://www.umd.be/DMD/4DACTION/W_DMDT1/4") %>%
  html_nodes('tr') %>%
  html_text() %>%
  gsub("\\r", ";", .) %>%
  gsub("\\s+;", ";", .) %>%
  gsub(";\\s+", ";", .) %>%
  gsub("\\#", "n", .) %>%
  gsub(";$", "", .) %>%
  trimws() %>%
  .[grepl("^p|^Protein",.)] %>%
  read_delim(., delim = ";", trim_ws = T) %>%
  rename(Records = `n records`)
dups$Records <- as.integer(dups$Records)
dups$exons <- gsub("Large .* exon ", "", dups$Rearrangement)
dups <- dups %>% separate(exons, c('start','end'), sep = ' to ')
dups <- dups %>% filter(str_detect(Rearrangement,'Duplication'))
# 5' duplications have no explicit end exon; treat them as ending at exon 1
dups <- dups %>% mutate(end = if_else(str_detect(Rearrangement,'5\''), 1L, as.integer(end)))
dups <- dups %>% mutate(type = 'Duplication')
n_patients_dups <- sum(dups$Records)
allexons_dups <- mapply(getexons, dups$start, dups$end, dups$Records)
# NOTE(review): as in the deletions section, this assumes all 79 exons occur
# at least once in the duplication data.
exoncounts_dups <- tibble(exon = 1:79, count = as.integer(table(unlist(allexons_dups))), pct_patients = as.integer(table(unlist(allexons_dups))) / n_patients_dups * 100, type = 'Duplication')
#add frameshift info
fr_dup <- dups %>% filter(`Mutation type` == 'Fr.')
allexons_dups_fr <- mapply(getexons, fr_dup$start, fr_dup$end, fr_dup$Records)
fr_dup.table <- table(unlist(allexons_dups_fr))
fr_dup_counts <- tibble(exon = names(fr_dup.table), count = fr_dup.table, frame = 'Frameshift', type = 'Duplication', type_frame = 'Frameshift Duplication')
infr_dup <- dups %>% filter(`Mutation type` == 'InF')
allexons_dups_infr <- mapply(getexons, infr_dup$start, infr_dup$end, infr_dup$Records)
infr_dup.table <- table(unlist(allexons_dups_infr))
infr_dup_counts <- tibble(exon = names(infr_dup.table), count = infr_dup.table, frame = 'In-frame', type = 'Duplication', type_frame = 'In-Frame Duplication')
## merge
# Combine the deletion and duplication per-exon counts, express them as a
# percentage of all patients, and write two stacked bar charts to PNG.
deldup <- bind_rows(exoncounts_dels, exoncounts_dups)
n_total <- n_patients_dels + n_patients_dups
deldup <- deldup %>% mutate(pct_from_total = count / n_total * 100)
deldup_frame <- bind_rows(fr_del_counts, infr_del_counts, fr_dup_counts, infr_dup_counts)
deldup_frame$exon <- as.integer(deldup_frame$exon)
# NOTE(review): n_total_fr is computed but pct_from_total below divides by
# n_total, not n_total_fr -- confirm which denominator was intended.
n_total_fr <- sum(fr_del$Records) + sum(fr_dup$Records) + sum(infr_del$Records) + sum(infr_dup$Records)
deldup_frame <- deldup_frame %>% mutate(pct_from_total = count / n_total * 100)
# fix the legend/fill ordering of the four mutation categories
deldup_frame$type_frame <- factor(deldup_frame$type_frame,
                                  levels = c("Frameshift Deletion", "In-Frame Deletion",
                                             "Frameshift Duplication", "In-Frame Duplication"))
#plot
ggplot(deldup, aes(exon, pct_from_total, col = type, fill = type)) +
  geom_bar(stat = 'identity') +
  theme_bw() +
  ylab("Pct of patients with\ndeleted/duplicated exon") +
  theme(legend.position = c(0.2,0.83), legend.title = element_blank())
ggsave('UMD_DMD_dels_dups_stacked.png', width = unit(4, 'in'), height = unit(3, 'in'))
ggplot(deldup_frame, aes(exon, pct_from_total, col = type_frame, fill = type_frame)) +
  geom_bar(stat = 'identity') +
  theme_bw() +
  ylab("Pct of patients with\ndeleted/duplicated exon") +
  theme(legend.position = c(0.2,0.8),
        legend.text = element_text(size = 4),
        legend.key.size = unit(0.4, 'cm'),
        legend.title = element_blank()) +
  scale_fill_manual(values=c("brown2", "coral", "navy", 'cyan2')) +
  scale_color_manual(values=c("brown2", "coral", "navy", 'cyan2'))
ggsave('UMD_DMD_dels_dups_frames_stacked.png', width = unit(4, 'in'), height = unit(3, 'in'))
|
94eee3ad258ae84e69ff8cf141becef993634d4a
|
b96e92d86bd142159e4674c59c6fbaf730049802
|
/R/vc_email.R
|
ddee0305e18eae0d4ab85d504cfe8b4f5babd9c8
|
[] |
no_license
|
trinker/valiData
|
0ac536b9ed0435ff27f61973d949e9036fc8c1ac
|
59caaa67acaafb2508e90281812997464766d6f1
|
refs/heads/master
| 2022-06-09T05:59:46.696388
| 2022-05-12T18:25:54
| 2022-05-12T18:25:54
| 74,035,459
| 0
| 1
| null | 2016-11-17T14:37:24
| 2016-11-17T14:37:24
| null |
UTF-8
|
R
| false
| false
| 3,300
|
r
|
vc_email.R
|
#' Validates If Email
#'
#' Validates If Email
#'
#' @param data A data frame.
#' @param x Column name from \code{data} (character string).
#' @param \dots ignored.
#' @export
#' @examples
#' dat <- data.frame(
#' email =c('cookie@cookiemonster.com', 'joe@hometown.uni'
#' , 'meet@seven', '@Jim', 'joe@gmail.com', NA),
#' stringsAsFactors = FALSE
#' )
#' vc_email(dat, 'email')
vc_email <- function(data, x, ...){

    ## pull out the target column, normalizing the file's missing markers to NA
    col <- sub_out_missing(data[[x]])
    is_na <- is.na(col)

    ## element-wise email pattern (deliberately less strict than RFC 5321;
    ## taken from http://stackoverflow.com/a/38137215/1000343)
    regex <- "^(([^<>()\\[\\]\\.,;:\\s@\"]+(\\.[^<>()\\[\\]\\.,;:\\s@\"]+)*)|(\".+\"))@(([^<>()\\.,;\\s@\"]+\\.{0,1})+[^<>()\\.,;:\\s@\"]{2,})$"
    is_valid <- grepl(regex, col, ignore.case = TRUE, perl = TRUE)
    is_valid[is_na] <- NA

    ## the column passes when every element is either valid or missing
    are_valid <- all(is_valid | is_na)

    ## build the failure message only when needed, listing offending rows
    message <- if (are_valid) {
        NULL
    } else {
        sprintf(
            "The following rows of %s do not follow the format of allowable emails:\n\n%s\n\n\n\n",
            sQuote(x),
            output_truncate(which(!(is_valid | is_na)))
        )
    }

    ## assemble the classed `vc` result consumed downstream
    structure(
        list(
            column_name = x,
            valid = are_valid,
            message = message,
            passing = is_valid,
            missing = is_na,
            call = 'vc_email'
        ),
        class = 'vc'
    )
}
# regex <- "^(([^<>()\\[\\]\\.,;:\\s@\"]+(\\.[^<>()\\[\\]\\.,;:\\s@\"]+)*)|(\".+\"))@(([^<>()\\.,;\\s@\"]+\\.{0,1})+[^<>()\\.,;:\\s@\"]{2,})$"
# email_test_data$is_valid <- grepl(regex, email_test_data$email, ignore.case = TRUE, perl = TRUE)
## email_test_data <- structure(list(email = c("Sean.O'Conner@anyuniv.edu", "prettyandsimple@example.com", "very.common@example.com",
## "disposable.style.email.with+symbol@example.com", "other.email-with-dash@example.com",
## "x@example.com", "\"much.more unusual\"@example.com", "\"very.unusual.@.unusual.com\"@example.com",
## "\"very.(),:;<>[]\\\".VERY.\\\"very@\\\\ \\\"very\\\".unusual\"@strange.example.com",
## "example-indeed@strange-example.com", "admin@mailserver1", "#!$%&'*+-/=?^_`{}|~@example.org",
## "\"()<>[]:,;@\\\\\\\"!#$%&'-/=?^_`{}| ~.a\"@example.org", "\" \"@example.org",
## "example@localhost", "example@s.solutions", "user@com", "user@localserver",
## "user@[IPv6:2001:db8::1]", "user@[1:2:3:4:5::6]", "Abc.example.com",
## "A@b@c@example.com", "a\"b(c)d,e:f;g<h>i[j\\k]l@example.com",
## "just\"not\"right@example.com", "this is\"not\\allowed@example.com",
## "this\\ still\\\"not\\\\allowed@example.com", "john..doe@example.com",
## "john.doe@example..com"), valid = c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
## TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE,
## TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE,
## FALSE), is_valid = c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
## NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA
## )), .Names = c("email", "valid", "is_valid"), row.names = c(NA,
## -28L), class = "data.frame")
|
84b5b950ab292f9e39df195bca9c17f6858cea34
|
12ef9e9c0d415ff83775fcc00a2d23437b5ac6ac
|
/kernelModel.R
|
514989450a17e86409262012dd67a1fce06caeb0
|
[] |
no_license
|
brittzinator/PhDRes
|
951986b6352f1910f7e13a072f3a5cc996b872f5
|
4d7c8cfdedda9e666fc569f05d4a9d69bc67a15b
|
refs/heads/master
| 2016-09-06T10:14:22.352696
| 2014-11-11T22:58:41
| 2014-11-11T22:58:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,284
|
r
|
kernelModel.R
|
library(SuppDists) #for the rinvGauss (Wald) function
logprofile <- function(z,K,z0,d) log((z-d)/z0)/K
# rWALD: draws n random seed-dispersal distances from the WALD (inverse
# Gaussian / Wald) dispersal kernel, with wind speeds and terminal
# velocities simulated from lognormal distributions.
# Returns a numeric vector of n distances (via SuppDists::rinvGauss).
rWALD <- function(n,m,Umeanlog,Usdlog,H,Fmeanlog,Fsdlog,h)
{
# n: number of random numbers
# m: wind measurement height
# Umeanlog: mean log wind speed (m/s) at m
# Usdlog: sd log wind speed (m/s) at m
# H: mean plant height (m)
# Fmeanlog: mean log terminal velocity (m/s)
# Fsdlog: sd log terminal velocity (m/s)
# h: mean vegetation height (m)
K <- 0.4 # von Karman constant
C0 <- 3.125 # Kolmogorov constant
Aw <- 1.3 # ratio of sigmaw to ustar
d <- 0.7*h # zero-plane displacement
z0 <- 0.1*h # roughness length
Um <- rlnorm(n,meanlog=Umeanlog,sdlog=Usdlog) # simulate n wind events assuming lognormal distribution
# NOTE(review): parameter `m` (measurement height) is never used; the `2`
# below looks like a hard-coded 2 m measurement height -- confirm whether
# this should be log((m-d)/z0) instead.
ustar <- K*Um/log((2-d)/z0)
U <- (ustar/H)*integrate(logprofile,lower=z0+d,upper=H,K=K,z0=z0,d=d)$value # compute average wind speed between H and z0+d
sigma <- sqrt( (4*((Aw)^4)*K*(H-d)/C0) * ustar/U ) # turbulence parameter of the WALD kernel
f <- rlnorm(n,meanlog=Fmeanlog,sdlog=Fsdlog) # draw n terminal velocities assuming lognormal distribution
nu <- H*U/f # kernel mean: release height times wind speed over fall speed
lambda <- (H/sigma)^2 # kernel shape parameter
return(rinvGauss(n,nu=nu,lambda=lambda))
}
# Demo: draw 1000 dispersal distances and plot their kernel density (0-10 m).
distances<-rWALD(n=1000,m=1,Umeanlog=2,Usdlog=.2,H=1.1,Fmeanlog=2,Fsdlog=.5,h=.5)
plot(density(distances), xlim=c(0,10))
|
af5ad2fc93fd730cfa38048fa110de1d6f742e4d
|
f47ee14f548d958615893f56b624620f145d6365
|
/Plot1.R
|
607f3117c0583353ad5be75ad57660b6a8c88f09
|
[] |
no_license
|
TXu8/ExData_Plotting1
|
0d8e3513ca74e8b27c63dcacd48f4802cf890ce6
|
64792bf443ef0ff4d7f62bec3b91c7d1c79794dc
|
refs/heads/master
| 2021-01-11T03:10:13.291921
| 2016-10-17T06:19:54
| 2016-10-17T06:19:54
| 71,105,778
| 0
| 0
| null | 2016-10-17T06:15:25
| 2016-10-17T06:15:25
| null |
UTF-8
|
R
| false
| false
| 513
|
r
|
Plot1.R
|
##Program for 1st assignment - Plot 1
# Reads the household power consumption file, keeps the two target days,
# draws a histogram of global active power on screen, then copies it to
# Plot1.png. "filepath/" is a placeholder to be replaced with a real path.
setwd("filepath/")
powerdata<-read.csv("filepath/household_power_consumption.txt",header = TRUE, sep = ";")
# keep only observations dated 1/2/2007 or 2/2/2007 (d/m/yyyy strings)
powerdata1<-powerdata[which(powerdata$Date == "1/2/2007"|powerdata$Date == "2/2/2007"),]
# the column was read as a factor/character ("?" for missing); coerce to numeric
powerdata1$Global_active_power<-as.numeric(as.character(powerdata1$Global_active_power))
hist(powerdata1$Global_active_power,col="red",xlab = "Global Active Power (Kilowatts)",ylab="Frequency",main="Global Active Power")
# copy the on-screen plot to a PNG file and close that device
dev.copy(png,file="Plot1.png")
dev.off()
|
b007f2a09d9d3eea65469c507a7f121ade78a431
|
91ff28f948cb3387cd6c3b8483fb5e3f6ac00762
|
/man/DR.Rd
|
906f75ea2317fc6e6245d989b5eb53ceddc8e0c3
|
[] |
no_license
|
vlyubchich/funtimes
|
64621060279530bb4c8722491c2baec5bd6b5d77
|
94975df305e584ed73f317df54fa697999c7fd68
|
refs/heads/master
| 2023-05-11T06:43:25.507215
| 2023-04-29T18:12:58
| 2023-04-29T18:12:58
| 60,178,185
| 8
| 19
| null | 2021-08-12T15:00:12
| 2016-06-01T13:13:36
|
R
|
UTF-8
|
R
| false
| true
| 5,876
|
rd
|
DR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DR.R
\name{DR}
\alias{DR}
\title{Downhill Riding (DR) Procedure}
\usage{
DR(X, method, minPts = 3, theta = 0.9, B = 500, lb = -30, ub = 10)
}
\arguments{
\item{X}{an \eqn{n\times k} matrix where columns are \eqn{k} objects to be clustered,
and each object contains n observations (objects could be a set of time series).}
\item{method}{the clustering method to be used -- currently either
\dQuote{TRUST} \insertCite{Ciampi_etal_2010}{funtimes}
or \dQuote{DBSCAN} \insertCite{Ester_etal_1996}{funtimes}. If the method is \code{DBSCAN},
then set \code{MinPts} and optimal \eqn{\epsilon} is selected using DR.
If the method is \code{TRUST}, then set \code{theta}, and optimal \eqn{\delta}
is selected using DR.}
\item{minPts}{the minimum number of samples in an \eqn{\epsilon}-neighborhood of
a point to be considered as a core point. The \code{minPts} is to be used only
with the \code{DBSCAN} method. The default value is 3.}
\item{theta}{connectivity parameter \eqn{\theta \in (0,1)}, which is to be used
only with the \code{TRUST} method. The default value is 0.9.}
\item{B}{number of random splits in calculating the
Average Cluster Deviation (ACD). The default value is 500.}
\item{lb, ub}{endpoints for a range of search for the optimal parameter.}
}
\value{
A list containing the following components:
\item{P_opt}{the value of the optimal parameter. If the method is \code{DBSCAN}, then
\code{P_opt} is optimal \eqn{\epsilon}. If the method is \code{TRUST},
then \code{P_opt} is optimal \eqn{\delta}.}
\item{ACD_matrix}{a matrix that returns \code{ACD} for different values of a
tuning parameter.
If the method is \code{DBSCAN}, then the tuning parameter is \eqn{\epsilon}.
If the method is \code{TRUST}, then the tuning parameter is \eqn{\delta}.}
}
\description{
Downhill riding procedure for selecting optimal tuning parameters in clustering
algorithms, using an (in)stability probe.
}
\details{
Parameters \code{lb,ub} are endpoints for the search for the
optimal parameter. The parameter candidates are calculated in a way such that
\eqn{P:= 1.1^x , x \in {lb,lb+0.5,lb+1.0,...,ub}}.
Although the default range of search is sufficiently wide, in some cases
\code{lb,ub} can be further extended if a warning message is given.
For more discussion on properties of the considered clustering algorithms and the
DR procedure see \insertCite{Huang_etal_2016;textual}{funtimes}
and \insertCite{Huang_etal_2018_riding;textual}{funtimes}.
}
\examples{
\dontrun{
## example 1
## use iris data to test DR procedure
data(iris)
require(clue) # calculate NMI to compare the clustering result with the ground truth
require(scatterplot3d)
Data <- scale(iris[,-5])
ground_truth_label <- iris[,5]
# perform DR procedure to select optimal eps for DBSCAN
# and save it in variable eps_opt
eps_opt <- DR(t(Data), method="DBSCAN", minPts = 5)$P_opt
# apply DBSCAN with the optimal eps on iris data
# and save the clustering result in variable res
res <- dbscan(Data, eps = eps_opt, minPts =5)$cluster
# calculate NMI to compare the clustering result with the ground truth label
clue::cl_agreement(as.cl_partition(ground_truth_label),
as.cl_partition(as.numeric(res)), method = "NMI")
# visualize the clustering result and compare it with the ground truth result
# 3D visualization of clustering result using variables Sepal.Width, Sepal.Length,
# and Petal.Length
scatterplot3d(Data[,-4],color = res)
# 3D visualization of ground truth result using variables Sepal.Width, Sepal.Length,
# and Petal.Length
scatterplot3d(Data[,-4],color = as.numeric(ground_truth_label))
## example 2
## use synthetic time series data to test DR procedure
require(funtimes)
require(clue)
require(zoo)
# simulate 16 time series for 4 clusters, each cluster contains 4 time series
set.seed(114)
samp_Ind <- sample(12,replace=F)
time_points <- 30
X <- matrix(0,nrow=time_points,ncol = 12)
cluster1 <- sapply(1:4,function(x) arima.sim(list(order = c(1, 0, 0), ar = c(0.2)),
n = time_points, mean = 0, sd = 1))
cluster2 <- sapply(1:4,function(x) arima.sim(list(order = c(2 ,0, 0), ar = c(0.1, -0.2)),
n = time_points, mean = 2, sd = 1))
cluster3 <- sapply(1:4,function(x) arima.sim(list(order = c(1, 0, 1), ar = c(0.3), ma = c(0.1)),
n = time_points, mean = 6, sd = 1))
X[,samp_Ind[1:4]] <- t(round(cluster1, 4))
X[,samp_Ind[5:8]] <- t(round(cluster2, 4))
X[,samp_Ind[9:12]] <- t(round(cluster3, 4))
# create ground truth label of the synthetic data
ground_truth_label = matrix(1, nrow = 12, ncol = 1)
for(k in 1:3){
ground_truth_label[samp_Ind[(4*k - 4 + 1):(4*k)]] = k
}
# perform DR procedure to select optimal delta for TRUST
# and save it in variable delta_opt
delta_opt <- DR(X, method = "TRUST")$P_opt
# apply TRUST with the optimal delta on the synthetic data
# and save the clustering result in variable res
res <- CSlideCluster(X, Delta = delta_opt, Theta = 0.9)
# calculate NMI to compare the clustering result with the ground truth label
clue::cl_agreement(as.cl_partition(as.numeric(ground_truth_label)),
as.cl_partition(as.numeric(res)), method = "NMI")
# visualize the clustering result and compare it with the ground truth result
# visualization of the clustering result obtained by TRUST
plot.zoo(X, type = "l", plot.type = "single", col = res, xlab = "Time index", ylab = "")
# visualization of the ground truth result
plot.zoo(X, type = "l", plot.type = "single", col = ground_truth_label,
xlab = "Time index", ylab = "")
}
}
\references{
\insertAllCited{}
}
\seealso{
\code{\link{BICC}}, \code{\link[dbscan]{dbscan}}
}
\author{
Xin Huang, Yulia R. Gel
}
\keyword{trend}
\keyword{ts}
|
6015b0d7642a94ebe8560b54fe3a9eab121bc627
|
df58ce6f082bc03cfad6c4eac8636dfaee5107f9
|
/Wykresy/wordcloud.R
|
75ebe513ba8017e0a03149c9937798fd3790c421
|
[] |
no_license
|
mi2-warsaw/eRka-Onet-findTeam
|
0ce76e7427a49ae1587b912ab3b7fa169ab66e42
|
8b93c792f4bede2deae7bdc2f04226c2e39e071c
|
refs/heads/master
| 2021-01-10T08:08:21.403595
| 2015-11-28T19:59:18
| 2015-11-28T19:59:18
| 47,018,497
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
wordcloud.R
|
library(wordcloud)
library(Matrix)
# Draw a word cloud from a document-term matrix.
# Assumes DTM_df (columns = terms, rows = documents) is defined upstream --
# TODO confirm where it is created.
words = colnames(DTM_df)
# total occurrences of each term across all documents
frequencies = colSums(DTM_df)
wordcloud(words, freq = frequencies, max.words = 100, colors = brewer.pal(name = "Accent", n = 8))
|
7b0600e42409b52367f03994ad45fbd316e63743
|
910b056902cc9773e77a1c72b61238d6c438c7b4
|
/tests/test.R
|
db822d8d90d0d3074d77bdda03ecc867ba661a16
|
[] |
no_license
|
thaos/R2D2
|
573bd22481da186bf1c66453f1ff21404c30b58a
|
b7f89c4e7874e7a0b736d4417c35cd2b392c4944
|
refs/heads/master
| 2022-12-16T01:31:10.929767
| 2020-09-10T08:17:58
| 2020-09-10T08:17:58
| 259,948,052
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 682
|
r
|
test.R
|
# Smoke tests for R2D2::r2d2 (rank-based bias correction).
library(R2D2)
# 10x2 matrix with identical columns 1..10
mat <- matrix(
  rep(1:10, 2),
  ncol = 2
)
print(mat)
# 15x2 matrix with identical columns 1..15
mat2 <- matrix(
  rep(1:15, 2),
  ncol = 2
)
print(mat2)
# correcting a matrix against itself must be the identity, with every
# row visited exactly once
r2d2_bc = r2d2(mat, mat, 1, 0, 0)
stopifnot(
  all.equal(mat, r2d2_bc$r2d2_bc)
)
stopifnot(
  all.equal(r2d2_bc$visited_time, rep(1, nrow(mat)))
)
# same identity property with a different pivot argument
r2d2_bc = r2d2(mat, mat, 2, 0, 0)
stopifnot(
  all.equal(mat, r2d2_bc$r2d2_bc)
)
stopifnot(
  all.equal(r2d2_bc$visited_time, rep(1, nrow(mat)))
)
# correcting a shorter series against a longer one: expected values below
# pin the mapping of 1..10 onto 1..15
r2d2_bc = r2d2(mat, mat2, 1, 0, 0)
stopifnot(
  all.equal(
    matrix(rep(c(1, 3, 3, 4, 6, 6, 8, 8, 9, 11, 11, 12, 14, 14, 15), 2), ncol = 2),
    r2d2_bc$r2d2_bc
  )
)
# longer against shorter collapses back to the shorter reference
r2d2_bc = r2d2(mat2, mat, 1, 0, 0)
stopifnot(
  all.equal(mat, r2d2_bc$r2d2_bc)
)
|
6fdbded5a4ad67ad1af31b670f7010bc1867a5ca
|
1942e07b409f734546294115effd465ff680df1d
|
/plot1.R
|
e3c23db657372f432b07855cad7a398bbe3887cb
|
[] |
no_license
|
mvelascoc/ExData_Plotting1
|
f309aaad8686859e0519b38df1993731a7c50b15
|
55fef9f5eaca98b633a9203939b1c192d42f206c
|
refs/heads/master
| 2021-01-24T02:38:39.898616
| 2014-08-10T00:20:26
| 2014-08-10T00:20:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 844
|
r
|
plot1.R
|
## Exploratory Data Analysis / Coursera / Aug 2014
## Project 1 / Plot 1
# Reads a fixed row window of the power-consumption file (the rows for
# 2007-02-01 and 2007-02-02) and writes a histogram of global active power
# to plot1.png.
## load data
filePath <- "../../data/household_power_consumption.txt"
if (file.exists(filePath)) {
  print("Data File Found")
  #fullData <- read.table(filePath, sep=";", head = TRUE, na.strings = c("?"))
  ## subdata dates 2007-02-01 and 2007-02-02
  # NOTE(review): with skip=66636 AND head=TRUE, the first non-skipped line
  # (a data row) is consumed as the header, so column names are data values
  # and one observation is lost; column 3 is still accessed positionally --
  # confirm the row window is as intended.
  data <- read.table(filePath, sep=";", head = TRUE, na.strings = c("?"), skip= 66636, nrow = 2880)
  ## plot and save graphic
  png("plot1.png", width = 480, height = 480, units = 'px')
  hist(data[,3],                              # column 3 = Global_active_power
       col="red",                              ### define your color
       main="Global Active Power",             ### main label
       xlab="Global Active Power (kilowatts)", ### x-label
       ylab="Frequency")                       ### y-label
  ## export image
  dev.off()
} else {
  print("Data File Not Found")
}
|
f6053f0b4359148c5fa5607408e11fc304943dd0
|
42a2ec53d27a7856122ffc079634174f01dffcd3
|
/runs_sensitivity_on_reduced.R
|
dcde19747a8c0281d6116787264ff161d9418cc2
|
[
"CC-BY-4.0"
] |
permissive
|
OlenaShcherbakova/Sociodemographic_factors_complexity
|
84481c368369e4882951321164b363670797c074
|
559f590633699ae8f4c926d4da43d8d1a4eb7716
|
refs/heads/main
| 2023-06-23T23:39:26.516716
| 2023-06-15T07:05:39
| 2023-06-15T07:05:39
| 552,730,271
| 1
| 0
|
CC-BY-4.0
| 2023-06-13T08:17:36
| 2022-10-17T06:23:59
|
R
|
UTF-8
|
R
| false
| false
| 1,888
|
r
|
runs_sensitivity_on_reduced.R
|
# Sensitivity analysis over prior settings (reduced data set).
# Re-runs the Boundness ("fusion") and Informativity models under extra prior
# values (0.01, 0.5, 0.99; the 0.1 tables already exist), collects the WAIC
# tables each sourced script writes to disk, and combines everything into one
# summary table saved as CSV and as a Word document.
#
# NOTE(review): the read paths intentionally contain spaces
# ("output_tables_reduced/ waics ... .csv"); they must match the file names
# produced by the sourced scripts (paste() with its default sep) — confirm
# before "tidying" them.

# --- Boundness (fusion) models under each prior ---
source('sensitivity_testing_reduced_B_001.R')
source('sensitivity_testing_reduced_B_05.R')
source('sensitivity_testing_reduced_B_099.R')
WAIC_0.1 <- read.csv("output_tables_reduced/ waics Boundness_social_models .csv") %>%
  mutate(prior = "0.1")
WAIC_0.01 <- read.csv("output_tables_reduced/ waics Boundness_social_models prior_0.01 .csv") %>%
  mutate(prior = "0.01")
WAIC_0.5 <- read.csv("output_tables_reduced/ waics Boundness_social_models prior_0.5 .csv") %>%
  mutate(prior = "0.5")
WAIC_0.99 <- read.csv("output_tables_reduced/ waics Boundness_social_models prior_0.99 .csv") %>%
  mutate(prior = "0.99")
sensitivity_B <- as.data.frame(rbind(WAIC_0.1, WAIC_0.01, WAIC_0.5, WAIC_0.99)) %>%
  mutate(response="fusion")
# --- Informativity models under each prior ---
source('sensitivity_testing_reduced_I_001.R')
source('sensitivity_testing_reduced_I_05.R')
source('sensitivity_testing_reduced_I_099.R')
WAIC_0.1 <- read.csv("output_tables_reduced/ waics Informativity_social_models .csv") %>%
  mutate(prior = "0.1")
WAIC_0.01 <- read.csv("output_tables_reduced/ waics Informativity_social_models prior_0.01 .csv") %>%
  mutate(prior = "0.01")
WAIC_0.5 <- read.csv("output_tables_reduced/ waics Informativity_social_models prior_0.5 .csv") %>%
  mutate(prior = "0.5")
WAIC_0.99 <- read.csv("output_tables_reduced/ waics Informativity_social_models prior_0.99 .csv") %>%
  mutate(prior = "0.99")
sensitivity_I <- as.data.frame(rbind(WAIC_0.1, WAIC_0.01, WAIC_0.5, WAIC_0.99)) %>%
  mutate(response="informativity")
# --- Combine, save CSV, then format the same table as a Word document ---
sensitivity_all <- as.data.frame(rbind(sensitivity_B, sensitivity_I))
write.csv(sensitivity_all, "output_tables_reduced/Table_sensitivity.csv")
# flextable: autofit column widths and merge repeated response/prior cells.
sensitivity_all <- sensitivity_all %>%
  flextable() %>%
  autofit() %>%
  merge_v(j=c("response", "prior")) %>%
  fix_border_issues()
save_as_docx(
  "Summary of sensitivity" = sensitivity_all,
  path = "output_tables_reduced/Table_sensitivity.docx")
|
785093ad5d4a5b1629728cab03e7874ce9b08d2a
|
544bbac2e163fd61503f340d1fb40369aac0f59c
|
/programming/Rcode/Rexamples/MAN/plot.parentimage.Rd
|
60fadf5bb779c0825b1bb040f0afc7dbc60c32d6
|
[
"MIT"
] |
permissive
|
DannyArends/Other
|
810c8855e321f0722140375864fe6f6bf09a97eb
|
bc7328cdf6356a02efe716b865947f0567a239a4
|
refs/heads/master
| 2020-06-08T22:25:54.838498
| 2014-02-03T07:31:30
| 2014-02-03T07:31:30
| 800,277
| 1
| 0
| null | 2014-02-03T06:00:06
| 2010-07-27T09:55:12
|
C
|
UTF-8
|
R
| false
| false
| 1,638
|
rd
|
plot.parentimage.Rd
|
\name{plot.parentimage}
\alias{plot.parentimage}
\title{ plot.parentimage - Graphical representation highlighting possible parents of individuals}
\description{
Graphical representation highlighting possible parents of individuals
}
\usage{
plot.parentimage(x, start = 1, num = (ncol(x)-start), cutoffsib = 3, cutoffpar = 5,\dots)
}
\arguments{
\item{x}{ Result from the function \link{scoreKinship} }
\item{start}{ Start at individual }
\item{num}{ Show this many individuals (DEFAULT: End of the resultset) }
\item{cutoffsib}{ Sibling cutoff (DEFAULT: value > 3*std-dev) }
\item{cutoffpar}{ Parental cutoff (DEFAULT: value > 5*std-dev) }
\item{\dots}{ Additional arguments to plotting function }
}
\details{
}
\value{
plotting routine, no return value
}
\references{
}
\author{
Danny Arends \email{Danny.Arends@gmail.com}
Maintainer: Danny Arends \email{Danny.Arends@gmail.com}
}
\note{
num parameter should be larger than 2
}
\seealso{
\itemize{
\item \code{\link{scoreKinship}} - Calculates kinshipscores based on SNP markerdata
\item \code{\link{parents}} - Finds tripplets of individuals with parents from SNP markerdata
\item \code{\link{kinshipNetwork}} - Created a .SIF network with kinship relations between individuals
}
}
\examples{
#Create a population at H/W equilibrium
population <- CreatePopulationHW(100,300)
#Breed randomly a generation (parents)
population <- BreedRandom(population,25)
#Score kinship in our breeding example
result <- scoreKinship(population$data,plot=FALSE)
#plot the suspected parents
plot.parentimage(result)
}
\keyword{hplot}
|
0766b98b46659a7a65238fd4c6b25e95a56f8f35
|
519578112ec38f95e7c9ec28543b1963b8639f1b
|
/tests/testthat.R
|
86bcf8246ab6976e6f7faf7520e134ba24b60699
|
[] |
no_license
|
pachevalier/tricky
|
62416b2686493c569967c76d98deb2f5215d60a7
|
b9e58e962d773984b8bc389fe1c3a8e4f53944c1
|
refs/heads/master
| 2020-05-29T12:28:26.514050
| 2018-04-10T12:00:26
| 2018-04-10T12:00:26
| 44,686,532
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39
|
r
|
testthat.R
|
# Standard testthat entry point (invoked by R CMD check):
# runs every test file under tests/testthat/ for the "tricky" package.
library(testthat)
test_check("tricky")
|
ed46ac04533bc1ae7845c20b12c3df7b1899f201
|
023476c20d96b25b912d4a850fc89e50a271dd41
|
/R/checkPlotTest.R
|
45a545adb0aa952448b8e783d6d3ed918604d4d5
|
[] |
no_license
|
cran/forestFloor
|
2ef4edbb2d9d0a34770132a7f0b97d5d6b8da895
|
a5e6235f3bb90a008006efaab5188984b361b9e1
|
refs/heads/master
| 2020-12-14T08:41:51.338079
| 2017-05-30T11:17:27
| 2017-05-30T11:17:27
| 36,425,904
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,104
|
r
|
checkPlotTest.R
|
#' Decide which observations should be plotted (train set, test set, or both).
#'
#' @param plotTest NULL (auto), TRUE/"TRUE" (plot test set), FALSE/"FALSE"
#'   (plot train set) or "andTrain" (plot both).  Partial matching is
#'   supported, e.g. "and" selects "andTrain".
#' @param isTrain logical vector; TRUE marks training observations.
#' @return logical vector of length(isTrain): TRUE for observations to plot.
checkPlotTest = function(plotTest, isTrain) {
  if (is.null(plotTest)) { # plotTest unspecified: choose automatically
    # Plot the test observations when any exist, otherwise the training set.
    plotThese = if (all(isTrain)) isTrain else !isTrain
  } else {
    # Partially match against the allowed options; the logicals coerce to
    # the strings "TRUE"/"FALSE", so TRUE, "T", "F" etc. all match.
    matchArg = pmatch(plotTest, c(TRUE, FALSE, "andTrain"))
    if (length(matchArg) < 1 || is.na(matchArg)) {
      stop(paste("plotTest= '", plotTest, "' is an incorrect argument"))
    }
    # Option 1 (TRUE): plot the test set; error if there is none.
    if (matchArg == 1) plotThese = if (!all(isTrain)) !isTrain else {
      stop("no test set found, did you pass a Xtest when computing forestFloor?")
    }
    # Option 2 (FALSE): plot the training set (always present).
    if (matchArg == 2) plotThese = isTrain
    # Option 3 ("andTrain"): plot everything; warn when no test set exists.
    if (matchArg == 3) {
      plotThese = rep(TRUE, length(isTrain))
      if (all(isTrain)) warning("no test found, only plotting train")
    }
  }
  return(plotThese)
}
|
f7203c21c8875c5ddbf3ecaae508c38a1eb764ac
|
a0aab3075c566ebe95c4123d0f3b4f83d009058f
|
/01.basic packages.R
|
60ea8c1d7e0d412faa7716a63fcf5392858dedc1
|
[] |
no_license
|
ingridbrizotti/Data-science-in-R
|
0fef3e5feda3f412f977a9122e9c81601f901a12
|
3e162bc168763ada5f445da5980f09ab0ac0031d
|
refs/heads/master
| 2021-04-29T10:53:39.188088
| 2017-09-23T16:47:45
| 2017-09-23T16:47:45
| 77,861,631
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,540
|
r
|
01.basic packages.R
|
################################### LOAD DATA ############################################################
# One-off setup script: installs the packages used in the course.
# FIXes below correct package names that do not exist on CRAN (names are
# case-sensitive), so the corresponding install.packages() calls no longer fail.
# If you'd like to read in data from a database
install.packages("RMySQL")
# to read and write Microsoft Excel files from R
install.packages("XLConnect")
install.packages("xlsx")
# to read SAS or SPSS
install.packages("foreign")
################################### MANAGE DATA ############################################################
# Essential shortcuts for subsetting, summarizing, rearranging, and joining together data sets.
# dplyr is our go to package for fast data manipulation.
install.packages("dplyr")
# Tools for changing the layout of your data sets.
install.packages("tidyr")
# regular expressions and character strings
install.packages("stringr")
# Tools that make working with dates and times easier
install.packages("lubridate")
################################### VISUALIZE DATA ############################################################
# beautiful graphics :)
install.packages("ggplot2")
# 3D visualizations
install.packages("rgl")
# use Google Chart tools to visualize data in R (Gapminder)
install.packages("googleVis")
# maps
# FIX: "leafleteVis" is not on CRAN; the interactive-maps package is "leaflet".
install.packages("leaflet")
# time series
install.packages("dygraphs")
# tables
# FIX: the interactive-tables package is "DT" ("dt" does not exist).
install.packages("DT")
# diagrams
# FIX: correct case — the CRAN package is "DiagrammeR".
install.packages("DiagrammeR")
################################### MODEL DATA ############################################################
# Generalized Additive Models
install.packages("mgcv")
# Linear mixed effects models
install.packages("lme4")
# Non-Linear mixed effects models
install.packages("nlme")
# random forest
install.packages("randomForest")
# multiple comparison testing
install.packages("multcomp")
# Visualization tools and tests for categorical data
install.packages("vcd")
# Lasso and elastic-net regression methods with cross validation
install.packages("glmnet")
# survival analysis
install.packages("survival")
# training regression and classification models
install.packages("caret")
install.packages("gmodels")
# decision tree
install.packages("rpart")
# ROC curve
install.packages("ROCR")
################################### REPORT RESULTS ############################################################
install.packages("shiny")
# Reporting
# FIX: "R Markdown" is not an installable package name; the CRAN package is "rmarkdown".
install.packages("rmarkdown")
################################### SPATIAL DATA ############################################################
# loading and using spatial data including shapefiles
install.packages("sp")
install.packages("maptools")
# use map polygons for plots
install.packages("maps")
# Download street maps straight from Google maps and use them as a background in your ggplots.
install.packages("ggmap")
######################### TIME SERIES AND FINANCIAL DATA ############################################################
# format for saving time series objects in R
install.packages("zoo")
# manipulating time series data sets
install.packages("xts")
# Tools for downloading financial data, plotting common charts, and doing technical analysis
install.packages("quantmod")
# To write high performance R code
# FIX: this line duplicated "quantmod"; the comment describes Rcpp.
install.packages("Rcpp")
# Big Data (An alternative way to organize data sets for very, very fast operations)
install.packages("data.table")
# Use parallel processing in R to speed up your code or to crunch large data sets
# NOTE: "parallel" ships with base R, so install.packages() reports it as
# unavailable from CRAN; just library(parallel) — line kept for fidelity.
install.packages("parallel")
|
f2a12ac07390b22318bc53c83c4e17b2bbc4bf9f
|
ecbab65bf1c0dbff9e9f2960f69c7394164c1f09
|
/Neural Network/concrete.R
|
5cbc6e6157fe2c32f9d7bd0a7427f4645b29be38
|
[] |
no_license
|
patelarti/R-Codes
|
1bedaa5596b1e1dc04af29cf5fcac33f3dd9fae0
|
5ffaceb26fe4f6d35157255f35f4772d9218929c
|
refs/heads/master
| 2022-04-22T18:43:07.868056
| 2020-04-24T18:01:49
| 2020-04-24T18:01:49
| 254,949,224
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,735
|
r
|
concrete.R
|
#Prepare a model for strength of concrete data using Neural Networks
# Workflow: load data -> univariate EDA (histograms, moments) -> min-max
# normalisation -> 70/30 train/test split -> neuralnet models with the
# default, 5 and 7 hidden nodes -> evaluate by correlating predicted and
# observed (normalised) strength.
concrete <- read.csv(file.choose())
str(concrete)
class(concrete)
names(concrete)
# NOTE(review): attach() puts the columns on the search path (cement, slag,
# ... below refer to them); masking bugs are easy this way — prefer concrete$.
attach(concrete)
summary(concrete)
library(neuralnet)
library(nnet)
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run; usually done once interactively instead.
install.packages("NeuralNetTools")
library(NeuralNetTools)
# --- Univariate EDA: density-scaled histogram + kernel density per column ---
hist(cement, prob = T, breaks = 30)
lines(density(cement))
summary(cement)
hist(slag, prob = T, breaks = 30)
lines(density(slag))
summary(slag)
hist(ash, prob = T, breaks = 30)
lines(density(ash))
summary(ash)
hist(water, prob = T, breaks = 30)
lines(density(water))
summary(water)
hist(superplastic, prob = T, breaks = 30)
lines(density(superplastic))
summary(superplastic)
hist(coarseagg, prob = T, breaks = 30)
lines(density(coarseagg))
summary(coarseagg)
hist(fineagg, prob = T, breaks = 30)
lines(density(fineagg))
summary(fineagg)
hist(strength, prob = T, breaks = 30)
lines(density(strength))
summary(strength)
names(concrete)
# --- Spread: standard deviation and variance of every column ---
sd(cement)
sd(slag)
sd(ash)
sd(water)
sd(superplastic)
sd(coarseagg)
sd(fineagg)
sd(age)
sd(strength)
var(cement)
var(slag)
var(ash)
var(water)
var(superplastic)
var(coarseagg)
var(fineagg)
var(age)
var(strength)
# --- Shape: skewness and kurtosis (moments package) ---
library(moments)
skewness(cement)
skewness(slag)
skewness(ash)
skewness(water)
skewness(superplastic)
skewness(coarseagg)
skewness(fineagg)
skewness(age)
skewness(strength)
kurtosis(cement)
kurtosis(slag)
kurtosis(ash)
kurtosis(water)
kurtosis(superplastic)
kurtosis(coarseagg)
kurtosis(fineagg)
kurtosis(age)
kurtosis(strength)
# Apply Normalization
# Min-max scale each column to [0, 1]; neural networks train better when all
# inputs share a common scale.
normalize<-function(x){
  return ( (x-min(x))/(max(x)-min(x)))
}
concrete_norm<-as.data.frame(lapply(concrete,FUN=normalize))
summary(concrete_norm$strength)
summary(strength)
# Data Partition
# 70/30 split via a Bernoulli draw per row (seed fixed for reproducibility).
set.seed(123)
ind <- sample(2, nrow(concrete_norm), replace = TRUE, prob = c(0.7,0.3))
concrete_train <- concrete_norm[ind==1,]
concrete_test <- concrete_norm[ind==2,]
# Creating a neural network model on training data
# (default architecture: a single hidden unit)
concrete_model <- neuralnet(strength~cement+slag+ash+water+superplastic+coarseagg+fineagg+age,data = concrete_train)
str(concrete_model)
plot(concrete_model, rep = "best")
summary(concrete_model)
par(mar = numeric(4), family = 'serif')
plotnet(concrete_model, alpha = 0.6)
# Evaluating model performance
# (the seed presumably has no effect here — compute() is a forward pass)
set.seed(12323)
# Columns 1:8 are the predictors; column 9 is strength.
model_results <- compute(concrete_model,concrete_test[1:8])
predicted_strength <- model_results$net.result
cor(predicted_strength,concrete_test$strength)
# Map normalised predictions back to the original strength scale.
str_max <- max(concrete$strength)
str_min <- min(concrete$strength)
unnormalize <- function(x, min, max) {
  return( (max - min)*x + min )
}
ActualStrength_pred <- unnormalize(predicted_strength,str_min,str_max)
head(ActualStrength_pred)
# Improve the model performance :
# Second model: 5 hidden nodes.
set.seed(12345)
concrete_model2 <- neuralnet(strength~cement+slag+ash+water+superplastic+coarseagg+fineagg+age,data= concrete_train,hidden = 5)
plot(concrete_model2, rep = "best")
summary(concrete_model2)
model_results2<-compute(concrete_model2,concrete_test[1:8])
predicted_strength2<-model_results2$net.result
cor(predicted_strength2,concrete_test$strength)
# NOTE(review): this plots the model-1 predictions; predicted_strength2 was
# probably intended — confirm.
plot(predicted_strength,concrete_test$strength)
par(mar = numeric(4), family = 'serif')
plotnet(concrete_model2, alpha = 0.6)
# Third model: all predictors via `.`, 7 hidden nodes.
concrete_model3 <- neuralnet(strength~.,data= concrete_train,hidden = 7)
plot(concrete_model3, rep = "best")
summary(concrete_model3)
model_results3<-compute(concrete_model3,concrete_test[1:8])
predicted_strength3<-model_results3$net.result
cor(predicted_strength3,concrete_test$strength)
# NOTE(review): again plots model-1 predictions; predicted_strength3 was
# probably intended — confirm.
plot(predicted_strength,concrete_test$strength)
par(mar = numeric(4), family = 'serif')
plotnet(concrete_model3, alpha = 0.6)
|
1c492b11e2aa510d62598a54f78efb0519905bd6
|
098841409c03478ddae35c4cdf6367cfd65fa3bf
|
/clustering/code/cellbench/03_assess_meanSil.R
|
c6eeecd98f2e5ae2c1d45d526cddf99e04564352
|
[] |
no_license
|
wangdi2016/imputationBenchmark
|
0281746b482788c347faf9d96e8288639ba388a6
|
0881121444975cd0a3ee2ce69aaec46c3acd7791
|
refs/heads/master
| 2023-07-29T10:16:14.004610
| 2021-09-09T20:00:43
| 2021-09-09T20:00:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,724
|
r
|
03_assess_meanSil.R
|
# Assess clustering results for one imputation method on the CellBench
# benchmark: entropy-based accuracy/purity, adjusted Rand index, and mean
# silhouette width, saved as one data frame per method.
library(mclust)
library(cluster)
# allmtd = list.files('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/clustering/result/')
# allmtd = setdiff(allmtd,c('deepimpute','scimpute'))
# Imputation method to assess, taken from the first command-line argument.
mtd = as.character(commandArgs(trailingOnly = T)[1])
print(mtd)
# CellBench datasets: the first three have 3 cell lines, the rest 5.
allf = c("sc_10x","sc_celseq2","sc_dropseq","sc_10x_5cl", "sc_celseq2_5cl_p1", "sc_celseq2_5cl_p2","sc_celseq2_5cl_p3")
df = data.frame(allf = allf, k = c(rep(3,3), rep(5,4)))
# Datasets with existing clustering results for this method (strip ".rds").
existf = sub('.rds','',list.files(paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/clustering/result/',mtd,'/')))
if (length(existf)>0){
  ACC <- PUR <- ARI <- meanSil <- NULL
  for (f in intersect(allf, existf)){
    print(f)
    # Cluster assignment per cell; cell names encode the true type after ':'.
    clu = readRDS(paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/clustering/result/',mtd,'/',f,'.rds'))
    ct = sub('.*:','',names(clu))
    # Entropy of the true labels within each cluster, averaged over clusters
    # (lower entropy -> purer clusters; sign flipped so larger is better).
    acc <- -mean(sapply(unique(clu),function(i){
      p = table(ct[clu==i])/ sum(clu==i)
      sum(p * log(p))
    }))
    # Entropy of the cluster labels within each true cell type.
    pur <- -mean(sapply(unique(ct),function(sct){
      p = table(clu[ct==sct])/ sum(ct == sct)
      sum(p * log(p))
    }))
    ACC = c(ACC, acc)
    PUR = c(PUR, pur)
    ## adjusted rank index
    suppressMessages(library(mclust))
    ARI <- c(ARI,adjustedRandIndex(ct,clu))
    ## sum of silhouette
    # Imputed expression matrix (genes x cells) for the silhouette distance.
    mat = readRDS(paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/procimpute/cellbench/',mtd,'/',f,'.rds'))
    d = dist(t(mat)) ## use cell by gene for dist()
    s = silhouette(clu, dist=d)
    # Column 3 of a silhouette object holds the per-cell widths.
    meanSil = c(meanSil,mean(s[,3]))
  }
  df = data.frame(method=mtd, data = intersect(allf, existf), Hacc =ACC, Hpur=PUR, ARI = ARI, meanSil = meanSil)
}
# NOTE(review): when no result files exist, `df` still holds the allf/k table
# built above and that is what gets saved — confirm this fallback is intended.
saveRDS(df,paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/clustering/perf/meanSil/',mtd,'.rds'))
|
19a1d15a026497204e94c4bbfc633d0114cf296c
|
af681784e683a9ff5b0e9b773504a934cc73cd7b
|
/Week03_script.R
|
0b9c9fbbc7f6851e6b22a5f1a1956a938c0c748d
|
[] |
no_license
|
jakeane/qss17_assignments
|
0cf9850f07b0b538af801c416fbd24c54f8cdde4
|
778b6386a497be7281eed2c5c03d19b916cc26f4
|
refs/heads/master
| 2023-01-21T23:29:42.776236
| 2020-12-04T19:40:01
| 2020-12-04T19:40:01
| 295,878,104
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,799
|
r
|
Week03_script.R
|
## Data Visualization (GOVT16-QSS17) Fall 2020
## Ggplot2, Part I (Week 3)
##
## Name: John Keane
## Date: 10/5/20
## Three exercises: IMDB movie scores, presidential approval time series,
## and WIID income-inequality (Gini) data.
## NOTE(review): `..density..` is the legacy spelling; newer ggplot2 prefers
## after_stat(density) — behaviour is the same, left unchanged here.
# Load libraries
library(tidyverse)
# 1. First, let's work with the IMDB dataset
# (a) Load and inspect the data.
movie <- read_csv("hw2data/movie_metadata.csv")
print("movie head:")
head(movie)
print("movie tail:")
tail(movie)
# (b) Jittered scatter of IMDB score by color/BW (rows with NA color dropped).
ggplot(movie[!is.na(movie$color), ], aes(x = color, y = imdb_score)) +
  geom_point(position = "jitter") +
  labs(
    title = "IMDB Scores of Movies by Color",
    x = "Color",
    y = "IMDB Score"
  ) +
  theme_minimal()
# (c) Stacked histogram of scores, filled by color.
ggplot(movie[!is.na(movie$color), ], aes(x = imdb_score, fill = color)) +
  geom_histogram() +
  labs(
    title = "Distribution of IMDB Scores by Color",
    x = "IMDB Score",
    y = "Count",
    fill = "Color"
  ) +
  theme_minimal()
# (d) Same comparison as density-scaled, overlapping histograms.
ggplot(
  movie[!is.na(movie$color), ],
  aes(
    x = imdb_score,
    y = ..density..,
    fill = color
  )
) +
  geom_histogram(position = "identity", alpha = 0.6) +
  labs(
    title = "Distribution of IMDB Scores by Color",
    x = "IMDB Score",
    y = "Density",
    fill = "Color"
  ) +
  theme_minimal()
# 2. For this problem, load up the approval dataset
approve <- read_csv("hw2data/approval_data.csv")
print("approve head:")
head(approve)
print("approve tail:")
tail(approve)
# (a) Build a decimal year (year + quarter fraction) and pivot the economic
#     and foreign-policy approval columns into long form.
approval_types <- approve %>%
  mutate(yearQrt = year + (0.25 * (qrt - 1))) %>%
  select(yearQrt, econapp, fpapp) %>%
  pivot_longer(names_to = "type", values_to = "value", c(econapp, fpapp))
approval_types
# (b) Approval rating time series, one line per approval type.
ggplot(approval_types, aes(x = yearQrt, y = value, color = type)) +
  geom_line() +
  labs(
    title = "Economic and Foreign Policy Approval of Executive Admin",
    x = "Year",
    y = "Approval Rating",
    color = "Approval Type"
  ) +
  scale_color_discrete(labels = c("Economic", "Foreign")) +
  theme_minimal()
# (c) Same series, faded, with smoothed trend lines on top.
ggplot(approval_types, aes(x = yearQrt, y = value, color = type)) +
  geom_line(alpha = 0.4) +
  geom_smooth() +
  labs(
    title = "Economic and Foreign Policy Approval of Executive Admin",
    x = "Year",
    y = "Approval Rating",
    color = "Approval Type"
  ) +
  scale_color_discrete(labels = c("Economic", "Foreign")) +
  theme_minimal()
# (d) Analogous plot for quarterly inflation and unemployment rates.
approve %>%
  mutate(yearQrt = year + (0.25 * (qrt - 1))) %>%
  select(yearQrt, qrtinfl, qrtunem) %>%
  pivot_longer(
    names_to = "type",
    values_to = "value",
    c(qrtinfl, qrtunem)
  ) %>%
  ggplot(aes(x = yearQrt, y = value, color = type)) +
  geom_line(alpha = 0.4) +
  geom_smooth() +
  labs(
    title = "Inflation and Employment Rates Over Time",
    x = "Year",
    y = "Rate",
    color = "Type"
  ) +
  scale_color_discrete(labels = c("Inflation", "Unemployment")) +
  theme_minimal()
# WIID dataset
wiid <- read_csv("hw2data/WIID_Dec2018.csv")
print("WIID head:")
head(wiid)
print("WIID tail:")
tail(wiid)
# (a) Mean reported Gini in 2000 for five European countries, labelled points.
wiid %>%
  filter(
    year == 2000,
    country %in% c("Germany", "France", "Italy", "Spain", "Norway")
  ) %>%
  group_by(country) %>%
  summarize(avgGini = mean(gini_reported, na.rm = TRUE)) %>%
  ggplot(aes(x = country, y = avgGini, label = country)) +
  geom_point() +
  geom_text(vjust = -1.0) +
  theme_minimal() +
  labs(
    title = "Average Gini Coefficient by Country",
    x = "Country",
    y = "Average Gini Coefficient"
  )
# (b) Overlapping Gini density curves for the Asian UN sub-regions.
wiid %>%
  filter(region_un == "Asia") %>%
  ggplot(aes(x = gini_reported, y = ..density.., fill = region_un_sub)) +
  geom_density(alpha = 0.3) +
  theme_minimal() +
  labs(
    title = "Income Inequality in Asia",
    y = "Density",
    fill = "UN Sub-Region",
    x = "Gini Coefficient"
  )
# (c) Dot plot of mean Gini per European country, sorted by value.
wiid %>%
  filter(region_un == "Europe") %>%
  group_by(country) %>%
  summarize(meanGini = mean(gini_reported, na.rm = TRUE)) %>%
  ggplot(aes(
    y = fct_reorder(country, meanGini, min),
    x = meanGini, label = country
  )) +
  geom_point() +
  theme_minimal() +
  theme(
    axis.title.y = element_blank(),
    axis.text.y = element_text(size = 6)
  ) +
  labs(x = "Gini Coefficient", title = "Income Inequality in Europe")
# (d) Per-country deviation from the Africa-wide mean Gini, sorted bars.
# NOTE(review): "pallete" is a misspelling of "palette"; the helper
# interpolates the five base colours into any requested number of colours.
color_pallete <- colorRampPalette(c("orange", "red", "purple", "blue", "green"))
wiid %>%
  filter(region_un == "Africa") %>%
  mutate(avgGini = mean(gini_reported, na.rm = TRUE)) %>%
  group_by(country) %>%
  summarize(avgGiniDiff = mean(gini_reported - avgGini, na.rm = TRUE)) %>%
  ggplot(aes(
    x = fct_reorder(country, avgGiniDiff, min),
    y = avgGiniDiff,
    fill = country
  )) +
  geom_bar(stat = "identity", show.legend = FALSE) +
  scale_fill_manual(
    values = color_pallete(
      length(unique(wiid$country[wiid$region_un == "Africa"]))
    )
  ) +
  coord_flip() +
  theme_minimal() +
  labs(
    title = "Income Inequality in Africa",
    y = "Gini Coefficient",
    x = "Country"
  )
# (e) Global Gini distribution, density-scaled histogram filled by UN region.
ggplot(wiid, aes(x = gini_reported, y = ..density.., fill = region_un)) +
  geom_histogram(alpha = 0.5) +
  scale_fill_manual(
    values = color_pallete(length(unique(wiid$region_un)))
  ) +
  theme_minimal() +
  labs(
    title = "Global Income Inequality",
    x = "Gini Coefficient",
    fill = "UN Region",
    y = "Density"
  )
# (f) Median Gini per year for each sub-region of the Americas, with trends.
wiid %>%
  filter(region_un == "Americas") %>%
  group_by(region_un_sub, year) %>%
  summarize(medianGini = median(gini_reported, na.rm = TRUE)) %>%
  ggplot(aes(x = year, y = medianGini, color = region_un_sub)) +
  geom_point(alpha = 0.4) +
  geom_smooth() +
  scale_color_manual(
    values = color_pallete(
      length(unique(wiid$region_un_sub[wiid$region_un == "Americas"]))
    )
  ) +
  theme_minimal() +
  labs(
    title = "Income Inequality in the Americas",
    x = "Year",
    y = "Gini Coefficient",
    color = "UN Sub-Region"
  )
# (g) Dot plot of Gini by World Bank income group (ordered low -> high) for
#     Western Asia, with the median of each group marked in red.
wiid %>%
  filter(region_un_sub == "Western Asia") %>%
  ggplot(
    aes(
      x = factor(
        incomegroup,
        levels = c(
          "Low income",
          "Lower middle income",
          "Upper middle income",
          "High income"
        )
      ),
      y = gini_reported
    )
  ) +
  geom_dotplot(
    alpha = 0.6,
    stackdir = "center",
    binaxis = "y",
    binwidth = 0.8
  ) +
  stat_summary(fun = "median", color = "red") +
  theme_minimal() +
  labs(
    title = "Income Inequality in Western Asia by Income Group",
    x = "Income Group",
    y = "Gini Coefficient"
  )
|
5f987a3e2a85c2c9c251872836ceced5228ac413
|
c3d1cb3036f0204365e4010649828cfbe05ef8e1
|
/feature_sel/shrunken_features.R
|
dfe998aa47b287d975afc88006690d356e1c375f
|
[] |
no_license
|
sonibk/BISP
|
78d7b7f3636e2e9dbb5da1823cfd8957add10042
|
d538bb4cb62a57d2de701bcfd64653d9529cd134
|
refs/heads/master
| 2022-03-29T17:13:30.525362
| 2019-12-27T06:20:42
| 2019-12-27T06:20:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,540
|
r
|
shrunken_features.R
|
library(pamr)
library(pROC)
library(mccr)
library(foreach)
library(doParallel)
source('feature_sel/pamr.listgenes.R')
source('get_results.R')
source('helper_func.R')
#' Extracts genes from the shrunken object
#'
#' @param shrunken.genes.df.list list of per-group results, each holding the
#'   data frame returned by pamr.listgenes() under the element `genes.list`
#' @return unnamed list with one character vector of gene names per group
get.genes.shrunken <- function(shrunken.genes.df.list)
{
  # The second column of each stored `genes.list` table holds the gene names.
  # Iterating over positions (not elements) keeps the result an unnamed list,
  # matching the original loop-built output.
  lapply(seq_along(shrunken.genes.df.list),
         function(idx) shrunken.genes.df.list[[idx]][['genes.list']][, 2])
}
#' Gives the shrunken gene object for given data w.r.t each group
#'
#' @param data normalised data containing samples as rows and columns as genes
#' @param train.ind.list List containing indexes of the folds
#' @param stages Stage of every sample in data
#' @param cores Number of CPU cores to be used
#' @param type Indicates whether there are 1 or multiple(type = 1) folds
#' @param min.genes if not 1, relax the threshold choice to the largest
#'   threshold whose MCC is within `min.range` of the maximum
#' @param min.range MCC tolerance used when min.genes != 1
#' @return List containing, per fold, the selected gene table (`genes.list`)
#'   and the cross-validated MCC values (kept under the historical element
#'   name `aucs` for compatibility with existing readers)
get.shrunken.object <- function(data, train.ind.list, stages, cores, type = 1, min.genes = 1, min.range = 0.02)
{
  registerDoParallel(cores = cores)
  pamr.genes.list <- foreach(i = seq_along(train.ind.list)) %dopar%
  {
    # Training indices: all folds except fold i (type == 1), or fold i itself.
    if(type == 1)
      train.ind <- sort(unlist(train.ind.list[-i]))
    else
      train.ind <- sort(unlist(train.ind.list[[i]]))
    # Nearest-shrunken-centroid fit plus 5-fold CV over the threshold path.
    train.model <- pamr.train(list(x = as.matrix(t(data[train.ind,])),
                                   y = stages[train.ind]))
    cv.model <- pamr.cv(train.model,
                        data = list(x = t(as.matrix(data[train.ind,])),
                                    y = stages[train.ind]), nfold = 5)
    # NOTE: `type` is intentionally reused from here on as the reference class
    # level passed to get.order(); the fold check above has already run.
    type <- as.factor(stages[train.ind])[1]
    # Matthews correlation coefficient at every CV threshold.
    mccs <- sapply(seq_along(cv.model$threshold), function(x)
    {
      mccr(get.order(stages[train.ind], type), get.order(cv.model$yhat[,x], type))
    })
    # Largest threshold index that attains the maximum MCC.
    thr.ind = sort(which(mccs == max(mccs)), decreasing = TRUE)[1]
    # Optionally relax towards more genes: largest threshold within min.range.
    if(min.genes != 1)
      thr.ind <- max(which(mccs > (mccs[thr.ind] - min.range)))
    genes.list <- pamr.listgene(train.model,
                                data = list(x=as.matrix(t(data[train.ind,])),
                                            y=stages[train.ind]),
                                threshold = cv.model$threshold[thr.ind],
                                fitcv = cv.model, genenames = TRUE)
    # FIX: the original returned the undefined variable `aucs`, which raised
    # "object 'aucs' not found" at run time. Return the CV MCC vector under
    # the existing 'aucs' element name so downstream consumers keep working
    # (note: these are MCC values, not AUCs).
    return(list(genes.list = genes.list, aucs = mccs))
  }
  return(pamr.genes.list)
}
|
64637096d61ed07917d30f92c3dd88faf6ffd333
|
b7e7f6264fe1117805e71ca37094e604ad76e7ca
|
/rankall.R
|
9aa91d05d41ff3c26ae1ebdd4b46668f7a3b5a58
|
[] |
no_license
|
JustSheryl/DataScienceCourseRepo
|
1cae58188500750984dfdf5d56bf125180745e4b
|
4f91840eb0ed2acd1aabe32362839237a15c9f49
|
refs/heads/master
| 2022-11-10T21:08:09.997459
| 2020-06-25T18:50:29
| 2020-06-25T18:50:29
| 263,632,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,026
|
r
|
rankall.R
|
rankall <- function(outcome, num = "best") {
  ## Rank hospitals in every state by 30-day mortality for `outcome`.
  ##
  ## Args:
  ##   outcome: "heart attack", "heart failure" or "pneumonia".
  ##   num:     rank to report per state -- "best", "worst", or a number.
  ## Returns:
  ##   A list, one element per (abbreviated) state, each holding the
  ##   hospital row of the requested rank ("NA" when the rank exceeds the
  ##   number of hospitals in that state).

  ## get directory from environment; if it's not there, use default.
  ## FIX: Sys.getenv() returns "" (never NULL) for an unset variable, so the
  ## original is.null() test could never trigger; check for the empty string.
  directory <- Sys.getenv("directory")
  if (directory == "") {
    directory <- "C:\\Users\\grier006\\Documents\\R\\R-week4"
  }
  ## NOTE(review): setwd() inside a function changes global state; kept for
  ## compatibility with the rest of the assignment scripts.
  setwd(directory)
  ## Read outcome data
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Check that outcome is valid
  if (!(outcome %in% c("heart attack", "heart failure", "pneumonia"))) {
    stop("Invalid Outcome")
  }
  ## per Hospital_Revised_Flatfiles.pdf:
  ## heart attack mortality rate: col. 11, heart failure: col. 17,
  ## pneumonia: col. 23.
  outcomeCol <- if (outcome == "heart attack") {
    11
  } else if (outcome == "heart failure") {
    17
  } else {
    23
  }
  ## We only need 3 of the 40+ columns: Hospital (2), State (7), outcome rate.
  myData <- data[, c(2, 7, outcomeCol)]
  ## remove missing data
  goodCases <- complete.cases(myData)
  myData <- myData[goodCases, ]
  ## Make the output more readable
  names(myData)[3] <- "Rate"
  ## Rate should be numeric for sort ("Not Available" etc. become NA).
  myData[, 3] <- suppressWarnings(as.numeric(myData[, 3]))
  ## Split by state
  StateData <- split(myData, myData$State)
  ## For each state, find the hospital of the given rank
  ans <- lapply(StateData, function(s, num) {
    ## Order by rate, breaking ties alphabetically by hospital name (NAs last).
    s <- s[order(s$Rate, s$Hospital.Name), ]
    if (num == "best") {
      ## FIX: the original returned s[1] -- the entire first COLUMN of the
      ## state's table; the best hospital is the first ROW after ordering.
      return(s[1, ])
    } else if (num == "worst") {
      # return(s[nrow(s)])
      return(s[which.max(s$Rate), ])
    } else if (is.numeric(num)) {
      if (num > nrow(s)) {
        "NA"              # rank beyond the number of hospitals in this state
      } else {
        s[num, ]
      }
    }
  }, num)
  ## Return a list with the hospital rows, keyed by (abbreviated) state name.
  return(ans)
}
# Smoke checks: the hospital ranked 20th for heart attack in each state,
# and the worst pneumonia hospital per state.
result<-rankall("heart attack", 20)
head(result)
result<-rankall("pneumonia", "worst")
tail(result)
|
78bed6b315b7c6af539f6334f76ca30cf58f190f
|
67e3265bf57ee6ba59877b538d49229acb6a438a
|
/R/print.pval.R
|
30643ea607578ffabe78dc0bf2b27475463b2400
|
[] |
no_license
|
cran/conting
|
7c259dde0bc77603b63ab7836dfac5c6adafff24
|
aa153b7c9f34c3b55395c8e436a106cee78a4b97
|
refs/heads/master
| 2020-12-24T08:41:02.243028
| 2019-04-02T19:00:03
| 2019-04-02T19:00:03
| 17,695,235
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 442
|
r
|
print.pval.R
|
# S3 print method for "pval" objects (posterior predictive checks).
# Expects x to carry: statnum (1, 2 or 3 -- which discrepancy statistic was
# used), Tpred and Tobs (draws of the statistic), and pval (the pre-computed
# Bayesian p-value). `digits` controls rounding of all printed summaries.
print.pval <-
function(x, digits = max(3, getOption("digits") - 3),...){
# Map statnum onto the statistic's printed name via a logical index.
statistic2<-c("X2","deviance","Freeman-Tukey")[x$statnum==(1:3)]
cat("Under the",statistic2,"statistic \n")
cat("\n")
# Summary of the statistic under the posterior predictive distribution.
cat("Summary statistics for T_pred \n")
print(round(summary(x$Tpred),digits=digits))
cat("\n")
# Summary of the statistic evaluated at the observed data.
cat("Summary statistics for T_obs \n")
print(round(summary(x$Tobs),digits=digits))
cat("\n")
cat("Bayesian p-value = ",round(x$pval,digits=digits),"\n")}
|
c345994f0ae2de0825e0daee03b05d8d7885c040
|
280cfdaad648f7211566eb7f2d943952fec91480
|
/part_2-regression/random-forest-regression/random_forest_regression.R
|
3a49367a47cb9a40b568c4c757a941e04cc274aa
|
[] |
no_license
|
mdrijwan123/Machine-Learning-Udemy-A-to-Z
|
01aef5bc9fa8f37c8fbe8e90aeacbdfed7f71cd5
|
c25a87662a56f311b7536908e3becb13b7068ba1
|
refs/heads/master
| 2020-04-12T02:04:53.780657
| 2018-12-18T06:21:08
| 2018-12-18T06:21:08
| 162,237,522
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 915
|
r
|
random_forest_regression.R
|
# Importing the dataset
# ---------------------
# Position_Salaries.csv: keep only Level (col 2) and Salary (col 3).
dataset = read.csv('../../data_files/Position_Salaries.csv')
dataset = dataset[2:3]
# Fitting Random Forest Regression to the dataset (500 trees, fixed seed).
# install.packages('randomForest')
library(randomForest)
set.seed(1234)
regressor = randomForest(x=dataset[1],
                         y=dataset$Salary,
                         ntree=500)
# Predicting a new result with the Random Forest model (Level = 6.5).
y_pred = predict(regressor, data.frame(Level=6.5))
library(ggplot2)
# Build a fine grid (step 0.1) to visualize the predicted step function in
# higher resolution.
x_grid = seq(min(dataset$Level), max(dataset$Level), 0.1)
# Visualizing the Random Forest Regression results: observed points in red,
# grid predictions in blue.
ggplot() +
  geom_point(aes(x=dataset$Level, y=dataset$Salary), color='red') +
  geom_line(aes(x=x_grid, y=predict(regressor, newdata=data.frame(Level=x_grid))), color='blue') +
  ggtitle('Truth and Bluff (Random Forest Regression Model)') +
  xlab('Level') +
  ylab('Salary')
|
a7cb9e6895e65638c84339e81cfa7a14e474b80f
|
2903a1a39d4fa77a7fc86258733a56fd80be43fa
|
/R/RcppExports.R
|
94de28aedcda450ea2e8fb40bfbea035b14a45e1
|
[] |
no_license
|
bruce1995/BAGEL
|
fc919ba003269df51ee7ebcced800ddb208cc359
|
59f3085b515070bc871c0d06cf86af5a71b07fe4
|
refs/heads/master
| 2022-11-14T15:14:25.649593
| 2020-06-28T20:51:44
| 2020-07-02T16:00:49
| 275,665,344
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,977
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): auto-generated R wrappers around this package's compiled C++
# routines (parameter semantics live in the C++ sources). To change them,
# edit the C++ files under src/ and re-run Rcpp::compileAttributes() rather
# than editing this file.
compute_mu <- function(H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B) {
    .Call('_BAGEL_compute_mu', PACKAGE = 'BAGEL', H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B)
}
compute_muk2 <- function(alphakt, gammakt, Rk, betak, Zkt, datak, D, Q, S, B) {
    .Call('_BAGEL_compute_muk2', PACKAGE = 'BAGEL', alphakt, gammakt, Rk, betak, Zkt, datak, D, Q, S, B)
}
update_alpha <- function(H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B, mu, Y, sigma_square, sigma_square_alpha = 1) {
    .Call('_BAGEL_update_alpha', PACKAGE = 'BAGEL', H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B, mu, Y, sigma_square, sigma_square_alpha)
}
update_gamma <- function(H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B, mu, Y, sigma_square, sigma_square_gamma = 1) {
    .Call('_BAGEL_update_gamma', PACKAGE = 'BAGEL', H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B, mu, Y, sigma_square, sigma_square_gamma)
}
update_beta <- function(H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B, Y, sigma_square, sigma_square_beta = 1) {
    .Call('_BAGEL_update_beta', PACKAGE = 'BAGEL', H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B, Y, sigma_square, sigma_square_beta)
}
compute_likelihood <- function(a, mu, U, sigma_square) {
    .Call('_BAGEL_compute_likelihood', PACKAGE = 'BAGEL', a, mu, U, sigma_square)
}
update_R <- function(H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B, a, U, mu, Y, rho = 0.5, sigma_square = 1) {
    .Call('_BAGEL_update_R', PACKAGE = 'BAGEL', H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B, a, U, mu, Y, rho, sigma_square)
}
update_e <- function(H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B, a, U, mu, Y, N, sigma_square = 1, m0 = 1, rho = 0.5, H_max = 50L) {
    .Call('_BAGEL_update_e', PACKAGE = 'BAGEL', H, e, alpha, gamma, R, beta, Z, data, D, Q, S, B, a, U, mu, Y, N, sigma_square, m0, rho, H_max)
}
update_sig_square <- function(omega, a = 1, b = 1) {
    .Call('_BAGEL_update_sig_square', PACKAGE = 'BAGEL', omega, a, b)
}
update_sig_square_cub <- function(Y, a = 1, b = 1) {
    .Call('_BAGEL_update_sig_square_cub', PACKAGE = 'BAGEL', Y, a, b)
}
update_omega <- function(Y, mu, Comega, sigma_square, Q) {
    .Call('_BAGEL_update_omega', PACKAGE = 'BAGEL', Y, mu, Comega, sigma_square, Q)
}
update_gamma_sigma_square <- function(gamma, B, a = 1, b = 1) {
    .Call('_BAGEL_update_gamma_sigma_square', PACKAGE = 'BAGEL', gamma, B, a, b)
}
update_Y <- function(U, a, mu, Q, sigma_square = 1) {
    .Call('_BAGEL_update_Y', PACKAGE = 'BAGEL', U, a, mu, Q, sigma_square)
}
update_a <- function(U, mu, a, sigma, step = 0.1) {
    .Call('_BAGEL_update_a', PACKAGE = 'BAGEL', U, mu, a, sigma, step)
}
update_Comega <- function(omega, Comega, Q, sigma_square, step = 0.05, lower = -0.999, upper = 0.999) {
    .Call('_BAGEL_update_Comega', PACKAGE = 'BAGEL', omega, Comega, Q, sigma_square, step, lower, upper)
}
|
34f0782bd09831ad7500ba6645e4ebae29cb1ec6
|
6503c8c87f4e7d88ef7cc6231edf9206c5661810
|
/plot2.R
|
85a740c55f7dcbcb0565853e100ac2ca39e822b4
|
[] |
no_license
|
bisc11/ExData_Plotting1
|
68360c50e49f10135dfdb0dc70cdaef67e48686c
|
2180560ebe0df72aa2234f86c63a921353e19595
|
refs/heads/master
| 2020-12-30T23:21:15.093616
| 2015-06-04T11:54:01
| 2015-06-04T11:54:01
| 36,856,688
| 0
| 0
| null | 2015-06-04T08:01:27
| 2015-06-04T08:01:26
| null |
UTF-8
|
R
| false
| false
| 1,398
|
r
|
plot2.R
|
## plot2.R: plots Global Active Power (kW) over 2007-02-01 / 2007-02-02 from the
## UCI "household_power_consumption" dataset and writes it to plot2.png.
##
## Before running this script, check that the datafile "household_power_consumption.txt"
## is unzipped and in your working directory.
if (!file.exists("household_power_consumption.txt")){
  print("Download and unzip household_power_consumption.txt in your working directory")
}

## Read the datafile. Since we will only be using data from the dates 2007-02-01
## and 2007-02-02, only this data is read. (Makes the script run much faster also.)
## NOTE(review): with skip = 66636 and header = TRUE the first non-skipped line is
## consumed as a "header" row and discarded; the offsets are tuned to this exact
## file layout -- confirm them if the source file ever changes.
housepower <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                         skip = 66636, nrows = 2880)
names(housepower) <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
                       "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2",
                       "Sub_metering_3")

## Combine Date and Time into one column to use as the x axis of the plot.
## Use POSIXct rather than POSIXlt: POSIXct is the appropriate date-time class
## for a data-frame column (POSIXlt is list-based and a known footgun there).
## This also drops a stray trailing comma that passed an empty argument.
housepower$DateTime <- as.POSIXct(paste(housepower$Date, housepower$Time),
                                  format = "%d/%m/%Y %H:%M:%S")

## Open a PNG graphics device, draw the line plot directly (type = "l" replaces
## the empty-plot-then-lines() two-step), and close the device.
png(filename = "plot2.png", width = 480, height = 480, units = "px")
plot(housepower$DateTime, housepower$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
35650c5f45ed153d78c0cdd2d40431b420703b0b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GUILDS/examples/maxLikelihood.ESF.Rd.R
|
d13e62dbde7cb6b38c4062545ccae7e267537e49
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 280
|
r
|
maxLikelihood.ESF.Rd.R
|
### Extracted example for GUILDS::maxLikelihood.ESF
### Maximises the loglikelihood under the standard Neutral Model,
### using the Etienne Sampling Formula.
library(GUILDS)

# Small abundance vector; optimise starting from the parameter vector c(7, 0.1).
abundances <- c(1, 1, 1, 3, 5, 8)
maxLikelihood.ESF(c(7, 0.1), abund = abundances)
|
97067ddd8b5a58ba751e9bc87dccf7d484b2495d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/powerSurvEpi/examples/ssizeCT.default.Rd.R
|
a9f828c19c62e4e9492a800ef5c3d54e8c305bcb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 416
|
r
|
ssizeCT.default.Rd.R
|
### Extracted example for powerSurvEpi::ssizeCT.default
### Sample-size calculation for the analysis of survival data in clinical trials.
library(powerSurvEpi)

# Example 14.42 in Rosner B., Fundamentals of Biostatistics (6th edition), p. 809.
ssizeCT.default(
  power = 0.8,
  k     = 1,
  pE    = 0.3707,
  pC    = 0.4890,
  RR    = 0.7,
  alpha = 0.05
)
|
5346a52a560ad8145ed1ef7d15ac70fefa0e1721
|
ba07f5cbc690640115108e4ee07b46ef8340e5fe
|
/DA3-labs/lab2/code/Ch16_airbnb_random_forest.R
|
c95406834580d1f2ff70007451c0d2b7a27889a4
|
[] |
no_license
|
ozkrleal/london-prediction-r
|
08a16f4c6b3416d57d3b2cea24b10c797eafed41
|
f81488a92dae37b7e54074d6ebb76b62f95fbfa7
|
refs/heads/master
| 2020-12-20T13:12:59.718332
| 2020-02-15T00:32:02
| 2020-02-15T00:32:02
| 236,085,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,006
|
r
|
Ch16_airbnb_random_forest.R
|
############################################################
#
# DATA ANALYSIS TEXTBOOK
# RANDOM FOREST
# ILLUSTRATION STUDY
# Airbnb London 2017 march 05 data
# v2.1 2019-04-24
# v2.2 2019-07-25 added todo-s
# v2.3 2019-08-12 some small changes by BG
# v2.4 2019-19-18 major update by Zsuzsi
# v2.5 2019-19-19 small fixes by BG, some minor todos
# v2.6 small fixes by Zs, better graphs
# v2.7 new model definitions, new train-holdout split
# v2.8 2019-10-04 many small changes by GB, some minor todos
# v2.9 2020-01-11 data cleaning taken out to prep code + minor graph changes + minor model changes
# v3.0 2020-01-12 table edits, revert varimp graphs to pct
#
############################################################
#
# WHAT THIS CODE DOES:
#
# Define models
# Run random forest, GBM
# Shows tuning
# Evaluate the performance
# Compares models
# Does diagnostics

###########################################################

# CLEAR MEMORY
# best to start new session
# NOTE(review): rm(list = ls()) only clears the global environment (not loaded
# packages or options); restarting R is the cleaner way to get a fresh state.
rm(list=ls())

library(rattle)
library(tidyverse)
library(caret)
library(ranger)
library(Hmisc)
library(knitr)
library(kableExtra)
library(xtable)

# location folders (paths are relative to the project root)
data_in <- "lab2/data/"
data_out <- "lab2/data/"
output <- "lab2/output/"

# load ggplot theme function and shared helpers used throughout this script
source("helper_functions/theme_bg.R")
source("helper_functions/da_helper_functions.R")
source("lab2/code/Ch14_airbnb_prediction_functions.R")

#########################################################################################
#
# PART I
# Loading and preparing data ----------------------------------------------
#
#########################################################################################

# Used area
area <- "london"
# Read the cleaned workfile; coerce all character columns to factors (needed by
# the modelling functions below) and drop rows with a missing target (price).
data <- read_csv(paste0(data_in, "airbnb_", area, "_workfile_adj.csv")) %>%
  mutate_if(is.character, factor) %>%
  filter(!is.na(price))
#' Count missing values per column.
#'
#' @param data A data frame (or any named list of equal-length vectors).
#' @return Named integer vector: the number of NAs for each column that has at
#'   least one missing value. Columns without NAs are dropped from the result.
count_missing_values <- function(data) {
  # vapply with a declared result type replaces purrr::map_int: it is just as
  # type-safe and keeps this helper usable without the tidyverse attached.
  num_missing_values <- vapply(data, function(x) sum(is.na(x)), integer(1))
  num_missing_values[num_missing_values > 0]
}
# Report which columns still contain missing values after loading.
count_missing_values(data)

# Sample definition and preparation ---------------------------------------

# We focus on normal apartments, n<8
data <- data %>% filter(n_accommodates < 8)

# copy a variable - purpose later, see at variable importance
# (a duplicated predictor is used to show how varimp splits between copies)
data <- data %>% mutate(n_accommodates_copy = n_accommodates)

# basic descriptive statistics -------------------------------------------
skimr::skim(data)
summary(data$price)
Hmisc::describe(data$price)
describe(data$f_room_type)
describe(data$f_property_type)
table(data$f_number_of_reviews)

# create train and holdout samples -------------------------------------------
# train is where we do it all, incl CV

set.seed(2801)

# First pick a smaller than usual training set so that models run faster and check if works
# If works, start anew without these two lines
# try <- createDataPartition(data$price, p = 0.2, list = FALSE)
#data <- data[try, ]

# CUSTOM NEIGHBORHOOD FILTER FOR SMALLER RUNTIMES
# (keep only two boroughs, and re-level the factor so only those levels remain)
selected_boroughs <- c("Hackney", "Camden")
data <- data %>% filter(
  f_neighbourhood_cleansed %in% selected_boroughs)
data <- data %>%
  mutate(
    f_neighbourhood_cleansed = factor(
      f_neighbourhood_cleansed, levels = selected_boroughs))

# 70/30 train/holdout split, stratified on price by createDataPartition
train_indices <- createDataPartition(data$price, p = 0.7, list = FALSE)
data_train <- data[train_indices, ]
data_holdout <- data[-train_indices, ]

dim(data_train)
dim(data_holdout)

# Define models: simpler, extended -----------------------------------------------------------

# Basic variables incl. neighbourhood
basic_vars <- c(
  "n_accommodates", "n_beds", "n_days_since",
  "f_property_type","f_room_type", "f_bathroom", "f_cancellation_policy", "f_bed_type",
  "f_neighbourhood_cleansed")

# reviews
reviews <- c("n_number_of_reviews", "flag_n_number_of_reviews" ,"n_review_scores_rating", "flag_review_scores_rating")

# Dummy variables (all columns prefixed d_ are amenity indicators)
amenities <- grep("^d_.*", names(data), value = TRUE)

# interactions for the LASSO
# from ch14
X1 <- c("n_accommodates*f_property_type", "f_room_type*f_property_type", "f_room_type*d_familykidfriendly",
        "d_airconditioning*f_property_type", "d_cats*f_property_type", "d_dogs*f_property_type")
# with boroughs
X2 <- c("f_property_type*f_neighbourhood_cleansed", "f_room_type*f_neighbourhood_cleansed",
        "n_accommodates*f_neighbourhood_cleansed" )

# Three nested predictor sets: basic (1), benchmark (2), extended w/ interactions (E)
predictors_1 <- c(basic_vars)
predictors_2 <- c(basic_vars, reviews, amenities)
predictors_E <- c(basic_vars, reviews, amenities, X1,X2)
#########################################################################################
#
# PART II
# RANDOM FORESTS -------------------------------------------------------
#
#########################################################################################

# do 5-fold CV
train_control <- trainControl(method = "cv",
                              number = 5,
                              verboseIter = FALSE)

# set tuning grid: mtry (vars tried per split) x min.node.size
tune_grid <- expand.grid(
  .mtry = c(7, 9),
  .splitrule = "variance",
  .min.node.size = c(5, 10)
)

# simpler model for model A (1)
set.seed(1234)
system.time({
  rf_model_1 <- train(
    formula(paste0("price ~", paste0(predictors_1, collapse = " + "))),
    data = data_train,
    method = "ranger",
    trControl = train_control,
    tuneGrid = tune_grid,
    importance = "impurity"
  )
})
rf_model_1

# set tuning for benchmark model (2)
tune_grid <- expand.grid(
  .mtry = c(10, 12),
  .splitrule = "variance",
  .min.node.size = c(5, 10)
)

set.seed(1234)
system.time({
  rf_model_2 <- train(
    formula(paste0("price ~", paste0(predictors_2, collapse = " + "))),
    data = data_train,
    method = "ranger",
    trControl = train_control,
    tuneGrid = tune_grid,
    importance = "impurity"
  )
})
rf_model_2

# # auto tuning first
# set.seed(1234)
# system.time({
#   rf_model_2auto <- train(
#     formula(paste0("price ~", paste0(predictors_2, collapse = " + "))),
#     data = data_train,
#     method = "ranger",
#     trControl = train_control,
#     tuneLength = 2,
#     importance = "impurity"
#   )
# })
# rf_model_2auto

# evaluate random forests -------------------------------------------------
# resamples() collects the CV fold results of both models for comparison
results <- resamples(
  list(
    model_1 = rf_model_1,
    model_2 = rf_model_2
    # model_2b = rf_model_2b
  )
)
summary(results)

# Save outputs -------------------------------------------------------

# Show Model B rmse shown with all the combinations
# NOTE(review): tidyr::spread() is superseded by pivot_wider(); kept as-is.
rf_tuning_modelB <- rf_model_2$results %>%
  select(mtry, min.node.size, RMSE) %>%
  rename(nodes = min.node.size) %>%
  spread(key = mtry, value = RMSE)

kable(x = rf_tuning_modelB, format = "latex", digits = 2, caption = "CV RMSE") %>%
  add_header_above(c(" ", "vars" = 3)) %>%
  cat(.,file= paste0(output,"rf_tuning_modelB.tex"))

# Tuning parameter choice 1: the selected mtry / min.node.size of each model
result_1 <- matrix(c(
  rf_model_1$finalModel$mtry,
  rf_model_2$finalModel$mtry,
  rf_model_1$finalModel$min.node.size,
  rf_model_2$finalModel$min.node.size
),
nrow=2, ncol=2,
dimnames = list(c("Model A", "Model B"),
                c("Min vars","Min nodes"))
)

kable(x = result_1, format = "latex", digits = 3) %>%
  cat(.,file= paste0(output,"rf_models_turning_choices.tex"))

# Tuning parameter choice 2: mean CV RMSE of each model
result_2 <- matrix(c(mean(results$values$`model_1~RMSE`),
                     mean(results$values$`model_2~RMSE`)
),
nrow=2, ncol=1,
dimnames = list(c("Model A", "Model B"),
                c(results$metrics[2]))
)

kable(x = result_2, format = "latex", digits = 3) %>%
  cat(.,file= paste0(output,"rf_models_rmse.tex"))
#########################################################################################
#
# PART III
# MODEL DIAGNOSTICS -------------------------------------------------------
#
#########################################################################################
#########################################################################################
# Variable Importance Plots -------------------------------------------------------
#########################################################################################
# first need a function to calculate grouped varimp
#' Sum variable importances over named groups of columns.
#'
#' @param rf.obj Fitted forest object accepted by `importance()` (here: the
#'   ranger `finalModel` from caret).
#' @param groups Named list; each element is a character vector of importance
#'   names (e.g. the dummy columns generated from one factor).
#' @return One-column numeric matrix of group-summed importances, rows named
#'   after `groups`.
group.importance <- function(rf.obj, groups) {
  # Pull the importance vector once, instead of re-evaluating importance()
  # inside the loop for every group; vapply replaces sapply for a guaranteed
  # numeric result.
  all.imp <- importance(rf.obj)
  var.imp <- as.matrix(vapply(groups, function(g) {
    sum(all.imp[g], na.rm = TRUE)
  }, numeric(1)))
  # NOTE(review): label kept for backward compatibility; for a ranger
  # regression forest the impurity importance is variance-based, not Gini.
  colnames(var.imp) <- "MeanDecreaseGini"
  return(var.imp)
}
# variable importance plot
# 1) full varimp plot, full
# 2) varimp plot grouped
# 3) varimp plot , top 10
# 4) varimp plot w copy, top 10

# Importance rescaled by 1000 for readable axis values; plots below show shares.
rf_model_2_var_imp <- importance(rf_model_2$finalModel)/1000
rf_model_2_var_imp_df <-
  data.frame(varname = names(rf_model_2_var_imp),imp = rf_model_2_var_imp) %>%
  # human-readable labels for factor-derived dummy names
  mutate(varname = gsub("f_neighbourhood_cleansed", "Borough:", varname) ) %>%
  mutate(varname = gsub("f_room_type", "Room type:", varname) ) %>%
  arrange(desc(imp)) %>%
  mutate(imp_percentage = imp/sum(imp))

##############################
# 1) full varimp plot, above a cutoff
##############################

# to have a quick look
plot(varImp(rf_model_2))

# only show variables whose (rescaled) importance exceeds this cutoff
cutoff = 600
rf_model_2_var_imp_plot <- ggplot(rf_model_2_var_imp_df[rf_model_2_var_imp_df$imp>cutoff,],
                                  aes(x=reorder(varname, imp), y=imp_percentage)) +
  geom_point(color=color[3], size=1) +
  geom_segment(aes(x=varname,xend=varname,y=0,yend=imp_percentage), color=color[3], size=0.75) +
  ylab("Importance") +
  xlab("Variable Name") +
  coord_flip() +
  scale_y_continuous(labels = scales::percent) +
  theme_bg() +
  theme(axis.text.x = element_text(size=4), axis.text.y = element_text(size=4),
        axis.title.x = element_text(size=4), axis.title.y = element_text(size=4))
rf_model_2_var_imp_plot
# save both PNG and EPS versions (mywidth_*/myheight_* come from theme_bg.R)
ggsave(paste0(output, "rf_varimp1.png"), width=mywidth_small, height=myheight_small, unit="cm", dpi=1200)
cairo_ps(filename = paste0(output, "rf_varimp1.eps"),
         width = mywidth_small, height = myheight_small, pointsize = 8,
         fallback_resolution = 1200)
print(rf_model_2_var_imp_plot)
dev.off()

##############################
# 2) full varimp plot, top 10 only
##############################

# have a version with top 10 vars only
rf_model_2_var_imp_plot_b <- ggplot(rf_model_2_var_imp_df[1:10,], aes(x=reorder(varname, imp), y=imp_percentage)) +
  geom_point(color=color[3], size=2) +
  geom_segment(aes(x=varname,xend=varname,y=0,yend=imp_percentage), color=color[3], size=1.5) +
  ylab("Importance") +
  xlab("Variable Name") +
  coord_flip() +
  scale_y_continuous(labels = scales::percent) +
  theme_bg() +
  theme(axis.text.x = element_text(size=4), axis.text.y = element_text(size=4),
        axis.title.x = element_text(size=4), axis.title.y = element_text(size=4))
rf_model_2_var_imp_plot_b
ggsave(paste0(output, "rf_varimp1_b.png"), width=mywidth_small, height=myheight_small, unit="cm", dpi=1200)
cairo_ps(filename = paste0(output, "rf_varimp1_b.eps"),
         width = mywidth_small, height = myheight_small, pointsize = 8,
         fallback_resolution = 1200)
print(rf_model_2_var_imp_plot_b)
dev.off()

##############################
# 2) varimp plot grouped
##############################
# grouped variable importance - keep binaries created off factors together
# (collect the dummy-column names that belong to each original factor)
varnames <- rf_model_2$finalModel$xNames
f_neighbourhood_cleansed_varnames <- grep("f_neighbourhood_cleansed",varnames, value = TRUE)
f_cancellation_policy_varnames <- grep("f_cancellation_policy",varnames, value = TRUE)
f_bed_type_varnames <- grep("f_bed_type",varnames, value = TRUE)
f_property_type_varnames <- grep("f_property_type",varnames, value = TRUE)
f_room_type_varnames <- grep("f_room_type",varnames, value = TRUE)

groups <- list(f_neighbourhood_cleansed=f_neighbourhood_cleansed_varnames,
               f_cancellation_policy = f_cancellation_policy_varnames,
               f_bed_type = f_bed_type_varnames,
               f_property_type = f_property_type_varnames,
               f_room_type = f_room_type_varnames,
               f_bathroom = "f_bathroom",
               n_days_since = "n_days_since",
               n_accommodates = "n_accommodates",
               n_beds = "n_beds")

rf_model_2_var_imp_grouped <- group.importance(rf_model_2$finalModel, groups)
rf_model_2_var_imp_grouped_df <- data.frame(varname = rownames(rf_model_2_var_imp_grouped),
                                            imp = rf_model_2_var_imp_grouped[,1])  %>%
  mutate(imp_percentage = imp/sum(imp))

rf_model_2_var_imp_grouped_plot <-
  ggplot(rf_model_2_var_imp_grouped_df, aes(x=reorder(varname, imp), y=imp_percentage)) +
  geom_point(color=color[3], size=2) +
  geom_segment(aes(x=varname,xend=varname,y=0,yend=imp_percentage), color=color[3], size=1.5) +
  ylab("Importance") +
  xlab("Variable Name") +
  coord_flip() +
  scale_y_continuous(labels = scales::percent) +
  theme_bg() +
  theme(axis.text.x = element_text(size=8), axis.text.y = element_text(size=8),
        axis.title.x = element_text(size=8), axis.title.y = element_text(size=8))
rf_model_2_var_imp_grouped_plot
ggsave(paste0(output, "rf_varimp_grouped1.png"), width=mywidth_large, height=myheight_large, unit="cm", dpi=1200)
cairo_ps(filename = paste0(output, "rf_varimp_grouped1.eps"),
         width = mywidth_large, height = myheight_large, pointsize = 12,
         fallback_resolution = 1200)
print(rf_model_2_var_imp_grouped_plot)
dev.off()

# a side investigation -------------------------------------------------------
# with an important variable duplicated: how does it change?

##############################
# 4) varimp plot w copy, top 10
##############################

# repeat model B
# done smartly: we take mtry min.node.size from the final model
# (so only a single parameter combination is refitted, not the whole grid)
set.seed(1234)
rf_model_2_with_copy <- train(
  formula(paste0("price ~", paste0(c(predictors_2, "n_accommodates_copy"), collapse = " + "))),
  data = data_train,
  method = "ranger",
  trControl = train_control,
  tuneGrid = data.frame(
    .mtry = rf_model_2$finalModel$mtry,
    .splitrule = "variance",
    .min.node.size = rf_model_2$finalModel$min.node.size
  ),
  importance = "impurity"
)

rf_model_2_with_copy_var_imp <- importance(rf_model_2_with_copy$finalModel)

rf_model_2_with_copy_var_imp_df <- data.frame(varname = names(rf_model_2_with_copy_var_imp),
                                              imp = rf_model_2_with_copy_var_imp) %>%
  arrange(desc(imp)) %>%
  mutate(imp_percentage = imp/sum(imp))

# only keep top 10 as well
rf_model_2_with_copy_var_imp_plot <- ggplot(rf_model_2_with_copy_var_imp_df[1:10,],
                                            aes(x=reorder(varname, imp), y=imp_percentage)) +
  geom_point(color=color[3], size=2) +
  geom_segment(aes(x=varname,xend=varname,y=0,yend=imp_percentage), color=color[3], size=1.5) +
  ylab("Importance") +
  xlab("Variable Name") +
  coord_flip() +
  scale_y_continuous(labels = scales::percent) +
  theme_bg() +
  theme(axis.text.x = element_text(size=4), axis.text.y = element_text(size=4),
        axis.title.x = element_text(size=4), axis.title.y = element_text(size=4))
rf_model_2_with_copy_var_imp_plot
ggsave(paste0(output, "rf_varimp_withcopy.png"), width=mywidth_small, height=myheight_small, unit="cm", dpi=1200)
cairo_ps(filename = paste0(output, "rf_varimp_withcopy.eps"),
         width = mywidth_small, height = myheight_small, pointsize = 8,
         fallback_resolution = 1200)
print(rf_model_2_with_copy_var_imp_plot)
dev.off()
#########################################################################################
# Partial Dependence Plots -------------------------------------------------------
#########################################################################################

# output file-name stubs for the two PDPs (used when saving manually)
pltnames_pdp <- list("n_accommodates" = "rf_pdp_n_accom",
                     "f_room_type" = "rf_pdp_roomtype")

# NOTE(review): dplyr::distinct_() is deprecated; distinct(data_train, n_accommodates)
# is the modern equivalent -- kept as-is to preserve behaviour.
pdp::partial(rf_model_2, pred.var = "n_accommodates", pred.grid = distinct_(data_train, "n_accommodates"), train = data_train) %>%
  autoplot( ) +
  theme_bg() +
  theme(axis.text.x = element_text(size=16), axis.text.y = element_text(size=16),
        axis.title.x = element_text(size=16), axis.title.y = element_text(size=16))

pdp::partial(rf_model_2, pred.var = "f_room_type", pred.grid = distinct_(data_train, "f_room_type"), train = data_train) %>%
  autoplot( ) +
  theme_bg() +
  theme(axis.text.x = element_text(size=16), axis.text.y = element_text(size=16),
        axis.title.x = element_text(size=16), axis.title.y = element_text(size=16))

# Subsample performance: RMSE / mean(y) ---------------------------------------
# NOTE  we do this on the holdout set.

# ---- cheaper or more expensive flats - not used in book
data_holdout_w_prediction <- data_holdout %>%
  mutate(predicted_price = predict(rf_model_2, newdata = data_holdout))

# residuals vs price, with the median price marked
ggplot(data_holdout_w_prediction, aes(x = price, y = price - predicted_price)) +
  geom_point(alpha = 0.01, color = color[3]) +
  geom_vline(xintercept = median(data_holdout_w_prediction[["price"]]), linetype = "dashed") +
  theme_bw()

describe(data_holdout_w_prediction$n_accommodates)

######### create nice summary table of heterogeneity
# NOTE(review): a/b/c/d shadow base names (notably base::c); function lookup in R
# skips non-function bindings so later c(...) calls still work, but the names
# are poor style.
a <- data_holdout_w_prediction %>%
  mutate(is_low_size = ifelse(n_accommodates <= 3, "small apt", "large apt")) %>%
  group_by(is_low_size) %>%
  summarise(
    rmse = RMSE(predicted_price, price),
    mean_price = mean(price),
    rmse_norm = RMSE(predicted_price, price) / mean(price)
  )

b <- data_holdout_w_prediction %>%
  filter(f_neighbourhood_cleansed %in% c("Westminster", "Camden", "Kensington and Chelsea", "Tower Hamlets", "Hackney", "Newham")) %>%
  group_by(f_neighbourhood_cleansed) %>%
  summarise(
    rmse = RMSE(predicted_price, price),
    mean_price = mean(price),
    rmse_norm = rmse / mean_price
  )

c <- data_holdout_w_prediction %>%
  filter(f_property_type %in% c("Apartment", "House")) %>%
  group_by(f_property_type) %>%
  summarise(
    rmse = RMSE(predicted_price, price),
    mean_price = mean(price),
    rmse_norm = rmse / mean_price
  )

d <- data_holdout_w_prediction %>%
  summarise(
    rmse = RMSE(predicted_price, price),
    mean_price = mean(price),
    rmse_norm = RMSE(predicted_price, price) / mean(price)
  )

# Save output: stack the sub-tables with section-header rows into one LaTeX table
colnames(a) <- c("", "RMSE", "Mean price", "RMSE/price")
colnames(b) <- c("", "RMSE", "Mean price", "RMSE/price")
colnames(c) <- c("", "RMSE", "Mean price", "RMSE/price")
d<- cbind("All", d)
colnames(d) <- c("", "RMSE", "Mean price", "RMSE/price")

line1 <- c("Type", "", "", "")
line2 <- c("Apartment size", "", "", "")
line3 <- c("Borough", "", "", "")

result_3 <- rbind(line2, a, line1, c, line3, b, d) %>%
  transform(RMSE = as.numeric(RMSE), `Mean price` = as.numeric(`Mean price`),
            `RMSE/price` = as.numeric(`RMSE/price`))

# blank out NA cells in the LaTeX output, then restore the default
options(knitr.kable.NA = '')
kable(x = result_3, format = "latex", booktabs=TRUE, linesep = "",digits = c(0,2,1,2), col.names = c("","RMSE","Mean price","RMSE/price")) %>%
  cat(.,file= paste0(output, "performance_across_subsamples.tex"))
options(knitr.kable.NA = NULL)
##########################################
#########################################################################################
#
# PART IV
# HORSERACE: compare with other models -----------------------------------------------
#
#########################################################################################

# OLS with dummies for area
# using model B
set.seed(1234)
system.time({
  ols_model <- train(
    formula(paste0("price ~", paste0(predictors_2, collapse = " + "))),
    data = data_train,
    method = "lm",
    trControl = train_control
  )
})

# collect OLS coefficients; strip the backticks caret adds around some names
ols_model_coeffs <- ols_model$finalModel$coefficients
ols_model_coeffs_df <- data.frame(
  "variable" = names(ols_model_coeffs),
  "ols_coefficient" = ols_model_coeffs
) %>%
  mutate(variable = gsub("`","",variable))

# * LASSO
# using extended model w interactions
set.seed(1234)
system.time({
  lasso_model <- train(
    formula(paste0("price ~", paste0(predictors_E, collapse = " + "))),
    data = data_train,
    method = "glmnet",
    preProcess = c("center", "scale"),
    tuneGrid =  expand.grid("alpha" = 1, "lambda" = seq(0.01, 0.25, by = 0.01)),
    trControl = train_control
  )
})

# coefficients at the CV-selected lambda
lasso_coeffs <- coef(
  lasso_model$finalModel,
  lasso_model$bestTune$lambda) %>%
  as.matrix() %>%
  as.data.frame() %>%
  rownames_to_column(var = "variable") %>%
  rename(lasso_coefficient = `1`)  # the column has a name "1", to be renamed

# keep only the variables LASSO did not shrink to exactly zero
lasso_coeffs_non_null <- lasso_coeffs[!lasso_coeffs$lasso_coefficient == 0,]

# side-by-side OLS vs LASSO coefficients (full outer join on variable name)
regression_coeffs <- merge(ols_model_coeffs_df, lasso_coeffs_non_null, by = "variable", all=TRUE)
regression_coeffs %>%
  write.csv(file = paste0(output, "regression_coeffs.csv"))

# CART
set.seed(1234)
system.time({
  cart_model <- train(
    formula(paste0("price ~", paste0(predictors_2, collapse = " + "))),
    data = data_train,
    method = "rpart",
    tuneLength = 10,
    trControl = train_control
  )
})

fancyRpartPlot(cart_model$finalModel, sub = "")

# GBM  -------------------------------------------------------
gbm_grid <-  expand.grid(interaction.depth = c(1, 5, 10), # complexity of the tree
                         n.trees = 250, # number of iterations, i.e. trees
                         shrinkage = c(0.05, 0.1), # learning rate: how quickly the algorithm adapts
                         n.minobsinnode = 20 # the minimum number of training set samples in a node to commence splitting
)

set.seed(1234)
system.time({
  gbm_model <- train(formula(paste0("price ~", paste0(predictors_2, collapse = " + "))),
                     data = data_train,
                     method = "gbm",
                     trControl = train_control,
                     verbose = FALSE,
                     tuneGrid = gbm_grid)
})
gbm_model

# much more tuning

# faster, for testing
#gbm_grid2 <-  expand.grid(interaction.depth = c( 5, 7, 9, 11), # complexity of the tree
#                          n.trees = (1:10)*50, # number of iterations, i.e. trees
#                          shrinkage = c(0.05, 0.1), # learning rate: how quickly the algorithm adapts
#                          n.minobsinnode = c(10,20) # the minimum number of training set samples in a node to commence splitting
#)

# # the next will be in final model, loads of tuning
#  gbm_grid2 <-  expand.grid(interaction.depth = c(1, 3, 5, 7, 9, 11), # complexity of the tree
#                            n.trees = (1:10)*50, # number of iterations, i.e. trees
#                            shrinkage = c(0.02, 0.05, 0.1, 0.15, 0.2), # learning rate: how quickly the algorithm adapts
#                            n.minobsinnode = c(5,10,20,30) # the minimum number of training set samples in a node to commence splitting
# )

# set.seed(1234)
# system.time({
#   gbm_model2 <- train(formula(paste0("price ~", paste0(predictors_2, collapse = " + "))),
#                       data = data_train,
#                       method = "gbm",
#                       trControl = train_control,
#                       verbose = FALSE,
#                       tuneGrid = gbm_grid2)
# })
# gbm_model2

# and get prediction rmse and add to next summary table

# ---- compare these models
final_models <-
  list("OLS" = ols_model,
       "LASSO (model w/ interactions)" = lasso_model,
       "CART" = cart_model,
       "Random forest (smaller model)" = rf_model_1,
       "Random forest" = rf_model_2,
       # "Random forest (auto tuned)" = rf_model_2auto,
       "GBM (basic tuning)" = gbm_model
       # "GBM (broad tuning)" = gbm_model2
  )

results <- resamples(final_models) %>% summary()

# Save output --------------------------------------------------------
# Model selection is carried out on this CV RMSE
result_4 <- imap(final_models, ~{
  mean(results$values[[paste0(.y,"~RMSE")]])
}) %>% unlist() %>% as.data.frame() %>%
  rename("CV RMSE" = ".")

kable(x = result_4, format = "latex", digits = 3, booktabs=TRUE, linesep = "") %>%
  cat(.,file= paste0(output,"horse_race_of_models_cv_rmse.tex"))

# evaluate preferred model on the holdout set -----------------------------
RMSE(predict(rf_model_2, newdata = data_holdout), data_holdout[["price"]])
|
1a32875da035900d329cfe13d31aae101b5fe5b7
|
ff2689000222b710c8c39c3ebc791cb7bf46446a
|
/R/bgm.R
|
6834b3cf5c652403c8956d1028f43554baa81493
|
[] |
no_license
|
vianney6364/ETGC
|
1e4734fb7b096fcad825a24d32e64e72c2332f30
|
60add98ef34dcfb1f2d104e2b0275c6bf9d96f1b
|
refs/heads/main
| 2023-05-09T14:26:56.219939
| 2021-06-01T14:07:18
| 2021-06-01T14:07:18
| 372,831,922
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,540
|
r
|
bgm.R
|
#' Bayesian global model: wraps a generic JAGS biomass production model in a single function
#' @param tab_input data frame with one row per year and columns Year, C (catch) and I (abundance index)
#' @param n_proj number of projection years (default 0)
#' @examples
#'
#' #Les données d'entrée
#' data(tableau_pa)
#'
#' resultats_pa<-delta (tab=tableau_pa, esp="BOBO",list_param=c("mois","zone","type_pirogue","motorisation","zone2","saison","engin_peche2"), type_donnee="commercial", effort="auto", titre="PA", param_test=c("mois","zone","type_pirogue","motorisation","zone2","saison","engin_peche2"), espece_id_list='espece', var_eff_list= c("nb_jour_peche", "nb_sorties"), ope_id=c("presence","code_pays","annee", "mois", "zone", "zone2" , "saison", "type_pirogue" , "motorisation" , "engin_peche", "engin_peche2"), col_capture='captures', logtrans="auto", interactions="auto", facteur_flotille="engin_peche2", seuil=0.05)
#' pre_global_model_pa<-sqldf('select annee as Year,sum(i_ab) as C,avg("E.dens") as I from resultats_pa group by annee',drv='SQLite')
#' bgm(pre_global_model_pa)
#' @export
bgm <- function(tab_input, min_c, max_c, Er, CVr, CV_Cobs, CV_Iobs, n_proj = 0) {
  # Fit a Bayesian biomass-production ("global") model with JAGS and return
  # the posterior samples as a coda mcmc.list.
  #
  # Args:
  #   tab_input: data frame with columns Year, C (catch) and I (abundance index).
  #   min_c, max_c, Er, CVr, CV_Cobs, CV_Iobs: prior / observation-error
  #     hyper-parameters passed straight through to the JAGS model.
  #   n_proj: number of projection years (default 0 = no projection).
  #
  # Returns: an mcmc.list with the monitored quantities (B, r, K, C_MSY, ...).

  # Load the model specification shipped with the demerstem package; sourcing
  # it defines the character scalar `model_JAGS`, which is then exposed to
  # JAGS through a virtual text file.
  # (Fix: the original code had a stray bare `textConnection` statement here,
  # a no-op that evaluated the function object and discarded it.)
  model_file <- paste0(path.package("demerstem"), "/model_BiomProd_WithScenario_JAGS.txt")
  source(model_file)
  model <- textConnection(model_JAGS)

  # Format the observations as a list readable by JAGS.
  n_obs <- length(tab_input$Year)
  Year <- tab_input$Year
  # Biomass grid used to build the equilibrium curve.
  B_e <- seq(from = 0, to = 1500, by = 50)
  n_equi <- length(B_e)
  data <- list("I_obs" = tab_input$I, "C_obs" = tab_input$C, "n_obs" = n_obs, "n_proj" = n_proj,
               "B_e" = B_e, "n_equi" = n_equi, "Year" = Year, "min_c" = min_c, "max_c" = max_c,
               "Er" = Er, "CVr" = CVr, "CV_Cobs" = CV_Cobs, "CV_Iobs" = CV_Iobs)

  # MCMC settings.  Total REALIZED iterations = n.iter;
  # total STORED iterations per chain = n.iter / n.thin = n.stored.
  n.chains <- 3
  n.adapt <- 10000   # sampler-adaptation iterations
  n.burnin <- 1000   # burn-in iterations (discarded)
  n.stored <- 1000   # posterior draws kept per chain
  n.thin <- 10
  n.iter <- n.stored * n.thin

  # Quantities to monitor.
  monitor <- c("B", "h", "D", "C",
               "r", "r_p", "K", "K_p", "q", "sigma2p",
               "C_MSY", "C_MSY_p", "B_MSY", "h_MSY",
               "risk", "Over_C", "Over_h",
               "C_e",
               "I_pred", "C_pred")

  # Compile the model and adapt the MCMC samplers with n.adapt iterations.
  print("adapting phase")
  model.compiled <- jags.model(file = model, data = data, n.chain = n.chains, n.adapt = n.adapt)

  # Time the sampling phase.
  ptm <- proc.time()
  print("burn-in")
  update(model.compiled, n.iter = n.burnin)
  print("mcmc stored for results")
  mcmc <- coda.samples(model = model.compiled, variable.names = monitor,
                       n.iter = n.iter, thin = n.thin)
  time.to.run.mcmc <- proc.time() - ptm
  print(time.to.run.mcmc)

  # Deviance Information Criterion with the pD penalization.
  # (Fix: the original left `dic.pD` on a bare line, which prints nothing
  # inside a function, so the value is now printed explicitly.)
  dic.pD <- dic.samples(model.compiled, n.iter, "pD")
  print(dic.pD)

  return(mcmc)
}
|
7189b3d0e284132a84e6c832f5fe80a2b91e0c5c
|
133ae5007dd9d8b3786b83c778510e0294b4e75f
|
/man/blblm.Rd
|
5cff2c7076d59e2b7ff1d65863cf0fd8624ae84d
|
[
"MIT"
] |
permissive
|
shihanjing/sta141c-project
|
b80de206983efc1f548c0ed3e24b3112262a3e98
|
96a084352c1dc3606319d77e3cbef569a65523bc
|
refs/heads/master
| 2022-11-09T04:20:27.094536
| 2020-06-11T02:24:18
| 2020-06-11T02:24:18
| 270,555,743
| 0
| 0
| null | 2020-06-08T06:27:23
| 2020-06-08T06:27:22
| null |
UTF-8
|
R
| false
| true
| 592
|
rd
|
blblm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\name{blblm}
\alias{blblm}
\title{main function to run linear regression with mini bag bootstrap}
\usage{
blblm(formula, data, m = 10, B = 5000, cluster_size = 1)
}
\arguments{
\item{formula}{an object with class "formula"}
\item{data}{a data frame or a vector of filenames}
\item{m}{how many times we split the data}
\item{B}{number of bootstrap replicates performed}
\item{cluster_size}{number of clusters}
}
\value{
blblm object
}
\description{
main function to run linear regression with mini bag bootstrap
}
|
0ace6e09a89146f8da00ddaa8d9669a5be2bcce8
|
e7bd469eccdbf11cded79bfd9d4cf457f87fbba4
|
/R/error.HGgaussian.r
|
06733119105bda78855e46c3f14f2b528ac10973
|
[] |
no_license
|
parsifal9/ptools
|
ac1c4268ec8e7d728e3a21c8df764799dd8c2702
|
baeeb606b7e2f9076155de0b7885b6e45daee94c
|
refs/heads/master
| 2023-08-29T17:05:39.639233
| 2023-08-06T08:43:11
| 2023-08-06T08:43:11
| 299,174,721
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 162
|
r
|
error.HGgaussian.r
|
error.HGgaussian <- function (object, X, y, weights, fv, ...)
{
  # Weighted mean squared error of the residuals.  When `fv` is not
  # supplied, the fitted values stored on `object` ($fv) are used instead.
  fitted_vals <- if (missing(fv)) object$fv else fv
  resid <- y - fitted_vals
  mean(weights * resid^2)
}
|
54c563ea4fc9a18e8582b1413676931d3d662fbd
|
8f3d8a0c98d83b612b1ac3e989b29d1594886608
|
/etl/sagarpa.R
|
4d4da55c531cc2b03d1dd957d34d710ef2a7f384
|
[
"MIT"
] |
permissive
|
plataformapreventiva/food-price-warning-indicator
|
54c7b03e55bc7bda4aa8ec89e94b87ec1e3df52c
|
8aa1f54ddb0d1d94d517832b4d60f06c067e0616
|
refs/heads/master
| 2021-10-08T02:46:07.953202
| 2018-12-06T20:20:06
| 2018-12-06T20:20:06
| 94,374,856
| 2
| 2
| null | 2017-11-16T22:04:26
| 2017-06-14T21:32:30
|
R
|
UTF-8
|
R
| false
| false
| 892
|
r
|
sagarpa.R
|
library(tidyverse)
library(lubridate)
library(stringr)

# NOTE(fix): the original script called set.wd('../data/'), which is not an
# R function (setwd() is) and aborted the script on that line.  The call is
# dropped entirely: every path below is already written relative to the
# script's directory, so changing the working directory would break them.

# Municipal maize prices, 2004-2015 (prices per tonne, one column per year).
municipal <- read_csv('../data/maiz_municipal_2004_2015.csv')

# Reshape the year columns to long form, zero-pad the municipality code to
# three digits, and convert price per tonne to price per kg.
municipal_1 <- municipal %>%
  gather(key = anio, value = precio, -Nomestado, -Idmunicipio, -Nommunicipio) %>%
  mutate(cve_mun = str_pad(Idmunicipio, width = 3, pad = '0', side = 'left'),
         precio_kg = precio / 1000) %>%
  select(nom_ent_corto = Nomestado, cve_mun, nom_mun = Nommunicipio, anio, precio_kg)

catalogo_entidad <- read_csv('../data/catalogo_entidades.csv')

# Attach state codes/names, mark implausibly low prices (< 0.30/kg) as NA,
# build a state+municipality id, and add a log-price column for modelling.
municipal_2 <- municipal_1 %>%
  left_join(catalogo_entidad, by = 'nom_ent_corto') %>%
  select(cve_ent, nom_ent, cve_mun, nom_mun, anio, precio_kg) %>%
  arrange(cve_ent, cve_mun) %>%
  mutate(precio_kg = ifelse(precio_kg < 0.30, NA, precio_kg),
         id = paste0(cve_ent, cve_mun),
         precio_kg_log = log(precio_kg))

write_csv(x = municipal_2, path = '../data/municipal_2.csv')
|
9fd961c78c3af0f47205b5ef052000d1f6b3ea36
|
737b06d18e008ce83980cd95aab6b097010e703f
|
/docs/R/06_wrappers.R
|
43b176e191ad89d6479d6f1eee57319fa5a994eb
|
[] |
no_license
|
RemkoDuursma/cjib
|
848bc357fc3973b8aa48d6e670ab78c6cb334eb6
|
f74ae567cbb30859b43042c9585cea0565fca4a2
|
refs/heads/master
| 2021-02-28T12:01:02.890167
| 2020-11-11T08:40:03
| 2020-11-11T08:40:03
| 245,694,664
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
r
|
06_wrappers.R
|
library(ggplot2)
library(dplyr)
# Base-graphics wrapper: scatter plot of two mtcars columns (selected by
# name) with a least-squares regression line overlaid.  Extra arguments in
# `...` are forwarded to plot().
mtcars_scatter_abline <- function(xvar, yvar, xlab = xvar, ylab = yvar, ...){
  plot(mtcars[[xvar]], mtcars[[yvar]], pch = 19,
       xlab = xlab, ylab = ylab, ...)
  # Overlay the fitted line of yvar regressed on xvar.
  abline(lm( mtcars[[yvar]] ~ mtcars[[xvar]]))
}
# Example call: weight vs. displacement with custom labels.
mtcars_scatter_abline("wt", "disp", ylab = "Displacement", main = "Weight vs. Displacement")

# ggplot2 wrapper: scatter plot of two columns of `data`; the column names
# arrive as strings and are converted to symbols via !!sym() (tidy eval).
scatter_mtcars <- function(xvar, yvar, data){
  ggplot(data, aes(x = !!sym(xvar), y = !!sym(yvar))) +
    geom_point()
}
# Example call on mtcars.
scatter_mtcars("disp", "wt", data = mtcars)
|
efa62c0770666fd6e3eaae245af23a48fc4f3264
|
7879e0b7476cfefe1d83b8760932a2978296103f
|
/R/globals.R
|
a393707ca6347bf2c8761d2827cd493f2edfe361
|
[
"MIT"
] |
permissive
|
go-yohan/sppcredit
|
75914cb30f37c67e12190c9d2f8f51f542510626
|
19597394d0a8c489857476f50c00f9c2c39e02e6
|
refs/heads/master
| 2020-12-30T16:45:32.897205
| 2017-09-21T20:35:49
| 2017-09-21T20:35:49
| 91,020,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 630
|
r
|
globals.R
|
# Roots of SPP's public FTP server: TCR reference prices and day-ahead
# LMPs by settlement location.
FTPROOT_REFPRICE <- "ftp://pubftp.spp.org/TCR/ReferencePrices"
FTPROOT_DAPRICE <- "ftp://pubftp.spp.org/Markets/DA/LMP_By_SETTLEMENT_LOC"
# Hex color palette used for plotting.
# NOTE(review): "#535210" appears twice and "#0BC1AC"/"#0BC1AD" differ only
# in the last digit — possibly unintentional; confirm before relying on
# palette uniqueness.
TEAColors <- c(
  "#0BC1AC",
  "#919191",
  "#87E8E7",
  "#D0272A",
  "#5FCE5B",
  "#535210",
  "#F55165",
  "#277DCE",
  "#531033",
  "#101153",
  "#FFAD5C",
  "#533110",
  "#0BC1AD",
  "#FF3D3D",
  "#D86F4B",
  "#0B7BC1",
  "#7BC10B",
  "#535210"
)
getDfPathsFromPathList <- function(lstPaths) {
  # Build a two-column tibble (Source, Sink) from a list of path entries,
  # where each entry is a list with character elements 'Source' and 'Sink'.
  # (Fix: the original bound the tibble to a local `dfPaths` as the last
  # expression, returning the value invisibly; return it directly instead.)
  tibble::tibble(Source = purrr::map_chr(lstPaths, function(entry) entry[['Source']]),
                 Sink = purrr::map_chr(lstPaths, function(entry) entry[['Sink']]))
}
|
402561cdefbb0ba84c87a6dfd322929fa30cfbf9
|
1c740460e7ce2417cfc6208cd5996ca3fe7b3511
|
/Code/mlm/gng/NH_rt_mod12.R
|
26071d3ef33cc6ec002d66b809afa664f3013748
|
[] |
no_license
|
UNCDEPENdLab/PD_Inhibition_DDM
|
103d51f4e31614be01764b19ffbe52c7c8ac5967
|
4a95ead2ee95cc6197d5be2556eb3bdb616195f3
|
refs/heads/master
| 2023-06-30T02:34:12.758028
| 2021-08-05T14:00:40
| 2021-08-05T14:00:40
| 68,667,400
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,284
|
r
|
NH_rt_mod12.R
|
setwd("/Users/michael/Data_Analysis/PD_Inhibition_DDM/Code/mlm/gng")
if (!require(pacman)) { install.packages("pacman"); library(pacman) }
p_load(car, brms, nlme, lme4, loo, readr, tidyverse, emmeans, cowplot, glmmTMB, bbmle, broom, MplusAutomation)
# this is an admixture of Alison and Nate's code.
# Load data
# Data has already been cleaned and initial analyses completed by NH.
# For RT analyses, use variable rt_log_trim_grp
# For outcome analyses, use response -> this indicates whether they correctly Go vs No Go'd (can figure out whether they spressed space or not based on the stim)
# Cond refers to number of prior go trials
# Stim refers whether go or no go trial
# Use trial_z rather than trial (for scaling purposes)
# Will likely add in prev_rt in more complex analyses
#basedir <- "~/github_repos/PD_Inhibition_DDM"
basedir <- "~/Data_Analysis/PD_Inhibition_DDM"
#NH add from GNG RT analyses
gng <- read.csv(file.path(basedir, "Data/preprocessed/go_nogo_full_accCode.csv")) %>% mutate(bad_subj = if_else(subj_idx == 15, 1, 0)) #flagged 15 as bad as this was the one person who needed to be according to NH's preprocessing; will test how inclusion of 15 alters results
gng <- mutate(gng, cond= factor(cond, levels = c("OneGo","ThreeGo", "FiveGo", "SevenGo")))
gng <- gng %>% group_by(subj_idx) %>% mutate(
prev_rt = ifelse(lag(rt_log_trim_grp) == 0, NA, lag(rt_log_trim_grp)),
prev_rt_z = scale(prev_rt),
prev_error = ifelse(lag(response) == 1, 0, 1),
id = as.character(subj_idx))
# gng_rt <- dplyr::filter(gng, stim == "Go", response == 1) #only analyze GO RTs!
gng$cond_id <- with(gng, interaction(cond,id))
gng <- mutate(gng, block_trial_z = as.vector(scale(block_trial))) #%>% filter(response == 1, stim == "Go")
# gng <- mutate(gng, block_trial_z = as.vector(scale(block_trial)))
#pull self reps
# fscores <- read.csv(paste0(basedir,"/Outputs/factor_structure/PDDDM_factor_scores_5-18-20.csv")) %>% rename(subj_idx = subject)
fscores_update <- read.csv(file.path(basedir, "Code/factor_structure/AC_bsem_cfa_parcel_mod_savedata.csv")) %>% select(SUBJECT, DISINHIB.Median, ANTAG.Median) %>% rename(subj_idx = SUBJECT, DISINH = DISINHIB.Median, ANTAG = ANTAG.Median) #%>% mutate(DISINH = -1*DISINH) #reverse code Constraint to set in maladaptive direction.
cor(fscores_update) #make sure scores positively correlate
gng <- gng %>% left_join(fscores_update, by = "subj_idx")
# Self-report questionnaires, all keyed on `subject`:
# MPQ primary trait scales, dropping invalid protocols (exclude_MPQ == 1).
mpq <- get(load(file.path(basedir, "Data/preprocessed/MPQ_all_scored_final.RData"))) %>%
  filter(exclude_MPQ == 0) %>% select(subject, MPS_wbR:MPS_abR)
# SNAP temperament/trait scales, dropping invalid protocols.
snap <- get(load(file.path(basedir, "Data/preprocessed/SNAP_all_scored_final.RData"))) %>% filter(exclude_SNAP == 0) %>%
  select(subject, NEGTEMP:HDWK, DISINHP)
# K10 distress total (from SAS file), renamed to K10.
k10 <- sas7bdat::read.sas7bdat(file.path(basedir, "Data/SAS Originals/k10.sas7bdat")) %>%
  dplyr::select(subject, k10Total) %>% dplyr::rename(K10=k10Total)
# STAI anxiety total (from SAS file), renamed to STAI.
stai <- sas7bdat::read.sas7bdat(file.path(basedir, "Data/SAS Originals/stai.sas7bdat")) %>%
  dplyr::select(subject, staitotal) %>% dplyr::rename(STAI=staitotal)
# Keep only subjects with both MPQ and SNAP (inner join); K10/STAI are
# optional and may be NA (left joins).
self_reps <- inner_join(mpq, snap, by = "subject")
self_reps <- left_join(self_reps, k10, by = "subject")
self_reps <- left_join(self_reps, stai, by = "subject")
#mpluscmd <- "/Users/natehall/Desktop/Mplus/mplus"
mpluscmd <- "/Applications/Mplus/mplus"
# RT analyses
# block trial*trial and prev_rtXblock_trial effects on RT. homogonous covariance structure estimated per subject and variance estimated per cond. Random intercept and slope of trial and block_trial estmimated for each subject. Random slope of block trial estimated per condition within subject.
# Winning from NH updates: Drop nesting of condition for simplicity, as in flanker_acc_traits
# minfo[["m12"]] <- c(fixed="Trials from no-go, Trial number, Trials from no-go x Trial number", l2="Condition within subject: Trials from no-go\nSubject: Intercept, Trials from no-go, Trial")
#
# m12 <- lmer(rt_log_trim_grp ~ block_trial_z*trial_z + (1 + block_trial_z + trial_z| id) + (1 + block_trial_z | id:cond),
# na.action = na.exclude,
# data = gng_rt, control=lmerControl(optCtrl=list(maxfun=2e4)), REML = FALSE) #bump up iterations)
# summary(m12)
tofactor <- self_reps %>% select(subject, MPS_agR, AGG, MISTRUST, MPS_alR, MANIP,
PROPER, MPS_clR, IMPUL, MPS_tdR, MPS_acR, HDWK, K10, STAI) %>%
# mutate(IMPUL =-1*IMPUL) %>% #score toward constraint to make loadings upright. Update 6/25: reverse coding to scale positively with high externalizing
mutate(PROPER = -1*PROPER, MPS_tdR = -1*MPS_tdR, #P2
MPS_clR = -1*MPS_clR, #P3
HDWK = -1*HDWK, MPS_acR = -1*MPS_acR
) %>%
mutate_at(vars(-subject), list(~as.vector(scale(.)))) %>%
dplyr::rename(id=subject)
ggcorrplot::ggcorrplot(cor(tofactor %>% select(-id), use="pairwise")) #looks right
for_msem <- gng %>% mutate(id = subj_idx) %>% inner_join(tofactor, by="id") %>% ungroup() %>%
select(id, response, cond, stim, trial_z, rt_log_trim_grp, block_trial_z,
prev_error, MPS_agR, AGG, MISTRUST, MPS_alR, MANIP,
PROPER, MPS_clR, IMPUL, MPS_tdR, MPS_acR, HDWK, K10, STAI) %>%
mutate(cond = ifelse(cond == "OneGo", 0,
ifelse(cond == "ThreeGo", 1,
ifelse(cond == "FiveGo", 2,3))),
stim = ifelse(stim == "Go", 0,1))
head(for_msem)
for_msem_rt <- for_msem %>% dplyr::filter(stim == 0 & response == 1) #only analyze GO RTs!
lattice::histogram(~rt_log_trim_grp, for_msem_rt) #looks normal to me
xtabs(~id, for_msem_rt) #all similar
lattice::histogram(~block_trial_z, for_msem_rt) #okay
lattice::histogram(~trial_z, for_msem_rt) #relatively uniform given that trial unfolds linearly
# m12 <- lmer(rt_log_trim_grp ~ block_trial_z*trial_z + (1 + block_trial_z + trial_z| id) + (1 + block_trial_z | id:cond),
## MNH: Translate m12 to Mplus (no traits)
rt_with_mod_gng <- mplusObject(
TITLE = "GnG M12 Mplus",
DEFINE = "
t_ixn = block_trial_z*trial_z; !ixn of trials from no-go and overall trial;
",
VARIABLE = "
WITHIN = trial_z block_trial_z t_ixn;
USEVARIABLES = id rt_log_trim_grp block_trial_z trial_z t_ixn;
CLUSTER = id;
",
ANALYSIS = "
TYPE=TWOLEVEL RANDOM;
ESTIMATOR=BAYES;
BITERATIONS=(15000);
CHAINS=4;
PROCESSORS=4;
",
MODEL = "
%WITHIN%
b_trial | rt_log_trim_grp ON trial_z;
b_block_trial | rt_log_trim_grp ON block_trial_z;
rt_log_trim_grp ON t_ixn;
%BETWEEN%
[b_trial b_block_trial]; !slope means
b_trial b_block_trial; !slope variances
!slope correlations
b_trial b_block_trial WITH
b_trial b_block_trial;
[rt_log_trim_grp] (b0); !mean average log RT
",
PLOT = "TYPE = PLOT2;",
OUTPUT = "TECH1 TECH8 STANDARDIZED CINTERVAL;",
rdata = for_msem_rt
)
mout <- mplusModeler(rt_with_mod_gng, modelout="gng_m12_mplus.inp",
run=TRUE, Mplus_command = mpluscmd, hashfilename = FALSE)
# attempt 1, not looking too great. ---------------------------------------
# rt_with_mod_gng <- mplusObject(
# TITLE = "Antagonism, Disinhibition moderates gng random slopes and fixed effects interaction",
# DEFINE = "
# t_ixn = block_trial_z*trial_z; !ixn of trials from no-go and overall trial;
# p1=MEAN(MISTRUST MPS_alR);
# p2=MEAN(PROPER MPS_tdR);
# p3=MEAN(MPS_clR IMPUL);
# p4=MEAN(MPS_acR HDWK);
# p5=MEAN(MPS_agR AGG);
# ",
#
# VARIABLE = "
# WITHIN = trial_z block_trial_z;
# USEVARIABLES = id rt_log_trim_grp block_trial_z trial_z
# MANIP p1 p2 p3 p4 p5
# t_ixn;
# BETWEEN = MANIP p1 p2 p3 p4 p5;
#
# CLUSTER = id;
# ",
# ANALYSIS = "
# TYPE=TWOLEVEL RANDOM;
# ESTIMATOR=BAYES;
# BITERATIONS=(15000);
# BCONVERGENCE=.02;
# CHAINS=2;
# PROCESSORS=2;
# ",
#
#
# MODEL = "
# %WITHIN%
#
# b_trial | rt_log_trim_grp ON trial_z;
# b_block_trial | rt_log_trim_grp ON block_trial_z;
# b_trial_ixn | rt_log_trim_grp ON t_ixn;
#
#
# %BETWEEN%
# [b_trial b_block_trial b_trial_ixn]; !slope means
# b_trial b_block_trial b_trial_ixn; !slope variances
#
# !slope correlations
# b_trial b_block_trial b_trial_ixn WITH
# b_trial b_block_trial b_trial_ixn;
#
# [rt_log_trim_grp] (b0); !mean average log RT
# ![t_ixn] (bixn); !mean ixn term at between subjects level
#
#
# !TRAIT MODEL
#
# antag BY
# p5* ! MPS_agR AGG
# p1 ! MISTRUST MPS_alR
# MANIP;
# antag@1;
#
# disinhib BY
# p2* !PROPER MPS_tdR
# p3 !MPS_clR IMPUL
# p4 !MPS_acR HDWK;
# p1;
# disinhib@1;
#
# antag WITH disinhib;
#
# !disinhib BY p1; !modification index cross-loading for fit
#
# !TRAIT MODERATION
#
# ! trait moderates flanker performance
# b_block_trial ON antag (b_bA)
# disinhib (b_bD);
#
# b_trial ON antag (t_bA)
# disinhib (t_bD);
#
# b_trial_ixn ON antag (ixn_bA)
# disinhib (ixn_bD);
#
# !N.B. Leaving out the association of antag with rt_inv
# !omits a hugely important relationship.
# !Thus, allow antag as a predictor of average (person) RT
#
# rt_log_trim_grp ON antag (bA)
# disinhib (bD);
#
# ",
# PLOT = "TYPE = PLOT2;",
# OUTPUT = "TECH1 TECH8 STANDARDIZED CINTERVAL;",
# rdata = for_msem_rt
# )
# mout <- mplusModeler(rt_with_mod_gng,
# modelout="rt_with_mod_gng.inp", run=TRUE, Mplus_command = mpluscmd, hashfilename = FALSE)
#
#
# flextable(mout$results$parameters$stdyx.standardized %>%
# filter(!paramHeader %in% c("Variances")) %>%
# select(-sig, -posterior_sd) %>%
# mutate(pval=2*pval, #convert to 2-tailed
# pval=if_else(pval < .05, as.character(paste0(pval, "*")), as.character(pval)))
# ) %>% autofit()# %>% save_as_docx(path = "~/Desktop/flanker_rt_traits.docx")
#
# wrong to fit ixn as random ----------------------------------------------
rt_with_mod_gng2 <- mplusObject(
TITLE = "Antagonism, Disinhibition moderates gng random slopes and fixed effects interaction",
DEFINE = "
t_ixn = block_trial_z*trial_z; !ixn of trials from no-go and overall trial;
p1=MEAN(MISTRUST MPS_alR);
p2=MEAN(PROPER MPS_tdR);
p3=MEAN(MPS_clR IMPUL);
p4=MEAN(MPS_acR HDWK);
p5=MEAN(MPS_agR AGG);
CENTER trial_z block_trial_z t_ixn (GRANDMEAN); !make intercepts easy to understand
",
VARIABLE = "
WITHIN = trial_z block_trial_z t_ixn;
USEVARIABLES = id rt_log_trim_grp block_trial_z trial_z
MANIP p1 p2 p3 p4 p5 t_ixn;
BETWEEN = MANIP p1 p2 p3 p4 p5;
CLUSTER = id;
",
ANALYSIS = "
TYPE=TWOLEVEL RANDOM;
ESTIMATOR=BAYES;
BITERATIONS=(30000);
CHAINS=4;
PROCESSORS=4;
",
MODEL = "
%WITHIN%
b_trial | rt_log_trim_grp ON trial_z;
b_block_trial | rt_log_trim_grp ON block_trial_z;
rt_log_trim_grp ON t_ixn;
%BETWEEN%
[b_trial b_block_trial]; !slope means
b_trial b_block_trial; !slope variances
!slope correlations
b_trial b_block_trial WITH
b_trial b_block_trial;
[rt_log_trim_grp] (b0); !mean average log RT
!TRAIT MODEL
antag BY
p5* ! MPS_agR AGG
p1 ! MISTRUST MPS_alR
MANIP;
antag@1;
disinhib BY
p2* !PROPER MPS_tdR
p3 !MPS_clR IMPUL
p4 !MPS_acR HDWK
p1; !cross-load
disinhib@1;
antag WITH disinhib;
!TRAIT MODERATION
! trait moderates flanker performance
b_block_trial ON antag (b_bA)
disinhib (b_bD);
b_trial ON antag (t_bA)
disinhib (t_bD);
!this would only make sense if we had meaningful between-person variation in
!the interaction and were decomposing person emans of the interaction for analysis.
!or, we could add a random slope of the interaction and model that here b_ixn ON ...
!but as written, this doesn't make sense.
!t_ixn ON antag (ixn_bA)
! disinhib (ixn_bD);
!average RT on traits
rt_log_trim_grp ON antag (bA)
disinhib (bD);
",
PLOT = "TYPE = PLOT2;",
OUTPUT = "TECH1 TECH8 STANDARDIZED CINTERVAL;",
rdata = for_msem_rt
)
mout <- mplusModeler(rt_with_mod_gng2, modelout="rt_with_mod_gng2.inp",
run=TRUE, Mplus_command = mpluscmd, hashfilename = FALSE)
flextable(mout$results$parameters$stdyx.standardized %>%
filter(!paramHeader %in% c("Variances")) %>%
select(-sig, -posterior_sd) %>%
mutate(pval=2*pval, #convert to 2-tailed
pval=if_else(pval < .05, as.character(paste0(pval, "*")), as.character(pval)))
) %>% autofit()# %>% save_as_docx(path = "~/Desktop/flanker_rt_traits.docx")
# try dropping ixn? -------------------------------------------------------
rt_with_mod_gng3 <- mplusObject(
TITLE = "Antagonism, Disinhibition moderates gng random slopes and fixed effects interaction",
DEFINE = "
!t_ixn = block_trial_z*trial_z; !ixn of trials from no-go and overall trial;
p1=MEAN(MISTRUST MPS_alR);
p2=MEAN(PROPER MPS_tdR);
p3=MEAN(MPS_clR IMPUL);
p4=MEAN(MPS_acR HDWK);
p5=MEAN(MPS_agR AGG);
",
VARIABLE = "
WITHIN = trial_z block_trial_z;
USEVARIABLES = id rt_log_trim_grp block_trial_z trial_z
MANIP p1 p2 p3 p4 p5;
!t_ixn;
BETWEEN = MANIP p1 p2 p3 p4 p5;
CLUSTER = id;
",
ANALYSIS = "
TYPE=TWOLEVEL RANDOM;
ESTIMATOR=BAYES;
BITERATIONS=(15000);
BCONVERGENCE=.02;
CHAINS=2;
PROCESSORS=2;
",
MODEL = "
%WITHIN%
b_trial | rt_log_trim_grp ON trial_z;
b_block_trial | rt_log_trim_grp ON block_trial_z;
%BETWEEN%
[b_trial b_block_trial]; !slope means
b_trial b_block_trial; !slope variances
!slope correlations
b_trial b_block_trial WITH
b_trial b_block_trial;
[rt_log_trim_grp] (b0); !mean average log RT
!TRAIT MODEL
antag BY
p5* ! MPS_agR AGG
p1 ! MISTRUST MPS_alR
MANIP;
antag@1;
disinhib BY
p2* !PROPER MPS_tdR
p3 !MPS_clR IMPUL
p4 !MPS_acR HDWK;
p1;
disinhib@1;
antag WITH disinhib;
!disinhib BY p1; !modification index cross-loading for fit
!TRAIT MODERATION
! trait moderates flanker performance
b_block_trial ON antag (b_bA)
disinhib (b_bD);
b_trial ON antag (t_bA)
disinhib (t_bD);
!t_ixn ON antag (ixn_bA)
! disinhib (ixn_bD);
!N.B. Leaving out the association of antag with rt_inv
!omits a hugely important relationship.
!Thus, allow antag as a predictor of average (person) RT
rt_log_trim_grp ON antag (bA)
disinhib (bD);
",
PLOT = "TYPE = PLOT2;",
OUTPUT = "TECH1 TECH8 STANDARDIZED CINTERVAL;",
rdata = for_msem
)
mout <- mplusModeler(rt_with_mod_gng3,
modelout="rt_with_mod_gng3.inp", run=TRUE, Mplus_command = mpluscmd, hashfilename = FALSE)
flextable(mout$results$parameters$stdyx.standardized %>%
filter(!paramHeader %in% c("Variances")) %>%
select(-sig, -posterior_sd) %>%
mutate(pval=2*pval, #convert to 2-tailed
pval=if_else(pval < .05, as.character(paste0(pval, "*")), as.character(pval)))
) %>% autofit()# %>% save_as_docx(path = "~/Desktop/flanker_rt_traits.docx")
# ACCURACY ANALYSES -------------------------------------------------------
#winning model: m19
# fixed: stimulus, condition, trial, trialxstimulus
# random: stimulus
acc_with_mod_gng <- mplusObject(
TITLE = "Antagonism, Disinhibition GNG ACC",
DEFINE = "
ts_ixn = stim*trial_z; !ixn of overall trial and stimulus;
p1=MEAN(MISTRUST MPS_alR);
p2=MEAN(PROPER MPS_tdR);
p3=MEAN(MPS_clR IMPUL);
p4=MEAN(MPS_acR HDWK);
p5=MEAN(MPS_agR AGG);
",
VARIABLE = "
! WITHIN = stim; !if my understanding is correct, this will only specify an effect at the within-level
USEVARIABLES = id response stim cond trial_z
MANIP p1 p2 p3 p4 p5
ts_ixn;
BETWEEN = MANIP p1 p2 p3 p4 p5
cond trial_z
ts_ixn;
CLUSTER = id;
",
ANALYSIS = "
TYPE=TWOLEVEL RANDOM;
ESTIMATOR=BAYES;
BITERATIONS=(15000);
BCONVERGENCE=.02;
CHAINS=2;
PROCESSORS=2;
",
MODEL = "
%WITHIN%
b_stim | response ON stim;
b_cond | response ON cond;
b_trial | response ON trial_z
b_ts_ixn | response ON ts_ixn
%BETWEEN%
[b_stim b_cond b_trial b_ts_ixn]; !slope means
b_stim b_cond b_trial b_ts_ixn; !slope variances
[response] (b0); !mean average ACC
!TRAIT MODEL
antag BY
p5* ! MPS_agR AGG
p1 ! MISTRUST MPS_alR
MANIP;
antag@1;
disinhib BY
p2* !PROPER MPS_tdR
p3 !MPS_clR IMPUL
p4 !MPS_acR HDWK;
p1;
disinhib@1;
antag WITH disinhib;
!disinhib BY p1; !modification index cross-loading for fit. Included above.
!TRAIT MODERATION
! trait moderates flanker performance
b_stim ON antag (b_bA)
disinhib (b_bD);
ts_ixn ON antag (ixn_bA)
disinhib (ixn_bD);
cond ON antag
disinhib;
!N.B. Leaving out the association of antag with rt_inv
!omits a hugely important relationship.
!Thus, allow antag as a predictor of average (person) RT
response ON antag (bA)
disinhib (bD);
",
PLOT = "TYPE = PLOT2;",
OUTPUT = "TECH1 TECH8 STANDARDIZED CINTERVAL;",
rdata = for_msem
)
mout <- mplusModeler(acc_with_mod_gng,
modelout="acc_with_mod_gng.inp", run=TRUE, Mplus_command = mpluscmd, hashfilename = FALSE)
flextable(mout$results$parameters$stdyx.standardized %>%
filter(!paramHeader %in% c("Variances")) %>%
select(-sig, -posterior_sd) %>%
mutate(pval=2*pval, #convert to 2-tailed
pval=if_else(pval < .05, as.character(paste0(pval, "*")), as.character(pval)))
) %>% autofit()# %>% save_as_docx(path = "~/Desktop/flanker_rt_traits.docx")
|
feab813b9492768c2e9c99a00dd166d22a1e0d32
|
0eb28b91d153379551cdd235fd05dd50ca0e0f83
|
/R/nyc311.R
|
c152d8f5fca8fb529238bde5a860bdf8d4d9aceb
|
[] |
no_license
|
thisisdaryn/workshop
|
4bac3516bc1dba76919762296ae57b56a0185808
|
a491731090cbaf3245280a12ecef55ba30dd46be
|
refs/heads/master
| 2020-09-10T15:32:03.747382
| 2020-01-17T19:13:01
| 2020-01-17T19:13:01
| 221,739,853
| 0
| 0
| null | 2019-11-21T20:43:20
| 2019-11-14T16:23:59
|
JavaScript
|
UTF-8
|
R
| false
| false
| 655
|
r
|
nyc311.R
|
#' NYC 311 Service Requests for Jan 1-14 2020
#'
#'
#'
#' @source <https://nycopendata.socrata.com/Social-Services/311-Service-Requests-from-2010-to-Present/erm2-nwe9>
#' @format Data frame with columns
#' \describe{
#' \item{Key}{Case identifier}
#' \item{Created}{Date Created}
#' \item{Closed}{Date Closed}
#' \item{Agency}{NYC Agency Acronym}
#' \item{Type}{Type of complaint}
#' \item{LocType}{Type of location}
#' \item{Zip}{ZIP code of complaint}
#' \item{Status}{Complaint status: Pending, Assigned, Started, In Progress, Open or Closed }
#' \item{Borough}{Borough of NYC}
#' \item{Latitude}{Latitude}
#' \item{Longitude}{Longitude}
#' }
"nyc311"
|
af208a9e9d4ef388eff27e1d56cf058ed7a10b48
|
12742f268035aec07a49f58a93affa73870b2f20
|
/Lab1.R
|
3c953ec615a3d574e442c16965e72f077c660afd
|
[] |
no_license
|
nguyench95/R-Labs
|
1fa6bb8b2d64f608fb31026a47f52092086e2e89
|
291ef8d589bd158046e45669e30d283db1ea5900
|
refs/heads/master
| 2022-07-15T08:49:14.117816
| 2020-05-11T20:15:17
| 2020-05-11T20:15:17
| 263,147,968
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,525
|
r
|
Lab1.R
|
##########################################################################
# CSC-315, Fall 2019
# Lab 1: R programming
# Name:
##########################################################################
##########################################################################
# Add R code to the script below in order to explicitly output the
# answers to the following questions. Turn in this assignment
# by creating a Notebook and turning in a physical copy.
##########################################################################
#1) What is 34+29*12
#1) What is 34+29*12
34 + 29 * 12

#2) What is the sum of all integers between (and including) -100 and +100.
index <- -100:100
sum(index)  # 0 by symmetry

#3) Create a vector that contains the numbers 1-10.
vector <- 1:10

#4) Change the 3rd value of the above vector to 99
vector[3] <- 99

#5) Create a vector that contains the numbers 1-10, 17, and 25.
vector2 <- c(1:10, 17, 25)

#6) Create a vector that contains two elements, your first name and last name
name <- c("Christopher", "Nguyen")

#7) Exam-grade matrix: rows are students, columns are exams.
m <- matrix(c(87, 89, 91, 76, 88, 83), byrow = TRUE, nrow = 2,
            dimnames = list(c("Joseph Smith", "Amy Davis"),
                            c("ExamI", "ExamII", "ExamIII")))

#8) Calculate the average grade for Amy Davis, using the 'mean' function.
avggrade <- mean(m[2, ])

#9) Rename only the first row from "Joseph Smith" to "Joe Smith".
rownames(m)[rownames(m) == "Joseph Smith"] <- "Joe Smith"

#10) List with (1) name vector and (2) major.
# (Fix: the original used `name <- c(...)` inside list(), which creates an
# unnamed element and a side-effect assignment; `=` names the element.)
person <- list(name = c("Christopher", "Nguyen"), major = "Computer Science")

#11) Read in the survey.txt file.
d <- read.csv("https://gdancik.github.io/CSC-315/data/datasets/survey.txt")
colnames(d)  # inspect column names; read.csv turns "College GPA" into College.GPA
summary(d)   # summarizes each column; results depend on column type

#12) How many individuals surveyed did not use FB (0 hours/week)?
# (Fix: the original took length() of a subset data frame, which counts
# columns, not respondents.)
noFBcount <- sum(d$FB == 0)
print(noFBcount)

#13) GPAs of the three students with the lowest College GPAs.
# (Fix: the original `d[order(College GPA),]` was a syntax error; sort the
# GPA column directly and take the first three values.)
lowest3 <- sort(d$College.GPA)[1:3]
print(lowest3)

#14) GPAs of the three students with the highest college GPAs.
highest3 <- sort(d$College.GPA, decreasing = TRUE)[1:3]
print(highest3)

#15) Use dplyr's 'filter' to keep students with a 3.5 college GPA or higher.
library(dplyr)
surveyA <- filter(d, College.GPA >= 3.5)
print(surveyA)

#16) Display the Gender column from surveyA and count males/females.
print(surveyA$Gender)
table(surveyA$Gender)  # counts per gender
females <- filter(surveyA, Gender == "Female")
males <- filter(surveyA, Gender == "Male")
nrow(females)
nrow(males)
|
5b944194416e1a231d194022d53bd70691bd4fae
|
6cbea2b09ccaa5b50a7671d396285737923cf248
|
/Laboratori di Esercitazione/Training Lezione/Training 1 - ML1.R
|
b53bdb15f2237d7b5f3bd7fbe4153b6316fd2981
|
[] |
no_license
|
AngelusGi/MachineLearningDivino
|
c4c6eb6124fdda038545985cde265249d7b26992
|
86547b3764d8324e59369e633bfd51cd0563028f
|
refs/heads/master
| 2022-04-22T20:31:47.023982
| 2020-04-05T10:39:32
| 2020-04-05T10:39:32
| 233,409,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,223
|
r
|
Training 1 - ML1.R
|
#Simulazione di modello di Regressione lineare
# MODELLO : Y=(B0+B1X)+E
# n : dimensione training set
# m : dimensione test set
# X : reddito ( tra 1000 e 2000 )
# Y : consumi
# A :
# Simulation of a simple linear-regression workflow.
#   MODEL: Y = (B0 + B1*X) + E
#   n = training-set size, m = test-set size
#   X = income (around 1000-2000), Y = consumption
B0<-250 #fixed costs (intercept)
B1<-0.7 #70% of income goes to consumption (slope)
n<-80 #out of 100 observations, 80 will be used for training
m<-20 #the remaining ones will be used for testing
set.seed(1) #fix a common seed so everyone generates the same pseudo-random data
X<-rnorm(100, 1500, 100) #simulated incomes
#Y<-B0+B1*x
E<-rnorm(100,0,50) #Gaussian noise
Y<-B0+B1*X+E #data-generating model
A<-matrix(c(X,Y),100,2, byrow = F) #pack the pseudo-random data into a matrix
A<-as.data.frame(A) #turn it into a data frame
colnames(A)<-c("reddito","consumi") #rename the columns (income, consumption)
#plot(X,Y)
A.training<-A[1:80,] #take the first 80 rows as the training set
#the comma is intentional: by syntax we are selecting rows, and leaving
#the part after the comma empty keeps all the columns
A.test<-A[81:100,] #the remaining rows form the test set
output.lm<-lm(consumi~reddito, data = A.training) #fit the linear model (lm) on the training data
summary(output.lm) #model summary
plot(A.training)
abline(output.lm) #add the fitted lm line, i.e. the estimated regression, to the plot
names(output.lm)
is.list(output.lm) #check whether output.lm is a list
output.lm$coefficients #print the coefficients
res<-output.lm$residuals #store the residuals in res
plot(res) #plot the residuals
abline(h=0)
hist(res) #histogram
#to judge whether a fit is good, inspect the residuals:
#the more symmetric they are around zero, the better the model
#------------------------Test part----------------------#
A.test
plot(A.test)
abline(output.lm)
#compute the values predicted by the fitted line,
#using output.lm (the model estimated on the training set)
#on the new data to test the prediction
Y<-predict.lm(output.lm, A.test) #predicted values
Z<-A.test$consumi #observed consumption values
X<-A.test$reddito
plot(X,Y)
abline(output.lm)
sum((Z-Y)^2) #value useful to decide whether one procedure beats another (RSS)
plot(Z,Y)
abline(coef = c(0,1))
#---------------Chapter 3 of the book--------------------#
#In the package pane, tick the MASS library
data() #list all available datasets
attach(Boston) #choose the Boston dataset
#force("Boston") #in case it was not loaded
#type Boston alone to check whether all the data are displayed
#lm(formula = medv~. , data =Boston)
output.lm<-lm(formula = medv ~ . , data = Boston )
summary(output.lm)
#----------------Exercise--------------------------------#
#Boston dataset: first 400 records as training, 106 records for testing;
#using at most 5 variables, find the best model.
#When choosing variables pay attention to the last two columns:
#remove the ones with the highest Pr(>|t|) and no significance stars
output.lm<-lm(formula = medv ~ . -age-indus-chas-zn, data = Boston)
summary(output.lm)
#correlation function, to see whether two variables depend on each other
cor(Boston)
#generates a matrix showing the correlation between the variables
|
245637a0bbd2d2befe386559b2dd2bfd84cccfa0
|
6be70ffdb95ed626d05b5ef598b842c5864bac4d
|
/R/calc_congress.R
|
29eaa53b9647ad0019a25f73709c9f180c8d3b3d
|
[] |
no_license
|
Hershberger/partycalls
|
c4f7a539cacd3120bf6b0bfade327f269898105a
|
8d9dc31dd3136eae384a8503ba71832c78139870
|
refs/heads/master
| 2021-09-22T17:54:29.106667
| 2018-09-12T21:16:56
| 2018-09-12T21:16:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 219
|
r
|
calc_congress.R
|
#' Calculate congress corresponding to a year
#'
#' Simple conversion between year and congress. Each Congress spans two
#' calendar years starting in an odd year (the 1st Congress convened in
#' 1789), so the result is rounded down to cover both years of a term.
#' @param year integer year (1789 or later)
#' @return integer congress number
#' @export
calc_congress <- function(year)
{
  # floor() maps the odd (first) year of a term to the same congress as the
  # following even year; the previous formula (year - 1788) / 2 returned
  # fractional values such as 115.5 for odd years like 2019.
  floor((year - 1787) / 2)
}
|
340fdbeb63bfea524f765e16c309281f8f780d3e
|
f2eadd5c3d82192a2f8a6a8b588364b102185f4f
|
/source/Practice_set.1/step2.SCRAN_normalization_featureSelection.r
|
c4cafea5ca223e7cc9d68cc55d721b3f5a6816d4
|
[] |
no_license
|
mgood2/BIML-2019-SingleCellRNAseq-
|
2a5ce49a44f77828be76375389649bf2656d2bee
|
2d1e993afe1d445f8e93b7e8b746b2383ce3791c
|
refs/heads/master
| 2020-05-16T09:03:25.624452
| 2019-02-23T06:30:54
| 2019-02-23T06:30:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,623
|
r
|
step2.SCRAN_normalization_featureSelection.r
|
# Step 2: scran-based normalization and highly-variable-gene (HVG) selection
# for a filtered SingleCellExperiment.
# NOTE(review): depends on an object `sce` (with a 0/1 column `filtering`)
# created in an earlier step of this practice set -- confirm it is in scope.
# filteredSCEset <- readRDS(file = paste(PATH_PBMC_dataset, "filteredSCEset.rds", sep = "/"))
filteredSCEset <- sce[, which(sce$filtering==1)]
library(scran)
# cluster cells first so size factors are pooled within similar cells
clusters <- quickCluster(filteredSCEset, method="igraph")
filteredSCEset <- computeSumFactors(filteredSCEset, cluster=clusters)
filteredSCEset <- normalize(filteredSCEset)
# carry gene/cell names over to the logcounts assay
rownames(filteredSCEset@assays$data$logcounts) <- rownames(filteredSCEset)
colnames(filteredSCEset@assays$data$logcounts) <- colnames(filteredSCEset)
# plot(sizeFactors(filteredSCEset), (filteredSCEset$total_counts)/1000, log="xy",
#     ylab="Library size (kilo)", xlab = "Size factor")
### filtering genes
library(Matrix)
# keep_feature <- rowMeans(filteredSCEset@assays$data$logcounts)!=0
# keep genes with non-zero log-counts in more than 3 cells
keep_feature <- rowSums((filteredSCEset@assays$data$logcounts) != 0) > 3
filteredSCEset <- filteredSCEset[keep_feature, ]
### Select Highly variable genes (feature selection)
var.fit <- trendVar(filteredSCEset, parametric=TRUE, use.spikes=FALSE)#, design = batchDesign)
var.out <- decomposeVar(filteredSCEset, var.fit)
# HVGs: significant at FDR < 0.05 with a biological variance component > .01
hvg <- var.out[which(var.out$FDR < 0.05 & var.out$bio > .01),]
dim(hvg)
saveRDS(filteredSCEset, file = "filteredSCEset.rds")
saveRDS(hvg, file = "hvg.rds")
# Mean-variance plot: technical trend in blue, highlighted genes in red.
# NOTE(review): the plot highlights bio > 0.1 while hvg above used bio > .01
# -- the thresholds differ; confirm which is intended.
plot(y= var.out$total, x=var.out$mean, pch=16, cex=0.3,
ylab="Variance of log-expression", xlab="Mean log-expression")
o <- order(var.out$mean)
lines(y=var.out$tech[o], x=var.out$mean[o], col="dodgerblue", lwd=2)
points(y=var.out$total[var.out$FDR <=0.05 & var.out$bio > 0.1],
x=var.out$mean[var.out$FDR <=0.05 & var.out$bio > 0.1],
pch=16, cex=0.3, col="red")
|
90e219f31c1a7b771c3aacf1e5c8b2025ed3dec5
|
a44d50f5407c1a6d7ed75c589bb06b591be205e8
|
/R/skill_scores.R
|
5232b52c943ef9200bb3cb1ab21125972b57e368
|
[] |
no_license
|
laubblatt/cleaRskyQuantileRegression
|
622462a481398ebbb3391da602c2cecd5b9e39a5
|
8edb71d94538a0680a4fb0e0d507cb16902a6d1e
|
refs/heads/master
| 2020-07-12T02:20:12.030386
| 2019-11-26T09:05:09
| 2019-11-26T09:05:09
| 204,692,274
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,780
|
r
|
skill_scores.R
|
#' Collection of skill scores
#'
#'
#' @filename skill_scores.R
#' @export MSE_SkillScore
#' @export rmse
#'
NULL
MSE_SkillScore = function(o,p,ref) {
#' Calc the Skill Score with the Mean Squared Error as skill
#'
#' Can be interpreted as reduction of variance
#' Skill of 1 is perfect; Skill of 0 no improvement
#'
#' @version 0.1 2018-12 new simple fun used in BSRN clear sky study
#' @references http://www.cawcr.gov.au/projects/verification/
#' @param o vector of observation
#' @param p vector of prediction whose skill is evaluated
#' @param ref the reference prediction (naive, prior etc)
#' @value Skill score of MSE
#' @details rows with missing values are removed.
#'   NOTE(review): uses data.table(), which is not loaded in this file --
#'   the caller must attach the data.table package.
# Skill score - equation: SS = (MSE_p - MSE_ref) / (0 - MSE_ref) = 1 - MSE_p/MSE_ref
# Answers the question: What is the relative improvement of the forecast over some reference forecast?
# Range: Lower bound depends on what score is being used to compute skill and what reference forecast is used,
# but upper bound is always 1; 0 indicates no improvement over the reference forecast. Perfect score: 1.
# Characteristics: Implies information about the value or worth of a forecast relative to an alternative (reference) forecast.
# In meteorology the reference forecast is usually persistence (no change from most recent observation) or climatology.
# The skill score can be unstable for small sample sizes.
# When MSE is the score used in the above expression then the resulting statistic is called the reduction of variance.
# align the three series row-wise, then drop rows with any missing value
dtopr = data.table(o,p,ref)
dtnonan = na.omit(dtopr)
# NOTE(review): this bare expression is a no-op inside a function body
# (auto-printing only happens at top level)
dtnonan
# mean squared error of the evaluated prediction
(MSEp = dtnonan[ , 1/.N * sum( (p -o)^2 )])
# mean squared error of the reference prediction
(MSEref = dtnonan[ , 1/.N * sum( (ref -o)^2 )])
# skill score: equivalent to 1 - MSEp/MSEref ("reduction of variance")
(MSEp - MSEref) / ( 0 - MSEref)
}
# Root-mean-square error between observations and predictions (NAs removed).
rmse <- function(obs, pred) {
  squared_err <- (obs - pred)^2
  sqrt(mean(squared_err, na.rm = TRUE))
}
|
3fc44e2ebf1954da003d478f57d50edd6dbbcb45
|
e907785f763bcfabec58dd01278d6996cd2525ab
|
/man/broad_zhenghe.Rd
|
d21e0993e019bd6e1f8cb204a4475a91029b43a0
|
[] |
no_license
|
cran/SPMS
|
2a8a9796677f908cae849e3a61c183394ef0c980
|
7f337e242f6f743ec4d6b88bfb1a264b63bd1ea8
|
refs/heads/master
| 2016-09-05T16:19:08.045425
| 2013-05-08T00:00:00
| 2013-05-08T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,633
|
rd
|
broad_zhenghe.Rd
|
\name{broad_zhenghe}
\alias{broad_zhenghe}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Remove redundancy from the text result
}
\description{
Function that applies the rules we formulated to remove redundant entries from the result.
}
\usage{
broad_zhenghe(paths, dORb, inG)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{paths}{
The paths is fixed, which is set by setPath function at first.
}
\item{dORb}{
Parameter that lets users choose whether to run the depth-first or breadth-first functions.
}
\item{inG}{
According to the interest genes number(inG) in sub-pathway to limit the result.
}
}
\value{
The de-duplicated text result, which is also written to txtResult.txt.
}
\references{
None
}
\author{
Xiaomeng Ni
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
#--broad_zhenghe(paths,"broad",3)
## The function is currently defined as
function (paths, dORb, inG)
{
w <- sub(" ", "", paste(paths, "/temp"))
setwd(w)
if (length(which("resulttemp" == dir())) > 0) {
n <- sub(" ", "", paste(w, "/resulttemp"))
setwd(n)
unlink(dir())
}
else {
dir.create("resulttemp")
}
setwd(paths)
if (length(which("txtResult" == dir())) > 0) {
n <- sub(" ", "", paste(paths, "/txtResult"))
setwd(n)
unlink(dir())
}
else {
dir.create("txtResult")
}
num <- 1
a <- sub(" ", "", paste(paths, "/temp/unUse"))
setwd(a)
for (z in 1:length(dir(a))) {
if (length(dir(a)) == 0)
break
name <- substr(dir(a)[1], 1, 8)
pathway <- dir(a)[1]
a <- sub(" ", "", paste(paths, "/temp/unUse1"))
setwd(a)
result11 <- read.delim(pathway, sep = "\t", header = FALSE,
quote = "", na.strings = "NA", stringsAsFactors = FALSE)
unlink(pathway)
b <- sub(" ", "", paste(paths, "/temp/unUse"))
setwd(b)
result10 <- read.table(pathway, sep = "\t", header = FALSE,
na.strings = "NA", stringsAsFactors = FALSE)
unlink(pathway)
result1 <- result11
result <- result10
if (dim(result)[1] > 1) {
for (i in 1:(dim(result)[1] - 1)) {
for (j in (i + 1):dim(result)[1]) {
if (result[i, 4] == result[j, 4] && result[i,
5] == result[j, 5]) {
n <- c()
m <- c()
for (l in 4:(4 + result[i, 5] - 1)) {
n <- c(n, as.character(result1[i, l]))
}
for (p in 4:(4 + result[j, 5] - 1)) {
m <- c(m, as.character(result1[j, p]))
}
if (length(pmatch(n, m)) == result[i, 5] &&
length(pmatch(n, m)) == result[j, 5] &&
(is.na(sum(pmatch(n, m))) == FALSE)) {
result[j, ] = 0
result1[j, ] = 0
}
}
}
}
}
result <- unique(result)
result1 <- unique(result1)
if (dim(result)[1] > 1) {
for (i in 1:(dim(result)[1] - 1)) {
for (j in (i + 1):dim(result)[1]) {
if (result[i, 4] == result[j, 4] && result[i,
5] <= result[j, 5]) {
n <- c()
m <- c()
for (l in 10:(10 + result[i, 4] - 1)) {
n <- c(n, as.character(result[i, l]))
}
for (p in 10:(10 + result[j, 4] - 1)) {
m <- c(m, as.character(result[j, p]))
}
if (length(pmatch(n, m)) == result[i, 4] &&
(is.na(sum(pmatch(n, m))) == FALSE)) {
result[j, ] = 0
result1[j, ] = 0
}
}
if (result[i, 4] == result[j, 4] && result[i,
5] >= result[j, 5]) {
n <- c()
m <- c()
for (l in 10:(10 + result[i, 4] - 1)) {
n <- c(n, as.character(result[i, l]))
}
for (p in 10:(10 + result[j, 4] - 1)) {
m <- c(m, as.character(result[j, p]))
}
if (length(pmatch(m, n)) == result[j, 4] &&
(is.na(sum(pmatch(m, n))) == FALSE)) {
result[i, ] = 0
result1[i, ] = 0
}
}
}
}
}
result <- unique(result)
result1 <- unique(result1)
if (dim(result)[1] > 1) {
for (i in 1:(dim(result)[1] - 1)) {
for (j in (i + 1):dim(result)[1]) {
if (result[i, 4] < result[j, 4] && ((result[i,
5] - result[i, 4]) == (result[j, 5] - result[j,
4]))) {
n <- c()
m <- c()
for (l in 4:(4 + result[i, 5] - 1)) {
n <- c(n, as.character(result1[i, l]))
}
for (p in 4:(4 + result[j, 5] - 1)) {
m <- c(m, as.character(result1[j, p]))
}
if (length(pmatch(n, m)) == result[i, 5]) {
result[i, ] = 0
result1[i, ] = 0
}
}
if (result[i, 4] > result[j, 4] && ((result[i,
5] - result[i, 4]) == (result[j, 5] - result[j,
4]))) {
n <- c()
m <- c()
for (l in 4:(4 + result[i, 5] - 1)) {
n <- c(n, as.character(result1[i, l]))
}
for (p in 4:(4 + result[j, 5] - 1)) {
m <- c(m, as.character(result1[j, p]))
}
if (length(pmatch(m, n)) == result[j, 5]) {
result[j, ] = 0
result1[j, ] = 0
}
}
}
}
}
result <- unique(result)
result1 <- unique(result1)
for (i in 1:dim(result)[1]) {
if (result[i, 4] < inG) {
result[i, ] = 0
result1[i, ] = 0
}
}
result <- unique(result)
result1 <- unique(result1)
if (max(result[, 4]) == 0)
next
for (i in 1:dim(result)[1]) {
mark1 <- c()
for (j in 1:result[i, 4]) {
mark1 <- c(mark1, result[i, (9 + j)])
}
if (length(unique(mark1)) == 1) {
result[i, ] = 0
result1[i, ] = 0
}
else {
mark2 <- c()
mark2 <- unique(mark1)
for (j in 1:length(mark2)) {
k <- 0
k = which(mark1 == mark2[j])
if (length(k)/length(mark1) >= 0.5) {
result[i, ] = 0
result1[i, ] = 0
break
}
}
}
}
result <- unique(result)
result1 <- unique(result1)
for (i in 1:dim(result)[1]) {
if (result[i, 1] == 0) {
result <- result[-i, ]
result1 <- result1[-i, ]
break
}
}
if (dim(result)[1] == 0)
next
for (i in 1:dim(result)[1]) {
result[i, 2] = num
result[i, 9] <- as.character(gsub(" ", "", paste("broad_subPathwayGraph(",
"'", paths, "'", ",", i, ",", "'", name, "'",
")")))
num <- num + 1
}
c <- sub(" ", "", paste(paths, "/txtResult"))
setwd(c)
write.table(result, "txtResult.txt", sep = "\t", col.names = FALSE,
append = TRUE, row.names = FALSE, quote = TRUE)
d <- sub(" ", "", paste(paths, "/temp/resulttemp"))
setwd(d)
write.table(result1, pathway, sep = "\t", col.names = FALSE,
append = TRUE, row.names = FALSE, na = "NA", quote = FALSE)
}
n1 <- sub(" ", "", paste(paths, "/txtResult"))
setwd(n1)
if (length(dir(n1)) > 0) {
gR <- scan("txtResult.txt", what = character(), sep = "\t",
na.strings = "NA")
unlink("txtResult.txt")
k <- c()
gResult1 <- data.frame()
nn <- paste0(substr(gR[1], 1, (nchar(gR[1]) - 4)))
k <- c(k, grep(nn, gR))
if (length(k) > 2) {
for (i in 1:(length(k)/2 - 1)) {
l <- 1
gResult2 <- matrix("0", 1, 50)
for (j in k[2 * i - 1]:(k[2 * i + 1] - 1)) {
gResult2[1, l] <- gR[j]
l = l + 1
}
gResult1 <- rbind(gResult1, gResult2)
}
l <- 1
gResult2 <- matrix("0", 1, 50)
for (i in k[2 * (length(k)/2 - 1) + 1]:length(gR)) {
gResult2[1, l] <- gR[i]
l <- l + 1
}
gResult1 <- rbind(gResult1, gResult2)
}
else {
l <- 1
gResult2 <- matrix("0", 1, 50)
for (i in k[2 * (length(k)/2 - 1) + 1]:length(gR)) {
gResult2[1, l] <- gR[i]
l <- l + 1
}
gResult1 <- rbind(gResult1, gResult2)
}
gResult <- gResult1
gResult[, 1] = as.character(gResult[, 1])
list1 <- list()
for (i in 1:dim(gResult)[1]) {
k <- c()
k <- which(gResult[i, 1] == gResult[, 1])
list1 <- c(list1, list(k = k))
}
data <- unique(list1)
for (i in 1:length(data)) {
if (length(data[[i]]) == 1) {
gResult[data[[i]], 1] = paste0(gResult[data[[i]],
1], "_", "1")
}
else {
for (j in 1:length(data[[i]])) {
gResult[data[[i]][j], 1] = paste0(gResult[data[[i]][j],
1], "_", (data[[i]][j] - data[[i]][1] + 1))
}
}
}
gResult2 <- gResult
m <- c()
k <- 0
for (i in 10:dim(gResult2)[2]) {
if (length(which(gResult2[, i] == 0)) == dim(gResult2)[1]) {
m <- c(m, i)
}
}
for (i in 1:length(m)) {
gResult2 <- gResult2[, -(m[i] - k)]
k = k + 1
}
write.table(gResult2, "txtResult.txt", sep = "\t", col.names = FALSE,
append = TRUE, row.names = FALSE, quote = TRUE)
if (exists("gResult2") && dim(gResult2)[1] > 0) {
return(gResult2)
}
else {
print("Cannot find subpathway!")
}
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ pathway }
\keyword{ zhenghe }% __ONLY ONE__ keyword per line
|
dabfe5cb61f59cd8b4deebdbc4ddfd2bfa957cc2
|
965aa4666380808a369c2c02ab5c3d081caecf82
|
/factCheck 02 - get pages data.R
|
43ab2705f7a955907c64bbef448a0e6d5ba0aa35
|
[] |
no_license
|
lekovicj/EUvsVirus---What-the-Fake
|
cddc02030f1eca951298ed394aae285e6e2c2ef2
|
7c7791a1983304d67cf05f557a671268daf1105f
|
refs/heads/master
| 2022-06-10T15:43:14.605697
| 2020-04-25T22:04:24
| 2020-04-25T22:04:24
| 258,490,503
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,080
|
r
|
factCheck 02 - get pages data.R
|
# Scrape detail pages for fact-check articles not yet processed, appending the
# extracted text and links to data/factCheck/pageDetails.rds.
library(tidyverse)
library(xml2)
library(rvest)
dataRAW <- read_rds(path = "data/factCheck/pagesScraped.rds")
# links whose details were already scraped in a previous run
exclude <- read_rds(path = "data/factCheck/pageDetails.rds") %>%
select(link) %>%
distinct()
# keep only the not-yet-processed links
dataRAW <- dataRAW %>%
anti_join(exclude)
# NOTE(review): 1:nrow(dataRAW) iterates c(1, 0) when dataRAW is empty;
# seq_len(nrow(dataRAW)) would be safer.
for(i in 1:nrow(dataRAW)) {
page <- read_html(dataRAW$link[i])
# select the page elements carrying titles, claim text, sources and buttons
details <- page %>%
html_nodes(".entry-content__text--smaller , .entry-content__button--smaller , .entry-content__text--explanation , .entry-title , strong , .entry-content__text--org")
# pair each node's text with its href (if any), tag rows with the source
# link, append to the accumulated details file, and deduplicate
details %>%
html_text() %>%
enframe(value = "text") %>%
left_join(details %>%
html_attr("href") %>%
enframe(value = "fullArticleUrl")) %>%
mutate(link = dataRAW$link[i]) %>%
bind_rows(read_rds("data/factCheck/pageDetails.rds")) %>%
distinct() %>%
write_rds(path = "data/factCheck/pageDetails.rds")
# progress marker for the console
print(paste(i,"###", dataRAW$link[i]))
# polite randomized delay between requests
Sys.sleep(runif(n = 1, min = 0.3333, max = 3))
}
|
a70d5686d0c38582baac3e949b13b73a6ee38b3b
|
3f1d1377ab5bcc2777db0e8b68209374f92bf6f8
|
/R/upload.R
|
3c226b4a3ad79719b62618344dff18eece94ca7d
|
[
"curl"
] |
permissive
|
omegahat/RCurl
|
5dc3299c75dce0d0ab2481902125aebaa9cb7c9e
|
e07c076963fc6436e0b05db04bb0b3a20ba378e9
|
refs/heads/master
| 2022-06-25T08:55:39.848816
| 2022-06-07T02:34:35
| 2022-06-07T02:34:35
| 4,004,831
| 20
| 18
|
NOASSERTION
| 2022-06-07T02:34:35
| 2012-04-12T13:17:38
|
Turing
|
UTF-8
|
R
| false
| false
| 1,441
|
r
|
upload.R
|
ftpUpload =
#
# Upload content to an FTP URL via curlPerform().
# `what` is either the name of a file to read, an open connection, or (when
# asText is TRUE, e.g. for AsIs/raw input) the literal content to upload.
#
function(what, to, asText = inherits(what, "AsIs") || is.raw(what),
..., curl = getCurlHandle())
{
if(!asText && !inherits(what, "connection")) {
# treat `what` as a filename: open in binary mode and guarantee the
# connection is closed when this function exits
file = file(what, "rb")
on.exit(close(file))
} else
file = what
# stream the content through libcurl's READFUNCTION callback
curlPerform(url = to, upload = TRUE,
readfunction = uploadFunctionHandler(file, asText), ..., curl = curl)
}
uploadFunctionHandler =
#
# Returns the function used as libcurl's READFUNCTION callback: it is called
# repeatedly with a requested chunk size and must return the next chunk of
# the content, or an empty vector when the content is exhausted.
#
# Handles raw vectors, character strings and file/connection contents.
#
function(file, asText = inherits(file, "AsIs") || is.raw(file))
{
  if(asText) {
    pos = 1
    isRaw = is.raw(file)
    len = if(isRaw) length(file) else nchar(file)
    function(size) {
      if(pos > len)
        return(if(isRaw) raw(0) else character(0))
      # Clamp the chunk to the remaining content. The original advanced
      # pos by the *total* length (truncating every upload to its first
      # chunk), the substring() end index was off by one (size + 1 chars),
      # and the raw path could index past the end of the vector.
      end = min(len, pos + size - 1)
      ans = if(isRaw)
              file[pos:end]
            else
              substring(file, pos, end)
      pos <<- end + 1
      ans
    }
  } else
    function(size) {
      # connection input: readBin() does the chunking for us
      readBin(file, raw(), size)
    }
}
# Open a C-level FILE* handle through RCurl's native R_openFile routine.
# `filename` is tilde-expanded; `mode` is a stdio mode string such as "r"
# or "wb".
CFILE =
function(filename, mode = "r")
{
  expanded <- path.expand(filename)
  .Call("R_openFile", expanded, as.character(mode))
}
# S4 'close' method for CFILE objects: releases the underlying C-level file
# handle stored in the external-pointer slot 'ref' via RCurl's native
# routine, then returns the object.
setMethod("close", "CFILE",
function(con, ...) {
.Call("R_closeCFILE", con@ref, PACKAGE = "RCurl")
con
})
|
daccad6717e3ffd3ceb03ac198fe6e74cf1b17de
|
2e1925418c2995782d16826e24f8e4d32d43c84d
|
/database/allocator/analysis.R
|
8b6d622ebc4acff0842166671c1ae554ab3d4c9a
|
[] |
no_license
|
JohnCalhoun/Flamingo
|
7079983357d2c9f6dcf544dbdb67946c0e28c7a5
|
cc77a69caf19a8b4ab62cbe0eff85c38504513eb
|
refs/heads/master
| 2021-01-18T22:33:26.517156
| 2016-05-31T22:41:36
| 2016-05-31T22:41:36
| 50,302,153
| 1
| 2
| null | 2016-01-29T02:48:07
| 2016-01-24T18:47:42
|
Cuda
|
UTF-8
|
R
| false
| false
| 497
|
r
|
analysis.R
|
#! /usr/bin/env Rscript
# Inspect the 'allocator' table of the flamingo SQLite database and pull
# its ID column.
#load libraries
library(DBI)
library(RSQLite)
#set up database connection
drv<-dbDriver("SQLite")
database_location<-"../flamingo_database.db"
db<-dbConnect(drv,database_location)
#loadin data
dbListTables(db)
dbListFields(db,"allocator")
# BUGFIX: in SQL, single quotes delimit string literals, so the original
# "select 'ID' from 'allocator'" returned the constant string 'ID' for every
# row instead of the ID column; identifiers are quoted with double quotes.
allocator_sql<-'select "ID" from "allocator"'
allocator_rs<-dbSendQuery(db,allocator_sql)
# actually retrieve the rows (the result object alone holds no data)
allocator_ids<-dbFetch(allocator_rs)
allocator_ids
#dbGetRowsAffected(allocator_rs)
#dbColumnInfo(allocator_rs)
# release the result set before disconnecting (RSQLite warns otherwise)
dbClearResult(allocator_rs)
#manipulate data
#display data
#close connection
dbDisconnect(db)
|
3da5cc9b4bbb729ead37759dc343895d4e041959
|
79e29c2faf00c09c3293cb7866c70529583adf16
|
/tests/testthat/test-extend.R
|
1aa0803125fd0b1fb551c922da02bf59cfb2b71f
|
[
"MIT"
] |
permissive
|
schloerke/sass
|
c787acd09ed79c29fadcf819873470088aadc69d
|
20168a157b6eeeb45e89e7ea56daa091211b1bd5
|
refs/heads/master
| 2020-03-28T02:14:05.756939
| 2018-08-17T22:02:02
| 2018-08-17T22:02:02
| 147,556,853
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 235
|
r
|
test-extend.R
|
context('extend')
test_that('extend works', {
  # Everything before the first "{" of the compiled output should be the
  # merged selector list produced by @extend.
  compiled <- compile_sass('test-extend.scss')
  selector_prefix <- strsplit(compiled, '\\{')[[1]][1]
  expect_equal(
    selector_prefix,
    '.message, .success, .error, .warning '
  )
})
|
3e5f79981b84755d4c301d6c1cb4af344a5ae211
|
9465a81c226513900dc1a96cc746b2fa9b8196c8
|
/codeProcessing/R/h_file_proc.R
|
0c131f2e86991a7a929b3fa86e66f0f411c7a252
|
[] |
no_license
|
enginuity/codeProcessing
|
beced5a7ab42384084756af1076570bdebd28131
|
a44947621e065c2105bce4109bc8f4b28a3a81fe
|
refs/heads/master
| 2021-01-17T15:15:01.619715
| 2017-10-31T08:00:16
| 2017-10-31T08:00:16
| 27,804,505
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,134
|
r
|
h_file_proc.R
|
##@S This file contains function to aid with repeating file processing tasks
## (file-name processing, etc.)
#' Find all files with appropriate file extensions and extract code
#'
#' Locates every codefile matching the description, reads each one, and
#' bundles the filenames and their contents together.
#'
#' @param FD [\code{\link{FilesDescription}}] :: A specification of codefiles
#'
#' @return [\code{\link{Codebase}}] :: Specification and container for codefiles
#'   and filenames
#'
#' @export
#'
extract_Codebase = function(FD) {
  matched_files = find_files(FD = FD)
  return(Codebase(files = matched_files, code = extract_code(matched_files)))
}
#' Find all files
#'
#' See \code{\link{FilesDescription}} to see how to describe a file.
#'
#' @param FD [\code{\link{FilesDescription}}] :: A specification of codefiles
#'
#' @return [vector-char] :: A vector of filenames that match the given
#' \code{\link{FilesDescription}}
#'
#' @export
#'
find_files = function(FD) {
if (!inherits(x = FD, "FilesDescription")) {
stop("Input class is not of class FilesDescription")
}
## Find appropriate filename extension
if (FD$mode == "R") {
ext_regex = "[.]R$"
} else if (FD$mode == "C") {
ext_regex = "[.](c|cc|cpp|h|hh)$"
} else if (FD$mode == "all") {
ext_regex = "."
}
## Start with exact files if any
allfiles = FD$files
## Check file directories if any
for(j in seq_along(FD$dirlist)) {
temp = list.files(path = FD$dirlist[[j]]$dir, recursive = TRUE,
full.names = TRUE)
## Find files with correct filename extension
temp = temp[grep(ext_regex, temp)]
## Apply file_regex as appropriate
if (!is.null(FD$dirlist[[j]]$file_regex)) {
temp = temp[grep(file_regex, temp)]
}
allfiles = c(allfiles, temp)
}
## Add files from file-listing
allfiles = c(allfiles, c(FD$filelist, recursive = TRUE))
return(allfiles)
}
#' Extracts code for each file input
#'
#' Reads every file line-by-line, preserving input order.
#'
#' @param files [vector-char] :: Filenames to extract code from
#'
#' @return [list-vector-char] :: A list of code read from each given input file
#'
#' @export
#'
extract_code = function(files) {
  # unname() keeps the result an unnamed list, matching the original's
  # index-assignment loop even when `files` carries names
  return(lapply(unname(files), readLines))
}
|
e52ce2d9d0b9fcf599cd1ecbb7d54f8ce9124d44
|
10891d71f6512ba51d61a6dffedd8d9b3c5496e6
|
/man/generate_sample.Rd
|
492e8fa0ed0def7ae20a63dabbc885203890535a
|
[
"Apache-2.0"
] |
permissive
|
dewittpe/icd_file_generator
|
d7c1027b35770100bf40cb37a3e18a636c66d6c3
|
5ee69a9a8cbe148f7d8acde7ba7bb29ac95c9727
|
refs/heads/master
| 2020-03-31T10:05:09.209578
| 2018-10-19T19:13:24
| 2018-10-19T19:13:24
| 152,121,980
| 0
| 0
|
Apache-2.0
| 2018-10-08T17:48:58
| 2018-10-08T17:48:57
| null |
UTF-8
|
R
| false
| true
| 1,034
|
rd
|
generate_sample.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/icdgenerator.R
\name{generate_sample}
\alias{generate_sample}
\title{Generate Random ICD Data}
\usage{
generate_sample(version = 10, nrows = 1, dcols = 1, pcols = 1,
gcols = 0, pct_empty = 0.2, quiet = TRUE)
}
\arguments{
\item{version}{ICD version 9 or 10. Defaults to 10.}
\item{nrows}{number of rows to generate}
\item{dcols}{number of diagnostic codes to generate for each row}
\item{pcols}{number of procedure codes to generate for each row}
\item{gcols}{number of "other" columns to generate}
\item{pct_empty}{the percentage of diagnostic and procedure codes to be
"missing" in the resulting data frame.}
\item{quiet}{If \code{FALSE}, then report incremental timing.
To suppress incremental timing set \code{quiet = TRUE}.}
}
\description{
Create a data frame using randomly selected ICD diagnosis and procedure codes
}
\examples{
eg <- generate_sample(
version = 9,
nrows = 340,
dcols = 29,
pcols = 15,
gcols = 114
)
head(eg)
}
|
76abee21d6edf31aec8d7cb74c6d73eba10b9b83
|
59ff81a51cd1e41711291dadb6b6c3d39b30f207
|
/R/retrieve_spectral_library.R
|
b2ec7e579c0e78b50663cc64131de6d65024683c
|
[
"MIT"
] |
permissive
|
MarconiS/NeonSpeciesClassification
|
8646d2cf4f71b1eb6ce5bfd89af47c3c5e76bd9d
|
dd7de3877641c86df6424d9a1604763e401614ba
|
refs/heads/master
| 2023-03-11T17:45:07.083153
| 2021-03-01T22:40:54
| 2021-03-01T22:40:54
| 256,378,136
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,706
|
r
|
retrieve_spectral_library.R
|
library(tidyverse)
library(data.table)
csv_vst = fread("~/Documents/GitHub/neonVegWrangleR/outdir/field_data/neon_vst_data_022021.csv") #%>% filter(height > 3)
full_shaded_ids = csv_vst %>%
filter(canopyPosition %in% c("Mostly shaded","Full shade") | !str_detect(plantStatus , "Live"))
full_shaded_ids = full_shaded_ids %>% filter(!str_detect(plantStatus , "Live")) %>%
select(individualID) %>% unique
csv_vst = csv_vst %>%
filter(canopyPosition %in% c("Full sun", "Open growth", "Partially shaded", NA)) %>%
filter(str_detect(plantStatus , "Live")) %>%
filter(!str_detect(growthForm,"shrub|sapling")) %>%
#filter(!canopyPosition %in% c("Mostly shaded","Full shade")) %>%
#filter out trees that have been identified as shaded in a year, and NA in another
#filter(!individualID %in% full_shaded_ids) %>%
group_by(individualID) %>% slice_max(order_by = height) %>%slice(1)
csv_vst = csv_vst %>% filter(!individualID %in% full_shaded_ids$individualID)
#csv_vst = csv_vst %>% filter(height > 3) %>% filter(stemDiameter > 5)
# filter HSI from shade pixels, and apply BRDF and topographic corrections
brick = data.table::fread("./data/indir/brdf_corrected_spectral_library_1920.csv")
#brick = brick[,c(3,10:376)]
brick = brick[complete.cases(brick),]
brick = brick %>% filter(individualID %in% csv_vst$individualID)
# brick = brick %>% filter(year == 2018)
# brick = brick %>% group_by(site) %>% slice_max(year)
# filter out points that are likely too short
metadata = clean_typos_taxonID(csv_vst)
metadata = remove_too_close_stems(metadata, stem_dist = 2)
metadata = inner_join(metadata, brick)
metadata$delta_chm = abs(metadata$height - metadata$CHM)
metadata = metadata %>% group_by(individualID) %>%
mutate(mean_dh = mean(delta_chm), id_dh = min(delta_chm))
metadata = metadata %>% filter(id_dh < 3)
metadata$individualID %>% unique %>% length
metadata = metadata %>% filter(!is.na(band_50))
ids = metadata %>% select(individualID, siteID) %>% unique
# metadata = metadata %>% filter(individualID %in% ids)
sites = ids$siteID %>% table %>% data.frame
sites
#sites = sites %>% filter(Freq >= 40)
#metadata = metadata %>% filter(siteID %in% as.character(sites[[1]]))
#write_csv(metadata, "./data/indir/metadata_classification.csv")
#brick = brick %>% filter(individualID %in% metadata$individualID)
#write_csv(brick, "./data/indir/spectra_ben_compare.csv")
set.seed(0)
noise = apply(metadata[,79:424], 1, function(x)any(x > 1 | x < 0))
metadata[noise,]=NA
metadata = metadata %>% filter(band_192 <0.45) %>%
filter(band_193 <0.45) %>%
filter(band_150 < 0.9) %>%
filter(band_199 < 0.4) %>%
filter(band_262 < 0.27) %>%
filter(band_271 < 0.38) %>%
filter(band_272 < 0.38) %>%
filter(band_277 < 0.45) %>%
filter(band_325 < 0.3) %>%
filter(band_358 < 0.25) %>%
filter(band_359 < 0.3)%>%
filter(band_62 < 0.2)
metadata = metadata[complete.cases(metadata[,79:445]),]
metadata = metadata[,-c(76:79)]
# metadata[metadata$band_192 >0.25,] = NA
# metadata = metadata[complete.cases(metadata[,79:424]),]
# metadata[metadata$band_200:06 >0.37,] = NA
# metadata = metadata[complete.cases(metadata[,79:424]),]
# metadata[metadata$band_150 >0.9,] = NA
#metadata_ = metadata %>% select(-one_of("band_193"))
#metadata = clean_reflectance(metadata)
metadata %>%select(contains("band")) %>% plot_spectra
# #remove pixels with clear measurement errors
# metadata = metadata[complete.cases(metadata[,79:424]),]
# noise = apply(metadata[,79:424], 1, function(x)any(x > 0.15))
# metadata[noise,]=NA
# metadata = metadata[complete.cases(metadata[,79:424]),]
# noise = apply(metadata[,266:422], 1, function(x)any(x > 0.12))
# metadata[noise,]=NA
# metadata = metadata[complete.cases(metadata[,79:424]),]
# noise = apply(metadata[,146:236], 1, function(x)any(x < 0.28))
# metadata[noise,]=NA
# metadata = metadata[complete.cases(metadata[,77:422]),]
ids = metadata %>% select(individualID, siteID, taxonID) %>% unique
# metadata = metadata %>% filter(individualID %in% ids)
species_per_site = ids %>% ungroup %>% select(siteID, taxonID) %>% unique %>%
ungroup %>% group_by(siteID) %>% mutate(species = n()) %>% select(-one_of("taxonID")) %>%
unique
sites = ids$siteID %>% table %>% data.frame
sites
# # train test split
new_df_rfl = list()
for(ii in unique(metadata$taxonID)){
taxa_foo = metadata %>% dplyr::filter(taxonID == ii) #%>% unique
# refl_foo = taxa_foo %>% select(contains("band"))
# if(nrow(taxa_foo)> 30){
# # #check range of spectra
# max_ideal = apply(refl_foo[complete.cases(refl_foo),],
# MARGIN = 2, function(x)quantile(x, 0.999))
# min_ideal = apply(refl_foo[complete.cases(refl_foo),],
# MARGIN = 2, function(x)quantile(x, 0.001))
#
# #filter for outliers: too bright
# cnd = apply(refl_foo, 1,function(x)(x > max_ideal))
# idx <- (apply(data.frame(cnd), 2, any))
# if(length(idx) !=0){
# idx[is.na(idx)] = T
# taxa_foo[idx,] = NA
# }
# #filter for outliers: too dark
# cnd = apply(refl_foo, 1,function(x)(x < min_ideal))
# idx <- (apply(data.frame(cnd), 2, any))
# if(length(idx) !=0){
# idx[is.na(idx)] = T
# taxa_foo[idx,] = NA
# }
# }
# new_df_rfl[[ii]] = taxa_foo
#
#plot spectra of the species
plt = taxa_foo %>% select(contains("band")) %>% plot_spectra
ggsave(paste("./plots/", ii, ".jpg", sep=""), plot = plt)
}
new_df_rfl = do.call(rbind.data.frame, new_df_rfl)
new_df_rfl = new_df_rfl %>% filter(!is.na(band_54))
new_df_rfl = metadata
new_df_rfl = filter_too_rare_out(new_df_rfl, (new_df_rfl$individualID),min_id = 4)
species_per_site = new_df_rfl %>% select(siteID, taxonID) %>% unique %>%
ungroup %>% group_by(siteID) %>% mutate(species = n()) %>% select(-one_of("taxonID")) %>%
unique
# previous_split = data.table::fread("./data/metadata_custom_test.csv")
# previous_split = previous_split %>% filter(groupID == "test") %>%
# select(individualID, siteID,plotID, taxonID)%>%
# unique
# how to deal with the two years? get 2019 for
new_df_rfl %>% ungroup %>% select(year, site) %>% table
# Assign one acquisition year per site: the listed sites use 2018 imagery,
# all remaining sites use 2019, then recombine into a single table.
sites_2018 = new_df_rfl %>% filter(site %in% c("BLAN", "BONA", "GUAN", "GRSM","NIWO",
                                               "MLBS", "RMNP"))%>%
  filter(year == 2018)
sites_2019 = new_df_rfl %>% filter(!site %in% c("BLAN", "BONA", "GUAN", "GRSM","NIWO",
                                                "MLBS", "RMNP"))%>%
  filter(year == 2019)
new_df_rfl = rbind.data.frame(sites_2018, sites_2019)
# Keep only pixels whose field height agrees with the CHM within 3 m.
new_df_rfl = new_df_rfl %>% data.frame %>% filter(delta_chm < 3)
set.seed(1987)
remove_sites = new_df_rfl %>% select(siteID, taxonID, individualID) %>% unique
remove_sites$siteID %>% table
# Drop sites judged to have too few usable crowns for a train/test split.
new_df_rfl_ = new_df_rfl %>% filter(!siteID %in% c("YELL", "MOAB", "KONZ", "ONAQ", "GRSM", "HEAL"))
# One row per tree so the split is stratified on individuals, not on pixels.
unique_entries = new_df_rfl_ %>% group_by(individualID)%>% slice(1)
# Sample ~12% of individuals per site/taxon, then promote every pixel in the
# selected plots to training; all remaining individuals form the test set.
train = unique_entries %>% group_by(siteID, taxonID) %>% sample_frac(0.12)
train = new_df_rfl_ %>% filter(plotID %in% unique(train$plotID))
test = new_df_rfl_ %>% filter(!individualID %in% unique(train$individualID))
train$groupID = "train"
test$groupID = "test"
# Restrict the test set to sites/taxa represented in training.
test = test %>% filter(siteID %in% unique(train$siteID))
test = test %>% filter(taxonID %in% unique(train$taxonID))
# Interactive sanity checks: individual / taxon / site counts per split.
test$individualID %>% unique %>% length
train$individualID %>% unique %>% length
train_check = train %>% select(individualID, taxonID) %>%
  unique %>% ungroup %>% select(taxonID) %>% table %>% sort
test_check = test %>% select(individualID, taxonID) %>%
  unique %>% ungroup %>% select(taxonID) %>% table %>% sort
test_check %>% sort
train_check %>% sort
test$taxonID %>% unique %>% length
train$taxonID %>% unique %>% length
test$siteID %>% unique %>% length
train$siteID %>% unique %>% length
new_sp = rbind(train, test)
#remove taxa that have less than 4 individuals in the training set
#new_sp = new_sp %>% filter(!taxonID %in% c("GYDI", "QUFA"))
new_sp = new_sp %>% filter(!taxonID %in% c("PODE3", "QULA3", "AIAL", "QUSH", "ACSA2"))
#write_csv(new_sp, "./data/metadata_2021_no_correction.csv")
# Keep at most the 8 pixels per crown that best match the canopy height model.
new_sp = new_sp %>% group_by(individualID) %>%
  slice_min(n=8, order_by = delta_chm)
new_sp %>%ungroup %>% filter(groupID == "train") %>%
  select(taxonID) %>% table %>% sort
new_sp[,-c(336:341)] %>% select(contains("band")) %>% plot_spectra
#new_sp %>% select(paste("band", 275:279, sep="_")) %>% plot_spectra
# BUG FIX: a dangling `new_sp =` stood here, so R parsed the next line as
# `new_sp = write_csv(new_sp[c(1:78, 425)], ...)`, silently replacing new_sp
# with the 79-column metadata subset and breaking the spectra write below.
# The stray assignment has been removed.
write_csv(new_sp[c(1:78, 425)], "/Volumes/Data/Spectral_library/metadata_1819_feb.csv")
write_csv(new_sp[c(14, 79:424)], "/Volumes/Data/Spectral_library/brdf_spectra_1819_feb.csv")
# train$taxonID %>% table %>% sort
# PICO PIPA2
new_sp %>% filter(taxonID == "ACRU") %>%
  select(individualID) %>% unique %>% nrow()
# Down-sample pixels of over-represented taxa to balance classes:
# slice_sample picks random pixels, slice_min keeps those closest to the CHM.
ACRU = new_sp %>% filter(taxonID == "ACRU") %>%
  group_by(individualID) %>% slice_sample(n=3)
ACSA3 = new_sp %>% filter(taxonID == "ACSA3") %>%
  group_by(individualID) %>%
  slice_min(n=5, order_by = delta_chm)
PSME = new_sp %>% filter(taxonID == "PSME") %>%
  group_by(individualID) %>%
  slice_min(n=4, order_by = delta_chm)
PIMA = new_sp %>% filter(taxonID == "PIMA") %>%
  group_by(individualID) %>%
  slice_min(n=3, order_by = delta_chm)
POTR5 = new_sp %>% filter(taxonID == "POTR5") %>%
  group_by(individualID) %>%
  slice_min(n=3, order_by = delta_chm)
PIPA2 = new_sp %>% filter(taxonID == "PIPA2") %>%
  group_by(individualID) %>%
  slice_min(n=3, order_by = delta_chm)
PICO = new_sp %>% filter(taxonID == "PICO") %>%
  group_by(individualID) %>%
  slice_min(n=6, order_by = delta_chm)
# Swap the rebalanced taxa back into the full table.
new_sp_ = new_sp %>% filter(!taxonID %in% c("ACSA3", "ACRU", "PSME",
                                            "PIMA", "POTR5",
                                            "PICO", "PIPA2"))
#
new_sp_ = rbind.data.frame(new_sp_, ACSA3, ACRU, PSME, PIMA, POTR5,PICO,
                           PIPA2)
foo = new_sp_ %>% data.frame %>%filter(groupID == "train")
foo$taxonID %>% table %>% sort
# NOTE(review): the column indices below (1:75, 442) differ from the first
# write above (1:78, 425) — confirm they match the actual column layout.
write_csv(new_sp_[c(1:75, 442)], "/Volumes/Data/Spectral_library/metadata_1819_feb.csv")
write_csv(new_sp_[c(14, 79:424)], "/Volumes/Data/Spectral_library/brdf_spectra_1819_feb.csv")
# new_sp %>% filter(taxonID == "ACSA3") %>%
# select(individualID) %>% unique %>% nrow()
# ACSA3 = new_sp %>% filter(taxonID == "ACSA3") %>%
# group_by(individualID) %>% slice_min(n=4, order_by = delta_h)%>%
# filter(delta_h < 1)
#
#
# new_sp %>% filter(taxonID == "MEPO5") %>%
# filter(delta_h < 1)%>% select(individualID) %>% unique %>% nrow()
# MEPO5 = new_sp %>% filter(taxonID == "MEPO5") %>%
# group_by(individualID) %>% slice_min(n=4, order_by = delta_h) %>%
# filter(delta_h < 1)
#
#
# new_sp %>% filter(taxonID == "POTR5") %>%
# select(individualID) %>% unique %>% nrow()
# POTR5 = new_sp %>% filter(taxonID == "POTR5") %>%
# group_by(individualID) %>% slice_min(n=4, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PSME") %>%
# select(individualID) %>% unique %>% nrow()
# PSME = new_sp %>% filter(taxonID == "PSME") %>%
# group_by(individualID) %>% slice_min(n=4, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PITA") %>%
# select(individualID) %>% unique %>% nrow()
# PITA = new_sp %>% filter(taxonID == "PITA") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PIPA2") %>%
# select(individualID) %>% unique %>% nrow()
# PIPA2 = new_sp %>% filter(taxonID == "PIPA2") %>%
# filter(band_96 > 0.07) %>%
# group_by(individualID) %>% slice_min(n=3, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PIMA") %>%
# select(individualID) %>% unique %>% nrow()
# PIMA = new_sp %>% filter(taxonID == "PIMA") %>%
# #filter(band_96 > 0.07) %>%
# group_by(individualID) %>% slice_min(n=3, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PICO") %>%
# select(individualID) %>% unique %>% nrow()
# PICO = new_sp %>% filter(taxonID == "PICO") %>%
# group_by(individualID) %>% slice_min(n=5, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PIEN") %>%
# select(individualID) %>% unique %>% nrow()
# PIEN = new_sp %>% filter(taxonID == "PIEN") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "QUST") %>%
# select(individualID) %>% unique %>% nrow()
# QUST = new_sp %>% filter(taxonID == "QUST") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "QURU") %>%
# select(individualID) %>% unique %>% nrow()
# QURU = new_sp %>% filter(taxonID == "QURU") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "TSCA") %>%
# select(individualID) %>% unique %>% nrow()
# TSCA = new_sp %>% filter(taxonID == "TSCA") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "LITU") %>%
# select(individualID) %>% unique %>% nrow()
# LITU = new_sp %>% filter(taxonID == "LITU") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp_ = new_sp %>% filter(!taxonID %in% c("ACSA3", "ACRU", "QUST", "PIEN",
# "MEPO5", "POTR5", "LITU", "QURU",
# "PICO", "PIMA", "PIPA2", "PITA", "PSME", "TSCA"))
#
# new_sp_ = rbind.data.frame(new_sp_, ACSA3, ACRU, QUST, PIEN,MEPO5, POTR5,PICO,
# PIMA, PIPA2, PITA, PSME, QURU, TSCA, LITU)
# tmp_ids = max(ids_per_taxa$Freq) #*2
# few_pix_per_id=list()
# for(sp in unique(train$taxonID)){
# tmp = new_sp %>% filter(taxonID == sp)
# #tmp_ids = max(ids_per_taxa$Freq)
# few_pix_per_id[[sp]]=tmp %>%
# group_by(individualID) %>% slice_sample(n=ceiling(tmp_ids/nrow(tmp)))
#
# }
# few_pix_per_id = do.call(rbind.data.frame, few_pix_per_id)
# few_pix_per_id = rbind.data.frame(few_pix_per_id, test)
#
# ACRU = new_sp %>% filter(taxonID == "ACRU") %>%
# group_by(individualID) %>% slice_sample(n=5)
# POTR5 = new_sp %>% filter(taxonID == "POTR5") %>%
# group_by(individualID) %>% slice_sample(n=4)
# ACSA3 = new_sp %>% filter(taxonID == "ACSA3") %>%
# group_by(individualID) %>% slice_sample(n=5)
# PIMA = new_sp %>% filter(taxonID == "PIMA") %>%
# group_by(individualID) %>% slice_sample(n=3)
# PIPA2 = new_sp %>% filter(taxonID == "PIPA2") %>%
# group_by(individualID) %>% slice_sample(n=3)
# QUST = new_sp %>% filter(taxonID == "QUST") %>%
# group_by(individualID) %>% slice_sample(n=10)
# PSME = new_sp %>% filter(taxonID == "PSME") %>%
# group_by(individualID) %>% slice_sample(n=4)
# ABLAL = new_sp %>% filter(taxonID == "ABLAL") %>%
# group_by(individualID) %>% slice_sample(n=12)
# PICO = new_sp %>% filter(taxonID == "PICO") %>%
# group_by(individualID) %>% slice_sample(n=5)
# PIEN = new_sp %>% filter(taxonID == "PIEN") %>%
# group_by(individualID) %>% slice_sample(n=10)
# PITA = new_sp %>% filter(taxonID == "PITA") %>%
# group_by(individualID) %>% slice_sample(n=14)
# TSCA = new_sp %>% filter(taxonID == "TSCA") %>%
# group_by(individualID) %>% slice_sample(n=10)
#
# replaced_over_sampled = rbind.data.frame(PIMA,ACRU, POTR5, ACSA3,
# PIPA2, QUST, PSME, ABLAL, PICO,
# PIEN, PITA)
# foo = new_sp %>% filter(!taxonID %in% unique(replaced_over_sampled$taxonID))
# foo = rbind.data.frame(replaced_over_sampled, foo)
# write_csv(new_sp_, "./data/metadata_1920.csv")
# write_csv(new_sp_[c(14, 81:422)], "./data/brdf_spectra_1920.csv")
#
# species_classification_data = me_ %>% filter(individualID %in% metadata$individualID)
#
# train = species_classification_data %>% group_by(siteID, taxonID) %>% sample_frac(0.0)
# train = species_classification_data %>% filter(plotID %in% unique(train$plotID))
# test = species_classification_data %>% filter(!plotID %in% unique(train$plotID))
# train$groupID = "train"
# test$groupID = "test"
# test = test %>% filter(siteID %in% unique(train$siteID))
# new_sp = rbind(train, test)
#
#
# write_csv(new_sp, "~/Documents/Data/Field_surveys/VST/data_for_classification.csv")
#
# f$individualID %>% unique
# Re-filter the previously written 2018/19 library files, dropping crowns
# listed in full_shaded_ids (fully shaded or non-live stems), then re-save.
# NOTE(review): full_shaded_ids is defined further down in this script — these
# lines only work after that section has been run.
tmp_dat = fread("/Volumes/Data/Spectral_library/metadata_1819.csv")
tmp_dat_ = tmp_dat %>% filter(!individualID %in% full_shaded_ids$individualID)
tmp_dat.train = tmp_dat_ %>% filter(groupID == "train")
tmp_dat.test = tmp_dat_ %>% filter(groupID == "test")
# Interactive checks of class balance after the filter.
tmp_dat.train$taxonID %>% table %>% sort
tmp_dat.test$taxonID %>% unique %>% length
write_csv(tmp_dat_, "/Volumes/Data/Spectral_library/metadata_1819_feb.csv")
tmp_dat = fread("/Volumes/Data/Spectral_library/brdf_spectra_1819.csv")
tmp_dat_ = tmp_dat %>% filter(!individualID %in% full_shaded_ids$individualID)
write_csv(tmp_dat_, "/Volumes/Data/Spectral_library/brdf_spectra_1819_feb.csv")
library(tidyverse)
library(data.table)
# Load NEON vegetation-structure field data; the pipeline below keeps only
# live, sunlit, tree-form stems and pairs them with BRDF-corrected spectra.
csv_vst = fread("~/Documents/GitHub/neonVegWrangleR/outdir/field_data/neon_vst_data_022021.csv") #%>% filter(height > 3)
# Individuals ever recorded as shaded OR not live; the second filter narrows
# this to non-live stems only — these IDs are excluded everywhere below.
full_shaded_ids = csv_vst %>%
  filter(canopyPosition %in% c("Mostly shaded","Full shade") | !str_detect(plantStatus , "Live"))
full_shaded_ids = full_shaded_ids %>% filter(!str_detect(plantStatus , "Live")) %>%
  select(individualID) %>% unique
# Keep live, sun-exposed, non-shrub/sapling stems; for trees measured in
# multiple years keep the single tallest record.
csv_vst = csv_vst %>%
  filter(canopyPosition %in% c("Full sun", "Open growth", "Partially shaded", NA)) %>%
  filter(str_detect(plantStatus , "Live")) %>%
  filter(!str_detect(growthForm,"shrub|sapling")) %>%
  #filter(!canopyPosition %in% c("Mostly shaded","Full shade")) %>%
  #filter out trees that have been identified as shaded in a year, and NA in another
  #filter(!individualID %in% full_shaded_ids) %>%
  group_by(individualID) %>% slice_max(order_by = height) %>%slice(1)
csv_vst = csv_vst %>% filter(!individualID %in% full_shaded_ids$individualID)
#csv_vst = csv_vst %>% filter(height > 3) %>% filter(stemDiameter > 5)
# filter HSI from shade pixels, and apply BRDF and topographic corrections
brick = data.table::fread("./data/indir/brdf_corrected_spectral_library_1920.csv")
#brick = brick[,c(3,10:376)]
brick = brick[complete.cases(brick),]
brick = brick %>% filter(individualID %in% csv_vst$individualID)
# brick = brick %>% filter(year == 2018)
# brick = brick %>% group_by(site) %>% slice_max(year)
# filter out points that are likely too short
# Project helpers: fix taxonID typos and remove stems closer than 2 m, then
# attach the spectra and compute the field-height vs CHM disagreement.
metadata = clean_typos_taxonID(csv_vst)
metadata = remove_too_close_stems(metadata, stem_dist = 2)
metadata = inner_join(metadata, brick)
metadata$delta_chm = abs(metadata$height - metadata$CHM)
metadata = metadata %>% group_by(individualID) %>%
  mutate(mean_dh = mean(delta_chm), id_dh = min(delta_chm))
# Keep crowns whose best pixel is within 3 m of the CHM.
metadata = metadata %>% filter(id_dh < 3)
metadata$individualID %>% unique %>% length
metadata = metadata %>% filter(!is.na(band_50))
ids = metadata %>% select(individualID, siteID) %>% unique
# metadata = metadata %>% filter(individualID %in% ids)
sites = ids$siteID %>% table %>% data.frame
sites
#sites = sites %>% filter(Freq >= 40)
#metadata = metadata %>% filter(siteID %in% as.character(sites[[1]]))
#write_csv(metadata, "./data/indir/metadata_classification.csv")
#brick = brick %>% filter(individualID %in% metadata$individualID)
#write_csv(brick, "./data/indir/spectra_ben_compare.csv")
set.seed(0)
# Drop rows with any reflectance outside [0, 1]; columns 79:424 are assumed to
# be the spectral bands (NOTE(review): confirm this column range).
noise = apply(metadata[,79:424], 1, function(x)any(x > 1 | x < 0))
metadata[noise,]=NA
metadata = metadata[complete.cases(metadata[,79:424]),]
# Hand-tuned per-band thresholds removing visibly anomalous (too bright) pixels.
metadata = metadata %>% filter(band_192 <0.45) %>%
  filter(band_193 <0.45) %>%
  filter(band_150 < 0.9) %>%
  filter(band_199 < 0.4) %>%
  filter(band_262 < 0.27) %>%
  filter(band_271 < 0.38) %>%
  filter(band_272 < 0.38) %>%
  filter(band_277 < 0.45) %>%
  filter(band_325 < 0.3) %>%
  filter(band_358 < 0.25) %>%
  filter(band_359 < 0.3)
# metadata[metadata$band_192 >0.25,] = NA
# metadata = metadata[complete.cases(metadata[,79:424]),]
# metadata[metadata$band_200:06 >0.37,] = NA
# metadata = metadata[complete.cases(metadata[,79:424]),]
# metadata[metadata$band_150 >0.9,] = NA
#metadata_ = metadata %>% select(-one_of("band_193"))
#metadata = clean_reflectance(metadata)
#metadata[,80:120] %>%select(contains("band")) %>% plot_spectra
# #remove pixels with clear measurement errors
# metadata = metadata[complete.cases(metadata[,79:424]),]
# noise = apply(metadata[,79:424], 1, function(x)any(x > 0.15))
# metadata[noise,]=NA
# metadata = metadata[complete.cases(metadata[,79:424]),]
# noise = apply(metadata[,266:422], 1, function(x)any(x > 0.12))
# metadata[noise,]=NA
# metadata = metadata[complete.cases(metadata[,79:424]),]
# noise = apply(metadata[,146:236], 1, function(x)any(x < 0.28))
# metadata[noise,]=NA
# metadata = metadata[complete.cases(metadata[,77:422]),]
# Summaries: species richness per site and crown counts per site.
ids = metadata %>% select(individualID, siteID, taxonID) %>% unique
# metadata = metadata %>% filter(individualID %in% ids)
species_per_site = ids %>% ungroup %>% select(siteID, taxonID) %>% unique %>%
  ungroup %>% group_by(siteID) %>% mutate(species = n()) %>% select(-one_of("taxonID")) %>%
  unique
sites = ids$siteID %>% table %>% data.frame
sites
# how to deal with the two years? get 2019 for
new_df_rfl %>% select(year, site) %>% table
# Resolve the two-year overlap: the listed sites use 2018 imagery, the rest 2019.
sites_2018 = metadata %>% filter(site %in% c("BLAN", "BONA", "GUAN", "GRSM","NIWO",
                                             "MLBS", "RMNP"))%>%
  filter(year == 2018)
sites_2019 = metadata %>% filter(!site %in% c("BLAN", "BONA", "GUAN", "GRSM","NIWO",
                                              "MLBS", "RMNP"))%>%
  filter(year == 2019)
metadata = rbind.data.frame(sites_2018, sites_2019)
# # train test split
# Per-taxon spectral QA: blank per-band boxplot outliers, then NA any pixel
# falling outside the taxon's 1%–99% per-band quantile envelope, and save one
# diagnostic spectra plot per taxon.
new_df_rfl = list()
for(ii in unique(metadata$taxonID)){
  taxa_foo = metadata %>% data.frame %>% dplyr::filter(taxonID == ii) #%>% unique
  refl_foo = taxa_foo %>% select(contains("band"))
  if(nrow(taxa_foo)> 4){
    # Row indices of boxplot outliers, computed per band.
    pix_ind = apply(refl_foo, 2, function(x){
      out = boxplot.stats(x)$out
      pix_ind = which(x %in% c(out))
      pix_ind
    })
    # BUG FIX: the original `for(bnd in ncol(refl_foo))` iterated over a single
    # value (the column count), so outlier pixels were blanked for the LAST
    # band only. Iterate over every band as clearly intended.
    # NOTE(review): if every band returns the same number of outliers, apply()
    # collapses pix_ind to a non-list — confirm on real data.
    for(bnd in seq_len(ncol(refl_foo))){
      taxa_foo[pix_ind[[bnd]],] = NA
    }
    #
    # #check range of spectra
    #
    max_ideal = apply(refl_foo[complete.cases(refl_foo),],
                      MARGIN = 2, function(x)quantile(x, 0.99))
    min_ideal = apply(refl_foo[complete.cases(refl_foo),],
                      MARGIN = 2, function(x)quantile(x, 0.01))
    #filter for outliers: too bright
    cnd = apply(refl_foo, 1,function(x)(x > max_ideal))
    idx <- (apply(data.frame(cnd), 2, any))
    if(length(idx) !=0){
      idx[is.na(idx)] = T
      taxa_foo[idx,] = NA
    }
    #filter for outliers: too dark
    cnd = apply(refl_foo, 1,function(x)(x < min_ideal))
    idx <- (apply(data.frame(cnd), 2, any))
    if(length(idx) !=0){
      idx[is.na(idx)] = T
      taxa_foo[idx,] = NA
    }
  }
  new_df_rfl[[ii]] = taxa_foo
  #plot spectra of the species
  taxa_foo = taxa_foo %>% filter(!is.na(band_54))
  plt = taxa_foo %>% select(contains("band")) %>% plot_spectra
  ggsave(paste("./plots/taxa/", ii, ".jpg", sep=""), plot = plt)
}
# Stack the per-taxon tables and drop pixels that were NA'd above.
new_df_rfl = do.call(rbind.data.frame, new_df_rfl)
new_df_rfl = new_df_rfl %>% filter(!is.na(band_54))
# remove the clear outliers from species
new_df_rfl = filter_too_rare_out(new_df_rfl, (new_df_rfl$individualID),min_id = 5)
species_per_site = new_df_rfl %>% select(siteID, taxonID) %>% unique %>%
  ungroup %>% group_by(siteID) %>% mutate(species = n()) %>% select(-one_of("taxonID")) %>%
  unique
# previous_split = data.table::fread("./data/metadata_custom_test.csv")
# previous_split = previous_split %>% filter(groupID == "test") %>%
# select(individualID, siteID,plotID, taxonID)%>%
# unique
# Second train/test split pass on the QA'd data (same pattern as the first,
# but with a 9% sampling fraction and a different taxon exclusion list).
new_df_rfl = new_df_rfl %>% filter(delta_chm < 3)
remove_sites = new_df_rfl %>% select(siteID, taxonID, individualID) %>% unique
remove_sites$siteID %>% table
new_df_rfl_ = new_df_rfl %>% filter(!siteID %in% c("YELL", "MOAB", "KONZ", "ONAQ", "GRSM", "HEAL"))
set.seed(1987)
# One row per tree so sampling is stratified on individuals, not pixels.
unique_entries = new_df_rfl_ %>% group_by(individualID)%>% slice(1)
set.seed(0)
train = unique_entries %>% group_by(siteID, taxonID) %>% sample_frac(0.09)
train = new_df_rfl_ %>% filter(plotID %in% unique(train$plotID))
test = new_df_rfl_ %>% filter(!individualID %in% unique(train$individualID))
train$groupID = "train"
test$groupID = "test"
# Restrict the test set to sites/taxa present in training.
test = test %>% filter(siteID %in% unique(train$siteID))
test = test %>% filter(taxonID %in% unique(train$taxonID))
# Interactive sanity checks on split sizes and class balance.
test$individualID %>% unique %>% length
train$individualID %>% unique %>% length
train_check = train %>% select(individualID, taxonID) %>%
  unique %>% ungroup %>% select(taxonID) %>% table %>% sort
test_check = test %>% select(individualID, taxonID) %>%
  unique %>% ungroup %>% select(taxonID) %>% table %>% sort
test_check %>% sort
train_check %>% sort
test$taxonID %>% unique %>% length
train$taxonID %>% unique %>% length
test$siteID %>% unique %>% length
train$siteID %>% unique %>% length
new_sp = rbind(train, test)
#remove taxa that have less than 4 individuals in the training set
new_sp = new_sp %>% filter(!taxonID %in% c("GUOF", "NYBI", "PISA2", "QUMI"))
#write_csv(new_sp, "./data/metadata_2021_no_correction.csv")
# Keep at most the 6 pixels per crown that best match the canopy height model.
new_sp = new_sp %>% group_by(individualID) %>%
  slice_min(n=6, order_by = delta_chm)
new_sp %>%ungroup %>% filter(groupID == "train") %>%
  select(taxonID) %>% table %>% sort
#new_sp %>% select(paste("band", 275:279, sep="_")) %>% plot_spectra
# write_csv(new_sp[c(1:75, 425)], "/Volumes/Data/Spectral_library/metadata_1819_feb.csv")
# write_csv(new_sp[c(14, 79:424)], "/Volumes/Data/Spectral_library/brdf_spectra_1819_feb.csv")
# train$taxonID %>% table %>% sort
# PICO PIPA2
new_sp %>% filter(taxonID == "ACRU") %>%
  select(individualID) %>% unique %>% nrow()
# Down-sample pixels of over-represented taxa to balance classes:
# slice_sample picks random pixels, slice_min keeps those closest to the CHM.
ACRU = new_sp %>% filter(taxonID == "ACRU") %>%
  group_by(individualID) %>% slice_sample(n=3)
ACSA3 = new_sp %>% filter(taxonID == "ACSA3") %>%
  group_by(individualID) %>%
  slice_min(n=5, order_by = delta_chm)
PSME = new_sp %>% filter(taxonID == "PSME") %>%
  group_by(individualID) %>%
  slice_min(n=4, order_by = delta_chm)
PIMA = new_sp %>% filter(taxonID == "PIMA") %>%
  group_by(individualID) %>%
  slice_min(n=3, order_by = delta_chm)
POTR5 = new_sp %>% filter(taxonID == "POTR5") %>%
  group_by(individualID) %>%
  slice_min(n=3, order_by = delta_chm)
PIPA2 = new_sp %>% filter(taxonID == "PIPA2") %>%
  group_by(individualID) %>%
  slice_min(n=3, order_by = delta_chm)
PICO = new_sp %>% filter(taxonID == "PICO") %>%
  group_by(individualID) %>%
  slice_min(n=6, order_by = delta_chm)
# Swap the rebalanced taxa back into the full table.
new_sp_ = new_sp %>% filter(!taxonID %in% c("ACSA3", "ACRU", "PSME",
                                            "PIMA", "POTR5",
                                            "PICO", "PIPA2"))
#
new_sp_ = rbind.data.frame(new_sp_, ACSA3, ACRU, PSME, PIMA, POTR5,PICO,
                           PIPA2)
foo = new_sp_ %>% data.frame %>%filter(groupID == "train")
foo$taxonID %>% table %>% sort
# NOTE(review): hard-coded column index ranges (1:75, 442) and (14, 76:438)
# differ from the earlier pass (1:78, 425 / 14, 79:424) — confirm they match
# the current column layout before relying on these outputs.
write_csv(new_sp_[c(1:75, 442)], "/Volumes/Data/Spectral_library/metadata_1819_feb.csv")
write_csv(new_sp_[c(14, 76:438)], "/Volumes/Data/Spectral_library/brdf_spectra_1819_feb.csv")
# new_sp_ = cbind.data.frame(fread("/Volumes/Data/Spectral_library/metadata_1819_feb.csv"),
#                            fread("/Volumes/Data/Spectral_library/brdf_spectra_1819_feb.csv"))
# Save one spectra plot per taxon from the cleaned library so the effect of the
# balancing/cleaning steps can be inspected visually.
for (taxon_code in unique(new_sp_$taxonID)) {
  taxon_rows <- new_sp_ %>% data.frame() %>% dplyr::filter(taxonID == taxon_code)
  spectra_plot <- taxon_rows %>% select(contains("band")) %>% plot_spectra
  ggsave(paste("./plots/taxa/clean_", taxon_code, ".jpg", sep = ""),
         plot = spectra_plot)
}
# new_sp %>% filter(taxonID == "ACSA3") %>%
# select(individualID) %>% unique %>% nrow()
# ACSA3 = new_sp %>% filter(taxonID == "ACSA3") %>%
# group_by(individualID) %>% slice_min(n=4, order_by = delta_h)%>%
# filter(delta_h < 1)
#
#
# new_sp %>% filter(taxonID == "MEPO5") %>%
# filter(delta_h < 1)%>% select(individualID) %>% unique %>% nrow()
# MEPO5 = new_sp %>% filter(taxonID == "MEPO5") %>%
# group_by(individualID) %>% slice_min(n=4, order_by = delta_h) %>%
# filter(delta_h < 1)
#
#
# new_sp %>% filter(taxonID == "POTR5") %>%
# select(individualID) %>% unique %>% nrow()
# POTR5 = new_sp %>% filter(taxonID == "POTR5") %>%
# group_by(individualID) %>% slice_min(n=4, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PSME") %>%
# select(individualID) %>% unique %>% nrow()
# PSME = new_sp %>% filter(taxonID == "PSME") %>%
# group_by(individualID) %>% slice_min(n=4, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PITA") %>%
# select(individualID) %>% unique %>% nrow()
# PITA = new_sp %>% filter(taxonID == "PITA") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PIPA2") %>%
# select(individualID) %>% unique %>% nrow()
# PIPA2 = new_sp %>% filter(taxonID == "PIPA2") %>%
# filter(band_96 > 0.07) %>%
# group_by(individualID) %>% slice_min(n=3, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PIMA") %>%
# select(individualID) %>% unique %>% nrow()
# PIMA = new_sp %>% filter(taxonID == "PIMA") %>%
# #filter(band_96 > 0.07) %>%
# group_by(individualID) %>% slice_min(n=3, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PICO") %>%
# select(individualID) %>% unique %>% nrow()
# PICO = new_sp %>% filter(taxonID == "PICO") %>%
# group_by(individualID) %>% slice_min(n=5, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "PIEN") %>%
# select(individualID) %>% unique %>% nrow()
# PIEN = new_sp %>% filter(taxonID == "PIEN") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "QUST") %>%
# select(individualID) %>% unique %>% nrow()
# QUST = new_sp %>% filter(taxonID == "QUST") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "QURU") %>%
# select(individualID) %>% unique %>% nrow()
# QURU = new_sp %>% filter(taxonID == "QURU") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "TSCA") %>%
# select(individualID) %>% unique %>% nrow()
# TSCA = new_sp %>% filter(taxonID == "TSCA") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp %>% filter(taxonID == "LITU") %>%
# select(individualID) %>% unique %>% nrow()
# LITU = new_sp %>% filter(taxonID == "LITU") %>%
# group_by(individualID) %>% slice_min(n=6, order_by = delta_h) %>%
# filter(delta_h < 1)
#
# new_sp_ = new_sp %>% filter(!taxonID %in% c("ACSA3", "ACRU", "QUST", "PIEN",
# "MEPO5", "POTR5", "LITU", "QURU",
# "PICO", "PIMA", "PIPA2", "PITA", "PSME", "TSCA"))
#
# new_sp_ = rbind.data.frame(new_sp_, ACSA3, ACRU, QUST, PIEN,MEPO5, POTR5,PICO,
# PIMA, PIPA2, PITA, PSME, QURU, TSCA, LITU)
# tmp_ids = max(ids_per_taxa$Freq) #*2
# few_pix_per_id=list()
# for(sp in unique(train$taxonID)){
# tmp = new_sp %>% filter(taxonID == sp)
# #tmp_ids = max(ids_per_taxa$Freq)
# few_pix_per_id[[sp]]=tmp %>%
# group_by(individualID) %>% slice_sample(n=ceiling(tmp_ids/nrow(tmp)))
#
# }
# few_pix_per_id = do.call(rbind.data.frame, few_pix_per_id)
# few_pix_per_id = rbind.data.frame(few_pix_per_id, test)
#
# ACRU = new_sp %>% filter(taxonID == "ACRU") %>%
# group_by(individualID) %>% slice_sample(n=5)
# POTR5 = new_sp %>% filter(taxonID == "POTR5") %>%
# group_by(individualID) %>% slice_sample(n=4)
# ACSA3 = new_sp %>% filter(taxonID == "ACSA3") %>%
# group_by(individualID) %>% slice_sample(n=5)
# PIMA = new_sp %>% filter(taxonID == "PIMA") %>%
# group_by(individualID) %>% slice_sample(n=3)
# PIPA2 = new_sp %>% filter(taxonID == "PIPA2") %>%
# group_by(individualID) %>% slice_sample(n=3)
# QUST = new_sp %>% filter(taxonID == "QUST") %>%
# group_by(individualID) %>% slice_sample(n=10)
# PSME = new_sp %>% filter(taxonID == "PSME") %>%
# group_by(individualID) %>% slice_sample(n=4)
# ABLAL = new_sp %>% filter(taxonID == "ABLAL") %>%
# group_by(individualID) %>% slice_sample(n=12)
# PICO = new_sp %>% filter(taxonID == "PICO") %>%
# group_by(individualID) %>% slice_sample(n=5)
# PIEN = new_sp %>% filter(taxonID == "PIEN") %>%
# group_by(individualID) %>% slice_sample(n=10)
# PITA = new_sp %>% filter(taxonID == "PITA") %>%
# group_by(individualID) %>% slice_sample(n=14)
# TSCA = new_sp %>% filter(taxonID == "TSCA") %>%
# group_by(individualID) %>% slice_sample(n=10)
#
# replaced_over_sampled = rbind.data.frame(PIMA,ACRU, POTR5, ACSA3,
# PIPA2, QUST, PSME, ABLAL, PICO,
# PIEN, PITA)
# foo = new_sp %>% filter(!taxonID %in% unique(replaced_over_sampled$taxonID))
# foo = rbind.data.frame(replaced_over_sampled, foo)
# write_csv(new_sp_, "./data/metadata_1920.csv")
# write_csv(new_sp_[c(14, 81:422)], "./data/brdf_spectra_1920.csv")
#
# species_classification_data = me_ %>% filter(individualID %in% metadata$individualID)
#
# train = species_classification_data %>% group_by(siteID, taxonID) %>% sample_frac(0.0)
# train = species_classification_data %>% filter(plotID %in% unique(train$plotID))
# test = species_classification_data %>% filter(!plotID %in% unique(train$plotID))
# train$groupID = "train"
# test$groupID = "test"
# test = test %>% filter(siteID %in% unique(train$siteID))
# new_sp = rbind(train, test)
#
#
# write_csv(new_sp, "~/Documents/Data/Field_surveys/VST/data_for_classification.csv")
#
# f$individualID %>% unique
# Re-filter the previously written 2018/19 library files, dropping crowns
# listed in full_shaded_ids (non-live stems), then re-save.
# NOTE(review): this repeats the identical block earlier in the script —
# presumably a scratch duplicate; consider removing one copy.
tmp_dat = fread("/Volumes/Data/Spectral_library/metadata_1819.csv")
tmp_dat_ = tmp_dat %>% filter(!individualID %in% full_shaded_ids$individualID)
tmp_dat.train = tmp_dat_ %>% filter(groupID == "train")
tmp_dat.test = tmp_dat_ %>% filter(groupID == "test")
# Interactive checks of class balance after the filter.
tmp_dat.train$taxonID %>% table %>% sort
tmp_dat.test$taxonID %>% unique %>% length
write_csv(tmp_dat_, "/Volumes/Data/Spectral_library/metadata_1819_feb.csv")
tmp_dat = fread("/Volumes/Data/Spectral_library/brdf_spectra_1819.csv")
tmp_dat_ = tmp_dat %>% filter(!individualID %in% full_shaded_ids$individualID)
write_csv(tmp_dat_, "/Volumes/Data/Spectral_library/brdf_spectra_1819_feb.csv")
|
926f68e5c1380b84093100b6f40379246b0f68ee
|
be6e3356c1b1a8b6e73ce7eec50f84875471092e
|
/man/getPatientDataStartEnd.Rd
|
c6e7af8f9fe443ac4aa5f10d35786c7d1e3c8779
|
[
"Apache-2.0"
] |
permissive
|
OHDSI/Aphrodite
|
a9bdbf66e24b4faf6089c493f59db1960302cbd2
|
8dc775e128e0a81c11ca1d2486efd7a5c84a9e5d
|
refs/heads/master
| 2021-07-14T20:36:15.448860
| 2020-09-17T18:56:53
| 2020-09-17T18:56:53
| 32,827,088
| 37
| 15
|
Apache-2.0
| 2021-06-25T19:05:04
| 2015-03-24T21:46:36
|
R
|
UTF-8
|
R
| false
| true
| 1,475
|
rd
|
getPatientDataStartEnd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{getPatientDataStartEnd}
\alias{getPatientDataStartEnd}
\title{This function fetches all the patient data (generic) - from a given start date and with a given end date}
\usage{
getPatientDataStartEnd(connection, dbms, patient_ids, startDate, endDate,
flags, schema, removeDomains = c(""))
}
\arguments{
\item{connection}{The connection to the database server.}
\item{dbms}{The target DBMS for SQL to be rendered in.}
\item{patient_ids}{The list of case patient id's to extract data from - NOT a data.frame.}
\item{startDate}{The start index date for all patients}
\item{endDate}{The end date to fetch data from patients}
\item{flags}{The R dataframe that contains all feature/model flags
specified in settings.R.}
\item{schema}{The database schema being used.}
\item{removeDomains}{List of domains to not include as features, if any are specified in settings file. Defaults to \code{c("")}.}
}
\value{
An object containing the raw feature sets for the patient data.
}
\description{
This function fetches all the patient data (generic). Returns
raw patient data.
}
\details{
Based on the groups of feature sets determined in the flags
variable, this function will fetch patient data within the specified
time range. The function returns all patient information.
}
\examples{
\dontrun{
patientData <- getPatientDataStartEnd(conn, dbms, patient_ids, start_dates, end_dates, flag , cdmSchema)
}
}
|
dcb1ab1f27577f114979fd60dfcd719f343afb99
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/EWGoF/man/EDF_NS.test.Rd
|
2ddf2ee382cfc31c16865efad20cbdbdfb966214
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,618
|
rd
|
EDF_NS.test.Rd
|
\name{EDF_NS.test}
\alias{EDF_NS.test}
\title{
GoF tests based on the empirical distribution function, the normalized spacings and the probability plots for the Exponential distribution
}
\description{
Computes the Exponential GoF tests based on the empirical distribution function: the Kolmogorov-Smirnov (KS), Cramer-Von-Mises (CM) and Anderson-Darling (AD) tests, the tests based on the probability plot: Shapiro-Wilk (SW) and Patwardhan (PA) tests and the tests based on the normalized spacings: Gnedenko (Gn) and Gini (G) tests.
}
\usage{
EDF_NS.test(x, type = "AD", nsim = 200)
}
\arguments{
\item{x}{a numeric vector of data values.}
\item{type}{the type of the test statistic used. "AD" is the default used test of Anderson-Darling,"KS" for Kolmogorov-Smirnov, "CM" for Cramer-Von-Mises, "SW" for Shapiro-Wilk, "PA" for Patwardhan, "Gn" for Gnedenko and "G" for Gini test statistic.}
\item{nsim}{an integer specifying the number of replicates used in Monte Carlo.}
}
\details{
This function computes the GoF test statistics of three different families: the tests based on the empirical distribution function, the tests based on the probability plots and the tests based on the normalized spacings. The p-value of the tests is computed using Monte-Carlo simulations because only the asymptotic distribution of the previous statistics is known. Therefore the tests can be applied to small samples. }
\value{An object of class htest.}
\references{
\itemize{
\item D'Agostino R.B. and Stephens M.A., \emph{Goodness-of-fit techniques}, Marcel Dekker, 1986.
\item Gail M.H. and Gastwirth J.L., A scale-free goodness-of-fit test for the exponential distribution based on the Gini statistic, \emph{Journal of the Royal Statistical Society, Series B}, 40, 350-357, 1978.
\item Gnedenko B.V., Belyayev Y.K. and Solovyev A.D., \emph{Mathematical Models of Reliability Theory}, Academic Press, 1969.
\item Shapiro S.S. and Wilk M.B., An analysis of variance test for the exponential distribution (complete samples), \emph{Technometrics}, 14, 355-370, 1972.
\item Patwardhan G., Tests for exponentiality, \emph{Communications in Statistics, Theory and Methods}, 17, 3705-3722, 1988.
}
}
\author{
Meryam KRIT}
\examples{
x1 <- rexp(50,2)
#Apply the Kolmogorov-Smirnov test
EDF_NS.test(x1,type="KS")
x2 <- rlnorm(50,0.2)
#Apply the Patwardhan test
EDF_NS.test(x2,type="PA")
#Apply the Cramer-von Mises test
EDF_NS.test(x2,type="CM")
#Apply the Gini test
EDF_NS.test(x2,type="G")
}
\keyword{Empirical distribution function}
\keyword{Gini}
\keyword{Gnedenko}
\keyword{Shapiro-Wilk}
\keyword{Patwardhan}
|
57a0cd242fb7b1488fcb9ee71ecf91e87eca43ec
|
c2300aa02b4c2e4a53bebae27e4c38ec077b98e2
|
/plot4.R
|
04a8ab3d01de072d58717ce2f38a1d5b03f26298
|
[] |
no_license
|
danielcalcinaro/ExData_Plotting1
|
5a801f37141364f9ec916e29e2382bcafe257e2e
|
a8590520e19b1b7540497317dfbff0015adf39b3
|
refs/heads/master
| 2021-08-28T06:26:35.081742
| 2017-12-11T12:13:23
| 2017-12-11T12:13:23
| 113,708,997
| 0
| 0
| null | 2017-12-09T23:34:58
| 2017-12-09T23:34:57
| null |
UTF-8
|
R
| false
| false
| 1,381
|
r
|
plot4.R
|
# plot4.R: renders the 2x2 panel of household-power plots (course "plot 4").
library(dplyr)
# Read the raw UCI household power data; fields are ';'-separated.
# NOTE(review): this dataset marks missing values with "?", not "Not Available",
# so measurement columns load as character and the as.numeric() coercions below
# turn "?" into NA (with warnings). Confirm this accidental path is intended.
read_power<-read.table("household_power_consumption.txt",sep=";",header=TRUE,stringsAsFactors=FALSE)
# Combine the Date and Time fields into a single POSIXct timestamp column NDate.
power<-mutate(read_power,NDate=paste(Date,Time))
power$NDate<-as.POSIXct(power$NDate,format="%d/%m/%Y %H:%M:%S")
# Keep only the two target days: 2007-02-01 and 2007-02-02.
powerf<-filter(power,NDate<as.POSIXct("2007-02-03",format="%Y-%m-%d") & NDate>=as.POSIXct("2007-02-01",format="%Y-%m-%d"))
# Coerce measurement columns (read as character, see note above) to numeric.
powerf$Sub_metering_1<-as.numeric(powerf$Sub_metering_1)
powerf$Sub_metering_2<-as.numeric(powerf$Sub_metering_2)
powerf$Voltage<-as.numeric(powerf$Voltage)
powerf$Global_active_power<-as.numeric(powerf$Global_active_power)
powerf$Global_reactive_power<-as.numeric(powerf$Global_reactive_power)
# Write the four panels to plot4.png (default 480x480 device): active power,
# voltage, the three sub-metering series, and reactive power, each vs. time.
png(file="plot4.png")
par(mfrow=c(2,2),mar=c(4,4,2,1),cex=0.7)
with(powerf,plot(NDate,Global_active_power,type="l",xlab="",ylab="Global Active Power"))
with(powerf,plot(NDate,Voltage,type="l",xlab="datetime",ylab="Voltage"))
with(powerf,plot(NDate,Sub_metering_1,col="green",xlab="",ylab="Energy sub metering",type="l"))
with(powerf,lines(NDate,Sub_metering_2,col="red"))
with(powerf,lines(NDate,Sub_metering_3,col="blue"))
legend("topright",col=c("green","red","blue"),lty=c("solid","solid","solid"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
with(powerf,plot(NDate,Global_reactive_power,type="l",xlab="datetime",ylab="Global Reactive Power"))
dev.off()
|
07ffe063f87084ccac466eb607f3a93b394e1b86
|
9262e777f0812773af7c841cd582a63f92d398a4
|
/inst/userguide/figures/CS3--Cs22_hood-q3.R
|
3f32e13e65c0aaa96a881ff0790ace48c78e8a1a
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
nwfsc-timeseries/MARSS
|
f0124f9ba414a28ecac1f50c4596caaab796fdd2
|
a9d662e880cb6d003ddfbd32d2e1231d132c3b7e
|
refs/heads/master
| 2023-06-07T11:50:43.479197
| 2023-06-02T19:20:17
| 2023-06-02T19:20:17
| 438,764,790
| 1
| 2
|
NOASSERTION
| 2023-06-02T19:17:41
| 2021-12-15T20:32:14
|
R
|
UTF-8
|
R
| false
| false
| 171
|
r
|
CS3--Cs22_hood-q3.R
|
###################################################
### code chunk number 32: Cs22_hood-q3
###################################################
# Print the "hood.independent" Q-matrix specification. NOTE(review): Q.models
# is a list built in earlier chunks of this case study -- not defined here.
Q.models$hood.independent
|
57599986343a8d8d76f770f38931096f1f4a978c
|
10d8a07507b5fe24ec918736c2b2f58fcea96f57
|
/rankhospital.R
|
b728c9fd36f3a93f59499d661d5399895aa66f0f
|
[] |
no_license
|
YYCGreg/ProgramAssignment3
|
7ee15a061bd8762d76639d006040091a7807342b
|
749dca6522be0a927601d1e903221695ba1e5c9b
|
refs/heads/master
| 2022-11-05T00:37:54.355678
| 2020-06-14T13:39:15
| 2020-06-14T13:39:15
| 271,904,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,850
|
r
|
rankhospital.R
|
# Write a function called rankhospital that takes three arguments: the 2-character abbreviated name of a
# state (state), an outcome (outcome), and the ranking of a hospital in that state for that outcome (num).
# The function reads the outcome-of-care-measures.csv file and returns a character vector with the name
# of the hospital that has the ranking specified by the num argument. For example, the call
## Function identifies the hospital that matches the mortality rate rank provided for
## a given outcome
## for the defined state
## Return the name of the hospital holding rank `num` for the given `outcome`
## 30-day mortality rate within `state` (rank 1 = lowest rate; ties broken by
## hospital name). `num` may be "best", "worst", or a numeric rank. Reads
## "outcome-of-care-measures.csv" from the working directory.
rankhospital <- function(state, outcome, num = "best") {
  ## Validate the outcome before touching the file.
  valid_outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!outcome %in% valid_outcomes) {
    stop("invalid outcome")
  }
  ## A state code must be exactly two characters before we bother reading.
  if (nchar(state) == 2) {
    measures <- read.csv("outcome-of-care-measures.csv", na.strings = "Not Available")
  } else {
    stop("invalid state")
  }
  ## Map each outcome to its mortality-rate column in the raw file, then keep
  ## hospital name (col 2), state (col 7) and that rate column.
  if (state %in% measures$State) {
    rate_col <- switch(outcome,
      "heart attack" = 11,
      "heart failure" = 17,
      23  # pneumonia
    )
    ranked <- measures[, c(2, 7, rate_col)]
  } else {
    stop("invalid state")
  }
  ## Sort by state, then mortality rate, then hospital name; drop NA rates.
  ranked <- na.omit(ranked[order(ranked$State, ranked[, 3], ranked$Hospital.Name), ])
  ## Partition the ranked table by state.
  by_state <- split(ranked, ranked$State)
  ## Translate the requested rank into a row index within the chosen state.
  if (num == "worst") {
    idx <- nrow(by_state[[state]])
  } else if (num == "best") {
    idx <- 1
  } else {
    idx <- as.numeric(num)
  }
  ## Pick the idx-th hospital name per state, then return the requested state's.
  picks <- lapply(by_state, function(grp) grp[idx, 1])
  as.character(picks[state])
}
|
ed4b0de3b0a2a512b8a297461c44bf7e94fd1b87
|
6bd4081120ef7c99dfa01b18bfdc7a59fa704c85
|
/37810 assignment 2.R
|
38896d3c140a20d2eaaf467c6ced8170611e02d6
|
[] |
no_license
|
Uchicago-Stat-Comp-37810/assignment-2-KumamonYJ
|
2cad833e6ac660bb093d849b9c7860280865b9d6
|
7e039f84a60726d361b0c6602618394b6cb0b8dd
|
refs/heads/master
| 2020-04-01T16:23:25.505323
| 2018-10-22T19:08:16
| 2018-10-22T19:08:16
| 153,379,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,927
|
r
|
37810 assignment 2.R
|
#37810 Assignment 2_Yi Jin step 6
# Driver script for the Metropolis MCMC exercise. slopevalues() and
# run_metropolis_MCMC() are defined in the sourced function set below.
# NOTE(review): the hard-coded home-directory path makes this non-portable.
source('~/GitHub/assignment-2-KumamonYJ/function.set.R')
# set coefficient A (true slope of the simulated regression)
trueA <- 5
# set coefficient B (true intercept)
trueB <- 0
# set standard error of the simulated Gaussian noise
trueSd <- 10
# set sample size
sampleSize <- 31
# create independent x-values, centred on zero
x <- (-(sampleSize-1)/2):((sampleSize-1)/2)
# create dependent values according to ax + b + N(0,sd)
y <- trueA * x + trueB + rnorm(n=sampleSize,mean=0,sd=trueSd)
# plot picture of x and y, with title "Test Data"
plot(x,y, main="Test Data")
# apply function slopevalues to a grid of candidate slopes around the truth
slopelikelihoods <- lapply(seq(3, 7, by=.05), slopevalues )
# plot the grid against the corresponding log-likelihoods from slopevalues
plot (seq(3, 7, by=.05), slopelikelihoods , type="l", xlab = "values of slope parameter a", ylab = "Log likelihood")
# assign the startvalue (slope, intercept, sd)
startvalue = c(4,0,10)
# apply run_metropolis_MCMC function to the startvalue and iterate for 10000 times
chain = run_metropolis_MCMC(startvalue, 10000)
# assign burn-in time
burnIn = 5000
# drop the first burnIn rows, then estimate the acceptance rate as the share of
# non-duplicated (i.e. accepted) states among the remaining rows
acceptance = 1-mean(duplicated(chain[-(1:burnIn),]))
# Run 10 independent Metropolis chains of `iteration` steps, each from a
# randomly perturbed start value (slope ~ N(5,1), intercept fixed at 0,
# sd ~ N(10,1)), and return c(mean_1..mean_10, sd_1..sd_10) of the slope
# samples (chain column 1). Relies on run_metropolis_MCMC() from the sourced
# function set; the per-iteration RNG draw order matches the original exactly.
compare_outcomes <- function(iteration) {
  chain_means <- numeric(10)
  chain_sds <- numeric(10)
  for (k in seq_len(10)) {
    # Fresh start value for this replicate.
    slope_start <- rnorm(1, 5, 1)
    intercept_start <- 0
    sd_start <- rnorm(1, 10, 1)
    chain <- run_metropolis_MCMC(c(slope_start, intercept_start, sd_start), iteration)
    chain_means[k] <- mean(chain[, 1])
    chain_sds[k] <- sd(chain[, 1])
  }
  c(chain_means, chain_sds)
}
#print("outcome of compare_outcomes(1000)")
#compare_outcomes(1000)
#print("outcome of compare_outcomes(10000)")
#compare_outcomes(10000)
#print("outcome of compare_outcomes(100000)")
#compare_outcomes(100000)
#The first ten numbers are the means and the last ten are the standard deviations of the slope parameter a across the ten chains.
|
d416bc607b1b6b5856b57f75defde21bcc05b54d
|
9bd76e78124ab7acc42798efe389358e6c9f4676
|
/old_models/discrete_time_models/SEIRS-discrete-time-gamma.R
|
8e03ac2b7cd326d27b51749cbf4d170c3df3cef7
|
[
"MIT"
] |
permissive
|
tc13/covid-19-immunity
|
68f8f105402d4b931c5ebc167b9eaa2d923bc9a1
|
46458273e2322b2a1384d747a189db821b35111b
|
refs/heads/master
| 2023-07-05T19:03:29.562622
| 2021-08-23T08:33:18
| 2021-08-23T08:33:18
| 258,193,243
| 2
| 3
|
MIT
| 2021-04-28T16:07:58
| 2020-04-23T12:20:46
|
R
|
UTF-8
|
R
| false
| false
| 2,738
|
r
|
SEIRS-discrete-time-gamma.R
|
### Dynamics of SARS-CoV-2 with waning immunity
### Thomas Crellen thomas.crellen@bdi.ox.ac.uk, April 2020
### SEIRS discrete-time model, gamma (Erlang) distributed waiting times: the
### latent, infectious and immune periods are each split into identical
### exponential stages (shapes m, n and o, matching the E/I/R compartments).
#Clear R environment
#NOTE(review): wiping the caller's workspace is a side effect of sourcing this
#script; retained for backward compatibility.
remove(list = ls())
#parameters
sigma_recip <- 4.5 #Average duration of latent period (days) [From He, X., et al. Temporal dynamics in viral shedding and transmissibility of COVID-19. Nat Med (2020). https://doi.org/10.1038/s41591-020-0869-5]
gamma_recip <- 3.07 #Average duration of infectiousness (days) [From He, X., et al.]
R0 <- 2.8 #Basic reproduction number (in the absense of interventions) [From Petra]
beta <- R0*(1/gamma_recip) #Transmission parameter
omega_recip <- 90 #Average duration of immunity (days) [user specified]
m <- 4 #Shape paremeter, latent period (four E stages below)
n <- 2 #Shape parameter, infectious period (two I stages below)
o <- 2 #Shape parameter, immune period (two R stages below)
dt <- 0.25 #time period
#vectors
pars <- c(beta=beta, sigma=1/sigma_recip, gamma=1/gamma_recip, omega=1/omega_recip, m=m, n=n, o=o)
time <- seq(1, 300, by=dt)
# State variables
yini <- c(S=0.5, E1=0, E2=0, E3=0, E4=0, I1=0.5, I2=0, R1=0, R2=0) #Initial population size
S = E1 = E2 = E3 = E4 = I1 = I2 = R1 = R2 = N = numeric(length(time))
#Pars
beta = pars["beta"]
sigma = pars["sigma"]
gamma = pars["gamma"]
omega = pars["omega"]
m = pars["m"]
#BUG FIX: was `n = pars["m"]`, which silently set the infectious-period shape
#to 4 instead of the intended 2 declared above, distorting the dynamics.
n = pars["n"]
o = pars["o"]
S[1] = yini["S"]
E1[1] = yini["E1"]
E2[1] = yini["E2"]
E3[1] = yini["E3"]
E4[1] = yini["E4"]
I1[1] = yini["I1"]
I2[1] = yini["I2"]
R1[1] = yini["R1"]
R2[1] = yini["R2"]
N[1] = S[1] + E1[1] + E2[1] + E3[1] + E4[1] + I1[1] + I2[1] + R1[1] + R2[1]
#Forward-Euler update of the stage-structured SEIRS system.
for(i in 1:(length(time)-1)){
  t = time[i]
  S[(i+1)] = S[i] + omega*dt*R2[i]*o - beta*dt*S[i]*(I1[i]+I2[i])
  E1[(i+1)] = E1[i] + beta*dt*S[i]*(I1[i]+I2[i]) - E1[i]*sigma*dt*m
  E2[(i+1)] = E2[i] + sigma*dt*m*(E1[i] - E2[i])
  E3[(i+1)] = E3[i] + sigma*dt*m*(E2[i] - E3[i])
  E4[(i+1)] = E4[i] + sigma*dt*m*(E3[i] - E4[i])
  I1[(i+1)] = I1[i] + E4[i]*sigma*dt*m - I1[i]*gamma*dt*n
  I2[(i+1)] = I2[i] + gamma*dt*n*(I1[i]-I2[i])
  R1[(i+1)] = R1[i] + I2[i]*gamma*dt*n - omega*dt*R1[i]*o
  R2[(i+1)] = R2[i] + omega*dt*o*(R1[i] - R2[i])
  #Sanity check: the compartments of this closed population must sum to 1.
  #Base stopifnot/all.equal replaces the former testthat dependency (same
  #numerical-tolerance invariant, no third-party package needed).
  N[(i+1)] = S[(i+1)] + E1[(i+1)] + E2[(i+1)] + E3[(i+1)] + E4[(i+1)] + I1[(i+1)] + I2[(i+1)] + R1[(i+1)] + R2[(i+1)]
  stopifnot(isTRUE(all.equal(N[(i+1)], 1)))
}
#Store as data.frame (E/I/R aggregate their stages)
out <- data.frame(time=time, S=S, E=E1+E2+E3+E4, I=I1+I2, R=R1+R2)
#plot
with(out, plot(S~time, type="l"))
with(out, plot(E~time, type="l"))
with(out, plot(I~time, type="l"))
with(out, plot(R~time, type="l"))
|
1c4d60e9aabb4c629b534c4457dec4c0d58fee23
|
6460e7412441a992835418f98673a3b7bd96406f
|
/R/reformat_fun.R
|
844dc693b0718b61294f3e5bb2881c31c0b971b4
|
[] |
no_license
|
TylerGrantSmith/funflow
|
dd1b031405d95b38dd70243a8f417351080d69ff
|
975cbdb3eeb8317a0128d7b1289c013dc8d24d08
|
refs/heads/master
| 2022-11-09T22:12:03.455410
| 2020-06-24T21:13:36
| 2020-06-24T21:13:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,588
|
r
|
reformat_fun.R
|
###################################################
# regularize_comments <- function(fun) {
# env <- environment(fun)
# fun <- deparse(fun)
# #fun <- gsub("(\\s*`#`\\(\")(.*?)\\\"\\)$","\\2", fun)
# fun <- gsub("(\\s*)`#`\\(\"(\\s*)(.*?)\\\"\\)$","\\1\\3", fun)
# fun <- gsub("\\\\n","\n",fun)
# eval(parse(text=paste(fun, collapse = "\n"))[[1]],envir = env)
# }
# unnest_comments <- function(call) {
# if(!is.call(call) || identical(call[[1]], quote(`function`))) {
# return(call)
# }
#
# call0 <- lapply(call, function(x) {
# call_str <- paste(deparse(x), collapse ="\n")
# if(startsWith(call_str, "`#`(")){
# x <- list(extract_comment(x),
# clean_call(x))
# }
# x
# })
# call <- as.call(unlist(call0))
# call[] <- lapply(call, unnest_comments)
# call
# }
#
#
# # helper for unnest_comments
# extract_comment <- function(call){
# if(!is.call(call)) {
# return(NULL)
# }
# if(identical(call[[1]], quote(`#`))){
# return(call[1:2])
# }
# unlist(lapply(call, extract_comment))[[1]]
# }
#
# # helper for unnest_comments
# clean_call <- function(call){
# if(!is.call(call)) {
# return(call)
# }
# if(identical(call[[1]], quote(`#`))){
# return(call[[3]])
# }
# call[] <- lapply(call, clean_call)
# call
# }
#
# is_syntactic <- function(x){
# tryCatch({str2lang(x); TRUE},
# error = function(e) FALSE)
# }
#
# nest_comments <- function(fun, prefix){
# src <- deparse(fun, control = "useSource")
# # positions of comments
# pattern <- paste0("^\\s*", prefix)
# commented_lgl <- grepl(pattern, src)
# # positions of 1st comments of comment blocks
# first_comments_lgl <- diff(c(FALSE, commented_lgl)) == 1
# # ids of comment blocks along the lines
# comment_ids <- cumsum(first_comments_lgl) * commented_lgl
# # positions of 1st lines after comment blocks
# first_lines_lgl <- diff(!c(FALSE, commented_lgl)) == 1
# first_lines_ids <- cumsum(first_lines_lgl) * first_lines_lgl
#
# # we iterate through these ids, taking max from lines so if code ends with a
# # comment it will be ignored
# for(i in seq(max(first_lines_ids))){
# comments <- src[comment_ids == i]
# line_num <- which(first_lines_ids == i)
# line <- src[line_num]
# # we move forward character by character until we get a syntactic replacement
# # the code replacement starts with "`#`(" and we try all positions of 2nd
# # parenthese until something works, then deal with next code block
#
# j <- 0
# repeat {
# break_ <- FALSE
# j <- j+1
# line <- src[line_num]
# if(j == 1) code <- paste0("`#`('", paste(comments,collapse="\n"),"', ") else code[j] <- ""
# for(n_chr in seq(nchar(src[line_num]))){
# code[j] <- paste0(code[j], substr(line, n_chr, n_chr))
# if (n_chr < nchar(line))
# code_last_line <- paste0(code[j],")", substr(line, n_chr+1, nchar(line)))
# else
# code_last_line <- paste0(code[j],")")
# #print(code_last_line)
# src_copy <- src
# src_copy[(line_num-j+1):line_num] <- c(head(code,-1), code_last_line)
# if (is_syntactic(paste(src_copy,collapse="\n"))){
# src <- src_copy
# break_ <- TRUE
# break}
# }
# if(break_ || j == 7) break
# line_num <- line_num + 1
# }
# }
# eval(str2lang(paste(src, collapse = "\n")),envir = environment(fun))
# }
#
#
#
# repair_call <- function(call){
# if(!is.call(call)) {
# return(call)
# }
# # if
# if(call[[1]] == quote(`if`)) {
# if(!is.call(call[[3]]) || call[[3]][[1]] != quote(`{`))
# call[[3]] <- as.call(list(quote(`{`), call[[3]]))
# if(length(call) == 4 && (!is.call(call[[4]]) || call[[4]][[1]] != quote(`{`)))
# call[[4]] <- as.call(list(quote(`{`), call[[4]]))
# call[-1] <- lapply(as.list(call[-1]), repair_call)
# return(call)}
# # for
# if(call[[1]] == quote(`for`)) {
# if(!is.call(call[[4]]) || call[[4]][[1]] != quote(`{`))
# call[[4]] <- as.call(list(quote(`{`), call[[4]]))
# call[-1] <- lapply(as.list(call[-1]), repair_call)
# return(call)}
# # repeat
# if(call[[1]] == quote(`repeat`)) {
# if(!is.call(call[[2]]) || call[[2]][[1]] != quote(`{`))
# call[[2]] <- as.call(list(quote(`{`), call[[2]]))
# call[-1] <- lapply(as.list(call[-1]), repair_call)
# return(call)}
# # while
# if(call[[1]] == quote(`while`)) {
# if(!is.call(call[[3]]) || call[[3]][[1]] != quote(`{`)){
# call[[3]] <- as.call(list(quote(`{`), call[[3]]))
# }
# call[-1] <- lapply(as.list(call[-1]), repair_call)
# return(call)}
# call[] <- lapply(call, repair_call)
# call
# }
# instead of the complicated route we take, we could just substitute
# "^\\s*##(.*)" by "`#`('\\1') but this is not robust to string or ops containing prefix
# It's a lot of work to handle a rare situation but that's the simpler I got
# add_comment_calls0 <- function(fun, prefix = "##"){
# if(is.null(prefix)) return(fun)
# # attach each relevant comment to the following call
# fun <- nest_comments(fun, prefix)
# body(fun) <- repair_call(body(fun))
# body(fun) <- unnest_comments(body(fun))
# fun
# }
# Rewrite `fun` so that each whole-line comment starting with `prefix` becomes
# an explicit call `` `#`("<comment>") `` in the function body (requires srcrefs
# to be kept, via control = "useSource"). With prefix = NULL the function is
# returned untouched. NOTE(review): the rebuilt function is evaluated here, so
# its enclosing environment is this call frame, as in the original.
add_comment_calls <- function(fun, prefix = "##") {
  if (is.null(prefix)) {
    return(fun)
  }
  deparsed <- deparse(fun, width.cutoff = 500, control = "useSource")
  comment_re <- paste0("^\\s*(", prefix, ".*?)$")
  wrapped <- gsub(comment_re, "`#`(\"\\1\")", deparsed)
  eval(str2lang(paste(wrapped, collapse = "\n")))
}
|
58d30ce55c7abf71e9d2fccd304864b9cc0e8e14
|
7bed383ed8beabe77bb4286b7b47d49590698b96
|
/explore-SaiMajeti/EDA.R
|
82443dd622b4bed73621a11091a7738c32d53bbb
|
[] |
no_license
|
SaiMajeti/Data-Visualization
|
2c5f0fdb26ddc9f59023fff1a3adb9818d753b6c
|
60f6bd90ba6b0d6a97847811daa7d0d63e37a525
|
refs/heads/main
| 2023-02-05T08:17:58.437687
| 2020-12-25T23:35:57
| 2020-12-25T23:35:57
| 324,441,669
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,147
|
r
|
EDA.R
|
library(nycflights13)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(ggpubr)
library(hexbin)
library(ggstance)
#Q1 - Look at the number of cancelled flights per day. Is there a pattern? Is the proportion of cancelled flights related to the average delay?
#part 1- find number of flights scheduled vs number of flights cancelled
# A flight counts as cancelled when either dep_delay or arr_delay is NA.
flights_per_day <- flights %>%
  mutate(cancelled_flights = (is.na(dep_delay) | is.na(arr_delay))) %>%
  mutate(date = paste(month, day, year, sep = '-')) %>%
  group_by(date) %>%
  summarise(numflights = n(),
            numcancelled = sum(cancelled_flights)) %>%
  ungroup()
#plot number of flights per day vs. number of flights cancelled
ggplot()+
  geom_point(flights_per_day, mapping = aes(numflights, numcancelled))
#delay - part 1 - correlation between delay and cancellation
# Per day: proportion cancelled plus average arrival/departure delay.
cancelled_vs_delay <- flights %>%
  mutate(cancelled_flights = (is.na(dep_delay) | is.na(arr_delay))) %>%
  mutate(date = paste(month, day, year, sep = '-')) %>%
  group_by(date) %>%
  summarise(
    avg_cancelled = mean(cancelled_flights),
    avg_arr_delay = mean(arr_delay, na.rm = TRUE),
    avg_dep_delay = mean(dep_delay, na.rm = TRUE)
  ) %>%
  ungroup()
#delay - part 2 - plot to show correlation between arr_delay/dep_delay and the proportion of cancelled flights
p1 <- ggplot(cancelled_vs_delay) +
  geom_point(aes(avg_dep_delay, avg_cancelled))
p2 <- ggplot(cancelled_vs_delay) +
  geom_point(aes(avg_arr_delay, avg_cancelled))
p <- ggarrange(p1, p2, nrow = 1)
#final plot
p
#Q2 - Which flight(tailnum) has worst on-time record?
#general def. of on time --> flights which are not late are shown on-time
# point 1 - if we consider overall average delay
# Only tailnums with at least 23 landed flights are ranked.
flights_ontime <- flights %>%
  mutate(date = paste(month, day, year, sep = '-')) %>%
  #select flights that have arrival time(means they landed)
  filter(!is.na(arr_time)) %>%
  select(date, arr_delay, tailnum)%>%
  group_by(tailnum) %>%
  summarise(num_flights = n(),
            avg_arr_delay = mean(arr_delay, na.rm = TRUE)
  ) %>%
  filter(num_flights >= 23) %>%
  arrange(desc(avg_arr_delay))
# ans - N203FR, 41 flights, avg arrival delay 59.12195
flights_ontime
#point 2 - if we consider flights(tailnums) that are not late (i.e. avg_arr_delay <= 0)
# NOTE(review): here the average is computed only over on-time flights
# (arr_delay <= 0), so num_flights counts on-time arrivals only.
flights_ontime <- flights %>%
  mutate(date = paste(month, day, year, sep = '-')) %>%
  #selecting only flights that have arrival time(means they landed)
  filter(!is.na(arr_time)) %>%
  select(date, arr_delay, tailnum)%>%
  filter(!is.na(tailnum))%>%
  filter(arr_delay <= 0)%>%
  group_by(tailnum) %>%
  summarise(num_flights = n(),
            avg_arr_delay = mean(arr_delay, na.rm = TRUE)
  ) %>%
  filter(num_flights >= 23) %>%
  arrange(avg_arr_delay)
flights_ontime
# ans - N423AS, 25 flights, avg arrival delay -33.44000
#Q3 - What time of day should you fly if you want to avoid delays as much as possible?
# Top 25 scheduled hour:minute slots by average arrival delay (delayed flights only).
flight_hour <- flights %>%
  mutate(date = paste(month, day, year, sep = '-')) %>%
  #keep flights with known delays
  filter(!is.na(dep_delay), !is.na(arr_delay)) %>%
  select(date, flight, tailnum, hour, minute, dep_delay, arr_delay) %>%
  #filtering only the flights that have positive delay
  filter(dep_delay > 0 & arr_delay > 0) %>%
  #creating time column with hour and minute
  mutate(time = paste(hour, minute, sep = ':') ) %>%
  group_by(time) %>%
  summarise(avg_delay = mean(arr_delay)) %>%
  arrange(desc(avg_delay)) %>%
  filter(min_rank(desc(avg_delay)) <= 25)
flight_hour
# without considering minutes
flight_hour <- flights %>%
  mutate(date = paste(month, day, year, sep = '-')) %>%
  #keep flights with known delays
  filter(!is.na(dep_delay), !is.na(arr_delay)) %>%
  select(date, flight, tailnum, hour, minute, dep_delay, arr_delay) %>%
  #filtering only the flights that have positive delay
  filter(dep_delay > 0 & arr_delay > 0) %>%
  group_by(hour) %>%
  summarise(avg_delay = mean(arr_delay)) %>%
  arrange(desc(avg_delay))
flight_hour
# without considering positive delay
flight_hour <- flights %>%
  mutate(date = paste(month, day, year, sep = '-')) %>%
  #keep flights with known delays
  filter(!is.na(dep_delay), !is.na(arr_delay)) %>%
  select(date, flight, tailnum, hour, minute, dep_delay, arr_delay) %>%
  group_by(hour) %>%
  summarise(avg_delay = mean(arr_delay)) %>%
  arrange(desc(avg_delay))
flight_hour
#Q4 - For each destination, compute the total minutes of delay. For each flight, compute the proportion of the total delay for its destination.
# NOTE(review): this first pipeline groups by (flight, dest), so total_delay is
# per flight-destination pair, not per destination as the prompt asks; the
# second pipeline below computes destination-level proportions.
total_delay <- flights %>%
  mutate(date = paste(month, day, year, sep = '-')) %>%
  filter(arr_delay > 0) %>%
  group_by(flight, dest) %>%
  select(dest, flight, arr_delay)%>%
  mutate(total_delay = sum(arr_delay),
         delay_prop = arr_delay/total_delay
  )
total_delay
#if we consider carriers also:
proportion <- flights %>%
  filter(arr_delay > 0) %>%
  group_by(dest, carrier, flight) %>%
  summarise(total_delay = sum(arr_delay)) %>%
  group_by(dest) %>%
  mutate(
    prop = total_delay / sum(total_delay)
  ) %>%
  arrange(dest, desc(prop))
proportion
#Q5-Explore the distribution of each of the x, y, and z variables in diamonds. What do you learn? Think about a diamond and how you might decide which dimension is the length, width, and depth.
# Histogram + flipped boxplot pair for each of x, y and z.
p11 <- ggplot(diamonds) +
  geom_histogram(mapping = aes(x), na.rm = TRUE, binwidth = 0.01)+ theme_minimal()
p12 <- ggplot(diamonds, mapping = aes(x = "", y = x))+
  geom_boxplot(na.rm = TRUE) +
  coord_flip()
p1 <- ggarrange(p11, p12, ncol = 1)
p1
p21 <- ggplot(diamonds)+
  geom_histogram(mapping = aes(y), na.rm = TRUE, fill = "lightblue", binwidth = 0.01)+ theme_minimal()
p22 <- ggplot(diamonds, mapping = aes(x = "", y = y))+
  geom_boxplot(na.rm = TRUE) +
  coord_flip()+
  scale_y_continuous(limits =c(0, 60), breaks = seq(0, 60, 5))
p2 <- ggarrange(p21, p22, ncol = 1)
p2
p31 <- ggplot(diamonds)+
  geom_histogram(mapping = aes(z), na.rm = TRUE, fill = "lightgreen", binwidth = 0.01) + theme_minimal()
p32 <- ggplot(diamonds, mapping = aes(x = "", y = z))+
  geom_boxplot(na.rm = TRUE) +
  coord_flip()+
  scale_y_continuous(limits =c(0, 40), breaks = seq(0, 40, 5))
p3 <- ggarrange(p31, p32, ncol = 1)
p3
#x - length; y - width; z - depth
# Inspect extremes to spot implausible dimensions (zeros / outliers).
summary(select(diamonds, x, y, z))
filter(diamonds, min_rank(desc(x)) <= 3)
filter(diamonds, min_rank(desc(y)) <= 3)
filter(diamonds, min_rank(desc(z)) <= 3)
filter(diamonds, min_rank((x)) <= 5)
filter(diamonds, min_rank((y)) <= 5)
filter(diamonds, min_rank((z)) <= 5)
#reducing the limits
p11 <- ggplot(diamonds) +
  geom_histogram(mapping = aes(x), na.rm = TRUE, binwidth = 0.01)+ theme_minimal()+
  scale_x_continuous(limits =c(0, 10), breaks = seq(0, 10, 1))
p12 <- ggplot(diamonds, mapping = aes(x = "", y = x))+
  geom_boxplot(na.rm = TRUE) +
  coord_flip()
p1 <- ggarrange(p11, p12, ncol = 1)
p1
p21 <- ggplot(diamonds)+
  geom_histogram(mapping = aes(y), na.rm = TRUE, fill = "lightblue", binwidth = 0.01)+ theme_minimal()+
  scale_x_continuous(limits =c(0, 10), breaks = seq(0, 10, 1))
p22 <- ggplot(diamonds, mapping = aes(x = "", y = y))+
  geom_boxplot(na.rm = TRUE) +
  coord_flip()+
  scale_y_continuous(limits =c(0, 10), breaks = seq(0, 10, 2))
p2 <- ggarrange(p21, p22, ncol = 1)
p2
p31 <- ggplot(diamonds)+
  geom_histogram(mapping = aes(z), na.rm = TRUE, fill = "lightgreen", binwidth = 0.01) + theme_minimal()+
  scale_x_continuous(limits =c(0, 10), breaks = seq(0, 10, 1))
p32 <- ggplot(diamonds, mapping = aes(x = "", y = z))+
  geom_boxplot(na.rm = TRUE) +
  coord_flip()+
  scale_y_continuous(limits =c(0, 10), breaks = seq(0, 10, 2))
p3 <- ggarrange(p31, p32, ncol = 1)
p3
#Q6 - Explore the distribution of price. Do you discover anything unusual or surprising? (Hint: Carefully think about the binwidth and make sure you try a wide range of values.)
# Same histogram at progressively coarser binwidths / tighter x-limits.
summary(select(diamonds, price))
price1 <- ggplot(diamonds)+
  geom_histogram(aes(price), na.rm = TRUE, binwidth = 1)+
  scale_x_continuous(limits =c(0, 20000), breaks = seq(0, 20000, 2000))
price1
price2 <- ggplot(diamonds)+
  geom_histogram(aes(price), na.rm = TRUE, binwidth = 5)+
  scale_x_continuous(limits =c(0, 20000), breaks = seq(0, 20000, 2000))
price2
price3 <- ggplot(diamonds)+
  geom_histogram(aes(price), na.rm = TRUE, binwidth = 10)+
  scale_x_continuous(limits =c(0, 20000), breaks = seq(0, 20000, 2000))
price3
price4 <- ggplot(diamonds)+
  geom_histogram(aes(price), na.rm = TRUE, binwidth = 50)+
  scale_x_continuous(limits =c(0, 20000), breaks = seq(0, 20000, 2000))
price4
price5 <- ggplot(diamonds)+
  geom_histogram(aes(price), na.rm = TRUE, binwidth = 50)+
  scale_x_continuous(limits =c(0, 8000), breaks = seq(0, 8000, 2000))
price5
price6 <- ggplot(diamonds)+
  geom_histogram(aes(price), na.rm = TRUE, binwidth = 50)+
  scale_x_continuous(limits =c(0, 3000), breaks = seq(0, 3000, 300))
price6
#Q7- How many diamonds are 0.99 carat? How many are 1 carat? What do you think is the cause of the difference?
carat <- diamonds %>%
  filter(carat == 0.99 | carat == 1) %>%
  group_by(carat) %>%
  summarise(count_0.99 = n())
carat
# Per-carat counts and price summaries across the whole dataset.
carat_price <- diamonds %>%
  group_by(carat) %>%
  summarise(count = n(),
            min_price = min(price),
            max_price = max(price),
            avg_price = mean(price)
  ) %>%
  arrange(carat)
carat_price
smallset <- carat_price %>%
  filter(carat >= 0.9, carat <= 1)
smallset
#Q8 - What variable in the diamonds dataset is most important for predicting the price of a diamond? How is that variable correlated with cut? Why does the combination of those two relationships lead to lower quality diamonds being more expensive?
# Carat of the diamond is measured from the dimensions of the diamond. Hence, the other variables cut, color, clarity along with carat can be considered as the factors that effect the price of the diamond.
#Correlation between each variable and price
#2 continuous variables - scatter/hex/box plots
#1 Carat
price_vs_carat1 <- ggplot(diamonds)+
  geom_hex(aes(carat, price))
price_vs_carat1
#hard to see - try boxplot
price_vs_carat2 <- ggplot(diamonds, aes(carat, price))+
  geom_boxplot(aes(group = cut_width(carat, 0.1)))
price_vs_carat2
#2 - Cut1
#one cont. one categorical
price_vs_cut <- ggplot(data = diamonds, mapping = aes(x = cut, y = price)) +
  geom_boxplot()
price_vs_cut
#freq. poly.
# NOTE(review): `..density..` is superseded by after_stat(density) in
# ggplot2 >= 3.4; still works but emits a deprecation warning.
price_vs_cut1 <- ggplot(data = diamonds, mapping = aes(x = price, y = ..density..)) +
  geom_freqpoly(mapping = aes(colour = cut), binwidth = 500)
price_vs_cut1
#3 - Color
#categorical and cont.
#boxplot
price_vs_color2 <- ggplot(diamonds)+
  geom_boxplot(aes(color, price))
price_vs_color2
#price increasing as the color - quality worsens
#4- Clarity
price_vs_clarity <- ggplot(diamonds)+
  geom_boxplot(aes(clarity, price))
price_vs_clarity
#5 - carat vs. cut
# carat (continuous) against cut (ordered factor)
carat_cut <- ggplot(diamonds)+
  geom_boxplot(aes(cut, carat))+coord_flip()
carat_cut
#Extracredit
#coord_flip()
carat_cut <- ggplot(diamonds)+
  geom_boxplot(aes(cut, carat))+
  coord_flip()
carat_cut
#ggstance (horizontal boxplot without coord_flip)
carat_cut2 <- ggplot(diamonds)+
  geom_boxploth(aes(carat, cut))
carat_cut2
#Q9 - How could you rescale the count dataset above to more clearly show the distribution of cut within colour, or colour within cut?
#given
count_dataset <- diamonds %>%
  count(color, cut)
count_dataset
#rescaling from count to proportion/density
# Cut within colour
# NOTE(review): this groups by cut and computes n/sum(n), i.e. the proportion
# of each colour *within* each cut -- opposite of the label; confirm intent.
rescale_cutWcol <- diamonds %>%
  count(color, cut)%>%
  group_by(cut)%>%
  mutate(density = n/sum(n))
rescale_cutWcol
cutWcol <- ggplot(rescale_cutWcol)+
  geom_tile(aes(color, cut, fill = density))+
  scale_fill_distiller(palette = "RdYlBu", limits = c(0,1))
cutWcol
# Colour within cut
# NOTE(review): symmetrically, this groups by color, giving proportions of
# each cut within each colour.
rescale_colWcut <- diamonds %>%
  count(color, cut)%>%
  group_by(color)%>%
  mutate(density = n/sum(n))
rescale_colWcut
colWcut <- ggplot(rescale_colWcut)+
  geom_tile(aes(color, cut, fill = density))+
  scale_fill_distiller(palette = "RdYlBu", limits = c(0,1))
colWcut
#Q10- Use geom_tile() together with dplyr to explore how average flight delays vary by destination and month of year. What makes the plot difficult to read? How could you improve it?
plot1 <- flights %>%
  select(year, month, day, dest, dep_delay, arr_delay)%>%
  group_by(month, dest)%>%
  summarise(avg_dep_delay = mean(dep_delay, na.rm = TRUE),
            avg_arr_delay = mean(arr_delay, na.rm = TRUE))
plot_dep <- ggplot(plot1)+
  geom_tile(aes(factor(month), dest, fill = avg_dep_delay))
plot_dep
plot_arr <- ggplot(plot1)+
  geom_tile(aes(factor(month), dest, fill = avg_arr_delay))
plot_arr
#improvise
# Keep only destinations served in all 12 months so the tile grid is complete.
improvise <- flights %>%
  select(year, month, dest, dep_delay, arr_delay) %>%
  filter(!is.na(arr_delay), !is.na(dep_delay)) %>%
  group_by(month, dest)%>%
  filter(dep_delay > 0, arr_delay > 0) %>%
  summarise(avg.dep.delay = mean(dep_delay, na.rm = TRUE),
            avg.arr.delay = mean(arr_delay, na.rm = TRUE)
  ) %>%
  ungroup() %>%
  group_by(dest) %>%
  filter(n() == 12)
#view(improvise)
p1 <- ggplot(improvise) +
  geom_tile(aes(factor(month), dest, fill = avg.arr.delay))+
  scale_fill_distiller(palette = "RdYlGn")+
  xlab("month") + ylab("destination") +
  ggtitle(label = "Arrival Delay")
p1
p2 <- ggplot(improvise) +
  geom_tile(aes(factor(month), dest, fill = avg.dep.delay)) +
  scale_fill_distiller(palette = "RdYlGn") +
  xlab("month") + ylab("destination")
p2
#Q11 - Instead of summarising the conditional distribution with a boxplot, you could use a frequency polygon. What do you need to consider when using cut_width() vs cut_number()? How does that impact a visualisation of the 2d distribution of carat and price?
cutnumber <- ggplot(data = diamonds) +
  geom_freqpoly(aes(color = cut_number(carat, 5), x = price)) +
  labs(color = "Carat")
cutnumber
cutwidth <- ggplot(diamonds)+
  geom_freqpoly(aes(price, color = cut_width(carat, 1.5, boundary = 0))) +
  labs(color = "Carat")
cutwidth
#Q12 - Visualise the distribution of carat, partitioned by price.
#Carat, price - 2 cont.
#scatter/box
#too many vars/ can't show distribution using scatterplot
distribution <- ggplot(diamonds)+
  geom_boxploth(aes(carat, cut_number(price, 10)))+
  labs(y = "Price", x = "Carat")
distribution
#Q13 - How does the price distribution of very large diamonds compare to small diamonds? Is it as you expect, or does it surprise you?
price_distribution <- diamonds %>%
  arrange(desc(price)) %>%
  select(carat, depth, price, x, y, z)
price_distribution
summary(select(diamonds, price, carat, depth, x, y, z))
#Q14 - Combine two of the techniques you've learned to visualise the combined distribution of cut, carat, and price.
technique_boxplot <- ggplot(diamonds, aes(x = cut_width(carat, 0.8), y = price, colour = cut))+
  geom_boxplot()
technique_boxplot
technique_hexbin <- ggplot(diamonds)+
  geom_hex(aes(carat, price, fill = cut), alpha = 1/3, na.rm = TRUE)+
  scale_y_continuous(limits = c(0, 20000), breaks = seq(0, 20000, 2000))
technique_hexbin
#Q15 - Two dimensional plots reveal outliers that are not visible in one dimensional plots. For example, some points in the plot below have an unusual combination of x and y values, which makes the points outliers even though their x and y values appear normal when examined separately. ... Why is a scatterplot a better display than a binned plot for this case?
binned_boxplot <- ggplot(diamonds)+
  geom_boxploth(aes(x, cut_width(y, 1.5)))
binned_boxplot
scatterplot <- ggplot(data = diamonds) +
  geom_point(mapping = aes(x = x, y = y)) +
  coord_cartesian(xlim = c(4, 11), ylim = c(4, 11))
scatterplot
|
d3e1325a646d460662824bbb60dfcceece4bf45d
|
b65abc4f3b8395a8cd1ad037055f1dfc4693cb94
|
/man/pathway_names.Rd
|
2823a1f7c01de0f2a16262cca6c93deeb4dedaae
|
[] |
no_license
|
cran/TPEA
|
6aeabcb2d4fb9bb493a3804335f37b4474801346
|
a5c1c4273bac35b2ef8ab0ff1f0367ac48b26f70
|
refs/heads/master
| 2021-01-18T17:40:12.141616
| 2017-06-25T14:42:32
| 2017-06-25T14:42:32
| 71,983,452
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 177
|
rd
|
pathway_names.Rd
|
\name{pathway_names}
\alias{pathway_names}
\docType{data}
\title{Pathway names in KEGG Database}
\description{All pathway names we used in this method}
\keyword{datasets}
|
193af8dceebfab081f61feeeb7da0a4de65013ba
|
baebf17a7eb0f8c8d1bbb1eb5ab3256308576f11
|
/man/svgPanZoom.Rd
|
1ce5e2d3957b7281362d317193a7df893f92d8f5
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
arturochian/svgPanZoom
|
44fe7ab29bc11c80fbac01f9b9fb096434494858
|
32146c2b2a5c0de0e819906d3f96fe7a6176feab
|
refs/heads/master
| 2020-12-27T15:28:43.668326
| 2015-01-12T23:10:45
| 2015-01-12T23:10:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 296
|
rd
|
svgPanZoom.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{svgPanZoom}
\alias{svgPanZoom}
\title{Pan and Zoom R graphics}
\usage{
svgPanZoom(svg, ..., width = NULL, height = NULL)
}
\description{
Add panning and zooming to almost any R graphic and,
eventually, to other htmlwidgets as well.
}
|
53242397b412e505a0a73978cc84541307b7162a
|
ac42cb637f8a5b15d6dbab9b5b5ccd6470102f17
|
/Codes/concat_and_gapfill_climate_data_from_Kenya.R
|
889ddaf845f8b7f57e074e4972ec21a9bd860b52
|
[] |
no_license
|
rsippy/EVP
|
7e6feff73e60bedba222b94b23266e3875c1eb16
|
3ef3302d7ab7b212aeca0ae1270ff3f66127eebc
|
refs/heads/master
| 2020-06-14T21:10:22.799880
| 2019-06-14T06:30:12
| 2019-06-14T06:30:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,348
|
r
|
concat_and_gapfill_climate_data_from_Kenya.R
|
# concatenate climate data from redcap and gapfill missing logger data --------
# Builds a wide per-date table of logger (HOBO) and NOAA weather data for four
# Kenyan study sites, then gapfills missing values in the sections below.
rm(list=ls()) #remove previous variable assignments  # NOTE(review): rm(list=ls()) in a script is fragile; consider removing
# load libraries
library(tidyverse)
library(plyr)
# load and format data redcap data ---------------------------------------------------------------------
# import redcap data as 'redcap_clim_vec'
# NOTE(review): the sourced script is actually expected to create
# `redcap_climate` (used below), not 'redcap_clim_vec' -- confirm.
source("Codes/REDCap_import_climate_data.R")
# create site and date variables
# Site name is the REDCap event name minus its "_arm_1" suffix.
redcap_climate$site <- gsub("_arm_1", "", redcap_climate$redcap_event_name)
redcap_climate$Date <- redcap_climate$date_collected
# subset data to weather variables of interest
climate_subset <- redcap_climate[,c("Date", "site", "temp_mean_hobo", "rainfall_hobo", "daily_rainfall", "rh_mean_hobo")]
# load and format NOAA GSOD (Global Summary of the Day) from ------------------------------------------
# https://www7.ncdc.noaa.gov/CDO/cdoselect.cmd?datasetabbv=GSOD&countryabbv&georegionabbv
kisumu_gsod <- read.delim("Kenya/Climate/GSOD_Kisumu.txt", header = TRUE, sep = ",")
mombasa_gsod <- read.delim("Kenya/Climate/GSOD_Mombasa.txt", header = TRUE, sep = ",")
# format temperature and site
# GSOD TEMP is in Fahrenheit; convert to Celsius, and parse YEARMODA
# (YYYYMMDD integer) into a Date column.
gsod.files <- list(kisumu_gsod, mombasa_gsod)
gsod.files <-lapply(gsod.files, function(x) cbind(x, mean_temp_gsod = (x$TEMP-32)*(5/9)))
gsod.files <-lapply(gsod.files, function(x) cbind(x, Date = as.Date(paste(substr(x$YEARMODA, 1, 4), substr(x$YEARMODA, 5, 6), substr(x$YEARMODA, 7, 8), sep="-"), "%Y-%m-%d")))
# split, subset data, and merge data
kisumu_gsod <- gsod.files[[1]][,c("Date", "mean_temp_gsod")]
mombasa_gsod <- gsod.files[[2]][,c("Date", "mean_temp_gsod")]
wide_data <- merge(kisumu_gsod, mombasa_gsod, by="Date", all=T)
colnames(wide_data) <- c("Date", "kisumu_mean_temp_gsod", "mombasa_mean_temp_gsod")
# reshape from long to wide
# One set of weather columns per site, prefixed with the site name
# (assumes the weather variables sit in columns 2:5 -- TODO confirm).
sites <- unique(climate_subset$site)
for (i in 1:length(sites)){
x <- subset(climate_subset, site == sites[i])
x <- x[, !names(x) %in% c("site")]
colnames(x)[2:5] <- paste(sites[i], colnames(x)[2:5], sep='_')
wide_data <- merge(wide_data, x, by="Date", all=T)
}
# make sure every date is included ------------------------------------------------------------------
# Outer-join against a complete daily calendar, then clip to the study window.
minDate <- as.Date('2013-06-01', '%Y-%m-%d')
maxDate <- as.Date('2019-02-11', '%Y-%m-%d')
dates <- as.data.frame(seq.Date(minDate, maxDate, by=1))
colnames(dates)[1] <- "Date"
wide_data <- merge(wide_data, dates, by="Date", all=T)
wide_data <- subset(wide_data, Date >= minDate & Date <= maxDate)
# fill in missing temperature data ------------------------------------------------------------------
# Strategy: regress each site's logger temperature on a nearby logger (first
# fallback) and on the regional NOAA GSOD station (second fallback), then use
# the fitted values to fill dates where the logger record is missing.
# Chulaimbo
fill_ch_w_hosp <- lm(chulaimbo_village_temp_mean_hobo ~ chulaimbo_hospital_temp_mean_hobo, data = wide_data)
fill_ch_w_gsod <- lm(chulaimbo_village_temp_mean_hobo ~ kisumu_mean_temp_gsod, data = wide_data)
wide_data$chulaimbo_Temperature <- ifelse(!is.na(wide_data$chulaimbo_village_temp_mean_hobo), wide_data$chulaimbo_village_temp_mean_hobo, round(coef(fill_ch_w_hosp)[[1]] + coef(fill_ch_w_hosp)[[2]] * wide_data$chulaimbo_hospital_temp_mean_hobo, 1))
wide_data$chulaimbo_Temperature <- ifelse(!is.na(wide_data$chulaimbo_Temperature), wide_data$chulaimbo_Temperature, round(coef(fill_ch_w_gsod)[[1]] + coef(fill_ch_w_gsod)[[2]] * wide_data$kisumu_mean_temp_gsod, 1))
# Kisumu
# Values below 18 C are treated as logger errors and discarded before fitting.
wide_data$kisumu_estate_temp_mean_hobo <- ifelse(wide_data$kisumu_estate_temp_mean_hobo < 18, NA, wide_data$kisumu_estate_temp_mean_hobo) # remove suspect temperature values
fill_ki_w_obama <- lm(kisumu_estate_temp_mean_hobo ~ obama_temp_mean_hobo, data = wide_data)
fill_ki_w_gsod <- lm(kisumu_estate_temp_mean_hobo ~ kisumu_mean_temp_gsod, data = wide_data)
wide_data$kisumu_Temperature <- ifelse(!is.na(wide_data$kisumu_estate_temp_mean_hobo), wide_data$kisumu_estate_temp_mean_hobo, round(coef(fill_ki_w_obama)[[1]] + coef(fill_ki_w_obama)[[2]] * wide_data$obama_temp_mean_hobo, 1))
wide_data$kisumu_Temperature <- ifelse(!is.na(wide_data$kisumu_Temperature), wide_data$kisumu_Temperature, round(coef(fill_ki_w_gsod)[[1]] + coef(fill_ki_w_gsod)[[2]] * wide_data$kisumu_mean_temp_gsod, 1))
# Msambweni
fill_ms_w_uk <- lm(msambweni_temp_mean_hobo ~ ukunda_temp_mean_hobo, data = wide_data)
fill_ms_w_gsod <- lm(msambweni_temp_mean_hobo ~ mombasa_mean_temp_gsod, data = wide_data)
wide_data$msambweni_Temperature <- ifelse(!is.na(wide_data$msambweni_temp_mean_hobo), wide_data$msambweni_temp_mean_hobo, round(coef(fill_ms_w_uk)[[1]] + coef(fill_ms_w_uk)[[2]] * wide_data$ukunda_temp_mean_hobo, 1))
wide_data$msambweni_Temperature <- ifelse(!is.na(wide_data$msambweni_Temperature), wide_data$msambweni_Temperature, round(coef(fill_ms_w_gsod)[[1]] + coef(fill_ms_w_gsod)[[2]] * wide_data$mombasa_mean_temp_gsod, 1))
# Ukunda
# Values at or above 34 C are treated as logger errors and discarded.
wide_data$ukunda_temp_mean_hobo <- ifelse(wide_data$ukunda_temp_mean_hobo >= 34, NA, wide_data$ukunda_temp_mean_hobo) # remove suspect temperature values
fill_uk_w_gsod <- lm(ukunda_temp_mean_hobo ~ mombasa_mean_temp_gsod, data = wide_data)
wide_data$ukunda_Temperature <- ifelse(!is.na(wide_data$ukunda_temp_mean_hobo), wide_data$ukunda_temp_mean_hobo, round(coef(fill_uk_w_gsod)[[1]] + coef(fill_uk_w_gsod)[[2]] * wide_data$mombasa_mean_temp_gsod, 1))
# fill in missing rainfall data -------------------------------------------------------------------
# calculate 30 days aggregated rainfall values
# Per site, a trailing 30-day rainfall sum from the logger and from the NOAA
# daily record; the NOAA sum is used below to regress/fill missing logger sums.
wide_data$chulaimbo_rainfall_hobo <- wide_data$chulaimbo_village_rainfall_hobo
wide_data$chulaimbo_daily_rainfall <- wide_data$chulaimbo_hospital_daily_rainfall
sites2 <- c("chulaimbo", "obama", "msambweni", "ukunda")
for (j in 1:length(sites2)){
wide_data[paste0("Monthly_rainfall_", sites2[j])] <- NA
wide_data[paste0("Monthly_rainfall_", sites2[j], "_noaa")] <- NA
for (k in 30:nrow(wide_data)){
wide_data[k,paste0("Monthly_rainfall_", sites2[j])] <- sum(wide_data[(k-29):k, paste0(sites2[j], "_rainfall_hobo")])
wide_data[k,paste0("Monthly_rainfall_", sites2[j], "_noaa")] <- sum(wide_data[(k-29):k, paste0(sites2[j], "_daily_rainfall")])
}
}
# Chulaimbo
fill_ch_w_noaa <- lm(Monthly_rainfall_chulaimbo ~ Monthly_rainfall_chulaimbo_noaa, data = wide_data)
wide_data$chulaimbo_Rainfall <- ifelse(!is.na(wide_data$Monthly_rainfall_chulaimbo), wide_data$Monthly_rainfall_chulaimbo, round(coef(fill_ch_w_noaa)[[1]] + coef(fill_ch_w_noaa)[[2]] * wide_data$Monthly_rainfall_chulaimbo_noaa, 1))
# Kisumu
fill_ki_w_noaa <- lm(Monthly_rainfall_obama ~ Monthly_rainfall_obama_noaa, data = wide_data)
wide_data$kisumu_Rainfall <- ifelse(!is.na(wide_data$Monthly_rainfall_obama), wide_data$Monthly_rainfall_obama, round(coef(fill_ki_w_noaa)[[1]] + coef(fill_ki_w_noaa)[[2]] * wide_data$Monthly_rainfall_obama_noaa, 1))
# Msambweni
fill_ms_w_noaa <- lm(Monthly_rainfall_msambweni ~ Monthly_rainfall_msambweni_noaa, data = wide_data)
wide_data$msambweni_Rainfall <- ifelse(!is.na(wide_data$Monthly_rainfall_msambweni), wide_data$Monthly_rainfall_msambweni, round(coef(fill_ms_w_noaa)[[1]] + coef(fill_ms_w_noaa)[[2]] * wide_data$Monthly_rainfall_msambweni_noaa, 1))
# Ukunda
fill_uk_w_noaa <- lm(Monthly_rainfall_ukunda ~ Monthly_rainfall_ukunda_noaa, data = wide_data)
wide_data$ukunda_Rainfall <- ifelse(!is.na(wide_data$Monthly_rainfall_ukunda), wide_data$Monthly_rainfall_ukunda, round(coef(fill_uk_w_noaa)[[1]] + coef(fill_uk_w_noaa)[[2]] * wide_data$Monthly_rainfall_ukunda_noaa, 1))
# fill in missing humidity data -------------------------------------------------------------------
# Long-term mean relative humidity for each calendar day (month-day),
# used to fill dates where the logger record is missing.
wide_data$Month_Day <- format(wide_data$Date, "%m-%d")
# BUG FIX: the original code called round(mean(x, na.rm=T), mean(y, na.rm=T)),
# i.e. it passed the second logger's mean as the `digits` argument of round()
# instead of averaging the two loggers. For the two-logger sites (Chulaimbo,
# Kisumu) the climatology now pools both loggers' observations before rounding.
humidityMeans <- ddply(wide_data, .(Month_Day), summarize
                       , chulaimbo_rh_ltm = round(mean(c(chulaimbo_hospital_rh_mean_hobo, chulaimbo_village_rh_mean_hobo), na.rm = TRUE))
                       , kisumu_rh_ltm = round(mean(c(obama_rh_mean_hobo, kisumu_estate_rh_mean_hobo), na.rm = TRUE))
                       , msambweni_rh_ltm = round(mean(msambweni_rh_mean_hobo, na.rm = TRUE))
                       , ukunda_rh_ltm = round(mean(ukunda_rh_mean_hobo, na.rm = TRUE)))
wide_data <- merge(wide_data, humidityMeans, by="Month_Day", all=T)
# Prefer the observed logger value; fall back to the day-of-year climatology.
wide_data$chulaimbo_Humidity <- ifelse(!is.na(wide_data$chulaimbo_village_rh_mean_hobo), wide_data$chulaimbo_village_rh_mean_hobo, wide_data$chulaimbo_rh_ltm)
wide_data$kisumu_Humidity <- ifelse(!is.na(wide_data$obama_rh_mean_hobo), wide_data$obama_rh_mean_hobo, wide_data$kisumu_rh_ltm)
wide_data$msambweni_Humidity <- ifelse(!is.na(wide_data$msambweni_rh_mean_hobo), wide_data$msambweni_rh_mean_hobo, wide_data$msambweni_rh_ltm)
wide_data$ukunda_Humidity <- ifelse(!is.na(wide_data$ukunda_rh_mean_hobo), wide_data$ukunda_rh_mean_hobo, wide_data$ukunda_rh_ltm)
# calculate 30 day aggregated rainfall values -----------------------------------------------------
# For each site: trailing 30-day rainfall sum, a recency-weighted sum
# (weight 1/d for rain d days ago), and the count of rainy days (> 2.5 mm);
# also patches the few remaining missing temperatures with a +/-2-day mean.
wide_data <- wide_data[order(wide_data$Date),]
sites2 <- c("chulaimbo", "kisumu", "msambweni", "ukunda")
for (j in 1:length(sites2)){
weatherdf <- wide_data[, c("Date", paste0(sites2[j], "_Temperature"), paste0(sites2[j], "_Rainfall"), paste0(sites2[j], "_Humidity"))]
weatherdf$Monthly_rainfall <- NA
weatherdf$Monthly_rainfall_weighted <- NA
weatherdf$Monthly_rainy_days_25 <- NA
for (k in 30:nrow(weatherdf)){
rainSub <- subset(weatherdf, Date >= Date[k] - 29 & Date <= Date[k])
# exDec: exponential-style decay weight 1/d, d = days before the current date
rainSub$exDec <- 30:1
rainSub$exDec <- rainSub[,paste0(sites2[j], "_Rainfall")] * (1/rainSub$exDec)
weatherdf$Monthly_rainfall[k] <- sum(rainSub[paste0(sites2[j], "_Rainfall")])
weatherdf$Monthly_rainfall_weighted[k] <- sum(rainSub$exDec)
weatherdf$Monthly_rainy_days_25[k] <- sum(rainSub[paste0(sites2[j], "_Rainfall")] > 2.5)
if (is.na(weatherdf[k,paste0(sites2[j], "_Temperature")])){
# fill in the few missing temperature days with the mean of the 2 days before and after date with missing data
# NOTE(review): at k near nrow(weatherdf) the (k-2):(k+2) index runs past the
# table; the out-of-range rows come back as NA and na.rm=T drops them -- confirm intended.
weatherdf[k,paste0(sites2[j], "_Temperature")] <- mean(weatherdf[(k-2):(k+2),paste0(sites2[j], "_Temperature")], na.rm=T)
}
}
# Drop the site prefix from column names and record the site as a column,
# then stash the per-site frame under the site's name via assign().
colnames(weatherdf) <- gsub(paste0(sites2[j], "_"), "", colnames(weatherdf))
weatherdf$Site <- paste0(toupper(substr(sites2[j],1,1)), substr(sites2[j],2,nchar(sites2[j])))
weatherdf <- weatherdf[30:nrow(weatherdf),]
assign(sites2[j], weatherdf)
}
# merge data into long format and save -----------------------------------------------------------
weatherdf <- do.call(rbind, list(chulaimbo, kisumu, msambweni, ukunda))
write.csv(weatherdf, "Concatenated_Data/climate_data/gapfilled_climate_data_Kenya.csv", row.names = F)
|
2dff0b0c6e8ad009199b1705ddf8e45fe66ab960
|
922fe381b8ea87a77dd6011fb875d0c13dee325e
|
/cum_flux_summaries_GWP.R
|
5d1d61461dbf8147ce76404b25604b40ae970d6b
|
[] |
no_license
|
pz10/all_incubations
|
94e36a596c716a8eee00b4fab4da9bb3c34ee86f
|
75dd3f03709652cf4ccf50e5516f1c4ee074474a
|
refs/heads/master
| 2021-01-19T00:43:38.271134
| 2017-04-04T15:40:37
| 2017-04-04T15:40:37
| 87,204,294
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,864
|
r
|
cum_flux_summaries_GWP.R
|
# Work on a copy so the source table `data` is left untouched (data.table
# reference semantics would otherwise propagate the := edits below).
mydata <- copy(data)
# calculate GWP CO2 eq
# CH4 with NAs replaced by 0 so the GWP sum is defined for all cores.
mydata[, CH4.4replicates:= CH4]
mydata[is.na(CH4.4replicates), CH4.4replicates:= 0]
# 100-year global warming potentials (presumably IPCC AR4 values -- confirm).
gwpCH4 <- 25
gwpN2O <- 298
# Convert each gas to CO2-equivalents expressed on a mass-of-carbon basis:
# CH4: ug C -> mg CH4 (x16/12 /1000), apply GWP, back to C basis (x12/44).
# N2O: ug N -> mg N2O (x44/28 /1000), apply GWP, back to C basis (x12/44).
mydata[,GWP.CO2:= CO2]
mydata[,GWP.CH4:= CH4.4replicates/1000*16/12 * gwpCH4 * 12/44]
mydata[,GWP.N2O:= N2O/1000*44/28 * gwpN2O * 12/44]
mydata[,GWP.all:= GWP.CO2 + GWP.CH4 + GWP.N2O]
# Convert the cumulative total to a mean hourly CO2 flux rate:
# /14/3/24 presumably = 3 x 14-day incubations in hours -- TODO confirm.
mydata[,GWP.CO2eq.mgCo2m2h:= GWP.all / 14/3/24 * 1000 * 44/12]
# summaries ----
# Flux variables summarised in every table below. The order is fixed so the
# output column order (mean, se, mean, se, ...) matches the tables written
# to disk by the write.summaries section.
flux.vars <- c("NO", "N2O", "NH3", "CO2", "CH4", "NO.N2O", "NO.N2O.NH3",
               "NO.N2O.NH3filled", "GWP.CO2", "GWP.CH4", "GWP.N2O",
               "GWP.all", "GWP.CO2eq.mgCo2m2h")

# Mean and standard error of every flux variable, optionally grouped by the
# character vector of column names in `by`. NAs are dropped and the se
# denominator is the number of non-NA observations, exactly as in the
# original nine near-identical hand-written data.table calls this replaces.
flux.summary <- function(dt, by = NULL) {
  dt[, {
    out <- list()
    for (v in flux.vars) {
      x <- .SD[[v]]
      out[[v]] <- mean(x, na.rm = TRUE)
      out[[paste0(v, ".se")]] <- sd(x, na.rm = TRUE) / sqrt(sum(!is.na(x)))
    }
    out
  }, .SDcols = flux.vars, by = by]
}

s.total     <- flux.summary(mydata)                 # grand means over all cores
s.treat     <- flux.summary(mydata, "treatment")
s.till      <- flux.summary(mydata, "tillage")
s.fert      <- flux.summary(mydata, "fertilization")
s.rain      <- flux.summary(mydata, "precipitation")
s.fert.till <- flux.summary(mydata, c("fertilization", "tillage"))
s.fert.rain <- flux.summary(mydata, c("fertilization", "precipitation"))
s.till.rain <- flux.summary(mydata, c("tillage", "precipitation"))

# Per-core values (no averaging): identifier columns first, then the raw
# flux columns, grouped exactly as the original call.
s.core <- mydata[, .SD, .SDcols = flux.vars,
                 by = .(fertilization, precipitation, tillage, incubation, treatment)]
################################################################################
# write.summaries
# Format each summary table (2 decimals for standard-error columns, 4 for
# mean columns, identifier columns untouched) and write it to folder.GWP as
# a tab-separated .dat file. Replaces nine near-identical copy/format/write
# chunks.
# BUG FIX: the original by-core chunk misspelled "incubation" ("incabation"),
# so the incubation identifier column was run through formatC() like a
# numeric column; it is now correctly excluded from formatting.
write.summary <- function(dt, filename) {
  out <- copy(dt)  # avoid mutating the caller's table (data.table := is by reference)
  id.cols <- c("incubation", "treatment", "fertilization", "precipitation", "tillage")
  se.cols <- grep("\\.se$", names(out), value = TRUE)
  mean.cols <- setdiff(names(out), c(se.cols, id.cols))
  if (length(se.cols) > 0) {
    out[, (se.cols) := lapply(.SD, function(x) formatC(x, format = "f", digits = 2)), .SDcols = se.cols]
  }
  out[, (mean.cols) := lapply(.SD, function(x) formatC(x, format = "f", digits = 4)), .SDcols = mean.cols]
  write.table(out, file = paste0(folder.GWP, "/", filename),
              row.names = FALSE, sep = "\t", quote = FALSE)
}

write.summary(s.core,      "GWPcumFlux_bycore.dat")
write.summary(s.treat,     "GWPcumFlux_bytreat.dat")
write.summary(s.till,      "GWPcumFlux_bytillage.dat")
write.summary(s.fert,      "GWPcumFlux_byfert.dat")
write.summary(s.rain,      "GWPcumFlux_byrain.dat")
write.summary(s.fert.till, "GWPcumFlux_by_fert_till.dat")
write.summary(s.fert.rain, "GWPcumFlux_by_fert_rain.dat")
write.summary(s.till.rain, "GWPcumFlux_by_till_rain.dat")
write.summary(s.total,     "GWPcumFlux.dat")
###
rm(mydata)
|
2dc368d4fd8fad526bffb0eb9521f5f81ea4469f
|
1a50b4f1ec326c3c876071f7455b623abf5e84c3
|
/man/summary.binomscreenr.Rd
|
024e02b265c729a1f4e1a18af8b485d835c0dcbd
|
[] |
no_license
|
larajiuk/screenr
|
671d3201c8a6d2d269d4c236afef7558cc64e7ab
|
d5dc80934258f3f68350c0318ccf31d4617e952f
|
refs/heads/master
| 2022-10-20T16:24:50.804955
| 2020-06-23T19:56:52
| 2020-06-23T19:56:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 664
|
rd
|
summary.binomscreenr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomialScreening.R
\name{summary.binomscreenr}
\alias{summary.binomscreenr}
\title{Print Summaries of \code{binomscreenr} Objects}
\usage{
\method{summary}{binomscreenr}(object, diagnostics = FALSE, ...)
}
\arguments{
\item{object}{an object of class \code{binomscreenr} produced by function
\code{binomialScreening}.}
\item{diagnostics}{a logical value; plot model diagnostics if \verb{TRUE}.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
Nothing. Summaries are printed as a side effect.
}
\description{
Print Summaries of \code{binomscreenr} Objects
}
|
9086b2fdf157d2881219ff9baa61bc37c01aef79
|
ffa84a5066a4b6c780abe9dad26b08f9ac5416e1
|
/package/binomial/tests/testthat/test-summary.R
|
b8ffe47dbca7f5b1e2768c5554ec02f786204558
|
[] |
no_license
|
justinhan33/R-Package
|
1cf161399957edbef135330c4487535cf29e2e57
|
806fe67da844a79ff01fbadaf68f18f145e47559
|
refs/heads/master
| 2020-12-14T05:39:10.787925
| 2020-01-18T01:04:38
| 2020-01-18T01:04:38
| 234,659,557
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,464
|
r
|
test-summary.R
|
# Unit tests for the package's auxiliary summary-measure helpers
# (aux_mean, aux_variance, aux_mode, aux_skewness) for a binomial(n, p).
# NOTE(review): expect_is() is deprecated in testthat 3e; consider
# expect_type()/expect_s3_class() when upgrading.
context("Tests for auxiliary functions")
#aux_mean
# The mean n*p can never exceed n (first parameter) since 0 <= p <= 1.
test_that("aux_mean is less than or equal to the first parameter", {
expect_lte(aux_mean(10, 0.5), 10)
expect_lte(aux_mean(10, 1), 10)
expect_lte(aux_mean(10, 0), 10)
expect_lte(aux_mean(5, 0.5), 5)
expect_lte(aux_mean(5, 1), 5)
expect_lte(aux_mean(5, 0), 5)
})
test_that("aux_mean produces a single output", {
expect_length(aux_mean(10, 0.5), 1)
expect_length(aux_mean(13, 0.75), 1)
expect_length(aux_mean(20, 1), 1)
expect_length(aux_mean(25, 0), 1)
})
test_that("aux_mean produces output of class type equal to that of its parameters", {
expect_is(aux_mean(10, 0.5), class(10))
expect_is(aux_mean(10, 0.5), class(0.5))
expect_is(aux_mean(17, 0.25), class(17))
expect_is(aux_mean(17, 0.25), class(0.25))
})
#aux_variance
test_that("aux_variance produces a single output", {
expect_length(aux_variance(10, 0.5), 1)
expect_length(aux_variance(13, 0.75), 1)
expect_length(aux_variance(20, 1), 1)
expect_length(aux_variance(25, 0), 1)
})
test_that("aux_variance produces output of class type equal to that of its parameters", {
expect_is(aux_variance(10, 0.5), class(10))
expect_is(aux_variance(10, 0.5), class(0.5))
expect_is(aux_variance(17, 0.25), class(17))
expect_is(aux_variance(17, 0.25), class(0.25))
})
# Variance n*p*(1-p) is non-negative, including the degenerate p = 0 and p = 1.
test_that("aux_variance produces non-negative output", {
expect_gte(aux_variance(10, 0.5), 0)
expect_gte(aux_variance(10, 0.75), 0)
expect_gte(aux_variance(10, 0), 0)
expect_gte(aux_variance(10, 1), 0)
})
#aux_mode
test_that("aux_mode produces output of class type equal to that of its parameters", {
expect_is(aux_mode(10, 0.5), class(10))
expect_is(aux_mode(10, 0.5), class(0.5))
expect_is(aux_mode(17, 0.25), class(17))
expect_is(aux_mode(17, 0.25), class(0.25))
})
# The binomial mode is bimodal (two values) when (n+1)*p is a whole number.
test_that("aux_mode produces output with valid length", {
expect_length(aux_mode(10, 0.5), 1)
expect_length(aux_mode(9, 0.5), 2)
expect_length(aux_mode(49, 0.3), 2)
})
test_that("aux_mode works with valid parameters", {
expect_equal(aux_mode(10, 0.5), 5)
expect_equal(aux_mode(92, 0.5), 46)
expect_equal(aux_mode(49, 0.4), c(20, 19))
})
#aux_skewness
# Skewness divides by sqrt(n*p*(1-p)), so n = 0, p = 0, and p = 1 must error.
test_that("aux_skewness fails for certain parameters", {
expect_error(aux_skewness(0, 0.5))
expect_error(aux_skewness(10, 0))
expect_error(aux_skewness(18, 1))
})
test_that("aux_skewness produces a single output", {
expect_length(aux_skewness(10, 0.5), 1)
expect_length(aux_skewness(49, 0.4), 1)
expect_length(aux_skewness(92, 0.5), 1)
})
test_that("aux_skewness produces output of class type equal to that of its parameters", {
expect_is(aux_skewness(10, 0.5), class(10))
expect_is(aux_skewness(10, 0.5), class(0.5))
expect_is(aux_skewness(17, 0.25), class(17))
expect_is(aux_skewness(17, 0.25), class(0.25))
})
#aux_kurtosis
test_that("aux_kurtosis fails for certain parameters", {
expect_error(aux_kurtosis(0, 0.5))
expect_error(aux_kurtosis(10, 0))
expect_error(aux_kurtosis(18, 1))
})
test_that("aux_kurtosis produces a single output", {
expect_length(aux_kurtosis(10, 0.5), 1)
expect_length(aux_kurtosis(49, 0.4), 1)
expect_length(aux_kurtosis(92, 0.5), 1)
})
test_that("aux_kurtosis produces output of class type equal to that of its parameters", {
expect_is(aux_kurtosis(10, 0.5), class(10))
expect_is(aux_kurtosis(10, 0.5), class(0.5))
expect_is(aux_kurtosis(17, 0.25), class(17))
expect_is(aux_kurtosis(17, 0.25), class(0.25))
})
|
bb3e3a8d3d6c03494a7945430bf1ea2c0dd4c5b4
|
41977022b45c051d1091ae99a6c01e42fc2169bb
|
/seq ana lab 2/lab4.R
|
a83c2f6beac52dbded155c95814a2c63f2ced166
|
[] |
no_license
|
wopoczynski/R
|
fc2ff7b5a7b8e7139b4c6af75b20b1d969b03356
|
a5a428e4eb811930b44531e364b058ef6f487d15
|
refs/heads/master
| 2021-01-20T05:22:00.351244
| 2017-08-25T20:21:57
| 2017-08-25T20:21:57
| 101,439,459
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,056
|
r
|
lab4.R
|
dlchrom1=234560296;
dlchrom3=203221608;
dlchrom4=173516952;
dlchrom8=150392723;
dlchrom12=108540874;
dlchrom18=75010233;
contig1=1+2698+161+14809;
contig3=1+2323+171+11569;
contig4=1+9831+71+1664;
contig8=1215+1+7950+63;
contig12=820+1+28+5707;
contig18=690+1+3904+39
dlugosc=c(dlchrom1,dlchrom3,dlchrom4,dlchrom8,dlchrom12,dlchrom18);
contig=c(contig1,contig3,contig4,contig8,contig12,contig18);
plot(dlugosc,contig, type="b", xlab="dlugosc sekwencji", ylab="ilosc contigow",);
title("zaleznosci ilosci contigow od dlugosci sekwencji chromosomu");
lspokrycie=48.0;
lscontig=458935;
penpokrycie=61;
pencontig=230930;
varpokrycie=81;
varcontig=110959;
halpokrycie=103;
halcontig=31786;
plasmidpokrycie=136;
plasmidcontig=349;
simpokrycie=180;
simcontig=82;
pokrycie=c(lspokrycie,penpokrycie,varpokrycie,halpokrycie,plasmidpokrycie,simpokrycie);
contigC=c(lscontig,pencontig,varcontig,halcontig,plasmidcontig,simcontig);
plot(pokrycie,contigC,type='b',xlab='pokrycie genomu',ylab='ilosc contigow');
title("zaleznosc ilosci contigow od pokrycia genomu");
|
86fb9fc91c4f7121dc7167a365bb36df07932ec8
|
bdb965469633a334d1755c5e500c200a083dc1fe
|
/prostateCancerFunctions.R
|
fa1128ca1b3faa5c6553720e532d6c783ce673d9
|
[] |
no_license
|
ysvang/ProstateCancerDream
|
e14e3f6f1caf93c884c512a09a1b70445a70ac9e
|
95cd8329359dd6342c3474c393cebf98e00eebb1
|
refs/heads/master
| 2021-01-10T01:09:14.446248
| 2015-10-13T06:27:59
| 2015-10-13T06:27:59
| 44,156,376
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,424
|
r
|
prostateCancerFunctions.R
|
library(survival)
library(pec)
library(MASS)
library(muhaz)
library(caret)
library(matrixStats)
library(gbm,lib.loc ='gbmci')
library(glmnet)
# read in training and testing data
readData <- function(trainFile, testFile) {
trainingDF <- read.csv(trainFile)
testDF <- read.csv(testFile)
time <- read.csv("trainTime.csv")
names(time) <- c("time")
time <- as.matrix(time) # requires time to be a matrix to work with Surv
event <- read.csv("trainEvent.csv")
names(event) <- c("event")
event <- as.matrix(event) # requires event to be a matrix to work with Surv
return(list("time" = time, "event" = event, "trainingDF" = trainingDF, "testDF" = testDF))
}
# map data vector x to norm based on quantile
MapToNorm <- function(x){
return (qnorm(ppoints(length(x)))[order(order(x))])
}
# normalize real-valued columns
normalizeData <- function(dataObject, n){
time <- dataObject$time
trainingDF <- dataObject$trainingDF
event <- dataObject$event
testDF <- dataObject$testDF
preProc <- preProcess(trainingDF[,1:n], method=c("center", "scale")) # calculate mean and variance of training set
trainingDF.norm <- predict(preProc, trainingDF[,1:n]) # apply centering/scaling to training set
trainingDF.norm = data.frame(trainingDF.norm, trainingDF[,(n+1):ncol(trainingDF)])
testDF.norm <- predict(preProc, testDF[,1:n]) # apply centering/scaling to test set
testDF.norm = data.frame(testDF.norm, testDF[,(n+1):ncol(testDF)])
return(list("time" = time, "event" = event, "trainingDF" = trainingDF.norm, "testDF" = testDF.norm))
}
# coxph regression
model.coxph <- function(dataObject){
coxfit <- coxph(formula=Surv(dataObject$time, dataObject$event) ~ ., data=dataObject$trainingDF, method="efron")
globalRisk <- predict(coxfit, newdata=dataObject$testDF, type="risk") # global (relative risk)
# predict survival probability (risk) at 12, 18, 24 months
testSurvivalProb <- predictSurvProb(coxfit, newdata=dataObject$testDF, times=seq(366,732,183))
risksDF <- data.frame(globalRisk, testSurvivalProb[,1], testSurvivalProb[,2], testSurvivalProb[,3])
return(risksDF)
}
# gradient boost machine with coxph
model.gbmcoxph <- function(dataObject, numTrees, depth, bagFraction, shrink){
time <- dataObject$time
event <- dataObject$event
trainingDF <- dataObject$trainingDF
testDF <- dataObject$testDF
coxphfit.gbm <- gbm(Surv(time, event) ~ ., distribution = "coxph", n.trees = numTrees, data = trainingDF, shrinkage= shrink,
interaction.depth = depth, bag.fraction = bagFraction, cv.folds = 5, keep.data = TRUE, verbose = FALSE)
best.iter <- gbm.perf(coxphfit.gbm, method = "cv", plot.it=FALSE) # returns test set estimate of best number of trees
cumulativeHaz <- basehaz.gbm(time, event, coxphfit.gbm$fit, t.eval = seq(366, 732, 183), cumulative = TRUE)
testDF.linPred <- predict(coxphfit.gbm, newdata = testDF, best.iter)
globalRisk <- exp(testDF.linPred) # global risk
s.12mon <- exp(-cumulativeHaz[1])^globalRisk # Survival prob at 12 months
s.18mon <- exp(-cumulativeHaz[2])^globalRisk # Survival prob at 18 months
s.24mon <- exp(-cumulativeHaz[3])^globalRisk # Survival prob at 24 months
risksDF <- data.frame(globalRisk, s.12mon, s.18mon, s.24mon)
return(risksDF)
}
# gradient boost machine with c-index optimization
model.gbmci <- function(dataObject, numTrees, depth, bagFraction, shrink){
time <- dataObject$time
event <- dataObject$event
trainingDF <- dataObject$trainingDF
testDF <- dataObject$testDF
coxphfit.gbm <- gbm(Surv(time, event) ~ ., distribution = "sci", n.trees = numTrees, data = trainingDF,
interaction.depth = depth, bag.fraction = bagFraction, cv.folds = 5, keep.data = TRUE, verbose = FALSE, shrinkage = shrink)
best.iter <- gbm.perf(coxphfit.gbm, method = "cv", plot.it=FALSE) # returns test set estimate of best number of trees
cumulativeHaz <- basehaz.gbm(time, event, coxphfit.gbm$fit, t.eval = seq(366, 732, 183), cumulative = TRUE)
testDF.linPred <- predict(coxphfit.gbm, newdata = testDF, best.iter)
globalRisk <- exp(testDF.linPred) # global risk
s.12mon <- exp(-cumulativeHaz[1])^globalRisk # Survival prob at 12 months
s.18mon <- exp(-cumulativeHaz[2])^globalRisk # Survival prob at 18 months
s.24mon <- exp(-cumulativeHaz[3])^globalRisk # Survival prob at 24 months
risksDF <- data.frame(globalRisk, s.12mon, s.18mon, s.24mon)
return(risksDF)
}
# coxph regression used to obtain exact time to event
model.coxph.time2event <- function(dataObject){
coxfit <- coxph(formula=Surv(dataObject$time, dataObject$event) ~ ., data=dataObject$trainingDF, method="efron")
globalRisk <- predict(coxfit, newdata=dataObject$testDF, type="risk") # global (relative risk)
# calculates the survival probabilites for each test patient from day 1 through the maximum survival day of the training set
survivalProb <- predictSurvProb(coxfit, newdata=dataObject$testDF, times=seq(1,max(dataObject$time),1))
exactTime2Event <- matrix(data=0,nrow=nrow(dataObject$testDF),ncol=1)
# scans through each patient looking for the first day where survival probability is less or equal to 0.502
for (i in 1:nrow(dataObject$testDF)){
firstElementCount <- 0
for (j in 1:ncol(survivalProb)){
if (survivalProb[i,j] <= 0.502) {
firstElementCount <- 1
exactTime2Event[i] <- j
}
if (j == ncol(survivalProb)){
exactTime2Event[i] <- j
}
if (firstElementCount == 1) {
break
}
}
}
return(exactTime2Event)
}
# gradient boost machine with coxph used to obtain exact time to event
model.gbmcoxph.time2event <- function(dataObject, numTrees, depth, bagFraction, shrink){
time <- dataObject$time
event <- dataObject$event
trainingDF <- dataObject$trainingDF
testDF <- dataObject$testDF
coxphfit.gbm <- gbm(Surv(time, event) ~ ., distribution = "coxph", n.trees = numTrees, data = trainingDF, shrinkage= shrink,
interaction.depth = depth, bag.fraction = bagFraction, cv.folds = 5, keep.data = TRUE, verbose = FALSE)
best.iter <- gbm.perf(coxphfit.gbm, method = "cv", plot.it=FALSE) # returns test set estimate of best number of trees
cumulativeHaz <- basehaz.gbm(time, event, coxphfit.gbm$fit, t.eval = seq(1,max(dataObject$time),1), cumulative = TRUE)
testDF.linPred <- predict(coxphfit.gbm, newdata = testDF, best.iter)
globalRisk <- exp(testDF.linPred) # global risk
exactTime2Event <- matrix(data=0,nrow=nrow(dataObject$testDF),ncol=1)
survivalProb <- matrix(data=0,nrow=nrow(dataObject$testDF),ncol=max(dataObject$time))
# calculates the survival probabilites for each test patient from day 1 through the maximum survival day of the training set
for (k in 1:ncol(survivalProb)){
survivalProb[,k] <- exp(-cumulativeHaz[k])^globalRisk
}
survivalProb[is.na(survivalProb)] <- 1
# scans through each patient looking for the first day where survival probability is less or equal to 0.502
for (i in 1:nrow(dataObject$testDF)){
firstElementCount <- 0
for (j in 1:ncol(survivalProb)){
if (survivalProb[i,j] <= 0.502) {
firstElementCount <- 1
exactTime2Event[i] <- j
}
if (j == ncol(survivalProb)){
exactTime2Event[i] <- j
}
if (firstElementCount == 1) {
break
}
}
}
return(exactTime2Event)
}
# gradient boost machine with ci used to obtain exact time to event
model.gbmci.time2event <- function(dataObject, numTrees, depth, bagFraction, shrink){
time <- dataObject$time
event <- dataObject$event
trainingDF <- dataObject$trainingDF
testDF <- dataObject$testDF
coxphfit.gbm <- gbm(Surv(time, event) ~ ., distribution = "sci", n.trees = numTrees, data = trainingDF, shrinkage= shrink,
interaction.depth = depth, bag.fraction = bagFraction, cv.folds = 5, keep.data = TRUE, verbose = FALSE)
best.iter <- gbm.perf(coxphfit.gbm, method = "cv", plot.it=FALSE) # returns test set estimate of best number of trees
cumulativeHaz <- basehaz.gbm(time, event, coxphfit.gbm$fit, t.eval = seq(1,max(dataObject$time),1), cumulative = TRUE)
testDF.linPred <- predict(coxphfit.gbm, newdata = testDF, best.iter)
globalRisk <- exp(testDF.linPred) # global risk
exactTime2Event <- matrix(data=0,nrow=nrow(dataObject$testDF),ncol=1)
survivalProb <- matrix(data=0,nrow=nrow(dataObject$testDF),ncol=max(dataObject$time))
# calculates the survival probabilites for each test patient from day 1 through the maximum survival day of the training set
for (k in 1:ncol(survivalProb)){
survivalProb[,k] <- exp(-cumulativeHaz[k])^globalRisk
}
survivalProb[is.na(survivalProb)] <- 1
# scans through each patient looking for the first day where survival probability is less or equal to 0.502
for (i in 1:nrow(dataObject$testDF)){
firstElementCount <- 0
for (j in 1:ncol(survivalProb)){
if (survivalProb[i,j] <= 0.502) {
firstElementCount <- 1
exactTime2Event[i] <- j
}
if (j == ncol(survivalProb)){
exactTime2Event[i] <- j
}
if (firstElementCount == 1) {
break
}
}
}
return(exactTime2Event)
}
|
2f50c63ef1fba594d6a040796a2998875080eed7
|
8c84ea2e7e9c74c085ac1c04d3af2f2818264e97
|
/scripts/ch08.R
|
476ae63ad64de5139d1a57ffae90242c46657d5c
|
[] |
no_license
|
liao961120/rethinking_code
|
250351fc54e61dd81e8e23bacf9c1e4a4a35b346
|
5b4d07eb7a71c5735825066c564dec11b399f6aa
|
refs/heads/main
| 2023-06-01T13:03:19.506478
| 2021-06-20T05:16:14
| 2021-06-20T05:16:14
| 335,544,180
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,079
|
r
|
ch08.R
|
#+ Setup
remotes::install_github('rmcelreath/rethinking', upgrade=F)
library(rethinking)
#' ## R code 8.1
#+ R code 8.1
library(rethinking)
data(rugged)
d <- rugged
# make log version of outcome
d$log_gdp <- log( d$rgdppc_2000 )
# extract countries with GDP data
dd <- d[ complete.cases(d$rgdppc_2000) , ]
# rescale variables
dd$log_gdp_std <- dd$log_gdp / mean(dd$log_gdp)
dd$rugged_std <- dd$rugged / max(dd$rugged)
#' ## R code 8.2
#+ R code 8.2
m8.1 <- quap(
alist(
log_gdp_std ~ dnorm( mu , sigma ) ,
mu <- a + b*( rugged_std - 0.215 ) ,
a ~ dnorm( 1 , 1 ) ,
b ~ dnorm( 0 , 1 ) ,
sigma ~ dexp( 1 )
) , data=dd )
#' ## R code 8.3
#+ R code 8.3
set.seed(7)
prior <- extract.prior( m8.1 )
# set up the plot dimensions
plot( NULL , xlim=c(0,1) , ylim=c(0.5,1.5) ,
xlab="ruggedness" , ylab="log GDP" )
abline( h=min(dd$log_gdp_std) , lty=2 )
abline( h=max(dd$log_gdp_std) , lty=2 )
# draw 50 lines from the prior
rugged_seq <- seq( from=-0.1 , to=1.1 , length.out=30 )
mu <- link( m8.1 , post=prior , data=data.frame(rugged_std=rugged_seq) )
for ( i in 1:50 ) lines( rugged_seq , mu[i,] , col=col.alpha("black",0.3) )
#' ## R code 8.4
#+ R code 8.4
sum( abs(prior$b) > 0.6 ) / length(prior$b)
#' ## R code 8.5
#+ R code 8.5
m8.1 <- quap(
alist(
log_gdp_std ~ dnorm( mu , sigma ) ,
mu <- a + b*( rugged_std - 0.215 ) ,
a ~ dnorm( 1 , 0.1 ) ,
b ~ dnorm( 0 , 0.3 ) ,
sigma ~ dexp(1)
) , data=dd )
#' ## R code 8.6
#+ R code 8.6
precis( m8.1 )
#' ## R code 8.7
#+ R code 8.7
# make variable to index Africa (1) or not (2)
dd$cid <- ifelse( dd$cont_africa==1 , 1 , 2 )
#' ## R code 8.8
#+ R code 8.8
m8.2 <- quap(
alist(
log_gdp_std ~ dnorm( mu , sigma ) ,
mu <- a[cid] + b*( rugged_std - 0.215 ) ,
a[cid] ~ dnorm( 1 , 0.1 ) ,
b ~ dnorm( 0 , 0.3 ) ,
sigma ~ dexp( 1 )
) , data=dd )
#' ## R code 8.9
#+ R code 8.9
compare( m8.1 , m8.2 )
#' ## R code 8.10
#+ R code 8.10
precis( m8.2 , depth=2 )
#' ## R code 8.11
#+ R code 8.11
post <- extract.samples(m8.2)
diff_a1_a2 <- post$a[,1] - post$a[,2]
PI( diff_a1_a2 )
#' ## R code 8.12
#+ R code 8.12
rugged.seq <- seq( from=-0.1 , to=1.1 , length.out=30 )
# compute mu over samples, fixing cid=2 and then cid=1
mu.NotAfrica <- link( m8.2 ,
data=data.frame( cid=2 , rugged_std=rugged.seq ) )
mu.Africa <- link( m8.2 ,
data=data.frame( cid=1 , rugged_std=rugged.seq ) )
# summarize to means and intervals
mu.NotAfrica_mu <- apply( mu.NotAfrica , 2 , mean )
mu.NotAfrica_ci <- apply( mu.NotAfrica , 2 , PI , prob=0.97 )
mu.Africa_mu <- apply( mu.Africa , 2 , mean )
mu.Africa_ci <- apply( mu.Africa , 2 , PI , prob=0.97 )
#' ## R code 8.13
#+ R code 8.13
m8.3 <- quap(
alist(
log_gdp_std ~ dnorm( mu , sigma ) ,
mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
a[cid] ~ dnorm( 1 , 0.1 ) ,
b[cid] ~ dnorm( 0 , 0.3 ) ,
sigma ~ dexp( 1 )
) , data=dd )
#' ## R code 8.14
#+ R code 8.14
precis( m8.5 , depth=2 )
#' ## R code 8.15
#+ R code 8.15
compare( m8.1 , m8.2 , m8.3 , func=PSIS )
#' ## R code 8.16
#+ R code 8.16
plot( PSIS( m8.3 , pointwise=TRUE )$k )
#' ## R code 8.17
#+ R code 8.17
# plot Africa - cid=1
d.A1 <- dd[ dd$cid==1 , ]
plot( d.A1$rugged_std , d.A1$log_gdp_std , pch=16 , col=rangi2 ,
xlab="ruggedness (standardized)" , ylab="log GDP (as proportion of mean)" ,
xlim=c(0,1) )
mu <- link( m8.3 , data=data.frame( cid=1 , rugged_std=rugged_seq ) )
mu_mean <- apply( mu , 2 , mean )
mu_ci <- apply( mu , 2 , PI , prob=0.97 )
lines( rugged_seq , mu_mean , lwd=2 )
shade( mu_ci , rugged_seq , col=col.alpha(rangi2,0.3) )
mtext("African nations")
# plot non-Africa - cid=2
d.A0 <- dd[ dd$cid==2 , ]
plot( d.A0$rugged_std , d.A0$log_gdp_std , pch=1 , col="black" ,
xlab="ruggedness (standardized)" , ylab="log GDP (as proportion of mean)" ,
xlim=c(0,1) )
mu <- link( m8.3 , data=data.frame( cid=2 , rugged_std=rugged_seq ) )
mu_mean <- apply( mu , 2 , mean )
mu_ci <- apply( mu , 2 , PI , prob=0.97 )
lines( rugged_seq , mu_mean , lwd=2 )
shade( mu_ci , rugged_seq )
mtext("Non-African nations")
#' ## R code 8.18
#+ R code 8.18
rugged_seq <- seq(from=-0.2,to=1.2,length.out=30)
muA <- link( m8.3 , data=data.frame(cid=1,rugged_std=rugged_seq) )
muN <- link( m8.3 , data=data.frame(cid=2,rugged_std=rugged_seq) )
delta <- muA - muN
#' ## R code 8.19
#+ R code 8.19
library(rethinking)
data(tulips)
d <- tulips
str(d)
#' ## R code 8.20
#+ R code 8.20
d$blooms_std <- d$blooms / max(d$blooms)
d$water_cent <- d$water - mean(d$water)
d$shade_cent <- d$shade - mean(d$shade)
#' ## R code 8.21
#+ R code 8.21
a <- rnorm( 1e4 , 0.5 , 1 ); sum( a < 0 | a > 1 ) / length( a )
#' ## R code 8.22
#+ R code 8.22
a <- rnorm( 1e4 , 0.5 , 0.25 ); sum( a < 0 | a > 1 ) / length( a )
#' ## R code 8.23
#+ R code 8.23
m8.4 <- quap(
alist(
blooms_std ~ dnorm( mu , sigma ) ,
mu <- a + bw*water_cent + bs*shade_cent ,
a ~ dnorm( 0.5 , 0.25 ) ,
bw ~ dnorm( 0 , 0.25 ) ,
bs ~ dnorm( 0 , 0.25 ) ,
sigma ~ dexp( 1 )
) , data=d )
#' ## R code 8.24
#+ R code 8.24
m8.5 <- quap(
alist(
blooms_std ~ dnorm( mu , sigma ) ,
mu <- a + bw*water_cent + bs*shade_cent + bws*water_cent*shade_cent ,
a ~ dnorm( 0.5 , 0.25 ) ,
bw ~ dnorm( 0 , 0.25 ) ,
bs ~ dnorm( 0 , 0.25 ) ,
bws ~ dnorm( 0 , 0.25 ) ,
sigma ~ dexp( 1 )
) , data=d )
#' ## R code 8.25
#+ R code 8.25
par(mfrow=c(1,3)) # 3 plots in 1 row
for ( s in -1:1 ) {
idx <- which( d$shade_cent==s )
plot( d$water_cent[idx] , d$blooms_std[idx] , xlim=c(-1,1) , ylim=c(0,1) ,
xlab="water" , ylab="blooms" , pch=16 , col=rangi2 )
mu <- link( m8.4 , data=data.frame( shade_cent=s , water_cent=-1:1 ) )
for ( i in 1:20 ) lines( -1:1 , mu[i,] , col=col.alpha("black",0.3) )
}
#' ## R code 8.26
#+ R code 8.26
set.seed(7)
prior <- extract.prior(m8.5)
#' ## R code 8.27
#+ R code 8.27
d$lang.per.cap <- d$num.lang / d$k.pop
|
d2264f3152780508fbaf630d4e1cccba7f000937
|
ea2da1ab290bc772e0804f0d22f070876bfe255f
|
/inst/scripts/Example-1.R
|
9b98641df719454ff8c6468166f2180dae802b58
|
[] |
no_license
|
YinanZheng/HIMA
|
fe9c2273e5c27b0316fe298fbc4437a392e21b63
|
aefb1a44b846ab1711d3a4f65b764c87ff597fea
|
refs/heads/master
| 2023-05-12T15:54:32.116778
| 2023-04-29T01:56:04
| 2023-04-29T01:56:04
| 47,419,574
| 15
| 19
| null | 2018-03-01T17:45:29
| 2015-12-04T17:38:51
|
R
|
UTF-8
|
R
| false
| false
| 1,354
|
r
|
Example-1.R
|
# Generate HIMA example based on real-world dataset - 1 (linear outcome)
set.seed(1029)
p <- 300 # the dimension of mediators
q <- 2 # the dimension of covariates
n <- 300 # sample size
alpha <- matrix(0,1,p) # the coefficients for X -> M
beta <- matrix(0,1,p) # the coefficients for M -> Y
alpha[1:5] <- 0.5
beta[1:5] <- 0.5
zeta <- matrix(0.01,p,q) # the coefficients of covariates for X -> M
eta <- matrix(0.01,1,q) # the coefficients of covariates for M -> Y
gamma <- 0.5 # the direct effect
gamma_total <- gamma + alpha%*%t(beta) # the total effect
X <- matrix(rbinom(n, size=1, prob=0.6),n,1) # expoure: 1=treatment; 0=placebo
Z <- matrix(0,n,2) # covariates
Z[,1] <- rbinom(n, size=1, prob=0.5) # sex: male=1; female=0
Z[,2] <- sample(18:65,n,replace=TRUE) # age ranging from 18-65
# Z[,2] <- (Z[,2]-mean(Z[,2]))/sd(Z[,2]) # scaled age, so we add a note that age is scaled before modeling
e <- matrix(rnorm(n*p, mean = 0, sd = 1.5),n,p)
E <- matrix(rnorm(n, mean = 0, sd = 1),n,1)
M <- X%*%(alpha) + Z%*%t(zeta) + e # the mediator matrix
Y <- X*gamma + M%*%t(beta) + Z%*%t(eta) + E # the response Y
colnames(M) <- paste0("M", 1:p)
rownames(M) <- paste0("S", 1:n)
pheno <- data.frame(Treatment = X, Outcome = Y, Sex = Z[,1], Age = Z[,2])
Example1 <- list(PhenoData = pheno, Mediator = M)
usethis::use_data(Example1, overwrite = TRUE)
|
5caa0e53938107c5f69a5589946e0fbee80aaaad
|
db57b8d257b2ff7adbe8b5e62acf40d3658a15a8
|
/Oldprogram/reading_NGK.r
|
9d1e1c7e7125a521d4814e5fb57910f3c6a2c755
|
[] |
no_license
|
inyoungkim1/NonnegativeGarroteKernel
|
7594e4eaba1146d35933273fcf94e32cc760ca93
|
0c66f79bb0d9510d3ab1d34f631c0108c12583e6
|
refs/heads/master
| 2020-07-11T08:31:34.173482
| 2019-09-04T19:15:33
| 2019-09-04T19:15:33
| 204,490,473
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,049
|
r
|
reading_NGK.r
|
n<-64
#############################################################
#What user needs to change: Kernel and path
name<-"Gauss"
path=paste("/FolderName/Test_",name,"_n=",n,"_p=80.csv",sep="")
#############################################################
name<-"Gauss"
path=paste("/FolderName/Test_",name,"_n=",n,"_p=80.csv",sep="")
A<-as.matrix(read.csv(path)[,-1])
#RSS
RSS<-mean(A[,6]/n);RSSsd<-sd(A[,6]/n)
#SE
SE<-mean(A[,4]);SEsd<-sd(A[,4])
A[which(A!=0)]<-1
count<-colSums(A)
#False Positive rate (alpha)
FP<-mean(rowSums(A[,13:87])/(rowSums(A[,13:87])+75));
FPsd<-sd(rowSums(A[,13:87])/(rowSums(A[,13:87])+75))
#False negative rate (beta)
FN<-mean((5-rowSums(A[,8:12]))/((5-rowSums(A[,8:12]))+5));
FNsd<-sd((5-rowSums(A[,8:12]))/((5-rowSums(A[,8:12]))+5))
MS<-mean(rowSums(A[,8:87]))
MSsd<-sd(rowSums(A[,8:87]))
count
cbind(FP,FPsd,FN,FNsd,MS,MSsd,RSS,RSSsd,SE,SEsd)
#############################################################
#What user needs to change: Kernel and path
name<-"Poly"
path=paste("/FolderName/Test_",name,"_n=",n,"_p=80.csv",sep="")
#############################################################
A<-as.matrix(read.csv(path)[,-1])
#RSS
RSS<-mean(A[,6]/n);RSSsd<-sd(A[,6]/n)
#SE
SE<-mean(A[,4]);SEsd<-sd(A[,4])
A[which(A!=0)]<-1
count1<-colSums(A)
#False Positive rate (alpha)
FP<-mean(rowSums(A[,13:87])/(rowSums(A[,13:87])+75));
FPsd<-sd(rowSums(A[,13:87])/(rowSums(A[,13:87])+75))
#False negative rate (beta)
FN<-mean((5-rowSums(A[,8:12]))/((5-rowSums(A[,8:12]))+5));
FNsd<-sd((5-rowSums(A[,8:12]))/((5-rowSums(A[,8:12]))+5))
MS<-mean(rowSums(A[,8:87]))
MSsd<-sd(rowSums(A[,8:87]))
count1
cbind(FP,FPsd,FN,FNsd,MS,MSsd,RSS,RSSsd,SE,SEsd)
#############
par(mfrow=c(1,1),oma=c(0,1,4,0),mar=c(8,4.5,6,2))
plot(count[-(1:7)]/400,xlab="Predictor Index", ylab="Probability",cex=1.,cex.lab=1.5,cex.axis=1.5)
points(count1[-(1:7)]/400,pch=20)
legend(locator(1),cex=1.5, c("NGK Gauss","NGK linear poly"),pch=c(1,20))
|
5cb101c68f66a1aedc24470089a270ed1fc4f612
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/JumpTest/examples/jumptestday.Rd.R
|
1acecf107b3fc8994466702280a3b8257c3620dd
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 229
|
r
|
jumptestday.Rd.R
|
library(JumpTest)
### Name: jumptestday
### Title: Nonparametric jump test for each interval
### Aliases: jumptestday
### ** Examples
orip <- runif(100)
testres <- jumptestday(orip)
ts <- testres@stat
pv <- testres@pvalue
|
827b6ed97c10f76c2976269012e62e3d3fdfb9c5
|
1e6f64fb9f3adcf2f78b5d53a1f26fd11c7ee0ac
|
/merging_accuracytable_7.R
|
6f259a4112a794b782fb18fc8e1995252bfa01e4
|
[] |
no_license
|
bigtiger94/droughtPrediction
|
30fac36a49e6042d52263c474fbfe7673fcda7fb
|
e52368e65f7998158934c29df629b154ac616033
|
refs/heads/master
| 2022-11-27T12:17:54.984625
| 2020-08-02T11:19:20
| 2020-08-02T11:19:20
| 284,443,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,406
|
r
|
merging_accuracytable_7.R
|
dindexname = "sri12"
bsnnames = c("soyang", "daecheong", "andong", "sumjin", "chungju", "hapcheon", "namgang", "imha")
bsncodes = c(1012, 3008, 2001, 4001, 1003, 2015, 2018, 2002)
period.irr = c(4:9)
period.nonirr = setdiff(c(1:12), period.irr)
bsncase = 1
for (bsncase in c(1:length(bsncodes))){
targetbsn = bsncodes[bsncase];
bsnname = bsnnames[bsncase];
kfoldinfo = read.csv("kfoldinfo.csv", header=T, row.names=1)
kk=1
BSfilepath = file.path("./predictResult","BSresult")
BS.df = read.csv(file.path(BSfilepath, paste0("BS_allperiod_",dindexname,"_",bsnname,".csv")), row.names=1)
BS.df$RPS = NA
enstype = "raw"
for (enstype in c("raw", "up")){
for (kk in c(1:4)){
kfold = paste0("k", kk)
predictfilepath = file.path("./predictResult", kfold)
RPSfilepath = file.path(predictfilepath, "RPSresult")
rpsprob.ts = read.csv(file.path(RPSfilepath,paste0("RPS_prob_",enstype,"_",dindexname,"_",bsnname,".csv")), row.names=1) %>%
xts(., ymd(rownames(.)))
rpsdet.ts = read.csv(file.path(RPSfilepath,paste0("RPS_det_",enstype,"_",dindexname,"_",bsnname,".csv")), row.names=1) %>%
xts(., ymd(rownames(.)))
attach(BS.df)
period = "all"
BS.df$RPS[which(Predicttype=="prob"&K==kfold&Enstype==enstype&Period==period)] = mean(rpsprob.ts)
BS.df$RPS[which(Predicttype=="det"&K==kfold&Enstype==enstype&Period==period)] = mean(rpsdet.ts)
period = "irrigation"
RPS.irr = month(rpsprob.ts) %in% period.irr %>% rpsprob.ts[.] %>% mean(.)
BS.df$RPS[which(Predicttype=="prob"&K==kfold&Enstype==enstype&Period==period)] = RPS.irr
RPS.irr = month(rpsdet.ts) %in% period.irr %>% rpsdet.ts[.] %>% mean(.)
BS.df$RPS[which(Predicttype=="det"&K==kfold&Enstype==enstype&Period==period)] = RPS.irr
period = "non-irrigation"
RPS.nonirr = month(rpsprob.ts) %in% period.nonirr %>% rpsprob.ts[.] %>% mean(.)
BS.df$RPS[which(Predicttype=="prob"&K==kfold&Enstype==enstype&Period==period)] = RPS.nonirr
RPS.nonirr = month(rpsdet.ts) %in% period.nonirr %>% rpsdet.ts[.] %>% mean(.)
BS.df$RPS[which(Predicttype=="det"&K==kfold&Enstype==enstype&Period==period)] = RPS.nonirr
detach()
}
}
accfilepath = file.path("./predictResult")
acc.df = BS.df
acc.df %>% write.csv(., file.path(accfilepath, paste0("accuracytable_",dindexname,"_",bsnname,".csv")))
}
|
98e2dc4f5103387601b5b3258c81af7a35600e9a
|
d0d0f1aa8690b903b946c90dd368d7ba910818d5
|
/Enders Materials/AMASS Workshop/R/Ex3.R
|
8237c3e82601b11b8697aee085ee6f179e0afca6
|
[] |
no_license
|
mhalvo/missingdata
|
5fc3722a5d59a89fc7a44fddba549a18e8daa2c5
|
7e5bb66e31790e9a8f27d115c7b606a9413be002
|
refs/heads/master
| 2021-06-24T17:54:28.537775
| 2021-06-21T20:01:35
| 2021-06-21T20:01:35
| 149,327,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 678
|
r
|
Ex3.R
|
# Required packages
library(mitml)
# Read data
filepath <- "~/desktop/examples/imps.csv"
impdata <- read.csv(filepath, header = F)
names(impdata) <- c("imputation","id","female","diagnose","sleep","pain","posaff","negaff","stress")
# Compute product variable
impdata$femxpain <- impdata$female * impdata$pain
# Analyze data and pool estimates
implist <- as.mitml.list(split(impdata, impdata$imputation))
analysis <- with(implist, lm(stress ~ female + pain + femxpain))
estimates <- testEstimates(analysis, var.comp = T, df.com = (250-3-1))
estimates
# Compare models with Wald test
emptymodel <- with(implist, lm(stress ~ 1))
testModels(analysis, emptymodel, method = "D1")
|
f39a6d6f040642d658cd108151acaea01967fa37
|
506ec6d6c5eaf172576da7c1df34d7ad629aedc3
|
/man/rGibbsHurdle.Rd
|
4b82060ec624d1707c224b562cb20af965bfbc7f
|
[] |
no_license
|
amcdavid/HurdleNormal
|
fad2957cabad451b94283bc3aefc82de706d6b0f
|
425d4853a0433e8c390988d07bac3e8ebcdae682
|
refs/heads/master
| 2022-05-13T17:14:59.717569
| 2022-05-09T18:16:16
| 2022-05-09T18:16:16
| 49,632,891
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,005
|
rd
|
rGibbsHurdle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HurdleDistributions.R
\name{rGibbsHurdle}
\alias{rGibbsHurdle}
\alias{cpp_rGibbsHurdle}
\title{Sample from a multivariate hurdle model}
\usage{
rGibbsHurdle(G, H, K, Nt, burnin = 500, thin = 0.1, tol = 5e-04, Nkeep = 500)
}
\arguments{
\item{G}{symmetric discrete interaction matrix}
\item{H}{unstructured location matrix}
\item{K}{Symmetric positive definite conditional precision matrix}
\item{Nt}{Number of unthinned samples, including burnin}
\item{burnin}{how many samples to discard from burn in}
\item{thin}{how many samples to thin}
\item{tol}{Numeric tolerance for zero}
\item{Nkeep}{(optional) number of samples, post-burnin and thinning}
}
\value{
matrix of (Nt-burnin)*thin samples
}
\description{
Sample from a multivariate hurdle model
}
\examples{
G = matrix(c(-15, 1, 0,
1, -15, 1.5,
0, 1.5, -15), nrow=3)
H = diag(5, nrow=3)
K = diag(1, nrow=3)
y = rGibbsHurdle(G, H, K, 2000, thin = .2, burnin=1000)
}
|
b3f3a7afad8645151e9a85221131b599882fb0ef
|
bf0f6a1900c953a9477967d91b4b68422ce926a7
|
/tests/testthat/test_to_ast.R
|
2a2bba77ad05931f38b9b3ffe82c641cb020148a
|
[] |
no_license
|
clarkfitzg/rstatic
|
95f183455580511115168e6962bc3668ad2a763d
|
906373cbbb241390aaa37a5d963fa7c0537f5aad
|
refs/heads/master
| 2021-01-16T00:00:08.242471
| 2017-07-31T01:06:13
| 2017-07-31T01:06:13
| 99,951,249
| 0
| 0
| null | 2017-08-10T17:59:13
| 2017-08-10T17:59:13
| null |
UTF-8
|
R
| false
| false
| 1,732
|
r
|
test_to_ast.R
|
context("toAST")


test_that("call args have correct parent", {
  ast <- toAST(call("is.na", 42L))

  # -----
  expect_is(ast, "Call")
  expect_is(ast$args[[1]], "Integer")
  # Child nodes must point back at the node that contains them.
  expect_identical(ast, ast$args[[1]]$parent)
})


test_that("primitives are converted to Primitives", {
  ast <- toAST(sum)

  # -----
  expect_is(ast, "Primitive")
  #expect_is(ast$name, "Symbol")

  expected_params <- names(formals(args(sum)))
  expect_equal(names(ast$params), expected_params)

  first_param <- ast$params[[1]]
  expect_is(first_param, "Parameter")
  expect_equal(first_param$default, NULL)

  second_param <- ast$params[[2]]
  expect_is(second_param, "Parameter")
  expect_equal(second_param$default$value, FALSE)
})


test_that("functions are converted to Functions", {
  ast <- toAST(ifelse)

  # -----
  expect_is(ast, "Function")

  expect_equal(names(ast$params), names(formals(ifelse)))

  # All three of ifelse's parameters are required (no defaults).
  for (param in ast$params) {
    expect_is(param, "Parameter")
    expect_equal(param$default, NULL)
  }
})


test_that("functions with no params are converted to Functions", {
  ast <- toAST(function() 42L)

  # -----
  expect_is(ast, "Function")
  expect_equal(length(ast$params), 0)
})


test_that("function definitions are converted to Functions", {
  fn_def <- quote(function(a, b = 3) 42L)
  ast <- toAST(fn_def)

  # -----
  expect_is(ast, "Function")
  expect_equal(names(ast$params), c("a", "b"))

  param_a <- ast$params[[1]]
  expect_is(param_a, "Parameter")
  expect_equal(param_a$default, NULL)

  param_b <- ast$params[[2]]
  expect_is(param_b, "Parameter")
  expect_equal(param_b$default$value, 3)
})
|
c9e8bb407c760885198ac388fdc98ff2f41e48ef
|
091d556043c26004c97265787f83a09698df7e61
|
/inst/vosondash/ui/popovers.R
|
bd3e94abf417f83d5037d3974e61026f9ad0d78f
|
[] |
no_license
|
cran/VOSONDash
|
8b289008643072ab617e797b2f94b5e06e7beb1f
|
2146bce8bf765f33b512349f3d4a9c274a163d70
|
refs/heads/master
| 2021-07-23T22:20:35.176937
| 2020-07-27T12:20:02
| 2020-07-27T12:20:02
| 201,459,148
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,538
|
r
|
popovers.R
|
# Popover for the graph layout re-seed control.
po_reseed_graph <- function() {
  list(
    title = "Re-seed Graph",
    content = "Generate a new random number to seed the graph layout."
  )
}
# Popover describing the available graph layout algorithms.
po_graph_layout <- function() {
  content_parts <- c(
    "Select a network layout for the graph.<br><br>",
    "<code>FR</code> Fruchterman-Reingold<br>",
    "<code>KK</code> Kamada-Kawai<br>",
    "<code>DH</code> Davidson-Harel<br>",
    "<code>LGL</code> Large Graph Layout<br>",
    "<code>DrL</code> Distributed Recursive Layout<br>",
    "<code>GEM</code> GEM Force-Directed Layout<br>",
    "<code>MDS</code> Multidimensional Scaling Layout<br><br>",
    "<i class = 'fa fa-book-reader'></i> ",
    "<a href = 'https://igraph.org/c/doc/igraph-Layout.html' target = '_blank'>igraph Layouts</a>"
  )

  list(title = "Graph Layout Algorithms",
       content = paste(content_parts, collapse = ""))
}
# Popover explaining how categorical filter variables are detected.
# Fix: user-facing typo "seperate" -> "separate".
po_cat_filter <- function() {
  title <- "Categorical Filter"
  content <- paste0("Categorical variables are identified by graph vertex attributes with names that begin with the ",
                    "prefix code <code>vosonCA_</code>. VOSON Dash does not provide an interface for adding ",
                    "these vertex attributes at this time, so they must be added to the graph in a separate data ",
                    "coding process.<br><br>",
                    "When found these variables appear in the <code>Category</code> select list and can be used to ",
                    "filter graph vertices using the list of category values under <code>View</code>.")

  list(title = title, content = content)
}
# Popover describing the Twitter search query input.
# Fix: user-facing grammar "can also entered" -> "can also be entered".
po_twit_query <- function() {
  title <- "Twitter Search Query"
  content <- paste0("A range of search operators and filters can be used in the <code>Search Query</code> input. ",
                    "Simple terms can also be entered and used in conjunction with the <code>Additional Filters</code> ",
                    "provided.<br><br>",
                    "<i class = 'fa fa-book-reader'></i> ",
                    "<a href = 'https://developer.twitter.com/en/docs/tweets/rules-and-filtering/overview/standard-",
                    "operators' target = '_blank'>Twitter Standard Search Operators</a>"
  )

  list(title = title, content = content)
}
# Popover describing the Since ID / Max ID search bounds.
po_twit_id_range <- function() {
  content_parts <- c(
    "Set the bounds of a search. <code>Since ID</code> requests the twitter API to return only ",
    "tweets tweeted after a particular tweet or status ID. <code>Max ID</code> requests the return ",
    "of only tweets tweeted before a tweet or status ID."
  )

  list(title = "Twitter ID Range",
       content = paste(content_parts, collapse = ""))
}
# Popover describing the tweet language filter.
# Fix: the two final paste0 pieces concatenated with no separator, producing
# the malformed anchor "...codes'target = '_blank'"; a space is now included.
po_twit_lang <- function() {
  title <- "Tweet Language"
  content <- paste0("Requests the twitter API to return tweets in the language entered as two character ",
                    "<code>ISO 639-1</code> code. Language detection is best-effort.<br><br>",
                    "<i class = 'fa fa-book-reader'></i> ",
                    "<a href = 'https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes' ",
                    "target = '_blank'>ISO 639-1 Language Codes</a>"
  )

  list(title = title, content = content)
}
# Popover describing the Twitter result-type options.
po_twit_results <- function() {
  content_parts <- c(
    "Requests the twitter API to return tweets of the following result types.<br><br>",
    "<code>Recent</code> Return only the most recent tweets in results<br>",
    "<code>Mixed</code> Return mixture of both popular and real time tweets in results<br>",
    "<code>Popular</code> Return only the most popular tweets in results"
  )

  list(title = "Search Results",
       content = paste(content_parts, collapse = ""))
}
# Popover explaining the Limited concept-relations option.
po_twit_assoc <- function() {
  content_parts <- c(
    "<code>Limited</code> option includes only ties between most frequently ",
    "occurring hashtags and terms. If unchecked the network will include ties between most ",
    "frequently occurring hashtags and terms, hashtags and hashtags, and terms and terms."
  )

  list(title = "Concept Relations",
       content = paste(content_parts, collapse = ""))
}
# Popover showing accepted Youtube URL formats.
po_yt_url <- function() {
  content_parts <- c(
    "Enter a Youtube video URL in either long or short format.<br><br>",
    "<i class = 'fa fa-angle-right'></i> ",
    "<code>https://www.youtube.com/watch?v=xxxxxxxxxxx</code><br>",
    "<i class = 'fa fa-angle-right'></i> ",
    "<code>https://youtu.be/xxxxxxxx</code>"
  )

  list(title = "Youtube Video URL",
       content = paste(content_parts, collapse = ""))
}
# Popover showing the accepted Reddit thread URL format.
po_red_url <- function() {
  content_parts <- c(
    "Enter a Reddit thread URL in the following format.<br><br>",
    "<i class = 'fa fa-angle-right'></i> ",
    "<code>https://www.reddit.com/r/xxxxxx/<br>comments/xxxxxx/x_xxxx_xxxx</code><br><br>",
    "Collects maximum 500 comments per thread."
  )

  list(title = "Reddit Thread URL",
       content = paste(content_parts, collapse = ""))
}
# Popover explaining interactive web-based token creation.
# Fixes: (1) the final two paste0 pieces concatenated with no separator,
# producing the malformed anchor "...oauth'target = '_blank'";
# (2) user-facing typo "interupting" -> "interrupting".
po_web_auth <- function() {
  title <- "Web Auth Token"
  content <- paste0("This token creation method is interactive and will open a web browser tab to twitter asking you ",
                    "to login and authorize the twitter app. This allows the app to access the twitter API on your ",
                    "behalf.<br><br>",
                    "As <code>VOSONDash</code> only uses the twitter search API an app only requires minimal ",
                    "<code>read</code> access to the users twitter account.<br><br>",
                    "Note: Unfortunately a current side-effect of <code>aborting</code> or interrupting / not ",
                    "completing the process is ending the <code>VOSONDash</code> session. Please save any work before ",
                    "continuing.",
                    "<br><br>",
                    "<i class = 'fa fa-book-reader'></i> ",
                    "<a href = 'https://developer.twitter.com/en/docs/basics/authentication/overview/oauth' ",
                    "target = '_blank'>Twitter API: Application-user authentication</a>"
  )

  list(title = title, content = content)
}
|
05ef02fd8bf79290db1e7268a8b7d6cc982f858b
|
e0e1c770c793ffbad0ce09a1724adbc1534bcc94
|
/R/makePredictions.R
|
99205cb6dc6485ab4d52758cd97dc285af699af7
|
[] |
no_license
|
ctloftin/NCAATournament
|
955a6c14bb5c2d2bf5e71dfeb4e30683f738513c
|
91b8c72e77c27025263d34cf3ad5718b6015db28
|
refs/heads/master
| 2021-09-10T08:40:07.988384
| 2018-03-23T03:35:25
| 2018-03-23T03:35:25
| 124,843,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,676
|
r
|
makePredictions.R
|
#' Predict NCAA tournament game outcomes with the fitted model.
#'
#' Reads the historical tournament results, fits the model returned by
#' \code{create_model()}, scores every team/game row, and resolves any game
#' whose two paired rows do not yield exactly one predicted winner by giving
#' the win to the row with the larger predicted probability.
#'
#' @return The scored test data frame with \code{pred} (win probability),
#'   \code{bpred} (binary prediction) and \code{Correct} (1 when
#'   \code{bpred} matches \code{Result}) columns added.
#'@export
makePredictions <- function() {
  results <- read.csv(file = "data/HistNCAATournamentResults.csv",
                      header = TRUE, stringsAsFactors = FALSE)
  # Row identifier; presumably carried through create_model() so that the
  # two rows of one game share a GID downstream -- TODO confirm.
  results$GID <- seq_len(nrow(results))

  lmmodel <- create_model(fromSample = TRUE, yearToTest = NULL)
  test <- lmmodel[[2]]
  lmmodel <- lmmodel[[1]]

  # Score each row: predicted win probability and binary win/lose call.
  test$pred <- predict(lmmodel, newdata = test, type = 'response')
  test$bpred <- ifelse(test$pred > 0.5, 1, 0)
  test <- test[order(test$GID), ]

  # Games whose two paired rows predict zero or two winners.
  gids <- test %>% group_by(GID) %>% summarise(gamec = sum(bpred), count = n()) %>%
    filter(gamec != 1 & count == 2) %>% as.data.frame() %>% select(GID)
  gids <- as.vector(gids$GID)

  w <- which(test$GID %in% gids)
  # seq_along() (not 1:length(gids)) so an empty `gids` skips the loop
  # instead of indexing with 1:0. Rows in `w` come in adjacent pairs
  # because `test` is sorted by GID above.
  for (i in seq_along(gids)) {
    if (test$pred[w[(2 * i) - 1]] > test$pred[w[2 * i]]) {
      test$bpred[w[2 * i - 1]] <- 1
      test$bpred[w[2 * i]] <- 0
    } else {
      test$bpred[w[2 * i - 1]] <- 0
      test$bpred[w[2 * i]] <- 1
    }
  }

  test$Correct <- ifelse(test$Result == test$bpred, 1, 0)
  # NOTE(review): this value is discarded inside a function (no autoprint);
  # leftover debugging aid kept for parity with the original.
  plyr::count(test$Correct)
  return(test)
}
|
5df34acbfda9bc1c4b952ae5d43ddbfea013d6dc
|
625407d36cc192d1df51593899bf7514ba9dc6ee
|
/plot3.R
|
aada53b42544418439b29ae0ef0ad9dd4ba6a9b4
|
[] |
no_license
|
SergeyBykov1/ExData_Plotting1
|
6dadb9a3fb342dd66ae9e6f2c9dd4048c717de93
|
16f248a8f822d887cc925e5acaf1daaf60e993e3
|
refs/heads/master
| 2021-01-22T14:46:16.190904
| 2014-10-15T02:28:17
| 2014-10-15T02:28:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 914
|
r
|
plot3.R
|
#
# Exploratory Data Analysis
# Course Project 1 Extension
# Plot 3
#

# data.table::fread for fast reading; library() errors when the package is
# missing, whereas require() only warns and returns FALSE.
library(data.table)

full_data <- fread('household_power_consumption.txt', na.strings = "?")

# Keep only the two target days.
target_dates <- as.Date(c("2007-02-01", "2007-02-02"))
target_data <- full_data[as.Date(full_data$Date, format = '%d/%m/%Y') %in% target_dates]

datetime <- as.POSIXct(
    paste(target_data$Date, target_data$Time), format = "%d/%m/%Y %T")

# construct png directly, as dev.copy results in cropped legend
png("plot3.png", width = 480, height = 480)

# build plot 3: Sub_metering_1 black, _2 red, _3 blue. The legend colours
# must follow the same order as colnames(target_data)[7:9]; the original
# c("black", "blue", "red") mislabelled sub meterings 2 and 3.
plot(datetime, target_data$Sub_metering_1, type = 'l',
     xlab = '', ylab = 'Energy sub metering')
lines(datetime, target_data$Sub_metering_2, type = 'l', col = 'red')
lines(datetime, target_data$Sub_metering_3, type = 'l', col = 'blue')
legend("topright", col = c("black", "red", "blue"), lwd = 1,
       legend = colnames(target_data)[7:9])

dev.off()
|
40e0e2041bbbe6f4c99185f7b3b0ae445250441e
|
f3d74693d480724b8e594559ba2adb7a286d7ee6
|
/ar_ordered.R
|
ceaf72c7d72e38c5f6f9771f89c2fe38d0613c9f
|
[] |
no_license
|
deleaf/impois
|
b2a0a1823fb82528d7ce02dca91f0fc07fed28af
|
a88419a6be0dc82d01672c3f03179e3e76d40006
|
refs/heads/master
| 2020-03-09T08:23:20.346793
| 2018-04-28T21:38:49
| 2018-04-28T21:38:49
| 128,687,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 580
|
r
|
ar_ordered.R
|
# Level-gamma acceptance region for X at fixed lambda and b, built by adding
# outcomes in decreasing order of the ranking function r until the
# accumulated Poisson(lambda + b) probability reaches gamma.
#
# lambda : signal rate
# b      : background rate
# gamma  : coverage level in (0, 1]
# r      : ranking function r(X, lambda, b), one rank per outcome in X
# x_max  : upper bound of the outcome grid 0:x_max (default 170 preserves
#          the original hard-coded range)
#
# Returns a one-row data.frame(lambda, left, right) where [left, right]
# spans the accepted outcomes.
ar.ordered <- function(lambda, b, gamma, r, x_max = 170) {
  X <- 0:x_max
  ranks <- r(X, lambda, b)

  # Outcome indices ordered from highest to lowest rank.
  ord <- sort(ranks, decreasing = TRUE, index.return = TRUE)$ix

  # Accumulate probability mass in rank order.
  cum_mass <- cumsum(dpois(X[ord], lambda + b))
  reached <- which(cum_mass >= gamma)
  if (length(reached) == 0L) {
    stop("coverage level 'gamma' not reached on 0:x_max; increase x_max",
         call. = FALSE)
  }
  # First index whose cumulative probability is >= gamma (which() is
  # increasing, so reached[1] == min(reached)).
  last_idx <- reached[1L]

  accepted <- X[ord[seq_len(last_idx)]]
  data.frame(lambda, left = min(accepted), right = max(accepted))
}
|
ed135f3b253e12fb6074bacd20370db7718ec44b
|
2a4957cf70afb6f0c8da2cf41365d7accf701558
|
/man/FSR_standalone.Rd
|
193cbf5740c8ddf1bf71652a104e01e56823392a
|
[
"MIT"
] |
permissive
|
klausfrieler/psyquest
|
85f4a9ac205cf3fc00baff845a1db55ed10161e6
|
3667bdf12f206eb7467482ec9945b2a6ff3ced80
|
refs/heads/master
| 2023-07-29T12:46:53.737338
| 2023-07-12T14:04:06
| 2023-07-12T14:04:06
| 364,350,973
| 1
| 0
|
NOASSERTION
| 2022-01-26T11:33:54
| 2021-05-04T18:32:36
|
R
|
UTF-8
|
R
| false
| true
| 971
|
rd
|
FSR_standalone.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standalone.R
\name{FSR_standalone}
\alias{FSR_standalone}
\title{FSR Standalone}
\usage{
FSR_standalone(languages = psyquest::languages(), subscales = NULL, ...)
}
\arguments{
\item{languages}{(Character vector)
Determines the languages available to participants.
Possible languages include \code{"en"} (English), and \code{"de"} (German).
The first language is selected by default.}
\item{subscales}{(Character vector) The subscales to be included in the questionnaire.
Possible subscales are \code{"Absorption"}, \code{"Fluency of performance"}, \code{"Demands"},
\code{"Skills"}, \code{"Demand Fit"}
and \code{"Importance"}.
If no subscales are provided all subscales are selected.}
\item{...}{Further arguments to be passed to \code{\link{FSR}()}.}
}
\description{
This function launches a standalone testing session for the FSR questionnaire.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.