blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c3084f99c581b9c7d5aa95b40ec4cdef6ade78b5
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/npbr/inst/doc/ex-npbr.R
|
b3a6a5d6c021530482a41d86181c060ab8f598fb
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 57,423
|
r
|
ex-npbr.R
|
### R code from vignette source 'ex-npbr.rnw'
### Encoding: ISO8859-1
###################################################
### code chunk number 1: ex-npbr.rnw:60-65
###################################################
# Vignette setup: remember the current global options so the vignette can
# restore them when it finishes, then configure output for figure generation.
owidth <- getOption("width")   # saved so the original console width can be restored later
options(width = 70)            # narrow printed output to fit the vignette page
ow <- getOption("warn")        # saved so the original warning level can be restored later
options(warn = -1)             # silence warnings while the vignette code runs
.PngNo <- 0                    # counter used to number generated figure files (Fig-bitmap-<n>.pdf)
###################################################
### code chunk number 2: bfig (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 9, height = 7, pointsize = 14, bg = "white")
###################################################
### code chunk number 3: bfig2 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 14, height = 7, pointsize = 14, bg = "white")
###################################################
### code chunk number 4: bfig3 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 7, height = 7, pointsize = 14, bg = "white")
###################################################
### code chunk number 5: bfig4 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 18, height = 7, pointsize = 14, bg = "white")
###################################################
### code chunk number 6: bfig5 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 18, height = 14, pointsize = 14, bg = "white")
###################################################
### code chunk number 7: zfig (eval = FALSE)
###################################################
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.75\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 8: zfig2 (eval = FALSE)
###################################################
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.9\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 9: zfig3 (eval = FALSE)
###################################################
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.8\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 10: ex-npbr.rnw:357-363 (eval = FALSE)
###################################################
## require("npbr")
## data("records")
## data("nuclear")
## data("air")
## data("post")
## data("green")
###################################################
### code chunk number 11: ex-npbr.rnw:366-375 (eval = FALSE)
###################################################
## plot(result~year, data=records, col='blue2',
## xlab="year", ylab="1500m record")
## plot(ytab~xtab, data=nuclear, col='blue2',
## xlab="temp. of the reactor vessel", ylab="fracture toughness")
## plot(ytab~xtab, data=air, col='blue2',
## xlab="input", ylab="output")
## plot(yprod~xinput, data=post, col='blue2',
## xlab="quantity of labor", ylab="volume of delivered mail")
## plot(log(OUTPUT)~log(COST), data=green, col='blue2')
###################################################
### code chunk number 12: ex-npbr.rnw:380-394 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 18, height = 14, pointsize = 14, bg = "white")
## op<-par(mfrow=c(2,3),mar=c(3,3.1,2.1,2.1),mgp=c(2,.4,0),oma=c(0,0,0,0),
## cex.lab=1.2, cex.main=1, col.main="blue")
## plot(result~year, data=records, col='blue2', pch=1,
## xlab="year", ylab="1500m record", main="(a)")
## plot(ytab~xtab, data=nuclear, pch=1,col='blue2',
## xlab="temp. of the reactor vessel", ylab="fracture toughness", main="(b)")
## plot(ytab~xtab, data=air, pch=1,col='blue2',
## xlab="input", ylab="output", main="(c)")
## plot(yprod~xinput, data=post, pch=1, col='blue2',
## xlab="quantity of labor", ylab="volume of delivered mail", main="(d)")
## plot(log(OUTPUT)~log(COST), data=green, pch=1,col='blue2', main="(e)")
## par(op)
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.9\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 13: ex-npbr.rnw:465-468 (eval = FALSE)
###################################################
## x.air <- seq(min(air$xtab), max(air$xtab), length.out=101)
## x.green <- seq(min(log(green$COST)), max(log(green$COST)),
## length.out=101)
###################################################
### code chunk number 14: ex-npbr.rnw:471-481 (eval = FALSE)
###################################################
## y.dea.green<-dea_est(log(green$COST), log(green$OUTPUT),
## x.green, type="dea")
## y.fdh.green<-dea_est(log(green$COST), log(green$OUTPUT),
## x.green, type="fdh")
## y.lfdh.green=dea_est(log(green$COST), log(green$OUTPUT),
## x.green, type="lfdh")
##
## y.dea.air<-dea_est(air$xtab, air$ytab, x.air, type="dea")
## y.fdh.air<-dea_est(air$xtab, air$ytab, x.air, type="fdh")
## y.lfdh.air=dea_est(air$xtab, air$ytab, x.air, type="lfdh")
###################################################
### code chunk number 15: ex-npbr.rnw:485-500 (eval = FALSE)
###################################################
## plot(x.green, y.dea.green, lty=4, lwd=4, col="cyan",
## type="l", xlab="log(cost)",ylab="log(output)")
## lines(x.green, y.fdh.green, lty=1, lwd=4, col="green")
## lines(x.green, y.lfdh.green, lty=2, lwd=4, col="magenta")
## legend("topleft", legend=c("DEA","FDH","LFDH"),
## col=c("cyan","green","magenta"), lty=c(4,1,2), lwd=4)
## points(log(OUTPUT)~log(COST), data=green, cex=1)
##
## plot(x.air, y.dea.air, lty=4, lwd=4, col="cyan",
## type="l", xlab="input",ylab="output")
## lines(x.air, y.fdh.air, lty=1, lwd=4, col="green")
## lines(x.air, y.lfdh.air, lty=2, lwd=4, col="magenta")
## legend("topleft", legend=c("DEA","FDH","LFDH"),
## col=c("cyan","green","magenta"), lty=c(4,1,2), lwd=4)
## points(ytab~xtab, data=air)
###################################################
### code chunk number 16: ex-npbr.rnw:504-523 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 14, height = 7, pointsize = 14, bg = "white")
## op=par(mfrow=c(1,2),mar=c(3,3.1,2.1,2.1),mgp=c(2,.4,0),oma=c(0,0,0,0),cex.lab=1.2)
## plot(x.green, y.dea.green, lty=4, lwd=4, col="cyan",
## type="l", xlab="log(cost)",ylab="log(output)")
## lines(x.green, y.fdh.green, lty=1, lwd=4, col="green")
## lines(x.green, y.lfdh.green, lty=2, lwd=4, col="magenta")
## legend("topleft", legend=c("DEA","FDH","LFDH"),
## col=c("cyan","green","magenta"), lty=c(4,1,2), lwd=4)
## points(log(OUTPUT)~log(COST), data=green, cex=1)
##
## plot(x.air, y.dea.air, lty=4, lwd=4, col="cyan",
## type="l", xlab="input",ylab="output")
## lines(x.air, y.fdh.air, lty=1, lwd=4, col="green")
## lines(x.air, y.lfdh.air, lty=2, lwd=4, col="magenta")
## legend("topleft", legend=c("DEA","FDH","LFDH"),
## col=c("cyan","green","magenta"), lty=c(4,1,2), lwd=4)
## points(ytab~xtab, data=air)
## par(op)
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.9\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 17: ex-npbr.rnw:562-567 (eval = FALSE)
###################################################
## (p.aic.records<-poly_degree(records$year, 1/records$result, prange=0:12,
## type = "AIC"))
## (p.aic.air<-poly_degree(air$xtab, air$ytab,
## type = "AIC"))
## (p.aic.nuc<-poly_degree(nuclear$xtab, nuclear$ytab, type = "AIC"))
###################################################
### code chunk number 18: ex-npbr.rnw:571-580 (eval = FALSE)
###################################################
## x.records<-seq(min(records$year), max(records$year), length.out=101)
## y.poly.records<-poly_est(records$year, 1/records$result, x.records,
## deg=p.aic.records)
## y.poly.air<-poly_est(air$xtab, air$ytab, x.air,
## deg=p.aic.air)
## x.nucl <- seq(min(nuclear$xtab), max(nuclear$xtab),
## length.out=101)
## y.poly.nuc<-poly_est(nuclear$xtab, nuclear$ytab, x.nucl,
## deg=p.aic.nuc)
###################################################
### code chunk number 19: ex-npbr.rnw:586-599 (eval = FALSE)
###################################################
## plot(x.records, 1/y.poly.records, lty=1, lwd=4,
## col="magenta", type="l")
## points(result~year, data=records)
## plot(x.air, y.poly.air, lty=1, lwd=4,
## col="magenta", type="l")
## points(ytab~xtab, data=air)
## legend("topleft",legend=paste("degree =",p.aic.air),
## col="magenta", lwd=4, lty=1)
## plot(x.nucl, y.poly.nuc, lty=1, lwd=4,
## col="cyan", type="l", ylim=range(nuclear$ytab))
## points(ytab~xtab, data=nuclear)
## legend("topleft",legend=paste("degree =",p.aic.nuc),
## col="cyan", lwd=4, lty=1)
###################################################
### code chunk number 20: ex-npbr.rnw:603-622 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 18, height = 7, pointsize = 14, bg = "white")
## op=par(mfrow=c(1,3),mar=c(3,3.1,2.1,2.1),mgp=c(2,.4,0),oma=c(0,0,0,0),cex.lab=1.2)
## plot(x.records, 1/y.poly.records, lty=1, lwd=4,
## col="green", type="l", xlab="year", ylab="1500m record")
## points(result~year, data=records)
## legend("topleft",legend=paste("degree =",p.aic.records),
## col="green", lwd=4, lty=1)
## plot(x.air, y.poly.air, lty=1, lwd=4,
## col="magenta", type="l", xlab="input", ylab="output")
## points(ytab~xtab, data=air)
## legend("topleft",legend=paste("degree =",p.aic.air),
## col="magenta", lwd=4, lty=1)
## plot(x.nucl, y.poly.nuc, lty=1, lwd=4,
## col="cyan", type="l", ylim=range(nuclear$ytab), xlab="temperature", ylab="toughness")
## points(ytab~xtab, data=nuclear)
## legend("topleft",legend=paste("degree =",p.aic.nuc),
## col="cyan", lwd=4, lty=1)
## par(op)
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.9\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 21: ex-npbr.rnw:726-730 (eval = FALSE)
###################################################
## (kn.bic.air.u<-quad_spline_kn(air$xtab,
## air$ytab, method="u", type="BIC"))
## (kn.bic.green.u<-quad_spline_kn(log(green$COST),
## log(green$OUTPUT), method="u", type="BIC"))
###################################################
### code chunk number 22: ex-npbr.rnw:734-738 (eval = FALSE)
###################################################
## y.quad.air.u<-quad_spline_est(air$xtab,
## air$ytab, x.air, kn=kn.bic.air.u, method="u")
## y.quad.green.u<-quad_spline_est(log(green$COST),
## log(green$OUTPUT), x.green, kn=kn.bic.green.u, method="u")
###################################################
### code chunk number 23: ex-npbr.rnw:741-745 (eval = FALSE)
###################################################
## (kn.bic.air.m<-quad_spline_kn(air$xtab,
## air$ytab, method="m", type="BIC"))
## (kn.bic.green.m<-quad_spline_kn(log(green$COST),
## log(green$OUTPUT), method="m", type="BIC"))
###################################################
### code chunk number 24: ex-npbr.rnw:749-753 (eval = FALSE)
###################################################
## y.quad.air.m<-quad_spline_est(air$xtab,
## air$ytab, x.air, kn=kn.bic.air.m, method="m")
## y.quad.green.m<-quad_spline_est(log(green$COST),
## log(green$OUTPUT), x.green, kn=kn.bic.green.m, method="m")
###################################################
### code chunk number 25: ex-npbr.rnw:758-762 (eval = FALSE)
###################################################
## (kn.bic.air.mc<-quad_spline_kn(air$xtab,
## air$ytab, method="mc", type="BIC"))
## (kn.bic.green.mc<-quad_spline_kn(log(green$COST),
## log(green$OUTPUT), method="mc", type="BIC"))
###################################################
### code chunk number 26: ex-npbr.rnw:766-771 (eval = FALSE)
###################################################
## y.quad.air.mc<-quad_spline_est(air$xtab, air$ytab, x.air,
## kn=kn.bic.air.mc, method="mc", all.dea=TRUE)
## y.quad.green.mc<-quad_spline_est(log(green$COST),
## log(green$OUTPUT), x.green, kn=kn.bic.green.mc,
## method="mc", all.dea=TRUE)
###################################################
### code chunk number 27: ex-npbr.rnw:776-792 (eval = FALSE)
###################################################
## plot(x.air, y.quad.air.u, lty=1, lwd=4, col="green",
## type="l", xlab="input", ylab="output")
## lines(x.air, y.quad.air.m, lty=2, lwd=4, col="cyan")
## lines(x.air, y.quad.air.mc, lty=3, lwd=4, col="magenta")
## points(ytab~xtab, data=air)
## legend("topleft", col=c("green","cyan","magenta"),
## lty=c(1,2,3), legend=c("unconstrained", "monotone",
## "monotone + concave"), lwd=4, cex=0.8)
## plot(x.green, y.quad.green.u, lty=1, lwd=4, col="green",
## type="l", xlab="log(COST)", ylab="log(OUTPUT)")
## lines(x.green, y.quad.green.m, lty=2, lwd=4, col="cyan")
## lines(x.green, y.quad.green.mc, lwd=4, lty=3, col="magenta")
## points(log(OUTPUT)~log(COST), data=green)
## legend("topleft", col=c("green","cyan","magenta"),
## lty=c(1,2,3), legend=c("unconstrained", "monotone",
## "monotone + concave"), lwd=4, cex=0.8)
###################################################
### code chunk number 28: ex-npbr.rnw:796-816 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 14, height = 7, pointsize = 14, bg = "white")
## op=par(mfrow=c(1,2),mar=c(3,3.1,2.1,2.1),mgp=c(2,.4,0),oma=c(0,0,0,0),cex.lab=1.2)
## plot(x.air, y.quad.air.u, lty=1, lwd=4, col="green",
## type="l", xlab="input", ylab="output")
## lines(x.air, y.quad.air.m, lty=2, lwd=4, col="cyan")
## lines(x.air, y.quad.air.mc, lty=3, lwd=4, col="magenta")
## points(ytab~xtab, data=air)
## legend("topleft", col=c("green","cyan","magenta"),
## lty=c(1,2,3), legend=c("unconstrained", "monotone",
## "monotone + concave"), lwd=4, cex=0.8)
## plot(x.green, y.quad.green.u, lty=1, lwd=4, col="green",
## type="l", xlab="log(COST)", ylab="log(OUTPUT)")
## lines(x.green, y.quad.green.m, lty=2, lwd=4, col="cyan")
## lines(x.green, y.quad.green.mc, lwd=4, lty=3, col="magenta")
## points(log(OUTPUT)~log(COST), data=green)
## legend("topleft", col=c("green","cyan","magenta"),
## lty=c(1,2,3), legend=c("unconstrained", "monotone",
## "monotone + concave"), lwd=4, cex=0.8)
## par(op)
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.9\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 29: ex-npbr.rnw:851-863 (eval = FALSE)
###################################################
## (kn.bic.air.u<-cub_spline_kn(air$xtab, air$ytab,
## method="u", type="BIC"))
## (kn.bic.green.u<-cub_spline_kn(log(green$COST),
## log(green$OUTPUT), method="u", type="BIC"))
## (kn.bic.air.m<-cub_spline_kn(air$xtab, air$ytab,
## method="m", type="BIC"))
## (kn.bic.green.m<-cub_spline_kn(log(green$COST),
## log(green$OUTPUT), method="m", type="BIC"))
## (kn.bic.air.mc<-cub_spline_kn(air$xtab, air$ytab,
## method="mc", type="BIC"))
## (kn.bic.green.mc<-cub_spline_kn(log(green$COST),
## log(green$OUTPUT), method="mc", type="BIC"))
###################################################
### code chunk number 30: ex-npbr.rnw:867-879 (eval = FALSE)
###################################################
## y.cub.air.u<-cub_spline_est(air$xtab, air$ytab,
## x.air, kn=kn.bic.air.u, method="u")
## y.cub.green.u<-cub_spline_est(log(green$COST),
## log(green$OUTPUT),x.green,kn=kn.bic.green.u,method="u")
## y.cub.air.m<-cub_spline_est(air$xtab, air$ytab,
## x.air, kn=kn.bic.air.m, method="m")
## y.cub.green.m<-cub_spline_est(log(green$COST),
## log(green$OUTPUT),x.green,kn=kn.bic.green.m,method="m")
## y.cub.air.mc<-cub_spline_est(air$xtab, air$ytab,
## x.air, kn=kn.bic.air.mc, method="mc")
## y.cub.green.mc<-cub_spline_est(log(green$COST),
## log(green$OUTPUT),x.green,kn=kn.bic.green.mc,method="mc")
###################################################
### code chunk number 31: ex-npbr.rnw:883-899 (eval = FALSE)
###################################################
## plot(x.air, y.cub.air.u, lty=1, lwd=4, col="green",
## type="l", xlab="input", ylab="output")
## lines(x.air, y.cub.air.m, lty=2, lwd=4, col="cyan")
## lines(x.air, y.cub.air.mc, lty=3, lwd=4, col="magenta")
## points(ytab~xtab, data=air)
## legend("topleft", col=c("green", "cyan","magenta"),
## lty=c(1,2,3), legend=c("unconstrained", "monotone",
## "monotone+concave"), lwd=4, cex=0.8)
## plot(x.green, y.cub.green.u, lty=1, lwd=4, col="green",
## type="l", xlab="log(COST)", ylab="log(OUTPUT)")
## lines(x.green, y.cub.green.m, lty=2, lwd=4, col="cyan")
## lines(x.green, y.cub.green.mc, lty=3, lwd=4, col="magenta")
## points(log(OUTPUT)~log(COST), data=green)
## legend("topleft", col=c("green","cyan","magenta"),
## lty=c(1,2,3), legend=c("unconstrained", "monotone",
## "monotone+concave"), lwd=4, cex=0.8)
###################################################
### code chunk number 32: ex-npbr.rnw:903-923 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 14, height = 7, pointsize = 14, bg = "white")
## op=par(mfrow=c(1,2),mar=c(3,3.1,2.1,2.1),mgp=c(2,.4,0),oma=c(0,0,0,0),cex.lab=1.2)
## plot(x.air, y.cub.air.u, lty=1, lwd=4, col="green",
## type="l", xlab="input", ylab="output")
## lines(x.air, y.cub.air.m, lty=2, lwd=4, col="cyan")
## lines(x.air, y.cub.air.mc, lty=3, lwd=4, col="magenta")
## points(ytab~xtab, data=air)
## legend("topleft", col=c("green", "cyan","magenta"),
## lty=c(1,2,3), legend=c("unconstrained", "monotone",
## "monotone+concave"), lwd=4, cex=0.8)
## plot(x.green, y.cub.green.u, lty=1, lwd=4, col="green",
## type="l", xlab="log(COST)", ylab="log(OUTPUT)")
## lines(x.green, y.cub.green.m, lty=2, lwd=4, col="cyan")
## lines(x.green, y.cub.green.mc, lty=3, lwd=4, col="magenta")
## points(log(OUTPUT)~log(COST), data=green)
## legend("topleft", col=c("green","cyan","magenta"),
## lty=c(1,2,3), legend=c("unconstrained", "monotone",
## "monotone+concave"), lwd=4, cex=0.8)
## par(op)
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.9\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 33: ex-npbr.rnw:970-972 (eval = FALSE)
###################################################
## h.records.u<- loc_est_bw(records$year, 1/records$result,
## x.records, h=2, B=100, method="u")
###################################################
### code chunk number 34: ex-npbr.rnw:974-975 (eval = FALSE)
###################################################
## (h.records.u<-22.5)
###################################################
### code chunk number 35: ex-npbr.rnw:977-979 (eval = FALSE)
###################################################
## h.air.u<- loc_est_bw(air$xtab, air$ytab, x.air,
## h=2, B=100, method="u")
###################################################
### code chunk number 36: ex-npbr.rnw:981-982 (eval = FALSE)
###################################################
## (h.air.u<-3.612396)
###################################################
### code chunk number 37: ex-npbr.rnw:984-986 (eval = FALSE)
###################################################
## h.air.m<- loc_est_bw(air$xtab, air$ytab, x.air,
## h=2, B=100, method="m")
###################################################
### code chunk number 38: ex-npbr.rnw:988-989 (eval = FALSE)
###################################################
## (h.air.m<-3.638097)
###################################################
### code chunk number 39: ex-npbr.rnw:991-993 (eval = FALSE)
###################################################
## h.nucl.u <- loc_est_bw(nuclear$xtab, nuclear$ytab,
## x.nucl, h=40, B=100, method="u")
###################################################
### code chunk number 40: ex-npbr.rnw:995-996 (eval = FALSE)
###################################################
## (h.nucl.u<-79.11877)
###################################################
### code chunk number 41: ex-npbr.rnw:998-1000 (eval = FALSE)
###################################################
## h.nucl.m <- loc_est_bw(nuclear$xtab, nuclear$ytab,
## x.nucl, h=40, B=100, method="m")
###################################################
### code chunk number 42: ex-npbr.rnw:1002-1003 (eval = FALSE)
###################################################
## (h.nucl.m<-79.12)
###################################################
### code chunk number 43: ex-npbr.rnw:1007-1017 (eval = FALSE)
###################################################
## y.records.u<-loc_est(records$year, 1/records$result,
## x.records, h=h.records.u, method="u")
## y.air.u<-loc_est(air$xtab, air$ytab, x.air, h=h.air.u,
## method="u")
## y.air.m<-loc_est(air$xtab, air$ytab, x.air, h=h.air.m,
## method="m")
## y.nucl.u<-loc_est(nuclear$xtab, nuclear$ytab, x.nucl,
## h=h.nucl.u, method="u")
## y.nucl.m<-loc_est(nuclear$xtab, nuclear$ytab, x.nucl,
## h=h.nucl.m, method="m")
###################################################
### code chunk number 44: ex-npbr.rnw:1021-1038 (eval = FALSE)
###################################################
## plot(x.records, 1/y.records.u, lty=1, lwd=4,
## col="magenta", type="l")
## points(result~year, data=records)
## legend("topright",legend="unconstrained", col="magenta",
## lwd=4, lty=1)
##
## plot(x.air, y.air.u, lty=1, lwd=4, col="magenta", type="l")
## lines(x.air, y.air.m, lty=2, lwd=4, col="cyan")
## points(ytab~xtab, data=air)
## legend("topleft",legend=c("unconstrained", "improved"),
## col=c("magenta","cyan"), lwd=4, lty=c(1,2))
##
## plot(x.nucl, y.nucl.u, lty=1, lwd=4, col="magenta", type="l")
## lines(x.nucl, y.nucl.m, lty=2, lwd=4, col="cyan")
## points(ytab~xtab, data=nuclear)
## legend("topleft",legend=c("unconstrained", "improved"),
## col=c("magenta","cyan"), lwd=4, lty=c(1,2))
###################################################
### code chunk number 45: ex-npbr.rnw:1042-1061 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 18, height = 7, pointsize = 14, bg = "white")
## op=par(mfrow=c(1,3),mar=c(3,3.1,2.1,2.1),mgp=c(2,.4,0),oma=c(0,0,0,0),cex.lab=1.2)
## plot(x.records, 1/y.records.u, lty=1, lwd=4, col="magenta", type="l", xlab="year", ylab="1500m record")
## points(result~year, data=records)
## legend("topright",legend="unconstrained", col="magenta", lwd=4, lty=1)
##
## plot(x.air, y.air.u, lty=1, lwd=4, col="magenta", type="l", xlab="input", ylab="output")
## lines(x.air, y.air.m, lty=2, lwd=4, col="cyan")
## points(ytab~xtab, data=air)
## legend("topleft",legend=c("unconstrained", "improved"),
## col=c("magenta","cyan"), lwd=4, lty=c(1,2))
##
## plot(x.nucl, y.nucl.u, lty=1, lwd=4, col="magenta", type="l", ylim=range(nuclear$ytab), xlab="temperature", ylab="toughness")
## lines(x.nucl, y.nucl.m, lty=2, lwd=4, col="cyan")
## points(ytab~xtab, data=nuclear)
## legend("topleft",legend=c("unconstrained", "improved"),
## col=c("magenta","cyan"), lwd=4, lty=c(1,2))
## par(op)
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.9\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 46: ex-npbr.rnw:1099-1103 (eval = FALSE)
###################################################
## loc_max_1stage<-loc_max(log(green$COST), log(green$OUTPUT),
## x.green, h=0.5, type="one-stage")
## loc_max_2stage<-loc_max(log(green$COST), log(green$OUTPUT),
## x.green, h=0.5, type="two-stage")
###################################################
### code chunk number 47: ex-npbr.rnw:1112-1116 (eval = FALSE)
###################################################
## require("np")
## bw <- npcdistbw(log(OUTPUT)~log(COST), data=green,
## cykertype = "uniform", bwtype="adaptive_nn")$xbw
## (h.opt<-max(bw, max(diff(sort(log(green$COST))))/2))
###################################################
### code chunk number 48: ex-npbr.rnw:1118-1119 (eval = FALSE)
###################################################
## (h.opt=0.4152283)
###################################################
### code chunk number 49: ex-npbr.rnw:1127-1131 (eval = FALSE)
###################################################
## loc_max_1stage.opt<-loc_max(log(green$COST), log(green$OUTPUT),
## x.green, h=h.opt, type="one-stage")
## loc_max_2stage.opt<-loc_max(log(green$COST), log(green$OUTPUT),
## x.green, h=h.opt, type="two-stage")
###################################################
### code chunk number 50: ex-npbr.rnw:1134-1144 (eval = FALSE)
###################################################
## plot(log(OUTPUT)~log(COST), data=green)
## lines(x.green, loc_max_1stage, lty=1, col="magenta")
## lines(x.green, loc_max_2stage, lty=2, col="cyan")
## legend("topleft",legend=c("one-stage", "two-stage"),
## col=c("magenta","cyan"), lty=c(1,2))
## plot(log(OUTPUT)~log(COST), data=green)
## lines(x.green, loc_max_1stage.opt, lty=1, col="magenta")
## lines(x.green, loc_max_2stage.opt, lty=2, col="cyan")
## legend("topleft",legend=c("one-stage", "two-stage"),
## col=c("magenta","cyan"), lty=c(1,2))
###################################################
### code chunk number 51: ex-npbr.rnw:1149-1163 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 14, height = 7, pointsize = 14, bg = "white")
## op=par(mfrow=c(1,2),mar=c(3,3.1,2.1,2.1),mgp=c(2,.4,0),oma=c(0,0,0,0),cex.lab=1.2)
## plot(log(OUTPUT)~log(COST), data=green, main="Peng and Gijbels choice")
## lines(x.green, loc_max_1stage, lty=1, lwd=2, col="magenta")
## lines(x.green, loc_max_2stage, lty=2, lwd=2, col="cyan")
## legend("topleft",legend=c("one-stage", "two-stage"),
## col=c("magenta","cyan"), lty=c(1,2),lwd=2)
## plot(log(OUTPUT)~log(COST), data=green, main="Automatic selection")
## lines(x.green, loc_max_1stage.opt, lty=1, lwd=2,col="magenta")
## lines(x.green, loc_max_2stage.opt, lty=2, lwd=2,col="cyan")
## legend("topleft",legend=c("one-stage", "two-stage"),
## col=c("magenta","cyan"), lty=c(1,2),lwd=2)
## par(op)
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.9\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 52: ex-npbr.rnw:1291-1294 (eval = FALSE)
###################################################
## require("npbr")
## require("np")
## data("green")
###################################################
### code chunk number 53: ex-npbr.rnw:1297-1302 (eval = FALSE)
###################################################
## require("np")
## data("green")
## (h.pr.green.m<-kern_smooth_bw(log(green$COST),
## log(green$OUTPUT), method="m", technique="pr",
## bw_method="cv"))
###################################################
### code chunk number 54: ex-npbr.rnw:1304-1305 (eval = FALSE)
###################################################
## (h.pr.green.m<-0.8304566)
###################################################
### code chunk number 55: ex-npbr.rnw:1307-1310 (eval = FALSE)
###################################################
## (h.noh.green.m<-kern_smooth_bw(log(green$COST),
## log(green$OUTPUT), method="m", technique="noh",
## bw_method="bic"))
###################################################
### code chunk number 56: ex-npbr.rnw:1315-1323 (eval = FALSE)
###################################################
## x.green <- seq(min(log(green$COST)), max(log(green$COST)),
## length.out=101)
## y.pr.green.m<-kern_smooth(log(green$COST),
## log(green$OUTPUT), x.green, h=h.pr.green.m,
## method="m", technique="pr")
## y.noh.green.m<-kern_smooth(log(green$COST),
## log(green$OUTPUT), x.green, h=h.noh.green.m,
## method="m", technique="noh")
###################################################
### code chunk number 57: ex-npbr.rnw:1327-1333 (eval = FALSE)
###################################################
## plot(log(OUTPUT)~log(COST), data=green, xlab="log(COST)",
## ylab="log(OUTPUT)")
## lines(x.green, y.pr.green.m, lwd=4, lty=3, col="red")
## lines(x.green, y.noh.green.m, lwd=4, lty=3, col="blue")
## legend("topleft", col=c("blue","red"),
## lty=3, legend=c("noh","pr"), lwd=4, cex=0.8)
###################################################
### code chunk number 58: ex-npbr.rnw:1337-1347 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 14, height = 7, pointsize = 14, bg = "white")
## #op=par(mfrow=c(1,2),mar=c(3,3.1,2.1,2.1),mgp=c(2,.4,0),oma=c(0,0,0,0),cex.lab=1.2)
## plot(log(OUTPUT)~log(COST), data=green, xlab="log(COST)",
## ylab="log(OUTPUT)")
## lines(x.green, y.pr.green.m, lwd=4, lty=3, col="red")
## lines(x.green, y.noh.green.m, lwd=4, lty=3, col="blue")
## legend("topleft", col=c("blue","red"),
## lty=3, legend=c("noh","pr"), lwd=4, cex=0.8)
## #par(op)
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.8\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 59: ex-npbr.rnw:1430-1432 (eval = FALSE)
###################################################
## x.post<- seq(post$xinput[100],max(post$xinput),
## length.out=100)
###################################################
### code chunk number 60: ex-npbr.rnw:1435-1436 (eval = FALSE)
###################################################
## rho<-2
###################################################
### code chunk number 61: ex-npbr.rnw:1439-1441 (eval = FALSE)
###################################################
## best_kn.1<-kopt_momt_pick(post$xinput, post$yprod,
## x.post, rho=rho)
###################################################
### code chunk number 62: ex-npbr.rnw:1444-1446 (eval = FALSE)
###################################################
## rho_momt<-rho_momt_pick(post$xinput, post$yprod,
## x.post, method="moment")
###################################################
### code chunk number 63: ex-npbr.rnw:1449-1451 (eval = FALSE)
###################################################
## best_kn.2<-kopt_momt_pick(post$xinput, post$yprod,
## x.post, rho=rho_momt)
###################################################
### code chunk number 64: ex-npbr.rnw:1453-1469 (eval = FALSE)
###################################################
## rho_momt<-c(1.993711,2.360920,2.245450,3.770526,2.724960,3.667846,4.026203,
## 2.281109,1.363260,1.150343,2.567832,2.228400,3.106491,2.592477,
## 2.233479,2.040209,1.916878,1.494831,1.961430,1.930942,1.927990,
## 1.833530,1.808632,1.758135,1.717626,1.686540,1.707200,1.711357,
## 1.720839,1.704845,1.678985,1.686872,1.686907,1.747732,1.741290,
## 1.792388,1.805144,1.855829,1.919817,1.929348,2.046588,2.135351,
## 2.196834,2.224797,2.221043,2.290578,2.390179,2.042884,2.087287,
## 2.158198,2.173314,2.260872,2.311427,1.865147,1.874019,1.913673,
## 1.922869,1.918484,1.949220,1.961016,1.998101,2.023605,2.041663,
## 2.067775,2.088982,2.107949,2.152688,2.170959,1.283350,1.285458,
## 1.295437,1.296902,1.316896,1.331668,1.330163,1.339701,1.322501,
## 1.326488,1.373837,1.392537,1.419458,1.426513,1.448544,1.473716,
## 1.517720,1.549229,1.561259,1.567216,1.580512,1.647293,1.672556,
## 1.750994,1.743083,1.801643,1.823678,1.869798,1.906898,1.873269,
## 1.893699,1.916469)
## best_kn.2<-kopt_momt_pick(post$xinput, post$yprod, x.post, rho=rho_momt)
###################################################
### code chunk number 65: ex-npbr.rnw:1476-1479 (eval = FALSE)
###################################################
## rho_trimmean<-mean(rho_momt, trim=0.00)
## best_kn.3<-kopt_momt_pick(post$xinput, post$yprod,
## x.post, rho=rho_trimmean)
###################################################
### code chunk number 66: ex-npbr.rnw:1482-1488 (eval = FALSE)
###################################################
## res.momt.1<-dfs_momt(post$xinput, post$yprod, x.post,
## rho=rho, k=best_kn.1)
## res.momt.2<-dfs_momt(post$xinput, post$yprod, x.post,
## rho=rho_momt, k=best_kn.2)
## res.momt.3<-dfs_momt(post$xinput, post$yprod, x.post,
## rho=rho_trimmean, k=best_kn.3)
###################################################
### code chunk number 67: ex-npbr.rnw:1491-1506 (eval = FALSE)
###################################################
## plot(yprod~xinput, data=post, xlab="Quantity of labor",
## ylab="Volume of delivered mail")
## lines(x.post, res.momt.1[,1], lty=1, col="cyan")
## lines(x.post, res.momt.1[,2], lty=3, col="magenta")
## lines(x.post, res.momt.1[,3], lty=3, col="magenta")
## plot(yprod~xinput, data=post, xlab="Quantity of labor",
## ylab="Volume of delivered mail")
## lines(x.post, res.momt.2[,1], lty=1, col="cyan")
## lines(x.post, res.momt.2[,2], lty=3, col="magenta")
## lines(x.post, res.momt.2[,3], lty=3, col="magenta")
## plot(yprod~xinput, data=post, xlab="Quantity of labor",
## ylab="Volume of delivered mail")
## lines(x.post, res.momt.3[,1], lty=1, col="cyan")
## lines(x.post, res.momt.3[,2], lty=3, col="magenta")
## lines(x.post, res.momt.3[,3], lty=3, col="magenta")
###################################################
### code chunk number 68: ex-npbr.rnw:1510-1526 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 18, height = 7, pointsize = 14, bg = "white")
## op=par(mfrow=c(1,3),mar=c(3,3.1,2.1,2.1),mgp=c(2,.4,0),oma=c(0,0,0,0),cex.lab=1.2)
## plot(yprod~xinput, data=post, col="grey", xlab="Quantity of labor", ylab="Volume of delivered mail")
## lines(x.post, res.momt.1[,1], lty=1, lwd=2, col="cyan")
## lines(x.post, res.momt.1[,2], lty=3, lwd=4, col="magenta")
## lines(x.post, res.momt.1[,3], lty=3, lwd=4, col="magenta")
## plot(yprod~xinput, data=post, col="grey", xlab="Quantity of labor", ylab="Volume of delivered mail")
## lines(x.post, res.momt.2[,1], lty=1, lwd=2, col="cyan")
## lines(x.post, res.momt.2[,2], lty=3, lwd=4, col="magenta")
## lines(x.post, res.momt.2[,3], lty=3, lwd=4, col="magenta")
## plot(yprod~xinput, data=post, col="grey", xlab="Quantity of labor", ylab="Volume of delivered mail")
## lines(x.post, res.momt.3[,1], lty=1, lwd=2, col="cyan")
## lines(x.post, res.momt.3[,2], lty=3, lwd=4, col="magenta")
## lines(x.post, res.momt.3[,3], lty=3, lwd=4, col="magenta")
## par(op)
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.9\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 69: ex-npbr.rnw:1581-1582 (eval = FALSE)
###################################################
## rho<-2
###################################################
### code chunk number 70: ex-npbr.rnw:1585-1587 (eval = FALSE)
###################################################
## best_kn.1<-kopt_momt_pick(post$xinput, post$yprod,
## x.post, method="pickands", rho=rho)
###################################################
### code chunk number 71: ex-npbr.rnw:1590-1592 (eval = FALSE)
###################################################
## rho_pick<-rho_momt_pick(post$xinput, post$yprod,
## x.post, method="pickands")
###################################################
### code chunk number 72: ex-npbr.rnw:1595-1597 (eval = FALSE)
###################################################
## best_kn.2<-kopt_momt_pick(post$xinput, post$yprod,
## x.post, method="pickands", rho=rho_pick)
###################################################
### code chunk number 73: ex-npbr.rnw:1601-1604 (eval = FALSE)
###################################################
## rho_trimmean<-mean(rho_pick, trim=0.00)
## best_kn.3<-kopt_momt_pick(post$xinput, post$yprod,
## x.post, rho=rho_trimmean, method="pickands")
###################################################
### code chunk number 74: ex-npbr.rnw:1607-1613 (eval = FALSE)
###################################################
## res.pick.1<-dfs_pick(post$xinput, post$yprod, x.post,
## rho=rho, k=best_kn.1)
## res.pick.2<-dfs_pick(post$xinput, post$yprod, x.post,
## rho=rho_pick, k=best_kn.2)
## res.pick.3<-dfs_pick(post$xinput, post$yprod, x.post,
## rho=rho_trimmean, k=best_kn.3)
###################################################
### code chunk number 75: ex-npbr.rnw:1617-1632 (eval = FALSE)
###################################################
## plot(yprod~xinput, data=post, xlab="Quantity of labor",
## ylab="Volume of delivered mail")
## lines(x.post, res.pick.1[,1], lty=1, col="cyan")
## lines(x.post, res.pick.1[,2], lty=3, col="magenta")
## lines(x.post, res.pick.1[,3], lty=3, col="magenta")
## plot(yprod~xinput, data=post, xlab="Quantity of labor",
## ylab="Volume of delivered mail")
## lines(x.post, res.pick.2[,1], lty=1, col="cyan")
## lines(x.post, res.pick.2[,2], lty=3, col="magenta")
## lines(x.post, res.pick.2[,3], lty=3, col="magenta")
## plot(yprod~xinput, data=post, xlab="Quantity of labor",
## ylab="Volume of delivered mail")
## lines(x.post, res.pick.3[,1], lty=1, col="cyan")
## lines(x.post, res.pick.3[,2], lty=3, col="magenta")
## lines(x.post, res.pick.3[,3], lty=3, col="magenta")
###################################################
### code chunk number 76: ex-npbr.rnw:1636-1652 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 18, height = 7, pointsize = 14, bg = "white")
## op=par(mar=c(3,3.1,2.1,2.1),mgp=c(2,.4,0),oma=c(0,0,0,0),cex.lab=1.2, mfrow=c(1,3))
## plot(yprod~xinput, data=post, col="grey", xlab="Quantity of labor", ylab="Volume of delivered mail")
## lines(x.post, res.pick.1[,1], lty=1, lwd=2, col="cyan")
## lines(x.post, res.pick.1[,2], lty=3, lwd=4, col="magenta")
## lines(x.post, res.pick.1[,3], lty=3, lwd=4, col="magenta")
## plot(yprod~xinput, data=post, col="grey", xlab="Quantity of labor", ylab="Volume of delivered mail")
## lines(x.post, res.pick.2[,1], lty=1, lwd=2, col="cyan")
## lines(x.post, res.pick.2[,2], lty=3, lwd=4, col="magenta")
## lines(x.post, res.pick.2[,3], lty=3, lwd=4, col="magenta")
## plot(yprod~xinput, data=post, col="grey", xlab="Quantity of labor", ylab="Volume of delivered mail")
## lines(x.post, res.pick.3[,1], lty=1, lwd=2, col="cyan")
## lines(x.post, res.pick.3[,2], lty=3, lwd=4, col="magenta")
## lines(x.post, res.pick.3[,3], lty=3, lwd=4, col="magenta")
## par(op)
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.9\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 77: ex-npbr.rnw:1772-1773 (eval = FALSE)
###################################################
## rho<-2
###################################################
### code chunk number 78: ex-npbr.rnw:1776-1780 (eval = FALSE)
###################################################
## best_cm.1<- mopt_pwm(post$xinput, post$yprod,
## x.post, a=2, rho=rho, wind.coef=0.1)
## res.pwm.1<- dfs_pwm(post$xinput, post$yprod, x.post,
## coefm=best_cm.1, a=2, rho=rho)
###################################################
### code chunk number 79: ex-npbr.rnw:1782-1807 (eval = FALSE)
###################################################
## res.pwm.1<-matrix(c(3693.689,3391.852,3995.527,3698.156,3393.867,4002.446,3664.170,3378.494,3949.845,4052.185,3678.129,
## 4426.240,4583.859,4082.118,5085.599,4544.788,4058.775,5030.800,4514.398,4040.244,4988.552,4370.231,3966.160,4774.301,
## 4299.334,3920.889,4677.779,5996.560,5104.971,6888.149,5767.108,5006.010,6528.207,8134.150,6676.470,9591.829,7714.785,
## 6449.040,8980.529,7293.492,6169.318,8417.665,7182.106,6156.329,8207.884,6996.592,6065.089,7928.095,7237.842,6381.361,
## 8094.322,7028.955,6258.911,7798.999,6880.657,6164.981,7596.333,6871.169,6193.560,7548.777,7087.487,6442.267,7732.707,
## 7160.956,6542.813,7779.100,7473.784,6873.938,8073.630,7468.800,6889.968,8047.631,7423.528,6855.837,7991.220,7399.736,
## 6846.585,7952.887,7466.078,6922.605,8009.552,7695.183,7152.966,8237.401,7674.784,7146.509,8203.058,7632.812,7115.122,
## 8150.503,7656.791,7143.767,8169.815,7694.408,7188.657,8200.158,7743.059,7245.345,8240.773,7784.993,7295.642,8274.344,
## 7768.774,7282.830,8254.718,7744.851,7268.383,8221.318,7737.486,7263.321,8211.652,7816.011,7351.348,8280.673,8050.885,
## 7582.127,8519.643,8065.387,7600.336,8530.437,8379.410,7909.951,8848.869,8691.761,8217.325,9166.196,8697.871,8234.138,
## 9161.604,8671.017,8213.569,9128.464,8673.969,8215.666,9132.272,9115.328,8640.232,9590.424,9388.969,8909.159,9868.779,
## 9496.951,9037.316,9956.587,9500.138,9050.094,9950.183,9709.822,9256.602,10163.042,9684.555,9236.256,10132.853,9973.046,
## 9498.580,10447.511,10399.506,9868.298,10930.713,10379.734,9851.823,10907.644,10406.428,9882.258,10930.599,10473.119,
## 9956.109,10990.130,10524.562,10009.960,11039.163,10520.942,10006.962,11034.921,10583.200,10072.521,11093.879,10643.052,
## 10134.391,11151.712,10638.578,10141.266,11135.889,10625.220,10133.219,11117.221,10611.912,10122.349,11101.475,10731.387,
## 10240.820,11221.954,10709.384,10223.295,11195.474,10697.309,10217.597,11177.020,11335.434,10745.236,11925.632,11320.899,
## 10733.487,11908.310,11357.880,10778.002,11937.758,11364.218,10782.596,11945.840,11345.092,10767.871,11922.313,11340.068,
## 10763.661,11916.475,11483.711,10903.637,12063.785,11543.050,10967.724,12118.376,11551.772,10983.171,12120.374,11576.991,
## 11015.011,12138.971,11546.826,10990.595,12103.056,11536.971,10982.598,12091.344,11880.266,11316.796,12443.736,11950.866,
## 11394.222,12507.509,11994.852,11452.743,12536.962,11989.201,11447.978,12530.425,11964.985,11431.400,12498.571,12014.366,
## 11490.019,12538.714,12291.219,11762.119,12820.319,12390.599,11866.754,12914.443,12356.355,11845.739,12866.971,12336.726,
## 11830.709,12842.744,12581.846,12055.270,13108.422,12996.801,12447.918,13545.684,12976.842,12437.372,13516.311,13558.110,
## 12982.361,14133.860,13541.724,12968.784,14114.664,13505.548,12946.694,14064.402,13490.559,12933.933,14047.185,13622.065,
## 13062.711,14181.420,13611.193,13061.849,14160.538,13586.358,13044.210,14128.507,13570.022,13030.314,14109.731,13564.974,
## 13029.502,14100.446),100,3,byrow=TRUE)
###################################################
### code chunk number 80: ex-npbr.rnw:1810-1813 (eval = FALSE)
###################################################
## rho_pwm<-rho_pwm(post$xinput, post$yprod,
## x.post, a=2, lrho=1, urho=Inf)
## rho_pwm_trim<-mean(rho_pwm, trim=0.00)
###################################################
### code chunk number 81: ex-npbr.rnw:1815-1829 (eval = FALSE)
###################################################
## rho_pwm<-c(1.023594,1.024185,1.039690,1.052159,1.024773,1.039298,1.927103,
## 1.837867,1.677647,1.550235,1.454431,1.379524,1.310404,1.260026,1.234148,
## 1.203759,1.178933,1.170742,1.161470,1.149357,1.198126,1.314633,1.515514,
## 1.599483,1.665202,1.722867,1.817279,1.879157,1.945815,1.990754,2.031745,
## 2.105846,2.165757,2.237778,2.259292,2.295147,2.315305,2.397803,2.079720,
## 2.092493,2.206906,2.359096,2.385121,2.400971,2.422183,2.024605,2.243093,
## 2.294995,2.310249,2.317997,2.334523,1.181741,1.189079,1.191125,1.199886,
## 1.204921,1.205482,1.208576,1.202842,1.209828,1.215217,1.195819,1.198989,
## 1.185889,1.188102,1.190547,1.580017,1.581408,1.586728,1.589427,1.590941,
## 1.592341,1.706439,1.721817,1.724899,1.729585,1.732874,1.735634,2.072950,
## 2.097440,2.137919,2.140791,2.144488,2.162599,5.093260,6.434999,6.410624,
## 6.412833,4.358651,3.123234,3.148811,1.078259,1.079294,1.074666,1.076102,
## 1.082311,1.083827,1.077977,1.079619,1.080355)
## rho_pwm_trim<-mean(rho_pwm, trim=0.00)
###################################################
### code chunk number 82: ex-npbr.rnw:1832-1840 (eval = FALSE)
###################################################
## best_cm.2<- mopt_pwm(post$xinput, post$yprod,
## x.post, a=2, rho = rho_pwm)
## best_cm.3<- mopt_pwm(post$xinput, post$yprod,
## x.post, a=2, rho = rho_pwm_trim)
## res.pwm.2<- dfs_pwm(post$xinput, post$yprod,
## x.post, coefm=best_cm.2, rho=rho_pwm)
## res.pwm.3<- dfs_pwm(post$xinput, post$yprod,
## x.post, coefm=best_cm.3, rho=rho_pwm_trim)
###################################################
### code chunk number 83: ex-npbr.rnw:1842-1910 (eval = FALSE)
###################################################
## res.pwm.2<-matrix(c(3423.634,3133.666,3713.601,3420.217,3127.762,3712.672,
## 3403.614,3117.938,3689.290,3709.516,3335.461,4083.571,4093.768,3592.028,4595.509,
## 4056.693,3570.681,4542.706,4476.577,4002.423,4950.731,4294.037,3889.966,4698.107,
## 4152.061,3773.616,4530.506,5570.299,4678.710,6461.887,5307.500,4546.401,6068.599,
## 7158.666,5700.986,8616.345,6731.634,5465.889,7997.379,6339.216,5215.043,7463.390,
## 6232.965,5207.188,7258.743,6068.923,5137.420,7000.426,6265.447,5408.967,7121.928,
## 6137.926,5367.883,6907.970,6035.611,5319.935,6751.287,6039.558,5361.950,6717.166,
## 6290.624,5645.404,6935.844,6477.025,5858.882,7095.168,6966.139,6366.293,7565.985,
## 7056.248,6477.417,7635.079,7083.572,6515.880,7651.263,7125.345,6572.194,7678.496,
## 7285.332,6741.859,7828.805,7571.149,7028.932,8113.367,7620.769,7092.495,8149.044,
## 7623.786,7106.095,8141.477,7687.649,7174.624,8200.673,7797.465,7291.714,8303.215,
## 7904.813,7407.099,8402.527,8013.950,7524.599,8503.301,8016.589,7530.645,8502.533,
## 8020.621,7544.154,8497.088,8028.042,7553.876,8502.208,8182.375,7717.713,8647.037,
## 8127.045,7658.287,8595.803,8153.546,7688.495,8618.597,8586.068,8116.609,9055.527,
## 9069.230,8594.794,9543.665,9094.677,8630.944,9558.409,9076.375,8618.927,9533.822,
## 9100.556,8642.253,9558.859,9142.547,8667.451,9617.643,9669.278,9189.468,10149.088,
## 9830.317,9370.682,10289.953,9842.657,9392.612,10292.701,10069.847,9616.627,10523.067,
## 10058.510,9610.212,10506.808,9019.517,8545.051,9493.982,9359.357,8828.149,9890.564,
## 9351.436,8823.526,9879.347,9389.890,8865.720,9914.060,9469.730,8952.719,9986.740,
## 9517.669,9003.067,10032.270,9518.444,9004.464,10032.424,9571.469,9060.790,10082.148,
## 9634.284,9125.624,10142.945,9656.974,9159.662,10154.285,9631.020,9139.019,10123.022,
## 9627.760,9138.197,10117.324,9716.570,9226.003,10207.137,9709.098,9223.009,10195.188,
## 9713.317,9233.605,10193.029,10746.776,10156.577,11336.974,10738.178,10150.766,11325.589,
## 10786.367,10206.489,11366.245,10797.398,10215.776,11379.019,10786.170,10208.949,11363.391,
## 10783.844,10207.437,11360.252,11073.602,10493.528,11653.676,11154.008,10578.682,11729.334,
## 11170.647,10602.046,11739.248,11205.198,10643.218,11767.177,11184.990,10628.759,11741.220,
## 11180.277,10625.904,11734.650,11982.912,11419.442,12546.382,12087.762,11531.119,12644.406,
## 12185.205,11643.096,12727.315,12183.219,11641.995,12724.442,12159.491,11625.905,12693.077,
## 12231.392,11707.045,12755.740,16571.616,16042.516,17100.716,18554.292,18030.448,19078.137,
## 18317.125,17806.509,18827.741,18235.602,17729.584,18741.619,15878.405,15351.829,16404.981,
## 14649.501,14100.618,15198.383,14637.705,14098.236,15177.175,12136.540,11560.790,12712.289,
## 12130.230,11557.290,12703.170,12131.561,11572.707,12690.416,12124.857,11568.231,12681.483,
## 12246.209,11686.855,12805.564,12260.300,11710.956,12809.644,12246.975,11704.827,12789.123,
## 12240.520,11700.812,12780.229,12245.702,11710.229,12781.174),100,3,byrow=TRUE)
## res.pwm.3<-matrix(c(3646.226,3344.389,3948.064,3648.807,3344.517,3953.097,
## 3622.910,3337.234,3908.586,3997.221,3623.165,4371.276,4507.417,4005.677,5009.158,
## 4467.527,3981.515,4953.540,4436.192,3962.038,4910.346,4299.358,3895.287,4703.428,
## 4230.362,3851.917,4608.807,5853.342,4961.754,6744.931,5639.696,4878.597,6400.795,
## 7896.196,6438.517,9353.876,7498.835,6233.090,8764.580,7098.036,5973.863,8222.210,
## 6994.209,5968.432,8019.987,6819.883,5888.380,7751.386,7058.151,6201.671,7914.632,
## 6865.907,6095.864,7635.951,6727.713,6012.037,7443.389,6722.773,6045.165,7400.381,
## 6936.750,6291.530,7581.970,7009.811,6391.668,7627.954,7315.399,6715.554,7915.245,
## 7313.206,6734.375,7892.037,7270.222,6702.530,7837.913,7250.309,6697.159,7803.460,
## 7316.880,6773.406,7860.353,7540.428,6998.210,8082.645,7524.539,6996.264,8052.813,
## 7485.710,6968.019,8003.401,7510.348,6997.323,8023.372,7547.774,7042.024,8053.525,
## 7596.133,7098.420,8093.847,7640.059,7150.708,8129.410,7624.931,7138.987,8110.875,
## 7604.246,7127.779,8080.713,7598.825,7124.659,8072.991,7677.471,7212.809,8142.133,
## 7906.991,7438.233,8375.749,7921.832,7456.781,8386.883,8229.054,7759.595,8698.513,
## 8533.614,8059.179,9008.049,8542.872,8079.139,9006.605,8518.945,8061.497,8976.392,
## 8521.985,8063.682,8980.288,8948.660,8473.564,9423.756,9215.412,8735.602,9695.222,
## 9326.893,8867.258,9786.529,9334.011,8883.967,9784.056,9539.464,9086.243,9992.684,
## 9516.357,9068.059,9964.655,9796.244,9321.778,10270.709,10204.918,9673.710,10736.125,
## 10186.882,9658.972,10714.792,10213.718,9689.548,10737.888,10281.710,9764.700,10798.721,
## 10332.350,9817.748,10846.952,10328.829,9814.849,10842.809,10390.698,9880.019,10901.376,
## 10449.435,9940.774,10958.095,10448.894,9951.583,10946.206,10437.687,9945.686,10929.688,
## 10425.548,9935.984,10915.111,10542.273,10051.706,11032.840,10522.477,10036.387,11008.566,
## 10512.897,10033.185,10992.608,11123.680,10533.481,11713.878,11110.586,10523.175,11697.997,
## 11148.966,10569.088,11728.844,11155.662,10574.040,11737.283,11138.683,10561.462,11715.904,
## 11133.952,10557.545,11710.360,11272.850,10692.776,11852.924,11331.986,10756.659,11907.312,
## 11342.691,10774.090,11911.292,11369.501,10807.521,11931.481,11342.411,10786.180,11898.641,
## 11333.361,10778.988,11887.734,11668.327,11104.857,12231.797,11739.272,11182.628,12295.915,
## 11787.024,11244.914,12329.133,11781.696,11240.473,12322.920,11762.284,11228.699,12295.870,
## 11813.403,11289.055,12337.750,12083.976,11554.877,12613.076,12182.628,11658.783,12706.472,
## 12154.117,11643.501,12664.733,12136.689,11630.671,12642.707,12372.389,11845.813,12898.965,
## 12775.894,12227.011,13324.777,12759.797,12220.327,13299.266,13323.740,12747.991,13899.490,
## 13308.758,12735.818,13881.698,13279.889,12721.034,13838.743,13265.917,12709.291,13822.544,
## 13394.246,12834.892,13953.601,13387.143,12837.799,13936.488,13365.605,12823.457,13907.753,
## 13350.512,12810.804,13890.221,13346.982,12811.509,13882.454),100,3,byrow=TRUE)
###################################################
### code chunk number 84: ex-npbr.rnw:1913-1928 (eval = FALSE)
###################################################
## plot(yprod~xinput, data=post, xlab="Quantity of labor",
## ylab="Volume of delivered mail")
## lines(x.post, res.pwm.1[,1], lty=1, col="cyan")
## lines(x.post, res.pwm.1[,2], lty=3, col="magenta")
## lines(x.post, res.pwm.1[,3], lty=3, col="magenta")
## plot(yprod~xinput, data=post, xlab="Quantity of labor",
## ylab="Volume of delivered mail")
## lines(x.post, res.pwm.2[,1], lty=1, col="cyan")
## lines(x.post, res.pwm.2[,2], lty=3, col="magenta")
## lines(x.post, res.pwm.2[,3], lty=3, col="magenta")
## plot(yprod~xinput, data=post, xlab="Quantity of labor",
## ylab="Volume of delivered mail")
## lines(x.post, res.pwm.3[,1], lty=1, col="cyan")
## lines(x.post, res.pwm.3[,2], lty=3, col="magenta")
## lines(x.post, res.pwm.3[,3], lty=3, col="magenta")
###################################################
### code chunk number 85: ex-npbr.rnw:1932-1949 (eval = FALSE)
###################################################
## .PngNo <- .PngNo + 1; name.file <- paste("Fig-bitmap-", .PngNo, ".pdf", sep="")
## pdf(file=name.file, width = 18, height = 7, pointsize = 14, bg = "white")
## op=par(mar=c(3,3.1,2.1,2.1),mgp=c(2,.4,0),oma=c(0,0,0,0),cex.lab=1.2, mfrow=c(1,3))
## plot(yprod~xinput, data=post, col="grey", xlab="Quantity of labor", ylab="Volume of delivered mail")
## lines(x.post, res.pwm.1[,1], lty=1, lwd=2, col="cyan")
## lines(x.post, res.pwm.1[,2], lty=3, lwd=4, col="magenta")
## lines(x.post, res.pwm.1[,3], lty=3, lwd=4, col="magenta")
## plot(yprod~xinput, data=post, col="grey", xlab="Quantity of labor", ylab="Volume of delivered mail")
## lines(x.post, res.pwm.2[,1], lty=1, lwd=2, col="cyan")
## lines(x.post, res.pwm.2[,2], lty=3, lwd=4, col="magenta")
## lines(x.post, res.pwm.2[,3], lty=3, lwd=4, col="magenta")
## plot(yprod~xinput, data=post, xlab="Quantity of labor", col="grey",
## ylab="Volume of delivered mail")
## lines(x.post, res.pwm.3[,1], lty=1, lwd=2, col="cyan")
## lines(x.post, res.pwm.3[,2], lty=3, lwd=4, col="magenta")
## lines(x.post, res.pwm.3[,3], lty=3, lwd=4, col="magenta")
## par(op)
## dev.null <- dev.off()
## cat("\\includegraphics[width=0.9\\textwidth]{", name.file, "}\n\n", sep="")
###################################################
### code chunk number 86: ex-npbr.rnw:2013-2014 (eval = FALSE)
###################################################
## require("npbr")
###################################################
### code chunk number 87: ex-npbr.rnw:2017-2038 (eval = FALSE)
###################################################
## N<-5
## x.sim <- seq(0, 1, length.out=1000)
## y.dea<-matrix(0, N, 1000)
## y.cub<-matrix(0, N, 1000)
##
## Fron<-function(x) sqrt(x)
##
## for (k in 1:N)
## {
## n=100; betav=0.5
## xtab <- runif(n, 0, 1)
## V <-rbeta(n, betav, betav)
## ytab <- Fron(xtab)*V
## cind<-which((x.sim>=min(xtab))&(x.sim<=max(xtab)))
## x<-x.sim[cind]
## y.dea[k,cind]<-dea_est(xtab, ytab, x, type="dea")
## kopt<-cub_spline_kn(xtab,ytab,method="mc",krange=1:20,
## type="BIC")
## y.cub[k,cind]<-cub_spline_est(xtab,ytab,x,kn=kopt,
## method="mc",all.dea=FALSE)
## }
###################################################
### code chunk number 88: ex-npbr.rnw:2041-2065 (eval = FALSE)
###################################################
## require("npbr")
## evaluation<-function(MAT,xeval,true_vec)
## {
## # internal function
## denzero<-function(vec)
## {
## return(sum(vec!=0))
## }
##
## nzr<-apply(MAT,1,denzero)
## nzc<-apply(MAT,2,denzero)
## nzc_ind<-which(apply(MAT,2,denzero)!=0)
## nz_mat<-matrix(as.numeric(MAT!=0),dim(MAT)[1],length(xeval),byrow=FALSE)
## cmean<-rep(0,dim(MAT)[2])
## temp<-apply(MAT,2,sum)
## cmean[nzc_ind]<-temp[nzc_ind]*(1/nzc[nzc_ind])
##
## temp2<-apply((MAT-rep(1,dim(MAT)[1]) %*% t(cmean))^2 * nz_mat,2,sum)
## IVAR<-mean(temp2[nzc_ind]*(1/nzc[nzc_ind]))
## temp3<-(true_vec-cmean)^2
## IBIAS<-mean(temp3[nzc_ind])
## IMSE<-IBIAS+IVAR
## return(list(IBIAS2=IBIAS,IVAR=IVAR,MISE=IMSE))
## }
###################################################
### code chunk number 89: ex-npbr.rnw:2068-2071 (eval = FALSE)
###################################################
## result.dea<-evaluation(y.dea,x.sim,Fron(x.sim))
## result.cub<-evaluation(y.cub,x.sim,Fron(x.sim))
## (cbind(result.dea,result.cub))
|
a35264ff77005b5ec66b6c89a845a738c92ce45b
|
7cc5f6f1879edcc0ee241c7f47ae024d2caac606
|
/man/fftN.Rd
|
70069fb91d46c7a3d3765899c2098b12233bf234
|
[] |
no_license
|
cran/FIACH
|
20fe0bfedbe5959e4c737a13cb7546df7a660ede
|
32c477e755dbe28f0d03cd19346e3e62d67ce3b5
|
refs/heads/master
| 2021-01-10T13:17:11.910515
| 2015-10-09T10:28:48
| 2015-10-09T10:28:48
| 48,080,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 639
|
rd
|
fftN.Rd
|
\name{fftN}
\alias{fftN}
\title{
Zero Padded 1D Fourier transform
}
\description{
This function is a simple wrapper of Armadillo's fft function. It allows for fast and easy zero padding of a signal.
}
\usage{
fftN(X,N=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{X}{
A numeric vector or matrix.
}
\item{N}{
Length of zero padded signal. If NULL the function will automatically pad sufficiently for a fast transform.
}
}
\value{
returns the Fourier transform of the signal.
}
\examples{
x<-matrix(rnorm(101*1000),nrow = 101,ncol = 1000)
system.time(test1<-fftN(x))
}
|
fb258e650a156f86e4cf99ca0c11c818c32d0a70
|
1d7c7adfe4190456c0bce74a55f8842f113b54e1
|
/R/wilkie_sd.R
|
b26ea7abf4785e52a09a2c0498871723b8785386
|
[
"MIT"
] |
permissive
|
dmoseev/j0j0r
|
dd56cfa4e9d80324c2e39872a0f9ab480e0c3ba6
|
38e61ba17d1e8395738379adcb4af3f6ef33824b
|
refs/heads/master
| 2023-07-31T13:57:54.235793
| 2020-05-18T20:52:57
| 2020-05-18T20:52:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,170
|
r
|
wilkie_sd.R
|
#' @title slowdown_setup
#'
#' @description Build the setup list for a slowdown momentum distribution,
#'   based on Wilkie 2018, https://arxiv.org/abs/1808.01934v2, eq. 2.9.
#'   The normalization constant K is obtained by integrating the
#'   unnormalized isotropic form ("slowdown_func_p") and inverting the result.
#'
#' @param b \code{numeric} b-parameter in Wilkie 2018 eq. 2.9. Quantifies the
#'   importance of transport. Suggested in the range 0 (no transport) to 10
#'   (significant effect).
#' @param n \code{numeric} Particle density.
#' @param A \code{numeric} Particle mass number.
#' @param Z \code{numeric} Particle charge number.
#' @param birth_energy \code{numeric} Particle birth energy in eV.
#' @param n_e \code{numeric} Electron density.
#' @param T_e_eV \code{numeric} Electron temperature in eV.
#' @param ions \code{data frame} with information on all ion species,
#'   containing columns "n" (ion density), "A" (ion mass number), and "Z"
#'   (ion charge number).
#' @param name \code{character} Name of distribution/particle.
#'
#' @return \code{list} with elements \code{name}, \code{Z}, \code{A} and
#'   \code{distribution} (the momentum-distribution setup).
#'
#' @export
slowdown_setup <- function(b, n, A, Z, birth_energy, n_e, T_e_eV, ions, name){
  mass <- A * const[["amu"]]
  # Critical and birth momenta (mass times the respective velocities).
  crit_p <- mass * critical_velocity(n_e, T_e_eV, ions)
  birth_p <- mass * birth_velocity(birth_energy, mass)

  # Local factory: both the unnormalized and the final distribution share
  # everything except the evaluator name, the density and the normalization.
  make_dist <- function(fun_name, dens, norm) {
    list(
      function_name = fun_name,
      gradient = "slowdown_grad",
      distargs = list(
        n = dens,
        p_c = crit_p,
        p_b = birth_p,
        b = b,
        K = norm
      ),
      p_scale = birth_p
    )
  }

  # Integrate the unnormalized (n = 1, K = 1) isotropic form to find K.
  raw_dist <- make_dist("slowdown_func_p", dens = 1, norm = 1)
  norm_const <- 1 / integrate_homogeneous_distribution(raw_dist)

  list(
    name = name,
    Z = Z,
    A = A,
    distribution = make_dist("slowdown_func", dens = n, norm = norm_const)
  )
}
#' @title slowdown_expr
#'
#' @description Unevaluated R expression for a slowdown momentum distribution,
#'   https://arxiv.org/abs/1808.01934v2, eq. 2.9, in cylindrical momentum
#'   coordinates (p_perp, p_par). The symbols n, K, p_c, p_b and b are
#'   resolved at eval() time from the calling frame (see slowdown_func);
#'   K is the normalization computed in slowdown_setup. Kept as an
#'   expression (rather than a function body) so that deriv() can
#'   differentiate it symbolically in slowdown_grad.
slowdown_expr <- expression(
# Earlier, fully prefactored form retained for reference; the constant
# prefactors below appear to have been folded into the normalization K
# (K is computed by integration in slowdown_setup):
# (2 * pi * n * K) *
# (tau_s / 4 * pi) *
# (1 / (p_c^3 + sqrt(p_perp^2 + p_par^2)^3)) *
# (
# (sqrt(p_perp^2 + p_par^2)^3 / p_b^3) *
# ((p_b^3 + p_c^3) / (sqrt(p_perp^2 + p_par^2)^3 + p_b^3))
# )^(b / 3)
#
# (2 * pi * n * K) *
n * K * (1 / (p_c^3 + sqrt(p_perp^2 + p_par^2)^3)) *
(
(sqrt(p_perp^2 + p_par^2)^3 / p_b^3) *
((p_b^3 + p_c^3) / (sqrt(p_perp^2 + p_par^2)^3 + p_b^3))
)^(b / 3)
)
#' @title slowdown_expr_p
#'
#' @description Unevaluated R expression for a slowdown momentum distribution,
#'   https://arxiv.org/abs/1808.01934v2, eq. 2.9, written in terms of the
#'   length of the momentum vector p only (isotropic form of slowdown_expr).
#'   The symbols p, n, K, p_c, p_b and b are resolved at eval() time from
#'   the calling frame (see slowdown_func_p).
slowdown_expr_p <- expression(
n * K * (1 / (p_c^3 + p^3)) *
((p^3 / p_b^3) * ((p_b^3 + p_c^3) / (p^3 + p_b^3)))^(b / 3)
)
#' @title slowdown_func
#'
#' @description Evaluate a slowdown momentum distribution,
#'   https://arxiv.org/abs/1808.01934v2, eq. 2.9, at a point given in
#'   cylindrical momentum coordinates. The density is cut to zero at and
#'   above the birth momentum p_b.
#'
#' @param p_perp \code{numeric} value of perpendicular momentum component
#' @param p_par \code{numeric} value of parallel momentum component
#' @param n \code{numeric} particle density.
#' @param K \code{numeric} integration (normalization) constant
#' @param p_c \code{numeric} critical momentum
#' @param p_b \code{numeric} birth momentum
#' @param b \code{numeric} transport parameter
#'
#' @return \code{numeric} value of the momentum distribution at (p_perp, p_par)
#'
slowdown_func <- function(p_perp, p_par, n, K, p_c, p_b, b){
  # slowdown_expr picks up p_perp, p_par, n, K, p_c, p_b and b from this frame.
  below_birth_momentum <- sqrt(p_perp^2 + p_par^2) < p_b
  eval(slowdown_expr) * as.numeric(below_birth_momentum)
}
#' @title slowdown_func_p
#'
#' @description Evaluate a slowdown momentum distribution,
#'   https://arxiv.org/abs/1808.01934v2, eq. 2.9, using only the length of
#'   the momentum vector. The density is cut to zero at and above the birth
#'   momentum p_b.
#'
#' @param p \code{numeric} length of momentum vector, p = norm(c(p_perp, p_par))
#' @param n \code{numeric} particle density.
#' @param K \code{numeric} integration (normalization) constant
#' @param p_c \code{numeric} critical momentum
#' @param p_b \code{numeric} birth momentum
#' @param b \code{numeric} transport parameter
#'
#' @return \code{numeric} value of the momentum distribution at p
#'
slowdown_func_p <- function(p, n, K, p_c, p_b, b){
  # slowdown_expr_p picks up p, n, K, p_c, p_b and b from this frame.
  below_birth_momentum <- p < p_b
  eval(slowdown_expr_p) * as.numeric(below_birth_momentum)
}
#' @title slowdown_grad
#'
#' @description Gradient of the slowdown momentum distribution with respect
#'   to the perpendicular and parallel momentum components. Built with
#'   deriv() so the partial derivatives of slowdown_expr are obtained
#'   symbolically; do not restructure slowdown_expr without checking the
#'   generated derivative code.
#'
#' @param p_perp \code{numeric} value of perpendicular momentum component
#' @param p_par \code{numeric} value of parallel momentum component
#' @param n \code{numeric} particle density.
#' @param K \code{numeric} integration constant
#' @param p_c \code{numeric} critical momentum
#' @param p_b \code{numeric} birth momentum
#' @param b \code{numeric} transport parameter
#'
#' @return the value of slowdown_expr with a "gradient" attribute holding the
#'   partial derivatives with respect to p_perp and p_par (deriv() with
#'   function.arg returns a function; this documents a call's value).
#'   NOTE(review): unlike slowdown_func, no cutoff at p_b is applied here --
#'   confirm that callers handle p >= p_b themselves.
#'
slowdown_grad <- deriv(
expr = slowdown_expr,
namevec = c("p_perp","p_par"),
function.arg = c("p_perp", "p_par", "n", "K", "p_c", "p_b", "b")
)
#' @title fast_ion_slowdown_time
#'
#' @description Calculate the fast-ion slowdown time, eq. 2.2 of Wilkie
#'   2018, https://arxiv.org/abs/1808.01934v2.
#'
#' @param n_e \code{numeric} Electron density.
#' @param T_i_eV \code{numeric} Ion temperature in eV.
#' @param T_e_eV \code{numeric} Electron temperature in eV.
#' @param A \code{numeric} Ion mass number.
#' @param Z \code{numeric} Ion charge number.
#'
#' @return \code{numeric} fast-ion slowdown time
#'
#' @export
fast_ion_slowdown_time <- function(n_e, T_i_eV, T_e_eV, A, Z){
  ion_mass <- A * const[["amu"]]
  electron_mass <- const[["m_e"]]
  v_thermal_e <- thermal_velocity(T_e_eV, electron_mass)
  coulomb_log <- log(plasma_parameter(n_e, T_i_eV, T_e_eV))
  # NOTE(review): exp(4) is Euler's number to the 4th power; the Coulomb
  # slowing-down formula normally has the elementary charge to the 4th power
  # at this position. Left unchanged to preserve behavior -- confirm against
  # Wilkie 2018 eq. 2.2 and the unit system used elsewhere in this file.
  (3 / (16 * sqrt(pi))) *
    (ion_mass * electron_mass * v_thermal_e^3) /
    (Z^2 * exp(4) * n_e * coulomb_log)
}
#' @title plasma_parameter
#'
#' @description Calculate the plasma parameter (eq. 1.8 of Swanson 2008):
#'   the number of particles in a Debye sphere, (4*pi/3) * n_e * lambda_D^3.
#'
#' @param n_e \code{numeric} Electron density.
#' @param T_i_eV \code{numeric} Ion temperature in eV.
#' @param T_e_eV \code{numeric} Electron temperature in eV.
#'
#' @return \code{numeric} plasma parameter
#'
#' @export
plasma_parameter <- function(n_e, T_i_eV, T_e_eV) {
  lambda_d <- debye_length(n_e, T_i_eV, T_e_eV)
  (4 * pi / 3) * n_e * lambda_d^3
}
#' @title debye_length
#'
#' @description Calculate the Debye length (eq. 1.5 of Swanson 2008) from the
#' electron density and the ion and electron temperatures.
#'
#' @param n_e \code{numeric} electron density.
#' @param T_i_eV \code{numeric} Ion temperature in eV.
#' @param T_e_eV \code{numeric} Electron temperature in eV.
#'
#' @return \code{numeric} Debye length
#'
#' @export
debye_length <- function(n_e, T_i_eV, T_e_eV){
  e <- const[["qe"]]
  eps0 <- const[["epsilon_0"]]
  # Temperatures converted from eV to joules via the elementary charge.
  T_i_J <- T_i_eV * e
  T_e_J <- T_e_eV * e
  # Squared Debye wavenumber, summing ion and electron contributions.
  k_D_sq <- (n_e * e^2 / eps0) * (1 / T_i_J + 1 / T_e_J)
  sqrt(1 / k_D_sq)
}
#' @title thermal_velocity
#'
#' @description Calculate a particle's thermal velocity, v = sqrt(2 T / m),
#' with the temperature supplied in eV.
#'
#' @param T_eV \code{numeric} Particle temperature in eV.
#' @param m \code{numeric} particle mass in kg.
#'
#' @return \code{numeric} thermal velocity
#'
#' @export
thermal_velocity <- function(T_eV, m){
  # T_eV * qe converts the temperature from eV to joules.
  qe <- const[["qe"]]
  sqrt(2 * T_eV * qe / m)
}
#' @title critical_velocity
#'
#' @description Calculate the critical velocity, eq. 2.1 of Wilkie 2018,
#' https://arxiv.org/abs/1808.01934v2.
#'
#' @param n_e \code{numeric} Electron density.
#' @param T_e_eV \code{numeric} Electron temperature in eV.
#' @param ions \code{data frame} with information on all ion species,
#'   containing columns "n" (ion density), "A" (ion mass number), and "Z"
#'   (ion charge number)
#'
#' @return \code{numeric} critical velocity
#'
#' @export
critical_velocity <- function(n_e, T_e_eV, ions){
  m_e <- const[["m_e"]]
  v_te <- thermal_velocity(T_e_eV, m_e)
  # Per-species contribution n_i m_e Z_i^2 / (n_e m_i), summed over every
  # ion species in the data frame (all columns are vectorised).
  n_i <- ions[["n"]]
  Z_i <- ions[["Z"]]
  m_i <- ions[["A"]] * const[["amu"]]
  species_sum <- sum(n_i * m_e * Z_i^2 / (n_e * m_i))
  v_te * ((3 * sqrt(pi) / 4) * species_sum)^(1 / 3)
}
#' @title birth_velocity
#'
#' @description Calculate the birth velocity of a particle,
#' v = sqrt(2 E / m), given its mass and birth energy in eV.
#'
#' @param birth_energy \code{numeric} birth energy in eV.
#' @param m \code{numeric} particle mass in kg.
#'
#' @return \code{numeric} birth velocity
#'
#' @export
birth_velocity <- function(birth_energy, m){
  # birth_energy * qe converts the energy from eV to joules.
  qe <- const[["qe"]]
  sqrt(2 * birth_energy * qe / m)
}
|
60a22eda04b68f849b0fc582444c4712fa446bf8
|
b32dd1f1c3b674c1c558570dd0319590694dee34
|
/R/me.R
|
1dba7ffd3ebe759706ab4cfca200c47c916f59ec
|
[] |
no_license
|
cran/valmetrics
|
1595ca14df527d868302c7105861b94a49599986
|
9964419ce0f640ce71fe2ff7dbe8d0c1048350be
|
refs/heads/master
| 2023-02-21T04:20:10.619811
| 2021-01-13T14:30:02
| 2021-01-13T14:30:02
| 334,226,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 483
|
r
|
me.R
|
#' @title me
#' @description Calculates the Mean error (ME): the average difference
#' between predicted and observed values.
#' @inherit mae return author
#' @inheritParams mae
#' @return Mean error (ME).
#' @details Interpretation: smaller is better. Sometimes called bias.
#' @inherit mae return references
#' @examples
#' obs <- c(1:10)
#' pred <- c(1, 1, 3, 2, 4, 5, 6, 8, 7, 10)
#' me(o = obs, p = pred)
#'
#' @export
me <- function(o, p) {
  mean(p - o)
}
|
e777c7e6b1c944988fbbed90d1200ff96046d975
|
8c2e5408ad2acac0f38cd43bf56bfa52768a3417
|
/Apriori.R
|
f097ede9a666172def774e98160314186ea0bd77
|
[] |
no_license
|
fall2018-wallace/data_science_project
|
964b92e7c82b79dc5c7ddde52ab96ef2788c6fe3
|
44b12d17e6ef76d1307335cc5ae74b536fa5662a
|
refs/heads/master
| 2020-03-30T02:14:25.360575
| 2018-12-10T01:42:32
| 2018-12-10T01:42:32
| 150,621,038
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,341
|
r
|
Apriori.R
|
# Association-rule (apriori) analysis of airline-satisfaction survey data.
# NOTE(review): this script assumes a data frame `cleanData` already exists
# in the workspace; it is not loaded here.
library(lubridate)
library(arules)
library(dplyr)
library(arulesViz)
# Coerce the satisfaction score from factor to numeric before binning.
cleanData$Satisfaction <- as.numeric(as.character(cleanData$Satisfaction))
# Discretise each numeric column into categorical buckets for apriori().
satisfied <- ifelse(cleanData$Satisfaction < 3.5,"no","yes")
pricesensitivity <- ifelse(cleanData$Price_sensitivity<3,"low","high")
Flightspa <- ifelse(cleanData$No_of_flights_pa <40, "low", "high")
percentflightwithotherAirlines <- ifelse(cleanData$Percent_of_flights_with_other_airlines<=10,"less than 10","more then 10")
# Month of each flight: default "January", then overwrite Feb/Mar rows.
Month <- replicate(length(cleanData$Flight_date),"January")
Month[month(mdy(cleanData$Flight_date))==2] <- "February"
Month[month(mdy(cleanData$Flight_date))==3] <- "March"
head(Month)
# Delay buckets: <=60 min "low", >180 min "High", otherwise "Average".
Departuredelay <- replicate(length(cleanData$Departure_delay_in_minutes),"Average")
Departuredelay[cleanData$Departure_delay_in_minutes<=60] <- "low"
Departuredelay[cleanData$Departure_delay_in_minutes>180] <- "High"
Arrivaldelay <- replicate(length(cleanData$Arrival_delay_in_minutes),"Average")
Arrivaldelay[cleanData$Arrival_delay_in_minutes<=60] <- "low"
Arrivaldelay[cleanData$Arrival_delay_in_minutes>180] <- "High"
# Flight time buckets: <=100 min "low", >300 min "High", otherwise "Average".
Flight.time <- replicate(length(cleanData$Flight_time_in_minutes),"Average")
Flight.time[cleanData$Flight_time_in_minutes<=100] <- "low"
Flight.time[cleanData$Flight_time_in_minutes >300] <-"High"
# NOTE(review): every non-missing distance is either <=1200 or >1200, so the
# initial "Average" level only survives for NA distances.
FlightDist<- replicate(length(cleanData$Flight_distance),"Average")
FlightDist[cleanData$Flight_distance <=1200] <- "less"
FlightDist[cleanData$Flight_distance >1200] <- "More"
# Age buckets split at the 40% and 60% sample quantiles.
q <- quantile(cleanData$Age, c(0.4, 0.6))
Age <- replicate(length(cleanData$Age), "Average")
Age[cleanData$Age <= q[1]] <- "Low"
Age[cleanData$Age > q[2]] <- "High"
# Assemble the transaction-style data frame of categorical variables.
df <- data.frame(satisfied, pricesensitivity, Flightspa, Age ,percentflightwithotherAirlines, Month, Departuredelay, Arrivaldelay, Flight.time, FlightDist, cleanData$Airline_status, cleanData$Gender, cleanData$Type_of_travel, cleanData$Class, cleanData$Airline_name, cleanData$Origin_city, cleanData$Origin_state, cleanData$Destination_city, cleanData$Destination_state, cleanData$Arrival_delay_greater_than_5minutes)
df
# Mine rules predicting dissatisfaction: the right-hand side is fixed to
# "satisfied=no"; minimum support 0.1 and confidence 0.5.
rules<-apriori(df,parameter = list(support=0.1, confidence=0.5),appearance = list(default="lhs", rhs=("satisfied=no")))
summary(rules)
inspect(rules)
# NOTE(review): `lifts` is assigned but never used afterwards.
lifts <- quality(rules)$lift
# Keep only rules with lift > 2 (strong positive association) and show them.
goodrules<- rules[quality(rules)$lift > 2.0]
inspect(goodrules)
|
23686c99c44f6ee184506dded61e2f0a7efc7e82
|
4905bd421b07d09c583c765d97a277095bbe85c7
|
/inst/ggraptR/test/system-rc.R
|
6c72470b945ac7d53de0ff123a1d6774f55687e1
|
[] |
no_license
|
cargomoose/ggraptR1
|
dbe876f92e76ff5843194ae1b6c649eb8990316f
|
418835e4c6de486fbc82d800b4c6a5e8856d41b1
|
refs/heads/master
| 2020-05-31T06:01:09.100490
| 2016-09-19T11:01:13
| 2016-09-19T11:01:13
| 69,013,446
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 883
|
r
|
system-rc.R
|
# Experiment: wrapping a ggplot object in an R Reference Class (R5).
# Background reading on choosing an OO system in R:
# http://adv-r.had.co.nz/OO-essentials.html#picking-a-system
# http://www.cyclismo.org/tutorial/R/s3Classes.html
# http://www.agapow.net/programming/r/reference-classes/
Rappy.ggplot <- setRefClass("Rappy.ggplot",
  fields = c('gg'), # dataset, x
  methods = list(
    # initialize <- function(dataset, x) { # does not work yet
    #   gg <<- ggplot(dataset, aes(x=x))
    #   dataset <<- NULL
    #   x <<- NULL
    # },
    # NOTE(review): using `<-` inside list() performs an assignment and
    # leaves the list element UNNAMED, so no method called "+.Rappy.ggplot"
    # is ever registered here; a named argument
    # (`"+.Rappy.ggplot" = function(...)`) would be required -- consistent
    # with the author's "does not work yet" remark below.
    `+.Rappy.ggplot` <- function(self, gg2) { # does not work yet
      # e2name <- deparse(substitute(e2$gg))
      # if (is.theme(e1))
      #   add_theme(e1, e2, e2name)
      # else if (is.ggplot(e1))
      #   add_ggplot(e1$gg, e2$gg, e2name)
      # Delegate to ggplot2's %+% operator on the wrapped plot object.
      ggplot2::`%+%`(self$gg, gg2)
    }
  )
)
# ggrappy <- Rappy.ggplot$new(dataset, x)
# Working usage: construct with a ready-made ggplot and reach inside via $gg.
ggrappy <- Rappy.ggplot$new(gg=ggplot(mpg, aes(x=class)))
ggrappy$gg + geom_bar()
# ggrappy + geom_bar() # does not work yet
|
d406d64f0646c59b0eb60b3329a2047636eabed7
|
7950d582ff90f0b616bc84cf14d3c52cf3132a4c
|
/Lab and Lecture tasks/Lab_7/Lab 7.R
|
0fc2c92dca32233cceef9a141be743f94e461a54
|
[] |
no_license
|
bilalhoda1/Statistics
|
ae62d765c30174ac8f14a1ee56cd3450899aea10
|
6a98494e497d72b26635895beef80f386ebbfb6a
|
refs/heads/main
| 2023-01-04T18:45:42.798762
| 2020-11-01T19:27:37
| 2020-11-01T19:27:37
| 309,116,380
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,551
|
r
|
Lab 7.R
|
#In this session, we will explore different probability distributions
#This tutorial was adapted on R-bloggers session on probability distributions
#Please note that for this week's assignment, you are expected to answer questions in this script
#Please submit your answers as a .doc or .pdf file with the corresponding answers or plots.
#Please make sure to submit your R file as well!
#Those who do not submit an R file will receive a grade of 0 in the assignment
#Today we will cover three distributions in R
#Binomial
#Poisson
#Normal
#Functions dealing with probability distributions in R
#have a single-letter prefix that defines the type of function we want to use
#pre-fixes are d, p, q, and r
#d refers to density or mass
#p refers to cumulative (graphical depiction of probability for each outcome, cumulatively added to the next outcome)
#q refers to quantile distribution function. Calculates the inverse cumulative density function
#r refers to random sampling
#We will combine these prefixes with the names of the distributions we are interested in, # These are binom (Bernoulli and Binomial), pois (Poisson) and norm (Normal).
#Let's start with the bernoulli distribution
#20 coin flips example using p = 0.7 (probability of heads )
#let's plot the mass function of X.
#How do we do this?
#We first generate a vector with the sequence of numbers 1,2,…20 and iterate the function over these values.
n <- 1:20 #setting a vector with numbers 1 to 20
den <- dbinom(n, 20, 0.7) #assigning a new variable called den
den
#?dbinom to get details on the syntax for this function
plot(den, ylab = "Density", xlab = "Number of successes")
#NOTE(review): the sum omits k=0 successes, so it is 1 minus dbinom(0,20,0.7)
#(a negligible amount here), not exactly 1.
sum(den) # should be equal to 1 (one of the three axioms of probability theory)
#looking at the plot, determine what the probability maximum is?
#around 0.19
#Let's try another example
#Suppose widgits produced at Acme Widgit Works have probability 0.005 of being defective. #Suppose widgits are shipped in cartons containing 25 widgits. What is the probability that a randomly chosen carton contains exactly one defective widgit?
#rephrased: What is the probability of one defective widget given that there are 25 widgits in a carton and the failure rates is 0.005
#Answer
dbinom(1, 25, 0.005)
#Recall that pbinom is the R function that calculates the cumulative density function of the binomial distribution.
vec <- 1:100
d <- pbinom(vec, size=100, prob=0.25)
plot(d)
#what is this function looking up?
#Each point k is the cumulative probability P(X <= k) of at most k successes
#Question 1: plot a vector of 100 numbers that are drawn from a probability mass function of a binomial distribution with a 0.5 probability of success
n1 <- 1:100
first <- dbinom(n1,100,0.5)
plot(first, ylab = "Density", xlab = "Number of successes")
#The plot is symmetric so we can say the probability of
#50 successes in 100 trials is highest
#1a)what happens when the probability is 0.01, 0.1, and 0.89?
second <-dbinom(n1,100,0.01)
plot(second, ylab = "Density", xlab = "Number of trials")
#The probability of getting 1-3 successes would be higher where as getting 10 or more successes would be 0
third <- dbinom(n1,100,0.1)
plot(third, ylab = "Density", xlab = "Number of trials")
#The probability of getting 1-20 successes would be higher where as getting more than 22 successes would be 0
fourth <- dbinom(n1,100,0.89)
plot(fourth, ylab = "Density", xlab = "Number of trials")
#The probability of getting 80-90 successes would be higher in a 100 trials
#As the probability increases the number of successes also increases
#1b)what happens when the size is increased to 1000, 10000, 100000?
one <- dbinom(1:1000,1000,0.5)
plot(one, ylab = "Density", xlab = "Number of trials")
two <- dbinom(1:10000,10000,0.5)
plot(two, ylab = "Density", xlab = "Number of trials")
three <- dbinom(1:100000,100000,0.5)
plot(three, ylab = "Density", xlab = "Number of trials")
#As the size increases the distributions peak at the same fractional distance
#from the origin, N/2. The peak in the distribution gets sharper and the width
#of the curve also reduces in other words the standard deviation reduces.
#Reference: http://www.pas.rochester.edu/~stte/phy104-F00/notes-5.html
# @@ RGEDIT LANDMARK @@: Normal distribution
#example
#The daily revenue of a local store follows a normal distribution with a mean of $1000 and variation of $200
#what is the probability that the revenue today will be at least $1200?
pnorm(1200,1000,200) # this gives us prob x smaller than $1200
1-pnorm(1200,1000,200) # this is the one, x greater than $1200
#given a mean of $1000 and the variation of $200,
#Question 2
#Suppose widgit weights produced at Acme Widgit Works have weights that are normally distributed with mean 17.46 grams and variation 375.67 grams.
#2a)What is the probability that a randomly chosen widgit weighs more then 19 grams?
# Hint: What is P(X > 19) when X has the N(17.46, 375.67) distribution?
# Note: R wants the s. d. as the parameter, not the variance.
#NOTE(review): 375.67 is stated as the VARIANCE, but it is passed below as
#the sd argument; sqrt(375.67) would be consistent with the hint above.
pnorm(19,17.46,375.67) # this gives us prob x smaller than 19
1-pnorm(19,17.46,375.67) # this is the one, x greater than 19
#The probability is 0.4983646
#2b) Please plot the probabilities of outcomes for 100 values between 2 and 200
#NOTE(review): val is 1:101 (101 values from 1 to 101), which does not match
#the requested "100 values between 2 and 200".
val <- 1:101
prob <- dnorm(val,17.46,375.67)
plot(prob, ylab = "Density", xlab = "Number of trials between 100 and 200" )
#we can use the rnorm function to randomly sample a set of 100 values from a normal distribution with a specified mean and variance (sd)
z<-rnorm(1000, mean = 10, sd = 4) #assigning the random draws to a variable z
#let's plot this
#NOTE(review): z holds 1000 draws, although the plot title says "100 draws".
hist(z, probability = TRUE, col = 'cyan', main='Histogram of 100 draws',xlab='Weights')
#2c) Increase the number of random draws from this distribution to 10000 and 100000.
#What does this distribution look like? In which interval does most of the data lie in?
z1<-rnorm(10000, mean = 10, sd = 4) #assigning the random draws to a variable z1
hist(z1, probability = TRUE, col = "red",main='Histogram of 10000 draws',xlab='weights')
z2<-rnorm(100000, mean = 10, sd = 4) #assigning the random draws to a variable z2
hist(z2, probability = TRUE, col = "orange",main='Histogram of 100000 draws',xlab='Weights')
#The width of the bars or bin size has decreased in 10000 and the width of
#the bars in case of 100000 has further decreased
#while the standard deviation is fixed. This is due to the fact as the number of
#draws increase more and more data gets packed into each interval. The distribution is a bell curve.
#Most of the data lies in the 8-12 interval.
#let's pick 100 outcomes from 1000 random draws
xx <- seq(min(z), max(z), length=100)
lines(xx, dnorm(xx, mean=10, sd=4)) #this draws a line on the histogram
#Poisson distribution
#let's start with a probability mass function with a rate parameter of 3
n <- 1:100 #setting a vector with numbers 1-100 in ascending order
den <- dpois(n, 3) #using the dpois function to draw 100 values from a poisson distribution with a rate parameter of 3
plot(den, xlab = "Outcome", ylab = "Density",col='red')
#Question 3:
#3a)What happens to the shape of the distribution when the rate parameter is 0.3?
den <- dpois(n, 0.3) #using the dpois function to draw 100 values from a poisson distribution with a rate parameter of 0.3
plot(den, xlab = "Outcome", ylab = "Density",col='blue')
#The probability of getting 0.3 - 1 event in an interval would be the greatest and
#the probability of getting events greater than 5 would be close to 0
#3b) What happens to the shape of the distribution when the rate parameter is 10?
den <- dpois(n, 10) #using the dpois function to draw 100 values from a poisson distribution with a rate parameter of 10
plot(den, xlab = "Outcome", ylab = "Density",col='orange')
#The probability of getting 0 - 20 events in an interval would be the greatest and
#the probability of getting events greater than 20 would be close to 0
#3c) What happens to the shape of the distribution when the rate parameter is 100?
den <- dpois(n, 100) #using the dpois function to draw 100 values from a poisson distribution with a rate parameter of 100
plot(den, xlab = "Outcome", ylab = "Density",col='blue')
#The probability of getting more than 80 events in an interval would be the greatest and
#the probability of getting events less than 70 would be close to 0
#When we are changing the rate parameter the mean and variance of the distribution is changing
#So as we increase the rate parameter the probability of observing more events would increase
#and if we decrease the rate parameter then the probability of observing less events is higher
#3d) Where are the mass of all points on the distribution?
#make sure to include plots for all three questions
#The mass would be near the rate parameter:
#part (a)
#The probability of getting 0.3 - 1 event in an interval would be the greatest hence most of our mass is concentrated in this region
#part (b)
#The probability of getting 0 - 20 events in an interval would be the greatest hence most of our mass is concentrated in this region
#part (c)
#The probability of getting 80 events or more in an interval would be the greatest hence most of our mass is concentrated in this region
#Question 4
#using the rpois function, extract 1000 random vales from a poisson distribution with a rate parameter of 5
#produce a histogram of these values
#make sure to add a line showing the shape of the distribution
val <- 1:1000
#NOTE(review): when rpois' first argument has length > 1, its LENGTH is used
#as the number of draws, so rpois(1:1000, 5) yields 1000 draws.
rp<-rpois(val, 5) #assigning the random draws to a variable rp
hist(rp, probability = TRUE, col = "orange")
xx <- seq(min(rp), max(rp), length=100)
xx <- round(xx)
lines(xx, dpois(xx,5)) #this draws a line on the histogram
#Let's generate means from a poisson distribution
myMeans <- vector()
for(i in 1:100){
  set.seed(i)
  myMeans <- c(myMeans, mean(rpois(10,3)))
}
#creating a histogram of the means
hist(myMeans, main = NULL, xlab = expression(bar(x)),col='green')
hist(myMeans, main = NULL, xlab = expression(bar(x)),col='green',breaks=10)
#Question 4
#4a)What does this distribution look like?
#The distribution looks like a poisson distribution
#4b) What happens to the shape of the distribution if we draw 10000 points instead of 100?
myMeans1 <- vector()
for(i in 1:10000){
  set.seed(i)
  myMeans1 <- c(myMeans1, mean(rpois(10,3)))
}
hist(myMeans1, main = NULL, xlab = expression(bar(x)),col='grey')
#The shape of the distribution becomes bell shaped in other words in this case we get a normal distribution
#Question 5
#5a)Based on your sampling survey, which of the distributions that we learned in class are relevant to the kind of data that you have collected?
#NOTE(review): hard-coded local path; the script only runs on this machine.
setwd("D:/bilal's books 8/Lie Detector/Lab assignments/Lab_7")
data<-read.csv("datawithcategories.csv", header = TRUE)
data
library(plyr)
#Count observations per (Day, Category) combination; count is column V1.
tot <- ddply(data,.(Day,Category),nrow)
tot
hist(tot$V1, main="", probability = TRUE, col = "orange", breaks = 10)
#5b)Determine the mean, sd, probability of successes and/or rate parameter (depending on the type of data collected) for your data. Use this information to draw 10000 random variables from the relevant probability distribution. Plot these random draws as a histogram
mean(tot$V1)
sd(tot$V1)
#install.packages("fitdistrplus")
library(fitdistrplus)
#Maximum-likelihood fit of a Poisson rate to the per-day counts.
rate <- MASS::fitdistr(tot$V1,"Poisson")
rate
#The rate is 6.1764706
val <- 1:10000
rp<-rpois(val, 6.1764706) #assigning the random draws to a variable rp
hist(rp, probability = TRUE, col = "orange",xlab='outcomes',main='Histogram of Poisson')
#5c) Based on your distribution, where do the center/mass of outcomes lie?
#The center or the mass is around the rate parameter or one could say between the intervals 4-7
#5d) What happens to the shape of your distribution when you change your mean/sd/rate parameter/probability of successes to twice that you observed in your data?
val <- 1:10000
rp<-rpois(val, 6.1764706*2) #assigning the random draws to a variable rp
hist(rp, probability = TRUE, col = "brown", main="Poisson distribution",xlab="outcomes")
#By changing the rate parameter to twice(12.35294) the shape of the distribution becomes more symmetric and it looks more like a bell curve with some skewness to the right
#5e) What happens to the shape of your distribution when you change your mean/sd/rate parameter/probability of successes to half that you observed in your data?
val <- 1:10000
rp<-rpois(val, 6.1764706*0.5) #assigning the random draws to a variable rp
hist(rp, probability = TRUE, col = "purple",main="Poisson distribution",xlab='outcomes')
#The distribution is more skewed to the right and is still a poisson distribution. The mass is concentrated around the rate parameter which is 3.088235
#Note: For question #5, please repeat for each variable collected.
#Additional note: It may be possible that your variable(s) of interest from your sampling #survey do not fit any of the distributions used in this script or mentioned in lecture. #If this is the case, please use this website to determine the probability distribution #that best fits the kind of data that you have
#website: https://www.johndcook.com/blog/distributions_r_splus/
#please note that you will need to look up these distributions online and in R help to determine which best fit your data.
d79b1147ed7aa86881c53282aded46c4f610591c
|
adf13968c14ecb2f547c8afc96842ffd1b2efa39
|
/man/returnsDistribution.Rd
|
1758e4d6d79615824ee9005999957e803bdf7976
|
[] |
no_license
|
runiaruni/Meucci
|
10ed5635d3a756b743a9f75956e247dadb7489ff
|
9f0a946631eebe65837c15d05b53e22c5333c25d
|
refs/heads/master
| 2021-05-31T18:07:39.024992
| 2016-05-05T17:17:59
| 2016-05-05T17:17:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
rd
|
returnsDistribution.Rd
|
\docType{data}
\name{returnsDistribution}
\alias{returnsDistribution}
\title{Panel X of joint returns realizations and vector p of respective probabilities for returns}
\description{
Panel X of joint returns realizations and vector p of
respective probabilities for returns
}
\author{
Xavier Valls \email{flamejat@gmail.com}
}
\references{
A. Meucci, "Fully Flexible Views: Theory and Practice",
The Risk Magazine, October 2008, p 100-106.
\url{http://symmys.com/node/158}
}
\keyword{data}
\keyword{datasets}
|
f3e404889f58f624372294295f4572d62626f1d6
|
588ad4f33dc2119680b4078355c8c170243330a8
|
/inst/App/server.R
|
abdd8e04dc6b6ae6b8c8343c1aff53ba80c2f5b5
|
[] |
no_license
|
gfsarmanho/Outliers.App
|
f0b5d48ea2d9c5e0e3f2248d70cc35e02d72d037
|
2a01202a9ba2be49c46a716b4b5fe0a893e7c7da
|
refs/heads/master
| 2020-04-05T05:29:56.710079
| 2018-12-17T16:58:52
| 2018-12-17T16:58:52
| 156,598,272
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,552
|
r
|
server.R
|
#==============#
# Begin server #
#==============#
# Shiny server for the outlier-analysis app.  Relies on helpers defined
# elsewhere in the package (IQR.test, adjbox.test, fun_outlier, plot_colors).
shinyServer(function(input, output, session){
  # Add report file to temporary directory (logo used by the Rmd report)
  temp_dir_report <- file.path(tempdir(), "logo.png")
  file.copy("www/logo.png", temp_dir_report, overwrite=TRUE)
  # Reactive variables: loaded data plus one cached result per outlier test
  RV <- reactiveValues(
    dados = NULL,
    measu = "",
    res_fun_out = NULL,
    out.ind = NULL,
    res_iqr = NULL,
    res_grubbs_10 = NULL,
    res_grubbs_11 = NULL,
    res_grubbs_20 = NULL,
    res_dixon = NULL,
    res_chisq = NULL,
    res_adj = NULL
  )
  # Copies of the displayed tables, kept for the downloadable report
  RVTAB <- reactiveValues(
    tab_summary=NULL,
    tab_normtest=NULL,
    tab_stats=NULL,
    tab_outres=NULL,
    tab_outtest=NULL
  )
  # Load the uploaded file and pre-compute every outlier test on its last
  # numeric column when the user presses the load button.
  observeEvent(
    eventExpr={
      input$loadFile
    },
    handlerExpr={
      # Ensure there is an input file
      req(input$file1)
      # Load data file, dispatching the reader on the file extension
      arq_names <- input$file1$datapath
      arq_ext <- tail(unlist(strsplit(x=input$file1$name, split="\\.")), n=1)
      if(arq_ext == "txt") dados <- read.table(arq_names, sep="\t", header=input$checkHeader)
      if(arq_ext == "csv") dados <- read.csv(arq_names, sep=",", dec=".", header=input$checkHeader)
      if(arq_ext %in% c("xls", "xlsx")) dados <- readxl::read_excel(arq_names, sheet=1)
      dados <- as.data.frame(dados)
      # Only the LAST column is analysed; its header (if any) becomes measu
      RV$dados <- as.numeric(dados[, ncol(dados)])
      if(input$checkHeader) RV$measu <- names(dados)[ncol(dados)] else RV$measu <- ""
      RV$n.dados <- length(RV$dados)
      # Evaluate tests: raw htest-like result (_0) and formatted tables
      RV$res_iqr_0 <- IQR.test(x=RV$dados)
      RV$res_iqr <- fun_outlier(RV$res_iqr_0, x.data=RV$dados)
      RV$res_grubbs_10_0 <- grubbs.test(x=RV$dados, type=10)
      RV$res_grubbs_10 <- fun_outlier(RV$res_grubbs_10_0, x.data=RV$dados)
      RV$res_grubbs_11_0 <- grubbs.test(x=RV$dados, type=11)
      RV$res_grubbs_11 <- fun_outlier(RV$res_grubbs_11_0, x.data=RV$dados)
      RV$res_grubbs_20_0 <- grubbs.test(x=RV$dados, type=20)
      RV$res_grubbs_20 <- fun_outlier(RV$res_grubbs_20_0, x.data=RV$dados)
      RV$res_dixon_0 <- dixon.test(x=RV$dados, type=0)
      RV$res_dixon <- fun_outlier(RV$res_dixon_0, x.data=RV$dados)
      RV$res_chisq_0 <- chisq.out.test(x=RV$dados)
      RV$res_chisq <- fun_outlier(RV$res_chisq_0, x.data=RV$dados)
      RV$res_adj_0 <- adjbox.test(x=RV$dados)
      RV$res_adj <- fun_outlier(RV$res_adj_0, x.data=RV$dados)
      # Reveal the report button and main panel once data is loaded
      shinyjs::show(id="showReportBtn")
      shinyjs::show(id="mainPanel")
    }
  ) #endof observeEvent()
  #----------------------------------------------#
  #                     Data                     #
  #----------------------------------------------#
  # output$print_dados <- renderPrint({
  #   if(is.null(RV$dados)){
  #     return(invisible())
  #   } else {
  #     cat(
  #      paste("Dados carregados (n=", RV$n.dados, "):\n",
  #            paste(as.character(RV$dados), collapse=", "), sep="")
  #     )
  #   }
  # })
  #------------------------------------------------#
  #                     Tables                     #
  #------------------------------------------------#
  # Map the test selected in the UI to its pre-computed result
  observe(
    RV$res_fun_out <- switch(input$outlierTest,
      "Intervalo Interquartil" = RV$res_iqr,
      "Grubbs 1 outlier" = RV$res_grubbs_10,
      "Grubbs 2 outliers (lados opostos)" = RV$res_grubbs_11,
      "Grubbs 2 outliers (mesma cauda)" = RV$res_grubbs_20,
      "Dixon para outliers" = RV$res_dixon,
      "Qui-quadrado para outliers" = RV$res_chisq,
      "Boxplot ajustado" = RV$res_adj
    )
  )
  #------------------------#
  # TABLE: Data Statistics #
  #------------------------#
output$table_summary <- renderFormattable({
# Table to be saved
tab_summary <- data.frame(
Medida = c("Mínimo", "Mediana", "Média", "Desvio-padrão", "Máximo"),
Valor = sapply(list(min, mean, median, sd, max),
function(fun, x) fun(x, na.rm=TRUE), x=RV$dados)
)
# Store dynamic table
RVTAB$tab_summary <- tab_summary
# Table to be show
formattable(tab_summary, align=c("c","c"), list(
Medida = formatter("span", style = ~ style(color="grey", font.weight="bold"))
))
})
#------------------------#
# TABLE: Normality tests #
#------------------------#
output$table_normtest <- renderFormattable({
# Functions to be applied
fun_norm <- list(shapiro.test, function(x) ks.test(x, "pnorm"),
nortest::lillie.test, nortest::ad.test,
moments::jarque.test)
# nortest::cvm.test, nortest::pearson.test, nortest::sf.test
res_norm <- sapply(fun_norm, do.call, args = list(RV$dados))
res_norm.stats <- sapply(res_norm, with, c(statistic, p.value))
# Table to be saved
tab_normtest <- data.frame(
"Teste" = c("Shapiro-Wilk", "Kolmogorov-Smirnov (K-S)",
"Lilliefors K-S", "Anderson-Darling", "Jarque-Bera"),
# "Cramer-von Mises", "Qui-quadrado de Pearson", "Shapiro-Francia"
"Estatística" = res_norm.stats[1, ] ,
"P.valor" = formattable::scientific( res_norm.stats[2, ] )
)
# Store dynamic table
RVTAB$tab_normtest <- tab_normtest
# Table to be show
formattable(tab_normtest, align=c("c","c", "c"), list(
Teste = formatter("span", style = ~ style(color="grey", font.weight="bold")),
"P.valor" = formatter("span", style = x ~ style(color=ifelse(x>=0.05, "green", "red")))
))
})
#-------------------------------#
# TABLE: Assymetry and Kurtosis #
#-------------------------------#
output$table_stats <- renderFormattable({
# Table to be saved
tab_stats <- data.frame(
Medida = c("Coef. Curtose", "Coef. assimetria"),
Valor = c(moments::kurtosis(RV$dados), moments::skewness(RV$dados))
)
# Store dynamic table
RVTAB$tab_stats <- tab_stats
# Table to be show
formattable(tab_stats, align=c("c","c"), list(
Medida = formatter("span", style = ~ style(color="grey", font.weight="bold"))
))
})
#------------------------#
# TABLE: Outlier results #
#------------------------#
output$table_outres <- renderFormattable({
# output$table_results <- function(){
tab_outres <- RV$res_fun_out$tab_outres
RV$out.ind <- RV$res_fun_out$out.ind # Could be anywhere...
# Store dynamic table
RVTAB$tab_outres <- tab_outres
# Table to be show
formattable(tab_outres, align=c("c","c","c"), list(
Réplica = formatter("span", style = ~ style(color="grey", font.weight="bold")),
# Medição = color_tile("white", plot_colors[1]),
Resultado = formatter("span", style = x ~ style(color=ifelse(x, "green", "red")),
x ~ icontext(ifelse(x, "ok", "remove"), ifelse(x, "Ok", "Outlier"))
)
))
})
#----------------------#
# TABLE: Outlier Tests #
#----------------------#
output$table_outtest <- renderFormattable({
tab_outtest <- RV$res_fun_out$tab_outtest
# Store dynamic table
RVTAB$tab_outtest <- tab_outtest
formattable(tab_outtest, align=c("l","r"), list(
"Parâmetro" = formatter("span", style = ~ style(color="grey", font.weight="bold"))
))
})
#-----------------------------------------------#
# Plots #
#-----------------------------------------------#
# Plot - data
output$dados <- renderPlot({
if(is.null(RV$dados)){
return()
} else {
p_name <- "plot_dados"
assign(x=p_name, envir=.GlobalEnv, value= function(){
xx <- RV$dados
cores <- rep(plot_colors[1], length(RV$dados))
if(!is.null(RV$out.ind)){ cores[RV$out.ind] <- plot_colors[2] }
plot(xx[order(xx)], col=cores[order(xx)], pch=19, cex=1.5,
xlab="Dados ordenados", ylab="", main=RV$measu)
# points(RV$out.ind[order(xx)])
if(!is.null(RV$out.ind)){
legend("bottomright", pch=c(19,19), col=plot_colors[1:2],
c("Dados", "Outlier sugerido"), bty="n", cex=1.2, box.col="black")
}
})
get(p_name)()
}
})
#---------------#
# PLOT: BoxPlot #
#---------------#
output$boxplot <- renderPlot({
if(is.null(RV$dados)){
return()
} else {
p_name <- "plot_boxplot"
assign(x=p_name, envir=.GlobalEnv, value= function(){
boxplot(RV$dados, col=plot_colors[2], xlab="", ylab="Dados", main=RV$measu)
})
get(p_name)()
}
})
#-----------------#
# PLOT: Histogram #
#-----------------#
output$histogram <- renderPlot({
if(is.null(RV$dados)){
return()
} else {
p_name <- "plot_histograma"
assign(x=p_name, envir=.GlobalEnv, value= function(){
hist(RV$dados, col=plot_colors[1], prob=TRUE,
xlab="Dados", ylab="Frequencia", main=RV$measu)
lines(density(RV$dados), col=plot_colors[2], lwd=2)
})
get(p_name)()
}
})
#--------------#
# PLOT: QQplot #
#--------------#
output$qqplot <- renderPlot({
if(is.null(RV$dados)){
return()
} else {
p_name <- "plot_qqplot"
assign(x=p_name, envir=.GlobalEnv, value= function(){
qqnorm(RV$dados, col=plot_colors[1], pch=19,
xlab="Quantis Teóricos", ylab="Quantis amostrais", main=RV$measu)
qqline(RV$dados, col=plot_colors[2], lwd=2)
})
get(p_name)()
}
})
#-----------------------------------------------#
# REPORT #
#-----------------------------------------------#
# Modal
  # Opens the report-configuration modal when the "report" button is pressed.
  # The modal collects: author name, which outlier tests / diagnostic plots /
  # diagnostic tables to include, free-text observations, and the output
  # format (PDF or HTML); the download button inside it triggers
  # output$downReportBtn. All user-facing labels are in Portuguese.
  observeEvent(input$modalReportBtn, {
    showModal(modalDialog(easyClose=TRUE, footer=NULL,
      title = "Informações para gerar relatório técnico",
      # Report author ("Responsável" = person responsible).
      textInput(inputId="personModal", label="Responsável"),
      # Outlier tests to include in the report; only the IQR test is
      # pre-selected by default.
      shinyWidgets::awesomeCheckboxGroup(
        inputId="testsModal", label="Incluir testes:",
        choices=c("Intervalo Interquartil", "Grubbs 1 outlier",
                  "Grubbs 2 outliers (lados opostos)", "Grubbs 2 outliers (mesma cauda)",
                  "Dixon para outliers", "Qui-quadrado para outliers",
                  "Boxplot ajustado"),
        selected=c("Intervalo Interquartil")
        # "Grubbs 1 outlier",
        # "Grubbs 2 outliers (lados opostos)", "Grubbs 2 outliers (mesma cauda)",
        # "Dixon para outliers", "Qui-quadrado para outliers",
        # "Boxplot ajustado")
        # choices=c("Intervalo", "Grubbs one", "Grubbs two", "Grubbs", "Dixon", "Chi-Square"),
        # selected=c("Intervalo", "Grubbs one", "Grubbs two", "Grubbs", "Dixon", "Chi-Square")
      ),
      # Two side-by-side columns: diagnostic plots and diagnostic tables,
      # all pre-selected.
      fluidRow(
        column(6, shinyWidgets::awesomeCheckboxGroup(
          inputId="diagsPlotModal", label="Incluir gráficos diagnóstico:",
          choices=c("Histograma", "QQ-plot", "Boxplot"),
          selected=c("Histograma", "QQ-plot", "Boxplot")
        )),
        column(6, shinyWidgets::awesomeCheckboxGroup(
          inputId="diagsTableModal", label="Incluir tabelas diagnóstico:",
          choices=c("Sumário dos dados", "Testes de Normalidade", "Assimetria e Curtose"),
          selected=c("Sumário dos dados", "Testes de Normalidade", "Assimetria e Curtose")
        ))
      ),
      # Free-text general comments for the report.
      textAreaInput(inputId="obsModal", label="Observações:", value="",
                    width='100%',
                    placeholder="Insira aqui comentários gerais."),
      br(),
      # Output format selector; "Word" is currently disabled (commented out
      # of the choices) although downstream code still handles it.
      shinyWidgets::radioGroupButtons(
        inputId="format", label="Formato do documento",
        choices=c("PDF", "HTML"), #, "Word"),
        selected="PDF",
        checkIcon = list(yes = tags$i(class = "fa fa-check-square",
                                      style = "color: steelblue"),
                         no = tags$i(class = "fa fa-square-o",
                                     style = "color: steelblue"))
      ),
      # icon=icon(name="file-pdf", lib="font-awesome")
      # icon=icon(name="file-word", lib="font-awesome")
      # icon=icon(name="html5", lib="font-awesome")
      # Triggers the downloadHandler bound to output$downReportBtn below.
      downloadButton(outputId="downReportBtn", label="Gerar relatório",
                     class="btn-default") #style="background-color: black; color: white;")
    )) #endofshowModal()
  })
# Donload report mechanism
  # Renders the technical report and streams it to the browser. The report
  # template (report_PDF.Rmd / report_HTML.Rmd / report_Word.Rmd) is copied
  # into a temp dir, rendered there with rmarkdown, and the result is handed
  # back to Shiny via `file`.
  output$downReportBtn <- downloadHandler(
    filename = function() {
      # Downloaded file is "report.<ext>" matching the chosen format.
      paste("report", sep=".",
            switch(input$format, PDF="pdf", HTML="html", Word="docx")
      )
    },
    content = function(file) {
      # formato <- switch(input$format, PDF="pdf", HTML="html", Word="docx")
      # Template name is derived from the selected format, e.g. "report_PDF.Rmd".
      report_name <- paste("report_", input$format, ".Rmd", sep="")
      src <- normalizePath(report_name)
      # Render inside tempdir() so knitr intermediates don't pollute the app
      # directory; on.exit guarantees the working directory is restored.
      owd <- setwd(tempdir())
      on.exit(setwd(owd))
      file.copy(from=src, to=report_name, overwrite=TRUE)
      # Attached so the *_document() output-format constructors below resolve.
      library(rmarkdown)
      # out <- rmarkdown::render(input=paste("report_", input$format, ".Rmd"),
      out <- rmarkdown::render(input=report_name,
                               encoding="UTF-8",
                               output_format=switch(input$format,
                                                    PDF=pdf_document(),
                                                    HTML=html_document(),
                                                    Word=word_document())
      )
      # Move the rendered document to the path Shiny will serve.
      file.rename(out, file)
    }
  ) #endof downloadHandler()
})
#===============#
# End of server #
#===============#
|
59b7773a4090f63d309cf435e8a27d3f91706dae
|
2fcf1d9d4c98ced6de0784f941f6d318b79e6d6e
|
/man/area_triangle.Rd
|
3222b1eef8cd6b9cdc884ed0e60b9705a2ba1f85
|
[] |
no_license
|
katiesocolow/Week_2_Project_Package
|
9921e607c3f2799c996ed68289089d00d522e787
|
d825b69e7378ea161018e67de191de86c999d52e
|
refs/heads/master
| 2020-03-19T14:02:05.183081
| 2018-06-08T10:57:30
| 2018-06-08T10:57:30
| 136,605,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 427
|
rd
|
area_triangle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/area_triangle.R
\name{area_triangle}
\alias{area_triangle}
\title{Calculate the area of a triangle.}
\usage{
area_triangle(b, h)
}
\arguments{
\item{b}{base of a triangle.}
\item{h}{height of a triangle.}
}
\value{
The area of the triangle.
}
\description{
Calculate the area of a triangle.
}
\examples{
area_triangle(2, 6)
area_triangle(3.5, 4)
}
|
f6aa8c21a363aa8c8991743085b63b56d6999c57
|
d03924f56c9f09371d9e381421a2c3ce002eb92c
|
/R/Lattice.R
|
0975bec532fbfac8871b03e5d8c8b4a41b7ffda2
|
[] |
no_license
|
cran/distr
|
0b0396bbd5661eb117ca54026afc801afaf25251
|
c6565f7fef060f0e7e7a46320a8fef415d35910f
|
refs/heads/master
| 2023-05-25T00:55:19.097550
| 2023-05-08T07:10:06
| 2023-05-08T07:10:06
| 17,695,561
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 625
|
r
|
Lattice.R
|
# S4 accessor methods for the "Lattice" class (class definition and generics
# `width`, `Length`, `pivot` are declared elsewhere in the package).
# Getters simply return the corresponding slot.
setMethod("width", signature(object="Lattice"), function(object) object@width)
setMethod("Length", signature(object="Lattice"), function(object) object@Length)
setMethod("pivot", signature(object="Lattice"), function(object) object@pivot)
# Replacement methods (`width(x) <- value`, etc.): overwrite the slot and
# return the modified object, as R's replacement-function contract requires.
setReplaceMethod("width", signature(object="Lattice"),
                  function(object, value) {object@width <- value; object})
setReplaceMethod("Length", signature(object="Lattice"),
                  function(object, value) {object@Length <- value; object})
setReplaceMethod("pivot", signature(object="Lattice"),
                  function(object, value) {object@pivot <- value; object})
|
961473f9c65a8ad3c4c253c933411b2f75c5fcb2
|
2760271256e3f035f97fae8c6c697f0e8ddb79ca
|
/Week3/Quiz3.R
|
8f7bf449b8b034370c7bbf8fd073e28cf32299eb
|
[] |
no_license
|
hd1812/Getting_And_Cleaning_Data
|
1a276d62972fcd15b20819b85e10308e850dd633
|
9a5437730fe888290518ca246242ec7ac850cad7
|
refs/heads/master
| 2020-07-02T02:13:05.837901
| 2015-10-11T12:06:13
| 2015-10-11T12:06:13
| 38,585,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 875
|
r
|
Quiz3.R
|
####Quiz3
##Getting data from web
# ACS 2006 housing survey extract (must already be downloaded to the wd).
restData <- read.csv("getdata-data-ss06hid.csv")
##Q1
# Rows with ACR == 3 and AGS == 6. NOTE(review): per the ACS codebook these
# presumably encode "lot >10 acres" and ">= $10k agricultural sales" —
# confirm against the codebook. NAs in either column propagate to NA here.
agricultureLogical<-(restData$ACR==3 & restData$AGS==6)
# which() drops NA positions, so x keeps only rows where both tests are TRUE.
x<-restData[which(agricultureLogical==TRUE),]
head(x)
##Q2
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg"
download.file(fileUrl,destfile="jeff.jpg",mode="wb")##wb is binary
library(jpeg)
# native=TRUE returns a nativeRaster of packed integer pixel values; the
# quantiles below are taken over those integers.
pic<-readJPEG("jeff.jpg",native=TRUE)
quantile(pic,probs=c(0.3,0.8))
##Q3
GDP <- read.csv("getdata-data-GDP.csv",blank.lines.skip = TRUE)
EDU <- read.csv("getdata-data-EDSTATS_Country.csv")
GDPCountry<-as.factor(GDP$X)
# Rows 6:195 hold the 190 ranked countries in this file's layout.
nameMatrix<-EDU$CountryCode %in% GDP$X[6:195]
commonCountry<-EDU[nameMatrix,]
# NOTE(review): if this GDP column was read as factor/character (the raw
# file uses comma thousands separators), as.numeric() returns level codes or
# NAs rather than dollar amounts — verify; a safe form is
# as.numeric(gsub(",", "", as.character(...))).
x<-sort(as.numeric(GDP$Gross.domestic.product.2012[6:195]),decreasing=TRUE)
x[13]
# NOTE(review): comparing a length-190 vector against all rows of GDP
# relies on recycling — confirm the intended row is selected.
GDP[as.numeric(GDP$Gross.domestic.product.2012[6:195])==x[13],]
##Q4 Q5 remain unsolved
|
00b08f3846754f68d2b81f18a90e50e39208d758
|
d2ba50e01559fca07314d41432f30c411b773f87
|
/TAXRATE.R
|
651962db2e0d012a1e9b7c3eefcee10dabf859ee
|
[] |
no_license
|
Eikonomics/TwitterOptimalTax
|
6791afa0aeb2455f70d201ec0ca8446a023d1952
|
c680411851eac02e6d21256896b661d69206271d
|
refs/heads/master
| 2021-01-04T14:59:29.973190
| 2020-02-14T22:17:27
| 2020-02-14T22:17:27
| 240,601,453
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,748
|
r
|
TAXRATE.R
|
## Set-up
library(tidyverse)
library(scales)
# (removed `rm(list = ls())` — wiping the caller's workspace is a side
# effect an analysis script should not have)
### Fun set-up for calculation
## Baseline data (assumptions)
M <- 6641000             # Source: Hagstofan, gross total income 2018
SD <- 7982000            # Source: Hagstofan, SD of total income 2018
MinW <- 50000 * 12       # Min annual income (used to produce chart)
MaxW <- 5000000 * 12     # Max annual income (used to produce chart)
## Income grid: one row per 10k-per-month step, expressed as annual income.
## (Column named directly instead of data.frame() + setNames().)
Data <- data.frame(
  Gehalt = seq(from = MinW, to = MaxW, by = 12 * 10000)
)
## Smooth (logistic) effective tax rate as a function of annual income,
## centred at the mean income M with scale SD.
Data$PredictedTaxRate <- 1 / (1 + exp(- (Data$Gehalt - M)/SD))
## Total post-tax income
Data$PredictedAfterTax <- Data$Gehalt * (1 - Data$PredictedTaxRate)
## Total post-tax income, monthly average
Data$PredictedAfterTaxMonthly <- Data$PredictedAfterTax / 12
# Marginal pay: additional take-home pay per 10k gross wage increase.
# dplyr::lag is spelled out to avoid accidental dispatch to stats::lag.
Data$marginalTax <- Data$PredictedAfterTaxMonthly - dplyr::lag(Data$PredictedAfterTaxMonthly, n = 1L)
## Draw the plots
## Features common to both plots.
GoodLook <-
  ggplot(Data) +
  geom_line(size = 1.5) +
  theme_minimal(base_size = 16) +
  scale_y_continuous(labels = comma) +
  scale_x_continuous(labels = comma)
# Total vs take-home pay (axis labels in Icelandic: monthly wage after/before tax).
GoodLook +
  aes(y = PredictedAfterTaxMonthly, x = Gehalt/12) +
  ylab("Mánaðarlaun, eftir skatt") +
  xlab("Mánaðarlaun, fyrir skatt")
# Marginal take-home pay vs gross monthly wage ("change in net income").
GoodLook +
  aes(y = marginalTax, x = Gehalt/12) +
  xlab("Mánaðarlaun, fyrir skatt") +
  ylab("Breyting á nettó-tekjum")
|
200d58a94e0544ab6baf1462129fc6233925c8db
|
0d1685a2218c0c37bfc700fcb8008dda69625ede
|
/man/EpivizGenesTrack-class.Rd
|
3cc2b4b0f4dae160bdf163c3141d4e6a34a08685
|
[] |
no_license
|
epiviz/epivizrChart
|
75f41609bd6d82517e374126102a8c32c0c7a060
|
445ac18b7da77581616e0b94785336c53c40c046
|
refs/heads/master
| 2021-11-26T01:26:00.082587
| 2021-07-30T07:35:15
| 2021-07-30T07:35:15
| 89,830,859
| 3
| 1
| null | 2021-01-22T13:06:22
| 2017-04-30T05:15:47
|
HTML
|
UTF-8
|
R
| false
| true
| 611
|
rd
|
EpivizGenesTrack-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EpivizCharts-classes.R
\docType{class}
\name{EpivizGenesTrack-class}
\alias{EpivizGenesTrack-class}
\alias{EpivizGenesTrack}
\title{Data container for an Epiviz Genes Track.}
\description{
Data container for an Epiviz Genes Track.
}
\section{Methods}{
\describe{
\item{\code{get_component_type()}}{Get component type for prefix of random id generator}
\item{\code{get_default_colors()}}{Get default colors}
\item{\code{get_default_settings()}}{Get default settings}
\item{\code{get_name()}}{Get name of Epiviz Web Component}
}}
|
12bb0c478092dcf0478d0006c193936497b12c09
|
cecced4835b4f960141b85e25eabd8756f1702ea
|
/R/sc_aligning.R
|
aaca3f55277a3cf12ab09356508d7701b50ad676
|
[] |
no_license
|
LuyiTian/scPipe
|
13dab9bea3b424d1a196ff2fba39dec8788c2ea8
|
d90f45117bf85e4a738e19adc3354e6d88d67426
|
refs/heads/master
| 2023-06-23T01:44:20.197982
| 2023-04-17T13:26:42
| 2023-04-17T13:26:42
| 71,699,710
| 61
| 26
| null | 2023-06-12T11:04:49
| 2016-10-23T11:53:40
|
HTML
|
UTF-8
|
R
| false
| false
| 12,818
|
r
|
sc_aligning.R
|
##########################################################
# Aligning Demultiplxed FASTQ Reads to a Known Reference
##########################################################
#' @name sc_aligning
#' @title aligning the demultiplexed FASTQ reads using the Rsubread:align()
#' @description after we run the \code{sc_trim_barcode} or \code{sc_atac_trim_barcode} to demultiplex the fastq files, we are using this
#' function to align those fastq files to a known reference.
#' @param ref a character string specifying the path to reference genome file (.fasta, .fa format)
#' @param index_path character string specifying the path/basename of the index files, if the Rsubread genome build is available
#' @param tech a character string giving the sequencing technology. Possible value includes "atac" or "rna"
#' @param R1 a mandatory character vector including names of files that include sequence reads to be aligned. For paired-end reads, this gives the list of files including first reads in each library. File format is FASTQ/FASTA by default.
#' @param R2 a character vector, the second fastq file, which is required if the data is paired-end
#' @param output_folder a character string, the name of the output folder
#' @param output_file a character vector specifying names of output files. By default, names of output files are set as the file names provided in R1 added with an suffix string
#' @param type type of sequencing data (`RNA` or `DNA`)
#' @param input_format a string indicating the input format
#' @param output_format a string indicating the output format
#' @param nthreads numeric value giving the number of threads used for mapping.
#'
#' @returns the file path of the output aligned BAM file
#'
#' @examples
#' \dontrun{
#' sc_aligning(index_path,
#' tech = 'atac',
#' R1,
#' R2,
#' nthreads = 6)
#' }
#'@export
sc_aligning <- function (
    R1,
    R2 = NULL,
    tech = "atac",
    index_path = NULL,
    ref = NULL,
    output_folder = NULL,
    output_file = NULL,
    input_format = "FASTQ",
    output_format = "BAM",
    type = "dna",
    nthreads = 1){
  # Validate inputs before doing any work.
  if(!all(file.exists(R1))){
    stop("At least one of the input files for R1 does not exist")
  }
  if(!is.null(R2) && !all(file.exists(R2))){
    stop("At least one of input file for R2 does not exist")
  }
  # Per-technology set-up: output folder, stats folder, and align() type.
  if(tech == "atac") {
    message("ATAC-Seq mode is selected...")
    if(is.null(output_folder)) {
      output_folder <- file.path(getwd(), "scPipe-atac-output")
    }
    if (!dir.exists(output_folder)){
      dir.create(output_folder,recursive=TRUE)
      message("Output directory is not provided. Created directory: ", output_folder)
    }
    log_and_stats_folder <- paste0(output_folder, "/scPipe_atac_stats/")
    type <- "dna"
  } else if(tech == "rna") {
    message("RNA-Seq mode is selected...")
    if(is.null(output_folder)) {
      stop("output_folder cannot be NULL for rna mode. Aborting...\n")
    }
    log_and_stats_folder <- output_folder
    type <- "rna"
  } else {
    # Fail fast instead of erroring later on an undefined
    # log_and_stats_folder.
    stop("tech must be either 'atac' or 'rna'. Aborting...\n")
  }
  dir.create(log_and_stats_folder, showWarnings = FALSE)
  log_file <- paste0(log_and_stats_folder, "log_file.txt")
  stats_file <- paste0(log_and_stats_folder, "stats_file_align.txt")
  if(!file.exists(log_file)) file.create(log_file)
  write(
    c(
      "sc_aligning starts at ",
      as.character(Sys.time()),
      "\n"
    ),
    file = log_file, append = TRUE
  )
  # Locate or build the Rsubread genome index.
  if (is.null(index_path) && is.null(ref)) {
    stop("either a subread index path or a reference.fa path needs to be added \n")
  } else {
    if (!is.null(index_path)) {
      indexPath <- index_path
      # The .log file is written by buildindex(); its presence marks a
      # complete index.
      if (!file.exists(paste0(indexPath, ".log"))) {
        stop("Genome index does not exist in the specificed location. Please check the full index path again.\n")
      }
    } else {
      message("Genome index location not specified. Looking for the index in", output_folder)
      indexPath <- file.path(output_folder, "genome_index")
      if (file.exists(paste0(indexPath, ".log"))) {
        message("Genome index found in ", output_folder, "...")
      } else {
        message("Genome index not found. Creating one in ", output_folder, " ...")
        if(file.exists(ref)){
          Rsubread::buildindex(basename=indexPath, reference=ref)
        } else {
          stop("reference file does not exist. Please check the path and retry. \n")
        }
      }
    }
  }
  # ATAC only: merge barcode partial matches into the input, align the
  # non-matched reads separately, and record demultiplexing statistics.
  if(tech == "atac") {
    containing_folder <- dirname(R1) # Assume partial and nomatch files are also in the same directory as supplied input fastq files
    input_folder_files <- list.files(containing_folder)
    # Initialise demultiplexing stats.
    # NOTE(review): /2 assumes two lines per read in the demultiplexed
    # files (standard FASTQ has four) — confirm the demultiplexer's format.
    barcode_completematch_count <- length(readLines(R1))/2
    demux_stats <- data.frame(status = c("barcode_completematch_count"),
                              count = c(barcode_completematch_count))
    # Concatenate the complete and partial matches
    partial_matches_R1 <- file.path(containing_folder, input_folder_files[grep("dem.+partialmatch.+R1.+fastq", input_folder_files)])
    partial_matches_R3 <- file.path(containing_folder, input_folder_files[grep("dem.+partialmatch.+R3.+fastq", input_folder_files)])
    if (all(file.exists(partial_matches_R1, partial_matches_R3)) && !identical(partial_matches_R1, character(0)) && !identical(partial_matches_R3, character(0))) {
      if (length(readLines(partial_matches_R1)) > 0 && length(readLines(partial_matches_R3)) > 0) {
        message("Found partial match fastq files, proceeding to concatenate with complete match fastq files.")
        barcode_partialmatch_count <- length(readLines(partial_matches_R1))/2
        demux_stats <- demux_stats %>% tibble::add_row(status = "barcode_partialmatch_count", count = barcode_partialmatch_count)
        concat_filename_R1 <- paste0("demultiplexed_complete_partialmatch_", stringr::str_remove(basename(R1), stringr::regex("dem.+completematch_")))
        concat_file_R1 <- file.path(containing_folder, concat_filename_R1)
        concat_filename_R3 <- paste0("demultiplexed_complete_partialmatch_", stringr::str_remove(basename(R2), stringr::regex("dem.+completematch_")))
        concat_file_R3 <- file.path(containing_folder, concat_filename_R3)
        # Shell out to zcat|gzip to concatenate the gzipped FASTQs.
        system2("zcat", c(R1, partial_matches_R1, "|", "gzip", "-c", ">", concat_file_R1))
        system2("zcat", c(R2, partial_matches_R3, "|", "gzip", "-c", ">", concat_file_R3))
        if (!all(file.exists(concat_file_R1, concat_file_R3))) {
          stop("Couldn't concatenate files!\n")
        }
        message("Output concatenated read files to:")
        message("R1:", concat_file_R1)
        message("R3:", concat_file_R3)
        # Replace original fastq files with concatenated files for aligning
        R1 <- concat_file_R1
        R2 <- concat_file_R3
      } else {
        message("No partial matches, checking for reads with non-matched barcodes.")
      }
    }
    # ------------ Align the nomatch file -------
    no_matches_R1 <- file.path(containing_folder, input_folder_files[grep("nomatch.+R1.+fastq", input_folder_files)])
    no_matches_R3 <- file.path(containing_folder, input_folder_files[grep("nomatch.+R3.+fastq", input_folder_files)])
    if (all(file.exists(no_matches_R1, no_matches_R3)) && !identical(no_matches_R1, character(0)) && !identical(no_matches_R3, character(0))) {
      if (length(readLines(no_matches_R1)) > 0 && length(readLines(no_matches_R3)) > 0) {
        message("Found barcode non-matched demultiplexed FASTQ files. Proceeding to align them.")
        fileNameWithoutExtension <- paste0(output_folder, "/", strsplit(basename(no_matches_R1), "\\.")[[1]][1])
        nomatch_bam <- paste0(fileNameWithoutExtension, "_aligned.bam")
        Rsubread::align(
          # Fix: use the resolved indexPath (a user-supplied index_path was
          # previously ignored here in favour of output_folder/genome_index).
          index = indexPath,
          readfile1 = no_matches_R1,
          readfile2 = no_matches_R3,
          sortReadsByCoordinates = TRUE,
          type = "DNA",
          # Fix: honour the function's nthreads argument (was hard-coded 12).
          nthreads = nthreads,
          output_file = nomatch_bam)
        # Extract the FLAG field (and CB tag) from the nomatch BAM to
        # classify how the barcode-unmatched reads mapped.
        bam_tags <-list(bc="CB", mb="OX")
        bamfl <- open(Rsamtools::BamFile(nomatch_bam))
        params <- Rsamtools::ScanBamParam(what=c("flag"), tag=c("CB"))
        bam0 <- Rsamtools::scanBam(bamfl, param = params)
        close(bamfl)  # release the BAM file handle
        # Lookup from SAM FLAG values to a coarse mapping category.
        flag_defs <- tibble::tibble(
          type =
            paste0("barcode_unmatch_", c("one_read_unmapped", "one_read_unmapped", "one_read_unmapped", "one_read_unmapped", "one_read_unmapped", "one_read_unmapped", "one_read_unmapped", "one_read_unmapped", "one_read_unmapped", "one_read_unmapped", "one_read_unmapped", "one_read_unmapped", "both_reads_unmapped", "both_reads_unmapped", "mapped", "mapped", "mapped", "mapped", "mapped_wrong_orientation", "mapped_wrong_orientation", "mapped_wrong_orientation", "mapped_wrong_orientation", "mapped_ambigously", "mapped_ambigously", "mapped_ambigously", "mapped_ambigously", "mapped_ambigously", "mapped_ambigously", "mapped_ambigously", "mapped_ambigously"))
          ,
          flag =
            c(73, 133, 89, 121, 165, 181, 101, 117, 153, 185, 69, 137, 77, 141, 99, 147, 83, 163, 67, 131, 115, 179, 81, 161, 97, 145, 65, 129, 113, 177))
        # Tally reads per mapping category and append to the stats frame.
        demux_stats <- rbind(demux_stats, as.data.frame(table((data.frame(flag = bam0[[1]]$flag) %>% dplyr::left_join(flag_defs, by = "flag"))[,c('type')])) %>%
                               dplyr::rename(status = Var1, count = Freq))
      } else {
        message("No reads found with non-matching barcodes.")
      }
    }
    utils::write.csv(demux_stats, file.path(log_and_stats_folder, "demultiplexing_stats.csv"), row.names = FALSE)
    message("Outputted demultiplexing stats file to", file.path(log_and_stats_folder, "demultiplexing_stats.csv"), "\n")
  }
  # Generate the output filename
  if (is.null(output_file)) {
    # Only exception is if filename (excluding directory name) contains '.' then will only extract the first part
    fileNameWithoutExtension <- paste0(output_folder, "/", strsplit(basename(R1), "\\.")[[1]][1])
    outbam <- paste0(fileNameWithoutExtension, "_aligned.bam")
    message("Output file name is not provided. Aligned reads are saved in ", outbam)
  }
  else {
    fileNameWithoutExtension <- paste(output_folder, strsplit(output_file, "\\.")[[1]][1], sep = "/")
    outbam <- paste0(output_folder, "/", output_file)
  }
  # Run the main alignment (paired-end when R2 is given, else single-end).
  if(!is.null(R2) && file.exists(R2)){ # paired-end
    align_output_df <- Rsubread::align(
      index = indexPath,
      readfile1 = R1,
      readfile2 = R2,
      sortReadsByCoordinates = TRUE,
      type = type,
      nthreads = nthreads,
      output_file = outbam)
  } else { # single-end
    align_output_df <- Rsubread::align(
      index = indexPath,
      readfile1 = R1,
      sortReadsByCoordinates = TRUE,
      type = type,
      nthreads = nthreads,
      output_file = outbam)
  }
  utils::write.csv(align_output_df, file = stats_file, row.names = TRUE, quote = FALSE)
  # Index the BAM that was actually written.
  # Fix: use outbam — when output_file is supplied, outbam differs from
  # fileNameWithoutExtension_aligned.bam and the old code indexed a
  # non-existent path.
  Rsamtools::indexBam(outbam)
  # Per-chromosome mapped/unmapped counts for the stats folder.
  bamstats <- Rsamtools::idxstatsBam(outbam)
  utils::write.csv(bamstats, file = paste0(log_and_stats_folder, "stats_file_align_per_chrom.csv"), row.names = FALSE, quote = FALSE)
  write(
    c(
      "sc_aligning finishes at ",
      as.character(Sys.time()),
      "\n\n"
    ),
    file = log_file, append = TRUE)
  return(outbam)
}
|
a0ab934e9f15434aeb15d2f2eeb495d2c442a30b
|
5ca77e6f4a0f5803be717464bad720b3b2e2a1ba
|
/hypermap/hyper_embed.R
|
d4e06aafdd0fd3f1aa17e35a049eca98fec7f585
|
[] |
no_license
|
mananshah99/hyperbolic
|
4e591daefc228438e3d520aa3c01ca902cdd6402
|
8cb12c30aceb7890b39de90a702d2c237bea4237
|
refs/heads/master
| 2020-04-04T20:57:49.548332
| 2019-03-01T23:16:20
| 2019-03-01T23:16:20
| 156,267,115
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 476
|
r
|
hyper_embed.R
|
library(igraph)
# Read a whitespace-delimited, header-less edge-list file into a data frame
# (one row per edge, columns are the two endpoint ids).
load_graph <- function(filename) {
  read.table(filename, sep = "", header = FALSE)
}
# Convert a 0-based edge-list data frame into an undirected igraph graph.
# Vertex ids are shifted to 1-based, as igraph requires.
convert_graph <- function(graph_df) {
  # Interleave the two endpoint columns row-wise into
  # c(src1, dst1, src2, dst2, ...): as.vector() reads the 2-row matrix
  # column-major. Vectorized replacement for the original O(n^2) per-row
  # append loop (which also used `T` instead of `TRUE`).
  e <- as.vector(rbind(graph_df[[1]], graph_df[[2]])) + 1
  # Vertex count: largest id anywhere in the frame, plus one for 0-basing.
  graph(edges = e, n = max(graph_df, na.rm = TRUE) + 1, directed = FALSE)
}
# Embed a graph into hyperbolic space.
# NOTE(review): labne_hm() is not defined in this file — presumably the
# LaBNE+HM implementation from the NetHypGeom package, which the caller must
# attach. gma/Temp/k.speedup/w look like the method's gamma, temperature,
# speed-up factor and angular span (full circle) — confirm against that
# package's documentation.
embed_graph <- function(graph) {
  return(labne_hm(net = graph, gma = 2.3, Temp = 0.15, k.speedup = 10, w = 2*pi))
}
|
7d6d9620c5bad41fb8796a7c4f6b4a8bb4be0f5f
|
8a270978e710878945f37852d0be9f73cfa75078
|
/scrape_bundesliga_tables/R/scrape_dfb_bundesliga_results.R
|
cc293ccdbc502605cbb61d26e6b3984ad4f16df8
|
[] |
no_license
|
bydata/football_data
|
bdcacdfff7d8d099aaf93637a0f131c48462ae01
|
44e59cd8349f2a02df983b0d16eafc37fbed0e4e
|
refs/heads/master
| 2023-07-08T02:20:20.089361
| 2023-06-30T15:22:04
| 2023-06-30T15:22:04
| 145,601,237
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,622
|
r
|
scrape_dfb_bundesliga_results.R
|
library(tidyverse)
library(rvest)
library(tictoc)
# Scrape page IDs for season final tables from drop-down selection
# The value of the options contains the season id and matchday id (last matchday of season) combined
url <- "https://www.dfb.de/bundesliga/spieltagtabelle/"
page <- read_html(url)
# All <option> nodes of the season drop-down; each option's value attribute
# encodes "seasonId|matchdayId" and its text is the season label.
ddoptions <- html_nodes(page, xpath = "//select[@id='seasons']/option")
seasons_mapping <- tibble(
  season = html_text(ddoptions),
  id = html_attr(ddoptions, name = "value")
) %>%
  # Split the combined id at the literal "|" into its two components.
  separate(id, into = c("season_id", "matchday_id"), sep = "\\|") %>%
  arrange(season)
# use season mapping to send request to retrieve each seasons final result crosstable page
# Downloads one season's final-matchday cross-table page from dfb.de and
# returns the parsed HTML document. `season` is unused in the body but kept
# so the signature matches the pmap(seasons_mapping, ...) call, which
# supplies all three mapping columns positionally by name.
scrape_crosstable_pages <- function(season, season_id, matchday_id) {
  # "%%" escapes the literal percent signs of the URL-encoded path segments.
  url <- sprintf("https://www.dfb.de/bundesliga/spieltagtabelle/?spieledb_path=%%2Fcompetitions%%2F12%%2Fseasons%%2F%s%%2Fmatchday%%2F%s", season_id, matchday_id)
  page <- read_html(url)
  page
}
# Parse one scraped cross-table page into a long data frame of match results.
#
# `page` is the rvest-parsed HTML of a season's cross-table and `season` is
# the season label stamped onto every output row. Returns one row per played
# match with columns: season, home, away, home_goals, away_goals.
extract_crosstable <- function(season, page) {
  # Locate the cross-table node by its container id.
  crosstable <- page %>%
    html_node(xpath = "//div[@id='table-cross']/table")
  # Parse the table into a data frame.
  # Fix: the original both piped `crosstable` in AND passed it again as a
  # positional argument, so the duplicate shifted into an unrelated
  # html_table() parameter; the duplicate is dropped.
  df <- crosstable %>%
    html_table(fill = TRUE, header = TRUE)
  # Column headers: team names come from the logo images' title attributes.
  columns <- html_nodes(crosstable, css = "thead th img") %>% html_attr("title")
  # Row names: home-team names, same source.
  rows <- html_nodes(crosstable, css = "tbody tr th img") %>% html_attr("title")
  # Attach the scraped names, then reshape wide cross-table -> long results.
  colnames(df) <- c("X1", columns)
  df <- df %>%
    bind_cols(home = rows) %>%
    select(home, everything(), -X1) %>%
    pivot_longer(cols = c(-home), names_to = "away", values_to = "result") %>%
    mutate(result = str_remove_all(result, fixed("\n")) %>%
             str_trim()) %>%
    # Empty cells are the diagonal (a team never plays itself) — drop them.
    filter(result != "") %>%
    separate(result, into = c("home_goals", "away_goals"), sep = ":") %>%
    # across() replaces the superseded mutate_at()/vars() pair.
    mutate(across(c(home_goals, away_goals), as.numeric)) %>%
    mutate(season = season) %>%
    select(season, everything())
  df
}
# scrape crosstable pages for all seasons at once and store them in a list (takes a while)
# tictoc's tic()/toc() bracket the download to report elapsed time.
tic()
pages <- pmap(seasons_mapping, scrape_crosstable_pages)
toc()
# extract crosstables from pages; pmap pairs each season label with its page.
crosstables <- pmap(list(seasons_mapping$season, pages), extract_crosstable)
# name the list items with season names
crosstables <- crosstables %>%
  set_names(seasons_mapping$season)
# Persist both as a compressed R object (list of data frames) ...
write_rds(crosstables, "output/bundesliga_results_crosstable.RData", compress = "gz")
# ... and as a single flat csv file.
crosstables_flat <- bind_rows(crosstables)
write_csv(crosstables_flat, "output/bundesliga_crosstables.csv")
|
a3a35deb6d48589052f7bd3bc87bb68bbb34ba9c
|
dc7d3873fd7896fd4a81329a7aa24d4704a8bd90
|
/scripts/BcBOTnet/03_bigRR_cistrans.R
|
a73d77dbc6d6ae490249766c31a10072d2901f61
|
[] |
no_license
|
nicolise/BcAt_RNAGWAS
|
4cd4cf169c06f46057e10ab1773779c8eaf77ab1
|
64f15ad85186718295c6a44146befa3ca57b7efc
|
refs/heads/master
| 2021-01-12T11:40:59.400854
| 2019-10-21T19:54:53
| 2019-10-21T19:54:53
| 72,249,016
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,341
|
r
|
03_bigRR_cistrans.R
|
#Nicole E Soltis
#06/08/18
#03_bigRR_cistrans
#--------------------------------------------------------------------
#extract BotBoaNet5 bigRR data to subfolder
# try post-hoc meta-analysis across phenotypes
#first approach: MANTEL in perl? beta-value scaling
#check cis vs. trans SNP effect estimates
#later repeat this for GEMMA
# (removed `rm(list = ls())` — wiping the caller's workspace is a side
# effect an analysis script should not have)
# NOTE(review): the script navigates with setwd() + relative paths; kept
# as-is because every subsequent path depends on the working directory.
setwd("~/Documents/GitRepos/BcAt_RNAGWAS/")
#here are the original phenotypes for these SNPs
#total of 30 genes
PhenosNet <- read.csv("data/BcBotGWAS/02_MatchGenos/BOTBOANet5phenos.csv")
setwd("~/Documents/GitRepos/BcAt_RNAGWAS/data/allreads_bigRR/B05.10")
# Build the lookup of per-gene HEM output files from the phenotype columns.
getPhenos <- as.data.frame(names(PhenosNet))
names(getPhenos)[1] <- "gene"
getPhenos$FileName <- paste("03_bigRRout_partial/outfiles/",getPhenos$gene, ".HEM.csv", sep="")
getPhenos <- getPhenos[-c(1:2),]  # drop the two non-gene leading columns
#file.copy(from=getPhenos$FileName, to="04_NetSubset/", overwrite = TRUE, recursive = FALSE, copy.mode = TRUE) #got 19/ 30. Now unzipping full files to find last 11
getPhenos$ZipFile <- paste("03_bigRRout/03_bigRRout/outfiles/",getPhenos$gene, ".HEM.csv", sep="")
#file.copy(from=getPhenos$ZipFile, to="04_NetSubset/",
#          overwrite = TRUE, recursive = FALSE,
#          copy.mode = TRUE) #all but one copied
getPhenos$NewFiles <- paste(getPhenos$gene, ".HEM.csv", sep="")
#now extract relevant GWAS data from here...
#from data/allreads_bigRR/B05.10/04_NetSubset/
setwd("~/Documents/GitRepos/BcAt_RNAGWAS/data/allreads_bigRR/B05.10/04_NetSubset")
# Fix: list.files(pattern=) takes a regex, so the old ".HEM.csv" also
# matched names like "xHEMycsv"; escape the dots and anchor at the end.
my.files <- list.files(pattern = "\\.HEM\\.csv$")
#somehow 217749 of the non-duplicated original SNPs have become 217749 SNPs with 205 duplicated. Estimates for duplicated SNPs are very different --> going to drop all of these
full.file <- NULL
for (i in seq_along(my.files)){
  my.file <- read.csv(my.files[i], header=TRUE)
  names(my.file)[2] <- "chr.ps"
  print(sum(duplicated(my.file[,2])))
  my.file <- my.file[!duplicated(my.file$chr.ps),]
  # First file seeds the table; every later file contributes its estimate
  # column (columns 2:3 = chr.ps + beta), joined on SNP position.
  # (Plain if/else replaces the original ifelse() call: ifelse() is a
  # vectorized value function and should not carry assignment side effects.)
  if (i == 1) {
    full.file <- my.file
  } else {
    full.file <- merge(full.file, my.file[,c(2,3)], by="chr.ps")
  }
}
#10 mins to run
Sys.time()
write.csv(full.file, "BotBoaNet_allGenes_beta.csv")
#next: check for haplotype structure?? haplotype-based model to locate cis effects
|
64b45a96c680b567de80e1b2926d93f5d7b40af5
|
baabef082db1a4504983d24f783e5da0a39ec54f
|
/cachematrix.R
|
86e2902e29d4bbaa27961978e192283152e6847d
|
[] |
no_license
|
alextan2468/ProgrammingAssignment2
|
88de416669815d476515731d4d449734d50af2fa
|
63e78df6cb883c5232c894de146e2b4e0b56bbb1
|
refs/heads/master
| 2020-12-25T06:36:49.445054
| 2015-02-08T13:43:39
| 2015-02-08T13:43:39
| 30,461,817
| 0
| 0
| null | 2015-02-07T16:35:54
| 2015-02-07T16:35:51
| null |
UTF-8
|
R
| false
| false
| 1,390
|
r
|
cachematrix.R
|
## The makeCacheMatrix function will be used to create a matrix
## that can allow the storage of the matrix information itself
## as well as retrieving the matrix and the inverse of the matrix
## The inverse of the matrix would be solved by the cacheSolve function
## makeCacheMatrix creates a storage structure for a matrix
## with additional get function to retrieve matrix value
## and getinverse function to get the inverse of the matrix
# Build a cache-aware "matrix" object: a list of four closures sharing one
# enclosing environment that holds the matrix `x` and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    # Replace the stored matrix; any cached inverse becomes stale, so clear it.
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a computed inverse (called by cacheSolve()).
    setinversematrix = function(inversematrix) inv <<- inversematrix,
    # Return the cached inverse, or NULL when none has been stored yet.
    getinversematrix = function() inv
  )
}
## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix, computing it at most once: a previously cached inverse
## is returned directly (with a message); otherwise the inverse is solved,
## stored back into the object's cache, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinversematrix()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get())  # first use: actually compute the inverse
  x$setinversematrix(inv)
  inv
}
|
f6fd2704f6e729f8212bc450d5eb14af02841a19
|
7ce53616b1e41c8376bd802e7569fc414094ef9d
|
/lab 15.R
|
53742c288b3afa235931a77ad1a108a4b87032b1
|
[] |
no_license
|
qingze-lan/PSTAT-10
|
693d7c8006ef5fa56996d6746cf958b52339478f
|
6cc4754f43c164af3101d3a7d33534a10c82f97e
|
refs/heads/main
| 2023-03-23T10:36:30.617443
| 2021-03-14T23:36:13
| 2021-03-14T23:36:13
| 347,781,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,332
|
r
|
lab 15.R
|
# Open (or create) the SQLite database file used in this lab.
PSTAT10db <- dbConnect(RSQLite::SQLite(), "PSTAT-db.sqlite")
# Load the lab's nine tables from disk.
# NOTE(review): these data frames are read into R but never written to the
# database (no dbWriteTable calls visible), so the SQL queries below
# presumably rely on tables already inside PSTAT-db.sqlite — confirm.
EMPLOYEE <- read.csv("~/Desktop/pstat 10/EMPLOYEE.txt")
DEPARTMENT <- read.csv("~/Desktop/pstat 10/DEPARTMENT.txt")
CUSTOMER <- read.csv("~/Desktop/pstat 10/CUSTOMER.txt")
EMPOLYEE_PHONE <- read.csv("~/Desktop/pstat 10/EMPLOYEE_PHONE.txt")
INVOICES <- read.csv("~/Desktop/pstat 10/INVOICES.txt")
PRODUCT <- read.csv("~/Desktop/pstat 10/PRODUCT.txt")
STOCK_TOTAL <- read.csv("~/Desktop/pstat 10/STOCK_TOTAL.txt")
SALES_ORDER <- read.csv("~/Desktop/pstat 10/SALES_ORDER.txt")
SALES_ORDER_LINE <- read.csv("~/Desktop/pstat 10/SALES_ORDER_LINE.txt")
# Distinct employee names.
dbGetQuery(PSTAT10db, 'SELECT DISTINCT NAME FROM EMPLOYEE')
# Departments whose name starts with "S" and contains an "r" later on.
dbGetQuery(PSTAT10db, 'SELECT NAME FROM DEPARTMENT WHERE NAME LIKE "S%r%"')
# Yes
# White socks in the product table.
dbGetQuery(PSTAT10db, 'SELECT * FROM PRODUCT WHERE color == "WHITE" AND NAME == "SOCKS"')
dbGetQuery(PSTAT10db, 'SELECT * FROM SALES_ORDER_LINE')
# Order lines restricted to products p1 and p2.
dbGetQuery(PSTAT10db, 'SELECT ORDER_NO, PROD_NO, QUANTITY FROM SALES_ORDER_LINE
           WHERE PROD_NO IN ("p1","p2")')
# Number of distinct customers.
dbGetQuery(PSTAT10db, 'SELECT COUNT(DISTINCT CUST_NO) FROM CUSTOMER')
# Available sock colors.
dbGetQuery(PSTAT10db, 'SELECT color FROM PRODUCT WHERE NAME == "SOCKS"')
# Orders placed by customer C6.
dbGetQuery(PSTAT10db, 'SELECT ORDER_NO FROM SALES_ORDER WHERE CUST_NO == "C6"')
# Total invoiced quantity on order 02.
dbGetQuery(PSTAT10db, 'SELECT SUM(QUANTITY) FROM INVOICES WHERE ORDER_NO == "02"')
|
fb2a893591c8d6edf05e5230a678b49b5f900394
|
8293856ff3bd5d9eec1dbd76e8370682ef6d3802
|
/tests/testthat/test_connector.R
|
6ac91d640c4ce76c46d2ce503d2bb674bcf38109
|
[] |
no_license
|
14Gus/wdcconnector
|
cabdce0728b69dda70b8bb5d03197359a085b232
|
695eefa6f9a9fc6f40d1f649b7ceccff5612f9ca
|
refs/heads/master
| 2020-03-11T03:16:01.166492
| 2018-04-24T07:21:18
| 2018-04-24T07:21:18
| 129,742,112
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 325
|
r
|
test_connector.R
|
context("connector")
# Verifies that generateWDCConnectorJS() interpolates the data-set name into
# the Tableau Web Data Connector submit-handler template. The comparison is
# whitespace-sensitive, so the expected string must match byte-for-byte.
test_that("Generate WDC connector works",{
  expected <- "$(document).ready(function () {\n    $(\"#submitButton\").click(function () {\n      tableau.connectionName =\"iris Table Feed\";\n      tableau.submit();\n    });\n});"
  actual <- generateWDCConnectorJS("iris")
  expect_equal(expected, actual)
})
|
70d75b1a2eef83aa59d7d4fc6607e544f2734947
|
b3bf7b8c56b2f3e8d8594cccce6f65981c9514e5
|
/R/efficacy_aggregate.R
|
455868f8c8c289f81643b7f957bdc9285ad6d64c
|
[] |
no_license
|
faustovrz/bugcount
|
055ee388bcf9049e5d01cf3ad19898220f7787a2
|
f3fbb7e9ed5cecae78fdfaa1035e2a87e072be2d
|
refs/heads/master
| 2021-03-27T15:43:12.992541
| 2018-05-04T22:17:49
| 2018-05-04T22:17:49
| 104,142,648
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,149
|
r
|
efficacy_aggregate.R
|
#' Efficacy calculations as ratio of count means, geometric means, or medians
#' @param wf A whitefly count dataframe.
#' @param control Control genotype string; the control genotype supplies the
#'   denominator in the efficacy ratio, e = 1 - u1/u0.
#' @return A dataframe with efficacy calculated per clone
#'
#' @examples
#' # efficacy.df <- efficacy_aggregate(wf.per.plant, control = "COL1468")
efficacy_aggregate <- function(wf, control = "COL1468"){
  # This formula describes the experimental design.
  form <- formula(nymphs ~ exp_cross + experiment + clone + group)
  # Per-group sample size, mean, geometric mean and median of nymph counts.
  # NOTE(review): geometric.mean() is not defined here — presumably
  # psych::geometric.mean; confirm the caller attaches it.
  wf_ag <- aggregate(form, data = wf,
                     FUN = function(x) c(n = length(x),
                                         mean = mean(x, na.rm = TRUE),
                                         geometric.mean = geometric.mean(x),
                                         median = median(x,na.rm = TRUE)))
  # fix aggregate column names
  wf_ag <- fix_ag_colnames(wf_ag)
  # Baseline: infestation-check rows for the control genotype.
  wf_x <- wf_ag[wf_ag$group == "infestation_check" & wf_ag$clone == control,
                !(colnames(wf_ag) %in% c("clone", "group","exp.cross"))]
  # (a second, byte-identical gsub() call that followed this one was
  # removed — it was a no-op)
  colnames(wf_x) <- gsub("nymphs.","infestation_",
                         colnames(wf_x), fixed = TRUE)
  # Treatment observations for every clone.
  wf_y <- wf_ag[wf_ag$group != "infestation_check",
                colnames(wf_ag) != "group"]
  colnames(wf_y) <- gsub("nymphs.","clone_",
                         colnames(wf_y), fixed = TRUE)
  wf_merge <- merge(wf_x, wf_y,
                    by = "experiment",
                    all.y = TRUE)
  # Treatment observations for the control genotype (the denominator).
  wf_y <- wf_ag[wf_ag$group != "infestation_check" & wf_ag$clone == control,
                !(colnames(wf_ag) %in% c("clone", "group","exp.cross"))]
  colnames(wf_y) <- gsub("nymphs.","control_",
                         colnames(wf_y),fixed=TRUE)
  wf_merge <- merge( wf_merge, wf_y,
                     by="experiment",
                     all.y = TRUE)
  # Efficacy = 1 - clone/control for each location statistic.
  # NOTE(review): the third result column is named plain "median" (not
  # "median_eff"); kept as-is because downstream code may rely on the name.
  wf_merge <- within(wf_merge,{
    mean.eff <- 1 - clone_mean / control_mean
    geometric_mean_eff <- 1 - clone_geometric_mean / control_geometric_mean
    median <- 1 - clone_median / control_median
  })
  wf_merge
}
|
988639821c05fcfc7dd70676a9ce975193a61115
|
bf5435204ec8de8afae96e3695c4e2c8b5d86f62
|
/man/setOutputSubdir.Rd
|
308f18a3015028820736d998f53390d5436cacbb
|
[] |
no_license
|
vreuter/projectInit
|
289e7521bb1071b69e367a6aafd9e55edf662563
|
3d3a905f4c623d649a13b102539893b7802e4888
|
refs/heads/master
| 2020-04-10T10:16:35.994057
| 2018-01-19T18:37:17
| 2018-01-19T18:37:17
| 89,945,482
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 253
|
rd
|
setOutputSubdir.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dirAliases.R
\name{setOutputSubdir}
\alias{setOutputSubdir}
\title{Creates and sets outputSubdir}
\usage{
setOutputSubdir(...)
}
\description{
Creates and sets outputSubdir
}
|
06ac95e07df9fb44569be3c4f25813f58613f2dd
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Kronegger-Pfandler-Pichler/bomb/p20-5.pddl_planlen=3/p20-5.pddl_planlen=3.R
|
d1e78cd693e1ca1a35e2f364b0fd0b9c3e69276f
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,970
|
r
|
p20-5.pddl_planlen=3.R
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 17763
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 17433
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 17433
c
c Input Parameter (command line, file):
c input filename QBFLIB/Kronegger-Pfandler-Pichler/bomb/p20-5.pddl_planlen=3.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 715
c no.of clauses 17763
c no.of taut cls 310
c
c Output Parameters:
c remaining no.of clauses 17433
c
c QBFLIB/Kronegger-Pfandler-Pichler/bomb/p20-5.pddl_planlen=3.qdimacs 715 17763 E1 [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 29 30 31 32 33 35 36 37 40 41 42 43 44 45 48 49 50 51 52 54 55 56 57 59 61 62 63 64 65 67 68 71 72 73 75 78 79 80 81 82 83 85 86 87 88 90 91 93 94 95 96 97 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715] 310 20 370 17433 RED
|
d997a24dc4e4eb838456a577632845ff926572c4
|
e8327d77350b80110fb20a5506b180155a108e7b
|
/ED_Workflow/2_SAS/compile_SAS_runs.R
|
ec15ebb98620c35d09cc1d4f43a2f80640764a6a
|
[] |
no_license
|
MortonArb-ForestEcology/URF2018-Butkiewicz
|
c537fe28c2eeb886d324b9b8e565d100187fb9ff
|
d5f3f630045e24bd165bc2a35885a5a6e3d0c2c4
|
refs/heads/master
| 2021-06-23T12:27:08.987348
| 2019-06-20T17:49:56
| 2019-06-20T17:49:56
| 136,949,391
| 0
| 0
| null | 2018-07-12T18:37:05
| 2018-06-11T16:00:18
|
R
|
UTF-8
|
R
| false
| false
| 4,565
|
r
|
compile_SAS_runs.R
|
# ------------------------------------------------------------------------------------
# This file compiles the steady-state approximation for an accelerated model spinup
# at individual points (this will need to be modified to work efficiently with spatially
# files)
#
# References:
# 1. Xia, J.Y., Y.Q. Luo, Y.-P. Wang, E.S. Weng, and O. Hararuk. 2012. A semi-analytical
# solution to accelerate spin-up of a coupled carbon and nitrogen land model to
# steady state. Geoscientific Model Development 5:1259-1271.
#
# 2. Xia, J., Y. Luo, Y.-P. Wang, and O. Hararuk. 2013. Traceable components of terrestrial
# carbon storage capacity in biogeochemical models. Global Change Biology 19:2104-2116
#
#
# Original ED SAS solution Script at PalEON modeling HIPS sites:
# Jaclyn Hatala Matthes, 2/18/14
# jaclyn.hatala.matthes@gmail.com
#
# Modifications for greater site flexibility & updated ED
# Christine Rollinson, Aug 2015
# crollinson@gmail.com
#
# Adaptation for regional-scale runs (single-cells run independently, but executed in batches)
# Christine Rollinson, Jan 2016
# crollinson@gmail.com
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# NOTES ON THE SAS SPINUP:
# ------------------------------------------------------------------------------------
# The SAS (semi-analytical solution) should be perfomed on ED runs
# *******WITH DISTURBANCE TURNED OFF*******
# Turning off the disturbance (both treefall & fire) means the model will run with a
# single patch AND we have a robust patch saying what a theoretical old growth looks like
#
# FSC = Fast soil C
# SSC = Structural soil C
# SSL = structural soil L
# MSN = Mineralized Soil N
# FSN = Fast soil N
# ------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------
# Setting things up to run equations, etc
# ------------------------------------------------------------------------------------
#---------------------------------------
# Define File Structures & steps
# Additional fixed constants and file paths that don't depend on the site
#---------------------------------------
# Site Info
#Setup analysis file structure
# in.base <- "/home/crollinson/URF2018-Butkiewicz/ED_Workflow/1_spin_initial/URF2018_spininit.v1/"
# out.base <- "/home/crollinson/URF2018-Butkiewicz/ED_Workflow/2_SAS/SAS_init_files.v1/"
in.base <- "../1_spin_initial/URF2018_spininit.v5/"
out.base <- "SAS_init_files.v5/"
if(!dir.exists(out.base)) dir.create(out.base)
# Load site characteristic table
expdesign <- read.csv("../0_setup/ExperimentalDesign.csv")
summary(expdesign)
blckyr <- 50 #number of years to chunk data by
disturb <- 0.005 # the treefall disturbance rate you will prescribe in the actual runs (or close to it)
yrs.met <- 30 # The number of met years in the spinup loop
kh_active_depth = -0.2
# pft <- c(5,6,8,9,10,11) #set of PFTs used in analysis
# dpm <- c(31,28,31,30,31,30,31,31,30,31,30,31) # days per month
sufx <- "g01.h5"
expdesign <- expdesign[expdesign$RUNID %in% dir(in.base),] # Do what we've spunup already
expdesign <- expdesign[!expdesign$RUNID %in% dir(out.base),] # Don't do anything we've already done the SAS for
#---------------------------------------
# ------------------------------------------------------------------------------------
# Running the SAS Solution
# ------------------------------------------------------------------------------------
source("../0_setup/ED_Calcs_Soil_Fire.R")
source("SAS.ED2.R")
for(s in 1:nrow(expdesign)){
prefix <- expdesign$RUNID[s]
cat("***** Processing site:", paste(prefix), "\n")
# Read run settings % Sand & % CLAY from table
slxsand <- expdesign$SLXSAND[s]
slxclay <- expdesign$SLXCLAY[s]
sm_fire <- expdesign$SM_FIRE[s]
fire_intensity <- expdesign$Fire.Intensity
lat <- round(expdesign$latitude[s],2)
lon <- round(expdesign$longitude[s],2)
dir.analy <- file.path(in.base, prefix, "analy")
dir.histo <- file.path(in.base, prefix, "histo")
outdir <- file.path(out.base, prefix)
SAS.ED2(dir.analy=dir.analy, dir.histo=dir.histo, outdir=outdir,
prefix, lat, lon,
blckyr=31, yrs.met=30,
treefall=0.005, sm_fire=sm_fire, fire_intensity=fire_intensity, slxsand=slxsand, slxclay=slxclay,
decomp_scheme=2
)
} # End Site Loop!
# -------------------------------------
|
d3f648975c32c44738d6a31ca5ab13ddde5d3d37
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/credule/examples/credule-package.Rd.R
|
8104eacfa62a94f8f0acdceb5ea5ad9129d1be42
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,328
|
r
|
credule-package.Rd.R
|
# Example workflow for the credule package: price a set of CDS from a given
# credit curve, then bootstrap a credit curve back from the resulting spreads.
library(credule)
### credule-package examples
### Keywords: credit default swap, CDS spread, survival probability,
###           default probability, credit curve, bootstrapping, hazard rate
# Discount (yield) curve: tenors in years and the corresponding zero rates
yieldcurveTenor <- c(1,2,3,4,5,7)
yieldcurveRate <- c(0.0050,0.0070,0.0080,0.0100, 0.0120,0.0150)
# Credit curve: tenors and survival probabilities
creditcurveTenor <- c(1,3,5,7)
creditcurveSP <- c(0.99,0.98,0.95,0.92)
# CDS quotes used for the bootstrapping example
cdsTenors <- c(1,3,5,7)
cdsSpreads <- c(0.0050,0.0070,0.00100,0.0120)
# Contract conventions -- presumably payments per year; confirm with ?priceCDS
premiumFrequency <- 4
defaultFrequency <- 12
accruedPremium <- TRUE  # accrued premium paid on default
RR <- 0.40              # recovery rate
# CDS pricing from the credit curve
res_price <- priceCDS(yieldcurveTenor,
                      yieldcurveRate,
                      creditcurveTenor,
                      creditcurveSP,
                      cdsTenors,
                      RR,
                      premiumFrequency,
                      defaultFrequency,
                      accruedPremium
)
res_price
# Credit-curve bootstrapping from the spreads just priced
res_bootstrap <- bootstrapCDS(yieldcurveTenor,
                              yieldcurveRate,
                              res_price$tenor,
                              res_price$spread,
                              RR,
                              premiumFrequency,
                              defaultFrequency,
                              accruedPremium)
res_bootstrap
|
194f0f14dd2289ed2fb2a01cadaea1a38374da2b
|
1fd16ea95286ec5a99eeaed2cae20699bc5fb108
|
/Explore relative sample sizes with different variances - add scenarios for VaryR.R
|
d983bf2e9e3d3d4204c02f56858de84beda4fc28
|
[] |
no_license
|
JiananH/Profile-Bayesian
|
4b8ffb9c22a8d80f2d89fbcab794aa4395109a00
|
5fa8aaecf16a953f8acc904f17caa29b37e66634
|
refs/heads/master
| 2020-05-09T10:47:41.722918
| 2020-04-08T14:13:24
| 2020-04-08T14:13:24
| 181,053,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,309
|
r
|
Explore relative sample sizes with different variances - add scenarios for VaryR.R
|
######################################################################
#### New simulation settings to accomondate reviewer's comments ####
#### Jianan Hui, Apr 7, 2020 ####
######################################################################
# Varying relative sample sizes (ratio of the two sample sizes from adult to pediatric). Specifically, add scenarios where adult sample size = 500 and pediatric sample size varies from 500, 400, 300, 200, 100, 50 and 25.
# The following assumptions will be adopted:
# n_a=500,n_p=500,400,300,200,100,50,25
# Under null hypothesis:
#
# H00-1. Under H0: (mu_a=1, var_a=7^2, mu_p=0, var_p=5^2)
# H00-2. Under H0: (mu_a=1, var_a=7^2, mu_p=0, var_p=7^2)
# H00-3. Under H0: (mu_a=1, var_a=7^2, mu_p=0, var_p=10^2)
#
# Under alternative hypothesis:
#
# H11-1. Under H1: (mu_a=1, var_a=7^2, mu_p=0.8, var_p=7^2)
# H11-2. Under H1: (mu_a=1, var_a=7^2, mu_p=2, var_p=7^2)
# H11-3. Under H1: (mu_a=1, var_a=7^2, mu_p=1.5, var_p=7^2)
setwd("/Users/jianan/Dropbox/Side projects/Profile Bayesian/Profile-Bayesian/")
###Simulating function###
set.seed(1000)
library(data.table)
library(ggplot2)
library(RBesT)
###Continuous endpoints
# Simulate rejection rates (Type I error under H0, power under H1) for five
# analysis methods of a pediatric trial that borrows from an adult dataset:
#   mixture19 - RBesT robust mixture prior, adult weight 0.9 / robust 0.1
#               (downstream tables label this column "mixture91" --
#               presumably from the 0.9/0.1 split; confirm)
#   minimax   - "profile Bayesian": borrows the pediatric-only posterior when
#               mean_p > gamma * mean_a, else falls back to the z-test
#   regular   - regular Bayesian with precision-weighted pooled posterior mean
#   freq      - one-sided frequentist z-test at level alpha
#   mixture55 - RBesT robust mixture prior with 0.5/0.5 weights
# Arguments:
#   mu_a, var_a, n_a  - adult mean, variance, sample size
#   mu_p, var_p, n_p  - pediatric mean, variance, sample size
#   n.samples - posterior draws per replicate
#   alpha     - one-sided posterior-probability threshold
#   rep       - number of simulation replicates
#   gamma     - borrowing threshold for the minimax/profile rule
# Returns a named numeric vector of rejection proportions per method.
# NOTE(review): set.seed(25) below makes every call fully deterministic given
# its arguments (the adult sample is identical across calls).
Bayes_continuous=function(mu_a,var_a,n_a,n_p,mu_p,var_p,n.samples,alpha,rep,gamma=0.5){
#initialize res vectors (one rejection indicator per replicate)
reject_mixture55=reject_mixture19=reject_minimax=reject_regular=reject_freq=double(rep)
#simulate adult data
set.seed(25)
data_a=rnorm(n_a,mu_a,sqrt(var_a))
#sd_a=sd(data_a)
# true (not sample) SD is used throughout
sd_a=sqrt(var_a)
mean_a=mean(data_a)
# NOTE(review): bare 'mean_a' below is a no-op inside a function --
# looks like leftover interactive debugging
mean_a
for (i in 1:rep){
#simulate pediatric data
#n_p=ceiling(p*n_a)
data_p=rnorm(n_p,mu_p,sqrt(var_p))
mean_p=mean(data_p)
sd_p=sqrt(var_p)
#sd_p=sd(data_p)
#simulate parameter of interest theta
#minimax
mu_theta_minimax=mean_p
#regular
#w1=(var_a/n_a)/((var_a/n_a)+(var_p/n_p))
# adult weight = adult precision / total precision
w1=(n_a/sd_a^2)/(n_a/sd_a^2+n_p/sd_p^2)
#w1=1/(n_a/var_a+n_p/var_p)*n_a/var_a
w2=1-w1
mu_theta_regular=w2*mean_p+w1*mean_a
#common sd (posterior SD from pooled precision)
sd_theta=sqrt(1/(n_a/sd_a^2+n_p/sd_p^2))
#simulating
# profile rule: borrow only if pediatric mean exceeds gamma * adult mean
if (mean_p>gamma*mean_a){
theta_minimax=rnorm(n.samples,mu_theta_minimax,sd_theta)
reject_minimax[i]=ifelse(mean(theta_minimax<0)<=alpha,1,0)
}else{
# otherwise fall back to the one-sided frequentist z-test
reject_minimax[i]=ifelse(mean_p*sqrt(n_p)/sd_p>qnorm(1-alpha),1,0)
}
theta_regular=rnorm(n.samples,mu_theta_regular,sd_theta)
# informative adult prior, then robustified mixtures (RBesT)
nm <- mixnorm(adult=c(1, mean_a, sd_a/sqrt(n_a)), sigma=sd_a)
rnMix55 <- robustify(nm, weight=0.5, mean=0, n=1, sigma=sd_a)
posterior.sum55 <- postmix(rnMix55, m=mean_p, n=n_p, sigma=sd_p)
# draw mixture components, then draw theta from the selected component
components55 <- sample(1:2,size=n.samples,prob=posterior.sum55[1,],replace=TRUE)
mus <- posterior.sum55[2,]
sds <- posterior.sum55[3,]
theta_mixture55 <- rnorm(n.samples,mean=mus[components55],sd=sds[components55])
#estimate probability of getting theta estimate that is greater than zero
reject_mixture55[i]=ifelse(mean(theta_mixture55<0)<=alpha,1,0)
# same machinery with robust weight 0.1 (adult weight 0.9)
rnMix19 <- robustify(nm, weight=0.1, mean=0, n=1, sigma=sd_a)
posterior.sum19 <- postmix(rnMix19, m=mean_p, n=n_p, sigma=sd_p)
components19 <- sample(1:2,size=n.samples,prob=posterior.sum19[1,],replace=TRUE)
mus <- posterior.sum19[2,]
sds <- posterior.sum19[3,]
theta_mixture19 <- rnorm(n.samples,mean=mus[components19],sd=sds[components19])
#estimate probability of getting theta estimate that is greater than zero
reject_mixture19[i]=ifelse(mean(theta_mixture19<0)<=alpha,1,0)
reject_regular[i]=ifelse(mean(theta_regular<0)<=alpha,1,0)
reject_freq[i]=ifelse(mean_p*sqrt(n_p)/sd_p>qnorm(1-alpha),1,0)
}
res=data.frame("mixture19"=reject_mixture19,"minimax"=reject_minimax,"regular"=reject_regular,"freq"=reject_freq,"mixture55"=reject_mixture55)
return(colMeans(res))
}
# ---- VaryN: vary the pediatric sample size with the adult arm fixed ----
# Each scenario evaluates Bayes_continuous over (adult n, pediatric n) pairs;
# 'intensity' is reused both as posterior draws and replicate count.
intensity=5000
# (n_a, n_p) pairs: adult fixed at 500, pediatric 500 down to 25
list_sample_size=list(c(500,500),c(500,400),c(500,300),c(500,200),c(500,100),c(500,50),c(500,25))
###Under null hypothesis
# Result columns: mixture91 = reject_mixture19 from Bayes_continuous
# (0.9/0.1 prior weights), minimax = profile Bayesian.
#Scenario I: there is no treatment effect for pediatric population, computes Type I error
res_I <- function(x)Bayes_continuous(mu_a=1,var_a=7^2,n_a=x[1],n_p=x[2],mu_p=0,var_p=5^2,n.samples=intensity,alpha=0.025,rep=intensity)
SI=lapply(list_sample_size,res_I)
SI_res=do.call(rbind,SI)
VaryN_H00_1=data.table(SampleSize_a_p=list_sample_size,mixture55=SI_res[,5],mixture91=SI_res[,1],minimax=SI_res[,2],regular=SI_res[,3],frequentist=SI_res[,4])
#Scenario II: there is no treatment effect for pediatric population, computes Type I error
res_I <- function(x)Bayes_continuous(mu_a=1,var_a=7^2,n_a=x[1],n_p=x[2],mu_p=0,var_p=7^2,n.samples=intensity,alpha=0.025,rep=intensity)
SI=lapply(list_sample_size,res_I)
SI_res=do.call(rbind,SI)
VaryN_H00_2=data.table(SampleSize_a_p=list_sample_size,mixture55=SI_res[,5],mixture91=SI_res[,1],minimax=SI_res[,2],regular=SI_res[,3],frequentist=SI_res[,4])
#Scenario III: there is no treatment effect for pediatric population, computes Type I error
res_I <- function(x)Bayes_continuous(mu_a=1,var_a=7^2,n_a=x[1],n_p=x[2],mu_p=0,var_p=10^2,n.samples=intensity,alpha=0.025,rep=intensity)
SI=lapply(list_sample_size,res_I)
SI_res=do.call(rbind,SI)
VaryN_H00_3=data.table(SampleSize_a_p=list_sample_size,mixture55=SI_res[,5],mixture91=SI_res[,1],minimax=SI_res[,2],regular=SI_res[,3],frequentist=SI_res[,4])
###Under alternative hypothesis
#Scenario I: there is treatment effect for pediatric population, computes power
res_I <- function(x)Bayes_continuous(mu_a=1,var_a=7^2,n_a=x[1],n_p=x[2],mu_p=0.8,var_p=7^2,n.samples=intensity,alpha=0.025,rep=intensity)
SI=lapply(list_sample_size,res_I)
SI_res=do.call(rbind,SI)
VaryN_H11_1=data.table(SampleSize_a_p=list_sample_size,mixture55=SI_res[,5],mixture91=SI_res[,1],minimax=SI_res[,2],regular=SI_res[,3],frequentist=SI_res[,4])
#Scenario II: there is treatment effect for pediatric population, computes power
# NOTE(review): the file-header comment says H11-2 uses mu_p = 2, but the
# call below uses mu_p = 1 -- confirm which value is intended.
res_I <- function(x)Bayes_continuous(mu_a=1,var_a=7^2,n_a=x[1],n_p=x[2],mu_p=1,var_p=7^2,n.samples=intensity,alpha=0.025,rep=intensity)
SI=lapply(list_sample_size,res_I)
SI_res=do.call(rbind,SI)
VaryN_H11_2=data.table(SampleSize_a_p=list_sample_size,mixture55=SI_res[,5],mixture91=SI_res[,1],minimax=SI_res[,2],regular=SI_res[,3],frequentist=SI_res[,4])
#Scenario III: there is treatment effect for pediatric population, computes power
res_I <- function(x)Bayes_continuous(mu_a=1,var_a=7^2,n_a=x[1],n_p=x[2],mu_p=1.5,var_p=7^2,n.samples=intensity,alpha=0.025,rep=intensity)
SI=lapply(list_sample_size,res_I)
SI_res=do.call(rbind,SI)
VaryN_H11_3=data.table(SampleSize_a_p=list_sample_size,mixture55=SI_res[,5],mixture91=SI_res[,1],minimax=SI_res[,2],regular=SI_res[,3],frequentist=SI_res[,4])
# ---------------------------------------------------------------------------
# VaryR: vary r, the proportion of pediatric effect size over adult effect
# size used as the borrowing threshold (gamma): r = 0, 0.25, 0.5, 0.65, 0.8, 1
#
# Under the null hypothesis (mu_p = 0, mu_a = 1, var_a = 10^2):
#   H00-1: var_p = 5^2;  H00-2: var_p = 10^2;  H00-3: var_p = 15^2
# Under the alternative hypothesis (var_p = 10^2):
#   H11-1: mu_p = 0.8;  H11-2: mu_p = 1;  H11-3: mu_p = 1.5
#   NOTE(review): the original comment said mu_p = 2 for H11-2 but the code
#   used mu_p = 1; the code's value is preserved here -- confirm intent.
# ---------------------------------------------------------------------------
r_gamma <- c(0,0.25,0.5,0.65,0.8,1)
intensity <- 5000
n_p <- c(600,400,200,100)  # pediatric sample sizes (adult fixed at 1000)
# Run one scenario across the grid of thresholds r and return the rejection
# rates of all five methods as a data.table (one row per r).
run_varyR_scenario <- function(mu_p, var_p, n_pp) {
  res_I <- function(x) Bayes_continuous(mu_a = 1, var_a = 10^2, n_a = 1000,
                                        n_p = n_pp, mu_p = mu_p, var_p = var_p,
                                        n.samples = intensity, alpha = 0.025,
                                        rep = intensity, gamma = x)
  SI_res <- t(sapply(r_gamma, res_I))
  data.table(r = r_gamma,
             mixture55 = SI_res[, 5], mixture91 = SI_res[, 1],
             minimax = SI_res[, 2], regular = SI_res[, 3],
             frequentist = SI_res[, 4])
}
# The original repeated the six scenarios verbatim for each pediatric sample
# size; the loop below produces the same VaryR_<n>_H00_all / VaryR_<n>_H11_all
# data frames. Bayes_continuous reseeds itself on every call, so results
# depend only on its arguments, not on call order.
for (n_pp in n_p) {
  # Null scenarios: Type I error
  VaryR_H00_1 <- run_varyR_scenario(mu_p = 0, var_p = 5^2, n_pp = n_pp)
  VaryR_H00_2 <- run_varyR_scenario(mu_p = 0, var_p = 10^2, n_pp = n_pp)
  VaryR_H00_3 <- run_varyR_scenario(mu_p = 0, var_p = 15^2, n_pp = n_pp)
  # Alternative scenarios: power
  VaryR_H11_1 <- run_varyR_scenario(mu_p = 0.8, var_p = 10^2, n_pp = n_pp)
  VaryR_H11_2 <- run_varyR_scenario(mu_p = 1, var_p = 10^2, n_pp = n_pp)
  VaryR_H11_3 <- run_varyR_scenario(mu_p = 1.5, var_p = 10^2, n_pp = n_pp)
  # Summary tables keep only the profile-Bayesian (minimax) column, as before
  assign(paste0("VaryR_", n_pp, "_H00_all"),
         data.frame("r" = VaryR_H00_1$r, "H00-1" = VaryR_H00_1$minimax,
                    "H00-2" = VaryR_H00_2$minimax, "H00-3" = VaryR_H00_3$minimax))
  assign(paste0("VaryR_", n_pp, "_H11_all"),
         data.frame("r" = VaryR_H11_1$r, "H00-1" = VaryR_H11_1$minimax,
                    "H00-2" = VaryR_H11_2$minimax, "H00-3" = VaryR_H11_3$minimax))
}
save.image("VaryNVaryR_adding more sample sizes for VaryR_change variance.RData")
#Rendering plots ----
# Each VaryN_* table is reshaped to long format and plotted with one line per
# method. The helper below replaces six near-identical copy-pasted blocks and
# fixes a bug in the original p3 block: there the columns were reordered AFTER
# copying to data_wide, so the reshape picked up the methods in the wrong
# order and the legend labels were attached to the wrong lines.
library(ggplot2)
#pdf("VaryNVaryR-Images.pdf")
# Build one Type-I-error / power plot from a VaryN_* results table.
#   dt         - data.table with columns SampleSize_a_p, mixture55, mixture91,
#                minimax, regular, frequentist (rows ordered largest n_p first)
#   ylab       - y-axis label ("Type I Error" or "Power")
#   title_expr - plotmath expression for the plot title
plot_varyN <- function(dt, ylab, title_expr) {
  dt$ss_p <- c(500, 400, 300, 200, 100, 50, 25)  # pediatric n, by row order
  # Column order must match the legend labels (factor levels 1..5 below)
  dt <- dt[, c("SampleSize_a_p", "mixture91", "minimax", "regular",
               "frequentist", "mixture55", "ss_p")]
  dat <- reshape(dt, direction = "long",
                 varying = list(names(dt)[2:6]),
                 v.names = "Value", idvar = "ss_p")
  dat$SampleSize_a_p <- NULL
  dat$time <- factor(dat$time)
  ggplot(data = dat, aes(x = ss_p, y = Value, group = time, color = time)) +
    geom_line() +
    geom_point() +
    labs(x = "Pediatric Sample size", y = ylab, title = title_expr) +
    scale_colour_discrete(name = "Method", breaks = c(1, 2, 3, 4, 5),
                          labels = c("Robust mixture prior (w=0.9)",
                                     "Profile Bayesian",
                                     "Regular Bayesian",
                                     "Frequentist",
                                     "Robust mixture prior (w=0.5)"))
}
# Null-hypothesis scenarios: Type I error vs pediatric sample size
p1 = plot_varyN(VaryN_H00_1, "Type I Error",
                expression(paste("Under null hypothesis that ",mu[p]," = 0 and ",sigma[p]," = 5")))
p1
p2 = plot_varyN(VaryN_H00_2, "Type I Error",
                expression(paste("Under null hypothesis that ",mu[p]," = 0 and ",sigma[p]," = 7")))
p2
p3 = plot_varyN(VaryN_H00_3, "Type I Error",
                expression(paste("Under null hypothesis that ",mu[p]," = 0 and ",sigma[p]," = 10")))
p3
# Alternative-hypothesis scenarios: power vs pediatric sample size
p4 = plot_varyN(VaryN_H11_1, "Power",
                expression(paste("Under alternative hypothesis that ",mu[p]," = 0.8 and ",sigma[p]," = 7")))
p4
p5 = plot_varyN(VaryN_H11_2, "Power",
                expression(paste("Under alternative hypothesis that ",mu[p]," = 1 and ",sigma[p]," = 7")))
p5
p6 = plot_varyN(VaryN_H11_3, "Power",
                expression(paste("Under alternative hypothesis that ",mu[p]," = 1.5 and ",sigma[p]," = 7")))
p6
# ---- Type I error / power curves vs. borrowing fraction r ----
#
# Each VaryR_* frame holds one result column per scenario (three scenarios)
# over a grid of r values.  Frames are reshaped to long format and the three
# scenarios are drawn as coloured curves against r.

# Scenario legends: under the null (H00) the three curves index pediatric
# standard deviations; under the alternative (H11) they index pediatric means.
sigma_labels <- c("Sigma = 5", "Sigma = 10", "Sigma = 15")
mean_labels <- c("Mean = 0.8", "Mean = 1", "Mean = 1.5")

# Reshape one wide VaryR_* frame (column 1 = r, columns 2:4 = results) to
# long format and draw one curve per result column.  When `ref_line` is
# supplied (type I error plots) a dashed horizontal reference is added at
# the nominal one-sided alpha.
plot_vs_r <- function(data_wide, ylab, title, legend_name, legend_labels,
                      ref_line = NULL) {
  data_long <- reshape(data_wide, direction = "long",
                       varying = list(names(data_wide)[2:4]),
                       v.names = "Value", idvar = "r")
  data_long$time <- factor(data_long$time)
  p <- ggplot(data_long, aes(x = r, y = Value, group = time, color = time)) +
    geom_line() +
    geom_point()
  if (!is.null(ref_line)) {
    p <- p + geom_hline(yintercept = ref_line, linetype = "dashed",
                        color = "darkgrey")
  }
  p +
    labs(x = "r", y = ylab, title = title) +
    scale_colour_discrete(name = legend_name, breaks = c(1, 2, 3),
                          labels = legend_labels)
}

## Pediatric sample size = 600
p7 <- plot_vs_r(VaryR_600_H00_all, "Type I Error",
                "Adult sample size = 1000 and Pediatric sample size = 600",
                "Standard deviation", sigma_labels, ref_line = 0.025)
p7
p8 <- plot_vs_r(VaryR_600_H11_all, "Power",
                "Adult sample size = 1000 and Pediatric sample size = 600",
                "Mean", mean_labels)
p8

## Pediatric sample size = 400
p9 <- plot_vs_r(VaryR_400_H00_all, "Type I Error",
                "Adult sample size = 1000 and Pediatric sample size = 400",
                "Standard deviation", sigma_labels, ref_line = 0.025)
p9
# BUG FIX: the original gave this power plot a "Standard deviation" legend
# with Sigma labels.  Like the other H11 (power) plots -- and the
# black-and-white version of p10 later in this file -- the three curves
# index pediatric means.
p10 <- plot_vs_r(VaryR_400_H11_all, "Power",
                 "Adult sample size = 1000 and Pediatric sample size = 400",
                 "Mean", mean_labels)
p10

## Pediatric sample size = 200
p11 <- plot_vs_r(VaryR_200_H00_all, "Type I Error",
                 "Adult sample size = 1000 and Pediatric sample size = 200",
                 "Standard deviation", sigma_labels, ref_line = 0.025)
p11
p12 <- plot_vs_r(VaryR_200_H11_all, "Power",
                 "Adult sample size = 1000 and Pediatric sample size = 200",
                 "Mean", mean_labels)
p12

## Pediatric sample size = 100
p13 <- plot_vs_r(VaryR_100_H00_all, "Type I Error",
                 "Adult sample size = 1000 and Pediatric sample size = 100",
                 "Standard deviation", sigma_labels, ref_line = 0.025)
p13
p14 <- plot_vs_r(VaryR_100_H11_all, "Power",
                 "Adult sample size = 1000 and Pediatric sample size = 100",
                 "Mean", mean_labels)
p14
library(ggpubr)

# Combined panels versus pediatric sample size: type I error (p1-p3) and
# power (p4-p6), three across, sharing one legend below each figure.
ggarrange(p1, p2, p3, ncol = 3, common.legend = TRUE, legend = "bottom")
ggarrange(p4, p5, p6, ncol = 3, common.legend = TRUE, legend = "bottom")

# Combined panels versus r, ordered from smallest to largest pediatric
# sample size (100, 200, 400, 600).
ggarrange(p13, p11, p9, p7, ncol = 2, nrow = 2, common.legend = TRUE,
          legend = "bottom")
ggarrange(p14, p12, p10, p8, ncol = 2, nrow = 2, common.legend = TRUE,
          legend = "bottom")

# Two-panel versions for the middle sample sizes only (200 and 400).
ggarrange(p11, p9, ncol = 2, common.legend = TRUE, legend = "bottom")
ggarrange(p12, p10, ncol = 2, common.legend = TRUE, legend = "bottom")
##### Black-and-white (linetype) variants of the VaryR plots #####
#
# Same data and layout as p9-p12 above, but the three scenarios are
# distinguished by line type instead of colour (print-friendly).

# Reshape one wide VaryR_* frame (column 1 = r, columns 2:4 = results) to
# long format and draw one curve per result column, using line type for the
# scenario legend.  `ref_line`, when supplied, adds a dashed horizontal
# reference at the nominal one-sided alpha (type I error plots only).
plot_vs_r_bw <- function(data_wide, ylab, title, legend_name, legend_labels,
                         ref_line = NULL) {
  data_long <- reshape(data_wide, direction = "long",
                       varying = list(names(data_wide)[2:4]),
                       v.names = "Value", idvar = "r")
  data_long$time <- factor(data_long$time)
  p <- ggplot(data_long, aes(x = r, y = Value, group = time)) +
    geom_line(aes(linetype = time)) +
    geom_point()
  if (!is.null(ref_line)) {
    p <- p + geom_hline(yintercept = ref_line, linetype = "dashed",
                        color = "darkgrey")
  }
  p +
    labs(x = "r", y = ylab, title = title) +
    scale_linetype_discrete(name = legend_name, breaks = c(1, 2, 3),
                            labels = legend_labels)
}

bw_sigma_labels <- c("Sigma = 5", "Sigma = 10", "Sigma = 15")
bw_mean_labels <- c("Mean = 0.8", "Mean = 1", "Mean = 1.5")

## Pediatric sample size = 400
p9 <- plot_vs_r_bw(VaryR_400_H00_all, "Type I Error",
                   "Adult sample size = 1000 and Pediatric sample size = 400",
                   "Standard deviation", bw_sigma_labels, ref_line = 0.025)
p9
p10 <- plot_vs_r_bw(VaryR_400_H11_all, "Power",
                    "Adult sample size = 1000 and Pediatric sample size = 400",
                    "Mean", bw_mean_labels)
p10

## Pediatric sample size = 200
p11 <- plot_vs_r_bw(VaryR_200_H00_all, "Type I Error",
                    "Adult sample size = 1000 and Pediatric sample size = 200",
                    "Standard deviation", bw_sigma_labels, ref_line = 0.025)
p11
p12 <- plot_vs_r_bw(VaryR_200_H11_all, "Power",
                    "Adult sample size = 1000 and Pediatric sample size = 200",
                    "Mean", bw_mean_labels)
p12

# Side-by-side black-and-white panels for sample sizes 200 and 400.
ggarrange(p11, p9, ncol = 2, common.legend = TRUE, legend = "bottom")
ggarrange(p12, p10, ncol = 2, common.legend = TRUE, legend = "bottom")
|
ee48239f94fe9e53f39119452fcee62fca66132e
|
73c273fdf85a99b3d6156986537cf82b0876fc5f
|
/R/accessions_by_spp.R
|
81b7b12a059f563b5b49bf251d6a71dddbbb08cc
|
[
"MIT"
] |
permissive
|
NCBI-Hackathons/GeneHummus
|
e55ce7d1fd231db5516ffac039a329c255a68316
|
1fb36181760e0c1b91e65dd3cbd05af27010d8c4
|
refs/heads/master
| 2021-06-03T15:49:38.606418
| 2020-09-02T21:10:25
| 2020-09-02T21:10:25
| 131,613,965
| 8
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,324
|
r
|
accessions_by_spp.R
|
#' Compute the total number of accession proteins per species
#'
#' Summarizes a data frame of protein ids and returns the total number of
#' accessions per organism.
#'
#' @importFrom dplyr %>% count rename
#'
#' @param my_accessions A data frame with accession protein ids and organisms;
#'   must contain an \code{organism} column.
#'
#' @usage accessions_by_spp(my_accessions)
#'
#' @seealso \code{\link{getAccessions}} to create the data frame with accession
#' id and organism for each protein identifier.
#'
#' @return A \code{data.frame} of summarized results including columns:
#' \itemize{
#' \item organism, taxonomic species
#' \item N.seqs, total number of sequences
#' }
#'
#' @examples
#' my_prots = data.frame(accession = c("XP_014620925", "XP_003546066",
#' "XP_025640041", "XP_019453956", "XP_006584791", "XP_020212415",
#' "XP_017436622", "XP_004503803", "XP_019463844"),
#' organism = c("Glycine max", "Glycine max", "Arachis hypogaea",
#' "Lupinus angustifolius", "Glycine max", "Cajanus cajan",
#' "Vigna angularis", "Cicer arietinum", "Lupinus angustifolius"))
#'
#' accessions_by_spp(my_prots)
#'
#' @author Jose V. Die
#'
#' @export
accessions_by_spp <- function(my_accessions) {
  # Fail fast with a clear error rather than an obscure dplyr error
  # when the input is not a data frame or lacks the organism column.
  stopifnot(is.data.frame(my_accessions),
            "organism" %in% names(my_accessions))
  my_accessions %>%
    count(organism) %>%
    rename(N.seqs = n)
}
# Silence R CMD check notes about non-standard-evaluation column names.
utils::globalVariables(c("organism", "N.seqs"))
|
39af362b28a94ce7703d5c6f086abf8ce58a3ea6
|
180d8eb6821307e854d43e93c556eb72af82fcac
|
/R_code/Length_Distribution.R
|
abcc0fc9740b63a7f5d1104a0268665c97e61dd0
|
[
"Apache-2.0"
] |
permissive
|
NCEAS/oss-fishteam
|
9b4864ff0ce63a237086f3f92a9c2a7d134e7d38
|
651fd1b0f8874ea2ab85c67409520192b5074b8e
|
refs/heads/master
| 2021-01-01T16:01:44.082589
| 2018-06-07T17:35:39
| 2018-06-07T17:35:39
| 97,758,520
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,652
|
r
|
Length_Distribution.R
|
# Using tables GLFREC and INVREC, determine the length frequency of red
# snapper caught with gear (GEAR_TYPE) shrimp trawl (ST).
# Sex is not considered because "male and female red snapper grow rapidly
# and at about the same rate until about 8 years old and about 28 inches in
# length." - Seagrant
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged (they
# clobber the caller's session); kept to preserve the original behaviour.
rm(list = ls()) # clear workspace
library(tidyverse)
setwd("~/oss/Synthesis")

# Raw SEAMAP tables: station/gear records (INVREC) and fish lengths (GLFREC).
INVREC <- read.table("Seamap/INVREC.txt", sep = ",",
                     stringsAsFactors = FALSE, header = TRUE)
GLFREC <- read.table("Seamap/GLFREC.txt", sep = ",",
                     stringsAsFactors = FALSE, header = TRUE)

# Shrimp-trawl tows only.
INV <- INVREC %>%
  select(CRUISEID, STATIONID, INVRECID, GEAR_TYPE, GEAR_SIZE, MESH_SIZE) %>%
  filter(GEAR_TYPE == "ST")
# Red snapper (species code CAMPEC) only.
GLF <- GLFREC %>%
  select(CRUISEID, STATIONID, GLFID, SPEC_GLF, LEN_GLF, MEASCD_GLF) %>%
  filter(SPEC_GLF == "CAMPEC")

# Sanity checks (the original printed these logicals with the instruction
# "Make Sure these are all true!"; abort instead of printing).
# STATIONID must be unique in the filtered tow table.
stopifnot(length(unique(INV$STATIONID)) == length(INV$STATIONID))
length_freq <- left_join(
  select(GLF, CRUISEID, STATIONID, LEN_GLF, MEASCD_GLF),
  select(INV, STATIONID, GEAR_TYPE, GEAR_SIZE, MESH_SIZE),
  by = "STATIONID")
# Join must not duplicate length records.
stopifnot(length(length_freq$STATIONID) == length(GLF$STATIONID))

# Frequency (percent) of each gear type / gear size / mesh size combination.
length_freq <- length_freq %>%
  mutate(GEAR_MERGE = paste(GEAR_TYPE, GEAR_SIZE, MESH_SIZE, sep = "_"))
(table(length_freq$GEAR_MERGE) / length(length_freq$GEAR_MERGE)) * 100
#     NA_NA_NA   ST_16_0.25    ST_20_1.5   ST_40_1.58   ST_40_1.63   ST_40_1.65
#   5.53870931   0.00880758   1.43689369   0.90340602  91.78001183   0.33217157
# Almost 92% of red snapper catches are with shrimp trawl of size 40 and
# mesh size 1.63, so restrict to that gear and drop missing lengths.
length_freq_gs40_ms163 <- length_freq %>%
  filter(GEAR_SIZE == 40, MESH_SIZE == 1.63) %>%
  filter(!is.na(LEN_GLF))

# Map measurement-type codes to measurement names ("empty" for any
# unmapped code).
meas_type <- data.frame(table(length_freq_gs40_ms163$MEASCD_GLF))
meas_type$meas <- "empty"
meas_type$meas[meas_type$Var1 %in% c(1, 51)] <- "Fork"
meas_type$meas[meas_type$Var1 == 2] <- "Standard"
meas_type$meas[meas_type$Var1 %in% c(11, 18, 53)] <- "Total"
meas_freq <- meas_type %>%
  group_by(meas) %>%
  summarize(Freq = sum(Freq)) %>%
  mutate(percent = (Freq / sum(Freq)) * 100)
#   meas      Freq     percent
#   <chr>    <int>       <dbl>
# 1 Fork     70243 96.32489064
# 2 Standard    14  0.01919833
# 3 Total     2666  3.65591103
# Approximately 96% are fork length, but all measurements are kept because
# all are used for catch and probably not that different.

Lm <- 230 # length at maturity for red snapper (mm)
# Maturity is reached around age 2, but the size estimate at age 2 exceeds Lm.

# Length-frequency distribution of red snapper with the maturity cutoff.
# FIX: use aes(LEN_GLF) instead of aes(df$LEN_GLF), reference Lm instead of
# repeating 230, and use theme() -- the original added theme_update() to the
# plot, which sets the global theme and applies the *previous* theme to this
# plot, so the title was never centred here.
ggplot(length_freq_gs40_ms163, aes(LEN_GLF)) +
  geom_histogram() +
  annotate(geom = "text", x = Lm + 5, y = 20000, label = "Lm", hjust = 0) +
  geom_vline(xintercept = Lm) +
  labs(x = "Length (mm)", y = "counts", title = "Length Distribution") +
  theme(plot.title = element_text(hjust = 0.5))

percent_juv <- (sum(length_freq_gs40_ms163$LEN_GLF < Lm) /
                  length(length_freq_gs40_ms163$LEN_GLF)) * 100
# 94.30103% are juveniles

# Von Bertalanffy size at age: L(t) = Linf * (1 - exp(-k * (t - t0))) with
# Linf = 856.4, k = 0.19, t0 = -0.395 (coefficients from Brad's website).
# Vectorized; the original loop wrote t - -0.395 and called rm(t) inside
# the loop body.
len <- 856.4 * (1 - exp(-0.19 * (seq_len(5) + 0.395)))
|
eeb4a2a8945f762e9734842b5e5d24d6c5292c02
|
28e13e398df9e2b7310b369b4536e8bfdedd9778
|
/inst/examples/ex8.R
|
61d1c4037b3945957461c42e35f2e8bce0b81b71
|
[] |
no_license
|
davids99us/whywhere
|
497274cfc0a8a3c93d85d117ce9e0b3f7f3db2d9
|
e51a00206c495d697cae2530bfb7d498edf13c72
|
refs/heads/master
| 2021-01-18T23:40:37.149298
| 2016-05-22T13:15:40
| 2016-05-22T13:15:40
| 33,341,937
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 539
|
r
|
ex8.R
|
# Compare some of the top variables: fit a WhyWhere model on each raster and
# plot the segmentation and the prediction map with the bradypus presence
# points overlaid (2x2 panel layout).
par(mfrow = c(2, 2), mar = c(2, 2, 2, 2) + 0.1)

# NOTE(review): hard-coded local data path -- adjust for your machine.
path <- "/home/davids99us/data/Terrestrial"
Tfiles <- list.files(path, pattern = "pgm")

# Bradypus presence records shipped with the dismo package.
file <- file.path(system.file(package = "dismo"), "ex", "bradypus.csv")
Pres <- fread(file, header = TRUE, sep = ",")
Pres$species <- NULL

# Fit one variable and draw its pair of panels: the segmented model and the
# prediction map with presence points; returns the fitted object invisibly.
plot_variable <- function(fname, label) {
  o <- ww(Pres, c(fname), dirname = path)
  plot.dseg(o)
  plot(predict.dseg(o))
  points(o$data$lon, o$data$lat)
  title(label)
  invisible(o)
}

# BUG FIX: the original titled this plot "lccld07" although the raster
# actually used is lccld08.pgm.
o <- plot_variable("lccld08.pgm", "lccld08")
o <- plot_variable("fnocwat.pgm", "fnocwat")
|
5882f76e7142012017215b3499c78d203710fbe0
|
660b33ebda363b8508bb430c13b664718b34704f
|
/man/seaice.Rd
|
bb71b56bf9ef34c0d49c3b2ca50207e7e7c3bf29
|
[] |
no_license
|
dis-organization/seaice
|
f47692ec6d49217e694288e9affa82ecc9aedd86
|
8116c2f76a283337a15c4a3ad7f99d468e283e96
|
refs/heads/master
| 2021-06-14T17:25:24.658179
| 2017-03-06T22:16:08
| 2017-03-06T22:16:08
| 84,126,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 205
|
rd
|
seaice.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seaice-package.r
\docType{package}
\name{seaice}
\alias{seaice}
\alias{seaice-package}
\title{seaice.}
\description{
seaice.
}
|
093e0f0c1a564049efcfe0c8f416b491a7b0b338
|
2875548a66e0e411567acb689df5bbd3a183e12d
|
/man/derivatives.Rd
|
9ead9f554f33beccc6efc01241df2eb23c46ab20
|
[] |
no_license
|
cran/KSPM
|
6fb1f3d2b73ec4e6e565c19ad8cc690d65137243
|
8a6566f83eced36c4d536a8a02cde7bc88b379d6
|
refs/heads/master
| 2021-07-08T01:02:18.398063
| 2020-08-10T12:32:11
| 2020-08-10T12:32:11
| 164,908,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,523
|
rd
|
derivatives.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/derivatives.R
\name{derivatives}
\alias{derivatives}
\title{Computing kernel function derivatives}
\usage{
derivatives(object)
}
\arguments{
\item{object}{an object of class "kspm", usually, a result of a call to \code{kspm}.}
}
\value{
an object of class 'derivatives'
\item{derivmat}{a list of \eqn{n \times d}{n x d} matrix (one for each kernel) where \eqn{n}{n} is the number of subjects and \eqn{d}{d} the number of variables included in the kernel}
\item{rawmat}{a \eqn{n \times q}{n x q} matrix with all variables included in the kernel part of the model \eqn{q}{q} the number of variables included in the whole kernel part}
\item{scalemat}{scaled version of rawmat}
\item{modelmat}{matrix of correspondence between variables and kernels}
}
\description{
\code{derivatives} is a function for "kspm" objects that computes pointwise partial derivatives of \eqn{h(Z)} according to each \eqn{Z} variable.
}
\details{
derivatives are not computed for interactions. If a variable is included in several kernels, the user may obtain the corresponding pointwise derivatives by summing the pointwise derivatives associated with each kernel.
}
\references{
Kim, Choongrak, Byeong U. Park, and Woochul Kim. "Influence diagnostics in semiparametric regression models." Statistics and Probability Letters 60.1 (2002): 49-58.
}
\seealso{
\link{plot.derivatives}
}
\author{
Catherine Schramm, Aurelie Labbe, Celia Greenwood
}
|
bec7e5196f2412b1d59549cf57d601849f7ea221
|
8340317041a7f6aded928bc61237c78d32e059ee
|
/man/Cb.logistic.Rd
|
75682a5db1a5fe4583f90a5a706809c092a4312f
|
[] |
no_license
|
msadatsafavi/txBenefit
|
c2b2051168db0e0b0ef4a6015136c60f7a4f6b30
|
7342099f8cadadb7090eb7557c330d04f27e0520
|
refs/heads/master
| 2020-12-02T23:35:00.883225
| 2020-02-01T00:57:58
| 2020-02-01T00:57:58
| 231,154,313
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,236
|
rd
|
Cb.logistic.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Cb.R
\name{Cb.logistic}
\alias{Cb.logistic}
\title{Cb calculations for a logistic regression model.}
\usage{
Cb.logistic(reg_object, tx_var, semi_parametric = FALSE)
}
\arguments{
\item{reg_object}{An object of class 'glm' that contains the results of the logit model.}
\item{tx_var}{A string containing the name of the treatment indicator variable.}
\item{semi_parametric}{Optional (default=FALSE). If TRUE, the semi-parametric estimator for Cb will be returned.}
}
\value{
This function returns an object of class Cb_output, which includes Cb as a member.
}
\description{
Cb calculations for a logistic regression model.
}
\examples{
data("rct_data")
#Creating a binary variable indicating whether an exacerbation happened during the first 6 months.
#Because everyone is followed for at least 6 months, there is no censoring.
rct_data[,'b_exac']<-rct_data[,'tte']<0.5
rct_data[which(is.na(rct_data[,'b_exac'])),'b_exac']<-FALSE
reg.logistic<-glm(formula = b_exac ~ tx + sgrq + prev_hosp + prev_ster + fev1, data = rct_data, family = binomial(link="logit"))
res.logistic<-Cb.logistic(reg.logistic,tx_var = "tx", semi_parametric = T)
print(res.logistic)
}
|
dc8d14e371c332718b529cfd35f3a91f8d0eacaa
|
6a1ffcaf3fe6081859849f82ea3d8784cd87bb94
|
/man/freshdesk_api_call.Rd
|
000217530b17937f52d35f1069404c01875b0c6a
|
[
"MIT"
] |
permissive
|
jjanuszczak/freshdeskr
|
870cae5da26ec5743ec3efdb779e868111e5672d
|
4cf49becd0bf0d9c4fdecdaf9fa2824d1dd0f7c7
|
refs/heads/master
| 2020-03-14T06:51:55.201309
| 2018-07-22T10:20:17
| 2018-07-22T10:20:17
| 131,492,092
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,251
|
rd
|
freshdesk_api_call.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api.R
\name{freshdesk_api_call}
\alias{freshdesk_api_call}
\title{Calls Freshdesk API}
\usage{
freshdesk_api_call(client, path, query = NULL)
}
\arguments{
\item{client}{The Freshdesk API client object (see \code{\link{freshdesk_client}}).}
\item{path}{The API query path.}
\item{query}{API query string.}
}
\value{
An S3 object containing the following attributes:
\itemize{
\item{\code{content}}: {the parsed content of the response.}
\item{\code{path}}: {the API query path.}
\item{\code{response}}: {the complete httr response object.}
\item{\code{rate_limit_remaining}}: {the API calls remaining in the current period.}
\item{\code{rate_limit_total}}: {the total API calls for the current period.}
}
}
\description{
\code{freshdesk_api_call} makes a single query to the Freshdesk API and returns a result.
}
\details{
This function queries the Freshdesk API based on a path and returns a \code{freshdesk_api}
object containing the http response, the parsed content, and the API rate limit status.
}
\examples{
\dontrun{
fc <- freshdesk_client("foo", "MyAPIKey")
apidata <- freshdesk_api(fc, "/api/v2/tickets/3")
apidata$rate_limit_remaining
}
}
|
fc02f73aa27937e643ea4d4669bdee755023eeea
|
2b01f6be3f3a4a043effeab2f7bfaa7e6f24e87f
|
/utils/man/read.output.Rd
|
feb875c16ade768e47a74e9e2a2a2a406e6b3333
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
serbinsh/pecan
|
bebd2b6eb3ce1587783afa987ee94f9a7202fa33
|
3d48860eaf7cd481a67dba3e16eec994f817990b
|
refs/heads/master
| 2023-05-11T09:07:25.415892
| 2017-07-13T22:31:36
| 2017-07-13T22:31:36
| 19,829,867
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,450
|
rd
|
read.output.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.output.R
\name{read.output}
\alias{read.output}
\title{Read model output}
\usage{
read.output(runid, outdir, start.year = NA, end.year = NA,
variables = "GPP", dataframe = FALSE)
}
\arguments{
\item{runid}{the id distinguishing the model run.}
\item{outdir}{the directory that the model's output was sent to}
\item{start.year}{first year of output to read (should not precede the first year of model output)}
\item{end.year}{last year of output to read}
\item{variables}{variables to be read from model output}
\item{dataframe}{A boolean that will return output in a data.frame format with a posix column. Useful for align.data and plotting.}
}
\value{
vector of output variable
}
\description{
Reads the output of a single model run
}
\details{
Generic function to convert model output from model-specific format to
a common PEcAn format. This function uses MsTMIP variables except that units of
(kg m-2 d-1) are converted to kg ha-1 y-1. Currently this function converts
Carbon fluxes: GPP, NPP, NEE, TotalResp, AutoResp, HeteroResp,
DOC_flux, Fire_flux, and Stem (Stem is specific to the BioCro model)
and Water fluxes: Evaporation (Evap), Transpiration(TVeg),
surface runoff (Qs), subsurface runoff (Qsb), and rainfall (Rainf).
For more details, see the
\href{http://nacp.ornl.gov/MsTMIP_variables.shtml}{MsTMIP variables}
documentation
}
\author{
Michael Dietze, David LeBauer
}
|
5998247e1b047a0c0d2c07ea25f41401dbced087
|
b89bebbde0659a8c9ae84a5956c8af0e6e9575de
|
/app_ui.R
|
844adbe6e8bd35908638f9b868d075fdbd2a6bdc
|
[
"MIT"
] |
permissive
|
chloewlee/sp21-lab07
|
a96ab8ed722ad8f1767025381e989ac91fa99c6a
|
689155de6db68abb818ab1ea4b3efb9b17b9e39c
|
refs/heads/main
| 2023-05-01T01:09:48.555807
| 2021-05-12T18:27:34
| 2021-05-12T18:27:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 988
|
r
|
app_ui.R
|
#install packages
library(shiny)
library(ggplot2)
library(plotly)
# read dataset
immunizations <- read.csv("immunizations.csv")
# --------- CREATE WIDGETS ----------
# choose county widget (this widget allows you to
# choose which county you want to have the plot focus on)
# enrollment size widget (this widget allows you to choose the
# range of enrollment size)
# --------- CREATE PAGES ----------
page_one <- tabPanel(
"Page 1", #title of the page, what will appear as the tab name
sidebarLayout(
sidebarPanel(
# left side of the page
# insert widgets or text here -- their variable name(s), NOT the raw code
),
mainPanel( # typically where you place your plots + texts
# insert chart and/or text here -- the variable name NOT the code
)))
# --------- DEFINING UI: PUTTING PAGES TOGETHER ----------
ui <- navbarPage(
"Title",
page_one
#insert other pages here
)
|
871e34794c626c58eebd6820362eb0570ca173f6
|
dc08edafa5740fd34e85da39b4cf46e1a26815f7
|
/r/source_rfcv.r
|
b2cbbafc43641aacae14aae6bbc3a81170bad001
|
[] |
no_license
|
jtresko/GBM_reversion
|
8a6848b47ae67d9049d62eaedd0164cbe9b5d3a2
|
df5291e0a24bd016e5acbc100fe985cacec65c2d
|
refs/heads/master
| 2022-10-03T15:08:55.605348
| 2017-05-02T01:22:13
| 2017-05-02T01:22:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 181
|
r
|
source_rfcv.r
|
A single object matching ‘round’ was found
It was found in the following places
package:base
namespace:base
with value
function (x, digits = 0) .Primitive("round")
|
25d382a9c663a6d3011a1584b93b1ffc3811d62a
|
169a6494a475f42d0452d3ade4622bde1eb939cc
|
/tests/testthat/test-ncbi_downstream.R
|
3c6749b2f663a0578103f2466bfa69c2657eeff8
|
[
"MIT"
] |
permissive
|
ropensci/taxize
|
d205379bc0369d9dcdb48a8e42f3f34e7c546b9b
|
269095008f4d07bfdb76c51b0601be55d4941597
|
refs/heads/master
| 2023-05-25T04:00:46.760165
| 2023-05-02T20:02:50
| 2023-05-02T20:02:50
| 1,771,790
| 224
| 75
|
NOASSERTION
| 2023-05-02T20:02:51
| 2011-05-19T15:05:33
|
R
|
UTF-8
|
R
| false
| false
| 2,006
|
r
|
test-ncbi_downstream.R
|
context("ncbi_downstream")
test_that("ncbi_downstream returns correct structure", {
skip_on_cran() # uses secrets
vcr::use_cassette("ncbi_downstream", {
tt <- ncbi_downstream(id = 7459, downto="species")
})
expect_is(tt, "data.frame")
expect_equal(NCOL(tt), 3)
for (i in seq_along(tt)) expect_is(tt[[i]], "character")
})
test_that("ncbi_downstream does remove some ambiguous taxa", {
skip_on_cran()
# 590 = "Salmonella"
## DOES remove "subsp."
vcr::use_cassette("ncbi_downstream_ambiguous_false", {
amb_no <- ncbi_downstream(id = '590', downto = "species", ambiguous = FALSE)
})
## DOES NOT remove "subsp."
vcr::use_cassette("ncbi_downstream_ambiguous_true", {
amb_yes <- ncbi_downstream(id = '590', downto = "species", ambiguous = TRUE)
})
expect_is(amb_no, "data.frame")
expect_is(amb_yes, "data.frame")
expect_gt(NROW(amb_yes), NROW(amb_no))
})
test_that("ncbi_downstream handles when taxa searches return themselves", {
skip_on_cran() # uses secrets
# eg.., with `Euchloe` ncbi_downstream was fetching 2 subgenus rank children
# which return data that had the ids from those subgenera within it
# fix for https://github.com/ropensci/taxize/issues/807 to remove "self ids"
# and remove any duplicate records resulting
vcr::use_cassette("ncbi_downstream_handles_self_ids", {
tt <- downstream("Euchloe", downto = "species", db = "ncbi",
rank_filter="genus", messages = FALSE)
})
expect_named(tt, "Euchloe")
expect_is(tt, "downstream")
expect_is(tt[[1]], "data.frame")
expect_equal(attr(tt, "db"), "ncbi")
})
test_that("ncbi_downstream doesn't fail on no intermediate data", {
skip_on_cran() # uses secrets
vcr::use_cassette("ncbi_downstream_intermediate", {
tt <- ncbi_downstream(1398485, downto = "no rank", intermediate = TRUE)
})
expect_is(tt, "list")
expect_is(tt$target, "data.frame")
expect_equal(NROW(tt$target), 0)
expect_is(tt$intermediate, "list")
expect_length(tt$intermediate, 0)
})
|
80a586e036897d3b1d8e8a4242fd29560886fffb
|
bd33b34437e80d51fbdca7e703a4b0e505c2ebb1
|
/power_ml.R
|
4b1744a7f7c205b7bbbf5d29116026938a566358
|
[] |
no_license
|
RobbievanAert/power_ml
|
08c7d83599dc4d2aa1766c2ce67a590e3ac97a58
|
db1e946512ae56fab77b0543d398630a576c7b50
|
refs/heads/master
| 2020-05-14T01:23:27.186600
| 2019-04-16T12:46:45
| 2019-04-16T12:46:45
| 181,684,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,948
|
r
|
power_ml.R
|
################################################################################
##### STATISTICAL POWER FOR TESTING NULL HYPOTHESIS OF NO EFFECT AND NO #####
##### BETWEEN-STUDY VARIANCE IN A META-ANALYSIS #####
##### Author: Robbie C.M. van Aert #####
################################################################################
rm(list = ls())
#################
### FUNCTIONS ###
#################
### Function for computing estimate of tau2 with Paule-Mandel estimator
PM <- function(tau2, yi, vi)
{
df <- length(yi) - 1 # Degrees of freedom of chi-square distribution
wi <- 1/(vi+tau2) # Weights in meta-analysis
theta <- sum(yi*wi)/sum(wi) # Meta-analytic effect size
Q <- sum(wi*(yi-theta)^2) # Q-statistic
Q - df # Q-statistic minus degrees of freedom
}
### Function to get power in meta-analysis for SMD and correlation
get_power <- function(es, n1i, n2i, ni, rhos, mus, alpha_mu = 0.05, tail = "two",
alpha_tau2 = 0.05, reps = 10000, tau2_max = 5, report = TRUE)
{
# es = effect size measure used (SMD or correlation)
# n1i = sample size group 1 (SMD)
# n2i = sample size group 2 (SMD)
# ni = sample size (correlation)
# rhos = intra-class corelations
# mus = true effect sizes
# alpha_mu = alpha-level for testing null hypothesis of no effect
# tail = whether one or two-tailed tests is conducted in primary studies
# alpha_tau2 = alpha-level for Q_test
# reps = number of replications simulation results are based upon
# tau2_max = upper bound for root-finding algorithm
# report = whether you want to get a HTML report of the results
if (es == "SMD")
{ # Standardized mean difference (Hedges' g)
k <- length(n1i) # Number of studies in meta-analysis
df <- n1i+n2i-2 # Degrees of freedom
} else if (es == "COR")
{
k <- length(ni) # Number of studies in meta-analysis
}
### Empty object for storing results
pow_mu <- pow_tau2 <- mean_I2 <- matrix(NA, nrow = length(mus), ncol = length(rhos),
dimnames = list(as.character(mus), as.character(rhos)))
### Create progress bar
pb <- txtProgressBar(min = 0, max = length(rhos)*length(mus), style = 3)
m <- 0
Sys.sleep(0.1)
for (rho in rhos)
{
for (mu in mus)
{
null_sim <- Q_sim <- I2 <- numeric(reps) # Empty objects for storing results
tau2 <- -rho/(rho-1) # Compute tau^2 based on ICC and sigma2 = 1
for (i in 1:reps)
{
if (es == "SMD")
{ # Standardized mean difference (Hedges' g)
mdiffi <- rnorm(k, mean = mu, sd = sqrt(1/n1i+1/n2i+tau2)) # Observed mean difference
s21i <- 1/(n1i-1) * rchisq(k, df = n1i-1) # Observed variance group 1
s22i <- 1/(n2i-1) * rchisq(k, df = n2i-1) # Observed variance group 2
pool <- ((n1i-1)*s21i + (n2i-1)*s22i)/(n1i+n2i-2) # Observed pooled variances of mean difference
di <- mdiffi/sqrt(pool) # Cohen's d
J <- exp(lgamma(df/2) - log(sqrt(df/2)) - lgamma((df-1)/2)) # Hedges' g correction factor
yi <- J * di # Compute Hedges' g
### Unbiased estimator of variance (Viechtbauer, 2007, equation 23)
vi <- 1/n1i+1/n2i+(1-(n1i+n2i-2-2)/((n1i+n2i-2)*J^2))*yi^2
} else if (es == "COR")
{ # Correlation coefficient (after Fisher-z transformation)
yi <- rnorm(k, mean = mu, sd = sqrt(1/(ni-3)+tau2))
vi <- 1/(ni-3)
}
### Estimate tau^2 with Paule-Mandel estimator
if (PM(tau2 = 0, yi = yi, vi = vi) < 0)
{ # If estimate is smaller than zero, set it equal to zero
tau2_PM <- 0
} else
{ # Estimate tau2 with PM if estimate is larger than zero
tau2_PM <- uniroot(PM, interval = c(0, tau2_max), yi = yi, vi = vi)$root
}
wi_star <- 1/(vi+tau2_PM) # Weights RE model
est <- sum(wi_star*yi)/sum(wi_star) # Estimate RE model
se_est <- sqrt(1/sum(wi_star)) # SE of estimate RE model
wi <- 1/vi # Weights EE model
s2 <- (k-1)*sum(wi)/(sum(wi)^2-sum(wi^2)) # Typical within-study variance
I2[i] <- tau2_PM/(s2+tau2_PM)*100 # I2-statistic
#########################
### TEST OF NO EFFECT ###
#########################
if (tail == "two")
{ # Compute two-tailed p-value
pval <- ifelse(est > 0, 2*pnorm(est/se_est, lower.tail = FALSE), 2*pnorm(est/se_est))
} else if (tail == "one")
{ # Compute one-tailed p-value
pval <- pnorm(est/se_est, lower.tail = FALSE)
}
null_sim[i] <- pval < alpha_mu # Check whether p-value is smaller than alpha_mu
############################################
### TEST OF HOMOGENEOUS TRUE EFFECT SIZE ###
############################################
est <- sum(wi*yi)/sum(wi) # Estimate EE model
Qstat <- sum(wi*(yi-est)^2) # Q-statistic
pval_Q <- pchisq(Qstat, df = k-1, lower.tail = FALSE) # P-value Q-statistic
Q_sim[i] <- pval_Q < alpha_tau2 # Check whether p-value is smaller than alpha_tau2
}
### Compute statitical power across replications
pow_mu[as.character(mu),as.character(rho)] <- mean(null_sim)
pow_tau2[as.character(mu),as.character(rho)] <- mean(Q_sim)
### Mean I2-statistic across replications
mean_I2[as.character(mu),as.character(rho)] <- mean(I2)
### Update progress bar
m <- m + 1
setTxtProgressBar(pb, m)
}
}
close(pb) # Close progress bar
if (report == TRUE)
{ # If the user wants to see the report
res <- list(pow_mu = pow_mu, pow_tau2 = pow_tau2, mean_I2 = mean_I2)
save(res, file = "res.RData") # Save results to working directory
rmarkdown::render("report_power_ml.Rmd") # Create report
browseURL(file.path("report_power_ml.html")) # Open report
}
return(list(pow_mu = pow_mu, pow_tau2 = pow_tau2, mean_I2 = mean_I2))
}
################################################################################
################################################################################
################################################################################
### THE USER HAS TO SPECIFY THE FOLLOWING INFORMATION FOR APPLYING THE FUNCTION:
# es = effect size measure used --> standardized mean difference ("SMD") or correlation ("COR")
# n1i = vector of sample sizes group 1 (only for SMD)
# n2i = vector of sample sizes group 2 (only for SMD)
# ni = vector of sample sizes (only for COR)
# rhos = vector of intra-class corelations
# mus = vector of true effect sizes
# alpha_mu = alpha-level for testing null hypothesis of no effect (default = 0.05)
# tail = whether null-hypothesis of no effect is tested one- ("one") or two-tailed
# ("two") (default = "two")
# alpha_tau2 = alpha-level for Q_test (default = 0.05)
# reps = number of replications for simulations (default = 10000)
# tau2_max = upper bound for root-finding algorithm for estimating tau2 (default = 5)
# report = whether you want to get a HTML report of the results (in order to create
# the report two files will be saved to the working directory of your computer,
# default = TRUE)
### Example standardized mean difference
rhos <- c(0, 0.1, 0.25) # Intra-class correlations
mus <- c(0, 0.5, 1) # True effect size
n1i <- n2i <- c(15, 20, 30, 40, 50, 60, 70, 80, 90, 100) # Sample sizes group 1 and 2
get_power(es = "SMD", n1i = n1i, n2i = n2i, rhos = rhos, mus = mus)
### Example correlation
rhos <- c(0, 0.1, 0.25) # Intra-class correlations
mus <- c(0, 0.5, 1) # True effect size
ni <- c(15, 20, 30, 40, 50, 60, 70, 80, 90, 100) # Sample sizes
get_power(es = "COR", ni = ni, rhos = rhos, mus = mus)
|
78b59b0473c4f0517518f8f792342ecefb54ddde
|
c05e0de22f5699d1c2b2921480be68c8e8b8943f
|
/R/utils_pipe.R
|
e3d536e8d8fa4a4d632b8b34d1621aba4a111d93
|
[
"MIT"
] |
permissive
|
rstudio/gt
|
36ed1a3d5d9a1717dfe71ed61e5c005bc17e0dce
|
c73eeceaa8494180eaf2f0ad981056c53659409b
|
refs/heads/master
| 2023-09-04T06:58:18.903630
| 2023-09-01T02:06:05
| 2023-09-01T02:06:05
| 126,038,547
| 1,812
| 225
|
NOASSERTION
| 2023-09-08T00:21:34
| 2018-03-20T15:18:51
|
R
|
UTF-8
|
R
| false
| false
| 791
|
r
|
utils_pipe.R
|
#------------------------------------------------------------------------------#
#
# /$$
# | $$
# /$$$$$$ /$$$$$$
# /$$__ $$|_ $$_/
# | $$ \ $$ | $$
# | $$ | $$ | $$ /$$
# | $$$$$$$ | $$$$/
# \____ $$ \___/
# /$$ \ $$
# | $$$$$$/
# \______/
#
# This file is part of the 'rstudio/gt' project.
#
# Copyright (c) 2018-2023 gt authors
#
# For full copyright and license information, please look at
# https://gt.rstudio.com/LICENSE.html
#
#------------------------------------------------------------------------------#
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
|
3da84f9276966b6d836cfea72cc761220f196fa3
|
250c51be2dbb89a73fcc0e4acabe7aa83ce2a199
|
/pipe/lib/megadaph.mtdna/man/compute_strand_bias.Rd
|
eafe561ccc4c9089253477dbf4d03a58b5ffa1d6
|
[
"MIT"
] |
permissive
|
fennerm/daphnia-mtdna-ma
|
744d1530ff9a5aa363e31b127c2de0ac43f2d95a
|
1f257838883924289b16344be877af55383b4f65
|
refs/heads/master
| 2021-03-27T19:53:19.899696
| 2019-03-18T01:11:49
| 2019-03-18T01:11:49
| 92,858,584
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 685
|
rd
|
compute_strand_bias.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multinomial_variant_calling.R
\name{compute_strand_bias}
\alias{compute_strand_bias}
\title{Calculate strand bias for a single genome position using fisher exact test}
\usage{
compute_strand_bias(counts, wild_type_allele, mutant_allele)
}
\arguments{
\item{counts}{Numeric vector; Allele counts for a single genome position and
sample}
\item{wild_type_allele}{Character; The wild type allele for this position}
\item{mutant_allele}{Character; The wild type allele for this position}
}
\value{
Numeric; A p-value
}
\description{
Calculate strand bias for a single genome position using fisher exact test
}
|
7bc860a6fd318ff65cf49933ff4e14e7a87edb51
|
b0ee96a1b50dd537b080e59bb1a8e786f22f26cb
|
/R/RFLPcombine.R
|
3849e645c0345f8e8bf81b208a97d85452e5ed30
|
[] |
no_license
|
cran/RFLPtools
|
a989cfaf23994bbeaf7604e0ce65435ddf78ad37
|
9f048d015d8df7459ce26c7d5b8ac7ecd1e86b0e
|
refs/heads/master
| 2022-03-05T22:27:27.183989
| 2022-02-08T08:40:02
| 2022-02-08T08:40:02
| 17,681,978
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 978
|
r
|
RFLPcombine.R
|
###############################################################################
## Combine data sets
###############################################################################
RFLPcombine <- function(...){
x <- list(...)
if(length(x) < 2)
stop("You have to specify at least two data sets!")
nams0 <- unique(x[[1]]$Sample)
for(i in 2:length(x)){
n0 <- length(nams0)
## extract sample names for dataset i
nams1 <- unique(x[[i]]$Sample)
## make unique names
nams0 <- make.unique(c(nams0, nams1))
## extract unique names for dataset i
nams2 <- nams0[(n0+1):length(nams0)]
## names that have been changed
nams3 <- nams1[nams1 != nams2]
nams4 <- nams2[nams1 != nams2]
## replace names that have been changed by unique names
for(j in 1:length(nams3)){
x[[i]]$Sample[x[[i]]$Sample == nams3[j]] <- nams4[j]
}
}
do.call('rbind', x)
}
|
17452b042b665d6ce44ff95cb5e7e06f30349568
|
361469edf71a80776e71c2c1b6da64c7a83d27db
|
/R연습2.R
|
ad3f3ce506c29f89c0911abad1a016f09784f4ba
|
[] |
no_license
|
HanseamChung/prac_R
|
d7d06fb811dabcc64cf0c5f116fa5f68c5e7f4f9
|
71d533430ef3d34c82fc503b3241e1b82ed31967
|
refs/heads/master
| 2021-04-30T14:22:51.025394
| 2018-02-27T07:12:46
| 2018-02-27T07:12:46
| 121,216,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 984
|
r
|
R연습2.R
|
a<-11
b<-22
setwd('c:\\easy_r')
getwd()
var1 <- c(1, 2, 5, 7, 8)
var2 <- c(1:5)
var3 <- seq(1, 5)
var4 <- seq(1, 10, by =2)
var1+2
var1-var2
str1 <- 'a'
str1
str3 <- 'Hello World!'
str4 <- c('a','b','c')
x <- c(1,2,3)
x
mean(x)
max(x)
min(x)
str5 <- c('Hello!', 'World', 'is', 'good!')
str5
paste(str5, collapse =',')
paste(str5, collapse = ' ')
x_mean <- mean(x)
str5_paste <- paste(str5, collapse = ' ')
x <- c('a', 'a', 'b', 'c')
x
qpolt(x)
qplot(x)
libra(ggplot2)
library(ggplot2)
qplot(x)
qplot(data = mpg, x = hwy)
qplot(data = mpg, x = hwy)
qplot(data = mpg, x = drv)
qplot(data = mpg, x = drv, y = hwy)
qplot(data = mpg, x = drv, y = hwy, geom='line')
qplot(data = mpg, x = drv, y = hwy, geom = 'boxplot')
qplot(data = mpg, x = drv, y = hwy, geom = 'boxplot', colour = drv)
qplot(data = mpg, x = drv, y = hwy, geom = 'boxplot', colour = hwy)
?qplot
date2 <- seq(from=as.Date('2014-01-01'),to=as.Date('2014-05031'),by='month'))
date2
|
3b849553f6a24ca9888e8b337ae8319a9c3347ef
|
67e26dbc19937477f768935005c200bfaae11471
|
/R_practice/Ggplot2_prac.R
|
40e30ad8ebd3921856da7bdbee09c627af4f2a64
|
[] |
no_license
|
skickham/brainteasers
|
f34f9eacd5c7c80adabe7a4b4268c7e0f6a8a6af
|
a707a4b7dbff1ffedb86ae99684f152c1d622da3
|
refs/heads/master
| 2021-04-03T09:20:57.473616
| 2018-05-03T18:21:58
| 2018-05-03T18:21:58
| 125,241,634
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,885
|
r
|
Ggplot2_prac.R
|
setwd(dir = '/Users/skick/Desktop/NYC Data Science Academy/Class_R/')
library(dplyr)
library(ggplot2)
#Question 1
#1
Champions = read.csv('Champions.csv', stringsAsFactors = FALSE)
#View(Champions)
tbl_df = filter(Champions, HomeGoal > AwayGoal)
filter(Champions, HomeTeam == 'Barcelona' | HomeTeam == 'Real Madrid')
#2
Home = select(Champions, starts_with('Home'))
Smaller = select(Champions,
contains('Team'),
contains('Goal'),
contains('Corner'))
head(Home)
head(Smaller)
#3
arrange(Smaller, desc(HomeGoal))
#4
by_hometeam = group_by(Champions, HomeTeam)
summarise(by_hometeam,
Avg_goal = mean(HomeGoal),
Avg_poss = mean(HomePossession),
Avg_yellow = mean(HomeYellow))
#5
#optional
temp = mutate(CL, score = ifelse(HomeGoal > AwayGoal,
paste(HomeGoal, AwayGoal, sep = "-"),
paste(AwayGoal, HomeGoal, sep = "-")))
temp = group_by(temp, score)
temp = arrange(summarise(temp, n = sum(n)), desc(n))
temp[1:5, ]
## Another solution using apply
cl_sub2=select(CL,contains("Goal"))
# Nice solution by transpose the matrix.
all_score<-t(apply(cl_sub2,1,sort))
all<-data.frame(score=apply(all_score,1,paste,collapse=""))
score_frq<-all %>%
group_by(.,score)%>%
summarise(.,count=n()) %>%
arrange(.,desc(count))
score_frq[1:5,]
##### SE version of dplyr
##### https://cran.r-project.org/web/packages/dplyr/vignettes/nse.html
#Question 2
#1
data(cars)
p = ggplot(data = cars, aes(x = speed, y = dist)) +
geom_point()
#2
p +
ggtitle('Speed Vs. Distance') +
labs(x = 'Speed (mpg)', y = 'Stopping Distance (ft)')
#3
ggplot(data = cars, aes(x = speed, y = dist)) +
geom_point(pch = 17, col = 'red')
#Question 3
data(faithful)
#View(faithful)
#1
faithful$length = ifelse(faithful$eruptions < 3.2, 'short', 'long')
faithful$length = as.factor(faithful$length)
#2
ggplot(data = faithful, aes(x = length, y = waiting)) + geom_boxplot(aes(color = length))
#3
ggplot(data= faithful, aes(x = waiting)) + geom_density(aes(color = length))
#4
#From the density curves, it seems the waiting times for the long eruptions are around 80 minutes,
#and the times for the short eruptions is around 54 minutes.
#From the box plots, you can see the same thing within the common values.
#Question 4
knicks = load('Knicks.RDA') #saves the table under "data" for some reason ??????
knicks = data #reassign the data frame to "knicks"
#View(knicks)
#1
Winratio_byseason = knicks %>%
group_by(season) %>%
summarise(winning_ratio = sum(win == 'W')/n())
#could use spread to split the win into two columns then just count the columns that have it
ggplot(Winratio_byseason, aes(x = season, y = winning_ratio)) +
geom_bar(stat = 'identity', aes(fill = season)) #doesn't work unless use stat = 'identity'
#2
Winratio_byhome = knicks %>%
group_by(season, visiting) %>%
mutate(winning_ratio = sum(win == 'W')/n()) #can use summarise instead of mutate
ggplot(Winratio_byhome,
aes(x = season, y = winning_ratio)) +
geom_bar(aes(fill = visiting),
position = 'dodge',
stat = 'identity')
#3
ggplot(knicks, aes(x = points)) +
geom_histogram(binwidth = 5,
aes(fill = season)) +
facet_wrap(~season)
#4
#optional
knicks3 <- group_by(knicks, opponent) %>%
summarise(ratio=sum(win=="W")/n(), diff=mean(points-opp))
ggplot(knicks3,aes(x=diff, y=ratio)) +
geom_point(color='red4',size=4)+
geom_hline(yintercept=0.5,colour='grey20',size=0.5,linetype=2)+ #at 0.5 for winning/losing percentage
geom_vline(xintercept=0,colour='grey20',size=0.5,linetype=2)+ #at 0 for winning/losing point diff #could put at mean
geom_text(aes(label=substring(opponent,1,5)),
hjust=0.7, vjust=1.4,angle = -35)+
theme_bw()
|
3a328d0d0897fe20af3a81ee55754f8513303b3a
|
f7408683a4b9f3ea36e6c56588f257eba9761e12
|
/R/f_sum2.R
|
6b751c6d0907d99f554d8cbd9c1580e2a36ed5cd
|
[] |
no_license
|
refunders/refund
|
a12ad139bc56f4c637ec142f07a78657727cc367
|
93cb2e44106f794491c7008970760efbfc8a744f
|
refs/heads/master
| 2023-07-21T21:00:06.028918
| 2023-07-17T20:52:08
| 2023-07-17T20:52:08
| 30,697,953
| 42
| 22
| null | 2023-06-27T15:17:47
| 2015-02-12T10:41:27
|
R
|
UTF-8
|
R
| false
| false
| 683
|
r
|
f_sum2.R
|
#' Sum computation 2
#'
#' Internal function used compute a sum in FPCA-based covariance updates
#'
#' @param y outcome matrix
#' @param fixef current estimate of fixed effects
#' @param mu.q.c current value of mu.q.c
#' @param kt number of basis functions
#' @param theta spline basis
#'
#' @author Jeff Goldsmith \email{ajg2202@@cumc.columbia.edu}
#'
f_sum2 = function(y, fixef, mu.q.c, kt, theta){
I = dim(mu.q.c)[1]
kp = dim(mu.q.c)[2]
ret.sum = matrix(0, nrow = kp*kt, ncol = 1)
for(i in 1:I){
obs.pts = !is.na(y[i,])
ret.sum = ret.sum + kronecker((matrix(mu.q.c[i,])), theta[,obs.pts]) %*% matrix(y[i, obs.pts] - fixef[i,obs.pts])
}
return(ret.sum)
}
|
f274cd1ce57c9aa316bd26586bc5438662cced42
|
333c0b5c43c56475c0c885b07d58817ae0cd0430
|
/análisis/03_visualización.R
|
537186e138eb77cc6bca5dbc444705a248bea5c2
|
[
"MIT"
] |
permissive
|
RMedina19/Intersecta-PJCDMX
|
6d2172962236ca748176b960fd53f4cc3d8884bc
|
f717781e705fa10d7ea8648d5655ce41b4ba1587
|
refs/heads/main
| 2023-03-03T02:34:41.080298
| 2021-02-10T17:21:06
| 2021-02-10T17:21:06
| 329,800,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,930
|
r
|
03_visualización.R
|
#------------------------------------------------------------------------------#
# Proyecto: TRIBUNAL SUPERIOR DE JUSTICIA DE CIUDAD DE MÉXICO
# Objetivo: Procesar datos de la PJCDMX
# Encargadas: Estefanía Vela y Regina I. Medina
# Correo: rmedina@intersecta.org
# Fecha de creación: 10 de enero de 2021
# Última actualización: 24 de enero de 2021
#------------------------------------------------------------------------------#
# 0. Configuración inicial -----------------------------------------------------
# Librerías
require(pacman)
p_load(scales, tidyverse, stringi, dplyr, plyr, foreign, readxl, janitor,
extrafont, beepr, extrafont, treemapify, ggmosaic, srvyr, ggrepel,
lubridate, cowplot)
# Limpiar espacio de trabajo
rm(list=ls())
# Establecer directorios
inp <- "datos_limpios/"
out <- "figuras/"
asuntos <- "asuntos_ingresados/"
personas <- "personas_agredidas/"
sitjur <- "situacion_juridica/"
alternas <- "soluciones_alternas/"
medidas <- "medidas_cautelares/"
sentencias <- "sentencias/"
# 1. Cargar datos --------------------------------------------------------------
load(paste0(inp, "df_asuntos_ingresados.RData"))
load(paste0(inp, "df_personas_agredidas.RData"))
load(paste0(inp, "df_situacion_juridica.RData"))
load(paste0(inp, "df_soluciones_alternas.RData"))
load(paste0(inp, "df_medidas_cautelares.RData"))
load(paste0(inp, "df_sentencias.RData"))
# 2. Configuración del tema para visualización ---------------------------------
tema <- theme_linedraw() +
theme(text = element_text(family = "Helvetica", color = "grey35"),
plot.title = element_text(size = 20,
face = "bold",
margin = margin(10,4,5,4),
family="Helvetica",
color = "black"),
plot.subtitle = element_text(size = 18,
color = "#666666",
margin = margin(5, 5, 5, 5),
family="Helvetica"),
plot.caption = element_text(hjust = 1,
size = 14,
family = "Helvetica"),
panel.grid = element_line(linetype = 2),
legend.position = "none",
panel.grid.minor = element_blank(),
legend.title = element_text(size = 16,
family="Helvetica"),
legend.text = element_text(size = 16,
family="Helvetica"),
legend.title.align = 1,
axis.title = element_text(size = 16,
hjust = .5,
margin = margin(1,1,1,1),
family="Helvetica"),
axis.text = element_text(size = 16,
face = "bold",
family="Helvetica",
angle=0,
hjust=.5),
strip.background = element_rect(fill="#525252"),
strip.text.x = element_text(size=16,
family = "Helvetica"),
strip.text.y = element_text(size=16,
family = "Helvetica")) +
scale_fill_manual(values = c("#EDF7FC","#F6CCEE", "#04C0E4", "#016FB9",
"#3AB79C","#A3FEFC", "#FF82A9", "#e63946", "#457b9d", "#2a9d8f",
"#e5989b"))
fill_base <- c("#F178B1","#998FC7", "#FF8C00", "#663399", "#C2F970",
"#00979C", "#B1EDE8", "#FE5F55")
fill_autoridades <- c("#F178B1","#998FC7", "#FF8C00", "#663399", "#C2F970",
"#00979C", "#B1EDE8", "#FE5F55", "#C52233")
fill_dos <- c("#F178B1","#998FC7")
fill_default <- c("#EDF7FC", "#F6CCEE", "#04C0E4", "#016FB9", "#3AB79C",
"#A3FEFC", "#FF82A9", "#e63946", "#457b9d", "#2a9d8f",
"#e5989b", "#9b5de5", "#0466c8", "#ffee32")
# Establecer vectores de texto
leyenda <- "\n Fuente: Respuesta del TSJCDMX a solicitud de acceso a la información pública. "
# 3. Visualizaciones de asuntos ingresados -------------------------------------
# Delitos
df_delitos <- df_asuntos_ingresados %>%
group_by(delitos_cortos) %>%
summarize(total = n()) %>%
mutate(denomin = sum(total, na.rm = T),
porcent = round(total / denomin * 100, 1))
# Visualización
ggplot(df_delitos,
aes(x = delitos_cortos, y = porcent, fill = fill_default[10])) +
geom_bar(stat = "identity") +
geom_text(aes(label = paste0(porcent, "%")),
position = position_stack(vjust = 0.5),
size = 4, color = "black", family = "Helvetica") +
labs(title = "Distribución de delitos cometidos en CDMX",
subtitle = "(2011-2020)",
x = "",
y = "Porcentaje") +
coord_flip(ylim=c(0,100)) +
tema
# Guardar visualización
ggsave(paste0(out, asuntos, "g_delitos.png"), width = 18, height = 10)
# Delitos por comisión
df_delitos_comision <- df_asuntos_ingresados %>%
group_by(delitos_cortos, comision) %>%
summarize(total = n()) %>%
mutate(denomin = sum(total, na.rm = T),
porcent = round(total / denomin * 100, 1))
# Visualización
ggplot(df_delitos_comision,
aes(x = delitos_cortos, y = porcent, fill = comision)) +
geom_bar(stat = "identity") +
geom_text(aes(label = paste0(porcent, "%")),
position = position_stack(vjust = 0.5),
size = 4, color = "black", family = "Helvetica") +
labs(title = "Distribución de delitos cometidos en CDMX",
subtitle = "Según comisión (2011-2020)",
x = "",
y = "Porcentaje") +
coord_flip(ylim=c(0,100)) +
tema +
scale_fill_manual(values= fill_default)
# Guardar visualización
ggsave(paste0(out, asuntos, "g_delitos_comision.png"), width = 18, height = 10)
# Delitos por año
df_delitos_year <- df_asuntos_ingresados %>%
group_by(year_ingreso, delitos_cortos) %>%
summarize(total = n()) %>%
ungroup() %>%
group_by(year_ingreso) %>%
mutate(denomin = sum(total, na.rm = T),
porcent = round(total / denomin * 100, 1))
# View(df_year)
# Visualización
ggplot(df_delitos_year) +
geom_area(aes(x = as.integer(year_ingreso), y = porcent, fill=delitos_cortos), size=2.5) +
labs(title = "Delitos de los asuntos ingresados al TSJ-CDMX",
subtitle = "Por año \n", y = "\n Porcentaje \n", x="",
caption = leyenda,
fill ="Delitos:") +
scale_fill_manual(values = fill_default) +
scale_x_continuous(breaks=seq(from=2011, to=2020, by=1)) +
scale_y_continuous(breaks=seq(from=0, to=100, by=10)) +
tema +
theme(axis.text.x = element_text(angle = 0, hjust = .5, vjust = .5)) +
coord_cartesian(ylim = c(0, 100))+
theme(legend.position = "top")
# Guardar visualización
ggsave(paste0(out, asuntos, "g_delitos_año.png"), width = 20, height = 16)
# Delitos por sexo
df_delitos_sexo <- df_asuntos_ingresados %>%
# Delitos por año y por sexo
df_delitos_year_sexo <- df_asuntos_ingresados %>%
# 4. Visualizaciones de personas agredidas -------------------------------------
# Desagregación por sexo
df_sexo <- df_personas_agredidas %>%
rename(sexo = sexo_victima) %>%
group_by(sexo) %>%
summarize(total = n()) %>%
ungroup() %>%
mutate(denomin = sum(total, na.rm = T),
porcent = round(total / denomin * 100, 1)) %>%
filter(is.na(sexo) == FALSE)
# Visualización
ggplot(df_sexo,
aes(x = sexo, y = porcent, fill = sexo)) +
geom_bar(stat = "identity") +
geom_text(aes(label = paste0(porcent,"%")),
position = position_stack(vjust = 0.5),
size = 4, color = "black", family = "Helvetica") +
labs(title = "Proporción de víctimas",
subtitle = "Por sexo",
caption = leyenda,
x = "", y = "", fill = "") +
tema +
scale_fill_manual(values= fill_default) +
coord_flip(ylim=c(0,100)) +
theme(legend.position = "top")
# Guardar visualización
ggsave(paste0(out, personas, "g_víctimas_genero.png"), width = 18, height = 10)
# Breakdown by age: share of victims for each value of edad_victima.
df_edad <- df_personas_agredidas %>%
  rename(edad = edad_victima) %>%
  group_by(edad) %>%
  summarize(total = n()) %>%
  ungroup() %>%
  # denomin = grand total; porcent = share per age group, 1 decimal
  mutate(denomin = sum(total, na.rm = T),
         porcent = round(total / denomin * 100, 1)) %>%
  filter(is.na(edad) == FALSE)
# Horizontal bar chart of the victim share by age group.
ggplot(df_edad,
       aes(x = edad, y = porcent, fill = edad)) +
  geom_bar(stat = "identity") +
  geom_text(aes(label = paste0(porcent,"%")),
            position = position_stack(vjust = 0.5),
            size = 4, color = "black", family = "Helvetica") +
  labs(title = "Proporción de víctimas",
       subtitle = "Por edad",
       caption = leyenda,
       x = "",
       y = "",
       fill = "") +
  tema +
  #scale_fill_manual(values=c("#F178B1","#998FC7", "#04C0E4")) +
  coord_flip(ylim=c(0,100)) +
  theme(legend.position = "top")
# Save plot
ggsave(paste0(out, personas, "g_víctimas_edad.png"), width = 18, height = 10)
# 5. Legal-status ("situación jurídica") visualizations -------------------------
# By sex: count records per sexo_procesada and turn the counts into percentages.
df_genero <- df_situacion_juridica %>%
  rename(sexo = sexo_procesada) %>%
  # count() is shorthand for group_by() + summarize(n()) + ungroup()
  count(sexo, name = "total") %>%
  mutate(denomin = sum(total, na.rm = TRUE),
         porcent = round(total / denomin * 100, 1)) %>%
  filter(!is.na(sexo))
# By crime (not implemented)
# 6. Alternative-resolution visualizations (not implemented) --------------------
# 7. Precautionary-measure ("medidas cautelares") visualizations ----------------
# 7.1 Pre-trial detention by crime and form of commission ------------------------
# Data prep: for every (comision, delito) cell, the share that each medida
# represents; then keep only the "Prisión preventiva" rows.
df_prisprev <- df_medidas_cautelares %>%
  group_by(medida, delitos_cortos, comision) %>%
  summarize(total = n()) %>%
  ungroup() %>%
  group_by(comision, delitos_cortos) %>%
  mutate(denomin = sum(total, na.rm = T),
         porcent = round(total / denomin * 100, 1)) %>%
  filter(medida == "Prisión preventiva")
# View(df_prisprev)
# Bar chart of the pre-trial-detention share, faceted by crime.
ggplot(df_prisprev,
       aes(x = comision, y = porcent, fill = medida)) +
  geom_bar(stat = "identity", position = "stack") +
  geom_text(aes(label = paste0(porcent,"%")),
            position = position_stack(vjust = 0.5),
            size = 4, color = "black", family = "Helvetica") +
  labs(title = "Proporción que representa la prisión preventiva del total de medidas cautelares",
       subtitle = "Por delito y forma de comisión \n",
       caption = leyenda,
       x = "",
       y = "",
       fill = "") +
  tema +
  scale_fill_manual(values=fill_default) +
  facet_wrap(~delitos_cortos) +
  coord_flip(ylim=c(0,100)) +
  theme(legend.position = "top")
# Save plot
ggsave(paste0(out, medidas, "g_delitos_medidas_culposos.png"), width = 18, height = 16)
# 7.2 Crimes by form of commission ----------------------------------------------
# Data prep: within each crime, the share of each form of commission.
df_delito <- df_medidas_cautelares %>%
  group_by(delitos_cortos, comision) %>%
  summarize(total = n()) %>%
  ungroup() %>%
  group_by(delitos_cortos) %>%
  mutate(denomin = sum(total, na.rm = T),
         porcent = round(total / denomin * 100, 1))
# View(df_delito)
# Stacked horizontal bars: commission mix per crime.
ggplot(df_delito, aes(x = delitos_cortos, y=porcent, fill=comision)) +
  geom_bar(stat="identity", position="stack") +
  scale_fill_manual(values=fill_default)+
  guides(fill = guide_legend(reverse=TRUE))+
  geom_text(aes(label=paste0(porcent,"%")),
            position = position_stack(vjust = 0.5),
            size=4, color="black", family = "Helvetica")+
  labs(title="Delitos por forma de comisión",
       caption=leyenda,
       x="", y="",
       subtitle = "Por delito y forma de comisión \n", fill="") +
  tema +
  coord_flip(ylim=c(0,100))+
  theme(legend.position = "top")
# Save plot
ggsave(paste0(out, medidas, "g_delitos_forma_comisión.png"), width = 18, height = 16)
# 7.3 Crimes by year --------------------------------------------------------------
# Data prep: within each year, the share of each crime.
df_year_delito <- df_medidas_cautelares %>%
  group_by(year_audiencia, delitos_cortos) %>%
  summarize(total = n()) %>%
  ungroup() %>%
  group_by(year_audiencia) %>%
  mutate(denomin = sum(total, na.rm = T),
         porcent = round(total / denomin * 100, 1))
# View(df_year)
# Stacked area chart of the crime mix over time.
# NOTE(review): the title says "personas sentenciadas" but the data comes from
# df_medidas_cautelares (people with precautionary measures) — confirm wording.
ggplot(df_year_delito) +
  geom_area(aes(x = as.integer(year_audiencia), y = porcent, fill=delitos_cortos), size=2.5) +
  labs(title = "Delitos de las personas sentenciadas en la CDMX",
       subtitle = "Por año \n", y = "\n Porcentaje \n", x="",
       caption = leyenda,
       fill ="Delitos:") +
  scale_fill_manual(values = fill_default) +
  scale_x_continuous(breaks=seq(from=2011, to=2020, by=1)) +
  scale_y_continuous(breaks=seq(from=0, to=100, by=10)) +
  tema +
  theme(axis.text.x = element_text(angle = 0, hjust = .5, vjust = .5)) +
  coord_cartesian(ylim = c(0, 100))+
  theme(legend.position = "top")
# Save plot
ggsave(paste0(out, medidas, "g_delitos_año.png"), width = 20, height = 16)
# 7.4 Precautionary measures by crime ---------------------------------------------
# Data prep: yearly share of each medida within every crime.
df_medidas_delito <- df_medidas_cautelares %>%
  group_by(delitos_cortos, year_audiencia, medida) %>%
  summarize(total = n()) %>%
  ungroup() %>%
  group_by(delitos_cortos, year_audiencia) %>%
  mutate(denomin = sum(total, na.rm = T),
         porcent = round(total / denomin * 100, 1))
# View(df_medidas_delito)
# Stacked area chart of the measure mix over time, faceted by crime.
# FIX(review): the fill aesthetic maps `medida`, yet the legend was labelled
# "Delitos:" and the title talked about crimes of sentenced people — both
# copy-pasted from section 7.3. Labels now describe what is actually plotted.
ggplot(df_medidas_delito) +
  geom_area(aes(x = as.integer(year_audiencia), y = porcent, fill = medida), size = 2.5) +
  labs(title = "Medidas cautelares dictadas en la CDMX",
       subtitle = "Por delito y año \n", y = "\n Porcentaje \n", x = "",
       caption = leyenda,
       fill = "Medidas cautelares:") +
  scale_fill_manual(values = fill_default) +
  scale_x_continuous(breaks = seq(from = 2011, to = 2020, by = 1)) +
  scale_y_continuous(breaks = seq(from = 0, to = 100, by = 10)) +
  tema +
  facet_wrap(~delitos_cortos) +
  theme(axis.text.x = element_text(angle = 90, hjust = .5, vjust = .5)) +
  coord_cartesian(ylim = c(0, 100)) +
  theme(legend.position = "right",
        legend.key.size = unit(.5, "cm"),
        legend.key.width = unit(.5, "cm"))
# Save plot
ggsave(paste0(out, medidas, "g_delitos_medidas.png"), width = 20, height = 16)
# 7.5 Precautionary measure by sex -------------------------------------------------
# Data prep: yearly share of each medida within every sex.
df_medida_sexo <- df_medidas_cautelares %>%
  rename(sexo = sexo_vinculada) %>%
  group_by(sexo, year_audiencia, medida) %>%
  filter(sexo != "No especificado") %>%
  summarize(total = n()) %>%
  ungroup() %>%
  group_by(sexo, year_audiencia) %>%
  mutate(denomin = sum(total, na.rm = T),
         porcent = round(total / denomin * 100, 1))
# Stacked area chart of the measure mix over time, faceted by sex.
# FIX(review): the fill aesthetic maps `medida`, but the legend was labelled
# "Delitos:" (copied from 7.3); relabelled to match the plotted variable.
ggplot(df_medida_sexo) +
  geom_area(aes(x = as.integer(year_audiencia), y = porcent, fill = medida), size = 2.5) +
  labs(title = "Medidas cautelares dictadas en la CDMX",
       subtitle = "Por año, por sexo \n", y = "\n Porcentaje \n", x = "",
       caption = leyenda,
       fill = "Medidas cautelares:") +
  scale_fill_manual(values = fill_default) +
  scale_x_continuous(breaks = seq(from = 2011, to = 2020, by = 1)) +
  scale_y_continuous(breaks = seq(from = 0, to = 100, by = 10)) +
  tema +
  facet_wrap(~sexo) +
  theme(axis.text.x = element_text(angle = 90, hjust = .5, vjust = .5)) +
  coord_cartesian(ylim = c(0, 100)) +
  theme(legend.position = "right",
        legend.key.size = unit(.5, "cm"),
        legend.key.width = unit(.5, "cm"))
# Save plot
ggsave(paste0(out, medidas, "g_medida_sexo.png"), width = 20, height = 16)
# 7.6 Pre-trial detention by crime and by sex --------------------------------------
# Data prep: among people under pre-trial detention, the yearly crime mix per sex.
df_medida_delito_sexo <- df_medidas_cautelares %>%
  rename(sexo = sexo_vinculada) %>%
  group_by(delitos_cortos, year_audiencia, medida, sexo) %>%
  summarize(total = n()) %>%
  ungroup() %>%
  group_by(medida, year_audiencia, sexo) %>%
  mutate(denomin = sum(total, na.rm = T),
         porcent = round(total / denomin * 100, 1)) %>%
  filter(medida == "Prisión preventiva",
         sexo != "No especificado")
# Stacked area chart of the crime mix over time, faceted by sex.
ggplot(df_medida_delito_sexo) +
  geom_area(aes(x = as.integer(year_audiencia), y = porcent, fill=delitos_cortos), size=2.5) +
  labs(title = "Delitos que tuvieron prisión preventiva en la CDMX",
       subtitle = "Por año, por sexo \n", y = "\n Porcentaje \n", x="",
       caption = leyenda,
       fill ="Delitos:") +
  scale_fill_manual(values = fill_default) +
  scale_x_continuous(breaks=seq(from=2011, to=2020, by=1)) +
  scale_y_continuous(breaks=seq(from=0, to=100, by=10)) +
  tema +
  facet_wrap(~sexo) +
  theme(axis.text.x = element_text(angle = 90, hjust = .5, vjust = .5)) +
  coord_cartesian(ylim = c(0, 100))+
  theme(legend.position = "right",
        legend.key.size = unit(.5, "cm"),
        legend.key.width = unit(.5,"cm"))
# Save plot
ggsave(paste0(out, medidas, "g_medidas_delito_sexo.png"), width = 20, height = 16)
# 7.7 Precautionary measures by year -----------------------------------------------
# Data prep: within each year, the share of each medida.
df_year_medidas <- df_medidas_cautelares %>%
  group_by(year_audiencia, medida) %>%
  summarize(total = n()) %>%
  ungroup() %>%
  group_by(year_audiencia) %>%
  mutate(denomin = sum(total, na.rm = T),
         porcent = round(total / denomin * 100, 1))
# Stacked area chart of the measure mix over time.
ggplot(df_year_medidas) +
  geom_area(aes(x = as.integer(year_audiencia), y = porcent, fill=medida), size=2.5) +
  labs(title = "Medidas cautelares dictadas por el Tribunal Superior de Justicia de la CDMX",
       subtitle = "Por año \n", y = "\n Porcentaje \n", x="",
       caption = leyenda,
       fill ="Medidas cautelares:") +
  scale_fill_manual(values = fill_default) +
  scale_x_continuous(breaks=seq(from=2011, to=2020, by=1)) +
  scale_y_continuous(breaks=seq(from=0, to=100, by=10)) +
  tema +
  theme(axis.text.x = element_text(angle = 90, hjust = .5, vjust = .5)) +
  coord_cartesian(ylim = c(0, 100))+
  theme(legend.position = "right",
        legend.key.size = unit(.5, "cm"),
        legend.key.width = unit(.5,"cm"))
# Save plot
ggsave(paste0(out, medidas, "g_medidas_año.png"), width = 20, height = 16)
# 8. Sentencing visualizations ------------------------------------------------------
# 8.1 Verdict by sex ----------------------------------------------------------------
# FIX(review): this section grouped df_medidas_cautelares by anio_ing / sentencia /
# sexo, but that frame exposes year_audiencia and sexo_vinculada (see section 7).
# The columns used here match the `sentenciados` frame used in section 8.3, so the
# sentencing data frame is used instead. The fill legend for `sentencia` was also
# mislabelled "Delitos:".
df_sentencia_sexo <- sentenciados %>%
  group_by(anio_ing, sentencia, sexo) %>%
  filter(sexo != "No especificado") %>%
  summarize(total = n()) %>%
  ungroup() %>%
  group_by(anio_ing, sexo) %>%
  mutate(denomin = sum(total, na.rm = T),
         porcent = round(total / denomin * 100, 1))
# Stacked area chart of verdict shares over time, faceted by sex.
ggplot(df_sentencia_sexo) +
  geom_area(aes(x = as.integer(anio_ing), y = porcent, fill = sentencia), size = 2.5) +
  labs(title = "Sentido de la sentencia",
       subtitle = "Por año y sexo de la persona sentenciada \n", y = "\n Porcentaje \n", x = "",
       caption = leyenda,
       fill = "Sentencia:") +
  scale_fill_manual(values = fill_default) +
  scale_x_continuous(breaks = seq(from = 2011, to = 2019, by = 1)) +
  scale_y_continuous(breaks = seq(from = 0, to = 100, by = 10)) +
  tema +
  facet_wrap(~sexo) +
  theme(axis.text.x = element_text(angle = 0, hjust = .5, vjust = .5)) +
  coord_cartesian(ylim = c(0, 100)) +
  theme(legend.position = "right")
# Save plot
ggsave(paste(out, "1 sentido sexo.png", sep = "/"), width = 20, height = 16)
# 8.2 Crimes of convicted people -----------------------------------------------------
# FIX(review): as in 8.1, this section used df_medidas_cautelares although the
# columns it reads (anio_ing, sentencia) belong to the `sentenciados` frame used
# in section 8.3; the sentencing data frame is used instead.
df_delitos_condenadas <- sentenciados %>%
  group_by(anio_ing, sentencia, delitos_cortos, sexo) %>%
  filter(sexo != "No especificado") %>%
  filter(sentencia == "Condenatoria") %>%
  summarize(total = n()) %>%
  ungroup() %>%
  group_by(anio_ing, sexo) %>%
  mutate(denomin = sum(total, na.rm = T),
         porcent = round(total / denomin * 100, 1))
# Stacked area chart of the crime mix among convictions, faceted by sex.
ggplot(df_delitos_condenadas) +
  geom_area(aes(x = as.integer(anio_ing), y = porcent, fill = delitos_cortos), size = 2.5) +
  labs(title = "Delitos de las personas condenadas",
       subtitle = "Por año y sexo de la persona condenada \n", y = "\n Porcentaje \n", x = "",
       caption = leyenda,
       fill = "Delitos:") +
  scale_fill_manual(values = fill_default) +
  scale_x_continuous(breaks = seq(from = 2011, to = 2019, by = 1)) +
  scale_y_continuous(breaks = seq(from = 0, to = 100, by = 10)) +
  tema +
  facet_wrap(~sexo) +
  theme(axis.text.x = element_text(angle = 0, hjust = .5, vjust = .5)) +
  coord_cartesian(ylim = c(0, 100)) +
  theme(legend.position = "right")
# Save plot
ggsave(paste(out, "1 condena sexo.png", sep = "/"), width = 20, height = 16)
# 8.3 Convicted people by sex ---------------------------------------------------------
# Data prep: yearly sex share of convicted people within each crime.
df_condenadas_sexo <- sentenciados %>%
  group_by(anio_ing, sentencia, delitos_cortos, sexo) %>%
  filter(sexo != "No especificado") %>%
  filter(sentencia == "Condenatoria") %>%
  summarize(total = n()) %>%
  ungroup() %>%
  group_by(anio_ing, delitos_cortos) %>%
  mutate(denomin = sum(total, na.rm = T),
         porcent = round(total / denomin * 100, 1))
# Stacked area chart of the sex mix over time, faceted by crime.
# FIX(review): the plot referenced `porano`, which is never defined in this
# script; it now uses the data frame built right above.
ggplot(df_condenadas_sexo) +
  geom_area(aes(x = as.integer(anio_ing), y = porcent, fill = sexo), size = 2.5) +
  labs(title = "Sexo de las personas condenadas en la CDMX",
       subtitle = "Por año y por delito \n", y = "\n Porcentaje \n", x = "",
       caption = leyenda,
       fill = "Sexo de la persona condenada:") +
  scale_fill_manual(values = fill_default) +
  scale_x_continuous(breaks = seq(from = 2011, to = 2019, by = 1)) +
  scale_y_continuous(breaks = seq(from = 0, to = 100, by = 10)) +
  tema +
  facet_wrap(~delitos_cortos) +
  theme(axis.text.x = element_text(angle = 90, hjust = .5, vjust = .5)) +
  coord_cartesian(ylim = c(0, 100)) +
  theme(legend.position = "right")
# Save plot
ggsave(paste(out, "condena sexo por año por delito.png", sep = "/"), width = 20, height = 16)
# End of script #
|
c6eced4e71e9c64e5f786743267a8b1714b1a081
|
e49fb88b541ac83a3dadb10deaff64e1772dabac
|
/03_getting_and_cleaning_data/quiz02.R
|
230a9bdf3a726a45c89c5d1d47334d639f06c80b
|
[] |
no_license
|
Cardosaum/data_science_specialization_jhu
|
61652b9e4a27a0f716b1f822de28650912e33d29
|
7268ddee814ff1afc90c5e88b382a1afb196d172
|
refs/heads/master
| 2022-12-21T11:14:57.892640
| 2020-09-06T23:22:22
| 2020-09-06T23:22:22
| 257,573,104
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,414
|
r
|
quiz02.R
|
# Quiz, week 2
## Let's set the cwd correctly first. (Using as reference the directory containing the .Rproj file)
## NOTE(review): setwd() in a script is fragile — it assumes the script is
## launched from the project root and breaks on a second run; consider
## here::here() or project-relative paths instead.
setwd("./03_getting_and_cleaning_data")
## Now we load the libraries
library(data.table, quietly = TRUE)
library(tidyverse, quietly = TRUE)
library(RMariaDB, quietly = TRUE)
library(xml2, quietly = TRUE)
library(readxl, quietly = TRUE)
## helper functions
### Download `fileUrl` to `fileName` unless the file already exists (simple
### idempotent cache so re-running the quiz script does not re-download).
fileDownload <- function(fileUrl, fileName){
  if (!file.exists(fileName)){
    # Robustness: the destination directory ("./data") may not exist yet on a
    # fresh checkout; create it (no-op when it is already there).
    dir.create(dirname(fileName), showWarnings = FALSE, recursive = TRUE)
    # Name the third argument — positionally it is `method`, which is easy to
    # misread as a mode or flag.
    download.file(fileUrl, fileName, method = "curl")
  }
}
# Q1: American Community Survey CSV
## Download file
fileDownload("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv", "./data/american_communit_survey_quiz2.csv")
acs <- read_csv("./data/american_communit_survey_quiz2.csv")
# the package required for this question is outdated...
# Q4: character counts of selected lines of an HTML page
## Download file
fileDownload("http://biostat.jhsph.edu/~jleek/contact.html", "./data/contact.html")
contactFile <- readLines("./data/contact.html")
# NOTE(review): despite the name, this holds the number of *characters* in
# lines 10, 20, 30 and 100 (nchar), not a count of lines.
numberOfLines <- contactFile[c(10, 20, 30, 100)] %>% nchar()
print(numberOfLines)
# Q5: sum of the 4th column of a fixed-width SST file (header skipped)
fileDownload("https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for", "./data/sst_data.txt")
### credit for this line: https://stackoverflow.com/questions/14383710/read-fixed-width-text-file
sstData <- read_fwf("./data/sst_data.txt", skip = 4, fwf_widths(c(12, 7, 4, 9, 4, 9, 4, 9, 4)))
sumForth <- sstData[[4]] %>% sum()
print(sumForth)
|
9e7f9f341e00f68944e0df029b9ac524e8ee5446
|
efbdac03439077f3d3059c4ad0aef4e67a63d108
|
/Sport Analytics/Project/R/get_discrete_plot.R
|
1e7cb176f4654acfd89adc6fa5600e561b9ab6d6
|
[] |
no_license
|
anhnguyendepocen/LiU-Statistics-and-Machine-Learning
|
afc8811dacad80ca4e0e7948cbfb4f455f7e5148
|
47067c552a8071753526971dd105730a8683f735
|
refs/heads/master
| 2023-01-20T11:03:16.186847
| 2020-11-13T11:58:11
| 2020-11-13T11:58:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,194
|
r
|
get_discrete_plot.R
|
get_discrete_plot <- function(game_shots = choose_data(), orientation) {
  # Builds a plotly rink plot: the offensive zone is split into 9 angular
  # sectors (4 on each side of the net plus one far band), each sector is
  # shaded by its share of shots, and individual shots are overlaid as
  # markers colored by outcome, with hover text.
  #
  # game_shots  : data frame with xAdjCoord / yAdjCoord / outcome / hover_info
  #               columns (default comes from the project-level choose_data()).
  # orientation : "Vertical" puts the net at y = 89; any other value uses the
  #               mirrored layout with the net at x = 89.
  # Relies on `rink_shapes` (rink outline, defined elsewhere in the app) and
  # rlist::list.append().
  #
  # Changes vs. previous revision: debug print() calls removed, 1:length()
  # loops replaced by seq_along() (safe on empty input), T/F spelled out,
  # dead commented-out quadrant-counting code dropped. All plotly path and
  # label strings are preserved byte-for-byte.

  # ---- colour helpers -------------------------------------------------------
  den_color <- c("#C2FFC8","#03FF03")
  colors_points <- c("goal" = "#eb1010",
                     "successful" = "#10EA10",
                     "failed" = "#1010EA")
  # Map a vector of sector shares onto the green density ramp; the returned
  # vector is named by the min-shifted shares.
  get_colors_discrete <- function(level_list){
    level_list <- level_list - min(level_list)
    palette <- colorRampPalette(den_color)(floor(max(level_list*100))+1)
    cols_needed <- floor(level_list*100) +1
    cols <- palette[cols_needed]
    names(cols) <- level_list
    return(cols)
  }

  if(length(game_shots[,1]) > 0){
    if(orientation == 'Vertical'){
      # Sectors 1-4: right of the net; 5-8: left; 9: far band (y < 25.5).
      n_shots <- rep(0,9)
      # Sector boundary angles (degrees), measured at the net located at
      # (x = 0, y = 89); the +/- 3 offset shifts the pivot off the goal line.
      angles <- c(90, 78.69007, 52.43141, 35.62294, 0)
      for(i in seq_along(game_shots$xAdjCoord)){
        if(game_shots$yAdjCoord[i] < 25.5){
          n_shots[9] <- n_shots[9] +1
        } else {
          if(game_shots$xAdjCoord[i] > 0){
            angle <- atan2(3 + game_shots$xAdjCoord[i],89-game_shots$yAdjCoord[i]) * 180 /pi
            for(j in 1:4){
              if(angle <= angles[j] && angle >= angles[j+1]){
                n_shots[j] <- n_shots[j]+1
              }
            }
          } else {
            angle <- atan2(3 - game_shots$xAdjCoord[i],89-game_shots$yAdjCoord[i]) * 180 /pi
            for(j in 1:4){
              if(angle <= angles[j] && angle >= angles[j+1]){
                n_shots[j+4] <- n_shots[j+4]+1
              }
            }
          }
        }
      }
      n_shots_perc <- n_shots/sum(n_shots)
      cols <- get_colors_discrete(n_shots_perc)
      # One translucent SVG-path triangle/band per sector, appended to the
      # static rink outline. (Path strings kept verbatim, including the
      # original "L-42.5,54" spacing quirk — SVG parses it fine.)
      lay <- list.append(rink_shapes,list(type = 'path',
                                          path = ' M 0,89 L 42.5,89 L 42.5,80.9 Z',
                                          fillcolor = cols[[1]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 0,89 L 42.5,80.9 L 42.5,54 Z',
                                  fillcolor = cols[[2]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 0,89 L 42.5,54 L 42.5,25.5 Z',
                                  fillcolor = cols[[3]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 0,89 L 42.5,25.5 L 0,25.5 Z',
                                  fillcolor = cols[[4]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 0,89 L -42.5,89 L -42.5,80.9 Z',
                                  fillcolor = cols[[5]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 0,89 L -42.5,80.9 L-42.5,54 Z',
                                  fillcolor = cols[[6]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 0,89 L -42.5,54 L -42.5,25.5 Z',
                                  fillcolor = cols[[7]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 0,89 L -42.5,25.5 L 0,25.5 Z',
                                  fillcolor = cols[[8]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M -42.5,25.5 L42.5,25.5 L42.5,0 L-42.5,0 Z',
                                  fillcolor = cols[[9]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      # Shot markers plus one count/percentage annotation per sector.
      p <- plot_ly(x = game_shots$xAdjCoord, y = game_shots$yAdjCoord) %>%
        layout(shapes = lay,
               xaxis = list(title = list(text ="")),
               yaxis = list(title = list(text = ""), #visible=F),
                            scaleanchor = "x", scaleratio = 0.9))%>%
        add_trace(color = game_shots$outcome, colors = colors_points,
                  type = "scatter", mode = 'markers',
                  text = game_shots$hover_info, hoverinfo="text",
                  opacity = 0.8,
                  marker = list(sizeref=0.7, sizemode="area",
                                line = list(color='black', width = 1))) %>%
        add_text(x = 52, y = 85, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[1], "; ",
                              floor(abs(n_shots_perc[1])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(x = 52, y = 68, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[2], "; ",
                              floor(abs(n_shots_perc[2])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(x = 52, y = 40, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[3], "; ",
                              floor(abs(n_shots_perc[3])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(x = 18, y = 40, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[4], "; ",
                              floor(abs(n_shots_perc[4])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(x = -52, y = 85, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[5], "; ",
                              floor(abs(n_shots_perc[5])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(x = -52, y = 68, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[6], "; ",
                              floor(abs(n_shots_perc[6])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(x = -52, y = 40, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[7], "; ",
                              floor(abs(n_shots_perc[7])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(x = -18, y = 40, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[8], "; ",
                              floor(abs(n_shots_perc[8])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(x = 0, y = 10, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[9], "; ",
                              floor(abs(n_shots_perc[9])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip")
    }
    else{
      # Horizontal layout: same sector logic with x and y axes swapped
      # (net at x = 89, far band is x < 25.5).
      n_shots <- rep(0,9)
      angles <- c(90, 78.69007, 52.43141, 35.62294, 0)
      for(i in seq_along(game_shots$yAdjCoord)){
        if(game_shots$xAdjCoord[i] < 25.5){
          n_shots[9] <- n_shots[9] +1
        } else {
          if(game_shots$yAdjCoord[i] > 0){
            angle <- atan2(3 + game_shots$yAdjCoord[i],89-game_shots$xAdjCoord[i]) * 180 /pi
            for(j in 1:4){
              if(angle <= angles[j] && angle >= angles[j+1]){
                n_shots[j] <- n_shots[j]+1
              }
            }
          } else {
            angle <- atan2(3 - game_shots$yAdjCoord[i],89-game_shots$xAdjCoord[i]) * 180 /pi
            for(j in 1:4){
              if(angle <= angles[j] && angle >= angles[j+1]){
                n_shots[j+4] <- n_shots[j+4]+1
              }
            }
          }
        }
      }
      n_shots_perc <- n_shots/sum(n_shots)
      cols <- get_colors_discrete(n_shots_perc)
      lay <- list.append(rink_shapes,list(type = 'path',
                                          path = ' M 89,0 L 89,42.5 L 80.9,42.5 Z',
                                          fillcolor = cols[[1]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 89,0 L 80.9,42.5 L 54,42.5 Z',
                                  fillcolor = cols[[2]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 89,0 L 54,42.5 L 25.5,42.5 Z',
                                  fillcolor = cols[[3]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 89,0 L 25.5,42.5 L 25.5,0 Z',
                                  fillcolor = cols[[4]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 89,0 L 89,-42.5 L 80.9,-42.5 Z',
                                  fillcolor = cols[[5]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 89,0 L 80.9,-42.5 L 54,-42.5 Z',
                                  fillcolor = cols[[6]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 89,0 L 54,-42.5 L 25.5,-42.5 Z',
                                  fillcolor = cols[[7]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 89,0 L 25.5,-42.5 L 25.5,0 Z',
                                  fillcolor = cols[[8]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      lay <- list.append(lay,list(type = 'path',
                                  path = ' M 25.5,42.5 L25.5,-42.5 L0,-42.5 L0,42.5 Z',
                                  fillcolor = cols[[9]],line = list(color = 'rgba(0,0,0,0.3)'), opacity = 0.3))
      p <- plot_ly(x = game_shots$xAdjCoord, y = game_shots$yAdjCoord) %>%
        layout(shapes = lay,
               xaxis = list(title = list(text ="")),
               yaxis = list(title = list(text = ""), #visible=F),
                            scaleanchor = "x", scaleratio = 0.9))%>%
        add_trace(color = game_shots$outcome, colors = colors_points,
                  type = "scatter", mode = 'markers',
                  text = game_shots$hover_info, hoverinfo="text",
                  opacity = 0.8,
                  marker = list(sizeref=0.7, sizemode="area",
                                line = list(color='black', width = 1))) %>%
        add_text(y = -52, x = 88, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[1], "; ",
                              floor(abs(n_shots_perc[1])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(y = -52, x = 65, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[2], "; ",
                              floor(abs(n_shots_perc[2])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(y = -52, x = 37, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[3], "; ",
                              floor(abs(n_shots_perc[3])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(y = -18, x = 40, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[4], "; ",
                              floor(abs(n_shots_perc[4])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(y = 52, x = 88, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[5], "; ",
                              floor(abs(n_shots_perc[5])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(y = 52, x = 65, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[6], "; ",
                              floor(abs(n_shots_perc[6])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(y = 52, x = 37, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[7], "; ",
                              floor(abs(n_shots_perc[7])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(y = 18, x = 40, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[8], "; ",
                              floor(abs(n_shots_perc[8])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip") %>%
        add_text(y = 0, x = 10, showlegend = FALSE,
                 text = paste("Shots:\n", n_shots[9], "; ",
                              floor(abs(n_shots_perc[9])*100),'%',sep = ""),
                 textfont = list(color = '#000000', size = 12, family = 'sans serif'),
                 hoverinfo = "skip")
    }
  }
  else{
    # No shots selected: draw the bare rink only.
    p <- plot_ly() %>%
      layout(shapes = rink_shapes,
             xaxis = list(title = list(text ="")),
             yaxis = list(title = list(text = ""), #visible=F),
                          scaleanchor = "x", scaleratio = 0.9))
  }
  return(p)
}
|
fa51befefa4b9e454ec53103df6c1852f942770f
|
07a74984bf59ce4486e1bcaefafb8ce692b50d5a
|
/tests/testthat/test-layer_path.R
|
8ad78dc3fd4dd079dde557f163442b4a7739b34c
|
[] |
no_license
|
SymbolixAU/mapdeck
|
c3bc3a61b8d8ade69b9b67fa69a00f9294281630
|
6138c6845e37ab3479e4ff65d9b0fff29e20f070
|
refs/heads/master
| 2023-09-03T22:34:43.418728
| 2023-08-24T22:14:59
| 2023-08-24T22:14:59
| 141,350,341
| 344
| 50
| null | 2023-08-09T22:22:59
| 2018-07-17T22:06:34
|
HTML
|
UTF-8
|
R
| false
| false
| 2,906
|
r
|
test-layer_path.R
|
context("path")
# NOTE(review): the entire body of this test is commented out, so it currently
# asserts nothing (testthat will report it as an empty/skipped test). The
# commented code is kept as a template: it built an sf LINESTRING fixture from
# `roads`, encoded it with googlePolylines, and compared add_path()'s GeoJSON /
# polyline output against the expected strings. Re-enable once the fixtures
# and expected strings are refreshed.
test_that("add_path accepts multiple objects", {
  # testthat::skip_on_cran()
  #
  # geo <- '[{"type":"Feature","properties":{"stroke_colour":"#440154FF","stroke_width":1.0,"dash_size":0.0,"dash_gap":0.0,"offset":0.0},"geometry":{"geometry":{"type":"LineString","coordinates":[[145.014291,-37.830458],[145.014345,-37.830574],[145.01449,-37.830703],[145.01599,-37.831484],[145.016479,-37.831699],[145.016813,-37.83175],[145.01712,-37.831742],[145.0175,-37.831667],[145.017843,-37.831559],[145.018349,-37.83138],[145.018603,-37.83133],[145.018901,-37.831301],[145.019136,-37.831301],[145.01943,-37.831333],[145.019733,-37.831377],[145.020195,-37.831462],[145.020546,-37.831544],[145.020641,-37.83159],[145.020748,-37.83159],[145.020993,-37.831664]]}}},{"type":"Feature","properties":{"stroke_colour":"#440154FF","stroke_width":1.0,"dash_size":0.0,"dash_gap":0.0,"offset":0.0},"geometry":{"geometry":{"type":"LineString","coordinates":[[145.015016,-37.830832],[145.015561,-37.831125],[145.016285,-37.831463],[145.016368,-37.8315],[145.016499,-37.831547],[145.016588,-37.831572],[145.01668,-37.831593],[145.01675,-37.831604],[145.016892,-37.83162],[145.016963,-37.831623],[145.017059,-37.831623],[145.017154,-37.831617],[145.017295,-37.831599],[145.017388,-37.831581],[145.017523,-37.831544],[145.018165,-37.831324],[145.018339,-37.831275],[145.018482,-37.831245],[145.018627,-37.831223],[145.01881,-37.831206],[145.018958,-37.831202],[145.019142,-37.831209],[145.019325,-37.831227],[145.019505,-37.831259],[145.020901,-37.831554],[145.020956,-37.83157]]}}}]'
  # poly <- '[{"stroke_colour":"#440154FF","stroke_width":1.0,"dash_size":0.0,"dash_gap":0.0,"offset":0.0,"polyline":"hw{eFibbtZVIX]zCiHh@aBJcAA}@OkAUcAa@cBIs@E{@?m@D{@F{@P{ANeAHS?SLq@"},{"stroke_colour":"#440154FF","stroke_width":1.0,"dash_size":0.0,"dash_gap":0.0,"offset":0.0,"polyline":"ty{eFyfbtZx@mBbAoCFOFYDQBS@MB[?M?QASC[AQG[k@_CIa@E]C[Ce@?[?e@Bc@Dc@z@wGBI"}]'
  #
  # ## sf
  # set_token("abc")
  # m <- mapdeck()
  #
  # df <- sfheaders::sf_to_df( roads )
  # df <- df[ df$linestring_id %in% c(1,2), ]
  # sf <- sfheaders::sf_linestring( df, linestring_id = "linestring_id", x = "x", y = "y")
  #
  # p <- add_path(map = m, data = sf)
  # expect_equal( as.character( p$x$calls[[1]]$args[[2]] ), geo )
  #
  # ## sfencoded
  # enc <- googlePolylines::encode( sf )
  # p <- add_path( map = m, data = enc )
  # expect_equal( as.character( p$x$calls[[1]]$args[[2]] ), poly )
  #
  # ## sfencodedLite
  # enc <- googlePolylines::encode( sf, strip = T )
  # p <- add_path( map = m, data = enc )
  # expect_equal( as.character( p$x$calls[[1]]$args[[2]] ), poly )
  #
  # ## data.frame with polyline
  # df <- as.data.frame( enc )
  # df$geometry <- unlist( df$geometry )
  #
  # p <- add_path( map = m, data = df, polyline = "geometry" )
  # expect_equal( as.character( p$x$calls[[1]]$args[[2]] ), poly )
  #
  # ## data.frame - not supported for LINESTRINGS
})
|
c01f33e852e8c36ed15b94a4e51c7c3ac8bfec05
|
b5f93df7ebaaa7e326dec711c55cd9519ce63a71
|
/enetLTS_UPDATED/subsample_sharing.R
|
fd1b2feb8e7a0cfc6a2563c27febb2c899deaaba
|
[] |
no_license
|
VincentWtrs/Sparse_Robust_Logistic_Regression
|
822a700b387a4d66e3c6e0f5a2bdf43eed37c49e
|
ad0212cb3e0a6ac7a245c482c66058edd9cdc603
|
refs/heads/master
| 2021-03-22T03:25:31.737217
| 2019-08-13T17:49:40
| 2019-08-13T17:49:40
| 123,120,927
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,573
|
r
|
subsample_sharing.R
|
index_comparison <- function(indexall){
  # Compare the best h-subsets found by warmCsteps() across the tuning grid.
  #
  # indexall : h x length(lambda) x length(alpha) array of observation indices
  #            (the best subset for each (lambda, alpha) combination).
  # Returns  : a list with one numeric vector per alpha value; element l of
  #            that vector is the id (row number in the matrix of unique
  #            subsets) of the subset used at the l-th lambda, so equal ids
  #            mean the same subsample is shared between lambda values.
  h <- dim(indexall)[1]
  length_lambda <- dim(indexall)[2]
  length_alpha <- dim(indexall)[3]
  # Sort each subset so that subsets with the same members compare equal
  # regardless of the order the C-steps produced them in.
  # (seq_len instead of 1:n guards against zero-length dimensions.)
  for(l in seq_len(length_lambda)){
    for(a in seq_len(length_alpha)){
      indexall[, l, a] <- sort(indexall[, l, a])
    }
  }
  # For each alpha, lay the subsets out as rows of a (lambda x h) matrix and
  # collect the distinct rows.
  index_dfrm <- vector("list", length = length_alpha)
  uniques <- vector("list", length = length_alpha)
  for(a in seq_len(length_alpha)){
    index_dfrm[[a]] <- matrix(NA, nrow = length_lambda, ncol = h)
    for(l in seq_len(length_lambda)){
      index_dfrm[[a]][l, ] <- indexall[, l, a]
    }
    uniques[[a]] <- unique(index_dfrm[[a]])  # each row is a distinct subset
  }
  # Label every lambda position with the id of the unique subset it matches.
  identicals <- vector("list", length = length_alpha)
  for(a in seq_len(length_alpha)){
    identicals[[a]] <- logical(length = length_lambda)
    # seq_len(nrow(...)) — not 1:nrow — so an empty matrix yields no iterations
    for(i in seq_len(nrow(uniques[[a]]))){
      for(l in seq_len(length_lambda)){
        if(all(uniques[[a]][i, ] == index_dfrm[[a]][l, ])){
          identicals[[a]][l] <- i
        }
      }
    }
  }
  return(identicals)
}
|
6c399ca0414d89a2f2beaf1fd6b92ff4b150138e
|
c182fa25d27f0f292332620f8240bd26e014a07a
|
/analysis/occupancy/src/setup.R
|
7c8cc92aa6c77704f61fbe32ef762d9b86f4c382
|
[] |
no_license
|
Magaiarsa/intFlex
|
14978e361120d22788ffe5d2f631933e32353481
|
41d18084cb8dfbb92cde416cddbd020c567d01a2
|
refs/heads/main
| 2023-02-24T03:00:01.292007
| 2021-02-01T10:29:11
| 2021-02-01T10:29:11
| 325,310,689
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,665
|
r
|
setup.R
|
## add making data null for each model type
# Prepares an occupancy-model input list for a given model `type`
# ("partner", "all", "role" or "cnodf") by pruning `input` in place:
#   * input$monitors — replaced with the parameter names to track;
#   * input$data     — subset to the covariates the model uses (name-pattern match);
#   * input$inits    — initial values for other model types removed, and the
#                      type-specific phi/gam entries renamed to the generic
#                      phi.var / gam.var (/ phi.mean / gam.mean) slots.
# Returns the modified `input` list.
# NOTE(review): the grepl/grep patterns are unescaped, so "." matches any
# character (e.g. "day.2" also matches "dayX2") — harmless only if the data
# names cannot collide; confirm.
setup <- function(type, input) {
  if (type == "partner") {
    print("partner setup")
    input$monitors <- c("phi.0",
                        "gam.0",
                        "phi.fra",
                        "gam.fra",
                        "phi.var",
                        "gam.var")
    vars <- c("X", "day", "day.2", "fra", "var.partner")
    input$data <- input$data[names(input$data)[grepl(paste(vars, collapse = "|"), names(input$data))]]
    remove.inits <- c("cnodf", "role")
    input$inits <- input$inits[!grepl(paste(remove.inits, collapse = "|"), names(input$inits))]
    names(input$inits)[grep("phi.var.partner", names(input$inits))] <- "phi.var"
    names(input$inits)[grep("gam.var.partner", names(input$inits))] <- "gam.var"
  }
  if (type == "all") {
    print("all setup")
    #input$data$degree <-NULL
    # NOTE(review): `vars` is assigned but never used in this branch — unlike
    # the other branches, input$data is not filtered here; confirm intent.
    vars <- c("X", "day", "day.2", "fra", "cnodf", "role", "partner")
    remove.inits <- c("degree")
    input$inits <- input$inits[!grepl(paste(remove.inits, collapse = "|"), names(input$inits))]
  }
  if (type == "role" | type == "cnodf") {
    # Both model types additionally monitor the mean terms.
    input$monitors <- c(
      "phi.0",
      "gam.0",
      "phi.fra",
      "gam.fra",
      "phi.var",
      "phi.mean",
      "gam.var",
      "gam.mean"
    )
    if (type == "role") {
      print("role setup")
      vars <- c("X", "day", "day.2", "fra", "role")
      input$data <- input$data[names(input$data)[grepl(paste(vars, collapse = "|"), names(input$data))]]
      remove.inits <- c("partner", "cnodf")
      input$inits <- input$inits[!grepl(paste(remove.inits, collapse = "|"), names(input$inits))]
      names(input$inits)[grep("phi.var.role", names(input$inits))] <- "phi.var"
      names(input$inits)[grep("phi.mean.role", names(input$inits))] <- "phi.mean"
      names(input$inits)[grep("gam.var.role", names(input$inits))] <- "gam.var"
      names(input$inits)[grep("gam.mean.role", names(input$inits))] <- "gam.mean"
    } else{
      print("cnodf setup")
      vars <- c("X", "day", "day.2", "fra", "cnodf")
      input$data <-
        input$data[names(input$data)[grepl(paste(vars, collapse = "|"), names(input$data))]]
      remove.inits <- c("partner", "role")
      input$inits <- input$inits[!grepl(paste(remove.inits, collapse = "|"), names(input$inits))]
      names(input$inits)[grep("phi.var.cnodf", names(input$inits))] <- "phi.var"
      names(input$inits)[grep("phi.mean.cnodf", names(input$inits))] <- "phi.mean"
      names(input$inits)[grep("gam.var.cnodf", names(input$inits))] <- "gam.var"
      names(input$inits)[grep("gam.mean.cnodf", names(input$inits))] <- "gam.mean"
    }
  }
  return(input)
}
|
7a2d558e2132c21a301418700f21793c03221bba
|
ba01ed5947c6ab6c988097d07fa37ee4ad7bf533
|
/man/rhone.Rd
|
f0af1513d541e5e8d5de36548240451ee97387cb
|
[] |
no_license
|
aursiber/adedata
|
63b3380454add630df69afe14967c708949405b0
|
20147d003784425b2a20fd42e3b1f21c41b375e1
|
refs/heads/master
| 2021-12-13T13:26:44.390094
| 2017-03-21T15:24:27
| 2017-03-21T15:24:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,293
|
rd
|
rhone.Rd
|
\name{rhone}
\alias{rhone}
\docType{data}
\title{Physico-Chemistry Data}
\description{
This data set gives for 39 water samples a physico-chemical description
with the number of sample date and the flows of three tributaries.
}
\usage{data(rhone)}
\format{
\code{rhone} is a list of 3 components.
\describe{
\item{tab}{is a data frame with 39 water samples and 15 physico-chemical variables.}
\item{date}{is a vector of the sample date (in days).}
\item{disch}{is a data frame with 39 water samples and the flows of the three tributaries.}
}
}
\source{
Carrel, G., Barthelemy, D., Auda, Y. and Chessel, D. (1986)
Approche graphique de l'analyse en composantes principales normée : utilisation en hydrobiologie.
\emph{Acta Oecologica, Oecologia Generalis}, \bold{7}, 189--203.
}
\examples{
data(rhone)
if(requireNamespace("ade4", quietly = TRUE)) {
library(ade4)
pca1 <- ade4::dudi.pca(rhone$tab, nf = 2, scann = FALSE)
rh1 <- reconst(pca1, 1)
rh2 <- reconst(pca1, 2)
par(mfrow = c(4,4))
par(mar = c(2.6,2.6,1.1,1.1))
for (i in 1:15) {
plot(rhone$date, rhone$tab[,i])
lines(rhone$date, rh1[,i], lwd = 2)
lines(rhone$date, rh2[,i])
ade4:::scatterutil.sub(names(rhone$tab)[i], 2, "topright")
}
par(mfrow = c(1,1))
}
}
\keyword{datasets}
|
65b553cbdf214a7ad2c46f6fdd02224d28a1a5d2
|
2e697124393b5e2a22272a2bf87d77c73f0aeb90
|
/man/dataf.tecator.Rd
|
f36e78e2b1f02b0e69bd5b24abaa11eae509f4f2
|
[] |
no_license
|
cran/ddalpha
|
f083aedb09e1f010b14930761226121aeae3d79a
|
3755274a6ee666258351a01063ebfb311efbfbdb
|
refs/heads/master
| 2022-05-11T10:20:58.026649
| 2022-03-23T06:50:16
| 2022-03-23T06:50:16
| 17,695,418
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,966
|
rd
|
dataf.tecator.Rd
|
\name{dataf.tecator}
\alias{dataf.tecator}
\alias{tecator}
\docType{data}
\title{
Functional Data Set Spectrometric Data (Tecator)
}
\description{
This dataset is a part of the original one which can be found at
\url{https://www.stat.cmu.edu/}. For each piece of finely chopped meat we observe one spectrometric curve which
corresponds to the absorbance measured at 100 wavelengths.
The pieces are split according to Ferraty and Vieu (2006) into two classes: with small (<20) and large fat
content obtained by an analytical chemical processing.
}
\usage{
dataf.tecator()
}
\format{
The functional data as a data structure.
\describe{
\item{\code{dataf}}{
The functional data as a list of objects. Each object is characterized by two coordinates.
\describe{
\item{\code{args}}{\bold{wavelength} - a numeric vector of discretization points from 850 to 1050mm }
\item{\code{vals}}{\bold{absorbance} - a numeric vector of absorbance values}
}
}
\item{\code{labels}}{The classes of the objects: "small" (<20) and "large" fat content}
}
}
\author{
Febrero-Bande, M and Oviedo de la Fuente, Manuel
}
\source{
\url{https://www.stat.cmu.edu/}
}
\references{
Ferraty, F. and Vieu, P. (2006). \emph{Nonparametric functional data analysis: theory and practice}. Springer.
}
\seealso{
\code{\link{dataf.*}} for other functional data sets
\code{\link{plot.functional}} for building plots of functional data
}
\examples{
## load the dataset
dataf = dataf.tecator()
## view the classes
unique(dataf$labels)
## access the 5th point of the 2nd object
dataf$dataf[[2]]$args[5]
dataf$dataf[[2]]$vals[5]
## plot the data
\dontrun{
labels = unlist(dataf$labels)
plot(dataf,
xlab="Wavelengths", ylab="Absorbances",
main=paste("Tecator: < 20 red (", sum(labels == "small"), "),",
" >= 20 blue (", sum(labels == "large"), ")", sep=""),
colors = c("blue", "red"))
}
}
\keyword{datasets}
\keyword{functional}
|
4a48a35032675bc76608972ba3a32669acad0838
|
fdc9f8c5273f456c82a29b62878eb160c95a8789
|
/plot1.R
|
1e2ca817c3638b85e31e3f8eab9e9d2ea699666c
|
[] |
no_license
|
cris1403/EDA-project
|
ea61743d7bb2c3c7c20f50c78f08a644de8d36fa
|
92aa06f2bfb7f7dd3c7d6493d4f210cec8f7f08c
|
refs/heads/master
| 2016-09-05T21:15:40.295918
| 2014-10-28T14:13:38
| 2014-10-28T14:13:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,286
|
r
|
plot1.R
|
#################################################################################################
# Course Project 2 - Question 1
# Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
# Using the base plotting system, make a plot showing the total PM2.5 emission from all sources
# for each of the years 1999, 2002, 2005, and 2008.
#
# I used a bar plot instead of a line graph because - having one value every 3 years - I don't
# consider the Emissions variable as continous. Emissions clearly went down.
#################################################################################################
# Question 1: have total PM2.5 emissions in the US decreased from 1999 to 2008?
# Reads the NEI summary data, totals the emissions per year, and writes a
# bar plot to plot1.png.
#
# BUG FIX: the original checked/installed "plyr" but never called
# library(plyr), so the ddply() call below it failed. The aggregation now
# uses base stats::aggregate, removing the plyr dependency entirely.
NEI <- readRDS("summarySCC_PM25.rds")

# Total emissions by year (one row per measurement year, sorted ascending).
totals <- aggregate(Emissions ~ year, data = NEI, FUN = sum)

# Bar plot via the base plotting system; emissions shown in thousands of tons.
png(filename = "plot1.png", width = 800, height = 800)
barplot(totals$Emissions / 1000, col = "orange",
        main = "Total US "~PM[2.5]~" Emissions",
        xlab = "Year", ylab = "Emissions (thousands of tons)",
        cex.lab = 1.3, cex.axis = 1, cex.main = 1.4,
        names.arg = as.character(totals$year))  # labels from data, not hard-coded
dev.off()
|
466c26f93382c8e78cecd2765fc11e47e18d95d1
|
035d7e1721cc68aaa80111de35bfd7c47f48ce80
|
/run_analysis.R
|
8dd638d8e357724e7dd9d8ea08bdeee28e5f6cfe
|
[] |
no_license
|
Maca944/GetData-Assignment
|
4af277859a87601ab33dfcb8009829a4b7920ee8
|
e244e45e628acdf07c4d3a9fd1d9b383669eee4c
|
refs/heads/master
| 2020-05-25T11:38:28.590067
| 2014-09-17T14:31:31
| 2014-09-17T14:31:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,597
|
r
|
run_analysis.R
|
###############################################################################################################################
## Coursera Getting and Cleaning Data
## Course Project
## Maarten Caminada
##
## Note: I'm happy with the outcome, although I realize there are more ways of achieving this, and probably
## easier and more elegant ones.
## I understand the idea behind this project is to come up with a model to predict the activity type based
## on signals from the accelerometer and gyroscope from a smartphone. I simply selected all the columns that
## contain 'mean' or 'std'. For a predictive model I might have chosen different columns.
##
## Thanks for reviewing me.
##
###############################################################################################################################
# change the default directory
# NOTE(review): hard-coded personal path; the script only runs on the
# author's machine as written — consider relative paths instead of setwd().
setwd("c:/maca/Rdata/GetData-Assignment/UCI Har Dataset/")
# load the dplyr package (which is already installed)
library(dplyr)
# read all the text files and put them in memory:
# feature (column) names, activity labels, and the train/test splits
# of measurements, activity ids, and subject ids.
Features <- data.frame(read.table("features.txt", stringsAsFactors = FALSE)[,2])
Activity <- read.table("./activity_labels.txt")
TrainData <- read.table("./train/X_train.txt")
TrainLabels <- read.table("./train/y_train.txt")
TrainSubjects <- read.table("./train/subject_train.txt")
TestData <- read.table("./test/X_test.txt")
TestLabels <- read.table("./test/y_test.txt")
TestSubjects <- read.table("./test/subject_test.txt")
# add the subjects and the labels to the data (column-wise, same row order)
Train <- cbind(TrainSubjects, TrainLabels, TrainData)
Test <- cbind(TestSubjects, TestLabels, TestData)
# Step 1: Merges the training and the test sets to create one data set
AllData <- rbind(Train, Test)
# add Subjects and ActivityNumber as the first two column names
Header <- data.frame(c("Subjects", "ActivityNumber"), stringsAsFactors = FALSE)
# give Header the same column name as Features so the two can be rbind-ed
names(Header) <- names(Features)
Features <- rbind(Header, Features)
# here the column names for the data frame are set
colnames(AllData) <- Features[,1]
colnames(Activity) <- c("ActivityNumber","ActivityDescription")
# 2. Extracts only the measurements on the mean and standard deviation.
# NOTE(review): "mean()" / "std()" are regexes; "()" is an empty group, so
# these match "mean"/"std" anywhere in a name (e.g. meanFreq is included).
AllData <- data.frame(cbind(AllData[,1:2]),
                      AllData[, grepl("mean()", names(AllData))],
                      AllData[, grepl("std()", names(AllData))])
# 3. Uses descriptive activity names to name the activities in the data set
AllData <- merge(Activity, AllData, by='ActivityNumber')
# drop the numeric activity ids, now redundant with the descriptions
AllData$ActivityNumber <- NULL
# 4. Appropriately labels the data set with descriptive variable names
# (mechanical regex renames: std -> Stdev, t/f prefixes expanded,
#  abbreviations expanded, dots removed)
names(AllData) <- gsub("std()","Stdev", names(AllData))
names(AllData) <- gsub("^(t)","Time", names(AllData))
names(AllData) <- gsub("^(f)","Freqdomainsignals", names(AllData))
names(AllData) <- gsub("Acc","Acceleration", names(AllData))
names(AllData) <- gsub("Mag","Magnitude", names(AllData))
names(AllData) <- gsub("[.]","",names(AllData))
# 5. From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
# NOTE(review): tbl_df()/summarise_each()/funs() are deprecated in modern
# dplyr (as_tibble()/summarise(across(...))); works with older versions.
AllDataTbl <- tbl_df(AllData)
AllDataTbl <- group_by(AllDataTbl, Subjects, ActivityDescription)
Output <- summarise_each(AllDataTbl, funs(mean))
write.table(Output, file="TidyDataSet.txt", row.name=FALSE)
|
fa0d300695a09170d5e7a7a15a450e79a15ff826
|
5c5c2ca1037fcebb3cd48008c18d543b96c9e66a
|
/vizualization_scripts/plot_TE_ACR_perms.R
|
cda1f1530083921a49463ebbb2d03b9a3b36ce45
|
[] |
no_license
|
plantformatics/multispecies_TE_CRE_analysis
|
7ea05ffdf1cbed9b5059ef88a709fcfbe654845c
|
b1e6c163a3c4f467303d3f2cd93838b756bc5e58
|
refs/heads/master
| 2020-03-26T12:28:35.574746
| 2018-08-16T18:36:03
| 2018-08-16T18:36:03
| 144,894,427
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 865
|
r
|
plot_TE_ACR_perms.R
|
# Plot the permutation null distribution of ACR/transposon overlap for
# A. thaliana, with the observed overlap marked as a vertical line.
# NOTE(review): rm(list=ls()) and a hard-coded personal setwd() path make
# this script interactive-session-only; consider removing both.
rm(list=ls())
setwd("~/Desktop/sapelo2_mnt/reference_genomes/Athaliana/")
library(scales)
# Observed overlap count — presumably the real TE-ACR overlap; confirm.
ol <- 2328
# One simulated overlap value per permutation (10,000 rows expected).
a <- read.table("At.TE.perm.txt")
# plot parameters
# Kernel density of the simulated overlaps, restricted to the data range.
den <- density(a$V1, from=min(a$V1), to=max(a$V1))
# NOTE(review): par(xaxp/yaxp) set before plot() is typically reset when
# plot() sets up new coordinates — confirm the tick counts take effect.
par(xaxp = c(min(ol,den$x),max(den$x,ol) , 4),
    yaxp = c(min(den$y),max(den$y), 4))
# Draw the null density; x-range widened to include the observed value.
plot(den, col=alpha("grey75",0.5),
     xaxt="none",yaxt="none",
     main="A.thaliana ACR transposon overlap",
     xlab="Simulated overlap rate (10,000x)",
     ylab="",
     xlim=c(min(den$x,ol),max(den$x,ol)))
# Shade under the density curve.
polygon(den, col=alpha("grey75",0.5), border=NA)
# Mark the observed overlap.
abline(v=ol, col="darkorchid", lwd=3)
# Custom axes: x ticks rounded to hundreds, y ticks to 3 decimals,
# each split into four intervals.
minx <- round(min(ol,den$x), -2)
maxx <- round(max(ol,den$x), -2)
rangex <- as.integer((maxx-minx)/4)
miny <- round(min(den$y),3)
maxy <- round(max(den$y),3)
rangey <- (maxy-miny)/4
axis(1, at=seq(minx,maxx, by=rangex))
axis(2, at=seq(miny,maxy, by=rangey), las=1)
|
1a3d33c03154d1992a26de60a50738910d9b73c9
|
a3c78700a65f10714471a0d307ab984e8a71644d
|
/base/utils/man/status.Rd
|
5e5aace2ca38daa8ea9d4bced65a59d960540fb3
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
PecanProject/pecan
|
e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f
|
ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c
|
refs/heads/develop
| 2023-08-31T23:30:32.388665
| 2023-08-28T13:53:32
| 2023-08-28T13:53:32
| 6,857,384
| 187
| 217
|
NOASSERTION
| 2023-09-14T01:40:24
| 2012-11-25T23:48:26
|
R
|
UTF-8
|
R
| false
| true
| 1,961
|
rd
|
status.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/status.R
\name{status}
\alias{status}
\alias{status.start}
\alias{status.end}
\alias{status.skip}
\alias{status.check}
\title{PEcAn workflow status tracking}
\usage{
status.start(name, file = NULL)
status.end(status = "DONE", file = NULL)
status.skip(name, file = NULL)
status.check(name, file = NULL)
}
\arguments{
\item{name}{one-word description of the module being checked or recorded,
e.g. "TRAIT", "MODEL", "ENSEMBLE"}
\item{file}{path to status file.
If NULL, taken from \code{settings} (see details)}
\item{status}{one-word summary of the module result, e.g. "DONE", "ERROR"}
}
\value{
For \code{status.start}, \code{status.end}, and \code{status.skip}: NULL, invisibly
For \code{status.check}, an integer:
0 if module not run, 1 if done, -1 if error
}
\description{
Records the progress of a PEcAn workflow by writing statuses and timestamps
to a STATUS file. Use these each time a module starts, finishes,
or is skipped.
}
\details{
All of these functions write to or read from a STATUS file in your run's
output directory. If the file is not specified in the call, they will look
for a \code{settings} object in the global environment and use
\verb{<settings$outdir>/STATUS} if possible.
Since the status functions may be called inside error-handling routines,
it's important that they not produce new errors of their own. Therefore
if the output file doesn't exist or is not writable, rather than complain
the writer functions (\code{status.start}, \code{status.end}, \code{status.skip}) will
print to the console and \code{status.check} will simply return 0.
}
\section{Functions}{
\itemize{
\item \code{status.start()}: Record module start time
\item \code{status.end()}: Record module completion time and status
\item \code{status.skip()}: Record that module was skipped
\item \code{status.check()}: Look up module status from file
}}
\author{
Rob Kooper
}
|
5f8dbfeda81fb8b5266a8e194fe6be657933513e
|
100412ee06fe63606e3c7b8c6b6ca73dd04170b0
|
/tests/testthat.R
|
1daf8f2e6aafab59ab80cbcdf76d32683860954b
|
[] |
no_license
|
gudaleon/nhdR
|
bdaf3b1eeaaa5a2a87cffe0be8ae972e8c91b123
|
acd4eaea025bc2a2b7d6d5f63785f3bdcfe0708f
|
refs/heads/master
| 2021-09-03T02:27:18.698124
| 2018-01-04T22:31:52
| 2018-01-04T22:31:52
| 118,405,164
| 1
| 1
| null | 2018-01-22T04:15:53
| 2018-01-22T04:15:53
| null |
UTF-8
|
R
| false
| false
| 52
|
r
|
testthat.R
|
# Standard testthat entry point: load the test framework and the package
# under test, then run the package's test suite.
library(testthat)
library(nhdR)
test_check("nhdR")
|
f0852f5e7fa7c6ffeccf60c77e2e5c87ccf6f167
|
c72cbc1e01cb8a7e25266bc2d0a18bfd197612ca
|
/my working files/r_built_in_functions.R
|
d9534c56547e38132f1b655ee750876ddedd4f88
|
[] |
no_license
|
psefton/r_portilla
|
e05ec0c8a22e6f286d49da926d6927ab981bd256
|
2391d93670d625319a3c9732730f34d803e4425b
|
refs/heads/master
| 2021-01-12T10:49:17.657241
| 2016-11-03T07:22:01
| 2016-11-03T07:22:01
| 72,718,616
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 809
|
r
|
r_built_in_functions.R
|
## Built-in R functions — practice/scratch script; each expression's value
## is meant to be inspected interactively at the REPL.
#seq() -- regular sequences
#sort() -- sort a vector
#rev() -- reverse
#str() -- structure
#append() -- merge objects together
seq(0,100,2)
seq(0,100,10)
v<- c(1,4,7,2,13,3,11)
sort(v, decreasing = TRUE)
# character vectors sort alphabetically
cv <- c('b', 'd', 'a')
sort(cv)
v<- 1:10
rev(v)
str(v)
v <- 1:10
v2 <- 35:40
v
v2
append(v,v2)
# check data type
# is. -- type predicates
# as. -- type coercions
v <- c(1,2,3)
is.vector(v)
v
as.list(v)
as.matrix(v)
# apply-family examples
# draw 3 values from 1..100 (non-deterministic: no seed is set)
sample(x = 1:100,3)
v <- c(1,2,3,4,5)
# addrand: add a random integer in 1..100 to its argument
addrand <- function(x){
  ran <- sample(1:100,1)
  return(x+ran)
}
print(addrand(10))
v <- 1:10
# apply addrand to every element (sapply simplifies to a vector)
sapply(v,addrand)
v<- 1:5
times2 <- function(num){
  return(num*2)
}
result <- sapply(v,times2)
print(result)
#anonymous functions
sapply(v,function(num){num*2})
#apply with multiple inputs: extra named args are forwarded to the function
add_choice <- function(num,choice){
  return(num+choice)
}
sapply(v,add_choice, choice = 100)
|
38247292a1c476f2a48a156c9f677b07c5516640
|
1326e8d034496b76d9d5902b1b5aacca3aa58879
|
/sim_annealing_reheating_2opt_tsp.R
|
9a7dc66554f9c73d2de4612357e6554e473a4a31
|
[] |
no_license
|
phabee/tsp_scripts
|
1215c170422954adfe9ae97dc28dc823051f0f71
|
cb756ddccc2e4a33bec2fd7be97a8200d4963598
|
refs/heads/main
| 2023-01-23T06:47:14.101158
| 2020-12-09T22:37:55
| 2020-12-09T22:37:55
| 316,303,331
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,034
|
r
|
sim_annealing_reheating_2opt_tsp.R
|
#' Apply simulated annealing to the 2-opt neighbourhood of a TSP tour.
#'
#' Starts from a nearest-neighbour tour, then repeatedly proposes a random
#' 2-opt move (reversing a random segment of the tour). Improving moves are
#' always accepted; worsening moves are accepted with Metropolis probability
#' exp(-delta/T). The temperature is cooled via T <- T / (1 + alpha*T) and
#' the search stops after `max_stall` consecutive non-improving iterations.
#'
#' @param tsp the tsp instance (data frame with stop_id, lat, lng)
#' @param dima the distance matrix (long-format lookup table)
#' @param T0 initial temperature
#' @param alpha the temperature reduction factor per iteration
#' @param max_stall stop after this many consecutive non-improving
#'   iterations (previously a hard-coded 300)
#'
#' @return the best tour found (vector of stop ids)
#' @export
simulatedAnnealing <- function(tsp, dima, T0 = 1e2, alpha = 0.9,
                               max_stall = 300) {
  # NOTE(review): seeding here makes every call reproducible but clobbers
  # the caller's RNG state.
  set.seed(1234)
  cur_sol <- constructNearestNeighborSolution(tsp, dima)
  cur_dist <- calculateTourDistance(cur_sol, dima)
  best_sol <- cur_sol
  best_dist <- cur_dist
  temp <- T0
  stall <- 0
  repeat {
    # Propose a random 2-opt move: pick two positions, reverse the segment.
    pos <- sort(sample(seq_len(nrow(tsp)), 2, replace = FALSE))
    new_sol <- apply2opt(cur_sol, pos[1], pos[2])
    new_dist <- calculateTourDistance(new_sol, dima)
    delta <- new_dist - cur_dist
    # Accept improving moves always; worsening moves with probability
    # exp(-delta/temp). Short-circuit keeps runif() draws identical to the
    # original (only drawn when delta >= 0).
    if (delta < 0 || exp(-delta / temp) > runif(1)) {
      cur_sol <- new_sol
      cur_dist <- new_dist
    }
    if (cur_dist < best_dist) {
      best_sol <- cur_sol
      best_dist <- cur_dist
      # Reset the stall counter on every improvement of the incumbent.
      stall <- 0
      cat("best sol: ", best_dist, "tour: ", best_sol, "\n")
    } else {
      stall <- stall + 1
    }
    temp <- temp / (1 + alpha * temp)
    if (stall > max_stall) {
      break
    }
  }
  return(best_sol)
}
#' Generate a uniformly random tour over all stops.
#'
#' @param tsp the tsp instance (data frame with a stop_id column)
#'
#' @return the stop ids in a random order
#' @export
getRandomTour <- function(tsp) {
  n_stops <- nrow(tsp)
  shuffled <- sample(tsp$stop_id, size = n_stops, replace = FALSE)
  shuffled
}
#' Construction heuristic: nearest-neighbour TSP tour.
#'
#' Builds a tour by starting at a given (or random) stop and repeatedly
#' moving to the closest unvisited stop.
#'
#' @param tsp the tsp instance (data frame with a stop_id column)
#' @param dima the distance matrix
#' @param start_point_id row index of the stop the tour should start at;
#'   any value < 1 picks a random starting stop
#'
#' @return a valid tour (vector of stop ids). The return leg to the first
#'   stop is not listed; calculateTourDistance() accounts for it.
constructNearestNeighborSolution <- function(tsp, dima, start_point_id = -1) {
  # Resolve the starting stop id. BUG FIX: the original indexed
  # tsp$stop_id a second time with a value that was already a stop id,
  # which only worked when stop ids happened to equal 1..n.
  if (start_point_id < 1) {
    cur_id <- sample(tsp$stop_id, 1)
  } else {
    cur_id <- tsp$stop_id[start_point_id]
  }
  n_stops <- nrow(tsp)
  # Preallocate the tour (every entry is overwritten below); the original
  # grew the vector with c() on each iteration. Also drops the stray
  # `drop = FALSE` the original passed when subsetting a plain vector.
  tour <- tsp$stop_id
  remaining <- tsp$stop_id
  for (k in seq_len(n_stops)) {
    tour[k] <- cur_id
    remaining <- remaining[remaining != cur_id]
    if (length(remaining) == 0) {
      break
    }
    # Pick the closest unvisited stop.
    best_id <- remaining[1]
    best_dist <- Inf
    for (cand in remaining) {
      d <- getDimaDist(fromLoc = cur_id, toLoc = cand, dima = dima)
      if (d < best_dist) {
        best_dist <- d
        best_id <- cand
      }
    }
    cur_id <- best_id
  }
  return(tour)
}
#' Build a single 2-opt neighbour of a TSP tour.
#'
#' All nodes between positions firstNodeId and secondNodeId (inclusive)
#' are reversed; everything before and after the segment is kept as-is
#' (see slide 95 of the lecture).
#'
#' @param tsp_node_sequence vector of TSP node ids (the current tour)
#' @param firstNodeId position of the first node of the reversed segment
#' @param secondNodeId position of the last node of the reversed segment
#'
#' @return a new TSP solution: the input tour with the chosen segment
#'   reversed
apply2opt <- function(tsp_node_sequence, firstNodeId, secondNodeId) {
  n <- length(tsp_node_sequence)
  # Prefix before the reversed segment. The explicit check avoids R's
  # 1:0 counting backwards when the segment starts at position 1.
  prefix <- if (firstNodeId > 1) tsp_node_sequence[1:(firstNodeId - 1)] else c()
  # The segment itself, in reverse order.
  segment <- tsp_node_sequence[rev(firstNodeId:secondNodeId)]
  # Suffix after the reversed segment (empty when it ends at the last node).
  suffix <- if (secondNodeId < n) tsp_node_sequence[(secondNodeId + 1):n] else c()
  c(prefix, segment, suffix)
}
#' Solve a TSP instance end-to-end.
#'
#' Builds the distance matrix for the instance, then runs the simulated
#' annealing search on it.
#'
#' @param tsp the tsp instance (data frame with stop_id, lat, lng)
#'
#' @return the best tour found
#' @export
renderTour <- function(tsp) {
  distance_matrix <- calculateDima(tsp)
  simulatedAnnealing(tsp = tsp, dima = distance_matrix)
}
#' Calculate the distance matrix (long-format lookup table).
#'
#' Computes the Euclidean distance between every unordered pair of stops
#' (including each stop with itself) and stores one row per pair, keyed by
#' (loc_from, loc_to). Only one direction per pair is stored; getDimaDist()
#' tries both orders on lookup.
#'
#' @param tsp the tsp instance (data frame with stop_id, lat, lng)
#'
#' @return a keyed data.table with columns loc_from, loc_to, dist
#' @export
calculateDima <- function(tsp) {
  n <- nrow(tsp)
  # All (from, to) index pairs with to >= from, built fully vectorised.
  # PERF/BUG FIX: the original rbind-ed one row at a time (O(n^2) copies)
  # and called setkey() inside the outer loop, re-sorting the table on
  # every iteration; building everything at once and keying once fixes both.
  from_idx <- rep(seq_len(n), times = n - seq_len(n) + 1)
  to_idx <- unlist(lapply(seq_len(n), function(i) i:n))
  dima.dt <- data.table::data.table(
    loc_from = tsp$stop_id[from_idx],
    loc_to = tsp$stop_id[to_idx],
    dist = sqrt((tsp$lat[to_idx] - tsp$lat[from_idx])^2 +
                  (tsp$lng[to_idx] - tsp$lng[from_idx])^2)
  )
  # NOTE(review): loc_from/loc_to now keep the stop_id's own type; the
  # original's empty character prototype coerced ids to character, which
  # getDimaDist's == comparisons tolerated either way.
  data.table::setkey(dima.dt, loc_from, loc_to)
  return(dima.dt)
}
#' Calculate total tour distance, including the return to the start.
#'
#' @param tour the tour as a vector of stop ids
#' @param dima the distance matrix
#'
#' @return the total closed-tour distance
#' @export
calculateTourDistance <- function(tour, dima) {
  n <- length(tour)
  # A tour with fewer than two stops involves no travel. (BUG FIX: the
  # original `2:length(tour)` counts backwards — c(2, 1) — for n < 2,
  # producing out-of-bounds lookups.)
  if (n < 2) {
    return(0)
  }
  dist <- 0.0
  # Sum the consecutive legs of the tour.
  for (i in 2:n) {
    dist <- dist + getDimaDist(fromLoc = tour[i - 1], toLoc = tour[i],
                               dima = dima)
  }
  # Close the loop: distance from the last stop back to the first.
  retDist <- getDimaDist(fromLoc = tour[1], toLoc = tour[n], dima = dima)
  return(dist + retDist)
}
#' Get the distance between two stops from the distance matrix.
#'
#' Only one direction of each pair is stored in `dima`, so if the
#' (fromLoc, toLoc) lookup finds nothing the reversed pair is tried
#' before giving up with an error.
#'
#' @param fromLoc from location ID
#' @param toLoc to location ID
#' @param dima the distance matrix (data.table with loc_from, loc_to, dist)
#'
#' @return the distance
#' @export
getDimaDist <- function(fromLoc, toLoc, dima) {
  # data.table non-standard evaluation: loc_from/loc_to are columns of dima.
  # NOTE(review): wrapping the logical expression in I() is unusual here;
  # the conventional form is dima[loc_from == fromLoc & loc_to == toLoc] —
  # confirm I() is intentional before changing it.
  dimaEntry <- dima[I(loc_from == fromLoc & loc_to == toLoc)]
  if (nrow(dimaEntry) != 1) {
    # if a / b lookup failed, try other way round (since we store only one
    # direction) in the distance matrix.
    dimaEntry <- dima[I(loc_from == toLoc & loc_to == fromLoc)]
    if (nrow(dimaEntry) != 1) {
      stop(
        paste0(
          "Expected to find exactly one dima entry corresponding to the given loc_from/loc_to-pair ",
          fromLoc, "/",
          toLoc,
          " but found 0 or more than 1."
        )
      )
    }
  }
  return(dimaEntry$dist)
}
# Previously observed results, kept for reference:
# best sol: 8039.982 tour: 26 11 27 25 46 28 29 1 6 41 20 22 19 49 15 43 45 24 3 5 14 4 23 47 37 36 39 38 33 34 35 48 31 0 21 30 17 16 2 44 18 40 7 8 9 42 32 50 10 51 13 12
# [1] 26 11 27 25 46 28 29 1 6 41 20 22 19 49 15 43 45 24 3 5 14 4 23 47 37 36 39 38 33 34 35 48 31 0 21 30
# [37] 17 16 2 44 18 40 7 8 9 42 32 50 10 51 13 12
# best sol: 8001.643
# [1] 29 28 46 25 27 26 12 13 51 10 50 11 24 3 5 14 4 23 47 37 36 39 38 35 34 33 43 45 15 49 19 22 30 17 21
# [36] 0 48 31 44 42 32 9 8 7 40 18 2 16 20 41 6 1
# Script entry point: read the berlin52 benchmark instance (two header
# lines skipped; columns are stop_id, lat, lng), solve it, print the tour.
tsp <- read.table("berlin52.tsp", skip = 2, col.names = c("stop_id", "lat", "lng"))
tour <- renderTour(tsp)
print(tour)
|
3b6db77a9259ccacaa3eea0c439bf2813bf9f520
|
74d8df7e5a0fd61394fd0494f35ce82dfaa30c96
|
/man/zero_range.Rd
|
84ed2e4469d8f5313c02271e3e9e38086b11c317
|
[
"MIT"
] |
permissive
|
immunogenomics/scpost
|
8bde0fff6be217aa92e5b2cb48d145cd35031343
|
9e6ce336addc7e0d50e266299e8b46bed7df78d0
|
refs/heads/main
| 2023-04-13T13:15:08.708526
| 2021-07-22T14:14:36
| 2021-07-22T14:14:36
| 312,683,900
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 602
|
rd
|
zero_range.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{zero_range}
\alias{zero_range}
\title{Simple implementation from Hadley Wickham that checks if all elements
of a vector are equal}
\usage{
zero_range(x, tol = .Machine$double.eps^0.5)
}
\arguments{
\item{x}{Vector containing numeric elements}
\item{tol}{The tolerated acceptable error in determining whether elements are equal}
}
\value{
A boolean representing if all elements of a vector are equal
}
\description{
Simple implementation from Hadley Wickham that checks if all elements
of a vector are equal
}
|
d44d08328cfbdc7f39dd234a59a76f98cefe195c
|
53f7d6caae267524bfd6084ec622a2f56777bd34
|
/r_scripts/r-analysis.R
|
730947ee14b971ed0ca685e4bdf684b8992592cb
|
[] |
no_license
|
stredger/lind_benchmark
|
e016c651e4d7568c4582904d58e21d8fa4746767
|
371e9a7a6ccdf3f545be94ca103bb5a97da84bcd
|
refs/heads/master
| 2021-01-23T17:31:16.891385
| 2012-02-28T22:38:02
| 2012-02-28T22:38:02
| 3,576,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,698
|
r
|
r-analysis.R
|
# Lind benchmarking R script
#
# Stephen Tredger :)
#
#Args <- commandArgs(); # retrieve args
#x <- c(1:as.real(Args[4])); # get the 4th argument
# Script configuration.
# NOTE(review): all paths are hard-coded to the author's home directory;
# parameterizing them would make the script portable.
# lind timing files
lind_file_path = "~/Documents/DS/lind_benchmarking/lind_results"
# c timing files
c_file_path = "~/Documents/DS/lind_benchmarking/c_results"
# place to output plots
plot_path = "~/Documents/DS/lind_benchmarking/plots"
# name of file we want to analyze
file_name = "open_read_close"
# plot height and width in # pixels
plot_height = 620
plot_width = 620
# trial indices — assumes exactly 1000 timing rows per file; confirm.
pts = c(1:1000)
#
# Read a two-column CSV of (start, finish) timestamps and derive timing
# vectors. Returns a list with:
#   start_time     - raw start times
#   finish_time    - raw finish times
#   elapsed_time   - finish - start, per trial
#   std_start_time - start times shifted so the first trial is at t = 0
#
get_times_from_file <- function(path) {
  timing <- read.csv(path, header = FALSE, strip.white = TRUE,
                     stringsAsFactors = FALSE)
  # Column extraction: the original used c(do.call("cbind", data[1])) to
  # obtain a plain vector; [[ ]] does the same directly.
  start_time <- timing[[1]]
  finish_time <- timing[[2]]
  elapsed_time <- finish_time - start_time
  # Standardize time, so the series starts at t = 0.
  std_start_time <- start_time - start_time[1]
  # Return the list explicitly. (BUG FIX: the original ended with an
  # assignment `times = list(...)`, whose value is returned invisibly.)
  list(start_time = start_time, finish_time = finish_time,
       elapsed_time = elapsed_time, std_start_time = std_start_time)
}
# read in times for each file (lind vs native C runs of the same benchmark)
lind_times = get_times_from_file(paste(lind_file_path, file_name, sep="/"))
c_times = get_times_from_file(paste(c_file_path, file_name, sep="/"))
# lind histogram of per-trial elapsed times (PNG written to plot_path)
png(paste(plot_path, "/lind-", file_name, "-hist.png", sep=""), width=plot_width, height=plot_height)
lind_hist = hist(lind_times$elapsed_time, breaks=50, main=paste("Lind", file_name, "histogram"), xlab="elapsed time (sec)")
dev.off()
# c histogram
png(paste(plot_path, "/c-", file_name, "-hist.png", sep=""), width=plot_width, height=plot_height)
c_hist = hist(c_times$elapsed_time, breaks=50, main=paste("Native C", file_name, "histogram"), xlab="elapsed time (sec)")
dev.off()
# colours for scatterplot points
lind_col="red"
c_col="blue"
# lind scatterplot: start time vs elapsed time, log y-axis
png(paste(plot_path, "/lind-", file_name, "-scatter.png", sep=""), width=plot_width, height=plot_height)
lind_scplot = plot(lind_times$std_start_time, lind_times$elapsed_time, log="y", pch=20, cex=0.5, xlab="start time (sec)", ylab="log elapsed time log(sec)", main=paste(file_name, "scatterplot"), col=lind_col)
dev.off()
# native c scatterplot
png(paste(plot_path, "/c-", file_name, "-scatter.png", sep=""), width=plot_width, height=plot_height)
c_scplot = plot(c_times$std_start_time, c_times$elapsed_time, log="y", pch=20, cex=0.5, xlab="start time (sec)", ylab="log elapsed time log(sec)", main=paste(file_name, "scatterplot"), col=c_col)
dev.off()
# scatterplot with both c and lind... right now looks like crap as all the
# c values are crunched together (linear axis, lind values dominate)
png(paste(plot_path, "/both-", file_name, "-scatter.png", sep=""), width=plot_width, height=plot_height)
both_scplot = plot(lind_times$std_start_time, lind_times$elapsed_time, pch=20, cex=0.5, xlab="start time (sec)", ylab="log elapsed time log(sec)", main=paste(file_name, "scatterplot"), col=lind_col)
points(c_times$std_start_time, c_times$elapsed_time, pch=20, cex=0.5, col=c_col)
dev.off()
# Normalize both series by the larger maximum so they share one [0,1] axis.
# NOTE(review): only max_l is used below — presumably because lind times
# dominate; confirm max_c is intentionally unused.
max_l = max(lind_times$elapsed_time)
max_c = max(c_times$elapsed_time)
# scatterplot with both c and lind but just trial # vs normalized elapsed time
png(paste(plot_path, "/both-ptnum-", file_name, "-scatter.png", sep=""), width=plot_width, height=plot_height)
bothpt_scplot = plot(pts, c_times$elapsed_time / max_l, pch=20, cex=0.5, ylim=c(0,1), xlab="trial number", ylab="log elapsed time log(sec)", main=paste(file_name, "scatterplot"), col=c_col)
points(pts, lind_times$elapsed_time / max_l, pch=20, cex=0.5, col=lind_col)
dev.off()
|
57c3ccf131259a4772a2e2981a8fcbde0941b4ec
|
d481473c7bf59ef07fb2f0f7f6353e0beff5fa48
|
/man/voluminous2.Rd
|
686fe2888bab02fcaee3ef812fd99a3344ba46b8
|
[] |
no_license
|
crumplecup/muddier
|
92e1d4845db3d13e1297060d50d0244d5b00064f
|
c4d67a17377e45a35426cbb11ace342afaed6806
|
refs/heads/master
| 2021-11-28T03:41:33.262356
| 2021-08-13T03:11:39
| 2021-08-13T03:11:39
| 175,301,894
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 738
|
rd
|
voluminous2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod.R
\name{voluminous2}
\alias{voluminous2}
\title{voluminous2}
\usage{
voluminous2(nodes, n, vi, vo, tp = 0.1, lp = 0.5)
}
\arguments{
\item{nodes}{is a spatial object (creek nodes)}
\item{n}{is the number of years to simulate accumulation, an integer}
\item{vi}{is the volumetric input rate}
\item{vo}{is the volumetric output rate}
\item{tp}{is the turbulent deposition probability}
\item{lp}{is the laminar deposition probability}
}
\value{
a list with elements c(volumes, levels, arrivals)
}
\description{
Simulates accumulation record, using a linked-bucket model
with backfilling based on average elevation of bank deposits.
}
\seealso{
eventer
}
|
bb807fa3b09a23a8df1682b3780f4eb94a8ca5ce
|
2ae048da4fce01231f9b85796b5c927c10ac1742
|
/plot4.R
|
7cc9ecc9674a541e29260dbccac3bc0447048f6f
|
[] |
no_license
|
owamo/ExData_Plotting1
|
71d613c4124e16c66bab16e99be6f2e129945e0c
|
1e449f85ddc7f06492776a86d5dee09511d8f4a3
|
refs/heads/master
| 2021-01-15T09:14:31.333239
| 2014-09-09T15:38:52
| 2014-09-09T15:38:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,848
|
r
|
plot4.R
|
## Exploratory Data Analysis — plot 4.
## Downloads the UCI "household power consumption" data if needed, reads
## only the observations for 2007-02-01 and 2007-02-02 directly from the
## zip archive, and writes a 2x2 panel of time-series plots to plot4.png.

## Download the data if it does not exist in the working directory.
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
fileZIP <- "exdata_data_household_power_consumption.zip"
if (!file.exists(fileZIP)){
  download.file(fileURL, destfile = fileZIP)
}

## Read the data for the dates 2007-02-01 and 2007-02-02.
## First read a handful of rows to learn the column classes/names, then
## read just the 2880 rows for the two target dates (skip/nrows were
## precomputed for this file). read.table() opens and closes the unz()
## connection itself. (BUG FIX: the original's `close(file(fileZIP))`
## opened a brand-new connection and closed that — a no-op that never
## touched the unz() connections — and has been removed.)
data <- read.table(unz(fileZIP, "household_power_consumption.txt"),
                   header = TRUE, sep = ";", na.strings = "?", nrows = 5)
classes <- sapply(data, class)
colnames <- colnames(data)
data <- read.table(unz(fileZIP, "household_power_consumption.txt"),
                   header = FALSE, colClasses = classes, col.names = colnames,
                   sep = ";", na.strings = "?", skip = 66637, nrows = 2880)

## Convert date and time variables to proper R classes
## (Time becomes a full timestamp combining Date and Time fields).
data$Time <- strptime(paste(data$Date, data$Time, sep = ","),
                      "%d/%m/%Y,%H:%M:%S", tz = "GMT")
data$Date <- as.Date(data$Date, "%d/%m/%Y")

## Create plot 4 (2x2 panel) as a PNG file.
png(filename = "plot4.png",
    width = 480, height = 480)
par(mfrow = c(2, 2))
plot(data$Time, data$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power")
plot(data$Time, data$Voltage,
     type = "l",
     xlab = "datetime",
     ylab = "Voltage")
plot(data$Time, data$Sub_metering_1,
     type = "l", col = "black",
     xlab = "",
     ylab = "Energy sub metering")
lines(data$Time, data$Sub_metering_2, col = "red")
lines(data$Time, data$Sub_metering_3, col = "blue")
legend("topright", lty = 1, bty = "n",
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(data$Time, data$Global_reactive_power,
     type = "l",
     xlab = "datetime",
     ylab = "Global_reactive_power")
dev.off()
|
4f918692f536c7fb38a154033b1fc3ee71eb3d8d
|
b201f1f182b1828a66a2d97baf28224b39d70564
|
/R/modules/ui/submodules/cellimage_main_ui.R
|
899e544a3e47d19b369dccb66d0b47ec7b5bcaf5
|
[
"MIT"
] |
permissive
|
Drinchai/iatlas-app
|
147294b54f64925fb4ee997da98f485965284744
|
261b31224d9949055fc8cbac53cad1c96a6a04de
|
refs/heads/master
| 2023-02-08T08:17:45.384581
| 2020-07-20T23:27:08
| 2020-07-20T23:27:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,949
|
r
|
cellimage_main_ui.R
|
# Builds the UI for the cell-image module: an intro message box, two
# side-by-side visualization selectors, two plot panels with loading
# spinners, a shared legend image, and a method-description button.
cellimage_main_ui <- function(id){
  ns <- shiny::NS(id)

  # One options box: a radio selector plus a dynamic UI slot.
  viz_options_box <- function(radio_id, select_id, default_choice) {
    iatlas.app::optionsBox(
      shiny::column(
        width = 6,
        shiny::radioButtons(
          ns(radio_id),
          "Select type of visualization:",
          choices = c("Illustration", "Network"),
          selected = default_choice
        )
      ),
      shiny::column(
        width = 6,
        shiny::uiOutput(ns(select_id))
      )
    )
  }

  # One plot box whose output is wrapped in a loading spinner.
  spinner_plot_box <- function(plot_id) {
    iatlas.app::plotBox(
      width = 6,
      shinycssloaders::withSpinner(shiny::uiOutput(ns(plot_id)))
    )
  }

  shiny::tagList(
    iatlas.app::messageBox(
      width = 12,
      shiny::includeMarkdown("markdown/cellimage.markdown"),
      shiny::actionLink(ns("method_link"), "Click to view method.")
    ),
    shiny::fluidRow(
      # Left panel defaults to the illustration view, right to the network
      viz_options_box("ui1", "select_ui", "Illustration"),
      viz_options_box("ui2", "select_ui2", "Network")
    ),
    shiny::fluidRow(
      spinner_plot_box("plot1"),
      spinner_plot_box("plot2")
    ),
    shiny::img(src = "images/cell-image-legend.png", width = "100%"),
    shiny::br(),
    shiny::actionButton(ns("methodButton"), "Click to view method")
  )
}
|
55749b2b03895d229d450e7270bd1690a450c3f9
|
46124bc5042dded971e14c09440951a5b12392a9
|
/R/BGData.R
|
01462a6fd713ef7ac051c82a21b04c04ebb8414e
|
[] |
no_license
|
minghao2016/BGData
|
38cc55889c0687819955b86656674d4b123b688b
|
028f609d846c8084b06c1b989c378682bc0d29c3
|
refs/heads/master
| 2020-12-30T14:45:29.203046
| 2017-05-12T10:53:12
| 2017-05-12T10:53:12
| 91,086,007
| 1
| 0
| null | 2017-05-12T11:45:56
| 2017-05-12T11:45:56
| null |
UTF-8
|
R
| false
| false
| 28,045
|
r
|
BGData.R
|
# Convert ff_matrix into an S4 class: setOldClass registers the S3 class
# "ff_matrix" formally so it can participate in the class union below.
setOldClass("ff_matrix")
#' An Abstract S4 Class Union of Matrix-Like Types.
#'
#' [geno-class] is a class union of several matrix-like types, many of them
#' suitable for very large datasets. Currently supported are
#' [LinkedMatrix::LinkedMatrix-class], [BEDMatrix::BEDMatrix-class],
#' [bigmemory::big.matrix-class], `ff_matrix`, and `matrix`.
#'
#' @seealso The `@@geno` slot of [BGData-class] that accepts [geno-class]
#' objects.
setClassUnion("geno", c("LinkedMatrix", "BEDMatrix", "big.matrix", "ff_matrix", "matrix"))
#' An S4 Class to Represent Phenotype and Genotype Data.
#'
#' This class is inspired by the phenotype/genotype file format .raw and its
#' binary companion (also known as .bed) of
#' [PLINK](https://www.cog-genomics.org/plink2). It is used by several
#' functions of this package such as [GWAS()] for performing a Genome Wide
#' Association Study or [getG()] for calculating a genomic relationship matrix.
#'
#' There are several ways to create an instance of this class:
#' * from arbitrary phenotype/genotype data using one of the constructors
#' `[BGData(...)][initialize,BGData-method]` or `[new("BGData",
#' ...)][initialize,BGData-method]`.
#' * from a BED file using [as.BGData()].
#' * from a previously saved [BGData-class] object using [load.BGData()].
#' * from multiple files (even a mixture of different file types) using
#' [LinkedMatrix::LinkedMatrix-class].
#' * from a .raw file (or a .ped-like file) using [readRAW()],
#' [readRAW_matrix()], or [readRAW_big.matrix()].
#'
#' A .ped file can be recoded to a .raw file in
#' [PLINK](https://www.cog-genomics.org/plink2) using `plink --file myfile
#' --recodeA`, or converted to a BED file using `plink --file myfile
#' --make-bed`. Conversely, a BED file can be transformed back to a .ped file
#' using `plink --bfile myfile --recode` or to a .raw file using `plink --bfile
#' myfile --recodeA` without losing information.
#'
#' @slot geno A [geno-class] object that contains genotypes. [geno-class] is a
#' class union of several matrix-like types, many of them suitable for very
#' large datasets. Currently supported are [LinkedMatrix::LinkedMatrix-class],
#' [BEDMatrix::BEDMatrix-class], [bigmemory::big.matrix-class], `ff_matrix`,
#' and `matrix`.
#' @slot pheno A `data.frame` that contains phenotypes.
#' @slot map A `data.frame` that contains a genetic map.
#' @example man/examples/BGData.R
#' @export BGData
#' @exportClass BGData
# Formal class definition; see the roxygen block above for slot semantics.
# setClass returns a generator function, so `BGData(...)` also serves as the
# exported constructor.
BGData <- setClass("BGData", slots = c(geno = "geno", pheno = "data.frame", map = "data.frame"))
#' Creates a New BGData Instance.
#'
#' This method is run when a [BGData-class] object is created using
#' `BGData(...)` or `new("BGData", ...)`.
#'
#' @param .Object The [BGData-class] instance to be initialized. This argument
#' is passed in by R and can be ignored, but still needs to be documented.
#' @param geno A [geno-class] object that contains genotypes. [geno-class] is a
#' class union of several matrix-like types, many of them suitable for very
#' large datasets. Currently supported are [LinkedMatrix::LinkedMatrix-class],
#' [BEDMatrix::BEDMatrix-class], [bigmemory::big.matrix-class], `ff_matrix`,
#' and `matrix`.
#' @param pheno A `data.frame` that contains phenotypes. A stub that only
#' contains an `IID` column populated with the rownames of `@@geno` will be
#' generated if missing.
#' @param map A `data.frame` that contains a genetic map. A stub that only
#' contains a `mrk` column populated with the colnames of `@@geno` will be
#' generated if missing.
#' @export
setMethod("initialize", "BGData", function(.Object, geno, pheno, map) {
    # geno must belong to the "geno" class union declared above
    if (!is(geno, "geno")) {
        stop("Only LinkedMatrix, BEDMatrix, big.matrix, ff_matrix, or regular matrix objects are allowed for geno.")
    }
    # Generate placeholder dimnames so that the pheno/map stubs below can
    # always be derived from them
    if (is.null(colnames(geno))) {
        colnames(geno) <- paste0("mrk_", seq_len(ncol(geno)))
    }
    if (is.null(rownames(geno))) {
        rownames(geno) <- paste0("id_", seq_len(nrow(geno)))
    }
    # Stub pheno: a single IID column taken from the genotype rownames
    if (missing(pheno)) {
        pheno <- data.frame(IID = rownames(geno), stringsAsFactors = FALSE)
    }
    # Stub map: a single mrk column taken from the genotype colnames
    if (missing(map)) {
        map <- data.frame(mrk = colnames(geno), stringsAsFactors = FALSE)
    }
    .Object@geno <- geno
    .Object@pheno <- pheno
    .Object@map <- map
    return(.Object)
})
# Determines the dimensions of a .ped-like file: n individuals (rows) and
# p markers (columns after the leading non-genotype columns). Values not
# supplied by the caller are auto-detected by scanning the file; when a
# header is present, p is always derived from the header line.
pedDims <- function(fileIn, header, n, p, sep = "", nColSkip = 6L) {
    nIndividuals <- if (is.null(n)) getLineCount(fileIn, header) else n
    if (header) {
        # The header line lists every column; markers are what remains
        # after dropping the first nColSkip entries
        nMarkers <- length(getFileHeader(fileIn, sep)) - nColSkip
    } else if (is.null(p)) {
        nMarkers <- getColumnCount(fileIn, sep) - nColSkip
    } else {
        nMarkers <- p
    }
    list(n = nIndividuals, p = nMarkers)
}
# Fills a pre-allocated BGData object from a .raw / .ped-like text file.
# The file is read sequentially over a single open connection, so each
# scan() call continues exactly where the previous one stopped: per row,
# the first nColSkip fields go into @pheno and the remaining p fields into
# @geno. NOTE(review): the connection is not closed if a scan() errors
# mid-file -- consider on.exit(close(pedFile)).
parseRAW <- function(BGData, fileIn, header, dataType, nColSkip = 6L, idCol = c(1L, 2L), sep = "", na.strings = "NA", verbose = FALSE, ...) {
    p <- ncol(BGData@geno)
    pedFile <- file(fileIn, open = "r")
    # Update colnames
    if (header) {
        headerLine <- scan(pedFile, nlines = 1L, what = character(), sep = sep, quiet = TRUE)
        colnames(BGData@pheno) <- headerLine[seq_len(nColSkip)]
        colnames(BGData@geno) <- headerLine[-(seq_len(nColSkip))]
    }
    # Parse file
    j <- seq_len(p)
    for (i in seq_len(nrow(BGData@geno))) {
        xSkip <- scan(pedFile, n = nColSkip, what = character(), sep = sep, quiet = TRUE)
        x <- scan(pedFile, n = p, what = dataType, sep = sep, na.strings = na.strings, quiet = TRUE)
        BGData@pheno[i, ] <- xSkip
        # Functional form of `[<-` so that extra arguments in `...`
        # (presumably nodes/index hints from readRAW for the LinkedMatrix
        # replacement method -- TODO confirm) can be forwarded
        BGData@geno <- `[<-`(BGData@geno, i, j, ..., value = x)
        if (verbose) {
            message("Subject ", i, " / ", nrow(BGData@geno))
        }
    }
    close(pedFile)
    # Update rownames: IDs are the idCol columns joined with "_"
    IDs <- apply(BGData@pheno[, idCol, drop = FALSE], 1L, paste, collapse = "_")
    rownames(BGData@pheno) <- IDs
    rownames(BGData@geno) <- IDs
    # Convert types in pheno (everything was read as character)
    BGData@pheno[] <- lapply(BGData@pheno, utils::type.convert, as.is = TRUE)
    return(BGData)
}
#' Creates a BGData Object From a .raw File or a .ped-Like File.
#'
#' Creates a [BGData-class] object from a .raw file (generated with `--recodeA`
#' in [PLINK](https://www.cog-genomics.org/plink2)). Other text-based file
#' formats are supported as well by tweaking some of the parameters as long as
#' the records of individuals are in rows, and phenotypes, covariates and
#' markers are in columns.
#'
#' The data included in the first couple of columns (up to `nColSkip`) is used
#' to populate the `@@pheno` slot of a [BGData-class] object, and the remaining
#' columns are used to fill the `@@geno` slot. If the first row contains a
#' header (`header = TRUE`), data in this row is used to determine the column
#' names for `@@pheno` and `@@geno`.
#'
#' `@@geno` can take several forms, depending on the function that is called
#' (`readRAW`, `readRAW_matrix`, or `readRAW_big.matrix`). The following
#' sections illustrate each function in detail.
#'
#' @section readRAW:
#' Genotypes are stored in a [LinkedMatrix::LinkedMatrix-class] object where
#' each node is an `ff` instance. Multiple `ff` files are used because the
#' array size in `ff` is limited to the largest integer which can be
#' represented on the system (`.Machine$integer.max`) and for genetic data this
#' limitation is often exceeded. The [LinkedMatrix::LinkedMatrix-class] package
#' makes it possible to link several `ff` files together by columns or by rows
#' and treat them similarly to a single matrix. By default a
#' [LinkedMatrix::ColumnLinkedMatrix-class] is used for `@@geno`, but the user
#' can modify this using the `linked.by` argument. The number of nodes to
#' generate is either specified by the user using the `nNodes` argument or
#' determined internally so that each `ff` object has a number of cells that is
#' smaller than `.Machine$integer.max / 1.2`. A folder (see `folderOut`) that
#' contains the binary flat files (named `geno_*.bin`) and an external
#' representation of the [BGData-class] object in `BGData.RData` is created.
#'
#' @section readRAW_matrix:
#' Genotypes are stored in a regular `matrix` object. Therefore, this function
#' will only work if the .raw file is small enough to fit into memory.
#'
#' @section readRAW_big.matrix:
#' Genotypes are stored in a filebacked [bigmemory::big.matrix-class] object.
#' A folder (see `folderOut`) that contains the binary flat file (named
#' `BGData.bin`), a descriptor file (named `BGData.desc`), and an external
#' representation of the [BGData-class] object in `BGData.RData` are created.
#'
#' @section Reloading a BGData object:
#' To reload a [BGData-class] object, it is recommended to use the
#' [load.BGData()] function instead of the [base::load()] function as
#' [base::load()] does not initialize `ff` objects or attach
#' [bigmemory::big.matrix-class] objects.
#'
#' @param fileIn The path to the plaintext file.
#' @param header Whether `fileIn` contains a header. Defaults to `TRUE`.
#' @param dataType The coding type of genotypes in `fileIn`. Use `integer()` or
#' `double()` for numeric coding. Alpha-numeric coding is currently not
#' supported for [readRAW()] and [readRAW_big.matrix()]: use the `--recodeA`
#' option of PLINK to convert the .ped file into a .raw file. Defaults to
#' `integer()`.
#' @param n The number of individuals. Auto-detect if `NULL`. Defaults to
#' `NULL`.
#' @param p The number of markers. Auto-detect if `NULL`. Defaults to `NULL`.
#' @param sep The field separator character. Values on each line of the file
#' are separated by this character. If `sep = ""` (the default for [readRAW()]
#' the separator is "white space", that is one or more spaces, tabs, newlines
#' or carriage returns.
#' @param na.strings The character string used in the plaintext file to denote
#' missing value. Defaults to `NA`.
#' @param nColSkip The number of columns to be skipped to reach the genotype
#' information in the file. Defaults to `6`.
#' @param idCol The index of the ID column. If more than one index is given,
#' both columns will be concatenated with "_". Defaults to `c(1, 2)`, i.e. a
#' concatenation of the first two columns.
#' @param nNodes The number of nodes to create. Auto-detect if `NULL`. Defaults
#' to `NULL`.
#' @param linked.by If `columns` a column-linked matrix
#' ([LinkedMatrix::ColumnLinkedMatrix-class]) is created, if `rows` a
#' row-linked matrix ([LinkedMatrix::RowLinkedMatrix-class]). Defaults to
#' `rows`.
#' @param folderOut The path to the folder where to save the binary files.
#' Defaults to the name of the input file (`fileIn`) without extension prefixed
#' with "BGData_".
#' @param outputType The `vmode` for `ff` and `type` for
#' [bigmemory::big.matrix-class]) objects. Default to `byte` for `ff` and
#' `char` for [bigmemory::big.matrix-class] objects.
#' @param dimorder The physical layout of the underlying `ff` object of each
#' node.
#' @param verbose Whether progress updates will be posted. Defaults to `FALSE`.
#' @seealso [load.BGData()] to load a previously saved [BGData-class] object,
#' [as.BGData()] to create [BGData-class] objects from non-text files (e.g. BED
#' files).
#' @example man/examples/readRAW.R
#' @export
readRAW <- function(fileIn, header = TRUE, dataType = integer(), n = NULL, p = NULL, sep = "", na.strings = "NA", nColSkip = 6L, idCol = c(1L, 2L), nNodes = NULL, linked.by = "rows", folderOut = paste0("BGData_", sub("\\.[[:alnum:]]+$", "", basename(fileIn))), outputType = "byte", dimorder = if (linked.by == "rows") 2L:1L else 1L:2L, verbose = FALSE) {
    # Create output directory (refuse to clobber an existing one)
    if (file.exists(folderOut)) {
        stop(paste("Output folder", folderOut, "already exists. Please move it or pick a different one."))
    }
    dir.create(folderOut)
    dims <- pedDims(fileIn = fileIn, header = header, n = n, p = p, sep = sep, nColSkip = nColSkip)
    # Determine number of nodes: each ff node must stay below
    # .Machine$integer.max cells; the /1.2 factor presumably leaves safety
    # headroom below that hard limit
    if (is.null(nNodes)) {
        if (linked.by == "columns") {
            chunkSize <- min(dims$p, floor(.Machine$integer.max / dims$n / 1.2))
            nNodes <- ceiling(dims$p / chunkSize)
        } else {
            chunkSize <- min(dims$n, floor(.Machine$integer.max / dims$p / 1.2))
            nNodes <- ceiling(dims$n / chunkSize)
        }
    } else {
        # User-specified node count: verify the implied chunk size still fits
        if (linked.by == "columns") {
            chunkSize <- ceiling(dims$p / nNodes)
            if (chunkSize * dims$n >= .Machine$integer.max / 1.2) {
                stop("More nodes are needed")
            }
        } else {
            chunkSize <- ceiling(dims$n / nNodes)
            if (chunkSize * dims$p >= .Machine$integer.max / 1.2) {
                stop("More nodes are needed")
            }
        }
    }
    dataType <- normalizeType(dataType)
    if (!typeof(dataType) %in% c("integer", "double")) {
        stop("dataType must be either integer() or double()")
    }
    if (!linked.by %in% c("columns", "rows")) {
        stop("linked.by must be either columns or rows")
    }
    # Prepare geno: a LinkedMatrix of ff-backed nodes created by
    # ffNodeInitializer inside folderOut
    geno <- LinkedMatrix::LinkedMatrix(nrow = dims$n, ncol = dims$p, nNodes = nNodes, linkedBy = linked.by, nodeInitializer = ffNodeInitializer, vmode = outputType, folderOut = folderOut, dimorder = dimorder)
    # Generate nodes / index once up front; both are forwarded to parseRAW
    # (and from there into the `[<-` replacement call) so they are not
    # recomputed for every row
    nodes <- LinkedMatrix::nodes(geno)
    # Generate index
    index <- LinkedMatrix::index(geno)
    # Prepare pheno (filled in by parseRAW)
    pheno <- as.data.frame(matrix(nrow = dims$n, ncol = nColSkip), stringsAsFactors = FALSE)
    # Construct BGData object
    BGData <- new("BGData", geno = geno, pheno = pheno)
    # Parse .raw file
    BGData <- parseRAW(BGData = BGData, fileIn = fileIn, header = header, dataType = dataType, nColSkip = nColSkip, idCol = idCol, sep = sep, na.strings = na.strings, nodes = nodes, index = index, verbose = verbose)
    # Save BGData object (plus provenance attributes) next to the ff files
    attr(BGData, "origFile") <- list(path = fileIn, dataType = typeof(dataType))
    attr(BGData, "dateCreated") <- date()
    save(BGData, file = paste0(folderOut, "/BGData.RData"))
    return(BGData)
}
#' @rdname readRAW
#' @export
# Reads a .raw / .ped-like file into a BGData object whose genotypes live
# in a plain in-memory matrix (the file must therefore fit into memory).
readRAW_matrix <- function(fileIn, header = TRUE, dataType = integer(), n = NULL, p = NULL, sep = "", na.strings = "NA", nColSkip = 6L, idCol = c(1L, 2L), verbose = FALSE) {
    dataType <- normalizeType(dataType)
    dims <- pedDims(fileIn = fileIn, header = header, n = n, p = p, sep = sep, nColSkip = nColSkip)
    # Allocate an all-NA genotype matrix plus a phenotype stub, then let
    # parseRAW fill both from the file
    stub <- new("BGData",
        geno = matrix(nrow = dims$n, ncol = dims$p),
        pheno = as.data.frame(matrix(nrow = dims$n, ncol = nColSkip), stringsAsFactors = FALSE))
    parseRAW(BGData = stub, fileIn = fileIn, header = header, dataType = dataType, nColSkip = nColSkip, idCol = idCol, sep = sep, na.strings = na.strings, verbose = verbose)
}
#' @rdname readRAW
#' @export
readRAW_big.matrix <- function(fileIn, header = TRUE, dataType = integer(), n = NULL, p = NULL, sep = "", na.strings = "NA", nColSkip = 6L, idCol = c(1L, 2L), folderOut = paste0("BGData_", sub("\\.[[:alnum:]]+$", "", basename(fileIn))), outputType = "char", verbose = FALSE) {
    # Refuse to clobber an existing output folder
    if (file.exists(folderOut)) {
        stop(paste("Output folder", folderOut, "already exists. Please move it or pick a different one."))
    }
    dataType <- normalizeType(dataType)
    if (!typeof(dataType) %in% c("integer", "double")) {
        stop("dataType must be either integer() or double()")
    }
    dims <- pedDims(fileIn = fileIn, header = header, n = n, p = p, sep = sep, nColSkip = nColSkip)
    # NOTE(review): these options are changed globally and never restored;
    # consider saving the old values and resetting them via on.exit().
    # allow.dimnames is needed so that parseRAW can set rownames on the
    # big.matrix -- TODO confirm
    options(bigmemory.typecast.warning = FALSE)
    options(bigmemory.allow.dimnames = TRUE)
    # Create output directory
    dir.create(folderOut)
    # Prepare geno: a filebacked big.matrix (BGData.bin + BGData.desc)
    geno <- bigmemory::filebacked.big.matrix(nrow = dims$n, ncol = dims$p, type = outputType, backingpath = folderOut, backingfile = "BGData.bin", descriptorfile = "BGData.desc")
    # Prepare pheno (filled in by parseRAW)
    pheno <- as.data.frame(matrix(nrow = dims$n, ncol = nColSkip), stringsAsFactors = FALSE)
    # Construct BGData object
    BGData <- new("BGData", geno = geno, pheno = pheno)
    # Parse .raw file
    BGData <- parseRAW(BGData = BGData, fileIn = fileIn, header = header, dataType = dataType, nColSkip = nColSkip, idCol = idCol, sep = sep, na.strings = na.strings, verbose = verbose)
    # Save BGData object (plus provenance attributes) next to the backing file
    attr(BGData, "origFile") <- list(path = fileIn, dataType = typeof(dataType))
    attr(BGData, "dateCreated") <- date()
    save(BGData, file = paste0(folderOut, "/BGData.RData"))
    return(BGData)
}
# Reads a PLINK .fam file into a data frame with the six standard column
# names. Uses data.table::fread when that package is available, falling
# back to utils::read.table otherwise.
loadFamFile <- function(path) {
    if (!file.exists(path)) {
        stop(path, " not found")
    }
    message("Extracting phenotypes from .fam file...")
    famColumns <- c("FID", "IID", "PAT", "MAT", "SEX", "PHENOTYPE")
    if (requireNamespace("data.table", quietly = TRUE)) {
        pheno <- data.table::fread(path, col.names = famColumns, data.table = FALSE, showProgress = FALSE)
    } else {
        pheno <- utils::read.table(path, col.names = famColumns, stringsAsFactors = FALSE)
    }
    return(pheno)
}
# Builds the @pheno stub for a BEDMatrix-backed BGData object: loads the
# .fam file stored next to the BED file if one exists, otherwise
# reconstructs FID/IID from the BEDMatrix rownames (coded as "FID_IID").
generatePheno <- function(x) {
    # Extract path to BED file
    bedPath <- attr(x, "path")
    # Try to load the companion .fam file; the pattern is anchored and
    # escaped so only the ".bed" extension is replaced (the original
    # unanchored sub(".bed", ...) could match elsewhere in the path)
    ex <- try({
        pheno <- loadFamFile(sub("\\.bed$", ".fam", bedPath))
    }, silent = TRUE)
    # inherits() is the robust way to test for a try-error condition
    if (inherits(ex, "try-error")) {
        # No .fam file: fall back to splitting rownames into FID and IID
        splits <- strsplit(rownames(x), "_")
        pheno <- data.frame(FID = sapply(splits, "[", 1L), IID = sapply(splits, "[", 2L), stringsAsFactors = FALSE)
    }
    return(pheno)
}
# Reads a PLINK .bim file into a data frame with the six standard column
# names. Uses data.table::fread when that package is available, falling
# back to utils::read.table otherwise.
loadBimFile <- function(path) {
    if (!file.exists(path)) {
        stop(path, " not found")
    }
    message("Extracting map from .bim file...")
    bimColumns <- c("chromosome", "snp_id", "genetic_distance", "base_pair_position", "allele_1", "allele_2")
    if (requireNamespace("data.table", quietly = TRUE)) {
        map <- data.table::fread(path, col.names = bimColumns, data.table = FALSE, showProgress = FALSE)
    } else {
        map <- utils::read.table(path, col.names = bimColumns, stringsAsFactors = FALSE)
    }
    return(map)
}
# Builds the @map stub for a BEDMatrix-backed BGData object: loads the
# .bim file stored next to the BED file if one exists, otherwise
# reconstructs snp_id/allele_1 from the BEDMatrix colnames, which are
# coded as "<snp_id>_<allele>" (the snp_id itself may contain "_").
generateMap <- function(x) {
    # Extract path to BED file
    bedPath <- attr(x, "path")
    # Try to load the companion .bim file (the original comment wrongly
    # said ".fam"); the pattern is anchored and escaped so only the
    # ".bed" extension is replaced
    ex <- try({
        map <- loadBimFile(sub("\\.bed$", ".bim", bedPath))
    }, silent = TRUE)
    # inherits() is the robust way to test for a try-error condition
    if (inherits(ex, "try-error")) {
        # No .bim file: split colnames; the last "_" segment is the
        # counted allele, everything before it is the marker name
        splits <- strsplit(colnames(x), "_")
        map <- data.frame(
            snp_id = sapply(splits, function(x) {
                paste0(x[seq_len(length(x) - 1L)], collapse = "_")
            }),
            allele_1 = sapply(splits, function(x) {
                x[length(x)]
            }),
            stringsAsFactors = FALSE
        )
    }
    return(map)
}
# Reads a PLINK-style alternate phenotype file into a data frame. Uses
# data.table::fread (auto-detects the header) when available, otherwise
# utils::read.table with a heuristic header check. Extra arguments in
# `...` are forwarded to the reader.
loadAlternatePhenotypeFile <- function(path, ...) {
    if (!file.exists(path)) {
        stop("Alternate phenotype file does not exist.")
    }
    message("Merging alternate phenotype file...")
    if (requireNamespace("data.table", quietly = TRUE)) {
        # fread detects the header automatically
        alternatePhenotypes <- data.table::fread(path, data.table = FALSE, showProgress = FALSE, ...)
    } else {
        # read.table needs an explicit header flag: treat the file as
        # having a header iff its first line starts with FID and IID
        # columns (assign the logical directly, with <- rather than =)
        hasHeader <- grepl("FID\\s+IID", readLines(path, n = 1L))
        alternatePhenotypes <- utils::read.table(path, header = hasHeader, stringsAsFactors = FALSE, ...)
    }
    return(alternatePhenotypes)
}
# Merges alternate phenotypes into the main phenotype frame, matching on
# the first two columns (FID/IID) while preserving the original row order
# of `pheno`.
mergeAlternatePhenotypes <- function(pheno, alternatePhenotypes) {
    # merge() does not guarantee the input order even with sort = FALSE,
    # so tag each row with its original position first
    pheno$.sortColumn <- seq_len(nrow(pheno))
    merged <- merge(pheno, alternatePhenotypes, by = c(1L, 2L), all.x = TRUE)
    # Restore the original order, then drop the helper column again
    merged <- merged[order(merged$.sortColumn), ]
    merged[, names(merged) != ".sortColumn"]
}
#' Convert Other Objects to BGData Objects.
#'
#' Converts other objects to [BGData-class] objects by loading supplementary
#' phenotypes and map files referenced by the object to be used for the
#' `@@pheno` and `@@map` slot, respectively. Currently supported are
#' [BEDMatrix::BEDMatrix-class] objects, plain or nested in
#' [LinkedMatrix::ColumnLinkedMatrix-class] objects.
#'
#' The .ped and .raw formats only allows for a single phenotype. If more
#' phenotypes are required it is possible to store them in an [alternate
#' phenotype file](https://www.cog-genomics.org/plink2/input#pheno). The path
#' to such a file can be provided with `alternatePhenotypeFile` and will be
#' merged with the data in the `@@pheno` slot.
#'
#' For [BEDMatrix::BEDMatrix-class] objects: If a .fam file (which corresponds
#' to the first six columns of a .ped or .raw file) of the same name and in the
#' same directory as the BED file exists, the `@@pheno` slot will be populated
#' with the data stored in that file. Otherwise a stub that only contains an
#' `IID` column populated with the rownames of `@@geno` will be generated. The
#' same will happen for a .bim file for the `@@map` slot.
#'
#' For [LinkedMatrix::ColumnLinkedMatrix-class] objects: See the case for
#' [BEDMatrix::BEDMatrix-class] objects, but only the .fam file of the first
#' node of the [LinkedMatrix::LinkedMatrix-class] will be read and used for the
#' `@@pheno` slot, and the .bim files of all nodes will be combined and used
#' for the `@@map` slot.
#'
#' @param x An object. Currently supported are [BEDMatrix::BEDMatrix-class]
#' objects, plain or nested in [LinkedMatrix::ColumnLinkedMatrix-class]
#' objects.
#' @param alternatePhenotypeFile Path to an [alternate phenotype
#' file](https://www.cog-genomics.org/plink2/input#pheno).
#' @param ... Additional arguments to the [utils::read.table()] or
#' [data.table::fread()] call (if data.table package is installed) call to
#' parse the alternate pheno file.
#' @return A [BGData-class] object.
#' @seealso [readRAW()] to convert text files to [BGData-class] objects.
#' @example man/examples/as.BGData.R
#' @export
# S3 generic; dispatches on the class of `x` (BEDMatrix,
# ColumnLinkedMatrix, or RowLinkedMatrix methods below).
as.BGData <- function(x, alternatePhenotypeFile = NULL, ...) {
    UseMethod("as.BGData")
}
#' @rdname as.BGData
#' @export
# Wraps a single BEDMatrix in a BGData object, pulling phenotypes from
# the companion .fam file and the map from the companion .bim file (with
# rowname/colname fallbacks handled by the helpers).
as.BGData.BEDMatrix <- function(x, alternatePhenotypeFile = NULL, ...) {
    # Phenotype stub from the .fam file (or rownames fallback)
    fam <- generatePheno(x)
    # Map stub from the .bim file (or colnames fallback)
    map <- generateMap(x)
    if (!is.null(alternatePhenotypeFile)) {
        # Merge additional phenotypes keyed on FID/IID
        fam <- mergeAlternatePhenotypes(fam, loadAlternatePhenotypeFile(alternatePhenotypeFile, ...))
    }
    BGData(geno = x, pheno = fam, map = map)
}
#' @rdname as.BGData
#' @export
# Wraps a column-linked matrix of BEDMatrix nodes in a BGData object.
# Columns are markers, so the .fam file of the first node stands in for
# all individuals while the .bim files of all nodes are stacked.
as.BGData.ColumnLinkedMatrix <- function(x, alternatePhenotypeFile = NULL, ...) {
    # For now, all elements have to be of type BEDMatrix; vapply +
    # inherits is robust to nodes whose class() attribute has length > 1
    # (the original sapply(class(node)) == "BEDMatrix" comparison is not)
    isBEDMatrix <- vapply(x, function(node) inherits(node, "BEDMatrix"), logical(1L))
    if (!all(isBEDMatrix)) {
        stop("Only BEDMatrix instances are supported as elements of the LinkedMatrix right now.")
    }
    # Read in the fam file of the first node
    message("Extracting phenotypes from .fam file, assuming that the .fam file of the first BEDMatrix instance is representative of all the other nodes...")
    fam <- suppressMessages(generatePheno(x[[1L]]))
    # Read in and stack the map files of all nodes
    message("Extracting map from .bim files...")
    map <- do.call("rbind", lapply(x, function(node) {
        suppressMessages(generateMap(node))
    }))
    # Load and merge alternate phenotype file
    if (!is.null(alternatePhenotypeFile)) {
        alternatePhenotypes <- loadAlternatePhenotypeFile(alternatePhenotypeFile, ...)
        fam <- mergeAlternatePhenotypes(fam, alternatePhenotypes)
    }
    BGData(geno = x, pheno = fam, map = map)
}
#' @rdname as.BGData
#' @export
# Wraps a row-linked matrix of BEDMatrix nodes in a BGData object. Rows
# are individuals, so the .fam files of all nodes are stacked while the
# .bim file of the first node stands in for all markers.
as.BGData.RowLinkedMatrix <- function(x, alternatePhenotypeFile = NULL, ...) {
    # For now, all elements have to be of type BEDMatrix; vapply +
    # inherits is robust to nodes whose class() attribute has length > 1
    # (the original sapply(class(node)) == "BEDMatrix" comparison is not)
    isBEDMatrix <- vapply(x, function(node) inherits(node, "BEDMatrix"), logical(1L))
    if (!all(isBEDMatrix)) {
        stop("Only BEDMatrix instances are supported as elements of the LinkedMatrix right now.")
    }
    # Read in and stack the fam files of all nodes
    message("Extracting phenotypes from .fam files...")
    fam <- do.call("rbind", lapply(x, function(node) {
        suppressMessages(generatePheno(node))
    }))
    # Read in the map file of the first node
    message("Extracting map from .bim file, assuming that the .bim file of the first BEDMatrix instance is representative of all the other nodes...")
    map <- suppressMessages(generateMap(x[[1L]]))
    # Load and merge alternate phenotype file
    if (!is.null(alternatePhenotypeFile)) {
        alternatePhenotypes <- loadAlternatePhenotypeFile(alternatePhenotypeFile, ...)
        fam <- mergeAlternatePhenotypes(fam, alternatePhenotypes)
    }
    BGData(geno = x, pheno = fam, map = map)
}
#' Loads BGData (and Other) Objects from .RData Files.
#'
#' This function is similar to [base::load()], but also initializes the
#' different types of objects that the `@@geno` slot of a [BGData-class] object
#' can take. Currently supported are `ff_matrix`,
#' [bigmemory::big.matrix-class], and [BEDMatrix::BEDMatrix-class] objects. If
#' the object is of type [LinkedMatrix::LinkedMatrix-class], all nodes will be
#' initialized with their appropriate method.
#'
#' @param file The name of the .RData file to be loaded.
#' @param envir The environment where to load the data.
#' @export
# Loads an .RData file like base::load(), but re-initializes the @geno
# slot of any BGData object found (file-backed genotype storage such as
# ff / big.matrix / BEDMatrix must be re-attached after deserialization).
load.BGData <- function(file, envir = parent.frame()) {
    # Load data into a scratch environment first so objects can be
    # post-processed before being exposed to the caller
    loadingEnv <- new.env()
    load(file = file, envir = loadingEnv)
    names <- ls(envir = loadingEnv)
    for (name in names) {
        object <- get(name, envir = loadingEnv)
        # Initialize genotypes of BGData objects; inherits() is the
        # robust replacement for class(object) == "BGData"
        if (inherits(object, "BGData")) {
            object@geno <- initializeGeno(object@geno, path = dirname(file))
        }
        # Assign object to envir
        assign(name, object, envir = envir)
    }
    message("Loaded objects: ", paste0(names, collapse = ", "))
}
# Internal S3 generic: re-attaches the (possibly file-backed) genotype
# storage of a freshly deserialized object, dispatching on backend type.
initializeGeno <- function(x, ...) {
    UseMethod("initializeGeno")
}
# Re-initializes every node of a LinkedMatrix in place; relies on the
# LinkedMatrix `[[<-` replacement method to keep the linked structure
# intact.
initializeGeno.LinkedMatrix <- function(x, path, ...) {
    for (i in seq_len(LinkedMatrix::nNodes(x))) {
        x[[i]] <- initializeGeno(x[[i]], path = path)
    }
    return(x)
}
# Absolute paths to ff files are not stored, so the ff objects have to be
# loaded from the same directory as the RData file.
initializeGeno.ff_matrix <- function(x, path, ...) {
    # Store current working directory and set working directory to path
    # (ff filenames are stored relative to the RData file's directory)
    cwd <- getwd()
    setwd(path)
    # Restore the working directory even if open.ff() fails -- the
    # original restored it only on the success path, leaking the changed
    # working directory on error
    on.exit(setwd(cwd), add = TRUE)
    # Open ff object
    ff::open.ff(x)
    return(x)
}
# Re-attaches a filebacked big.matrix via its descriptor file, which
# readRAW_big.matrix always writes as "BGData.desc" inside `path`.
initializeGeno.big.matrix <- function(x, path, ...) {
    return(bigmemory::attach.big.matrix(paste0(path, "/BGData.desc")))
}
# Rebuilds a BEDMatrix from the path/dims/dnames attributes carried on
# the serialized object, then restores its dimnames (the underlying file
# mapping does not survive serialization).
initializeGeno.BEDMatrix <- function(x, ...) {
    dnames <- attr(x, "dnames")
    dims <- attr(x, "dims")
    path <- attr(x, "path")
    x <- BEDMatrix::BEDMatrix(path = path, n = dims[1L], p = dims[2L])
    dimnames(x) <- dnames
    return(x)
}
# In-memory backends (e.g. a plain matrix) need no re-initialization.
initializeGeno.default <- function(x, ...) {
    return(x)
}
# Node initializer passed to LinkedMatrix::LinkedMatrix() by readRAW():
# creates one ff-backed node file ("geno_<i>.bin") inside folderOut.
ffNodeInitializer <- function(nodeIndex, nrow, ncol, vmode, folderOut, ...) {
    filename <- paste0("geno_", nodeIndex, ".bin")
    node <- ff::ff(dim = c(nrow, ncol), vmode = vmode, filename = paste0(folderOut, "/", filename), ...)
    # Change ff path to a relative one so the BGData folder stays
    # relocatable (an absolute path would break when the folder is moved)
    bit::physical(node)$filename <- filename
    return(node)
}
|
c765eaebe08b4077758db0253ff13f276db917b1
|
bf86fb3091905ddecfbcc7c7047f17f82ceffe88
|
/man/pathInterpolate.Rd
|
81a1310eceaa798f34eed66d84595a8208ea4806
|
[] |
no_license
|
cbhurley/condvis2
|
0580842f55b2ee7e4ea449eb2b5cba763d565bca
|
60c370cb279fa29337ac0ad7af403ef633274b68
|
refs/heads/master
| 2022-09-23T03:22:43.790073
| 2022-09-13T16:34:31
| 2022-09-13T16:34:31
| 160,338,346
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 896
|
rd
|
pathInterpolate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/condpath.R
\name{pathInterpolate}
\alias{pathInterpolate}
\alias{pathInterpolate.default}
\alias{pathInterpolate.factor}
\alias{pathInterpolate.data.frame}
\title{Interpolation}
\usage{
pathInterpolate(x, ninterp = 4)
\method{pathInterpolate}{default}(x, ninterp = 4L)
\method{pathInterpolate}{factor}(x, ninterp = 4L)
\method{pathInterpolate}{data.frame}(x, ninterp = 4L)
}
\arguments{
\item{x}{a numeric or factor vector or dataframe}
\item{ninterp}{number of interpolated steps}
}
\value{
interpolated version of x
}
\description{
Interpolation
}
\section{Methods (by class)}{
\itemize{
\item \code{pathInterpolate(default)}: Default interpolate method
\item \code{pathInterpolate(factor)}: pathInterpolate method for factor
\item \code{pathInterpolate(data.frame)}: pathInterpolate method for data.frame
}}
|
e59322f54eeaef017203a6f6effcd03b8bcf0847
|
0b89292d1fbcd390a0c0c964e46bc54620ffb706
|
/code/functions/lvrates.R
|
498bc1e652cb6948898debd5bb4bb76f8adfa41f
|
[] |
no_license
|
JSHuisman/Recorder
|
90c008eb733bf642e00d5a368b514f1c19fd7693
|
06766477df5656ca54345d5a8ed1b8416dd19a77
|
refs/heads/main
| 2023-08-05T20:58:34.636453
| 2021-11-24T14:30:30
| 2021-11-24T14:30:30
| 344,049,680
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,805
|
r
|
lvrates.R
|
###########################################
## lvrates.R
##
## Returns a function that encodes the transition
## rates of migration, birth, death, and plasmid transmission
## processes for use in the adaptive-tau simulation
##
## This function includes a carrying capacity.
##
## Author: Jana S. Huisman
## Last update: Jan 2021
###########################################
# When specifying an experiment with 2 chromosomal tags
# and 1 tagged plasmid; i.e., Ni=2, Nj=1
# lvrates_carrying_cap_function(2,1)
# the resulting function looks as follows:
# function(x, params, t) {
# return(c(x["D1"]*params$birth_rate*(1+0.0833333333333333),
# x["R1"]*params$birth_rate*(1+0.0833333333333333) + params$migration_rate,
# x["R2"]*params$birth_rate*(1+0.0833333333333333) + params$migration_rate,
# x["T11"]*params$birth_rate*(1+0.0833333333333333),
# x["T21"]*params$birth_rate*(1+0.0833333333333333),
# x["D1"]*(params$death_rate+((x["D1"])/1e+09)*params$birth_rate),
# x["R1"]*(params$death_rate+((x["R1"]+x["R2"]+x["T11"]+x["T21"])/1e+09)*params$birth_rate),
# x["R2"]*(params$death_rate+((x["R1"]+x["R2"]+x["T11"]+x["T21"])/1e+09)*params$birth_rate),
# x["T11"]*(params$death_rate+((x["R1"]+x["R2"]+x["T11"]+x["T21"])/1e+09)*params$birth_rate),
# x["T21"]*(params$death_rate+((x["R1"]+x["R2"]+x["T11"]+x["T21"])/1e+09)*params$birth_rate),
# (params$conj_donor*x["D1"]+params$conj_trans*(x["T11"]+x["T21"]))*x["R1"],
# (params$conj_donor*x["D1"]+params$conj_trans*(x["T11"]+x["T21"]))*x["R2"])) }
#The order of these rates must match the order
# in which transitions were specified.
#D, R, T - birth, death, transition
###########################################
# Build the transition-rate function for the adaptive-tau simulation.
#
# Assembles (as source text, then parses) a function(x, params, t) that
# returns the vector of event rates in the order:
#   births  D1..DNj, R1..RNi, T11..TNiNj  (T loop: i outer, j inner),
#   deaths  in the same order,
#   plasmid transfer into each recipient (i outer, j inner).
# This order must match the transition list handed to the simulator.
#
# Ni              number of chromosomal tags (recipient populations R1..RNi)
# Nj              number of tagged plasmids (donor populations D1..DNj)
# carrying_cap_D  carrying capacity entering the density-dependent death
#                 term of the donor populations
# carrying_cap_R  shared carrying capacity of all recipient and
#                 transconjugant populations
# leftover_birth  extra fractional birth rate added to every population
lvrates_function <- function(Ni, Nj, carrying_cap_D = 1e9,
                             carrying_cap_R = 1e9, leftover_birth = 1./12){
  # Guard against empty ranges: 1:0 would silently yield c(1, 0).
  stopifnot(Ni >= 1, Nj >= 1)
  chromosomal_range <- seq_len(Ni)
  plasmid_range <- seq_len(Nj)
  ######################
  # Pre-write the lengthy population-sum terms needed for the
  # carrying-capacity part of the death rates.
  # Total donor population, e.g. (x["D1"]+x["D2"]).
  all_d_population_terms <- paste0('(', paste0('x["D', plasmid_range, '"]', collapse = '+'), ')')
  # Total recipient + transconjugant population: all R and T populations
  # share one carrying capacity.
  all_rt_population_terms <- paste0('(', paste0('x["R', chromosomal_range, '"]', collapse = '+'))
  for (j in plasmid_range){
    for (i in chromosomal_range) {
      all_rt_population_terms <- paste0(all_rt_population_terms, '+x["T', i, j, '"]')
    }
  }
  all_rt_population_terms <- paste0(all_rt_population_terms, ')')
  ######################
  function_def <- 'function(x, params, t) {
return(c('
  ######################
  # Birth processes: donors, then recipients (immigration is split evenly
  # across the Ni recipient populations), then transconjugants.
  function_def <- paste0(function_def,
                         paste0('x["D', plasmid_range, '"]*params$birth_rate*(1+', leftover_birth, '), ', collapse = ''))
  function_def <- paste0(function_def,
                         paste0('x["R', chromosomal_range, '"]*params$birth_rate*(1+', leftover_birth, ') + params$migration_rate/', Ni, ', ', collapse = ''))
  for (i in chromosomal_range) {
    for (j in plasmid_range) {
      function_def <- paste0(function_def, 'x["T', i, j, '"]*params$birth_rate*(1+', leftover_birth, '), ')
    }
  }
  ######################
  # Death processes: baseline death rate plus a logistic (carrying-capacity)
  # term proportional to the relevant total population.
  function_def <- paste0(function_def,
                         paste0('x["D', plasmid_range, '"]*(params$death_rate+(', all_d_population_terms, '/', carrying_cap_D, ')*params$birth_rate), ', collapse = ''))
  function_def <- paste0(function_def,
                         paste0('x["R', chromosomal_range, '"]*(params$death_rate+(', all_rt_population_terms, '/', carrying_cap_R, ')*params$birth_rate), ', collapse = ''))
  for (i in chromosomal_range) {
    for (j in plasmid_range) {
      function_def <- paste0(function_def, 'x["T', i, j, '"]*(params$death_rate+(', all_rt_population_terms, '/', carrying_cap_R, ')*params$birth_rate), ')
    }
  }
  ######################
  # For each plasmid j: the sum of all transconjugant populations carrying it.
  transconjugant_terms <- lapply(plasmid_range, function(j) {
    paste0('x["T', chromosomal_range, j, '"]', collapse = '+')
  })
  ######################
  # Interaction (plasmid transfer): each recipient R_i acquires plasmid j
  # from donors and from transconjugants already carrying j.
  for (i in chromosomal_range) {
    for (j in plasmid_range) {
      function_def <- paste0(function_def, '(params$conj_donor*x["D', j, '"]',
                             '+params$conj_trans*(', transconjugant_terms[[j]], '))*x["R', i, '"]', ',')
    }
  }
  # Replace the trailing comma of the last rate with the closing of the
  # generated function body.
  function_end <- ')) }'
  function_def <- sub(pattern = ',$', replacement = function_end, x = function_def)
  # NOTE: rates are generated as text and parsed; keep the generated
  # expressions in sync with the transition list used by the simulation.
  eval(parse(text = function_def))
}
|
9c5e5ebf9f66099ac42dfdae58b13d6f21a89b20
|
aa8a256304ebfcdb556269a5bef26f56940f031c
|
/R/getWaterSpeedRecordFromWiki.R
|
25a4ad8b3d5f83a05903fb3ea689ca5be429316f
|
[] |
no_license
|
canardRapide/speedRecords
|
2aad42409ab87df5206a91d2d16486b2e9fb9f9c
|
7c35334c683d5c143f6708f111ed96104ff6bb15
|
refs/heads/master
| 2021-01-10T13:26:37.221851
| 2016-02-22T05:20:16
| 2016-02-22T05:20:16
| 51,705,207
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 859
|
r
|
getWaterSpeedRecordFromWiki.R
|
getWaterSpeedRecordFromWiki <- function() {
  # Scrape the progression of the water speed record (prop-driven and jet
  # hydroplane) from Wikipedia.
  # Returns a data.frame with:
  #   year     - date of the record as a fractional year
  #              (day-of-year / 365; leap years are approximated)
  #   speedMph - record speed in miles per hour
  library(rvest)
  url <- "https://en.wikipedia.org/wiki/Water_speed_record"
  # read_html() replaces rvest::html(), which was deprecated and has since
  # been removed from the package.
  tables <- read_html(url) %>% html_nodes(".wikitable") %>% html_table(fill = TRUE)
  record_table <- tables[[1]]
  # Column 5 holds the date ("dd Month yyyy"), column 1 the speed text,
  # e.g. "200.12 mph (322.06 km/h)" -- layout assumed from the current
  # page; confirm if Wikipedia reformats the table.
  record_date <- as.vector(record_table[[5]])
  speed_text <- as.vector(record_table[[1]])
  # Vectorised date parsing; unparseable entries yield NA, matching the
  # behaviour of the previous row-by-row append() loop.
  parsed <- as.POSIXlt(record_date, format = "%d %B %Y")
  year <- 1900 + parsed$year + parsed$yday / 365
  # Strip the unit and the parenthesised km/h conversion before coercion.
  speedMph <- as.numeric(gsub("mph \\(.*\\)", "", speed_text))
  data.frame(year, speedMph)
}
|
02571e4232357a192bf719a90c46150c37d87ec8
|
ed28666d9201bf050c305f0740756f7730a66ef3
|
/NatureEE-data-archive/Run203071/JAFSdata/JAFSnumPerPatch30360.R
|
4cacb45bed7220882d8e2e6ec6aaede0209f7090
|
[] |
no_license
|
flaxmans/NatureEE2017
|
7ee3531b08d50b3022d5c23dbcf177156c599f10
|
b3183abe6bb70f34b400d0f5ec990cce45e10b33
|
refs/heads/master
| 2021-01-12T08:02:37.153781
| 2017-01-27T15:48:55
| 2017-01-27T15:48:55
| 77,110,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33
|
r
|
JAFSnumPerPatch30360.R
|
numPerPatch30360 <- c(2444,2556)
|
ccb041f267a0d51a7f3e9add27ca94bb4e567a31
|
bff50b46f43920f28d23fdf01478985fb1abe085
|
/AnalisisDEF2_new_frailty.R
|
b8199d471c2ef5465fe0c55d52d20175a65ea025
|
[] |
no_license
|
dmorinya/miRecSurv
|
352970253e38ea64eee0cbd617f599458e273700
|
b65ab48f655ef56e98dc3158fcb57af80df3ddbb
|
refs/heads/main
| 2023-08-23T02:15:34.992998
| 2021-11-02T11:00:03
| 2021-11-02T11:00:03
| 420,038,521
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 168,282
|
r
|
AnalisisDEF2_new_frailty.R
|
# Simulation-study setup: survsim (survival data generation),
# COMPoissonReg/compoisson (Conway-Maxwell-Poisson models), survival
# (Andersen-Gill fits), doParallel/foreach (parallel replicates),
# WriteXLS (result export).
library(survsim)
library(data.table)
library(COMPoissonReg)
library(compoisson)
library(survival)
library(doParallel)
library(MASS)
library(WriteXLS)
# Use every available core for the %dopar% loops below.
nCores <- detectCores()
registerDoParallel(nCores)
#setwd("/home/dmorina/Documents/Docència/Tesis/Gilma/Articles/3 Recurrent events")
#source("scripts/genResultsDEF1_new.R") # CLUSTER
# Data-generating/estimation function used by all frailty scenarios below.
source("scripts/genResultsDEF1_new_frailty.R") # FRAILTY
# Number of simulated datasets (replicates) per scenario.
nsim <- 100
########## SJWEH POPULATIONS #########
# The d.* / b0.* / a.* triples are, presumably, the event- and
# censoring-time distribution names with their location (b0) and ancillary
# (a) parameters passed through to the survsim-based generator -- confirm
# in genResultsDEF1_new_frailty().  Sets 5 and 6 are defined here for use
# further down the script.
# Respiratory:
d.ev4 <- c('lnorm','llogistic','weibull')
b0.ev4 <- c(7.195, 6.583, 6.678)
a.ev4 <- c(1.498,.924,.923)
d.cens4 <- c('weibull','weibull','weibull')
b0.cens4 <- c(7.315, 6.975, 6.712)
a.cens4 <- c(1.272,1.218,1.341)
# Musculoskeletal:
d.ev5 <- c('llogistic','weibull','lnorm')
b0.ev5 <- c(7.974, 7.109, 5.853)
a.ev5 <- c(.836,.758,1.989)
d.cens5 <- c('weibull','weibull','weibull')
b0.cens5 <- c(7.283, 6.900, 6.507)
a.cens5 <- c(1.332,1.156,1.498)
# Mental:
d.ev6 <- c('lnorm','lnorm','lnorm')
b0.ev6 <- c(8.924, 6.650, 6.696)
a.ev6 <- c(1.545,2.399,2.246)
d.cens6 <- c('weibull','weibull','weibull')
b0.cens6 <- c(7.287, 6.530, 6.212)
a.cens6 <- c(1.352,1.177,1.991)
########## SJWEH: LOW DEPENDENCE (CP) ##########

## Summarise one batch of simulation replicates. -------------------------
## `results`: row-bound output of genResultsDEF1_new_frailty(), one row per
## replicate, holding point estimates gcoef<M>.<v> and standard errors
## gsd<M>.<v> for each method M in {AG, COMPois, COMPoisB} and covariate
## v in {x, x1, x2} (true effects 0.25, 0.5, 0.75).
## Returns the per-method summary data.frame: mean estimates, relative bias
## in %, mean 95% CI length ("LPI"), empirical coverage, and their averages
## across the three covariates.  Local variable names are kept verbatim so
## data.frame() produces the same column names as the original
## copy-pasted per-scenario code.
summarise_frailty_results <- function(results) {
  methods <- c("AG", "COMPois", "COMPoisB")

  # Mean point estimate per method for covariate `v`, as a named 3x1
  # matrix (same shape and row names the original rbind() calls produced).
  est_means <- function(v) {
    rbind(AG       = mean(results[[paste0("gcoefAG.", v)]]),
          COMPois  = mean(results[[paste0("gcoefCOMPois.", v)]]),
          COMPoisB = mean(results[[paste0("gcoefCOMPoisB.", v)]]))
  }

  # 95% Wald CI bounds for method `m`, covariate `v`; identical arithmetic
  # to the original (coef -/+ 1.96 * sd) so floating-point results agree.
  ci_bounds <- function(m, v) {
    coef <- results[[paste0("gcoef", m, ".", v)]]
    se   <- results[[paste0("gsd", m, ".", v)]]
    list(lower = coef - 1.96 * se, upper = coef + 1.96 * se)
  }

  # Mean CI length ("LPI") per method for covariate `v`.
  lpi_means <- function(v) {
    vals <- vapply(methods, function(m) {
      ci <- ci_bounds(m, v)
      mean(ci$upper - ci$lower)
    }, numeric(1))
    rbind(AG = vals[["AG"]], COMPois = vals[["COMPois"]], COMPoisB = vals[["COMPoisB"]])
  }

  # Empirical coverage per method: proportion of replicates whose CI
  # contains the true effect `true`.
  cov_means <- function(v, true) {
    vals <- vapply(methods, function(m) {
      ci <- ci_bounds(m, v)
      sum(ifelse(ci$lower <= true & true <= ci$upper, 1, 0)) / nrow(results)
    }, numeric(1))
    rbind(AG = vals[["AG"]], COMPois = vals[["COMPois"]], COMPoisB = vals[["COMPoisB"]])
  }

  c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
  c1 <- est_means("x")
  c2 <- est_means("x1")
  c3 <- est_means("x2")
  # Relative bias in % with respect to the true effects 0.25 / 0.5 / 0.75.
  bias25 <- ((c1 - .25) / .25) * 100
  bias50 <- ((c2 - .5) / .5) * 100
  bias75 <- ((c3 - .75) / .75) * 100
  LPIx <- lpi_means("x")
  LPIx1 <- lpi_means("x1")
  LPIx2 <- lpi_means("x2")
  cov.x <- cov_means("x", 0.25)
  cov.x1 <- cov_means("x1", 0.5)
  cov.x2 <- cov_means("x2", 0.75)
  mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
  mean.LPI <- (LPIx + LPIx1 + LPIx2) / 3
  mean.cob <- (cov.x + cov.x1 + cov.x2) * 100 / 3
  data.frame(c0, c1, c2, c3, bias25, bias50, bias75,
             LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2,
             mean.bias, mean.LPI, mean.cob)
}

## Run one scenario end to end. ------------------------------------------
## Reproduces one of the original copy-pasted blocks: simulate `nsim`
## replicates in parallel, write the raw replicate table to
## results/SJWEH/res<label>.xls, summarise, assign the summary to the
## global variable res<label>.ag (as the original script did) and write it
## to results/SJWEH/results<label>.xls.  `bef`, `ft` and `old` are passed
## straight through to genResultsDEF1_new_frailty(); their semantics are
## defined there.
run_frailty_scenario <- function(label, bef, ft, old) {
  # .export makes the generator and the scenario parameter vectors
  # available on non-fork (PSOCK) workers; at top level the original code
  # got this for free because %dopar% ran in the global environment.
  results <- foreach(k = 1:nsim, .combine = rbind,
                     .export = c("genResultsDEF1_new_frailty", "d.ev4", "d.cens4",
                                 "b0.ev4", "b0.cens4", "a.ev4", "a.cens4")) %dopar%
    genResultsDEF1_new_frailty(k, nm = 1000, bef = bef, ft = ft, old = old,
                               d.ev = d.ev4, d.cens = d.cens4,
                               b0.ev = b0.ev4, b0.cens = b0.cens4,
                               a.ev = a.ev4, a.cens = a.cens4, m = 5)
  # Raw replicate results are written before any post-processing, exactly
  # as in the original script (worksheet name "results").
  WriteXLS(results, paste0("results/SJWEH/res", label, ".xls"))
  summary_df <- summarise_frailty_results(results)
  # Preserve the res<label>.ag globals and the worksheet name the
  # original script produced.
  assign(paste0("res", label, ".ag"), summary_df, envir = .GlobalEnv)
  WriteXLS(summary_df, paste0("results/SJWEH/results", label, ".xls"),
           SheetNames = paste0("res", label, ".ag"))
  invisible(summary_df)
}

## Scenario grid replacing six near-identical copy-pasted blocks:
## bef in {0.1, 0.3, 0.5} crossed with ft in {730, 1825}; old fixed at 730.
frailty_scenarios <- data.frame(
  label = c("1111", "1112", "1113", "1121", "1122", "1123"),
  bef   = c(.1, .3, .5, .1, .3, .5),
  ft    = c(730, 730, 730, 1825, 1825, 1825),
  old   = 730,
  stringsAsFactors = FALSE
)
for (i in seq_len(nrow(frailty_scenarios))) {
  run_frailty_scenario(frailty_scenarios$label[i], frailty_scenarios$bef[i],
                       frailty_scenarios$ft[i], frailty_scenarios$old[i])
}
## Scenario 1211: parameter set 4, 10% exposed (bef = .1), follow-up ft = 730
## days, washout old = 3650 days.  Simulate nsim replicates in parallel,
## dump the raw per-replicate estimates, then summarize bias, CI length
## (LPI) and coverage for the three frailty models (AG, ComP-CP, ComP-GT).
results <- foreach(k = 1:nsim, .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .1, ft = 730, old = 3650,
    d.ev = d.ev4, d.cens = d.cens4, b0.ev = b0.ev4, b0.cens = b0.cens4,
    a.ev = a.ev4, a.cens = a.cens4, m = 5)
WriteXLS(results, "results/SJWEH/res1211.xls")
## Row labels of the summary table.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
mod.stems <- c(AG = "AG", COMPois = "COMPois", COMPoisB = "COMPoisB")
## Build a 3 x 1 column (rownames AG/COMPois/COMPoisB) from a per-model summary.
by.model <- function(f) cbind(vapply(mod.stems, f, numeric(1)))
## 95% Wald confidence limits for model `m` and covariate `v` ("x", "x1", "x2").
ci.lo <- function(m, v) results[[paste0("gcoef", m, ".", v)]] - 1.96 * results[[paste0("gsd", m, ".", v)]]
ci.hi <- function(m, v) results[[paste0("gcoef", m, ".", v)]] + 1.96 * results[[paste0("gsd", m, ".", v)]]
## Mean estimated coefficients (true values: x = .25, x1 = .5, x2 = .75).
c1 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x")]]))
c2 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x1")]]))
c3 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x2")]]))
## Relative bias (%) against each true coefficient.
bias25 <- ((c1 - .25) / .25) * 100
bias50 <- ((c2 - .5) / .5) * 100
bias75 <- ((c3 - .75) / .75) * 100
## Mean 95% CI length (LPI) per model and covariate.
LPIx <- by.model(function(m) mean(ci.hi(m, "x") - ci.lo(m, "x")))
LPIx1 <- by.model(function(m) mean(ci.hi(m, "x1") - ci.lo(m, "x1")))
LPIx2 <- by.model(function(m) mean(ci.hi(m, "x2") - ci.lo(m, "x2")))
## Empirical coverage: fraction of simulations whose CI contains the truth.
cov.x <- by.model(function(m) mean(ci.lo(m, "x") <= 0.25 & 0.25 <= ci.hi(m, "x")))
cov.x1 <- by.model(function(m) mean(ci.lo(m, "x1") <= 0.5 & 0.5 <= ci.hi(m, "x1")))
cov.x2 <- by.model(function(m) mean(ci.lo(m, "x2") <= 0.75 & 0.75 <= ci.hi(m, "x2")))
## Averages over the three covariates (coverage reported as a percentage).
mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
mean.LPI <- (LPIx + LPIx1 + LPIx2) / 3
mean.cob <- (cov.x + cov.x1 + cov.x2) * 100 / 3
res1211.ag <- data.frame(c0, c1, c2, c3, bias25, bias50, bias75, LPIx, LPIx1, LPIx2,
  cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res1211.ag, "results/SJWEH/results1211.xls")
## Scenario 1212: parameter set 4, 30% exposed (bef = .3), follow-up ft = 730
## days, washout old = 3650 days.  Simulate nsim replicates in parallel,
## dump the raw per-replicate estimates, then summarize bias, CI length
## (LPI) and coverage for the three frailty models (AG, ComP-CP, ComP-GT).
results <- foreach(k = 1:nsim, .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .3, ft = 730, old = 3650,
    d.ev = d.ev4, d.cens = d.cens4, b0.ev = b0.ev4, b0.cens = b0.cens4,
    a.ev = a.ev4, a.cens = a.cens4, m = 5)
WriteXLS(results, "results/SJWEH/res1212.xls")
## Row labels of the summary table.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
mod.stems <- c(AG = "AG", COMPois = "COMPois", COMPoisB = "COMPoisB")
## Build a 3 x 1 column (rownames AG/COMPois/COMPoisB) from a per-model summary.
by.model <- function(f) cbind(vapply(mod.stems, f, numeric(1)))
## 95% Wald confidence limits for model `m` and covariate `v` ("x", "x1", "x2").
ci.lo <- function(m, v) results[[paste0("gcoef", m, ".", v)]] - 1.96 * results[[paste0("gsd", m, ".", v)]]
ci.hi <- function(m, v) results[[paste0("gcoef", m, ".", v)]] + 1.96 * results[[paste0("gsd", m, ".", v)]]
## Mean estimated coefficients (true values: x = .25, x1 = .5, x2 = .75).
c1 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x")]]))
c2 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x1")]]))
c3 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x2")]]))
## Relative bias (%) against each true coefficient.
bias25 <- ((c1 - .25) / .25) * 100
bias50 <- ((c2 - .5) / .5) * 100
bias75 <- ((c3 - .75) / .75) * 100
## Mean 95% CI length (LPI) per model and covariate.
LPIx <- by.model(function(m) mean(ci.hi(m, "x") - ci.lo(m, "x")))
LPIx1 <- by.model(function(m) mean(ci.hi(m, "x1") - ci.lo(m, "x1")))
LPIx2 <- by.model(function(m) mean(ci.hi(m, "x2") - ci.lo(m, "x2")))
## Empirical coverage: fraction of simulations whose CI contains the truth.
cov.x <- by.model(function(m) mean(ci.lo(m, "x") <= 0.25 & 0.25 <= ci.hi(m, "x")))
cov.x1 <- by.model(function(m) mean(ci.lo(m, "x1") <= 0.5 & 0.5 <= ci.hi(m, "x1")))
cov.x2 <- by.model(function(m) mean(ci.lo(m, "x2") <= 0.75 & 0.75 <= ci.hi(m, "x2")))
## Averages over the three covariates (coverage reported as a percentage).
mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
mean.LPI <- (LPIx + LPIx1 + LPIx2) / 3
mean.cob <- (cov.x + cov.x1 + cov.x2) * 100 / 3
res1212.ag <- data.frame(c0, c1, c2, c3, bias25, bias50, bias75, LPIx, LPIx1, LPIx2,
  cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res1212.ag, "results/SJWEH/results1212.xls")
## Scenario 1213: parameter set 4, 50% exposed (bef = .5), follow-up ft = 730
## days, washout old = 3650 days.  Simulate nsim replicates in parallel,
## dump the raw per-replicate estimates, then summarize bias, CI length
## (LPI) and coverage for the three frailty models (AG, ComP-CP, ComP-GT).
results <- foreach(k = 1:nsim, .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .5, ft = 730, old = 3650,
    d.ev = d.ev4, d.cens = d.cens4, b0.ev = b0.ev4, b0.cens = b0.cens4,
    a.ev = a.ev4, a.cens = a.cens4, m = 5)
WriteXLS(results, "results/SJWEH/res1213.xls")
## Row labels of the summary table.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
mod.stems <- c(AG = "AG", COMPois = "COMPois", COMPoisB = "COMPoisB")
## Build a 3 x 1 column (rownames AG/COMPois/COMPoisB) from a per-model summary.
by.model <- function(f) cbind(vapply(mod.stems, f, numeric(1)))
## 95% Wald confidence limits for model `m` and covariate `v` ("x", "x1", "x2").
ci.lo <- function(m, v) results[[paste0("gcoef", m, ".", v)]] - 1.96 * results[[paste0("gsd", m, ".", v)]]
ci.hi <- function(m, v) results[[paste0("gcoef", m, ".", v)]] + 1.96 * results[[paste0("gsd", m, ".", v)]]
## Mean estimated coefficients (true values: x = .25, x1 = .5, x2 = .75).
c1 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x")]]))
c2 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x1")]]))
c3 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x2")]]))
## Relative bias (%) against each true coefficient.
bias25 <- ((c1 - .25) / .25) * 100
bias50 <- ((c2 - .5) / .5) * 100
bias75 <- ((c3 - .75) / .75) * 100
## Mean 95% CI length (LPI) per model and covariate.
LPIx <- by.model(function(m) mean(ci.hi(m, "x") - ci.lo(m, "x")))
LPIx1 <- by.model(function(m) mean(ci.hi(m, "x1") - ci.lo(m, "x1")))
LPIx2 <- by.model(function(m) mean(ci.hi(m, "x2") - ci.lo(m, "x2")))
## Empirical coverage: fraction of simulations whose CI contains the truth.
cov.x <- by.model(function(m) mean(ci.lo(m, "x") <= 0.25 & 0.25 <= ci.hi(m, "x")))
cov.x1 <- by.model(function(m) mean(ci.lo(m, "x1") <= 0.5 & 0.5 <= ci.hi(m, "x1")))
cov.x2 <- by.model(function(m) mean(ci.lo(m, "x2") <= 0.75 & 0.75 <= ci.hi(m, "x2")))
## Averages over the three covariates (coverage reported as a percentage).
mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
mean.LPI <- (LPIx + LPIx1 + LPIx2) / 3
mean.cob <- (cov.x + cov.x1 + cov.x2) * 100 / 3
res1213.ag <- data.frame(c0, c1, c2, c3, bias25, bias50, bias75, LPIx, LPIx1, LPIx2,
  cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res1213.ag, "results/SJWEH/results1213.xls")
## Scenario 1221: parameter set 4, 10% exposed (bef = .1), follow-up ft = 1825
## days, washout old = 3650 days.  Simulate nsim replicates in parallel,
## dump the raw per-replicate estimates, then summarize bias, CI length
## (LPI) and coverage for the three frailty models (AG, ComP-CP, ComP-GT).
results <- foreach(k = 1:nsim, .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .1, ft = 1825, old = 3650,
    d.ev = d.ev4, d.cens = d.cens4, b0.ev = b0.ev4, b0.cens = b0.cens4,
    a.ev = a.ev4, a.cens = a.cens4, m = 5)
WriteXLS(results, "results/SJWEH/res1221.xls")
## Row labels of the summary table.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
mod.stems <- c(AG = "AG", COMPois = "COMPois", COMPoisB = "COMPoisB")
## Build a 3 x 1 column (rownames AG/COMPois/COMPoisB) from a per-model summary.
by.model <- function(f) cbind(vapply(mod.stems, f, numeric(1)))
## 95% Wald confidence limits for model `m` and covariate `v` ("x", "x1", "x2").
ci.lo <- function(m, v) results[[paste0("gcoef", m, ".", v)]] - 1.96 * results[[paste0("gsd", m, ".", v)]]
ci.hi <- function(m, v) results[[paste0("gcoef", m, ".", v)]] + 1.96 * results[[paste0("gsd", m, ".", v)]]
## Mean estimated coefficients (true values: x = .25, x1 = .5, x2 = .75).
c1 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x")]]))
c2 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x1")]]))
c3 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x2")]]))
## Relative bias (%) against each true coefficient.
bias25 <- ((c1 - .25) / .25) * 100
bias50 <- ((c2 - .5) / .5) * 100
bias75 <- ((c3 - .75) / .75) * 100
## Mean 95% CI length (LPI) per model and covariate.
LPIx <- by.model(function(m) mean(ci.hi(m, "x") - ci.lo(m, "x")))
LPIx1 <- by.model(function(m) mean(ci.hi(m, "x1") - ci.lo(m, "x1")))
LPIx2 <- by.model(function(m) mean(ci.hi(m, "x2") - ci.lo(m, "x2")))
## Empirical coverage: fraction of simulations whose CI contains the truth.
cov.x <- by.model(function(m) mean(ci.lo(m, "x") <= 0.25 & 0.25 <= ci.hi(m, "x")))
cov.x1 <- by.model(function(m) mean(ci.lo(m, "x1") <= 0.5 & 0.5 <= ci.hi(m, "x1")))
cov.x2 <- by.model(function(m) mean(ci.lo(m, "x2") <= 0.75 & 0.75 <= ci.hi(m, "x2")))
## Averages over the three covariates (coverage reported as a percentage).
mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
mean.LPI <- (LPIx + LPIx1 + LPIx2) / 3
mean.cob <- (cov.x + cov.x1 + cov.x2) * 100 / 3
res1221.ag <- data.frame(c0, c1, c2, c3, bias25, bias50, bias75, LPIx, LPIx1, LPIx2,
  cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res1221.ag, "results/SJWEH/results1221.xls")
## Scenario 1222: parameter set 4, 30% exposed (bef = .3), follow-up ft = 1825
## days, washout old = 3650 days.  Simulate nsim replicates in parallel,
## dump the raw per-replicate estimates, then summarize bias, CI length
## (LPI) and coverage for the three frailty models (AG, ComP-CP, ComP-GT).
results <- foreach(k = 1:nsim, .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .3, ft = 1825, old = 3650,
    d.ev = d.ev4, d.cens = d.cens4, b0.ev = b0.ev4, b0.cens = b0.cens4,
    a.ev = a.ev4, a.cens = a.cens4, m = 5)
WriteXLS(results, "results/SJWEH/res1222.xls")
## Row labels of the summary table.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
mod.stems <- c(AG = "AG", COMPois = "COMPois", COMPoisB = "COMPoisB")
## Build a 3 x 1 column (rownames AG/COMPois/COMPoisB) from a per-model summary.
by.model <- function(f) cbind(vapply(mod.stems, f, numeric(1)))
## 95% Wald confidence limits for model `m` and covariate `v` ("x", "x1", "x2").
ci.lo <- function(m, v) results[[paste0("gcoef", m, ".", v)]] - 1.96 * results[[paste0("gsd", m, ".", v)]]
ci.hi <- function(m, v) results[[paste0("gcoef", m, ".", v)]] + 1.96 * results[[paste0("gsd", m, ".", v)]]
## Mean estimated coefficients (true values: x = .25, x1 = .5, x2 = .75).
c1 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x")]]))
c2 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x1")]]))
c3 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x2")]]))
## Relative bias (%) against each true coefficient.
bias25 <- ((c1 - .25) / .25) * 100
bias50 <- ((c2 - .5) / .5) * 100
bias75 <- ((c3 - .75) / .75) * 100
## Mean 95% CI length (LPI) per model and covariate.
LPIx <- by.model(function(m) mean(ci.hi(m, "x") - ci.lo(m, "x")))
LPIx1 <- by.model(function(m) mean(ci.hi(m, "x1") - ci.lo(m, "x1")))
LPIx2 <- by.model(function(m) mean(ci.hi(m, "x2") - ci.lo(m, "x2")))
## Empirical coverage: fraction of simulations whose CI contains the truth.
cov.x <- by.model(function(m) mean(ci.lo(m, "x") <= 0.25 & 0.25 <= ci.hi(m, "x")))
cov.x1 <- by.model(function(m) mean(ci.lo(m, "x1") <= 0.5 & 0.5 <= ci.hi(m, "x1")))
cov.x2 <- by.model(function(m) mean(ci.lo(m, "x2") <= 0.75 & 0.75 <= ci.hi(m, "x2")))
## Averages over the three covariates (coverage reported as a percentage).
mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
mean.LPI <- (LPIx + LPIx1 + LPIx2) / 3
mean.cob <- (cov.x + cov.x1 + cov.x2) * 100 / 3
res1222.ag <- data.frame(c0, c1, c2, c3, bias25, bias50, bias75, LPIx, LPIx1, LPIx2,
  cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res1222.ag, "results/SJWEH/results1222.xls")
## Scenario 1223: parameter set 4, 50% exposed (bef = .5), follow-up ft = 1825
## days, washout old = 3650 days.  Simulate nsim replicates in parallel,
## dump the raw per-replicate estimates, then summarize bias, CI length
## (LPI) and coverage for the three frailty models (AG, ComP-CP, ComP-GT).
results <- foreach(k = 1:nsim, .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .5, ft = 1825, old = 3650,
    d.ev = d.ev4, d.cens = d.cens4, b0.ev = b0.ev4, b0.cens = b0.cens4,
    a.ev = a.ev4, a.cens = a.cens4, m = 5)
WriteXLS(results, "results/SJWEH/res1223.xls")
## Row labels of the summary table.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
mod.stems <- c(AG = "AG", COMPois = "COMPois", COMPoisB = "COMPoisB")
## Build a 3 x 1 column (rownames AG/COMPois/COMPoisB) from a per-model summary.
by.model <- function(f) cbind(vapply(mod.stems, f, numeric(1)))
## 95% Wald confidence limits for model `m` and covariate `v` ("x", "x1", "x2").
ci.lo <- function(m, v) results[[paste0("gcoef", m, ".", v)]] - 1.96 * results[[paste0("gsd", m, ".", v)]]
ci.hi <- function(m, v) results[[paste0("gcoef", m, ".", v)]] + 1.96 * results[[paste0("gsd", m, ".", v)]]
## Mean estimated coefficients (true values: x = .25, x1 = .5, x2 = .75).
c1 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x")]]))
c2 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x1")]]))
c3 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x2")]]))
## Relative bias (%) against each true coefficient.
bias25 <- ((c1 - .25) / .25) * 100
bias50 <- ((c2 - .5) / .5) * 100
bias75 <- ((c3 - .75) / .75) * 100
## Mean 95% CI length (LPI) per model and covariate.
LPIx <- by.model(function(m) mean(ci.hi(m, "x") - ci.lo(m, "x")))
LPIx1 <- by.model(function(m) mean(ci.hi(m, "x1") - ci.lo(m, "x1")))
LPIx2 <- by.model(function(m) mean(ci.hi(m, "x2") - ci.lo(m, "x2")))
## Empirical coverage: fraction of simulations whose CI contains the truth.
cov.x <- by.model(function(m) mean(ci.lo(m, "x") <= 0.25 & 0.25 <= ci.hi(m, "x")))
cov.x1 <- by.model(function(m) mean(ci.lo(m, "x1") <= 0.5 & 0.5 <= ci.hi(m, "x1")))
cov.x2 <- by.model(function(m) mean(ci.lo(m, "x2") <= 0.75 & 0.75 <= ci.hi(m, "x2")))
## Averages over the three covariates (coverage reported as a percentage).
mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
mean.LPI <- (LPIx + LPIx1 + LPIx2) / 3
mean.cob <- (cov.x + cov.x1 + cov.x2) * 100 / 3
res1223.ag <- data.frame(c0, c1, c2, c3, bias25, bias50, bias75, LPIx, LPIx1, LPIx2,
  cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res1223.ag, "results/SJWEH/results1223.xls")
########## SJWEH: moderate dependence (CP)
## Scenario 2111: parameter set 5 (moderate dependence), 10% exposed
## (bef = .1), follow-up ft = 730 days, washout old = 730 days.  Simulate
## nsim replicates in parallel, dump the raw per-replicate estimates, then
## summarize bias, CI length (LPI) and coverage for the three frailty
## models (AG, ComP-CP, ComP-GT).
results <- foreach(k = 1:nsim, .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .1, ft = 730, old = 730,
    d.ev = d.ev5, d.cens = d.cens5, b0.ev = b0.ev5, b0.cens = b0.cens5,
    a.ev = a.ev5, a.cens = a.cens5, m = 5)
WriteXLS(results, "results/SJWEH/res2111.xls")
## Row labels of the summary table.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
mod.stems <- c(AG = "AG", COMPois = "COMPois", COMPoisB = "COMPoisB")
## Build a 3 x 1 column (rownames AG/COMPois/COMPoisB) from a per-model summary.
by.model <- function(f) cbind(vapply(mod.stems, f, numeric(1)))
## 95% Wald confidence limits for model `m` and covariate `v` ("x", "x1", "x2").
ci.lo <- function(m, v) results[[paste0("gcoef", m, ".", v)]] - 1.96 * results[[paste0("gsd", m, ".", v)]]
ci.hi <- function(m, v) results[[paste0("gcoef", m, ".", v)]] + 1.96 * results[[paste0("gsd", m, ".", v)]]
## Mean estimated coefficients (true values: x = .25, x1 = .5, x2 = .75).
c1 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x")]]))
c2 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x1")]]))
c3 <- by.model(function(m) mean(results[[paste0("gcoef", m, ".x2")]]))
## Relative bias (%) against each true coefficient.
bias25 <- ((c1 - .25) / .25) * 100
bias50 <- ((c2 - .5) / .5) * 100
bias75 <- ((c3 - .75) / .75) * 100
## Mean 95% CI length (LPI) per model and covariate.
LPIx <- by.model(function(m) mean(ci.hi(m, "x") - ci.lo(m, "x")))
LPIx1 <- by.model(function(m) mean(ci.hi(m, "x1") - ci.lo(m, "x1")))
LPIx2 <- by.model(function(m) mean(ci.hi(m, "x2") - ci.lo(m, "x2")))
## Empirical coverage: fraction of simulations whose CI contains the truth.
cov.x <- by.model(function(m) mean(ci.lo(m, "x") <= 0.25 & 0.25 <= ci.hi(m, "x")))
cov.x1 <- by.model(function(m) mean(ci.lo(m, "x1") <= 0.5 & 0.5 <= ci.hi(m, "x1")))
cov.x2 <- by.model(function(m) mean(ci.lo(m, "x2") <= 0.75 & 0.75 <= ci.hi(m, "x2")))
## Averages over the three covariates (coverage reported as a percentage).
mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
mean.LPI <- (LPIx + LPIx1 + LPIx2) / 3
mean.cob <- (cov.x + cov.x1 + cov.x2) * 100 / 3
res2111.ag <- data.frame(c0, c1, c2, c3, bias25, bias50, bias75, LPIx, LPIx1, LPIx2,
  cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res2111.ag, "results/SJWEH/results2111.xls")
# --- Scenario 2112: nm = 1000, bef = .3, ft = 730, old = 730, m = 5 ---
# Run nsim simulation replicates in parallel (a foreach backend is assumed to
# be registered earlier) and stack the per-replicate estimates row-wise.
# NOTE(review): `nsim`, `genResultsDEF1_new_frailty` and the d.ev5/d.cens5/
# b0.*/a.* scenario inputs are defined elsewhere in this script -- confirm
# they are in scope at this point.
results <- foreach(k=1:nsim, .combine=rbind) %dopar%
genResultsDEF1_new_frailty(k, nm=1000, bef=.3, ft=730, old=730,
d.ev=d.ev5, d.cens=d.cens5, b0.ev=b0.ev5, b0.cens=b0.cens5,
a.ev=a.ev5, a.cens=a.cens5, m=5)
# Dump the raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res2112.xls")
# Labels for the three fitted models; the trailing 'F' presumably denotes
# frailty -- confirm against the fitting code.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
# Mean point estimate across replicates for each covariate effect.
# True coefficients are x = 0.25, x1 = 0.50, x2 = 0.75 (see the bias and
# coverage computations below).
c1 <- rbind(AG=mean(results$gcoefAG.x), COMPois=mean(results$gcoefCOMPois.x), COMPoisB=mean(results$gcoefCOMPoisB.x))
c2 <- rbind(AG=mean(results$gcoefAG.x1), COMPois=mean(results$gcoefCOMPois.x1), COMPoisB=mean(results$gcoefCOMPoisB.x1))
c3 <- rbind(AG=mean(results$gcoefAG.x2), COMPois=mean(results$gcoefCOMPois.x2), COMPoisB=mean(results$gcoefCOMPoisB.x2))
# Relative bias (%) of the mean estimate with respect to the true value.
bias25 <- ((c1-.25)/.25)*100
bias50 <- ((c2-.5)/.5)*100
bias75 <- ((c3-.75)/.75)*100
# For the coverages and interval lengths (LPI): per-replicate
# normal-approximation 95% CI limits (estimate +/- 1.96 * SE) for each
# model/covariate combination.
results$AGx_ci_i<-results$gcoefAG.x-1.96*results$gsdAG.x
results$AGx_ci_s<-results$gcoefAG.x+1.96*results$gsdAG.x
results$ComPoisx_ci_i<-results$gcoefCOMPois.x-1.96*results$gsdCOMPois.x
results$ComPoisx_ci_s<-results$gcoefCOMPois.x+1.96*results$gsdCOMPois.x
results$ComPoisBx_ci_i<-results$gcoefCOMPoisB.x-1.96*results$gsdCOMPoisB.x
results$ComPoisBx_ci_s<-results$gcoefCOMPoisB.x+1.96*results$gsdCOMPoisB.x
results$AGx1_ci_i<-results$gcoefAG.x1-1.96*results$gsdAG.x1
results$AGx1_ci_s<-results$gcoefAG.x1+1.96*results$gsdAG.x1
results$ComPoisx1_ci_i<-results$gcoefCOMPois.x1-1.96*results$gsdCOMPois.x1
results$ComPoisx1_ci_s<-results$gcoefCOMPois.x1+1.96*results$gsdCOMPois.x1
results$ComPoisBx1_ci_i<-results$gcoefCOMPoisB.x1-1.96*results$gsdCOMPoisB.x1
results$ComPoisBx1_ci_s<-results$gcoefCOMPoisB.x1+1.96*results$gsdCOMPoisB.x1
results$AGx2_ci_i<-results$gcoefAG.x2-1.96*results$gsdAG.x2
results$AGx2_ci_s<-results$gcoefAG.x2+1.96*results$gsdAG.x2
results$ComPoisx2_ci_i<-results$gcoefCOMPois.x2-1.96*results$gsdCOMPois.x2
results$ComPoisx2_ci_s<-results$gcoefCOMPois.x2+1.96*results$gsdCOMPois.x2
results$ComPoisBx2_ci_i<-results$gcoefCOMPoisB.x2-1.96*results$gsdCOMPoisB.x2
results$ComPoisBx2_ci_s<-results$gcoefCOMPoisB.x2+1.96*results$gsdCOMPoisB.x2
# Per-replicate interval lengths (LPI = upper CI limit - lower CI limit).
results$LPIxAG<-results$AGx_ci_s-results$AGx_ci_i
results$LPIx1AG<-results$AGx1_ci_s-results$AGx1_ci_i
results$LPIx2AG<-results$AGx2_ci_s-results$AGx2_ci_i
results$LPIxCOMPois<-results$ComPoisx_ci_s-results$ComPoisx_ci_i
results$LPIx1COMPois<-results$ComPoisx1_ci_s-results$ComPoisx1_ci_i
results$LPIx2COMPois<-results$ComPoisx2_ci_s-results$ComPoisx2_ci_i
results$LPIxCOMPoisB<-results$ComPoisBx_ci_s-results$ComPoisBx_ci_i
results$LPIx1COMPoisB<-results$ComPoisBx1_ci_s-results$ComPoisBx1_ci_i
results$LPIx2COMPoisB<-results$ComPoisBx2_ci_s-results$ComPoisBx2_ci_i
# Mean interval length per model and covariate.
LPIx <- rbind(AG=mean(results$LPIxAG), COMPois=mean(results$LPIxCOMPois), COMPoisB=mean(results$LPIxCOMPoisB))
LPIx1 <-rbind(AG=mean(results$LPIx1AG), COMPois=mean(results$LPIx1COMPois), COMPoisB=mean(results$LPIx1COMPoisB))
LPIx2 <-rbind(AG=mean(results$LPIx2AG), COMPois=mean(results$LPIx2COMPois), COMPoisB=mean(results$LPIx2COMPoisB))
# Coverage indicators: 1 if the 95% CI contains the true coefficient, else 0.
results$AGx_cov<-ifelse(results$AGx_ci_i<=0.25 & 0.25<=results$AGx_ci_s,1 ,0)
results$AGx1_cov<-ifelse(results$AGx1_ci_i<=0.5 & 0.5<=results$AGx1_ci_s,1 ,0)
results$AGx2_cov<-ifelse(results$AGx2_ci_i<=0.75 & 0.75<=results$AGx2_ci_s,1 ,0)
results$ComPoisx_cov<-ifelse(results$ComPoisx_ci_i<=0.25 & 0.25<=results$ComPoisx_ci_s,1 ,0)
results$ComPoisx1_cov<-ifelse(results$ComPoisx1_ci_i<=0.5 & 0.5<=results$ComPoisx1_ci_s,1 ,0)
results$ComPoisx2_cov<-ifelse(results$ComPoisx2_ci_i<=0.75 & 0.75<=results$ComPoisx2_ci_s,1 ,0)
results$ComPoisBx_cov<-ifelse(results$ComPoisBx_ci_i<=0.25 & 0.25<=results$ComPoisBx_ci_s,1 ,0)
results$ComPoisBx1_cov<-ifelse(results$ComPoisBx1_ci_i<=0.5 & 0.5<=results$ComPoisBx1_ci_s,1 ,0)
results$ComPoisBx2_cov<-ifelse(results$ComPoisBx2_ci_i<=0.75 & 0.75<=results$ComPoisBx2_ci_s,1 ,0)
# Empirical coverage proportion per model and covariate.
cov.x <-rbind(AG=sum(results$AGx_cov)/nrow(results), COMPois=sum(results$ComPoisx_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx_cov)/nrow(results))
cov.x1 <-rbind(AG=sum(results$AGx1_cov)/nrow(results), COMPois=sum(results$ComPoisx1_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx1_cov)/nrow(results))
cov.x2 <- rbind(AG=sum(results$AGx2_cov)/nrow(results), COMPois=sum(results$ComPoisx2_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx2_cov)/nrow(results))
# Covariate-averaged summaries: mean absolute relative bias (%), mean
# interval length, and mean coverage (%).
mean.bias <- (abs(bias25)+abs(bias50)+abs(bias75)) / 3
mean.LPI <- (LPIx+LPIx1+LPIx2) / 3
mean.cob <- (cov.x+cov.x1+cov.x2)*100 / 3
# Assemble the scenario summary table (one row per model) and export it.
res2112.ag <- data.frame(c0,c1,c2,c3,bias25,bias50,bias75, LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res2112.ag, "results/SJWEH/results2112.xls")
# --- Scenario 2113: nm = 1000, bef = .5, ft = 730, old = 730, m = 5 ---
# Run nsim simulation replicates in parallel (a foreach backend is assumed to
# be registered earlier) and stack the per-replicate estimates row-wise.
# NOTE(review): `nsim`, `genResultsDEF1_new_frailty` and the d.ev5/d.cens5/
# b0.*/a.* scenario inputs are defined elsewhere in this script -- confirm
# they are in scope at this point.
results <- foreach(k=1:nsim, .combine=rbind) %dopar%
genResultsDEF1_new_frailty(k, nm=1000, bef=.5, ft=730, old=730,
d.ev=d.ev5, d.cens=d.cens5, b0.ev=b0.ev5, b0.cens=b0.cens5,
a.ev=a.ev5, a.cens=a.cens5, m=5)
# Dump the raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res2113.xls")
# Labels for the three fitted models; the trailing 'F' presumably denotes
# frailty -- confirm against the fitting code.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
# Mean point estimate across replicates for each covariate effect.
# True coefficients are x = 0.25, x1 = 0.50, x2 = 0.75 (see the bias and
# coverage computations below).
c1 <- rbind(AG=mean(results$gcoefAG.x), COMPois=mean(results$gcoefCOMPois.x), COMPoisB=mean(results$gcoefCOMPoisB.x))
c2 <- rbind(AG=mean(results$gcoefAG.x1), COMPois=mean(results$gcoefCOMPois.x1), COMPoisB=mean(results$gcoefCOMPoisB.x1))
c3 <- rbind(AG=mean(results$gcoefAG.x2), COMPois=mean(results$gcoefCOMPois.x2), COMPoisB=mean(results$gcoefCOMPoisB.x2))
# Relative bias (%) of the mean estimate with respect to the true value.
bias25 <- ((c1-.25)/.25)*100
bias50 <- ((c2-.5)/.5)*100
bias75 <- ((c3-.75)/.75)*100
# For the coverages and interval lengths (LPI): per-replicate
# normal-approximation 95% CI limits (estimate +/- 1.96 * SE) for each
# model/covariate combination.
results$AGx_ci_i<-results$gcoefAG.x-1.96*results$gsdAG.x
results$AGx_ci_s<-results$gcoefAG.x+1.96*results$gsdAG.x
results$ComPoisx_ci_i<-results$gcoefCOMPois.x-1.96*results$gsdCOMPois.x
results$ComPoisx_ci_s<-results$gcoefCOMPois.x+1.96*results$gsdCOMPois.x
results$ComPoisBx_ci_i<-results$gcoefCOMPoisB.x-1.96*results$gsdCOMPoisB.x
results$ComPoisBx_ci_s<-results$gcoefCOMPoisB.x+1.96*results$gsdCOMPoisB.x
results$AGx1_ci_i<-results$gcoefAG.x1-1.96*results$gsdAG.x1
results$AGx1_ci_s<-results$gcoefAG.x1+1.96*results$gsdAG.x1
results$ComPoisx1_ci_i<-results$gcoefCOMPois.x1-1.96*results$gsdCOMPois.x1
results$ComPoisx1_ci_s<-results$gcoefCOMPois.x1+1.96*results$gsdCOMPois.x1
results$ComPoisBx1_ci_i<-results$gcoefCOMPoisB.x1-1.96*results$gsdCOMPoisB.x1
results$ComPoisBx1_ci_s<-results$gcoefCOMPoisB.x1+1.96*results$gsdCOMPoisB.x1
results$AGx2_ci_i<-results$gcoefAG.x2-1.96*results$gsdAG.x2
results$AGx2_ci_s<-results$gcoefAG.x2+1.96*results$gsdAG.x2
results$ComPoisx2_ci_i<-results$gcoefCOMPois.x2-1.96*results$gsdCOMPois.x2
results$ComPoisx2_ci_s<-results$gcoefCOMPois.x2+1.96*results$gsdCOMPois.x2
results$ComPoisBx2_ci_i<-results$gcoefCOMPoisB.x2-1.96*results$gsdCOMPoisB.x2
results$ComPoisBx2_ci_s<-results$gcoefCOMPoisB.x2+1.96*results$gsdCOMPoisB.x2
# Per-replicate interval lengths (LPI = upper CI limit - lower CI limit).
results$LPIxAG<-results$AGx_ci_s-results$AGx_ci_i
results$LPIx1AG<-results$AGx1_ci_s-results$AGx1_ci_i
results$LPIx2AG<-results$AGx2_ci_s-results$AGx2_ci_i
results$LPIxCOMPois<-results$ComPoisx_ci_s-results$ComPoisx_ci_i
results$LPIx1COMPois<-results$ComPoisx1_ci_s-results$ComPoisx1_ci_i
results$LPIx2COMPois<-results$ComPoisx2_ci_s-results$ComPoisx2_ci_i
results$LPIxCOMPoisB<-results$ComPoisBx_ci_s-results$ComPoisBx_ci_i
results$LPIx1COMPoisB<-results$ComPoisBx1_ci_s-results$ComPoisBx1_ci_i
results$LPIx2COMPoisB<-results$ComPoisBx2_ci_s-results$ComPoisBx2_ci_i
# Mean interval length per model and covariate.
LPIx <- rbind(AG=mean(results$LPIxAG), COMPois=mean(results$LPIxCOMPois), COMPoisB=mean(results$LPIxCOMPoisB))
LPIx1 <-rbind(AG=mean(results$LPIx1AG), COMPois=mean(results$LPIx1COMPois), COMPoisB=mean(results$LPIx1COMPoisB))
LPIx2 <-rbind(AG=mean(results$LPIx2AG), COMPois=mean(results$LPIx2COMPois), COMPoisB=mean(results$LPIx2COMPoisB))
# Coverage indicators: 1 if the 95% CI contains the true coefficient, else 0.
results$AGx_cov<-ifelse(results$AGx_ci_i<=0.25 & 0.25<=results$AGx_ci_s,1 ,0)
results$AGx1_cov<-ifelse(results$AGx1_ci_i<=0.5 & 0.5<=results$AGx1_ci_s,1 ,0)
results$AGx2_cov<-ifelse(results$AGx2_ci_i<=0.75 & 0.75<=results$AGx2_ci_s,1 ,0)
results$ComPoisx_cov<-ifelse(results$ComPoisx_ci_i<=0.25 & 0.25<=results$ComPoisx_ci_s,1 ,0)
results$ComPoisx1_cov<-ifelse(results$ComPoisx1_ci_i<=0.5 & 0.5<=results$ComPoisx1_ci_s,1 ,0)
results$ComPoisx2_cov<-ifelse(results$ComPoisx2_ci_i<=0.75 & 0.75<=results$ComPoisx2_ci_s,1 ,0)
results$ComPoisBx_cov<-ifelse(results$ComPoisBx_ci_i<=0.25 & 0.25<=results$ComPoisBx_ci_s,1 ,0)
results$ComPoisBx1_cov<-ifelse(results$ComPoisBx1_ci_i<=0.5 & 0.5<=results$ComPoisBx1_ci_s,1 ,0)
results$ComPoisBx2_cov<-ifelse(results$ComPoisBx2_ci_i<=0.75 & 0.75<=results$ComPoisBx2_ci_s,1 ,0)
# Empirical coverage proportion per model and covariate.
cov.x <-rbind(AG=sum(results$AGx_cov)/nrow(results), COMPois=sum(results$ComPoisx_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx_cov)/nrow(results))
cov.x1 <-rbind(AG=sum(results$AGx1_cov)/nrow(results), COMPois=sum(results$ComPoisx1_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx1_cov)/nrow(results))
cov.x2 <- rbind(AG=sum(results$AGx2_cov)/nrow(results), COMPois=sum(results$ComPoisx2_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx2_cov)/nrow(results))
# Covariate-averaged summaries: mean absolute relative bias (%), mean
# interval length, and mean coverage (%).
mean.bias <- (abs(bias25)+abs(bias50)+abs(bias75)) / 3
mean.LPI <- (LPIx+LPIx1+LPIx2) / 3
mean.cob <- (cov.x+cov.x1+cov.x2)*100 / 3
# Assemble the scenario summary table (one row per model) and export it.
res2113.ag <- data.frame(c0,c1,c2,c3,bias25,bias50,bias75, LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res2113.ag, "results/SJWEH/results2113.xls")
# --- Scenario 2121: nm = 1000, bef = .1, ft = 1825, old = 730, m = 5 ---
# Run nsim simulation replicates in parallel (a foreach backend is assumed to
# be registered earlier) and stack the per-replicate estimates row-wise.
# NOTE(review): `nsim`, `genResultsDEF1_new_frailty` and the d.ev5/d.cens5/
# b0.*/a.* scenario inputs are defined elsewhere in this script -- confirm
# they are in scope at this point.
results <- foreach(k=1:nsim, .combine=rbind) %dopar%
genResultsDEF1_new_frailty(k, nm=1000, bef=.1, ft=1825, old=730,
d.ev=d.ev5, d.cens=d.cens5, b0.ev=b0.ev5, b0.cens=b0.cens5,
a.ev=a.ev5, a.cens=a.cens5, m=5)
# Dump the raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res2121.xls")
# Labels for the three fitted models; the trailing 'F' presumably denotes
# frailty -- confirm against the fitting code.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
# Mean point estimate across replicates for each covariate effect.
# True coefficients are x = 0.25, x1 = 0.50, x2 = 0.75 (see the bias and
# coverage computations below).
c1 <- rbind(AG=mean(results$gcoefAG.x), COMPois=mean(results$gcoefCOMPois.x), COMPoisB=mean(results$gcoefCOMPoisB.x))
c2 <- rbind(AG=mean(results$gcoefAG.x1), COMPois=mean(results$gcoefCOMPois.x1), COMPoisB=mean(results$gcoefCOMPoisB.x1))
c3 <- rbind(AG=mean(results$gcoefAG.x2), COMPois=mean(results$gcoefCOMPois.x2), COMPoisB=mean(results$gcoefCOMPoisB.x2))
# Relative bias (%) of the mean estimate with respect to the true value.
bias25 <- ((c1-.25)/.25)*100
bias50 <- ((c2-.5)/.5)*100
bias75 <- ((c3-.75)/.75)*100
# For the coverages and interval lengths (LPI): per-replicate
# normal-approximation 95% CI limits (estimate +/- 1.96 * SE) for each
# model/covariate combination.
results$AGx_ci_i<-results$gcoefAG.x-1.96*results$gsdAG.x
results$AGx_ci_s<-results$gcoefAG.x+1.96*results$gsdAG.x
results$ComPoisx_ci_i<-results$gcoefCOMPois.x-1.96*results$gsdCOMPois.x
results$ComPoisx_ci_s<-results$gcoefCOMPois.x+1.96*results$gsdCOMPois.x
results$ComPoisBx_ci_i<-results$gcoefCOMPoisB.x-1.96*results$gsdCOMPoisB.x
results$ComPoisBx_ci_s<-results$gcoefCOMPoisB.x+1.96*results$gsdCOMPoisB.x
results$AGx1_ci_i<-results$gcoefAG.x1-1.96*results$gsdAG.x1
results$AGx1_ci_s<-results$gcoefAG.x1+1.96*results$gsdAG.x1
results$ComPoisx1_ci_i<-results$gcoefCOMPois.x1-1.96*results$gsdCOMPois.x1
results$ComPoisx1_ci_s<-results$gcoefCOMPois.x1+1.96*results$gsdCOMPois.x1
results$ComPoisBx1_ci_i<-results$gcoefCOMPoisB.x1-1.96*results$gsdCOMPoisB.x1
results$ComPoisBx1_ci_s<-results$gcoefCOMPoisB.x1+1.96*results$gsdCOMPoisB.x1
results$AGx2_ci_i<-results$gcoefAG.x2-1.96*results$gsdAG.x2
results$AGx2_ci_s<-results$gcoefAG.x2+1.96*results$gsdAG.x2
results$ComPoisx2_ci_i<-results$gcoefCOMPois.x2-1.96*results$gsdCOMPois.x2
results$ComPoisx2_ci_s<-results$gcoefCOMPois.x2+1.96*results$gsdCOMPois.x2
results$ComPoisBx2_ci_i<-results$gcoefCOMPoisB.x2-1.96*results$gsdCOMPoisB.x2
results$ComPoisBx2_ci_s<-results$gcoefCOMPoisB.x2+1.96*results$gsdCOMPoisB.x2
# Per-replicate interval lengths (LPI = upper CI limit - lower CI limit).
results$LPIxAG<-results$AGx_ci_s-results$AGx_ci_i
results$LPIx1AG<-results$AGx1_ci_s-results$AGx1_ci_i
results$LPIx2AG<-results$AGx2_ci_s-results$AGx2_ci_i
results$LPIxCOMPois<-results$ComPoisx_ci_s-results$ComPoisx_ci_i
results$LPIx1COMPois<-results$ComPoisx1_ci_s-results$ComPoisx1_ci_i
results$LPIx2COMPois<-results$ComPoisx2_ci_s-results$ComPoisx2_ci_i
results$LPIxCOMPoisB<-results$ComPoisBx_ci_s-results$ComPoisBx_ci_i
results$LPIx1COMPoisB<-results$ComPoisBx1_ci_s-results$ComPoisBx1_ci_i
results$LPIx2COMPoisB<-results$ComPoisBx2_ci_s-results$ComPoisBx2_ci_i
# Mean interval length per model and covariate.
LPIx <- rbind(AG=mean(results$LPIxAG), COMPois=mean(results$LPIxCOMPois), COMPoisB=mean(results$LPIxCOMPoisB))
LPIx1 <-rbind(AG=mean(results$LPIx1AG), COMPois=mean(results$LPIx1COMPois), COMPoisB=mean(results$LPIx1COMPoisB))
LPIx2 <-rbind(AG=mean(results$LPIx2AG), COMPois=mean(results$LPIx2COMPois), COMPoisB=mean(results$LPIx2COMPoisB))
# Coverage indicators: 1 if the 95% CI contains the true coefficient, else 0.
results$AGx_cov<-ifelse(results$AGx_ci_i<=0.25 & 0.25<=results$AGx_ci_s,1 ,0)
results$AGx1_cov<-ifelse(results$AGx1_ci_i<=0.5 & 0.5<=results$AGx1_ci_s,1 ,0)
results$AGx2_cov<-ifelse(results$AGx2_ci_i<=0.75 & 0.75<=results$AGx2_ci_s,1 ,0)
results$ComPoisx_cov<-ifelse(results$ComPoisx_ci_i<=0.25 & 0.25<=results$ComPoisx_ci_s,1 ,0)
results$ComPoisx1_cov<-ifelse(results$ComPoisx1_ci_i<=0.5 & 0.5<=results$ComPoisx1_ci_s,1 ,0)
results$ComPoisx2_cov<-ifelse(results$ComPoisx2_ci_i<=0.75 & 0.75<=results$ComPoisx2_ci_s,1 ,0)
results$ComPoisBx_cov<-ifelse(results$ComPoisBx_ci_i<=0.25 & 0.25<=results$ComPoisBx_ci_s,1 ,0)
results$ComPoisBx1_cov<-ifelse(results$ComPoisBx1_ci_i<=0.5 & 0.5<=results$ComPoisBx1_ci_s,1 ,0)
results$ComPoisBx2_cov<-ifelse(results$ComPoisBx2_ci_i<=0.75 & 0.75<=results$ComPoisBx2_ci_s,1 ,0)
# Empirical coverage proportion per model and covariate.
cov.x <-rbind(AG=sum(results$AGx_cov)/nrow(results), COMPois=sum(results$ComPoisx_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx_cov)/nrow(results))
cov.x1 <-rbind(AG=sum(results$AGx1_cov)/nrow(results), COMPois=sum(results$ComPoisx1_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx1_cov)/nrow(results))
cov.x2 <- rbind(AG=sum(results$AGx2_cov)/nrow(results), COMPois=sum(results$ComPoisx2_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx2_cov)/nrow(results))
# Covariate-averaged summaries: mean absolute relative bias (%), mean
# interval length, and mean coverage (%).
mean.bias <- (abs(bias25)+abs(bias50)+abs(bias75)) / 3
mean.LPI <- (LPIx+LPIx1+LPIx2) / 3
mean.cob <- (cov.x+cov.x1+cov.x2)*100 / 3
# Assemble the scenario summary table (one row per model) and export it.
res2121.ag <- data.frame(c0,c1,c2,c3,bias25,bias50,bias75, LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res2121.ag, "results/SJWEH/results2121.xls")
# --- Scenario 2122: nm = 1000, bef = .3, ft = 1825, old = 730, m = 5 ---
# Run nsim simulation replicates in parallel (a foreach backend is assumed to
# be registered earlier) and stack the per-replicate estimates row-wise.
# NOTE(review): `nsim`, `genResultsDEF1_new_frailty` and the d.ev5/d.cens5/
# b0.*/a.* scenario inputs are defined elsewhere in this script -- confirm
# they are in scope at this point.
results <- foreach(k=1:nsim, .combine=rbind) %dopar%
genResultsDEF1_new_frailty(k, nm=1000, bef=.3, ft=1825, old=730,
d.ev=d.ev5, d.cens=d.cens5, b0.ev=b0.ev5, b0.cens=b0.cens5,
a.ev=a.ev5, a.cens=a.cens5, m=5)
# Dump the raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res2122.xls")
# Labels for the three fitted models; the trailing 'F' presumably denotes
# frailty -- confirm against the fitting code.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
# Mean point estimate across replicates for each covariate effect.
# True coefficients are x = 0.25, x1 = 0.50, x2 = 0.75 (see the bias and
# coverage computations below).
c1 <- rbind(AG=mean(results$gcoefAG.x), COMPois=mean(results$gcoefCOMPois.x), COMPoisB=mean(results$gcoefCOMPoisB.x))
c2 <- rbind(AG=mean(results$gcoefAG.x1), COMPois=mean(results$gcoefCOMPois.x1), COMPoisB=mean(results$gcoefCOMPoisB.x1))
c3 <- rbind(AG=mean(results$gcoefAG.x2), COMPois=mean(results$gcoefCOMPois.x2), COMPoisB=mean(results$gcoefCOMPoisB.x2))
# Relative bias (%) of the mean estimate with respect to the true value.
bias25 <- ((c1-.25)/.25)*100
bias50 <- ((c2-.5)/.5)*100
bias75 <- ((c3-.75)/.75)*100
# For the coverages and interval lengths (LPI): per-replicate
# normal-approximation 95% CI limits (estimate +/- 1.96 * SE) for each
# model/covariate combination.
results$AGx_ci_i<-results$gcoefAG.x-1.96*results$gsdAG.x
results$AGx_ci_s<-results$gcoefAG.x+1.96*results$gsdAG.x
results$ComPoisx_ci_i<-results$gcoefCOMPois.x-1.96*results$gsdCOMPois.x
results$ComPoisx_ci_s<-results$gcoefCOMPois.x+1.96*results$gsdCOMPois.x
results$ComPoisBx_ci_i<-results$gcoefCOMPoisB.x-1.96*results$gsdCOMPoisB.x
results$ComPoisBx_ci_s<-results$gcoefCOMPoisB.x+1.96*results$gsdCOMPoisB.x
results$AGx1_ci_i<-results$gcoefAG.x1-1.96*results$gsdAG.x1
results$AGx1_ci_s<-results$gcoefAG.x1+1.96*results$gsdAG.x1
results$ComPoisx1_ci_i<-results$gcoefCOMPois.x1-1.96*results$gsdCOMPois.x1
results$ComPoisx1_ci_s<-results$gcoefCOMPois.x1+1.96*results$gsdCOMPois.x1
results$ComPoisBx1_ci_i<-results$gcoefCOMPoisB.x1-1.96*results$gsdCOMPoisB.x1
results$ComPoisBx1_ci_s<-results$gcoefCOMPoisB.x1+1.96*results$gsdCOMPoisB.x1
results$AGx2_ci_i<-results$gcoefAG.x2-1.96*results$gsdAG.x2
results$AGx2_ci_s<-results$gcoefAG.x2+1.96*results$gsdAG.x2
results$ComPoisx2_ci_i<-results$gcoefCOMPois.x2-1.96*results$gsdCOMPois.x2
results$ComPoisx2_ci_s<-results$gcoefCOMPois.x2+1.96*results$gsdCOMPois.x2
results$ComPoisBx2_ci_i<-results$gcoefCOMPoisB.x2-1.96*results$gsdCOMPoisB.x2
results$ComPoisBx2_ci_s<-results$gcoefCOMPoisB.x2+1.96*results$gsdCOMPoisB.x2
# Per-replicate interval lengths (LPI = upper CI limit - lower CI limit).
results$LPIxAG<-results$AGx_ci_s-results$AGx_ci_i
results$LPIx1AG<-results$AGx1_ci_s-results$AGx1_ci_i
results$LPIx2AG<-results$AGx2_ci_s-results$AGx2_ci_i
results$LPIxCOMPois<-results$ComPoisx_ci_s-results$ComPoisx_ci_i
results$LPIx1COMPois<-results$ComPoisx1_ci_s-results$ComPoisx1_ci_i
results$LPIx2COMPois<-results$ComPoisx2_ci_s-results$ComPoisx2_ci_i
results$LPIxCOMPoisB<-results$ComPoisBx_ci_s-results$ComPoisBx_ci_i
results$LPIx1COMPoisB<-results$ComPoisBx1_ci_s-results$ComPoisBx1_ci_i
results$LPIx2COMPoisB<-results$ComPoisBx2_ci_s-results$ComPoisBx2_ci_i
# Mean interval length per model and covariate.
LPIx <- rbind(AG=mean(results$LPIxAG), COMPois=mean(results$LPIxCOMPois), COMPoisB=mean(results$LPIxCOMPoisB))
LPIx1 <-rbind(AG=mean(results$LPIx1AG), COMPois=mean(results$LPIx1COMPois), COMPoisB=mean(results$LPIx1COMPoisB))
LPIx2 <-rbind(AG=mean(results$LPIx2AG), COMPois=mean(results$LPIx2COMPois), COMPoisB=mean(results$LPIx2COMPoisB))
# Coverage indicators: 1 if the 95% CI contains the true coefficient, else 0.
results$AGx_cov<-ifelse(results$AGx_ci_i<=0.25 & 0.25<=results$AGx_ci_s,1 ,0)
results$AGx1_cov<-ifelse(results$AGx1_ci_i<=0.5 & 0.5<=results$AGx1_ci_s,1 ,0)
results$AGx2_cov<-ifelse(results$AGx2_ci_i<=0.75 & 0.75<=results$AGx2_ci_s,1 ,0)
results$ComPoisx_cov<-ifelse(results$ComPoisx_ci_i<=0.25 & 0.25<=results$ComPoisx_ci_s,1 ,0)
results$ComPoisx1_cov<-ifelse(results$ComPoisx1_ci_i<=0.5 & 0.5<=results$ComPoisx1_ci_s,1 ,0)
results$ComPoisx2_cov<-ifelse(results$ComPoisx2_ci_i<=0.75 & 0.75<=results$ComPoisx2_ci_s,1 ,0)
results$ComPoisBx_cov<-ifelse(results$ComPoisBx_ci_i<=0.25 & 0.25<=results$ComPoisBx_ci_s,1 ,0)
results$ComPoisBx1_cov<-ifelse(results$ComPoisBx1_ci_i<=0.5 & 0.5<=results$ComPoisBx1_ci_s,1 ,0)
results$ComPoisBx2_cov<-ifelse(results$ComPoisBx2_ci_i<=0.75 & 0.75<=results$ComPoisBx2_ci_s,1 ,0)
# Empirical coverage proportion per model and covariate.
cov.x <-rbind(AG=sum(results$AGx_cov)/nrow(results), COMPois=sum(results$ComPoisx_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx_cov)/nrow(results))
cov.x1 <-rbind(AG=sum(results$AGx1_cov)/nrow(results), COMPois=sum(results$ComPoisx1_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx1_cov)/nrow(results))
cov.x2 <- rbind(AG=sum(results$AGx2_cov)/nrow(results), COMPois=sum(results$ComPoisx2_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx2_cov)/nrow(results))
# Covariate-averaged summaries: mean absolute relative bias (%), mean
# interval length, and mean coverage (%).
mean.bias <- (abs(bias25)+abs(bias50)+abs(bias75)) / 3
mean.LPI <- (LPIx+LPIx1+LPIx2) / 3
mean.cob <- (cov.x+cov.x1+cov.x2)*100 / 3
# Assemble the scenario summary table (one row per model) and export it.
res2122.ag <- data.frame(c0,c1,c2,c3,bias25,bias50,bias75, LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res2122.ag, "results/SJWEH/results2122.xls")
# --- Scenario 2123: nm = 1000, bef = .5, ft = 1825, old = 730, m = 5 ---
# Run nsim simulation replicates in parallel (a foreach backend is assumed to
# be registered earlier) and stack the per-replicate estimates row-wise.
# NOTE(review): `nsim`, `genResultsDEF1_new_frailty` and the d.ev5/d.cens5/
# b0.*/a.* scenario inputs are defined elsewhere in this script -- confirm
# they are in scope at this point.
results <- foreach(k=1:nsim, .combine=rbind) %dopar%
genResultsDEF1_new_frailty(k, nm=1000, bef=.5, ft=1825, old=730,
d.ev=d.ev5, d.cens=d.cens5, b0.ev=b0.ev5, b0.cens=b0.cens5,
a.ev=a.ev5, a.cens=a.cens5, m=5)
# Dump the raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res2123.xls")
# Labels for the three fitted models; the trailing 'F' presumably denotes
# frailty -- confirm against the fitting code.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
# Mean point estimate across replicates for each covariate effect.
# True coefficients are x = 0.25, x1 = 0.50, x2 = 0.75 (see the bias and
# coverage computations below).
c1 <- rbind(AG=mean(results$gcoefAG.x), COMPois=mean(results$gcoefCOMPois.x), COMPoisB=mean(results$gcoefCOMPoisB.x))
c2 <- rbind(AG=mean(results$gcoefAG.x1), COMPois=mean(results$gcoefCOMPois.x1), COMPoisB=mean(results$gcoefCOMPoisB.x1))
c3 <- rbind(AG=mean(results$gcoefAG.x2), COMPois=mean(results$gcoefCOMPois.x2), COMPoisB=mean(results$gcoefCOMPoisB.x2))
# Relative bias (%) of the mean estimate with respect to the true value.
bias25 <- ((c1-.25)/.25)*100
bias50 <- ((c2-.5)/.5)*100
bias75 <- ((c3-.75)/.75)*100
# For the coverages and interval lengths (LPI): per-replicate
# normal-approximation 95% CI limits (estimate +/- 1.96 * SE) for each
# model/covariate combination.
results$AGx_ci_i<-results$gcoefAG.x-1.96*results$gsdAG.x
results$AGx_ci_s<-results$gcoefAG.x+1.96*results$gsdAG.x
results$ComPoisx_ci_i<-results$gcoefCOMPois.x-1.96*results$gsdCOMPois.x
results$ComPoisx_ci_s<-results$gcoefCOMPois.x+1.96*results$gsdCOMPois.x
results$ComPoisBx_ci_i<-results$gcoefCOMPoisB.x-1.96*results$gsdCOMPoisB.x
results$ComPoisBx_ci_s<-results$gcoefCOMPoisB.x+1.96*results$gsdCOMPoisB.x
results$AGx1_ci_i<-results$gcoefAG.x1-1.96*results$gsdAG.x1
results$AGx1_ci_s<-results$gcoefAG.x1+1.96*results$gsdAG.x1
results$ComPoisx1_ci_i<-results$gcoefCOMPois.x1-1.96*results$gsdCOMPois.x1
results$ComPoisx1_ci_s<-results$gcoefCOMPois.x1+1.96*results$gsdCOMPois.x1
results$ComPoisBx1_ci_i<-results$gcoefCOMPoisB.x1-1.96*results$gsdCOMPoisB.x1
results$ComPoisBx1_ci_s<-results$gcoefCOMPoisB.x1+1.96*results$gsdCOMPoisB.x1
results$AGx2_ci_i<-results$gcoefAG.x2-1.96*results$gsdAG.x2
results$AGx2_ci_s<-results$gcoefAG.x2+1.96*results$gsdAG.x2
results$ComPoisx2_ci_i<-results$gcoefCOMPois.x2-1.96*results$gsdCOMPois.x2
results$ComPoisx2_ci_s<-results$gcoefCOMPois.x2+1.96*results$gsdCOMPois.x2
results$ComPoisBx2_ci_i<-results$gcoefCOMPoisB.x2-1.96*results$gsdCOMPoisB.x2
results$ComPoisBx2_ci_s<-results$gcoefCOMPoisB.x2+1.96*results$gsdCOMPoisB.x2
# Per-replicate interval lengths (LPI = upper CI limit - lower CI limit).
results$LPIxAG<-results$AGx_ci_s-results$AGx_ci_i
results$LPIx1AG<-results$AGx1_ci_s-results$AGx1_ci_i
results$LPIx2AG<-results$AGx2_ci_s-results$AGx2_ci_i
results$LPIxCOMPois<-results$ComPoisx_ci_s-results$ComPoisx_ci_i
results$LPIx1COMPois<-results$ComPoisx1_ci_s-results$ComPoisx1_ci_i
results$LPIx2COMPois<-results$ComPoisx2_ci_s-results$ComPoisx2_ci_i
results$LPIxCOMPoisB<-results$ComPoisBx_ci_s-results$ComPoisBx_ci_i
results$LPIx1COMPoisB<-results$ComPoisBx1_ci_s-results$ComPoisBx1_ci_i
results$LPIx2COMPoisB<-results$ComPoisBx2_ci_s-results$ComPoisBx2_ci_i
# Mean interval length per model and covariate.
LPIx <- rbind(AG=mean(results$LPIxAG), COMPois=mean(results$LPIxCOMPois), COMPoisB=mean(results$LPIxCOMPoisB))
LPIx1 <-rbind(AG=mean(results$LPIx1AG), COMPois=mean(results$LPIx1COMPois), COMPoisB=mean(results$LPIx1COMPoisB))
LPIx2 <-rbind(AG=mean(results$LPIx2AG), COMPois=mean(results$LPIx2COMPois), COMPoisB=mean(results$LPIx2COMPoisB))
# Coverage indicators: 1 if the 95% CI contains the true coefficient, else 0.
results$AGx_cov<-ifelse(results$AGx_ci_i<=0.25 & 0.25<=results$AGx_ci_s,1 ,0)
results$AGx1_cov<-ifelse(results$AGx1_ci_i<=0.5 & 0.5<=results$AGx1_ci_s,1 ,0)
results$AGx2_cov<-ifelse(results$AGx2_ci_i<=0.75 & 0.75<=results$AGx2_ci_s,1 ,0)
results$ComPoisx_cov<-ifelse(results$ComPoisx_ci_i<=0.25 & 0.25<=results$ComPoisx_ci_s,1 ,0)
results$ComPoisx1_cov<-ifelse(results$ComPoisx1_ci_i<=0.5 & 0.5<=results$ComPoisx1_ci_s,1 ,0)
results$ComPoisx2_cov<-ifelse(results$ComPoisx2_ci_i<=0.75 & 0.75<=results$ComPoisx2_ci_s,1 ,0)
results$ComPoisBx_cov<-ifelse(results$ComPoisBx_ci_i<=0.25 & 0.25<=results$ComPoisBx_ci_s,1 ,0)
results$ComPoisBx1_cov<-ifelse(results$ComPoisBx1_ci_i<=0.5 & 0.5<=results$ComPoisBx1_ci_s,1 ,0)
results$ComPoisBx2_cov<-ifelse(results$ComPoisBx2_ci_i<=0.75 & 0.75<=results$ComPoisBx2_ci_s,1 ,0)
# Empirical coverage proportion per model and covariate.
cov.x <-rbind(AG=sum(results$AGx_cov)/nrow(results), COMPois=sum(results$ComPoisx_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx_cov)/nrow(results))
cov.x1 <-rbind(AG=sum(results$AGx1_cov)/nrow(results), COMPois=sum(results$ComPoisx1_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx1_cov)/nrow(results))
cov.x2 <- rbind(AG=sum(results$AGx2_cov)/nrow(results), COMPois=sum(results$ComPoisx2_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx2_cov)/nrow(results))
# Covariate-averaged summaries: mean absolute relative bias (%), mean
# interval length, and mean coverage (%).
mean.bias <- (abs(bias25)+abs(bias50)+abs(bias75)) / 3
mean.LPI <- (LPIx+LPIx1+LPIx2) / 3
mean.cob <- (cov.x+cov.x1+cov.x2)*100 / 3
# Assemble the scenario summary table (one row per model) and export it.
res2123.ag <- data.frame(c0,c1,c2,c3,bias25,bias50,bias75, LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res2123.ag, "results/SJWEH/results2123.xls")
# --- Scenario 2211: nm = 1000, bef = .1, ft = 730, old = 3650, m = 5 ---
# Run nsim simulation replicates in parallel (a foreach backend is assumed to
# be registered earlier) and stack the per-replicate estimates row-wise.
# NOTE(review): `nsim`, `genResultsDEF1_new_frailty` and the d.ev5/d.cens5/
# b0.*/a.* scenario inputs are defined elsewhere in this script -- confirm
# they are in scope at this point.
results <- foreach(k=1:nsim, .combine=rbind) %dopar%
genResultsDEF1_new_frailty(k, nm=1000, bef=.1, ft=730, old=3650,
d.ev=d.ev5, d.cens=d.cens5, b0.ev=b0.ev5, b0.cens=b0.cens5,
a.ev=a.ev5, a.cens=a.cens5, m=5)
# Dump the raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res2211.xls")
# Labels for the three fitted models; the trailing 'F' presumably denotes
# frailty -- confirm against the fitting code.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
# Mean point estimate across replicates for each covariate effect.
# True coefficients are x = 0.25, x1 = 0.50, x2 = 0.75 (see the bias and
# coverage computations below).
c1 <- rbind(AG=mean(results$gcoefAG.x), COMPois=mean(results$gcoefCOMPois.x), COMPoisB=mean(results$gcoefCOMPoisB.x))
c2 <- rbind(AG=mean(results$gcoefAG.x1), COMPois=mean(results$gcoefCOMPois.x1), COMPoisB=mean(results$gcoefCOMPoisB.x1))
c3 <- rbind(AG=mean(results$gcoefAG.x2), COMPois=mean(results$gcoefCOMPois.x2), COMPoisB=mean(results$gcoefCOMPoisB.x2))
# Relative bias (%) of the mean estimate with respect to the true value.
bias25 <- ((c1-.25)/.25)*100
bias50 <- ((c2-.5)/.5)*100
bias75 <- ((c3-.75)/.75)*100
# For the coverages and interval lengths (LPI): per-replicate
# normal-approximation 95% CI limits (estimate +/- 1.96 * SE) for each
# model/covariate combination.
results$AGx_ci_i<-results$gcoefAG.x-1.96*results$gsdAG.x
results$AGx_ci_s<-results$gcoefAG.x+1.96*results$gsdAG.x
results$ComPoisx_ci_i<-results$gcoefCOMPois.x-1.96*results$gsdCOMPois.x
results$ComPoisx_ci_s<-results$gcoefCOMPois.x+1.96*results$gsdCOMPois.x
results$ComPoisBx_ci_i<-results$gcoefCOMPoisB.x-1.96*results$gsdCOMPoisB.x
results$ComPoisBx_ci_s<-results$gcoefCOMPoisB.x+1.96*results$gsdCOMPoisB.x
results$AGx1_ci_i<-results$gcoefAG.x1-1.96*results$gsdAG.x1
results$AGx1_ci_s<-results$gcoefAG.x1+1.96*results$gsdAG.x1
results$ComPoisx1_ci_i<-results$gcoefCOMPois.x1-1.96*results$gsdCOMPois.x1
results$ComPoisx1_ci_s<-results$gcoefCOMPois.x1+1.96*results$gsdCOMPois.x1
results$ComPoisBx1_ci_i<-results$gcoefCOMPoisB.x1-1.96*results$gsdCOMPoisB.x1
results$ComPoisBx1_ci_s<-results$gcoefCOMPoisB.x1+1.96*results$gsdCOMPoisB.x1
results$AGx2_ci_i<-results$gcoefAG.x2-1.96*results$gsdAG.x2
results$AGx2_ci_s<-results$gcoefAG.x2+1.96*results$gsdAG.x2
results$ComPoisx2_ci_i<-results$gcoefCOMPois.x2-1.96*results$gsdCOMPois.x2
results$ComPoisx2_ci_s<-results$gcoefCOMPois.x2+1.96*results$gsdCOMPois.x2
results$ComPoisBx2_ci_i<-results$gcoefCOMPoisB.x2-1.96*results$gsdCOMPoisB.x2
results$ComPoisBx2_ci_s<-results$gcoefCOMPoisB.x2+1.96*results$gsdCOMPoisB.x2
# Per-replicate interval lengths (LPI = upper CI limit - lower CI limit).
results$LPIxAG<-results$AGx_ci_s-results$AGx_ci_i
results$LPIx1AG<-results$AGx1_ci_s-results$AGx1_ci_i
results$LPIx2AG<-results$AGx2_ci_s-results$AGx2_ci_i
results$LPIxCOMPois<-results$ComPoisx_ci_s-results$ComPoisx_ci_i
results$LPIx1COMPois<-results$ComPoisx1_ci_s-results$ComPoisx1_ci_i
results$LPIx2COMPois<-results$ComPoisx2_ci_s-results$ComPoisx2_ci_i
results$LPIxCOMPoisB<-results$ComPoisBx_ci_s-results$ComPoisBx_ci_i
results$LPIx1COMPoisB<-results$ComPoisBx1_ci_s-results$ComPoisBx1_ci_i
results$LPIx2COMPoisB<-results$ComPoisBx2_ci_s-results$ComPoisBx2_ci_i
# Mean interval length per model and covariate.
LPIx <- rbind(AG=mean(results$LPIxAG), COMPois=mean(results$LPIxCOMPois), COMPoisB=mean(results$LPIxCOMPoisB))
LPIx1 <-rbind(AG=mean(results$LPIx1AG), COMPois=mean(results$LPIx1COMPois), COMPoisB=mean(results$LPIx1COMPoisB))
LPIx2 <-rbind(AG=mean(results$LPIx2AG), COMPois=mean(results$LPIx2COMPois), COMPoisB=mean(results$LPIx2COMPoisB))
# Coverage indicators: 1 if the 95% CI contains the true coefficient, else 0.
results$AGx_cov<-ifelse(results$AGx_ci_i<=0.25 & 0.25<=results$AGx_ci_s,1 ,0)
results$AGx1_cov<-ifelse(results$AGx1_ci_i<=0.5 & 0.5<=results$AGx1_ci_s,1 ,0)
results$AGx2_cov<-ifelse(results$AGx2_ci_i<=0.75 & 0.75<=results$AGx2_ci_s,1 ,0)
results$ComPoisx_cov<-ifelse(results$ComPoisx_ci_i<=0.25 & 0.25<=results$ComPoisx_ci_s,1 ,0)
results$ComPoisx1_cov<-ifelse(results$ComPoisx1_ci_i<=0.5 & 0.5<=results$ComPoisx1_ci_s,1 ,0)
results$ComPoisx2_cov<-ifelse(results$ComPoisx2_ci_i<=0.75 & 0.75<=results$ComPoisx2_ci_s,1 ,0)
results$ComPoisBx_cov<-ifelse(results$ComPoisBx_ci_i<=0.25 & 0.25<=results$ComPoisBx_ci_s,1 ,0)
results$ComPoisBx1_cov<-ifelse(results$ComPoisBx1_ci_i<=0.5 & 0.5<=results$ComPoisBx1_ci_s,1 ,0)
results$ComPoisBx2_cov<-ifelse(results$ComPoisBx2_ci_i<=0.75 & 0.75<=results$ComPoisBx2_ci_s,1 ,0)
# Empirical coverage proportion per model and covariate.
cov.x <-rbind(AG=sum(results$AGx_cov)/nrow(results), COMPois=sum(results$ComPoisx_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx_cov)/nrow(results))
cov.x1 <-rbind(AG=sum(results$AGx1_cov)/nrow(results), COMPois=sum(results$ComPoisx1_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx1_cov)/nrow(results))
cov.x2 <- rbind(AG=sum(results$AGx2_cov)/nrow(results), COMPois=sum(results$ComPoisx2_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx2_cov)/nrow(results))
# Covariate-averaged summaries: mean absolute relative bias (%), mean
# interval length, and mean coverage (%).
mean.bias <- (abs(bias25)+abs(bias50)+abs(bias75)) / 3
mean.LPI <- (LPIx+LPIx1+LPIx2) / 3
mean.cob <- (cov.x+cov.x1+cov.x2)*100 / 3
# Assemble the scenario summary table (one row per model) and export it.
res2211.ag <- data.frame(c0,c1,c2,c3,bias25,bias50,bias75, LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res2211.ag, "results/SJWEH/results2211.xls")
# --- Scenario 2212: nm = 1000, bef = .3, ft = 730, old = 3650, m = 5 ---
# Run nsim simulation replicates in parallel (a foreach backend is assumed to
# be registered earlier) and stack the per-replicate estimates row-wise.
# NOTE(review): `nsim`, `genResultsDEF1_new_frailty` and the d.ev5/d.cens5/
# b0.*/a.* scenario inputs are defined elsewhere in this script -- confirm
# they are in scope at this point.
results <- foreach(k=1:nsim, .combine=rbind) %dopar%
genResultsDEF1_new_frailty(k, nm=1000, bef=.3, ft=730, old=3650,
d.ev=d.ev5, d.cens=d.cens5, b0.ev=b0.ev5, b0.cens=b0.cens5,
a.ev=a.ev5, a.cens=a.cens5, m=5)
# Dump the raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res2212.xls")
# Labels for the three fitted models; the trailing 'F' presumably denotes
# frailty -- confirm against the fitting code.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
# Mean point estimate across replicates for each covariate effect.
# True coefficients are x = 0.25, x1 = 0.50, x2 = 0.75 (see the bias and
# coverage computations below).
c1 <- rbind(AG=mean(results$gcoefAG.x), COMPois=mean(results$gcoefCOMPois.x), COMPoisB=mean(results$gcoefCOMPoisB.x))
c2 <- rbind(AG=mean(results$gcoefAG.x1), COMPois=mean(results$gcoefCOMPois.x1), COMPoisB=mean(results$gcoefCOMPoisB.x1))
c3 <- rbind(AG=mean(results$gcoefAG.x2), COMPois=mean(results$gcoefCOMPois.x2), COMPoisB=mean(results$gcoefCOMPoisB.x2))
# Relative bias (%) of the mean estimate with respect to the true value.
bias25 <- ((c1-.25)/.25)*100
bias50 <- ((c2-.5)/.5)*100
bias75 <- ((c3-.75)/.75)*100
# For the coverages and interval lengths (LPI): per-replicate
# normal-approximation 95% CI limits (estimate +/- 1.96 * SE) for each
# model/covariate combination.
results$AGx_ci_i<-results$gcoefAG.x-1.96*results$gsdAG.x
results$AGx_ci_s<-results$gcoefAG.x+1.96*results$gsdAG.x
results$ComPoisx_ci_i<-results$gcoefCOMPois.x-1.96*results$gsdCOMPois.x
results$ComPoisx_ci_s<-results$gcoefCOMPois.x+1.96*results$gsdCOMPois.x
results$ComPoisBx_ci_i<-results$gcoefCOMPoisB.x-1.96*results$gsdCOMPoisB.x
results$ComPoisBx_ci_s<-results$gcoefCOMPoisB.x+1.96*results$gsdCOMPoisB.x
results$AGx1_ci_i<-results$gcoefAG.x1-1.96*results$gsdAG.x1
results$AGx1_ci_s<-results$gcoefAG.x1+1.96*results$gsdAG.x1
results$ComPoisx1_ci_i<-results$gcoefCOMPois.x1-1.96*results$gsdCOMPois.x1
results$ComPoisx1_ci_s<-results$gcoefCOMPois.x1+1.96*results$gsdCOMPois.x1
results$ComPoisBx1_ci_i<-results$gcoefCOMPoisB.x1-1.96*results$gsdCOMPoisB.x1
results$ComPoisBx1_ci_s<-results$gcoefCOMPoisB.x1+1.96*results$gsdCOMPoisB.x1
results$AGx2_ci_i<-results$gcoefAG.x2-1.96*results$gsdAG.x2
results$AGx2_ci_s<-results$gcoefAG.x2+1.96*results$gsdAG.x2
results$ComPoisx2_ci_i<-results$gcoefCOMPois.x2-1.96*results$gsdCOMPois.x2
results$ComPoisx2_ci_s<-results$gcoefCOMPois.x2+1.96*results$gsdCOMPois.x2
results$ComPoisBx2_ci_i<-results$gcoefCOMPoisB.x2-1.96*results$gsdCOMPoisB.x2
results$ComPoisBx2_ci_s<-results$gcoefCOMPoisB.x2+1.96*results$gsdCOMPoisB.x2
# Per-replicate interval lengths (LPI = upper CI limit - lower CI limit).
results$LPIxAG<-results$AGx_ci_s-results$AGx_ci_i
results$LPIx1AG<-results$AGx1_ci_s-results$AGx1_ci_i
results$LPIx2AG<-results$AGx2_ci_s-results$AGx2_ci_i
results$LPIxCOMPois<-results$ComPoisx_ci_s-results$ComPoisx_ci_i
results$LPIx1COMPois<-results$ComPoisx1_ci_s-results$ComPoisx1_ci_i
results$LPIx2COMPois<-results$ComPoisx2_ci_s-results$ComPoisx2_ci_i
results$LPIxCOMPoisB<-results$ComPoisBx_ci_s-results$ComPoisBx_ci_i
results$LPIx1COMPoisB<-results$ComPoisBx1_ci_s-results$ComPoisBx1_ci_i
results$LPIx2COMPoisB<-results$ComPoisBx2_ci_s-results$ComPoisBx2_ci_i
# Mean interval length per model and covariate.
LPIx <- rbind(AG=mean(results$LPIxAG), COMPois=mean(results$LPIxCOMPois), COMPoisB=mean(results$LPIxCOMPoisB))
LPIx1 <-rbind(AG=mean(results$LPIx1AG), COMPois=mean(results$LPIx1COMPois), COMPoisB=mean(results$LPIx1COMPoisB))
LPIx2 <-rbind(AG=mean(results$LPIx2AG), COMPois=mean(results$LPIx2COMPois), COMPoisB=mean(results$LPIx2COMPoisB))
# Coverage indicators: 1 if the 95% CI contains the true coefficient, else 0.
results$AGx_cov<-ifelse(results$AGx_ci_i<=0.25 & 0.25<=results$AGx_ci_s,1 ,0)
results$AGx1_cov<-ifelse(results$AGx1_ci_i<=0.5 & 0.5<=results$AGx1_ci_s,1 ,0)
results$AGx2_cov<-ifelse(results$AGx2_ci_i<=0.75 & 0.75<=results$AGx2_ci_s,1 ,0)
results$ComPoisx_cov<-ifelse(results$ComPoisx_ci_i<=0.25 & 0.25<=results$ComPoisx_ci_s,1 ,0)
results$ComPoisx1_cov<-ifelse(results$ComPoisx1_ci_i<=0.5 & 0.5<=results$ComPoisx1_ci_s,1 ,0)
results$ComPoisx2_cov<-ifelse(results$ComPoisx2_ci_i<=0.75 & 0.75<=results$ComPoisx2_ci_s,1 ,0)
results$ComPoisBx_cov<-ifelse(results$ComPoisBx_ci_i<=0.25 & 0.25<=results$ComPoisBx_ci_s,1 ,0)
results$ComPoisBx1_cov<-ifelse(results$ComPoisBx1_ci_i<=0.5 & 0.5<=results$ComPoisBx1_ci_s,1 ,0)
results$ComPoisBx2_cov<-ifelse(results$ComPoisBx2_ci_i<=0.75 & 0.75<=results$ComPoisBx2_ci_s,1 ,0)
# Empirical coverage proportion per model and covariate.
cov.x <-rbind(AG=sum(results$AGx_cov)/nrow(results), COMPois=sum(results$ComPoisx_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx_cov)/nrow(results))
cov.x1 <-rbind(AG=sum(results$AGx1_cov)/nrow(results), COMPois=sum(results$ComPoisx1_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx1_cov)/nrow(results))
cov.x2 <- rbind(AG=sum(results$AGx2_cov)/nrow(results), COMPois=sum(results$ComPoisx2_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx2_cov)/nrow(results))
# Covariate-averaged summaries: mean absolute relative bias (%), mean
# interval length, and mean coverage (%).
mean.bias <- (abs(bias25)+abs(bias50)+abs(bias75)) / 3
mean.LPI <- (LPIx+LPIx1+LPIx2) / 3
mean.cob <- (cov.x+cov.x1+cov.x2)*100 / 3
# Assemble the scenario summary table (one row per model) and export it.
res2212.ag <- data.frame(c0,c1,c2,c3,bias25,bias50,bias75, LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res2212.ag, "results/SJWEH/results2212.xls")
# Summarise replicated frailty-model fits for the three estimators
# (AG frailty, ComP (CP) frailty, ComP (GT) frailty).
#
# `results` must contain, per simulation replicate, the columns
#   gcoef<model>.<cv>  (coefficient estimate) and
#   gsd<model>.<cv>    (its standard error)
# for model in {AG, COMPois, COMPoisB} and cv in names(true.coefs).
#
# Returns a 3-row data.frame (one row per model, row names AG/COMPois/
# COMPoisB) with: model label (c0), mean estimates (c1-c3), relative bias
# in % (bias25/bias50/bias75), mean 95% CI width (LPIx/LPIx1/LPIx2),
# empirical coverage as a proportion (cov.x/cov.x1/cov.x2), and per-model
# averages (mean.bias, mean.LPI, mean.cob -- the last one in %).
# NOTE: redefined identically before each scenario so every section can be
# run on its own; repeated definitions are no-ops.
summarize_frailty_models <- function(results,
                                     labels = c("AG F", "ComP (CP) F", "ComP (GT) F"),
                                     true.coefs = c(x = 0.25, x1 = 0.50, x2 = 0.75),
                                     z = 1.96) {
  models <- c("AG", "COMPois", "COMPoisB")
  # One 3 (models) x 4 (coef, bias, lpi, cov) block per covariate.
  stat.block <- function(cv) {
    tr <- true.coefs[[cv]]
    t(sapply(models, function(mod) {
      est <- results[[paste0("gcoef", mod, ".", cv)]]
      se  <- results[[paste0("gsd",   mod, ".", cv)]]
      lo  <- est - z * se                       # per-replicate Wald bounds
      hi  <- est + z * se
      c(coef = mean(est),
        bias = ((mean(est) - tr) / tr) * 100,   # relative bias (%)
        lpi  = mean(hi - lo),                   # mean CI width ("LPI")
        cov  = sum(lo <= tr & tr <= hi) / nrow(results))  # coverage
    }))
  }
  sx  <- stat.block("x")
  sx1 <- stat.block("x1")
  sx2 <- stat.block("x2")
  data.frame(
    c0 = labels,
    c1 = sx[, "coef"], c2 = sx1[, "coef"], c3 = sx2[, "coef"],
    bias25 = sx[, "bias"], bias50 = sx1[, "bias"], bias75 = sx2[, "bias"],
    LPIx = sx[, "lpi"], LPIx1 = sx1[, "lpi"], LPIx2 = sx2[, "lpi"],
    cov.x = sx[, "cov"], cov.x1 = sx1[, "cov"], cov.x2 = sx2[, "cov"],
    mean.bias = (abs(sx[, "bias"]) + abs(sx1[, "bias"]) + abs(sx2[, "bias"])) / 3,
    mean.LPI  = (sx[, "lpi"] + sx1[, "lpi"] + sx2[, "lpi"]) / 3,
    mean.cob  = (sx[, "cov"] + sx1[, "cov"] + sx2[, "cov"]) * 100 / 3,
    row.names = models)
}

# Scenario 2213 -- suffix-5 parameter set, bef = .5, ft = 730, old = 3650.
# Run nsim replicates in parallel (one fitted-model row per replicate),
# save the raw per-replicate estimates, then the 3-model summary table.
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .5, ft = 730, old = 3650,
                             d.ev = d.ev5, d.cens = d.cens5,
                             b0.ev = b0.ev5, b0.cens = b0.cens5,
                             a.ev = a.ev5, a.cens = a.cens5, m = 5)
WriteXLS(results, "results/SJWEH/res2213.xls")
res2213.ag <- summarize_frailty_models(results)
WriteXLS(res2213.ag, "results/SJWEH/results2213.xls")
# Summarise replicated frailty-model fits for the three estimators
# (AG frailty, ComP (CP) frailty, ComP (GT) frailty).
#
# `results` must contain, per simulation replicate, the columns
#   gcoef<model>.<cv>  (coefficient estimate) and
#   gsd<model>.<cv>    (its standard error)
# for model in {AG, COMPois, COMPoisB} and cv in names(true.coefs).
#
# Returns a 3-row data.frame (one row per model, row names AG/COMPois/
# COMPoisB) with: model label (c0), mean estimates (c1-c3), relative bias
# in % (bias25/bias50/bias75), mean 95% CI width (LPIx/LPIx1/LPIx2),
# empirical coverage as a proportion (cov.x/cov.x1/cov.x2), and per-model
# averages (mean.bias, mean.LPI, mean.cob -- the last one in %).
# NOTE: redefined identically before each scenario so every section can be
# run on its own; repeated definitions are no-ops.
summarize_frailty_models <- function(results,
                                     labels = c("AG F", "ComP (CP) F", "ComP (GT) F"),
                                     true.coefs = c(x = 0.25, x1 = 0.50, x2 = 0.75),
                                     z = 1.96) {
  models <- c("AG", "COMPois", "COMPoisB")
  # One 3 (models) x 4 (coef, bias, lpi, cov) block per covariate.
  stat.block <- function(cv) {
    tr <- true.coefs[[cv]]
    t(sapply(models, function(mod) {
      est <- results[[paste0("gcoef", mod, ".", cv)]]
      se  <- results[[paste0("gsd",   mod, ".", cv)]]
      lo  <- est - z * se                       # per-replicate Wald bounds
      hi  <- est + z * se
      c(coef = mean(est),
        bias = ((mean(est) - tr) / tr) * 100,   # relative bias (%)
        lpi  = mean(hi - lo),                   # mean CI width ("LPI")
        cov  = sum(lo <= tr & tr <= hi) / nrow(results))  # coverage
    }))
  }
  sx  <- stat.block("x")
  sx1 <- stat.block("x1")
  sx2 <- stat.block("x2")
  data.frame(
    c0 = labels,
    c1 = sx[, "coef"], c2 = sx1[, "coef"], c3 = sx2[, "coef"],
    bias25 = sx[, "bias"], bias50 = sx1[, "bias"], bias75 = sx2[, "bias"],
    LPIx = sx[, "lpi"], LPIx1 = sx1[, "lpi"], LPIx2 = sx2[, "lpi"],
    cov.x = sx[, "cov"], cov.x1 = sx1[, "cov"], cov.x2 = sx2[, "cov"],
    mean.bias = (abs(sx[, "bias"]) + abs(sx1[, "bias"]) + abs(sx2[, "bias"])) / 3,
    mean.LPI  = (sx[, "lpi"] + sx1[, "lpi"] + sx2[, "lpi"]) / 3,
    mean.cob  = (sx[, "cov"] + sx1[, "cov"] + sx2[, "cov"]) * 100 / 3,
    row.names = models)
}

# Scenario 2221 -- suffix-5 parameter set, bef = .1, ft = 1825, old = 3650.
# Run nsim replicates in parallel (one fitted-model row per replicate),
# save the raw per-replicate estimates, then the 3-model summary table.
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .1, ft = 1825, old = 3650,
                             d.ev = d.ev5, d.cens = d.cens5,
                             b0.ev = b0.ev5, b0.cens = b0.cens5,
                             a.ev = a.ev5, a.cens = a.cens5, m = 5)
WriteXLS(results, "results/SJWEH/res2221.xls")
res2221.ag <- summarize_frailty_models(results)
WriteXLS(res2221.ag, "results/SJWEH/results2221.xls")
# Summarise replicated frailty-model fits for the three estimators
# (AG frailty, ComP (CP) frailty, ComP (GT) frailty).
#
# `results` must contain, per simulation replicate, the columns
#   gcoef<model>.<cv>  (coefficient estimate) and
#   gsd<model>.<cv>    (its standard error)
# for model in {AG, COMPois, COMPoisB} and cv in names(true.coefs).
#
# Returns a 3-row data.frame (one row per model, row names AG/COMPois/
# COMPoisB) with: model label (c0), mean estimates (c1-c3), relative bias
# in % (bias25/bias50/bias75), mean 95% CI width (LPIx/LPIx1/LPIx2),
# empirical coverage as a proportion (cov.x/cov.x1/cov.x2), and per-model
# averages (mean.bias, mean.LPI, mean.cob -- the last one in %).
# NOTE: redefined identically before each scenario so every section can be
# run on its own; repeated definitions are no-ops.
summarize_frailty_models <- function(results,
                                     labels = c("AG F", "ComP (CP) F", "ComP (GT) F"),
                                     true.coefs = c(x = 0.25, x1 = 0.50, x2 = 0.75),
                                     z = 1.96) {
  models <- c("AG", "COMPois", "COMPoisB")
  # One 3 (models) x 4 (coef, bias, lpi, cov) block per covariate.
  stat.block <- function(cv) {
    tr <- true.coefs[[cv]]
    t(sapply(models, function(mod) {
      est <- results[[paste0("gcoef", mod, ".", cv)]]
      se  <- results[[paste0("gsd",   mod, ".", cv)]]
      lo  <- est - z * se                       # per-replicate Wald bounds
      hi  <- est + z * se
      c(coef = mean(est),
        bias = ((mean(est) - tr) / tr) * 100,   # relative bias (%)
        lpi  = mean(hi - lo),                   # mean CI width ("LPI")
        cov  = sum(lo <= tr & tr <= hi) / nrow(results))  # coverage
    }))
  }
  sx  <- stat.block("x")
  sx1 <- stat.block("x1")
  sx2 <- stat.block("x2")
  data.frame(
    c0 = labels,
    c1 = sx[, "coef"], c2 = sx1[, "coef"], c3 = sx2[, "coef"],
    bias25 = sx[, "bias"], bias50 = sx1[, "bias"], bias75 = sx2[, "bias"],
    LPIx = sx[, "lpi"], LPIx1 = sx1[, "lpi"], LPIx2 = sx2[, "lpi"],
    cov.x = sx[, "cov"], cov.x1 = sx1[, "cov"], cov.x2 = sx2[, "cov"],
    mean.bias = (abs(sx[, "bias"]) + abs(sx1[, "bias"]) + abs(sx2[, "bias"])) / 3,
    mean.LPI  = (sx[, "lpi"] + sx1[, "lpi"] + sx2[, "lpi"]) / 3,
    mean.cob  = (sx[, "cov"] + sx1[, "cov"] + sx2[, "cov"]) * 100 / 3,
    row.names = models)
}

# Scenario 2222 -- suffix-5 parameter set, bef = .3, ft = 1825, old = 3650.
# Run nsim replicates in parallel (one fitted-model row per replicate),
# save the raw per-replicate estimates, then the 3-model summary table.
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .3, ft = 1825, old = 3650,
                             d.ev = d.ev5, d.cens = d.cens5,
                             b0.ev = b0.ev5, b0.cens = b0.cens5,
                             a.ev = a.ev5, a.cens = a.cens5, m = 5)
WriteXLS(results, "results/SJWEH/res2222.xls")
res2222.ag <- summarize_frailty_models(results)
WriteXLS(res2222.ag, "results/SJWEH/results2222.xls")
# Summarise replicated frailty-model fits for the three estimators
# (AG frailty, ComP (CP) frailty, ComP (GT) frailty).
#
# `results` must contain, per simulation replicate, the columns
#   gcoef<model>.<cv>  (coefficient estimate) and
#   gsd<model>.<cv>    (its standard error)
# for model in {AG, COMPois, COMPoisB} and cv in names(true.coefs).
#
# Returns a 3-row data.frame (one row per model, row names AG/COMPois/
# COMPoisB) with: model label (c0), mean estimates (c1-c3), relative bias
# in % (bias25/bias50/bias75), mean 95% CI width (LPIx/LPIx1/LPIx2),
# empirical coverage as a proportion (cov.x/cov.x1/cov.x2), and per-model
# averages (mean.bias, mean.LPI, mean.cob -- the last one in %).
# NOTE: redefined identically before each scenario so every section can be
# run on its own; repeated definitions are no-ops.
summarize_frailty_models <- function(results,
                                     labels = c("AG F", "ComP (CP) F", "ComP (GT) F"),
                                     true.coefs = c(x = 0.25, x1 = 0.50, x2 = 0.75),
                                     z = 1.96) {
  models <- c("AG", "COMPois", "COMPoisB")
  # One 3 (models) x 4 (coef, bias, lpi, cov) block per covariate.
  stat.block <- function(cv) {
    tr <- true.coefs[[cv]]
    t(sapply(models, function(mod) {
      est <- results[[paste0("gcoef", mod, ".", cv)]]
      se  <- results[[paste0("gsd",   mod, ".", cv)]]
      lo  <- est - z * se                       # per-replicate Wald bounds
      hi  <- est + z * se
      c(coef = mean(est),
        bias = ((mean(est) - tr) / tr) * 100,   # relative bias (%)
        lpi  = mean(hi - lo),                   # mean CI width ("LPI")
        cov  = sum(lo <= tr & tr <= hi) / nrow(results))  # coverage
    }))
  }
  sx  <- stat.block("x")
  sx1 <- stat.block("x1")
  sx2 <- stat.block("x2")
  data.frame(
    c0 = labels,
    c1 = sx[, "coef"], c2 = sx1[, "coef"], c3 = sx2[, "coef"],
    bias25 = sx[, "bias"], bias50 = sx1[, "bias"], bias75 = sx2[, "bias"],
    LPIx = sx[, "lpi"], LPIx1 = sx1[, "lpi"], LPIx2 = sx2[, "lpi"],
    cov.x = sx[, "cov"], cov.x1 = sx1[, "cov"], cov.x2 = sx2[, "cov"],
    mean.bias = (abs(sx[, "bias"]) + abs(sx1[, "bias"]) + abs(sx2[, "bias"])) / 3,
    mean.LPI  = (sx[, "lpi"] + sx1[, "lpi"] + sx2[, "lpi"]) / 3,
    mean.cob  = (sx[, "cov"] + sx1[, "cov"] + sx2[, "cov"]) * 100 / 3,
    row.names = models)
}

# Scenario 2223 -- suffix-5 parameter set, bef = .5, ft = 1825, old = 3650.
# Run nsim replicates in parallel (one fitted-model row per replicate),
# save the raw per-replicate estimates, then the 3-model summary table.
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .5, ft = 1825, old = 3650,
                             d.ev = d.ev5, d.cens = d.cens5,
                             b0.ev = b0.ev5, b0.cens = b0.cens5,
                             a.ev = a.ev5, a.cens = a.cens5, m = 5)
WriteXLS(results, "results/SJWEH/res2223.xls")
res2223.ag <- summarize_frailty_models(results)
WriteXLS(res2223.ag, "results/SJWEH/results2223.xls")
########## SJWEH: HIGH DEPENDENCE (CP) SCENARIOS

# Summarise replicated frailty-model fits for the three estimators
# (AG frailty, ComP (CP) frailty, ComP (GT) frailty).
#
# `results` must contain, per simulation replicate, the columns
#   gcoef<model>.<cv>  (coefficient estimate) and
#   gsd<model>.<cv>    (its standard error)
# for model in {AG, COMPois, COMPoisB} and cv in names(true.coefs).
#
# Returns a 3-row data.frame (one row per model, row names AG/COMPois/
# COMPoisB) with: model label (c0), mean estimates (c1-c3), relative bias
# in % (bias25/bias50/bias75), mean 95% CI width (LPIx/LPIx1/LPIx2),
# empirical coverage as a proportion (cov.x/cov.x1/cov.x2), and per-model
# averages (mean.bias, mean.LPI, mean.cob -- the last one in %).
# NOTE: redefined identically before each scenario so every section can be
# run on its own; repeated definitions are no-ops.
summarize_frailty_models <- function(results,
                                     labels = c("AG F", "ComP (CP) F", "ComP (GT) F"),
                                     true.coefs = c(x = 0.25, x1 = 0.50, x2 = 0.75),
                                     z = 1.96) {
  models <- c("AG", "COMPois", "COMPoisB")
  # One 3 (models) x 4 (coef, bias, lpi, cov) block per covariate.
  stat.block <- function(cv) {
    tr <- true.coefs[[cv]]
    t(sapply(models, function(mod) {
      est <- results[[paste0("gcoef", mod, ".", cv)]]
      se  <- results[[paste0("gsd",   mod, ".", cv)]]
      lo  <- est - z * se                       # per-replicate Wald bounds
      hi  <- est + z * se
      c(coef = mean(est),
        bias = ((mean(est) - tr) / tr) * 100,   # relative bias (%)
        lpi  = mean(hi - lo),                   # mean CI width ("LPI")
        cov  = sum(lo <= tr & tr <= hi) / nrow(results))  # coverage
    }))
  }
  sx  <- stat.block("x")
  sx1 <- stat.block("x1")
  sx2 <- stat.block("x2")
  data.frame(
    c0 = labels,
    c1 = sx[, "coef"], c2 = sx1[, "coef"], c3 = sx2[, "coef"],
    bias25 = sx[, "bias"], bias50 = sx1[, "bias"], bias75 = sx2[, "bias"],
    LPIx = sx[, "lpi"], LPIx1 = sx1[, "lpi"], LPIx2 = sx2[, "lpi"],
    cov.x = sx[, "cov"], cov.x1 = sx1[, "cov"], cov.x2 = sx2[, "cov"],
    mean.bias = (abs(sx[, "bias"]) + abs(sx1[, "bias"]) + abs(sx2[, "bias"])) / 3,
    mean.LPI  = (sx[, "lpi"] + sx1[, "lpi"] + sx2[, "lpi"]) / 3,
    mean.cob  = (sx[, "cov"] + sx1[, "cov"] + sx2[, "cov"]) * 100 / 3,
    row.names = models)
}

# Scenario 3111 -- high dependence (suffix-6 parameter set), bef = .1,
# ft = 730, old = 730.  Run nsim replicates in parallel, save the raw
# per-replicate estimates, then the 3-model summary table.
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .1, ft = 730, old = 730,
                             d.ev = d.ev6, d.cens = d.cens6,
                             b0.ev = b0.ev6, b0.cens = b0.cens6,
                             a.ev = a.ev6, a.cens = a.cens6, m = 5)
WriteXLS(results, "results/SJWEH/res3111.xls")
res3111.ag <- summarize_frailty_models(results)
WriteXLS(res3111.ag, "results/SJWEH/results3111.xls")
# Summarise replicated frailty-model fits for the three estimators
# (AG frailty, ComP (CP) frailty, ComP (GT) frailty).
#
# `results` must contain, per simulation replicate, the columns
#   gcoef<model>.<cv>  (coefficient estimate) and
#   gsd<model>.<cv>    (its standard error)
# for model in {AG, COMPois, COMPoisB} and cv in names(true.coefs).
#
# Returns a 3-row data.frame (one row per model, row names AG/COMPois/
# COMPoisB) with: model label (c0), mean estimates (c1-c3), relative bias
# in % (bias25/bias50/bias75), mean 95% CI width (LPIx/LPIx1/LPIx2),
# empirical coverage as a proportion (cov.x/cov.x1/cov.x2), and per-model
# averages (mean.bias, mean.LPI, mean.cob -- the last one in %).
# NOTE: redefined identically before each scenario so every section can be
# run on its own; repeated definitions are no-ops.
summarize_frailty_models <- function(results,
                                     labels = c("AG F", "ComP (CP) F", "ComP (GT) F"),
                                     true.coefs = c(x = 0.25, x1 = 0.50, x2 = 0.75),
                                     z = 1.96) {
  models <- c("AG", "COMPois", "COMPoisB")
  # One 3 (models) x 4 (coef, bias, lpi, cov) block per covariate.
  stat.block <- function(cv) {
    tr <- true.coefs[[cv]]
    t(sapply(models, function(mod) {
      est <- results[[paste0("gcoef", mod, ".", cv)]]
      se  <- results[[paste0("gsd",   mod, ".", cv)]]
      lo  <- est - z * se                       # per-replicate Wald bounds
      hi  <- est + z * se
      c(coef = mean(est),
        bias = ((mean(est) - tr) / tr) * 100,   # relative bias (%)
        lpi  = mean(hi - lo),                   # mean CI width ("LPI")
        cov  = sum(lo <= tr & tr <= hi) / nrow(results))  # coverage
    }))
  }
  sx  <- stat.block("x")
  sx1 <- stat.block("x1")
  sx2 <- stat.block("x2")
  data.frame(
    c0 = labels,
    c1 = sx[, "coef"], c2 = sx1[, "coef"], c3 = sx2[, "coef"],
    bias25 = sx[, "bias"], bias50 = sx1[, "bias"], bias75 = sx2[, "bias"],
    LPIx = sx[, "lpi"], LPIx1 = sx1[, "lpi"], LPIx2 = sx2[, "lpi"],
    cov.x = sx[, "cov"], cov.x1 = sx1[, "cov"], cov.x2 = sx2[, "cov"],
    mean.bias = (abs(sx[, "bias"]) + abs(sx1[, "bias"]) + abs(sx2[, "bias"])) / 3,
    mean.LPI  = (sx[, "lpi"] + sx1[, "lpi"] + sx2[, "lpi"]) / 3,
    mean.cob  = (sx[, "cov"] + sx1[, "cov"] + sx2[, "cov"]) * 100 / 3,
    row.names = models)
}

# Scenario 3112 -- high dependence (suffix-6 parameter set), bef = .3,
# ft = 730, old = 730.  Run nsim replicates in parallel, save the raw
# per-replicate estimates, then the 3-model summary table.
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .3, ft = 730, old = 730,
                             d.ev = d.ev6, d.cens = d.cens6,
                             b0.ev = b0.ev6, b0.cens = b0.cens6,
                             a.ev = a.ev6, a.cens = a.cens6, m = 5)
WriteXLS(results, "results/SJWEH/res3112.xls")
res3112.ag <- summarize_frailty_models(results)
WriteXLS(res3112.ag, "results/SJWEH/results3112.xls")
# Summarise replicated frailty-model fits for the three estimators
# (AG frailty, ComP (CP) frailty, ComP (GT) frailty).
#
# `results` must contain, per simulation replicate, the columns
#   gcoef<model>.<cv>  (coefficient estimate) and
#   gsd<model>.<cv>    (its standard error)
# for model in {AG, COMPois, COMPoisB} and cv in names(true.coefs).
#
# Returns a 3-row data.frame (one row per model, row names AG/COMPois/
# COMPoisB) with: model label (c0), mean estimates (c1-c3), relative bias
# in % (bias25/bias50/bias75), mean 95% CI width (LPIx/LPIx1/LPIx2),
# empirical coverage as a proportion (cov.x/cov.x1/cov.x2), and per-model
# averages (mean.bias, mean.LPI, mean.cob -- the last one in %).
# NOTE: redefined identically before each scenario so every section can be
# run on its own; repeated definitions are no-ops.
summarize_frailty_models <- function(results,
                                     labels = c("AG F", "ComP (CP) F", "ComP (GT) F"),
                                     true.coefs = c(x = 0.25, x1 = 0.50, x2 = 0.75),
                                     z = 1.96) {
  models <- c("AG", "COMPois", "COMPoisB")
  # One 3 (models) x 4 (coef, bias, lpi, cov) block per covariate.
  stat.block <- function(cv) {
    tr <- true.coefs[[cv]]
    t(sapply(models, function(mod) {
      est <- results[[paste0("gcoef", mod, ".", cv)]]
      se  <- results[[paste0("gsd",   mod, ".", cv)]]
      lo  <- est - z * se                       # per-replicate Wald bounds
      hi  <- est + z * se
      c(coef = mean(est),
        bias = ((mean(est) - tr) / tr) * 100,   # relative bias (%)
        lpi  = mean(hi - lo),                   # mean CI width ("LPI")
        cov  = sum(lo <= tr & tr <= hi) / nrow(results))  # coverage
    }))
  }
  sx  <- stat.block("x")
  sx1 <- stat.block("x1")
  sx2 <- stat.block("x2")
  data.frame(
    c0 = labels,
    c1 = sx[, "coef"], c2 = sx1[, "coef"], c3 = sx2[, "coef"],
    bias25 = sx[, "bias"], bias50 = sx1[, "bias"], bias75 = sx2[, "bias"],
    LPIx = sx[, "lpi"], LPIx1 = sx1[, "lpi"], LPIx2 = sx2[, "lpi"],
    cov.x = sx[, "cov"], cov.x1 = sx1[, "cov"], cov.x2 = sx2[, "cov"],
    mean.bias = (abs(sx[, "bias"]) + abs(sx1[, "bias"]) + abs(sx2[, "bias"])) / 3,
    mean.LPI  = (sx[, "lpi"] + sx1[, "lpi"] + sx2[, "lpi"]) / 3,
    mean.cob  = (sx[, "cov"] + sx1[, "cov"] + sx2[, "cov"]) * 100 / 3,
    row.names = models)
}

# Scenario 3113 -- high dependence (suffix-6 parameter set), bef = .5,
# ft = 730, old = 730.  Run nsim replicates in parallel, save the raw
# per-replicate estimates, then the 3-model summary table.
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .5, ft = 730, old = 730,
                             d.ev = d.ev6, d.cens = d.cens6,
                             b0.ev = b0.ev6, b0.cens = b0.cens6,
                             a.ev = a.ev6, a.cens = a.cens6, m = 5)
WriteXLS(results, "results/SJWEH/res3113.xls")
res3113.ag <- summarize_frailty_models(results)
WriteXLS(res3113.ag, "results/SJWEH/results3113.xls")
# Scenario 3121: nm=1000, bef=.1, ft=1825, old=730, frailty (m=5), using
# the scenario-6 parameter set. NOTE(review): bef/ft/old presumably set the
# exposed fraction and the follow-up/history windows in days -- confirm
# against genResultsDEF1_new_frailty().
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .1, ft = 1825, old = 730,
                             d.ev = d.ev6, d.cens = d.cens6,
                             b0.ev = b0.ev6, b0.cens = b0.cens6,
                             a.ev = a.ev6, a.cens = a.cens6, m = 5)
# Raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res3121.xls")

# Summarise one simulation scenario: mean coefficient estimates, relative
# bias (%), mean 95% CI length (LPI) and empirical coverage, for the three
# frailty models (AG, ComP CP, ComP GT) and the three covariates with true
# coefficients 0.25, 0.50 and 0.75.
# `results` must hold the per-replicate columns gcoef<model>.<covar> and
# gsd<model>.<covar> produced by genResultsDEF1_new_frailty().
# Returns a 3-row data frame (one row per model) with the same columns as
# the original inline computation: c0..c3, bias25/50/75, LPIx*, cov.x*,
# mean.bias, mean.LPI, mean.cob.
# (Redefined in every scenario block so each block is self-contained.)
summarize_frailty_results <- function(results) {
  # Row labels for the output table.
  c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
  # Mean estimated coefficient per model, one object per covariate.
  c1 <- rbind(AG       = mean(results$gcoefAG.x),
              COMPois  = mean(results$gcoefCOMPois.x),
              COMPoisB = mean(results$gcoefCOMPoisB.x))
  c2 <- rbind(AG       = mean(results$gcoefAG.x1),
              COMPois  = mean(results$gcoefCOMPois.x1),
              COMPoisB = mean(results$gcoefCOMPoisB.x1))
  c3 <- rbind(AG       = mean(results$gcoefAG.x2),
              COMPois  = mean(results$gcoefCOMPois.x2),
              COMPoisB = mean(results$gcoefCOMPoisB.x2))
  # Relative bias (%) with respect to the true coefficients.
  bias25 <- ((c1 - .25) / .25) * 100
  bias50 <- ((c2 - .5) / .5) * 100
  bias75 <- ((c3 - .75) / .75) * 100
  # Per-replicate Wald 95% CI for one model/covariate: mean interval
  # length (LPI) and empirical coverage of the true value.
  ci_stats <- function(model, covar, truth) {
    est <- results[[paste0("gcoef", model, ".", covar)]]
    se  <- results[[paste0("gsd", model, ".", covar)]]
    lo  <- est - 1.96 * se
    hi  <- est + 1.96 * se
    c(lpi = mean(hi - lo), cov = mean(lo <= truth & truth <= hi))
  }
  models   <- c("AG", "COMPois", "COMPoisB")
  stats.x  <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x",  truth = .25)
  stats.x1 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x1", truth = .5)
  stats.x2 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x2", truth = .75)
  LPIx   <- cbind(stats.x["lpi", ])
  LPIx1  <- cbind(stats.x1["lpi", ])
  LPIx2  <- cbind(stats.x2["lpi", ])
  cov.x  <- cbind(stats.x["cov", ])
  cov.x1 <- cbind(stats.x1["cov", ])
  cov.x2 <- cbind(stats.x2["cov", ])
  # Averages across the three covariates (coverage reported in %).
  mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
  mean.LPI  <- (LPIx + LPIx1 + LPIx2) / 3
  mean.cob  <- (cov.x + cov.x1 + cov.x2) * 100 / 3
  data.frame(c0, c1, c2, c3, bias25, bias50, bias75,
             LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2,
             mean.bias, mean.LPI, mean.cob)
}

res3121.ag <- summarize_frailty_results(results)
WriteXLS(res3121.ag, "results/SJWEH/results3121.xls")
# Scenario 3122: nm=1000, bef=.3, ft=1825, old=730, frailty (m=5), using
# the scenario-6 parameter set. NOTE(review): bef/ft/old presumably set the
# exposed fraction and the follow-up/history windows in days -- confirm
# against genResultsDEF1_new_frailty().
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .3, ft = 1825, old = 730,
                             d.ev = d.ev6, d.cens = d.cens6,
                             b0.ev = b0.ev6, b0.cens = b0.cens6,
                             a.ev = a.ev6, a.cens = a.cens6, m = 5)
# Raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res3122.xls")

# Summarise one simulation scenario: mean coefficient estimates, relative
# bias (%), mean 95% CI length (LPI) and empirical coverage, for the three
# frailty models (AG, ComP CP, ComP GT) and the three covariates with true
# coefficients 0.25, 0.50 and 0.75.
# `results` must hold the per-replicate columns gcoef<model>.<covar> and
# gsd<model>.<covar> produced by genResultsDEF1_new_frailty().
# Returns a 3-row data frame (one row per model) with the same columns as
# the original inline computation: c0..c3, bias25/50/75, LPIx*, cov.x*,
# mean.bias, mean.LPI, mean.cob.
# (Redefined in every scenario block so each block is self-contained.)
summarize_frailty_results <- function(results) {
  # Row labels for the output table.
  c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
  # Mean estimated coefficient per model, one object per covariate.
  c1 <- rbind(AG       = mean(results$gcoefAG.x),
              COMPois  = mean(results$gcoefCOMPois.x),
              COMPoisB = mean(results$gcoefCOMPoisB.x))
  c2 <- rbind(AG       = mean(results$gcoefAG.x1),
              COMPois  = mean(results$gcoefCOMPois.x1),
              COMPoisB = mean(results$gcoefCOMPoisB.x1))
  c3 <- rbind(AG       = mean(results$gcoefAG.x2),
              COMPois  = mean(results$gcoefCOMPois.x2),
              COMPoisB = mean(results$gcoefCOMPoisB.x2))
  # Relative bias (%) with respect to the true coefficients.
  bias25 <- ((c1 - .25) / .25) * 100
  bias50 <- ((c2 - .5) / .5) * 100
  bias75 <- ((c3 - .75) / .75) * 100
  # Per-replicate Wald 95% CI for one model/covariate: mean interval
  # length (LPI) and empirical coverage of the true value.
  ci_stats <- function(model, covar, truth) {
    est <- results[[paste0("gcoef", model, ".", covar)]]
    se  <- results[[paste0("gsd", model, ".", covar)]]
    lo  <- est - 1.96 * se
    hi  <- est + 1.96 * se
    c(lpi = mean(hi - lo), cov = mean(lo <= truth & truth <= hi))
  }
  models   <- c("AG", "COMPois", "COMPoisB")
  stats.x  <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x",  truth = .25)
  stats.x1 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x1", truth = .5)
  stats.x2 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x2", truth = .75)
  LPIx   <- cbind(stats.x["lpi", ])
  LPIx1  <- cbind(stats.x1["lpi", ])
  LPIx2  <- cbind(stats.x2["lpi", ])
  cov.x  <- cbind(stats.x["cov", ])
  cov.x1 <- cbind(stats.x1["cov", ])
  cov.x2 <- cbind(stats.x2["cov", ])
  # Averages across the three covariates (coverage reported in %).
  mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
  mean.LPI  <- (LPIx + LPIx1 + LPIx2) / 3
  mean.cob  <- (cov.x + cov.x1 + cov.x2) * 100 / 3
  data.frame(c0, c1, c2, c3, bias25, bias50, bias75,
             LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2,
             mean.bias, mean.LPI, mean.cob)
}

res3122.ag <- summarize_frailty_results(results)
WriteXLS(res3122.ag, "results/SJWEH/results3122.xls")
# Scenario 3123: nm=1000, bef=.5, ft=1825, old=730, frailty (m=5), using
# the scenario-6 parameter set. NOTE(review): bef/ft/old presumably set the
# exposed fraction and the follow-up/history windows in days -- confirm
# against genResultsDEF1_new_frailty().
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .5, ft = 1825, old = 730,
                             d.ev = d.ev6, d.cens = d.cens6,
                             b0.ev = b0.ev6, b0.cens = b0.cens6,
                             a.ev = a.ev6, a.cens = a.cens6, m = 5)
# Raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res3123.xls")

# Summarise one simulation scenario: mean coefficient estimates, relative
# bias (%), mean 95% CI length (LPI) and empirical coverage, for the three
# frailty models (AG, ComP CP, ComP GT) and the three covariates with true
# coefficients 0.25, 0.50 and 0.75.
# `results` must hold the per-replicate columns gcoef<model>.<covar> and
# gsd<model>.<covar> produced by genResultsDEF1_new_frailty().
# Returns a 3-row data frame (one row per model) with the same columns as
# the original inline computation: c0..c3, bias25/50/75, LPIx*, cov.x*,
# mean.bias, mean.LPI, mean.cob.
# (Redefined in every scenario block so each block is self-contained.)
summarize_frailty_results <- function(results) {
  # Row labels for the output table.
  c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
  # Mean estimated coefficient per model, one object per covariate.
  c1 <- rbind(AG       = mean(results$gcoefAG.x),
              COMPois  = mean(results$gcoefCOMPois.x),
              COMPoisB = mean(results$gcoefCOMPoisB.x))
  c2 <- rbind(AG       = mean(results$gcoefAG.x1),
              COMPois  = mean(results$gcoefCOMPois.x1),
              COMPoisB = mean(results$gcoefCOMPoisB.x1))
  c3 <- rbind(AG       = mean(results$gcoefAG.x2),
              COMPois  = mean(results$gcoefCOMPois.x2),
              COMPoisB = mean(results$gcoefCOMPoisB.x2))
  # Relative bias (%) with respect to the true coefficients.
  bias25 <- ((c1 - .25) / .25) * 100
  bias50 <- ((c2 - .5) / .5) * 100
  bias75 <- ((c3 - .75) / .75) * 100
  # Per-replicate Wald 95% CI for one model/covariate: mean interval
  # length (LPI) and empirical coverage of the true value.
  ci_stats <- function(model, covar, truth) {
    est <- results[[paste0("gcoef", model, ".", covar)]]
    se  <- results[[paste0("gsd", model, ".", covar)]]
    lo  <- est - 1.96 * se
    hi  <- est + 1.96 * se
    c(lpi = mean(hi - lo), cov = mean(lo <= truth & truth <= hi))
  }
  models   <- c("AG", "COMPois", "COMPoisB")
  stats.x  <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x",  truth = .25)
  stats.x1 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x1", truth = .5)
  stats.x2 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x2", truth = .75)
  LPIx   <- cbind(stats.x["lpi", ])
  LPIx1  <- cbind(stats.x1["lpi", ])
  LPIx2  <- cbind(stats.x2["lpi", ])
  cov.x  <- cbind(stats.x["cov", ])
  cov.x1 <- cbind(stats.x1["cov", ])
  cov.x2 <- cbind(stats.x2["cov", ])
  # Averages across the three covariates (coverage reported in %).
  mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
  mean.LPI  <- (LPIx + LPIx1 + LPIx2) / 3
  mean.cob  <- (cov.x + cov.x1 + cov.x2) * 100 / 3
  data.frame(c0, c1, c2, c3, bias25, bias50, bias75,
             LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2,
             mean.bias, mean.LPI, mean.cob)
}

res3123.ag <- summarize_frailty_results(results)
WriteXLS(res3123.ag, "results/SJWEH/results3123.xls")
# Scenario 3211: nm=1000, bef=.1, ft=730, old=3650, frailty (m=5), using
# the scenario-6 parameter set. NOTE(review): bef/ft/old presumably set the
# exposed fraction and the follow-up/history windows in days -- confirm
# against genResultsDEF1_new_frailty().
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .1, ft = 730, old = 3650,
                             d.ev = d.ev6, d.cens = d.cens6,
                             b0.ev = b0.ev6, b0.cens = b0.cens6,
                             a.ev = a.ev6, a.cens = a.cens6, m = 5)
# Raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res3211.xls")

# Summarise one simulation scenario: mean coefficient estimates, relative
# bias (%), mean 95% CI length (LPI) and empirical coverage, for the three
# frailty models (AG, ComP CP, ComP GT) and the three covariates with true
# coefficients 0.25, 0.50 and 0.75.
# `results` must hold the per-replicate columns gcoef<model>.<covar> and
# gsd<model>.<covar> produced by genResultsDEF1_new_frailty().
# Returns a 3-row data frame (one row per model) with the same columns as
# the original inline computation: c0..c3, bias25/50/75, LPIx*, cov.x*,
# mean.bias, mean.LPI, mean.cob.
# (Redefined in every scenario block so each block is self-contained.)
summarize_frailty_results <- function(results) {
  # Row labels for the output table.
  c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
  # Mean estimated coefficient per model, one object per covariate.
  c1 <- rbind(AG       = mean(results$gcoefAG.x),
              COMPois  = mean(results$gcoefCOMPois.x),
              COMPoisB = mean(results$gcoefCOMPoisB.x))
  c2 <- rbind(AG       = mean(results$gcoefAG.x1),
              COMPois  = mean(results$gcoefCOMPois.x1),
              COMPoisB = mean(results$gcoefCOMPoisB.x1))
  c3 <- rbind(AG       = mean(results$gcoefAG.x2),
              COMPois  = mean(results$gcoefCOMPois.x2),
              COMPoisB = mean(results$gcoefCOMPoisB.x2))
  # Relative bias (%) with respect to the true coefficients.
  bias25 <- ((c1 - .25) / .25) * 100
  bias50 <- ((c2 - .5) / .5) * 100
  bias75 <- ((c3 - .75) / .75) * 100
  # Per-replicate Wald 95% CI for one model/covariate: mean interval
  # length (LPI) and empirical coverage of the true value.
  ci_stats <- function(model, covar, truth) {
    est <- results[[paste0("gcoef", model, ".", covar)]]
    se  <- results[[paste0("gsd", model, ".", covar)]]
    lo  <- est - 1.96 * se
    hi  <- est + 1.96 * se
    c(lpi = mean(hi - lo), cov = mean(lo <= truth & truth <= hi))
  }
  models   <- c("AG", "COMPois", "COMPoisB")
  stats.x  <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x",  truth = .25)
  stats.x1 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x1", truth = .5)
  stats.x2 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x2", truth = .75)
  LPIx   <- cbind(stats.x["lpi", ])
  LPIx1  <- cbind(stats.x1["lpi", ])
  LPIx2  <- cbind(stats.x2["lpi", ])
  cov.x  <- cbind(stats.x["cov", ])
  cov.x1 <- cbind(stats.x1["cov", ])
  cov.x2 <- cbind(stats.x2["cov", ])
  # Averages across the three covariates (coverage reported in %).
  mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
  mean.LPI  <- (LPIx + LPIx1 + LPIx2) / 3
  mean.cob  <- (cov.x + cov.x1 + cov.x2) * 100 / 3
  data.frame(c0, c1, c2, c3, bias25, bias50, bias75,
             LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2,
             mean.bias, mean.LPI, mean.cob)
}

res3211.ag <- summarize_frailty_results(results)
WriteXLS(res3211.ag, "results/SJWEH/results3211.xls")
# Scenario 3212: nm=1000, bef=.3, ft=730, old=3650, frailty (m=5), using
# the scenario-6 parameter set. NOTE(review): bef/ft/old presumably set the
# exposed fraction and the follow-up/history windows in days -- confirm
# against genResultsDEF1_new_frailty().
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .3, ft = 730, old = 3650,
                             d.ev = d.ev6, d.cens = d.cens6,
                             b0.ev = b0.ev6, b0.cens = b0.cens6,
                             a.ev = a.ev6, a.cens = a.cens6, m = 5)
# Raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res3212.xls")

# Summarise one simulation scenario: mean coefficient estimates, relative
# bias (%), mean 95% CI length (LPI) and empirical coverage, for the three
# frailty models (AG, ComP CP, ComP GT) and the three covariates with true
# coefficients 0.25, 0.50 and 0.75.
# `results` must hold the per-replicate columns gcoef<model>.<covar> and
# gsd<model>.<covar> produced by genResultsDEF1_new_frailty().
# Returns a 3-row data frame (one row per model) with the same columns as
# the original inline computation: c0..c3, bias25/50/75, LPIx*, cov.x*,
# mean.bias, mean.LPI, mean.cob.
# (Redefined in every scenario block so each block is self-contained.)
summarize_frailty_results <- function(results) {
  # Row labels for the output table.
  c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
  # Mean estimated coefficient per model, one object per covariate.
  c1 <- rbind(AG       = mean(results$gcoefAG.x),
              COMPois  = mean(results$gcoefCOMPois.x),
              COMPoisB = mean(results$gcoefCOMPoisB.x))
  c2 <- rbind(AG       = mean(results$gcoefAG.x1),
              COMPois  = mean(results$gcoefCOMPois.x1),
              COMPoisB = mean(results$gcoefCOMPoisB.x1))
  c3 <- rbind(AG       = mean(results$gcoefAG.x2),
              COMPois  = mean(results$gcoefCOMPois.x2),
              COMPoisB = mean(results$gcoefCOMPoisB.x2))
  # Relative bias (%) with respect to the true coefficients.
  bias25 <- ((c1 - .25) / .25) * 100
  bias50 <- ((c2 - .5) / .5) * 100
  bias75 <- ((c3 - .75) / .75) * 100
  # Per-replicate Wald 95% CI for one model/covariate: mean interval
  # length (LPI) and empirical coverage of the true value.
  ci_stats <- function(model, covar, truth) {
    est <- results[[paste0("gcoef", model, ".", covar)]]
    se  <- results[[paste0("gsd", model, ".", covar)]]
    lo  <- est - 1.96 * se
    hi  <- est + 1.96 * se
    c(lpi = mean(hi - lo), cov = mean(lo <= truth & truth <= hi))
  }
  models   <- c("AG", "COMPois", "COMPoisB")
  stats.x  <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x",  truth = .25)
  stats.x1 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x1", truth = .5)
  stats.x2 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x2", truth = .75)
  LPIx   <- cbind(stats.x["lpi", ])
  LPIx1  <- cbind(stats.x1["lpi", ])
  LPIx2  <- cbind(stats.x2["lpi", ])
  cov.x  <- cbind(stats.x["cov", ])
  cov.x1 <- cbind(stats.x1["cov", ])
  cov.x2 <- cbind(stats.x2["cov", ])
  # Averages across the three covariates (coverage reported in %).
  mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
  mean.LPI  <- (LPIx + LPIx1 + LPIx2) / 3
  mean.cob  <- (cov.x + cov.x1 + cov.x2) * 100 / 3
  data.frame(c0, c1, c2, c3, bias25, bias50, bias75,
             LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2,
             mean.bias, mean.LPI, mean.cob)
}

res3212.ag <- summarize_frailty_results(results)
WriteXLS(res3212.ag, "results/SJWEH/results3212.xls")
# Scenario 3213: nm=1000, bef=.5, ft=730, old=3650, frailty (m=5), using
# the scenario-6 parameter set. NOTE(review): bef/ft/old presumably set the
# exposed fraction and the follow-up/history windows in days -- confirm
# against genResultsDEF1_new_frailty().
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .5, ft = 730, old = 3650,
                             d.ev = d.ev6, d.cens = d.cens6,
                             b0.ev = b0.ev6, b0.cens = b0.cens6,
                             a.ev = a.ev6, a.cens = a.cens6, m = 5)
# Raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res3213.xls")

# Summarise one simulation scenario: mean coefficient estimates, relative
# bias (%), mean 95% CI length (LPI) and empirical coverage, for the three
# frailty models (AG, ComP CP, ComP GT) and the three covariates with true
# coefficients 0.25, 0.50 and 0.75.
# `results` must hold the per-replicate columns gcoef<model>.<covar> and
# gsd<model>.<covar> produced by genResultsDEF1_new_frailty().
# Returns a 3-row data frame (one row per model) with the same columns as
# the original inline computation: c0..c3, bias25/50/75, LPIx*, cov.x*,
# mean.bias, mean.LPI, mean.cob.
# (Redefined in every scenario block so each block is self-contained.)
summarize_frailty_results <- function(results) {
  # Row labels for the output table.
  c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
  # Mean estimated coefficient per model, one object per covariate.
  c1 <- rbind(AG       = mean(results$gcoefAG.x),
              COMPois  = mean(results$gcoefCOMPois.x),
              COMPoisB = mean(results$gcoefCOMPoisB.x))
  c2 <- rbind(AG       = mean(results$gcoefAG.x1),
              COMPois  = mean(results$gcoefCOMPois.x1),
              COMPoisB = mean(results$gcoefCOMPoisB.x1))
  c3 <- rbind(AG       = mean(results$gcoefAG.x2),
              COMPois  = mean(results$gcoefCOMPois.x2),
              COMPoisB = mean(results$gcoefCOMPoisB.x2))
  # Relative bias (%) with respect to the true coefficients.
  bias25 <- ((c1 - .25) / .25) * 100
  bias50 <- ((c2 - .5) / .5) * 100
  bias75 <- ((c3 - .75) / .75) * 100
  # Per-replicate Wald 95% CI for one model/covariate: mean interval
  # length (LPI) and empirical coverage of the true value.
  ci_stats <- function(model, covar, truth) {
    est <- results[[paste0("gcoef", model, ".", covar)]]
    se  <- results[[paste0("gsd", model, ".", covar)]]
    lo  <- est - 1.96 * se
    hi  <- est + 1.96 * se
    c(lpi = mean(hi - lo), cov = mean(lo <= truth & truth <= hi))
  }
  models   <- c("AG", "COMPois", "COMPoisB")
  stats.x  <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x",  truth = .25)
  stats.x1 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x1", truth = .5)
  stats.x2 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x2", truth = .75)
  LPIx   <- cbind(stats.x["lpi", ])
  LPIx1  <- cbind(stats.x1["lpi", ])
  LPIx2  <- cbind(stats.x2["lpi", ])
  cov.x  <- cbind(stats.x["cov", ])
  cov.x1 <- cbind(stats.x1["cov", ])
  cov.x2 <- cbind(stats.x2["cov", ])
  # Averages across the three covariates (coverage reported in %).
  mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
  mean.LPI  <- (LPIx + LPIx1 + LPIx2) / 3
  mean.cob  <- (cov.x + cov.x1 + cov.x2) * 100 / 3
  data.frame(c0, c1, c2, c3, bias25, bias50, bias75,
             LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2,
             mean.bias, mean.LPI, mean.cob)
}

res3213.ag <- summarize_frailty_results(results)
WriteXLS(res3213.ag, "results/SJWEH/results3213.xls")
# Scenario 3221: nm=1000, bef=.1, ft=1825, old=3650, frailty (m=5), using
# the scenario-6 parameter set. NOTE(review): bef/ft/old presumably set the
# exposed fraction and the follow-up/history windows in days -- confirm
# against genResultsDEF1_new_frailty().
results <- foreach(k = seq_len(nsim), .combine = rbind) %dopar%
  genResultsDEF1_new_frailty(k, nm = 1000, bef = .1, ft = 1825, old = 3650,
                             d.ev = d.ev6, d.cens = d.cens6,
                             b0.ev = b0.ev6, b0.cens = b0.cens6,
                             a.ev = a.ev6, a.cens = a.cens6, m = 5)
# Raw per-replicate estimates for this scenario.
WriteXLS(results, "results/SJWEH/res3221.xls")

# Summarise one simulation scenario: mean coefficient estimates, relative
# bias (%), mean 95% CI length (LPI) and empirical coverage, for the three
# frailty models (AG, ComP CP, ComP GT) and the three covariates with true
# coefficients 0.25, 0.50 and 0.75.
# `results` must hold the per-replicate columns gcoef<model>.<covar> and
# gsd<model>.<covar> produced by genResultsDEF1_new_frailty().
# Returns a 3-row data frame (one row per model) with the same columns as
# the original inline computation: c0..c3, bias25/50/75, LPIx*, cov.x*,
# mean.bias, mean.LPI, mean.cob.
# (Redefined in every scenario block so each block is self-contained.)
summarize_frailty_results <- function(results) {
  # Row labels for the output table.
  c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
  # Mean estimated coefficient per model, one object per covariate.
  c1 <- rbind(AG       = mean(results$gcoefAG.x),
              COMPois  = mean(results$gcoefCOMPois.x),
              COMPoisB = mean(results$gcoefCOMPoisB.x))
  c2 <- rbind(AG       = mean(results$gcoefAG.x1),
              COMPois  = mean(results$gcoefCOMPois.x1),
              COMPoisB = mean(results$gcoefCOMPoisB.x1))
  c3 <- rbind(AG       = mean(results$gcoefAG.x2),
              COMPois  = mean(results$gcoefCOMPois.x2),
              COMPoisB = mean(results$gcoefCOMPoisB.x2))
  # Relative bias (%) with respect to the true coefficients.
  bias25 <- ((c1 - .25) / .25) * 100
  bias50 <- ((c2 - .5) / .5) * 100
  bias75 <- ((c3 - .75) / .75) * 100
  # Per-replicate Wald 95% CI for one model/covariate: mean interval
  # length (LPI) and empirical coverage of the true value.
  ci_stats <- function(model, covar, truth) {
    est <- results[[paste0("gcoef", model, ".", covar)]]
    se  <- results[[paste0("gsd", model, ".", covar)]]
    lo  <- est - 1.96 * se
    hi  <- est + 1.96 * se
    c(lpi = mean(hi - lo), cov = mean(lo <= truth & truth <= hi))
  }
  models   <- c("AG", "COMPois", "COMPoisB")
  stats.x  <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x",  truth = .25)
  stats.x1 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x1", truth = .5)
  stats.x2 <- vapply(models, ci_stats, c(lpi = 0, cov = 0), covar = "x2", truth = .75)
  LPIx   <- cbind(stats.x["lpi", ])
  LPIx1  <- cbind(stats.x1["lpi", ])
  LPIx2  <- cbind(stats.x2["lpi", ])
  cov.x  <- cbind(stats.x["cov", ])
  cov.x1 <- cbind(stats.x1["cov", ])
  cov.x2 <- cbind(stats.x2["cov", ])
  # Averages across the three covariates (coverage reported in %).
  mean.bias <- (abs(bias25) + abs(bias50) + abs(bias75)) / 3
  mean.LPI  <- (LPIx + LPIx1 + LPIx2) / 3
  mean.cob  <- (cov.x + cov.x1 + cov.x2) * 100 / 3
  data.frame(c0, c1, c2, c3, bias25, bias50, bias75,
             LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2,
             mean.bias, mean.LPI, mean.cob)
}

res3221.ag <- summarize_frailty_results(results)
WriteXLS(res3221.ag, "results/SJWEH/results3221.xls")
# ---- Scenario "res3222": frailty data, bef = .3 (semantics defined by ----
# ---- genResultsDEF1_new_frailty; presumably fraction of events before  ----
# ---- follow-up -- TODO confirm against that function's definition).    ----
# Run `nsim` parallel simulations; each returns one row of coefficient and
# SE estimates for the AG and the two Compound Poisson (ComP) models.
results <- foreach(k=1:nsim, .combine=rbind) %dopar%
genResultsDEF1_new_frailty(k, nm=1000, bef=.3, ft=1825, old=3650,
d.ev=d.ev6, d.cens=d.cens6, b0.ev=b0.ev6, b0.cens=b0.cens6,
a.ev=a.ev6, a.cens=a.cens6, m=5)
WriteXLS(results, "results/SJWEH/res3222.xls")
# Row labels and mean coefficient estimates across simulations for the three
# covariates x, x1, x2 (true values 0.25, 0.5, 0.75 per the bias lines below).
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
c1 <- rbind(AG=mean(results$gcoefAG.x), COMPois=mean(results$gcoefCOMPois.x), COMPoisB=mean(results$gcoefCOMPoisB.x))
c2 <- rbind(AG=mean(results$gcoefAG.x1), COMPois=mean(results$gcoefCOMPois.x1), COMPoisB=mean(results$gcoefCOMPoisB.x1))
c3 <- rbind(AG=mean(results$gcoefAG.x2), COMPois=mean(results$gcoefCOMPois.x2), COMPoisB=mean(results$gcoefCOMPoisB.x2))
# Percent relative bias against the true coefficients 0.25 / 0.5 / 0.75.
bias25 <- ((c1-.25)/.25)*100
bias50 <- ((c2-.5)/.5)*100
bias75 <- ((c3-.75)/.75)*100
# 95% Wald confidence limits (estimate +/- 1.96 * SE), used below for
# coverage and interval-length (LPI) summaries.
results$AGx_ci_i<-results$gcoefAG.x-1.96*results$gsdAG.x
results$AGx_ci_s<-results$gcoefAG.x+1.96*results$gsdAG.x
results$ComPoisx_ci_i<-results$gcoefCOMPois.x-1.96*results$gsdCOMPois.x
results$ComPoisx_ci_s<-results$gcoefCOMPois.x+1.96*results$gsdCOMPois.x
results$ComPoisBx_ci_i<-results$gcoefCOMPoisB.x-1.96*results$gsdCOMPoisB.x
results$ComPoisBx_ci_s<-results$gcoefCOMPoisB.x+1.96*results$gsdCOMPoisB.x
results$AGx1_ci_i<-results$gcoefAG.x1-1.96*results$gsdAG.x1
results$AGx1_ci_s<-results$gcoefAG.x1+1.96*results$gsdAG.x1
results$ComPoisx1_ci_i<-results$gcoefCOMPois.x1-1.96*results$gsdCOMPois.x1
results$ComPoisx1_ci_s<-results$gcoefCOMPois.x1+1.96*results$gsdCOMPois.x1
results$ComPoisBx1_ci_i<-results$gcoefCOMPoisB.x1-1.96*results$gsdCOMPoisB.x1
results$ComPoisBx1_ci_s<-results$gcoefCOMPoisB.x1+1.96*results$gsdCOMPoisB.x1
results$AGx2_ci_i<-results$gcoefAG.x2-1.96*results$gsdAG.x2
results$AGx2_ci_s<-results$gcoefAG.x2+1.96*results$gsdAG.x2
results$ComPoisx2_ci_i<-results$gcoefCOMPois.x2-1.96*results$gsdCOMPois.x2
results$ComPoisx2_ci_s<-results$gcoefCOMPois.x2+1.96*results$gsdCOMPois.x2
results$ComPoisBx2_ci_i<-results$gcoefCOMPoisB.x2-1.96*results$gsdCOMPoisB.x2
results$ComPoisBx2_ci_s<-results$gcoefCOMPoisB.x2+1.96*results$gsdCOMPoisB.x2
# Per-simulation interval lengths (LPI = upper limit - lower limit).
results$LPIxAG<-results$AGx_ci_s-results$AGx_ci_i
results$LPIx1AG<-results$AGx1_ci_s-results$AGx1_ci_i
results$LPIx2AG<-results$AGx2_ci_s-results$AGx2_ci_i
results$LPIxCOMPois<-results$ComPoisx_ci_s-results$ComPoisx_ci_i
results$LPIx1COMPois<-results$ComPoisx1_ci_s-results$ComPoisx1_ci_i
results$LPIx2COMPois<-results$ComPoisx2_ci_s-results$ComPoisx2_ci_i
results$LPIxCOMPoisB<-results$ComPoisBx_ci_s-results$ComPoisBx_ci_i
results$LPIx1COMPoisB<-results$ComPoisBx1_ci_s-results$ComPoisBx1_ci_i
results$LPIx2COMPoisB<-results$ComPoisBx2_ci_s-results$ComPoisBx2_ci_i
# Mean interval length per model and covariate.
LPIx <- rbind(AG=mean(results$LPIxAG), COMPois=mean(results$LPIxCOMPois), COMPoisB=mean(results$LPIxCOMPoisB))
LPIx1 <-rbind(AG=mean(results$LPIx1AG), COMPois=mean(results$LPIx1COMPois), COMPoisB=mean(results$LPIx1COMPoisB))
LPIx2 <-rbind(AG=mean(results$LPIx2AG), COMPois=mean(results$LPIx2COMPois), COMPoisB=mean(results$LPIx2COMPoisB))
# Coverage indicators: 1 when the true coefficient falls inside the 95% CI.
results$AGx_cov<-ifelse(results$AGx_ci_i<=0.25 & 0.25<=results$AGx_ci_s,1 ,0)
results$AGx1_cov<-ifelse(results$AGx1_ci_i<=0.5 & 0.5<=results$AGx1_ci_s,1 ,0)
results$AGx2_cov<-ifelse(results$AGx2_ci_i<=0.75 & 0.75<=results$AGx2_ci_s,1 ,0)
results$ComPoisx_cov<-ifelse(results$ComPoisx_ci_i<=0.25 & 0.25<=results$ComPoisx_ci_s,1 ,0)
results$ComPoisx1_cov<-ifelse(results$ComPoisx1_ci_i<=0.5 & 0.5<=results$ComPoisx1_ci_s,1 ,0)
results$ComPoisx2_cov<-ifelse(results$ComPoisx2_ci_i<=0.75 & 0.75<=results$ComPoisx2_ci_s,1 ,0)
results$ComPoisBx_cov<-ifelse(results$ComPoisBx_ci_i<=0.25 & 0.25<=results$ComPoisBx_ci_s,1 ,0)
results$ComPoisBx1_cov<-ifelse(results$ComPoisBx1_ci_i<=0.5 & 0.5<=results$ComPoisBx1_ci_s,1 ,0)
results$ComPoisBx2_cov<-ifelse(results$ComPoisBx2_ci_i<=0.75 & 0.75<=results$ComPoisBx2_ci_s,1 ,0)
# Empirical coverage = proportion of simulations whose CI covers the truth.
cov.x <-rbind(AG=sum(results$AGx_cov)/nrow(results), COMPois=sum(results$ComPoisx_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx_cov)/nrow(results))
cov.x1 <-rbind(AG=sum(results$AGx1_cov)/nrow(results), COMPois=sum(results$ComPoisx1_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx1_cov)/nrow(results))
cov.x2 <- rbind(AG=sum(results$AGx2_cov)/nrow(results), COMPois=sum(results$ComPoisx2_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx2_cov)/nrow(results))
# Scenario-level summaries averaged over the three covariates; coverage is
# expressed as a percentage.
mean.bias <- (abs(bias25)+abs(bias50)+abs(bias75)) / 3
mean.LPI <- (LPIx+LPIx1+LPIx2) / 3
mean.cob <- (cov.x+cov.x1+cov.x2)*100 / 3
res3222.ag <- data.frame(c0,c1,c2,c3,bias25,bias50,bias75, LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res3222.ag, "results/SJWEH/results3222.xls")
# ---- Scenario "res3223": identical pipeline to the previous section but ----
# ---- with bef = .5 (see genResultsDEF1_new_frailty for its meaning --   ----
# ---- TODO confirm).                                                     ----
results <- foreach(k=1:nsim, .combine=rbind) %dopar%
genResultsDEF1_new_frailty(k, nm=1000, bef=.5, ft=1825, old=3650,
d.ev=d.ev6, d.cens=d.cens6, b0.ev=b0.ev6, b0.cens=b0.cens6,
a.ev=a.ev6, a.cens=a.cens6, m=5)
WriteXLS(results, "results/SJWEH/res3223.xls")
# Row labels and mean coefficient estimates for covariates x, x1, x2.
c0 <- rbind("AG F", "ComP (CP) F", "ComP (GT) F")
c1 <- rbind(AG=mean(results$gcoefAG.x), COMPois=mean(results$gcoefCOMPois.x), COMPoisB=mean(results$gcoefCOMPoisB.x))
c2 <- rbind(AG=mean(results$gcoefAG.x1), COMPois=mean(results$gcoefCOMPois.x1), COMPoisB=mean(results$gcoefCOMPoisB.x1))
c3 <- rbind(AG=mean(results$gcoefAG.x2), COMPois=mean(results$gcoefCOMPois.x2), COMPoisB=mean(results$gcoefCOMPoisB.x2))
# Percent relative bias against the true coefficients 0.25 / 0.5 / 0.75.
bias25 <- ((c1-.25)/.25)*100
bias50 <- ((c2-.5)/.5)*100
bias75 <- ((c3-.75)/.75)*100
# 95% Wald confidence limits (estimate +/- 1.96 * SE).
results$AGx_ci_i<-results$gcoefAG.x-1.96*results$gsdAG.x
results$AGx_ci_s<-results$gcoefAG.x+1.96*results$gsdAG.x
results$ComPoisx_ci_i<-results$gcoefCOMPois.x-1.96*results$gsdCOMPois.x
results$ComPoisx_ci_s<-results$gcoefCOMPois.x+1.96*results$gsdCOMPois.x
results$ComPoisBx_ci_i<-results$gcoefCOMPoisB.x-1.96*results$gsdCOMPoisB.x
results$ComPoisBx_ci_s<-results$gcoefCOMPoisB.x+1.96*results$gsdCOMPoisB.x
results$AGx1_ci_i<-results$gcoefAG.x1-1.96*results$gsdAG.x1
results$AGx1_ci_s<-results$gcoefAG.x1+1.96*results$gsdAG.x1
results$ComPoisx1_ci_i<-results$gcoefCOMPois.x1-1.96*results$gsdCOMPois.x1
results$ComPoisx1_ci_s<-results$gcoefCOMPois.x1+1.96*results$gsdCOMPois.x1
results$ComPoisBx1_ci_i<-results$gcoefCOMPoisB.x1-1.96*results$gsdCOMPoisB.x1
results$ComPoisBx1_ci_s<-results$gcoefCOMPoisB.x1+1.96*results$gsdCOMPoisB.x1
results$AGx2_ci_i<-results$gcoefAG.x2-1.96*results$gsdAG.x2
results$AGx2_ci_s<-results$gcoefAG.x2+1.96*results$gsdAG.x2
results$ComPoisx2_ci_i<-results$gcoefCOMPois.x2-1.96*results$gsdCOMPois.x2
results$ComPoisx2_ci_s<-results$gcoefCOMPois.x2+1.96*results$gsdCOMPois.x2
results$ComPoisBx2_ci_i<-results$gcoefCOMPoisB.x2-1.96*results$gsdCOMPoisB.x2
results$ComPoisBx2_ci_s<-results$gcoefCOMPoisB.x2+1.96*results$gsdCOMPoisB.x2
# Per-simulation interval lengths (LPI = upper limit - lower limit).
results$LPIxAG<-results$AGx_ci_s-results$AGx_ci_i
results$LPIx1AG<-results$AGx1_ci_s-results$AGx1_ci_i
results$LPIx2AG<-results$AGx2_ci_s-results$AGx2_ci_i
results$LPIxCOMPois<-results$ComPoisx_ci_s-results$ComPoisx_ci_i
results$LPIx1COMPois<-results$ComPoisx1_ci_s-results$ComPoisx1_ci_i
results$LPIx2COMPois<-results$ComPoisx2_ci_s-results$ComPoisx2_ci_i
results$LPIxCOMPoisB<-results$ComPoisBx_ci_s-results$ComPoisBx_ci_i
results$LPIx1COMPoisB<-results$ComPoisBx1_ci_s-results$ComPoisBx1_ci_i
results$LPIx2COMPoisB<-results$ComPoisBx2_ci_s-results$ComPoisBx2_ci_i
# Mean interval length per model and covariate.
LPIx <- rbind(AG=mean(results$LPIxAG), COMPois=mean(results$LPIxCOMPois), COMPoisB=mean(results$LPIxCOMPoisB))
LPIx1 <-rbind(AG=mean(results$LPIx1AG), COMPois=mean(results$LPIx1COMPois), COMPoisB=mean(results$LPIx1COMPoisB))
LPIx2 <-rbind(AG=mean(results$LPIx2AG), COMPois=mean(results$LPIx2COMPois), COMPoisB=mean(results$LPIx2COMPoisB))
# Coverage indicators: 1 when the true coefficient falls inside the 95% CI.
results$AGx_cov<-ifelse(results$AGx_ci_i<=0.25 & 0.25<=results$AGx_ci_s,1 ,0)
results$AGx1_cov<-ifelse(results$AGx1_ci_i<=0.5 & 0.5<=results$AGx1_ci_s,1 ,0)
results$AGx2_cov<-ifelse(results$AGx2_ci_i<=0.75 & 0.75<=results$AGx2_ci_s,1 ,0)
results$ComPoisx_cov<-ifelse(results$ComPoisx_ci_i<=0.25 & 0.25<=results$ComPoisx_ci_s,1 ,0)
results$ComPoisx1_cov<-ifelse(results$ComPoisx1_ci_i<=0.5 & 0.5<=results$ComPoisx1_ci_s,1 ,0)
results$ComPoisx2_cov<-ifelse(results$ComPoisx2_ci_i<=0.75 & 0.75<=results$ComPoisx2_ci_s,1 ,0)
results$ComPoisBx_cov<-ifelse(results$ComPoisBx_ci_i<=0.25 & 0.25<=results$ComPoisBx_ci_s,1 ,0)
results$ComPoisBx1_cov<-ifelse(results$ComPoisBx1_ci_i<=0.5 & 0.5<=results$ComPoisBx1_ci_s,1 ,0)
results$ComPoisBx2_cov<-ifelse(results$ComPoisBx2_ci_i<=0.75 & 0.75<=results$ComPoisBx2_ci_s,1 ,0)
# Empirical coverage = proportion of simulations whose CI covers the truth.
cov.x <-rbind(AG=sum(results$AGx_cov)/nrow(results), COMPois=sum(results$ComPoisx_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx_cov)/nrow(results))
cov.x1 <-rbind(AG=sum(results$AGx1_cov)/nrow(results), COMPois=sum(results$ComPoisx1_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx1_cov)/nrow(results))
cov.x2 <- rbind(AG=sum(results$AGx2_cov)/nrow(results), COMPois=sum(results$ComPoisx2_cov)/nrow(results), COMPoisB=sum(results$ComPoisBx2_cov)/nrow(results))
# Scenario-level summaries averaged over the three covariates; coverage is
# expressed as a percentage.
mean.bias <- (abs(bias25)+abs(bias50)+abs(bias75)) / 3
mean.LPI <- (LPIx+LPIx1+LPIx2) / 3
mean.cob <- (cov.x+cov.x1+cov.x2)*100 / 3
res3223.ag <- data.frame(c0,c1,c2,c3,bias25,bias50,bias75, LPIx, LPIx1, LPIx2, cov.x, cov.x1, cov.x2, mean.bias, mean.LPI, mean.cob)
WriteXLS(res3223.ag, "results/SJWEH/results3223.xls")
|
ce5a0f6fb5c5834f04c2ab3d8063d55507953d7d
|
17f6825befaa193b78eb585851e8215121285481
|
/1_Generate_FRK_L3/4-plot.R
|
49cf1ae39cea8950e4907c47a9f26176db27bef5
|
[] |
no_license
|
andrewzm/oco2-frk
|
58cde159b41c03ed943132d0939120db69cdabe0
|
641778dd11bc2ecd068fca48a098af7709d44427
|
refs/heads/master
| 2018-10-31T16:23:27.053989
| 2018-01-23T12:43:26
| 2018-01-23T12:43:26
| 109,343,849
| 4
| 2
| null | 2017-11-10T18:00:54
| 2017-11-03T02:43:57
|
R
|
UTF-8
|
R
| false
| false
| 5,987
|
r
|
4-plot.R
|
## Produces daily map plots from the Fixed Rank Kriging results.
# Change this to "oco2v7" or "oco2v8"
data_version <- "oco2v7"
library(FRK)
library(dplyr)
library(ggplot2)
# Enlarge default ggplot text so the saved 16x9 maps are legible.
theme_set(theme_grey(base_size = 20))
# Blue-to-red palette shared by all XCO2 / anomaly colour scales below.
my_colours <- c("#03006d","#02008f","#0000b6","#0001ef","#0000f6","#0428f6","#0b53f7","#0f81f3",
"#18b1f5","#1ff0f7","#27fada","#3efaa3","#5dfc7b","#85fd4e","#aefc2a","#e9fc0d",
"#f6da0c","#f5a009","#f6780a","#f34a09","#f2210a","#f50008","#d90009","#a80109","#730005")
# Shared map theme: blank canvas (no axes, ticks or grid), centred title.
my_theme <- theme(panel.background = element_rect(fill = "white",colour = "white"), panel.grid = element_blank(), axis.ticks = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.text.x = element_blank(), axis.text.y = element_blank(),
plot.title = element_text(hjust = 0.5))
plotOneDay <- function(selecteddate, sixteendays) {
  # Map the soundings of a single day (extracted from the 16-day window),
  # clamped to [390, 410] ppm, and save them as <date>_data.png.
  # Relies on script-level globals: my_theme, my_colours, data_version,
  # and FRK::draw_world.
  print("Plotting one day")
  oneday <- sixteendays[as.Date(sixteendays$day, tz = "UTC") == as.Date(selecteddate, tz = "UTC"), ]
  day_map <- ggplot(oneday) +
    my_theme +
    geom_point(aes(lon, lat, colour = pmin(pmax(xco2, 390), 410))) +
    lims(x = c(-180, 180), y = c(-90, 90)) +
    scale_colour_gradientn(colours = my_colours, limits = c(390, 410)) +
    labs(x = "lon (deg)", y = "lat (deg)", colour = "XCO2\n(ppm)\n",
         title = paste(selecteddate, data_version, "DAILY DATA")) +
    coord_map("mollweide")
  outfile <- file.path(paste0(data_version, "plots"), paste0(selecteddate, "_data.png"))
  ggsave(draw_world(day_map, inc_border = TRUE),
         filename = outfile, width = 16, height = 9, dpi = 120)
}
plotSixteenDays <- function(selecteddate, sixteendays) {
  # Map every sounding in the 16-day moving window around `selecteddate`,
  # clamped to [390, 410] ppm, and save the figure as <date>_16days.png.
  # Relies on script-level globals: my_theme, my_colours, data_version,
  # and FRK::draw_world.
  print("Plotting 16 days")
  window_map <- ggplot(sixteendays) +
    my_theme +
    geom_point(aes(lon, lat, colour = pmin(pmax(xco2, 390), 410))) +
    lims(x = c(-180, 180), y = c(-90, 90)) +
    scale_colour_gradientn(colours = my_colours, limits = c(390, 410)) +
    labs(x = "lon (deg)", y = "lat (deg)", colour = "XCO2\n(ppm)\n",
         title = paste(selecteddate, data_version, "16-DAY MOVING WINDOW")) +
    coord_map("mollweide")
  outfile <- file.path(paste0(data_version, "plots"), paste0(selecteddate, "_16days.png"))
  ggsave(draw_world(window_map, inc_border = TRUE),
         filename = outfile, width = 16, height = 9, dpi = 120)
}
# Map the FRK level-3 predictions (`level3$mu`), clamped to [390, 410] ppm,
# and save the figure as <date>_prediction.png. `level3` must hold columns
# lon, lat and mu; my_theme, my_colours and data_version are script globals.
plotPredictions <- function(selecteddate, level3) {
print("Plotting FRK Predictions")
ggsave(
(ggplot(level3) +
my_theme +
geom_tile(aes(lon,lat,fill=pmin(pmax(mu,390),410))) +
lims(x = c(-180, 180), y = c(-90, 90)) +
scale_fill_gradientn(colours=my_colours, limits=c(390,410)) +
labs(x="lon (deg)", y="lat (deg)", fill="pred\n(ppm)\n", title=paste(selecteddate,data_version," FIXED RANK KRIGING (FRK)")) +
coord_map("mollweide")) %>%
# draw_world() (FRK) overlays coastlines on the finished map.
draw_world(inc_border=TRUE),
filename = file.path(paste0(data_version,"plots"),paste0(selecteddate,"_prediction.png")), width=16, height=9, dpi=120)
}
# Map the FRK prediction standard error (`level3$sd`), clamped to [0, 2] ppm
# on a green-to-brown scale, and save it as <date>_uncertainty.png.
# `level3` must hold columns lon, lat and sd; my_theme and data_version are
# script globals.
plotUncertainty <- function(selecteddate, level3){
print("Plotting FRK Uncertainty")
ggsave(
(ggplot(level3) +
my_theme +
geom_tile(aes(lon,lat,fill=pmin(pmax(sd,0.00),2.00))) +
lims(x = c(-180, 180), y = c(-90, 90)) +
scale_fill_gradient(low="Green",high="Brown", limits=c(0.00,2.00)) +
labs(x="lon (deg)", y="lat (deg)", fill="s.e.\n(ppm)\n", title=paste(selecteddate,data_version," FRK STANDARD ERROR")) +
coord_map("mollweide")) %>%
# draw_world() (FRK) overlays coastlines on the finished map.
draw_world(inc_border=TRUE),
filename = file.path(paste0(data_version,"plots"),paste0(selecteddate,"_uncertainty.png")),width=16,height=9,dpi=120)
}
plotAnomaly <- function(selecteddate, level3) {
  # Map the FRK prediction anomaly (prediction minus the global mean
  # prediction), clamped to [-5, 5] ppm, and save it as <date>_anomaly.png.
  # `level3` must hold columns lon, lat and mu; my_theme, my_colours and
  # data_version are script globals.
  print("Plotting Anomaly")
  mu_mean <- mean(level3$mu)
  level3$anomaly <- level3$mu - mu_mean
  ggsave(
    (ggplot(level3) +
      my_theme +
      geom_tile(aes(lon, lat, fill = pmin(pmax(anomaly, -5), 5))) +
      lims(x = c(-180, 180), y = c(-90, 90)) +
      scale_fill_gradientn(colours = my_colours, limits = c(-5, 5)) +
      # BUG FIX: the title was built with paste0(), gluing the date and
      # data_version together with no separator (e.g. "2015-01-01oco2v7");
      # use paste() for spacing, like the four sibling plot functions.
      labs(x = "lon (deg)", y = "lat (deg)", fill = "anomaly\n(ppm)\n",
           title = paste(selecteddate, data_version,
                         paste0("Anomaly (pred - pred mean ", round(mu_mean, 2), "ppm)"))) +
      coord_map("mollweide")) %>%
      draw_world(inc_border = TRUE),
    filename = file.path(paste0(data_version, "plots"), paste0(selecteddate, "_anomaly.png")),
    width = 16, height = 9, dpi = 120)
}
# ---- Main driver ----
# Reads the daily OCO-2 lite soundings plus each FRK level-3 CSV and renders
# the data / 16-day / prediction / uncertainty / anomaly maps per date.
oco2lite <- read.csv(paste0(data_version, 'lite.csv'))
oco2lite$day <- as.Date(oco2lite$day, tz = "UTC")
plotdir <- paste0(data_version, "plots")
if (!dir.exists(plotdir)) {
  dir.create(plotdir)
}
# BUG FIX: "*.csv$" is a glob, not a regex; use an anchored regex so only
# CSV files are picked up.
inputfiles <- list.files(path = paste0(data_version, "level3"), pattern = "\\.csv$", full.names = FALSE, recursive = FALSE)
# seq_along() (not 1:length()) so an empty directory yields zero iterations.
for (i in seq_along(inputfiles)) {
  selecteddate <- as.Date(strsplit(inputfiles[i], "[.]")[[1]][1], tz = "UTC")
  anomaly_png <- file.path(plotdir, paste0(selecteddate, "_anomaly.png"))
  if (file.exists(anomaly_png)) {
    # This date has already been plotted.
    next
  }
  # Touch the marker file immediately so concurrent runs skip this date.
  file.create(anomaly_png)
  print(selecteddate)
  # 16-day moving window centred on the selected date (-7 / +8 days).
  startdate <- as.Date(selecteddate, tz = "UTC") - 7
  enddate <- as.Date(selecteddate, tz = "UTC") + 8
  sixteendays <- oco2lite[oco2lite$day >= startdate & oco2lite$day <= enddate, ]
  # Create a dummy data frame if there is no data.
  # BUG FIX: subsetting a data frame never yields NULL, so the original
  # `is.null(sixteendays)` guard could never fire; test for zero rows.
  # The dummy also needs a `day` column (not `date`), because plotOneDay()
  # subsets on sixteendays$day.
  if (nrow(sixteendays) == 0) {
    sixteendays <- data.frame("day" = selecteddate, "lat" = 0, "lon" = 0, "xco2" = 0, "std" = 0)
  }
  plotOneDay(selecteddate, sixteendays)
  plotSixteenDays(selecteddate, sixteendays)
  level3_csv <- file.path(paste0(data_version, "level3"), paste0(selecteddate, ".csv"))
  # `||` (scalar, short-circuiting) replaces the vectorized `|` so the
  # file.size() call is skipped entirely when the file is missing.
  if (!file.exists(level3_csv) || file.size(level3_csv) == 0) {
    # Input data does not exist for this date, create an empty data frame instead.
    level3 <- data.frame("date" = selecteddate, "lat" = 0, "lon" = 0, "mu" = 0, "sd" = 0)
  } else {
    level3 <- read.csv(level3_csv)
    level3$date <- as.Date(level3$date, tz = "UTC")
  }
  plotPredictions(selecteddate, level3)
  plotUncertainty(selecteddate, level3)
  plotAnomaly(selecteddate, level3)
}
|
eb6b61755465ba84754f7b6a4bdf963694d8abdf
|
c1c3cda3cd900f8ec3a3c7dfa9d8ab5e5aeaffa7
|
/day11.R
|
1e9cba98e56427e6e7a7f7d57b430393d01d3e2d
|
[] |
no_license
|
sethmcg/advent-2015
|
75c7434e0784822da604396de7f1a98b78170b48
|
9ff5aff58e126317c6de11911f2e1afa25503767
|
refs/heads/master
| 2021-05-31T02:30:57.595405
| 2016-03-19T23:31:12
| 2016-03-19T23:31:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 832
|
r
|
day11.R
|
# Advent of Code 2015, day 11: find the next valid password after the input.
pw <- "hxbxwxba"
# Work with letter indices (1 = "a", ..., 26 = "z") instead of characters.
pw <- match(unlist(strsplit(pw,"")),letters)
# Advance an 8-letter password (as indices 1..26) by one step: bump the last
# position and ripple carries leftward, wrapping 27 back to 1.
inc <- function(v) {
  v[8] <- v[8] + 1
  while (any(v > 26)) {
    pos <- max(which(v > 26))
    v[pos] <- 1
    v[pos - 1] <- v[pos - 1] + 1
  }
  v
}
# Forbidden letters per the puzzle rules.
bad <- c("l","i","o")
# All 26 doubled letters ("aa", "bb", ..., "zz").
doubles <- paste0(letters,letters)
# All 24 increasing straights of three letters ("abc", "bcd", ..., "xyz").
runs <- paste0(letters[-c(25,26)],letters[-c(1,26)],letters[-c(1,2)])
# Return TRUE when the candidate password violates any rule.
# `v` is the password as letter indices (1 = "a", ..., 26 = "z").
invalid <- function(v){
w <- letters[v]
nv <- length(v)
# Rule: none of the confusing letters i, l, o anywhere.
if(any(bad %in% w)){return(TRUE)}
# All adjacent pairs w[i]w[i+1].
ww <- paste0(w[-nv],w[-1])
# All adjacent triples w[i]w[i+1]w[i+2].
# BUG FIX: the original used ww[-nv], which drops nothing (ww has nv-1
# elements), so paste0() recycled w[-c(1,2)] and fabricated a bogus
# wrap-around triple w[nv-1]w[nv]w[3] that could register a false straight
# (e.g. "ddceemab" was accepted because "ab"+"c" = "abc"). Dropping the
# last pair yields exactly the nv-2 real triples.
www <- paste0(ww[-(nv-1)],w[-c(1,2)])
# Rule: at least two distinct doubled letters among the pairs.
if(sum(doubles %in% ww) < 2){return(TRUE)}
# Rule: at least one increasing straight of three letters.
if(any(runs %in% www)){return(FALSE)}
return(TRUE)
}
# Part 1: step the password forward until every rule is satisfied.
while(invalid(pw)){pw <- inc(pw)}
print(paste(letters[pw],collapse=""))
## Part 2
# Part 2: the next valid password strictly after the part-1 answer.
pw <- inc(pw)
while(invalid(pw)){pw <- inc(pw)}
print(paste(letters[pw],collapse=""))
|
1e74229b9595bf7dfb07cd53bc57df0ca9396820
|
dc7c1016493af2179bd6834614be0902a0133754
|
/boxplotex.R
|
62b793ea2b163d50bfeb0d8dc75eb5c50d171d80
|
[] |
no_license
|
ashishjsharda/R
|
5f9dc17fe33e22be9a6031f2688229e436ffc35c
|
fc6f76740a78d85c50eaf6519cec5c0206b2910c
|
refs/heads/master
| 2023-08-08T13:57:05.868593
| 2023-07-30T13:51:56
| 2023-07-30T13:51:56
| 208,248,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
boxplotex.R
|
# Render a boxplot of fuel efficiency by cylinder count from the built-in
# mtcars data set and save it to boxplotex.png.
png(file = "boxplotex.png")
# BUG FIX: the axis labels describe cylinders, but the original formula
# grouped by horsepower (mpg ~ hp), producing one box per distinct hp value;
# group by cyl so the plot matches its labels.
boxplot(mpg ~ cyl, data = mtcars,
        xlab = "Number of Cylinders", ylab = "Miles per Gallon",
        main = "Mileage Data")
dev.off()
|
f6e4011b4ebdf53f15c3b754631e5225cf2325c7
|
7b4ec05acf034f52643945b46fb069ab51613af3
|
/tutorial_R/R_rainclouds.R
|
a5b9ffcde938863c3ee7a92ba61e735cdb63fe11
|
[
"MIT"
] |
permissive
|
RainCloudPlots/RainCloudPlots
|
489f91bb4f5c44d012f7ae27b8fa9047dc2c722a
|
4ceeb06dc5bd5b9911e7147d1b0b452c4c2f9b1d
|
refs/heads/master
| 2023-05-29T03:23:31.551781
| 2023-03-27T09:40:01
| 2023-03-27T09:40:01
| 144,041,501
| 732
| 235
|
MIT
| 2023-03-27T09:40:02
| 2018-08-08T16:40:34
|
HTML
|
UTF-8
|
R
| false
| false
| 2,715
|
r
|
R_rainclouds.R
|
### This script creates an R function to generate raincloud plots, then simulates
### data for plots. If using for your own data, you only need lines 1-80.
### It relies largely on code previously written by David Robinson
### (https://gist.github.com/dgrtwo/eb7750e74997891d7c20)
### and the package ggplot2 by Hadley Wickham
# Check if required packages are installed ----
# NOTE(review): sourcing this script installs any missing packages as a side
# effect -- acceptable for a tutorial, but avoid in reusable library code.
packages <- c("cowplot", "readr", "ggplot2", "dplyr", "lavaan", "Hmisc")
if (length(setdiff(packages, rownames(installed.packages()))) > 0) {
install.packages(setdiff(packages, rownames(installed.packages())))
}
# Load packages ----
# Only ggplot2 is attached; the rest of `packages` is installed but used
# via namespace or by later tutorial sections.
library(ggplot2)
# Defining the geom_flat_violin function ----
# Note: the below code modifies the
# existing github page by removing a parenthesis in line 50
# Null-coalescing operator: yield `a` unless it is NULL, otherwise `b`.
"%||%" <- function(a, b) {
  if (is.null(a)) {
    b
  } else {
    a
  }
}
# Layer constructor for the flat (one-sided) violin, mirroring the signature
# conventions of ggplot2::geom_violin(). All standard layer arguments
# (mapping, data, stat, position, show.legend, inherit.aes) are passed through
# to ggplot2::layer(); `trim`, `scale` and `...` become geom/stat params.
# Drawing is delegated to the GeomFlatViolin ggproto object defined below.
geom_flat_violin <- function(mapping = NULL, data = NULL, stat = "ydensity",
position = "dodge", trim = TRUE, scale = "area",
show.legend = NA, inherit.aes = TRUE, ...) {
layer(
data = data,
mapping = mapping,
stat = stat,
geom = GeomFlatViolin,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
trim = trim,
scale = scale,
...
)
)
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
GeomFlatViolin <-
  ggproto("GeomFlatViolin", Geom,
    setup_data = function(data, params) {
      # Width falls back to the layer param, then to 90% of the x resolution.
      data$width <- data$width %||%
        params$width %||% (resolution(data$x, FALSE) * 0.9)
      # ymin, ymax, xmin, and xmax define the bounding rectangle for each group.
      # BUG FIX: this script only attaches ggplot2, so the original bare
      # `data %>% group_by() %>% mutate()` chain failed unless the user
      # happened to have dplyr loaded; namespace-qualify the calls instead
      # (matching the `plyr::arrange` style used in draw_group below).
      dplyr::mutate(
        dplyr::group_by(data, group),
        ymin = min(y),
        ymax = max(y),
        xmin = x,
        xmax = x + width / 2
      )
    },
    draw_group = function(data, panel_scales, coord) {
      # Find the points for the line to go all the way around
      data <- transform(data,
        xminv = x,
        xmaxv = x + violinwidth * (xmax - x)
      )
      # Make sure it's sorted properly to draw the outline
      newdata <- rbind(
        plyr::arrange(transform(data, x = xminv), y),
        plyr::arrange(transform(data, x = xmaxv), -y)
      )
      # Close the polygon: set first and last point the same
      # Needed for coord_polar and such
      newdata <- rbind(newdata, newdata[1, ])
      ggplot2:::ggname("geom_flat_violin", GeomPolygon$draw_panel(newdata, panel_scales, coord))
    },
    draw_key = draw_key_polygon,
    default_aes = aes(
      weight = 1, colour = "grey20", fill = "white", size = 0.5,
      alpha = NA, linetype = "solid"
    ),
    required_aes = c("x", "y")
  )
|
9bb2af8fde92517cf0258b78949e5f7763f1fdf3
|
7e1cd4641569868113092e90721b8c88ec58c853
|
/stages2.R
|
17af4261ebc21e1c3aed8f11428d3aa31fe0bc39
|
[
"MIT"
] |
permissive
|
quevedomario/eco3r
|
ef9f38996bb991eaf70b6ef5ee0dfa5d18a8ddea
|
e358a173b5e876869a4379c12db09a8cc77e21fa
|
refs/heads/master
| 2022-05-09T05:04:22.482384
| 2022-04-06T07:28:36
| 2022-04-06T07:28:36
| 173,913,221
| 0
| 0
| null | 2019-05-01T11:11:35
| 2019-03-05T09:11:26
| null |
UTF-8
|
R
| false
| false
| 1,715
|
r
|
stages2.R
|
## ----setup, include=FALSE----------------------------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
# NOTE(review): warn=-1 silences ALL warnings for the session; kept for the
# tutorial rendering, but reconsider for interactive use.
options(warn=-1)
## ----message=FALSE, warning=FALSE--------------------------------------------------------------
library (popbio)
## ----------------------------------------------------------------------------------------------
# Stage names of the Arisaema life cycle: seed stage plus six size classes.
stages_arisaema <- c("seeds", "size1", "size2", "size3",
"size4", "size5", "size6")
## ----------------------------------------------------------------------------------------------
# Stage-transition values entered row by row (7 x 7); the first row holds
# per-stage fecundities, the rest transition/survival rates.
arisaema <- c(
0.00,0.00,0.00,0.25,0.82,4.51,5.99,
0.30,0.58,0.30,0.06,0.06,0.10,0.06,
0.00,0.20,0.59,0.19,0.02,0.05,0.09,
0.00,0.00,0.08,0.47,0.12,0.05,0.00,
0.00,0.00,0.02,0.23,0.38,0.22,0.09,
0.00,0.00,0.00,0.05,0.40,0.34,0.43,
0.00,0.00,0.00,0.00,0.02,0.25,0.34
)
## ----------------------------------------------------------------------------------------------
# popbio::matrix2 builds the square projection matrix with stage dimnames.
arisaema_matrix <- matrix2(arisaema, stages_arisaema)
arisaema_matrix
## ----------------------------------------------------------------------------------------------
# Asymptotic growth rate (dominant eigenvalue) and stable stage distribution.
lambda(arisaema_matrix)
stable.stage(arisaema_matrix)
## ----------------------------------------------------------------------------------------------
# Project the initial census vector forward 25 time steps.
n0_arisaema <- c(29,37,21,4,4,3,2)
arisaema_nt <- pop.projection (arisaema_matrix, n0_arisaema, 25)
## ----------------------------------------------------------------------------------------------
plot(arisaema_nt$pop.sizes, ylim=c(90, 110), xlab = "año", ylab="Nt")
## ----------------------------------------------------------------------------------------------
stage.vector.plot (arisaema_nt$stage.vectors, ylim = c(0, 0.7))
|
8f9b203f1bfcd9724827ae00e12d7310ba4510d5
|
c90ed7da05ae61c51b752b7086a74f4e35053755
|
/R/pwrEWAS.shiny_v1.7.R
|
e5d164ca5214228c454b4fec1a79fba3853b90f3
|
[] |
no_license
|
stefangraw/pwrEWAS
|
9e382c7cd3cf3674cca3666be720be8235256f67
|
945c77d69f5c7e80240f91b5319623efd012ceb0
|
refs/heads/master
| 2021-12-01T23:46:32.454505
| 2019-10-25T12:42:00
| 2019-10-25T12:42:00
| 133,530,231
| 6
| 5
| null | 2021-11-12T14:39:57
| 2018-05-15T14:42:52
|
R
|
UTF-8
|
R
| false
| false
| 15,269
|
r
|
pwrEWAS.shiny_v1.7.R
|
#' @title Shiny pwrEWAS
#'
#' @description pwrEWAS_shiny provides a user-friendly point-and-click interface for pwrEWAS
#'
#' @keywords DNAm microarray power Shiny
#'
#' @return pwrEWAS_shiny initializes pwrEWAS's user-interface
#'
#' @export
#'
#' @examples
#'
#' if(interactive()) {
#' pwrEWAS_shiny()
#' }
pwrEWAS_shiny <- function(){
# library(shiny)
# library(shinyBS)
# library(ggplot2)
# library(parallel)
# user input / default values
input2 <- NULL
input2$Nmin <- 10
input2$Nmax <- 50
input2$NCntPer <- 0.5
input2$Nsteps <- 10
input2$J <- 100000 # simulated CPGs
input2$targetDmCpGs <- 100
input2$targetDeltaString <- "0.2, 0.5"
input2$tauString <- "0.01, 0.03"
input2$targetDelta <- as.numeric(unlist(strsplit(input2$targetDeltaString,",")))
input2$method <- "limma"
input2$detectionLimit <- 0.01
input2$FDRcritVal <- 0.05
input2$cores <- round(parallel::detectCores(all.tests = FALSE, logical = TRUE)/2)
input2$sim <- 50
input2$tissueType <- "Saliva"
# input <- input2
#############################################################
server <- function(input,output){
shiny::observeEvent(input$goButton, {
# reset plots
output$powerPlot <- NULL
output$meanPower <- NULL
output$probTP <- NULL
output$deltaDensity <- NULL
output$log <- NULL
shiny::withProgress(message = 'Program running. Please wait.', detail = "This can take several minutes. Progress will be displayed in R console.", value = NULL, {
runTimeStart <- Sys.time()
if(input$switchTargetDmSd == 1){
out <- pwrEWAS(minTotSampleSize = input$Nmin,
maxTotSampleSize = input$Nmax,
SampleSizeSteps = input$Nsteps,
NcntPer = input$NCntPer,
targetDelta = as.numeric(unlist(strsplit(input$targetDeltaString,","))),
J = input$J,
targetDmCpGs = input$targetDmCpGs,
tissueType = input$tissueType,
detectionLimit = input$detectionLimit,
DMmethod = input$method,
FDRcritVal = input$FDRcritVal,
core = input$cores,
sims = input$sim)
} else if(input$switchTargetDmSd == 2){
out <- pwrEWAS(minTotSampleSize = input$Nmin,
maxTotSampleSize = input$Nmax,
SampleSizeSteps = input$Nsteps,
NcntPer = input$NCntPer,
deltaSD = as.numeric(unlist(strsplit(input$tauString,","))),
J = input$J,
targetDmCpGs = input$targetDmCpGs,
tissueType = input$tissueType,
detectionLimit = input$detectionLimit,
DMmethod = input$method,
FDRcritVal = input$FDRcritVal,
core = input$cores,
sims = input$sim)
}
output$powerPlot <- shiny::renderPlot({isolate(pwrEWAS_powerPlot(out$powerArray, sd = ifelse(input$switchTargetDmSd == 1, FALSE, TRUE)))})
# mean power table
meanPowerTable <- cbind(rownames(out$meanPower), round(out$meanPower, 2))
if(input$switchTargetDmSd == 1){
colnames(meanPowerTable)[1] <- shiny::HTML("N</sub> \\ Δ<sub>β")
} else if(input$switchTargetDmSd == 2){
colnames(meanPowerTable)[1] <- shiny::HTML("N</sub> \\ SD(Δ<sub>β)")
}
positionToAddTitle <- ceiling(dim(meanPowerTable)[2]/2)
colnames(meanPowerTable)[positionToAddTitle] <- paste0(shiny::HTML("Power<br/>"), colnames(meanPowerTable)[positionToAddTitle])
output$meanPower <- shiny::renderTable({meanPowerTable}, sanitize.text.function = function(x) x)
# delta density plot
output$deltaDensity <- shiny::renderPlot({isolate(pwrEWAS_deltaDensity(out$deltaArray, input$detectionLimit, sd = ifelse(input$switchTargetDmSd == 1, FALSE, TRUE)))})
# probability of detecting at least one TP
probTPTable <- cbind(rownames(out$metric$probTP), round(out$metric$probTP, 2))
if(input$switchTargetDmSd == 1){
colnames(probTPTable)[1] <- shiny::HTML("N</sub> \\ Δ<sub>β")
} else if(input$switchTargetDmSd == 2){
colnames(probTPTable)[1] <- shiny::HTML("N</sub> \\ SD(Δ<sub>β)")
}
colnames(probTPTable)[positionToAddTitle] <- paste0(shiny::HTML("P(#TP≥1) <br/>"), colnames(probTPTable)[positionToAddTitle])
output$probTP <- shiny::renderTable({probTPTable}, sanitize.text.function = function(x) x)
# run time
runTimeStop <- difftime(Sys.time(), runTimeStart, units = "auto")
# log
logString <- paste0(
"Tissue type = ", input$tissueType, "\n",
"Minimum total sample size = ", input$Nmin, "\n",
"Maximum total sample size = ", input$Nmax, "\n",
"Sample size increments = ", input$Nsteps, "\n",
"Percentage samples in group 1 = ", input$NCntPer, "\n",
"Number of CpGs to be tested = ", input$J, "\n",
"Target number of DM CpGs = ", input$targetDmCpGs, "\n",
if(input$switchTargetDmSd == 1){
paste0("'Target max Delta' was selected \n",
"Target maximal difference in DNAm (comma delimited) = ", input$targetDeltaString)
} else if(input$switchTargetDmSd == 2){
paste0("'SD(Δ)Delta)' was selected \n",
"Std. dev. of difference in DNAm (comma delimited) = ", input$tauString)}, "\n",
"Target FDR = ", input$FDRcritVal, "\n",
"Detection Limit = ", input$detectionLimit, "\n",
"Method for DM analysis = ", input$method, "\n",
"Number of simulated data sets = ", input$sim, "\n",
"Threads = ", input$cores, "\n",
"Run time = ", round(runTimeStop,1), " ", attr(runTimeStop, "units"))
output$log <- renderText({HTML(logString)})
}) # processbar done
})
}
ui <- shiny::fluidPage(
shiny::tags$head(shiny::tags$style(shiny::HTML(".shiny-notification {
height: 150px;
width: 400px;
position:fixed;
font-size: 200%;
top: calc(50% - 35px);;
left: calc(50% - 100px);;}"))),
shiny::tags$style(type='text/css', '#log {text-align: left;}'),
shiny::titlePanel("pwrEWAS"),
shiny::HTML("pwrEWAS is a computationally efficient tool to estimate power in EWAS as a function of sample and effect size
for two-group comparisons of DNAm (e.g., case vs control, exposed vs non-exposed, etc.). Detailed description
of in-/outputs, instructions and an example, as well as interpretations of the example results are provided in
the following vignette: "),
shiny::tags$a(href="https://bioconductor.org/packages/devel/bioc/vignettes/pwrEWAS/inst/doc/pwrEWAS.pdf", "pwrEWAS vignette"),
shiny::HTML("</br></br>Authors: Stefan Graw, Devin Koestler </br>"),
shiny::HTML("Department of Biostatistics, University of Kansas School of Medicine"),
shiny::sidebarLayout(
shiny::sidebarPanel(
### Inputs
shinyBS::popify(shiny::selectInput(inputId = "tissueType", label = "Tissue Type", choices = c("Adult (PBMC)",
"Saliva",
"Sperm",
"Lymphoma",
"Placenta",
"Liver",
"Colon",
"Blood adult",
"Blood 5 year olds",
"Blood newborns",
"Cord-blood (whole blood)",
"Cord-blood (PBMC)")),
'Heterogeneity of different tissue types can have effects on the results. Please select your tissue type of interest or one you believe is the closest.', placement = "top"),
shinyBS::popify(shiny::numericInput(inputId = "Nmin", label = "Minimum total sample size", value = input2$Nmin, min = 4, step = 1),
'Lowest total sample sizes to be considered.'),
shinyBS::popify(shiny::numericInput(inputId = "Nmax", label = "Maximum total sample size", value = input2$Nmax, min = 4, step = 1),
'Highest total sample sizes to be considered.'),
shinyBS::popify(shiny::numericInput(inputId = "Nsteps", label = "Sample size increments", value = input2$Nsteps, min = 1, step = 1),
'Steps with which total sample size increases from "Minimum total sample size" to "Maximum total sample size".'),
shinyBS::popify(shiny::numericInput(inputId = "NCntPer", label = "Samples rate for group 1", value = input2$NCntPer, min = 0, max = 1, step = 0.1),
'Rate by which the total sample size is split into groups (0.5 corresponds to a balanced study; rate for group 2 is equal to 1 rate of group 1)'),
shinyBS::popify(shiny::numericInput(inputId = "J", label = "Number of CpGs tested", value = input2$J, min = 1, step = 10000),
'Number of CpG site that will simulated and tested (increasing Number of CpGs tested will require increasing RAM (memory)).'),
shinyBS::popify(shiny::numericInput(inputId = "targetDmCpGs", label = "Target number of DM CpGs", value = input2$targetDmCpGs, min = 1, step = 10),
'Target number of CpGs simulated with meaningful differences (differences greater than detection limit)'),
shinyBS::popify(shinyWidgets::radioGroupButtons(inputId = "switchTargetDmSd",choiceValues = c(1,2), justified = TRUE, choiceNames = c(shiny::HTML("Target max Δ"), shiny::HTML("SD(Δ)"))),
shiny::HTML('The expected simulated differences in methylation can be control by "Target max Δ" or "SD(Δ)". For "Target max Δ" standard deviations of the simulated differences is automatically determined such that the 99%til of the simulated differences are within a range around the provided values. If "SD(Δ)" is chosen, differences in methylation will be simulated using provided standard deviation.')),
shiny::conditionalPanel(
condition = "input.switchTargetDmSd == 1",
shinyBS::popify(shiny::textInput(inputId = "targetDeltaString", label = "Target maximal difference in DNAm (comma delimited)", value = input2$targetDeltaString),
'Standard deviations of the simulated differences is automatically determined such that the 99%til of the simulated differences are within a range around the provided values.')
),
shiny::conditionalPanel(
condition = "input.switchTargetDmSd == 2",
shinyBS::popify(shiny::textInput(inputId = "tauString", label = "Std. dev. of difference in DNAm (comma delimited)", value = input2$tauString),
'Differnces in methylation will be simulated using provided standard deviation.')
),
shinyBS::popify(shiny::numericInput(inputId = "FDRcritVal", label = "Target FDR", value = input2$FDRcritVal, min = 0, max = 1, step = 0.01),
'Critical value to control the False Discovery Rate (FDR) using the Benjamini and Hochberg method.'),
shiny::checkboxInput(inputId = "advancedSettings", label = "Advanced settings"),
shiny::conditionalPanel(
condition = "input.advancedSettings == 1",
shinyBS::popify(shiny::numericInput(inputId = "detectionLimit", label = "Detection Limit", value = input2$detectionLimit, min = 0, max = 1, step = 0.01),
'Limit to detect changes in methylation. Simulated differences below the detection limit will not be consider as meaningful differentially methylated CpGs.'),
shinyBS::popify(shiny::selectInput(inputId = "method", label = "Method for DM analysis", choices = c("limma", "t-test (unequal var)", "t-test (equal var)", "Wilcox rank sum", "CPGassoc")),
'Method used to perform differential methylation analysis.', placement = "top"),
shinyBS::popify(shiny::numericInput(inputId = "sim", label = "Number of simulated data sets", value = input2$sim, min = 1, step = 10),
'Number of repeated simulation/simulated data sets under the same conditions for consistent results.'),
shinyBS::popify(shiny::numericInput(inputId = "cores", label = "Threads", value = input2$cores, min = 1, max = parallel::detectCores(all.tests = FALSE, logical = TRUE)-1, step = 1),
'Number of cores used to run multiple threads. Ideally, the number of different total samples sizes multiplied by the number of effect sizes should be a multiple (m) of the number of cores (#sampleSizes * #effectSizes = m * #threads). An increasing number of threads will require an increasing amount of RAM (memory).', placement = "top")
),
# submitButton(text = "Simulate"),
shiny::actionButton(inputId = "goButton", label = "Go!", width = '100%', style='font-size:150%')
),
### Outputs
shiny::mainPanel(
shiny::fluidRow(
shiny::column(12, align="center",
shiny::plotOutput("powerPlot"),
shiny::br(),shiny::br(),shiny::br(),
shiny::splitLayout(cellWidths = c("50%", "50%"),
shiny::tableOutput(outputId = "meanPower"),
shiny::tableOutput(outputId = "probTP")),
shiny::plotOutput("deltaDensity"),
shiny::verbatimTextOutput(outputId = "log")
)
)
)
)
)
shiny::shinyApp(ui = ui, server = server)
}
# pwrEWAS_shiny()
|
3efcf8fefa4836b04ee4324d35d467f4c0db1afe
|
df6638f57ede542680cfd4634effcc1e205ae07f
|
/DAY 0.R
|
7c83cb7b088d99277a12eda6e90e69ca5160f953
|
[] |
no_license
|
BioinformaticsDeepLearning/Learn_R_and_Use_R
|
1d4804b90e262ffffa9a656a3beec5ea2e206b61
|
8d987ee4afbb2e415ee8cd6732a6369d71413bf8
|
refs/heads/master
| 2022-12-04T19:20:39.877479
| 2020-08-11T08:04:48
| 2020-08-11T08:04:48
| 273,900,570
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,627
|
r
|
DAY 0.R
|
#What is R programming?
R is an integrated suite of software facilities for data manipulation, calculation and graphical display. It includes
a) an effective data handling and storage facility,
b) a suite of operators for calculations on arrays, in particular matrices,
c) a large, coherent, integrated collection of intermediate tools for data analysis,
d) graphical facilities for data analysis and display either on-screen or on hardcopy, and
e) a well-developed, simple and effective programming language which includes conditionals, loops, user-defined recursive functions and input and output facilities.
#Installation of R and R studio on Windows 10#
Installing R on Windows 10 is very straightforward. The easiest way is to install it through CRAN (https://cran.r-project.org/), which stands for The Comprehensive R Archive Network. Once the download is finished, you will obtain a file named "R-3.6.3-win.exe" or similar depending on the version of R that you download. The links shown in the video above will take you to the most recent version. To finish installing R on your computer, all that is left to do is to run the .exe file. Most of the time, you will likely want to go with the defaults, so click the button 'Next' until the process is complete, as shown in the video below. Note that, even though I do not do so, you can add desktop or quick start shortcuts during the process.
#Installation of Rstudio#
Once R is installed, you can proceed to install the RStudio IDE to have a much-improved environment to work in your R scripts. It includes a console that supports direct code execution and tools for plotting and keeping track of your variables in the workspace, among other features. The installation process is very straightforward, as well. Simply go to the RStudio download page via this link (https://rstudio.com/products/rstudio/download/#download). Once the download is complete, you will get a file named "RStudio-1.2.5033.exe" or similar. Again, this will depend on the version. To complete the installation, it is as easy as before. Just run the previously mentioned .exe file with the default settings by clicking 'Next', and wait until the installation finishes. Bear in mind that RStudio requires that R is installed beforehand.
#Installation of R an R studio on Mac Os#
Installing R on Mac OS is similar to Windows. Once again, The easiest way is to install it through CRAN by going to the CRAN downloads page (https://cran.r-project.org/).
#Installation of Rstudio#
This process is essentially the same as in Windows. To download RStudio, go to the RStudio downloads page and get the .dmg for Mac OS (https://rstudio.com/products/rstudio/download/#download)
#Installation of R and Rstudio on Linux#
Installing R on Ubuntu may be a little bit more tricky for those unused to working in the command line. However, it is perhaps just as easy as with Windows or Mac OS. Before you start, make sure you have root access in order to use sudo.
As it is common, prior to installing R, let us update the system package index and upgrade all our installed packages using the following two commands:
sudo apt update
sudo apt -y upgrade
After that, all that you have to do is run the following in the command line to install base R.
sudo apt -y install r-base
#Installation of Rstudio#
Once base R is installed, you can go ahead and install RStudio. For that we are going to head over again to the RStudio downloads page (https://rstudio.com/products/rstudio/download/#download) and download the .deb file for our Ubuntu version
Hurray ! R platform is ready to use
Play with data................
|
d2694ecc9631ac5b424b5cd181c7bd6e799e0a34
|
ccd2ba51797d860fe7e7b69ff1d14301d74493a1
|
/examples.r
|
ebe8c0dc8b3181d4365f2e0177f1e7a9a71bb800
|
[] |
no_license
|
moone009/wec-preprocess
|
841959f2cac105e115a97fd15677a802d0e52608
|
cdcd5a8e22dd0c0c14faa79bdc329516048007be
|
refs/heads/master
| 2021-01-21T04:54:00.900386
| 2016-07-25T15:51:13
| 2016-07-25T15:51:13
| 54,666,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,398
|
r
|
examples.r
|
##_____________________________________________________________________________________________________________________________
# setup test data
mtcars$carb = as.factor(mtcars$carb)
mtcars$am = as.character(mtcars$am)
##_____________________________________________________________________________________________________________________________
# Execute function
df = DummyCode(mtcars,c('carb','am'))
library(lubridate)
##_____________________________________________________________________________________________________________________________
# Sample Data
data=as.data.frame(list(ID=1:55,
variable=rnorm(55,50,15)))
#This function will generate a uniform sample of dates from
#within a designated start and end date:
#' Draw random dates uniformly between two dates, one per row of `data`.
#'
#' @param start.day,end.day Date strings (anything `as.Date()` accepts).
#' @param data A data frame; only its row count is used.
#' @return A Date vector of length `nrow(data)`, uniform over the inclusive range.
rand.date <- function(start.day, end.day, data) {
  size <- nrow(data)
  days <- seq.Date(as.Date(start.day), as.Date(end.day), by = "day")
  # sample() gives every calendar day equal probability. The original
  # runif(size, 1, length(days)) used as an index truncated to integer,
  # so the last day of the range was (almost) never drawn.
  sample(days, size, replace = TRUE)
}
#This will create a new column within your data frame called date:
data$date=rand.date("2013-01-01","2014-02-28",data)
##_____________________________________________________________________________________________________________________________
# Sample Data
data <- date_engineer(data,'date',F)
##_____________________________________________________________________________________________________________________________
# Kfold
mtcars$folds = 5
mtcars <- kfold(mtcars,3)
rm(mtcars)
mtcars <- kfold(mtcars,3)
##_____________________________________________________________________________________________________________________________
# Static Variables
mtcars$Id <- 1
mtcars$Idd <- 1
mtcars <- Static_Missing_Vars(mtcars)
##_____________________________________________________________________________________________________________________________
# parallel process
# Classify a scalar value: "big" above 2, "Thats random" at exactly 1,
# and "Hello" for everything else.
p.func <- function(x) {
  if (x > 2) {
    return("big")
  }
  if (x == 1) {
    return("Thats random")
  }
  "Hello"
}
# Two-variable classification: "big" when x > 20 and y > 200,
# "Thats random" when x < 20 and y > 188, otherwise "Hello".
m.func <- function(x, y) {
  is.big <- x > 20 && y > 200
  is.random <- x < 20 && y > 188
  if (is.big) "big" else if (is.random) "Thats random" else "Hello"
}
df <- data.frame(id = rnorm(10000))
df <- parallelApply(df,1,p.func,1)
table(df$ParRow)
df <- data.frame(id = rnorm(10000,mean = 18,sd = 10),x =rnorm(10000), y = rnorm(10000,mean = 200,sd = 60))
df <- parallelApply(df,c(1,3),m.func,2)
table(df$ParRow)
##_____________________________________________________________________________________________________________________________
# pre process
data <- preprocess(mtcars,'vs',c(1:7),T,F)
head(data)
data <- preprocess(mtcars,'vs',c(1:7),F,F)
head(data)
data <- df_stats(mtcars)
data
data <- df_stats(iris)
data
##_____________________________________________________________________________________________________________________________
# changeclass
df = data.frame(point1 = rnorm(1000,1,0),point2 = rnorm(1000,1,100),point3 = rnorm(1000,1,100),point4 = rnorm(1000,1,100))
df$point1 = as.character(df$point1)
df$point2 = as.numeric(df$point2)
df$point3 = as.factor(df$point3)
df$point4 = as.character(df$point4)
str(df)
head(df)
df <- changeclass(df)
head(df)
str(df)
##_____________________________________________________________________________________________________________________________
#
data <- cbind(prodNA(iris[c(1,2,3,4)], noNA = 0.1),iris[,c(5)])
colnames(data)[5] <- 'Species'
target = 'Species'
columns = c(1:4)
df <- preprocess(data,target,columns,F)
|
50d7dc354eaecd476f66547b6b424c3290eb35f5
|
7b8da296768b3586d71ff5b9808d36fed89c98fc
|
/plotOutlierClines.R
|
ac8b44984e661490ff14b4ed95d38f81ec6c8a10
|
[] |
no_license
|
raonyguimaraes/exomeAnalysis
|
6e7e9a19dc46979f2f03e1594bba7b2e574ee4bb
|
b21deb76e8425fa009d137eb62dbbd08bbdf2264
|
refs/heads/master
| 2020-04-14T14:53:47.675052
| 2013-04-11T16:57:10
| 2013-04-11T16:57:10
| 10,248,781
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,312
|
r
|
plotOutlierClines.R
|
# Plot simulated allele-frequency clines for outlier loci, grouped by
# outlier type (sweep / narrow / normal / wide), for one contact zone.
# NOTE(review): input paths are hard-coded to a local machine; this script
# only runs as-is on the original author's setup.
# Number of clines drawn per outlier type.
clines <- 5
fit <- read.table("/Users/singhal/thesisWork/introgression/clineAndSummaryStats.out",header=T)
# Add "sweep" as an extra factor level, then relabel "widerange" rows as sweeps.
fit$outtype <- factor(fit$outtype, levels = c(levels(fit$outtype), "sweep"))
indices <- which(fit$type == "widerange")
fit$outtype = replace(fit$outtype,indices,"sweep")
contact <- "gillies"
# Keep only rows with a known outlier type, restricted to the chosen contact.
fit <- fit[complete.cases(fit$outtype),]
fit <- fit[fit$contact==contact,]
types <- c("sweep","narrow","normal","wide")
colors <- c("#00BFC4","#F8766D","gray","#C77CFF")
mean_center = mean(fit$center,na.rm=T)
# Distance grid (m); the axis is relabelled so 5000 reads as 0 (the contact center).
x <- seq(0,10e3,by=50)
plot(NULL,xlim=range(x),ylim=c(-0.1,1.1),xaxt='n',xlab="distance (m)",ylab="allele freq.")
axis(1,at=c(1000,3000,5000,7000,9000),labels=c(-4000,-2000,0,2000,4000))
# For each non-sweep type, draw `clines` randomly chosen tanh clines.
for (i in 2:length(types)) {
tmp <- fit[fit$outtype==types[i],]
count = 0
while (count < clines) {
# NOTE(review): round(runif(1, min=0, ...)) can yield 0, giving an empty
# row via tmp[0,]; confirm whether sample(nrow(tmp), 1) was intended.
n = round(runif(1,min=0,max=dim(tmp)[1]))
a <- tmp[n,]
center <- 5000 #this is so all clines have the same center
width <- a$width
# Sigmoid (tanh) cline scaled to [0, 1] with the fitted width.
xfit <- ((1+tanh(2*(x-center)/width))/2)
lines(x,xfit,col=colors[i])
count = count + 1
}
}
# Sweeps (types[1]) are drawn as straight lines fitted to allele frequencies
# across populations, keeping only lines that cross from <0.3 to >0.7 (or back).
dist <- read.table("/Users/singhal/thesisWork/introgression/distances/distances",header=T)
pops <- c('10kN','10kS','1kN','1kS','2kN','2kS','ancN','ancS','center','nTail','sTail')
for (i in 1:1) {
tmp <- fit[fit$outtype==types[i],]
count = 0
af_file <- paste("/Users/singhal/thesisWork/introgression/clineAF/", contact, ".cline.out",sep="")
af <- read.table(af_file,header=F,stringsAsFactors=F)
names(af) <- c("locus","pos","pop","af")
#add distance
dist_contact = dist[dist$contact==contact,]
af <- data.frame(af,rep(NA,dim(af)[1]))
names(af)[5] <- c("dist")
# Attach each population's distance along the transect.
for (p in 1:length(pops)) {
af[af$pop == pops[p],]$dist = dist_contact[dist_contact$library==pops[p],]$distance
}
while (count < clines) {
n = round(runif(1,min=0,max=dim(tmp)[1]))
a <- tmp[n,]
contig <- a$contig
pos <- a$pos
tmp_af <- af[af$locus==contig & af$pos == pos,]
# Linear fit of allele frequency vs distance over populations 2..10.
line <- lm(tmp_af[2:10,]$af~tmp_af[2:10,]$dist)
b <- line$coefficients[1]
m <- line$coefficients[2]
xstart <- min(x)
xend <- max(x)
ystart <- m * xstart + b
yend <- m * xend + b
# Only draw lines whose endpoints lie outside the 0.3-0.7 band.
if (ystart < 0.3 | ystart > 0.7) {
if (yend > 0.7 | yend < 0.3) {
segments(xstart,ystart,x1=xend,y1=yend,col=colors[i])
count = count + 1
}
}
}
}
|
bb8a1ddd0e53a0531f00ce447d39b735e1221345
|
b033ba5c86bbccca8f33a17a91d7d8ba1fc41976
|
/R/regionList.R
|
2d3cea5de51c4f9392c0e62ab23c7d9708791ea1
|
[] |
no_license
|
neuroconductor/brainKCCA
|
889419ba83967592cc5f70cddaf8a23d4abbe27f
|
e8e08788b4ec395cfe5ba670d13332e03a35814f
|
refs/heads/master
| 2021-07-19T05:44:31.800018
| 2021-05-17T13:38:42
| 2021-05-17T13:38:44
| 126,418,981
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,209
|
r
|
regionList.R
|
#read nii region file provided by user and transform it into RData.
#oh<-regionList("AAL_MNI_2mm.nii","RegionList.txt")
#example<-regionList("AAL_MNI_2mm.nii", "RegionList.txt")
#' Read a NIfTI region atlas and a region-code table, returning the atlas
#' voxels as a long vector together with the centroid of each region.
#'
#' @param regionData Path to a NIfTI atlas file (e.g. "AAL_MNI_2mm.nii").
#' @param regionCode Path to a 3-column text file of region codes.
#' @param resolution Voxel resolution of the atlas, "2mm" (default) or "3mm".
#' @return A list of two elements: the atlas values as a long vector, and the
#'   region-code table with each region's (x, y, z) centroid columns prepended.
regionList <- function(regionData, regionCode, resolution = "2mm") {
  # Fail fast on an unsupported resolution; the original silently left the
  # coordinate grid undefined and crashed later with a cryptic error.
  resolution <- match.arg(resolution, c("2mm", "3mm"))
  cat("reading and manipulating the regionData...", "\n")
  largeMatrix <- oro.nifti::readNIfTI(regionData)
  longVector <- expand.grid(largeMatrix)
  cat("reading and manipulating the regionCode...", "\n")
  regionCode <- read.table(regionCode)
  if (ncol(regionCode) != 3) stop("Region list can only have 3 columns.")
  # Voxel index of the coordinate origin, shared by both resolutions.
  center2mm <- c(46, 64, 37)
  scale <- if (resolution == "2mm") 2 else 3
  # First axis is negated -- presumably to match the atlas orientation; TODO confirm.
  coords2mm <- expand.grid(-scale * (1:91 - center2mm[1]),
                           scale * (1:109 - center2mm[2]),
                           scale * (1:91 - center2mm[3]))
  # Mean coordinate of all voxels carrying each region's code, built as a list
  # and bound once instead of growing with rbind() inside the loop.
  rows <- lapply(seq_len(nrow(regionCode)), function(i) {
    t(as.matrix(colMeans(coords2mm[which(largeMatrix == regionCode[i, 2], arr.ind = F), ])))
  })
  regionCode <- cbind(do.call(rbind, rows), regionCode)
  return(list(longVector, regionCode))
}
|
f077bf0f2b4da19e022c93b8c03ce9954916a97d
|
b67bef2e6295b68a6ba404e78505258a1ac2f95f
|
/man/gdirmn.Rd
|
93c3dbf89d00a8abd1ebabf6c2c799fd88dc8e3f
|
[] |
no_license
|
cran/MGLM
|
beda91fe76a43884434647620d2bf4aebedc1a59
|
e0b8d5d6dec9b3b0dcc74514b0b68438276513d4
|
refs/heads/master
| 2022-05-01T07:22:15.450258
| 2022-04-13T22:32:32
| 2022-04-13T22:32:32
| 17,680,602
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,411
|
rd
|
gdirmn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MGLMgen.R, R/pdfln.R
\name{rgdirmn}
\alias{rgdirmn}
\alias{gdirmn}
\alias{dgdirmn}
\title{The Generalized Dirichlet Multinomial Distribution}
\usage{
rgdirmn(n, size, alpha, beta)
dgdirmn(Y, alpha, beta)
}
\arguments{
\item{n}{the number of random vectors to generate. When \code{size} is a scalar and \code{alpha} is a vector,
must specify \code{n}. When \code{size} is a vector and \code{alpha} is a matrix, \code{n} is optional.
The default value of \code{n} is the length of \code{size}. If given, \code{n} should be equal to
the length of \code{size}.}
\item{size}{a number or vector specifying the total number of objects that are put
into d categories in the generalized Dirichlet multinomial distribution.}
\item{alpha}{the parameter of the generalized Dirichlet multinomial distribution.
\code{alpha} is a numerical positive vector or matrix.
For \code{gdirmn}, \code{alpha} should match the size of \code{Y}. If \code{alpha}
is a vector, it will be replicated \eqn{n} times to match the dimension of \code{Y}.
For \code{rdirmn}, if \code{alpha} is a vector, \code{size} must be a scalar. All the random vectors will
be drawn from the same \code{alpha} and \code{size}. If \code{alpha} is a matrix, the
number of rows should match the length of \code{size}. Each random vector
will be drawn from the corresponding row of \code{alpha} and the corresponding element of \code{size}.}
\item{beta}{the parameter of the generalized Dirichlet multinomial distribution. \code{beta} should
have the same dimension as \code{alpha}.
For \code{rdirm}, if \code{beta} is a vector, \code{size} must be a scalar. All the random samples will
be drawn from the same \code{beta} and \code{size}. If \code{beta} is a matrix, the
number of rows should match the length of \code{size}. Each random vector
will be drawn from the corresponding row of \code{beta} and the corresponding element of \code{size}.}
\item{Y}{the multivariate count matrix with dimensions \eqn{n \times d}{nxd}, where
\eqn{n = 1,2, \ldots} is the number of observations and \eqn{d=3,4,\ldots} is the number of categories.}
}
\value{
\code{dgdirmn} returns the value of
\eqn{\log(P(y|\alpha, \beta))}{logP(y|\alpha, \beta)}.
When \code{Y} is a matrix of \eqn{n} rows, the function \code{dgdirmn} returns a vector of length \eqn{n}.
\code{rgdirmn} returns a \eqn{n\times d}{nxd} matrix of the generated random observations.
}
\description{
\code{rgdirmn} generates random observations from the generalized Dirichlet multinomial distribution.
\code{dgdirmn} computes the log of the generalized Dirichlet multinomial probability mass function.
}
\details{
\eqn{Y=(y_1, \ldots, y_d)} are the \eqn{d} category count vectors. Given the parameter vector \eqn{\alpha = (\alpha_1, \ldots, \alpha_{d-1}),
\alpha_j>0}, and \eqn{\beta=(\beta_1, \ldots, \beta_{d-1}), \beta_j>0},
the generalized Dirichlet multinomial probability mass function is
\deqn{
P(y|\alpha,\beta)
=C_{y_1, \ldots, y_d}^{m} \prod_{j=1}^{d-1}
\frac{\Gamma(\alpha_j+y_j)}{\Gamma(\alpha_j)}
\frac{\Gamma(\beta_j+z_{j+1})}{\Gamma(\beta_j)}
\frac{\Gamma(\alpha_j+\beta_j)}{\Gamma(\alpha_j+\beta_j+z_j)} ,
}{
P(y|\alpha,\beta)
=C_{y_1, \ldots, y_d}^{m} prod_{j=1}^{d-1} {Gamma(\alpha_j+y_j)Gamma(\beta_j+z_{j+1})Gamma(\alpha_j+\beta_j)} / {Gamma(\alpha_j)Gamma(\beta_j)Gamma(\alpha_j+\beta_j+z_j)},
}
where \eqn{z_j = \sum_{k=j}^d y_k}{z_j = sum_{k=j}^d y_k} and \eqn{m = \sum_{j=1}^d y_j}{m = sum_{j=1}^d y_j}.
Here, \eqn{C_k^n}, often read as "\eqn{n} choose \eqn{k}",
refers to the number of \eqn{k}-combinations from a set of \eqn{n} elements.
The \eqn{\alpha} and \eqn{\beta} parameters can be vectors, like the results from the
distribution
fitting function, or they can be matrices with \eqn{n} rows,
like the estimate
from the regression function multiplied by the covariate matrix
\eqn{exp(X\alpha)} and \eqn{exp(X\beta)}
}
\examples{
# example 1
m <- 20
alpha <- c(0.2, 0.5)
beta <- c(0.7, 0.4)
Y <- rgdirmn(10, m, alpha, beta)
dgdirmn(Y, alpha, beta)
# example 2
set.seed(100)
alpha <- matrix(abs(rnorm(40)), 10, 4)
beta <- matrix(abs(rnorm(40)), 10, 4)
size <- rbinom(10, 10, 0.5)
GDM.rdm <- rgdirmn(size=size, alpha=alpha, beta=beta)
GDM.rdm1 <- rgdirmn(n=20, size=10, alpha=abs(rnorm(4)), beta=abs(rnorm(4)))
}
\keyword{distribution}
\keyword{models}
|
95c7a0f3f8d11421c6148731d97fe861fe29104c
|
5aaa165524c0f6a1cb3c91906275e04fd40bbe85
|
/JE_Visualize/Supporting_Functions/Reduce_size_burdenmap.R
|
a4a59adf9b6b38e76f468abe65b1442017cbf47a
|
[] |
no_license
|
m2man/SIYOUCRU
|
8ad8c6250b63673360314243d8991a463615495c
|
6b1db34c0cb7476aab4f85707ecb2bb53cf1c7a8
|
refs/heads/master
| 2020-07-05T00:34:59.076099
| 2019-08-30T09:50:07
| 2019-08-30T09:50:07
| 202,470,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,235
|
r
|
Reduce_size_burdenmap.R
|
# --- NOTE ---
# Use this script to generate Burden_Cases_Map.Rds (which will be used in Tab 4 Shiny)
# Also use this script to generate Incidence Rate or Deaths (instead of Cases) to be used in Tab 4 Shiny
# ---------- #
# Get directory of the script (this part only work if source the code, wont work if run directly in the console)
# This can be set manually !!!
script.dir <- dirname(sys.frame(1)$ofile)
script.dir <- paste0(script.dir, '/')
setwd(script.dir)
# Create folder to store the result (will show warnings if the folder already exists --> but just warning, no problem)
dir.create(file.path('Generate'), showWarnings = TRUE)
Savepath <- 'Generate/'
DataPath.Map <- 'Data/'
FileName.Map <- 'burden_map.rds'
Map <- readRDS(paste0(DataPath.Map, FileName.Map))
Map@data$dist <- NULL
Map@data$admin_level <- NULL
Map@data$value <- NULL
Map@data$value.vaccine <- 0
Map@data$value.unvaccine <- 0
# Exclude these regions: HKG - MAC - SaLa.MYS - Pen.MYS - Sara.MYS
Map <- Map[c(-7, -18, -20, -21, -22),]
# saveRDS(Map, 'burden_map_cutoff.Rds')
# Map@data$Subnation <- NULL
# Map@data$FOI <- c(1.7, 6.2, 6.2, 6.2, 11.1, 17.8, 26.5, 26.5, 1.7, 14.4, 14.1, 0.1,
# 8.7, 4.1, 7.3, 4.0, 7.3, 7.7, 9.0, 8.4, 1.7, 16.5, 26.5, 7.7, 1.7, 1.7, 7.7, 26.5, 6.1, 17.8)/100
# Map@data$id = rownames(Map@data)
# Map.points = fortify(Map, region="id")
# Map.df = join(Map.points, Map@data, by="id")
# ggplot(Map.df) + aes(long, lat, group = group, fill = FOI) + geom_polygon()
# Map.df.w <- Map.df[, c(1, 2, 12)]
# rasterdf <- rasterFromXYZ(Map.df.w)
# writeOGR(obj=Map, dsn = '~/DuyNguyen/', layer="Map", driver="ESRI Shapefile")
library(shiny)
library(shinycssloaders)
library(shinyjs)
library(ggplot2)
library(plotly)
library(RColorBrewer)
library(DT)
library(leaflet)
library(sp)
library(rgdal)
library(rgeos)
library(data.table)
library(plyr)
# ----- LIBRARY FUNCTION -----
#' Aggregate burden data across the requested regions and derive
#' vaccine/unvaccine differences plus incidence rates per 100,000.
#'
#' @param cv,cuv Per-region lists of cases matrices (vaccine / unvaccine
#'   scenario); columns alternate children/adult within each year.
#' @param dv,duv Same structure for deaths.
#' @param pv,puv Population data frames (vaccine / unvaccine); the last column
#'   is the region name, and each region occupies 100 single-year age rows.
#' @param region Character vector of region names, or 'World' for all regions.
#' @param agegroup 1 = all ages, 2 = children (rows 1:15), 3 = adults (rows 16:100).
#' @param listtime Vector of years used to label columns.
#' @return A list of 8 long-format data frames (Year, Burden_Value):
#'   [[1]] cases vacc, [[2]] cases unvacc, [[3]] deaths vacc, [[4]] deaths unvacc,
#'   [[5]] case diff (unvacc - vacc), [[6]] death diff,
#'   [[7]] incidence rate vacc, [[8]] incidence rate unvacc.
Create_Region_Burden_All <- function(cv, cuv, dv, duv, pv, puv, region, agegroup, listtime){
  if (region[1] != 'World')
    idx.region <- which(names(cv) %in% region)
  else
    idx.region <- c(1 : length(cv))
  # Element-wise sum of the selected regions' matrices for each burden input.
  list.dt.integrate <- list(cv, cuv, dv, duv)
  list.dt.integrate.result <- lapply(list.dt.integrate, function(x){Reduce('+', x[idx.region])})
  rm(list.dt.integrate)
  list.burden.result <- lapply(list.dt.integrate.result,
                               function(x){
                                 # Columns alternate children/adult; collapse or
                                 # select according to the requested age group.
                                 # All age group
                                 if (agegroup == 1){
                                   x <- x[, seq(1, ncol(x), 2)] + x[, seq(2, ncol(x), 2)]
                                 }else{
                                   # Children
                                   if (agegroup == 2){
                                     x <- x[, seq(1, ncol(x), 2)]
                                   }else{ # Adult
                                     x <- x[, seq(2, ncol(x), 2)]
                                   }
                                 }
                                 x <- data.frame(x)
                                 colnames(x) <- listtime
                                 x <- melt(x)
                                 colnames(x) <- c('Year', 'Burden_Value')
                                 return(x)
                               })
  rm(list.dt.integrate.result)
  # ----- Find Difference Cases and Deaths in Vacc and Unvacc -----
  list.burden.result[[5]] <- list.burden.result[[2]] - list.burden.result[[1]] # Diff in Cases of Unvacc - Vacc
  list.burden.result[[5]]$Year <- list.burden.result[[1]]$Year
  list.burden.result[[6]] <- list.burden.result[[4]] - list.burden.result[[3]] # Diff in Deaths of Unvacc - Vacc
  list.burden.result[[6]]$Year <- list.burden.result[[1]]$Year
  # ----- Find IR -----
  if (region[1] != 'World'){
    df.vaccine.region <- pv[which(pv$region %in% region), -ncol(pv)]
    df.unvaccine.region <- puv[which(puv$region %in% region), -ncol(puv)]
  }else{
    df.vaccine.region <- pv[ , -ncol(pv)]
    df.unvaccine.region <- puv[ , -ncol(puv)]
  }
  vec.vaccine.region.total <- 0
  vec.unvaccine.region.total <- 0
  if (agegroup == 1){
    vec.vaccine.region.total <- as.numeric(colSums(df.vaccine.region))
    vec.unvaccine.region.total <- as.numeric(colSums(df.unvaccine.region))
  }
  i <- c(1 : length(region))
  if (agegroup == 2){
    # Rows 1:15 of each region's 100-row block are the child age classes.
    vec.vaccine.region.total <- as.numeric(colSums(df.vaccine.region[rep((i - 1)*100, each = 15) + 1 : 15, ]))
    vec.unvaccine.region.total <- as.numeric(colSums(df.unvaccine.region[rep((i - 1)*100, each = 15) + 1 : 15, ]))
  }
  if (agegroup == 3){
    # Rows 16:100 (85 age classes) are adults. BUG FIX: the original used
    # each = 75, so the region offsets did not align with the 85 adult row
    # indices and silent recycling selected wrong rows for multi-region input.
    vec.vaccine.region.total <- as.numeric(colSums(df.vaccine.region[rep((i - 1)*100, each = 85) + 16 : 100, ]))
    vec.unvaccine.region.total <- as.numeric(colSums(df.unvaccine.region[rep((i - 1)*100, each = 85) + 16 : 100, ]))
  }
  vec.pop.region <- vec.vaccine.region.total + vec.unvaccine.region.total
  rm(df.vaccine.region, df.unvaccine.region, vec.unvaccine.region.total, vec.vaccine.region.total)
  # Expand per-year population totals to match the long-format burden rows,
  # then convert cases into incidence rates per 100,000 population.
  vec.pop.region <- rep(vec.pop.region, each = nrow(list.burden.result[[1]]) / length(listtime))
  t1 <- list.burden.result[[1]]
  t2 <- list.burden.result[[2]]
  t1$Burden_Value <- t1$Burden_Value / vec.pop.region * 100000
  t2$Burden_Value <- t2$Burden_Value / vec.pop.region * 100000
  rm(vec.pop.region)
  list.burden.result[[7]] <- t1
  list.burden.result[[8]] <- t2
  return(list.burden.result)
}
# Mean burden per year (over simulations) for both vaccination scenarios.
# idx.burden selects the burden measure: 1 = cases, 2 = deaths, 3 = incidence rate (IR).
# Returns a data frame with columns Year, Burden_Value.Vaccine,
# Burden_Value.Unvaccine, or NULL for an unrecognised idx.burden.
Create_Region_Specific_Burden_All <- function(cv, cuv, dv, duv, pv, puv, region, agegroup, listtime, idx.burden){
  list.burden <- Create_Region_Burden_All(cv, cuv, dv, duv, pv, puv, region, agegroup, listtime)
  # Positions of the (vaccine, unvaccine) pair inside list.burden per measure.
  pair <- switch(as.character(idx.burden),
                 "1" = c(1, 2),   # cases
                 "2" = c(3, 4),   # deaths
                 "3" = c(7, 8),   # incidence rate
                 NULL)
  if (is.null(pair)) {
    return(NULL)
  }
  years <- list.burden[[1]]$Year
  yearly.mean <- function(k) {
    aggregate(list.burden[[k]]$Burden_Value, list(Year = years), FUN = mean)
  }
  bvl <- cbind(yearly.mean(pair[1]), yearly.mean(pair[2])[[2]])
  colnames(bvl) <- c('Year', 'Burden_Value.Vaccine', 'Burden_Value.Unvaccine')
  bvl
}
# ----- Preprocess Tab 2 -----
DataPath.Map <- '../Data/' # Turn back to the data folder of Shiny
# Load Population data
Pop.Total <- readRDS(paste0(DataPath.Map, 'Pop_Total.rds'))
Pop.Unvaccine <- readRDS(paste0(DataPath.Map, 'Pop_UnVaccine.rds'))
Pop.Total <- Pop.Total[ , colnames(Pop.Unvaccine)]
Pop.Vaccine <- Pop.Total[ , -ncol(Pop.Total)] - Pop.Unvaccine[ , -ncol(Pop.Unvaccine)]
Pop.Vaccine$region <- Pop.Total$region
Subregions <- unique(Pop.Unvaccine$region)
Pop.Time <- colnames(Pop.Unvaccine)[-ncol(Pop.Total)] #remove region column name
Pop.Time <- sapply(Pop.Time, function(x){substr(x, 2, 5)})
Pop.Time <- as.numeric(Pop.Time) # Time year
rm(Pop.Total)
# ----- Preprocess Tab 3 -----
Cases.Vaccine <- readRDS(paste0(DataPath.Map, 'vac_cases_agegroup.rds'))
Cases.Unvaccine <- readRDS(paste0(DataPath.Map, 'no_vac_cases_agegroup.rds'))
Deaths.Vaccine <- readRDS(paste0(DataPath.Map, 'vac_deaths_agegroup.rds'))
Deaths.Unvaccine <- readRDS(paste0(DataPath.Map, 'no_vac_deaths_agegroup.rds'))
# ----- PROCESS -----
# For every region on the map, compute yearly mean burden under both the
# vaccine and unvaccine scenarios (all ages) and store the two yearly series
# as list-columns on the map's attribute table, then save the result.
# if burden is cases --> idx.burden = 1 (2 for deaths, 3 for IR)
idx.burden <- 1
for (i in 1 : length(Map)){
  cat('Processing', as.character(Map@data$Country[i]), '\n')
  list.burden <- Create_Region_Specific_Burden_All(Cases.Vaccine, Cases.Unvaccine, Deaths.Vaccine, Deaths.Unvaccine,
                                                   Pop.Vaccine, Pop.Unvaccine, Map@data$Country[i],
                                                   agegroup = 1, Pop.Time, idx.burden = idx.burden)
  # list.burden is a data frame; each map row stores a full yearly vector.
  Map@data$value.vaccine[i] <- list(list.burden$Burden_Value.Vaccine)
  Map@data$value.unvaccine[i] <- list(list.burden$Burden_Value.Unvaccine)
}
saveRDS(Map, file = paste0(Savepath, 'Burden_Cases_Map.rds'))
# ----- PLOT -----
# # qt <- quantile(unlist(Map@data$value.vaccine), probs = c(0, 10, 25, 40, 55, 70, 85, 100)/100)
# # qt <- as.numeric(qt)
# # bins <- qt # bin for legend
# bins <- c(0, 2, 4, 6, 8, 10, 12, 20, 30, 40) * 1000
# legend_label <- paste(head(round(bins, 2),-1), tail(round(bins, 2), -1), sep = "-")
#
# idx <- seq(1, length(unlist(Map$value.vaccine)), 66) # first year
# pal <- colorBin("YlOrRd", domain = unlist(Map$value.vaccine), bins = bins) # color function
#
# labels <- paste('Region:', Map$Country, "<br/>Burden:", round(unlist(Map$value.vaccine)[idx], 3)) # label for FOI value
#
# m <- leaflet(Map) %>% addProviderTiles(providers$Esri.WorldGrayCanvas) %>% setView(107, 25, 3.25) %>%
# addPolygons(
# fillColor = ~pal(unlist(Map$value.vaccine)[idx]), weight = 1, opacity = 1, color = "black",
# fillOpacity = 1, stroke = T, layerId = ~Country,
# label = lapply(labels, HTML),
# labelOptions = labelOptions(style = list("font-weight" = "normal", padding = "3px 8px"),
# textsize = "16px", direction = "auto", opacity = 0.88),
# highlightOptions = highlightOptions(color = "blue", weight = 2)
# ) %>%
# addLegend(colors = pal((head(bins,-1) + tail(bins, -1))/2), opacity = 1, labels = legend_label,
# position = "bottomright", title = "Burden") %>%
# addMiniMap() # add mini map to show zoom area
# m
|
130a941435cfa409f0cd5fa1887b201ba9dad4d2
|
83ae358d90cb1c54c8be380bc7bd628a2f6ed530
|
/man/bread.Rd
|
5cbe237086023e011f8e6b7e82cc3206a32f3a4e
|
[] |
no_license
|
cran/Rlab
|
c7963e1210e2140fc6d397ff6a2cf289f0dd3bd2
|
c72e630626f6df15cf75ffd8b9ee7c85322aeda8
|
refs/heads/master
| 2022-05-28T16:35:40.306539
| 2022-05-04T22:10:02
| 2022-05-04T22:10:02
| 17,693,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 755
|
rd
|
bread.Rd
|
\name{bread}
\alias{bread}
\title{Bread rising experiment}
\description{
The data set bread contains height measurements of 48 cupcakes.
A batch of Hodgson Mill Wholesome White Bread mix was divided into three parts
and mixed with 0.75, 1.0, and 1.25 teaspoons of yeast, respectively.
Each part was made into 8 different cupcakes and baked at 350 degrees.
After baking, the height of each cupcake was measured.
Then the experiment was repeated at 450 degrees.
}
\format{
A data frame with 48 observations on the following 3 variables.
\describe{
\item{yeast}{: quantity of yeast (.75, 1 or 1.25 teaspoons)}
\item{temp}{: baking temperature (350 or 450 degrees)}
\item{height}{: cupcake height}
}
}
\keyword{datasets}
|
f72733b4889db966c3c00faa47489a31558d723d
|
41e6440cae2f89175d5c1b356402c47cc581dd62
|
/R/app.R
|
d338d96098d4c6ec6b458bda7fb881bf94ee9446
|
[
"MIT"
] |
permissive
|
derryleng/metar
|
00f963d1ecb47f02b118c190f64fd5b5d2978617
|
3683a3aef41d71c3270a2e15d4e7de76e9559f5b
|
refs/heads/main
| 2023-06-20T15:08:20.205025
| 2021-07-21T06:04:40
| 2021-07-21T06:04:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,058
|
r
|
app.R
|
metarShinyApp <- function() {
if (!requireNamespace("shiny", quietly = T) | !requireNamespace("shinyFiles", quietly = T)) {
stop("Packages \"shiny\" and \"shinyFiles\" are not installed.", call. = F)
}
require(data.table)
require(shiny)
require(shinyjs)
require(shinyWidgets)
require(shinyFiles)
require(DT)
input_type_options <- c("CSV", "Text", "Raw")
  # Read METAR reports from `input_file` and return them parsed via parse_METAR().
  #
  # file_type:
  #   "CSV"  - read with data.table::fread(); scan character columns for one
  #            whose first value tokenises into more than one METAR token kind
  #            and use that column; otherwise concatenate all columns per row.
  #   "Text" - one report per line of the file.
  #   "Raw"  - `input_file` is the raw text itself, split on newlines.
  # NOTE(review): for any other file_type, raw_metar is never assigned and the
  # final parse_METAR() call errors -- confirm callers restrict the choices
  # (input_type_options above suggests they do).
  read_file <- function(input_file, file_type) {
    if (file_type == "CSV") {
      # Try to find a METAR column, otherwise combine all columns
      raw_metar <- fread(file = input_file)
      found_metar_column <- F
      for (col in names(raw_metar)) {
        if (is.character(raw_metar[[col]][1])) {
          # A genuine METAR string yields several distinct token types.
          identify_tokens <- table(sapply(unlist(strsplit(raw_metar[[col]][1], " ")), identify_METAR_token))
          if (length(identify_tokens) > 1) {
            raw_metar <- raw_metar[[col]]
            found_metar_column <- T
            break
          }
        }
      }
      if (!found_metar_column) {
        raw_metar <- apply(raw_metar, 1, function(x) paste(x, collapse = " "))
      }
    } else if (file_type == "Text") {
      raw_metar <- readLines(input_file)
    } else if (file_type == "Raw") {
      raw_metar <- unlist(strsplit(input_file, "\n"))
    }
    return(parse_METAR(raw_metar))
  }
  # Thin wrapper around DT::datatable() applying app-wide display defaults:
  # no row names, no row selection, Bootstrap 4 styling, centred columns,
  # horizontal scrolling and copy/CSV/Excel export buttons.
  # Every default can be overridden per call; any extra arguments in `...`
  # are forwarded to datatable() unchanged.
  datatable_customised <- function(
    data,
    rownames = F,
    selection = "none",
    style = "bootstrap4",
    options = list(
      pageLength = 15,
      lengthMenu = seq(5, 100, 5),
      columnDefs = list(list(className = 'dt-center', targets = "_all")),
      scrollX = T,
      # dom: length/buttons/filter row, table, then info/pagination row.
      dom = '<"dataTables_row"lBf>rt<"dataTables_row"ip>',
      buttons = c('copy', 'csv', 'excel')
    ),
    extensions = c("Buttons"),
    ...
  ){
    datatable(
      data = data,
      rownames = rownames,
      selection = selection,
      style = style,
      options = options,
      extensions = extensions,
      ...
    )
  }
ui <- fluidPage(
useShinyjs(),
tags$head(
tags$style(HTML("
body {
color: #333;
background-color: #FFF;
}
body.dark-theme {
color: #eee;
background-color: #121212;
}
.modal-content {
color: #333;
background-color: #FFF;
}
.modal-content.dark-theme {
color: #eee;
background-color: #121212;
}
#output_table {
background-color: #f9f9f9;
}
#output_table.dark-theme {
background-color: #121212;
}
.spinner {
color: #ffffff;
font-size: 90px;
text-indent: -9999em;
overflow: hidden;
width: 1em;
height: 1em;
margin-left: calc(50vw - 0.5em);
margin-top: calc(10vh - 0.5em);
border-radius: 50%;
position: relative;
-webkit-transform: translateZ(0);
-ms-transform: translateZ(0);
transform: translateZ(0);
-webkit-animation: load6 1.7s infinite ease, round 1.7s infinite ease;
animation: load6 1.7s infinite ease, round 1.7s infinite ease;
}
@-webkit-keyframes load6 {
0% {
box-shadow: 0 -0.83em 0 -0.4em, 0 -0.83em 0 -0.42em, 0 -0.83em 0 -0.44em, 0 -0.83em 0 -0.46em, 0 -0.83em 0 -0.477em;
}
5%,
95% {
box-shadow: 0 -0.83em 0 -0.4em, 0 -0.83em 0 -0.42em, 0 -0.83em 0 -0.44em, 0 -0.83em 0 -0.46em, 0 -0.83em 0 -0.477em;
}
10%,
59% {
box-shadow: 0 -0.83em 0 -0.4em, -0.087em -0.825em 0 -0.42em, -0.173em -0.812em 0 -0.44em, -0.256em -0.789em 0 -0.46em, -0.297em -0.775em 0 -0.477em;
}
20% {
box-shadow: 0 -0.83em 0 -0.4em, -0.338em -0.758em 0 -0.42em, -0.555em -0.617em 0 -0.44em, -0.671em -0.488em 0 -0.46em, -0.749em -0.34em 0 -0.477em;
}
38% {
box-shadow: 0 -0.83em 0 -0.4em, -0.377em -0.74em 0 -0.42em, -0.645em -0.522em 0 -0.44em, -0.775em -0.297em 0 -0.46em, -0.82em -0.09em 0 -0.477em;
}
100% {
box-shadow: 0 -0.83em 0 -0.4em, 0 -0.83em 0 -0.42em, 0 -0.83em 0 -0.44em, 0 -0.83em 0 -0.46em, 0 -0.83em 0 -0.477em;
}
}
@keyframes load6 {
0% {
box-shadow: 0 -0.83em 0 -0.4em, 0 -0.83em 0 -0.42em, 0 -0.83em 0 -0.44em, 0 -0.83em 0 -0.46em, 0 -0.83em 0 -0.477em;
}
5%,
95% {
box-shadow: 0 -0.83em 0 -0.4em, 0 -0.83em 0 -0.42em, 0 -0.83em 0 -0.44em, 0 -0.83em 0 -0.46em, 0 -0.83em 0 -0.477em;
}
10%,
59% {
box-shadow: 0 -0.83em 0 -0.4em, -0.087em -0.825em 0 -0.42em, -0.173em -0.812em 0 -0.44em, -0.256em -0.789em 0 -0.46em, -0.297em -0.775em 0 -0.477em;
}
20% {
box-shadow: 0 -0.83em 0 -0.4em, -0.338em -0.758em 0 -0.42em, -0.555em -0.617em 0 -0.44em, -0.671em -0.488em 0 -0.46em, -0.749em -0.34em 0 -0.477em;
}
38% {
box-shadow: 0 -0.83em 0 -0.4em, -0.377em -0.74em 0 -0.42em, -0.645em -0.522em 0 -0.44em, -0.775em -0.297em 0 -0.46em, -0.82em -0.09em 0 -0.477em;
}
100% {
box-shadow: 0 -0.83em 0 -0.4em, 0 -0.83em 0 -0.42em, 0 -0.83em 0 -0.44em, 0 -0.83em 0 -0.46em, 0 -0.83em 0 -0.477em;
}
}
@-webkit-keyframes round {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
}
100% {
-webkit-transform: rotate(360deg);
transform: rotate(360deg);
}
}
@keyframes round {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
}
100% {
-webkit-transform: rotate(360deg);
transform: rotate(360deg);
}
}
#spinner_wrapper {
position: fixed;
left: 0;
top: 0;
width: 100vw;
height: 100vh;
display: flex;
justify-content: center;
flex-direction: column;
text-align: center;
color: #FFFFFF;
background-color: #555759;
animation: fadeIn 0.5s linear 0.5s forwards alternate;
z-index: 10000;
visibility: hidden;
}
@keyframes fadeIn {
0% {
visibility: visible;
opacity: 0;
}
100% {
visibility: visible;
opacity: 0.5;
}
}
.dataTables_row {
display: flex;
justify-content: space-between;
}
"))
),
tags$body(
class = "dark-theme || light-theme",
div(
style = "position: absolute; padding-top: 12px;",
materialSwitch("toggle_theme", "Dark Mode", T, status = "primary")
),
div(
style = "text-align: center;",
h2("METAR Shiny App"),
radioGroupButtons("input_type", "Input Type", input_type_options, selected = "CSV"),
uiOutput("input_ui"),
actionButton("metar_process", "Process METAR")
),
div(style = "height: 30px;"),
uiOutput("output_ui"),
conditionalPanel(
condition = "$('html').hasClass('shiny-busy')",
id = "spinner_wrapper",
htmltools::HTML('<div class="spinner"></div>')
)
)
)
# Shiny server: renders the type-specific input widget, parses the selected
# METAR source when the button is pressed, and shows the result in a DT table.
server <- function(input, output, session) {
  # Input widget depends on the chosen input type: a free-text area for
  # pasted ("Raw") reports, a shinyFiles picker for "CSV"/"Text" files.
  output$input_ui <- renderUI({
    if (input$input_type == "Raw") {
      div(
        style = "display: flex; justify-content: center",
        textAreaInput("input_raw", NULL, width = "90%", resize = "vertical")
      )
    } else if (input$input_type %in% c("CSV", "Text")) {
      div(
        style = "padding-bottom: 15px;",
        shinyFilesButton("metar_file", label = "Choose File", title = "Select METAR file", multiple = F),
        textOutput("file_name")
      )
    }
  })
  shinyFileChoose(input, "metar_file", roots = getVolumes())
  # Absolute path of the picked file (zero-length until a file is chosen).
  metar_file <- reactive({
    as.vector(parseFilePaths(roots = getVolumes(), input$metar_file)$datapath)
  })
  output$file_name <- renderText(metar_file())
  # Parsing is deferred until the "Process METAR" button is clicked.
  processed_metar <- eventReactive(input$metar_process, {
    if (input$input_type == "Raw") {
      read_file(input$input_raw, file_type = input$input_type)
    } else if (input$input_type %in% c("CSV", "Text") && length(metar_file()) > 0) {
      read_file(metar_file(), file_type = input$input_type)
    }
  })
  output$output_ui <- renderUI({
    req(processed_metar())
    div(
      DT::dataTableOutput(outputId = "output_table")
    )
  })
  output$output_table <- DT::renderDataTable({
    datatable_customised(processed_metar())
  }, server = T)
  # BUG FIX: jQuery .toggle(<string>) animates element *visibility* (the
  # string is interpreted as an animation duration) -- it does not toggle a
  # CSS class, so clicking the switch hid the page instead of changing the
  # theme. Use .toggleClass() instead, and fix the '.model-content' typo so
  # modals pick up the '.modal-content.dark-theme' rule defined in the CSS.
  onclick("toggle_theme", runjs("
    $('body').toggleClass('dark-theme');
    $('.modal-content').toggleClass('dark-theme');
    $('#output_table').toggleClass('dark-theme');
  "))
  # Single-user desktop app: stop the R process when the browser closes.
  session$onSessionEnded(function() {
    stopApp()
  })
}
shinyApp(ui, server)
}
|
04f665d0f92abed3c13618fa5d5c95b362674b44
|
bd74dfc8aecca440ca0f94627c85118ecca19a3d
|
/Figure6/iba1-new.R
|
3fd6101d3669c46f12e1b0acf5ca142b88cddeda
|
[] |
no_license
|
ErbB4/LMM-GLMM-R-plasticity-paper
|
74c94427ef1e7067eb65c8bd9b0cda8bbf15c67d
|
0d8c3c7d95d18f11a5476bb953885e6c17f6a832
|
refs/heads/master
| 2023-04-17T06:11:44.508678
| 2021-04-28T20:02:11
| 2021-04-28T20:02:11
| 269,365,261
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,763
|
r
|
iba1-new.R
|
# Analyses for IBA1 protein expression: linear mixed models (lme4) comparing
# Sham x Ipsi effects on cell counts at three time points, with random
# intercepts for distance nested within animal. Requires "iba1.csv" in the
# working directory (schema assumed from the factor conversions below).
library(lme4)
# Import data.
data = read.csv("iba1.csv")
# Specify certain variables as factors.
str(data)
head(data)
data$Time <- as.factor(data$Time)
data$Sham <- as.factor(data$Sham)
data$Ipsi <- as.factor(data$Ipsi)
data$Distance2 <- as.factor(data$Distance2)
data$Distance1 <- as.factor(data$Distance1)
data$AnimalNo <- as.factor(data$AnimalNo)
# Get data for each time point.
time1 <- as.data.frame(data[data$Time=="1",])
time2 <- as.data.frame(data[data$Time=="2",])
time3 <- as.data.frame(data[data$Time=="3",])
# NOTE(review): the next three assignments are no-ops (x$Counting <- x$Counting);
# possibly a leftover from a removed transformation -- confirm intent.
time1$Counting <- time1$Counting
time2$Counting <- time2$Counting
time3$Counting <- time3$Counting
# Time 1: candidate models from full interaction down to intercept-only,
# all with random intercepts for Distance2 nested within AnimalNo.
m.T1.int <- lmer(Counting ~ Sham * Ipsi + (1|AnimalNo/Distance2),data = time1)
m.T1.add <- lmer(Counting ~ Sham + Ipsi + (1|AnimalNo/Distance2),data = time1)
m.T1.sham <- lmer(Counting ~ Sham + (1|AnimalNo/Distance2),data = time1)
m.T1.ipsi <- lmer(Counting ~ Ipsi + (1|AnimalNo/Distance2),data = time1)
m.T1.null = lmer(Counting ~ 1 + (1|AnimalNo/Distance2),data = time1)
AIC(m.T1.int,m.T1.add,m.T1.sham,m.T1.ipsi,m.T1.null)# interaction model is the best
# Then check whether the model fulfills the assumptions:
# residuals look very good
library(lattice)
trellis.par.set("background$col" = "white")
par(mar = rep(2, 4))
plot(m.T1.int)
qqnorm(residuals(m.T1.int))
qqline(residuals(m.T1.int))
# Print confidence interval (default 95% level).
summary(m.T1.int)
confint(m.T1.int) # not significant
# Time 2: same model set refit on the second time point.
m.T2.int <- lmer(Counting ~ Sham * Ipsi + (1|AnimalNo/Distance2),data = time2)
m.T2.add <- lmer(Counting ~ Sham + Ipsi + (1|AnimalNo/Distance2),data = time2)
m.T2.sham <- lmer(Counting ~ Sham + (1|AnimalNo/Distance2),data = time2)
m.T2.ipsi <- lmer(Counting ~ Ipsi + (1|AnimalNo/Distance2),data = time2)
m.T2.null = lmer(Counting ~ 1 + (1|AnimalNo/Distance2),data = time2)
AIC(m.T2.int,m.T2.add,m.T2.sham,m.T2.ipsi,m.T2.null)# again: the interaction should be kept
plot(m.T2.int) # this looks ok
summary(m.T2.int) # stimulationa and the interaction with ipsi are significant. Please ignore the effects of distance in this case
# 99.5% confidence intervals (stricter than default 95%).
confint(m.T2.int,level=0.995)
# Time 3: here the interaction is significant too!
m.T3.int <- lmer(Counting ~ Sham * Ipsi + (1|AnimalNo/Distance2),data = time3)
m.T3.add <- lmer(Counting ~ Sham + Ipsi + (1|AnimalNo/Distance2),data = time3)
m.T3.sham <- lmer(Counting ~ Sham + (1|AnimalNo/Distance2),data = time3)
m.T3.ipsi <- lmer(Counting ~ Ipsi + (1|AnimalNo/Distance2),data = time3)
m.T3.null = lmer(Counting ~ 1 + (1|AnimalNo/Distance2),data = time3)
AIC(m.T3.int,m.T3.add,m.T3.sham,m.T3.ipsi,m.T3.null)#
plot(m.T3.int) # I removed point 338 -- NOTE(review): removal not shown in this script; confirm where/why.
summary(m.T3.int) #
confint(m.T3.int,level=0.995)
|
3859dd8f221bb31bb9171739be62a26ab734acbe
|
adcfd5d1a21ca60bb6d34324fddc0e63c7794f60
|
/Scripts/Scripts_Alfonso/8_observed_VS_expected_probabilities.R
|
8db3532ea0e0ac529b215b2ea470a0e989097c6c
|
[] |
no_license
|
JoseBSL/FunctionalMotifs
|
349c81472347230ac6fe9466f8fe8939fcdf7c96
|
33e827c120bc7efb94dd60299d98a4e1bde2a0fe
|
refs/heads/main
| 2023-04-16T09:31:19.110654
| 2023-02-16T12:46:22
| 2023-02-16T12:46:22
| 339,147,455
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,384
|
r
|
8_observed_VS_expected_probabilities.R
|
library(tidyverse)
# https://bartomeuslab.com/2014/12/17/preferring-a-preference-index/
# Chi-square preference test with Bonferroni simultaneous confidence
# intervals (Neu-style resource-selection test).
#
# Tests whether observed counts deviate from expected proportions, then
# builds simultaneous (Bonferroni-adjusted) confidence intervals for each
# item's observed proportion and flags items whose expected proportion falls
# outside its interval. Also prints the overall test and draws the intervals
# (side effects).
#
# obs:   vector of observed counts per item.
# exp:   vector of expected counts/weights (rescaled to proportions).
# alpha: overall significance level for the simultaneous intervals.
# Returns a data.frame with the overall chi-square p-value (repeated per
# item), each item's Pearson residual, and a "sig"/"ns" flag.
chi_pref <- function(obs, exp, alpha = 0.05){
  stopifnot(length(obs) == length(exp), length(obs) > 0)
  # Fixed: dropped the irrelevant B = 2000 (only used when simulate.p.value
  # is TRUE) and the dead `alpha <- alpha` self-assignment.
  chi <- chisq.test(obs, p = exp, rescale.p = TRUE, simulate.p.value = FALSE)
  print(chi) # tells you if there is an overall preference (sig = pref)
  res <- chi$residuals # Pearson residuals: (obs - exp) / sqrt(exp)
  # Bonferroni Z-statistic: alpha is split across the 2*k interval bounds.
  k <- length(obs)
  n <- sum(obs)
  p_obs <- obs/n
  ak <- alpha/(2*k)
  Zak <- abs(qnorm(ak))
  half_width <- Zak*(sqrt(p_obs*(1-p_obs)/n))
  low_interval <- p_obs - half_width
  upper_interval <- p_obs + half_width
  p_exp <- exp/sum(exp)
  # "sig" when the expected proportion lies outside the simultaneous interval
  # around the observed proportion, i.e. the item is over-/under-used.
  sig <- ifelse(p_exp >= low_interval & p_exp <= upper_interval, "ns", "sig")
  plot(c(0,k+1), c(min(low_interval),max(upper_interval)), type = "n",
       ylab = "Preference", xlab = "items", las = 1)
  arrows(x0 = seq_len(k), y0 = low_interval, x1 = seq_len(k),
         y1 = upper_interval, code = 3, angle = 90)
  points(p_exp, col = "red")
  data.frame(chi_test_p = rep(chi$p.value, length(res)),
             chi_residuals = res, sig = sig)
}
# Compare observed vs expected motif probabilities (requires the two CSVs
# below, tidyverse, and the chi_pref() helper defined above).
motifs_observed_probability <- read_csv("Data/Csv/motifs_observed_probability.csv")
motifs_expected_probability <- read_csv("Data/Csv/node_motifs_theoretical_probability.csv") %>%
select(motif,motif_functional_ID,motif_probability) %>% unique() %>%
rename(motif_expected_probability = motif_probability)
# Join expected with observed; motifs never observed get probability 0.
motifs_probability <- motifs_expected_probability %>%
left_join(motifs_observed_probability, by = c("motif","motif_functional_ID"))
motifs_probability[is.na(motifs_probability)] <- 0
motifs_probability <- motifs_probability %>% arrange(desc(motif_observed_probability))
# NOTE(review): `exp` shadows base::exp for the rest of the script.
obs <- motifs_probability$counts_observed
exp <- motifs_probability$motif_expected_probability
chi_test <- chi_pref(obs, exp, alpha = 0.05)
# Monte-Carlo p-value (simulate.p.value = T): result varies between runs
# unless a seed is set beforehand.
res <- chisq.test(x = obs, p = exp, rescale.p = TRUE, simulate.p.value = T, B = 900)
res
res$expected %>% as.vector()
res$observed %>% as.vector()
# Null hypothesis (H0): There is no significant difference between the observed and the
# expected value.
# The p-value of the test is less than the significance level alpha = 0.05.
# We can conclude that the motifs are significantly not distributed as expected
# with a p-value = 2.2e-16.
motifs_probability$chi_test_p <- chi_test$chi_test_p
motifs_probability$chi_residuals <- chi_test$chi_residuals
motifs_probability$sig <- chi_test$sig
motifs_probability <- motifs_probability %>% arrange(desc(sig),desc(abs(chi_residuals)))
# Re-run the preference test on the (now re-sorted) columns.
chi_pref(motifs_probability$counts_observed,
motifs_probability$motif_expected_probability,
alpha = 0.05)
hist(motifs_probability$counts_observed[motifs_probability$counts_observed>1000],300)
# Small worked example of chi_pref with a near-zero expected weight.
chi_pref(obs = c(0,25,200), exp = c(0.00003,50,100),alpha = 0.05)
# NOTE(review): `Convictions` is defined but never used below -- likely a
# leftover from the stats::chisq.test help page example.
Convictions <- matrix(c(2, 10, 15, 3), nrow = 2,
dimnames =
list(c("Dizygotic", "Monozygotic"),
c("Convicted", "Not convicted")))
#######################
# Note that, the chi-square test should be used only when all calculated expected
# values are greater than 5. In our case there are several categories equal to zero
expected <- sum(motifs_probability$counts_observed)*
motifs_probability$motif_expected_probability
expected[expected < 5]
|
5a2c40f1c5bc568bb353a00fe0e2915a834d8228
|
ef35717b113233dc1a9122df61cf1c06645ceaec
|
/man/autoSpec.Rd
|
06424cb4560ae1e5717c31cb01d4dd1e191ccb2a
|
[] |
no_license
|
cran/astsa
|
d33ba640a0edda0dd9e112ed011bb05ac5c36fb3
|
1e597fa74efc437eb247787fcf7d26e0fe0c6b17
|
refs/heads/master
| 2023-04-10T07:36:18.196423
| 2023-01-09T21:50:14
| 2023-01-09T21:50:14
| 17,694,513
| 7
| 14
| null | 2016-03-21T15:10:46
| 2014-03-13T04:00:06
|
R
|
UTF-8
|
R
| false
| false
| 4,608
|
rd
|
autoSpec.Rd
|
\name{autoSpec}
\alias{autoSpec}
\title{
autoSpec - Changepoint Detection of Narrowband Frequency Changes
}
\description{
Uses changepoint detection to discover if there have been slight changes in frequency
in a time series. The autoSpec procedure uses minimum description length (MDL)
to do nonparametric spectral estimation with the goal of detecting changepoints.
Optimization is accomplished via a genetic algorithm (GA).
}
\usage{
autoSpec(xdata, Pi.B = NULL, Pi.C = NULL, PopSize = 70, generation = 70, m0 = 10,
Pi.P = 0.3, Pi.N = 0.3, NI = 7, taper = .5, min.freq = 0, max.freq = .5)
}
\arguments{
\item{xdata}{
time series (of length n at least 100) to be analyzed; the \code{ts} attributes are stripped
prior to the analysis
}
\item{Pi.B}{
probability of being a breakpoint in initial stage; default is 10/n.
Does not need to be specified.
}
\item{Pi.C}{
probability of conducting crossover; default is (n-10)/n.
Does not need to be specified.
}
\item{PopSize}{
population size (default is 70); the number of chromosomes in each generation.
Does not need to be specified.
}
\item{generation}{
number of iterations; default is 70.
Does not need to be specified.
}
\item{m0}{
maximum width of the Bartlett kernel is \code{2*m0 + 1}; default is 10.
Does not need to be specified.
}
\item{Pi.P}{
probability of taking parent's gene in mutation; default is 0.3.
Does not need to be specified.
}
\item{Pi.N}{
probability of taking -1 in mutation; default is 0.3
Does not need to be specified.
}
\item{NI}{
number of islands; default is 7.
Does not need to be specified.
}
\item{taper}{
half width of taper used in spectral estimate; .5 (default) is full taper
Does not need to be specified.
}
\item{min.freq, max.freq}{
the frequency range (min.freq, max.freq) over which to calculate the Whittle likelihood;
the default is (0, .5). Does not need to be specified. If min > max, the roles are
reversed, and reset to the default if either is out of range.
}
}
\details{
Details may be found in Stoffer, D. S. (2023). AutoSpec: Detection of narrowband frequency changes in time series. Statistics and Its Interface, 16(1), 97-108. \doi{10.4310/21-SII703}
}
\value{
Returns three values, (1) the breakpoints including the endpoints, (2) the number of
segments, and (3) the segment kernel orders. See the examples.
}
\references{You can find demonstrations of astsa capabilities at
\href{https://github.com/nickpoison/astsa/blob/master/fun_with_astsa/fun_with_astsa.md}{FUN WITH ASTSA}.
The most recent version of the package can be found at \url{https://github.com/nickpoison/astsa/}.
In addition, the News and ChangeLog files are at \url{https://github.com/nickpoison/astsa/blob/master/NEWS.md}.
The webpages for the texts and some help on using R for time series analysis can be found at
\url{https://nickpoison.github.io/}.
}
\author{
D.S. Stoffer
}
\source{
The genetic algorithm code is adapted from R code provided to us by Rex Cheung (\kbd{https://www.linkedin.com/in/rexcheung}). The code originally supported Aue, Cheung, Lee, & Zhong (2014). Segmented model selection in quantile regression using the minimum description length principle. JASA, 109, 1241-1256. A similar version also supported Davis, Lee, & Rodriguez-Yam (2006). Structural break estimation for nonstationary time series models. JASA, 101, 223-239.
}
\seealso{
\code{\link{autoParm}}
}
\note{The GA is a stochastic optimization procedure and consequently will give different results at
each run. It is a good idea to run the algorithm a few times before coming to a final decision.
}
\examples{
\dontrun{
##-- simulation
set.seed(1)
num = 500
t = 1:num
w = 2*pi/25
d = 2*pi/150
x1 = 2*cos(w*t)*cos(d*t) + rnorm(num)
x2 = cos(w*t) + rnorm(num)
x = c(x1,x2)
##-- plot and periodogram (all action below 0.1)
tsplot(x, main='not easy to see the change')
mvspec(x)
##-- run procedure
autoSpec(x, max.freq=.1)
##-- output (yours will be slightly different -
##-- the nature of GA)
# returned breakpoints include the endpoints
# $breakpoints
# [1] 1 503 1000
#
# $number_of_segments
# [1] 2
#
# $segment_kernel_orders_m
# [1] 2 4
##-- plot everything
par(mfrow=c(3,1))
tsplot(x, col=4)
abline(v=503, col=6, lty=2, lwd=2)
mvspec(x[1:502], kernel=bart(2), taper=.5, main='segment 1', col=4, xlim=c(0,.25))
mvspec(x[503:1000], kernel=bart(4), taper=.5, main='segment 2', col=4, xlim=c(0,.25))
}
}
\keyword{ ts }
|
7e0108a8f2b40323e519375bc5b1e304e9d6ae08
|
d5d5ebf85c43614cc5d5d10cde2e729e546ba1cc
|
/dataR.R
|
46b0c9ce55cdeecf2aa8e2886d9d799f23d6304d
|
[] |
no_license
|
gaurav1988007/Jigsaw-Assignment
|
de22f4e7764089b9d2e5ed96f67a7705f48f6eb8
|
0e76a4cc6bf117326411b87ad42d8068d203c25f
|
refs/heads/master
| 2020-12-24T09:30:53.641293
| 2016-11-21T15:55:08
| 2016-11-21T15:55:08
| 73,287,772
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 78
|
r
|
dataR.R
|
# Demo script created for the git assignment: load the mtcars example data
# set and print a name.
library(datasets)
data(mtcars)
pp <- print("Gaurav") # print() returns its argument, so pp holds "Gaurav"
pp                    # auto-print the stored value again
|
c89b52afc4e50aedb9a95f671b634d36ffc10bdf
|
db8a43ce4e4d58a57a0a2bb29b63acf6c30b5092
|
/R/diff.R
|
c5acb10c214207970bd856b64da7fe5b80906456
|
[] |
no_license
|
zhaoxiaohe/MachineShop
|
ca6fa7d6e7f00ac7d6f8522d50faeec2f4735b2d
|
85b1ff6a9d7df425d041289856861e75ce596621
|
refs/heads/master
| 2020-04-11T06:30:08.059577
| 2018-12-13T00:45:43
| 2018-12-13T00:45:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,489
|
r
|
diff.R
|
#' Model Performance Differences
#'
#' Pairwise model differences in resampled performance metrics.
#'
#' @name diff
#' @rdname diff-methods
#'
#' @param x object containing resampled metrics.
#' @param ... arguments to be passed to other methods.
#'
#' @return \code{ModelMetricsDiff} class object that inherits from
#' \code{ModelMetrics}.
#'
#' @seealso \code{\link{modelmetrics}}, \code{\link{resample}},
#' \code{\link{tune}}, \code{\link{plot}}, \code{\link{summary}},
#' \code{\link{t.test}}
#'
diff.ModelMetrics <- function(x, ...) {
  if (length(dim(x)) <= 2) stop("more than one model needed to diff")
  # Enumerate every unordered pair of models along the third dimension.
  model_pairs <- combn(dim(x)[3], 2)
  first <- model_pairs[1, ]
  second <- model_pairs[2, ]
  # Elementwise metric differences, model pair by model pair.
  xdiff <- x[, , first, drop = FALSE] - x[, , second, drop = FALSE]
  model_names <- dimnames(x)[[3]]
  dimnames(xdiff)[[3]] <- paste(model_names[first], "-", model_names[second])
  ModelMetricsDiff(xdiff, model_names = model_names)
}
#' @rdname diff-methods
#'
#' @examples
#' ## Survival response example
#' library(survival)
#' library(MASS)
#'
#' fo <- Surv(time, status != 2) ~ sex + age + year + thickness + ulcer
#' control <- CVControl()
#'
#' gbmres1 <- resample(fo, Melanoma, GBMModel(n.trees = 25), control)
#' gbmres2 <- resample(fo, Melanoma, GBMModel(n.trees = 50), control)
#' gbmres3 <- resample(fo, Melanoma, GBMModel(n.trees = 100), control)
#'
#' res <- Resamples(GBM1 = gbmres1, GBM2 = gbmres2, GBM3 = gbmres3)
#' perfdiff <- diff(res)
#' summary(perfdiff)
#' plot(perfdiff)
#'
diff.Resamples <- function(x, ...) {
  # Convert the resamples to metrics, then delegate to diff.ModelMetrics.
  metrics <- modelmetrics(x)
  diff(metrics)
}
#' @rdname diff-methods
#'
diff.MLModelTune <- function(x, ...) {
  # Differences are computed on the resamples stored in the tuning object.
  resamples <- x@resamples
  diff(resamples)
}
#' Paired t-Tests for Model Comparisons
#'
#' Paired t-test comparisons of resampled performance metrics from different
#' models.
#'
#' @name t.test
#'
#' @param x object containing paired differences between resampled metrics.
#' @param adjust p-value adjustment for multiple statistical comparisons as
#' implemented by \code{\link[stats]{p.adjust}}.
#' @param ... arguments passed to other methods.
#'
#' @return \code{HTestResamples} class object that inherits from \code{array}.
#' p-values and mean differences are contained in the lower and upper triangular
#' portions, respectively, of the first two dimensions. Model pairs are
#' contained in the third dimension.
#'
#' @seealso \code{\link{diff}}
#'
#' @examples
#' ## Numeric response example
#' library(MASS)
#'
#' fo <- medv ~ .
#' control <- CVControl()
#'
#' gbmres1 <- resample(fo, Boston, GBMModel(n.trees = 25), control)
#' gbmres2 <- resample(fo, Boston, GBMModel(n.trees = 50), control)
#' gbmres3 <- resample(fo, Boston, GBMModel(n.trees = 100), control)
#'
#' res <- Resamples(GBM1 = gbmres1, GBM2 = gbmres2, GBM3 = gbmres3)
#' perfdiff <- diff(res)
#' t.test(perfdiff)
#'
t.test.ModelMetricsDiff <- function(x, adjust = "holm", ...)
{
  # One paired t-test per (model pair, metric), then multiplicity-adjust the
  # p-values within each metric column.
  pvalues <- x %>%
    apply(c(3, 2), function(resample) t.test(resample)$p.value) %>%
    apply(2, p.adjust, method = adjust)
  # Mean difference per (model pair, metric) across resamples.
  meandiffs <- apply(x, c(3, 2), mean, na.rm = TRUE)
  model_names <- x@model_names
  num_models <- length(model_names)
  # Pack results into a models x models x metrics array: mean differences go
  # into the lower triangle, which becomes the upper triangle after aperm();
  # p-values then fill the (new) lower triangle.
  results <- array(NA, dim = c(num_models, num_models, dim(x)[2]),
                   dimnames = list(model_names, model_names, dimnames(x)[[2]]))
  indices <- lower.tri(results[, , 1])
  results[indices] <- meandiffs
  results <- aperm(results, perm = c(2, 1, 3))
  results[indices] <- pvalues
  HTestResamples(results, adjust = adjust)
}
|
34d4796923b4a0eccc536b028102caaa40fcb902
|
0084280ad5d1400c280c110c402d3018b7a129af
|
/R/preprocess/aliquots-coverage-metrics.R
|
cdd9fb8613eba8bb6ad5f5eb3799871a45018432
|
[
"MIT"
] |
permissive
|
fpbarthel/GLASS
|
457626861206a5b6a6f1c9541a5a7c032a55987a
|
333d5d01477e49bb2cf87be459d4161d4cde4483
|
refs/heads/master
| 2022-09-22T00:45:41.045137
| 2020-06-01T19:12:30
| 2020-06-01T19:12:47
| 131,726,642
| 24
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,474
|
r
|
aliquots-coverage-metrics.R
|
#######################################################
# Enumerate cumulative coverage per aliquot for WGS/WXS
# Date: 2018.11.06
# Author: Kevin J.
#######################################################
# Directory for GLASS analysis.
mybasedir = '/Volumes/verhaak-lab/GLASS-analysis/'
datadir = 'results/align/wgsmetrics/'
# NOTE(review): the leading '.' is an unescaped regex metacharacter (matches
# any character); '\\.WgsMetrics\\.txt$' would be the strict pattern.
pattern = '.WgsMetrics.txt$'
#######################################################
# Necessary packages:
library(parallel)
library(tidyverse)
library(data.table)
library(DBI)
#######################################################
# Establish connection with the database.
con <- DBI::dbConnect(odbc::odbc(), "VerhaakDB")
## Read in an example "*.WgsMetrics.txt" file to test the calling.
files = list.files(datadir, full.names = T, pattern = pattern, recursive=T)
# If it is desirable to include the sample names.
# NOTE(review): substring positions 21-23 assume a fixed-width barcode in the
# file name -- confirm against the GLASS naming convention.
samples = data.frame(sample_id=gsub(".WgsMetrics.txt", "", basename(files)), library_type = substring(basename(files), 21, 23))
# The first 10 rows of each file represent a header of additional information.
# Read each WgsMetrics file in parallel; failures are logged and yield NULL
# (dropped later by rbindlist).
cov_dat = mclapply(files, function(f){
dat = tryCatch(read.delim(f,as.is=T, header=T, row.names = NULL, skip = 10), error=function(e) e)
if(inherits(dat,'error')) {
message(f, '\n', dat, '\n')
return()
}
# Truncate the file name to just the sample_id.
dat = dat %>%
mutate(sample_id = gsub(".WgsMetrics.txt", "", basename(f))) # %>%
# filter(coverage!="0") # Filter out those bases with `0` coverage.
return(dat)
}, mc.cores=20)
## Combine all the samples from the GLASS cohort.
glass_cov = data.table::rbindlist(cov_dat)
# Cumulatively add the number of bases at each level: a reversed cumsum gives,
# for each coverage level, the number of bases covered at >= that level.
glass_samples_cumulative_cov = glass_cov %>%
group_by(sample_id) %>%
mutate(cumulative_coverage = rev(cumsum(rev(high_quality_coverage_count)))) %>%
# Make sure colnames are formatting right.
select(aliquot_barcode = sample_id, coverage, high_quality_coverage_count, cumulative_coverage)
# Total number should be 1166 (2019.03.08).
n_distinct(glass_samples_cumulative_cov$aliquot_barcode)
# Write output as one table or a table for each file:
# write.table(glass_samples_cumulative_cov, file = "/Users/johnsk/Documents/Life-History/GLASS-WG/data/ref/glass-cumulative-coverage.txt", sep="\t", row.names = F, col.names = T, quote = F)
# Write to cumulative coverage files to database.
dbWriteTable(con, Id(schema="analysis",table="coverage"), glass_samples_cumulative_cov, append=T)
|
438edd113a771e49fb836006418a47e2e2d4875c
|
efbff8ea44ac87a421dedb6b8e87182f93292e75
|
/app.R
|
8e52bcbc83394965ea7894026d28f213ff922355
|
[
"MIT"
] |
permissive
|
smsaladi/em_data_requirements
|
acdc0410c409c0241dd50e808469bbc25d3242da
|
20cd3f423b5156d18a8231bb398dc3eb00aee2f9
|
refs/heads/master
| 2021-01-23T05:29:15.462726
| 2018-10-15T14:54:52
| 2018-10-15T14:54:52
| 86,308,659
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,039
|
r
|
app.R
|
#
# EM Data Requirements
#
# A back of the envelope calculation for your bigger-than-an-envelope
# microscope and data
#
# Shyam Saladi (saladi@caltech.edu)
# October 2017
#
library(gdata)
library(tidyverse)
library(shiny)
# UI: three columns of inputs (dataset size, collection cadence, hardware)
# followed by three output tables computed in the server.
ui <- fluidPage(
tags$head(includeScript("google_analytics.js")),
includeHTML("github_corner.html"),
titlePanel("EM Microscope Data"),
# Sliders that demonstrate various available options
fluidRow(
column(4,
# dataset size
sliderInput("box_dim", label = h3("Box side length: 2 ^ N * 2^10 (kpixels)"),
min = 1, max = 5, step = 1, value = 2),
sliderInput("bit_depth", label = h3("Bit depth: 2^N depth"), min = 1,
max = 8, step = 1, value = 4),
# alternatives
sliderInput("fps", label = h3("Camera frames per second"), min = 10,
max = 400, step = 10, value = 40),
sliderInput("seconds_collected", label = h3("Seconds collected"),
min = 0, max = 10, step = .5, value = 3)),
column(4, offset = 0.5,
# lagtimes
sliderInput("time_per_movie", label = h3("Total time per movie: N minutes"),
min = 0, max = 5, step = 0.5, value = 1.5),
# NOTE(review): value = 100 exceeds max = 50; the slider will clamp the
# initial value -- confirm which was intended.
sliderInput("movies_per_grid", label = h3("Number of movies per grid"),
min = 0, max = 50, step = 5, value = 100),
# another metric for aggregate data rate
sliderInput("grids_per_day", label = h3("Number of grids per day"),
min = 0, max = 100, step = 5, value = 10),
# NOTE(review): this input is never read in the server below.
sliderInput("calculation_per_dataset", label = h3("Time to calculate (average)"),
min = 1, max = 60, step = 1, value = 10)),
column(4, offset = 0.5,
# http://www.legitreviews.com/wd-black-512gb-m-2-pcie-nvme-ssd-review_191242/3
# https://www.pcper.com/reviews/Storage/Triple-M2-Samsung-950-Pro-Z170-PCIe-NVMe-RAID-Tested-Why-So-Snappy/Preliminary-Resul
selectInput("disk_speed_GB", label = h3("Local disk selection"),
choices = list("NVMe SSD (~1.2 GB/s)" = 1,
"NVMe SSD RAID (<= 2.5 GB/s)" = 2,
"SATA SSD (750 MB/s)" = 3,
"SATA SSD RAID (<= 1.5 GB/s)" = 4,
"SATA HDD (100 MB/s)" = 5,
"2x SATA HDD RAID (<= 200 MB/s)" = 6,
"4x SATA HDD RAID (<= 400 MB/s)" = 7,
"Ramdisk (link-limited)" = 8),
selected = 5),
selectInput("network_link_Gb", label = h3("Network link (slowest)"),
choices = list("1 Gb/s" = 1,
"10 Gb/s" = 2,
"2 x 10 Gb/s (teamed)" = 3),
selected = 1),
sliderInput("network_duty_cycle", label = h3("Network duty cycle"),
min = 0, max = 1, step = .1, value = 1),
sliderInput("nas_size", label = h3("Storage Array Size (TB)"),
min = 0, max = 1000, step = 50, value = 500)
)),
hr(),
# Output tables, filled by the renderTable calls in the server.
fluidRow(
column(3,
h4("Collection Statistics"),
tableOutput("collection_table")
),
column(4, offset = 0.5,
h4("Link and Usage Statistics"),
tableOutput("usage_table")
),
column(4, offset = 0.5,
h4("Disk Array Capacity"),
tableOutput("capacity_table")
)
)
)
# Define server logic required to show calculations
# A single reactive computes all three summary tables (collection, usage,
# capacity) from the slider/select inputs; the renderTable calls below just
# pick one table each from the returned list.
server <- function(input, output, session) {
# Reactive expression to compose a data frame containing all of the values
get_tables <- reactive({
# Image geometry: side length in pixels = 2^N * 1024.
box_dim <- 2 ^ input$box_dim * 2 ^ 10
# don't convert to bits here
box_dim_formatted <- 2 ^ input$box_dim %>%
paste(., "k", sep = "") %>%
paste(. , "x", . )
n_pixels <- box_dim ^ 2
bit_depth <- 2 ^ input$bit_depth
image_size_bits <- n_pixels * bit_depth
# humanReadable() (gdata) expects bytes, hence the /8 conversions below.
image_size_formatted <- image_size_bits %>%
paste("bits = ", humanReadable(. / 8))
frames_collected <- input$seconds_collected * input$fps
movie_size <- image_size_bits * frames_collected
movie_size_formatted <- movie_size %>%
paste("bits = ", humanReadable(. / 8))
grid_size_formatted <- (movie_size * input$movies_per_grid) %>%
paste("bits = ", humanReadable(. / 8))
# Compose data frame
collection_table <- data.frame(
Name = c("Image Dimensions (px x px)",
# "Number of pixels",
"Bit depth",
"Image size",
"Frames collected",
"Movie size",
"Grid size"),
Value = c(box_dim_formatted,
# n_pixels,
bit_depth,
image_size_formatted,
frames_collected,
movie_size_formatted,
grid_size_formatted) %>% as.character
)
# Map the selectInput value (a character index) to a throughput in GB/s,
# then convert to Gb/s.
disk_speed <- switch(input$disk_speed_GB,
"1" = 1.2, # "NVMe SSD (~1.2 GB/s)" = 1.2,
"2" = 2.5, # "NVMe SSD RAID (<= 2.5 GB/s)" = 2.5,
"3" = 0.750, # "SATA SSD (750 MB/s)" = .750,
"4" = 1.5, # SATA SSD RAID (<= 1.5 GB/s)" = 1.5,
"5" = 0.100, # SATA HDD ,
"6" = 0.200, # 2x SATA HDD RAID,
"7" = 0.400, # 4x SATA HDD RAID,
"8" = Inf # "Ramdisk (link-limited)" = Inf),
) * 8 # change to Gb/s
network_link <- switch(input$network_link_Gb,
"1" = 1, # "1 Gb/s" = 1,
"2" = 10, # "10 Gb/s" = 10,
"3" = 20 # "2 x 10 Gb/s (teamed)" = 20),
) * input$network_duty_cycle
# in bits/s
# Effective link speed is the slower of network and disk (Gb/s -> bits/s).
link_speed <- ifelse(network_link < disk_speed,
network_link, disk_speed) * (2^10)^3
limiting_link <- ifelse(network_link < disk_speed,
"network", "local disks")
datarate_per_movie <- (movie_size / (input$time_per_movie * 60)) %>%
round %>%
paste(" bits/s = ", humanReadable(. / 8), "/s", sep = "")
# Transfer and collection times in minutes.
transfer_time_per_movie <- (movie_size / link_speed) / 60
collection_time_per_grid <-
input$time_per_movie * input$movies_per_grid
transfer_time_per_grid <-
transfer_time_per_movie * input$movies_per_grid
datarate_per_day <-
movie_size * input$movies_per_grid * input$grids_per_day
datarate_per_day_formatted <- paste(
round(datarate_per_day), " bits/day = ",
humanReadable(datarate_per_day / 8), "/day", sep = "")
# Array capacity: nas_size is in TB (10^12 bytes); movie_size is in bits.
movie_capacity <- input$nas_size * (10^3)^4 / (movie_size / 8)
grid_capacity <- movie_capacity / input$movies_per_grid
day_capacity <- grid_capacity / input$grids_per_day
usage_table <- data.frame(
Name = c("Data rate/movie",
"Time to transfer movie (min)",
"Collection time/grid (min)",
"Time to transfer grid (min)",
"Collection vs. Transfer Limiting",
"Daily aggregate data rate",
#"Link speed",
"Limiting link"),
Value = c(datarate_per_movie,
transfer_time_per_movie,
collection_time_per_grid,
transfer_time_per_grid,
ifelse(collection_time_per_grid > transfer_time_per_grid,
"collection", "transfer"),
datarate_per_day_formatted,
#link_speed,
limiting_link) %>% as.character
)
capacity_table <- data.frame(
Name = c("Dataset capacity (movies)",
"Dataset capacity (grids)",
"Dataset capacity (days)"),
Value = c(round(movie_capacity),
round(grid_capacity),
round(day_capacity)) %>% as.character
)
list(collection_table, usage_table, capacity_table)
})
output$collection_table <- renderTable({
get_tables()[[1]]
})
output$usage_table <- renderTable({
get_tables()[[2]]
})
output$capacity_table <- renderTable({
get_tables()[[3]]
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
5c0df0926716cc0a98280ebcf18100e91e014d92
|
f367801e7c4f24560f607a5aaa95187a0f963def
|
/cachematrix.R
|
e793878da85aa12b316af2163910508f36598488
|
[] |
no_license
|
MichaelChoudhury/ProgrammingAssignment2
|
6daf6b112982c12e35a75b787f00e712860b44c6
|
c291323e82cdbf4c4b32ab71c2f4cc1e649dce89
|
refs/heads/master
| 2021-01-24T21:03:30.839872
| 2014-11-23T08:51:42
| 2014-11-23T08:51:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,300
|
r
|
cachematrix.R
|
## The two functions below will calculate the inverse of a matrix x.
## If the inverse has been cached as a result of a previous calculation for the same matrix,
## the result is simply returned and no further calculation takes place.
##
## The "makeCacheMatrix" function creates an object that:
## - Initializes a variable 'I'which will save the inverted matrix
## - Contains a function get()to obtain the original matrix
## - Contains a function setIM()to assign the inverse matrix of x to I
## - Contains a function getIM() to obtain the cached inverse matrix
## Wrap matrix `x` in a cache-aware object: a list of accessor closures for
## the matrix itself (get) and for its lazily cached inverse (setIM / getIM).
## The inverse starts out NULL and is filled in by cacheSolve() on demand.
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL                        # cached inverse; NULL until set
  list(
    get   = function() x,                      # the wrapped matrix
    setIM = function(IM) inverse_cache <<- IM, # store a computed inverse
    getIM = function() inverse_cache           # retrieve it (or NULL)
  )
}
## The "cacheSolve" function first performs a check to ascertain if the inverted
## martix has already been calculated and cached. If found, it is simly returned.
##If not, the calculation is made and the result cached and returned.
## Return the inverse of the special "matrix" built by makeCacheMatrix(),
## computing it at most once: a previously cached inverse is returned as-is,
## otherwise the inverse is solved, stored via setIM(), and returned.
cacheSolve <- function(x) {
  cached <- x$getIM()
  # Cache hit: reuse the stored inverse.
  if (!is.null(cached)) {
    message("Getting cached data ...")
    return(cached)
  }
  # Cache miss: compute, store, and return.
  message("Calculating inverse matrix...")
  inverse <- solve(x$get())
  x$setIM(inverse)
  inverse
}
|
a3516f24d9e5410616b4ee35fbad627a75450da6
|
4f9aac69cfacaf3605cafaef44412c666f8b83f8
|
/economics/upgrade-languages.R
|
0fd69235aeb96a9d7e3dde70814dd9bfb95f7883
|
[] |
no_license
|
gopinathsubbegowda/ESEUR-code-data
|
b7c11113892d840295baec19c8bc14817a6ad6eb
|
d576dad762e8551272a6ac302eb4ef1de1153158
|
refs/heads/master
| 2021-05-06T18:50:54.604461
| 2017-11-26T00:33:15
| 2017-11-26T00:33:15
| 112,064,865
| 1
| 0
| null | 2017-11-26T07:42:13
| 2017-11-26T07:42:13
| null |
UTF-8
|
R
| false
| false
| 1,765
|
r
|
upgrade-languages.R
|
#
# upgrade-languages.R, 2 Mar 17
# Data from:
# Information Goods Upgrades: {Theory} and Evidence
# V. Brian Viard
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
# Shared ESEUR configuration (data paths, plotting defaults such as point_col).
source("ESEUR_config.r")
pal_col=rainbow(2) # two colours: gaussian-fit line vs logistic-fit line below
library("plyr")
# Overlay one product's full retail price as a blue line on the current
# price-vs-date plot (invoked once per product by d_ply below).
full_price=function(df)
{
  lines(x = df$Date, y = df$Full.Price, col = "blue")
}
# Compiler pricing data; as.is=TRUE keeps strings as character, not factor.
cc_cpp=read.csv(paste0(ESEUR_dir, "economics/upgrade-languages.csv.xz"), as.is=TRUE)
# Recode OS as a logical: TRUE for Windows, FALSE otherwise (MS-DOS; see axis below).
cc_cpp$OS=(cc_cpp$OS == "Windows")
# Rows with no full price cannot enter any model below.
cc_cpp=subset(cc_cpp, !is.na(Full.Price))
# Dates are month-year strings; pin each to the 1st of the month to parse.
cc_cpp$Date=as.Date(paste0("01-", cc_cpp$Date), format="%d-%b-%y")
# table(cc_cpp$OS, cc_cpp$Cpp)
# C-only compilers (Cpp == 0); defined but not referenced again below.
cc=subset(cc_cpp, Cpp == 0)
# Watcom is excluded from the models -- presumably an outlier; see the
# original study for the rationale.
no_Watcom=subset(cc_cpp, Firm != "Watcom")
plot(no_Watcom$Full.Price, no_Watcom$Upg.Price, col=point_col,
xlab="Full retail price ($)", ylab="Upgrade price ($)\n")
plot(cc_cpp$Date, cc_cpp$Full.Price, col=point_col,
xlab="Date", ylab="Full or Update price ($)\n")
# One full-price line per product, drawn onto the current plot.
d_ply(cc_cpp, .(Product), full_price)
points(cc_cpp$Date, cc_cpp$Upg.Price, col="green")
# Linear model: upgrade price as a function of full price.
upg_mod=glm(Upg.Price ~ Full.Price, data=no_Watcom)
summary(upg_mod)
# Did vendors charge more for Windows versions, compared to DOS,
# of the same compiler?
plot(jitter(no_Watcom$Full.Price), no_Watcom$OS, col=point_col, yaxt="n",
xlab="Full retail price ($)", ylab="OS")
axis(side=2, at=c(0, 1), label=c("MS-DOS", "Windows"))
# Gaussian fit of OS on price (straight line)...
sl=glm(OS ~ Full.Price, data=no_Watcom)
# summary(sl)
lines(no_Watcom$Full.Price, predict(sl), col=pal_col[1])
# ...and a logistic fit for comparison.
b_sl=glm(OS ~ Full.Price, data=no_Watcom, family=binomial)
# summary(b_sl)
x_vals=min(no_Watcom$Full.Price):max(no_Watcom$Full.Price)
lines(x_vals, predict(b_sl, newdata=data.frame(Full.Price=x_vals), type="response"), col=pal_col[2])
# Interaction of price with language (C vs C++) as predictor of OS.
prod_b_sl=glm(OS ~ Full.Price:Cpp, data=no_Watcom, family=binomial)
summary(prod_b_sl)
|
87b0cc25bdbebd31f318ca0bbeccf689001567c6
|
dd0d26163c4a0498de5b25e4ee57c4ce70b2676d
|
/R/autoplotECRResult.R
|
cc1bb3aa455ae805c1774ccb23a5df3cb062f8ff
|
[] |
no_license
|
jakobbossek/ecr
|
a1f97be9b4cb3b2538becebb38c9a5085b8464c9
|
f9954f5b1374cc70776f8b7e780f906e57ca50b7
|
refs/heads/master
| 2020-04-04T07:26:32.216427
| 2017-06-06T11:05:27
| 2017-06-06T11:05:27
| 17,904,690
| 13
| 5
| null | 2016-09-27T10:30:10
| 2014-03-19T13:15:56
|
R
|
UTF-8
|
R
| false
| false
| 4,714
|
r
|
autoplotECRResult.R
|
#' @title
#' Plot optimization trace.
#'
#' @description
#' Call this function on the result object of an \code{\link{doTheEvolution}}
#' function call to visualize the optimization trace.
#'
#' @param object [\code{ecr_result}]\cr
#' ecr result object.
#' @param show.process [\code{logical(1)}]\cr
#' Should the function itself with the population be plotted as well? Thinks makes
#' in particular sense with \code{complete.trace = FALSE} to see the progress.
#' Keep in mind, that this is possible only if the representation is \dQuote{float}
#' and the objective function has at most two decision variables.
#' Default is \code{FALSE}.
#' @param log.fitness [\code{logical(1)}]\cr
#' Log-transform fitness values?
#' Default is \code{FALSE}.
#' @param complete.trace [\code{logical(1)}]\cr
#' Direct show the plot with the fitness for all generations.
#' Default is \code{TRUE}.
#' @param ... [any]\cr
#' Not used.
#' @return [\code{invisible(TRUE)}]
#' @export
autoplot.ecr_single_objective_result = function(
  object,
  show.process = FALSE, log.fitness = FALSE, complete.trace = TRUE, ...) {
  assertFlag(show.process, na.ok = FALSE)
  assertFlag(complete.trace, na.ok = FALSE)
  assertFlag(log.fitness, na.ok = FALSE)
  if (is.null(object$opt.path)) {
    stopf("Cannot plot optimization trace, since obviously no logging took place.")
  }
  # extract OptPath with the logged per-generation fitness statistics
  op = object$opt.path
  op.df = as.data.frame(op, strings.as.factors = TRUE)
  # we start with the second dob, since otherwise there is not enough info to plot
  unique.dobs = unique(op.df$dob)[-1]
  if (complete.trace) {
    # show only the final state instead of animating generation by generation
    unique.dobs = max(unique.dobs)
  }
  # fixed axis bounds so successive animation frames are comparable
  xlim = c(0, max(unique.dobs))
  ylim = range(c(op.df$pop.min.fitness, op.df$pop.max.fitness))
  for (dob in unique.dobs) {
    # trace of all generations up to (and including) the current one
    pl.trace = plotTrace(op.df[which(op.df$dob <= dob), ], xlim, ylim, log.fitness, ...)
    pl.trace = pl.trace + ggtitle("Optimization trace")
    if (show.process) {
      if (object$final.opt.state$control$representation == "custom") {
        stopf("Process cannot be visualized if custom representation was used.")
      }
      obj.fun = object$task$fitness.fun
      task = object$task
      par.set = getParamSet(obj.fun)
      n.params = getNumberOfParameters(obj.fun)
      if (n.params > 2L) {
        stopf("Visualization not possible for functions with more than 2 parameters.")
      }
      if (hasDiscrete(par.set)) {
        stopf("Visualization for mixed/discrete decision spaces not supported at the moment.")
      }
      if (isMultiobjective(obj.fun)) {
        stopf("Visualization not possible for multi-objective functions at the moment.")
      }
      # call smoof plot function and overlay the current population
      pl.fun = autoplot(obj.fun)
      # get interesting stuff out of opt.path in ggplot2 friendly format
      df.points = getOptPathX(op, dob = dob)
      y.name = task$objective.names
      df.points[[y.name]] = getOptPathY(op, dob = dob)
      x.names = getParamIds(par.set, with.nr = TRUE, repeated = TRUE)
      if (n.params == 2L) {
        pl.fun = pl.fun + geom_point(data = df.points, aes_string(x = x.names[1L], y = x.names[2L]), colour = "tomato")
      } else {
        pl.fun = pl.fun + geom_point(data = df.points, aes_string(x = x.names, y = y.name), colour = "tomato")
        # BUG FIX: was `task$minimkze` (typo), which evaluates to NULL and
        # makes `if (NULL)` error; use the task's minimize flag to pick the
        # best-so-far reference line (min when minimizing, max otherwise).
        # TODO confirm the flag is named `minimize` on this task object.
        opt.dir.fun = if (task$minimize) min else max
        pl.fun = pl.fun + geom_hline(yintercept = opt.dir.fun(df.points[[y.name]]), linetype = "dashed", colour = "gray")
      }
      BBmisc::requirePackages(c("grid", "gridExtra"), why = "ecr")
      #FIXME: next line returns errors in 'test_autoplot.R'
      pl = do.call(gridExtra::grid.arrange, list(pl.fun, pl.trace, ncol = 1L))
      # NOTE(review): pl is printed here and again after the if/else, so
      # with show.process = TRUE each frame is drawn twice -- confirm
      # whether the double print is intended.
      print(pl)
    } else {
      pl = pl.trace
      # NOTE(review): returning here exits after the first frame, so with
      # show.process = FALSE and complete.trace = FALSE the animation never
      # advances past the first generation -- confirm intended behaviour.
      return(pl)
    }
    print(pl)
    if (dob != tail(unique.dobs, 1)) {
      pause()
    }
  }
  return(invisible(TRUE))
}
# autoplot function for opt.path used by ecr
# Build a ggplot line chart of the per-generation population fitness
# statistics (min / mean / median / max).
#
#  df          - opt.path as data.frame, one row per generation (dob)
#  xlim, ylim  - fixed axis limits so successive animation frames align
#  log.fitness - if TRUE, draw fitness on a log10 scale
#  ...         - absorbed for compatibility with the caller; unused here
plotTrace = function(df, xlim, ylim, log.fitness, ...) {
  # keep only the per-population summary columns
  ggdf = df[c("dob", "pop.min.fitness", "pop.mean.fitness", "pop.median.fitness", "pop.max.fitness")]
  assertNumeric(ylim, len = 2L, any.missing = FALSE)
  assertNumeric(xlim, len = 2L, any.missing = FALSE)
  assertFlag(log.fitness, na.ok = FALSE)
  requirePackages("reshape2", why = "ecr")
  # long format: one (dob, statistic, value) row per drawn line segment
  ggdf = melt(ggdf, c("dob"))
  ggdf$variable = as.factor(ggdf$variable)
  # one line type per statistic
  pl = ggplot(data = ggdf, mapping = aes_string(x = "dob", y = "value", linetype = "variable"))
  pl = pl + geom_line()
  pl = pl + xlab("Generation") + ylab("Fitness")
  pl = pl + xlim(xlim) + ylim(ylim)
  pl = pl + scale_linetype_discrete(name = "Type")
  if (log.fitness) {
    pl = pl + scale_y_log10()
    pl = pl + ylab("log(Fitness)")
  }
  return(pl)
}
|
16a6c5ee7d7941d846fe6b9d8b188556f72f75ad
|
39396f1d2c1ddea904ff24f2e15efdf4470906e7
|
/R/amino_acid_pal.R
|
b818c716b5555ae61ec964ee3f2aa39193ec3cb0
|
[] |
no_license
|
smsaladi/heliquest
|
40a2a7f17964227c6e4113cf0b481b6373851375
|
45da6bd24f5f2f20ca0bc2b0903d29b500d5288f
|
refs/heads/master
| 2021-09-21T19:17:34.042903
| 2018-08-30T15:02:21
| 2018-08-30T15:02:21
| 61,735,718
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,545
|
r
|
amino_acid_pal.R
|
#' Provide common colour palettes for amino acids
#'
#' Look up a named colouring scheme mapping one-letter amino-acid codes to
#' colours.
#'
#' @param name Name of the palette to retrieve: one of \code{"clustal"},
#'   \code{"lesk"}, \code{"maeditor"}, \code{"cinema"} or
#'   \code{"heliquest"}; any other value falls back to the RasMol
#'   \dQuote{shapely} scheme.
#' @return A named character vector of colours; names are one-letter
#'   amino-acid codes.
#' @keywords color protein amino-acid
#' @export
#' @examples
#' amino_acid_pal('shapely')
amino_acid_pal <- function(name) {
  # Clustal X default residue colours (only the 14 residues Clustal colours).
  clustal <- c(rep("orange", 4),
               rep("red", 3),
               rep("blue", 3),
               rep("green", 4))
  names(clustal) <- c("G", "P", "S", "T",
                      "H", "K", "R",
                      "F", "W", "Y",
                      "I", "L", "M", "V")
  # Lesk's physico-chemical grouping.
  # BUG FIX: the original listed "Y" twice in the hydrophobic group, making
  # the 21-entry name vector longer than the 20 colours, so names(lesk) <-
  # errored on every call.
  lesk <- c(rep("orange", 4),
            rep("green", 9),
            rep("magenta", 3),
            rep("red", 2),
            rep("blue", 2))
  names(lesk) <- c("G", "A", "S", "T",
                   "C", "V", "I", "L", "P", "F", "Y", "M", "W",
                   "N", "Q", "H",
                   "D", "E",
                   "K", "R")
  # MAEditor scheme.
  # BUG FIX: the original had 26 colours against a 22-entry name list (with
  # S/T duplicated); rebuilt to the published 20-residue scheme.
  maeditor <- c(rep("lightgreen", 2),
                "green",
                rep("darkgreen", 4),
                rep("blue", 4),
                rep("lilac", 3),
                "darkblue",
                rep("orange", 2),
                "pink",
                rep("red", 2))
  names(maeditor) <- c("A", "G",
                       "C",
                       "D", "E", "N", "Q",
                       "I", "L", "M", "V",
                       "F", "W", "Y",
                       "H",
                       "K", "R",
                       "P",
                       "S", "T")
  # CINEMA scheme.
  # BUG FIX: H/K/R are three residues but the original supplied only two
  # blues; the palette is now also reachable via switch() below (it was
  # previously built but never returned).
  cinema <- c(rep("blue", 3),
              rep("red", 2),
              rep("green", 4),
              rep("white", 5),
              rep("magenta", 3),
              rep("brown", 2),
              "yellow")
  names(cinema) <- c("H", "K", "R",
                     "D", "E",
                     "S", "T", "N", "Q",
                     "A", "V", "L", "I", "M",
                     "F", "W", "Y",
                     "P", "G",
                     "C")
  # RasMol "shapely" scheme.
  # BUG FIX: N/Q need two copies of their colour and G's colour (#EBEBEB)
  # was missing entirely (18 colours vs 20 names); hex values are now
  # prefixed with "#" so R accepts them as colours.
  shapely <- c(rep("#E60A0A", 2),
               rep("#E6E600", 2),
               rep("#145AFF", 2),
               rep("#FA9600", 2),
               rep("#3232AA", 2),
               rep("#00DCDC", 2),
               "#EBEBEB",
               rep("#0F820F", 3),
               "#C8C8C8",
               "#B45AB4",
               "#8282D2",
               "#DC9682")
  names(shapely) <- c("D", "E",
                      "C", "M",
                      "K", "R",
                      "S", "T",
                      "F", "Y",
                      "N", "Q",
                      "G",
                      "L", "V", "I",
                      "A",
                      "W",
                      "H",
                      "P")
  # HeliQuest helical-wheel colours (unchanged).
  heliquest <- c("gray", "yellow",
                 "red", "red",
                 "yellow", "gray", "lightblue", "yellow", "blue",
                 "yellow", "yellow",
                 "pink", "green", "pink",
                 "blue", "purple", "purple",
                 "yellow", "yellow", "yellow")
  names(heliquest) <- c("A", "C",
                        "D", "E",
                        "F", "G", "H", "I", "K",
                        "L", "M",
                        "N", "P", "Q",
                        "R", "S", "T",
                        "V", "W", "Y")
  # BUG FIX: the original dispatched on the base function `names`
  # (switch(names, ...)), which always errored; dispatch on the `name`
  # argument instead.  Unmatched names fall through to shapely.
  switch(name,
         clustal = clustal,
         lesk = lesk,
         maeditor = maeditor,
         cinema = cinema,
         heliquest = heliquest,
         shapely)
}
|
ec320b6ec3abb0221a1e7a509fc7e55e218088b2
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.security.identity/man/ram.Rd
|
97c838debbf9111d3532cdcd361d609db561e91b
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 9,737
|
rd
|
ram.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ram_service.R
\name{ram}
\alias{ram}
\title{AWS Resource Access Manager}
\usage{
ram(config = list(), credentials = list(), endpoint = NULL, region = NULL)
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{credentials}:} {\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
}}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e. \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
\item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
}}
\item{credentials}{Optional credentials shorthand for the config parameter
\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
}}
\item{endpoint}{Optional shorthand for complete URL to use for the constructed client.}
\item{region}{Optional shorthand for AWS Region used in instantiating the client.}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
This is the \emph{Resource Access Manager API Reference}. This documentation
provides descriptions and syntax for each of the actions and data types
in RAM. RAM is a service that helps you securely share your Amazon Web
Services resources to other Amazon Web Services accounts. If you use
Organizations to manage your accounts, then you can share your resources
with your entire organization or to organizational units (OUs). For
supported resource types, you can also share resources with individual
Identity and Access Management (IAM) roles and users.
To learn more about RAM, see the following resources:
\itemize{
\item \href{https://aws.amazon.com/ram/}{Resource Access Manager product page}
\item \href{https://docs.aws.amazon.com/ram/latest/userguide/}{Resource Access Manager User Guide}
}
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- ram(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical",
sts_regional_endpoint = "string"
),
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string"
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[=ram_accept_resource_share_invitation]{accept_resource_share_invitation} \tab Accepts an invitation to a resource share from another Amazon Web Services account\cr
\link[=ram_associate_resource_share]{associate_resource_share} \tab Adds the specified list of principals and list of resources to a resource share\cr
\link[=ram_associate_resource_share_permission]{associate_resource_share_permission} \tab Adds or replaces the RAM permission for a resource type included in a resource share\cr
\link[=ram_create_permission]{create_permission} \tab Creates a customer managed permission for a specified resource type that you can attach to resource shares\cr
\link[=ram_create_permission_version]{create_permission_version} \tab Creates a new version of the specified customer managed permission\cr
\link[=ram_create_resource_share]{create_resource_share} \tab Creates a resource share\cr
\link[=ram_delete_permission]{delete_permission} \tab Deletes the specified customer managed permission in the Amazon Web Services Region in which you call this operation\cr
\link[=ram_delete_permission_version]{delete_permission_version} \tab Deletes one version of a customer managed permission\cr
\link[=ram_delete_resource_share]{delete_resource_share} \tab Deletes the specified resource share\cr
\link[=ram_disassociate_resource_share]{disassociate_resource_share} \tab Removes the specified principals or resources from participating in the specified resource share\cr
\link[=ram_disassociate_resource_share_permission]{disassociate_resource_share_permission} \tab Removes a managed permission from a resource share\cr
\link[=ram_enable_sharing_with_aws_organization]{enable_sharing_with_aws_organization} \tab Enables resource sharing within your organization in Organizations\cr
\link[=ram_get_permission]{get_permission} \tab Retrieves the contents of a managed permission in JSON format\cr
\link[=ram_get_resource_policies]{get_resource_policies} \tab Retrieves the resource policies for the specified resources that you own and have shared\cr
\link[=ram_get_resource_share_associations]{get_resource_share_associations} \tab Retrieves the lists of resources and principals that associated for resource shares that you own\cr
\link[=ram_get_resource_share_invitations]{get_resource_share_invitations} \tab Retrieves details about invitations that you have received for resource shares\cr
\link[=ram_get_resource_shares]{get_resource_shares} \tab Retrieves details about the resource shares that you own or that are shared with you\cr
\link[=ram_list_pending_invitation_resources]{list_pending_invitation_resources} \tab Lists the resources in a resource share that is shared with you but for which the invitation is still PENDING\cr
\link[=ram_list_permission_associations]{list_permission_associations} \tab Lists information about the managed permission and its associations to any resource shares that use this managed permission\cr
\link[=ram_list_permissions]{list_permissions} \tab Retrieves a list of available RAM permissions that you can use for the supported resource types\cr
\link[=ram_list_permission_versions]{list_permission_versions} \tab Lists the available versions of the specified RAM permission\cr
\link[=ram_list_principals]{list_principals} \tab Lists the principals that you are sharing resources with or that are sharing resources with you\cr
\link[=ram_list_replace_permission_associations_work]{list_replace_permission_associations_work} \tab Retrieves the current status of the asynchronous tasks performed by RAM when you perform the ReplacePermissionAssociationsWork operation\cr
\link[=ram_list_resources]{list_resources} \tab Lists the resources that you added to a resource share or the resources that are shared with you\cr
\link[=ram_list_resource_share_permissions]{list_resource_share_permissions} \tab Lists the RAM permissions that are associated with a resource share\cr
\link[=ram_list_resource_types]{list_resource_types} \tab Lists the resource types that can be shared by RAM\cr
\link[=ram_promote_permission_created_from_policy]{promote_permission_created_from_policy} \tab When you attach a resource-based policy to a resource, RAM automatically creates a resource share of featureSet=CREATED_FROM_POLICY with a managed permission that has the same IAM permissions as the original resource-based policy\cr
\link[=ram_promote_resource_share_created_from_policy]{promote_resource_share_created_from_policy} \tab When you attach a resource-based policy to a resource, RAM automatically creates a resource share of featureSet=CREATED_FROM_POLICY with a managed permission that has the same IAM permissions as the original resource-based policy\cr
\link[=ram_reject_resource_share_invitation]{reject_resource_share_invitation} \tab Rejects an invitation to a resource share from another Amazon Web Services account\cr
\link[=ram_replace_permission_associations]{replace_permission_associations} \tab Updates all resource shares that use a managed permission to a different managed permission\cr
\link[=ram_set_default_permission_version]{set_default_permission_version} \tab Designates the specified version number as the default version for the specified customer managed permission\cr
\link[=ram_tag_resource]{tag_resource} \tab Adds the specified tag keys and values to a resource share or managed permission\cr
\link[=ram_untag_resource]{untag_resource} \tab Removes the specified tag key and value pairs from the specified resource share or managed permission\cr
\link[=ram_update_resource_share]{update_resource_share} \tab Modifies some of the properties of the specified resource share
}
}
\examples{
\dontrun{
svc <- ram()
svc$accept_resource_share_invitation(
Foo = 123
)
}
}
|
2cd8917d49306b36cc1e3f67fcaa7b168fbdb99c
|
bca10cf62a15c32150d9276d520a4f527ce3db23
|
/script.r
|
eebc653086c81cdb1a7973cc1c926669efbf50c3
|
[] |
no_license
|
JonMinton/human_fertility_database
|
f198de9418172b45660f364ceac4cd6a91f142ed
|
a9c396c6616f39d2feee8e8eac76a4941b4cb8f1
|
refs/heads/master
| 2020-05-18T11:31:37.807970
| 2015-04-06T22:02:27
| 2015-04-06T22:02:27
| 25,087,294
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,920
|
r
|
script.r
|
# NOTE(review): rm(list=ls()) wipes the caller's workspace; acceptable for a
# stand-alone script, but avoid sourcing this file from a live session.
rm(list=ls())
### To do :
# Combine with female population size estimates from hmd
source("scripts/LoadPackages.R")
# Install/attach everything the script needs (helper from LoadPackages.R).
RequiredPackages(
c(
"r2stl",
"ggplot2",
"reshape2",
"plyr",
"lattice",
"stringr"
)
)
# Human-fertility lexis-square data, one row per country/year/age cell.
data <- read.csv("data/tidy/lexis_square_combined.csv")
# Drop the stray X column (presumably row numbers from an earlier write.csv).
data$X <- NULL
data$code <- tolower(data$code)
# Sanity check: the year range covered by each country code.
ddply(data, .(code), summarise, min_year = min(year), max_year=max(year))
## Render the standard set of contour plots for one country's lexis-square
## data `x` (a single-country subset of `data`, supplied by d_ply below),
## saving each as a 1000x1000 tiff under figures/<measure>/<country code>.tiff.
## Replaces five copy-pasted tiff/contourplot blocks with one helper.
fn <- function(x){
  ## Draw one filled contourplot of `measure` over year x age and write it
  ## to figures/<measure>/<code>.tiff (same path, size and plot parameters
  ## as the original per-measure blocks).
  plot_measure <- function(measure){
    tiff(
      paste0("figures/", measure, "/", x$code[1], ".tiff"),
      1000,
      1000
    )
    print(
      contourplot(
        as.formula(paste(measure, "~ year * age")),
        data=x,
        region=TRUE,
        col.regions=rev(heat.colors(200)),
        cuts=50,
        main=x$code[1]
      )
    )
    dev.off()
  }
  ## Same five figures, in the same order, as the original code.
  for (measure in c("asfr", "total", "cpfr", "exposure", "birth_rate")) {
    plot_measure(measure)
  }
  return(NULL)
}
d_ply(data, .(code), fn)
|
63d55060bd0ad8f4c255ffc5cfbdd48e790a8767
|
3b1c82ecb7622ce0030470c19732c17f6fda89ff
|
/SC2019Lab-3-王哲涵-16081043.R
|
f5cd20e9cb4912bef1bd8a53a06e6d27c42f9b97
|
[] |
no_license
|
anhnguyendepocen/SC2019-assignments
|
64bbb7a8d82afba4cc636122ed89268db8aca25e
|
a4cc348c40c4dc4cb373cbbde2cf92acb71cd69b
|
refs/heads/master
| 2020-09-02T03:41:35.656473
| 2019-04-12T12:48:48
| 2019-04-12T12:48:48
| null | 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 811
|
r
|
SC2019Lab-3-王哲涵-16081043.R
|
# Text-processing exercise on a news article (comments translated to English).
x<-readLines('C:/BABAnews.txt',encoding="UTF-8") # read the file; encoding= prevents garbled characters
x # print it
str(x) # determine the number of paragraphs in the article
library(glue)
trim(x)
nchar(x) # count the characters per paragraph
ee<-as.matrix(x) # convert x to matrix form
y<-paste(ee,collapse = " ") # merge the paragraphs with paste() and print
regexpr("技术架构", x) # the match positions show the article contains this phrase
r <- regexpr("双11", x[1:5])
m <- regmatches(x[1:5], r)
d <- gsub("双11", "双十一", m) # rewrite the matches to the long form
x<-list(a="C:/temp/Bribane",b="C:/temp/Cairns",c="C:/temp/Melbourne",d="C:/temp/Syndey")
## Highest recorded maximum temperature in the CSV at path `name`.
hottest <- function(name){
  ## BUG FIX: the original called read.csv("name"), i.e. it always looked
  ## for a file literally called "name" instead of the path in the argument.
  y <- read.csv(name) # read the file
  ## BUG FIX: the original referenced the bare column `temp.max`, which is
  ## not in scope; take it from the data frame, ignoring missing values.
  max(y$temp.max, na.rm = TRUE) # compute the maximum
}
# Apply hottest() to each city's file path.
lapply(x,hottest)
debug(hottest)
# Stepping through with debug() located the original fault: in
# y<-read.csv("name") the extra quotation marks turned the variable `name`
# into the literal string "name".
9d8021f7df5e17f98d514a70d426cf51c01d7f82
|
09486238326c1adcb80b29bdb0023ca65155ccb7
|
/SC reuslts/alltogather.NSGAII.R
|
9144498478518b825316e8720c200911f70608d5
|
[] |
no_license
|
shaikatcse/EnergyPLANDomainKnowledgeEAStep1
|
127b58e0727d19a2ed999f7f24a8921b79d5a6d9
|
649cd38e0ca5e53105f9f6a051831da9cf401646
|
refs/heads/master
| 2022-09-28T20:03:56.327477
| 2022-09-20T15:47:03
| 2022-09-20T15:47:03
| 16,055,790
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,271
|
r
|
alltogather.NSGAII.R
|
# Generates a 6x3 grid of boxplots for six benchmark problems solved with
# NSGA-II: each row is one problem; column 1 shows the stopping generation,
# column 2 the hypervolume difference (HVd), column 3 the epsilon
# difference (epsd).  Adjust resultDirectory below to your own checkout.
postscript("ALL_NSGAII.eps", horizontal=FALSE, onefile=FALSE, height=11, width=7, pointsize=10)
#pdf("All_NSGAII.pdf", width=7, height=11,pointsize=10)
#resultDirectory<-"C:/Users/mahbub/Documents/GitHub/EnergyPLANDomainKnowledgeEAStep1/StoppingCriteriaStudies/data/NSGAIISC"
# Path to the NSGA-II result directory (one sub-directory per problem).
resultDirectory<-"C:/Users/mahbub/Documents/GitHub/EnergyPLANDomainKnowledgeEAStep1/StoppingCriteriaStudies/data/NSGAIISC"
#default number of generations for each problem
# Default (maximum) number of generations configured for each benchmark
# problem; used as the reference line in the stop-generation boxplots.
# Returns NULL (as the original if/else chain did) for unknown problems.
returnStdStGen <- function(problem)
{
  switch(problem,
         ZDT1  = 200,
         ZDT2  = 200,
         ZDT3  = 200,
         ZDT4  = 200,
         DTLZ2 = 300,
         DTLZ5 = 200)
}
# Boxplot of the stopping generations found by the four stopping criteria
# (AHD+Div, AHD, Div, OCD) over 30 runs of `problem`, with a horizontal
# reference line at the problem's default generation budget.  Reads the
# per-criterion result files from the global `resultDirectory`.
funStPlot <- function(problem)
{
  problem_dir <- file.path(resultDirectory, problem)
  stgen_hvd <- scan(file.path(problem_dir, "StGenHVDNew"))  # AHD+Div
  stgen_ahd <- scan(file.path(problem_dir, "StGenAHDNew"))  # AHD only
  stgen_dv  <- scan(file.path(problem_dir, "StGenDVNew"))   # Div only
  stgen_ocd <- scan(file.path(problem_dir, "StopOCD"))      # OCD
  labels <- c("AHD+Div", "AHD", "Div", "OCD")
  ref_line <- returnStdStGen(problem)
  if (problem == "DTLZ2") {
    # DTLZ2's values sit too low for the default y-range to show the
    # reference line at generation 300, so widen the limits explicitly.
    boxplot(stgen_hvd, stgen_ahd, stgen_dv, stgen_ocd, names = labels,
            notch = FALSE, outline = FALSE, ylim = c(0, 310))
  } else {
    boxplot(stgen_hvd, stgen_ahd, stgen_dv, stgen_ocd, names = labels,
            notch = FALSE, outline = FALSE)
  }
  abline(h = ref_line)
  title(font.main = 1, main = paste(problem, "StopGen", sep = ":"))
}
# Boxplot of the hypervolume difference (HVd) at the stopping generation
# for the four stopping criteria over 30 runs of `problem`.  Reads the
# per-criterion result files from the global `resultDirectory`.
funHVDPlot <- function(problem)
{
  problem_dir <- file.path(resultDirectory, problem)
  hvd_all <- scan(file.path(problem_dir, "HVDNew"))     # AHD+Div
  hvd_ahd <- scan(file.path(problem_dir, "HVDAHDNew"))  # AHD only
  hvd_dv  <- scan(file.path(problem_dir, "HVDDVNew"))   # Div only
  hvd_ocd <- scan(file.path(problem_dir, "HVDOCD"))     # OCD
  labels <- c("AHD+Div", "AHD", "Div", "OCD")
  boxplot(hvd_all, hvd_ahd, hvd_dv, hvd_ocd, names = labels,
          notch = FALSE, outline = FALSE)
  abline(h = 0.0)
  # Plotmath title: "<problem>:HV_d".
  title(main = substitute(problem*':HV'[d], list(problem = problem)))
}
# Boxplot of the epsilon difference (epsd) at the stopping generation for
# the four stopping criteria over 30 runs of `problem`.  Reads the
# per-criterion result files from the global `resultDirectory`.
funEpsDPlot <- function(problem)
{
  problem_dir <- file.path(resultDirectory, problem)
  epsd_all <- scan(file.path(problem_dir, "EpsDNew"))     # AHD+Div
  epsd_ahd <- scan(file.path(problem_dir, "EpsDAHDNew"))  # AHD only
  epsd_dv  <- scan(file.path(problem_dir, "EpsDDVNew"))   # Div only
  epsd_ocd <- scan(file.path(problem_dir, "EpsDOCD"))     # OCD
  labels <- c("AHD+Div", "AHD", "Div", "OCD")
  boxplot(epsd_all, epsd_ahd, epsd_dv, epsd_ocd, names = labels,
          notch = FALSE, outline = FALSE)
  abline(h = 0.0)
  # Plotmath title: "<problem>:eps_d".
  title(main = substitute(problem*':eps'[d], list(problem = problem)))
}
# 6 rows (one per problem) x 3 columns (StopGen, HVd, epsd), tight margins.
par(mfrow = c(6,3), mar=c(2, 2, 2, 2) + 0.1)
#par(mfrow=c(6,3))
# One row of three plots per benchmark problem.
prob1<-"ZDT1"
funStPlot(prob1)
funHVDPlot(prob1)
funEpsDPlot(prob1)
prob1<-"ZDT2"
funStPlot(prob1)
funHVDPlot(prob1)
funEpsDPlot(prob1)
prob1<-"ZDT3"
funStPlot(prob1)
funHVDPlot(prob1)
funEpsDPlot(prob1)
prob1<-"ZDT4"
funStPlot(prob1)
funHVDPlot(prob1)
funEpsDPlot(prob1)
prob1<-"DTLZ2"
funStPlot(prob1)
funHVDPlot(prob1)
funEpsDPlot(prob1)
prob1<-"DTLZ5"
funStPlot(prob1)
funHVDPlot(prob1)
funEpsDPlot(prob1)
# Close the postscript device opened at the top of the file.
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.