Dataset schema (one row per file; ⌀ marks nullable columns):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–327 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–91 |
| license_type | string | 2 classes |
| repo_name | string | length 5–134 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 46 classes |
| visit_date | timestamp[us] | 2016-08-02 22:44:29 – 2023-09-06 08:39:28 |
| revision_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| committer_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| github_id | int64 (⌀) | 19.4k – 671M |
| star_events_count | int64 | 0 – 40k |
| fork_events_count | int64 | 0 – 32.4k |
| gha_license_id | string | 14 classes |
| gha_event_created_at | timestamp[us] (⌀) | 2012-06-21 16:39:19 – 2023-09-14 21:52:42 |
| gha_created_at | timestamp[us] (⌀) | 2008-05-25 01:21:32 – 2023-06-28 13:19:12 |
| gha_language | string | 60 classes |
| src_encoding | string | 24 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7 – 9.18M |
| extension | string | 20 classes |
| filename | string | length 1–141 |
| content | string | length 7 – 9.18M |
---
repo_name: cran/wrspathrow | path: /man/wrspathrow.Rd | filename: wrspathrow.Rd | extension: rd | language: R | src_encoding: UTF-8 | length_bytes: 138 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: d40ea69e6a8b74da71597be6e08756d402d804d6 | directory_id: c4b1763f85d9eca3ed919a6392e237b17f018929 | content_id: 048ef7caf18ff90572317dab2cf796e3820ae616
snapshot_id: b69b7644cbd584f3ecf6573174ed78d17bd582bc | revision_id: b92e07936950a9783d4304bc7905aaa12c3f9f6f | branch_name: refs/heads/master
visit_date: 2021-01-01T17:57:29.424727 | revision_date: 2014-02-12T00:00:00 | committer_date: 2014-02-12T00:00:00 | github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
\docType{package}
\name{wrspathrow}
\alias{wrspathrow}
\alias{wrspathrow-package}
\title{wrspathrow}
\description{
wrspathrow
}
---
repo_name: vsrimurthy/EPFR | path: /man/rquaternion.Rd | filename: rquaternion.Rd | extension: rd | language: R | src_encoding: UTF-8 | length_bytes: 318 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: true
blob_id: de02d84e8bd156db890b889f4a8ed01d3a5fcf72 | directory_id: 120de1ae49850f8212efc39ab9fa266f175dc4c6 | content_id: c053125efce68b408d1c92faee303a52e5e0df7e
snapshot_id: 168aed47aa2c48c98be82e3d8c833d89e1d11e04 | revision_id: 544471a8d0cf75c7d65a195b9f6e95d6b1d6800f | branch_name: refs/heads/master
visit_date: 2023-08-02T14:50:25.754990 | revision_date: 2023-07-29T13:56:39 | committer_date: 2023-07-29T13:56:39 | github_id: 118,918,801 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{rquaternion}
\alias{rquaternion}
\title{rquaternion}
\usage{
rquaternion(x)
}
\arguments{
\item{x}{= number of quaternions desired}
}
\description{
An n x 4 matrix of randomly generated quaternions of unit norm
}
\keyword{rquaternion}
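The description above is terse. For illustration only, here is a hedged sketch of one standard way to produce an n x 4 matrix of unit quaternions (drawing i.i.d. normals and normalizing each row gives points uniform on the 3-sphere); the function body is an assumption, not the EPFR source:

```r
# Hypothetical sketch, not the package implementation.
rquaternion_sketch <- function(x) {
  m <- matrix(rnorm(x * 4), nrow = x, ncol = 4)
  m / sqrt(rowSums(m^2))  # divide each row by its Euclidean norm
}
q <- rquaternion_sketch(5)
stopifnot(all(abs(rowSums(q^2) - 1) < 1e-12))
```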
---
repo_name: cran/qdap | path: /man/proportions.end_mark_by.Rd | filename: proportions.end_mark_by.Rd | extension: rd | language: R | src_encoding: UTF-8 | length_bytes: 413 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: true
blob_id: 4563e024ab2ec36128c13472b63d5017fd63b1d6 | directory_id: 91a77be68e5ad1aa16e9a2681ba6fb090c118e4d | content_id: 8d07922513de7f22c2aab74d6196a91ece0e8c6a
snapshot_id: e42f194e98a38eb02084eb6ac92dd587024b8540 | revision_id: 5f032a6a8bf41255cd2547b11325ed457a02a72a | branch_name: refs/heads/master
visit_date: 2023-05-25T03:10:36.324940 | revision_date: 2023-05-11T05:10:02 | committer_date: 2023-05-11T05:10:02 | github_id: 17,698,836 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/end_mark.R
\name{proportions.end_mark_by}
\alias{proportions.end_mark_by}
\title{Question Counts}
\usage{
\method{proportions}{end_mark_by}(x, ...)
}
\arguments{
\item{x}{The end_mark_by object.}
\item{\ldots}{ignored}
}
\description{
View \code{\link[qdap]{end_mark_by}} proportions.
}
\details{
end_mark_by Method for proportions
}
---
repo_name: AST87/godmc | path: /resources/cellcounts/houseman.R | filename: houseman.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 1,478 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: a43b971b8474c0ccd9895ea3b3b8184abf4f8eb2 | directory_id: 0dddd513e0dc84f80c46ddb2e1c7b4d6a050993d | content_id: 80df035ce8498b4f44dbf69364cc1600ed349aba
snapshot_id: 843542337e9f51fa7c96c83dd1070bac166cd270 | revision_id: 3dd1949ede6500e134652e1b98a6f8d8a35e116c | branch_name: refs/heads/master
visit_date: 2022-12-04T04:59:35.495587 | revision_date: 2020-08-21T09:26:13 | committer_date: 2020-08-21T09:26:13 | github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
# Adapted from https://gist.github.com/brentp/5058805
# Make a rank transformed methylation set
# Make a cell-count-adjusted and rank transformed methylation set
# These values will not be 0-1
source('resources/cellcounts/wbcInference-V112.R')
##
arguments <- commandArgs(T)
methylationfile <- arguments[1]
cellcountfile <- arguments[2]
rnmethdatafile <- arguments[3]
ccrnmethdatafile <- arguments[4]
ccrnsquaredmethdatafile <- arguments[5]
nthreads <- as.numeric(arguments[6])
message("Reading methylation data...")
load(methylationfile)
if(cellcountfile != "NULL")
{
cellcounts <- read.table(cellcountfile, he=T)
stopifnot(all(cellcounts$IID == colnames(norm.beta)))
cellcounts <- as.matrix(subset(cellcounts, select=-c(IID)))
}
# Get inverse rank transformed data, no cell count adjustment
dat <- inverse.rank.transform(norm.beta, nthreads)
write.table(round(dat, 3), file=rnmethdatafile, row=TRUE, col=TRUE, qu=FALSE, sep="\t")
# adjust for cell counts (only when a cell count file was supplied)
if(cellcountfile != "NULL")
{
dat <- adjust.beta(norm.beta, cellcounts, mc.cores=nthreads)
# and rank transform the cell-count-adjusted betas
dat <- inverse.rank.transform(dat, nthreads)
write.table(round(dat, 3), file=ccrnmethdatafile, row=TRUE, col=TRUE, qu=FALSE, sep="\t")
} else {
system(paste("cp -v", rnmethdatafile, ccrnmethdatafile))
}
# Get squared z values of cell count adjusted data
dat <- dat^2
write.table(round(dat, 3), file=ccrnsquaredmethdatafile, row=TRUE, col=TRUE, qu=FALSE, sep="\t")
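The script depends on `inverse.rank.transform()` from the sourced wbcInference-V112.R, which is not shown in this record. For context, a hedged sketch of what a rank-based inverse normal transform typically looks like (this is an assumption about the sourced code, not its actual body):

```r
# Plausible rank-based inverse normal transform (illustrative only).
inverse_rank_transform_sketch <- function(x) {
  # the 0.5 offset keeps the quantiles strictly inside (0, 1)
  qnorm((rank(x, ties.method = "average") - 0.5) / length(x))
}
inverse_rank_transform_sketch(c(0.1, 0.8, 0.3, 0.5))
```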
---
repo_name: skajikaji/hello-world | path: /Research Paper_Soichi/code/T5 with Tests.R | filename: T5 with Tests.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 5,373 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: 17077c1955bc82e2e43f609907f3bafdac03c81d | directory_id: 85147cd2189fdb0735a41e90a25799d594fbedfc | content_id: 1f7e997c98541f153b59fc1b180683ae098d3a97
snapshot_id: 7870f523fc35711327acd91a8b1119229b41e787 | revision_id: d0a7cff6e45556d953db1536f32652ccce89c952 | branch_name: refs/heads/master
visit_date: 2021-01-17T11:57:05.140970 | revision_date: 2017-03-11T22:37:38 | committer_date: 2017-03-11T22:37:38 | github_id: 84,053,205 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2017-03-10T17:17:57 | gha_created_at: 2017-03-06T09:15:28 | gha_language: null
content:
#rm(list=c("D", "lag.fhpolrigaug", "lag.fhpolrigaug."))
require(plyr)
require(plm)   # panel models: plm(), pbgtest(), plmtest()
require(car)   # vif(), linearHypothesis()
#T5.C1
TwoSLS=plm(fhpolrigaug~lag(lrgdpch)+factor(year)+factor(country)|.-lag(lrgdpch)+lag(nsave, 2)+factor(year)+factor(country), data=Five, index = c("code","year"),subset = sample =="1", model = "within", effect="individual")
#Extract observations used in the two-stage least squares regression model.
A = model.frame(TwoSLS)
#rename the variables
Five_inst = rename(A, c("factor(year)" = "year", "factor(country)" = "country"))
F_pols_inst = plm(fhpolrigaug ~ lag.lrgdpch.+factor(year), data = Five_inst,index = c("country","year"), model = "pooling")
summary(F_pols_inst)
#Test for multicollinearity by using pooled data
#VIF (Variance inflation factor)
vif(F_pols_inst)
#Breusch-Godfrey test
pbgtest(F_pols_inst)
#Breusch-Pagan test for homoskedasticity
plmtest(F_pols_inst, type="bp")
#T5.C2
F_folswod_inst = plm(fhpolrigaug ~ lag.lrgdpch.+factor(year)+factor(country), data = Five_inst, index = c("country","year"), model = "within", effect = "individual")
summary(F_folswod_inst)
#Breusch-Godfrey test
pbgtest(F_folswod_inst)
#Breusch-Pagan test for homoskedasticity
plmtest(F_folswod_inst, type="bp")
#T5.C3
#To make an observation data set used in column (5)
TwoSLS_1=plm(fhpolrigaug~lag(lrgdpch)+lag(fhpolrigaug)+factor(year)+factor(country)|.-lag(lrgdpch)+lag(nsave, 2)+factor(year)+factor(country), data=Five, index = c("code","year"),subset = sample =="1", model = "within", effect="individual")
#Extract observations used in the two-stage least squares regression model.
B = model.frame(TwoSLS_1)
#Rename the variables in data B
Five_inst_1 = rename(B, c("factor(year)" = "year", "factor(country)" = "country"))
F_fols_inst = plm(fhpolrigaug ~ lag.fhpolrigaug.+lag.lrgdpch.+factor(country)+factor(year), data = Five_inst_1, index = c("country","year"), model = "within", effect = "individual")
summary(F_fols_inst)
#Breusch-Godfrey test
pbgtest(F_fols_inst)
#Breusch-Pagan test for homoskedasticity
plmtest(F_fols_inst, type="bp")
#T5.C4
# First stage regression
Firststage_4=plm(lag(lrgdpch)~lag(nsave, 2)+factor(year)+factor(country), data = Five, subset = sample == "1", na.action = na.omit, index = c("code","year"), model = "within", effect="individual")
summary(Firststage_4)
# 2SLS regression by hand
#fit_lrgdpch=predict(Firststage)
#length(fit_lrgdpch)
#Five$fit_lrgdpch=predict(Firststage)
TwoSLS=plm(fhpolrigaug~lag(lrgdpch)+factor(year)+factor(country)|.-lag(lrgdpch)+lag(nsave, 2)+factor(year)+factor(country), data=Five, index = c("code","year"),subset = sample =="1", model = "within", effect="individual") # Second Stage: Regress outcome on x and fitted values from the first stage
summary(TwoSLS)
#Breusch-Godfrey test
pbgtest(TwoSLS)
#Breusch-Pagan test for homoskedasticity
plmtest(TwoSLS, type="bp")
#T5. C5
TwoSLS_5=plm(fhpolrigaug~lag(fhpolrigaug)+lag(lrgdpch)+factor(year)+factor(country)|.-lag(lrgdpch)+lag(fhpolrigaug)+lag(nsave, 2)+factor(year)+factor(country), data=Five, index = c("code","year"),subset = sample =="1", model = "within", effect="individual") # Second Stage: Regress outcome on x and fitted values from the first stage
summary(TwoSLS_5)
Firststage_5=plm(lag(lrgdpch)~lag(fhpolrigaug)+lag(nsave, 2)+factor(year)+factor(country), data = Five, subset = sample == "1", na.action = na.omit, index = c("code","year"), model = "within", effect="individual")
summary(Firststage_5)
#Breusch-Godfrey test
pbgtest(TwoSLS_5)
#Breusch-Pagan test for homoskedasticity
plmtest(TwoSLS_5, type="bp")
#T5. C7
TwoSLS_7=plm(fhpolrigaug~lag(lrgdpch)+lag(laborshare)+factor(year)+factor(country)|.-lag(lrgdpch)+lag(laborshare)+lag(nsave, 2)+factor(year)+factor(country), data=Five, index = c("code","year"),subset = sample =="1", model = "within", effect="individual") # Second Stage: Regress outcome on x and fitted values from the first stage
summary(TwoSLS_7)
#Breusch-Godfrey test
pbgtest(TwoSLS_7)
#Breusch-Pagan test for homoskedasticity
plmtest(TwoSLS_7, type="bp")
#T5. C8
TwoSLS_8=plm(fhpolrigaug~lag(fhpolrigaug, 1:3)+lag(lrgdpch)+factor(year)+factor(country)|.-lag(lrgdpch)+lag(fhpolrigaug)+lag(nsave, 2)+factor(year)+factor(country), data=Five, index = c("code","year"),subset = sample =="1", model = "within", effect="individual") # Second Stage: Regress outcome on x and fitted values from the first stage
summary(TwoSLS_8)
(R <- matrix(c(1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),byrow=TRUE,nrow=3))
(r <- c(0,0,0))
linearHypothesis(TwoSLS_8, hypothesis.matrix=R, rhs = r)
(R <- matrix(c(1,0,0,0,0,0,0,0,0,0,0), byrow=TRUE, nrow = 1))
(r = c(0))
linearHypothesis(TwoSLS_8, hypothesis.matrix=R, rhs = r)
#Breusch-Godfrey test
pbgtest(TwoSLS_8)
#Breusch-Pagan test for homoskedasticity
plmtest(TwoSLS_8, type="bp")
#T5. C9
TwoSLS_9=plm(fhpolrigaug~lag(lrgdpch)+factor(year)+factor(country)|.-lag(lrgdpch)+lag(nsave, 2:3)+factor(year)+factor(country), data=Five, index = c("code","year"),subset = sample =="1", model = "within", effect="individual") # Second Stage: Regress outcome on x and fitted values from the first stage
summary(TwoSLS_9)
#Breusch-Godfrey test
pbgtest(TwoSLS_9)
#Breusch-Pagan test for homoskedasticity
plmtest(TwoSLS_9, type="bp")
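The models above lean on plm's two-part formula syntax for instrumental variables: regressors come before `|`, instruments after it, and `.` stands for the original right-hand side (so `.-lag(lrgdpch)+lag(nsave, 2)` swaps the endogenous regressor for its instrument). A hedged, self-contained toy sketch on plm's bundled Grunfeld data (the instrument choice here is purely illustrative, not a meaningful identification strategy):

```r
# Minimal sketch of plm's IV formula, assuming only the plm package:
# "inv ~ value + capital" is the structural equation; after "|",
# ". - value + lag(value)" replaces value with its lag as the instrument.
library(plm)
data("Grunfeld", package = "plm")
iv_demo <- plm(inv ~ value + capital | . - value + lag(value),
               data = Grunfeld, index = c("firm", "year"),
               model = "within", effect = "individual")
summary(iv_demo)
```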
---
repo_name: Rapporteket/Hjerneslag | path: /man/SlagPreprosess.Rd | filename: SlagPreprosess.Rd | extension: rd | language: R | src_encoding: UTF-8 | length_bytes: 656 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: true
blob_id: 97c6773ddc2c47dc5ff6f61228f86bb70f944e1e | directory_id: 4aa3258f6d4299229ac4571a0f8a2f379725c787 | content_id: fdc01f34b6879223de8a75ba01fcd3c044b0e67d
snapshot_id: 63eb024fd4dd23e5c2ffb94626a0b0710b4ebb00 | revision_id: 8e31d9c200b6f2abb7d0a843f870ccd812e92b80 | branch_name: refs/heads/rel
visit_date: 2020-04-12T09:37:44.384385 | revision_date: 2019-09-05T09:03:33 | committer_date: 2019-09-05T09:03:33 | github_id: 53,346,839 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2016-08-23T07:11:04 | gha_created_at: 2016-03-07T18:01:03 | gha_language: R
content:
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SlagPreprosesser.R
\name{SlagPreprosess}
\alias{SlagPreprosess}
\title{Preprocess data from Hjerneslag}
\usage{
SlagPreprosess(RegData = RegData, reshID = reshID)
}
\arguments{
\item{RegData}{A data frame containing all required variables from the registry}
\item{reshID}{Parameter passed on from login at helseregister.no, indicating
which unit in the specialist health service the user belongs to}
}
\value{
Data: a list containing the filtered dataset and the hospital name corresponding to reshID
}
\description{
This function defines variables and removes registrations that have not been finalized
}
---
repo_name: xiangzhou09/rwrmed | path: /man/decomp.Rd | filename: decomp.Rd | extension: rd | language: R | src_encoding: UTF-8 | length_bytes: 1,684 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: true
blob_id: f69eadf31603551029197b2ec93d424125f0af9d | directory_id: a82f3bbae551066b5c331b34abde9c3fe5596028 | content_id: 200276256a2a72c078523fbd4c38aa90864aad90
snapshot_id: cb69579e4d3b1e3f6439263a1b61b3bdfbc8edd6 | revision_id: 9f80f387797a5964888410c3da0b56cb8ed26e47 | branch_name: refs/heads/master
visit_date: 2020-05-18T04:49:24.005141 | revision_date: 2019-04-30T19:20:30 | committer_date: 2019-04-30T19:20:30 | github_id: 184,185,920 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/decomp.R
\name{decomp}
\alias{decomp}
\title{Causal Effect Decomposition Based on a Fitted \code{rwrmed} Model}
\usage{
decomp(object, a0 = 0, a1 = 1, m = 0, bootstrap = TRUE,
rep = 250)
}
\arguments{
\item{object}{An object of class \code{rwrmed}.}
\item{a0}{The baseline level of treatment.}
\item{a1}{The level of treatment to be contrasted with the baseline.}
\item{m}{The level of the mediator at which the CDE is evaluated.}
\item{bootstrap}{Whether to compute standard errors and 95% confidence intervals using the
nonparametric bootstrap.}
\item{rep}{Number of bootstrap replications if \code{bootstrap = TRUE}. Default is 250.}
}
\value{
A list of two elements.
\item{twocomp}{Two component decomposition of the rATE into rNDE and rNIE.}
\item{fourcomp}{Four component decomposition of the rATE into CDE, rINTREF, rPIE, and rINTMED.}
}
\description{
\code{decomp} is a function that implements causal effect decomposition based on a fitted
\code{rwrmed} model. It returns a two-component decomposition of the total effect into
the randomized interventional analogues of the natural direct effect (rNDE) and the natural
indirect effect (rNIE). It also returns a four-component decomposition of the total effect into
the controlled direct effect (CDE) and the randomized analogues of the reference interaction
effect (rINTREF), the mediated interaction effect (rINTMED), and the pure indirect effect (rPIE).
}
\seealso{
\code{\link{rwrmed}} for implementing the regression-with-residuals (RWR)
approach to causal mediation.
}
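A hedged usage sketch built only from the arguments documented above; `fit` is a placeholder for an object returned by `rwrmed()`, not a runnable value here:

```r
# Hypothetical call; "fit" stands in for a fitted rwrmed object.
dec <- decomp(fit, a0 = 0, a1 = 1, m = 0, bootstrap = TRUE, rep = 250)
dec$twocomp   # rATE split into rNDE + rNIE
dec$fourcomp  # rATE split into CDE + rINTREF + rPIE + rINTMED
```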
---
repo_name: NancyChang/ExData_Project2 | path: /plot3.R | filename: plot3.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 1,332 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: 07080a3928f3fcdd8562d0b4d05c08bb478c9647 | directory_id: 9b7fa6fd3ff07365a824b6e2725dfe6e7e6adb51 | content_id: ecd561c9e187fdbd24bb7f79122eb2edc18b6cd9
snapshot_id: 31a769f568a48ae9f5f2ef494d228de637b5d663 | revision_id: 4e42621ba0491662370de8b68124c1b7576788af | branch_name: refs/heads/master
visit_date: 2016-09-06T05:52:52.956148 | revision_date: 2015-01-28T17:04:04 | committer_date: 2015-01-28T17:04:04 | github_id: 29,081,451 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
#download zip file and unzip the files into working directory
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",temp)
list.files(unzip(temp,exdir=".",overwrite=TRUE))
unlink(temp)
#check if the files exist and read the files into R
if (file.exists('./summarySCC_PM25.rds')) {
NEI <- readRDS("summarySCC_PM25.rds")
}
if (file.exists('./Source_Classification_Code.rds')) {
SCC <- readRDS("Source_Classification_Code.rds")
}
#extract data only in Baltimore and make the table for plot3 using ggplot2
library(ggplot2)
NEI_baltimore <- subset(NEI, NEI$fips == "24510")
#log-transform emissions for the y axis
NEI_baltimore$logEmission <- log(NEI_baltimore$Emissions)
#replace NA/NaN/Inf values on the 'y' axis so they can be plotted
NEI_baltimore$logEmission[!is.finite(NEI_baltimore$logEmission)] <- 0
#plot the bar chart and save as png file
par(mar=c(4,4,4,4))
png(filename='project2_plots/plot3.png',width=480,height=480,units='px')
plot <- ggplot(NEI_baltimore,aes(year,logEmission))
plot + geom_point(aes(color=type),size=2) + geom_smooth(aes(color=type),method="lm")+labs(title="Comparing Emissions from Four Type
Sources in Baltimore City", size=6) +labs(x = "Year", y = expression("log"*PM[2.5]))
dev.off()
---
repo_name: assaron/r-utils | path: /R/io.R | filename: io.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 3,468 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: 444c9bf888c71d5c2ea85880502473cb347f7202 | directory_id: 4fc48d3f06d822656d4e52fc323be81ad939bff9 | content_id: 3a2e91ef977f6bae00ace62b1b79e8f2ed8ccb00
snapshot_id: bb4b7592a6a33b0f67a03e4d1687dbf916397e05 | revision_id: 91eab12fb69b2beed96b4c46ba9f9e25d440de9e | branch_name: refs/heads/master
visit_date: 2020-05-22T04:23:05.733484 | revision_date: 2018-10-27T12:39:24 | committer_date: 2018-10-27T12:39:24 | github_id: 27,912,603 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
options(stringsAsFactors=F)
library(data.table) # as.data.table(), setnames(), fread(), data.table()
#' @export
read.table.smart <- function(path, ...) {
fields <- list(...)
conn <- file(path)
header <- readLines(conn, n=1)
close(conn)
sep <- "\t"
for (s in c("\t", " ", ",")) {
if (grepl(s, header)) {
sep <- s
break
}
}
res <- as.data.table(read.table(path, sep=sep, header=T, stringsAsFactors=F))
oldnames <- character(0)
newnames <- character(0)
for (field in names(fields)) {
if (field %in% colnames(res)) {
next
}
z <- na.omit(
match(
tolower(c(field, fields[[field]])),
tolower(colnames(res))))
if (length(z) == 0) {
next
}
oldnames <- c(oldnames, colnames(res)[z])
newnames <- c(newnames, field)
}
setnames(res, oldnames, newnames)
res
}
#' @export
read.tsv <- function(file, header=T, sep="\t", quote="", comment.char="", check.names=FALSE, ...) {
args <- list(...)
res <- read.table(file, header=header, sep=sep, quote=quote,
comment.char=comment.char, check.names=check.names,
stringsAsFactors=FALSE,
...)
if ((!"row.names" %in% names(args)) && (colnames(res)[1] == "")) {
rownames(res) <- res[, 1]
res[[1]] <- NULL
}
res
}
#' @export
write.tsv <- function(table, dir, file=NULL, gzip=FALSE, row.names=NA, col.names=NA, ...) {
name <- deparse(substitute(table))
table <- as.data.frame(table)
if (is.null(file)) {
file <- file.path(dir, paste0(name, ".tsv", if (gzip) ".gz"))
}
if (is.na(row.names)) {
row.names <- is.character(attr(table, "row.names"))
}
if (!row.names && is.na(col.names)) {
col.names=T
}
for (c in colnames(table)) {
if (is.character(table[[c]])) {
table[[c]] <- sub("#", "", table[[c]])
}
}
if (gzip) {
file <- gzfile(file, "w")
}
write.table(table, file, quote=F,
row.names=row.names, col.names=col.names, sep="\t")
if (gzip) {
close(file)
}
}
#' Reads gtf file to data.table
#' @param file Path to file
#' @import data.table
#' @export
read.gtf <- function(file, attrs.to.extract=c("gene_id", "transcript_id", "gene_type", "gene_name"),
features.to.extract=NULL) {
res <- fread(file, header=F, col.names = c("chr", "source", "feature", "start", "end", "score", "strand", "frame", "attribute"))
if (!is.null(features.to.extract)) {
res <- res[feature %in% features.to.extract,]
}
attrlist <- strsplit(res$attribute, "; *")
attrlist_length <- sapply(attrlist, length)
attrtable <- data.table(rn=rep(seq_along(attrlist), attrlist_length), raw=unlist(attrlist))
attrtable[, name := gsub(" .*", "", raw)]
attrtable[, value := gsub(".* ", "", raw)]
attrtable[, value := gsub('^"(.*)"$', "\\1", value)]
attrtable[, raw := NULL]
all_attrs <- unique(attrtable$name)
attrmatrix <- matrix(nrow = length(attrlist), ncol=length(all_attrs))
colnames(attrmatrix) <- all_attrs
attrtable[, name := match(name, all_attrs)]
attrmatrix[cbind(attrtable$rn, attrtable$name)] <- attrtable$value
res[, attribute := NULL]
res <- cbind(res, attrmatrix)
res
}
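Assuming the file has been sourced, here is a hedged round-trip sketch for `write.tsv()`/`read.tsv()` using a temporary directory (the data frame is made up for illustration):

```r
df <- data.frame(gene = c("a", "b"), score = c(1.5, 2.5),
                 row.names = c("r1", "r2"))
tmp <- tempdir()
write.tsv(df, dir = tmp)                    # writes <tmp>/df.tsv, named after the object
back <- read.tsv(file.path(tmp, "df.tsv"))  # row names restored from the unnamed first column
stopifnot(identical(rownames(back), c("r1", "r2")))
```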
---
repo_name: travs/datasciencecoursera | path: /app_project/ui.R | filename: ui.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 726 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: af5d21321e96cf43a872a5f8dcadc4cf55d7cdf2 | directory_id: 9363049a3b860e7237a54131e8e4e33ab571a6b2 | content_id: 1747a8dddf83e010a09b2a0299a21e2f1e6e054b
snapshot_id: 0c08ad301bc721f6b14d3a343627cfdd13b96fe8 | revision_id: 7757aeb4d2e65e132e66f3e95289c9cca045fe50 | branch_name: refs/heads/master
visit_date: 2021-01-20T02:20:21.309902 | revision_date: 2014-09-21T21:17:18 | committer_date: 2014-09-21T21:17:18 | github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
library(shiny)
shinyUI(pageWithSidebar(
headerPanel("Population of Newfoundland"),
sidebarPanel(
p("Click on a gender, select a year on the slider and watch the plot change!"),
p("The graph on the right represents the population of Newfoundland across age groups."),
radioButtons("gen", "Gender:",
choices=list("Male" = "Male",
"Female" = "Female",
"Total" = "Total"),
selected="Total"
),
sliderInput("y",
"Year:",
value = 2012,
min = 2010,
max = 2012,
format = "####")
),
mainPanel(
plotOutput("pop_plot")
)
))
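This ui.R expects a server that reads `input$gen` and `input$y` and renders `output$pop_plot`. The repository's server.R is not in this record; a minimal hypothetical companion sketch (the data frame `nl_pop` and its columns are assumptions, not taken from the repo):

```r
# server.R (hypothetical sketch; nl_pop with columns age_group, population,
# gender, year is an assumed data source)
library(shiny)
shinyServer(function(input, output) {
  output$pop_plot <- renderPlot({
    d <- subset(nl_pop, gender == input$gen & year == input$y)
    barplot(d$population, names.arg = d$age_group,
            xlab = "Age group", ylab = "Population")
  })
})
```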
---
repo_name: gaelberon/ExploData-Week4-ProgrammingAssignment | path: /plot2.R | filename: plot2.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 2,905 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: f5b76ae9fc548776335f971df5cf63dabb1c5fe1 | directory_id: d4aa0c3d335e7a058936140a489f8192de8fd06e | content_id: efb1091315bb93050d7b2aa178725d03153d2a2e
snapshot_id: c87825803db293cda5b3cc2d8778d53aea35b82d | revision_id: e931989eb4c3f49f5c73978531ad836c21e96d39 | branch_name: refs/heads/master
visit_date: 2021-01-19T05:44:45.654238 | revision_date: 2017-08-17T08:44:15 | committer_date: 2017-08-17T08:44:15 | github_id: 100,581,443 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
# COURSERA - DATA SCIENCE
# EXPLORATORY DATA - WEEK 4
# PROGRAMMING ASSIGNMENT
## The R file 'plot2.R' aims to answer the following question:
## "Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
## (fips == "24510") from 1999 to 2008?"
## summarySCC_PM25.rds (PM2.5 Emissions Data): This file contains a data frame
## with all of the PM2.5 emissions data for 1999, 2002, 2005, and 2008.
## For each year, the table contains number of tons of PM2.5 emitted from a
## specific type of source for the entire year. Here are the first few rows
##
## List of variables:
## fips: A five-digit number (represented as a string) indicating the U.S.
## county
## SCC: The name of the source as indicated by a digit string (see source code
## classification table)
## Pollutant: A string indicating the pollutant
## Emissions: Amount of PM2.5 emitted, in tons
## type: The type of source (point, non-point, on-road, or non-road)
## year: The year of emissions recorded
## Source_Classification_Code.rds (Source Classification Code Table): This table
## provides a mapping from the SCC digit strings in the Emissions table to the
## actual name of the PM2.5 source. The sources are categorized in a few
## different ways from more general to more specific and you may choose to
## explore whatever categories you think are most useful. For example, source
## “10100101” is known as “Ext Comb /Electric Gen /Anthracite Coal /Pulverized
## Coal”.
################################################################################
## 1. Read the input files
## NB. This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
################################################################################
## 2. Produce a barplot to answer the question:
## "Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
## (fips == "24510") from 1999 to 2008?
## Use the base plotting system to make a plot answering this question."
## Compute dataset for Baltimore
sumPerYearBaltimore <- with(NEI[NEI$fips == "24510", ],
tapply(Emissions, year, sum, na.rm = TRUE))
## Initialize a plotting area with 1 row and 1 column
par(mfrow = c(1,1))
## Plot dataset for Baltimore
barplot(height = sumPerYearBaltimore,
names.arg = names(sumPerYearBaltimore),
main = "PM2.5 Emissions in Baltimore, Maryland",
xlab = "Year",
ylab = "Amount of PM2.5 emitted (tons)")
################################################################################
## 3. Store the plot into the file plot2.png
dev.copy(png,
file = 'plot2.png',
width = 480,
height = 480,
units = "px")
########################################################################
## 4. Close the 'png file' device
dev.off()
---
repo_name: HenrikBengtsson/R.oo | path: /man/getName.environment.Rd | filename: getName.environment.Rd | extension: rd | language: R | src_encoding: UTF-8 | length_bytes: 958 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: 41a46cad90dc783e4d0a9969d6900ab9adbbdf3b | directory_id: 5a7f7ebee0e458863e1da9d2a0fcc93b600d1786 | content_id: 261abb246bed579e28f20496546e6539a6f6c1f1
snapshot_id: 68071bacb43afe2a46201aea0350a3597ee19e6c | revision_id: 4101a141b2fa49a43a10df99f56c180ba2c662e6 | branch_name: refs/heads/master
visit_date: 2023-01-06T23:48:54.872999 | revision_date: 2022-06-12T18:04:23 | committer_date: 2022-06-12T18:04:23 | github_id: 19,437,907 | star_events_count: 20 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2018-05-02T04:51:57 | gha_created_at: 2014-05-04T22:47:54 | gha_language: R
content:
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% getName.environment.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{getName.environment}
\alias{getName.environment}
\title{Gets the name of an environment}
\description{
Gets the name of an environment, e.g. \code{"R_GlobalEnv"} or \code{"0x01ddd060"}.
}
\usage{
\method{getName}{environment}(env, ...)
}
\arguments{
\item{env}{An \code{\link[base]{environment}}.}
\item{...}{Not used.}
}
\value{
Returns a \code{\link[base]{character}} string.
}
\examples{
name <- getName(globalenv())
print(name)
stopifnot(identical(name, "R_GlobalEnv"))
getName(new.env())
}
\author{Henrik Bengtsson}
\seealso{
\code{\link[base:environment]{environmentName}()}.
}
\keyword{programming}
\keyword{methods}
---
repo_name: cran/knnTree | path: /R/predict.knn.R | filename: predict.knn.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 4,149 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: 71c44173c1cfd701c6a47a6768c1b661e734ebd4 | directory_id: ecc14c4cf0997cfeebcf18375cb6b41eec27e842 | content_id: 8dfc9f815c1a68b86c24ee06b9ffcc65d0a89a0b
snapshot_id: f5fb8d7f2306f9713090b201dda974070024794c | revision_id: 7ee46eb731e119379369c49400e4f63274d294ae | branch_name: refs/heads/master
visit_date: 2021-01-01T05:39:15.332980 | revision_date: 2008-05-06T00:00:00 | committer_date: 2008-05-06T00:00:00 | github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
predict.knn <- function(object, test, train, theyre.the.same = FALSE, return.classifications = FALSE,
verbose = 0, ...)
{
#
# predict.knn: predict from an object with class "knn."
#
# Arguments: object: object of class knn.
# test: Items to be predicted
# train: Things to use to do prediction
# theyre.the.same: Are test and train the same? Then use leave-one-out cv
# return.classifications: If true, return the classifications of the test set
# verbose: Level of verbosity
#
# First check to see whether this training set is pure. If so this is easy: if
# they're the same, the error rate is zero; if not it's the proportion of test set
# items whose classes are different from the training set's unique class. In either
# case every item's classification is the training set one.
#
if(all(object$which == FALSE) || (any(names(object) == "pure") && object$
pure == TRUE) || length(unique(train[, 1])) == 1) {
train.11 <- as.character(as.vector(train[1, 1]))
if(theyre.the.same) {
if(return.classifications)
return(list(rate = 0, classifications = rep(
train.11, nrow(train))))
else return(list(rate = 0))
}
else if(return.classifications) {
return(list(rate = mean(test[, 1] != train[1, 1]),
classifications = rep(train.11, nrow(test))))
}
else return(list(rate = mean(test[, 1] != train[1, 1])))
}
#
# Grab the true classes of the training set from column 1. If that column is a factor,
# save the levels and then convert that column to numeric.
#
if(is.factor(train[, 1])) {
class.is.factor <- TRUE
labels <- levels(train[, 1])
train[, 1] <- as.integer(unclass(train[, 1])) - 1
}
else {
class.is.factor <- FALSE
labels <- unique(train[, 1])
}
train <- as.matrix(train)
if(theyre.the.same && !missing(train)) {
warning("They're the same, so I'm ignoring the 'test' argument"
)
test <- 0
}
else {
if(is.factor(test[, 1])) {
class.is.factor <- TRUE
labels <- levels(test[, 1])
test[, 1] <- as.integer(unclass(test[, 1])) - 1
}
else {
class.is.factor <- FALSE
labels <- unique(test[, 1])
}
test <- as.matrix(test)
}
number.of.classes <- length(labels)
k.vec <- object$best.k
k.length <- 1
return.all.rates <- 0
best.error.rate <- 1.1
best.k.index <- -1 # Will come back zero-based, so add 1
which <- c(TRUE, object$which)
scaled <- object$scaled
col.sds <- object$col.sds
if(return.classifications == TRUE)
classifications <- numeric(nrow(test))
else classifications <- 0
backward <- 0
status <- 1 #
#
# Get "filename" (which will only be used if verbose > 0)
#
pr <- Sys.getenv("R_HOME")
pr.chars <- substring(pr, 1:nchar(pr), 1:nchar(pr))
backslash <- pr.chars == "\\"
pr.chars[backslash] <- "/"
pr <- paste(pr.chars, collapse = "")
filename <- paste(pr, "/status.txt", sep = "") #
#
# Call the DLL, and save the result.
#
thang <- .C("knnvar",
as.double(train),
as.integer(c(nrow(train), ncol(train))),
as.double(test),
as.integer(c(nrow(test), ncol(test))),
as.integer(number.of.classes),
as.integer(k.vec),
as.integer(k.length),
as.integer(theyre.the.same),
rate = as.double(best.error.rate),
as.integer(return.all.rates),
best.k.index = as.integer(best.k.index),
which = as.double(which),
scaled = as.integer(scaled),
col.sds = as.double(col.sds),
as.integer(return.classifications),
classifications = as.integer(classifications),
backward = as.integer(backward),
as.integer (0),
as.integer(verbose),
filename,
status = as.integer(status), PACKAGE="knnTree") #
# Produce the output list, which contains the rate plus, if return.classifications
# is true, a vector of predictions. Factorize if necessary.
#
status <- thang$status
if(status != 0)
warning(paste("Uh-oh; bad status", status, "returned"))
if(return.classifications) {
if(class.is.factor)
classes <- factor(thang$classifications, labels =
labels, levels = 0:(length(labels) - 1))
else classes <- thang$classifications
out <- list(rate = thang$rate, classifications = classes)
}
else out <- list(rate = thang$rate) #
return(out)
}
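A hedged usage sketch following the argument comments above; `fit`, `train_df`, and `test_df` are placeholders, not runnable values (column 1 must hold the class label):

```r
# Hypothetical call; fit is an object of class "knn" from knnTree.
res <- predict.knn(fit, test = test_df, train = train_df,
                   return.classifications = TRUE)
res$rate                  # misclassification rate on the test set
head(res$classifications) # predicted labels, factorized if the truth was a factor
```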
---
repo_name: jorgenquaade/ProgAssignment3 | path: /rankhospital.R | filename: rankhospital.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 1,846 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: ad5696080a1854dd729be0093ab4635e56d4175a | directory_id: cf0d12010f7863fd6ac85c370ef6f7318eec54bd | content_id: 4a0c712548f1c9181d488ce98d9f93f121ec9eb4
snapshot_id: 278430ce760f72cdfa58eca2d98ac7b346934810 | revision_id: fc3cf9b10cbe547e685287425e86b2a16dc509a7 | branch_name: refs/heads/master
visit_date: 2021-01-10T09:20:36.253103 | revision_date: 2015-10-02T10:09:38 | committer_date: 2015-10-02T10:09:38 | github_id: 43,416,663 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
## Rank hospitals according to deathrate and return specific rank
## For readability a lot of the statements are broken into multiple lines
rankhospital <- function(state, outcome, num = "best") {
## Initialize parameters we want to be sure are dataframes
inputData <- data.frame()
stateData <- data.frame()
rankedData <- data.frame()
outputData <- data.frame()
rank<-0
numhosp<-0
## Read the input data
inputData <- read.csv("outcome-of-care-measures.csv",
na.strings = c("Not Available"), colClasses = "character")
## Check that state and outcome are valid
if (!state %in% inputData[,7]) stop("invalid state")
if (outcome == "heart attack")
colnum<-11
else if (outcome == "heart failure")
colnum<-17
else if (outcome == "pneumonia")
colnum<-23
else
stop("invalid outcome")
## get the number of hospitals in state
## Subset data according to input parameter state
stateData <- subset(inputData, inputData[,7]==state)
## Now stateData needs to be ordered by rank after mortality rates
## but first mortality rates must be converted to numerics and NA's removed
## Using <=100 is to cheat subset into accepting a logical statement since
## rate cannot exceed 100 percent
stateData[,colnum] <- as.numeric(stateData[,colnum])
## Order the rankedData after rate and hospitalname so that we can return the
## hospital with the name that comes first alphabetically
outputData <- stateData[order(stateData[,colnum],stateData[,2]),]
numhosp <- length(outputData[,2])
if (num == "best"){
outputData <- outputData[1,2]
}
else if (num == "worst"){
outputData <- stateData[order(stateData[,colnum], stateData[,2], decreasing = TRUE),]
outputData <- outputData[1,2]
}
else if ( num > 0 & num <= numhosp){
rank <- num
outputData<-outputData[rank,2]
}
else
outputData<-NA
outputData
}
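A hedged usage sketch (like the function itself, it assumes outcome-of-care-measures.csv sits in the working directory; the state codes are illustrative):

```r
rankhospital("TX", "heart failure", 4)       # name of the 4th-ranked Texas hospital
rankhospital("MD", "heart attack", "worst")  # worst-ranked hospital in Maryland
rankhospital("MN", "pneumonia")              # num defaults to "best"
```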
---
repo_name: TuxML/compilation-analysis | path: /arules-analysis.R | filename: arules-analysis.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 1,033 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: dd519b786c65b4e1ca07930206bec136f2249ccc | directory_id: 27d6a145454a231f1f95560cbbbd2e28177c2ab9 | content_id: ab8dee0c1c18d9451ef518b47110667bf8b008c4
snapshot_id: d5bb76d1d9dcfde3d2a6bf4c8d99e90b1d79643c | revision_id: e6105852719f11fbb8466c25396813dec3ca6278 | branch_name: refs/heads/master
visit_date: 2021-09-26T02:08:57.828119 | revision_date: 2021-09-20T09:30:19 | committer_date: 2021-09-20T09:30:19 | github_id: 179,365,612 | star_events_count: 0 | fork_events_count: 3
gha_license_id: null | gha_event_created_at: 2019-04-19T17:15:33 | gha_created_at: 2019-04-03T20:30:49 | gha_language: Jupyter Notebook
content:
library(readr)
library(arules)
library(dplyr)
df <- read_csv('/Users/macher1/Documents/SANDBOX/compilation-analysis/dataset_after_encoding.csv',
col_types = list(
OPENVSWITCH = col_factor(c("0", "1", "2"))
))
# df <- read_csv('/Users/macher1/Documents/SANDBOX/tuxml-datasets/config_bdd40000-60000.csv')
#df <- read_csv('/Users/macher1/Documents/SANDBOX/tuxml-datasets/config_bdd60000-90000.csv')
df <- Filter(function(x)(length(unique(x))>1), df)
# df['compilation_failure'] <- df['vmlinux'] == -1
#df2 <- Filter(function(x)(Negate(is.numeric(x))), df1)
# df <- df %>% dplyr::select_if(Negate(is.numeric))
# df['date'] <- NULL
rules <- apriori (data=df, parameter=list (supp=0.0000000001, conf = 1.0)) #, appearance = list (default="lhs", rhs="compilation_failure"), control = list (verbose=F))
# rules_conf <- sort (rules, by="support", decreasing=TRUE)
rules_failure <- subset(rules, subset = rhs %in% "compile_success")
tristate <- c(0, 1, 2)
# parse_factor() expects a character vector, not a data frame; kept as scratch
# parse_factor(df, levels = tristate)
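A hedged follow-up sketch for eyeballing the mined rules, mirroring the commented-out sort above (`sort()` and `inspect()` are standard arules generics):

```r
# Rank the retained rules by support and print the top few.
rules_sorted <- sort(rules_failure, by = "support", decreasing = TRUE)
inspect(head(rules_sorted, 5))
```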
---
repo_name: alexpavlakis/yapa | path: /R/fit_house.R | filename: fit_house.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 10,017 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: 63f0a439047b29934acd83eb94cc6345b7e9570c | directory_id: 5ea0c9249859bf4aa5222ffbf6eb6ff3ccec8de0 | content_id: 38ab16b50b0fe7a876ae8c2156e6abc58f59a97a
snapshot_id: 287eabfc7a1d01f781233794f1c8d98cd22b36c1 | revision_id: ac91bba669a481295185eec68de0e54d0521599b | branch_name: refs/heads/master
visit_date: 2021-03-28T16:49:16.491998 | revision_date: 2020-11-02T20:28:40 | committer_date: 2020-11-02T20:28:40 | github_id: 247,876,792 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2020-03-21T00:49:00 | gha_created_at: 2020-03-17T04:10:02 | gha_language: HTML
content:
library(tidyverse)
library(rstan)
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
source("R/process_polls.R")
#source("R/house_data.R")
prior_data <- read_csv("data/prior_data.csv")
gen_res <- read_csv("data/gen_res.csv")
if(!exists("exec_date")) exec_date <- Sys.Date()
generic_ballot <- process_538_gb() %>%
filter(end_date <= exec_date,
end_date > '2020-01-01')
house_races <- process_538_house() %>%
filter(end_date <= exec_date,
end_date > '2020-01-01') %>%
distinct()
write_csv(house_races, "data/house_races.csv")
district_polls <- house_races %>%
#mutate(dem = round(dem + .4*Other), rep = round(rep + .4*Other),
# Other = round(0.2*Other)) %>%
right_join(prior_data %>%
distinct(state, district)) %>%
mutate(state_district = paste(state, district, sep = "-")) %>%
arrange(state, district) %>%
mutate(district_id = match(state_district, unique(state_district)),
days_out = ifelse(is.na(days_out), 365, days_out))
state_district <- unique(district_polls$state_district)
lean <- prior_data %>%
select(-inc) %>%
spread(party, p_lean1) %>%
mutate(state_district = paste(state, district, sep = "-")) %>%
arrange(state, district) %>%
mutate(district_id = match(state_district, unique(state_district))) %>%
select(republican, democrat, other) %>%
mutate(democrat = ifelse(is.na(democrat), -gen_res$gen_t[gen_res$party == 'democrat'], democrat),
republican = ifelse(is.na(republican), -gen_res$gen_t[gen_res$party == 'republican'], republican),
other = ifelse(is.na(other), -gen_res$gen_t[gen_res$party == 'other'], other)) %>%
as.matrix()
# Counts for each option in each district poll
y_r <- district_polls %>%
select(rep, dem, Other) %>%
as.matrix() %>%
apply(., 2, function(x) ifelse(is.na(x), 0, x))
# Counts for each option in each GB poll
y_g <- generic_ballot %>%
#mutate(dem = round(dem + .4*Other), rep = round(rep + .4*Other),
# Other = round(0.2*Other)) %>%
select(rep, dem, Other) %>%
as.matrix() %>%
apply(., 2, function(x) ifelse(is.na(x), 0, x))
# Number of district polls
n_polls_r <- nrow(y_r)
# Number of GB polls
n_polls_g <- nrow(y_g)
# Number of candidates
n_options <- ncol(y_g)
# Days out from election (for weighting)
days_out_r <- as.numeric(district_polls$days_out)
days_out_g <- as.numeric(generic_ballot$days_out)
region_id <- district_polls$district_id
n_regions <- n_distinct(region_id)
load('data/house_error')
load('data/house_corr')
region_error <- rep(0.01, n_regions)
# Combine into list
model_data <- list(n_options = n_options,
n_regions = n_regions,
n_polls_r = n_polls_r,
n_polls_g = n_polls_g,
y_g = y_g, y_r = y_r,
region_id = region_id,
lean = lean,
days_out_g = days_out_g,
days_out_r = days_out_r,
non_samp_error = house_error,
non_samp_corr = house_corr,
region_error = region_error,
decay_param = 40,
prior_g = c(0.45, 0.53, 0.02),
prior_sd_g = c(0.01, 0.01, 0.01))
#start <- Sys.time()
fit_house <- stan("stan/yapa.stan", data = model_data,
chains = 10, iter = 2000)
#print(Sys.time() - start)
# results -----------------------------------------------------------------
ef_house <- extract(fit_house)
colMeans(ef_house$res_g)
# Poll averages -----------------------------------------------------------
# National
poll_averages_gb_today <- data_frame(
date = exec_date,
lower_rep = quantile(ef_house$res_g[, 1], 0.1),
mean_rep = quantile(ef_house$res_g[, 1], 0.5),
upper_rep = quantile(ef_house$res_g[, 1], 0.9),
lower_dem = quantile(ef_house$res_g[, 2], 0.1),
mean_dem = quantile(ef_house$res_g[, 2], 0.5),
upper_dem = quantile(ef_house$res_g[, 2], 0.9),
lower_other = quantile(ef_house$res_g[, 3], 0.1),
mean_other = quantile(ef_house$res_g[, 3], 0.5),
upper_other = quantile(ef_house$res_g[, 3], 0.9)
) %>%
gather(metric, value, -date) %>%
mutate(candidate = sapply(strsplit(metric, "_"), tail, 1),
metric = sapply(strsplit(metric, "_"), head, 1)) %>%
spread(metric, value)
# Append to tracking data
poll_averages_gb <- read_csv("results/poll_averages_gb.csv")
poll_averages_gb <- poll_averages_gb %>%
filter(date != exec_date) %>%
rbind(poll_averages_gb_today)
write_csv(poll_averages_gb, "results/poll_averages_gb.csv")
# District
tmp_district <- vector("list", nrow(colMeans(ef_house$res_r)))
for(s in 1:length(tmp_district)) {
tmp_district[[s]] <- data_frame(
date = exec_date,
district = unique(district_polls$state_district)[s],
lower_rep = quantile(ef_house$res_r[, s, 1], 0.1),
mean_rep = quantile(ef_house$res_r[, s, 1], 0.5),
upper_rep = quantile(ef_house$res_r[, s, 1], 0.9),
lower_dem = quantile(ef_house$res_r[, s, 2], 0.1),
mean_dem = quantile(ef_house$res_r[, s, 2], 0.5),
upper_dem = quantile(ef_house$res_r[, s, 2], 0.9),
lower_other = quantile(ef_house$res_r[, s, 3], 0.1),
mean_other = quantile(ef_house$res_r[, s, 3], 0.5),
upper_other = quantile(ef_house$res_r[, s, 3], 0.9)
)
}
district_averages_today <- do.call(rbind, tmp_district) %>%
gather(metric, value, -date, -district) %>%
mutate(candidate = sapply(strsplit(metric, "_"), tail, 1),
metric = sapply(strsplit(metric, "_"), head, 1)) %>%
spread(metric, value)
# Append to tracking data
district_averages <- read_csv("results/district_averages.csv")
district_averages <- district_averages %>%
filter(date != exec_date) %>%
rbind(district_averages_today)
write_csv(district_averages, "results/district_averages.csv")
# Simulated results -------------------------------------------------------
# State
# Results
means_rep <- apply(ef_house$res_r, 2, function(x) mean(x[, 1]))
quantiles_rep <- apply(ef_house$res_r, 2, function(x) quantile(x[, 1], c(0.1, 0.9)))
means_dem <- apply(ef_house$res_r, 2, function(x) mean(x[, 2]))
quantiles_dem <- apply(ef_house$res_r, 2, function(x) quantile(x[, 2], c(0.1, 0.9)))
results_dem <- data_frame(
district = state_district,
lower = quantiles_dem[1, ],
mean = means_dem,
upper = quantiles_dem[2, ],
cand = 'dem')
results_rep <- data_frame(
district = state_district,
lower = quantiles_rep[1, ],
mean = means_rep,
upper = quantiles_rep[2, ],
cand = 'rep')
# Save
save(results_dem, file = "results/res_r_dem")
save(results_rep, file = "results/res_r_rep")
# Formatted Table
district_results <- results_dem %>%
rename(`Lower Dem` = lower,
`Upper Dem` = upper,
`Mean Dem` = mean) %>%
select(-cand) %>%
left_join(results_rep %>%
rename(`Lower Rep` = lower,
`Upper Rep` = upper,
`Mean Rep` = mean) %>%
select(-cand)) %>%
rename(District = district) %>%
mutate_if(is.numeric, function(x) paste0(round(x*100), "%"))
save(district_results, file = "results/district_results")
# P-win --------------------------------------------------------------------
# Probability of winning the district
p_dem <- round(apply(ef_house$res_r, 2, function(x) mean(x[, 2] > x[, 1])), 3)
names(p_dem) <- state_district
p_dem <- data.frame(p_dem) %>%
tibble::rownames_to_column("district") %>%
mutate(state = sapply(str_split(district, '-'), `[`, 1),
no = sapply(str_split(district, '-'), `[`, 2)) %>%
arrange(state, no)
save(p_dem, file = "results/district_p_dem")
# Simulate electoral college ----------------------------------------------
res_sims <- matrix(0, nrow = dim(ef_house$res_r)[1], ncol = dim(ef_house$res_r)[3])
for(i in 1:dim(ef_house$res_r)[1]) {
winner <- apply(ef_house$res_r[i, , ], 1, function(x) which(x == max(x)))
for(s in 1:dim(ef_house$res_r)[2]) {
res_sims[i, winner[s]] <- res_sims[i, winner[s]] + 1
}
}
save(res_sims, file = "results/senate_res_sims")
# Create data frame of results for tracking
house_ts_today <- data_frame(
date = exec_date,
lower_rep = quantile(res_sims[, 1], 0.05),
mean_rep = mean(res_sims[, 1]),
upper_rep = quantile(res_sims[, 1], 0.95),
lower_dem = quantile(res_sims[, 2], 0.05),
mean_dem = mean(res_sims[, 2]),
upper_dem = quantile(res_sims[, 2], 0.95)
)
# Append to tracking data
house_ts <- read_csv("results/house_ts.csv")
house_ts <- house_ts %>%
filter(date != exec_date) %>%
rbind(house_ts_today)
write_csv(house_ts, "results/house_ts.csv")
# State simulations -------------------------------------------------------
house_simulations <- data_frame(
value = round(c(c(ef_house$res_r[, , 1]), c(ef_house$res_r[, , 2]), c(ef_house$res_r[, , 3])), 3),
state = rep(rep(state_district, each = dim(ef_house$res_r)[1]), times = 3),
party = rep(c("rep", "dem", "other"), each = dim(ef_house$res_r)[1]*n_regions)
) %>%
group_by(state, party) %>%
mutate(mean = round(mean(value), 3)) %>%
ungroup() %>%
arrange(state)
save(house_simulations, file = "results/house_simulations")
tmp_district <- vector("list", n_distinct(state_district))
for(d in 1:length(tmp_district)) {
tmp_district[[d]] <- data_frame(
date = exec_date,
district = state_district[d],
lower_rep = quantile(ef_house$res_r[, d, 1], 0.1),
mean_rep = quantile(ef_house$res_r[, d, 1], 0.5),
upper_rep = quantile(ef_house$res_r[, d, 1], 0.9),
lower_dem = quantile(ef_house$res_r[, d, 2], 0.1),
mean_dem = quantile(ef_house$res_r[, d, 2], 0.5),
upper_dem = quantile(ef_house$res_r[, d, 2], 0.9)
)
}
district_ts_today <- do.call(rbind, tmp_district)
# Append to state tracking data
district_ts <- read_csv("results/district_ts.csv")
district_ts <- district_ts %>%
filter(date != exec_date) %>%
rbind(district_ts_today)
write_csv(district_ts, "results/district_ts.csv")
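`res_sims` holds one simulated seat count per party per posterior draw, so summary probabilities fall out as column means. A hedged one-liner (218 is the House majority threshold only if all 435 districts are modeled, which is an assumption here):

```r
# Column 2 of res_sims is the simulated Democratic district count per draw.
p_dem_majority <- mean(res_sims[, 2] >= 218)
p_dem_majority
```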
---
repo_name: LiamMJ/15 | path: /R_02.R | filename: R_02.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 2,542 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: 0e55d81e21733bd70d7df1f74de33ad894573d38 | directory_id: 348fa0dc8997772fb008e81fcfbd2f76275a1aa0 | content_id: b2b6b852ee77e4ceceedb32bb1c69ee92150daec
snapshot_id: 02ac0471298836327c6e83401bd6c05747d41e1a | revision_id: 4be64283f05dddbed98078ce005041c5705df81f | branch_name: refs/heads/master
visit_date: 2020-03-27T23:36:48.673171 | revision_date: 2018-09-04T12:44:40 | committer_date: 2018-09-04T12:44:40 | github_id: 147,332,627 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
# 15-2. Variable types
# Variables come in several types (attributes)
# • Different functions accept different variable types
# • Check what type a variable is before analysing it
# • If a function throws an error or returns unexpected results, check the
#   variable type and convert it to suit the function
# • 1. Continuous variable - numeric type
#   - values are continuous and represent magnitude
#   - arithmetic is possible (addition, subtraction, means, ...)
#   - e.g. height, weight, income
# • 2. Categorical variable - factor type
#   - values classify observations into groups
#   - arithmetic is not possible
#   - e.g. gender, place of residence
# Variable    | Data type | Examples
# ------------|-----------|------------------------------------------------------
# Continuous  | Numeric   | height (..., 151, 152, ...), weight (..., 58, 59, ...)
# Categorical | Factor    | gender (1, 2), region (1, 2, 3, 4)
# Exploring the difference between variable types
var1 = c(1,2,3,1,2)         # create a numeric variable
var2 = factor(c(1,2,3,1,2)) # create a factor variable
var1 # print the numeric variable
var2 # print the factor variable
var1 +2 # arithmetic on the numeric variable works
var2 +2 # arithmetic on the factor variable fails (NA with a warning)
# Check variable types
class(var1)
class(var2)
# Check the categories (levels) of a factor variable
levels(var1) # NULL: numeric variables have no levels
levels(var2)
# Factor variables built from characters
var3 = c("a", "b", "b", "c")         # create a character variable
var4 = factor(c("a", "b", "b", "c")) # create a factor variable from characters
var3
var4
class(var3)
class(var4)
# Each function accepts different variable types
mean(var1)
mean(var2) # fails: mean() needs a numeric
# Converting variable types
var2 = as.numeric(var2) # convert to numeric
mean(var2)   # re-apply the function
class(var2)  # check the type
levels(var2) # check the levels (now NULL)
# Coercion functions
# Function         | Purpose
# -----------------|------------------------
# as.numeric()     | convert to numeric
# as.factor()      | convert to factor
# as.character()   | convert to character
# as.Date()        | convert to Date
# as.data.frame()  | convert to data frame
# Try it yourself
library(ggplot2) # for the mpg dataset
# • Q1. Check the type of the drv variable.
class(mpg$drv)
# • Q2. Convert drv to factor type with as.factor(), then check the type again.
mpg$drv <- as.factor(mpg$drv) # assign the result so the conversion sticks
class(mpg$drv)
# • Q3. Check which categories drv consists of.
levels(mpg$drv)
---
repo_name: BGD-UAB/iMKT | path: /man/PopFlyAnalysis.Rd | filename: PopFlyAnalysis.Rd | extension: rd | language: R | src_encoding: UTF-8 | length_bytes: 2,208 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: true
blob_id: 4c74c0a3e92b17c231529f60034e3674ec9b69a4 | directory_id: 4838ff30f4cf7fc1d6a268a1f28f122dea90d056 | content_id: 352b9f31f8828bf49e8806b760d5f98d4515c00e
snapshot_id: 319ff35016ba54108fe1dc3ce64e90e623b733b0 | revision_id: 1fb62426f850c4423c042f027fd9fb844f56304b | branch_name: refs/heads/master
visit_date: 2021-06-15T23:20:46.913981 | revision_date: 2021-02-15T12:07:05 | committer_date: 2021-02-15T12:07:05 | github_id: 144,316,815 | star_events_count: 8 | fork_events_count: 2
gha_license_id: null | gha_event_created_at: 2019-04-29T13:25:01 | gha_created_at: 2018-08-10T17:51:58 | gha_language: R
content:
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PopFlyAnalysis.R
\name{PopFlyAnalysis}
\alias{PopFlyAnalysis}
\title{iMKT using PopFly data}
\usage{
PopFlyAnalysis(
genes = c("gene1", "gene2", "..."),
pops = c("pop1", "pop2", "..."),
cutoff = 0.05,
recomb = TRUE/FALSE,
bins = 0,
test = c("standardMKT", "imputedMKT", "FWW", "aMKT"),
xlow = 0,
xhigh = 1,
plot = FALSE
)
}
\arguments{
\item{genes}{list of genes to analyze}
\item{pops}{list of populations to analyze}
\item{recomb}{group genes according to recombination values (TRUE/FALSE)}
\item{bins}{number of recombination bins to compute (mandatory if recomb=TRUE)}
\item{test}{which test to perform. Options include: standardMKT (default), imputedMKT, FWW, aMKT}
\item{xlow}{lower limit for asymptotic alpha fit (default=0)}
\item{xhigh}{higher limit for asymptotic alpha fit (default=1)}
\item{plot}{report plot (optional). Default is FALSE}
\item{cutoff}{list of cutoffs to perform FWW and/or imputedMKT}
}
\value{
List of lists with the default test output for each selected population (and recombination bin when defined)
}
\description{
Perform any MKT method using a subset of PopFly data defined by custom genes and populations lists
}
\details{
Execute any MKT method (standardMKT, FWW, imputedMKT, aMKT) using a subset of PopFly data defined by custom genes and populations lists. It uses the dataframe PopFlyData, which can be already loaded in the workspace (using loadPopFly()) or is directly loaded when executing this function. It also allows deciding whether to analyze genes groupped by recombination bins or not, using recombination rate estimates from Comeron et al. 2012 Plos Genetics.
}
\examples{
## List of genes
mygenes = c('FBgn0053196', 'FBgn0086906', 'FBgn0261836', 'FBgn0031617',
'FBgn0260965', 'FBgn0028899', 'FBgn0052580', 'FBgn0036181',
'FBgn0263077', 'FBgn0013733', 'FBgn0031857', 'FBgn0037836')
## Perform analyses
PopFlyAnalysis(genes=mygenes, pops='RAL', recomb=FALSE, test='aMKT', xlow=0, xhigh=0.9, plot=TRUE)
PopFlyAnalysis(genes=mygenes, pops=c('RAL','ZI'), recomb=TRUE, bins=3, test='imputedMKT', plot=FALSE)
}
\keyword{PopData}
---
repo_name: msaul/three_species_orthology | path: /mrsb_annotate_orthology_2015-07-10.R | filename: mrsb_annotate_orthology_2015-07-10.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 4,706 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: 3768a9fed94feeca80ac5d878a24a04a063ad9dd | directory_id: 6baf4bcae9d076f640f622f5bff282c2c9c13b67 | content_id: 3834c59630bf1f2d018e1e6e7a33795e6f38c2d7
snapshot_id: b7f6a0aa34e0e0603ca95f525b056717dfdd2a05 | revision_id: 03e5f9dadda721c12d6b9fbe27aabb86bfdc430c | branch_name: refs/heads/master
visit_date: 2021-09-03T22:46:06.557511 | revision_date: 2018-01-12T16:12:15 | committer_date: 2018-01-12T16:12:15 | github_id: 117,261,083 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
# 2015-07-10
# Michael C. Saul
# msaul [at] illinois.edu
# MRSB orthology annotation
load("~/Desktop/mrsb/orthology/OrthoDB/three_species_OrthoDB_parse_2015-04-07.Rdata")
stopifnot(require("biomaRt"))
# Setting biomaRt to generate annotations from Ensembl gene IDs (the row names)
mm_maRt = useMart(biomart = "ENSEMBL_MART_ENSEMBL",
host = "may2012.archive.ensembl.org",
dataset = "mmusculus_gene_ensembl")
mm_maRt_filter = "ensembl_peptide_id"
mm_maRt_attributes = c("ensembl_peptide_id","ensembl_gene_id")
mm_mrsb_maRt_query = unlist(strsplit(threespeciesTriplets$mouse_protein_ids, ";"))
# Grabbing biomart data to annotate the analyzed files
mm_mrsb_biomaRt = getBM(mm_maRt_attributes, mm_maRt_filter, mm_mrsb_maRt_query, mm_maRt)
mm_peptides_not_in_old_annotation = mm_mrsb_maRt_query[which(!mm_mrsb_maRt_query %in% mm_mrsb_biomaRt$ensembl_peptide_id)]
# Setting biomaRt to generate annotations from new Ensembl peptide IDs (the row names)
mm_new_maRt = useMart(biomart = "ENSEMBL_MART_ENSEMBL",
host = "feb2014.archive.ensembl.org",
dataset = "mmusculus_gene_ensembl")
mm_mrsb_new_biomaRt = getBM(mm_maRt_attributes, mm_maRt_filter, mm_peptides_not_in_old_annotation, mm_new_maRt)
mm_gene_annotations = rbind(mm_mrsb_biomaRt, mm_mrsb_new_biomaRt)
sb_maRt = useMart(biomart = "ENSEMBL_MART_ENSEMBL",
host = "feb2014.archive.ensembl.org",
dataset = "gaculeatus_gene_ensembl")
sb_maRt_filter = "ensembl_peptide_id"
sb_maRt_attributes = c("ensembl_peptide_id","ensembl_gene_id")
sb_mrsb_maRt_query = unlist(strsplit(threespeciesTriplets$stickleback_protein_ids, ";"))
sb_mrsb_new_biomaRt = getBM(sb_maRt_attributes, sb_maRt_filter, sb_mrsb_maRt_query, sb_maRt)
sb_gene_annotations = sb_mrsb_new_biomaRt
threespeciesTriplets$bee_gene_ids = as.character(gsub("-PA","",threespeciesTriplets$bee_protein_ids))
threespeciesTriplets$mouse_gene_ids = as.character(threespeciesTriplets$mouse_protein_ids)
threespeciesTriplets$stickleback_gene_ids = as.character(threespeciesTriplets$stickleback_protein_ids)
for (i in 1:nrow(mm_gene_annotations)) {
current_gene = mm_gene_annotations[i,"ensembl_gene_id"]
current_peptide = mm_gene_annotations[i,"ensembl_peptide_id"]
threespeciesTriplets$mouse_gene_ids = gsub(current_peptide, current_gene, threespeciesTriplets$mouse_gene_ids)
}
for (i in 1:nrow(sb_gene_annotations)) {
current_gene = sb_gene_annotations[i,"ensembl_gene_id"]
current_peptide = sb_gene_annotations[i,"ensembl_peptide_id"]
threespeciesTriplets$stickleback_gene_ids = gsub(current_peptide, current_gene, threespeciesTriplets$stickleback_gene_ids)
}
orthology_edges = as.data.frame(matrix(nrow = 0, ncol = 5))
colnames(orthology_edges) = c("orthology_group","species_1","species_2","gene_species_1","gene_species_2")
for (i in 1:nrow(threespeciesTriplets)) {
current_orthology_group = threespeciesTriplets[i,"odb8_og_id"]
current_bee_genes = unlist(strsplit(threespeciesTriplets[i,"bee_gene_ids"],";"))
current_mouse_genes = unlist(strsplit(threespeciesTriplets[i,"mouse_gene_ids"],";"))
current_stickleback_genes = unlist(strsplit(threespeciesTriplets[i,"stickleback_gene_ids"],";"))
l_bee = length(current_bee_genes)
l_mouse = length(current_mouse_genes)
l_stickleback = length(current_stickleback_genes)
for (j in 1:l_bee) {
for (k in 1:l_mouse) {
current_df = data.frame(orthology_group = current_orthology_group,
species_1 = "honeybee",
species_2 = "mouse",
gene_species_1 = current_bee_genes[j],
gene_species_2 = current_mouse_genes[k])
orthology_edges = rbind(orthology_edges, current_df)
}
}
for (j in 1:l_bee) {
for (k in 1:l_stickleback) {
current_df = data.frame(orthology_group = current_orthology_group,
species_1 = "honeybee",
species_2 = "stickleback",
gene_species_1 = current_bee_genes[j],
gene_species_2 = current_stickleback_genes[k])
orthology_edges = rbind(orthology_edges, current_df)
}
}
for (j in 1:l_mouse) {
for (k in 1:l_stickleback) {
current_df = data.frame(orthology_group = current_orthology_group,
species_1 = "mouse",
species_2 = "stickleback",
gene_species_1 = current_mouse_genes[j],
gene_species_2 = current_stickleback_genes[k])
orthology_edges = rbind(orthology_edges, current_df)
}
}
}
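Growing `orthology_edges` with `rbind()` inside nested loops is quadratic in the number of edges. A hedged equivalent sketch for one species pair using `expand.grid()` (same columns, built in one shot; the helper name is mine):

```r
# Cross product of two gene vectors as an edge table, without rbind-per-row.
pair_edges <- function(og, genes1, genes2, sp1, sp2) {
  g <- expand.grid(gene_species_1 = genes1, gene_species_2 = genes2,
                   stringsAsFactors = FALSE)
  data.frame(orthology_group = og, species_1 = sp1, species_2 = sp2,
             g, stringsAsFactors = FALSE)
}
# e.g. pair_edges(current_orthology_group, current_bee_genes,
#                 current_mouse_genes, "honeybee", "mouse")
```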
---
repo_name: simaonogueira101/CLSBE_BusinessStatistics-R_Assignements | path: /FA.R | filename: FA.R | extension: r | language: R | src_encoding: UTF-8 | length_bytes: 5,628 | detected_licenses: [] | license_type: no_license | is_vendor: false | is_generated: false
blob_id: 52fbc04f34c36077ca255854d24a2295dcf06f6a | directory_id: dce8898cc3c560d96e5709cc9f91c5e385a54a77 | content_id: 6e69726b1724b9d55c4f75405a2c536ec91ac2a7
snapshot_id: 4b36d0dc4bf8177c9901647a40ca1f4ed48fa77a | revision_id: f51e27b89bab0f5a181d4c0cb497282f030193f3 | branch_name: refs/heads/master
visit_date: 2023-02-04T02:15:41.844230 | revision_date: 2020-12-20T21:30:09 | committer_date: 2020-12-20T21:30:09 | github_id: 307,807,212 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
content:
# Turn off scientific notation
options(scipen = 999)
# Install required packages
# install.packages("lm.beta")
# Load required packages
library(lm.beta)
# Load DF
# FAdatabase <- read.csv("WA_ESSDatabase.csv", header = TRUE)
dependent_var <- "trstplc"
iteration_num <- 4
# Columns to be excluded from analysis
excluded_columns <- c(
# Dependent variable
dependent_var,
# DF info variables
"name",
"essround",
"edition",
"proddate",
"idno",
"cntry",
"region",
"regunit",
"inwtm",
# Other unusable variables
"ctzshipc",
"cntbrthc",
"livecnta",
"lnghom1",
"lnghom2",
"fbrncntb",
"mbrncntb",
"hhmmb",
"agea",
"yrbrn",
"icpart1",
"pdjobyr",
"emplno",
"eduyrs",
"njbspv",
"wkhct",
"wkhtotp",
"isco08",
"prtvtal",
"wrkorg",
"prtclal",
"rlgdnal",
"rlgdeal",
"cldgng",
"rshpscz",
"rshpsfi",
"lvgptnea",
"mbtru",
"edulvlb",
"edulvlpb",
"mainact",
"mnactp"
)
debug_columns <- c(
"trstplc",
"trstlgl",
"mnactp",
"tporgwk"
)
# Columns to be used in analysis
# good_columns <- debug_columns
good_columns <- colnames(FAdatabase)[! colnames(FAdatabase) %in% excluded_columns]
# Initiate model variable
model <- c(dependent_var)
# Function to clean database
cleanDatabase <- function (database, variables) {
# Determine type of scale and clean column
for (variable in variables) {
max_answer <- max(database[variable], na.rm = TRUE)
if (max_answer > 9) {
database[variable][database[variable] > 10 | database[variable] == ""] <- NA
} else if (max_answer > 6) {
database[variable][database[variable] > 6 | database[variable] == ""] <- NA
}
# Remove rows with NA values
database <- database[!is.na(database[variable]), ]
}
return(database)
}
progress_total <- iteration_num * length(good_columns) - iteration_num + 1
progress_current <- 0
# Iterate to find each variable for the model
for (i in 1:iteration_num) {
# Create empty results array
results <- data.frame(
variable = character(0),
p_value_individual = numeric(0),
r_squared_overall = numeric(0),
p_value_overall = numeric(0)
)
for (column in good_columns[! good_columns %in% model]) {
# Duplicate DF into a temporary object
temp_database <- data.frame(FAdatabase)
proposed_model <- c(model, column)
temp_database <- cleanDatabase(temp_database, proposed_model)
current_model <- ""
# build the formula string; index j keeps the outer iteration counter i intact
for (j in 1:length(model)) {
if(j < 2) {
current_model <- paste(
"temp_database$",
current_model,
toString(model[j]),
" ~ ",
sep = ""
)
} else {
current_model <- paste(
current_model,
"temp_database$",
toString(model[j]),
" + ",
sep = ""
)
}
}
current_model <- paste(
current_model,
"temp_database$",
toString(column),
sep = ""
)
# Temporary model analysis
temp_model <- lm(formula(current_model))
temp_summary <- summary(lm.beta(temp_model))
temp_p_value_individual <- format(round(as.numeric(
temp_summary$coefficients[length(temp_summary$coefficients)]
), 10), nsmall = 10)
temp_r_squared_overall <- as.numeric(temp_summary$r.squared)
temp_p_value_overall <- format(round(as.numeric(pf(
temp_summary$fstatistic[[1]],
temp_summary$fstatistic[[2]],
temp_summary$fstatistic[[3]],
lower.tail = FALSE
)), 10), nsmall = 10)
# Record results
temp_results <- c(
column,
temp_p_value_individual,
temp_r_squared_overall,
temp_p_value_overall
)
results[nrow(results) + 1,] <- temp_results
# Update progress indicator
print(paste(
"Progress: ",
format(round(progress_current / progress_total * 100, 0),nsmall = 0),
"%",
sep = ""
))
progress_current <- progress_current + 1
}
# Convert recorded strings back to numerics so sorting and filtering are numeric
results$p_value_individual <- as.numeric(results$p_value_individual)
results$r_squared_overall <- as.numeric(results$r_squared_overall)
results$p_value_overall <- as.numeric(results$p_value_overall)
# Order results based on PRE (overall r-squared)
results <- results[order(results$r_squared_overall, decreasing = TRUE), ]
# Remove results with no statistical significance
results <- results[results$p_value_individual < 0.05 & results$p_value_overall < 0.05, ]
print(head(results, 10))
# Record winning variable in model
print("-----------------")
model <- c(model, results[1, 1])
print(paste("Variable #", i, ": ", results[1, 1], sep=""))
print(paste("Current R-Squared: ", results[1, 3], sep=""))
print("-----------------")
}
createFormula <- function (model, database) {
formula <- ""
for (i in 1:length(model)) {
if(i == 1) {
formula <- paste(
formula,
database,
"$",
toString(model[i]),
" ~ ",
sep = ""
)
} else if (i < length(model)) {
formula <- paste(
formula,
database,
"$",
toString(model[i]),
" + ",
sep = ""
)
} else {
formula <- paste(
formula,
database,
"$",
toString(model[i]),
sep = ""
)
}
}
return(formula)
}
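# e.g. createFormula(c("y", "x1", "x2"), "df") returns "df$y ~ df$x1 + df$x2"
# (y, x1, x2 and df are hypothetical names). Base R's reformulate() builds an
# equivalent formula object directly from column names:
#   reformulate(termlabels = model[-1], response = model[1])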
adapted_variables <- c(
dependent_var,
"wkdcorga",
"iprspot",
"impfree",
"tporgwk"
)
all_variables <- c(
model,
adapted_variables
)
final_database <- cleanDatabase(FAdatabase, all_variables)
optimized_model <- createFormula(model, "final_database")
adapted_model <- createFormula(adapted_variables, "final_database")
optimized_summary <- summary(lm.beta(lm(optimized_model)))
adapted_summary <- summary(lm.beta(lm(adapted_model)))
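# A positive difference means the data-driven stepwise model explains more
# variance (higher R-squared, i.e. PRE) than the theory-based adapted model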
pre_change <- optimized_summary$r.squared - adapted_summary$r.squared
|
696114fb00037834ec0e49775d746d997498ec86
|
ceb61818823e8f275b86b394137ddc87e0bf6283
|
/IAM65/man/iamArgs.rd
|
7ab62cc3aa6696562980a3029cf47a850864367e
|
[] |
no_license
|
florencebriton/IAM_Dvt
|
7628e4b15f89849b4b900d739dba63d52bffd5ae
|
c4cdbbd99cf4b93d143547e7f89532af12519c1d
|
refs/heads/master
| 2020-04-28T08:34:04.663339
| 2019-03-12T04:24:00
| 2019-03-12T04:24:00
| 175,133,130
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
rd
|
iamArgs.rd
|
\name{iamArgs-class}
\docType{class}
\alias{iamArgs-class}
\title{Class "iamArgs"}
\description{ToDo}
\section{Slots}{
\describe{
\item{\code{desc}:}{object description}
\item{\code{arguments}:}{ToDo}
\item{\code{specific}:}{ToDo}
}
}
\author{Mathieu Merzereaud}
\examples{
showClass("iamArgs")
}
\keyword{classes}
|
0a53da4909464a364d159639d2067e75409fc8e5
|
410a2dfcbe74978eb10912a3dfe74bff3d136357
|
/man/designPlot.Rd
|
a95da696b17f002399e151dd605fa287aa6a001b
|
[] |
no_license
|
cran/dae
|
7ee9c2ad4fcb3c97b846eb76ed560c20c591a977
|
df5b45cc69b9984473f254891510687f2eb114ef
|
refs/heads/master
| 2023-08-18T19:51:15.580290
| 2023-08-07T15:50:14
| 2023-08-07T17:30:45
| 17,695,368
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,809
|
rd
|
designPlot.Rd
|
\name{designPlot}
\alias{designPlot}
\title{A graphical representation of an experimental design using labels stored in a matrix.}
\description{This function uses labels, usually derived from the treatment and blocking factors of
             an experimental design and stored in a matrix, to build a graphical representation of
             the matrix, highlighting the positions of certain labels.
             It is a modified version of the function supplied with DiGGer.
             It includes more control over the labelling of the rows and columns
             of the design and allows for more flexible plotting of designs with
             unequal block sizes.}
\usage{
designPlot(designMatrix, labels = NULL, altlabels = NULL, plotlabels = TRUE,
rtitle = NULL, ctitle = NULL,
rlabelsreverse = FALSE, clabelsreverse = FALSE,
font = 1, chardivisor = 2, rchardivisor = 1, cchardivisor = 1,
cellfillcolour = NA, plotcellboundary = TRUE,
rcellpropn = 1, ccellpropn = 1,
blocksequence = FALSE, blockdefinition = NULL,
blocklinecolour = 1, blocklinewidth = 2,
rotate = FALSE, new = TRUE, ...)
}
\arguments{
\item{designMatrix}{A \code{\link{matrix}} containing a set of numerics or characters
being the labels as they have been assigned to the cells of the grid
represented by the \code{\link{matrix}}.}
\item{labels}{A \code{\link{numeric}} or \code{\link{character}} vector giving the
cells in \code{designMatrix} that are to be plotted in this call to
\code{designPlot}. If \code{NULL} then all the cells are plotted.
What is actually plotted for a cell is controlled jointly by \code{labels},
\code{plotlabels}, \code{altlabels}, \code{plotcellboundary} and
\code{cellfillcolour}. If \code{plotlabels} is TRUE and \code{altlabels}
is \code{NULL} then \code{labels} are plotted in the cells, unless
\code{labels} is \code{NULL} when the labels in \code{designMatrix}
are plotted.
Whatever is being plotted, \code{altlabels} and
\code{cellfillcolour} must have an appropriate number of values.
See \code{\link{text}} for more information on specifying the labels.}
\item{altlabels}{Either a \code{\link{character}} vector containing an alternative set of
labels for the \code{labels} currently being plotted or a single \code{\link{integer}}
specifying an alternative symbol to be used in plotting cells when
\code{plotlabels} is \code{TRUE}. The length of \code{altlabels} must be one or
the same length as \code{labels}, unless \code{labels} is \code{NULL} in which case
it must equal the number of unique labels in \code{designMatrix}.
If \code{altlabels} is \code{NULL}, the labels specified in \code{labels} are
plotted when \code{plotlabels} is \code{TRUE}. If \code{labels} is also \code{NULL},
the labels in \code{designMatrix} are plotted.
See \code{\link{text}} for more information on specifying the labels.}
 \item{plotlabels}{A \code{\link{logical}} indicating whether labels are to be
              plotted in the cells. If \code{TRUE}, all labels, or just the
              specific labels listed in \code{labels}, are printed. If \code{FALSE},
              no labels are printed in the cells.}
\item{rtitle}{A \code{\link{character}} string to use as a title for rows of the plot.
If \code{rtitle} is \code{NULL} then no title is plotted.}
\item{ctitle}{A \code{\link{character}} string to use as a title for columns of the plot.
If \code{ctitle} is \code{NULL} then no title is plotted.}
\item{rlabelsreverse}{A \code{\link{logical}} indicating whether to reverse the row labels.}
\item{clabelsreverse}{A \code{\link{logical}} indicating whether to reverse the column labels.}
\item{font}{An \code{\link{integer}} specifying the font to be used for row and column labelling.
See \code{\link{par}} for further details.}
\item{chardivisor}{A \code{\link{numeric}} that changes the size of text and symbols in the cells
by dividing the default size by it.}
\item{rchardivisor}{A \code{\link{numeric}} that changes the size of the labels of the rows of the
design by dividing the default size by it.}
\item{cchardivisor}{A \code{\link{numeric}} that changes the size of the labels of the columns of the
design by dividing the default size by it.}
 \item{cellfillcolour}{A \code{\link{character}} string specifying the colour of the fill for
              the cells to be plotted in this call. If there is only one colour
              then all cells being plotted are filled with that colour. If there is more than one
              colour then, unless \code{labels} is \code{NULL}, the number of colours must
              at least equal the number of labels
              and the fill colours will be matched, one for one from the first colour,
              with the labels. If \code{labels} is \code{NULL} then the number of colours
              must at least equal the number of unique labels in \code{designMatrix}.
              The default, \code{NA}, is to leave the cells unfilled.
              See also \code{Colour specification} under the \code{\link{par}} function.}
 \item{plotcellboundary}{A \code{\link{logical}} indicating whether a boundary is to be plotted
              around a cell.}
 \item{rcellpropn}{A value between 0 and 1 giving the proportion of the standard row size of
              a cell that is to be plotted as the cell.}
 \item{ccellpropn}{A value between 0 and 1 giving the proportion of the standard column size of
              a cell that is to be plotted as the cell.}
 \item{blocksequence}{A \code{\link{logical}} that determines whether \code{blockdefinition}
              specifies a repetition of a single block size or a sequence of block sizes.}
 \item{blockdefinition}{A \code{\link{matrix}} of block sizes:
         \itemize{
         \item if there is only one row, then the first element is interpreted as the number of rows in
       each block and blocks with this number of rows are to be repeated across the rows of the design.
        \item if there is more than one row, then each row of the matrix specifies a block,
       with the sequence of rows in the matrix specifying a corresponding
       sequence of blocks down the rows of the design.}
       Similarly, a single value for a column specifies a repetition of blocks of that size
       across the columns of the design, while several column values specify a
       sequence of blocks of the specified sizes across the columns.}
\item{blocklinecolour}{A \code{\link{character}} string specifying the colour of the block boundary.
See also \code{Colour specification} under the \code{\link{par}} function.}
\item{blocklinewidth}{A \code{\link{numeric}} giving the width of the block boundary to be plotted.}
\item{rotate}{A \code{\link{logical}} which, if \code{TRUE}, results in the matrix being rotated
90 degrees for plotting.}
\item{new}{A \code{\link{logical}} indicating if a new plot is to be produced or the current
plot is added to.}
\item{...}{further arguments passed to \code{\link{polygon}} in plotting the cell.}
}
\value{no values are returned, but a plot is produced.}
\references{Coombes, N. E. (2009). \emph{DiGGer design search tool in R}.
\url{http://nswdpibiom.org/austatgen/software/}}
\author{Chris Brien}
\seealso{\code{\link{blockboundaryPlot}}, \code{\link{designPlotlabels}}, \code{\link{designLatinSqrSys}}, \code{\link{designRandomize}}, \code{\link{designAnatomy}}
in package \pkg{dae}. \cr
Also, \code{\link{par}}, \code{\link{polygon}},
\code{DiGGer}}
\examples{\dontrun{
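# des.mat is assumed to be a pre-existing matrix of plot labels (not defined here)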
designPlot(des.mat, labels=1:4, cellfillcolour="lightblue", new=TRUE,
plotcellboundary = TRUE, chardivisor=3,
rtitle="Lanes", ctitle="Positions",
rcellpropn = 1, ccellpropn=1)
designPlot(des.mat, labels=5:87, plotlabels=TRUE, cellfillcolour="grey", new=FALSE,
plotcellboundary = TRUE, chardivisor=3)
designPlot(des.mat, labels=88:434, plotlabels=TRUE, cellfillcolour="lightgreen",
new=FALSE, plotcellboundary = TRUE, chardivisor=3,
blocksequence=TRUE, blockdefinition=cbind(4,10,12),
           blocklinewidth=3, blocklinecolour="blue")}}
\keyword{design}
\keyword{plot}
|
82a659caaacf0f974a7837ab6ea422c0d373a215
|
fdc11ac8fd91a0e3f384c5be41422900379e768f
|
/covid_shiny_v5.1_summary_dashboard_dep.R
|
cdf3e6cc510c52c135b16bbb5c0ac44e091efcca
|
[] |
no_license
|
kylebennison/covid-19-dashboard
|
a4c28c12415bb3d068609c6ac7d06889890e065b
|
44347bc145fc4e20f8c1d5769d3d12ee9f4c1ad4
|
refs/heads/master
| 2022-11-08T19:22:21.650759
| 2020-07-02T16:20:06
| 2020-07-02T16:20:06
| 268,882,880
| 0
| 0
| null | 2020-07-01T16:14:07
| 2020-06-02T18:50:50
|
R
|
UTF-8
|
R
| false
| false
| 46,830
|
r
|
covid_shiny_v5.1_summary_dashboard_dep.R
|
### Load necessary packages ###
library(shiny)
library(tidyverse)
library(lubridate)
library(scales)
library(DT)
library(jsonlite)
library(plotly)
library(htmlwidgets)
library(gt)
### Load theme ###
#Staturdays Colors
staturdays_col_list <- c(
lightest_blue = "#5c6272",
lighter_blue = "#4c5872",
light_blue = "#394871",
medium_blue = "#22345a",
dark_blue = "#041e42",
orange = "#de703b",
sign = "#1e1e1e",
white = "#FFFFFF"
)
staturdays_colors <- function(...) {
cols <- c(...)
if (is.null(cols))
return (staturdays_col_list)
staturdays_col_list[cols]
}
staturdays_theme <- theme(plot.caption = element_text(size = 12, hjust = 1, color = staturdays_colors("orange")),
plot.title = element_text(color = staturdays_colors("dark_blue"), size = 30, face = "bold"),
plot.subtitle = element_text(color = staturdays_colors("lightest_blue"), size = 20),
axis.text = element_text(color = staturdays_colors("lightest_blue"), size = 15),
axis.title = element_text(color = staturdays_colors("lightest_blue"), size = 15),
legend.title = element_text(color = staturdays_colors("lightest_blue"), size = 15),
legend.text = element_text(color = staturdays_colors("lightest_blue"), size = 15)
)
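# staturdays_theme is layered onto every ggplot below to keep fonts and colours consistent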
# Use two action buttons and reactiveValues to let users select the data source (cases vs deaths)
### Load in Data ###
# Johns Hopkins Case and Death Timeseries by County -----------------------
data_source <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv"
data_csv <- read.csv(data_source, header = TRUE, sep = ",") # Read in csv file from the web
data_csv <- data_csv %>% pivot_longer(cols = -c(1:11), names_to = "Date") # Move Dates to one column
data_csv <- data_csv %>% mutate(Date = str_remove(data_csv$Date, "X")) # Remove the X from the date
data_csv <- data_csv %>% mutate(date = mdy(data_csv$Date)) # Convert to actual date values
data_csv <- data_csv %>% select(-Date) # Get rid of unnecessary Date column
covid_data_cases <- data_csv
covid_data_cases <- covid_data_cases %>%
mutate(New_Cases =
case_when(
lag(Combined_Key, n = 1L) == Combined_Key ~ value - lag(value, 1L),
TRUE ~ as.integer(0)
)
)
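# The lag() pattern above computes day-over-day new cases within each county:
# the Combined_Key comparison resets the difference to 0 on the first row of
# each county, so counts never bleed across county boundaries.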
covid_data_cases <- covid_data_cases %>% rename(Cumulative_Cases = value)
data_source_deaths <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv"
data_csv_deaths <- read.csv(data_source_deaths, header = TRUE, sep = ",") # Read in csv file from the web
data_csv_deaths <- data_csv_deaths %>% pivot_longer(cols = -c(1:12), names_to = "Date") # Move Dates to one column
data_csv_deaths <- data_csv_deaths %>% mutate(Date = str_remove(data_csv_deaths$Date, "X")) # Remove the X from the date
data_csv_deaths <- data_csv_deaths %>% mutate(date = mdy(data_csv_deaths$Date)) # Convert to actual date values
data_csv_deaths <- data_csv_deaths %>% select(-Date) # Get rid of unnecessary Date column
covid_data_deaths <- data_csv_deaths %>%
mutate(New_Deaths =
case_when(
lag(Combined_Key, n = 1L) == Combined_Key ~ value - lag(value, 1L),
TRUE ~ as.integer(0)
)
)
covid_data_deaths <- covid_data_deaths %>% rename(Cumulative_Deaths = value)
covid_data <- left_join(covid_data_cases, covid_data_deaths, by = c("Province_State", "Combined_Key", "date"))
covid_data <- covid_data %>%
mutate(New_Cases_Per_Cap = (New_Cases / Population) * 100000,
Cum_Cases_Per_Cap = (Cumulative_Cases / Population) * 100000,
New_Deaths_Per_Cap = (New_Deaths / Population) * 100000,
Cum_Deaths_Per_Cap = (Cumulative_Deaths / Population) * 100000)
# State and US Summaries --------------------------------------------------
covid_data_state <- covid_data %>%
group_by(Province_State, date) %>%
summarise(New_Cases = sum(New_Cases), New_Deaths = sum(New_Deaths), Cumulative_Cases = sum(Cumulative_Cases), Cumulative_Deaths = sum(Cumulative_Deaths), Population = sum(Population)) %>%
  mutate(New_Cases_Per_Cap = (New_Cases / Population) * 100000,
         Cum_Cases_Per_Cap = (Cumulative_Cases / Population) * 100000,
         New_Deaths_Per_Cap = (New_Deaths / Population) * 100000,
         Cum_Deaths_Per_Cap = (Cumulative_Deaths / Population) * 100000)
covid_data_us <- covid_data_state %>%
group_by(date) %>%
summarise(New_Cases = sum(New_Cases), New_Deaths = sum(New_Deaths), Cumulative_Cases = sum(Cumulative_Cases), Cumulative_Deaths = sum(Cumulative_Deaths), Population = sum(Population)) %>%
  mutate(New_Cases_Per_Cap = (New_Cases / Population) * 100000,
         Cum_Cases_Per_Cap = (Cumulative_Cases / Population) * 100000,
         New_Deaths_Per_Cap = (New_Deaths / Population) * 100000,
         Cum_Deaths_Per_Cap = (Cumulative_Deaths / Population) * 100000)
# Global Data -------------------------------------------------------------
#Global Confirmed Cases
data_source_global <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
data_csv <- read.csv(data_source_global, header = TRUE, sep = ",") # Read in csv file from the web
data_csv <- data_csv %>% pivot_longer(cols = -c(1:4), names_to = "Date") # Move Dates to one column
data_csv <- data_csv %>% mutate(Date = str_remove(data_csv$Date, "X")) # Remove the X from the date
data_csv <- data_csv %>% mutate(date = mdy(data_csv$Date)) # Convert to actual date values
data_csv <- data_csv %>% select(-Date) # Get rid of unnecessary Date column
covid_data_cases <- data_csv
covid_data_cases <- covid_data_cases %>%
group_by(Country.Region, date) %>%
summarise(Cumulative_Cases = as.integer(sum(value)))
covid_data_cases <- covid_data_cases %>%
mutate(New_Cases =
case_when(
lag(Country.Region, n = 1L) == Country.Region ~ Cumulative_Cases - lag(Cumulative_Cases, 1L),
TRUE ~ as.integer(0)
)
)
data_source_global_deaths <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv"
data_csv <- read.csv(data_source_global_deaths, header = TRUE, sep = ",") # Read in csv file from the web
data_csv <- data_csv %>% pivot_longer(cols = -c(1:4), names_to = "Date") # Move Dates to one column
data_csv <- data_csv %>% mutate(Date = str_remove(data_csv$Date, "X")) # Remove the X from the date
data_csv <- data_csv %>% mutate(date = mdy(data_csv$Date)) # Convert to actual date values
data_csv <- data_csv %>% select(-Date) # Get rid of unnecessary Date column
covid_data_deaths <- data_csv
covid_data_deaths <- covid_data_deaths %>%
group_by(Country.Region, date) %>%
summarise(Cumulative_Deaths = as.integer(sum(value)))
covid_data_deaths <- covid_data_deaths %>%
mutate(New_Deaths =
case_when(
lag(Country.Region, n = 1L) == Country.Region ~ Cumulative_Deaths - lag(Cumulative_Deaths, 1L),
TRUE ~ as.integer(0)
)
)
covid_data_global <- left_join(covid_data_cases, covid_data_deaths)
data_source_global_recovered <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv"
data_csv <- read.csv(data_source_global_recovered, header = TRUE, sep = ",") # Read in csv file from the web
data_csv <- data_csv %>% pivot_longer(cols = -c(1:4), names_to = "Date") # Move Dates to one column
data_csv <- data_csv %>% mutate(Date = str_remove(data_csv$Date, "X")) # Remove the X from the date
data_csv <- data_csv %>% mutate(date = mdy(data_csv$Date)) # Convert to actual date values
data_csv <- data_csv %>% select(-Date) # Get rid of unnecessary Date column
covid_data_recovered <- data_csv
covid_data_recovered <- covid_data_recovered %>%
group_by(Country.Region, date) %>%
summarise(Cumulative_Recovered = as.integer(sum(value)))
covid_data_recovered <- covid_data_recovered %>%
mutate(New_Recovered =
case_when(
lag(Country.Region, n = 1L) == Country.Region ~ Cumulative_Recovered - lag(Cumulative_Recovered, 1L),
TRUE ~ as.integer(0)
)
)
covid_data_global <- left_join(covid_data_global, covid_data_recovered)
covid_data_global_longer <- covid_data_global %>%
pivot_longer(cols = c(Cumulative_Deaths, Cumulative_Recovered, New_Deaths, New_Recovered), names_to = "Case_Outcome", values_to = "Count")
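# Long format puts deaths and recoveries in a single Count column keyed by
# Case_Outcome, so the global outcomes plot can draw them as separate lines.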
# Calculate Recovery Rate
covid_data_global_recoveries <- covid_data_global %>%
group_by(Country.Region) %>%
summarise(Total_Deaths = max(Cumulative_Deaths), Total_Recovered = max(Cumulative_Recovered), Recovery_Rate = Total_Recovered/(Total_Deaths+Total_Recovered))
# Testing Data Source ---------------------------------------------------------
covid_state_daily_source <- "https://covidtracking.com/api/v1/states/daily.json"
covid_state_daily <- jsonlite::fromJSON(covid_state_daily_source)
#update date
covid_state_daily <- covid_state_daily %>% mutate(date = ymd(date))
#update state names
state_names <- as.data.frame(cbind(state.abb,state.name))
# #Fix non-US states AS, GU, etc.
# covid_state_daily %>%
# filter(is.na(state.name) == T) %>%
# count(state)
state_names <- state_names %>% rbind(tribble(
~ state.abb, ~ state.name,
"AS", "American Samoa",
"DC", "District of Columbia",
"GU", "Guam",
"MP", "Northern Mariana Islands",
"PR", "Puerto Rico",
"VI", "Virgin Islands"
)
)
covid_state_daily <- left_join(covid_state_daily, state_names, by = c("state" = "state.abb"))
# Hospital Data (Beds and ICUs in use by Date and State)
covid_state_daily_longer <- covid_state_daily %>%
pivot_longer(cols = c(hospitalizedCurrently, inIcuCurrently, onVentilatorCurrently),
names_to = ("current_hospitalization_severity"))
# Join in population data to show % of pop tested
state_populations <- select(covid_data_state, c(Province_State, Population)) %>%
distinct()
covid_state_daily_pop <- left_join(covid_state_daily, state_populations, by = c("state.name" = "Province_State"))
# Total Tests Done by Each State
temp_rank <- covid_state_daily_pop %>%
filter(date == max(date)) %>%
group_by(state.name) %>%
summarise(percent_tests_pos = positive / posNeg,
percent_pop_tested = posNeg / Population,
percent_pop_positive = positive / Population,
positive,
negative,
total_tests = posNeg) %>%
arrange(desc(percent_pop_positive))
covid_state_daily_rank <- temp_rank %>% mutate(percent_tests_pos_rank = row_number(desc(percent_tests_pos)),
total_positive_rank = row_number(desc(positive)),
total_testing_rank = row_number(desc(total_tests))) %>%
arrange(percent_tests_pos_rank)
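# covid_state_daily_rank: one row per state with testing rates plus three rank
# columns, rendered in the "Testing and Hospitalizations" data table below.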
# WOW and MOM changes in cases, hospitalizations, and deaths
# Monthly
covid_state_daily_MOM <- covid_state_daily %>% mutate(week = epiweek(date), month = month(date, label = TRUE, abbr = FALSE), day = mday(date))
datemax <- max(covid_state_daily_MOM$date) # need to get current max date then filter previous months to do MTD calculation
covid_state_daily_MOM <- covid_state_daily_MOM %>%
filter(day <= day(datemax)) %>% # Make data MTD for each month
group_by(state.name,month) %>%
summarise(monthly_cases = sum(positiveIncrease), monthly_hospitalizations = sum(hospitalizedIncrease), monthly_deaths = sum(deathIncrease)) %>%
  arrange(state.name, month) %>% # ascending month order within each state so lag() compares consecutive months (desc() takes a single argument)
summarise(state.name, month, MOM_Cases = case_when(state.name == lag(state.name, 1L) ~ ((monthly_cases - lag(monthly_cases, 1L))/lag(monthly_cases, 1L)),
TRUE ~ 0
),
MOM_Hospitalizations = case_when(state.name == lag(state.name, 1L) ~ ((monthly_hospitalizations - lag(monthly_hospitalizations, 1L))/lag(monthly_hospitalizations, 1L)),
TRUE ~ 0
),
MOM_Deaths = case_when(state.name == lag(state.name, 1L) ~ ((monthly_deaths - lag(monthly_deaths, 1L))/lag(monthly_deaths, 1L)),
TRUE ~ 0
)) %>%
filter(month == max(month))
# Weekly
covid_state_daily_WOW <- covid_state_daily %>% mutate(week = epiweek(date), month = month(date)) %>%
group_by(state.name,week) %>%
summarise(weekly_cases = sum(positiveIncrease), weekly_hospitalizations = sum(hospitalizedIncrease), weekly_deaths = sum(deathIncrease)) %>%
  arrange(state.name, week) %>% # ascending week order within each state so lag() compares consecutive weeks
summarise(state.name, week,
WOW_Cases = case_when(state.name == lag(state.name, 1L) ~ ((weekly_cases - lag(weekly_cases, 1L))/lag(weekly_cases, 1L)),
TRUE ~ 0
),
WOW_Hospitalizations = case_when(state.name == lag(state.name, 1L) ~ ((weekly_hospitalizations - lag(weekly_hospitalizations, 1L))/lag(weekly_hospitalizations, 1L)),
TRUE ~ 0
),
WOW_Deaths = case_when(state.name == lag(state.name, 1L) ~ ((weekly_deaths - lag(weekly_deaths, 1L))/lag(weekly_deaths, 1L)),
TRUE ~ 0
)) %>%
filter(week == max(week) - 1)
# Join tables
covid_state_daily_WOW_MOM <- covid_state_daily_MOM %>%
left_join(covid_state_daily_WOW, by = "state.name")
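# One row per state: current month-to-date percentage changes (MOM_*) next to
# the most recent complete week's changes (WOW_*), shown in the summary table.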
# Start Shiny App ---------------------------------------------------------
# UI ----------------------------------------------------------------------
ui <- navbarPage(title = "COVID-19 Case Tracker",
tabPanel("Summary",
fluidPage(
sidebarLayout(
sidebarPanel(
plotOutput(outputId = "top10_counties_summary"),
plotOutput(outputId = "top10_states_summary")
),
mainPanel(
tags$h2("States With New Case Peak in the Past Week"),
dataTableOutput(outputId = "recent_peaks_state", width = "100%"),
plotOutput(outputId = "total_tests"),
plotOutput(outputId = "summary_pct_pos_tests"),
tags$h2("% Increase in Cases, Hospitalizations, and Deaths by State"),
tags$h3("Current Month-to-Date and Most Recent Week"),
dataTableOutput(outputId = "WOW_MOM_state", width = "100%"),
tags$p("A shiny app by ",
tags$a("Kyle Bennison", href="https://www.linkedin.com/in/kylebennison", target="_blank"),
" - ",
tags$a("@kylebeni012", href="https://www.twitter.com/kylebeni012", target="_blank")),
tags$p("Data - ",
tags$a("Johns Hopkins CSSE" , href="https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series", target="_blank"))
)
)
)
),
tabPanel("County",
fluidPage(
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "casetype", label = "Select Cases to Display", choices = c("New Cases", "Total Cases")),
selectizeInput(inputId = "county", label = "Select a county",
choices = unique(covid_data$Combined_Key),
selected = NULL,
multiple = TRUE,
options = list(maxItems = 3, placeholder = "Select up to 3 counties")
),
tags$h6("Optionally, you can type the state name to filter the list down to just the counties in that state.", style = "margin-bottom:30px; color:#545454"),
dateRangeInput(inputId = "daterange", label = "Select Date Range", start = today()-60, end = max(covid_data$date), min = "2020-01-15"),
plotOutput(outputId = "top10_counties")
),
mainPanel(
plotOutput(outputId = "cases"),
plotOutput(outputId = "deaths"),
tags$p("A shiny app by ",
tags$a("Kyle Bennison", href="https://www.linkedin.com/in/kylebennison", target="_blank"),
" - ",
tags$a("@kylebeni012", href="https://www.twitter.com/kylebeni012", target="_blank")),
tags$p("Data - ",
tags$a("Johns Hopkins CSSE" , href="https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series", target="_blank"))
)
)
)
),
navbarMenu("State",
tabPanel("Cases and Deaths",
fluidPage(
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "casetype_state", label = "Select Cases to Display", choices = c("New Cases", "Total Cases")),
selectizeInput(inputId = "state", label = "Select a state",
choices = unique(covid_data$Province_State),
selected = NULL,
multiple = TRUE,
options = list(maxItems = 3, placeholder = "Select up to 3 states")
),
dateRangeInput(inputId = "daterange_state", label = "Select Date Range", start = today()-60, end = max(covid_data$date), min = "2020-01-15"),
plotOutput(outputId = "top10_states")
),
mainPanel(
plotOutput(outputId = "cases_state"),
plotOutput(outputId = "deaths_state"),
tags$p("A shiny app by ",
tags$a("Kyle Bennison", href="https://www.linkedin.com/in/kylebennison", target="_blank"),
" - ",
tags$a("@kylebeni012", href="https://www.twitter.com/kylebeni012", target="_blank")),
tags$p("Data - ",
tags$a("Johns Hopkins CSSE" , href="https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series", target="_blank"))
)
)
)
),
tabPanel("Testing and Hospitalizations",
fluidPage(
tags$h2("Detailed Testing Data by State"),
dataTableOutput(outputId = "total_testing_dt_state", width = "100%"),
tags$p("Data - ",
tags$a("COVID Tracking Project" , href="https://covidtracking.com/api", target="_blank")),
tags$hr(),
dateRangeInput(inputId = "daterange_testing", label = "Select Date Range", start = today()-60, end = max(covid_state_daily$date), min = "2020-01-15"),
selectizeInput(inputId = "testing_state", label = "Select a state",
choices = unique(covid_state_daily$state.name),
selected = NULL,
multiple = FALSE,
options = list(maxItems = 1, placeholder = "Select a state")
),
plotOutput(outputId = "hospitalizations_state"),
tags$p("Data - ",
tags$a("COVID Tracking Project" , href="https://covidtracking.com/api", target="_blank")),
plotOutput(outputId = "testing_state"),
tags$p("Data - ",
tags$a("COVID Tracking Project" , href="https://covidtracking.com/api", target="_blank")),
selectizeInput(inputId = "pos_test_input_state", label = "Select a state",
choices = unique(covid_state_daily$state.name),
selected = NULL,
multiple = TRUE,
options = list(maxItems = 3, placeholder = "Select up to 3 states")
),
plotOutput(outputId = "pos_tests_state"),
tags$p("Data - ",
tags$a("COVID Tracking Project" , href="https://covidtracking.com/api", target="_blank")),
tags$p("A shiny app by ",
tags$a("Kyle Bennison", href="https://www.linkedin.com/in/kylebennison", target="_blank"),
" - ",
tags$a("@kylebeni012", href="https://www.twitter.com/kylebeni012", target="_blank"))
)
)
),
tabPanel("US",
fluidPage(
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "casetype_US", label = "Select Cases to Display", choices = c("New Cases", "Total Cases")),
dateRangeInput(inputId = "daterange_US", label = "Select Date Range", start = today()-60, end = max(covid_data$date), min = "2020-01-15")
),
mainPanel(
plotOutput(outputId = "cases_US"),
plotOutput(outputId = "deaths_US"),
tags$p("A shiny app by ",
tags$a("Kyle Bennison", href="https://www.linkedin.com/in/kylebennison", target="_blank"),
" - ",
tags$a("@kylebeni012", href="https://www.twitter.com/kylebeni012", target="_blank")),
tags$p("Data - ",
tags$a("Johns Hopkins CSSE" , href="https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series", target="_blank"))
)
)
)
),
tabPanel("Global",
fluidPage(
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "casetype_global", label = "Select Cases to Display", choices = c("New Cases", "Total Cases")),
selectizeInput(inputId = "country", label = "Select a country",
choices = unique(covid_data_global$Country.Region),
selected = NULL,
multiple = TRUE,
options = list(maxItems = 1, placeholder = "Select a country")
),
dateRangeInput(inputId = "daterange_global", label = "Select Date Range", start = today()-60, end = max(covid_data_global$date), min = "2020-01-15")
),
mainPanel(
plotOutput(outputId = "cases_global"),
plotOutput(outputId = "deaths_global"),
tags$hr(),
tags$h2("Recovery Rate by Country"),
dataTableOutput(outputId = "recovery_rate_global"),
tags$p("A shiny app by ",
tags$a("Kyle Bennison", href="https://www.linkedin.com/in/kylebennison", target="_blank"),
" - ",
tags$a("@kylebeni012", href="https://www.twitter.com/kylebeni012", target="_blank")),
tags$p("Data - ",
tags$a("Johns Hopkins CSSE" , href="https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series", target="_blank"))
)
)
)
)
)
# Server ------------------------------------------------------------------
server <- function(input, output) {
# Summary Output ----------------------------------------------------------
output$top10_counties_summary <- renderPlot(
{
covid_data %>%
filter(date == max(covid_data$date)) %>%
top_n(10, New_Cases) %>%
ggplot() +
geom_col(aes(x = Combined_Key, y = New_Cases), fill = staturdays_colors("lightest_blue")) +
labs(title = "Counties with Most\nNew Cases Today",
subtitle = paste0("Data as of ", format.Date(max(covid_data$date), "%B %d, %Y")),
x = "County",
y = "Number of Cases",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(plot.title = element_text(color = staturdays_colors("dark_blue"), size = 15, face = "bold"),
plot.subtitle = element_text(size = 10)) +
scale_y_continuous(labels = comma) +
theme(axis.text.x = element_text(angle = 90)) +
scale_x_discrete(labels = function(x) str_remove(x, ", US"))
}
)
output$top10_states_summary <- renderPlot(
{
covid_data_state %>%
ungroup() %>%
filter(date == max(date)) %>%
slice_max(n = 10, order_by = New_Cases) %>%
ggplot() +
geom_col(aes(x = Province_State, y = New_Cases), fill = staturdays_colors("light_blue")) +
labs(title = "States with Most\nNew Cases Today",
subtitle = paste0("Data as of ", format.Date(max(covid_data_state$date), "%B %d, %Y")),
x = "State",
y = "Number of Cases",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(plot.title = element_text(color = staturdays_colors("dark_blue"), size = 15, face = "bold"),
plot.subtitle = element_text(size = 10)) +
scale_y_continuous(labels = comma) +
theme(axis.text.x = element_text(angle = 90))
}
)
output$recent_peaks_state <- renderDataTable(
{
datatable({covid_data_state %>%
group_by(Province_State) %>%
slice_max(New_Cases) %>%
filter(New_Cases != 0, today()-date <= 7) %>%
select(1:3)}, colnames = c("State", "Date of Peak", "New Cases"),
caption = paste0("Data as of ", format.Date(max(covid_data_state$date), "%B %d, %Y")), options = list(scrollX = TRUE, pageLength = 50)) %>%
DT::formatRound(3, digits = 0) %>%
DT::formatDate(2)
}
)
output$total_tests <- renderPlot(
{
covid_state_daily %>%
filter(date >= today() - 30) %>%
group_by(date) %>%
summarise(total_tests = sum(totalTestResultsIncrease)) %>%
ggplot(aes(x = date, y = total_tests)) +
geom_col(fill = staturdays_colors("orange")) +
labs(title = "New Tests in US by Day",
subtitle = paste0("Data as of ", format.Date(max(covid_state_daily$date), "%B %d, %Y")),
x = "Date",
y = "Number of Tests",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(plot.title = element_text(color = staturdays_colors("dark_blue"), size = 15, face = "bold"),
plot.subtitle = element_text(size = 10)) +
scale_y_continuous(labels = comma)
}
)
output$summary_pct_pos_tests <- renderPlot(
{
covid_state_daily %>%
filter(date >= today() - 60) %>%
group_by(date) %>%
summarise(pct_positive = (sum(positiveIncrease)/sum(totalTestResultsIncrease))) %>%
ggplot(aes(x = date, y = pct_positive)) +
geom_line(colour = staturdays_colors("light_blue")) +
labs(title = "Positive Test Rate in the US by Day",
subtitle = paste0("Data as of ", format.Date(max(covid_state_daily$date), "%B %d, %Y")),
x = "Date",
y = "Positive Test Percentage",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(plot.title = element_text(color = staturdays_colors("dark_blue"), size = 15, face = "bold"),
plot.subtitle = element_text(size = 10)) +
scale_y_continuous(labels = percent)
}
)
output$WOW_MOM_state <- renderDataTable(
{
datatable(
covid_state_daily_WOW_MOM,
colnames = c("State", "Month", "Monthly Cases", "Monthly Hospitalizations", "Monthly Deaths", "Week of Year", "Weekly Cases", "Weekly Hospitalizations", "Weekly Deaths"),
caption = paste0("Data as of ", format.Date(today() - 1, "%B %d, %Y")), options = list(pageLength = 60, scrollX = TRUE, columnDefs = list(list(className = 'dt-left', targets = 0:8))),
rownames = FALSE) %>%
DT::formatPercentage(c(3:5, 7:9))
}
)
# County Output -----------------------------------------------------------
output$top10_counties <- renderPlot(
{
covid_data %>%
filter(date == max(covid_data$date)) %>%
top_n(10, New_Cases) %>%
ggplot() +
geom_col(aes(x = Combined_Key, y = New_Cases), fill = staturdays_colors("orange")) +
labs(title = "Counties with Most\nNew Cases Today",
subtitle = paste0("Data as of ", format.Date(max(covid_data$date), "%B %d, %Y")),
x = "County",
y = "Number of Cases",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(plot.title = element_text(color = staturdays_colors("dark_blue"), size = 15, face = "bold"),
plot.subtitle = element_text(size = 10)) +
scale_y_continuous(labels = comma) +
theme(axis.text.x = element_text(angle = 90)) +
scale_x_discrete(labels = function(x) str_remove(x, ", US"))
}
)
output$cases <- renderPlot(
{
covid_data %>%
filter(Combined_Key %in% c(input$county), date >= input$daterange[1] & date <= input$daterange[2]) %>%
ggplot() + {
if(input$casetype == "New Cases")
geom_line(aes(y = New_Cases, x = date, colour = Combined_Key), na.rm = T)} + {
if(input$casetype == "Total Cases")
geom_line(aes(y = Cumulative_Cases, x = date, colour = Combined_Key), na.rm = T)} +
labs(title = input$casetype,
subtitle = paste0("Data as of ", format.Date(max(covid_data$date), "%B %d, %Y")),
x = "Date",
y = "Number of Cases",
color = "County",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(legend.position = "bottom") +
guides(color = guide_legend(nrow = 3, byrow = TRUE)) +
expand_limits(y = 0) +
scale_y_continuous(labels = comma)
}
)
output$deaths <- renderPlot(
{
covid_data %>%
filter(Combined_Key %in% c(input$county), date >= input$daterange[1] & date <= input$daterange[2]) %>%
ggplot() + {
if(input$casetype == "New Cases")
geom_line(aes(y = New_Deaths, x = date, colour = Combined_Key), na.rm = T)} + {
if(input$casetype == "Total Cases")
geom_line(aes(y = Cumulative_Deaths, x = date, colour = Combined_Key), na.rm = T)} +
labs(title = paste0(str_remove(input$casetype, " Cases"), " Deaths"),
subtitle = paste0("Data as of ", format.Date(max(covid_data$date), "%B %d, %Y")),
x = "Date",
y = "Number of Deaths",
color = "County",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(legend.position = "bottom") +
guides(color = guide_legend(nrow = 3, byrow = TRUE)) +
expand_limits(y = 0) +
scale_y_continuous(labels = comma)
}
)
# State Cases Output ------------------------------------------------------
output$top10_states <- renderPlot(
{
covid_data_state %>%
ungroup() %>%
filter(date == max(date)) %>%
slice_max(n = 10, order_by = New_Cases) %>%
ggplot() +
geom_col(aes(x = Province_State, y = New_Cases), fill = staturdays_colors("orange")) +
labs(title = "States with Most\nNew Cases Today",
subtitle = paste0("Data as of ", format.Date(max(covid_data_state$date), "%B %d, %Y")),
x = "State",
y = "Number of Cases",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(plot.title = element_text(color = staturdays_colors("dark_blue"), size = 15, face = "bold"),
plot.subtitle = element_text(size = 10)) +
scale_y_continuous(labels = comma) +
theme(axis.text.x = element_text(angle = 90))
}
)
output$cases_state <- renderPlot(
{
covid_data_state %>%
filter(Province_State %in% c(input$state), date >= input$daterange_state[1] & date <= input$daterange_state[2]) %>%
ggplot() + {
if(input$casetype_state == "New Cases")
geom_line(aes(y = New_Cases, x = date, colour = Province_State), na.rm = T)} + {
if(input$casetype_state == "Total Cases")
geom_line(aes(y = Cumulative_Cases, x = date, colour = Province_State), na.rm = T)} +
labs(title = input$casetype_state,
subtitle = paste0("Data as of ", format.Date(max(covid_data_state$date), "%B %d, %Y")),
x = "Date",
y = "Number of Cases",
color = "State",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(legend.position = "bottom") +
guides(color = guide_legend(nrow = 3, byrow = TRUE)) +
expand_limits(y = 0) +
scale_y_continuous(labels = comma)
}
)
output$deaths_state <- renderPlot(
{
covid_data_state %>%
filter(Province_State %in% c(input$state), date >= input$daterange_state[1] & date <= input$daterange_state[2]) %>%
ggplot() + {
if(input$casetype_state == "New Cases")
geom_line(aes(y = New_Deaths, x = date, colour = Province_State), na.rm = T)} + {
if(input$casetype_state == "Total Cases")
geom_line(aes(y = Cumulative_Deaths, x = date, colour = Province_State), na.rm = T)} +
labs(title = paste0(str_remove(input$casetype_state, " Cases"), " Deaths"),
subtitle = paste0("Data as of ", format.Date(max(covid_data_state$date), "%B %d, %Y")),
x = "Date",
y = "Number of Deaths",
color = "State",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(legend.position = "bottom") +
guides(color = guide_legend(nrow = 3, byrow = TRUE)) +
expand_limits(y = 0) +
scale_y_continuous(labels = comma)
}
)
# State Testing Output ----------------------------------------------------
output$hospitalizations_state <- renderPlot(
{
covid_state_daily_longer %>%
filter(state.name %in% c(input$testing_state), date >= input$daterange_testing[1] & date <= input$daterange_testing[2]) %>%
ggplot() +
geom_line(aes(y = value, x = date, color = current_hospitalization_severity), na.rm = T) +
labs(title = "Current Hospitalization Data",
subtitle = paste0("Data as of ", format.Date(max(covid_state_daily_longer$date), "%B %d, %Y")),
x = "Date",
y = "Number of Patients",
color = "Severity Level",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(legend.position = "bottom", plot.title = element_text(size = 20)) +
guides(color = guide_legend(nrow = 3, byrow = TRUE)) +
expand_limits(y = 0) +
scale_y_continuous(labels = comma) +
scale_color_viridis_d(labels = c("Hospitalized", "In ICU", "On Ventilator"))
}
)
output$testing_state <- renderPlot(
{
if(input$testing_state == ""){
return(covid_state_daily %>%
ggplot() +
labs(title = "Current Testing Data",
subtitle = paste0("Data as of ", format.Date(max(covid_state_daily$date), "%B %d, %Y")),
x = "Date",
y = "Number of Tests",
color = "Test Outcome",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(legend.position = "bottom", plot.title = element_text(size = 20)) +
guides(color = guide_legend(nrow = 3, byrow = TRUE)) +
expand_limits(y = 0) +
scale_y_continuous(labels = comma) +
scale_color_viridis_d(labels = c("Negative", "Positive"), option = "C", begin = .3, end = .7))
} else {
covid_state_daily %>%
filter(state.name %in% c(input$testing_state), date >= input$daterange_testing[1] & date <= input$daterange_testing[2]) %>%
ggplot() +
geom_line(aes(y = positiveIncrease, x = date, color = "Positive"), na.rm = T) +
geom_line(aes(y = negativeIncrease, x = date, color = "Negative"), na.rm = T) +
labs(title = "Current Testing Data",
subtitle = paste0("Data as of ", format.Date(max(covid_state_daily$date), "%B %d, %Y")),
x = "Date",
y = "Number of Tests",
color = "Test Outcome",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(legend.position = "bottom", plot.title = element_text(size = 20)) +
guides(color = guide_legend(nrow = 3, byrow = TRUE)) +
expand_limits(y = 0) +
scale_y_continuous(labels = comma) +
          scale_color_viridis_d(labels = c("Negative", "Positive"), option = "C", begin = .3, end = .7) # labels are matched to the colour aesthetics in alphabetical order ("Negative" before "Positive")
}
}
)
output$pos_tests_state <- renderPlot(
{
covid_state_daily %>%
group_by(state.name, date) %>%
summarise(percent_positive_test = positiveIncrease/(sum(positiveIncrease, negativeIncrease))) %>%
filter(state.name %in% c(input$pos_test_input_state), date >= input$daterange_testing[1] & date <= input$daterange_testing[2]) %>%
ggplot() +
geom_smooth(aes(y = percent_positive_test, x = date, colour = state.name), na.rm = T) +
labs(title = "Positive Test Percentage",
subtitle = paste0("Data as of ", format.Date(max(covid_state_daily$date), "%B %d, %Y")),
x = "Date",
y = "Percent of Tests Positive",
color = "State",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
guides(color = guide_legend(nrow = 3, byrow = TRUE)) +
theme(legend.position = "bottom", plot.title = element_text(size = 20)) +
expand_limits(y = 0) +
scale_y_continuous(labels = percent) +
scale_color_viridis_d()
}
)
output$total_testing_dt_state <- renderDataTable(
{
datatable(covid_state_daily_rank, colnames = c("State", "Percent of Tests Positive", "Percent of Population Tested", "Percent of Population Positive", "Positive Results", "Negative Results", "Total Tests", "Ranking - Percent of Tests Positive", "Ranking - Total Positives", "Ranking - Total Tests"),
caption = paste0("Data as of ", format.Date(max(covid_state_daily_pop$date), "%B %d, %Y")), options = list(scrollX = TRUE)) %>%
formatPercentage(2:4, digits = 2) %>%
DT::formatRound(5:7, digits = 0)
}
)
# US Output ---------------------------------------------------------------
output$cases_US <- renderPlot(
{
covid_data_us %>%
filter(date >= input$daterange_US[1] & date <= input$daterange_US[2]) %>%
ggplot() + {
if(input$casetype_US == "New Cases")
geom_line(aes(y = New_Cases, x = date), na.rm = T)} + {
if(input$casetype_US == "Total Cases")
geom_line(aes(y = Cumulative_Cases, x = date), na.rm = T)} +
labs(title = input$casetype_US,
subtitle = paste0("Data as of ", format.Date(max(covid_data_us$date), "%B %d, %Y")),
x = "Date",
y = "Number of Cases",
color = "State",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(legend.position = "bottom") +
guides(color = guide_legend(nrow = 3, byrow = TRUE)) +
expand_limits(y = 0) +
scale_y_continuous(labels = comma)
}
)
output$deaths_US <- renderPlot(
{
covid_data_us %>%
filter(date >= input$daterange_US[1] & date <= input$daterange_US[2]) %>%
ggplot() + {
if(input$casetype_US == "New Cases")
geom_line(aes(y = New_Deaths, x = date), na.rm = T)} + {
if(input$casetype_US == "Total Cases")
geom_line(aes(y = Cumulative_Deaths, x = date), na.rm = T)} +
labs(title = paste0(str_remove(input$casetype_US, " Cases"), " Deaths"),
subtitle = paste0("Data as of ", format.Date(max(covid_data_us$date), "%B %d, %Y")),
x = "Date",
y = "Number of Deaths",
color = "State",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(legend.position = "bottom") +
guides(color = guide_legend(nrow = 3, byrow = TRUE)) +
expand_limits(y = 0) +
scale_y_continuous(labels = comma)
}
)
# Global Output -----------------------------------------------------------
output$cases_global <- renderPlot(
{
covid_data_global %>%
filter(Country.Region %in% c(input$country), date >= input$daterange_global[1] & date <= input$daterange_global[2]) %>%
ggplot() + {
if(input$casetype_global == "New Cases")
geom_line(aes(y = New_Cases, x = date, colour = Country.Region), na.rm = T)} + {
if(input$casetype_global == "Total Cases")
geom_line(aes(y = Cumulative_Cases, x = date, colour = Country.Region), na.rm = T)} +
labs(title = input$casetype_global,
subtitle = paste0("Data as of ", format.Date(max(covid_data_global$date), "%B %d, %Y")),
x = "Date",
y = "Number of Cases",
color = "Country",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(legend.position = "bottom") +
guides(color = guide_legend(nrow = 3, byrow = TRUE)) +
expand_limits(y = 0) +
scale_y_continuous(labels = comma)
}
)
output$deaths_global <- renderPlot(
{
global_recover_1 <- if (identical(input$casetype_global, "New Cases")) {
filter(covid_data_global_longer,Case_Outcome %in% c("New_Deaths", "New_Recovered"))
} else if (identical(input$casetype_global, "Total Cases")) {
filter(covid_data_global_longer,Case_Outcome %in% c("Cumulative_Deaths", "Cumulative_Recovered"))
}
global_recover_2 <- global_recover_1 %>%
filter(Country.Region %in% c(input$country), date >= input$daterange_global[1] & date <= input$daterange_global[2])
global_recover_2 %>%
ggplot() +
geom_line(aes(y = Count, x = date, colour = Country.Region, linetype = Case_Outcome), na.rm = T) +
labs(title = paste0(str_remove(input$casetype_global, " Cases"), " Outcomes"),
subtitle = paste0("Data as of ", format.Date(max(covid_data_global$date), "%B %d, %Y")),
x = "Date",
y = "Number of Outcomes",
color = "Country",
linetype = "Case Outcome",
caption = "@kylebeni012 | @staturdays") +
staturdays_theme +
theme(legend.position = c(0.75, 0.75), legend.spacing = unit(0, units = "points")) +
guides(color = guide_legend(nrow = 1, byrow = TRUE), linetype = guide_legend(nrow = 2, byrow = TRUE)) +
expand_limits(y = 0) +
scale_y_continuous(labels = comma) +
{
if(input$casetype_global == "New Cases")
scale_linetype_manual(labels = c("New Deaths", "New Recoveries"), values = c("solid", "dashed"))
} +
{
if(input$casetype_global == "Total Cases")
scale_linetype_manual(labels = c("Total Deaths", "Total Recoveries"), values = c("solid", "dashed"))
}
}
)
output$recovery_rate_global <- renderDataTable(
datatable(covid_data_global_recoveries, colnames = c("Country", "Total Deaths", "Total Recoveries", "Recovery Rate"),
caption = "Total Recoveries Divided By Recoveries Plus Deaths") %>%
formatPercentage(4, digits = 2) %>%
DT::formatRound(2:3, digits = 0)
)
}
# Run App -----------------------------------------------------------------
shinyApp(ui = ui, server = server)
|
63d46caa7f9c092e131411e7baec5dfdbf9c1e91
|
fc45ee77e310e641c1164db159865e5aea355560
|
/man/ExpectationGaussianSufficientStatistics.Rd
|
cdd34060a8c9b219bda835c747afe74245103cb2
|
[] |
no_license
|
jatotterdell/varapproxr
|
2baee2d0c07a96ebae7ff499293903e9e0c91df8
|
b2e9105894e1a9f2ac7366b84d9ed701e8a5cf21
|
refs/heads/master
| 2023-07-20T15:15:23.772555
| 2023-07-14T03:35:33
| 2023-07-14T03:35:33
| 172,455,526
| 1
| 6
| null | null | null | null |
UTF-8
|
R
| false
| true
| 444
|
rd
|
ExpectationGaussianSufficientStatistics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{ExpectationGaussianSufficientStatistics}
\alias{ExpectationGaussianSufficientStatistics}
\title{Transform Gaussian natural parameters to E[T(X)] parameters.}
\usage{
ExpectationGaussianSufficientStatistics(eta)
}
\arguments{
\item{eta}{The natural parameter vector.}
}
\description{
Transform Gaussian natural parameters to the expected sufficient statistics E[T(X)].
}
|
d574a6de70bee42817cade475101be99fdf3f5b2
|
f60aa173436c790a668d76d33a3a3be34011cf68
|
/R/notebook.R
|
c2da08fd9be5ebc4a4a3fc9d1b402da43c1b4d67
|
[] |
no_license
|
MehdiChelh/shinynotebook
|
8a91cb94499278f5ec1bb22649ca46219c1cebb7
|
73c2e9787897c564b0978547b02fd1bec207c253
|
refs/heads/master
| 2020-05-03T15:00:21.497938
| 2019-04-30T07:33:58
| 2019-04-30T07:33:58
| 178,694,186
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,199
|
r
|
notebook.R
|
# Notebook's basic functions
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
#' Return a Notebook
#'
#' If \code{id=NULL} the function runs a new empty notebook, else the notebook given as argument is run.
#'
#' @param id id of the notebook
#' @param port port on which the Shiny app is served
#'
#' @return A shinyApp object which runs in the browser (see https://shiny.rstudio.com/reference/shiny/latest/shinyApp.html for more information).
#'
#' @examples
#' runNotebook()
#'
#' @import shiny
#' @import shinydashboard
#' @export
runNotebook <- function(id=NULL, port=1994) {
message("Start notebook...") # add notebook name/id in the future
shinyApp(ui=notebookUI, server=notebookServer, options = list(port=port))
}
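# Example (hypothetical port): serve the notebook on a custom port
#   runNotebook(port = 8080)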
#' Notebook UI
#'
#' Builds the dashboard UI (header, sidebar and body) of the notebook.
#'
#' @param request The HTTP request object passed in by Shiny (required for server-side bookmarking).
#'
#' @return A shinydashboard page object.
#'
#' @import shiny
#' @import shinydashboard
#' @export
notebookUI <- function(request){
header <- dashboardHeader(title = "{ ShinyNotebook }",
tags$li(tags$a(tags$span(icon("plus"), style="margin-right:10px")," Cell",
`class`="dropdown-toggle action-button shiny-bound-input",
`id`="addCellBtn"), `class`="dropdown"),
tags$li(tags$a(tags$span(icon("save")),
`class`="dropdown-toggle action-button shiny-bound-input",
`id`="._bookmark_"), `class`="dropdown"))
sidebar <- dashboardSidebar(
sidebarMenu(style="position:fixed; width:230px; height:calc(100vh - 50px); overflow-y:scroll",
div(id='end_menu_out_treat')
)
)
body <- dashboardBody()
return(dashboardPage(header, sidebar, body))
}
#' Notebook Server
#'
#' Server function of the notebook: sets up the shared NotebookSession object,
#' handles cell creation and wires up bookmarking/restoring.
#'
#' @param input,output,session Standard Shiny server arguments.
#'
#' @import shiny
#' @import shinydashboard
#' @export
notebookServer <- function(input, output, session){
# get_page <- function(session = shiny::getDefaultReactiveDomain()) {
# session$userData$shiny.router.page()$path
# }
enableBookmarking(store = "server")
  # NotebookSession
  # NotebookSession is a Reference Class (see the Reference Class documentation)
  # which is passed to all the modules of the session through the session$userData$NS variable.
  #
  # This object can be seen as a thin wrapper around the shiny library.
  # It makes it easy to share variables between notebook cells and to bookmark/restore notebooks.
session$userData$NS <- NotebookSession$new(reactive=reactiveValues(),
static=list(),
private.reactive=reactiveValues(),
private.static=list())
session$userData$NS$private.reactive[["cellCount"]] <- 0
session$userData$NS$private.reactive[["cellNames"]] <- c()
session$userData$NS$private.reactive[["SessionCells"]] <- list() # Liste de Cell SessionCells$new(id=NULL, name=NULL)
  # UI buttons
  # The notebook UI is quite minimalist, so there aren't many observeEvent() calls.
  # The few there are, defined below, concern the following buttons: addCellBtn, ...
observeEvent(input[['addCellBtn']], {
# Compute cell id
new_cell_id <- session$userData$NS$private.reactive[["cellCount"]] + 1
session$userData$NS$private.reactive[["cellCount"]] <- new_cell_id
session$userData$NS$private.reactive[["cellNames"]] <- c(session$userData$NS$private.reactive[["cellNames"]], paste("Cell", new_cell_id))
# session$userData$NS$private.reactive[["SessionCells"]]$addCell(id = new_cell_id, name = paste("Cell", new_cell_id))
    # print(NS(session$ns(new_cell_id))("ok"))  # debug trace left from development
session$userData$NS$private.reactive[["SessionCells"]][[new_cell_id]] <- session.new_cell <- CellSession$new(id=new_cell_id, userData=list(), bookmark = list(), ns = NS(session$ns(new_cell_id)))
# Insert cell in UI
session$userData$NS$insert_cell_UI(new_cell_id, session.new_cell)
})
  # Bookmarking is slightly refined with NotebookSession
  #
  # Bookmarking is very convenient in shiny as it automates the bookmark procedure.
  # However, it has some intrinsic limitations: it can't handle UIs inserted through insertUI(), ...
  # Thus bookmarking needs to be revised in the NotebookSession framework.
  #
  # The onBookmark and onRestore session bookmarking callbacks are defined below.
  # Future improvement: make onBookmark() and onRestore() wrappers
  #
  # > Bookmark NotebookSession
  # called when the session is being bookmarked (bookmark button)
  onBookmark(function(state){
    # 1. Bookmark NotebookSession
    state$values$NS <- session$userData$NS$bookmark_state()
    # 2. Bookmark cells: save the current value of every bookmarked input of
    #    every cell (assumes each CellSession exposes bookmarkIds and its
    #    ns() namespacing function, as set up in the addCellBtn observer)
    for (cell.session in session$userData$NS$private.reactive[["SessionCells"]]){
      for (bk_id in names(cell.session$bookmarkIds)){
        if (cell.session$bookmarkIds[[bk_id]] == "textInput"){
          cell.session$bookmark[[bk_id]] <- input[[cell.session$ns(bk_id)]]
        }
      }
    }
  })
#
# > Restore NotebookSession
  # called when the session is being restored from a bookmark
onRestore(function(state){
# 1. Restore NotebookSession
session$userData$NS$restore_from_bookmarked_state(state)
    # session$userData$NS$static$test <- "QLF"  # leftover test assignment from development
# 2. Restore cells
lapply(1:session$userData$NS$private.reactive[["cellCount"]], function(cell_id){
session$userData$NS$insert_cell_UI(cell_id)
})
})
}
|
9e208524e9e67bc294517787a5754f4d16d19937
|
05fce8fc1042ff01c7470f6b8c40f6329d6eaab8
|
/SEB113_CSA.R
|
335e325230d1d959ab2ca5360a2556d17e2a0c72
|
[
"MIT"
] |
permissive
|
nixsiow/CSA_113
|
52b3348444e7878fa1c450fcc1734e49767cf16c
|
6aa82863eda871454a1ab042c0ee009b2826045f
|
refs/heads/master
| 2021-01-20T21:13:21.358852
| 2016-06-12T10:58:38
| 2016-06-12T10:58:38
| 59,937,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,182
|
r
|
SEB113_CSA.R
|
# ===== START OF SCRIPT =====
# SEB 113
# Collaborative Scientific Article (CSA)
# === Group member === #
# Last Name: Siow
# First Name: Yun Kai
# Student No.: 9598138
# Course code: SE50
# ===== Dependency check =====
## pacman, the package manager to check whether
## dependency libraries are installed, and load it if it does
if (!require("pacman")) install.packages("pacman") # install pacman itself if it's not already
pacman::p_load(ggplot2, dplyr, openair, GGally, reshape2, broom, lubridate, ggmap, gridExtra) # pacman to install/load libraries
# latest ggplot2 needed to render subtitle, devtools::install_github("hadley/ggplot2")
# ===== Dependency check =====
## read raw csv file from cwd
# air.quality.clinton.raw <- read.csv(file="data/clinton-aq-2015.csv", as.is=T, head=T)
# Or read directly from data source.
url <- "http://www.ehp.qld.gov.au/data-sets/air-quality/clinton-aq-2015.csv"
air.quality.clinton.raw <- read.csv(file=url, as.is=T, head=T)
## Have a look on the first few data entries in the raw data frame
head(air.quality.clinton.raw)
## Dimension of raw data frame
dim(air.quality.clinton.raw) # it has 8760 entries and 17 variables
## Quick way to summarize the raw data frame
summary(air.quality.clinton.raw)
## ==================== ##
## DATA WRANGLING
## ==================== ##
# Use lubridate to deal with date and time: combine them into a single column
# in POSIXct format so that R can understand it.
# Overwrite the old "Date" variable.
air.quality.clinton.raw$Date <- dmy_hm(paste(air.quality.clinton.raw$Date, air.quality.clinton.raw$Time))
# get rid of the old "Time" variable colume by assigning NULL to it
# air.quality.clinton.raw$Time <- NULL
# Use lubridate to add a few extra columns: day, month, year, yday, day_of_week
air.quality.clinton.raw <- mutate(air.quality.clinton.raw,
# day = day(Date),
month = month(Date, label=T),
# year = year(Date),
# yday = yday(Date),
day_of_week = wday(Date, label=T))
## Define the data of interest and rearrange the column order
data.of.interest <- c("Date", "Time", "month", "day_of_week", "PM2.5..ug.m.3.", "Wind.Speed..m.s.", "Wind.Direction..degTN.")
air.quality.clinton <- subset(air.quality.clinton.raw, select = data.of.interest)
## Rename variable name
names(air.quality.clinton) <- c("date", "time", "month", "day_of_week", "pm2.5", "ws", "wd")
head(air.quality.clinton)
## Assign breakpoints for cutting and labelling;
## both 0 and 360 map to NORTH
breaks = c(0, seq(22.5, 337.5, by=45), 360)
## The base R cut() function divides the range of the supplied data into
## intervals and codes the values as directions such as NE, E, ...
## according to which interval they fall in.
## This turns the continuous variable into a categorical variable.
wd.label <- cut(air.quality.clinton$wd, breaks = breaks, dig.lab= 4, labels = c("N", "NE", "E", "SE", "S", "SW", "W", "NW", "N"), include.lowest = TRUE)
## Create new variable log.pm2.5 and wd.label
air.quality.clinton <- mutate(air.quality.clinton, wd.label = wd.label) %>%
mutate(log.pm2.5 = log(pm2.5)) %>%
na.omit %>%
filter(!is.nan(log.pm2.5) & !is.infinite(log.pm2.5))
# Check the levels of wd.label
levels(air.quality.clinton$wd.label)
# regroup both "N" levels into a single level: there should be 8 levels instead of 9
levels(air.quality.clinton$wd.label) <- c("N", "NE", "E", "SE", "S", "SW", "W", "NW", "N")
## Check the range of produced log.pm2.5
range(air.quality.clinton$log.pm2.5)
## Check the latest data frame
head(air.quality.clinton)
##
summary(air.quality.clinton)
## ==================== ##
## Exploratory Data Visualisation
## ==================== ##
# summarise variables
summary(air.quality.clinton[,c(6,8,9)])
# Plot the pm2.5 concentration in air as a histogram
pm1 <- ggplot(data=air.quality.clinton, aes(x=pm2.5)) +
geom_histogram(binwidth = 0.1, col="#CD5D67", fill="#CD5D67", alpha=0.4) +
theme_bw() +
theme(plot.title = element_text(lineheight=.8,
face="bold"),
strip.background = element_rect(fill = "white",
colour = "white")) +
labs(title=expression(paste("Before: ",PM[2.5]," data skewed")),
subtitle="bla bla bla",
x= expression(paste(PM[2.5]," (",mu,g,m^-3,")")),
y= "Count")
# Plot the pm2.5 concentration in air as a histogram on the log scale
pm2 <- ggplot(data=air.quality.clinton, aes(x=log.pm2.5)) +
geom_histogram(binwidth = 0.1, col="#61C9A8", fill="#61C9A8", alpha=0.7) +
theme_bw() +
theme(plot.title = element_text(lineheight=.8,
face="bold"),
strip.background = element_rect(fill = "white",
colour = "white")) +
labs(title="After log transformation",
subtitle="bla bla bla",
x= expression(paste(log," ",PM[2.5]," (",mu,g,m^-3,")")),
y= "")
grid.arrange(pm1, pm2, ncol=2)
# Plot the wind direction as a pie chart
(wind_direction.pie <-
ggplot(data=air.quality.clinton, aes(x=wd.label, fill = factor(wd.label))) +
geom_bar(width = 1, color="white", alpha=0.7) +
# rotation of the pie chart to 5.89 radian, clockwise.
coord_polar(start = 5.89, direction=1) +
theme_bw() +
theme(legend.position="none") +
labs(fill="Wind\nDirection",
title = "Frequency of various wind direction",
subtitle = "Site location: Latitude: -23.8701; Longitude: 151.2216",
x = "Wind Direction",
y = "Count")
)
# histogram of ws
(ws.histogram <- ggplot(data=air.quality.clinton, aes(x=ws)) +
geom_histogram(binwidth = 0.1, col="#1789FC", fill="#1789FC", alpha=0.6) +
theme_bw() +
theme(plot.title = element_text(lineheight=.8,
face="bold"),
strip.background = element_rect(fill = "white",
colour = "white")) +
labs(title="Wind speed distribution",
subtitle="Site location: Latitude: -23.8701; Longitude: 151.2216",
x= expression(paste("Wind Speed (",ms^-1,")")),
y= "Count")
)
# ws vs wd
ws_wd.plot <- ggplot(data=air.quality.clinton, aes(x=wd.label, y=ws)) +
geom_boxplot(outlier.colour = NULL, aes_string(colour="wd.label", fill="wd.label")) +
theme_bw() +
theme(legend.position="none",
plot.title = element_text(lineheight=.8,
face="bold"),
strip.background = element_rect(fill = "white",
colour = "white")) +
labs(title = "Wind speed vs Wind direction",
subtitle = "further split it into 8 different wind direction facet",
y = expression(paste("Wind speed, ", ms^-1)),
x = "Wind Direction")
# use the details of the plot, to derive the coordinates of where the median line is,
# and then add colour to it using geom_segment.
dat <- ggplot_build(ws_wd.plot)$data[[1]]
(ws_wd.plot <-
ws_wd.plot + geom_segment(data=dat,
aes(x=xmin, xend=xmax, y=middle, yend=middle),
colour="white", size=0.8)
)
# Plot the pm2.5 concentration varies according to wind direction in boxplot
pm_wd.plot <-
ggplot(data=air.quality.clinton, aes(x=wd.label, y=log.pm2.5)) +
geom_boxplot(outlier.colour = NULL, aes_string(colour="wd.label", fill="wd.label")) +
theme_bw() +
theme(legend.position="none",
plot.title = element_text(lineheight=.8,
face="bold"),
strip.background = element_rect(fill = "white",
colour = "white")) +
labs(title = "PM2.5 concentration varies according to wind direction",
subtitle = "Site location: Latitude: -23.8701; Longitude: 151.2216",
x = "Wind Direction",
y = expression(paste(log," ",PM[2.5]," (",mu,g,m^-3,")")))
# use the details of the plot, to derive the coordinates of where the median line is,
# and then add colour to it using geom_segment.
dat <- ggplot_build(pm_wd.plot)$data[[1]]
(pm_wd.plot <-
pm_wd.plot + geom_segment(data=dat, aes(x=xmin, xend=xmax, y=middle, yend=middle),
colour="white", size=0.8)
)
# ws and pm
(pm_ws.plot <- ggplot(data=air.quality.clinton, aes(x=ws, y=log.pm2.5)) +
geom_point( size=8, alpha=0.1, color="#545E56") +
geom_smooth(method="lm", se=FALSE, color="#FF6978", alpha=0.6, size=0.5) +
theme_bw() +
theme(legend.position="none",
plot.title = element_text(lineheight=.8,
face="bold"),
strip.background = element_rect(fill = "white",
colour = "white")) +
labs(title = "PM2.5 concentration plot against Wind Speed",
subtitle = "",
x = expression(paste("Wind Speed (",ms^-1,")")),
y = expression(paste(log," ",PM[2.5]," (",mu,g,m^-3,")")))
)
# Look up table for facet_wrap label
Direction <- c(
N = "North",
NE = "North East",
E = "East",
SE = "South East",
S = "South",
SW = "South West",
W = "West",
NW = "North West",
N = "North"
)
# global labeller used for facet label
global_labeller <- labeller(
wd.label = Direction
)
# Facet plot, log PM2.5 concentration varies according to wind speed and wind direction
(pm_ws_wd.plot <- ggplot(data=air.quality.clinton, aes(x=ws, y=log.pm2.5)) +
geom_point(aes(color=wd.label), size=4, alpha=0.3) +
geom_smooth(method="lm", se=FALSE, color="grey40", alpha=0.6, size=0.5) +
facet_wrap(~wd.label, nrow = 2,
labeller = global_labeller) +
theme_bw() +
theme(legend.position="none",
plot.title = element_text(lineheight=.8,
face="bold"),
strip.background = element_rect(fill = "white",
colour = "white")) +
labs(title = "PM2.5 concentration varies with Wind speed and Wind Direction",
subtitle = "further split it into 8 different wind direction facet",
x = expression(paste("Wind Speed (",ms^-1,")")),
y = expression(paste(log," ",PM[2.5]," (",mu,g,m^-3,")"))
)
)
## ==================== ##
## Fitting a Linear Model with Multiple Explanatory Variables
## ==================== ##
## fit six models to see what happens to the estimates
# 1. PM2.5 ~ Wind Speed
# 2. PM2.5 ~ Wind direction[s]
# 3. PM2.5 ~ Wind Speed + Wind direction[s]
# 4. PM2.5 ~ Wind speed : wind direction[s]                     (PM2.5 ~ WS x WD, interaction only)
# 5. PM2.5 ~ Wind direction[s] + Wind speed : wind direction[s] (PM2.5 ~ WD + WS x WD)
# 6. PM2.5 ~ Wind speed * wind direction[s]                     (PM2.5 ~ WS + WD + WS x WD)
# ======================================== #
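# formula syntax reminder (base R, for the models below): "a:b" adds only the
# interaction term, "a*b" expands to "a + b + a:b", and appending "- 1" drops
# the intercept so every factor level gets its own coefficient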
## 1. log.pm2.5 ~ Wind Speed (y = B_0 + B_1X1)
lm.pm_ws <- lm(data=air.quality.clinton, log.pm2.5 ~ ws)
lm.pm_ws
# summary of lm
summary(lm.pm_ws)
# Confidence interval
tidy(lm.pm_ws, conf.int = T)
# Check whether the lm residuals are normally distributed
df.fort.pm_ws <- fortify(lm.pm_ws)
head(df.fort.pm_ws)
# ======================================== #
## 2. log.pm2.5 ~ Wind direction (y = B_0 + B_2X2)
(lm.pm_wd <- lm(data=air.quality.clinton, log.pm2.5 ~ wd.label - 1))
(lm.pm_wd.intercept <- lm(data=air.quality.clinton, log.pm2.5 ~ wd.label))
# summary of lm
summary(lm.pm_wd)
summary(lm.pm_wd.intercept)
# Confidence interval
tidy(lm.pm_wd, conf.int = T)
# Check whether the lm residuals are normally distributed
df.fort.pm_wd <- fortify(lm.pm_wd)
head(df.fort.pm_wd)
# ======================================== #
## 3. log.pm2.5 ~ Wind Speed + Wind direction (y = B_0 + B_1X1 + B_2X2)
(lm.pm_ws_wd <- lm(data=air.quality.clinton, log.pm2.5 ~ ws+wd.label - 1))
(lm.pm_ws_wd.intercept <- lm(data=air.quality.clinton, log.pm2.5 ~ ws+wd.label))
# summary of lm
summary(lm.pm_ws_wd)
summary(lm.pm_ws_wd.intercept)
# Confidence interval
tidy(lm.pm_ws_wd, conf.int = T)
# Check whether the lm residuals are normally distributed
df.fort.pm_ws_wd <- fortify(lm.pm_ws_wd)
head(df.fort.pm_ws_wd)
# =========== JUST THE INTERACTION TERMs ========= #
## 4. log.pm2.5 ~ Wind speed : wind direction. (log.pm2.5 ~ WS x WD) (y = B1X1Z1 + B2X1Z2 + ....)
(lm.pm_wswd <- lm(data=air.quality.clinton, log.pm2.5 ~ ws:wd.label -1))
(lm.pm_wswd.intercept <- lm(data=air.quality.clinton, log.pm2.5 ~ ws:wd.label))
# summary of lm
summary(lm.pm_wswd)
summary(lm.pm_wswd.intercept) # for r2 ONLY
# Confidence interval
tidy(lm.pm_wswd, conf.int = T)
# Check whether the lm residuals are normally distributed
df.fort.pm_wswd <- fortify(lm.pm_wswd)
head(df.fort.pm_wswd)
# =========== WD AND THE INTERACTION TERM, THE BEST ========= #
## 5. log.pm2.5 ~ Wind direction + Wind speed : wind direction. (log.pm2.5 ~ WD + WS x WD) (y = B_0 + WD + B_1X1Z1 + ....)
(lm.pm_wd_wswd <- lm(data=air.quality.clinton, log.pm2.5 ~ wd.label-1 + ws:wd.label))
(lm.pm_wd_wswd.intercept <- lm(data=air.quality.clinton, log.pm2.5 ~ wd.label + ws:wd.label))
# summary of lm
summary(lm.pm_wd_wswd)
summary(lm.pm_wd_wswd.intercept) # for r2 ONLY
# Confidence interval
tidy(lm.pm_wd_wswd, conf.int = T)
# Check whether the lm residuals are normally distributed
df.fort.pm_wd_wswd <- fortify(lm.pm_wd_wswd)
head(df.fort.pm_wd_wswd)
# =========== Both terms and with Interaction term ============ #
## 6. log.pm2.5 ~ Wind speed * wind direction (y = B1X1 + B2X2 + .... + BiX1X2 + BiX1Xi)
(lm.pm_ws_wd_wswd <- lm(data=air.quality.clinton, log.pm2.5 ~ ws*wd.label-1))
(lm.pm_ws_wd_wswd.intercept <- lm(data=air.quality.clinton, log.pm2.5 ~ ws*wd.label))
# summary of lm
summary(lm.pm_ws_wd_wswd)
summary(lm.pm_ws_wd_wswd.intercept)
# Confidence interval
tidy(lm.pm_ws_wd_wswd, conf.int = T)
# Check whether the lm residuals are normally distributed
df.fort.pm_ws_wd_wswd <- fortify(lm.pm_ws_wd_wswd)
head(df.fort.pm_ws_wd_wswd)
# Model fitness
# What is the coefficient of determination, R2, for these models?
r2.check <- function(lm){
r2 <- broom::glance(lm)$r.squared
return(r2)
}
# another way to check r2 for a linear model with no intercept: summary()
# computes R2 about zero for "- 1" fits, which inflates it, so the
# ".intercept" versions of the models are used whenever R2 is reported
# r2.noint.check <- function(lm){
#   1 - sum(residuals(lm)^2) / sum((air.quality.clinton$log.pm2.5 - mean(air.quality.clinton$log.pm2.5))^2)
# }
# df to visualise all the model's r2
Model <- c("M1", "M2", "M3", "M4", "M5", "M6")
R2s <- c(r2.check(lm.pm_ws),
r2.check(lm.pm_wd.intercept),
r2.check(lm.pm_ws_wd.intercept),
r2.check(lm.pm_wswd.intercept),
r2.check(lm.pm_wd_wswd.intercept),
r2.check(lm.pm_ws_wd_wswd.intercept)
)
(models.fitness.df <- data.frame(Model, R2s)) # data.frame() keeps R2s numeric; cbind() would coerce it to character
## ========== Diagnostics for residuals ========== ##
# residuals of each model into one big dataframe
dat.resid <- data.frame(M1 = df.fort.pm_ws$.resid,
M2 = df.fort.pm_wd$.resid,
M3 = df.fort.pm_ws_wd$.resid,
M4 = df.fort.pm_wswd$.resid,
M5 = df.fort.pm_wd_wswd$.resid,
M6 = df.fort.pm_ws_wd_wswd$.resid)
head(dat.resid)
# melt the data, and make a histogram with a
# facet_wrap layer to split the data into its six models
dat.resid.melt <- melt(dat.resid)
# plot
all.resid.histogram <- ggplot(dat.resid.melt, aes(x = value)) +
geom_histogram(binwidth = 0.5, col="white", alpha=0.7, aes(y = ..density.., fill=variable)) +
facet_wrap(~variable) +
stat_function(fun = dnorm, colour = "grey50", size=1) +
theme_bw() +
theme(legend.position="none",
plot.title = element_text(lineheight=.8,
face="bold"),
strip.background = element_rect(fill = "white",
colour = "white")) +
labs(title="Residuals of each model against normal distribution",
subtitle="bla bla bla",
x="Residual",
y="Density")
# Residual histogram
Resid.histogram <- ggplot(df.fort.pm_wd_wswd, aes(x = .resid)) +
geom_histogram(binwidth = 0.5, col="white", alpha=0.7, fill="#4981D1", aes(y = ..density..)) +
stat_function(fun = dnorm, colour = "grey50", size=1) +
theme_bw() +
theme(legend.position="none",
plot.title = element_text(lineheight=.8,
face="bold"),
strip.background = element_rect(fill = "white",
colour = "white")) +
labs(title="Residuals from the model 5 plot against Normal Distribution",
subtitle="Histogram of the residuals with density on the y axis with assumption of normal distribution of the residuals",
x="Residual",
y="Density")
# Model 5
# Make a scatter plot of the fitted values vs the residuals,
# including a smooth line of best fit to determine whether the residuals have a mean of zero
homoplot <- ggplot(df.fort.pm_wd_wswd, aes(y=.resid, x=.fitted)) +
geom_point(aes(color=wd.label), alpha=0.3) +
geom_smooth(se=FALSE, color="blue", alpha=0.6, size=0.5) +
theme_bw() +
theme(plot.title = element_text(lineheight=.8,
face="bold"),
strip.background = element_rect(fill = "white",
colour = "white")) +
labs(title="Homogeneity of errors",
subtitle="Residuals doesn't look like the variance stays the same as we move from left to right along the fitted values axis.",
x="Fitted value",
y="Residuals")
# Does it look like there’s unexplained variation in the residuals? Why or why not?
# Make a quantile-quantile plot of the standardised residuals from the interaction model.
qqplot <- ggplot(df.fort.pm_wd_wswd, aes(sample=.stdresid)) +
stat_qq(geom="point", color="#00CC99", size=5, alpha=0.3) +
# geom_abline(xintercept=0, slope=1, lty=2)
  geom_abline(intercept=0, slope=1, lty=2, color="#FF1654", size=0.8) + # xintercept is deprecated; use intercept
theme_bw() +
theme(legend.position="none",
plot.title = element_text(lineheight=.8,
face="bold")) +
labs(title="Normality of the residuals, \nquantile-quantile (QQ) plot of the standardised residuals.",
subtitle="The quantiles of the values are plotted against the quantiles of a standard Normal.",
x="Theoretical (Z ~ N(0,1))",
y="Sample")
# Create a scatterplot of the fitted vs observed data (yhat vs y),
# including a line showing where the yhat and y are equal.
goodnessplot <- ggplot(data=df.fort.pm_wd_wswd, aes(y=log.pm2.5, x=.fitted)) +
geom_point(aes(color=wd.label), alpha=0.3, size=3) +
geom_abline(intercept=0, slope=1) +
theme_bw() +
theme(plot.title = element_text(lineheight=.8,
face="bold"),
strip.background = element_rect(fill = "white",
colour = "white")) +
labs(title="Fitted vs observed data (yhat vs y)",
subtitle="Line showing where the yhat and y are equal.",
x="Fitted value (yhat)",
y=expression(paste(log," ",PM[2.5]," (observed data)")))
# ==== ANOVA ====
# Use the F test via the anova() function to compare nested models
# models 4 and 5 have different but close r2 values, so use anova to compare them
# model 4 is nested inside model 5
# use the fitted models with intercepts when making anova comparisons
models.fitness.df
anova(lm.pm_wswd.intercept, lm.pm_wd_wswd.intercept)
# Model 5 vs. Model 6, model 5 nested in model 6
# Same r2 but model 6 has extra term (ws)
anova(lm.pm_wd_wswd.intercept, lm.pm_ws_wd_wswd.intercept)
# =========== ggmap ========= #
# ggmap
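# note: newer ggmap releases require registering a Google API key
# (ggmap::register_google()) before get_map() calls like these will succeed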
clinton = get_map(location = c(lon = 151.2216, lat = -23.8701), zoom = 16)
clinton.satelite = get_map(location = c(lon = 151.2216, lat = -23.8701), zoom = 16, maptype = "satellite")
# google map layer
sensor_map <- ggmap(clinton, extent="device") +
geom_point(aes(x=151.2216, y=-23.8701), color="red", size=7, alpha=0.05) +
theme_bw() +
annotate("text", x=151.2216, y=-23.8701, label = "Sensor", colour = I("red"), size = 3.5) +
labs(title = "Instrument location at Clinton",
subtitle = "Site location: Latitude: -23.8701; Longitude: 151.2216",
x = "Longitude",
y = "Latitude")
# save objects --------------------
# note: the data/ directory must already exist for save.image() below to succeed
save.image(file="data/SEB113_CSA_Objects.RData", safe = TRUE)
# ===== END OF SCRIPT =====
|
663bbe146cf65112e9df924ae2b345afa85a4a3f
|
bb3fd8e814b3210022371974c95684066034ec39
|
/man/regular.Rd
|
fbb785d8caa79340ba040eec93302bc470e8cf18
|
[] |
no_license
|
xtmgah/tsibble
|
34f9990f5db6c39552e446ebee7b399d05fa12b2
|
1c5ec296036d86cae1a4c24bc01fa9e676cd63ed
|
refs/heads/master
| 2020-03-23T00:40:20.332753
| 2018-07-13T13:06:38
| 2018-07-13T13:06:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 659
|
rd
|
regular.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as-tsibble.R
\name{is_regular}
\alias{is_regular}
\alias{is.regular}
\alias{is_ordered}
\title{\code{is_regular} checks if a tsibble is spaced at regular time or not; \code{is_ordered}
checks if a tsibble is ordered by key and index.}
\usage{
is_regular(x)
is.regular(x)
is_ordered(x)
}
\arguments{
\item{x}{A tsibble object.}
}
\description{
\code{is_regular} checks if a tsibble is spaced at regular time or not; \code{is_ordered}
checks if a tsibble is ordered by key and index.
}
\examples{
data(pedestrian)
is_regular(pedestrian)
is_ordered(pedestrian)
}
|
b686744ec5331a911d9a899cd4b8c03c58c90b28
|
b1201a76b097cd057208a3799bef3bbb0e37ea6d
|
/dummy variables.R
|
721577620c85e9aac376a9c5af9127f831dcd865
|
[] |
no_license
|
dhanapalaprudhviraj/analyticsproject1
|
faf3b56cba05698cf4b552fe7d01f680a058e3cd
|
243b8d58005e157e61d29366fbbf85a8b8d5a672
|
refs/heads/master
| 2020-03-07T03:57:40.502434
| 2018-04-02T11:04:14
| 2018-04-02T11:04:14
| 127,252,287
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 327
|
r
|
dummy variables.R
|
#dummy variables
df=mtcars
df$cyl=factor(df$cyl)
df$am=factor(df$am)
summary(df$cyl)
summary(df$am)
summary(df)
m1=lm(mpg~wt+cyl+am,data=df)
summary(m1)
predict(m1,newdata=data.frame(wt=c(2,2),cyl=factor(c(4,6)),am=factor(c(1,1))))
## prediction equations implied by the fitted dummy coefficients
## (coefficient values as quoted by the original lines; check against summary(m1)):
wt <- 2                        # example weight, matching the predict() call above
y4 <- 33.9908 - 3.2*wt         # 4 cylinders (reference level)
y6 <- 33.9908 - 3.2*wt - 4.2   # 6 cylinders: cyl6 dummy = 1
y8 <- 33.9908 - 3.2*wt - 6.07  # 8 cylinders: cyl8 dummy = 1
|
c06afaa0f892e349b8148436c2fc24597e428774
|
e7c040329363e813d79b64513bb3ffb3b7573617
|
/pySAHM/Resources/R_Modules/Testing/YetAnotherDebugIDrive.r
|
a5c9aac660d7df596fb5b45c9ebd1db679452bb9
|
[] |
no_license
|
jpocom/sahm
|
800e9c62b401a11334de74cf1b903f9fc20dd886
|
9b2f764ec1bb6a01077ddd0030e6dcf26fb24867
|
refs/heads/SAHM_1_1
| 2021-01-21T03:51:02.865367
| 2015-04-07T17:14:45
| 2015-04-07T17:14:45
| 33,557,074
| 0
| 0
| null | 2015-04-07T17:11:39
| 2015-04-07T17:11:37
| null |
UTF-8
|
R
| false
| false
| 9,572
|
r
|
YetAnotherDebugIDrive.r
|
#debug branch
setwd("I:\\VisTrails\\VisTrails_SAHM_x64_debug\\VisTrails\\vistrails\\packages\\sahm_MarianDev\\pySAHM\\Resources\\R_Modules")
ScriptPath="I:\\VisTrails\\VisTrails_SAHM_x64_debug\\VisTrails\\vistrails\\packages\\sahm_MarianDev\\pySAHM\\Resources\\R_Modules"
dir.path<-"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\AcrossModelPerformance\\Debug10.29"
#master branch
setwd("I:\\VisTrails\\VisTrails_SAHM_x64_debug\\VisTrails\\vistrails\\packages\\sahm\\pySAHM\\Resources\\R_Modules")
ScriptPath="I:\\VisTrails\\VisTrails_SAHM_x64_debug\\VisTrails\\vistrails\\packages\\sahm\\pySAHM\\Resources\\R_Modules"
dir.path<-"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\AcrossModelPerformance\\Master10.15"
#For Model tests
source("LoadRequiredCode.r")
source("MARS.helper.fcts.r")
source("GLM.helper.fcts.r")
source("BRT.helper.fcts.r")
source("RF.helper.fcts.r")
#For Apply Model Tests
source("EvaluateNewData.r")
#For PairsExplore and parameter inspection
source("PairsExplore.r")
source("Predictor.inspection.r")
source("my.panel.smooth.binary.r")
#For Data Splitting
source("TestTrainSplit.r")
source("CrossValidationSplit.r")
file.list<-list.files("I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite")
rc=c(rep("responseCount",times=3),rep("responseBinary",times=10))
input.file<-vector()
input.file=c("I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\CountFactorCVEvaluation.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\CountFactorEvaluation.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\CountFactorSplit.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\ElithPsdoAbs.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\ElithSyntheticPresAbsLargeDat.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\ElithSynthPresAbsTestTrainEval.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\PresAbsEval.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\PresAbsFactorCVEvaluation.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\PresAbsNonSpatial.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\PresAbsNoSplit.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\PresAbsSelectionEval.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\UsedAvailableSp1CV.csv",
"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\UsedAvailableSp1NoCV.csv")
predictor<-c("NDVI_annualMinimumValue_2005","NDVI_browndownrates1_2009","romoveg_rc_categorical","Temperature","Noise2Rast","NDVI_amplitudes1_2006","ppt_1971_2000_06_800m",
"NDVI_annualMeanValue_2006","NDVI_greenuprates1_2003")
responseCol<-c(rep("responseBinary",times=6),rep("responseCount",times=1))
#I'm cutting these out of the standard test suite because they take a long time to run
#and only test whether we run well on large datasets or big tiffs
#"C:/VisTrails/mtalbert_20110504T132851/readMaTests/CanadaThistleNewFormat.csv"
#"C:/VisTrails/mtalbert_20110504T132851/readMaTests/LargeSplit.csv"
#add a missing data csv and maybe a couple with pseudo absence
output.dir<-vector()
output.dir[1]<-paste(dir.path,"\\rf",sep="")
output.dir[2]<-paste(dir.path,"\\brt",sep="")
output.dir[3]<-paste(dir.path,"\\mars",sep="")
output.dir[4]<-paste(dir.path,"\\glm",sep="")
output.dir[5]<-paste(dir.path,"\\maxlike",sep="")
######## Model Fit Test ###########
##BRT
for(i in 1:length(input.file)){
 try(FitModels(ma.name=input.file[i],
      tif.dir=NULL,output.dir=output.dir[2],
      response.col=rc[i],make.p.tif=T,make.binary.tif=F,n.folds=3,simp.method="cross-validation",tc=NULL,alpha=1,
      family = "bernoulli",max.trees = 10000,tolerance.method = "auto",
      tolerance = 0.001,seed=1,opt.methods=2,
      debug.mode=T,responseCurveForm="pdf",script.name="brt",
      # duplicated simp.method/max.trees/opt.methods arguments removed
      # (duplicate named arguments error out when they match declared formals)
      learning.rate =NULL, bag.fraction = 0.5,prev.stratify = TRUE,MESS=F))
try(rm(out),silent=TRUE)
}
##MARS
for(i in 1:length(input.file)){
try(FitModels(ma.name=input.file[i],
tif.dir=NULL,output.dir=output.dir[3],
response.col=rc[i],make.p.tif=T,make.binary.tif=T,
mars.degree=1,mars.penalty=2,debug.mode=T,responseCurveForm="pdf",script.name="mars",opt.methods=2,MESS=TRUE))
}
##GLM
for(i in 1:length(input.file)){
try(FitModels(ma.name=input.file[i],
tif.dir=NULL,
output.dir=output.dir[4],
response.col=rc[i],make.p.tif=T,make.binary.tif=F,
simp.method="AIC",debug.mode=T,responseCurveForm="pdf",script.name="glm",MESS=FALSE,opt.methods=2,squared.terms=TRUE))
}
### Random Forest
for(i in 1:length(input.file)){
proximity=NULL
try(FitModels(ma.name=input.file[i],
tif.dir=NULL,
output.dir=output.dir[1],
response.col=rc[i],make.p.tif=T,make.binary.tif=F,
debug.mode=T,opt.methods=2,script.name="rf",
responseCurveForm="pdf",xtest=NULL,ytest=NULL,n.trees=1000,mtry=NULL,
samp.replace=FALSE,sampsize=NULL,nodesize=NULL,maxnodes=NULL,importance=FALSE,
localImp=FALSE,nPerm=1,proximity=NULL,oob.prox=proximity,norm.votes=TRUE,
do.trace=FALSE,keep.forest=NULL,keep.inbag=FALSE,MESS=F,seed=1))
}
### Maxlike
Formula="~bio_06_2000_2km + bio_14_2000_4km + NDVI_annualMaximumValue_2009 + NDVI_greenuprates1_2003 + NDVI_peakdates1_2003"
for(i in 1:2){
try(FitModels(ma.name=input.file[i],
tif.dir=NULL,
output.dir=output.dir[5],
response.col=rc[i],
make.p.tif=T,make.binary.tif=T,
debug.mode=T,responseCurveForm="pdf",script.name="maxlike",
opt.methods=2,MESS=T,Formula=Formula,UseTiffs=FALSE))
}
### Pairs Explore Tests #####
source("PairsExplore.r")
source("PairsExploreHelperFcts.r")
source("read.dat.r")
source("chk.libs.r")
source("read.dat.r")
source("my.panel.smooth.binary.r")
source("Predictor.inspection.r")
for(i in 1:length(predictor)){
if(i==1) {
try(Pairs.Explore(num.plots=5,
min.cor=.5,
input.file=input.file[i],
output.file=paste(dir.path,"\\",i,"Par1",".jpg",sep=""),
response.col=rc[i],
pres=TRUE,
absn=TRUE,
bgd=TRUE))
try(Pairs.Explore(num.plots=10,
min.cor=.5,
input.file=input.file[i],
output.file=paste(dir.path,"\\",i,"Par2",".jpg",sep=""),
response.col=rc[i],
pres=TRUE,
absn=FALSE,
bgd=FALSE,
cors.w.highest=TRUE))
try(Predictor.inspection(predictor[i],
input.file=input.file[i],
output.dir=dir.path,
response.col=rc[i],
pres=TRUE,
absn=TRUE,
bgd=TRUE))
}
try(Pairs.Explore(num.plots=15,
                  min.cor=.5, # min.cor was undefined at this point; reuse the .5 threshold from above
input.file=input.file[i],
output.file=paste(dir.path,"\\",i,".jpg",sep=""),
response.col=rc[i],
pres=TRUE,
absn=TRUE,
bgd=TRUE))
try(Predictor.inspection(predictor[i],
input.file[i],
output.dir=paste(dir.path,"\\",sep=""),
response.col=rc[i],
pres=TRUE,
absn=TRUE,
bgd=TRUE))
}
input.file<-"I:\\VisTrails\\VisTrails_SAHM_x32_debug\\VisTrails\\vistrails\\packages\\TestingRCode2\\TestSuite\\PairsExploreManyPredictors.csv"
for (i in 1:25){
try(Pairs.Explore(num.plots=i,
min.cor=.5,
input.file=input.file,
output.file=paste(dir.path,"\\",i,"NumPlotsTest",".jpg",sep=""),
response.col=rc[4],
pres=TRUE,
absn=TRUE,
bgd=TRUE
))
}
### Apply Model Test
input.workspace=list() # fill in with paths to saved model workspaces
for(i in seq_along(input.workspace)){
EvaluateNewData(workspace=paste(output.dir,"modelWorkspace",sep="\\"),out.dir=output.dir,b.tif=TRUE,p.tif=TRUE,mess=TRUE,new.tifs="I:\\VisTrails\\WorkingFiles\\workspace\\_applyModel\\Error\\MergedDataset_10.csv",produce.metrics=TRUE)
}
### Data Splitting Tests
|
6636be9f1d884f078b7ae0b47b8670194a321dc2
|
66137fe9c871c88aa344d2b61696b57822a519a6
|
/run_analysis.R
|
61d7588cd4f73131ce5c004b81d0901f2169abe3
|
[] |
no_license
|
ilabiga/Getting_and_Cleaning_Data_Course_Project
|
d392fd93ec21976007901b130f6590910214149f
|
707538d8ef71c8fe4beaf02f900428b5112412ad
|
refs/heads/master
| 2020-05-29T14:39:40.565184
| 2016-08-04T13:23:53
| 2016-08-04T13:23:53
| 64,925,190
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,017
|
r
|
run_analysis.R
|
## load the package dplyr
library(dplyr)
## before running this script please save it into the directory where the data folder UCI HAR Dataset has been saved
## now set the R working directory equal to that directory with the setwd(dir) command
## READ DATA
## read 'features.txt' and 'activity_labels.txt'
## inside the directory 'UCI HAR Dataset':
features <- read.table("UCI HAR Dataset/features.txt")
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
## read 'subject_train.txt', 'y_train.txt' and 'X_train.txt'
## inside the directory 'UCI HAR Dataset/train':
subjectTrain <- read.table("UCI HAR Dataset/train/subject_train.txt")
yTrain <- read.table("UCI HAR Dataset/train/y_train.txt")
xTrain <- read.table("UCI HAR Dataset/train/X_train.txt")
## read 'subject_test.txt', 'y_test.txt' and 'X_test.txt'
## inside the directory 'UCI HAR Dataset/test':
subjectTest <- read.table("UCI HAR Dataset/test/subject_test.txt")
yTest <- read.table("UCI HAR Dataset/test/y_test.txt")
xTest <- read.table("UCI HAR Dataset/test/X_test.txt")
## DATA PREPARATION
## append subjectTest after subjectTrain:
subject <- rbind(subjectTrain,subjectTest)
## left join between yTrain/yTest and activityLabels; the corresponding
## activity name is associated to each experiment (= each row):
yTrainActivity <- left_join(yTrain,activityLabels)
yTestActivity <- left_join(yTest,activityLabels)
## append yTestActivity after yTrainActivity:
activity <- rbind(yTrainActivity,yTestActivity)
## 1) MERGE THE TRAINING AND TEST SETS TO CREATE ONE DATA SET
## append xTest after xTrain and use
## descriptive feature names as column names
xDataSet <- rbind(xTrain,xTest)
colnames(xDataSet) <- features$V2
## 2) EXTRACTS ONLY THE MEASUREMENTS ON THE MEAN AND STANDARD DEVIATION
## search for the text "mean()" or "std()" inside the feature names:
mean <- grepl("mean()",features$V2)
std <- grepl("std()",features$V2)
extraction <- mean | std
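## note: in the patterns above "()" is an empty regex group, so grepl()
## effectively matches the substrings "mean" and "std" anywhere in a feature
## name; names like "meanFreq()" therefore match as well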
## column extraction:
xDataSetExtract <- xDataSet[,extraction]
## 3) USES DESCRIPTIVE ACTIVITY NAMES THE ACTIVITIES IN THE DATA SET
## add columns containing the subject ID and the activity name
## associated to each experiment:
DataAll <- cbind(subject, activity[,2],xDataSetExtract)
## 4) APPROPRIATELY LABELS THE DATA SET WITH DESCRIPTIVE VARIABLE NAMES
## adjust column names:
colnames(DataAll) <- c("ID_Subject","Activity",colnames(xDataSetExtract))
## 5) CREATE A FINAL TIDY DATA SET WITH THE AVERAGE OF EACH VARIABLE
## FOR EACH ACTIVITY AND EACH SUBJECT
## aggregation on activity and subject and average calculation:
DataFinal <-aggregate(DataAll[,3:81], by=list(DataAll[,1],DataAll[,2]),FUN=mean, na.rm=TRUE)
## adjust column names:
colnames(DataFinal) <- c("Subject","Activity",colnames(xDataSetExtract))
## Export the final data set:
write.table(DataFinal, "C:/Users/UL16971/Downloads/Dati-R/Corso-Getting-Cleaning-Data/Project/UCI HAR Dataset/OutputData.txt", sep="\t", row.name=FALSE)
|
d8403d5f6f8f82a43982ec6c5b49adb715b35893
|
4c8b6df0fa874f9fa9f8c20e4598a30c9b990011
|
/R/master.r
|
4aa5c1ddd18c41ef6d15b3dfd00ac11b35f8af96
|
[
"Apache-2.0"
] |
permissive
|
statquant/clustermq
|
071cb42e29ba29ec7f0025d8e446d6e480126a6f
|
ee71d865f4776ec1b0e76021820fdc04f273169f
|
refs/heads/master
| 2023-07-03T02:24:15.136141
| 2021-06-24T08:48:32
| 2021-06-24T08:48:32
| 273,743,962
| 0
| 0
|
Apache-2.0
| 2020-06-20T16:26:49
| 2020-06-20T16:26:48
| null |
UTF-8
|
R
| false
| false
| 4,796
|
r
|
master.r
|
#' Master controlling the workers
#'
#' exchanging messages between the master and workers works the following way:
#' * we have submitted a job where we don't know when it will start up
#' * it starts, sends is a message list(id=0) indicating it is ready
#' * we send it the function definition and common data
#' * we also send it the first data set to work on
#' * when we get any id > 0, it is a result that we store
#' * and send the next data set/index to work on
#' * when computatons are complete, we send id=0 to the worker
#' * it responds with id=-1 (and usage stats) and shuts down
#'
#' @param qsys Instance of QSys object
#' @param iter Objects to be iterated in each function call
#' @param rettype Return type of function
#' @param fail_on_error If an error occurs on the workers, continue or fail?
#' @param chunk_size Number of function calls to chunk together
#' defaults to 100 chunks per worker or max. 500 kb per chunk
#' @param timeout Maximum time in seconds to wait for worker (default: Inf)
#' @param max_calls_worker Maximum number of function calls that will be sent to one worker
#' @param verbose Print progress messages
#' @return A list of whatever `fun` returned
#' @keywords internal
master = function(qsys, iter, rettype="list", fail_on_error=TRUE,
chunk_size=NA, timeout=Inf, max_calls_worker=Inf, verbose=TRUE) {
# prepare empty variables for managing results
n_calls = nrow(iter)
job_result = rep(vec_lookup[[rettype]], n_calls)
submit_index = 1:chunk_size
jobs_running = 0
cond_msgs = list()
n_errors = 0
n_warnings = 0
shutdown = FALSE
kill_workers = FALSE
on.exit(qsys$finalize())
if (verbose) {
message("Running ", format(n_calls, big.mark=",", scientific=FALSE),
" calculations (", qsys$data_num, " objs/",
format(qsys$data_size, big.mark=",", units="Mb"),
" common; ", chunk_size, " calls/chunk) ...")
pb = progress::progress_bar$new(total = n_calls,
format = "[:bar] :percent (:wup/:wtot wrk) eta: :eta")
pb$tick(0, tokens=list(wtot=qsys$workers, wup=qsys$workers_running))
}
# main event loop
while((!shutdown && submit_index[1] <= n_calls) || jobs_running > 0) {
msg = qsys$receive_data(timeout=timeout)
if (is.null(msg)) { # timeout reached
if (shutdown) {
kill_workers = TRUE
break
} else
stop("Socket timeout reached, likely due to a worker crash")
}
if (verbose)
pb$tick(length(msg$result),
tokens=list(wtot=qsys$workers, wup=qsys$workers_running))
# process the result data if we got some
if (!is.null(msg$result)) {
call_id = names(msg$result)
jobs_running = jobs_running - length(call_id)
job_result[as.integer(call_id)] = msg$result
n_warnings = n_warnings + length(msg$warnings)
n_errors = n_errors + length(msg$errors)
if (n_errors > 0 && fail_on_error == TRUE) {
shutdown = TRUE
timeout = getOption("clustermq.error.timeout", min(timeout, 30))
}
new_msgs = c(msg$errors, msg$warnings)
if (length(new_msgs) > 0 && length(cond_msgs) < 50)
cond_msgs = c(cond_msgs, new_msgs[order(names(new_msgs))])
}
if (shutdown || (!is.null(msg$n_calls) && msg$n_calls >= max_calls_worker)) {
qsys$send_shutdown_worker()
next
}
if (msg$token != qsys$data_token) {
qsys$send_common_data()
} else if (submit_index[1] <= n_calls) {
# if we have work, send it to the worker
submit_index = submit_index[submit_index <= n_calls]
qsys$send_job_data(chunk = chunk(iter, submit_index))
jobs_running = jobs_running + length(submit_index)
submit_index = submit_index + chunk_size
# adapt chunk size towards end of processing
cs = ceiling((n_calls - submit_index[1]) / qsys$workers_running)
if (cs < chunk_size) {
chunk_size = max(cs, 1)
submit_index = submit_index[1:chunk_size]
}
} else if (qsys$reusable) {
qsys$send_wait()
} else { # or else shut it down
qsys$send_shutdown_worker()
}
}
if (!kill_workers && (qsys$reusable || qsys$cleanup(quiet=!verbose)))
on.exit(NULL)
summarize_result(job_result, n_errors, n_warnings, cond_msgs,
min(submit_index)-1, fail_on_error)
}
|
668fd9cb8d9aeec98314b140d16edd2a4df0f28d
|
715c1eee20f2ce63b755f5fe32f3734778432c9c
|
/01_48_spineplot.R
|
6d386cf7bbd7640bc545c949001e18cacb3dbcdd
|
[] |
no_license
|
nikhiljohnbejoy/Rconcepts
|
47dd030d10abf027338a657e7c97826cc0677798
|
321693fb423d118593b9cce0266ca52e1dcd02fd
|
refs/heads/master
| 2022-12-01T02:28:16.322446
| 2020-07-22T03:52:05
| 2020-07-22T03:52:05
| 281,564,317
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 686
|
r
|
01_48_spineplot.R
|
# main idea: creating spineplots
# width of bars = frequency of X
# height of bars = frequency of y
# Y must be a factor and is the dependent variable
spineplot(ChickWeight$weight, ChickWeight$Diet) # spineplot(x,y)
# interesting observations
# Height of bars indicates obs per diet. Diet 1 has more obs
# Width of bars indicates obs per weight. More chicks are weighed between 50 and 100
# or...
spineplot(Diet ~ weight, data = ChickWeight) # spineplot(y ~ x)
# bells and whistles
spineplot(Diet ~ weight,
data = ChickWeight,
breaks = fivenum(ChickWeight$weight),
col = c(5:8),
xlab = "Chicken Weight",
ylab = "Chicken Diet")
|
3c2ebb9272d9b0a02765e0f3eb2b516d850e7682
|
b2d96a8590e7895501a21c52c9036fee9bde1c41
|
/Artificial Intelligence/Next Word Auto Completion/task6_Next_Word.R
|
717380a8febeb3cf25c5e7bcea5d8e2eed34da62
|
[] |
no_license
|
xchromosome219/Portfolio
|
37264581e50e5e40fcd0c61c17c10ef02b6a4340
|
f31a5f4ee835a8e59affe5dba492f0e5a8ecfa96
|
refs/heads/master
| 2020-03-08T14:00:20.812769
| 2016-12-18T11:51:14
| 2016-12-18T11:51:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,401
|
r
|
task6_Next_Word.R
|
library(data.table)
path<-"./data"
bigram <- readRDS(paste0(path,"/smle.n2.join.rds",sep=""))
trigram <- readRDS(paste0(path,"/smle.n3.join.rds",sep=""))
quadrigram <- readRDS(paste0(path,"/smle.n4.join.rds",sep=""))
wordBank <- readRDS(paste0(path,"/wordBank.rds",sep=""))
wordBank$word <- as.character(wordBank$word)
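# backoff scheme implemented below: try the quadrigram table on the last three
# words; on a miss fall back to the trigram table (last two words), then the
# bigram table (last word), and finally a fixed list of default words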
splitString <- function(text) {
text <- gsub("[^A-Za-z\']", "", text)
text <- tolower(text)
text <- text[text != ""]
if (length(text) == 3) {
gram <- 3
string3 <- paste0(tail(text, 3), collapse = " ")
string2 <- paste0(tail(text, 2), collapse = " ")
string1 <- paste0(tail(text, 1), collapse = " ")
} else if (length(text) == 2){
gram <- 2
string3 <- NA
string2 <- paste0(tail(text, 2), collapse = " ")
string1 <- paste0(tail(text, 1), collapse = " ")
} else {
gram <- 1
string3 <- NA
string2 <- NA
string1 <- paste0(tail(text, 1), collapse = " ")
}
return(c(gram, string1, string2, string3))
}
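# illustrative check of splitString() given the code above:
# splitString(c("Would", "like", "to"))
# -> c("3", "to", "like to", "would like to"), i.e. the n-gram order followed
#    by the last 1-, 2- and 3-word strings, lower-cased and letters-only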
Backoff.1 <- function(string1, idx = 0) {
  print("GRAM1")
  result <- c()
  default <- c("the", "to", "and", "a", "of") # fallback words ("defult" typo fixed)
  if (length(grep(string1, bigram$input)) != 0) { # string is in the DB
    print("GRAM1-1")
    if(idx != 0) { # called from Backoff.2/Backoff.3, return a single value only
      outputX <- bigram[input == string1 & rank == idx, ]$output
      outputX <- ifelse(length(outputX) == 0, default[idx], outputX)
      return(outputX)
    }
    for (i in seq(1, 4)) { # obtain the top 4 choices
      outputX <- bigram[input == string1 & rank == i, ]$output
      outputX <- ifelse(length(outputX) == 0, default[i], outputX)
      result <- c(result, outputX)
    }
  } else if (idx != 0) { # only one missing value was requested
    print("GRAM1-2")
    return(default[idx])
  } else {
    print("GRAM1-3")
    return(default)
  }
  return(result)
}
Backoff.2 <- function(string1, string2, idx = 0) {
  print("GRAM2")
  result <- c()
  if (length(grep(string2, trigram$input)) != 0) { # string is in the DB
    print("GRAM2-1")
    print(idx)
    if(idx != 0) { # called from Backoff.3, return a single value only
      outputX <- trigram[input == string2 & rank == idx, ]$output
      outputX <- ifelse(length(outputX) == 0,
                        Backoff.1(string1, idx=idx), outputX)
      return(outputX)
    }
    for (i in seq(1, 4)) { # obtain the top 4 choices
      outputX <- trigram[input == string2 & rank == i, ]$output
      outputX <- ifelse(length(outputX) == 0,
                        Backoff.1(string1, idx=i), outputX)
      result <- c(result, outputX)
    }
  } else if (idx != 0) { # only one missing value: back off to Backoff.1
    print("GRAM2-2")
    return(Backoff.1(string1, idx))
  } else { # string isn't in the DB, move to Backoff.1
    print("GRAM2-3")
    return(Backoff.1(string1, 0))
  }
  return(result)
}
Backoff.3 <- function(string1, string2, string3) {
print("GRAM3")
result <- c()
if (length(grep(string3, quadrigram$input)) != 0) { # string is in the DB
    for (i in seq(1, 4)) { # obtain the top 4 choices
      print("GRAM3-2")
      outputX <- quadrigram[input == string3 & rank == i, ]$output
outputX <- ifelse(length(outputX) == 0,
Backoff.2(string1, string2, idx=i), outputX)
result <- c(result, outputX)
}
} else # string isn't in the DB, move to Backoff.2
return(Backoff.2(string1, string2, 0))
return(result)
}
predictWord <- function(text) {
text <- splitString(text)
if (text[1] == 3){
t<-Backoff.3(text[2], text[3], text[4])
print(class(t))
return(t)
} else if (text[1] == 2) {
return(Backoff.2(text[2], text[3]))
} else
return(Backoff.1(text[2]))
}
predictWord2 <- function(text) {
text <- gsub("[^A-Za-z\']", "", text)
text <- tolower(text)
index <- grepl(paste0("^", text), wordBank$word)
result <- wordBank[index, ][1:4]
return(result)
}
text <- tail(unlist(strsplit("asd asd sad sad adsade2", split = ' ')), 3)
predictWord(text)
text <- tail(unlist(strsplit("i would like b", split = ' ')), 3)
predictWord(text)
text <- tail(unlist(strsplit("i would like to get the fir", split = ' ')), 3)
predictWord(text)
text <- tail(unlist(strsplit("i", split = ' ')), 1)
predictWord2(text)
text <- tail(unlist(strsplit("i would", split = ' ')), 1)
predictWord2(text)
text <- tail(unlist(strsplit("i would like", split = ' ')), 1)
predictWord2(text)
text <- tail(unlist(strsplit("i would like to g", split = ' ')), 1)
predictWord2(text)
text <- tail(unlist(strsplit("i would like to get the fir", split = ' ')), 1)
predictWord2(text)
|
83957e24e0aa9e18bbedf53dcdd8563e15cd2856
|
da13d72ff7ef77cfa0cf378a86dac4f6d7fa2f94
|
/plot1.R
|
d6ad585e48d3b650bb300c221a1f430442c7a4e3
|
[] |
no_license
|
Venks88/RepData_PeerAssessment1
|
72c9441d4a349f25777b34ed58569de897ff2695
|
d20fd189ebadb8c394e09ea9eaed383bccaeaad1
|
refs/heads/master
| 2021-01-20T23:47:46.132843
| 2015-03-15T17:12:30
| 2015-03-15T17:12:30
| 31,823,055
| 0
| 0
| null | 2015-03-07T18:42:17
| 2015-03-07T18:42:16
| null |
UTF-8
|
R
| false
| false
| 145
|
r
|
plot1.R
|
##setting the working directory
setwd("C:/Users/Venkata/Downloads/repdata_data_activity")
library(knitr)
knit2html(input = "PA1_template.Rmd")
|
8a81e05532c55ab42f75ae769bc70c3bd8556fc8
|
7a01ec83c1f85b1681d9e74104978406ecd009af
|
/Simon/lab4_assignment1/lab4-1.R
|
6837af918ed463b6a9a857c617e8c33a9c0e2dc5
|
[] |
no_license
|
adamnyberg/machine-learning
|
971509bde9a44024a4196eaa6ee4c31497ee109f
|
1c98d1c5799286529b2fa39f6bf120db4fda4fc9
|
refs/heads/master
| 2021-06-25T01:49:30.609443
| 2017-09-09T21:17:08
| 2017-09-09T21:17:08
| 72,445,761
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,282
|
r
|
lab4-1.R
|
setwd("/Users/Simon/Documents/TDDE01/tdde01/Simon/lab4_assignment1")
library(readxl)
library(tree)
library(boot)
set.seed(12345)
data=read_excel("xls_state.xls")
#1.1
reorder_data = data[with(data, order(data$MET)), ]
plot(reorder_data$MET,reorder_data$EX, ylab = "EX", xlab = "MET", main="1.1 EX vs MET")
#---->regression tree model
#1.2
MET = reorder_data$MET # make output look better
model = tree(reorder_data$EX~MET, data=reorder_data,
control = tree.control(minsize = 8, nobs = nrow(reorder_data)))
model.cv <- cv.tree(object = model)
#plot(model.cv$size,model.cv$dev,'b') #Best (lowest deviance) value is 3. See plot.
model.pruned <- prune.tree(model,best=3)
plot(model.pruned)
text(model.pruned, cex=.75)
pred_data = predict(model.pruned,newdata = reorder_data)
plot(reorder_data$MET,reorder_data$EX, ylab = "EX", xlab = "MET", main="1.2 Original and fitted data")
points(reorder_data$MET, pred_data, col="blue")
legend("top", lty=c(1,1), col=c("black","blue"), legend = c("Original", "Fitted"))
residuals = reorder_data$EX-pred_data
hist(residuals)
#1.3
# computing bootstrap samples
bootstrap=function(data, ind){
boot_data=data[ind,]# extract bootstrap sample
res=tree(EX~MET, data=boot_data,
control = tree.control(minsize = 8, nobs = nrow(reorder_data)))
#predict values for all Area values from the original
model.pruned = prune.tree(res,best=3)
EX_pred=predict(model.pruned,newdata=reorder_data)
return(EX_pred)
}
res=boot(reorder_data, bootstrap, R=1000) #make bootstrap
e = envelope(res, level=0.95)
plot(reorder_data$MET, reorder_data$EX, pch=21, bg="orange", main="Non-parametric bootstrap", xlab="MET", ylab="EX")
legend("top", lty=c(1,1,1), col=c("orange","black", "blue"), legend = c("Data", "Model", "Confidence bands"))
points(reorder_data$MET,pred_data,type="l") #plot fitted line
#plot cofidence bands
points(reorder_data$MET,e$point[2,], type="l", col="blue")
points(reorder_data$MET,e$point[1,], type="l", col="blue")
# the fitted line is bumpy: the tree tries to fit every data point
# but it looks reliable, staying inside the confidence bands
#1.4
mle=tree(reorder_data$EX~reorder_data$MET, data=reorder_data,
control = tree.control(minsize = 8, nobs = nrow(reorder_data)))
mle.pruned=prune.tree(mle,best=3)
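# parametric bootstrap setup: rng() simulates new EX values from the fitted
# tree (mean = prediction, sd = residual sd); f1() refits and prunes a tree on
# each simulated data set and returns predictions on the original MET grid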
rng=function(data, mle) {
data1=data.frame(EX=reorder_data$EX,
MET=reorder_data$MET)
n=length(data$EX)
  # print(data1) # debug print removed: it would dump the data frame on every bootstrap replicate
#generate new EX
p = predict(mle,newdata = data1)
residuals = data1$EX-p
data1$EX=rnorm(n,p,sd(residuals))
return(data1)
}
f1=function(data1){
#print(data1)
res=tree(EX~MET, data=data1,
control = tree.control(minsize = 8, nobs = nrow(reorder_data)))
  #predict values for all MET values from the original data
model.pruned = prune.tree(res,best=3)
EX_pred=predict(model.pruned,newdata=reorder_data)
return(EX_pred)
}
res_parametric=boot(reorder_data, statistic=f1, R=1000,
mle=mle.pruned,ran.gen=rng, sim="parametric")
e_parametric = envelope(res_parametric, level=0.95)
plot(reorder_data$MET, reorder_data$EX, pch=21, bg="orange", main="Bootstrap comparison", xlab = "MET", ylab = "EX",ylim=c(100,600))
legend("top", lty=c(1,1), col=c("pink", "black", "blue","red"), legend = c("Prediction bands", "Predictions", "Non-parametric confidence bands", "Parametric confidence bands"))
points(reorder_data$MET,pred_data,type="l") #plot fitted line
#plot cofidence bands
points(reorder_data$MET,e_parametric$point[2,], type="l", col="red")
points(reorder_data$MET,e_parametric$point[1,], type="l", col="red")
points(reorder_data$MET,e$point[2,], type="l", col="blue")
points(reorder_data$MET,e$point[1,], type="l", col="blue")
#Prediction boundary
bootstrap.prediction <- function(data)
{
model = tree(EX ~ MET, data=data, control = tree.control(nobs = nrow(data), minsize = 8))
model.pruned = prune.tree(model,best=3)
EX_pred = predict(model.pruned,data)
n = length(data$EX)
ndata = rnorm(n , EX_pred,sd(resid(model.pruned)))
return(ndata)
}
bootstrap3 = boot(reorder_data, bootstrap.prediction,R=1500, mle=mle.pruned, ran.gen=rng, sim="parametric")
e_pred_bands = envelope(bootstrap3,level=0.95)
points(reorder_data$MET, e_pred_bands$point[2,], type="l", col="pink")
points(reorder_data$MET, e_pred_bands$point[1,], type="l", col="pink")
|
8e9fc0b2ec0ab44f2a6273d07a1a2368b1721074
|
c0eb015d406f6eac1c573c1224cb754d5e9b7721
|
/inst/julia/test-julia.R
|
41e8df21885eec4fa8bbfaaa485760e7fca99f23
|
[] |
no_license
|
cran/jack
|
a115fc5b4653f07eeb93c4c25018d9c13eba3caa
|
ae5a01692fedee7a144f477e9f7aaf05d797a38d
|
refs/heads/master
| 2023-07-11T02:04:12.732248
| 2023-07-04T10:30:07
| 2023-07-04T10:30:07
| 208,781,647
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,207
|
r
|
test-julia.R
|
test_that("Julia", {
skip_if_not(JuliaConnectoR::juliaSetupOk(), "Julia setup is not OK")
julia <- Jack_julia()
# numerical ####
x <- c("1/2", "2/3", "5")
xq <- gmp::as.bigq(x)
lambda <- c(2, 1, 1)
# jack
alpha <- "2/3"
alphaq <- gmp::as.bigq(alpha)
expect_equal(
julia$Jack(x, lambda, alpha), Jack(xq, lambda, alphaq)
)
# zonal
expect_equal(
julia$Zonal(x, lambda), Zonal(xq, lambda)
)
# zonalQ
expect_equal(
julia$ZonalQ(x, lambda), ZonalQ(xq, lambda)
)
# Schur
expect_equal(
julia$Schur(x, lambda), Schur(xq, lambda)
)
# polynomials ####
n <- 3
lambda <- c(3, 2)
# jack
alpha <- "2/3"
alphaq <- gmp::as.bigq(alpha)
mvpol_julia <- julia$JackPol(n, lambda, alpha, poly = "mvp")
gmpol_julia <- julia$JackPol(n, lambda, alpha, poly = "qspray")
gmpol_r <- JackPol(n, lambda, alphaq)
#expect_true(mvpEqual(mvpol_julia, gmpoly::gmpoly2mvp(gmpol_julia)))
expect_true(gmpol_r == gmpol_julia)
# zonal
mvpol_julia <- julia$ZonalPol(n, lambda, poly = "mvp")
gmpol_julia <- julia$ZonalPol(n, lambda, poly = "qspray")
gmpol_r <- ZonalPol(n, lambda)
#expect_true(mvpEqual(mvpol_julia, gmpoly::gmpoly2mvp(gmpol_julia)))
expect_true(gmpol_r == gmpol_julia)
# zonalq
mvpol_julia <- julia$ZonalQPol(n, lambda, poly = "mvp")
gmpol_julia <- julia$ZonalQPol(n, lambda, poly = "qspray")
gmpol_r <- ZonalQPol(n, lambda)
#expect_true(mvpEqual(mvpol_julia, gmpoly::gmpoly2mvp(gmpol_julia)))
expect_true(gmpol_r == gmpol_julia)
# schur
mvpol_julia <- julia$SchurPol(n, lambda, poly = "mvp")
gmpol_julia <- julia$SchurPol(n, lambda, poly = "qspray")
gmpol_r <- SchurPol(n, lambda)
#expect_true(mvpEqual(mvpol_julia, gmpoly::gmpoly2mvp(gmpol_julia)))
expect_true(gmpol_r == gmpol_julia)
# as.function
mvpol <- julia$JackPol(m = 2, lambda = c(3, 1), alpha = "2/5", poly = "mvp")
gmpol <- julia$JackPol(m = 2, lambda = c(3, 1), alpha = "2/5", poly = "qspray")
f <- as.function(mvpol)
y1 <- f("2/3", "7/5")
y2 <- as.character(qspray::evalQspray(gmpol, c("2/3", "7/5")))
expect_equal(y1, y2)
#
JuliaConnectoR::stopJulia()
})
|
8d490818252b7626451106c17fea78f6fa4160f4
|
520b7ee4b967adab4aeb39f5a948889b13f5518d
|
/man/levelfun.Rd
|
ca6056cbb5793fc44807ae95cc716eff99d5628c
|
[] |
no_license
|
jknowles/merTools
|
d0e178a03b4da0af8ce133cf0c3b45d50741b8b6
|
178248e8ebf5eacb44c8a7e75951c65071678449
|
refs/heads/master
| 2023-04-07T11:31:45.834722
| 2023-03-20T16:44:14
| 2023-03-20T16:44:14
| 34,033,193
| 111
| 25
| null | 2023-03-16T20:52:29
| 2015-04-16T03:53:06
|
R
|
UTF-8
|
R
| false
| true
| 270
|
rd
|
levelfun.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{levelfun}
\alias{levelfun}
\title{Parse merMod levels}
\usage{
levelfun(x, nl.n, allow.new.levels = FALSE)
}
\description{
Parse merMod levels
}
\keyword{internal}
|
fe1cd5422303a759aa24cb39b47e78914292a1bb
|
7cdc6e44005e8a8f8bc5ca3eaad97ada7839e006
|
/man/func6.Rd
|
0a420ad5c089e89810eb3ff6a0e11a1274ceb1c6
|
[] |
no_license
|
benjnguyen/NguyenTools
|
6e549edf2c9ec4f63e11f1ca616b1532a7a2a426
|
474cefe873c7a110a429e96ba5e6a2403e26acab
|
refs/heads/master
| 2021-01-25T13:47:56.989724
| 2018-03-02T22:52:06
| 2018-03-02T22:52:06
| 123,615,905
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 335
|
rd
|
func6.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NguyenToolsRfunctions.R
\name{func6}
\alias{func6}
\title{Highlevel check function}
\usage{
func6(x)
}
\arguments{
\item{x}{object}
}
\value{
object
}
\description{
Checks and throws an error if the object is not numeric, not finite, of zero length, NA, or NaN
}
\examples{
func6(NA)
}
|
744022589fc6aa8a11df50abb58c7365ad5ad30a
|
8097bd6207ce3ed587b16b0ea7bb4d3e13394a1f
|
/Tidy R Run Analysis Script.R
|
2467b49292a5f53dd5eab08fdd0cc1212ad22bd6
|
[] |
no_license
|
rkvidyar/Human-Activity-Recognition-Using-Smartphones-Dataset---Tidy-Data-Project
|
310ad1484db8508aad30127c4d42d521111c476b
|
ea42edad5683df131581eebb302deb547ae715c8
|
refs/heads/main
| 2023-08-31T15:40:10.476431
| 2021-10-04T21:32:09
| 2021-10-04T21:32:09
| 413,550,663
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,107
|
r
|
Tidy R Run Analysis Script.R
|
X_test <- read.table("X_test.txt", header=FALSE, fill=TRUE)
y_test <- read.table("y_test.txt", header=FALSE)
subject_test <- read.table("subject_test.txt", header=FALSE)
subject_train <- read.table("subject_train.txt", header=FALSE)
X_train <- read.table("X_train.txt", header=FALSE, fill=TRUE)
y_train <- read.table("y_train.txt", header=FALSE)
features <- read.table("features.txt", header=FALSE, stringsAsFactors=FALSE)
activity <- read.table("activity_labels.txt", header=FALSE, stringsAsFactors=FALSE)
X_bind <- rbind(X_test, X_train)
y_bind <- rbind(y_test, y_train)
subject_bind <- rbind(subject_test, subject_train)
avector <- as.vector(features[ , 2])
avector2 <- make.names(avector, unique = TRUE)
colnames(X_bind) <- avector2
selectVector <- grep("mean|std", names(X_bind), ignore.case=FALSE)
X_bind1 <- X_bind[ , selectVector]
selectVectorF <- !grepl("meanFreq", names(X_bind1), ignore.case=FALSE)
table(selectVectorF)
X_bind2 <- X_bind1[ , selectVectorF]
pre_tidy1 <- cbind(y_bind, X_bind2)
pre_tidy2 <- cbind(subject_bind, pre_tidy1)
merged_Data = merge(pre_tidy2, activity, by.x=2, by.y=1, all=TRUE)
pre_tidy3 <- merged_Data[ , c(2, 69, 3:68)]
colnames(pre_tidy3)[1] <- "subject.id"
colnames(pre_tidy3)[2] <- "activity.label"
colnames(pre_tidy3) <- tolower(names(pre_tidy3))
freq <- sub("^f+", "freq.", names(pre_tidy3))
time <- sub("^t+", "time.", freq)
body <- sub("body+", "body.", time)
body2 <- sub("body.body+", "body.", body)
gravity <- sub("gravity+", "gravity.", body2)
jerk <- sub("jerk+", ".jerk", gravity)
mag <- sub("mag+", ".mag", jerk)
periods <- sub("\\.\\.", "", mag)
colnames(pre_tidy3) <- periods
tidy1 <- pre_tidy3
install.packages("dplyr")
library(dplyr)
tidy1 <- tbl_df(tidy1)
tidy2 <- group_by(tidy1, subject.id, activity.label) %>% summarise_each(funs(mean), "time.body.acc.mean.x" : "freq.body.gyro.jerk.mag.std")
write.table(tidy2, file="tidy2RM.txt", row.names=FALSE, col.names=TRUE, sep=" ", quote=FALSE)
tidy_data <- read.table("tidy2RM.txt", header = TRUE)
View(tidy_data)
|
334496dfc95dfb805507b2d215ea1e14cceb3768
|
51b8a66b8bc4a4bd63ddb0fda9f320136a2aba12
|
/r/src/na_interp.r
|
7cc84d885ed0dcd3026e3c164b1be38fb1ad2c95
|
[] |
no_license
|
uataq/stilt
|
2fe80bfe323919ae9d8f8e02f78aaeea93719a95
|
fc4daceee2f608650040688213fdb53554d37b62
|
refs/heads/main
| 2023-02-08T05:01:48.399607
| 2023-02-03T19:27:51
| 2023-02-03T19:27:51
| 62,921,306
| 41
| 18
| null | 2023-02-03T19:27:53
| 2016-07-08T22:37:37
|
R
|
UTF-8
|
R
| false
| false
| 747
|
r
|
na_interp.r
|
#' Linearly interpolate NA values
#'
#' \code{na_interp} linearly interpolates NA values found in vector y with
#' respect to index x (e.g. timestamp).
#'
#' @param y numeric vector in which to fill bracketed NA values
#' @param x vector giving index of y. NULL if y is equally spaced
#'
#' @importFrom stats approx
#' @importFrom utils head tail
#'
#' @export
na_interp <- function (y, x = NULL) {
if (is.null(x))
x <- 1:length(y)
nona <- which(!is.na(y))
start <- head(nona, 1)
end <- tail(nona, 1)
  if (length(nona) < 2) # fewer than two non-NA points: nothing to interpolate
    return(y)
xsub <- x[start:end]
ysub <- y[start:end]
idx <- which(is.na(ysub))
ysub[idx] <- approx(xsub, ysub, xout = xsub[idx], method = "linear")$y
y[start:end] <- ysub
return(y)
}
|
c138d81d3c171cc7ce59f19b60756c649034b50a
|
bbe69e27263454120aa4ada60eaed9116fccb6da
|
/R/outsider.devtools.R
|
7d2c3bcc6c1d3d65c3cd8ac84cc49d7738156eea
|
[] |
no_license
|
ropensci-archive/outsider.devtools
|
702812e099b9983f022f985ac607e5004c620489
|
787f92887435e7aedf4721170511ca4e790d2df2
|
refs/heads/master
| 2023-04-17T17:31:03.227335
| 2022-06-17T07:11:38
| 2022-06-17T07:11:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 407
|
r
|
outsider.devtools.R
|
#' outsider.devtools: Build 'outsider' Modules
#'
#' Tools, resources and information for making it easier to build your own
#' outsider modules.
#'
#' For more information visit the outsider website
#' (\url{https://docs.ropensci.org/outsider.devtools/}).
#'
#' @docType package
#' @name outsider.devtools
#' @importFrom outsider.base cat_line char func stat
#' @importFrom outsider module_uninstall
NULL
|
ad60e0b9b6be58e0f91f3bf542ae79115b9ebcf7
|
33d1e1eee68663bb91b6952555dd3ff2d11b4ff1
|
/man/quickPlottables-class.Rd
|
339dcebbc693be71ff4b902ea78953e7472f6906
|
[] |
no_license
|
PredictiveEcology/NetLogoR
|
976d379e8e176158e3401e0960d4b0761ecb3947
|
9860ad160a41638702e1164842876e6e322472a3
|
refs/heads/master
| 2023-07-09T05:16:33.058552
| 2022-08-16T22:41:35
| 2022-08-16T22:41:35
| 53,686,905
| 34
| 7
| null | 2023-07-04T21:09:57
| 2016-03-11T18:07:02
|
R
|
UTF-8
|
R
| false
| true
| 1,076
|
rd
|
quickPlottables-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quickPlot.R
\name{.quickPlottables-class}
\alias{.quickPlottables-class}
\alias{quickPlottables}
\title{\code{quickPlot} classes}
\description{
\pkg{quickPlot} offers a type of plotting that is modular.
Users of NetLogoR may find this useful for simulation modeling.
We have put in place the required methods and imported the appropriate classes
to use the \code{quickPlot::Plot} function.
Users can still use \code{plot} from the \pkg{graphics} package, but it is not modular.
}
\details{
This adds \code{agentMatrix} to the \code{.quickPlottables}, \code{.quickObjects},
and \code{spatialObjects}.
This adds \code{worldMatrix} to the \code{.quickPlottables}, \code{.quickObjects},
\code{spatialObjects} and \code{griddedClasses}.
}
\section{Slots}{
\describe{
\item{\code{members}}{\code{\link[=.quickPlotObjects]{.quickPlotObjects()}} and \code{\link[=.quickPlot]{.quickPlot()}}}
}}
\seealso{
\code{\link[=quickPlotClasses]{quickPlotClasses()}}
}
\author{
Eliot McIntire
}
\keyword{internal}
|
4c585cb8079e6496e6cc988f7e49b1b302c39e32
|
c88b0cbeda0edf9e745e324ef942a504e27d4f87
|
/NLSY-CogEpi/NLSY_cw/Revisions.R
|
2d9a0668d67ffedb0eb9eee5a44f8ab224b4baee
|
[] |
no_license
|
Diapadion/R
|
5535b2373bcb5dd9a8bbc0b517f0f9fcda498f27
|
1485c43c0e565a947fdc058a1019a74bdd97f265
|
refs/heads/master
| 2023-05-12T04:21:15.761115
| 2023-04-27T16:26:35
| 2023-04-27T16:26:35
| 28,046,921
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,017
|
r
|
Revisions.R
|
### Revisions
library(survival)
library(eha)
library(ggplot2)
library(survminer)
### Missing sample descriptives
#summary(ht.df[!ccs,c('AFQT89','SAMPLE_SEX','Child_SES','Adult_SES','SES_Education_USE','SES_OccStatus_USE','SES_Income_USE')])
sum(ht.df[!ccs,]$SAMPLE_SEX=='MALE')
sum(ht.df[!ccs,]$SAMPLE_SEX=='FEMALE')
## IQ
aggregate(AFQT89 ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean)
aggregate(AFQT89 ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
## Child SES
aggregate(Child_SES ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean)
aggregate(Child_SES ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
## Adult SES
aggregate(Adult_SES ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean)
aggregate(Adult_SES ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
## Income
aggregate(SES_Income_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean)
aggregate(SES_Income_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
## Education
aggregate(SES_Education_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean)
aggregate(SES_Education_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
## Occupation Status
aggregate(SES_OccStatus_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean)
aggregate(SES_OccStatus_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
## Hypertension
# table(ht.df[ccs,]$hasHT,ht.df[!ccs,]$SAMPLE_SEX)
#
# hist(ht.df[ccs,]$HTage50t[ht.df[ccs,]$hasHT])
## FIXED:
aggregate(recordTime ~ SAMPLE_SEX, data=ht.df[!ccs,][ht.df[!ccs,]$hasHT,], FUN=mean)
aggregate(recordTime ~ SAMPLE_SEX, data=ht.df[!ccs,][ht.df[!ccs,]$hasHT,], FUN=sd)
### Comparing analytic and non-analytic means
(aggregate(AFQT89 ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean) - aggregate(AFQT89 ~ SAMPLE_SEX, data=ht.df[ccs,], FUN=mean))/aggregate(AFQT89 ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
(aggregate(Child_SES ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean) - aggregate(Child_SES ~ SAMPLE_SEX, data=ht.df[ccs,], FUN=mean))/aggregate(Child_SES ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
(aggregate(Adult_SES ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean) - aggregate(Adult_SES ~ SAMPLE_SEX, data=ht.df[ccs,], FUN=mean))/aggregate(Adult_SES ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
(aggregate(SES_Income_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean) - aggregate(SES_Income_USE ~ SAMPLE_SEX, data=ht.df[ccs,], FUN=mean))/aggregate(SES_Income_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
(aggregate(SES_Education_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean) - aggregate(SES_Education_USE ~ SAMPLE_SEX, data=ht.df[ccs,], FUN=mean))/aggregate(SES_Education_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
(aggregate(SES_OccStatus_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=mean) - aggregate(SES_OccStatus_USE ~ SAMPLE_SEX, data=ht.df[ccs,], FUN=mean))/aggregate(SES_OccStatus_USE ~ SAMPLE_SEX, data=ht.df[!ccs,], FUN=sd)
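# The six blocks above repeat one pattern; the helper below is an added
# illustration (hypothetical, not part of the original analysis) that makes
# the standardized differences easier to extend:
std_diff <- function(var) {
  f <- as.formula(paste(var, "~ SAMPLE_SEX"))
  (aggregate(f, data=ht.df[!ccs,], FUN=mean)[,2] -
     aggregate(f, data=ht.df[ccs,], FUN=mean)[,2]) /
    aggregate(f, data=ht.df[!ccs,], FUN=sd)[,2]
}
# e.g. sapply(c('AFQT89','Child_SES','Adult_SES'), std_diff)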
### More details on education and income
summary(ht.df$SES_Education_a[ccs])
ht.df$Edu_int = as.integer(ht.df$SES_Education_a)
table(ht.df$Edu_int[ccs], useNA='ifany')
mean(ht.df$Edu_int[ccs])
summary(ht.df$SES_Income_a[ccs])
summary(ht.df$SES_Income_b[ccs]) # B seems to be higher in a few fringe cases
#ht.df$Inc_dollars = ht.df$
View(ht.df[ccs,c('SES_Income_a','SES_Income_b')])
cor(ht.df[ccs,c('SES_Income_a','SES_Income_b')])
mean(ht.df[ccs,c('SES_Income_b')])
### Partner status - sensitivity models
partn.14 = read.csv('./PartnerStat2014.csv')
colnames(partn.14)[c(2,6)] = c('caseid','partner.status')
partn.df = merge(ht.df, partn.14[,c('caseid','partner.status')],
by.x='CASEID_1979',by.y='caseid')
table(partn.df$partner.status, useNA='ifany')
partn.df$partner.status[partn.df$partner.status==-999] = NA
partn.df$partner.status[partn.df$partner.status==33] = 1 # just 'partner's
aft.gd3.4.partn = aftreg(y.ccs ~ SAMPLE_SEX * AFQT89 + age_1979 + Child_SES
+ SES_Income_USE + partner.status
,data = partn.df[ccs,], dist='loglogistic')
aft.gd3.4.partn$n
aft.gd3.4$n
aft.gd3.4.partn$loglik
extractAIC(aft.gd3.4.partn)
summary(aft.gd3.4.partn)
## nothing at all...
# aft.gd3.8.partn = aftreg(y.ccs ~ SAMPLE_SEX * AFQT89 + age_1979 + Child_SES + Adult_SES
# + SES_Income_USE*SAMPLE_SEX + partner.status
# ,data = partn.df[ccs,], dist='loglogistic')
# extractAIC(aft.gd3.8.partn)
# summary(aft.gd3.8.partn)
### Split by sex - sensitivity models
M.ccs = ((ht.df$SAMPLE_SEX=='MALE')&ccs)
head(y.ccs[ht.df[ccs,]$SAMPLE_SEX=='MALE'])
aft.gd3.3.M = aftreg(y.ccs[ht.df[ccs,]$SAMPLE_SEX=='MALE'] ~ AFQT89 + age_1979 + Child_SES
                     + Adult_SES
                     , data = ht.df[ccs&(ht.df$SAMPLE_SEX=='MALE'),],
                     dist='loglogistic')
aft.gd3.3.F = aftreg(y.ccs[ht.df[ccs,]$SAMPLE_SEX=='FEMALE'] ~ AFQT89 + age_1979 + Child_SES
                     + Adult_SES
                     , data = ht.df[ccs&(ht.df$SAMPLE_SEX=='FEMALE'),],
                     dist='loglogistic')
summary(aft.gd3.3.M)
summary(aft.gd3.3.F)
### Make Figure 2 *taller*
ggfit = survfit(Surv(recordTime, hasHT) ~ sex_tert, data=ccs.df)
g <- ggsurvplot(ggfit, conf.int=T,censor=F
, linetype = c(1,1,2,2,3,3)
#, color = c(1,2,1,2,1,2)
, palette = c('dodgerblue','violetred1','dodgerblue','violetred1','dodgerblue','violetred1')
, legend = 'none' #c(0.2, 0.2) #'none'
, xlim = c(19,55), ylim = c(0.5,1)
, break.x.by = 5
)
g = g + xlab('Age') +ylab('Proportion that remains normotensive')
g
# ggfit = survfit(Surv(recordTime, hasHT) ~ ht.df$IQtert, data=ht.df)
#
# g <- ggsurvplot(ggfit, conf.int=T,censor=F
# , linetype = c(1,2,3)
# #, color = c(1,2,1,2,1,2)
# , palette = c('dodgerblue','violetred1','green','violetred1','dodgerblue','violetred1')
# , legend = 'none' #c(0.2, 0.2) #'none'
# , xlim = c(19,54), ylim = c(0.5,1)
# , break.x.by = 5
# )
#
# g = g + xlab('Age') +ylab('Proportion that remains normotensive')
#
# g
|
a1dc18a71bbf2988ec60c254a405a93ba2ec7b6b
|
f1fde487d0ad042920a0c7b8b77982a73b295834
|
/code/workspace_scripts/ddm_measure_labels.R
|
375cfb3536673601533264bf5f757932580180a9
|
[] |
no_license
|
zenkavi/SRO_DDM_Analyses
|
5d52960f2c327bdd2e8aee34018aed69e4eb2d3c
|
9b1ab73b22f8e67f7b47cc68ac43f2632b2c1de5
|
refs/heads/master
| 2021-04-09T11:04:34.767730
| 2019-08-08T18:24:35
| 2019-08-08T18:24:35
| 125,401,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 799
|
r
|
ddm_measure_labels.R
|
if(!exists('from_gh')){
from_gh=TRUE
}
if(from_gh){
input_path = 'https://raw.githubusercontent.com/zenkavi/SRO_DDM_Analyses/master/input/'
}else{
input_path = '/Users/zeynepenkavi/Dropbox/PoldrackLab/SRO_DDM_Analyses/input/'
}
# dplyr and tidyr are required for the pipeline below
library(dplyr)
library(tidyr)
measure_labels <- read.csv(paste0(input_path, 'measure_labels.csv'))
measure_labels = measure_labels %>%
select(-measure_description) %>%
filter(ddm_task == 1) %>%
select(-ddm_task) %>%
filter(rt_acc != "other") %>%
mutate(dv = as.character(dv),
overall_difference = factor(overall_difference,levels = c("overall", "difference", "condition"), labels = c("non-contrast", "contrast", "condition")),
ddm_raw = ifelse(raw_fit == "raw", "raw", "ddm")) %>%
separate(dv, c("task_group", "var"), sep = "\\.", remove=FALSE, extra = "merge")
|
66699832db663a111a509e193964a286ef0a1944
|
5875798a477588ce11efa1d044d0f1996c9cd366
|
/reverse_sequences.R
|
a5dd5be6f612f8d4efe3f4bb17f71b89e033d26b
|
[] |
no_license
|
kbodulic/eunapius_lncRNA
|
b98e1a93c616287fdf871cdd9a345fa9780cb132
|
5c155cfd2537c5349cd4bf74f66c89a66ed21fcc
|
refs/heads/master
| 2023-01-03T01:54:58.292377
| 2020-10-14T08:29:20
| 2020-10-14T08:29:20
| 287,954,950
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 947
|
r
|
reverse_sequences.R
|
args <- commandArgs(trailingOnly = TRUE)
library(Biostrings)
library(IRanges)
library(GenomicRanges)
library(BSgenome)
setwd(".")
# reversing the rev_comp hits
# Arguments: 1 - concatenated fasta files of transcripts with similarities
# (note: args is read above but the loop below iterates over list.files())
sequences_list<-list()
for(i in list.files()) {
sequences_list<-c(sequences_list,readDNAStringSet(i,format = "fasta"))
}
for (i in 1:length(sequences_list)){
for(j in 1:length(sequences_list[[i]])) {
if(score(pairwiseAlignment(sequences_list[[i]][j],sequences_list[[i]][length(sequences_list[[i]])])) < score(pairwiseAlignment(sequences_list[[i]][j],reverseComplement(sequences_list[[i]][length(sequences_list[[i]])])))) {
sequences_list[[i]][length(sequences_list[[i]])]<-reverseComplement(sequences_list[[i]][length(sequences_list[[i]])])
}
}
writeXStringSet(x = sequences_list[[i]], filepath = paste(names(sequences_list[[i]][length(sequences_list[[i]])]),"R.fa",sep="_"))
}
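# Toy check (added example, not part of the pipeline): the rule above flips
# a sequence only when it aligns better to the reverse complement.
s1 <- DNAString("AAAAACCCCC")
s2 <- reverseComplement(s1)
score(pairwiseAlignment(s1, s2)) <
score(pairwiseAlignment(s1, reverseComplement(s2)))  # expect TRUE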
|
feecc4bed71ab831ea83deb7e68c0b69805e7382
|
c42e4faa9a5a546fc8fbe7cc367383a14c2a7d8a
|
/part3/R 러닝/0613그래프 평균 줄 표시or 구글빕이용.R
|
05e707b4649db60c48c089e174ce2c209986e9bb
|
[] |
no_license
|
wisdom009/R
|
4be82b65a9d1b79085cf40ced11173421f405548
|
202dce45c6faac6bd4563f96a40a09b09ae416ed
|
refs/heads/master
| 2020-06-03T19:03:28.896231
| 2019-06-17T08:30:42
| 2019-06-17T08:30:42
| 189,162,812
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,572
|
r
|
0613그래프 평균 줄 표시or 구글빕이용.R
|
setwd('D:/workspace/R_date/Part2/Stage3_StructuredData')
count = read.csv("연도별요양기관별보험청구건수_2001_2013_세로.csv",
stringsAsFactors= F)
count
colname <- count$년도
colname
v1 <- count[,2]/100000
v2 <- count[,3]/100000
v3 <- count[,4]/100000
v4 <- count[,5]/100000
v5 <- count[,6]/100000
v6 <- count[,7]/100000
v7 <- count[,8]/100000
v8 <- count[,9]/100000
v9 <- count[,10]/100000
v10 <- count[,11]/100000
plot(v1, xlab="",ylab="",ylim = c(0,10000), axes=F, col="violet", type = "o", lwd=2,
main=paste("연도별 금액", "\n" , "출처:검보심"))
# year (x-axis labels)
axis(1, at=1:10, label = colname, las=2)
# amount (y-axis)
axis(2,las=1)
# amount line for each series
lines(v2, col = "blue",type="o",lwd=2)
lines(v3, col = "red",type="o",lwd=2)
lines(v4, col = "black",type="o",lwd=2)
lines(v5, col = "orange",type="o",lwd=2)
lines(v6, col = "cyan",type="o",lwd=2)
lines(v7, col = "yellow",type="o",lwd=2)
lines(v8, col = "brown",type="o",lwd=2)
lines(v9, col = "green",type="o",lwd=2)
lines(v10, col = "navy",type="o",lwd=2)
abline(h=seq(0,15000, 10000), v=seq(1,100,1),lty=3,lwd=0.2)
col = names(count[1,2:11])
colors=c("blue","red","black","orange","cyan","yellow",
"brown","green","navy")
legend(1,10000,col,cex=0.8,col=colors,lty=1,lwd=2,bg="white")
# -----3-5 ---------------------------------------------------------
library(ggplot2)
library(reshape2)
windowsFonts(malgun = windowsFont("맑은 고딕"))
count=read.csv("연도별요양기관별보험청구금액_2004_2013_세로.csv")
count
count2 = melt(count, id = c('년도'), variable.name = '병원종류', value.name = '금액')
count2 = as.data.frame(count2)
count2$금액 = count2$금액/1000000
ggplot(count2, aes(x=년도, y=금액, fill=병원종류, color=병원종류)) +
geom_line(linetype=1, size=1) +
geom_point(size=3) +
geom_hline(yintercept = seq(0,8000,1000), lty='dotted', size=0.1)+
  theme_classic(base_family = "malgun", base_size = 10)+
ggtitle(paste('연도별 기관 보험청구금액','\n', '(단위:백만원)'))+
theme(plot.title = element_text(family="malgun", face= "bold", hjust = 0.5,
size = 15, color = "darkblue"))
# Exercise: baseball
bb= read.csv("야구성적.csv")
bb
ggplot(bb, aes(x=선수명,y=연봉대비출루율))+
geom_bar(stat = 'identity')
mean_obp = mean(bb$연봉대비출루율)
ggplot(bb, aes(x=선수명, y=연봉대비출루율))+
  geom_bar(stat = 'identity', fill='steelblue') + # 'palete' was undefined; a fixed fill color is assumed here
  geom_text(aes(y=연봉대비출루율+0.8, label=연봉대비출루율), # text above each bar
color="black", size=3)+
theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1,
colour="black", size=9)) +
ggtitle('프로야구선수 밥값은 하고 있나?') +
theme(plot.title = element_text(face = "bold", hjust = 0.5,
size = 15, color = "darkblue")) +
  geom_hline(yintercept=mean_obp, color='purple', linetype = 'dashed') # line showing the mean value
# pie(value, labels=label, radius=0.1, cex=0.6)  # value and label are not defined in this script
# Google charts (googleVis)
library(dplyr)
library(googleVis)
data = read.csv("2013년_서울_구별_주요과목별병원현황_구글용.csv")
data
has = gvisColumnChart(data, options = list(title="지역별 병원현황",
                     height=400,width=500))
plot(has)
header= has$html$header
header = gsub('charset=utf-8', 'charset=euc-kr',header)
has$html$header = header
plot(has)
|
347ff68cf2fb7a3aa56643b69e465f521113dc5c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rbmn/examples/rm8nd4adja.Rd.R
|
9f16a9fe1c4dba11c566af13a9a68b99fea66ddd
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 206
|
r
|
rm8nd4adja.Rd.R
|
library(rbmn)
### Name: rm8nd4adja
### Title: removes some nodes from an adjacency matrix
### Aliases: rm8nd4adja
### Keywords: utilities PKEYWORDS
### ** Examples
rm8nd4adja(rbmn0adja.04, "1.1");
|
145cacf7cebf154b6608c6fc6b60b73c2abd9f67
|
e5c43a31a082bbfec5ebbc20b34d373896721579
|
/R/functions/archive/proc.vba.survey.R
|
6177620dbc27ab06787d4a04703373ff364cb4dd
|
[] |
no_license
|
geryan/rfst
|
3dde3a499651f3a1ccc736f8c6597c5972f0e17c
|
0aac1f0c3b17096af0c5b0b06e1ad80ac6d709ca
|
refs/heads/master
| 2023-05-02T12:32:51.743467
| 2021-04-27T01:26:47
| 2021-04-27T01:26:47
| 164,573,310
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 950
|
r
|
proc.vba.survey.R
|
proc.vba2 <- function(x, project.crs, vba.crs = 4283, cutoff.date = "2009-03-01"){
source(file = "R/functions/read.vba.R")
library(dplyr)
z <- read.vba(x) %>%
dplyr::rename("date" = `Survey Start Date`,
"lon" = `Longitude GDA94`,
"lat" = `Latitude GDA94`,
"count" = `Total Count`,
"sm" = `Survey method`) %>%
dplyr::select(sm, date, lon, lat, count) %>%
mutate(date = dmy(date)) %>%
filter(date > ymd(cutoff.date)) %>%
mutate(PA = ifelse(count != 0 | is.na(count), 1, 0)) %>%
dplyr::select(lon, lat, PA, date, sm) %>%
dplyr::arrange(date, PA, lon, lat) %>%
st_as_sf(coords = c("lon", "lat"), crs = vba.crs) %>%
st_transform(crs = project.crs)
z <- z[grep(pattern = "spotlight",
x = z$sm,
ignore.case = TRUE),] %>%
mutate(PA = 0) %>%
dplyr::select(PA, date, geometry)
return(z)
}
|
c2181162a4bc1a841c30b89fe5424c3392123ee9
|
7971223b5ed15e39718b262d739274ef7ffd0eaa
|
/man/hello.Rd
|
751fdf3bb39fa24f752540a9470cd36394fe4ac5
|
[] |
no_license
|
uk-gov-mirror/ukgovdatascience.testpackage
|
20c8edfc1c2319f1edaab07369b38c8036c564d3
|
cf7f7ede8a3a7ee68a9cc1cf4c59c4846978ce6f
|
refs/heads/master
| 2021-06-28T13:57:15.965212
| 2017-09-19T13:58:50
| 2017-09-19T13:58:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 349
|
rd
|
hello.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\name{hello}
\alias{hello}
\title{Print hello world}
\usage{
hello(x)
}
\arguments{
\item{x}{A character string to be concatencated in print statement}
}
\value{
Hello world
}
\description{
Prints hello world
}
\examples{
library(testpackage)
hello('string')
}
|
b647fb093fa54395626bc02fd36d50a17ca9b14b
|
df71ad9bc4409e48d395a90ce0b3a72f47c049bd
|
/man/dds.Rd
|
aedc8d81d27263dfaecdb43bde27222be6c6ebf4
|
[
"MIT"
] |
permissive
|
YTLogos/bcbioRNASeq
|
60e3ed07ce04bb6b924bb98a0d690067672f7861
|
6d6f01a063c668856ff6146e7e6e12c7b9e16a58
|
refs/heads/master
| 2021-08-07T21:30:20.243822
| 2017-11-08T15:51:21
| 2017-11-08T15:51:21
| 110,054,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 350
|
rd
|
dds.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{dds}
\alias{dds}
\title{Example \link{DESeqDataSet}}
\format{An object of class \code{DESeqDataSet} with 505 rows and 4 columns.}
\usage{
dds
}
\description{
Derived from \code{bcb}, the example \link{bcbioRNASeq} run.
}
\keyword{internal}
|
6485469ade4678778533d0de32e6f0881bbfd8d3
|
4d102b41cf2bb8283a0cd7f61e9c7ec064486aa1
|
/test_meth_elle.R
|
6348de4021be21ca74dcb6a086c5d77809317210
|
[] |
no_license
|
AlexisAyme/estimateurs-robustes
|
f4077576caee84f9ae1d84c43da300d95230132d
|
5b31fea1db70ac5ea98660f74fc4934f9da23987
|
refs/heads/master
| 2022-07-26T07:18:52.419914
| 2020-05-20T22:01:43
| 2020-05-20T22:01:43
| 265,636,473
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,618
|
r
|
test_meth_elle.R
|
source("methode_ellipsoide.R")
source("spectralMethod.R")
source("contamination.R")
#X= oneParameterContamination(n=1000,s=100,p=3,mean1=cbind(c(1,1,1)),mean2=cbind(c(3,3,3)))
#X<- oneParameterContamination(n=10000,s=1000,p=32,mean1=cbind(rep(1,32)),mean2=cbind(rep(3,32)))
#print (diakonikolasInequality(X,100,0.05))
#
#print(system.time(muSp <- spectralMethod(X,C=0.7)))
#
#muEll =ellipsoideMethode(X,tau=0.2,s=100)
#
#
#
#muEmp= cbind(rowMeans(X))
#
#
#print(list( diffEll= norm(muEll-cbind(rep(1,3)),type="F"),
# diffSp= norm(muSp-cbind(rep(1,3)),type="F"),
# diffEmp= norm(muEmp-cbind(rep(1,3)),type="F")))
#
traceResult <- function (X,s,mu,a,b,pt){
C= a *rep(1,pt) + ((b-a)/pt)*c(1:pt)
res= rep(0,pt)
muEmp= cbind(rowMeans(X))
for (i in 1:pt){
diffSp <- norm(spectralMethod(X,C[i])-mu,type="F")
res[i] <- diffSp
}
  plot(C,res,type="l",xlim=c(a,b),ylim=c(min(res),max(res)), xlab = "C", ylab = "distance to mu", main = "Spectral method: distance to mu as a function of C")
abline(b = laiInequality(X,s), a = 0, col = 2)
text(0.5,0.5*laiInequality(X,s), "Lai inequality", col = 2, adj = c(-.1, -.1))
abline(b = 0, a = norm(muEmp-mu,type="F"), col = 3)
text(1, norm(muEmp-mu,type="F"), "distance with mean emp", col = 3, adj = c(-.1, -.1))
print(list(distanceWithMuEmp= norm(muEmp-mu,type="F")))
}
findConstante <- function(X,mu,a,b,s,eps){
repeat{
c <- (a+b)/2
if (b-a < eps){
return (c)
}
if (norm(spectralMethod(X,c)-mu,type="F")> c*laiInequality(X,s)){
a <- c
}else {
b <- c
}
}
}
#
#X<- oneParameterContamination(n=10000,s=1000,p=32,mean1=cbind(rep(1,32)),mean2=cbind(rep(3,32)))
#traceResult(X,1000,cbind(rep(1,32)),0.3,2,100)
#
#X<- oneParameterContamination(n=9000,s=500,p=32,mean1=cbind(rep(1,32)),mean2=cbind(rep(3,32)))
#traceResult(X,500,cbind(rep(1,32)),0.3,2,100)
#
matMeans <- matrix (data= rep(1,32*3), ncol=3,nrow=32)
mean1<- cbind(rep(1,32))
mean1[1:10,1]<- rep(2,10)
mean2 <- cbind(rep(1,32)+ rnorm(32,0,5))
mean3 <- cbind(rep(3,32))
matMeans[,1]<- mean1
matMeans[,2]<- mean2
matMeans[,3]<- mean3
X <- uniParameterContamination(n=10000,s=500,p=32,mean1=cbind(rep(1,32)),matMeans = matMeans)
print(findConstante (X,mu=cbind(rep(1,32)),a=0.3,b=2,s=500,eps=0.05))
#traceResult(X,500,cbind(rep(1,32)),0.3,2,100)
X<- oneParameterContamination2(n=10000,s=1000,p=32,mean1=cbind(rep(1,32)),mean2=cbind(rep(1.5,32)),sd=0.3)
traceResult(X,1000,cbind(rep(1,32)),0.3,2,100)
#X<- oneParameterContamination(n=10000,s=1000,p=32,mean1=cbind(rep(1,32)),mean2=cbind(rep(1.5,32)))
#traceResult(X,1000,cbind(rep(1,32)),0.3,2,100)
|
e0a1e9c9d7c6022ba9382e35cd33df993e0987ef
|
b1909b0d26a4beffdace564ec48b532e8bb8b8bc
|
/week3/run_analysis.R
|
cc7f54aa69350f2a07e767220dd367b8bef24985
|
[] |
no_license
|
Gcabrera1/datasciencecoursera
|
53bc4850c6606be77c8005225354062ec66fd0fb
|
c1c116dc5300a5d27256f52a0a80765eb6a20e7c
|
refs/heads/master
| 2020-06-04T14:35:49.763956
| 2015-10-25T21:26:57
| 2015-10-25T21:26:57
| 40,682,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
run_analysis.R
|
run_analysis <- function() {
        library(data.table)   # %like% below is assumed to be data.table's operator
        dat <- read.table("./test/X_test.txt")
        dat1 <- read.table("./train/X_train.txt")
        labels <- read.table("features.txt")
        dat3 <- rbind(dat, dat1)
        names(dat3) <- labels[,"V2"]
        filtro1 <- (labels[,"V2"] %like% "mean" | labels[,"V2"] %like% "std")
        prefinal <- dat3[,filtro1]
        final <- colMeans(prefinal, na.rm = TRUE)
        final   # return the column means of the mean/std features
}
|
493b34960d5eea3f61569db2e1e5d3782950f868
|
71c4a42149298baa946614f0251efdeebe11501f
|
/cachematrix.R
|
36f93eab7879bb70b2ba49477afe6c05a8d6cb9a
|
[] |
no_license
|
u001624/ProgrammingAssignment2
|
7b4bb9fc6fbb1a4e2a329f27f949fc13326c921a
|
835a1e9748a65fd2a5c3e315024d97654b67f9e7
|
refs/heads/master
| 2021-01-17T18:12:33.516183
| 2014-10-26T10:41:34
| 2014-10-26T10:41:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,792
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
# Function makeCacheMatrix creates a cached matrix and a list of functions
# Function cacheSolve finds the inverse of the matrix, returning the cached value when one exists
## Write a short comment describing this function
# Function makeCacheMatrix creates a list containing a function to
#1. set the value of the matrix
#2. get the value of the matrix
#3. set the value of the inverse of matrix
#4. get the value of the inverse of matrix
# these functions maniplulate a cached matrix
makeCacheMatrix <- function(x = matrix()) {
m <- NULL # creates a variable in the environment of the makeCacheMatrix() function, to store/cache a result
  # why doesn't the cached x also need a variable in this environment, so a single function could be used instead of forcing the caller to know when to set a new result?
set <- function(y) { # function used internally
    x <<- y # <<- assigns to the x stored in makeCacheMatrix's environment, not the caller's copy
m <<- NULL # clear the previous 'cached' result
}
get <- function() {x} # this is callable from the object returned from makeCacheMatrix
setInverseOfMatrix <- function(InverseOfMatrix) {m <<- InverseOfMatrix} # do NOT call setInverseOfMatrix() directly despite it being accessible
getInverseOfMatrix <- function() {m} # return the 'cached' result
list(set = set, get = get, # return a list, containing four functions
setInverseOfMatrix = setInverseOfMatrix,
getInverseOfMatrix = getInverseOfMatrix)
}
## Write a short comment describing this function
#Function cacheSolve calculates the inverse of matrix, which is initially created with the function makeCacheMatrix.
#It first checks to see if the inverse of matrix has already been calculated. If so, it `get`s the inverse of matrix from the
#cache and skips the computation.
#Otherwise, it calculates the inverse of the matrix of data and sets the value of the inverse of matrix in the cache via the `setInverseOfMatrix` function.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverseOfMatrix()
  if(!is.null(m)) { # the cache already holds a result (set() clears it whenever the matrix changes)
message("getting cached data")
return(m)
}
  data <- x$get() # retrieve the matrix; x is the list of accessor functions, not the matrix itself, so "data <- x" would not work
  m <- solve(data, ...) # compute the inverse
  x$setInverseOfMatrix(m) # cache the result inside the makeCacheMatrix environment
m # return result
}
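## Usage sketch (added example):
m1 <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(m1)   # computes the inverse and caches it
cacheSolve(m1)   # prints "getting cached data" and returns the cached inverse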
|
4c1e49305bf541f9b1f7cc90e0e1a385ae95f754
|
03f2c9c3e7d87c11400511a2994536efa8eba6de
|
/R/backup/backup June 29/avs_models_evaluation.R
|
58c30cd722928b836642e44eb6deea2ace0deb29
|
[] |
no_license
|
lorenzol/kaggle_avs
|
43dc9b16ad8be1a2726ecae7a5ebdddf8031247e
|
c61d2d43562dad2411bc22994f84b968f68e4ab5
|
refs/heads/master
| 2021-01-10T22:09:55.147687
| 2014-09-18T00:46:23
| 2014-09-18T00:46:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,299
|
r
|
avs_models_evaluation.R
|
# Model Evaluation
################################################
#install.packages("e1071")
#library(e1071)
library(caret)  # confusionMatrix(), resamples() and the train-object predict methods
library(pROC)   # roc(), auc()
## Logistic regression model
glm.pred <- predict(glm.tune, test.batch)
confusionMatrix(glm.pred, test.batch$repeater)
## Decison Tree
ctree.pred <- predict(ctree.tune, test.batch)
confusionMatrix(ctree.pred, test.batch$repeater)
## Boosted model
ada.pred <- predict(ada.tune, test.batch)
confusionMatrix(ada.pred, test.batch$repeater)
## Random Forest model
rf.pred <- predict(rf.tune, test.batch)
confusionMatrix(rf.pred, test.batch$repeater)
## SVM model
svm.pred <- predict(svm.tune, test.batch)
confusionMatrix(svm.pred, test.batch$repeater)
## KNN model
knn.pred <- predict(knn.tune, test.batch)
confusionMatrix(knn.pred, test.batch$repeater)
## AVG NNET model
avnnet.pred <- predict(avnnet.tune, test.batch)
confusionMatrix(avnnet.pred, test.batch$repeater)
## GBM model
gbm.pred <- predict(gbm.tune, test.batch)
confusionMatrix(gbm.pred, test.batch$repeater)
## glmnet model
glmnet.pred <- predict(glmnet.tune, test.batch)
confusionMatrix(glmnet.pred, test.batch$repeater)
# ROC curves
################################################
## Logistic regression model (BLACK curve)
glm.probs <- predict(glm.tune, test.batch, type = "prob")
glm.ROC <- roc(response = test.batch$repeater,
predictor = glm.probs$t,
levels = levels(test.batch$repeater))
plot(glm.ROC, type="S")
glm.ROC
## CTree model (Grey curve)
ctree.probs <- predict(ctree.tune, test.batch, type = "prob")
ctree.ROC <- roc(response = test.batch$repeater,
predictor = ctree.probs$t,
levels = levels(test.batch$repeater))
plot(ctree.ROC, add=TRUE, col="grey")
ctree.ROC
## Boosted model (GREEN curve)
ada.probs <- predict(ada.tune, test.batch, type = "prob")
ada.ROC <- roc(response = test.batch$repeater,
predictor = ada.probs$t,
levels = levels(test.batch$repeater))
plot(ada.ROC, add=TRUE, col="green")
ada.ROC
## Random Forest model (RED curve)
rf.probs <- predict(rf.tune, test.batch, type = "prob")
rf.ROC <- roc(response = test.batch$repeater,
predictor = rf.probs$t,
levels = levels(test.batch$repeater))
plot(rf.ROC, add=TRUE, col="red")
rf.ROC
## SVM model (BLUE curve)
svm.probs <- predict(svm.tune, test.batch, type = "prob")
svm.ROC <- roc(response = test.batch$repeater,
predictor = svm.probs$t,
levels = levels(test.batch$repeater))
plot(svm.ROC, add=TRUE, col="blue")
svm.ROC
## KNN model (YELLOW curve)
knn.probs <- predict(knn.tune, test.batch, type = "prob")
knn.ROC <- roc(response = test.batch$repeater,
predictor = knn.probs$t,
levels = levels(test.batch$repeater))
plot(knn.ROC, add=TRUE, col="yellow")
knn.ROC
## AVNNET model (ORANGE curve)
avnnet.probs <- predict(avnnet.tune, test.batch, type = "prob")
avnnet.ROC <- roc(response = test.batch$repeater,
predictor = avnnet.probs$t,
levels = levels(test.batch$repeater))
plot(avnnet.ROC, add=TRUE, col="orange")
avnnet.ROC
## GBM model (PINK curve)
gbm.probs <- predict(gbm.tune, test.batch, type = "prob")
gbm.ROC <- roc(response = test.batch$repeater,
predictor = gbm.probs$t,
levels = levels(test.batch$repeater))
plot(gbm.ROC, add=TRUE, col="pink")
gbm.ROC
## glmnet model (BROWN curve)
glmnet.probs <- predict(glmnet.tune, test.batch, type = "prob")
glmnet.ROC <- roc(response = test.batch$repeater,
predictor = glmnet.probs$t,
levels = levels(test.batch$repeater))
plot(glmnet.ROC, add=TRUE, col="brown")
glmnet.ROC
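# The per-model ROC blocks above repeat one pattern; the loop below does the
# same work in one pass (an added refactoring sketch, reusing the fitted
# model objects from above):
model.list <- list(glm = glm.tune, ctree = ctree.tune, ada = ada.tune,
                   rf = rf.tune, svm = svm.tune, knn = knn.tune,
                   avnnet = avnnet.tune, gbm = gbm.tune, glmnet = glmnet.tune)
roc.list <- lapply(model.list, function(m) {
  p <- predict(m, test.batch, type = "prob")
  roc(response = test.batch$repeater, predictor = p$t,
      levels = levels(test.batch$repeater))
})
sapply(roc.list, auc)  # one-line AUC comparison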
# graph which sums up the performance of the models
########################################################
cv.values <- resamples(list(#Logit = glm.tune,
                            #ctree = ctree.tune,
                            ADA = ada.tune,
                            RF = rf.tune,
                            SVM = svm.tune,
                            KNN = knn.tune,
                            AVNNET = avnnet.tune,
                            GBM = gbm.tune,
                            GLMNET = glmnet.tune))
dotplot(cv.values, metric = "ROC")
|
3c3ff29f766220682ea15128539e1f8f65449184
|
66a7e127fdbbc1c454122f1bff5dfed3e8f18974
|
/Cleaning lynching data.R
|
91a8f2dfab5609c3390446eee4dbe532d00e5b39
|
[] |
no_license
|
ZIBOWANGKANGYU/POLVILINDIA
|
657e5024ae46dc05260d7f1e87ed34fdb12c9bca
|
b12749f69e00efab180430e95b4b2b686ad067f2
|
refs/heads/master
| 2023-06-01T04:21:50.635960
| 2021-07-02T17:46:59
| 2021-07-02T17:46:59
| 181,230,385
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,198
|
r
|
Cleaning lynching data.R
|
# Mark Wang, kaw129@ucsd.edu
# 11 April 2019
library(xlsx)
library(ggplot2)
library(sf)
library(dplyr)
library(tmap)
Sys.setlocale("LC_TIME", "English")
Sys.setenv(LANG = "en_US.UTF-8")
# cleaning and importing lynching data
lynchings_date<-read.xlsx(file = "lynchings.xlsx", sheetIndex = 2, encoding = "UTF-8")
colnames(lynchings_date)[16]<-"Notes"
lynchings_date<-lynchings_date[1:125,1:16]
lynchings_date$DATE<-as.Date(paste(lynchings_date$MONTH, lynchings_date$DAY,lynchings_date$YEAR, sep=" "), "%B %d %Y")
lynchings_date$STATE<-as.character(lynchings_date$STATE)
lynchings_date$STATE[lynchings_date$STATE=="TELENGANA"]<-"TELANGANA"
# cleaning and importing maps
map_Tehsil<-st_read("India_Tehsil_Boundary.shp")
map_Tehsil_df<-data.frame(map_Tehsil)%>%select(-geometry)
map_District<-st_read("India_District_Boundary.shp")
map_District_df<-data.frame(map_District)%>%select(-geometry)
map_State<-st_read("India_State_Boundary.shp")
map_State_df<-data.frame(map_State)%>%select(-geometry)
# importing election data
elections<-read.xlsx("elections.xlsx", encoding="UTF=8", sheetIndex = 2)
colnames(elections)[1]<-"STATE"
elections$STATE<-toupper(elections$STATE)
elections$date_begining<-as.Date(as.character(elections$date_begining), "%d %B %Y")
elections$date_ending<-as.Date(as.character(elections$date_ending), "%d %B %Y")
elections$date_government<-as.Date(as.character(elections$date_government), "%d %B %Y")
elections$STATE[elections$STATE=="JAMMU & KASHMIR"]<-"JAMMU AND KASHMIR"
elections$STATE[elections$STATE=="ODISHA"]<-"ORISSA"
# aggregate by month and state
lynchings_date$ARRESTS_MADE<-as.numeric(as.character(lynchings_date$ARRESTS_MADE))
lynchings_date$STATE<-as.character(lynchings_date$STATE)
lynchings_date$STATE[lynchings_date$STATE=="JAMMU & KASHMIR"]<-"JAMMU AND KASHMIR"
lynchings_date$STATE[lynchings_date$STATE=="ODISHA"]<-"ORISSA"
lynchings_date$STATE<-as.factor(as.character(lynchings_date$STATE))
# May need population data on this
# Number of victims of important states
ggplot(lynchings_date[lynchings_date$STATE=="UTTAR PRADESH",], aes(x=DATE))+
geom_point(aes(y=VICTIMS))+scale_y_continuous(labels = scales::number_format(accuracy = 1))+
labs(x="time", y="victims", title = "Cow lynching and number of victims in Uttar Pradesh")+
geom_rect(aes(xmin=as.Date("2017-02-11"), xmax=as.Date("2017-03-08"),ymin=0, ymax=Inf, fill="BJP win"), alpha = 0.5, show.legend = TRUE)+
scale_fill_manual("MLA election",values=c("orange"))
ggplot(lynchings_date[lynchings_date$STATE=="HARYANA",], aes(x=DATE))+
geom_point(aes(y=VICTIMS))+scale_y_continuous(labels = scales::number_format(accuracy = 1))+
labs(x="time", y="victims", title = "Cow lynching and number of victims in Haryana")+
geom_vline(aes(xintercept=as.Date("2014-10-15"), color="BJP win"), size=1.5) +
scale_color_manual("MLA election",values=c("orange"))
# automize ggplot
elections<-elections[order(elections$date_begining),]
elections$winner<-"BJP losing"
elections$winner[elections$Government=="NDA"]<-"BJP winning"
cols <- c("BJP winning" = "orange", "BJP losing" = "green")
cols_incumbent <- c("BJP" = "orange", "non-BJP" = "green")
# plot states with cow lynching
ggplot(lynchings_date, aes(x=DATE))+
geom_point(aes(y=VICTIMS))+scale_y_continuous(labels = scales::number_format(accuracy = 1))+
labs(x="time", y="victims")+xlim(as.Date("2012-6-1"), Sys.Date())+
  geom_vline(data=elections[elections$STATE %in% lynchings_date$STATE,], mapping = aes(xintercept=date_begining, color=winner), size=1.5, show.legend = TRUE)+
scale_color_manual("MLA election",values=cols)+facet_wrap(~STATE, ncol = 3)+labs(title = "State election cycle and cow lynching victim, 2012-2019")
ggsave("state election cycle and cow lynching victim.pdf", width = 12, height = 10)
# cow lynchings relative to elections on t=0
for (i in 1:nrow(lynchings_date)){
dif_vec<-lynchings_date$DATE[i]-elections$date_begining[elections$STATE==lynchings_date$STATE[i]]
winner_vec<-elections$winner[elections$STATE==lynchings_date$STATE[i]]
lynchings_date$date_gap_beginning[i]<-dif_vec[which.min(abs(dif_vec))]
lynchings_date$winning_party[i]<-winner_vec[which.min(abs(dif_vec))]
}
for (i in 1:nrow(lynchings_date)){
dif_vec<-lynchings_date$DATE[i]-elections$date_ending[elections$STATE==lynchings_date$STATE[i]]
lynchings_date$date_gap_ending[i]<-dif_vec[which.min(abs(dif_vec))]
}
for (i in 1:nrow(lynchings_date)){
dif_vec<-lynchings_date$DATE[i]-elections$date_government[elections$STATE==lynchings_date$STATE[i]]
lynchings_date$date_gap_government[i]<-dif_vec[which.min(abs(dif_vec))]
}
lynchings_date$date_gap_government[lynchings_date$date_gap_government>lynchings_date$date_gap_beginning]<-NA
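# The three nearest-date loops above share one pattern; the helper below is
# an added illustration (names hypothetical, not part of the original analysis):
nearest_gap <- function(event_dates, ref_dates) {
  vapply(seq_along(event_dates), function(i) {
    dif <- as.numeric(event_dates[i] - ref_dates)
    dif[which.min(abs(dif))]
  }, numeric(1))
}
# e.g. nearest_gap(lynchings_date$DATE[1:5],
#                  elections$date_begining[elections$STATE == "HARYANA"])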
# get rid of more than 2-year gaps
ggplot()+
geom_density(data=lynchings_date[lynchings_date$winning_party=="BJP winning",], aes(x=date_gap_beginning, color=winning_party), size=1.5, bw=60)+
  geom_density(data=lynchings_date[lynchings_date$winning_party=="BJP losing",], aes(x=date_gap_beginning, color=winning_party), size=1.5, bw=60)+
  labs(x="Days to MLA election (beginning)", y="frequency of cow lynching", title="Density of cow lynching on the election timeline, by winner", caption = "kernel bandwidth: 60 days")+
geom_vline(xintercept = 0, color="red", size=1.5)+
scale_color_manual("MLA election",values=cols)+xlim(-365, 365)
ggsave("plots/summary/density of cow lynching on the election timeline, by winner1.jpg", width = 6, height = 5)
ggplot()+
geom_density(data=lynchings_date[lynchings_date$winning_party=="BJP winning",], aes(x=date_gap_ending, color=winning_party), size=1.5, bw=60)+
  geom_density(data=lynchings_date[lynchings_date$winning_party=="BJP losing",], aes(x=date_gap_ending, color=winning_party), size=1.5, bw=60)+
  labs(x="Days to MLA election (ending)", y="frequency of cow lynching", title="Density of cow lynching on the election timeline, by winner", caption = "kernel bandwidth: 60 days")+
geom_vline(xintercept = 0, color="red", size=1.5)+
scale_color_manual("MLA election",values=cols)+xlim(-365, 365)
ggsave("plots/summary/density of cow lynching on the election timeline, by winner2.jpg", width = 6, height = 5)
ggplot()+
geom_density(data=lynchings_date[lynchings_date$winning_party=="BJP winning",], aes(x=date_gap_government, color=winning_party), size=1.5, bw=60)+
  geom_density(data=lynchings_date[lynchings_date$winning_party=="BJP losing",], aes(x=date_gap_government, color=winning_party), size=1.5, bw=60)+
  labs(x="Days to MLA election (new government)", y="frequency of cow lynching", title="Density of cow lynching on the election timeline, by winner", caption = "kernel bandwidth: 60 days")+
geom_vline(xintercept = 0, color="red", size=1.5)+
scale_color_manual("MLA election",values=cols)+xlim(-365, 365)
ggsave("plots/summary/density of cow lynching on the election timeline, by winner3.jpg", width = 6, height = 5)
# get the previous winning party
for (i in 1:nrow(lynchings_date)){
if (lynchings_date$STATE[i]=="TELANGANA"){
dif_vec_pre<-elections$date_begining[elections$STATE=="ANDHRA PRADESH" & elections$date_begining<lynchings_date$DATE[i]]-lynchings_date$DATE[i]
winner_vec_pre<-elections$winner[elections$STATE=="ANDHRA PRADESH" & elections$date_begining<lynchings_date$DATE[i]]
lynchings_date$winning_party_pre[i]<-winner_vec_pre[which.min(abs(dif_vec_pre))]
}
else{
dif_vec_pre<-elections$date_begining[elections$STATE==lynchings_date$STATE[i] & elections$date_begining<lynchings_date$DATE[i]]-lynchings_date$DATE[i]
winner_vec_pre<-elections$winner[elections$STATE==lynchings_date$STATE[i] & elections$date_begining<lynchings_date$DATE[i]]
lynchings_date$winning_party_pre[i]<-winner_vec_pre[which.min(abs(dif_vec_pre))]
}
}
lynchings_date$incumbent[lynchings_date$winning_party_pre=="BJP winning"]<-"BJP"
lynchings_date$incumbent[lynchings_date$winning_party_pre=="BJP losing"]<-"non-BJP"
|
cda14ac8e3ec4a9a15a4d1aa77531734dfbe7808
|
c0c90d01f887bb2ee42608e2dae0eaecb4567757
|
/man/mfcluster-class.Rd
|
0fc58d9132d1b18c980b5836cf236bb718886c11
|
[] |
no_license
|
tmrealphd/MF
|
386cdad3f9353712d8843685342d99ea548fa319
|
362f9f5b2b24e094af492ac5e9f755745d94631f
|
refs/heads/master
| 2021-01-21T02:36:34.942956
| 2015-03-31T20:32:21
| 2015-03-31T20:32:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 957
|
rd
|
mfcluster-class.Rd
|
\docType{class}
\name{mfcluster-class}
\alias{mfcluster-class}
\title{Class mfcluster}
\usage{
mfcluster$new(All, bycluster, excludedClusters, call, compare)
}
\description{
Class mfcluster is created from output of function MFClus
}
\section{Fields}{
\itemize{ \item{\code{All: }}{vector with elements:
\itemize{ \item{\emph{w }}{Wilcoxon statistic}
\item{\emph{u }}{Mann-Whitney statistic} \item{\emph{r
}}{mean ridit} \item{\emph{n1 }}{size of group 1}
\item{\emph{n2 }}{size of group 2} \item{\emph{mf
}}{mitigated fraction} }} \item{\code{byCluster: }}{As
for All, by clusters} \item{\code{excludedClusters:
}}{character vector naming clusters excluded because of
missing treatment} \item{\code{call: }}{the call to
\code{MFClus}} \item{\code{compare: }}{character vector
naming groups compared} }
}
\author{
Marie Vendettuoli
\email{marie.c.vendettuoli@aphis.usda.gov}
}
\seealso{
\code{\link{MFClus}}
}
\keyword{documentation}
|
e553b8684099c869719439af6ac6fe20876b4967
|
12681d234a68bc85b8c99b9dd9dde1e7ac3cc89e
|
/Twitter API for brand sentiment/02a_Score_Tweets.R
|
58a1a9e6d3110ae4fea951ab21d63d24641fab23
|
[] |
no_license
|
siddharthksuri/data_analysis
|
fced3c65f96a0791ab027e256001a642e5dc5597
|
96cc348166e5e15ff95eabebf14e72350f02bc4e
|
refs/heads/master
| 2021-03-13T01:21:39.402088
| 2017-05-16T16:28:11
| 2017-05-16T16:28:11
| 91,475,196
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,588
|
r
|
02a_Score_Tweets.R
|
setwd("D:/G Drive/04 Career/Interview Prep/THINX/Twitter")
thinx= read.csv("thinxtweets.csv")
lulu= read.csv("lulutweets.csv")
#readymade dictionaries for scoring
hlpos=scan("D:/G Drive/04 Career/Interview Prep/THINX/Twitter/dict/positive-words.txt"
, what='character', comment.char=';')
hlneg=scan("D:/G Drive/04 Career/Interview Prep/THINX/Twitter/dict/negative-words.txt"
, what='character', comment.char=';')
# score.sentiment() is a custom function written by Jeffrey Breen;
# it must normally be loaded from a separate script
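# The sketch below is a minimal stand-in for the Breen-style scorer (assumed
# interface: a data frame with one score per input string); it is an added
# example, not the original implementation.
score.sentiment <- function(sentences, pos.words, neg.words) {
  scores <- vapply(as.character(sentences), function(s) {
    s <- tolower(gsub("[[:punct:][:digit:]]", " ", s))    # strip punctuation/digits
    words <- unlist(strsplit(s, "[[:space:]]+"))
    sum(words %in% pos.words) - sum(words %in% neg.words) # positive minus negative hits
  }, numeric(1), USE.NAMES = FALSE)
  data.frame(score = scores, text = sentences, stringsAsFactors = FALSE)
}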
trial<-c("accolade I recieved","thrilled to be here","beautiful is the world",
"dismayed Yoda was","angry I am","unhappy we all are")
score.sentiment(trial, hlpos, hlneg)
#scoring the actual tweets
thinx$sentiment = score.sentiment(thinx$text, hlpos, hlneg)$score
lulu$sentiment = score.sentiment(lulu$text, hlpos, hlneg)$score
#exploring the scores
range(thinx$sentiment)
range(lulu$sentiment)
hist(thinx$sentiment)
hist(lulu$sentiment)
mean(thinx$sentiment)
mean(lulu$sentiment)
# Using sentimentr package by Tyler Rinker
# Returns a score for each sentence in string input.
library(sqldf)
library(sentimentr)
thinx_rscore<-sentiment(thinx$text, polarity_dt = lexicon::hash_sentiment_jockers,
valence_shifters_dt = lexicon::hash_valence_shifters, hyphen = "",
amplifier.weight = 0.8, n.before = 5, n.after = 2,
question.weight = 1, adversative.weight = 0.85, missing_value = 0)
thinx_rscore<-sentiment(thinx$text)
lulu_rscore<-sentiment(lulu$text)
#aggregating the scores to tweet level.
thinx_rscore<-sqldf("select sum(sentiment)
from thinx_rscore
group by element_id
")
thinx<-cbind(thinx,thinx_rscore,"thinx")
colnames(thinx)[19:20]<-c("sentiment2","company")
#thinx$sentiment1<-NULL
lulu_rscore<-sqldf("select sum(sentiment)
from lulu_rscore
group by element_id
")
lulu<-cbind(lulu,lulu_rscore,"lulu")
colnames(lulu)[19:20]<-c("sentiment2","company")
#Some more stats just to see if anything odd shows up
cor(thinx$sentiment, thinx$sentiment2)
cor(lulu$sentiment, lulu$sentiment2)
range(thinx$sentiment2)
range(lulu$sentiment2)
hist(thinx$sentiment2)
hist(lulu$sentiment2)
mean(thinx$sentiment2)
mean(lulu$sentiment2)
### Output the file
write.csv(lulu,"lulu_scored.csv")
write.csv(thinx,"thinx_scored.csv")
tweet_scored<-rbind(thinx,lulu)
write.csv(tweet_scored,"tweets_scored.csv")
|
6ac7398df68066f85d9cd2f7a31f34bcf53d68f5
|
120de1ae49850f8212efc39ab9fa266f175dc4c6
|
/man/bbk.fanChart.Rd
|
4f48312d79dbcb90bb80c0f169845f2b23074c43
|
[] |
no_license
|
vsrimurthy/EPFR
|
168aed47aa2c48c98be82e3d8c833d89e1d11e04
|
544471a8d0cf75c7d65a195b9f6e95d6b1d6800f
|
refs/heads/master
| 2023-08-02T14:50:25.754990
| 2023-07-29T13:56:39
| 2023-07-29T13:56:39
| 118,918,801
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 664
|
rd
|
bbk.fanChart.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{bbk.fanChart}
\alias{bbk.fanChart}
\title{bbk.fanChart}
\usage{
bbk.fanChart(x)
}
\arguments{
\item{x}{= "rets" part of the output of function bbk}
}
\description{
quintile fan charts
}
\seealso{
Other bbk: \code{\link{bbk.bin.rets.prd.summ}},
\code{\link{bbk.bin.rets.summ}},
\code{\link{bbk.bin.xRet}}, \code{\link{bbk.data}},
\code{\link{bbk.drawdown}}, \code{\link{bbk.fwdRet}},
\code{\link{bbk.histogram}}, \code{\link{bbk.holidays}},
\code{\link{bbk.matrix}}, \code{\link{bbk.summ}},
\code{\link{bbk.turnover}}, \code{\link{bbk}}
}
\keyword{bbk.fanChart}
|
1c535ee283a8c4a62bcfb8a2279344f044e8d14b
|
4fc1ae226c2f0722f998d4eccdedd75e79d8e04c
|
/5_DNB Screening/PPI网络/top50/new/minus.R
|
92182e3dc8a89728eeb7eacb507c84ba336e15b5
|
[] |
no_license
|
TwinkleKim/Reproducible-DNB-downstream-analysis
|
7f2726ca40108f2bc089bfbb01eac7f3c0b7b8ab
|
83f1d0bd52bf8b79a970b63fce3cb19653b623d1
|
refs/heads/main
| 2023-04-25T21:40:58.913936
| 2021-05-19T11:58:52
| 2021-05-19T11:58:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
minus.R
|
node<-read.csv("nodedis.csv",header = T,row.names = 1)
View(node)
node<-scale(node,center = T,scale = T)
write.csv(node,"nodeminus.csv")
|
0deaa6a557bafe765553466f452d8f68eb32bd45
|
04ae878f467098c69dc72d6b4811e76a77ef0532
|
/Data Wrangling and Visualization Based on County-level Oil and Gas Data/Data Wrangling and Visualization code/Oil and Gas withdraws each year in terms of state.R
|
bc658d8eac4e7dcedc8abbae1500270a8391216b
|
[] |
no_license
|
ZihuanQiao/Course-Projects
|
e81ec5cb008882b2ddac63ae0ef539386f23ac18
|
e0808d5e2d8982c9f9ce317e43aab90e7d39a555
|
refs/heads/master
| 2021-01-11T22:22:54.680146
| 2017-01-15T23:19:37
| 2017-01-15T23:19:37
| 78,954,690
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 832
|
r
|
Oil and Gas withdraws each year in terms of state.R
|
# Oil and gas withdrawals each year by state
library(ggplot2)
library(dplyr)
#read tidy data
dfnew <- read.table("oilTidyData.txt")
dfnew <- data.frame(dfnew)
# select the top 10000 county records by oil/gas withdrawal
dfnew.oil <- arrange(dfnew, desc(oilwithdraw))
dfnew.oil <- dfnew.oil[1:10000,]
dfnew.gas <- arrange(dfnew, desc(gaswithdraw))
dfnew.gas <- dfnew.gas[1:10000,]
# draw point plots showing oil/gas withdrawals from year to year by state
gg <- ggplot(dfnew.oil, aes(x = year, y = oilwithdraw, colour = factor(Stabr)))
gg + geom_point() + labs(title = "Oil Withdraw from 2000 to 2011", x = "Year", y = "Oil Withdraw")
qq <- ggplot(dfnew.gas, aes(x = year, y = gaswithdraw, colour = factor(Stabr)))
qq + geom_point() + labs(title = "Gas Withdraw from 2000 to 2011", x = "Year", y = "Gas Withdraw")
|
590694aecb4ceaad2bfa6dbbbc30dada9d5713a1
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/atakrig/man/extractPointVgm.Rd
|
35fc6d6ec79c3c3af7d59d764f749241b6413e67
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 338
|
rd
|
extractPointVgm.Rd
|
\name{extractPointVgm}
\alias{extractPointVgm}
\title{
Extract point-scale variogram from deconvoluted ataKrigVgm.
}
\description{
Extract point-scale variogram from deconvoluted ataKrigVgm.
}
\usage{
extractPointVgm(g)
}
\arguments{
\item{g}{
deconvoluted ataKrigVgm object.
}
}
\value{
a list of gstat vgm model.
}
|
492403fb41b3f40c4dd528925173275b01b824d4
|
eb8b0d82328715285dff79922d093cb31583d93f
|
/Coursework/05 - Statistical Programming 2.R
|
1a327d76dc1fd9062ff2ffa718bbd00c1ea98d1e
|
[] |
no_license
|
kevinmaske/Sample-R-Codes
|
94f6c561958e1f7526fb1d64a46657d4b03a72ef
|
5cbfb573346e6ef480e4302f4f2947a2b9450b76
|
refs/heads/master
| 2020-04-17T07:46:52.879449
| 2019-01-18T10:28:00
| 2019-01-18T10:28:00
| 166,383,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,505
|
r
|
05 - Statistical Programming 2.R
|
##
##
## -------------------------------------------
## LOAD PACKAGES
## -------------------------------------------
##
##
## Load any R package required for your code
## to successfully run!
library(tidyverse)
## ;;
setwd("")
## -------------------------------------------
## READING DATA
## -------------------------------------------
## ;;
load("burlington.RData")
## --------------------------------------------------
## --------------------------------------------------
## ;;
## ---------------------------------------------
## Q1: -- add your code below
## ---------------------------------------------
## ;;
## 1.1
# Constructs the 95% bootstrap CI using R "replications"
CI.cor <- function(raw, R = 10000, conf = 0.95){
n <- raw$n
data <- cbind(raw$lsat, # Compile the data into a 2 column matrix
raw$gpa)
rho_star <- NULL # Initialize vector of covariances
for(r in 1:R){ # For R Replications
index_star <- 1:n %>%
sample(size = n, replace = TRUE) # Get n resamples from data
data_star <- data[index_star,] # Obtains the resamples
rho_star[r] <- cor(data_star)[1,2] # Gets correl and stores in vector
}
# Computing estimate of bias and variance for bootstrap rho
bias_rho_star <- mean(rho_star - cor(data)[1,2])
var_rho_star <- var(rho_star)
# Define alpha confidence level
alpha = 1 - conf
# Obtain Normal Bootstrap CI
CI_normal_low <- cor(data)[1,2] - bias_rho_star +
qnorm(alpha/2) * sqrt(var_rho_star)
CI_normal_high <- cor(data)[1,2] - bias_rho_star +
qnorm(1-alpha/2) * sqrt(var_rho_star)
# Obtain Percentile Bootstrap CI
CI_low <- quantile(rho_star, alpha/2)
CI_high <- quantile(rho_star, 1-alpha/2)
# Tabulate the results for output
results <- data.frame(
"Lower Bound" = c(CI_normal_low, CI_low),
"Upper Bound" = c(CI_normal_high, CI_high),
row.names = c("Normal", "Percentile"))
return(results)
}
# Construct Data
raw.1 <- list(lsat = c(576, 635, 558, 578, 666, 580, 555,
661, 651, 605, 653, 575, 545, 572, 594),
gpa = c(3.39, 3.30, 2.81, 3.03, 3.55, 3.07, 3.00,
3.43, 3.36, 3.13, 3.12, 2.74, 2.76, 2.88, 2.96),
n = 15)
# Call Function
CI.cor(raw.1)
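# Sanity check (added): compare the bootstrap interval with the
# Fisher-z interval from cor.test()
cor.test(raw.1$lsat, raw.1$gpa)$conf.int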
## 1.2
CI.var.ratio <- function(raw, R = 10000, conf = 0.95){
n <- raw$n
data <- cbind(raw$lsat, # Compile the data into a 2 column matrix
raw$gpa)
vr_star <- NULL # Initialize vector of variance ratios
for(r in 1:R){ # For R Replications
index_star <- 1:n %>%
sample(size = n, replace = TRUE) # Get n resamples from data
data_star <- data[index_star,] # Obtains the resamples
vr_star[r] <- var(data_star[,1])/var(data_star[,2]) # Gets var ratios
}
vr_hat = var(data[,1])/var(data[,2]) # Define for easier referencing
# Computing estimate of bias and variance for bootstrap var ratios
bias_vr_star <- mean(vr_star - vr_hat)
var_vr_star <- var(vr_star)
# Define alpha confidence level
alpha = 1 - conf
# Obtain Normal Bootstrap CI
CI_normal_low <- vr_hat - bias_vr_star +
qnorm(alpha/2) * sqrt(var_vr_star)
CI_normal_high <- vr_hat - bias_vr_star +
qnorm(1-alpha/2) * sqrt(var_vr_star)
# Obtain Percentile Bootstrap CI
CI_low <- quantile(vr_star, alpha/2)
CI_high <- quantile(vr_star, 1-alpha/2)
# Tabulate the results for output
results <- data.frame(
"Lower Bound" = c(CI_normal_low, CI_low),
"Upper Bound" = c(CI_normal_high, CI_high),
row.names = c("Normal", "Percentile"))
return(results)
}
CI.var.ratio(raw.1) # Call Function
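# Sanity check (added): compare with the F-based interval from var.test(),
# which assumes normality
var.test(raw.1$lsat, raw.1$gpa)$conf.int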
## ;;
## -------------------------------------------
## Q2: -- add your code below
## -------------------------------------------
## ;;
# The given code for GDP
fit.gpd <- function(x, thresh, tol.xi.limit=5e-2, ...){
llik.gpd <- function(par, x, thresh, tol.xi.limit=5e-2)
{
y <- x[x>thresh]
sigma <- exp(par[1])
xi <- par[2]
n <- length(y)
if(abs(xi)<=tol.xi.limit)
{
llik <- n*log(sigma) + sum((y-thresh)/sigma)
return(llik)
}
par.log.max <- log( pmax( 1+xi*(y-thresh)/sigma, 0 ) )
llik <- -n*log( sigma )-(1+( 1/xi ))*sum( par.log.max )
llik <- -ifelse( llik > -Inf, llik, -1e40 )
return(llik)
}
fit <- optim(par = c(0, 0), fn = llik.gpd,
x=x, thresh=thresh,
control=list( maxit=10000, ... ))
sigmahat <- exp( fit$par[1] )
xihat <- fit$par[2]
return(c(sigmahat, xihat))
}
fit.gpd(x=burlington$Precipitation,
        thresh=quantile(burlington$Precipitation,0.8))
## 2.1
# Write an inversion sampling function for GDP
rgdp <- function(n, thresh, sigma, xi, tol.xi.limit = 5e-2){
z <- runif(n) # Generate uniform RV's
if(abs(xi) < tol.xi.limit){ # Consider the limiting case
x <- thresh - sigma*log(1-z) # Inverse of limiting CDF
return(x)
} else {
x <- thresh + (sigma/xi) * ((1-z)^(-xi) - 1)
return(x)
}
}
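# Quick check of the sampler (added sanity check): empirical vs theoretical
# exceedance probability for one arbitrary parameter choice
set.seed(1)
x.chk <- rgdp(1e5, thresh = 0, sigma = 1, xi = 0.2)
mean(x.chk > 1)              # empirical P(X > 1)
(1 + 0.2 * 1)^(-1 / 0.2)     # GPD survival function at x = 1, ~0.40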
# Parametric Bootstrap Function
parboot.gpd <- function(data, thresh, R=10000){
n <- length(data) # Get length of data
# Fit the data to GDP
  model <- fit.gpd(x=data,
                   thresh=thresh)
sigma <- model[1]
xi <- model[2]
# Initialize estimate of p.hat is ratio of "successes" over total trials
p.hat <- length(data[data > thresh]) / length(data)
mle <- c(sigma, xi, p.hat) # Store the MLE Estimators based on data
  theta.star <- matrix(ncol=3, nrow=R) # Initialize matrix to store parameter fits
p <- p.hat # Initialize for first replication
for(r in 1:R){
n.star <- rbinom(1, n, p) # Generate binomial random variables to get exceedence counts
x <- rgdp(n.star, thresh = thresh,
sigma = sigma, xi = xi) # Get GPD Samples
    mod.star <- fit.gpd(x=x, # Fit the samples to a GPD
                        thresh=thresh)
theta.star[r, 1] <- mod.star[1] # Store sigma in column 1
theta.star[r, 2] <- mod.star[2] # Store xi in column 2
theta.star[r, 3] <- p
p <- n.star/n # Update p for next replication
}
# Compute Biases
bias.sigma <- mean(theta.star[,1] - sigma)
bias.xi <- mean(theta.star[,2] - xi)
bias.p <- mean(theta.star[,3] - p.hat)
biases <- c(bias.sigma, bias.xi, bias.p) # Store biases in vector
# Compute standard errors
se.sigma <- sd(theta.star[,1])
se.xi <- sd(theta.star[,2])
se.p <- sd(theta.star[,3])
se <- c(se.sigma, se.xi, se.p)
return(list(mle = mle, bias = biases, # Return the named list
se = se, distn = theta.star))
}
answer2.1 <- parboot.gpd(data=burlington$Precipitation,
thresh=quantile(burlington$Precipitation,0.9))
## 2.2
# Non-Parametric Bootstrap Function
npboot.gpd <- function(data, thresh, R=10000){
n <- length(data) # Get length of data
# Get MLE estimates from original data
  mle.fit <- fit.gpd(x=data,
                     thresh=thresh)
sigma.hat <- mle.fit[1]
xi.hat <- mle.fit[2]
p.hat <- length(data[data > thresh]) / length(data)
mle <- c(sigma.hat, xi.hat, p.hat) # Store the MLE Estimators based on data
  theta.star <- matrix(ncol=3, nrow=R) # Initialize matrix to store parameter fits
for(r in 1:R){
# Get a sample from the original data
data.star <- data %>%
sample(size = n, replace = TRUE)
# Fit the model to the sample data
    fit.star <- fit.gpd(x = data.star,
                        thresh = thresh)
theta.star[r,1] <- fit.star[1] # Store sigma
theta.star[r,2] <- fit.star[2] # Store xi
theta.star[r,3] <- length(data.star[data.star > thresh]) / n # Store p
}
# Compute Biases
bias.sigma <- mean(theta.star[,1] - sigma.hat)
bias.xi <- mean(theta.star[,2] - xi.hat)
bias.p <- mean(theta.star[,3] - p.hat)
biases <- c(bias.sigma, bias.xi, bias.p) # Store biases in vector
# Compute standard errors
se.sigma <- sd(theta.star[,1])
se.xi <- sd(theta.star[,2])
se.p <- sd(theta.star[,3])
se <- c(se.sigma, se.xi, se.p)
return(list(mle = mle, bias = biases, # Return the named list
se = se, distn = theta.star))
}
answer2.2 <- npboot.gpd(data=burlington$Precipitation,
thresh=quantile(burlington$Precipitation,0.9))
## 2.3
thresh = quantile(burlington$Precipitation,0.9) # Define for future use
# Non-parametric bootstrap for returns and 95% CI
ret.level <- function(T, param = FALSE, thresh = quantile(burlington$Precipitation,0.9),
alpha = 0.05){
if(param == TRUE) {
gpd <- answer2.1 # Get parametric data
} else {
gpd <- answer2.2 # Get non-parametric data
}
# Obtain distribution using bootstrap distributions of GPD
distn <- gpd$distn
r.distn <- thresh + (distn[,1]/distn[,2]) * ((T*distn[,3])^distn[,2] - 1)
# Compute Percentile Confidence Intervals
CI_lower <- quantile(r.distn, alpha/2)
CI_upper <- quantile(r.distn, 1-alpha/2)
r.CI <- c(CI_lower, CI_upper)
return(list(distn = r.distn,
CI = r.CI))
}
# Obtain parametric bootstrap for different T's
r.p.100 <- ret.level(100, param = TRUE)
r.p.500 <- ret.level(500, param = TRUE)
r.p.1000 <- ret.level(1000, param = TRUE)
r.p.10000 <- ret.level(10000, param = TRUE)
# Obtain non-parametric bootstraps for different T's
r.np.100 <- ret.level(100)
r.np.500 <- ret.level(500)
r.np.1000 <- ret.level(1000)
r.np.10000 <- ret.level(10000)
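# Side-by-side view of the parametric and non-parametric percentile CIs
# (added summary, reusing the objects computed above):
rbind("param T=100"   = r.p.100$CI,   "nonparam T=100"   = r.np.100$CI,
      "param T=500"   = r.p.500$CI,   "nonparam T=500"   = r.np.500$CI,
      "param T=1000"  = r.p.1000$CI,  "nonparam T=1000"  = r.np.1000$CI,
      "param T=10000" = r.p.10000$CI, "nonparam T=10000" = r.np.10000$CI)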
## ;;
## -------------------------------------------
## Q3: -- add your code below ;;
## -------------------------------------------
## ;;
## 3.1
## Posteriors in PDF file
## 3.2
# Initialize data
pumps.data <- list(t = c(94.3, 15.7, 62.9, 126, 5.24, 31.4, 1.05, 1.05, 2.1, 10.5),
x = c(5, 1, 5, 14, 3, 19, 1, 1, 4, 22),
n = 10)
# Define acceptance probability for alpha
accept.alpha <- function(a0, a1, V=1, n=10, beta=1, theta=rep(1,10)) {
return( (gamma(a0)/gamma(a1))^n *
beta^(n*(a1-a0)) *
prod(theta) ^ (a1-a0) *
exp(-(a1-a0)) *
a1/a0 )
}
# Generate samples from the posterior using MCMC
# Ndraw is the number of draws, nburn is how many to discard among first entries,
# V is the variance of random walk on log a
mcmc.pumps <- function(data, ndraw=100, nburn=0, # Default initial parameters
theta0 = rep(0.5,10), beta0 = 1,
alpha0 = 0.5, V = 1) {
# Retrieve values from data
n <- data$n
t <- data$t
x <- data$x
  # The theta parameter differs from pump to pump
  d.theta <- matrix(nrow = ndraw, ncol=n) # Initialize a matrix to store draws for theta
d.others <- matrix(nrow = ndraw, ncol=2) # Initialize matrix for beta and alpha
iter <- -nburn # For burning entries
# Set initial parameters as the current parameters
theta <- theta0
alpha <- alpha0
beta <- beta0
alpha.accept.count <- 0
while(iter < ndraw) {
iter <- iter + 1 # Increase counter
# Update thetas
for(i in 1:n) { # Update done for each pump in turn
# Update using conditional distributions
      theta[i] <- rgamma(1, shape = x[i] + alpha, rate = beta + t[i]) # use the current alpha and beta, not the initial values
}
# Update beta
beta <- rgamma(1, shape = n * alpha, rate = sum(theta) + 0.01)
# Generate proposal alpha
alpha.prop <- exp(log(alpha) + rnorm(1,0,V)) # Random walk on log alpha
alpha.accept <- min(1, accept.alpha(a0 = alpha, # Get acceptance criterion
a1 = alpha.prop,
V = V,
n = n,
beta = beta,
theta = theta))
if(runif(1) <= alpha.accept){ # If unif is below the acceptance criterion
alpha <- alpha.prop # accept alpha
if(iter > 0) {
alpha.accept.count <- alpha.accept.count + 1 # Count accepted alphas
}
}
# Record draws if burning is done
if(iter > 0) {
d.theta[iter,] <- theta
d.others[iter,1] <- beta
d.others[iter,2] <- alpha
}
}
# Return Values
return( list(theta = d.theta,
parameters = d.others[,1:2],
acceptance = alpha.accept.count / ndraw))
}
answer3.2 <- mcmc.pumps(pumps.data)
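# Basic convergence checks (added): trace plot of the alpha chain and the
# Metropolis acceptance rate recorded by mcmc.pumps()
plot(answer3.2$parameters[, 2], type = "l", xlab = "iteration", ylab = "alpha")
answer3.2$acceptance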
## 3.3
# Predictive distribution for failure for ith pump
predictive.pumps <- function(i, t, pumps.data = answer3.2, max.x = 30){
# Retrieve samples from posterior distribution
theta <- pumps.data$theta[,i]
x <- 1:max.x # Maximum number of failures to compute distribution for
pred.dist <- NULL
  for(f in 1:length(x)){ # Prob of exactly f failures, f = 1, ..., max.x
pred.dist[f] <- mean(dpois(f, lambda=theta*t)) # Predictive distribution
}
return(cbind("Failures" = x, # Return a table that lists the prob for x failures
"Probability" = pred.dist))
}
# Call the function for pump 1 and 94.3 time units for up to 25 failures
answer3.3 <- predictive.pumps(1, 94.3, max.x = 25)
plot(answer3.3) # Compare with expected probabilities based on data
## ;;
## -------------------------------------------
## Q4: -- add your code below;;
## -------------------------------------------
## ;;
## 4.1
accept.mu <- function(x, mu0, mu1){
terms <- ((10+(x-mu1)^2)/(10+(x-mu0)^2))^(-11/2)
first.part <- prod(terms)
second.part <- exp( -(1/2) * (mu1^2 - mu0^2))
return(first.part * second.part)
}
# MCMC for t-distribution using random walk
# V is the sd of the normal candidate generator
mcmc.t <- function(x, ndraw=1000, nburn=100, mu0=0, v=1){
mu <- mu0 # Set initial mu as current
iter <- -nburn # Initialize for data discarding
draws <- NULL # Initialize vector of draws
while(iter < ndraw){
iter <- iter + 1 # update counter
mu.prop <- rnorm(1, mu, v) # Normal Proposal
acc.mu <- min(1, accept.mu(x, mu, mu.prop)) # Compute acceptance probability
if(runif(1) <= acc.mu){ # If accepted, change the current mu
mu <- mu.prop
}
if(iter > 0){ # Once burning is done
draws[iter] <- mu
}
}
# Return the draws
return(draws)
}
# Generate t-distribution samples
x <- rt(12, df = 10)
# Generate samples from posterior distribution for mu
answer4.1 <- mcmc.t(x)
## 4.2
mcmc.gibbs <- function(x, mu0=0, z0=1, ndraw = 1000, nburn=100){
iter <- -nburn # Initialize for burning entries
n <- length(x)
# Initialize variables
mu <- mu0
z <- rep(z0, n)
  d.mu <- NULL # Initialize an empty vector for mu
d.z <- matrix(nrow = ndraw, ncol = n) # Initialize matrix for draws of z
while(iter < ndraw){
iter <- iter + 1 # Increment counter
    # Update mu and z via conditional posteriors
    # (N(0,1) prior on mu: posterior precision is sum(z) + 1)
    mu <- rnorm(1, mean = sum(x*z)/(sum(z) + 1), sd = 1/sqrt(sum(z) + 1))
for(i in 1:n) {
z[i] <- rgamma(1, shape = 11/2, rate = (1/2)*(x[i]-mu)^2 + 5)
}
if(iter > 0){ # Record draws if done burning
d.mu[iter] <- mu
d.z[iter,] <- z
}
}
# Return named list
return(list(mu = d.mu,
z = d.z))
}
# Generate data that comes from t-distribution with 10 df
x <- rt(12, df=10)
plot(density(x))
answer4.2 <- mcmc.gibbs(x)
plot(density(answer4.2$mu)) # Inspect the distribution for mu
## 4.3
# Predictive distribution of t
predictive.t <- function(mu = answer4.2$mu){
x <- seq(-3,5, len=100) # points at which distribution will be evaluated
n <- length(x)
m <- length(mu)
dist <- NULL
for(i in 1:n){ # Evaluate the integral at each point
    dist[i] <- mean(dnorm(x[i],
                          mean = mu,
                          sd = 1/sqrt(rgamma(m, shape = 11/2,  # z is a precision, so sd = 1/sqrt(z)
                                             rate = (1/2)*(x[i]-mu)^2 + 5))))
}
return(cbind(x, dist))
}
answer4.3 <- predictive.t()
plot(answer4.3, type = "l") # predictive density over the grid of x values
## 4.4
## Predictive Quantile
predictive.quantile <- function(mu = answer4.2$mu, alpha = 0.05){
  # Draw from the predictive distribution directly: a new z from its
  # Gamma(5, 5) prior, then a new x given (mu, z)
  m <- length(mu)
  z.new <- rgamma(m, shape = 5, rate = 5)
  x.new <- rnorm(m, mean = mu, sd = 1/sqrt(z.new))
  # Compute CI bounds as quantiles of the predictive draws
  CI_lower <- quantile(x.new, alpha/2)
  CI_upper <- quantile(x.new, 1-alpha/2)
  return(c(CI_lower, CI_upper))
}
predictive.quantile()
## ------------------------------------------------
## ;;
## -------------------------------------------
## DRAFT
## -------------------------------------------
## ;;
## foo <- rnorm(100)
##
##
##
|
f3962b54f96f55a331187ebdef3f034c483d0212
|
a94154fe2b706fd76955a3f4073809507495036c
|
/man/readLPIdata.KAIRA.Rd
|
39e15cdf2b41e132dd785b6c00ba126e5a43db62
|
[
"BSD-2-Clause"
] |
permissive
|
ilkkavir/LPI.KAIRA
|
f755c58ede14d8339ea30b7981e828c8447e6be4
|
2d7f1141bf2790272706b1a490b0116908df319e
|
refs/heads/master
| 2020-07-14T18:42:25.025605
| 2019-08-30T12:21:46
| 2019-08-30T12:21:46
| 205,376,454
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,666
|
rd
|
readLPIdata.KAIRA.Rd
|
\name{readLPIdata.KAIRA}
\title{readLPIdata.KAIRA}
\alias{readLPIdata.KAIRA}
\description{Read one integration period of voltage level
data. Transmitter samples are recorded with USRP at Tromso and
receiver samples with KLP at Kilpisjarvi.}
\usage{readLPIdata.KAIRA( LPIparam , intPeriod )}
\arguments{
\item{ LPIparam }{ An LPI parameter list from \link{LPI.KAIRA} }
\item{ intPeriod }{ Integration period number. Integration periods are
counted in steps of LPIparam$timeres.s, the period number 1 starting
at LPIparam$startTime.}
}
\value{
A list with the following contents
\item{'RX1'}{ First receiver samples }
\item{'RX2'}{ Second receiver samples. Will be identical with 'RX1'
in autocovariance function estimation}
\item{'TX1'}{ First transmitter samples}
\item{'TX2'}{ Second transmitter samples. Will usually be identical
with 'TX1', but may be different e.g. in orthogonal polarization
experiments.}
\item{success}{ TRUE if all requested data was successfully read,
FALSE otherwise.}
The elements "RX1", "RX2", "TX1", and "TX2" are lists
themselves with elements
\item{'cdata'}{ Complex data vector.}
\item{'idata'}{ Logical vector, TRUE for samples that should be used
in LPI.}
\item{'ndata'}{ Number of samples in the data vectors.}
}
\details{
\describe{
\item{'LPIparam contents'}{ Following components of the LPI parameter
list are used for selecting the correct signal samples
\describe{
\item{'startTime'}{'beginTime' converted into POSIX format,
i.e. second count from 1970-01-01 00:00:00.
}
\item{'dataStartTimes'}{A named vector with components 'RX1',
    'RX2', 'TX1', and 'TX2'. Each element is the sampling time of the
    first sample of the corresponding data type. The times are in
seconds in POSIX format. }
\item{'dataSampleFreqs'}{ A named vector with components 'RX1',
'RX2', 'TX1', and 'TX2'. Each element is the sample rate of the
corresponding data type in Hz.}
\item{'timeRes.s'}{ Analysis time resolution (incoherent
integration period) in seconds. }
\item{'dataFileLengths'}{A named vector with components 'RX1',
'RX2', 'TX1', and 'TX2'. Each element is the number of complex
samples in one data file of the corresponding data type. }
\item{'fileNamePrefix'}{A named vector with components 'RX1',
'RX2', 'TX1', and 'TX2'. Each element contains the file name
prefix of the corresponding data type as a string. }
}
}
}
}
\seealso{LPI.gdf , LPI}
\author{Ilkka Virtanen (University of Oulu, Finland) \cr
\email{ilkka.i.virtanen@oulu.fi}}
|
0ff16232bd0169705515a67b38b0c269d275f424
|
d64f92b2d2a3522442b68cef07af758c6db5ea3d
|
/App-positivity/inst/main.R
|
f45a6ef11db87c215a95a63a78a3cf2bbe84a734
|
[] |
no_license
|
tranlm/positivity
|
f75c24b1f4c48bbfdbc87969e858ebb75e2644dd
|
c8346fee56c999d9fa1cb2a26b641f362453af17
|
refs/heads/master
| 2021-01-15T23:50:10.850559
| 2015-03-12T18:42:34
| 2015-03-12T18:42:34
| 32,095,685
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,158
|
r
|
main.R
|
###############################################################################
# Description: Master file that calls in all other functions
#
# Author: Linh Tran <tranlm@berkeley.edu>
# Title: Handling positivity violations in longitudinal data
# Date: 2015-01-08
###############################################################################
rm(list = ls())
#################
## DESCRIPTION ##
#################
# This study simulates positivity violations in a longitudinal setting and tries
# out four different approaches at handling it:
#
# 1) Choose regimens where not a problem (eg switch immediately or never)
# 2) Dynamic regimes, eg switch as soon as you come in after your assigned time
# 3) Joint (partially stochastic) intervention on both
# (i.e. leave visits random until time of assigned switch and then force someone to come in)
# 4) Don't adjust for the covariate causing the problem (i.e. don't adjust for whether a patient comes in)
# and could show that this might or might not cause a problem depending on whether coming to
# clinic has a direct effect on outcome not via switch.
#############
## OPTIONS ##
#############
ncores = 8
setwd("~/Dropbox/Studies/positivity")
options("mc.cores" = ncores, stringsAsFactors=FALSE, digits=4)
words = function(...) paste(substitute(list(...)))[-1]
#############
## LIBRARY ##
#############
load.packages = words(foreach, iterators, snow, doSNOW)
lapply(load.packages, require, character.only=T)
#########################
## SIMULATION SETTINGS ##
#########################
n = 1000
sim = 1000
time.pt = 10
ZaffectsY = TRUE
########################
## TRUE DISTRIBUTIONS ##
########################
#Time ordering: W, Y(t), L(t), Z(t), A(t), C(t) : W=(W1,W2) and L(t) = (L2(t),L1(t))
#n.b. Within L(t) there is no implied time-ordering...i.e. either of L2(t) or L1(t) can go first
rexpit = function(x) rbinom(n=length(x), size=1, prob=x)
QW1 = function(n) rnorm(n, mean=0, sd=1)
QW2 = function(n) rep(plogis(.2), n)
QL1.t = function(y, w1, prev_l1, prev_l2, prev_a) ifelse(y==1, prev_l1, 0.1 + 0.4*w1 + 0.6*prev_l1 - 0.7*prev_l2 - 0.45*prev_a - rnorm(length(w1), sd=0.5))
QL2.t = function(y, w1, w2, prev_l1, prev_l2, prev_a) ifelse(y==1, prev_l2, -0.55 + 0.5*w1 + 0.75*w2 + 0.1*prev_l1 + 0.3*prev_l2 - 0.75*prev_a - rnorm(length(w1), sd=0.5))
gZ.t = function(y, w1, w2, l1, l2, prev_a) ifelse(y==1, 0, plogis(2.8 - 0.5*w1 + 0.6*w2 + 0.7*l1 + 0.7*l2))
gA.t = function(y, w1, w2, l1, l2, prev_a, z) ifelse(y==1, prev_a, ifelse(z==0, prev_a, ifelse(prev_a==1, 1, plogis(-1.5 - 1.5*w1 + 0.75*w2 + 0.8*l1 + 0.8*l2))))
if(ZaffectsY) {
QY.t = function(prev_y, w1, w2, prev_l1, prev_l2, prev_a, prev_z) ifelse(prev_y==1, 1, plogis(-1.8 + 1.2*w1 - 2.4*w2 - 1.6*prev_l1 - 1*prev_l2 - 1.9*prev_a - 1.25*prev_z))
} else {
QY.t = function(prev_y, w1, w2, prev_l1, prev_l2, prev_a, prev_z) ifelse(prev_y==1, 1, plogis(-1.8 + 1.2*w1 - 2.4*w2 - 1.6*prev_l1 - 1*prev_l2 - 1.9*prev_a + 0*prev_z))
}
# nb. Distribution is set up such that:
# Y(0)=0 for everyone, ie. Everyone is alive at the beginning of follow-up
# if Y(t)=1, then they don't come in for a visit (ie. Z(t)=0))
# if Y(t)=1, then all remaining covariate last values get carried forward
# if Z(t)=0, then A(t-1) gets carried forward
# if A(t-1)=1 then A(t)=1
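# Sketch: quick checks that the simulation functions above respect the
# carry-forward rules listed in this note (uses only functions defined here)
# stopifnot(gA.t(y=0, w1=0, w2=1, l1=0, l2=0, prev_a=1, z=1) == 1) # A stays 1 once 1
# stopifnot(gA.t(y=0, w1=0, w2=1, l1=0, l2=0, prev_a=0, z=0) == 0) # no visit: A carried forward
# stopifnot(gZ.t(y=1, w1=0, w2=1, l1=0, l2=0, prev_a=0) == 0)      # after death: no visit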
#############
## FOLDERS ##
#############
subdirectories = list.dirs()
if(!"./data" %in% subdirectories) system("mkdir ./data")
if(!"./inst" %in% subdirectories) system("mkdir ./inst")
if(!"./inst/results" %in% subdirectories) system("mkdir ./inst/results")
###########
## CODES ##
###########
source("./R/generateData.R")
source("./R/generatePsi.R")
source("./R/plotPsi.R")
###############################################################################
############################# TRUE PARAMETER VALUES ###########################
###############################################################################
skip = function() {  # wrapped in a function so the block below runs only when called manually
## STARTS PARALLEL CLUSTER ##
set.seed(1989)
superman <- makeCluster(ncores, type="SOCK")
registerDoSNOW(superman)
clusterExport(cl = superman, c("time.pt", "generate.data", "rexpit", "QW1", "QW2", "QL1.t", "QL2.t", "gZ.t", "gA.t", "QY.t"))
cat("Cluster summary\n"); getDoParRegistered(); getDoParName(); getDoParWorkers(); getDoParVersion()
## CALCULATES TRUE VALUES ##
truePsi = generatePsi()
save(truePsi, file=paste0("./data/truePsi_", ifelse(ZaffectsY,"ZaffectsY","noZaffectsY"), ".Rda"))
## STOPS CLUSTER ##
stopCluster(superman)
#######################
## PLOTS TRUE VALUES ##
#######################
pdf(paste0("./inst/results/truePsi_", ifelse(ZaffectsY,"ZaffectsY","noZaffectsY"), ".pdf"), height=6, width=6)
plotPsi(main=ifelse(ZaffectsY,"(a)","(b)"))
dev.off()
}
## LOADS PREVIOUSLY COMPUTED/SAVED VALUES ##
load(file=paste0("./data/truePsi_", ifelse(ZaffectsY,"ZaffectsY","noZaffectsY"), ".Rda"))
###############################################################################
################################## SIMULATION #################################
###############################################################################
set.seed(1)
|
2e0ed1a5e763d8e08b291d2bc6283cf1f8af814a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Rgb/examples/Annotation.Rd.R
|
3217ba588934ad6672a8f4caa5a2e47740a13947
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 372
|
r
|
Annotation.Rd.R
|
library(Rgb)
### Name: Annotation
### Title: Annotation track constructors
### Aliases: Annotation track.table.GTF track.exons.CCDS track.CNV.DGV
### track.genes.NCBI track.bands.UCSC
### ** Examples
# From the "How-to" vignette, section "Custom annotation tracks"
file <- system.file("extdata/Cosmic_ATM.gtf.gz", package="Rgb")
tt <- track.table.GTF(file)
|
fbe0bdab29444174b8c82bcdd5c3577288fd4b4e
|
f0aa4e9dd55a5511a76c0fdd9a92c2012daa455c
|
/man/fine_map.Rd
|
f3e2cae5c30e9b8341858ddf8675b07b068f6342
|
[] |
no_license
|
william-denault/mWaveQTL
|
068760edc2df3e5a97a71533f8826bb73e9eee3f
|
6871517d1d602f35f558f4aeb68866c40a6d1be5
|
refs/heads/master
| 2022-12-12T13:45:20.373190
| 2020-09-07T06:34:38
| 2020-09-07T06:34:38
| 293,328,869
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 897
|
rd
|
fine_map.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{fine_map}
\alias{fine_map}
\title{Definition of the regions with Bayes factor over a threshold.}
\usage{
fine_map(res, lev_res, thresh, start, end, chr)
}
\arguments{
\item{res}{Output of mWaveQTL.}
\item{lev_res}{the maximum level of resolution of the previous analysis.}
\item{thresh}{numeric, Bayes factor threshold to defined the fine mapping. If missing set as 1.}
\item{start}{numeric, start position in base pairs of the analyzed region.}
\item{end}{numeric, end position in base pairs of the analyzed region.}
\item{chr}{chromosome of the analyzed region.}
}
\description{
Internal fine-mapping tool for the output of the mWaveQTL function.
}
\details{
Returns a list of chr, start, and end positions corresponding to the sub-regions, defined by the dyadic wavelet decomposition, whose Bayes factor is over the defined threshold.
}
|
01ebf2188018257953785eccc41154cc907b0b7f
|
846559394848a21cf4859096455b2e2b31c8c77e
|
/R/NSSTDJD.R
|
1e2a5b3f696e01c12746c7f8b27f48ac92e6c84c
|
[] |
no_license
|
ivotebexreni/JADE
|
e8d2c2087b35a7236a607e04317904da96ab97d3
|
c9efee1d940dc49d30739f88ce6855caa4f00284
|
refs/heads/master
| 2023-03-18T20:39:48.617218
| 2020-03-25T10:30:02
| 2020-03-25T10:30:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,466
|
r
|
NSSTDJD.R
|
# Method NSS.TD.JD
NSS.TD.JD <- function(X,...) UseMethod("NSS.TD.JD")
# main function for NSS.TD.JD
#
# input:
# x = data matrix
# K = number of intervals, default 12; ignored when n.cuts is provided.
# Tau = lags used to compute the autocovariance matrices for each interval, default = 0:11
# n.cuts = cut-off points for the intervals, of the form c(1, ..., n) where n is the sample size. If NULL, then K is used
# eps = eps for the JD
# maxiter = maxiter for the JD
# output
#
# list of class "bss" with components
# W = unmixing matrix
# k = lag used
# n.cut = n.cut used
# K = number of intervals used
# S = sources as a time series object
NSS.TD.JD.default <- function(X, K=12, Tau=0:11, n.cuts=NULL, eps = 1e-06, maxiter = 100, ...)
{
n <- nrow(X)
MEAN <- colMeans(X)
COV <- cov(X)
EVD.COV <- eigen(COV, symmetric=TRUE)
COV.sqrt.inv <- EVD.COV$vectors %*% tcrossprod(diag(sqrt(1/EVD.COV$values)),EVD.COV$vectors)
X.C <- sweep(X,2,MEAN,"-")
Y <- tcrossprod(X.C, COV.sqrt.inv)
p <- ncol(X)
if (is.null(n.cuts)) n.cuts <- ceiling(seq(1,n,length=K+1)) else K <- length(n.cuts)-1
N.cuts <- n.cuts + c(rep(0,K),1)
L<- length(Tau)
R <- array(0, dim=c(p,p,L*K))
ii<-1
for (i in 1:K){
Y.i<-Y[N.cuts[i]:(N.cuts[i+1]-1),]
for (j in 1:L){
R[,,ii] <- M.x(Y.i, Tau=Tau[j])
ii<-ii+1
}
}
W <- crossprod(frjd(R, eps=eps, maxiter=maxiter)$V, COV.sqrt.inv)
S <- tcrossprod(X.C,W)
S <- ts(S, names=paste("Series",1:p))
RES <- list(W=W, k=Tau, n.cut=n.cuts, K=K, S=S)
class(RES) <- "bss"
RES
}
NSS.TD.JD.ts <- function(X, ...)
{
x <- as.matrix(X)
RES <- NSS.TD.JD.default(x,...)
S <- RES$S
attr(S, "tsp") <- attr(X, "tsp")
RES$S <- S
RES
}
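# Example (sketch): separate a toy nonstationary mixture; assumes frjd() from
# the JADE package is available, since NSS.TD.JD.default calls it internally
# set.seed(1)
# n <- 1000
# S <- cbind(sin((1:n)/5) * rep(c(1, 3), each = n/2),
#            rnorm(n, sd = rep(c(1, 2), each = n/2)))
# A <- matrix(rnorm(4), 2, 2)
# res <- NSS.TD.JD(tcrossprod(S, A), K = 4, Tau = 0:5)
# plot(res$S)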
##################################################################
# helper function M.x #
# Autocovariance matrix for a centered time series at lag Tau #
# (symmetrized) #
##################################################################
M.x <- function(X,Tau=0)
{
n<- nrow(X)
Xt <- X[1:(n-Tau),]
Xti <- X[(1+Tau):n,]
Ri <- crossprod(Xt,Xti)/nrow(Xt)
(Ri+t(Ri))/2
}
|
7279f162f7f36037aa86df3b17a25608669176f1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/descomponer/examples/gdf.Rd.R
|
a34c047f5515f108cf5b241cbaf1904d40bbac37
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 190
|
r
|
gdf.Rd.R
|
library(descomponer)
### Name: gdf
### Title: Get Frequency Data
### Aliases: gdf
### Keywords: smooth
### ** Examples
n<-100;x<-seq(0,24*pi,length=n);y<-sin(x)+rnorm(n,sd=.3)
gdf(y)
|
ca928f3ab02733636a9818e120df7d7d247b79dc
|
345fe8e9b2fa22dbd8d37bacb245f9f14c77c5e4
|
/fisherX2.R
|
766598bec196373839dcdb051aca9ce5e434d645
|
[] |
no_license
|
jamesbrownlow/LoadNORCdata
|
3563d22a846f166a87f989954fa279d8033d3df2
|
23380985474f2483e135c36a3bf711f60615473d
|
refs/heads/master
| 2023-07-28T23:44:04.839453
| 2021-09-21T14:48:29
| 2021-09-21T14:48:29
| 408,231,818
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 573
|
r
|
fisherX2.R
|
fisher.two = function(x, y){
  # x and y are factor variables; run a pairwise Fisher exact test on the
  # two-column table obtained from each pair of levels of y
  levelsY = levels(y)
  n = length(levelsY)
  for (i in 1:(n-1))
    for (j in (i+1):n) {
      print(c(levelsY[i], levelsY[j]))
      tempDF = data.frame(x, y)
      # subset to the two levels of y (base R, no dplyr dependency needed)
      tempDFfiltered = tempDF[tempDF$y %in% c(levelsY[i], levelsY[j]), ]
      # drop unused factor levels so the table has exactly two columns
      tempDFfiltered$y = droplevels(tempDFfiltered$y)
      fisherTable = as.matrix(table(tempDFfiltered$x, tempDFfiltered$y))
      print(fisherTable)
      fisherTest = fisher.test(fisherTable, conf.int = TRUE)
      print(fisherTest)
    }
}
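## Example (sketch): pairwise Fisher tests on synthetic factors
# set.seed(1)
# x <- factor(sample(c("A", "B"), 60, replace = TRUE))
# y <- factor(sample(c("low", "mid", "high"), 60, replace = TRUE))
# fisher.two(x, y)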
|
d4d3e9636b66d220f2780041ae524333f9ad85e7
|
dca51772513b329d11702834f9e08c825f86596b
|
/man/bin_search_.Rd
|
db5104e15a074343a27470f3467fab5a2747c8a9
|
[] |
no_license
|
ebenmichael/ents
|
cc168a3a5d1378d9160a62055a1f16ea359e13e2
|
75fd372d766d91a92eb5fbcf9dff6802c9c89f18
|
refs/heads/master
| 2021-04-25T04:16:30.181641
| 2018-11-05T22:47:18
| 2018-11-05T22:47:18
| 115,158,450
| 3
| 1
| null | 2018-04-05T17:45:59
| 2017-12-22T23:56:41
|
R
|
UTF-8
|
R
| false
| true
| 492
|
rd
|
bin_search_.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selection.R
\name{bin_search_}
\alias{bin_search_}
\title{Perform binary search for the smallest balance which is feasible}
\usage{
bin_search_(eps, feasfunc)
}
\arguments{
\item{eps}{Sorted list of balance tolerances to try}
\item{feasfunc}{Function which returns TRUE if feasible}
}
\value{
smallest tolerance which is feasible
}
\description{
Perform binary search for the smallest balance which is feasible
}
|
d4b7a461931da04d2f90617d689253f8b77a7e3e
|
cde2e7c77f5427d3cdb106271c20b6c312c271ac
|
/plot4.R
|
8f025f59aa42bfe73e3204183d1e3b8de13c5228
|
[] |
no_license
|
MVB1/ExData_Plotting1
|
75143fe10dffdfa11cbbcaa0e463aab256a429d0
|
e7c38b394979397cfd3d2fb21acb48c282afe447
|
refs/heads/master
| 2021-01-18T01:28:07.703690
| 2015-01-11T16:01:59
| 2015-01-11T16:01:59
| 29,068,058
| 0
| 0
| null | 2015-01-10T19:22:42
| 2015-01-10T19:22:41
| null |
UTF-8
|
R
| false
| false
| 4,050
|
r
|
plot4.R
|
# Exploratory Data Analysis
# Week 1, Assignment 1
# Plot 4, MVB
## Start
message("This script will generate Plot 4")
DateStarted <- date()
DateStarted
getwd()
## Functions for download and decompression
Download <- function () {
message("Downloading zip file")
fileUrl <- "http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile="./Data.zip")
message("Download completed")
}
Unzip <- function () {
message("Decompressing file")
message("This may take a few minutes")
unzip(zipfile="./Data.zip")
message("Decompression completed")
}
## Check if file is present in working directory (download and decompression will be skipped if file is present)
if(!file.exists("./household_power_consumption.txt")){
message("File is not present in working directory")
Download()
dateDownloaded <- date()
dateDownloaded
Unzip()
}
## Read file
message("Reading file")
HPC1 <- read.table(file="./household_power_consumption.txt", header=TRUE, sep=";", na.strings="?")
## Inspect data
str(HPC1)
head(HPC1)
## Generate tidy data set with appropriate column names
names(HPC1) <- tolower(names(HPC1))
names(HPC1) <- gsub("_", "", names(HPC1))
names(HPC1) <- gsub("1", "kitchen", names(HPC1))
names(HPC1) <- gsub("2", "laundry", names(HPC1))
names(HPC1) <- gsub("3", "waterheaterairconditioner", names(HPC1))
names(HPC1)
## Convert date variable
HPC1$date <- as.Date(HPC1$date, format="%d/%m/%Y")
class(HPC1$date)
## Select subset
HPC2 <- subset(HPC1, date >= "2007-02-01" & date <= "2007-02-02")
str(HPC2)
head(HPC2)
## Create datetime variable ('chron' package is preferred, but not required)
if("chron" %in% rownames(installed.packages()) == FALSE) {
message("Package 'chron' is not installed")
datetime <- paste(HPC2$date, HPC2$time)
HPC2$datetime <- strptime(datetime, format="%Y-%m-%d %H:%M:%S")
HPC2$time <- as.character(HPC2$time)
str(HPC2)
head(HPC2)
} else {
message("Package 'chron' is installed")
library(chron)
datetime <- paste(HPC2$date, HPC2$time)
HPC2$datetime <- strptime(datetime, format="%Y-%m-%d %H:%M:%S")
HPC2$time <- times(format(HPC2$datetime, "%H:%M:%S"))
str(HPC2)
head(HPC2)
}
## Generate Plot 4 on screen (for visualization)
par(mfcol=c(2, 2))
plot(HPC2$datetime, HPC2$globalactivepower, type="l", xlab="", ylab="Global Active Power", cex=0.9)
plot(HPC2$datetime, HPC2$submeteringkitchen, type="n", xlab="", ylab="Energy sub metering", cex=0.9)
lines(HPC2$datetime, HPC2$submeteringkitchen, col="black")
lines(HPC2$datetime, HPC2$submeteringlaundry, col="red")
lines(HPC2$datetime, HPC2$submeteringwaterheaterairconditioner, col="blue")
legend("topright", bty="n", cex=0.9, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c("black", "red", "blue"))
plot(HPC2$datetime, HPC2$voltage, type="l", xlab="datetime", ylab="Voltage", cex=0.9)
plot(HPC2$datetime, HPC2$globalreactivepower, type="l", xlab="datetime", ylab="Global_reactive_power", cex=0.9)
## Create png file for Plot 4
png(filename="plot4.png", width=480, height=480, bg="transparent")
par(mfcol=c(2, 2))
plot(HPC2$datetime, HPC2$globalactivepower, type="l", xlab="", ylab="Global Active Power", cex=0.9)
plot(HPC2$datetime, HPC2$submeteringkitchen, type="n", xlab="", ylab="Energy sub metering", cex=0.9)
lines(HPC2$datetime, HPC2$submeteringkitchen, col="black")
lines(HPC2$datetime, HPC2$submeteringlaundry, col="red")
lines(HPC2$datetime, HPC2$submeteringwaterheaterairconditioner, col="blue")
legend("topright", bty="n", cex=0.9, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c("black", "red", "blue"))
plot(HPC2$datetime, HPC2$voltage, type="l", xlab="datetime", ylab="Voltage", cex=0.9)
plot(HPC2$datetime, HPC2$globalreactivepower, type="l", xlab="datetime", ylab="Global_reactive_power", cex=0.9)
dev.off()
## Finish
message("Plot 4 is generated")
DateCompleted <- date()
DateCompleted
## This script has been optimized for Windows 7 Professional and RStudio Version 0.98.1087
|
6d789111431659243d147704a15fdc72046d4955
|
80ee145d21975068bd722749697382db78575471
|
/man/distmap.Rd
|
dc7ed03265dd26feb19d9d7717ab993fc4badfa6
|
[] |
no_license
|
jukent/climod
|
9d5f28afd4b7b083792e6bf872c375c5f8d5d2b8
|
203d57875c38a57e8e3b8ad01ce9d692506d3e44
|
refs/heads/master
| 2021-11-23T20:55:40.727516
| 2021-11-16T19:24:23
| 2021-11-16T19:24:23
| 193,576,176
| 0
| 0
| null | 2021-11-16T22:15:20
| 2019-06-24T20:35:09
|
R
|
UTF-8
|
R
| false
| true
| 3,577
|
rd
|
distmap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distmap.R
\name{distmap}
\alias{distmap}
\title{Construct a mapping between the distributions of two sets of
values.}
\usage{
distmap(x, y, densfun = KernSmooth::bkde, pgrid = ppoints(1000),
truncate = FALSE, trim = NULL, na.rm = TRUE, ...)
}
\arguments{
\item{x}{A vector of values whose distribution is to be mapped.}
\item{y}{A vector of values whose distribution is the target of
the mapping.}
\item{densfun}{A kernel density estimation function, such as
\code{stats::density} or \code{KernSmooth::bkde}. This function
must return an object that can be passed as input to
\code{\link{pdf2cdf}}.}
\item{pgrid}{A vector of probability values used for the mapping
between CDFs. It need not be regular but must be strictly
increasing.}
\item{truncate}{Logical; if TRUE, truncates the kernel density
estimates of the PDFs at the minimum value of the input data.
This is useful for preventing the transfer function from
generating negative values in the case of a variable bounded at
zero like precipitation. Defaults to FALSE.}
\item{trim}{a function used to omit unwanted values when
constructing the mapping. This is useful for preventing
overfitting of the tails in heavy-tailed variables like
precipitation by trimming very extreme values using a function
like \code{\link{lof1d}} or \code{\link{chauvenet}}. The function
should take a vector of values as input and return the same vector
with the unwanted values removed. Defaults to NULL.}
\item{na.rm}{Logical; if TRUE (default), remove NA values before
constructing distribution mapping.}
\item{...}{Additional arguments to densfun.}
}
\value{
A list of class \code{distmap}, with the following elements:
x,y: The input x and y data.
pgrid: The vector of probabilities used to construct the mapping.
xpdf,ypdf: The estimated PDFs of x and y.
xq,yq: The quantiles of x and y corresponding to the probabilities
in pgrid.
transfer: A function that will transform x to have the same
distribution as y.
}
\description{
Distribution mapping adjusts the individual values of a dataset, x,
such that its statistical distribution matches that of a second
dataset, y. This is accomplished by converting data values to
probabilities using the CDF of the first distribution, and then
from probabilities back to data values using the CDF of the second
distribution.
}
\details{
The \code{distmap} function constructs this mapping and a transfer
function that implements it non-parametrically, by integrating
kernel density estimation of the PDFs using the trapezoid rule. It
uses monotone Hermite splines for the construction to guarantee
monotonicity of the transfer function.
}
\examples{
library(nor1mix)
set.seed(222)
x <- rnorMix(1e6, norMix(mu=c(-3,2),sigma=c(1,2),w=c(2,1)))
y <- rnorMix(1e6, norMix(mu=c(-2,2)))
dmap <- distmap(x,y)
z <- predict(dmap)
title = "PDFs; z = x mapped to match y"
plot(NA, type="n", xlim=c(-10,10), ylim=c(0,0.33), main=title)
lines(density(x), col="blue", lwd=2)
lines(density(y), col="black", lwd=3)
lines(density(z), col="red", lwd=2, lty=2)
legend("topright", c("x","y","z"), col=c("blue","black","red"), lty=c(1,1,2))
dev.new()
pp <- pnorm(seq(-2,2))
yr <- c(-5,5)
par(mfrow=c(3,1))
plot(y[1:100],type="b",ylim=yr,xlab="")
abline(h=quantile(y,pp), lty=c(4,3,2,3,4))
plot(x[1:100],type="b",col="red", ylim=yr, xlab="")
abline(h=quantile(x,pp), col="red", lty=c(4,3,2,3,4))
plot(z[1:100],type="b",col="blue", ylim=yr, xlab="")
abline(h=quantile(z,pp), col="blue", lty=c(4,3,2,3,4))
}
|
8eb48395d6a306d3acfb67df794cf171e6a3f155
|
aa8c7a36275e11ceb5294d678c83a05655b0fb41
|
/R/functions/fe_fe_icar_spaciotemporal_functions.R
|
74129d9651ee09c05d81bc6ac0800664c6351051
|
[] |
no_license
|
enoch26/Mmodels_SERRA_article
|
ebaec2234db69e3e2f29b6542d67acaf4e978e45
|
41f7e06468c5279399cb9ad516ec930be3f77cf1
|
refs/heads/master
| 2023-03-01T17:10:02.858492
| 2021-02-11T10:33:39
| 2021-02-11T10:33:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,012
|
r
|
fe_fe_icar_spaciotemporal_functions.R
|
##################################################################################################
## Spatial: FE (icar); Temporal: FE (rw1); Spatio-temporal: ad (additive, no interaction term)
## FE.FE.ad.icar
##################################################################################################
FE.FE.ad.icar <- function(){
for (j in 1:Ndiseases){
for (i in 1:Nareas){
for(l in 1:Nyears){
## Observed of the mean for each area, disease and time
O[i,j,l] ~ dpois(lambda[i,j,l])
## Modeling of the mean for each area, disease and time
log(lambda[i,j,l]) <- log(E[i,j,l]) + mu[j] + Theta[i,j] + Gam[l,j]
## Risk for each area, disease and time
SMR[i,j,l] <- exp(mu[j] + Theta[i,j] + Gam[l,j])
smr.prob[i,j,l]<- step(SMR[i,j,l]-1)
}
## Spatial effects
Espat[i,j] <- exp(Theta[i,j])
espat.prob[i,j]<- step(Espat[i,j]-1)
}
## Temporal effects
for(l in 1:Nyears){
Etemp[l,j] <- exp(Gam[l,j])
etemp.prob[l,j] <- step(Etemp[l,j]-1)
}
}
####################################
## Prior distribution for the mean risk for all areas
####################################
for (j in 1:Ndiseases){ mu[j] ~ dflat() }
####################################
## spatial
####################################
##################
## Theta = Phi * M; (IxJ) = (IxK)*(KxJ)
##################
for (i in 1:Nareas){
for (j in 1:Ndiseases){
Theta[i,j] <- inprod2(tPhi[,i], M[,j])
}
}
##################
## Phi (IxK, K=2J)
##################
for (j in 1:Ndiseases){
Spatial[j, 1:Nareas] ~ car.normal(adj[], weights[], num[], 1)
for (i in 1:Nareas){
tPhi[j,i] <- Spatial[j,i]
}
}
##################
## M-matrix (KxJ, K=2J)
##################
for (i in 1:(1 * Ndiseases)) {
for (j in 1:Ndiseases) {
M[i,j] ~ dflat()
}
}
##################
## Sigma=M'M and correlations
##################
# Sigma_b =t(M)%*%M
for(i in 1:Ndiseases){
for(j in 1:Ndiseases){
Sigma.s[i,j] <- inprod2(M[,i], M[,j])
Corre.s[i,j] <- Sigma.s[i,j]/(pow(Sigma.s[i,i],0.5)*pow(Sigma.s[j,j],0.5))
}
}
####################################
# temporal
####################################
##################
# Gamma = Phig * Mg; (txJ) = (txK)*(KxJ)
##################
for(l in 1:Nyears){
for(j in 1:Ndiseases){
Gam[l,j]<- inprod2(tPhiG[,l], Mg[,j])
}
}
##################
## PhiG (txK, K=2J)
##################
for (j in 1:(Ndiseases)){
Temporal[j, 1:Nyears] ~ car.normal(adjt[], weightst[], numt[],1)
for (l in 1:Nyears){
tPhiG[j,l] <-Temporal[j,l]
}
}
##################
## Mg-matrix (KxJ, K=2J)
##################
for (j in 1:Ndiseases){
for (i in 1:(Ndiseases)){
Mg[i,j] ~ dflat()
}
}
##################
## Sigma=M'M and correlations
##################
for(i in 1:Ndiseases){
for(j in 1:Ndiseases){
Sigma.t[i,j] <- inprod2(Mg[,i], Mg[,j])
Corre.t[i,j] <- Sigma.t[i,j]/(pow(Sigma.t[i,i],0.5)*pow(Sigma.t[j,j],0.5))
}
}
######################################
######################################
}
##################################################################################################
## Spatial: FE (icar); Temporal: FE (rw1); Spatio-temporal: Type I
## FE.FE.t1.icar
##################################################################################################
FE.FE.t1.icar <- function(){
for (j in 1:Ndiseases){
for (i in 1:Nareas){
for(l in 1:Nyears){
## Observed of the mean for each area, disease and time
O[i,j,l] ~ dpois(lambda[i,j,l])
## Modeling of the mean for each area, disease and time
log(lambda[i,j,l]) <- log(E[i,j,l]) + mu[j] + Theta[i,j] + Gam[l,j] + sdZet[j] * Zet[i,j,l]
## Risk for each area, disease and time
SMR[i,j,l] <- exp(mu[j] + Theta[i,j] + Gam[l,j] + sdZet[j] * Zet[i,j,l])
smr.prob[i,j,l]<- step(SMR[i,j,l]-1)
## Spatio-temporal effect
Eint[i,j,l] <- exp(sdZet[j] * Zet[i,j,l])
eint.prob[i,j,l]<- step(Eint[i,j,l]-1)
}
## Spatial effects
Espat[i,j] <- exp(Theta[i,j])
espat.prob[i,j]<- step(Espat[i,j]-1)
}
## Temporal effects
for(l in 1:Nyears){
Etemp[l,j] <- exp(Gam[l,j])
etemp.prob[l,j] <- step(Etemp[l,j]-1)
}
}
####################################
## Prior distribution for the mean risk for all areas
####################################
for (j in 1:Ndiseases){ mu[j] ~ dflat() }
####################################
## spatial
####################################
##################
## Theta = Phi * M; (IxJ) = (IxK)*(KxJ)
##################
for (i in 1:Nareas){
for (j in 1:Ndiseases){
Theta[i,j] <- inprod2(tPhi[,i], M[,j])
}
}
##################
## Phi (IxK, K=2J)
##################
for (j in 1:Ndiseases){
Spatial[j, 1:Nareas] ~ car.normal(adj[], weights[], num[], 1)
for (i in 1:Nareas){
tPhi[j,i] <- Spatial[j,i]
}
}
##################
## M-matrix (KxJ, K=2J)
##################
for (i in 1:(1 * Ndiseases)) {
for (j in 1:Ndiseases) {
M[i,j] ~ dflat()
}
}
##################
## Sigma=M'M and correlations
##################
# Sigma_b =t(M)%*%M
for(i in 1:Ndiseases){
for(j in 1:Ndiseases){
Sigma.s[i,j] <- inprod2(M[,i], M[,j])
Corre.s[i,j] <- Sigma.s[i,j]/(pow(Sigma.s[i,i],0.5)*pow(Sigma.s[j,j],0.5))
}
}
####################################
# temporal
####################################
##################
# Gamma = Phig * Mg; (txJ) = (txK)*(KxJ)
##################
for(l in 1:Nyears){
for(j in 1:Ndiseases){
Gam[l,j]<- inprod2(tPhiG[,l], Mg[,j])
}
}
##################
## PhiG (txK, K=2J)
##################
for (j in 1:(Ndiseases)){
Temporal[j, 1:Nyears] ~ car.normal(adjt[], weightst[], numt[],1)
for (l in 1:Nyears){
tPhiG[j,l] <-Temporal[j,l]
}
}
##################
## Mg-matrix (KxJ, K=2J)
##################
for (j in 1:Ndiseases){
for (i in 1:(Ndiseases)){
Mg[i,j] ~ dflat()
}
}
##################
## Sigma=M'M and correlations
##################
for(i in 1:Ndiseases){
for(j in 1:Ndiseases){
Sigma.t[i,j] <- inprod2(Mg[,i], Mg[,j])
Corre.t[i,j] <- Sigma.t[i,j]/(pow(Sigma.t[i,i],0.5)*pow(Sigma.t[j,j],0.5))
}
}
######################################
# spatio-temporal (M-model)
######################################
##################
# Zeta = Phiz * Mz #(IxT) = (IxK)*(KxT) K=T
##################
for(j in 1:Ndiseases){
for(i in 1:Nareas){
for(l in 1:Nyears){
Zet.aux[i,j,l] ~ dnorm(0,1)
Zet[i,j,l] <- Zet.aux[i,j,l]
}
}
}
##################
# Prior distribution for the standard deviations
##################
for(j in 1:Ndiseases){
sdZet[j] ~ dunif(0,100)
}
######################################
######################################
}
##################################################################################################
# Spatial: FE (icar); Temporal: FE (rw1); Spatio-temporal: Type II
# FE.FE.t2.icar
##################################################################################################
FE.FE.t2.icar <- function(){
for (j in 1:Ndiseases){
for (i in 1:Nareas){
for(l in 1:Nyears){
## Observed of the mean for each area, disease and time
O[i,j,l] ~ dpois(lambda[i,j,l])
## Modeling of the mean for each area, disease and time
log(lambda[i,j,l]) <- log(E[i,j,l]) + mu[j] + Theta[i,j] + Gam[l,j] + sdZet[j] * Zet[i,j,l]
## Risk for each area, disease and time
SMR[i,j,l] <- exp(mu[j] + Theta[i,j] + Gam[l,j] + sdZet[j] * Zet[i,j,l])
smr.prob[i,j,l]<- step(SMR[i,j,l]-1)
## Spatio-temporal effect
Eint[i,j,l] <- exp(sdZet[j] * Zet[i,j,l])
eint.prob[i,j,l]<- step(Eint[i,j,l]-1)
}
## Spatial effect
Espat[i,j] <- exp(Theta[i,j])
espat.prob[i,j]<- step(Espat[i,j]-1)
}
## Temporal effect
for(l in 1:Nyears){
Etemp[l,j] <- exp(Gam[l,j])
etemp.prob[l,j] <- step(Etemp[l,j]-1)
}
}
####################################
## Prior distribution for the mean risk for all municipalities
####################################
for (j in 1:Ndiseases){ mu[j] ~ dflat() }
####################################
## spatial
####################################
##################
## Theta = Phi * M; (IxJ) = (IxK)*(KxJ)
##################
for (i in 1:Nareas){
for (j in 1:Ndiseases){
Theta[i,j] <- inprod2(tPhi[,i], M[,j])
}
}
##################
## Phi (IxK, K=2J)
##################
for (j in 1:Ndiseases){
Spatial[j, 1:Nareas] ~ car.normal(adj[], weights[], num[], 1) # structured
for (i in 1:Nareas){
tPhi[j,i] <- Spatial[j,i]
}
}
##################
## M-matrix (KxJ, K=2J)
##################
for (i in 1:(1 * Ndiseases)) {
for (j in 1:Ndiseases) {
M[i,j] ~ dflat()
}
}
##################
## Sigma=M'M and correlations
##################
for(i in 1:Ndiseases){
for(j in 1:Ndiseases){
Sigma.s[i,j] <- inprod2(M[,i], M[,j])
Corre.s[i,j] <- Sigma.s[i,j]/(pow(Sigma.s[i,i],0.5)*pow(Sigma.s[j,j],0.5))
}
}
####################################
## temporal
####################################
##################
## Gamma = Phig * Mg; (txJ) = (txK)*(KxJ)
##################
for(l in 1:Nyears){
for(j in 1:Ndiseases){
Gam[l,j]<- inprod2(tPhiG[,l], Mg[,j])
}
}
##################
## PhiG (txK, K=2J)
##################
for (j in 1:(Ndiseases)){
Temporal[j, 1:Nyears] ~ car.normal(adjt[], weightst[], numt[],1) # structured
for (l in 1:Nyears){
tPhiG[j,l] <-Temporal[j,l]
}
}
##################
## Mg-matrix (KxJ, K=2J)
##################
for (j in 1:Ndiseases){
for (i in 1:(Ndiseases)){
Mg[i,j] ~ dflat()
}
}
##################
## Sigma=M'M and correlations
##################
for(i in 1:Ndiseases){
for(j in 1:Ndiseases){
Sigma.t[i,j] <- inprod2(Mg[,i], Mg[,j])
Corre.t[i,j] <- Sigma.t[i,j]/(pow(Sigma.t[i,i],0.5)*pow(Sigma.t[j,j],0.5))
}
}
######################################
## spatio-temporal (M-model)
######################################
##################
## Zet = Phi.z * M; (IxT) = (IxK)*(KxT)
##################
for(j in 1:Ndiseases){
for (i in 1:Nareas){
for (l in 1:Nyears){
Zet[i,j,l] <- Mz[i,j,l]
}
}
}
##################
## M-matrix (KxJ, K=2J)
##################
for(j in 1:Ndiseases){
for (i in 1:(1*Nareas)){
for (l in 1:Nyears){
Mz[i,j,l]<- Mzaux[i,j,l]
}
}
## Mzaux
for (i in 1:(Nareas)){
Temporal.z[i,j,1:Nyears] ~ car.normal(adj.zt[], weights.zt[], num.zt[],1) # structured
for (l in 1:Nyears){
Mzaux[i,j,l] <- Temporal.z[i,j,l]
}
}
}
##################
## Prior distribution for the standard deviations of the random effects
##################
for(j in 1:Ndiseases){
sdZet[j] ~ dunif(0,100)
}
######################################
######################################
}
##################################################################################################
## Spatial: FE (icar); Temporal: FE (rw1); Spatio-temporal: Type III
## FE.FE.t3.icar
##################################################################################################
FE.FE.t3.icar <- function(){
for (j in 1:Ndiseases){
for (i in 1:Nareas){
for(l in 1:Nyears){
## Observed of the mean for each area, disease and time
O[i,j,l] ~ dpois(lambda[i,j,l])
## Modeling of the mean for each area, disease and time
log(lambda[i,j,l]) <- log(E[i,j,l]) + mu[j] + Theta[i,j] + Gam[l,j] + sdZet[j] * Zet[i,j,l]
## Risk for each area, disease and time
SMR[i,j,l] <- exp(mu[j] + Theta[i,j] + Gam[l,j] + sdZet[j] * Zet[i,j,l])
smr.prob[i,j,l]<- step(SMR[i,j,l]-1)
## Spatio-temporal effect
Eint[i,j,l] <- exp(sdZet[j] * Zet[i,j,l])
eint.prob[i,j,l]<- step(Eint[i,j,l]-1)
}
## Spatial effect
Espat[i,j] <- exp(Theta[i,j])
espat.prob[i,j]<- step(Espat[i,j]-1)
}
## Temporal effect
for(l in 1:Nyears){
Etemp[l,j] <- exp(Gam[l,j])
etemp.prob[l,j] <- step(Etemp[l,j]-1)
}
}
####################################
## Prior distribution for the mean risk for all areas
####################################
for (j in 1:Ndiseases){ mu[j] ~ dflat() }
####################################
## spatial
####################################
##################
## Theta = Phi * M; (IxJ) = (IxK)*(KxJ)
##################
for (i in 1:Nareas){
for (j in 1:Ndiseases){
Theta[i,j] <- inprod2(tPhi[,i], M[,j])
}
}
##################
## Phi (IxK, K=2J)
##################
for (j in 1:Ndiseases){
Spatial[j, 1:Nareas] ~ car.normal(adj[], weights[], num[], 1) # structured
for (i in 1:Nareas){
tPhi[j,i] <- Spatial[j,i]
}
}
##################
## M-matrix (KxJ, K=2J)
##################
for (i in 1:(1 * Ndiseases)) {
for (j in 1:Ndiseases) {
M[i,j] ~ dflat()
}
}
##################
## Sigma=M'M and correlation
##################
for(i in 1:Ndiseases){
for(j in 1:Ndiseases){
Sigma.s[i,j] <- inprod2(M[,i], M[,j])
Corre.s[i,j] <- Sigma.s[i,j]/(pow(Sigma.s[i,i],0.5)*pow(Sigma.s[j,j],0.5))
}
}
####################################
## temporal
####################################
##################
## Gamma = Phig * Mg; (txJ) = (txK)*(KxJ)
##################
for(l in 1:Nyears){
for(j in 1:Ndiseases){
Gam[l,j]<- inprod2(tPhiG[,l], Mg[,j])
}
}
##################
## PhiG (txK, K=2J)
##################
for (j in 1:(Ndiseases)){
Temporal[j, 1:Nyears] ~ car.normal(adjt[], weightst[], numt[],1) # structured
for (l in 1:Nyears){
tPhiG[j,l] <-Temporal[j,l]
}
}
##################
## Mg-matrix (KxJ, K=2J)
##################
for (j in 1:Ndiseases){
for (i in 1:(Ndiseases)){
Mg[i,j] ~ dflat()
}
}
##################
## Sigma=M'M and correlation
##################
for(i in 1:Ndiseases){
for(j in 1:Ndiseases){
Sigma.t[i,j] <- inprod2(Mg[,i], Mg[,j])
Corre.t[i,j] <- Sigma.t[i,j]/(pow(Sigma.t[i,i],0.5)*pow(Sigma.t[j,j],0.5))
}
}
######################################
## spatio-temporal (M-model)
######################################
##################
## Zet = Phi.z * M; (IxT) = (IxK)*(KxT)
##################
for(j in 1:Ndiseases){
for (i in 1:Nareas){
for (l in 1:Nyears){
Zet[i,j,l] <- tPhi.z[l,j,i]
}
}
}
##################
# Phi.z (IxK, K=T)
##################
for(j in 1:Ndiseases){
for (l in 1:Nyears){
Spatial.z[l, j, 1:Nareas] ~ car.normal(adj.zs[], weights.zs[], num.zs[],1) # structured
for (i in 1:Nareas){
tPhi.z[l,j,i] <- Spatial.z[l,j,i]
}
}
}
##################
## Prior distribution for the standard deviations of the random effects
##################
for(j in 1:Ndiseases){
sdZet[j] ~ dunif(0,100)
}
######################################
######################################
}
##################################################################################################
## Spatial: FE (icar); Temporal: FE (rw1); Spatio-temporal: Type IV
## FE.FE.t4.icar
##################################################################################################
FE.FE.t4.icar <- function(){
for (j in 1:Ndiseases){
for (i in 1:Nareas){
for(l in 1:Nyears){
## Observed of the mean for each area, disease and time
O[i,j,l] ~ dpois(lambda[i,j,l])
## Modeling of the mean for each area, disease and time
log(lambda[i,j,l]) <- log(E[i,j,l]) + mu[j] + Theta[i,j] + Gam[l,j] + sdZet[j] * Zet[i,j,l]
## Risk for each area, disease and time
SMR[i,j,l] <- exp(mu[j] + Theta[i,j] + Gam[l,j] + sdZet[j] * Zet[i,j,l])
smr.prob[i,j,l]<- step(SMR[i,j,l]-1)
## Spatio-temporal effect
Eint[i,j,l] <- exp(sdZet[j] * Zet[i,j,l])
eint.prob[i,j,l]<- step(Eint[i,j,l]-1)
}
## Spatial effect
Espat[i,j] <- exp(Theta[i,j])
espat.prob[i,j]<- step(Espat[i,j]-1)
}
## Temporal effect
for(l in 1:Nyears){
Etemp[l,j] <- exp(Gam[l,j])
etemp.prob[l,j] <- step(Etemp[l,j]-1)
}
}
####################################
## Prior distribution for the mean risk for all areas
####################################
for (j in 1:Ndiseases){ mu[j] ~ dflat() }
####################################
## spatial
####################################
##################
## Theta = Phi * M; (IxJ) = (IxK)*(KxJ)
##################
for (i in 1:Nareas){
for (j in 1:Ndiseases){
Theta[i,j] <- inprod2(tPhi[,i], M[,j])
}
}
##################
## Phi (IxK, K=2J)
##################
for (j in 1:Ndiseases){
Spatial[j, 1:Nareas] ~ car.normal(adj[], weights[], num[], 1) # structured
for (i in 1:Nareas){
tPhi[j,i] <- Spatial[j,i]
}
}
##################
## M-matrix (KxJ, K=2J)
##################
for (i in 1:(1 * Ndiseases)) {
for (j in 1:Ndiseases) {
M[i,j] ~ dflat()
}
}
##################
## Sigma=M'M and correlation
##################
for(i in 1:Ndiseases){
for(j in 1:Ndiseases){
Sigma.s[i,j] <- inprod2(M[,i], M[,j])
Corre.s[i,j] <- Sigma.s[i,j]/(pow(Sigma.s[i,i],0.5)*pow(Sigma.s[j,j],0.5))
}
}
####################################
## temporal
####################################
##################
## Gamma = Phig * Mg; (txJ) = (txK)*(KxJ)
##################
for(l in 1:Nyears){
for(j in 1:Ndiseases){
Gam[l,j]<- inprod2(tPhiG[,l], Mg[,j])
}
}
##################
## PhiG (txK, K=2J)
##################
for (j in 1:(Ndiseases)){
Temporal[j, 1:Nyears] ~ car.normal(adjt[], weightst[], numt[],1)
for (l in 1:Nyears){
tPhiG[j,l] <-Temporal[j,l]
}
}
##################
## Mg-matrix (KxJ, K=2J)
##################
for (j in 1:Ndiseases){
for (i in 1:(Ndiseases)){
Mg[i,j] ~ dflat()
}
}
##################
## Sigma=M'M and correlation
##################
for(i in 1:Ndiseases){
for(j in 1:Ndiseases){
Sigma.t[i,j] <- inprod2(Mg[,i], Mg[,j])
Corre.t[i,j] <- Sigma.t[i,j]/(pow(Sigma.t[i,i],0.5)*pow(Sigma.t[j,j],0.5))
}
}
######################################
## spatio-temporal (M-model)
######################################
##################
## Zet = Phi.z * M; (IxT) = (IxK)*(KxT)
##################
for(j in 1:Ndiseases){
for (i in 1:Nareas){
for (l in 1:Nyears){
Zet[i,j,l] <- inprod2(tPhi.z[,j,i], Mz[,l])
}
}
}
##################
# Phi.z (IxK, K=T)
##################
for(j in 1:Ndiseases){
for (l in 1:Nyears){
Spatial.z[l,j, 1:Nareas] ~ car.normal(adj.zs[], weights.zs[], num.zs[],1) # structured
for (i in 1:Nareas){
tPhi.z[l,j,i] <- Spatial.z[l,j,i]
}
}
}
##################
## M-matrix (KxJ, K=2J)
##################
  for (i in 1:(1*Nyears)){
    for (l in 1:Nyears){
      # NOTE: Mzz is not defined anywhere in this model, so it must be supplied
      # as data/inits, or given a prior here (cf. Mzaux in FE.FE.t2.icar, which
      # uses a car.normal temporal structure); otherwise the model will not compile
      Mz[i,l] <- Mzz[i,l]
    }
  }
##################
## Prior distribution for the standard deviations of the random effects
##################
for(j in 1:Ndiseases){
sdZet[j] ~ dunif(0,100)
}
######################################
######################################
}
##################################################################################################
##################################################################################################
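## Sketch of a hypothetical run: these model definitions use WinBUGS/OpenBUGS
## constructs (car.normal, dflat, inprod2), so a call via R2OpenBUGS could look
## like the following; every name in `data` mirrors a variable referenced inside
## the models and must be prepared beforehand.
# library(R2OpenBUGS)
# write.model(FE.FE.ad.icar, "model.txt")
# data <- list(O = O, E = E, Nareas = Nareas, Ndiseases = Ndiseases, Nyears = Nyears,
#              adj = adj, weights = weights, num = num,
#              adjt = adjt, weightst = weightst, numt = numt)
# fit <- bugs(data, inits = NULL, parameters.to.save = c("SMR", "Corre.s", "Corre.t"),
#             model.file = "model.txt", n.chains = 3, n.iter = 10000)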
|
446c3ca054b3d67d8159ea2cefda2584ba23e912
|
1e00b53399a80d931239d90c1ac5e9d9d549b1ee
|
/tambomap_app.R
|
606d748dc6aad11f424e822198957f4fca4b5ee1
|
[] |
no_license
|
luiscartor/tambomaps-shinyapp
|
5094fdbf8a7be2155bab34b6dc1fe0689c4388ed
|
c7bb0af11831bb5250c52c370d74f096b5878f75
|
refs/heads/master
| 2023-07-28T00:31:47.559938
| 2021-08-27T08:46:16
| 2021-08-27T08:46:16
| 352,005,547
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,876
|
r
|
tambomap_app.R
|
# tambomap_app.R
# by Luis Carrasco
# on March 2021
# tambomap_app.R includes the Shiny IU and SERVER to run the tambomaps shiny application
##############################################################################################################
# Load packages
if(!require(shiny)) install.packages("shiny", repos = "http://cran.us.r-project.org")
if(!require(leaflet)) install.packages("leaflet", repos = "http://cran.us.r-project.org")
if(!require(raster)) install.packages("raster", repos = "http://cran.us.r-project.org")
if(!require(ggplot2)) install.packages("ggplot2", repos = "http://cran.us.r-project.org")
if(!require(RColorBrewer)) install.packages("RColorBrewer", repos = "http://cran.us.r-project.org")
if(!require(shinyWidgets)) install.packages("shinyWidgets", repos = "http://cran.us.r-project.org")
if(!require(shinydashboard)) install.packages("shinydashboard", repos = "http://cran.us.r-project.org")
if(!require(shinythemes)) install.packages("shinythemes", repos = "http://cran.us.r-project.org")
if(!require(plotly)) install.packages("plotly", repos = "http://cran.us.r-project.org")
if(!require(dplyr)) install.packages("dplyr", repos = "http://cran.us.r-project.org")
if(!require(rgdal)) install.packages("rgdal", repos = "http://cran.us.r-project.org")
if(!require(leafem)) install.packages("leafem", repos = "http://cran.us.r-project.org")
if(!require(mapview)) install.packages("mapview", repos = "http://cran.us.r-project.org")
if(!require(leafpop)) install.packages("leafpop", repos = "http://cran.us.r-project.org")
if(!require(lattice)) install.packages("lattice", repos = "http://cran.us.r-project.org")
if(!require(stringr)) install.packages("stringr", repos = "http://cran.us.r-project.org")
##############################################################################################################
# 1. DATA INPUTS
options(max.print=100000)
# INPUT Shapefiles
# Shapefile of the GADM data for Japanese prefectures (does not include Okinawa)
prefsshapefile <- readOGR(dsn='inputdata', layer='gadm36_JPN_1_noOkinawa_simp')
# Shapefile of the GADM data for Japanese sub-prefectures (does not include Okinawa)
subprefsshapefile <- readOGR(dsn='inputdata', layer='gadm36_JPN_2_noOkinawa_simp')
# INPUT Data tables
# Data on aggregated rice area at the prefecture and subprefecture levels
prefsdata <- read.csv(file='inputdata/riceareabyprefecture.csv', header =TRUE)
subprefsdata <- read.csv(file='inputdata/riceareabysubprefecture.csv', header =TRUE)
# 2. DATA PRE-PROCESSING
# Merge shapefiles with area change table: (function merge fails to keep polygon order to I use match function here)
prefsshapefile@data = data.frame(prefsshapefile@data, prefsdata[match(prefsshapefile@data$NAME_1, prefsdata$NAME_1),])
subprefsshapefile@data = data.frame(subprefsshapefile@data, subprefsdata[match(subprefsshapefile@data$GID_2, subprefsdata$GID_2),])
# There are NA values for sub-prefecture-level percentage change (zero initial rice area), so:
subprefsshapefile@data$percchange <- as.numeric(subprefsshapefile@data$percchange)
# Area columns names
areacolumns <- c("area198589","area199094","area199599","area200004",
"area200509","area201014","area201519")
# Value ranges to use in color scales
prefsarearange <- c(min(prefsshapefile@data[,areacolumns],na.rm = T),max(prefsshapefile@data[,areacolumns],na.rm = T))
subprefsarearange <- c(min(subprefsshapefile@data[,areacolumns],na.rm = T),max(subprefsshapefile@data[,areacolumns],na.rm = T))
# Period names
periods <- c("1985-89","1990-94","1995-99","2000-04","2005-09","2010-14","2015-19","Total Change")
##############################################################################################################
# 3. LAYERS CREATION
# Colour palettes
# Create Diverging palette for percentage change in prefectures
# vector of colors for values smaller than 0 (60 colors)
rc1 <- colorRampPalette(colors = c("red", "white"), space = "Lab")(60)
## vector of colors for values larger than 0 (10 colors)
rc2 <- colorRampPalette(colors = c("white", "blue"), space = "Lab")(10)
## Combine the two color palettes
prefdivcols <- c(rc1, rc2)
# Create Diverging palette for percentage change in subprefectures
# vector of colors for values smaller than 0 (100 colors)
rc3 <- colorRampPalette(colors = c("red", "white"), space = "Lab")(100)
## vector of colors for values larger than 0 (250 colors)
rc4 <- colorRampPalette(colors = c("white", "blue"), space = "Lab")(250)
## Combine the two color palettes
subprefdivcols <- c(rc3, rc4)
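# Sketch: quick visual preview of a diverging palette (uncomment to inspect)
# barplot(rep(1, length(prefdivcols)), col = prefdivcols, border = NA, space = 0, axes = FALSE)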
# CREATE BASEMAP
basemap <- leaflet() %>% setView(lng = 138, lat = 38, zoom = 6) %>%
  # Adds order to layers (first background, then rasters, shapefiles, and labels)
addMapPane("background_map", zIndex = 410) %>% # Level 1: bottom
addMapPane("rastermaps", zIndex = 420) %>%
addMapPane("prefslayer", zIndex = 430) %>%
addMapPane("subprefslayer", zIndex = 440) %>%
addMapPane("cartolabels", zIndex = 450) %>%
  # Adds CartoDB maps (labels separated so that they are not overlapped by rasters/shapefiles)
addProviderTiles("CartoDB.PositronNoLabels") %>%
#addProviderTiles("CartoDB.PositronOnlyLabels",
# options = leafletOptions(pane = "cartolabels"),
# group = "cartolabels") %>%
# Adds Ersri World Imagery map
addProviderTiles(provider = "Esri.WorldImagery", group = "Esri World Imagery", #) %>%
options = pathOptions(pane = "background_map")) %>%
# Adds coords
addMouseCoordinates() %>%
# Adds background maps layer control
addLayersControl(
baseGroups = c("Carto", "Esri World Imagery"),
options = layersControlOptions(collapsed = FALSE)) %>%
  # These widgets add a title to the LayersControl and send the base layer to the back
htmlwidgets::onRender("
function() {
$('.leaflet-control-layers-list').prepend('<label style=\"text-align:center\">Background Map</label>');
}
function(el, x) {
this.on('baselayerchange', function(e) {
e.layer.bringToBack();
})
}
")
##############################################################################################################
### 4. SHINY UI
ui <- bootstrapPage(
tags$head(includeHTML("gtag.html")),
navbarPage(theme = shinytheme("flatly"), collapsible = TRUE,
HTML('<a style="text-decoration:none;cursor:default;color:#FFFFFF;" class="active" href="#">TAMBO Project</a>'), id="nav",
windowTitle = "TAMBO Project",
# FIRST PANNEL: MAPS
tabPanel("Rice Mapping",
div(class="outer",
tags$head(includeCSS("styles.css")),
leafletOutput("map", width="100%", height="100%"),
absolutePanel(id = "controls", class = "panel panel-default",
top = 75, left = 55, width = 350, fixed=TRUE,
draggable = TRUE, height = "auto",
sliderTextInput("periods","Select Period or Total Change" ,
choices = periods,
selected = periods[1],
grid = TRUE), #values which will be selected by default
selectInput("mapselection", "Select Map",
choices = list("Background only","Rice map","Rice paddy area (Prefecture-level)","Rice paddy area (County-level)"),
selected = "Rice map"),
# selectInput("colors", "Color Scheme for Area Maps",
# rownames(subset(brewer.pal.info, category %in% c("seq", "div")))),
checkboxInput("legend", "Show legend", TRUE),
tags$br(),
"Total Change represents lost rice fields, estimated from the difference
between the period 1985-89 and 2015-19."
)
)
),
# SECOND PANEL: DATA AND PLOTS
tabPanel("Data and Plots",
sidebarPanel(
span(tags$h4("Visualize and download data"), style="color:#045a8d"),
pickerInput("selectpref", "Select Prefecture",
choices = c(prefsdata$NAME_1),
selected = "All Japan"),
pickerInput("selectcounty", "Select County" ,choices = NULL),
),
mainPanel(
tabsetPanel(
tabPanel("Plots",
br(), plotOutput("plot")),
tabPanel("Tables", style = "overflow-y:scroll; max-height: 600px",
br(), verbatimTextOutput("table"),
downloadButton("downloadCsv", "Download as CSV")
)
)
)
),
# THIRD PANEL: ABOUT
tabPanel("About",
tags$div(
tags$h4("Tambo Project"),
"The Tambo Project aims to map rice fields in Japan since the 80's.
These data can be used for ecological or agricultural research,
conservation, management and planning, etc. We suggest users to check the associated documentation and to understand the
uncertainties and limitations of this dataset.",
tags$br(),tags$br(),tags$h4("Raster layers"),
"The rice raster layers were created by classfying landsat images, combining image temporal aggregation and phenology
of rice fields in Google Earth Engine. Each layer represents an averaged representation of the distribution of rice fields over a period of 5 years.
Methods for creating the rice layers are fully described here:", tags$br(),
"Carrasco et al. 2022. Historical mapping of rice fields in Japan using phenology andtemporally aggregated Landsat images in Google Earth Engine. Under review.", tags$br(),
"Original rice layers have a spatial resolution of 30m. In this app, rice layers are rendered using leaflet tiles and
the resolution depends on the zoom level. Original rasters can be downloaded here:",tags$br(),
"Extra post-processing was conducted for the presented rices layers: water bodies masking, and masking out rice pixels for Hokkaido cities were rice was never recorded in the last decades.",
tags$br(), tags$br(),tags$h4("Rice paddy area aggregations"),
"Polygon layers represent aggregated rice area based on the raster layers, at the prefecture and sub-prefectural levels.",
tags$br(),tags$br(),tags$h4("Code"),
"Code and input data used to generate this Shiny application are available on ",tags$a(href="https://github.com/luiscartor/rice-mapping-app/", "Github."),
tags$br(),tags$br(),tags$h4("Contact"),
"Luis Carrasco Tornero",tags$br(),
"Laboratory of Biodiversity Sciences",tags$br(),
"Graduate School of Agricultural and Life Sciences",tags$br(),
"The University of Tokyo",tags$br(),
"luiscartor@gmail.com"
)
)
)
)
##############################################################################################################
### 5. SHINY SERVER
server <- function(input, output, session) {
# Reactive expression to select which layer to render
renderlayer <- reactive({
input$mapselection
})
  # This reactive expression gives the path to the rice map tiles for the selected period
# The folder with tiles must be inside www folder in shiny project (only this solution works)
# If working in web server (shinyapps), tiles are stored in personal website (luiscartor.github.io)
ricemappath <- reactive({
# Reads tiles locally (impossible to upload to shinyapps.io)
#paste("tambo",input$periods,"tiles/{z}/{x}/{y}.png",sep="")
# Reads tiles from my github website
paste("https://luiscartor.github.io/tiles/tanboproject/tambo",input$periods,"tiles/{z}/{x}/{y}.png",sep="")
})
# Reactive expression to select period
areayear <- reactive({
if(input$periods == "Total Change"){
"percchange"
} else
paste("area",gsub("-", "", input$periods),sep="")
})
# Render basemap
output$map <- renderLeaflet({
basemap
})
# Background only option
observe({
# Render when "background only"
if(renderlayer() == "Background only"){
leafletProxy("map") %>%
# Delete all layers
clearGroup('rastermaps') %>%
clearGroup('prefslayer') %>%
clearGroup('subprefslayer')
}
})
# Rice maps
observe({
# Render when "background only" option is not selected
if(renderlayer() == "Rice map"){
leafletProxy("map") %>%
# Adds rice maps
# Only render rice maps if "background only" not selected
clearGroup('rastermaps') %>%
clearGroup('prefslayer') %>%
clearGroup('subprefslayer') %>%
addTiles(urlTemplate = ricemappath(),
option = c(tileOptions(tms = T, minZoom = 5, maxZoom = 12),
# This option fixes these tiles on top always
pathOptions(pane = "rastermaps")),
group = "rastermaps")
}
})
# Prefectures area maps
observe({
if(renderlayer() == "Rice paddy area (Prefecture-level)"){
# Polygon popup information (Prefecture and rice area)
if(input$periods == "Total Change"){
# Pallets for area change
#pal <- colorBin("viridis", prefsshapefile@data$area198589, bins=round(seq(0,max(subprefsarearange),length.out=10),1), na.color = "#bdbdbd")
pal <- colorNumeric(palette = prefdivcols, domain = c(-60,10), na.color = "transparent")
# Popup for change
popup <- paste0("<strong>Prefecture: </strong>",
prefsshapefile@data$NAME_1,
"<br><strong>Rice area change: </strong>",
round(prefsshapefile@data$areachange,2), " km<sup>2</sup>",
"<br><strong>Rice area percentage change: </strong>",
round(prefsshapefile@data$percchange,2), " %")
}
else{
# Palettes for area polygon maps
#pal <- colorNumeric( palette="YlOrRd",domain = prefsshapefile@data[,areacolumns], na.color="transparent")
pal <- colorBin("viridis", prefsarearange, bins=8, na.color = "#bdbdbd",pretty = FALSE)
# Pop-up for area
popup <- paste0("<strong>Prefecture: </strong>",
prefsshapefile@data$NAME_1,
"<br><strong>Rice area: </strong>",
prefsshapefile@data[,areayear()], " km<sup>2</sup>")
}
leafletProxy("map", data = prefsshapefile) %>%
# Adds prefectural rice-area maps
clearGroup('rastermaps') %>%
clearGroup('prefslayer') %>%
clearGroup('subprefslayer') %>%
# For practical reasons, we need to re-render the rice map when we select area layers
addTiles(urlTemplate = ricemappath(),
option = c(tileOptions(tms = T, minZoom = 6, maxZoom = 12),
# This option fixes these tiles on top always
pathOptions(pane = "rastermaps")),
group = "rastermaps") %>%
addPolygons(data = prefsshapefile, fillColor = ~pal(prefsshapefile@data[,areayear()]), fillOpacity = 0.7,
color = "white", weight = 2, group = "prefslayer",
option = pathOptions(pane = "prefslayer"),
layerId = ~NAME_1, popup = popup)
}
})
# SUB-Prefectures area maps
observe({
if(renderlayer() == "Rice paddy area (County-level)"){
# Polygon popup information (Sub prefecture and rice area)
if(input$periods == "Total Change"){
# Pallets for area change
#pal <- colorBin("YlOrRd", subprefsshapefile@data[,"areachange"], bins=10, na.color = "#bdbdbd")
pal <- colorNumeric(palette = subprefdivcols, domain = c(-100,250), na.color = "transparent")
popup <- paste0("<strong>Prefecture: </strong>",
subprefsshapefile@data$NAME_1,
"<br><strong>County: </strong>",
subprefsshapefile@data$NAME_2,
"<br><strong>Rice area change: </strong>",
round(subprefsshapefile@data$areachange,2), " km<sup>2</sup>",
"<br><strong>Rice area percentage change: </strong>",
#if(is.numeric(subprefsshapefile@data$percchange)){
round(subprefsshapefile@data$percchange,2), " %")
#} else
# subprefsshapefile@data$percchange, " %")
}
else{
# Palettes for area polygon maps
#pal <- colorBin("viridis", subprefsarearange, bins=10, na.color = "#bdbdbd")
pal <- colorBin("viridis", subprefsarearange, bins = round(seq(0, sqrt(max(subprefsarearange)), length.out = 10)^2, 1), na.color = "#bdbdbd",pretty = FALSE)
popup <- paste0("<strong>Prefecture: </strong>",
subprefsshapefile@data$NAME_1,
"<br><strong>County: </strong>",
subprefsshapefile@data$NAME_2,
"<br><strong>Rice area: </strong>",
subprefsshapefile@data[,areayear()], " km<sup>2</sup>")
}
leafletProxy("map", data = subprefsshapefile) %>%
# Adds subprefectural rice-area maps
clearGroup('rastermaps') %>%
clearGroup('subprefslayer') %>%
clearGroup('prefslayer') %>%
# For practical reasons, we need to re-render the rice map when we select area layers
addTiles(urlTemplate = ricemappath(),
option = c(tileOptions(tms = T, minZoom = 6, maxZoom = 12),
# This option fixes these tiles on top always
pathOptions(pane = "rastermaps")),
group = "rastermaps") %>%
addPolygons(data = subprefsshapefile, fillColor = ~pal(subprefsshapefile@data[,areayear()]), fillOpacity = 0.7,
color = "white", weight = 2, group = "subprefslayer", # adding layerId messes up the map
option = pathOptions(pane = "subprefslayer"), popup = popup)
}
})
# Use a separate observer to recreate the legend as needed.
observe({
proxy <- leafletProxy("map", data = prefsshapefile)
# Remove any existing legend, and only if the legend is
# enabled, create a new one.
proxy %>% clearControls()
if (input$legend & renderlayer() == "Rice paddy area (Prefecture-level)") {
if(input$periods == "Total Change"){
pal <- colorNumeric(palette = prefdivcols, domain = c(-60,10), na.color = "transparent")
proxy %>% addLegend(position = "bottomright", title = "Rice paddy </br>area change (%)",
pal = pal, values = ~c(-60,10))
} else{
#pal <- colorNumeric( palette="viridis", domain=prefsshapefile@data[,areacolumns], na.color="transparent")
pal <- colorBin("viridis", prefsarearange, bins=round(seq(0,max(subprefsarearange),length.out=10),1), na.color = "#bdbdbd",pretty = FALSE)
proxy %>% addLegend(position = "bottomright", title = "Rice paddy </br>area (km<sup>2</sup>)",
pal = pal, values = ~prefsarearange)
}
}
# Legend for county-level area
if (input$legend & renderlayer() == "Rice paddy area (County-level)") {
if(input$periods == "Total Change"){
pal <- colorNumeric(palette = subprefdivcols, domain = c(-100,250), na.color = "transparent")
proxy %>% addLegend(position = "bottomright", title = "Rice paddy </br>area change (%)",
pal = pal, values = ~c(-100,250))
} else{
#pal <- colorNumeric( palette="viridis", domain=subprefsshapefile@data[,areacolumns], na.color="transparent")
pal <- colorBin("viridis", subprefsarearange, bins = round(seq(0, sqrt(max(subprefsarearange)), length.out = 10)^2, 1), na.color = "#bdbdbd",pretty = FALSE)
proxy %>% addLegend(position = "bottomright", title = "Rice paddy </br>area (km<sup>2</sup>)",
pal = pal, values = ~subprefsarearange)
}
}
})
# This observeEvent is needed in order to create a conditional selectInput (subprefecture level) for plot and data
observeEvent(input$selectpref,{
updatePickerInput(session,'selectcounty',
choices=as.character(c("No selection","All counties", subprefsshapefile@data[subprefsshapefile@data$NAME_1==input$selectpref,"NAME_2"])))
})
# PLOT AND TABLE OUTPUTS
# Reactive expression to select which prefecture
prefselection <- reactive({
input$selectpref
})
subprefselection <- reactive({
if (identical(input$selectcounty, "NULL"))
NULL
else
input$selectcounty
})
# PLOT OUTPUT
output$plot <- renderPlot({
# Prefectural plots
if(is.null(subprefselection())){
df <- data.frame(period = periods[1:7],area = as.numeric(prefsdata[prefsdata$NAME_1==prefselection(),
c("area198589","area199094","area199599","area200004","area200509","area201014","area201519")]))
areachange <- as.numeric(prefsdata[prefsdata$NAME_1==prefselection(),"areachange"])
percchange <- as.numeric(prefsdata[prefsdata$NAME_1==prefselection(),"percchange"])
ggplot(data = df,aes(x= period, y = area)) +
scale_color_manual("blue") +
geom_line(aes(group = 1),color='lightblue')+ geom_point(color='lightblue')+ ylab("Area "~(km^2))+ xlab("") +
ggtitle(paste0("Prefecture: ",prefselection(),"\nTotal rice field area change: ", round(areachange,2)," km²",
"\nChange percentage: ",round(percchange,2),"%")) +
theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"),plot.title = element_text(size=16))
} else if(subprefselection()=="All counties" | subprefselection()=="No selection"){
df <- data.frame(period = periods[1:7],area = as.numeric(prefsdata[prefsdata$NAME_1==prefselection(),
c("area198589","area199094","area199599","area200004","area200509","area201014","area201519")]))
areachange <- as.numeric(prefsdata[prefsdata$NAME_1==prefselection(),"areachange"])
percchange <- as.numeric(prefsdata[prefsdata$NAME_1==prefselection(),"percchange"])
ggplot(data = df,aes(x= period, y = area)) +
scale_color_manual("blue") +
geom_line(aes(group = 1),color='lightblue')+ geom_point(color='lightblue')+ ylab("Area "~(km^2))+ xlab("") +
ggtitle(paste0("Prefecture: ",prefselection(),"\nTotal rice field area change: ", round(areachange,2)," km²",
"\nChange percentage: ",round(percchange,2),"%")) +
theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"),plot.title = element_text(size=16))
}
else{
df <- data.frame(period = periods[1:7],area = as.numeric(subprefsdata[subprefsdata$Prefecture==prefselection() & subprefsdata$Subprefecture==subprefselection(),
c("area198589","area199094","area199599","area200004","area200509","area201014","area201519")]))
areachange <- as.numeric(subprefsdata[subprefsdata$Prefecture==prefselection() & subprefsdata$Subprefecture==subprefselection(),"areachange"])
percchange <- as.numeric(subprefsdata[subprefsdata$Prefecture==prefselection() & subprefsdata$Subprefecture==subprefselection(),"percchange"])
ggplot(data = df,aes(x= period, y = area)) +
scale_color_manual("blue") +
geom_line(aes(group = 1),color='lightblue')+ geom_point(color='lightblue')+ ylab("Area "~(km^2))+ xlab("") +
ggtitle(paste0("Prefecture: ",prefselection(),"\nTotal rice field area change: ", round(areachange,2)," km²",
"\nChange percentage: ",round(percchange,2),"%")) +
theme(axis.text=element_text(size=12),axis.title=element_text(size=14,face="bold"),plot.title = element_text(size=16))
}
})
# TABLE OUTPUT
preftablenames <- c("Prefecture","area1985-89","area1990-94","area1995-99","area2000-04",
"area2005-09","area2010-14","area2015-19","areachange","percentchange")
subpreftablenames <- c("Prefecture","County","area1985-89","area1990-94","area1995-99","area2000-04",
"area2005-09","area2010-14","area2015-19","areachange","percentchange")
datasetInput <- reactive({
if(is.null(subprefselection())){
if(prefselection()=="All Japan"){
mytable <- prefsdata
names(mytable) <- preftablenames
} else
mytable <- prefsdata[prefsdata$NAME_1==prefselection(),]
names(mytable) <- preftablenames
} else if(prefselection()=="All Japan" & subprefselection()=="No selection" ){
mytable <- prefsdata
names(mytable) <- preftablenames
} else if(subprefselection()=="No selection"){
mytable <- prefsdata[prefsdata$NAME_1==prefselection(),]
names(mytable) <- preftablenames
} else if(subprefselection()=="All counties"){
if(prefselection()=="All Japan"){
mytable <- subprefsdata[,-3]
} else
mytable <- subprefsdata[subprefsdata$Prefecture==prefselection(),-3]
} else {
mytable <- subprefsdata[subprefsdata$Prefecture==prefselection() & subprefsdata$Subprefecture==subprefselection(),-3]
}
print(mytable, row.names = FALSE)
})
# RENDER output table
output$table <- renderPrint({
datasetInput()
})
# DOWNLOAD table
# Downloadable csv of selected dataset ----
output$downloadCsv <- downloadHandler(
filename = function() {
paste("TAMBOproject_riceareachanges", ".csv", sep = "")
},
content = function(file) {
write.csv(datasetInput(), file, row.names = FALSE)
}
)
}
##############################################################################################################
shinyApp(ui, server)
|
16e1b7c216ee4e642f785a2fc7a195bba841f3ae
|
0cc576d0a0690fc458db4dd97fdf48b004d45604
|
/R/test.R
|
3ab92875730f853117ff1c29da643c6f327e145c
|
[] |
no_license
|
mayu-y/statistical_analysis
|
83a33e5c964877b5d8d5f5828cfa4575ace8bb72
|
af55b1a759fac45f54be39d06fe33923faf05830
|
refs/heads/master
| 2021-03-03T07:42:44.280269
| 2020-03-10T02:27:51
| 2020-03-10T02:27:51
| 245,943,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 259
|
r
|
test.R
|
###wilcox.test
# Two-sample tests
# (note: var.equal is a t.test() argument; wilcox.test() silently ignores it)
wilcox.test(x=da[,2], y=db[,2], var.equal=F)
wilcox.test(x=dc[,2], y=dd[,2], var.equal=F)
wilcox.test(x=de[,2], y=df[,2], var.equal=F)
wilcox.test(x=da[,2], y=dc[,2], var.equal=F)
wilcox.test(x=da[,2], y=de[,2], var.equal=F)
|
e481abc89ded1a4205a91431f405d941ee0a9be6
|
625ababd193e2c3ff77081ab0a3640aca26fa7bf
|
/HighThroughputData/ExploratoryDataAnalysis.R
|
ee14a2c230408e1c5bbc02fc16a04295e233ad9b
|
[] |
no_license
|
AndrewS622/Data-Analysis-for-the-Life-Sciences
|
2121db45368efb0e51c31b3688f20ed556c22518
|
7aec64fca87c22b07b86a9002358617c55973917
|
refs/heads/master
| 2022-12-05T18:10:42.885403
| 2020-09-02T00:24:47
| 2020-09-02T00:24:47
| 282,118,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 827
|
r
|
ExploratoryDataAnalysis.R
|
library(SpikeInSubset)
data(mas133)
e <- exprs(mas133)
plot(e[,1],e[,2],main=paste0("corr=",signif(cor(e[,1],e[,2]),3)),cex=0.5)
k <- 3000
b <- 1000 #a buffer
# correlation between first two samples
polygon(c(-b,k,k,-b),c(-b,-b,k,k),col="red",density=0,border="red")
# points in red box
sum(e[,1] < k & e[,2] < k)/length(e[,1])
# plot log of values so tails do not dominate
plot(log2(e[,1]),log2(e[,2]),main=paste0("corr=",signif(cor(log2(e[,1]),log2(e[,2])),2)),cex=0.5)
k <- log2(k)
b <- log2(0.5)
polygon(c(b,k,k,b),c(b,b,k,k),col="red",density=0,border="red")
sum(log2(e[,1]) < k & log2(e[,2]) < k)/length(e[,1])
# MA plot
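# (A = mean log2 intensity (e[,1]+e[,2])/2 on the x-axis; M = log2 ratio e[,2]-e[,1] on the y-axis)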
e <- log2(exprs(mas133))
plot((e[,1]+e[,2])/2,e[,2]-e[,1],cex=0.5)
# standard deviation of log ratios
sd(e[,2]-e[,1])
# > 2-fold changes
sum(abs(e[,2]-e[,1]) > 1)
|
07be121d481d7a030ddccd361f10eb233fd9a524
|
5c6e8f322dc82416fd43e03fea5ddb5342d3a5b7
|
/R/operations.R
|
73c3f0121a58136cf6844b94d9e4517088277ea2
|
[] |
no_license
|
ArnaudDroitLab/GenomicOperations
|
a847efea620adad0690f820572efe48062ee05e5
|
0d7ce960f5c18545d9e170fd605f0a22deba6342
|
refs/heads/master
| 2020-06-05T21:26:53.338085
| 2019-10-18T20:02:28
| 2019-10-18T20:02:28
| 192,550,540
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,598
|
r
|
operations.R
|
#' Projects the ranges in query into the ranges in target.
#'
#' Returns the ranges in target overlapping the ranges in query, adjusting
#' their boundaries so that only the overlapping parts of the target ranges
#' are returned.
#'
#' @param query The ranges to be projected.
#' @param target The ranges to be projected against.
#'
#' @return The projection of the query ranges on the target ranges.
#' @importFrom S4Vectors subjectHits queryHits
#' @export
project_ranges <- function(query, target) {
hits = GenomicRanges::findOverlaps(target, query)
new_start = pmax(start(query)[subjectHits(hits)],
start(target)[queryHits(hits)])
new_end = pmin(end(query)[subjectHits(hits)],
end(target)[queryHits(hits)])
ranges_df = data.frame(seqname=seqnames(target)[queryHits(hits)],
start=new_start,
end=new_end,
strand=strand(target)[queryHits(hits)])
ranges_df = cbind(ranges_df,
mcols(target)[queryHits(hits),],
mcols(query)[subjectHits(hits),])
colnames(ranges_df) = c(colnames(ranges_df)[seq_len(4)],
colnames(mcols(target)),
colnames(mcols(query)))
return(GRanges(ranges_df))
}
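# A minimal usage sketch (toy ranges, illustrative only; assumes GenomicRanges/IRanges):
# query <- GenomicRanges::GRanges("chr1", IRanges::IRanges(5, 15))
# target <- GenomicRanges::GRanges("chr1", IRanges::IRanges(c(1, 12), c(10, 20)))
# project_ranges(query, target)
# # yields chr1:5-10 and chr1:12-15 -- the parts of the target ranges inside the query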
#' Collapses a list of genomic ranges into a single set of unique,
#' non-overlapping ranges.
#'
#' Ranges are prioritized in the input list order. So, if the first element
#' of the list (A) covers the range 1-10, and the second element (B) covers
#' the range 5-15, then the resulting ranges will have a 1-10 range named A,
#' and a 11-15 range named 'B'.
#'
#' @param grl The ranges to be collapsed.
#'
#' @return The collapsed regions.
#' @export
collapse_regions <- function(grl) {
# Resulting regions.
collapsed.regions = list()
# Keep track of the ranges that have already been assigned.
combined.regions = GenomicRanges::GRanges()
for(region.group in names(grl)) {
# The ranges assigned to this element are all the specified ranges,
# minus any range that has already been assigned.
collapsed.regions[[region.group]] = GenomicRanges::setdiff(grl[[region.group]], combined.regions)
collapsed.regions[[region.group]]$name = region.group
# Add the newly assigned ranges to the set of assigned ranges.
combined.regions = GenomicRanges::union(combined.regions, collapsed.regions[[region.group]])
}
# Return a single set of ranges.
return(unlist(GenomicRanges::GRangesList(collapsed.regions)))
}
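# Illustrative sketch of the priority behaviour described above (toy data, illustrative only):
# grl <- list(A = GenomicRanges::GRanges("chr1", IRanges::IRanges(1, 10)),
#             B = GenomicRanges::GRanges("chr1", IRanges::IRanges(5, 15)))
# collapse_regions(grl)
# # A keeps chr1:1-10; B is trimmed to chr1:11-15, as in the example above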
|
311e012e693c68e5020efe82dea911eec6d46e9b
|
691fd24cebd159deeb0885d0191f0fc58809a46a
|
/R/fig1.R
|
90d070bcf9de740d15ae3ac90701f388d2322bae
|
[
"CC-BY-4.0",
"CC-BY-3.0"
] |
permissive
|
Shicheng-Guo/Paper_Rqtl2
|
21a97776315f1dbf550306bdd50a8e6a52e3d816
|
a723c1d49ba9c9dec606f4d1cd5884a49f63c95b
|
refs/heads/master
| 2023-07-13T23:29:30.884010
| 2021-08-28T12:30:43
| 2021-08-28T12:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,710
|
r
|
fig1.R
|
# figure 1: reproduce Fig 5 from Gatti et al (2014)
#
# "We regressed log neutrophil counts on founder allele dosages at
# each marker using a kinship correction with sex and log white blood cell counts as covariates"
# load the data; 2nd phenotype is "NEUT"
library(qtl2)
set.seed(33003221)
file <- "cache/do.rds"
dir <- dirname(file)
if(!dir.exists(dir)) {
dir.create(dir)
}
if(file.exists(file)) {
do <- readRDS(file)
} else {
do <- read_cross2("https://raw.githubusercontent.com/rqtl/qtl2data/master/DO_Gatti2014/do.zip")
saveRDS(do, file)
}
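# (the same compute-once, cache-to-RDS pattern is reused for each expensive step below)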
# pseudomarker maps
file <- "cache/maps_n_phe.RData"
if(file.exists(file)) {
load(file)
} else {
gmap <- insert_pseudomarkers(do$gmap, stepwidth="max", step=0.2)
pmap <- interp_map(gmap, do$gmap, do$pmap)
# phenotypes and covariates
phe <- log10(do$pheno[,2])
covar <- cbind(sex=(do$is_female*1),
wbc=log10(do$pheno[,1]))
save(gmap, pmap, phe, covar, file=file)
}
# calculate genotype probabilities
file <- "cache/probs.rds"
if(file.exists(file)) {
# pr <- readRDS(file)
} else {
pr <- calc_genoprob(do, gmap, error_prob=0.002, map_function="c-f", cores=0)
saveRDS(pr, file)
}
# calculate allele dosages
file <- "cache/aprobs.rds"
if(file.exists(file)) {
# apr <- readRDS(file)
} else {
apr <- genoprob_to_alleleprob(pr, cores=0)
saveRDS(apr, file)
}
# calculate kinship
file <- "cache/kinship.rds"
if(file.exists(file)) {
k <- readRDS(file)
} else {
k <- calc_kinship(apr, "loco", cores=0)
saveRDS(k, file)
}
# genome scan with full model
file <- "cache/out_full.rds"
if(file.exists(file)) {
out_full <- readRDS(file)
} else {
out_full <- scan1(pr, phe, k, addcovar=covar, cores=0)
saveRDS(out_full, file)
}
# genome scan with additive model
file <- "cache/out_add.rds"
if(file.exists(file)) {
out_add <- readRDS(file)
} else {
out_add <- scan1(apr, phe, k, addcovar=covar, cores=0)
saveRDS(out_add, file)
}
# functions to query snps and genes
qv <- create_variant_query_func("~/Data/CCdb/cc_variants.sqlite")
qg <- create_gene_query_func("~/Data/CCdb/mouse_genes_mgi.sqlite")
# GWAS at SNPs
file <- "cache/out_snps.rds"
if(file.exists(file)) {
out_snps <- readRDS(file)
} else {
out_snps <- scan1snps(pr, pmap, phe, k,
addcovar=covar, query_func=qv, cores=0)
saveRDS(out_snps, file)
}
# estimate coefficients on chr 1
file <- "cache/coef_c1.rds"
if(file.exists(file)) {
co <- readRDS(file)
} else {
co <- scan1coef(apr[,1], phe, k[1], addcovar=covar)
co[,1:8] <- co[,1:8] - rowMeans(co[,1:8])
saveRDS(co, file)
}
file <- "cache/blup_c1.rds"
if(file.exists(file)) {
blup <- readRDS(file)
} else {
blup <- scan1blup(apr[,1], phe, k[1], addcovar=covar, cores=0)
saveRDS(blup, file)
}
# snp scan of a small region
li <- lod_int(out_add, pmap, chr=1)
file <- "cache/out_finemap_c1.rds"
if(file.exists(file)) {
out_finemap <- readRDS(file)
} else {
out_finemap <- scan1snps(pr, pmap, phe, k, addcovar=covar,
query_func=qv, chr=1, start=li[1], end=li[3],
keep_all_snps=TRUE, cores=0)
saveRDS(out_finemap, file)
}
genes <- qg(chr=1, start=li[1], end=li[3])
# line colors
altcolor <- "green4"
linecolor <- "violetred"
# subset genes to 1/2
sub <- sort(unique(c(sample(nrow(genes), nrow(genes)/2),
match(c("Cxcr4", "Tmcc2"), genes$Name))))
genes <- genes[sub,]
gene_col <- rep("gray40", nrow(genes))
names(gene_col) <- genes$Name
gene_col[c("Cxcr4", "Tmcc2")] <- linecolor
# load permutation results
operm_full <- readRDS("Perms/operm_full.rds")
operm_add <- readRDS("Perms/operm_add.rds")
operm_snps <- readRDS("Perms/operm_snps.rds")
# calculate thresholds
thr_full <- summary(operm_full)
thr_add <- summary(operm_add)
thr_snps <- summary(operm_snps)
# ylim
ymx_full <- thr_full$A/thr_add$A*maxlod(out_add)*1.04
ymx_add <- maxlod(out_add)*1.04
ymx_snps <- thr_snps$A/thr_add$A*maxlod(out_add)*1.04
# make the plots
panel_lab_adj <- c(0.12, 0.06)
panel_lab_cex <- 1.3
res <- 256
for(figtype in c("png", "eps")) {
if(figtype == "png") {
png("../Figs/fig1.png", height=7.5*res, width=10*res, pointsize=14, res=res)
} else {
postscript("../Figs/fig1.eps", paper="special", height=7.5, width=10, horizontal=FALSE, onefile=FALSE)
panel_lab_adj[1] <- 0.10
}
layout(cbind(rep(1:3, each=4),
c(4,4,4,5,5,6,6,6,7,7,7,7)))
par(mar=c(2.1, 4.1, 1.6, 1.1))
plot(out_full, pmap, xlab="", ylim=c(0, ymx_full), altcol=altcolor)
u <- par("usr")
endA <- xpos_scan1(pmap, thechr=19, thepos=max(pmap[[19]]))+25/2
segments(u[1], thr_full$A, endA, thr_full$A, col=linecolor, lty=2)
segments(endA, thr_full$X, u[2], thr_full$X, col=linecolor, lty=2)
u <- par("usr")
text(u[1]-diff(u[1:2])*panel_lab_adj[1], u[4]+diff(u[3:4])*panel_lab_adj[2], "A", font=2, xpd=TRUE, cex=panel_lab_cex)
plot(out_add, pmap, xlab="", ylim=c(0, ymx_add), altcol=altcolor)
u <- par("usr")
segments(u[1], thr_add$A, endA, thr_add$A, col=linecolor, lty=2)
segments(endA, thr_add$X, u[2], thr_add$X, col=linecolor, lty=2)
text(u[1]-diff(u[1:2])*panel_lab_adj[1], u[4]+diff(u[3:4])*panel_lab_adj[2], "B", font=2, xpd=TRUE, cex=panel_lab_cex)
plot(out_snps$lod, out_snps$snpinfo, altcol=altcolor, xlab="",
ylim=c(0, ymx_snps))
u <- par("usr")
segments(u[1], thr_snps$A, endA, thr_snps$A, col=linecolor, lty=2)
segments(endA, thr_snps$X, u[2], thr_snps$X, col=linecolor, lty=2)
text(u[1]-diff(u[1:2])*panel_lab_adj[1], u[4]+diff(u[3:4])*panel_lab_adj[2], "C", font=2, xpd=TRUE, cex=panel_lab_cex)
par(mar=c(0.5,4.1,1.6,1.1))
ymx <- max(abs(blup[,1:8]))*1.04 * 1.6
mgp <- c(2.1, 0.3, 0)
if(figtype=="eps") mgp <- c(2.8, 0.3, 0)
plot_coefCC(blup, pmap, xaxt="n", ylim=c(-ymx, ymx), xlab="", mgp=mgp)
legend("topleft", ncol=4, lwd=2, col=CCcolors, legend=names(CCcolors), bg="gray92")
u <- par("usr")
text(u[1]-diff(u[1:2])*panel_lab_adj[1], u[4]+diff(u[3:4])*panel_lab_adj[2], "D", font=2, xpd=TRUE, cex=panel_lab_cex)
par(mar=c(3.1,4.1,0,1.1))
plot(out_add, pmap[1], xlab="", xaxt="n", mgp=mgp)
axis(side=1, at=pretty(par("usr")[1:2]), tick=FALSE, mgp=c(0, 0.2, 0))
title(xlab="Chr 1 position (Mbp)", mgp=c(1.8, 0, 0))
par(mar=c(0.5,4.1,1.6,1.1))
plot(out_finemap$lod, out_finemap$snpinfo, show=TRUE, drop=1.5,
xaxt="n", xlab="")
u <- par("usr")
text(u[1]-diff(u[1:2])*panel_lab_adj[1], u[4]+diff(u[3:4])*panel_lab_adj[2], "E", font=2, xpd=TRUE, cex=panel_lab_cex)
par(mar=c(3.1,4.1,0,1.1))
plot_genes(genes, col=gene_col, xlab="", mgp=c(0,0.2,0),
xlim=c(u[1], u[2]), xaxs="i")
title(xlab="Chr 1 position (Mbp)", mgp=c(1.8, 0, 0))
dev.off()
} # end loop over figure type
|
f5b9bdab4ecba2b936008fcd08d9882e5f75a7da
|
f04e7ca50355951461fcb28bb87b27314cc9e253
|
/run_analysis.R
|
8b7ebb2fe3ed3a3fcfddaacded2871191c3209a4
|
[] |
no_license
|
srisraman/Getting-and-Cleaning-Data-Project
|
efc75241965197d3aae7497e156dac7e85fdd860
|
26b8c0a108da658e747719c772075d61c641097f
|
refs/heads/master
| 2020-05-19T09:42:28.860643
| 2015-01-25T23:36:34
| 2015-01-25T23:36:34
| 29,833,923
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,064
|
r
|
run_analysis.R
|
# Get the data
unzip("getdata_projectfiles_UCI HAR Dataset.zip")
subjectTest<-read.table("~/Coursera/Data Cleaning/UCI HAR Dataset/test/subject_test.txt",sep="", header=FALSE, col.names="subjectId")
subjectTrain<-read.table("~/Coursera/Data Cleaning/UCI HAR Dataset/train/subject_train.txt",sep="", header=FALSE, col.names="subjectId")
activityTest<-read.table("~/Coursera/Data Cleaning/UCI HAR Dataset/test/y_test.txt",sep="", header=FALSE, col.names="activityId")
activityTrain<-read.table("~/Coursera/Data Cleaning/UCI HAR Dataset/train/y_train.txt",sep="", header=FALSE, col.names="activityId")
activityLabels<-read.table("~/Coursera/Data Cleaning/UCI HAR Dataset/activity_labels.txt",sep="", header=FALSE,col.names=c("activityId","activityDesc"))
colHeaders<-read.table("~/Coursera/Data Cleaning/UCI HAR Dataset/features.txt")
testSet<-read.table("~/Coursera/Data Cleaning/UCI HAR Dataset/test/X_test.txt",sep="", header=FALSE)
trainSet<-read.table("~/Coursera/Data Cleaning/UCI HAR Dataset/train/X_train.txt",sep="", header=FALSE)
# Label with variable Names
colnames(testSet)<- colHeaders[,2]
colnames(trainSet)<- colHeaders[,2]
testSet <- cbind(subjectTest, activityTest, testSet)
trainSet <- cbind(subjectTrain, activityTrain, trainSet)
# Assign Activity Names
testSet <- merge(activityLabels,testSet,by="activityId")
trainSet <- merge(activityLabels,trainSet,by="activityId")
# Merge Data sets
mergedSet <- rbind(testSet,trainSet)
# Extract only the mean and standard deviation for each measurement.
subsetMean <- grepl("mean()",colnames(mergedSet), fixed=TRUE)
subsetStd <- grepl("std()", colnames(mergedSet), fixed=TRUE)
subsetAll <- as.logical(subsetMean + subsetStd)
subsetAll[2:3] <- TRUE
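# (the two TRUE values set above keep the activityDesc and subjectId columns alongside the measurements)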
subMergedSet <- mergedSet[,subsetAll]
# Create tidy data set
tidySet <- aggregate(. ~ subMergedSet$subjectId+subMergedSet$activityDesc, data=subMergedSet ,FUN=mean)
# Rename columns in new data set
tidySet <- tidySet[,-c(3,4)]
colnames(tidySet)[1:2] <- c("subjectId","activity")
# Write output
write.table(tidySet, "tidySet.txt", row.name=FALSE)
|
2e98749d2ec82e27d146960ce6718466472ed662
|
6baccd95ac85151fc4654223f771f986a843ef47
|
/Fig3A_plotModuleEnrichment.r
|
480a949a216b62812681623b4114c68d90541098
|
[] |
no_license
|
Bucanlab/Ji_PNAS_2016
|
6c218b6185e61edb2ec02ed709dd149bed85af88
|
af49701f68610334197cac2e501bc598e8a52f83
|
refs/heads/master
| 2021-05-03T19:25:05.602351
| 2016-10-26T20:24:28
| 2016-10-26T20:24:28
| 69,603,620
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,934
|
r
|
Fig3A_plotModuleEnrichment.r
|
library(ggplot2)
library(plyr)
setwd("...")
dat=read.table("enrichmentPerModule_EGcell_NEG_v2_TADA0.5_updated.txt",header=T,sep='\t')
dat_EG=dat[,c(1,2,3,4,5)]
dat_EG=data.frame(dat_EG,rep(NA,dim(dat_EG)[1]))
names(dat_EG)=c("Module","EG_all","NLG_all","OR","P","Category")
dat_EG$Category[which(dat_EG$OR<1)] = "3NEG enrichment"
dat_EG$Category[which(dat_EG$OR>1)] = "1EG enrichment"
FDR_EG=p.adjust(dat_EG$P,method="BH")
plotDat1=data.frame(substr(dat_EG$Module,1,3),-log10(dat_EG$P),-log10(FDR_EG),dat_EG$Category)
names(plotDat1)=c("Module","Pval","FDR","Category")
dat_TADA=dat[,c(1,6,7,8,9)]
dat_TADA=data.frame(dat_TADA,rep('2Potential ASD gene enrichment',dim(dat_EG)[1]))
names(dat_TADA)=c("Module","TADA0.5","nonTADA0.5","OR","P","Category")
FDR_TADA=p.adjust(dat_TADA$P,method="BH")
plotDat2=data.frame(substr(dat_TADA$Module,1,3),log10(dat_TADA$P),log10(FDR_TADA),dat_TADA$Category)
names(plotDat2)=c("Module","Pval","FDR","Category")
plotDat=rbind(plotDat1,plotDat2)
names(plotDat)=c("Module","Pval","FDR","Category")
plotDat$Category=factor(plotDat$Category,levels=c("1EG enrichment","2Potential ASD gene enrichment","3NEG enrichment"))
orderedEGmods=substr(c("M01","M02","M07","M26","M22","M08","M37","M17","M25","M34","M14","M16","M10","M15","M18","M24","M33"),1,3)
orderedNEGmods=substr(c("M04","M19","M12","M11","M06","M38","M09","M13","M30","M03","M32","M23","M29","M35","M27","M41","M05","M20","M36","M40","M31","M39","M21","M28"),1,3)
plotDat$Module=factor(plotDat$Module,levels=c(rev(orderedEGmods),orderedNEGmods))
write.table(plotDat,file="enrichmentPerModule_EGcell_NLG_v2_TADA_FDR_updated.txt",sep='\t',quote=F,row.names=F)
#myColors=c("darkgray","turquoise3","#FF6666","springgreen3")
png("brainSpanModules_enrichment_EGcell_NEG_v2_TADA_1_largeFont_updated.png",height=7.5,width=10.5,units='in',res=500)
ggplot(plotDat,aes(x= Module, y = FDR, fill=Category, order=Category)) +
geom_bar(data=plotDat[which(plotDat$Category=="1EG enrichment" | plotDat$Category=="3NEG enrichment"),] , stat = "identity") +
geom_bar(data=plotDat[which(plotDat$Category=="2Potential ASD gene enrichment"),], stat = "identity") +
scale_fill_manual(name="",values=c("#F8766D","#00BA38","#00BFC4","black")) +
geom_abline(intercept=-log10(0.1),slope=0, linetype="dashed",color="red") +
geom_abline(intercept=log10(0.1),slope=0, linetype="dashed",color="red") +
geom_abline(intercept=0,slope=0, linetype="solid",color="gray50") +
theme_bw() + xlab("Module ID") + ylab("-log10(q)")+ ggtitle("")+
theme(plot.title=element_text(size=20),legend.text=element_text(size=14),axis.text=element_text(size=14),axis.title=element_text(size=18),axis.text.x = element_text(angle = 90, vjust = 0.5)) +
scale_y_continuous(breaks=rev(c(40,35,30,25,20,15,10,5,1,0,-1,-5)),labels=rev(c("40","35","30","25","20","15","10","5","1","0","1","5")))
#+ coord_flip()
dev.off()
|
27e7756b4c3368edbfdf544f6093862d6b4c4aa8
|
1da9ba1ee08b899af8b107d3d81281d559e2200c
|
/CA5A_10363676/calculator.R
|
6e17cdcbf7e23ce7ab1ec2f854bfc6db3906da6c
|
[] |
no_license
|
mapster21/CF_DB
|
bc3c701db59b0f00ec4864297f2cbb8e94080cfc
|
2c7923df943b43147555a3035727544fa48597ef
|
refs/heads/master
| 2021-05-15T12:29:09.342656
| 2017-12-13T10:56:23
| 2017-12-13T10:56:23
| 108,444,616
| 0
| 0
| null | 2017-12-09T22:04:26
| 2017-10-26T17:35:37
|
Python
|
UTF-8
|
R
| false
| false
| 1,956
|
r
|
calculator.R
|
# Scientific calculator using functions written in R.
# Select steps between the dashed lines and run in sequence:
# Step 1---------------------------------------------------------------------------------------------------
add <- function(x, y) {
return(x + y)
}
subtract <- function(x, y) {
return(x - y)
}
multiply <- function(x, y) {
return(x * y)
}
divide <- function(x, y) {
return(x / y)
}
square <- function(x) {
return(x * x)
}
cube <- function(x) {
return(x * x * x)
}
squarert <- function(x) {
return(sqrt(x))
}
cosine <- function(x) {
return(cos(x))
}
sine <- function(x) {
return(sin(x))
}
tangent <- function(x) {
return(tan(x))
}
# take input from the user
print("Select operation.")
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")
print("5.Square")
print("6.Cube")
print("7.Squarert")
print("8.Cosine")
print("9.Sine")
print("10.Tangent")
choice = as.integer(readline(prompt="Enter choice[1/2/3/4/5/6/7/8/9/10]: "))
# Step 2--------------------------------------------------------------------------------------------------
num1 = as.integer(readline(prompt="Enter first number: "))
# For choices 5 - 10 inclusive skip step 3
# Step 3 -------------------------------------------------------------------------------------------------
num2 = as.integer(readline(prompt="Enter second number: "))
# Step 4 -------------------------------------------------------------------------------------------------
operator <- switch(choice,"+","-","*","/")
result <- switch(choice, add(num1, num2), subtract(num1, num2), multiply(num1, num2), divide(num1, num2),
                 square(num1), cube(num1), squarert(num1), cosine(num1), sine(num1), tangent(num1))
# choices 5-10 are unary: num2 and operator are unset for them, so print those results differently
if (choice <= 4) {
  print(paste(num1, operator, num2, "=", result))
} else print(paste(switch(choice - 4, "square", "cube", "squarert", "cosine", "sine", "tangent"), "of", num1, "=", result))
# End -----------------------------------------------------------------------------------------------------
|
45630548b2778b51e05c3e653d252fd441986704
|
93b800fb677d6c7685a2934be4a44079de170990
|
/man/pvw.Rd
|
7ace16f1421d1ffd172ea170848b7a7c599d6631
|
[
"MIT"
] |
permissive
|
mkearney/lop
|
fa47144543f9952745c31c8aa0cd7b28ad7ee6e4
|
d0a0d468454881462753f81793aa1a23dbc53c1b
|
refs/heads/master
| 2022-04-11T01:49:57.379555
| 2020-04-06T22:02:34
| 2020-04-06T22:02:34
| 202,009,106
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 431
|
rd
|
pvw.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-funs.R
\name{pvw}
\alias{pvw}
\title{pvw}
\usage{
pvw(
data,
id_cols = NULL,
names_from = name,
names_prefix = "",
names_sep = "_",
names_repair = "check_unique",
values_from = value,
values_fill = NULL,
values_fn = NULL
)
}
\description{
See \code{tidyr::\link[tidyr:pivot_wider]{pivot_wider}} for details.
}
\keyword{internal}
|
ea5f423eff9749038d901e3cff315ca19345e561
|
70b5810e0a9e8193baf3dc83f183c07cdc67cd5f
|
/Mineria_datos_aspectos_avanzados/trabajo_clasificacion_ordinal/esl-j48ordinal.R
|
428a83da13de8de29decdde0268c4ab554b629a7
|
[] |
no_license
|
Nico-Cubero/master-datcom-2019-2020
|
11924c895ff52f91cbdb254d932ee6cecc20800d
|
e956799382b147aff385d62749bf915116893449
|
refs/heads/main
| 2023-06-08T09:17:40.930193
| 2021-06-13T11:54:11
| 2021-06-13T11:54:11
| 376,529,939
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,541
|
r
|
esl-j48ordinal.R
|
library(RWeka)
library(partykit)
library(abind)
library(ggplot2)
set.seed(2)
# Load the esl dataset
esl <- read.arff('esl.arff')
ordinal.label.encoder <- function(X, class.col, labels.ordered=NULL) {
# Check parameters input
if (!(is.data.frame(X)) || is.null(colnames(X))) {
stop('X must be a dataframe with named columns')
}
if (length(class.col)!=1) {
stop('Only one value allowed for "label.col"')
}
# Index of the class column
class.column.num <- NA
# Resolve the class column number
if (is.character(class.col)) {
if (!class.col %in% colnames(X)) {
stop(paste('"',class.col,'" not included in dataset'))
}
class.column.num <- which(colnames(X)==class.col)
} else if (is.integer(class.col)) {
if (class.col < 1 || class.col>ncol(X)) {
stop('Column ',class.col,' not a valid column')
}
class.column.num <- class.col
} else {
stop('Only character or integers allowed for "class.col"')
}
# The supplied labels must all be values of the class column
if (is.null(labels.ordered)) {
# If no order is specified, take the order of the class column values
if (is.ordered(X[,class.column.num]) || is.numeric(X[,class.column.num])) {
labels.ordered <- sort(unique(X[,class.column.num]))
}
else {
stop('Cannot deduce implicit order from class')
}
} else if (any(!labels.ordered %in% unique(X[,class.column.num]))) {
stop('At least one label in "labels.ordered" is not a class of X')
}
# New set of class labels carrying the ordinal information
target <- matrix(data=rep(NA, nrow(X)*(length(labels.ordered)-1)),
nrow=nrow(X))
# For each class label
for (i in 1:(ncol(target))) {
# Row indices for class label i
index.class <- X[,class.column.num]==labels.ordered[i]
# Copy the labels from the previous ordinal column, which represents
# a lower-order class
if (i>1) {
target[target[,i-1]==0,i] <- 0
}
# Generate a column with 0's for label i and 1's for the rest
target[index.class,i] <- 0
target[is.na(target[,i]),i] <- 1
}
# Convert target into data frame columns and append it to the original df
target <- as.data.frame(target)
target[] <- lapply(target, factor)
colnames(target) <- paste(colnames(X)[class.column.num],'_', 1:ncol(target), sep='')
# Replace the original class column with the ordinal ones
X_new <- cbind(X[,-class.column.num], target)
return(X_new)
}
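# Quick illustration on a toy data frame (3 ordered classes -> 2 binary columns; illustrative only):
# toy <- data.frame(x = 1:6, y = c(1, 1, 2, 2, 3, 3))
# ordinal.label.encoder(toy, 'y')
# # y_1 is 0 for class 1 and 1 otherwise; y_2 is 0 for classes 1-2 and 1 for class 3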
# Encode the class as ordinal classes
esl.ordinal <- ordinal.label.encoder(esl, 'out1')
# The class column has thus been converted into (n classes - 1) = 8
# ordinal class columns: out1_1, ..., out1_8
colnames(esl.ordinal)
# Split into training and test sets
train=sample(1:nrow(esl.ordinal), nrow(esl.ordinal)-100)
esl.test=esl.ordinal[-train ,]
# Fit a model for each ordinal class
#### Model m1 for out1_1
m1 = J48(out1_1~in1+in2+in3+in4,data = esl.ordinal,subset = train)
m1
# Since only 2 instances in the training set belong to class 1,
# the ordinal output out1_1 has only 2 instances with value 0,
# while all remaining instances take the value 1.
# As a consequence, a trivial classifier was produced that
# classifies every instance as belonging to class 1 of the
# output out1_1, i.e. any instance is classified as belonging
# to any class other than 1.
# The model is not evaluated with cross-validation, as only
# 2 patterns belong to class 1.
# The test set contained no instance belonging to class 1.
# This trivial classifier labelled every instance as different
# from class 1, so the accuracy is 100%
# Predict the whole test set, taking membership probabilities
pred_m1 <- predict(m1, newdata = esl.test, 'probability')
pred_m1
#### Model m2 for out1_2
m2 = J48(out1_2~in1+in2+in3+in4,data = esl.ordinal,subset = train)
m2
# Plot the resulting tree
plot(m2)
# Evaluate the model with 5-fold cross-validation
eval_m2 <- evaluate_Weka_classifier(m2, numFolds = 5, class = TRUE)
eval_m2
# Although the fitted model shows high performance, bear in mind
# the class imbalance noted above.
# Predict the whole test set, taking membership probabilities
pred_m2 <- predict(m2, newdata = esl.test, 'probability')
pred_m2
#### Model m3 for out1_3
m3 = J48(out1_3~in1+in2+in3+in4,data = esl.ordinal,subset = train)
m3
plot(m3)
# Evaluate the model with 5-fold cross-validation
eval_m3 <- evaluate_Weka_classifier(m3, numFolds = 5, class = TRUE)
eval_m3
# Predict the whole test set, taking membership probabilities
pred_m3 <- predict(m3, newdata = esl.test, 'probability')
pred_m3
#### Model m4 for out1_4
m4 = J48(out1_4~in1+in2+in3+in4,data = esl.ordinal,subset = train)
m4
plot(m4)
# Evaluate the model with 5-fold cross-validation
eval_m4 <- evaluate_Weka_classifier(m4, numFolds = 5, class = TRUE)
eval_m4
# Predict the whole test set, taking membership probabilities
pred_m4 <- predict(m4, newdata = esl.test, 'probability')
pred_m4
#### Model m5 for out1_5
m5 = J48(out1_5~in1+in2+in3+in4,data = esl.ordinal,subset = train)
m5
plot(m5)
# Evaluate the model with 5-fold cross-validation
eval_m5 <- evaluate_Weka_classifier(m5, numFolds = 5, class = TRUE)
eval_m5
# Predict the whole test set, taking membership probabilities
pred_m5 <- predict(m5, newdata = esl.test, 'probability')
pred_m5
#### Model m6 for out1_6
m6 = J48(out1_6~in1+in2+in3+in4,data = esl.ordinal,subset = train)
m6
plot(m6)
# Evaluate the model with 5-fold cross-validation
eval_m6 <- evaluate_Weka_classifier(m6, numFolds = 5, class = TRUE)
eval_m6
# Predict the whole test set, taking membership probabilities
pred_m6 <- predict(m6, newdata = esl.test, 'probability')
pred_m6
#### Model m7 for out1_7
m7 = J48(out1_7~in1+in2+in3+in4,data = esl.ordinal,subset = train)
m7
plot(m7)
# Evaluate the model with 5-fold cross-validation
eval_m7 <- evaluate_Weka_classifier(m7, numFolds = 5, class = TRUE)
eval_m7
# Predict the whole test set, taking membership probabilities
pred_m7 <- predict(m7, newdata = esl.test, 'probability')
pred_m7
# Model m8 for out1_8
m8 = J48(out1_8~in1+in2+in3+in4,data = esl.ordinal,subset = train)
m8
# As with classifier m1, the presence of few instances
# belonging to class 9 leads to the construction of a trivial
# classifier that labels every pattern as not belonging to
# class 9, i.e. every pattern is classified into class 0
# of the out1_8 column
# Evaluate the model with 5-fold cross-validation
eval_m8 <- evaluate_Weka_classifier(m8, numFolds = 5, class = TRUE)
eval_m8
# The classifier misclassified every pattern belonging to
# class 9
# Predict the whole test set, taking membership probabilities
pred_m8 <- predict(m8, newdata = esl.test, 'probability')
pred_m8
# Once all the classifiers have made their predictions, all the
# probabilities are combined using conditional probability to
# obtain the probabilities jointly assigned by the classifiers
# to every class.
compute.probability.from.ordinal <- function(P) {
# Function that computes the class probabilities
# from a set of ordinal probabilities given
# by a set of ordinal binary classifiers.
#
# Takes: P: array of (n_instances x 2 x ordinal probs)
# Compute the number of classes and of instances
n_class <- dim(P)[3] + 1
n_inst <- dim(P)[1]
# Final matrix with the probability of each instance
# for each class
prob_class <- matrix(data = 1, ncol = n_class, nrow = n_inst)
# Auxiliary matrix for computing conditional probabilities
aux_cond_prob <- matrix(data = 1, ncol = 1, nrow = n_inst)
# The probability of class 1 is already known
prob_class[,1] <- P[,1,1]
for(i in 2:(n_class-1)) {
# Accumulate and compute P(C>Ci-1)
aux_cond_prob = aux_cond_prob*P[,2,i-1]
# Compute P(C=Ci)
prob_class[,i] <- aux_cond_prob*P[,1,i]
}
# Assign the probability of the last class
prob_class[,ncol(prob_class)] <- P[,2,n_class-1]*aux_cond_prob
return(prob_class)
}
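# Tiny worked example (one instance, 3 classes -> 2 ordinal classifiers; illustrative only):
# P <- array(c(0.2, 0.8, 0.5, 0.5), dim = c(1, 2, 2))
# compute.probability.from.ordinal(P)
# # gives 0.2, 0.8*0.5 = 0.4 and 0.8*0.5 = 0.4, which sums to 1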
# Combine all the probabilistic predictions into one array
pred_all <- abind(pred_m1, pred_m2, pred_m3, pred_m4,
pred_m5, pred_m6, pred_m7, pred_m8,
along = 3)
# Compute the actual probability of each class via
# conditional probability
prob <- compute.probability.from.ordinal(pred_all)
# Determine the classes into which the patterns were classified
pred_class <- max.col(prob)
# Compute the test-set score
score <- mean(pred_class==esl[-train,]$out1)
score
# Jointly, the prediction reaches an accuracy value
# of 0.62 on the test set
# We examine the share of each true class in the test set
# and compare it with the share of each predicted class
# with the help of a bar chart
score_comparison <- data.frame(value=factor(esl[-train,]$out1),
type='real')
score_comparison <- rbind(score_comparison,
data.frame(value=factor(pred_class),
type='predicted'))
ggplot2::ggplot(score_comparison, aes(value, fill = type)) +
ggplot2::geom_bar(position = position_dodge(), colour = 'black') +
xlab('Clase') +
ylab('Número de patrones') +
scale_y_continuous(breaks = scales::pretty_breaks(n=15))
# The combined model clearly lacks predictive power for
# class 1 and class 9, which is explained by the presence
# of few patterns belonging to these classes in the dataset.
# We compare this model's performance with a non-ordinal J48 classifier
esl$out1 <- as.factor(esl$out1)
m.no.ord <- J48(out1~in1+in2+in3+in4, data = esl,subset = train)
m.no.ord
# Evaluate the model with 5-fold cross-validation
eval_m.no.ord <- evaluate_Weka_classifier(m.no.ord, numFolds = 5, class = TRUE)
eval_m.no.ord
# Predict the whole test set (class labels this time, not probabilities)
pred_m.no.ord <- predict(m.no.ord, newdata = esl[-train,])
score.no.ord <- mean(pred_m.no.ord==esl[-train,]$out1)
score.no.ord
# The performance obtained with this non-ordinal model is
# similar to that of the ordinal model. Again, we want to compare
# the predictions made with the true class proportions
score_comparison.no.ord <- data.frame(value=factor(esl[-train,]$out1),
type='real')
score_comparison.no.ord <- rbind(score_comparison.no.ord,
data.frame(value=factor(pred_m.no.ord),
type='predicted'))
ggplot2::ggplot(score_comparison.no.ord, aes(value, fill = type)) +
ggplot2::geom_bar(position = position_dodge(), colour = 'black') +
xlab('Clase') +
ylab('Número de patrones') +
scale_y_continuous(breaks = scales::pretty_breaks(n=15))
# As with the ordinal model, no pattern is classified into
# class 1 due to the shortage of patterns of this class
|
dfa6d6f926a5e4805031cbd1d1f46991dcceeb1a
|
93203fbd153d7eafa1cffbdff9973a5f90a52f5e
|
/man/lad.Rd
|
f93915cb06d021be896ef8b07915de401bdbbb3a
|
[] |
no_license
|
twolodzko/L1pack
|
c6ca048e665de16daa1eeb18da99c83d5f9c0d01
|
7c932d62f80060fbd632ff3e3ddeddb24fa5c372
|
refs/heads/master
| 2021-01-16T18:34:54.922778
| 2017-02-20T03:16:44
| 2017-02-20T03:16:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,122
|
rd
|
lad.Rd
|
\name{lad}
\alias{lad}
\title{Least absolute deviations regression}
\description{
This function is used to fit linear models considering Laplace errors.
}
\usage{
lad(formula, data, method = c("BR", "EM"), subset, na.action,
control, model = TRUE, x = FALSE, y = FALSE, contrasts = NULL)
}
\arguments{
\item{formula}{an object of class \code{"formula"}: a symbolic description of
the model to be fitted.}
\item{data}{an optional data frame containing the variables in the model. If
not found in \code{data}, the variables are taken from \code{environment(formula)},
typically the environment from which \code{lad} is called.}
\item{method}{character string specifying the algorithm to use. The default
algorithm is the Barrodale and Roberts algorithm \code{method = "BR"}. Other
possible value is \code{method = "EM"} for an EM algorithm using IRLS.}
\item{subset}{an optional expression indicating the subset of the rows of
data that should be used in the fit.}
\item{na.action}{a function that indicates what should happen when the data contain NAs.}
\item{control}{a list of control values for the estimation algorithm to replace
the default values returned by the function \code{\link{l1pack.control}}.}
\item{model, x, y}{logicals. If \code{TRUE} the corresponding components of
the fit (the model frame, the model matrix, the response) are returned.}
\item{contrasts}{an optional list. See the \code{contrasts.arg} of \code{model.matrix.default}.}
}
\value{
an object of class \code{lad} representing the linear model fit. Generic
function \code{print}, show the results of the fit.
}
\author{The design was inspired by the R function \code{\link{lm}}.}
\references{
Barrodale, I., and Roberts, F.D.K. (1974).
Solution of an overdetermined system of equations in the L1 norm.
\emph{Communications of the ACM} \bold{17}, 319-320.
Phillips, R.F. (2002).
Least absolute deviations estimation via the EM algorithm.
\emph{Statistics and Computing} \bold{12}, 281-285.
}
\examples{
lad(stack.loss ~ ., data = stackloss, method = "EM")
}
\keyword{regression}
|
619f8feca1aae9d91e2e262f0fc5a7bf32c59b07
|
7b74f00cd80694634e6925067aaeb6572b09aef8
|
/2020/Assignment-2020/Selected/Met-FE8828-Fan Wenqing/Assignment 4/assignment4-2.R
|
fc5f36c5106160a896751814766a561ab79b5ff3
|
[] |
no_license
|
leafyoung/fe8828
|
64c3c52f1587a8e55ef404e8cedacbb28dd10f3f
|
ccd569c1caed8baae8680731d4ff89699405b0f9
|
refs/heads/master
| 2023-01-13T00:08:13.213027
| 2020-11-08T14:08:10
| 2020-11-08T14:08:10
| 107,782,106
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,675
|
r
|
assignment4-2.R
|
library(shiny)
library(tidyverse)
ui <- fluidPage(
numericInput("r1", "Infection Rate", 0.05),
numericInput("r2", "Specificity and Sensitivity", 0.95),
plotOutput("p1"),
h4("The percentage of an individual who tests negative is actually negative is:"),
textOutput("t1")
)
server <- function(input, output) {
output$p1 <- renderPlot({
df_sensi <- full_join(
tibble(x = 1:25, color = 'Actual Neg'),
tibble(y = 1:20, color = 'Actual Neg'), by = 'color')
n1 <- round(500*(input$r1)*(1 - input$r2))
n2 <- 500*(input$r1) - n1
n3 <- 500*(1 - input$r2) - n1
n4 <- 500 - n1 -n2 - n3
df_sensi['color'] <- c(rep('False Neg', n1),
rep('Actual Pos', n2),
rep('False Pos', n3),
rep('Actual Neg', n4))
ggplot(df_sensi) +
geom_point(aes(x, y,colour = color),
size = 4, shape="circle") +
theme_bw() +
theme(axis.title.x=element_blank(),
axis.title.y=element_blank(),
axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank())
})
output$t1 <- renderPrint({
n1 <- round(500*(input$r1)*(1 - input$r2))
n2 <- 500*(input$r1) - n1
n3 <- 500*(1 - input$r2) - n1
n4 <- 500 - n1 -n2 - n3
chance = (n3+n4)/(n1+n3+n4) *100
chance
})
}
# Run the application
shinyApp(ui = ui, server = server)
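# Analytic cross-check (independent sketch, not part of the app): with prevalence r1
# and sensitivity = specificity = r2, Bayes' rule gives
# NPV = ((1-r1)*r2) / ((1-r1)*r2 + r1*(1-r2))
# e.g. r1 = 0.05, r2 = 0.95 -> 0.9025/0.905, about 99.72%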
|
76cf8f917ab69af2a922a51d70f86e843bc197a0
|
0ea92a0e1eace26312972c5d2be22ae49b59c99c
|
/R/wsp/wsp2020/l90_and_l30_year_comparison.R
|
8691e7cb0072c53ed6debb9661c87b812413afbb
|
[] |
no_license
|
HARPgroup/vahydro
|
852425ccf9271ebe2ff95fb6b48d8b7eb23e473f
|
73ea02207eee96f574f24db95fad6a2f6a496e69
|
refs/heads/master
| 2023-09-04T00:11:29.732366
| 2023-09-01T18:13:46
| 2023-09-01T18:13:46
| 147,414,383
| 0
| 0
| null | 2023-09-08T13:52:26
| 2018-09-04T20:37:13
|
R
|
UTF-8
|
R
| false
| false
| 3,271
|
r
|
l90_and_l30_year_comparison.R
|
# Upper and Middle Potomac cia table for model debugging
# where is the extra water coming from in 2040 baseline flows?
library("sqldf")
library("stringr") #for str_remove()
# Load Libraries
basepath='/var/www/R';
site <- "http://deq2.bse.vt.edu/d.dh" #Specify the site of interest, either d.bet OR d.dh
source("/var/www/R/config.local.private");
source(paste(basepath,'config.R',sep='/'))
source(paste(hydro_tools_location,'/R/om_vahydro_metric_grid.R', sep = ''));
folder <- "C:/Workspace/tmp/"
df <- data.frame(
'model_version' = c('vahydro-1.0', 'vahydro-1.0'),
'runid' = c('runid_11', 'runid_11'),
'runlabel' = c('L90 Year', 'L30 Year'),
'metric' = c('l90_year', 'l30_year')
)
wshed_data <- om_vahydro_metric_grid(metric, df)
dsame <- sqldf(
'select count(*)
from wshed_data as a
where L90_Year = L30_Year
')
ddiff <- sqldf(
'select count(*)
from wshed_data as a
where L90_Year <> L30_Year
')
print(
paste(
"# of watersheds with L90 and L30 occuring in same year =", dsame,
"and", ddiff, "watersheds had L90 and L30 in different years."
)
)
df <- data.frame(
'model_version' = c('vahydro-1.0', 'vahydro-1.0', 'vahydro-1.0', 'vahydro-1.0', 'vahydro-1.0', 'vahydro-1.0'),
'runid' = c('runid_11', 'runid_13', 'runid_11', 'runid_13', 'runid_11', 'runid_13'),
'runlabel' = c('l30_2020', 'l30_2040', 'L90_2020', 'L90_2040', 'wdc_2020', 'wdc_2040'),
'metric' = c('l30_Qout', 'l30_Qout','l90_Qout','l90_Qout', 'wd_cumulative_mgd', 'wd_cumulative_mgd')
)
wshed_data <- om_vahydro_metric_grid(metric, df)
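# NOTE: the join below assumes a `da_data` table (drainage areas) loaded by an earlier step not shown here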
wshed_data <- sqldf(
"select a.*, b.da
from wshed_data as a
left outer join da_data as b
on (a.pid = b.pid)
order by da
")
# filter on watershed major/minor basin
# where hydrocode like 'vahydrosw_wshed_P%'
# and hydrocode not like 'vahydrosw_wshed_PL%'
wshed_data$dl30 <- round((wshed_data$l30_2040 - wshed_data$l30_2020) / wshed_data$l30_2020,4)
wshed_data$dl90 <- round((wshed_data$L90_2040 - wshed_data$L90_2020) / wshed_data$L90_2020,4)
wshed_case <- sqldf(
"select * from
wshed_data
where
hydrocode not like '%0000'
"
)
wshed_wu <- sqldf(
"select * from
wshed_data
where
hydrocode not like '%0000'
and wdc_2020 > 0
and wdc_2040 > 0
"
)
ql90_all <- quantile(wshed_case$dl90, probs = c(0, 0.01,0.05, 0.1, 0.25, 0.5), na.rm=TRUE)
ql30_all <- quantile(wshed_case$dl30, probs = c(0, 0.01,0.05, 0.1, 0.25, 0.5), na.rm=TRUE)
ql90_haswd <- quantile(wshed_wu$dl90, probs = c(0, 0.01,0.05, 0.1, 0.25, 0.5), na.rm=TRUE)
ql30_haswd <- quantile(wshed_wu$dl30, probs = c(0, 0.01,0.05, 0.1, 0.25, 0.5), na.rm=TRUE)
ql_table <- as.data.frame(rbind(ql90_all, ql30_all, ql90_haswd, ql30_haswd))
names(ql_table) <- c("min", "p01", "p05", "p10", "p25", "p50") # assumed labels matching the probs above
table_tex <- kable(ql_table,align = "l", booktabs = T,format = "latex",longtable =T,
caption = "Unmet Demand Summaries, 2020 versus 2040 demands.",
label = "Unmet Demand Summary") %>%
kable_styling(latex_options = "striped") %>%
column_spec(2, width = "12em")
table_tex <- gsub(pattern = "{table}[t]",
repl = "{table}[H]",
x = table_tex, fixed = T )
table_tex %>%
cat(., file = paste0(export_path,"\\unmet_summary_tbl.tex"),sep="")
|
df6561ac59719679b51f49364f2dfe2c6803fa17
|
eee92b835939722ba33fe8244d3953cd7625f40d
|
/api_server.R
|
2835a8f4f3caf5db74be397c50ec98dfe842c452
|
[] |
no_license
|
padamu1/R_api_server
|
77695001044fd55d3e7413a22e3d758681aead1a
|
4571c813aa8831cd1843440ad4af7e8d0eba02f4
|
refs/heads/master
| 2022-12-02T08:42:10.359212
| 2020-08-19T11:04:47
| 2020-08-19T11:04:47
| 288,114,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 61
|
r
|
api_server.R
|
library(plumber)
r <- plumb("./api_test.R")
r$run(port=8000)
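# api_test.R is not included here; a minimal plumber endpoint file of the kind
# it might contain (hypothetical sketch, not the actual file):
#
# #* Echo back the input
# #* @param msg The message to echo
# #* @get /echo
# function(msg = "") {
#   list(msg = paste0("The message is: '", msg, "'"))
# }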
|
3ac040b92f06a3aa26144edb8d535b4b41bd2d7f
|
8c6dcd32f211a7b4dc11fa416460a150ee1be985
|
/tibbles.R
|
11669a167af784e86f0c930393a035e4129ecea3
|
[] |
no_license
|
CSU-con-gen-bioinformatics-2020/unix-play-with-genomic-data-RGCheek
|
39f565f54c2e8d64146f4b84681cc7813fef5811
|
448917e4a461464a593243f96c54b5625bc531be
|
refs/heads/master
| 2020-12-22T17:29:55.324340
| 2020-01-31T04:00:02
| 2020-01-31T04:00:02
| 236,875,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 613
|
r
|
tibbles.R
|
##Playing with tibbles
#30 Jan 2020
library(tidyverse)
#Make sure to be int he directory setwd("C:/Users/Rebecca/Dropbox/Course Work/Spring 2020/ConGen/unix-play-with-genomic-data-RGCheek")
# comment tells it to ignore the comments from sam tools, which all starts with @
sammy <- read_tsv("sam/DPCh_plate1_A05_S5.sam", comment="@", col_names = FALSE) %>%
view(sammy) #always pipe into the view to make sure the data makes sense
print(sammy)
#So pretty!
#sam files don't have collumn names, so tibble just assigned some.
#ask if there are any issues with reading in the data.
problems(sammy)
|
5758d3dfc4e140de413b9604e2efbac6c940827c
|
420a484effad4d24619e2981c034186a3fcb9811
|
/man/gse4335pheno.Rd
|
d7cc6d99327cef01ad099fd3237e2e4566683340
|
[] |
no_license
|
cran/survJamda.data
|
224f9f0f177879188caee3ecb9da94a6767fb8ec
|
1bc198461f0eddd69cfd67c4c759615f7eec73c5
|
refs/heads/master
| 2021-01-01T05:42:03.129385
| 2015-05-01T00:00:00
| 2015-05-01T00:00:00
| 17,725,745
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,713
|
rd
|
gse4335pheno.Rd
|
\name{gse4335pheno}
\alias{gse4335pheno}
\docType{data}
\title{
Phenotype data of GSE4335.
}
\description{
Data such as patients' information, tumor characteristics, samples information related to GSE4335.
}
\usage{data(gse4335pheno)}
\format{
A data frame with 115 observations on the following 17 variables.
\describe{
\item{\code{Array_ID}}{a factor with levels \code{shac091} \code{shac092} \code{shac093} \code{shac100} \code{shac107} \code{shac110} \code{shac112} \code{shac113} \code{shaz104} \code{shaz110} \code{shaz112} \code{shaz114} \code{shaz116} \code{shaz121} \code{shaz124} \code{shaz125} \code{shaz126} \code{shaz129} \code{shaz131} \code{shaz132} \code{shaz133} \code{shaz134} \code{shaz135} \code{shaz140} \code{shbg109} \code{shbg110} \code{shbg128} \code{shby020} \code{shby021} \code{shby022} \code{shby028} \code{shby033} \code{shby035} \code{shby037} \code{shby038} \code{shby039} \code{shby040} \code{shby041} \code{shby042} \code{shby043} \code{shby046} \code{shby049} \code{shby050} \code{shby051} \code{shby236} \code{shby245} \code{shby249} \code{svcc100} \code{svcc101} \code{svcc104} \code{svcc105} \code{svcc106} \code{svcc107} \code{svcc1077} \code{svcc108} \code{svcc111} \code{svcc1114} \code{svcc115} \code{svcc118} \code{svcc119} \code{svcc120} \code{svcc122} \code{svcc124} \code{svcc130} \code{svcc131} \code{svcc132} \code{svcc134} \code{svcc137} \code{svcc51} \code{svcc53} \code{svcc55} \code{svcc61} \code{svcc68} \code{svcc76} \code{svcc78} \code{svcc81} \code{svcc83} \code{svcc84} \code{svcc87} \code{svcc88} \code{svcc89} \code{svcc92} \code{svcc93} \code{svcc96} \code{svcc98} \code{svcc99} \code{svj104} \code{svl002} \code{svl003} \code{svl006} \code{svl007} \code{svl012} \code{svl015} \code{svl016} \code{svl018} \code{svl020} \code{svl022} \code{svl026} \code{svl027} \code{svl029} \code{svl031} \code{svl033} \code{svl034} \code{svl035} \code{svl036} \code{svl037} \code{svl039} \code{svl041} \code{svl103} \code{svl106} \code{svl108} \code{svl109} \code{svl110} \code{svn007} \code{svn015}}
\item{\code{Patient_ID}}{a factor with levels \code{new_york_1} \code{New_York_2} \code{New_York_3} \code{Norway_06FU} \code{Norway_10} \code{Norway_100} \code{Norway_101} \code{Norway_102} \code{Norway_104} \code{Norway_109} \code{Norway_11} \code{Norway_111} \code{Norway_112} \code{Norway_12} \code{Norway_14} \code{Norway_15} \code{Norway_16} \code{Norway_17} \code{Norway_18} \code{Norway_19} \code{Norway_2} \code{Norway_21} \code{Norway_22} \code{Norway_24} \code{Norway_26} \code{Norway_27} \code{Norway_29} \code{Norway_32} \code{Norway_37} \code{Norway_39} \code{Norway_4} \code{Norway_41} \code{Norway_43} \code{Norway_47} \code{Norway_48} \code{Norway_5} \code{Norway_51} \code{Norway_53} \code{Norway_55} \code{Norway_56} \code{Norway_57} \code{Norway_6} \code{Norway_61} \code{Norway_63} \code{Norway_64} \code{Norway_65} \code{Norway_7} \code{Norway_74} \code{Norway_75} \code{Norway_8} \code{Norway_80} \code{Norway_81} \code{Norway_83} \code{Norway_85} \code{Norway_90} \code{Norway_92} \code{Norway_95} \code{Norway_96} \code{Norway_98} \code{Norway_FU01} \code{Norway_FU02} \code{Norway_FU04} \code{Norway_FU05} \code{Norway_FU07} \code{Norway_FU08} \code{Norway_FU09} \code{Norway_FU10} \code{Norway_FU11} \code{Norway_FU12} \code{Norway_FU14} \code{Norway_FU15} \code{Norway_FU16} \code{Norway_FU17} \code{Norway_FU18} \code{Norway_FU19} \code{Norway_FU20} \code{Norway_FU22} \code{Norway_FU23} \code{Norway_FU24} \code{Norway_FU25} \code{Norway_FU26} \code{Norway_FU27} \code{Norway_FU29} \code{Norway_FU30} \code{Norway_FU35} \code{Norway_FU37} \code{Norway_FU39} \code{Norway_FU40} \code{Norway_FU41} \code{Norway_FU43} \code{Norway_FU44} \code{Norway_FU45} \code{Norway_H2} \code{Norway_H3} \code{Norway_H4} \code{Norway_H5} \code{Norway_H6} \code{Stanford_14} \code{Stanford_16} \code{Stanford_17} \code{Stanford_18} \code{Stanford_2} \code{Stanford_23} \code{Stanford_24} \code{Stanford_31} \code{Stanford_35} \code{Stanford_38} \code{Stanford_4} \code{Stanford_40} \code{Stanford_44} \code{Stanford_45} \code{Stanford_46} \code{Stanford_48} \code{Stanford_6} \code{Stanford_A}}
\item{\code{Sample_ID}}{a factor with levels \code{BC102B-BE} \code{BC104A-BE} \code{BC105A-BE} \code{BC106B-BE} \code{BC107B-BE} \code{BC108A-BE} \code{BC110B-BE} \code{BC111A-BE} \code{BC111B-BE} \code{BC112B-BE} \code{BC114A-BE} \code{BC115B-BE} \code{BC116A-BE} \code{BC117A-BE} \code{BC118B-BE} \code{BC119A-BE} \code{BC120A-BE} \code{BC121B-BE} \code{BC123B-BE} \code{BC124A-BE} \code{BC1257} \code{BC125A-BE} \code{BC1369} \code{BC14} \code{BC16} \code{BC17} \code{BC18} \code{BC2} \code{BC201B-BE} \code{BC205A-BE} \code{BC206A-BE} \code{BC208A-BE} \code{BC210B-AF} \code{BC213B-BE} \code{BC214B-BE} \code{BC23} \code{BC24} \code{BC303B-BE} \code{BC305A-BE} \code{BC307B-BE} \code{BC308B-BE} \code{BC309A-BE} \code{BC31-0} \code{BC35-0} \code{BC38} \code{BC40} \code{BC402B-BE} \code{BC404B-BE} \code{BC405A-BE} \code{BC406A-2ndTUMOR} \code{BC44} \code{BC45} \code{BC46-LN46} \code{BC48-0} \code{BC4-LN4} \code{BC503B-BE} \code{BC6} \code{BC601A-BE} \code{BC605B-BE} \code{BC606B-AF} \code{BC608B-BE} \code{BC610A-BE} \code{BC702B-BE} \code{BC703B-BE} \code{BC704B-AF} \code{BC706A-BE} \code{BC708B-BE} \code{BC709B-BE} \code{BC710A-BE} \code{BC711B-BE} \code{BC713A-BE} \code{BC790} \code{BC805A-BE} \code{BC807A-BE} \code{BC808A-BE} \code{BC-A} \code{BC-HBC2} \code{BC-HBC3} \code{BC-HBC4-T1} \code{BC-HBC5} \code{BC-HBC6} \code{FU_01-BE} \code{FU_02-BE} \code{FU_04-BE} \code{FU_05-BE} \code{FU_06-BE} \code{FU_07-BE} \code{FU_08-BE} \code{FU_09-BE} \code{FU_10-BE} \code{FU_11-BE} \code{FU_12-BE} \code{FU_14-BE} \code{FU_15-BE} \code{FU_16-BE} \code{FU_17-AF} \code{FU_18-BE} \code{FU_19-BE} \code{FU_20-BE} \code{FU_22-BE} \code{FU_23-BE} \code{FU_24-BE} \code{FU_25-BE} \code{FU_26-BE} \code{FU_27-BE} \code{FU_29-BE} \code{FU_30-AF} \code{FU_35-BE} \code{FU_37-BE} \code{FU_39-BE} \code{FU_40-BE} \code{FU_41-BE} \code{FU_43-BE} \code{FU_44-BE} \code{FU_45-BE}}
\item{\code{Age_at_diagnosis}}{a numeric vector}
\item{\code{X.Status_0.A._1.AWD._2.DOD._3.DOC.}}{a numeric vector}
\item{\code{Overall_survival_.months.}}{a numeric vector}
\item{\code{Relapse.free_survival_.months.}}{a numeric vector}
\item{\code{X._ER_status_.0.neg._1.pos..}}{a factor with levels \code{0} \code{1} \code{na}}
\item{\code{T_.tumor_size.}}{a factor with levels \code{1} \code{2} \code{3} \code{4} \code{na}}
\item{\code{N_.node_status.}}{a factor with levels \code{0} \code{1} \code{2} \code{na} \code{x}}
\item{\code{M_.metastasis.}}{a numeric vector}
\item{\code{Grade}}{a factor with levels \code{1} \code{2} \code{3} \code{na}}
\item{\code{Histology}}{a factor with levels \code{DCIS} \code{Ductal} \code{Lobular} \code{Mucinous} \code{Papillary} \code{Pleomorph} \code{Undifferentiated}}
\item{\code{reference_sample_batch_ID_}}{a factor with levels \code{CRA} \code{CRB} \code{CRD} \code{CRF} \code{CRG}}
\item{\code{Microarray_batch..genes}}{a factor with levels \code{shac-23k} \code{shaz-49k} \code{shbg-49k} \code{shby-43k} \code{svcc-8k} \code{svj-8k} \code{svl-8k} \code{svn-8k}}
\item{\code{Comments}}{a factor}
\item{\code{Previously_published}}{a factor with levels \code{no} \code{yes}}
}
}
\source{
\url{http://genome-www.stanford.edu/breast_cancer/robustness/data/SupplTable2.xls}
}
\references{
Yasrebi H, Sperisen P, Praz V, Bucher P (2009) Can Survival Prediction Be Improved By Merging Gene Expression Data Sets? PLoS ONE 4(10): e7431. doi:10.1371/journal.pone.0007431
}
\seealso{
\code{\link{gse4335}}
}
\examples{
data(gse4335pheno)
}
\keyword{datasets}
|
3dd278f3086fbc041d332d409a3dae74af179a2c
|
b763aa436f5b1e4038ccdc035d7c1db2e8dc4121
|
/FigS1_Grb2/2022_06_21_Grb2.R
|
3e00551d6e080703e152b7f2779f33bc72d9bf2c
|
[] |
no_license
|
toettchlab/Farahani2022
|
a1194f652a40f275502074972a8efd0ded8fbceb
|
4f33d26dabef0e394a1fa7cf8297f8dbf9808ad2
|
refs/heads/main
| 2023-05-31T22:18:19.536421
| 2022-07-25T17:54:45
| 2022-07-25T17:54:45
| 517,766,459
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,404
|
r
|
2022_06_21_Grb2.R
|
library(tidyverse)
library(grid)
library(gridExtra)
library(reshape2)
library(xlsx)
rm(list = ls())
##########################################################################
# enter parameters
frame_pre <- 10; # final frame before ligand addition
time_ligand <- 9; # ligand added after this time point
##########################################################################
# gather files
setwd("/Users/payamfarahani/Documents/Python_R/RTK BIOSENSORS/Grb2/2022_06_21_Grb2")
ZtSH2_names <- list.files(pattern="*ZtSH2.csv");
ZtSH2_list <- lapply(ZtSH2_names, read.csv);
ZtSH2_names <- t(str_replace(ZtSH2_names,".csv",""));
Grb2_names <- list.files(pattern="*Grb2.csv");
Grb2_list <- lapply(Grb2_names, read.csv);
Grb2_names <- t(str_replace(Grb2_names,".csv",""));
# create lists for dataframes
list_raw <- list();
list_abs_decrease <- list();
list_rate_of_change <- list();
list_half_life <- list();
list <- list();
##########################################################################
# data analysis
for (i in seq_along(ZtSH2_names)) {
# raw values
matrix_ZtSH2 <- ZtSH2_list[[i]];
colnames(matrix_ZtSH2) <- str_replace_all(colnames(matrix_ZtSH2), "cell.", "");
matrix_Grb2 <- Grb2_list[[i]];
colnames(matrix_Grb2) <- str_replace_all(colnames(matrix_Grb2), "cell.", "");
# melt raw reporter values matrix and classify observations by condition & date
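# file names are assumed to follow <condition>_<YYYYMMDD>_..._<reporter>.csv;
# the str_extract() calls below pull condition, acquisition date and reporter
# out of each file name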
matrix_ZtSH2_melt <- melt(matrix_ZtSH2,id = c("time"));
colnames(matrix_ZtSH2_melt) <- c("time", "cell", "reporter_raw");
matrix_ZtSH2_melt <- mutate(matrix_ZtSH2_melt,
condition = str_replace(str_extract(ZtSH2_names[[i]],"[:alnum:]*_"),"_",""),
date = str_extract(ZtSH2_names[[i]],"[:digit:]{8}"),
reporter = str_replace_all(str_extract(ZtSH2_names[[i]],"_[:alnum:]{4}2"),"_",""));
matrix_Grb2_melt <- melt(matrix_Grb2,id = c("time"));
colnames(matrix_Grb2_melt) <- c("time", "cell", "reporter_raw");
matrix_Grb2_melt <- mutate(matrix_Grb2_melt,
condition = str_replace(str_extract(Grb2_names[[i]],"[:alnum:]*_"),"_",""),
date = str_extract(Grb2_names[[i]],"[:digit:]{8}"),
reporter = str_replace_all(str_extract(Grb2_names[[i]],"_[:alnum:]{3}2"),"_",""));
# combine matrices for ZtSH2 and Grb2
matrix <- bind_rows(matrix_ZtSH2_melt,matrix_Grb2_melt, .id = NULL)
# normalize time to point of ligand addition
matrix["time"] <- matrix["time"] - time_ligand;
# calculate reporter response (computed for both ZtSH2 and Grb2 via the reporter grouping)
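# each trace is normalised to its own pre-ligand baseline (the mean of the first
# frame_pre frames); reporter_abs_decrease re-expresses the drop as a percentage,
# e.g. a raw value at 80% of baseline gives reporter_norm_max = 0.8 and
# reporter_abs_decrease = 20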
matrix <- matrix %>%
group_by(cell,reporter) %>%
mutate(reporter_norm_max = reporter_raw / mean(head(reporter_raw,frame_pre)),
reporter_abs_decrease = (reporter_norm_max - 1) * -100);
list[[i]] <- matrix;
print(str_c(ZtSH2_names[[i]]," done"))
}
output <- bind_rows(list); # rbind_list() has been removed from dplyr; bind_rows() is the current equivalent
##########################################################################
# plot themes
theme_black <- theme_classic() +
theme(legend.position="right",
legend.background = element_rect(fill = 'black', colour = 'black'),
legend.text = element_text(colour="white"),
plot.background = element_rect(fill = 'black', colour = 'black'),
panel.background = element_rect(fill = 'black', colour = 'black'),
axis.text = element_text(colour="white"),
axis.title = element_text(colour="white"),
axis.line = element_line(colour="white"),
text = element_text(family = "Arial")) +
theme(aspect.ratio=1)
theme_white <- theme_classic() +
theme(legend.position="right",
legend.background = element_rect(fill = 'white', colour = 'white'),
legend.text = element_text(colour="black"),
plot.background = element_rect(fill = 'white', colour = 'white'),
panel.background = element_rect(fill = 'white', colour = 'white'),
axis.text = element_text(colour="black"),
axis.title = element_text(colour="black"),
axis.line = element_line(colour="black"),
text = element_text(family = "Arial")) +
theme(aspect.ratio=1)
##########################################################################
# activity vs. time (cd3e)
dat <- filter(output, condition == "cd3e" & time <=15) %>%
group_by(time,condition,reporter,date) %>%
summarise(mean = mean(reporter_abs_decrease))
dat <- dat %>%
group_by(time,condition,reporter) %>%
summarise(mean_rep = mean(mean),
sd = sd(mean))
p1 <- ggplot() +
geom_ribbon(data=dat, aes(x = time,
ymin = mean_rep - sd,
ymax = mean_rep + sd,
group = reporter,
fill = reporter),
alpha=0.2) +
geom_line(data=dat, aes(x = time, y = mean_rep,group = reporter, color=reporter)) +
scale_x_continuous(expand = c(0, 0), limits = c(-2, 15)) +
scale_y_continuous(expand = c(0, 0), limits = c(-10, 80)) +
labs(x = "time (min)", y = "RTK activity")
p1 + theme_white
##########################################################################
# activity vs. time (wt)
dat <- filter(output, condition == "wt" & time <=15) %>%
group_by(time,condition,reporter,date) %>%
summarise(mean = mean(reporter_abs_decrease))
dat <- dat %>%
group_by(time,condition,reporter) %>%
summarise(mean_rep = mean(mean),
sd = sd(mean))
p1 <- ggplot() +
geom_ribbon(data=dat, aes(x = time,
ymin = mean_rep - sd,
ymax = mean_rep + sd,
group = reporter,
fill = reporter),
alpha=0.2) +
geom_line(data=dat, aes(x = time, y = mean_rep,group = reporter, color=reporter)) +
scale_x_continuous(expand = c(0, 0), limits = c(-2, 15)) +
scale_y_continuous(expand = c(0, 0), limits = c(-10, 80)) +
labs(x = "time (min)", y = "RTK activity")
p1 + theme_white
##########################################################################
# boxplot
dat <- filter(output,time == 5)
x = c("ZtSH2", "Grb2")
dat$reporter <- factor(dat$reporter,
levels = x)
p1 <- ggplot(dat, aes(x = interaction(condition,reporter), y = reporter_abs_decrease, fill = reporter)) +
geom_boxplot(colour = "black") +
labs(x = "reporter", y = "norm. cyt. intensity")
p1 + theme_white
|
3eb25116720a3a9e10471432ccc7e49352d11562
|
854f48870852808b76f821c4f607437b5a3818b9
|
/uebungserie-10/uebung10-recherche.R
|
21293f4ba256e4725a39842715cc4da89739ebc9
|
[] |
no_license
|
spip333/statistik-uebungen
|
6462f959f69f349bb55187f7ac6219a12d02e0ba
|
51a6965e8963392b7efc866b12b7f2097171d01a
|
refs/heads/master
| 2020-04-10T16:28:08.346739
| 2019-01-22T16:12:46
| 2019-01-22T16:12:46
| 161,146,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,295
|
r
|
uebung10-recherche.R
|
###########################
# Recherche Uebung 10
###########################
?lm
head(mtcars)
y <- mtcars$wt
x <- mtcars$hp
plot(x, y)
# model wt as a function of hp
wt.hp.model <- lm(wt ~ hp, data = mtcars)
wt.hp.model$coefficients
plot(wt.hp.model)
coeffs <- coefficients(wt.hp.model)
coeffs
est.wt <- coeffs[1]+ 200 * coeffs[2]
est.wt
max(mtcars$hp)
?mtcars
plot(x, y)
abline(coeffs[1], coeffs[2]) # regression line; the original called lines() with an undefined x2
?plot
# weight at 200 hp:
coeffs[1] + 200 * coeffs[2]
paste("a", "b", "c")
newdata <- data.frame(hp=200)
predict(wt.hp.model,newdata)
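# predict() on new data reproduces the manual coeffs[1] + 200 * coeffs[2] estimate above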
hps <- c(110)
newdata <- data.frame(hp=hps)
predict(wt.hp.model,newdata)
head(mtcars)
wt.hp.model$fitted.values # "model" was undefined; use the fitted object
summary(wt.hp.model)$r.squared
# coefficient of determination (R squared)
eruption.lm <- lm(eruptions ~ waiting, data=faithful)
summary(eruption.lm)
# significance test
summary(eruption.lm)
## Normally distributed residuals
# Consider the linear model that models a car's weight as a function
# of its engine power. Create a normal probability plot
# and test whether the residuals are normally distributed.
plot(wt.hp.model, which=2)
x <- c(1,2,3,4,5,6,7,8,9,10)
y <- (x * 3) + 2
data <- data.frame(x = x, y = y) # as.data.frame(x, y) wrongly passed y as row.names
data
plot(x, y)
plot(x, y, type = "l") # type must be "l"; "line" is invalid, and x2 was undefined
mod <- lm(y~x, data=data)
mod
plot(mod, which=3)
|
3501dac0ee1b3c8f11b288528566a5d8d627ace6
|
2e33a31aedf695ee5be6535aaee65ddc121a41f2
|
/plot3.R
|
bd055cb80f949ffafa84a3b02da3afe93e649f96
|
[] |
no_license
|
rickosborne/ExData_Plotting1
|
883cdc36234bca80abed07ae3219c708f30671c7
|
77b299d428be0437f69488deff5b6aec10ab4830
|
refs/heads/master
| 2021-01-17T23:38:26.395575
| 2014-05-11T19:29:38
| 2014-05-11T19:29:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,068
|
r
|
plot3.R
|
#!/opt/local/bin/Rscript
# Original code by Rick Osborne
library(chron)
library(data.table)
source("get_power_data.R")
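# get_power_data.R is assumed to define DATA_FILE (the path to the raw
# semicolon-separated household power consumption file read below)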
OUT_FILE <- "plot3.png"
all_data <- fread(DATA_FILE, sep=";", na.strings="?")
relevant <- all_data[Date=="1/2/2007"|Date=="2/2/2007"]
all_data <- NULL
relevant[,DateTime:=chron(Date,Time, format=c(dates="d/m/Y",times="h:m:s"))]
# relevant[,GAP:=as.numeric(Global_active_power)]
relevant[,SM1:=as.numeric(Sub_metering_1)]
relevant[,SM2:=as.numeric(Sub_metering_2)]
relevant[,SM3:=as.numeric(Sub_metering_3)]
png(filename=OUT_FILE, width=480, height=480, units="px")
par(mar=c(2,4,1,2))
with(relevant, plot(DateTime, SM1, type="n", xlab="", ylab="Energy sub metering", xaxt="n")) # use the numeric SM1 column created above
axis.Date(1, at=relevant$DateTime, format="%a")
with(relevant, lines(DateTime, SM1, type="l", col="black"))
with(relevant, lines(DateTime, SM2, type="l", col="red"))
with(relevant, lines(DateTime, SM3, type="l", col="blue"))
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), pch="_")
dev.off()
|
ac7e169220befcfa59c9bc153cac5e6d0458babe
|
d1cb58172c4e0ee75bb05269ed1eea00b27465f6
|
/man/has_email_addresses.Rd
|
2ca195cedbce439e20b68e4901c2ed9d26762e72
|
[] |
no_license
|
cran/detector
|
3a996d7b8f2ed19a9d8228c55c73ffa3c7f5311e
|
596561d22b479e09bc1ddfa01f45f8bacca0e583
|
refs/heads/master
| 2021-01-10T13:17:22.004948
| 2015-08-28T01:13:37
| 2015-08-28T01:13:37
| 48,078,917
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 577
|
rd
|
has_email_addresses.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/email_addresses.R
\name{has_email_addresses}
\alias{has_email_addresses}
\title{Test if a character vector has any e-mail addresses.}
\usage{
has_email_addresses(.x)
}
\arguments{
\item{.x}{A character vector.}
}
\value{
A logical value indicating if that string has any e-mail addresses.
}
\description{
Test if a character vector has any e-mail addresses.
}
\examples{
# Examples
has_email_addresses("hello") # FALSE
has_email_addresses("hello@world.edu") # TRUE
}
|
71c9c66d494ed9a7568a060ffb3ffb8a08955acd
|
804019e5175b5637b7cd16cd8ff5914671a8fe4e
|
/gmse_06beta_landplacement_test.R
|
621b039f9758034ff4cbd2c2e98cfd1f92e19547
|
[] |
no_license
|
jejoenje/gmse_vary
|
dde8573adeff9d9c6736a7a834a0a8942c5fcc5d
|
057a36896b3b77a0a3e56d1436abe7a45e38cc2d
|
refs/heads/master
| 2022-11-28T11:31:54.294640
| 2020-08-03T10:12:42
| 2020-08-03T10:12:42
| 197,375,284
| 0
| 0
| null | 2019-12-10T21:08:45
| 2019-07-17T11:21:27
|
HTML
|
UTF-8
|
R
| false
| false
| 1,972
|
r
|
gmse_06beta_landplacement_test.R
|
library(GMSE)
### All of these only run with 100x100 default landscape dimensions
### 8 stakeholders, no public land:
test1 = gmse(time_max = 2, land_ownership = T, stakeholders = 8, plotting = FALSE, ownership_var = 0.9)
image(test1$land[[1]][,,3])
table(test1$land[[1]][,,3])/sum(table(test1$land[[1]][,,3]))
ov = seq(0,0.95,0.05)
out8 = as.data.frame(NULL)
stakeholders = 8
for(i in 1:length(ov)) {
test = gmse(time_max = 2, land_ownership = T, stakeholders = stakeholders, plotting = FALSE, ownership_var = ov[i])
out8 = rbind(out8, c(min(table(test$land[[1]][,,3])),max(table(test$land[[1]][,,3])),sum(table(test$land[[1]][,,3]))))
}
names(out8) = c("lmin","lmax","ltotal")
out16 = as.data.frame(NULL)
stakeholders = 16
for(i in 1:length(ov)) {
test = gmse(time_max = 2, land_ownership = T, stakeholders = stakeholders, plotting = FALSE, ownership_var = ov[i])
out16 = rbind(out16, c(min(table(test$land[[1]][,,3])),max(table(test$land[[1]][,,3])),sum(table(test$land[[1]][,,3]))))
}
names(out16) = c("lmin","lmax","ltotal")
out32 = as.data.frame(NULL)
stakeholders = 32
for(i in 1:length(ov)) {
test = gmse(time_max = 2, land_ownership = T, stakeholders = stakeholders, plotting = FALSE, ownership_var = ov[i])
out32 = rbind(out32, c(min(table(test$land[[1]][,,3])),max(table(test$land[[1]][,,3])),sum(table(test$land[[1]][,,3]))))
}
names(out32) = c("lmin","lmax","ltotal")
out8$rratio = (out8$lmax-out8$lmin)/out8$ltotal
out16$rratio = (out16$lmax-out16$lmin)/out16$ltotal
out32$rratio = (out32$lmax-out32$lmin)/out32$ltotal
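# rratio is a simple inequality measure: the spread between the largest and
# smallest holding relative to the total number of owned cells; higher
# ownership_var should push it up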
par(mfrow=c(1,1))
yl = max(c(out8$rratio, out16$rratio, out32$rratio))
plot(out8$rratio, type = "l", ylim = c(0,yl))
lines(out16$rratio, col = "red")
lines(out32$rratio, col = "blue") # distinct colour; the original reused red for both out16 and out32
### Tests with public land:
test2 = gmse(time_max = 2, land_ownership = T, stakeholders = 4, plotting = FALSE, public_land = 0.2)
image(test2$land[[1]][,,3])
table(test2$land[[1]][,,3])/sum(table(test2$land[[1]][,,3]))
|
b60b69cc6ef3759cdf840542849e5a95d331a267
|
4af8e8b359627ba501c5f19733c00f0aaf14671b
|
/R/item_upload_create.R
|
fb07a2a9fc47eba07e5434dc3573996eef0e58e0
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
arturochian/sbtools
|
11ca06d6e5307879360d51d252ccee00f9c29d9b
|
b1dc9a96cb1c48c310a4427be6502dac26b005c3
|
refs/heads/master
| 2021-01-18T04:20:41.308832
| 2015-01-16T17:29:32
| 2015-01-16T17:29:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 740
|
r
|
item_upload_create.R
|
#'
#'@title Upload file(s) and create a new item
#'
#'@param parent_id SB id of parent item for the newly created item
#'@param files A string vector of paths to files to be uploaded
#'@param session Session object from authenticate_sb
#'
#'
#'@import httr
#'
#'@export
item_upload_create = function(parent_id, files, session){
body = list()
	for(i in seq_along(files)){ # seq_along() handles an empty files vector; 1:length(files) does not
if(!file.exists(files[i])){
stop('This file does not exist or cannot be accessed: ', files[i])
}
body[[paste0('file', i)]] = upload_file(files[i])
}
names(body) = rep('file', length(body))
url = paste0(pkg.env$url_upload_create, parent_id)
r = POST(url, body=body, accept_json(), handle=session, query=list(title='title'))
return(content(r)$id)
}
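
# Hypothetical usage sketch (added; not part of the package source). The
# e-mail address, parent item ID and file name are made-up placeholders:
# session <- authenticate_sb('user@example.com')
# new_item_id <- item_upload_create('4f4e4b24e4b07f02db6aea14', c('data.csv'), session)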
|
f2a6ff15b9550dcb74dae255e4d266d878000441
|
af57194cdf1b15cae89516f7f067f45afda81c96
|
/R/sample.zi.r
|
5513a545fe4badfc30a938f9bf5a9094b67d91ef
|
[] |
no_license
|
cran/iZID
|
1e635ce3945a4532376d1af77a2dd93a596154a5
|
9cfc679b0a69542a0b592006c55f90160f19751e
|
refs/heads/master
| 2020-12-22T00:38:30.712072
| 2019-11-06T11:40:02
| 2019-11-06T11:40:02
| 236,616,660
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,800
|
r
|
sample.zi.r
|
#'Generate random deviates for zero-inflated models
#' @export
#'
#' @rdname sample.h
sample.zi<-function(N,phi,distri='poisson',lambda=NA,r=NA,p=NA,alpha1=NA,alpha2=NA,n=NA) {
##check the validity of some inputs
distri=tolower(distri)[1] ##so that all letters are in lower case. only use the first value if user inputs a vector but not a scalar.
N=ceiling(N)[1]
phi=phi[1]
if(N<=0)
stop('The sample size N is too small.')
if((phi>=1)|(phi<=0))
stop('phi is not between 0 and 1 (not including 0 and 1).')
if(!(distri%in%c('poisson','nb','bb','bnb')))
stop('please input a distribution name among poisson,nb,bb,bnb.')
if(distri=="poisson"){
lambda=lambda[1]
if(lambda<=0)
stop('lambda is less or equal to 0.')
}
if(distri=="nb"){
r=ceiling(r[1])
p=p[1]
if(r<=0)
stop('r is too small.')
if((p<=0)|(p>=1))
stop('p is not between 0 and 1 (not including 0 and 1).')
}
if((distri=="bb")|(distri=="bnb")){
alpha1=alpha1[1]
alpha2=alpha2[1]
if(alpha1<=0)
stop('alpha1 is less or equal to 0.')
if(alpha2<=0)
stop('alpha2 is less or equal to 0.')
}
if(distri=="bb"){
n=ceiling(n[1])
if(n<=0)
stop('n is too small.')
}
if(distri=="bnb"){
r=ceiling(r[1])
if(r<=0)
stop('r is too small.')
}
ans=stats::rbinom(N,size=1,prob=phi) ##simulate Z_1,Z_2,...,Z_N from Bernoulli(phi).
m=sum(ans==0) ##if Z_i=1, let X_i=0, if Z_i==0, generate new samples from original distribution.
temp1=NULL
if(m>0){
temp1=switch(distri,nb=stats::rnbinom(m,size=r,prob=p),bb=extraDistr::rbbinom(m,n,alpha1,alpha2),bnb=extraDistr::rbnbinom(m,r,alpha1,alpha2),stats::rpois(m,lambda))
}
temp2=ans
temp2[ans==1]=0
temp2[ans==0]=temp1
return(temp2)
}
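
# Hypothetical usage sketch (added; not part of the package source): 100 draws
# from a zero-inflated Poisson with 30% structural zeros and lambda = 2.
# x <- sample.zi(N = 100, phi = 0.3, distri = 'poisson', lambda = 2)
# mean(x == 0) # should exceed the plain Poisson zero mass exp(-2) ~= 0.135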
|
4e6435bdd1211628522b46c0e6d27467f16d745b
|
6141757f22f5f1bd84f699cd805d070e369c050a
|
/man/plot_all.Rd
|
9838251a10fdd87893bb3d06f244ee077ca732d4
|
[] |
no_license
|
cran/densitr
|
78d57d4db0e9139d676ba879a77b2a473bfb9079
|
bc87841123b340ffe1fbcc9662a7d5e5189174d7
|
refs/heads/master
| 2022-04-30T18:16:38.598254
| 2022-03-22T08:00:09
| 2022-03-22T08:00:09
| 253,826,317
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 613
|
rd
|
plot_all.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plot_all}
\alias{plot_all}
\title{Plot a list of dp objects, one by one}
\usage{
plot_all(dp.list)
}
\arguments{
\item{dp.list}{A list of dp objects, see \code{dpload}}
}
\description{
Plot a list of dp objects, one by one. Press any key to move to
the next dp object. Returns nothing.
}
\examples{
## load several dp files
dp.list <- dpload(dp.directory = system.file("extdata", package = "densitr"))
## plot the measurements one by one
\donttest{
if(interactive()){
plot_all(dp.list)
}}
}
\seealso{
dptrim, dptrim_s, dptriml_s
}
|
1ded28d466c5ec677e2dd883a2b740f65dc12675
|
d87ab91651efa9c1b2e49c14948beeb74f364678
|
/R/R code/hw3.3.R
|
5de4d97a04e23bdd6c2f3ee3aed85ed8b166ab60
|
[] |
no_license
|
tonycao/CodeSnippets
|
758fb96addc6965661329cef219d17347d16457b
|
0e82b1496947c62b5ea8ef7ca654f1ce27323608
|
refs/heads/master
| 2020-12-24T15:50:13.397710
| 2016-03-17T16:17:26
| 2016-03-17T16:17:26
| 3,567,949
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 221
|
r
|
hw3.3.R
|
# hw3.3: Newton-Raphson iteration to find a root of f(x) = 3*exp(x) - 4*cos(x)
flag = 0
x0 = 0 # initial guess
eps = .00000001 # convergence tolerance ("esp" in the original was a typo)
while(flag == 0) {
  # Newton step: x = x0 - f(x0)/f'(x0), with f'(x) = 3*exp(x) + 4*sin(x)
  x = x0 - (3*exp(x0) - 4*cos(x0))/(3*exp(x0) + 4*sin(x0))
  tolerance = abs(x - x0)
  if (tolerance <= eps) {
    flag = 1
  } else {
    x0 = x
  }
}
print(x)
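# Sanity check (added): the residual should be approximately 0 at the root.
print(3*exp(x) - 4*cos(x))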
|
54d4244c646b13316f0d8f47b2d903a0bc864616
|
04d77d4648dd89dfd85adf2215061feaef9de996
|
/man/Summary-BASiCS_Chain-method.Rd
|
4e322a6e6471867486f0359eef1228cc62218565
|
[] |
no_license
|
kevinrue/BASiCS
|
af6df97a84688bfd38a56e096462e8eabf386f3d
|
1f5286d0a2108ebe6a415f00a2b2bcba68aa2d5f
|
refs/heads/master
| 2021-01-09T06:01:37.076340
| 2017-02-09T13:27:34
| 2017-02-09T13:27:34
| 80,932,934
| 0
| 0
| null | 2017-02-04T16:45:00
| 2017-02-04T16:45:00
| null |
UTF-8
|
R
| false
| true
| 1,086
|
rd
|
Summary-BASiCS_Chain-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Methods.R
\docType{methods}
\name{Summary}
\alias{Summary}
\alias{Summary,BASiCS_Chain-method}
\title{'Summary' method for BASiCS_Chain objects}
\usage{
\S4method{Summary}{BASiCS_Chain}(x, prob = 0.95)
}
\arguments{
\item{x}{A \code{BASiCS_Chain} object.}
\item{prob}{\code{prob} argument for \code{\link[coda]{HPDinterval}} function.}
}
\value{
An object of class \code{\link[BASiCS]{BASiCS_Summary-class}}.
}
\description{
For each of the BASiCS parameters (see Vallejos et al 2015),
\code{Summary} returns the corresponding postior medians and limits of the high posterior
density interval with probabilty equal to \code{prob}.
}
\examples{
Data = makeExampleBASiCS_Data()
MCMC_Output <- BASiCS_MCMC(Data, N = 50, Thin = 2, Burn = 2)
MCMC_Summary <- Summary(MCMC_Output)
# See documentation of function BASiCS_MCMC
}
\author{
Catalina A. Vallejos \email{catalina.vallejos@mrc-bsu.cam.ac.uk}
}
\references{
Vallejos, Marioni and Richardson (2015). Bayesian Analysis of Single-Cell Sequencing data.
}
|
3b7c18fad290bda2e0b7dfa6e1cbbc6b30e8715e
|
9d6675d5dcc05874e0bd1d0496908bfbb3165502
|
/plot4.R
|
c01fe3a648488653787d83a335d9192873ab2bd5
|
[] |
no_license
|
PiyushDeshmukh/C4_project2_EDA
|
0d6cb93f2785355244c3fc1e7bd33ff1766c58fd
|
098acce45f602dd5b65df83092ed98fce19cb2a1
|
refs/heads/master
| 2021-01-11T15:18:45.240236
| 2017-01-29T03:41:01
| 2017-01-29T03:41:01
| 80,326,821
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 812
|
r
|
plot4.R
|
library(ggplot2)
nei_file_name = "FNEI_data/summarySCC_PM25.rds"
scc_file_name = "FNEI_data/Source_Classification_Code.rds" # SCC lookup table; the original read summarySCC_PM25.rds twice by mistake
NEI <- readRDS(nei_file_name)
SCC <- readRDS(scc_file_name)
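# keep only SCC codes whose level-one description mentions combustion and
# whose level-four description mentions coal, then subset NEI to those codes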
combustion <- grepl("comb", SCC$SCC.Level.One, ignore.case=TRUE)
coal <- grepl("coal", SCC$SCC.Level.Four, ignore.case=TRUE)
coal_combustion <- (combustion & coal)
combustion_SCC <- SCC[coal_combustion,]$SCC
combustion_NEI <- NEI[NEI$SCC %in% combustion_SCC,]
png('plot4.png', width = 480, height = 480)
plt <- ggplot(combustion_NEI, aes(factor(year), Emissions/10^5)) +
geom_bar(stat = "identity", fill = "grey", width = 1) +
labs(x = "Year", y = expression("Total PM"[2.5]*" Emission (10^5 tons)")) +
ggtitle(expression("PM"[2.5]*" Coal Combustion Source Emissions Across US from 1999-2008"))
print(plt)
dev.off()
|