content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
## Auto-extracted example script for productivity::Shadowp (generated from
## the package's Shadowp.Rd help page). The example calls are wrapped in a
## "Not run" guard (the ##D lines), so sourcing this file only attaches the
## productivity package -- nothing else executes.
library(productivity)
### Name: Shadowp
### Title: Shadow prices used in productivity and profitability
### computations
### Aliases: Shadowp
### Keywords: manip
### ** Examples
## Not run:
##D FISHER <- fisher(data = usagri, id.var = "States", time.var = "Years", x.vars = c(7:10),
##D y.vars = c(4:6), w.vars = c(14:17), p.vars = c(11:13), orientation = "out", shadow = TRUE)
##D Fisher.shadowprices <- Shadowp(FISHER)
##D head(Fisher.shadowprices)
## End(Not run)
|
/data/genthat_extracted_code/productivity/examples/Shadowp.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 486
|
r
|
## Auto-extracted example script for productivity::Shadowp (generated from
## the package's Shadowp.Rd help page). The example calls are wrapped in a
## "Not run" guard (the ##D lines), so sourcing this file only attaches the
## productivity package -- nothing else executes.
library(productivity)
### Name: Shadowp
### Title: Shadow prices used in productivity and profitability
### computations
### Aliases: Shadowp
### Keywords: manip
### ** Examples
## Not run:
##D FISHER <- fisher(data = usagri, id.var = "States", time.var = "Years", x.vars = c(7:10),
##D y.vars = c(4:6), w.vars = c(14:17), p.vars = c(11:13), orientation = "out", shadow = TRUE)
##D Fisher.shadowprices <- Shadowp(FISHER)
##D head(Fisher.shadowprices)
## End(Not run)
|
## Summarise cumulative terrestrial C balance changes per Holocene period and
## plot them as a bar chart with error bars.
## Provides (from the Rdata file): df_cum_land_uptake, nruns, and -- when
## do.load is FALSE -- presumably dc_sub as well; TODO confirm file contents.
load('dc_terr_elsig_holocene.Rdata')

## re-define period margins (kyr BP converted to calendar years AD)
periodsBP <- c(-11000, -9000, -7000, -5000, -3000, -1000)
periodsAD <- periodsBP + 1950
periodsName <- c("11-9", "9-7", "7-5", "5-3", "3-1")
periodsBP <- periodsAD - 1950
period_margins <- periodsAD
nper <- length(period_margins) - 1

do.load <- FALSE
if (do.load) {
  ## Create data frame for evaluating periods: period margins (start and end)
  dc_per <- data.frame(
    per_start = period_margins[1:(length(period_margins) - 1)],
    per_end   = period_margins[2:length(period_margins)]
  )
  dc_per$iper_start <- rep(NA, nper)
  dc_per$iper_end   <- rep(NA, nper)

  ## period margin's corresponding index in full (annual) data frame
  for (i in 1:nper) {
    dc_per$iper_start[i] <- which.min(abs(dc_per$per_start[i] - df_cum_land_uptake$year))
    dc_per$iper_end[i]   <- which.min(abs(dc_per$per_end[i] - df_cum_land_uptake$year))
  }

  ## Evaluate cumulative balance change in different periods for each run;
  ## run columns are named r00000, r00001, ... and occupy columns 5:(nruns+4)
  for (n in 1:nruns) {
    numstring <- sprintf("%05d", n - 1)
    colstring <- paste("r", numstring, sep = "")
    dc_per[[colstring]] <- rep(NA, nper)
    for (i in 1:nper) {
      dc_per[[colstring]][i] <- df_cum_land_uptake[[colstring]][dc_per$iper_end[i]] -
        df_cum_land_uptake[[colstring]][dc_per$iper_start[i]]
    }
  }

  ## add column for period "name"
  dc_per$name <- paste(as.character(dc_per$per_start), "-", as.character(dc_per$per_end), sep = "")

  ## Get summary statistics of cumulative balance changes in each period.
  ## BUGFIX: the original used dc_per[,5:nruns+4]. Because ':' binds tighter
  ## than '+', that parses as (5:nruns)+4 = 9:(nruns+4), silently dropping the
  ## first four runs and reading past the run columns. The intended range of
  ## run columns is 5:(nruns+4).
  run_cols <- 5:(nruns + 4)
  dc_per$mean   <- apply(dc_per[, run_cols], 1, FUN = mean)
  dc_per$sd     <- apply(dc_per[, run_cols], 1, FUN = sd)
  dc_per$median <- apply(dc_per[, run_cols], 1, FUN = median)
  dc_per$q90    <- apply(dc_per[, run_cols], 1, FUN = function(x) quantile(x, 0.9))
  dc_per$q10    <- apply(dc_per[, run_cols], 1, FUN = function(x) quantile(x, 0.1))

  ## take only subset of dc_per
  dc_sub <- subset(dc_per, select = c(name, per_start, per_end, iper_start, iper_end, mean, sd, median, q90, q10))
}

## create data frames to be plotted with hist() and errbar()
dc_elsig_outarr <- data.frame(median = dc_sub$median, mean = dc_sub$mean, q10 = dc_sub$q10, q90 = dc_sub$q90, sd = dc_sub$sd)
row.names(dc_elsig_outarr) <- periodsName

## Plot bars for the periods
pdf("/alphadata01/bstocker/holoLU2/cbal_barplot_holocene_holoLU2.pdf", width = 8, height = 6)
par(las = 1)
ylim <- c(-60, 300)
# rect( mybar1[1,], rep(ylim[1],4), mybar1[4,], rep(ylim[2],4), col=rgb(0,0,0,0.2), border=NA )
mybar1 <- barplot(
  t(dc_elsig_outarr$mean),
  ylim = ylim,
  col = c("green3"),
  border = TRUE,
  xlab = "period [kyr BP]"
)
abline(0, 0)
## NOTE(review): errbar() is not base R (Hmisc provides one) -- confirm the
## package is attached by the caller before this script runs.
errbar(
  mybar1,
  t(dc_elsig_outarr$mean),
  t(dc_elsig_outarr$mean - dc_elsig_outarr$sd),
  t(dc_elsig_outarr$mean + dc_elsig_outarr$sd),
  add = TRUE
)

## add grey rectangles shading every other period
left  <- mybar1 - 0.6
right <- mybar1 + 0.6
rect(left[1], ylim[1], right[1], ylim[2], col = rgb(0, 0, 0, 0.1), border = NA)
rect(left[3], ylim[1], right[3], ylim[2], col = rgb(0, 0, 0, 0.1), border = NA)
rect(left[5], ylim[1], right[5], ylim[2], col = rgb(0, 0, 0, 0.1), border = NA)
dev.off()

## NOTE(review): df_land_uptake and dc_elsig_outarrerr are not defined in this
## script -- presumably they come from the loaded Rdata file; verify before
## relying on this save() call.
save(df_land_uptake, dc_elsig_outarr, dc_elsig_outarrerr, dc_per, file = "dc_elsig.Rdata")
|
/eval_cbudget/get_dc_terr_elsig_periods.R
|
no_license
|
stineb/holoLU_lc6k
|
R
| false
| false
| 3,363
|
r
|
## Summarise cumulative terrestrial C balance changes per Holocene period and
## plot them as a bar chart with error bars.
## Provides (from the Rdata file): df_cum_land_uptake, nruns, and -- when
## do.load is FALSE -- presumably dc_sub as well; TODO confirm file contents.
load('dc_terr_elsig_holocene.Rdata')

## re-define period margins (kyr BP converted to calendar years AD)
periodsBP <- c(-11000, -9000, -7000, -5000, -3000, -1000)
periodsAD <- periodsBP + 1950
periodsName <- c("11-9", "9-7", "7-5", "5-3", "3-1")
periodsBP <- periodsAD - 1950
period_margins <- periodsAD
nper <- length(period_margins) - 1

do.load <- FALSE
if (do.load) {
  ## Create data frame for evaluating periods: period margins (start and end)
  dc_per <- data.frame(
    per_start = period_margins[1:(length(period_margins) - 1)],
    per_end   = period_margins[2:length(period_margins)]
  )
  dc_per$iper_start <- rep(NA, nper)
  dc_per$iper_end   <- rep(NA, nper)

  ## period margin's corresponding index in full (annual) data frame
  for (i in 1:nper) {
    dc_per$iper_start[i] <- which.min(abs(dc_per$per_start[i] - df_cum_land_uptake$year))
    dc_per$iper_end[i]   <- which.min(abs(dc_per$per_end[i] - df_cum_land_uptake$year))
  }

  ## Evaluate cumulative balance change in different periods for each run;
  ## run columns are named r00000, r00001, ... and occupy columns 5:(nruns+4)
  for (n in 1:nruns) {
    numstring <- sprintf("%05d", n - 1)
    colstring <- paste("r", numstring, sep = "")
    dc_per[[colstring]] <- rep(NA, nper)
    for (i in 1:nper) {
      dc_per[[colstring]][i] <- df_cum_land_uptake[[colstring]][dc_per$iper_end[i]] -
        df_cum_land_uptake[[colstring]][dc_per$iper_start[i]]
    }
  }

  ## add column for period "name"
  dc_per$name <- paste(as.character(dc_per$per_start), "-", as.character(dc_per$per_end), sep = "")

  ## Get summary statistics of cumulative balance changes in each period.
  ## BUGFIX: the original used dc_per[,5:nruns+4]. Because ':' binds tighter
  ## than '+', that parses as (5:nruns)+4 = 9:(nruns+4), silently dropping the
  ## first four runs and reading past the run columns. The intended range of
  ## run columns is 5:(nruns+4).
  run_cols <- 5:(nruns + 4)
  dc_per$mean   <- apply(dc_per[, run_cols], 1, FUN = mean)
  dc_per$sd     <- apply(dc_per[, run_cols], 1, FUN = sd)
  dc_per$median <- apply(dc_per[, run_cols], 1, FUN = median)
  dc_per$q90    <- apply(dc_per[, run_cols], 1, FUN = function(x) quantile(x, 0.9))
  dc_per$q10    <- apply(dc_per[, run_cols], 1, FUN = function(x) quantile(x, 0.1))

  ## take only subset of dc_per
  dc_sub <- subset(dc_per, select = c(name, per_start, per_end, iper_start, iper_end, mean, sd, median, q90, q10))
}

## create data frames to be plotted with hist() and errbar()
dc_elsig_outarr <- data.frame(median = dc_sub$median, mean = dc_sub$mean, q10 = dc_sub$q10, q90 = dc_sub$q90, sd = dc_sub$sd)
row.names(dc_elsig_outarr) <- periodsName

## Plot bars for the periods
pdf("/alphadata01/bstocker/holoLU2/cbal_barplot_holocene_holoLU2.pdf", width = 8, height = 6)
par(las = 1)
ylim <- c(-60, 300)
# rect( mybar1[1,], rep(ylim[1],4), mybar1[4,], rep(ylim[2],4), col=rgb(0,0,0,0.2), border=NA )
mybar1 <- barplot(
  t(dc_elsig_outarr$mean),
  ylim = ylim,
  col = c("green3"),
  border = TRUE,
  xlab = "period [kyr BP]"
)
abline(0, 0)
## NOTE(review): errbar() is not base R (Hmisc provides one) -- confirm the
## package is attached by the caller before this script runs.
errbar(
  mybar1,
  t(dc_elsig_outarr$mean),
  t(dc_elsig_outarr$mean - dc_elsig_outarr$sd),
  t(dc_elsig_outarr$mean + dc_elsig_outarr$sd),
  add = TRUE
)

## add grey rectangles shading every other period
left  <- mybar1 - 0.6
right <- mybar1 + 0.6
rect(left[1], ylim[1], right[1], ylim[2], col = rgb(0, 0, 0, 0.1), border = NA)
rect(left[3], ylim[1], right[3], ylim[2], col = rgb(0, 0, 0, 0.1), border = NA)
rect(left[5], ylim[1], right[5], ylim[2], col = rgb(0, 0, 0, 0.1), border = NA)
dev.off()

## NOTE(review): df_land_uptake and dc_elsig_outarrerr are not defined in this
## script -- presumably they come from the loaded Rdata file; verify before
## relying on this save() call.
save(df_land_uptake, dc_elsig_outarr, dc_elsig_outarrerr, dc_per, file = "dc_elsig.Rdata")
|
# Plots the cross section (elevation profile) of a raster matrix along the
# straight line between two endpoints, sampling the raster at roughly unit
# spacing.
#
# Args:
#   data   : numeric matrix (raster), indexed as data[row, col]
#   x1, y1 : start point, in matrix-index coordinates
#   x2, y2 : end point, in matrix-index coordinates
#
# Side effects: prints diagnostics and draws two plots (the sampled line in
# plan view, then distance vs. value). Returns, invisibly, a data frame with
# the sampled x, y, z and along-line distance d (backward-compatible addition;
# the original returned the last plot() value).
#
# Fixes vs. original: walking along the unit direction vector handles a
# vertical line (x1 == x2, slope undefined) and lines with x2 < x1, both of
# which the slope-based step formula got wrong; vectors are preallocated
# instead of grown with c() in a loop.
xsec <- function (data, x1, y1, x2, y2){
  d <- sqrt((x2 - x1)^2 + (y2 - y1)^2)
  print(d)
  d.int <- as.integer(d)  # number of unit steps along the line
  print(d.int)

  ## unit direction vector from (x1,y1) toward (x2,y2)
  ux <- (x2 - x1) / d
  uy <- (y2 - y1) / d

  steps <- seq_len(d.int)
  x <- x1 + ux * steps
  y <- y1 + uy * steps
  ## two-column index matrix; fractional indices are truncated, matching the
  ## original data[x[i], y[i]] lookups
  z <- data[cbind(x, y)]

  ## prepend the starting point so the profile begins at distance 0
  x <- c(x1, x)
  y <- c(y1, y)
  z <- c(data[x1, y1], z)
  dist <- c(0, steps)

  print(length(x))
  print(length(y))
  print(length(z))
  print(length(dist))

  par(ask = TRUE)
  plot(x, y, type = "l", xlim = c(0, nrow(data)), ylim = c(0, ncol(data))) #need to get scale 1:1
  plot(dist, z, type = "l")

  invisible(data.frame(x = x, y = y, z = z, d = dist))
}
|
/xsec.R
|
no_license
|
caluchko/r_topo_profile
|
R
| false
| false
| 797
|
r
|
# Plots the cross section (elevation profile) of a raster matrix along the
# straight line between two endpoints, sampling the raster at roughly unit
# spacing.
#
# Args:
#   data   : numeric matrix (raster), indexed as data[row, col]
#   x1, y1 : start point, in matrix-index coordinates
#   x2, y2 : end point, in matrix-index coordinates
#
# Side effects: prints diagnostics and draws two plots (the sampled line in
# plan view, then distance vs. value). Returns, invisibly, a data frame with
# the sampled x, y, z and along-line distance d (backward-compatible addition;
# the original returned the last plot() value).
#
# Fixes vs. original: walking along the unit direction vector handles a
# vertical line (x1 == x2, slope undefined) and lines with x2 < x1, both of
# which the slope-based step formula got wrong; vectors are preallocated
# instead of grown with c() in a loop.
xsec <- function (data, x1, y1, x2, y2){
  d <- sqrt((x2 - x1)^2 + (y2 - y1)^2)
  print(d)
  d.int <- as.integer(d)  # number of unit steps along the line
  print(d.int)

  ## unit direction vector from (x1,y1) toward (x2,y2)
  ux <- (x2 - x1) / d
  uy <- (y2 - y1) / d

  steps <- seq_len(d.int)
  x <- x1 + ux * steps
  y <- y1 + uy * steps
  ## two-column index matrix; fractional indices are truncated, matching the
  ## original data[x[i], y[i]] lookups
  z <- data[cbind(x, y)]

  ## prepend the starting point so the profile begins at distance 0
  x <- c(x1, x)
  y <- c(y1, y)
  z <- c(data[x1, y1], z)
  dist <- c(0, steps)

  print(length(x))
  print(length(y))
  print(length(z))
  print(length(dist))

  par(ask = TRUE)
  plot(x, y, type = "l", xlim = c(0, nrow(data)), ylim = c(0, ncol(data))) #need to get scale 1:1
  plot(dist, z, type = "l")

  invisible(data.frame(x = x, y = y, z = z, d = dist))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ActivateAction.R
\name{ActivateAction}
\alias{ActivateAction}
\title{ActivateAction}
\usage{
ActivateAction(id = NULL, target = NULL, startTime = NULL,
result = NULL, participant = NULL, object = NULL, location = NULL,
instrument = NULL, error = NULL, endTime = NULL, agent = NULL,
actionStatus = NULL, url = NULL, sameAs = NULL,
potentialAction = NULL, name = NULL, mainEntityOfPage = NULL,
image = NULL, identifier = NULL, disambiguatingDescription = NULL,
description = NULL, alternateName = NULL, additionalType = NULL)
}
\arguments{
\item{id}{identifier for the object (URI)}
\item{target}{(EntryPoint type.) Indicates a target EntryPoint for an Action.}
\item{startTime}{(DateTime type.) The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to start. For actions that span a period of time, when the action was performed. e.g. John wrote a book from *January* to December. Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{result}{(Thing type.) The result produced in the action. e.g. John wrote *a book*.}
\item{participant}{(Person or Organization type.) Other co-agents that participated in the action indirectly. e.g. John wrote a book with *Steve*.}
\item{object}{(Thing type.) The object upon which the action is carried out, whose state is kept intact or changed. Also known as the semantic roles patient, affected or undergoer (which change their state) or theme (which doesn't). e.g. John read *a book*.}
\item{location}{(Text or PostalAddress or Place type.) The location of, for example, where the event is happening, an organization is located, or where an action takes place.}
\item{instrument}{(Thing type.) The object that helped the agent perform the action. e.g. John wrote a book with *a pen*.}
\item{error}{(Thing type.) For failed actions, more information on the cause of the failure.}
\item{endTime}{(DateTime type.) The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to end. For actions that span a period of time, when the action was performed. e.g. John wrote a book from January to *December*. Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{agent}{(Person or Organization type.) The direct performer or driver of the action (animate or inanimate). e.g. *John* wrote a book.}
\item{actionStatus}{(ActionStatusType type.) Indicates the current disposition of the Action.}
\item{url}{(URL type.) URL of the item.}
\item{sameAs}{(URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.}
\item{potentialAction}{(Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.}
\item{name}{(Text type.) The name of the item.}
\item{mainEntityOfPage}{(URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.}
\item{image}{(URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].}
\item{identifier}{(URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.}
\item{disambiguatingDescription}{(Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.}
\item{description}{(Text type.) A description of the item.}
\item{alternateName}{(Text type.) An alias for the item.}
\item{additionalType}{(URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.}
}
\value{
a list object corresponding to a schema:ActivateAction
}
\description{
The act of starting or activating a device or application (e.g. starting a timer or turning on a flashlight).
}
|
/man/ActivateAction.Rd
|
no_license
|
cboettig/schemar
|
R
| false
| true
| 5,091
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ActivateAction.R
\name{ActivateAction}
\alias{ActivateAction}
\title{ActivateAction}
\usage{
ActivateAction(id = NULL, target = NULL, startTime = NULL,
result = NULL, participant = NULL, object = NULL, location = NULL,
instrument = NULL, error = NULL, endTime = NULL, agent = NULL,
actionStatus = NULL, url = NULL, sameAs = NULL,
potentialAction = NULL, name = NULL, mainEntityOfPage = NULL,
image = NULL, identifier = NULL, disambiguatingDescription = NULL,
description = NULL, alternateName = NULL, additionalType = NULL)
}
\arguments{
\item{id}{identifier for the object (URI)}
\item{target}{(EntryPoint type.) Indicates a target EntryPoint for an Action.}
\item{startTime}{(DateTime type.) The startTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to start. For actions that span a period of time, when the action was performed. e.g. John wrote a book from *January* to December. Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{result}{(Thing type.) The result produced in the action. e.g. John wrote *a book*.}
\item{participant}{(Person or Organization type.) Other co-agents that participated in the action indirectly. e.g. John wrote a book with *Steve*.}
\item{object}{(Thing type.) The object upon which the action is carried out, whose state is kept intact or changed. Also known as the semantic roles patient, affected or undergoer (which change their state) or theme (which doesn't). e.g. John read *a book*.}
\item{location}{(Text or PostalAddress or Place type.) The location of, for example, where the event is happening, an organization is located, or where an action takes place.}
\item{instrument}{(Thing type.) The object that helped the agent perform the action. e.g. John wrote a book with *a pen*.}
\item{error}{(Thing type.) For failed actions, more information on the cause of the failure.}
\item{endTime}{(DateTime type.) The endTime of something. For a reserved event or service (e.g. FoodEstablishmentReservation), the time that it is expected to end. For actions that span a period of time, when the action was performed. e.g. John wrote a book from January to *December*. Note that Event uses startDate/endDate instead of startTime/endTime, even when describing dates with times. This situation may be clarified in future revisions.}
\item{agent}{(Person or Organization type.) The direct performer or driver of the action (animate or inanimate). e.g. *John* wrote a book.}
\item{actionStatus}{(ActionStatusType type.) Indicates the current disposition of the Action.}
\item{url}{(URL type.) URL of the item.}
\item{sameAs}{(URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.}
\item{potentialAction}{(Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.}
\item{name}{(Text type.) The name of the item.}
\item{mainEntityOfPage}{(URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.}
\item{image}{(URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].}
\item{identifier}{(URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.}
\item{disambiguatingDescription}{(Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.}
\item{description}{(Text type.) A description of the item.}
\item{alternateName}{(Text type.) An alias for the item.}
\item{additionalType}{(URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.}
}
\value{
a list object corresponding to a schema:ActivateAction
}
\description{
The act of starting or activating a device or application (e.g. starting a timer or turning on a flashlight).
}
|
/Véletlenszám_Generátor_szemléltetés/Egyenletesbol_Exp_gen.R
|
no_license
|
tenkiX/R_peldak
|
R
| false
| false
| 509
|
r
| ||
## Auto-extracted example script for qlcData::launch_shiny (generated from
## the package's launch_shiny.Rd help page). The example call is wrapped in a
## "Not run" guard (##D line), so sourcing this file only attaches qlcData.
library(qlcData)
### Name: launch_shiny
### Title: Launch Shiny app
### Aliases: launch_shiny
### ** Examples
## Not run:
##D launch_shiny("tokenize")
## End(Not run)
|
/data/genthat_extracted_code/qlcData/examples/launch_shiny.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 175
|
r
|
## Auto-extracted example script for qlcData::launch_shiny (generated from
## the package's launch_shiny.Rd help page). The example call is wrapped in a
## "Not run" guard (##D line), so sourcing this file only attaches qlcData.
library(qlcData)
### Name: launch_shiny
### Title: Launch Shiny app
### Aliases: launch_shiny
### ** Examples
## Not run:
##D launch_shiny("tokenize")
## End(Not run)
|
## "Developer Reference" help box: a left-tabbed box with two panels, each
## rendering a markdown help page in place (Shiny developer notes and
## data-update notes). Presumably a shinydashboard UI fragment included by a
## parent layout -- confirm against the caller.
tabBox(
  title = "Developer Reference",
  width = 20,
  side  = "left",
  tabPanel(
    icon  = icon("code"),
    title = "Shiny Dashboard",
    value = "releaseNotes",
    includeMarkdown("assets/ui/help/shinyDevRef.md")
  ),
  tabPanel(
    icon  = icon("server"),
    title = "Data Updates",
    value = "dataNotes",
    includeMarkdown("assets/ui/help/data.md")
  )
)
|
/assets/ui/devRef.R
|
no_license
|
SuperJohn/personal-finance-app
|
R
| false
| false
| 450
|
r
|
## "Developer Reference" help box: a left-tabbed box with two panels, each
## rendering a markdown help page in place (Shiny developer notes and
## data-update notes). Presumably a shinydashboard UI fragment included by a
## parent layout -- confirm against the caller.
tabBox(
  title = "Developer Reference",
  width = 20,
  side  = "left",
  tabPanel(
    icon  = icon("code"),
    title = "Shiny Dashboard",
    value = "releaseNotes",
    includeMarkdown("assets/ui/help/shinyDevRef.md")
  ),
  tabPanel(
    icon  = icon("server"),
    title = "Data Updates",
    value = "dataNotes",
    includeMarkdown("assets/ui/help/data.md")
  )
)
|
## Fit a 10-fold cross-validated elastic net (alpha = 0.55) on the stomach
## training set and append the fitted coefficient-path summary to a log file.
library(glmnet)

## BUGFIX: 'head=T' relied on partial argument matching and the reassignable
## alias T; spelled out as header = TRUE. Assignments use <- per convention.
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/stomach.csv", header = TRUE, sep = ",")

x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictors: columns 4..end
y <- as.matrix(mydata[, 1])               # response: first column

set.seed(123)  # reproducible CV fold assignment
## renamed from 'glm', which shadowed stats::glm
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.55,
                    family = "gaussian", standardize = TRUE)

## Append the glmnet fit summary to the log; sink() is restored right after.
sink('./stomach_061.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
/Model/EN/Lasso/stomach/stomach_061.R
|
no_license
|
esbgkannan/QSMART
|
R
| false
| false
| 346
|
r
|
## Fit a 10-fold cross-validated elastic net (alpha = 0.55) on the stomach
## training set and append the fitted coefficient-path summary to a log file.
library(glmnet)

## BUGFIX: 'head=T' relied on partial argument matching and the reassignable
## alias T; spelled out as header = TRUE. Assignments use <- per convention.
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/stomach.csv", header = TRUE, sep = ",")

x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictors: columns 4..end
y <- as.matrix(mydata[, 1])               # response: first column

set.seed(123)  # reproducible CV fold assignment
## renamed from 'glm', which shadowed stats::glm
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.55,
                    family = "gaussian", standardize = TRUE)

## Append the glmnet fit summary to the log; sink() is restored right after.
sink('./stomach_061.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
## Auto-generated fuzz/regression replay (valgrind test case) for the internal
## C++ routine IntervalSurgeon:::rcpp_pile: rebuilds a captured argument list,
## invokes the routine via do.call, and prints the result's structure. The
## integer values are the captured fuzz inputs -- do not edit by hand.
testlist <- list(ends = c(-1125300777L, 765849518L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609874388-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 729
|
r
|
## Auto-generated fuzz/regression replay (valgrind test case) for the internal
## C++ routine IntervalSurgeon:::rcpp_pile: rebuilds a captured argument list,
## invokes the routine via do.call, and prints the result's structure. The
## integer values are the captured fuzz inputs -- do not edit by hand.
testlist <- list(ends = c(-1125300777L, 765849518L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
## Load the NIPS bag-of-words data (UCI format: the first three lines hold the
## document / vocabulary / total-token counts; the rest are doc-word-count
## triples) and scatter the first few counts into a doc-by-word matrix.
params <- read.csv('./NIPS/docword.nips.txt', header = FALSE, sep = " ", nrows = 3)
docwords <- read.csv('./NIPS/docword.nips.txt', header = FALSE, sep = " ", skip = 3)
vocab <- read.csv('./NIPS/vocab.nips.txt', header = FALSE, sep = ' ')
colnames(docwords) <- c("DocID", "WordID", "Count")

d <- params[1, ]  # number of documents
w <- params[2, ]  # number of words in the vocabulary
n <- params[3, ]  # total number of words
k <- 30           # number of clusters

## build my vectors of x's. This should be d vectors of w length
x1 <- rep(0.0, w)
## NOTE(review): 10 x 45 looks like scratch dimensions -- real DocID/WordID
## values may exceed them; confirm before scaling past the first three rows.
x <- matrix(0.0, 10, 45)  # initialize to zeros
three <- docwords$Count[[9]]
x2 <- matrix(0.0, 2, 3)

## BUGFIX: the original 'x[2][2] <- 10' indexes the matrix as a flat vector
## twice (it targeted element 2, not row 2 / column 2, and warned about
## recycling); matrix cells are addressed as x[row, col].
x[2, 2] <- 10

rowcount <- nrow(docwords)
print(rowcount)

for (i in 1:3) {
  row <- docwords[i, ]
  doc <- row[[1]]
  print(paste0("doc ", doc))
  word <- row[[2]]
  print(paste0("word ", word))
  count <- row[[3]]
  print(paste0("count ", count))
  ## BUGFIX: same flat-vector indexing error as above -- store the count in
  ## row 'doc', column 'word'.
  x[doc, word] <- count
}
|
/Part1.R
|
no_license
|
nramaker/AML_HW7
|
R
| false
| false
| 913
|
r
|
## Load the NIPS bag-of-words data (UCI format: the first three lines hold the
## document / vocabulary / total-token counts; the rest are doc-word-count
## triples) and scatter the first few counts into a doc-by-word matrix.
params <- read.csv('./NIPS/docword.nips.txt', header = FALSE, sep = " ", nrows = 3)
docwords <- read.csv('./NIPS/docword.nips.txt', header = FALSE, sep = " ", skip = 3)
vocab <- read.csv('./NIPS/vocab.nips.txt', header = FALSE, sep = ' ')
colnames(docwords) <- c("DocID", "WordID", "Count")

d <- params[1, ]  # number of documents
w <- params[2, ]  # number of words in the vocabulary
n <- params[3, ]  # total number of words
k <- 30           # number of clusters

## build my vectors of x's. This should be d vectors of w length
x1 <- rep(0.0, w)
## NOTE(review): 10 x 45 looks like scratch dimensions -- real DocID/WordID
## values may exceed them; confirm before scaling past the first three rows.
x <- matrix(0.0, 10, 45)  # initialize to zeros
three <- docwords$Count[[9]]
x2 <- matrix(0.0, 2, 3)

## BUGFIX: the original 'x[2][2] <- 10' indexes the matrix as a flat vector
## twice (it targeted element 2, not row 2 / column 2, and warned about
## recycling); matrix cells are addressed as x[row, col].
x[2, 2] <- 10

rowcount <- nrow(docwords)
print(rowcount)

for (i in 1:3) {
  row <- docwords[i, ]
  doc <- row[[1]]
  print(paste0("doc ", doc))
  word <- row[[2]]
  print(paste0("word ", word))
  count <- row[[3]]
  print(paste0("count ", count))
  ## BUGFIX: same flat-vector indexing error as above -- store the count in
  ## row 'doc', column 'word'.
  x[doc, word] <- count
}
|
#' Extract monthly SPEI values for a rectangular region
#'
#' Reads one or more SPEI netcdf files and returns, for every grid cell inside
#' the requested longitude/latitude box, the monthly SPEI values between
#' \code{start_y} and \code{end_y}.
#'
#' @param spei_files character vector of netcdf file names (without the ".nc"
#'   extension); each file contributes one column to the result.
#' @param start_y,end_y first and last year (inclusive) of the requested
#'   period. The underlying database starts in 1901.
#' @param lon_min,lon_max,lat_min,lat_max bounding box of the region. The
#'   values are snapped onto the grid of cell centres (x.25 / x.75) such that
#'   the provided coordinates are contained in the returned region.
#' @return data frame with columns lon, lat, year, month and one SPEI column
#'   per input file.
region_spei <- function(spei_files,start_y,end_y,lon_min,lon_max,lat_min,lat_max)
{
  # Install any missing dependencies, then attach them.
  list.of.packages <- c("ncdf4","tidyverse","chron","stringr")
  new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
  if(length(new.packages)) install.packages(new.packages)
  require(tidyverse)
  require(ncdf4)
  require(chron)
  require(stringr)

  # Snap a coordinate onto the grid of cell centres (x.25 or x.75).
  # This helper replaces four copy-pasted blocks of identical arithmetic
  # (one per bounding-box edge); the computation itself is unchanged.
  round_quarter <- function(v) {
    aux <- round(v*4)/4
    aux1 <- abs(v) - floor(abs(v))
    aux2 <- abs(aux) - floor(abs(v))
    aux3 <- abs(aux1) - abs(aux2)
    ifelse(!aux2==0.5 & !aux2==0 & !aux2==1,
           aux,
           round((v + ifelse(aux3 < 0, -0.25, 0.25))*4)/4)
  }

  spei_files <- matrix(spei_files) # convert input vector into a matrix
  n_spei <- length(spei_files)     # number of spei files included by the user

  # Main loop - process each spei file separately: ####
  for(s in 1:n_spei) {
    nc_name <- spei_files[s,1]                   # spei file of this loop
    file_name <- paste(nc_name, ".nc", sep = "") # create filename
    var_name <- "spei"                           # variable name inside the file
    spei_nc <- nc_open(file_name)                # open the netcdf file (ncdf4)
    # total number of months available in this dataset
    max_time <- length(ncvar_get(spei_nc,"time"))
    # longitude / latitude axes and the full grid of cell coordinates
    lon <- ncvar_get(spei_nc, "lon")
    lat <- ncvar_get(spei_nc, "lat", verbose = F)
    lonlat <- expand.grid(lon, lat)
    spei_array <- ncvar_get(spei_nc, var_name)              # lon x lat x time array
    fillvalue <- ncatt_get(spei_nc, var_name, "_FillValue") # missing-data marker
    nc_close(spei_nc)                                       # close the netcdf file
    spei_array[spei_array == fillvalue$value] <- NA # turn fill values into NAs

    data_start <- 1901 # first year of the database (current standard)
    # month indices (counted since 1901) of the first and last requested months
    start_i <- (start_y-data_start)*12+1
    end_i <- (end_y-data_start+1)*12
    # clip to the data actually present in the file
    if(end_i > max_time){end_i = max_time}

    # First sub-loop - extract the data month by month: ####
    for(i in start_i:end_i) {
      spei_slice <- spei_array[, ,i]    # one monthly 'brick' of the array
      spei_vec <- as.vector(spei_slice) # flatten to match the lonlat row order
      cur_year <- as.integer( start_y-1+ceiling( (i-start_i+1)/12 ))     # current year
      cur_month <- as.integer( (i-start_i+1) + ((start_y-cur_year)*12) ) # month within cur_year (1-12)
      # coordinates + SPEI values of this month
      # (the original assigned a long per-month column name here that was
      # immediately overwritten; that dead assignment has been removed)
      d_aux1 <- data.frame(cbind(lonlat, spei_vec))
      names(d_aux1) <- c("lon", "lat", nc_name)
      if (i==start_i) { # first month: start a new data frame
        d_aux2 <- d_aux1 %>% mutate(year = cur_year, month = cur_month)
      } else{           # later months: append
        d_aux2 <- bind_rows(d_aux2, (d_aux1 %>% mutate(year = cur_year, month = cur_month)))
      }
    }

    # cut out requested region:
    if (s==1) { # compute once: snap the user's box onto the grid so that the
                # provided coordinates are always inside the returned region
      lon_min_rounded <- round_quarter(lon_min)
      lon_max_rounded <- round_quarter(lon_max)
      lat_min_rounded <- round_quarter(lat_min)
      lat_max_rounded <- round_quarter(lat_max)
    }
    d_aux2 <- d_aux2 %>% filter(lon>=lon_min_rounded & lon<=lon_max_rounded & lat>=lat_min_rounded & lat<=lat_max_rounded)

    # first file: keep coordinates and time; later files: add their SPEI column
    if (s==1) {
      d1 <- d_aux2 %>% select(c(lon,lat,year,month,paste(nc_name)))
    } else {
      d1 <- bind_cols(d1,(d_aux2 %>% select(c(paste(nc_name)))))
    }
  }
  return(d1)
}
|
/R/region_spei.R
|
no_license
|
seschaub/getSpei
|
R
| false
| false
| 5,609
|
r
|
#' Extract monthly SPEI values for a rectangular region
#'
#' Reads one or more SPEI netcdf files and returns, for every grid cell inside
#' the requested longitude/latitude box, the monthly SPEI values between
#' \code{start_y} and \code{end_y}.
#'
#' @param spei_files character vector of netcdf file names (without the ".nc"
#'   extension); each file contributes one column to the result.
#' @param start_y,end_y first and last year (inclusive) of the requested
#'   period. The underlying database starts in 1901.
#' @param lon_min,lon_max,lat_min,lat_max bounding box of the region. The
#'   values are snapped onto the grid of cell centres (x.25 / x.75) such that
#'   the provided coordinates are contained in the returned region.
#' @return data frame with columns lon, lat, year, month and one SPEI column
#'   per input file.
region_spei <- function(spei_files,start_y,end_y,lon_min,lon_max,lat_min,lat_max)
{
  # Install any missing dependencies, then attach them.
  list.of.packages <- c("ncdf4","tidyverse","chron","stringr")
  new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
  if(length(new.packages)) install.packages(new.packages)
  require(tidyverse)
  require(ncdf4)
  require(chron)
  require(stringr)

  # Snap a coordinate onto the grid of cell centres (x.25 or x.75).
  # This helper replaces four copy-pasted blocks of identical arithmetic
  # (one per bounding-box edge); the computation itself is unchanged.
  round_quarter <- function(v) {
    aux <- round(v*4)/4
    aux1 <- abs(v) - floor(abs(v))
    aux2 <- abs(aux) - floor(abs(v))
    aux3 <- abs(aux1) - abs(aux2)
    ifelse(!aux2==0.5 & !aux2==0 & !aux2==1,
           aux,
           round((v + ifelse(aux3 < 0, -0.25, 0.25))*4)/4)
  }

  spei_files <- matrix(spei_files) # convert input vector into a matrix
  n_spei <- length(spei_files)     # number of spei files included by the user

  # Main loop - process each spei file separately: ####
  for(s in 1:n_spei) {
    nc_name <- spei_files[s,1]                   # spei file of this loop
    file_name <- paste(nc_name, ".nc", sep = "") # create filename
    var_name <- "spei"                           # variable name inside the file
    spei_nc <- nc_open(file_name)                # open the netcdf file (ncdf4)
    # total number of months available in this dataset
    max_time <- length(ncvar_get(spei_nc,"time"))
    # longitude / latitude axes and the full grid of cell coordinates
    lon <- ncvar_get(spei_nc, "lon")
    lat <- ncvar_get(spei_nc, "lat", verbose = F)
    lonlat <- expand.grid(lon, lat)
    spei_array <- ncvar_get(spei_nc, var_name)              # lon x lat x time array
    fillvalue <- ncatt_get(spei_nc, var_name, "_FillValue") # missing-data marker
    nc_close(spei_nc)                                       # close the netcdf file
    spei_array[spei_array == fillvalue$value] <- NA # turn fill values into NAs

    data_start <- 1901 # first year of the database (current standard)
    # month indices (counted since 1901) of the first and last requested months
    start_i <- (start_y-data_start)*12+1
    end_i <- (end_y-data_start+1)*12
    # clip to the data actually present in the file
    if(end_i > max_time){end_i = max_time}

    # First sub-loop - extract the data month by month: ####
    for(i in start_i:end_i) {
      spei_slice <- spei_array[, ,i]    # one monthly 'brick' of the array
      spei_vec <- as.vector(spei_slice) # flatten to match the lonlat row order
      cur_year <- as.integer( start_y-1+ceiling( (i-start_i+1)/12 ))     # current year
      cur_month <- as.integer( (i-start_i+1) + ((start_y-cur_year)*12) ) # month within cur_year (1-12)
      # coordinates + SPEI values of this month
      # (the original assigned a long per-month column name here that was
      # immediately overwritten; that dead assignment has been removed)
      d_aux1 <- data.frame(cbind(lonlat, spei_vec))
      names(d_aux1) <- c("lon", "lat", nc_name)
      if (i==start_i) { # first month: start a new data frame
        d_aux2 <- d_aux1 %>% mutate(year = cur_year, month = cur_month)
      } else{           # later months: append
        d_aux2 <- bind_rows(d_aux2, (d_aux1 %>% mutate(year = cur_year, month = cur_month)))
      }
    }

    # cut out requested region:
    if (s==1) { # compute once: snap the user's box onto the grid so that the
                # provided coordinates are always inside the returned region
      lon_min_rounded <- round_quarter(lon_min)
      lon_max_rounded <- round_quarter(lon_max)
      lat_min_rounded <- round_quarter(lat_min)
      lat_max_rounded <- round_quarter(lat_max)
    }
    d_aux2 <- d_aux2 %>% filter(lon>=lon_min_rounded & lon<=lon_max_rounded & lat>=lat_min_rounded & lat<=lat_max_rounded)

    # first file: keep coordinates and time; later files: add their SPEI column
    if (s==1) {
      d1 <- d_aux2 %>% select(c(lon,lat,year,month,paste(nc_name)))
    } else {
      d1 <- bind_cols(d1,(d_aux2 %>% select(c(paste(nc_name)))))
    }
  }
  return(d1)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eval_g.R
\name{eval.g.ar2}
\alias{eval.g.ar2}
\title{Evaluate sum of Hermite polynomials for g(pacf1) and g(pacf2)}
\usage{
eval.g.ar2(x, beta)
}
\description{
Evaluate sum of Hermite polynomials for g(pacf1) and g(pacf2)
}
\keyword{internal}
|
/man/eval.g.ar2.Rd
|
no_license
|
pedrognicolau/ARbiascorrect-v1
|
R
| false
| true
| 321
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eval_g.R
\name{eval.g.ar2}
\alias{eval.g.ar2}
\title{Evaluate sum of Hermite polynomials for g(pacf1) and g(pacf2)}
\usage{
eval.g.ar2(x, beta)
}
\description{
Evaluate sum of Hermite polynomials for g(pacf1) and g(pacf2)
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tools-api.R
\name{batcher}
\alias{batcher}
\title{Download in batches}
\usage{
batcher(ids, func, ps, lvl = 0)
}
\arguments{
\item{ids}{Vector of record ids}
\item{func}{Downloader function}
\item{ps}{Parameters list, generated with parameters()}
\item{lvl}{Integer, number of message indentations indicating code
depth.}
}
\value{
Vector of records
vector of rentrez function results
}
\description{
Run downloader function in batches for sequences or
taxonomic records
}
\seealso{
Other run-private:
\code{\link{blast_clstr}()},
\code{\link{blast_filter}()},
\code{\link{blast_setup}()},
\code{\link{blast_sqs}()},
\code{\link{blastcache_load}()},
\code{\link{blastcache_save}()},
\code{\link{blastdb_gen}()},
\code{\link{blastn_run}()},
\code{\link{cache_rm}()},
\code{\link{cache_setup}()},
\code{\link{clade_select}()},
\code{\link{clstr2_calc}()},
\code{\link{clstr_all}()},
\code{\link{clstr_direct}()},
\code{\link{clstr_sqs}()},
\code{\link{clstr_subtree}()},
\code{\link{clstrarc_gen}()},
\code{\link{clstrarc_join}()},
\code{\link{clstrrec_gen}()},
\code{\link{clstrs_calc}()},
\code{\link{clstrs_join}()},
\code{\link{clstrs_merge}()},
\code{\link{clstrs_renumber}()},
\code{\link{clstrs_save}()},
\code{\link{cmdln}()},
\code{\link{descendants_get}()},
\code{\link{download_obj_check}()},
\code{\link{error}()},
\code{\link{gb_extract}()},
\code{\link{hierarchic_download}()},
\code{\link{info}()},
\code{\link{ncbicache_load}()},
\code{\link{ncbicache_save}()},
\code{\link{obj_check}()},
\code{\link{obj_load}()},
\code{\link{obj_save}()},
\code{\link{outfmt_get}()},
\code{\link{parameters_load}()},
\code{\link{parameters_setup}()},
\code{\link{parent_get}()},
\code{\link{progress_init}()},
\code{\link{progress_read}()},
\code{\link{progress_reset}()},
\code{\link{progress_save}()},
\code{\link{rank_get}()},
\code{\link{rawseqrec_breakdown}()},
\code{\link{safely_connect}()},
\code{\link{search_and_cache}()},
\code{\link{searchterm_gen}()},
\code{\link{seeds_blast}()},
\code{\link{seq_download}()},
\code{\link{seqarc_gen}()},
\code{\link{seqrec_augment}()},
\code{\link{seqrec_convert}()},
\code{\link{seqrec_gen}()},
\code{\link{seqrec_get}()},
\code{\link{sids_check}()},
\code{\link{sids_get}()},
\code{\link{sids_load}()},
\code{\link{sids_save}()},
\code{\link{sqs_count}()},
\code{\link{sqs_save}()},
\code{\link{stage_args_check}()},
\code{\link{stages_run}()},
\code{\link{tax_download}()},
\code{\link{taxdict_gen}()},
\code{\link{taxtree_gen}()},
\code{\link{txids_get}()},
\code{\link{txnds_count}()},
\code{\link{warn}()}
}
\concept{run-private}
|
/man/batcher.Rd
|
permissive
|
ropensci/phylotaR
|
R
| false
| true
| 2,666
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tools-api.R
\name{batcher}
\alias{batcher}
\title{Download in batches}
\usage{
batcher(ids, func, ps, lvl = 0)
}
\arguments{
\item{ids}{Vector of record ids}
\item{func}{Downloader function}
\item{ps}{Parameters list, generated with parameters()}
\item{lvl}{Integer, number of message indentations indicating code
depth.}
}
\value{
Vector of records
vector of rentrez function results
}
\description{
Run downloader function in batches for sequences or
taxonomic records
}
\seealso{
Other run-private:
\code{\link{blast_clstr}()},
\code{\link{blast_filter}()},
\code{\link{blast_setup}()},
\code{\link{blast_sqs}()},
\code{\link{blastcache_load}()},
\code{\link{blastcache_save}()},
\code{\link{blastdb_gen}()},
\code{\link{blastn_run}()},
\code{\link{cache_rm}()},
\code{\link{cache_setup}()},
\code{\link{clade_select}()},
\code{\link{clstr2_calc}()},
\code{\link{clstr_all}()},
\code{\link{clstr_direct}()},
\code{\link{clstr_sqs}()},
\code{\link{clstr_subtree}()},
\code{\link{clstrarc_gen}()},
\code{\link{clstrarc_join}()},
\code{\link{clstrrec_gen}()},
\code{\link{clstrs_calc}()},
\code{\link{clstrs_join}()},
\code{\link{clstrs_merge}()},
\code{\link{clstrs_renumber}()},
\code{\link{clstrs_save}()},
\code{\link{cmdln}()},
\code{\link{descendants_get}()},
\code{\link{download_obj_check}()},
\code{\link{error}()},
\code{\link{gb_extract}()},
\code{\link{hierarchic_download}()},
\code{\link{info}()},
\code{\link{ncbicache_load}()},
\code{\link{ncbicache_save}()},
\code{\link{obj_check}()},
\code{\link{obj_load}()},
\code{\link{obj_save}()},
\code{\link{outfmt_get}()},
\code{\link{parameters_load}()},
\code{\link{parameters_setup}()},
\code{\link{parent_get}()},
\code{\link{progress_init}()},
\code{\link{progress_read}()},
\code{\link{progress_reset}()},
\code{\link{progress_save}()},
\code{\link{rank_get}()},
\code{\link{rawseqrec_breakdown}()},
\code{\link{safely_connect}()},
\code{\link{search_and_cache}()},
\code{\link{searchterm_gen}()},
\code{\link{seeds_blast}()},
\code{\link{seq_download}()},
\code{\link{seqarc_gen}()},
\code{\link{seqrec_augment}()},
\code{\link{seqrec_convert}()},
\code{\link{seqrec_gen}()},
\code{\link{seqrec_get}()},
\code{\link{sids_check}()},
\code{\link{sids_get}()},
\code{\link{sids_load}()},
\code{\link{sids_save}()},
\code{\link{sqs_count}()},
\code{\link{sqs_save}()},
\code{\link{stage_args_check}()},
\code{\link{stages_run}()},
\code{\link{tax_download}()},
\code{\link{taxdict_gen}()},
\code{\link{taxtree_gen}()},
\code{\link{txids_get}()},
\code{\link{txnds_count}()},
\code{\link{warn}()}
}
\concept{run-private}
|
#' create curly braces as a layer in ggplot
#'
#' Builds the coordinates of a curly brace from the mapped/inherited
#' aesthetics and returns it as a \code{geom_path} layer; when
#' \code{labelsize > 0} a text/label layer placed at the brace pointer is
#' returned alongside it.
#'
#' Imports:
#' ggplot2
#'
#' @inheritParams ggplot2::geom_path
#' @inheritParams ggplot2::geom_text
#' @import ggplot2
#'
#' @param rotate number, defines where the brace is pointing to: 0=up, 90=right, 180=down, 270=left. When specified by user, will overwrite other directions the brace might have from x/y coordinates.
#' @param mid number, where the pointer is within the bracket space (clamped between 0.25 and 0.75)
#' @param bending number, how strongly the curves of the braces should be bent (the higher the more round). Note: too high values will result in the brace showing zick-zack lines
#' @param npoints integer, number of points generated for the brace curves (resolution). This number will be rounded to be a multiple of 4 for calculation purposes.
#' @param labelsize number, font size of the label; a value of 0 (the default) suppresses the label layer entirely.
#' @param labeldistance number, distance of the label to the brace pointer
#' @param labelrotate number, rotation of the label text in degrees (0, 90, 180 or 270)
#' @param distance number between the brace and the nearest data point
#' @param outerstart number, overwrites distance and provides one coordinate for all braces
#' @param width number, regulates how wide the braces are
#' @param textORlabel string, either "text" or "label" to define whether geom_text or geom_label should be used
#' @param ... further arguments; entries whose names match formals of
#'   \code{geom_path}/\code{geom_label}/\code{layer} (e.g. color) are
#'   forwarded to the brace and/or label layer. The label text itself is
#'   supplied via the \code{label} aesthetic in \code{mapping}.
#' @return ggplot2 layer object (geom_path) that can directly be added to a ggplot2 object. If labelsize > 0, another layer (geom_text or geom_label) is added.
#' @export
#' @examples
#' library(ggbrace)
#' library(ggplot2)
#' ggplot(mtcars, aes(mpg, wt, color=factor(am))) +
#'   geom_point() +
#'   facet_wrap(~vs) +
#'   stat_brace(rotate=90, aes(label=factor(am)))
#'
stat_brace <- function(mapping = NULL, data = NULL, inherit.aes=TRUE, #mapping-related
                       rotate=0, mid=NULL, bending=NULL, npoints=100, #orientation and shape
                       labelsize = 0, labeldistance=NULL, labelrotate=0, #labels
                       distance=NULL, outerstart=NULL, width=NULL, #position
                       textORlabel="text",
                       ...){
  #================#
  #==preparations==#
  #================#
  # Split the ... arguments into those accepted by the label layer
  # (geom_label/layer formals) and those accepted by the path layer
  # (geom_path/layer formals); default each set to black if nothing matches.
  dots <- list(...)
  gglabel_args <- intersect(names(dots), unique(c(formalArgs(geom_label),formalArgs(layer))))
  if(length(gglabel_args)>0){
    dots_label <- dots[gglabel_args]
  }else{
    dots_label <- list(color="black")
  }
  ggpath_args <- intersect(names(dots), unique(c(formalArgs(geom_path),formalArgs(layer))))
  if(length(ggpath_args)>0){
    dots_path <- dots[ggpath_args]
  }else{
    dots_path <- list(color="black")
  }
  # clamp mid into [0.25, 0.75]
  # NOTE(review): when mid is NULL (the default) this ifelse() yields
  # logical(0) rather than NULL; downstream Stat* code apparently treats
  # both as "unset" -- confirm before changing.
  mid <- ifelse(mid>0.75, 0.75, ifelse(mid<0.25, 0.25, mid))
  # up and right (rotate 0/90) give +1; down and left (180/270) give -1
  direction <- -sign(rotate/90-1.5)
  #=======================#
  #==label for the brace==#
  #=======================#
  added_labels <- NULL # combined with the brace later; stays NULL when no label is drawn
  if(labelsize>0){
    # hjust/vjust depend on both the brace rotation and the label rotation
    # so the text sits just outside the brace pointer
    if(any(labelrotate==c(90,270))){
      txtvjust <- switch(rotate/90+1, 0.5, 1, 0.5, 0)
      txthjust <- switch(rotate/90+1, 0, 0.5, 1, 0.5)
    }else{
      txtvjust <- switch(rotate/90+1, 0, 0.5, 1, 0.5)
      txthjust <- switch(rotate/90+1, 0.5, 0, 0.5, 1)
    }
    # create ggplot layer. StatBraceLabel will do the calculations where to
    # put the text (calling .coordCorrection)
    added_labels <- ggplot2::layer(
      stat = StatBraceLabel,
      data = data, mapping = mapping, geom = textORlabel,
      position = "identity", show.legend = FALSE, inherit.aes = inherit.aes,
      params = append(dots_label, list(vjust=txtvjust, hjust=txthjust, size=labelsize, angle=labelrotate,
                                       rotate=rotate, bending=bending, npoints=npoints, mid=mid,
                                       labeldistance=labeldistance, width=width,
                                       distance=distance, outerstart=outerstart,
                                       direction=direction, outside=TRUE))
    )
  }
  #====================#
  #==the brace itself==#
  #====================#
  mapping$label <- NULL # the path layer has no label aesthetic; drop it to avoid a ggplot message
  # create ggplot layer. StatBrace will do the calculations where and how to
  # draw the brace (calling .coordCorrection and then .seekBrace)
  outbrace <- ggplot2::layer(
    stat = StatBrace,
    data = data, mapping = mapping, geom = "path",
    position = "identity", show.legend = FALSE, inherit.aes = inherit.aes,
    params = append(dots_path, list(rotate=rotate, bending=bending, npoints=npoints, mid=mid,
                                    distance=distance, outerstart=outerstart, width=width,
                                    direction=direction, outside=TRUE))
  )
  outbrace <- c(outbrace, added_labels)
  return(outbrace)
}
|
/R/stat_brace.R
|
permissive
|
NicolasH2/ggbrace
|
R
| false
| false
| 5,129
|
r
|
#' create curly braces as a layer in ggplot
#'
#' Builds the coordinates of a curly brace from the mapped/inherited
#' aesthetics and returns it as a \code{geom_path} layer; when
#' \code{labelsize > 0} a text/label layer placed at the brace pointer is
#' returned alongside it.
#'
#' Imports:
#' ggplot2
#'
#' @inheritParams ggplot2::geom_path
#' @inheritParams ggplot2::geom_text
#' @import ggplot2
#'
#' @param rotate number, defines where the brace is pointing to: 0=up, 90=right, 180=down, 270=left. When specified by user, will overwrite other directions the brace might have from x/y coordinates.
#' @param mid number, where the pointer is within the bracket space (clamped between 0.25 and 0.75)
#' @param bending number, how strongly the curves of the braces should be bent (the higher the more round). Note: too high values will result in the brace showing zick-zack lines
#' @param npoints integer, number of points generated for the brace curves (resolution). This number will be rounded to be a multiple of 4 for calculation purposes.
#' @param labelsize number, font size of the label; a value of 0 (the default) suppresses the label layer entirely.
#' @param labeldistance number, distance of the label to the brace pointer
#' @param labelrotate number, rotation of the label text in degrees (0, 90, 180 or 270)
#' @param distance number between the brace and the nearest data point
#' @param outerstart number, overwrites distance and provides one coordinate for all braces
#' @param width number, regulates how wide the braces are
#' @param textORlabel string, either "text" or "label" to define whether geom_text or geom_label should be used
#' @param ... further arguments; entries whose names match formals of
#'   \code{geom_path}/\code{geom_label}/\code{layer} (e.g. color) are
#'   forwarded to the brace and/or label layer. The label text itself is
#'   supplied via the \code{label} aesthetic in \code{mapping}.
#' @return ggplot2 layer object (geom_path) that can directly be added to a ggplot2 object. If labelsize > 0, another layer (geom_text or geom_label) is added.
#' @export
#' @examples
#' library(ggbrace)
#' library(ggplot2)
#' ggplot(mtcars, aes(mpg, wt, color=factor(am))) +
#'   geom_point() +
#'   facet_wrap(~vs) +
#'   stat_brace(rotate=90, aes(label=factor(am)))
#'
stat_brace <- function(mapping = NULL, data = NULL, inherit.aes=TRUE, #mapping-related
                       rotate=0, mid=NULL, bending=NULL, npoints=100, #orientation and shape
                       labelsize = 0, labeldistance=NULL, labelrotate=0, #labels
                       distance=NULL, outerstart=NULL, width=NULL, #position
                       textORlabel="text",
                       ...){
  #================#
  #==preparations==#
  #================#
  # Split the ... arguments into those accepted by the label layer
  # (geom_label/layer formals) and those accepted by the path layer
  # (geom_path/layer formals); default each set to black if nothing matches.
  dots <- list(...)
  gglabel_args <- intersect(names(dots), unique(c(formalArgs(geom_label),formalArgs(layer))))
  if(length(gglabel_args)>0){
    dots_label <- dots[gglabel_args]
  }else{
    dots_label <- list(color="black")
  }
  ggpath_args <- intersect(names(dots), unique(c(formalArgs(geom_path),formalArgs(layer))))
  if(length(ggpath_args)>0){
    dots_path <- dots[ggpath_args]
  }else{
    dots_path <- list(color="black")
  }
  # clamp mid into [0.25, 0.75]
  # NOTE(review): when mid is NULL (the default) this ifelse() yields
  # logical(0) rather than NULL; downstream Stat* code apparently treats
  # both as "unset" -- confirm before changing.
  mid <- ifelse(mid>0.75, 0.75, ifelse(mid<0.25, 0.25, mid))
  # up and right (rotate 0/90) give +1; down and left (180/270) give -1
  direction <- -sign(rotate/90-1.5)
  #=======================#
  #==label for the brace==#
  #=======================#
  added_labels <- NULL # combined with the brace later; stays NULL when no label is drawn
  if(labelsize>0){
    # hjust/vjust depend on both the brace rotation and the label rotation
    # so the text sits just outside the brace pointer
    if(any(labelrotate==c(90,270))){
      txtvjust <- switch(rotate/90+1, 0.5, 1, 0.5, 0)
      txthjust <- switch(rotate/90+1, 0, 0.5, 1, 0.5)
    }else{
      txtvjust <- switch(rotate/90+1, 0, 0.5, 1, 0.5)
      txthjust <- switch(rotate/90+1, 0.5, 0, 0.5, 1)
    }
    # create ggplot layer. StatBraceLabel will do the calculations where to
    # put the text (calling .coordCorrection)
    added_labels <- ggplot2::layer(
      stat = StatBraceLabel,
      data = data, mapping = mapping, geom = textORlabel,
      position = "identity", show.legend = FALSE, inherit.aes = inherit.aes,
      params = append(dots_label, list(vjust=txtvjust, hjust=txthjust, size=labelsize, angle=labelrotate,
                                       rotate=rotate, bending=bending, npoints=npoints, mid=mid,
                                       labeldistance=labeldistance, width=width,
                                       distance=distance, outerstart=outerstart,
                                       direction=direction, outside=TRUE))
    )
  }
  #====================#
  #==the brace itself==#
  #====================#
  mapping$label <- NULL # the path layer has no label aesthetic; drop it to avoid a ggplot message
  # create ggplot layer. StatBrace will do the calculations where and how to
  # draw the brace (calling .coordCorrection and then .seekBrace)
  outbrace <- ggplot2::layer(
    stat = StatBrace,
    data = data, mapping = mapping, geom = "path",
    position = "identity", show.legend = FALSE, inherit.aes = inherit.aes,
    params = append(dots_path, list(rotate=rotate, bending=bending, npoints=npoints, mid=mid,
                                    distance=distance, outerstart=outerstart, width=width,
                                    direction=direction, outside=TRUE))
  )
  outbrace <- c(outbrace, added_labels)
  return(outbrace)
}
|
#' Extract data from objects to use in a shiny app
#'
#' This function joins the result of [tune::fit_resamples()] to the original
#' dataset to give a list that can be an input for the Shiny app.
#' @param x The [tune::fit_resamples()] result.
#' @param hover_cols The columns to display while hovering.
#' @param ... Other parameters not currently used.
#' @keywords models, regression, graphs, classes, classif
#' @export
#' @return
#' A classed list (see `new_shiny_data()`) containing a data frame and several
#' character vectors. The data frame includes an outcome variable `.outcome`,
#' a prediction variable `.pred`, a model configuration variable `.config`,
#' and a hover-text column `.hover`. The default configuration is based on the
#' optimal value of the first metric.
organize_data <- function(x, hover_cols = NULL, ...) {
  # S3 generic; dispatch on the class of `x`
  # (see organize_data.tune_results for the main method).
  UseMethod("organize_data")
}
#' @export
#' @rdname organize_data
organize_data.default <- function(x, hover_cols = NULL, ...) {
  # Fallback method: no class-specific implementation exists, so fail loudly.
  msg <- "No `organize_data()` exists for this type of object."
  rlang::abort(msg)
}
#' @export
#' @rdname organize_data
organize_data.tune_results <-
  function(x,
           hover_cols = NULL,
           ...) {
    # Capture the hover-column selection for tidy evaluation below.
    hover_expr <- rlang::enquo(hover_cols)
    # All resamples share the same underlying data, so the first split suffices.
    original_data <- x$splits[[1]]$data
    if (!(".predictions" %in% colnames(x))) {
      rlang::abort(
        paste0(
          "The `.predictions` column does not exist. ",
          "Refit with the control argument `save_pred = TRUE` to save predictions."
        )
      )
    }
    y_name <- tune::.get_tune_outcome_names(x)
    if (!(y_name %in% names(original_data))) {
      # Typo fix in the user-facing message: "orignal" -> "original".
      rlang::abort(glue::glue("'{y_name}' is not a column in the original data"))
    }
    # Rename the outcome column to the standardized `.outcome` name.
    rn_lst <- list(.outcome = y_name)
    sample_predictions <-
      tune::collect_predictions(x, summarize = TRUE) %>%
      rename(!!!rn_lst)
    # Residuals only make sense for a numeric (regression) outcome.
    if (is.numeric(sample_predictions$.outcome)) {
      sample_predictions <- sample_predictions %>%
        dplyr::mutate(.residual = .outcome - .pred)
    }
    # Record which original columns are numeric vs. factor (used by the app UI).
    num_cols <- vapply(original_data, is.numeric, logical(1))
    num_col_names <- names(num_cols)[num_cols]
    fac_cols <- vapply(original_data, is.factor, logical(1))
    fac_col_names <- names(fac_cols)[fac_cols]
    # Attach the original predictors to each prediction row via `.row`.
    preds <- sample_predictions %>%
      dplyr::inner_join(original_data %>%
        parsnip::add_rowindex(),
      by = ".row"
      )
    # Hover text: default to the outcome column, otherwise honor the
    # user's tidyselect expression.
    if (quo_is_null(hover_expr)) {
      var <- preds %>% dplyr::select(dplyr::all_of(y_name))
    } else {
      pos <- tidyselect::eval_select(hover_expr, data = preds)
      var <- rlang::set_names(preds[pos], names(pos))
    }
    preds$.hover <- format_hover(var, ...)
    app_type <- get_app_type(original_data[[y_name]])
    # Default model configuration = best value of the first collected metric.
    default_config <- tune::select_best(x, tune::.get_tune_metric_names(x)[1])$.config
    new_shiny_data(preds, y_name, app_type, num_col_names, fac_col_names, x, default_config)
  }
# ------------------------------------------------------------------------------
# Internal constructor for the `shiny_data` structure: validates every input,
# then bundles them into a classed list consumed by the Shiny app.
# The checks run in the same order as before so the first failing input
# reports the same error.
new_shiny_data <- function(predictions, y_name, subclass, numeric_cols, factor_cols, x, default_config) {
  if (!inherits(predictions, "data.frame")) rlang::abort("predictions should be a data frame")
  if (nrow(predictions) == 0) rlang::abort("there should be at least one row of predictions")
  if (!(y_name %in% names(predictions))) rlang::abort(glue::glue("'{y_name}' should be a column in the predictions"))
  if (!is.character(y_name)) rlang::abort("y_name should be a character string")
  if (!is.character(numeric_cols)) rlang::abort("numeric_cols should be a character string")
  if (!is.character(factor_cols)) rlang::abort("factor_cols should be a character string")
  if (!is.character(default_config)) rlang::abort("default_config should be a character string")
  if (!(default_config %in% predictions$.config)) rlang::abort("default_config should be a character string in predictions")
  # Assemble the classed result directly (no intermediate variables).
  structure(
    list(
      predictions = predictions,
      y_name = y_name,
      app_type = subclass,
      default_config = default_config,
      num_cols = numeric_cols,
      fac_cols = factor_cols,
      tune_results = x
    ),
    class = c(paste0(subclass, "_shiny_data"), "shiny_data")
  )
}
# ------------------------------------------------------------------------------
# Map the outcome vector to the flavour of app: numeric -> regression ("reg"),
# two-level factor -> binary classification ("two_cls"), any other factor ->
# multiclass ("multi_cls"); anything else is an error.
get_app_type <- function(y) {
  if (is.numeric(y)) {
    "reg"
  } else if (is.factor(y)) {
    if (nlevels(y) == 2) "two_cls" else "multi_cls"
  } else {
    rlang::abort("outcome should be factor or numeric")
  }
}
# ------------------------------------------------------------------------------
#' Returns the class, app type, y name, and the number of rows of an object of
#' `shiny_data` class
#'
#' This is a print method for a shiny_data class
#' @param x an object of class shiny_data
#' @param ... Other parameters not currently used
#' @return `x`, invisibly (the standard contract for print methods).
#' @export
print.shiny_data <- function(x, ...) {
  string <- paste(
    paste("classes: ", paste0(class(x), collapse = ", ")),
    paste("app_type:", x$app_type),
    paste("y_name:", x$y_name),
    paste("nrows:", nrow(x$predictions)),
    # Bug fix: `.get_tune_parameter_names()` lives in the tune package but was
    # called without the `tune::` prefix used everywhere else in this file, so
    # it would not be found at run time unless explicitly imported.
    paste("tuning parameters:", tune::.get_tune_parameter_names(x$tune_results)),
    sep = "\n"
  )
  cat(string)
  # Return the object invisibly so the method composes like other print methods.
  invisible(x)
}
|
/R/organize_data.R
|
permissive
|
cpsievert/shinymodels
|
R
| false
| false
| 5,254
|
r
|
#' Extract data from objects to use in a shiny app
#'
#' This function joins the result of [tune::fit_resamples()] to the original
#' dataset to give a list that can be an input for the Shiny app.
#' @param x The [tune::fit_resamples()] result.
#' @param hover_cols The columns to display while hovering.
#' @param ... Other parameters not currently used.
#' @keywords models, regression, graphs, classes, classif
#' @export
#' @return
#' A classed list (see `new_shiny_data()`) containing a data frame and several
#' character vectors. The data frame includes an outcome variable `.outcome`,
#' a prediction variable `.pred`, a model configuration variable `.config`,
#' and a hover-text column `.hover`. The default configuration is based on the
#' optimal value of the first metric.
organize_data <- function(x, hover_cols = NULL, ...) {
  # S3 generic; dispatch on the class of `x`
  # (see organize_data.tune_results for the main method).
  UseMethod("organize_data")
}
#' @export
#' @rdname organize_data
organize_data.default <- function(x, hover_cols = NULL, ...) {
  # Fallback method: no class-specific implementation exists, so fail loudly.
  msg <- "No `organize_data()` exists for this type of object."
  rlang::abort(msg)
}
#' @export
#' @rdname organize_data
organize_data.tune_results <-
  function(x,
           hover_cols = NULL,
           ...) {
    # Capture the hover-column selection for tidy evaluation below.
    hover_expr <- rlang::enquo(hover_cols)
    # All resamples share the same underlying data, so the first split suffices.
    original_data <- x$splits[[1]]$data
    if (!(".predictions" %in% colnames(x))) {
      rlang::abort(
        paste0(
          "The `.predictions` column does not exist. ",
          "Refit with the control argument `save_pred = TRUE` to save predictions."
        )
      )
    }
    y_name <- tune::.get_tune_outcome_names(x)
    if (!(y_name %in% names(original_data))) {
      # Typo fix in the user-facing message: "orignal" -> "original".
      rlang::abort(glue::glue("'{y_name}' is not a column in the original data"))
    }
    # Rename the outcome column to the standardized `.outcome` name.
    rn_lst <- list(.outcome = y_name)
    sample_predictions <-
      tune::collect_predictions(x, summarize = TRUE) %>%
      rename(!!!rn_lst)
    # Residuals only make sense for a numeric (regression) outcome.
    if (is.numeric(sample_predictions$.outcome)) {
      sample_predictions <- sample_predictions %>%
        dplyr::mutate(.residual = .outcome - .pred)
    }
    # Record which original columns are numeric vs. factor (used by the app UI).
    num_cols <- vapply(original_data, is.numeric, logical(1))
    num_col_names <- names(num_cols)[num_cols]
    fac_cols <- vapply(original_data, is.factor, logical(1))
    fac_col_names <- names(fac_cols)[fac_cols]
    # Attach the original predictors to each prediction row via `.row`.
    preds <- sample_predictions %>%
      dplyr::inner_join(original_data %>%
        parsnip::add_rowindex(),
      by = ".row"
      )
    # Hover text: default to the outcome column, otherwise honor the
    # user's tidyselect expression.
    if (quo_is_null(hover_expr)) {
      var <- preds %>% dplyr::select(dplyr::all_of(y_name))
    } else {
      pos <- tidyselect::eval_select(hover_expr, data = preds)
      var <- rlang::set_names(preds[pos], names(pos))
    }
    preds$.hover <- format_hover(var, ...)
    app_type <- get_app_type(original_data[[y_name]])
    # Default model configuration = best value of the first collected metric.
    default_config <- tune::select_best(x, tune::.get_tune_metric_names(x)[1])$.config
    new_shiny_data(preds, y_name, app_type, num_col_names, fac_col_names, x, default_config)
  }
# ------------------------------------------------------------------------------
# Internal constructor for the `shiny_data` list consumed by the Shiny apps.
# Validates every input, then bundles predictions plus metadata under the
# classes c("<subclass>_shiny_data", "shiny_data").
#
# predictions:   data frame of joined predictions + original columns
# y_name:        name of the outcome column (character scalar)
# subclass:      app type ("reg", "two_cls", or "multi_cls")
# numeric_cols:  names of numeric columns in the original data
# factor_cols:   names of factor columns in the original data
# x:             the original tune_results object
# default_config: `.config` value to pre-select in the app
new_shiny_data <- function(predictions, y_name, subclass, numeric_cols, factor_cols, x, default_config) {
  if (!inherits(predictions, "data.frame")) {
    rlang::abort("predictions should be a data frame")
  }
  if (nrow(predictions) == 0) {
    rlang::abort("there should be at least one row of predictions")
  }
  # Check the type of `y_name` *before* interpolating it into a glue message
  # or using it for a column lookup; a non-string previously failed later
  # with a less helpful error.
  if (!is.character(y_name)) {
    rlang::abort("y_name should be a character string")
  }
  if (!(y_name %in% names(predictions))) {
    rlang::abort(glue::glue("'{y_name}' should be a column in the predictions"))
  }
  if (!is.character(numeric_cols)) {
    rlang::abort("numeric_cols should be a character string")
  }
  if (!is.character(factor_cols)) {
    rlang::abort("factor_cols should be a character string")
  }
  if (!is.character(default_config)) {
    rlang::abort("default_config should be a character string")
  }
  if (!(default_config %in% predictions$.config)) {
    rlang::abort("default_config should be a character string in predictions")
  }
  res <- list(
    predictions = predictions,
    y_name = y_name,
    app_type = subclass,
    default_config = default_config,
    num_cols = numeric_cols,
    fac_cols = factor_cols,
    tune_results = x
  )
  result <- structure(res, class = c(paste0(subclass, "_shiny_data"), "shiny_data"))
  result
}
# ------------------------------------------------------------------------------
# Classify the outcome vector into an app type:
#   numeric            -> "reg"       (regression)
#   factor, 2 levels   -> "two_cls"   (binary classification)
#   factor, >2 levels  -> "multi_cls" (multiclass classification)
# Anything else is an error.
get_app_type <- function(y) {
  if (is.numeric(y)) {
    return("reg")
  }
  if (!is.factor(y)) {
    rlang::abort("outcome should be factor or numeric")
  }
  if (nlevels(y) == 2) "two_cls" else "multi_cls"
}
# ------------------------------------------------------------------------------
#' Returns the class, app type, y name, and the number of rows of an object of
#' `shiny_data` class
#'
#' This is a print method for a shiny_data class
#' @param x an object of class shiny_data
#' @param ... Other parameters not currently used
#' @return `x`, invisibly (the standard contract for print methods).
#' @export
print.shiny_data <- function(x, ...) {
  string <- paste(
    paste("classes: ", paste0(class(x), collapse = ", ")),
    paste("app_type:", x$app_type),
    paste("y_name:", x$y_name),
    paste("nrows:", nrow(x$predictions)),
    # Namespace the tune helper (consistent with the rest of the file) and
    # collapse the result: with more than one tuning parameter, an
    # uncollapsed vector here made paste() return a character vector
    # instead of a single display string.
    paste("tuning parameters:",
          paste0(tune::.get_tune_parameter_names(x$tune_results), collapse = ", ")),
    sep = "\n"
  )
  cat(string)
  invisible(x)
}
|
# Global setup for the eyEdu Trial Viewer shiny app: loads the eye-tracking
# data, guarantees a background image exists (creating a grey placeholder if
# the images/ folder is missing), and derives the page dimensions from the
# first background PNG. Relies on `raw.data.path`, `background.x`, and
# `background.y` being defined by the calling function.
library(png)
library(ggplot2)
library(grid)
load(paste0(raw.data.path, "eyEdu_data.Rda"))
# The shinyApp would display an error if it would not find background images
# Below we create an images folder and a placeholder background image with
# width and height defined by two values that must be provided by the function
# that calls the shiny app
if (!dir.exists(paste0(raw.data.path, "images/"))){
  # Creates folder
  dir.create(paste0(raw.data.path, "images/"))
  # Creates placeholder image
  png(filename = paste0(raw.data.path, "images/placeholder.png"),
      width = background.x,
      height = background.y,
      units = "px",
      pointsize = 6,
      bg = "grey")
  par(mar = c(0, 0, 0, 0))
  # Empty plot canvas; FALSE/"n" suppress annotation, box, points, and axes.
  plot(x = 0:500, y = 0:500, ann = FALSE, bty = "n", type = "n",
       xaxt = "n", yaxt = "n")
  text(x = 250,y = 250,"No background images found! \n
       Attention: The plot dimension might be incorrect. \n
       To fix this, either provide background png-files in the images folder, \n
       or delete the images folder and provide correct plot dimensions through \n
       background.x = pixels and background.y = pixels in the function that calls \n
       the eyEdu Trial Viewer shiny app.", cex=5)
  dev.off()
}
# Use the first image in images/ to establish the page dimensions.
initial.background.file <- list.files(paste0(raw.data.path, "images/"))[1]
initial.background.file <- readPNG((paste0(raw.data.path, "images/", initial.background.file)))
page.width <- dim(initial.background.file)[2]
page.height <- dim(initial.background.file)[1]
### provided by function that calls the Shiny app
# scale.var = 0.55
# aoi.color = "red"
# aoi.names.screenshot = T
# fix.size.scale = 10
# background.x = 1680
# background.y = 1050
|
/inst/eyEduTrialViewer/global.R
|
no_license
|
SebastianKorinth/eyEdu
|
R
| false
| false
| 1,729
|
r
|
# Global setup for the eyEdu Trial Viewer shiny app: loads the eye-tracking
# data, guarantees a background image exists (creating a grey placeholder if
# the images/ folder is missing), and derives the page dimensions from the
# first background PNG. Relies on `raw.data.path`, `background.x`, and
# `background.y` being defined by the calling function.
library(png)
library(ggplot2)
library(grid)
load(paste0(raw.data.path, "eyEdu_data.Rda"))
# The shinyApp would display an error if it would not find background images
# Below we create an images folder and a placeholder background image with
# width and height defined by two values that must be provided by the function
# that calls the shiny app
if (!dir.exists(paste0(raw.data.path, "images/"))){
  # Creates folder
  dir.create(paste0(raw.data.path, "images/"))
  # Creates placeholder image
  png(filename = paste0(raw.data.path, "images/placeholder.png"),
      width = background.x,
      height = background.y,
      units = "px",
      pointsize = 6,
      bg = "grey")
  par(mar = c(0, 0, 0, 0))
  # Empty plot canvas; FALSE/"n" suppress annotation, box, points, and axes.
  plot(x = 0:500, y = 0:500, ann = FALSE, bty = "n", type = "n",
       xaxt = "n", yaxt = "n")
  text(x = 250,y = 250,"No background images found! \n
       Attention: The plot dimension might be incorrect. \n
       To fix this, either provide background png-files in the images folder, \n
       or delete the images folder and provide correct plot dimensions through \n
       background.x = pixels and background.y = pixels in the function that calls \n
       the eyEdu Trial Viewer shiny app.", cex=5)
  dev.off()
}
# Use the first image in images/ to establish the page dimensions.
initial.background.file <- list.files(paste0(raw.data.path, "images/"))[1]
initial.background.file <- readPNG((paste0(raw.data.path, "images/", initial.background.file)))
page.width <- dim(initial.background.file)[2]
page.height <- dim(initial.background.file)[1]
### provided by function that calls the Shiny app
# scale.var = 0.55
# aoi.color = "red"
# aoi.names.screenshot = T
# fix.size.scale = 10
# background.x = 1680
# background.y = 1050
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iam_operations.R
\name{iam_create_saml_provider}
\alias{iam_create_saml_provider}
\title{Creates an IAM resource that describes an identity provider (IdP) that
supports SAML 2.0}
\usage{
iam_create_saml_provider(SAMLMetadataDocument, Name)
}
\arguments{
\item{SAMLMetadataDocument}{[required] An XML document generated by an identity provider (IdP) that supports
SAML 2.0. The document includes the issuer's name, expiration
information, and keys that can be used to validate the SAML
authentication response (assertions) that are received from the IdP. You
must generate the metadata document using the identity management
software that is used as your organization's IdP.
For more information, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html}{About SAML 2.0-based Federation}
in the \emph{IAM User Guide}}
\item{Name}{[required] The name of the provider to create.
This parameter allows (through its \href{https://en.wikipedia.org/wiki/Regex}{regex pattern}) a string of characters
consisting of upper and lowercase alphanumeric characters with no
spaces. You can also include any of the following characters: \\_+=,.@-}
}
\description{
Creates an IAM resource that describes an identity provider (IdP) that
supports SAML 2.0.
The SAML provider resource that you create with this operation can be
used as a principal in an IAM role's trust policy. Such a policy can
enable federated users who sign in using the SAML IdP to assume the
role. You can create an IAM role that supports Web-based single sign-on
(SSO) to the AWS Management Console or one that supports API access to
AWS.
When you create the SAML provider resource, you upload a SAML metadata
document that you get from your IdP. That document includes the issuer's
name, expiration information, and keys that can be used to validate the
SAML authentication response (assertions) that the IdP sends. You must
generate the metadata document using the identity management software
that is used as your organization's IdP.
This operation requires \href{https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html}{Signature Version 4}.
For more information, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html}{Enabling SAML 2.0 Federated Users to Access the AWS Management Console}
and \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html}{About SAML 2.0-based Federation}
in the \emph{IAM User Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_saml_provider(
SAMLMetadataDocument = "string",
Name = "string"
)
}
}
\keyword{internal}
|
/cran/paws.security.identity/man/iam_create_saml_provider.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 2,726
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iam_operations.R
\name{iam_create_saml_provider}
\alias{iam_create_saml_provider}
\title{Creates an IAM resource that describes an identity provider (IdP) that
supports SAML 2.0}
\usage{
iam_create_saml_provider(SAMLMetadataDocument, Name)
}
\arguments{
\item{SAMLMetadataDocument}{[required] An XML document generated by an identity provider (IdP) that supports
SAML 2.0. The document includes the issuer's name, expiration
information, and keys that can be used to validate the SAML
authentication response (assertions) that are received from the IdP. You
must generate the metadata document using the identity management
software that is used as your organization's IdP.
For more information, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html}{About SAML 2.0-based Federation}
in the \emph{IAM User Guide}}
\item{Name}{[required] The name of the provider to create.
This parameter allows (through its \href{https://en.wikipedia.org/wiki/Regex}{regex pattern}) a string of characters
consisting of upper and lowercase alphanumeric characters with no
spaces. You can also include any of the following characters: \\_+=,.@-}
}
\description{
Creates an IAM resource that describes an identity provider (IdP) that
supports SAML 2.0.
The SAML provider resource that you create with this operation can be
used as a principal in an IAM role's trust policy. Such a policy can
enable federated users who sign in using the SAML IdP to assume the
role. You can create an IAM role that supports Web-based single sign-on
(SSO) to the AWS Management Console or one that supports API access to
AWS.
When you create the SAML provider resource, you upload a SAML metadata
document that you get from your IdP. That document includes the issuer's
name, expiration information, and keys that can be used to validate the
SAML authentication response (assertions) that the IdP sends. You must
generate the metadata document using the identity management software
that is used as your organization's IdP.
This operation requires \href{https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html}{Signature Version 4}.
For more information, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html}{Enabling SAML 2.0 Federated Users to Access the AWS Management Console}
and \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html}{About SAML 2.0-based Federation}
in the \emph{IAM User Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_saml_provider(
SAMLMetadataDocument = "string",
Name = "string"
)
}
}
\keyword{internal}
|
# Brute-force embedding-dimension study for the JHU atlas graphs: for each
# sample size m, estimate P_hat at every candidate dimension d and compare
# its error against the sample mean A_bar, plus the ZG and USVT
# dimension-selection rules. Results are saved to an .RData file per (m, isSVD).
# args <- commandArgs(trailingOnly = TRUE)
# setwd("/Users/Runze/Documents/GitHub/LLG/Code/R")
# setwd("E:/GitHub/LLG/Code/R")
setwd("/cis/home/rtang/LLG/Code/R")
# m = as.numeric(args[1])
# m = 1
# isSVD = 1
# mVec = c(1, 5, 10)
mVec = 10
for (m in mVec) {
  for (isSVD in 0) {
    print(c(m, isSVD))
    nIter = 500
    nCores = 12
    # dataName = "CPAC200"
    # dataName = "desikan"
    dataName = "JHU"
    # dataName = "slab907"
    # dataName = "slab1068"
    # dataName = "Talairach"
    source("function_collection.R")
    # Use explicit TRUE/FALSE (T/F are reassignable aliases).
    tmpList = read_data(dataName, DA=FALSE, newGraph=TRUE)
    A_all = tmpList[[1]]
    n = tmpList[[2]]
    M = tmpList[[3]]
    rm(tmpList)
    # nZeroVec <- sapply(1:M, function(ind) {sum(A_all[[ind]] == 0)})
    # hist(nZeroVec)
    dVec = 1:n
    nD = length(dVec)
    A_sum = add(A_all)
    error_P_hat = matrix(0, nD, nIter)
    error_A_bar = matrix(0, nD, nIter)
    # library() fails fast if parallel is unavailable; require() only warns.
    library(parallel)
    # ptm <- proc.time()
    # proc.time() - ptm
    # out <- mclapply(1:nIter, function(x) sapply(dVec, function(d) dim_brute(M, m, d, A_all, A_sum)),
    #                 mc.cores=nCores)
    # out = array(unlist(out), dim = c(2, nD, nIter))
    # error_A_bar = out[1,,]
    # error_P_hat = out[2,,]
    out <- mclapply(1:nIter, function(x) dim_brute2(M, m, dVec, A_all, A_sum, isSVD),
                    mc.cores=nCores)
    # Each iteration returns: A_bar error, nD P_hat errors, ZG dim, USVT dim.
    out = array(unlist(out), dim = c(nD+3, nIter))
    error_A_bar = out[1,]
    error_P_hat = out[2:(nD+1),]
    dim_ZG = out[nD+2,]
    dim_USVT = out[nD+3,]
    error_P_hat_ZG = rep(0, length(dim_ZG))
    error_P_hat_USVT = rep(0, length(dim_USVT))
    # seq_along() is safe for zero-length vectors, unlike 1:length().
    for (i in seq_along(dim_ZG)) {
      error_P_hat_ZG[i] = error_P_hat[dim_ZG[i], i]
      error_P_hat_USVT[i] = error_P_hat[dim_USVT[i], i]
    }
    # mean(error_A_bar)
    # mean(error_P_hat[3,])
    # for (iD in 1:nD) {
    #   print(iD)
    #   d = dVec[iD]
    #   tmp = replicate(nIter, dim_brute(M, m, d, A_all, A_sum))
    # }
    # library(foreach)
    # library(doParallel)
    # registerDoParallel(4)
    # ptm <- proc.time()
    # foreach(iD = 1:nD) %dopar% {
    #   print(iD)
    #   d = dVec[iD]
    #   for (iIter in 1:nIter) {
    #     out = dim_brute(M, m, d, A_all, A_sum)
    #     error_A_bar[iD, iIter] = out[1]
    #     error_P_hat[iD, iIter] = out[2]
    #   }
    # }
    # proc.time() - ptm
    # stopImplicitCluster()
    # for (iD in 1:nD) {
    #   print(iD)
    #   d = dVec[iD]
    #   for (iIter in 1:nIter) {
    #     out = dim_brute(M, m, d, A_all)
    #     error_A_bar[iD, iIter] = out[1]
    #     error_P_hat[iD, iIter] = out[2]
    #   }
    # }
    if (isSVD) {
      fileName = paste("../../Result/result_", dataName, "_new_brute_", "m_", m, "_svd.RData", sep="")
    } else {
      fileName = paste("../../Result/result_", dataName, "_new_brute_", "m_", m, "_eig.RData", sep="")
    }
    save(error_A_bar, error_P_hat, error_P_hat_ZG, error_P_hat_USVT,
         dim_ZG, dim_USVT, n, M, m, dVec, nIter, file=fileName)
  }
}
|
/Code/R/main_brute_JHU_10.R
|
no_license
|
neurodata/LLG
|
R
| false
| false
| 3,092
|
r
|
# Brute-force embedding-dimension study for the JHU atlas graphs: for each
# sample size m, estimate P_hat at every candidate dimension d and compare
# its error against the sample mean A_bar, plus the ZG and USVT
# dimension-selection rules. Results are saved to an .RData file per (m, isSVD).
# args <- commandArgs(trailingOnly = TRUE)
# setwd("/Users/Runze/Documents/GitHub/LLG/Code/R")
# setwd("E:/GitHub/LLG/Code/R")
setwd("/cis/home/rtang/LLG/Code/R")
# m = as.numeric(args[1])
# m = 1
# isSVD = 1
# mVec = c(1, 5, 10)
mVec = 10
for (m in mVec) {
  for (isSVD in 0) {
    print(c(m, isSVD))
    nIter = 500
    nCores = 12
    # dataName = "CPAC200"
    # dataName = "desikan"
    dataName = "JHU"
    # dataName = "slab907"
    # dataName = "slab1068"
    # dataName = "Talairach"
    source("function_collection.R")
    # Use explicit TRUE/FALSE (T/F are reassignable aliases).
    tmpList = read_data(dataName, DA=FALSE, newGraph=TRUE)
    A_all = tmpList[[1]]
    n = tmpList[[2]]
    M = tmpList[[3]]
    rm(tmpList)
    # nZeroVec <- sapply(1:M, function(ind) {sum(A_all[[ind]] == 0)})
    # hist(nZeroVec)
    dVec = 1:n
    nD = length(dVec)
    A_sum = add(A_all)
    error_P_hat = matrix(0, nD, nIter)
    error_A_bar = matrix(0, nD, nIter)
    # library() fails fast if parallel is unavailable; require() only warns.
    library(parallel)
    # ptm <- proc.time()
    # proc.time() - ptm
    # out <- mclapply(1:nIter, function(x) sapply(dVec, function(d) dim_brute(M, m, d, A_all, A_sum)),
    #                 mc.cores=nCores)
    # out = array(unlist(out), dim = c(2, nD, nIter))
    # error_A_bar = out[1,,]
    # error_P_hat = out[2,,]
    out <- mclapply(1:nIter, function(x) dim_brute2(M, m, dVec, A_all, A_sum, isSVD),
                    mc.cores=nCores)
    # Each iteration returns: A_bar error, nD P_hat errors, ZG dim, USVT dim.
    out = array(unlist(out), dim = c(nD+3, nIter))
    error_A_bar = out[1,]
    error_P_hat = out[2:(nD+1),]
    dim_ZG = out[nD+2,]
    dim_USVT = out[nD+3,]
    error_P_hat_ZG = rep(0, length(dim_ZG))
    error_P_hat_USVT = rep(0, length(dim_USVT))
    # seq_along() is safe for zero-length vectors, unlike 1:length().
    for (i in seq_along(dim_ZG)) {
      error_P_hat_ZG[i] = error_P_hat[dim_ZG[i], i]
      error_P_hat_USVT[i] = error_P_hat[dim_USVT[i], i]
    }
    # mean(error_A_bar)
    # mean(error_P_hat[3,])
    # for (iD in 1:nD) {
    #   print(iD)
    #   d = dVec[iD]
    #   tmp = replicate(nIter, dim_brute(M, m, d, A_all, A_sum))
    # }
    # library(foreach)
    # library(doParallel)
    # registerDoParallel(4)
    # ptm <- proc.time()
    # foreach(iD = 1:nD) %dopar% {
    #   print(iD)
    #   d = dVec[iD]
    #   for (iIter in 1:nIter) {
    #     out = dim_brute(M, m, d, A_all, A_sum)
    #     error_A_bar[iD, iIter] = out[1]
    #     error_P_hat[iD, iIter] = out[2]
    #   }
    # }
    # proc.time() - ptm
    # stopImplicitCluster()
    # for (iD in 1:nD) {
    #   print(iD)
    #   d = dVec[iD]
    #   for (iIter in 1:nIter) {
    #     out = dim_brute(M, m, d, A_all)
    #     error_A_bar[iD, iIter] = out[1]
    #     error_P_hat[iD, iIter] = out[2]
    #   }
    # }
    if (isSVD) {
      fileName = paste("../../Result/result_", dataName, "_new_brute_", "m_", m, "_svd.RData", sep="")
    } else {
      fileName = paste("../../Result/result_", dataName, "_new_brute_", "m_", m, "_eig.RData", sep="")
    }
    save(error_A_bar, error_P_hat, error_P_hat_ZG, error_P_hat_USVT,
         dim_ZG, dim_USVT, n, M, m, dVec, nIter, file=fileName)
  }
}
|
# HTML dependency bundling the mesh layer's JavaScript for htmlwidgets.
mapdeckMeshDependency <- function() {
  mesh_js <- createHtmlDependency(
    name = "mesh",
    version = "1.0.0",
    src = system.file("htmlwidgets/lib/mesh", package = "mapdeck"),
    script = c("mesh.js"),
    all_files = FALSE
  )
  list(mesh_js)
}
# Map a mesh's primitive type to the name of its index slot:
# quads are indexed by "ib", triangles by "it". Any other primitive type
# yields NULL (matching switch()'s fall-through for unmatched strings).
find_mesh_index <- function( data ) {
  primitive <- data[["primitivetype"]]
  if (identical(primitive, "quad")) {
    "ib"
  } else if (identical(primitive, "triangle")) {
    "it"
  } else {
    NULL
  }
}
#' Add Mesh
#'
#' Adds polygons to the map from a \code{quadmesh} object
#'
#' @inheritParams add_polygon
#'
#' @inheritSection add_arc legend
#' @inheritSection add_arc id
#'
#' @details
#'
#' \code{add_mesh} supports quadmesh objects
#'
#' @export
add_mesh <- function(
  map,
  data = get_map_data(map),
  fill_opacity = NULL,
  elevation = NULL,
  tooltip = NULL,
  auto_highlight = FALSE,
  highlight_colour = "#AAFFFFFF",
  light_settings = list(),
  layer_id = NULL,
  id = NULL,
  palette = "viridis",
  na_colour = "#808080FF",
  legend = FALSE,
  legend_options = NULL,
  legend_format = NULL,
  update_view = TRUE,
  focus_layer = FALSE,
  transitions = NULL
) {
  #if( is.null( stroke_colour )) stroke_colour <- fill_colour
  # Mark the layer as experimental -- presumably emits a user-facing notice;
  # confirm against experimental_layer()'s definition.
  experimental_layer( "mesh" )
  # Only quadmesh objects are supported as input.
  if(!inherits(data, "quadmesh")) {
    stop("expecting quadmesh object")
  }
  # `l` collects the layer's aesthetic arguments for the C++ serializers.
  l <- list()
  # Fill colour is fixed to the mesh's "average_z" attribute; it is not a
  # user-settable argument for this layer.
  fill_colour = "average_z"
  l[["fill_colour"]] <- force( fill_colour )
  l[["fill_opacity"]] <- resolve_opacity( fill_opacity )
  l[["elevation"]] <- force( elevation )
  l[["tooltip"]] <- force( tooltip )
  l[["id"]] <- force( id )
  l[["na_colour"]] <- force( na_colour )
  # quadmesh stores vertices in "vb"; the index slot name depends on the
  # primitive type ("ib" for quads, "it" for triangles).
  vertex <- "vb"
  index <- find_mesh_index( data )
  ## check:
  if ( data[["primitivetype"]] == "quad" & is.null( data[["ib"]] ) ) {
    stop("badly formed quadmesh type. Found quad and expecting ib index")
  }
  l <- resolve_palette( l, palette )
  l <- resolve_legend( l, legend )
  l <- resolve_legend_options( l, legend_options )
  l <- resolve_data( data, l, c("POLYGON","MULTIPOLYGON") )
  bbox <- init_bbox()
  update_view <- force( update_view )
  focus_layer <- force( focus_layer )
  is_extruded <- TRUE
  # if( !is.null( l[["stroke_width"]] ) | !is.null( l[["stroke_colour"]] ) ) {
  # 	is_extruded <- FALSE
  # 	if( !is.null( elevation ) ) {
  # 		message("stroke provided, ignoring elevation")
  # 	}
  # 	if( is.null( l[["stroke_width"]] ) ) {
  # 		l[["stroke_width"]] <- 1L
  # 	}
  # }
  # resolve_data() may have replaced the data object; unpack it from `l`.
  if ( !is.null(l[["data"]]) ) {
    data <- l[["data"]]
    l[["data"]] <- NULL
  }
  ## sf objects come with a bounding box
  if( !is.null(l[["bbox"]] ) ) {
    bbox <- l[["bbox"]]
    l[["bbox"]] <- NULL
  }
  checkHexAlpha(highlight_colour)
  # NOTE(review): the id prefix here is "polygon" while clear_mesh() uses
  # "mesh" -- confirm clear_mesh() can actually clear layers added by this
  # function when layer_id is NULL.
  layer_id <- layerId(layer_id, "polygon")
  map <- addDependency(map, mapdeckMeshDependency())
  tp <- l[["data_type"]]
  l[["data_type"]] <- NULL
  jsfunc <- "add_mesh"
  if ( tp == "mesh" ) {
    # geometry_column <- c( "geometry" )
    geometry_column <- c( vertex, index )
    shape <- rcpp_mesh_geojson( data, l, geometry_column )
  }
  # geometry_column <- c( "geometry" ) ## This is where we woudl also specify 'origin' or 'destination'
  # shape <- rcpp_polygon_geojson( data, l, geometry_column )
  # } else if ( tp == "sfencoded" ) {
  # 	geometry_column <- "polyline"
  # 	shape <- rcpp_polygon_polyline( data, l, geometry_column )
  # 	jsfunc <- "add_polygon_polyline"
  # }
  # return( shape )
  light_settings <- jsonify::to_json(light_settings, unbox = T)
  js_transitions <- resolve_transitions( transitions, "polygon" )
  # A pre-built JSON legend is passed through untouched; otherwise apply the
  # user's formatting options.
  if( inherits( legend, "json" ) ) {
    shape[["legend"]] <- legend
  } else {
    shape[["legend"]] <- resolve_legend_format( shape[["legend"]], legend_format )
  }
  # Hand everything to the htmlwidgets JS method that draws the layer.
  invoke_method(
    map, jsfunc, map_type( map ), shape[["data"]], layer_id, light_settings,
    auto_highlight, highlight_colour, shape[["legend"]], bbox, update_view, focus_layer,
    js_transitions, is_extruded
  )
}
#' @rdname clear
#' @export
clear_mesh <- function( map, layer_id = NULL) {
  # Resolve the default layer id for the "mesh" prefix, then ask the JS side
  # to clear that layer.
  resolved_id <- layerId(layer_id, "mesh")
  invoke_method(map, "md_layer_clear", map_type( map ), resolved_id, "mesh" )
}
|
/R/map_layer_mesh.R
|
no_license
|
mdsumner/mapdeck
|
R
| false
| false
| 3,873
|
r
|
# HTML dependency bundling the mesh layer's JavaScript for htmlwidgets.
mapdeckMeshDependency <- function() {
  mesh_js <- createHtmlDependency(
    name = "mesh",
    version = "1.0.0",
    src = system.file("htmlwidgets/lib/mesh", package = "mapdeck"),
    script = c("mesh.js"),
    all_files = FALSE
  )
  list(mesh_js)
}
# Map a mesh's primitive type to the name of its index slot:
# quads are indexed by "ib", triangles by "it". Any other primitive type
# yields NULL (matching switch()'s fall-through for unmatched strings).
find_mesh_index <- function( data ) {
  primitive <- data[["primitivetype"]]
  if (identical(primitive, "quad")) {
    "ib"
  } else if (identical(primitive, "triangle")) {
    "it"
  } else {
    NULL
  }
}
#' Add Mesh
#'
#' Adds polygons to the map from a \code{quadmesh} object
#'
#' @inheritParams add_polygon
#'
#' @inheritSection add_arc legend
#' @inheritSection add_arc id
#'
#' @details
#'
#' \code{add_mesh} supports quadmesh objects
#'
#' @export
add_mesh <- function(
  map,
  data = get_map_data(map),
  fill_opacity = NULL,
  elevation = NULL,
  tooltip = NULL,
  auto_highlight = FALSE,
  highlight_colour = "#AAFFFFFF",
  light_settings = list(),
  layer_id = NULL,
  id = NULL,
  palette = "viridis",
  na_colour = "#808080FF",
  legend = FALSE,
  legend_options = NULL,
  legend_format = NULL,
  update_view = TRUE,
  focus_layer = FALSE,
  transitions = NULL
) {
  #if( is.null( stroke_colour )) stroke_colour <- fill_colour
  # Mark the layer as experimental -- presumably emits a user-facing notice;
  # confirm against experimental_layer()'s definition.
  experimental_layer( "mesh" )
  # Only quadmesh objects are supported as input.
  if(!inherits(data, "quadmesh")) {
    stop("expecting quadmesh object")
  }
  # `l` collects the layer's aesthetic arguments for the C++ serializers.
  l <- list()
  # Fill colour is fixed to the mesh's "average_z" attribute; it is not a
  # user-settable argument for this layer.
  fill_colour = "average_z"
  l[["fill_colour"]] <- force( fill_colour )
  l[["fill_opacity"]] <- resolve_opacity( fill_opacity )
  l[["elevation"]] <- force( elevation )
  l[["tooltip"]] <- force( tooltip )
  l[["id"]] <- force( id )
  l[["na_colour"]] <- force( na_colour )
  # quadmesh stores vertices in "vb"; the index slot name depends on the
  # primitive type ("ib" for quads, "it" for triangles).
  vertex <- "vb"
  index <- find_mesh_index( data )
  ## check:
  if ( data[["primitivetype"]] == "quad" & is.null( data[["ib"]] ) ) {
    stop("badly formed quadmesh type. Found quad and expecting ib index")
  }
  l <- resolve_palette( l, palette )
  l <- resolve_legend( l, legend )
  l <- resolve_legend_options( l, legend_options )
  l <- resolve_data( data, l, c("POLYGON","MULTIPOLYGON") )
  bbox <- init_bbox()
  update_view <- force( update_view )
  focus_layer <- force( focus_layer )
  is_extruded <- TRUE
  # if( !is.null( l[["stroke_width"]] ) | !is.null( l[["stroke_colour"]] ) ) {
  # 	is_extruded <- FALSE
  # 	if( !is.null( elevation ) ) {
  # 		message("stroke provided, ignoring elevation")
  # 	}
  # 	if( is.null( l[["stroke_width"]] ) ) {
  # 		l[["stroke_width"]] <- 1L
  # 	}
  # }
  # resolve_data() may have replaced the data object; unpack it from `l`.
  if ( !is.null(l[["data"]]) ) {
    data <- l[["data"]]
    l[["data"]] <- NULL
  }
  ## sf objects come with a bounding box
  if( !is.null(l[["bbox"]] ) ) {
    bbox <- l[["bbox"]]
    l[["bbox"]] <- NULL
  }
  checkHexAlpha(highlight_colour)
  # NOTE(review): the id prefix here is "polygon" while clear_mesh() uses
  # "mesh" -- confirm clear_mesh() can actually clear layers added by this
  # function when layer_id is NULL.
  layer_id <- layerId(layer_id, "polygon")
  map <- addDependency(map, mapdeckMeshDependency())
  tp <- l[["data_type"]]
  l[["data_type"]] <- NULL
  jsfunc <- "add_mesh"
  if ( tp == "mesh" ) {
    # geometry_column <- c( "geometry" )
    geometry_column <- c( vertex, index )
    shape <- rcpp_mesh_geojson( data, l, geometry_column )
  }
  # geometry_column <- c( "geometry" ) ## This is where we woudl also specify 'origin' or 'destination'
  # shape <- rcpp_polygon_geojson( data, l, geometry_column )
  # } else if ( tp == "sfencoded" ) {
  # 	geometry_column <- "polyline"
  # 	shape <- rcpp_polygon_polyline( data, l, geometry_column )
  # 	jsfunc <- "add_polygon_polyline"
  # }
  # return( shape )
  light_settings <- jsonify::to_json(light_settings, unbox = T)
  js_transitions <- resolve_transitions( transitions, "polygon" )
  # A pre-built JSON legend is passed through untouched; otherwise apply the
  # user's formatting options.
  if( inherits( legend, "json" ) ) {
    shape[["legend"]] <- legend
  } else {
    shape[["legend"]] <- resolve_legend_format( shape[["legend"]], legend_format )
  }
  # Hand everything to the htmlwidgets JS method that draws the layer.
  invoke_method(
    map, jsfunc, map_type( map ), shape[["data"]], layer_id, light_settings,
    auto_highlight, highlight_colour, shape[["legend"]], bbox, update_view, focus_layer,
    js_transitions, is_extruded
  )
}
#' @rdname clear
#' @export
clear_mesh <- function( map, layer_id = NULL) {
  # Resolve the default layer id for the "mesh" prefix, then ask the JS side
  # to clear that layer.
  resolved_id <- layerId(layer_id, "mesh")
  invoke_method(map, "md_layer_clear", map_type( map ), resolved_id, "mesh" )
}
|
# Candidate models for deaths as a function of population (`mort` data).
# Linear model on raw population.
model1<-lm(deaths ~ pop, data = mort)
#gleason model
model2<-lm(deaths ~ log(1+pop), data = mort)
#nonlinear Arrhenius model
model3 <- nls(deaths~b0*pop^b1,data=mort,start=list(b0=1,b1=1))
#Poisson and negative binomial models
model4<-glm(deaths~log(1+pop),data=mort,family=poisson)
library(MASS)
model5<-glm.nb(deaths~log(1+pop),data=mort)
# Compare the count/normal models directly: same response scale, so AIC and
# log-likelihood are comparable across models 1-5.
sapply(list(model1,model2,model3,model4,model5),AIC)
sapply(list(model1,model2,model3,model4,model5),logLik)
#graph Poisson and NB together
plot(deaths~log(1+pop),data=mort)
coef(model4)
# Fitted mean curves on the log(1+pop) axis for the two count models.
poisson.func<-function(x) exp(coef(model4)[1]+coef(model4)[2]*x)
coef(model5)
NB.func<-function(x) exp(coef(model5)[1]+coef(model5)[2]*x)
curve(poisson.func,add=T,col=2)
curve(NB.func,add=T,col=4,lty=2)
#transformed response models
model6<-lm(log(1+deaths)~log(1+pop),data=mort)
model7<-lm(sqrt(deaths)~log(1+pop),data=mort)
#log-likelihoods are not comparable
# (models 6-7 are fit on transformed responses, so their likelihoods are on
# a different scale until a change-of-variables correction is applied below)
logLik(model5)
logLik(model6)
logLik(model7)
# Gaussian log-likelihood of an *untransformed* response under a fitted
# model, using the maximum-likelihood variance estimate (residual sum of
# squares divided by n, not n - 1). Matches stats::logLik() for lm fits.
norm.test <- function(model,y) {
  res <- residuals(model)
  sigma_ml <- sqrt(sum(res^2) / length(res))
  sum(log(dnorm(y, mean = predict(model), sd = sigma_ml)))
}
# Sanity check: for an untransformed lm fit, norm.test() should reproduce
# the value reported by logLik().
model6a<-lm(deaths~log(1+pop),data=mort)
logLik(model6a)
norm.test(model6a,mort$deaths)
# Log-likelihood of y itself when a normal model was fit to log(y):
# apply the change-of-variables Jacobian, d log(y)/dy = 1/y, to put the
# likelihood back on the scale of y so it is comparable across models.
norm.log <- function(model,y) {
  log_y <- log(y)
  res <- residuals(model)
  sigma_ml <- sqrt(sum(res^2) / length(res))
  sum(log(dnorm(log_y, mean = predict(model), sd = sigma_ml) * 1/y))
}
# NOTE(review): model6 was fit to log(1 + deaths), but norm.log() transforms
# with log(y) and a 1/y Jacobian -- the transform and Jacobian do not match
# the fitted response exactly; confirm this is intended (e.g. to tolerate
# zero counts).
norm.log(model6,mort$deaths)
# Log-likelihood of y itself when a normal model was fit to sqrt(y):
# apply the change-of-variables Jacobian, d sqrt(y)/dy = 1/(2*sqrt(y)),
# so the likelihood is on the scale of y and comparable across models.
norm.sqrt <- function(model,y) {
  sqrt_y <- sqrt(y)
  res <- residuals(model)
  sigma_ml <- sqrt(sum(res^2) / length(res))
  sum(log(dnorm(sqrt_y, mean = predict(model), sd = sigma_ml) * 1/(2*sqrt(y))))
}
norm.sqrt(model7,mort$deaths)
#obtain log-likelihoods of all models
# (models 6-7 are Jacobian-corrected so all seven are now comparable)
c(sapply(list(model1,model2,model3,model4,model5),logLik),norm.log(model6,mort$deaths),norm.sqrt(model7,mort$deaths))
#obtain AIC of all models
# AIC = -2*LL + 2*k; k = 3 for the transformed lm fits (intercept, slope, sigma)
c(sapply(list(model1,model2,model3,model4,model5),AIC),-2*norm.log(model6,mort$deaths)+2*3,-2*norm.sqrt(model7,mort$deaths)+2*3)
# Overlay the fitted curves of the four count-capable models.
plot(deaths~log(1+pop), data=mort)
curve(poisson.func, add=T, col='pink', lwd=2)
curve(NB.func, add=T, col=2, lty=2)
# Back-transformed medians of the transformed-response models.
# NOTE(review): model6 was fit to log(1+deaths), so the median of deaths
# would be exp(pred) - 1 rather than exp(pred) -- confirm intended.
lognorm.median <- function(x) exp(coef(model6)[1]+coef(model6)[2]*x)
sqrtnorm.median <- function(x) (coef(model7)[1]+coef(model7)[2]*x)^2
curve(lognorm.median, add=T, col='grey60', lwd=2)
curve(sqrtnorm.median, add=T, lty=2)
legend('topleft', c('Poisson (mean)', 'negative binomial (mean)', 'lognormal (median)', 'sqrt-normal (median)'), col=c('pink',2,'grey60',1), lty=c(1,2,1,2), lwd=c(2,1,2,1), cex=.9, bty='n')
model.names <- paste('model', 1:7, sep='')
#obtain log-likelihoods of all models
model.LL <- c(sapply(list(model1, model2, model3, model4, model5), logLik), norm.log(model6, mort$deaths), norm.sqrt(model7, mort$deaths))
#obtain AIC of all models
model.AIC <- c(sapply(list(model1, model2, model3, model4, model5), AIC), -2*norm.log(model6,mort$deaths)+2*3, -2*norm.sqrt(model7,mort$deaths)+2*3)
# Summary table of comparable log-likelihoods and AICs.
data.frame(model=model.names, LL=model.LL, AIC=model.AIC)
|
/adapt2.R
|
no_license
|
MattHealey/M16
|
R
| false
| false
| 3,128
|
r
|
# Candidate models for deaths as a function of population (`mort` data).
# Linear model on raw population.
model1<-lm(deaths ~ pop, data = mort)
#gleason model
model2<-lm(deaths ~ log(1+pop), data = mort)
#nonlinear Arrhenius model
model3 <- nls(deaths~b0*pop^b1,data=mort,start=list(b0=1,b1=1))
#Poisson and negative binomial models
model4<-glm(deaths~log(1+pop),data=mort,family=poisson)
library(MASS)
model5<-glm.nb(deaths~log(1+pop),data=mort)
# Compare the count/normal models directly: same response scale, so AIC and
# log-likelihood are comparable across models 1-5.
sapply(list(model1,model2,model3,model4,model5),AIC)
sapply(list(model1,model2,model3,model4,model5),logLik)
#graph Poisson and NB together
plot(deaths~log(1+pop),data=mort)
coef(model4)
# Fitted mean curves on the log(1+pop) axis for the two count models.
poisson.func<-function(x) exp(coef(model4)[1]+coef(model4)[2]*x)
coef(model5)
NB.func<-function(x) exp(coef(model5)[1]+coef(model5)[2]*x)
curve(poisson.func,add=T,col=2)
curve(NB.func,add=T,col=4,lty=2)
#transformed response models
model6<-lm(log(1+deaths)~log(1+pop),data=mort)
model7<-lm(sqrt(deaths)~log(1+pop),data=mort)
#log-likelihoods are not comparable
# (models 6-7 are fit on transformed responses, so their likelihoods are on
# a different scale until a change-of-variables correction is applied below)
logLik(model5)
logLik(model6)
logLik(model7)
# Gaussian log-likelihood of an *untransformed* response under a fitted
# model, using the maximum-likelihood variance estimate (residual sum of
# squares divided by n, not n - 1). Matches stats::logLik() for lm fits.
norm.test <- function(model,y) {
  res <- residuals(model)
  sigma_ml <- sqrt(sum(res^2) / length(res))
  sum(log(dnorm(y, mean = predict(model), sd = sigma_ml)))
}
# Sanity check: for an untransformed lm fit, norm.test() should reproduce
# the value reported by logLik().
model6a<-lm(deaths~log(1+pop),data=mort)
logLik(model6a)
norm.test(model6a,mort$deaths)
# Log-likelihood of y itself when a normal model was fit to log(y):
# apply the change-of-variables Jacobian, d log(y)/dy = 1/y, to put the
# likelihood back on the scale of y so it is comparable across models.
norm.log <- function(model,y) {
  log_y <- log(y)
  res <- residuals(model)
  sigma_ml <- sqrt(sum(res^2) / length(res))
  sum(log(dnorm(log_y, mean = predict(model), sd = sigma_ml) * 1/y))
}
# NOTE(review): model6 was fit to log(1 + deaths), but norm.log() transforms
# with log(y) and a 1/y Jacobian -- the transform and Jacobian do not match
# the fitted response exactly; confirm this is intended (e.g. to tolerate
# zero counts).
norm.log(model6,mort$deaths)
# Log-likelihood of y itself when a normal model was fit to sqrt(y):
# apply the change-of-variables Jacobian, d sqrt(y)/dy = 1/(2*sqrt(y)),
# so the likelihood is on the scale of y and comparable across models.
norm.sqrt <- function(model,y) {
  sqrt_y <- sqrt(y)
  res <- residuals(model)
  sigma_ml <- sqrt(sum(res^2) / length(res))
  sum(log(dnorm(sqrt_y, mean = predict(model), sd = sigma_ml) * 1/(2*sqrt(y))))
}
norm.sqrt(model7,mort$deaths)
#obtain log-likelihoods of all models
# (models 6-7 are Jacobian-corrected so all seven are now comparable)
c(sapply(list(model1,model2,model3,model4,model5),logLik),norm.log(model6,mort$deaths),norm.sqrt(model7,mort$deaths))
#obtain AIC of all models
# AIC = -2*LL + 2*k; k = 3 for the transformed lm fits (intercept, slope, sigma)
c(sapply(list(model1,model2,model3,model4,model5),AIC),-2*norm.log(model6,mort$deaths)+2*3,-2*norm.sqrt(model7,mort$deaths)+2*3)
# Overlay the fitted curves of the four count-capable models.
plot(deaths~log(1+pop), data=mort)
curve(poisson.func, add=T, col='pink', lwd=2)
curve(NB.func, add=T, col=2, lty=2)
# Back-transformed medians of the transformed-response models.
# NOTE(review): model6 was fit to log(1+deaths), so the median of deaths
# would be exp(pred) - 1 rather than exp(pred) -- confirm intended.
lognorm.median <- function(x) exp(coef(model6)[1]+coef(model6)[2]*x)
sqrtnorm.median <- function(x) (coef(model7)[1]+coef(model7)[2]*x)^2
curve(lognorm.median, add=T, col='grey60', lwd=2)
curve(sqrtnorm.median, add=T, lty=2)
legend('topleft', c('Poisson (mean)', 'negative binomial (mean)', 'lognormal (median)', 'sqrt-normal (median)'), col=c('pink',2,'grey60',1), lty=c(1,2,1,2), lwd=c(2,1,2,1), cex=.9, bty='n')
model.names <- paste('model', 1:7, sep='')
#obtain log-likelihoods of all models
model.LL <- c(sapply(list(model1, model2, model3, model4, model5), logLik), norm.log(model6, mort$deaths), norm.sqrt(model7, mort$deaths))
#obtain AIC of all models
model.AIC <- c(sapply(list(model1, model2, model3, model4, model5), AIC), -2*norm.log(model6,mort$deaths)+2*3, -2*norm.sqrt(model7,mort$deaths)+2*3)
# Summary table of comparable log-likelihoods and AICs.
data.frame(model=model.names, LL=model.LL, AIC=model.AIC)
|
#!/usr/bin/Rscript
require(docopt)
'Usage:
neighborhoods.R [-g <geodata> -o <output> -z <zone> ]
Options:
-g Geographic data in geojson format [default: data/spatial/Neighborhoods_Philadelphia.geojson]
-o Neighborhood distance matrix [default: cache/neighborhood_distances.csv]
-z UTM Zone [default: 17]
' -> doc
opts <- docopt(doc)
p4string <- sprintf("+proj=utm +zone=%s",opts$z)
require(raster)
require(rgdal)
require(dplyr)
require(stringr)
require(pipeR)
require(readr)
# Euclidean distance between points (x1, y1) and (x2, y2); fully vectorized,
# so any argument may be a vector (standard recycling applies).
distances <- function(x1, x2, y1, y2) {
  dx <- x1 - x2
  dy <- y1 - y2
  sqrt(dx^2 + dy^2)
}
## Load up philly shapefile
readOGR(opts$g, "OGRGeoJSON") %>>%
    ## Project to utm
    spTransform(CRS=CRS(p4string)) -> m
## Get neighborhood coordinates
## Build a centroid lookup table sorted by name, with stable 1..N integer IDs.
z <- coordinates(m)
data.frame(NEIGHBORHOOD = m$name,
           CENTROID_X = z[,1],
           CENTROID_Y = z[,2]) %>>%
    arrange(NEIGHBORHOOD) %>>%
    {mutate(.,NEIGHBORHOOD_ID = 1:nrow(.))}-> centroids
## Pairwise centroid distances. Build one data frame per origin
## neighborhood and bind them all once at the end, instead of growing
## out_df with rbind() inside the loop (which copies the whole frame on
## every iteration, O(n^2) in total).
N <- nrow(centroids)
pair_list <- vector("list", N)
for (i in seq_len(N)) {  # seq_len() is safe even if centroids is empty
  dist <- distances(centroids$CENTROID_X[i], centroids$CENTROID_X,
                    centroids$CENTROID_Y[i], centroids$CENTROID_Y)
  pair_list[[i]] <- data.frame(from = centroids$NEIGHBORHOOD[i],
                               to = centroids$NEIGHBORHOOD,
                               from_id = centroids$NEIGHBORHOOD_ID[i],
                               to_id = centroids$NEIGHBORHOOD_ID,
                               distance = dist/1000)  # UTM metres -> km
}
out_df <- do.call(rbind, pair_list)
write_csv(out_df, opts$o)
|
/munge/neighborhood_distance.R
|
no_license
|
jzelner/phl-crime-model
|
R
| false
| false
| 1,433
|
r
|
#!/usr/bin/Rscript
require(docopt)
'Usage:
neighborhoods.R [-g <geodata> -o <output> -z <zone> ]
Options:
-g Geographic data in geojson format [default: data/spatial/Neighborhoods_Philadelphia.geojson]
-o Neighborhood distance matrix [default: cache/neighborhood_distances.csv]
-z UTM Zone [default: 17]
' -> doc
opts <- docopt(doc)
p4string <- sprintf("+proj=utm +zone=%s",opts$z)
require(raster)
require(rgdal)
require(dplyr)
require(stringr)
require(pipeR)
require(readr)
# Euclidean distance between points (x1, y1) and (x2, y2); fully vectorized,
# so any argument may be a vector (standard recycling applies).
distances <- function(x1, x2, y1, y2) {
  dx <- x1 - x2
  dy <- y1 - y2
  sqrt(dx^2 + dy^2)
}
## Load up philly shapefile
readOGR(opts$g, "OGRGeoJSON") %>>%
## Project to utm
spTransform(CRS=CRS(p4string)) -> m
## Get neighborhood coordinates
z <- coordinates(m)
data.frame(NEIGHBORHOOD = m$name,
CENTROID_X = z[,1],
CENTROID_Y = z[,2]) %>>%
arrange(NEIGHBORHOOD) %>>%
{mutate(.,NEIGHBORHOOD_ID = 1:nrow(.))}-> centroids
## Pairwise centroid distances. Build one data frame per origin
## neighborhood and bind them all once at the end, instead of growing
## out_df with rbind() inside the loop (which copies the whole frame on
## every iteration, O(n^2) in total).
N <- nrow(centroids)
pair_list <- vector("list", N)
for (i in seq_len(N)) {  # seq_len() is safe even if centroids is empty
  dist <- distances(centroids$CENTROID_X[i], centroids$CENTROID_X,
                    centroids$CENTROID_Y[i], centroids$CENTROID_Y)
  pair_list[[i]] <- data.frame(from = centroids$NEIGHBORHOOD[i],
                               to = centroids$NEIGHBORHOOD,
                               from_id = centroids$NEIGHBORHOOD_ID[i],
                               to_id = centroids$NEIGHBORHOOD_ID,
                               distance = dist/1000)  # UTM metres -> km
}
out_df <- do.call(rbind, pair_list)
write_csv(out_df, opts$o)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/AllMethods.R
\docType{methods}
\name{union}
\alias{union}
\alias{union,groups,groups-method}
\alias{union,snppicker,snppicker-method}
\alias{union,tags,tags-method}
\title{Create a union of groups, snppicker or tags objects}
\usage{
union(x, y)
\S4method{union}{snppicker,snppicker}(x, y)
\S4method{union}{tags,tags}(x, y)
\S4method{union}{groups,groups}(x, y)
}
\arguments{
\item{x}{object of class \code{groups}, \code{snppicker} or \code{tags}}
\item{y}{object of same class as x}
}
\value{
object of class groups
}
\description{
First, objects are converted to class groups. Then any groups
which overlap are identified, and replaced by their union. Groups
which do not overlap are unchanged. The combined set of groups is
returned.
}
\author{
Chris Wallace
}
|
/man/union.Rd
|
no_license
|
stas-g/GUESSFM
|
R
| false
| true
| 867
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/AllMethods.R
\docType{methods}
\name{union}
\alias{union}
\alias{union,groups,groups-method}
\alias{union,snppicker,snppicker-method}
\alias{union,tags,tags-method}
\title{Create a union of groups, snppicker or tags objects}
\usage{
union(x, y)
\S4method{union}{snppicker,snppicker}(x, y)
\S4method{union}{tags,tags}(x, y)
\S4method{union}{groups,groups}(x, y)
}
\arguments{
\item{x}{object of class \code{groups}, \code{snppicker} or \code{tags}}
\item{y}{object of same class as x}
}
\value{
object of class groups
}
\description{
First, objects are converted to class groups. Then any groups
which overlap are identified, and replaced by their union. Groups
which do not overlap are unchanged. The combined set of groups is
returned.
}
\author{
Chris Wallace
}
|
# Foundation of Inference
# install.packages("WDI")
library(WDI)
WDI.data <-
WDI(country = "all",
indicator = c("SH.DYN.NMRT","DC.DAC.USAL.CD", "SH.VAC.TTNS.ZS",
"SP.URB.TOTL.IN.ZS", "NE.TRD.GNFS.ZS"),
start = 1990, end = 2005, extra = FALSE, cache = NULL)
par(mfrow = c(1, 3))
hist(WDI.data$SH.DYN.NMRT, main = "Mortality rate")
hist(WDI.data$DC.DAC.USAL.CD, main = "Aid Raw")
hist(log(WDI.data$DC.DAC.USAL.CD+.1), main = "Logged Aid")
hist(WDI.data$SH.VAC.TTNS.ZS, main = "Newborns vaccinated for tetanus")
hist(WDI.data$SP.URB.TOTL.IN.ZS, main = "Urban population rate")
hist(WDI.data$NE.TRD.GNFS.ZS, main = "Trade openness_raw")
hist(log(WDI.data$NE.TRD.GNFS.ZS), main = "Logged trade openness")
samp1.mort <- sample(WDI.data$SH.DYN.NMRT, 50)
mean(samp1.mort, na.rm = TRUE) #
mean(WDI.data$SH.DYN.NMRT, na.rm = TRUE)
sample_means50.mort <- rep(NA, 5000)
for(i in 1:5000) {
samp <- sample(WDI.data$SH.DYN.NMRT, 50)
sample_means50.mort[i] <- mean(samp, na.rm = TRUE)
}
mean(sample_means50.mort, na.rm = TRUE)
mean(WDI.data$SH.DYN.NMRT, na.rm = TRUE)
samp.pop <- sample(WDI.data$SP.URB.TOTL.IN.ZS, 50)
mean.pop <- mean(samp.pop, na.rm = TRUE) # sample mean
se.pop <- sd(samp.pop, na.rm = TRUE)/sqrt(length(samp.pop)) # 표준오차
lower.pop <- mean.pop - (1.96 * se.pop)
upper.pop <- mean.pop + (1.96 * se.pop)
c(lower.pop, mean.pop, upper.pop)
library(tidyverse)
estimates.df <- bind_rows(tibble(lower.pop, mean.pop, upper.pop))
estimates.df$sample.no <- 1
library(ggplot2)
ggplot(data = estimates.df, aes(x = sample.no)) +
geom_pointrange(aes(y = mean.pop, ymin = lower.pop, ymax = upper.pop))
mean.pop.20 <- rep(NA, 20)
lower.pop.20 <- rep(NA, 20)
upper.pop.20 <- rep(NA, 20)
for(x in 1:20) {
samp.pop <- sample(WDI.data$SP.URB.TOTL.IN.ZS, 50)
mean.pop.20[x] <- mean(samp.pop, na.rm = TRUE)
lower.pop.20[x] <- mean.pop.20[x] -
(1.96 * (sd(samp.pop, na.rm = TRUE)/sqrt(length(samp.pop))))
upper.pop.20[x] <- mean.pop.20[x] +
(1.96 * (sd(samp.pop, na.rm = TRUE)/sqrt(length(samp.pop))))
}
estimates.df.20 <- tibble(lower.pop.20, mean.pop.20,upper.pop.20)
estimates.df.20$sample.no <- c(1:20)
ggplot(data = estimates.df.20, aes(x = sample.no)) +
geom_pointrange(aes(y = mean.pop.20,
ymin = lower.pop.20,
ymax = upper.pop.20))
ggplot(data = estimates.df.20, aes(x = sample.no)) +
geom_pointrange(aes(y = mean.pop.20,
ymin = lower.pop.20, ymax = upper.pop.20)) +
geom_hline(data = WDI.data,
aes(yintercept = mean(SP.URB.TOTL.IN.ZS, na.rm = TRUE))) +
coord_flip() +
theme_bw() +
xlab("Sample number") + ylab("Estimates") +
ggtitle("Sample means for urban pop, with 95% confidence intervals")
estimates.df.20$outside <-
ifelse(estimates.df.20$lower.pop.20 >
mean(WDI.data$SP.URB.TOTL.IN.ZS, na.rm = TRUE) |
estimates.df.20$upper.pop.20 <
mean(WDI.data$SP.URB.TOTL.IN.ZS, na.rm = TRUE), 1, 0)
ggplot(data = estimates.df.20,
aes(x = sample.no, color = as.factor(outside))) +
geom_pointrange(aes(y = mean.pop.20,
ymin = lower.pop.20,
ymax = upper.pop.20)) +
geom_hline(data = WDI.data,
aes(yintercept = mean(SP.URB.TOTL.IN.ZS, na.rm = TRUE))) +
coord_flip() +
theme_bw() +
scale_color_manual(name = "", values=c("#9999CC", "#CC6666")) +
theme(legend.position="none") +
xlab("Sample number") + ylab("Estimates") +
ggtitle("Sample means for urban pop, with 95% confidence intervals")
library(ezpickr)
QOG <- pick(file = "http://www.qogdata.pol.gu.se/data/qog_bas_ts_jan19.dta")
QOG <- QOG[QOG$year==2010, ]
knitr::kable(table(is.na(QOG$p_polity2)))
QOG$democracy <- ifelse(QOG$p_polity2 >= 7, 1, 0)
knitr::kable(table(QOG$democracy))
by(QOG$gle_cgdpc, QOG$democracy, mean, na.rm = TRUE)
boxplot(gle_cgdpc ~ democracy, data = QOG)
QOG %>% drop_na(democracy) %>%
ggplot(aes(y = gle_cgdpc, x = as.factor(democracy))) +
geom_boxplot() +
labs(x = "Regime type", y = "GDPPC by constant 2010 US dollars") +
scale_x_discrete(labels=c("0" = "Non democracy", "1" = "Democracy")) +
scale_y_continuous(labels = scales::dollar_format()) +
theme(
axis.title.x = element_text(margin = margin(t = 20, b = 10)),
axis.title.y = element_text(margin = margin(r = 20, l = 20))) +
guides(fill=FALSE) + theme_bw()
mean.dem <- mean(QOG$gle_cgdpc[QOG$democracy == 1],
na.rm = TRUE)
mean.nondem <- mean(QOG$gle_cgdpc[QOG$democracy == 0],
na.rm = TRUE)
sd.dem <- sd(QOG$gle_cgdpc[QOG$democracy == 1],
na.rm = TRUE)
sd.nondem <- sd(QOG$gle_cgdpc[QOG$democracy == 0],
na.rm = TRUE)
n.dem <- length(QOG$wdi_gdpcapcon2010[QOG$democracy == 1 &
is.na(QOG$democracy) == FALSE &
is.na(QOG$wdi_gdpcapcon2010) == FALSE])
n.nondem <- length(QOG$wdi_gdpcapcon2010[QOG$democracy == 0 &
is.na(QOG$democracy) == FALSE &
is.na(QOG$wdi_gdpcapcon2010) == FALSE])
se.dnd <- sqrt((sd.dem^2 / n.dem) + (sd.nondem^2/n.nondem))
t <- ((mean.dem - mean.nondem) - 0) / se.dnd
t
2 * (1 - pt(t, df = min(n.dem - 1, n.nondem - 1)))
(1 - pt(t, df = min(n.dem - 1, n.nondem - 1)))
t.alt <- (mean(QOG$gle_cgdpc[QOG$democracy == 1],
na.rm = TRUE) -
mean(QOG$gle_cgdpc[QOG$democracy == 0],
na.rm = TRUE) - 0) /
sqrt(
(
sd(QOG$gle_cgdpc[QOG$democracy == 1],
na.rm = TRUE)^2 /
length(QOG$gle_cgdpc[QOG$democracy == 1 &
is.na(QOG$democracy) == FALSE &
is.na(QOG$gle_cgdpc) == FALSE])
) +
(
sd(QOG$gle_cgdpc[QOG$democracy == 0],
na.rm = TRUE)^2 /
length(QOG$gle_cgdpc[QOG$democracy == 0 &
is.na(QOG$democracy) == FALSE &
is.na(QOG$gle_cgdpc) == FALSE])
)
)
t.alt
ttest1 <- t.test(QOG$gle_cgdpc[QOG$democracy == 1],
QOG$gle_cgdpc[QOG$democracy == 0])
ttest1
ttest2 <- t.test(gle_cgdpc ~ democracy, data = QOG)
ttest2
knitr::kable(table(QOG$ciri_tort))
boxplot(gle_cgdpc ~ ciri_tort, data = QOG)
QOG %>% drop_na(ciri_tort) %>%
ggplot(aes(y = gle_cgdpc, x = as.factor(ciri_tort))) +
geom_boxplot() +
labs(x = "Torture type", y = "GDPPC by constant 2010 US dollars") +
scale_x_discrete(labels=c("0" = "Torture (+50)",
"1" = "Torture (1-49)",
"2" = "No torture")) +
scale_y_continuous(labels = scales::dollar_format()) +
theme(
axis.title.x = element_text(margin = margin(t = 20, b = 10)),
axis.title.y = element_text(margin = margin(r = 20, l = 10))) +
guides(fill=FALSE) + theme_bw()
model1 <- aov(gle_cgdpc ~ as.factor(ciri_tort), data = QOG)
summary(model1)
boxplot(gle_cgdpc ~ ciri_tort:democracy, data = QOG)
QOG %>% drop_na(ciri_tort) %>% drop_na(democracy) %>%
ggplot(aes(y=gle_cgdpc, x=as.factor(ciri_tort):as.factor(democracy),
group=interaction(democracy, ciri_tort))) + geom_boxplot() +
labs(x = "Torture X Regime type", y = "GDPPC by constant 2010 US dollars") +
scale_x_discrete(labels=c("0:0" = "Torture (+50)\nNon Democracy",
"0:1" = "Torture (+50)\nDemocracy",
"1:0" = "Torture (1-49)\nNon Democracy",
"1:1" = "Torture (1-49)\nDemocracy",
"2:0" = "No torture\nNon Democracy",
"2:1" = "No torture\nDemocracy")) +
scale_y_continuous(labels = scales::dollar_format()) +
theme(
axis.title.x = element_text(margin = margin(t = 15, b = 10)),
axis.title.y = element_text(margin = margin(r = 20, l = 10)),
axis.text.x = element_text(margin = margin(t = 10))) +
guides(fill=FALSE) + theme_bw()
model2 <- aov(gle_cgdpc ~ as.factor(ciri_tort):as.factor(democracy), data = QOG)
summary(model2)
here::here() %>% setwd()
download.file("http://www.openintro.org/stat/data/atheism.RData",
destfile = "atheism.RData")
load("atheism.RData")
glimpse(atheism)
knitr::kable(table(atheism$response))
table1 <- table(atheism$response, atheism$year)
margin.table(table1, 1) %>% knitr::kable()
margin.table(table1, 2) %>% knitr::kable()
prop.table(table1, 1) %>% knitr::kable()
prop.table(table1, 2) %>% knitr::kable()
us <- subset(atheism, nationality == "United States")
prop.test(table(us$year, us$response), correct = FALSE)
prop.test(table(us$response, us$year), correct = FALSE)
ustab <- table(us$year, us$response)
prop.test(ustab, correct = FALSE)
prop.test(table(atheism$year[atheism$nationality ==
"United States"],
atheism$response[atheism$nationality ==
"United States"]),
correct = FALSE)
anes <- pick("example.data/anes_timeseries_2012_Stata12.dta")
xtabs( ~ pid_x + presvote2012_x, data = anes) %>% knitr::kable()
table(anes$pid_x, anes$presvote2012_x) %>% knitr::kable()
table(anes$pid_x) %>% knitr::kable()
table(anes$presvote2012_x) %>% knitr::kable()
anes.nomissing <- subset(anes, pid_x != "-2" &
presvote2012_x != "-9" &
presvote2012_x != "-6" &
presvote2012_x != "-2")
table(anes.nomissing$pid_x) %>% knitr::kable()
table(anes.nomissing$presvote2012_x) %>% knitr::kable()
anes.nomissing$pid_x <- factor(anes.nomissing$pid_x)
anes.nomissing$presvote2012_x <-
factor(anes.nomissing$presvote2012_x,
labels = c("Obama", "Romney", "Other"))
table(anes.nomissing$pid_x) %>% t() %>% knitr::kable()
table(anes.nomissing$presvote2012_x) %>% knitr::kable()
xtabs( ~ pid_x + presvote2012_x, data = anes.nomissing) %>% knitr::kable()
chisq.test(xtabs( ~ pid_x + presvote2012_x, data = anes.nomissing))
model1 <- xtabs( ~ pid_x + presvote2012_x, data = anes.nomissing)
chisq.test(model1)
premade.table <- as_tibble(as.data.frame(table(anes.nomissing$pid_x,
anes.nomissing$presvote2012_x)))
premade.table
xtabs(Freq ~ Var1 + Var2, data = premade.table)
xtabs(weight_full ~ pid_x + presvote2012_x, data = anes.nomissing)
chisq.test(xtabs(weight_full ~ pid_x + presvote2012_x, data = anes.nomissing))
|
/Chapters_pdfR/Foundation of Inference_ Kor_BasicStats.R
|
permissive
|
pherephobia/Kor_BasicStats
|
R
| false
| false
| 10,597
|
r
|
# Foundation of Inference
# install.packages("WDI")
library(WDI)
WDI.data <-
WDI(country = "all",
indicator = c("SH.DYN.NMRT","DC.DAC.USAL.CD", "SH.VAC.TTNS.ZS",
"SP.URB.TOTL.IN.ZS", "NE.TRD.GNFS.ZS"),
start = 1990, end = 2005, extra = FALSE, cache = NULL)
par(mfrow = c(1, 3))
hist(WDI.data$SH.DYN.NMRT, main = "Mortality rate")
hist(WDI.data$DC.DAC.USAL.CD, main = "Aid Raw")
hist(log(WDI.data$DC.DAC.USAL.CD+.1), main = "Logged Aid")
hist(WDI.data$SH.VAC.TTNS.ZS, main = "Newborns vaccinated for tetanus")
hist(WDI.data$SP.URB.TOTL.IN.ZS, main = "Urban population rate")
hist(WDI.data$NE.TRD.GNFS.ZS, main = "Trade openness_raw")
hist(log(WDI.data$NE.TRD.GNFS.ZS), main = "Logged trade openness")
samp1.mort <- sample(WDI.data$SH.DYN.NMRT, 50)
mean(samp1.mort, na.rm = TRUE) #
mean(WDI.data$SH.DYN.NMRT, na.rm = TRUE)
sample_means50.mort <- rep(NA, 5000)
for(i in 1:5000) {
samp <- sample(WDI.data$SH.DYN.NMRT, 50)
sample_means50.mort[i] <- mean(samp, na.rm = TRUE)
}
mean(sample_means50.mort, na.rm = TRUE)
mean(WDI.data$SH.DYN.NMRT, na.rm = TRUE)
samp.pop <- sample(WDI.data$SP.URB.TOTL.IN.ZS, 50)
mean.pop <- mean(samp.pop, na.rm = TRUE) # sample mean
se.pop <- sd(samp.pop, na.rm = TRUE)/sqrt(length(samp.pop)) # 표준오차
lower.pop <- mean.pop - (1.96 * se.pop)
upper.pop <- mean.pop + (1.96 * se.pop)
c(lower.pop, mean.pop, upper.pop)
library(tidyverse)
estimates.df <- bind_rows(tibble(lower.pop, mean.pop, upper.pop))
estimates.df$sample.no <- 1
library(ggplot2)
ggplot(data = estimates.df, aes(x = sample.no)) +
geom_pointrange(aes(y = mean.pop, ymin = lower.pop, ymax = upper.pop))
mean.pop.20 <- rep(NA, 20)
lower.pop.20 <- rep(NA, 20)
upper.pop.20 <- rep(NA, 20)
for(x in 1:20) {
samp.pop <- sample(WDI.data$SP.URB.TOTL.IN.ZS, 50)
mean.pop.20[x] <- mean(samp.pop, na.rm = TRUE)
lower.pop.20[x] <- mean.pop.20[x] -
(1.96 * (sd(samp.pop, na.rm = TRUE)/sqrt(length(samp.pop))))
upper.pop.20[x] <- mean.pop.20[x] +
(1.96 * (sd(samp.pop, na.rm = TRUE)/sqrt(length(samp.pop))))
}
estimates.df.20 <- tibble(lower.pop.20, mean.pop.20,upper.pop.20)
estimates.df.20$sample.no <- c(1:20)
ggplot(data = estimates.df.20, aes(x = sample.no)) +
geom_pointrange(aes(y = mean.pop.20,
ymin = lower.pop.20,
ymax = upper.pop.20))
ggplot(data = estimates.df.20, aes(x = sample.no)) +
geom_pointrange(aes(y = mean.pop.20,
ymin = lower.pop.20, ymax = upper.pop.20)) +
geom_hline(data = WDI.data,
aes(yintercept = mean(SP.URB.TOTL.IN.ZS, na.rm = TRUE))) +
coord_flip() +
theme_bw() +
xlab("Sample number") + ylab("Estimates") +
ggtitle("Sample means for urban pop, with 95% confidence intervals")
estimates.df.20$outside <-
ifelse(estimates.df.20$lower.pop.20 >
mean(WDI.data$SP.URB.TOTL.IN.ZS, na.rm = TRUE) |
estimates.df.20$upper.pop.20 <
mean(WDI.data$SP.URB.TOTL.IN.ZS, na.rm = TRUE), 1, 0)
ggplot(data = estimates.df.20,
aes(x = sample.no, color = as.factor(outside))) +
geom_pointrange(aes(y = mean.pop.20,
ymin = lower.pop.20,
ymax = upper.pop.20)) +
geom_hline(data = WDI.data,
aes(yintercept = mean(SP.URB.TOTL.IN.ZS, na.rm = TRUE))) +
coord_flip() +
theme_bw() +
scale_color_manual(name = "", values=c("#9999CC", "#CC6666")) +
theme(legend.position="none") +
xlab("Sample number") + ylab("Estimates") +
ggtitle("Sample means for urban pop, with 95% confidence intervals")
library(ezpickr)
QOG <- pick(file = "http://www.qogdata.pol.gu.se/data/qog_bas_ts_jan19.dta")
QOG <- QOG[QOG$year==2010, ]
knitr::kable(table(is.na(QOG$p_polity2)))
QOG$democracy <- ifelse(QOG$p_polity2 >= 7, 1, 0)
knitr::kable(table(QOG$democracy))
by(QOG$gle_cgdpc, QOG$democracy, mean, na.rm = TRUE)
boxplot(gle_cgdpc ~ democracy, data = QOG)
QOG %>% drop_na(democracy) %>%
ggplot(aes(y = gle_cgdpc, x = as.factor(democracy))) +
geom_boxplot() +
labs(x = "Regime type", y = "GDPPC by constant 2010 US dollars") +
scale_x_discrete(labels=c("0" = "Non democracy", "1" = "Democracy")) +
scale_y_continuous(labels = scales::dollar_format()) +
theme(
axis.title.x = element_text(margin = margin(t = 20, b = 10)),
axis.title.y = element_text(margin = margin(r = 20, l = 20))) +
guides(fill=FALSE) + theme_bw()
mean.dem <- mean(QOG$gle_cgdpc[QOG$democracy == 1],
na.rm = TRUE)
mean.nondem <- mean(QOG$gle_cgdpc[QOG$democracy == 0],
na.rm = TRUE)
sd.dem <- sd(QOG$gle_cgdpc[QOG$democracy == 1],
na.rm = TRUE)
sd.nondem <- sd(QOG$gle_cgdpc[QOG$democracy == 0],
na.rm = TRUE)
n.dem <- length(QOG$wdi_gdpcapcon2010[QOG$democracy == 1 &
is.na(QOG$democracy) == FALSE &
is.na(QOG$wdi_gdpcapcon2010) == FALSE])
n.nondem <- length(QOG$wdi_gdpcapcon2010[QOG$democracy == 0 &
is.na(QOG$democracy) == FALSE &
is.na(QOG$wdi_gdpcapcon2010) == FALSE])
se.dnd <- sqrt((sd.dem^2 / n.dem) + (sd.nondem^2/n.nondem))
t <- ((mean.dem - mean.nondem) - 0) / se.dnd
t
2 * (1 - pt(t, df = min(n.dem - 1, n.nondem - 1)))
(1 - pt(t, df = min(n.dem - 1, n.nondem - 1)))
t.alt <- (mean(QOG$gle_cgdpc[QOG$democracy == 1],
na.rm = TRUE) -
mean(QOG$gle_cgdpc[QOG$democracy == 0],
na.rm = TRUE) - 0) /
sqrt(
(
sd(QOG$gle_cgdpc[QOG$democracy == 1],
na.rm = TRUE)^2 /
length(QOG$gle_cgdpc[QOG$democracy == 1 &
is.na(QOG$democracy) == FALSE &
is.na(QOG$gle_cgdpc) == FALSE])
) +
(
sd(QOG$gle_cgdpc[QOG$democracy == 0],
na.rm = TRUE)^2 /
length(QOG$gle_cgdpc[QOG$democracy == 0 &
is.na(QOG$democracy) == FALSE &
is.na(QOG$gle_cgdpc) == FALSE])
)
)
t.alt
ttest1 <- t.test(QOG$gle_cgdpc[QOG$democracy == 1],
QOG$gle_cgdpc[QOG$democracy == 0])
ttest1
ttest2 <- t.test(gle_cgdpc ~ democracy, data = QOG)
ttest2
knitr::kable(table(QOG$ciri_tort))
boxplot(gle_cgdpc ~ ciri_tort, data = QOG)
QOG %>% drop_na(ciri_tort) %>%
ggplot(aes(y = gle_cgdpc, x = as.factor(ciri_tort))) +
geom_boxplot() +
labs(x = "Torture type", y = "GDPPC by constant 2010 US dollars") +
scale_x_discrete(labels=c("0" = "Torture (+50)",
"1" = "Torture (1-49)",
"2" = "No torture")) +
scale_y_continuous(labels = scales::dollar_format()) +
theme(
axis.title.x = element_text(margin = margin(t = 20, b = 10)),
axis.title.y = element_text(margin = margin(r = 20, l = 10))) +
guides(fill=FALSE) + theme_bw()
model1 <- aov(gle_cgdpc ~ as.factor(ciri_tort), data = QOG)
summary(model1)
boxplot(gle_cgdpc ~ ciri_tort:democracy, data = QOG)
QOG %>% drop_na(ciri_tort) %>% drop_na(democracy) %>%
ggplot(aes(y=gle_cgdpc, x=as.factor(ciri_tort):as.factor(democracy),
group=interaction(democracy, ciri_tort))) + geom_boxplot() +
labs(x = "Torture X Regime type", y = "GDPPC by constant 2010 US dollars") +
scale_x_discrete(labels=c("0:0" = "Torture (+50)\nNon Democracy",
"0:1" = "Torture (+50)\nDemocracy",
"1:0" = "Torture (1-49)\nNon Democracy",
"1:1" = "Torture (1-49)\nDemocracy",
"2:0" = "No torture\nNon Democracy",
"2:1" = "No torture\nDemocracy")) +
scale_y_continuous(labels = scales::dollar_format()) +
theme(
axis.title.x = element_text(margin = margin(t = 15, b = 10)),
axis.title.y = element_text(margin = margin(r = 20, l = 10)),
axis.text.x = element_text(margin = margin(t = 10))) +
guides(fill=FALSE) + theme_bw()
model2 <- aov(gle_cgdpc ~ as.factor(ciri_tort):as.factor(democracy), data = QOG)
summary(model2)
here::here() %>% setwd()
download.file("http://www.openintro.org/stat/data/atheism.RData",
destfile = "atheism.RData")
load("atheism.RData")
glimpse(atheism)
knitr::kable(table(atheism$response))
table1 <- table(atheism$response, atheism$year)
margin.table(table1, 1) %>% knitr::kable()
margin.table(table1, 2) %>% knitr::kable()
prop.table(table1, 1) %>% knitr::kable()
prop.table(table1, 2) %>% knitr::kable()
us <- subset(atheism, nationality == "United States")
prop.test(table(us$year, us$response), correct = FALSE)
prop.test(table(us$response, us$year), correct = FALSE)
ustab <- table(us$year, us$response)
prop.test(ustab, correct = FALSE)
prop.test(table(atheism$year[atheism$nationality ==
"United States"],
atheism$response[atheism$nationality ==
"United States"]),
correct = FALSE)
anes <- pick("example.data/anes_timeseries_2012_Stata12.dta")
xtabs( ~ pid_x + presvote2012_x, data = anes) %>% knitr::kable()
table(anes$pid_x, anes$presvote2012_x) %>% knitr::kable()
table(anes$pid_x) %>% knitr::kable()
table(anes$presvote2012_x) %>% knitr::kable()
anes.nomissing <- subset(anes, pid_x != "-2" &
presvote2012_x != "-9" &
presvote2012_x != "-6" &
presvote2012_x != "-2")
table(anes.nomissing$pid_x) %>% knitr::kable()
table(anes.nomissing$presvote2012_x) %>% knitr::kable()
anes.nomissing$pid_x <- factor(anes.nomissing$pid_x)
anes.nomissing$presvote2012_x <-
factor(anes.nomissing$presvote2012_x,
labels = c("Obama", "Romney", "Other"))
table(anes.nomissing$pid_x) %>% t() %>% knitr::kable()
table(anes.nomissing$presvote2012_x) %>% knitr::kable()
xtabs( ~ pid_x + presvote2012_x, data = anes.nomissing) %>% knitr::kable()
chisq.test(xtabs( ~ pid_x + presvote2012_x, data = anes.nomissing))
model1 <- xtabs( ~ pid_x + presvote2012_x, data = anes.nomissing)
chisq.test(model1)
premade.table <- as_tibble(as.data.frame(table(anes.nomissing$pid_x,
anes.nomissing$presvote2012_x)))
premade.table
xtabs(Freq ~ Var1 + Var2, data = premade.table)
xtabs(weight_full ~ pid_x + presvote2012_x, data = anes.nomissing)
chisq.test(xtabs(weight_full ~ pid_x + presvote2012_x, data = anes.nomissing))
|
# Discrete random variable X with probability mass p; the CDF is the
# running sum of p. Avoid binding the name `F` (the built-in alias for
# FALSE, which is reassignable and a classic R footgun): keep the CDF in
# `cdf` and only label the output column "F".
X <- seq(0, 4, by = 1)
p <- c(1/2, 1/4, 1/8, 1/16, 1/16)
cdf <- cumsum(p)
data.frame(X, p, F = cdf)
|
/cviko5/5.R
|
no_license
|
spetrovi/MV011
|
R
| false
| false
| 97
|
r
|
# Discrete random variable X with probability mass p; the CDF is the
# running sum of p. Avoid binding the name `F` (the built-in alias for
# FALSE, which is reassignable and a classic R footgun): keep the CDF in
# `cdf` and only label the output column "F".
X <- seq(0, 4, by = 1)
p <- c(1/2, 1/4, 1/8, 1/16, 1/16)
cdf <- cumsum(p)
data.frame(X, p, F = cdf)
|
#' Cross-spectrum based on mutual information
#'
#' Slides one signal along the other and, at every lag, computes the
#' statistics returned by \code{mi_vector} between the aligned values.
#' Inputs are either two spectrograms (matrices whose columns are spectral
#' windows) or two feature vectors; the longer input is held static while
#' the shorter one is lagged past it.
#'
#' @param spectro1,spectro2 Spectrogram matrices (columns = spectral windows).
#' @param feature1,feature2 Numeric feature vectors; used when the
#'   spectrogram arguments are \code{NULL}.
#' @param plot Logical; if \code{TRUE}, plot mutual information and linear
#'   correlation against lag.
#' @param zerofill Logical; if \code{TRUE}, positions where the shorter
#'   signal does not overlap the longer one are filled with zeros instead
#'   of being dropped from both signals.
#' @param lagw Optional single lag (in windows) to evaluate; if
#'   \code{NULL}, all lags are evaluated.
#' @param ... Passed on to \code{mi_vector}.
#' @return A data frame with one row per lag: the lag plus the columns
#'   returned by \code{mi_vector} (the plotting code assumes these include
#'   \code{mi} and \code{lc} -- confirm against mi_vector's return value).
#' @export
mics <- function(
  spectro1= NULL, spectro2= NULL,
  feature1= NULL, feature2= NULL,
  plot= F, zerofill= T, lagw = NULL,
  ...
){
  if (!is.null(spectro1) & !is.null(spectro2)){
    x <- spectro1
    y <- spectro2
    # Select longer signal as the static one
    if (ncol(x) >= ncol(y)) {
      static <- x
      lagged <- y
    }else {
      static <- y
      lagged <- x
    }
    # Generate band of indices for the lag series. A zero column is
    # appended to `lagged`; index ncol(lagged) therefore addresses the
    # zero padding used for out-of-overlap positions.
    lagged <- cbind(lagged, rep(0, nrow(lagged)))
    band <- c(
      rep(ncol(lagged), ncol(static) - 1),
      1:(ncol(lagged) - 1),
      rep(ncol(lagged), ncol(static) - 1)
    )
    imax <- length(band) - ncol(static) + 1
    lags <- (ncol(static) - 1):(ncol(static) - imax)
    if (!is.null(lagw)){
      is <- (1:imax)[lags == lagw]
    }else{
      is <- 1:imax
    }
    lagstats <- lapply(is, function(i){
      # Window of `band` aligned with the static signal at this lag.
      iband <- (1:length(band) - 2 + i) %% length(band) + 1
      ycol <- band[iband][1:ncol(static)]
      if (zerofill){
        xcol <- 1:ncol(static)
        ycol <- band[iband][1:ncol(static)]
      }else{
        # Drop zero-padded (non-overlapping) positions from both signals.
        xcol <- (1:ncol(static))[-which(ycol == max(band))]
        ycol <- ycol[-which(ycol == max(band))]
      }
      x <- as.numeric(static[,xcol])
      y <- as.numeric(lagged[,ycol])
      mi_vector(x,y, ...)
    })
  }else if (!is.null(feature1) & !is.null(feature2)){
    x <- feature1
    y <- feature2
    # Select longer signal as the static one
    if (length(x) >= length(y)) {
      static <- x
      lagged <- y
    }else {
      static <- y
      lagged <- x
    }
    # Generate band of indices for the lag series (vector analogue of the
    # spectrogram branch above; a trailing zero provides the padding).
    lagged <- c(lagged, 0)
    band <- c(
      rep(length(lagged), length(static) - 1),
      1:(length(lagged) - 1),
      rep(length(lagged), length(static) - 1)
    )
    imax <- length(band) - length(static) + 1
    lags <- (length(static) - 1):(length(static) - imax)
    if (!is.null(lagw)){
      is <- (1:imax)[lags == lagw]
    }else{
      is <- 1:imax
    }
    lagstats <- lapply(is, function(i){
      iband <- (1:length(band) - 2 + i) %% length(band) + 1
      yis <- band[iband][1:length(static)]
      if (zerofill){
        xis <- 1:length(static)
        yis <- band[iband][1:length(static)]
      }else{
        xis <- (1:length(static))[-which(yis == max(band))]
        yis <- yis[-which(yis == max(band))]
      }
      x <- as.numeric(static[xis])
      y <- as.numeric(lagged[yis])
      mi_vector(x,y, ...)
    })
  }
  # One row per evaluated lag.
  lagstats <- do.call("rbind", lagstats)
  if (!is.null(lagw)) {
    lagdf <- lagw
  }else{
    lagdf <- lags
  }
  lagstats <- data.frame(
    lag= lagdf,
    lagstats
  )
  if (plot){
    plot(mi ~ lag, data= lagstats, type= "l",
         ylab= "Mutual information", xlab= "lag (spectral windows)")
    # Fixed malformed axis label (was "lag (spectra windows" -- missing
    # closing parenthesis and typo).
    plot(lc ~ lag, data= lagstats, type= "l",
         ylab= "Linear correlation", xlab= "lag (spectral windows)")
  }
  return(lagstats)
}
|
/R/mics.R
|
no_license
|
crodriguez-saltos/misound
|
R
| false
| false
| 2,944
|
r
|
#' Cross-spectrum based on mutual information
#' @export
mics <- function(
spectro1= NULL, spectro2= NULL,
feature1= NULL, feature2= NULL,
plot= F, zerofill= T, lagw = NULL,
...
){
if (!is.null(spectro1) & !is.null(spectro2)){
x <- spectro1
y <- spectro2
# Select longer signal as the static one
if (ncol(x) >= ncol(y)) {
static <- x
lagged <- y
}else {
static <- y
lagged <- x
}
# Generate band of indices for the lag series
lagged <- cbind(lagged, rep(0, nrow(lagged)))
band <- c(
rep(ncol(lagged), ncol(static) - 1),
1:(ncol(lagged) - 1),
rep(ncol(lagged), ncol(static) - 1)
)
imax <- length(band) - ncol(static) + 1
lags <- (ncol(static) - 1):(ncol(static) - imax)
if (!is.null(lagw)){
is <- (1:imax)[lags == lagw]
}else{
is <- 1:imax
}
lagstats <- lapply(is, function(i){
iband <- (1:length(band) - 2 + i) %% length(band) + 1
ycol <- band[iband][1:ncol(static)]
if (zerofill){
xcol <- 1:ncol(static)
ycol <- band[iband][1:ncol(static)]
}else{
xcol <- (1:ncol(static))[-which(ycol == max(band))]
ycol <- ycol[-which(ycol == max(band))]
}
x <- as.numeric(static[,xcol])
y <- as.numeric(lagged[,ycol])
mi_vector(x,y, ...)
})
}else if (!is.null(feature1) & !is.null(feature2)){
x <- feature1
y <- feature2
# Select longer signal as the static one
if (length(x) >= length(y)) {
static <- x
lagged <- y
}else {
static <- y
lagged <- x
}
# Generate band of indices for the lag series
lagged <- c(lagged, 0)
band <- c(
rep(length(lagged), length(static) - 1),
1:(length(lagged) - 1),
rep(length(lagged), length(static) - 1)
)
imax <- length(band) - length(static) + 1
lags <- (length(static) - 1):(length(static) - imax)
if (!is.null(lagw)){
is <- (1:imax)[lags == lagw]
}else{
is <- 1:imax
}
lagstats <- lapply(is, function(i){
iband <- (1:length(band) - 2 + i) %% length(band) + 1
yis <- band[iband][1:length(static)]
if (zerofill){
xis <- 1:length(static)
yis <- band[iband][1:length(static)]
}else{
xis <- (1:length(static))[-which(yis == max(band))]
yis <- yis[-which(yis == max(band))]
}
x <- as.numeric(static[xis])
y <- as.numeric(lagged[yis])
mi_vector(x,y, ...)
})
}
lagstats <- do.call("rbind", lagstats)
if (!is.null(lagw)) {
lagdf <- lagw
}else{
lagdf <- lags
}
lagstats <- data.frame(
lag= lagdf,
lagstats
)
if (plot){
plot(mi ~ lag, data= lagstats, type= "l",
ylab= "Mutual information", xlab= "lag (spectral windows)")
plot(lc ~ lag, data= lagstats, type= "l",
ylab= "Linear correlation", xlab= "lag (spectra windows")
}
return(lagstats)
}
|
#### Random Linear Models ####
# Two candidate wage regressions on the `train` data frame (assumed in
# scope with the referenced columns -- not visible here). They differ only
# in the education term: model 1 uses `edu`, model 2 uses `FOD1P`.
random_lm1 <- lm(wage ~ factor(state) + factor(FS) + AGEP
                 + factor(ENG) + factor(edu) + factor(SEX) + WKHP + factor(DIS)
                 + factor(indp) + factor(occp) + factor(NATIVITY) + factor(RAC1P),
                 na.action=na.omit,
                 data=train)
random_lm2 <- lm(wage ~ factor(state) + factor(FS) + AGEP
                 + factor(ENG) + factor(FOD1P) + factor(SEX) + WKHP + factor(DIS)
                 + factor(indp) + factor(occp) + factor(NATIVITY) + factor(RAC1P),
                 na.action=na.omit,
                 data=train)
# Compare the two specifications by AIC (lower is better).
# NOTE(review): with na.action=na.omit the models may be fitted on
# different row subsets if `edu` and `FOD1P` have different missingness,
# in which case their AICs are not directly comparable -- verify.
print(AIC(random_lm1))
print(AIC(random_lm2))
|
/random_lm_models.R
|
no_license
|
thegarthman/MastersProject
|
R
| false
| false
| 674
|
r
|
#### Random Linear Models ####
# Two candidate wage regressions on the `train` data frame (assumed in
# scope with the referenced columns -- not visible here). They differ only
# in the education term: model 1 uses `edu`, model 2 uses `FOD1P`.
random_lm1 <- lm(wage ~ factor(state) + factor(FS) + AGEP
                 + factor(ENG) + factor(edu) + factor(SEX) + WKHP + factor(DIS)
                 + factor(indp) + factor(occp) + factor(NATIVITY) + factor(RAC1P),
                 na.action=na.omit,
                 data=train)
random_lm2 <- lm(wage ~ factor(state) + factor(FS) + AGEP
                 + factor(ENG) + factor(FOD1P) + factor(SEX) + WKHP + factor(DIS)
                 + factor(indp) + factor(occp) + factor(NATIVITY) + factor(RAC1P),
                 na.action=na.omit,
                 data=train)
# Compare the two specifications by AIC (lower is better).
# NOTE(review): with na.action=na.omit the models may be fitted on
# different row subsets if `edu` and `FOD1P` have different missingness,
# in which case their AICs are not directly comparable -- verify.
print(AIC(random_lm1))
print(AIC(random_lm2))
|
#source("https://bioconductor.org/biocLite.R")
#biocLite("VariantAnnotation")
# Filter SNP calls in an mpileup VCF by depth, allele frequency, and
# per-strand alternate-allele balance.
# Usage: Rscript VariantFilter_mpileup_SNP.R <minDP> <minAF> <minStrand> <vcf>
options(echo=FALSE)
args <- commandArgs(trailingOnly = TRUE)
print(args)
minDP <- as.integer(args[1])      # minimum total read depth (INFO/DP)
minAF <- as.numeric(args[2])      # minimum ALT allele frequency for 0/1 calls
minStrand <- as.numeric(args[3])  # minimum fraction of ALT reads on each strand
file <- args[4]                   # input VCF path (7-char suffix stripped below)
library(VariantAnnotation)
# NOTE(review): `pre` is defined but never passed to filterVcf() below --
# confirm whether the "./." (missing-genotype) prefilter was meant to apply.
pre <- FilterRules(list(isLowCoverageExomeSnp = function(x) {
  !grepl("./.", x, fixed=TRUE)
}
))
# Output name: strip the 7-character suffix (".vcf.gz") and append a tag
# recording the filter thresholds.
fileExtention <- paste(paste(paste(paste(paste(paste("_filtered_DP",minDP,sep=""), "_AF", sep=""), minAF, sep=""), "", sep=""),"", sep=""), ".vcf", sep="")
destination <- paste( substr(file, 1, nchar(file)-7), fileExtention,sep="")#tempfile()
filt <- FilterRules(list(
  # Keep records whose genotype is not homozygous reference.
  isSNP = function(x) {
    geno(x)$GT != "0/0"
  },
  # Keep records with sufficient total depth.
  isDP = function(x) {
    info(x)$DP >= minDP
  },
  # For heterozygous (0/1) calls, require a minimum ALT allele frequency
  # and a minimum fraction of ALT reads on BOTH strands, computed from
  # INFO/DP4 = (ref fwd, ref rev, alt fwd, alt rev). Other genotypes pass
  # unconditionally.
  # (Removed dead locals ratioRef.for / ratioRef.rev -- the latter was a
  # copy-paste slip using ref.for -- and the unused `ratioRef <- if(...)`
  # block, whose length>1 `if` condition errors on R >= 4.2. None of them
  # contributed to the returned filter expression.)
  checkAllelFrequency = function(x){
    dp4 <- info(x)$DP4
    ref.for <- unlist(lapply(dp4, '[[', 1))
    ref.rev <- unlist(lapply(dp4, '[[', 2))
    alt.for <- unlist(lapply(dp4, '[[', 3))
    alt.rev <- unlist(lapply(dp4, '[[', 4))
    ratioAlt.for <- alt.for/(alt.for+alt.rev)
    ratioAlt.rev <- alt.rev/(alt.for+alt.rev)
    # fisher.pValues <- unlist(lapply(dp4, function(x){fisher.test(matrix(x,2))$p.value}))
    af <- (alt.for + alt.rev) /(ref.for+ref.rev+alt.for + alt.rev)
    (geno(x)$GT == "0/1" & !is.na(af) & af >= minAF & ratioAlt.for >= minStrand & ratioAlt.rev >= minStrand) | (geno(x)$GT != "0/1")
  }
))
filtered <- filterVcf(file, "hg19", destination, filters=filt)
#file.remove(destination)
|
/scripts/dna/VariantFilter_mpileup_SNP.R
|
no_license
|
BenF777/illumina_tst15
|
R
| false
| false
| 1,899
|
r
|
#source("https://bioconductor.org/biocLite.R")
#biocLite("VariantAnnotation")
# Command-line VCF filter for mpileup SNP calls.
# Usage: Rscript VariantFilter_mpileup_SNP.R <minDP> <minAF> <minStrand> <input.vcf.gz>
# Writes a filtered VCF next to the input, named <input>_filtered_DP<minDP>_AF<minAF>.vcf
options(echo=FALSE)
args <- commandArgs(trailingOnly = TRUE)
print(args)
minDP <- as.integer(args[1])      # minimum total read depth (INFO/DP)
minAF <- as.numeric(args[2])      # minimum alternate-allele frequency for het calls
minStrand <- as.numeric(args[3])  # minimum per-strand fraction of ALT reads
file <- args[4]                   # input VCF path; assumed to end in ".vcf.gz" (7 chars stripped below)
library(VariantAnnotation)
# Text-level prefilter: drop records containing uncalled "./." genotypes.
# NOTE(review): `pre` is defined but never passed to filterVcf() below —
# confirm whether it should be supplied via the prefilters= argument.
pre <- FilterRules(list(isLowCoverageExomeSnp = function(x) {
  !grepl("./.", x, fixed=TRUE)
  }
))
# Output file suffix encoding the filter parameters (the empty-string paste
# layers are no-ops kept from an earlier version of the naming scheme).
fileExtention <- paste(paste(paste(paste(paste(paste("_filtered_DP",minDP,sep=""), "_AF", sep=""), minAF, sep=""), "", sep=""),"", sep=""), ".vcf", sep="")
destination <- paste( substr(file, 1, nchar(file)-7), fileExtention,sep="")#tempfile()
filt <- FilterRules(list(
  # Keep only records whose genotype is not homozygous reference.
  isSNP = function(x) {
  geno(x)$GT != "0/0"
  },
  # Require minimum total depth.
  isDP = function(x) {
  info(x)$DP >= minDP
  },
  # For heterozygous calls, require minimum allele frequency and balanced
  # strand support for the ALT allele; other genotypes pass unconditionally.
  checkAllelFrequency = function(x){
  dp4 <- info(x)$DP4
  # DP4 fields: ref-forward, ref-reverse, alt-forward, alt-reverse read counts.
  ref.for <- unlist(lapply(dp4, '[[', 1))
  ref.rev <- unlist(lapply(dp4, '[[', 2))
  alt.for <- unlist(lapply(dp4, '[[', 3))
  alt.rev <- unlist(lapply(dp4, '[[', 4))
  ratioRef.for <- ref.for/(ref.for+ref.rev)
  # NOTE(review): ratioRef.rev reuses ref.for in the numerator — looks like a
  # copy/paste bug. Harmless as written (both ratioRef.* are unused below),
  # but confirm intent before relying on them.
  ratioRef.rev <- ref.for/(ref.for+ref.rev)
  ratioAlt.for <- alt.for/(alt.for+alt.rev)
  ratioAlt.rev <- alt.rev/(alt.for+alt.rev)
  # fisher.pValues <- unlist(lapply(dp4, function(x){fisher.test(matrix(x,2))$p.value}))
  # Alternate-allele frequency from the strand-resolved counts.
  af <- (alt.for + alt.rev) /(ref.for+ref.rev+alt.for + alt.rev)
  # NOTE(review): `ratioRef` is computed but never used in the returned
  # condition — presumably a leftover from an earlier filter version.
  ratioRef <- if((ref.for+ref.rev)>=10) {
  ratioAlt.for >= minStrand & ratioAlt.rev >= minStrand}
  else {
  TRUE
  }
  (geno(x)$GT == "0/1" & !is.na(af) & af >= minAF & ratioAlt.for >= minStrand & ratioAlt.rev >= minStrand) | (geno(x)$GT != "0/1")
  #(geno(x)$GT == "0/1" & !is.na(af) & af >= minAF) | (geno(x)$GT != "0/1")
  }
))
# Apply the rules and write the filtered VCF (hg19 genome tag).
filtered <- filterVcf(file, "hg19", destination, filters=filt)
#file.remove(destination)
|
# Fuzzer regression input: call gjam's internal tnormRcpp with all four
# parameters pinned to the same extreme value and inspect the result.
extreme <- -1.00252054090433e+120
testlist <- list(hi = extreme, lo = extreme, mu = extreme, sig = extreme)
result <- do.call(gjam:::tnormRcpp, testlist)
str(result)
|
/gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610045573-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 195
|
r
|
# Fuzzer regression input: call gjam's internal tnormRcpp with all four
# parameters pinned to the same extreme value and inspect the result.
extreme <- -1.00252054090433e+120
testlist <- list(hi = extreme, lo = extreme, mu = extreme, sig = extreme)
result <- do.call(gjam:::tnormRcpp, testlist)
str(result)
|
############################################################################
## Purpose: Variable Importance
############################################################################
library(cftf,lib.loc=".")
library(randomForest)
library(rpart)
library(pls)
library(gam)
####
## Function Definition
####
# Impute missing values with the mean.
#
# For a plain vector, NA entries are replaced by the mean of the observed
# values. For a data.frame/matrix, the same imputation is applied column by
# column, skipping factor columns. Extra arguments (`...`) are accepted for
# call compatibility but ignored.
#
# Fix: use seq_len(p[2]) instead of 1:p[2] so a zero-column input does not
# iterate over c(1, 0); braced the single-line `if` for clarity.
na.mean <- function(x, ...){
  handle.na <- function(v){ # plug the mean into missing values
    v[is.na(v)] <- mean(v, na.rm = TRUE)
    v
  }
  p <- dim(x)
  if (is.null(p)) { # special case: `x' is a vector
    return(handle.na(x))
  }
  for (i in seq_len(p[2])) {
    if (!is.factor(x[, i])) { # if the column is a factor ignore it
      x[, i] <- handle.na(x[, i])
    }
  }
  x
}
# NIPALS-style partial least squares for a single (univariate) response.
#
# x: predictor matrix; y: response vector; k: number of latent components.
# Both x and y are mean-centered (not scaled) before extraction. Each
# iteration computes a weight vector, scores and loadings, then deflates
# x and y before extracting the next component.
#
# Returns an object of class "upls" with:
#   b - per-component regression coefficients of y on the scores
#   p - loading matrix, a - (normalised) weight matrix, u - score matrix
#   d - number of predictor columns, q - number of components extracted
nipal<-function(x,y,k){
  x<-scale(x,,scale=FALSE)
  y<-scale(y,,scale=FALSE)
  b<-p<-a<-u<-NULL
  for(i in 1:k){
  # Weight vector proportional to the covariance of each column with y.
  wt<-t(x)%*%y/as.numeric(t(y)%*%x%*%t(x)%*%y)
  tt<-x%*%wt
  # Loadings: regression of x on the current scores.
  pt<- t(x)%*%tt/as.numeric(t(tt)%*%tt)
  npt<-as.numeric(sqrt(t(pt)%*%pt))
  # Rescale so loadings have unit norm; weights/scores absorb the norm.
  wn<-npt*wt
  tn<-npt*tt
  pn<-pt/npt
  bn<-as.numeric(t(tn)%*%y)/as.numeric(t(tn)%*%tn)
  # Deflate: remove the extracted component from x and y.
  x<-x-tn%*%t(pn)
  y<-y-bn*tn
  b<-c(b,bn)
  p<-cbind(p,pn)
  a<-cbind(a,wn)
  u<-cbind(u,tn)
  }
  dim(x)[2]->ab
  return(structure(list(b=b,p=p,a=a,u=u,d=ab,q=k),class="upls"))
}
# Variable-importance scores for a fitted "upls" model (see nipal()).
#
# obj: object of class "upls"; y: the response used to fit the model;
# nm: optional variable names (defaults to "1", "2", ...).
#
# Returns a named numeric vector: squared weights combined with the squared
# correlation of each latent score vector with the response.
#
# Fix: class membership is tested with inherits() instead of
# `class(obj) != "upls"`, which mis-handles objects carrying more than one
# class attribute (and scalar-compares a vector).
vip <- function(obj, y, nm = NULL){
  if (!inherits(obj, "upls")) {
    stop("Object is not of type upls")
  }
  a1 <- as.vector(obj$a^2 %*% cor(obj$u, y)^2)
  if (is.null(nm)) {
    names(a1) <- paste(seq_along(a1))
  } else {
    names(a1) <- nm
  }
  a1  ## sort(a1, decreasing = TRUE))
}
####
## Read in Data
###
# NOTE(review): this section is empty — the objects used below (predictor
# views x and z, binary response y, and sample size n) must already exist
# in the workspace before this script runs.
###
## Fit learner and make plots for each view
###
##bio
# Evaluate semi-supervised classifiers over a grid of labelled fractions
# (nu), L1 random splits each. Result matrices hold: col 1 = nu,
# cols 2-4 = cftf/RF with type = 1 / 0 / NA, col 5 = PLS-scores + GAM.
# NOTE(review): column 6 is allocated but never filled — confirm intent.
set.seed(020) ##so RF gets same result as in paper
nu=1:9/10
lnu=length(nu)
L1=50
teacc=matrix(0,nrow=L1*lnu,ncol=6)
tesen=matrix(0,nrow=L1*lnu,ncol=6)
tespec=matrix(0,nrow=L1*lnu,ncol=6)
k3<-1
t1<-proc.time()
for(j in 1:lnu){
for(i in 1:L1){
# Random labelled (L) / unlabelled (U) split at the current fraction.
L=sample(1:n,ceiling(nu[j]*n))
U=setdiff(1:n,L)
teacc[k3,1]<-nu[j]
tesen[k3,1]<-nu[j]
tespec[k3,1]<-nu[j]
# Mask the labels of the unlabelled set.
ytr<-y
ytr[U]=NA
yval<-factor(y)
# cftf with RF base learner, type = 1; v1 gives per-class accuracy
# (specificity = v1[1], sensitivity = v1[2]).
crf<-cftf(x,z,ytr,k=5,L,U,learn="RF",type=1)
v12<-factor(crf$model$yU,levels=c(0,1))
tab=table(v12,yval[U])
v1<-diag(tab)/ apply(tab,2,sum)
teacc[k3,2]<-sum(diag(tab))/sum(tab)
tesen[k3,2]<-v1[2]
tespec[k3,2]<-v1[1]
# Same evaluation with type = 0.
crf<-cftf(x,z,ytr,k=5,L,U,learn="RF",type=0)
v12<-factor(crf$model$yU,levels=c(0,1))
tab=table(v12,yval[U])
v1<-diag(tab)/ apply(tab,2,sum)
teacc[k3,3]<-sum(diag(tab))/sum(tab)
tesen[k3,3]<-v1[2]
tespec[k3,3]<-v1[1]
# Same evaluation with type = NA.
crf<-cftf(x,z,ytr,k=5,L,U,learn="RF",type=NA)
v12<-factor(crf$model$yU,levels=c(0,1))
tab=table(v12,yval[U])
v1<-diag(tab)/ apply(tab,2,sum)
teacc[k3,4]<-sum(diag(tab))/sum(tab)
tesen[k3,4]<-v1[2]
tespec[k3,4]<-v1[1]
# Baseline: PLS on each view, first two score components fused, then a GAM
# on the fused scores; unlabelled cases are classified at threshold 0.5.
gpls1<-plsr(y~.,data=data.frame(y,x)[L,])
gpls2<-plsr(y~.,data=data.frame(y,z)[L,])
dat1=cbind(gpls1$scores[,1:2],gpls2$scores[,1:2])
g1<-gam(y~.,data=data.frame(y=y[L],x=dat1))
dat2<-cbind(predict(gpls1,type="scores",newdata=x[U,])[,1:2],
predict(gpls2,type="scores",newdata=z[U,])[,1:2])
tab=table(predict(g1,newdata=data.frame(x=dat2))>0.5,y[U])
v1<-diag(tab)/ apply(tab,2,sum)
teacc[k3,5]<-sum(diag(tab))/sum(tab)
tesen[k3,5]<-v1[2]
tespec[k3,5]<-v1[1]
k3<-k3+1
}
# Progress report and checkpoint: results are rewritten after every nu so
# partial output survives an interrupted run.
cat("nu=",nu[j]," k3=",k3," time=",(proc.time()-t1)/60,"\n")
write.table(teacc,"te-acc_fused.txt",quote=FALSE,row.names=FALSE,col.names=FALSE)
write.table(tespec,"te-spec_fused.txt",quote=FALSE,row.names=FALSE,col.names=FALSE)
write.table(tesen,"te-sen_fused.txt",quote=FALSE,row.names=FALSE,col.names=FALSE)
}
q(save="no")
|
/code/jochem_results/perf_fuse.R
|
no_license
|
mmorehea/NeuroMiner
|
R
| false
| false
| 3,768
|
r
|
############################################################################
## Purpose: Variable Importance
############################################################################
library(cftf,lib.loc=".")
library(randomForest)
library(rpart)
library(pls)
library(gam)
####
## Function Definition
####
# Impute missing values with the mean.
#
# For a plain vector, NA entries are replaced by the mean of the observed
# values. For a data.frame/matrix, the same imputation is applied column by
# column, skipping factor columns. Extra arguments (`...`) are accepted for
# call compatibility but ignored.
#
# Fix: use seq_len(p[2]) instead of 1:p[2] so a zero-column input does not
# iterate over c(1, 0); braced the single-line `if` for clarity.
na.mean <- function(x, ...){
  handle.na <- function(v){ # plug the mean into missing values
    v[is.na(v)] <- mean(v, na.rm = TRUE)
    v
  }
  p <- dim(x)
  if (is.null(p)) { # special case: `x' is a vector
    return(handle.na(x))
  }
  for (i in seq_len(p[2])) {
    if (!is.factor(x[, i])) { # if the column is a factor ignore it
      x[, i] <- handle.na(x[, i])
    }
  }
  x
}
# NIPALS-style partial least squares for a single (univariate) response.
#
# x: predictor matrix; y: response vector; k: number of latent components.
# Both x and y are mean-centered (not scaled) before extraction. Each
# iteration computes a weight vector, scores and loadings, then deflates
# x and y before extracting the next component.
#
# Returns an object of class "upls" with:
#   b - per-component regression coefficients of y on the scores
#   p - loading matrix, a - (normalised) weight matrix, u - score matrix
#   d - number of predictor columns, q - number of components extracted
nipal<-function(x,y,k){
  x<-scale(x,,scale=FALSE)
  y<-scale(y,,scale=FALSE)
  b<-p<-a<-u<-NULL
  for(i in 1:k){
  # Weight vector proportional to the covariance of each column with y.
  wt<-t(x)%*%y/as.numeric(t(y)%*%x%*%t(x)%*%y)
  tt<-x%*%wt
  # Loadings: regression of x on the current scores.
  pt<- t(x)%*%tt/as.numeric(t(tt)%*%tt)
  npt<-as.numeric(sqrt(t(pt)%*%pt))
  # Rescale so loadings have unit norm; weights/scores absorb the norm.
  wn<-npt*wt
  tn<-npt*tt
  pn<-pt/npt
  bn<-as.numeric(t(tn)%*%y)/as.numeric(t(tn)%*%tn)
  # Deflate: remove the extracted component from x and y.
  x<-x-tn%*%t(pn)
  y<-y-bn*tn
  b<-c(b,bn)
  p<-cbind(p,pn)
  a<-cbind(a,wn)
  u<-cbind(u,tn)
  }
  dim(x)[2]->ab
  return(structure(list(b=b,p=p,a=a,u=u,d=ab,q=k),class="upls"))
}
# Variable-importance scores for a fitted "upls" model (see nipal()).
#
# obj: object of class "upls"; y: the response used to fit the model;
# nm: optional variable names (defaults to "1", "2", ...).
#
# Returns a named numeric vector: squared weights combined with the squared
# correlation of each latent score vector with the response.
#
# Fix: class membership is tested with inherits() instead of
# `class(obj) != "upls"`, which mis-handles objects carrying more than one
# class attribute (and scalar-compares a vector).
vip <- function(obj, y, nm = NULL){
  if (!inherits(obj, "upls")) {
    stop("Object is not of type upls")
  }
  a1 <- as.vector(obj$a^2 %*% cor(obj$u, y)^2)
  if (is.null(nm)) {
    names(a1) <- paste(seq_along(a1))
  } else {
    names(a1) <- nm
  }
  a1  ## sort(a1, decreasing = TRUE))
}
####
## Read in Data
###
# NOTE(review): this section is empty — the objects used below (predictor
# views x and z, binary response y, and sample size n) must already exist
# in the workspace before this script runs.
###
## Fit learner and make plots for each view
###
##bio
# Evaluate semi-supervised classifiers over a grid of labelled fractions
# (nu), L1 random splits each. Result matrices hold: col 1 = nu,
# cols 2-4 = cftf/RF with type = 1 / 0 / NA, col 5 = PLS-scores + GAM.
# NOTE(review): column 6 is allocated but never filled — confirm intent.
set.seed(020) ##so RF gets same result as in paper
nu=1:9/10
lnu=length(nu)
L1=50
teacc=matrix(0,nrow=L1*lnu,ncol=6)
tesen=matrix(0,nrow=L1*lnu,ncol=6)
tespec=matrix(0,nrow=L1*lnu,ncol=6)
k3<-1
t1<-proc.time()
for(j in 1:lnu){
for(i in 1:L1){
# Random labelled (L) / unlabelled (U) split at the current fraction.
L=sample(1:n,ceiling(nu[j]*n))
U=setdiff(1:n,L)
teacc[k3,1]<-nu[j]
tesen[k3,1]<-nu[j]
tespec[k3,1]<-nu[j]
# Mask the labels of the unlabelled set.
ytr<-y
ytr[U]=NA
yval<-factor(y)
# cftf with RF base learner, type = 1; v1 gives per-class accuracy
# (specificity = v1[1], sensitivity = v1[2]).
crf<-cftf(x,z,ytr,k=5,L,U,learn="RF",type=1)
v12<-factor(crf$model$yU,levels=c(0,1))
tab=table(v12,yval[U])
v1<-diag(tab)/ apply(tab,2,sum)
teacc[k3,2]<-sum(diag(tab))/sum(tab)
tesen[k3,2]<-v1[2]
tespec[k3,2]<-v1[1]
# Same evaluation with type = 0.
crf<-cftf(x,z,ytr,k=5,L,U,learn="RF",type=0)
v12<-factor(crf$model$yU,levels=c(0,1))
tab=table(v12,yval[U])
v1<-diag(tab)/ apply(tab,2,sum)
teacc[k3,3]<-sum(diag(tab))/sum(tab)
tesen[k3,3]<-v1[2]
tespec[k3,3]<-v1[1]
# Same evaluation with type = NA.
crf<-cftf(x,z,ytr,k=5,L,U,learn="RF",type=NA)
v12<-factor(crf$model$yU,levels=c(0,1))
tab=table(v12,yval[U])
v1<-diag(tab)/ apply(tab,2,sum)
teacc[k3,4]<-sum(diag(tab))/sum(tab)
tesen[k3,4]<-v1[2]
tespec[k3,4]<-v1[1]
# Baseline: PLS on each view, first two score components fused, then a GAM
# on the fused scores; unlabelled cases are classified at threshold 0.5.
gpls1<-plsr(y~.,data=data.frame(y,x)[L,])
gpls2<-plsr(y~.,data=data.frame(y,z)[L,])
dat1=cbind(gpls1$scores[,1:2],gpls2$scores[,1:2])
g1<-gam(y~.,data=data.frame(y=y[L],x=dat1))
dat2<-cbind(predict(gpls1,type="scores",newdata=x[U,])[,1:2],
predict(gpls2,type="scores",newdata=z[U,])[,1:2])
tab=table(predict(g1,newdata=data.frame(x=dat2))>0.5,y[U])
v1<-diag(tab)/ apply(tab,2,sum)
teacc[k3,5]<-sum(diag(tab))/sum(tab)
tesen[k3,5]<-v1[2]
tespec[k3,5]<-v1[1]
k3<-k3+1
}
# Progress report and checkpoint: results are rewritten after every nu so
# partial output survives an interrupted run.
cat("nu=",nu[j]," k3=",k3," time=",(proc.time()-t1)/60,"\n")
write.table(teacc,"te-acc_fused.txt",quote=FALSE,row.names=FALSE,col.names=FALSE)
write.table(tespec,"te-spec_fused.txt",quote=FALSE,row.names=FALSE,col.names=FALSE)
write.table(tesen,"te-sen_fused.txt",quote=FALSE,row.names=FALSE,col.names=FALSE)
}
q(save="no")
|
# Example script for amt home-range estimators, run on the bundled `deer`
# track: Minimum Convex Polygon, k-nearest-neighbour Local Convex Hull,
# and Kernel Density Estimation. hr_area() reports the estimated areas;
# get_crs()/has_crs() show that the track's CRS propagates to each estimate.
library(amt)
### Name: hr_area
### Title: Home ranges
### Aliases: hr_area hr hr_isopleths hr_kde hr_kde.track_xy hr_kde_ref
###   hr_kde_ref.track_xy hr_locoh_k hr_locoh_k.track_xy hr_mcp
###   hr_mcp.track_xy
### ** Examples
data(deer)
# MCP ---------------------------------------------------------------------
mcp1 <- hr_mcp(deer)
hr_area(mcp1)
# calculated MCP at different levels
mcp1 <- hr_mcp(deer, levels = seq(0.3, 1, 0.1))
hr_area(mcp1)
# CRS are inherited
get_crs(deer)
mcps <- hr_mcp(deer, levels = c(0.5, 0.95, 1))
has_crs(mcps)
# Local Convex Hull (LoCoH) -----------------------------------------------
locoh1 <- hr_locoh_k(deer)
hr_area(locoh1)
# calculated MCP at different levels
locoh <- hr_locoh_k(deer, levels = seq(0.3, 1, 0.1))
hr_area(locoh)
# CRS are inherited
get_crs(deer)
get_crs(locoh1)
# Kernel density estimaiton (KDE) -----------------------------------------
kde1 <- hr_kde(deer)
hr_area(kde1)
get_crs(kde1)
|
/data/genthat_extracted_code/amt/examples/hr.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 957
|
r
|
# Example script for amt home-range estimators, run on the bundled `deer`
# track: Minimum Convex Polygon, k-nearest-neighbour Local Convex Hull,
# and Kernel Density Estimation. hr_area() reports the estimated areas;
# get_crs()/has_crs() show that the track's CRS propagates to each estimate.
library(amt)
### Name: hr_area
### Title: Home ranges
### Aliases: hr_area hr hr_isopleths hr_kde hr_kde.track_xy hr_kde_ref
###   hr_kde_ref.track_xy hr_locoh_k hr_locoh_k.track_xy hr_mcp
###   hr_mcp.track_xy
### ** Examples
data(deer)
# MCP ---------------------------------------------------------------------
mcp1 <- hr_mcp(deer)
hr_area(mcp1)
# calculated MCP at different levels
mcp1 <- hr_mcp(deer, levels = seq(0.3, 1, 0.1))
hr_area(mcp1)
# CRS are inherited
get_crs(deer)
mcps <- hr_mcp(deer, levels = c(0.5, 0.95, 1))
has_crs(mcps)
# Local Convex Hull (LoCoH) -----------------------------------------------
locoh1 <- hr_locoh_k(deer)
hr_area(locoh1)
# calculated MCP at different levels
locoh <- hr_locoh_k(deer, levels = seq(0.3, 1, 0.1))
hr_area(locoh)
# CRS are inherited
get_crs(deer)
get_crs(locoh1)
# Kernel density estimaiton (KDE) -----------------------------------------
kde1 <- hr_kde(deer)
hr_area(kde1)
get_crs(kde1)
|
/5_Data_Browser/English.lproj/PrefsDialog.r
|
no_license
|
fruitsamples/CarbonPorting
|
R
| false
| false
| 10,159
|
r
| ||
# Example workflow for the combiroc package: enumerate marker combinations,
# score each combination's sensitivity/specificity, then rank by Youden
# index and keep only combinations meeting the SE/SP thresholds.
demo_data # combiroc built-in demo data (proteomics data from Zingaretti et al. 2012 - PMC3518104)
combs <- combi(data= demo_data, signalthr=450, combithr=1)   # compute combinations
combs_SE_SP <- se_sp(data=demo_data, combinations_table=combs)  # compute SE and SP
# of each combination
# To rank combinations by Youden index and filter-out the ones with SE < min_SE and SP < min_SP
rc <- ranked_combs(data= demo_data, combo_table= combs_SE_SP,
case_class='A', min_SE=40, min_SP=80)
rc$table # to visualize the selected gold combinations through a data.frame
rc$bubble_chart # to visualize the selected gold combinations through a data.frame
|
/R/examples/ranked_combs_example.R
|
permissive
|
minghao2016/combiroc
|
R
| false
| false
| 734
|
r
|
# Example workflow for the combiroc package: enumerate marker combinations,
# score each combination's sensitivity/specificity, then rank by Youden
# index and keep only combinations meeting the SE/SP thresholds.
demo_data # combiroc built-in demo data (proteomics data from Zingaretti et al. 2012 - PMC3518104)
combs <- combi(data= demo_data, signalthr=450, combithr=1)   # compute combinations
combs_SE_SP <- se_sp(data=demo_data, combinations_table=combs)  # compute SE and SP
# of each combination
# To rank combinations by Youden index and filter-out the ones with SE < min_SE and SP < min_SP
rc <- ranked_combs(data= demo_data, combo_table= combs_SE_SP,
case_class='A', min_SE=40, min_SP=80)
rc$table # to visualize the selected gold combinations through a data.frame
rc$bubble_chart # to visualize the selected gold combinations through a data.frame
|
769968480da7f26299574d9c24fcc606 dungeon_i10-m5-u10-v0.pddl_planlen=157.qdimacs 32519 87850
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i10-m5-u10-v0.pddl_planlen=157/dungeon_i10-m5-u10-v0.pddl_planlen=157.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 91
|
r
|
769968480da7f26299574d9c24fcc606 dungeon_i10-m5-u10-v0.pddl_planlen=157.qdimacs 32519 87850
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{json2tidy}
\alias{json2tidy}
\title{json2tidy}
\usage{
json2tidy(url)
}
\arguments{
\item{url}{The yahoo finance URL to tidy.}
}
\description{
Fetch the JSON payload of a Yahoo Finance URL and return it as a tidy data structure.
}
|
/man/json2tidy.Rd
|
no_license
|
Stanleymu/yfinance
|
R
| false
| true
| 247
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{json2tidy}
\alias{json2tidy}
\title{json2tidy}
\usage{
json2tidy(url)
}
\arguments{
\item{url}{The yahoo finance URL to tidy.}
}
\description{
Fetch the JSON payload of a Yahoo Finance URL and return it as a tidy data structure.
}
|
#!/usr/bin/env Rscript
#
# This file is part of the `OmnipathR` R package
#
# Copyright
# 2018-2021
# Saez Lab, Uniklinik RWTH Aachen, Heidelberg University
#
# File author(s): Alberto Valdeolivas
# Dénes Türei (turei.denes@gmail.com)
# Attila Gábor
#
# Distributed under the MIT (Expat) License.
# See accompanying file `LICENSE` or find a copy at
# https://directory.fsf.org/wiki/License:Expat
#
# Website: https://saezlab.github.io/omnipathr
# Git repo: https://github.com/saezlab/OmnipathR
#
#' Namespace load hook for OmnipathR.
#'
#' Initialises package state when the namespace is loaded: user config,
#' the (patched) logger and the package log, the download cache, the
#' built-in database registry, and static package data (magic bytes,
#' URLs, ID types). Finishes with a greeting log entry.
.onLoad <- function(libname, pkgname){

    omnipath_init_config()
    patch_logger()
    omnipath_init_log(pkgname = pkgname)
    # Bioconductor build machines get verbose console logging.
    if(Sys.info()['user'] == 'biocbuild'){
        omnipath_set_console_loglevel('trace')
    }
    omnipath_init_cache()
    omnipath_init_db(pkgname)
    .load_magic_bytes(pkgname)
    .load_urls(pkgname)
    .load_id_types(pkgname)
    logger::log_info('Welcome to OmnipathR!')

}
|
/R/zzz.R
|
permissive
|
kerwin12580/OmnipathR
|
R
| false
| false
| 965
|
r
|
#!/usr/bin/env Rscript
#
# This file is part of the `OmnipathR` R package
#
# Copyright
# 2018-2021
# Saez Lab, Uniklinik RWTH Aachen, Heidelberg University
#
# File author(s): Alberto Valdeolivas
# Dénes Türei (turei.denes@gmail.com)
# Attila Gábor
#
# Distributed under the MIT (Expat) License.
# See accompanying file `LICENSE` or find a copy at
# https://directory.fsf.org/wiki/License:Expat
#
# Website: https://saezlab.github.io/omnipathr
# Git repo: https://github.com/saezlab/OmnipathR
#
#' Namespace load hook for OmnipathR.
#'
#' Initialises package state when the namespace is loaded: user config,
#' the (patched) logger and the package log, the download cache, the
#' built-in database registry, and static package data (magic bytes,
#' URLs, ID types). Finishes with a greeting log entry.
.onLoad <- function(libname, pkgname){

    omnipath_init_config()
    patch_logger()
    omnipath_init_log(pkgname = pkgname)
    # Bioconductor build machines get verbose console logging.
    if(Sys.info()['user'] == 'biocbuild'){
        omnipath_set_console_loglevel('trace')
    }
    omnipath_init_cache()
    omnipath_init_db(pkgname)
    .load_magic_bytes(pkgname)
    .load_urls(pkgname)
    .load_id_types(pkgname)
    logger::log_info('Welcome to OmnipathR!')

}
|
#install.packages("twitteR")
#install.packages("plyr")
#install.packages("stringr")
#install.packages("ggvis")
#install.packages("ggplot2")
#install.packages("memoise")
#install.packages("gridExtra")
# Function to check if package exists already
# Is the package `pack` present in any library path?
checkForPackage <- function(pack){
  pack %in% installed.packages()[, 1]
}
if(!checkForPackage("twitteR")){
install.packages("twitteR")
}
if(!checkForPackage("plyr")){
install.packages("plyr")
}
if(!checkForPackage("stringr")){
install.packages("stringr")
}
if(!checkForPackage("ggvis")){
install.packages("ggvis")
}
if(!checkForPackage("ggplot2")){
install.packages("ggplot2")
}
if(!checkForPackage("memoise")){
install.packages("memoise")
}
if(!checkForPackage("gridExtra")){
install.packages("gridExtra")
}
library(twitteR)
library(plyr)
library(stringr)
library(ggvis)
library(ggplot2)
library(memoise)
library(gridExtra)
options(shiny.trace=TRUE)
# Number of tweets to request per search, and summary size.
n_tweets <- 180
n_summary <- 10
# SECURITY(review): Twitter API credentials are hard-coded in source and
# committed to the repository — these keys should be rotated and loaded
# from environment variables or a config file outside version control.
consumerKey <- "PPPleVlMJJ92SJyoSscjabRdy"
consumerSecret <- "hI6OzAUGOpYePHaCIkuq6dOfMNJ2BTL51EixB687KmeH3hQ8X8"
acessToken <- "97679565-0nRLpRoI5SnHwjob6Czya5xfci9BBZynyUqpeyTTd"
accessTokenSecret <- "cviNSxQagHsE44S8lT5jecUos11aNPfRhivNFx6AQaBSo"
# Authenticate the twitteR session for the searches below.
setup_twitter_oauth(consumerKey, consumerSecret, acessToken, accessTokenSecret)
shinyServer(function(input, output, session) {
# Define a reactive expression for the document term matrix
# Lower-case `x`, returning NA instead of erroring on inputs tolower()
# cannot coerce (e.g. exotic encodings or non-character objects).
tryTolower <- function(x){
  tryCatch(tolower(x), error = function(e) NA)
}
# Lexicon-based sentiment scorer (Jeffrey Breen's approach).
#
# sentences: character vector of texts (expected lower-cased by the caller).
# pos.words / neg.words: character vectors of positive / negative terms.
# .progress: forwarded to plyr::laply for progress reporting.
#
# Returns a data.frame with one row per sentence: score = number of
# positive-word matches minus negative-word matches, plus the input text.
score.sentiment <- function(sentences, pos.words, neg.words, .progress='none')
{
  scores = laply(sentences, function(sentence, pos.words, neg.words) {

    # clean up sentences with R's regex-driven global substitute, gsub():
    sentence = gsub('[[:punct:]]', '', sentence)
    sentence = gsub('[[:cntrl:]]', '', sentence)
    sentence = gsub('\\d+', '', sentence)
    # and convert to lower case:
    #sentence = tolower(sentence)
    # split into words. str_split is in the stringr package
    word.list = str_split(sentence, '\\s+')
    # sometimes a list() is one level of hierarchy too much
    words = unlist(word.list)
    # compare our words to the dictionaries of positive & negative terms
    pos.matches = match(words, pos.words)
    neg.matches = match(words, neg.words)
    # match() returns the position of the matched term or NA
    # we just want a TRUE/FALSE:
    pos.matches = !is.na(pos.matches)
    neg.matches = !is.na(neg.matches)
    # and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum():
    score = sum(pos.matches) - sum(neg.matches)

    return(score)
  }, pos.words, neg.words, .progress=.progress )

  scores.df = data.frame(score=scores, text=sentences)
  return(scores.df)
}
# Strip anything that looks like an HTML/XML tag from the input string(s).
cleanFun <- function(htmlString) {
  gsub("<.*?>", "", htmlString)
}
# Tweet row -> client name: de-HTML the statusSource field of one record.
get_source <- function(x){
  cleanFun(x[["statusSource"]])
}
tweets_df <- reactive({
# Change when the "update" button is pressed...
input$plot_feel
# ...but not for anything else
isolate({
withProgress({
setProgress(message = "Please wait for a moment......")
tweets <- searchTwitter(input$source1,n=n_tweets)
tweets <- strip_retweets(tweets, strip_manual=TRUE, strip_mt=TRUE)
df <- twListToDF(tweets)
df$Search <- input$source1
if( (input$show_source2 == TRUE) && (input$source2 != ""))
{
tweets2 <- searchTwitter(input$source2, n=n_tweets)
tweets2 <- strip_retweets(tweets2, strip_manual=TRUE, strip_mt=TRUE)
df2 <- twListToDF(tweets2)
df2$Search <- input$source2
df <- rbind(df, df2)
tweets <- c(tweets, tweets2)
}
df$Date <- format(df$created,'%m/%d/%Y %H:%I:%S')
df$Source <- apply(df, 1, get_source)
sentences <- sapply(df$text, function(x) tryTolower(x))
scores <- score.sentiment(sentences, pos.words, neg.words)
df <- cbind(df, scores)
df <- df[, c("id", "text", "Source", "Date", "Search", "created", "score")]
names(df) <- c("id", "Post", "Source", "Date", "Search", "created", "score")
df
})
})
})
tweets_df_old <- reactive({
# Change when the "update" button is pressed...
input$slider
input$plot_feel
# ...but not for anything else
isolate({
withProgress({
setProgress(message = "Please wait for a moment......")
tweetdays <- input$slider
tweets <- searchTwitter(input$source1,n=n_tweets,since =as.character(Sys.Date()-tweetdays), until= as.character(Sys.Date()-tweetdays+1))
tweets <- strip_retweets(tweets, strip_manual=TRUE, strip_mt=TRUE)
df <- twListToDF(tweets)
df$Search <- input$source1
if( (input$show_source2 == TRUE) && (input$source2 != ""))
{
tweets2 <- searchTwitter(input$source2, n=n_tweets,since =as.character(Sys.Date()-tweetdays), until= as.character(Sys.Date()-tweetdays+1))
tweets2 <- strip_retweets(tweets2, strip_manual=TRUE, strip_mt=TRUE)
df2 <- twListToDF(tweets2)
df2$Search <- input$source2
df <- rbind(df, df2)
tweets <- c(tweets, tweets2)
}
df$Date <- format(df$created,'%m/%d/%Y %H:%I:%S')
df$Source <- apply(df, 1, get_source)
sentences <- sapply(df$text, function(x) tryTolower(x))
scores <- score.sentiment(sentences, pos.words, neg.words)
df <- cbind(df, scores)
df <- df[, c("id", "text", "Source", "Date", "Search", "created", "score")]
names(df) <- c("id", "Post", "Source", "Date", "Search", "created", "score")
df
})
})
})
output$plot <- renderPlot({
df <- tweets_df()
sources <- df$Source
sources <- sapply(sources, function(x) ifelse(length(x) > 1, x[2], x[1]))
source_table <- table(sources)
s_t <- source_table[source_table > 10]
pie(s_t, col = rainbow(length(s_t)))
})
output$plot_old <- renderPlot({
df <- tweets_df_old()
sources <- df$Source
sources <- sapply(sources, function(x) ifelse(length(x) > 1, x[2], x[1]))
source_table <- table(sources)
s_t <- source_table[source_table > 10]
pie(s_t, col = rainbow(length(s_t)))
})
output$trends <- reactive({
df <- tweets_df()
source1 <- df[df$Search==input$source1,]
p1 <- ggplot(source1, aes(x=created, y=score)) + geom_point(shape=1, size=0)+geom_smooth(se=F)+labs(title=input$source1, x = "Date /Time", y = "Popularity") + ylim(-5, 5)
if( (input$show_source2 == TRUE) && (input$source2 != ""))
{
source2 <- df[df$Search==input$source2,]
p2 <- ggplot(source2, aes(x=created, y=score)) + geom_point(shape=1, size=0)+geom_smooth(se=F)+labs(title=input$source2, x = "Date /Time", y = "Popularity") + ylim(-5, 5)
grid.arrange(p1, p2, nrow=1, ncol=2)
}
else
print(p1)
})
output$trends_old <- reactive({
df <- tweets_df_old()
source1 <- df[df$Search==input$source1,]
p1 <- ggplot(source1, aes(x=created, y=score)) + geom_point(shape=1, size=0)+geom_smooth(se=F)+labs(title=input$source1, x = "Date /Time", y = "Popularity") + ylim(-5, 5)
if( (input$show_source2 == TRUE) && (input$source2 != ""))
{
source2 <- df[df$Search==input$source2,]
p2 <- ggplot(source2, aes(x=created, y=score)) + geom_point(shape=1, size=0)+geom_smooth(se=F)+labs(title=input$source2, x = "Date /Time", y = "Popularity") + ylim(-5, 5)
grid.arrange(p1, p2, nrow=1, ncol=2)
}
else
print(p1)
})
output$tweet_view <- renderPrint({
if( (input$show_source2 == TRUE) && (input$source2 != ""))
cat(paste(input$source1, " vs. ", input$source2))
else
cat(input$source1)
})
output$printer <- renderPrint({
df <- tweets_df()
df <- df[df$Search==input$source1,]
df$Post[1]
})
output$printer_old <- renderPrint({
df <- tweets_df_old()
df <- df[df$Search==input$source1,]
df$Post[1]
})
output$viewtable <- renderTable({
df <- tweets_df()
df <- df[df$Search==input$source1,]
head(df, n = 5, addrownums=F)
})
output$vs_viewtable <- renderTable({
df <- tweets_df_old()
df <- df[df$Search==input$source1,]
head(df, n = 5, addrownums=F)
})
# Function for generating tooltip text
movie_tooltip <- function(x) {
if (is.null(x)) return(NULL)
if (is.null(x$id)) return(NULL)
all_tweets <- isolate(tweets_df())
tweet <- all_tweets[all_tweets$id == x$id, ]
paste0("<b>", tweet$Post, "</b><br><em><small>from ", tweet$Source, " (", tweet$Date, ")</small></em>")
}
movie_tooltip_old <- function(x) {
if (is.null(x)) return(NULL)
if (is.null(x$id)) return(NULL)
all_tweets <- isolate(tweets_df_old())
tweet <- all_tweets[all_tweets$id == x$id, ]
paste0("<b>", tweet$Post, "</b><br><em><small>from ", tweet$Source, " (", tweet$Date, ")</small></em>")
}
vis <- reactive({
legend_val <- c(input$source1)
if( (input$show_source2 == TRUE) && (input$source2 != ""))
legend_val <- c(input$source1, input$source2)
df <- tweets_df()
df %>% ggvis(~created, ~score) %>% layer_points(fill = ~Search, key := ~id) %>% layer_lines(stroke=~Search) %>% add_legend(c("fill", "stroke"), orient="left") %>% add_axis("x", title = "Date Time") %>% add_axis("y", title = "Popularity") %>% set_options(width = 800, height = 300) %>% add_tooltip(movie_tooltip, "click")
})
vis3 <- reactive({
df <- tweets_df_old()
df %>% ggvis(~created, ~score) %>% layer_points(fill = ~Search, key := ~id) %>% layer_lines(stroke=~Search) %>% add_legend(c("fill", "stroke"), orient="left") %>% add_axis("x", title = "Time") %>% add_axis("y", title = "Popularity") %>% set_options(width = 750, height = 300) %>% add_tooltip(movie_tooltip_old, "click")
})
vis %>% bind_shiny("plot1")
vis3 %>% bind_shiny("plot2")
#vis6 %>% bind_shiny("plot3")
})
|
/server.R
|
no_license
|
karthikchaganti/EDA-on-2016-US-Elections-Tweets
|
R
| false
| false
| 10,754
|
r
|
#install.packages("twitteR")
#install.packages("plyr")
#install.packages("stringr")
#install.packages("ggvis")
#install.packages("ggplot2")
#install.packages("memoise")
#install.packages("gridExtra")
# Function to check if package exists already
# Is the package `pack` present in any library path?
checkForPackage <- function(pack){
  pack %in% installed.packages()[, 1]
}
if(!checkForPackage("twitteR")){
install.packages("twitteR")
}
if(!checkForPackage("plyr")){
install.packages("plyr")
}
if(!checkForPackage("stringr")){
install.packages("stringr")
}
if(!checkForPackage("ggvis")){
install.packages("ggvis")
}
if(!checkForPackage("ggplot2")){
install.packages("ggplot2")
}
if(!checkForPackage("memoise")){
install.packages("memoise")
}
if(!checkForPackage("gridExtra")){
install.packages("gridExtra")
}
library(twitteR)
library(plyr)
library(stringr)
library(ggvis)
library(ggplot2)
library(memoise)
library(gridExtra)
options(shiny.trace=TRUE)
# Number of tweets to request per search, and summary size.
n_tweets <- 180
n_summary <- 10
# SECURITY(review): Twitter API credentials are hard-coded in source and
# committed to the repository — these keys should be rotated and loaded
# from environment variables or a config file outside version control.
consumerKey <- "PPPleVlMJJ92SJyoSscjabRdy"
consumerSecret <- "hI6OzAUGOpYePHaCIkuq6dOfMNJ2BTL51EixB687KmeH3hQ8X8"
acessToken <- "97679565-0nRLpRoI5SnHwjob6Czya5xfci9BBZynyUqpeyTTd"
accessTokenSecret <- "cviNSxQagHsE44S8lT5jecUos11aNPfRhivNFx6AQaBSo"
# Authenticate the twitteR session for the searches below.
setup_twitter_oauth(consumerKey, consumerSecret, acessToken, accessTokenSecret)
shinyServer(function(input, output, session) {
# Define a reactive expression for the document term matrix
# Lower-case `x`, returning NA instead of erroring on inputs tolower()
# cannot coerce (e.g. exotic encodings or non-character objects).
tryTolower <- function(x){
  tryCatch(tolower(x), error = function(e) NA)
}
# Lexicon-based sentiment scorer (Jeffrey Breen's approach).
#
# sentences: character vector of texts (expected lower-cased by the caller).
# pos.words / neg.words: character vectors of positive / negative terms.
# .progress: forwarded to plyr::laply for progress reporting.
#
# Returns a data.frame with one row per sentence: score = number of
# positive-word matches minus negative-word matches, plus the input text.
score.sentiment <- function(sentences, pos.words, neg.words, .progress='none')
{
  scores = laply(sentences, function(sentence, pos.words, neg.words) {

    # clean up sentences with R's regex-driven global substitute, gsub():
    sentence = gsub('[[:punct:]]', '', sentence)
    sentence = gsub('[[:cntrl:]]', '', sentence)
    sentence = gsub('\\d+', '', sentence)
    # and convert to lower case:
    #sentence = tolower(sentence)
    # split into words. str_split is in the stringr package
    word.list = str_split(sentence, '\\s+')
    # sometimes a list() is one level of hierarchy too much
    words = unlist(word.list)
    # compare our words to the dictionaries of positive & negative terms
    pos.matches = match(words, pos.words)
    neg.matches = match(words, neg.words)
    # match() returns the position of the matched term or NA
    # we just want a TRUE/FALSE:
    pos.matches = !is.na(pos.matches)
    neg.matches = !is.na(neg.matches)
    # and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum():
    score = sum(pos.matches) - sum(neg.matches)

    return(score)
  }, pos.words, neg.words, .progress=.progress )

  scores.df = data.frame(score=scores, text=sentences)
  return(scores.df)
}
# Strip anything that looks like an HTML/XML tag from the input string(s).
cleanFun <- function(htmlString) {
  gsub("<.*?>", "", htmlString)
}
# Tweet row -> client name: de-HTML the statusSource field of one record.
get_source <- function(x){
  cleanFun(x[["statusSource"]])
}
tweets_df <- reactive({
# Change when the "update" button is pressed...
input$plot_feel
# ...but not for anything else
isolate({
withProgress({
setProgress(message = "Please wait for a moment......")
tweets <- searchTwitter(input$source1,n=n_tweets)
tweets <- strip_retweets(tweets, strip_manual=TRUE, strip_mt=TRUE)
df <- twListToDF(tweets)
df$Search <- input$source1
if( (input$show_source2 == TRUE) && (input$source2 != ""))
{
tweets2 <- searchTwitter(input$source2, n=n_tweets)
tweets2 <- strip_retweets(tweets2, strip_manual=TRUE, strip_mt=TRUE)
df2 <- twListToDF(tweets2)
df2$Search <- input$source2
df <- rbind(df, df2)
tweets <- c(tweets, tweets2)
}
df$Date <- format(df$created,'%m/%d/%Y %H:%I:%S')
df$Source <- apply(df, 1, get_source)
sentences <- sapply(df$text, function(x) tryTolower(x))
scores <- score.sentiment(sentences, pos.words, neg.words)
df <- cbind(df, scores)
df <- df[, c("id", "text", "Source", "Date", "Search", "created", "score")]
names(df) <- c("id", "Post", "Source", "Date", "Search", "created", "score")
df
})
})
})
tweets_df_old <- reactive({
# Change when the "update" button is pressed...
input$slider
input$plot_feel
# ...but not for anything else
isolate({
withProgress({
setProgress(message = "Please wait for a moment......")
tweetdays <- input$slider
tweets <- searchTwitter(input$source1,n=n_tweets,since =as.character(Sys.Date()-tweetdays), until= as.character(Sys.Date()-tweetdays+1))
tweets <- strip_retweets(tweets, strip_manual=TRUE, strip_mt=TRUE)
df <- twListToDF(tweets)
df$Search <- input$source1
if( (input$show_source2 == TRUE) && (input$source2 != ""))
{
tweets2 <- searchTwitter(input$source2, n=n_tweets,since =as.character(Sys.Date()-tweetdays), until= as.character(Sys.Date()-tweetdays+1))
tweets2 <- strip_retweets(tweets2, strip_manual=TRUE, strip_mt=TRUE)
df2 <- twListToDF(tweets2)
df2$Search <- input$source2
df <- rbind(df, df2)
tweets <- c(tweets, tweets2)
}
df$Date <- format(df$created,'%m/%d/%Y %H:%I:%S')
df$Source <- apply(df, 1, get_source)
sentences <- sapply(df$text, function(x) tryTolower(x))
scores <- score.sentiment(sentences, pos.words, neg.words)
df <- cbind(df, scores)
df <- df[, c("id", "text", "Source", "Date", "Search", "created", "score")]
names(df) <- c("id", "Post", "Source", "Date", "Search", "created", "score")
df
})
})
})
output$plot <- renderPlot({
df <- tweets_df()
sources <- df$Source
sources <- sapply(sources, function(x) ifelse(length(x) > 1, x[2], x[1]))
source_table <- table(sources)
s_t <- source_table[source_table > 10]
pie(s_t, col = rainbow(length(s_t)))
})
output$plot_old <- renderPlot({
df <- tweets_df_old()
sources <- df$Source
sources <- sapply(sources, function(x) ifelse(length(x) > 1, x[2], x[1]))
source_table <- table(sources)
s_t <- source_table[source_table > 10]
pie(s_t, col = rainbow(length(s_t)))
})
output$trends <- reactive({
df <- tweets_df()
source1 <- df[df$Search==input$source1,]
p1 <- ggplot(source1, aes(x=created, y=score)) + geom_point(shape=1, size=0)+geom_smooth(se=F)+labs(title=input$source1, x = "Date /Time", y = "Popularity") + ylim(-5, 5)
if( (input$show_source2 == TRUE) && (input$source2 != ""))
{
source2 <- df[df$Search==input$source2,]
p2 <- ggplot(source2, aes(x=created, y=score)) + geom_point(shape=1, size=0)+geom_smooth(se=F)+labs(title=input$source2, x = "Date /Time", y = "Popularity") + ylim(-5, 5)
grid.arrange(p1, p2, nrow=1, ncol=2)
}
else
print(p1)
})
output$trends_old <- reactive({
df <- tweets_df_old()
source1 <- df[df$Search==input$source1,]
p1 <- ggplot(source1, aes(x=created, y=score)) + geom_point(shape=1, size=0)+geom_smooth(se=F)+labs(title=input$source1, x = "Date /Time", y = "Popularity") + ylim(-5, 5)
if( (input$show_source2 == TRUE) && (input$source2 != ""))
{
source2 <- df[df$Search==input$source2,]
p2 <- ggplot(source2, aes(x=created, y=score)) + geom_point(shape=1, size=0)+geom_smooth(se=F)+labs(title=input$source2, x = "Date /Time", y = "Popularity") + ylim(-5, 5)
grid.arrange(p1, p2, nrow=1, ncol=2)
}
else
print(p1)
})
output$tweet_view <- renderPrint({
if( (input$show_source2 == TRUE) && (input$source2 != ""))
cat(paste(input$source1, " vs. ", input$source2))
else
cat(input$source1)
})
output$printer <- renderPrint({
df <- tweets_df()
df <- df[df$Search==input$source1,]
df$Post[1]
})
output$printer_old <- renderPrint({
df <- tweets_df_old()
df <- df[df$Search==input$source1,]
df$Post[1]
})
output$viewtable <- renderTable({
df <- tweets_df()
df <- df[df$Search==input$source1,]
head(df, n = 5, addrownums=F)
})
output$vs_viewtable <- renderTable({
df <- tweets_df_old()
df <- df[df$Search==input$source1,]
head(df, n = 5, addrownums=F)
})
# Function for generating tooltip text
movie_tooltip <- function(x) {
if (is.null(x)) return(NULL)
if (is.null(x$id)) return(NULL)
all_tweets <- isolate(tweets_df())
tweet <- all_tweets[all_tweets$id == x$id, ]
paste0("<b>", tweet$Post, "</b><br><em><small>from ", tweet$Source, " (", tweet$Date, ")</small></em>")
}
movie_tooltip_old <- function(x) {
if (is.null(x)) return(NULL)
if (is.null(x$id)) return(NULL)
all_tweets <- isolate(tweets_df_old())
tweet <- all_tweets[all_tweets$id == x$id, ]
paste0("<b>", tweet$Post, "</b><br><em><small>from ", tweet$Source, " (", tweet$Date, ")</small></em>")
}
# Reactive ggvis plot of popularity over time for the current data set.
# NOTE(review): legend_val is computed but never used below; however,
# reading input$show_source2 / input$source2 here makes this reactive
# re-run when those inputs change, so removing it could alter behavior
# — confirm intent before cleaning up.
vis <- reactive({
legend_val <- c(input$source1)
if( (input$show_source2 == TRUE) && (input$source2 != ""))
legend_val <- c(input$source1, input$source2)
df <- tweets_df()
df %>% ggvis(~created, ~score) %>% layer_points(fill = ~Search, key := ~id) %>% layer_lines(stroke=~Search) %>% add_legend(c("fill", "stroke"), orient="left") %>% add_axis("x", title = "Date Time") %>% add_axis("y", title = "Popularity") %>% set_options(width = 800, height = 300) %>% add_tooltip(movie_tooltip, "click")
})
# Same plot for the old data set (narrower, different x-axis title).
vis3 <- reactive({
df <- tweets_df_old()
df %>% ggvis(~created, ~score) %>% layer_points(fill = ~Search, key := ~id) %>% layer_lines(stroke=~Search) %>% add_legend(c("fill", "stroke"), orient="left") %>% add_axis("x", title = "Time") %>% add_axis("y", title = "Popularity") %>% set_options(width = 750, height = 300) %>% add_tooltip(movie_tooltip_old, "click")
})
# Attach the reactive plots to their UI placeholders.
vis %>% bind_shiny("plot1")
vis3 %>% bind_shiny("plot2")
#vis6 %>% bind_shiny("plot3")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{match_call_arg}
\alias{match_call_arg}
\title{Matches the arg to anal_prnGSPA}
\usage{
match_call_arg(call_rda = "foo", arg = "scale_log2r")
}
\arguments{
\item{call_rda}{the name of a rda.}
\item{arg}{Argument to be matched.}
}
\description{
Matches the arg to anal_prnGSPA
}
|
/man/match_call_arg.Rd
|
permissive
|
sailfish009/proteoQ
|
R
| false
| true
| 370
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{match_call_arg}
\alias{match_call_arg}
\title{Matches the arg to anal_prnGSPA}
\usage{
match_call_arg(call_rda = "foo", arg = "scale_log2r")
}
\arguments{
\item{call_rda}{the name of a rda.}
\item{arg}{Argument to be matched.}
}
\description{
Matches the arg to anal_prnGSPA
}
|
# Example usage of RDStreeboot::sample.RDS on the bundled faux network.
library(RDStreeboot)
### Name: sample.RDS
### Title: Draw RDS Sample
### Aliases: sample.RDS
### ** Examples
## load data
data(faux.network)
## draw RDS from network
# Positional arguments follow the sample.RDS() signature — presumably
# sample size 100, 2 seeds, 3 coupons, coupon-count probabilities
# c(0,1/3,1/3,1/3), with-replacement TRUE; confirm against the
# RDStreeboot reference manual.
samp <- sample.RDS(faux.network$traits, faux.network$adj.mat, 100, 2, 3, c(0,1/3,1/3,1/3), TRUE)
|
/data/genthat_extracted_code/RDStreeboot/examples/sample.RDS.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 271
|
r
|
library(RDStreeboot)
### Name: sample.RDS
### Title: Draw RDS Sample
### Aliases: sample.RDS
### ** Examples
## load data
data(faux.network)
## draw RDS from network
samp <- sample.RDS(faux.network$traits, faux.network$adj.mat, 100, 2, 3, c(0,1/3,1/3,1/3), TRUE)
|
<?xml version="1.0" encoding="utf-8"?>
<serviceModel xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="SocketService" generation="1" functional="0" release="0" Id="eabf9def-fdd5-4dd0-ba87-b36318d5dad9" dslVersion="1.2.0.0" xmlns="http://schemas.microsoft.com/dsltools/RDSM">
<groups>
<group name="SocketServiceGroup" generation="1" functional="0" release="0">
<componentports>
<inPort name="WorkerRole1:Endpoint1" protocol="tcp">
<inToChannel>
<lBChannelMoniker name="/SocketService/SocketServiceGroup/LB:WorkerRole1:Endpoint1" />
</inToChannel>
</inPort>
</componentports>
<settings>
<aCS name="WorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="">
<maps>
<mapMoniker name="/SocketService/SocketServiceGroup/MapWorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</maps>
</aCS>
<aCS name="WorkerRole1Instances" defaultValue="[1,1,1]">
<maps>
<mapMoniker name="/SocketService/SocketServiceGroup/MapWorkerRole1Instances" />
</maps>
</aCS>
</settings>
<channels>
<lBChannel name="LB:WorkerRole1:Endpoint1">
<toPorts>
<inPortMoniker name="/SocketService/SocketServiceGroup/WorkerRole1/Endpoint1" />
</toPorts>
</lBChannel>
</channels>
<maps>
<map name="MapWorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity">
<setting>
<aCSMoniker name="/SocketService/SocketServiceGroup/WorkerRole1/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</setting>
</map>
<map name="MapWorkerRole1Instances" kind="Identity">
<setting>
<sCSPolicyIDMoniker name="/SocketService/SocketServiceGroup/WorkerRole1Instances" />
</setting>
</map>
</maps>
<components>
<groupHascomponents>
<role name="WorkerRole1" generation="1" functional="0" release="0" software="C:\Users\test01\Desktop\Refthis\Refthis!\RefthisSocketServer\SocketService\csx\Debug\roles\WorkerRole1" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaWorkerHost.exe " memIndex="-1" hostingEnvironment="consoleroleadmin" hostingEnvironmentVersion="2">
<componentports>
<inPort name="Endpoint1" protocol="tcp" portRanges="10100" />
</componentports>
<settings>
<aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" />
<aCS name="__ModelData" defaultValue="<m role="WorkerRole1" xmlns="urn:azure:m:v1"><r name="WorkerRole1"><e name="Endpoint1" /></r></m>" />
</settings>
<resourcereferences>
<resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" />
<resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" />
</resourcereferences>
</role>
<sCSPolicy>
<sCSPolicyIDMoniker name="/SocketService/SocketServiceGroup/WorkerRole1Instances" />
<sCSPolicyUpdateDomainMoniker name="/SocketService/SocketServiceGroup/WorkerRole1UpgradeDomains" />
<sCSPolicyFaultDomainMoniker name="/SocketService/SocketServiceGroup/WorkerRole1FaultDomains" />
</sCSPolicy>
</groupHascomponents>
</components>
<sCSPolicy>
<sCSPolicyUpdateDomain name="WorkerRole1UpgradeDomains" defaultPolicy="[5,5,5]" />
<sCSPolicyFaultDomain name="WorkerRole1FaultDomains" defaultPolicy="[2,2,2]" />
<sCSPolicyID name="WorkerRole1Instances" defaultPolicy="[1,1,1]" />
</sCSPolicy>
</group>
</groups>
<implements>
<implementation Id="db9ff8ee-45b1-4e02-868e-e9578d4ca268" ref="Microsoft.RedDog.Contract\ServiceContract\SocketServiceContract@ServiceDefinition">
<interfacereferences>
<interfaceReference Id="39d102dc-77fe-45c0-83b0-2e166f8d424b" ref="Microsoft.RedDog.Contract\Interface\WorkerRole1:Endpoint1@ServiceDefinition">
<inPort>
<inPortMoniker name="/SocketService/SocketServiceGroup/WorkerRole1:Endpoint1" />
</inPort>
</interfaceReference>
</interfacereferences>
</implementation>
</implements>
</serviceModel>
|
/SocketService/csx/Debug/ServiceDefinition.rd
|
no_license
|
jsonknightlee/RefthisSocketServer
|
R
| false
| false
| 4,612
|
rd
|
<?xml version="1.0" encoding="utf-8"?>
<serviceModel xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="SocketService" generation="1" functional="0" release="0" Id="eabf9def-fdd5-4dd0-ba87-b36318d5dad9" dslVersion="1.2.0.0" xmlns="http://schemas.microsoft.com/dsltools/RDSM">
<groups>
<group name="SocketServiceGroup" generation="1" functional="0" release="0">
<componentports>
<inPort name="WorkerRole1:Endpoint1" protocol="tcp">
<inToChannel>
<lBChannelMoniker name="/SocketService/SocketServiceGroup/LB:WorkerRole1:Endpoint1" />
</inToChannel>
</inPort>
</componentports>
<settings>
<aCS name="WorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="">
<maps>
<mapMoniker name="/SocketService/SocketServiceGroup/MapWorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</maps>
</aCS>
<aCS name="WorkerRole1Instances" defaultValue="[1,1,1]">
<maps>
<mapMoniker name="/SocketService/SocketServiceGroup/MapWorkerRole1Instances" />
</maps>
</aCS>
</settings>
<channels>
<lBChannel name="LB:WorkerRole1:Endpoint1">
<toPorts>
<inPortMoniker name="/SocketService/SocketServiceGroup/WorkerRole1/Endpoint1" />
</toPorts>
</lBChannel>
</channels>
<maps>
<map name="MapWorkerRole1:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity">
<setting>
<aCSMoniker name="/SocketService/SocketServiceGroup/WorkerRole1/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</setting>
</map>
<map name="MapWorkerRole1Instances" kind="Identity">
<setting>
<sCSPolicyIDMoniker name="/SocketService/SocketServiceGroup/WorkerRole1Instances" />
</setting>
</map>
</maps>
<components>
<groupHascomponents>
<role name="WorkerRole1" generation="1" functional="0" release="0" software="C:\Users\test01\Desktop\Refthis\Refthis!\RefthisSocketServer\SocketService\csx\Debug\roles\WorkerRole1" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaWorkerHost.exe " memIndex="-1" hostingEnvironment="consoleroleadmin" hostingEnvironmentVersion="2">
<componentports>
<inPort name="Endpoint1" protocol="tcp" portRanges="10100" />
</componentports>
<settings>
<aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" />
<aCS name="__ModelData" defaultValue="<m role="WorkerRole1" xmlns="urn:azure:m:v1"><r name="WorkerRole1"><e name="Endpoint1" /></r></m>" />
</settings>
<resourcereferences>
<resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" />
<resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" />
</resourcereferences>
</role>
<sCSPolicy>
<sCSPolicyIDMoniker name="/SocketService/SocketServiceGroup/WorkerRole1Instances" />
<sCSPolicyUpdateDomainMoniker name="/SocketService/SocketServiceGroup/WorkerRole1UpgradeDomains" />
<sCSPolicyFaultDomainMoniker name="/SocketService/SocketServiceGroup/WorkerRole1FaultDomains" />
</sCSPolicy>
</groupHascomponents>
</components>
<sCSPolicy>
<sCSPolicyUpdateDomain name="WorkerRole1UpgradeDomains" defaultPolicy="[5,5,5]" />
<sCSPolicyFaultDomain name="WorkerRole1FaultDomains" defaultPolicy="[2,2,2]" />
<sCSPolicyID name="WorkerRole1Instances" defaultPolicy="[1,1,1]" />
</sCSPolicy>
</group>
</groups>
<implements>
<implementation Id="db9ff8ee-45b1-4e02-868e-e9578d4ca268" ref="Microsoft.RedDog.Contract\ServiceContract\SocketServiceContract@ServiceDefinition">
<interfacereferences>
<interfaceReference Id="39d102dc-77fe-45c0-83b0-2e166f8d424b" ref="Microsoft.RedDog.Contract\Interface\WorkerRole1:Endpoint1@ServiceDefinition">
<inPort>
<inPortMoniker name="/SocketService/SocketServiceGroup/WorkerRole1:Endpoint1" />
</inPort>
</interfaceReference>
</interfacereferences>
</implementation>
</implements>
</serviceModel>
|
# =============================================================================================================================================
# Attempt
library(deSolve)
# Derivatives for a closed SIR model with fixed population N = 1000.
# Called by deSolve::ode(); t is time, x the named state (S, I, R),
# parms the named parameters (beta, r). Returns list(c(dS, dI, dR)).
SIR <- function(t, x, parms) {
  with(as.list(c(parms, x)), {
    infection <- beta * I / 1000 * S  # new infections per unit time
    recovery <- r * I                 # recoveries per unit time
    list(c(-infection, infection - recovery, recovery))
  })
}
# Baseline run: one infectious individual in a population of 1000,
# transmission rate 0.8, mean infectious period 10 time units.
start <- c(S=999, I=1, R=0)
parms <- c(beta=0.8, r=1/10)
times <- seq(0, 100, 1)
run_d <- ode(times=times, y=start, func=SIR, parms=parms)
summary(run_d)
# Columns of the ode() output: 1 = time, 2:4 = S, I, R.
matplot(run_d[,2:4], type="l")
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SIR Model", ylab="Population")
lines(run_d[,3], col="blue")
lines(run_d[,4], col="green")
legend("topright",legend=c("S", "I", "R"), col=c("red", "blue", "green"), lty=c(1,1,1))
# Animate how the epidemic changes as the transmission rate beta grows
# from 0.1 to 5 in steps of 0.1 — one GIF frame per beta value.
library(animation)
saveGIF({
  beta <- seq(0, 5, 0.1)
  for (i in 2:length(beta)) {  # start at 2 to skip the trivial beta = 0 run
    parms[1] <- beta[i]
    # FIX: the solver function in this section is named SIR; `sir` is
    # only defined later in the file, so func=sir errored at this point.
    run_d <- ode(times=times, y=start, func=SIR, parms = parms)
    plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SIR Model")
    lines(run_d[,3], col="blue")
    lines(run_d[,4], col="green")
    legend("topright",legend=c("S", "I", "R"), col=c("red", "blue", "green"), lty=c(1,1,1))
    Sys.sleep(0.3)
  }
# FIX: the animation option is `interval` (seconds between frames);
# `internal` is not a recognized argument and was silently ignored.
},movie.name = "beta_sir.gif", interval=0.2)
# ------------------------------------------------ SEIR Model ------------------------------------------------ x
# Derivatives for an SEIR model with fixed population N = 1000:
# S -> E at rate beta*I/N*S, E -> I at rate a*E, I -> R at rate r*I.
# deSolve::ode() compatible; returns list(c(dS, dE, dI, dR)).
SEIR <- function(t, x, parms) {
  with(as.list(c(parms, x)), {
    exposure <- beta * I / 1000 * S  # susceptibles becoming exposed
    onset <- a * E                   # exposed becoming infectious
    recovery <- r * I                # infectious recovering
    list(c(-exposure, exposure - onset, onset - recovery, recovery))
  })
}
# Baseline SEIR run: mean latent period 12, mean infectious period 10.
start <- c(S=999, E=0, I=1, R=0)
parms <- c(beta=0.8, a=1/12, r=1/10)
times <- seq(0, 100, 1)
run_d <- ode(times=times, y=start, func=SEIR, parms=parms)
# FIX: SEIR output has four compartments in columns 2:5 (S, E, I, R);
# 2:4 silently dropped the R curve.
matplot(run_d[,2:5], type="l")
# FIX: title said "SIR Model" although this plots the SEIR run.
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SEIR Model", ylab="Population")
lines(run_d[,3], col="orange")
lines(run_d[,4], col="blue")
lines(run_d[,5], col="green")
# FIX: four legend entries need four line types (lty was length 3).
legend("topright",legend=c("S", 'E', "I", "R"), col=c("red", 'orange', "blue", "green"), lty=c(1,1,1,1))
# Sweep beta and redraw the epidemic curves for each value.
beta<-seq(0,5,0.1)
for (i in seq_along(beta)){
  parms[1]<-beta[i]
  run_d<-ode(times=times, y=start, func=SEIR,parms=parms)
  plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SEIR")
  lines(run_d[,3], col="blue")
  lines(run_d[,4], col="green")
  lines(run_d[,5], col="orange")
  legend("topright",legend=c("S","E", "I", "R"), col=c("red", "blue", "green", "orange"), lty=c(1,1,1,1))
  Sys.sleep(0.2)
}
# =============================================================================================================================================
# Solution
#IDM Exercise 1
#Differential Equations
#SIR model for measles
library(deSolve)
# Reference solution: SIR derivative function for deSolve::ode().
# Same model as SIR() above (N = 1000); returns list(c(dS, dI, dR)).
sir <- function(t, x, parms) {
with(as.list(c(parms, x)), {
dS=-beta*I/1000*S
dI=beta*I/1000*S-r*I
dR=r*I
output <- c(dS, dI, dR)
list(output)
})
}
## Initial state: 1 infectious case in a population of 1000
start<-c(S=999, I=1,R=0 )
## The parameters: transmission rate 0.8, recovery rate 1/10
parms <- c(beta=0.8,r=1/10)
## vector of timesteps
times <- seq(0, 100, 1)
run_d<-ode(times=times, y=start, func=sir,parms=parms)
#summary(run_d)
# Columns of the ode() output: 1 = time, 2:4 = S, I, R.
matplot(run_d[,2:4], type="l")
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SIR Model", ylab="Population")
lines(run_d[,3], col="blue")
lines(run_d[,4], col="green")
legend("topright",legend=c("S", "I", "R"), col=c("red", "blue", "green"), lty=c(1,1,1))
#(c)
# Animate the SIR epidemic across a sweep of beta values; one frame per
# beta, 0.2 s between frames.
library(animation)
saveGIF({
  beta<-seq(0,5,0.1)
  #r<-seq(1/20,1,0.05)
  for (i in seq_along(beta)){
    parms[1]<-beta[i]
    # parms[2]<-r[i]
    run_d<-ode(times=times, y=start, func=sir,parms=parms)
    plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SIR Model")
    lines(run_d[,3], col="blue")
    lines(run_d[,4], col="green")
    legend("topright",legend=c("S", "I", "R"), col=c("red", "blue", "green"), lty=c(1,1,1))
    Sys.sleep(0.3)
  }
# FIX: the animation option is `interval`; `internal` was a typo and
# had no effect on frame timing.
}, movie.name = "beta_sir.gif", interval=0.2)
#SEIR model for measles
library(deSolve)
# Reference solution: SEIR derivative function for deSolve::ode().
# f is the rate of progressing from exposed to infectious (1/latent
# period); returns list(c(dS, dE, dI, dR)).
seir <- function(t, x, parms) {
with(as.list(c(parms, x)), {
dS=-beta*I/1000*S
dE=beta*I/1000*S-f*E
dI=f*E-r*I
dR=r*I
output <- c(dS, dE, dI, dR)
list(output)
})
}
## Initial state: one infectious case, nobody yet exposed or recovered
start<-c(S=999, E=0, I=1,R=0 )
## The parameters: latent period 12, infectious period 10
parms <- c(beta=0.8,f=1/12,r=1/10)
## vector of timesteps
times <- seq(0, 100, 1)
run_d<-ode(times=times, y=start, func=seir,parms=parms)
#plot(run_d, ylim=c(0,1))
#summary(run_d)
# Columns: 1 = time, 2 = S, 3 = E, 4 = I, 5 = R.
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SEIR")
lines(run_d[,3], col="blue")
lines(run_d[,4], col="green")
lines(run_d[,5], col="orange")
legend("topright",legend=c("S","E", "I", "R"), col=c("red", "blue", "green", "orange"), lty=c(1,1,1,1))
# Re-solve and redraw for each beta in the sweep (interactive preview,
# not saved to a GIF).
beta<-seq(0,5,0.1)
for (i in 1:length(beta)){
parms[1]<-beta[i]
run_d<-ode(times=times, y=start, func=seir,parms=parms)
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SEIR")
lines(run_d[,3], col="blue")
lines(run_d[,4], col="green")
lines(run_d[,5], col="orange")
legend("topright",legend=c("S","E", "I", "R"), col=c("red", "blue", "green", "orange"), lty=c(1,1,1,1))
Sys.sleep(0.2)
}
|
/specification/lecture notes/Day 2/ex1.R
|
no_license
|
ZachWolpe/BAMM
|
R
| false
| false
| 5,028
|
r
|
# =============================================================================================================================================
# Attempt
library(deSolve)
SIR <- function(t, x, parms) {
with(as.list(c(parms, x)), {
dS <- -beta*I/1000*S
dI <- beta*I/1000*S - r*I
dR <- r*I
output <- c(dS, dI, dR)
list(output)
})
}
start <- c(S=999, I=1, R=0)
parms <- c(beta=0.8, r=1/10)
times <- seq(0, 100, 1)
run_d <- ode(times=times, y=start, func=SIR, parms=parms)
summary(run_d)
matplot(run_d[,2:4], type="l")
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SIR Model", ylab="Population")
lines(run_d[,3], col="blue")
lines(run_d[,4], col="green")
legend("topright",legend=c("S", "I", "R"), col=c("red", "blue", "green"), lty=c(1,1,1))
library(animation)
saveGIF({
beta <- seq(0, 5, 0.1)
for (i in 2:length(beta)) {
parms[1] <- beta[i]
run_d <- ode(times=times, y=start, func=sir, parms = parms)
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SIR Model")
lines(run_d[,3], col="blue")
lines(run_d[,4], col="green")
legend("topright",legend=c("S", "I", "R"), col=c("red", "blue", "green"), lty=c(1,1,1))
Sys.sleep(0.3)
}
},movie.name = "beta_sir.gif", internal=0.2)
# ------------------------------------------------ SEIR Model ------------------------------------------------ x
SEIR <- function(t, x, parms) {
with(as.list(c(parms, x)), {
dS <- -beta*I/1000*S
dE <- beta*I/1000*S - a*E
dI <- a*E - r*I
dR <- r*I
output <- c(dS, dE, dI, dR)
list(output)
})
}
start <- c(S=999, E=0, I=1, R=0)
parms <- c(beta=0.8, a=1/12, r=1/10)
times <- seq(0, 100, 1)
run_d <- ode(times=times, y=start, func=SEIR, parms=parms)
matplot(run_d[,2:4], type="l")
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SIR Model", ylab="Population")
lines(run_d[,3], col="orange")
lines(run_d[,4], col="blue")
lines(run_d[,5], col="green")
legend("topright",legend=c("S", 'E', "I", "R"), col=c("red", 'orange', "blue", "green"), lty=c(1,1,1))
beta<-seq(0,5,0.1)
for (i in 1:length(beta)){
parms[1]<-beta[i]
run_d<-ode(times=times, y=start, func=SEIR,parms=parms)
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SEIR")
lines(run_d[,3], col="blue")
lines(run_d[,4], col="green")
lines(run_d[,5], col="orange")
legend("topright",legend=c("S","E", "I", "R"), col=c("red", "blue", "green", "orange"), lty=c(1,1,1,1))
Sys.sleep(0.2)
}
# =============================================================================================================================================
# Solution
#IDM Exercise 1
#Differential Equations
#SIR model for measles
library(deSolve)
sir <- function(t, x, parms) {
with(as.list(c(parms, x)), {
dS=-beta*I/1000*S
dI=beta*I/1000*S-r*I
dR=r*I
output <- c(dS, dI, dR)
list(output)
})
}
#the Initial values
start<-c(S=999, I=1,R=0 )
## The parameters
parms <- c(beta=0.8,r=1/10)
## vector of timesteps
times <- seq(0, 100, 1)
run_d<-ode(times=times, y=start, func=sir,parms=parms)
#summary(run_d)
matplot(run_d[,2:4], type="l")
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SIR Model", ylab="Population")
lines(run_d[,3], col="blue")
lines(run_d[,4], col="green")
legend("topright",legend=c("S", "I", "R"), col=c("red", "blue", "green"), lty=c(1,1,1))
#(c)
library(animation)
saveGIF({
beta<-seq(0,5,0.1)
#r<-seq(1/20,1,0.05)
for (i in 1:length(beta)){
parms[1]<-beta[i]
# parms[2]<-r[i]
run_d<-ode(times=times, y=start, func=sir,parms=parms)
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SIR Model")
lines(run_d[,3], col="blue")
lines(run_d[,4], col="green")
legend("topright",legend=c("S", "I", "R"), col=c("red", "blue", "green"), lty=c(1,1,1))
Sys.sleep(0.3)
}
}, movie.name = "beta_sir.gif", internal=0.2)
#SEIR model for measles
library(deSolve)
seir <- function(t, x, parms) {
with(as.list(c(parms, x)), {
dS=-beta*I/1000*S
dE=beta*I/1000*S-f*E
dI=f*E-r*I
dR=r*I
output <- c(dS, dE, dI, dR)
list(output)
})
}
#the Initial values
start<-c(S=999, E=0, I=1,R=0 )
## The parameters
parms <- c(beta=0.8,f=1/12,r=1/10)
## vector of timesteps
times <- seq(0, 100, 1)
run_d<-ode(times=times, y=start, func=seir,parms=parms)
#plot(run_d, ylim=c(0,1))
#summary(run_d)
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SEIR")
lines(run_d[,3], col="blue")
lines(run_d[,4], col="green")
lines(run_d[,5], col="orange")
legend("topright",legend=c("S","E", "I", "R"), col=c("red", "blue", "green", "orange"), lty=c(1,1,1,1))
beta<-seq(0,5,0.1)
for (i in 1:length(beta)){
parms[1]<-beta[i]
run_d<-ode(times=times, y=start, func=seir,parms=parms)
plot(run_d[,2], col="red", ylim=c(0,1000), type="l", main="SEIR")
lines(run_d[,3], col="blue")
lines(run_d[,4], col="green")
lines(run_d[,5], col="orange")
legend("topright",legend=c("S","E", "I", "R"), col=c("red", "blue", "green", "orange"), lty=c(1,1,1,1))
Sys.sleep(0.2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets_documentation.R
\docType{data}
\name{sumtax_example_data}
\alias{sumtax_example_data}
\title{Taxa Summary from Produce Dataset}
\format{
A data frame
}
\source{
Adapted from \url{http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0059310}
}
\usage{
sumtax_example_data
}
\description{
Taxa (phyla) relative abundances across all samples in the example dataset.
Produced using \code{mctoolsr::summarize_taxonomy()}.
}
\keyword{datasets}
|
/man/sumtax_example_data.Rd
|
no_license
|
leffj/mctoolsr
|
R
| false
| true
| 539
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets_documentation.R
\docType{data}
\name{sumtax_example_data}
\alias{sumtax_example_data}
\title{Taxa Summary from Produce Dataset}
\format{
A data frame
}
\source{
Adapted from \url{
http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0059310}
}
\usage{
sumtax_example_data
}
\description{
Taxa (phyla) relative abundances across all samples in the example dataset.
Produced using \code{mctoolsr::summarize_taxonomy()}.
}
\keyword{datasets}
|
# Exercise 3: using the pipe operator
# Install (if needed) and load the "dplyr" library
#install.packages("dplyr")
library("dplyr")
# Install (if needed) and load the "fueleconomy" package
#install.packages('devtools')
#devtools::install_github("hadley/fueleconomy")
library(fueleconomy)
# Which 2015 Acura model has the best hwy MPG? (Use dplyr, but without method
# chaining or pipes--use temporary variables!)
acura_model <- filter(vehicles, make == "Acura", year == 2015)
best <- filter(acura_model, hwy == max(hwy))
best_model <- select(best, model)
# Which 2015 Acura model has the best hwy MPG? (Use dplyr, nesting functions)
# FIX: `model` belongs to select(), not the inner filter(); the original
# parenthesization passed it as a third filter condition and left
# select() with a single argument.
best_model <- select(
  filter(filter(vehicles, make == "Acura", year == 2015), hwy == max(hwy)),
  model
)
# Which 2015 Acura model has the best hwy MPG? (Use dplyr and the pipe operator)
best_model <- filter(vehicles, make == "Acura", year == 2015) %>%
  filter(hwy == max(hwy)) %>%
  select(model)
### Bonus
# Write 3 functions, one for each approach. Then,
# Test how long it takes to perform each one 1000 times
|
/exercise-3/exercise.R
|
permissive
|
abondarenko98/ch10-dplyr
|
R
| false
| false
| 1,062
|
r
|
# Exercise 3: using the pipe operator
# Install (if needed) and load the "dplyr" library
#install.packages("dplyr")
library("dplyr")
# Install (if needed) and load the "fueleconomy" package
#install.packages('devtools')
#devtools::install_github("hadley/fueleconomy")
library(fueleconomy)
# Which 2015 Acura model has the best hwy MGH? (Use dplyr, but without method
# chaining or pipes--use temporary variables!)
acura_model <- filter(vehicles, make == "Acura", year == 2015)
best <- filter(acura_model, hwy == max(hwy))
best_model <- select(best, model)
# Which 2015 Acura model has the best hwy MPG? (Use dplyr, nesting functions)
best_model <- select(
filter(filter(vehicles, make == "Acura", year == 2015), hwy == max(hwy), model)
)
# Which 2015 Acura model has the best hwy MPG? (Use dplyr and the pipe operator)
best_model <- filter(vehicles, make == "Acura", year == 2015) %>%
filter(hwy == max(hwy)) %>%
select(model)
### Bonus
# Write 3 functions, one for each approach. Then,
# Test how long it takes to perform each one 1000 times
|
# Decide whether an individual spawns this month: TRUE when stored
# reproductive energy exceeds the energetic cost of a clutch AND the
# current month falls inside the spawning window.
# NOTE(review): relies on globals `weightEgg` and `spawningWindow`, and
# writes `triggerEnergy` into the global environment via `<<-` —
# presumably read elsewhere in the model; confirm before refactoring.
# The clutch-size line (length * 150.83 - 3027.244) looks like a fitted
# length-fecundity regression — TODO confirm source of coefficients.
# For any reproScenario other than 'fixedClutchSize' the function
# returns NULL implicitly.
triggerReproduction <- function(length, reproState,
currentMonth, reproScenario = 'fixedClutchSize'){
if (reproScenario == 'fixedClutchSize'){
triggerEnergy <<- (length * 150.83 - 3027.244) * weightEgg
return(reproState > triggerEnergy & currentMonth %in% spawningWindow)
}
}
|
/functions/triggerReproduction.R
|
no_license
|
dbahlburg/SERBIK
|
R
| false
| false
| 320
|
r
|
triggerReproduction <- function(length, reproState,
currentMonth, reproScenario = 'fixedClutchSize'){
if (reproScenario == 'fixedClutchSize'){
triggerEnergy <<- (length * 150.83 - 3027.244) * weightEgg
return(reproState > triggerEnergy & currentMonth %in% spawningWindow)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/networks.R
\name{get_degree_distribution}
\alias{get_degree_distribution}
\title{Get the degree distribution for a network.}
\usage{
get_degree_distribution(network)
}
\arguments{
\item{network}{A network object.}
}
\value{
A vector of length p, containing the degree for each node in the
network.
}
\description{
Counts the connections to each node within each structure. Note, this
is not the same as the degree distribution from the adjacency matrix
obtained from the network, which collapses the individual structures into
one graph.
}
\examples{
set.seed(13245)
nw <- random_network(10)
deg <- get_degree_distribution(nw) # Degree of each node.
table(deg) # Frequency table of degrees.
# Five nodes have degree 2, three nodes have degree 3, etc.
}
|
/SeqNet/man/get_degree_distribution.Rd
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| true
| 832
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/networks.R
\name{get_degree_distribution}
\alias{get_degree_distribution}
\title{Get the degree distribution for a network.}
\usage{
get_degree_distribution(network)
}
\arguments{
\item{network}{A network object.}
}
\value{
A vector of length p, containing the degree for each node in the
network.
}
\description{
Counts the connections to each node within each structure. Note, this
is not the same as the degree distribution from the adjacency matrix
obtained from the network, which collapses the individual structures into
one graph.
}
\examples{
set.seed(13245)
nw <- random_network(10)
deg <- get_degree_distribution(nw) # Degree of each node.
table(deg) # Frequency table of degrees.
# Five nodes have degree 2, three nodes have degree 3, etc.
}
|
# Load the MICS survey catalogue and flag surveys with downloadable data.
catalogue <- read.csv("depends/mics_survey_catalogue_filenames.csv",
na = "", stringsAsFactors = FALSE)
is_available <- !is.na(catalogue$datasets) & catalogue$datasets == "Available"
## assign ISO3 and survey_id
# Manual country-name -> ISO3 fixes for names countrycode cannot match.
# NOTE(review): the Syrian "Palestinian Refugee Camps" entry is listed
# twice with the same value — harmless, but one copy can be removed.
custom_matches <- c("Eswatini" = "SWZ",
"Kosovo under UNSC res. 1244" = "RKS",
"Kosovo under UNSC res. 1244 (Roma settlements)" = "RKS",
"Kosovo under UNSC res. 1244 (Roma, Ashkali, and Egyptian Communities)" = "RKS",
"Indonesia (Papua Selected Districts)" = "IDN",
"Indonesia (West Papua Selected Districts)" = "IDN",
"Lebanon (Palestinians)" = "LBN",
"Syrian Arab Republic (Palestinian Refugee Camps and Gatherings)" = "SYR",
"Syrian Arab Republic (Palestinian Refugee Camps and Gatherings)" = "SYR",
"Yugoslavia, The Federal Republic of (including current Serbia and Montenegro)" = "YUG",
"Sudan (South)" = "SSD")
catalogue$iso3 <- countrycode(catalogue$country, "country.name", "iso3c",
custom_match = custom_matches)
# Fail fast if any survey still lacks an ISO3 code.
if(any(is.na(catalogue$iso3))) {
stop("ISO3 missing for surveys: ",
paste0(catalogue$country[is.na(catalogue$iso3)], collapse = ","))
}
## For subnational MICS, assign a custom location prefix different from the ISO3.
## Check that this does not conflict with any ISO3.
# NOTE(review): "Pakistan (Khyber Pakhtunkhwa)" appears twice with
# different codes ("PKK" and "PKP") — with duplicated names only one
# mapping can take effect in recode(); confirm which code is intended.
custom_loc_prefix <- c(
"Bosnia and Herzegovina (Roma Settlements)" = "BIR",
"Kosovo under UNSC res. 1244 (Roma settlements)" = "RKR",
"Serbia (Roma Settlements)" = "SRR",
"North Macedonia, Republic of (Roma Settlements)" = "MKR",
"Montenegro (Roma Settlements)" = "MNR",
"Kosovo under UNSC res. 1244 (Roma, Ashkali, and Egyptian Communities)" = "RKR",
"Syrian Arab Republic (Palestinian Refugee Camps and Gatherings)" = "SYP",
"Pakistan (Gilgit-Baltistan)" = "PAG",
"Pakistan (Khyber Pakhtunkhwa)" = "PKK",
"Pakistan (Khyber Pakhtunkhwa)" = "PKP",
"Mongolia (Khuvsgul Aimag)" = "MNK",
"Mongolia (Nalaikh District)" = "MNN",
"Pakistan (Punjab)" = "PAP",
"Pakistan (Sindh)" = "PAS",
"Kenya (Bungoma County)" = "KEB",
"Kenya (Kakamega County)" = "KEK",
"Kenya (Turkana County)" = "KET",
"Indonesia (Papua Selected Districts)" = "IDP",
"Indonesia (West Papua Selected Districts)" = "IDW",
"Somalia (Northeast Zone)" = "SON",
"Somalia (Somaliland)" = "SOS",
"Thailand (Bangkok Small Community)" = "THB"
)
# A custom prefix that collides with a real ISO3 would create ambiguous
# survey IDs — abort if any clash.
iso3_clash <- intersect(custom_loc_prefix, countrycode::codelist$iso3c)
if(length(iso3_clash)) {
stop("Custom location prefix clashes with ISO3 codes:",
paste(iso3_clash, collapse = ", "))
}
# NOTE(review): the first assignment is immediately overwritten by the
# recode() below (which already defaults to iso3); it is dead code.
catalogue$location_prefix <- catalogue$iso3
catalogue$location_prefix <- recode(catalogue$country, !!!custom_loc_prefix,
.default = catalogue$iso3)
# Survey IDs must be unique per prefix+year; abort on duplicates.
duplicated_location_years <- duplicated(catalogue[c("location_prefix", "year")])
if(any(duplicated_location_years)) {
stop("Duplicated location code and years in: ",
paste(
catalogue$location_prefix[duplicated_location_years],
catalogue$year[duplicated_location_years],
collapse = ", "
)
)
}
# e.g. "KEN2014MICS": prefix + first 4 chars of year + "MICS".
catalogue$survey_id <- paste0(catalogue$location_prefix, substr(catalogue$year, 1, 4), "MICS")
## Update orderly yaml. Must be done manually
raw_dir <- "mics_datasets_raw"
raw_paths <- file.path(raw_dir, catalogue$filename[is_available])
rds_dir <- "mics_datasets_rds"
# Output RDS files are named by lower-cased survey ID.
rds_files <- paste0(tolower(catalogue$survey_id[is_available]), ".rds")
rds_paths <- file.path(rds_dir, rds_files)
## Temporarily drop files with non-ASCII name (CIV MICS5)
# showNonASCII() returns (and prints) the offending elements; membership
# test identifies which paths to exclude.
has_nonascii <- raw_paths %in% tools::showNonASCII(raw_paths)
raw_paths <- raw_paths[!has_nonascii]
rds_paths <- rds_paths[!has_nonascii]
# Rewrite orderly.yml so its declared dependencies and artefacts match
# the current catalogue.
yml <- yaml::read_yaml("orderly.yml")
yml$depends$download_mics_datasets$use <-
as.list(c("depends/mics_survey_catalogue_filenames.csv" =
"mics_survey_catalogue_filenames.csv",
setNames(raw_paths, file.path("depends", raw_paths))))
yml$artefacts[[2]] <- list(data = list(description = "MICS datasets RDS",
filenames = rds_paths))
yaml::write_yaml(yml, "orderly.yml")
## Reorder columns and save the catalogue
catalogue <- select(catalogue, -filename, -url, everything(), filename, url)
write.csv(catalogue, "mics_survey_catalogue.csv", row.names = FALSE, na = "")
## Most of the MICS datasets have a .txt file in them which is the README.
## Early surveys have a MS Word .doc. For now, parse the .txt, but don't
## do anything to the .doc files.
##
## Check only one .txt in each file, which will assume is README.
# Count the .txt entries inside each zip (without extracting).
num_txt_files <- raw_paths %>%
file.path("depends", .) %>%
lapply(unzip, list = TRUE) %>%
lapply("[[", "Name") %>%
lapply(grep, pattern = "\\.txt$") %>%
lengths()
table(num_txt_files)
# Abort if any archive is ambiguous (more than one candidate README).
if(any(num_txt_files > 1))
stop("MICS dataset has more than one .txt: ",
paste(basename(raw_paths)[num_txt_files > 1], collapse = ", "))
## * Specify haven::read_sav(..., encoding = "latin1") to catch encoding errors
## Extract one downloaded MICS zip and save its contents as a single RDS.
##
## path_zip: archive path relative to depends/.
## rds_path: destination .rds path.
##
## The RDS holds a named list: one element per .sav dataset (named by
## file stem) plus, when present, a 'readme' element with the lines of
## the archive's .txt README.
## * Specify haven::read_sav(..., encoding = "latin1") to catch encoding errors
save_rds <- function(path_zip, rds_path) {
  print(basename(path_zip))
  ## extract contents into a temp dir; FIX: clean it up on exit so
  ## processing hundreds of archives does not leak disk space
  tf <- tempfile()
  on.exit(unlink(tf, recursive = TRUE), add = TRUE)
  files <- unzip(file.path("depends", path_zip), exdir = tf)
  ## some archives are a zip-within-a-zip; unwrap one level
  if(length(files) == 1 && grepl("\\.zip$", files))
    files <- unzip(files, exdir = tf)
  sav_files <- grep("\\.sav$", files, value = TRUE)
  file_type <- gsub("^(.*)\\.sav$", "\\1", basename(sav_files))
  ## parse these into a named list
  res <- lapply(sav_files, haven::read_sav, encoding = "latin1")
  names(res) <- file_type
  ## append readme if it exists; take the first match so an unexpected
  ## second .txt/.TXT cannot break readLines() (the upstream check only
  ## counts lower-case .txt)
  readme_file <- grep("\\.txt$", files, value = TRUE, ignore.case = TRUE)
  if(length(readme_file)) {
    readme <- readLines(readme_file[[1]])
    res <- c(list(readme = readme), res)
  }
  saveRDS(res, rds_path)
}
# Create the output directory (warns if it already exists) and convert
# every available raw archive to its RDS counterpart.
dir.create(rds_dir)
res <- Map(save_rds, raw_paths, rds_paths)
|
/src/mics_rds/script.R
|
no_license
|
mrc-ide/mics-datasets
|
R
| false
| false
| 5,987
|
r
|
catalogue <- read.csv("depends/mics_survey_catalogue_filenames.csv",
na = "", stringsAsFactors = FALSE)
is_available <- !is.na(catalogue$datasets) & catalogue$datasets == "Available"
## assign ISO3 and survey_id
custom_matches <- c("Eswatini" = "SWZ",
"Kosovo under UNSC res. 1244" = "RKS",
"Kosovo under UNSC res. 1244 (Roma settlements)" = "RKS",
"Kosovo under UNSC res. 1244 (Roma, Ashkali, and Egyptian Communities)" = "RKS",
"Indonesia (Papua Selected Districts)" = "IDN",
"Indonesia (West Papua Selected Districts)" = "IDN",
"Lebanon (Palestinians)" = "LBN",
"Syrian Arab Republic (Palestinian Refugee Camps and Gatherings)" = "SYR",
"Syrian Arab Republic (Palestinian Refugee Camps and Gatherings)" = "SYR",
"Yugoslavia, The Federal Republic of (including current Serbia and Montenegro)" = "YUG",
"Sudan (South)" = "SSD")
catalogue$iso3 <- countrycode(catalogue$country, "country.name", "iso3c",
custom_match = custom_matches)
if(any(is.na(catalogue$iso3))) {
stop("ISO3 missing for surveys: ",
paste0(catalogue$country[is.na(catalogue$iso3)], collapse = ","))
}
## For subnational MICS, assign a custom location prefix different from the ISO3.
## Check that this does not conflict with any ISO3.
custom_loc_prefix <- c(
"Bosnia and Herzegovina (Roma Settlements)" = "BIR",
"Kosovo under UNSC res. 1244 (Roma settlements)" = "RKR",
"Serbia (Roma Settlements)" = "SRR",
"North Macedonia, Republic of (Roma Settlements)" = "MKR",
"Montenegro (Roma Settlements)" = "MNR",
"Kosovo under UNSC res. 1244 (Roma, Ashkali, and Egyptian Communities)" = "RKR",
"Syrian Arab Republic (Palestinian Refugee Camps and Gatherings)" = "SYP",
"Pakistan (Gilgit-Baltistan)" = "PAG",
"Pakistan (Khyber Pakhtunkhwa)" = "PKK",
"Pakistan (Khyber Pakhtunkhwa)" = "PKP",
"Mongolia (Khuvsgul Aimag)" = "MNK",
"Mongolia (Nalaikh District)" = "MNN",
"Pakistan (Punjab)" = "PAP",
"Pakistan (Sindh)" = "PAS",
"Kenya (Bungoma County)" = "KEB",
"Kenya (Kakamega County)" = "KEK",
"Kenya (Turkana County)" = "KET",
"Indonesia (Papua Selected Districts)" = "IDP",
"Indonesia (West Papua Selected Districts)" = "IDW",
"Somalia (Northeast Zone)" = "SON",
"Somalia (Somaliland)" = "SOS",
"Thailand (Bangkok Small Community)" = "THB"
)
iso3_clash <- intersect(custom_loc_prefix, countrycode::codelist$iso3c)
if(length(iso3_clash)) {
stop("Custom location prefix clashes with ISO3 codes:",
paste(iso3_clash, collapse = ", "))
}
catalogue$location_prefix <- catalogue$iso3
catalogue$location_prefix <- recode(catalogue$country, !!!custom_loc_prefix,
.default = catalogue$iso3)
duplicated_location_years <- duplicated(catalogue[c("location_prefix", "year")])
if(any(duplicated_location_years)) {
stop("Duplicated location code and years in: ",
paste(
catalogue$location_prefix[duplicated_location_years],
catalogue$year[duplicated_location_years],
collapse = ", "
)
)
}
catalogue$survey_id <- paste0(catalogue$location_prefix, substr(catalogue$year, 1, 4), "MICS")
## Update orderly yaml. Must be done manually
raw_dir <- "mics_datasets_raw"
raw_paths <- file.path(raw_dir, catalogue$filename[is_available])
rds_dir <- "mics_datasets_rds"
rds_files <- paste0(tolower(catalogue$survey_id[is_available]), ".rds")
rds_paths <- file.path(rds_dir, rds_files)
## Temporarily drop files with non-ASCII name (CIV MICS5)
has_nonascii <- raw_paths %in% tools::showNonASCII(raw_paths)
raw_paths <- raw_paths[!has_nonascii]
rds_paths <- rds_paths[!has_nonascii]
yml <- yaml::read_yaml("orderly.yml")
yml$depends$download_mics_datasets$use <-
as.list(c("depends/mics_survey_catalogue_filenames.csv" =
"mics_survey_catalogue_filenames.csv",
setNames(raw_paths, file.path("depends", raw_paths))))
yml$artefacts[[2]] <- list(data = list(description = "MICS datasets RDS",
filenames = rds_paths))
yaml::write_yaml(yml, "orderly.yml")
## Reorder columns and save the catalogue
catalogue <- select(catalogue, -filename, -url, everything(), filename, url)
write.csv(catalogue, "mics_survey_catalogue.csv", row.names = FALSE, na = "")
## Most of the MICS datasets have a .txt file in them which is the README.
## Early surveys have a MS Word .doc. For now, parse the .txt, but don't
## do anything to the .doc files.
##
## Check only one .txt in each file, which will assume is README.
num_txt_files <- raw_paths %>%
file.path("depends", .) %>%
lapply(unzip, list = TRUE) %>%
lapply("[[", "Name") %>%
lapply(grep, pattern = "\\.txt$") %>%
lengths()
table(num_txt_files)
if(any(num_txt_files > 1))
stop("MICS dataset has more than one .txt: ",
paste(basename(raw_paths)[num_txt_files > 1], collapse = ", "))
## * Specify haven::read_sav(..., encoding = "latin1") to catch encoding errors
save_rds <- function(path_zip, rds_path) {
print(basename(path_zip))
## extract contents and read in .savs
tf <- tempfile()
files <- unzip(file.path("depends", path_zip), exdir = tf)
if(length(files) == 1 && grepl("\\.zip$", files))
files <- unzip(files, exdir = tf)
sav_files <- grep("\\.sav$", files, value = TRUE)
file_type <- gsub("^(.*)\\.sav$", "\\1", basename(sav_files))
## parse these into a named list
res <- lapply(sav_files, haven::read_sav, encoding = "latin1")
names(res) <- file_type
## append readme if it exists
readme_file <- grep("\\.txt$", files, value = TRUE, ignore.case = TRUE)
if(length(readme_file)) {
readme <- readLines(readme_file)
res <- c(list(readme = readme), res)
}
saveRDS(res, rds_path)
}
dir.create(rds_dir)
res <- Map(save_rds, raw_paths, rds_paths)
|
library(BPEC)
### Name: bpec
### Title: Bayesian Phylogeographic and Ecological Clustering (BPEC)
### Aliases: bpec plot.bpec summary.bpec mean.bpec print.bpec
### Keywords: phylogeography clustering Bayesian phylogenetics
### ** Examples
## if you want to load the `mini' example Brown Frog dataset
data(MacrocnemisRawSeqs)
data(MacrocnemisCoordsLocsMini)
rawSeqs <- MacrocnemisRawSeqs
coordsLocs <- MacrocnemisCoordsLocsMini
dims <- 3 #this is 2 if you only have geographical longitude/latitude.
#(add 1 for each environmental or phenotypic covariate)
maxMig <- 2 #you will need a higher maximum number of migrations, suggest 7
ds <- 0 #start with ds=0 and increase to 1 and then to 2
iter <- 1000 #you will need far more iterations for convergence, start with 100,000
postSamples <- 100 #you will need at least 100 saved posterior samples
#run the Markov chain Monte Carlo sampler
bpecout <- bpec.mcmc(rawSeqs,coordsLocs,maxMig,iter,ds,postSamples,dims)
par(mar=c(0,0,0,0),pty="m",mfrow=c(1,2)) #no plot margins, plot contours and tree side-by-side
# plot geographical cluster contour map
bpec.contourPlot(bpecout,GoogleEarth=0)
# plot tree network with cluster indicators
bpec.Tree <- bpec.treePlot(bpecout)
# now also plot the environmental covariates
bpec.covariatesPlot(bpecout)
bpec.Geo <- bpec.geoTree(bpecout,file="GoogleEarthTree.kml")
## Not run:
##D # if you want to load the example burnet moth dataset
##D data(TransalpinaRawSeqs)
##D data(TransalpinaCoordsLocs)
##D rawSeqs <- TransalpinaRawSeqs
##D coordsLocs <- TransalpinaCoordsLocs
##D
##D ##if you want to use your own dataset, use setwd() to enter the correct folder,
##D ##then run the command below, changing the input parameters if necessary
##D #rawSeqs <- bpec.loadSeq('haplotypes.nex')
##D #coordsLocs <- bpec.loadCoords("coordsLocsFile.txt")
##D
##D ## to set phenotypic/environmental covariate names manually, use (as appropriate)
##D # colnames(CoordsLocs)[1:dims] <- c('lat','long','cov1','cov2','cov3')
##D ## where dims is the corresponding number of measurements available
##D ## (2 for latitude and longitude only, add one for each additional available measurement)
##D
##D dims <- 2 #this is 2 if you only have geographical longitude/latitude.
##D #(add 1 for each environmental or phenotypic covariate)
##D maxMig <- 5 #you will need a higher maximum number of migrations, suggest 7
##D ds <- 0 #start with ds=0 and increase to 1 and then to 2
##D iter <- 10000 #you will need far more iterations for convergence, start with 100,000
##D postSamples <- 2 #you will need at least 100 saved posterior samples
##D
##D #run the Markov chain Monte Carlo sampler
##D bpecout <- bpec.mcmc(rawSeqs,coordsLocs,maxMig,iter,ds,postSamples,dims)
##D
##D par(mar=c(0,0,0,0),pty="m",mfrow=c(1,2)) #No plot margins. Contours and tree side-by-side
##D # plot geographical cluster contour map
##D bpec.contourPlot(bpecout, GoogleEarth=0, mapType = 'plain')
##D
##D # plot tree network with cluster indicators
##D bpec.Tree <- bpec.treePlot(bpecout)
##D
##D ## if you want to load the example Brown Frog dataset
##D data(MacrocnemisRawSeqs)
##D data(MacrocnemisCoordsLocs)
##D rawSeqs <- MacrocnemisRawSeqs
##D coordsLocs <- MacrocnemisCoordsLocs
##D
##D dims <- 8 #this is 2 if you only have geographical longitude/latitude.
##D #(add 1 for each environmental or phenotypic covariate)
##D maxMig <- 4 #you will need a higher maximum number of migrations, suggest 7
##D ds <- 2 #start with ds=0 and increase to 1 and then to 2
##D iter <- 10000 #you will need far more iterations for convergence, start with 100,000
##D postSamples <- 2 #you will need at least 100 saved posterior samples
##D
##D #run the Markov chain Monte Carlo sampler
##D bpecout <- bpec.mcmc(rawSeqs,coordsLocs,maxMig,iter,ds,postSamples,dims)
##D
##D par(mar=c(0,0,0,0),pty="m",mfrow=c(1,2)) #no plot margins, plot contours and tree side-by-side
##D # plot geographical cluster contour map
##D bpec.contourPlot(bpecout,GoogleEarth=0)
##D
##D # plot tree network with cluster indicators
##D bpec.Tree <- bpec.treePlot(bpecout)
##D
##D # now also plot the environmental covariates
##D par(mfrow=c(2,3)) #split the plot window into 2x3 to fit all the covariates
##D bpec.covariatesPlot(bpecout)
##D
##D bpec.Geo <- bpec.geoTree(bpecout,file="GoogleEarthTree.kml")
##D
## End(Not run)
|
/data/genthat_extracted_code/BPEC/examples/bpec.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 4,391
|
r
|
library(BPEC)
### Name: bpec
### Title: Bayesian Phylogeographic and Ecological Clustering (BPEC)
### Aliases: bpec plot.bpec summary.bpec mean.bpec print.bpec
### Keywords: phylogeography clustering Bayesian phylogenetics
### ** Examples
## if you want to load the `mini' example Brown Frog dataset
data(MacrocnemisRawSeqs)
data(MacrocnemisCoordsLocsMini)
rawSeqs <- MacrocnemisRawSeqs
coordsLocs <- MacrocnemisCoordsLocsMini
dims <- 3 #this is 2 if you only have geographical longitude/latitude.
#(add 1 for each environmental or phenotypic covariate)
maxMig <- 2 #you will need a higher maximum number of migrations, suggest 7
ds <- 0 #start with ds=0 and increase to 1 and then to 2
iter <- 1000 #you will need far more iterations for convergence, start with 100,000
postSamples <- 100 #you will need at least 100 saved posterior samples
#run the Markov chain Monte Carlo sampler
bpecout <- bpec.mcmc(rawSeqs,coordsLocs,maxMig,iter,ds,postSamples,dims)
par(mar=c(0,0,0,0),pty="m",mfrow=c(1,2)) #no plot margins, plot contours and tree side-by-side
# plot geographical cluster contour map
bpec.contourPlot(bpecout,GoogleEarth=0)
# plot tree network with cluster indicators
bpec.Tree <- bpec.treePlot(bpecout)
# now also plot the environmental covariates
bpec.covariatesPlot(bpecout)
bpec.Geo <- bpec.geoTree(bpecout,file="GoogleEarthTree.kml")
## Not run:
##D # if you want to load the example burnet moth dataset
##D data(TransalpinaRawSeqs)
##D data(TransalpinaCoordsLocs)
##D rawSeqs <- TransalpinaRawSeqs
##D coordsLocs <- TransalpinaCoordsLocs
##D
##D ##if you want to use your own dataset, use setwd() to enter the correct folder,
##D ##then run the command below, changing the input parameters if necessary
##D #rawSeqs <- bpec.loadSeq('haplotypes.nex')
##D #coordsLocs <- bpec.loadCoords("coordsLocsFile.txt")
##D
##D ## to set phenotypic/environmental covariate names manually, use (as appropriate)
##D # colnames(CoordsLocs)[1:dims] <- c('lat','long','cov1','cov2','cov3')
##D ## where dims is the corresponding number of measurements available
##D ## (2 for latitude and longitude only, add one for each additional available measurement)
##D
##D dims <- 2 #this is 2 if you only have geographical longitude/latitude.
##D #(add 1 for each environmental or phenotypic covariate)
##D maxMig <- 5 #you will need a higher maximum number of migrations, suggest 7
##D ds <- 0 #start with ds=0 and increase to 1 and then to 2
##D iter <- 10000 #you will need far more iterations for convergence, start with 100,000
##D postSamples <- 2 #you will need at least 100 saved posterior samples
##D
##D #run the Markov chain Monte Carlo sampler
##D bpecout <- bpec.mcmc(rawSeqs,coordsLocs,maxMig,iter,ds,postSamples,dims)
##D
##D par(mar=c(0,0,0,0),pty="m",mfrow=c(1,2)) #No plot margins. Contours and tree side-by-side
##D # plot geographical cluster contour map
##D bpec.contourPlot(bpecout, GoogleEarth=0, mapType = 'plain')
##D
##D # plot tree network with cluster indicators
##D bpec.Tree <- bpec.treePlot(bpecout)
##D
##D ## if you want to load the example Brown Frog dataset
##D data(MacrocnemisRawSeqs)
##D data(MacrocnemisCoordsLocs)
##D rawSeqs <- MacrocnemisRawSeqs
##D coordsLocs <- MacrocnemisCoordsLocs
##D
##D dims <- 8 #this is 2 if you only have geographical longitude/latitude.
##D #(add 1 for each environmental or phenotypic covariate)
##D maxMig <- 4 #you will need a higher maximum number of migrations, suggest 7
##D ds <- 2 #start with ds=0 and increase to 1 and then to 2
##D iter <- 10000 #you will need far more iterations for convergence, start with 100,000
##D postSamples <- 2 #you will need at least 100 saved posterior samples
##D
##D #run the Markov chain Monte Carlo sampler
##D bpecout <- bpec.mcmc(rawSeqs,coordsLocs,maxMig,iter,ds,postSamples,dims)
##D
##D par(mar=c(0,0,0,0),pty="m",mfrow=c(1,2)) #no plot margins, plot contours and tree side-by-side
##D # plot geographical cluster contour map
##D bpec.contourPlot(bpecout,GoogleEarth=0)
##D
##D # plot tree network with cluster indicators
##D bpec.Tree <- bpec.treePlot(bpecout)
##D
##D # now also plot the environmental covariates
##D par(mfrow=c(2,3)) #split the plot window into 2x3 to fit all the covariates
##D bpec.covariatesPlot(bpecout)
##D
##D bpec.Geo <- bpec.geoTree(bpecout,file="GoogleEarthTree.kml")
##D
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxiliary_giotto.R
\name{rowMeans_giotto}
\alias{rowMeans_giotto}
\title{rowMeans_giotto}
\usage{
rowMeans_giotto(mymatrix)
}
\description{
rowMeans_giotto
}
\keyword{internal}
|
/man/rowMeans_giotto.Rd
|
permissive
|
bernard2012/Giotto
|
R
| false
| true
| 255
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxiliary_giotto.R
\name{rowMeans_giotto}
\alias{rowMeans_giotto}
\title{rowMeans_giotto}
\usage{
rowMeans_giotto(mymatrix)
}
\description{
rowMeans_giotto
}
\keyword{internal}
|
Pa <- 1/prod(49:52); Pa
Pb <- 1/choose(52,4); Pb
|
/chap04/exa_0402.R
|
no_license
|
leetschau/app-of-rlang-in-stats
|
R
| false
| false
| 51
|
r
|
Pa <- 1/prod(49:52); Pa
Pb <- 1/choose(52,4); Pb
|
#' Function to normalize TF scores
#'
#' @param vipermat - matrix of VIPER scores with columns as samples,
#' rows as protein names
#' @param fdr.thresh - BH-FDR threshold (default 0.05 FDR rate)
#' @return A vector of normalized z-scores, named by TF id
#' @keywords internal
viperGetTFScores <- function(vipermat, fdr.thresh = 0.05) {
# for each gene, count the number samples with scores for each, and weight by that
w.counts <- apply(vipermat, 1, function(x) {
data.counts <- length(which(!is.na(x)))
data.counts
})
w.counts <- w.counts/ncol(vipermat)
vipermat[is.na(vipermat)] <- 0
# normalize element scores to sum to 1 (optional - use weighted element scores based on silhouette)
element.scores <- rep(1, ncol(vipermat))
element.scores <- element.scores/sum(element.scores)
# mean weighted VIPER score across samples
w.means <- apply(vipermat, 1, function(x) {
res <- sum(x * element.scores)
res
})
# weight by the counts for each
w.means <- w.means * w.counts
names(w.means) <- rownames(vipermat)
# only look at those with positive (high) score w.means <- sort(w.means[which(w.means > 0)], decreasing=TRUE)
zscores <- w.means
zscores
}
#' Calculate p-values from pseudo zscores / VIPER aREA scores, threshold
#'
#' @param zscores Vector of normally distributed z-scores representing protein
#' activities.
#' @param fdr.thresh Threshold for false discovery rate, default is 0.05
#' @return Get the names of proteins with significant z-scores, after
#' multi-hypothesis correction
#' @keywords internal
viperGetSigTFS <- function(zscores, fdr.thresh = 0.05) {
# calculate pseudo-pvalues and look at just significant pvals/scores
pvals <- -pnorm(abs(zscores), log.p = TRUE) * 2
pvals[which(pvals > 1)] <- 1
# correct unless option is NULL
sig.idx <- which(p.adjust(pvals, method = "BH") < fdr.thresh)
pvals <- pvals[sig.idx]
names(pvals)
}
#' Compute the empirical q-values of each genomic-event/VIPER gene pair
#'
#' Use against the background distribution of associations with a given set of
#' 'null' VIPER genes (i.e. low activity TFs)
#'
#' @param vipermat viper inferences matrix, samples are columns,
#' rows are TF entrez gene IDs
#' @param nes scores for each mutation (rows) against each TF (columns)
#' @param null.TFs low-importance TFs used to calculate null distributions
#' @param alternative Alternative defaults to 'both' : significant p-values can
#' come from both sides of the null distribution
#' @return A named list of qvalues for each TF/cMR protein. Each entry contains
#' a vector of q-values for all associated events; names are gene ids
#' @keywords internal
getDiggitEmpiricalQvalues <- function(vipermat, nes, null.TFs,
alternative = "both") {
# subset NES to Viper Proteins in the vipermat only
nes <- nes[, as.character(rownames(vipermat))]
nes.em.qvals <- apply(nes, 1, function(x, alternative) {
null.VEC <- x[as.character(null.TFs)]
null.VEC <- null.VEC[which(!is.na(null.VEC))]
# get empirical q-values for both upper and lower tails of NES / DIGGIT statistics
qvals <- getEmpiricalQvals(x, null.VEC, alternative)
qvals
}, alternative = alternative)
names(nes.em.qvals) <- rownames(nes)
nes.em.qvals
}
#' Get empirical qvals
#'
#' @param test.statistics P-values generated from the test comparisons
#' @param null.statistics P-values generated under the null (permutation) model
#' @param alternative Optional : 1 or 2 tails used to generate the p-value
#' @return A list with both the qvalues and empirical p-values from the supplied
#' test and null stats
#' @keywords internal
getEmpiricalQvals <- function(test.statistics, null.statistics,
alternative = "both") {
# calculate the upper and lower tail
if (alternative == "both") {
test.statistics <- sort(abs(test.statistics), decreasing = TRUE)
null.statistics <- abs(null.statistics)
em.pvals <- qvalue::empPvals(test.statistics, null.statistics)
qvals <- rep(1, length(em.pvals))
tryCatch({
qvals <- qvalue::qvalue(em.pvals)$qvalue
}, error = function(e) {
# if pi0, the estimated proportion of true null hypothesis <= 0, it might fail: in that case set to zero and return p-values anyways
qvals <- rep(1, length(em.pvals))
})
names(qvals) <- names(test.statistics)
names(em.pvals) <- names(test.statistics)
return(list(qvals = qvals, pvals = em.pvals))
} else {
stop(paste(" alternative ", alternative, " not implemented yet!"))
}
}
#' Filter interactions from NES (DIGGIT) scores and corresponding
#' background-corrected scores.
#'
#' Use this version in the Bayes model to rank TFs
#'
#' @import stats
#' @param corrected.scores A list indexed by the genomic event/gene with
#' corresponding pvals and qvals for each TF
#' @param nes.scores Matrix with tfs as columns, rows are genomic events
#' @param cindy CINDy algorithm output matrix
#' @param p.thresh P-value threshold (default=0.05)
#' @param cindy.only Consider only CINDy validated interactions (default=TRUE)
#' @return a list (indexed by VIPER protein) of significant genomic interactions
#' and associated pvals over the background (null TF) model, and NES scores
#' @keywords internal
sigInteractorsDIGGIT <- function(corrected.scores, nes.scores, cindy,
p.thresh = 0.05, cindy.only = TRUE) {
pvals.matrix <- getPvalsMatrix(corrected.scores)
# input validation
if (!is.numeric(p.thresh)) {
stop("Invalid value supplied for p-value threshold!")
}
## Apply joint NES + p-value over background (null TF) threshold over each Viper Protein return the raw NES scores only for those significant over the
## background and including CINDy (if applicable)
viper.interactors <- lapply(colnames(pvals.matrix), function(viperProt) {
# find the over-null-TF-background scores with an significant, uncorrected p-value
pvals <- as.numeric(pvals.matrix[, as.character(viperProt)])
nes.vec <- as.numeric(nes.scores[, as.character(viperProt)])
row.idx <- which(pvals < p.thresh)
pvals <- pvals[row.idx]
names(pvals) <- rownames(pvals.matrix)[row.idx]
if (!all(names(pvals) == names(nes.vec))) {
stop('Data not aligned for aREA / aREA corrected p-values')
}
# subset the NES vector for this TF, threshold again on NES scores as a sanity check on the basic enrichment (i.e. remove those with high
# over-background scores simply because the background is de-enriched)
nes.vec <- nes.scores[, which(colnames(nes.scores) == as.character(viperProt))]
nes.vec <- nes.vec[which(2 * (1 - pnorm(abs(nes.vec))) < p.thresh)]
fusion.index <- unlist(lapply(names(nes.vec), function(x) if (length(strsplit(x, "_")[[1]]) > 1)
TRUE else FALSE))
# subset to CINDY validated upstream regulators
if (cindy.only && is.null(cindy[[viperProt]])) {
return(c())
} else if (cindy.only) {
# lookup takes about a second...
cindy.thisTF <- cindy[[viperProt]]
upstream.cindy.modulators <- names(cindy.thisTF)
cindy.nes.vec <- na.omit(nes.vec[match(upstream.cindy.modulators, names(nes.vec))])
if (length(cindy.nes.vec) == 0) {
return(c())
}
# add fusions in, they don't have CINDy scores
fus.vec <- nes.vec[fusion.index]
entrez.vec <- cindy.nes.vec[which(!is.na(cindy.nes.vec))]
# Independence of these: multiply through
cindy.entrez.pvals <- cindy.thisTF[(names(cindy.thisTF) %in% names(entrez.vec))]
entrez.pvals <- 2 * (1 - pnorm(abs(entrez.vec)))
entrez.pvals <- entrez.pvals * cindy.entrez.pvals
entrez.vec.corrected <- (1 - qnorm(entrez.pvals)) * sign(entrez.vec)
nes.vec <- c(entrez.vec.corrected, fus.vec)
}
# keep interactions significant above the null model and with a significant raw aREA score
nes.vec <- nes.vec[intersect(names(nes.vec), names(pvals))]
nes.vec <- nes.vec[which(!is.na(nes.vec))]
nes.vec <- sort(nes.vec, decreasing = TRUE)
nes.vec
})
names(viper.interactors) <- colnames(pvals.matrix)
viper.interactors
}
#' Utility function
#'
#' @param corrected.scores - corrected p-values processed by 'qvals' package
#' @return A matrix of p-values for scores between genes/events (rows) and
#' TFs (columns)
#' @keywords internal
getPvalsMatrix <- function(corrected.scores) {
# order of VIPER proteins/TFs
tf.names.order <- names(corrected.scores[[1]]$qvals)
pvals.matrix <- matrix(unlist(lapply(corrected.scores, function(x) {
pvals <- x$pvals[tf.names.order]
pvals
})), byrow = TRUE, ncol = length(tf.names.order))
colnames(pvals.matrix) <- tf.names.order
rownames(pvals.matrix) <- names(corrected.scores)
pvals.matrix
}
|
/R/make.interactions.r
|
no_license
|
califano-lab/MOMA
|
R
| false
| false
| 8,958
|
r
|
#' Function to normalize TF scores
#'
#' @param vipermat - matrix of VIPER scores with columns as samples,
#' rows as protein names
#' @param fdr.thresh - BH-FDR threshold (default 0.05 FDR rate)
#' @return A vector of normalized z-scores, named by TF id
#' @keywords internal
viperGetTFScores <- function(vipermat, fdr.thresh = 0.05) {
# for each gene, count the number samples with scores for each, and weight by that
w.counts <- apply(vipermat, 1, function(x) {
data.counts <- length(which(!is.na(x)))
data.counts
})
w.counts <- w.counts/ncol(vipermat)
vipermat[is.na(vipermat)] <- 0
# normalize element scores to sum to 1 (optional - use weighted element scores based on silhouette)
element.scores <- rep(1, ncol(vipermat))
element.scores <- element.scores/sum(element.scores)
# mean weighted VIPER score across samples
w.means <- apply(vipermat, 1, function(x) {
res <- sum(x * element.scores)
res
})
# weight by the counts for each
w.means <- w.means * w.counts
names(w.means) <- rownames(vipermat)
# only look at those with positive (high) score w.means <- sort(w.means[which(w.means > 0)], decreasing=TRUE)
zscores <- w.means
zscores
}
#' Calculate p-values from pseudo zscores / VIPER aREA scores, threshold
#'
#' @param zscores Vector of normally distributed z-scores representing protein
#' activities.
#' @param fdr.thresh Threshold for false discovery rate, default is 0.05
#' @return Get the names of proteins with significant z-scores, after
#' multi-hypothesis correction
#' @keywords internal
viperGetSigTFS <- function(zscores, fdr.thresh = 0.05) {
# calculate pseudo-pvalues and look at just significant pvals/scores
pvals <- -pnorm(abs(zscores), log.p = TRUE) * 2
pvals[which(pvals > 1)] <- 1
# correct unless option is NULL
sig.idx <- which(p.adjust(pvals, method = "BH") < fdr.thresh)
pvals <- pvals[sig.idx]
names(pvals)
}
#' Compute the empirical q-values of each genomic-event/VIPER gene pair
#'
#' Use against the background distribution of associations with a given set of
#' 'null' VIPER genes (i.e. low activity TFs)
#'
#' @param vipermat viper inferences matrix, samples are columns,
#' rows are TF entrez gene IDs
#' @param nes scores for each mutation (rows) against each TF (columns)
#' @param null.TFs low-importance TFs used to calculate null distributions
#' @param alternative Alternative defaults to 'both' : significant p-values can
#' come from both sides of the null distribution
#' @return A named list of qvalues for each TF/cMR protein. Each entry contains
#' a vector of q-values for all associated events; names are gene ids
#' @keywords internal
getDiggitEmpiricalQvalues <- function(vipermat, nes, null.TFs,
alternative = "both") {
# subset NES to Viper Proteins in the vipermat only
nes <- nes[, as.character(rownames(vipermat))]
nes.em.qvals <- apply(nes, 1, function(x, alternative) {
null.VEC <- x[as.character(null.TFs)]
null.VEC <- null.VEC[which(!is.na(null.VEC))]
# get empirical q-values for both upper and lower tails of NES / DIGGIT statistics
qvals <- getEmpiricalQvals(x, null.VEC, alternative)
qvals
}, alternative = alternative)
names(nes.em.qvals) <- rownames(nes)
nes.em.qvals
}
#' Get empirical qvals
#'
#' @param test.statistics P-values generated from the test comparisons
#' @param null.statistics P-values generated under the null (permutation) model
#' @param alternative Optional : 1 or 2 tails used to generate the p-value
#' @return A list with both the qvalues and empirical p-values from the supplied
#' test and null stats
#' @keywords internal
getEmpiricalQvals <- function(test.statistics, null.statistics,
alternative = "both") {
# calculate the upper and lower tail
if (alternative == "both") {
test.statistics <- sort(abs(test.statistics), decreasing = TRUE)
null.statistics <- abs(null.statistics)
em.pvals <- qvalue::empPvals(test.statistics, null.statistics)
qvals <- rep(1, length(em.pvals))
tryCatch({
qvals <- qvalue::qvalue(em.pvals)$qvalue
}, error = function(e) {
# if pi0, the estimated proportion of true null hypothesis <= 0, it might fail: in that case set to zero and return p-values anyways
qvals <- rep(1, length(em.pvals))
})
names(qvals) <- names(test.statistics)
names(em.pvals) <- names(test.statistics)
return(list(qvals = qvals, pvals = em.pvals))
} else {
stop(paste(" alternative ", alternative, " not implemented yet!"))
}
}
#' Filter interactions from NES (DIGGIT) scores and corresponding
#' background-corrected scores.
#'
#' Use this version in the Bayes model to rank TFs
#'
#' @import stats
#' @param corrected.scores A list indexed by the genomic event/gene with
#' corresponding pvals and qvals for each TF
#' @param nes.scores Matrix with tfs as columns, rows are genomic events
#' @param cindy CINDy algorithm output matrix
#' @param p.thresh P-value threshold (default=0.05)
#' @param cindy.only Consider only CINDy validated interactions (default=TRUE)
#' @return a list (indexed by VIPER protein) of significant genomic interactions
#' and associated pvals over the background (null TF) model, and NES scores
#' @keywords internal
sigInteractorsDIGGIT <- function(corrected.scores, nes.scores, cindy,
p.thresh = 0.05, cindy.only = TRUE) {
pvals.matrix <- getPvalsMatrix(corrected.scores)
# input validation
if (!is.numeric(p.thresh)) {
stop("Invalid value supplied for p-value threshold!")
}
## Apply joint NES + p-value over background (null TF) threshold over each Viper Protein return the raw NES scores only for those significant over the
## background and including CINDy (if applicable)
viper.interactors <- lapply(colnames(pvals.matrix), function(viperProt) {
# find the over-null-TF-background scores with an significant, uncorrected p-value
pvals <- as.numeric(pvals.matrix[, as.character(viperProt)])
nes.vec <- as.numeric(nes.scores[, as.character(viperProt)])
row.idx <- which(pvals < p.thresh)
pvals <- pvals[row.idx]
names(pvals) <- rownames(pvals.matrix)[row.idx]
if (!all(names(pvals) == names(nes.vec))) {
stop('Data not aligned for aREA / aREA corrected p-values')
}
# subset the NES vector for this TF, threshold again on NES scores as a sanity check on the basic enrichment (i.e. remove those with high
# over-background scores simply because the background is de-enriched)
nes.vec <- nes.scores[, which(colnames(nes.scores) == as.character(viperProt))]
nes.vec <- nes.vec[which(2 * (1 - pnorm(abs(nes.vec))) < p.thresh)]
fusion.index <- unlist(lapply(names(nes.vec), function(x) if (length(strsplit(x, "_")[[1]]) > 1)
TRUE else FALSE))
# subset to CINDY validated upstream regulators
if (cindy.only && is.null(cindy[[viperProt]])) {
return(c())
} else if (cindy.only) {
# lookup takes about a second...
cindy.thisTF <- cindy[[viperProt]]
upstream.cindy.modulators <- names(cindy.thisTF)
cindy.nes.vec <- na.omit(nes.vec[match(upstream.cindy.modulators, names(nes.vec))])
if (length(cindy.nes.vec) == 0) {
return(c())
}
# add fusions in, they don't have CINDy scores
fus.vec <- nes.vec[fusion.index]
entrez.vec <- cindy.nes.vec[which(!is.na(cindy.nes.vec))]
# Independence of these: multiply through
cindy.entrez.pvals <- cindy.thisTF[(names(cindy.thisTF) %in% names(entrez.vec))]
entrez.pvals <- 2 * (1 - pnorm(abs(entrez.vec)))
entrez.pvals <- entrez.pvals * cindy.entrez.pvals
entrez.vec.corrected <- (1 - qnorm(entrez.pvals)) * sign(entrez.vec)
nes.vec <- c(entrez.vec.corrected, fus.vec)
}
# keep interactions significant above the null model and with a significant raw aREA score
nes.vec <- nes.vec[intersect(names(nes.vec), names(pvals))]
nes.vec <- nes.vec[which(!is.na(nes.vec))]
nes.vec <- sort(nes.vec, decreasing = TRUE)
nes.vec
})
names(viper.interactors) <- colnames(pvals.matrix)
viper.interactors
}
#' Utility function
#'
#' @param corrected.scores - corrected p-values processed by 'qvals' package
#' @return A matrix of p-values for scores between genes/events (rows) and
#' TFs (columns)
#' @keywords internal
getPvalsMatrix <- function(corrected.scores) {
# order of VIPER proteins/TFs
tf.names.order <- names(corrected.scores[[1]]$qvals)
pvals.matrix <- matrix(unlist(lapply(corrected.scores, function(x) {
pvals <- x$pvals[tf.names.order]
pvals
})), byrow = TRUE, ncol = length(tf.names.order))
colnames(pvals.matrix) <- tf.names.order
rownames(pvals.matrix) <- names(corrected.scores)
pvals.matrix
}
|
library(shiny)
library(shinyjs)
library(shinythemes)
library(DT)
library(dplyr)
setwd("C:/Users/193344/Desktop/tracker gui")
ARMASTER <- read.csv("//knx1fs01/ED Reporting/Lowhorn Big Data/Golden Rule Data/ARMASTER.csv")
ar <- select(ARMASTER,A.R,desk,manager)
ar <- rename(ar,Desk=desk)
ar <- rename(ar,Manager=manager)
saveData <- function(data) {
data <- as.data.frame(t(data))
if (exists("responses")) {
responses <<- rbind(responses, data)
} else {
responses <<- data
}
}
loadData <- function() {
if (exists("responses")) {
responses
}
}
fields <- c("Desk","Program","File #", "Set Up Date","Tier","Due Date","Payment Method","Payment Amount")
# ---- AM Tracker Shiny app: UI + server ----------------------------------
# A tabbed app for entering account submissions and browsing trackers.
# NOTE(review): `MasterTracker`, `Budgets` and `TR` are referenced below but
# never created in this file -- presumably loaded from a saved workspace;
# confirm before running standalone.
# NOTE(review): several inputIds ("File #", "Set Up Date", ...) contain
# spaces/punctuation; Shiny inputIds are normally plain identifiers --
# verify these bind correctly.
shinyApp(
# ----------------------------- UI ----------------------------------------
ui = fluidPage(shinyjs::useShinyjs(), theme=shinytheme("readable"),
# Google-font styling for the page header (h1).
tags$head(
tags$style(HTML("
@import url('//fonts.googleapis.com/css?family=Lobster|Cabin:400,700');
h1 {
font-family: 'Lobster', cursive;
font-weight: 500;
line-height: 1.1;
color: #191970;
}
"))
),
headerPanel("AM Tracker App"),
tabsetPanel(
# Tab 1: the submission form; swapped for a thank-you message on submit.
tabPanel("User Input",div(id = "form",
column(4),column(4,
numericInput("Desk", "Desk", ""),
numericInput("File #","File #",""),
dateInput("Set Up Date","Set Up Date",value=Sys.Date()),
selectInput("Program","Program",choices=c("15%","FIS","SIF","BIF")),
selectInput("Tier","Tier",choices=c("Extreme","High","Medium","Low")),
dateInput("Due Date","Due Date",value=Sys.Date()),
selectInput("Payment Method","Payment Method",choices=c("Credit Card","Debit Card","Check","Money Order","WU","Mail In")),
numericInput("Payment Amount","Payment Amount",""),
actionButton("submit", "Submit",icon=icon("eye-open", lib = "glyphicon")),
tags$style(type='text/css', "#submit { vertical-align: middle; height: 50px; width: 69%; font-size: 30px; background-color: LightGray; border-color: black; color:blue}")
)),
shinyjs::hidden(
div(
id = "thankyou_msg",
h3("Thanks, your account was submitted successfully!"),
actionLink("submit_another", "Submit another account")
))),
# Tab 2: every submission made this session.
tabPanel("Daily Tracker",
fluidRow (
dataTableOutput("table1"
))),
# Tab 3: this session's submissions sharing a File #.
tabPanel("Daily Duplicates",
dataTableOutput("dupes")
),
# Tab 4: historical master tracker, filterable by manager and month.
tabPanel("Master Tracker", fluidRow(column(1),
column(5,
selectInput("Manager",
"Manager Select",
choices=levels(MasterTracker$Manager),
multiple=T,
selected=levels(MasterTracker$Manager),
selectize=T,
width=1000)),
column(5,
selectInput('Month',"Month Select",
choices=c("August 2015","September 2015"),
selected="September 2015",
selectize=T,
multiple=T,
width=1000)
),
column(1)),
DT::dataTableOutput("MasterTracker")),
# Tab 5: office-level budget-vs-actual view.
tabPanel("Office Tracker", fluidRow(column(1),
column(5,
selectInput("MGR",
"Manager Select",
choices=levels(Budgets$Manager),
multiple=T,
selected=levels(Budgets$Manager),
selectize=T,
width=1000)),
column(5,selectInput('MTH',"Month Select",
choices=c("September 2015","August 2015"),
selected="September 2015",
selectize=T,
multiple=T,
width=1000)),
column(1),
DT::dataTableOutput("OfficeTracker")
)),
# Tab 6: duplicate File #s in the historical master tracker.
tabPanel("Historical Duplicates",
DT::dataTableOutput("MTH")
)
)),
# ----------------------------- server ------------------------------------
server = function(input, output, session) {
# Whenever a field is filled, aggregate all form data
formData <- reactive({
data <- sapply(fields, function(x) input[[x]])
data
})
# action to take when submit button is pressed
observeEvent(input$submit, {
saveData(formData())
shinyjs::reset("form")
shinyjs::hide("form")
shinyjs::show("thankyou_msg")
})
# Re-show a blank form for the next submission.
observeEvent(input$submit_another, {
shinyjs::show("form")
shinyjs::hide("thankyou_msg")
})
# Daily Tracker table: session submissions joined to the AR lookup, with
# the numeric day offsets stored by the form converted back to Dates.
# NOTE(review): loadData()'s return value is discarded; the code below
# reads the global `responses` directly and will error before the first
# submission -- confirm intended.
output$table1 <- renderDataTable({
input$submit
loadData()
responses$Desk <- unlist(as.integer(as.character(responses$Desk)))
responses <- left_join(responses,ar,by="Desk")
responses$"Set Up Date" <- unlist(as.numeric(as.character(responses$"Set Up Date")))
responses$"Set Up Date" <- as.Date(responses$"Set Up Date", origin = "1970-01-01")
responses$"Due Date" <- as.numeric(as.character((responses$"Due Date")))
responses$"Due Date" <- as.Date(responses$"Due Date", origin = "1970-01-01")
responses
},options = list(lengthMenu = c(10, 50, 100, 3000), pageLength = 3000))
# Daily Duplicates table: rows whose File # (column 3) appears more than
# once among this session's submissions.
output$dupes <- renderDataTable({
input$submit
loadData()
responses$Desk <- unlist(as.integer(as.character(responses$Desk)))
responses$"Set Up Date" <- as.numeric(as.character(responses$"Set Up Date"))
responses$"Set Up Date" <- as.Date(responses$"Set Up Date", origin = "1970-01-01")
responses$"Due Date" <- as.numeric(as.character(responses$"Due Date"))
responses$"Due Date" <- as.Date(responses$"Due Date", origin = "1970-01-01")
responses2 <- responses[duplicated(responses[,3]),]
responses2 <- responses2$"File #"
response <- responses[responses$"File #" %in% responses2,]
response <- left_join(response,ar,by="Desk")
#response$"Set Up Date" <- as.numeric(as.character(response$"Set Up Date"))
response$"Set Up Date" <- as.Date(response$"Set Up Date", origin = "1970-01-01")
#response$"Due Date" <- as.numeric(as.character(response$"Due Date"))
response$"Due Date" <- as.Date(response$"Due Date", origin = "1970-01-01")
response
})
# Master Tracker table with TableTools export buttons (copy/print/save).
output$MasterTracker <- DT::renderDataTable({
track <- subset(MasterTracker,Manager %in% c(input$Manager))
track2 <- subset(track,Setup.Month %in% c(input$Month))
datatable(track2,extensions = 'TableTools', rownames=FALSE, options = list(
pageLength=100,
"sDom" = 'T<"clear">lfrtip',
"oTableTools" = list(
"sSwfPath" = "//cdnjs.cloudflare.com/ajax/libs/datatables-tabletools/2.1.5/swf/copy_csv_xls.swf",
"aButtons" = list(
"copy",
"print",
list("sExtends" = "collection",
"sButtonText" = "Save",
"aButtons" = c("csv","xls"))))))%>%
formatCurrency(c("CurrBal","Payment.Amount","EffDt"),"$")
})
# Office Tracker table: budget vs actual, with currency/percent formatting.
output$OfficeTracker <- DT::renderDataTable({
TB <- subset(TR,Manager%in%c(input$MGR))
TL <- subset(TB,Setup.Month%in%c(input$MTH))
datatable(TL,extensions = 'TableTools', rownames=FALSE,options = list(
pageLength=3000,
"sDom" = 'T<"clear">lfrtip',
"oTableTools" = list(
"sSwfPath" = "//cdnjs.cloudflare.com/ajax/libs/datatables-tabletools/2.1.5/swf/copy_csv_xls.swf",
"aButtons" = list(
"copy",
"print",
list("sExtends" = "collection",
"sButtonText" = "Save",
"aButtons" = c("csv","xls")))))
) %>%
formatCurrency(c("Dollar.Budget","Dollar_Initiated","Dollar_Posted"),"$") %>%
formatPercentage(c("Dollar_BVA","RHB_BVA","RHB_Posted_BVA"))
})
# Historical Duplicates: duplicate File #s in MasterTracker (column 8),
# with bookkeeping columns dropped before display.
output$MTH <- DT::renderDataTable({
Dupes <- MasterTracker[duplicated(MasterTracker[,8]),]
Dupes <- Dupes$"File.."
Duplicates <- MasterTracker[MasterTracker$"File.." %in% Dupes,]
row.names(Duplicates)<-NULL
Duplicates <- Duplicates[,-c(1,2,3,13:26)]
datatable(Duplicates,extensions = 'TableTools',rownames=FALSE, options = list(
pageLength=3000,
"sDom" = 'T<"clear">lfrtip',
"oTableTools" = list(
"sSwfPath" = "//cdnjs.cloudflare.com/ajax/libs/datatables-tabletools/2.1.5/swf/copy_csv_xls.swf",
"aButtons" = list(
"copy",
"print",
list("sExtends" = "collection",
"sButtonText" = "Save",
"aButtons" = c("csv","xls"))))))%>%
formatCurrency(c("CurrBal"),"$")
})
# NOTE(review): no `refresh` control exists in the UI above; this observer
# looks like dead code -- confirm.
observeEvent(input$refresh, {
shinyjs::reset("form")
})
# CSV download of the raw responses.
# NOTE(review): no matching downloadButton/downloadLink appears in the UI
# above -- confirm this handler is still reachable.
output$downloadData <- downloadHandler(
filename = function() {
paste('data-', Sys.Date(), '.csv', sep='')
},
content = function(file) {
write.csv(responses, file)
}
)
# Keep these tables computed even while their tabs are hidden.
outputOptions(output, "table1", suspendWhenHidden = FALSE)
outputOptions(output, "dupes", suspendWhenHidden = FALSE)
}
)
|
/App.R
|
no_license
|
jlow2499/Shiny-Google-Form
|
R
| false
| false
| 12,586
|
r
|
library(shiny)
library(shinyjs)
library(shinythemes)
library(DT)
library(dplyr)
setwd("C:/Users/193344/Desktop/tracker gui")
ARMASTER <- read.csv("//knx1fs01/ED Reporting/Lowhorn Big Data/Golden Rule Data/ARMASTER.csv")
ar <- select(ARMASTER,A.R,desk,manager)
ar <- rename(ar,Desk=desk)
ar <- rename(ar,Manager=manager)
saveData <- function(data) {
data <- as.data.frame(t(data))
if (exists("responses")) {
responses <<- rbind(responses, data)
} else {
responses <<- data
}
}
loadData <- function() {
if (exists("responses")) {
responses
}
}
fields <- c("Desk","Program","File #", "Set Up Date","Tier","Due Date","Payment Method","Payment Amount")
shinyApp(
ui = fluidPage(shinyjs::useShinyjs(), theme=shinytheme("readable"),
tags$head(
tags$style(HTML("
@import url('//fonts.googleapis.com/css?family=Lobster|Cabin:400,700');
h1 {
font-family: 'Lobster', cursive;
font-weight: 500;
line-height: 1.1;
color: #191970;
}
"))
),
headerPanel("AM Tracker App"),
tabsetPanel(
tabPanel("User Input",div(id = "form",
column(4),column(4,
numericInput("Desk", "Desk", ""),
numericInput("File #","File #",""),
dateInput("Set Up Date","Set Up Date",value=Sys.Date()),
selectInput("Program","Program",choices=c("15%","FIS","SIF","BIF")),
selectInput("Tier","Tier",choices=c("Extreme","High","Medium","Low")),
dateInput("Due Date","Due Date",value=Sys.Date()),
selectInput("Payment Method","Payment Method",choices=c("Credit Card","Debit Card","Check","Money Order","WU","Mail In")),
numericInput("Payment Amount","Payment Amount",""),
actionButton("submit", "Submit",icon=icon("eye-open", lib = "glyphicon")),
tags$style(type='text/css', "#submit { vertical-align: middle; height: 50px; width: 69%; font-size: 30px; background-color: LightGray; border-color: black; color:blue}")
)),
shinyjs::hidden(
div(
id = "thankyou_msg",
h3("Thanks, your account was submitted successfully!"),
actionLink("submit_another", "Submit another account")
))),
tabPanel("Daily Tracker",
fluidRow (
dataTableOutput("table1"
))),
tabPanel("Daily Duplicates",
dataTableOutput("dupes")
),
tabPanel("Master Tracker", fluidRow(column(1),
column(5,
selectInput("Manager",
"Manager Select",
choices=levels(MasterTracker$Manager),
multiple=T,
selected=levels(MasterTracker$Manager),
selectize=T,
width=1000)),
column(5,
selectInput('Month',"Month Select",
choices=c("August 2015","September 2015"),
selected="September 2015",
selectize=T,
multiple=T,
width=1000)
),
column(1)),
DT::dataTableOutput("MasterTracker")),
tabPanel("Office Tracker", fluidRow(column(1),
column(5,
selectInput("MGR",
"Manager Select",
choices=levels(Budgets$Manager),
multiple=T,
selected=levels(Budgets$Manager),
selectize=T,
width=1000)),
column(5,selectInput('MTH',"Month Select",
choices=c("September 2015","August 2015"),
selected="September 2015",
selectize=T,
multiple=T,
width=1000)),
column(1),
DT::dataTableOutput("OfficeTracker")
)),
tabPanel("Historical Duplicates",
DT::dataTableOutput("MTH")
)
)),
server = function(input, output, session) {
# Whenever a field is filled, aggregate all form data
formData <- reactive({
data <- sapply(fields, function(x) input[[x]])
data
})
# action to take when submit button is pressed
observeEvent(input$submit, {
saveData(formData())
shinyjs::reset("form")
shinyjs::hide("form")
shinyjs::show("thankyou_msg")
})
observeEvent(input$submit_another, {
shinyjs::show("form")
shinyjs::hide("thankyou_msg")
})
output$table1 <- renderDataTable({
input$submit
loadData()
responses$Desk <- unlist(as.integer(as.character(responses$Desk)))
responses <- left_join(responses,ar,by="Desk")
responses$"Set Up Date" <- unlist(as.numeric(as.character(responses$"Set Up Date")))
responses$"Set Up Date" <- as.Date(responses$"Set Up Date", origin = "1970-01-01")
responses$"Due Date" <- as.numeric(as.character((responses$"Due Date")))
responses$"Due Date" <- as.Date(responses$"Due Date", origin = "1970-01-01")
responses
},options = list(lengthMenu = c(10, 50, 100, 3000), pageLength = 3000))
output$dupes <- renderDataTable({
input$submit
loadData()
responses$Desk <- unlist(as.integer(as.character(responses$Desk)))
responses$"Set Up Date" <- as.numeric(as.character(responses$"Set Up Date"))
responses$"Set Up Date" <- as.Date(responses$"Set Up Date", origin = "1970-01-01")
responses$"Due Date" <- as.numeric(as.character(responses$"Due Date"))
responses$"Due Date" <- as.Date(responses$"Due Date", origin = "1970-01-01")
responses2 <- responses[duplicated(responses[,3]),]
responses2 <- responses2$"File #"
response <- responses[responses$"File #" %in% responses2,]
response <- left_join(response,ar,by="Desk")
#response$"Set Up Date" <- as.numeric(as.character(response$"Set Up Date"))
response$"Set Up Date" <- as.Date(response$"Set Up Date", origin = "1970-01-01")
#response$"Due Date" <- as.numeric(as.character(response$"Due Date"))
response$"Due Date" <- as.Date(response$"Due Date", origin = "1970-01-01")
response
})
output$MasterTracker <- DT::renderDataTable({
track <- subset(MasterTracker,Manager %in% c(input$Manager))
track2 <- subset(track,Setup.Month %in% c(input$Month))
datatable(track2,extensions = 'TableTools', rownames=FALSE, options = list(
pageLength=100,
"sDom" = 'T<"clear">lfrtip',
"oTableTools" = list(
"sSwfPath" = "//cdnjs.cloudflare.com/ajax/libs/datatables-tabletools/2.1.5/swf/copy_csv_xls.swf",
"aButtons" = list(
"copy",
"print",
list("sExtends" = "collection",
"sButtonText" = "Save",
"aButtons" = c("csv","xls"))))))%>%
formatCurrency(c("CurrBal","Payment.Amount","EffDt"),"$")
})
output$OfficeTracker <- DT::renderDataTable({
TB <- subset(TR,Manager%in%c(input$MGR))
TL <- subset(TB,Setup.Month%in%c(input$MTH))
datatable(TL,extensions = 'TableTools', rownames=FALSE,options = list(
pageLength=3000,
"sDom" = 'T<"clear">lfrtip',
"oTableTools" = list(
"sSwfPath" = "//cdnjs.cloudflare.com/ajax/libs/datatables-tabletools/2.1.5/swf/copy_csv_xls.swf",
"aButtons" = list(
"copy",
"print",
list("sExtends" = "collection",
"sButtonText" = "Save",
"aButtons" = c("csv","xls")))))
) %>%
formatCurrency(c("Dollar.Budget","Dollar_Initiated","Dollar_Posted"),"$") %>%
formatPercentage(c("Dollar_BVA","RHB_BVA","RHB_Posted_BVA"))
})
output$MTH <- DT::renderDataTable({
Dupes <- MasterTracker[duplicated(MasterTracker[,8]),]
Dupes <- Dupes$"File.."
Duplicates <- MasterTracker[MasterTracker$"File.." %in% Dupes,]
row.names(Duplicates)<-NULL
Duplicates <- Duplicates[,-c(1,2,3,13:26)]
datatable(Duplicates,extensions = 'TableTools',rownames=FALSE, options = list(
pageLength=3000,
"sDom" = 'T<"clear">lfrtip',
"oTableTools" = list(
"sSwfPath" = "//cdnjs.cloudflare.com/ajax/libs/datatables-tabletools/2.1.5/swf/copy_csv_xls.swf",
"aButtons" = list(
"copy",
"print",
list("sExtends" = "collection",
"sButtonText" = "Save",
"aButtons" = c("csv","xls"))))))%>%
formatCurrency(c("CurrBal"),"$")
})
observeEvent(input$refresh, {
shinyjs::reset("form")
})
output$downloadData <- downloadHandler(
filename = function() {
paste('data-', Sys.Date(), '.csv', sep='')
},
content = function(file) {
write.csv(responses, file)
}
)
outputOptions(output, "table1", suspendWhenHidden = FALSE)
outputOptions(output, "dupes", suspendWhenHidden = FALSE)
}
)
|
\name{earthquake}
\alias{earthquake}
\docType{data}
\title{
Earthquake locations
}
\description{
Longitude, latitude and times (starting from zero) of a set of earthquakes in and around California.
}
\usage{data(earthquake)}
\format{
A data frame with 3 columns: longitude, latitude, and event time (starting from zero).
}
\keyword{datasets}
|
/stppResid/man/earthquake.Rd
|
no_license
|
r-clements/stppResid
|
R
| false
| false
| 284
|
rd
|
\name{earthquake}
\alias{earthquake}
\docType{data}
\title{
Earthquake locations
}
\description{
Longitude, latitude and times (starting from zero) of a set of earthquakes in and around California.
}
\usage{data(earthquake)}
\format{
A data frame with 3 columns: longitude, latitude, and event time (starting from zero).
}
\keyword{datasets}
|
#' Plot collapsed co-ranking matrix for heatmap for projection in a benchmark
#'
#' Creates plot showing collapsed co-ranking matrix of a dimension-reduction layout as a heatmap.
#'
#' @param benchmark an object of class \code{Benchmark}, as generated by the constructor \code{Benchmark} and evaluated using \code{Evaluate.Benchmark}
#' @param idx.subpipeline integer value: index of subpipeline that includes a projection step
#' @param idx.n_param optional integer value: index of subpipeline n-parameter iteration. Default value is \code{NULL}
#' @param log logical value: if the retrieved co-ranking matrix is collapsed, whether to plot log-transformed counts. Default value is \code{TRUE}
#'
#' @return for a collapsed co-ranking matrix, a \code{pheatmap}-based plot object; otherwise the result of drawing the full matrix with \code{image}
#'
#' @export
PlotCoRanking <- function(
benchmark,
idx.subpipeline,
idx.n_param = NULL,
log = TRUE
) {
# Validate the supplied arguments (project-internal helper operating on
# this call's environment).
.PlotProjection.ValidityChecks(environment())
# Fetch the co-ranking matrix for the chosen subpipeline/iteration; the
# result flags whether it is the collapsed or the full form.
res <- GetCoRanking(benchmark, idx.subpipeline, idx.n_param)
Q <- res$Matrix
collapsed <- res$Collapsed
# Collapsed matrices get the annotated heatmap; full ones use image().
if (collapsed) {
PlotCollapsedCorankingMatrix(Q, log = log)
} else {
PlotFullCorankingMatrix(Q, res$K)
}
}
PlotFullCorankingMatrix <- function(Q, k) {
## Full (non-truncated) co-ranking matrix (joint histogram of rank
## errors for a lower-dimensional projection of data, versus original)
# `line_val` is a sentinel strictly larger than any count; a row and a
# column of it are spliced in at position k to draw a visible divider
# between ranks inside and outside the k-neighbourhood.
line_val <- max(Q) + 10
dQ <- rbind(Q[1:k, ], line_val, Q[(k+1):nrow(Q), ])
# Q is square, so nrow(Q) doubles as its column count here.
dQ <- cbind(dQ[, 1:k], line_val, dQ[, (k+1):nrow(Q)])
# Reverse the columns so the origin renders top-left, then log-scale.
# NOTE(review): zero counts yield log(0) = -Inf here -- confirm image()
# renders those cells as intended.
d <- log(dQ[, ncol(dQ):1])
image(d, axes = FALSE, main = 'Co-ranking matrix', sub = paste0('Min=', min(Q), ', Mean=', round(mean(Q), 2), ', Median=', median(Q),', Max=', max(Q)), col = hcl.colors(12, 'viridis', rev = TRUE))
}
PlotCollapsedCorankingMatrix <- function(cQ, log = TRUE) {
  # Plot a truncated ("collapsed") co-ranking matrix -- the joint histogram
  # of rank errors of a low-dimensional embedding versus the original data.
  # The last row/column of `cQ` hold the hard-k intruder/extruder counts;
  # they are shown as heatmap annotations rather than matrix cells, so this
  # version does not quantify the size of hard-k intrusions and extrusions.
  #
  # cQ:  collapsed co-ranking matrix of counts (last row = intruders,
  #      last column = extruders).
  # log: if TRUE, plot log-counts (zero cells are nudged to 0.9 first so
  #      log() stays finite).
  # Returns a heavily post-processed pheatmap object.
  n <- nrow(cQ)
  extruders <- data.frame(cQ[, n][-n])
  intruders <- data.frame(cQ[n, ][-n])
  core <- cQ[-n, -n]
  rownames(core) <- colnames(core) <- rownames(extruders) <- rownames(intruders) <- seq_len(n - 1)
  colnames(extruders) <- 'extruders'
  colnames(intruders) <- 'intruders'
  if (log) {
    core[core == 0] <- 0.9
    core <- log(core)
  }
  p <- pheatmap::pheatmap(
    core, cluster_rows = FALSE, cluster_cols = FALSE,
    annotation_row = extruders, annotation_col = intruders,
    silent = TRUE, show_rownames = FALSE, show_colnames = FALSE,
    main = paste0('Collapsed co-ranking matrix of k-ary point neighbourhoods (log scale)')
  )
  # --- Manual gtable surgery on the pheatmap layout ----------------------
  gt <- p$gtable
  slot_of <- function(nm) which(gt$layout$name == nm)   # grob index by name
  bp <- function(x) grid::unit(x, 'bigpts')             # shorthand unit
  # Re-seat the annotation strips in the layout cells next to the matrix.
  gt$layout[slot_of('row_annotation'), 1:5] <- c('t' = 4, 'l' = 4, 'b' = 4, 'r' = 5, 'z' = 4)
  gt$layout[slot_of('col_annotation'), 1:5] <- c('t' = 2, 'l' = 3, 'b' = 5, 'r' = 3, 'z' = 3)
  # Shift/widen the row annotation and hide its cell borders
  # (border colour := fill colour).
  ra <- gt$grobs[[slot_of('row_annotation')]]
  ra$x <- ra$x - bp(2.5)
  ra$width <- ra$width * 3
  ra$gp$col <- ra$gp$fill
  gt$grobs[[slot_of('row_annotation')]] <- ra
  # Same treatment for the column annotation.
  ca <- gt$grobs[[slot_of('col_annotation')]]
  ca$y <- ca$y + bp(20)
  ca$height <- ca$height * 3
  ca$gp$col <- ca$gp$fill
  gt$grobs[[slot_of('col_annotation')]] <- ca
  # Blank the two annotation-legend titles, then nudge the legend right.
  al <- gt$grobs[[slot_of('annotation_legend')]]
  al$children[[1]]$label <- ''
  al$children[[5]]$label <- ''
  for (child in 1:8) {
    al$children[[child]]$x <- al$children[[child]]$x + bp(15)
  }
  gt$grobs[[slot_of('annotation_legend')]] <- al
  # Move the colour-key legend right and up.
  lg <- gt$grobs[[slot_of('legend')]]
  for (child in 1:2) {
    lg$children[[child]]$x <- lg$children[[child]]$x + bp(45)
    lg$children[[child]]$y <- lg$children[[child]]$y - bp(180)
  }
  gt$grobs[[slot_of('legend')]] <- lg
  # Tuck the title closer to the panel.
  gt$grobs[[slot_of('main')]]$y <- gt$grobs[[slot_of('main')]]$y - bp(10)
  # Hide cell borders in the matrix panel as well.
  mtx <- gt$grobs[[slot_of('matrix')]]
  mtx$children[[1]]$gp$col <- mtx$children[[1]]$gp$fill
  gt$grobs[[slot_of('matrix')]] <- mtx
  # Drop the "extruders"/"intruders" annotation name labels.
  gt$grobs[[slot_of('col_annotation_names')]]$label <- ''
  gt$grobs[[slot_of('row_annotation_names')]]$label <- ''
  p$gtable <- gt
  p
}
|
/R/04_Visual_CoRanking.R
|
permissive
|
davnovak/SingleBench
|
R
| false
| false
| 5,297
|
r
|
#' Plot collapsed co-ranking matrix for heatmap for projection in a benchmark
#'
#' Creates plot showing collapsed co-ranking matrix of a dimension-reduction layout as a heatmap.
#'
#' @param benchmark an object of class \code{Benchmark}, as generated by the constructor \code{Benchmark} and evaluated using \code{Evaluate.Benchmark}
#' @param idx.subpipeline integer value: index of subpipeline that includes a projection step
#' @param idx.n_param optional integer value: index of subpipeline n-parameter iteration. Default value is \code{NULL}
#'
#' @export
PlotCoRanking <- function(
benchmark,
idx.subpipeline,
idx.n_param = NULL,
log = TRUE
) {
.PlotProjection.ValidityChecks(environment())
res <- GetCoRanking(benchmark, idx.subpipeline, idx.n_param)
Q <- res$Matrix
collapsed <- res$Collapsed
if (collapsed) {
PlotCollapsedCorankingMatrix(Q, log = log)
} else {
PlotFullCorankingMatrix(Q, res$K)
}
}
PlotFullCorankingMatrix <- function(Q, k) {
## Full (non-truncated) co-ranking matrix (joint histogram of rank
## errors for a lower-dimensional projection of data, versus original)
line_val <- max(Q) + 10
dQ <- rbind(Q[1:k, ], line_val, Q[(k+1):nrow(Q), ])
dQ <- cbind(dQ[, 1:k], line_val, dQ[, (k+1):nrow(Q)])
d <- log(dQ[, ncol(dQ):1])
image(d, axes = FALSE, main = 'Co-ranking matrix', sub = paste0('Min=', min(Q), ', Mean=', round(mean(Q), 2), ', Median=', median(Q),', Max=', max(Q)), col = hcl.colors(12, 'viridis', rev = TRUE))
}
PlotCollapsedCorankingMatrix <- function(cQ, log = TRUE) {
## Truncated (collapsed) co-ranking matrix (joint histogram of rank errors
## for a lower-dimensional projection of data, versus original). This version
## does not quantify the size of hard-k intrusions and extrusions
n <- nrow(cQ)
extruders <- data.frame(cQ[, n][-n])
intruders <- data.frame(cQ[n, ][-n])
cQ <- cQ[-n, -n]
colnames(cQ) <- rownames(cQ) <- rownames(extruders) <- rownames(intruders) <- 1:(n - 1)
colnames(extruders) <- 'extruders'
colnames(intruders) <- 'intruders'
if (log) {
cQ[cQ == 0] <- 0.9
cQ <- log(cQ)
}
p <- pheatmap::pheatmap(
cQ, cluster_rows = FALSE, cluster_cols = FALSE,
annotation_row = extruders, annotation_col = intruders,
silent = TRUE, show_rownames = FALSE, show_colnames = FALSE,
main = paste0('Collapsed co-ranking matrix of k-ary point neighbourhoods (log scale)')
)
p$gtable$layout[which(p$gtable$layout$name == 'row_annotation'), 1:5] <- c('t' = 4, 'l' = 4, 'b' = 4, 'r' = 5, 'z'= 4)
p$gtable$layout[which(p$gtable$layout$name == 'col_annotation'), 1:5] <- c('t' = 2, 'l' = 3, 'b' = 5, 'r' = 3, 'z'= 3)
p$gtable$grobs[[which(p$gtable$layout$name == 'row_annotation')]]$x <- p$gtable$grobs[[which(p$gtable$layout$name == 'row_annotation')]]$x - grid::unit(2.5, 'bigpts')
p$gtable$grobs[[which(p$gtable$layout$name == 'row_annotation')]]$width <- p$gtable$grobs[[which(p$gtable$layout$name == 'row_annotation')]]$width * 3
p$gtable$grobs[[which(p$gtable$layout$name == 'col_annotation')]]$y <- p$gtable$grobs[[which(p$gtable$layout$name == 'col_annotation')]]$y + grid::unit(20, 'bigpts')
p$gtable$grobs[[which(p$gtable$layout$name == 'col_annotation')]]$height <- p$gtable$grobs[[which(p$gtable$layout$name == 'col_annotation')]]$height * 3
p$gtable$grobs[[which(p$gtable$layout$name == 'annotation_legend')]]$children[[1]]$label <-
p$gtable$grobs[[which(p$gtable$layout$name == 'annotation_legend')]]$children[[5]]$label <- ''
for (idx in 1:8)
p$gtable$grobs[[which(p$gtable$layout$name == 'annotation_legend')]]$children[[idx]]$x <-
p$gtable$grobs[[which(p$gtable$layout$name == 'annotation_legend')]]$children[[idx]]$x + grid::unit(15, 'bigpts')
p$gtable$grobs[[which(p$gtable$layout$name == 'legend')]]$children[[1]]$x <-
p$gtable$grobs[[which(p$gtable$layout$name == 'legend')]]$children[[1]]$x + grid::unit(45, 'bigpts')
p$gtable$grobs[[which(p$gtable$layout$name == 'legend')]]$children[[2]]$x <-
p$gtable$grobs[[which(p$gtable$layout$name == 'legend')]]$children[[2]]$x + grid::unit(45, 'bigpts')
p$gtable$grobs[[which(p$gtable$layout$name == 'legend')]]$children[[1]]$y <-
p$gtable$grobs[[which(p$gtable$layout$name == 'legend')]]$children[[1]]$y - grid::unit(180, 'bigpts')
p$gtable$grobs[[which(p$gtable$layout$name == 'legend')]]$children[[2]]$y <-
p$gtable$grobs[[which(p$gtable$layout$name == 'legend')]]$children[[2]]$y - grid::unit(180, 'bigpts')
p$gtable$grobs[[which(p$gtable$layout$name == 'main')]]$y <-
p$gtable$grobs[[which(p$gtable$layout$name == 'main')]]$y - grid::unit(10, 'bigpts')
p$gtable$grobs[[which(p$gtable$layout$name == 'matrix')]]$children[[1]]$gp$col <-
p$gtable$grobs[[which(p$gtable$layout$name == 'matrix')]]$children[[1]]$gp$fill
p$gtable$grobs[[which(p$gtable$layout$name == 'row_annotation')]]$gp$col <-
p$gtable$grobs[[which(p$gtable$layout$name == 'row_annotation')]]$gp$fill
p$gtable$grobs[[which(p$gtable$layout$name == 'col_annotation')]]$gp$col <-
p$gtable$grobs[[which(p$gtable$layout$name == 'col_annotation')]]$gp$fill
p$gtable$grobs[[which(p$gtable$layout$name == 'col_annotation_names')]]$label <-
p$gtable$grobs[[which(p$gtable$layout$name == 'row_annotation_names')]]$label <- ''
p
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{domain_to_R}
\alias{domain_to_R}
\title{Rectangular Domain -> Unbounded Domain}
\usage{
domain_to_R(f, domain)
}
\arguments{
\item{f}{The function to wrap, should have a single vector-valued input.}
\item{domain}{A list of real tuples, indicating the original domain of the function.}
}
\value{
A function wrapping f.
}
\description{
Given an m-dimensional function whose inputs live in bounded intervals [a1, b1], ..., [am, bm], return a wrapped version of the function whose inputs live in R^m. The transformation is carried out using the logit function.
}
|
/activegp/man/domain_to_R.Rd
|
no_license
|
akhikolla/InformationHouse
|
R
| false
| true
| 629
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{domain_to_R}
\alias{domain_to_R}
\title{Rectangular Domain -> Unbounded Domain}
\usage{
domain_to_R(f, domain)
}
\arguments{
\item{f}{The function to wrap, should have a single vector-valued input.}
\item{domain}{A list of real tuples, indicating the original domain of the function.}
}
\value{
A function wrapping f.
}
\description{
Given an m-dimensional function whose inputs live in bounded intervals [a1, b1], ..., [am, bm], return a wrapped version of the function whose inputs live in R^m. The transformation is carried out using the logit function.
}
|
# Party-level engagement analysis of congressional tweets.
# Joins tweet dumps (House + Senate) to a State/Party/handle lookup, computes
# per-party reaction and favourite statistics, plots per-state averages, and
# lists frequent terms per party.
# NOTE(review): depends on RStudio (rstudioapi) and on .rds files relative to
# the script location; not runnable headless as-is.
source.dir <- dirname(rstudioapi::getActiveDocumentContext()$path)
setwd(source.dir)
library(tm)
library(SnowballC)
library(dplyr)
library(hunspell)
library(ggplot2)
# Columns retained from each raw tweet dump.
selected_cols <- c('text', 'favoriteCount', 'created', 'screenName', 'retweetCount')
john_tweets_df <- readRDS("../../Twitter Mining/John/john_house_tweets.rds") %>% select(selected_cols)
eunsik_tweets_df <- readRDS("../../Twitter Mining/Eunsik/sen_tweets.rds") %>% select(selected_cols)
all_tweets_df <- rbind(john_tweets_df, eunsik_tweets_df)
# State / Party / Twitter-handle lookup; strip '@' and lower-case handles on
# both sides so the join keys match.
party_name_twit <- readRDS("all_twitter.rds")
names(party_name_twit) <- c("State", "Party", "screenName")
party_name_twit$screenName <- gsub("@", "", party_name_twit$screenName)
party_name_twit$screenName <- tolower(party_name_twit$screenName)
all_tweets_df$screenName <- tolower(all_tweets_df$screenName)
#tweets with Party column
tweets_party<-full_join(all_tweets_df, party_name_twit, by = c("screenName"))
#Republican tweets
tweets_party_rep<-filter(tweets_party, Party == "R")
#Democratic tweets
tweets_party_dem<-filter(tweets_party, Party == "D")
#Reacted tweets
# (the full join leaves NA tweet fields for members without matching tweets,
# so na.omit keeps only members who actually posted)
react_rep_tweet <- na.omit(tweets_party_rep)
react_dem_tweet <- na.omit(tweets_party_dem)
#reaction rates (who posted about it)
react_rate_rep <- nrow(react_rep_tweet)/nrow(tweets_party_rep)
react_rate_dem <- nrow(react_dem_tweet)/nrow(tweets_party_dem)
#Likes per post
like_per_post_rep <- sum(react_rep_tweet$favoriteCount)/nrow(react_rep_tweet)
like_per_post_dem <- sum(react_dem_tweet$favoriteCount)/nrow(react_dem_tweet)
#Most famous post by party
# (most-favourited Republican tweet: text, favourite count and state)
famous_r<-which.max(react_rep_tweet$favoriteCount)
react_rep_tweet[famous_r,]$text
react_rep_tweet[famous_r,]$favoriteCount
react_rep_tweet[famous_r,]$State
#Most famous post by party
# (most-favourited Democratic tweet: text, favourite count and state)
famous_d<-which.max(react_dem_tweet$favoriteCount)
react_dem_tweet[famous_d,]$text
react_dem_tweet[famous_d,]$favoriteCount
react_dem_tweet[famous_d,]$State
# Texas-only reaction rates, by party.
tx_tweets <- tweets_party[tweets_party$State=="Texas",]
tx_tweets_rep <-filter(tx_tweets, Party == "R")
tx_tweets_dem <-filter(tx_tweets, Party == "D")
react_tx_rep <- na.omit(tx_tweets_rep)
react_tx_dem <- na.omit(tx_tweets_dem)
nrow(react_tx_rep)/nrow(tx_tweets_rep)
nrow(react_tx_dem)/nrow(tx_tweets_dem)
#combine reacted tweets for both parties
react_tweet <- na.omit(tweets_party)
react_tweet_avgs <- react_tweet %>% group_by(State, Party) %>% summarise(fav_avg = mean(favoriteCount))
# Bar chart: log of mean favourites per party, faceted by state.
ggplot(react_tweet_avgs, aes(x = Party, y = log(fav_avg), fill = Party)) +
geom_bar(stat = "identity") + facet_wrap(~State) + ggtitle("Log of Average of Favorites by Party") +
scale_fill_manual(values = c('steelblue4', 'firebrick2')) + ylab("Average Favorites")
# jpeg("log_avg_favorite.jpg")
# Term frequencies in Republican tweets: lower-case, strip punctuation,
# numbers, extra whitespace and English stop words, then list terms that
# appear at least 10 times (word lengths 3-10).
corpus_rep <- Corpus(VectorSource(iconv(react_rep_tweet$text, "latin1", "ASCII", sub="")))
skipWords <- function(x) removeWords(x, stopwords("english"))
funcs <- list(tolower, removePunctuation, removeNumbers, stripWhitespace, skipWords)
a <- tm_map(corpus_rep, FUN = tm_reduce, tmFuns = funcs)
a.dtm1 <- TermDocumentMatrix(a, control = list(wordLengths = c(3,10)))
findFreqTerms(a.dtm1, 10)
# Same pipeline for Democratic tweets.
corpus_dem <- Corpus(VectorSource(iconv(react_dem_tweet$text, "latin1", "ASCII", sub="")))
skipWords <- function(x) removeWords(x, stopwords("english"))
funcs <- list(tolower, removePunctuation, removeNumbers, stripWhitespace, skipWords)
a <- tm_map(corpus_dem, FUN = tm_reduce, tmFuns = funcs)
a.dtm1 <- TermDocumentMatrix(a, control = list(wordLengths = c(3,10)))
findFreqTerms(a.dtm1, 10)
|
/Codes/Analysis/by_party.R
|
no_license
|
swannyy14/Sentiment-Analysis-on-Sutherland-Springs-Shooting-by-Party
|
R
| false
| false
| 3,472
|
r
|
source.dir <- dirname(rstudioapi::getActiveDocumentContext()$path)
setwd(source.dir)
library(tm)
library(SnowballC)
library(dplyr)
library(hunspell)
library(ggplot2)
## Combine House/Senate tweet dumps, attach party affiliation from a lookup
## table, and compare engagement (reaction rate, likes per post, most-liked
## posts, Texas breakdown) between parties; finish with frequent-term
## analysis of each party's tweets via tm.
selected_cols <- c('text', 'favoriteCount', 'created', 'screenName', 'retweetCount')
# NOTE(review): select() with a bare character vector is deprecated in
# dplyr >= 1.0; all_of(selected_cols) is the current idiom -- confirm
# against the dplyr version in use.
john_tweets_df <- readRDS("../../Twitter Mining/John/john_house_tweets.rds") %>% select(selected_cols)
eunsik_tweets_df <- readRDS("../../Twitter Mining/Eunsik/sen_tweets.rds") %>% select(selected_cols)
all_tweets_df <- rbind(john_tweets_df, eunsik_tweets_df)
# Lookup table mapping Twitter handles to state and party.
party_name_twit <- readRDS("all_twitter.rds")
names(party_name_twit) <- c("State", "Party", "screenName")
# Normalise handles on both sides (strip "@", lowercase) so the join key
# matches.
party_name_twit$screenName <- gsub("@", "", party_name_twit$screenName)
party_name_twit$screenName <- tolower(party_name_twit$screenName)
all_tweets_df$screenName <- tolower(all_tweets_df$screenName)
#tweets with Party column
tweets_party<-full_join(all_tweets_df, party_name_twit, by = c("screenName"))
#Republican tweets
tweets_party_rep<-filter(tweets_party, Party == "R")
#Democratic tweets
tweets_party_dem<-filter(tweets_party, Party == "D")
#Reacted tweets
# na.omit() keeps only rows that matched on both sides of the full join,
# i.e. legislators who actually tweeted.
react_rep_tweet <- na.omit(tweets_party_rep)
react_dem_tweet <- na.omit(tweets_party_dem)
#reaction rates (who posted about it)
react_rate_rep <- nrow(react_rep_tweet)/nrow(tweets_party_rep)
react_rate_dem <- nrow(react_dem_tweet)/nrow(tweets_party_dem)
#Likes per post
like_per_post_rep <- sum(react_rep_tweet$favoriteCount)/nrow(react_rep_tweet)
like_per_post_dem <- sum(react_dem_tweet$favoriteCount)/nrow(react_dem_tweet)
#Most famous post by party
famous_r<-which.max(react_rep_tweet$favoriteCount)
react_rep_tweet[famous_r,]$text
react_rep_tweet[famous_r,]$favoriteCount
react_rep_tweet[famous_r,]$State
#Most famous post by party
famous_d<-which.max(react_dem_tweet$favoriteCount)
react_dem_tweet[famous_d,]$text
react_dem_tweet[famous_d,]$favoriteCount
react_dem_tweet[famous_d,]$State
# Texas-only breakdown of reaction rates by party.
tx_tweets <- tweets_party[tweets_party$State=="Texas",]
tx_tweets_rep <-filter(tx_tweets, Party == "R")
tx_tweets_dem <-filter(tx_tweets, Party == "D")
react_tx_rep <- na.omit(tx_tweets_rep)
react_tx_dem <- na.omit(tx_tweets_dem)
nrow(react_tx_rep)/nrow(tx_tweets_rep)
nrow(react_tx_dem)/nrow(tx_tweets_dem)
#combine reacted tweets for both parties
react_tweet <- na.omit(tweets_party)
react_tweet_avgs <- react_tweet %>% group_by(State, Party) %>% summarise(fav_avg = mean(favoriteCount))
# Faceted bar chart: log of average favourites per state, split by party.
ggplot(react_tweet_avgs, aes(x = Party, y = log(fav_avg), fill = Party)) +
  geom_bar(stat = "identity") + facet_wrap(~State) + ggtitle("Log of Average of Favorites by Party") +
  scale_fill_manual(values = c('steelblue4', 'firebrick2')) + ylab("Average Favorites")
# jpeg("log_avg_favorite.jpg")
# Frequent terms (>= 10 occurrences, 3-10 chars) in Republican tweets:
# build a tm corpus, lowercase / strip punctuation, numbers, whitespace and
# English stopwords, then scan the term-document matrix.
corpus_rep <- Corpus(VectorSource(iconv(react_rep_tweet$text, "latin1", "ASCII", sub="")))
skipWords <- function(x) removeWords(x, stopwords("english"))
funcs <- list(tolower, removePunctuation, removeNumbers, stripWhitespace, skipWords)
a <- tm_map(corpus_rep, FUN = tm_reduce, tmFuns = funcs)
a.dtm1 <- TermDocumentMatrix(a, control = list(wordLengths = c(3,10)))
findFreqTerms(a.dtm1, 10)
# Same term-frequency analysis for Democratic tweets.
corpus_dem <- Corpus(VectorSource(iconv(react_dem_tweet$text, "latin1", "ASCII", sub="")))
skipWords <- function(x) removeWords(x, stopwords("english"))
funcs <- list(tolower, removePunctuation, removeNumbers, stripWhitespace, skipWords)
a <- tm_map(corpus_dem, FUN = tm_reduce, tmFuns = funcs)
a.dtm1 <- TermDocumentMatrix(a, control = list(wordLengths = c(3,10)))
findFreqTerms(a.dtm1, 10)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mvcokm.param.R
\name{mvcokm.param}
\alias{mvcokm.param}
\title{Get model parameters in autoregressive cokriging models for multivariate output}
\usage{
mvcokm.param(obj)
}
\arguments{
\item{obj}{a \code{\link{mvcokm}} object constructed via the function \code{\link{mvcokm}} in
this package}
}
\value{
a list of model parameters including regression coefficients \eqn{\beta},
scale discrepancy \eqn{\gamma}, variance parameters
\eqn{\sigma^2}, and correlation parameters \eqn{\phi} in covariance functions.
If nugget parameters are included in the model, then nugget parameters are shown in \eqn{\phi}.
}
\description{
This function computes estimates for regression and variance parameters
given the correlation parameters are known. It is used to show all model
parameters in one place.
}
\seealso{
\code{\link{mvcokm}}, \code{\link{mvcokm.fit}}, \code{\link{mvcokm.predict}}, \code{\link{ARCokrig}}
}
\author{
Pulong Ma <mpulong@gmail.com>
}
|
/fuzzedpackages/ARCokrig/man/mvcokm.param.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 1,025
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mvcokm.param.R
\name{mvcokm.param}
\alias{mvcokm.param}
\title{Get model parameters in autoregressive cokriging models for multivariate output}
\usage{
mvcokm.param(obj)
}
\arguments{
\item{obj}{a \code{\link{mvcokm}} object constructed via the function \code{\link{mvcokm}} in
this package}
}
\value{
a list of model parameters including regression coefficients \eqn{\beta},
scale discrepancy \eqn{\gamma}, variance parameters
\eqn{\sigma^2}, and correlation parameters \eqn{\phi} in covariance functions.
If nugget parameters are included in the model, then nugget parameters are shown in \eqn{\phi}.
}
\description{
This function computes estimates for regression and variance parameters
given the correlation parameters are known. It is used to show all model
parameters in one place.
}
\seealso{
\code{\link{mvcokm}}, \code{\link{mvcokm.fit}}, \code{\link{mvcokm.predict}}, \code{\link{ARCokrig}}
}
\author{
Pulong Ma <mpulong@gmail.com>
}
|
## mono_curves.R -- setup: input/output paths, libraries, and the
## Model-ID -> Study-ID lookup used to annotate plots and the AAC table.
home_dir <- "~/Desktop/compass/resource/matrices/"
out_dir <- "~/Desktop/compass/resource/plots/"
plotting_flag <- TRUE   # set FALSE to skip writing per-curve PNGs
library(PharmacoGx)
library(Cairo)
library(devtools)
library(readxl)
library(openxlsx)
options(stringsAsFactors = FALSE)
identifiers <- read_xlsx("~/Desktop/compass/resource/List of COMPASS PDO Identifiers.xlsx", sheet=1)
# Insert "_" after the first 4 characters of each Study ID.
identifiers$`Study ID` <- sub("^(.{4})", "\\1_", identifiers$`Study ID`)
# Named vector: names are Model IDs, values are the formatted Study IDs.
ids <- identifiers$`Study ID`
names(ids) <- identifiers$`Model ID`
#' Plot one drug dose-response curve with a fitted Hill curve overlay.
#'
#' @param drug drug name (used in the title).
#' @param cellline sample identifier; also used to look up the patient id
#'   in the global `ids` vector (assumed to be in scope).
#' @param conc numeric vector of concentrations (uM).
#' @param viability numeric vector of % viability, parallel to `conc`.
#' @param aac area above the curve, displayed in the legend.
#' @param nreplicates,nconcentration replicate/dose counts for the subtitle.
#' @param plate plate identifier (used in the title).
#' @return NULL, invisibly; called for the side effect of drawing on the
#'   active graphics device.
drugDoseResponseCurve2 <- function(drug, cellline, conc, viability, aac,
                                   nreplicates, nconcentration, plate) {
  # Keep the list-of-series structure of the original multi-curve plotter,
  # even though only one series is ever plotted here. (Dropped the unused
  # legend.values / j / pSetIndex locals.)
  doses <- list(conc)
  responses <- list(viability)
  # Axis limits spanning all series.
  dose.range <- c(10^100, 0)
  viability.range <- c(0, 10)
  for (i in seq_along(doses)) {
    dose.range <- c(min(dose.range[1], min(doses[[i]], na.rm=TRUE), na.rm=TRUE),
                    max(dose.range[2], max(doses[[i]], na.rm=TRUE), na.rm=TRUE))
    viability.range <- c(0, max(viability.range[2], max(responses[[i]], na.rm=TRUE), na.rm=TRUE))
  }
  x1 <- 10 ^ 10; x2 <- 0
  plot(NA, xlab="Concentration (uM)", ylab="% Viability", axes =FALSE,
       main=sprintf("%s, %s (%s) treated with %s", cellline, plate, ids[cellline], drug),
       log="x", ylim=c(0,115), xlim=dose.range, cex=1, cex.main=1,
       sub=paste0("#doses = ", nconcentration, ", #replicates = ", nreplicates))
  magicaxis::magaxis(side=1:2, frame.plot=TRUE, tcl=-.3, majorn=c(5,3), minorn=c(5,2))
  # Shaded background is only drawn in the (currently unreachable)
  # multi-series case.
  if (length(doses) > 1) {
    rect(xleft=x1, xright=x2, ybottom=viability.range[1] , ytop=viability.range[2] ,
         col=rgb(240, 240, 240, maxColorValue = 255), border=FALSE)
  }
  points(doses[[1]], responses[[1]], col="black", pch=20)
  # BUG FIX: the original indexed doses[[i]]/responses[[i]] here, relying on
  # the stale loop index left over from the range loop above; reference the
  # first (only) series explicitly.
  log_logistic_params <- PharmacoGx::logLogisticRegression(conc = doses[[1]], viability = responses[[1]])
  log10_x_vals <- PharmacoGx:::.GetSupportVec(log10(doses[[1]]))
  # Overlay the fitted 3-parameter Hill curve (slope, E_inf, EC50).
  lines(10 ^ log10_x_vals, PharmacoGx:::.Hill(log10_x_vals, pars=c(log_logistic_params$HS,
        log_logistic_params$E_inf/100, log10(log_logistic_params$EC50))) * 100 ,lty=1, lwd=2, col="red")
  legend("bottomleft", legend=paste0("AAC = ", format(round(aac, 4), nsmall = 4)), bty="n")
  # Reference lines at 50% and 100% viability.
  abline(h=50, lty=2, lwd=0.5, col="grey")
  abline(h=100, lty=2, lwd=0.5, col="grey")
  invisible(NULL)
}
## Main driver: parse every dose-response matrix file, compute the AAC for
## each drug/sample/plate triple, optionally plot the fitted curve, and
## write the summary table.
options(stringsAsFactors = FALSE)
all_files <- list.files(home_dir)
# NOTE(review): the dot is an unescaped regex metacharacter here; sub only
# replaces the first match so this works for *.txt names, but
# sub("\\.txt$", "", ...) would be safer.
all_files <- sub('.txt', '', all_files)
aac_output <- c()
for(idx in seq_along(all_files)) {
  # File names encode drug__sample__plate.
  buff <- unlist(strsplit(all_files[idx], "__"))
  drug_name <- buff[1]
  cell_line <- buff[2]
  plate_number <- buff[3]
  # The first 8 lines are "#key: value" headers.
  header_scan <- scan(paste0(home_dir, all_files[idx], ".txt"), nlines = 8, what = character(), sep="\n")
  headers <- data.frame(strsplit(header_scan, ":"))[2,]
  names(headers) <- gsub("#", "", data.frame(strsplit(header_scan, ":"))[1,])
  print(headers$sampleID)
  # Positional header fields: untreated control reading, top dose, and the
  # fold of the serial dilution.
  untreated_control <- as.numeric(headers[6])
  dose <- as.numeric(headers[7])
  fold <- as.numeric(headers[8])
  matrix <- read.table(paste0(home_dir, all_files[idx], ".txt"), sep="\t")
  no_of_replicates <- ncol(matrix)
  no_of_concentrations <- nrow(matrix)
  # Convert raw readings to % viability relative to the untreated control.
  replicate <- (matrix/untreated_control) * 100
  # Build the serial-dilution concentration ladder: top dose divided by
  # 'fold' at each of the remaining steps.
  dose_scale <- dose
  dose_temp <- dose
  for (s in 1:(no_of_concentrations-1)) {
    x <- dose_temp
    y <- x/fold
    dose_scale <- c(dose_scale, y)
    dose_temp <- y
  }
  # BUG FIX: the concentration vector was repeated a hard-coded 3 times;
  # use the actual replicate count so files with != 3 replicates still line
  # up with unlist(replicate), which unrolls column-major (one replicate
  # column at a time).
  input <- data.frame(concentration = rep(dose_scale, times=no_of_replicates), viability = unlist(replicate))
  aac <- computeAUC(concentration = input$concentration, viability = input$viability, viability_as_pct = TRUE, verbose = F) / 100
  aac_output <- rbind(aac_output, c(drug_name, cell_line, plate_number, no_of_concentrations, no_of_replicates, ids[cell_line], aac))
  if(plotting_flag) {
    # One PNG per curve, named drug__sample__plate__patient.png.
    png_file_name = paste0(out_dir, drug_name, "__", cell_line, "__", plate_number, "__", ids[cell_line], ".png")
    Cairo(width = 1000, height = 800, file = png_file_name, type = "png", bg = "white", canvas = "white", units = "px", dpi = 200)
    drugDoseResponseCurve2(drug = drug_name, cellline = cell_line, conc = input$concentration, viability = input$viability,
                           aac = aac, nreplicates = no_of_replicates, nconcentration = no_of_concentrations, plate = plate_number)
    dev.off()
  }
}
colnames(aac_output) <- c("Drug", "Sample", "Plate", "Doses", "Replicates", "Patient", "AAC")
write.table(aac_output, file=paste0(out_dir, "AAC_profiles.txt"), row.names=F, col.names=T, sep="\t", quote=F)
q("no")
|
/mono_therapy/drug_response_curves/mono_curves.R
|
no_license
|
anthfm/PDAC_DrugResponse
|
R
| false
| false
| 4,584
|
r
|
home_dir <- "~/Desktop/compass/resource/matrices/"
out_dir <- "~/Desktop/compass/resource/plots/"
plotting_flag <- TRUE
library(PharmacoGx)
library(Cairo)
library(devtools)
library(readxl)
library(openxlsx)
options(stringsAsFactors = FALSE)
identifiers <- read_xlsx("~/Desktop/compass/resource/List of COMPASS PDO Identifiers.xlsx", sheet=1)
identifiers$`Study ID` <- sub("^(.{4})", "\\1_", identifiers$`Study ID`)
ids <- identifiers$`Study ID`
names(ids) <- identifiers$`Model ID`
#' Plot one drug dose-response curve with a fitted Hill curve overlay.
#'
#' @param drug drug name (used in the title).
#' @param cellline sample identifier; also used to look up the patient id
#'   in the global `ids` vector (assumed to be in scope).
#' @param conc numeric vector of concentrations (uM).
#' @param viability numeric vector of % viability, parallel to `conc`.
#' @param aac area above the curve, displayed in the legend.
#' @param nreplicates,nconcentration replicate/dose counts for the subtitle.
#' @param plate plate identifier (used in the title).
#' @return NULL, invisibly; called for the side effect of drawing on the
#'   active graphics device.
drugDoseResponseCurve2 <- function(drug, cellline, conc, viability, aac,
                                   nreplicates, nconcentration, plate) {
  # Keep the list-of-series structure of the original multi-curve plotter,
  # even though only one series is ever plotted here. (Dropped the unused
  # legend.values / j / pSetIndex locals.)
  doses <- list(conc)
  responses <- list(viability)
  # Axis limits spanning all series.
  dose.range <- c(10^100, 0)
  viability.range <- c(0, 10)
  for (i in seq_along(doses)) {
    dose.range <- c(min(dose.range[1], min(doses[[i]], na.rm=TRUE), na.rm=TRUE),
                    max(dose.range[2], max(doses[[i]], na.rm=TRUE), na.rm=TRUE))
    viability.range <- c(0, max(viability.range[2], max(responses[[i]], na.rm=TRUE), na.rm=TRUE))
  }
  x1 <- 10 ^ 10; x2 <- 0
  plot(NA, xlab="Concentration (uM)", ylab="% Viability", axes =FALSE,
       main=sprintf("%s, %s (%s) treated with %s", cellline, plate, ids[cellline], drug),
       log="x", ylim=c(0,115), xlim=dose.range, cex=1, cex.main=1,
       sub=paste0("#doses = ", nconcentration, ", #replicates = ", nreplicates))
  magicaxis::magaxis(side=1:2, frame.plot=TRUE, tcl=-.3, majorn=c(5,3), minorn=c(5,2))
  # Shaded background is only drawn in the (currently unreachable)
  # multi-series case.
  if (length(doses) > 1) {
    rect(xleft=x1, xright=x2, ybottom=viability.range[1] , ytop=viability.range[2] ,
         col=rgb(240, 240, 240, maxColorValue = 255), border=FALSE)
  }
  points(doses[[1]], responses[[1]], col="black", pch=20)
  # BUG FIX: the original indexed doses[[i]]/responses[[i]] here, relying on
  # the stale loop index left over from the range loop above; reference the
  # first (only) series explicitly.
  log_logistic_params <- PharmacoGx::logLogisticRegression(conc = doses[[1]], viability = responses[[1]])
  log10_x_vals <- PharmacoGx:::.GetSupportVec(log10(doses[[1]]))
  # Overlay the fitted 3-parameter Hill curve (slope, E_inf, EC50).
  lines(10 ^ log10_x_vals, PharmacoGx:::.Hill(log10_x_vals, pars=c(log_logistic_params$HS,
        log_logistic_params$E_inf/100, log10(log_logistic_params$EC50))) * 100 ,lty=1, lwd=2, col="red")
  legend("bottomleft", legend=paste0("AAC = ", format(round(aac, 4), nsmall = 4)), bty="n")
  # Reference lines at 50% and 100% viability.
  abline(h=50, lty=2, lwd=0.5, col="grey")
  abline(h=100, lty=2, lwd=0.5, col="grey")
  invisible(NULL)
}
options(stringsAsFactors = FALSE)
all_files <- list.files(home_dir)
all_files <- sub('.txt', '', all_files)
aac_output <- c()
for(idx in 1:length(all_files)) {
buff <- unlist(strsplit(all_files[idx], "__"))
drug_name <- buff[1]
cell_line <- buff[2]
plate_number <- buff[3]
header_scan <- scan(paste0(home_dir, all_files[idx], ".txt"), nlines = 8, what = character(), sep="\n")
headers <- data.frame(strsplit(header_scan, ":"))[2,]
names(headers) <- gsub("#", "", data.frame(strsplit(header_scan, ":"))[1,])
print(headers$sampleID)
# modify
untreated_control <- as.numeric(headers[6])
dose <- as.numeric(headers[7])
fold <- as.numeric(headers[8])
matrix <- read.table(paste0(home_dir, all_files[idx], ".txt"), sep="\t")
no_of_replicates <- ncol(matrix)
no_of_concentrations <- nrow(matrix)
replicate <- (matrix/untreated_control) * 100
dose_scale <- dose
dose_temp <- dose
for (s in 1:(no_of_concentrations-1)) {
x <- dose_temp
y <- x/fold
dose_scale <- c(dose_scale, y)
dose_temp <- y
}
input <- data.frame(concentration = rep(dose_scale, times=3), viability = unlist(replicate))
aac <- computeAUC(concentration = input$concentration, viability = input$viability, viability_as_pct = TRUE, verbose = F) / 100
aac_output <- rbind(aac_output, c(drug_name, cell_line, plate_number, no_of_concentrations, no_of_replicates, ids[cell_line], aac))
if(plotting_flag) {
png_file_name = paste0(out_dir, drug_name, "__", cell_line, "__", plate_number, "__", ids[cell_line], ".png")
Cairo(width = 1000, height = 800, file = png_file_name, type = "png", bg = "white", canvas = "white", units = "px", dpi = 200)
drugDoseResponseCurve2(drug = drug_name, cellline = cell_line, conc = input$concentration, viability = input$viability,
aac = aac, nreplicates = no_of_replicates, nconcentration = no_of_concentrations, plate = plate_number)
dev.off()
}
}
colnames(aac_output) <- c("Drug", "Sample", "Plate", "Doses", "Replicates", "Patient", "AAC")
write.table(aac_output, file=paste0(out_dir, "AAC_profiles.txt"), row.names=F, col.names=T, sep="\t", quote=F)
q("no")
|
#' Compute phage annotation counts for a single sample, tagging each row
#' with the sample name.
#'
#' @param contig_file path to the contig-cdd file for the sample
#' @param sample_name name of the sample
#' @param cdd_annotation cdd-annotation data frame
#' @return data frame with 3 columns: sample_name, annotation, count
annotation_counts_for_sample <- function(contig_file, sample_name, cdd_annotation) {
  annotated_hits <- add_annotation_to_cdd(read_contig_cdd(contig_file), cdd_annotation)
  counts <- cdd_annotation_counts(annotated_hits)
  counts$sample_name <- sample_name
  counts
}
|
/annotation_count_sample.R
|
no_license
|
anatolydryga/CDD_cluster
|
R
| false
| false
| 589
|
r
|
#' Compute phage annotation counts for a single sample, tagging each row
#' with the sample name.
#'
#' @param contig_file path to the contig-cdd file for the sample
#' @param sample_name name of the sample
#' @param cdd_annotation cdd-annotation data frame
#' @return data frame with 3 columns: sample_name, annotation, count
annotation_counts_for_sample <- function(contig_file, sample_name, cdd_annotation) {
  annotated_hits <- add_annotation_to_cdd(read_contig_cdd(contig_file), cdd_annotation)
  counts <- cdd_annotation_counts(annotated_hits)
  counts$sample_name <- sample_name
  counts
}
|
# Integration test: rendering an R Markdown document through the reticulate
# knitr engine.
context("knitr")

test_that("An R Markdown document can be rendered using reticulate", {
  # Rendering needs python, rmarkdown, and the python modules used by the
  # example document; skip gracefully when anything is missing.
  skip_on_cran()
  skip_on_os("windows")
  skip_if_no_python()
  skip_if_not_installed("rmarkdown")

  for (module in c("numpy", "matplotlib")) {
    if (!py_module_available(module)) {
      skip(sprintf("module '%s' not available; skipping", module))
    }
  }

  # Render from the resources directory, restoring the working directory
  # afterwards.
  owd <- setwd("resources")
  status <- rmarkdown::render("eng-reticulate-example.Rmd", quiet = TRUE)
  setwd(owd)

  expect_true(file.exists(status), "example.Rmd rendered successfully")
})
|
/tests/testthat/test-python-knitr-engine.R
|
permissive
|
24sharkS/reticulate
|
R
| false
| false
| 584
|
r
|
# Integration test: rendering an R Markdown document through the reticulate
# knitr engine.
context("knitr")

test_that("An R Markdown document can be rendered using reticulate", {
  # Rendering needs python, rmarkdown, and the python modules used by the
  # example document; skip gracefully when anything is missing.
  skip_on_cran()
  skip_on_os("windows")
  skip_if_no_python()
  skip_if_not_installed("rmarkdown")

  for (module in c("numpy", "matplotlib")) {
    if (!py_module_available(module)) {
      skip(sprintf("module '%s' not available; skipping", module))
    }
  }

  # Render from the resources directory, restoring the working directory
  # afterwards.
  owd <- setwd("resources")
  status <- rmarkdown::render("eng-reticulate-example.Rmd", quiet = TRUE)
  setwd(owd)

  expect_true(file.exists(status), "example.Rmd rendered successfully")
})
|
#' Hypergeometric / Fisher enrichment test of a gene set against drawn genes.
#'
#' Tests whether the drawn genes (`gezogene`) are enriched for members of a
#' pathway (`pathway_gene`) relative to the background universe
#' (`alle_gene`). Both the one-sided hypergeometric p-value and Fisher's
#' exact test (with odds ratio and 95% CI) are reported. German status
#' messages are printed as a side effect.
#'
#' @param pathway_gene genes in the pathway (the "white balls").
#' @param gezogene the drawn genes.
#' @param alle_gene the background universe; both other inputs are
#'   restricted to this set before testing.
#' @param more if TRUE (default), test for over-representation (upper tail).
#' @param unique if TRUE (default), de-duplicate all three input vectors.
#' @return a list with enrichment percentages, p-values, odds ratio and CI,
#'   the 2x2 contingency matrix, the printed messages, and a one-row
#'   data.frame (`compactresult`) summarising everything.
hyper_test <- function (
  pathway_gene,
  gezogene,
  alle_gene,
  more = TRUE,
  unique = TRUE)
{
  if (isTRUE(unique)) {
    pathway_gene <- unique(pathway_gene)
    gezogene <- unique(gezogene)
    alle_gene <- unique(alle_gene)
  }
  # Restrict both gene lists to the background universe.
  pathway_gene_inbg <- pathway_gene[pathway_gene %in% alle_gene]
  message(
    "Schraenke 'pathway_gene' auf 'alle_gene' ein: Using ",
    length(pathway_gene_inbg),
    " instead of ",
    length(pathway_gene)
  )
  pathway_gene <- pathway_gene_inbg
  gezogene_inbg <- gezogene[gezogene %in% alle_gene]
  message(
    "Schraenke 'gezogene' auf 'alle_gene' ein: Using ",
    length(gezogene_inbg),
    " instead of ",
    length(gezogene)
  )
  gezogene <- gezogene_inbg
  # Sanity checks (should always hold after the restriction above).
  if (!all(pathway_gene %in% alle_gene))
    stop("nicht alle 'pathway_gene' (alle weissen Kugeln) in 'alle_gene' (der alle_gene)")
  if (!all(gezogene %in% alle_gene))
    stop("nicht alle 'gezogene' (alle gezogenen Kugeln) in 'alle_gene' (der alle_gene)")
  # BUG FIX: the original tested is.na(pathway_gene) three times; check all
  # three input vectors as intended.
  if (any(is.na(pathway_gene), is.na(gezogene), is.na(alle_gene)))
    stop("NA in den daten versteckt!")
  # 2x2 contingency counts: pathway membership x drawn status.
  n_inPathway_inGezogen <- sum(pathway_gene %in% gezogene)
  n_inPathway_notGezogen <- length(pathway_gene) - n_inPathway_inGezogen
  n_notPathway_inGezogen <- length(gezogene) - n_inPathway_inGezogen
  n_notPathway_notGezogen <- length(alle_gene) - n_notPathway_inGezogen -
    n_inPathway_notGezogen - n_inPathway_inGezogen
  # One-sided hypergeometric p-value; q - 1 so that P(X >= q) is returned
  # when more = TRUE (lower.tail = FALSE gives P(X > q - 1)).
  pval <- stats::phyper(
    n_inPathway_inGezogen - 1,
    n_inPathway_inGezogen + n_inPathway_notGezogen,
    n_notPathway_inGezogen + n_notPathway_notGezogen,
    n_inPathway_inGezogen + n_notPathway_inGezogen,
    lower.tail = !more
  )
  # Percent pathway members among the drawn genes vs. in the background.
  in_gezogen <- round(
    (n_inPathway_inGezogen / (n_inPathway_inGezogen + n_notPathway_inGezogen)) * 100, 3)
  in_bk <- round(
    ((n_inPathway_inGezogen + n_inPathway_notGezogen) /
       (n_inPathway_inGezogen + n_inPathway_notGezogen +
          n_notPathway_inGezogen + n_notPathway_notGezogen)) * 100, 3)
  enr <- round(in_gezogen / in_bk, 3)
  mymatrix <- matrix(
    c(
      n_inPathway_inGezogen,
      n_inPathway_notGezogen,
      n_notPathway_inGezogen,
      n_notPathway_notGezogen
    ),
    nrow = 2
  )
  or <- stats::fisher.test(mymatrix)
  pvalfisher <- or$p.value
  # BUG FIX: the upper CI bound was formatted without digits = 3; keep it
  # consistent with the lower bound.
  message1 <- paste(
    in_gezogen,
    "% vs. ",
    in_bk,
    "% Enrichment:",
    enr,
    "OR (95%CI) =",
    signif(or$estimate, 3),
    paste0(
      "(",
      signif(or$conf.int[1], 3), "-", signif(or$conf.int[2], 3),
      ")"),
    sep = " "
  )
  message2 <- paste("p hypergeomtrisch=",
                    signif(pval, 3),
                    "p fisher",
                    signif(pvalfisher, 3))
  message3 <- paste(
    n_inPathway_inGezogen,
    "in",
    n_inPathway_inGezogen + n_notPathway_inGezogen,
    "gezogenen vs.",
    n_inPathway_inGezogen + n_inPathway_notGezogen,
    "in",
    n_inPathway_inGezogen + n_inPathway_notGezogen + n_notPathway_inGezogen + n_notPathway_notGezogen,
    "(grundgesamtheit)",
    sep = " "
  )
  message(message1)
  message(message2)
  message(message3)
  res <- list(
    in_gezogen = in_gezogen,
    in_bk = in_bk,
    enrichment = enr,
    pval = pval,
    pval_fisher = pvalfisher,
    or = or$estimate,
    or_lower = or$conf.int[1],
    or_upper = or$conf.int[2],
    matrix = mymatrix,
    messages = c(message1,
                 message2,
                 message3),
    compactresult = data.frame(
      in_gezogen = in_gezogen,
      in_bk = in_bk,
      enrichment = enr,
      pval = pval,
      pval_fisher = pvalfisher,
      or = or$estimate,
      or_lower = or$conf.int[1],
      or_upper = or$conf.int[2],
      Bes_Gez = mymatrix[1],
      Bes_nichtGez = mymatrix[3],
      nichtBes_Gez = mymatrix[2],
      nichtBes_nichtGez = mymatrix[4],
      matrix = paste(mymatrix, collapse = ", "),
      row.names = NULL
    )
  )
  res
}
|
/hyper_test.R
|
no_license
|
cfbeuchel/imise_functions
|
R
| false
| false
| 4,015
|
r
|
hyper_test <- function (
pathway_gene,
gezogene,
alle_gene,
more = T,
unique = T)
{
if (unique == T) {
pathway_gene <- unique(pathway_gene)
gezogene <- unique(gezogene)
alle_gene <- unique(alle_gene)
}
pathway_gene_inbg = pathway_gene[pathway_gene %in% alle_gene]
message(
"Schraenke 'pathway_gene' auf 'alle_gene' ein: Using ",
length(pathway_gene_inbg),
" instead of ",
length(pathway_gene)
)
pathway_gene = pathway_gene_inbg
gezogene_inbg = gezogene[gezogene %in% alle_gene]
message(
"Schraenke 'gezogene' auf 'alle_gene' ein: Using ",
length(gezogene_inbg),
" instead of ",
length(gezogene)
)
gezogene = gezogene_inbg
if (all(pathway_gene %in% alle_gene) == F)
stop("nicht alle 'pathway_gene' (alle weissen Kugeln) in 'alle_gene' (der alle_gene)")
if (all(gezogene %in% alle_gene) == F)
stop("nicht alle 'gezogene' (alle gezogenen Kugeln) in 'alle_gene' (der alle_gene)")
if (any(is.na(pathway_gene),
is.na(pathway_gene),
is.na(pathway_gene)) ==
T)
stop("NA in den daten versteckt!")
n_inPathway_inGezogen <- sum(pathway_gene %in% gezogene)
n_inPathway_notGezogen <- length(pathway_gene) - n_inPathway_inGezogen
n_notPathway_inGezogen <- length(gezogene) - n_inPathway_inGezogen
n_notPathway_notGezogen <- length(alle_gene) - n_notPathway_inGezogen - n_inPathway_notGezogen - n_inPathway_inGezogen
pval = stats::phyper(
n_inPathway_inGezogen - 1,
n_inPathway_inGezogen + n_inPathway_notGezogen,
n_notPathway_inGezogen + n_notPathway_notGezogen,
n_inPathway_inGezogen + n_notPathway_inGezogen,
lower.tail = !more
)
in_gezogen <- round(
(
n_inPathway_inGezogen / (n_inPathway_inGezogen + n_notPathway_inGezogen)
) * 100, 3)
in_bk <- round(
(
(
n_inPathway_inGezogen + n_inPathway_notGezogen) / (
n_inPathway_inGezogen + n_inPathway_notGezogen + n_notPathway_inGezogen + n_notPathway_notGezogen
)
) * 100, 3)
enr <- round(in_gezogen / in_bk, 3)
mymatrix = matrix(
c(
n_inPathway_inGezogen,
n_inPathway_notGezogen,
n_notPathway_inGezogen,
n_notPathway_notGezogen
),
nrow = 2
)
or = stats::fisher.test(mymatrix)
pvalfisher = or$p.value
message1 = paste(
in_gezogen,
"% vs. ",
in_bk,
"% Enrichment:",
enr,
"OR (95%CI) =",
signif(or$estimate, 3),
paste0(
"(",
signif(or$conf.int[1], 3), "-", signif(or$conf.int[2]),
")"),
sep = " "
)
message2 = paste("p hypergeomtrisch=",
signif(pval, 3),
"p fisher",
signif(pvalfisher, 3))
message3 = paste(
n_inPathway_inGezogen,
"in",
n_inPathway_inGezogen + n_notPathway_inGezogen,
"gezogenen vs.",
n_inPathway_inGezogen +
n_inPathway_notGezogen,
"in",
n_inPathway_inGezogen + n_inPathway_notGezogen + n_notPathway_inGezogen + n_notPathway_notGezogen,
"(grundgesamtheit)",
sep = " "
)
message(message1)
message(message2)
message(message3)
res = list(
in_gezogen = in_gezogen,
in_bk = in_bk,
enrichment = enr,
pval = pval,
pval_fisher = pvalfisher,
or = or$estimate,
or_lower = or$conf.int[1],
or_upper = or$conf.int[2],
matrix = mymatrix,
messages = c(message1,
message2,
message3),
compactresult = data.frame(
in_gezogen = in_gezogen,
in_bk = in_bk,
enrichment = enr,
pval = pval,
pval_fisher = pvalfisher,
or = or$estimate,
or_lower = or$conf.int[1],
or_upper = or$conf.int[2],
Bes_Gez = mymatrix[1],
Bes_nichtGez = mymatrix[3],
nichtBes_Gez = mymatrix[2],
nichtBes_nichtGez = mymatrix[4],
matrix = paste(mymatrix, collapse = ", "),
row.names = NULL
)
)
res
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotCNV.R
\name{.selectRange.2}
\alias{.selectRange.2}
\title{Subset genomic ranges from multiple samples' segment-level SCNA data}
\usage{
.selectRange.2(
segDf,
chr = NULL,
start = NULL,
end = NULL,
genomeVersion = "hg38"
)
}
\arguments{
\item{segDf}{segment-level SCNA data frame containing one or multiple samples. This is usually the same input as GISTIC2.
Make sure there are these columns: "sample", "chromosome", "start", "end", "log2".}
\item{chr}{a character vector specifying which chromosome(s) to select}
\item{start}{a integer vector specifying the start positions of each chromosome.
If NULL, the position will be the beginning of the chromosome(s). Default is NULL.}
\item{end}{a integer vector specifying the end positions of each chromosome.
If NULL, the position will be the end of the chromosome(s). Default is NULL.}
\item{genomeVersion}{which genome version are you working on? Can be either 'hg38' or 'hg19'. Default is 'hg38'.}
}
\value{
a data frame representing the subsetted segment-level SCNA data from the input.
}
\description{
Subset genomic ranges from multiple samples' segment-level SCNA data
}
|
/man/dot-selectRange.2.Rd
|
no_license
|
bzhanglab/genomicWidgets
|
R
| false
| true
| 1,220
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotCNV.R
\name{.selectRange.2}
\alias{.selectRange.2}
\title{Subset genomic ranges from multiple samples' segment-level SCNA data}
\usage{
.selectRange.2(
segDf,
chr = NULL,
start = NULL,
end = NULL,
genomeVersion = "hg38"
)
}
\arguments{
\item{segDf}{segment-level SCNA data frame containing one or multiple samples. This is usually the same input as GISTIC2.
Make sure there are these columns: "sample", "chromosome", "start", "end", "log2".}
\item{chr}{a character vector specifying which chromosome(s) to select}
\item{start}{a integer vector specifying the start positions of each chromosome.
If NULL, the position will be the beginning of the chromosome(s). Default is NULL.}
\item{end}{a integer vector specifying the end positions of each chromosome.
If NULL, the position will be the end of the chromosome(s). Default is NULL.}
\item{genomeVersion}{which genome version are you working on? Can be either 'hg38' or 'hg19'. Default is 'hg38'.}
}
\value{
a data frame representing the subsetted segment-level SCNA data from the input.
}
\description{
Subset genomic ranges from multiple samples' segment-level SCNA data
}
|
## run_analysis.R -- builds a tidy summary of the UCI HAR smartphone
## dataset: downloads the data, merges the test/train sets, keeps mean/std
## measurements, labels activities, cleans variable names, and writes
## per-subject/activity averages to final.txt.
## Checking the availability of required packages & loading the packages
dplyr <- "dplyr" %in% rownames(installed.packages())
if(dplyr == FALSE){
  install.packages("dplyr")
}
library(dplyr)
## Checking the availability of the folder
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipName <- "projectDataset.zip"
folderName <- "UCI HAR Dataset"
if(!file.exists(folderName)) {
  download.file(fileUrl, zipName)
  unzip(zipName)
}
## Reading the files
# features.txt maps column indices to measurement names; used to name the
# X_* columns below.
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("i", "feature"))
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("index", "activity"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$feature)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "index")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$feature)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "index")
## Merging test and training datasets into one
# Rows: test set first, then train; columns: subject, activity index,
# feature measurements.
subject <- rbind(subject_test, subject_train)
y <- rbind(y_test, y_train)
x <- rbind(x_test, x_train)
complete_dataset <- cbind(subject, y, x)
## Extracting measurements on mean and standard deviation
# contains() matches any column whose name includes "mean"/"std".
tidy <- select(complete_dataset, subject, index, contains("mean"), contains("std") )
## Describing activity names
# Replace the numeric activity index with its descriptive label.
tidy$index <- factor(tidy$index,labels = activity_labels$activity)
## Renaming the variables so that they are more descriptive
# Expand the dataset's abbreviations (t = Time, f = Frequency, Acc =
# Accelerometer, ...) and fix the duplicated "BodyBody" artifact.
names(tidy) <- gsub("BodyBody", "Body", names(tidy), ignore.case = T)
names(tidy) <- gsub("freq", "Frequency", names(tidy), ignore.case = T)
names(tidy) <- gsub("std", "Standard.Deviation", names(tidy), ignore.case = T)
names(tidy) <- gsub("mag", "Magnitude", names(tidy), ignore.case = T)
names(tidy) <- gsub("gyro", "Gyroscope", names(tidy), ignore.case = T)
names(tidy) <- gsub("acc", "Accelerometer", names(tidy), ignore.case = T)
names(tidy) <- gsub("^angle", "Angle", names(tidy), ignore.case = T)
names(tidy) <- gsub("gravity", "Gravity", names(tidy), ignore.case = T)
names(tidy) <- gsub("mean", "Mean", names(tidy), ignore.case = T)
names(tidy) <- gsub("^Angle.t", "Angle.Time", names(tidy))
names(tidy) <- gsub("^f", "Frequency", names(tidy))
names(tidy) <- gsub("^t", "Time", names(tidy))
names(tidy)[1] <- "Subject"
names(tidy)[2] <- "Activity"
## Placing dots between every word in the variable name
names(tidy) <- gsub("\\.", "", names(tidy)) ## removes all dots
# Lookahead regex inserts a dot before every interior capital letter.
names(tidy) <- gsub("(?!^)(?=[[:upper:]])", ".", names(tidy), perl=T) ## adds new dots
## Creating the averaged database grouped by activity & subject
summaryDB <- group_by(tidy, Subject, Activity)
final <- summarise_all(summaryDB, mean)
write.table(final, "final.txt", row.name=FALSE)
|
/run_analysis.R
|
no_license
|
AhmadAbdulhameed/Getting-and-Cleaning-Data-Project
|
R
| false
| false
| 3,112
|
r
|
## Checking the availability of required packages & loading the packages
dplyr <- "dplyr" %in% rownames(installed.packages())
if(dplyr == FALSE){
install.packages("dplyr")
}
library(dplyr)
## Checking the availability of the folder
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipName <- "projectDataset.zip"
folderName <- "UCI HAR Dataset"
if(!file.exists(folderName)) {
download.file(fileUrl, zipName)
unzip(zipName)
}
## Reading the files
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("i", "feature"))
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("index", "activity"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$feature)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "index")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$feature)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "index")
## Merging test and training datasets into one
subject <- rbind(subject_test, subject_train)
y <- rbind(y_test, y_train)
x <- rbind(x_test, x_train)
complete_dataset <- cbind(subject, y, x)
## Extracting measurements on mean and standard deviation
tidy <- select(complete_dataset, subject, index, contains("mean"), contains("std") )
## Describing activity names
tidy$index <- factor(tidy$index,labels = activity_labels$activity)
## Renaming the variables so that they are more descriptive
names(tidy) <- gsub("BodyBody", "Body", names(tidy), ignore.case = T)
names(tidy) <- gsub("freq", "Frequency", names(tidy), ignore.case = T)
names(tidy) <- gsub("std", "Standard.Deviation", names(tidy), ignore.case = T)
names(tidy) <- gsub("mag", "Magnitude", names(tidy), ignore.case = T)
names(tidy) <- gsub("gyro", "Gyroscope", names(tidy), ignore.case = T)
names(tidy) <- gsub("acc", "Accelerometer", names(tidy), ignore.case = T)
names(tidy) <- gsub("^angle", "Angle", names(tidy), ignore.case = T)
names(tidy) <- gsub("gravity", "Gravity", names(tidy), ignore.case = T)
names(tidy) <- gsub("mean", "Mean", names(tidy), ignore.case = T)
names(tidy) <- gsub("^Angle.t", "Angle.Time", names(tidy))
names(tidy) <- gsub("^f", "Frequency", names(tidy))
names(tidy) <- gsub("^t", "Time", names(tidy))
names(tidy)[1] <- "Subject"
names(tidy)[2] <- "Activity"
## Placing dots between every word in the variable name
names(tidy) <- gsub("\\.", "", names(tidy)) ## removes all dots
names(tidy) <- gsub("(?!^)(?=[[:upper:]])", ".", names(tidy), perl=T) ## adds new dots
## Creating the averaged database grouped by activity & subject
summaryDB <- group_by(tidy, Subject, Activity)
final <- summarise_all(summaryDB, mean)
write.table(final, "final.txt", row.name=FALSE)
|
## Build, for the top-10 enriched GO terms of each ontology (BP/CC/MF), the
## neighbourhood network of the HALLMARK_INFLAMMATORY_RESPONSE gene set in
## the human PIN, and record how many network nodes carry the GO term only
## (blue), belong to the gene set only (red), or both (green).
# NOTE(review): rm(list=ls()) and the header=T / stringsAsFactors=F
# shortcuts are throwaway-script style; avoid in reusable code.
rm(list=ls())
library(igraph)
human.pin <- read.csv("../../human.pin.csv", header=T, stringsAsFactors = F)
geneA <- human.pin$geneA
geneB <- human.pin$geneB
# Alternative input: a randomized (ms02) network, kept commented for reruns.
#human.pin <- read.csv("../../../ms02star/human/ms02.1.csv", header=T, stringsAsFactors=F)
#geneA <- human.pin$id1
#geneB <- human.pin$id2
#All genes appear in PIN
all.list <- unique(c(geneA, geneB))
#the Gene list
geneset <- read.csv("HALLMARK_INFLAMMATORY_RESPONSE.csv", header=T, stringsAsFactors = F)
gene.list <- geneset$gene
# GO annotation table: one (goid, gene) pair per row.
go.file <- read.csv("../../human.all.go.csv", header=T, stringsAsFactors = F)
go.id <- go.file$goid
go.gene <- go.file$gene
go.type <- c("bp", "cc", "mf")
record <- c()
#loop1: iterate over the three GO ontologies
for (i in 1:3) {
  gofile.name <- paste("HMK.", go.type[i], ".new.txt", sep="")
  enriched <- read.csv(gofile.name, sep="\t", header=T, stringsAsFactors=F)
  #loop2: the top 10 enriched terms of this ontology
  for (k in 1:10) {
    title <- paste(go.type[i], ".", k, sep="")
    term <- enriched$GO.ID[k]
    term.list <- go.gene[which(go.id %in% term)]
    # Edges incident to the gene set: neibor* holds the partner node,
    # sub* the gene-set member, for each edge direction.
    neiborA <- geneA[which(geneB %in% gene.list)]
    subA <- geneB[which(geneB %in% gene.list)]
    neiborB <- geneB[which(geneA %in% gene.list)]
    subB <- geneA[which(geneA %in% gene.list)]
    # Keep only edges whose neighbour carries the GO term.
    selA <- neiborA[which(neiborA %in% term.list)]
    selA2 <- subA[which(neiborA %in% term.list)]
    selB <- subB[which(neiborB %in% term.list)]
    selB2 <- neiborB[which(neiborB %in% term.list)]
    web1 <- cbind(selA, selA2)
    web2 <- cbind(selB, selB2)
    web <- rbind(web1,web2)
    neibor.web <- data.frame(web)
    neibor.graph <- graph.data.frame(neibor.web, directed = F)
    '%ni%' <- Negate('%in%')
    # Classify vertices of the neighbourhood graph: blue = GO term only,
    # red = gene set only, green = both.
    blue.id <- which(as_ids(V(neibor.graph)) %in% term.list & as_ids(V(neibor.graph)) %ni% gene.list)
    red.id <- which(as_ids(V(neibor.graph)) %in% gene.list & as_ids(V(neibor.graph)) %ni% term.list)
    green.id <- which(as_ids(V(neibor.graph)) %in% gene.list & as_ids(V(neibor.graph)) %in% term.list)
    record <- rbind(record, c(title, length(blue.id), length(red.id), length(green.id)))
  }#end loop2 (terms); the original label here said loop1
}#end loop1 (ontologies); the original label here said loop2
write.table(record, file="neighbor.network.dist.csv", sep=",", row.names=F, col.names=F, quote=F)
|
/Data/MSigDB.go.pathway/HALLMARK_INFLAMMATORY_RESPONSE/neibor.network.R
|
no_license
|
haoboguo/NetBAS
|
R
| false
| false
| 2,024
|
r
|
rm(list=ls())
library(igraph)
human.pin <- read.csv("../../human.pin.csv", header=T, stringsAsFactors = F)
geneA <- human.pin$geneA
geneB <- human.pin$geneB
#human.pin <- read.csv("../../../ms02star/human/ms02.1.csv", header=T, stringsAsFactors=F)
#geneA <- human.pin$id1
#geneB <- human.pin$id2
#All genes appear in PIN
all.list <- unique(c(geneA, geneB))
#the Gene list
geneset <- read.csv("HALLMARK_INFLAMMATORY_RESPONSE.csv", header=T, stringsAsFactors = F)
gene.list <- geneset$gene
go.file <- read.csv("../../human.all.go.csv", header=T, stringsAsFactors = F)
go.id <- go.file$goid
go.gene <- go.file$gene
go.type <- c("bp", "cc", "mf")
record <- c()
#loop1
for (i in 1:3) {
gofile.name <- paste("HMK.", go.type[i], ".new.txt", sep="")
enriched <- read.csv(gofile.name, sep="\t", header=T, stringsAsFactors=F)
#loop2
for (k in 1:10) {
title <- paste(go.type[i], ".", k, sep="")
term <- enriched$GO.ID[k]
term.list <- go.gene[which(go.id %in% term)]
neiborA <- geneA[which(geneB %in% gene.list)]
subA <- geneB[which(geneB %in% gene.list)]
neiborB <- geneB[which(geneA %in% gene.list)]
subB <- geneA[which(geneA %in% gene.list)]
selA <- neiborA[which(neiborA %in% term.list)]
selA2 <- subA[which(neiborA %in% term.list)]
selB <- subB[which(neiborB %in% term.list)]
selB2 <- neiborB[which(neiborB %in% term.list)]
web1 <- cbind(selA, selA2)
web2 <- cbind(selB, selB2)
web <- rbind(web1,web2)
neibor.web <- data.frame(web)
neibor.graph <- graph.data.frame(neibor.web, directed = F)
'%ni%' <- Negate('%in%')
blue.id <- which(as_ids(V(neibor.graph)) %in% term.list & as_ids(V(neibor.graph)) %ni% gene.list)
red.id <- which(as_ids(V(neibor.graph)) %in% gene.list & as_ids(V(neibor.graph)) %ni% term.list)
green.id <- which(as_ids(V(neibor.graph)) %in% gene.list & as_ids(V(neibor.graph)) %in% term.list)
record <- rbind(record, c(title, length(blue.id), length(red.id), length(green.id)))
}#loop1
}#loop2
write.table(record, file="neighbor.network.dist.csv", sep=",", row.names=F, col.names=F, quote=F)
|
setwd("C:/Users/Nites/Desktop/Labs/7th Semester/DSR/Lab - 02/csv/")
data <- read.csv("indian_food.csv")
data
summary(data)
ncol(data)
nrow(data)
plot(x=data$prep_time,y=data$cook_time)
|
/7th Semester/DSR/Lab - 02/execution/1a.R
|
no_license
|
niteshsrivats/Labs
|
R
| false
| false
| 185
|
r
|
setwd("C:/Users/Nites/Desktop/Labs/7th Semester/DSR/Lab - 02/csv/")
data <- read.csv("indian_food.csv")
data
summary(data)
ncol(data)
nrow(data)
plot(x=data$prep_time,y=data$cook_time)
|
#' S3 class bake_par
#
#' @param .x A list to be constructed into **bake_par**.
#'
#' @returns
#'
#' Constructor function for bake_par class. This function ensures that leaf
#' temperature gets properly "baked" into leaf parameters.
#'
#' @export
bake_par = function(.x) {
which = "bake"
# Check parameters names ----
nms = check_parameter_names(.x, which = which, use_tealeaves = FALSE)
.x = .x[nms]
# Set units ----
.x = .x |>
set_parameter_units(
.data$type == which,
!.data$temperature_response,
!.data$tealeaves
)
# Assert bounds on values ----
.x |>
assert_parameter_bounds(
.data$type == which,
!.data$temperature_response,
!.data$tealeaves
)
structure(.x, class = c(stringr::str_c(which, "_par"), "list"))
}
|
/R/bake-par.R
|
permissive
|
cran/photosynthesis
|
R
| false
| false
| 808
|
r
|
#' S3 class bake_par
#
#' @param .x A list to be constructed into **bake_par**.
#'
#' @returns
#'
#' Constructor function for bake_par class. This function ensures that leaf
#' temperature gets properly "baked" into leaf parameters.
#'
#' @export
bake_par = function(.x) {
which = "bake"
# Check parameters names ----
nms = check_parameter_names(.x, which = which, use_tealeaves = FALSE)
.x = .x[nms]
# Set units ----
.x = .x |>
set_parameter_units(
.data$type == which,
!.data$temperature_response,
!.data$tealeaves
)
# Assert bounds on values ----
.x |>
assert_parameter_bounds(
.data$type == which,
!.data$temperature_response,
!.data$tealeaves
)
structure(.x, class = c(stringr::str_c(which, "_par"), "list"))
}
|
library(readstata13) #for reading the stata files
library(dplyr) #data manipulation
library(haven)##foreign
library(htmlTable)##html tables
library(magrittr) #manipulate
library(loose.rock) #for proper changing of the string cases
#####from yvone mergeed
df1<- read.dta13("C:/Users/user/Desktop/timeuse/datastata/tus_rediac.dta") ##read the merged data from stata
#View(df1)
#head(df1)
length(unique(df1$b02)) ##the people selected by the KISH
str(df1$r02) #structure of the day of the week
levels(df1$r02) ##levels
#df1 %>% filter(is.na(r02)) %>% as.data.frame() ##missing day of the week
#df1 %>% filter(a09==3199)%>% View() ##its a school going day, we randomly allocate TUESDAY
df1$r02[is.na(df1$r02)]<-"TUESDAY" ##replacing the NA in the dataset with TUESDAY
df1 %>% filter(a09==3199)%>%
group_by(a09,a10,r02) %>%
summarise() %>%
select(a09,a10,r02) %>% View() #cluster household number and day of the week for the cluster ensure none is missing
##create new categories for marital status according to suggestions by team
df1$b07_1<-df1$b07
df1$b07_1<-as.character(df1$b07_1)
df1$b07_1[df1$b07_1=="MARRIED MONOGAMOUS" | df1$b07_1=="MARRIED POLYGAMOUS" | df1$b07_1=="LIVING TOGETHER"] <- "married(mono.poly.living_together)"
df1$b07_1[df1$b07_1=="SEPARATED" | df1$b07_1=="DIVORCED" | df1$b07_1=="WIDOW OR WIDOWER" ] <- "not.married(divorced.separated.widow_widower"
df1$b07_1[df1$b07_1=="NEVER MARRIED"]<-"never.married"
df1$b07_1<-as.factor(df1$b07_1)
table(df1$b07_1)
##proposed age categories <15, 15-24, 25-44, 45-54, 55-64 and 65+
##adding the age categories in the dataset
df1$agecat[df1$b05_years<15] <- "<15"
df1$agecat[15<=df1$b05_years & df1$b05_years<=24 ] <- "15-24"
df1$agecat[25<=df1$b05_years & df1$b05_years<=44 ] <- "25-44"
df1$agecat[45<=df1$b05_years & df1$b05_years<=54 ] <- "45-54"
df1$agecat[55<=df1$b05_years & df1$b05_years<=64 ] <- "55-64"
df1$agecat[df1$b05_years>=65] <- "65+"
#######BEGINING OF THE BASICS
df1 %>%
count(Residence, b04) %>% #line one counts total residence and gender
rename(gender=b04) %>% #renaming the columns in the dataset
mutate(Residence=recode(Residence,'1'="Rural",'2'="Urban")) %>% ##recoding the variables into respective groups
as.data.frame() %>% #convert the data to dataframe
# addmargins() %>% #summing the rows and columns
htmlTable ## produce an html table that is copied direct to excel. click on unwrap in excel to format
###### END OF THE BASICS
#respodents whose paid activity exceeeded the normal 8hr*60=480 minutes put it at 600 minutes
head(df1)
#View(df1)
#paid work is r09; time in minutes is r04; gender is b04 ;name is b02 ;
#filtering paid work whose minutes exceed the normal
d1<-df1 %>%
group_by(b02,r09,b04)%>%
summarise(time_tot=sum(r04))%>%
filter(r09=="Productive Paid Work")%>%
arrange(desc(time_tot))%>%
filter(time_tot>600)%>%
data.frame()%>%
select(b02,b04)
d1 ##all the names of the people whose paid work exceeded the slated 600 minutes
d2<-df1 %>% filter(b02 %in% d1$b02) #filtering the people in the large dataset
head(d2)
n_distinct(d2$b02) #confirming the unique number of the selected people
###
#Table 2 starts here activity, gender; residence, and time
###
####how many hours per day do respodents take on the 4 different activities
##method 1
d2<-df1 %>%
group_by(r09,b04,Residence)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))%>% as.data.frame() #%>% htmlTable()
d2
###method 2 reolace the length() with n()
d2<-df1 %>%
group_by(r09,b04,Residence)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=n())
d2
##number of respodents in terms of gender and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,Residence) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
d3<-df1 %>%
group_by(b02,r09,b04,Residence) %>%
summarise(no.of.occurence.in.hrs.per.person=n()) %>%arrange(desc(no.of.occurence.in.hrs.per.person)) ##add this if you want to sort in descening order
d3
#### table lost
#method 1
d2<-df1 %>%
group_by(r09,b04)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=n())
d2
d4<-d3 %>% group_by(r09,b04)%>%
summarise(no.of.people.in.activity=n())
d4
##geat table in HTML and you copy paste in excel
d5<- d4 %>% left_join(d2, by=c("r09", "b04"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hour.in.activ<-round((d5$average.min.in.activ/60),1)
d5 %>% as.data.frame() %>%
#select(r09,b04,no.of.people.in.activity,average.min.in.activ,average.hour.in.activ) %>% #if you wish to see number of ppl who reported this
select(r09,b04,average.min.in.activ,average.hour.in.activ) %>% ##avoid reporting number of people in activity
rename(Main_Activity=r09,Gender=b04)%>%
# mutate(Residence=recode(Residence,'1'="Rural",'2'="Urban")) %>%
htmlTable()
###
#find the number of people involved in each activity
#method 1
d2<-df1 %>%
group_by(r09,b04,Residence)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=n())
d2
d4<-d3 %>% group_by(r09,b04,Residence)%>%
summarise(no.of.people.in.activity=n())
d4
##geat table in HTML and you copy paste in excel
d5<- d4 %>% left_join(d2, by=c("r09", "b04","Residence"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) ##minutes in activity
d5$average.hours.in.activ<-round((d5$average.min.in.activ/60),1) #divide by 60 minutes for hours
d5 %>% as.data.frame() %>%
# select(r09,b04,Residence,no.of.people.in.activity,average.min.in.activ) %>% #see the abobe comment on reporting number of people in activity
select(r09,b04,Residence,average.min.in.activ,average.hours.in.activ) %>%
rename(Main_Activity=r09,Gender=b04)%>%
mutate(Residence=recode(Residence,'1'="Rural",'2'="Urban")) %>%
htmlTable
###
#Table 3 starts here; activity, gender: day of the week
###
d2<-df1 %>%
group_by(r09,b04,r02)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2
d2 %>% filter(is.na(r02)) #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,r02) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,r02)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","r02"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>%
rename(Main_activity=r09, Gender=b04, Day.of.week=r02)%>%
as.data.frame() %>%
select(Main_activity, Gender, Day.of.week,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
d<-d5
d<-d5 %>% filter(!is.na(r02)) ##check if the NA could be sartudays since its missing from data
###
#Table 4 starts here; activity, sex: marital status
###
d2<-df1 %>%
group_by(r09,b04,b07_1)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,b07_1) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,b07_1)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","b07_1"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% as.data.frame()%>%
rename(Activity=r09,sex=b04,marital.status=b07_1)%>%
select(Activity,sex,marital.status,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
names(d5)
d<-d5
###
#Table 4b starts here; activity, without _sex: marital status
d2<-df1 %>%
group_by(r09,b07_1)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b07_1) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b07_1)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09","b07_1"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% as.data.frame()%>%
rename(Activity=r09,marital.status=b07_1)%>%
select(Activity,marital.status,average.min.in.activ,average.hours.in.activ) %>%
htmlTable()
##################
#####
####
#Table 5 starts here; activity, gender: age category 12-17, 18-34, 35 and above
###
d2<-df1 %>%
group_by(r09,b04,agecat)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,agecat) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,agecat)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","agecat"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5
d5 %>% as.data.frame()%>%
rename(Activity=r09,sex=b04)%>%
select(Activity,sex,agecat,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
###----------------------starts here
#Table 5 starts here; activity, gender: age category 12-17, 18-34, 35 and above
###
d2<-df1 %>%
group_by(r09,b04,r07)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,r07) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,r07)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","r07"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% as.data.frame()%>%
rename(Activity=r09,sex=b04,who.did.you.do.it.for=r07)%>%
select(Activity,sex,who.did.you.do.it.for,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
######----------------ends here
###----------------------starts here
#Table 6 starts here; activity, gender: age category 12-17, 18-34, 35 and above
###
d2<-df1 %>%
group_by(r09,b04,r08)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,r08) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,r08)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","r08"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% as.data.frame() %>%
rename(Activity=r09,sex=b04,was.it.for.pay=r08)%>%
select(Activity,sex,was.it.for.pay,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
######----------------ends here
###----------------------starts here
#######
#Table 7 starts here; activity, place of occurence and without_sex
###
d2<-df1 %>%
group_by(r09,r11)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,r11) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,r11)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09","r11"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% rename(Activity=r09,place.activity.occur=r11)%>%
select(Activity,place.activity.occur,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
#Table 7 starts here; activity, place of occurence and sex
###
d2<-df1 %>%
group_by(r09,b04,r11)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,r11) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,r11)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","r11"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% rename(Activity=r09,sex=b04,place.activity.occur=r11)%>%
select(Activity,sex,place.activity.occur,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
######----------------ends here
###----------------------starts here
#Table 8 starts here; r09_1, gender: r10__1 activity done alone by genderMALE
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__1) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__1)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__1==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.BY.SELF=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.BY.SELF,gender,no.of.people.in.activity)%>%
htmlTable()
#Table 8 starts here; r09_1, gender: r10__1 activity done alone by gender FEMALE
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__1) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__1)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__1==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.BY.SELF=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.BY.SELF,gender,no.of.people.in.activity)%>%
htmlTable()
######----------------ends here
#Table xxx starts here; r09_1, gender: r10__2 ,children 5years and below MALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__2) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__2)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__2==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.with.CHILDREN.BELOW.FIVE.YEARS=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.with.CHILDREN.BELOW.FIVE.YEARS,gender,no.of.people.in.activity)%>%
htmlTable()
#####for female
#Table xxx starts here; r09_1, gender: r10__2 ,children 5years and below FEMALE
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__2) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__2)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__2==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.BY.CHILDREN.BELOW.FIVE.YEARS=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.BY.CHILDREN.BELOW.FIVE.YEARS,gender,no.of.people.in.activity)%>%
htmlTable()
######----------------ends here
#Table xxx starts here; r09_1, gender: r10__3 ,children 5years to 17 MALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__3) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__3)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__3==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.between.FIVE.AND.SEVENTEEN.YEARS=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.between.FIVE.AND.SEVENTEEN.YEARS,gender,no.of.people.in.activity)%>%
htmlTable()
#####for female
#Table xxx starts here; r09_1, gender: r10__3 ,children 5years to 17 FEMALE
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__3) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__3)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__3==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.between.FIVE.AND.SEVENTEEN.YEARS=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.between.FIVE.AND.SEVENTEEN.YEARS,gender,no.of.people.in.activity)%>%
htmlTable()
######----------------ends here
#Table xxx starts here; r09_1, gender: r10__4 ,with another household member MALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__4) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__4)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__4==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.ANOTHER.HH.MEMBER=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.ANOTHER.HH.MEMBER,gender,no.of.people.in.activity)%>%
htmlTable()
#Table xxx starts here; r09_1, gender: r10__4 ,with another household member FEMALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__4) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__4)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__4==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.ANOTHER.HH.MEMBER=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.ANOTHER.HH.MEMBER,gender,no.of.people.in.activity)%>%
htmlTable()
#Table xxx starts here; r09_1, gender: r10__4 ,with a collegue MALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__5) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__5)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__5==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.colleague=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.colleague,gender,no.of.people.in.activity)%>%
htmlTable()
#Table xxx starts here; r09_1, gender: r10__4 ,with a collegue FEMALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__5) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__5)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__5==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.colleague=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.colleague,gender,no.of.people.in.activity)%>%
htmlTable()
#Table xxx starts here; r09_1, gender: r10__6 ,with a other known to the respodent MALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__6) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__6)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__6==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.others=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.others,gender,no.of.people.in.activity)%>%
htmlTable()
#Table xxx starts here; r09_1, gender: r10__6 ,with a other known to the respodent FEMALE
### female
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__6) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__6)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__6==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.others=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.others,gender,no.of.people.in.activity)%>%
htmlTable()
###ends here
##################### activities with whom ends here
##maximum 3 main activities in the
#####3 main activities in every hour
df1 %>%
select(tus_diary__id, r09_1) %>% ##select only the diary and the specific activities
filter(!is.na(r09_1)) %>% ##filter any variable whhich is missing
group_by(tus_diary__id, r09_1) %>% ##grouping by time in diary and the specific activity
summarise(no.of.activities=n()) %>% ##number of people reporting activities at that time
ungroup() %>%
group_by(tus_diary__id) %>%
arrange(desc(no.of.activities)) %>% ## arrange the activities in descedning order within the group.
slice(1,2,3)%>% ##slice(1) for the top most activity; slice(1,2) for the two top most; slice(1,2,3) the third top most activity in that hour
ungroup()%>% ##ungroup from the main groupings
as.data.frame() %>% ###incase you want to write or merge twith other dataset
htmlTable() #create a html table, copy to excel and unwrap, justify left
##perception tables
dp1 <- read.dta13("C:/Users/user/Desktop/timeuse/datastata/perception_roster.dta")
head(dp1)
##merge with the main data from yvone
dp4<-df1 %>% left_join(dp1, by=c("tus_respondent__id","interview__id"))
#head(dp4)
dp44<-dp4 %>% select(-contains("r06__")) ##unselect the numerous r06__which making the data heavier
head(dp44)
dp44<-dp4 %>% filter(contains("r06__"))
dp55<- dp44 %>% select(interview__id,b04,b07_1,agecat,perception_roster__id,r15_32)
#View(dp55)
dpft<-dp55 %>% filter(!is.na(r15_32)) ##remove the NA from the answers
dp56<-dpft %>% group_by(interview__id,b04,b07_1,agecat,perception_roster__id,r15_32) %>% summarise() ##removing duplivated by grouping
head(dp56)
table(dp56$r15_32)
levels(dp1$perception_roster__id)
[1] "R15:Girls under 18 years may be married"
[2] "R16:Boys under 18 years may marry"
[3] "R17:Girls and women should undergo Female Genital Mutilation/Cut (FGM/C) as a rite of passage"
[4] "R18:Girls should spend more time on domestic work than boys"
[5] "R19:Girls and boys should spend the same amount of time on domestic work"
[6] "R20:It is the responsibility of women and girls to cook for their families"
[7] "R21:Men should help with cooking for their families"
[8] "R22:It is a woman's responsibility to take care of her home, family and the elderly"
[9] "R23:Childcare is the mother's responsibility"
[10] "R24:It is shameful for men to be found by friends and neighbours performing household chores"
[11] "R25:Men and women should equally share household tasks and childcare if both are working"
[12] "R26:Men's work is more important than women's work"
[13] "R27:Both husband and wife should contribute financially for the wellbeing of the family"
[14] "R28:Both husband and wife should manage the income/expenses of the household"
[15] "R29:Housewives would prefer to do paid work if they could"
[16] "R30:Husbands prefer housewives to working wives"
[17] "R31:It is justified for a man to beat a wife/partner for not completing household chores"
[18] "R32:It is justified for a woman to beat a husband/partner for neglect of responsibility"
d<-table(dp56$perception_roster__id,dp56$r15_32,dp56$b04) # %>% htmlTable()
d<-table(dp56$perception_roster__id,dp56$r15_32,dp56$agecat)
results_clipboard <- function(d, sep="\t", dec=".", max.size=(200*1000)) # Copy a data.frame to clipboard
{
write.table(d, paste0("clipboard-", formatC(max.size, format="f", digits=0)), sep=sep, row.names=TRUE, dec=dec)
}
results_clipboard(d) ###then paste in excel
########tables and percentages
|
/TimeUseSurvey.R
|
no_license
|
samwenda/Time-use-survey-analysis
|
R
| false
| false
| 29,975
|
r
|
library(readstata13) #for reading the stata files
library(dplyr) #data manipulation
library(haven)##foreign
library(htmlTable)##html tables
library(magrittr) #manipulate
library(loose.rock) #for proper changing of the string cases
#####from yvone mergeed
df1<- read.dta13("C:/Users/user/Desktop/timeuse/datastata/tus_rediac.dta") ##read the merged data from stata
#View(df1)
#head(df1)
length(unique(df1$b02)) ##the people selected by the KISH
str(df1$r02) #structure of the day of the week
levels(df1$r02) ##levels
#df1 %>% filter(is.na(r02)) %>% as.data.frame() ##missing day of the week
#df1 %>% filter(a09==3199)%>% View() ##its a school going day, we randomly allocate TUESDAY
df1$r02[is.na(df1$r02)]<-"TUESDAY" ##replacing the NA in the dataset with TUESDAY
df1 %>% filter(a09==3199)%>%
group_by(a09,a10,r02) %>%
summarise() %>%
select(a09,a10,r02) %>% View() #cluster household number and day of the week for the cluster ensure none is missing
##create new categories for marital status according to suggestions by team
df1$b07_1<-df1$b07
df1$b07_1<-as.character(df1$b07_1)
df1$b07_1[df1$b07_1=="MARRIED MONOGAMOUS" | df1$b07_1=="MARRIED POLYGAMOUS" | df1$b07_1=="LIVING TOGETHER"] <- "married(mono.poly.living_together)"
df1$b07_1[df1$b07_1=="SEPARATED" | df1$b07_1=="DIVORCED" | df1$b07_1=="WIDOW OR WIDOWER" ] <- "not.married(divorced.separated.widow_widower"
df1$b07_1[df1$b07_1=="NEVER MARRIED"]<-"never.married"
df1$b07_1<-as.factor(df1$b07_1)
table(df1$b07_1)
##proposed age categories <15, 15-24, 25-44, 45-54, 55-64 and 65+
##adding the age categories in the dataset
## A single cut() replaces the chain of subset-assignments: it creates the
## column explicitly, produces the same labels for integer ages, and also
## covers fractional ages (e.g. 24.5) that fell through the gaps between the
## original <=24 / >=25 style conditions. Missing ages stay NA, as before.
df1$agecat <- as.character(cut(df1$b05_years,
                               breaks = c(-Inf, 15, 25, 45, 55, 65, Inf),
                               labels = c("<15", "15-24", "25-44", "45-54", "55-64", "65+"),
                               right = FALSE))
#######BEGINING OF THE BASICS
df1 %>%
count(Residence, b04) %>% #line one counts total residence and gender
rename(gender=b04) %>% #renaming the columns in the dataset
mutate(Residence=recode(Residence,'1'="Rural",'2'="Urban")) %>% ##recoding the variables into respective groups
as.data.frame() %>% #convert the data to dataframe
# addmargins() %>% #summing the rows and columns
htmlTable ## produce an html table that is copied direct to excel. click on unwrap in excel to format
###### END OF THE BASICS
#respodents whose paid activity exceeeded the normal 8hr*60=480 minutes put it at 600 minutes
head(df1)
#View(df1)
#paid work is r09; time in minutes is r04; gender is b04 ;name is b02 ;
#filtering paid work whose minutes exceed the normal
d1<-df1 %>%
group_by(b02,r09,b04)%>%
summarise(time_tot=sum(r04))%>%
filter(r09=="Productive Paid Work")%>%
arrange(desc(time_tot))%>%
filter(time_tot>600)%>%
data.frame()%>%
select(b02,b04)
d1 ##all the names of the people whose paid work exceeded the slated 600 minutes
d2<-df1 %>% filter(b02 %in% d1$b02) #filtering the people in the large dataset
head(d2)
n_distinct(d2$b02) #confirming the unique number of the selected people
###
#Table 2 starts here activity, gender; residence, and time
###
####how many hours per day do respodents take on the 4 different activities
##method 1
d2<-df1 %>%
group_by(r09,b04,Residence)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))%>% as.data.frame() #%>% htmlTable()
d2
###method 2 reolace the length() with n()
d2<-df1 %>%
group_by(r09,b04,Residence)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=n())
d2
##number of respodents in terms of gender and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,Residence) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
d3<-df1 %>%
group_by(b02,r09,b04,Residence) %>%
summarise(no.of.occurence.in.hrs.per.person=n()) %>%arrange(desc(no.of.occurence.in.hrs.per.person)) ##add this if you want to sort in descening order
d3
#### table lost
#method 1
d2<-df1 %>%
group_by(r09,b04)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=n())
d2
d4<-d3 %>% group_by(r09,b04)%>%
summarise(no.of.people.in.activity=n())
d4
##geat table in HTML and you copy paste in excel
d5<- d4 %>% left_join(d2, by=c("r09", "b04"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hour.in.activ<-round((d5$average.min.in.activ/60),1)
d5 %>% as.data.frame() %>%
#select(r09,b04,no.of.people.in.activity,average.min.in.activ,average.hour.in.activ) %>% #if you wish to see number of ppl who reported this
select(r09,b04,average.min.in.activ,average.hour.in.activ) %>% ##avoid reporting number of people in activity
rename(Main_Activity=r09,Gender=b04)%>%
# mutate(Residence=recode(Residence,'1'="Rural",'2'="Urban")) %>%
htmlTable()
###
#find the number of people involved in each activity
#method 1
d2<-df1 %>%
group_by(r09,b04,Residence)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=n())
d2
d4<-d3 %>% group_by(r09,b04,Residence)%>%
summarise(no.of.people.in.activity=n())
d4
##geat table in HTML and you copy paste in excel
d5<- d4 %>% left_join(d2, by=c("r09", "b04","Residence"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) ##minutes in activity
d5$average.hours.in.activ<-round((d5$average.min.in.activ/60),1) #divide by 60 minutes for hours
d5 %>% as.data.frame() %>%
# select(r09,b04,Residence,no.of.people.in.activity,average.min.in.activ) %>% #see the abobe comment on reporting number of people in activity
select(r09,b04,Residence,average.min.in.activ,average.hours.in.activ) %>%
rename(Main_Activity=r09,Gender=b04)%>%
mutate(Residence=recode(Residence,'1'="Rural",'2'="Urban")) %>%
htmlTable
###
#Table 3 starts here; activity, gender: day of the week
###
d2<-df1 %>%
group_by(r09,b04,r02)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2
d2 %>% filter(is.na(r02)) #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,r02) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,r02)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","r02"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>%
rename(Main_activity=r09, Gender=b04, Day.of.week=r02)%>%
as.data.frame() %>%
select(Main_activity, Gender, Day.of.week,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
d<-d5
d<-d5 %>% filter(!is.na(r02)) ##check if the NA could be sartudays since its missing from data
###
#Table 4 starts here; activity, sex: marital status
###
d2<-df1 %>%
group_by(r09,b04,b07_1)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,b07_1) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,b07_1)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","b07_1"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% as.data.frame()%>%
rename(Activity=r09,sex=b04,marital.status=b07_1)%>%
select(Activity,sex,marital.status,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
names(d5)
d<-d5
###
#Table 4b starts here; activity, without _sex: marital status
d2<-df1 %>%
group_by(r09,b07_1)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b07_1) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b07_1)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09","b07_1"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% as.data.frame()%>%
rename(Activity=r09,marital.status=b07_1)%>%
select(Activity,marital.status,average.min.in.activ,average.hours.in.activ) %>%
htmlTable()
##################
#####
####
#Table 5 starts here; activity, gender: age category 12-17, 18-34, 35 and above
###
d2<-df1 %>%
group_by(r09,b04,agecat)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,agecat) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,agecat)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","agecat"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5
d5 %>% as.data.frame()%>%
rename(Activity=r09,sex=b04)%>%
select(Activity,sex,agecat,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
###----------------------starts here
#Table 5 starts here; activity, gender: age category 12-17, 18-34, 35 and above
###
d2<-df1 %>%
group_by(r09,b04,r07)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,r07) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,r07)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","r07"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% as.data.frame()%>%
rename(Activity=r09,sex=b04,who.did.you.do.it.for=r07)%>%
select(Activity,sex,who.did.you.do.it.for,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
######----------------ends here
###----------------------starts here
#Table 6 starts here; activity, gender: age category 12-17, 18-34, 35 and above
###
d2<-df1 %>%
group_by(r09,b04,r08)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,r08) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,r08)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","r08"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% as.data.frame() %>%
rename(Activity=r09,sex=b04,was.it.for.pay=r08)%>%
select(Activity,sex,was.it.for.pay,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
######----------------ends here
###----------------------starts here
#######
#Table 7 starts here; activity, place of occurence and without_sex
###
d2<-df1 %>%
group_by(r09,r11)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,r11) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,r11)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09","r11"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% rename(Activity=r09,place.activity.occur=r11)%>%
select(Activity,place.activity.occur,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
#Table 7 starts here; activity, place of occurence and sex
###
d2<-df1 %>%
group_by(r09,b04,r11)%>%
summarise(sum.tot.time=sum(r04),no.of.occurence.in.data=length(r04))
d2 #there people who never included day of the week
##number of respodents in terms of gender, day of the week, and the said activity and the hours they spend on the main activity
d3<-df1 %>%
group_by(b02,r09,b04,r11) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity
d4<-d3 %>% group_by(r09,b04,r11)%>%
summarise(no.of.people.in.activity=n())
d4
d5<- d4 %>% left_join(d2, by=c("r09", "b04","r11"))
d5$average.min.in.activ<-round((d5$sum.tot.time/d5$no.of.people.in.activity),0) #divide by 60 minutes
d5$average.hours.in.activ<-round(d5$average.min.in.activ/60,0)
d5 %>% rename(Activity=r09,sex=b04,place.activity.occur=r11)%>%
select(Activity,sex,place.activity.occur,average.min.in.activ,average.hours.in.activ) %>%
htmlTable
######----------------ends here
###----------------------starts here
#Table 8 starts here; r09_1, gender: r10__1 activity done alone by genderMALE
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__1) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__1)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__1==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.BY.SELF=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.BY.SELF,gender,no.of.people.in.activity)%>%
htmlTable()
#Table 8 starts here; r09_1, gender: r10__1 activity done alone by gender FEMALE
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__1) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__1)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__1==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.BY.SELF=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.BY.SELF,gender,no.of.people.in.activity)%>%
htmlTable()
######----------------ends here
#Table xxx starts here; r09_1, gender: r10__2 ,children 5years and below MALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__2) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__2)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__2==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.with.CHILDREN.BELOW.FIVE.YEARS=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.with.CHILDREN.BELOW.FIVE.YEARS,gender,no.of.people.in.activity)%>%
htmlTable()
#####for female
#Table xxx starts here; r09_1, gender: r10__2 ,children 5years and below FEMALE
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__2) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__2)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__2==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.BY.CHILDREN.BELOW.FIVE.YEARS=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.BY.CHILDREN.BELOW.FIVE.YEARS,gender,no.of.people.in.activity)%>%
htmlTable()
######----------------ends here
#Table xxx starts here; r09_1, gender: r10__3 ,children 5years to 17 MALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__3) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__3)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__3==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.between.FIVE.AND.SEVENTEEN.YEARS=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.between.FIVE.AND.SEVENTEEN.YEARS,gender,no.of.people.in.activity)%>%
htmlTable()
#####for female
#Table xxx starts here; r09_1, gender: r10__3 ,children 5years to 17 FEMALE
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__3) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__3)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__3==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.between.FIVE.AND.SEVENTEEN.YEARS=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.between.FIVE.AND.SEVENTEEN.YEARS,gender,no.of.people.in.activity)%>%
htmlTable()
######----------------ends here
#Table xxx starts here; r09_1, gender: r10__4 ,with another household member MALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__4) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__4)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__4==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.ANOTHER.HH.MEMBER=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.ANOTHER.HH.MEMBER,gender,no.of.people.in.activity)%>%
htmlTable()
#Table xxx starts here; r09_1, gender: r10__4 ,with another household member FEMALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__4) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__4)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__4==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.ANOTHER.HH.MEMBER=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.ANOTHER.HH.MEMBER,gender,no.of.people.in.activity)%>%
htmlTable()
#Table xxx starts here; r09_1, gender: r10__4 ,with a collegue MALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__5) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__5)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__5==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.colleague=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.colleague,gender,no.of.people.in.activity)%>%
htmlTable()
#Table xxx starts here; r09_1, gender: r10__4 ,with a collegue FEMALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__5) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__5)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__5==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.colleague=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.colleague,gender,no.of.people.in.activity)%>%
htmlTable()
#Table xxx starts here; r09_1, gender: r10__6 ,with a other known to the respodent MALE
### male
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__6) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__6)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__6==1) %>%
filter(b04=="Male")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.others=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.others,gender,no.of.people.in.activity)%>%
htmlTable()
#Table xxx starts here; r09_1, gender: r10__6 ,with a other known to the respodent FEMALE
### female
d3<-df1 %>%
filter(!is.na(r09_1)) %>%
group_by(b02,r09_1,b04,r10__6) %>%
summarise(no.of.occurence.in.hrs.per.person=n())# %>%arrange(desc(no.of.occurence.in.hrs)) ##add this if you want to sort in descening order
d3
#find the number of people involved in each activity top 5 activities by gender
d3 %>% group_by(r09_1,b04,r10__6)%>%
summarise(no.of.people.in.activity=n())%>%
arrange(desc(no.of.people.in.activity))%>%
ungroup() %>%
filter(r10__6==1) %>%
filter(b04=="Female")%>%
slice(1,2,3,4,5)%>%
ungroup() %>%
rename(FIVE.TOP.specific.activity.R091.DONE.WITH.others=r09_1, gender=b04)%>%
select(FIVE.TOP.specific.activity.R091.DONE.WITH.others,gender,no.of.people.in.activity)%>%
htmlTable()
###ends here
##################### activities with whom ends here
##maximum 3 main activities in the
#####3 main activities in every hour
df1 %>%
select(tus_diary__id, r09_1) %>% ##select only the diary and the specific activities
filter(!is.na(r09_1)) %>% ##filter any variable whhich is missing
group_by(tus_diary__id, r09_1) %>% ##grouping by time in diary and the specific activity
summarise(no.of.activities=n()) %>% ##number of people reporting activities at that time
ungroup() %>%
group_by(tus_diary__id) %>%
arrange(desc(no.of.activities)) %>% ## arrange the activities in descedning order within the group.
slice(1,2,3)%>% ##slice(1) for the top most activity; slice(1,2) for the two top most; slice(1,2,3) the third top most activity in that hour
ungroup()%>% ##ungroup from the main groupings
as.data.frame() %>% ###incase you want to write or merge twith other dataset
htmlTable() #create a html table, copy to excel and unwrap, justify left
##perception tables
dp1 <- read.dta13("C:/Users/user/Desktop/timeuse/datastata/perception_roster.dta")
head(dp1)
##merge with the main data from yvone
dp4<-df1 %>% left_join(dp1, by=c("tus_respondent__id","interview__id"))
#head(dp4)
dp44<-dp4 %>% select(-contains("r06__")) ##unselect the numerous r06__ columns making the data heavier
head(dp44)
# NOTE(review): removed `dp44 <- dp4 %>% filter(contains("r06__"))` here.
# `contains()` is a tidyselect helper that is only valid in selecting
# contexts (select/across), not in filter(); the line errored at runtime
# and, had it run, would have clobbered the column-pruned dp44 built above.
dp55<- dp44 %>% select(interview__id,b04,b07_1,agecat,perception_roster__id,r15_32)
#View(dp55)
dpft<-dp55 %>% filter(!is.na(r15_32)) ##remove the NA from the answers
dp56<-dpft %>% group_by(interview__id,b04,b07_1,agecat,perception_roster__id,r15_32) %>% summarise() ##removing duplicates by grouping
head(dp56)
table(dp56$r15_32)
levels(dp1$perception_roster__id)
# NOTE(review): the block below is pasted console output (the levels printed
# by the call above), not R code; left in the file it is a syntax error when
# the script is source()'d. Kept for reference but commented out.
# [1] "R15:Girls under 18 years may be married"
# [2] "R16:Boys under 18 years may marry"
# [3] "R17:Girls and women should undergo Female Genital Mutilation/Cut (FGM/C) as a rite of passage"
# [4] "R18:Girls should spend more time on domestic work than boys"
# [5] "R19:Girls and boys should spend the same amount of time on domestic work"
# [6] "R20:It is the responsibility of women and girls to cook for their families"
# [7] "R21:Men should help with cooking for their families"
# [8] "R22:It is a woman's responsibility to take care of her home, family and the elderly"
# [9] "R23:Childcare is the mother's responsibility"
# [10] "R24:It is shameful for men to be found by friends and neighbours performing household chores"
# [11] "R25:Men and women should equally share household tasks and childcare if both are working"
# [12] "R26:Men's work is more important than women's work"
# [13] "R27:Both husband and wife should contribute financially for the wellbeing of the family"
# [14] "R28:Both husband and wife should manage the income/expenses of the household"
# [15] "R29:Housewives would prefer to do paid work if they could"
# [16] "R30:Husbands prefer housewives to working wives"
# [17] "R31:It is justified for a man to beat a wife/partner for not completing household chores"
# [18] "R32:It is justified for a woman to beat a husband/partner for neglect of responsibility"
d<-table(dp56$perception_roster__id,dp56$r15_32,dp56$b04) # %>% htmlTable()
d<-table(dp56$perception_roster__id,dp56$r15_32,dp56$agecat)
# Copy a data.frame (or table) to the Windows clipboard for pasting into
# Excel. "clipboard-<bytes>" enlarges the default clipboard buffer so large
# tables are not truncated; `sep`/`dec` control the field and decimal marks.
results_clipboard <- function(d, sep="\t", dec=".", max.size=(200*1000)) # Copy a data.frame to clipboard
{
# NOTE(review): the "clipboard-*" device is Windows-only; on other platforms
# write.table would create an ordinary file with that name -- confirm usage.
write.table(d, paste0("clipboard-", formatC(max.size, format="f", digits=0)), sep=sep, row.names=TRUE, dec=dec)
}
results_clipboard(d) ###then paste in excel
########tables and percentages
|
# Reshape a raw data set into the three-state layout used downstream.
# `v` indexes the time/status columns: v[1] = first transition time,
# v[3] = final time, v[4] = final status. Columns not indexed by `v`
# are carried through unchanged as extra covariates.
Outp3state <- function(data, v, ...) {
  entry_time <- data[, v[1]]
  final_time <- data[, v[3]]
  data.frame(
    "times1" = entry_time,                         # time of the 1 -> 2 transition
    "delta"  = as.integer(final_time > entry_time),# 1 when final time exceeds times1
    "times2" = final_time - entry_time,            # time spent after the transition
    "time"   = final_time,
    "status" = data[, v[4]],
    data[, -v]
  )
}
|
/TPmsm/R/Outp3state.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 219
|
r
|
# Reshape a raw data set into the three-state layout: v[1] = first
# transition time, v[3] = final time, v[4] = final status. Returns a
# data.frame with times1 (transition time), delta (1 when the final time
# exceeds times1), times2 (final - transition time), time (final time),
# status (column v[4] verbatim), plus every column not indexed by `v`
# carried over as covariates.
Outp3state <- function(data, v, ...) {
	return( data.frame("times1"=data[,v[1]], "delta"=as.integer(data[,v[3]] > data[,v[1]]), "times2"=data[,v[3]]-data[,v[1]], "time"=data[,v[3]], "status"=data[,v[4]], data[,-v]) )
}
|
# Read the Newick tree in 6623_1.txt, strip its root, and save the result.
library(ape)
rooted_tree <- read.tree("6623_1.txt")
write.tree(unroot(rooted_tree), file = "6623_1_unrooted.txt")
|
/codeml_files/newick_trees_processed/6623_1/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
# Unroot the phylogenetic tree stored in 6623_1.txt and write the unrooted
# tree to 6623_1_unrooted.txt (both in ape's default Newick text format).
library(ape)
testtree <- read.tree("6623_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6623_1_unrooted.txt")
|
# Rclient -> EODC version: 0.4.2
library(openeo)
library(tibble)
# Connect to the EODC openEO back end using Google OIDC login; the id_token
# is exchanged for the back end's token.
host_url = "https://openeo.eodc.eu"
con = connect(host = host_url, version="0.4.0", login_type = "oidc",external="google",exchange_token = "id_token")
# Discovery calls: back-end capabilities, supported file types, available
# collections and processes.
capabilities()
list_file_types()
list_collections()
d= describe_collection(id="s2a_prd_msil1c")
list_processes()
describe_process("reduce")
# Build the process graph: load a Sentinel-2 L1C subset (bands B08/B04,
# one week of January 2017, extent given in EPSG:32632 coordinates) ...
p = processes()
data = p$load_collection(id = p$data$s2a_prd_msil1c,
                         spatial_extent = list(
                           west = 652000,
                           south = 5161000,
                           north = 5181000,
                           east = 672000,
                           crs = 32632
                         ),
                         temporal_extent = c("2017-01-01T00:00:00Z","2017-01-08T00:00:00Z"),
                         bands = c("B08","B04"))
# ... compute NDVI, then reduce the temporal dimension to the minimum value.
ndvi = p$ndvi(data = data,name="ndvi")
min_time = p$reduce(data = ndvi, dimension = "temporal", reducer = function(x) {
  min(x,na.rm=TRUE)
})
result = p$save_result(data = min_time,format = "GTiff")
# Submit the graph as a batch job, start it, and download the GeoTIFF result.
job_id = create_job(graph = result,
                    title = "Min NDVI example",
                    description = "Calculates the minimum NDVI from R client",
                    format = "GTiff")
describe_job(job = job_id)
start_job(job=job_id)
describe_job(job = job_id)
download_results(job=job_id, folder = "eodc-uc1-test.tif")
|
/examples/eodc-uc1-example.R
|
permissive
|
jonathom/openeo-r-client
|
R
| false
| false
| 1,412
|
r
|
# Rclient -> EODC version: 0.4.2
library(openeo)
library(tibble)
# Connect to the EODC openEO back end (Google OIDC login, id_token exchange).
host_url <- "https://openeo.eodc.eu"
con <- connect(host = host_url, version = "0.4.0", login_type = "oidc",
               external = "google", exchange_token = "id_token")
# Discovery: capabilities, file types, collections and processes offered.
capabilities()
list_file_types()
list_collections()
d <- describe_collection(id = "s2a_prd_msil1c")
list_processes()
describe_process("reduce")
# Process graph: Sentinel-2 L1C subset -> NDVI -> temporal minimum -> GTiff.
p <- processes()
data <- p$load_collection(
  id = p$data$s2a_prd_msil1c,
  spatial_extent = list(west = 652000, south = 5161000,
                        north = 5181000, east = 672000, crs = 32632),
  temporal_extent = c("2017-01-01T00:00:00Z", "2017-01-08T00:00:00Z"),
  bands = c("B08", "B04")
)
ndvi <- p$ndvi(data = data, name = "ndvi")
min_time <- p$reduce(data = ndvi, dimension = "temporal",
                     reducer = function(x) min(x, na.rm = TRUE))
result <- p$save_result(data = min_time, format = "GTiff")
# Submit the graph as a batch job, run it and download the result.
job_id <- create_job(graph = result,
                     title = "Min NDVI example",
                     description = "Calculates the minimum NDVI from R client",
                     format = "GTiff")
describe_job(job = job_id)
start_job(job = job_id)
describe_job(job = job_id)
download_results(job = job_id, folder = "eodc-uc1-test.tif")
|
### R code from vignette source 'Prinsimp-introduction.Rnw'
|
/data/genthat_extracted_code/prinsimp/vignettes/Prinsimp-introduction.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 61
|
r
|
### R code from vignette source 'Prinsimp-introduction.Rnw'
|
## Alexandra King - V00827380
## Geog418 Assignment 4
################################
#### Prepare Pollution Data ####
################################
install.packages("rgdal")
install.packages("gstat")
install.packages("sp")
install.packages("spatstat")
install.packages("maptools")
install.packages("raster")
install.packages("tmap")
library(rgdal)
library(gstat)
library(sp)
library(spatstat) # Used for the dirichlet tessellation function
library(maptools) # Used for conversion from SPDF to ppp
library(raster) # Used to clip out thiessen polygons
library(tmap)
# Working directory for all input shapefiles/CSVs used below.
# NOTE(review): absolute Windows drive path + setwd() makes the script
# machine-specific; a project-relative path would be more portable.
dir <- "Z:\\Geog418\\Assignment4\\Working"
setwd(dir)
#DATASET 1
#Read the pollution csv dataset.
# NOTE(review): `header = T` uses the reassignable shortcut for TRUE --
# spelling out TRUE is safer.
ozone = read.csv("OZONE_PICKDATA_2016-4-30.csv", header = T, sep = ",")
#DATASET 2
#Read the monitoring station spatial dataset as an OGR data object.
monitor = readOGR(dsn = ".", layer = "airmonitoringstations")
#Extract the monitoring stations for the South Coast (SC) - taking a subset into new variable
SC.monitor = monitor[monitor$AIRBASIN %in% c("South Coast"),]
#Reproject the data to a suitable projection. Here we use a UTM projection because of the scale of the analysis.
#California is UTM Zone 11 in NAD83. (espg 26911)
SC.monitor.t = spTransform(SC.monitor, CRS("+init=epsg:26911"))
#DATASET 3
#Read the California Air Basin spatial dataset.
Ca.AirBasin = readOGR(dsn = ".", layer = "CaAirBasin")
#Extract the South Coast air basin from the spatial dataset.
SC.AirBasin = Ca.AirBasin[Ca.AirBasin$NAME %in% c("South Coast"),]
#Reproject the South Coast air basin spatial dataset to match the projeciton of the monitoring station dataset.
SC.AirBasin.t = spTransform(SC.AirBasin, CRS("+init=epsg:26911"))
################################
#### Process Pollution Data ####
################################
#You need to represent each location with a single value in order to perform statistical analyses.
#Examine the first several rows of the ozone dataset.
head(ozone)
#Looking at the date and hour columns, you can see that we need to process the data
#to get summary statistics.
#Calculate the mean and max ozone level for each site for all readings.
#only highlight and enter "aggregate(value ~ site, ozone, mean)" to get value on console
#these become just dataframes. we will need to spatialize these later on in the code
mean.ozone <- aggregate(value ~ site, ozone, mean)
max.ozone <- aggregate(value ~ site, ozone, max)
#Join the mean and max ozone values to their respective monitoring stations. In doing so, you will need to rename the
#first column of the monitoring data to site in order to have a unique name to match the two datasets.
names(SC.monitor.t)[1] <- "site"
#Merge the the monitoring station (x) shapefile with the ozone data (y) using the site column.
#the sp:: is saying to pull the merge tool from the sp package, instead of its default package "raster"
#'all.x = FALSE' is saying that we'll only keep the x's that match the y's, with no leftovers. but we'll keep all y's.
mrg.tab.mean <- sp::merge(SC.monitor.t, mean.ozone, by = "site", all.x = FALSE)
mrg.tab.max <- sp::merge(SC.monitor.t, max.ozone, by = "site", all.x = FALSE)
#Create a max and a mean spatialPointDataFrame, removing N/A values
ozone.mean.spdf <- na.omit(mrg.tab.mean)
ozone.max.spdf <- na.omit(mrg.tab.max)
#FIX: base R's interactive data viewer is View() (capital V); lower-case view()
#is not defined by any package loaded above, so the original line errored.
View(ozone.mean.spdf)
# Load and observe ozone data.... change
tm_shape(SC.AirBasin.t) +
tm_polygons() +
tm_shape(ozone.mean.spdf) +
tm_layout(main.title = "Mean Ozone Levels in Southern Coast Air Basin") +
tm_dots(col="value", palette = "YlOrBr",
title="Sampled Ozone \n(in ppm)", size=0.7) +
tm_legend(legend.outside=TRUE)
#study area map
studymap_tm <- tm_shape(Ca.AirBasin) +
tm_fill("lightgrey") +
tm_borders("black") +
tm_shape(SC.AirBasin) +
tm_fill("coral") +
tm_borders("black") +
tm_add_legend(type= "symbol", labels = "Southern Coast Air Basin", col="coral", shape = 19) +
tm_layout(title = "Map of California\nAir Basins", title.position = c(0.55, 0.87),
legend.position = c(0.55, 0.7)) +
tm_compass(position = c(0.02, 0.085)) +
tm_scale_bar(position= c("left", "bottom"))
studymap_tm
#SoCAB with monitoring sites
#FIX: the original chain ended with a bare `tm_add_legend` (the function object,
#never called), which raises an error when added to a tmap object. Complete the
#map as its own comment describes: basin fill plus the monitoring-site dots.
studymap2 <- tm_shape(SC.AirBasin) +
tm_fill("coral") +
tm_borders("black") +
tm_shape(SC.monitor) +
tm_dots(col = "black", size = 0.3) +
tm_add_legend(type = "symbol", labels = "Monitoring Station", col = "black", shape = 19)
studymap2
#Interactive palette browser (opens a shiny gadget) for choosing map colours.
tmaptools::palette_explorer()
####################################################
### Spatial Interpolation with Thiessen Polygons ###
####################################################
# Create a tessellated surface
th <- as(dirichlet(as.ppp(ozone.mean.spdf)), "SpatialPolygons")
# The dirichlet function does not carry over projection information
# requiring that this information be added manually
proj4string(th) <- proj4string(ozone.mean.spdf)
# The tessellated surface does not store attribute information
# from the point data layer. We'll use the over() function (from the sp
# package) to join the point attributes to the tesselated surface via
# a spatial join. The over() function creates a dataframe that will need to
# be added to the `th` object thus creating a SpatialPolygonsDataFrame object
th.z <- over(th, ozone.mean.spdf, fn=mean) #some get N/A values
th.spdf <- SpatialPolygonsDataFrame(th, th.z)
# Finally, we'll clip the tessellated surface to the South Coast Air Basin boundaries
th.clp <- raster::intersect(SC.AirBasin.t,th.spdf)
# Map the data
tm_shape(th.clp) +
tm_polygons(col="value", palette="YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_layout(main.title = "Thiessen Polygons Interpolation") +
tm_legend(legend.outside=TRUE)
########################################
#### Spatial Interpolation with IDW ####
########################################
# Create an empty grid where n is the total number of cells
grd <- as.data.frame(spsample(ozone.mean.spdf, "regular", n=50000))
names(grd) <- c("X", "Y")
coordinates(grd) <- c("X", "Y")
gridded(grd) <- TRUE # Create SpatialPixel object
fullgrid(grd) <- TRUE # Create SpatialGrid object
proj4string(grd) <- proj4string(SC.monitor.t)
# Inverse-distance weighting with power idp = 2.5 onto the prediction grid.
P.idw <- gstat::idw(value ~ 1, ozone.mean.spdf, newdata=grd, idp=2.5)
r <- raster(P.idw)
r.m <- mask(r, SC.AirBasin.t)
tm_shape(r.m) +
tm_raster(n=10,palette = "YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_layout(main.title = "IDW Interpolation") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_legend(legend.outside=TRUE)
#################################################
# Leave-one-out validation routine
# NOTE(review): 1:length(x) misbehaves when x is empty; seq_len()/seq_along()
# would be safer, though the station set here is non-empty in practice.
IDW.out <- vector(length = length(ozone.mean.spdf))
for (i in 1:length(ozone.mean.spdf)) {
IDW.out[i] <- gstat::idw(value ~ 1, ozone.mean.spdf[-i,], ozone.mean.spdf[i,], idp=2.5)$var1.pred
}
#we've run the for loop and created surfaces where we remove one point, interpolate it, and then compare
# what the interpolated value is under that point in comparison to the observation
# Plot the differences (similar to regression) using actual value of ozone to compare to interpolated IDW
#with a line of fit to see how accurate we are
OP <- par(pty="s", mar=c(4,3,0,0))
plot(IDW.out ~ ozone.mean.spdf$value, asp=1, xlab="Observed", ylab="Predicted", pch=16,
col=rgb(0,0,0,0.5))
abline(lm(IDW.out ~ ozone.mean.spdf$value), col="red", lw=2,lty=2)
abline(0,1)
par(OP)
#determine root mean square error
sqrt( sum((IDW.out - ozone.mean.spdf$value)^2) / length(ozone.mean.spdf))
#our plot shows that we're terrible! adjust the idp value in for loop, see how trendline and error value change
#################################################
# Implementation of a jackknife technique to estimate a confidence interval at each unsampled point.
# Create the interpolated surface
img <- gstat::idw(value~1, ozone.mean.spdf, newdata=grd, idp=2.5)
n <- length(ozone.mean.spdf)
Zi <- matrix(nrow = length(img$var1.pred), ncol = n)
# Remove a point then interpolate (do this n times for each point)
# NOTE(review): `st` collects the per-fold rasters but is never used again in
# the visible script; it appears to exist only for optional inspection.
st <- stack()
for (i in 1:n){
Z1 <- gstat::idw(value~1, ozone.mean.spdf[-i,], newdata=grd, idp=2.5)
st <- addLayer(st,raster(Z1,layer=1))
# Calculated pseudo-value Z at j
Zi[,i] <- n * img$var1.pred - (n-1) * Z1$var1.pred
}
# Jackknife estimator of parameter Z at location j
Zj <- as.matrix(apply(Zi, 1, sum, na.rm=T) / n )
# Compute (Zi* - Zj)^2
c1 <- apply(Zi,2,'-',Zj) # Compute the difference
c1 <- apply(c1^2, 1, sum, na.rm=T ) # Sum the square of the difference
# Compute the confidence interval
CI <- sqrt( 1/(n*(n-1)) * c1)
# Create (CI / interpolated value) raster
img.sig <- img
img.sig$v <- CI /img$var1.pred
# Clip the confidence raster to Southern California
r <- raster(img.sig, layer="v")
r.m <- mask(r, SC.AirBasin.t)
# Plot the map
tm_shape(r.m) + tm_raster(n=7,title="95% confidence \ninterval \n(in ppm)") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_legend(legend.outside=TRUE)
###################################################
### Spatial Interpolation with Ordinary Krieging ##
###################################################
# just fitting on the points, not counting for trends
#assigning value = 1 for a polynomial
f.0 <- as.formula(value ~ 1)
#pulls out the variogram, getting the mean bin values
#we can play around with our sill, range, and nugget values AND the model type
var.smpl <- variogram(f.0, ozone.mean.spdf, cloud = FALSE) #, cutoff=1000000, width=89900)
dat.fit <- fit.variogram(var.smpl, fit.ranges = FALSE, fit.sills = FALSE,
vgm(psill=1.45e-05, model="Sph", range=40000, nugget=0))
plot(var.smpl, dat.fit, main= "Spherical Semivariogram Model: Manual")
################
# MAGIC
#here we take out the range, nugg, and sill specification ability
# we only put the model type and it automatically does the magic best fit
var.smpl <- variogram(f.0, ozone.mean.spdf, cloud = FALSE) #, cutoff=1000000, width=89900)
dat.fitEx <- fit.variogram(var.smpl, fit.ranges = FALSE, fit.sills = FALSE,
vgm(model="Exp"))
plot(var.smpl, dat.fitEx, main= "Exponential Semivariogram Model: Automatic")
################
# Define the model
f.0 <- as.formula(value ~ 1)
# Perform the krige interpolation (note the use of the variogram model
# created in the earlier step)
##ordinary kriging: not defining a polynomial trend surface,
# it is just krieging on raw data, not looking at any trends
# NOTE(review): uses `dat.fit` (the manual spherical fit), not `dat.fitEx`.
dat.krg <- krige( f.0, ozone.mean.spdf, grd, dat.fit)
# Convert kriged surface to a raster object for clipping
r <- raster(dat.krg)
r.m <- mask(r, SC.AirBasin.t)
# Plot the map
tm_shape(r.m) +
tm_raster(n=10, palette="YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "Ordinary Krieging with Spherical Model") +
tm_legend(legend.outside=TRUE)
#can also map the variance
r <- raster(dat.krg, layer="var1.var")
r.m <- mask(r, SC.AirBasin.t)
tm_shape(r.m) +
tm_raster(n=7, palette ="YlOrBr",
title="Variance map \n(in squared ppm)") +tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "Variance using Spherical") +
tm_legend(legend.outside=TRUE)
#and map the confidence interval
r <- sqrt(raster(dat.krg, layer="var1.var")) * 1.96
r.m <- mask(r, SC.AirBasin.t)
tm_shape(r.m) +
tm_raster(n=7, palette ="YlOrBr",
title="95% CI map \n(in ppm)") +tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "Confidence Interval using Spherical") +
tm_legend(legend.outside=TRUE)
########################################################
### Spatial Interpolation with Trend Surface Analysis ##
########################################################
# Polynomial Trends
##############
#Define the 1st order polynomial equation
###############
#linear trend with just x + y
f.1 <- as.formula(value ~ X + Y)
# Add X and Y to P - predict attribute based on x and y coordinates
ozone.mean.spdf$X <- coordinates(ozone.mean.spdf)[,1]
ozone.mean.spdf$Y <- coordinates(ozone.mean.spdf)[,2]
# Run the regression model (lm is a linear regression model)
# giving it value of function = x + y while looking at ozone data
lm.1 <- lm( f.1, data=ozone.mean.spdf)
# Use the regression model output to interpolate the surface
dat.1st <- SpatialGridDataFrame(grd, data.frame(var1.pred = predict(lm.1, newdata=grd)))
# Clip the interpolated raster to Southern California
r <- raster(dat.1st)
r.m <- mask(r, SC.AirBasin.t)
# Plot the map
tm_shape(r.m) +
tm_raster(n=10, palette="YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "1st Order Polynomial Trend Surface") +
tm_legend(legend.outside=TRUE)
#change colour pallette since this is a diverging one (do just reds or blues)
#interesting results that show ozone is increasing as we go Southeast
#ideas: going into the valleys - where wildfires occur, air is trapped in these areas
#interesting though because away from major cities.
#ocean coastal air sweeps away pollutants and dilutes the concentrations
###############
# Define the 2nd order polynomial equation
###############
#I functions means "keep this intact and don't try to interpret it" (ie there is no X variable)
f.2 <- as.formula(value ~ X + Y + I(X*X)+I(Y*Y) + I(X*Y))
# Add X and Y to P
ozone.mean.spdf$X <- coordinates(ozone.mean.spdf)[,1]
ozone.mean.spdf$Y <- coordinates(ozone.mean.spdf)[,2]
# Run the regression model again using the 2nd polynomial equation
lm.2 <- lm( f.2, data=ozone.mean.spdf)
# Use the regression model output to interpolate the surface
dat.2nd <- SpatialGridDataFrame(grd, data.frame(var1.pred = predict(lm.2, newdata=grd)))
# Clip the interpolated raster to South Cali
r <- raster(dat.2nd)
r.m <- mask(r, SC.AirBasin.t)
# Plot the map
tm_shape(r.m) +
tm_raster(n=10, palette="YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "2nd Order Polynomial Trend Surface") +
tm_legend(legend.outside=TRUE)
#can keep going and do third, fourth, fifth order polynomials
##################################################
## Spatial Interpolation with Universal Kriging ##
##################################################
#counting for trends in the data
#we can choose one of these trend functions to use (1st order or 2nd order) - justify your choice
f.1 <- as.formula(value ~ X + Y)
f.2 <- as.formula(value ~ X + Y + I(X*X)+I(Y*Y) + I(X*Y))
var.smpl <- variogram(f.1, ozone.mean.spdf, cloud = FALSE) #, cutoff=1000000, width=89900)
dat.fit <- fit.variogram(var.smpl, fit.ranges = FALSE, fit.sills = FALSE,
vgm(psill=1.45e-05, model="Sph", range=40000, nugget=0))
plot(var.smpl, dat.fit)
# Define the trend model - copy and paste the value from the equations (f1 or f2) above
f.2 <- as.formula(value ~ X + Y + I(X*X)+I(Y*Y) + I(X*Y))
# Perform the krige interpolation (note the use of the variogram model
# created in the earlier step)
# NOTE(review): f.2 is defined twice above but krige() below uses f.1, so the
# 2nd-order trend is never actually applied -- confirm this is intentional.
dat.krg <- krige( f.1, ozone.mean.spdf, grd, dat.fit)
# Convert kriged surface to a raster object for clipping
r <- raster(dat.krg)
r.m <- mask(r, SC.AirBasin.t)
# Plot the map
tm_shape(r.m) +
tm_raster(n=10, palette="YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "Interpolation of Ozone using Universal Krieging") +
tm_legend(legend.outside=TRUE)
#plot the variance
r <- raster(dat.krg, layer="var1.var")
r.m <- mask(r, SC.AirBasin.t)
tm_shape(r.m) +
tm_raster(n=7, palette ="YlOrBr",
title="Variance \n(in squared ppm)") +tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_legend(legend.outside=TRUE)
#plot the confidence
r <- sqrt(raster(dat.krg, layer="var1.var")) * 1.96
r.m <- mask(r, SC.AirBasin.t)
tm_shape(r.m) +
tm_raster(n=7, palette ="YlOrBr",
title="95% Confidence \nInterval \n(in ppm)") +tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_legend(legend.outside=TRUE)
|
/Lab4_RCode_AKing.R
|
no_license
|
alxandraking/geog418
|
R
| false
| false
| 16,495
|
r
|
## Alexandra King - V00827380
## Geog418 Assignment 4
##
## Spatial interpolation of 2016 ozone readings over California's South Coast
## Air Basin; later sections build Thiessen, IDW, kriging and trend surfaces.
################################
#### Prepare Pollution Data ####
################################
# NOTE(review): install.packages() re-downloads on every run; these calls are
# normally executed once interactively rather than left in the script.
install.packages("rgdal")
install.packages("gstat")
install.packages("sp")
install.packages("spatstat")
install.packages("maptools")
install.packages("raster")
install.packages("tmap")
library(rgdal)
library(gstat)
library(sp)
library(spatstat) # Used for the dirichlet tessellation function
library(maptools) # Used for conversion from SPDF to ppp
library(raster) # Used to clip out thiessen polygons
library(tmap)
# NOTE(review): absolute drive path makes the script machine-specific.
dir <- "Z:\\Geog418\\Assignment4\\Working"
setwd(dir)
#DATASET 1
#Read the pollution csv dataset.
# NOTE(review): header = T abbreviates TRUE; the spelled-out TRUE is safer.
ozone = read.csv("OZONE_PICKDATA_2016-4-30.csv", header = T, sep = ",")
#DATASET 2
#Read the monitoring station spatial dataset as an OGR data object.
monitor = readOGR(dsn = ".", layer = "airmonitoringstations")
#Extract the monitoring stations for the South Coast (SC) - taking a subset into new variable
SC.monitor = monitor[monitor$AIRBASIN %in% c("South Coast"),]
#Reproject the data to a suitable projection. Here we use a UTM projection because of the scale of the analysis.
#California is UTM Zone 11 in NAD83. (espg 26911)
SC.monitor.t = spTransform(SC.monitor, CRS("+init=epsg:26911"))
#DATASET 3
#Read the California Air Basin spatial dataset.
Ca.AirBasin = readOGR(dsn = ".", layer = "CaAirBasin")
#Extract the South Coast air basin from the spatial dataset.
SC.AirBasin = Ca.AirBasin[Ca.AirBasin$NAME %in% c("South Coast"),]
#Reproject the South Coast air basin spatial dataset to match the projeciton of the monitoring station dataset.
SC.AirBasin.t = spTransform(SC.AirBasin, CRS("+init=epsg:26911"))
################################
#### Process Pollution Data ####
################################
#You need to represent each location with a single value in order to perform statistical analyses.
#Examine the first several rows of the ozone dataset.
head(ozone)
#Looking at the date and hour columns, you can see that we need to process the data
#to get summary statistics.
#Calculate the mean and max ozone level for each site for all readings.
#only highlight and enter "aggregate(value ~ site, ozone, mean)" to get value on console
#these become just dataframes. we will need to spatialize these later on in the code
mean.ozone <- aggregate(value ~ site, ozone, mean)
max.ozone <- aggregate(value ~ site, ozone, max)
#Join the mean and max ozone values to their respective monitoring stations. In doing so, you will need to rename the
#first column of the monitoring data to site in order to have a unique name to match the two datasets.
names(SC.monitor.t)[1] <- "site"
#Merge the the monitoring station (x) shapefile with the ozone data (y) using the site column.
#the sp:: is saying to pull the merge tool from the sp package, instead of its default package "raster"
#'all.x = FALSE' is saying that we'll only keep the x's that match the y's, with no leftovers. but we'll keep all y's.
mrg.tab.mean <- sp::merge(SC.monitor.t, mean.ozone, by = "site", all.x = FALSE)
mrg.tab.max <- sp::merge(SC.monitor.t, max.ozone, by = "site", all.x = FALSE)
#Create a max and a mean spatialPointDataFrame, removing N/A values
ozone.mean.spdf <- na.omit(mrg.tab.mean)
ozone.max.spdf <- na.omit(mrg.tab.max)
#FIX: base R's interactive data viewer is View() (capital V); lower-case view()
#is not defined by any package loaded above, so the original line errored.
View(ozone.mean.spdf)
# Load and observe ozone data.... change
tm_shape(SC.AirBasin.t) +
tm_polygons() +
tm_shape(ozone.mean.spdf) +
tm_layout(main.title = "Mean Ozone Levels in Southern Coast Air Basin") +
tm_dots(col="value", palette = "YlOrBr",
title="Sampled Ozone \n(in ppm)", size=0.7) +
tm_legend(legend.outside=TRUE)
#study area map
studymap_tm <- tm_shape(Ca.AirBasin) +
tm_fill("lightgrey") +
tm_borders("black") +
tm_shape(SC.AirBasin) +
tm_fill("coral") +
tm_borders("black") +
tm_add_legend(type= "symbol", labels = "Southern Coast Air Basin", col="coral", shape = 19) +
tm_layout(title = "Map of California\nAir Basins", title.position = c(0.55, 0.87),
legend.position = c(0.55, 0.7)) +
tm_compass(position = c(0.02, 0.085)) +
tm_scale_bar(position= c("left", "bottom"))
studymap_tm
#SoCAB with monitoring sites
#FIX: the original chain ended with a bare `tm_add_legend` (the function object,
#never called), which raises an error when added to a tmap object. Complete the
#map as its own comment describes: basin fill plus the monitoring-site dots.
studymap2 <- tm_shape(SC.AirBasin) +
tm_fill("coral") +
tm_borders("black") +
tm_shape(SC.monitor) +
tm_dots(col = "black", size = 0.3) +
tm_add_legend(type = "symbol", labels = "Monitoring Station", col = "black", shape = 19)
studymap2
#Interactive palette browser (opens a shiny gadget) for choosing map colours.
tmaptools::palette_explorer()
####################################################
### Spatial Interpolation with Thiessen Polygons ###
####################################################
# Create a tessellated surface
th <- as(dirichlet(as.ppp(ozone.mean.spdf)), "SpatialPolygons")
# The dirichlet function does not carry over projection information
# requiring that this information be added manually
proj4string(th) <- proj4string(ozone.mean.spdf)
# The tessellated surface does not store attribute information
# from the point data layer. We'll use the over() function (from the sp
# package) to join the point attributes to the tesselated surface via
# a spatial join. The over() function creates a dataframe that will need to
# be added to the `th` object thus creating a SpatialPolygonsDataFrame object
th.z <- over(th, ozone.mean.spdf, fn=mean) #some get N/A values
th.spdf <- SpatialPolygonsDataFrame(th, th.z)
# Finally, we'll clip the tessellated surface to the South Coast Air Basin boundaries
th.clp <- raster::intersect(SC.AirBasin.t,th.spdf)
# Map the data
tm_shape(th.clp) +
tm_polygons(col="value", palette="YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_layout(main.title = "Thiessen Polygons Interpolation") +
tm_legend(legend.outside=TRUE)
########################################
#### Spatial Interpolation with IDW ####
########################################
# Create an empty grid where n is the total number of cells
grd <- as.data.frame(spsample(ozone.mean.spdf, "regular", n=50000))
names(grd) <- c("X", "Y")
coordinates(grd) <- c("X", "Y")
gridded(grd) <- TRUE # Create SpatialPixel object
fullgrid(grd) <- TRUE # Create SpatialGrid object
proj4string(grd) <- proj4string(SC.monitor.t)
# Inverse-distance weighting with power idp = 2.5 onto the prediction grid.
P.idw <- gstat::idw(value ~ 1, ozone.mean.spdf, newdata=grd, idp=2.5)
r <- raster(P.idw)
r.m <- mask(r, SC.AirBasin.t)
tm_shape(r.m) +
tm_raster(n=10,palette = "YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_layout(main.title = "IDW Interpolation") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_legend(legend.outside=TRUE)
#################################################
# Leave-one-out validation routine
# NOTE(review): 1:length(x) misbehaves when x is empty; seq_len()/seq_along()
# would be safer, though the station set here is non-empty in practice.
IDW.out <- vector(length = length(ozone.mean.spdf))
for (i in 1:length(ozone.mean.spdf)) {
IDW.out[i] <- gstat::idw(value ~ 1, ozone.mean.spdf[-i,], ozone.mean.spdf[i,], idp=2.5)$var1.pred
}
#we've run the for loop and created surfaces where we remove one point, interpolate it, and then compare
# what the interpolated value is under that point in comparison to the observation
# Plot the differences (similar to regression) using actual value of ozone to compare to interpolated IDW
#with a line of fit to see how accurate we are
OP <- par(pty="s", mar=c(4,3,0,0))
plot(IDW.out ~ ozone.mean.spdf$value, asp=1, xlab="Observed", ylab="Predicted", pch=16,
col=rgb(0,0,0,0.5))
abline(lm(IDW.out ~ ozone.mean.spdf$value), col="red", lw=2,lty=2)
abline(0,1)
par(OP)
#determine root mean square error
sqrt( sum((IDW.out - ozone.mean.spdf$value)^2) / length(ozone.mean.spdf))
#our plot shows that we're terrible! adjust the idp value in for loop, see how trendline and error value change
#################################################
# Implementation of a jackknife technique to estimate a confidence interval at each unsampled point.
# Create the interpolated surface
img <- gstat::idw(value~1, ozone.mean.spdf, newdata=grd, idp=2.5)
n <- length(ozone.mean.spdf)
Zi <- matrix(nrow = length(img$var1.pred), ncol = n)
# Remove a point then interpolate (do this n times for each point)
# NOTE(review): `st` collects the per-fold rasters but is never used again in
# the visible script; it appears to exist only for optional inspection.
st <- stack()
for (i in 1:n){
Z1 <- gstat::idw(value~1, ozone.mean.spdf[-i,], newdata=grd, idp=2.5)
st <- addLayer(st,raster(Z1,layer=1))
# Calculated pseudo-value Z at j
Zi[,i] <- n * img$var1.pred - (n-1) * Z1$var1.pred
}
# Jackknife estimator of parameter Z at location j
Zj <- as.matrix(apply(Zi, 1, sum, na.rm=T) / n )
# Compute (Zi* - Zj)^2
c1 <- apply(Zi,2,'-',Zj) # Compute the difference
c1 <- apply(c1^2, 1, sum, na.rm=T ) # Sum the square of the difference
# Compute the confidence interval
CI <- sqrt( 1/(n*(n-1)) * c1)
# Create (CI / interpolated value) raster
img.sig <- img
img.sig$v <- CI /img$var1.pred
# Clip the confidence raster to Southern California
r <- raster(img.sig, layer="v")
r.m <- mask(r, SC.AirBasin.t)
# Plot the map
tm_shape(r.m) + tm_raster(n=7,title="95% confidence \ninterval \n(in ppm)") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_legend(legend.outside=TRUE)
###################################################
### Spatial Interpolation with Ordinary Krieging ##
###################################################
# just fitting on the points, not counting for trends
#assigning value = 1 for a polynomial
f.0 <- as.formula(value ~ 1)
#pulls out the variogram, getting the mean bin values
#we can play around with our sill, range, and nugget values AND the model type
var.smpl <- variogram(f.0, ozone.mean.spdf, cloud = FALSE) #, cutoff=1000000, width=89900)
dat.fit <- fit.variogram(var.smpl, fit.ranges = FALSE, fit.sills = FALSE,
vgm(psill=1.45e-05, model="Sph", range=40000, nugget=0))
plot(var.smpl, dat.fit, main= "Spherical Semivariogram Model: Manual")
################
# MAGIC
#here we take out the range, nugg, and sill specification ability
# we only put the model type and it automatically does the magic best fit
var.smpl <- variogram(f.0, ozone.mean.spdf, cloud = FALSE) #, cutoff=1000000, width=89900)
dat.fitEx <- fit.variogram(var.smpl, fit.ranges = FALSE, fit.sills = FALSE,
vgm(model="Exp"))
plot(var.smpl, dat.fitEx, main= "Exponential Semivariogram Model: Automatic")
################
# Define the model
f.0 <- as.formula(value ~ 1)
# Perform the krige interpolation (note the use of the variogram model
# created in the earlier step)
##ordinary kriging: not defining a polynomial trend surface,
# it is just krieging on raw data, not looking at any trends
# NOTE(review): uses `dat.fit` (the manual spherical fit), not `dat.fitEx`.
dat.krg <- krige( f.0, ozone.mean.spdf, grd, dat.fit)
# Convert kriged surface to a raster object for clipping
r <- raster(dat.krg)
r.m <- mask(r, SC.AirBasin.t)
# Plot the map
tm_shape(r.m) +
tm_raster(n=10, palette="YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "Ordinary Krieging with Spherical Model") +
tm_legend(legend.outside=TRUE)
#can also map the variance
r <- raster(dat.krg, layer="var1.var")
r.m <- mask(r, SC.AirBasin.t)
tm_shape(r.m) +
tm_raster(n=7, palette ="YlOrBr",
title="Variance map \n(in squared ppm)") +tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "Variance using Spherical") +
tm_legend(legend.outside=TRUE)
#and map the confidence interval
r <- sqrt(raster(dat.krg, layer="var1.var")) * 1.96
r.m <- mask(r, SC.AirBasin.t)
tm_shape(r.m) +
tm_raster(n=7, palette ="YlOrBr",
title="95% CI map \n(in ppm)") +tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "Confidence Interval using Spherical") +
tm_legend(legend.outside=TRUE)
########################################################
### Spatial Interpolation with Trend Surface Analysis ##
########################################################
# Polynomial Trends
##############
#Define the 1st order polynomial equation
###############
#linear trend with just x + y
f.1 <- as.formula(value ~ X + Y)
# Add X and Y to P - predict attribute based on x and y coordinates
ozone.mean.spdf$X <- coordinates(ozone.mean.spdf)[,1]
ozone.mean.spdf$Y <- coordinates(ozone.mean.spdf)[,2]
# Run the regression model (lm is a linear regression model)
# giving it value of function = x + y while looking at ozone data
lm.1 <- lm( f.1, data=ozone.mean.spdf)
# Use the regression model output to interpolate the surface
dat.1st <- SpatialGridDataFrame(grd, data.frame(var1.pred = predict(lm.1, newdata=grd)))
# Clip the interpolated raster to Southern California
r <- raster(dat.1st)
r.m <- mask(r, SC.AirBasin.t)
# Plot the map
tm_shape(r.m) +
tm_raster(n=10, palette="YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "1st Order Polynomial Trend Surface") +
tm_legend(legend.outside=TRUE)
#change colour pallette since this is a diverging one (do just reds or blues)
#interesting results that show ozone is increasing as we go Southeast
#ideas: going into the valleys - where wildfires occur, air is trapped in these areas
#interesting though because away from major cities.
#ocean coastal air sweeps away pollutants and dilutes the concentrations
###############
# Define the 2nd order polynomial equation
###############
#I functions means "keep this intact and don't try to interpret it" (ie there is no X variable)
f.2 <- as.formula(value ~ X + Y + I(X*X)+I(Y*Y) + I(X*Y))
# Add X and Y to P
ozone.mean.spdf$X <- coordinates(ozone.mean.spdf)[,1]
ozone.mean.spdf$Y <- coordinates(ozone.mean.spdf)[,2]
# Run the regression model again using the 2nd polynomial equation
lm.2 <- lm( f.2, data=ozone.mean.spdf)
# Use the regression model output to interpolate the surface
dat.2nd <- SpatialGridDataFrame(grd, data.frame(var1.pred = predict(lm.2, newdata=grd)))
# Clip the interpolated raster to South Cali
r <- raster(dat.2nd)
r.m <- mask(r, SC.AirBasin.t)
# Plot the map
tm_shape(r.m) +
tm_raster(n=10, palette="YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "2nd Order Polynomial Trend Surface") +
tm_legend(legend.outside=TRUE)
#can keep going and do third, fourth, fifth order polynomials
##################################################
## Spatial Interpolation with Universal Kriging ##
##################################################
#counting for trends in the data
#we can choose one of these trend functions to use (1st order or 2nd order) - justify your choice
f.1 <- as.formula(value ~ X + Y)
f.2 <- as.formula(value ~ X + Y + I(X*X)+I(Y*Y) + I(X*Y))
var.smpl <- variogram(f.1, ozone.mean.spdf, cloud = FALSE) #, cutoff=1000000, width=89900)
dat.fit <- fit.variogram(var.smpl, fit.ranges = FALSE, fit.sills = FALSE,
vgm(psill=1.45e-05, model="Sph", range=40000, nugget=0))
plot(var.smpl, dat.fit)
# Define the trend model - copy and paste the value from the equations (f1 or f2) above
f.2 <- as.formula(value ~ X + Y + I(X*X)+I(Y*Y) + I(X*Y))
# Perform the krige interpolation (note the use of the variogram model
# created in the earlier step)
# NOTE(review): f.2 is defined twice above but krige() below uses f.1, so the
# 2nd-order trend is never actually applied -- confirm this is intentional.
dat.krg <- krige( f.1, ozone.mean.spdf, grd, dat.fit)
# Convert kriged surface to a raster object for clipping
r <- raster(dat.krg)
r.m <- mask(r, SC.AirBasin.t)
# Plot the map
tm_shape(r.m) +
tm_raster(n=10, palette="YlOrBr",
title="Predicted Ozone \n(in ppm)") +
tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_layout(main.title = "Interpolation of Ozone using Universal Krieging") +
tm_legend(legend.outside=TRUE)
#plot the variance
r <- raster(dat.krg, layer="var1.var")
r.m <- mask(r, SC.AirBasin.t)
tm_shape(r.m) +
tm_raster(n=7, palette ="YlOrBr",
title="Variance \n(in squared ppm)") +tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_legend(legend.outside=TRUE)
#plot the confidence
r <- sqrt(raster(dat.krg, layer="var1.var")) * 1.96
r.m <- mask(r, SC.AirBasin.t)
tm_shape(r.m) +
tm_raster(n=7, palette ="YlOrBr",
title="95% Confidence \nInterval \n(in ppm)") +tm_shape(ozone.mean.spdf) + tm_dots(size=0.2) +
tm_legend(legend.outside=TRUE)
|
#' Get Operator
#'
#' Retrieves the operator associated with the given API token.
#'
#' @param token
#'   Your API token.
#'
#' @rdname operators
#' @export
get_operator <- function(token) {
  operator_path <- sprintf("/operators/%s", get_segment(token))
  res <- GET(get_endpoint(operator_path), add_headers(.headers = to_headers(token)))
  code <- as.character(status_code(res))
  body <- content(res, "text", encoding = "UTF-8")
  # Dispatch on the HTTP status code returned by the API.
  if (code == "200") {
    from_content(body, "soracom_operator")
  } else if (code == "400") {
    stop("Invalid operator ID.")
  } else {
    stop(body)
  }
}
|
/R/operators.R
|
no_license
|
kos59125/soracomr
|
R
| false
| false
| 619
|
r
|
#' Get Operator
#'
#' Retrieves the operator associated with the given API token.
#'
#' @param token
#'   Your API token.
#'
#' @rdname operators
#' @export
get_operator <- function(token) {
  operator_path <- sprintf("/operators/%s", get_segment(token))
  res <- GET(get_endpoint(operator_path), add_headers(.headers = to_headers(token)))
  code <- as.character(status_code(res))
  body <- content(res, "text", encoding = "UTF-8")
  # Dispatch on the HTTP status code returned by the API.
  if (code == "200") {
    from_content(body, "soracom_operator")
  } else if (code == "400") {
    stop("Invalid operator ID.")
  } else {
    stop(body)
  }
}
|
# Fit a sinusoidal seasonal model to the NZS climate series using nls().
library(minpack.lm)
require(graphics)
# Data <- read.table(file="../Data/900days.txt", header=TRUE, sep="")
Data <- read.csv(file="../Data/ClimateData.csv", header=TRUE)
# Drop rows with a missing response before fitting.
Data <- Data[!is.na(Data$NZS),]
x_0_strt <- mean(Data$NZS) # Vertical shift
A_strt <- (max(Data$NZS) - min(Data$NZS)) / 2 # Amplitude
L_strt <- 12 # length of a cycle, 12 months
# find predictions for original time series
# Model: x = x_0 + A*sin(2*pi*t/L); starting values computed above.
myFit <- nls(NZS~x_0 + A*sin(2*pi * Data$TimePt / L), data = Data, start = list(x_0 = x_0_strt, A = A_strt, L = L_strt))
# x = x_t + A*sin(2*pi*t/L)
sumFit <- summary(myFit)
# Extract fitted coefficients: vertical shift, amplitude, cycle length.
x_0 <- sumFit$coefficients[1]
A <- sumFit$coefficients[2]
L <- sumFit$coefficients[3]
# Scatter plot of the data with the fitted sinusoid overlaid.
plot(Data$NZS ~ Data$TimePt, data = Data)
lines(Data$TimePt, x_0 + A*sin(2*pi * Data$TimePt / L), col="blue")
# lines(Data$TimePt,Data$NZS, col = "red")
|
/content/code/TimeSer.R
|
permissive
|
joseph-palmer/TheMulQuaBio
|
R
| false
| false
| 811
|
r
|
# Fit a sinusoidal seasonal model to the NZS climate series using nls().
library(minpack.lm)
require(graphics)
# Data <- read.table(file="../Data/900days.txt", header=TRUE, sep="")
Data <- read.csv(file="../Data/ClimateData.csv", header=TRUE)
# Drop rows with a missing response before fitting.
Data <- Data[!is.na(Data$NZS),]
x_0_strt <- mean(Data$NZS) # Vertical shift
A_strt <- (max(Data$NZS) - min(Data$NZS)) / 2 # Amplitude
L_strt <- 12 # length of a cycle, 12 months
# find predictions for original time series
# Model: x = x_0 + A*sin(2*pi*t/L); starting values computed above.
myFit <- nls(NZS~x_0 + A*sin(2*pi * Data$TimePt / L), data = Data, start = list(x_0 = x_0_strt, A = A_strt, L = L_strt))
# x = x_t + A*sin(2*pi*t/L)
sumFit <- summary(myFit)
# Extract fitted coefficients: vertical shift, amplitude, cycle length.
x_0 <- sumFit$coefficients[1]
A <- sumFit$coefficients[2]
L <- sumFit$coefficients[3]
# Scatter plot of the data with the fitted sinusoid overlaid.
plot(Data$NZS ~ Data$TimePt, data = Data)
lines(Data$TimePt, x_0 + A*sin(2*pi * Data$TimePt / L), col="blue")
# lines(Data$TimePt,Data$NZS, col = "red")
|
# testing generate_figures
# R studio top right window
# Go to more>document
# Install and Restart
library(magrittr)
library(jgcricolors)
library(dplyr)
library(rmap)
# Local paths: figure output dir, Tethys/Demeter results, GCAM water CSVs.
images = r"{C:\Users\thom927\Documents\metarepos\khan-etal_2022_tethysSSPRCP\webpage\images\}"
folder = "C:/Users/thom927/Documents/Data/tethysDemeterOutputs"
GCAM_withdrawals_csv = "C:/Users/thom927/Documents/Data/GrahamGCAM/water_withdrawals_by_mapping_source.csv"
GCAM_consumption_csv = "C:/Users/thom927/Documents/Data/GrahamGCAM/water_consumption_by_mapping_source.csv"
# Build all figures from the saved annual/monthly RDS data.
# NOTE(review): generate_figures is presumably provided by the package under
# development (loaded via Install and Restart above) — confirm.
out <- generate_figures(annual_rds = "annual_data.rds",
monthly_rds = "monthly_data.rds",
folder = "C:/Users/thom927/Documents/Data/tethysDemeterOutputs",
temporal_scale = "all")
|
/scripts/devTests.R
|
no_license
|
JGCRI/khan-etal_2022_tethysSSPRCP
|
R
| false
| false
| 801
|
r
|
# testing generate_figures
# R studio top right window
# Go to more>document
# Install and Restart
library(magrittr)
library(jgcricolors)
library(dplyr)
library(rmap)
# Local paths: figure output dir, Tethys/Demeter results, GCAM water CSVs.
images = r"{C:\Users\thom927\Documents\metarepos\khan-etal_2022_tethysSSPRCP\webpage\images\}"
folder = "C:/Users/thom927/Documents/Data/tethysDemeterOutputs"
GCAM_withdrawals_csv = "C:/Users/thom927/Documents/Data/GrahamGCAM/water_withdrawals_by_mapping_source.csv"
GCAM_consumption_csv = "C:/Users/thom927/Documents/Data/GrahamGCAM/water_consumption_by_mapping_source.csv"
# Build all figures from the saved annual/monthly RDS data.
# NOTE(review): generate_figures is presumably provided by the package under
# development (loaded via Install and Restart above) — confirm.
out <- generate_figures(annual_rds = "annual_data.rds",
monthly_rds = "monthly_data.rds",
folder = "C:/Users/thom927/Documents/Data/tethysDemeterOutputs",
temporal_scale = "all")
|
## Copyright 2012 Sebastian Gibb
## <mail@sebastiangibb.de>
##
## This is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## It is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## See <http://www.gnu.org/licenses/>
## print percentage text on a barplot
## x: bar heights, n: total used to compute the percentage, p: bar positions.
printPercentage <- function(x, n, p) {
  pct_labels <- format(x / n * 100, digits = 3)
  text(p, x, labels = pct_labels, pos = 3)
}
|
/printPercentage-functions.R
|
no_license
|
sgibb/analyzezamsdatabases
|
R
| false
| false
| 755
|
r
|
## Copyright 2012 Sebastian Gibb
## <mail@sebastiangibb.de>
##
## This is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## It is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## See <http://www.gnu.org/licenses/>
## print percentage text on a barplot
## x: bar heights, n: total used to compute the percentage, p: bar positions.
printPercentage <- function(x, n, p) {
  pct_labels <- format(x / n * 100, digits = 3)
  text(p, x, labels = pct_labels, pos = 3)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alignment-utils.R
\name{aberrantSep}
\alias{aberrantSep}
\title{Assesses whether two reads in a pair are aberrantly separated with respect to the reference genome}
\usage{
aberrantSep(gpairs, distance = 10000)
}
\arguments{
\item{gpairs}{a \code{GAlignmentPairs} object}
\item{distance}{the minimum distance in base pairs between two reads in
order to call them aberrantly separated}
}
\value{
logical vector of the same length as \code{gpairs}
}
\description{
Determines whether separation between first and last read in a
GAlignmentPairs is greater than some distance.
}
\details{
Evaluates to \code{TRUE} for a read pair when the separation between the
first and last read exceeds \code{distance} base pairs.
}
|
/man/aberrantSep.Rd
|
no_license
|
cancer-genomics/trellis
|
R
| false
| true
| 686
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alignment-utils.R
\name{aberrantSep}
\alias{aberrantSep}
\title{Assesses whether two reads in a pair are aberrantly separated with respect to the reference genome}
\usage{
aberrantSep(gpairs, distance = 10000)
}
\arguments{
\item{gpairs}{a \code{GAlignmentPairs} object}
\item{distance}{the minimum distance in base pairs between two reads in
order to call them aberrantly separated}
}
\value{
logical vector of the same length as \code{gpairs}
}
\description{
Determines whether separation between first and last read in a
GAlignmentPairs is greater than some distance.
}
\details{
Evaluates to \code{TRUE} for a read pair when the separation between the
first and last read exceeds \code{distance} base pairs.
}
|
## Task 5
set.seed(314)
library(MASS)
## Data from 1) and 2). Estimate the parameters using the `R` function
## `fitdistr(data, "distribution")`
n <- 100L
lambda <- 7 ## freely chosen value
## Inverse CDF of the exponential distribution with rate theta[1].
exp_inverse_cdf <- function(theta) { ## Exponential
function(probs) {
-1 / theta[1] * log(1 - probs)
}
}
# Draw an exponential sample by inverse-CDF sampling of uniforms.
exp_observ <- exp_inverse_cdf(lambda)(runif(n))
eta <- 2 # freely chosen
nu <- 2
## Inverse CDF of the Weibull distribution with scale theta[1], shape theta[2].
weibull_inverse_cdf <- function(theta) {
function(probs) {
theta[1] * (-1 * log(1 - probs)) ^ (1 / theta[2])
}
}
weibull_observ <- weibull_inverse_cdf(c(eta, nu))(runif(n))
# Maximum-likelihood fits; the printed estimates are recorded below.
fitdistr(weibull_observ, "Weibull")
## > shape scale
## > 2.0686329 2.2249776
## > (0.1632889) (0.1131470)
fitdistr(exp_observ, "exponential")
## > rate
## > 8.8168220
## > (0.8816822)
|
/part-1/task-5.R
|
no_license
|
vabalas/pns-tasks
|
R
| false
| false
| 821
|
r
|
## Task 5
set.seed(314)
library(MASS)
## Data from 1) and 2). Estimate the parameters using the `R` function
## `fitdistr(data, "distribution")`
n <- 100L
lambda <- 7 ## freely chosen value
## Inverse CDF of the exponential distribution with rate theta[1].
exp_inverse_cdf <- function(theta) { ## Exponential
function(probs) {
-1 / theta[1] * log(1 - probs)
}
}
# Draw an exponential sample by inverse-CDF sampling of uniforms.
exp_observ <- exp_inverse_cdf(lambda)(runif(n))
eta <- 2 # freely chosen
nu <- 2
## Inverse CDF of the Weibull distribution with scale theta[1], shape theta[2].
weibull_inverse_cdf <- function(theta) {
function(probs) {
theta[1] * (-1 * log(1 - probs)) ^ (1 / theta[2])
}
}
weibull_observ <- weibull_inverse_cdf(c(eta, nu))(runif(n))
# Maximum-likelihood fits; the printed estimates are recorded below.
fitdistr(weibull_observ, "Weibull")
## > shape scale
## > 2.0686329 2.2249776
## > (0.1632889) (0.1131470)
fitdistr(exp_observ, "exponential")
## > rate
## > 8.8168220
## > (0.8816822)
|
# Sample Abe-Ley
#
# This script contains helper functions for sampling from the Abe-Ley model for cylindrical data
#

## simulate from wrapped cauchy distribution ##
# input
# n   = sample size
# mu  = circular mean
# rho = circular concentration in [0, 1]
# Returns n draws on [0, 2*pi).
rwcauchy <- function(n, mu, rho) {
  if (rho == 0) {
    # Zero concentration: reduces to the circular uniform distribution.
    result <- runif(n, 0, 2 * pi)
  } else if (rho == 1) {
    # Full concentration: a point mass at the circular mean.
    result <- rep(mu, n)
  } else {
    # General case: wrap a linear Cauchy(mu, -log(rho)) onto the circle.
    scale <- -log(rho)
    result <- rcauchy(n, mu, scale) %% (2 * pi)
  }
  return(result)
}

## simulate from Abe-Ley distribution ##
# input
# n = sample size
# nu, alpha, mu, kappa, lambda = parameters of the Abe-Ley distribution
# Returns a data frame with columns theta (angle in [0, 2*pi)) and x (> 0).
rweiSSVM <- function(n, nu = 1, alpha = 1, mu = pi, kappa = 1, lambda = 0) {
  theta <- numeric(n)
  x <- numeric(n)
  for (i in seq_len(n)) {
    # Simulate theta[i] from a sine-skewed wrapped Cauchy via an
    # accept/reflect step on a plain wrapped Cauchy draw.
    u <- runif(1, 0, 1)
    theta.1 <- rwcauchy(1, mu, tanh(kappa / 2))
    if (u < (1 + lambda * sin(theta.1 - mu)) / 2) {
      theta[i] <- theta.1
    } else {
      theta[i] <- -theta.1
    }
    # BUG FIX: the original computed shape/scale from the whole (growing)
    # vector `theta`; rweibull() was called with n = 1, so R recycled only
    # the first element and every draw reused the scale implied by theta[1].
    # Use only the current angle theta[i].
    shape <- nu * (1 - tanh(kappa) * cos(theta[i] - mu))^(1 / alpha)
    # We compute the scale parameter for a different parametrization:
    # shape = (scale_parWeibull in R)^(-alpha), so
    # scale_parWeibull in R = (1/shape)^(1/alpha)
    scale <- (1 / shape)^(1 / alpha)
    x[i] <- rweibull(1, shape = alpha, scale = scale)
  }
  out <- cbind(theta %% (2 * pi), x)
  colnames(out) <- c("theta", "x")
  return(as.data.frame(out))
}
|
/Manuscript/R-code/Sample Abe-Ley.R
|
no_license
|
joliencremers/CylindricalComparisonCircumplex
|
R
| false
| false
| 1,499
|
r
|
# Sample Abe-Ley
#
# This script contains helper functions for sampling from the Abe-Ley model for cylindrical data
#

## simulate from wrapped cauchy distribution ##
# input
# n   = sample size
# mu  = circular mean
# rho = circular concentration in [0, 1]
# Returns n draws on [0, 2*pi).
rwcauchy <- function(n, mu, rho) {
  if (rho == 0) {
    # Zero concentration: reduces to the circular uniform distribution.
    result <- runif(n, 0, 2 * pi)
  } else if (rho == 1) {
    # Full concentration: a point mass at the circular mean.
    result <- rep(mu, n)
  } else {
    # General case: wrap a linear Cauchy(mu, -log(rho)) onto the circle.
    scale <- -log(rho)
    result <- rcauchy(n, mu, scale) %% (2 * pi)
  }
  return(result)
}

## simulate from Abe-Ley distribution ##
# input
# n = sample size
# nu, alpha, mu, kappa, lambda = parameters of the Abe-Ley distribution
# Returns a data frame with columns theta (angle in [0, 2*pi)) and x (> 0).
rweiSSVM <- function(n, nu = 1, alpha = 1, mu = pi, kappa = 1, lambda = 0) {
  theta <- numeric(n)
  x <- numeric(n)
  for (i in seq_len(n)) {
    # Simulate theta[i] from a sine-skewed wrapped Cauchy via an
    # accept/reflect step on a plain wrapped Cauchy draw.
    u <- runif(1, 0, 1)
    theta.1 <- rwcauchy(1, mu, tanh(kappa / 2))
    if (u < (1 + lambda * sin(theta.1 - mu)) / 2) {
      theta[i] <- theta.1
    } else {
      theta[i] <- -theta.1
    }
    # BUG FIX: the original computed shape/scale from the whole (growing)
    # vector `theta`; rweibull() was called with n = 1, so R recycled only
    # the first element and every draw reused the scale implied by theta[1].
    # Use only the current angle theta[i].
    shape <- nu * (1 - tanh(kappa) * cos(theta[i] - mu))^(1 / alpha)
    # We compute the scale parameter for a different parametrization:
    # shape = (scale_parWeibull in R)^(-alpha), so
    # scale_parWeibull in R = (1/shape)^(1/alpha)
    scale <- (1 / shape)^(1 / alpha)
    x[i] <- rweibull(1, shape = alpha, scale = scale)
  }
  out <- cbind(theta %% (2 * pi), x)
  colnames(out) <- c("theta", "x")
  return(as.data.frame(out))
}
|
# Example: fit Plackett-Luce mixtures with G = 1..3 components to the car
# configuration data, run Gibbs sampling, then adjust for label switching.
library(PLMIX)
### Name: label_switchPLMIX
### Title: Label switching adjustment for Bayesian mixtures of
### Plackett-Luce models
### Aliases: label_switchPLMIX
### ** Examples
data(d_carconf)
K <- ncol(d_carconf)
n.start <- 2
# MAP estimates for each number of mixture components (used as MCMC inits).
MAP_1 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=1,
n_start=n.start, n_iter=400*1)
MAP_2 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=2,
n_start=n.start, n_iter=400*2)
MAP_3 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=3,
n_start=n.start, n_iter=400*3)
mcmc_iter <- 30
burnin <- 10
# Gibbs samplers initialized at the MAP solutions.
GIBBS_1 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=1, n_iter=mcmc_iter,
n_burn=burnin, init=list(p=MAP_1$mod$P_map,
z=binary_group_ind(MAP_1$mod$class_map,G=1)))
GIBBS_2 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=2, n_iter=mcmc_iter,
n_burn=burnin, init=list(p=MAP_2$mod$P_map,
z=binary_group_ind(MAP_2$mod$class_map,G=2)))
GIBBS_3 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=3, n_iter=mcmc_iter,
n_burn=burnin, init=list(p=MAP_3$mod$P_map,
z=binary_group_ind(MAP_3$mod$class_map,G=3)))
# Adjusting the MCMC samples for label switching
require(doParallel)
# Only parallelize when the number of cores can be detected.
run_in_parallel <- !is.na(detectCores())
if(run_in_parallel){
registerDoParallel(2)
getDoParWorkers()
}
LS <- label_switchPLMIX(pi_inv=d_carconf, seq_G=1:3,
MCMCsampleP=list(GIBBS_1$P, GIBBS_2$P, GIBBS_3$P),
MCMCsampleW=list(GIBBS_1$W, GIBBS_2$W, GIBBS_3$W),
MAPestP=list(MAP_1$mod$P_map, MAP_2$mod$P_map, MAP_3$mod$P_map),
MAPestW=list(MAP_1$mod$W_map, MAP_2$mod$W_map, MAP_3$mod$W_map),
parallel = run_in_parallel)
str(LS)
|
/data/genthat_extracted_code/PLMIX/examples/label_switchPLMIX.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,948
|
r
|
# Example: fit Plackett-Luce mixtures with G = 1..3 components to the car
# configuration data, run Gibbs sampling, then adjust for label switching.
library(PLMIX)
### Name: label_switchPLMIX
### Title: Label switching adjustment for Bayesian mixtures of
### Plackett-Luce models
### Aliases: label_switchPLMIX
### ** Examples
data(d_carconf)
K <- ncol(d_carconf)
n.start <- 2
# MAP estimates for each number of mixture components (used as MCMC inits).
MAP_1 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=1,
n_start=n.start, n_iter=400*1)
MAP_2 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=2,
n_start=n.start, n_iter=400*2)
MAP_3 <- mapPLMIX_multistart(pi_inv=d_carconf, K=K, G=3,
n_start=n.start, n_iter=400*3)
mcmc_iter <- 30
burnin <- 10
# Gibbs samplers initialized at the MAP solutions.
GIBBS_1 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=1, n_iter=mcmc_iter,
n_burn=burnin, init=list(p=MAP_1$mod$P_map,
z=binary_group_ind(MAP_1$mod$class_map,G=1)))
GIBBS_2 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=2, n_iter=mcmc_iter,
n_burn=burnin, init=list(p=MAP_2$mod$P_map,
z=binary_group_ind(MAP_2$mod$class_map,G=2)))
GIBBS_3 <- gibbsPLMIX(pi_inv=d_carconf, K=K, G=3, n_iter=mcmc_iter,
n_burn=burnin, init=list(p=MAP_3$mod$P_map,
z=binary_group_ind(MAP_3$mod$class_map,G=3)))
# Adjusting the MCMC samples for label switching
require(doParallel)
# Only parallelize when the number of cores can be detected.
run_in_parallel <- !is.na(detectCores())
if(run_in_parallel){
registerDoParallel(2)
getDoParWorkers()
}
LS <- label_switchPLMIX(pi_inv=d_carconf, seq_G=1:3,
MCMCsampleP=list(GIBBS_1$P, GIBBS_2$P, GIBBS_3$P),
MCMCsampleW=list(GIBBS_1$W, GIBBS_2$W, GIBBS_3$W),
MAPestP=list(MAP_1$mod$P_map, MAP_2$mod$P_map, MAP_3$mod$P_map),
MAPestW=list(MAP_1$mod$W_map, MAP_2$mod$W_map, MAP_3$mod$W_map),
parallel = run_in_parallel)
str(LS)
|
/Practica_applys.R
|
no_license
|
YarlyMadrid/Codigo_Rproject
|
R
| false
| false
| 1,126
|
r
| ||
# Reading data
# skip=1 drops the header row; column names are assigned manually below.
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
# Naming power
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Selecting from power
# Keep only the two days of interest (1-2 Feb 2007).
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
# Transforming the Date and Time vars from characters into objects of type Date and POSIXlt respectively
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
# Attach the correct calendar date to each day's 1440 minute records.
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# calling the basic plot functions
# Columns were read as character/factor (no na.strings set), hence the
# as.numeric(as.character(...)) conversions below.
plot(subpower$Time,subpower$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_1))))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# annotating graph
title(main="Energy sub-metering")
|
/plot3.R
|
no_license
|
JLandion/ExData_Plotting1
|
R
| false
| false
| 1,293
|
r
|
# Reading data
# skip=1 drops the header row; column names are assigned manually below.
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
# Naming power
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Selecting from power
# Keep only the two days of interest (1-2 Feb 2007).
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
# Transforming the Date and Time vars from characters into objects of type Date and POSIXlt respectively
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
# Attach the correct calendar date to each day's 1440 minute records.
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# calling the basic plot functions
# Columns were read as character/factor (no na.strings set), hence the
# as.numeric(as.character(...)) conversions below.
plot(subpower$Time,subpower$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_1))))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# annotating graph
title(main="Energy sub-metering")
|
library(ggplot2)
library(MASS)
# Read every .txt gauge file in the working directory (data start on line 3)
# and stack them into a single data frame.
filelist = list.files(pattern = "*.txt")
datalist = lapply(filelist, function(x)read.csv(x,skip = 2, stringsAsFactors = F))
raw = do.call("rbind", datalist)
# put all the raw data into a vector
matrix.raw <- as.matrix(raw)[,-1]
vector.raw <- as.vector(t(matrix.raw))
length(vector.raw)
# Drop the trailing element before locating storm boundaries.
vector.data <- vector.raw[-length(vector.raw)]
length(vector.data)
# find position of the starting time of a rain storm
# Returns the indices at which a storm begins: every position i + 1 where
# vec[i] is the "----" separator and vec[i + 1] is not.
find.start <- function(vec){
  # Fewer than 2 elements: no separator transition is possible.
  if (length(vec) < 2) return(numeric(0))
  tmp <- rep(0, length(vec))
  # BUG FIX: the original loop header was `1:length(vec)-1`, which parses as
  # (1:length(vec)) - 1 = 0:(length(vec)-1), so the first iteration indexed
  # vec[0] (a zero-length value) and the comparison aborted with an error.
  # Use seq_len(length(vec) - 1), matching the parenthesized find.end.
  for (i in seq_len(length(vec) - 1)){
    if (vec[i] == "----" && vec[i + 1] != "----"){
      tmp[i] <- i + 1
    }
  }
  tmp[tmp != 0]
}
# Indices where each storm's measurements begin.
starting.pos <- find.start(vector.data)
length(starting.pos)
# find position of the ending time of a rain storm
# Returns every index i where vec[i] is a measurement and vec[i + 1] is the
# "----" separator, i.e. the last reading of each storm.
find.end <- function(vec){
  hits <- numeric(0)
  for (pos in 1:(length(vec) - 1)) {
    if (vec[pos] != "----" && vec[pos + 1] == "----") {
      hits <- c(hits, pos)
    }
  }
  hits
}
ending.pos <- find.end(vector.raw)
length(ending.pos)
# get the date by summing up the time interval rain storms
sum_rain_seq <- rep(NA,length(starting.pos))
# Zero out non-numeric codes ("T "/"M"/"M ") so the sums below don't fail;
# presumably trace/missing markers — TODO confirm against the data source.
vector.raw[vector.raw=="T "] <- 0
vector.raw[vector.raw =="M"] <- 0
vector.raw[vector.raw =="M "] <- 0
vector.raw[1:100]
# Total rainfall per storm: sum the readings between each start/end index.
for (i in 1:length(starting.pos)){
sum_rain_seq[i] <- sum(as.numeric(vector.raw[starting.pos[i]:ending.pos[i]]))
}
# Keep only storms with a defined, non-zero total.
raindata1 <- sum_rain_seq[!is.na(sum_rain_seq)]
raindata <- raindata1[!raindata1==0]
# Method of Moment
hist(raindata)
mean(raindata)
var(raindata)
# Gamma moment estimators: shape = mean^2/var, rate = mean/var.
alpha <- mean(raindata)^2/var(raindata)
lambda <- mean(raindata)/var(raindata)
# Estimated parameters using Method of Moment
# alpha = 0.3621776
# lambda = 1.276349
# MLE (method 1)
# Minimize the negative gamma log-likelihood, starting at the MoM estimates.
n <- length(raindata)
minus.likelihood <- function(theta) {-(n*theta[1]*log(theta[2])-n*lgamma(theta[1])+(theta[1]-1)*sum(log(raindata))-theta[2]*sum(raindata))}
max.likelihood <- nlminb(start=c(alpha, lambda), obj = minus.likelihood)
max.likelihood$par
# MLE (method 2)
params <- fitdistr(raindata, "gamma")
params
# MLE estimators for gamma distribution
# shape = 0.54579027
# rate = 1.92341816
# Check whether gamma distribution fit data well
# Q-Q plot of the data against gamma quantiles at the fitted parameters.
simdata <- qgamma(ppoints(length(raindata)), shape = params$estimate[1], rate = params$estimate[2])
qqplot(raindata, simdata)
|
/rain gauge.R
|
no_license
|
YitongZhou/881
|
R
| false
| false
| 2,243
|
r
|
library(ggplot2)
library(MASS)
# Read every .txt gauge file in the working directory (data start on line 3)
# and stack them into a single data frame.
filelist = list.files(pattern = "*.txt")
datalist = lapply(filelist, function(x)read.csv(x,skip = 2, stringsAsFactors = F))
raw = do.call("rbind", datalist)
# put all the raw data into a vector
matrix.raw <- as.matrix(raw)[,-1]
vector.raw <- as.vector(t(matrix.raw))
length(vector.raw)
# Drop the trailing element before locating storm boundaries.
vector.data <- vector.raw[-length(vector.raw)]
length(vector.data)
# find position of the starting time of a rain storm
# Returns the indices at which a storm begins: every position i + 1 where
# vec[i] is the "----" separator and vec[i + 1] is not.
find.start <- function(vec){
  # Fewer than 2 elements: no separator transition is possible.
  if (length(vec) < 2) return(numeric(0))
  tmp <- rep(0, length(vec))
  # BUG FIX: the original loop header was `1:length(vec)-1`, which parses as
  # (1:length(vec)) - 1 = 0:(length(vec)-1), so the first iteration indexed
  # vec[0] (a zero-length value) and the comparison aborted with an error.
  # Use seq_len(length(vec) - 1), matching the parenthesized find.end.
  for (i in seq_len(length(vec) - 1)){
    if (vec[i] == "----" && vec[i + 1] != "----"){
      tmp[i] <- i + 1
    }
  }
  tmp[tmp != 0]
}
# Indices where each storm's measurements begin.
starting.pos <- find.start(vector.data)
length(starting.pos)
# find position of the ending time of a rain storm
# Returns every index i where vec[i] is a measurement and vec[i + 1] is the
# "----" separator, i.e. the last reading of each storm.
find.end <- function(vec){
  hits <- numeric(0)
  for (pos in 1:(length(vec) - 1)) {
    if (vec[pos] != "----" && vec[pos + 1] == "----") {
      hits <- c(hits, pos)
    }
  }
  hits
}
ending.pos <- find.end(vector.raw)
length(ending.pos)
# get the date by summing up the time interval rain storms
sum_rain_seq <- rep(NA,length(starting.pos))
# Zero out non-numeric codes ("T "/"M"/"M ") so the sums below don't fail;
# presumably trace/missing markers — TODO confirm against the data source.
vector.raw[vector.raw=="T "] <- 0
vector.raw[vector.raw =="M"] <- 0
vector.raw[vector.raw =="M "] <- 0
vector.raw[1:100]
# Total rainfall per storm: sum the readings between each start/end index.
for (i in 1:length(starting.pos)){
sum_rain_seq[i] <- sum(as.numeric(vector.raw[starting.pos[i]:ending.pos[i]]))
}
# Keep only storms with a defined, non-zero total.
raindata1 <- sum_rain_seq[!is.na(sum_rain_seq)]
raindata <- raindata1[!raindata1==0]
# Method of Moment
hist(raindata)
mean(raindata)
var(raindata)
# Gamma moment estimators: shape = mean^2/var, rate = mean/var.
alpha <- mean(raindata)^2/var(raindata)
lambda <- mean(raindata)/var(raindata)
# Estimated parameters using Method of Moment
# alpha = 0.3621776
# lambda = 1.276349
# MLE (method 1)
# Minimize the negative gamma log-likelihood, starting at the MoM estimates.
n <- length(raindata)
minus.likelihood <- function(theta) {-(n*theta[1]*log(theta[2])-n*lgamma(theta[1])+(theta[1]-1)*sum(log(raindata))-theta[2]*sum(raindata))}
max.likelihood <- nlminb(start=c(alpha, lambda), obj = minus.likelihood)
max.likelihood$par
# MLE (method 2)
params <- fitdistr(raindata, "gamma")
params
# MLE estimators for gamma distribution
# shape = 0.54579027
# rate = 1.92341816
# Check whether gamma distribution fit data well
# Q-Q plot of the data against gamma quantiles at the fitted parameters.
simdata <- qgamma(ppoints(length(raindata)), shape = params$estimate[1], rate = params$estimate[2])
qqplot(raindata, simdata)
|
# If the file is not there, we will download and extract from the zip file. the File it's huge so just download it if we need it
if (!file.exists('household_power_consumption.zip')) {
# Download file
download.file(url='https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip', destfile='household_power_consumption.zip', method='curl')
# Unzip file
unzip('household_power_consumption.zip')
}
# "?" entries are treated as NA so numeric columns parse correctly.
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?")
# Subset data- Let's do this first so we don't need to convert dates in all rows we will discard anyway
data <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
# Let's convert the dates
data$DateTime <- strptime(paste(data$Date, data$Time), format= "%d/%m/%Y %H:%M:%S")
#Let's creata the first plot:
png(file = "plot4.png", width = 480, height = 480)
# Define 2x2 plot structure, by rows
par(mfrow=c(2,2))
# tile 1 (top left)
plot(data[,c("DateTime","Global_active_power")],type="l",xlab="",ylab="Global Active Power")
# tile 3 (top right)
plot(data[,c("DateTime","Voltage")],type="l",xlab="datetime",ylab="Voltage")
# tile 2 (bottom left)
plot(data[,c("DateTime","Sub_metering_1")],type="l",col=1,xlab='',ylab="Energy sub metering")
lines(data[,c("DateTime","Sub_metering_2")],type="l",col="red")
lines(data[,c("DateTime","Sub_metering_3")],type="l",col="blue")
legend('topright',c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","red","blue"),box.lwd=0)
# tile 4 (bottom right)
plot(data[,c("DateTime","Global_reactive_power")],type="l",xlab="datetime")
dev.off() #close the dev
|
/plot4.R
|
no_license
|
jgarcia241/ExData_Plotting1
|
R
| false
| false
| 1,641
|
r
|
# If the file is not there, we will download and extract from the zip file. the File it's huge so just download it if we need it
if (!file.exists('household_power_consumption.zip')) {
# Download file
download.file(url='https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip', destfile='household_power_consumption.zip', method='curl')
# Unzip file
unzip('household_power_consumption.zip')
}
# "?" entries are treated as NA so numeric columns parse correctly.
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?")
# Subset data- Let's do this first so we don't need to convert dates in all rows we will discard anyway
data <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
# Let's convert the dates
data$DateTime <- strptime(paste(data$Date, data$Time), format= "%d/%m/%Y %H:%M:%S")
#Let's creata the first plot:
png(file = "plot4.png", width = 480, height = 480)
# Define 2x2 plot structure, by rows
par(mfrow=c(2,2))
# tile 1 (top left)
plot(data[,c("DateTime","Global_active_power")],type="l",xlab="",ylab="Global Active Power")
# tile 3 (top right)
plot(data[,c("DateTime","Voltage")],type="l",xlab="datetime",ylab="Voltage")
# tile 2 (bottom left)
plot(data[,c("DateTime","Sub_metering_1")],type="l",col=1,xlab='',ylab="Energy sub metering")
lines(data[,c("DateTime","Sub_metering_2")],type="l",col="red")
lines(data[,c("DateTime","Sub_metering_3")],type="l",col="blue")
legend('topright',c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","red","blue"),box.lwd=0)
# tile 4 (bottom right)
plot(data[,c("DateTime","Global_reactive_power")],type="l",xlab="datetime")
dev.off() #close the dev
|
#setwd("spongeEMPchemistry-master")
##paths
dir <- "3.results/otu.create.data.output/"
#after running 1.filter.awk.sh
read.table(file="2.scripts/4.otu.create.offset/final.otu.txt", sep="\t", header=TRUE, check.names = FALSE) -> emp.orig_all # this is the data for the offset-calculation-file-table
# Loads the `factors` object used in the merge below.
load(file="3.results/otu.create.data.output/factors.RData")
emp.orig <- droplevels(emp.orig_all[,-2420]) # the awk script adds an NA column to the end, hence I need to remove that here
# Per-sample (column) totals of OTU counts.
emp.sum <- apply(emp.orig, 2, sum, na.rm=TRUE)
emp.sum.df <- as.data.frame(emp.sum)
emp.sum.df <- droplevels(merge(emp.sum.df, factors, by="row.names", keep=FALSE)) # sum file same obs. order as factor and abundance
# Move the merge key back into the row names, then drop the key column.
rownames(emp.sum.df) <- emp.sum.df[,1]
emp.sum.df[,1] <- NULL
emp.sum.final <- emp.sum.df[,1]
# Persist the offset vector for downstream scripts.
save(emp.sum.final, file = paste(dir, "emp.sum.final.otu", ".RData", sep = ""))
rm(dir)
|
/2.scripts/4.otu.create.offset/4.2.otu.create.offset.R
|
no_license
|
marinemoleco/spongeEMPchemistry
|
R
| false
| false
| 889
|
r
|
#setwd("spongeEMPchemistry-master")
##paths
dir <- "3.results/otu.create.data.output/"
#after running 1.filter.awk.sh
read.table(file="2.scripts/4.otu.create.offset/final.otu.txt", sep="\t", header=TRUE, check.names = FALSE) -> emp.orig_all # this is the data for the offset-calculation-file-table
# Loads the `factors` object used in the merge below.
load(file="3.results/otu.create.data.output/factors.RData")
emp.orig <- droplevels(emp.orig_all[,-2420]) # the awk script adds an NA column to the end, hence I need to remove that here
# Per-sample (column) totals of OTU counts.
emp.sum <- apply(emp.orig, 2, sum, na.rm=TRUE)
emp.sum.df <- as.data.frame(emp.sum)
emp.sum.df <- droplevels(merge(emp.sum.df, factors, by="row.names", keep=FALSE)) # sum file same obs. order as factor and abundance
# Move the merge key back into the row names, then drop the key column.
rownames(emp.sum.df) <- emp.sum.df[,1]
emp.sum.df[,1] <- NULL
emp.sum.final <- emp.sum.df[,1]
# Persist the offset vector for downstream scripts.
save(emp.sum.final, file = paste(dir, "emp.sum.final.otu", ".RData", sep = ""))
rm(dir)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{last_assess_yr}
\alias{last_assess_yr}
\title{The last year the stock was assessed}
\format{
A character string
}
\usage{
last_assess_yr
}
\description{
The last year the stock was assessed
}
\keyword{datasets}
|
/man/last_assess_yr.Rd
|
no_license
|
pbs-assess/arrowtooth
|
R
| false
| true
| 317
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{last_assess_yr}
\alias{last_assess_yr}
\title{The last year the stock was assessed}
\format{
A character string
}
\usage{
last_assess_yr
}
\description{
The last year the stock was assessed
}
\keyword{datasets}
|
#'////////////////////////////////////////////////////////////////////////////
#' FILE: app.R
#' AUTHOR: David Ruvolo
#' CREATED: 2020-03-15
#' MODIFIED: 2020-12-01
#' PURPOSE: a simple responsive datatable with images
#' STATUS: in.progress
#' PACKAGES: shiny; accessibleshiny
#' COMMENTS: NA
#'////////////////////////////////////////////////////////////////////////////
#' install packages
#' install.packages("shiny")
#' install.packages("rlang")
#' install.packages("purrr")
#' install.packages("htmltools")
#' install.packages("golem")
#' pkgs
suppressPackageStartupMessages(library(shiny))
#' load data
birds <- readRDS("data/birds.RDS")
# set golem
# NOTE(review): app_ui and app_server are not defined in this file —
# presumably sourced/exported elsewhere in the project; confirm.
# The birds data frame is passed to the app via golem options.
golem::with_golem_options(
app = shinyApp(
ui = app_ui,
server = app_server
),
golem_opts = list(
data = birds
)
)
|
/bird-counts-example/app.R
|
no_license
|
davidruvolo51/shinyAppGallery
|
R
| false
| false
| 815
|
r
|
#'////////////////////////////////////////////////////////////////////////////
#' FILE: app.R
#' AUTHOR: David Ruvolo
#' CREATED: 2020-03-15
#' MODIFIED: 2020-12-01
#' PURPOSE: a simple responsive datatable with images
#' STATUS: in.progress
#' PACKAGES: shiny; accessibleshiny
#' COMMENTS: NA
#'////////////////////////////////////////////////////////////////////////////
#' install packages
#' install.packages("shiny")
#' install.packages("rlang")
#' install.packages("purrr")
#' install.packages("htmltools")
#' install.packages("golem")
#' pkgs
suppressPackageStartupMessages(library(shiny))
#' load data
birds <- readRDS("data/birds.RDS")
# set golem
# NOTE(review): app_ui and app_server are not defined in this file —
# presumably sourced/exported elsewhere in the project; confirm.
# The birds data frame is passed to the app via golem options.
golem::with_golem_options(
app = shinyApp(
ui = app_ui,
server = app_server
),
golem_opts = list(
data = birds
)
)
|
/libc/stdio/fopen/man.r
|
no_license
|
paulohrpinheiro/tropix-libs
|
R
| false
| false
| 3,921
|
r
| ||
\name{deleteSelectedEdges}
\alias{deleteSelectedEdges}
\alias{deleteSelectedEdges,CytoscapeWindowClass-method}
\title{deleteSelectedEdges}
\description{In Cytoscape, remove all selected edges. These edges will
still exist in the corresponding R graph until you delete them there
as well.
}
\usage{
deleteSelectedEdges(obj)
}
\arguments{
\item{obj}{a \code{CytoscapeWindowClass} object. }
}
\value{
None.
}
\author{Paul Shannon}
\seealso{
selectEdges
cy2.edge.names
deleteSelectedNodes
}
\examples{
cw <- new.CytoscapeWindow ('deleteSelectedEdges.test', graph=makeSimpleGraph())
displayGraph (cw)
redraw (cw)
layoutNetwork(cw, 'jgraph-spring')
print (cy2.edge.names (cw@graph)) # find out Cytoscape's names for these edges
selectEdges (cw, "B (synthetic lethal) C")
deleteSelectedEdges (cw)
redraw (cw)
}
\keyword{graph}
|
/man/deleteSelectedEdges.Rd
|
no_license
|
pshannon-bioc/RCy3
|
R
| false
| false
| 859
|
rd
|
\name{deleteSelectedEdges}
\alias{deleteSelectedEdges}
\alias{deleteSelectedEdges,CytoscapeWindowClass-method}
\title{deleteSelectedEdges}
\description{In Cytoscape, remove all selected edges. These edges will
still exist in the corresponding R graph until you delete them there
as well.
}
\usage{
deleteSelectedEdges(obj)
}
\arguments{
\item{obj}{a \code{CytoscapeWindowClass} object. }
}
\value{
None.
}
\author{Paul Shannon}
\seealso{
selectEdges
cy2.edge.names
deleteSelectedNodes
}
\examples{
cw <- new.CytoscapeWindow ('deleteSelectedEdges.test', graph=makeSimpleGraph())
displayGraph (cw)
redraw (cw)
layoutNetwork(cw, 'jgraph-spring')
print (cy2.edge.names (cw@graph)) # find out Cytoscape's names for these edges
selectEdges (cw, "B (synthetic lethal) C")
deleteSelectedEdges (cw)
redraw (cw)
}
\keyword{graph}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docx_replace.R
\name{body_replace_all_text}
\alias{body_replace_all_text}
\alias{headers_replace_all_text}
\alias{footers_replace_all_text}
\title{Replace text anywhere in the document, or at a cursor}
\usage{
body_replace_all_text(x, old_value, new_value, only_at_cursor = FALSE, ...)
headers_replace_all_text(x, old_value, new_value, only_at_cursor = FALSE, ...)
footers_replace_all_text(x, old_value, new_value, only_at_cursor = FALSE, ...)
}
\arguments{
\item{x}{a docx device}
\item{old_value}{the value to replace}
\item{new_value}{the value to replace it with}
\item{only_at_cursor}{if \code{TRUE}, only search-and-replace at the current
cursor; if \code{FALSE} (default), search-and-replace in the entire document
(this can be slow on large documents!)}
\item{...}{optional arguments to grepl/gsub (e.g. \code{fixed=TRUE})}
}
\description{
Replace all occurrences of old_value with new_value. This method
uses \code{\link{grepl}}/\code{\link{gsub}} for pattern matching; you may
supply arguments as required (and therefore use \code{\link{regex}} features)
using the optional \code{...} argument.
Note that by default, grepl/gsub will use \code{fixed=FALSE}, which means
that \code{old_value} and \code{new_value} will be interpreted as regular
expressions.
\strong{Chunking of text}
Note that the behind-the-scenes representation of text in a Word document is
frequently not what you might expect! Sometimes a paragraph of text is broken
up (or "chunked") into several "runs," as a result of style changes, pauses
in text entry, later revisions and edits, etc. If you have not styled the
text, and have entered it in an "all-at-once" fashion, e.g. by pasting it or
by outputing it programmatically into your Word document, then this will
likely not be a problem. If you are working with a manually-edited document,
however, this can lead to unexpected failures to find text.
You can use the officer function \code{\link{docx_show_chunk}} to
show how the paragraph of text at the current cursor has been chunked into
runs, and what text is in each chunk. This can help troubleshoot unexpected
failures to find text.
}
\section{header_replace_all_text}{
Replacements will be performed in each header of all sections.
Replacements will be performed in each footer of all sections.
}
\examples{
library(magrittr)
doc <- read_docx() \%>\%
body_add_par("Placeholder one") \%>\%
body_add_par("Placeholder two")
# Show text chunk at cursor
docx_show_chunk(doc) # Output is 'Placeholder two'
# Simple search-and-replace at current cursor, with regex turned off
doc <- body_replace_all_text(doc, old_value = "Placeholder",
new_value = "new", only_at_cursor = TRUE, fixed = TRUE)
docx_show_chunk(doc) # Output is 'new two'
# Do the same, but in the entire document and ignoring case
doc <- body_replace_all_text(doc, old_value = "placeholder",
new_value = "new", only_at_cursor=FALSE, ignore.case = TRUE)
doc <- cursor_backward(doc)
docx_show_chunk(doc) # Output is 'new one'
# Use regex : replace all words starting with "n" with the word "example"
doc <- body_replace_all_text(doc, "\\\\bn.*?\\\\b", "example")
docx_show_chunk(doc) # Output is 'example one'
}
\seealso{
\code{\link{grep}}, \code{\link{regex}}, \code{\link{docx_show_chunk}}
}
\author{
Frank Hangler, \email{frank@plotandscatter.com}
}
|
/man/body_replace_all_text.Rd
|
no_license
|
kashenfelter/officer
|
R
| false
| true
| 3,410
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docx_replace.R
\name{body_replace_all_text}
\alias{body_replace_all_text}
\alias{headers_replace_all_text}
\alias{footers_replace_all_text}
\title{Replace text anywhere in the document, or at a cursor}
\usage{
body_replace_all_text(x, old_value, new_value, only_at_cursor = FALSE, ...)
headers_replace_all_text(x, old_value, new_value, only_at_cursor = FALSE, ...)
footers_replace_all_text(x, old_value, new_value, only_at_cursor = FALSE, ...)
}
\arguments{
\item{x}{a docx device}
\item{old_value}{the value to replace}
\item{new_value}{the value to replace it with}
\item{only_at_cursor}{if \code{TRUE}, only search-and-replace at the current
cursor; if \code{FALSE} (default), search-and-replace in the entire document
(this can be slow on large documents!)}
\item{...}{optional arguments to grepl/gsub (e.g. \code{fixed=TRUE})}
}
\description{
Replace all occurrences of old_value with new_value. This method
uses \code{\link{grepl}}/\code{\link{gsub}} for pattern matching; you may
supply arguments as required (and therefore use \code{\link{regex}} features)
using the optional \code{...} argument.
Note that by default, grepl/gsub will use \code{fixed=FALSE}, which means
that \code{old_value} and \code{new_value} will be interpreted as regular
expressions.
\strong{Chunking of text}
Note that the behind-the-scenes representation of text in a Word document is
frequently not what you might expect! Sometimes a paragraph of text is broken
up (or "chunked") into several "runs," as a result of style changes, pauses
in text entry, later revisions and edits, etc. If you have not styled the
text, and have entered it in an "all-at-once" fashion, e.g. by pasting it or
by outputing it programmatically into your Word document, then this will
likely not be a problem. If you are working with a manually-edited document,
however, this can lead to unexpected failures to find text.
You can use the officer function \code{\link{docx_show_chunk}} to
show how the paragraph of text at the current cursor has been chunked into
runs, and what text is in each chunk. This can help troubleshoot unexpected
failures to find text.
}
\section{header_replace_all_text}{
Replacements will be performed in each header of all sections.
Replacements will be performed in each footer of all sections.
}
\examples{
library(magrittr)
doc <- read_docx() \%>\%
body_add_par("Placeholder one") \%>\%
body_add_par("Placeholder two")
# Show text chunk at cursor
docx_show_chunk(doc) # Output is 'Placeholder two'
# Simple search-and-replace at current cursor, with regex turned off
doc <- body_replace_all_text(doc, old_value = "Placeholder",
new_value = "new", only_at_cursor = TRUE, fixed = TRUE)
docx_show_chunk(doc) # Output is 'new two'
# Do the same, but in the entire document and ignoring case
doc <- body_replace_all_text(doc, old_value = "placeholder",
new_value = "new", only_at_cursor=FALSE, ignore.case = TRUE)
doc <- cursor_backward(doc)
docx_show_chunk(doc) # Output is 'new one'
# Use regex : replace all words starting with "n" with the word "example"
doc <- body_replace_all_text(doc, "\\\\bn.*?\\\\b", "example")
docx_show_chunk(doc) # Output is 'example one'
}
\seealso{
\code{\link{grep}}, \code{\link{regex}}, \code{\link{docx_show_chunk}}
}
\author{
Frank Hangler, \email{frank@plotandscatter.com}
}
|
# Fetch an R example script from a remote git "raw" URL, localise every file
# it references (data files, source()'d scripts, Stan model files) into
# tempfiles, evaluate the rewritten script, and return the objects created.
#
# Arguments:
#   url.loc    - base raw-content URL of the repository
#   dat.loc.in - sub-directory (relative to url.loc) holding script and data
#   r.file     - file name of the R script to download and run
#   flag       - TRUE for a top-level call: return only the stan() fit
#                objects; FALSE for a recursive call on a source()'d script:
#                return (almost) everything so the caller can unpack it
#
# Returns: a named list of the kept objects (see `flag`).
#
# NOTE(review): depends on getURL() (RCurl) being attached by the caller and
# on a working `curl` binary for download.file(method = "curl"); it rewrites
# and eval()s downloaded code, so it must only be pointed at trusted repos.
RunStanGit=function(url.loc,dat.loc.in,r.file,flag=T){
# Internal Functions ----
# Copy every named element of a list into the caller's frame; used to splice
# the result of a recursive RunStanGit() call back into the running script.
unpack.list <- function(object) {
for(.x in names(object)){
assign(value = object[[.x]], x=.x, envir = parent.frame())
}
}
# Return the last component (the file name) of a slash-separated path.
strip.path=function(y){
str=strsplit(y,'[\\/]')[[1]]
str[length(str)]
}
# Rewrite one line of downloaded code that reads or sources a file:
# source() calls become recursive RunStanGit() calls; read-style calls get
# their quoted path downloaded to a tempfile and the line repointed at it.
setwd.url=function(y){
# Positions of the first quote pair (double or single) on the line.
x=c(as.numeric(gregexpr('\\"',y)[[1]]),as.numeric(gregexpr("\\'",y)[[1]]))
x=x[x!=-1]
str.old=substr(y,x[1],x[2])
str.change=strip.path(substr(y,x[1]+1,x[2]-1))
if(grepl('source',y)){
str.out=paste0('unpack.list(RunStanGit(url.loc,dat.loc.in,"',str.change,'",flag=F))')
}else{
str.new=paste0('"',dat.loc,str.change,'"')
# Left-hand side of the assignment is reused as the tempfile variable name.
file.name=gsub(' ','',strsplit(y,'<-|=')[[1]][1])
# `<<-` so the tempfile variable is visible when the rewritten script is
# eval()'d later inside this function.
eval(parse(text=paste0(file.name,' <<- tempfile()')))
eval(parse(text=paste0('download.file(',str.new,',',file.name,',quiet = T,method="curl")')))
str.out=gsub(str.old,file.name,y)
}
str.out
}
dat.loc=paste0(url.loc,dat.loc.in)
code.loc=paste0(dat.loc,r.file)
#Read R code ----
r.code=strsplit(gsub('\\r','',getURL(code.loc)[1]),'\\n')[[1]]
#Rewrite paths for source and read commands to url path ----
for(i in which(grepl('read|source',r.code))) r.code[i]=setwd.url(r.code[i])
stan.find=which(grepl('stan\\(',r.code))
# Tempfile names of downloaded .stan files, for cleanup at the end.
to.unlink=rep(NA,length(stan.find))
#Find the names of the objects that the stan calls are saved to ----
keep.files=gsub(' ','',unlist(lapply(strsplit(r.code[which(grepl('stan\\(',r.code))],'<-'),'[',1)))
# Comment out print calls ----
# Use the parser (not a blind gsub) so only complete top-level
# print()/pairs() expressions are commented out, including multi-line ones.
pr.code <- parse(text=r.code)
pr.code <- utils::getParseData(pr.code,includeText = TRUE)
pr.code <- pr.code[pr.code$parent==0&grepl('^print|^pairs',pr.code$text),]
comment.lines <- unique(unlist(mapply(seq,from=pr.code$line1,to=pr.code$line2)))
r.code[comment.lines] <- sprintf('#%s',r.code[comment.lines])
#r.code=gsub('print','#print',r.code)
#r.code=gsub('pairs','#pairs',r.code)
# Comment out any bare mention of a kept stan object (auto-printing) that is
# neither an assignment nor already a comment.
if(length(keep.files)>0){
for(i in 1:length(keep.files)){
comment.out=r.code[grep(keep.files[i],r.code)[!grepl('#|<-|=',r.code[grep(keep.files[i],r.code)])]]
r.code[grep(keep.files[i],r.code)[!grepl('#|<-|=',r.code[grep(keep.files[i],r.code)])]]=paste0('#',comment.out)
}
}
#Download the stan file to a temp file and change the call to stan from a text object to a connection ----
if(length(stan.find)>0){
for(i in 1:length(stan.find)){
# Quote positions on the stan() call line, as in setwd.url() above.
x=c(as.numeric(gregexpr('\\"',r.code[stan.find[i]])[[1]]),as.numeric(gregexpr("\\'",r.code[stan.find[i]])[[1]]))
x=x[x!=-1]
file.name=strip.path(substr(r.code[stan.find[i]],x[1]+1,x[2]-1))
eval(parse(text=paste0(file.name," <- tempfile(fileext = '.stan')")))
loc.file=paste0('"',dat.loc,file.name,'"')
eval(parse(text=paste0('download.file(',loc.file,',',file.name,',quiet = TRUE,method="curl")')))
to.unlink[i]=file.name
r.code[stan.find[i]]=gsub(substr(r.code[stan.find[i]],x[1],x[2]),strip.path(substr(r.code[stan.find[i]],x[1]+1,x[2]-1)),r.code[stan.find[i]])
}
}
#Evaluate new code ----
eval(parse(text=r.code))
#Unlink temp stan files ----
junk=sapply(to.unlink[!is.na(to.unlink)],unlink)
#Return objects (conditional if call is nested or not) ----
# NOTE(review): the ls() pattern below is a regex character class, so it
# matches any name containing a character outside the listed set rather than
# excluding the names flag/r.code/keep.files literally -- verify intent.
if(flag){ret.obj=keep.files}else{ret.obj=ls(pattern = '[^(flag|r.code|keep.files)]')}
list.out <- sapply(ls()[ls()%in%ret.obj], function(x) get(x))
return(list.out)
#End of function ----
}
#example ----
# url.loc='https://raw.githubusercontent.com/stan-dev/example-models/master/ARM/'
# ex=data.frame(r.file=c('10.4_LackOfOverlapWhenTreat.AssignmentIsUnknown.R',
# '10.5_CasualEffectsUsingIV.R',
# '10.6_IVinaRegressionFramework.R', #sourcing another file
# '3.1_OnePredictor.R', #removing partial path to file
# '8.4_PredictiveSimulationToCheckFitOfTimeSeriesModels.R'), #removing echo call from readlines
# stringsAsFactors = F)
#
# ex$chapter=unlist(lapply(lapply(strsplit(ex$r.file,'[\\_]'),'[',1),function(x) paste('Ch',strsplit(x,'[\\.]')[[1]][1],sep='.')))
# ex$example=unlist(lapply(lapply(strsplit(ex$r.file,'[\\_]'),'[',1),function(x) strsplit(x,'[\\.]')[[1]][2]))
#
# a=plyr::dlply(ex%>%slice(c(1)),.(r.file),.fun=function(x) RunStanGit(url.loc,dat.loc=paste0(x$chapter,'/'),r.file=x$r.file),.progress = 'text')
#
# Functions to read output into nested list structure with data.frame in leaf
# stan.sim.out=llply(a,.fun=function(m){
# llply(m,.fun=function(stan.out){
# x=attributes(stan.out)
# x1=llply(x$sim$samples,attributes)
# names(x1)=c(1:length(x1))
# df.model=ldply(x1,.fun=function(x) do.call('cbind',x$sampler_params)%>%data.frame%>%mutate(Iter=1:nrow(.)),.id="Chain")
#
# df.samples=stan.out@sim$samples
# names(df.samples)=c(1:length(df.samples))
# df.samples=ldply(df.samples,.fun = function(y) data.frame(y)%>%mutate(Iter=1:nrow(.)),.id = 'Chain')
#
# df.model%>%left_join(df.samples,by=c('Chain','Iter'))
# })
# } )
#
# stan.sim.out.files=ldply(a,.fun=function(x) data.frame(stan.obj.output=names(x)))
|
/inst/examples/remote_git/www/functions/RunStanGit.r
|
no_license
|
cran/d3Tree
|
R
| false
| false
| 5,278
|
r
|
# Fetch an R example script from a remote git "raw" URL, localise every file
# it references (data files, source()'d scripts, Stan model files) into
# tempfiles, evaluate the rewritten script, and return the objects created.
#
# Arguments:
#   url.loc    - base raw-content URL of the repository
#   dat.loc.in - sub-directory (relative to url.loc) holding script and data
#   r.file     - file name of the R script to download and run
#   flag       - TRUE for a top-level call: return only the stan() fit
#                objects; FALSE for a recursive call on a source()'d script:
#                return (almost) everything so the caller can unpack it
#
# Returns: a named list of the kept objects (see `flag`).
#
# NOTE(review): depends on getURL() (RCurl) being attached by the caller and
# on a working `curl` binary for download.file(method = "curl"); it rewrites
# and eval()s downloaded code, so it must only be pointed at trusted repos.
RunStanGit=function(url.loc,dat.loc.in,r.file,flag=T){
# Internal Functions ----
# Copy every named element of a list into the caller's frame; used to splice
# the result of a recursive RunStanGit() call back into the running script.
unpack.list <- function(object) {
for(.x in names(object)){
assign(value = object[[.x]], x=.x, envir = parent.frame())
}
}
# Return the last component (the file name) of a slash-separated path.
strip.path=function(y){
str=strsplit(y,'[\\/]')[[1]]
str[length(str)]
}
# Rewrite one line of downloaded code that reads or sources a file:
# source() calls become recursive RunStanGit() calls; read-style calls get
# their quoted path downloaded to a tempfile and the line repointed at it.
setwd.url=function(y){
# Positions of the first quote pair (double or single) on the line.
x=c(as.numeric(gregexpr('\\"',y)[[1]]),as.numeric(gregexpr("\\'",y)[[1]]))
x=x[x!=-1]
str.old=substr(y,x[1],x[2])
str.change=strip.path(substr(y,x[1]+1,x[2]-1))
if(grepl('source',y)){
str.out=paste0('unpack.list(RunStanGit(url.loc,dat.loc.in,"',str.change,'",flag=F))')
}else{
str.new=paste0('"',dat.loc,str.change,'"')
# Left-hand side of the assignment is reused as the tempfile variable name.
file.name=gsub(' ','',strsplit(y,'<-|=')[[1]][1])
# `<<-` so the tempfile variable is visible when the rewritten script is
# eval()'d later inside this function.
eval(parse(text=paste0(file.name,' <<- tempfile()')))
eval(parse(text=paste0('download.file(',str.new,',',file.name,',quiet = T,method="curl")')))
str.out=gsub(str.old,file.name,y)
}
str.out
}
dat.loc=paste0(url.loc,dat.loc.in)
code.loc=paste0(dat.loc,r.file)
#Read R code ----
r.code=strsplit(gsub('\\r','',getURL(code.loc)[1]),'\\n')[[1]]
#Rewrite paths for source and read commands to url path ----
for(i in which(grepl('read|source',r.code))) r.code[i]=setwd.url(r.code[i])
stan.find=which(grepl('stan\\(',r.code))
# Tempfile names of downloaded .stan files, for cleanup at the end.
to.unlink=rep(NA,length(stan.find))
#Find the names of the objects that the stan calls are saved to ----
keep.files=gsub(' ','',unlist(lapply(strsplit(r.code[which(grepl('stan\\(',r.code))],'<-'),'[',1)))
# Comment out print calls ----
# Use the parser (not a blind gsub) so only complete top-level
# print()/pairs() expressions are commented out, including multi-line ones.
pr.code <- parse(text=r.code)
pr.code <- utils::getParseData(pr.code,includeText = TRUE)
pr.code <- pr.code[pr.code$parent==0&grepl('^print|^pairs',pr.code$text),]
comment.lines <- unique(unlist(mapply(seq,from=pr.code$line1,to=pr.code$line2)))
r.code[comment.lines] <- sprintf('#%s',r.code[comment.lines])
#r.code=gsub('print','#print',r.code)
#r.code=gsub('pairs','#pairs',r.code)
# Comment out any bare mention of a kept stan object (auto-printing) that is
# neither an assignment nor already a comment.
if(length(keep.files)>0){
for(i in 1:length(keep.files)){
comment.out=r.code[grep(keep.files[i],r.code)[!grepl('#|<-|=',r.code[grep(keep.files[i],r.code)])]]
r.code[grep(keep.files[i],r.code)[!grepl('#|<-|=',r.code[grep(keep.files[i],r.code)])]]=paste0('#',comment.out)
}
}
#Download the stan file to a temp file and change the call to stan from a text object to a connection ----
if(length(stan.find)>0){
for(i in 1:length(stan.find)){
# Quote positions on the stan() call line, as in setwd.url() above.
x=c(as.numeric(gregexpr('\\"',r.code[stan.find[i]])[[1]]),as.numeric(gregexpr("\\'",r.code[stan.find[i]])[[1]]))
x=x[x!=-1]
file.name=strip.path(substr(r.code[stan.find[i]],x[1]+1,x[2]-1))
eval(parse(text=paste0(file.name," <- tempfile(fileext = '.stan')")))
loc.file=paste0('"',dat.loc,file.name,'"')
eval(parse(text=paste0('download.file(',loc.file,',',file.name,',quiet = TRUE,method="curl")')))
to.unlink[i]=file.name
r.code[stan.find[i]]=gsub(substr(r.code[stan.find[i]],x[1],x[2]),strip.path(substr(r.code[stan.find[i]],x[1]+1,x[2]-1)),r.code[stan.find[i]])
}
}
#Evaluate new code ----
eval(parse(text=r.code))
#Unlink temp stan files ----
junk=sapply(to.unlink[!is.na(to.unlink)],unlink)
#Return objects (conditional if call is nested or not) ----
# NOTE(review): the ls() pattern below is a regex character class, so it
# matches any name containing a character outside the listed set rather than
# excluding the names flag/r.code/keep.files literally -- verify intent.
if(flag){ret.obj=keep.files}else{ret.obj=ls(pattern = '[^(flag|r.code|keep.files)]')}
list.out <- sapply(ls()[ls()%in%ret.obj], function(x) get(x))
return(list.out)
#End of function ----
}
#example ----
# url.loc='https://raw.githubusercontent.com/stan-dev/example-models/master/ARM/'
# ex=data.frame(r.file=c('10.4_LackOfOverlapWhenTreat.AssignmentIsUnknown.R',
# '10.5_CasualEffectsUsingIV.R',
# '10.6_IVinaRegressionFramework.R', #sourcing another file
# '3.1_OnePredictor.R', #removing partial path to file
# '8.4_PredictiveSimulationToCheckFitOfTimeSeriesModels.R'), #removing echo call from readlines
# stringsAsFactors = F)
#
# ex$chapter=unlist(lapply(lapply(strsplit(ex$r.file,'[\\_]'),'[',1),function(x) paste('Ch',strsplit(x,'[\\.]')[[1]][1],sep='.')))
# ex$example=unlist(lapply(lapply(strsplit(ex$r.file,'[\\_]'),'[',1),function(x) strsplit(x,'[\\.]')[[1]][2]))
#
# a=plyr::dlply(ex%>%slice(c(1)),.(r.file),.fun=function(x) RunStanGit(url.loc,dat.loc=paste0(x$chapter,'/'),r.file=x$r.file),.progress = 'text')
#
# Functions to read output into nested list structure with data.frame in leaf
# stan.sim.out=llply(a,.fun=function(m){
# llply(m,.fun=function(stan.out){
# x=attributes(stan.out)
# x1=llply(x$sim$samples,attributes)
# names(x1)=c(1:length(x1))
# df.model=ldply(x1,.fun=function(x) do.call('cbind',x$sampler_params)%>%data.frame%>%mutate(Iter=1:nrow(.)),.id="Chain")
#
# df.samples=stan.out@sim$samples
# names(df.samples)=c(1:length(df.samples))
# df.samples=ldply(df.samples,.fun = function(y) data.frame(y)%>%mutate(Iter=1:nrow(.)),.id = 'Chain')
#
# df.model%>%left_join(df.samples,by=c('Chain','Iter'))
# })
# } )
#
# stan.sim.out.files=ldply(a,.fun=function(x) data.frame(stan.obj.output=names(x)))
|
# e2 - for etsim iteration 2, and beyond
# One full IPF (iterative proportional fitting) re-weighting pass over three
# constraint tables. Expects these objects in the workspace (created by an
# earlier iteration script) -- TODO confirm against e1/etsim:
#   weights  - 3-D array indexed [individual, zone, slot]; slot 4 carries the
#              running product of the previous pass (see first line below)
#   ind.agg  - 3-D array indexed [zone, category, stage-of-pass]
#   ind.cat  - individual-level 0/1 category-membership matrix
#   all.msim - zone-level constraint totals (columns of con1, con2, con3)
#   con1, con2, con3 - the three constraint tables
# Fold the previous iteration's per-constraint weights into the running product.
weights[,,4] <- weights[,,4] * weights[,,1] * weights[,,2] * weights[,,3]
ind.agg[,,1] <- ind.agg[,,4]
# re-weighting for constraint 1 via IPF
for (j in 1:nrow(all.msim)){
for(i in 1:ncol(con1)){
weights[which(ind.cat[,i] == 1),j,1] <- con1[j,i] /ind.agg[j,i,1]}}
# convert con1 weights back into aggregates
for (i in 1:nrow(all.msim)){
ind.agg[i,,2] <- colSums(ind.cat * weights[,i,4] * weights[,i,1])}
# test results for first row (visual check; differences should be near zero)
ind.agg[1,1:ncol(con1),2] - all.msim[1,1:ncol(con1)]
# second constraint
# Note: `:` binds tighter than `+`, so 1:ncol(con2) + ncol(con1) is
# (1:ncol(con2)) + ncol(con1), i.e. con2's columns offset past con1's.
for (j in 1:nrow(all.msim)){
for(i in 1:ncol(con2) + ncol(con1)){
weights[which(ind.cat[,i] == 1),j,2] <- all.msim[j,i] /ind.agg[j,i,2]}}
# convert con2 back into aggregate
for (i in 1:nrow(all.msim)){
ind.agg[i,,3] <- colSums(ind.cat * weights[,i,4] * weights[,i,1] *
weights[,i,2])}
# test results for row 5 (visual check against the constraint totals)
ind.agg[5,ncol(con1)+1:ncol(con2),3]
all.msim[5,ncol(con1)+1:ncol(con2)]
# third constraint (columns offset past con1 and con2)
for (j in 1:nrow(all.msim)){
for(i in 1:ncol(con3) + ncol(con1) + ncol(con2)){
weights[which(ind.cat[,i] == 1),j,3] <- all.msim[j,i] /ind.agg[j,i,3]}}
# convert con3 back into aggregate (final aggregates for this pass)
for (i in 1:nrow(all.msim)){
ind.agg[i,,4] <- colSums(ind.cat * weights[,i,4] * weights[,i,1] * weights[,i,2] *
weights[,i,3])}
|
/models/small-area/e2.R
|
no_license
|
Robinlovelace/IPF-performance-testing
|
R
| false
| false
| 1,371
|
r
|
# e2 - for etsim iteration 2, and beyond
# One full IPF (iterative proportional fitting) re-weighting pass over three
# constraint tables. Expects these objects in the workspace (created by an
# earlier iteration script) -- TODO confirm against e1/etsim:
#   weights  - 3-D array indexed [individual, zone, slot]; slot 4 carries the
#              running product of the previous pass (see first line below)
#   ind.agg  - 3-D array indexed [zone, category, stage-of-pass]
#   ind.cat  - individual-level 0/1 category-membership matrix
#   all.msim - zone-level constraint totals (columns of con1, con2, con3)
#   con1, con2, con3 - the three constraint tables
# Fold the previous iteration's per-constraint weights into the running product.
weights[,,4] <- weights[,,4] * weights[,,1] * weights[,,2] * weights[,,3]
ind.agg[,,1] <- ind.agg[,,4]
# re-weighting for constraint 1 via IPF
for (j in 1:nrow(all.msim)){
for(i in 1:ncol(con1)){
weights[which(ind.cat[,i] == 1),j,1] <- con1[j,i] /ind.agg[j,i,1]}}
# convert con1 weights back into aggregates
for (i in 1:nrow(all.msim)){
ind.agg[i,,2] <- colSums(ind.cat * weights[,i,4] * weights[,i,1])}
# test results for first row (visual check; differences should be near zero)
ind.agg[1,1:ncol(con1),2] - all.msim[1,1:ncol(con1)]
# second constraint
# Note: `:` binds tighter than `+`, so 1:ncol(con2) + ncol(con1) is
# (1:ncol(con2)) + ncol(con1), i.e. con2's columns offset past con1's.
for (j in 1:nrow(all.msim)){
for(i in 1:ncol(con2) + ncol(con1)){
weights[which(ind.cat[,i] == 1),j,2] <- all.msim[j,i] /ind.agg[j,i,2]}}
# convert con2 back into aggregate
for (i in 1:nrow(all.msim)){
ind.agg[i,,3] <- colSums(ind.cat * weights[,i,4] * weights[,i,1] *
weights[,i,2])}
# test results for row 5 (visual check against the constraint totals)
ind.agg[5,ncol(con1)+1:ncol(con2),3]
all.msim[5,ncol(con1)+1:ncol(con2)]
# third constraint (columns offset past con1 and con2)
for (j in 1:nrow(all.msim)){
for(i in 1:ncol(con3) + ncol(con1) + ncol(con2)){
weights[which(ind.cat[,i] == 1),j,3] <- all.msim[j,i] /ind.agg[j,i,3]}}
# convert con3 back into aggregate (final aggregates for this pass)
for (i in 1:nrow(all.msim)){
ind.agg[i,,4] <- colSums(ind.cat * weights[,i,4] * weights[,i,1] * weights[,i,2] *
weights[,i,3])}
|
#' Get / Set SPSS missing values
#'
#' @param x A vector.
#' @param value A vector of values that should also be considered as missing
#' (for \code{na_values}) or a numeric vector of length two giving the (inclusive)
#' extents of the range (for \code{na_values}, use \code{-Inf} and \code{Inf} if you
#' want the range to be open ended).
#' @details
#' See \code{\link{labelled_spss}} for a presentation of SPSS's user defined missing values.
#' Note that it is mandatory to define value labels before defining missing values.
#' You can use \code{\link{user_na_to_na}} to convert user defined missing values to \code{NA}.
#' @return
#' \code{na_values} will return a vector of values that should also be considered as missing.
#' \code{na_label} will return a numeric vector of length two giving the (inclusive)
#' extents of the range.
#' @seealso \code{\link{labelled_spss}}, \code{\link{user_na_to_na}}
#' @examples
#' v <- labelled(c(1,2,2,2,3,9,1,3,2,NA), c(yes = 1, no = 3, "don't know" = 9))
#' v
#' na_values(v) <- 9
#' na_values(v)
#' v
#' na_values(v) <- NULL
#' v
#' na_range(v) <- c(5, Inf)
#' na_range(v)
#' v
#' @export
na_values <- function(x) {
  UseMethod("na_values")
}

#' @export
na_values.default <- function(x) NULL  # non-labelled vectors carry no SPSS missing values

#' @export
na_values.labelled_spss <- function(x) {
  # user-defined missing values are stored in the "na_values" attribute
  attr(x, "na_values", exact = TRUE)
}

#' @export
na_values.data.frame <- function(x) {
  # one entry per column, names preserved by lapply()
  lapply(x, na_values)
}
#' @rdname na_values
#' @export
`na_values<-` <- function(x, value) {
  UseMethod("na_values<-")
}

#' @export
`na_values<-.default` <- function(x, value) {
  # Missing values may only be defined once value labels exist. Use the
  # scalar `&&` (not the vectorized `&`) inside `if`: the condition is
  # scalar and `&&` short-circuits when `value` is NULL.
  if (is.null(val_labels(x)) && !is.null(value))
    stop("Value labels need to be defined first. Please use val_labels().")
  # else do nothing
  x
}

#' @export
`na_values<-.labelled` <- function(x, value) {
  if (is.null(value)) {
    # Remove user-defined missing values; fall back to a plain "labelled"
    # class when no na_range remains either.
    attr(x, "na_values") <- NULL
    if (is.null(attr(x, "na_range")))
      class(x) <- "labelled"
  } else {
    if (is.null(val_labels(x)))
      stop("Value labels need to be defined first. Please use val_labels().")
    # Rebuild as labelled_spss, preserving any na_range already set.
    x <- labelled_spss(x, val_labels(x), na_values = value, na_range = attr(x, "na_range"))
  }
  x
}
#' @rdname na_values
#' @export
na_range <- function(x) {
  UseMethod("na_range")
}

#' @export
na_range.default <- function(x) NULL  # plain vectors have no SPSS missing-value range

#' @export
na_range.labelled_spss <- function(x) {
  # the (lower, upper) missing-value range lives in the "na_range" attribute
  attr(x, "na_range", exact = TRUE)
}

#' @export
na_range.data.frame <- function(x) {
  # one entry per column, names preserved by lapply()
  lapply(x, na_range)
}
#' @rdname na_values
#' @export
`na_range<-` <- function(x, value) {
  UseMethod("na_range<-")
}

#' @export
`na_range<-.default` <- function(x, value) {
  # A missing-value range may only be defined once value labels exist. Use
  # the scalar `&&` (not the vectorized `&`) inside `if`: the condition is
  # scalar and `&&` short-circuits when `value` is NULL.
  if (is.null(val_labels(x)) && !is.null(value))
    stop("Value labels need to be defined first. Please use val_labels().")
  # else do nothing
  x
}

#' @export
`na_range<-.labelled` <- function(x, value) {
  if (is.null(value)) {
    # Remove the missing-value range; fall back to a plain "labelled"
    # class when no na_values remain either.
    attr(x, "na_range") <- NULL
    if (is.null(attr(x, "na_values")))
      class(x) <- "labelled"
  } else {
    if (is.null(val_labels(x)))
      stop("Value labels need to be defined first. Please use val_labels().")
    # Rebuild as labelled_spss, preserving any na_values already set.
    x <- labelled_spss(x, val_labels(x), na_values = attr(x, "na_values"), na_range = value)
  }
  x
}
#' @rdname na_values
#' @param .data a data frame
#' @param ... name-value pairs of missing values (see examples)
#' @note
#' \code{set_na_values} and \code{set_na_range} could be used with \code{dplyr}.
#' @return
#' \code{set_na_values} and \code{set_na_range} will return an updated
#' copy of \code{.data}.
#' @examples
#' if (require(dplyr)) {
#' # setting value labels
#' df <- data_frame(s1 = c("M", "M", "F", "F"), s2 = c(1, 1, 2, 9)) %>%
#' set_value_labels(s2 = c(yes = 1, no = 2)) %>%
#' set_na_values(s2 = 9)
#' na_values(df)
#'
#' # removing missing values
#' df <- df %>% set_na_values(s2 = NULL)
#' df$s2
#' }
#' @export
set_na_values <- function(.data, ...) {
  # Collect the variable = missing-values pairs supplied by the caller.
  values <- list(...)
  unknown <- !(names(values) %in% names(.data))
  if (any(unknown))
    stop("some variables not found in .data")
  for (variable in names(values)) {
    na_values(.data[[variable]]) <- values[[variable]]
  }
  .data
}
#' @rdname na_values
#' @export
set_na_range <- function(.data, ...) {
  # Collect the variable = range pairs supplied by the caller.
  values <- list(...)
  unknown <- !(names(values) %in% names(.data))
  if (any(unknown))
    stop("some variables not found in .data")
  for (variable in names(values)) {
    na_range(.data[[variable]]) <- values[[variable]]
  }
  .data
}
|
/R/na_values.R
|
no_license
|
gdutz/labelled
|
R
| false
| false
| 4,347
|
r
|
#' Get / Set SPSS missing values
#'
#' @param x A vector.
#' @param value A vector of values that should also be considered as missing
#' (for \code{na_values}) or a numeric vector of length two giving the (inclusive)
#' extents of the range (for \code{na_values}, use \code{-Inf} and \code{Inf} if you
#' want the range to be open ended).
#' @details
#' See \code{\link{labelled_spss}} for a presentation of SPSS's user defined missing values.
#' Note that it is mandatory to define value labels before defining missing values.
#' You can use \code{\link{user_na_to_na}} to convert user defined missing values to \code{NA}.
#' @return
#' \code{na_values} will return a vector of values that should also be considered as missing.
#' \code{na_label} will return a numeric vector of length two giving the (inclusive)
#' extents of the range.
#' @seealso \code{\link{labelled_spss}}, \code{\link{user_na_to_na}}
#' @examples
#' v <- labelled(c(1,2,2,2,3,9,1,3,2,NA), c(yes = 1, no = 3, "don't know" = 9))
#' v
#' na_values(v) <- 9
#' na_values(v)
#' v
#' na_values(v) <- NULL
#' v
#' na_range(v) <- c(5, Inf)
#' na_range(v)
#' v
#' @export
na_values <- function(x) {
  UseMethod("na_values")
}

#' @export
na_values.default <- function(x) NULL  # non-labelled vectors carry no SPSS missing values

#' @export
na_values.labelled_spss <- function(x) {
  # user-defined missing values are stored in the "na_values" attribute
  attr(x, "na_values", exact = TRUE)
}

#' @export
na_values.data.frame <- function(x) {
  # one entry per column, names preserved by lapply()
  lapply(x, na_values)
}
#' @rdname na_values
#' @export
`na_values<-` <- function(x, value) {
  UseMethod("na_values<-")
}

#' @export
`na_values<-.default` <- function(x, value) {
  # Missing values may only be defined once value labels exist. Use the
  # scalar `&&` (not the vectorized `&`) inside `if`: the condition is
  # scalar and `&&` short-circuits when `value` is NULL.
  if (is.null(val_labels(x)) && !is.null(value))
    stop("Value labels need to be defined first. Please use val_labels().")
  # else do nothing
  x
}

#' @export
`na_values<-.labelled` <- function(x, value) {
  if (is.null(value)) {
    # Remove user-defined missing values; fall back to a plain "labelled"
    # class when no na_range remains either.
    attr(x, "na_values") <- NULL
    if (is.null(attr(x, "na_range")))
      class(x) <- "labelled"
  } else {
    if (is.null(val_labels(x)))
      stop("Value labels need to be defined first. Please use val_labels().")
    # Rebuild as labelled_spss, preserving any na_range already set.
    x <- labelled_spss(x, val_labels(x), na_values = value, na_range = attr(x, "na_range"))
  }
  x
}
#' @rdname na_values
#' @export
na_range <- function(x) {
  UseMethod("na_range")
}

#' @export
na_range.default <- function(x) NULL  # plain vectors have no SPSS missing-value range

#' @export
na_range.labelled_spss <- function(x) {
  # the (lower, upper) missing-value range lives in the "na_range" attribute
  attr(x, "na_range", exact = TRUE)
}

#' @export
na_range.data.frame <- function(x) {
  # one entry per column, names preserved by lapply()
  lapply(x, na_range)
}
#' @rdname na_values
#' @export
`na_range<-` <- function(x, value) {
  UseMethod("na_range<-")
}

#' @export
`na_range<-.default` <- function(x, value) {
  # A missing-value range may only be defined once value labels exist. Use
  # the scalar `&&` (not the vectorized `&`) inside `if`: the condition is
  # scalar and `&&` short-circuits when `value` is NULL.
  if (is.null(val_labels(x)) && !is.null(value))
    stop("Value labels need to be defined first. Please use val_labels().")
  # else do nothing
  x
}

#' @export
`na_range<-.labelled` <- function(x, value) {
  if (is.null(value)) {
    # Remove the missing-value range; fall back to a plain "labelled"
    # class when no na_values remain either.
    attr(x, "na_range") <- NULL
    if (is.null(attr(x, "na_values")))
      class(x) <- "labelled"
  } else {
    if (is.null(val_labels(x)))
      stop("Value labels need to be defined first. Please use val_labels().")
    # Rebuild as labelled_spss, preserving any na_values already set.
    x <- labelled_spss(x, val_labels(x), na_values = attr(x, "na_values"), na_range = value)
  }
  x
}
#' @rdname na_values
#' @param .data a data frame
#' @param ... name-value pairs of missing values (see examples)
#' @note
#' \code{set_na_values} and \code{set_na_range} could be used with \code{dplyr}.
#' @return
#' \code{set_na_values} and \code{set_na_range} will return an updated
#' copy of \code{.data}.
#' @examples
#' if (require(dplyr)) {
#' # setting value labels
#' df <- data_frame(s1 = c("M", "M", "F", "F"), s2 = c(1, 1, 2, 9)) %>%
#' set_value_labels(s2 = c(yes = 1, no = 2)) %>%
#' set_na_values(s2 = 9)
#' na_values(df)
#'
#' # removing missing values
#' df <- df %>% set_na_values(s2 = NULL)
#' df$s2
#' }
#' @export
set_na_values <- function(.data, ...) {
  # Collect the variable = missing-values pairs supplied by the caller.
  values <- list(...)
  unknown <- !(names(values) %in% names(.data))
  if (any(unknown))
    stop("some variables not found in .data")
  for (variable in names(values)) {
    na_values(.data[[variable]]) <- values[[variable]]
  }
  .data
}
#' @rdname na_values
#' @export
set_na_range <- function(.data, ...) {
  # Collect the variable = range pairs supplied by the caller.
  values <- list(...)
  unknown <- !(names(values) %in% names(.data))
  if (any(unknown))
    stop("some variables not found in .data")
  for (variable in names(values)) {
    na_range(.data[[variable]]) <- values[[variable]]
  }
  .data
}
|
# 30DayChartChallenge, day 6 (work in progress): timeline of the Valparaiso
# funiculars ("ascensores") -- one horizontal segment per funicular from its
# opening year to its closure year (or 2021 when still open).
library(tidyverse)
library(ggbump)     # geom_sigmoid() for the curved connectors
library(ggtext)
library(extrafont)
loadfonts(device = "win")   # Windows-only font registration
# Column names come from the Spanish-language sheet: Inauguracion = opening
# year, Cese_de_actividad = year service ceased (NA when still operating)
# -- TODO confirm against the spreadsheet.
df_years <- readxl::read_excel(here::here("data", "day_5_ascensores.xlsx"),
sheet = "years") %>%
mutate(fin = ifelse(is.na(Cese_de_actividad), 2021, Cese_de_actividad)) %>%
rowid_to_column("id") %>%
mutate(start = mean(id)) %>%
mutate(end = ifelse(!is.na(Cese_de_actividad), NA, mean(id)))
# Timeline plot: a sigmoid fan-out on the left, one operating segment per
# funicular, and a sigmoid fan-in on the right for those still in use.
ggplot(df_years) +
# thin connectors fanning out from a single point (x = 1850) to each row
geom_sigmoid(
aes(x = 1850,
xend = 1870,
y = mean(id),
yend = id,
group = factor(Nombre)),
color = "#8d8073",
size = 0.04) +
# thin lead-in line from the fan to each funicular's opening year
geom_segment(
aes(x = 1870,
y = id,
xend = Inauguracion,
yend = id),
color = "#8d8073",
size = 0.04) +
# thick segment = years of operation
geom_segment(
aes(x = Inauguracion,
y = id,
xend = fin,
yend = id),
size = 1.2,
color = "#9e9082") +
# fan-in on the right, only for funiculars still operating (closure is NA)
geom_sigmoid(
data = filter(df_years, is.na(Cese_de_actividad)),
aes(x = 2021,
xend = 2040,
y = id,
yend = mean(id),
group = factor(Nombre)),
color = "#8d8073",
size = 0.04) +
labs(
title = "The rise and decline of the Valparaíso funiculars",
caption = "Visualisation: Marcin Stepniak • Source: wikipedia"
) +
# per-funicular name labels, currently disabled
# geom_text(
# aes(x = (Inauguracion + fin)/2,
# y = id,
# label = Nombre),
# size = 8.5*5/14,
# color = "#46403a",
# family = "Segoe UI Light"
# ) +
ggthemes::theme_solid(fill = "#d8d0c8") +
scale_x_continuous(breaks = c(1900, 1950, 2000),
labels = c(1900, 1950, 2000)) +
theme(
plot.title = element_text(
family = "Segoe UI Historic",
size = 10,
face = "bold",
color = "#585048",
margin = margin(t = 3, b = 2, unit = "mm")),
plot.margin = margin(b = 0),
plot.caption = element_text(
family = "Segoe UI Historic",
size = 4,
# face = "bold",
color = "#585048",
hjust = 0.5,
margin = margin(t = 3, b = 2, unit = "mm")
),
panel.grid.major.x = element_line(
size = 0.1,
# linetype = "dotted",
color = "#efece9" # "#9e9082"
),
axis.text.x = element_text(
family = "Segoe UI",
size = 5,
color = "#585048"
)
)
# Export: SVG draft to the tests folder, PNG final to img/
ggsave(file = here::here("img", "tests", "day_6.svg"),
height = 7,
width = 12,
units = "cm",
dpi = 600)
ggsave(file = here::here("img", "day_6.png"),
height = 6,
width = 12,
units = "cm",
dpi = 600)
# Summary counts used alongside the chart.
# NOTE(review): the "in use" entry counts rows with a NON-NA closure year,
# i.e. funiculars that have closed -- the label looks inverted; verify.
df_share <- tibble(
name = c("total", "in use", "share"),
n = c(nrow(df_years),
nrow(filter(df_years, !is.na(Cese_de_actividad))),
round(nrow(filter(df_years, is.na(Cese_de_actividad)))*100/nrow(df_years)))
)
# Abandoned draft: year-by-year interval join counting operating funiculars.
# tibble(
# year = seq(min(df_years$Inauguracion), 2020)) %>%
# fuzzyjoin::fuzzy_left_join(
# df_years %>%
# select(Nombre, Inauguracion, fin),
# by = c("year" = "Inauguracion",
# "year" = "fin"),
# match_fun = list(`>=`, `<=`)
# )
|
/R/day_6_wip.R
|
permissive
|
stmarcin/30DayChartChallenge
|
R
| false
| false
| 3,418
|
r
|
# 30DayChartChallenge, day 6 (work in progress): timeline of the Valparaiso
# funiculars ("ascensores") -- one horizontal segment per funicular from its
# opening year to its closure year (or 2021 when still open).
library(tidyverse)
library(ggbump)     # geom_sigmoid() for the curved connectors
library(ggtext)
library(extrafont)
loadfonts(device = "win")   # Windows-only font registration
# Column names come from the Spanish-language sheet: Inauguracion = opening
# year, Cese_de_actividad = year service ceased (NA when still operating)
# -- TODO confirm against the spreadsheet.
df_years <- readxl::read_excel(here::here("data", "day_5_ascensores.xlsx"),
sheet = "years") %>%
mutate(fin = ifelse(is.na(Cese_de_actividad), 2021, Cese_de_actividad)) %>%
rowid_to_column("id") %>%
mutate(start = mean(id)) %>%
mutate(end = ifelse(!is.na(Cese_de_actividad), NA, mean(id)))
# Timeline plot: a sigmoid fan-out on the left, one operating segment per
# funicular, and a sigmoid fan-in on the right for those still in use.
ggplot(df_years) +
# thin connectors fanning out from a single point (x = 1850) to each row
geom_sigmoid(
aes(x = 1850,
xend = 1870,
y = mean(id),
yend = id,
group = factor(Nombre)),
color = "#8d8073",
size = 0.04) +
# thin lead-in line from the fan to each funicular's opening year
geom_segment(
aes(x = 1870,
y = id,
xend = Inauguracion,
yend = id),
color = "#8d8073",
size = 0.04) +
# thick segment = years of operation
geom_segment(
aes(x = Inauguracion,
y = id,
xend = fin,
yend = id),
size = 1.2,
color = "#9e9082") +
# fan-in on the right, only for funiculars still operating (closure is NA)
geom_sigmoid(
data = filter(df_years, is.na(Cese_de_actividad)),
aes(x = 2021,
xend = 2040,
y = id,
yend = mean(id),
group = factor(Nombre)),
color = "#8d8073",
size = 0.04) +
labs(
title = "The rise and decline of the Valparaíso funiculars",
caption = "Visualisation: Marcin Stepniak • Source: wikipedia"
) +
# per-funicular name labels, currently disabled
# geom_text(
# aes(x = (Inauguracion + fin)/2,
# y = id,
# label = Nombre),
# size = 8.5*5/14,
# color = "#46403a",
# family = "Segoe UI Light"
# ) +
ggthemes::theme_solid(fill = "#d8d0c8") +
scale_x_continuous(breaks = c(1900, 1950, 2000),
labels = c(1900, 1950, 2000)) +
theme(
plot.title = element_text(
family = "Segoe UI Historic",
size = 10,
face = "bold",
color = "#585048",
margin = margin(t = 3, b = 2, unit = "mm")),
plot.margin = margin(b = 0),
plot.caption = element_text(
family = "Segoe UI Historic",
size = 4,
# face = "bold",
color = "#585048",
hjust = 0.5,
margin = margin(t = 3, b = 2, unit = "mm")
),
panel.grid.major.x = element_line(
size = 0.1,
# linetype = "dotted",
color = "#efece9" # "#9e9082"
),
axis.text.x = element_text(
family = "Segoe UI",
size = 5,
color = "#585048"
)
)
# Export: SVG draft to the tests folder, PNG final to img/
ggsave(file = here::here("img", "tests", "day_6.svg"),
height = 7,
width = 12,
units = "cm",
dpi = 600)
ggsave(file = here::here("img", "day_6.png"),
height = 6,
width = 12,
units = "cm",
dpi = 600)
# Summary counts used alongside the chart.
# NOTE(review): the "in use" entry counts rows with a NON-NA closure year,
# i.e. funiculars that have closed -- the label looks inverted; verify.
df_share <- tibble(
name = c("total", "in use", "share"),
n = c(nrow(df_years),
nrow(filter(df_years, !is.na(Cese_de_actividad))),
round(nrow(filter(df_years, is.na(Cese_de_actividad)))*100/nrow(df_years)))
)
# Abandoned draft: year-by-year interval join counting operating funiculars.
# tibble(
# year = seq(min(df_years$Inauguracion), 2020)) %>%
# fuzzyjoin::fuzzy_left_join(
# df_years %>%
# select(Nombre, Inauguracion, fin),
# by = c("year" = "Inauguracion",
# "year" = "fin"),
# match_fun = list(`>=`, `<=`)
# )
|
#' Calculate model performance metrics
#'
#' Calculates model performance metrics from the netcdf file generated by running `run_ensemble`
#'
#' @param ncdf Path to the netcdf file generated by running `run_ensemble`
#' @param list Alternatively to `ncdf` a list of simulated variables, as returned by
#' `run_ensemble()` when argument `return_list = TRUE`
#' @param dim character; NetCDF dimensions to extract. Must be either "member" or "model". Defaults to "model". Only used if using the netCDF file. Currently only works with "model".
#' @param dim_index numeric; Index of dimension chosen to extract from. Defaults to 1. Only used if using the netCDF file.
#' @param model Vector of models for which to calculate the performance measures
#' @param var Variable for which to calculate the performance measures.
#' Defaults to "temp".
#' @param qualfun Function to calculate the performance measures. Per default calculates root
#' mean squared error (rmse), Nash-Sutcliffe efficiency (nse), Pearson correlation (r),
#' bias (bias), mean absolute error (mae), normalized mean absolute error (nmae), and bias.
#' Can be any function that takes observed data as first, and simulated data at the same time
#' and depth as the second argument.
#' @param avfun Name of the function to calculate the ensemble average, defaults to "mean"
#' @author Johannes Feldbauer
#' @export
#' @examples
#' \dontrun{
#' # using standard quality measures
#' calc_fit(ncdf = "output/ensemble_output.nc",
#'          model = c("FLake", "GLM", "GOTM", "Simstrat", "MyLake"),
#'          var = "temp")
#' # using own performance measure
#' calc_fit(ncdf = "output/ensemble_output.nc",
#'          model = c("FLake", "GLM", "GOTM", "Simstrat", "MyLake"),
#'          var = "temp", qualfun = function(O, S) mean(O - S, na.rm = TRUE))
#' }
calc_fit <- function(ncdf, list = NULL, model, var = "temp", dim = "model", dim_index = 1,
                     qualfun = qual_fun, avfun = "mean") {
  # check if model input is correct
  model <- check_models(model)
  if(is.null(list)) {
    # get variable from the netCDF file
    var_list <- load_var(ncdf, var = var, return = "list", dim = dim,
                         dim_index = dim_index, print = FALSE)
    if(dim_index != 1 & dim == "model") {
      # observations live at dim_index 1 — fetch them separately and attach
      obs_l <- load_var(ncdf, var = var, return = "list", dim = dim,
                        dim_index = 1, print = FALSE)
      var_list$Obs <- obs_l$Obs
    }
  } else {
    # use the caller-supplied list of simulated variables instead of the file
    var_list <- list
    if(any(names(var_list) %in% paste0(c(model, "Obs"), "_", var))) {
      # strip the "_<var>" suffix convention from the list element names
      names(var_list) <- c(model, "Obs")
    }
  }
  # only the selected models
  if(dim == "model") {
    var_list <- var_list[c(model, "Obs")]
    n <- names(var_list)
    n_no_obs <- model
    # only select depth where observations are available
    obs_col <- which(apply(var_list$Obs, 2, function(x)sum(!is.na(x))) != 0)
    var_list <- lapply(c(model, "Obs"), function(m) dplyr::select(var_list[[m]], all_of(obs_col)))
    names(var_list) <- c(model, "Obs")
    # create list with long format data.frames (one row per datetime x depth,
    # with the matching observation in column `obs`)
    var_long <- lapply(model, function(m)
      cbind(data.frame(reshape2::melt(var_list[[m]],id.vars = "datetime")),
            data.frame(obs = reshape2::melt(var_list$Obs,id.vars = "datetime")$value)))
    names(var_long) <- model
  } else {
    # load observations
    obs_list <- load_var(ncdf, var = var, return = "list", dim = "model",
                         dim_index = 1, print = FALSE)
    var_list <- c(var_list, Obs = list(obs_list$Obs))
    # only select depth where observations are available
    obs_col <- which(apply(obs_list$Obs, 2, function(x)sum(!is.na(x))) != 0)
    n <- names(var_list)
    var_list <- lapply(n, function(m) dplyr::select(var_list[[m]], all_of(obs_col)))
    names(var_list) <- n
    n_no_obs <- n[! n %in% "Obs"]
    # create list with long format data.frames
    var_long <- lapply(n_no_obs, function(m)
      cbind(data.frame(reshape2::melt(var_list[[m]],id.vars = "datetime")),
            data.frame(obs = reshape2::melt(var_list$Obs,id.vars = "datetime")$value)))
    names(var_long) <- n_no_obs
  }
  # convert melted column names ("wtr_<depth>") to negative numeric depths
  var_long <- purrr::map(var_long,
                         function(m) dplyr::mutate(m, variable = -as.numeric(gsub("wtr_", "",
                                                                                  variable))))
  # calculate ensemble average across members/models, row-wise, using `avfun`
  ens_data <- var_long[[n_no_obs[1]]]
  ens_data$value <- apply(sapply(n_no_obs, function(m) var_long[[m]]$value), 1, get(avfun),
                          na.rm = TRUE)
  var_long[[paste0("ensemble_",avfun)]] <- ens_data
  # calculate quality measures (observed first, simulated second)
  qual <- lapply(var_long, function(m){qualfun(m$obs, m$value)})
  return(qual)
}
|
/R/calc_fit.R
|
permissive
|
addelany/LakeEnsemblR
|
R
| false
| false
| 4,759
|
r
|
#' Calculate model performance metrics
#'
#' Calculates model performance metrics from the netcdf file generated by running `run_ensemble`
#'
#' @param ncdf Path to the netcdf file generated by running `run_ensemble`
#' @param list Alternatively to `ncdf` a list of simulated variables, as returned by
#' `run_ensemble()` when argument `return_list = TRUE`
#' @param dim character; NetCDF dimensions to extract. Must be either "member" or "model". Defaults to "model". Only used if using the netCDF file. Currently only works with "model".
#' @param dim_index numeric; Index of dimension chosen to extract from. Defaults to 1. Only used if using the netCDF file.
#' @param model Vector of models for which to calculate the performance measures
#' @param var Variable for which to calculate the performance measures.
#' Defaults to "temp".
#' @param qualfun Function to calculate the performance measures. Per default calculates root
#' mean squared error (rmse), Nash-Sutcliffe efficiency (nse), Pearson correlation (r),
#' bias (bias), mean absolute error (mae), normalized mean absolute error (nmae), and bias.
#' Can be any function that takes observed data as first, and simulated data at the same time
#' and depth as the second argument.
#' @param avfun Name of the function to calculate the ensemble average, defaults to "mean"
#' @author Johannes Feldbauer
#' @export
#' @examples
#' \dontrun{
#' # using standard quality measures
#' calc_fit(ncdf = "output/ensemble_output.nc",
#'          model = c("FLake", "GLM", "GOTM", "Simstrat", "MyLake"),
#'          var = "temp")
#' # using own performance measure
#' calc_fit(ncdf = "output/ensemble_output.nc",
#'          model = c("FLake", "GLM", "GOTM", "Simstrat", "MyLake"),
#'          var = "temp", qualfun = function(O, S) mean(O - S, na.rm = TRUE))
#' }
calc_fit <- function(ncdf, list = NULL, model, var = "temp", dim = "model", dim_index = 1,
                     qualfun = qual_fun, avfun = "mean") {
  # check if model input is correct
  model <- check_models(model)
  if(is.null(list)) {
    # get variable from the netCDF file
    var_list <- load_var(ncdf, var = var, return = "list", dim = dim,
                         dim_index = dim_index, print = FALSE)
    if(dim_index != 1 & dim == "model") {
      # observations live at dim_index 1 — fetch them separately and attach
      obs_l <- load_var(ncdf, var = var, return = "list", dim = dim,
                        dim_index = 1, print = FALSE)
      var_list$Obs <- obs_l$Obs
    }
  } else {
    # use the caller-supplied list of simulated variables instead of the file
    var_list <- list
    if(any(names(var_list) %in% paste0(c(model, "Obs"), "_", var))) {
      # strip the "_<var>" suffix convention from the list element names
      names(var_list) <- c(model, "Obs")
    }
  }
  # only the selected models
  if(dim == "model") {
    var_list <- var_list[c(model, "Obs")]
    n <- names(var_list)
    n_no_obs <- model
    # only select depth where observations are available
    obs_col <- which(apply(var_list$Obs, 2, function(x)sum(!is.na(x))) != 0)
    var_list <- lapply(c(model, "Obs"), function(m) dplyr::select(var_list[[m]], all_of(obs_col)))
    names(var_list) <- c(model, "Obs")
    # create list with long format data.frames (one row per datetime x depth,
    # with the matching observation in column `obs`)
    var_long <- lapply(model, function(m)
      cbind(data.frame(reshape2::melt(var_list[[m]],id.vars = "datetime")),
            data.frame(obs = reshape2::melt(var_list$Obs,id.vars = "datetime")$value)))
    names(var_long) <- model
  } else {
    # load observations
    obs_list <- load_var(ncdf, var = var, return = "list", dim = "model",
                         dim_index = 1, print = FALSE)
    var_list <- c(var_list, Obs = list(obs_list$Obs))
    # only select depth where observations are available
    obs_col <- which(apply(obs_list$Obs, 2, function(x)sum(!is.na(x))) != 0)
    n <- names(var_list)
    var_list <- lapply(n, function(m) dplyr::select(var_list[[m]], all_of(obs_col)))
    names(var_list) <- n
    n_no_obs <- n[! n %in% "Obs"]
    # create list with long format data.frames
    var_long <- lapply(n_no_obs, function(m)
      cbind(data.frame(reshape2::melt(var_list[[m]],id.vars = "datetime")),
            data.frame(obs = reshape2::melt(var_list$Obs,id.vars = "datetime")$value)))
    names(var_long) <- n_no_obs
  }
  # convert melted column names ("wtr_<depth>") to negative numeric depths
  var_long <- purrr::map(var_long,
                         function(m) dplyr::mutate(m, variable = -as.numeric(gsub("wtr_", "",
                                                                                  variable))))
  # calculate ensemble average across members/models, row-wise, using `avfun`
  ens_data <- var_long[[n_no_obs[1]]]
  ens_data$value <- apply(sapply(n_no_obs, function(m) var_long[[m]]$value), 1, get(avfun),
                          na.rm = TRUE)
  var_long[[paste0("ensemble_",avfun)]] <- ens_data
  # calculate quality measures (observed first, simulated second)
  qual <- lapply(var_long, function(m){qualfun(m$obs, m$value)})
  return(qual)
}
|
#library(hexSticker)
# Red-on-white hex sticker for the safetyGraphics package.  The sticker()
# call below is intentionally commented out; to regenerate
# inst/safetyGraphicsHex/safetyGraphicsHex.png, uncomment it together
# with library(hexSticker) above.
#red on white
imgurl <- "./inst/safetyGraphicsHex/noun_heart_rate_210541_ec5d57.png"
# sticker(
#   imgurl,
#   filename="./inst/safetyGraphicsHex/safetyGraphicsHex.png",
#   package = "safetyGraphics",
#   p_color = "#666666",
#   p_size = 5,
#   p_family = "serif",
#   p_y = 0.5,
#   s_x = 1,
#   s_y=1.1,
#   s_width = 0.8,
#   h_color = "black",
#   h_fill= "white")
|
/inst/safetyGraphicsHex/safetyHex.R
|
no_license
|
cran/safetyGraphics
|
R
| false
| false
| 394
|
r
|
#library(hexSticker)
# Red-on-white hex sticker for the safetyGraphics package.  The sticker()
# call below is intentionally commented out; to regenerate
# inst/safetyGraphicsHex/safetyGraphicsHex.png, uncomment it together
# with library(hexSticker) above.
#red on white
imgurl <- "./inst/safetyGraphicsHex/noun_heart_rate_210541_ec5d57.png"
# sticker(
#   imgurl,
#   filename="./inst/safetyGraphicsHex/safetyGraphicsHex.png",
#   package = "safetyGraphics",
#   p_color = "#666666",
#   p_size = 5,
#   p_family = "serif",
#   p_y = 0.5,
#   s_x = 1,
#   s_y=1.1,
#   s_width = 0.8,
#   h_color = "black",
#   h_fill= "white")
|
# Map raw sample/file identifiers in the column names of `data` to
# human-readable tissue / sample-type labels.
#
# The renaming is order-dependent: each grep() runs against the column
# names as rewritten by the preceding steps, so earlier rules can mask
# later ones.  Sample-sheet driven renames (N37*, STL*, Indx*) are looked
# up in the saminfo table loaded below (col 1 = raw id, col 2 = label).
# Two exact-duplicate rules from the original (second "fThymus_cells" and
# second "X7.P|RRBS.7P") were removed: after the first application the
# renamed columns no longer matched, so they were no-ops.
#
# @param data data.frame/matrix whose column names are raw sample ids
# @return `data` with rewritten column names
rename<-function(data){
  # lookup table mapping raw ids to tissue labels
  saminfo<-read.table("/media/NAS1/shg047/monod/hapinfo/N37Salk.saminfo",sep="\t")
  head(saminfo)
  # strip file-name suffixes first so the pattern rules see bare ids
  colnames(data)<-gsub(".allchrs.rmdup.bam.mhbs","",colnames(data))
  colnames(data)<-gsub(".allchrs.sorted.clipped.bam.mhbs","",colnames(data))
  colnames(data)[grep("age|new|centenarian",colnames(data))]<-"WBC"
  colnames(data)[grep("X7.T",colnames(data))]<-"LCT"
  colnames(data)[grep("X6.T",colnames(data))]<-"CCT"
  colnames(data)[grep("methylC-seq_h1",colnames(data))]<-"H1"
  colnames(data)[grep("normal_lung",colnames(data))]<-"Lung"
  colnames(data)[grep("normal_prostate",colnames(data))]<-"Prostate"
  colnames(data)[grep("normal_brain",colnames(data))]<-"Brain"
  colnames(data)[grep("normal_colon|Colon_Primary_Normal",colnames(data))]<-"Colon"
  colnames(data)[grep("normal_breast",colnames(data))]<-"Breast"
  colnames(data)[grep("normal_liver",colnames(data))]<-"Liver"
  colnames(data)[grep("normal_CD19",colnames(data))]<-"WBC"
  colnames(data)[grep("Frontal_cortex_normal",colnames(data))]<-"Brain"
  colnames(data)[grep("fetal_heart_cells",colnames(data))]<-"Heart"
  colnames(data)[grep("SRX381710_normal_placenta|fPlacenta_cells",colnames(data))]<-"Placenta"
  # NOTE(review): "adult_CD*" is a regex meaning "adult_C" followed by zero
  # or more "D"s, i.e. it matches any name containing "adult_C".  The
  # literal prefix "adult_CD" was probably intended -- confirm before
  # tightening, as narrowing the pattern would change which columns match.
  colnames(data)[grep("adult_CD*",colnames(data))]<-"WBC"
  colnames(data)[grep("fAdrenal_cells",colnames(data))]<-"Adrenal"
  colnames(data)[grep("fThymus_cells",colnames(data))]<-"Thymus"
  colnames(data)[grep("fMuscle_Leg__cells|Muscle_Trunk__cells",colnames(data))]<-"Muscle"
  colnames(data)[grep("fStomach_cells",colnames(data))]<-"Stomach"
  # sample-sheet lookups: replace matching ids by their saminfo labels
  colnames(data)[grep("N37",colnames(data))]<-as.character(saminfo[match(colnames(data)[grep("N37",colnames(data))],saminfo[,1]),2])
  colnames(data)[grep("STL",colnames(data))]<-as.character(saminfo[match(colnames(data)[grep("STL",colnames(data))],saminfo[,1]),2])
  colnames(data)[grep("Indx",colnames(data))]<-as.character(saminfo[match(colnames(data)[grep("Indx",colnames(data))],saminfo[,1]),2])
  colnames(data)[grep("X6.P|RRBS.6P",colnames(data))]<-"CCP"
  colnames(data)[grep("X7.P|RRBS.7P",colnames(data))]<-"LCP"
  colnames(data)[grep("BS-seq-P1-N|BS-seq-P2-N",colnames(data))]<-"Kidney"
  colnames(data)[grep("BS-seq-P1-T|BS-seq-P2-T",colnames(data))]<-"PKIRCT"
  colnames(data)[grep("tumor_liver",colnames(data))]<-"PLIHCT"
  colnames(data)[grep("tumor_glioblastoma|tumor_neuroblastoma",colnames(data))]<-"PGBMT"
  colnames(data)[grep("CD19_cells",colnames(data))]<-"WBC"
  colnames(data)[grep("Colon_Tumor_Primary|SRX381569_tumor_colon",colnames(data))]<-"PCOADT"
  colnames(data)[grep("SRX381621_tumor_breast",colnames(data))]<-"PBRCAT"
  colnames(data)[grep("tumor_prostate",colnames(data))]<-"PPRADT"
  colnames(data)[grep("SRX381722_small_cell_tumor_lung|SRX381719_squamous_cell_tumor_lung|SRX381716_adenocarcinoma_lung",colnames(data))]<-"PLCT"
  return(data)
}
# Driver: load the MHL matrix, cache it as .RData, then relabel columns.
setwd("/media/Home_Raid1/shg047/NAS1/monod/mhl/0530")
data<-read.table("WGBS.getHaplo.mhl.mhbs",head=T,row.names=1,sep="\t",as.is=T,check.names = F)
save(data,file="data.RData")
load("data.RData")
colnames(data)  # raw sample ids, before renaming
data<-rename(data)
colnames(data)  # human-readable labels, after renaming
|
/monod/Manuscript/MONOD_analysis_scripts/bak/rename.R
|
no_license
|
Shicheng-Guo/methylation2020
|
R
| false
| false
| 3,206
|
r
|
# Map raw sample/file identifiers in the column names of `data` to
# human-readable tissue / sample-type labels.
#
# The renaming is order-dependent: each grep() runs against the column
# names as rewritten by the preceding steps, so earlier rules can mask
# later ones.  Sample-sheet driven renames (N37*, STL*, Indx*) are looked
# up in the saminfo table loaded below (col 1 = raw id, col 2 = label).
# Two exact-duplicate rules from the original (second "fThymus_cells" and
# second "X7.P|RRBS.7P") were removed: after the first application the
# renamed columns no longer matched, so they were no-ops.
#
# @param data data.frame/matrix whose column names are raw sample ids
# @return `data` with rewritten column names
rename<-function(data){
  # lookup table mapping raw ids to tissue labels
  saminfo<-read.table("/media/NAS1/shg047/monod/hapinfo/N37Salk.saminfo",sep="\t")
  head(saminfo)
  # strip file-name suffixes first so the pattern rules see bare ids
  colnames(data)<-gsub(".allchrs.rmdup.bam.mhbs","",colnames(data))
  colnames(data)<-gsub(".allchrs.sorted.clipped.bam.mhbs","",colnames(data))
  colnames(data)[grep("age|new|centenarian",colnames(data))]<-"WBC"
  colnames(data)[grep("X7.T",colnames(data))]<-"LCT"
  colnames(data)[grep("X6.T",colnames(data))]<-"CCT"
  colnames(data)[grep("methylC-seq_h1",colnames(data))]<-"H1"
  colnames(data)[grep("normal_lung",colnames(data))]<-"Lung"
  colnames(data)[grep("normal_prostate",colnames(data))]<-"Prostate"
  colnames(data)[grep("normal_brain",colnames(data))]<-"Brain"
  colnames(data)[grep("normal_colon|Colon_Primary_Normal",colnames(data))]<-"Colon"
  colnames(data)[grep("normal_breast",colnames(data))]<-"Breast"
  colnames(data)[grep("normal_liver",colnames(data))]<-"Liver"
  colnames(data)[grep("normal_CD19",colnames(data))]<-"WBC"
  colnames(data)[grep("Frontal_cortex_normal",colnames(data))]<-"Brain"
  colnames(data)[grep("fetal_heart_cells",colnames(data))]<-"Heart"
  colnames(data)[grep("SRX381710_normal_placenta|fPlacenta_cells",colnames(data))]<-"Placenta"
  # NOTE(review): "adult_CD*" is a regex meaning "adult_C" followed by zero
  # or more "D"s, i.e. it matches any name containing "adult_C".  The
  # literal prefix "adult_CD" was probably intended -- confirm before
  # tightening, as narrowing the pattern would change which columns match.
  colnames(data)[grep("adult_CD*",colnames(data))]<-"WBC"
  colnames(data)[grep("fAdrenal_cells",colnames(data))]<-"Adrenal"
  colnames(data)[grep("fThymus_cells",colnames(data))]<-"Thymus"
  colnames(data)[grep("fMuscle_Leg__cells|Muscle_Trunk__cells",colnames(data))]<-"Muscle"
  colnames(data)[grep("fStomach_cells",colnames(data))]<-"Stomach"
  # sample-sheet lookups: replace matching ids by their saminfo labels
  colnames(data)[grep("N37",colnames(data))]<-as.character(saminfo[match(colnames(data)[grep("N37",colnames(data))],saminfo[,1]),2])
  colnames(data)[grep("STL",colnames(data))]<-as.character(saminfo[match(colnames(data)[grep("STL",colnames(data))],saminfo[,1]),2])
  colnames(data)[grep("Indx",colnames(data))]<-as.character(saminfo[match(colnames(data)[grep("Indx",colnames(data))],saminfo[,1]),2])
  colnames(data)[grep("X6.P|RRBS.6P",colnames(data))]<-"CCP"
  colnames(data)[grep("X7.P|RRBS.7P",colnames(data))]<-"LCP"
  colnames(data)[grep("BS-seq-P1-N|BS-seq-P2-N",colnames(data))]<-"Kidney"
  colnames(data)[grep("BS-seq-P1-T|BS-seq-P2-T",colnames(data))]<-"PKIRCT"
  colnames(data)[grep("tumor_liver",colnames(data))]<-"PLIHCT"
  colnames(data)[grep("tumor_glioblastoma|tumor_neuroblastoma",colnames(data))]<-"PGBMT"
  colnames(data)[grep("CD19_cells",colnames(data))]<-"WBC"
  colnames(data)[grep("Colon_Tumor_Primary|SRX381569_tumor_colon",colnames(data))]<-"PCOADT"
  colnames(data)[grep("SRX381621_tumor_breast",colnames(data))]<-"PBRCAT"
  colnames(data)[grep("tumor_prostate",colnames(data))]<-"PPRADT"
  colnames(data)[grep("SRX381722_small_cell_tumor_lung|SRX381719_squamous_cell_tumor_lung|SRX381716_adenocarcinoma_lung",colnames(data))]<-"PLCT"
  return(data)
}
# Driver: load the MHL matrix, cache it as .RData, then relabel columns.
setwd("/media/Home_Raid1/shg047/NAS1/monod/mhl/0530")
data<-read.table("WGBS.getHaplo.mhl.mhbs",head=T,row.names=1,sep="\t",as.is=T,check.names = F)
save(data,file="data.RData")
load("data.RData")
colnames(data)  # raw sample ids, before renaming
data<-rename(data)
colnames(data)  # human-readable labels, after renaming
|
#######################################################################
# set parameters of XBART
# Assemble the XBART hyper-parameter list used throughout the simulation.
#
# @param y numeric response vector; only its variance is used, to set the
#   prior variance of the leaf parameter mu (tau = var(y) / num_trees).
# @return named list of XBART settings.
get_XBART_params <- function(y) {
  params <- list(
    num_trees = 30,              # number of trees
    num_sweeps = 40,             # number of sweeps (samples of the forest)
    n_min = 1,                   # minimal node size
    alpha = 0.95,                # BART prior parameter
    beta = 1.25,                 # BART prior parameter
    mtry = 10,                   # variables sampled at each split
    burnin = 15,                 # MCMC burn-in sweeps
    no_split_penality = "Auto"
  )
  params$max_depth <- 250
  params$num_cutpoints <- 100    # number of adaptive cutpoints
  # prior variance of mu (leaf parameter), scaled by the tree count
  params$tau <- var(y) / params$num_trees
  params
}
# data generating process
# `type` selects the true function f(x); one of: linear, singleindex, tripoly, max
type = "singleindex"
noise_ratio = 1 # noise sd as a fraction of sd(f(x))
# number of replications of the whole experiment
# NOTE(review): `rep` shadows base::rep; later calls like rep(0, n) still
# resolve to the base function, but the name is fragile.
rep = 5
# per-replication results; columns = (XBART, BART, warm start)
cover = matrix(0, rep, 3)
len = matrix(0, rep, 3)
running_time = matrix(0, rep, 3)
rmse = matrix(0, rep, 3)
#######################################################################
library(XBART)
library(BART)
set.seed(100)
new_data = TRUE # generate new data
run_dbarts = FALSE # run dbarts
run_xgboost = FALSE # run xgboost
run_lightgbm = FALSE # run lightgbm
parl = TRUE # parallel computing
small_case = TRUE # run simulation on small data set
verbose = FALSE # print the progress on screen
if (small_case) {
  n = 10000 # size of training set
  nt = 5000 # size of testing set
  d = 30 # number of TOTAL variables
  dcat = 0 # number of categorical variables
  # must be d >= dcat
  # (X_continuous, X_categorical), 10 and 10 for each case, 20 in total
} else {
  n = 1000000
  nt = 10000
  d = 50
  dcat = 0
}
# Main experiment loop: for each replication draw fresh data, fit XBART,
# BART, and warm-started BART, and record 95% interval coverage of the
# noiseless truth, interval length, run time, and RMSE.
for(kk in 1:rep){
  #######################################################################
  # Data generating process
  #######################################################################
  # Have to put continuous variables first, then categorical variables  #
  # X = (X_continuous, X_categorical)                                   #
  #######################################################################
  if (new_data) {
    # training design: continuous U(-2, 2) columns, then integer columns
    # sampled from {-2, ..., 2}
    if (d != dcat) {
      x = matrix(runif((d - dcat) * n, -2, 2), n, d - dcat)
      if (dcat > 0) {
        x = cbind(x, matrix(as.numeric(sample(-2:2, dcat * n, replace = TRUE)), n, dcat))
      }
    } else {
      x = matrix(as.numeric(sample(-2:2, dcat * n, replace = TRUE)), n, dcat)
    }
    # testing design, generated the same way
    if (d != dcat) {
      xtest = matrix(runif((d - dcat) * nt, -2, 2), nt, d - dcat)
      if (dcat > 0) {
        xtest = cbind(xtest, matrix(as.numeric(sample(-2:2, dcat * nt, replace = TRUE)), nt, dcat))
      }
    } else {
      xtest = matrix(as.numeric(sample(-2:2, dcat * nt, replace = TRUE)), nt, dcat)
    }
    # true regression function, selected by the global `type`
    f = function(x) {
      # sin(rowSums(x[, 3:4] ^ 2)) + sin(rowSums(x[, 1:2] ^ 2)) + (x[, 15] + x[, 14]) ^ 2 * (x[, 1] + x[, 2] ^ 2) / (3 + x[, 3] + x[, 14] ^ 2)
      #rowSums(x[,1:30]^2)
      #pmax(x[,1]*x[,2], abs(x[,3])*(x[,10]>x[,15])+abs(x[,4])*(x[,10]<=x[,15]))
      output = 0
      if(type == "linear"){
        # linear in all d columns, coefficients evenly spaced on [-2, 2]
        for(i in 1:d){
          output = output + x[,i] * (-2 + 4 * (i -1) / (d - 1))
        }
      }else if(type == "singleindex"){
        # single-index model built from the first 10 columns
        a = 0
        for(i in 1:10){
          g = -1.5 + (i - 1) / 3
          a = a + (x[,i] - g)^2
        }
        output = 10 * sqrt(a) + sin(5 * a)
      }else if(type == "tripoly"){
        output = 5 * sin(3 * x[,1]) + 2 * x[,2]^2 + 3 * x[,3] * x[,4]
      }else if(type == "max"){
        # row-wise maximum of the first three columns
        output = rep(0, dim(x)[1])
        for(i in 1:(dim(x)[1])){
          output[i] = max(max(x[i, 1], x[i, 2]), x[i, 3])
        }
      }
      return(output)
    }
    # to test if ties cause a crash in continuous variables
    # x[, 1] = round(x[, 1], 4)
    #xtest[,1] = round(xtest[,1],2)
    ftrue = f(x)
    ftest = f(xtest)
    # noise sd scaled relative to the signal sd
    sigma = noise_ratio * sd(ftrue)
    #y = ftrue + sigma*(rgamma(n,1,1)-1)/(3+x[,d])
    #y_test = ftest + sigma*(rgamma(nt,1,1)-1)/(3+xtest[,d])
    y = ftrue + sigma * rnorm(n)
    y_test = ftest + sigma * rnorm(nt)
  }
  #######################################################################
  # XBART
  # helper binning values by percentiles of training column j
  # NOTE(review): defined but apparently unused below — confirm.
  categ <- function(z, j) {
    q = as.numeric(quantile(x[, j], seq(0, 1, length.out = 100)))
    output = findInterval(z, c(q, + Inf))
    return(output)
  }
  params = get_XBART_params(y)
  time = proc.time()
  # XBART
  fit = XBART(as.matrix(y), as.matrix(x), as.matrix(xtest), p_categorical = dcat,
              params$num_trees, params$num_sweeps, params$max_depth,
              params$n_min, alpha = params$alpha, beta = params$beta, tau = params$tau, s = 1, kap = 1,
              mtry = params$mtry, verbose = TRUE,
              num_cutpoints = params$num_cutpoints, parallel = parl, random_seed = 100, no_split_penality = params$no_split_penality)
  ################################
  # two ways to predict on testing set
  # 1. set xtest as input to main fitting function
  fhat.1 = apply(fit$yhats_test[, params$burnin:params$num_sweeps], 1, mean)
  time = proc.time() - time
  print(time[3])
  # 2. a separate predict function
  pred = predict(fit, xtest)
  pred = rowMeans(pred[, params$burnin:params$num_sweeps])
  time_XBART = round(time[3], 3)
  pred2 = predict(fit, xtest)
  pred2 = rowMeans(pred2[, params$burnin:params$num_sweeps])
  # sanity check: predict() must be deterministic across calls
  stopifnot(pred == pred2)
  #####
  # bart with default initialization
  time = proc.time()
  fit_bart = wbart(x, y, x.test = xtest, numcut = params$num_cutpoints, ntree = params$num_trees, ndpost = 100 * (params$num_sweeps - params$burnin), nskip = 1000)
  time = proc.time() - time
  time_BART = round(time[3], 3)
  pred_bart = colMeans(predict(fit_bart, xtest))
  # # bart with XBART initialization
  # fit_bart2 = wbart_ini(treedraws = fit$treedraws, x, y, x.test = xtest, numcut = params$num_cutpoints, ntree = params$num_trees, nskip = 0, ndpost = 100, sigest = mean(fit$sigma))
  # pred_bart_ini = colMeans(predict(fit_bart2, xtest))
  # xbart_rmse = sqrt(mean((fhat.1 - ftest) ^ 2))
  # bart_rmse = sqrt(mean((pred_bart - ftest)^2))
  # bart_ini_rmse = sqrt(mean((pred_bart_ini - ftest)^2))
  # xbart_rmse
  # bart_rmse
  # bart_ini_rmse
  #######################################################################
  # Calculate coverage
  #######################################################################
  # coverage of the real average
  # Warm start: run a short BART chain seeded from each kept XBART sweep
  # and stack the posterior draws row-wise.
  draw_BART_XBART = c()
  time_warm_start_all = rep(0, length(params$burnin:params$num_sweeps))
  for(i in params$burnin:params$num_sweeps){
    # bart with XBART initialization
    cat("------------- i ", i , "\n")
    set.seed(1)
    time = proc.time()
    fit_bart2 = wbart_ini(treedraws = fit$treedraws[i], x, y, x.test = xtest, numcut = params$num_cutpoints, ntree = params$num_trees, nskip = 0, ndpost = 100)
    time = proc.time() - time
    draw_BART_XBART = rbind(draw_BART_XBART, fit_bart2$yhat.test)
    time_warm_start_all[i - params$burnin + 1] = time[3]
  }
  # #######################################################################
  # # print
  # out-of-sample RMSE against the noiseless truth ftest
  xbart_rmse = sqrt(mean((fhat.1 - ftest) ^ 2))
  bart_rmse = sqrt(mean((pred_bart - ftest)^2))
  bart_ini_rmse = sqrt(mean((colMeans(draw_BART_XBART) - ftest)^2))
  xbart_rmse
  bart_rmse
  bart_ini_rmse
  # Empirical 95% interval coverage and length at each test point for the
  # three methods.  NOTE(review): `length` shadows base::length below;
  # function calls like length(...) still resolve to the base function,
  # but the name is fragile.
  coverage = c(0,0,0)
  length = matrix(0, nt, 3)
  for(i in 1:nt){
    lower = quantile(fit$yhats_test[i, params$burnin:params$num_sweeps], 0.025)
    higher = quantile(fit$yhats_test[i, params$burnin:params$num_sweeps], 0.975)
    if(ftest[i] < higher && ftest[i] > lower){
      coverage[1] = coverage[1] + 1
    }
    length[i,1] = higher - lower
    lower = quantile(fit_bart$yhat.test[,i], 0.025)
    higher = quantile(fit_bart$yhat.test[,i], 0.975)
    if(ftest[i] < higher && ftest[i] > lower){
      coverage[2] = coverage[2] + 1
    }
    length[i,2] = higher - lower
    lower = quantile(draw_BART_XBART[,i], 0.025)
    higher = quantile(draw_BART_XBART[,i], 0.975)
    if(ftest[i] < higher && ftest[i] > lower){
      coverage[3] = coverage[3] + 1
    }
    length[i,3] = higher - lower
  }
  # store this replication's results
  cover[kk, ] = coverage / nt
  len[kk, ] = colMeans(length)
  running_time[kk, ] = c(time_XBART, time_BART, mean(time_warm_start_all))
  rmse[kk, ] = c(xbart_rmse, bart_rmse, bart_ini_rmse)
}
# Average each metric over the replications and print a 4 x 3 summary
# (rows: coverage, interval length, running time, RMSE; columns: methods).
results <- round(
  rbind(
    coverage = colMeans(cover),
    `interval length` = colMeans(len),
    `running time` = colMeans(running_time),
    RMSE = colMeans(rmse)
  ),
  4
)
colnames(results) <- c("XBART", "BART", "warm start")
print(results)
|
/tests/test_BART_init.R
|
no_license
|
andrewherren/XBCF
|
R
| false
| false
| 8,386
|
r
|
#######################################################################
# set parameters of XBART
# Assemble the XBART hyper-parameter list used throughout the simulation.
#
# @param y numeric response vector; only its variance is used, to set the
#   prior variance of the leaf parameter mu (tau = var(y) / num_trees).
# @return named list of XBART settings.
get_XBART_params <- function(y) {
  params <- list(
    num_trees = 30,              # number of trees
    num_sweeps = 40,             # number of sweeps (samples of the forest)
    n_min = 1,                   # minimal node size
    alpha = 0.95,                # BART prior parameter
    beta = 1.25,                 # BART prior parameter
    mtry = 10,                   # variables sampled at each split
    burnin = 15,                 # MCMC burn-in sweeps
    no_split_penality = "Auto"
  )
  params$max_depth <- 250
  params$num_cutpoints <- 100    # number of adaptive cutpoints
  # prior variance of mu (leaf parameter), scaled by the tree count
  params$tau <- var(y) / params$num_trees
  params
}
# data generating process
# `type` selects the true function f(x); one of: linear, singleindex, tripoly, max
type = "singleindex"
noise_ratio = 1 # noise sd as a fraction of sd(f(x))
# number of replications of the whole experiment
# NOTE(review): `rep` shadows base::rep; later calls like rep(0, n) still
# resolve to the base function, but the name is fragile.
rep = 5
# per-replication results; columns = (XBART, BART, warm start)
cover = matrix(0, rep, 3)
len = matrix(0, rep, 3)
running_time = matrix(0, rep, 3)
rmse = matrix(0, rep, 3)
#######################################################################
library(XBART)
library(BART)
set.seed(100)
new_data = TRUE # generate new data
run_dbarts = FALSE # run dbarts
run_xgboost = FALSE # run xgboost
run_lightgbm = FALSE # run lightgbm
parl = TRUE # parallel computing
small_case = TRUE # run simulation on small data set
verbose = FALSE # print the progress on screen
if (small_case) {
  n = 10000 # size of training set
  nt = 5000 # size of testing set
  d = 30 # number of TOTAL variables
  dcat = 0 # number of categorical variables
  # must be d >= dcat
  # (X_continuous, X_categorical), 10 and 10 for each case, 20 in total
} else {
  n = 1000000
  nt = 10000
  d = 50
  dcat = 0
}
# Main experiment loop: for each replication draw fresh data, fit XBART,
# BART, and warm-started BART, and record 95% interval coverage of the
# noiseless truth, interval length, run time, and RMSE.
for(kk in 1:rep){
  #######################################################################
  # Data generating process
  #######################################################################
  # Have to put continuous variables first, then categorical variables  #
  # X = (X_continuous, X_categorical)                                   #
  #######################################################################
  if (new_data) {
    # training design: continuous U(-2, 2) columns, then integer columns
    # sampled from {-2, ..., 2}
    if (d != dcat) {
      x = matrix(runif((d - dcat) * n, -2, 2), n, d - dcat)
      if (dcat > 0) {
        x = cbind(x, matrix(as.numeric(sample(-2:2, dcat * n, replace = TRUE)), n, dcat))
      }
    } else {
      x = matrix(as.numeric(sample(-2:2, dcat * n, replace = TRUE)), n, dcat)
    }
    # testing design, generated the same way
    if (d != dcat) {
      xtest = matrix(runif((d - dcat) * nt, -2, 2), nt, d - dcat)
      if (dcat > 0) {
        xtest = cbind(xtest, matrix(as.numeric(sample(-2:2, dcat * nt, replace = TRUE)), nt, dcat))
      }
    } else {
      xtest = matrix(as.numeric(sample(-2:2, dcat * nt, replace = TRUE)), nt, dcat)
    }
    # true regression function, selected by the global `type`
    f = function(x) {
      # sin(rowSums(x[, 3:4] ^ 2)) + sin(rowSums(x[, 1:2] ^ 2)) + (x[, 15] + x[, 14]) ^ 2 * (x[, 1] + x[, 2] ^ 2) / (3 + x[, 3] + x[, 14] ^ 2)
      #rowSums(x[,1:30]^2)
      #pmax(x[,1]*x[,2], abs(x[,3])*(x[,10]>x[,15])+abs(x[,4])*(x[,10]<=x[,15]))
      output = 0
      if(type == "linear"){
        # linear in all d columns, coefficients evenly spaced on [-2, 2]
        for(i in 1:d){
          output = output + x[,i] * (-2 + 4 * (i -1) / (d - 1))
        }
      }else if(type == "singleindex"){
        # single-index model built from the first 10 columns
        a = 0
        for(i in 1:10){
          g = -1.5 + (i - 1) / 3
          a = a + (x[,i] - g)^2
        }
        output = 10 * sqrt(a) + sin(5 * a)
      }else if(type == "tripoly"){
        output = 5 * sin(3 * x[,1]) + 2 * x[,2]^2 + 3 * x[,3] * x[,4]
      }else if(type == "max"){
        # row-wise maximum of the first three columns
        output = rep(0, dim(x)[1])
        for(i in 1:(dim(x)[1])){
          output[i] = max(max(x[i, 1], x[i, 2]), x[i, 3])
        }
      }
      return(output)
    }
    # to test if ties cause a crash in continuous variables
    # x[, 1] = round(x[, 1], 4)
    #xtest[,1] = round(xtest[,1],2)
    ftrue = f(x)
    ftest = f(xtest)
    # noise sd scaled relative to the signal sd
    sigma = noise_ratio * sd(ftrue)
    #y = ftrue + sigma*(rgamma(n,1,1)-1)/(3+x[,d])
    #y_test = ftest + sigma*(rgamma(nt,1,1)-1)/(3+xtest[,d])
    y = ftrue + sigma * rnorm(n)
    y_test = ftest + sigma * rnorm(nt)
  }
  #######################################################################
  # XBART
  # helper binning values by percentiles of training column j
  # NOTE(review): defined but apparently unused below — confirm.
  categ <- function(z, j) {
    q = as.numeric(quantile(x[, j], seq(0, 1, length.out = 100)))
    output = findInterval(z, c(q, + Inf))
    return(output)
  }
  params = get_XBART_params(y)
  time = proc.time()
  # XBART
  fit = XBART(as.matrix(y), as.matrix(x), as.matrix(xtest), p_categorical = dcat,
              params$num_trees, params$num_sweeps, params$max_depth,
              params$n_min, alpha = params$alpha, beta = params$beta, tau = params$tau, s = 1, kap = 1,
              mtry = params$mtry, verbose = TRUE,
              num_cutpoints = params$num_cutpoints, parallel = parl, random_seed = 100, no_split_penality = params$no_split_penality)
  ################################
  # two ways to predict on testing set
  # 1. set xtest as input to main fitting function
  fhat.1 = apply(fit$yhats_test[, params$burnin:params$num_sweeps], 1, mean)
  time = proc.time() - time
  print(time[3])
  # 2. a separate predict function
  pred = predict(fit, xtest)
  pred = rowMeans(pred[, params$burnin:params$num_sweeps])
  time_XBART = round(time[3], 3)
  pred2 = predict(fit, xtest)
  pred2 = rowMeans(pred2[, params$burnin:params$num_sweeps])
  # sanity check: predict() must be deterministic across calls
  stopifnot(pred == pred2)
  #####
  # bart with default initialization
  time = proc.time()
  fit_bart = wbart(x, y, x.test = xtest, numcut = params$num_cutpoints, ntree = params$num_trees, ndpost = 100 * (params$num_sweeps - params$burnin), nskip = 1000)
  time = proc.time() - time
  time_BART = round(time[3], 3)
  pred_bart = colMeans(predict(fit_bart, xtest))
  # # bart with XBART initialization
  # fit_bart2 = wbart_ini(treedraws = fit$treedraws, x, y, x.test = xtest, numcut = params$num_cutpoints, ntree = params$num_trees, nskip = 0, ndpost = 100, sigest = mean(fit$sigma))
  # pred_bart_ini = colMeans(predict(fit_bart2, xtest))
  # xbart_rmse = sqrt(mean((fhat.1 - ftest) ^ 2))
  # bart_rmse = sqrt(mean((pred_bart - ftest)^2))
  # bart_ini_rmse = sqrt(mean((pred_bart_ini - ftest)^2))
  # xbart_rmse
  # bart_rmse
  # bart_ini_rmse
  #######################################################################
  # Calculate coverage
  #######################################################################
  # coverage of the real average
  # Warm start: run a short BART chain seeded from each kept XBART sweep
  # and stack the posterior draws row-wise.
  draw_BART_XBART = c()
  time_warm_start_all = rep(0, length(params$burnin:params$num_sweeps))
  for(i in params$burnin:params$num_sweeps){
    # bart with XBART initialization
    cat("------------- i ", i , "\n")
    set.seed(1)
    time = proc.time()
    fit_bart2 = wbart_ini(treedraws = fit$treedraws[i], x, y, x.test = xtest, numcut = params$num_cutpoints, ntree = params$num_trees, nskip = 0, ndpost = 100)
    time = proc.time() - time
    draw_BART_XBART = rbind(draw_BART_XBART, fit_bart2$yhat.test)
    time_warm_start_all[i - params$burnin + 1] = time[3]
  }
  # #######################################################################
  # # print
  # out-of-sample RMSE against the noiseless truth ftest
  xbart_rmse = sqrt(mean((fhat.1 - ftest) ^ 2))
  bart_rmse = sqrt(mean((pred_bart - ftest)^2))
  bart_ini_rmse = sqrt(mean((colMeans(draw_BART_XBART) - ftest)^2))
  xbart_rmse
  bart_rmse
  bart_ini_rmse
  # Empirical 95% interval coverage and length at each test point for the
  # three methods.  NOTE(review): `length` shadows base::length below;
  # function calls like length(...) still resolve to the base function,
  # but the name is fragile.
  coverage = c(0,0,0)
  length = matrix(0, nt, 3)
  for(i in 1:nt){
    lower = quantile(fit$yhats_test[i, params$burnin:params$num_sweeps], 0.025)
    higher = quantile(fit$yhats_test[i, params$burnin:params$num_sweeps], 0.975)
    if(ftest[i] < higher && ftest[i] > lower){
      coverage[1] = coverage[1] + 1
    }
    length[i,1] = higher - lower
    lower = quantile(fit_bart$yhat.test[,i], 0.025)
    higher = quantile(fit_bart$yhat.test[,i], 0.975)
    if(ftest[i] < higher && ftest[i] > lower){
      coverage[2] = coverage[2] + 1
    }
    length[i,2] = higher - lower
    lower = quantile(draw_BART_XBART[,i], 0.025)
    higher = quantile(draw_BART_XBART[,i], 0.975)
    if(ftest[i] < higher && ftest[i] > lower){
      coverage[3] = coverage[3] + 1
    }
    length[i,3] = higher - lower
  }
  # store this replication's results
  cover[kk, ] = coverage / nt
  len[kk, ] = colMeans(length)
  running_time[kk, ] = c(time_XBART, time_BART, mean(time_warm_start_all))
  rmse[kk, ] = c(xbart_rmse, bart_rmse, bart_ini_rmse)
}
# Average each metric over the replications and print a 4 x 3 summary
# (rows: coverage, interval length, running time, RMSE; columns: methods).
results <- round(
  rbind(
    coverage = colMeans(cover),
    `interval length` = colMeans(len),
    `running time` = colMeans(running_time),
    RMSE = colMeans(rmse)
  ),
  4
)
colnames(results) <- c("XBART", "BART", "warm start")
print(results)
|
## Read data from file and filter only the rows needed for this exercise
## (2880 one-minute rows after skipping 66637 lines; presumably the two
## target days 2007-02-01/02 -- confirm against the raw file)
rawdata <- read.table("household_power_consumption.txt", header = FALSE, sep = ";", na.strings = "?", skip = 66637, nrow = 2880, stringsAsFactors = FALSE)
## Recover column names from the file header (skipped by the read above)
colnames(rawdata) <- colnames(read.table("household_power_consumption.txt", header = TRUE, sep = ";", nrow = 1))
## Convert Date and Time to true date time values
rawdata$datetime <- strptime(paste(rawdata$Date, rawdata$Time), "%d/%m/%Y %H:%M:%S", tz = "CET")
## Set output to PNG
png("plot4.png", width = 480, height = 480)
## Configure 2 x 2 plot
par(mfrow=c(2,2))
## Generate top left plot - this is copied from plot2.R
plot(rawdata[c("datetime", "Global_active_power")], type = "l", xlab = "", ylab = "Global Active Power")
## Generate top right plot
plot(rawdata[c("datetime", "Voltage")], type = "l")
## Generate bottom left plot - this is copied from plot3.R
plot(rawdata[c("datetime", "Sub_metering_1")], type = "l", xlab = "", ylab = "Energy sub metering")
lines(rawdata[c("datetime", "Sub_metering_2")], col = "red")
lines(rawdata[c("datetime", "Sub_metering_3")], col = "blue")
legend("topright", bty = "n", lwd = 2, col = c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Generate bottom right plot
plot(rawdata[c("datetime", "Global_reactive_power")], type = "l")
dev.off()
|
/plot4.R
|
no_license
|
felixlauyh/ExData_Plotting1
|
R
| false
| false
| 1,358
|
r
|
## Read data from file and filter only the rows needed for this exercise
## (2880 one-minute rows after skipping 66637 lines; presumably the two
## target days 2007-02-01/02 -- confirm against the raw file)
rawdata <- read.table("household_power_consumption.txt", header = FALSE, sep = ";", na.strings = "?", skip = 66637, nrow = 2880, stringsAsFactors = FALSE)
## Recover column names from the file header (skipped by the read above)
colnames(rawdata) <- colnames(read.table("household_power_consumption.txt", header = TRUE, sep = ";", nrow = 1))
## Convert Date and Time to true date time values
rawdata$datetime <- strptime(paste(rawdata$Date, rawdata$Time), "%d/%m/%Y %H:%M:%S", tz = "CET")
## Set output to PNG
png("plot4.png", width = 480, height = 480)
## Configure 2 x 2 plot
par(mfrow=c(2,2))
## Generate top left plot - this is copied from plot2.R
plot(rawdata[c("datetime", "Global_active_power")], type = "l", xlab = "", ylab = "Global Active Power")
## Generate top right plot
plot(rawdata[c("datetime", "Voltage")], type = "l")
## Generate bottom left plot - this is copied from plot3.R
plot(rawdata[c("datetime", "Sub_metering_1")], type = "l", xlab = "", ylab = "Energy sub metering")
lines(rawdata[c("datetime", "Sub_metering_2")], col = "red")
lines(rawdata[c("datetime", "Sub_metering_3")], col = "blue")
legend("topright", bty = "n", lwd = 2, col = c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Generate bottom right plot
plot(rawdata[c("datetime", "Global_reactive_power")], type = "l")
dev.off()
|
# Kalman filter factory built on the FKF package's fkf() routine.
source("FilteringHelper.R")
# INPUT (matrices passed straight through to fkf; defaults give a scalar model)
#  P0:  initial state covariance
#  dt:  state intercept;  ct: observation intercept
#  Tt:  system model coefficient (state transition)
#  HHt: system model noise covariance (variance, i.e. sd^2)
#  Zt:  observation model coefficient
#  GGt: observation model noise covariance (variance, i.e. sd^2)
# OUTPUT (fields of the fkf object returned by the inner closure)
#  att: filtered state
#  at:  predicted state
#  Ptt: variance of att
#  Pt:  variance of at
KalmanFilter <- function(P0=matrix(1), dt=matrix(0), ct=matrix(0), Tt=array(1,c(1,1,1)), Zt=array(1,c(1,1,1)),
                         HHt=array(1,c(1,1,1)), GGt=array(1,c(1,1,1))){
  return(
    # Closure: runs the filter on `observation` (yt) starting from
    # `initialState` (a0), using the model matrices captured above.
    # checkInput() comes from FilteringHelper.R.
    function(observation, initialState, check.input = TRUE){
      if(check.input) checkInput(observation=observation,initialState=initialState)
      library(FKF)
      fkf.obj <- fkf(a0=initialState,P0=P0,dt=dt,ct=ct,Tt=Tt,Zt=Zt,HHt=HHt,GGt=GGt,yt=observation,check.input=check.input)
      return(fkf.obj)
    }
  )
}
}
|
/KalmanFilter.R
|
no_license
|
watermouth/FiringRateEstimation
|
R
| false
| false
| 842
|
r
|
# Kalman filter factory built on the FKF package's fkf() routine.
source("FilteringHelper.R")
# INPUT (matrices passed straight through to fkf; defaults give a scalar model)
#  P0:  initial state covariance
#  dt:  state intercept;  ct: observation intercept
#  Tt:  system model coefficient (state transition)
#  HHt: system model noise covariance (variance, i.e. sd^2)
#  Zt:  observation model coefficient
#  GGt: observation model noise covariance (variance, i.e. sd^2)
# OUTPUT (fields of the fkf object returned by the inner closure)
#  att: filtered state
#  at:  predicted state
#  Ptt: variance of att
#  Pt:  variance of at
KalmanFilter <- function(P0=matrix(1), dt=matrix(0), ct=matrix(0), Tt=array(1,c(1,1,1)), Zt=array(1,c(1,1,1)),
                         HHt=array(1,c(1,1,1)), GGt=array(1,c(1,1,1))){
  return(
    # Closure: runs the filter on `observation` (yt) starting from
    # `initialState` (a0), using the model matrices captured above.
    # checkInput() comes from FilteringHelper.R.
    function(observation, initialState, check.input = TRUE){
      if(check.input) checkInput(observation=observation,initialState=initialState)
      library(FKF)
      fkf.obj <- fkf(a0=initialState,P0=P0,dt=dt,ct=ct,Tt=Tt,Zt=Zt,HHt=HHt,GGt=GGt,yt=observation,check.input=check.input)
      return(fkf.obj)
    }
  )
}
|
# Lasso (mixture = 1) linear regression via the glmnet engine; the penalty
# is left as a tuning parameter.
glm_model_tx <- linear_reg(
  mixture = 1,
  penalty = tune()
) %>%
  set_mode("regression") %>%
  set_engine("glmnet")
# Tuning grid: 50 evenly spaced penalty values on [0, 2], untransformed.
glm_model_grid_tx <- glm_model_tx %>%
  parameters() %>%
  update(penalty = penalty(range = c(0, 2), trans = NULL)) %>%
  grid_regular(levels = 50)
# Preprocessing recipe factory (a magrittr functional sequence): drop id
# columns, model Target against everything else, remove zero-variance
# predictors, median-impute numeric predictors, Yeo-Johnson transform.
glm_rcp_tx <- . %>%
  select(-any_of(c("consid", "product"))) %>%
  recipe(formula = Target ~ .) %>%
  step_zv(all_predictors()) %>%
  step_impute_median(all_numeric(), -all_outcomes()) %>%
  step_YeoJohnson(all_predictors())
# Model config to tibble: one row per model with list-columns for the
# model spec, tuning grid and recipe factory.
model_configs_tx <- list(
  GLMNet_num_tx = list(
    model = glm_model_tx,
    grid = glm_model_grid_tx,
    pre_rcp = glm_rcp_tx,
    type = "num"
  )
) %>%
  map(enframe) %>%
  enframe("model_name") %>%
  unnest("value") %>%
  pivot_wider() %>%
  mutate(type = unlist(type))
|
/scripts/scripts/model_development/model_config_tx.R
|
no_license
|
paritosh-aigora/gsk-centrum-classic
|
R
| false
| false
| 804
|
r
|
glm_model_tx <- linear_reg(
mixture = 1,
penalty = tune()
) %>%
set_mode("regression") %>%
set_engine("glmnet")
glm_model_grid_tx <- glm_model_tx %>%
parameters() %>%
update(penalty = penalty(range = c(0, 2), trans = NULL)) %>%
grid_regular(levels = 50)
glm_rcp_tx <- . %>%
select(-any_of(c("consid", "product"))) %>%
recipe(formula = Target ~ .) %>%
step_zv(all_predictors()) %>%
step_impute_median(all_numeric(), -all_outcomes()) %>%
step_YeoJohnson(all_predictors())
# Model config to tibble
model_configs_tx <- list(
GLMNet_num_tx = list(
model = glm_model_tx,
grid = glm_model_grid_tx,
pre_rcp = glm_rcp_tx,
type = "num"
)
) %>%
map(enframe) %>%
enframe("model_name") %>%
unnest("value") %>%
pivot_wider() %>%
mutate(type = unlist(type))
|
# ====================== Generate species viral discovery rate curves for mammals at the Order level =====================
# proof of concept/general trends
# fit curves across time epochs to examine changes in trends
# how robust are these to adjusting for annual publication effort?
# root dir and dependencies
# dependencies and basedir
pacman::p_load("dplyr", "magrittr", "stringr", "ggplot2", "inlabru", "INLA", "here")
setwd("C:/Users/roryj/Documents/PhD/202008_pathogendiscovery/code/pathogen_discovery/")
# domestic species to label
domestic = read.csv("./data/clover/domestic_status/HostLookup_Domestic.csv", stringsAsFactors = FALSE)
# associations data and no humans
clover = read.csv("./data/clover/Clover_v1.0_NBCIreconciled_20201218.csv", stringsAsFactors = FALSE) %>%
dplyr::filter(Host != "Homo sapiens") %>%
dplyr::mutate(Domestic = ifelse(Host %in% domestic$Host, TRUE, FALSE)) %>%
dplyr::filter(DetectionMethod != "Not specified")
# total viral family richness by order
tr = clover %>%
group_by(HostOrder) %>%
dplyr::summarise(VRichness = n_distinct(VirusFamily))
# publication effort by year at Order-level, with wild/domestic split
# this needs some thought and work if it's worth including
# pubs = read.csv("./output/host_effort/PubMed_Hosts_PubsByYear_Final.csv", stringsAsFactors = FALSE) %>%
# dplyr::filter(Note == "") %>%
# dplyr::select(1:3)
# pubs = pubs %>%
# left_join(assoc[ , c("Host", "HostOrder", "Domestic") ]) %>%
# dplyr::mutate(NumPubs = replace(NumPubs, is.na(NumPubs), 0))
# pubs_order_all = pubsx %>%
# dplyr::group_by(HostOrder, Year) %>%
# dplyr::summarise(NumPubs = sum(NumPubs, na.rm=TRUE))
# pubs_order_dom = pubsx %>%
# dplyr::group_by(HostOrder, Year, Domestic) %>%
# dplyr::summarise(NumPubs = sum(NumPubs, na.rm=TRUE))
# 1. all species
# unique associations by order and year
dd = clover %>%
dplyr::group_by(HostOrder, VirusFamily) %>%
dplyr::summarise(Database = paste(unique(Database), collapse=", "),
HostSpecies = paste(unique(Host), collapse=", "),
NumRecords = length(Year),
YearEarliest = min(Year, na.rm=TRUE),
YearLatest = max(Year, na.rm=TRUE)) %>%
left_join(tr) %>%
dplyr::arrange(desc(VRichness), HostOrder, YearEarliest)
# create cumulative curves of all years
curves = expand.grid(unique(dd$HostOrder), 1930:2016) %>%
dplyr::rename("HostOrder" = 1, "YearEarliest" = 2) %>%
left_join(dd[ , c("HostOrder", "YearEarliest", "VirusFamily")]) %>%
dplyr::group_by(HostOrder, YearEarliest) %>%
dplyr::summarise(Discovered = sum(!is.na(VirusFamily)),
VirusFamily = paste(unique(VirusFamily), collapse=", ")) %>%
left_join(dd[ !duplicated(dd$HostOrder), c("HostOrder", "VRichness") ]) %>%
dplyr::arrange(desc(VRichness), HostOrder, YearEarliest) %>%
dplyr::group_by(HostOrder) %>%
dplyr::mutate(VirusCumulative = cumsum(Discovered)) %>%
dplyr::rename("Year" = YearEarliest)
ggplot(curves[ curves$Year <= 2010, ]) +
geom_line(aes(Year, VirusCumulative)) +
facet_wrap(~HostOrder, scales="free_y")
# combine with publication effort
# curves = left_join(curves, pubs_order_all) %>%
# dplyr::mutate(NumPubs = replace(NumPubs, is.na(NumPubs), 0))
# 2. split out by wild/domestic
# # unique associations by order, year
# ddw = clover %>%
# dplyr::filter(!is.na(Year)) %>%
# dplyr::group_by(HostOrder, Domestic, Virus) %>%
# dplyr::summarise(Database = paste(unique(Database), collapse=", "),
# HostSpecies = paste(unique(Host), collapse=", "),
# NumRecords = length(Year),
# YearEarliest = min(Year, na.rm=TRUE),
# YearLatest = max(Year, na.rm=TRUE)) %>%
# left_join(tr) %>%
# dplyr::arrange(desc(VRichness), HostOrder, YearEarliest)
#
# # create cumulative curves of all years
# curvesw = expand.grid(unique(ddw$HostOrder), unique(ddw$Domestic), 1930:2016) %>%
# dplyr::rename("HostOrder" = 1, "Domestic" = 2, "YearEarliest" = 3) %>%
# left_join(ddw[ , c("HostOrder", "Domestic", "YearEarliest", "Virus")]) %>%
# dplyr::group_by(HostOrder, Domestic, YearEarliest) %>%
# dplyr::summarise(Discovered = sum(!is.na(Virus)),
# Virus = paste(unique(Virus), collapse=", ")) %>%
# left_join(ddw[ !duplicated(ddw$HostOrder), c("HostOrder", "VRichness") ]) %>%
# dplyr::arrange(desc(VRichness), HostOrder, Domestic, YearEarliest) %>%
# dplyr::group_by(HostOrder, Domestic) %>%
# dplyr::mutate(VirusCumulative = cumsum(Discovered)) %>%
# dplyr::rename("Year" = YearEarliest)
#
# # combine with publication effort
# # # need to debug this
# # curvesw = left_join(curves, pubs_order_dom) %>%
# # dplyr::mutate(NumPubs = replace(NumPubs, is.na(NumPubs), 0))
#
# ggplot(curvesw[ curvesw$Year <= 2010, ]) +
# geom_line(aes(Year, VirusCumulative, col=Domestic)) +
# facet_wrap(~HostOrder, scales="free_y")
# ================ for each Order, fit poisson model to discovery rates ===================
# fits Poisson model of discovery rates (novel virus counts per year) for a specified order with specified data
# equivalent to fitting an inhomogeneous 1D Poisson process but without smudging event times
# for 3 time epochs: 1930 to present, 1960 to present, and 1990 to present
# currently effect of year is linear (i.e. exponential with log-link); could explore SPDE but may be more intractable if we're interested in broad trends
# Fit a single time-epoch Poisson discovery-rate model.
#
# dx:        per-year discovery counts for one order (already restricted to
#            the epoch's year range).
# label:     epoch label stored in the `model` column (e.g. "1930").
# order:     HostOrder name, stamped onto the fixed-effects table.
# n_samples: Monte Carlo samples for the inlabru posterior prediction.
#            (The original code used 1995 for the 1990 epoch and 2000 for
#            the others — an apparent typo; standardized to 2000.)
# Returns list(fixed = fixed-effects summary, pred = observed data joined
# with the predicted rate curve).
fit_epoch = function(dx, label, order, n_samples = 2000){
  # linear year index within the epoch (1..n), used as the Poisson trend
  dx$yearx = seq_len(nrow(dx))
  bru_mod = bru(Discovered ~ yearx + Intercept, dx, family = "poisson")
  # fixed effects, renamed to quantile-style column names
  fx = bru_mod$summary.fixed
  fx$param = row.names(fx)
  names(fx)[ 3:5 ] = c("q0.025", "median", "q0.975")
  fx$model = label
  fx$HostOrder = order
  row.names(fx) = c()
  # posterior predicted rate curve (exp() because of the Poisson log link)
  x4pred = data.frame(yearx = seq_len(nrow(dx)))
  predx = predict(bru_mod, x4pred, ~ exp(yearx + Intercept), n.samples = n_samples)
  res = left_join(dx, predx, by = "yearx")
  res$model = label
  list(fixed = fx, pred = res)
}

# Fit Poisson models of viral discovery rates (novel virus counts per year)
# for one host order across three nested time epochs: 1930-2010, 1960-2010
# and 1990-2010 (2010 is the hard cut-off where reporting tapers off).
#
# order: HostOrder name to subset `data` on.
# data:  curves data frame with HostOrder, Year and Discovered columns.
# Returns list(fixed = stacked fixed effects across epochs,
#              pred_curve = stacked predicted curves across epochs).
fitDiscoveryRateCurve = function(order, data){
  print(order)
  dx = data[ data$HostOrder == order & data$Year <= 2010, ]
  # nested epochs: each later epoch is a subset of the first
  e1 = fit_epoch(dx, "1930", order)
  e2 = fit_epoch(dx[ dx$Year >= 1960, ], "1960", order)
  e3 = fit_epoch(dx[ dx$Year >= 1990, ], "1990", order)
  return(list(
    fixed = rbind(e1$fixed, e2$fixed, e3$fixed),
    pred_curve = rbind(e1$pred, e2$pred, e3$pred)
  ))
}
# 1. run models for all species
# `curves` is arranged by desc(VRichness), so unique(HostOrder)[1:10] picks
# the 10 most virus-family-rich orders; each element of `result` is the
# list(fixed, pred_curve) returned by fitDiscoveryRateCurve.
result = lapply(unique(curves$HostOrder)[ 1:10 ], fitDiscoveryRateCurve, data=curves)
# extract estimates
# stack the per-order fixed-effects tables and predicted curves row-wise
fixed = do.call(rbind.data.frame, lapply(result, "[[", 1))
curve_preds = do.call(rbind.data.frame, lapply(result, "[[", 2))
write.csv(fixed, "./output/order_models/fixedeffects_allspecies_byorder_virusfamily_inlabrupois_20200106.csv", row.names=FALSE)
write.csv(curve_preds, "./output/order_models/curves_allspecies_byorder_virusfamily_inlabrupois_20200106.csv", row.names=FALSE)
# # 2. run models for wild species only
# result = lapply(unique(curvesw$HostOrder)[ 1:10 ], fitDiscoveryRateCurve, data=curvesw[ curvesw$Domestic == "Wild", ])
#
# # extract estimates
# fixed = do.call(rbind.data.frame, lapply(result, "[[", 1))
# curve_preds = do.call(rbind.data.frame, lapply(result, "[[", 2))
# write.csv(fixed, "./output/order_models/fixedeffects_wild_byorder_inlabrupois.csv", row.names=FALSE)
# write.csv(curve_preds, "./output/order_models/curves_wild_byorder_inlabrupois.csv", row.names=FALSE)
#
# ======================== visualise =====================
curve_preds$HostOrder = factor(curve_preds$HostOrder, levels=unique(curve_preds$HostOrder), ordered=TRUE)
# ggplot(curve_preds[ curve_preds$model == 1930, ]) +
# geom_point(aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(aes(Year, median)) +
# geom_ribbon(aes(Year, ymin=q0.025, ymax=q0.975, fill=HostOrder), alpha=0.25) +
# theme_minimal() +
# facet_wrap(~HostOrder) +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
# ggplot(curve_preds[ curve_preds$model == 1960, ]) +
# geom_point(aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(aes(Year, median)) +
# geom_ribbon(aes(Year, ymin=q0.025, ymax=q0.975, fill=HostOrder), alpha=0.25) +
# theme_minimal() +
# facet_wrap(~HostOrder) +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
# ggplot(curve_preds[ curve_preds$model == 2000, ]) +
# geom_point(aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(aes(Year, median)) +
# geom_ribbon(aes(Year, ymin=q0.025, ymax=q0.975, fill=HostOrder), alpha=0.25) +
# theme_minimal() +
# facet_wrap(~HostOrder) +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab("Viral discovery rate (viruses/year)") +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
curve_preds$model2 = paste(curve_preds$model, "-2010", sep="")
p1 = ggplot(curve_preds[ curve_preds$model %in% c(1930, 1960), ]) +
geom_point(aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
geom_line(aes(Year, median, group=factor(model2))) +
geom_ribbon(aes(Year, ymin=q0.025, ymax=q0.975, fill=factor(model2)), alpha=0.3) +
theme_minimal() +
facet_wrap(~HostOrder, scales="free_y", nrow=2) +
scale_fill_viridis_d( name="Time epoch", begin=0.2, end=0.7) +
#ggtitle(Hmisc::capitalize(daty$Host[1])) +
#ylab(expression(lambda)) +
ylab("Virus family discovery rate (viral families/year)") +
xlab("Year") +
theme(plot.title=element_text(size=14, hjust=0.5),
axis.title.y = element_text(size=12),
axis.title.x = element_text(size=11),
legend.title = element_text(size=12),
strip.text = element_text(size=13),
legend.position="bottom",
legend.text = element_text(size=11),
axis.text = element_text(size=11))
ggsave(p1, file="./output/figures/Order_ViralDiscoveryRates_20201203.png", device="png", dpi=300, width=17, height=8, units="in", scale=0.95)
fixed$model2 = paste(fixed$model, "-2010", sep="")
#ggplot(fixed[ fixed$param %in% c("yearx") & fixed$model != "2000", ]) +
p2 = ggplot(fixed[ fixed$param %in% c("yearx"), ]) +
geom_point(aes(HostOrder, median, col=model2, group=model2), position=position_dodge(width=0.5)) +
geom_linerange(aes(HostOrder, ymin=q0.025, ymax=q0.975, col=model2, group=model2), position=position_dodge(width=0.5)) +
geom_hline(yintercept=0, lty=2) +
theme_minimal() +
ylab(expression(beta[year])) +
scale_color_viridis_d( name="Time epoch", begin=0.2, end=0.7) +
theme(plot.title=element_text(size=14, hjust=0.5),
axis.title.y = element_text(size=16),
axis.title.x = element_blank(),
legend.title = element_text(size=12),
legend.text = element_text(size=11),
axis.text.y = element_text(size=11),
axis.text.x = element_text(size=11, angle=90))
ggsave(p2, file="./output/figures/Order_Betaestimates_20201203.png", device="png", dpi=300, width=8, height=5, units="in", scale=0.95)
# plot cumulative curves
curves2 = curves
curves2$HostOrder = factor(curves2$HostOrder, levels=unique(curves2$HostOrder), ordered=TRUE)
p3 =ggplot(curves2[ curves2$Year <= 2010, ]) +
geom_line(aes(Year, VirusCumulative, group=HostOrder), size=0.8, col="skyblue4") +
facet_wrap(~HostOrder, scales="free_y") +
theme_minimal() +
ylab("Virus family richness") +
theme(plot.title=element_text(size=14, hjust=0.5),
axis.title.y = element_text(size=12),
axis.title.x = element_text(size=11),
legend.title = element_text(size=12),
strip.text = element_text(size=12.5),
legend.text = element_text(size=11),
axis.text = element_text(size=11))
ggsave(p3, file="./output/figures/Order_CumulativeCurves_20201203.png", device="png", dpi=300, width=12, height=8, units="in", scale=0.95)
# ggplot(curve_preds[ curve_preds$model == 1930, ]) +
# geom_point(aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(aes(Year, median)) +
# geom_ribbon(aes(Year, ymin=q0.025, ymax=q0.975, fill=HostOrder), alpha=0.25) +
# theme_minimal() +
# facet_wrap(~HostOrder, scales="free_y") +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
#
#
# ggplot() +
# geom_point(data = resy, aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(data = resy, aes(Year, median)) +
# geom_ribbon(data = resy, aes(Year, ymin=q0.025, ymax=q0.975), fill="skyblue4", alpha=0.25) +
# theme_minimal() +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
#
# # species
# spp = "pan troglodytes"
# datx = curves[ curves$Host == spp & curves$Year <= 2015, ]
# #datx = curves[ curves$Year <= 2015 & curves$Host %in% unique(curves$Host)[1:250] & curves$Host != "homo sapiens", ]
#
# # formula: linear effect of year + intercept
# datx$x = 1:nrow(datx)
# form = Discovered ~ x + Intercept
# bru_mod = bru(form, datx, family = "poisson")
# summary(bru_mod)
#
# # predict field for each year
# x4pred = data.frame(x = 1:nrow(datx))
# predx_bru1 = predict(bru_mod, x4pred, ~ exp(x + Intercept), n.samples=2000)
# daty = left_join(datx, predx_bru1)
#
# ggplot() +
# geom_point(data = daty, aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(data = daty, aes(Year, median)) +
# geom_ribbon(data = daty, aes(Year, ymin=q0.025, ymax=q0.975), fill="skyblue4", alpha=0.25) +
# theme_minimal() +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
# # formula: linear effect of year + intercept > 1995
# datx = datx[ datx$Year >= 1995 ,]
# datx$x = 1:nrow(datx)
# form = Discovered ~ x + Intercept
# bru_mod = bru(form, datx, family = "poisson")
# summary(bru_mod)
#
# # predict field for each year
# x4pred = data.frame(x = 1:nrow(datx))
# predx_bru1 = predict(bru_mod, x4pred, ~ exp(x + Intercept), n.samples=2000)
# daty = left_join(datx, predx_bru1)
#
# ggplot() +
# geom_point(data = daty, aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(data = daty, aes(Year, median)) +
# geom_ribbon(data = daty, aes(Year, ymin=q0.025, ymax=q0.975), fill="skyblue4", alpha=0.25) +
# theme_minimal() +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# #ylab("Number of viruses discovered") +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
#
# # formula: linear effect of year + intercept
# datx = datx[ datx$Year >= 1970 ,]
# datx$x = 1:nrow(datx)
# form = Discovered ~ x + Intercept
# bru_mod = bru(form, datx, family = "poisson")
# summary(bru_mod)
#
# # predict field for each year
# x4pred = data.frame(x = 1:nrow(datx))
# predx_bru1 = predict(bru_mod, x4pred, ~ exp(x + Intercept), n.samples=2000)
# daty = left_join(datx, predx_bru1)
#
# ggplot() +
# geom_point(data = daty, aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(data = daty, aes(Year, median)) +
# geom_ribbon(data = daty, aes(Year, ymin=q0.025, ymax=q0.975), fill="skyblue4", alpha=0.25) +
# theme_minimal() +
# ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab("Number of viruses discovered") +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
#
#
# ======================= 2. Comparison of decay in total richness by
|
/scripts_archive/curves_mammalorders_virfam.R
|
no_license
|
rorygibb/pathogen_discovery
|
R
| false
| false
| 18,057
|
r
|
# ====================== Generate species viral discovery rate curves for mammals at the Order level =====================
# proof of concept/general trends
# fit curves across time epochs to examine changes in trends
# how robust are these to adjusting for annual publication effort?
# root dir and dependencies
# dependencies and basedir
pacman::p_load("dplyr", "magrittr", "stringr", "ggplot2", "inlabru", "INLA", "here")
setwd("C:/Users/roryj/Documents/PhD/202008_pathogendiscovery/code/pathogen_discovery/")
# domestic species to label
domestic = read.csv("./data/clover/domestic_status/HostLookup_Domestic.csv", stringsAsFactors = FALSE)
# associations data and no humans
clover = read.csv("./data/clover/Clover_v1.0_NBCIreconciled_20201218.csv", stringsAsFactors = FALSE) %>%
dplyr::filter(Host != "Homo sapiens") %>%
dplyr::mutate(Domestic = ifelse(Host %in% domestic$Host, TRUE, FALSE)) %>%
dplyr::filter(DetectionMethod != "Not specified")
# total viral family richness by order
tr = clover %>%
group_by(HostOrder) %>%
dplyr::summarise(VRichness = n_distinct(VirusFamily))
# publication effort by year at Order-level, with wild/domestic split
# this needs some thought and work if it's worth including
# pubs = read.csv("./output/host_effort/PubMed_Hosts_PubsByYear_Final.csv", stringsAsFactors = FALSE) %>%
# dplyr::filter(Note == "") %>%
# dplyr::select(1:3)
# pubs = pubs %>%
# left_join(assoc[ , c("Host", "HostOrder", "Domestic") ]) %>%
# dplyr::mutate(NumPubs = replace(NumPubs, is.na(NumPubs), 0))
# pubs_order_all = pubsx %>%
# dplyr::group_by(HostOrder, Year) %>%
# dplyr::summarise(NumPubs = sum(NumPubs, na.rm=TRUE))
# pubs_order_dom = pubsx %>%
# dplyr::group_by(HostOrder, Year, Domestic) %>%
# dplyr::summarise(NumPubs = sum(NumPubs, na.rm=TRUE))
# 1. all species
# unique associations by order and year
dd = clover %>%
dplyr::group_by(HostOrder, VirusFamily) %>%
dplyr::summarise(Database = paste(unique(Database), collapse=", "),
HostSpecies = paste(unique(Host), collapse=", "),
NumRecords = length(Year),
YearEarliest = min(Year, na.rm=TRUE),
YearLatest = max(Year, na.rm=TRUE)) %>%
left_join(tr) %>%
dplyr::arrange(desc(VRichness), HostOrder, YearEarliest)
# create cumulative curves of all years
curves = expand.grid(unique(dd$HostOrder), 1930:2016) %>%
dplyr::rename("HostOrder" = 1, "YearEarliest" = 2) %>%
left_join(dd[ , c("HostOrder", "YearEarliest", "VirusFamily")]) %>%
dplyr::group_by(HostOrder, YearEarliest) %>%
dplyr::summarise(Discovered = sum(!is.na(VirusFamily)),
VirusFamily = paste(unique(VirusFamily), collapse=", ")) %>%
left_join(dd[ !duplicated(dd$HostOrder), c("HostOrder", "VRichness") ]) %>%
dplyr::arrange(desc(VRichness), HostOrder, YearEarliest) %>%
dplyr::group_by(HostOrder) %>%
dplyr::mutate(VirusCumulative = cumsum(Discovered)) %>%
dplyr::rename("Year" = YearEarliest)
ggplot(curves[ curves$Year <= 2010, ]) +
geom_line(aes(Year, VirusCumulative)) +
facet_wrap(~HostOrder, scales="free_y")
# combine with publication effort
# curves = left_join(curves, pubs_order_all) %>%
# dplyr::mutate(NumPubs = replace(NumPubs, is.na(NumPubs), 0))
# 2. split out by wild/domestic
# # unique associations by order, year
# ddw = clover %>%
# dplyr::filter(!is.na(Year)) %>%
# dplyr::group_by(HostOrder, Domestic, Virus) %>%
# dplyr::summarise(Database = paste(unique(Database), collapse=", "),
# HostSpecies = paste(unique(Host), collapse=", "),
# NumRecords = length(Year),
# YearEarliest = min(Year, na.rm=TRUE),
# YearLatest = max(Year, na.rm=TRUE)) %>%
# left_join(tr) %>%
# dplyr::arrange(desc(VRichness), HostOrder, YearEarliest)
#
# # create cumulative curves of all years
# curvesw = expand.grid(unique(ddw$HostOrder), unique(ddw$Domestic), 1930:2016) %>%
# dplyr::rename("HostOrder" = 1, "Domestic" = 2, "YearEarliest" = 3) %>%
# left_join(ddw[ , c("HostOrder", "Domestic", "YearEarliest", "Virus")]) %>%
# dplyr::group_by(HostOrder, Domestic, YearEarliest) %>%
# dplyr::summarise(Discovered = sum(!is.na(Virus)),
# Virus = paste(unique(Virus), collapse=", ")) %>%
# left_join(ddw[ !duplicated(ddw$HostOrder), c("HostOrder", "VRichness") ]) %>%
# dplyr::arrange(desc(VRichness), HostOrder, Domestic, YearEarliest) %>%
# dplyr::group_by(HostOrder, Domestic) %>%
# dplyr::mutate(VirusCumulative = cumsum(Discovered)) %>%
# dplyr::rename("Year" = YearEarliest)
#
# # combine with publication effort
# # # need to debug this
# # curvesw = left_join(curves, pubs_order_dom) %>%
# # dplyr::mutate(NumPubs = replace(NumPubs, is.na(NumPubs), 0))
#
# ggplot(curvesw[ curvesw$Year <= 2010, ]) +
# geom_line(aes(Year, VirusCumulative, col=Domestic)) +
# facet_wrap(~HostOrder, scales="free_y")
# ================ for each Order, fit poisson model to discovery rates ===================
# fits Poisson model of discovery rates (novel virus counts per year) for a specified order with specified data
# equivalent to fitting inhomogenous 1D Poisson process but without smudging event times
# for 3 time epochs: 1930 to present, 1960 to present, and 2000 to present
# currently effect of year is linear (i.e. exponential with log-link); could explore SPDE but may be more intractable if we're interested in broad trends
fitDiscoveryRateCurve = function(order, data){
# data with hard cut-off at 2010 (from Reconciliation paper, this is when reports taper off)
print(order)
dx = data[ data$HostOrder == order & data$Year <= 2010, ]
# 1. fit model from 1930 to present
dx$yearx = 1:nrow(dx)
form = Discovered ~ yearx + Intercept
bru_mod = bru(form, dx, family = "poisson")
#summary(bru_mod)
# extract fixed effects
fx = bru_mod$summary.fixed
fx$param = row.names(fx)
names(fx)[ 3:5 ] = c("q0.025", "median", "q0.975")
fx$model = "1930"
fx$HostOrder = order
row.names(fx) = c()
# predict curve
x4pred = data.frame(yearx = 1:nrow(dx))
predx_bru1 = predict(bru_mod, x4pred, ~ exp(yearx + Intercept), n.samples=2000)
resx = left_join(dx, predx_bru1, by="yearx")
resx$model = "1930"
# 2. fit model from 1960 to present
dx = dx[ dx$Year >= 1960, ]
dx$yearx = 1:nrow(dx)
form = Discovered ~ yearx + Intercept
bru_mod = bru(form, dx, family = "poisson")
# extract fixed effects
fy = bru_mod$summary.fixed
fy$param = row.names(fy)
names(fy)[ 3:5 ] = c("q0.025", "median", "q0.975")
fy$model = "1960"
fy$HostOrder = order
row.names(fy) = c()
# predict curve
x4pred = data.frame(yearx = 1:nrow(dx))
predx_bru1 = predict(bru_mod, x4pred, ~ exp(yearx + Intercept), n.samples=2000)
resy = left_join(dx, predx_bru1, by="yearx")
resy$model = "1960"
# 2. fit model from 1990 to present
dx = dx[ dx$Year >= 1990, ]
dx$yearx = 1:nrow(dx)
form = Discovered ~ yearx + Intercept
bru_mod = bru(form, dx, family = "poisson")
# extract fixed effects
fz = bru_mod$summary.fixed
fz$param = row.names(fz)
names(fz)[ 3:5 ] = c("q0.025", "median", "q0.975")
fz$model = "1990"
fz$HostOrder = order
row.names(fz) = c()
# predict curve
x4pred = data.frame(yearx = 1:nrow(dx))
predx_bru1 = predict(bru_mod, x4pred, ~ exp(yearx + Intercept), n.samples=1995)
resz = left_join(dx, predx_bru1, by="yearx")
resz$model = "1990"
# create results
ff = rbind(fx, fy, fz)
res = rbind(resx, resy, resz)
return(list(
fixed = ff,
pred_curve = res
))
}
# 1. run models for all species
result = lapply(unique(curves$HostOrder)[ 1:10 ], fitDiscoveryRateCurve, data=curves)
# extract estimates
fixed = do.call(rbind.data.frame, lapply(result, "[[", 1))
curve_preds = do.call(rbind.data.frame, lapply(result, "[[", 2))
write.csv(fixed, "./output/order_models/fixedeffects_allspecies_byorder_virusfamily_inlabrupois_20200106.csv", row.names=FALSE)
write.csv(curve_preds, "./output/order_models/curves_allspecies_byorder_virusfamily_inlabrupois_20200106.csv", row.names=FALSE)
# # 2. run models for wild species only
# result = lapply(unique(curvesw$HostOrder)[ 1:10 ], fitDiscoveryRateCurve, data=curvesw[ curvesw$Domestic == "Wild", ])
#
# # extract estimates
# fixed = do.call(rbind.data.frame, lapply(result, "[[", 1))
# curve_preds = do.call(rbind.data.frame, lapply(result, "[[", 2))
# write.csv(fixed, "./output/order_models/fixedeffects_wild_byorder_inlabrupois.csv", row.names=FALSE)
# write.csv(curve_preds, "./output/order_models/curves_wild_byorder_inlabrupois.csv", row.names=FALSE)
#
# ======================== visualise =====================
curve_preds$HostOrder = factor(curve_preds$HostOrder, levels=unique(curve_preds$HostOrder), ordered=TRUE)
# ggplot(curve_preds[ curve_preds$model == 1930, ]) +
# geom_point(aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(aes(Year, median)) +
# geom_ribbon(aes(Year, ymin=q0.025, ymax=q0.975, fill=HostOrder), alpha=0.25) +
# theme_minimal() +
# facet_wrap(~HostOrder) +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
# ggplot(curve_preds[ curve_preds$model == 1960, ]) +
# geom_point(aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(aes(Year, median)) +
# geom_ribbon(aes(Year, ymin=q0.025, ymax=q0.975, fill=HostOrder), alpha=0.25) +
# theme_minimal() +
# facet_wrap(~HostOrder) +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
# ggplot(curve_preds[ curve_preds$model == 2000, ]) +
# geom_point(aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(aes(Year, median)) +
# geom_ribbon(aes(Year, ymin=q0.025, ymax=q0.975, fill=HostOrder), alpha=0.25) +
# theme_minimal() +
# facet_wrap(~HostOrder) +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab("Viral discovery rate (viruses/year)") +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
curve_preds$model2 = paste(curve_preds$model, "-2010", sep="")
p1 = ggplot(curve_preds[ curve_preds$model %in% c(1930, 1960), ]) +
geom_point(aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
geom_line(aes(Year, median, group=factor(model2))) +
geom_ribbon(aes(Year, ymin=q0.025, ymax=q0.975, fill=factor(model2)), alpha=0.3) +
theme_minimal() +
facet_wrap(~HostOrder, scales="free_y", nrow=2) +
scale_fill_viridis_d( name="Time epoch", begin=0.2, end=0.7) +
#ggtitle(Hmisc::capitalize(daty$Host[1])) +
#ylab(expression(lambda)) +
ylab("Virus family discovery rate (viral families/year)") +
xlab("Year") +
theme(plot.title=element_text(size=14, hjust=0.5),
axis.title.y = element_text(size=12),
axis.title.x = element_text(size=11),
legend.title = element_text(size=12),
strip.text = element_text(size=13),
legend.position="bottom",
legend.text = element_text(size=11),
axis.text = element_text(size=11))
ggsave(p1, file="./output/figures/Order_ViralDiscoveryRates_20201203.png", device="png", dpi=300, width=17, height=8, units="in", scale=0.95)
# Label each model fit with its time epoch (e.g. "1930-2010") for the legend.
fixed$model2 <- paste0(fixed$model, "-2010")

# Figure: posterior year-slope (beta_year) estimates per host order, one
# point + 95% credible interval per time epoch, dodged side by side; the
# dashed line at zero marks "no temporal trend".
p2 <- ggplot(fixed[fixed$param %in% c("yearx"), ]) +
  geom_point(aes(HostOrder, median, col = model2, group = model2), position = position_dodge(width = 0.5)) +
  geom_linerange(aes(HostOrder, ymin = q0.025, ymax = q0.975, col = model2, group = model2), position = position_dodge(width = 0.5)) +
  geom_hline(yintercept = 0, lty = 2) +
  theme_minimal() +
  ylab(expression(beta[year])) +
  scale_color_viridis_d(name = "Time epoch", begin = 0.2, end = 0.7) +
  theme(plot.title = element_text(size = 14, hjust = 0.5),
        axis.title.y = element_text(size = 16),
        axis.title.x = element_blank(),
        legend.title = element_text(size = 12),
        legend.text = element_text(size = 11),
        axis.text.y = element_text(size = 11),
        axis.text.x = element_text(size = 11, angle = 90))

# Name `filename` explicitly (avoids partial matching of `file=`).
ggsave(p2, filename = "./output/figures/Order_Betaestimates_20201203.png", device = "png", dpi = 300, width = 8, height = 5, units = "in", scale = 0.95)
# Figure: cumulative virus family richness per host order through 2010.
# Work on a copy so the factor re-levelling does not mutate `curves`.
curves2 <- curves
# Fix facet order to the order host orders first appear in the data.
curves2$HostOrder <- factor(curves2$HostOrder, levels = unique(curves2$HostOrder), ordered = TRUE)

p3 <- ggplot(curves2[curves2$Year <= 2010, ]) +
  geom_line(aes(Year, VirusCumulative, group = HostOrder), size = 0.8, col = "skyblue4") +
  facet_wrap(~HostOrder, scales = "free_y") +
  theme_minimal() +
  ylab("Virus family richness") +
  theme(plot.title = element_text(size = 14, hjust = 0.5),
        axis.title.y = element_text(size = 12),
        axis.title.x = element_text(size = 11),
        legend.title = element_text(size = 12),
        strip.text = element_text(size = 12.5),
        legend.text = element_text(size = 11),
        axis.text = element_text(size = 11))

# Name `filename` explicitly (avoids partial matching of `file=`).
ggsave(p3, filename = "./output/figures/Order_CumulativeCurves_20201203.png", device = "png", dpi = 300, width = 12, height = 8, units = "in", scale = 0.95)
# ggplot(curve_preds[ curve_preds$model == 1930, ]) +
# geom_point(aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(aes(Year, median)) +
# geom_ribbon(aes(Year, ymin=q0.025, ymax=q0.975, fill=HostOrder), alpha=0.25) +
# theme_minimal() +
# facet_wrap(~HostOrder, scales="free_y") +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
#
#
# ggplot() +
# geom_point(data = resy, aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(data = resy, aes(Year, median)) +
# geom_ribbon(data = resy, aes(Year, ymin=q0.025, ymax=q0.975), fill="skyblue4", alpha=0.25) +
# theme_minimal() +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
#
# # species
# spp = "pan troglodytes"
# datx = curves[ curves$Host == spp & curves$Year <= 2015, ]
# #datx = curves[ curves$Year <= 2015 & curves$Host %in% unique(curves$Host)[1:250] & curves$Host != "homo sapiens", ]
#
# # formula: linear effect of year + intercept
# datx$x = 1:nrow(datx)
# form = Discovered ~ x + Intercept
# bru_mod = bru(form, datx, family = "poisson")
# summary(bru_mod)
#
# # predict field for each year
# x4pred = data.frame(x = 1:nrow(datx))
# predx_bru1 = predict(bru_mod, x4pred, ~ exp(x + Intercept), n.samples=2000)
# daty = left_join(datx, predx_bru1)
#
# ggplot() +
# geom_point(data = daty, aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(data = daty, aes(Year, median)) +
# geom_ribbon(data = daty, aes(Year, ymin=q0.025, ymax=q0.975), fill="skyblue4", alpha=0.25) +
# theme_minimal() +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
# # formula: linear effect of year + intercept > 1995
# datx = datx[ datx$Year >= 1995 ,]
# datx$x = 1:nrow(datx)
# form = Discovered ~ x + Intercept
# bru_mod = bru(form, datx, family = "poisson")
# summary(bru_mod)
#
# # predict field for each year
# x4pred = data.frame(x = 1:nrow(datx))
# predx_bru1 = predict(bru_mod, x4pred, ~ exp(x + Intercept), n.samples=2000)
# daty = left_join(datx, predx_bru1)
#
# ggplot() +
# geom_point(data = daty, aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(data = daty, aes(Year, median)) +
# geom_ribbon(data = daty, aes(Year, ymin=q0.025, ymax=q0.975), fill="skyblue4", alpha=0.25) +
# theme_minimal() +
# #ggtitle(Hmisc::capitalize(daty$Host[1])) +
# #ylab("Number of viruses discovered") +
# ylab(expression(lambda)) +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
#
# # formula: linear effect of year + intercept
# datx = datx[ datx$Year >= 1970 ,]
# datx$x = 1:nrow(datx)
# form = Discovered ~ x + Intercept
# bru_mod = bru(form, datx, family = "poisson")
# summary(bru_mod)
#
# # predict field for each year
# x4pred = data.frame(x = 1:nrow(datx))
# predx_bru1 = predict(bru_mod, x4pred, ~ exp(x + Intercept), n.samples=2000)
# daty = left_join(datx, predx_bru1)
#
# ggplot() +
# geom_point(data = daty, aes(Year, Discovered), size=1, col="grey70", alpha=0.8) +
# geom_line(data = daty, aes(Year, median)) +
# geom_ribbon(data = daty, aes(Year, ymin=q0.025, ymax=q0.975), fill="skyblue4", alpha=0.25) +
# theme_minimal() +
# ggtitle(Hmisc::capitalize(daty$Host[1])) +
# ylab("Number of viruses discovered") +
# xlab("Year") +
# theme(plot.title=element_text(size=14, hjust=0.5),
# axis.title.y = element_text(size=14),
# axis.title.x = element_text(size=12),
# axis.text = element_text(size=11))
#
#
#
# ======================= 2. Comparison of decay in total richness by
# NOTE: the following lines are non-code residue (dataset-viewer boilerplate)
# accidentally appended to this script; commented out so the file parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.