content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
##' Add RDF data cube codelist to RDF store
##'
##' This function could be split into two functions corresponding to the usage.
##' @param store the RDF store to which the codelist triples are added
##' @param prefixlist list of URI prefixes (prefixCODE, prefixRDF, prefixRDFS,
##'        prefixOWL, prefixSKOS, prefixMMS, prefixCTS, prefixRRDFQBCRND0)
##'        used when forming the subject/predicate/object URIs
##' @param obsData Data Frame with the data for which the code list is to be generated
##' @param codeType Character "DATA" or "SDTM".
##' "DATA" to derive code list from the data.
##' "SDTM" to derive the code list from the rdf.cdisc.org documentation using a SPARQL query
##' @param nciDomainValue When codeType="SDTM" the nciDomain used for identifying the codelist
##' @param dimName the name of the dimension - for codeType="DATA" the name of the variable in the data frame obsData
##' @param underlDataSetName underlying data set name. Used for finding name for D2RQ propertybridge. If NULL then not used.
##' @param remote.endpoint Used when codeType="SDTM" to give the URL for the remote endpoint. If NULL then the local rdf.cdisc.store from the environment is used.
##' @param extension.rrdfqbcrnd0 If TRUE then rrdfqbcrnd0 specific values will be added to the generated cube
##' @return invisibly TRUE (the function is called for its side effect of
##'         adding triples to \code{store})
##' @author Tim Williams, Marc Andersen
##' @export
## TODO(mja): Move this to rrdqbcrnd0 as it uses rrdfcdisc
## TODO(mja): split function in two - one CDISC related and one for codelists from values
buildCodelist <- function(
    store,
    prefixlist,
    obsData,
    codeType,
    nciDomainValue,
    dimName,
    underlDataSetName=NULL,
    remote.endpoint=NULL,
    extension.rrdfqbcrnd0=FALSE
    )
{
    dimName <- tolower(dimName)       ## dimName in all lower case is default
    capDimName <- capitalize(dimName) ## capitalized form is used as the Class name

    ## URIs used repeatedly below:
    ##   classURI  - the owl/rdfs Class collecting all codes of this dimension
    ##   schemeURI - the skos:ConceptScheme (the codelist itself)
    classURI  <- paste0(prefixlist$prefixCODE, capDimName)
    schemeURI <- paste0(prefixlist$prefixCODE, dimName)

    ############################################################################
    ## Obtain codes
    ## codeNoBlank - URI-safe form of each code, used in URI formation
    ## SDTM: cdiscSubmissionValue -> code (the term to be coded)
    if (codeType == "DATA") {
        if (! dimName %in% names(obsData)) {
            stop(dimName, " not a column in input obsData")
        }
        codeSource <- as.data.frame(unique(obsData[, dimName])) ## unique values as data frame
        colnames(codeSource) <- "code"                          ## rename to match SDTM approach
    } else if (codeType == "SDTM") {
        query <- GetCDISCCodeListSparqlQuery(Get.rq.prefixlist.df(qbCDISCprefixes),
                                             paste0("sdtmct:", nciDomainValue))
        if (! is.null(remote.endpoint)) {
            codeSource <- as.data.frame(sparql.remote(remote.endpoint, query))
        } else {
            ## the CDISC standards are cached, so the loading only happens
            ## the first time the local environment is used
            ## TODO: make the loading of cdiscstandards more clever
            Get.env.cdiscstandards()
            codeSource <- as.data.frame(sparql.rdf(rrdfcdisc:::env$cdiscstandards, query))
        }
    } else {
        ## Fail fast: the previous message()-only handling left codeSource
        ## undefined and the function crashed later with an obscure error.
        stop("unknown codeType ", codeType, " for ", dimName)
    }

    ## seq_len() (not 1:nrow) so an empty code source is handled gracefully
    for (i in seq_len(nrow(codeSource))) {
        codeSource[i, "codeNoBlank"] <- encodetouri(as.character(codeSource[i, "code"]))
    }

    ############################################################################
    ## SKELETON
    ## --------- Class ---------
    add.triple(store, classURI,
               paste0(prefixlist$prefixRDF, "type"),
               paste0(prefixlist$prefixOWL, "Class"))
    add.triple(store, classURI,
               paste0(prefixlist$prefixRDF, "type"),
               paste0(prefixlist$prefixRDFS, "Class"))
    add.triple(store, classURI,
               paste0(prefixlist$prefixRDFS, "subClassOf"),
               paste0(prefixlist$prefixSKOS, "Concept"))
    ## Cross reference between the Class (capDimName) and the codelist (dimName)
    add.triple(store, classURI,
               paste0(prefixlist$prefixRDFS, "seeAlso"),
               schemeURI)
    add.triple(store, schemeURI,
               paste0(prefixlist$prefixRDFS, "seeAlso"),
               classURI)
    add.data.triple(store, classURI,
                    paste0(prefixlist$prefixRDFS, "label"),
                    paste0("Class for code list: ", dimName),
                    lang="en")
    add.data.triple(store, classURI,
                    paste0(prefixlist$prefixRDFS, "comment"),
                    paste0("Specifies the ", dimName, " for each observation"),
                    lang="en")
    ## --------- ConceptScheme ---------
    add.triple(store, schemeURI,
               paste0(prefixlist$prefixRDF, "type"),
               paste0(prefixlist$prefixSKOS, "ConceptScheme"))
    add.data.triple(store, schemeURI,
                    paste0(prefixlist$prefixSKOS, "prefLabel"),
                    paste0("Codelist scheme: ", dimName),
                    lang="en")
    add.data.triple(store, schemeURI,
                    paste0(prefixlist$prefixRDFS, "label"),
                    paste0("Codelist scheme: ", dimName),
                    lang="en")
    add.data.triple(store, schemeURI,
                    paste0(prefixlist$prefixSKOS, "note"),
                    paste0("Specifies the ", dimName, " for each observation, group of obs. or all categories (_ALL_)label "),
                    lang="en")
    ## skos:notation is uppercase by convention. Eg: CL_SEX, CL_RACE
    add.data.triple(store, schemeURI,
                    paste0(prefixlist$prefixSKOS, "notation"),
                    paste0("CL_", toupper(dimName)))

    ## Add rrdfqbcrnd0 information
    if (extension.rrdfqbcrnd0) {
        add.data.triple(store, schemeURI,
                        paste0(prefixlist$prefixRRDFQBCRND0, "codeType"),
                        codeType,
                        type="string")
        ## Should only be added if R data set is available
        add.data.triple(store, schemeURI,
                        paste0(prefixlist$prefixRRDFQBCRND0, "R-columnname"),
                        dimName,
                        type="string")
    }

    ## Should only be added if data available in D2RQ format
    ## ToDo(mja): the stem for the URI for the property is hard coded - this should be changed to use a prefix
    ## ToDo(mja): The derivation of property name should be more integrated with D2RQ
    if (!is.null(underlDataSetName) && dimName != "procedure" && dimName != "factor") {
        datasetname.subject <- paste0(prefixlist$prefixRRDFQBCRND0,
                                      toupper(underlDataSetName), "_", toupper(dimName))
        if (extension.rrdfqbcrnd0) {
            add.triple(store, schemeURI,
                       paste0(prefixlist$prefixRRDFQBCRND0, "DataSetRefD2RQ"),
                       datasetname.subject)
            add.triple(store, datasetname.subject,
                       paste0(prefixlist$prefixRRDFQBCRND0, "D2RQ-PropertyBridge"),
                       paste0("http://www.example.org/datasets/vocab/",
                              toupper(underlDataSetName), "_", toupper(dimName)))
            add.data.triple(store, datasetname.subject,
                            paste0(prefixlist$prefixRRDFQBCRND0, "D2RQ-DataSetName"),
                            toupper(underlDataSetName),
                            type="string")
        }
    }
    if (codeType == "SDTM") {
        add.data.triple(store, schemeURI,
                        paste0(prefixlist$prefixMMS, "inValueDomain"),
                        paste0(nciDomainValue),
                        type="string")
    }

    ## --------- hasTopConcept ---------
    ## For each unique code
    for (i in seq_len(nrow(codeSource))) {
        add.triple(store, schemeURI,
                   paste0(prefixlist$prefixSKOS, "hasTopConcept"),
                   paste0(schemeURI, "-", codeSource[i, "codeNoBlank"]))
    }

    ############################################################################
    ## Code values
    hasALL <- FALSE
    hasNONMISS <- FALSE
    for (i in seq_len(nrow(codeSource))) {
        codeValue <- codeSource[i, "code"]
        codeSubj <- paste0(schemeURI, "-", codeSource[i, "codeNoBlank"])
        add.triple(store, codeSubj,
                   paste0(prefixlist$prefixRDF, "type"),
                   paste0(prefixlist$prefixSKOS, "Concept"))
        add.triple(store, codeSubj,
                   paste0(prefixlist$prefixRDF, "type"),
                   classURI)
        add.triple(store, codeSubj,
                   paste0(prefixlist$prefixSKOS, "topConceptOf"),
                   schemeURI)
        add.triple(store, codeSubj,
                   paste0(prefixlist$prefixSKOS, "inScheme"),
                   schemeURI)
        add.data.triple(store, codeSubj,
                        paste0(prefixlist$prefixSKOS, "prefLabel"),
                        paste0(codeValue),
                        type="string")
        ## Track whether the special _ALL_ / _NONMISS_ codes are already present
        if (! hasALL)     { hasALL     <- codeValue == "_ALL_" }
        if (! hasNONMISS) { hasNONMISS <- codeValue == "_NONMISS_" }
        if (codeValue != "_ALL_" && codeValue != "_NONMISS_") {
            if (extension.rrdfqbcrnd0) {
                add.data.triple(store, codeSubj,
                                paste0(prefixlist$prefixRRDFQBCRND0, "R-selectionoperator"),
                                "==",
                                type="string")
                ## character codes are stored quoted; other modes verbatim
                if (mode(codeValue) == "character") {
                    add.data.triple(store, codeSubj,
                                    paste0(prefixlist$prefixRRDFQBCRND0, "R-selectionvalue"),
                                    paste0('\\"', codeValue, '\\"'),
                                    type="string")
                } else {
                    add.data.triple(store, codeSubj,
                                    paste0(prefixlist$prefixRRDFQBCRND0, "R-selectionvalue"),
                                    paste0(codeValue),
                                    type="string")
                }
            }
        }
        ## Should only be added if data available in D2RQ format
        ## ToDo(mja): the stem for the URI for the property is hard coded - this should be changed to use a prefix
        ## ToDo(mja): The derivation of property name should be more integrated with D2RQ
        if (!is.null(underlDataSetName) && dimName == "factor" &&
            ! (codeValue %in% c("quantity", "proportion"))) {
            datasetname.subject <- paste0(prefixlist$prefixRRDFQBCRND0,
                                          toupper(underlDataSetName), "_", toupper(codeValue))
            if (extension.rrdfqbcrnd0) {
                add.triple(store, codeSubj,
                           paste0(prefixlist$prefixRRDFQBCRND0, "DataSetRefD2RQ"),
                           datasetname.subject)
                add.triple(store, datasetname.subject,
                           paste0(prefixlist$prefixRRDFQBCRND0, "D2RQ-PropertyBridge"),
                           paste0("http://www.example.org/datasets/vocab/",
                                  toupper(underlDataSetName), "_", toupper(codeValue)))
                add.data.triple(store, datasetname.subject,
                                paste0(prefixlist$prefixRRDFQBCRND0, "D2RQ-DataSetName"),
                                toupper(underlDataSetName),
                                type="string")
            }
        }
        ## Document when the codes come from the source data without
        ## reconciliation against other sources.
        if (codeType == "DATA" && dimName != "procedure") {
            add.data.triple(store, codeSubj,
                            paste0(prefixlist$prefixRDFS, "comment"),
                            "Coded values from data source. No reconciliation against another source",
                            lang="en")
        }
        ## SDTM Terminology
        ## Additional triples available from the SDTM Terminology file.
        if (codeType == "SDTM") {
            add.data.triple(store, codeSubj,
                            paste0(prefixlist$prefixCTS, "cdiscSynonym"),
                            paste0(codeSource[i, "cdiscSynonym"]))
            ## Remove the prefix colon to specify the value directly (without prefix)
            nciDomain <- gsub("sdtmct:", "", codeSource[i, "nciDomain"])
            ## ? MMS may be incorrect here. How refer back to sdtm-terminology?
            add.data.triple(store, codeSubj,
                            paste0(prefixlist$prefixMMS, "nciDomain"),
                            paste(nciDomain))
            add.data.triple(store, codeSubj,
                            paste0(prefixlist$prefixCTS, "cdiscDefinition"),
                            paste0(codeSource[i, "cdiscDefinition"]))
            add.data.triple(store, codeSubj,
                            paste0(prefixlist$prefixCTS, "cdiscSubmissionValue"),
                            paste0(codeValue))
        }
    }

    ## _NONMISS_ / _ALL_
    ## TODO: Create function that creates _NONMISS_/_ALL_ based on either
    ## presence in data or function parameter.  It is merely coincidental
    ## that the Terminology values in the current example have both; this
    ## logic MUST change.
    if (dimName == "procedure") {
        ## TODO(mja): not straightforward, make more clear
        proc <- GetDescrStatProcedure()
        for (i in seq_len(nrow(codeSource))) {
            ## The two variables should identify the same concept:
            ## codeSubjInList uses the "code:" prefix form used as list names
            codeSubjInList <- paste0("code:", dimName, "-", codeSource[i, "codeNoBlank"])
            codeSubj <- paste0(schemeURI, "-", codeSource[i, "codeNoBlank"])
            if (codeSubjInList %in% names(proc)) {
                if (extension.rrdfqbcrnd0) {
                    add.data.triple(store, codeSubj,
                                    paste0(prefixlist$prefixRRDFQBCRND0, "RdescStatDefFun"),
                                    paste0(deparse(proc[[codeSubjInList]]$fun), collapse=" "))
                }
                add.data.triple(store, codeSubj,
                                paste0(prefixlist$prefixRDFS, "comment"),
                                paste("Descriptive statistics", codeSource[i, "codeNoBlank"],
                                      sep=" ", collapse=" "),
                                lang="en")
            }
        }
    } else {
        ## Fix: always address the _NONMISS_ concept explicitly.  Previously
        ## the subject was only set inside the !hasNONMISS branch, so when
        ## the codelist already contained _NONMISS_ the comment and selection
        ## function below were attached to the last code processed above.
        nonmissSubj <- paste0(schemeURI, "-_NONMISS_")
        if (! hasNONMISS) {
            add.triple(store, schemeURI,
                       paste0(prefixlist$prefixSKOS, "hasTopConcept"),
                       nonmissSubj)
            add.triple(store, nonmissSubj,
                       paste0(prefixlist$prefixRDF, "type"),
                       paste0(prefixlist$prefixSKOS, "Concept"))
            add.triple(store, nonmissSubj,
                       paste0(prefixlist$prefixRDF, "type"),
                       classURI)
            add.triple(store, nonmissSubj,
                       paste0(prefixlist$prefixSKOS, "topConceptOf"),
                       schemeURI)
            add.data.triple(store, nonmissSubj,
                            paste0(prefixlist$prefixSKOS, "prefLabel"),
                            "_NONMISS_",
                            type="string")
            add.triple(store, nonmissSubj,
                       paste0(prefixlist$prefixSKOS, "inScheme"),
                       schemeURI)
        }
        add.data.triple(store, nonmissSubj,
                        paste0(prefixlist$prefixRDFS, "comment"),
                        "NON-CDISC: Represents the non-missing codelist categories. Does not include missing values.",
                        lang="en")
        if (extension.rrdfqbcrnd0) {
            add.data.triple(store, nonmissSubj,
                            paste0(prefixlist$prefixRRDFQBCRND0, "R-selectionfunction"),
                            "is.na",
                            type="string")
        }
        ## _ALL_
        allSubj <- paste0(schemeURI, "-_ALL_")
        if (! hasALL) {
            add.triple(store, schemeURI,
                       paste0(prefixlist$prefixSKOS, "hasTopConcept"),
                       allSubj)
            add.triple(store, allSubj,
                       paste0(prefixlist$prefixRDF, "type"),
                       paste0(prefixlist$prefixSKOS, "Concept"))
            add.triple(store, allSubj,
                       paste0(prefixlist$prefixRDF, "type"),
                       classURI)
            add.triple(store, allSubj,
                       paste0(prefixlist$prefixSKOS, "topConceptOf"),
                       schemeURI)
            add.data.triple(store, allSubj,
                            paste0(prefixlist$prefixSKOS, "prefLabel"),
                            "_ALL_",
                            type="string")
            add.triple(store, allSubj,
                       paste0(prefixlist$prefixSKOS, "inScheme"),
                       schemeURI)
        }
        add.data.triple(store, allSubj,
                        paste0(prefixlist$prefixRDFS, "comment"),
                        "NON-CDISC: Represents all codelist categories.",
                        lang="en")
    }
    invisible(TRUE)
}
|
/rrdfqb/R/buildCodelist.R
|
no_license
|
rjsheperd/rrdfqbcrnd0
|
R
| false
| false
| 19,314
|
r
|
##' Add RDF data cube codelist to RDF store
##'
##' This function could be split into two functions corresponding to the usage.
##' @param store the RDF store to which the codelist triples are added
##' @param prefixlist list of URI prefixes (prefixCODE, prefixRDF, prefixRDFS,
##'        prefixOWL, prefixSKOS, prefixMMS, prefixCTS, prefixRRDFQBCRND0)
##'        used when forming the subject/predicate/object URIs
##' @param obsData Data Frame with the data for which the code list is to be generated
##' @param codeType Character "DATA" or "SDTM".
##' "DATA" to derive code list from the data.
##' "SDTM" to derive the code list from the rdf.cdisc.org documentation using a SPARQL query
##' @param nciDomainValue When codeType="SDTM" the nciDomain used for identifying the codelist
##' @param dimName the name of the dimension - for codeType="DATA" the name of the variable in the data frame obsData
##' @param underlDataSetName underlying data set name. Used for finding name for D2RQ propertybridge. If NULL then not used.
##' @param remote.endpoint Used when codeType="SDTM" to give the URL for the remote endpoint. If NULL then the local rdf.cdisc.store from the environment is used.
##' @param extension.rrdfqbcrnd0 If TRUE then rrdfqbcrnd0 specific values will be added to the generated cube
##' @return invisibly TRUE (the function is called for its side effect of
##'         adding triples to \code{store})
##' @author Tim Williams, Marc Andersen
##' @export
## TODO(mja): Move this to rrdqbcrnd0 as it uses rrdfcdisc
## TODO(mja): split function in two - one CDISC related and one for codelists from values
buildCodelist <- function(
    store,
    prefixlist,
    obsData,
    codeType,
    nciDomainValue,
    dimName,
    underlDataSetName=NULL,
    remote.endpoint=NULL,
    extension.rrdfqbcrnd0=FALSE
    )
{
    dimName <- tolower(dimName)       ## dimName in all lower case is default
    capDimName <- capitalize(dimName) ## capitalized form is used as the Class name

    ## URIs used repeatedly below:
    ##   classURI  - the owl/rdfs Class collecting all codes of this dimension
    ##   schemeURI - the skos:ConceptScheme (the codelist itself)
    classURI  <- paste0(prefixlist$prefixCODE, capDimName)
    schemeURI <- paste0(prefixlist$prefixCODE, dimName)

    ############################################################################
    ## Obtain codes
    ## codeNoBlank - URI-safe form of each code, used in URI formation
    ## SDTM: cdiscSubmissionValue -> code (the term to be coded)
    if (codeType == "DATA") {
        if (! dimName %in% names(obsData)) {
            stop(dimName, " not a column in input obsData")
        }
        codeSource <- as.data.frame(unique(obsData[, dimName])) ## unique values as data frame
        colnames(codeSource) <- "code"                          ## rename to match SDTM approach
    } else if (codeType == "SDTM") {
        query <- GetCDISCCodeListSparqlQuery(Get.rq.prefixlist.df(qbCDISCprefixes),
                                             paste0("sdtmct:", nciDomainValue))
        if (! is.null(remote.endpoint)) {
            codeSource <- as.data.frame(sparql.remote(remote.endpoint, query))
        } else {
            ## the CDISC standards are cached, so the loading only happens
            ## the first time the local environment is used
            ## TODO: make the loading of cdiscstandards more clever
            Get.env.cdiscstandards()
            codeSource <- as.data.frame(sparql.rdf(rrdfcdisc:::env$cdiscstandards, query))
        }
    } else {
        ## Fail fast: the previous message()-only handling left codeSource
        ## undefined and the function crashed later with an obscure error.
        stop("unknown codeType ", codeType, " for ", dimName)
    }

    ## seq_len() (not 1:nrow) so an empty code source is handled gracefully
    for (i in seq_len(nrow(codeSource))) {
        codeSource[i, "codeNoBlank"] <- encodetouri(as.character(codeSource[i, "code"]))
    }

    ############################################################################
    ## SKELETON
    ## --------- Class ---------
    add.triple(store, classURI,
               paste0(prefixlist$prefixRDF, "type"),
               paste0(prefixlist$prefixOWL, "Class"))
    add.triple(store, classURI,
               paste0(prefixlist$prefixRDF, "type"),
               paste0(prefixlist$prefixRDFS, "Class"))
    add.triple(store, classURI,
               paste0(prefixlist$prefixRDFS, "subClassOf"),
               paste0(prefixlist$prefixSKOS, "Concept"))
    ## Cross reference between the Class (capDimName) and the codelist (dimName)
    add.triple(store, classURI,
               paste0(prefixlist$prefixRDFS, "seeAlso"),
               schemeURI)
    add.triple(store, schemeURI,
               paste0(prefixlist$prefixRDFS, "seeAlso"),
               classURI)
    add.data.triple(store, classURI,
                    paste0(prefixlist$prefixRDFS, "label"),
                    paste0("Class for code list: ", dimName),
                    lang="en")
    add.data.triple(store, classURI,
                    paste0(prefixlist$prefixRDFS, "comment"),
                    paste0("Specifies the ", dimName, " for each observation"),
                    lang="en")
    ## --------- ConceptScheme ---------
    add.triple(store, schemeURI,
               paste0(prefixlist$prefixRDF, "type"),
               paste0(prefixlist$prefixSKOS, "ConceptScheme"))
    add.data.triple(store, schemeURI,
                    paste0(prefixlist$prefixSKOS, "prefLabel"),
                    paste0("Codelist scheme: ", dimName),
                    lang="en")
    add.data.triple(store, schemeURI,
                    paste0(prefixlist$prefixRDFS, "label"),
                    paste0("Codelist scheme: ", dimName),
                    lang="en")
    add.data.triple(store, schemeURI,
                    paste0(prefixlist$prefixSKOS, "note"),
                    paste0("Specifies the ", dimName, " for each observation, group of obs. or all categories (_ALL_)label "),
                    lang="en")
    ## skos:notation is uppercase by convention. Eg: CL_SEX, CL_RACE
    add.data.triple(store, schemeURI,
                    paste0(prefixlist$prefixSKOS, "notation"),
                    paste0("CL_", toupper(dimName)))

    ## Add rrdfqbcrnd0 information
    if (extension.rrdfqbcrnd0) {
        add.data.triple(store, schemeURI,
                        paste0(prefixlist$prefixRRDFQBCRND0, "codeType"),
                        codeType,
                        type="string")
        ## Should only be added if R data set is available
        add.data.triple(store, schemeURI,
                        paste0(prefixlist$prefixRRDFQBCRND0, "R-columnname"),
                        dimName,
                        type="string")
    }

    ## Should only be added if data available in D2RQ format
    ## ToDo(mja): the stem for the URI for the property is hard coded - this should be changed to use a prefix
    ## ToDo(mja): The derivation of property name should be more integrated with D2RQ
    if (!is.null(underlDataSetName) && dimName != "procedure" && dimName != "factor") {
        datasetname.subject <- paste0(prefixlist$prefixRRDFQBCRND0,
                                      toupper(underlDataSetName), "_", toupper(dimName))
        if (extension.rrdfqbcrnd0) {
            add.triple(store, schemeURI,
                       paste0(prefixlist$prefixRRDFQBCRND0, "DataSetRefD2RQ"),
                       datasetname.subject)
            add.triple(store, datasetname.subject,
                       paste0(prefixlist$prefixRRDFQBCRND0, "D2RQ-PropertyBridge"),
                       paste0("http://www.example.org/datasets/vocab/",
                              toupper(underlDataSetName), "_", toupper(dimName)))
            add.data.triple(store, datasetname.subject,
                            paste0(prefixlist$prefixRRDFQBCRND0, "D2RQ-DataSetName"),
                            toupper(underlDataSetName),
                            type="string")
        }
    }
    if (codeType == "SDTM") {
        add.data.triple(store, schemeURI,
                        paste0(prefixlist$prefixMMS, "inValueDomain"),
                        paste0(nciDomainValue),
                        type="string")
    }

    ## --------- hasTopConcept ---------
    ## For each unique code
    for (i in seq_len(nrow(codeSource))) {
        add.triple(store, schemeURI,
                   paste0(prefixlist$prefixSKOS, "hasTopConcept"),
                   paste0(schemeURI, "-", codeSource[i, "codeNoBlank"]))
    }

    ############################################################################
    ## Code values
    hasALL <- FALSE
    hasNONMISS <- FALSE
    for (i in seq_len(nrow(codeSource))) {
        codeValue <- codeSource[i, "code"]
        codeSubj <- paste0(schemeURI, "-", codeSource[i, "codeNoBlank"])
        add.triple(store, codeSubj,
                   paste0(prefixlist$prefixRDF, "type"),
                   paste0(prefixlist$prefixSKOS, "Concept"))
        add.triple(store, codeSubj,
                   paste0(prefixlist$prefixRDF, "type"),
                   classURI)
        add.triple(store, codeSubj,
                   paste0(prefixlist$prefixSKOS, "topConceptOf"),
                   schemeURI)
        add.triple(store, codeSubj,
                   paste0(prefixlist$prefixSKOS, "inScheme"),
                   schemeURI)
        add.data.triple(store, codeSubj,
                        paste0(prefixlist$prefixSKOS, "prefLabel"),
                        paste0(codeValue),
                        type="string")
        ## Track whether the special _ALL_ / _NONMISS_ codes are already present
        if (! hasALL)     { hasALL     <- codeValue == "_ALL_" }
        if (! hasNONMISS) { hasNONMISS <- codeValue == "_NONMISS_" }
        if (codeValue != "_ALL_" && codeValue != "_NONMISS_") {
            if (extension.rrdfqbcrnd0) {
                add.data.triple(store, codeSubj,
                                paste0(prefixlist$prefixRRDFQBCRND0, "R-selectionoperator"),
                                "==",
                                type="string")
                ## character codes are stored quoted; other modes verbatim
                if (mode(codeValue) == "character") {
                    add.data.triple(store, codeSubj,
                                    paste0(prefixlist$prefixRRDFQBCRND0, "R-selectionvalue"),
                                    paste0('\\"', codeValue, '\\"'),
                                    type="string")
                } else {
                    add.data.triple(store, codeSubj,
                                    paste0(prefixlist$prefixRRDFQBCRND0, "R-selectionvalue"),
                                    paste0(codeValue),
                                    type="string")
                }
            }
        }
        ## Should only be added if data available in D2RQ format
        ## ToDo(mja): the stem for the URI for the property is hard coded - this should be changed to use a prefix
        ## ToDo(mja): The derivation of property name should be more integrated with D2RQ
        if (!is.null(underlDataSetName) && dimName == "factor" &&
            ! (codeValue %in% c("quantity", "proportion"))) {
            datasetname.subject <- paste0(prefixlist$prefixRRDFQBCRND0,
                                          toupper(underlDataSetName), "_", toupper(codeValue))
            if (extension.rrdfqbcrnd0) {
                add.triple(store, codeSubj,
                           paste0(prefixlist$prefixRRDFQBCRND0, "DataSetRefD2RQ"),
                           datasetname.subject)
                add.triple(store, datasetname.subject,
                           paste0(prefixlist$prefixRRDFQBCRND0, "D2RQ-PropertyBridge"),
                           paste0("http://www.example.org/datasets/vocab/",
                                  toupper(underlDataSetName), "_", toupper(codeValue)))
                add.data.triple(store, datasetname.subject,
                                paste0(prefixlist$prefixRRDFQBCRND0, "D2RQ-DataSetName"),
                                toupper(underlDataSetName),
                                type="string")
            }
        }
        ## Document when the codes come from the source data without
        ## reconciliation against other sources.
        if (codeType == "DATA" && dimName != "procedure") {
            add.data.triple(store, codeSubj,
                            paste0(prefixlist$prefixRDFS, "comment"),
                            "Coded values from data source. No reconciliation against another source",
                            lang="en")
        }
        ## SDTM Terminology
        ## Additional triples available from the SDTM Terminology file.
        if (codeType == "SDTM") {
            add.data.triple(store, codeSubj,
                            paste0(prefixlist$prefixCTS, "cdiscSynonym"),
                            paste0(codeSource[i, "cdiscSynonym"]))
            ## Remove the prefix colon to specify the value directly (without prefix)
            nciDomain <- gsub("sdtmct:", "", codeSource[i, "nciDomain"])
            ## ? MMS may be incorrect here. How refer back to sdtm-terminology?
            add.data.triple(store, codeSubj,
                            paste0(prefixlist$prefixMMS, "nciDomain"),
                            paste(nciDomain))
            add.data.triple(store, codeSubj,
                            paste0(prefixlist$prefixCTS, "cdiscDefinition"),
                            paste0(codeSource[i, "cdiscDefinition"]))
            add.data.triple(store, codeSubj,
                            paste0(prefixlist$prefixCTS, "cdiscSubmissionValue"),
                            paste0(codeValue))
        }
    }

    ## _NONMISS_ / _ALL_
    ## TODO: Create function that creates _NONMISS_/_ALL_ based on either
    ## presence in data or function parameter.  It is merely coincidental
    ## that the Terminology values in the current example have both; this
    ## logic MUST change.
    if (dimName == "procedure") {
        ## TODO(mja): not straightforward, make more clear
        proc <- GetDescrStatProcedure()
        for (i in seq_len(nrow(codeSource))) {
            ## The two variables should identify the same concept:
            ## codeSubjInList uses the "code:" prefix form used as list names
            codeSubjInList <- paste0("code:", dimName, "-", codeSource[i, "codeNoBlank"])
            codeSubj <- paste0(schemeURI, "-", codeSource[i, "codeNoBlank"])
            if (codeSubjInList %in% names(proc)) {
                if (extension.rrdfqbcrnd0) {
                    add.data.triple(store, codeSubj,
                                    paste0(prefixlist$prefixRRDFQBCRND0, "RdescStatDefFun"),
                                    paste0(deparse(proc[[codeSubjInList]]$fun), collapse=" "))
                }
                add.data.triple(store, codeSubj,
                                paste0(prefixlist$prefixRDFS, "comment"),
                                paste("Descriptive statistics", codeSource[i, "codeNoBlank"],
                                      sep=" ", collapse=" "),
                                lang="en")
            }
        }
    } else {
        ## Fix: always address the _NONMISS_ concept explicitly.  Previously
        ## the subject was only set inside the !hasNONMISS branch, so when
        ## the codelist already contained _NONMISS_ the comment and selection
        ## function below were attached to the last code processed above.
        nonmissSubj <- paste0(schemeURI, "-_NONMISS_")
        if (! hasNONMISS) {
            add.triple(store, schemeURI,
                       paste0(prefixlist$prefixSKOS, "hasTopConcept"),
                       nonmissSubj)
            add.triple(store, nonmissSubj,
                       paste0(prefixlist$prefixRDF, "type"),
                       paste0(prefixlist$prefixSKOS, "Concept"))
            add.triple(store, nonmissSubj,
                       paste0(prefixlist$prefixRDF, "type"),
                       classURI)
            add.triple(store, nonmissSubj,
                       paste0(prefixlist$prefixSKOS, "topConceptOf"),
                       schemeURI)
            add.data.triple(store, nonmissSubj,
                            paste0(prefixlist$prefixSKOS, "prefLabel"),
                            "_NONMISS_",
                            type="string")
            add.triple(store, nonmissSubj,
                       paste0(prefixlist$prefixSKOS, "inScheme"),
                       schemeURI)
        }
        add.data.triple(store, nonmissSubj,
                        paste0(prefixlist$prefixRDFS, "comment"),
                        "NON-CDISC: Represents the non-missing codelist categories. Does not include missing values.",
                        lang="en")
        if (extension.rrdfqbcrnd0) {
            add.data.triple(store, nonmissSubj,
                            paste0(prefixlist$prefixRRDFQBCRND0, "R-selectionfunction"),
                            "is.na",
                            type="string")
        }
        ## _ALL_
        allSubj <- paste0(schemeURI, "-_ALL_")
        if (! hasALL) {
            add.triple(store, schemeURI,
                       paste0(prefixlist$prefixSKOS, "hasTopConcept"),
                       allSubj)
            add.triple(store, allSubj,
                       paste0(prefixlist$prefixRDF, "type"),
                       paste0(prefixlist$prefixSKOS, "Concept"))
            add.triple(store, allSubj,
                       paste0(prefixlist$prefixRDF, "type"),
                       classURI)
            add.triple(store, allSubj,
                       paste0(prefixlist$prefixSKOS, "topConceptOf"),
                       schemeURI)
            add.data.triple(store, allSubj,
                            paste0(prefixlist$prefixSKOS, "prefLabel"),
                            "_ALL_",
                            type="string")
            add.triple(store, allSubj,
                       paste0(prefixlist$prefixSKOS, "inScheme"),
                       schemeURI)
        }
        add.data.triple(store, allSubj,
                        paste0(prefixlist$prefixRDFS, "comment"),
                        "NON-CDISC: Represents all codelist categories.",
                        lang="en")
    }
    invisible(TRUE)
}
|
## Add a legend for the three fruit series (fix: "Appple" -> "Apple")
legend(4, 400, c("Orange", "Apple", "Banana"), cex=0.8, col=c("red","green","blue"), pch=21, lty=1:3)
|
/work/r데이터분석_예제파일/예제/4_16.R
|
no_license
|
bass4th/R
|
R
| false
| false
| 103
|
r
|
## Add a legend for the three fruit series (fix: "Appple" -> "Apple")
legend(4, 400, c("Orange", "Apple", "Banana"), cex=0.8, col=c("red","green","blue"), pch=21, lty=1:3)
|
## Cross-validation for penalized Cox models fit by ncvsurv().
## Splits the observations into `nfolds` folds, refits on each training
## set over the full lambda path, and accumulates per-observation
## cross-validated error contributions (E) and linear predictors (Y).
##
## Args:
##   X, y        : design matrix and 2-column survival response
##                 (time-on-study, censoring indicator); coerced to matrix.
##   ...         : additional arguments forwarded to ncvsurv().
##   nfolds      : number of CV folds.
##   seed        : optional RNG seed for reproducible fold assignment.
##   returnY     : if TRUE, include the matrix of CV linear predictors.
##   trace       : if TRUE, print progress per fold.
##   events.only : if TRUE, compute CV error over events (y[,2]==1) only.
## Returns: a list of class c("cv.ncvsurv", "cv.ncvreg") with cve, cvse,
##          lambda, fit, min, lambda.min, null.dev (and optionally Y).
cv.ncvsurv <- function(X, y, ..., nfolds=10, seed, returnY=FALSE, trace=FALSE, events.only=TRUE) {
  ## Error checking
  ## Note: `class(X) != "matrix"` breaks under R >= 4.0 where matrices have
  ## class c("matrix", "array"); use is.matrix()/inherits() instead.
  if (!is.matrix(X)) {
    tmp <- try(X <- as.matrix(X), silent=TRUE)
    if (inherits(tmp, "try-error")) stop("X must be a matrix or able to be coerced to a matrix")
  }
  if (!is.matrix(y)) {
    tmp <- try(y <- as.matrix(y), silent=TRUE)
    if (inherits(tmp, "try-error")) stop("y must be a matrix or able to be coerced to a matrix")
    if (ncol(y) != 2) stop("y must have two columns for survival data: time-on-study and a censoring indicator")
  }
  ## Full-data fit defines the lambda path shared by all folds
  fit <- ncvsurv(X=X, y=y, ...)
  n <- nrow(X)
  E <- Y <- matrix(NA, nrow=n, ncol=length(fit$lambda))
  if (!missing(seed)) set.seed(seed)
  ## Random fold assignment with approximately equal fold sizes
  cv.ind <- ceiling(sample(1:n)/n*nfolds)
  for (i in 1:nfolds) {
    if (trace) cat("Starting CV fold #", i, "\n", sep="")
    cv.args <- list(...)
    cv.args$X <- X[cv.ind != i, , drop=FALSE]
    cv.args$y <- y[cv.ind != i, ]
    cv.args$lambda <- fit$lambda
    cv.args$warn <- FALSE
    fit.i <- do.call("ncvsurv", cv.args)
    X2 <- X[cv.ind == i, , drop=FALSE]
    ## The fold fit may terminate early, so it can have fewer lambdas
    nl <- length(fit.i$lambda)
    Y[cv.ind == i, 1:nl] <- predict(fit.i, X2)
    if (fit$model == "cox") {
      ## Leave-one-out decomposition of the CV partial-likelihood deviance
      eta <- predict(fit.i, X)
      ll <- loss.ncvsurv(y, eta)
      for (ii in which(cv.ind == i)) {
        eta.ii <- predict(fit.i, X[-ii, , drop=FALSE])
        E[ii, 1:nl] <- 2*(ll - loss.ncvsurv(y[-ii, , drop=FALSE], eta.ii))
      }
    }
  }

  ## Eliminate saturated lambda values, if any.  drop=FALSE keeps E/Y as
  ## matrices even when a single lambda survives (apply() needs a matrix).
  ind <- which(apply(is.finite(E), 2, all))
  E <- E[, ind, drop=FALSE]
  Y <- Y[, ind, drop=FALSE]
  lambda <- fit$lambda[ind]

  ## Return
  if (events.only) E <- E[y[, 2] == 1, , drop=FALSE]
  cve <- apply(E, 2, mean)
  cvse <- apply(E, 2, sd) / sqrt(nrow(E))
  min <- which.min(cve)
  val <- list(cve=cve, cvse=cvse, lambda=lambda, fit=fit, min=min,
              lambda.min=lambda[min], null.dev=cve[1])
  if (returnY) val$Y <- Y
  structure(val, class=c("cv.ncvsurv", "cv.ncvreg"))
}
|
/R/cv.ncvsurv.R
|
no_license
|
YaohuiZeng/ncvreg
|
R
| false
| false
| 1,926
|
r
|
## Cross-validation for penalized Cox models fit by ncvsurv().
## Splits the observations into `nfolds` folds, refits on each training
## set over the full lambda path, and accumulates per-observation
## cross-validated error contributions (E) and linear predictors (Y).
##
## Args:
##   X, y        : design matrix and 2-column survival response
##                 (time-on-study, censoring indicator); coerced to matrix.
##   ...         : additional arguments forwarded to ncvsurv().
##   nfolds      : number of CV folds.
##   seed        : optional RNG seed for reproducible fold assignment.
##   returnY     : if TRUE, include the matrix of CV linear predictors.
##   trace       : if TRUE, print progress per fold.
##   events.only : if TRUE, compute CV error over events (y[,2]==1) only.
## Returns: a list of class c("cv.ncvsurv", "cv.ncvreg") with cve, cvse,
##          lambda, fit, min, lambda.min, null.dev (and optionally Y).
cv.ncvsurv <- function(X, y, ..., nfolds=10, seed, returnY=FALSE, trace=FALSE, events.only=TRUE) {
  ## Error checking
  ## Note: `class(X) != "matrix"` breaks under R >= 4.0 where matrices have
  ## class c("matrix", "array"); use is.matrix()/inherits() instead.
  if (!is.matrix(X)) {
    tmp <- try(X <- as.matrix(X), silent=TRUE)
    if (inherits(tmp, "try-error")) stop("X must be a matrix or able to be coerced to a matrix")
  }
  if (!is.matrix(y)) {
    tmp <- try(y <- as.matrix(y), silent=TRUE)
    if (inherits(tmp, "try-error")) stop("y must be a matrix or able to be coerced to a matrix")
    if (ncol(y) != 2) stop("y must have two columns for survival data: time-on-study and a censoring indicator")
  }
  ## Full-data fit defines the lambda path shared by all folds
  fit <- ncvsurv(X=X, y=y, ...)
  n <- nrow(X)
  E <- Y <- matrix(NA, nrow=n, ncol=length(fit$lambda))
  if (!missing(seed)) set.seed(seed)
  ## Random fold assignment with approximately equal fold sizes
  cv.ind <- ceiling(sample(1:n)/n*nfolds)
  for (i in 1:nfolds) {
    if (trace) cat("Starting CV fold #", i, "\n", sep="")
    cv.args <- list(...)
    cv.args$X <- X[cv.ind != i, , drop=FALSE]
    cv.args$y <- y[cv.ind != i, ]
    cv.args$lambda <- fit$lambda
    cv.args$warn <- FALSE
    fit.i <- do.call("ncvsurv", cv.args)
    X2 <- X[cv.ind == i, , drop=FALSE]
    ## The fold fit may terminate early, so it can have fewer lambdas
    nl <- length(fit.i$lambda)
    Y[cv.ind == i, 1:nl] <- predict(fit.i, X2)
    if (fit$model == "cox") {
      ## Leave-one-out decomposition of the CV partial-likelihood deviance
      eta <- predict(fit.i, X)
      ll <- loss.ncvsurv(y, eta)
      for (ii in which(cv.ind == i)) {
        eta.ii <- predict(fit.i, X[-ii, , drop=FALSE])
        E[ii, 1:nl] <- 2*(ll - loss.ncvsurv(y[-ii, , drop=FALSE], eta.ii))
      }
    }
  }

  ## Eliminate saturated lambda values, if any.  drop=FALSE keeps E/Y as
  ## matrices even when a single lambda survives (apply() needs a matrix).
  ind <- which(apply(is.finite(E), 2, all))
  E <- E[, ind, drop=FALSE]
  Y <- Y[, ind, drop=FALSE]
  lambda <- fit$lambda[ind]

  ## Return
  if (events.only) E <- E[y[, 2] == 1, , drop=FALSE]
  cve <- apply(E, 2, mean)
  cvse <- apply(E, 2, sd) / sqrt(nrow(E))
  min <- which.min(cve)
  val <- list(cve=cve, cvse=cvse, lambda=lambda, fit=fit, min=min,
              lambda.min=lambda[min], null.dev=cve[1])
  if (returnY) val$Y <- Y
  structure(val, class=c("cv.ncvsurv", "cv.ncvreg"))
}
|
library(lubridate) #needed library
#An initial inspection reveals the general location of desired data in the
#larger file. This first part reads in that section.
#This assumes the file household_power_consumption.txt is in the working
#directory.
## Read the slice of the household power data covering the target days,
## clean it up, and write a 2x2 panel figure to plot4.png.
## (Assumes household_power_consumption.txt is in the working directory.)
pwr <- read.table("household_power_consumption.txt",
                  sep = ";", header = FALSE, skip = 60000, nrows = 10000,
                  stringsAsFactors = FALSE)
## Variable names come from the file's first (header) line.
names(pwr) <- read.table("household_power_consumption.txt", sep = ";",
                         nrows = 1, stringsAsFactors = FALSE)
## Parse the Date column, then keep only 2007-02-01 and 2007-02-02.
pwr$Date <- dmy(pwr$Date)
pwr <- pwr[pwr$Date >= ymd("2007/02/01") & pwr$Date <= ymd("2007/02/02"), ]
## Convert all measurement columns to numeric in one pass.
num_cols <- c("Global_active_power", "Sub_metering_1", "Sub_metering_2",
              "Sub_metering_3", "Voltage", "Global_reactive_power")
pwr[num_cols] <- lapply(pwr[num_cols], as.numeric)
## Combine date and time into a single POSIXct timestamp.
pwr$dateandtime <- ymd_hms(paste(pwr$Date, pwr$Time))
## Build the figure: panels are filled column-first (mfcol).
png(file = "plot4.png", width = 480, height = 480)
par(mfcol = c(2, 2))
## Panel (1,1): global active power over time (no "(kilowatts)" in label)
plot(pwr$dateandtime, pwr$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power")
## Panel (2,1): the three sub-metering series, legend without bounding box
plot(pwr$dateandtime, pwr$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(pwr$dateandtime, pwr$Sub_metering_2, type = "l", col = "red")
lines(pwr$dateandtime, pwr$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       bty = "n")
## Panel (1,2): voltage
plot(pwr$dateandtime, pwr$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
## Panel (2,2): global reactive power
plot(pwr$dateandtime, pwr$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
ttegt/ExData_Plotting1
|
R
| false
| false
| 2,177
|
r
|
## Exploratory-data-analysis plot 4: 2x2 panel of household power
## measurements for 2007-02-01/02, written to plot4.png.
library(lubridate) #needed library for date/time parsing (dmy, ymd, ymd_hms)
#An initial inspection reveals the general location of desired data in the
#larger file. This first part reads in that section.
#This assumes the file household_power_consumption.txt is in the working
#directory.
hpc <- read.table("household_power_consumption.txt",
                  sep=";", header=FALSE, skip=60000, nrows=10000,
                  stringsAsFactors = FALSE)
#This reads the variable names from the first line and attaches them to the
#data frame.
names(hpc) <- read.table("household_power_consumption.txt", sep=";",
                         nrows=1, stringsAsFactors = FALSE)
#This puts the hpc$Date variable in POSIXct format.
hpc$Date <- dmy(hpc$Date)
#This subsets the desired time interval.
hpc <- hpc[hpc$Date>=ymd("2007/02/01") & hpc$Date<=ymd("2007/02/02"),]
#make the variables numeric (they were read in as character)
hpc$Global_active_power <- as.numeric(hpc$Global_active_power)
hpc$Sub_metering_1 <- as.numeric(hpc$Sub_metering_1)
hpc$Sub_metering_2 <- as.numeric(hpc$Sub_metering_2)
hpc$Sub_metering_3 <- as.numeric(hpc$Sub_metering_3)
hpc$Voltage <- as.numeric(hpc$Voltage)
hpc$Global_reactive_power <- as.numeric(hpc$Global_reactive_power)
#combine date and time into one POSIXct timestamp for the x axis
hpc$dateandtime <- ymd_hms(paste(hpc$Date, hpc$Time))
#create the plot; mfcol fills the four panels column-first
png(file="plot4.png", width = 480, height = 480)
par(mfcol=c(2,2)) #panels
#plot (1,1) (from plot2 with (kilowatts removed))
plot(hpc$dateandtime, hpc$Global_active_power, type="l", xlab = "",
     ylab = "Global Active Power")
#plot (2,1) (from plot 3 with legend bounding box off)
plot(hpc$dateandtime, hpc$Sub_metering_1, type="l",
     xlab="", ylab="Energy sub metering")
lines(hpc$dateandtime, hpc$Sub_metering_2, type="l", col="red")
lines(hpc$dateandtime, hpc$Sub_metering_3, type="l", col="blue")
legend("topright", lty=1,col=c("black","red","blue"),
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       bty="n")
#plot (1,2): voltage over time
plot(hpc$dateandtime,hpc$Voltage, type="l", xlab="datetime", ylab="Voltage")
#plot (2,2): global reactive power over time
plot(hpc$dateandtime,hpc$Global_reactive_power, type="l",
     xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
#************************************Investment Case Study****************************************************
#*********************************** Loading libraries ***********************************************
library(tidyr)
library(dplyr)
library(stringr)
#Loading data in the data frames
companies <- read.table("companies.txt", sep="\t", header = TRUE, comment.char = "", quote = "\"", stringsAsFactors=FALSE)
rounds2 <- read.csv("rounds2.csv", header = TRUE, stringsAsFactors=FALSE)
mapping <- read.csv("mapping.csv", header = TRUE, stringsAsFactors=FALSE, check.names = F)
#Converting permalinks to lower case to facilitate merging
companies$permalink <- tolower(companies$permalink)
rounds2$company_permalink <- tolower(rounds2$company_permalink)
#Which permalinks are present in companies but not in rounds2?
setdiff(rounds2$company_permalink, companies$permalink)
#Which permalinks are present in rounds2 but not in companies?
setdiff(companies$permalink, rounds2$company_permalink)
#****************************** Checkpoint 1: Data Preparation ************************************
#Question 1: How many unique companies are present in rounds2?
#Answer: 66368
length(unique(rounds2$company_permalink))
#Question 2: How many unique companies are present in companies?
#Answer: 66368
length(unique(companies$permalink))
#Question 3: In the companies data frame, which column can be used as the unique key for each company? Write the name of the column.
#Answer: permalink
#Question 4: Are there any companies in the rounds2 file which are not present in companies? Answer yes or no: Y/N
#Answer: N
#Question 5: Merge the two data frames so that all variables (columns) in the companies frame are added to the rounds2 data frame. Name the merged frame master_frame. How many observations are present in master_frame?
master_frame <- merge(rounds2, companies, by.x = "company_permalink", by.y = "permalink", all = T)
#****************************** Checkpoint 2: Funding Type Analysis ******************************
#Question 1: Average funding amount of venture type
#11,748,949.1294895
avg_venture_funding <- mean(master_frame[which(master_frame$funding_round_type=="venture"),"raised_amount_usd"], na.rm = T)
#Question 2: Average funding amount of angel type
#958,694.469753086
avg_angel_funding <- mean(master_frame[which(master_frame$funding_round_type=="angel"),"raised_amount_usd"], na.rm = T)
#Question 3: Average funding amount of seed type
#719,817.996907173
avg_seed_funding <- mean(master_frame[which(master_frame$funding_round_type=="seed"),"raised_amount_usd"], na.rm = T)
#Question 4: Average funding amount of Private Equity type
#73,308,593.0294421
avg_priv_equity_funding <- mean(master_frame[which(master_frame$funding_round_type=="private_equity"),"raised_amount_usd"], na.rm = T)
#Question 5: Considering that Spark Funds wants to invest between 5 to 15 million USD per investment round, which investment type is the most suitable for them?
#Answer: venture
#****************************** Checkpoint 3: Country Analysis ******************************
venture_investments <- filter(master_frame, funding_round_type=="venture")
country_group <- group_by(venture_investments, country_code)
country_wise_investment <- summarise(country_group, total_amt_raised = sum(raised_amount_usd, na.rm = T))
top9 <- arrange(country_wise_investment[which(country_wise_investment$country_code!=""),], desc(total_amt_raised))[1:9,]
#Question 1: Top English speaking country
#Answer: United States (Country Code USA)
#Question 2: Second English speaking country
#Answer: United Kingdom (Country Code GBR)
#Question 3: Third English speaking country
#Answer: India (Country Code IND)
#****************************** Checkpoint 4: Data Preparation for sectoral analysis ******************************
#Extract the primary sector of each category list from the category_list column
master_frame$primary_sector <- sapply(master_frame$category_list, function(x) {str_trim(unlist(str_split(x, fixed("|")))[1])})
#Converting wide to long in the mapping file
mapping_long <- gather(mapping, sector, sec_val, 2:10)
#Remove extra rows which have value 0, as well as the last column as it will always be 1
mapping_long <- mapping_long[!(mapping_long$sec_val == 0),1:2]
#Correcting the data. Replacing "0" with "na"
mapping_long$category_list_new <- str_replace(mapping_long$category_list, fixed("0"), "na")
#Convert primary_sector in master_frame as well as category_list_new in mapping_long to facilitate merging
master_frame$primary_sector <- tolower(master_frame$primary_sector)
mapping_long$category_list_new <- tolower(mapping_long$category_list_new)
#Check which categories in mapping_long are not present in master_frame
setdiff(mapping_long$category_list_new, master_frame$primary_sector)
#Check which categories in master_frame are not present in mapping_long
setdiff(master_frame$primary_sector, mapping_long$category_list_new)
#Some more data cleaning for "enterprise 2.na" and "personal fi0nce"
mapping_long[which(mapping_long$category_list_new=="enterprise 2.na"), "category_list_new"] <- "enterprise 2.0"
mapping_long[which(mapping_long$category_list_new=="personal fi0nce"), "category_list_new"] <- "personal finance"
#Reverify which categories in mapping_long are not present in master_frame. Should show 0
setdiff(mapping_long$category_list_new, master_frame$primary_sector)
#Merge master_frame with mapping_long on primary_sector and category_list respectively to get the "main sector"
master_frame_with_main_sector <- merge(master_frame, mapping_long, by.x = "primary_sector", by.y = "category_list_new", all = T)
#************************** Checkpoint 5: Sectoral Analysis ****************************
D1 <- filter(master_frame_with_main_sector, country_code=="USA", funding_round_type=="venture", raised_amount_usd>=5000000, raised_amount_usd<=15000000)
D2 <- filter(master_frame_with_main_sector, country_code=="GBR", funding_round_type=="venture", raised_amount_usd>=5000000, raised_amount_usd<=15000000)
D3 <- filter(master_frame_with_main_sector, country_code=="IND", funding_round_type=="venture", raised_amount_usd>=5000000, raised_amount_usd<=15000000)
#The total number (or count) of investments for each main sector in a separate column
#The total amount invested in each main sector in a separate column
D1<-mutate(group_by(D1,primary_sector),total_number_of_investment=length(raised_amount_usd),total_amount_invested=sum(raised_amount_usd))
D2<-mutate(group_by(D2,primary_sector),total_number_of_investment=length(raised_amount_usd),total_amount_invested=sum(raised_amount_usd))
D3<-mutate(group_by(D3,primary_sector),total_number_of_investment=length(raised_amount_usd),total_amount_invested=sum(raised_amount_usd))
#Question 1: Total number of Investments (count)
#Number of Investments in USA : 12150
nrow(D1)
#Number of Investments in GBR : 628
nrow(D2)
#Number of Investments in IND : 330
nrow(D3)
#Question 2: Total amount of investment
#In USA: 108,531,347,515
sum(D1$raised_amount_usd)
#In GBR: 5436843539
sum(D2$raised_amount_usd)
#In IND: 2976543602
sum(D3$raised_amount_usd)
#Group investments within each country by sectors.
usa_sector_group <- group_by(D1, sector)
usa_num_investments_per_sector <- arrange(summarise(usa_sector_group, num_investment = n()), desc(num_investment))
gbr_sector_group <- group_by(D2, sector)
gbr_num_investments_per_sector <- arrange(summarise(gbr_sector_group, num_investment = n()), desc(num_investment))
ind_sector_group <- group_by(D3, sector)
ind_num_investments_per_sector <- arrange(summarise(ind_sector_group, num_investment = n()), desc(num_investment))
#Questions 3-7 are based on the values in usa_num_investments_per_sector,gbr_num_investments_per_sector ind_num_investments_per_sector
#Question 3: Top Sector name (no. of investment-wise)
#Top sector in USA: "Others"
#Top sector in GBR: "Others"
#Top sector in IND: "Others"
#Question 4:
#Second sector in USA: "Social, Finance, Analytics, Advertising"
#Second sector in GBR: "Social, Finance, Analytics, Advertising"
#Second sector in IND: "Social, Finance, Analytics, Advertising"
#Question 5:
#Third sector in USA: "Cleantech / Semiconductors"
#Third sector in GBR: "Cleantech / Semiconductors"
#Third sector in IND: "News, Search and Messaging"
#Question 6: Number of investments in top sector
#In USA: 2950
#In GBR: 147
#In IND: 110
#Question 7: Number of investments in second sector
#In USA: 2714
#In GBR: 133
#In IND: 60
#Question 8: Number of investments in third sector
#In USA: 2350
#In GBR: 130
#In IND: 52
#Calculating top sector for USA,GBR,IND for Q9
usa_top_sector<-usa_num_investments_per_sector[1,1] #Others
gbr_top_sector<-gbr_num_investments_per_sector[1,1] #Others
ind_top_sector<-ind_num_investments_per_sector[1,1] #Others
#Question 9: For point 3 (top sector count-wise), which company received the highest investment?
usa_others <- filter(D1, sector==usa_top_sector)
usa_company_group <- group_by(usa_others, company_permalink)
usa_others_top_company <- arrange(summarise(usa_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/virtustream" is the top company for USA in sector "Others"
gbr_others <- filter(D2, sector==gbr_top_sector)
gbr_company_group <- group_by(gbr_others, company_permalink)
gbr_others_top_company <- arrange(summarise(gbr_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/electric-cloud" is the top company for United Kingdom in sector "Others"
ind_others <- filter(D3, sector==ind_top_sector)
ind_company_group <- group_by(ind_others, company_permalink)
ind_others_top_company <- arrange(summarise(ind_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/firstcry-com" is the top company for India in sector "Others"
#Calculating second best sector for USA,GBR,IND for Q9
usa_secondbest_sector<-usa_num_investments_per_sector[2,1] #Social, Finance, Analytics, Advertising
gbr_secondbest_sector<-gbr_num_investments_per_sector[2,1] #Social, Finance, Analytics, Advertising
ind_secondbest_sector<-ind_num_investments_per_sector[2,1] #Social, Finance, Analytics, Advertising
#Question 10: For point 4 (second best sector count-wise), which company received the highest investment?
usa_social_etc <- filter(D1, sector==usa_secondbest_sector)
usa_company_group <- group_by(usa_social_etc, company_permalink)
usa_social_top_company <- arrange(summarise(usa_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/shotspotter" is the top company for USA in sector "Social, Finance, Analytics, Advertising"
gbr_social_etc <- filter(D2, sector==gbr_secondbest_sector)
gbr_company_group <- group_by(gbr_social_etc, company_permalink)
gbr_social_top_company <- arrange(summarise(gbr_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/celltick-technologies" is the top company for United Kingdom in sector "Social, Finance, Analytics, Advertising"
ind_social_etc <- filter(D3, sector==ind_secondbest_sector)
ind_company_group <- group_by(ind_social_etc, company_permalink)
ind_social_top_company <- arrange(summarise(ind_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/manthan-systems" is the top company for India in sector "Social, Finance, Analytics, Advertising"
|
/Solution.R
|
no_license
|
kanupriya-singh/investment-case-study-R
|
R
| false
| false
| 11,742
|
r
|
#************************************Investment Case Study****************************************************
#*********************************** Loading libraries ***********************************************
library(tidyr)
library(dplyr)
library(stringr)
#Loading data in the data frames
companies <- read.table("companies.txt", sep="\t", header = TRUE, comment.char = "", quote = "\"", stringsAsFactors=FALSE)
rounds2 <- read.csv("rounds2.csv", header = TRUE, stringsAsFactors=FALSE)
mapping <- read.csv("mapping.csv", header = TRUE, stringsAsFactors=FALSE, check.names = F)
#Converting permalinks to lower case to facilitate merging
companies$permalink <- tolower(companies$permalink)
rounds2$company_permalink <- tolower(rounds2$company_permalink)
#Which permalinks are present in companies but not in rounds2?
setdiff(rounds2$company_permalink, companies$permalink)
#Which permalinks are present in rounds2 but not in companies?
setdiff(companies$permalink, rounds2$company_permalink)
#****************************** Checkpoint 1: Data Preparation ************************************
#Question 1: How many unique companies are present in rounds2?
#Answer: 66368
length(unique(rounds2$company_permalink))
#Question 2: How many unique companies are present in companies?
#Answer: 66368
length(unique(companies$permalink))
#Question 3: In the companies data frame, which column can be used as the unique key for each company? Write the name of the column.
#Answer: permalink
#Question 4: Are there any companies in the rounds2 file which are not present in companies? Answer yes or no: Y/N
#Answer: N
#Question 5: Merge the two data frames so that all variables (columns) in the companies frame are added to the rounds2 data frame. Name the merged frame master_frame. How many observations are present in master_frame?
master_frame <- merge(rounds2, companies, by.x = "company_permalink", by.y = "permalink", all = T)
#****************************** Checkpoint 2: Funding Type Analysis ******************************
#Question 1: Average funding amount of venture type
#11,748,949.1294895
avg_venture_funding <- mean(master_frame[which(master_frame$funding_round_type=="venture"),"raised_amount_usd"], na.rm = T)
#Question 2: Average funding amount of angel type
#958,694.469753086
avg_angel_funding <- mean(master_frame[which(master_frame$funding_round_type=="angel"),"raised_amount_usd"], na.rm = T)
#Question 3: Average funding amount of seed type
#719,817.996907173
avg_seed_funding <- mean(master_frame[which(master_frame$funding_round_type=="seed"),"raised_amount_usd"], na.rm = T)
#Question 4: Average funding amount of Private Equity type
#73,308,593.0294421
avg_priv_equity_funding <- mean(master_frame[which(master_frame$funding_round_type=="private_equity"),"raised_amount_usd"], na.rm = T)
#Question 5: Considering that Spark Funds wants to invest between 5 to 15 million USD per investment round, which investment type is the most suitable for them?
#Answer: venture
#****************************** Checkpoint 3: Country Analysis ******************************
venture_investments <- filter(master_frame, funding_round_type=="venture")
country_group <- group_by(venture_investments, country_code)
country_wise_investment <- summarise(country_group, total_amt_raised = sum(raised_amount_usd, na.rm = T))
top9 <- arrange(country_wise_investment[which(country_wise_investment$country_code!=""),], desc(total_amt_raised))[1:9,]
#Question 1: Top English speaking country
#Answer: United States (Country Code USA)
#Question 2: Second English speaking country
#Answer: United Kingdom (Country Code GBR)
#Question 3: Third English speaking country
#Answer: India (Country Code IND)
#****************************** Checkpoint 4: Data Preparation for sectoral analysis ******************************
#Extract the primary sector of each category list from the category_list column
master_frame$primary_sector <- sapply(master_frame$category_list, function(x) {str_trim(unlist(str_split(x, fixed("|")))[1])})
#Converting wide to long in the mapping file
mapping_long <- gather(mapping, sector, sec_val, 2:10)
#Remove extra rows which have value 0, as well as the last column as it will always be 1
mapping_long <- mapping_long[!(mapping_long$sec_val == 0),1:2]
#Correcting the data. Replacing "0" with "na"
mapping_long$category_list_new <- str_replace(mapping_long$category_list, fixed("0"), "na")
#Convert primary_sector in master_frame as well as category_list_new in mapping_long to facilitate merging
master_frame$primary_sector <- tolower(master_frame$primary_sector)
mapping_long$category_list_new <- tolower(mapping_long$category_list_new)
#Check which categories in mapping_long are not present in master_frame
setdiff(mapping_long$category_list_new, master_frame$primary_sector)
#Check which categories in master_frame are not present in mapping_long
setdiff(master_frame$primary_sector, mapping_long$category_list_new)
#Some more data cleaning for "enterprise 2.na" and "personal fi0nce"
mapping_long[which(mapping_long$category_list_new=="enterprise 2.na"), "category_list_new"] <- "enterprise 2.0"
mapping_long[which(mapping_long$category_list_new=="personal fi0nce"), "category_list_new"] <- "personal finance"
#Reverify which categories in mapping_long are not present in master_frame. Should show 0
setdiff(mapping_long$category_list_new, master_frame$primary_sector)
#Merge master_frame with mapping_long on primary_sector and category_list respectively to get the "main sector"
master_frame_with_main_sector <- merge(master_frame, mapping_long, by.x = "primary_sector", by.y = "category_list_new", all = T)
#************************** Checkpoint 5: Sectoral Analysis ****************************
D1 <- filter(master_frame_with_main_sector, country_code=="USA", funding_round_type=="venture", raised_amount_usd>=5000000, raised_amount_usd<=15000000)
D2 <- filter(master_frame_with_main_sector, country_code=="GBR", funding_round_type=="venture", raised_amount_usd>=5000000, raised_amount_usd<=15000000)
D3 <- filter(master_frame_with_main_sector, country_code=="IND", funding_round_type=="venture", raised_amount_usd>=5000000, raised_amount_usd<=15000000)
#The total number (or count) of investments for each main sector in a separate column
#The total amount invested in each main sector in a separate column
D1<-mutate(group_by(D1,primary_sector),total_number_of_investment=length(raised_amount_usd),total_amount_invested=sum(raised_amount_usd))
D2<-mutate(group_by(D2,primary_sector),total_number_of_investment=length(raised_amount_usd),total_amount_invested=sum(raised_amount_usd))
D3<-mutate(group_by(D3,primary_sector),total_number_of_investment=length(raised_amount_usd),total_amount_invested=sum(raised_amount_usd))
#Question 1: Total number of Investments (count)
#Number of Investments in USA : 12150
nrow(D1)
#Number of Investments in GBR : 628
nrow(D2)
#Number of Investments in IND : 330
nrow(D3)
#Question 2: Total amount of investment
#In USA: 108,531,347,515
sum(D1$raised_amount_usd)
#In GBR: 5436843539
sum(D2$raised_amount_usd)
#In IND: 2976543602
sum(D3$raised_amount_usd)
#Group investments within each country by sectors.
usa_sector_group <- group_by(D1, sector)
usa_num_investments_per_sector <- arrange(summarise(usa_sector_group, num_investment = n()), desc(num_investment))
gbr_sector_group <- group_by(D2, sector)
gbr_num_investments_per_sector <- arrange(summarise(gbr_sector_group, num_investment = n()), desc(num_investment))
ind_sector_group <- group_by(D3, sector)
ind_num_investments_per_sector <- arrange(summarise(ind_sector_group, num_investment = n()), desc(num_investment))
#Questions 3-7 are based on the values in usa_num_investments_per_sector,gbr_num_investments_per_sector ind_num_investments_per_sector
#Question 3: Top Sector name (no. of investment-wise)
#Top sector in USA: "Others"
#Top sector in GBR: "Others"
#Top sector in IND: "Others"
#Question 4:
#Second sector in USA: "Social, Finance, Analytics, Advertising"
#Second sector in GBR: "Social, Finance, Analytics, Advertising"
#Second sector in IND: "Social, Finance, Analytics, Advertising"
#Question 5:
#Third sector in USA: "Cleantech / Semiconductors"
#Third sector in GBR: "Cleantech / Semiconductors"
#Third sector in IND: "News, Search and Messaging"
#Question 6: Number of investments in top sector
#In USA: 2950
#In GBR: 147
#In IND: 110
#Question 7: Number of investments in second sector
#In USA: 2714
#In GBR: 133
#In IND: 60
#Question 8: Number of investments in third sector
#In USA: 2350
#In GBR: 130
#In IND: 52
#Calculating top sector for USA,GBR,IND for Q9
usa_top_sector<-usa_num_investments_per_sector[1,1] #Others
gbr_top_sector<-gbr_num_investments_per_sector[1,1] #Others
ind_top_sector<-ind_num_investments_per_sector[1,1] #Others
#Question 9: For point 3 (top sector count-wise), which company received the highest investment?
usa_others <- filter(D1, sector==usa_top_sector)
usa_company_group <- group_by(usa_others, company_permalink)
usa_others_top_company <- arrange(summarise(usa_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/virtustream" is the top company for USA in sector "Others"
gbr_others <- filter(D2, sector==gbr_top_sector)
gbr_company_group <- group_by(gbr_others, company_permalink)
gbr_others_top_company <- arrange(summarise(gbr_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/electric-cloud" is the top company for United Kingdom in sector "Others"
ind_others <- filter(D3, sector==ind_top_sector)
ind_company_group <- group_by(ind_others, company_permalink)
ind_others_top_company <- arrange(summarise(ind_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/firstcry-com" is the top company for India in sector "Others"
#Calculating second best sector for USA,GBR,IND for Q9
usa_secondbest_sector<-usa_num_investments_per_sector[2,1] #Social, Finance, Analytics, Advertising
gbr_secondbest_sector<-gbr_num_investments_per_sector[2,1] #Social, Finance, Analytics, Advertising
ind_secondbest_sector<-ind_num_investments_per_sector[2,1] #Social, Finance, Analytics, Advertising
#Question 10: For point 4 (second best sector count-wise), which company received the highest investment?
usa_social_etc <- filter(D1, sector==usa_secondbest_sector)
usa_company_group <- group_by(usa_social_etc, company_permalink)
usa_social_top_company <- arrange(summarise(usa_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/shotspotter" is the top company for USA in sector "Social, Finance, Analytics, Advertising"
gbr_social_etc <- filter(D2, sector==gbr_secondbest_sector)
gbr_company_group <- group_by(gbr_social_etc, company_permalink)
gbr_social_top_company <- arrange(summarise(gbr_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/celltick-technologies" is the top company for United Kingdom in sector "Social, Finance, Analytics, Advertising"
ind_social_etc <- filter(D3, sector==ind_secondbest_sector)
ind_company_group <- group_by(ind_social_etc, company_permalink)
ind_social_top_company <- arrange(summarise(ind_company_group, total_amt_raised = sum(raised_amount_usd)), desc(total_amt_raised))[1,1]
#"/organization/manthan-systems" is the top company for India in sector "Social, Finance, Analytics, Advertising"
|
## Simulate multivariate monthly series with a lag-1 k-nearest-neighbor
## bootstrap: each month's value is resampled from the historical record,
## conditioned on the previously simulated month.
##
## Args:
##   Z  - historical data array indexed as Z[year, month, variable]
##        (12 months in the second dimension).
##   ny - number of years to simulate; defaults to the historical length.
## Returns:
##   4-D array sims[year, month, variable, simulation].
## NOTE(review): depends on a global `nsims` (number of simulations) that
## is not a parameter — confirm it is defined in the calling environment.
knn.multi.sim <- function(Z, ny = dim(Z)[1]){
  sims <- array(0,dim=c(dim(Z),nsims))
  K <- sqrt(ny)            # heuristic neighborhood size k = sqrt(n)
  W <- 1/(1:K)             # decreasing weights over the k nearest neighbors
  W <- cumsum( W/sum(W) )  # normalized cumulative weights -> sampling CDF
  for(i in 1:nsims){
    ## Workspace for one simulation, one row per simulated month.
    ## NOTE(review): width is hard-coded to 2 variables here while the
    ## unpacking loop below uses dim(Z)[3] — assumes dim(Z)[3] == 2.
    this.sim <- array(0,c(ny*12,2))
    ## Start with a previous Dec value..
    Z.last <- rbind( Z[sample(1:ny,1), 12, ] )
    for(j in 1:(ny*12)){
      month = j %% 12 ## the month we are simulating
      if(month == 0) month = 12
      if(month == 1) {
        ## January conditions on December; drop the final year so the
        ## candidate Decembers all have a following January.
        neighbors <- rbind(Z[1:(ny-1),12,])
        neighbors <- rbind(Z.last,neighbors)
        N <- ny - 1
      }else{
        ## Other months condition on the previous calendar month.
        neighbors <- rbind(Z[,month-1,])
        neighbors <- rbind(Z.last,neighbors)
        N <- ny
      }
      ## Rank candidates by Euclidean distance to the current state
      ## (row 1 of `neighbors` is Z.last itself, hence columns 2:(N+1)).
      ordered.distance <-
        order(as.matrix(dist(neighbors))[1,2:(N+1)])
      ## Sample a neighbor via the weighted CDF: rank of a uniform draw
      ## within c(rand, W) picks which of the k neighbors to use.
      rand <- runif(1)
      nearest <- rank(c(rand,W))[1]
      this.neighbor <- ordered.distance[nearest]
      ## Offset by one for January because the last year was dropped.
      if(month == 1)this.neighbor <- this.neighbor + 1
      Zt = t(Z[this.neighbor,month,])
      this.sim[j,] = Zt
      ## set the previous value and continue.
      Z.last = Zt
    }
    # Put back in sim array (reshape each variable to year x month)
    for(s in 1:dim(Z)[3])
      sims[,,s,i] <- matrix(this.sim[,s],ncol=12,byrow=T)
  }
  return(sims)
}
|
/data-analysis/4-multivariate/src/knn-multi-sim.R
|
no_license
|
cameronbracken/classy
|
R
| false
| false
| 1,109
|
r
|
## Lag-1 k-nearest-neighbor bootstrap of multivariate monthly data:
## Z[year, month, variable] in, sims[year, month, variable, sim] out.
## NOTE(review): reads a global `nsims` that is not a parameter, and
## hard-codes a 2-variable workspace (array(0,c(ny*12,2))) — verify
## dim(Z)[3] == 2 before use.
knn.multi.sim <- function(Z, ny = dim(Z)[1]){
  sims <- array(0,dim=c(dim(Z),nsims))
  K <- sqrt(ny)
  W <- 1/(1:K)
  W <- cumsum( W/sum(W) )
  for(i in 1:nsims){
    this.sim <- array(0,c(ny*12,2))
    ## Start with a previous Dec value..
    Z.last <- rbind( Z[sample(1:ny,1), 12, ] )
    for(j in 1:(ny*12)){
      month = j %% 12 ## the month we are simulating
      if(month == 0) month = 12
      if(month == 1) {
        neighbors <- rbind(Z[1:(ny-1),12,])
        neighbors <- rbind(Z.last,neighbors)
        N <- ny - 1
      }else{
        neighbors <- rbind(Z[,month-1,])
        neighbors <- rbind(Z.last,neighbors)
        N <- ny
      }
      ordered.distance <-
        order(as.matrix(dist(neighbors))[1,2:(N+1)])
      rand <- runif(1)
      nearest <- rank(c(rand,W))[1]
      this.neighbor <- ordered.distance[nearest]
      if(month == 1)this.neighbor <- this.neighbor + 1
      Zt = t(Z[this.neighbor,month,])
      this.sim[j,] = Zt
      ## set the previous value and continue.
      Z.last = Zt
    }
    # Put back in sim array
    for(s in 1:dim(Z)[3])
      sims[,,s,i] <- matrix(this.sim[,s],ncol=12,byrow=T)
  }
  return(sims)
}
|
##Chapter 14 Notes##
#finish when you get the chance
|
/chapter.14.tanjanay.R
|
no_license
|
paul-shannon/learningR
|
R
| false
| false
| 52
|
r
|
##Chapter 14 Notes##
#finish when you get the chance
|
#' @title taxize authentication
#'
#' @description Help on authentication
#'
#' @name taxize-authentication
#' @aliases authentication
#' @seealso [key_helpers()]
#'
#' @section What is an API?:
#' An API is an Application Programming Interface. The term "API" can be used
#' for lots of scenarios, but in this case we're talking about web APIs,
#' or APIs (interfaces) to web resources. \pkg{taxize} interacts with
#' remote databases on the web via their APIs. You don't need to worry
#' about the details of how that all works; just know that some of them
#' require authentication and some do not.
#'
#' @section What are API keys?:
#' For those APIs that require authentication, the way that's typically done
#' is through API keys: alphanumeric strings of variable lengths that are
#' supplied with a request to an API.
#'
#' \pkg{taxize} won't get these keys for you; rather, you have to
#' go get a key for each service, but we do provide information on how
#' to get those keys. See [key_helpers()] for help on how to
#' obtain keys for this package.
#'
#' @section Using API keys:
#' You can store API keys as R options in your `.Rprofile` file, or
#' as environment variables in either your `.Renviron` file or
#' `.bash_profile` file, or `.zshrc` file (if you use oh-my-zsh) or
#' similar. See [Startup] for help on R options and environment
#' variables.
#'
#' Save your API keys with the following names:
#' * Tropicos: R option or env var as 'TROPICOS_KEY'
#' * IUCN: R option or env var as 'IUCN_REDLIST_KEY'
#' * ENTREZ: R option or env var as 'ENTREZ_KEY'
#'
#' If you save in .Renviron it looks like: `ENTREZ_KEY=somekey`
#'
#' If you save in a .bash_profile, .zshrc, or similar file it looks like:
#' `export ENTREZ_KEY=somekey`
#'
#' If you save in a .Rprofile it looks like: `options(ENTREZ_KEY = "somekey")`
#'
#' Remember to restart your R session (and to start a new shell window/tab
#' if you're using the shell) to take advantage of the new R options
#' or environment variables.
#'
#' We strongly recommend using environment variables
#' (<https://en.wikipedia.org/wiki/Environment_variable>) over R options
#' because environment variables are widely used across programming
#' languages, operating systems, and computing environments; whereas
#' R options are specific to R.
#'
#' Note that NCBI Entrez doesn't require that you use an API key,
#' but you do get a higher rate limit with a key (more requests per
#' time period), from 3 to 10 requests per second, so do get one.
NULL
|
/R/taxize-authentication.R
|
permissive
|
puyo/taxize
|
R
| false
| false
| 2,513
|
r
|
#' @title taxize authentication
#'
#' @description Help on authentication
#'
#' @name taxize-authentication
#' @aliases authentication
#' @seealso [key_helpers()]
#'
#' @section What is an API?:
#' An API is an Application Programming Interface. The term "API" can be used
#' for lots of scenarios, but in this case we're talking about web APIs,
#' or APIs (interfaces) to web resources. \pkg{taxize} interacts with
#' remote databases on the web via their APIs. You don't need to worry
#' about the details of how that all works; just know that some of them
#' require authentication and some do not.
#'
#' @section What are API keys?:
#' For those APIs that require authentication, the way that's typically done
#' is through API keys: alphanumeric strings of variable lengths that are
#' supplied with a request to an API.
#'
#' \pkg{taxize} won't get these keys for you; rather, you have to
#' go get a key for each service, but we do provide information on how
#' to get those keys. See [key_helpers()] for help on how to
#' obtain keys for this package.
#'
#' @section Using API keys:
#' You can store API keys as R options in your `.Rprofile` file, or
#' as environment variables in either your `.Renviron` file or
#' `.bash_profile` file, or `.zshrc` file (if you use oh-my-zsh) or
#' similar. See [Startup] for help on R options and environment
#' variables.
#'
#' Save your API keys with the following names:
#' * Tropicos: R option or env var as 'TROPICOS_KEY'
#' * IUCN: R option or env var as 'IUCN_REDLIST_KEY'
#' * ENTREZ: R option or env var as 'ENTREZ_KEY'
#'
#' If you save in .Renviron it looks like: `ENTREZ_KEY=somekey`
#'
#' If you save in a .bash_profile, .zshrc, or similar file it looks like:
#' `export ENTREZ_KEY=somekey`
#'
#' If you save in a .Rprofile it looks like: `options(ENTREZ_KEY = "somekey")`
#'
#' Remember to restart your R session (and to start a new shell window/tab
#' if you're using the shell) to take advantage of the new R options
#' or environment variables.
#'
#' We strongly recommend using environment variables
#' (<https://en.wikipedia.org/wiki/Environment_variable>) over R options
#' because environment variables are widely used across programming
#' languages, operating systems, and computing environments; whereas
#' R options are specific to R.
#'
#' Note that NCBI Entrez doesn't require that you use an API key,
#' but you do get a higher rate limit with a key (more requests per
#' time period), from 3 to 10 requests per second, so do get one.
NULL
|
## Attach Spanish chemical names to the EPA list by CAS registry number and
## write the translated table out.
epa <- read.csv("epa_lista.csv")
nombres <- read.csv("../DICCIONARIO.csv")[c("CAS", "NOMBRE.QUIMICO")]
## Left join: keep every EPA row, even those with no dictionary match (NA name).
A <- merge(epa, nombres, by = "CAS", all.x = TRUE)
write.csv(A, "epa_lista_castellano.csv")
|
/EPA/traducirNombres.R
|
permissive
|
gauss-ma/toxDB
|
R
| false
| false
| 180
|
r
|
## Attach Spanish chemical names to the EPA list by CAS registry number and
## write the translated table out.
epa <- read.csv("epa_lista.csv")
nombres <- read.csv("../DICCIONARIO.csv")[c("CAS", "NOMBRE.QUIMICO")]
## Left join: keep every EPA row, even those with no dictionary match (NA name).
A <- merge(epa, nombres, by = "CAS", all.x = TRUE)
write.csv(A, "epa_lista_castellano.csv")
|
## Correlation-based grouping of particle-size bins.
## Reads the hourly dataset, replaces exact zeros with half the per-bin
## positive minimum, computes the between-size correlation matrix, greedily
## groups consecutive size bins whose correlation with the group's first
## member exceeds a threshold, and saves correlation-matrix figures.
library(tidyverse)
data <- read_csv("data/dataset_hr.csv",
                 col_names = TRUE)
## Row index used to identify observations after pivoting.
data$index<-1:nrow(data)
head(data)
summary(data)
## Long format: one row per (time, size bin); the column names `14.6`..`661.2`
## are particle sizes (presumably nm -- confirm against the data source).
data.long<-data%>%pivot_longer(cols=`14.6`:`661.2`,names_to = "size",values_to = "value")
#no values below zero
data.long%>%filter(value<0)
#921 values of exactly zero. Fill with half of minimum
data.long%>%filter(value==0)
#join in half of minimum value
data.long.c<-left_join(data.long,data.long%>%filter(value>0)%>%group_by(size)%>%summarise(value_min=min(value,na.rm=T)/2),by="size")
## rowwise() so max() compares each observation against its bin's half-minimum.
data.long.c<-data.long.c%>%rowwise()%>%mutate(value.c=max(value,value_min))
data.long.c<-data.long.c%>%mutate(size_num=as.numeric(size))
data.long.c.log<-data.long.c%>%mutate(value.c.log=log(value.c))
## Wide matrices: log-scale and linear-scale versions of the corrected values.
data.log<-data.long.c.log%>%select(-value,-value_min,-size_num,-value.c)%>%pivot_wider(names_from = size,values_from = value.c.log)
data.lin<-data.long.c.log%>%select(-value,-value_min,-size_num,-value.c.log)%>%pivot_wider(names_from = size,values_from = value.c)
# cor.matrix.log<-as.data.frame(cor(data.log%>%select(-date,-index),use="pairwise.complete.obs"))
# names<-names(data%>%select(-date,-index))
# cor.matrix.log$size1<-names
# cor.matrix.log<-cor.matrix%>%pivot_longer(cols=!c("size1"),names_to="size2",values_to = "cor")
cor.matrix<-as.data.frame(cor(data.lin%>%select(-date,-index),use="pairwise.complete.obs"))
## NOTE(review): `names` shadows base::names() for the rest of this script.
names<-names(data%>%select(-date,-index))
cor.matrix$size1<-names
cor.matrix<-cor.matrix%>%pivot_longer(cols=!c("size1"),names_to="size2",values_to = "cor")
#this part basically does the grouping
## A size bin joins the current group while its correlation with the group's
## FIRST member stays above low_lim; otherwise it starts a new group.
low_lim<-0.97 #0.944 for the log one
groups.df<-data.frame(size=names,group=NA)
groups<-list()
z<-1
groups.df$group[1]<-z
groups[[z]]<-names[1]
for(i in 2:length(names)){
check<-as.logical(cor.matrix%>%filter(size1==groups[[z]][1],size2==names[i])%>%mutate(res= cor>low_lim)%>%pull())
if(check){
groups[[z]]<-c(groups[[z]],names[i])
}
else{
z<-z+1
groups[[z]]<-names[i]
}
groups.df$group[i]<-z
}
## Human-readable label per group: "min - max" (or the single size).
groups.df<-left_join(groups.df,groups.df%>%group_by(group)%>%summarise(size.agg=ifelse(min(as.numeric(size))==max(as.numeric(size)),size,paste0(min(as.numeric(size))," - ",max(as.numeric(size))))))
center<-round(as.numeric(groups.df%>%group_by(group)%>%summarise(m=mean(as.numeric(size),na.rm=T))%>%pull(m)),1)
## NOTE(review): the literal 0.97 below duplicates low_lim -- keep in sync.
cor.matrix<-cor.matrix%>%mutate(gt_lim=as.factor(as.numeric(cor>0.97)))
ggplot(cor.matrix)+geom_tile(aes(x=as.numeric(as.character(size1)),y=as.numeric(as.character(size2)),fill=cor),size=2)+scale_fill_viridis_c()+labs(x="Particle size (nm)",y="Particle size (nm)",fill="Correlation")+scale_x_log10()+scale_y_log10()+
  geom_point(data=groups.df,aes(x=as.numeric(as.character(size)),y=13.5,colour=as.factor(group %% 2)),shape=15,size=2)+scale_colour_discrete(guide="none")
ggplot(cor.matrix)+geom_tile(aes(x=as.numeric(as.character(size1)),y=as.numeric(as.character(size2)),fill=gt_lim))+scale_fill_viridis_d()+labs(x="Particle size (nm)",y="Particle size (nm)",fill="Correlation>0.97")+scale_x_log10()+scale_y_log10()
## ggsave() with no plot argument saves the most recently displayed plot.
ggsave(file=paste0("plots/correlation-matrix.pdf"),device="pdf",width=20,height=15,units="cm")
ggplot(cor.matrix)+
  geom_tile(aes(x=as.numeric(as.character(size1)),y=as.numeric(as.character(size2)),fill=cor),size=2)+
  labs(x="Particle size (nm)",y="Particle size (nm)",fill="Correlation")+
  scale_x_log10()+scale_y_log10()+
  scale_fill_binned(type = "viridis",breaks = c(0,.5,.8,.97,1),guide = guide_coloursteps(even.steps = FALSE),
                    labels=c("<0","[0,0.5)","[0.5,0.8)","[0.8,0.97)","0.97>="))+
  guides(fill = guide_legend(label.position = "right"))+
  geom_point(data=groups.df,aes(x=as.numeric(as.character(size)),y=13.5,colour=as.factor(group %% 2)),shape=15,size=2)+scale_colour_discrete(guide="none")
ggsave(file=paste0("plots/fig1-corr-matrix.pdf"),device="pdf",width=20,height=15,units="cm")
ggsave(file=paste0("plots/fig1-corr-matrix.png"),
       dpi=300,device="png",width=20,height=15,units="cm")
|
/code/correlation-matrix.R
|
no_license
|
OBaerenbold/UFP-TIES
|
R
| false
| false
| 3,953
|
r
|
## Correlation-based grouping of particle-size bins.
## Reads the hourly dataset, replaces exact zeros with half the per-bin
## positive minimum, computes the between-size correlation matrix, greedily
## groups consecutive size bins whose correlation with the group's first
## member exceeds a threshold, and saves correlation-matrix figures.
library(tidyverse)
data <- read_csv("data/dataset_hr.csv",
                 col_names = TRUE)
## Row index used to identify observations after pivoting.
data$index<-1:nrow(data)
head(data)
summary(data)
## Long format: one row per (time, size bin); the column names `14.6`..`661.2`
## are particle sizes (presumably nm -- confirm against the data source).
data.long<-data%>%pivot_longer(cols=`14.6`:`661.2`,names_to = "size",values_to = "value")
#no values below zero
data.long%>%filter(value<0)
#921 values of exactly zero. Fill with half of minimum
data.long%>%filter(value==0)
#join in half of minimum value
data.long.c<-left_join(data.long,data.long%>%filter(value>0)%>%group_by(size)%>%summarise(value_min=min(value,na.rm=T)/2),by="size")
## rowwise() so max() compares each observation against its bin's half-minimum.
data.long.c<-data.long.c%>%rowwise()%>%mutate(value.c=max(value,value_min))
data.long.c<-data.long.c%>%mutate(size_num=as.numeric(size))
data.long.c.log<-data.long.c%>%mutate(value.c.log=log(value.c))
## Wide matrices: log-scale and linear-scale versions of the corrected values.
data.log<-data.long.c.log%>%select(-value,-value_min,-size_num,-value.c)%>%pivot_wider(names_from = size,values_from = value.c.log)
data.lin<-data.long.c.log%>%select(-value,-value_min,-size_num,-value.c.log)%>%pivot_wider(names_from = size,values_from = value.c)
# cor.matrix.log<-as.data.frame(cor(data.log%>%select(-date,-index),use="pairwise.complete.obs"))
# names<-names(data%>%select(-date,-index))
# cor.matrix.log$size1<-names
# cor.matrix.log<-cor.matrix%>%pivot_longer(cols=!c("size1"),names_to="size2",values_to = "cor")
cor.matrix<-as.data.frame(cor(data.lin%>%select(-date,-index),use="pairwise.complete.obs"))
## NOTE(review): `names` shadows base::names() for the rest of this script.
names<-names(data%>%select(-date,-index))
cor.matrix$size1<-names
cor.matrix<-cor.matrix%>%pivot_longer(cols=!c("size1"),names_to="size2",values_to = "cor")
#this part basically does the grouping
## A size bin joins the current group while its correlation with the group's
## FIRST member stays above low_lim; otherwise it starts a new group.
low_lim<-0.97 #0.944 for the log one
groups.df<-data.frame(size=names,group=NA)
groups<-list()
z<-1
groups.df$group[1]<-z
groups[[z]]<-names[1]
for(i in 2:length(names)){
check<-as.logical(cor.matrix%>%filter(size1==groups[[z]][1],size2==names[i])%>%mutate(res= cor>low_lim)%>%pull())
if(check){
groups[[z]]<-c(groups[[z]],names[i])
}
else{
z<-z+1
groups[[z]]<-names[i]
}
groups.df$group[i]<-z
}
## Human-readable label per group: "min - max" (or the single size).
groups.df<-left_join(groups.df,groups.df%>%group_by(group)%>%summarise(size.agg=ifelse(min(as.numeric(size))==max(as.numeric(size)),size,paste0(min(as.numeric(size))," - ",max(as.numeric(size))))))
center<-round(as.numeric(groups.df%>%group_by(group)%>%summarise(m=mean(as.numeric(size),na.rm=T))%>%pull(m)),1)
## NOTE(review): the literal 0.97 below duplicates low_lim -- keep in sync.
cor.matrix<-cor.matrix%>%mutate(gt_lim=as.factor(as.numeric(cor>0.97)))
ggplot(cor.matrix)+geom_tile(aes(x=as.numeric(as.character(size1)),y=as.numeric(as.character(size2)),fill=cor),size=2)+scale_fill_viridis_c()+labs(x="Particle size (nm)",y="Particle size (nm)",fill="Correlation")+scale_x_log10()+scale_y_log10()+
  geom_point(data=groups.df,aes(x=as.numeric(as.character(size)),y=13.5,colour=as.factor(group %% 2)),shape=15,size=2)+scale_colour_discrete(guide="none")
ggplot(cor.matrix)+geom_tile(aes(x=as.numeric(as.character(size1)),y=as.numeric(as.character(size2)),fill=gt_lim))+scale_fill_viridis_d()+labs(x="Particle size (nm)",y="Particle size (nm)",fill="Correlation>0.97")+scale_x_log10()+scale_y_log10()
## ggsave() with no plot argument saves the most recently displayed plot.
ggsave(file=paste0("plots/correlation-matrix.pdf"),device="pdf",width=20,height=15,units="cm")
ggplot(cor.matrix)+
  geom_tile(aes(x=as.numeric(as.character(size1)),y=as.numeric(as.character(size2)),fill=cor),size=2)+
  labs(x="Particle size (nm)",y="Particle size (nm)",fill="Correlation")+
  scale_x_log10()+scale_y_log10()+
  scale_fill_binned(type = "viridis",breaks = c(0,.5,.8,.97,1),guide = guide_coloursteps(even.steps = FALSE),
                    labels=c("<0","[0,0.5)","[0.5,0.8)","[0.8,0.97)","0.97>="))+
  guides(fill = guide_legend(label.position = "right"))+
  geom_point(data=groups.df,aes(x=as.numeric(as.character(size)),y=13.5,colour=as.factor(group %% 2)),shape=15,size=2)+scale_colour_discrete(guide="none")
ggsave(file=paste0("plots/fig1-corr-matrix.pdf"),device="pdf",width=20,height=15,units="cm")
ggsave(file=paste0("plots/fig1-corr-matrix.png"),
       dpi=300,device="png",width=20,height=15,units="cm")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/report.R
\name{report_progress}
\alias{report_progress}
\title{Print issue-milestone progress in RMarkdown friendly way}
\usage{
report_progress(issues, group_var = "milestone_title", link_url = TRUE,
show_ratio = TRUE, show_pct = TRUE)
}
\arguments{
\item{issues}{Dataframe or tibble of issues and milestones, as returned by \code{get_issues()} and \code{parse_issues()}}
\item{group_var}{Character string variable name by which to group issues. Defaults to \code{"milestone_title"}}
\item{link_url}{Boolean. Whether or not to provide link to each item, as provided by \code{url} column in dataset}
\item{show_ratio}{Boolean. Whether or not to report (# Closed Items / # Total Items) for each group as a ratio}
\item{show_pct}{Boolean. Whether or not to report (# Closed Items / # Total Items) for each group as a percent}
}
\value{
Returns character string of HTML with class attribute to be correctly
shown "as-is" in RMarkdown
}
\description{
Interprets dataframe or tibble of items (e.g. issues) by breaking apart groups (e.g. milestones), listing each
item title as open or closed, and using HTML to format results in a highly readable and
attractive way. Resulting object returned is a character vector of HTML code with the added
class of \code{'knit_asis'} so that when included in an RMarkdown document knitting to HTML,
the results will be correctly rendered as HTML.
}
\details{
The resulting HTML unordered list (\code{<ul>}) is tagged with class 'report_progress' for custom CSS styling.
Items without a related group are put into an "Ungrouped" category. Filter these out before using this function if
you wish to only show items that are in a group.
}
\examples{
\dontrun{
repo <- create_repo_ref("emilyriederer", "projmgr")
issues <- get_issues(repo, state = 'all')
issues_df <- parse_issues(issues)
report_progress(issues_df)
}
}
\seealso{
Other issues: \code{\link{get_issue_comments}},
\code{\link{get_issue_events}}, \code{\link{get_issues}},
\code{\link{parse_issue_comments}},
\code{\link{parse_issue_events}},
\code{\link{parse_issues}}, \code{\link{post_issue}},
\code{\link{report_discussion}},
\code{\link{viz_waterfall}}
}
\concept{issues}
|
/man/report_progress.Rd
|
permissive
|
silvrwolfboy/projmgr
|
R
| false
| true
| 2,269
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/report.R
\name{report_progress}
\alias{report_progress}
\title{Print issue-milestone progress in RMarkdown friendly way}
\usage{
report_progress(issues, group_var = "milestone_title", link_url = TRUE,
show_ratio = TRUE, show_pct = TRUE)
}
\arguments{
\item{issues}{Dataframe or tibble of issues and milestones, as returned by \code{get_issues()} and \code{parse_issues()}}
\item{group_var}{Character string variable name by which to group issues. Defaults to \code{"milestone_title"}}
\item{link_url}{Boolean. Whether or not to provide link to each item, as provided by \code{url} column in dataset}
\item{show_ratio}{Boolean. Whether or not to report (# Closed Items / # Total Items) for each group as a ratio}
\item{show_pct}{Boolean. Whether or not to report (# Closed Items / # Total Items) for each group as a percent}
}
\value{
Returns character string of HTML with class attribute to be correctly
shown "as-is" in RMarkdown
}
\description{
Interprets dataframe or tibble of items (e.g. issues) by breaking apart groups (e.g. milestones), listing each
item title as open or closed, and using HTML to format results in a highly readable and
attractive way. Resulting object returned is a character vector of HTML code with the added
class of \code{'knit_asis'} so that when included in an RMarkdown document knitting to HTML,
the results will be correctly rendered as HTML.
}
\details{
The resulting HTML unordered list (\code{<ul>}) is tagged with class 'report_progress' for custom CSS styling.
Items without a related group are put into an "Ungrouped" category. Filter these out before using this function if
you wish to only show items that are in a group.
}
\examples{
\dontrun{
repo <- create_repo_ref("emilyriederer", "projmgr")
issues <- get_issues(repo, state = 'all')
issues_df <- parse_issues(issues)
report_progress(issues_df)
}
}
\seealso{
Other issues: \code{\link{get_issue_comments}},
\code{\link{get_issue_events}}, \code{\link{get_issues}},
\code{\link{parse_issue_comments}},
\code{\link{parse_issue_events}},
\code{\link{parse_issues}}, \code{\link{post_issue}},
\code{\link{report_discussion}},
\code{\link{viz_waterfall}}
}
\concept{issues}
|
#################### 1. Setting environment ##################################
# Load SparkR
## Falls back to a default install path when SPARK_HOME is not already set.
spark_path <- '/usr/local/spark'
if (nchar(Sys.getenv("SPARK_HOME")) < 1) {
  Sys.setenv(SPARK_HOME = spark_path)
}
library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
sparkR.session(master = "yarn", sparkConfig = list(spark.driver.memory = "1g"))
# Before executing any hive-sql query from RStudio, you need to add a jar file in RStudio
sql("ADD JAR /opt/cloudera/parcels/CDH/lib/hive/lib/hive-hcatalog-core-1.1.0-cdh5.11.2.jar")
## NYC parking-violation datasets, one SparkDataFrame per fiscal year.
## inferSchema forces an extra pass over each file but yields typed columns.
df_2015 <- read.df("hdfs:///common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_2015.csv", source = "csv",
                   inferSchema = "true", header = "true")
df_2016 <- read.df("hdfs:///common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_2016.csv", source = "csv",
                   inferSchema = "true", header = "true")
df_2017 <- read.df("hdfs:///common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_2017.csv", source = "csv",
                   inferSchema = "true", header = "true")
################################################################################
################### 2. Basic data cleaning ########################################
### 1. Remove duplicate rows
df_2015 <- dropDuplicates(df_2015)
df_2016 <- dropDuplicates(df_2016)
df_2017 <- dropDuplicates(df_2017)
## Group by Summons Number to find duplicate summons number.
## We see there is one "Summons Number" that repeats, but rows are not duplicate
## Considering it's not stated in the data and information provided that the number has to be unique, we have retained the row.
dftemp <- SparkR::summarize(groupBy(df_2015, df_2015$`Summons Number`), count = n(df_2015$`Summons Number`))
head(SparkR::filter(dftemp, dftemp$count >1))
head(SparkR:: filter(df_2015, df_2015$`Summons Number`== 1368159308))
################# 3. Examining the Data #############################################
# 1. Find the total number of tickets for each year.
# Since we have removed all duplicate rows, we simply use nrow
total_entry_2015<- nrow(df_2015)
total_entry_2015
#[1] 10951257
total_entry_2016<- nrow(df_2016)
total_entry_2016
#[1] 10626899
total_entry_2017<- nrow(df_2017)
total_entry_2017
#[1] 10803028
##################### Comparison of total number of tickets across year #####################
# Create a data frame with three years and the total number of tickets
df_three_years <- data.frame( x=c(2015,2016,2017), y = c(total_entry_2015,total_entry_2016,total_entry_2017))
# Initialize the ggplot library
library(ggplot2)
# Plot a bar graph comparing the tickets across the three years.
## NOTE(review): `df_three_years$x` inside aes() is an anti-pattern (bare
## column names `x`/`y` are the conventional form), and `plot` shadows
## base::plot(); both are left unchanged here to preserve behavior.
## The y-axis labels are divided by 1000 (i.e. shown in thousands).
plot <- ggplot(df_three_years, aes(x =df_three_years$x, y = df_three_years$y,label=y,fill=x)) +
  geom_bar(stat = "identity") +
  xlab("Year") + ylab("Total number of tickets")+ scale_y_continuous(labels = function(l) {trans = l / 1000}) +
  labs(x = "Year ", y = "Total number of tickets")+
  geom_text(nudge_y = 1,color="white",size = 3, hjust = 0.5,vjust=1.5)+ theme_bw() + theme(legend.position = "none")
# View the plot
plot
################################################################################
#2. Find out the number of unique states from where the cars that got parking tickets came from.
##################### 2015 #################################################
### Count frequency of tickets per Registration State and arrange in decreasing order of number of tickets per each state
createOrReplaceTempView(df_2015,"dfv2015")
df_by_state_2015 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv2015
                                group by `Registration State`
                                order by frequency_of_tickets desc")
showDF(df_by_state_2015)
### Registration state Code with maximum entry for parking tickets raised.
stateCodeMax2015 <- first(df_by_state_2015)$`Registration State`
stateCodeMax2015
### Select State code having numeric entry and store it in stateCodeNum
### BUG FIX: the pattern was '\\[0-9]\\[0-9]'; the doubled backslashes made
### the regex depend on Spark's string-escape handling (escaped brackets
### match the literal text "[0-9][0-9]"). A plain two-digit character class
### is unambiguous and matches any two-digit state code.
dftemp_2015 <- SparkR::sql("SELECT `Registration State` from dfv2015 where `Registration State` rlike '[0-9][0-9]'")
stateCodeNum2015 <- first(dftemp_2015)$ `Registration State`
stateCodeNum2015
### Replace entries with "stateCodeNum" ( numeric entry ) with "stateCodeMax" (state with maximum entry for tickets)
### Since count has modified now, summarize and arrange now and check again
df_by_state_2015 <- withColumn(df_2015, "Registration State", ifelse(df_2015$`Registration State` == stateCodeNum2015,stateCodeMax2015,df_2015$`Registration State`))
createOrReplaceTempView(df_by_state_2015, "dfv_state_2015")
df_by_state_2015 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv_state_2015
                                group by `Registration State`
                                order by frequency_of_tickets desc")
showDF(df_by_state_2015)
##################### 2016 #################################################
### Count frequency of tickets per Registration State and arrange in decreasing order of number of tickets per each state
createOrReplaceTempView(df_2016,"dfv2016")
df_by_state_2016 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv2016
                                group by `Registration State`
                                order by frequency_of_tickets desc")
showDF(df_by_state_2016)
### Registration state Code with maximum entry for parking tickets raised.
stateCodeMax2016 <- first(df_by_state_2016)$`Registration State`
stateCodeMax2016
### Select State code having numeric entry and store it in stateCodeNum
### BUG FIX: the pattern was '\\[0-9]\\[0-9]'; the doubled backslashes made
### the regex depend on Spark's string-escape handling (escaped brackets
### match the literal text "[0-9][0-9]"). A plain two-digit character class
### is unambiguous and matches any two-digit state code.
dftemp_2016 <- SparkR::sql("SELECT `Registration State` from dfv2016 where `Registration State` rlike '[0-9][0-9]'")
stateCodeNum2016 <- first(dftemp_2016)$ `Registration State`
stateCodeNum2016
### Replace entries with "stateCodeNum" ( numeric entry ) with "stateCodeMax" (state with maximum entry for tickets)
### Since count has modified now, summarize and arrange now and check again
df_by_state_2016 <- withColumn(df_2016, "Registration State", ifelse(df_2016$`Registration State` == stateCodeNum2016,stateCodeMax2016,df_2016$`Registration State`))
createOrReplaceTempView(df_by_state_2016, "dfv_state_2016")
df_by_state_2016 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv_state_2016
                                group by `Registration State`
                                order by frequency_of_tickets desc")
showDF(df_by_state_2016)
##################### 2017 #################################################
### Count frequency of tickets per Registration State and arrange in decreasing order of number of tickets per each state
createOrReplaceTempView(df_2017,"dfv2017")
df_by_state_2017 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv2017
                                group by `Registration State`
                                order by frequency_of_tickets desc")
showDF(df_by_state_2017)
### Registration state Code with maximum entry for parking tickets raised.
stateCodeMax2017 <- first(df_by_state_2017)$`Registration State`
stateCodeMax2017
### Select State code having numeric entry and store it in stateCodeNum
### BUG FIX: the pattern was '\\[0-9]\\[0-9]'; the doubled backslashes made
### the regex depend on Spark's string-escape handling (escaped brackets
### match the literal text "[0-9][0-9]"). A plain two-digit character class
### is unambiguous and matches any two-digit state code.
dftemp_2017 <- SparkR::sql("SELECT `Registration State` from dfv2017 where `Registration State` rlike '[0-9][0-9]'")
stateCodeNum2017 <- first(dftemp_2017)$ `Registration State`
stateCodeNum2017
### Replace entries with "stateCodeNum" ( numeric entry ) with "stateCodeMax" (state with maximum entry for tickets)
### Since count has modified now, summarize and arrange now and check again
df_by_state_2017 <- withColumn(df_2017, "Registration State", ifelse(df_2017$`Registration State` == stateCodeNum2017,stateCodeMax2017,df_2017$`Registration State`))
createOrReplaceTempView(df_by_state_2017, "dfv_state_2017")
df_by_state_2017 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv_state_2017
                                group by `Registration State`
                                order by frequency_of_tickets desc")
showDF(df_by_state_2017)
#################################################################################################################
#3.Some parking tickets don't have the address for violation location on them, which is a cause for concern. Write a query to check the number of such tickets.
#We assume that we are using 'Violation Location' Column for this task.
## Count rows whose `Violation Location` is NULL, per year.
###### 2015
df_2015_no_violation_location <- SparkR::filter(df_2015, isNull(df_2015$`Violation Location`))
nrow(df_2015_no_violation_location)
###### 2016
df_2016_no_violation_location <- SparkR::filter(df_2016, isNull(df_2016$`Violation Location`))
nrow(df_2016_no_violation_location)
###### 2017
df_2017_no_violation_location <- SparkR::filter(df_2017, isNull(df_2017$`Violation Location`))
nrow(df_2017_no_violation_location)
################# 4. Aggregation Tasks #############################################
# 1. How often does each violation code occur? Display the frequency of the top five violation codes.
######### Find the Violation Code and the frequency of Violation for each year and then list the top 5
######### 2015
df_2015_violation_freq <-SparkR::summarize(groupBy(df_2015, df_2015$`Violation code`), count = n(df_2015$`Violation Code`))
df_2015_violation_freq<- arrange(df_2015_violation_freq,"count",decreasing=TRUE)
head(df_2015_violation_freq,5)
######### 2016
df_2016_violation_freq <-SparkR::summarize(groupBy(df_2016, df_2016$`Violation code`), count = n(df_2016$`Violation Code`))
df_2016_violation_freq<- arrange(df_2016_violation_freq,"count",decreasing=TRUE)
head(df_2016_violation_freq,5)
######### 2017
df_2017_violation_freq <-SparkR::summarize(groupBy(df_2017, df_2017$`Violation code`), count = n(df_2017$`Violation Code`))
df_2017_violation_freq<- arrange(df_2017_violation_freq,"count",decreasing=TRUE)
head(df_2017_violation_freq,5)
############################################################################################################################
#2.How often does each 'vehicle body type' get a parking ticket? How about the 'vehicle make'? (Hint: find the top 5 for both)
######### Similar as above, but we find the frequency of tickets for vehicle make .
## NOTE(review): the question asks for BOTH body type and make per year, but
## only `Vehicle Body Type` is computed for 2015 and only `Vehicle Make` for
## 2016/2017 -- confirm whether the missing combinations are intentional.
######### 2015
df_2015_vehicle_body <-SparkR::summarize(groupBy(df_2015, df_2015$`Vehicle Body Type`), count = n(df_2015$`Vehicle Body Type`))
df_2015_vehicle_body<- arrange(df_2015_vehicle_body,"count",decreasing=TRUE)
head(df_2015_vehicle_body,5)
######### 2016
df_2016_vehicle_make <-SparkR::summarize(groupBy(df_2016, df_2016$`Vehicle Make`), count = n(df_2016$`Vehicle Make`))
df_2016_vehicle_make<- arrange(df_2016_vehicle_make,"count",decreasing=TRUE)
head(df_2016_vehicle_make,5)
######### 2017
df_2017_vehicle_make <-SparkR::summarize(groupBy(df_2017, df_2017$`Vehicle Make`), count = n(df_2017$`Vehicle Make`))
df_2017_vehicle_make<- arrange(df_2017_vehicle_make,"count",decreasing=TRUE)
head(df_2017_vehicle_make,5)
############################################################################################################################
#3. Find the (5 highest) frequency of tickets for each of the following:
## Precinct 0 is excluded below -- presumably a placeholder for "no precinct
## recorded" rather than a real precinct; confirm against the data dictionary.
############# 2015
# a. Violation Precinct
df_2015_violation_prec <-SparkR::summarize(groupBy(df_2015, df_2015$`Violation Precinct`), count = n(df_2015$`Violation Precinct`))
head(arrange(filter(df_2015_violation_prec,df_2015_violation_prec$`Violation Precinct` != 0),"count",decreasing=TRUE),5)
#b. Issuer Precinct
df_2015_issuer_prec <-SparkR::summarize(groupBy(df_2015, df_2015$`Issuer Precinct`), count = n(df_2015$`Issuer Precinct`))
head(arrange(filter(df_2015_issuer_prec,df_2015_issuer_prec$`Issuer Precinct` != 0),"count",decreasing=TRUE), 5)
############# 2016
# a. Violation Precinct
df_2016_violation_prec <-SparkR::summarize(groupBy(df_2016, df_2016$`Violation Precinct`), count = n(df_2016$`Violation Precinct`))
head(arrange(filter(df_2016_violation_prec,df_2016_violation_prec$`Violation Precinct` != 0),"count",decreasing=TRUE),5)
#b. Issuer Precinct
df_2016_issuer_prec <-SparkR::summarize(groupBy(df_2016, df_2016$`Issuer Precinct`), count = n(df_2016$`Issuer Precinct`))
head(arrange(filter(df_2016_issuer_prec,df_2016_issuer_prec$`Issuer Precinct` != 0),"count",decreasing=TRUE), 5)
############# 2017
# a. Violation Precinct
df_2017_violation_prec <-SparkR::summarize(groupBy(df_2017, df_2017$`Violation Precinct`), count = n(df_2017$`Violation Precinct`))
head(arrange(filter(df_2017_violation_prec,df_2017_violation_prec$`Violation Precinct` != 0),"count",decreasing=TRUE),5)
#b. Issuer Precinct
df_2017_issuer_prec <-SparkR::summarize(groupBy(df_2017, df_2017$`Issuer Precinct`), count = n(df_2017$`Issuer Precinct`))
head(arrange(filter(df_2017_issuer_prec,df_2017_issuer_prec$`Issuer Precinct` != 0),"count",decreasing=TRUE), 5)
############################################################################################################################
#4. Find the violation code frequency across three precincts which have issued the most number of tickets - do these precinct zones
#have an exceptionally high frequency of certain violation codes? Are these codes common across precincts?
### We have found the top 3 precincts which have most number of tickets in previous question ie. ( Question no: 3 above)
### We see that those precicts are 19, 18 and 14 for 2015. We use the same for solving this query.
############# 2015
# Register the 2015 data as a temp view so it can be queried with Spark SQL.
createOrReplaceTempView(df_2015, "dfv_2015")
# Violation-code frequencies for the three busiest precincts (19, 18, 14),
# combined with UNION ALL, ordered by precinct then descending frequency.
# (Variable name "viloation" is a typo for "violation" used throughout the file.)
df_viloation_code_top_precincts <-
SparkR::sql("select `Violation Precinct`,`Violation Code`, count(*) as frequency from dfv_2015
where `Violation Precinct` = 19
group by `Violation Precinct` ,`Violation Code`
union all
Select `Violation Precinct` ,`Violation Code`, count(*) as frequency from dfv_2015
where `Violation Precinct` = 18
group by `Violation Precinct` ,`Violation Code`
union all
select `Violation Precinct` ,`Violation Code`, count(*) as frequency from dfv_2015
where `Violation Precinct` = 14
group by `Violation Precinct` ,`Violation Code`
order by `Violation Precinct`, frequency desc ")
createOrReplaceTempView(df_viloation_code_top_precincts,"df_viloation_code_top_precincts_2015")
# Rank codes within each precinct via ROW_NUMBER() and keep the top 5 per precinct.
# NOTE(review): HAVING without GROUP BY, filtering on the window alias "rank",
# relies on Spark SQL tolerating this non-standard form; a subquery with
# WHERE rank <= 5 would be the portable formulation -- confirm it runs on the
# target Spark version.
df_viloation_code_top_precincts <- SparkR::sql("select `Violation Precinct` ,`Violation Code`, frequency, ROW_NUMBER() OVER (PARTITION BY `Violation Precinct` order by `Violation Precinct` asc, frequency desc) as rank
from df_viloation_code_top_precincts_2015
having rank <=5 ")
# Display the top-5 codes for each of the three precincts (3 x 5 = 15 rows).
showDF(df_viloation_code_top_precincts)
head(df_viloation_code_top_precincts,15)
############# 2016
# Same Q4 analysis for 2016: precincts 19, 18, 14 (assumed top-3 from Q3).
createOrReplaceTempView(df_2016, "dfv_2016")
df_viloation_code_top_precincts <-
SparkR::sql("select `Violation Precinct`,`Violation Code`, count(*) as frequency from dfv_2016
where `Violation Precinct` = 19
group by `Violation Precinct` ,`Violation Code`
union all
Select `Violation Precinct` ,`Violation Code`, count(*) as frequency from dfv_2016
where `Violation Precinct` = 18
group by `Violation Precinct` ,`Violation Code`
union all
select `Violation Precinct` ,`Violation Code`, count(*) as frequency from dfv_2016
where `Violation Precinct` = 14
group by `Violation Precinct` ,`Violation Code`
order by `Violation Precinct`, frequency desc ")
createOrReplaceTempView(df_viloation_code_top_precincts,"df_viloation_code_top_precincts_2016")
# Top 5 violation codes per precinct, ranked with ROW_NUMBER().
# NOTE(review): HAVING without GROUP BY on the window alias "rank" -- see the
# portability note in the 2015 section; a WHERE-in-subquery form is standard.
df_viloation_code_top_precincts <- SparkR::sql("select `Violation Precinct` ,`Violation Code`, frequency, ROW_NUMBER() OVER (PARTITION BY `Violation Precinct` order by `Violation Precinct` asc, frequency desc) as rank
from df_viloation_code_top_precincts_2016
having rank <=5 ")
showDF(df_viloation_code_top_precincts)
head(df_viloation_code_top_precincts,15)
############# 2017
# Same Q4 analysis for 2017.
# NOTE(review): this year queries precincts 19, 14 and 1 (not 18) -- presumably
# the Q3 top-3 for 2017 differs from 2015/2016; confirm "1" against the Q3
# 2017 output and that it is not a truncated "18".
createOrReplaceTempView(df_2017, "dfv_2017")
df_viloation_code_top_precincts <-
SparkR::sql("select `Violation Precinct`,`Violation Code`, count(*) as frequency from dfv_2017
where `Violation Precinct` = 19
group by `Violation Precinct` ,`Violation Code`
union all
Select `Violation Precinct` ,`Violation Code`, count(*) as frequency from dfv_2017
where `Violation Precinct` = 14
group by `Violation Precinct` ,`Violation Code`
union all
select `Violation Precinct` ,`Violation Code`, count(*) as frequency from dfv_2017
where `Violation Precinct` = 1
group by `Violation Precinct` ,`Violation Code`
order by `Violation Precinct`, frequency desc ")
createOrReplaceTempView(df_viloation_code_top_precincts,"df_viloation_code_top_precincts_2017")
# Top 5 violation codes per precinct, ranked with ROW_NUMBER().
# NOTE(review): HAVING without GROUP BY on the window alias "rank" -- see the
# portability note in the 2015 section.
df_viloation_code_top_precincts <- SparkR::sql("select `Violation Precinct` ,`Violation Code`, frequency, ROW_NUMBER() OVER (PARTITION BY `Violation Precinct` order by `Violation Precinct` asc, frequency desc) as rank
from df_viloation_code_top_precincts_2017
having rank <=5 ")
showDF(df_viloation_code_top_precincts)
head(df_viloation_code_top_precincts,15)
############################################################################################################################
#5.You’d want to find out the properties of parking violations across different times of the day:
############# 2015
#Find a way to deal with missing values, if any.
#Hint: Check for the null values using 'isNull' under the SQL. Also, to remove the null values, check the 'dropna' command in the API documentation.
# Filter and included only the non null values for this analysis
df_2015_violation_time <- SparkR::filter(df_2015, isNotNull(df_2015$`Violation Time`))
#The Violation Time field is specified in a strange format. Find a way to make this into a time attribute that you can use to divide into groups.
#Divide 24 hours into six equal discrete bins of time. The intervals you choose are at your discretion. For each of these groups, find the three most
#commonly occurring violations.
#Hint: Use the CASE-WHEN in SQL view to segregate into bins.
# Split the Violation Time column Ex: 0945A to be interpreted as 9:45 AM
# Hence, we split the string into hour as 09 and ignore the minutes
# We use the last column 'A' or 'P' to convert time into 24 hour format
# We add 00 to the time if the last column is A( refering to AM)
# We add 12 to the time if the last column is P( refering to PM)
#Consider the hour column
df_2015_violation_time$hour<- substr(df_2015_violation_time$`Violation Time`,1,2)
#EXtract the AM or PM information
df_2015_violation_time$AMPM <- substr(df_2015_violation_time$`Violation Time`,5,5)
#Convert AM to 0 and PM to 12
df_2015_violation_time$AMPM <- regexp_replace(df_2015_violation_time$AMPM,'A','0')
df_2015_violation_time$AMPM <- regexp_replace(df_2015_violation_time$AMPM,'P','12')
#Convert time to 24 hour format
# NOTE(review): hour + AMPM maps "12xxP" to 24 and "12xxA" to 12 instead of 12
# and 0, so the 12 AM / 12 PM edge cases land in the wrong bins -- confirm intent.
df_2015_violation_time$hour <- df_2015_violation_time$hour+df_2015_violation_time$AMPM
createOrReplaceTempView(df_2015_violation_time, "dfv_2015")
#Create 6 different bins for time of hour and group the data
#Primary Group - Time Of Day , Secondary Group - Violation Code
#Arrange in the descending order of total violation records per time of day per violation code
# NOTE(review): hour == 8 is not matched by any WHEN (bins jump from "<= 7" to
# "> 8") and so falls through to the ELSE bin 6 -- confirm intent.
bins <- sql("SELECT `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 8 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2015
group by timeOfDay,`Violation Code`
order by timeOfDay ASC ,`Total Violation` DESC ")
head(bins)
createOrReplaceTempView(bins, "dfv_2015_Total_Violation")
#For finding the most commonly occurring violations, a similar approach can be used as mention in the hint for question 4.
#Find the top 3 violation codes for each time of the day
# Six per-bin top-3 selections stitched together with UNION ALL.
df_top_3_violation_for_each_time_of_day_2015 <- sql("(select * from dfv_2015_Total_Violation where timeOfDay = 1 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2015_Total_Violation where timeOfDay = 2 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2015_Total_Violation where timeOfDay = 3 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2015_Total_Violation where timeOfDay = 4 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2015_Total_Violation where timeOfDay = 5 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2015_Total_Violation where timeOfDay = 6 order by `Total Violation` DESC limit 3)")
# Replace the numeric bin ids with human-readable period labels.
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"1",'Midnight')
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"2",'Earlymorning')
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"3",'Morning')
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"4",'Afternoon')
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"5",'Evening')
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"6",'Night')
head(df_top_3_violation_for_each_time_of_day_2015)
#Find time of the day for top three violation codes
# Reverse view: the 3 most common violation codes overall, each with the bin
# in which it occurs most.
bins <- sql("SELECT distinct `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 8 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2015
group by `Violation Code`,timeOfDay
order by `Total Violation` DESC Limit 3 ")
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"1",'Midnight')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"2",'Earlymorning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"3",'Morning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"4",'Afternoon')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"5",'Evening')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"6",'Night')
showDF(bins)
############# 2016
# Q5 (2016): distribution of parking violations across times of the day.
# Drop rows with a missing 'Violation Time' before any time-based analysis.
df_2016_violation_time <- SparkR::filter(df_2016, isNotNull(df_2016$`Violation Time`))
# 'Violation Time' is encoded like "0945A" (hhmm + A/P meridiem flag). We keep
# only the hour ("09"), map A -> 0 and P -> 12, and add the offset to get a
# 24-hour value.
# NOTE(review): this maps "12xxP" to 24 and "12xxA" to 12 instead of 12 and 0;
# the 12 AM / 12 PM edge cases are mis-binned. Kept as-is for consistency with
# the 2015 and 2017 sections.
df_2016_violation_time$hour<- substr(df_2016_violation_time$`Violation Time`,1,2)
df_2016_violation_time$AMPM <- substr(df_2016_violation_time$`Violation Time`,5,5)
df_2016_violation_time$AMPM <- regexp_replace(df_2016_violation_time$AMPM,'A','0')
df_2016_violation_time$AMPM <- regexp_replace(df_2016_violation_time$AMPM,'P','12')
df_2016_violation_time$hour <- df_2016_violation_time$hour+df_2016_violation_time$AMPM
createOrReplaceTempView(df_2016_violation_time, "dfv_2016")
# Bin the day into six periods and count violations per (period, code),
# ordered by period then descending count.
# NOTE(review): hour == 8 falls through to the ELSE bin 6 (bins jump from
# "<= 7" to "> 8"); kept to match the other years' sections.
bins <- sql("SELECT `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 8 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2016
group by timeOfDay,`Violation Code`
order by timeOfDay ASC ,`Total Violation` DESC ")
head(bins)
createOrReplaceTempView(bins, "dfv_2016_Total_Violation")
# Top 3 violation codes for each time-of-day bin, stitched with UNION ALL.
# BUG FIX: the last branch (timeOfDay = 6) previously queried the 2015 view
# "dfv_2015_Total_Violation", silently mixing 2015 data into the 2016 result;
# it now queries "dfv_2016_Total_Violation" like the other five branches.
df_top_3_violation_for_each_time_of_day_2016 <- sql("(select * from dfv_2016_Total_Violation where timeOfDay = 1 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2016_Total_Violation where timeOfDay = 2 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2016_Total_Violation where timeOfDay = 3 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2016_Total_Violation where timeOfDay = 4 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2016_Total_Violation where timeOfDay = 5 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2016_Total_Violation where timeOfDay = 6 order by `Total Violation` DESC limit 3)")
# Replace the numeric bin ids with human-readable period labels.
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"1",'Midnight')
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"2",'Earlymorning')
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"3",'Morning')
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"4",'Afternoon')
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"5",'Evening')
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"6",'Night')
head(df_top_3_violation_for_each_time_of_day_2016)
# Reverse view: the 3 most common violation codes overall, each with the bin
# in which it occurs most.
bins <- sql("SELECT distinct `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 8 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2016
group by `Violation Code`,timeOfDay
order by `Total Violation` DESC Limit 3 ")
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"1",'Midnight')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"2",'Earlymorning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"3",'Morning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"4",'Afternoon')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"5",'Evening')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"6",'Night')
showDF(bins)
############# 2017
# Q5 (2017): distribution of parking violations across times of the day.
# Drop rows with a missing 'Violation Time' before any time-based analysis.
df_2017_violation_time <- SparkR::filter(df_2017, isNotNull(df_2017$`Violation Time`))
# 'Violation Time' is encoded like "0945A" (hhmm + A/P meridiem flag). We keep
# only the hour ("09"), map A -> 0 and P -> 12, and add the offset to get a
# 24-hour value.
# NOTE(review): this maps "12xxP" to 24 and "12xxA" to 12 instead of 12 and 0;
# the 12 AM / 12 PM edge cases are mis-binned. Kept as-is for consistency with
# the 2015 and 2016 sections.
df_2017_violation_time$hour<- substr(df_2017_violation_time$`Violation Time`,1,2)
df_2017_violation_time$AMPM <- substr(df_2017_violation_time$`Violation Time`,5,5)
df_2017_violation_time$AMPM <- regexp_replace(df_2017_violation_time$AMPM,'A','0')
df_2017_violation_time$AMPM <- regexp_replace(df_2017_violation_time$AMPM,'P','12')
df_2017_violation_time$hour <- df_2017_violation_time$hour+df_2017_violation_time$AMPM
createOrReplaceTempView(df_2017_violation_time, "dfv_2017")
# Bin the day into six periods and count violations per (period, code),
# ordered by period then descending count.
# NOTE(review): hour == 8 falls through to the ELSE bin 6 (bins jump from
# "<= 7" to "> 8"); kept to match the other years' sections.
bins <- sql("SELECT `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 8 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2017
group by timeOfDay,`Violation Code`
order by timeOfDay ASC ,`Total Violation` DESC ")
head(bins)
createOrReplaceTempView(bins, "dfv_2017_Total_Violation")
# Top 3 violation codes for each time-of-day bin, stitched with UNION ALL.
# BUG FIX: the last branch (timeOfDay = 6) previously queried the 2015 view
# "dfv_2015_Total_Violation", silently mixing 2015 data into the 2017 result;
# it now queries "dfv_2017_Total_Violation" like the other five branches.
df_top_3_violation_for_each_time_of_day_2017 <- sql("(select * from dfv_2017_Total_Violation where timeOfDay = 1 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2017_Total_Violation where timeOfDay = 2 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2017_Total_Violation where timeOfDay = 3 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2017_Total_Violation where timeOfDay = 4 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2017_Total_Violation where timeOfDay = 5 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2017_Total_Violation where timeOfDay = 6 order by `Total Violation` DESC limit 3)")
# Replace the numeric bin ids with human-readable period labels.
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"1",'Midnight')
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"2",'Earlymorning')
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"3",'Morning')
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"4",'Afternoon')
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"5",'Evening')
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"6",'Night')
head(df_top_3_violation_for_each_time_of_day_2017)
# Reverse view: the 3 most common violation codes overall, each with the bin
# in which it occurs most.
bins <- sql("SELECT distinct `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 8 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2017
group by `Violation Code`,timeOfDay
order by `Total Violation` DESC Limit 3 ")
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"1",'Midnight')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"2",'Earlymorning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"3",'Morning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"4",'Afternoon')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"5",'Evening')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"6",'Night')
showDF(bins)
############################################################################################################################
#6.First, divide the year into some number of seasons, and
## 1. Find frequencies of tickets for each season. (Hint: Use Issue Date to segregate into seasons)
## 2. Then, find the three most common violations for each of these seasons.
############# 2015
# Derive the issue month from 'Issue Date' (format MM/dd/yyyy) for season bucketing.
df_2015$`Issue Month` <- month(SparkR::to_date(df_2015$`Issue Date`,format ="MM/dd/yyyy"))
createOrReplaceTempView(df_2015, "dfv2015")
# Season buckets: Apr-Jun = 1 (Summer), Jul-Sep = 2 (Rainy), Oct-Dec = 3 (Autumn),
# everything else (Jan-Mar and unparsed dates) = 4 (Winter); count tickets per
# (season, violation code).
df_segregated_seasons_2015 <- SparkR::sql("SELECT
CASE WHEN(`Issue Month`>=4 and `Issue Month` <= 6 ) THEN 1 \
WHEN ( `Issue Month` >=7 and `Issue Month` <= 9) THEN 2 \
WHEN ( `Issue Month` >=10 and `Issue Month` <= 12 ) THEN 3 \
ELSE 4 END as season, `Violation Code`,count(`Violation Code`)as `Total Violation` FROM dfv2015
group by season,`Violation Code`
order by season asc, `Total Violation` desc")
createOrReplaceTempView(df_segregated_seasons_2015, "binv2015")
# Top 3 violation codes per season via ROW_NUMBER() partitioned by season.
df_top_violation_perseason_2015 <- SparkR::sql("select season, `Violation Code`,`Total Violation`, ROW_NUMBER() OVER (PARTITION BY season order by season asc,`Total Violation` desc) as rank
from binv2015
having rank <=3")
# BUG FIX: the original called cast() without assigning the result, making it a
# no-op; assign it so 'season' really is a string column before regexp_replace.
df_top_violation_perseason_2015$season <- cast(df_top_violation_perseason_2015$season, "string")
# Replace numeric season ids with readable labels.
df_top_violation_perseason_2015$season <- regexp_replace(df_top_violation_perseason_2015$season,"1",'Summer')
df_top_violation_perseason_2015$season <- regexp_replace(df_top_violation_perseason_2015$season,"2",'Rainy')
df_top_violation_perseason_2015$season <- regexp_replace(df_top_violation_perseason_2015$season,"3",'Autumn')
df_top_violation_perseason_2015$season <- regexp_replace(df_top_violation_perseason_2015$season,"4",'Winter')
showDF(df_top_violation_perseason_2015)
############# 2016
# Derive the issue month from 'Issue Date' (format MM/dd/yyyy) for season bucketing.
df_2016$`Issue Month` <- month(SparkR::to_date(df_2016$`Issue Date`,format ="MM/dd/yyyy"))
createOrReplaceTempView(df_2016, "dfv2016")
# Season buckets: Apr-Jun = 1 (Summer), Jul-Sep = 2 (Rainy), Oct-Dec = 3 (Autumn),
# everything else = 4 (Winter); count tickets per (season, violation code).
df_segregated_seasons_2016 <- SparkR::sql("SELECT
CASE WHEN(`Issue Month`>=4 and `Issue Month` <= 6 ) THEN 1 \
WHEN ( `Issue Month` >=7 and `Issue Month` <= 9) THEN 2 \
WHEN ( `Issue Month` >=10 and `Issue Month` <= 12 ) THEN 3 \
ELSE 4 END as season, `Violation Code`,count(`Violation Code`)as `Total Violation` FROM dfv2016
group by season,`Violation Code`
order by season asc, `Total Violation` desc")
#Then, find the three most common violations for each of these seasons.
#(Hint: A similar approach can be used as mention in the hint for question 4.)
createOrReplaceTempView(df_segregated_seasons_2016, "binv2016")
# Top 3 violation codes per season via ROW_NUMBER() partitioned by season.
df_top_violation_perseason_2016 <- SparkR::sql("select season, `Violation Code`,`Total Violation`, ROW_NUMBER() OVER (PARTITION BY season order by season asc,`Total Violation` desc) as rank
from binv2016
having rank <=3")
# BUG FIX: the original called cast() without assigning the result, making it a
# no-op; assign it so 'season' really is a string column before regexp_replace.
df_top_violation_perseason_2016$season <- cast(df_top_violation_perseason_2016$season, "string")
# Replace numeric season ids with readable labels.
df_top_violation_perseason_2016$season <- regexp_replace(df_top_violation_perseason_2016$season,"1",'Summer')
df_top_violation_perseason_2016$season <- regexp_replace(df_top_violation_perseason_2016$season,"2",'Rainy')
df_top_violation_perseason_2016$season <- regexp_replace(df_top_violation_perseason_2016$season,"3",'Autumn')
df_top_violation_perseason_2016$season <- regexp_replace(df_top_violation_perseason_2016$season,"4",'Winter')
showDF(df_top_violation_perseason_2016)
############# 2017
# Derive the issue month from 'Issue Date' (format MM/dd/yyyy) for season bucketing.
df_2017$`Issue Month` <- month(SparkR::to_date(df_2017$`Issue Date`,format ="MM/dd/yyyy"))
createOrReplaceTempView(df_2017, "dfv2017")
# Season buckets: Apr-Jun = 1 (Summer), Jul-Sep = 2 (Rainy), Oct-Dec = 3 (Autumn),
# everything else = 4 (Winter); count tickets per (season, violation code).
df_segregated_seasons_2017 <- SparkR::sql("SELECT
CASE WHEN(`Issue Month`>=4 and `Issue Month` <= 6 ) THEN 1 \
WHEN ( `Issue Month` >=7 and `Issue Month` <= 9) THEN 2 \
WHEN ( `Issue Month` >=10 and `Issue Month` <= 12 ) THEN 3 \
ELSE 4 END as season, `Violation Code`,count(`Violation Code`)as `Total Violation` FROM dfv2017
group by season,`Violation Code`
order by season asc, `Total Violation` desc")
#Then, find the three most common violations for each of these seasons.
#(Hint: A similar approach can be used as mention in the hint for question 4.)
createOrReplaceTempView(df_segregated_seasons_2017, "binv2017")
# Top 3 violation codes per season via ROW_NUMBER() partitioned by season.
df_top_violation_perseason_2017 <- SparkR::sql("select season, `Violation Code`,`Total Violation`, ROW_NUMBER() OVER (PARTITION BY season order by season asc,`Total Violation` desc) as rank
from binv2017
having rank <=3")
# BUG FIX: the original called cast() without assigning the result, making it a
# no-op; assign it so 'season' really is a string column before regexp_replace.
df_top_violation_perseason_2017$season <- cast(df_top_violation_perseason_2017$season, "string")
# Replace numeric season ids with readable labels.
df_top_violation_perseason_2017$season <- regexp_replace(df_top_violation_perseason_2017$season,"1",'Summer')
df_top_violation_perseason_2017$season <- regexp_replace(df_top_violation_perseason_2017$season,"2",'Rainy')
df_top_violation_perseason_2017$season <- regexp_replace(df_top_violation_perseason_2017$season,"3",'Autumn')
df_top_violation_perseason_2017$season <- regexp_replace(df_top_violation_perseason_2017$season,"4",'Winter')
showDF(df_top_violation_perseason_2017)
############################################################################################################################
#7.The fines collected from all the parking violation constitute a revenue source for the NYC police department. Let’s take an example of estimating that for the
#three most commonly occurring codes.
############# 2015
createOrReplaceTempView(df_2015, "dfv2015")
#Find total occurrences of the three most common violation codes
df_fine <- SparkR::sql("SELECT `Violation Code`,count(*)as `Total Violation` \
from dfv2015
group by `Violation Code`
order by `Total Violation` DESC
Limit 3")
# Collect the small 3-row result into a local base-R data.frame.
df_fine <- data.frame(head(df_fine))
head(df_fine)
#Then, visit the website:
#http://www1.nyc.gov/site/finance/vehicles/services-violation-codes.page
#It lists the fines associated with different violation codes. They’re divided into two categories, one for the highest-density locations of the city, the other for the rest of the city. #For simplicity, take an average of the two.
# Hand-entered average fees per code, positionally matched to the 3 codes above.
# NOTE(review): confirm the fee order matches the codes returned by the query.
df_fee_r <- data.frame(Fee=c(55,50,115))
df_fine <- cbind(df_fine,df_fee_r)
# Revenue estimate = ticket count * average fee per code.
df_fine$`Total_Fine` <- df_fine$`Total.Violation` * df_fine$`Fee`
head(df_fine)
#Using this information, find the total amount collected for the three violation codes with maximum tickets. State the code which
# has the highest total collection.
head(df_fine[order(-df_fine$Total_Fine),],1)
############# 2016
createOrReplaceTempView(df_2016, "dfv2016")
#Find total occurrences of the three most common violation codes
df_fine <- SparkR::sql("SELECT `Violation Code`,count(*)as `Total Violation` \
from dfv2016
group by `Violation Code`
order by `Total Violation` DESC
Limit 3")
# Collect the small 3-row result into a local base-R data.frame.
df_fine <- data.frame(head(df_fine))
head(df_fine)
#Then, visit the website:
#http://www1.nyc.gov/site/finance/vehicles/services-violation-codes.page
#It lists the fines associated with different violation codes. They’re divided into two categories, one for the highest-density locations of the city, the other for the rest of the city. #For simplicity, take an average of the two.
# Hand-entered average fees per code, positionally matched to the 3 codes above.
# NOTE(review): confirm the fee order matches the codes returned by the query.
df_fee_r <- data.frame(Fee=c(55,50,50))
df_fine <- cbind(df_fine,df_fee_r)
# Revenue estimate = ticket count * average fee per code.
df_fine$`Total_Fine` <- df_fine$`Total.Violation` * df_fine$`Fee`
head(df_fine)
#Using this information, find the total amount collected for the three violation codes with maximum tickets. State the code which
# has the highest total collection.
head(df_fine[order(-df_fine$Total_Fine),],1)
############# 2017
createOrReplaceTempView(df_2017, "dfv2017")
#Find total occurrences of the three most common violation codes
df_fine <- SparkR::sql("SELECT `Violation Code`,count(*)as `Total Violation` \
from dfv2017
group by `Violation Code`
order by `Total Violation` DESC
Limit 3")
# Collect the small 3-row result into a local base-R data.frame.
df_fine <- data.frame(head(df_fine))
head(df_fine)
#Then, visit the website:
#http://www1.nyc.gov/site/finance/vehicles/services-violation-codes.page
#It lists the fines associated with different violation codes. They’re divided into two categories, one for the highest-density locations of the city, the other for the rest of the city. #For simplicity, take an average of the two.
# Hand-entered average fees per code, positionally matched to the 3 codes above.
# NOTE(review): confirm the fee order matches the codes returned by the query.
df_fee_r <- data.frame(Fee=c(55,50,50))
df_fine <- cbind(df_fine,df_fee_r)
# Revenue estimate = ticket count * average fee per code.
df_fine$`Total_Fine` <- df_fine$`Total.Violation` * df_fine$`Fee`
head(df_fine)
#Using this information, find the total amount collected for the three violation codes with maximum tickets. State the code which
# has the highest total collection.
head(df_fine[order(-df_fine$Total_Fine),],1)
############################################################################################################################
|
/NYC Parking Ticket Case Study/NYC_case_study.R
|
no_license
|
nayakvidya/PGDDS-IIITB
|
R
| false
| false
| 45,366
|
r
|
#################### 1. Setting environment ##################################
# Load SparkR
# Point SPARK_HOME at the local Spark install if it is not already set.
spark_path <- '/usr/local/spark'
if (nchar(Sys.getenv("SPARK_HOME")) < 1) {
Sys.setenv(SPARK_HOME = spark_path)
}
library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
# Start a Spark session on YARN with a 1 GB driver.
sparkR.session(master = "yarn", sparkConfig = list(spark.driver.memory = "1g"))
# Before executing any hive-sql query from RStudio, you need to add a jar file in RStudio
sql("ADD JAR /opt/cloudera/parcels/CDH/lib/hive/lib/hive-hcatalog-core-1.1.0-cdh5.11.2.jar")
# Read the three fiscal-year parking-violation CSVs from HDFS, with header row
# and schema inference enabled.
df_2015 <- read.df("hdfs:///common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_2015.csv", source = "csv",
inferSchema = "true", header = "true")
df_2016 <- read.df("hdfs:///common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_2016.csv", source = "csv",
inferSchema = "true", header = "true")
df_2017 <- read.df("hdfs:///common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_2017.csv", source = "csv",
inferSchema = "true", header = "true")
################################################################################
################### 2. Basic data cleaning ########################################
### 1. Remove duplicate rows
# Drop fully-duplicated rows in each year's dataset.
df_2015 <- dropDuplicates(df_2015)
df_2016 <- dropDuplicates(df_2016)
df_2017 <- dropDuplicates(df_2017)
## Group by Summons Number to find duplicate summons number.
## We see there is one "Summons Number" that repeats, but rows are not duplicate
## Considering it's not stated in the data and information provided that the number has to be unique, we have retained the row.
# Count occurrences per Summons Number, then inspect any number appearing more
# than once (1368159308 was found this way in 2015).
dftemp <- SparkR::summarize(groupBy(df_2015, df_2015$`Summons Number`), count = n(df_2015$`Summons Number`))
head(SparkR::filter(dftemp, dftemp$count >1))
head(SparkR:: filter(df_2015, df_2015$`Summons Number`== 1368159308))
################# 3. Examing the Data #############################################
# 1. Find the total number of tickets for each year.
#Since we have removed all duplicate rows, we simply use nrow
total_entry_2015<- nrow(df_2015)
total_entry_2015
#[1] 10951257
total_entry_2016<- nrow(df_2016)
total_entry_2016
#[1] 10626899
total_entry_2017<- nrow(df_2017)
total_entry_2017
#[1] 10803028
##################### Comparison of total number of tickets across year #####################
#Create a data frame with three years and the total number of tickets
df_three_years <- data.frame( x=c(2015,2016,2017), y = c(total_entry_2015,total_entry_2016,total_entry_2017))
#Initialize the ggplot librarby
library(ggplot2)
#Plot a bar graph comparing the tickets across the three years.
# NOTE(review): referencing columns as df_three_years$x inside aes() is a ggplot2
# anti-pattern (bare column names are preferred); harmless for this single-frame
# plot, but worth normalising. Y axis is rescaled to thousands via the labeller.
plot <- ggplot(df_three_years, aes(x =df_three_years$x, y = df_three_years$y,label=y,fill=x)) +
geom_bar(stat = "identity") +
xlab("Year") + ylab("Total number of tickets")+ scale_y_continuous(labels = function(l) {trans = l / 1000}) +
labs(x = "Year ", y = "Total number of tickets")+
geom_text(nudge_y = 1,color="white",size = 3, hjust = 0.5,vjust=1.5)+ theme_bw() + theme(legend.position = "none")
#View the plot
plot
################################################################################
#2. Find out the number of unique states from where the cars that got parking tickets came from.
##################### 2015 #################################################
### Ticket frequency per 'Registration State', in decreasing order.
createOrReplaceTempView(df_2015,"dfv2015")
df_by_state_2015 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv2015
group by `Registration State`
order by frequency_of_tickets desc")
showDF(df_by_state_2015)
### State code with the most tickets; used below to replace the bogus numeric code.
stateCodeMax2015 <- first(df_by_state_2015)$`Registration State`
stateCodeMax2015
### Find the erroneous all-numeric registration state code (e.g. "99").
### BUG FIX: the original pattern '\\[0-9]\\[0-9]' escaped the brackets, so
### rlike searched for the literal text "[0-9][0-9]" and could never match a
### numeric code; '^[0-9]+$' matches codes made up entirely of digits.
dftemp_2015 <- SparkR::sql("SELECT `Registration State` from dfv2015 where `Registration State` rlike '^[0-9]+$'")
stateCodeNum2015 <- first(dftemp_2015)$ `Registration State`
stateCodeNum2015
### Replace the numeric code with the most frequent state, then re-aggregate
### and re-rank now that the counts have changed.
df_by_state_2015 <- withColumn(df_2015, "Registration State", ifelse(df_2015$`Registration State` == stateCodeNum2015,stateCodeMax2015,df_2015$`Registration State`))
createOrReplaceTempView(df_by_state_2015, "dfv_state_2015")
df_by_state_2015 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv_state_2015
group by `Registration State`
order by frequency_of_tickets desc")
showDF(df_by_state_2015)
##################### 2016 #################################################
### Ticket frequency per 'Registration State', in decreasing order.
createOrReplaceTempView(df_2016,"dfv2016")
df_by_state_2016 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv2016
group by `Registration State`
order by frequency_of_tickets desc")
showDF(df_by_state_2016)
### State code with the most tickets; used below to replace the bogus numeric code.
stateCodeMax2016 <- first(df_by_state_2016)$`Registration State`
stateCodeMax2016
### Find the erroneous all-numeric registration state code (e.g. "99").
### BUG FIX: the original pattern '\\[0-9]\\[0-9]' escaped the brackets, so
### rlike searched for the literal text "[0-9][0-9]" and could never match a
### numeric code; '^[0-9]+$' matches codes made up entirely of digits.
dftemp_2016 <- SparkR::sql("SELECT `Registration State` from dfv2016 where `Registration State` rlike '^[0-9]+$'")
stateCodeNum2016 <- first(dftemp_2016)$ `Registration State`
stateCodeNum2016
### Replace the numeric code with the most frequent state, then re-aggregate
### and re-rank now that the counts have changed.
df_by_state_2016 <- withColumn(df_2016, "Registration State", ifelse(df_2016$`Registration State` == stateCodeNum2016,stateCodeMax2016,df_2016$`Registration State`))
createOrReplaceTempView(df_by_state_2016, "dfv_state_2016")
df_by_state_2016 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv_state_2016
group by `Registration State`
order by frequency_of_tickets desc")
showDF(df_by_state_2016)
##################### 2017 #################################################
### Ticket frequency per 'Registration State', in decreasing order.
createOrReplaceTempView(df_2017,"dfv2017")
df_by_state_2017 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv2017
group by `Registration State`
order by frequency_of_tickets desc")
showDF(df_by_state_2017)
### State code with the most tickets; used below to replace the bogus numeric code.
stateCodeMax2017 <- first(df_by_state_2017)$`Registration State`
stateCodeMax2017
### Find the erroneous all-numeric registration state code (e.g. "99").
### BUG FIX: the original pattern '\\[0-9]\\[0-9]' escaped the brackets, so
### rlike searched for the literal text "[0-9][0-9]" and could never match a
### numeric code; '^[0-9]+$' matches codes made up entirely of digits.
dftemp_2017 <- SparkR::sql("SELECT `Registration State` from dfv2017 where `Registration State` rlike '^[0-9]+$'")
stateCodeNum2017 <- first(dftemp_2017)$ `Registration State`
stateCodeNum2017
### Replace the numeric code with the most frequent state, then re-aggregate
### and re-rank now that the counts have changed.
df_by_state_2017 <- withColumn(df_2017, "Registration State", ifelse(df_2017$`Registration State` == stateCodeNum2017,stateCodeMax2017,df_2017$`Registration State`))
createOrReplaceTempView(df_by_state_2017, "dfv_state_2017")
df_by_state_2017 <- SparkR::sql("SELECT `Registration State`, count(*) as frequency_of_tickets from dfv_state_2017
group by `Registration State`
order by frequency_of_tickets desc")
showDF(df_by_state_2017)
#################################################################################################################
#3.Some parking tickets don't have the address for violation location on them, which is a cause for concern. Write a query to check the number of such tickets.
#We assume that we are using 'Violation Location' Column for this task.
###### 2015
# Count rows where 'Violation Location' is NULL, per year.
df_2015_no_violation_location <- SparkR::filter(df_2015, isNull(df_2015$`Violation Location`))
nrow(df_2015_no_violation_location)
###### 2016
df_2016_no_violation_location <- SparkR::filter(df_2016, isNull(df_2016$`Violation Location`))
nrow(df_2016_no_violation_location)
###### 2017
df_2017_no_violation_location <- SparkR::filter(df_2017, isNull(df_2017$`Violation Location`))
nrow(df_2017_no_violation_location)
################# 4. Aggregation Tasks #############################################
# 1. How often does each violation code occur? Display the frequency of the top five violation codes.
######### Find the Violation Code and the frequency of Violation for each year and then list the top 5
# FIX: the groupBy column was written `Violation code` (lower-case 'c')
# while the count used `Violation Code`; the capitalization is made
# consistent — mixed casing only works while Spark's default
# case-insensitive resolution is on, and is error-prone.
######### 2015
df_2015_violation_freq <- SparkR::summarize(groupBy(df_2015, df_2015$`Violation Code`), count = n(df_2015$`Violation Code`))
df_2015_violation_freq <- arrange(df_2015_violation_freq, "count", decreasing = TRUE)
head(df_2015_violation_freq, 5)
######### 2016
df_2016_violation_freq <- SparkR::summarize(groupBy(df_2016, df_2016$`Violation Code`), count = n(df_2016$`Violation Code`))
df_2016_violation_freq <- arrange(df_2016_violation_freq, "count", decreasing = TRUE)
head(df_2016_violation_freq, 5)
######### 2017
df_2017_violation_freq <- SparkR::summarize(groupBy(df_2017, df_2017$`Violation Code`), count = n(df_2017$`Violation Code`))
df_2017_violation_freq <- arrange(df_2017_violation_freq, "count", decreasing = TRUE)
head(df_2017_violation_freq, 5)
############################################################################################################################
#2.How often does each 'vehicle body type' get a parking ticket? How about the 'vehicle make'? (Hint: find the top 5 for both)
# FIX: the question asks for the top 5 of BOTH 'Vehicle Body Type' and
# 'Vehicle Make' for every year; the original computed only body type for
# 2015 and only make for 2016/2017. The missing half is added per year;
# all previously produced variables are kept with identical results.
######### 2015
df_2015_vehicle_body <- SparkR::summarize(groupBy(df_2015, df_2015$`Vehicle Body Type`), count = n(df_2015$`Vehicle Body Type`))
df_2015_vehicle_body <- arrange(df_2015_vehicle_body, "count", decreasing = TRUE)
head(df_2015_vehicle_body, 5)
df_2015_vehicle_make <- SparkR::summarize(groupBy(df_2015, df_2015$`Vehicle Make`), count = n(df_2015$`Vehicle Make`))
df_2015_vehicle_make <- arrange(df_2015_vehicle_make, "count", decreasing = TRUE)
head(df_2015_vehicle_make, 5)
######### 2016
df_2016_vehicle_body <- SparkR::summarize(groupBy(df_2016, df_2016$`Vehicle Body Type`), count = n(df_2016$`Vehicle Body Type`))
df_2016_vehicle_body <- arrange(df_2016_vehicle_body, "count", decreasing = TRUE)
head(df_2016_vehicle_body, 5)
df_2016_vehicle_make <- SparkR::summarize(groupBy(df_2016, df_2016$`Vehicle Make`), count = n(df_2016$`Vehicle Make`))
df_2016_vehicle_make <- arrange(df_2016_vehicle_make, "count", decreasing = TRUE)
head(df_2016_vehicle_make, 5)
######### 2017
df_2017_vehicle_body <- SparkR::summarize(groupBy(df_2017, df_2017$`Vehicle Body Type`), count = n(df_2017$`Vehicle Body Type`))
df_2017_vehicle_body <- arrange(df_2017_vehicle_body, "count", decreasing = TRUE)
head(df_2017_vehicle_body, 5)
df_2017_vehicle_make <- SparkR::summarize(groupBy(df_2017, df_2017$`Vehicle Make`), count = n(df_2017$`Vehicle Make`))
df_2017_vehicle_make <- arrange(df_2017_vehicle_make, "count", decreasing = TRUE)
head(df_2017_vehicle_make, 5)
############################################################################################################################
#3. Find the (5 highest) frequency of tickets for each of the following:
# Pattern per year/column: count tickets per precinct, drop the erroneous
# precinct code 0, sort descending and show the top five.
############# 2015
# a. Violation Precinct
df_2015_violation_prec <- SparkR::summarize(
  groupBy(df_2015, df_2015$`Violation Precinct`),
  count = n(df_2015$`Violation Precinct`))
prec_nonzero <- filter(df_2015_violation_prec,
                       df_2015_violation_prec$`Violation Precinct` != 0)
head(arrange(prec_nonzero, "count", decreasing = TRUE), 5)
#b. Issuer Precinct
df_2015_issuer_prec <- SparkR::summarize(
  groupBy(df_2015, df_2015$`Issuer Precinct`),
  count = n(df_2015$`Issuer Precinct`))
prec_nonzero <- filter(df_2015_issuer_prec,
                       df_2015_issuer_prec$`Issuer Precinct` != 0)
head(arrange(prec_nonzero, "count", decreasing = TRUE), 5)
############# 2016
# a. Violation Precinct
df_2016_violation_prec <- SparkR::summarize(
  groupBy(df_2016, df_2016$`Violation Precinct`),
  count = n(df_2016$`Violation Precinct`))
prec_nonzero <- filter(df_2016_violation_prec,
                       df_2016_violation_prec$`Violation Precinct` != 0)
head(arrange(prec_nonzero, "count", decreasing = TRUE), 5)
#b. Issuer Precinct
df_2016_issuer_prec <- SparkR::summarize(
  groupBy(df_2016, df_2016$`Issuer Precinct`),
  count = n(df_2016$`Issuer Precinct`))
prec_nonzero <- filter(df_2016_issuer_prec,
                       df_2016_issuer_prec$`Issuer Precinct` != 0)
head(arrange(prec_nonzero, "count", decreasing = TRUE), 5)
############# 2017
# a. Violation Precinct
df_2017_violation_prec <- SparkR::summarize(
  groupBy(df_2017, df_2017$`Violation Precinct`),
  count = n(df_2017$`Violation Precinct`))
prec_nonzero <- filter(df_2017_violation_prec,
                       df_2017_violation_prec$`Violation Precinct` != 0)
head(arrange(prec_nonzero, "count", decreasing = TRUE), 5)
#b. Issuer Precinct
df_2017_issuer_prec <- SparkR::summarize(
  groupBy(df_2017, df_2017$`Issuer Precinct`),
  count = n(df_2017$`Issuer Precinct`))
prec_nonzero <- filter(df_2017_issuer_prec,
                       df_2017_issuer_prec$`Issuer Precinct` != 0)
head(arrange(prec_nonzero, "count", decreasing = TRUE), 5)
############################################################################################################################
#4. Find the violation code frequency across three precincts which have issued the most number of tickets - do these precinct zones
#have an exceptionally high frequency of certain violation codes? Are these codes common across precincts?
### We have found the top 3 precincts which have most number of tickets in previous question ie. ( Question no: 3 above)
### We see that those precincts are 19, 18 and 14 for 2015. We use the same for solving this query.
############# 2015
createOrReplaceTempView(df_2015, "dfv_2015")
# Per-precinct violation-code frequencies for the three busiest precincts.
# (The original UNION ALL of three identical per-precinct queries is
# equivalent to a single IN filter.)
df_viloation_code_top_precincts <-
  SparkR::sql("select `Violation Precinct`, `Violation Code`, count(*) as frequency from dfv_2015
where `Violation Precinct` in (19, 18, 14)
group by `Violation Precinct`, `Violation Code`
order by `Violation Precinct`, frequency desc")
createOrReplaceTempView(df_viloation_code_top_precincts,"df_viloation_code_top_precincts_2015")
# FIX: a window-function rank cannot reliably be filtered with HAVING
# (there is no GROUP BY here, and HAVING-without-GROUP-BY is non-portable
# across Spark versions); filter the rank in an outer query with WHERE.
df_viloation_code_top_precincts <- SparkR::sql("select `Violation Precinct`, `Violation Code`, frequency, rank from (
select `Violation Precinct`, `Violation Code`, frequency,
ROW_NUMBER() OVER (PARTITION BY `Violation Precinct` ORDER BY frequency DESC) as rank
from df_viloation_code_top_precincts_2015) ranked
where rank <= 5
order by `Violation Precinct`, rank")
showDF(df_viloation_code_top_precincts)
head(df_viloation_code_top_precincts,15)
############# 2016
createOrReplaceTempView(df_2016, "dfv_2016")
# Per-precinct violation-code frequencies for the three busiest 2016
# precincts (19, 18, 14); UNION ALL collapsed into a single IN filter.
df_viloation_code_top_precincts <-
  SparkR::sql("select `Violation Precinct`, `Violation Code`, count(*) as frequency from dfv_2016
where `Violation Precinct` in (19, 18, 14)
group by `Violation Precinct`, `Violation Code`
order by `Violation Precinct`, frequency desc")
createOrReplaceTempView(df_viloation_code_top_precincts,"df_viloation_code_top_precincts_2016")
# FIX: filter the window rank in an outer WHERE instead of relying on
# HAVING without GROUP BY (non-portable across Spark versions).
df_viloation_code_top_precincts <- SparkR::sql("select `Violation Precinct`, `Violation Code`, frequency, rank from (
select `Violation Precinct`, `Violation Code`, frequency,
ROW_NUMBER() OVER (PARTITION BY `Violation Precinct` ORDER BY frequency DESC) as rank
from df_viloation_code_top_precincts_2016) ranked
where rank <= 5
order by `Violation Precinct`, rank")
showDF(df_viloation_code_top_precincts)
head(df_viloation_code_top_precincts,15)
############# 2017
createOrReplaceTempView(df_2017, "dfv_2017")
# Per-precinct violation-code frequencies for the three busiest 2017
# precincts (19, 14, 1); UNION ALL collapsed into a single IN filter.
df_viloation_code_top_precincts <-
  SparkR::sql("select `Violation Precinct`, `Violation Code`, count(*) as frequency from dfv_2017
where `Violation Precinct` in (19, 14, 1)
group by `Violation Precinct`, `Violation Code`
order by `Violation Precinct`, frequency desc")
createOrReplaceTempView(df_viloation_code_top_precincts,"df_viloation_code_top_precincts_2017")
# FIX: filter the window rank in an outer WHERE instead of relying on
# HAVING without GROUP BY (non-portable across Spark versions).
df_viloation_code_top_precincts <- SparkR::sql("select `Violation Precinct`, `Violation Code`, frequency, rank from (
select `Violation Precinct`, `Violation Code`, frequency,
ROW_NUMBER() OVER (PARTITION BY `Violation Precinct` ORDER BY frequency DESC) as rank
from df_viloation_code_top_precincts_2017) ranked
where rank <= 5
order by `Violation Precinct`, rank")
showDF(df_viloation_code_top_precincts)
head(df_viloation_code_top_precincts,15)
############################################################################################################################
#5.You’d want to find out the properties of parking violations across different times of the day:
############# 2015
#Find a way to deal with missing values, if any.
# Keep only rows with a non-null 'Violation Time'.
df_2015_violation_time <- SparkR::filter(df_2015, isNotNull(df_2015$`Violation Time`))
# 'Violation Time' looks like "0945A": HHMM plus an A/P meridiem flag.
# We keep the hour (chars 1-2) and the meridiem flag (char 5) and build a
# 24-hour integer hour column.
# FIX: the original replaced A->"0" / P->"12" and added the string columns
# (Spark casts both sides to numbers), which maps 12:xxAM to 12 instead of
# 0 and 12:xxPM to 24 instead of 12. The ifelse-based conversion below
# handles the 12 o'clock cases correctly.
df_2015_violation_time$hour <- cast(substr(df_2015_violation_time$`Violation Time`, 1, 2), "integer")
df_2015_violation_time$AMPM <- substr(df_2015_violation_time$`Violation Time`, 5, 5)
# 12 AM is hour 0; 12 PM stays 12 after the +12 shift below.
df_2015_violation_time$hour <- ifelse(df_2015_violation_time$hour == 12, 0, df_2015_violation_time$hour)
# Shift PM hours into the afternoon/evening range.
df_2015_violation_time$hour <- ifelse(df_2015_violation_time$AMPM == "P",
                                      df_2015_violation_time$hour + 12,
                                      df_2015_violation_time$hour)
createOrReplaceTempView(df_2015_violation_time, "dfv_2015")
#Create 6 different bins for time of hour and group the data
#Primary Group - Time Of Day , Secondary Group - Violation Code
#Arrange in the descending order of total violation records per time of day per violation code
# FIX: the second bin ended at hour <= 7 while the third began at
# hour > 8, so hour == 8 silently fell through to the ELSE (night) bin;
# the third bin now starts at hour > 7.
bins <- sql("SELECT `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 7 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2015
group by timeOfDay,`Violation Code`
order by timeOfDay ASC ,`Total Violation` DESC ")
head(bins)
createOrReplaceTempView(bins, "dfv_2015_Total_Violation")
#For finding the most commonly occurring violations, a similar approach can be used as mention in the hint for question 4.
#Find the top 3 violation codes for each time of the day
df_top_3_violation_for_each_time_of_day_2015 <- sql("(select * from dfv_2015_Total_Violation where timeOfDay = 1 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2015_Total_Violation where timeOfDay = 2 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2015_Total_Violation where timeOfDay = 3 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2015_Total_Violation where timeOfDay = 4 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2015_Total_Violation where timeOfDay = 5 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2015_Total_Violation where timeOfDay = 6 order by `Total Violation` DESC limit 3)")
# Replace the numeric bin ids with human-readable labels.
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"1",'Midnight')
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"2",'Earlymorning')
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"3",'Morning')
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"4",'Afternoon')
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"5",'Evening')
df_top_3_violation_for_each_time_of_day_2015$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2015$timeOfDay,"6",'Night')
head(df_top_3_violation_for_each_time_of_day_2015)
#Find time of the day for top three violation codes
# FIX: same bin-gap defect as above — hour == 8 previously fell into the
# ELSE bin; the third bin now starts at hour > 7.
bins <- sql("SELECT distinct `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 7 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2015
group by `Violation Code`,timeOfDay
order by `Total Violation` DESC Limit 3 ")
# Replace the numeric bin ids with human-readable labels.
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"1",'Midnight')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"2",'Earlymorning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"3",'Morning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"4",'Afternoon')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"5",'Evening')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"6",'Night')
showDF(bins)
############# 2016
#Find a way to deal with missing values, if any.
# Keep only rows with a non-null 'Violation Time'.
df_2016_violation_time <- SparkR::filter(df_2016, isNotNull(df_2016$`Violation Time`))
# 'Violation Time' looks like "0945A": HHMM plus an A/P meridiem flag.
# FIX: the original string-add conversion mapped 12:xxAM to 12 and
# 12:xxPM to 24; the ifelse-based conversion below is correct for both.
df_2016_violation_time$hour <- cast(substr(df_2016_violation_time$`Violation Time`, 1, 2), "integer")
df_2016_violation_time$AMPM <- substr(df_2016_violation_time$`Violation Time`, 5, 5)
# 12 AM is hour 0; 12 PM stays 12 after the +12 shift below.
df_2016_violation_time$hour <- ifelse(df_2016_violation_time$hour == 12, 0, df_2016_violation_time$hour)
# Shift PM hours into the afternoon/evening range.
df_2016_violation_time$hour <- ifelse(df_2016_violation_time$AMPM == "P",
                                      df_2016_violation_time$hour + 12,
                                      df_2016_violation_time$hour)
createOrReplaceTempView(df_2016_violation_time, "dfv_2016")
#Create 6 different bins for time of hour and group the data
#Primary Group - Time Of Day , Secondary Group - Violation Code
#Arrange in the descending order of total violation records per time of day per violation code
# FIX: third bin now starts at hour > 7 (hour == 8 previously fell into
# the ELSE bin).
bins <- sql("SELECT `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 7 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2016
group by timeOfDay,`Violation Code`
order by timeOfDay ASC ,`Total Violation` DESC ")
head(bins)
createOrReplaceTempView(bins, "dfv_2016_Total_Violation")
#For finding the most commonly occurring violations, a similar approach can be used as mention in the hint for question 4.
#Find the top 3 violation codes for each time of the day
# FIX: the timeOfDay = 6 branch previously queried the 2015 view
# (dfv_2015_Total_Violation); it now queries dfv_2016_Total_Violation.
df_top_3_violation_for_each_time_of_day_2016 <- sql("(select * from dfv_2016_Total_Violation where timeOfDay = 1 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2016_Total_Violation where timeOfDay = 2 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2016_Total_Violation where timeOfDay = 3 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2016_Total_Violation where timeOfDay = 4 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2016_Total_Violation where timeOfDay = 5 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2016_Total_Violation where timeOfDay = 6 order by `Total Violation` DESC limit 3)")
# Replace the numeric bin ids with human-readable labels.
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"1",'Midnight')
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"2",'Earlymorning')
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"3",'Morning')
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"4",'Afternoon')
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"5",'Evening')
df_top_3_violation_for_each_time_of_day_2016$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2016$timeOfDay,"6",'Night')
head(df_top_3_violation_for_each_time_of_day_2016)
#Find time of the day for top three violation codes
# FIX: third bin now starts at hour > 7 (hour == 8 previously fell into
# the ELSE bin).
bins <- sql("SELECT distinct `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 7 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2016
group by `Violation Code`,timeOfDay
order by `Total Violation` DESC Limit 3 ")
# Replace the numeric bin ids with human-readable labels.
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"1",'Midnight')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"2",'Earlymorning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"3",'Morning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"4",'Afternoon')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"5",'Evening')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"6",'Night')
showDF(bins)
############# 2017
#Find a way to deal with missing values, if any.
# Keep only rows with a non-null 'Violation Time'.
df_2017_violation_time <- SparkR::filter(df_2017, isNotNull(df_2017$`Violation Time`))
# 'Violation Time' looks like "0945A": HHMM plus an A/P meridiem flag.
# FIX: the original string-add conversion mapped 12:xxAM to 12 and
# 12:xxPM to 24; the ifelse-based conversion below is correct for both.
df_2017_violation_time$hour <- cast(substr(df_2017_violation_time$`Violation Time`, 1, 2), "integer")
df_2017_violation_time$AMPM <- substr(df_2017_violation_time$`Violation Time`, 5, 5)
# 12 AM is hour 0; 12 PM stays 12 after the +12 shift below.
df_2017_violation_time$hour <- ifelse(df_2017_violation_time$hour == 12, 0, df_2017_violation_time$hour)
# Shift PM hours into the afternoon/evening range.
df_2017_violation_time$hour <- ifelse(df_2017_violation_time$AMPM == "P",
                                      df_2017_violation_time$hour + 12,
                                      df_2017_violation_time$hour)
createOrReplaceTempView(df_2017_violation_time, "dfv_2017")
#Create 6 different bins for time of hour and group the data
#Primary Group - Time Of Day , Secondary Group - Violation Code
#Arrange in the descending order of total violation records per time of day per violation code
# FIX: third bin now starts at hour > 7 (hour == 8 previously fell into
# the ELSE bin).
bins <- sql("SELECT `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 7 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2017
group by timeOfDay,`Violation Code`
order by timeOfDay ASC ,`Total Violation` DESC ")
head(bins)
createOrReplaceTempView(bins, "dfv_2017_Total_Violation")
#For finding the most commonly occurring violations, a similar approach can be used as mention in the hint for question 4.
#Find the top 3 violation codes for each time of the day
# FIX: the timeOfDay = 6 branch previously queried the 2015 view
# (dfv_2015_Total_Violation); it now queries dfv_2017_Total_Violation.
df_top_3_violation_for_each_time_of_day_2017 <- sql("(select * from dfv_2017_Total_Violation where timeOfDay = 1 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2017_Total_Violation where timeOfDay = 2 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2017_Total_Violation where timeOfDay = 3 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2017_Total_Violation where timeOfDay = 4 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2017_Total_Violation where timeOfDay = 5 order by `Total Violation` DESC limit 3)
union all
(select * from dfv_2017_Total_Violation where timeOfDay = 6 order by `Total Violation` DESC limit 3)")
# Replace the numeric bin ids with human-readable labels.
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"1",'Midnight')
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"2",'Earlymorning')
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"3",'Morning')
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"4",'Afternoon')
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"5",'Evening')
df_top_3_violation_for_each_time_of_day_2017$timeOfDay <- regexp_replace(df_top_3_violation_for_each_time_of_day_2017$timeOfDay,"6",'Night')
head(df_top_3_violation_for_each_time_of_day_2017)
#Find time of the day for top three violation codes
# FIX: third bin now starts at hour > 7 (hour == 8 previously fell into
# the ELSE bin).
bins <- sql("SELECT distinct `Violation Code`,count(`Violation Code`)as `Total Violation`, \
CASE WHEN ( hour >=0 and hour <= 3 ) THEN 1\
WHEN (hour > 3 and hour <= 7) THEN 2\
WHEN (hour > 7 and hour <= 12) THEN 3\
WHEN (hour > 12 and hour <= 16) THEN 4\
WHEN (hour > 16 and hour <= 20) THEN 5\
ELSE 6 END as timeOfDay FROM dfv_2017
group by `Violation Code`,timeOfDay
order by `Total Violation` DESC Limit 3 ")
# Replace the numeric bin ids with human-readable labels.
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"1",'Midnight')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"2",'Earlymorning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"3",'Morning')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"4",'Afternoon')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"5",'Evening')
bins$timeOfDay <- regexp_replace(bins$timeOfDay,"6",'Night')
showDF(bins)
############################################################################################################################
#6.First, divide the year into some number of seasons, and
## 1. Find frequencies of tickets for each season. (Hint: Use Issue Date to segregate into seasons)
## 2. Then, find the three most common violations for each of these seasons.
############# 2015
# Seasons: 1=Summer (Apr-Jun), 2=Rainy (Jul-Sep), 3=Autumn (Oct-Dec),
# 4=Winter (Jan-Mar).
df_2015$`Issue Month` <- month(SparkR::to_date(df_2015$`Issue Date`,format ="MM/dd/yyyy"))
createOrReplaceTempView(df_2015, "dfv2015")
df_segregated_seasons_2015 <- SparkR::sql("SELECT
CASE WHEN(`Issue Month`>=4 and `Issue Month` <= 6 ) THEN 1 \
WHEN ( `Issue Month` >=7 and `Issue Month` <= 9) THEN 2 \
WHEN ( `Issue Month` >=10 and `Issue Month` <= 12 ) THEN 3 \
ELSE 4 END as season, `Violation Code`,count(`Violation Code`)as `Total Violation` FROM dfv2015
group by season,`Violation Code`
order by season asc, `Total Violation` desc")
createOrReplaceTempView(df_segregated_seasons_2015, "binv2015")
# FIX: filter the window rank in an outer WHERE instead of relying on
# HAVING without GROUP BY (non-portable across Spark versions).
df_top_violation_perseason_2015 <- SparkR::sql("select season, `Violation Code`, `Total Violation`, rank from (
select season, `Violation Code`, `Total Violation`,
ROW_NUMBER() OVER (PARTITION BY season ORDER BY `Total Violation` DESC) as rank
from binv2015) ranked
where rank <= 3
order by season, rank")
# FIX: the cast result was previously discarded (bare 'cast(...)' call);
# assign it so season becomes a string column before label substitution.
df_top_violation_perseason_2015$season <- cast(df_top_violation_perseason_2015$season, "string")
df_top_violation_perseason_2015$season <- regexp_replace(df_top_violation_perseason_2015$season,"1",'Summer')
df_top_violation_perseason_2015$season <- regexp_replace(df_top_violation_perseason_2015$season,"2",'Rainy')
df_top_violation_perseason_2015$season <- regexp_replace(df_top_violation_perseason_2015$season,"3",'Autumn')
df_top_violation_perseason_2015$season <- regexp_replace(df_top_violation_perseason_2015$season,"4",'Winter')
showDF(df_top_violation_perseason_2015)
############# 2016
# Seasons: 1=Summer (Apr-Jun), 2=Rainy (Jul-Sep), 3=Autumn (Oct-Dec),
# 4=Winter (Jan-Mar).
df_2016$`Issue Month` <- month(SparkR::to_date(df_2016$`Issue Date`,format ="MM/dd/yyyy"))
createOrReplaceTempView(df_2016, "dfv2016")
df_segregated_seasons_2016 <- SparkR::sql("SELECT
CASE WHEN(`Issue Month`>=4 and `Issue Month` <= 6 ) THEN 1 \
WHEN ( `Issue Month` >=7 and `Issue Month` <= 9) THEN 2 \
WHEN ( `Issue Month` >=10 and `Issue Month` <= 12 ) THEN 3 \
ELSE 4 END as season, `Violation Code`,count(`Violation Code`)as `Total Violation` FROM dfv2016
group by season,`Violation Code`
order by season asc, `Total Violation` desc")
#Then, find the three most common violations for each of these seasons.
createOrReplaceTempView(df_segregated_seasons_2016, "binv2016")
# FIX: filter the window rank in an outer WHERE instead of relying on
# HAVING without GROUP BY (non-portable across Spark versions).
df_top_violation_perseason_2016 <- SparkR::sql("select season, `Violation Code`, `Total Violation`, rank from (
select season, `Violation Code`, `Total Violation`,
ROW_NUMBER() OVER (PARTITION BY season ORDER BY `Total Violation` DESC) as rank
from binv2016) ranked
where rank <= 3
order by season, rank")
# FIX: the cast result was previously discarded; assign it.
df_top_violation_perseason_2016$season <- cast(df_top_violation_perseason_2016$season, "string")
df_top_violation_perseason_2016$season <- regexp_replace(df_top_violation_perseason_2016$season,"1",'Summer')
df_top_violation_perseason_2016$season <- regexp_replace(df_top_violation_perseason_2016$season,"2",'Rainy')
df_top_violation_perseason_2016$season <- regexp_replace(df_top_violation_perseason_2016$season,"3",'Autumn')
df_top_violation_perseason_2016$season <- regexp_replace(df_top_violation_perseason_2016$season,"4",'Winter')
showDF(df_top_violation_perseason_2016)
############# 2017
# Seasons: 1=Summer (Apr-Jun), 2=Rainy (Jul-Sep), 3=Autumn (Oct-Dec),
# 4=Winter (Jan-Mar).
df_2017$`Issue Month` <- month(SparkR::to_date(df_2017$`Issue Date`,format ="MM/dd/yyyy"))
createOrReplaceTempView(df_2017, "dfv2017")
df_segregated_seasons_2017 <- SparkR::sql("SELECT
CASE WHEN(`Issue Month`>=4 and `Issue Month` <= 6 ) THEN 1 \
WHEN ( `Issue Month` >=7 and `Issue Month` <= 9) THEN 2 \
WHEN ( `Issue Month` >=10 and `Issue Month` <= 12 ) THEN 3 \
ELSE 4 END as season, `Violation Code`,count(`Violation Code`)as `Total Violation` FROM dfv2017
group by season,`Violation Code`
order by season asc, `Total Violation` desc")
#Then, find the three most common violations for each of these seasons.
createOrReplaceTempView(df_segregated_seasons_2017, "binv2017")
# FIX: filter the window rank in an outer WHERE instead of relying on
# HAVING without GROUP BY (non-portable across Spark versions).
df_top_violation_perseason_2017 <- SparkR::sql("select season, `Violation Code`, `Total Violation`, rank from (
select season, `Violation Code`, `Total Violation`,
ROW_NUMBER() OVER (PARTITION BY season ORDER BY `Total Violation` DESC) as rank
from binv2017) ranked
where rank <= 3
order by season, rank")
# FIX: the cast result was previously discarded; assign it.
df_top_violation_perseason_2017$season <- cast(df_top_violation_perseason_2017$season, "string")
df_top_violation_perseason_2017$season <- regexp_replace(df_top_violation_perseason_2017$season,"1",'Summer')
df_top_violation_perseason_2017$season <- regexp_replace(df_top_violation_perseason_2017$season,"2",'Rainy')
df_top_violation_perseason_2017$season <- regexp_replace(df_top_violation_perseason_2017$season,"3",'Autumn')
df_top_violation_perseason_2017$season <- regexp_replace(df_top_violation_perseason_2017$season,"4",'Winter')
showDF(df_top_violation_perseason_2017)
############################################################################################################################
#7.The fines collected from all the parking violation constitute a revenue source for the NYC police department. Let’s take an example of estimating that for the
#three most commonly occurring codes.
############# 2015
createOrReplaceTempView(df_2015, "dfv2015")
# Top three violation codes by ticket count (computed on the Spark side,
# then pulled into a local data.frame for the fine arithmetic).
df_fine <- SparkR::sql("SELECT `Violation Code`,count(*)as `Total Violation` \
from dfv2015
group by `Violation Code`
order by `Total Violation` DESC
Limit 3")
df_fine <- data.frame(head(df_fine))
head(df_fine)
# Average fine per code (mean of the high-density and rest-of-city rates
# listed at http://www1.nyc.gov/site/finance/vehicles/services-violation-codes.page).
df_fee_r <- data.frame(Fee=c(55,50,115))
df_fine <- cbind(df_fine, df_fee_r)
# Estimated revenue = ticket count * average fine.
df_fine$Total_Fine <- df_fine$Total.Violation * df_fine$Fee
head(df_fine)
# The code with the highest total collection.
df_fine[which.max(df_fine$Total_Fine), ]
############# 2016
createOrReplaceTempView(df_2016, "dfv2016")
# Top three violation codes by ticket count (Spark side), pulled into a
# local data.frame for the fine arithmetic.
df_fine <- SparkR::sql("SELECT `Violation Code`,count(*)as `Total Violation` \
from dfv2016
group by `Violation Code`
order by `Total Violation` DESC
Limit 3")
df_fine <- data.frame(head(df_fine))
head(df_fine)
# Average fine per code (mean of the high-density and rest-of-city rates
# listed at http://www1.nyc.gov/site/finance/vehicles/services-violation-codes.page).
df_fee_r <- data.frame(Fee=c(55,50,50))
df_fine <- cbind(df_fine, df_fee_r)
# Estimated revenue = ticket count * average fine.
df_fine$Total_Fine <- df_fine$Total.Violation * df_fine$Fee
head(df_fine)
# The code with the highest total collection.
df_fine[which.max(df_fine$Total_Fine), ]
############# 2017
createOrReplaceTempView(df_2017, "dfv2017")
# Top three violation codes by ticket count (Spark side), pulled into a
# local data.frame for the fine arithmetic.
df_fine <- SparkR::sql("SELECT `Violation Code`,count(*)as `Total Violation` \
from dfv2017
group by `Violation Code`
order by `Total Violation` DESC
Limit 3")
df_fine <- data.frame(head(df_fine))
head(df_fine)
# Average fine per code (mean of the high-density and rest-of-city rates
# listed at http://www1.nyc.gov/site/finance/vehicles/services-violation-codes.page).
df_fee_r <- data.frame(Fee=c(55,50,50))
df_fine <- cbind(df_fine, df_fee_r)
# Estimated revenue = ticket count * average fine.
df_fine$Total_Fine <- df_fine$Total.Violation * df_fine$Fee
head(df_fine)
# The code with the highest total collection.
df_fine[which.max(df_fine$Total_Fine), ]
############################################################################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test.R
\name{test}
\alias{test}
\title{Bootstrap based test for covariate selection}
\usage{
test(x, y, method = "lm", family = "gaussian", nboot = 50,
speedup = TRUE, qmin = NULL, unique = FALSE, q = NULL,
bootseed = NULL, cluster = TRUE, ncores = NULL)
}
\arguments{
\item{x}{A data frame containing all the covariates.}
\item{y}{A vector with the response values.}
\item{method}{A character string specifying which regression method is used,
i.e., linear models (\code{"lm"}) or generalized additive models.}
\item{family}{A description of the error distribution and link function to be
used in the model: (\code{"gaussian"}), (\code{"binomial"}) or
(\code{"poisson"}).}
\item{nboot}{Number of bootstrap repeats.}
\item{speedup}{A logical value. If \code{TRUE} (default), the testing procedure
is computationally efficient since it considers one more variable to fit
the alternative model than the number of variables used to fit the null.
If \code{FALSE}, the fit of the alternative model is based on considering
the best subset of variables of size greater than \code{q}, the one that minimizes an
information criterion. The size of this subset must be given by the user
filling the argument \code{qmin}.}
\item{qmin}{By default \code{NULL}. If \code{speedup} is \code{FALSE},
\code{qmin} is an integer number selected by the user. To help you select
this argument, it is recommended to visualize the graphical output of the
\code{plot} function and choose the number \code{q} which minimizes the curve.}
\item{unique}{A logical value. By default \code{FALSE}. If \code{TRUE},
the test is performed only for one null hypothesis, given by the argument \code{q}.}
\item{q}{By default \code{NULL}. If \code{unique} is \code{TRUE}, \code{q}
is the size of the subset of variables to be tested.}
\item{bootseed}{Seed to be used in the bootstrap procedure.}
\item{cluster}{A logical value. If \code{TRUE} (default), the testing
procedure is parallelized.}
\item{ncores}{An integer value specifying the number of cores to be used
in the parallelized procedure. If \code{NULL} (default), the number of cores to be used
is equal to the number of cores of the machine - 1.}
}
\value{
A list with two objects. The first one is a table containing
\item{Hypothesis}{Number of the null hypothesis tested}
\item{Statistic}{Value of the T statistic}
\item{pvalue}{pvalue obtained in the testing procedure}
\item{Decision}{Result of the test for a significance level of 0.05}
The second argument \code{nvar} indicates the number of variables that
have to be included in the model.
}
\description{
Function that applies a bootstrap based test for covariate
selection. It helps to determine the number of variables to be included in
the model.
}
\details{
In a regression framework, let \eqn{X_1, X_2, \ldots, X_p}, a set of
\eqn{p} initial variables and \eqn{Y} the response variable, we propose a
procedure to test the null hypothesis of \eqn{q} significant variables in
the model --\eqn{q} effects not equal to zero-- versus the alternative in
which the model contains more than \eqn{q} variables. Based on the general
model \deqn{Y=m(\textbf{X})+\varepsilon \quad {\rm{where}} \quad
m(\textbf{X})= m_{1}(X_{1})+m_{2}(X_{2})+\ldots+m_{p}(X_{p})} the following
strategy is considered: for a subset of size \eqn{q}, considerations will be
given to a test for the null hypothesis \deqn{H_{0} (q): \sum_{j=1}^p
I_{\{m_j \ne 0\}} \le q} vs. the general hypothesis \deqn{H_{1} :
\sum_{j=1}^p I_{\{m_j \ne 0\}} > q}
}
\note{
The detailed expressions of the formulas are described in the HTML help
\url{http://cran.r-project.org/web/packages/FWDselect/FWDselect.pdf}
}
\examples{
library(FWDselect)
data(diabetes)
x = diabetes[ ,2:11]
y = diabetes[ ,1]
test(x, y, method = "lm", cluster = FALSE, nboot = 5)
## for speedup = FALSE
# obj2 = qselection(x, y, qvector = c(1:9), method = "lm",
# cluster = FALSE)
# plot(obj2) # we choose q = 7 for the argument qmin
# test(x, y, method = "lm", cluster = FALSE, nboot = 5,
# speedup = FALSE, qmin = 7)
}
\author{
Marta Sestelo, Nora M. Villanueva and Javier Roca-Pardinas.
}
\references{
Sestelo, M., Villanueva, N. M. and Roca-Pardinas, J. (2013).
FWDselect: an R package for selecting variables in regression models.
Discussion Papers in Statistics and Operation Research, University of Vigo, 13/01.
}
\seealso{
\code{\link{selection}}
}
|
/man/test.Rd
|
no_license
|
sestelo/fwdselect
|
R
| false
| true
| 4,481
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test.R
\name{test}
\alias{test}
\title{Bootstrap based test for covariate selection}
\usage{
test(x, y, method = "lm", family = "gaussian", nboot = 50,
speedup = TRUE, qmin = NULL, unique = FALSE, q = NULL,
bootseed = NULL, cluster = TRUE, ncores = NULL)
}
\arguments{
\item{x}{A data frame containing all the covariates.}
\item{y}{A vector with the response values.}
\item{method}{A character string specifying which regression method is used,
i.e., linear models (\code{"lm"}) or generalized additive models.}
\item{family}{A description of the error distribution and link function to be
used in the model: (\code{"gaussian"}), (\code{"binomial"}) or
(\code{"poisson"}).}
\item{nboot}{Number of bootstrap repeats.}
\item{speedup}{A logical value. If \code{TRUE} (default), the testing procedure
is computationally efficient since it considers one more variable to fit
the alternative model than the number of variables used to fit the null.
If \code{FALSE}, the fit of the alternative model is based on considering
the best subset of variables of size greater than \code{q}, the one that minimizes an
information criterion. The size of this subset must be given by the user
filling the argument \code{qmin}.}
\item{qmin}{By default \code{NULL}. If \code{speedup} is \code{FALSE},
\code{qmin} is an integer number selected by the user. To help you select
this argument, it is recommended to visualize the graphical output of the
\code{plot} function and choose the number \code{q} which minimizes the curve.}
\item{unique}{A logical value. By default \code{FALSE}. If \code{TRUE},
the test is performed only for one null hypothesis, given by the argument \code{q}.}
\item{q}{By default \code{NULL}. If \code{unique} is \code{TRUE}, \code{q}
is the size of the subset of variables to be tested.}
\item{bootseed}{Seed to be used in the bootstrap procedure.}
\item{cluster}{A logical value. If \code{TRUE} (default), the testing
procedure is parallelized.}
\item{ncores}{An integer value specifying the number of cores to be used
in the parallelized procedure. If \code{NULL} (default), the number of cores to be used
is equal to the number of cores of the machine - 1.}
}
\value{
A list with two objects. The first one is a table containing
\item{Hypothesis}{Number of the null hypothesis tested}
\item{Statistic}{Value of the T statistic}
\item{pvalue}{pvalue obtained in the testing procedure}
\item{Decision}{Result of the test for a significance level of 0.05}
The second argument \code{nvar} indicates the number of variables that
have to be included in the model.
}
\description{
Function that applies a bootstrap based test for covariate
selection. It helps to determine the number of variables to be included in
the model.
}
\details{
In a regression framework, let \eqn{X_1, X_2, \ldots, X_p}, a set of
\eqn{p} initial variables and \eqn{Y} the response variable, we propose a
procedure to test the null hypothesis of \eqn{q} significant variables in
the model --\eqn{q} effects not equal to zero-- versus the alternative in
which the model contains more than \eqn{q} variables. Based on the general
model \deqn{Y=m(\textbf{X})+\varepsilon \quad {\rm{where}} \quad
m(\textbf{X})= m_{1}(X_{1})+m_{2}(X_{2})+\ldots+m_{p}(X_{p})} the following
strategy is considered: for a subset of size \eqn{q}, considerations will be
given to a test for the null hypothesis \deqn{H_{0} (q): \sum_{j=1}^p
I_{\{m_j \ne 0\}} \le q} vs. the general hypothesis \deqn{H_{1} :
\sum_{j=1}^p I_{\{m_j \ne 0\}} > q}
}
\note{
The detailed expressions of the formulas are described in the HTML help
\url{http://cran.r-project.org/web/packages/FWDselect/FWDselect.pdf}
}
\examples{
library(FWDselect)
data(diabetes)
x = diabetes[ ,2:11]
y = diabetes[ ,1]
test(x, y, method = "lm", cluster = FALSE, nboot = 5)
## for speedup = FALSE
# obj2 = qselection(x, y, qvector = c(1:9), method = "lm",
# cluster = FALSE)
# plot(obj2) # we choose q = 7 for the argument qmin
# test(x, y, method = "lm", cluster = FALSE, nboot = 5,
# speedup = FALSE, qmin = 7)
}
\author{
Marta Sestelo, Nora M. Villanueva and Javier Roca-Pardinas.
}
\references{
Sestelo, M., Villanueva, N. M. and Roca-Pardinas, J. (2013).
FWDselect: an R package for selecting variables in regression models.
Discussion Papers in Statistics and Operation Research, University of Vigo, 13/01.
}
\seealso{
\code{\link{selection}}
}
|
severity <- function(mu0, xbar, sigma, n, alpha)
{
  ## Post-data severity curves for the one-sided z-test of
  ##   H0: mu <= mu0   vs   H1: mu > mu0
  ## with known standard deviation `sigma` and sample size `n`.
  ##
  ## Arguments:
  ##   mu0   - hypothesised mean under H0 (coerced to integer, as designed)
  ##   xbar  - vector of observed sample means (one severity curve per mean)
  ##   sigma - known population standard deviation
  ##   n     - sample size (coerced to integer)
  ##   alpha - significance level of the test
  ##
  ## Side effects: draws severity/power curves on the current graphics device
  ## (two side-by-side panels when both decisions occur among the inputs).
  ## Returns a list with the accept/reject indicators, p-values, the severity
  ## matrices for both decisions, the power curve and the discrepancy grid.
  ##
  ## Note: the original `require(graphics)` was dropped -- graphics is
  ## attached in every standard R session.

  ### check inputs ###
  mu0 <- as.integer(mu0)    # make sure it is an integer
  n <- as.integer(n)        # make sure it is an integer
  sigma <- as.numeric(sigma)
  alpha <- as.numeric(alpha)
  # Unconditional, idempotent coercion; replaces the non-idiomatic
  # `if (class(xbar) != "numeric")` guard.
  xbar <- as.numeric(xbar)

  ### begin severity calculations ###
  r <- length(xbar)
  gamma <- seq(from = -0.5, to = 1.5, by = 0.05)  # discrepancies of interest
  l <- length(gamma)
  mu1 <- rep(x = mu0, times = l) + gamma          # alternative means mu0 + gamma
  c_alpha <- qnorm(alpha, lower.tail = FALSE)     # cut-off (for rejection region)
  sigma_x <- sigma / sqrt(n)                      # standard error of the mean
  sigma_x_inv <- 1 / sigma_x
  d_x0 <- sigma_x_inv * (xbar - rep(x = mu0, times = r))  # test statistic
  # delta1 = [sqrt(n) * (mu1 - mu0)] / sigma
  delta1 <- sigma_x_inv * gamma                   # non-centrality parameter
  p <- pnorm(d_x0, lower.tail = FALSE)            # p-value
  power <- pnorm(q = rep(x = c_alpha, times = l) - delta1, lower.tail = FALSE)  # power curve
  # "accept" indicates whether to 'accept H0' (accept = 1) OR 'reject H0'
  # (accept = 0). Vectorized; replaces the original element-wise for-loop.
  accept <- as.numeric(d_x0 < c_alpha)
  x1 <- xbar[which(accept == 0)]                  # means leading to rejection
  x2 <- xbar[which(accept == 1)]                  # means leading to acceptance
  m1 <- length(x1)
  m2 <- length(x2)
  q1 <- matrix(nrow = l, ncol = m1)
  q2 <- matrix(nrow = l, ncol = m2)
  sev_rejectH0 <- matrix(nrow = l, ncol = m1)
  sev_acceptH0 <- matrix(nrow = l, ncol = m2)
  x_reject <- matrix(nrow = l, ncol = m1)         # x-coordinates for plotting
  x_accept <- matrix(nrow = l, ncol = m2)         # x-coordinates for plotting
  # Two side-by-side panels only when both decisions occur among the inputs.
  if ((m1 > 0) && (m2 > 0)) {
    par(mfrow = c(1, 2))
  }

  ### reject H0: severity of the inference mu > mu1 ###
  if (m1 > 0) {
    for (i in seq_len(m1)) {
      x_reject[, i] <- mu1                        # for plotting
      q1[, i] <- sigma_x_inv * (rep(x = x1[i], times = l) - mu1)
      sev_rejectH0[, i] <- pnorm(q = q1[, i])     # severity
    }
    # plot
    plot(x = x_reject[, 1], y = sev_rejectH0[, 1], type = "l", lty = 1, col = "blue",
         main = expression(paste("severity curves for inference: ", mu > mu[1], "")),
         xlab = expression(paste("values of ", mu[1], "")),
         ylab = "severity / power", xaxt = "n", yaxt = "n", cex.main = 0.90)
    axis(side = 1, at = seq(from = mu1[1], to = mu1[l], by = 0.10),
         labels = seq(from = mu1[1], to = mu1[l], by = 0.10))  # x-axis
    axis(side = 2, at = seq(from = 0, to = 1, by = 0.05),
         labels = seq(from = 0, to = 1, by = 0.05))            # y-axis
    lines(x = mu1, y = power, col = "red", lty = 2)            # power curve
    # NOTE: the m1 == 2 / m1 >= 3 branches were originally placed OUTSIDE the
    # `if (m1 > 0)` block; behaviour is identical (they can only fire when
    # m1 >= 2, which implies m1 > 0) but nesting them makes the structure
    # explicit. Legend position depends on whether the acceptance panel is
    # also drawn.
    if (m1 == 1) {
      if (m2 > 0) {
        legend(x = mu1[floor(l/2) - 3], y = 0.525, col = c("blue", "red"), lty = c(1, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))), "power"), cex = 0.65)
      } else {
        legend(x = mu1[l - 10], y = 0.525, col = c("blue", "red"), lty = c(1, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))), "power"), cex = 0.65)
      }
    } else if (m1 == 2) {
      lines(x = x_reject[, 2], y = sev_rejectH0[, 2], col = "blue", lty = 3)
      if (m2 > 0) {
        legend(x = mu1[floor(l/2) - 3], y = 0.525, col = c("blue", "blue", "red"),
               lty = c(1, 3, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))),
                          as.expression(bquote(bar(x) == .(x1[2]))), "power"), cex = 0.65)
      } else {
        legend(x = mu1[l - 10], y = 0.525, col = c("blue", "blue", "red"),
               lty = c(1, 3, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))),
                          as.expression(bquote(bar(x) == .(x1[2]))), "power"), cex = 0.65)
      }
    } else {
      # m1 >= 3: only the first three curves are drawn, as in the original.
      lines(x = x_reject[, 2], y = sev_rejectH0[, 2], col = "blue", lty = 3)
      lines(x = x_reject[, 3], y = sev_rejectH0[, 3], col = "blue", lty = 4)
      if (m2 > 0) {
        legend(x = mu1[floor(l/2) - 3], y = 0.5, col = c("blue", "blue", "blue", "red"),
               lty = c(1, 3, 4, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))),
                          as.expression(bquote(bar(x) == .(x1[2]))),
                          as.expression(bquote(bar(x) == .(x1[3]))), "power"), cex = 0.6)
      } else {
        legend(x = mu1[l - 10], y = 0.5, col = c("blue", "blue", "blue", "red"),
               lty = c(1, 3, 4, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))),
                          as.expression(bquote(bar(x) == .(x1[2]))),
                          as.expression(bquote(bar(x) == .(x1[3]))), "power"), cex = 0.6)
      }
    }
  }

  ### accept H0: severity of the inference mu <= mu1 ###
  if (m2 > 0) {
    for (j in seq_len(m2)) {
      x_accept[, j] <- mu1                        # for plotting
      q2[, j] <- sigma_x_inv * (rep(x = x2[j], times = l) - mu1)
      sev_acceptH0[, j] <- pnorm(q = q2[, j], lower.tail = FALSE)  # severity
    }
    # plot
    plot(x = x_accept[, 1], y = sev_acceptH0[, 1], type = "l", lty = 1, col = "blue",
         main = expression(paste("severity curves for inference: ", mu <= mu[1], "")),
         xlab = expression(paste("values of ", mu[1], "")),
         ylab = "severity / power", xaxt = "n", yaxt = "n", cex.main = 0.90)
    axis(side = 1, at = seq(from = mu1[1], to = mu1[l], by = 0.10),
         labels = seq(from = mu1[1], to = mu1[l], by = 0.10))  # x-axis
    axis(side = 2, at = seq(from = 0, to = 1, by = 0.05),
         labels = seq(from = 0, to = 1, by = 0.05))            # y-axis
    lines(x = mu1, y = power, col = "red", lty = 2)            # power curve
    # As above, the m2 == 2 / m2 >= 3 branches were originally outside the
    # `if (m2 > 0)` block; nesting them is behaviourally identical.
    if (m2 == 1) {
      legend(x = mu1[floor(l/2) - 2], y = 0.275, col = c("blue", "red"), lty = c(1, 2),
             legend = c(as.expression(bquote(bar(x) == .(x2[1]))), "power"), cex = 0.65)
    } else if (m2 == 2) {
      lines(x = x_accept[, 2], y = sev_acceptH0[, 2], col = "blue", lty = 3)
      legend(x = mu1[floor(l/2) - 2], y = 0.275, col = c("blue", "blue", "red"),
             lty = c(1, 3, 2),
             legend = c(as.expression(bquote(bar(x) == .(x2[1]))),
                        as.expression(bquote(bar(x) == .(x2[2]))), "power"), cex = 0.65)
    } else {
      # m2 >= 3: only the first three curves are drawn, as in the original.
      lines(x = x_accept[, 2], y = sev_acceptH0[, 2], col = "blue", lty = 3)
      lines(x = x_accept[, 3], y = sev_acceptH0[, 3], col = "blue", lty = 4)
      legend(x = mu1[floor(l/2) - 2], y = 0.25, col = c("blue", "blue", "blue", "red"),
             lty = c(1, 3, 4, 2),
             legend = c(as.expression(bquote(bar(x) == .(x2[1]))),
                        as.expression(bquote(bar(x) == .(x2[2]))),
                        as.expression(bquote(bar(x) == .(x2[3]))), "power"), cex = 0.6)
    }
  }

  # (The original constructed an `output` data.frame here that was never used;
  # it has been removed.)
  return(list(accept = accept, p = p, "severity_acceptH0" = sev_acceptH0,
              "severity_rejectH0" = sev_rejectH0, power = power, "discrepancy" = gamma))
}
|
/severity/R/severity.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 6,069
|
r
|
severity <- function(mu0, xbar, sigma, n, alpha)
{
  ## Post-data severity curves for the one-sided z-test of
  ##   H0: mu <= mu0   vs   H1: mu > mu0
  ## with known standard deviation `sigma` and sample size `n`.
  ##
  ## Arguments:
  ##   mu0   - hypothesised mean under H0 (coerced to integer, as designed)
  ##   xbar  - vector of observed sample means (one severity curve per mean)
  ##   sigma - known population standard deviation
  ##   n     - sample size (coerced to integer)
  ##   alpha - significance level of the test
  ##
  ## Side effects: draws severity/power curves on the current graphics device
  ## (two side-by-side panels when both decisions occur among the inputs).
  ## Returns a list with the accept/reject indicators, p-values, the severity
  ## matrices for both decisions, the power curve and the discrepancy grid.
  ##
  ## Note: the original `require(graphics)` was dropped -- graphics is
  ## attached in every standard R session.

  ### check inputs ###
  mu0 <- as.integer(mu0)    # make sure it is an integer
  n <- as.integer(n)        # make sure it is an integer
  sigma <- as.numeric(sigma)
  alpha <- as.numeric(alpha)
  # Unconditional, idempotent coercion; replaces the non-idiomatic
  # `if (class(xbar) != "numeric")` guard.
  xbar <- as.numeric(xbar)

  ### begin severity calculations ###
  r <- length(xbar)
  gamma <- seq(from = -0.5, to = 1.5, by = 0.05)  # discrepancies of interest
  l <- length(gamma)
  mu1 <- rep(x = mu0, times = l) + gamma          # alternative means mu0 + gamma
  c_alpha <- qnorm(alpha, lower.tail = FALSE)     # cut-off (for rejection region)
  sigma_x <- sigma / sqrt(n)                      # standard error of the mean
  sigma_x_inv <- 1 / sigma_x
  d_x0 <- sigma_x_inv * (xbar - rep(x = mu0, times = r))  # test statistic
  # delta1 = [sqrt(n) * (mu1 - mu0)] / sigma
  delta1 <- sigma_x_inv * gamma                   # non-centrality parameter
  p <- pnorm(d_x0, lower.tail = FALSE)            # p-value
  power <- pnorm(q = rep(x = c_alpha, times = l) - delta1, lower.tail = FALSE)  # power curve
  # "accept" indicates whether to 'accept H0' (accept = 1) OR 'reject H0'
  # (accept = 0). Vectorized; replaces the original element-wise for-loop.
  accept <- as.numeric(d_x0 < c_alpha)
  x1 <- xbar[which(accept == 0)]                  # means leading to rejection
  x2 <- xbar[which(accept == 1)]                  # means leading to acceptance
  m1 <- length(x1)
  m2 <- length(x2)
  q1 <- matrix(nrow = l, ncol = m1)
  q2 <- matrix(nrow = l, ncol = m2)
  sev_rejectH0 <- matrix(nrow = l, ncol = m1)
  sev_acceptH0 <- matrix(nrow = l, ncol = m2)
  x_reject <- matrix(nrow = l, ncol = m1)         # x-coordinates for plotting
  x_accept <- matrix(nrow = l, ncol = m2)         # x-coordinates for plotting
  # Two side-by-side panels only when both decisions occur among the inputs.
  if ((m1 > 0) && (m2 > 0)) {
    par(mfrow = c(1, 2))
  }

  ### reject H0: severity of the inference mu > mu1 ###
  if (m1 > 0) {
    for (i in seq_len(m1)) {
      x_reject[, i] <- mu1                        # for plotting
      q1[, i] <- sigma_x_inv * (rep(x = x1[i], times = l) - mu1)
      sev_rejectH0[, i] <- pnorm(q = q1[, i])     # severity
    }
    # plot
    plot(x = x_reject[, 1], y = sev_rejectH0[, 1], type = "l", lty = 1, col = "blue",
         main = expression(paste("severity curves for inference: ", mu > mu[1], "")),
         xlab = expression(paste("values of ", mu[1], "")),
         ylab = "severity / power", xaxt = "n", yaxt = "n", cex.main = 0.90)
    axis(side = 1, at = seq(from = mu1[1], to = mu1[l], by = 0.10),
         labels = seq(from = mu1[1], to = mu1[l], by = 0.10))  # x-axis
    axis(side = 2, at = seq(from = 0, to = 1, by = 0.05),
         labels = seq(from = 0, to = 1, by = 0.05))            # y-axis
    lines(x = mu1, y = power, col = "red", lty = 2)            # power curve
    # NOTE: the m1 == 2 / m1 >= 3 branches were originally placed OUTSIDE the
    # `if (m1 > 0)` block; behaviour is identical (they can only fire when
    # m1 >= 2, which implies m1 > 0) but nesting them makes the structure
    # explicit. Legend position depends on whether the acceptance panel is
    # also drawn.
    if (m1 == 1) {
      if (m2 > 0) {
        legend(x = mu1[floor(l/2) - 3], y = 0.525, col = c("blue", "red"), lty = c(1, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))), "power"), cex = 0.65)
      } else {
        legend(x = mu1[l - 10], y = 0.525, col = c("blue", "red"), lty = c(1, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))), "power"), cex = 0.65)
      }
    } else if (m1 == 2) {
      lines(x = x_reject[, 2], y = sev_rejectH0[, 2], col = "blue", lty = 3)
      if (m2 > 0) {
        legend(x = mu1[floor(l/2) - 3], y = 0.525, col = c("blue", "blue", "red"),
               lty = c(1, 3, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))),
                          as.expression(bquote(bar(x) == .(x1[2]))), "power"), cex = 0.65)
      } else {
        legend(x = mu1[l - 10], y = 0.525, col = c("blue", "blue", "red"),
               lty = c(1, 3, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))),
                          as.expression(bquote(bar(x) == .(x1[2]))), "power"), cex = 0.65)
      }
    } else {
      # m1 >= 3: only the first three curves are drawn, as in the original.
      lines(x = x_reject[, 2], y = sev_rejectH0[, 2], col = "blue", lty = 3)
      lines(x = x_reject[, 3], y = sev_rejectH0[, 3], col = "blue", lty = 4)
      if (m2 > 0) {
        legend(x = mu1[floor(l/2) - 3], y = 0.5, col = c("blue", "blue", "blue", "red"),
               lty = c(1, 3, 4, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))),
                          as.expression(bquote(bar(x) == .(x1[2]))),
                          as.expression(bquote(bar(x) == .(x1[3]))), "power"), cex = 0.6)
      } else {
        legend(x = mu1[l - 10], y = 0.5, col = c("blue", "blue", "blue", "red"),
               lty = c(1, 3, 4, 2),
               legend = c(as.expression(bquote(bar(x) == .(x1[1]))),
                          as.expression(bquote(bar(x) == .(x1[2]))),
                          as.expression(bquote(bar(x) == .(x1[3]))), "power"), cex = 0.6)
      }
    }
  }

  ### accept H0: severity of the inference mu <= mu1 ###
  if (m2 > 0) {
    for (j in seq_len(m2)) {
      x_accept[, j] <- mu1                        # for plotting
      q2[, j] <- sigma_x_inv * (rep(x = x2[j], times = l) - mu1)
      sev_acceptH0[, j] <- pnorm(q = q2[, j], lower.tail = FALSE)  # severity
    }
    # plot
    plot(x = x_accept[, 1], y = sev_acceptH0[, 1], type = "l", lty = 1, col = "blue",
         main = expression(paste("severity curves for inference: ", mu <= mu[1], "")),
         xlab = expression(paste("values of ", mu[1], "")),
         ylab = "severity / power", xaxt = "n", yaxt = "n", cex.main = 0.90)
    axis(side = 1, at = seq(from = mu1[1], to = mu1[l], by = 0.10),
         labels = seq(from = mu1[1], to = mu1[l], by = 0.10))  # x-axis
    axis(side = 2, at = seq(from = 0, to = 1, by = 0.05),
         labels = seq(from = 0, to = 1, by = 0.05))            # y-axis
    lines(x = mu1, y = power, col = "red", lty = 2)            # power curve
    # As above, the m2 == 2 / m2 >= 3 branches were originally outside the
    # `if (m2 > 0)` block; nesting them is behaviourally identical.
    if (m2 == 1) {
      legend(x = mu1[floor(l/2) - 2], y = 0.275, col = c("blue", "red"), lty = c(1, 2),
             legend = c(as.expression(bquote(bar(x) == .(x2[1]))), "power"), cex = 0.65)
    } else if (m2 == 2) {
      lines(x = x_accept[, 2], y = sev_acceptH0[, 2], col = "blue", lty = 3)
      legend(x = mu1[floor(l/2) - 2], y = 0.275, col = c("blue", "blue", "red"),
             lty = c(1, 3, 2),
             legend = c(as.expression(bquote(bar(x) == .(x2[1]))),
                        as.expression(bquote(bar(x) == .(x2[2]))), "power"), cex = 0.65)
    } else {
      # m2 >= 3: only the first three curves are drawn, as in the original.
      lines(x = x_accept[, 2], y = sev_acceptH0[, 2], col = "blue", lty = 3)
      lines(x = x_accept[, 3], y = sev_acceptH0[, 3], col = "blue", lty = 4)
      legend(x = mu1[floor(l/2) - 2], y = 0.25, col = c("blue", "blue", "blue", "red"),
             lty = c(1, 3, 4, 2),
             legend = c(as.expression(bquote(bar(x) == .(x2[1]))),
                        as.expression(bquote(bar(x) == .(x2[2]))),
                        as.expression(bquote(bar(x) == .(x2[3]))), "power"), cex = 0.6)
    }
  }

  # (The original constructed an `output` data.frame here that was never used;
  # it has been removed.)
  return(list(accept = accept, p = p, "severity_acceptH0" = sev_acceptH0,
              "severity_rejectH0" = sev_rejectH0, power = power, "discrepancy" = gamma))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/consistency.R
\name{consistency}
\alias{consistency}
\alias{inconsistency}
\title{MSC network meta-analysis models}
\usage{
consistency(x, mods = NULL, ...)
inconsistency(x, mods = NULL, ...)
}
\arguments{
\item{x}{A set of aggregated performance score data, as computed with the \code{\link{aggregate_performance}} function.}
\item{mods}{Which moderators should be included in the model? These can be any of the same moderators included in \code{\link{get_bs_samples}}, all others will be ignored.}
\item{...}{Any other arguments will be passed directly to \code{\link[metafor]{rma.mv}}}
}
\value{
The results of an \code{\link[metafor]{rma.mv}} multivariate/multilevel Linear (mixed-effects) model (class "rma.mv"), with the following additional components, which can be passed along to other functions:
\describe{
\item{reference}{name of reference score}
\item{scores}{vector of scores analysed}
\item{model}{If \code{\link{consistency}} was used, "consistency", else "inconsistency"}
\item{performance}{Label of performance, as used in previous functions}
}
}
\description{
MSC network meta-analysis models
}
\details{
The consistency and inconsistency models are those found in \href{https://doi.org/10.1186/s12874-016-0184-5}{Law et al 2016}:
Specifically, we fit one of two models using "Jackson's Model", as described in the paper, which differ only in their random effects:
\describe{
\item{consistency}{random contrast within study}
\item{inconsistency}{random contrast within study, and random contrast within design}
}
}
\section{Functions}{
\itemize{
\item \code{inconsistency}: Estimate differences in score performance using inconsistency model
}}
\examples{
dat <- msc_sample_data()
bssamp <- get_bs_samples(dat, id, study, outcome, n.samples = 10,
scores = c("a", "b", "c", "d", "e", "f"),
moderators = c("age", "female", "x1"))
perf <- compute_performance(bssamp, fn = calibration_slope, lbl = "CS")
agg <- aggregate_performance(perf)
consistency(agg)
agg.c <- aggregate_performance(perf, "c")
consistency(agg.c)
}
\seealso{
Law, M.; Jackson, D.; Turner, R.; Rhodes, K. & Viechtbauer, W. Two new methods to fit models for network meta-analysis with random inconsistency effects BMC Medical Research Methodology, 2016, 16, 87.
}
|
/man/consistency.Rd
|
no_license
|
srhaile/mscpredmodel
|
R
| false
| true
| 2,407
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/consistency.R
\name{consistency}
\alias{consistency}
\alias{inconsistency}
\title{MSC network meta-analysis models}
\usage{
consistency(x, mods = NULL, ...)
inconsistency(x, mods = NULL, ...)
}
\arguments{
\item{x}{A set of aggregated performance score data, as computed with the \code{\link{aggregate_performance}} function.}
\item{mods}{Which moderators should be included in the model? These can be any of the same moderators included in \code{\link{get_bs_samples}}, all others will be ignored.}
\item{...}{Any other arguments will be passed directly to \code{\link[metafor]{rma.mv}}}
}
\value{
The results of an \code{\link[metafor]{rma.mv}} multivariate/multilevel Linear (mixed-effects) model (class "rma.mv"), with the following additional components, which can be passed along to other functions:
\describe{
\item{reference}{name of reference score}
\item{scores}{vector of scores analysed}
\item{model}{If \code{\link{consistency}} was used, "consistency", else "inconsistency"}
\item{performance}{Label of performance, as used in previous functions}
}
}
\description{
MSC network meta-analysis models
}
\details{
The consistency and inconsistency models are those found in \href{https://doi.org/10.1186/s12874-016-0184-5}{Law et al 2016}:
Specifically, we fit one of two models using "Jackson's Model", as described in the paper, which differ only in their random effects:
\describe{
\item{consistency}{random contrast within study}
\item{inconsistency}{random contrast within study, and random contrast within design}
}
}
\section{Functions}{
\itemize{
\item \code{inconsistency}: Estimate differences in score performance using inconsistency model
}}
\examples{
dat <- msc_sample_data()
bssamp <- get_bs_samples(dat, id, study, outcome, n.samples = 10,
scores = c("a", "b", "c", "d", "e", "f"),
moderators = c("age", "female", "x1"))
perf <- compute_performance(bssamp, fn = calibration_slope, lbl = "CS")
agg <- aggregate_performance(perf)
consistency(agg)
agg.c <- aggregate_performance(perf, "c")
consistency(agg.c)
}
\seealso{
Law, M.; Jackson, D.; Turner, R.; Rhodes, K. & Viechtbauer, W. Two new methods to fit models for network meta-analysis with random inconsistency effects BMC Medical Research Methodology, 2016, 16, 87.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exportMetaData.R
\name{exportMetaData}
\alias{exportMetaData}
\alias{exportMetaData.redcapApiConnection}
\alias{exportMetaData.redcapDbConnection}
\title{Export Meta Data from a REDCap Database}
\usage{
exportMetaData(rcon, ...)
\method{exportMetaData}{redcapDbConnection}(rcon, ...)
\method{exportMetaData}{redcapApiConnection}(rcon, ...)
}
\arguments{
\item{rcon}{A REDCap connection object as generated by \code{redcapConnection.}}
\item{...}{Arguments to be passed to other methods.}
}
\description{
Retrieves the meta data for a REDcap database, including
field names, labels, types, formulas, etc. This file can be used to parse
levels of factors, apply labels, and other data management tasks once the
data are retrieved
}
\details{
A record of this export is placed in the REDCap logging page,
but the file that is exported is not stored in the database.
}
\examples{
\dontrun{
#*** Note: I cannot provide working examples without
#*** compromising security. Instead, I will try to
#*** offer up sample code with the matching results
#*** Create the connection object
rcon <- redcapConnection(url=[YOUR_REDCAP_URL], token=[API_TOKEN])
exportMetaData(rcon)
}
}
\author{
Jeffrey Horner
}
\references{
This functionality was originally developed by Jeffrey Horner in the \code{redcap} package.
\url{https://github.com/vubiostat/redcap}
Please refer to your institution's API documentation.
Additional details on API parameters are found on the package wiki at
\url{https://github.com/nutterb/redcapAPI/wiki/REDCap-API-Parameters}
}
|
/man/exportMetaData.Rd
|
no_license
|
thensle/redcapAPI
|
R
| false
| true
| 1,632
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exportMetaData.R
\name{exportMetaData}
\alias{exportMetaData}
\alias{exportMetaData.redcapApiConnection}
\alias{exportMetaData.redcapDbConnection}
\title{Export Meta Data from a REDCap Database}
\usage{
exportMetaData(rcon, ...)
\method{exportMetaData}{redcapDbConnection}(rcon, ...)
\method{exportMetaData}{redcapApiConnection}(rcon, ...)
}
\arguments{
\item{rcon}{A REDCap connection object as generated by \code{redcapConnection.}}
\item{...}{Arguments to be passed to other methods.}
}
\description{
Retrieves the meta data for a REDcap database, including
field names, labels, types, formulas, etc. This file can be used to parse
levels of factors, apply labels, and other data management tasks once the
data are retrieved
}
\details{
A record of this export is placed in the REDCap logging page,
but the file that is exported is not stored in the database.
}
\examples{
\dontrun{
#*** Note: I cannot provide working examples without
#*** compromising security. Instead, I will try to
#*** offer up sample code with the matching results
#*** Create the connection object
rcon <- redcapConnection(url=[YOUR_REDCAP_URL], token=[API_TOKEN])
exportMetaData(rcon)
}
}
\author{
Jeffrey Horner
}
\references{
This functionality was originally developed by Jeffrey Horner in the \code{redcap} package.
\url{https://github.com/vubiostat/redcap}
Please refer to your institution's API documentation.
Additional details on API parameters are found on the package wiki at
\url{https://github.com/nutterb/redcapAPI/wiki/REDCap-API-Parameters}
}
|
## Read the full dataset; "?" marks missing values. First two columns
## (Date, Time) are read as character, the rest as numeric.
t <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
## Format date to type Date
t$Date <- as.Date(t$Date, "%d/%m/%Y")
## Filter data set from Feb. 1, 2007 to Feb. 2, 2007
t <- subset(t, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
## Remove incomplete observations
t <- t[complete.cases(t), ]
## Combine Date and Time columns into a single timestamp string
dateTime <- paste(t$Date, t$Time)
## Name the vector
dateTime <- setNames(dateTime, "DateTime")
## Remove the now-redundant Date and Time columns
t <- t[ , !(names(t) %in% c("Date", "Time"))]
## Add DateTime column
t <- cbind(dateTime, t)
## Convert the timestamp strings to POSIXct for time-axis plotting
t$dateTime <- as.POSIXct(dateTime)
## Create Plot 3: the three sub-metering series over time.
## (Fixed: the y-axis label previously read "Global Active Power (kilowatts)",
## which describes a different variable; these series are sub-metering values.)
with(t, {
    plot(Sub_metering_1 ~ dateTime, type = "l",
         ylab = "Energy sub metering", xlab = "")
    lines(Sub_metering_2 ~ dateTime, col = 'Red')
    lines(Sub_metering_3 ~ dateTime, col = 'Blue')
})
legend("topright", col = c("black", "red", "blue"), lwd = c(1, 1, 1),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Copy the screen plot to a 480x480 PNG file and close the device
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
|
/plot3.R
|
no_license
|
N-Nirpeksh/ExData_Plotting1
|
R
| false
| false
| 1,199
|
r
|
## Read the full dataset; "?" marks missing values. First two columns
## (Date, Time) are read as character, the rest as numeric.
t <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
## Format date to type Date
t$Date <- as.Date(t$Date, "%d/%m/%Y")
## Filter data set from Feb. 1, 2007 to Feb. 2, 2007
t <- subset(t, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
## Remove incomplete observations
t <- t[complete.cases(t), ]
## Combine Date and Time columns into a single timestamp string
dateTime <- paste(t$Date, t$Time)
## Name the vector
dateTime <- setNames(dateTime, "DateTime")
## Remove the now-redundant Date and Time columns
t <- t[ , !(names(t) %in% c("Date", "Time"))]
## Add DateTime column
t <- cbind(dateTime, t)
## Convert the timestamp strings to POSIXct for time-axis plotting
t$dateTime <- as.POSIXct(dateTime)
## Create Plot 3: the three sub-metering series over time.
## (Fixed: the y-axis label previously read "Global Active Power (kilowatts)",
## which describes a different variable; these series are sub-metering values.)
with(t, {
    plot(Sub_metering_1 ~ dateTime, type = "l",
         ylab = "Energy sub metering", xlab = "")
    lines(Sub_metering_2 ~ dateTime, col = 'Red')
    lines(Sub_metering_3 ~ dateTime, col = 'Blue')
})
legend("topright", col = c("black", "red", "blue"), lwd = c(1, 1, 1),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Copy the screen plot to a 480x480 PNG file and close the device
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
|
# Generating automatic tests for input checks.
#
# `densities` (defined elsewhere in the test suite) is presumably a character
# vector of density-fitting function names -- TODO confirm against the setup
# file. Calls are built unevaluated with base::call() and then eval()'d so
# that expect_error() receives the constructed call expression itself (e.g.
# dens("x")) and captures the error raised when that expression is evaluated.
for(dens in densities) {
  # One testthat context per function under test.
  eval(call("context", paste0("Input check: ", dens)))
  # A character scalar must be rejected.
  eval(call("expect_error", call(dens, "x")))
  # A function object is not valid data.
  eval(call("expect_error", call(dens, lm)))
  # A 3x3 matrix (replicate() of 1:3) must be rejected.
  eval(call("expect_error", call(dens, replicate(3, 1:3))))
  # A lone NA must be rejected.
  eval(call("expect_error", call(dens, NA)))
}
|
/tests/testthat/test_automatic.R
|
permissive
|
vbaliga/univariateML
|
R
| false
| false
| 328
|
r
|
# Generating automatic tests for input checks.
#
# `densities` (defined elsewhere in the test suite) is presumably a character
# vector of density-fitting function names -- TODO confirm against the setup
# file. Calls are built unevaluated with base::call() and then eval()'d so
# that expect_error() receives the constructed call expression itself (e.g.
# dens("x")) and captures the error raised when that expression is evaluated.
for(dens in densities) {
  # One testthat context per function under test.
  eval(call("context", paste0("Input check: ", dens)))
  # A character scalar must be rejected.
  eval(call("expect_error", call(dens, "x")))
  # A function object is not valid data.
  eval(call("expect_error", call(dens, lm)))
  # A 3x3 matrix (replicate() of 1:3) must be rejected.
  eval(call("expect_error", call(dens, replicate(3, 1:3))))
  # A lone NA must be rejected.
  eval(call("expect_error", call(dens, NA)))
}
|
#' movies
#'
#' A data frame containing information about the top 5000 grossing movies
#' of all time.
#'
#'
#' @format A data frame containing 5000 rows and 13 columns
#' \describe{
#'  \item{name}{Movie name}
#'  \item{rating}{MPAA rating}
#'  \item{genre}{Movie genre}
#'  \item{creative.type}{Creative type}
#'  \item{time}{Running time in minutes}
#'  \item{year}{Year of release}
#'  \item{production.method}{Production method}
#'  \item{sequel}{Was the movie a sequel? 1 = yes, 0 = no}
#'  \item{budget}{Movie budget (in $USD millions)}
#'  \item{revenue.all}{Gross worldwide revenue in $USD millions}
#'  \item{revenue.dom}{Domestic revenue in $USD millions}
#'  \item{revenue.int}{International revenue in $USD millions}
#'  \item{revenue.inf}{Inflation-adjusted worldwide revenue in $USD millions}
#' }
#' @source www.the-numbers.com
#'
#'
#'
"movies"
|
/R/movies_doc.R
|
no_license
|
ndphillips/yarrr
|
R
| false
| false
| 872
|
r
|
#' movies
#'
#' A data frame containing information about the top 5000 grossing movies
#' of all time.
#'
#'
#' @format A data frame containing 5000 rows and 13 columns
#' \describe{
#'  \item{name}{Movie name}
#'  \item{rating}{MPAA rating}
#'  \item{genre}{Movie genre}
#'  \item{creative.type}{Creative type}
#'  \item{time}{Running time in minutes}
#'  \item{year}{Year of release}
#'  \item{production.method}{Production method}
#'  \item{sequel}{Was the movie a sequel? 1 = yes, 0 = no}
#'  \item{budget}{Movie budget (in $USD millions)}
#'  \item{revenue.all}{Gross worldwide revenue in $USD millions}
#'  \item{revenue.dom}{Domestic revenue in $USD millions}
#'  \item{revenue.int}{International revenue in $USD millions}
#'  \item{revenue.inf}{Inflation-adjusted worldwide revenue in $USD millions}
#' }
#' @source www.the-numbers.com
#'
#'
#'
"movies"
|
ff_opti_bisect_pmap_multi <- function(df, fc_withroot,
fl_lower_x, fl_upper_x,
ls_svr_df_in_func,
svr_root_x = 'x',
it_iter_tol = 50, fl_zero_tol = 10^-5,
bl_keep_iter = TRUE,
st_bisec_prefix = 'bisec_',
st_lower_x = 'a', st_lower_fx = 'fa',
st_upper_x = 'b', st_upper_fx = 'fb') {
#' Dataframe rows are individuals, find root for each individual, given cts diff monotonic function with root.
#'
#' @description
#' This is only for strictly monotonic functions that have a unique crossing at zero. There are potentially three
#' types of inputs relevant for the bisection root evaluation. Values in each row are parameters for the same nonlinear
#' function, we want to find roots for N nonlinear functions defined by each row. First type of input are these row specific
#' variable values. Second type of inputs are scalars or arrays that are fixed over all rows. Third type of inputs are values
#' that are shifting over bisection iterations. The implementation here assumes that we have lower and upper bound values
#' that are common across all individuals (rows), and that guarantee opposing signs.
#'
#' @param df dataframe containing all row/individual specific variable information, will append bisection results to dataframe
#' @param fc_withroot function with root, the function should have hard-coded in scalars and arrays that
#' would not change over iterations and would not change across individuals
#' @param fl_lower_x float value of common lower bound
#' @param fl_upper_x float value of common upper bound, opposing sign
#' @param ls_svr_df_in_func list of string names variables in df that are inputs for fc_withroot.
#' @param svr_root_x string the x variable name that appears in fc_withroot.
#' @param it_iter_tol integer how many maximum iterations to allow for bisection at most
#' @param fl_zero_tol float at what gap to zero will algorithm stop
#' @param bl_keep_iter whether to keep all iteration results as data columns
#' @param st_bisec_prefix string prefix for all bisection iteration etc results variables
#' @param st_lower_x string variable name component for lower bound x
#' @param st_lower_fx string variable name component for lower bound x evaluated at function
#' @param st_upper_x string variable name component for upper bound x
#' @param st_upper_fx string variable name component for upper bound x evaluated at function
#' @return dataframe containing bisection root for each individual/row
#' @author Fan Wang, \url{http://fanwangecon.github.io}
#' @references
#' \url{https://fanwangecon.github.io/REconTools/reference/ff_opti_bisect_pmap_multi.html}
#' \url{https://fanwangecon.github.io/REconTools/articles/fv_opti_bisect_pmap_multi.html}
#' \url{https://github.com/FanWangEcon/REconTools/blob/master/R/ff_opti_bisect.R}
#' @export
#' @import tibble tidyr purrr dplyr
#' @examples
#' library(dplyr)
#' library(tibble)
#' it_N_child_cnt <- 9
#' ar_intercept = seq(-10, -1, length.out = it_N_child_cnt)
#' ar_slope = seq(0.1, 1, length.out = it_N_child_cnt)
#' df_lines <- as_tibble(cbind(ar_intercept, ar_slope)) %>% rowid_to_column(var='ID')
#' ar_st_col_names = c('ID','fl_int', 'fl_slope')
#' df_lines <- df_lines %>% rename_all(~c(ar_st_col_names))
#' fc_withroot_line <- function(fl_int, fl_slope, x){
#' return(fl_int + fl_slope*x)
#' }
#' fl_lower_x_line <- 0
#' fl_upper_x_line <- 100000
#' ls_svr_df_in_func_line <- c('fl_int', 'fl_slope')
#' svr_root_x_line <- 'x'
#' fl_zero_tol = 10^-6
#' df_bisec <- ff_opti_bisect_pmap_multi(df_lines, fc_withroot_line,
#' fl_lower_x_line, fl_upper_x_line,
#' ls_svr_df_in_func_line, svr_root_x_line, bl_keep_iter = FALSE)
#' df_bisec %>% select(-one_of('f_p_t_f_a'))
# A. common prefix to make reshaping easier
# Column names for the iteration-0 bounds; the *_lst names are re-pointed to
# the latest iteration's columns at the bottom of the while loop.
svr_a_lst <- paste0(st_bisec_prefix, st_lower_x, '_0')
svr_b_lst <- paste0(st_bisec_prefix, st_upper_x, '_0')
svr_fa_lst <- paste0(st_bisec_prefix, st_lower_fx, '_0')
svr_fb_lst <- paste0(st_bisec_prefix, st_upper_fx, '_0')
svr_fxvr_name <- paste0('f', svr_root_x)
# Columns fed to pmap: row-specific parameters plus the current x candidate.
ls_pmap_vars <- unique(c(ls_svr_df_in_func, svr_root_x))
# B. Add initial a and b
df_bisec <- df %>% mutate(!!sym(svr_a_lst) := fl_lower_x, !!sym(svr_b_lst) := fl_upper_x)
# C. Evaluate function f(a_0) and f(b_0)
# 1. set x = a_0
# 2. evaluate f(a_0)
# 3. set x = b_0
# 4. evaluate f(b_0)
df_bisec <- df_bisec %>% mutate(!!sym(svr_root_x) := !!sym(svr_a_lst))
df_bisec <- df_bisec %>% mutate(
!!sym(svr_fa_lst) :=
unlist(
pmap(df_bisec %>% select(ls_pmap_vars), fc_withroot)
)
)
df_bisec <- df_bisec %>% mutate(!!sym(svr_root_x) := !!sym(svr_b_lst))
df_bisec <- df_bisec %>% mutate(
!!sym(svr_fb_lst) :=
unlist(
pmap(df_bisec %>% select(ls_pmap_vars), fc_withroot)
)
)
# D. Iteration Convergence Criteria
# fl_p_dist2zr = distance to zero to initialize
fl_p_dist2zr <- 1000
it_cur <- 0
while (it_cur <= it_iter_tol && fl_p_dist2zr >= fl_zero_tol ) {
it_cur <- it_cur + 1
# New Variables
svr_a_cur <- paste0(st_bisec_prefix, st_lower_x, '_', it_cur)
svr_b_cur <- paste0(st_bisec_prefix, st_upper_x, '_', it_cur)
svr_fa_cur <- paste0(st_bisec_prefix, st_lower_fx, '_', it_cur)
svr_fb_cur <- paste0(st_bisec_prefix, st_upper_fx, '_', it_cur)
# Evaluate function at the midpoint p = (a+b)/2 for every row
# 1. generate p
# 2. generate f_p
# 3. generate f_p*f_a (sign test: same sign as f(a) means root is in [p, b])
df_bisec <- df_bisec %>% mutate(!!sym(svr_root_x) := ((!!sym(svr_a_lst) + !!sym(svr_b_lst))/2))
df_bisec <- df_bisec %>% mutate(
!!sym(svr_fxvr_name) :=
unlist(
pmap(df_bisec %>% select(ls_pmap_vars), fc_withroot)
)
) %>%
mutate(f_p_t_f_a = !!sym(svr_fxvr_name)*!!sym(svr_fa_lst))
# fl_p_dist2zr = mean(abs(f(p))) across rows
# NOTE(review): convergence is judged on the MEAN of |f(p)| over all rows,
# so individual rows may be less converged than fl_zero_tol — confirm intent.
fl_p_dist2zr <- mean(abs(df_bisec %>% pull(!!sym(svr_fxvr_name))))
# Update a and b
df_bisec <- df_bisec %>%
mutate(!!sym(svr_a_cur) :=
case_when(f_p_t_f_a < 0 ~ !!sym(svr_a_lst),
TRUE ~ !!sym(svr_root_x))) %>%
mutate(!!sym(svr_b_cur) :=
case_when(f_p_t_f_a < 0 ~ !!sym(svr_root_x),
TRUE ~ !!sym(svr_b_lst)))
# Update f(a) and f(b)
df_bisec <- df_bisec %>%
mutate(!!sym(svr_fa_cur) :=
case_when(f_p_t_f_a < 0 ~ !!sym(svr_fa_lst),
TRUE ~ !!sym(svr_fxvr_name))) %>%
mutate(!!sym(svr_fb_cur) :=
case_when(f_p_t_f_a < 0 ~ !!sym(svr_fxvr_name),
TRUE ~ !!sym(svr_fb_lst)))
# Drop past record possibly
if(!bl_keep_iter) {
df_bisec <- df_bisec %>% select(-one_of(c(svr_a_lst, svr_b_lst, svr_fa_lst, svr_fb_lst)))
}
# Save from last: the *_lst pointers now name this iteration's columns
svr_a_lst <- svr_a_cur
svr_b_lst <- svr_b_cur
svr_fa_lst <- svr_fa_cur
svr_fb_lst <- svr_fb_cur
# Summarize current round
message(paste0('it_cur:', it_cur, ', fl_p_dist2zr:', fl_p_dist2zr))
}
# return
return(df_bisec)
}
|
/R/ff_opti_bisect.R
|
permissive
|
FanWangEcon/REconTools
|
R
| false
| false
| 7,594
|
r
|
ff_opti_bisect_pmap_multi <- function(df, fc_withroot,
fl_lower_x, fl_upper_x,
ls_svr_df_in_func,
svr_root_x = 'x',
it_iter_tol = 50, fl_zero_tol = 10^-5,
bl_keep_iter = TRUE,
st_bisec_prefix = 'bisec_',
st_lower_x = 'a', st_lower_fx = 'fa',
st_upper_x = 'b', st_upper_fx = 'fb') {
#' Dataframe rows are individuals, find root for each individual, given cts diff monotonic function with root.
#'
#' @description
#' This is only for strictly monotonic functions that have a unique crossing at zero. There are potentially three
#' types of inputs relevant for the bisection root evaluation. Values in each row are parameters for the same nonlinear
#' function, we want to find roots for N nonlinear functions defined by each row. First type of input are these row specific
#' variable values. Second type of inputs are scalars or arrays that are fixed over all rows. Third type of inputs are values
#' that are shifting over bisection iterations. The implementation here assumes that we have lower and upper bound values
#' that are common across all individuals (rows), and that guarantee opposing signs.
#'
#' @param df dataframe containing all row/individual specific variable information, will append bisection results to dataframe
#' @param fc_withroot function with root, the function should have hard-coded in scalars and arrays that
#' would not change over iterations and would not change across individuals
#' @param fl_lower_x float value of common lower bound
#' @param fl_upper_x float value of common upper bound, opposing sign
#' @param ls_svr_df_in_func list of string names variables in df that are inputs for fc_withroot.
#' @param svr_root_x string the x variable name that appears in fc_withroot.
#' @param it_iter_tol integer how many maximum iterations to allow for bisection at most
#' @param fl_zero_tol float at what gap to zero will algorithm stop
#' @param bl_keep_iter whether to keep all iteration results as data columns
#' @param st_bisec_prefix string prefix for all bisection iteration etc results variables
#' @param st_lower_x string variable name component for lower bound x
#' @param st_lower_fx string variable name component for lower bound x evaluated at function
#' @param st_upper_x string variable name component for upper bound x
#' @param st_upper_fx string variable name component for upper bound x evaluated at function
#' @return dataframe containing bisection root for each individual/row
#' @author Fan Wang, \url{http://fanwangecon.github.io}
#' @references
#' \url{https://fanwangecon.github.io/REconTools/reference/ff_opti_bisect_pmap_multi.html}
#' \url{https://fanwangecon.github.io/REconTools/articles/fv_opti_bisect_pmap_multi.html}
#' \url{https://github.com/FanWangEcon/REconTools/blob/master/R/ff_opti_bisect.R}
#' @export
#' @import tibble tidyr purrr dplyr
#' @examples
#' library(dplyr)
#' library(tibble)
#' it_N_child_cnt <- 9
#' ar_intercept = seq(-10, -1, length.out = it_N_child_cnt)
#' ar_slope = seq(0.1, 1, length.out = it_N_child_cnt)
#' df_lines <- as_tibble(cbind(ar_intercept, ar_slope)) %>% rowid_to_column(var='ID')
#' ar_st_col_names = c('ID','fl_int', 'fl_slope')
#' df_lines <- df_lines %>% rename_all(~c(ar_st_col_names))
#' fc_withroot_line <- function(fl_int, fl_slope, x){
#' return(fl_int + fl_slope*x)
#' }
#' fl_lower_x_line <- 0
#' fl_upper_x_line <- 100000
#' ls_svr_df_in_func_line <- c('fl_int', 'fl_slope')
#' svr_root_x_line <- 'x'
#' fl_zero_tol = 10^-6
#' df_bisec <- ff_opti_bisect_pmap_multi(df_lines, fc_withroot_line,
#' fl_lower_x_line, fl_upper_x_line,
#' ls_svr_df_in_func_line, svr_root_x_line, bl_keep_iter = FALSE)
#' df_bisec %>% select(-one_of('f_p_t_f_a'))
# A. common prefix to make reshaping easier
# Column names for the iteration-0 bounds; the *_lst names are re-pointed to
# the latest iteration's columns at the bottom of the while loop.
svr_a_lst <- paste0(st_bisec_prefix, st_lower_x, '_0')
svr_b_lst <- paste0(st_bisec_prefix, st_upper_x, '_0')
svr_fa_lst <- paste0(st_bisec_prefix, st_lower_fx, '_0')
svr_fb_lst <- paste0(st_bisec_prefix, st_upper_fx, '_0')
svr_fxvr_name <- paste0('f', svr_root_x)
# Columns fed to pmap: row-specific parameters plus the current x candidate.
ls_pmap_vars <- unique(c(ls_svr_df_in_func, svr_root_x))
# B. Add initial a and b
df_bisec <- df %>% mutate(!!sym(svr_a_lst) := fl_lower_x, !!sym(svr_b_lst) := fl_upper_x)
# C. Evaluate function f(a_0) and f(b_0)
# 1. set x = a_0
# 2. evaluate f(a_0)
# 3. set x = b_0
# 4. evaluate f(b_0)
df_bisec <- df_bisec %>% mutate(!!sym(svr_root_x) := !!sym(svr_a_lst))
df_bisec <- df_bisec %>% mutate(
!!sym(svr_fa_lst) :=
unlist(
pmap(df_bisec %>% select(ls_pmap_vars), fc_withroot)
)
)
df_bisec <- df_bisec %>% mutate(!!sym(svr_root_x) := !!sym(svr_b_lst))
df_bisec <- df_bisec %>% mutate(
!!sym(svr_fb_lst) :=
unlist(
pmap(df_bisec %>% select(ls_pmap_vars), fc_withroot)
)
)
# D. Iteration Convergence Criteria
# fl_p_dist2zr = distance to zero to initialize
fl_p_dist2zr <- 1000
it_cur <- 0
while (it_cur <= it_iter_tol && fl_p_dist2zr >= fl_zero_tol ) {
it_cur <- it_cur + 1
# New Variables
svr_a_cur <- paste0(st_bisec_prefix, st_lower_x, '_', it_cur)
svr_b_cur <- paste0(st_bisec_prefix, st_upper_x, '_', it_cur)
svr_fa_cur <- paste0(st_bisec_prefix, st_lower_fx, '_', it_cur)
svr_fb_cur <- paste0(st_bisec_prefix, st_upper_fx, '_', it_cur)
# Evaluate function at the midpoint p = (a+b)/2 for every row
# 1. generate p
# 2. generate f_p
# 3. generate f_p*f_a (sign test: same sign as f(a) means root is in [p, b])
df_bisec <- df_bisec %>% mutate(!!sym(svr_root_x) := ((!!sym(svr_a_lst) + !!sym(svr_b_lst))/2))
df_bisec <- df_bisec %>% mutate(
!!sym(svr_fxvr_name) :=
unlist(
pmap(df_bisec %>% select(ls_pmap_vars), fc_withroot)
)
) %>%
mutate(f_p_t_f_a = !!sym(svr_fxvr_name)*!!sym(svr_fa_lst))
# fl_p_dist2zr = mean(abs(f(p))) across rows
# NOTE(review): convergence is judged on the MEAN of |f(p)| over all rows,
# so individual rows may be less converged than fl_zero_tol — confirm intent.
fl_p_dist2zr <- mean(abs(df_bisec %>% pull(!!sym(svr_fxvr_name))))
# Update a and b
df_bisec <- df_bisec %>%
mutate(!!sym(svr_a_cur) :=
case_when(f_p_t_f_a < 0 ~ !!sym(svr_a_lst),
TRUE ~ !!sym(svr_root_x))) %>%
mutate(!!sym(svr_b_cur) :=
case_when(f_p_t_f_a < 0 ~ !!sym(svr_root_x),
TRUE ~ !!sym(svr_b_lst)))
# Update f(a) and f(b)
df_bisec <- df_bisec %>%
mutate(!!sym(svr_fa_cur) :=
case_when(f_p_t_f_a < 0 ~ !!sym(svr_fa_lst),
TRUE ~ !!sym(svr_fxvr_name))) %>%
mutate(!!sym(svr_fb_cur) :=
case_when(f_p_t_f_a < 0 ~ !!sym(svr_fxvr_name),
TRUE ~ !!sym(svr_fb_lst)))
# Drop past record possibly
if(!bl_keep_iter) {
df_bisec <- df_bisec %>% select(-one_of(c(svr_a_lst, svr_b_lst, svr_fa_lst, svr_fb_lst)))
}
# Save from last: the *_lst pointers now name this iteration's columns
svr_a_lst <- svr_a_cur
svr_b_lst <- svr_b_cur
svr_fa_lst <- svr_fa_cur
svr_fb_lst <- svr_fb_cur
# Summarize current round
message(paste0('it_cur:', it_cur, ', fl_p_dist2zr:', fl_p_dist2zr))
}
# return
return(df_bisec)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/freqlist.R
\name{freqlist}
\alias{freqlist}
\alias{freqlist.table}
\alias{freqlist.formula}
\title{freqlist}
\usage{
freqlist(object, ...)
\method{freqlist}{table}(object, na.options = c("include", "showexclude",
"remove"), strata = NULL, labelTranslations = NULL, control = NULL,
...)
\method{freqlist}{formula}(formula, data, subset, na.action,
strata = NULL, labelTranslations = NULL, control = NULL, addNA,
exclude, drop.unused.levels, ...)
}
\arguments{
\item{object}{An R object, usually of class \code{"table"} or class \code{"xtabs"}}
\item{...}{additional arguments. In the formula method, these are passed to the table method. These are also passed to
\code{\link{freq.control}}}
\item{na.options}{a character string indicating how to handle missing values: \code{"include"}
(include values with NAs in counts and percentages),
\code{"showexclude"} (show NAs but exclude from cumulative counts and all percentages),
\code{"remove"} (remove values with NAs); default is \code{"include"}.}
\item{strata}{(formerly \code{groupBy}) an optional character string specifying a variable(s) to use for grouping when calculating cumulative
counts and percentages. \code{\link{summary.freqlist}} will also separate by grouping variable for printing. Note that this is different
from \code{modelsum} and \code{tableby}, which take bare column names (and only one, at that!)}
\item{labelTranslations}{an optional character string (or list) of labels to use for variable levels when summarizing.
Names will be matched appropriately.}
\item{control}{control parameters to handle optional settings within \code{freqlist}. See \code{\link{freq.control}}}
\item{formula, data, subset, na.action, addNA, exclude, drop.unused.levels}{Arguments passed to \code{\link[stats]{xtabs}}. Note
that \code{addNA=} only works in R >= 3.4.0.}
}
\value{
An object of class \code{c("freqlist", "arsenal_table")}
}
\description{
Approximate the output from SAS's \code{PROC FREQ} procedure when using the \code{/list} option of the \code{TABLE} statement.
}
\examples{
# load mockstudy data
data(mockstudy)
tab.ex <- table(mockstudy[c("arm", "sex", "mdquality.s")], useNA = "ifany")
noby <- freqlist(tab.ex, na.options = "include")
summary(noby)
# show the top 6 rows' frequencies and percents
head(summary(sort(noby, decreasing = TRUE)[c(1:4, 6)]))
withby <- freqlist(tab.ex, strata = c("arm","sex"), na.options = "showexclude")
summary(withby)
}
\seealso{
\code{\link{arsenal_table}}, \code{\link{summary.freqlist}}, \code{\link{freq.control}}, \code{\link{freqlist.internal}},
\code{\link[base]{table}}, \code{\link[stats]{xtabs}}
}
\author{
Tina Gunderson, with revisions by Ethan Heinzen
}
|
/man/freqlist.Rd
|
no_license
|
bzkrouse/arsenal
|
R
| false
| true
| 2,772
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/freqlist.R
\name{freqlist}
\alias{freqlist}
\alias{freqlist.table}
\alias{freqlist.formula}
\title{freqlist}
\usage{
freqlist(object, ...)
\method{freqlist}{table}(object, na.options = c("include", "showexclude",
"remove"), strata = NULL, labelTranslations = NULL, control = NULL,
...)
\method{freqlist}{formula}(formula, data, subset, na.action,
strata = NULL, labelTranslations = NULL, control = NULL, addNA,
exclude, drop.unused.levels, ...)
}
\arguments{
\item{object}{An R object, usually of class \code{"table"} or class \code{"xtabs"}}
\item{...}{additional arguments. In the formula method, these are passed to the table method. These are also passed to
\code{\link{freq.control}}}
\item{na.options}{a character string indicating how to handle missing values: \code{"include"}
(include values with NAs in counts and percentages),
\code{"showexclude"} (show NAs but exclude from cumulative counts and all percentages),
\code{"remove"} (remove values with NAs); default is \code{"include"}.}
\item{strata}{(formerly \code{groupBy}) an optional character string specifying a variable(s) to use for grouping when calculating cumulative
counts and percentages. \code{\link{summary.freqlist}} will also separate by grouping variable for printing. Note that this is different
from \code{modelsum} and \code{tableby}, which take bare column names (and only one, at that!)}
\item{labelTranslations}{an optional character string (or list) of labels to use for variable levels when summarizing.
Names will be matched appropriately.}
\item{control}{control parameters to handle optional settings within \code{freqlist}. See \code{\link{freq.control}}}
\item{formula, data, subset, na.action, addNA, exclude, drop.unused.levels}{Arguments passed to \code{\link[stats]{xtabs}}. Note
that \code{addNA=} only works in R >= 3.4.0.}
}
\value{
An object of class \code{c("freqlist", "arsenal_table")}
}
\description{
Approximate the output from SAS's \code{PROC FREQ} procedure when using the \code{/list} option of the \code{TABLE} statement.
}
\examples{
# load mockstudy data
data(mockstudy)
tab.ex <- table(mockstudy[c("arm", "sex", "mdquality.s")], useNA = "ifany")
noby <- freqlist(tab.ex, na.options = "include")
summary(noby)
# show the top 6 rows' frequencies and percents
head(summary(sort(noby, decreasing = TRUE)[c(1:4, 6)]))
withby <- freqlist(tab.ex, strata = c("arm","sex"), na.options = "showexclude")
summary(withby)
}
\seealso{
\code{\link{arsenal_table}}, \code{\link{summary.freqlist}}, \code{\link{freq.control}}, \code{\link{freqlist.internal}},
\code{\link[base]{table}}, \code{\link[stats]{xtabs}}
}
\author{
Tina Gunderson, with revisions by Ethan Heinzen
}
|
# 99_utils.R -- shared packages, paths, and raster helper functions.
# load packages
library(sf)
library(raster)
suppressMessages(library(dplyr))
library(mapview)
# set paths
# Root directory for the NDR model data.
data_dir <- "data/NDR/"
# utility functions
# Sum all cell values of a raster (x[] pulls the full value vector),
# ignoring NA cells.
raster_sum <- function(x) sum(x[], na.rm = TRUE)
# Extract the value of each raster in `raster_list` at the location(s) in
# `sf_point`; returns a list with one element per raster.
get_values_from_rasters <- function(sf_point, raster_list){
lapply(raster_list, function(x) raster::extract(x, sf_point))
}
|
/scripts/99_utils.R
|
no_license
|
jsta/invest_ndr
|
R
| false
| false
| 328
|
r
|
# 99_utils.R -- shared packages, paths, and raster helper functions.
# load packages
library(sf)
library(raster)
suppressMessages(library(dplyr))
library(mapview)
# set paths
# Root directory for the NDR model data.
data_dir <- "data/NDR/"
# utility functions
# Sum all cell values of a raster (x[] pulls the full value vector),
# ignoring NA cells.
raster_sum <- function(x) sum(x[], na.rm = TRUE)
# Extract the value of each raster in `raster_list` at the location(s) in
# `sf_point`; returns a list with one element per raster.
get_values_from_rasters <- function(sf_point, raster_list){
lapply(raster_list, function(x) raster::extract(x, sf_point))
}
|
## plot4.R -- yearly total PM2.5 emissions from coal-related sources
## (nationwide; the NEI data is NOT filtered to any city here).
library(plyr)
library(ggplot2)
## Load the (large) NEI/SCC data sets only once per session.
if (!exists("NEI")) NEI <- readRDS("exdata_data_NEI_data/summarySCC_PM25.rds")
if (!exists("SCC")) SCC <- readRDS("exdata_data_NEI_data/Source_Classification_Code.rds")
## Keep only coal-related source classification codes.
scCoal <- SCC[grepl("coal", tolower(SCC$EI.Sector)), ]
data <- join(scCoal, NEI, by = "SCC", type = "inner")
## Total emissions per measurement year (1999, 2002, 2005, 2008).
aggData <- data.frame(tapply(data$Emissions, data$year, sum))
## Build the plotting frame directly instead of pre-filling an NA matrix;
## TRUE/FALSE spelled out (T/F are reassignable in R).
finalAggData <- data.frame(TotalEmissions = as.numeric(aggData[, 1]),
                           Year = rownames(aggData),
                           stringsAsFactors = FALSE)
png("./export/plot4.png", height = 768, width = 768)
## BUG FIX: title previously claimed "in Baltimore City", but no fips filter
## is applied — the data covers all coal-related sources in the NEI.
qplot(x = finalAggData$Year, y = finalAggData$TotalEmissions, data = finalAggData,
      stat = "identity", geom = "histogram", ylab = "Total Emissions", xlab = "Year",
      main = "Total PM²⁵ emissions from coal combustion-related sources (1999 - 2008)")
# plot(finalAggData$Year, finalAggData$TotalEmissions, type = "l", ylab = "Total Emissions", xlab = "Year", main = "Total PM²⁵ emissions from coal combustion-related sources (1999 - 2008)")
dev.off()
|
/plot4.R
|
permissive
|
Vongo/Exploratory2
|
R
| false
| false
| 1,026
|
r
|
## plot4.R -- yearly total PM2.5 emissions from coal-related sources
## (nationwide; the NEI data is NOT filtered to any city here).
library(plyr)
library(ggplot2)
## Load the (large) NEI/SCC data sets only once per session.
if (!exists("NEI")) NEI <- readRDS("exdata_data_NEI_data/summarySCC_PM25.rds")
if (!exists("SCC")) SCC <- readRDS("exdata_data_NEI_data/Source_Classification_Code.rds")
## Keep only coal-related source classification codes.
scCoal <- SCC[grepl("coal", tolower(SCC$EI.Sector)), ]
data <- join(scCoal, NEI, by = "SCC", type = "inner")
## Total emissions per measurement year (1999, 2002, 2005, 2008).
aggData <- data.frame(tapply(data$Emissions, data$year, sum))
## Build the plotting frame directly instead of pre-filling an NA matrix;
## TRUE/FALSE spelled out (T/F are reassignable in R).
finalAggData <- data.frame(TotalEmissions = as.numeric(aggData[, 1]),
                           Year = rownames(aggData),
                           stringsAsFactors = FALSE)
png("./export/plot4.png", height = 768, width = 768)
## BUG FIX: title previously claimed "in Baltimore City", but no fips filter
## is applied — the data covers all coal-related sources in the NEI.
qplot(x = finalAggData$Year, y = finalAggData$TotalEmissions, data = finalAggData,
      stat = "identity", geom = "histogram", ylab = "Total Emissions", xlab = "Year",
      main = "Total PM²⁵ emissions from coal combustion-related sources (1999 - 2008)")
# plot(finalAggData$Year, finalAggData$TotalEmissions, type = "l", ylab = "Total Emissions", xlab = "Year", main = "Total PM²⁵ emissions from coal combustion-related sources (1999 - 2008)")
dev.off()
|
#' Exponential distribution maximum likelihood estimation
#'
#' The maximum likelihood estimate of `rate` is the inverse sample mean.
#'
#' For the density function of the exponential distribution see
#' [Exponential][stats::Exponential].
#'
#' @param x a (non-empty) numeric vector of data values.
#' @param na.rm logical. Should missing values be removed? If `FALSE`,
#' the function fails when `x` contains missing values.
#' @param ... currently affects nothing.
#' @return `mlexp` returns an object of [class][base::class] `univariateML`.
#' This is a named numeric vector with maximum likelihood estimates for
#' `rate` and the following attributes:
#' \item{`model`}{The name of the model.}
#' \item{`density`}{The density associated with the estimates.}
#' \item{`logLik`}{The loglikelihood at the maximum.}
#' \item{`support`}{The support of the density.}
#' \item{`n`}{The number of observations.}
#' \item{`call`}{The call as captured by `match.call`}
#' @examples
#' mlexp(precip)
#' @seealso [Exponential][stats::Exponential] for the exponential density.
#' @references Johnson, N. L., Kotz, S. and Balakrishnan, N. (1995)
#' Continuous Univariate Distributions, Volume 1, Chapter 19. Wiley, New York.
#' @export
mlexp <- function(x, na.rm = FALSE, ...) {
if (na.rm) x <- x[!is.na(x)] else assertthat::assert_that(!anyNA(x))
ml_input_checker(x)
assertthat::assert_that(min(x) >= 0)  # exponential support is [0, Inf)
rate <- 1 / mean(x)  # closed-form MLE: rate = 1 / sample mean
object <- c(rate = rate)
class(object) <- "univariateML"
attr(object, "model") <- "Exponential"
attr(object, "density") <- "stats::dexp"
# log-likelihood at the MLE: n*(log(rate) - rate*mean(x)) = n*(log(rate) - 1)
attr(object, "logLik") <- length(x) * (log(rate) - 1)
attr(object, "support") <- c(0, Inf)
attr(object, "n") <- length(x)
attr(object, "call") <- match.call()
object
}
|
/R/mlexp.R
|
permissive
|
JonasMoss/univariateML
|
R
| false
| false
| 1,782
|
r
|
#' Maximum likelihood estimation for the exponential distribution
#'
#' The `rate` parameter is estimated in closed form as the reciprocal of the
#' sample mean.
#'
#' See [Exponential][stats::Exponential] for the density function of the
#' exponential distribution.
#'
#' @param x a (non-empty) numeric vector of data values.
#' @param na.rm logical. Should missing values be removed? If `FALSE`,
#' the function fails when `x` contains missing values.
#' @param ... currently affects nothing.
#' @return `mlexp` returns an object of [class][base::class] `univariateML`:
#' a named numeric vector holding the maximum likelihood estimate of
#' `rate`, carrying the following attributes:
#' \item{`model`}{The name of the model.}
#' \item{`density`}{The density associated with the estimates.}
#' \item{`logLik`}{The loglikelihood at the maximum.}
#' \item{`support`}{The support of the density.}
#' \item{`n`}{The number of observations.}
#' \item{`call`}{The call as captured by `match.call`}
#' @examples
#' mlexp(precip)
#' @seealso [Exponential][stats::Exponential] for the exponential density.
#' @references Johnson, N. L., Kotz, S. and Balakrishnan, N. (1995)
#' Continuous Univariate Distributions, Volume 1, Chapter 19. Wiley, New York.
#' @export
mlexp <- function(x, na.rm = FALSE, ...) {
  # Validate input: drop NAs on request, otherwise refuse them outright.
  if (na.rm) {
    x <- x[!is.na(x)]
  } else {
    assertthat::assert_that(!anyNA(x))
  }
  ml_input_checker(x)
  assertthat::assert_that(min(x) >= 0)

  # Closed-form MLE: rate = 1 / sample mean.
  n_obs <- length(x)
  rate_hat <- 1 / mean(x)

  est <- c(rate = rate_hat)
  class(est) <- "univariateML"
  # Attach metadata in the same order the original implementation used.
  meta <- list(
    model   = "Exponential",
    density = "stats::dexp",
    logLik  = n_obs * (log(rate_hat) - 1),
    support = c(0, Inf),
    n       = n_obs,
    call    = match.call()
  )
  for (nm in names(meta)) attr(est, nm) <- meta[[nm]]
  est
}
|
# plot_metadata.R -- QC plots of saturation and detected-cell counts for the
# RNA and ATAC assays, per donor and per region.
library(ggplot2)
library(dplyr)
# Provides ggClean() used in the plots below.
source('R/analysis_utils.R')
rna.meta <- readRDS('seurat/RNA_seq_metadata.rds')
atac.meta <- readRDS('ATAC_metadata_df.rds')
# Carry the RNA sample labels over to the ATAC table.
# NOTE(review): assumes both metadata frames have 18 rows in the same
# sample order — confirm against the upstream RDS files.
atac.meta$sample <- rna.meta$sample
comb.meta <- rbind(rna.meta, atac.meta)
comb.meta$Assay <- c(rep("RNA",18),rep("ATAC",18))
# Per-donor, per-assay summaries: total cells and mean/sd saturation.
collapsed.comb.meta <- comb.meta %>%
group_by(individual, Assay) %>%
summarise(num_cells=sum(ncells), mean_saturation=mean(saturation, na.rm=T), sd_saturation=sd(saturation, na.rm=T))
# Donor-level bar charts with +/- sd/2 error bars on saturation.
p1 <- ggplot(collapsed.comb.meta, aes(x=individual,y=mean_saturation, fill=Assay)) +
geom_bar(position="dodge", stat="identity") +
ggClean(rotate_axis=TRUE) + xlab("Donor") + ylab("% Saturation") +
geom_errorbar(aes(ymin=mean_saturation-sd_saturation/2, ymax=mean_saturation+sd_saturation/2), width=.2,
position=position_dodge(.9))
p2 <- ggplot(collapsed.comb.meta, aes(x=individual,y=num_cells, fill=Assay)) +
geom_bar(position="dodge", stat="identity") +
ggClean(rotate_axis=TRUE) + xlab("Donor") + ylab("Cells Detected")
ggsave(filename = "individual_saturation_qc.png", p1, dpi=300, width=8, height=6)
ggsave(filename = "individual_cells_detected.png", p2, dpi=300, width=8, height=6)
# Per-donor region plots: one saturation and one cell-count figure each.
individuals <- unique(comb.meta$individual)
for(ind in individuals){
curr_df <- comb.meta[comb.meta$individual==ind,]
p1 <- ggplot(curr_df, aes(x=regions,y=saturation, fill=Assay)) +
geom_bar(position="dodge", stat="identity") +
ggClean(rotate_axis=TRUE) + xlab("Region") + ylab("% Saturation") + ggtitle(ind) + ylim(c(0, 100))
p2 <- ggplot(curr_df, aes(x=regions,y=ncells, fill=Assay)) +
geom_bar(position="dodge", stat="identity") +
ggClean(rotate_axis=TRUE) + xlab("Region") + ylab("Cells Detected") + ggtitle(ind) + ylim(c(0,8000))
ggsave(filename = paste0("regions_",ind,"_saturation_qc.png"), p1, dpi=300, width=8, height=6)
ggsave(filename = paste0("regions_",ind,"_ncells_qc.png"), p2, dpi=300, width=8, height=6)
}
|
/R/plot_metadata.R
|
no_license
|
sq-96/heart_atlas
|
R
| false
| false
| 1,965
|
r
|
# plot_metadata.R -- QC plots of saturation and detected-cell counts for the
# RNA and ATAC assays, per donor and per region.
library(ggplot2)
library(dplyr)
# Provides ggClean() used in the plots below.
source('R/analysis_utils.R')
rna.meta <- readRDS('seurat/RNA_seq_metadata.rds')
atac.meta <- readRDS('ATAC_metadata_df.rds')
# Carry the RNA sample labels over to the ATAC table.
# NOTE(review): assumes both metadata frames have 18 rows in the same
# sample order — confirm against the upstream RDS files.
atac.meta$sample <- rna.meta$sample
comb.meta <- rbind(rna.meta, atac.meta)
comb.meta$Assay <- c(rep("RNA",18),rep("ATAC",18))
# Per-donor, per-assay summaries: total cells and mean/sd saturation.
collapsed.comb.meta <- comb.meta %>%
group_by(individual, Assay) %>%
summarise(num_cells=sum(ncells), mean_saturation=mean(saturation, na.rm=T), sd_saturation=sd(saturation, na.rm=T))
# Donor-level bar charts with +/- sd/2 error bars on saturation.
p1 <- ggplot(collapsed.comb.meta, aes(x=individual,y=mean_saturation, fill=Assay)) +
geom_bar(position="dodge", stat="identity") +
ggClean(rotate_axis=TRUE) + xlab("Donor") + ylab("% Saturation") +
geom_errorbar(aes(ymin=mean_saturation-sd_saturation/2, ymax=mean_saturation+sd_saturation/2), width=.2,
position=position_dodge(.9))
p2 <- ggplot(collapsed.comb.meta, aes(x=individual,y=num_cells, fill=Assay)) +
geom_bar(position="dodge", stat="identity") +
ggClean(rotate_axis=TRUE) + xlab("Donor") + ylab("Cells Detected")
ggsave(filename = "individual_saturation_qc.png", p1, dpi=300, width=8, height=6)
ggsave(filename = "individual_cells_detected.png", p2, dpi=300, width=8, height=6)
# Per-donor region plots: one saturation and one cell-count figure each.
individuals <- unique(comb.meta$individual)
for(ind in individuals){
curr_df <- comb.meta[comb.meta$individual==ind,]
p1 <- ggplot(curr_df, aes(x=regions,y=saturation, fill=Assay)) +
geom_bar(position="dodge", stat="identity") +
ggClean(rotate_axis=TRUE) + xlab("Region") + ylab("% Saturation") + ggtitle(ind) + ylim(c(0, 100))
p2 <- ggplot(curr_df, aes(x=regions,y=ncells, fill=Assay)) +
geom_bar(position="dodge", stat="identity") +
ggClean(rotate_axis=TRUE) + xlab("Region") + ylab("Cells Detected") + ggtitle(ind) + ylim(c(0,8000))
ggsave(filename = paste0("regions_",ind,"_saturation_qc.png"), p1, dpi=300, width=8, height=6)
ggsave(filename = paste0("regions_",ind,"_ncells_qc.png"), p2, dpi=300, width=8, height=6)
}
|
# Build analysis-ready NHANES data sets for two hypotheses (hemoglobin A1c
# and diabetes prevalence vs. PCB exposure): read the SAS exports, harmonize
# PCB-related column names, recover values below the limit of detection (LC),
# add missingness indicators for confounders, and write out a CSV.
# NOTE(review): the hard-coded setwd() makes the script non-portable.
setwd("~/dev/prime_project/R_code/data/")
# loading the variables names based on the NHANES code book
# (defines name_PCB, name_PCB_LC, name_PCB_LA, name_PCB_all and
#  name_other_variables used throughout this script)
source("./real_data/NHANES/subset_data_based_on_hypothesis/variable_names/exposure_name_loading.R")
# loading some helper functions for update LC
# (defines update_LC() and add_missing_indicator(); both appear to update
#  the data.table by reference — TODO confirm in LC_update.R)
source("./real_data/NHANES/subset_data_based_on_hypothesis/LC_update.R")
# loading some required packages
library(sas7bdat)
library(SASxport)
library(tidyverse)
library(data.table)
library(foreign)
##########################################################################################
# Are POPs associated with hemoglobin A1c in non-diabetics? ####
#######################################################################################
hemoglobin_data_sas <- read.sas7bdat("./real_data/NHANES/subset_data_based_on_hypothesis/hemoglobin/nhance_hemoglobin.sas7bdat")
hemoglobin_data <- hemoglobin_data_sas %>% data.table(.)
# select the PCB exposure and outcome
outcome_name <- c("LBXGH")  # NHANES code for glycohemoglobin (A1c)
selected_col <- c(outcome_name, name_PCB, name_PCB_LC, name_other_variables)
hemoglobin_data_selected <- hemoglobin_data[,..selected_col]  # ..x is data.table's "column names held in a variable" selection
# replace all the PCB related exposures' name to PCB
# ([-1] leaves the outcome column LBXGH untouched)
colnames(hemoglobin_data_selected)[-1] <- gsub(pattern = "^(LBX|LBD)",
                                               replacement = "PCB",
                                               x = colnames(hemoglobin_data_selected)[-1],
                                               perl = TRUE)
name_PCB <- gsub(pattern = "^(LBX|LBD)",
                 replacement = "PCB",
                 x = name_PCB,
                 perl = TRUE)
name_PCB_LC <- gsub(pattern = "^(LBX|LBD)",
                    replacement = "PCB",
                    x = name_PCB_LC,
                    perl = TRUE)
name_PCB_LA <- gsub(pattern = "^(LBX|LBD)",
                    replacement = "PCB",
                    x = name_PCB_LA,
                    perl = TRUE)
name_PCB_all <- gsub(pattern = "^(LBX|LBD)",
                     replacement = "PCB",
                     x = name_PCB_all,
                     perl = TRUE)
# 0. remove all the NA response # 1623 rows removed
hemoglobin_data_tmp <- hemoglobin_data_selected[!(is.na(LBXGH)),]
(nrow(hemoglobin_data_selected) - nrow(hemoglobin_data_tmp)) %>% cat(., "row removed")
# 1. remove all empty PCB and PCBLC row, # 8351 rows removed
n1 <- nrow(hemoglobin_data_tmp)
selected_col <- c(name_PCB, name_PCB_LC)
# index is TRUE when every PCB and LC column in the row is NA
index <- hemoglobin_data_tmp[,rowSums(is.na(.SD)) == length(selected_col), .SDcols = selected_col]
hemoglobin_data_tmp <- hemoglobin_data_tmp[!index,]
(n1 - nrow(hemoglobin_data_tmp)) %>% cat(., "row removed")
# 2. recover the limit of detection and generate the comprehensive table
name_PCB_with_LC <- gsub(pattern = "LC",
                         replacement = "",
                         x = name_PCB_LC,
                         perl = TRUE)
update_LC(data = hemoglobin_data_tmp, PCB_names = name_PCB_with_LC)
# 2.1 remove all the PCB_LC
hemoglobin_data_tmp[,(name_PCB_LC) := NULL]
# 3 check the special values for indicating missing value for the confonder variables
# NOTE(review): eval(parse(text = n)) is a fragile idiom; the safer equivalent
# would be table(hemoglobin_data_tmp[[n]], exclude = NULL).
special_value_confonder <- list()
for(n in name_other_variables){
  special_value_confonder <- append(special_value_confonder, list(hemoglobin_data_tmp[,table(eval(parse(text = n)), exclude = NULL)]))
}
names(special_value_confonder) <- name_other_variables
# capture.output(print(special_value_confonder),
#                file = "./real_data/NHANES/hemoglobin/special_value_confonder.txt")
# 3.1 indicator variable for confonders
add_missing_indicator(data = hemoglobin_data_tmp, var = name_other_variables)
# 4 reorder the name
sorted_name <- sort(colnames(hemoglobin_data_tmp))
hemoglobin_data_tmp <- hemoglobin_data_tmp[, ..sorted_name]
# # 5 save output as csv
# write.csv(hemoglobin_data_tmp, file = "./real_data/NHANES/hemoglobin/nhance_hemoglobin_PCB_LC_comp.csv", row.names = FALSE)
#
# # 5 output as sas file
# write.foreign(df=hemoglobin_data_tmp, datafile="./real_data/NHANES/hemoglobin/SAS_nhance_hemoglobin_PCB_LC_comp.csv", codefile="./real_data/NHANES/hemoglobin/SAS_nhance_hemoglobin_PCB_LC_comp.sas", package="SAS")
# raw data with only response and PCB
# nhance_hemoglobin_PCB.csv
# outcome_name <- c("LBXGH")
# selected_col <- c(outcome_name, name_PCB)
# hemoglobin_data <- hemoglobin_data[,.SD, .SDcols = selected_col]
# hemoglobin_data_tmp <- hemoglobin_data[rowSums(is.na(hemoglobin_data)) < ncol(hemoglobin_data),]
##########################################################################################
# Are POPs associated with diabetes prevalence A1c in non-diabetics? ####
#######################################################################################
diabetes_data_sas <- read.sas7bdat("./real_data/NHANES/subset_data_based_on_hypothesis/diabetes/nhance_diabetes.sas7bdat")
diabetes_data <- diabetes_data_sas %>% data.table(.)
# select the PCB exposure and outcome
outcome_name <- c("diabetes")
selected_col <- c(outcome_name, name_PCB, name_PCB_LC, name_other_variables)
diabetes_data_selected <- diabetes_data[,..selected_col]
# replace all the PCB related exposures' name to PCB
# (unlike the hemoglobin section, no [-1] is needed here: "diabetes" does not
#  match ^(LBX|LBD), so the outcome column name is unaffected)
colnames(diabetes_data_selected) <- gsub(pattern = "^(LBX|LBD)",
                                         replacement = "PCB",
                                         x = colnames(diabetes_data_selected),
                                         perl = TRUE)
# NOTE(review): the four renames below are no-ops on a full run — the name
# vectors were already converted to PCB* in the hemoglobin section above.
name_PCB <- gsub(pattern = "^(LBX|LBD)",
                 replacement = "PCB",
                 x = name_PCB,
                 perl = TRUE)
name_PCB_LC <- gsub(pattern = "^(LBX|LBD)",
                    replacement = "PCB",
                    x = name_PCB_LC,
                    perl = TRUE)
name_PCB_LA <- gsub(pattern = "^(LBX|LBD)",
                    replacement = "PCB",
                    x = name_PCB_LA,
                    perl = TRUE)
name_PCB_all <- gsub(pattern = "^(LBX|LBD)",
                     replacement = "PCB",
                     x = name_PCB_all,
                     perl = TRUE)
# 0. remove all the NA response # 2 rows removed
diabetes_data_tmp <- diabetes_data_selected[!(is.na(diabetes)),]
(nrow(diabetes_data_selected) - nrow(diabetes_data_tmp)) %>% cat(., "row removed")
# 1. remove all empty PCB and PCBLC row, # 11163 rows removed
n1 <- nrow(diabetes_data_tmp)
selected_col <- c(name_PCB, name_PCB_LC)
index <- diabetes_data_tmp[,rowSums(is.na(.SD)) == length(selected_col), .SDcols = selected_col]
diabetes_data_tmp <- diabetes_data_tmp[!index,]
(n1 - nrow(diabetes_data_tmp)) %>% cat(., "row removed")
# 2. recover the limit of detection and generate the comprehensive table
name_PCB_with_LC <- gsub(pattern = "LC",
                         replacement = "",
                         x = name_PCB_LC,
                         perl = TRUE)
update_LC(data = diabetes_data_tmp, PCB_names = name_PCB_with_LC)
# 2.1 remove all the PCB_LC
diabetes_data_tmp[,(name_PCB_LC) := NULL]
# 3 check the special values for indicating missing value for the confonder variables
special_value_confonder <- list()
for(n in name_other_variables){
  special_value_confonder <- append(special_value_confonder, list(diabetes_data_tmp[,table(eval(parse(text = n)), exclude = NULL)]))
}
names(special_value_confonder) <- name_other_variables
# capture.output(print(special_value_confonder),
#                file = "./real_data/NHANES/diabetes/special_value_confonder.txt")
# 3.1 indicator variable for confonders
add_missing_indicator(data = diabetes_data_tmp, var = name_other_variables)
# 4 reorder the name
sorted_name <- sort(colnames(diabetes_data_tmp))
diabetes_data_tmp <- diabetes_data_tmp[, ..sorted_name]
# # 5 save output as csv
# write.csv(diabetes_data_tmp, file = "./real_data/NHANES/diabetes/nhance_diabetes_PCB_LC_comp.csv", row.names = FALSE)
#
# # 5 output as sas file
# write.foreign(df=diabetes_data_tmp, datafile="./real_data/NHANES/diabetes/SAS_nhance_diabetes_PCB_LC_comp.csv", codefile="./real_data/NHANES/diabetes/SAS_nhance_diabetes_PCB_LC_comp.sas", package="SAS")
# raw data with only response and PCB
# nhance_diabetes_PCB.csv
# outcome_name <- c("LBXGH")
# selected_col <- c(outcome_name, name_PCB)
# diabetes_data <- diabetes_data[,.SD, .SDcols = selected_col]
# diabetes_data_tmp <- diabetes_data[rowSums(is.na(diabetes_data)) < ncol(diabetes_data),]
# write the renamed-but-unfiltered diabetes table for downstream use
write.csv(diabetes_data_selected, file = "./real_data/NHANES/subset_data_based_on_hypothesis/diabetes/nhance_diabetes_PCB.csv", row.names = FALSE)
|
/R_code/data/real_data/NHANES/subset_data_based_on_hypothesis/missing_PCB_and_LC.R
|
no_license
|
wal615/prime_project
|
R
| false
| false
| 8,421
|
r
|
# Build analysis-ready NHANES data sets for two hypotheses (hemoglobin A1c
# and diabetes prevalence vs. PCB exposure): read the SAS exports, harmonize
# PCB-related column names, recover values below the limit of detection (LC),
# add missingness indicators for confounders, and write out a CSV.
# NOTE(review): the hard-coded setwd() makes the script non-portable.
setwd("~/dev/prime_project/R_code/data/")
# loading the variables names based on the NHANES code book
# (defines name_PCB, name_PCB_LC, name_PCB_LA, name_PCB_all and
#  name_other_variables used throughout this script)
source("./real_data/NHANES/subset_data_based_on_hypothesis/variable_names/exposure_name_loading.R")
# loading some helper functions for update LC
# (defines update_LC() and add_missing_indicator(); both appear to update
#  the data.table by reference — TODO confirm in LC_update.R)
source("./real_data/NHANES/subset_data_based_on_hypothesis/LC_update.R")
# loading some required packages
library(sas7bdat)
library(SASxport)
library(tidyverse)
library(data.table)
library(foreign)
##########################################################################################
# Are POPs associated with hemoglobin A1c in non-diabetics? ####
#######################################################################################
hemoglobin_data_sas <- read.sas7bdat("./real_data/NHANES/subset_data_based_on_hypothesis/hemoglobin/nhance_hemoglobin.sas7bdat")
hemoglobin_data <- hemoglobin_data_sas %>% data.table(.)
# select the PCB exposure and outcome
outcome_name <- c("LBXGH")  # NHANES code for glycohemoglobin (A1c)
selected_col <- c(outcome_name, name_PCB, name_PCB_LC, name_other_variables)
hemoglobin_data_selected <- hemoglobin_data[,..selected_col]  # ..x is data.table's "column names held in a variable" selection
# replace all the PCB related exposures' name to PCB
# ([-1] leaves the outcome column LBXGH untouched)
colnames(hemoglobin_data_selected)[-1] <- gsub(pattern = "^(LBX|LBD)",
                                               replacement = "PCB",
                                               x = colnames(hemoglobin_data_selected)[-1],
                                               perl = TRUE)
name_PCB <- gsub(pattern = "^(LBX|LBD)",
                 replacement = "PCB",
                 x = name_PCB,
                 perl = TRUE)
name_PCB_LC <- gsub(pattern = "^(LBX|LBD)",
                    replacement = "PCB",
                    x = name_PCB_LC,
                    perl = TRUE)
name_PCB_LA <- gsub(pattern = "^(LBX|LBD)",
                    replacement = "PCB",
                    x = name_PCB_LA,
                    perl = TRUE)
name_PCB_all <- gsub(pattern = "^(LBX|LBD)",
                     replacement = "PCB",
                     x = name_PCB_all,
                     perl = TRUE)
# 0. remove all the NA response # 1623 rows removed
hemoglobin_data_tmp <- hemoglobin_data_selected[!(is.na(LBXGH)),]
(nrow(hemoglobin_data_selected) - nrow(hemoglobin_data_tmp)) %>% cat(., "row removed")
# 1. remove all empty PCB and PCBLC row, # 8351 rows removed
n1 <- nrow(hemoglobin_data_tmp)
selected_col <- c(name_PCB, name_PCB_LC)
# index is TRUE when every PCB and LC column in the row is NA
index <- hemoglobin_data_tmp[,rowSums(is.na(.SD)) == length(selected_col), .SDcols = selected_col]
hemoglobin_data_tmp <- hemoglobin_data_tmp[!index,]
(n1 - nrow(hemoglobin_data_tmp)) %>% cat(., "row removed")
# 2. recover the limit of detection and generate the comprehensive table
name_PCB_with_LC <- gsub(pattern = "LC",
                         replacement = "",
                         x = name_PCB_LC,
                         perl = TRUE)
update_LC(data = hemoglobin_data_tmp, PCB_names = name_PCB_with_LC)
# 2.1 remove all the PCB_LC
hemoglobin_data_tmp[,(name_PCB_LC) := NULL]
# 3 check the special values for indicating missing value for the confonder variables
# NOTE(review): eval(parse(text = n)) is a fragile idiom; the safer equivalent
# would be table(hemoglobin_data_tmp[[n]], exclude = NULL).
special_value_confonder <- list()
for(n in name_other_variables){
  special_value_confonder <- append(special_value_confonder, list(hemoglobin_data_tmp[,table(eval(parse(text = n)), exclude = NULL)]))
}
names(special_value_confonder) <- name_other_variables
# capture.output(print(special_value_confonder),
#                file = "./real_data/NHANES/hemoglobin/special_value_confonder.txt")
# 3.1 indicator variable for confonders
add_missing_indicator(data = hemoglobin_data_tmp, var = name_other_variables)
# 4 reorder the name
sorted_name <- sort(colnames(hemoglobin_data_tmp))
hemoglobin_data_tmp <- hemoglobin_data_tmp[, ..sorted_name]
# # 5 save output as csv
# write.csv(hemoglobin_data_tmp, file = "./real_data/NHANES/hemoglobin/nhance_hemoglobin_PCB_LC_comp.csv", row.names = FALSE)
#
# # 5 output as sas file
# write.foreign(df=hemoglobin_data_tmp, datafile="./real_data/NHANES/hemoglobin/SAS_nhance_hemoglobin_PCB_LC_comp.csv", codefile="./real_data/NHANES/hemoglobin/SAS_nhance_hemoglobin_PCB_LC_comp.sas", package="SAS")
# raw data with only response and PCB
# nhance_hemoglobin_PCB.csv
# outcome_name <- c("LBXGH")
# selected_col <- c(outcome_name, name_PCB)
# hemoglobin_data <- hemoglobin_data[,.SD, .SDcols = selected_col]
# hemoglobin_data_tmp <- hemoglobin_data[rowSums(is.na(hemoglobin_data)) < ncol(hemoglobin_data),]
##########################################################################################
# Are POPs associated with diabetes prevalence A1c in non-diabetics? ####
#######################################################################################
diabetes_data_sas <- read.sas7bdat("./real_data/NHANES/subset_data_based_on_hypothesis/diabetes/nhance_diabetes.sas7bdat")
diabetes_data <- diabetes_data_sas %>% data.table(.)
# select the PCB exposure and outcome
outcome_name <- c("diabetes")
selected_col <- c(outcome_name, name_PCB, name_PCB_LC, name_other_variables)
diabetes_data_selected <- diabetes_data[,..selected_col]
# replace all the PCB related exposures' name to PCB
# (unlike the hemoglobin section, no [-1] is needed here: "diabetes" does not
#  match ^(LBX|LBD), so the outcome column name is unaffected)
colnames(diabetes_data_selected) <- gsub(pattern = "^(LBX|LBD)",
                                         replacement = "PCB",
                                         x = colnames(diabetes_data_selected),
                                         perl = TRUE)
# NOTE(review): the four renames below are no-ops on a full run — the name
# vectors were already converted to PCB* in the hemoglobin section above.
name_PCB <- gsub(pattern = "^(LBX|LBD)",
                 replacement = "PCB",
                 x = name_PCB,
                 perl = TRUE)
name_PCB_LC <- gsub(pattern = "^(LBX|LBD)",
                    replacement = "PCB",
                    x = name_PCB_LC,
                    perl = TRUE)
name_PCB_LA <- gsub(pattern = "^(LBX|LBD)",
                    replacement = "PCB",
                    x = name_PCB_LA,
                    perl = TRUE)
name_PCB_all <- gsub(pattern = "^(LBX|LBD)",
                     replacement = "PCB",
                     x = name_PCB_all,
                     perl = TRUE)
# 0. remove all the NA response # 2 rows removed
diabetes_data_tmp <- diabetes_data_selected[!(is.na(diabetes)),]
(nrow(diabetes_data_selected) - nrow(diabetes_data_tmp)) %>% cat(., "row removed")
# 1. remove all empty PCB and PCBLC row, # 11163 rows removed
n1 <- nrow(diabetes_data_tmp)
selected_col <- c(name_PCB, name_PCB_LC)
index <- diabetes_data_tmp[,rowSums(is.na(.SD)) == length(selected_col), .SDcols = selected_col]
diabetes_data_tmp <- diabetes_data_tmp[!index,]
(n1 - nrow(diabetes_data_tmp)) %>% cat(., "row removed")
# 2. recover the limit of detection and generate the comprehensive table
name_PCB_with_LC <- gsub(pattern = "LC",
                         replacement = "",
                         x = name_PCB_LC,
                         perl = TRUE)
update_LC(data = diabetes_data_tmp, PCB_names = name_PCB_with_LC)
# 2.1 remove all the PCB_LC
diabetes_data_tmp[,(name_PCB_LC) := NULL]
# 3 check the special values for indicating missing value for the confonder variables
special_value_confonder <- list()
for(n in name_other_variables){
  special_value_confonder <- append(special_value_confonder, list(diabetes_data_tmp[,table(eval(parse(text = n)), exclude = NULL)]))
}
names(special_value_confonder) <- name_other_variables
# capture.output(print(special_value_confonder),
#                file = "./real_data/NHANES/diabetes/special_value_confonder.txt")
# 3.1 indicator variable for confonders
add_missing_indicator(data = diabetes_data_tmp, var = name_other_variables)
# 4 reorder the name
sorted_name <- sort(colnames(diabetes_data_tmp))
diabetes_data_tmp <- diabetes_data_tmp[, ..sorted_name]
# # 5 save output as csv
# write.csv(diabetes_data_tmp, file = "./real_data/NHANES/diabetes/nhance_diabetes_PCB_LC_comp.csv", row.names = FALSE)
#
# # 5 output as sas file
# write.foreign(df=diabetes_data_tmp, datafile="./real_data/NHANES/diabetes/SAS_nhance_diabetes_PCB_LC_comp.csv", codefile="./real_data/NHANES/diabetes/SAS_nhance_diabetes_PCB_LC_comp.sas", package="SAS")
# raw data with only response and PCB
# nhance_diabetes_PCB.csv
# outcome_name <- c("LBXGH")
# selected_col <- c(outcome_name, name_PCB)
# diabetes_data <- diabetes_data[,.SD, .SDcols = selected_col]
# diabetes_data_tmp <- diabetes_data[rowSums(is.na(diabetes_data)) < ncol(diabetes_data),]
# write the renamed-but-unfiltered diabetes table for downstream use
write.csv(diabetes_data_selected, file = "./real_data/NHANES/subset_data_based_on_hypothesis/diabetes/nhance_diabetes_PCB.csv", row.names = FALSE)
|
## ui.R
## Shiny UI for an rCharts demo: chart-type and year selectors in the
## sidebar; an interactive NVD3 chart plus a static plot in the main panel.
library(shiny)
library(rCharts)
## NOTE(review): conditionalPanel()'s first argument should be a JavaScript
## condition string; here titlePanel() is passed in its place — looks like a
## leftover from editing; confirm the title renders as intended.
shinyUI(fluidPage( conditionalPanel(
  titlePanel("Przykład rCharts")),
  sidebarLayout(
    sidebarPanel(
      # chart type: bar ("slupkowy") or line ("liniowy")
      selectInput("rodzaj", label = "Wybierz rodzaj wykresu:",
                  choices = list("slupkowy", "liniowy"),
                  selected = "liniowy"),
      # year of data to display
      selectInput("rok", label = "Wybierz rok:",
                  choices = as.character(1958:2009),
                  selected = "2009")
    ),
    mainPanel(
      h3(p("G3 - Jak długość życia [jej rozkład] różni się pomiędzy płciami?")),
      h3(p(" ")),
      showOutput("dekompozycja", lib="NVD3"), # NVD3 JavaScript charting library
      plotOutput("smoothPlot", width=1200, height=800)
    )
))
)
|
/magisterskie/5_rok/1_semestr/TWDwR/laboratoria/RCharts/G3/ui.R
|
no_license
|
sommermarta/studia
|
R
| false
| false
| 733
|
r
|
## ui.R
## Shiny UI for an rCharts demo: chart-type and year selectors in the
## sidebar; an interactive NVD3 chart plus a static plot in the main panel.
library(shiny)
library(rCharts)
## NOTE(review): conditionalPanel()'s first argument should be a JavaScript
## condition string; here titlePanel() is passed in its place — looks like a
## leftover from editing; confirm the title renders as intended.
shinyUI(fluidPage( conditionalPanel(
  titlePanel("Przykład rCharts")),
  sidebarLayout(
    sidebarPanel(
      # chart type: bar ("slupkowy") or line ("liniowy")
      selectInput("rodzaj", label = "Wybierz rodzaj wykresu:",
                  choices = list("slupkowy", "liniowy"),
                  selected = "liniowy"),
      # year of data to display
      selectInput("rok", label = "Wybierz rok:",
                  choices = as.character(1958:2009),
                  selected = "2009")
    ),
    mainPanel(
      h3(p("G3 - Jak długość życia [jej rozkład] różni się pomiędzy płciami?")),
      h3(p(" ")),
      showOutput("dekompozycja", lib="NVD3"), # NVD3 JavaScript charting library
      plotOutput("smoothPlot", width=1200, height=800)
    )
))
)
|
# Plot "lolliplots" for the overall top TADs ranked by adjusted combined
# p-value, optionally restricted to one comparison type given on the command
# line; writes one multi-panel image to PLOT_SIGNIF_TADS_OVERALLTOP/.
options(scipen=100)  # avoid scientific notation in printed output/filenames
setDir=""
# Rscript plot_signif_TADs_overallTop.R
# Rscript plot_signif_TADs_overallTop.R wt_vs_mut
# Rscript plot_signif_TADs_overallTop.R norm_vs_tumor
# Rscript plot_signif_TADs_overallTop.R subtypes
script_name <- "plot_signif_TADs_overallTop.R"
startTime <- Sys.time()
cat("> START ", script_name, "\n")
SSHFS <- FALSE
buildData <- TRUE
require(foreach)
require(doMC)
registerDoMC(ifelse(SSHFS, 2, 40))  # 40 parallel workers on the server, 2 over SSHFS
require(ggplot2)
require(reshape2)
# NOTE(review): ../Cancer_HiC_data_TAD_DA/utils_fct.R is sourced twice
# (here and three lines below) — redundant but harmless.
source("../Cancer_HiC_data_TAD_DA/utils_fct.R")
# source("../Yuanlong_Cancer_HiC_data_TAD_DA/subtype_cols.R")
source("/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2/TAD_DE_utils.R")
source("../2_Yuanlong_Cancer_HiC_data_TAD_DA/utils_fct.R")
source("../Cancer_HiC_data_TAD_DA/utils_fct.R")
source("plot_lolliTAD_funct.R")  # defines plot_lolliTAD_ds() used below
source("../Cancer_HiC_data_TAD_DA/colors_utils.R")
source("subtype_cols.R")
plotType <- "png"
# NOTE(review): 400 (pixels) vs 7 (inches) mixes units, but myHeight/myWidth
# are unused below — ggsave uses outHeightGG/outWidthGG — so this is dead code.
myHeight <- ifelse(plotType=="png", 400, 7)
myWidth <- myHeight
plotCex <- 1.4
nToPlot <- 10   # number of top-ranked TADs to draw
nPlotted <- nToPlot
pipOutFolder <- file.path("PIPELINE", "OUTPUT_FOLDER")
# gene annotation: entrezID -> genomic position/symbol (GRCh37)
entrez2symb_dt <- read.delim(file.path(setDir,
                                       "/mnt/ed4/marie/entrez2synonym/entrez/ENTREZ_POS/gff_entrez_position_GRCh37p13_nodup.txt"),
                             header=T, stringsAsFactors = FALSE)
entrez2symb_dt$entrezID <- as.character(entrez2symb_dt$entrezID)
pipFolder <- file.path(".")
stopifnot(dir.exists(pipFolder))
pipOutFolder <- file.path(pipFolder, "PIPELINE", "OUTPUT_FOLDER")
stopifnot(dir.exists(pipOutFolder))
outFolder <- "PLOT_SIGNIF_TADS_OVERALLTOP"
dir.create(outFolder, recursive=TRUE)
args <- commandArgs(trailingOnly = TRUE)
toplot <- args[1]  # optional comparison type (e.g. "wt_vs_mut"); NA when absent
final_DT <- get(load("CREATE_FINAL_TABLE/all_result_dt.Rdata"))
final_DT <- final_DT[order(final_DT$adjPvalComb),]  # most significant first
fileprefix <- ""
if(length(args) == 1) {
  # restrict to the requested comparison type
  # NOTE(review): all_cmps is not defined in this file — presumably provided
  # by one of the sourced utils scripts; confirm.
  final_DT$cmpType <- all_cmps[final_DT$exprds]
  stopifnot(!is.na(final_DT$cmpType))
  stopifnot(toplot %in% final_DT$cmpType)
  final_DT <- final_DT[final_DT$cmpType %in% toplot,]
  fileprefix <- paste0(toplot, "_")
}
### BUILD SIGNIF ALONG FDR THRESH
cat("... start retrieving FDR signif. TADs\n")
# one lolliplot per top TAD; panel title carries dataset, TAD id and p-value
plotList <- list()
for(i_tad in 1:nToPlot) {
  plotList[[i_tad]] <- plot_lolliTAD_ds(exprds = final_DT$exprds[i_tad],
                                        hicds = final_DT$hicds[i_tad],
                                        all_TADs = final_DT$region[i_tad],
                                        mytitle = paste0(final_DT$exprds[i_tad], "-",final_DT$hicds[i_tad],"-",final_DT$region[i_tad]," (", formatC(final_DT$adjPvalComb[i_tad], format = "e", digits = 2), ")"),
                                        orderByLolli = "startPos")
} # end-for iterating over TADs to plot
outFile <- file.path(outFolder, paste0(fileprefix, "_overallTop_adjCombPvalSignifTADs", "_nToPlot", nToPlot, ".", plotType ))
mytit <- paste0("overall Top Adj Pval Comb")
# NOTE(review): grid.arrange/textGrob/gpar come from gridExtra/grid, which are
# not loaded explicitly here — presumably attached by a sourced file; confirm.
all_plots <- do.call(grid.arrange, c(plotList, list(ncol=ifelse(nToPlot == 1, 1, 2), top=textGrob(mytit, gp=gpar(fontsize=20,font=2)))))
outHeightGG <- min(c(7 * nPlotted/2, 49))  # cap height below ggsave's 50-inch limit
outWidthGG <- ifelse(nPlotted == 1, 20/2, 20)
ggsave(filename = outFile, all_plots, width=outWidthGG, height = outHeightGG)
cat("... written: ", outFile, "\n")
##############################
cat("***** DONE: ", script_name, "\n")
cat(paste0(startTime, "\n", Sys.time(), "\n"))
|
/plot_signif_TADs_overallTop.R
|
no_license
|
marzuf/v2_Yuanlong_Cancer_HiC_data_TAD_DA
|
R
| false
| false
| 3,362
|
r
|
# Plot "lolliplots" for the overall top TADs ranked by adjusted combined
# p-value, optionally restricted to one comparison type given on the command
# line; writes one multi-panel image to PLOT_SIGNIF_TADS_OVERALLTOP/.
options(scipen=100)  # avoid scientific notation in printed output/filenames
setDir=""
# Rscript plot_signif_TADs_overallTop.R
# Rscript plot_signif_TADs_overallTop.R wt_vs_mut
# Rscript plot_signif_TADs_overallTop.R norm_vs_tumor
# Rscript plot_signif_TADs_overallTop.R subtypes
script_name <- "plot_signif_TADs_overallTop.R"
startTime <- Sys.time()
cat("> START ", script_name, "\n")
SSHFS <- FALSE
buildData <- TRUE
require(foreach)
require(doMC)
registerDoMC(ifelse(SSHFS, 2, 40))  # 40 parallel workers on the server, 2 over SSHFS
require(ggplot2)
require(reshape2)
# NOTE(review): ../Cancer_HiC_data_TAD_DA/utils_fct.R is sourced twice
# (here and three lines below) — redundant but harmless.
source("../Cancer_HiC_data_TAD_DA/utils_fct.R")
# source("../Yuanlong_Cancer_HiC_data_TAD_DA/subtype_cols.R")
source("/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2/TAD_DE_utils.R")
source("../2_Yuanlong_Cancer_HiC_data_TAD_DA/utils_fct.R")
source("../Cancer_HiC_data_TAD_DA/utils_fct.R")
source("plot_lolliTAD_funct.R")  # defines plot_lolliTAD_ds() used below
source("../Cancer_HiC_data_TAD_DA/colors_utils.R")
source("subtype_cols.R")
plotType <- "png"
# NOTE(review): 400 (pixels) vs 7 (inches) mixes units, but myHeight/myWidth
# are unused below — ggsave uses outHeightGG/outWidthGG — so this is dead code.
myHeight <- ifelse(plotType=="png", 400, 7)
myWidth <- myHeight
plotCex <- 1.4
nToPlot <- 10   # number of top-ranked TADs to draw
nPlotted <- nToPlot
pipOutFolder <- file.path("PIPELINE", "OUTPUT_FOLDER")
# gene annotation: entrezID -> genomic position/symbol (GRCh37)
entrez2symb_dt <- read.delim(file.path(setDir,
                                       "/mnt/ed4/marie/entrez2synonym/entrez/ENTREZ_POS/gff_entrez_position_GRCh37p13_nodup.txt"),
                             header=T, stringsAsFactors = FALSE)
entrez2symb_dt$entrezID <- as.character(entrez2symb_dt$entrezID)
pipFolder <- file.path(".")
stopifnot(dir.exists(pipFolder))
pipOutFolder <- file.path(pipFolder, "PIPELINE", "OUTPUT_FOLDER")
stopifnot(dir.exists(pipOutFolder))
outFolder <- "PLOT_SIGNIF_TADS_OVERALLTOP"
dir.create(outFolder, recursive=TRUE)
args <- commandArgs(trailingOnly = TRUE)
toplot <- args[1]  # optional comparison type (e.g. "wt_vs_mut"); NA when absent
final_DT <- get(load("CREATE_FINAL_TABLE/all_result_dt.Rdata"))
final_DT <- final_DT[order(final_DT$adjPvalComb),]  # most significant first
fileprefix <- ""
if(length(args) == 1) {
  # restrict to the requested comparison type
  # NOTE(review): all_cmps is not defined in this file — presumably provided
  # by one of the sourced utils scripts; confirm.
  final_DT$cmpType <- all_cmps[final_DT$exprds]
  stopifnot(!is.na(final_DT$cmpType))
  stopifnot(toplot %in% final_DT$cmpType)
  final_DT <- final_DT[final_DT$cmpType %in% toplot,]
  fileprefix <- paste0(toplot, "_")
}
### BUILD SIGNIF ALONG FDR THRESH
cat("... start retrieving FDR signif. TADs\n")
# one lolliplot per top TAD; panel title carries dataset, TAD id and p-value
plotList <- list()
for(i_tad in 1:nToPlot) {
  plotList[[i_tad]] <- plot_lolliTAD_ds(exprds = final_DT$exprds[i_tad],
                                        hicds = final_DT$hicds[i_tad],
                                        all_TADs = final_DT$region[i_tad],
                                        mytitle = paste0(final_DT$exprds[i_tad], "-",final_DT$hicds[i_tad],"-",final_DT$region[i_tad]," (", formatC(final_DT$adjPvalComb[i_tad], format = "e", digits = 2), ")"),
                                        orderByLolli = "startPos")
} # end-for iterating over TADs to plot
outFile <- file.path(outFolder, paste0(fileprefix, "_overallTop_adjCombPvalSignifTADs", "_nToPlot", nToPlot, ".", plotType ))
mytit <- paste0("overall Top Adj Pval Comb")
# NOTE(review): grid.arrange/textGrob/gpar come from gridExtra/grid, which are
# not loaded explicitly here — presumably attached by a sourced file; confirm.
all_plots <- do.call(grid.arrange, c(plotList, list(ncol=ifelse(nToPlot == 1, 1, 2), top=textGrob(mytit, gp=gpar(fontsize=20,font=2)))))
outHeightGG <- min(c(7 * nPlotted/2, 49))  # cap height below ggsave's 50-inch limit
outWidthGG <- ifelse(nPlotted == 1, 20/2, 20)
ggsave(filename = outFile, all_plots, width=outWidthGG, height = outHeightGG)
cat("... written: ", outFile, "\n")
##############################
cat("***** DONE: ", script_name, "\n")
cat(paste0(startTime, "\n", Sys.time(), "\n"))
|
### R code from vignette source 'cias.Rnw'
### NOTE(review): this file is Sweave/Stangle output extracted from the
### cias.Rnw vignette — edit the vignette source, not this file.
###################################################
### code chunk number 1: set_seed_chunk
###################################################
set.seed(0)
###################################################
### code chunk number 2: time_saver
###################################################
calc_from_scratch <- TRUE
###################################################
### code chunk number 3: cias.Rnw:82-85
###################################################
ignore <- require(multivator,quietly=TRUE)
ignore <- require(abind,quietly=TRUE)
ignore <- require(emulator,quietly=TRUE)
###################################################
### code chunk number 4: setupcias
###################################################
# 21-point design over 6 inputs: 2 shared ("cias") + module-specific params;
# each module's rows zero out the other modules' parameters
jj <- latin.hypercube(21,6)
colnames(jj) <- c("cias1","cias2","A_p1","B_p1","B_p2","C_p1")
rownames(jj) <- c(
    paste("module_A_run",1:7,sep=""),
    paste("module_B_run",1:7,sep=""),
    paste("module_C_run",1:7,sep="")
    )
jj[1:7  ,  4:6 ] <- 0
jj[8:14 ,c(3,6)] <- 0
jj[15:21,  3:5 ] <- 0
real_cias_mdm <- mdm(jj, factor(rep(LETTERS[1:3],each=7)))
###################################################
### code chunk number 5: makedisplayableciasmdm
###################################################
# have to create a version that looks good.  The
# difference is that '0.000' displays as '0'.
jj <- xold(real_cias_mdm)
jj <- round(jj,3)
storage.mode(jj) <- 'character'
jj[nchar(jj)==3] <- paste(jj[nchar(jj)==3] ,'0',sep='')
jj[nchar(jj)==4] <- paste(jj[nchar(jj)==4] ,'0',sep='')
cias_mdm <- noquote(jj)
###################################################
### code chunk number 6: showcias
###################################################
cias_mdm
###################################################
### code chunk number 7: definemhp
###################################################
# hyperparameters: 3x3 scales matrix M and per-level 6x6 roughness array B
jjM <- matrix(1,3,3)
diag(jjM) <- 2
jjB <- matrix(0,6,6)
diag(jjB) <- 1
jjB <- abind(jjB,jjB,jjB,along=3)
cias_mhp <- mhp(M=jjM, B = jjB, levels=levels(real_cias_mdm),names=names(real_cias_mdm))
###################################################
### code chunk number 8: cheat
###################################################
# restore the real mdm object over the display-only version from chunk 5
cias_mdm <- real_cias_mdm
###################################################
### code chunk number 9: showsummary
###################################################
summary(cias_mhp)
###################################################
### code chunk number 10: cias.Rnw:187-194
###################################################
# per-module regressor functions: intercept + shared inputs + own params
cias_LoF <- list(
    A = function(x){ c(const=1,x[1:2],x[3  ]) },
    B = function(x){ c(const=1,x[1:2],x[4:5]) },
    C = function(x){ c(const=1,x[1:2],x[6  ]) }
    )
cias_beta <- 1:13
###################################################
### code chunk number 11: showlof
###################################################
cias_LoF
###################################################
### code chunk number 12: dosomestuff
###################################################
# synthesize observations from the model and bundle them into an experiment
cias_obs <- obs_maker(cias_mdm, cias_mhp, cias_LoF, cias_beta)
cias_expt <- experiment(cias_mdm , cias_obs)
###################################################
### code chunk number 13: definineunk
###################################################
# prediction points: one per module, at the center of the shared inputs
jj <- cias_mdm[1:3,]
types(jj) <- levels(jj)
xold(jj)[,3] <- 0
xold(jj)[,1:2] <- 0.5
rownames(jj) <- paste("m",LETTERS[1:3],sep=".")
cias_unknown <- jj
###################################################
### code chunk number 14: showunk
###################################################
cias_unknown
###################################################
### code chunk number 15: usemultem
###################################################
multem(cias_unknown, cias_expt, cias_mhp, cias_LoF, give=TRUE)
|
/inst/doc/cias.R
|
no_license
|
cran/multivator
|
R
| false
| false
| 3,985
|
r
|
### R code from vignette source 'cias.Rnw'
### NOTE(review): this file is Sweave/Stangle output extracted from the
### cias.Rnw vignette — edit the vignette source, not this file.
###################################################
### code chunk number 1: set_seed_chunk
###################################################
set.seed(0)
###################################################
### code chunk number 2: time_saver
###################################################
calc_from_scratch <- TRUE
###################################################
### code chunk number 3: cias.Rnw:82-85
###################################################
ignore <- require(multivator,quietly=TRUE)
ignore <- require(abind,quietly=TRUE)
ignore <- require(emulator,quietly=TRUE)
###################################################
### code chunk number 4: setupcias
###################################################
# 21-point design over 6 inputs: 2 shared ("cias") + module-specific params;
# each module's rows zero out the other modules' parameters
jj <- latin.hypercube(21,6)
colnames(jj) <- c("cias1","cias2","A_p1","B_p1","B_p2","C_p1")
rownames(jj) <- c(
    paste("module_A_run",1:7,sep=""),
    paste("module_B_run",1:7,sep=""),
    paste("module_C_run",1:7,sep="")
    )
jj[1:7  ,  4:6 ] <- 0
jj[8:14 ,c(3,6)] <- 0
jj[15:21,  3:5 ] <- 0
real_cias_mdm <- mdm(jj, factor(rep(LETTERS[1:3],each=7)))
###################################################
### code chunk number 5: makedisplayableciasmdm
###################################################
# have to create a version that looks good.  The
# difference is that '0.000' displays as '0'.
jj <- xold(real_cias_mdm)
jj <- round(jj,3)
storage.mode(jj) <- 'character'
jj[nchar(jj)==3] <- paste(jj[nchar(jj)==3] ,'0',sep='')
jj[nchar(jj)==4] <- paste(jj[nchar(jj)==4] ,'0',sep='')
cias_mdm <- noquote(jj)
###################################################
### code chunk number 6: showcias
###################################################
cias_mdm
###################################################
### code chunk number 7: definemhp
###################################################
# hyperparameters: 3x3 scales matrix M and per-level 6x6 roughness array B
jjM <- matrix(1,3,3)
diag(jjM) <- 2
jjB <- matrix(0,6,6)
diag(jjB) <- 1
jjB <- abind(jjB,jjB,jjB,along=3)
cias_mhp <- mhp(M=jjM, B = jjB, levels=levels(real_cias_mdm),names=names(real_cias_mdm))
###################################################
### code chunk number 8: cheat
###################################################
# restore the real mdm object over the display-only version from chunk 5
cias_mdm <- real_cias_mdm
###################################################
### code chunk number 9: showsummary
###################################################
summary(cias_mhp)
###################################################
### code chunk number 10: cias.Rnw:187-194
###################################################
# per-module regressor functions: intercept + shared inputs + own params
cias_LoF <- list(
    A = function(x){ c(const=1,x[1:2],x[3  ]) },
    B = function(x){ c(const=1,x[1:2],x[4:5]) },
    C = function(x){ c(const=1,x[1:2],x[6  ]) }
    )
cias_beta <- 1:13
###################################################
### code chunk number 11: showlof
###################################################
cias_LoF
###################################################
### code chunk number 12: dosomestuff
###################################################
# synthesize observations from the model and bundle them into an experiment
cias_obs <- obs_maker(cias_mdm, cias_mhp, cias_LoF, cias_beta)
cias_expt <- experiment(cias_mdm , cias_obs)
###################################################
### code chunk number 13: definineunk
###################################################
# prediction points: one per module, at the center of the shared inputs
jj <- cias_mdm[1:3,]
types(jj) <- levels(jj)
xold(jj)[,3] <- 0
xold(jj)[,1:2] <- 0.5
rownames(jj) <- paste("m",LETTERS[1:3],sep=".")
cias_unknown <- jj
###################################################
### code chunk number 14: showunk
###################################################
cias_unknown
###################################################
### code chunk number 15: usemultem
###################################################
multem(cias_unknown, cias_expt, cias_mhp, cias_LoF, give=TRUE)
|
# Exploratory analysis for the bike-sharing data set: tests the association
# between rental count and each candidate predictor via bikecourse_function()
# (course-provided Bikecourse package) and saves selected plots as JPEGs
# under plots/.
# NOTE(review): rm(list=ls()) is generally discouraged; kept here because the
# script is designed as a standalone clean-slate analysis.
rm(list=ls())
##Installing and loading libraries (RUN THIS PART LINE BY LINE!!)####
# Install and load package devtools
if (!requireNamespace("devtools"))
  install.packages("devtools")
library("devtools") #Use of version 2.0.2
# Install and load package Bikecourse (provides bikecourse_function())
install_github('NDFabri/Bike_sharing_course/packages/Bikecourse@main')
library("Bikecourse") #Use of version 0.1.0
# Install and load package tidyverse
if (!requireNamespace("tidyverse"))
  install.packages("tidyverse")
library("tidyverse") #Use of version 1.2.1
# Install and load package plyr (for revalue())
if (!requireNamespace("plyr"))
  install.packages("plyr")
library("plyr") #Use of version 1.8.4
##Loading datafile####
Data <- read_csv("data/train.csv")
##Check if there is an association between count and season####
# Convert season codes to character and give them readable labels
Data$season <- as.character(Data$season)
Data$season <- revalue(Data$season, c("1"="1:January-March", "2"="2:April-June", "3"="3:July-September", "4"="4:October-December"))
#See if there is an association between count and season
CS <- bikecourse_function(Data$count, Data$season, data=Data,
                          threshold = 0.05,
                          xlab = "season", ylab = "count",
                          yax = 900, size = 7)
CS #Display the boxplot of the association (interactive use)
#Saving the boxplot as a jpeg
# BUGFIX: a bare object name relies on auto-printing, which does NOT happen
# when the script is run via source()/Rscript, leaving an empty jpeg file.
# An explicit print() renders the ggplot on the open device in every context
# (R FAQ 7.22).
jpeg(file="plots/count_vs_season.jpeg")
print(CS)
dev.off()
##Check if there is an association between count and holiday####
# Convert holiday codes to character and give them readable labels
Data$holiday <- as.character(Data$holiday)
Data$holiday <- revalue(Data$holiday, c("1"="Yes", "0"="No"))
#See if there is an association between count and holiday
# (computed for the statistical test only; not displayed or saved)
CH <- bikecourse_function(Data$count, Data$holiday, data=Data,
                          threshold = 0.05,
                          xlab = "holiday", ylab = "count",
                          yax = 900, size = 7)
##Check if there is an association between count and workingday####
# Convert workingday codes to character and give them readable labels
Data$workingday <- as.character(Data$workingday)
Data$workingday <- revalue(Data$workingday, c("1"="Yes", "0"="No"))
#See if there is an association between count and workingday
# (computed for the statistical test only; not displayed or saved)
CW <- bikecourse_function(Data$count, Data$workingday, data=Data,
                          threshold = 0.05,
                          xlab = "workingday", ylab = "count",
                          yax = 900, size = 7)
##Check if there is an association between count and weather####
# Convert weather codes to character
Data$weather <- as.character(Data$weather)
#See if there is an association between count and weather
CWe <- bikecourse_function(Data$count, Data$weather, data=Data,
                           threshold = 0.05,
                           xlab = "weathertype", ylab = "count",
                           yax = 900, size = 7)
CWe #Display the boxplot of the association
#Saving the boxplot as a jpeg (print() required in non-interactive runs)
jpeg(file="plots/count_vs_weather.jpeg")
print(CWe)
dev.off()
##Check if there is an association between count and temp####
#See if there is an association between count and temp
CT <- bikecourse_function(Data$count, Data$temp, data=Data,
                          threshold = 0.05,
                          xlab = "temp", ylab = "count",
                          yax = 900, size = 7)
CT #Display the scatterplot of the association
#Saving the scatterplot as a jpeg (print() required in non-interactive runs)
jpeg(file="plots/count_vs_temp.jpeg")
print(CT)
dev.off()
##Check if there is an association between count and atemp####
#See if there is an association between count and atemp
CA <- bikecourse_function(Data$count, Data$atemp, data=Data,
                          threshold = 0.05,
                          xlab = "atemp", ylab = "count",
                          yax = 900, size = 7)
CA #Display the scatterplot of the association
#Saving the scatterplot as a jpeg (print() required in non-interactive runs)
jpeg(file="plots/count_vs_atemp.jpeg")
print(CA)
dev.off()
##Check if there is an association between count and humidity####
#See if there is an association between count and humidity
CHu <-bikecourse_function(Data$count, Data$humidity, data=Data,
                          threshold = 0.05,
                          xlab = "humidity", ylab = "count",
                          yax = 900, size = 7)
CHu #Display the scatterplot of the association
#Saving the scatterplot as a jpeg (print() required in non-interactive runs)
jpeg(file="plots/count_vs_humidity.jpeg")
print(CHu)
dev.off()
##Check if there is an association between count and windspeed####
#See if there is an association between count and windspeed
CWi <-bikecourse_function(Data$count, Data$windspeed, data=Data,
                          threshold = 0.05,
                          xlab = "windspeed", ylab = "count",
                          yax = 900, size = 7)
CWi #Display the scatterplot of the association
#Saving the scatterplot as a jpeg (print() required in non-interactive runs)
jpeg(file="plots/count_vs_windspeed.jpeg")
print(CWi)
dev.off()
|
/scripts/Bike sharing.R
|
no_license
|
NDFabri/Bike_sharing_course
|
R
| false
| false
| 4,716
|
r
|
# NOTE(review): `rm(list = ls())` removed -- wiping the global environment
# from inside a script is an anti-pattern (it destroys the caller's
# workspace when the script is source()d and does not actually reset the
# session).  Run the script in a fresh R session instead.
##Installing and loading libraries (RUN THIS PART LINE BY LINE!!)####
# Install and load package devtools
if (!requireNamespace("devtools", quietly = TRUE))
  install.packages("devtools")
library("devtools") #Use of version 2.0.2
# Install and load package Bikecourse
install_github('NDFabri/Bike_sharing_course/packages/Bikecourse@main')
library("Bikecourse") #Use of version 0.1.0
# Install and load package tidyverse
if (!requireNamespace("tidyverse", quietly = TRUE))
  install.packages("tidyverse")
library("tidyverse") #Use of version 1.2.1
# Install and load package plyr
if (!requireNamespace("plyr", quietly = TRUE))
  install.packages("plyr")
library("plyr") #Use of version 1.8.4
##Loading datafile####
Data <- read_csv("data/train.csv")
# NOTE(review): bare plot objects (e.g. `CS`) rely on R's interactive
# auto-printing, which does NOT happen under source()/Rscript -- the jpeg
# files would be created empty.  Plots are now rendered with print().
##Check if there is an association between count and season####
# Recode season as character and give the levels readable labels
Data$season <- as.character(Data$season)
Data$season <- revalue(Data$season, c("1" = "1:January-March", "2" = "2:April-June", "3" = "3:July-September", "4" = "4:October-December"))
# Test the association between count and season
CS <- bikecourse_function(Data$count, Data$season, data = Data,
                          threshold = 0.05,
                          xlab = "season", ylab = "count",
                          yax = 900, size = 7)
print(CS) # Show the boxplot of the association
# Save the boxplot as a jpeg
jpeg(file = "plots/count_vs_season.jpeg")
print(CS)
dev.off()
##Check if there is an association between count and holiday####
# Recode holiday as character and rename its values
Data$holiday <- as.character(Data$holiday)
Data$holiday <- revalue(Data$holiday, c("1" = "Yes", "0" = "No"))
# Test the association between count and holiday
# (plot object is created but intentionally not saved -- TODO confirm)
CH <- bikecourse_function(Data$count, Data$holiday, data = Data,
                          threshold = 0.05,
                          xlab = "holiday", ylab = "count",
                          yax = 900, size = 7)
##Check if there is an association between count and workingday####
# Recode workingday as character and rename its values
Data$workingday <- as.character(Data$workingday)
Data$workingday <- revalue(Data$workingday, c("1" = "Yes", "0" = "No"))
# Test the association between count and workingday (not saved)
CW <- bikecourse_function(Data$count, Data$workingday, data = Data,
                          threshold = 0.05,
                          xlab = "workingday", ylab = "count",
                          yax = 900, size = 7)
##Check if there is an association between count and weather####
# Recode weather as character
Data$weather <- as.character(Data$weather)
# Test the association between count and weather
CWe <- bikecourse_function(Data$count, Data$weather, data = Data,
                           threshold = 0.05,
                           xlab = "weathertype", ylab = "count",
                           yax = 900, size = 7)
print(CWe) # Show the boxplot of the association
# Save the boxplot as a jpeg
jpeg(file = "plots/count_vs_weather.jpeg")
print(CWe)
dev.off()
##Check if there is an association between count and temp####
CT <- bikecourse_function(Data$count, Data$temp, data = Data,
                          threshold = 0.05,
                          xlab = "temp", ylab = "count",
                          yax = 900, size = 7)
print(CT) # Show the scatterplot of the association
# Save the scatterplot as a jpeg
jpeg(file = "plots/count_vs_temp.jpeg")
print(CT)
dev.off()
##Check if there is an association between count and atemp####
CA <- bikecourse_function(Data$count, Data$atemp, data = Data,
                          threshold = 0.05,
                          xlab = "atemp", ylab = "count",
                          yax = 900, size = 7)
print(CA) # Show the scatterplot of the association
# Save the scatterplot as a jpeg
jpeg(file = "plots/count_vs_atemp.jpeg")
print(CA)
dev.off()
##Check if there is an association between count and humidity####
CHu <- bikecourse_function(Data$count, Data$humidity, data = Data,
                           threshold = 0.05,
                           xlab = "humidity", ylab = "count",
                           yax = 900, size = 7)
print(CHu) # Show the scatterplot of the association
# Save the scatterplot as a jpeg
jpeg(file = "plots/count_vs_humidity.jpeg")
print(CHu)
dev.off()
##Check if there is an association between count and windspeed####
CWi <- bikecourse_function(Data$count, Data$windspeed, data = Data,
                           threshold = 0.05,
                           xlab = "windspeed", ylab = "count",
                           yax = 900, size = 7)
print(CWi) # Show the scatterplot of the association
# Save the scatterplot as a jpeg
jpeg(file = "plots/count_vs_windspeed.jpeg")
print(CWi)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abundance.R
\name{abundance}
\alias{abundance}
\title{Abundance}
\usage{
abundance(
phylo_ob,
level = "genus",
id = "abcno",
sample_id = "Time",
relative_abun = TRUE,
strata = NULL,
strata_val = NULL,
remove_collapsed_taxa = FALSE,
select_taxa = NULL,
select_level = NULL
)
}
\arguments{
\item{phylo_ob}{Phyloseq object with metadata in sample_data.}
\item{level}{Taxonomic rank from tax_table, case insensitive; default is "genus".}
\item{id}{Subject id; default is "abcno".}
\item{sample_id}{Sample identifier, to be used for repeated sampling of the same subject or different compartments; default is "Time".}
\item{relative_abun}{Use relative abundances, else absolute; default is TRUE.}
\item{strata}{Name of variable for stratification, e.g. "Time";}
\item{strata_val}{Value in variable @strata to keep; default is NULL.}
\item{remove_collapsed_taxa}{Will remove unclassified taxa which are elsewise merged; default is FALSE.}
\item{select_taxa}{Choose all taxa from a taxonomic variable by full or partial name, e.g. "Staphylococcus", "Staph", or "coccus"; default is NULL.}
\item{select_level}{Taxonomic rank of the @select_taxa; default is NULL.}
\item{predictor}{Predictor of interest for statistics/plotting in sample_data.}
}
\value{
A data.frame
}
\description{
For creating relative abundance data.frame
}
|
/man/abundance.Rd
|
no_license
|
jstokholm/abundance
|
R
| false
| true
| 1,401
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abundance.R
\name{abundance}
\alias{abundance}
\title{Abundance}
\usage{
abundance(
phylo_ob,
level = "genus",
id = "abcno",
sample_id = "Time",
relative_abun = TRUE,
strata = NULL,
strata_val = NULL,
remove_collapsed_taxa = FALSE,
select_taxa = NULL,
select_level = NULL
)
}
\arguments{
\item{phylo_ob}{Phyloseq object with metadata in sample_data.}
\item{level}{Taxonomic rank from tax_table, case insensitive; default is "genus".}
\item{id}{Subject id; default is "abcno".}
\item{sample_id}{Sample identifier, to be used for repeated sampling of the same subject or different compartments; default is "Time".}
\item{relative_abun}{Use relative abundances, else absolute; default is TRUE.}
\item{strata}{Name of variable for stratification, e.g. "Time";}
\item{strata_val}{Value in variable @strata to keep; default is NULL.}
\item{remove_collapsed_taxa}{Will remove unclassified taxa which are elsewise merged; default is FALSE.}
\item{select_taxa}{Choose all taxa from a taxonomic variable by full or partial name, e.g. "Staphylococcus", "Staph", or "coccus"; default is NULL.}
\item{select_level}{Taxonomic rank of the @select_taxa; default is NULL.}
\item{predictor}{Predictor of interest for statistics/plotting in sample_data.}
}
\value{
A data.frame
}
\description{
For creating relative abundance data.frame
}
|
\name{bas.glm}
\alias{bas.glm}
\title{Bayesian Adaptive Sampling Without Replacement for Variable Selection in Generalized Linear Models}
\description{Sample with or without replacement from a posterior distribution on GLMs}
\usage{
bas.glm(formula, family = binomial(link = "logit"), data,
weights, subset, offset, na.action="na.omit",
n.models = NULL, betaprior=CCH(alpha=.5, beta=nrow(data), s=0),
modelprior = beta.binomial(1,1),
initprobs = "Uniform", method = "MCMC", update = NULL,
bestmodel = NULL, prob.rw = 0.5,
MCMC.iterations = NULL, control = glm.control(),
laplace=FALSE, renormalize=FALSE)
}
\arguments{
\item{formula}{generalized linear model formula for the full model with all
predictors, Y ~ X. All code assumes that an intercept will be
included in each model.}
\item{family}{a description of the error distribution and link
function for exponential family;
currently only binomial() with the logistic link and poisson() with the log link are available.}
\item{data}{data frame}
\item{weights}{optional vector of weights to be used in the fitting
process. May be missing in which case weights are 1.}
\item{subset}{subset of data used in fitting}
\item{offset}{a priori known component to be included in the linear
predictor; by default 0.}
\item{na.action}{a function which indicates what should happen when the data contain NAs. The default is "na.omit".}
\item{n.models}{number of unique models to keep. If NULL, BAS will
attempt to enumerate unless p > 35 or method="MCMC". For any of
methods using MCMC algorithms that sample with replacement, sampling
will stop when the number of iterations exceeds the min of
'n.models' or 'MCMC.iterations' and on exit 'n.models' is updated to
reflect the unique number of models that have been sampled. }
\item{betaprior}{Prior on coefficients for model coefficients (except
intercept). Options include \code{\link{g.prior}}, \code{\link{CCH}}, \code{\link{robust}}, \code{\link{intrinsic}},
\code{\link{beta.prime}}, \code{\link{EB.local}}, \code{\link{AIC}}, and \code{\link{BIC}}. }
\item{modelprior}{Family of prior distribution on the models. Choices
include \code{\link{uniform}}, \code{\link{Bernoulli}},
\code{\link{beta.binomial}}, truncated Beta-Binomial,
\code{\link{tr.beta.binomial}}, and truncated power family \code{\link{tr.power.prior}}.}
\item{initprobs}{vector of length p with the initial inclusion
probabilities used for sampling without replacement (the intercept
will be included with probability one and does not need to be added here) or a character
string giving the method used to construct the sampling probabilities
if "Uniform" each predictor variable is equally likely to be
sampled (equivalent to random sampling without replacement). If
"eplogp", use the \code{\link{eplogprob}} function to approximate the
Bayes factor using p-values to find initial marginal inclusion probabilities and
sample without replacement using these
inclusion probabilities, which may be updated using estimates of the
marginal inclusion probabilities. "eplogp" assumes that MLEs from the
full model exist; for problems where that is not the case or 'p' is
large, initial sampling probabilities may be obtained using
\code{\link{eplogprob.marg}} which fits a model to each predictor separately.
For variables that should always be
included set the corresponding initprobs to 1. To run a
Markov Chain to provide initial estimates of marginal
inclusion probabilities, use method="MCMC+BAS" below.}
\item{method}{A character variable indicating which sampling method to
use: method="BAS" uses Bayesian Adaptive Sampling (without
replacement) using the sampling probabilities given in initprobs and
updates using the marginal inclusion probabilities to direct the search/sample;
method="MCMC" combines a random walk Metropolis Hastings (as in MC3 of
Raftery et al 1997) with a random swap of a variable included with a
variable that is currently excluded (see Clyde, Ghosh, and Littman
(2010) for details);
method="MCMC+BAS" runs an initial MCMC as above to calculate marginal
inclusion probabilities and then samples without replacement as in
BAS; method = "deterministic" runs a deterministic sampling using
the initial probabilities (no updating); this is recommended for fast
enumeration or if a model of independence is a good approximation to
the joint posterior distribution of the model indicators. For
BAS, the sampling probabilities can be updated as more models are
sampled. (see 'update' below). We recommend "MCMC+BAS" or "MCMC"
for high dimensional problems.}
\item{update}{number of iterations between potential updates of the
sampling probabilities in the "BAS" method. If NULL do not update,
otherwise the algorithm will update using the marginal inclusion
probabilities as they change while sampling takes place. For large
model spaces, updating is recommended. If the model space will be
enumerated, leave at the default.}
\item{bestmodel}{optional binary vector representing a model to
initialize the sampling. If NULL sampling starts with the null
model}
\item{prob.rw}{For any of the MCMC methods, probability of using the
random-walk proposal; otherwise use a random "flip" move to propose
a new model.}
\item{MCMC.iterations}{Number of models to sample when using any of
the MCMC options; should be greater than 'n.models'.}
\item{control}{a list of parameters that control convergence in the
fitting process. See the documentation for
\code{glm.control()}}
\item{laplace}{logical variable for whether to use a Laplace
approximate for integration with respect to g to obtain the marginal
likelihood. If FALSE the Cephes library is used which may be
inaccurate for large n or large values of the Wald Chisquared statistic.}
\item{renormalize}{logical variable for whether posterior probabilities should be based on renormalizing marginal likelihoods times prior probabilities or use Monte Carlo frequencies. Applies only to MCMC sampling}
}
\details{BAS provides several search
algorithms to find high probability models for use in Bayesian Model
Averaging or Bayesian model selection. For p less than 20-25, BAS can
enumerate all models depending on memory availability, for larger p, BAS
samples without replacement using random or deterministic sampling. The
Bayesian Adaptive Sampling algorithm of Clyde, Ghosh, Littman (2010)
samples models without replacement using the initial sampling
probabilities, and will optionally update the sampling probabilities
every "update" models using the estimated marginal inclusion
probabilities. BAS uses different methods to obtain the \code{initprobs},
which may impact the results in high-dimensional problems.
The deterministic sampler provides a list of the top models in order of an
approximation of independence using the provided \code{initprobs}. This
may be effective after running the other algorithms to identify high
probability models and works well if
the correlations of variables are small to modest. The priors on
coefficients are mixtures of g-priors that provide approximations to the
power prior.}
\value{
\code{bas.glm} returns an object of class \code{basglm}
An object of class \code{basglm} is a list containing at least the following components:
\item{postprobs}{the posterior probabilities of the models selected}
\item{priorprobs}{the prior probabilities of the models selected}
\item{logmarg}{values of the log of the marginal likelihood for the
models}
\item{n.vars}{total number of independent variables in the full model,
including the intercept}
\item{size}{the number of independent variables in each of the models,
includes the intercept}
\item{which}{a list of lists with one list per model with variables
that are included in the model}
\item{probne0}{the posterior probability that each variable is non-zero}
\item{coefficients}{list of lists with one list per model giving the GLM estimate of each (nonzero) coefficient for each model.}
\item{se}{list of lists with one list per model giving the GLM standard error of each coefficient for each model}
\item{deviance}{the GLM deviance for each model}
\item{modelprior}{the prior distribution on models that created the BMA object}
\item{Q}{the Q statistic for each model used in the
marginal likelihood approximation}
\item{Y}{response}
\item{X}{matrix of predictors}
\item{family}{family object from the original call}
\item{betaprior}{family object for prior on coefficients, including hyperparameters}
\item{modelprior}{family object for prior on the models}
}
\references{Li, Y. and Clyde, M. (2015) Mixtures of g-priors in
Generalized Linear Models. \url{http://arxiv.org/abs/1503.06913}
Clyde, M. Ghosh, J. and Littman, M. (2010) Bayesian Adaptive Sampling
for Variable Selection and Model Averaging. Journal of Computational
Graphics and Statistics. 20:80-101 \cr \url{http://dx.doi.org/10.1198/jcgs.2010.09049}
Raftery, A.E, Madigan, D. and Hoeting, J.A. (1997) Bayesian Model
Averaging for Linear Regression Models. Journal of the American
Statistical Association.
}
\author{Merlise Clyde (\email{clyde@stat.duke.edu}), Quanli Wang and Yingbo Li}
\examples{
library(MASS)
data(Pima.tr)
pima.cch = bas.glm(type ~ ., data=Pima.tr, n.models= 2^7, method="BAS",
betaprior=CCH(a=1, b=532/2, s=0), family=binomial(),
modelprior=beta.binomial(1,1))
summary(pima.cch)
image(pima.cch)
pima.robust = bas.glm(type ~ ., data=Pima.tr, n.models= 2^7,
method="BAS",
betaprior=robust(), family=binomial(),
modelprior=beta.binomial(1,1))
pima.BIC = bas.glm(type ~ ., data=Pima.tr, n.models= 2^7,
method="BAS",
betaprior=bic.prior(), family=binomial(),
modelprior=uniform())
}
\keyword{GLM}
\keyword{regression}
|
/man/bas.glm.Rd
|
no_license
|
aespar21/BAS
|
R
| false
| false
| 10,150
|
rd
|
\name{bas.glm}
\alias{bas.glm}
\title{Bayesian Adaptive Sampling Without Replacement for Variable Selection in Generalized Linear Models}
\description{Sample with or without replacement from a posterior distribution on GLMs}
\usage{
bas.glm(formula, family = binomial(link = "logit"), data,
weights, subset, offset, na.action="na.omit",
n.models = NULL, betaprior=CCH(alpha=.5, beta=nrow(data), s=0),
modelprior = beta.binomial(1,1),
initprobs = "Uniform", method = "MCMC", update = NULL,
bestmodel = NULL, prob.rw = 0.5,
MCMC.iterations = NULL, control = glm.control(),
laplace=FALSE, renormalize=FALSE)
}
\arguments{
\item{formula}{generalized linear model formula for the full model with all
predictors, Y ~ X. All code assumes that an intercept will be
included in each model.}
\item{family}{a description of the error distribution and link
function for exponential family;
currently only binomial() with the logistic link and poisson() with the log link are available.}
\item{data}{data frame}
\item{weights}{optional vector of weights to be used in the fitting
process. May be missing in which case weights are 1.}
\item{subset}{subset of data used in fitting}
\item{offset}{a priori known component to be included in the linear
predictor; by default 0.}
\item{na.action}{a function which indicates what should happen when the data contain NAs. The default is "na.omit".}
\item{n.models}{number of unique models to keep. If NULL, BAS will
attempt to enumerate unless p > 35 or method="MCMC". For any of
methods using MCMC algorithms that sample with replacement, sampling
will stop when the number of iterations exceeds the min of
'n.models' or 'MCMC.iterations' and on exit 'n.models' is updated to
reflect the unique number of models that have been sampled. }
\item{betaprior}{Prior on coefficients for model coefficients (except
intercept). Options include \code{\link{g.prior}}, \code{\link{CCH}}, \code{\link{robust}}, \code{\link{intrinsic}},
\code{\link{beta.prime}}, \code{\link{EB.local}}, \code{\link{AIC}}, and \code{\link{BIC}}. }
\item{modelprior}{Family of prior distribution on the models. Choices
include \code{\link{uniform}}, \code{\link{Bernoulli}},
\code{\link{beta.binomial}}, truncated Beta-Binomial,
\code{\link{tr.beta.binomial}}, and truncated power family \code{\link{tr.power.prior}}.}
\item{initprobs}{vector of length p with the initial inclusion
probabilities used for sampling without replacement (the intercept
will be included with probability one and does not need to be added here) or a character
string giving the method used to construct the sampling probabilities
if "Uniform" each predictor variable is equally likely to be
sampled (equivalent to random sampling without replacement). If
"eplogp", use the \code{\link{eplogprob}} function to approximate the
Bayes factor using p-values to find initial marginal inclusion probabilities and
sample without replacement using these
inclusion probabilities, which may be updated using estimates of the
marginal inclusion probabilities. "eplogp" assumes that MLEs from the
full model exist; for problems where that is not the case or 'p' is
large, initial sampling probabilities may be obtained using
\code{\link{eplogprob.marg}} which fits a model to each predictor separately.
For variables that should always be
included set the corresponding initprobs to 1. To run a
Markov Chain to provide initial estimates of marginal
inclusion probabilities, use method="MCMC+BAS" below.}
\item{method}{A character variable indicating which sampling method to
use: method="BAS" uses Bayesian Adaptive Sampling (without
replacement) using the sampling probabilities given in initprobs and
updates using the marginal inclusion probabilities to direct the search/sample;
method="MCMC" combines a random walk Metropolis Hastings (as in MC3 of
Raftery et al 1997) with a random swap of a variable included with a
variable that is currently excluded (see Clyde, Ghosh, and Littman
(2010) for details);
method="MCMC+BAS" runs an initial MCMC as above to calculate marginal
inclusion probabilities and then samples without replacement as in
BAS; method = "deterministic" runs a deterministic sampling using
the initial probabilities (no updating); this is recommended for fast
enumeration or if a model of independence is a good approximation to
the joint posterior distribution of the model indicators. For
BAS, the sampling probabilities can be updated as more models are
sampled. (see 'update' below). We recommend "MCMC+BAS" or "MCMC"
for high dimensional problems.}
\item{update}{number of iterations between potential updates of the
sampling probabilities in the "BAS" method. If NULL do not update,
otherwise the algorithm will update using the marginal inclusion
probabilities as they change while sampling takes place. For large
model spaces, updating is recommended. If the model space will be
enumerated, leave at the default.}
\item{bestmodel}{optional binary vector representing a model to
initialize the sampling. If NULL sampling starts with the null
model}
\item{prob.rw}{For any of the MCMC methods, probability of using the
random-walk proposal; otherwise use a random "flip" move to propose
a new model.}
\item{MCMC.iterations}{Number of models to sample when using any of
the MCMC options; should be greater than 'n.models'.}
\item{control}{a list of parameters that control convergence in the
fitting process. See the documentation for
\code{glm.control()}}
\item{laplace}{logical variable for whether to use a Laplace
approximate for integration with respect to g to obtain the marginal
likelihood. If FALSE the Cephes library is used which may be
inaccurate for large n or large values of the Wald Chisquared statistic.}
\item{renormalize}{logical variable for whether posterior probabilities should be based on renormalizing marginal likelihoods times prior probabilities or use Monte Carlo frequencies. Applies only to MCMC sampling}
}
\details{BAS provides several search
algorithms to find high probability models for use in Bayesian Model
Averaging or Bayesian model selection. For p less than 20-25, BAS can
enumerate all models depending on memory availability, for larger p, BAS
samples without replacement using random or deterministic sampling. The
Bayesian Adaptive Sampling algorithm of Clyde, Ghosh, Littman (2010)
samples models without replacement using the initial sampling
probabilities, and will optionally update the sampling probabilities
every "update" models using the estimated marginal inclusion
probabilities. BAS uses different methods to obtain the \code{initprobs},
which may impact the results in high-dimensional problems.
The deterministic sampler provides a list of the top models in order of an
approximation of independence using the provided \code{initprobs}. This
may be effective after running the other algorithms to identify high
probability models and works well if
the correlations of variables are small to modest. The priors on
coefficients are mixtures of g-priors that provide approximations to the
power prior.}
\value{
\code{bas.glm} returns an object of class \code{basglm}
An object of class \code{basglm} is a list containing at least the following components:
\item{postprobs}{the posterior probabilities of the models selected}
\item{priorprobs}{the prior probabilities of the models selected}
\item{logmarg}{values of the log of the marginal likelihood for the
models}
\item{n.vars}{total number of independent variables in the full model,
including the intercept}
\item{size}{the number of independent variables in each of the models,
includes the intercept}
\item{which}{a list of lists with one list per model with variables
that are included in the model}
\item{probne0}{the posterior probability that each variable is non-zero}
\item{coefficients}{list of lists with one list per model giving the GLM estimate of each (nonzero) coefficient for each model.}
\item{se}{list of lists with one list per model giving the GLM standard error of each coefficient for each model}
\item{deviance}{the GLM deviance for each model}
\item{modelprior}{the prior distribution on models that created the BMA object}
\item{Q}{the Q statistic for each model used in the
marginal likelihood approximation}
\item{Y}{response}
\item{X}{matrix of predictors}
\item{family}{family object from the original call}
\item{betaprior}{family object for prior on coefficients, including hyperparameters}
\item{modelprior}{family object for prior on the models}
}
\references{Li, Y. and Clyde, M. (2015) Mixtures of g-priors in
Generalized Linear Models. \url{http://arxiv.org/abs/1503.06913}
Clyde, M. Ghosh, J. and Littman, M. (2010) Bayesian Adaptive Sampling
for Variable Selection and Model Averaging. Journal of Computational
Graphics and Statistics. 20:80-101 \cr \url{http://dx.doi.org/10.1198/jcgs.2010.09049}
Raftery, A.E, Madigan, D. and Hoeting, J.A. (1997) Bayesian Model
Averaging for Linear Regression Models. Journal of the American
Statistical Association.
}
\author{Merlise Clyde (\email{clyde@stat.duke.edu}), Quanli Wang and Yingbo Li}
\examples{
library(MASS)
data(Pima.tr)
pima.cch = bas.glm(type ~ ., data=Pima.tr, n.models= 2^7, method="BAS",
betaprior=CCH(a=1, b=532/2, s=0), family=binomial(),
modelprior=beta.binomial(1,1))
summary(pima.cch)
image(pima.cch)
pima.robust = bas.glm(type ~ ., data=Pima.tr, n.models= 2^7,
method="BAS",
betaprior=robust(), family=binomial(),
modelprior=beta.binomial(1,1))
pima.BIC = bas.glm(type ~ ., data=Pima.tr, n.models= 2^7,
method="BAS",
betaprior=bic.prior(), family=binomial(),
modelprior=uniform())
}
\keyword{GLM}
\keyword{regression}
|
# NOTE(review): `rm(list = ls())` removed -- clearing the global
# environment from inside a script is an anti-pattern: it wipes the
# caller's workspace when the script is source()d and does not detach
# packages or reset options.  Run the script in a fresh R session instead.
#Some functions and code to simulate predators depending on prey abundance
#######Some Functions#########
###Function to calculate unfished "SSBR", and abundance or biomass, or equilibrium characteristics given exploitation rate for delay-difference dynamics with Beverton-Holt SR.
#function(unfishedspr,desired unfished N or B, steepness,units of numbs or bio?, annual natural mortality, annual fishery exploitation rate, ford walford parameter intercept, ford walford slope param, do unfished or exploited equilib?)
# Delay-difference equilibrium calculator with a Beverton-Holt
# stock-recruit relationship (Hilborn & Walters delay-difference chapter;
# steepness parameterisation as in Mangel et al. 2010).
#
# Arguments:
#   predunfishspr - candidate unfished spawners-per-recruit ("SSBR")
#   targzero      - target unfished total N (or biomass)
#   steep         - Beverton-Holt steepness
#   numbs_weightb - 1 = dynamics in numbers, otherwise in biomass
#   annualnatmort - annual natural mortality rate (0-1)
#   annualexploit - annual fishery exploitation rate (0-1)
#   FWalpha       - Ford-Walford intercept (biomass units only)
#   FWrho         - Ford-Walford slope (biomass units only)
#   unfishflag    - 0 = return squared deviation of the equilibrium stock
#                   from targzero (objective for optimize()); otherwise
#                   return a one-row data.frame of equilibrium quantities
unfishspr <- function(predunfishspr = NULL, targzero = NULL, steep = NULL,
                      numbs_weightb = NULL, annualnatmort = NULL,
                      annualexploit = NULL, FWalpha = NULL, FWrho = NULL,
                      unfishflag = NULL) {
  # Unfished recruitment implied by the candidate SSBR
  r_zero <- 1 / predunfishspr * targzero
  # Beverton-Holt parameters in the steepness form (Mangel et al. 2010)
  bh_alpha <- (4 * steep * r_zero) / (5 * steep - 1)
  bh_beta <- ((targzero / r_zero) * ((1 - steep) / (4 * steep))) /
    ((5 * steep - 1) / (4 * steep * r_zero))

  if (numbs_weightb == 1) {
    # Numbers dynamics: growth-survival constant (Hilborn & Walters p. 339)
    kappa <- 1 - (1 - annualnatmort) * (1 - annualexploit)
    eq_stock <- (bh_alpha / kappa) - (bh_beta / (1 - annualexploit))
    catch_eq <- eq_stock * annualexploit    # equilibrium catch
    rec_eq <- kappa * eq_stock              # equilibrium recruitment
    spawn_eq <- eq_stock - catch_eq         # equilibrium spawning stock
  } else {
    # Biomass dynamics with Ford-Walford growth
    kappa <- ((1 - (1 + FWrho) * (1 - annualnatmort) * (1 - annualexploit)) +
                (FWrho * (1 - annualnatmort)^2 * (1 - annualexploit)^2)) / FWalpha
    eq_stock <- (bh_alpha / kappa) - (bh_beta / (1 - annualexploit))
    yield_eq <- eq_stock * annualexploit    # equilibrium yield
    rec_eq <- kappa * eq_stock              # equilibrium recruitment
    spawn_eq <- eq_stock - yield_eq         # equilibrium spawning stock
    n_eq <- rec_eq / (1 - (1 - annualnatmort) * (1 - annualexploit))
    w_eq <- eq_stock / n_eq                 # equilibrium average weight
  }

  if (unfishflag == 0) {
    # Objective for optimize(): squared deviation of equilibrium stock
    # from the target unfished level.
    return((targzero - eq_stock)^2)
  }
  # Exploited (or unfished) equilibrium summary, units depending on mode
  if (numbs_weightb == 1) {
    data.frame("Neq" = eq_stock, "Ceq" = catch_eq, "Req" = rec_eq,
               "SSeq" = spawn_eq, "predrzero" = r_zero,
               "predalpha" = bh_alpha, "predbeta" = bh_beta)
  } else {
    data.frame("Beq" = eq_stock, "Yeq" = yield_eq, "Req" = rec_eq,
               "SSeq" = spawn_eq, "Neq" = n_eq, "Weq" = w_eq,
               "predrzero" = r_zero, "predalpha" = bh_alpha,
               "predbeta" = bh_beta)
  }
}
#####end function unfishspr#####
#### Common Tern Gulf of Maine pars (see HerringMSEpredspars.xlsx for derivation) ####
# Units switch: 1 = track the predator in numbers only; any other value =
# numbers AND biomass (biomass mode enables Ford-Walford growth effects).
numbs_weight <- 1 #2
Predzero <- 45000 #5.90E+05 #5.60E+09 #50000 #predator unfished total N or biomass
Predsteep <- 0.26 #0.25 #0.8 #predator steepness
PredannualA <- 0.1 #0.6 #predator annual natural mortality rate (0-1); serves as max if time varying
Predexploit <- 0.00 #0.3 #predator annual exploitation rate (0-1)
FWalpha <- 0.00015 #1.5 #0.243 #Ford-Walford plot intercept parameter if doing biomass units (tons)
FWrho <- 0.0 #1.45 #Ford-Walford plot slope parameter if doing biomass units; serves as max if time varying
#Recwt <- 0.041 #age 6 not 1 0.00278 #avg wt at recruitment age, t (wt at PredRecdelay)
#Prerecwt <- 0.027 #age 5 not 0 0.00047 #avg wt at rec age - 1, t
PredN <- c() #predator abundance vector (filled year by year in the sim loop)
PredN[1] <- 3000 #7000 #initial abundance in year 1
PredB <- c() #predator biomass vector, used only in biomass mode
PredB[1] <- 1.5 #6000 #initial biomass in year 1
PredRecdelay <- 4 #1 #delay in years before recruits spawned in year y join the population
preypredrec <- 1.09 #strength of effect of prey N on predator recruitment (like Plaganyi and Butterworth) >=1; 1=no effect
preyprednatm <- 0 #strength of effect of prey N on predator annual natural mort (0-1); 0=no effect
preypredgrow <- 1 #strength of effect of prey N on predator growth, >=1; 1=no effect
# Fix(review): use TRUE/FALSE instead of T/F -- T and F are ordinary variables
# in R and can be reassigned, silently changing these switches.
BFT <- FALSE  #bluefin-tuna parameterization (off)
COTE <- TRUE  #common-tern parameterization (on)
if (COTE) {
  #pars below for alternative steepness=0.41, same B0=45000
  #predunfishspr<-15
  #predalpha<- 4686
  #predbeta<- 25286
  COTEprodThresh <- 400000  #total prey biomass below which tern productivity drops (GOM data)
}
doplotsbysim <- FALSE
dosummaryplots <- TRUE
#####End user inputs########
#Do predator unfished equilibrium calculation and get unfished spr
# optimize() searches SPR0 in (0, 50) for the value whose implied equilibrium
# matches Predzero (unfishspr returns the squared deviation when unfishflag=0).
predunfishsprfxn<-optimize(f=unfishspr,interval=c(0,50),targzero=Predzero,steep=Predsteep,numbs_weightb=numbs_weight,annualnatmort=PredannualA,annualexploit=0,FWalpha=FWalpha,FWrho=FWrho,unfishflag=0,tol=0.0000001)
predunfishspr<-predunfishsprfxn$minimum
#send unfished spr through function again with an exploitation rate to get equlibrium conditions
PredEquilib<-unfishspr(predunfishspr=predunfishspr,targzero=Predzero,steep=Predsteep,numbs_weightb=numbs_weight,annualnatmort=PredannualA,annualexploit=Predexploit,FWalpha=FWalpha,FWrho=FWrho,unfishflag=1)
predalpha<-PredEquilib$predalpha #predator BH SR parm
predbeta<-PredEquilib$predbeta #predator BH SR parm
Req<-PredEquilib$Req #equilibrium recruitment; seeds Predrec[1:PredRecdelay] in the sim loop
# NOTE(review): in numbers mode (numbs_weight == 1) the frame returned by
# unfishspr has no Weq column, so Weq below is NULL; it is only meaningful in
# biomass mode.
Weq<-PredEquilib$Weq
###################################################################
#### Read in Herring "data" for each OM and Control Rule ###########
require(dplyr) # NOTE(review): library(dplyr) is preferred; require() only warns on failure
#setwd("~/Data/Projects/MSE/HerringMSE")
#setwd("~/Documents/0_Data/MSE/HerringMSE")
#Set up directory to save output and graphics
#ResultsDirec<-c('//net/home5/jderoba/MSE/HiM_LowSteep_NoAssBias_OldWt/')
PredDirec<-c('~/Data/Projects/MSE/HerringMSE/Terns') # output root for tern results
#OMDirec<-c('//net/home5/jderoba/MSE/HiM_LowSteep_NoAssBias_RecWt/') #Change for different OM combinations HOW TO ACCESS FROM MY COMPUTER?
OMDirec<-c('//net/mse/') # root holding herring operating-model output folders
#OMtype<-c('HiM_LowSteep_NoAssBias_OldWt') #done first
#OMlist<-c('LoM_HiSteep_NoAssBias_OldWt','HiM_LowSteep_NoAssBias_OldWt','LoM_HiSteep_NoAssBias_RecWt', 'HiM_LowSteep_NoAssBias_RecWt')
# Operating-model folder names (assessment-bias variants) and control-rule types.
OMlist<-c('LoM_HiSteep_AssBias_OldWt','HiM_LowSteep_AssBias_OldWt','LoM_HiSteep_AssBias_RecWt', 'HiM_LowSteep_AssBias_RecWt')
CRtype<-c("BB", "BB3yr", "BB5yr", "BB3yrPerc", "CC", "CCC")
# Outer loops: one pass per operating model (OM) folder x control-rule type
# folder x control-rule variant file set found on disk. For each variant the
# herring ("prey") simulation output is read and a delay-difference tern
# ("predator") population is simulated on top of it.
for(OMtype in OMlist){
dir.create(PredDirecOM<-file.path(PredDirec,paste(OMtype)))
for(crt in CRtype){
dir.create(PredDirecCR<-file.path(PredDirecOM,paste(crt)))
PreyDirecCR<-paste(OMDirec,OMtype,"/",crt,"/", sep="")
#get control rule number from filenames
library(stringr)
# NOTE(review): in the pattern below "\\S" is the regex non-whitespace class,
# not a literal backslash-S; it happens to match names ending in
# "...SimCharAA.txt", but an explicit "SimCharAA\\.txt$" would be clearer.
k<-list.files(path=PreyDirecCR,pattern="\\SimCharAA.txt$")
# prepare regular expression
regexp <- "[[:digit:]]+"
# First run of digits in each filename is taken as the control-rule number.
CRnum<-str_extract(k, regexp)
for(crnum in CRnum){
# Input files for this variant: numbers-at-age, sim characteristics, total biomass.
filename1<-paste("Unadj", crnum, "NAASimYear.txt", sep="")
filename2<-paste("Unadj", crnum, "SimCharAA.txt", sep="")
filename3<-paste("Unadj", crnum, "TotBioSimYear.txt", sep="")
NAAfile<-paste(PreyDirecCR,"/",filename1, sep="")
charfile<-paste(PreyDirecCR,"/",filename2, sep="")
totBfile<-paste(PreyDirecCR,"/",filename3, sep="")
preyB<-read.table(totBfile, header=T)
preysim<-read.table(NAAfile, header=T)
# Total prey abundance per sim/year (ages 1-8 summed).
preyNsim<-transmute(preysim, preyN=Age1+Age2+Age3+Age4+Age5+Age6+Age7+Age8, Sim=Sim)
by_simN<-group_by(preyNsim, Sim)
Nyears<-summarise(by_simN, n=n())
preychar<-read.table(charfile, header=T)
preychar<-cbind(preychar, Age=rep(1:8,max(preychar$Sim)))
#preyAvgWt<-#for each Sim, for ages <2, keep for tern forage
#Ternforage<-select(preysim, Age1, Sim)
#BASE ON TOTAL B
# Tern forage base: total prey biomass (not age-1 numbers; see commented lines).
Ternforage<-cbind(preyB, Yr=rep(1:Nyears$n, max(preychar$Sim)))
#Ternforage<- cbind(Ternforage, Yr=rep(1:Nyears$n, max(preychar$Sim)))
# add loop for multiple sims per pred
for (h in 1:max(preyNsim$Sim)){
#preyN for terns is just age 1
#preyN<-Ternforage$Age1[Ternforage$Sim==h]
#preyN for terns is totalB
preyN<-Ternforage$TotalBio[Ternforage$Sim==h]
#preyN<-preyNsim$preyN[preyNsim$Sim==h] #this would be if all ages were forage
nyears<-Nyears$n[h]
#preyNzero for terns is just age1 unfished
#preyNzero<-preychar$UnfishedNAA[preychar$Age==1 & preychar$Sim==h]
#preyNzero for terns is the threshold total B where prod drops, FIXED based on GOM data
preyNzero<-COTEprodThresh
###predator dynamics###
#stuff I need
# Prey-dependent modifiers: recruitment multiplier (hyperbolic, Plaganyi &
# Butterworth style), optionally time-varying natural mortality, survival.
Recmult<-(preypredrec*(preyN/preyNzero))/((preypredrec-1)+(preyN/preyNzero)) #fraction of expected predator BH recruitment dependent on prey N
PredAnnualNatM<-PredannualA*exp(-(preyN/preyNzero)*preyprednatm) #needed if annual nat mort is time varying
TotalSurv<-(1-PredAnnualNatM)*(1-Predexploit) #total annual predator survival
catch<-c()
Spawn<-c() #spawners in numbers or biomass depending
yield<-c()
if(BFT){ #base growth on prey avereage weight; use generalized logistic with lower bound on growth rate (97% of 102% of FWrho to start, trying to center on FWrho)
# NOTE(review): preyAvgWt and BFTGrowThresh are not defined anywhere in this
# script, so this branch would fail if BFT were TRUE; confirm against the
# bluefin-tuna version of the script.
AnnualAlpha<-(0.9*FWalpha) + ((1.1*FWalpha) - (0.9*FWalpha))/(1+exp((1-preypredgrow)*(100*(preyAvgWt-BFTGrowThresh)/BFTGrowThresh))) #alpha changes with herring avg wt, not slope
} else { #base growth on abundance of prey
AnnualGrowParm<-FWrho*((preypredgrow*(preyN/preyNzero))/((preypredgrow-1)+(preyN/preyNzero))) #if grow time varies then this is annual FW slope
}
Predrec<-c()
Predrec[1:PredRecdelay]<-Req #set recruitment in initial years at equlibrium to account for delay
###year loop for predator dynamics
for(y in 1:(nyears-1)){
if(numbs_weight==1){
catch[y]<-PredN[y]*Predexploit
Spawn[y]<-PredN[y]-catch[y]
} else {
yield[y]<-PredB[y]*Predexploit
Spawn[y]<-PredB[y]-yield[y]
}
# Recruits spawned in year y enter PredRecdelay years later; PredN[y+1]
# therefore adds Predrec[y+1], which was spawned at y+1-PredRecdelay (or
# seeded with Req for the first PredRecdelay years).
Predrec[y+PredRecdelay]<-Recmult[y]*((predalpha*Spawn[y])/(predbeta+Spawn[y])) #SR
#Predrec[y+PredRecdelay]<-Recmult[y]*(Spawn[y])/(predalpha+(predbeta*Spawn[y])) #SR from Mangel
PredN[y+1]<-PredN[y]*TotalSurv[y]+Predrec[y+1]
if(numbs_weight!=1){ #only do biomass calcs if requested in those units
if(BFT){
PredB[y+1]<-TotalSurv[y]*(AnnualAlpha[y]*PredN[y]+FWrho*PredB[y])+AnnualAlpha[y]*Predrec[y+1]
} else {
PredB[y+1]<-TotalSurv[y]*(FWalpha*PredN[y]+AnnualGrowParm[y]*PredB[y])+FWalpha*Predrec[y+1]
}
}
} #end y loop for predators
#construct and append to dataframe for all sims
# NOTE(review): the hard-coded indices 4:153, 5:153 and 1:149 assume
# nyears == 150 (so length(Predrec) == 153 with PredRecdelay == 4); confirm
# for runs of a different length.
if(numbs_weight==1){
if(h==1){
#predOut<-data.frame(Sim=h, Yr=c(1:nyears), PredN=PredN, PredRec=c(Predrec[5:153], 0), PredProd=c(Predrec[5:153]*10/PredN[1:149], 0))
predOut<-data.frame(Sim=h, Yr=c(1:nyears), PredN=PredN, PredRec=Predrec[4:153], PredProd=c(0,Predrec[5:153]*10/PredN[1:149]))
} else {
#predOutn<-data.frame(Sim=h, Yr=c(1:nyears), PredN=PredN, PredRec=c(Predrec[5:153], 0), PredProd=c(Predrec[5:153]*10/PredN[1:149], 0))
predOutn<-data.frame(Sim=h, Yr=c(1:nyears), PredN=PredN, PredRec=Predrec[4:153], PredProd=c(0, Predrec[5:153]*10/PredN[1:149]))
predOut<-bind_rows(predOut, predOutn)
}
} else {
# NOTE(review): SSB_MSY is never defined in this script, so this biomass
# branch would fail when numbs_weight != 1; confirm where SSB_MSY should
# come from.
if(h==1){
predOut<-data.frame(Sim=h, Yr=c(1:nyears), PredB=PredB, PredN=PredN, PredRec=Predrec, PredAvWt=PredB/PredN, PredB_status=PredB/SSB_MSY, PredAvWt_status=(PredB/PredN)/Weq )
} else {
predOutn<-data.frame(Sim=h, Yr=c(1:nyears), PredB=PredB, PredN=PredN, PredRec=Predrec, PredAvWt=PredB/PredN, PredB_status=PredB/SSB_MSY, PredAvWt_status=(PredB/PredN)/Weq )
predOut<-bind_rows(predOut, predOutn)
}
}
# Optional per-sim diagnostic panel: abundance, prey-effect curves, prey
# series, productivity and (biomass mode) biomass / average weight.
if(doplotsbysim){
par(mfrow=c(3,2))
par(mar=c(4,4,2,2)+0.1)
par(oma=c(2,2,2,0))
plot(PredN,type='l',col="black",xlab="Year",ylab="Predator Abundance",lwd=2, ylim=c(0, max(PredN)))
plot(preyN,Recmult,col="black",xlab="Prey Abundance",ylab="Recruitment Fraction",lwd=2)
title(paste("preypredrec = ", preypredrec), line=-2)
plot(preyN,PredAnnualNatM,col="black",xlab="Prey Abundance",ylab="Annual Natural Mortality Rate",lwd=2)
title(paste("preyprednatm = ", preyprednatm), line=-2)
if(BFT){
plot(preyAvgWt,AnnualAlpha,col="black",xlab="Prey Avg Wt",ylab="Growth Intercept",lwd=2)
title(paste("preypredgrow = ", preypredgrow), line=-2)
} else {
plot(preyN,AnnualGrowParm,col="black",xlab="Prey Abundance",ylab="Growth Rate",lwd=2)
title(paste("preypredgrow = ", preypredgrow), line=-2)
}
plot(preyN, type="l", xlab="Year", ylab="Prey Abundance", lwd=2)
if(numbs_weight == 1){ #only do for numbers
plot(Predrec[5:153]*10/PredN[1:149], type="l", xlab="Year", ylab="Predator Productivity", lwd=2, ylim=c(0,2))
abline(h=1, col="blue", lty=3)
}
if(numbs_weight != 1){ #only do if biomass units requested
plot(PredB,type='l',col="black",xlab="Year",ylab="Predator Biomass",lwd=2, ylim=c(0, max(PredB)))
abline(h=SSB_MSY)
par(new=T)
plot(PredB/PredN, type="l", col="blue", axes=F, xlab=NA, ylab=NA, ylim=c(0, max(PredB/PredN)))
axis(side = 4)
mtext(side = 4, line = 3, 'Pop avg wt')
lines(PredB/PredN, type='l',col="blue")
abline(h=Weq, col="blue", lty=3)
}
mtext(paste("Sim ", h, sep=""), outer=T, side=3)
} # end doplots
} # end loop over prey sims
# Persist predator trajectories for this control rule.
write.table(predOut, paste(PredDirecCR,"/",crnum,"predBNAvWtStatus.txt", sep="")) #put control rule id in filename
# Summary panels across all sims for this control rule.
if(dosummaryplots) {
par(mfrow=c(3,1))
if(numbs_weight != 1){ #only do if biomass units requested
plot(predOut$Yr[predOut$Sim==1], predOut$PredB[predOut$Sim==1], type="l", col = rgb(0, 0, 0, 0.3), ylab="PredB", xlab="")
for (j in 1:max(predOut$Sim)){
lines(predOut$Yr[predOut$Sim==j], predOut$PredB[predOut$Sim==j], type="l", col = rgb(0, 0, 0, 0.3))
}
}
plot(predOut$Yr[predOut$Sim==1], predOut$PredN[predOut$Sim==1], ylim=c(0,max(predOut$PredN)),type="l", col = rgb(0, 0, 0, 0.3), ylab="PredN", xlab="")
for (j in 1:max(predOut$Sim)){
lines(predOut$Yr[predOut$Sim==j], predOut$PredN[predOut$Sim==j], type="l", col = rgb(0, 0, 0, 0.3))
}
if(numbs_weight != 1){ #only do if biomass units requested
plot(predOut$Yr[predOut$Sim==1], predOut$PredAvWt_status[predOut$Sim==1], type="l", col = rgb(0, 0, 0, 0.3), ylab="PredAvWt", xlab="")
for (j in 1:max(predOut$Sim)){
lines(predOut$Yr[predOut$Sim==j], predOut$PredAvWt_status[predOut$Sim==j], type="l", col = rgb(0, 0, 0, 0.3))
}
abline(h=1.0, col="blue", lwd=3)
mtext(paste("Control Rule ", crnum, sep=""), outer=T, side=3)
}
if(numbs_weight == 1){ #only do if numbers units requested
plot(predOut$Yr[predOut$Sim==1], predOut$PredProd[predOut$Sim==1], type="l", col = rgb(0, 0, 0, 0.3), ylab="PredProd", xlab="")
for (j in 1:max(predOut$Sim)){
lines(predOut$Yr[predOut$Sim==j], predOut$PredProd[predOut$Sim==j], type="l", col = rgb(0, 0, 0, 0.3))
}
abline(h=1.0, col="blue", lwd=3)
mtext(paste("Control Rule ", crnum, sep=""), outer=T, side=3)
}
}
}#end crnum loop over control rule variants
}#end CRtype loop over control rule type folders
}#end OMtype list over operating model folders
|
/PredScriptArchive/MSE_TernLink_AssBias.R
|
no_license
|
sgaichas/herringMSE
|
R
| false
| false
| 16,137
|
r
|
rm(list=ls()) # NOTE(review): clearing the global environment in a script is discouraged (it does not give a fresh session); kept as-is
#Some functions and code to simulate predators depending on prey abundance
#######Some Functions#########
###Function to calculate unfished "SSBR", and abundance or biomass, or equilibrium characteristics given exploitation rate for delay-difference dynamics with Beverton-Holt SR.
#function(unfishedspr,desired unfished N or B, steepness,units of numbs or bio?, annual natural mortality, annual fishery exploitation rate, ford walford parameter intercept, ford walford slope param, do unfished or exploited equilib?)
# Delay-difference equilibrium calculator with a Beverton-Holt stock-recruit
# relationship parameterized by steepness (delay-difference chapter of
# Hilborn & Walters; Mangel et al. 2010 for the steepness forms).
#
# Arguments:
#   predunfishspr  candidate unfished spawners-per-recruit (SPR0)
#   targzero       desired unfished abundance (numbers) or biomass
#   steep          Beverton-Holt steepness
#   numbs_weightb  1 = numbers-only dynamics, otherwise biomass dynamics
#   annualnatmort  annual natural mortality rate (0-1)
#   annualexploit  annual fishery exploitation rate (0-1)
#   FWalpha, FWrho Ford-Walford growth intercept and slope (biomass mode)
#   unfishflag     0 = return squared deviation of the equilibrium from
#                      targzero (objective for optimize());
#                  1 = return equilibrium quantities as a one-row data.frame
unfishspr <- function(predunfishspr = NULL, targzero = NULL, steep = NULL,
                      numbs_weightb = NULL, annualnatmort = NULL,
                      annualexploit = NULL, FWalpha = NULL, FWrho = NULL,
                      unfishflag = NULL) {
  # Unfished recruitment implied by the candidate SPR and target N0/B0.
  rzero <- 1 / predunfishspr * targzero
  # Beverton-Holt parameters derived from steepness (Mangel et al. 2010).
  sr_alpha <- (4 * steep * rzero) / (5 * steep - 1)
  sr_beta <- ((targzero / rzero) * ((1 - steep) / (4 * steep))) /
    ((5 * steep - 1) / (4 * steep * rzero))

  # Growth-survival constant kappa (p. 339, Hilborn & Walters); the
  # biomass form folds in Ford-Walford growth.
  if (numbs_weightb == 1) {
    kappa <- 1 - (1 - annualnatmort) * (1 - annualexploit)
  } else {
    kappa <- ((1 - (1 + FWrho) * (1 - annualnatmort) * (1 - annualexploit)) +
                (FWrho * (1 - annualnatmort)^2 * (1 - annualexploit)^2)) / FWalpha
  }
  eqvalue <- (sr_alpha / kappa) - (sr_beta / (1 - annualexploit))  # equilibrium N or B
  removed <- eqvalue * annualexploit  # equilibrium catch (numbers) or yield (biomass)
  rec_eq <- kappa * eqvalue           # equilibrium recruitment
  ss_eq <- eqvalue - removed          # equilibrium spawning stock

  # Objective mode: squared deviation from the unfished target, minimized by
  # optimize() when solving for the unfished SPR.
  if (unfishflag == 0) {
    return((targzero - eqvalue)^2)
  }

  if (numbs_weightb == 1) {
    data.frame("Neq" = eqvalue, "Ceq" = removed, "Req" = rec_eq, "SSeq" = ss_eq,
               "predrzero" = rzero, "predalpha" = sr_alpha, "predbeta" = sr_beta)
  } else {
    # Equilibrium numbers and mean weight follow from recruitment and survival.
    n_eq <- rec_eq / (1 - (1 - annualnatmort) * (1 - annualexploit))
    w_eq <- eqvalue / n_eq
    data.frame("Beq" = eqvalue, "Yeq" = removed, "Req" = rec_eq, "SSeq" = ss_eq,
               "Neq" = n_eq, "Weq" = w_eq, "predrzero" = rzero,
               "predalpha" = sr_alpha, "predbeta" = sr_beta)
  }
}
#####end function unfishspr#####
#### Common Tern Gulf of Maine pars (see HerringMSEpredspars.xlsx for derivation) ####
# Units switch: 1 = track the predator in numbers only; any other value =
# numbers AND biomass (biomass mode enables Ford-Walford growth effects).
numbs_weight <- 1 #2
Predzero <- 45000 #5.90E+05 #5.60E+09 #50000 #predator unfished total N or biomass
Predsteep <- 0.26 #0.25 #0.8 #predator steepness
PredannualA <- 0.1 #0.6 #predator annual natural mortality rate (0-1); serves as max if time varying
Predexploit <- 0.00 #0.3 #predator annual exploitation rate (0-1)
FWalpha <- 0.00015 #1.5 #0.243 #Ford-Walford plot intercept parameter if doing biomass units (tons)
FWrho <- 0.0 #1.45 #Ford-Walford plot slope parameter if doing biomass units; serves as max if time varying
#Recwt <- 0.041 #age 6 not 1 0.00278 #avg wt at recruitment age, t (wt at PredRecdelay)
#Prerecwt <- 0.027 #age 5 not 0 0.00047 #avg wt at rec age - 1, t
PredN <- c() #predator abundance vector (filled year by year in the sim loop)
PredN[1] <- 3000 #7000 #initial abundance in year 1
PredB <- c() #predator biomass vector, used only in biomass mode
PredB[1] <- 1.5 #6000 #initial biomass in year 1
PredRecdelay <- 4 #1 #delay in years before recruits spawned in year y join the population
preypredrec <- 1.09 #strength of effect of prey N on predator recruitment (like Plaganyi and Butterworth) >=1; 1=no effect
preyprednatm <- 0 #strength of effect of prey N on predator annual natural mort (0-1); 0=no effect
preypredgrow <- 1 #strength of effect of prey N on predator growth, >=1; 1=no effect
# Fix(review): use TRUE/FALSE instead of T/F -- T and F are ordinary variables
# in R and can be reassigned, silently changing these switches.
BFT <- FALSE  #bluefin-tuna parameterization (off)
COTE <- TRUE  #common-tern parameterization (on)
if (COTE) {
  #pars below for alternative steepness=0.41, same B0=45000
  #predunfishspr<-15
  #predalpha<- 4686
  #predbeta<- 25286
  COTEprodThresh <- 400000  #total prey biomass below which tern productivity drops (GOM data)
}
doplotsbysim <- FALSE
dosummaryplots <- TRUE
#####End user inputs########
#Do predator unfished equilibrium calculation and get unfished spr
# optimize() searches SPR0 in (0, 50) for the value whose implied equilibrium
# matches Predzero (unfishspr returns the squared deviation when unfishflag=0).
predunfishsprfxn<-optimize(f=unfishspr,interval=c(0,50),targzero=Predzero,steep=Predsteep,numbs_weightb=numbs_weight,annualnatmort=PredannualA,annualexploit=0,FWalpha=FWalpha,FWrho=FWrho,unfishflag=0,tol=0.0000001)
predunfishspr<-predunfishsprfxn$minimum
#send unfished spr through function again with an exploitation rate to get equlibrium conditions
PredEquilib<-unfishspr(predunfishspr=predunfishspr,targzero=Predzero,steep=Predsteep,numbs_weightb=numbs_weight,annualnatmort=PredannualA,annualexploit=Predexploit,FWalpha=FWalpha,FWrho=FWrho,unfishflag=1)
predalpha<-PredEquilib$predalpha #predator BH SR parm
predbeta<-PredEquilib$predbeta #predator BH SR parm
Req<-PredEquilib$Req #equilibrium recruitment; seeds Predrec[1:PredRecdelay] in the sim loop
# NOTE(review): in numbers mode (numbs_weight == 1) the frame returned by
# unfishspr has no Weq column, so Weq below is NULL; it is only meaningful in
# biomass mode.
Weq<-PredEquilib$Weq
###################################################################
#### Read in Herring "data" for each OM and Control Rule ###########
require(dplyr) # NOTE(review): library(dplyr) is preferred; require() only warns on failure
#setwd("~/Data/Projects/MSE/HerringMSE")
#setwd("~/Documents/0_Data/MSE/HerringMSE")
#Set up directory to save output and graphics
#ResultsDirec<-c('//net/home5/jderoba/MSE/HiM_LowSteep_NoAssBias_OldWt/')
PredDirec<-c('~/Data/Projects/MSE/HerringMSE/Terns') # output root for tern results
#OMDirec<-c('//net/home5/jderoba/MSE/HiM_LowSteep_NoAssBias_RecWt/') #Change for different OM combinations HOW TO ACCESS FROM MY COMPUTER?
OMDirec<-c('//net/mse/') # root holding herring operating-model output folders
#OMtype<-c('HiM_LowSteep_NoAssBias_OldWt') #done first
#OMlist<-c('LoM_HiSteep_NoAssBias_OldWt','HiM_LowSteep_NoAssBias_OldWt','LoM_HiSteep_NoAssBias_RecWt', 'HiM_LowSteep_NoAssBias_RecWt')
# Operating-model folder names (assessment-bias variants) and control-rule types.
OMlist<-c('LoM_HiSteep_AssBias_OldWt','HiM_LowSteep_AssBias_OldWt','LoM_HiSteep_AssBias_RecWt', 'HiM_LowSteep_AssBias_RecWt')
CRtype<-c("BB", "BB3yr", "BB5yr", "BB3yrPerc", "CC", "CCC")
# Outer loops: one pass per operating model (OM) folder x control-rule type
# folder x control-rule variant file set found on disk. For each variant the
# herring ("prey") simulation output is read and a delay-difference tern
# ("predator") population is simulated on top of it.
for(OMtype in OMlist){
dir.create(PredDirecOM<-file.path(PredDirec,paste(OMtype)))
for(crt in CRtype){
dir.create(PredDirecCR<-file.path(PredDirecOM,paste(crt)))
PreyDirecCR<-paste(OMDirec,OMtype,"/",crt,"/", sep="")
#get control rule number from filenames
library(stringr)
# NOTE(review): in the pattern below "\\S" is the regex non-whitespace class,
# not a literal backslash-S; it happens to match names ending in
# "...SimCharAA.txt", but an explicit "SimCharAA\\.txt$" would be clearer.
k<-list.files(path=PreyDirecCR,pattern="\\SimCharAA.txt$")
# prepare regular expression
regexp <- "[[:digit:]]+"
# First run of digits in each filename is taken as the control-rule number.
CRnum<-str_extract(k, regexp)
for(crnum in CRnum){
# Input files for this variant: numbers-at-age, sim characteristics, total biomass.
filename1<-paste("Unadj", crnum, "NAASimYear.txt", sep="")
filename2<-paste("Unadj", crnum, "SimCharAA.txt", sep="")
filename3<-paste("Unadj", crnum, "TotBioSimYear.txt", sep="")
NAAfile<-paste(PreyDirecCR,"/",filename1, sep="")
charfile<-paste(PreyDirecCR,"/",filename2, sep="")
totBfile<-paste(PreyDirecCR,"/",filename3, sep="")
preyB<-read.table(totBfile, header=T)
preysim<-read.table(NAAfile, header=T)
# Total prey abundance per sim/year (ages 1-8 summed).
preyNsim<-transmute(preysim, preyN=Age1+Age2+Age3+Age4+Age5+Age6+Age7+Age8, Sim=Sim)
by_simN<-group_by(preyNsim, Sim)
Nyears<-summarise(by_simN, n=n())
preychar<-read.table(charfile, header=T)
preychar<-cbind(preychar, Age=rep(1:8,max(preychar$Sim)))
#preyAvgWt<-#for each Sim, for ages <2, keep for tern forage
#Ternforage<-select(preysim, Age1, Sim)
#BASE ON TOTAL B
# Tern forage base: total prey biomass (not age-1 numbers; see commented lines).
Ternforage<-cbind(preyB, Yr=rep(1:Nyears$n, max(preychar$Sim)))
#Ternforage<- cbind(Ternforage, Yr=rep(1:Nyears$n, max(preychar$Sim)))
# add loop for multiple sims per pred
for (h in 1:max(preyNsim$Sim)){
#preyN for terns is just age 1
#preyN<-Ternforage$Age1[Ternforage$Sim==h]
#preyN for terns is totalB
preyN<-Ternforage$TotalBio[Ternforage$Sim==h]
#preyN<-preyNsim$preyN[preyNsim$Sim==h] #this would be if all ages were forage
nyears<-Nyears$n[h]
#preyNzero for terns is just age1 unfished
#preyNzero<-preychar$UnfishedNAA[preychar$Age==1 & preychar$Sim==h]
#preyNzero for terns is the threshold total B where prod drops, FIXED based on GOM data
preyNzero<-COTEprodThresh
###predator dynamics###
#stuff I need
# Prey-dependent modifiers: recruitment multiplier (hyperbolic, Plaganyi &
# Butterworth style), optionally time-varying natural mortality, survival.
Recmult<-(preypredrec*(preyN/preyNzero))/((preypredrec-1)+(preyN/preyNzero)) #fraction of expected predator BH recruitment dependent on prey N
PredAnnualNatM<-PredannualA*exp(-(preyN/preyNzero)*preyprednatm) #needed if annual nat mort is time varying
TotalSurv<-(1-PredAnnualNatM)*(1-Predexploit) #total annual predator survival
catch<-c()
Spawn<-c() #spawners in numbers or biomass depending
yield<-c()
if(BFT){ #base growth on prey avereage weight; use generalized logistic with lower bound on growth rate (97% of 102% of FWrho to start, trying to center on FWrho)
# NOTE(review): preyAvgWt and BFTGrowThresh are not defined anywhere in this
# script, so this branch would fail if BFT were TRUE; confirm against the
# bluefin-tuna version of the script.
AnnualAlpha<-(0.9*FWalpha) + ((1.1*FWalpha) - (0.9*FWalpha))/(1+exp((1-preypredgrow)*(100*(preyAvgWt-BFTGrowThresh)/BFTGrowThresh))) #alpha changes with herring avg wt, not slope
} else { #base growth on abundance of prey
AnnualGrowParm<-FWrho*((preypredgrow*(preyN/preyNzero))/((preypredgrow-1)+(preyN/preyNzero))) #if grow time varies then this is annual FW slope
}
Predrec<-c()
Predrec[1:PredRecdelay]<-Req #set recruitment in initial years at equlibrium to account for delay
###year loop for predator dynamics
for(y in 1:(nyears-1)){
if(numbs_weight==1){
catch[y]<-PredN[y]*Predexploit
Spawn[y]<-PredN[y]-catch[y]
} else {
yield[y]<-PredB[y]*Predexploit
Spawn[y]<-PredB[y]-yield[y]
}
# Recruits spawned in year y enter PredRecdelay years later; PredN[y+1]
# therefore adds Predrec[y+1], which was spawned at y+1-PredRecdelay (or
# seeded with Req for the first PredRecdelay years).
Predrec[y+PredRecdelay]<-Recmult[y]*((predalpha*Spawn[y])/(predbeta+Spawn[y])) #SR
#Predrec[y+PredRecdelay]<-Recmult[y]*(Spawn[y])/(predalpha+(predbeta*Spawn[y])) #SR from Mangel
PredN[y+1]<-PredN[y]*TotalSurv[y]+Predrec[y+1]
if(numbs_weight!=1){ #only do biomass calcs if requested in those units
if(BFT){
PredB[y+1]<-TotalSurv[y]*(AnnualAlpha[y]*PredN[y]+FWrho*PredB[y])+AnnualAlpha[y]*Predrec[y+1]
} else {
PredB[y+1]<-TotalSurv[y]*(FWalpha*PredN[y]+AnnualGrowParm[y]*PredB[y])+FWalpha*Predrec[y+1]
}
}
} #end y loop for predators
#construct and append to dataframe for all sims
# NOTE(review): the hard-coded indices 4:153, 5:153 and 1:149 assume
# nyears == 150 (so length(Predrec) == 153 with PredRecdelay == 4); confirm
# for runs of a different length.
if(numbs_weight==1){
if(h==1){
#predOut<-data.frame(Sim=h, Yr=c(1:nyears), PredN=PredN, PredRec=c(Predrec[5:153], 0), PredProd=c(Predrec[5:153]*10/PredN[1:149], 0))
predOut<-data.frame(Sim=h, Yr=c(1:nyears), PredN=PredN, PredRec=Predrec[4:153], PredProd=c(0,Predrec[5:153]*10/PredN[1:149]))
} else {
#predOutn<-data.frame(Sim=h, Yr=c(1:nyears), PredN=PredN, PredRec=c(Predrec[5:153], 0), PredProd=c(Predrec[5:153]*10/PredN[1:149], 0))
predOutn<-data.frame(Sim=h, Yr=c(1:nyears), PredN=PredN, PredRec=Predrec[4:153], PredProd=c(0, Predrec[5:153]*10/PredN[1:149]))
predOut<-bind_rows(predOut, predOutn)
}
} else {
# NOTE(review): SSB_MSY is never defined in this script, so this biomass
# branch would fail when numbs_weight != 1; confirm where SSB_MSY should
# come from.
if(h==1){
predOut<-data.frame(Sim=h, Yr=c(1:nyears), PredB=PredB, PredN=PredN, PredRec=Predrec, PredAvWt=PredB/PredN, PredB_status=PredB/SSB_MSY, PredAvWt_status=(PredB/PredN)/Weq )
} else {
predOutn<-data.frame(Sim=h, Yr=c(1:nyears), PredB=PredB, PredN=PredN, PredRec=Predrec, PredAvWt=PredB/PredN, PredB_status=PredB/SSB_MSY, PredAvWt_status=(PredB/PredN)/Weq )
predOut<-bind_rows(predOut, predOutn)
}
}
# Optional per-sim diagnostic panel: abundance, prey-effect curves, prey
# series, productivity and (biomass mode) biomass / average weight.
if(doplotsbysim){
par(mfrow=c(3,2))
par(mar=c(4,4,2,2)+0.1)
par(oma=c(2,2,2,0))
plot(PredN,type='l',col="black",xlab="Year",ylab="Predator Abundance",lwd=2, ylim=c(0, max(PredN)))
plot(preyN,Recmult,col="black",xlab="Prey Abundance",ylab="Recruitment Fraction",lwd=2)
title(paste("preypredrec = ", preypredrec), line=-2)
plot(preyN,PredAnnualNatM,col="black",xlab="Prey Abundance",ylab="Annual Natural Mortality Rate",lwd=2)
title(paste("preyprednatm = ", preyprednatm), line=-2)
if(BFT){
plot(preyAvgWt,AnnualAlpha,col="black",xlab="Prey Avg Wt",ylab="Growth Intercept",lwd=2)
title(paste("preypredgrow = ", preypredgrow), line=-2)
} else {
plot(preyN,AnnualGrowParm,col="black",xlab="Prey Abundance",ylab="Growth Rate",lwd=2)
title(paste("preypredgrow = ", preypredgrow), line=-2)
}
plot(preyN, type="l", xlab="Year", ylab="Prey Abundance", lwd=2)
if(numbs_weight == 1){ #only do for numbers
plot(Predrec[5:153]*10/PredN[1:149], type="l", xlab="Year", ylab="Predator Productivity", lwd=2, ylim=c(0,2))
abline(h=1, col="blue", lty=3)
}
if(numbs_weight != 1){ #only do if biomass units requested
plot(PredB,type='l',col="black",xlab="Year",ylab="Predator Biomass",lwd=2, ylim=c(0, max(PredB)))
abline(h=SSB_MSY)
par(new=T)
plot(PredB/PredN, type="l", col="blue", axes=F, xlab=NA, ylab=NA, ylim=c(0, max(PredB/PredN)))
axis(side = 4)
mtext(side = 4, line = 3, 'Pop avg wt')
lines(PredB/PredN, type='l',col="blue")
abline(h=Weq, col="blue", lty=3)
}
mtext(paste("Sim ", h, sep=""), outer=T, side=3)
} # end doplots
} # end loop over prey sims
# Persist predator trajectories for this control rule.
write.table(predOut, paste(PredDirecCR,"/",crnum,"predBNAvWtStatus.txt", sep="")) #put control rule id in filename
# Summary panels across all sims for this control rule.
if(dosummaryplots) {
par(mfrow=c(3,1))
if(numbs_weight != 1){ #only do if biomass units requested
plot(predOut$Yr[predOut$Sim==1], predOut$PredB[predOut$Sim==1], type="l", col = rgb(0, 0, 0, 0.3), ylab="PredB", xlab="")
for (j in 1:max(predOut$Sim)){
lines(predOut$Yr[predOut$Sim==j], predOut$PredB[predOut$Sim==j], type="l", col = rgb(0, 0, 0, 0.3))
}
}
plot(predOut$Yr[predOut$Sim==1], predOut$PredN[predOut$Sim==1], ylim=c(0,max(predOut$PredN)),type="l", col = rgb(0, 0, 0, 0.3), ylab="PredN", xlab="")
for (j in 1:max(predOut$Sim)){
lines(predOut$Yr[predOut$Sim==j], predOut$PredN[predOut$Sim==j], type="l", col = rgb(0, 0, 0, 0.3))
}
if(numbs_weight != 1){ #only do if biomass units requested
plot(predOut$Yr[predOut$Sim==1], predOut$PredAvWt_status[predOut$Sim==1], type="l", col = rgb(0, 0, 0, 0.3), ylab="PredAvWt", xlab="")
for (j in 1:max(predOut$Sim)){
lines(predOut$Yr[predOut$Sim==j], predOut$PredAvWt_status[predOut$Sim==j], type="l", col = rgb(0, 0, 0, 0.3))
}
abline(h=1.0, col="blue", lwd=3)
mtext(paste("Control Rule ", crnum, sep=""), outer=T, side=3)
}
if(numbs_weight == 1){ #only do if numbers units requested
plot(predOut$Yr[predOut$Sim==1], predOut$PredProd[predOut$Sim==1], type="l", col = rgb(0, 0, 0, 0.3), ylab="PredProd", xlab="")
for (j in 1:max(predOut$Sim)){
lines(predOut$Yr[predOut$Sim==j], predOut$PredProd[predOut$Sim==j], type="l", col = rgb(0, 0, 0, 0.3))
}
abline(h=1.0, col="blue", lwd=3)
mtext(paste("Control Rule ", crnum, sep=""), outer=T, side=3)
}
}
}#end crnum loop over control rule variants
}#end CRtype loop over control rule type folders
}#end OMtype list over operating model folders
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateNoiseImage.R
\name{generateNoiseImage}
\alias{generateNoiseImage}
\title{Generate single noise image based on parameter vector}
\usage{
generateNoiseImage(params, p)
}
\arguments{
\item{params}{Vector with each value specifying the contrast of each patch in noise.}
\item{p}{3D patch matrix (generated using \code{generateNoisePattern()}).}
}
\value{
The noise pattern as pixel matrix.
}
\description{
Generate single noise image based on parameter vector
}
\examples{
#params <- rnorm(4092) # generates 4092 normally distributed random values
#p <- generateNoisePattern(img_size=256)
#noise <- generateNoiseImage(params, p)
}
|
/man/generateNoiseImage.Rd
|
no_license
|
rdotsch/rcicr
|
R
| false
| true
| 714
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateNoiseImage.R
\name{generateNoiseImage}
\alias{generateNoiseImage}
\title{Generate single noise image based on parameter vector}
\usage{
generateNoiseImage(params, p)
}
\arguments{
\item{params}{Vector with each value specifying the contrast of each patch in noise.}
\item{p}{3D patch matrix (generated using \code{generateNoisePattern()}).}
}
\value{
The noise pattern as pixel matrix.
}
\description{
Generate single noise image based on parameter vector
}
\examples{
#params <- rnorm(4092) # generates 4092 normally distributed random values
#p <- generateNoisePattern(img_size=256)
#noise <- generateNoiseImage(params, p)
}
|
# Unit tests (testthat) for kf_add_metric(): argument validation, adding a
# single metric, and chaining multiple metrics. Output is compared against the
# exact JSON the metrics object prints.
# Shared fixture: an empty metrics object.
test_metrics <- kf_init_metrics()
test_that("Input validation works", {
# Name must match the required (lowercase) pattern, value must be numeric,
# and format is restricted to RAW or PERCENTAGE.
expect_error(kf_add_metric(test_metrics, "ROC"), "Name does not match required pattern.")
expect_error(kf_add_metric(test_metrics, name = "roc", value = "15"), "Value must be numeric.")
expect_error(kf_add_metric(test_metrics, name = "roc", value = .15, format = "plain"), "Format must be either RAW or PERCENTAGE.")
})
test_that("Single metric is added correctly", {
one_added <- kf_add_metric(test_metrics, "roc", .5, "RAW")
# fixed = TRUE: compare the JSON literally, not as a regular expression.
expect_output(print(one_added, pretty = FALSE), '{"metrics":[{"name":"roc","numberValue":0.5,"format":"RAW"}]}', fixed = TRUE)
})
test_that("Multiple metrics are added correctly", {
# kf_add_metric should be pipeable and append metrics in call order.
two_added <-
test_metrics %>%
kf_add_metric("roc", .5, "RAW") %>%
kf_add_metric("accuracy", .5, "PERCENTAGE")
expect_output(print(two_added, pretty = FALSE), '{"metrics":[{"name":"roc","numberValue":0.5,"format":"RAW"},{"name":"accuracy","numberValue":0.5,"format":"PERCENTAGE"}]}', fixed = TRUE)
})
|
/tests/testthat/test-kf_add_metric.R
|
permissive
|
ndiquattro/kflow
|
R
| false
| false
| 1,012
|
r
|
# Unit tests (testthat) for kf_add_metric(): argument validation, adding a
# single metric, and chaining multiple metrics. Output is compared against the
# exact JSON the metrics object prints.
# Shared fixture: an empty metrics object.
test_metrics <- kf_init_metrics()
test_that("Input validation works", {
# Name must match the required (lowercase) pattern, value must be numeric,
# and format is restricted to RAW or PERCENTAGE.
expect_error(kf_add_metric(test_metrics, "ROC"), "Name does not match required pattern.")
expect_error(kf_add_metric(test_metrics, name = "roc", value = "15"), "Value must be numeric.")
expect_error(kf_add_metric(test_metrics, name = "roc", value = .15, format = "plain"), "Format must be either RAW or PERCENTAGE.")
})
test_that("Single metric is added correctly", {
one_added <- kf_add_metric(test_metrics, "roc", .5, "RAW")
# fixed = TRUE: compare the JSON literally, not as a regular expression.
expect_output(print(one_added, pretty = FALSE), '{"metrics":[{"name":"roc","numberValue":0.5,"format":"RAW"}]}', fixed = TRUE)
})
test_that("Multiple metrics are added correctly", {
# kf_add_metric should be pipeable and append metrics in call order.
two_added <-
test_metrics %>%
kf_add_metric("roc", .5, "RAW") %>%
kf_add_metric("accuracy", .5, "PERCENTAGE")
expect_output(print(two_added, pretty = FALSE), '{"metrics":[{"name":"roc","numberValue":0.5,"format":"RAW"},{"name":"accuracy","numberValue":0.5,"format":"PERCENTAGE"}]}', fixed = TRUE)
})
|
#' Bisulfite conversion rate visualization
#'
#' Computes the bisulfite conversion rate for each sample from the non-CpG
#' (CHH/CHG) methylation counts stored in the pheno data of the bs object.
#' The rate is estimated as 1 - (methylated CHH + CHG calls)/(all CHH + CHG
#' calls): non-CpG cytosines are expected to be unmethylated, so methylated
#' calls there indicate incomplete conversion.
#'@param bs bsseq object whose \code{pData} contains the columns
#'  \code{CHH_meth}, \code{CHH_unmeth}, \code{CHG_meth} and \code{CHG_unmeth}
#'@return A data.frame with one row per sample and columns \code{sample}
#'  (sample name) and \code{bsc} (estimated bisulfite conversion rate).
#'  NOTE(review): despite the function name, no plot is drawn here; the
#'  caller is expected to plot the returned data.frame.
#'@examples
#'directory <- system.file("extdata/bismark_data", package='scmeth')
#'bs <- HDF5Array::loadHDF5SummarizedExperiment(directory)
#'bsConversionPlot(bs)
#'@export
bsConversionPlot <- function(bs){
phenoData <- bsseq::pData(bs)
# Unconverted fraction = methylated share of all non-CpG calls; the
# conversion rate is its complement.
phenoData$bsconversion <- 1 - (phenoData$CHH_meth + phenoData$CHG_meth)/
(phenoData$CHH_meth + phenoData$CHH_unmeth+
phenoData$CHG_meth + phenoData$CHG_unmeth)
bscDf <- data.frame(sample=rownames(phenoData), bsc=phenoData$bsconversion)
return(bscDf)
}
|
/R/bsConversionPlot.R
|
no_license
|
aryeelab/scmeth
|
R
| false
| false
| 811
|
r
|
#' Bisulfite conversion rate visualization
#'
#' Computes the bisulfite conversion rate for each sample from the non-CpG
#' (CHH/CHG) methylation counts stored in the pheno data of the bs object.
#' The rate is estimated as 1 - (methylated CHH + CHG calls)/(all CHH + CHG
#' calls): non-CpG cytosines are expected to be unmethylated, so methylated
#' calls there indicate incomplete conversion.
#'@param bs bsseq object whose \code{pData} contains the columns
#'  \code{CHH_meth}, \code{CHH_unmeth}, \code{CHG_meth} and \code{CHG_unmeth}
#'@return A data.frame with one row per sample and columns \code{sample}
#'  (sample name) and \code{bsc} (estimated bisulfite conversion rate).
#'  NOTE(review): despite the function name, no plot is drawn here; the
#'  caller is expected to plot the returned data.frame.
#'@examples
#'directory <- system.file("extdata/bismark_data", package='scmeth')
#'bs <- HDF5Array::loadHDF5SummarizedExperiment(directory)
#'bsConversionPlot(bs)
#'@export
bsConversionPlot <- function(bs){
phenoData <- bsseq::pData(bs)
# Unconverted fraction = methylated share of all non-CpG calls; the
# conversion rate is its complement.
phenoData$bsconversion <- 1 - (phenoData$CHH_meth + phenoData$CHG_meth)/
(phenoData$CHH_meth + phenoData$CHH_unmeth+
phenoData$CHG_meth + phenoData$CHG_unmeth)
bscDf <- data.frame(sample=rownames(phenoData), bsc=phenoData$bsconversion)
return(bscDf)
}
|
# Table 2 builder: for each sample period, mean age by sex and the share of
# highly-educated respondents (edu > 15) by sex, from sample B.
#
# Fix(review): the original script assumed readr and dplyr were already
# attached (read_csv, filter, %>%, group_by, summarize, left_join, mutate are
# all used below); attach them explicitly so the script runs standalone.
library(readr)
library(dplyr)

# NOTE(review): the hard-coded setwd() ties the script to one machine; kept
# for backward compatibility, but a project-relative path would be portable.
setwd("C:/Users/lorenzo/Documents/Master in Economics/Programming Course/R/Hackathon")

#Inserting sample B; force the hours and wage columns to double.
B <- read_csv("sample_B.csv",
              col_types = cols(new_hrs = col_double(), new_wage = col_double()))
B

#Table 2
#Period: 1967-79
# NOTE(review): the filter uses 1968:1980 for the "1967-79" period (and
# similarly below) -- confirm the one-year survey/income offset is intended.
B_6779 <- filter(B, year %in% 1968:1980)
B_6779 %>%
  group_by(sex) %>%
  summarize(mean(age))                  # mean age by sex
nmale <- B_6779 %>%                     # group sizes by sex
  group_by(sex) %>%
  summarize(nmale = n())
n1male <- B_6779 %>%                    # highly-educated counts by sex
  filter(edu > 15) %>%
  group_by(sex) %>%
  summarize(n1male = n())
counts <- left_join(n1male, nmale) %>%  # joins implicitly by sex
  mutate(percentage = n1male/nmale)
# Fix(review): every other period displays `counts`; the first period
# inconsistently omitted it.
counts

#Period: 1980-1989
B_8089 <- filter(B, year %in% 1981:1990)
B_8089 %>%
  group_by(sex) %>%
  summarize(mean(age))
nmale <- B_8089 %>%
  group_by(sex) %>%
  summarize(nmale = n())
n1male <- B_8089 %>%
  filter(edu > 15) %>%
  group_by(sex) %>%
  summarize(n1male = n())
counts <- left_join(n1male, nmale) %>%
  mutate(percentage = n1male/nmale)
counts

#Period: 1990-99
B_9099 <- filter(B, year %in% 1991:2000)
B_9099 %>%
  group_by(sex) %>%
  summarize(mean(age))
nmale <- B_9099 %>%
  group_by(sex) %>%
  summarize(nmale = n())
n1male <- B_9099 %>%
  filter(edu > 15) %>%
  group_by(sex) %>%
  summarize(n1male = n())
counts <- left_join(n1male, nmale) %>%
  mutate(percentage = n1male/nmale)
counts

#Period 2000-today
B_0005 <- filter(B, year >= 2001)
B_0005 %>%
  group_by(sex) %>%
  summarize(mean(age))
nmale <- B_0005 %>%
  group_by(sex) %>%
  summarize(nmale = n())
n1male <- B_0005 %>%
  filter(edu > 15) %>%
  group_by(sex) %>%
  summarize(n1male = n())
counts <- left_join(n1male, nmale) %>%
  mutate(percentage = n1male/nmale)
counts
|
/Lorenzo/Table2.R
|
no_license
|
lorenzokaaks/Hackathon18
|
R
| false
| false
| 1,638
|
r
|
# Table 2 builder: for each sample period, mean age by sex and the share of
# highly-educated respondents (edu > 15) by sex, from sample B.
#
# Fix(review): the original script assumed readr and dplyr were already
# attached (read_csv, filter, %>%, group_by, summarize, left_join, mutate are
# all used below); attach them explicitly so the script runs standalone.
library(readr)
library(dplyr)

# NOTE(review): the hard-coded setwd() ties the script to one machine; kept
# for backward compatibility, but a project-relative path would be portable.
setwd("C:/Users/lorenzo/Documents/Master in Economics/Programming Course/R/Hackathon")

#Inserting sample B; force the hours and wage columns to double.
B <- read_csv("sample_B.csv",
              col_types = cols(new_hrs = col_double(), new_wage = col_double()))
B

#Table 2
#Period: 1967-79
# NOTE(review): the filter uses 1968:1980 for the "1967-79" period (and
# similarly below) -- confirm the one-year survey/income offset is intended.
B_6779 <- filter(B, year %in% 1968:1980)
B_6779 %>%
  group_by(sex) %>%
  summarize(mean(age))                  # mean age by sex
nmale <- B_6779 %>%                     # group sizes by sex
  group_by(sex) %>%
  summarize(nmale = n())
n1male <- B_6779 %>%                    # highly-educated counts by sex
  filter(edu > 15) %>%
  group_by(sex) %>%
  summarize(n1male = n())
counts <- left_join(n1male, nmale) %>%  # joins implicitly by sex
  mutate(percentage = n1male/nmale)
# Fix(review): every other period displays `counts`; the first period
# inconsistently omitted it.
counts

#Period: 1980-1989
B_8089 <- filter(B, year %in% 1981:1990)
B_8089 %>%
  group_by(sex) %>%
  summarize(mean(age))
nmale <- B_8089 %>%
  group_by(sex) %>%
  summarize(nmale = n())
n1male <- B_8089 %>%
  filter(edu > 15) %>%
  group_by(sex) %>%
  summarize(n1male = n())
counts <- left_join(n1male, nmale) %>%
  mutate(percentage = n1male/nmale)
counts

#Period: 1990-99
B_9099 <- filter(B, year %in% 1991:2000)
B_9099 %>%
  group_by(sex) %>%
  summarize(mean(age))
nmale <- B_9099 %>%
  group_by(sex) %>%
  summarize(nmale = n())
n1male <- B_9099 %>%
  filter(edu > 15) %>%
  group_by(sex) %>%
  summarize(n1male = n())
counts <- left_join(n1male, nmale) %>%
  mutate(percentage = n1male/nmale)
counts

#Period 2000-today
B_0005 <- filter(B, year >= 2001)
B_0005 %>%
  group_by(sex) %>%
  summarize(mean(age))
nmale <- B_0005 %>%
  group_by(sex) %>%
  summarize(nmale = n())
n1male <- B_0005 %>%
  filter(edu > 15) %>%
  group_by(sex) %>%
  summarize(n1male = n())
counts <- left_join(n1male, nmale) %>%
  mutate(percentage = n1male/nmale)
counts
|
# Create graphs for visualizations
# set seed for random processes (layouts and walktrap are stochastic)
set.seed(617)
library(igraph)
library('cluster')
library('animation')
# Set to directory which contains files
setwd('/Users/angelavierling-claassen/Documents/DataScience/GayGraphs/DataFromTrials')
# read in the graph edges data and the nodes data (end-of-run snapshot)
gaygraph_data_frame <- read.table('edgesTrial14end.txt')
nodes <- read.table('nodesTrial14end.txt')
# Name the edge and node columns
colnames(gaygraph_data_frame) <- c('ego', 'alter', 'family', 'friend')
colnames(nodes) <- c('ego', 'opin', 'gay')
# Keep only edges that are family or friend ties
gaygraph_nonzero_edges <- subset(gaygraph_data_frame, (family > 0 | friend > 0))
# Build an undirected graph; 'collapse' merges reciprocal directed edges
# (and keeps edge attributes)
gaygraph <- as.undirected(graph.data.frame(d = gaygraph_nonzero_edges, vertices = nodes), mode = 'collapse')
# Walktrap community detection
wt <- cluster_walktrap(gaygraph, steps = 4, modularity = TRUE, membership = TRUE)
# BUG FIX: community membership was computed but never attached to the
# graph, so the later get.vertex.attribute(..., "membership") lookups
# returned NULL and the per-community plots showed the whole graph.
# Attach it here — and do NOT rebuild the graph afterwards (the original
# rebuilt an identical graph, which would have discarded the attribute).
V(gaygraph)$membership <- membership(wt)
# Layout for the full graph, and vertex colors from the "opin" attribute.
# Color scheme: darker = less accepting; opin == 1 (gay agents) = Red.
# BUG FIX: the original overwrote the numeric opinion vector with color
# strings and then kept applying `<` to it, i.e. string comparisons on a
# mixed character vector; bin the numeric values once with findInterval
# instead (same bins: [0,.2) Black, [.2,.4) gray48, [.4,.6) gray78,
# [.6,.8) gray88, [.8,1) gray98, 1 = Red).
fullGraph_layout <- layout.kamada.kawai(gaygraph)
opin_vals <- get.vertex.attribute(gaygraph, "opin")
colors <- c('Black', 'gray48', 'gray78', 'gray88', 'gray98', 'Red')
gay_vertex_colors <- colors[findInterval(opin_vals, c(.2, .4, .6, .8, 1)) + 1]
# Tie colors: Blue = family, Green = friend
tie_type_colors <- c('Blue', 'Green')
E(gaygraph)$color[E(gaygraph)$family == 1] <- tie_type_colors[1]
E(gaygraph)$color[E(gaygraph)$friend == 1] <- tie_type_colors[2]
E(gaygraph)$arrow.size <- .5
V(gaygraph)$color <- gay_vertex_colors
V(gaygraph)$frame <- gay_vertex_colors
# Plot the full graph to a jpeg
jpeg("trial14end.jpg")
plot(gaygraph,
     layout = fullGraph_layout,
     vertex.color = gay_vertex_colors,
     vertex.label = NA,
     edge.arrow.size = .5)
dev.off()
###############################
## Next try looking at just gay nodes
# Keep only vertices whose "gay" attribute is 1 (delete all gay == 0)
gay_only_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph,name = "gay")==0])
# Print a summary of the subgraph to the console
gay_only_graph
gay_layout <- layout.fruchterman.reingold(gay_only_graph)
jpeg("trial14_gayonly_end.jpg")
plot(gay_only_graph, layout=gay_layout)
dev.off()
################################
## Look at subgraph of same number of straight nodes
straight_only_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "gay") == 1])
# BUG FIX: `1:n - m` parses as `(1:n) - m` (a shifted sequence containing
# zero and possibly negative vertex ids), not the intended `1:(n - m)`.
# Use seq_len to drop the first (n_straight - n_gay) vertices so the
# straight subgraph has the same size as the gay subgraph.
straight_only_graph <- delete.vertices(straight_only_graph,
  V(straight_only_graph)[seq_len(length(V(straight_only_graph)) - length(V(gay_only_graph)))])
jpeg("trial14_straightonly_end.jpg")
straight_layout <- layout.fruchterman.reingold(straight_only_graph)
plot(straight_only_graph, layout = straight_layout)
dev.off()
################################
## Graphs of each walktrap community (end-of-run data)
# NOTE(review): these lookups need a "membership" vertex attribute on
# gaygraph; if it was never attached after cluster_walktrap(), the `!=`
# filter matches nothing and every plot shows the whole graph — confirm.
comm0_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 0])
community_layout <- layout.fruchterman.reingold(comm0_graph)
jpeg("trial14_comm0_end.jpg")
plot(comm0_graph, layout = community_layout)
dev.off()
comm1_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 1])
community_layout <- layout.fruchterman.reingold(comm1_graph)
jpeg("trial14_comm1_end.jpg")
plot(comm1_graph, layout = community_layout)
dev.off()
comm2_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 2])
community_layout <- layout.fruchterman.reingold(comm2_graph)
jpeg("trial14_comm2_end.jpg")
plot(comm2_graph, layout = community_layout)
dev.off()
comm3_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 3])
community_layout <- layout.fruchterman.reingold(comm3_graph)
jpeg("trial14_comm3_end.jpg")
plot(comm3_graph, layout = community_layout)
dev.off()
# BUG FIX: comm4 previously filtered on != 3 (copy-paste), which just
# re-plotted community 3; filter on != 4.
comm4_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 4])
community_layout <- layout.fruchterman.reingold(comm4_graph)
jpeg("trial14_comm4_end.jpg")
plot(comm4_graph, layout = community_layout)
dev.off()
## Repeat it all for the data from start of run
# read in the graph edges data and the nodes data
gaygraph_data_frame<-read.table('edgesTrial14start.txt')
nodes<-read.table('nodesTrial14start.txt')
# Make the names of columns for edges and notes
colnames(gaygraph_data_frame) <- c('ego', 'alter', 'family', 'friend')
colnames(nodes)<-c('ego','opin','gay')
# Keep only nonzero edges (family or friend ties)
gaygraph_nonzero_edges <- subset(gaygraph_data_frame,(family > 0 | friend > 0 ))
# Make the data as a graph object, include nodes (vertices), method of 'each' will
# simply replace each directed edge with an undirected edge (and keeps edge attributes)
# NOTE(review): the end-of-run section used mode='collapse'; 'each' keeps
# duplicate edges for reciprocal ties — confirm this difference is intended.
gaygraph <- as.undirected(graph.data.frame(d=gaygraph_nonzero_edges,vertices=nodes), mode='each')
# Simple graph with a layout, vertex colors
fullGraph_layout<- layout.kamada.kawai(gaygraph)
# Color by opinion: darker = less accepting, 1 (gay) = Red.
# NOTE(review): the first assignment turns the vector into character, so
# the later `<` comparisons are string comparisons; works for plain
# decimals in [0, 1] but is fragile — consider binning numerically.
gay_vertex_colors = get.vertex.attribute(gaygraph,"opin")
colors = c('Black', 'gray48','gray78','gray88','gray98','Red')
gay_vertex_colors[gay_vertex_colors < .2] = colors[1]
gay_vertex_colors[gay_vertex_colors < .4] = colors[2]
gay_vertex_colors[gay_vertex_colors < .6] = colors[3]
gay_vertex_colors[gay_vertex_colors < .8] = colors[4]
gay_vertex_colors[gay_vertex_colors < 1] = colors[5]
gay_vertex_colors[gay_vertex_colors == 1] = colors[6]
# Make the tie types colored by friend or family (Blue = family, Green = friend)
tie_type_colors = c('Blue', 'Green')
E(gaygraph)$color[ E(gaygraph)$family==1 ] = tie_type_colors[1]
E(gaygraph)$color[ E(gaygraph)$friend==1 ] = tie_type_colors[2]
E(gaygraph)$arrow.size=.5
V(gaygraph)$color = gay_vertex_colors
V(gaygraph)$frame = gay_vertex_colors
# Plot the graph
jpeg("trial14start.jpg")
plot(gaygraph,
layout=fullGraph_layout,
vertex.color=gay_vertex_colors,
vertex.label=NA,
edge.arrow.size=.5)
dev.off()
###############################
## Next try looking at just gay nodes
# Keep only vertices whose "gay" attribute is 1 (delete all gay == 0)
gay_only_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph,name = "gay")==0])
gay_layout <- layout.fruchterman.reingold(gay_only_graph)
jpeg("trial14_gayonly_start.jpg")
plot(gay_only_graph, layout=gay_layout)
dev.off()
################################
## Look at subgraph of same number of straight nodes
straight_only_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "gay") == 1])
# BUG FIX: `1:n - m` parses as `(1:n) - m` (a shifted sequence containing
# zero and possibly negative vertex ids), not the intended `1:(n - m)`.
# Use seq_len to drop the first (n_straight - n_gay) vertices so the
# straight subgraph has the same size as the gay subgraph.
straight_only_graph <- delete.vertices(straight_only_graph,
  V(straight_only_graph)[seq_len(length(V(straight_only_graph)) - length(V(gay_only_graph)))])
jpeg("trial14_straightonly_start.jpg")
straight_layout <- layout.fruchterman.reingold(straight_only_graph)
plot(straight_only_graph, layout = straight_layout)
dev.off()
################################
## Graphs of each walktrap community (start-of-run data)
# BUG FIX: community detection was never run on the start-of-run graph,
# so the "membership" vertex attribute did not exist and every plot below
# showed the full graph; compute and attach it here.
wt_start <- cluster_walktrap(gaygraph, steps = 4, modularity = TRUE, membership = TRUE)
V(gaygraph)$membership <- membership(wt_start)
# BUG FIX: the output files were named "..._end.jpg", overwriting the
# end-of-run community plots; write "..._start.jpg" instead.
comm0_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 0])
community_layout <- layout.fruchterman.reingold(comm0_graph)
jpeg("trial14_comm0_start.jpg")
plot(comm0_graph, layout = community_layout)
dev.off()
comm1_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 1])
community_layout <- layout.fruchterman.reingold(comm1_graph)
jpeg("trial14_comm1_start.jpg")
plot(comm1_graph, layout = community_layout)
dev.off()
comm2_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 2])
community_layout <- layout.fruchterman.reingold(comm2_graph)
jpeg("trial14_comm2_start.jpg")
plot(comm2_graph, layout = community_layout)
dev.off()
comm3_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 3])
community_layout <- layout.fruchterman.reingold(comm3_graph)
jpeg("trial14_comm3_start.jpg")
plot(comm3_graph, layout = community_layout)
dev.off()
# BUG FIX: comm4 previously filtered on != 3 (copy-paste), which just
# re-plotted community 3; filter on != 4.
comm4_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 4])
community_layout <- layout.fruchterman.reingold(comm4_graph)
jpeg("trial14_comm4_start.jpg")
plot(comm4_graph, layout = community_layout)
dev.off()
|
/gaygraph_viz_old.R
|
no_license
|
techtronics/GayGraphs
|
R
| false
| false
| 8,665
|
r
|
# Create graphs for visualizations
# set seed for random processes (layouts and walktrap are stochastic)
set.seed(617)
library(igraph)
library('cluster')
library('animation')
# Set to directory which contains files
setwd('/Users/angelavierling-claassen/Documents/DataScience/GayGraphs/DataFromTrials')
# read in the graph edges data and the nodes data (end-of-run snapshot)
gaygraph_data_frame <- read.table('edgesTrial14end.txt')
nodes <- read.table('nodesTrial14end.txt')
# Name the edge and node columns
colnames(gaygraph_data_frame) <- c('ego', 'alter', 'family', 'friend')
colnames(nodes) <- c('ego', 'opin', 'gay')
# Keep only edges that are family or friend ties
gaygraph_nonzero_edges <- subset(gaygraph_data_frame, (family > 0 | friend > 0))
# Build an undirected graph; 'collapse' merges reciprocal directed edges
# (and keeps edge attributes)
gaygraph <- as.undirected(graph.data.frame(d = gaygraph_nonzero_edges, vertices = nodes), mode = 'collapse')
# Walktrap community detection
wt <- cluster_walktrap(gaygraph, steps = 4, modularity = TRUE, membership = TRUE)
# BUG FIX: community membership was computed but never attached to the
# graph, so the later get.vertex.attribute(..., "membership") lookups
# returned NULL and the per-community plots showed the whole graph.
# Attach it here — and do NOT rebuild the graph afterwards (the original
# rebuilt an identical graph, which would have discarded the attribute).
V(gaygraph)$membership <- membership(wt)
# Layout for the full graph, and vertex colors from the "opin" attribute.
# Color scheme: darker = less accepting; opin == 1 (gay agents) = Red.
# BUG FIX: the original overwrote the numeric opinion vector with color
# strings and then kept applying `<` to it, i.e. string comparisons on a
# mixed character vector; bin the numeric values once with findInterval
# instead (same bins: [0,.2) Black, [.2,.4) gray48, [.4,.6) gray78,
# [.6,.8) gray88, [.8,1) gray98, 1 = Red).
fullGraph_layout <- layout.kamada.kawai(gaygraph)
opin_vals <- get.vertex.attribute(gaygraph, "opin")
colors <- c('Black', 'gray48', 'gray78', 'gray88', 'gray98', 'Red')
gay_vertex_colors <- colors[findInterval(opin_vals, c(.2, .4, .6, .8, 1)) + 1]
# Tie colors: Blue = family, Green = friend
tie_type_colors <- c('Blue', 'Green')
E(gaygraph)$color[E(gaygraph)$family == 1] <- tie_type_colors[1]
E(gaygraph)$color[E(gaygraph)$friend == 1] <- tie_type_colors[2]
E(gaygraph)$arrow.size <- .5
V(gaygraph)$color <- gay_vertex_colors
V(gaygraph)$frame <- gay_vertex_colors
# Plot the full graph to a jpeg
jpeg("trial14end.jpg")
plot(gaygraph,
     layout = fullGraph_layout,
     vertex.color = gay_vertex_colors,
     vertex.label = NA,
     edge.arrow.size = .5)
dev.off()
###############################
## Next try looking at just gay nodes
gay_only_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph,name = "gay")==0])
gay_only_graph
gay_layout <- layout.fruchterman.reingold(gay_only_graph)
jpeg("trial14_gayonly_end.jpg")
plot(gay_only_graph, layout=gay_layout)
dev.off()
################################
## Look at subgraph of same number of straight nodes
straight_only_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "gay") == 1])
# BUG FIX: `1:n - m` parses as `(1:n) - m` (a shifted sequence containing
# zero and possibly negative vertex ids), not the intended `1:(n - m)`.
# Use seq_len to drop the first (n_straight - n_gay) vertices so the
# straight subgraph has the same size as the gay subgraph.
straight_only_graph <- delete.vertices(straight_only_graph,
  V(straight_only_graph)[seq_len(length(V(straight_only_graph)) - length(V(gay_only_graph)))])
jpeg("trial14_straightonly_end.jpg")
straight_layout <- layout.fruchterman.reingold(straight_only_graph)
plot(straight_only_graph, layout = straight_layout)
dev.off()
################################
## Graphs of each walktrap community (end-of-run data)
# NOTE(review): these lookups need a "membership" vertex attribute on
# gaygraph; if it was never attached after cluster_walktrap(), the `!=`
# filter matches nothing and every plot shows the whole graph — confirm.
comm0_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 0])
community_layout <- layout.fruchterman.reingold(comm0_graph)
jpeg("trial14_comm0_end.jpg")
plot(comm0_graph, layout = community_layout)
dev.off()
comm1_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 1])
community_layout <- layout.fruchterman.reingold(comm1_graph)
jpeg("trial14_comm1_end.jpg")
plot(comm1_graph, layout = community_layout)
dev.off()
comm2_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 2])
community_layout <- layout.fruchterman.reingold(comm2_graph)
jpeg("trial14_comm2_end.jpg")
plot(comm2_graph, layout = community_layout)
dev.off()
comm3_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 3])
community_layout <- layout.fruchterman.reingold(comm3_graph)
jpeg("trial14_comm3_end.jpg")
plot(comm3_graph, layout = community_layout)
dev.off()
# BUG FIX: comm4 previously filtered on != 3 (copy-paste), which just
# re-plotted community 3; filter on != 4.
comm4_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 4])
community_layout <- layout.fruchterman.reingold(comm4_graph)
jpeg("trial14_comm4_end.jpg")
plot(comm4_graph, layout = community_layout)
dev.off()
## Repeat it all for the data from start of run
# read in the graph edges data and the nodes data
gaygraph_data_frame<-read.table('edgesTrial14start.txt')
nodes<-read.table('nodesTrial14start.txt')
# Make the names of columns for edges and notes
colnames(gaygraph_data_frame) <- c('ego', 'alter', 'family', 'friend')
colnames(nodes)<-c('ego','opin','gay')
# Eliminate nonzero edges
gaygraph_nonzero_edges <- subset(gaygraph_data_frame,(family > 0 | friend > 0 ))
# Make the data as a graph object, include nodes (vertices), method of 'each' will
# simply replace each directed edge with an undirected edge (and keeps edge attributes)
gaygraph <- as.undirected(graph.data.frame(d=gaygraph_nonzero_edges,vertices=nodes), mode='each')
# Simple graph with a layout, vertex colors
fullGraph_layout<- layout.kamada.kawai(gaygraph)
gay_vertex_colors = get.vertex.attribute(gaygraph,"opin")
colors = c('Black', 'gray48','gray78','gray88','gray98','Red')
gay_vertex_colors[gay_vertex_colors < .2] = colors[1]
gay_vertex_colors[gay_vertex_colors < .4] = colors[2]
gay_vertex_colors[gay_vertex_colors < .6] = colors[3]
gay_vertex_colors[gay_vertex_colors < .8] = colors[4]
gay_vertex_colors[gay_vertex_colors < 1] = colors[5]
gay_vertex_colors[gay_vertex_colors == 1] = colors[6]
# Make the tie types colored by friend or family
tie_type_colors = c('Blue', 'Green')
E(gaygraph)$color[ E(gaygraph)$family==1 ] = tie_type_colors[1]
E(gaygraph)$color[ E(gaygraph)$friend==1 ] = tie_type_colors[2]
E(gaygraph)$arrow.size=.5
V(gaygraph)$color = gay_vertex_colors
V(gaygraph)$frame = gay_vertex_colors
# Plot the graph
jpeg("trial14start.jpg")
plot(gaygraph,
layout=fullGraph_layout,
vertex.color=gay_vertex_colors,
vertex.label=NA,
edge.arrow.size=.5)
dev.off()
###############################
## Next try looking at just gay nodes
gay_only_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph,name = "gay")==0])
gay_layout <- layout.fruchterman.reingold(gay_only_graph)
jpeg("trial14_gayonly_start.jpg")
plot(gay_only_graph, layout=gay_layout)
dev.off()
################################
## Look at subgraph of same number of straight nodes
straight_only_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "gay") == 1])
# BUG FIX: `1:n - m` parses as `(1:n) - m` (a shifted sequence containing
# zero and possibly negative vertex ids), not the intended `1:(n - m)`.
# Use seq_len to drop the first (n_straight - n_gay) vertices so the
# straight subgraph has the same size as the gay subgraph.
straight_only_graph <- delete.vertices(straight_only_graph,
  V(straight_only_graph)[seq_len(length(V(straight_only_graph)) - length(V(gay_only_graph)))])
jpeg("trial14_straightonly_start.jpg")
straight_layout <- layout.fruchterman.reingold(straight_only_graph)
plot(straight_only_graph, layout = straight_layout)
dev.off()
################################
## Graphs of each walktrap community (start-of-run data)
# BUG FIX: community detection was never run on the start-of-run graph,
# so the "membership" vertex attribute did not exist and every plot below
# showed the full graph; compute and attach it here.
wt_start <- cluster_walktrap(gaygraph, steps = 4, modularity = TRUE, membership = TRUE)
V(gaygraph)$membership <- membership(wt_start)
# BUG FIX: the output files were named "..._end.jpg", overwriting the
# end-of-run community plots; write "..._start.jpg" instead.
comm0_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 0])
community_layout <- layout.fruchterman.reingold(comm0_graph)
jpeg("trial14_comm0_start.jpg")
plot(comm0_graph, layout = community_layout)
dev.off()
comm1_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 1])
community_layout <- layout.fruchterman.reingold(comm1_graph)
jpeg("trial14_comm1_start.jpg")
plot(comm1_graph, layout = community_layout)
dev.off()
comm2_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 2])
community_layout <- layout.fruchterman.reingold(comm2_graph)
jpeg("trial14_comm2_start.jpg")
plot(comm2_graph, layout = community_layout)
dev.off()
comm3_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 3])
community_layout <- layout.fruchterman.reingold(comm3_graph)
jpeg("trial14_comm3_start.jpg")
plot(comm3_graph, layout = community_layout)
dev.off()
# BUG FIX: comm4 previously filtered on != 3 (copy-paste), which just
# re-plotted community 3; filter on != 4.
comm4_graph <- delete.vertices(gaygraph, V(gaygraph)[get.vertex.attribute(gaygraph, name = "membership") != 4])
community_layout <- layout.fruchterman.reingold(comm4_graph)
jpeg("trial14_comm4_start.jpg")
plot(comm4_graph, layout = community_layout)
dev.off()
|
###
### low-level file handling functions
###
## txt format
# Read an entire text file into a one-column data.frame (column "text"),
# joining the lines with "\n". Extra arguments are passed to file().
get_txt <- function(f, ...) {
  con <- file(f, ...)
  on.exit(close(con), add = TRUE)
  contents <- readLines(con, warn = FALSE)
  data.frame(text = paste(contents, collapse = "\n"), stringsAsFactors = FALSE)
}
## csv format
# Read a delimited text file into a data.frame whose first column is
# `text` (taken from `text_field`), followed by the remaining columns.
#
# path       : file to read
# text_field : name or index of the column holding the document text
# ...        : extra arguments for data.table::fread(); `encoding` is
#              intercepted and handled here
get_csv <- function(path, text_field, ...) {
  args <- list(...)
  # BUG FIX: the original evaluated `args$encoding == 'native.enc'`
  # without a NULL check, so calling get_csv() without an encoding
  # argument made `if` fail on a zero-length condition. Default it first.
  if (is.null(args$encoding)) args$encoding <- 'native.enc'
  # Replace native.enc with whatever the locale actually uses
  if (args$encoding == 'native.enc') {
    # http://r.789695.n4.nabble.com/Find-out-what-quot-native-enc-quot-corresponds-to-td4639208.html
    args$encoding <- strsplit(Sys.getlocale("LC_CTYPE"), '\\.')[[1]][2]
    # Locales such as plain "C" have no ".<encoding>" suffix; fall back to
    # UTF-8 rather than propagating NA into readLines()/fread().
    if (is.na(args$encoding)) args$encoding <- 'UTF-8'
  }
  if (!(args$encoding %in% c('Latin-1', 'UTF-8'))) {
    # fread() only understands Latin-1/UTF-8: read the file with R's
    # native reader using the requested encoding, then hand fread the text
    txt <- paste(readLines(con <- file(path), encoding = args$encoding, warn = FALSE), collapse = "\n")
    close(con)
    args$encoding <- NULL
    args <- c(list(input = txt, data.table = FALSE, stringsAsFactors = FALSE), args)
  } else {
    args <- c(list(input = path, data.table = FALSE, stringsAsFactors = FALSE), args)
  }
  docs <- do.call(data.table::fread, args)
  text_field <- get_numeric_textfield(text_field, docs, path)
  data.frame(text = docs[, text_field], docs[, -text_field, drop = FALSE],
             stringsAsFactors = FALSE)
}
# Dispatch to get_json_object or get_json_tweets depending on whether
# it looks like a twitter json file
# Read a JSON file, dispatching on its apparent structure: first try
# Twitter-style tweets, then a single JSON object, finally line-delimited
# JSON. `encoding` is accepted for interface uniformity but unused.
get_json <- function(path, text_field, encoding, ...) {
  stopifnot(file.exists(path))
  tryCatch({
    return(get_json_tweets(path, ...))
  },
  error = function(e) {
    tryCatch({
      if (getOption("readtext_verbosity") >= 1) warning("Doesn't look like Tweets json file, trying general JSON")
      return(get_json_object(path, text_field, ...))
    },
    error = function(e) {
      # BUG FIX: `e` is a condition object; comparing it directly to a
      # character string with == is an error. Compare the condition's
      # message instead, so a missing-field error is re-raised rather
      # than being masked by the comparison itself.
      if (conditionMessage(e) == paste("There is no field called", text_field, "in file", path)) {
        stop(e)
      }
      if (getOption("readtext_verbosity") >= 1) warning("File doesn't contain a single valid JSON object, trying line-delimited json")
      return(get_json_lines(path, text_field, ...))
    })
  })
}
## Twitter json
# Parse a Twitter-archive JSON file with streamR and return a data.frame
# whose first column is `text` (the tweet text), the rest being docvars.
# NOTE(review): `source` is accepted but never used, and `...` is
# forwarded to BOTH readLines() and parseTweets() — an argument meant for
# one will also reach the other; confirm this is intended.
get_json_tweets <- function(path, source="twitter", ...) {
# if (!requireNamespace("streamR", quietly = TRUE))
#     stop("You must have streamR installed to read Twitter json files.")
# read raw json data
txt <- readLines(path, warn = FALSE, ...)
# parseTweets returns a data.frame; column 1 holds the tweet text
results <- streamR::parseTweets(txt, verbose=FALSE, ...)
data.frame(text = results[, 1], as.data.frame(results[, -1, drop = FALSE]),
stringsAsFactors = FALSE)
}
## general json
#' @importFrom data.table setDT
# Read a single JSON object into a data.frame with a `text` column taken
# from the (named, not numeric) `text_field`; other fields become docvars.
get_json_object <- function(path, text_field, ...) {
# if (!requireNamespace("jsonlite", quietly = TRUE))
#     stop("You must have jsonlite installed to read json files.")
# JSON fields are unordered, so a numeric index would be meaningless
if (is.numeric(text_field)) {
stop('Cannot use numeric text_field with json file')
}
docs <- jsonlite::fromJSON(path, flatten=TRUE, ...)
# setDT so the negative character selection below works (data.table j)
docs <- data.table::setDT(docs)
if (!(text_field %in% colnames(docs))) {
stop(paste("There is no field called", text_field, "in file", path))
}
# text first, all remaining fields kept as docvars
data.frame(text = docs[[text_field]], docs[, -text_field, with = FALSE],
stringsAsFactors = FALSE)
}
#' @importFrom data.table rbindlist
# Read a line-delimited (NDJSON) file: each line is parsed as its own
# JSON object and the results are stacked; `text_field` must be a name.
get_json_lines <- function(path, text_field, ...) {
# if (!requireNamespace("jsonlite", quietly = TRUE))
#     stop("You must have jsonlite installed to read json files.")
# JSON fields are unordered, so a numeric index would be meaningless
if (is.numeric(text_field)) {
stop('Cannot use numeric text_field with json file')
}
lines <- readLines(path, warn = FALSE)
# fill = TRUE tolerates records with differing sets of fields
docs <- data.table::rbindlist(
lapply(lines, function(x)jsonlite::fromJSON(x, flatten=TRUE, ...)),
use.names = TRUE, fill = TRUE
)
if (!(text_field %in% colnames(docs))) {
stop(paste("There is no field called", text_field, "in file", path))
}
# text first, all remaining fields kept as docvars
data.frame(text = docs[[text_field]], docs[, -text_field, with = FALSE],
stringsAsFactors = FALSE)
}
## flat xml format
# Read a flat XML file into a data.frame with a `text` column.
#
# If `text_field` looks like an XPath expression, its matches are
# concatenated (separated by `collapse`) into a single text value.
# Otherwise the XML is flattened to a data.frame; `text_field` names or
# indexes the text column and the other columns become docvars.
# NOTE(review): the `encoding` parameter is ignored (TODO upstream).
get_xml <- function(path, text_field, encoding, collapse = "", ...) {
  # if (!requireNamespace("XML", quietly = TRUE))
  #     stop("You must have XML installed to read XML files.")
  if (is_probably_xpath(text_field)) {
    xml <- XML::xmlTreeParse(path, useInternalNodes = TRUE)
    txt <- XML::xpathApply(xml, text_field, XML::xmlValue, ...)
    txt <- paste0(txt, collapse = collapse)
    return(data.frame(text = txt, stringsAsFactors = FALSE))
  }
  docs <- XML::xmlToDataFrame(path, stringsAsFactors = FALSE, ...)
  # BUG FIX (idiom): use the short-circuiting && in a scalar `if`; the
  # elementwise & also forced a character text_field through a string
  # comparison against ncol(docs).
  if (is.numeric(text_field) && (text_field > ncol(docs))) {
    stop(paste0("There is no ", text_field, "th field in file ", path))
  }
  if (is.character(text_field)) {
    text_fieldi <- which(names(docs) == text_field)
    if (length(text_fieldi) == 0)
      stop(paste("There is no node called", text_field, "in file", path))
    text_field <- text_fieldi
  } else {
    if (getOption("readtext_verbosity") >= 1) {
      warning(paste("You should specify text_field by name rather than by index, unless",
                    "you're certain that your XML file's fields are always in the same order."))
    }
  }
  # XML::xmlToDataFrame doesn't impute column types; do it ourselves to
  # match get_csv's behaviour.
  data.frame(text = docs[, text_field],
             imputeDocvarsTypes(docs[, -text_field, drop = FALSE]),
             stringsAsFactors = FALSE)
}
# Extract the visible body text of an HTML file (skipping script, style
# and noscript content) into a one-column data.frame.
# `...` is accepted for interface uniformity but unused (the original
# materialized it into a dead `args` local, removed here).
get_html <- function(f, ...) {
  # http://stackoverflow.com/a/3195926
  html <- XML::htmlTreeParse(f, useInternal = TRUE)
  txt <- XML::xpathApply(html, "//body//text()[not(ancestor::script)][not(ancestor::style)][not(ancestor::noscript)]",
                         XML::xmlValue)
  txt <- txt[!grepl('^\\s*$', txt)]  # Remove text which is just whitespace
  txt <- paste0(txt, collapse = '\n')
  data.frame(text = txt, stringsAsFactors = FALSE)
}
# Extract the text of a PDF (pdftools returns one string per page; pages
# are joined with newlines) into a one-column data.frame.
# `...` is accepted for interface uniformity but unused (dead `args`
# local removed).
get_pdf <- function(f, ...) {
  txt <- pdftools::pdf_text(as.character(f))
  txt <- paste0(txt, collapse = '\n')
  # pdftools returns UTF-8 bytes; mark the encoding so downstream string
  # operations treat it correctly
  Encoding(txt) <- "UTF-8"
  data.frame(text = txt, stringsAsFactors = FALSE)
}
# Extract the paragraph text of a .docx file: unpack the archive, parse
# word/document.xml, and join each //w:p paragraph with newlines.
# `...` is accepted for interface uniformity but unused (dead `args`
# local removed).
get_docx <- function(f, ...) {
  path <- extractArchive(f, ignoreMissing = FALSE)
  path <- sub('/\\*$', '', path)  # strip a trailing "/*" from the archive path
  path <- file.path(path, 'word', 'document.xml')
  xml <- XML::xmlTreeParse(path, useInternalNodes = TRUE)
  txt <- XML::xpathApply(xml, "//w:p", XML::xmlValue)
  txt <- txt[!grepl('^\\s*$', txt)]  # Remove text which is just whitespace
  txt <- paste0(txt, collapse = "\n")
  data.frame(text = txt, stringsAsFactors = FALSE)
}
# Extract the text of a legacy .doc file via the antiword package (the
# earlier implementation shelled out to the antiword binary; the package
# call needs no external install).
# `...` is accepted for interface uniformity but unused (dead `args`
# local removed).
get_doc <- function(f, ...) {
  txt <- antiword::antiword(as.character(normalizePath(f)))
  txt <- paste0(txt, collapse = "\n")
  txt <- trimws(txt)
  data.frame(text = txt, stringsAsFactors = FALSE)
}
# Read every worksheet of an Excel file, stack them, and return a
# data.frame with the `text_field` column first (as `text`), the other
# columns as docvars. `...` is forwarded to readxl::read_excel().
get_excel <- function(f, text_field, ...) {
  sheet_names <- readxl::excel_sheets(f)
  # BUG FIX: the per-sheet lambda previously declared its own `...`,
  # which shadowed get_excel's dots, so caller arguments never reached
  # read_excel(). Capture the outer dots lexically instead.
  sheets <- lapply(sheet_names, function(x) readxl::read_excel(f, sheet = x, ...))
  if (length(unique(sapply(sheets, ncol))) != 1) {
    warning('Not all worksheets in file "', f, '" have the same number of columns.')
  }
  docs <- data.table::rbindlist(sheets, fill = TRUE)
  text_field <- get_numeric_textfield(text_field, docs, path = f)
  data.frame(text = docs[, text_field, with = FALSE], docs[, -text_field, with = FALSE],
             stringsAsFactors = FALSE)
}
# Read every worksheet of an OpenDocument spreadsheet, stack them, and
# return a data.frame with the `text_field` column first (as `text`).
# `...` is forwarded to readODS::read_ods().
get_ods <- function(f, text_field, ...) {
  sheet_names <- readODS::ods_sheets(f)
  # BUG FIX: the per-sheet lambda previously declared its own `...`,
  # which shadowed get_ods's dots, so caller arguments never reached
  # read_ods(). Capture the outer dots lexically instead.
  sheets <- lapply(sheet_names, function(x) readODS::read_ods(f, sheet = x, ...))
  if (length(unique(sapply(sheets, ncol))) != 1) {
    warning('Not all worksheets in file "', f, '" have the same number of columns.')
  }
  docs <- data.table::rbindlist(sheets, fill = TRUE)
  text_field <- get_numeric_textfield(text_field, docs, path = f)
  data.frame(text = docs[, text_field, with = FALSE], docs[, -text_field, with = FALSE],
             stringsAsFactors = FALSE)
}
|
/R/get-functions.R
|
no_license
|
thrinu/readtext
|
R
| false
| false
| 8,879
|
r
|
###
### low-level file handling functions
###
## txt format
get_txt <- function(f, ...) {
txt <- paste(readLines(con <- file(f, ...), warn = FALSE), collapse = "\n")
close(con)
data.frame(text = txt, stringsAsFactors = FALSE)
}
## csv format
# Read a delimited text file into a data.frame whose first column is
# `text` (taken from `text_field`), followed by the remaining columns.
#
# path       : file to read
# text_field : name or index of the column holding the document text
# ...        : extra arguments for data.table::fread(); `encoding` is
#              intercepted and handled here
get_csv <- function(path, text_field, ...) {
  args <- list(...)
  # BUG FIX: the original evaluated `args$encoding == 'native.enc'`
  # without a NULL check, so calling get_csv() without an encoding
  # argument made `if` fail on a zero-length condition. Default it first.
  if (is.null(args$encoding)) args$encoding <- 'native.enc'
  # Replace native.enc with whatever the locale actually uses
  if (args$encoding == 'native.enc') {
    # http://r.789695.n4.nabble.com/Find-out-what-quot-native-enc-quot-corresponds-to-td4639208.html
    args$encoding <- strsplit(Sys.getlocale("LC_CTYPE"), '\\.')[[1]][2]
    # Locales such as plain "C" have no ".<encoding>" suffix; fall back to
    # UTF-8 rather than propagating NA into readLines()/fread().
    if (is.na(args$encoding)) args$encoding <- 'UTF-8'
  }
  if (!(args$encoding %in% c('Latin-1', 'UTF-8'))) {
    # fread() only understands Latin-1/UTF-8: read the file with R's
    # native reader using the requested encoding, then hand fread the text
    txt <- paste(readLines(con <- file(path), encoding = args$encoding, warn = FALSE), collapse = "\n")
    close(con)
    args$encoding <- NULL
    args <- c(list(input = txt, data.table = FALSE, stringsAsFactors = FALSE), args)
  } else {
    args <- c(list(input = path, data.table = FALSE, stringsAsFactors = FALSE), args)
  }
  docs <- do.call(data.table::fread, args)
  text_field <- get_numeric_textfield(text_field, docs, path)
  data.frame(text = docs[, text_field], docs[, -text_field, drop = FALSE],
             stringsAsFactors = FALSE)
}
# Dispatch to get_json_object or get_json_tweets depending on whether
# it looks like a twitter json file
# Read a JSON file, dispatching on its apparent structure: first try
# Twitter-style tweets, then a single JSON object, finally line-delimited
# JSON. `encoding` is accepted for interface uniformity but unused.
get_json <- function(path, text_field, encoding, ...) {
  stopifnot(file.exists(path))
  tryCatch({
    return(get_json_tweets(path, ...))
  },
  error = function(e) {
    tryCatch({
      if (getOption("readtext_verbosity") >= 1) warning("Doesn't look like Tweets json file, trying general JSON")
      return(get_json_object(path, text_field, ...))
    },
    error = function(e) {
      # BUG FIX: `e` is a condition object; comparing it directly to a
      # character string with == is an error. Compare the condition's
      # message instead, so a missing-field error is re-raised rather
      # than being masked by the comparison itself.
      if (conditionMessage(e) == paste("There is no field called", text_field, "in file", path)) {
        stop(e)
      }
      if (getOption("readtext_verbosity") >= 1) warning("File doesn't contain a single valid JSON object, trying line-delimited json")
      return(get_json_lines(path, text_field, ...))
    })
  })
}
## Twitter json
get_json_tweets <- function(path, source="twitter", ...) {
# if (!requireNamespace("streamR", quietly = TRUE))
# stop("You must have streamR installed to read Twitter json files.")
# read raw json data
txt <- readLines(path, warn = FALSE, ...)
results <- streamR::parseTweets(txt, verbose=FALSE, ...)
data.frame(text = results[, 1], as.data.frame(results[, -1, drop = FALSE]),
stringsAsFactors = FALSE)
}
## general json
#' @importFrom data.table setDT
get_json_object <- function(path, text_field, ...) {
# if (!requireNamespace("jsonlite", quietly = TRUE))
# stop("You must have jsonlite installed to read json files.")
if (is.numeric(text_field)) {
stop('Cannot use numeric text_field with json file')
}
docs <- jsonlite::fromJSON(path, flatten=TRUE, ...)
docs <- data.table::setDT(docs)
if (!(text_field %in% colnames(docs))) {
stop(paste("There is no field called", text_field, "in file", path))
}
data.frame(text = docs[[text_field]], docs[, -text_field, with = FALSE],
stringsAsFactors = FALSE)
}
#' @importFrom data.table rbindlist
get_json_lines <- function(path, text_field, ...) {
# if (!requireNamespace("jsonlite", quietly = TRUE))
# stop("You must have jsonlite installed to read json files.")
if (is.numeric(text_field)) {
stop('Cannot use numeric text_field with json file')
}
lines <- readLines(path, warn = FALSE)
docs <- data.table::rbindlist(
lapply(lines, function(x)jsonlite::fromJSON(x, flatten=TRUE, ...)),
use.names = TRUE, fill = TRUE
)
if (!(text_field %in% colnames(docs))) {
stop(paste("There is no field called", text_field, "in file", path))
}
data.frame(text = docs[[text_field]], docs[, -text_field, with = FALSE],
stringsAsFactors = FALSE)
}
## flat xml format
# Read a flat XML file into a data.frame with a `text` column.
#
# If `text_field` looks like an XPath expression, its matches are
# concatenated (separated by `collapse`) into a single text value.
# Otherwise the XML is flattened to a data.frame; `text_field` names or
# indexes the text column and the other columns become docvars.
# NOTE(review): the `encoding` parameter is ignored (TODO upstream).
get_xml <- function(path, text_field, encoding, collapse = "", ...) {
  # if (!requireNamespace("XML", quietly = TRUE))
  #     stop("You must have XML installed to read XML files.")
  if (is_probably_xpath(text_field)) {
    xml <- XML::xmlTreeParse(path, useInternalNodes = TRUE)
    txt <- XML::xpathApply(xml, text_field, XML::xmlValue, ...)
    txt <- paste0(txt, collapse = collapse)
    return(data.frame(text = txt, stringsAsFactors = FALSE))
  }
  docs <- XML::xmlToDataFrame(path, stringsAsFactors = FALSE, ...)
  # BUG FIX (idiom): use the short-circuiting && in a scalar `if`; the
  # elementwise & also forced a character text_field through a string
  # comparison against ncol(docs).
  if (is.numeric(text_field) && (text_field > ncol(docs))) {
    stop(paste0("There is no ", text_field, "th field in file ", path))
  }
  if (is.character(text_field)) {
    text_fieldi <- which(names(docs) == text_field)
    if (length(text_fieldi) == 0)
      stop(paste("There is no node called", text_field, "in file", path))
    text_field <- text_fieldi
  } else {
    if (getOption("readtext_verbosity") >= 1) {
      warning(paste("You should specify text_field by name rather than by index, unless",
                    "you're certain that your XML file's fields are always in the same order."))
    }
  }
  # XML::xmlToDataFrame doesn't impute column types; do it ourselves to
  # match get_csv's behaviour.
  data.frame(text = docs[, text_field],
             imputeDocvarsTypes(docs[, -text_field, drop = FALSE]),
             stringsAsFactors = FALSE)
}
get_html <- function(f, ...) {
args <- list(...)
# http://stackoverflow.com/a/3195926
html <- XML::htmlTreeParse(f, useInternal = TRUE)
txt <- XML::xpathApply(html, "//body//text()[not(ancestor::script)][not(ancestor::style)][not(ancestor::noscript)]",
XML::xmlValue)
txt <- txt[!grepl('^\\s*$', txt)] # Remove text which is just whitespace
txt <- paste0(txt, collapse='\n')
data.frame(text = txt, stringsAsFactors = FALSE)
}
get_pdf <- function(f, ...) {
args <- list(...)
txt <- pdftools::pdf_text(as.character(f))
txt <- paste0(txt, collapse='\n')
Encoding(txt) <- "UTF-8"
data.frame(text = txt, stringsAsFactors = FALSE)
}
get_docx <- function(f, ...) {
args <- list(...)
path <- extractArchive(f, ignoreMissing=FALSE)
path <- sub('/\\*$', '', path)
path <- file.path(path, 'word', 'document.xml')
xml <- XML::xmlTreeParse(path, useInternalNodes = TRUE)
txt <- XML::xpathApply(xml, "//w:p", XML::xmlValue)
txt <- txt[!grepl('^\\s*$', txt)] # Remove text which is just whitespace
txt <- paste0(txt, collapse = "\n")
data.frame(text = txt, stringsAsFactors = FALSE)
}
get_doc <- function(f, ...) {
args <- list(...)
txt <- antiword::antiword(as.character(normalizePath(f)))
# tryCatch({
# txt <- system2("antiword", shQuote(normalizePath(f)), stdout = TRUE)
# },
# error = function(e) {
# if (grepl('error in running command', e)) {
# stop(e, 'Please check whether antiword is installed. You can download it from http://www.winfield.demon.nl/')
# } else {
# stop(e)
# }
# })
txt <- paste0(txt, collapse = "\n")
txt <- trimws(txt)
data.frame(text = txt, stringsAsFactors = FALSE)
}
# Read every worksheet of an Excel file, stack them, and return a
# data.frame with the `text_field` column first (as `text`), the other
# columns as docvars. `...` is forwarded to readxl::read_excel().
get_excel <- function(f, text_field, ...) {
  sheet_names <- readxl::excel_sheets(f)
  # BUG FIX: the per-sheet lambda previously declared its own `...`,
  # which shadowed get_excel's dots, so caller arguments never reached
  # read_excel(). Capture the outer dots lexically instead.
  sheets <- lapply(sheet_names, function(x) readxl::read_excel(f, sheet = x, ...))
  if (length(unique(sapply(sheets, ncol))) != 1) {
    warning('Not all worksheets in file "', f, '" have the same number of columns.')
  }
  docs <- data.table::rbindlist(sheets, fill = TRUE)
  text_field <- get_numeric_textfield(text_field, docs, path = f)
  data.frame(text = docs[, text_field, with = FALSE], docs[, -text_field, with = FALSE],
             stringsAsFactors = FALSE)
}
# Read every worksheet of an OpenDocument spreadsheet, stack them, and
# return a data.frame with the `text_field` column first (as `text`).
# `...` is forwarded to readODS::read_ods().
get_ods <- function(f, text_field, ...) {
  sheet_names <- readODS::ods_sheets(f)
  # BUG FIX: the per-sheet lambda previously declared its own `...`,
  # which shadowed get_ods's dots, so caller arguments never reached
  # read_ods(). Capture the outer dots lexically instead.
  sheets <- lapply(sheet_names, function(x) readODS::read_ods(f, sheet = x, ...))
  if (length(unique(sapply(sheets, ncol))) != 1) {
    warning('Not all worksheets in file "', f, '" have the same number of columns.')
  }
  docs <- data.table::rbindlist(sheets, fill = TRUE)
  text_field <- get_numeric_textfield(text_field, docs, path = f)
  data.frame(text = docs[, text_field, with = FALSE], docs[, -text_field, with = FALSE],
             stringsAsFactors = FALSE)
}
|
# MEPS summary-table template: total expenditures (TOTEXP) by insurance
# coverage and poverty status, using the survey package.
# NOTE(review): `.yy.`, `.year.` and `.FYC.` are placeholders substituted
# by the table generator before this script is run; the file is not
# directly executable as-is. Keep the placeholder tokens untouched.
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Lonely-PSU adjustment so single-PSU strata don't error in svydesign
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Harmonize design/weight variable names across survey years
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Negative AGE* codes mean missing; AGELAST = latest non-missing age
# NOTE(review): funs() is deprecated/defunct in current dplyr; this
# template targets an older dplyr.
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Poverty status
if(year == 1996)
FYC <- FYC %>% rename(POVCAT96 = POVCAT)
FYC <- FYC %>%
mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing",
"1" = "Negative or poor",
"2" = "Near-poor",
"3" = "Low income",
"4" = "Middle income",
"5" = "High income"))
# Insurance coverage
# To compute for all insurance categories, replace 'insurance' in the 'svyby' function with 'insurance_v2X'
if(year == 1996){
FYC <- FYC %>%
mutate(MCDEV96 = MCDEVER, MCREV96 = MCREVER,
OPAEV96 = OPAEVER, OPBEV96 = OPBEVER)
}
# Before 2011 the INSURC variable did not exist; derive it: under-65s use
# INSCOV, 65+ get codes 4-7 from medicare/private/public combinations
if(year < 2011){
FYC <- FYC %>%
mutate(
public = (MCDEV.yy.==1|OPAEV.yy.==1|OPBEV.yy.==1),
medicare = (MCREV.yy.==1),
private = (INSCOV.yy.==1),
mcr_priv = (medicare & private),
mcr_pub = (medicare & !private & public),
mcr_only = (medicare & !private & !public),
no_mcr = (!medicare),
ins_gt65 = 4*mcr_only + 5*mcr_priv + 6*mcr_pub + 7*no_mcr,
INSURC.yy. = ifelse(AGELAST < 65, INSCOV.yy., ins_gt65)
)
}
FYC <- FYC %>%
mutate(insurance = recode_factor(INSCOV.yy., .default = "Missing", .missing = "Missing",
"1" = "Any private, all ages",
"2" = "Public only, all ages",
"3" = "Uninsured, all ages")) %>%
mutate(insurance_v2X = recode_factor(INSURC.yy., .default = "Missing", .missing = "Missing",
"1" = "<65, Any private",
"2" = "<65, Public only",
"3" = "<65, Uninsured",
"4" = "65+, Medicare only",
"5" = "65+, Medicare and private",
"6" = "65+, Medicare and other public",
"7" = "65+, No medicare",
"8" = "65+, No medicare"))
# Complex survey design: PSU/stratum/person weight, nested strata
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
# Weighted total expenditures by insurance x poverty cell
results <- svyby(~TOTEXP.yy., FUN = svytotal, by = ~insurance + poverty, design = FYCdsgn)
print(results)
|
/mepstrends/hc_use/json/code/r/totEXP__insurance__poverty__.r
|
permissive
|
RandomCriticalAnalysis/MEPS-summary-tables
|
R
| false
| false
| 2,729
|
r
|
# ---------------------------------------------------------------------------
# MEPS summary-table template: weighted total expenditures (TOTEXP) by
# insurance coverage and poverty status, from the Full-Year-Consolidated
# (FYC) file. Tokens of the form .year. / .yy. / .FYC. are template
# placeholders substituted with a concrete survey year before execution —
# TODO confirm against the table generator; the block is not runnable as-is.
# ---------------------------------------------------------------------------
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Handle strata containing a single PSU (survey package 'lonely PSU' option).
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Harmonize names across survey years: older files use year-suffixed design
# variables, a differently named person weight, and different age variables.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# AGELAST = last observed non-missing age; negative codes are recoded to NA
# first so coalesce() skips them.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Poverty status
if(year == 1996)
  FYC <- FYC %>% rename(POVCAT96 = POVCAT)
FYC <- FYC %>%
  mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing",
    "1" = "Negative or poor",
    "2" = "Near-poor",
    "3" = "Low income",
    "4" = "Middle income",
    "5" = "High income"))
# Insurance coverage
# To compute for all insurance categories, replace 'insurance' in the 'svyby' function with 'insurance_v2X'
if(year == 1996){
  FYC <- FYC %>%
    mutate(MCDEV96 = MCDEVER, MCREV96 = MCREVER,
           OPAEV96 = OPAEVER, OPBEV96 = OPBEVER)
}
# Pre-2011 files lack the INSURC variable; reconstruct the 65+ medicare
# categories (codes 4-7) from the ever-covered indicators.
if(year < 2011){
  FYC <- FYC %>%
    mutate(
      public = (MCDEV.yy.==1|OPAEV.yy.==1|OPBEV.yy.==1),
      medicare = (MCREV.yy.==1),
      private = (INSCOV.yy.==1),
      mcr_priv = (medicare & private),
      mcr_pub = (medicare & !private & public),
      mcr_only = (medicare & !private & !public),
      no_mcr = (!medicare),
      ins_gt65 = 4*mcr_only + 5*mcr_priv + 6*mcr_pub + 7*no_mcr,
      INSURC.yy. = ifelse(AGELAST < 65, INSCOV.yy., ins_gt65)
    )
}
FYC <- FYC %>%
  mutate(insurance = recode_factor(INSCOV.yy., .default = "Missing", .missing = "Missing",
    "1" = "Any private, all ages",
    "2" = "Public only, all ages",
    "3" = "Uninsured, all ages")) %>%
  mutate(insurance_v2X = recode_factor(INSURC.yy., .default = "Missing", .missing = "Missing",
    "1" = "<65, Any private",
    "2" = "<65, Public only",
    "3" = "<65, Uninsured",
    "4" = "65+, Medicare only",
    "5" = "65+, Medicare and private",
    "6" = "65+, Medicare and other public",
    "7" = "65+, No medicare",
    "8" = "65+, No medicare"))
# Complex-survey design: cluster on PSU within stratum, person-level weights;
# nest = TRUE because PSU ids repeat across strata.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)
# Weighted expenditure totals for each insurance x poverty cell.
results <- svyby(~TOTEXP.yy., FUN = svytotal, by = ~insurance + poverty, design = FYCdsgn)
print(results)
|
##########################################################################
# # R version:
# # File Name:
# # Author:
# # Process:
# # Inputs:
# # Outputs:
# # File history:
##########################################################################
setwd("/Users/fabiotejedor/Documents/TU_Delft_University/Thesis_Project/Thesis\ Preparation/Thesis_Airbnb_Disruption/Methodology/Data")
##########################################################################
## loading packages
##########################################################################
library(tidyverse)
library(rgdal)
library(xlsx)
library(readxl)
library(dplyr)
library(tigris)
library(ggplot2)
library(mapproj)
library(maps)
library(ggthemes)
library(gridExtra)
library(ggpubr)
library(tidyr)
library(rlang)
library(rgeos)
library(EnvStats)
library(sf)
library(tmap)
library(sf)
library(mapview)
library(geojsonio)
library(spatialEco)
library(FRK)
library(FactoMineR)
library(psych)
library(mice)
library(reshape2)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # functions
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
rm(list = ls())
# Discretize a numeric vector into quantile-based ordered classes.
#
# Args:
#   x:    numeric vector to bin.
#   nCut: number of quantile break points; yields nCut - 1 ordered classes.
# Returns: an ordered factor the same length as x.
# NOTE(review): cut() errors when the quantile breaks are not unique
# (heavily tied x) — confirm inputs before widening use.
fun_cut <- function(x, nCut = 6) {
  brks <- quantile(x, seq(0, 1, len = nCut), na.rm = TRUE)
  # include.lowest so the minimum falls in the first class; TRUE/FALSE
  # spelled out instead of the reassignable T/F shorthands.
  cut(x, brks,
      include.lowest = TRUE, dig.lab = 10, ordered_result = TRUE)
}
# Standardize a numeric vector to zero mean and unit variance (z-score).
# NA values are ignored when computing the mean and sd, and remain NA in
# the result.
#
# Args:
#   x: numeric vector.
# Returns: numeric vector the same length as x.
fun_sta <- function(x) {
  (x - mean(x, na.rm = TRUE)) / sd(x, na.rm = TRUE)
}
# Average period-over-period relative growth of a series, in percent:
# mean over t of (x[t+1] - x[t]) / x[t], times 100.
#
# Args:
#   x: numeric vector ordered in time.
# Returns: a single numeric value; NA_real_ when fewer than two
#   observations are available (the original loop produced NA here too,
#   via a stray 1:0 iteration). NA in x propagates, as before.
fun_growth <- function(x) {
  if (length(x) < 2) {
    return(NA_real_)  # growth is undefined for a single observation
  }
  # Vectorized replacement for the original element-growing loop:
  # diff(x) / x[-length(x)] is exactly (x[t+1] - x[t]) / x[t].
  step_growth <- diff(x) / x[-length(x)]
  mean(step_growth) * 100
}
# # # Loading GIS information
load("../Output/Chapter_1/Step3/dataList_airbnb.Rdata")
load("../Output/Chapter_1/Step3/dataList_airbnb_sf.Rdata")
load("../Output/Chapter_1/Step3/map_AMS_wijken_sf.Rdata")
map_AMS_district <- readOGR(dsn = "./GIS/geojson_district", layer = "geojson_districts-polygon")
plot(map_AMS_district)
colType <- read_excel("./OIS/AMS_DATA_NEIGH_0720.xlsx", sheet = "coltypes")
data_AMS_BBGA <- read_excel("./OIS/AMS_DATA_NEIGH_0720.xlsx", sheet = "bbga_excel_2020-07-09", col_types = colType$Type)
data_IND <- read_excel("./OIS/VARIABLE_INDICATORS.xlsx", sheet = "Indicators Total V03")
vars_fix <- c("niveau", "niveaunaam", "SD", "sdnaam", "gebiedcode15",
"gebiedcodenaam", "gebiednaam","jaar")
map_AMS_wijken <- readOGR(dsn = "./GIS/geojson_wijken", layer = "geojson_wijken")
plot(map_AMS_wijken)
list_jaar = 2007:2018
level <- c("Wijken")
level_region <- "Buurtcombi"
level_unknow <- "Z onbekend"
codeNeiG <- "gebiedcode15"
nameNeiG <- "gebiednaam"
colIntOrRd <- c('#fef0d9','#fdcc8a','#fc8d59','#e34a33','#b30000') # quintiles
# Indicator 1: Residential characteristics
# # Residential characteristics
list_IND1 <- subset(data_IND, Type == "Residential Characteristics" & Selected == "Included")
list_IND1 <- list_IND1 %>% filter(!is.na(list_IND1$Variable))
list_IND1$Variable <- toupper(list_IND1$Variable)
cols_IND1 <- colType %>% filter(Variable %in% list_IND1$Variable) %>% dplyr:::select(Col, Variable)
dim(cols_IND1)
cols_IND1 <- merge(cols_IND1, list_IND1, all.x = T)
dim(cols_IND1)
cols_IND1 <- cols_IND1 %>% select(-c(Definition, Definitie))
varsIND1 <- unlist(cols_IND1$Col, use.names = FALSE)
vvv1 <- c(vars_fix, varsIND1)
# lll <- data_AMS_BBGA %>%
# filter(niveaunaam %in% level & jaar %in% list_jaar & !sdnaam %in% level_unknow)
#
# dim(lll[is.na(lll$BEVALLEENHH_P), c("gebiednaam", "jaar")])
# dim(data_IND1[is.na(data_IND1$EDU_High), ])
data_IND1 <- data_AMS_BBGA %>%
filter(niveaunaam %in% level & jaar %in% list_jaar & !sdnaam %in% level_unknow) %>%
dplyr:::select(codeNeiG, nameNeiG, jaar, vvv1)
summary(data_IND1)
ppp <- data_AMS_BBGA %>%
filter(niveaunaam %in% level & jaar %in% list_jaar & !sdnaam %in% level_unknow) %>%
dplyr:::select(codeNeiG, nameNeiG, jaar, c("BEV18_26", "BEV27_65","BEV66PLUS", "BEVVEST", "BEVVERT", "BEVVESTINT", "BEVVERTINT", "BEVGELIJKINT", "BEVWOONDUUR"))
summary(ppp)
cor(ppp[, c("BEV18_26", "BEV27_65","BEV66PLUS")], use = "pairwise.complete.obs")
cor(ppp[, c("BEV18_26", "BEV27_65","BEV66PLUS", "BEVVEST", "BEVVERT", "BEVVESTINT", "BEVVERTINT", "BEVGELIJKINT", "BEVWOONDUUR")], use = "pairwise.complete.obs")
map_info <- map_AMS_wijken_sf[, c("Stadsdeel", "Buurtcombi", "Buurtcom_1", "Stadsdeel_", "Opp_m2")]
map_info <- data.frame(map_info)[, -6] ## eliinate geometry
data_IND1 <- merge(data_IND1, map_info, by.x = codeNeiG, by.y = "Buurtcombi", all.x= T)
list_neigh <- unique(data_IND1$Buurtcom_1)
data_IND1_Stad <- data_AMS_BBGA %>%
filter(niveaunaam %in% "Stadsdelen" & jaar %in% list_jaar & !sdnaam %in% level_unknow) %>%
dplyr:::select(codeNeiG, nameNeiG, jaar, vvv1)
summary(data_IND1_Stad)
# # neighborhoods without data
data_IND1 <- data_IND1 %>% filter(!gebiednaam %in% "IJburg Oost")
data_IND1$ID <- 1:nrow(data_IND1)
# # Variable imputation using stadsdelen
# Fill wijk-level (neighborhood) gaps with the value of the enclosing
# stadsdeel (district) for the same year. vvv1[1:8] are the identifier
# columns (vars_fix), hence the loop starts at index 9.
for(ii in 9:length(vvv1)){
  ddd1 <- is.na(data_IND1[, vvv1[ii]])
  ddd <- sum(ddd1)/length(data_IND1[, vvv1[ii]])
  if(ddd>0){
    # District-level series for this variable, keyed by district and year.
    nnn <- data_IND1_Stad[, c("SD", "jaar", vvv1[ii])]
    colnames(nnn) <- c("Stadsdeel_","jaar", "varImp")
    data_IND1 <- merge(data_IND1, nnn, by = c("Stadsdeel_", "jaar"), all.x = T)
    # Count how many NAs actually have a district value to fall back on.
    qqq <- sum(is.na(data_IND1[, vvv1[ii]]) & !is.na(data_IND1$varImp))
    cat(vvv1[ii], " - ", sum(ddd1), ".Replaced: ",qqq ,"\n")
    data_IND1[, vvv1[ii]] <- ifelse(is.na(data_IND1[, vvv1[ii]]), data_IND1$varImp, data_IND1[, vvv1[ii]])
    # Drop the temporary merge column before the next iteration re-merges.
    data_IND1$varImp <- NULL
  }
}
# Collect variables that remain >30% missing after district imputation;
# outAn is used right after this block to drop those columns.
outAn <- NULL
kk <- 1
for(ii in 9:length(vvv1)){
  www <- prop.table(table(is.na(data_IND1[, vvv1[ii]])))
  if(length(www) == 1) next
  if(www[2] > 0.3){
    outAn[kk] <- vvv1[ii]
    kk = kk + 1
  }
}
data_IND1 <- data_IND1[, !colnames(data_IND1) %in% outAn]
summary(data_IND1)
data_IND1$popKm2 <- with(data_IND1, BEVTOTAAL/(Opp_m2*1e-6))# population per Km2
varsIND1 <- c(varsIND1, "popKm2")
renameCol <- cols_IND1$Relabel[match(colnames(data_IND1), cols_IND1$Col)]
renameCol[is.na(renameCol)] <- colnames(data_IND1)[is.na(renameCol)]
colnames(data_IND1) <- renameCol
data_IND1$"PopDut65Plus" <- data_IND1$"PopDut65_79" + data_IND1$"PopDut80Plus"
data_IND1$"PopWE65Plus" <- data_IND1$"PopWE65_79" + data_IND1$"PopWE80Plus"
data_IND1$"PopNWe65Plus" <- data_IND1$"PopNWe65_79" + data_IND1$"PopNWe80Plus"
data_IND1$"PopDut65_79" <- NULL
data_IND1$"PopDut80+" <- NULL
data_IND1$"PopWE65_79" <- NULL
data_IND1$"PopWE80+" <- NULL
data_IND1$"PopNWe65_79" <- NULL
data_IND1$"PopNWe80+" <- NULL
data_IND1$"popReg" <- NULL
list_IND1_02 <- subset(data_IND, Type == "Residential Characteristics" & Selected == "Included")
varIND1_02 <- list_IND1_02[list_IND1_02$Relabel %in%colnames(data_IND1) ,]
varIND1_02 <- varIND1_02[order(varIND1_02$ID), ]
varIND1_02 <- varIND1_02$Relabel
ttt <- colnames(data_IND1)[!colnames(data_IND1) %in% varIND1_02]
ttt <- c(ttt, varIND1_02)
data_IND1_02 <- data_IND1[, ttt]
data_IND1_02 <- data_IND1_02[order(data_IND1_02$gebiedcode15, data_IND1_02$jaar), ]
# colnames(data_IND1_02) <- gsub("\\+", "oM", colnames(data_IND1_02))
# varIND1_02 <- gsub("\\+", "oM", varIND1_02)
# ttt <- gsub("\\+", "oM", ttt)
# # dataset imputation
varsImp <- c("jaar", varIND1_02)
md.pattern(data_IND1_02[, varsImp])
imputed_Data <- mice(data_IND1_02[, varsImp],
maxit = 50, m = 1,
method = 'pmm', seed = 500)
summary(imputed_Data)
png(filename = "../Output/Chapter_1/Step4/ImputedData_HouseholdChar.png", units="px", width=1300, height=800)
densityplot(imputed_Data)
dev.off()
# stripplot(imputed_Data, pch = 20, cex = 1.2)
completeData <- complete(imputed_Data, 1)
# completeData <- complete(imputed_Data,"long", include=TRUE)
# imp <- melt(completeData, c(".imp",".id","jaar"))
# imp$Imputed<-ifelse(imp$".imp"==0,"Observed","Imputed")
# ggplot(imp[1:5000, ], aes(x=value, group=.imp, colour=Imputed)) +
# stat_density(geom = "path",position = "identity") +
# facet_wrap(~variable, ncol=2, scales="free")
summary(completeData)
summary(data_IND1_02)
data_IND1_02I <- completeData# imputed data
data_IND1_02I <- cbind(data_IND1_02I, data_IND1_02[, !colnames(data_IND1_02) %in% colnames(data_IND1_02I)])
data_IND1_02I <- data_IND1_02I[, ttt]
summary(data_IND1_02I)
#
# countNA <- list()
# data_INDYEAR <- NULL
# for(ii in 1:length(varIND1_02)){
# ddd <- data_IND1[!is.na(data_IND1[, varIND1_02[ii]]), ] %>% select(Buurtcom_1, jaar, varIND1_02[ii])
# eee <- ddd %>% group_by(Buurtcom_1) %>% summarise(nY = n())
# fff <- data.frame(Variable = varIND1_02[ii], Year = sort(unique(ddd$jaar)))
# data_INDYEAR <- rbind(data_INDYEAR, fff)
# }
# datrea <- data_INDYEAR %>% group_by(Variable) %>% summarise(1- n()/length(list_jaar))
# ppp <- data_IND1[is.na(data_IND1$EDU_High), ]
# ppp %>% filter(gebiedcode15 == "E36")
# nrow(ppp)/nrow(data_IND1)
# #IJburg Oost discarded
### aggregated data
data_IND1_02IA <- data_IND1_02I %>% group_by(Stadsdeel, Buurtcom_1) %>% summarise_at(varIND1_02, mean, na.rm = T)
neigDeleted <- unlist(data_IND1_02IA[is.na(data_IND1_02IA$popKm2), "Buurtcom_1"], use.names = F)
neigDeleted <- as.character(neigDeleted)
cat("Neigh deleted:", neigDeleted, "\n")
# # calculating maps for each set of variables
# # # Age and Racial origin (4 Ages and 3 Racial categories)
data_IND1_02IA$Pop18_22 <- with(data_IND1_02IA, PopWE18_22+PopNWe18_22+PopDut18_22)
data_IND1_02IA$Pop23_39<- with(data_IND1_02IA, PopWE23_39+PopNWe23_39+PopDut23_39)
data_IND1_02IA$Pop40_64 <- with(data_IND1_02IA, PopWE40_64+PopNWe40_64+PopDut40_64)
data_IND1_02IA$Pop65Plus <- with(data_IND1_02IA, PopWE65Plus+PopNWe65Plus+PopDut65Plus)
pop1 <- c("PopWE18_22","PopNWe18_22", "PopDut18_22", "Pop18_22")
pop2 <- c("PopWE23_39","PopNWe23_39","PopDut23_39" , "Pop23_39")
pop3 <- c("PopWE40_64","PopNWe40_64", "PopDut40_64", "Pop40_64")
pop4 <- c("PopWE65Plus","PopNWe65Plus", "PopDut65Plus", "Pop65Plus")
ppp1 <- t(apply(data.frame(data_IND1_02IA[, pop1]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp1 <- data.frame(ppp1)
colnames(ppp1) <- paste(pop1[1:3], "_P", sep = "")
ppp2 <- t(apply(data.frame(data_IND1_02IA[, pop2]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp2 <- data.frame(ppp2)
colnames(ppp2) <- paste(pop2[1:3], "_P", sep = "")
ppp3 <- t(apply(data.frame(data_IND1_02IA[, pop3]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp3 <- data.frame(ppp3)
colnames(ppp3) <- paste(pop3[1:3], "_P", sep = "")
ppp4 <- t(apply(data.frame(data_IND1_02IA[, pop4]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp4 <- data.frame(ppp4)
colnames(ppp4) <- paste(pop4[1:3], "_P", sep = "")
data_IND1_02IA <- cbind(data.frame(data_IND1_02IA), ppp1, ppp2, ppp3, ppp4)
pop5 <- c(sort(pop1[-4]), sort(pop2[-4]),sort(pop3[-4]),sort(pop4[-4]))
pop5 <- paste(pop5, "_P", sep = "")
# # # for the maps
map_AMS_wijken_sf <- merge(map_AMS_wijken_sf, data_IND1_02IA, by = "Buurtcom_1", all.x = T)
vect_map_AgeRac <- paste("map", pop5, sep = "_")
for(ii in 1:length(pop5)){
mmm <- tm_shape(map_AMS_wijken_sf) +
tm_borders(col = gray(0.6))+
tm_fill(pop5[ii]) +
tm_shape(map_AMS_district) +
tm_borders(alpha = 1, lwd = 2, col = gray(0.6)) +
tm_text("Stadsdeel", size = 0.5, shadow=TRUE)
assign(x = vect_map_AgeRac[ii], value = mmm)
rm(mmm)
}
tpArr_AgeRac <- tmap_arrange(map_PopDut18_22_P, map_PopNWe18_22_P, map_PopWE18_22_P,
map_PopDut23_39_P, map_PopNWe23_39_P, map_PopWE23_39_P,
map_PopDut40_64_P, map_PopNWe40_64_P, map_PopWE40_64_P,
map_PopDut65Plus_P, map_PopNWe65Plus_P, map_PopWE65Plus_P, ncol = 3)
tmap_save(tpArr_AgeRac, "../Output/Chapter_1/Step4/Map_PerAge_RacialDistr.png", width=5000, height=3000)
# # for the concentration of given Racial per each age group
pop6 <- c(sort(pop1[-4]), sort(pop2[-4]),sort(pop3[-4]),sort(pop4[-4]))
ppp6 <- apply(data_IND1_02IA[, pop6], 2, function(x)x/sum(x) * 100)
ppp6 <- data.frame(ppp6)
pop6_c <-paste(pop6, "_PNe", sep = "")
colnames(ppp6) <- pop6_c # per neighborhood
data_IND1_02IA <- cbind(data_IND1_02IA, ppp6)
# # # for the maps per neighborhood
map_AMS_wijken_sf <- merge(map_AMS_wijken_sf, data_IND1_02IA[, c("Buurtcom_1", pop6_c)],
by = "Buurtcom_1", all.x = T)
vect_map_AgeRac_PNe <- paste("map", pop6_c, sep = "_")
for(ii in 1:length(pop6_c)){
mmm <- tm_shape(map_AMS_wijken_sf) +
tm_borders(col = gray(0.6))+
tm_fill(pop6_c[ii]) +
tm_shape(map_AMS_district) +
tm_borders(alpha = 1, lwd = 2, col = gray(0.6)) +
tm_text("Stadsdeel", size = 0.5, shadow=TRUE)
assign(x = vect_map_AgeRac_PNe[ii], value = mmm)
rm(mmm)
}
tpArr_AgeRac <- tmap_arrange(map_PopDut18_22_PNe, map_PopNWe18_22_PNe, map_PopWE18_22_PNe,
map_PopDut23_39_PNe, map_PopNWe23_39_PNe, map_PopWE23_39_PNe,
map_PopDut40_64_PNe, map_PopNWe40_64_PNe, map_PopWE40_64_PNe,
map_PopDut65Plus_PNe, map_PopNWe65Plus_PNe, map_PopWE65Plus_PNe, ncol = 3)
tmap_save(tpArr_AgeRac, "../Output/Chapter_1/Step4/Map_PerNeighAge&RacialDistr.png", width=5000, height=3000)
# # variables that measure concentration in neighborhoods
# # set variables for households
data_IND1_02IA_Copy <- data_IND1_02IA
varHouseH <- c("HSingle_PaFa", "HSingle_Pe", "HMar_WOCh", "HMar_WCh",
"HUnMar_WOCh", "HUnMar_Ch", "HOther")
varPopArr <- c("Pop_Arr", "Pop_Dep", "Pop_Arr_AMS", "Pop_Dep_AMS",
"Pop_Mig_Ar", "Dur_Res")
varYear <- c("Pop18_22", "Pop23_39", "Pop40_64", "Pop65Plus")
varRac <- c("Dutch", "NonWest", "West")
varComp <- c("popKm2", "New_Urb", "EDU_Low", "EDU_Medium","EDU_High")
pop1 <- c("PopWE18_22","PopNWe18_22", "PopDut18_22", "Pop18_22")
pop2 <- c("PopWE23_39","PopNWe23_39","PopDut23_39" , "Pop23_39")
pop3 <- c("PopWE40_64","PopNWe40_64", "PopDut40_64", "Pop40_64")
pop4 <- c("PopWE65Plus","PopNWe65Plus", "PopDut65Plus", "Pop65Plus")
data_IND1_02IA$Dutch <- with(data_IND1_02IA, PopDut18_22 + PopDut23_39 + PopDut40_64 + PopDut65Plus)
data_IND1_02IA$NonWest <- with(data_IND1_02IA, PopNWe18_22 + PopNWe23_39 + PopNWe40_64 + PopNWe65Plus)
data_IND1_02IA$West <- with(data_IND1_02IA, PopWE18_22 + PopWE23_39 + PopWE40_64 + PopWE65Plus)
data_IND1_02IA[, varHouseH] <- t(apply(data_IND1_02IA[, varHouseH], 1, function(x)x/sum(x) * 100))
data_IND1_02IA[, varRac] <- t(apply(data_IND1_02IA[, varRac], 1, function(x)x/sum(x) * 100))
data_IND1_02IA[, varYear] <- t(apply(data_IND1_02IA[, varYear], 1, function(x)x/sum(x) * 100))
varIND1_03 <- c(varHouseH, varYear, varRac, varPopArr, varComp)
# Neigh deleted: IJburg Oost
library(corrplot)
library(xtable)
M <- cor(data_IND1_02IA[, varIND1_03])
col3 <- colorRampPalette(c("red", "green", "blue"))
col2 <- colorRampPalette(c("#67001F", "#B2182B", "#D6604D", "#F4A582",
"#FDDBC7", "#FFFFFF", "#D1E5F0", "#92C5DE",
"#4393C3", "#2166AC", "#053061"))
png(filename = "../Output/Chapter_1/Step4/MatrixCor_IND1_TOTALS.png", units="px", width=1300, height=800)
corrplot(M, method = "circle", type = "upper", tl.col = "black", diag = F)
dev.off()
xtable(M, caption = "Correlation matrix Household Characteristics")
# # exclude Pop_Mig_Ar because of low correlation and results ACP
# # correlation among variables
# # summary main variables
forSumm <- merge(data_IND1_02I, map_AMS_wijken_sf[, c("Buurtcombi", "gentry_opDef", "gentryABnB_opDef")],
by.x = "gebiedcode15", by.y = "Buurtcombi", all.x = T)
mmm <- forSumm[, c(varIND1_03, "jaar", "gentry_opDef")] %>%
group_by(jaar, gentry_opDef) %>%
summarise_at(varIND1_03, mean)
mmm2 <- forSumm[, c(varIND1_03, "jaar", "gentry_opDef")] %>%
group_by(jaar) %>%
summarise_at(varIND1_03, mean)
mmm3 <- cbind(
t(mmm2 %>% summarise_at(varIND1_03, mean)),
t(mmm2 %>% summarise_at(varIND1_03, sd)),
t((mmm %>% group_by(gentry_opDef) %>% summarise_at(varIND1_03, mean))[, -1]),
t((mmm %>% group_by(gentry_opDef) %>% summarise_at(varIND1_03, fun_growth))[, -1])
)
colnames(mmm3) <- c("mean", "sd", "mean_g", "mean_ng", "growth_g", "growth_ng")
mmm3 <- data.frame(mmm3)
mmm3$mean_g <- mmm3$mean_g - mmm3$mean
mmm3$mean_ng <- mmm3$mean_ng -mmm3$mean
varsPCA <- c(varIND1_03, "Stadsdeel")
id_qualiSup <- which(varsPCA %in% "Stadsdeel")
pcaIND1 <- PCA(data_IND1_02IA[, varsPCA], quali.sup = id_qualiSup, ncp = 5)
pca_Weigh <- data.frame(pcaIND1$svd$V[, 1:2])
rownames(pca_Weigh) <- varIND1_03
colnames(pca_Weigh) <- c("Dim1", "Dim2")
xtable(pca_Weigh, caption = "Weights PCA - Household Characteristics", label = "tab:CH06:Weights1_ACP")
summary(pcaIND1)
anly_PCAIND1 <- dimdesc(pcaIND1, proba = 0.1) # analysis PCA
xtable(anly_PCAIND1$Dim.1$quanti, caption = "Correlation test between First dimension and variables", label = "tab:CH06:CorrFactor1_1ACP")
xtable(anly_PCAIND1$Dim.2$quanti, caption = "Correlation test between Second dimension and variables", label = "tab:CH06:CorrFactor1_2ACP")
xtable(pcaIND1$var$cos2[, 1:2], caption = "Cosine square for columns representation", label = "tab:CH06:Cosine_1ACP")
write.infile(pcaIND1, file = "../Output/Chapter_1/Step4/PCA_IND1.csv")
# # comp 1-2 (60%)
png(filename = "../Output/Chapter_1/Step4/PCA_IND1_Indiv_12.png", units="px",
width=1600, height=1200, res=300)
plot(pcaIND1,choix="ind", habillage = id_qualiSup, size = 5,cex= 0.7,
label = "quali", title = "IND1: Household Characteristics", axes = c(1, 2))
dev.off()
png(filename = "../Output/Chapter_1/Step4/PCA_IND1_Var12.png", units="px",
width=1600, height=1600, res=300)
plot(pcaIND1,choix="var",
size = 5,cex= 0.7,
title = "IND1: Household Characteristics", axes = c(1, 2))
dev.off()
# # Quintiles to classify the index
pcaDataIND1 <- pcaIND1$ind$coord[, 1:2]
varEig <- pcaIND1$eig[1:2, 1]
pcaDataIND1[, 1] <- (pcaDataIND1[, 1] / sqrt(varEig[1]))* 15 + 50
pcaDataIND1[, 2] <- (pcaDataIND1[, 2] / sqrt(varEig[2]))* 15 + 50
IND1_1_Q <- quantile(pcaDataIND1[, 1], probs = c(0.20, 0.40, 0.60, 0.80))
IND1_1_Q <- round(IND1_1_Q, 2)
IND1_2_Q <- quantile(pcaDataIND1[, 2], probs = c(0.20, 0.40, 0.60, 0.80))
IND1_2_Q <- round(IND1_2_Q, 2)
colnames(pcaDataIND1) <- c("IND1_1", "IND1_2")
data_IND1_02IA <- cbind(data_IND1_02IA, pcaDataIND1)
data_IND1_02IA$IND1_1_Q <- cut(data_IND1_02IA$IND1_1, breaks = c(-Inf,IND1_1_Q, Inf), labels = c("Low", "Medium-Low", "Medium", "Medium-High", "High"))
data_IND1_02IA$IND1_2_Q <- cut(data_IND1_02IA$IND1_2, breaks = c(-Inf,IND1_2_Q, Inf), labels = c("Low", "Medium-Low", "Medium", "Medium-High", "High"))
# # Characterization of each quintiles
# # comparison with income
data_AMS_INCOME <- data_AMS_BBGA %>%
filter(niveaunaam %in% level & jaar %in% list_jaar & !sdnaam %in% level_unknow) %>%
select(codeNeiG, nameNeiG, jaar, "IHHINK_GEM")
data_AMS_INCOME <- data_AMS_INCOME %>% filter(gebiedcode15 %in% data_IND1_02I$gebiedcode15)
data_AMS_INCOMEA <- data_AMS_INCOME %>% group_by(gebiedcode15) %>% summarise(avIncome= median(IHHINK_GEM, na.rm = T))
data_IND1_02IA <- merge(data_IND1_02IA, data_AMS_INCOMEA, all.x = T)
ttIND1 <- data_IND1_02IA[, c(varIND1_03, "avIncome", "IND1_1", "IND1_2", "IND1_1_Q", "IND1_2_Q")] %>%
group_by(IND1_1_Q) %>%
summarise_at(c(varIND1_03, "avIncome"), mean, na.rm = T)
ttIND1 <- data.frame(ttIND1)
t(ttIND1)
ttIND2 <- data_IND1_02IA[, c(varIND1_03, "avIncome", "IND1_1", "IND1_2", "IND1_1_Q", "IND1_2_Q")] %>%
group_by(IND1_2_Q) %>%
summarise_at(c(varIND1_03, "avIncome"), mean, na.rm = T)
t(ttIND2)
data_IND1_02IA %>% group_by(Stadsdeel) %>% summarise(mean(IND1_1), mean(IND1_2))
# # how it looks in time (PREDICTION)
data_IND1_02I$Pop18_22 <- with(data_IND1_02I, PopWE18_22+PopNWe18_22+PopDut18_22)
data_IND1_02I$Pop23_39<- with(data_IND1_02I, PopWE23_39+PopNWe23_39+PopDut23_39)
data_IND1_02I$Pop40_64 <- with(data_IND1_02I, PopWE40_64+PopNWe40_64+PopDut40_64)
data_IND1_02I$Pop65Plus <- with(data_IND1_02I, PopWE65Plus+PopNWe65Plus+PopDut65Plus)
ppp1 <- t(apply(data.frame(data_IND1_02I[, pop1]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp1 <- data.frame(ppp1)
colnames(ppp1) <- paste(pop1[1:3], "_P", sep = "")
ppp2 <- t(apply(data.frame(data_IND1_02I[, pop2]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp2 <- data.frame(ppp2)
colnames(ppp2) <- paste(pop2[1:3], "_P", sep = "")
ppp3 <- t(apply(data.frame(data_IND1_02I[, pop3]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp3 <- data.frame(ppp3)
colnames(ppp3) <- paste(pop3[1:3], "_P", sep = "")
ppp4 <- t(apply(data.frame(data_IND1_02I[, pop4]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp4 <- data.frame(ppp4)
colnames(ppp4) <- paste(pop4[1:3], "_P", sep = "")
data_IND1_02I <- cbind(data.frame(data_IND1_02I), ppp1, ppp2, ppp3, ppp4)
ppp6 <- apply(data_IND1_02I[, pop6], 2, function(x)x/sum(x) * 100)
ppp6 <- data.frame(ppp6)
pop6_c <-paste(pop6, "_PNe", sep = "")
colnames(ppp6) <- pop6_c # per neighborhood
data_IND1_02I <- cbind(data_IND1_02I, ppp6)
data_IND1_02I_Copy <- data_IND1_02I
data_IND1_02I$Dutch <- with(data_IND1_02I, PopDut18_22 + PopDut23_39 + PopDut40_64 + PopDut65Plus)
data_IND1_02I$NonWest <- with(data_IND1_02I, PopNWe18_22 + PopNWe23_39 + PopNWe40_64 + PopNWe65Plus)
data_IND1_02I$West <- with(data_IND1_02I, PopWE18_22 + PopWE23_39 + PopWE40_64 + PopWE65Plus)
data_IND1_02I[, varRac] <- t(apply(data_IND1_02I[, varRac], 1, function(x)x/sum(x)* 100))
data_IND1_02I[, varYear] <- t(apply(data_IND1_02I[, varYear], 1, function(x)x/sum(x)* 100))
data_IND1_02I[, varHouseH] <- t(apply(data_IND1_02I[, varHouseH], 1, function(x)x/sum(x)* 100))
predIND1 <- predict(pcaIND1, data_IND1_02I)$coord
# varIND1_03 <- c("popKm2", varHouseH, varYear, varRac, varPopArr, "New_Urb", "EDU_Low", "EDU_High")
predIND1 <- predIND1[, 1:2]
colnames(predIND1) <- c("IND1_1", "IND1_2")
pcaVarAll <- pcaIND1$var$coord
pcaVarAll <- pcaVarAll[, 1:2]
predIND1[, 1] <- (predIND1[, 1] / sqrt(varEig[1]))* 15 + 50
predIND1[, 2] <- (predIND1[, 2] / sqrt(varEig[2]))* 15 + 50
data_IND1_02I <- cbind(data_IND1_02I, predIND1)
data_IND1_02I$IND1_1_Q <- cut(data_IND1_02I$IND1_1, breaks = c(-Inf,IND1_1_Q, Inf), labels = c("Low", "Medium-Low", "Medium", "Medium-High", "High"))
data_IND1_02I$IND1_2_Q <- cut(data_IND1_02I$IND1_2, breaks = c(-Inf,IND1_2_Q, Inf), labels = c("Low", "Medium-Low", "Medium", "Medium-High", "High"))
pcaStatsY <- data_IND1_02I %>% group_by(jaar) %>% summarise(pcaMean1 = mean(IND1_1),
pcaMean2= mean(IND1_2),
pcaSd1 = sd(IND1_1),
pcaSd2 = sd(IND1_2))
pcaStatsY_L <- melt(pcaStatsY[, 1:3], id.vars = "jaar")
plot1 <- ggplot(pcaStatsY_L, aes(x = jaar, y = value, color = variable)) +
geom_point(alpha = 0.5) +
stat_smooth(aes(x = jaar, y = value), method = "lm",
formula = y ~ poly(x, 4), se = FALSE) +
theme(legend.text=element_text(size=8), legend.title=element_text(size=10))+
guides(color=guide_legend(title="Indicator")) +
scale_x_continuous(name = "Time", breaks = list_jaar) +
scale_y_continuous(name = "Average Indicators") +
geom_hline(yintercept = 50, linetype="dotted") +
ggtitle("Household Characteristics in time")
ggsave("../Output/Chapter_1/Step4/HouseholdCharac_TimeAver.png", plot = plot1, width = 15)
############################################################
### INDICATOR 1
############################################################
# # boxplot
data_IND1_02I$jaar <- as.factor(data_IND1_02I$jaar)
plot1 <- ggplot(data_IND1_02I, aes(x = jaar, y = IND1_1)) +
geom_boxplot() + theme(
# Remove panel border
panel.border = element_blank(),
# Remove panel grid lines
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
# Remove panel background
panel.background = element_blank(),
# Add axis line
axis.line = element_line(colour = "grey")
) + scale_x_discrete(name = "Time", breaks = list_jaar) +
scale_y_continuous(name = "Indicator-Household Charatcteristics 1",
breaks = seq(0, 100, by =10), limits = c(10, 90)) +
geom_hline(yintercept = 50, linetype="dotted") +
ggtitle("Household Characteristics (Indicator 1)")
ggsave("../Output/Chapter_1/Step4/HouseholdChar_IND1_TimeAver_Boxplot.png", plot = plot1, width = 15)
# # Tile plot for indicator 1
# # resume characteristics neighborhoods and Quintiles
map_AMS_wijken_sf <- map_AMS_wijken_sf[order(map_AMS_wijken_sf$Buurtcombi), ]
data_IND1_02I$Buurtcom_1 <- factor(data_IND1_02I$Buurtcom_1, levels = as.character(map_AMS_wijken_sf$Buurtcom_1))
plot2 <- ggplot(data = data_IND1_02I, mapping = aes(y = Buurtcom_1,
x = jaar,
fill = IND1_1_Q)) +
geom_tile() +
scale_fill_manual(values= colIntOrRd) +
labs(fill = "Indicator 1") + xlab("Time") + ylab("Neighborhood")
ggsave("../Output/Chapter_1/Step4/HeatMap_Indic1Time.png",
plot = plot2, height = 15)
vect_map_IND1 <- paste("map", list_jaar, "IND1_1",sep = "_")
for(ii in 1:length(list_jaar)){
map_AMS_wijken_sf_Jaar <- map_AMS_wijken_sf
data_IND1_02I_jaar <- data_IND1_02I %>% filter(jaar == list_jaar[ii]) %>% select("Buurtcom_1", "IND1_1_Q")
map_AMS_wijken_sf_Jaar <- merge(map_AMS_wijken_sf_Jaar, data_IND1_02I_jaar, by = "Buurtcom_1", all.x = T)
mmm <- tm_shape(map_AMS_wijken_sf_Jaar) +
tm_borders(col = gray(0.5), alpha = 0.6)+
tm_fill(col = "IND1_1_Q", palette = "seq", textNA = "Other Neighborhoods", colorNA = "white") +
tm_shape(map_AMS_district) +
tm_borders(alpha = 1, lwd = 2, col = gray(0.5)) +
tm_text("Stadsdeel", size = 0.5, shadow=TRUE) +
tm_layout( title = list_jaar[ii], frame = FALSE,
inner.margins=c(0,0,.1,0), title.size=.8)
assign(x = vect_map_IND1[ii], value = mmm)
rm(mmm)
}
tpArr_YearIND1 <- tmap_arrange(map_2007_IND1_1, map_2008_IND1_1, map_2009_IND1_1, map_2010_IND1_1,
map_2011_IND1_1, map_2012_IND1_1, map_2013_IND1_1, map_2014_IND1_1,
map_2015_IND1_1, map_2016_IND1_1, map_2017_IND1_1, map_2018_IND1_1, ncol = 3)
tmap_save(tpArr_YearIND1, "../Output/Chapter_1/Step4/Map_PerNeighYearIND1.png", width=5000, height=3000)
############################################################
### INDICATOR 2
############################################################
# # boxplot
data_IND1_02I$jaar <- as.factor(data_IND1_02I$jaar)
plot1 <- ggplot(data_IND1_02I, aes(x = jaar, y = IND1_2)) +
geom_boxplot() + theme(
# Remove panel border
panel.border = element_blank(),
# Remove panel grid lines
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
# Remove panel background
panel.background = element_blank(),
# Add axis line
axis.line = element_line(colour = "grey")
) + scale_x_discrete(name = "Time", breaks = list_jaar) +
scale_y_continuous(name = "Indicator-Household Charatcteristics 2",
breaks = seq(0, 100, by =10), limits = c(10, 90)) +
geom_hline(yintercept = 50, linetype="dotted") +
ggtitle("Household Characteristics (Indicator 2)")
ggsave("../Output/Chapter_1/Step4/HouseholdChar_IND2_TimeAver_Boxplot.png", plot = plot1, width = 15)
# # Tile plot 2
plot3 <- ggplot(data = data_IND1_02I, mapping = aes(y = Buurtcom_1,
x = jaar,
fill = IND1_2_Q)) +
geom_tile() +
scale_fill_manual(values= colIntOrRd) +
labs(fill = "Indicator 2") + xlab("Time") + ylab("Neighborhood")
ggsave("../Output/Chapter_1/Step4/HeatMap_Indic2Time.png",
plot = plot3, height = 15)
vect_map_IND2 <- paste("map", list_jaar, "IND1_2",sep = "_")
for(ii in 1:length(list_jaar)){
map_AMS_wijken_sf_Jaar <- map_AMS_wijken_sf
data_IND1_02I_jaar <- data_IND1_02I %>% filter(jaar == list_jaar[ii]) %>% select("Buurtcom_1", "IND1_2_Q")
map_AMS_wijken_sf_Jaar <- merge(map_AMS_wijken_sf_Jaar, data_IND1_02I_jaar, by = "Buurtcom_1", all.x = T)
mmm <- tm_shape(map_AMS_wijken_sf_Jaar) +
tm_borders(col = gray(0.5), alpha = 0.6)+
tm_fill(col = "IND1_2_Q", palette = "seq", textNA = "Other Neighborhoods", colorNA = "white") +
tm_shape(map_AMS_district) +
tm_borders(alpha = 1, lwd = 2, col = gray(0.5)) +
tm_text("Stadsdeel", size = 0.5, shadow=TRUE) +
tm_layout( title = list_jaar[ii], frame = FALSE,
inner.margins=c(0,0,.1,0), title.size=.8)
assign(x = vect_map_IND2[ii], value = mmm)
rm(mmm)
}
tpArr_YearIND2 <- tmap_arrange(map_2007_IND1_2, map_2008_IND1_2, map_2009_IND1_2, map_2010_IND1_2,
map_2011_IND1_2, map_2012_IND1_2, map_2013_IND1_2, map_2014_IND1_2,
map_2015_IND1_2, map_2016_IND1_2, map_2017_IND1_2, map_2018_IND1_2, ncol = 3)
tmap_save(tpArr_YearIND2, "../Output/Chapter_1/Step4/Map_PerNeighYearIND2.png", width=5000, height=3000)
save(data_IND1_02I, file= "../Output/Chapter_1/Step4/data_IND1_02I.Rdata")
# # # comp 1-3
# png(filename = "../Output/Chapter_1/Step4/PCA_IND1_Indiv13.png", units="px",
# width=1600, height=1200, res=300)
# plot(pcaIND1,choix="ind", habillage = id_qualiSup, size = 5,cex= 0.7,
# label = "quali", title = "IND1: Residential Characteristics", axes = c(1, 3))
# dev.off()
#
# png(filename = "../Output/Chapter_1/Step4/PCA_IND1_Var13.png", units="px",
# width=1600, height=1600, res=300)
# plot(pcaIND1,choix="var",
# size = 5,cex= 0.7,
# title = "IND1: Residential Characteristics", axes = c(1, 3))
# dev.off()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # housing PCA
|
/03_pca_socioeconomic.R
|
no_license
|
fhtejedorg/Airbnb_Gentrification_AMS
|
R
| false
| false
| 29,723
|
r
|
##########################################################################
# # R version:
# # File Name:
# # Author:
# # Process:
# # Inputs:
# # Outputs:
# # File history:
##########################################################################
setwd("/Users/fabiotejedor/Documents/TU_Delft_University/Thesis_Project/Thesis\ Preparation/Thesis_Airbnb_Disruption/Methodology/Data")
##########################################################################
## loading packages
##########################################################################
library(tidyverse)
library(rgdal)
library(xlsx)
library(readxl)
library(dplyr)
library(tigris)
library(ggplot2)
library(mapproj)
library(maps)
library(ggthemes)
library(gridExtra)
library(ggpubr)
library(tidyr)
library(rlang)
library(rgeos)
library(EnvStats)
library(sf)
library(tmap)
library(sf)
library(mapview)
library(geojsonio)
library(spatialEco)
library(FRK)
library(FactoMineR)
library(psych)
library(mice)
library(reshape2)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # functions
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
rm(list = ls())
fun_cut <- function(x, nCut = 6) {
  # Bin `x` into an ordered factor using quantile-based breakpoints.
  #
  # `nCut` is the number of break points (so nCut - 1 intervals result).
  # NAs are ignored when computing the quantiles; the lowest value is
  # included in the first interval.
  breaks <- quantile(x, probs = seq(0, 1, length.out = nCut), na.rm = TRUE)
  cut(x, breaks = breaks, include.lowest = TRUE, dig.lab = 10,
      ordered_result = TRUE)
}
fun_sta <- function(x){
  # Standardize `x` to a z-score: subtract the mean and divide by the
  # standard deviation, ignoring missing values in both statistics.
  # NAs in `x` remain NA in the result.
  mu <- mean(x, na.rm = TRUE)
  sigma <- sd(x, na.rm = TRUE)
  (x - mu) / sigma
}
fun_growth <- function(x){
  # Average year-over-year percentage growth of the series `x`.
  #
  # Each consecutive pair contributes (x[t+1] - x[t]) / x[t]; the mean
  # of those relative changes is returned as a percentage.
  #
  # A series with fewer than two observations has no growth rate, so
  # NA is returned.  (The original loop used 1:(length(x)-1), which for
  # a length-1 input iterated over c(1, 0) and produced NA via
  # out-of-range indexing; the explicit guard preserves that NA result
  # without relying on the footgun.)
  if (length(x) < 2) {
    return(NA_real_)
  }
  # Vectorised equivalent of the original element-by-element loop:
  # diff(x) gives x[t+1] - x[t]; dividing by the leading values yields
  # the per-step relative growth.  This also avoids growing a vector
  # one element at a time inside a loop.
  step_growth <- diff(x) / x[-length(x)]
  mean(step_growth) * 100
}
# # # Loading GIS information
# Intermediate objects produced by the Step3 scripts of Chapter 1.
load("../Output/Chapter_1/Step3/dataList_airbnb.Rdata")
load("../Output/Chapter_1/Step3/dataList_airbnb_sf.Rdata")
load("../Output/Chapter_1/Step3/map_AMS_wijken_sf.Rdata")
# District (stadsdeel) polygons for Amsterdam.
map_AMS_district <- readOGR(dsn = "./GIS/geojson_district", layer = "geojson_districts-polygon")
plot(map_AMS_district)
# BBGA neighbourhood statistics; the column types are read from a
# companion "coltypes" sheet of the same workbook.
colType <- read_excel("./OIS/AMS_DATA_NEIGH_0720.xlsx", sheet = "coltypes")
data_AMS_BBGA <- read_excel("./OIS/AMS_DATA_NEIGH_0720.xlsx", sheet = "bbga_excel_2020-07-09", col_types = colType$Type)
# Catalogue of candidate indicator variables for the analysis.
data_IND <- read_excel("./OIS/VARIABLE_INDICATORS.xlsx", sheet = "Indicators Total V03")
# Identifier columns kept alongside every indicator selection below.
vars_fix <- c("niveau", "niveaunaam", "SD", "sdnaam", "gebiedcode15",
"gebiedcodenaam", "gebiednaam","jaar")
# Neighbourhood-combination (wijk) polygons.
map_AMS_wijken <- readOGR(dsn = "./GIS/geojson_wijken", layer = "geojson_wijken")
plot(map_AMS_wijken)
# Analysis window and geography-level settings used throughout the script.
list_jaar = 2007:2018
level <- c("Wijken")
level_region <- "Buurtcombi"
level_unknow <- "Z onbekend"
codeNeiG <- "gebiedcode15"
nameNeiG <- "gebiednaam"
# Sequential red palette used for the quintile maps.
colIntOrRd <- c('#fef0d9','#fdcc8a','#fc8d59','#e34a33','#b30000') # quintiles
# Indicator 1: Residential characteristics
# # Residential characteristics
list_IND1 <- subset(data_IND, Type == "Residential Characteristics" & Selected == "Included")
list_IND1 <- list_IND1 %>% filter(!is.na(list_IND1$Variable))
list_IND1$Variable <- toupper(list_IND1$Variable)
cols_IND1 <- colType %>% filter(Variable %in% list_IND1$Variable) %>% dplyr:::select(Col, Variable)
dim(cols_IND1)
cols_IND1 <- merge(cols_IND1, list_IND1, all.x = T)
dim(cols_IND1)
cols_IND1 <- cols_IND1 %>% select(-c(Definition, Definitie))
varsIND1 <- unlist(cols_IND1$Col, use.names = FALSE)
vvv1 <- c(vars_fix, varsIND1)
# lll <- data_AMS_BBGA %>%
# filter(niveaunaam %in% level & jaar %in% list_jaar & !sdnaam %in% level_unknow)
#
# dim(lll[is.na(lll$BEVALLEENHH_P), c("gebiednaam", "jaar")])
# dim(data_IND1[is.na(data_IND1$EDU_High), ])
data_IND1 <- data_AMS_BBGA %>%
filter(niveaunaam %in% level & jaar %in% list_jaar & !sdnaam %in% level_unknow) %>%
dplyr:::select(codeNeiG, nameNeiG, jaar, vvv1)
summary(data_IND1)
ppp <- data_AMS_BBGA %>%
filter(niveaunaam %in% level & jaar %in% list_jaar & !sdnaam %in% level_unknow) %>%
dplyr:::select(codeNeiG, nameNeiG, jaar, c("BEV18_26", "BEV27_65","BEV66PLUS", "BEVVEST", "BEVVERT", "BEVVESTINT", "BEVVERTINT", "BEVGELIJKINT", "BEVWOONDUUR"))
summary(ppp)
cor(ppp[, c("BEV18_26", "BEV27_65","BEV66PLUS")], use = "pairwise.complete.obs")
cor(ppp[, c("BEV18_26", "BEV27_65","BEV66PLUS", "BEVVEST", "BEVVERT", "BEVVESTINT", "BEVVERTINT", "BEVGELIJKINT", "BEVWOONDUUR")], use = "pairwise.complete.obs")
map_info <- map_AMS_wijken_sf[, c("Stadsdeel", "Buurtcombi", "Buurtcom_1", "Stadsdeel_", "Opp_m2")]
map_info <- data.frame(map_info)[, -6] ## eliinate geometry
data_IND1 <- merge(data_IND1, map_info, by.x = codeNeiG, by.y = "Buurtcombi", all.x= T)
list_neigh <- unique(data_IND1$Buurtcom_1)
data_IND1_Stad <- data_AMS_BBGA %>%
filter(niveaunaam %in% "Stadsdelen" & jaar %in% list_jaar & !sdnaam %in% level_unknow) %>%
dplyr:::select(codeNeiG, nameNeiG, jaar, vvv1)
summary(data_IND1_Stad)
# # neighborhoods without data
data_IND1 <- data_IND1 %>% filter(!gebiednaam %in% "IJburg Oost")
data_IND1$ID <- 1:nrow(data_IND1)
# # Variable imputation using stadsdelen
# For each indicator column (positions 9+ of vvv1; the first 8 entries are
# identifier columns), fill neighbourhood-level NAs with the value of the
# enclosing district (stadsdeel) for the same year.
for(ii in 9:length(vvv1)){
# Missing-value mask and missing share for this column.
ddd1 <- is.na(data_IND1[, vvv1[ii]])
ddd <- sum(ddd1)/length(data_IND1[, vvv1[ii]])
if(ddd>0){
# District-level series for the same variable, keyed by district + year.
nnn <- data_IND1_Stad[, c("SD", "jaar", vvv1[ii])]
colnames(nnn) <- c("Stadsdeel_","jaar", "varImp")
data_IND1 <- merge(data_IND1, nnn, by = c("Stadsdeel_", "jaar"), all.x = T)
# How many NAs can actually be replaced by a non-missing district value.
qqq <- sum(is.na(data_IND1[, vvv1[ii]]) & !is.na(data_IND1$varImp))
cat(vvv1[ii], " - ", sum(ddd1), ".Replaced: ",qqq ,"\n")
data_IND1[, vvv1[ii]] <- ifelse(is.na(data_IND1[, vvv1[ii]]), data_IND1$varImp, data_IND1[, vvv1[ii]])
# Drop the temporary imputation column before the next iteration.
data_IND1$varImp <- NULL
}
}
outAn <- NULL
kk <- 1
for(ii in 9:length(vvv1)){
www <- prop.table(table(is.na(data_IND1[, vvv1[ii]])))
if(length(www) == 1) next
if(www[2] > 0.3){
outAn[kk] <- vvv1[ii]
kk = kk + 1
}
}
data_IND1 <- data_IND1[, !colnames(data_IND1) %in% outAn]
summary(data_IND1)
data_IND1$popKm2 <- with(data_IND1, BEVTOTAAL/(Opp_m2*1e-6))# population per Km2
varsIND1 <- c(varsIND1, "popKm2")
renameCol <- cols_IND1$Relabel[match(colnames(data_IND1), cols_IND1$Col)]
renameCol[is.na(renameCol)] <- colnames(data_IND1)[is.na(renameCol)]
colnames(data_IND1) <- renameCol
data_IND1$"PopDut65Plus" <- data_IND1$"PopDut65_79" + data_IND1$"PopDut80Plus"
data_IND1$"PopWE65Plus" <- data_IND1$"PopWE65_79" + data_IND1$"PopWE80Plus"
data_IND1$"PopNWe65Plus" <- data_IND1$"PopNWe65_79" + data_IND1$"PopNWe80Plus"
data_IND1$"PopDut65_79" <- NULL
data_IND1$"PopDut80+" <- NULL
data_IND1$"PopWE65_79" <- NULL
data_IND1$"PopWE80+" <- NULL
data_IND1$"PopNWe65_79" <- NULL
data_IND1$"PopNWe80+" <- NULL
data_IND1$"popReg" <- NULL
list_IND1_02 <- subset(data_IND, Type == "Residential Characteristics" & Selected == "Included")
varIND1_02 <- list_IND1_02[list_IND1_02$Relabel %in%colnames(data_IND1) ,]
varIND1_02 <- varIND1_02[order(varIND1_02$ID), ]
varIND1_02 <- varIND1_02$Relabel
ttt <- colnames(data_IND1)[!colnames(data_IND1) %in% varIND1_02]
ttt <- c(ttt, varIND1_02)
data_IND1_02 <- data_IND1[, ttt]
data_IND1_02 <- data_IND1_02[order(data_IND1_02$gebiedcode15, data_IND1_02$jaar), ]
# colnames(data_IND1_02) <- gsub("\\+", "oM", colnames(data_IND1_02))
# varIND1_02 <- gsub("\\+", "oM", varIND1_02)
# ttt <- gsub("\\+", "oM", ttt)
# # dataset imputation
varsImp <- c("jaar", varIND1_02)
md.pattern(data_IND1_02[, varsImp])
imputed_Data <- mice(data_IND1_02[, varsImp],
maxit = 50, m = 1,
method = 'pmm', seed = 500)
summary(imputed_Data)
png(filename = "../Output/Chapter_1/Step4/ImputedData_HouseholdChar.png", units="px", width=1300, height=800)
densityplot(imputed_Data)
dev.off()
# stripplot(imputed_Data, pch = 20, cex = 1.2)
completeData <- complete(imputed_Data, 1)
# completeData <- complete(imputed_Data,"long", include=TRUE)
# imp <- melt(completeData, c(".imp",".id","jaar"))
# imp$Imputed<-ifelse(imp$".imp"==0,"Observed","Imputed")
# ggplot(imp[1:5000, ], aes(x=value, group=.imp, colour=Imputed)) +
# stat_density(geom = "path",position = "identity") +
# facet_wrap(~variable, ncol=2, scales="free")
summary(completeData)
summary(data_IND1_02)
data_IND1_02I <- completeData# imputed data
data_IND1_02I <- cbind(data_IND1_02I, data_IND1_02[, !colnames(data_IND1_02) %in% colnames(data_IND1_02I)])
data_IND1_02I <- data_IND1_02I[, ttt]
summary(data_IND1_02I)
#
# countNA <- list()
# data_INDYEAR <- NULL
# for(ii in 1:length(varIND1_02)){
# ddd <- data_IND1[!is.na(data_IND1[, varIND1_02[ii]]), ] %>% select(Buurtcom_1, jaar, varIND1_02[ii])
# eee <- ddd %>% group_by(Buurtcom_1) %>% summarise(nY = n())
# fff <- data.frame(Variable = varIND1_02[ii], Year = sort(unique(ddd$jaar)))
# data_INDYEAR <- rbind(data_INDYEAR, fff)
# }
# datrea <- data_INDYEAR %>% group_by(Variable) %>% summarise(1- n()/length(list_jaar))
# ppp <- data_IND1[is.na(data_IND1$EDU_High), ]
# ppp %>% filter(gebiedcode15 == "E36")
# nrow(ppp)/nrow(data_IND1)
# #IJburg Oost discarded
### aggregated data
data_IND1_02IA <- data_IND1_02I %>% group_by(Stadsdeel, Buurtcom_1) %>% summarise_at(varIND1_02, mean, na.rm = T)
neigDeleted <- unlist(data_IND1_02IA[is.na(data_IND1_02IA$popKm2), "Buurtcom_1"], use.names = F)
neigDeleted <- as.character(neigDeleted)
cat("Neigh deleted:", neigDeleted, "\n")
# # calculating maps for each set of variables
# # # Age and Racial origin (4 Ages and 3 Racial categories)
data_IND1_02IA$Pop18_22 <- with(data_IND1_02IA, PopWE18_22+PopNWe18_22+PopDut18_22)
data_IND1_02IA$Pop23_39<- with(data_IND1_02IA, PopWE23_39+PopNWe23_39+PopDut23_39)
data_IND1_02IA$Pop40_64 <- with(data_IND1_02IA, PopWE40_64+PopNWe40_64+PopDut40_64)
data_IND1_02IA$Pop65Plus <- with(data_IND1_02IA, PopWE65Plus+PopNWe65Plus+PopDut65Plus)
pop1 <- c("PopWE18_22","PopNWe18_22", "PopDut18_22", "Pop18_22")
pop2 <- c("PopWE23_39","PopNWe23_39","PopDut23_39" , "Pop23_39")
pop3 <- c("PopWE40_64","PopNWe40_64", "PopDut40_64", "Pop40_64")
pop4 <- c("PopWE65Plus","PopNWe65Plus", "PopDut65Plus", "Pop65Plus")
ppp1 <- t(apply(data.frame(data_IND1_02IA[, pop1]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp1 <- data.frame(ppp1)
colnames(ppp1) <- paste(pop1[1:3], "_P", sep = "")
ppp2 <- t(apply(data.frame(data_IND1_02IA[, pop2]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp2 <- data.frame(ppp2)
colnames(ppp2) <- paste(pop2[1:3], "_P", sep = "")
ppp3 <- t(apply(data.frame(data_IND1_02IA[, pop3]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp3 <- data.frame(ppp3)
colnames(ppp3) <- paste(pop3[1:3], "_P", sep = "")
ppp4 <- t(apply(data.frame(data_IND1_02IA[, pop4]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp4 <- data.frame(ppp4)
colnames(ppp4) <- paste(pop4[1:3], "_P", sep = "")
data_IND1_02IA <- cbind(data.frame(data_IND1_02IA), ppp1, ppp2, ppp3, ppp4)
pop5 <- c(sort(pop1[-4]), sort(pop2[-4]),sort(pop3[-4]),sort(pop4[-4]))
pop5 <- paste(pop5, "_P", sep = "")
# # # for the maps
map_AMS_wijken_sf <- merge(map_AMS_wijken_sf, data_IND1_02IA, by = "Buurtcom_1", all.x = T)
vect_map_AgeRac <- paste("map", pop5, sep = "_")
for(ii in 1:length(pop5)){
mmm <- tm_shape(map_AMS_wijken_sf) +
tm_borders(col = gray(0.6))+
tm_fill(pop5[ii]) +
tm_shape(map_AMS_district) +
tm_borders(alpha = 1, lwd = 2, col = gray(0.6)) +
tm_text("Stadsdeel", size = 0.5, shadow=TRUE)
assign(x = vect_map_AgeRac[ii], value = mmm)
rm(mmm)
}
tpArr_AgeRac <- tmap_arrange(map_PopDut18_22_P, map_PopNWe18_22_P, map_PopWE18_22_P,
map_PopDut23_39_P, map_PopNWe23_39_P, map_PopWE23_39_P,
map_PopDut40_64_P, map_PopNWe40_64_P, map_PopWE40_64_P,
map_PopDut65Plus_P, map_PopNWe65Plus_P, map_PopWE65Plus_P, ncol = 3)
tmap_save(tpArr_AgeRac, "../Output/Chapter_1/Step4/Map_PerAge_RacialDistr.png", width=5000, height=3000)
# # for the concentration of given Racial per each age group
pop6 <- c(sort(pop1[-4]), sort(pop2[-4]),sort(pop3[-4]),sort(pop4[-4]))
ppp6 <- apply(data_IND1_02IA[, pop6], 2, function(x)x/sum(x) * 100)
ppp6 <- data.frame(ppp6)
pop6_c <-paste(pop6, "_PNe", sep = "")
colnames(ppp6) <- pop6_c # per neighborhood
data_IND1_02IA <- cbind(data_IND1_02IA, ppp6)
# # # for the maps per neighborhood
map_AMS_wijken_sf <- merge(map_AMS_wijken_sf, data_IND1_02IA[, c("Buurtcom_1", pop6_c)],
by = "Buurtcom_1", all.x = T)
vect_map_AgeRac_PNe <- paste("map", pop6_c, sep = "_")
for(ii in 1:length(pop6_c)){
mmm <- tm_shape(map_AMS_wijken_sf) +
tm_borders(col = gray(0.6))+
tm_fill(pop6_c[ii]) +
tm_shape(map_AMS_district) +
tm_borders(alpha = 1, lwd = 2, col = gray(0.6)) +
tm_text("Stadsdeel", size = 0.5, shadow=TRUE)
assign(x = vect_map_AgeRac_PNe[ii], value = mmm)
rm(mmm)
}
tpArr_AgeRac <- tmap_arrange(map_PopDut18_22_PNe, map_PopNWe18_22_PNe, map_PopWE18_22_PNe,
map_PopDut23_39_PNe, map_PopNWe23_39_PNe, map_PopWE23_39_PNe,
map_PopDut40_64_PNe, map_PopNWe40_64_PNe, map_PopWE40_64_PNe,
map_PopDut65Plus_PNe, map_PopNWe65Plus_PNe, map_PopWE65Plus_PNe, ncol = 3)
tmap_save(tpArr_AgeRac, "../Output/Chapter_1/Step4/Map_PerNeighAge&RacialDistr.png", width=5000, height=3000)
# # variables that measure concentration in neighborhoods
# # set variables for households
data_IND1_02IA_Copy <- data_IND1_02IA
varHouseH <- c("HSingle_PaFa", "HSingle_Pe", "HMar_WOCh", "HMar_WCh",
"HUnMar_WOCh", "HUnMar_Ch", "HOther")
varPopArr <- c("Pop_Arr", "Pop_Dep", "Pop_Arr_AMS", "Pop_Dep_AMS",
"Pop_Mig_Ar", "Dur_Res")
varYear <- c("Pop18_22", "Pop23_39", "Pop40_64", "Pop65Plus")
varRac <- c("Dutch", "NonWest", "West")
varComp <- c("popKm2", "New_Urb", "EDU_Low", "EDU_Medium","EDU_High")
pop1 <- c("PopWE18_22","PopNWe18_22", "PopDut18_22", "Pop18_22")
pop2 <- c("PopWE23_39","PopNWe23_39","PopDut23_39" , "Pop23_39")
pop3 <- c("PopWE40_64","PopNWe40_64", "PopDut40_64", "Pop40_64")
pop4 <- c("PopWE65Plus","PopNWe65Plus", "PopDut65Plus", "Pop65Plus")
data_IND1_02IA$Dutch <- with(data_IND1_02IA, PopDut18_22 + PopDut23_39 + PopDut40_64 + PopDut65Plus)
data_IND1_02IA$NonWest <- with(data_IND1_02IA, PopNWe18_22 + PopNWe23_39 + PopNWe40_64 + PopNWe65Plus)
data_IND1_02IA$West <- with(data_IND1_02IA, PopWE18_22 + PopWE23_39 + PopWE40_64 + PopWE65Plus)
data_IND1_02IA[, varHouseH] <- t(apply(data_IND1_02IA[, varHouseH], 1, function(x)x/sum(x) * 100))
data_IND1_02IA[, varRac] <- t(apply(data_IND1_02IA[, varRac], 1, function(x)x/sum(x) * 100))
data_IND1_02IA[, varYear] <- t(apply(data_IND1_02IA[, varYear], 1, function(x)x/sum(x) * 100))
varIND1_03 <- c(varHouseH, varYear, varRac, varPopArr, varComp)
# Neigh deleted: IJburg Oost
library(corrplot)
library(xtable)
M <- cor(data_IND1_02IA[, varIND1_03])
col3 <- colorRampPalette(c("red", "green", "blue"))
col2 <- colorRampPalette(c("#67001F", "#B2182B", "#D6604D", "#F4A582",
"#FDDBC7", "#FFFFFF", "#D1E5F0", "#92C5DE",
"#4393C3", "#2166AC", "#053061"))
png(filename = "../Output/Chapter_1/Step4/MatrixCor_IND1_TOTALS.png", units="px", width=1300, height=800)
corrplot(M, method = "circle", type = "upper", tl.col = "black", diag = F)
dev.off()
xtable(M, caption = "Correlation matrix Household Characteristics")
# # exclude Pop_Mig_Ar because of low correlation and results ACP
# # correlation among variables
# # summary main variables
forSumm <- merge(data_IND1_02I, map_AMS_wijken_sf[, c("Buurtcombi", "gentry_opDef", "gentryABnB_opDef")],
by.x = "gebiedcode15", by.y = "Buurtcombi", all.x = T)
mmm <- forSumm[, c(varIND1_03, "jaar", "gentry_opDef")] %>%
group_by(jaar, gentry_opDef) %>%
summarise_at(varIND1_03, mean)
mmm2 <- forSumm[, c(varIND1_03, "jaar", "gentry_opDef")] %>%
group_by(jaar) %>%
summarise_at(varIND1_03, mean)
mmm3 <- cbind(
t(mmm2 %>% summarise_at(varIND1_03, mean)),
t(mmm2 %>% summarise_at(varIND1_03, sd)),
t((mmm %>% group_by(gentry_opDef) %>% summarise_at(varIND1_03, mean))[, -1]),
t((mmm %>% group_by(gentry_opDef) %>% summarise_at(varIND1_03, fun_growth))[, -1])
)
colnames(mmm3) <- c("mean", "sd", "mean_g", "mean_ng", "growth_g", "growth_ng")
mmm3 <- data.frame(mmm3)
mmm3$mean_g <- mmm3$mean_g - mmm3$mean
mmm3$mean_ng <- mmm3$mean_ng -mmm3$mean
varsPCA <- c(varIND1_03, "Stadsdeel")
id_qualiSup <- which(varsPCA %in% "Stadsdeel")
pcaIND1 <- PCA(data_IND1_02IA[, varsPCA], quali.sup = id_qualiSup, ncp = 5)
pca_Weigh <- data.frame(pcaIND1$svd$V[, 1:2])
rownames(pca_Weigh) <- varIND1_03
colnames(pca_Weigh) <- c("Dim1", "Dim2")
xtable(pca_Weigh, caption = "Weights PCA - Household Characteristics", label = "tab:CH06:Weights1_ACP")
summary(pcaIND1)
anly_PCAIND1 <- dimdesc(pcaIND1, proba = 0.1) # analysis PCA
xtable(anly_PCAIND1$Dim.1$quanti, caption = "Correlation test between First dimension and variables", label = "tab:CH06:CorrFactor1_1ACP")
xtable(anly_PCAIND1$Dim.2$quanti, caption = "Correlation test between Second dimension and variables", label = "tab:CH06:CorrFactor1_2ACP")
xtable(pcaIND1$var$cos2[, 1:2], caption = "Cosine square for columns representation", label = "tab:CH06:Cosine_1ACP")
write.infile(pcaIND1, file = "../Output/Chapter_1/Step4/PCA_IND1.csv")
# # comp 1-2 (60%)
png(filename = "../Output/Chapter_1/Step4/PCA_IND1_Indiv_12.png", units="px",
width=1600, height=1200, res=300)
plot(pcaIND1,choix="ind", habillage = id_qualiSup, size = 5,cex= 0.7,
label = "quali", title = "IND1: Household Characteristics", axes = c(1, 2))
dev.off()
png(filename = "../Output/Chapter_1/Step4/PCA_IND1_Var12.png", units="px",
width=1600, height=1600, res=300)
plot(pcaIND1,choix="var",
size = 5,cex= 0.7,
title = "IND1: Household Characteristics", axes = c(1, 2))
dev.off()
# # Quintiles to classify the index
# Take the first two PCA component scores and rescale each to a
# T-score-like scale: dividing by sqrt(eigenvalue) standardises the
# component to unit variance, then *15 + 50 centres it at 50 with sd 15.
pcaDataIND1 <- pcaIND1$ind$coord[, 1:2]
varEig <- pcaIND1$eig[1:2, 1]
pcaDataIND1[, 1] <- (pcaDataIND1[, 1] / sqrt(varEig[1]))* 15 + 50
pcaDataIND1[, 2] <- (pcaDataIND1[, 2] / sqrt(varEig[2]))* 15 + 50
# Quintile cut points (20/40/60/80%) of each rescaled indicator; reused
# later to classify predicted yearly scores on the same breaks.
IND1_1_Q <- quantile(pcaDataIND1[, 1], probs = c(0.20, 0.40, 0.60, 0.80))
IND1_1_Q <- round(IND1_1_Q, 2)
IND1_2_Q <- quantile(pcaDataIND1[, 2], probs = c(0.20, 0.40, 0.60, 0.80))
IND1_2_Q <- round(IND1_2_Q, 2)
colnames(pcaDataIND1) <- c("IND1_1", "IND1_2")
data_IND1_02IA <- cbind(data_IND1_02IA, pcaDataIND1)
# Ordered quintile labels for mapping/tabulation.
data_IND1_02IA$IND1_1_Q <- cut(data_IND1_02IA$IND1_1, breaks = c(-Inf,IND1_1_Q, Inf), labels = c("Low", "Medium-Low", "Medium", "Medium-High", "High"))
data_IND1_02IA$IND1_2_Q <- cut(data_IND1_02IA$IND1_2, breaks = c(-Inf,IND1_2_Q, Inf), labels = c("Low", "Medium-Low", "Medium", "Medium-High", "High"))
# # Characterization of each quintiles
# # comparison with income
data_AMS_INCOME <- data_AMS_BBGA %>%
filter(niveaunaam %in% level & jaar %in% list_jaar & !sdnaam %in% level_unknow) %>%
select(codeNeiG, nameNeiG, jaar, "IHHINK_GEM")
data_AMS_INCOME <- data_AMS_INCOME %>% filter(gebiedcode15 %in% data_IND1_02I$gebiedcode15)
data_AMS_INCOMEA <- data_AMS_INCOME %>% group_by(gebiedcode15) %>% summarise(avIncome= median(IHHINK_GEM, na.rm = T))
data_IND1_02IA <- merge(data_IND1_02IA, data_AMS_INCOMEA, all.x = T)
ttIND1 <- data_IND1_02IA[, c(varIND1_03, "avIncome", "IND1_1", "IND1_2", "IND1_1_Q", "IND1_2_Q")] %>%
group_by(IND1_1_Q) %>%
summarise_at(c(varIND1_03, "avIncome"), mean, na.rm = T)
ttIND1 <- data.frame(ttIND1)
t(ttIND1)
ttIND2 <- data_IND1_02IA[, c(varIND1_03, "avIncome", "IND1_1", "IND1_2", "IND1_1_Q", "IND1_2_Q")] %>%
group_by(IND1_2_Q) %>%
summarise_at(c(varIND1_03, "avIncome"), mean, na.rm = T)
t(ttIND2)
data_IND1_02IA %>% group_by(Stadsdeel) %>% summarise(mean(IND1_1), mean(IND1_2))
# # how it looks in time (PREDICTION)
data_IND1_02I$Pop18_22 <- with(data_IND1_02I, PopWE18_22+PopNWe18_22+PopDut18_22)
data_IND1_02I$Pop23_39<- with(data_IND1_02I, PopWE23_39+PopNWe23_39+PopDut23_39)
data_IND1_02I$Pop40_64 <- with(data_IND1_02I, PopWE40_64+PopNWe40_64+PopDut40_64)
data_IND1_02I$Pop65Plus <- with(data_IND1_02I, PopWE65Plus+PopNWe65Plus+PopDut65Plus)
ppp1 <- t(apply(data.frame(data_IND1_02I[, pop1]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp1 <- data.frame(ppp1)
colnames(ppp1) <- paste(pop1[1:3], "_P", sep = "")
ppp2 <- t(apply(data.frame(data_IND1_02I[, pop2]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp2 <- data.frame(ppp2)
colnames(ppp2) <- paste(pop2[1:3], "_P", sep = "")
ppp3 <- t(apply(data.frame(data_IND1_02I[, pop3]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp3 <- data.frame(ppp3)
colnames(ppp3) <- paste(pop3[1:3], "_P", sep = "")
ppp4 <- t(apply(data.frame(data_IND1_02I[, pop4]), 1, function(x)x[1:3]*1/x[4]) * 100)
ppp4 <- data.frame(ppp4)
colnames(ppp4) <- paste(pop4[1:3], "_P", sep = "")
data_IND1_02I <- cbind(data.frame(data_IND1_02I), ppp1, ppp2, ppp3, ppp4)
ppp6 <- apply(data_IND1_02I[, pop6], 2, function(x)x/sum(x) * 100)
ppp6 <- data.frame(ppp6)
pop6_c <-paste(pop6, "_PNe", sep = "")
colnames(ppp6) <- pop6_c # per neighborhood
data_IND1_02I <- cbind(data_IND1_02I, ppp6)
data_IND1_02I_Copy <- data_IND1_02I
data_IND1_02I$Dutch <- with(data_IND1_02I, PopDut18_22 + PopDut23_39 + PopDut40_64 + PopDut65Plus)
data_IND1_02I$NonWest <- with(data_IND1_02I, PopNWe18_22 + PopNWe23_39 + PopNWe40_64 + PopNWe65Plus)
data_IND1_02I$West <- with(data_IND1_02I, PopWE18_22 + PopWE23_39 + PopWE40_64 + PopWE65Plus)
data_IND1_02I[, varRac] <- t(apply(data_IND1_02I[, varRac], 1, function(x)x/sum(x)* 100))
data_IND1_02I[, varYear] <- t(apply(data_IND1_02I[, varYear], 1, function(x)x/sum(x)* 100))
data_IND1_02I[, varHouseH] <- t(apply(data_IND1_02I[, varHouseH], 1, function(x)x/sum(x)* 100))
predIND1 <- predict(pcaIND1, data_IND1_02I)$coord
# varIND1_03 <- c("popKm2", varHouseH, varYear, varRac, varPopArr, "New_Urb", "EDU_Low", "EDU_High")
predIND1 <- predIND1[, 1:2]
colnames(predIND1) <- c("IND1_1", "IND1_2")
pcaVarAll <- pcaIND1$var$coord
pcaVarAll <- pcaVarAll[, 1:2]
predIND1[, 1] <- (predIND1[, 1] / sqrt(varEig[1]))* 15 + 50
predIND1[, 2] <- (predIND1[, 2] / sqrt(varEig[2]))* 15 + 50
data_IND1_02I <- cbind(data_IND1_02I, predIND1)
data_IND1_02I$IND1_1_Q <- cut(data_IND1_02I$IND1_1, breaks = c(-Inf,IND1_1_Q, Inf), labels = c("Low", "Medium-Low", "Medium", "Medium-High", "High"))
data_IND1_02I$IND1_2_Q <- cut(data_IND1_02I$IND1_2, breaks = c(-Inf,IND1_2_Q, Inf), labels = c("Low", "Medium-Low", "Medium", "Medium-High", "High"))
pcaStatsY <- data_IND1_02I %>% group_by(jaar) %>% summarise(pcaMean1 = mean(IND1_1),
pcaMean2= mean(IND1_2),
pcaSd1 = sd(IND1_1),
pcaSd2 = sd(IND1_2))
pcaStatsY_L <- melt(pcaStatsY[, 1:3], id.vars = "jaar")
plot1 <- ggplot(pcaStatsY_L, aes(x = jaar, y = value, color = variable)) +
geom_point(alpha = 0.5) +
stat_smooth(aes(x = jaar, y = value), method = "lm",
formula = y ~ poly(x, 4), se = FALSE) +
theme(legend.text=element_text(size=8), legend.title=element_text(size=10))+
guides(color=guide_legend(title="Indicator")) +
scale_x_continuous(name = "Time", breaks = list_jaar) +
scale_y_continuous(name = "Average Indicators") +
geom_hline(yintercept = 50, linetype="dotted") +
ggtitle("Household Characteristics in time")
ggsave("../Output/Chapter_1/Step4/HouseholdCharac_TimeAver.png", plot = plot1, width = 15)
############################################################
### INDICATOR 1
############################################################
# # boxplot
data_IND1_02I$jaar <- as.factor(data_IND1_02I$jaar)
plot1 <- ggplot(data_IND1_02I, aes(x = jaar, y = IND1_1)) +
geom_boxplot() + theme(
# Remove panel border
panel.border = element_blank(),
# Remove panel grid lines
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
# Remove panel background
panel.background = element_blank(),
# Add axis line
axis.line = element_line(colour = "grey")
) + scale_x_discrete(name = "Time", breaks = list_jaar) +
scale_y_continuous(name = "Indicator-Household Charatcteristics 1",
breaks = seq(0, 100, by =10), limits = c(10, 90)) +
geom_hline(yintercept = 50, linetype="dotted") +
ggtitle("Household Characteristics (Indicator 1)")
ggsave("../Output/Chapter_1/Step4/HouseholdChar_IND1_TimeAver_Boxplot.png", plot = plot1, width = 15)
# # Tile plot for indicator 1
# # resume characteristics neighborhoods and Quintiles
map_AMS_wijken_sf <- map_AMS_wijken_sf[order(map_AMS_wijken_sf$Buurtcombi), ]
data_IND1_02I$Buurtcom_1 <- factor(data_IND1_02I$Buurtcom_1, levels = as.character(map_AMS_wijken_sf$Buurtcom_1))
plot2 <- ggplot(data = data_IND1_02I, mapping = aes(y = Buurtcom_1,
x = jaar,
fill = IND1_1_Q)) +
geom_tile() +
scale_fill_manual(values= colIntOrRd) +
labs(fill = "Indicator 1") + xlab("Time") + ylab("Neighborhood")
ggsave("../Output/Chapter_1/Step4/HeatMap_Indic1Time.png",
plot = plot2, height = 15)
vect_map_IND1 <- paste("map", list_jaar, "IND1_1",sep = "_")
for(ii in 1:length(list_jaar)){
map_AMS_wijken_sf_Jaar <- map_AMS_wijken_sf
data_IND1_02I_jaar <- data_IND1_02I %>% filter(jaar == list_jaar[ii]) %>% select("Buurtcom_1", "IND1_1_Q")
map_AMS_wijken_sf_Jaar <- merge(map_AMS_wijken_sf_Jaar, data_IND1_02I_jaar, by = "Buurtcom_1", all.x = T)
mmm <- tm_shape(map_AMS_wijken_sf_Jaar) +
tm_borders(col = gray(0.5), alpha = 0.6)+
tm_fill(col = "IND1_1_Q", palette = "seq", textNA = "Other Neighborhoods", colorNA = "white") +
tm_shape(map_AMS_district) +
tm_borders(alpha = 1, lwd = 2, col = gray(0.5)) +
tm_text("Stadsdeel", size = 0.5, shadow=TRUE) +
tm_layout( title = list_jaar[ii], frame = FALSE,
inner.margins=c(0,0,.1,0), title.size=.8)
assign(x = vect_map_IND1[ii], value = mmm)
rm(mmm)
}
tpArr_YearIND1 <- tmap_arrange(map_2007_IND1_1, map_2008_IND1_1, map_2009_IND1_1, map_2010_IND1_1,
map_2011_IND1_1, map_2012_IND1_1, map_2013_IND1_1, map_2014_IND1_1,
map_2015_IND1_1, map_2016_IND1_1, map_2017_IND1_1, map_2018_IND1_1, ncol = 3)
tmap_save(tpArr_YearIND1, "../Output/Chapter_1/Step4/Map_PerNeighYearIND1.png", width=5000, height=3000)
############################################################
### INDICATOR 2
############################################################
# # boxplot
data_IND1_02I$jaar <- as.factor(data_IND1_02I$jaar)
plot1 <- ggplot(data_IND1_02I, aes(x = jaar, y = IND1_2)) +
geom_boxplot() + theme(
# Remove panel border
panel.border = element_blank(),
# Remove panel grid lines
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
# Remove panel background
panel.background = element_blank(),
# Add axis line
axis.line = element_line(colour = "grey")
) + scale_x_discrete(name = "Time", breaks = list_jaar) +
scale_y_continuous(name = "Indicator-Household Charatcteristics 2",
breaks = seq(0, 100, by =10), limits = c(10, 90)) +
geom_hline(yintercept = 50, linetype="dotted") +
ggtitle("Household Characteristics (Indicator 2)")
ggsave("../Output/Chapter_1/Step4/HouseholdChar_IND2_TimeAver_Boxplot.png", plot = plot1, width = 15)
# # Tile plot 2
plot3 <- ggplot(data = data_IND1_02I, mapping = aes(y = Buurtcom_1,
x = jaar,
fill = IND1_2_Q)) +
geom_tile() +
scale_fill_manual(values= colIntOrRd) +
labs(fill = "Indicator 2") + xlab("Time") + ylab("Neighborhood")
ggsave("../Output/Chapter_1/Step4/HeatMap_Indic2Time.png",
plot = plot3, height = 15)
vect_map_IND2 <- paste("map", list_jaar, "IND1_2",sep = "_")
for(ii in 1:length(list_jaar)){
map_AMS_wijken_sf_Jaar <- map_AMS_wijken_sf
data_IND1_02I_jaar <- data_IND1_02I %>% filter(jaar == list_jaar[ii]) %>% select("Buurtcom_1", "IND1_2_Q")
map_AMS_wijken_sf_Jaar <- merge(map_AMS_wijken_sf_Jaar, data_IND1_02I_jaar, by = "Buurtcom_1", all.x = T)
mmm <- tm_shape(map_AMS_wijken_sf_Jaar) +
tm_borders(col = gray(0.5), alpha = 0.6)+
tm_fill(col = "IND1_2_Q", palette = "seq", textNA = "Other Neighborhoods", colorNA = "white") +
tm_shape(map_AMS_district) +
tm_borders(alpha = 1, lwd = 2, col = gray(0.5)) +
tm_text("Stadsdeel", size = 0.5, shadow=TRUE) +
tm_layout( title = list_jaar[ii], frame = FALSE,
inner.margins=c(0,0,.1,0), title.size=.8)
assign(x = vect_map_IND2[ii], value = mmm)
rm(mmm)
}
# Arrange the twelve per-year quintile maps (2007-2018) for indicator
# IND1_2 into a single 3-column panel, save it as a PNG, and persist the
# underlying indicator data frame for downstream chapters.
tpArr_YearIND2 <- tmap_arrange(map_2007_IND1_2, map_2008_IND1_2, map_2009_IND1_2, map_2010_IND1_2,
map_2011_IND1_2, map_2012_IND1_2, map_2013_IND1_2, map_2014_IND1_2,
map_2015_IND1_2, map_2016_IND1_2, map_2017_IND1_2, map_2018_IND1_2, ncol = 3)
tmap_save(tpArr_YearIND2, "../Output/Chapter_1/Step4/Map_PerNeighYearIND2.png", width=5000, height=3000)
save(data_IND1_02I, file= "../Output/Chapter_1/Step4/data_IND1_02I.Rdata")
# # # comp 1-3
# png(filename = "../Output/Chapter_1/Step4/PCA_IND1_Indiv13.png", units="px",
# width=1600, height=1200, res=300)
# plot(pcaIND1,choix="ind", habillage = id_qualiSup, size = 5,cex= 0.7,
# label = "quali", title = "IND1: Residential Characteristics", axes = c(1, 3))
# dev.off()
#
# png(filename = "../Output/Chapter_1/Step4/PCA_IND1_Var13.png", units="px",
# width=1600, height=1600, res=300)
# plot(pcaIND1,choix="var",
# size = 5,cex= 0.7,
# title = "IND1: Residential Characteristics", axes = c(1, 3))
# dev.off()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # housing PCA
|
# ---- functions-to-examine-temporal-patterns -------------------
# examine the pattern of measures over time for a given individual
temporal_pattern <- function(d, time, measure, seed_value = 42){
  # Print the time/measure rows for one randomly sampled individual.
  #
  # @param d          Long-format data frame with an `id` column.
  # @param time       Name (string) of the time column.
  # @param measure    Name (string) of the measure column.
  # @param seed_value Seed so the sampled id is reproducible.
  set.seed(seed_value)
  d_long <- d
  (ids <- sample(unique(d_long$id), 1))
  # dplyr::select_() is deprecated; select(all_of(...)) is the supported
  # way to select columns by string name and errors on missing columns.
  d1 <- d_long %>%
    dplyr::filter(id %in% ids) %>%
    dplyr::select(dplyr::all_of(c("id", time, measure)))
  print(d1)
}
# ds %>% temporal_pattern("year","srmemory", 42)
# examine the descriptives of a measure across time time points
over_time <- function(ds, time, measure_name, exclude_values = "") {
  # Describe a measure across time points: print a cross-tabulation of
  # measure values by time (including NA), then return a data frame with
  # the mean, sd, and count of the measure at each time point after
  # dropping excluded codes and missing rows.
  #
  # @param ds             Data frame (or coercible) with `id`, `time`,
  #                       and `measure_name` columns.
  # @param time           Name (string) of the time column.
  # @param measure_name   Name (string) of the measure column.
  # @param exclude_values Values of the measure to drop before summarising.
  ds <- as.data.frame(ds)
  testit::assert("No such measure in the dataset", measure_name %in% unique(names(ds)))
  cat("Measure : ", measure_name, "\n", sep = "")
  # Cross-tabulation including NA, with zero cells blanked for legibility.
  t <- table(ds[, measure_name], ds[, time], useNA = "always")
  t[t == 0] <- "."
  print(t)
  cat("\n")
  ds[, measure_name] <- as.numeric(ds[, measure_name])
  d <- ds[!(ds[, measure_name] %in% exclude_values), ]
  # The original built this summary with lazyeval and the deprecated
  # select_/group_by_/summarize_ verbs; the .data[[ ]] pronoun is the
  # supported way to use string column names in modern dplyr.
  t <- d %>%
    dplyr::select(dplyr::all_of(c("id", time, measure_name))) %>%
    na.omit() %>%
    dplyr::group_by(.data[[time]]) %>%
    dplyr::summarize(
      mean = round(mean(.data[[measure_name]]), 2),
      sd = round(sd(.data[[measure_name]]), 3),
      count = dplyr::n()
    )
  return(as.data.frame(t))
}
# ds %>% over_time("year", "srmemory")
# ds %>% over_time("lb_wave", "srmemory")
# a function that provides a table of mean, sd, and count over time.
summarize_over_time <- function(ds, time, measure_name, exclude_values = "") {
  # Return a data frame with the mean, sd, and count of `measure_name`
  # at each value of `time`, excluding rows whose measure is in
  # `exclude_values` and rows with missing values.
  #
  # @param ds             Data frame with `id`, `time`, `measure_name`.
  # @param time           Name (string) of the time column.
  # @param measure_name   Name (string) of the measure column.
  # @param exclude_values Values of the measure to drop before summarising.
  d <- ds[!(ds[, measure_name] %in% exclude_values), ]
  # Modernised from lazyeval + deprecated select_/group_by_/summarize_
  # to the .data[[ ]] pronoun for string column names.
  t <- d %>%
    dplyr::select(dplyr::all_of(c("id", time, measure_name))) %>%
    na.omit() %>%
    dplyr::group_by(.data[[time]]) %>%
    dplyr::summarize(
      mean = round(mean(.data[[measure_name]]), 2),
      sd = round(sd(.data[[measure_name]]), 3),
      count = dplyr::n()
    )
  return(as.data.frame(t))
}
# ---- utility-functions -------------------------------------------------------
# adds neat styling to your knitr table
neat <- function(x, output_format = "html"){
  # Render `x` as a knitr table.  Pandoc output gets a plain kable; any
  # other format additionally receives the kableExtra bootstrap styling
  # used throughout the reports (striped, hover, condensed, responsive;
  # left-aligned, not full width).
  if (output_format == "pandoc") {
    return(knitr::kable(x))
  }
  styled <- kableExtra::kable_styling(
    knitr::kable(x, format = output_format),
    bootstrap_options = c("striped", "hover", "condensed", "responsive"),
    full_width = F,
    position = "left"
  )
  styled
}
# ds %>% distinct(id) %>% count() %>% neat(10)
# adds a formated datatable
neat_DT <- function(x, filter_="top"){
  # Render `x` as an interactive DT::datatable with a compact page size
  # and per-column filter controls.
  #
  # @param x       A data frame (or similar) to display.
  # @param filter_ Position of the filter controls (passed to DT `filter`).
  # @return A DT htmlwidget.
  xt <- x %>%
    DT::datatable(
      class = 'cell-border stripe'
      ,filter = filter_
      ,options = list(
        pageLength = 6,
        autoWidth = FALSE
      )
    )
  # Bug fix: the original returned `dt`, an undefined name, so every call
  # failed with "object 'dt' not found".  Return the widget actually built.
  return(xt)
}
# Some variables have different character codes for missing values
# Translate various character values into NA values
# Translate ad-hoc character codes for missing data into real NA values.
#
# Source variables mark missingness with tokens such as "NULL", "-", "NA",
# "{blank}" and "n/a"; gsub() with an NA replacement turns any element
# matching a token into NA.
#
# Args:
#   x - vector to clean; non-character input is returned unchanged.
#
# Returns: x with recognised missing-value tokens replaced by NA.
replace_with_na <- function(x){
  # Only character vectors can carry the textual tokens; the class check is
  # loop-invariant, so hoist it out of the loop (the original re-tested it
  # on every iteration).
  if (!is.character(x)) {
    return(x)
  }
  na_tokens <- c(
    "^NULL$"
    ,"^-$"
    ,"^NA$"
    ,"^\\{blank\\}$"
    ,"^n/a$"
  )
  for (token in na_tokens) {
    x <- gsub(token, NA, x)
  }
  return(x)
}
# Usage:
# ds_patient_profiles <- ds_patient_profiles %>%
# dplyr::mutate_all(dplyr::funs(replace_with_na) )
|
/scripts/common-functions.R
|
no_license
|
casslbrown/brown-2017-disseration
|
R
| false
| false
| 3,669
|
r
|
# ---- functions-to-examine-temporal-patterns -------------------
# examine the pattern of measures over time for a given individual
# Print the measure trajectory over time for one randomly sampled id.
#
# Args:
#   d          - long-format data frame with an `id` column
#   time       - character name of the time column
#   measure    - character name of the measure column
#   seed_value - RNG seed so the sampled id is reproducible (default 42)
#
# Modernised: deprecated dplyr::select_ replaced by select(all_of(...)).
temporal_pattern <- function(d, time, measure, seed_value = 42){
  set.seed(seed_value)
  ids <- sample(unique(d$id), 1)
  d1 <- d %>%
    dplyr::filter(id %in% ids) %>%
    dplyr::select(dplyr::all_of(c("id", time, measure)))
  print(d1)
}
# ds %>% temporal_pattern("year","srmemory", 42)
# examine the descriptives of a measure across time time points
# Examine the descriptives of a measure across time points: prints the raw
# value-by-time frequency table, then returns mean/sd/count per time point.
#
# Args:
#   ds             - data frame (coerced via as.data.frame)
#   time           - character name of the time/wave column
#   measure_name   - character name of the measure column (coerced numeric)
#   exclude_values - values of the measure to drop before summarising
#
# Returns: a data.frame with one row per time point (mean, sd, count).
#
# Modernised: deprecated SE verbs and lazyeval replaced by tidy evaluation.
over_time <- function(ds, time, measure_name, exclude_values = "") {
  ds <- as.data.frame(ds)
  testit::assert("No such measure in the dataset", measure_name %in% unique(names(ds)))
  cat("Measure : ", measure_name, "\n", sep = "")
  # Frequency table of the raw values across time; zero cells shown as "."
  t <- table(ds[, measure_name], ds[, time], useNA = "always")
  t[t == 0] <- "."
  print(t)
  cat("\n")
  ds[, measure_name] <- as.numeric(ds[, measure_name])
  d <- ds[!(ds[, measure_name] %in% exclude_values), ]
  t <- d %>%
    dplyr::select(dplyr::all_of(c("id", time, measure_name))) %>%
    na.omit() %>%
    dplyr::group_by(.data[[time]]) %>%
    dplyr::summarize(
      mean  = round(mean(.data[[measure_name]]), 2),
      sd    = round(sd(.data[[measure_name]]), 3),
      count = dplyr::n()
    )
  return(as.data.frame(t))
}
# ds %>% over_time("year", "srmemory")
# ds %>% over_time("lb_wave", "srmemory")
# a function that provides a table of mean, sd, and count over time.
# Summarise a measure over time: mean, sd and count per time point.
#
# Args:
#   ds             - data frame (or tibble) containing the data
#   time           - character name of the time/wave column
#   measure_name   - character name of the (numeric) measure column
#   exclude_values - values of the measure to drop before summarising
#
# Returns: a data.frame with one row per time point and columns
#   mean, sd, count.
#
# Modernised: the deprecated underscore verbs (select_/group_by_/summarize_)
# and lazyeval::interp are replaced by tidy evaluation (.data / all_of).
summarize_over_time <- function(ds, time, measure_name, exclude_values = "") {
  d <- ds[!(ds[, measure_name] %in% exclude_values), ]
  t <- d %>%
    dplyr::select(dplyr::all_of(c("id", time, measure_name))) %>%
    na.omit() %>%
    dplyr::group_by(.data[[time]]) %>%
    dplyr::summarize(
      mean  = round(mean(.data[[measure_name]]), 2),
      sd    = round(sd(.data[[measure_name]]), 3),
      count = dplyr::n()
    )
  return(as.data.frame(t))
}
# ---- utility-functions -------------------------------------------------------
# adds neat styling to your knitr table
# Apply consistent styling to a knitr table.
#
# Args:
#   x             - object to render (data frame, table, ...)
#   output_format - "html" (default) or "pandoc"
#
# Returns: a knitr_kable object; for HTML output it is additionally styled
#   with kableExtra (striped/hover/condensed/responsive, left-aligned).
neat <- function(x, output_format = "html") {
  if (output_format == "pandoc") {
    x_t <- knitr::kable(x)
  } else {
    x_t <- x %>%
      knitr::kable(format = output_format) %>%
      kableExtra::kable_styling(
        bootstrap_options = c("striped", "hover", "condensed", "responsive"),
        full_width = FALSE,  # spelled out; `F` is a reassignable alias
        position = "left"
      )
  }
  return(x_t)
}
# ds %>% distinct(id) %>% count() %>% neat(10)
# adds a formated datatable
# Render x as a formatted DT::datatable with a column-filter row.
#
# Args:
#   x       - data frame to display
#   filter_ - position of the column filters (default "top")
#
# Returns: the htmlwidget produced by DT::datatable.
neat_DT <- function(x, filter_ = "top") {
  xt <- x %>%
    DT::datatable(
      class = 'cell-border stripe',
      filter = filter_,
      options = list(
        pageLength = 6,
        autoWidth = FALSE
      )
    )
  # BUG FIX: the original returned `dt`, an undefined object (the widget
  # was assigned to `xt`), so calling this function always errored.
  return(xt)
}
# Some variables have different character codes for missing values
# Translate various character values into NA values
# Translate ad-hoc character codes for missing data into real NA values.
#
# Source variables mark missingness with tokens such as "NULL", "-", "NA",
# "{blank}" and "n/a"; gsub() with an NA replacement turns any element
# matching a token into NA.
#
# Args:
#   x - vector to clean; non-character input is returned unchanged.
#
# Returns: x with recognised missing-value tokens replaced by NA.
replace_with_na <- function(x){
  # Only character vectors can carry the textual tokens; the class check is
  # loop-invariant, so hoist it out of the loop (the original re-tested it
  # on every iteration).
  if (!is.character(x)) {
    return(x)
  }
  na_tokens <- c(
    "^NULL$"
    ,"^-$"
    ,"^NA$"
    ,"^\\{blank\\}$"
    ,"^n/a$"
  )
  for (token in na_tokens) {
    x <- gsub(token, NA, x)
  }
  return(x)
}
# Usage:
# ds_patient_profiles <- ds_patient_profiles %>%
# dplyr::mutate_all(dplyr::funs(replace_with_na) )
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/onsite.R
\name{get_mesoonsite}
\alias{get_mesoonsite}
\title{Get mesocosm onsite data}
\usage{
get_mesoonsite(onsitepath = file.path("Raw", "onsite"),
experiment = "SoilPlant")
}
\arguments{
\item{onsitepath}{character file.path to onsite raw data}
\item{experiment}{character choice of "soilplant" or "soil"}
}
\description{
Get mesocosm onsite data
}
\examples{
\dontrun{
mesoonsite <- get_mesoonsite(onsitepath = file.path("Raw", "onsite"), experiment = "SoilPlant")
}
}
|
/man/get_mesoonsite.Rd
|
no_license
|
jsta/peatcollapse
|
R
| false
| true
| 556
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/onsite.R
\name{get_mesoonsite}
\alias{get_mesoonsite}
\title{Get mesocosm onsite data}
\usage{
get_mesoonsite(onsitepath = file.path("Raw", "onsite"),
experiment = "SoilPlant")
}
\arguments{
\item{onsitepath}{character file.path to onsite raw data}
\item{experiment}{character choice of "soilplant" or "soil"}
}
\description{
Get mesocosm onsite data
}
\examples{
\dontrun{
mesoonsite <- get_mesoonsite(onsitepath = file.path("Raw", "onsite"), experiment = "SoilPlant")
}
}
|
# ---- quiz3-3: Coursera "Getting and Cleaning Data", quiz 3 question 3 ----
# Downloads the GDP and FEDSTATS education datasets, merges them on
# CountryCode, then prints the number of matched ranks and the 13th country
# when sorted by rank (descending).
# NOTE(review): the setwd() path below is machine-specific; this script only
# runs unmodified on the original author's machine.
wd <- getwd()
setwd("/Users/bobfridley/Documents/Coursera/03 - Getting and Cleaning Data/R-wd")
# load packages using sapply
# non-verbose (no package messages)
packages <- c("dplyr", "data.table")
loadp <- sapply(packages, library, character.only=TRUE, quietly=TRUE,
logical.return=TRUE)
if (!all(loadp)) {
stop("unable to load required packages")
}
# set filepath
dataDirectory <- "./data"
FGDPdestFile <- "FGDP.csv"
FGDPdestFilePath <- paste(dataDirectory, FGDPdestFile, sep="/")
FEDSTATSdestFile <- "FEDSTATS_Country.csv"
FEDSTATSdestFilePath <- paste(dataDirectory, FEDSTATSdestFile, sep="/")
# create data directory if not exists
if (!file.exists(dataDirectory)) {
dir.create(dataDirectory)
}
# download files if not exists
if (!file.exists(FGDPdestFilePath)) {
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
download.file(fileUrl, destfile=FGDPdestFilePath, method="curl")
}
if (!file.exists(FEDSTATSdestFilePath)) {
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
download.file(fileUrl, destfile=FEDSTATSdestFilePath, method="curl")
}
# Read GDP file into data table, skip 1st 5 rows
# (skip=4 plus the header row consumed by read.csv accounts for 5 rows)
FGDP <- data.table(read.csv(FGDPdestFilePath, skip=4, nrows=215, stringsAsFactors=FALSE))
# Remove missing country codes
# (data.table syntax: keeps only rows whose column X is non-empty)
FGDP <- FGDP[X != '']
# subset data table with valid columns
FGDP <- subset(FGDP, select=c("X", "X.1", "X.3", "X.4"))
# rename columns
setnames(FGDP, c("X", "X.1", "X.3", "X.4"),
c("CountryCode", "rank", "countryname", "gdp"))
# Read FEDSTATS file into data table
FEDSTATS <- data.table(read.csv(FEDSTATSdestFilePath, stringsAsFactors=FALSE))
# merge data tables (all=TRUE keeps unmatched rows from both sides)
mergedData <- merge(FGDP, FEDSTATS, by="CountryCode", all=TRUE)
# sort by Rank desc
# NOTE(review): if `rank` was read as character the ordering is
# lexicographic rather than numeric -- confirm against the expected answer.
md <- mergedData[order(mergedData$rank, decreasing=TRUE),]
# answers
# number of matches
print(sum(!is.na(unique(mergedData$rank))))
# 13th country
print(md[13])
# result
# Table.Name Short.Name
# 1: St. Kitts and Nevis St. Kitts and Nevis
# restore working directory
setwd(wd)
|
/R-wd/quiz3-3.R
|
no_license
|
bobfridley/03-Getting-and-Cleaning-Data
|
R
| false
| false
| 2,104
|
r
|
# ---- quiz3-3: Coursera "Getting and Cleaning Data", quiz 3 question 3 ----
# Downloads the GDP and FEDSTATS education datasets, merges them on
# CountryCode, then prints the number of matched ranks and the 13th country
# when sorted by rank (descending).
# NOTE(review): the setwd() path below is machine-specific; this script only
# runs unmodified on the original author's machine.
wd <- getwd()
setwd("/Users/bobfridley/Documents/Coursera/03 - Getting and Cleaning Data/R-wd")
# load packages using sapply
# non-verbose (no package messages)
packages <- c("dplyr", "data.table")
loadp <- sapply(packages, library, character.only=TRUE, quietly=TRUE,
logical.return=TRUE)
if (!all(loadp)) {
stop("unable to load required packages")
}
# set filepath
dataDirectory <- "./data"
FGDPdestFile <- "FGDP.csv"
FGDPdestFilePath <- paste(dataDirectory, FGDPdestFile, sep="/")
FEDSTATSdestFile <- "FEDSTATS_Country.csv"
FEDSTATSdestFilePath <- paste(dataDirectory, FEDSTATSdestFile, sep="/")
# create data directory if not exists
if (!file.exists(dataDirectory)) {
dir.create(dataDirectory)
}
# download files if not exists
if (!file.exists(FGDPdestFilePath)) {
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
download.file(fileUrl, destfile=FGDPdestFilePath, method="curl")
}
if (!file.exists(FEDSTATSdestFilePath)) {
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
download.file(fileUrl, destfile=FEDSTATSdestFilePath, method="curl")
}
# Read GDP file into data table, skip 1st 5 rows
# (skip=4 plus the header row consumed by read.csv accounts for 5 rows)
FGDP <- data.table(read.csv(FGDPdestFilePath, skip=4, nrows=215, stringsAsFactors=FALSE))
# Remove missing country codes
# (data.table syntax: keeps only rows whose column X is non-empty)
FGDP <- FGDP[X != '']
# subset data table with valid columns
FGDP <- subset(FGDP, select=c("X", "X.1", "X.3", "X.4"))
# rename columns
setnames(FGDP, c("X", "X.1", "X.3", "X.4"),
c("CountryCode", "rank", "countryname", "gdp"))
# Read FEDSTATS file into data table
FEDSTATS <- data.table(read.csv(FEDSTATSdestFilePath, stringsAsFactors=FALSE))
# merge data tables (all=TRUE keeps unmatched rows from both sides)
mergedData <- merge(FGDP, FEDSTATS, by="CountryCode", all=TRUE)
# sort by Rank desc
# NOTE(review): if `rank` was read as character the ordering is
# lexicographic rather than numeric -- confirm against the expected answer.
md <- mergedData[order(mergedData$rank, decreasing=TRUE),]
# answers
# number of matches
print(sum(!is.na(unique(mergedData$rank))))
# 13th country
print(md[13])
# result
# Table.Name Short.Name
# 1: St. Kitts and Nevis St. Kitts and Nevis
# restore working directory
setwd(wd)
|
# Extracted example code from the 'gamlss.countKinf' package help page for
# KINBF (K-inflated Negative Binomial Family distributions for GAMLSS).
# Demonstrates generating 0- and 1-inflated families, sampling from them,
# and plotting the pdf/cdf/quantile function of the 1-inflated family.
library(gamlss.countKinf)
### Name: KINBF
### Title: K-inflated Negative Binomial Family distributions for fitting a
### GAMLSS model
### Aliases: KINBF dKINBF pKINBF qKINBF rKINBF
### Keywords: distribution regression
### ** Examples
#--------------------------------------------------------------------------------
# gives information about the default links for the Negative Binomial Family distribution
KINBF()
#--------------------------------------------------------------------------------
# generate zero inflated Negative Binomial Family distribution
gen.Kinf(family=NBF, kinf=0)
# generate random sample from zero inflated Negative Binomial Family distribution
x<-rinf0NBF(1000,mu=1, sigma=.5, nu=-.2, tau=.2)
# fit the zero inflated Negative Binomial Family distribution using gamlss
data<-data.frame(x=x)
## Not run:
##D gamlss(x~1, family=inf0NBF, data=data)
##D histDist(x, family=inf0NBF)
## End(Not run)
#--------------------------------------------------------------------------------
# generated one inflated Negative Binomial Family distribution
gen.Kinf(family=NBF, kinf=1)
# generate random sample from one inflated Negative Binomial Family distribution
x<-rinf1NBF(1000,mu=1, sigma=.5, nu=-.2, tau=.2)
# fit the one inflated Negative Binomial Family distribution using gamlss
data<-data.frame(x=x)
## Not run:
##D gamlss(x~1, family=inf1NBF, data=data)
##D histDist(x, family=inf1NBF)
## End(Not run)
#--------------------------------------------------------------------------------
mu=4; sigma=.5; nu=.2; tau=.2;
par(mgp=c(2,1,0),mar=c(4,4,4,1)+0.1)
#plot the pdf using plot
plot(function(x) dinf1NBF(x, mu=mu, sigma=sigma, nu=nu, tau=tau), from=0, to=20,
n=20+1, type="h",xlab="x",ylab="f(x)",cex.lab=1.5)
#--------------------------------------------------------------------------------
#plot the cdf using plot
cdf <- stepfun(0:19, c(0,pinf1NBF(0:19, mu=mu, sigma=sigma, nu=nu, tau=tau)), f = 0)
plot(cdf, xlab="x", ylab="F(x)", verticals=FALSE, cex.points=.8, pch=16, main="",cex.lab=1.5)
#--------------------------------------------------------------------------------
#plot the qdf using plot
# NOTE(review): the two probability grids differ (start 0.01 vs 0.1, lengths
# 19 vs 20 -- stepfun does require length(y) == length(x) + 1), and
# qinf1NBF is called without nu/tau; these look like typos inherited from
# the package example -- confirm against the package source.
invcdf <- stepfun(seq(0.01,.99,length=19), qinf1NBF(seq(0.1,.99,length=20),mu, sigma), f = 0)
plot(invcdf, ylab=expression(x[p]==F^{-1}(p)), do.points=FALSE,verticals=TRUE,
cex.points=.8, pch=16, main="",cex.lab=1.5, xlab="p")
#--------------------------------------------------------------------------------
# generate random sample
Ni <- rinf1NBF(1000, mu=mu, sigma=sigma, nu=nu, tau=tau)
hist(Ni,breaks=seq(min(Ni)-0.5,max(Ni)+0.5,by=1),col="lightgray", main="",cex.lab=2)
barplot(table(Ni))
#--------------------------------------------------------------------------------
|
/data/genthat_extracted_code/gamlss.countKinf/examples/KINBF.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 2,737
|
r
|
# Extracted example code from the 'gamlss.countKinf' package help page for
# KINBF (K-inflated Negative Binomial Family distributions for GAMLSS).
# Demonstrates generating 0- and 1-inflated families, sampling from them,
# and plotting the pdf/cdf/quantile function of the 1-inflated family.
library(gamlss.countKinf)
### Name: KINBF
### Title: K-inflated Negative Binomial Family distributions for fitting a
### GAMLSS model
### Aliases: KINBF dKINBF pKINBF qKINBF rKINBF
### Keywords: distribution regression
### ** Examples
#--------------------------------------------------------------------------------
# gives information about the default links for the Negative Binomial Family distribution
KINBF()
#--------------------------------------------------------------------------------
# generate zero inflated Negative Binomial Family distribution
gen.Kinf(family=NBF, kinf=0)
# generate random sample from zero inflated Negative Binomial Family distribution
x<-rinf0NBF(1000,mu=1, sigma=.5, nu=-.2, tau=.2)
# fit the zero inflated Negative Binomial Family distribution using gamlss
data<-data.frame(x=x)
## Not run:
##D gamlss(x~1, family=inf0NBF, data=data)
##D histDist(x, family=inf0NBF)
## End(Not run)
#--------------------------------------------------------------------------------
# generated one inflated Negative Binomial Family distribution
gen.Kinf(family=NBF, kinf=1)
# generate random sample from one inflated Negative Binomial Family distribution
x<-rinf1NBF(1000,mu=1, sigma=.5, nu=-.2, tau=.2)
# fit the one inflated Negative Binomial Family distribution using gamlss
data<-data.frame(x=x)
## Not run:
##D gamlss(x~1, family=inf1NBF, data=data)
##D histDist(x, family=inf1NBF)
## End(Not run)
#--------------------------------------------------------------------------------
mu=4; sigma=.5; nu=.2; tau=.2;
par(mgp=c(2,1,0),mar=c(4,4,4,1)+0.1)
#plot the pdf using plot
plot(function(x) dinf1NBF(x, mu=mu, sigma=sigma, nu=nu, tau=tau), from=0, to=20,
n=20+1, type="h",xlab="x",ylab="f(x)",cex.lab=1.5)
#--------------------------------------------------------------------------------
#plot the cdf using plot
cdf <- stepfun(0:19, c(0,pinf1NBF(0:19, mu=mu, sigma=sigma, nu=nu, tau=tau)), f = 0)
plot(cdf, xlab="x", ylab="F(x)", verticals=FALSE, cex.points=.8, pch=16, main="",cex.lab=1.5)
#--------------------------------------------------------------------------------
#plot the qdf using plot
# NOTE(review): the two probability grids differ (start 0.01 vs 0.1, lengths
# 19 vs 20 -- stepfun does require length(y) == length(x) + 1), and
# qinf1NBF is called without nu/tau; these look like typos inherited from
# the package example -- confirm against the package source.
invcdf <- stepfun(seq(0.01,.99,length=19), qinf1NBF(seq(0.1,.99,length=20),mu, sigma), f = 0)
plot(invcdf, ylab=expression(x[p]==F^{-1}(p)), do.points=FALSE,verticals=TRUE,
cex.points=.8, pch=16, main="",cex.lab=1.5, xlab="p")
#--------------------------------------------------------------------------------
# generate random sample
Ni <- rinf1NBF(1000, mu=mu, sigma=sigma, nu=nu, tau=tau)
hist(Ni,breaks=seq(min(Ni)-0.5,max(Ni)+0.5,by=1),col="lightgray", main="",cex.lab=2)
barplot(table(Ni))
#--------------------------------------------------------------------------------
|
# Boxplots (with overlaid points) of per-cancer-type gene-dependency scores
# for TEAD2, YAP1, TEAD1 and TEAD3, each read from a tab-delimited file.
# NOTE(review): `head=T` relies on partial matching of read.delim's `header`
# argument and on the reassignable alias T; `header=TRUE` would be safer.
# NOTE(review): the four sections repeat one pattern and could be factored
# into a helper; TEAD1 deliberately(?) uses wider y-limits (-4,4) -- confirm.
####TEAD2 dependency in pancancer
library("ggplot2")
library("ggthemes")
data=read.delim("D:/demo/TEAD2_dependency_pancancer.txt",head=T,sep="\t")
p=ggplot(data,aes(x=Cancer_type,y=TEAD2))
pp=p+geom_boxplot(size=1,color="pink")+geom_point(size=3,color="black")
pp+theme_bw()+ylim(-3,3)
####YAP1 dependency in pancancer
data=read.delim("D:/demo/YAP1_dependency_pancancer.txt",head=T,sep="\t")
p=ggplot(data,aes(x=Cancer_type,y=YAP1))
pp=p+geom_boxplot(size=1,color="pink")+geom_point(size=3,color="black")
pp+theme_bw()+ylim(-3,3)
####TEAD1 dependency in pancancer
data=read.delim("D:/demo/TEAD1_dependency_pancancer.txt",head=T,sep="\t")
p=ggplot(data,aes(x=Cancer_type,y=TEAD1))
pp=p+geom_boxplot(size=1,color="pink")+geom_point(size=3,color="black")
pp+theme_bw()+ylim(-4,4)
####TEAD3 dependency in pancancer
data=read.delim("D:/demo/TEAD3_dependency_pancancer.txt",head=T,sep="\t")
p=ggplot(data,aes(x=Cancer_type,y=TEAD3))
pp=p+geom_boxplot(size=1,color="pink")+geom_point(size=3,color="black")
pp+theme_bw()+ylim(-3,3)
|
/dependency in pancancer.R
|
no_license
|
dreamerwu/Gene-dependency-in-pancancer
|
R
| false
| false
| 1,060
|
r
|
# Boxplots (with overlaid points) of per-cancer-type gene-dependency scores
# for TEAD2, YAP1, TEAD1 and TEAD3, each read from a tab-delimited file.
# NOTE(review): `head=T` relies on partial matching of read.delim's `header`
# argument and on the reassignable alias T; `header=TRUE` would be safer.
# NOTE(review): the four sections repeat one pattern and could be factored
# into a helper; TEAD1 deliberately(?) uses wider y-limits (-4,4) -- confirm.
####TEAD2 dependency in pancancer
library("ggplot2")
library("ggthemes")
data=read.delim("D:/demo/TEAD2_dependency_pancancer.txt",head=T,sep="\t")
p=ggplot(data,aes(x=Cancer_type,y=TEAD2))
pp=p+geom_boxplot(size=1,color="pink")+geom_point(size=3,color="black")
pp+theme_bw()+ylim(-3,3)
####YAP1 dependency in pancancer
data=read.delim("D:/demo/YAP1_dependency_pancancer.txt",head=T,sep="\t")
p=ggplot(data,aes(x=Cancer_type,y=YAP1))
pp=p+geom_boxplot(size=1,color="pink")+geom_point(size=3,color="black")
pp+theme_bw()+ylim(-3,3)
####TEAD1 dependency in pancancer
data=read.delim("D:/demo/TEAD1_dependency_pancancer.txt",head=T,sep="\t")
p=ggplot(data,aes(x=Cancer_type,y=TEAD1))
pp=p+geom_boxplot(size=1,color="pink")+geom_point(size=3,color="black")
pp+theme_bw()+ylim(-4,4)
####TEAD3 dependency in pancancer
data=read.delim("D:/demo/TEAD3_dependency_pancancer.txt",head=T,sep="\t")
p=ggplot(data,aes(x=Cancer_type,y=TEAD3))
pp=p+geom_boxplot(size=1,color="pink")+geom_point(size=3,color="black")
pp+theme_bw()+ylim(-3,3)
|
# Auto-generated fuzzing/valgrind regression input: replays a captured
# argument list against the internal CNull sampling routine and prints the
# structure of whatever it returns. Not hand-written code.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.13693025791547e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615779108-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 348
|
r
|
# Auto-generated fuzzing/valgrind regression input: replays a captured
# argument list against the internal CNull sampling routine and prints the
# structure of whatever it returns. Not hand-written code.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.13693025791547e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
#' Setup environment variables for \code{slack.com} API
#'
#' Initialize all the environment variables \link{slackr} will need to use to
#' work properly.
#'
#' By default, \code{slackr} (and other functions) will use the \code{#general} room and a username
#' of \code{slackr()} with no emoji and the default \url{slack.com} API prefix URL. You
#' still need to provide the webhook API token in \code{token} for anyting to work.
#' Failure to call this function before calling \code{slackr()} will result in a
#' message to do so.
#'
#' If a valid file is found at the location pointed to by \code{config_file}, the
#' values there will be used. The fields should be specified as such in the file: \cr
#' \cr
#' \code{ token: yourTokenCode} \cr
#' \code{ channel: #general} \cr
#' \code{ username: slackr} \cr
#' \code{ icon_emoji:} \cr
#' \code{ incoming_webhook_url: https://yourgroup.slack.com/services/hooks/incoming-webhook?} \cr \cr
#' @param channel default channel to send the output to (chr) defaults to \code{#general}
#' @param username the username output will appear from (chr) defaults to \code{slackr}
#' @param icon_emoji which emoji picture to use (chr) defaults to none (can be left blank in config file as well)
#' @param token the \url{slack.com} webhook API token string (chr) defaults to none
#' @param incoming_webhook_url the slack.com URL prefix to use (chr) defaults to none
#' @param api_token the slack.com full API token (chr)
#' @param config_file a configuration file (DCF) - see \link{read.dcf} - format with the config values.
#' @param echo display the configuration variables (bool) initially \code{FALSE}
#' @note You need a \url{slack.com} account and will also need to setup an incoming webhook and full API tokens: \url{https://api.slack.com/}
#' @seealso \code{\link{slackr}}, \code{\link{dev.slackr}}, \code{\link{save.slackr}}, \code{\link{slackrUpload}}
#' @examples
#' \dontrun{
#' # reads from default file
#' slackrSetup()
#'
#' # reads from alternate config
#' slackrSetup(config_file="/path/to/my/slackrconfig)
#'
#' # the hard way
#' slackrSetup(channel="#code", token="mytoken",
#' url_prefix="http://myslack.slack.com/services/hooks/incoming-webhook?")
#' }
#' @export
# (Exported documentation lives in the roxygen block above.)
# Populates the SLACK_* environment variables from either a DCF config file
# (preferred, when it exists) or the function arguments, then normalises
# the webhook URL and fills in channel/username defaults.
slackrSetup <- function(channel="#general", username="slackr",
                        icon_emoji="", token="", incoming_webhook_url="",
                        api_token="", config_file="~/.slackr", echo=FALSE) {

  if (file.exists(config_file)) {
    config <- read.dcf(config_file,
                       fields=c("token", "channel", "icon_emoji",
                                "username", "incoming_webhook_url", "api_token"))
    Sys.setenv(SLACK_CHANNEL=config[,"channel"])
    Sys.setenv(SLACK_USERNAME=config[,"username"])
    Sys.setenv(SLACK_ICON_EMOJI=config[,"icon_emoji"])
    Sys.setenv(SLACK_TOKEN=config[,"token"])
    Sys.setenv(SLACK_INCOMING_URL_PREFIX=config[,"incoming_webhook_url"])
    Sys.setenv(SLACK_API_TOKEN=config[,"api_token"])
  } else {
    Sys.setenv(SLACK_CHANNEL=channel)
    Sys.setenv(SLACK_USERNAME=username)
    Sys.setenv(SLACK_ICON_EMOJI=icon_emoji)
    Sys.setenv(SLACK_TOKEN=token)
    Sys.setenv(SLACK_INCOMING_URL_PREFIX=incoming_webhook_url)
    Sys.setenv(SLACK_API_TOKEN=api_token)
  }

  # Ensure the webhook URL ends with "?" so query parameters can be appended.
  # BUG FIX: "?" is a regex repetition operator and must be escaped
  # (grepl("?$", ...) raised "invalid regular expression"), and the value must
  # be read back from the environment -- `config` only exists when a config
  # file was found.
  url_prefix <- Sys.getenv("SLACK_INCOMING_URL_PREFIX")
  if (nchar(url_prefix) > 0 && !grepl("\\?$", url_prefix)) {
    Sys.setenv(SLACK_INCOMING_URL_PREFIX=sprintf("%s?", url_prefix))
  }

  # BUG FIX: Sys.getenv() returns "" (a length-1 vector) for unset variables,
  # so test the string's length, not the vector's; Sys.setenv() also requires
  # named arguments (the positional form errors).
  if (nchar(Sys.getenv("SLACK_CHANNEL")) == 0) {
    Sys.setenv(SLACK_CHANNEL="#general")
  }
  if (nchar(Sys.getenv("SLACK_USERNAME")) == 0) {
    Sys.setenv(SLACK_USERNAME="slackr")
  }

  if (echo) {
    print(Sys.getenv(c("SLACK_CHANNEL", "SLACK_USERNAME",
                       "SLACK_ICON_EMOJI", "SLACK_TOKEN",
                       "SLACK_INCOMING_URL_PREFIX", "SLACK_API_TOKEN")))
  }

  invisible(NULL)
}
#' Output R expressions to a \code{slack.com} channel/user (as \code{slackbot})
#'
#' Takes an \code{expr}, evaluates it and sends the output to a \url{slack.com}
#' chat destination. Useful for logging, messaging on long compute tasks or
#' general information sharing.
#'
#' By default, everything but \code{expr} will be looked for in a "\code{SLACK_}"
#' environment variable. You can override or just specify these values directly instead,
#' but it's probably better to call \link{slackrSetup} first.
#'
#' This function uses the incoming webhook API and posts user messages as \code{slackbot}
#'
#' @param ... expressions to be sent to Slack.com
#' @param channel which channel to post the message to (chr)
#' @param username what user should the bot be named as (chr)
#' @param icon_emoji what emoji to use (chr) \code{""} will mean use the default
#' @param incoming_webhook_url which \url{slack.com} API endpoint URL to use
#' @param token your webhook API token
#' @note You need a \url{slack.com} account and will also need to setup an incoming webhook: \url{https://api.slack.com/}
#' @seealso \code{\link{slackrSetup}}, \code{\link{slackr}}, \code{\link{dev.slackr}}, \code{\link{save.slackr}}, \code{\link{slackrUpload}}
#' @examples
#' \dontrun{
#' slackrSetup()
#' slackr("iris info", head(iris), str(iris))
#' }
#' @export
# Post the output of R expressions to Slack via an incoming webhook.
#
# Each element of ... is either a call (deparsed, evaluated in the caller's
# frame, and echoed as "> expr" above its captured output) or a plain value
# (coerced to character). Messages are sent preformatted as `username`.
# Returns invisible(NULL).
slackrBot <- function(...,
                      channel=Sys.getenv("SLACK_CHANNEL"),
                      username=Sys.getenv("SLACK_USERNAME"),
                      icon_emoji=Sys.getenv("SLACK_ICON_EMOJI"),
                      incoming_webhook_url=Sys.getenv("SLACK_INCOMING_URL_PREFIX"),
                      token=Sys.getenv("SLACK_TOKEN")) {

  # Fail fast when the endpoint is not configured. `||` (scalar,
  # short-circuiting) is the right operator inside if(); the original used
  # the vectorised `|`.
  if (incoming_webhook_url == "" || token == "") {
    stop("No URL prefix and/or token specified. Did you forget to call slackrSetup()?", call. = FALSE)
  }

  if (icon_emoji != "") { icon_emoji <- sprintf(', "icon_emoji": "%s"', icon_emoji) }

  # BUG FIX: evaluate captured calls in the caller's frame so its local
  # variables are visible (evaluating inside this function's frame hid them).
  caller <- parent.frame()

  if (!missing(...)) {
    input_list <- as.list(substitute(list(...)))[-1L]
    for (i in seq_along(input_list)) {
      expr <- input_list[[i]]
      if (inherits(expr, "call")) {
        # Echo the expression itself above its captured console output.
        expr_text <- sprintf("> %s", deparse(expr))
        data <- capture.output(eval(expr, envir = caller))
        data <- paste0(data, collapse="\n")
        data <- sprintf("%s\n%s", expr_text, data)
      } else {
        data <- as.character(expr)
      }
      # Strip the quotes toJSON wraps around a scalar string so the raw text
      # can be embedded in the payload template below.
      output <- gsub('^\"|\"$', "", toJSON(data, simplifyVector=TRUE, flatten=TRUE, auto_unbox=TRUE))
      resp <- POST(url=paste0(incoming_webhook_url, "token=", token),
                   add_headers(`Content-Type`="application/x-www-form-urlencoded", `Accept`="*/*"),
                   body=URLencode(sprintf('payload={"channel": "%s", "username": "%s", "text": "```%s```"%s}',
                                          channel, username, output, icon_emoji)))
      warn_for_status(resp)
      # On a bad response, show which expression triggered it.
      if (resp$status_code > 200) { print(str(expr)) }
    }
  }

  return(invisible())
}
#' Output R expressions to a \code{slack.com} channel/user
#'
#' Takes an \code{expr}, evaluates it and sends the output to a \url{slack.com}
#' chat destination. Useful for logging, messaging on long compute tasks or
#' general information sharing.
#'
#' By default, everything but \code{expr} will be looked for in a "\code{SLACK_}"
#' environment variable. You can override or just specify these values directly instead,
#' but it's probably better to call \link{slackrSetup} first.
#'
#' @param ... expressions to be sent to Slack.com
#' @param channel which channel to post the message to (chr)
#' @param username what user should the bot be named as (chr)
#' @param icon_emoji what emoji to use (chr) \code{""} will mean use the default
#' @param api_token your full slack.com API token
#' @note You need a \url{slack.com} account and will also need to setup an API token \url{https://api.slack.com/}
#' @seealso \code{\link{slackrSetup}}, \code{\link{slackrBot}}, \code{\link{dev.slackr}}, \code{\link{save.slackr}}, \code{\link{slackrUpload}}
#' @examples
#' \dontrun{
#' slackrSetup()
#' slackr("iris info", head(iris), str(iris))
#' }
#' @export
# Post the output of R expressions to Slack via the chat.postMessage API.
#
# Each element of ... is either a call (deparsed, evaluated in the caller's
# frame, and echoed as "> expr" above its captured output) or a plain value
# (coerced to character). Returns invisible(NULL).
slackr <- function(...,
                   channel=Sys.getenv("SLACK_CHANNEL"),
                   username=Sys.getenv("SLACK_USERNAME"),
                   icon_emoji=Sys.getenv("SLACK_ICON_EMOJI"),
                   api_token=Sys.getenv("SLACK_API_TOKEN")) {

  if (api_token == "") {
    stop("No token specified. Did you forget to call slackrSetup()?", call. = FALSE)
  }

  if (icon_emoji != "") { icon_emoji <- sprintf(', "icon_emoji": "%s"', icon_emoji) }

  # BUG FIX: evaluate captured calls in the caller's frame so its local
  # variables are visible (evaluating inside this function's frame hid them).
  caller <- parent.frame()

  if (!missing(...)) {
    input_list <- as.list(substitute(list(...)))[-1L]
    for (i in seq_along(input_list)) {
      expr <- input_list[[i]]
      if (inherits(expr, "call")) {
        expr_text <- sprintf("> %s", deparse(expr))
        data <- capture.output(eval(expr, envir = caller))
        data <- paste0(data, collapse="\n")
        data <- sprintf("%s\n%s", expr_text, data)
      } else {
        data <- as.character(expr)
      }
      resp <- POST(url="https://slack.com/api/chat.postMessage",
                   body=list(token=api_token, channel=channel,
                             username=username, icon_emoji=icon_emoji,
                             text=sprintf("```%s```", data), link_names=1))
      warn_for_status(resp)
      # On a bad response, show which expression triggered it.
      if (resp$status_code > 200) { print(str(expr)) }
    }
  }

  return(invisible())
}
#' Send the graphics contents of the current device to a \code{slack.com} channel
#'
#' \code{dev.slackr} sends the graphics contents of the current device to the specified \code{slack.com} channel.
#' This requires setting up a full API token (i.e. not a webhook & not OAuth) for this to work.
#'
#' @param channels list of channels to post image to
#' @param ... other arguments passed into png device
#' @param api_token the slack.com full API token (chr)
#' @return \code{httr} response object from \code{POST} call
#' @seealso \code{\link{slackrSetup}}, \code{\link{save.slackr}}, \code{\link{slackrUpload}}
#' @examples
#' \dontrun{
#' slackrSetup()
#'
#' # ggplot
#' library(ggplot2)
#'
#' dev.slackr("#results")
#'
#' # base
#' barplot(VADeaths)
#' dev.slackr("@@jayjacobs")
#' }
#' @export
# Capture the current graphics device as a PNG and upload it to Slack's
# files.upload endpoint. Returns the httr response object from POST().
dev.slackr <- function(channels=Sys.getenv("SLACK_CHANNEL"), ...,
api_token=Sys.getenv("SLACK_API_TOKEN")) {
# NOTE(review): presumably set to avoid locale-dependent string handling in
# the multipart upload -- confirm; this changes process-wide state and is
# not restored afterwards.
Sys.setlocale('LC_ALL','C')
ftmp <- tempfile("plot", fileext=".png")
# ... is forwarded to the png() device (e.g. width/height/res)
dev.copy(png, file=ftmp, ...)
dev.off()
# Translate "#channel"/"@user" names into Slack IDs
modchan <- slackrChTrans(channels)
POST(url="https://slack.com/api/files.upload",
add_headers(`Content-Type`="multipart/form-data"),
body=list( file=upload_file(ftmp), token=api_token, channels=modchan))
}
#' Post a ggplot to a \url{slack.com} channel
#'
#' Unlike the \code{\link{dev.slackr}} function, this one takes a \code{ggplot} object,
#' eliminating the need to have a graphics device (think use in scripts).
#'
#' @param plot ggplot object to save, defaults to last plot displayed
#' @param channels list of channels to post image to
#' @param scale scaling factor
#' @param width width (defaults to the width of current plotting window)
#' @param height height (defaults to the height of current plotting window)
#' @param units units for width and height when either one is explicitly specified (in, cm, or mm)
#' @param dpi dpi to use for raster graphics
#' @param limitsize when TRUE (the default), ggsave will not save images larger than 50x50 inches, to prevent the common error of specifying dimensions in pixels.
#' @param api_token the slack.com full API token (chr)
#' @param ... other arguments passed to graphics device
#' @note You need to setup a full API token (i.e. not a webhook & not OAuth) for this to work
#' @return \code{httr} response object
#' @examples
#' \dontrun{
#' slackrSetup()
#' ggslackr(qplot(mpg, wt, data=mtcars))
#' }
#' @export
# Save a ggplot object to a temporary PNG via ggsave() and upload it to
# Slack's files.upload endpoint. Unlike dev.slackr(), no open graphics
# device is needed. Returns the httr response object from POST().
ggslackr <- function(plot=last_plot(), channels=Sys.getenv("SLACK_CHANNEL"), scale=1, width=par("din")[1], height=par("din")[2],
units=c("in", "cm", "mm"), dpi=300, limitsize=TRUE, api_token=Sys.getenv("SLACK_API_TOKEN"), ...) {
# NOTE(review): process-wide locale change, not restored afterwards.
Sys.setlocale('LC_ALL','C')
ftmp <- tempfile("ggplot", fileext=".png")
# ... is forwarded to the graphics device used by ggsave()
ggsave(filename=ftmp, plot=plot, scale=scale, width=width, height=height, units=units, dpi=dpi, limitsize=limitsize, ...)
# Translate "#channel"/"@user" names into Slack IDs
modchan <- slackrChTrans(channels)
POST(url="https://slack.com/api/files.upload",
add_headers(`Content-Type`="multipart/form-data"),
body=list( file=upload_file(ftmp), token=api_token, channels=modchan))
}
#' Save R objects to an RData file on \code{slack.com}
#'
#' \code{save.slackr} enables you upload R objects (as an R data file)
#' to \code{slack.com} and (optionally) post them to one or more channels
#' (if \code{channels} is not empty).
#'
#' @param ... objects to store in the R data file
#' @param channels slack.com channels to save to (optional)
#' @param file filename (without extension) to use
#' @param api_token full API token
#' @return \code{httr} response object from \code{POST} call
#' @seealso \code{\link{slackrSetup}}, \code{\link{dev.slackr}}, \code{\link{slackrUpload}}
#' @export
# Serialise the objects in ... to a temporary .rda file and upload it to
# Slack's files.upload endpoint (shared to `channels` when non-empty).
# Returns the httr response object from POST().
save.slackr <- function(..., channels="",
file="slackr",
api_token=Sys.getenv("SLACK_API_TOKEN")) {
# NOTE(review): process-wide locale change, not restored afterwards.
Sys.setlocale('LC_ALL','C')
ftmp <- tempfile(file, fileext=".rda")
save(..., file=ftmp)
# Translate "#channel"/"@user" names into Slack IDs
modchan <- slackrChTrans(channels)
POST(url="https://slack.com/api/files.upload",
add_headers(`Content-Type`="multipart/form-data"),
body=list(file=upload_file(ftmp), filename=sprintf("%s.rda", file),
token=api_token, channels=modchan))
}
#' Send a file to \code{slack.com}
#'
#' \code{slackrUpload} enables you upload files to \code{slack.com} and
#' (optionally) post them to one or more channels (if \code{channels} is not empty).
#'
#' @param filename path to file
#' @param title title on slack (optional - defaults to filename)
#' @param initial_comment comment for file on slack (optional - defaults to filename)
#' @param channels slack.com channels to save to (optional)
#' @param api_token full API token
#' @return \code{httr} response object from \code{POST} call
#' @seealso \code{\link{slackrSetup}}, \code{\link{dev.slackr}}, \code{\link{save.slackr}}
#' @export
# Upload a local file to Slack via the files.upload endpoint.
#
# Expands the path, verifies the file exists, and posts it (with title and
# initial comment) to the resolved channels. Returns the httr response
# object, or invisible(NULL) with a warning when the file is missing.
slackrUpload <- function(filename, title=basename(filename),
                         initial_comment=basename(filename),
                         channels="", api_token=Sys.getenv("SLACK_API_TOKEN")) {

  f_path <- path.expand(filename)

  if (!file.exists(f_path)) {
    # Previously a missing file was ignored silently (the function just
    # returned NULL); surface the problem so callers know nothing was sent.
    warning("slackrUpload: file not found, nothing uploaded: ", f_path,
            call. = FALSE)
    return(invisible(NULL))
  }

  f_name <- basename(f_path)

  # NOTE(review): process-wide locale change, not restored afterwards.
  Sys.setlocale('LC_ALL','C')

  # Translate "#channel"/"@user" names into Slack IDs
  modchan <- slackrChTrans(channels)

  POST(url="https://slack.com/api/files.upload",
       add_headers(`Content-Type`="multipart/form-data"),
       body=list(file=upload_file(f_path), filename=f_name,
                 title=title, initial_comment=initial_comment,
                 token=api_token, channels=modchan))
}
#' Translate vector of channel names to channel ID's for API
#'
#' Given a vector of one or more channel names, it will retrieve list of
#' active channels and try to replace channels that begin with "\code{#}" or "\code{@@}"
#' with the channel ID for that channel. Also incorporates groups.
#'
#' @param channels vector of channel names to parse
#' @param api_token the slack.com full API token (chr)
#' @note Renamed from \code{slackr_chtrans}
#' @return character vector - original channel list with \code{#} or \code{@@} channels replaced with ID's.
#' @export
slackrChTrans <- function(channels, api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # Pull the current channel, user and group listings from the Slack API.
  chan <- slackrChannels(api_token)
  users <- slackrUsers(api_token)
  groups <- slackrGroups(api_token)
  # Prefix names so they match the "#channel" / "@user" syntax callers use.
  chan$name <- sprintf("#%s", chan$name)
  users$name <- sprintf("@%s", users$name)
  # Stack all known (id, name) pairs into a single lookup table.
  chan_list <- data.table(id=character(0), name=character(0))
  if (length(chan) > 0) { chan_list <- rbind(chan_list, chan[,1:2,with=FALSE]) }
  if (length(users) > 0) { chan_list <- rbind(chan_list, users[,1:2,with=FALSE]) }
  if (length(groups) > 0) { chan_list <- rbind(chan_list, groups[,1:2,with=FALSE]) }
  # Left-join the requested names against the lookup table.
  # NOTE(review): merge() sorts its result by the join column, so with more
  # than one requested channel the returned IDs may not follow the order of
  # the `channels` argument -- verify callers do not rely on positional
  # alignment.
  chan_xref <- merge(data.frame(name=channels), chan_list, all.x=TRUE)
  # Fall back to the original name wherever no ID was found.
  ifelse(is.na(chan_xref$id),
         as.character(chan_xref$name),
         as.character(chan_xref$id))
}
#' Get a data frame of slack.com users
#'
#' need to setup a full API token (i.e. not a webhook & not OAuth) for this to work
#'
#' @param api_token the slack.com full API token (chr)
#' @return data.table of users
#' @export
slackrUsers <- function(api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # Force the C locale for predictable string handling.
  Sys.setlocale('LC_ALL','C')
  # Query the Slack users.list endpoint and parse the JSON reply.
  tmp <- POST("https://slack.com/api/users.list", body=list(token=api_token))
  tmp_p <- content(tmp, as="parsed")
  # Flatten each member record into one (id, name, real_name) row;
  # a missing real_name is replaced by the empty string.
  rbindlist(lapply(tmp_p$members, function(x) {
    if ( is.null(x$real_name) ) { x$real_name <- "" }
    data.frame(id=nax(x$id), name=nax(x$name), real_name=nax(x$real_name))
  }) )
}
#' Get a data frame of slack.com channels
#'
#' need to setup a full API token (i.e. not a webhook & not OAuth) for this to work
#'
#' @param api_token the slack.com full API token (chr)
#' @return data.table of channels
#' @note Renamed from \code{slackr_channels}
#' @export
slackrChannels <- function(api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # Force the C locale for predictable string handling.
  Sys.setlocale('LC_ALL','C')
  # Query the Slack channels.list endpoint and parse the JSON reply.
  tmp <- POST("https://slack.com/api/channels.list", body=list(token=api_token))
  tmp_p <- content(tmp, as="parsed")
  # Flatten each channel record into one (id, name, is_member) row.
  rbindlist(lapply(tmp_p$channels, function(x) {
    data.frame(id=nax(x$id), name=nax(x$name), is_member=nax(x$is_member))
  }) )
}
#' Get a data frame of slack.com groups
#'
#' need to setup a full API token (i.e. not a webhook & not OAuth) for this to work
#'
#' @param api_token the slack.com full API token (chr)
#' @return data.table of channels
#' @export
slackrGroups <- function(api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # Force the C locale for predictable string handling.
  Sys.setlocale('LC_ALL','C')
  # Query the Slack groups.list endpoint and parse the JSON reply.
  tmp <- POST("https://slack.com/api/groups.list", body=list(token=api_token))
  tmp_p <- content(tmp, as="parsed")
  # Flatten each group record into one (id, name, is_archived) row.
  rbindlist(lapply(tmp_p$groups, function(x) {
    data.frame(id=nax(x$id), name=nax(x$name), is_archived=nax(x$is_archived))
  }) )
}
# Helper: map NULL to NA so a possibly-missing API field can occupy a
# data.frame cell. A scalar `if` is used instead of the original
# `ifelse(is.null(x), NA, x)`: is.null() is always length-1, and ifelse()
# would strip attributes and misbehave for length != 1 inputs.
nax <- function(x) {
  if (is.null(x)) NA else x
}
|
/R/slackr.R
|
no_license
|
arturochian/slackr
|
R
| false
| false
| 18,143
|
r
|
#' Setup environment variables for \code{slack.com} API
#'
#' Initialize all the environment variables \link{slackr} will need to use to
#' work properly.
#'
#' By default, \code{slackr} (and other functions) will use the \code{#general} room and a username
#' of \code{slackr()} with no emoji and the default \url{slack.com} API prefix URL. You
#' still need to provide the webhook API token in \code{token} for anyting to work.
#' Failure to call this function before calling \code{slackr()} will result in a
#' message to do so.
#'
#' If a valid file is found at the locaiton pointed to by \code{config_file}, the
#' values there will be used. The fields should be specified as such in the file: \cr
#' \cr
#' \code{ token: yourTokenCode} \cr
#' \code{ channel: #general} \cr
#' \code{ username: slackr} \cr
#' \code{ icon_emoji:} \cr
#' \code{ incoming_webhook_url: https://yourgroup.slack.com/services/hooks/incoming-webhook?} \cr \cr
#' @param channel default channel to send the output to (chr) defaults to \code{#general}
#' @param username the username output will appear from (chr) defaults to \code{slackr}
#' @param icon_emoji which emoji picture to use (chr) defaults to none (can be left blank in config file as well)
#' @param token the \url{slack.com} webhook API token string (chr) defaults to none
#' @param incoming_webhook_url the slack.com URL prefix to use (chr) defaults to none
#' @param api_token the slack.com full API token (chr)
#' @param config_file a configuration file (DCF) - see \link{read.dcf} - format with the config values.
#' @param echo display the configuraiton variables (bool) initially \code{FALSE}
#' @note You need a \url{slack.com} account and will also need to setup an incoming webhook and full API tokens: \url{https://api.slack.com/}
#' @seealso \code{\link{slackr}}, \code{\link{dev.slackr}}, \code{\link{save.slackr}}, \code{\link{slackrUpload}}
#' @examples
#' \dontrun{
#' # reads from default file
#' slackrSetup()
#'
#' # reads from alternate config
#' slackrSetup(config_file="/path/to/my/slackrconfig)
#'
#' # the hard way
#' slackrSetup(channel="#code", token="mytoken",
#' url_prefix="http://myslack.slack.com/services/hooks/incoming-webhook?")
#' }
#' @export
slackrSetup <- function(channel="#general", username="slackr",
                        icon_emoji="", token="", incoming_webhook_url="",
                        api_token="", config_file="~/.slackr", echo=FALSE) {
  # Prefer values from the DCF config file when it exists; otherwise fall
  # back to the arguments supplied by the caller.
  if (file.exists(config_file)) {
    config <- read.dcf(config_file,
                       fields=c("token", "channel", "icon_emoji",
                                "username", "incoming_webhook_url", "api_token"))
    Sys.setenv(SLACK_CHANNEL=config[,"channel"])
    Sys.setenv(SLACK_USERNAME=config[,"username"])
    Sys.setenv(SLACK_ICON_EMOJI=config[,"icon_emoji"])
    Sys.setenv(SLACK_TOKEN=config[,"token"])
    Sys.setenv(SLACK_INCOMING_URL_PREFIX=config[,"incoming_webhook_url"])
    Sys.setenv(SLACK_API_TOKEN=config[,"api_token"])
  } else {
    Sys.setenv(SLACK_CHANNEL=channel)
    Sys.setenv(SLACK_USERNAME=username)
    Sys.setenv(SLACK_ICON_EMOJI=icon_emoji)
    Sys.setenv(SLACK_TOKEN=token)
    Sys.setenv(SLACK_INCOMING_URL_PREFIX=incoming_webhook_url)
    Sys.setenv(SLACK_API_TOKEN=api_token)
  }
  # The webhook URL must end with "?" so "token=..." can be appended.
  # FIX: the original tested grepl("?$", ...) -- an unescaped "?" is a regex
  # quantifier and is an invalid pattern -- and then re-read `config`, which
  # is undefined when no config file exists. Use an escaped pattern and the
  # environment variable set above.
  if (!grepl("\\?$", Sys.getenv("SLACK_INCOMING_URL_PREFIX"))) {
    Sys.setenv(SLACK_INCOMING_URL_PREFIX=
                 sprintf("%s?", Sys.getenv("SLACK_INCOMING_URL_PREFIX")))
  }
  # FIX: Sys.getenv() returns a length-1 string, so length(...)==0 was never
  # TRUE; test for the empty string instead. Sys.setenv() also requires
  # *named* arguments -- positional calls error out.
  if (Sys.getenv("SLACK_CHANNEL") == "") {
    Sys.setenv(SLACK_CHANNEL="#general")
  }
  if (Sys.getenv("SLACK_USERNAME") == "") {
    Sys.setenv(SLACK_USERNAME="slackr")
  }
  # Optionally echo the resulting configuration for debugging.
  if (echo) {
    print(Sys.getenv(c("SLACK_CHANNEL", "SLACK_USERNAME",
                       "SLACK_ICON_EMOJI", "SLACK_TOKEN",
                       "SLACK_INCOMING_URL_PREFIX", "SLACK_API_TOKEN")))
  }
}
#' Output R expressions to a \code{slack.com} channel/user (as \code{slackbot})
#'
#' Takes an \code{expr}, evaluates it and sends the output to a \url{slack.com}
#' chat destination. Useful for logging, messaging on long compute tasks or
#' general information sharing.
#'
#' By default, everyting but \code{expr} will be looked for in a "\code{SLACK_}"
#' environment variable. You can override or just specify these values directly instead,
#' but it's probably better to call \link{slackrSetup} first.
#'
#' This function uses the incoming webhook API and posts user messages as \code{slackbot}
#'
#' @param ... expressions to be sent to Slack.com
#' @param channel which channel to post the message to (chr)
#' @param username what user should the bot be named as (chr)
#' @param icon_emoji what emoji to use (chr) \code{""} will mean use the default
#' @param incoming_webhook_url which \url{slack.com} API endpoint URL to use
#' @param token your webhook API token
#' @note You need a \url{slack.com} account and will also need to setup an incoming webhook: \url{https://api.slack.com/}
#' @seealso \code{\link{slackrSetup}}, \code{\link{slackr}}, \code{\link{dev.slackr}}, \code{\link{save.slackr}}, \code{\link{slackrUpload}}
#' @examples
#' \dontrun{
#' slackrSetup()
#' slackr("iris info", head(iris), str(iris))
#' }
#' @export
slackrBot <- function(...,
                      channel=Sys.getenv("SLACK_CHANNEL"),
                      username=Sys.getenv("SLACK_USERNAME"),
                      icon_emoji=Sys.getenv("SLACK_ICON_EMOJI"),
                      incoming_webhook_url=Sys.getenv("SLACK_INCOMING_URL_PREFIX"),
                      token=Sys.getenv("SLACK_TOKEN")) {
  # Both the webhook URL prefix and the token are required to build the
  # POST URL; use short-circuiting scalar `||` (the original used vector `|`).
  if (incoming_webhook_url == "" || token == "") {
    stop("No URL prefix and/or token specified. Did you forget to call slackrSetup()?", call. = FALSE)
  }
  # When an emoji is configured, pre-render it as the JSON fragment that is
  # spliced into the payload below.
  if (icon_emoji != "") { icon_emoji <- sprintf(', "icon_emoji": "%s"', icon_emoji) }
  if (!missing(...)) {
    # Capture the *unevaluated* expressions so calls can be echoed verbatim
    # (prefixed with "> ") above their output, mimicking a console session.
    input_list <- as.list(substitute(list(...)))[-1L]
    for (i in seq_along(input_list)) {
      expr <- input_list[[i]]
      # inherits() instead of class(expr) == "call": robust to multi-class
      # objects and the idiomatic type test.
      if (inherits(expr, "call")) {
        expr_text <- sprintf("> %s", deparse(expr))
        data <- capture.output(eval(expr))
        data <- paste0(data, collapse="\n")
        data <- sprintf("%s\n%s", expr_text, data)
      } else {
        data <- as.character(expr)
      }
      # JSON-escape the text and strip the surrounding quotes toJSON() adds.
      output <- gsub('^\"|\"$', "", toJSON(data, simplifyVector=TRUE, flatten=TRUE, auto_unbox=TRUE))
      # Post the payload as a fenced code block via the incoming webhook.
      resp <- POST(url=paste0(incoming_webhook_url, "token=", token),
                   add_headers(`Content-Type`="application/x-www-form-urlencoded", `Accept`="*/*"),
                   body=URLencode(sprintf('payload={"channel": "%s", "username": "%s", "text": "```%s```"%s}',
                                          channel, username, output, icon_emoji)))
      warn_for_status(resp)
      # Help debugging by showing which expression failed to post.
      if (resp$status_code > 200) { print(str(expr)) }
    }
  }
  return(invisible())
}
#' Output R expressions to a \code{slack.com} channel/user
#'
#' Takes an \code{expr}, evaluates it and sends the output to a \url{slack.com}
#' chat destination. Useful for logging, messaging on long compute tasks or
#' general information sharing.
#'
#' By default, everyting but \code{expr} will be looked for in a "\code{SLACK_}"
#' environment variable. You can override or just specify these values directly instead,
#' but it's probably better to call \link{slackrSetup} first.
#'
#' @param ... expressions to be sent to Slack.com
#' @param channel which channel to post the message to (chr)
#' @param username what user should the bot be named as (chr)
#' @param icon_emoji what emoji to use (chr) \code{""} will mean use the default
#' @param api_token your full slack.com API token
#' @note You need a \url{slack.com} account and will also need to setup an API token \url{https://api.slack.com/}
#' @seealso \code{\link{slackrSetup}}, \code{\link{slackrBot}}, \code{\link{dev.slackr}}, \code{\link{save.slackr}}, \code{\link{slackrUpload}}
#' @examples
#' \dontrun{
#' slackrSetup()
#' slackr("iris info", head(iris), str(iris))
#' }
#' @export
slackr <- function(...,
                   channel=Sys.getenv("SLACK_CHANNEL"),
                   username=Sys.getenv("SLACK_USERNAME"),
                   icon_emoji=Sys.getenv("SLACK_ICON_EMOJI"),
                   api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # A full API token is mandatory for chat.postMessage.
  if (api_token == "") {
    stop("No token specified. Did you forget to call slackrSetup()?", call. = FALSE)
  }
  # When an emoji is configured, pre-render it as a JSON fragment.
  if (icon_emoji != "") { icon_emoji <- sprintf(', "icon_emoji": "%s"', icon_emoji) }
  if (!missing(...)) {
    # Capture the *unevaluated* expressions so calls can be echoed verbatim
    # (prefixed with "> ") above their captured output.
    input_list <- as.list(substitute(list(...)))[-1L]
    for (i in seq_along(input_list)) {
      expr <- input_list[[i]]
      # inherits() instead of class(expr) == "call": the idiomatic type test.
      if (inherits(expr, "call")) {
        expr_text <- sprintf("> %s", deparse(expr))
        data <- capture.output(eval(expr))
        data <- paste0(data, collapse="\n")
        data <- sprintf("%s\n%s", expr_text, data)
      } else {
        data <- as.character(expr)
      }
      # Post as a fenced code block via the chat.postMessage API.
      resp <- POST(url="https://slack.com/api/chat.postMessage",
                   body=list(token=api_token, channel=channel,
                             username=username, icon_emoji=icon_emoji,
                             text=sprintf("```%s```", data), link_names=1))
      warn_for_status(resp)
      # Help debugging by showing which expression failed to post.
      if (resp$status_code > 200) { print(str(expr)) }
    }
  }
  return(invisible())
}
#' Send the graphics contents of the current device to a \code{slack.com} channel
#'
#' \code{dev.slackr} sends the graphics contents of the current device to the specified \code{slack.com} channel.
#' This requires setting up a full API token (i.e. not a webhook & not OAuth) for this to work.
#'
#' @param channels list of channels to post image to
#' @param ... other arguments passed into png device
#' @param api_token the slack.com full API token (chr)
#' @return \code{httr} response object from \code{POST} call
#' @seealso \code{\link{slackrSetup}}, \code{\link{save.slackr}}, \code{\link{slackrUpload}}
#' @examples
#' \dontrun{
#' slackrSetup()
#'
#' # ggplot
#' library(ggplot2)
#'
#' dev.slackr("#results")
#'
#' # base
#' barplot(VADeaths)
#' dev.slackr("@@jayjacobs")
#' }
#' @export
dev.slackr <- function(channels=Sys.getenv("SLACK_CHANNEL"), ...,
                       api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # Force the C locale for predictable string handling during the upload.
  Sys.setlocale('LC_ALL','C')
  # Snapshot the *current* graphics device into a temporary PNG file;
  # extra arguments in `...` are forwarded to the png device.
  ftmp <- tempfile("plot", fileext=".png")
  dev.copy(png, file=ftmp, ...)
  dev.off()
  # Translate "#channel"/"@user" names into Slack channel IDs.
  modchan <- slackrChTrans(channels)
  # Upload the PNG via the Slack file-upload API; the httr response
  # from POST() is returned.
  POST(url="https://slack.com/api/files.upload",
       add_headers(`Content-Type`="multipart/form-data"),
       body=list( file=upload_file(ftmp), token=api_token, channels=modchan))
}
#' Post a ggplot to a \url{slack.com} channel
#'
#' Unlike the \code{\link{dev.slackr}} function, this one takes a \code{ggplot} object,
#' eliminating the need to have a graphics device (think use in scripts).
#'
#' @param plot ggplot object to save, defaults to last plot displayed
#' @param channels list of channels to post image to
#' @param scale scaling factor
#' @param width width (defaults to the width of current plotting window)
#' @param height height (defaults to the height of current plotting window)
#' @param units units for width and height when either one is explicitly specified (in, cm, or mm)
#' @param dpi dpi to use for raster graphics
#' @param limitsize when TRUE (the default), ggsave will not save images larger than 50x50 inches, to prevent the common error of specifying dimensions in pixels.
#' @param api_token the slack.com full API token (chr)
#' @param ... other arguments passed to graphics device
#' @note You need to setup a full API token (i.e. not a webhook & not OAuth) for this to work
#' @return \code{httr} response object
#' @examples
#' \dontrun{
#' slackrSetup()
#' ggslackr(qplot(mpg, wt, data=mtcars))
#' }
#' @export
ggslackr <- function(plot=last_plot(), channels=Sys.getenv("SLACK_CHANNEL"), scale=1, width=par("din")[1], height=par("din")[2],
                     units=c("in", "cm", "mm"), dpi=300, limitsize=TRUE, api_token=Sys.getenv("SLACK_API_TOKEN"), ...) {
  # Force the C locale for predictable string handling during the upload.
  Sys.setlocale('LC_ALL','C')
  # Render the ggplot object to a temporary PNG via ggsave() -- unlike
  # dev.slackr(), no open graphics device is required.
  ftmp <- tempfile("ggplot", fileext=".png")
  ggsave(filename=ftmp, plot=plot, scale=scale, width=width, height=height, units=units, dpi=dpi, limitsize=limitsize, ...)
  # Translate "#channel"/"@user" names into Slack channel IDs.
  modchan <- slackrChTrans(channels)
  # Upload the PNG via the Slack file-upload API.
  POST(url="https://slack.com/api/files.upload",
       add_headers(`Content-Type`="multipart/form-data"),
       body=list( file=upload_file(ftmp), token=api_token, channels=modchan))
}
#' Save R objects to an RData file on \code{slack.com}
#'
#' \code{save.slackr} enables you upload R objects (as an R data file)
#' to \code{slack.com} and (optionally) post them to one or more channels
#' (if \code{channels} is not empty).
#'
#' @param ... objects to store in the R data file
#' @param channels slack.com channels to save to (optional)
#' @param file filename (without extension) to use
#' @param api_token full API token
#' @return \code{httr} response object from \code{POST} call
#' @seealso \code{\link{slackrSetup}}, \code{\link{dev.slackr}}, \code{\link{slackrUpload}}
#' @export
save.slackr <- function(..., channels="",
                        file="slackr",
                        api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # Serialize the supplied objects to a temporary .rda file, then push that
  # file to the Slack file-upload endpoint, optionally sharing it to one or
  # more channels. Returns the httr response from POST().
  Sys.setlocale('LC_ALL','C')
  rda_path <- tempfile(file, fileext=".rda")
  save(..., file=rda_path)
  chan_ids <- slackrChTrans(channels)
  upload_body <- list(file=upload_file(rda_path),
                      filename=sprintf("%s.rda", file),
                      token=api_token,
                      channels=chan_ids)
  POST(url="https://slack.com/api/files.upload",
       add_headers(`Content-Type`="multipart/form-data"),
       body=upload_body)
}
#' Send a file to \code{slack.com}
#'
#' \code{slackrUoload} enables you upload files to \code{slack.com} and
#' (optionally) post them to one or more channels (if \code{channels} is not empty).
#'
#' @param filename path to file
#' @param title title on slack (optional - defaults to filename)
#' @param initial_comment comment for file on slack (optional - defaults to filename)
#' @param channels slack.com channels to save to (optional)
#' @param api_token full API token
#' @return \code{httr} response object from \code{POST} call
#' @seealso \code{\link{slackrSetup}}, \code{\link{dev.slackr}}, \code{\link{save.slackr}}
#' @export
slackrUpload <- function(filename, title=basename(filename),
                         initial_comment=basename(filename),
                         channels="", api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # Expand "~" and friends before testing for existence.
  f_path <- path.expand(filename)
  # FIX: the original silently returned NULL when the file was missing, so
  # callers never learned that nothing was uploaded. Fail loudly instead.
  if (!file.exists(f_path)) {
    stop("slackrUpload: file not found: ", f_path, call. = FALSE)
  }
  f_name <- basename(f_path)
  # Force the C locale for predictable string handling during the upload.
  Sys.setlocale('LC_ALL','C')
  # Translate "#channel"/"@user" names into Slack channel IDs.
  modchan <- slackrChTrans(channels)
  # Upload the file through the Slack file-upload API; the httr response
  # from POST() is returned.
  POST(url="https://slack.com/api/files.upload",
       add_headers(`Content-Type`="multipart/form-data"),
       body=list( file=upload_file(f_path), filename=f_name,
                  title=title, initial_comment=initial_comment,
                  token=api_token, channels=modchan))
}
#' Translate vector of channel names to channel ID's for API
#'
#' Given a vector of one or more channel names, it will retrieve list of
#' active channels and try to replace channels that begin with "\code{#}" or "\code{@@}"
#' with the channel ID for that channel. Also incorporates groups.
#'
#' @param channels vector of channel names to parse
#' @param api_token the slack.com full API token (chr)
#' @note Renamed from \code{slackr_chtrans}
#' @return character vector - original channel list with \code{#} or \code{@@} channels replaced with ID's.
#' @export
slackrChTrans <- function(channels, api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # Pull the current channel, user and group listings from the Slack API.
  chan <- slackrChannels(api_token)
  users <- slackrUsers(api_token)
  groups <- slackrGroups(api_token)
  # Prefix names so they match the "#channel" / "@user" syntax callers use.
  chan$name <- sprintf("#%s", chan$name)
  users$name <- sprintf("@%s", users$name)
  # Stack all known (id, name) pairs into a single lookup table.
  chan_list <- data.table(id=character(0), name=character(0))
  if (length(chan) > 0) { chan_list <- rbind(chan_list, chan[,1:2,with=FALSE]) }
  if (length(users) > 0) { chan_list <- rbind(chan_list, users[,1:2,with=FALSE]) }
  if (length(groups) > 0) { chan_list <- rbind(chan_list, groups[,1:2,with=FALSE]) }
  # FIX: the original used merge(), which sorts the result by name, so the
  # returned IDs no longer lined up with the order of the `channels`
  # argument when several were requested. match() preserves the caller's
  # order; names with no known ID are returned unchanged.
  idx <- match(channels, chan_list$name)
  ifelse(is.na(idx),
         as.character(channels),
         as.character(chan_list$id[idx]))
}
#' Get a data frame of slack.com users
#'
#' need to setup a full API token (i.e. not a webhook & not OAuth) for this to work
#'
#' @param api_token the slack.com full API token (chr)
#' @return data.table of users
#' @export
slackrUsers <- function(api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # Query the Slack users.list endpoint and flatten the parsed member list
  # into a data.table of (id, name, real_name) rows. A missing real_name is
  # replaced by the empty string before the row is built.
  Sys.setlocale('LC_ALL','C')
  resp <- POST("https://slack.com/api/users.list", body=list(token=api_token))
  parsed <- content(resp, as="parsed")
  member_rows <- lapply(parsed$members, function(member) {
    if (is.null(member$real_name)) { member$real_name <- "" }
    data.frame(id=nax(member$id),
               name=nax(member$name),
               real_name=nax(member$real_name))
  })
  rbindlist(member_rows)
}
#' Get a data frame of slack.com channels
#'
#' need to setup a full API token (i.e. not a webhook & not OAuth) for this to work
#'
#' @param api_token the slack.com full API token (chr)
#' @return data.table of channels
#' @note Renamed from \code{slackr_channels}
#' @export
slackrChannels <- function(api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # Query the Slack channels.list endpoint and flatten the parsed reply
  # into a data.table of (id, name, is_member) rows.
  Sys.setlocale('LC_ALL','C')
  resp <- POST("https://slack.com/api/channels.list", body=list(token=api_token))
  parsed <- content(resp, as="parsed")
  channel_rows <- lapply(parsed$channels, function(ch) {
    data.frame(id=nax(ch$id), name=nax(ch$name), is_member=nax(ch$is_member))
  })
  rbindlist(channel_rows)
}
#' Get a data frame of slack.com groups
#'
#' need to setup a full API token (i.e. not a webhook & not OAuth) for this to work
#'
#' @param api_token the slack.com full API token (chr)
#' @return data.table of channels
#' @export
slackrGroups <- function(api_token=Sys.getenv("SLACK_API_TOKEN")) {
  # Query the Slack groups.list endpoint and flatten the parsed reply
  # into a data.table of (id, name, is_archived) rows.
  Sys.setlocale('LC_ALL','C')
  resp <- POST("https://slack.com/api/groups.list", body=list(token=api_token))
  parsed <- content(resp, as="parsed")
  group_rows <- lapply(parsed$groups, function(grp) {
    data.frame(id=nax(grp$id), name=nax(grp$name), is_archived=nax(grp$is_archived))
  })
  rbindlist(group_rows)
}
# Helper: map NULL to NA so a possibly-missing API field can occupy a
# data.frame cell. A scalar `if` is used instead of the original
# `ifelse(is.null(x), NA, x)`: is.null() is always length-1, and ifelse()
# would strip attributes and misbehave for length != 1 inputs.
nax <- function(x) {
  if (is.null(x)) NA else x
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{kremlin_en}
\alias{kremlin_en}
\title{A dataset including all contents published on the English-language version of kremlin.ru}
\format{
A data frame with 24338 rows and 8 columns:
\describe{
  \item{doc_id}{the id is a composed string that should make the identifier unique even when used together with other similarly shaped datasets. Elements are separated by an \href{https://en.wikipedia.org/wiki/Hyphen-minus}{hyphen-minus}. An example \code{doc_id} would be \code{president_ru-en-012345}.}
\item{text}{this includes the full text of the document, \emph{including} the title and the textual string with date and location (when present). }
\item{date}{date of publication in the date format.}
\item{title}{the title of the document}
\item{location}{the location from where the document was issued as reported at the beginning of each post, e.g. "Novo-Ogaryovo, Moscow Region"; if not given, an empty string.}
\item{link}{a URL, source of the document}
\item{id}{numeric id; includes only the numeric part of \code{doc_id}, may be useful if only a numeric identifier is needed.}
\item{term}{a character string referring to the presidential term. The period after Yeltsin's resignation, but before Putin's first inauguration in May 2000 is indicated as "Putin 0", the following as "Putin 1", "Putin 2", "Medvedev 1", "Putin 3", and "Putin 4"}
}
}
\source{
\url{http://en.kremlin.ru/}
}
\usage{
kremlin_en
}
\description{
A dataset with 24 338 textual items.
}
\keyword{datasets}
|
/man/kremlin_en.Rd
|
permissive
|
giocomai/tifkremlinen
|
R
| false
| true
| 1,592
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{kremlin_en}
\alias{kremlin_en}
\title{A dataset including all contents published on the English-language version of kremlin.ru}
\format{
A data frame with 24338 rows and 8 columns:
\describe{
  \item{doc_id}{the id is a composed string that should make the identifier unique even when used together with other similarly shaped datasets. Elements are separated by an \href{https://en.wikipedia.org/wiki/Hyphen-minus}{hyphen-minus}. An example \code{doc_id} would be \code{president_ru-en-012345}.}
\item{text}{this includes the full text of the document, \emph{including} the title and the textual string with date and location (when present). }
\item{date}{date of publication in the date format.}
\item{title}{the title of the document}
\item{location}{the location from where the document was issued as reported at the beginning of each post, e.g. "Novo-Ogaryovo, Moscow Region"; if not given, an empty string.}
\item{link}{a URL, source of the document}
\item{id}{numeric id; includes only the numeric part of \code{doc_id}, may be useful if only a numeric identifier is needed.}
\item{term}{a character string referring to the presidential term. The period after Yeltsin's resignation, but before Putin's first inauguration in May 2000 is indicated as "Putin 0", the following as "Putin 1", "Putin 2", "Medvedev 1", "Putin 3", and "Putin 4"}
}
}
\source{
\url{http://en.kremlin.ru/}
}
\usage{
kremlin_en
}
\description{
A dataset with 24 338 textual items.
}
\keyword{datasets}
|
# Load app
library(shiny)
library(ggplot2)
# Define UI for application that draws a histogram
# UI: scatter-plot explorer for 2015 life-expectancy data. The economic
# indicator values 5/6/7 are column indices into the server-side dataset.
# NOTE(review): some region labels carry a trailing space (e.g.
# "Latin America & Caribbean ") -- these presumably match the raw values
# in the data set; confirm against the server code before "fixing" them.
shinyUI(fluidPage(
  # Application title
  titlePanel("Correlation on Life Expectancy at Birth around the world (2015)"),
  # Sidebar
  sidebarLayout(
    sidebarPanel(
      # Indicator radio buttons: value = dataset column index.
      radioButtons("radio", label = h3("Economic Indicator"),
                   choices = list("Domestic general government health expenditure [% of GDP]" = 5,
                                  "Production age (15-64) population [% of total]" = 6,
                                  "GDP per capita [current US$]" = 7),
                   selected = 5),
      # Region filter: all regions selected by default.
      checkboxGroupInput("checkGroup", label = h3("Regions"),
                         choices = list("East Asia & Pacific" = "East Asia & Pacific",
                                        "Europe & Central Asia" = "Europe & Central Asia",
                                        "Latin America & Caribbean " = "Latin America & Caribbean ",
                                        "Middle East & North Africa" = "Middle East & North Africa",
                                        "North America" = "North America",
                                        "South Asia" = "South Asia",
                                        "Sub-Saharan Africa " = "Sub-Saharan Africa "),
                         selected = c("East Asia & Pacific",
                                      "Europe & Central Asia",
                                      "Latin America & Caribbean ",
                                      "Middle East & North Africa",
                                      "North America",
                                      "South Asia",
                                      "Sub-Saharan Africa ")),
      # Smoothing method for the fitted trend line.
      radioButtons("method", label = h3("Line Fit Method"),
                   choices = list("lm" = "lm",
                                  "loess" = "loess"),
                   selected = "loess")
    ),
    # Show a plot of the generated distribution
    mainPanel(
      plotOutput("distPlot"),
      h3(textOutput("Cor"), style="color:blue"),
      h4("Data Summary : "),
      tableOutput("Table")
    )
  )
))
|
/ui.R
|
no_license
|
jkosuke/ShinyApp
|
R
| false
| false
| 2,175
|
r
|
# Load app
library(shiny)
library(ggplot2)

# UI definition: scatter-plot explorer for 2015 life-expectancy data.
# The economic-indicator values 5/6/7 are column indices into the
# server-side dataset.
indicator_input <- radioButtons(
  "radio", label = h3("Economic Indicator"),
  choices = list("Domestic general government health expenditure [% of GDP]" = 5,
                 "Production age (15-64) population [% of total]" = 6,
                 "GDP per capita [current US$]" = 7),
  selected = 5)

# Region filter. Several labels deliberately carry a trailing space so they
# match the raw values in the data set; all regions start selected. For a
# character vector, shiny uses each value as its own label, which is
# exactly what the original name==value list produced.
region_values <- c("East Asia & Pacific",
                   "Europe & Central Asia",
                   "Latin America & Caribbean ",
                   "Middle East & North Africa",
                   "North America",
                   "South Asia",
                   "Sub-Saharan Africa ")
region_input <- checkboxGroupInput(
  "checkGroup", label = h3("Regions"),
  choices = region_values,
  selected = region_values)

# Smoothing method for the fitted trend line.
fit_method_input <- radioButtons(
  "method", label = h3("Line Fit Method"),
  choices = list("lm" = "lm",
                 "loess" = "loess"),
  selected = "loess")

shinyUI(fluidPage(
  titlePanel("Correlation on Life Expectancy at Birth around the world (2015)"),
  sidebarLayout(
    sidebarPanel(
      indicator_input,
      region_input,
      fit_method_input
    ),
    # Main area: scatter plot, correlation read-out, and a summary table.
    mainPanel(
      plotOutput("distPlot"),
      h3(textOutput("Cor"), style="color:blue"),
      h4("Data Summary : "),
      tableOutput("Table")
    )
  )
))
|
# Read the UCI household power data set. FIX: the raw file marks missing
# values with "?"; without na.strings the affected columns are read as
# character/factor and as.numeric() on a factor silently yields level
# codes rather than the measured values.
dataset <- read.table("household_power_consumption.txt", header=TRUE,
                      sep=";", dec=".", na.strings="?")
dataset$Date2 <- as.Date(as.character(dataset$Date), "%d/%m/%Y")
# Keep only the two target days, 2007-02-01 and 2007-02-02 (combined from
# the original pair of chained subset() calls).
newdata2 <- subset(dataset,
                   Date2 >= as.Date("2007-02-01") & Date2 <= as.Date("2007-02-02"))
# Combine date and time into a POSIXlt timestamp for the x axis.
newdata2$datetime <- strptime(paste(newdata2$Date, newdata2$Time),
                              format = "%d/%m/%Y %H:%M:%OS")
print(head(newdata2))
# Write a 2x2 panel of plots to plot4.png (column-major fill order).
png(file="plot4.png")
par(mfcol=c(2,2))
with(newdata2, plot(datetime, as.numeric(Global_active_power), type="l",
                    ylab="Global Active Power (kilowatts)", xlab=""))
# Energy sub-metering: three series overlaid with a legend.
with(newdata2, plot(datetime, as.numeric(Sub_metering_1), type="l",
                    ylab="Energy sub metering", xlab="", col="black"))
with(newdata2, lines(datetime, as.numeric(Sub_metering_2), col="red", type="l"))
with(newdata2, lines(datetime, as.numeric(Sub_metering_3), col="blue", type="l"))
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       col=c("black","red","blue"), lty=1, bty="n")
with(newdata2, plot(datetime, as.numeric(Voltage), type="l", ylab="Voltage"))
with(newdata2, plot(datetime, as.numeric(Global_reactive_power), type="l",
                    ylab="Global_reactive_power"))
dev.off()
|
/plot4.R
|
no_license
|
gitjo-1997/ExData_Plotting1
|
R
| false
| false
| 1,112
|
r
|
# Read the UCI household power data set.
# NOTE(review): the raw file marks missing values with "?"; without
# na.strings="?" those columns are read as character/factor, and
# as.numeric() may then yield level codes instead of measurements --
# verify against the data file.
dataset<-read.table("household_power_consumption.txt",header=TRUE,sep=";",dec=".")
# Parse the Date column and keep only 2007-02-01 .. 2007-02-02.
dataset$Date2<-as.Date(as.character(dataset$Date),"%d/%m/%Y")
newdata<-subset(dataset,Date2 >= as.Date("2007-02-01"))
newdata2<-subset(newdata,Date2 <= as.Date("2007-02-02"))
# Combine date and time into a POSIXlt timestamp for the x axis.
newdata2$datetime<-strptime(paste(newdata2$Date,newdata2$Time), format = "%d/%m/%Y %H:%M:%OS")
print(head(newdata2))
# Write a 2x2 panel of plots to plot4.png (column-major fill order).
png(file="plot4.png")
par(mfcol=c(2,2))
with(newdata2,plot(datetime,as.numeric(Global_active_power),type="l",ylab="Global Active Power (kilowatts)",xlab=""))
# Energy sub-metering: three series overlaid with a legend.
with(newdata2,plot(datetime,as.numeric(Sub_metering_1),type="l",ylab="Energy sub metering",xlab="",col="black"))
with(newdata2, lines(datetime, as.numeric(Sub_metering_2),col="red",type="l"))
with(newdata2, lines(datetime, as.numeric(Sub_metering_3),col="blue",type="l"))
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lty=1,bty="n")
with(newdata2,plot(datetime,as.numeric(Voltage),type="l",ylab="Voltage"))
with(newdata2,plot(datetime,as.numeric(Global_reactive_power),type="l",ylab="Global_reactive_power"))
dev.off()
|
##
## intsurv: Integrative Survival Models
## Copyright (C) 2017-2019 Wenjie Wang <wjwang.stat@gmail.com>
##
## This file is part of the R package intsurv.
##
## The R package intsurv is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or any later
## version (at your option). See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package intsurv is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##
##' Simulate Data from Cox Cure Model with Uncertain Event Status
##'
##' @usage
##' simData4cure(nSubject = 1e3,
##' shape = 2, scale = 0.1,
##' lambda_censor = 1, max_censor = Inf,
##' p1 = 0.9, p2 = 0.9, p3 = 0.9,
##' survMat, cureMat = survMat,
##' b0 = stats::binomial()$linkfun(0.7),
##' survCoef = rep(1, ncol(survMat)),
##' cureCoef = rep(1, ncol(cureMat)),
##' ...)
##'
##' @param nSubject A positive integer specifying number of subjects.
##' @param shape A positive number specifying the shape parameter of the
##' distribution of the event times.
##' @param scale A positive number specifying the scale parameter of the
##' distribution of the event times.
##' @param lambda_censor A positive number specifying the rate parameter of the
##' exponential distribution for generating censoring times.
##' @param max_censor A positive number specifying the largest censoring time.
##' @param p1 A number between 0 and 1 specifying the probability of simulating
##' events with observed event indicators given the simulated event times.
##' @param p2 A number between 0 and 1 specifying the probability of simulating
##' susceptible censoring times with observed event status given the
##' simulated susceptible censoring times.
##' @param p3 A number between 0 and 1 specifying the probability of simulating
##' cured censoring times with observed event status given the simulated
##' cured censoring times.
##' @param survMat A numeric matrix representing the design matrix of the
##' survival model part.
##' @param cureMat A numeric matrix representing the design matrix excluding
##' intercept of the cure rate model part.
##' @param b0 A number representing the intercept term for the cure rate model
##' part.
##' @param survCoef A numeric vector for the covariate coefficients of the
##' survival model part.
##' @param cureCoef A numeric vector for the covariate coefficients of the
##' cure model part.
##' @param ... Other arguments not used currently.
##'
##' @return
##' A data.frame with the following columns:
##' \itemize{
##'
##' \item \code{obs_time}: Observed event/survival times.
##'
##' \item \code{obs_event}: Observed event status.
##'
##' \item \code{event_time}: Underlying true event times.
##'
##' \item \code{censor_time}: underlying true censoring times.
##'
##' \item \code{oracle_event}: underlying true event indicators.
##'
##' \item \code{oracle_cure}: underlying true cure indicators.
##'
##' \item \code{case}: underlying true case labels.
##'
##' }
##'
##' @references
##'
##' Wang, W., Chen, K., Luo, C., & Yan, J. (2019+). Cox Cure Model with
##' Uncertain Event Status with application to a Suicide Risk
##' Study. \emph{Working in Progress}.
##'
##' @examples
##' ## see examples of function cox_cure
##' @importFrom stats binomial rbinom rexp runif
##' @export
simData4cure <- function(nSubject = 1e3,
                         shape = 2, scale = 0.1,
                         lambda_censor = 1, max_censor = Inf,
                         p1 = 0.9, p2 = 0.9, p3 = 0.9,
                         survMat, cureMat = survMat,
                         b0 = stats::binomial()$linkfun(0.7),
                         survCoef = rep(1, ncol(survMat)),
                         cureCoef = rep(1, ncol(cureMat)),
                         ...)
{
    ## Simulate one subject (row i of the design matrices) and return a
    ## one-row data.frame. Note the RNG draws happen in a fixed order, so
    ## results are reproducible for a given seed.
    simu_one <- function(i)
    {
        ## 1. generate cure indicator for each subject based on logistics model.
        expit <- binomial()$linkinv
        not_cure_p <- expit(as.numeric(cureMat[i, ] %*% cureCoef + b0))
        cure_ind <- rbinom(1, size = 1, prob = not_cure_p) < 1
        ## Relative hazard multiplier from the survival-model covariates.
        expXbeta <- as.numeric(exp(survMat[i, ] %*% survCoef))
        if (cure_ind) {
            ## 2.1 if cured: no event ever occurs (event_time = Inf); only a
            ## censoring time is observed. With probability p3 the censoring
            ## status is recorded (case "2b"), otherwise it is missing ("3c").
            censorTime <- min(rexp(1, rate = lambda_censor), max_censor)
            b <- rbinom(1, 1, p3)
            out <- data.frame(obs_time = censorTime,
                              obs_event = ifelse(b, 0, NA),
                              event_time = Inf,
                              censor_time = censorTime,
                              oracle_event = 0,
                              oracle_cure = 1,
                              case = ifelse(b, "2b", "3c"))
        } else {
            ## 2.2 otherwise (not cured): draw competing event and censoring
            ## times; the earlier one is observed.
            eventTime <- rWeibull(1, shape, scale * expXbeta)
            censorTime <- min(rexp(1, rate = lambda_censor), max_censor)
            obsTime <- min(eventTime, censorTime)
            obsEvent <- as.integer(eventTime <= censorTime)
            if (obsEvent) {
                ## Event observed first: status recorded with probability p1
                ## (case "1"), otherwise missing (case "3a").
                b <- rbinom(1, 1, p1)
                out <- data.frame(obs_time = obsTime,
                                  obs_event = ifelse(b, 1, NA),
                                  event_time = eventTime,
                                  censor_time = censorTime,
                                  oracle_event = 1,
                                  oracle_cure = 0,
                                  case = ifelse(b, "1", "3a"))
            } else {
                ## Censored before the event: status recorded with probability
                ## p2 (case "2a"), otherwise missing (case "3b").
                b <- rbinom(1, 1, p2)
                out <- data.frame(obs_time = obsTime,
                                  obs_event = ifelse(b, 0, NA),
                                  event_time = eventTime,
                                  censor_time = censorTime,
                                  oracle_event = 0,
                                  oracle_cure = 0,
                                  case = ifelse(b, "2a", "3b"))
            }
        }
        ## Bayes classification
        ## event_den <- dWeibull(out$obs_time, shape, scale * expXbeta)
        ## censor_den <- dexp(out$obs_time, lambda_censor)
        ## event_surv <- 1 - pWeibull(out$obs_time, shape, scale * expXbeta)
        ## censor_surv <- 1 - pexp(out$obs_time, lambda_censor)
        ## m1 <- event_den * censor_surv * not_cure_p
        ## m2 <- censor_den * event_surv * not_cure_p
        ## m3 <- (1 - not_cure_p) * censor_den
        ## out$bayes_event <- m1 / (m1 + m2 + m3)
        ## return
        return(out)
    }
    ## Simulate every subject and stack the one-row frames; append the
    ## design-matrix columns, renamed x1..xp (survival) and z1..zq (cure).
    res <- do.call(rbind, lapply(seq_len(nSubject), simu_one))
    colnames(survMat) <- paste0("x", seq_len(ncol(survMat)))
    colnames(cureMat) <- paste0("z", seq_len(ncol(cureMat)))
    cbind(res, survMat, cureMat)
}
### internal function ==========================================================
## Draw n variates from a Gompertz distribution (similar to eha::rgompertz
## with param == "canonical") via inverse-CDF sampling of uniform draws.
rGompertz <- function(n, shape = 1, scale = 1, ...) {
    probs <- runif(n)
    inner <- - shape * log1p(- probs) / (scale * shape)
    1 / shape * log1p(inner)
}
## Draw n variates from a Weibull distribution parametrised through its
## cumulative hazard H(x) = scale * x^shape (different parametrisation from
## stats::rweibull); reduces to Exponential(rate = scale) when shape == 1.
rWeibull <- function(n, shape = 1, scale = 1, ...) {
    probs <- runif(n)
    exp_draws <- - log1p(- probs) / scale
    exp_draws ^ (1 / shape)
}
## Hazard function of the Weibull distribution whose cumulative hazard is
## H(x) = scale * x^shape.
hWeibull <- function(x, shape = 1, scale = 1) {
    shape * scale * x ^ (shape - 1)
}
## Cumulative hazard of the Weibull distribution: H(x) = scale * x^shape.
HWeibull <- function(x, shape = 1, scale = 1) {
    x ^ shape * scale
}
## CDF of the Weibull distribution: P(X <= x) = 1 - exp(-scale * x^shape).
pWeibull <- function(x, shape = 1, scale = 1) {
    cum_haz <- scale * x ^ shape
    1 - exp(- cum_haz)
}
## Density of the Weibull distribution, written as hazard times survival:
## f(x) = scale * shape * x^(shape - 1) * exp(-scale * x^shape).
## (Inlines the hWeibull formula so the function is self-contained.)
dWeibull <- function(x, shape = 1, scale = 1) {
    scale * shape * x ^ (shape - 1) * exp(- scale * x ^ shape)
}
|
/fuzzedpackages/intsurv/R/simData4cure.R
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 7,942
|
r
|
##
## intsurv: Integrative Survival Models
## Copyright (C) 2017-2019 Wenjie Wang <wjwang.stat@gmail.com>
##
## This file is part of the R package intsurv.
##
## The R package intsurv is free software: You can redistribute it and/or
## modify it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or any later
## version (at your option). See the GNU General Public License at
## <https://www.gnu.org/licenses/> for details.
##
## The R package intsurv is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
##
##' Simulate Data from Cox Cure Model with Uncertain Event Status
##'
##' @usage
##' simData4cure(nSubject = 1e3,
##' shape = 2, scale = 0.1,
##' lambda_censor = 1, max_censor = Inf,
##' p1 = 0.9, p2 = 0.9, p3 = 0.9,
##' survMat, cureMat = survMat,
##' b0 = stats::binomial()$linkfun(0.7),
##' survCoef = rep(1, ncol(survMat)),
##' cureCoef = rep(1, ncol(cureMat)),
##' ...)
##'
##' @param nSubject A positive integer specifying number of subjects.
##' @param shape A positive number specifying the shape parameter of the
##' distribution of the event times.
##' @param scale A positive number specifying the scale parameter of the
##' distribution of the event times.
##' @param lambda_censor A positive number specifying the rate parameter of the
##' exponential distribution for generating censoring times.
##' @param max_censor A positive number specifying the largest censoring time.
##' @param p1 A number between 0 and 1 specifying the probability of simulating
##' events with observed event indicators given the simulated event times.
##' @param p2 A number between 0 and 1 specifying the probability of simulating
##' susceptible censoring times with observed event status given the
##' simulated susceptible censoring times.
##' @param p3 A number between 0 and 1 specifying the probability of simulating
##' cured censoring times with observed event status given the simulated
##' cured censoring times.
##' @param survMat A numeric matrix representing the design matrix of the
##' survival model part.
##' @param cureMat A numeric matrix representing the design matrix excluding
##' intercept of the cure rate model part.
##' @param b0 A number representing the intercept term for the cure rate model
##' part.
##' @param survCoef A numeric vector for the covariate coefficients of the
##' survival model part.
##' @param cureCoef A numeric vector for the covariate coefficients of the
##' cure model part.
##' @param ... Other arguments not used currently.
##'
##' @return
##' A data.frame with the following columns:
##' \itemize{
##'
##' \item \code{obs_time}: Observed event/survival times.
##'
##' \item \code{obs_event}: Observed event status.
##'
##' \item \code{event_time}: Underlying true event times.
##'
##' \item \code{censor_time}: underlying true censoring times.
##'
##' \item \code{oracle_event}: underlying true event indicators.
##'
##' \item \code{oracle_cure}: underlying true cure indicators.
##'
##' \item \code{case}: underlying true case labels.
##'
##' }
##'
##' @references
##'
##' Wang, W., Chen, K., Luo, C., & Yan, J. (2019+). Cox Cure Model with
##' Uncertain Event Status with application to a Suicide Risk
##' Study. \emph{Working in Progress}.
##'
##' @examples
##' ## see examples of function cox_cure
##' @importFrom stats binomial rbinom rexp runif
##' @export
simData4cure <- function(nSubject = 1e3,
                         shape = 2, scale = 0.1,
                         lambda_censor = 1, max_censor = Inf,
                         p1 = 0.9, p2 = 0.9, p3 = 0.9,
                         survMat, cureMat = survMat,
                         b0 = stats::binomial()$linkfun(0.7),
                         survCoef = rep(1, ncol(survMat)),
                         cureCoef = rep(1, ncol(cureMat)),
                         ...)
{
    ## Simulate one subject (row i of the design matrices) and return a
    ## one-row data.frame with both the observed outcome and the underlying
    ## "oracle" truth.  Case labels: "1" observed event, "2a"/"2b" observed
    ## censoring (susceptible/cured), "3a"/"3b"/"3c" missing event status.
    simu_one <- function(i)
    {
        ## 1. generate cure indicator for each subject based on logistics model.
        expit <- binomial()$linkinv
        ## probability of being susceptible (not cured) under the logistic
        ## cure-rate model with intercept b0
        not_cure_p <- expit(as.numeric(cureMat[i, ] %*% cureCoef + b0))
        ## cure_ind is TRUE when the Bernoulli(not_cure_p) draw comes up 0
        cure_ind <- rbinom(1, size = 1, prob = not_cure_p) < 1
        ## relative risk exp(x_i' beta) that scales the Weibull scale below
        expXbeta <- as.numeric(exp(survMat[i, ] %*% survCoef))
        if (cure_ind) {
            ## 2.1 if cured: the event never happens (event_time = Inf); only
            ## an exponential censoring time (truncated at max_censor) is seen.
            censorTime <- min(rexp(1, rate = lambda_censor), max_censor)
            ## with probability p3 the censoring status is recorded ("2b"),
            ## otherwise the event status is missing ("3c")
            b <- rbinom(1, 1, p3)
            out <- data.frame(obs_time = censorTime,
                              obs_event = ifelse(b, 0, NA),
                              event_time = Inf,
                              censor_time = censorTime,
                              oracle_event = 0,
                              oracle_cure = 1,
                              case = ifelse(b, "2b", "3c"))
        } else {
            ## 2.2 otherwise (not cured): draw a Weibull event time with the
            ## scale inflated by exp(x_i' beta); the observed time is the
            ## earlier of the event and censoring times.
            eventTime <- rWeibull(1, shape, scale * expXbeta)
            censorTime <- min(rexp(1, rate = lambda_censor), max_censor)
            obsTime <- min(eventTime, censorTime)
            obsEvent <- as.integer(eventTime <= censorTime)
            if (obsEvent) {
                ## event first: status recorded with probability p1 ("1"),
                ## otherwise missing ("3a")
                b <- rbinom(1, 1, p1)
                out <- data.frame(obs_time = obsTime,
                                  obs_event = ifelse(b, 1, NA),
                                  event_time = eventTime,
                                  censor_time = censorTime,
                                  oracle_event = 1,
                                  oracle_cure = 0,
                                  case = ifelse(b, "1", "3a"))
            } else {
                ## censored while susceptible: status recorded with
                ## probability p2 ("2a"), otherwise missing ("3b")
                b <- rbinom(1, 1, p2)
                out <- data.frame(obs_time = obsTime,
                                  obs_event = ifelse(b, 0, NA),
                                  event_time = eventTime,
                                  censor_time = censorTime,
                                  oracle_event = 0,
                                  oracle_cure = 0,
                                  case = ifelse(b, "2a", "3b"))
            }
        }
        ## Bayes classification
        ## event_den <- dWeibull(out$obs_time, shape, scale * expXbeta)
        ## censor_den <- dexp(out$obs_time, lambda_censor)
        ## event_surv <- 1 - pWeibull(out$obs_time, shape, scale * expXbeta)
        ## censor_surv <- 1 - pexp(out$obs_time, lambda_censor)
        ## m1 <- event_den * censor_surv * not_cure_p
        ## m2 <- censor_den * event_surv * not_cure_p
        ## m3 <- (1 - not_cure_p) * censor_den
        ## out$bayes_event <- m1 / (m1 + m2 + m3)
        ## return
        return(out)
    }
    ## stack the per-subject rows, then append the design matrices as
    ## covariate columns named x1, x2, ... (survival part) and z1, z2, ...
    ## (cure part)
    res <- do.call(rbind, lapply(seq_len(nSubject), simu_one))
    colnames(survMat) <- paste0("x", seq_len(ncol(survMat)))
    colnames(cureMat) <- paste0("z", seq_len(ncol(cureMat)))
    cbind(res, survMat, cureMat)
}
### internal function ==========================================================
## Draw n variates from a Gompertz distribution (similar to eha::rgompertz
## with param == "canonical") via inverse-CDF sampling of uniform draws.
rGompertz <- function(n, shape = 1, scale = 1, ...) {
    probs <- runif(n)
    inner <- - shape * log1p(- probs) / (scale * shape)
    1 / shape * log1p(inner)
}
## Draw n variates from a Weibull distribution parametrised through its
## cumulative hazard H(x) = scale * x^shape (different parametrisation from
## stats::rweibull); reduces to Exponential(rate = scale) when shape == 1.
rWeibull <- function(n, shape = 1, scale = 1, ...) {
    probs <- runif(n)
    exp_draws <- - log1p(- probs) / scale
    exp_draws ^ (1 / shape)
}
## Hazard function of the Weibull distribution whose cumulative hazard is
## H(x) = scale * x^shape.
hWeibull <- function(x, shape = 1, scale = 1) {
    shape * scale * x ^ (shape - 1)
}
## Cumulative hazard of the Weibull distribution: H(x) = scale * x^shape.
HWeibull <- function(x, shape = 1, scale = 1) {
    x ^ shape * scale
}
## CDF of the Weibull distribution: P(X <= x) = 1 - exp(-scale * x^shape).
pWeibull <- function(x, shape = 1, scale = 1) {
    cum_haz <- scale * x ^ shape
    1 - exp(- cum_haz)
}
## Density of the Weibull distribution, written as hazard times survival:
## f(x) = scale * shape * x^(shape - 1) * exp(-scale * x^shape).
## (Inlines the hWeibull formula so the function is self-contained.)
dWeibull <- function(x, shape = 1, scale = 1) {
    scale * shape * x ^ (shape - 1) * exp(- scale * x ^ shape)
}
|
## Scratch script comparing grouped summaries of DaTS$SeasPr computed with
## base R (tapply/aggregate) and with plyr::ddply.
## NOTE(review): rm(list=ls()) wipes the caller's entire workspace -- avoid
## in shared scripts.
rm(list=ls())
library(ggplot2);library(cvTools);library(MASS);library(plyr);library(dplyr); library(ggeffects);library(tseries); library(plm); library(nlme); library(lme4); library(lattice); library(car); library(lmerTest); library(optimx)
## DaTS: data set with (at least) SeasPr, Year and ASAL columns, judging by
## the usage below -- confirm against dataFS/Main/DaTS.RData.
load("dataFS/Main/DaTS.RData")
# load("Rcodes/DecemberNew/KEN11d_stepNice.RData")
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
## mean of SeasPr by Year
tapply(DaTS$SeasPr, DaTS$Year, mean)
## mean of SeasPr by ASAL x Year and by Year alone
aggregate(DaTS$SeasPr,by=list(DaTS$ASAL,DaTS$Year),mean )
aggregate(DaTS$SeasPr,by=list(DaTS$Year),mean )
## Year x ASAL table of means, transposed so years index the rows
Tonka<-t(tapply(DaTS$SeasPr, list(DaTS$ASAL,DaTS$Year), mean))
write.csv(Tonka,"~/Rvarious/ddplyN2.csv")
## Per-group mean and sd of column `varname`, grouped by `groupname`,
## computed with plyr::ddply; the "mean" column is then renamed to `varname`.
ddplyM <- function(data, groupname, varname) {
    require(plyr)
    per_group <- function(chunk, col) {
        c(mean = mean(chunk[[col]], na.rm = TRUE),
          sd = sd(chunk[[col]], na.rm = TRUE))
    }
    summarised <- ddply(data, groupname, .fun = per_group, varname)
    rename(summarised, c("mean" = varname))
}
## Per-group mean and sd of `varname`, grouped by `groupnames`, via
## plyr::ddply; the "mean" column is renamed to `varname` afterwards.
## (Near-duplicate of ddplyM with the argument order swapped.)
ddply_Monika <- function(data, varname, groupnames) {
    require(plyr)
    per_group <- function(chunk, col) {
        c(mean = mean(chunk[[col]], na.rm = TRUE),
          sd = sd(chunk[[col]], na.rm = TRUE))
    }
    summarised <- ddply(data, groupnames, .fun = per_group, varname)
    rename(summarised, c("mean" = varname))
}
## mean/sd of SeasPr by Year via the ddplyM helper above
exodus<-ddplyM(DaTS,"Year","SeasPr")
## NA-tolerant mean shorthand
meanRM<-function(x) mean(x,na.rm=TRUE)
## same yearly mean computed directly with plyr::ddply + summarize
tonka2<-ddply(DaTS, .(Year), summarize,mean=mean(SeasPr,na.rm=TRUE))
#-----------------------------------------------------------------------------------------------------------------------------------------------------
## Sum of three numbers (scratch example).
.MySum <- function(x, y, z) {
    x + y + z
}
.MySum(1, 2, 3)
## BUG FIX: the original built innerF but did `return(innerF)`, handing back
## the closure itself instead of applying it -- so .MySum2(1, 2, .fun = sum)
## yielded a function, not 3.  It also loaded plyr without ever using it;
## that call is dropped.
## Apply `.fun` to x and y through an inner helper.
.MySum2 <- function(x, y, .fun) {
    innerF <- function(a, b, .fun) .fun(a, b)
    innerF(x, y, .fun)
}
.MySum2(1, 2, .fun = sum)
## Apply `funct` to a and b, add 3, then subtract 1.
myFun1 <- function(a, b, funct) {
    shifted <- funct(a, b) + 3
    shifted - 1
}
myFun1(2, 2, sum)
#kkkkkkkkkkkkkkkkkkkkoooooooooooooooooooooeeeeeeeellllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
## Higher-order helper: apply `.theFunct` to `.x` and `.y`.
funM <- function(.x, .y, .theFunct) {
    .theFunct(.x, .y)
}
## Redefinition of myFun1: computes a + b - 3 by handing a local function
## to funM.
## FIX: dropped the original `require(plyr)` -- plyr was never used in this
## body (the very next experiment is titled "without dplyr").
myFun1 <- function(a, b) {
    funct <- function(z, x) z + x - 3
    funM(a, b, .theFunct = funct)
}
myFun1(2, 3)
# now will try without dplyr
## funM2: apply `theFunct` to x and y (plain base R higher-order helper).
funM2 <- function(x, y, theFunct) theFunct(x, y)
## myFun3: computes a + b - 3 through funM2.
myFun3 <- function(a, b) {
    add_minus3 <- function(z, x) z + x - 3
    funM2(a, b, theFunct = add_minus3)
}
myFun3(80, 10)
#AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
## Per-group mean and sd of `varname`, grouped by `groupnames`, via
## plyr::ddply; the "mean" column is renamed to `varname` afterwards.
## (Near-duplicate of ddplyM with the argument order swapped.)
ddply_Monika <- function(data, varname, groupnames) {
    require(plyr)
    per_group <- function(chunk, col) {
        c(mean = mean(chunk[[col]], na.rm = TRUE),
          sd = sd(chunk[[col]], na.rm = TRUE))
    }
    summarised <- ddply(data, groupnames, .fun = per_group, varname)
    rename(summarised, c("mean" = varname))
}
#-----------------------------------------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------------------------------------
write.csv(Tonka,"~/Rvarious/ddplyN2.csv")
|
/ddplyNewbie2.R
|
no_license
|
MonikaNovackova/Rvarious
|
R
| false
| false
| 3,672
|
r
|
rm(list=ls())
library(ggplot2);library(cvTools);library(MASS);library(plyr);library(dplyr); library(ggeffects);library(tseries); library(plm); library(nlme); library(lme4); library(lattice); library(car); library(lmerTest); library(optimx)
load("dataFS/Main/DaTS.RData")
# load("Rcodes/DecemberNew/KEN11d_stepNice.RData")
#oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo
tapply(DaTS$SeasPr, DaTS$Year, mean)
aggregate(DaTS$SeasPr,by=list(DaTS$ASAL,DaTS$Year),mean )
aggregate(DaTS$SeasPr,by=list(DaTS$Year),mean )
Tonka<-t(tapply(DaTS$SeasPr, list(DaTS$ASAL,DaTS$Year), mean))
write.csv(Tonka,"~/Rvarious/ddplyN2.csv")
ddplyM<-function(data,groupname, varname) {
require(plyr)
summaryFUNC<-function(x,col) { c(mean=mean(x[[col]],na.rm=TRUE),sd=sd(x[[col]],na.rm=TRUE)) }
ddply_sum<-ddply(data, groupname,.fun=summaryFUNC,varname)
ddply_sum<-rename(ddply_sum,c("mean"=varname))
return(ddply_sum)
}
ddply_Monika<-function(data,varname,groupnames)
{ require(plyr)
summary_func <- function(x,col) { c(mean=mean(x[[col]],na.rm=TRUE ),sd=sd(x[[col]],na.rm=TRUE) ) }
data_sum<-ddply(data, groupnames,.fun=summary_func,varname )
data_sum <- rename(data_sum, c("mean" = varname))
return(data_sum)
}
exodus<-ddplyM(DaTS,"Year","SeasPr")
meanRM<-function(x) mean(x,na.rm=TRUE)
tonka2<-ddply(DaTS, .(Year), summarize,mean=mean(SeasPr,na.rm=TRUE))
#-----------------------------------------------------------------------------------------------------------------------------------------------------
.MySum<-function(x,y,z) {
a= x+y
return (a+z)
}
.MySum(1,2,3)
.MySum2<-function(x,y,.fun) {
require(plyr)
innerF<-function(a,b,.fun) {.fun(a,b)}
return (innerF)
}
.MySum2(1,2,.fun=sum)
myFun1<-function(a,b,funct){
res<-funct(a,b)+3
return(res-1)}
myFun1(2,2,sum)
#kkkkkkkkkkkkkkkkkkkkoooooooooooooooooooooeeeeeeeellllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
funM<-function(.x,.y,.theFunct) {.theFunct(.x,.y)}
myFun1<-function(a,b){
require(plyr)
funct<-function(z,x) {z+x-3}
someRes<-funM(a,b,.theFunct=funct)
return(someRes)}
myFun1(2,3)
# now will try without dplyr
funM2<-function(x,y,theFunct) {theFunct(x,y)}
myFun3<-function(a,b){
funct<-function(z,x) {z+x-3}
someRes<-funM2(a,b,theFunct=funct)
return(someRes)}
myFun3(80,10)
#AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
ddply_Monika<-function(data,varname,groupnames)
{ require(plyr)
summary_func <- function(x,col) { c(mean=mean(x[[col]],na.rm=TRUE ),sd=sd(x[[col]],na.rm=TRUE) ) }
data_sum<-ddply(data, groupnames,.fun=summary_func,varname )
data_sum <- rename(data_sum, c("mean" = varname))
return(data_sum)
}
#-----------------------------------------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------------------------------------
write.csv(Tonka,"~/Rvarious/ddplyN2.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/temp.kalman.R
\name{temp.kalman}
\alias{temp.kalman}
\title{Smooth temperature time series using a Kalman filter/ smoother}
\usage{
temp.kalman(wtr, watts, ampH=1, ...)
}
\arguments{
\item{wtr}{Vector (regular time series) of water temperature in degrees C}
\item{watts}{estimate of watts entering the layer at each time step, from \link{watts.in}}
\item{ampH}{factor by which to artificially amplify the observation error variance, H}
\item{...}{parameters to be passed to \link{optim}}
}
\value{
a smoothed temperature time series
}
\description{
Smooths a temperature time series using a Kalman filter/smoother.
}
\details{
basic model process is \code{x[t] = beta*x[t-1] + c1*watts[t-1]}
}
\references{
Batt, Ryan D. and Stephen R. Carpenter. 2012. \emph{Free-water lake metabolism:
addressing noisy time series with a Kalman filter}. Limnology and
Oceanography: Methods 10: 20-30. doi: 10.4319/lom.2012.10.20
}
\seealso{
\link{watts.in} \link{metab.kalman}
}
\author{
Ryan Batt
}
|
/man/temp.kalman.Rd
|
no_license
|
cran/LakeMetabolizer
|
R
| false
| true
| 1,106
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/temp.kalman.R
\name{temp.kalman}
\alias{temp.kalman}
\title{Smooth temperature time series using a Kalman filter/ smoother}
\usage{
temp.kalman(wtr, watts, ampH=1, ...)
}
\arguments{
\item{wtr}{Vector (regular time series) of water temperature in degrees C}
\item{watts}{estimate of watts entering the layer at each time step, from \link{watts.in}}
\item{ampH}{factor by which to artificially amplify the observation error variance, H}
\item{...}{parameters to be passed to \link{optim}}
}
\value{
a smoothed temperature time series
}
\description{
Smoothes a temperature time series uses a Kalman filter/ smoother.
}
\details{
basic model process is \code{x[t] = beta*x[t-1] + c1*watts[t-1]}
}
\references{
Batt, Ryan D. and Stephen R. Carpenter. 2012. \emph{Free-water lake metabolism:
addressing noisy time series with a Kalman filter}. Limnology and
Oceanography: Methods 10: 20-30. doi: 10.4319/lom.2012.10.20
}
\seealso{
\link{watts.in} \link{metab.kalman}
}
\author{
Ryan Batt
}
|
#' Quarterly trend index for a time series
#'
#' Converts a (year, quarter) pair into a running quarter count starting at
#' 1 for quarter 1 of \code{start.year}:
#' trend = quarter + 4 * (year - start.year).
#'
#' @param year Numeric vector of calendar years.
#' @param quarter Numeric vector of quarters (1-4); recycled against
#'   \code{year} by the usual rules.
#' @param start.year Reference year whose first quarter maps to trend value 1
#'   (default 1958).
#'
#' @return Numeric vector of trend values.
#'
#' @export
qtrend.f <- function(year, quarter, start.year = 1958) {
  # Function to calculate 'trend' vector for time-series:
  years_elapsed <- year - start.year
  quarter + years_elapsed * 4
}
|
/R/qtrend.r
|
no_license
|
einarhjorleifsson/datrasr
|
R
| false
| false
| 213
|
r
|
#' Quarterly trend index for a time series
#'
#' Converts a (year, quarter) pair into a running quarter count starting at
#' 1 for quarter 1 of \code{start.year}:
#' trend = quarter + 4 * (year - start.year).
#'
#' @param year Numeric vector of calendar years.
#' @param quarter Numeric vector of quarters (1-4); recycled against
#'   \code{year} by the usual rules.
#' @param start.year Reference year whose first quarter maps to trend value 1
#'   (default 1958).
#'
#' @return Numeric vector of trend values.
#'
#' @export
qtrend.f <- function(year, quarter, start.year = 1958) {
  # Function to calculate 'trend' vector for time-series:
  years_elapsed <- year - start.year
  quarter + years_elapsed * 4
}
|
# 1
## Scatter plot of Cases against Week, coloured by the Easter indicator.
library(ggplot2)
z <- ggplot(Eggs, aes(x = Week, y = Cases))
z + geom_point(aes(color = factor(Easter)))
# 2
## Overlay the five price series (columns 6:10 of Eggs) against Week.
## NOTE(review): Cereal.Pr is drawn twice (blue, then grey painted over it)
## while only five price columns are selected -- the final grey layer looks
## like a copy-paste slip; confirm which series was intended.
df <- Eggs[,c(1,6:10)]
g <- ggplot(df, aes(Week))
g <- g + geom_line(aes(y=Egg.Pr), colour="red")
g <- g + geom_line(aes(y=Beef.Pr), colour="green")
g <- g + geom_line(aes(y=Pork.Pr), colour="yellow")
g <- g + geom_line(aes(y=Cereal.Pr), colour="blue")
g <- g + geom_line(aes(y=Chicken.Pr), colour="purple")
g <- g + geom_line(aes(y=Cereal.Pr), colour="grey")
g + labs(y = "Price")
|
/SGH/R/h2/h2.R
|
no_license
|
Valkoiset/myrepo
|
R
| false
| false
| 498
|
r
|
# 1
library(ggplot2)
z <- ggplot(Eggs, aes(x = Week, y = Cases))
z + geom_point(aes(color = factor(Easter)))
# 2
df <- Eggs[,c(1,6:10)]
g <- ggplot(df, aes(Week))
g <- g + geom_line(aes(y=Egg.Pr), colour="red")
g <- g + geom_line(aes(y=Beef.Pr), colour="green")
g <- g + geom_line(aes(y=Pork.Pr), colour="yellow")
g <- g + geom_line(aes(y=Cereal.Pr), colour="blue")
g <- g + geom_line(aes(y=Chicken.Pr), colour="purple")
g <- g + geom_line(aes(y=Cereal.Pr), colour="grey")
g + labs(y = "Price")
|
#!/usr/bin/Rscript
# inverse_methods.R Author "Nathan Wycoff <nathanbrwycoff@gmail.com>" Date 10.03.2017
source("forward_methods.R")
#TODO: Inverse methods may not work due to change of syntax for forward methods
################################################################################
## Functions for inverse
## Stress of a candidate weight vector: run the forward MDS under `weights`
## and compare the induced low-dimensional distance matrix against the
## user-supplied one (sum of squared elementwise differences).
##
## NOTE(review): `k` is accepted but never referenced in this body, and the
## forward_mds seed is drawn at random on every call, making the objective
## stochastic -- confirm both are intentional.
inverse_cost <- function(weights, user_low_d_dist, high_d, k,
                         n.inits, dist.func) {
    # Get the low d projection induced by the weights, and its distance matrix
    weights_low_d <- forward_mds(high_d, weights, dist.func,
                                 n.inits = n.inits,
                                 seed = sample(1:1000,1))$par
    weights_low_d_dist <- good.dist(weights_low_d, dist.func)
    # Compare the two distance matrices
    diff <- weights_low_d_dist - user_low_d_dist
    stress <- sum(diff^2)
    return(stress)
}
###
#@param n.inits the number of intial weight configs
#@param forward.n.inits the number of initial point configs for each forward step
## One inverse-MDS step: from `n.inits` random Gamma(1, 1) starting weight
## vectors, minimise inverse_cost with box-constrained L-BFGS-B (weights
## kept non-negative) and keep the best run.
##
## NOTE(review): the optim() call forwards `k = k`, but `k` is not a
## parameter of this function -- it resolves to a global `k` at call time
## (and inverse_cost ignores it anyway). Verify and either add a `k`
## argument or drop the forwarding.
inverse_step <- function(user_low_d, high_d, dist.func, n.inits,
                         forward.n.inits, seed) {
    set.seed(seed)
    # Calculate the low d distance matrix
    user_low_d_dist <- good.dist(user_low_d, dist.func)
    # Get some random initial weight vectors (i.i.d. Gamma(1, 1) draws)
    p <- dim(high_d)[2]
    init.weights <- lapply(1:n.inits, function(i) rgamma(p, 1, 1))
    # Run the solver a couple times and keep the lowest-stress result
    results <- lapply(init.weights, function(init) optim(init, inverse_cost, method = "L-BFGS-B", user_low_d_dist = user_low_d_dist, high_d = high_d, k = k, n.inits = forward.n.inits, dist.func = dist.func, lower = 0))
    result <- results[[which.min(sapply(results, function(i) i$value))]]
    return(result)
}
#############################################################################
## Stress between a user-supplied 2-D configuration and the weighted
## high-dimensional distances, with both distance matrices normalised to
## sum to one before comparison.
old_inverse_cost <- function(weights, low_d, high_d, dist.func) {
    config <- matrix(low_d, ncol = 2)
    d_low <- good.dist(config, dist.func)
    # Normalize the distance matrices
    d_high <- good.dist(high_d, dist.func, weights)
    d_high <- d_high / sum(d_high)
    d_low <- d_low / sum(d_low)
    # Sum of squared elementwise differences
    sum((d_low - d_high) ^ 2)
}
## Fit weights for the "old" inverse problem: start from a normalised
## Gamma(1, 1) draw and minimise old_inverse_cost with box-constrained
## L-BFGS-B (weights kept non-negative).
old_inverse <- function(user_low_d, high_d, dist.func) {
    init <- rgamma(ncol(high_d), 1, 1)
    init <- init / sum(init)
    optim(init, old_inverse_cost,
          lower = 0, method = "L-BFGS-B",
          low_d = user_low_d, high_d = high_d, dist.func = dist.func)
}
########################################################## SMACOF methods
## SMACOF variant of inverse_cost: stress between the user's low-dimensional
## distance matrix and the one induced by running the SMACOF forward MDS
## under `weights`.
##
## NOTE(review): `k` is accepted but never referenced in this body --
## confirm it is intentional.
smacof_inverse_cost <- function(weights, user_low_d_dist, high_d,
                                k, n.inits, dist.func) {
    # Get the low d projection induced by the weights, and its distance matrix
    weights_low_d <- smacof_forward_mds(high_d, weights,
                                        dist.func = dist.func,
                                        n.inits = n.inits)$par
    weights_low_d_dist <- good.dist(weights_low_d, dist.func)
    # Compare the two distance matrices
    diff <- weights_low_d_dist - user_low_d_dist
    stress <- sum(diff^2)
    return(stress)
}
## SMACOF variant of inverse_step: from `n.inits` random Gamma(1, 1) weight
## vectors, minimise smacof_inverse_cost with L-BFGS-B (weights bounded
## below by 0.01) and keep the lowest-stress run.
##
## NOTE(review): as in inverse_step, `k = k` in the optim() call resolves to
## a global `k` that is not a parameter here (and is unused downstream) --
## verify.
smacof_inverse_mds <- function(user_low_d, high_d, dist.func, n.inits, forward.n.inits, seed) {
    set.seed(seed)
    # Calculate the low d distance matrix
    user_low_d_dist <- good.dist(user_low_d, dist.func)
    # Get some random initial weight vectors (i.i.d. Gamma(1, 1) draws)
    p <- dim(high_d)[2]
    init.weights <- lapply(1:n.inits, function(i) rgamma(p, 1, 1))
    # Run the solver a couple times and keep the best result
    results <- lapply(init.weights, function(init)
        optim(init,smacof_inverse_cost, method = "L-BFGS-B",
              user_low_d_dist = user_low_d_dist, high_d = high_d,
              k = k, n.inits = forward.n.inits, dist.func = dist.func,
              lower = 0.01))
    result <- results[[which.min(sapply(results, function(i) i$value))]]
    return(result)
}
|
/dev/inverse_methods.R
|
permissive
|
NathanWycoff/mds.methods
|
R
| false
| false
| 4,003
|
r
|
#!/usr/bin/Rscript
# inverse_methods.R Author "Nathan Wycoff <nathanbrwycoff@gmail.com>" Date 10.03.2017
source("forward_methods.R")
#TODO: Inverse methods may not work due to change of syntax for forward methods
################################################################################
## Functions for inverse
inverse_cost <- function(weights, user_low_d_dist, high_d, k,
n.inits, dist.func) {
#Get the low d projection induced by the weights, and its distance matrix
weights_low_d <- forward_mds(high_d, weights, dist.func,
n.inits = n.inits,
seed = sample(1:1000,1))$par
weights_low_d_dist <- good.dist(weights_low_d, dist.func)
#Compare the two distance matrices
diff <- weights_low_d_dist - user_low_d_dist
stress <- sum(diff^2)
return(stress)
}
###
#@param n.inits the number of intial weight configs
#@param forward.n.inits the number of initial point configs for each forward step
inverse_step <- function(user_low_d, high_d, dist.func, n.inits,
forward.n.inits, seed) {
set.seed(seed)
#Calculate the low d distance matrix
user_low_d_dist <- good.dist(user_low_d, dist.func)
#Get some random
p <- dim(high_d)[2]
init.weights <- lapply(1:n.inits, function(i) rgamma(p, 1, 1))
#Run the solver a couple times
results <- lapply(init.weights, function(init) optim(init, inverse_cost, method = "L-BFGS-B", user_low_d_dist = user_low_d_dist, high_d = high_d, k = k, n.inits = forward.n.inits, dist.func = dist.func, lower = 0))
result <- results[[which.min(sapply(results, function(i) i$value))]]
return(result)
}
#############################################################################
old_inverse_cost <- function(weights, low_d, high_d, dist.func) {
low_d <- matrix(low_d, ncol = 2)
low_d_dist <- good.dist(low_d, dist.func)
#Normalize the distance matrices
high_d_dist <- good.dist(high_d, dist.func, weights)
high_d_dist <- high_d_dist / sum(high_d_dist)
low_d_dist <- low_d_dist / sum(low_d_dist)
diff_mat <- low_d_dist - high_d_dist
#Calculate the stress of all elements
stress <- sum(diff_mat^2)
return(stress)
}
old_inverse <- function(user_low_d, high_d, dist.func) {
weights <- rgamma(ncol(high_d), 1, 1)
weights <- weights / sum(weights)
optim_weights <- optim(weights, old_inverse_cost, lower = 0, method = 'L-BFGS-B', low_d = user_low_d, high_d = high_d, dist.func = dist.func)
return(optim_weights)
}
########################################################## SMACOF methods
smacof_inverse_cost <- function(weights, user_low_d_dist, high_d,
k, n.inits, dist.func) {
#Get the low d projection induced by the weights, and its distance matrix
weights_low_d <- smacof_forward_mds(high_d, weights,
dist.func = dist.func,
n.inits = n.inits)$par
weights_low_d_dist <- good.dist(weights_low_d, dist.func)
#Compare the two distance matrices
diff <- weights_low_d_dist - user_low_d_dist
stress <- sum(diff^2)
return(stress)
}
## Recover feature weights whose forward SMACOF embedding best matches a
## user-supplied 2-D layout, using several random restarts.
##
## user_low_d      : user's low-dimensional configuration.
## high_d          : high-dimensional data matrix (rows = points).
## dist.func       : distance function forwarded to good.dist().
## n.inits         : number of random weight initialisations to try.
## forward.n.inits : restarts used inside each forward SMACOF solve.
## seed            : RNG seed so the random restarts are reproducible.
## k               : embedding dimension forwarded to smacof_inverse_cost()
##                   (default 2). BUG FIX: the original body passed `k = k`
##                   to optim() although `k` was neither a parameter nor a
##                   local, so the call failed with "object 'k' not found"
##                   unless a global `k` happened to exist.
## Returns the optim() result with the lowest attained stress.
smacof_inverse_mds <- function(user_low_d, high_d, dist.func, n.inits,
                               forward.n.inits, seed, k = 2) {
  set.seed(seed)
  ## Distance matrix of the layout we are trying to reproduce
  user_low_d_dist <- good.dist(user_low_d, dist.func)
  ## One random non-negative starting weight vector per restart
  p <- ncol(high_d)
  init.weights <- lapply(seq_len(n.inits), function(i) rgamma(p, 1, 1))
  ## Optimise from each start; lower bound 0.01 keeps weights strictly positive
  results <- lapply(init.weights, function(init)
    optim(init, smacof_inverse_cost, method = "L-BFGS-B",
          user_low_d_dist = user_low_d_dist, high_d = high_d,
          k = k, n.inits = forward.n.inits, dist.func = dist.func,
          lower = 0.01))
  ## Keep the best local optimum
  results[[which.min(vapply(results, function(r) r$value, numeric(1)))]]
}
|
## Fuzzer-generated regression input (AFL + valgrind harness) for
## multivariance:::match_rows: A is a 1x3 matrix of subnormal doubles,
## B a 1x1 zero matrix. str() prints the result for inspection.
testlist <- list(A = structure(c(1.38997190095789e-309, 3.81575932257023e-236, 3.81571422914747e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613125918-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 226
|
r
|
## Duplicate of the fuzzer-generated regression input above for
## multivariance:::match_rows (same AFL/valgrind reproduction case).
testlist <- list(A = structure(c(1.38997190095789e-309, 3.81575932257023e-236, 3.81571422914747e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
#####################################
## VARIOUS PACKAGES REQUIRED
#####################################
## Back End
# Project Pursuit Package for Hyperplane SOlution
library(PPCI)
# JPEG package to read in Image
library(jpeg)
# Package to convert Data into Long Format
library(reshape2)
# Various other packages for mathematical calculations etc.
library(akima)
library(grpss)
library(ks)
library(Matrix)
#####################################
## VARIOUS FUNCTIONS
#####################################
## Function to read in jpeg with indices
## Read one or more JPEG images (from URLs or local paths) and return each as
## a data frame of pixel coordinates plus R/G/B intensities in [0, 1].
##
## FILE       : character vector of image URLs (Url = TRUE) or local paths.
## Image_Name : names for the returned list, one per element of FILE.
## Url        : if TRUE, download each FILE to a tempfile before reading.
## Returns a named list of data frames with columns x, y, R, G, B.
## Requires the jpeg package.
Read.jpeg <- function(FILE, Image_Name, Url = TRUE) {
  ## Guard against silently mis-named (or partially named) output
  if (length(FILE) != length(Image_Name))
    stop("FILE and Image_Name must have the same length.", call. = FALSE)
  ## Preallocated list of raw image arrays (seq_along() is safe for length 0,
  ## unlike the original 1:length(FILE))
  Images <- vector("list", length(FILE))
  if (isTRUE(Url)) {
    ## Download each image to a temporary file, read it, then clean up
    for (i in seq_along(FILE)) {
      z <- tempfile()
      download.file(FILE[i], z, mode = 'wb')
      Images[[i]] <- jpeg::readJPEG(z)
      file.remove(z)
    }
  } else {
    for (i in seq_along(FILE)) {
      Images[[i]] <- jpeg::readJPEG(FILE[i])
    }
  }
  ## Attach x/y pixel coordinates; y counts down from the image height so the
  ## plotted image is not vertically flipped
  Indexed <- lapply(Images, function(img) {
    data.frame(x = rep(seq_len(dim(img)[2]), each = dim(img)[1]),
               y = rep(dim(img)[1]:1, dim(img)[2]),
               R = as.vector(img[, , 1]),
               G = as.vector(img[, , 2]),
               B = as.vector(img[, , 3]))
  })
  names(Indexed) <- Image_Name
  Indexed
}
## Contrasting image function, a.k.a. DeCorrelation Stretch
## DeCorrelation Stretch: rotate the input onto the eigenvectors of its
## covariance matrix (decorrelating the channels), then linearly rescale
## each resulting column to the range [Low, Up].
##
## input : numeric matrix / data frame (e.g. the R, G, B columns).
## Up    : upper bound of the stretched range (default 1).
## Low   : lower bound of the stretched range (default 0).
## Returns a matrix of the same dimensions with each column in [Low, Up].
DC.Stretch <- function(input, Up = 1, Low = 0) {
  ## Project onto the covariance eigenvectors to decorrelate the columns.
  ## ($vectors spelled out — the original relied on partial matching of
  ## "$vector", which resolves to the same component.)
  DC <- as.matrix(input) %*% eigen(cov(input))$vectors
  ## Linear rescale of one vector to [Low, Up]. BUG FIX: the original inner
  ## helper declared self-referential defaults (Up = Up, Low = Low), which
  ## error with "promise already under evaluation" if ever invoked without
  ## arguments; the closure now simply captures Up/Low from the enclosing call.
  stretch_one <- function(v) {
    rng <- range(v)
    (Up - Low) / (rng[2] - rng[1]) * (v - rng[1]) + Low
  }
  apply(DC, 2, stretch_one)
}
## Function to plot images
## Plot an indexed image (data frame with columns x, y, R, G, B), optionally
## colouring pixels by cluster.
##
## data               : image data frame (x, y, R, G, B).
## cluster            : 0 (default) plots the raw pixel colours; otherwise a
##                      per-pixel label vector (1, 2, or 3 = gamma region).
## Title              : plot title.
## Gamma.region.color : colour used for the third ("Gamma") cluster.
## preprocessed       : if TRUE, cluster mean colours are computed from
##                      original.data instead of (possibly stretched) data.
## original.data      : untransformed image used when preprocessed = TRUE.
## Returns list(Colors = colours used, Plot = ggplot object).
## NOTE(review): relies on ggplot2 and dplyr being attached; neither appears
## in this file's library() block — presumably loaded elsewhere (confirm).
Plot.img <- function(data = DCS.img, cluster = 0, Title = '', Gamma.region.color = 'black', preprocessed = FALSE, original.data = Original.img){
## Function to plot image data containing x y R G B
## Option to plot image according to mean RGB values of each cluster
## If region around mdh solution is included within cluster assignment then
## Gamma.region.color will be colored according to selection
## Choosing an CEX size according to image size
## Point size shrinks as the pixel count grows so squares tile the canvas
NR <- nrow(data)
CEX <-ifelse(NR <= 2500, 4.51,
       ifelse(NR <= 10000, 2.1,
              ifelse( NR <= 40000, 0.85,0.225)))
## Setting theme for pictures
THEME.Pic<-theme_bw() + theme(text=element_text(family='sans'),
                     panel.border = element_blank(),
                     panel.grid.major = element_blank(),
                     panel.grid.minor = element_blank(),
                     plot.title = element_text(size=12, hjust=0.5),
                     axis.line = element_blank(),
                     axis.text = element_blank(),
                     axis.ticks = element_blank(),
                     axis.title = element_blank(),
                     plot.margin = rep(unit(0,"cm"),4),
                     panel.spacing = unit(0,"null"),
                     axis.ticks.length = unit(0,"null"))
## Generating SuperPixel color based on cluster
if(cluster[1]!=0){
## To maintain original mean color clusters while using preprocessed images,
## indicate preprocessed==TRUE & include original image data (original.data)
if(preprocessed==TRUE){
Data <- cbind(original.data, cluster)
}
else{
Data <- cbind(data, cluster)
}
## Mean colour per cluster — this is the palette for the cluster plot
RGB.mean <- Data %>% group_by(cluster) %>% dplyr::summarize(R = mean(R), G = mean(G), B =mean(B))
## Mean R G B values to use within plot
## Only one cluster present: pad the palette to three rows so that
## scale_color_manual() below always receives enough values.
if(nrow(RGB.mean) == 1){
## NOTE(review): cluster is numeric here, so levels<- likely has no
## effect unless it is a factor — TODO confirm intent.
levels(RGB.mean$cluster)<-c(1,2,3)
COLOR <- rbind(RGB.mean, RGB.mean, RGB.mean)
COLOR[3,1]<-3
COLOR[2,1]<-2
}
else
COLOR <- data.frame(RGB.mean)
## Plotting image according to cluster mean color
## Labels map 1 -> 'Cluster 1', 2 -> 'Cluster 2', anything else -> 'Gamma'
plt<-ggplot(data=data, aes(x=x, y=y, col=ifelse(cluster == 1, 'Cluster 1',
                                 ifelse(cluster == 2, 'Cluster 2', 'Gamma')))) +
geom_point(size=as.numeric(CEX), shape=15) +
scale_color_manual(values=c(rgb(COLOR[1,2:4]), rgb(COLOR[2,2:4]), Gamma.region.color)) +
coord_fixed() +
ggtitle(Title) +
THEME.Pic +
theme(legend.position = '')
}
else{
## Original R G B values to use within plot
COLOR <- data[, c('R', 'G', 'B')]
## Plotting image according to original R G B values
plt<-ggplot(data=data, aes(x=x, y=y)) +
geom_point(size=as.numeric(CEX), shape=15, col=rgb(COLOR[,c('R', 'G', 'B')])) +
scale_color_manual() +
coord_fixed() +
ggtitle(Title) +
THEME.Pic
}
list(Colors=COLOR, Plot=plt)
}
## Function to pull values within gamma of the hyperplane
## Return the pixels lying within a `gamma` half-width band around the MDH
## hyperplane. The band is centred on the split point B along projection V;
## its half-width is gamma/2 of the projected data's range.
##
## gamma   : fraction of the projected range defining the band width.
## B       : hyperplane offset (split point on the projection).
## V       : projection vector of the MDH solution.
## CL      : current cluster labels, one per row of `input`.
## input   : data matrix/frame; with Picture = TRUE it must contain columns
##           named "R", "G" and "B".
## Picture : if TRUE, project only the R/G/B columns.
## Returns list(hw = half-width, sol.Xv = projected values, Index = row
## indices inside the band, Points = those rows with labels appended).
GammaHype <- function(gamma = 0.1, B = sol.mdh[[1]]$b, V = sol.mdh[[1]]$v,
                      CL = sol.mdh[[1]]$cluster, input = DCS.img,
                      Picture = FALSE) {
  if (!Picture) {
    ## Dimensions must conform before projecting
    if (length(V) != ncol(input))
      stop('Error in input %*% V : non-conformable arguments.\nInput must have columns equal to rows of projection vector V.\n')
    Xv <- as.matrix(input) %*% as.matrix(V)
  }
  if (Picture) {
    ## Colour columns must be present and named conventionally
    CN <- colnames(input)
    if (sum(CN == 'R' | CN == 'B' | CN == 'G') != 3)
      stop('Incorrect data input format.\nPlease rename red, green, and blue column names to "R", "G" , and "B".')
    Xv <- as.matrix(input[, c('R', 'G', 'B')]) %*% as.matrix(V)
  }
  ## Half-width of the band as a fraction of the projected range
  hw <- diff(range(Xv)) * (gamma / 2)
  ## Rows whose projection falls inside [B - hw, B + hw]
  Index <- which(Xv >= (B - hw) & Xv <= (B + hw))
  ## Carry the current labels along with the subset
  labelled <- cbind(input, CL)
  colnames(labelled)[ncol(input) + 1] <- 'cluster'
  list(hw = hw, sol.Xv = Xv, Index = Index, Points = labelled[Index, ])
}
#####################################
## SERVER SIDE CODE
#####################################
## Shiny server: fits a Minimum Density Hyperplane (PPCI::mdh) to a sampled
## set of image pixels, visualises the resulting two-cluster segmentation,
## a gamma band around the hyperplane, and manual/interactive adjustments.
## NOTE(review): depends on shiny, ggplot2, dplyr, gridExtra (grid.arrange),
## grid, gridBase (gridFIG), and plotly in addition to the library() calls
## at the top of this file — confirm they are attached by the app entry point.
shinyServer(function(input, output) {
## URLs of images
## NOTE(review): this URL vector is defined but never used below; images are
## loaded from local ./www files instead.
URLs<-c("https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT2w5H7Jpr0_DJkpLpCZE80T7aiRzeJtyjhH6wQpPwwUfg8XRbW-w",
        "http://i42.photobucket.com/albums/e315/tntoxfox/wall-esmall.jpg",
        "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQD7G1JmkGeFg63G99upZycjQhq_9VZN9V25Vqx3tK9Loe2MNQgkQ",
        "http://static.newworldencyclopedia.org/thumb/6/62/CreoleFood.jpg/200px-CreoleFood.jpg",
        "http://wiki.metin2.co.uk/images/thumb/0/06/Ds_attack_round.jpg/200px-Ds_attack_round.jpg")
## Importing images using function from line 5
img<-Read.jpeg(Url=FALSE,
               FILE = c("./www/dog.jpg",
                        "./www/robot.jpg",
                        "./www/tamarin.jpg",
                        "./www/dinner.jpg",
                        "./www/circle.jpg"),
               Image_Name = c('Dog', 'Robot', 'Tamarin', 'Dinner','Circle'))
## Theme for plots
THEME<-theme_bw() + theme(text=element_text(family='serif')) +
  theme(panel.border=element_blank(), panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(), axis.line=element_line(colour='gray'),
        plot.title=element_text(face='bold', size=15, hjust=0.5))
## Reactive: name of the image selected in the UI (identity switch)
datasetInput <- reactive({
switch(input$image,
       "Dog" = "Dog",
       'Robot' = 'Robot',
       'Tamarin'='Tamarin',
       'Dinner'='Dinner',
       'Circle'='Circle')
})
## Reactive: the selected image plus its decorrelation-stretched version
ContrastInput<-reactive({
DTA <- datasetInput()
pic <- img[[DTA]]
Contrast <- cbind(pic[,1:2], DC.Stretch(pic[,3:5]))
colnames(Contrast) <- c('x', 'y', 'R', 'G', 'B')
list(Contrast=Contrast, pic=pic)
})
## Reactive: use the DCS-preprocessed image? (UI toggle)
DCSInput <- reactive({
input$DCS
})
## Reactive: proportion of pixels to sample for the MDH fit
S.Prop <- reactive({
input$SPROP
})
## Reactive: random row sample of the image used to fit the MDH
Indy<-reactive({
Contrast <- ContrastInput()
S.prop <- S.Prop()
Indy <- sample(x=1:nrow(Contrast$Contrast), size=ceiling(nrow(Contrast$Contrast)*S.prop), replace = FALSE)
return(list(Indy=Indy))
})
## Reactive: fit the MDH on the sampled pixels, label every pixel by which
## side of the hyperplane it falls on, and reassign the gamma-band pixels
## according to the chosen rule (1/2 = force that cluster, 3 = density-
## gradient heuristic via ks::kdde).
MethodInput<-reactive({
Contrast <- ContrastInput()
Alpha <- alphainput()
Reassign <- AssInput()
Gamma <- gammainput()
Indy <- Indy()$Indy
BOUND <- 0
DCS <- DCSInput()
if(DCS==TRUE)
Contrast<-Contrast$Contrast
else
Contrast<-Contrast$pic
USE <- as.matrix(Contrast[Indy,c('R','G','B')])
sol.mdh <- mdh(X = USE, alphamin = Alpha, alphamax = Alpha)
## V = projection vector, B = split point, BW = kernel bandwidth used by mdh
V <- sol.mdh[[1]]$v
B <- sol.mdh[[1]]$b
BW <- sol.mdh[[1]]$params$h
## Project every pixel (not just the sample) and assign sides
oim <- as.matrix(Contrast[,3:5]) %*% V
Clust <- ifelse(oim > B, 1, 2)
rownames(Clust) <- seq(1:nrow(Clust))
O.lab <- Clust
## Gamma Region
Gamma.region <- GammaHype(gamma=Gamma, input = Contrast, B = B, V = V, CL = Clust, Picture = TRUE)
## Gamma Region RGB values
Gam.points <- Gamma.region$Points
G.index <- Gamma.region$Index
if(Reassign==3){
## Density-gradient heuristic: push each gamma pixel to the side its
## estimated density derivative points toward
Heuristic <- kdde(x=Contrast[Indy,3:5], H=diag(rep(BW,3)), eval.points=Gam.points[,3:5], deriv.order = 1)
Uni.side<- Heuristic$estimate %*% V
NewLabel <- ifelse(Uni.side > B, 1, 2)
O.lab[Gamma.region$Index]<-NewLabel
}else
O.lab[Gamma.region$Index]<-Reassign
return(list(V=V, B=B, Clust=Clust, BW=BW, sol=sol.mdh, Gamma.region=Gamma.region, G.index=G.index, Gamma.Labels=O.lab))
})
## Reactive: long-format RGB values (reshape2::melt) for the density plot
Meltinput<-reactive({
Contrast <- ContrastInput()
DCS <- DCSInput()
if(DCS==TRUE)
Contrast <- Contrast$Contrast
else
Contrast <- Contrast$pic
melt(Contrast[,3:5])
})
## Helper: embed an external HTML page in the UI
getPage <- function(X){
return(includeHTML(X))}
## Reactive: 1-D kernel density of the pixels projected onto the MDH vector
DensityInput<-reactive({
Contrast <- ContrastInput()
DCS <- DCSInput()
if(DCS==TRUE)
Contrast<-Contrast$Contrast
else
Contrast<-Contrast$pic
Clust <- MethodInput()
B <- Clust$B
V <- Clust$V
Rezone <- as.matrix(Contrast[,c('R','G','B')]) %*% V
dens <- density(Rezone, n=nrow(Contrast))
DEN <- as.data.frame(cbind(x=dens$x, y=dens$y))
return(list(Rezone=Rezone, DEN=DEN, B=B))
})
## Reactive: point size for geom_point, shrinking as the pixel count grows
cexInput<-reactive({
Contrast <- ContrastInput()
DCS <- DCSInput()
if(DCS==TRUE)
Contrast <- Contrast$Contrast
else
Contrast <- Contrast$pic
NR <- nrow(Contrast)
CEX <- ifelse(NR <= 2500, 4.51,
              ifelse(NR <= 10000, 2.1,
                     ifelse( NR <= 40000, 0.85,0.225)))
return(CEX)
})
## Reactive: 3-D kernel density estimate of the RGB cloud.
## NOTE(review): not referenced by any output in this file's visible code.
DimInput<-reactive({
Contrast <- ContrastInput()
DCS <- DCSInput()
if(DCS==TRUE)
Contrast <- Contrast$Contrast
else
Contrast <- Contrast$pic
fhat <- kde(Contrast[,3:5])
return(fhat)
})
## Simple UI-input passthrough reactives
inputDD<-reactive({
input$DD})
gammainput<-reactive({
input$GAMMA
})
alphainput<-reactive({
input$ALPHA
})
AssInput <- reactive({input$ASS})
## Output image according to ImgPlot
## 2x2 panel: original/DCS image, MDH clustering, gamma band, adjusted labels
output$ImgPlot <- renderPlot({
## Collecting Reactives
Contrast <- ContrastInput()
O.pic <- Contrast$pic
Contrast <- Contrast$Contrast
Outsidein <- MethodInput()
G.labs <- Outsidein$Gamma.Labels
G.points <- Outsidein$Gamma.region
CL <- Outsidein$Clust
DCS <- DCSInput()
CEX <- cexInput()
## Updating Cluster List (CL) to account for 3rd cluster, Gamma cluster
CL.gamma <- CL
CL.gamma[G.points$Index] <- 3
## Illustrate DCS image or not
if(DCS==TRUE){
p1 <- Plot.img(data = Contrast, Title = 'Decorrelated & Stretched Image')
p2 <- Plot.img(data = Contrast, cluster = CL, Title = 'MDH Solution',
               preprocessed = FALSE, original.data = O.pic)
p3 <- Plot.img(data = Contrast, preprocessed = FALSE, Title = '\u0393 Region',
               cluster = CL.gamma, Gamma.region.color = 'cyan')
p4 <- Plot.img(data = Contrast, cluster = G.labs, Title = 'Adjusted Solution',
               preprocessed = FALSE, original.data = O.pic)
}else{
p1 <- Plot.img(data = O.pic, Title = 'Original Image')
p2 <- Plot.img(data = Contrast, cluster = CL, Title = 'MDH Solution',
               preprocessed = TRUE, original.data = O.pic)
p3 <- Plot.img(data = O.pic, cluster = CL.gamma, Title = '\u0393 Region', Gamma.region.color = 'cyan')
p4 <- Plot.img(data = Contrast, cluster = G.labs, Title = 'Adjusted Solution',
               preprocessed = TRUE, original.data = O.pic)
}
grid.arrange(p1$Plot, p2$Plot, p3$Plot, p4$Plot, ncol=2, nrow=2)
})
## Density view: RGB densities, projected-density ribbon around the MDH
## split point, and the PPCI hp_plot of the fitted solution
output$DenPlot <- renderPlot({
## Collecting Reactives of image and cluster by method
Contrast <- ContrastInput()
O.pic <- Contrast$pic
Contrast <- Contrast$Contrast
G.dex <- gammainput()
Clust <- MethodInput()
Sol <- Clust$sol
Indy <- Indy()
USE <- Indy$Indy
B <- Clust$B
V <- Clust$V
Clust <-Clust$Clust
## Using reactive to melt RGB for density plots
RGB <- Meltinput()
## Using reactive to select Rezoned values and density
DEN<-DensityInput()
Rezone <- DEN$Rezone
DEN<-DEN$DEN
## NOTE(review): dens/DEN are recomputed here, overwriting the values just
## taken from DensityInput() — redundant but harmless.
dens <- density(Rezone, n=nrow(Contrast))
DEN <- as.data.frame(cbind(x=dens$x, y=dens$y))
p2<-ggplot(data=RGB, aes(x=value, fill=variable, color=variable)) +
geom_density(alpha=0.35) +
labs(x="Color Intensity", y = "Density", title="Initial RGB Density") +
THEME +
theme(legend.position = c(0.99,0.8),
      legend.title = element_text(colour = 'white'),
      legend.spacing.y = unit(4,'cm'))
## Illustrate DCS colours or original in density plot
DCS<-DCSInput()
if(DCS==TRUE){
Colors <- Plot.img(data = Contrast, cluster = Clust, Title = 'Clustered Image',
                   preprocessed = FALSE, original.data = O.pic)$Colors
DATA<-as.matrix(Contrast[,3:5])
}
if(DCS==FALSE){
Colors <- Plot.img(data = Contrast, cluster = Clust, Title = 'Clustered Image',
                   preprocessed = TRUE,
                   original.data = O.pic)$Colors
DATA<-as.matrix(O.pic[,3:5])
}
## Gamma region colour
## NOTE(review): the band here is B +/- GAMMA/2 in absolute projected units,
## whereas GammaHype() scales gamma by the projected range — confirm the two
## are meant to agree.
G.low<-B-(G.dex/2)
G.hig<-B+(G.dex/2)
ecdf_fun <- function(x,perc) ecdf(x)(perc)
Lower<-ecdf_fun(DEN$x, G.low)
Upper<-ecdf_fun(DEN$x, G.hig)
## Ribbon plot: cluster-2 colour below the band, cyan inside, cluster-1 above
p4<-ggplot(data=DEN, aes(x, y)) +
geom_ribbon(data=subset(DEN, x <= quantile(DEN$x,Lower)[1]),
            aes(x=x, ymax=y), ymin=0, fill=rgb(Colors[2,2:4]), alpha=0.4)+
geom_ribbon(data=subset(DEN, x > quantile(DEN$x,Lower)[1] & x <= quantile(DEN$x,Upper)[1]),
            aes(x=x, ymax=y), ymin=0, fill=rgb(0, 1, 1), alpha=0.4)+
geom_ribbon(data=subset(DEN, x > quantile(DEN$x,Upper)[1]),
            aes(x=x, ymax=y), ymin=0, fill=rgb(Colors[1,2:4]), alpha=0.4) +
geom_vline(xintercept = B, linetype='dashed') +
ggtitle("Minimum Density Hyperplane") +
labs(x="Optimized values for separation", y = "") +
THEME +
scale_x_continuous(breaks = round(seq(min(DEN$x), max(DEN$x), by = 0.2),1))
## Mixed grid/base layout: two ggplots on top, base-graphics hp_plot below
plot.new()
grid.newpage()
pushViewport(viewport(layout = grid.layout(2, 2)))
#Draw ggplot
pushViewport(viewport(layout.pos.row = 1, layout.pos.col = 2))
print(p4, newpage = FALSE)
popViewport()
#Draw ggplot
pushViewport(viewport(layout.pos.row = 1, layout.pos.col = 1))
print(p2, newpage = FALSE)
popViewport()
#Draw base plot
pushViewport(viewport(layout.pos.row = 2, layout.pos.col = c(1:2)))
par(fig = gridFIG(), new = TRUE)
hp_plot(Sol, DATA)
popViewport()
#grid.arrange(p2, p4, ncol=2)
})
## Manual split: image + density coloured by a user-chosen threshold (DD)
output$ManPlotDensity<- renderPlot({
## Manual Adjustments
Contrast <- ContrastInput()
O.pic <- Contrast$pic
Contrast <- as.data.frame(Contrast$Contrast)
DEN <- DensityInput()
Rezone <- DEN$Rezone
DEN <- DEN$DEN
MD <- input$DD
CEX <- cexInput()
## Pixels at or below the manual threshold are red, the rest black
Contrast$MCG <- ifelse(Rezone <= MD, 'red', 'black')
p5 <- ggplot(data=Contrast, aes(x=x, y=y, col=MCG)) +
geom_point(shape=15)+
scale_color_identity() +
coord_fixed() +
ggtitle("Manual Clustered Image") +
THEME +
theme(axis.line=element_blank(),
      axis.text.x=element_blank(),
      axis.text.y=element_blank(),
      axis.ticks=element_blank(),
      axis.title.x=element_blank(),
      axis.title.y=element_blank())
p6 <- ggplot(data=DEN, aes(x, y)) +
geom_ribbon(data=subset(DEN, x <= MD), aes(x=x, ymax=y), ymin=0, fill='red', alpha=0.4)+
geom_ribbon(data=subset(DEN, x > MD), aes(x=x, ymax=y), ymin=0, fill='black', alpha=0.4) +
geom_vline(xintercept = MD, linetype='dashed') +
labs(x="Solution values", y = "Density", title="Kernel Density of RGB Values") +
THEME +
scale_x_continuous(breaks = round(seq(min(DEN$x), max(DEN$x), by = 0.2),1))
grid.arrange(p5, p6, nrow = 1)
})
## Manual split: image-only view of the same threshold colouring
output$ManPlotImage<- renderPlot({
## Manual Adjustments
Contrast<-ContrastInput()
O.pic <- Contrast$pic
Contrast<-Contrast$Contrast
DEN<-DensityInput()
Rezone<-DEN$Rezone
DEN<- DEN$DEN
MD<-input$DD
CEX<-cexInput()
Contrast$MCG <- ifelse(Rezone <= MD, 'red', 'black')
p5 <-ggplot(data=Contrast, aes(x=x, y=y, col=MCG)) +
geom_point(size=as.numeric(CEX), shape=15)+
scale_color_identity() +
coord_fixed() +
ggtitle("Manual Clustered Image") +
THEME +
theme(axis.line=element_blank(),
      axis.text.x=element_blank(),
      axis.text.y=element_blank(),
      axis.ticks=element_blank(),
      axis.title.x=element_blank(),
      axis.title.y=element_blank())
print(p5)
})
## 3-D scatter of the sampled RGB cloud in the pixels' own colours
output$plot<-renderPlotly({
## Reactive elements
Contrast <- ContrastInput()
USE <- Indy()
USE <- USE$Indy
DCS <- DCSInput()
if(DCS==TRUE)
Contrast <- Contrast$Contrast[USE,]
else
Contrast <- Contrast$pic[USE,]
ax <- list(title = "", zeroline = FALSE, showline = FALSE, showticklabels = FALSE, showgrid = FALSE)
plot_ly(Contrast, x = ~R, y = ~G, z = ~B,
        hoverinfo="none",
        marker = list(size = 3,
                      color = rgb(Contrast[,3:5]))) %>%
add_markers()%>%
layout(xaxis = ax, yaxis=ax, showlegend = FALSE, plot_bgcolor = '#222d32',
       paper_bgcolor = '#222d32')
})
## 3-D scatter of the sampled RGB cloud coloured by cluster + gamma band
output$GAMplot<-renderPlotly({
GWIDTH <- gammainput()
Contrast <- ContrastInput()
DCS <- DCSInput()
Outsidein<- MethodInput()
G.index <- Outsidein$G.index
B <- Outsidein$B
V <- Outsidein$V
BW <- Outsidein$BW
CL <- Outsidein$Clust
Indy <- Indy()
USE <- Indy$Indy
if(DCS==TRUE)
Contrast <- Contrast$Contrast[USE,]
else
Contrast <- Contrast$pic[USE,]
## Updating Cluster List (CL) to account for 3rd cluster, Gamma cluster
CL.gamma <- CL
# CL.gamma[Gamma.region$Index] <- 3
CL.gamma[G.index] <- 3
p3 <- Plot.img(data = Contrast, preprocessed = FALSE, Title = '\u0393 Region',
               cluster = CL[USE])$Color
ax <- list(title = "", zeroline = FALSE, showline = FALSE, showticklabels = FALSE, showgrid = FALSE)
COLORS<-c(rgb(p3[1,2:4], maxColorValue = 1),
          rgb(p3[2,2:4], maxColorValue = 1), rgb(red=0, blue = 1, green = 1, maxColorValue = 1))
Contrast<-cbind(Contrast, CL.gamma[USE])
Contrast<-as.data.frame(Contrast)
## NOTE(review): cbind() of the non-syntactic expression CL.gamma[USE] does
## not produce a column literally named "CL.gamma", so Contrast$CL.gamma may
## be NULL here — verify this output renders as intended.
Contrast$CL.gamma <- ifelse(Contrast$CL.gamma == 1, COLORS[1],
                            ifelse(Contrast$CL.gamma == 2, COLORS[2], COLORS[3]))
plot_ly(Contrast, x = ~R, y = ~G, z = ~B, type="scatter3d", hoverinfo="none",
        marker = list(size=3,
                      color = ~CL.gamma,
                      colors= c(COLORS[1], COLORS[2], COLORS[3]))) %>%
layout(xaxis = ax, yaxis=ax, showlegend = FALSE, plot_bgcolor = '#222d32',
       paper_bgcolor = '#222d32')
})
## 3-D scatter of the sampled RGB cloud coloured by the adjusted labels
output$Finalplot<-renderPlotly({
Gamma.lab <- MethodInput()
Indy <- Indy()
USE <- Indy$Indy
CL <- Gamma.lab$Clust
G.index <- Gamma.lab$G.index
Gamma.lab <- Gamma.lab$Gamma.Labels
Contrast <- ContrastInput()
DCS <- DCSInput()
if(DCS==TRUE)
Contrast <- Contrast$Contrast[USE,]
else
Contrast <- Contrast$pic[USE,]
p3 <- Plot.img(data = Contrast, preprocessed = FALSE, Title = '\u0393 Region',
               cluster = Gamma.lab[USE])$Color
ax <- list(title = "", zeroline = FALSE, showline = FALSE, showticklabels = FALSE, showgrid = FALSE)
COLORS<-c(rgb(p3[1,2:4], maxColorValue = 1), rgb(p3[2,2:4], maxColorValue = 1), rgb(red=0, blue = 1, green = 1, maxColorValue = 1))
Contrast<-cbind(Contrast, Gamma.lab[USE])
Contrast<-as.data.frame(Contrast)
## NOTE(review): same concern as GAMplot — Contrast$Gamma.lab may be NULL
## because the cbind-ed column is not literally named "Gamma.lab"; confirm.
Contrast$CL.gamma <- ifelse(Contrast$Gamma.lab == 1, COLORS[1],
                            ifelse(Contrast$Gamma.lab == 2, COLORS[2], COLORS[3]))
plot_ly(Contrast, x = ~R, y = ~G, z = ~B, type="scatter3d", hoverinfo="none",
        marker = list(size=3, color=~CL.gamma, colors=c(COLORS[1], COLORS[2]))) %>%
layout(xaxis=ax, yaxis=ax, showlegend=FALSE, plot_bgcolor='#222d32', paper_bgcolor='#222d32')
})
## Embed the PPCI package's CRAN page for reference
output$RESMDH <- renderUI({getPage("https://cran.r-project.org/web/packages/PPCI/index.html")})
})
|
/server.R
|
no_license
|
JacobBradleyKenyon/hyper-planes-in-images
|
R
| false
| false
| 24,876
|
r
|
#####################################
## VARIOUS PACKAGES REQUIRED
#####################################
## Back End
# Project Pursuit Package for Hyperplane SOlution
library(PPCI)
# JPEG package to read in Image
library(jpeg)
# Package to convert Data into Long Format
library(reshape2)
# Various other packages for mathematical calculations etc.
library(akima)
library(grpss)
library(ks)
library(Matrix)
#####################################
## VARIOUS FUNCTIONS
#####################################
## Function to read in jpeg with indices
## Read one or more JPEG images (from URLs or local paths) and return each as
## a data frame of pixel coordinates plus R/G/B intensities in [0, 1].
##
## FILE       : character vector of image URLs (Url = TRUE) or local paths.
## Image_Name : names for the returned list, one per element of FILE.
## Url        : if TRUE, download each FILE to a tempfile before reading.
## Returns a named list of data frames with columns x, y, R, G, B.
## Requires the jpeg package.
Read.jpeg <- function(FILE, Image_Name, Url = TRUE) {
  ## Guard against silently mis-named (or partially named) output
  if (length(FILE) != length(Image_Name))
    stop("FILE and Image_Name must have the same length.", call. = FALSE)
  ## Preallocated list of raw image arrays (seq_along() is safe for length 0,
  ## unlike the original 1:length(FILE))
  Images <- vector("list", length(FILE))
  if (isTRUE(Url)) {
    ## Download each image to a temporary file, read it, then clean up
    for (i in seq_along(FILE)) {
      z <- tempfile()
      download.file(FILE[i], z, mode = 'wb')
      Images[[i]] <- jpeg::readJPEG(z)
      file.remove(z)
    }
  } else {
    for (i in seq_along(FILE)) {
      Images[[i]] <- jpeg::readJPEG(FILE[i])
    }
  }
  ## Attach x/y pixel coordinates; y counts down from the image height so the
  ## plotted image is not vertically flipped
  Indexed <- lapply(Images, function(img) {
    data.frame(x = rep(seq_len(dim(img)[2]), each = dim(img)[1]),
               y = rep(dim(img)[1]:1, dim(img)[2]),
               R = as.vector(img[, , 1]),
               G = as.vector(img[, , 2]),
               B = as.vector(img[, , 3]))
  })
  names(Indexed) <- Image_Name
  Indexed
}
## Contrasting image function, a.k.a. DeCorrelation Stretch
## DeCorrelation Stretch: rotate the input onto the eigenvectors of its
## covariance matrix (decorrelating the channels), then linearly rescale
## each resulting column to the range [Low, Up].
##
## input : numeric matrix / data frame (e.g. the R, G, B columns).
## Up    : upper bound of the stretched range (default 1).
## Low   : lower bound of the stretched range (default 0).
## Returns a matrix of the same dimensions with each column in [Low, Up].
DC.Stretch <- function(input, Up = 1, Low = 0) {
  ## Project onto the covariance eigenvectors to decorrelate the columns.
  ## ($vectors spelled out — the original relied on partial matching of
  ## "$vector", which resolves to the same component.)
  DC <- as.matrix(input) %*% eigen(cov(input))$vectors
  ## Linear rescale of one vector to [Low, Up]. BUG FIX: the original inner
  ## helper declared self-referential defaults (Up = Up, Low = Low), which
  ## error with "promise already under evaluation" if ever invoked without
  ## arguments; the closure now simply captures Up/Low from the enclosing call.
  stretch_one <- function(v) {
    rng <- range(v)
    (Up - Low) / (rng[2] - rng[1]) * (v - rng[1]) + Low
  }
  apply(DC, 2, stretch_one)
}
## Function to plot images
## Plot an indexed image (data frame with columns x, y, R, G, B), optionally
## colouring pixels by cluster.
##
## data               : image data frame (x, y, R, G, B).
## cluster            : 0 (default) plots the raw pixel colours; otherwise a
##                      per-pixel label vector (1, 2, or 3 = gamma region).
## Title              : plot title.
## Gamma.region.color : colour used for the third ("Gamma") cluster.
## preprocessed       : if TRUE, cluster mean colours are computed from
##                      original.data instead of (possibly stretched) data.
## original.data      : untransformed image used when preprocessed = TRUE.
## Returns list(Colors = colours used, Plot = ggplot object).
## NOTE(review): relies on ggplot2 and dplyr being attached; neither appears
## in this file's library() block — presumably loaded elsewhere (confirm).
Plot.img <- function(data = DCS.img, cluster = 0, Title = '', Gamma.region.color = 'black', preprocessed = FALSE, original.data = Original.img){
## Function to plot image data containing x y R G B
## Option to plot image according to mean RGB values of each cluster
## If region around mdh solution is included within cluster assignment then
## Gamma.region.color will be colored according to selection
## Choosing an CEX size according to image size
## Point size shrinks as the pixel count grows so squares tile the canvas
NR <- nrow(data)
CEX <-ifelse(NR <= 2500, 4.51,
       ifelse(NR <= 10000, 2.1,
              ifelse( NR <= 40000, 0.85,0.225)))
## Setting theme for pictures
THEME.Pic<-theme_bw() + theme(text=element_text(family='sans'),
                     panel.border = element_blank(),
                     panel.grid.major = element_blank(),
                     panel.grid.minor = element_blank(),
                     plot.title = element_text(size=12, hjust=0.5),
                     axis.line = element_blank(),
                     axis.text = element_blank(),
                     axis.ticks = element_blank(),
                     axis.title = element_blank(),
                     plot.margin = rep(unit(0,"cm"),4),
                     panel.spacing = unit(0,"null"),
                     axis.ticks.length = unit(0,"null"))
## Generating SuperPixel color based on cluster
if(cluster[1]!=0){
## To maintain original mean color clusters while using preprocessed images,
## indicate preprocessed==TRUE & include original image data (original.data)
if(preprocessed==TRUE){
Data <- cbind(original.data, cluster)
}
else{
Data <- cbind(data, cluster)
}
## Mean colour per cluster — this is the palette for the cluster plot
RGB.mean <- Data %>% group_by(cluster) %>% dplyr::summarize(R = mean(R), G = mean(G), B =mean(B))
## Mean R G B values to use within plot
## Only one cluster present: pad the palette to three rows so that
## scale_color_manual() below always receives enough values.
if(nrow(RGB.mean) == 1){
## NOTE(review): cluster is numeric here, so levels<- likely has no
## effect unless it is a factor — TODO confirm intent.
levels(RGB.mean$cluster)<-c(1,2,3)
COLOR <- rbind(RGB.mean, RGB.mean, RGB.mean)
COLOR[3,1]<-3
COLOR[2,1]<-2
}
else
COLOR <- data.frame(RGB.mean)
## Plotting image according to cluster mean color
## Labels map 1 -> 'Cluster 1', 2 -> 'Cluster 2', anything else -> 'Gamma'
plt<-ggplot(data=data, aes(x=x, y=y, col=ifelse(cluster == 1, 'Cluster 1',
                                 ifelse(cluster == 2, 'Cluster 2', 'Gamma')))) +
geom_point(size=as.numeric(CEX), shape=15) +
scale_color_manual(values=c(rgb(COLOR[1,2:4]), rgb(COLOR[2,2:4]), Gamma.region.color)) +
coord_fixed() +
ggtitle(Title) +
THEME.Pic +
theme(legend.position = '')
}
else{
## Original R G B values to use within plot
COLOR <- data[, c('R', 'G', 'B')]
## Plotting image according to original R G B values
plt<-ggplot(data=data, aes(x=x, y=y)) +
geom_point(size=as.numeric(CEX), shape=15, col=rgb(COLOR[,c('R', 'G', 'B')])) +
scale_color_manual() +
coord_fixed() +
ggtitle(Title) +
THEME.Pic
}
list(Colors=COLOR, Plot=plt)
}
## Function to pull values within gamma of the hyperplane
## Return the pixels lying within a `gamma` half-width band around the MDH
## hyperplane. The band is centred on the split point B along projection V;
## its half-width is gamma/2 of the projected data's range.
##
## gamma   : fraction of the projected range defining the band width.
## B       : hyperplane offset (split point on the projection).
## V       : projection vector of the MDH solution.
## CL      : current cluster labels, one per row of `input`.
## input   : data matrix/frame; with Picture = TRUE it must contain columns
##           named "R", "G" and "B".
## Picture : if TRUE, project only the R/G/B columns.
## Returns list(hw = half-width, sol.Xv = projected values, Index = row
## indices inside the band, Points = those rows with labels appended).
GammaHype <- function(gamma = 0.1, B = sol.mdh[[1]]$b, V = sol.mdh[[1]]$v,
                      CL = sol.mdh[[1]]$cluster, input = DCS.img,
                      Picture = FALSE) {
  if (!Picture) {
    ## Dimensions must conform before projecting
    if (length(V) != ncol(input))
      stop('Error in input %*% V : non-conformable arguments.\nInput must have columns equal to rows of projection vector V.\n')
    Xv <- as.matrix(input) %*% as.matrix(V)
  }
  if (Picture) {
    ## Colour columns must be present and named conventionally
    CN <- colnames(input)
    if (sum(CN == 'R' | CN == 'B' | CN == 'G') != 3)
      stop('Incorrect data input format.\nPlease rename red, green, and blue column names to "R", "G" , and "B".')
    Xv <- as.matrix(input[, c('R', 'G', 'B')]) %*% as.matrix(V)
  }
  ## Half-width of the band as a fraction of the projected range
  hw <- diff(range(Xv)) * (gamma / 2)
  ## Rows whose projection falls inside [B - hw, B + hw]
  Index <- which(Xv >= (B - hw) & Xv <= (B + hw))
  ## Carry the current labels along with the subset
  labelled <- cbind(input, CL)
  colnames(labelled)[ncol(input) + 1] <- 'cluster'
  list(hw = hw, sol.Xv = Xv, Index = Index, Points = labelled[Index, ])
}
#####################################
## SERVER SIDE CODE
#####################################
shinyServer(function(input, output) {
## URLs of images
URLs<-c("https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT2w5H7Jpr0_DJkpLpCZE80T7aiRzeJtyjhH6wQpPwwUfg8XRbW-w",
"http://i42.photobucket.com/albums/e315/tntoxfox/wall-esmall.jpg",
"https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQD7G1JmkGeFg63G99upZycjQhq_9VZN9V25Vqx3tK9Loe2MNQgkQ",
"http://static.newworldencyclopedia.org/thumb/6/62/CreoleFood.jpg/200px-CreoleFood.jpg",
"http://wiki.metin2.co.uk/images/thumb/0/06/Ds_attack_round.jpg/200px-Ds_attack_round.jpg")
## Importing images using function from line 5
img<-Read.jpeg(Url=FALSE,
FILE = c("./www/dog.jpg",
"./www/robot.jpg",
"./www/tamarin.jpg",
"./www/dinner.jpg",
"./www/circle.jpg"),
Image_Name = c('Dog', 'Robot', 'Tamarin', 'Dinner','Circle'))
## Theme for plots
THEME<-theme_bw() + theme(text=element_text(family='serif')) +
theme(panel.border=element_blank(), panel.grid.major=element_blank(),
panel.grid.minor=element_blank(), axis.line=element_line(colour='gray'),
plot.title=element_text(face='bold', size=15, hjust=0.5))
datasetInput <- reactive({
switch(input$image,
"Dog" = "Dog",
'Robot' = 'Robot',
'Tamarin'='Tamarin',
'Dinner'='Dinner',
'Circle'='Circle')
})
ContrastInput<-reactive({
DTA <- datasetInput()
pic <- img[[DTA]]
Contrast <- cbind(pic[,1:2], DC.Stretch(pic[,3:5]))
colnames(Contrast) <- c('x', 'y', 'R', 'G', 'B')
list(Contrast=Contrast, pic=pic)
})
DCSInput <- reactive({
input$DCS
})
S.Prop <- reactive({
input$SPROP
})
Indy<-reactive({
Contrast <- ContrastInput()
S.prop <- S.Prop()
Indy <- sample(x=1:nrow(Contrast$Contrast), size=ceiling(nrow(Contrast$Contrast)*S.prop), replace = FALSE)
return(list(Indy=Indy))
})
MethodInput<-reactive({
Contrast <- ContrastInput()
Alpha <- alphainput()
Reassign <- AssInput()
Gamma <- gammainput()
Indy <- Indy()$Indy
BOUND <- 0
DCS <- DCSInput()
if(DCS==TRUE)
Contrast<-Contrast$Contrast
else
Contrast<-Contrast$pic
USE <- as.matrix(Contrast[Indy,c('R','G','B')])
sol.mdh <- mdh(X = USE, alphamin = Alpha, alphamax = Alpha)
V <- sol.mdh[[1]]$v
B <- sol.mdh[[1]]$b
BW <- sol.mdh[[1]]$params$h
oim <- as.matrix(Contrast[,3:5]) %*% V
Clust <- ifelse(oim > B, 1, 2)
rownames(Clust) <- seq(1:nrow(Clust))
O.lab <- Clust
## Gamma Region
Gamma.region <- GammaHype(gamma=Gamma, input = Contrast, B = B, V = V, CL = Clust, Picture = TRUE)
## Gamma Region RGB values
Gam.points <- Gamma.region$Points
G.index <- Gamma.region$Index
if(Reassign==3){
Heuristic <- kdde(x=Contrast[Indy,3:5], H=diag(rep(BW,3)), eval.points=Gam.points[,3:5], deriv.order = 1)
Uni.side<- Heuristic$estimate %*% V
NewLabel <- ifelse(Uni.side > B, 1, 2)
O.lab[Gamma.region$Index]<-NewLabel
}else
O.lab[Gamma.region$Index]<-Reassign
return(list(V=V, B=B, Clust=Clust, BW=BW, sol=sol.mdh, Gamma.region=Gamma.region, G.index=G.index, Gamma.Labels=O.lab))
})
Meltinput<-reactive({
Contrast <- ContrastInput()
DCS <- DCSInput()
if(DCS==TRUE)
Contrast <- Contrast$Contrast
else
Contrast <- Contrast$pic
melt(Contrast[,3:5])
})
getPage <- function(X){
return(includeHTML(X))}
DensityInput<-reactive({
Contrast <- ContrastInput()
DCS <- DCSInput()
if(DCS==TRUE)
Contrast<-Contrast$Contrast
else
Contrast<-Contrast$pic
Clust <- MethodInput()
B <- Clust$B
V <- Clust$V
Rezone <- as.matrix(Contrast[,c('R','G','B')]) %*% V
dens <- density(Rezone, n=nrow(Contrast))
DEN <- as.data.frame(cbind(x=dens$x, y=dens$y))
return(list(Rezone=Rezone, DEN=DEN, B=B))
})
cexInput<-reactive({
Contrast <- ContrastInput()
DCS <- DCSInput()
if(DCS==TRUE)
Contrast <- Contrast$Contrast
else
Contrast <- Contrast$pic
NR <- nrow(Contrast)
CEX <- ifelse(NR <= 2500, 4.51,
ifelse(NR <= 10000, 2.1,
ifelse( NR <= 40000, 0.85,0.225)))
return(CEX)
})
DimInput<-reactive({
Contrast <- ContrastInput()
DCS <- DCSInput()
if(DCS==TRUE)
Contrast <- Contrast$Contrast
else
Contrast <- Contrast$pic
fhat <- kde(Contrast[,3:5])
return(fhat)
})
inputDD<-reactive({
input$DD})
gammainput<-reactive({
input$GAMMA
})
alphainput<-reactive({
input$ALPHA
})
AssInput <- reactive({input$ASS})
## Output image according to ImgPlot
output$ImgPlot <- renderPlot({
## Collecting Reactives
Contrast <- ContrastInput()
O.pic <- Contrast$pic
Contrast <- Contrast$Contrast
Outsidein <- MethodInput()
G.labs <- Outsidein$Gamma.Labels
G.points <- Outsidein$Gamma.region
CL <- Outsidein$Clust
DCS <- DCSInput()
CEX <- cexInput()
## Updating Cluster List (CL) to account for 3rd cluster, Gamma cluster
CL.gamma <- CL
CL.gamma[G.points$Index] <- 3
## Illustrate DCS image or not
if(DCS==TRUE){
p1 <- Plot.img(data = Contrast, Title = 'Decorrelated & Stretched Image')
p2 <- Plot.img(data = Contrast, cluster = CL, Title = 'MDH Solution',
preprocessed = FALSE, original.data = O.pic)
p3 <- Plot.img(data = Contrast, preprocessed = FALSE, Title = '\u0393 Region',
cluster = CL.gamma, Gamma.region.color = 'cyan')
p4 <- Plot.img(data = Contrast, cluster = G.labs, Title = 'Adjusted Solution',
preprocessed = FALSE, original.data = O.pic)
}else{
p1 <- Plot.img(data = O.pic, Title = 'Original Image')
p2 <- Plot.img(data = Contrast, cluster = CL, Title = 'MDH Solution',
preprocessed = TRUE, original.data = O.pic)
p3 <- Plot.img(data = O.pic, cluster = CL.gamma, Title = '\u0393 Region', Gamma.region.color = 'cyan')
p4 <- Plot.img(data = Contrast, cluster = G.labs, Title = 'Adjusted Solution',
preprocessed = TRUE, original.data = O.pic)
}
grid.arrange(p1$Plot, p2$Plot, p3$Plot, p4$Plot, ncol=2, nrow=2)
})
output$DenPlot <- renderPlot({
## Collecting Reactives of image and cluster by method
Contrast <- ContrastInput()
O.pic <- Contrast$pic
Contrast <- Contrast$Contrast
G.dex <- gammainput()
Clust <- MethodInput()
Sol <- Clust$sol
Indy <- Indy()
USE <- Indy$Indy
B <- Clust$B
V <- Clust$V
Clust <-Clust$Clust
## Using reactive to melt RGB for density plots
RGB <- Meltinput()
## Using reactive to select Rezoned values and density
DEN<-DensityInput()
Rezone <- DEN$Rezone
DEN<-DEN$DEN
dens <- density(Rezone, n=nrow(Contrast))
DEN <- as.data.frame(cbind(x=dens$x, y=dens$y))
p2<-ggplot(data=RGB, aes(x=value, fill=variable, color=variable)) +
geom_density(alpha=0.35) +
labs(x="Color Intensity", y = "Density", title="Initial RGB Density") +
THEME +
theme(legend.position = c(0.99,0.8),
legend.title = element_text(colour = 'white'),
legend.spacing.y = unit(4,'cm'))
## Illustrate DCS colours or original in density plot
DCS<-DCSInput()
if(DCS==TRUE){
Colors <- Plot.img(data = Contrast, cluster = Clust, Title = 'Clustered Image',
preprocessed = FALSE, original.data = O.pic)$Colors
DATA<-as.matrix(Contrast[,3:5])
}
if(DCS==FALSE){
Colors <- Plot.img(data = Contrast, cluster = Clust, Title = 'Clustered Image',
preprocessed = TRUE,
original.data = O.pic)$Colors
DATA<-as.matrix(O.pic[,3:5])
}
## Gamma region colour
G.low<-B-(G.dex/2)
G.hig<-B+(G.dex/2)
ecdf_fun <- function(x,perc) ecdf(x)(perc)
Lower<-ecdf_fun(DEN$x, G.low)
Upper<-ecdf_fun(DEN$x, G.hig)
p4<-ggplot(data=DEN, aes(x, y)) +
geom_ribbon(data=subset(DEN, x <= quantile(DEN$x,Lower)[1]),
aes(x=x, ymax=y), ymin=0, fill=rgb(Colors[2,2:4]), alpha=0.4)+
geom_ribbon(data=subset(DEN, x > quantile(DEN$x,Lower)[1] & x <= quantile(DEN$x,Upper)[1]),
aes(x=x, ymax=y), ymin=0, fill=rgb(0, 1, 1), alpha=0.4)+
geom_ribbon(data=subset(DEN, x > quantile(DEN$x,Upper)[1]),
aes(x=x, ymax=y), ymin=0, fill=rgb(Colors[1,2:4]), alpha=0.4) +
geom_vline(xintercept = B, linetype='dashed') +
ggtitle("Minimum Density Hyperplane") +
labs(x="Optimized values for separation", y = "") +
THEME +
scale_x_continuous(breaks = round(seq(min(DEN$x), max(DEN$x), by = 0.2),1))
plot.new()
grid.newpage()
pushViewport(viewport(layout = grid.layout(2, 2)))
#Draw ggplot
pushViewport(viewport(layout.pos.row = 1, layout.pos.col = 2))
print(p4, newpage = FALSE)
popViewport()
#Draw ggplot
pushViewport(viewport(layout.pos.row = 1, layout.pos.col = 1))
print(p2, newpage = FALSE)
popViewport()
#Draw base plot
pushViewport(viewport(layout.pos.row = 2, layout.pos.col = c(1:2)))
par(fig = gridFIG(), new = TRUE)
hp_plot(Sol, DATA)
popViewport()
#grid.arrange(p2, p4, ncol=2)
})
## Manual-adjustment view: the manually re-thresholded image side by side
## with the kernel density of the solution values, split at the chosen cutoff.
output$ManPlotDensity <- renderPlot({
  contrast_in <- ContrastInput()
  original_pic <- contrast_in$pic
  pixels <- as.data.frame(contrast_in$Contrast)
  density_in <- DensityInput()
  rezoned <- density_in$Rezone
  dens_df <- density_in$DEN
  cutoff <- input$DD
  point_size <- cexInput()  # read to keep the reactive dependency
  ## Pixels at or below the manual cutoff are flagged red, the rest black.
  pixels$MCG <- ifelse(rezoned <= cutoff, 'red', 'black')
  img_plot <- ggplot(data = pixels, aes(x = x, y = y, col = MCG)) +
    geom_point(shape = 15) +
    scale_color_identity() +
    coord_fixed() +
    ggtitle("Manual Clustered Image") +
    THEME +
    theme(axis.line = element_blank(),
          axis.text.x = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks = element_blank(),
          axis.title.x = element_blank(),
          axis.title.y = element_blank())
  dens_plot <- ggplot(data = dens_df, aes(x, y)) +
    geom_ribbon(data = subset(dens_df, x <= cutoff),
                aes(x = x, ymax = y), ymin = 0, fill = 'red', alpha = 0.4) +
    geom_ribbon(data = subset(dens_df, x > cutoff),
                aes(x = x, ymax = y), ymin = 0, fill = 'black', alpha = 0.4) +
    geom_vline(xintercept = cutoff, linetype = 'dashed') +
    labs(x = "Solution values", y = "Density",
         title = "Kernel Density of RGB Values") +
    THEME +
    scale_x_continuous(breaks = round(seq(min(dens_df$x), max(dens_df$x), by = 0.2), 1))
  grid.arrange(img_plot, dens_plot, nrow = 1)
})
## Image-only view of the manual re-clustering: every pixel is coloured red
## or black depending on whether its rezoned value falls below the cutoff.
output$ManPlotImage <- renderPlot({
  contrast_in <- ContrastInput()
  original_pic <- contrast_in$pic
  pixels <- contrast_in$Contrast
  density_in <- DensityInput()
  rezoned <- density_in$Rezone
  dens_df <- density_in$DEN
  cutoff <- input$DD
  point_size <- cexInput()
  pixels$MCG <- ifelse(rezoned <= cutoff, 'red', 'black')
  manual_img <- ggplot(data = pixels, aes(x = x, y = y, col = MCG)) +
    geom_point(size = as.numeric(point_size), shape = 15) +
    scale_color_identity() +
    coord_fixed() +
    ggtitle("Manual Clustered Image") +
    THEME +
    theme(axis.line = element_blank(),
          axis.text.x = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks = element_blank(),
          axis.title.x = element_blank(),
          axis.title.y = element_blank())
  print(manual_img)
})
## 3-D RGB scatter of the retained pixels, each point coloured by its own
## RGB value.  DCS toggles between the contrast-adjusted and raw pixel data.
output$plot <- renderPlotly({
  contrast_in <- ContrastInput()
  keep <- Indy()$Indy
  dcs_on <- DCSInput()
  if (dcs_on == TRUE) {
    pts <- contrast_in$Contrast[keep, ]
  } else {
    pts <- contrast_in$pic[keep, ]
  }
  bare_axis <- list(title = "", zeroline = FALSE, showline = FALSE,
                    showticklabels = FALSE, showgrid = FALSE)
  plot_ly(pts, x = ~R, y = ~G, z = ~B,
          hoverinfo = "none",
          marker = list(size = 3,
                        color = rgb(pts[, 3:5]))) %>%
    add_markers() %>%
    layout(xaxis = bare_axis, yaxis = bare_axis, showlegend = FALSE,
           plot_bgcolor = '#222d32', paper_bgcolor = '#222d32')
})
## Interactive 3-D RGB scatter of the clustered pixels with the Gamma
## (boundary/uncertainty) region highlighted as a third, cyan cluster.
output$GAMplot<-renderPlotly({
## Reactive inputs
GWIDTH <- gammainput()  # gamma band width; read here only to create the reactive dependency -- TODO confirm intended
Contrast <- ContrastInput()
DCS <- DCSInput()
Outsidein<- MethodInput()
G.index <- Outsidein$G.index  # row indices of pixels inside the gamma region
B <- Outsidein$B
V <- Outsidein$V
BW <- Outsidein$BW
CL <- Outsidein$Clust  # per-pixel cluster label (1 or 2)
Indy <- Indy()
USE <- Indy$Indy  # subset of pixel rows to display
## Choose contrast-adjusted or original pixel data depending on the DCS toggle
if(DCS==TRUE)
Contrast <- Contrast$Contrast[USE,]
else
Contrast <- Contrast$pic[USE,]
## Updating Cluster List (CL) to account for 3rd cluster, Gamma cluster
CL.gamma <- CL
# CL.gamma[Gamma.region$Index] <- 3
CL.gamma[G.index] <- 3
## Plot.img is called only to recover the two cluster colours ($Color)
p3 <- Plot.img(data = Contrast, preprocessed = FALSE, Title = '\u0393 Region',
cluster = CL[USE])$Color
ax <- list(title = "", zeroline = FALSE, showline = FALSE, showticklabels = FALSE, showgrid = FALSE)
## Colours: cluster 1, cluster 2, and cyan for the gamma cluster
COLORS<-c(rgb(p3[1,2:4], maxColorValue = 1),
rgb(p3[2,2:4], maxColorValue = 1), rgb(red=0, blue = 1, green = 1, maxColorValue = 1))
Contrast<-cbind(Contrast, CL.gamma[USE])
Contrast<-as.data.frame(Contrast)
## NOTE(review): the cbind above produces a column named from the expression
## ("CL.gamma[USE]", mangled by as.data.frame); the $CL.gamma access below
## relies on partial matching of that name.  It works, but is fragile --
## consider naming the column explicitly.
Contrast$CL.gamma <- ifelse(Contrast$CL.gamma == 1, COLORS[1],
ifelse(Contrast$CL.gamma == 2, COLORS[2], COLORS[3]))
plot_ly(Contrast, x = ~R, y = ~G, z = ~B, type="scatter3d", hoverinfo="none",
marker = list(size=3,
color = ~CL.gamma,
colors= c(COLORS[1], COLORS[2], COLORS[3]))) %>%
layout(xaxis = ax, yaxis=ax, showlegend = FALSE, plot_bgcolor = '#222d32',
paper_bgcolor = '#222d32')
})
## Final 3-D RGB scatter: pixels coloured by their final labels
## (cluster 1, cluster 2, or the cyan gamma group) after relabelling.
output$Finalplot<-renderPlotly({
Gamma.lab <- MethodInput()
Indy <- Indy()
USE <- Indy$Indy  # subset of pixel rows to display
CL <- Gamma.lab$Clust          # original cluster labels
G.index <- Gamma.lab$G.index   # gamma-region indices
Gamma.lab <- Gamma.lab$Gamma.Labels  # final per-pixel labels after gamma relabelling
Contrast <- ContrastInput()
DCS <- DCSInput()
## Choose contrast-adjusted or original pixel data depending on the DCS toggle
if(DCS==TRUE)
Contrast <- Contrast$Contrast[USE,]
else
Contrast <- Contrast$pic[USE,]
## Plot.img is called only to recover the two cluster colours ($Color)
p3 <- Plot.img(data = Contrast, preprocessed = FALSE, Title = '\u0393 Region',
cluster = Gamma.lab[USE])$Color
ax <- list(title = "", zeroline = FALSE, showline = FALSE, showticklabels = FALSE, showgrid = FALSE)
## Colours: cluster 1, cluster 2, and cyan for the gamma group
COLORS<-c(rgb(p3[1,2:4], maxColorValue = 1), rgb(p3[2,2:4], maxColorValue = 1), rgb(red=0, blue = 1, green = 1, maxColorValue = 1))
Contrast<-cbind(Contrast, Gamma.lab[USE])
Contrast<-as.data.frame(Contrast)
## NOTE(review): the cbind above produces a column named from the expression
## ("Gamma.lab[USE]", mangled by as.data.frame); $Gamma.lab below relies on
## partial matching of that name -- fragile, consider naming it explicitly.
Contrast$CL.gamma <- ifelse(Contrast$Gamma.lab == 1, COLORS[1],
ifelse(Contrast$Gamma.lab == 2, COLORS[2], COLORS[3]))
## NOTE(review): `colors=` lists only two colours here while CL.gamma can
## hold three explicit colour strings (cf. GAMplot, which passes all three);
## since `color` already carries literal hex values this may be harmless,
## but verify the gamma group renders cyan as intended.
plot_ly(Contrast, x = ~R, y = ~G, z = ~B, type="scatter3d", hoverinfo="none",
marker = list(size=3, color=~CL.gamma, colors=c(COLORS[1], COLORS[2]))) %>%
layout(xaxis=ax, yaxis=ax, showlegend=FALSE, plot_bgcolor='#222d32', paper_bgcolor='#222d32')
})
## Embed the PPCI package's CRAN page as the resources/help panel.
output$RESMDH <- renderUI({getPage("https://cran.r-project.org/web/packages/PPCI/index.html")})
})
|
## Auto-generated fuzz/valgrind regression case for the native routine
## IntervalSurgeon:::rcpp_pile: replays a captured argument list (interval
## ends/starts, points, membership info) and prints the result's structure.
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, 2025832810L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609860670-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 728
|
r
|
## Auto-generated fuzz/valgrind regression case for the native routine
## IntervalSurgeon:::rcpp_pile: replays a captured argument list (interval
## ends/starts, points, membership info) and prints the result's structure.
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, 2025832810L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
\name{nonadditivity}
\alias{nonadditivity}
%- nonadditivity.
\title{ Nonadditivity model test }
\description{
The resistance for the transformable nonadditivity, due to J. W. Tukey,
is based on the detection of a curvilinear relation between y-est(y)
and est(y). A freedom degree for the transformable nonadditivity.
}
\usage{
nonadditivity(y, factor1, factor2, df, MSerror)
}
\arguments{
\item{y}{ Answer of the experimental unit }
\item{factor1}{ First treatment applied to each experimental unit }
\item{factor2}{ Second treatment applied to each experimental unit }
\item{df}{ Degrees of freedom of the experimental error }
\item{MSerror}{ Means square error of the experimental }
}
\details{
Only two factors: block and treatment, or factor 1 and factor 2.
}
\value{
\item{y }{Numeric}
\item{factor1 }{alfanumeric}
\item{factor2 }{alfanumeric}
\item{df }{Numeric}
\item{MSerror }{Numeric}
}
\references{
1. Steel, R.; Torri,J; Dickey, D.(1997)
Principles and Procedures of Statistics
A Biometrical Approach
2. George E.P. Box; J. Stuart Hunter and William G. Hunter.
Statistics for experimenters.
Wiley Series in probability and statistics }
\author{ Felipe de Mendiburu }
\examples{
library(agricolae)
data(potato )
potato[,1]<-as.factor(potato[,1])
model<-lm(cutting ~ date + variety,potato)
df<-df.residual(model)
MSerror<-deviance(model)/df
attach(potato)
analysis<-nonadditivity(cutting, date, variety, df, MSerror)
detach(potato)
}
\keyword{ models }% at least one, from doc/KEYWORDS
|
/man/nonadditivity.Rd
|
no_license
|
JeanLescut/agricolae
|
R
| false
| false
| 1,595
|
rd
|
\name{nonadditivity}
\alias{nonadditivity}
%- nonadditivity.
\title{ Nonadditivity model test }
\description{
The resistance for the transformable nonadditivity, due to J. W. Tukey,
is based on the detection of a curvilinear relation between y-est(y)
and est(y). A freedom degree for the transformable nonadditivity.
}
\usage{
nonadditivity(y, factor1, factor2, df, MSerror)
}
\arguments{
\item{y}{ Answer of the experimental unit }
\item{factor1}{ First treatment applied to each experimental unit }
\item{factor2}{ Second treatment applied to each experimental unit }
\item{df}{ Degrees of freedom of the experimental error }
\item{MSerror}{ Means square error of the experimental }
}
\details{
Only two factors: block and treatment, or factor 1 and factor 2.
}
\value{
\item{y }{Numeric}
\item{factor1 }{alfanumeric}
\item{factor2 }{alfanumeric}
\item{df }{Numeric}
\item{MSerror }{Numeric}
}
\references{
1. Steel, R.; Torri,J; Dickey, D.(1997)
Principles and Procedures of Statistics
A Biometrical Approach
2. George E.P. Box; J. Stuart Hunter and William G. Hunter.
Statistics for experimenters.
Wiley Series in probability and statistics }
\author{ Felipe de Mendiburu }
\examples{
library(agricolae)
data(potato )
potato[,1]<-as.factor(potato[,1])
model<-lm(cutting ~ date + variety,potato)
df<-df.residual(model)
MSerror<-deviance(model)/df
attach(potato)
analysis<-nonadditivity(cutting, date, variety, df, MSerror)
detach(potato)
}
\keyword{ models }% at least one, from doc/KEYWORDS
|
## bootstrap.R -- assemble the bundled package library for the iNZightVIT
## OS X installer.  Downloads binary builds of the iNZight packages (plus
## their recursive dependencies) into Installer/iNZightVIT/.library.
## Extra package names may be passed on the command line.
ca <- commandArgs(trailingOnly = TRUE)
cat(" * compiling list of required packages\n")
## Target library shipped inside the installer
pkglib <- file.path("Installer", "iNZightVIT", ".library")
if (!dir.exists(pkglib)) dir.create(pkglib)
## Versions already present in the bundled library
pkgversions <- installed.packages(pkglib)[, 'Version']
repos <- c('https://r.docker.stat.auckland.ac.nz', 'https://cran.stat.auckland.ac.nz')
## Build-time helpers (installed to the default library, not the bundle)
if (!requireNamespace('packrat', quietly = TRUE)) install.packages('packrat', repos = repos[2])
if (!requireNamespace('devtools', quietly = TRUE)) install.packages('devtools', repos = repos[2])
if (!requireNamespace('readr', quietly = TRUE) ||
    packageVersion('readr') < numeric_version('1.2')) {
    install.packages('readr', repos = repos)
    ## copy to `pkglib`; remove any stale copy first.
    ## BUGFIX: unlink() silently does nothing on a directory unless
    ## recursive = TRUE is given.
    if (dir.exists(file.path(pkglib, 'readr'))) unlink(file.path(pkglib, 'readr'), recursive = TRUE)
    ## BUGFIX: file.copy(recursive = TRUE) copies the 'readr' directory *into*
    ## the destination, so the destination must be pkglib itself -- the old
    ## destination of file.path(pkglib, 'readr') produced pkglib/readr/readr.
    file.copy(file.path(.libPaths()[1], 'readr'), pkglib, recursive = TRUE)
}
srclib <- .libPaths()[1]
## Core iNZight packages, plus any extras requested on the command line
inzpkgs <- c('iNZight', 'iNZightPlots', 'iNZightModules', 'iNZightTools',
    'iNZightRegression', 'iNZightMR', 'iNZightTS', 'vit')
if (length(ca) > 0)
    inzpkgs <- c(inzpkgs, ca)
## Resolve the full (recursive) dependency set against the repositories
ap <- packrat:::availablePackages(repos = repos, type = "mac.binary.mavericks")
extrapkgs <- packrat:::getPackageDependencies(inzpkgs, srclib, ap,
    fields = c('Depends', 'Imports', 'Suggests', 'LinkingTo'))
if (!'iNZightMaps' %in% inzpkgs)
    extrapkgs <- extrapkgs[extrapkgs != "iNZightMaps"]
## Acinonyx is installed separately below; RODBC has no mavericks binary
extrapkgs <- extrapkgs[extrapkgs != "Acinonyx"]
extrapkgs <- extrapkgs[extrapkgs != "RODBC"]
deps <- unique(c(inzpkgs, extrapkgs,
    packrat:::recursivePackageDependencies(unique(c(inzpkgs, extrapkgs)), srclib, ap)))
## Packages not yet in the bundle ...
missing <- deps[!deps %in% names(pkgversions)]
## ... and bundled packages with a newer version available upstream.
## BUGFIX: compare as numeric_version, not as strings -- string comparison
## misorders versions such as "1.10" vs "1.9".
pkgu <- pkgversions[names(pkgversions) %in% rownames(ap)]
outdated <- names(pkgu)[numeric_version(ap[names(pkgu), 'Version']) > numeric_version(pkgu)]
grab <- unique(c(missing, outdated))
cat(" * downloading packages\n")
pkgs <- download.packages(grab, pkglib, repos = repos, type = 'mac.binary.mavericks', quiet = TRUE)
## Anything that could not be downloaded
missing <- grab[!grab %in% pkgs[, 1]]
cat(" * extracting packages into place\n")
x <- apply(pkgs, 1, function(pkg) {
    pkgd <- file.path("Installer", "iNZightVIT", ".library", pkg[1])
    if (dir.exists(pkgd)) unlink(pkgd, TRUE, TRUE)  # recursive = TRUE, force = TRUE
    untar(pkg[2], exdir = file.path("Installer", "iNZightVIT", ".library"))
    unlink(pkg[2])  # discard the downloaded tarball
})
cat(" * Installing Acinonyx\n")
install.packages('Acinonyx', lib = pkglib, repos = 'http://rforge.net', type = 'binary')
# untar("../Acinonyx_3.0-0.tar.gz", exdir = pkglib)
## Report packages that were not available from any repository
if (length(missing) > 0) {
    cat(" * some packages weren't available: ")
    cat(paste(missing, collapse = ", "))
    cat("\n")
}
cat(" * Done!\n\nNow go to `dev` and install the development iNZight packages\nif this isn't the master release\n")
|
/bootstrap.R
|
no_license
|
iNZightVIT/iNZightVIT-osx-installer
|
R
| false
| false
| 2,871
|
r
|
## bootstrap.R -- assemble the bundled package library for the iNZightVIT
## OS X installer.  Downloads binary builds of the iNZight packages (plus
## their recursive dependencies) into Installer/iNZightVIT/.library.
## Extra package names may be passed on the command line.
ca <- commandArgs(trailingOnly = TRUE)
cat(" * compiling list of required packages\n")
## Target library shipped inside the installer
pkglib <- file.path("Installer", "iNZightVIT", ".library")
if (!dir.exists(pkglib)) dir.create(pkglib)
## Versions already present in the bundled library
pkgversions <- installed.packages(pkglib)[, 'Version']
repos <- c('https://r.docker.stat.auckland.ac.nz', 'https://cran.stat.auckland.ac.nz')
## Build-time helpers (installed to the default library, not the bundle)
if (!requireNamespace('packrat', quietly = TRUE)) install.packages('packrat', repos = repos[2])
if (!requireNamespace('devtools', quietly = TRUE)) install.packages('devtools', repos = repos[2])
if (!requireNamespace('readr', quietly = TRUE) ||
    packageVersion('readr') < numeric_version('1.2')) {
    install.packages('readr', repos = repos)
    ## copy to `pkglib`; remove any stale copy first.
    ## BUGFIX: unlink() silently does nothing on a directory unless
    ## recursive = TRUE is given.
    if (dir.exists(file.path(pkglib, 'readr'))) unlink(file.path(pkglib, 'readr'), recursive = TRUE)
    ## BUGFIX: file.copy(recursive = TRUE) copies the 'readr' directory *into*
    ## the destination, so the destination must be pkglib itself -- the old
    ## destination of file.path(pkglib, 'readr') produced pkglib/readr/readr.
    file.copy(file.path(.libPaths()[1], 'readr'), pkglib, recursive = TRUE)
}
srclib <- .libPaths()[1]
## Core iNZight packages, plus any extras requested on the command line
inzpkgs <- c('iNZight', 'iNZightPlots', 'iNZightModules', 'iNZightTools',
    'iNZightRegression', 'iNZightMR', 'iNZightTS', 'vit')
if (length(ca) > 0)
    inzpkgs <- c(inzpkgs, ca)
## Resolve the full (recursive) dependency set against the repositories
ap <- packrat:::availablePackages(repos = repos, type = "mac.binary.mavericks")
extrapkgs <- packrat:::getPackageDependencies(inzpkgs, srclib, ap,
    fields = c('Depends', 'Imports', 'Suggests', 'LinkingTo'))
if (!'iNZightMaps' %in% inzpkgs)
    extrapkgs <- extrapkgs[extrapkgs != "iNZightMaps"]
## Acinonyx is installed separately below; RODBC has no mavericks binary
extrapkgs <- extrapkgs[extrapkgs != "Acinonyx"]
extrapkgs <- extrapkgs[extrapkgs != "RODBC"]
deps <- unique(c(inzpkgs, extrapkgs,
    packrat:::recursivePackageDependencies(unique(c(inzpkgs, extrapkgs)), srclib, ap)))
## Packages not yet in the bundle ...
missing <- deps[!deps %in% names(pkgversions)]
## ... and bundled packages with a newer version available upstream.
## BUGFIX: compare as numeric_version, not as strings -- string comparison
## misorders versions such as "1.10" vs "1.9".
pkgu <- pkgversions[names(pkgversions) %in% rownames(ap)]
outdated <- names(pkgu)[numeric_version(ap[names(pkgu), 'Version']) > numeric_version(pkgu)]
grab <- unique(c(missing, outdated))
cat(" * downloading packages\n")
pkgs <- download.packages(grab, pkglib, repos = repos, type = 'mac.binary.mavericks', quiet = TRUE)
## Anything that could not be downloaded
missing <- grab[!grab %in% pkgs[, 1]]
cat(" * extracting packages into place\n")
x <- apply(pkgs, 1, function(pkg) {
    pkgd <- file.path("Installer", "iNZightVIT", ".library", pkg[1])
    if (dir.exists(pkgd)) unlink(pkgd, TRUE, TRUE)  # recursive = TRUE, force = TRUE
    untar(pkg[2], exdir = file.path("Installer", "iNZightVIT", ".library"))
    unlink(pkg[2])  # discard the downloaded tarball
})
cat(" * Installing Acinonyx\n")
install.packages('Acinonyx', lib = pkglib, repos = 'http://rforge.net', type = 'binary')
# untar("../Acinonyx_3.0-0.tar.gz", exdir = pkglib)
## Report packages that were not available from any repository
if (length(missing) > 0) {
    cat(" * some packages weren't available: ")
    cat(paste(missing, collapse = ", "))
    cat("\n")
}
cat(" * Done!\n\nNow go to `dev` and install the development iNZight packages\nif this isn't the master release\n")
|
## plot3.R -- renders plot3.png: the three energy sub-metering series for
## 1-2 Feb 2007 from the UCI household power consumption data set.
## Fetch and unpack the data only if it is not already on disk.
if (!file.exists("household_power_consumption.txt")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "data.zip")
  unzip("data.zip")
}
## Load the raw data ('?' marks missing values) and keep the two target dates.
power <- read.table("household_power_consumption.txt", header = TRUE,
                    sep = ";", na.strings = "?")
power <- power[power$Date == "1/2/2007" | power$Date == "2/2/2007", ]
## Combine the date and time columns into a single timestamp.
power$Date <- paste(power$Date, power$Time)
power$Date <- strptime(power$Date, "%d/%m/%Y %H:%M:%S")
## Open the PNG device.
png(filename = "plot3.png")
## Empty frame scaled by sub-meter 1 (the largest of the three series).
plot(power$Date, power$Sub_metering_1, type = "n",
     ylab = "Energy sub metering", xlab = "")
## Overlay the three sub-metering traces.
lines(power$Date, power$Sub_metering_1)
lines(power$Date, power$Sub_metering_2, col = "red")
lines(power$Date, power$Sub_metering_3, col = "blue")
## Legend identifying each trace.
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c("black", "red", "blue"))
## Close the device, flushing the file.
dev.off()
|
/plot3.R
|
no_license
|
vlavikainen/ExData_Plotting1
|
R
| false
| false
| 1,155
|
r
|
## plot3.R -- renders plot3.png: the three energy sub-metering series for
## 1-2 Feb 2007 from the UCI household power consumption data set.
## Fetch and unpack the data only if it is not already on disk.
if (!file.exists("household_power_consumption.txt")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "data.zip")
  unzip("data.zip")
}
## Load the raw data ('?' marks missing values) and keep the two target dates.
power <- read.table("household_power_consumption.txt", header = TRUE,
                    sep = ";", na.strings = "?")
power <- power[power$Date == "1/2/2007" | power$Date == "2/2/2007", ]
## Combine the date and time columns into a single timestamp.
power$Date <- paste(power$Date, power$Time)
power$Date <- strptime(power$Date, "%d/%m/%Y %H:%M:%S")
## Open the PNG device.
png(filename = "plot3.png")
## Empty frame scaled by sub-meter 1 (the largest of the three series).
plot(power$Date, power$Sub_metering_1, type = "n",
     ylab = "Energy sub metering", xlab = "")
## Overlay the three sub-metering traces.
lines(power$Date, power$Sub_metering_1)
lines(power$Date, power$Sub_metering_2, col = "red")
lines(power$Date, power$Sub_metering_3, col = "blue")
## Legend identifying each trace.
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c("black", "red", "blue"))
## Close the device, flushing the file.
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scoring_matrices.R
\name{make_scoring_matrix_aem}
\alias{make_scoring_matrix_aem}
\title{Make scoring matrix}
\usage{
make_scoring_matrix_aem(
responses,
sequence = c("mae", "mea", "aem", "ame", "ema", "eam", "gpcm"),
nMiddle = 2L,
nExtreme = 1L,
nAcquiescence = floor(length(responses)/2),
reversed = FALSE,
aType = c("separate", "common"),
iType = c("separate", "common")
)
}
\arguments{
\item{responses}{a vector of available responses (\emph{categories}) - can be
a character vector or positive integer describing number of responses}
\item{sequence}{a string: "gpcm" or a three-letters sequence describing
the order of nodes in the IRTree:
\itemize{
\item{'m' stands for choosing between middle \emph{category} and some other
\emph{category}}
\item{'a' stands for choosing between \emph{acquiescence} response (i.e.
located \emph{after/below} a \emph{middle} one) and some other
response}
\item{'e' stands for choosing between \emph{extreme} category and some
other \emph{category}}
}}
\item{nMiddle}{the (maximum) number of \emph{middle} \emph{categories}}
\item{nExtreme}{(half of) the number of \emph{extreme} \emph{categories}}
\item{nAcquiescence}{the number of \emph{acquiescence} \emph{categories}}
\item{reversed}{a logical value - is item a reversed one? (see details)}
\item{aType}{determines a way in which scoring pattern for acquiescence is
generated when it appears in different branches of the IRTree (whether to
create separate columns allowing for different discrimination of the
acquiescence in different nodes of the tree or to create only a single column
holding discrimination in different nodes of the tree constant)}
\item{iType}{determines a way in which scoring pattern for additional (see
the description of the `aType` parameter above) \emph{intensity} trait will
be generated (see details)}
}
\value{
a matrix of integers
}
\description{
Makes response matrix, i.e. matrix describing how each latent
trait (represented in columns) affects (or not) chances to choose each
response category (represented in rows) assuming effects of
\emph{acquiescence}, \emph{extreme} and \emph{middle} response styles.
}
\details{
\strong{\code{sequence} other than "gpcm":}
For important remarks on the possibilities and limitations of interpretation
of IRTree models, that are represented by this type of scoring matrices,
see Plieninger (2020).
For number of responses between 5 and 6 function generates scoring
matrix in a way mimicking Böckenholt's approach (2017) to describe
response to the item as a sequence of binary decisions involving choosing
of the middle, extreme and acquiescence categories - these decisions may be
made in a different order, which is controlled by the argument \code{sequence}.
Please note that following Böckenholt \emph{acquiescence} trait is managed in
a slightly different way than the other two. If a choice involving
\emph{acquiescence} may be made in different nodes of IRTree (i.e. for
different combinations of values in previous columns of the scoring matrix),
separate column describing decision in each node (for each combination) is
created by default (and names of these columns are \emph{a} followed by
integer index). That allows for specifying different IRT parameters for each
node. Setting argument \code{aType = "common"} allows to collapse these
column into one if you want to constrain model parameters between nodes in
a convenient way.
With fewer than 5 possible responses the function applies the same logic, but
not all of the three aforementioned styles can be involved because of the lack
of variability in possible responses.
With more than 6 possible responses there must be additional trait added to
scoringMatrix to describe process of choice between all the possible
responses. In such a case function adds additional columns to a scoring
matrix that names are \emph{i} (standing for intensity) followed by an index
and are filled up with scores for such combinations of values in previous
columns of the scoring matrix that occur more than once. Scores in these
columns are sequences of non-negative integers either increasing
(\code{reversed=FALSE}) or decreasing (\code{reversed=TRUE}) that are
generated independent for each unique combination of values in the previous
columns and by default each of such combinations is described by a separate
column (allowing for specification of different model parameters).
Analogously to \emph{acquiescence} trait these columns can be collapsed into
one by setting \code{iType = "common"}.
\strong{\code{sequence} is "gpcm":}
In this case a GPCM scoring matrix is generated mimicking approach of
Plieninger (2016), i.e. assuming that response process is
a \emph{gpcm} and four factors: intensity of the trait that
is \strong{not} a response style (column \emph{i}), tendency to choose middle
\emph{categories} (column \emph{m}) tendency to choose extreme
\emph{categories} (column \emph{e}) and tendency to choose acquiescence
\emph{categories} (column \emph{a}) contribute altogether to propensity
of choosing each response.
}
\examples{
# Bockenholt 2017: 73
(bockenholtMAE5 <- make_scoring_matrix_aem(5, "mae"))
# Bockenholt 2017: 76
(bockenholtMAE6 <- make_scoring_matrix_aem(6, "mae"))
# Bockenholt 2017: 77
(bockenholtAEM6 <- make_scoring_matrix_aem(6, "aem"))
# Plieninger 2016: 39
(plieninger5 <- make_scoring_matrix_aem(5, "gpcm"))
(plieninger5r <- make_scoring_matrix_aem(5, "gpcm", reversed = TRUE))
# some more complicated cases:
make_scoring_matrix_aem(10, "ema", nMiddle = 3, nExtreme = 2)
make_scoring_matrix_aem(10, "ema", nMiddle = 3, nExtreme = 2,
aType = "common", iType = "common")
make_scoring_matrix_aem(9, "mae", nMiddle = 3, nExtreme = 2, reversed = TRUE)
}
|
/man/make_scoring_matrix_aem.Rd
|
no_license
|
tzoltak/rstyles
|
R
| false
| true
| 5,857
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scoring_matrices.R
\name{make_scoring_matrix_aem}
\alias{make_scoring_matrix_aem}
\title{Make scoring matrix}
\usage{
make_scoring_matrix_aem(
responses,
sequence = c("mae", "mea", "aem", "ame", "ema", "eam", "gpcm"),
nMiddle = 2L,
nExtreme = 1L,
nAcquiescence = floor(length(responses)/2),
reversed = FALSE,
aType = c("separate", "common"),
iType = c("separate", "common")
)
}
\arguments{
\item{responses}{a vector of available responses (\emph{categories}) - can be
a character vector or positive integer describing number of responses}
\item{sequence}{a string: "gpcm" or a three-letters sequence describing
the order of nodes in the IRTree:
\itemize{
\item{'m' stands for choosing between middle \emph{category} and some other
\emph{category}}
\item{'a' stands for choosing between \emph{acquiescence} response (i.e.
located \emph{after/below} a \emph{middle} one) and some other
response}
\item{'e' stands for choosing between \emph{extreme} category and some
other \emph{category}}
}}
\item{nMiddle}{the (maximum) number of \emph{middle} \emph{categories}}
\item{nExtreme}{(half of) the number of \emph{extreme} \emph{categories}}
\item{nAcquiescence}{the number of \emph{acquiescence} \emph{categories}}
\item{reversed}{a logical value - is item a reversed one? (see details)}
\item{aType}{determines a way in which scoring pattern for acquiescence is
generated when it appears in different branches of the IRTree (whether to
create separate columns allowing for different discrimination of the
acquiescence in different nodes of the tree or to create only a single column
holding discrimination in different nodes of the tree constant)}
\item{iType}{determines a way in which scoring pattern for additional (see
the description of the `aType` parameter above) \emph{intensity} trait will
be generated (see details)}
}
\value{
a matrix of integers
}
\description{
Makes response matrix, i.e. matrix describing how each latent
trait (represented in columns) affects (or not) chances to choose each
response category (represented in rows) assuming effects of
\emph{acquiescence}, \emph{extreme} and \emph{middle} response styles.
}
\details{
\strong{\code{sequence} other than "gpcm":}
For important remarks on the possibilities and limitations of interpretation
of IRTree models, that are represented by this type of scoring matrices,
see Plieninger (2020).
For a number of responses between 5 and 6 the function generates the scoring
matrix in a way mimicking Böckenholt's approach (2017) to describe
the response to the item as a sequence of binary decisions involving choosing
the middle, extreme and acquiescence categories - these decisions may be
made in a different order, which is controlled by the argument \code{sequence}.
Please note that, following Böckenholt, the \emph{acquiescence} trait is
managed in a slightly different way than the other two. If a choice involving
\emph{acquiescence} may be made in different nodes of IRTree (i.e. for
different combinations of values in previous columns of the scoring matrix),
separate column describing decision in each node (for each combination) is
created by default (and names of these columns are \emph{a} followed by
integer index). That allows for specifying different IRT parameters for each
node. Setting argument \code{aType = "common"} allows to collapse these
column into one if you want to constrain model parameters between nodes in
a convenient way.
With fewer than 5 possible responses the function applies the same logic, but
not all of the three aforementioned styles can be involved because of the lack
of variability in the possible responses.
With more than 6 possible responses an additional trait must be added to the
scoringMatrix to describe the process of choice between all the possible
responses. In such a case the function adds additional columns to the scoring
matrix whose names are \emph{i} (standing for intensity) followed by an index
and are filled up with scores for such combinations of values in previous
columns of the scoring matrix that occur more than once. Scores in these
columns are sequences of non-negative integers either increasing
(\code{reversed=FALSE}) or decreasing (\code{reversed=TRUE}) that are
generated independently for each unique combination of values in the previous
columns and by default each of such combinations is described by a separate
column (allowing for specification of different model parameters).
Analogously to \emph{acquiescence} trait these columns can be collapsed into
one by setting \code{iType = "common"}.
\strong{\code{sequence} is "gpcm":}
In this case a GPCM scoring matrix is generated mimicking approach of
Plieninger (2016), i.e. assuming that response process is
a \emph{gpcm} and four factors: intensity of the trait that
is \strong{not} a response style (column \emph{i}), tendency to choose middle
\emph{categories} (column \emph{m}) tendency to choose extreme
\emph{categories} (column \emph{e}) and tendency to choose acquiescence
\emph{categories} (column \emph{a}) contribute altogether to propensity
of choosing each response.
}
\examples{
# Bockenholt 2017: 73
(bockenholtMAE5 <- make_scoring_matrix_aem(5, "mae"))
# Bockenholt 2017: 76
(bockenholtMAE6 <- make_scoring_matrix_aem(6, "mae"))
# Bockenholt 2017: 77
(bockenholtAEM6 <- make_scoring_matrix_aem(6, "aem"))
# Plieninger 2016: 39
(plieninger5 <- make_scoring_matrix_aem(5, "gpcm"))
(plieninger5r <- make_scoring_matrix_aem(5, "gpcm", reversed = TRUE))
# some more complicated cases:
make_scoring_matrix_aem(10, "ema", nMiddle = 3, nExtreme = 2)
make_scoring_matrix_aem(10, "ema", nMiddle = 3, nExtreme = 2,
aType = "common", iType = "common")
make_scoring_matrix_aem(9, "mae", nMiddle = 3, nExtreme = 2, reversed = TRUE)
}
|
## Multiple co-inertia analysis (MCIA).
##
## Integrates a list of data sets measured on the same observations
## (observations in columns, features in rows): each data set is transformed
## with a correspondence-analysis variant and the resulting ordinations are
## combined with ade4's multiple co-inertia analysis (mcoa).
##
## Args:
##   df.list:  list of matrices/data.frames (ExpressionSet objects are
##             unwrapped) with equal numbers of columns.
##   cia.nf:   number of axes to keep in each analysis.
##   cia.scan: accepted for interface compatibility; not used in this body.
##   nsc:      TRUE to use non-symmetric correspondence analysis (dudi.nsc),
##             FALSE to use (row-weighted) correspondence analysis.
##   svd:      TRUE to let ade4::mcoa use base::svd; FALSE to use an
##             eigen-based decomposition from the start (LAPACK's svd fails
##             to converge on some inputs).
##
## Returns: a list of class "mcia" with elements call, mcoa and coa.
mcia <- function (df.list, cia.nf = 2, cia.scan = FALSE, nsc = TRUE, svd = TRUE)
{
    # Unwrap ExpressionSet inputs into plain expression matrices.
    df.list <- lapply(df.list, function(x) {
        if (inherits(x, "ExpressionSet")) {
            made4:::getdata(x)
        } else {
            x
        }
    })
    # Features equal to the data set minimum in every observation are treated
    # as never expressed and must be removed by the caller.
    for (i in names(df.list)) {
        df <- df.list[[i]]
        minn <- min(df)
        ind <- apply(df, 1, function(x) all(x == minn))
        if (any(ind))
            stop(paste0("There are features in data.frame ", i,
                " that are not expressed in any observation; ",
                "please remove these features"))
    }
    # All data sets must share the same observations and be finite/complete.
    N <- sapply(df.list, ncol)
    df.list <- lapply(df.list, as.matrix)
    if (length(unique(N)) != 1)
        stop("Nonequal number of individual across data.frames")
    infi <- sapply(df.list, function(x) any(is.infinite(x)))
    if (any(infi))
        stop("Infinite numeric in the data.frames")
    na <- sapply(df.list, function(x) any(is.na(x)))
    if (any(na))
        stop("NAs in the data.frames")
    if (is.null(names(df.list)))
        names(df.list) <- paste0("df", seq_along(df.list))
    # Eigen-based drop-in replacement for base::svd: the LAPACK routine
    # called by svd fails to converge in some cases, so decompose
    # tcrossprod(df) with eigen() instead (negative eigenvalues are clamped
    # to a tiny positive number before taking the square root).
    # Defined once here; previously this closure was duplicated verbatim in
    # two places.
    eigen_svd <- function(df) {
        m <- tcrossprod(df, df)
        em <- eigen(m)
        em$values[em$values < 0] <- 1e-30
        list(d = sqrt(em$values),
             u = em$vectors,
             v = t(apply(t(df) %*% em$vectors, 1,
                         function(x) x / sqrt(em$values))))
    }
    # Run ade4::mcoa inside a forged environment so the 'svd' symbol it
    # resolves can be redirected to either base::svd or eigen_svd.
    # NOTE(review): environment(mcoa) assumes ade4 is attached.
    mcoaEnv <- environment(mcoa)
    fakeEnv <- new.env(parent = mcoaEnv)
    mcoa2 <- ade4::mcoa
    environment(mcoa2) <- fakeEnv
    if (!is.logical(svd))
        stop("logical value required for svd")
    assign("svd", if (svd) base::svd else eigen_svd, fakeEnv)
    # Pairwise RV coefficients between the (transformed) data sets.
    pairwise.rv <- function(data.list) {
        ms <- sapply(data.list, function(x) {
            x <- c(crossprod(as.matrix(x)))
            x / sqrt(sum(x^2))
        })
        m <- crossprod(ms)
        colnames(m) <- rownames(m) <- names(data.list)
        m
    }
    if (nsc) {
        # Non-symmetric correspondence analysis on each data set, then
        # transpose the dudi objects for ktab.list.dudi.
        df.list <- lapply(df.list, made4::array2ade4, pos = TRUE)
        coa.list <- lapply(df.list, dudi.nsc, scannf = FALSE, nf = cia.nf)
        coa.list.t <- lapply(coa.list, ade4:::t.dudi)
        dfl <- lapply(coa.list, function(x) x$tab)
        RV <- pairwise.rv(dfl)
        ktcoa <- ktab.list.dudi(coa.list.t)
    }
    if (!nsc) {
        # Correspondence analysis branch: the first data set defines the row
        # weights reused by all remaining data sets.
        df.list <- lapply(df.list, t)
        df.list <- lapply(df.list, made4::array2ade4, pos = TRUE)
        coa1 <- dudi.coa(df.list[[1]], scannf = FALSE, nf = cia.nf)
        coa.list <- lapply(df.list[-1], made4:::dudi.rwcoa, rowweights = coa1$lw,
            scannf = FALSE, nf = cia.nf)
        coa.list.name <- names(coa.list)
        coa.list$coa1 <- coa1
        coa.list <- coa.list[c("coa1", coa.list.name)]
        names(coa.list)[1] <- names(df.list)[1]
        # Round row weights so ktab.list.dudi treats them as equal.
        for (i in seq_along(coa.list)) {
            coa.list[[i]]$lw <- round(coa.list[[i]]$lw, digits = 8)
        }
        dfl <- lapply(coa.list, function(x) x$tab)
        dfl <- lapply(dfl, t)
        RV <- pairwise.rv(dfl)
        ktcoa <- ktab.list.dudi(coa.list)
    }
    # First attempt with the configured svd; on LAPACK convergence failure,
    # retry with the eigen-based decomposition.
    mcoin <- try(mcoa2(X = ktcoa, nf = cia.nf, scannf = FALSE), silent = TRUE)
    if (inherits(mcoin, "try-error")) {
        warning("'svd' failed to converge; 'eigen' used to perform singular value decomposition")
        assign("svd", eigen_svd, fakeEnv)
        mcoin <- mcoa2(X = ktcoa, nf = cia.nf, scannf = FALSE)
    }
    # Scale the column coordinates and copy bookkeeping onto the result.
    tab <- scalewt(mcoin$Tco, wt = ktcoa$cw, center = FALSE, scale = TRUE)
    colnames(tab) <- paste0("Axis", seq_len(ncol(tab)))
    mcoin$Tlw <- ktcoa$lw
    mcoin$Tcw <- ktcoa$cw
    mcoin$blo <- ktcoa$blo
    mcoin$Tc1 <- tab
    mcoin$RV <- RV
    call <- match.call()
    mciares <- list(call = call, mcoa = mcoin, coa = coa.list)
    class(mciares) <- "mcia"
    return(mciares)
}
|
/R/mcia.R
|
no_license
|
aedin/omicade4
|
R
| false
| false
| 4,234
|
r
|
## Multiple co-inertia analysis (MCIA) of a list of data sets that share the
## same observations (columns). Each data set is transformed with a
## correspondence-analysis variant and the resulting ordinations are
## integrated with ade4's mcoa. Returns an object of class "mcia" with
## elements call, mcoa and coa.
## NOTE(review): 'cia.scan' is accepted but never used in this body.
mcia <- function (df.list, cia.nf = 2, cia.scan = FALSE, nsc = T, svd=TRUE)
{
    # Unwrap ExpressionSet objects into plain expression matrices.
    df.list <- lapply (df.list, function(x) {
        if (inherits(x, "ExpressionSet")) {
            r <- made4:::getdata(x)
        } else {
            r <- x
        }
        return(r)
    })
    # Reject features whose value equals the data-set minimum in every
    # observation (treated as never expressed).
    for (i in names(df.list)) {
        df <- df.list[[i]]
        minn <- min(df)
        ind <- apply(df, 1, function(x) all(x == minn))
        if (any(ind))
            stop(paste("There are features in data.frame ", i,
                " do not\n expressed in all observations, please remove these features"))
    }
    # All data sets must have the same number of columns (observations)
    # and contain no infinite or missing values.
    N <- sapply(df.list, ncol)
    df.list <- lapply(df.list, as.matrix)
    if (length(unique(N)) != 1)
        stop("Nonequal number of individual across data.frames")
    infi <- sapply(df.list, function(x) any(is.infinite(x)))
    if (any(infi))
        stop("Infinite numeric in the data.frames")
    na <- sapply(df.list, function(x) any(is.na(x)))
    if (any(na))
        stop("NAs in the data.frames")
    if (is.null(names(df.list)))
        names(df.list) <- paste("df", 1:length(df.list), sep = "")
    # ====================================
    # ===== lapack function which is called by svd fails to converge in some cases
    # ===== This function is used to replace svd when this happens
    # =================================
    # ade4::mcoa is re-homed into 'fakeEnv' so that the 'svd' symbol it
    # resolves can be redirected (to base::svd or an eigen-based fallback).
    # NOTE(review): environment(mcoa) assumes ade4 is attached.
    mcoaEnv <- environment(mcoa)
    fakeEnv <- new.env(parent = mcoaEnv)
    mcoa2 <- ade4::mcoa
    environment(mcoa2) <- fakeEnv
    if (is.logical(svd)) {
        if (svd)
            assign("svd", base::svd, fakeEnv)
        else
            # Eigen-based replacement for base::svd: decompose tcrossprod(df)
            # and rebuild d, u and v; negative eigenvalues are clamped to a
            # tiny positive number before taking the square root.
            assign("svd", function(df) {
                res <- list()
                m <- tcrossprod(df, df)
                em <- eigen(m)
                em$values[em$values < 0] <- 1e-30
                res$d <- sqrt(em$values)
                res$u <- em$vectors
                res$v <- t(apply(t(df) %*% em$vectors, 1, function(x) x/sqrt(em$values)))
                return(res)},
                fakeEnv)
    } else
        stop("logical value required for svd")
    # Pairwise RV coefficients between the transformed data sets.
    pairwise.rv <- function(data.list) {
        ms <- sapply(data.list, function(x) {
            x <- c(crossprod(as.matrix(x)))
            x <- x/sqrt(sum(x^2))})
        m <- crossprod(ms)
        colnames(m) <- rownames(m) <- names(data.list)
        return(m)
    }
    # Non-symmetric correspondence analysis branch.
    if (nsc) {
        df.list <- lapply(df.list, made4::array2ade4, pos = TRUE)
        coa.list <- lapply(df.list, dudi.nsc, scannf = FALSE, nf = cia.nf)
        coa.list.t <- lapply(coa.list, ade4:::t.dudi)
        dfl <- lapply(coa.list, function(x) x$tab)
        RV <- pairwise.rv(dfl)
        ktcoa <- ktab.list.dudi(coa.list.t)
    }
    # Correspondence analysis branch: the first data set defines the row
    # weights reused by all remaining data sets.
    if (!nsc) {
        df.list <- lapply(df.list, t)
        df.list <- lapply(df.list, made4::array2ade4, pos = TRUE)
        coa1 <- dudi.coa(df.list[[1]], scannf = FALSE, nf = cia.nf)
        coa.list <- lapply(df.list[-1], made4:::dudi.rwcoa, rowweights = coa1$lw,
            scannf = FALSE, nf = cia.nf)
        coa.list.name <- names(coa.list)
        coa.list$coa1 <- coa1
        coa.list <- coa.list[c("coa1", coa.list.name)]
        names(coa.list)[1] <- names(df.list)[1]
        # Round the row weights so ktab.list.dudi treats them as equal.
        for (i in 1:length(coa.list)) {
            coa.list[[i]]$lw <- round(coa.list[[i]]$lw, digits = 8)
        }
        dfl <- lapply(coa.list, function(x) x$tab)
        dfl <- lapply(dfl, t)
        RV <- pairwise.rv(dfl)
        ktcoa <- ktab.list.dudi(coa.list)
    }
    # First attempt with the configured svd; on LAPACK convergence failure,
    # retry with the eigen-based decomposition (same closure as above).
    mcoin <- try(mcoa2(X = ktcoa, nf = cia.nf, scannf = FALSE), silent=TRUE) # ...
    if (inherits (mcoin, "try-error")) {
        warning("'svd' fail to convergence, 'eigen' used to perform singular value decomposition")
        assign("svd", function(df) {
            res <- list()
            m <- tcrossprod(df, df)
            em <- eigen(m)
            em$values[em$values < 0] <- 1e-30
            res$d <- sqrt(em$values)
            res$u <- em$vectors
            res$v <- t(apply(t(df) %*% em$vectors, 1, function(x) x/sqrt(em$values)))
            return(res)},
            fakeEnv)
        mcoin <- mcoa2(X = ktcoa, nf = cia.nf, scannf = FALSE)
    }
    # Scale the column coordinates and copy bookkeeping onto the result.
    tab <- scalewt(mcoin$Tco, wt = ktcoa$cw, center = F, scale = T)
    colnames(tab) <- paste("Axis", 1:ncol(tab), sep = "")
    mcoin$Tlw <- ktcoa$lw
    mcoin$Tcw <- ktcoa$cw
    mcoin$blo <- ktcoa$blo
    mcoin$Tc1 <- tab
    mcoin$RV <- RV
    call <- match.call()
    mciares <- list(call = call, mcoa = mcoin, coa = coa.list)
    class(mciares) <- "mcia"
    return(mciares)
}
|
### Utah DWQ Lake Dashboard
### Jake Vander Laan, Utah DWQ, jvander@utah.gov
### Version 3.0 2022 including 2022 IR data
library(wqTools)
library(leaflet)
library(plotly)
#heatmap_param_choices=c("Dissolved oxygen (DO)","Temperature, water","pH","DO-temperature habitat profile width")
#names(heatmap_param_choices)=c("Dissolved oxygen", "Temperature", "pH", "DO/temperature lens")
## Shiny UI for the lake profile dashboard: iframe-resizer support script,
## DWQ-branded header, a left column with map/table site selectors and a
## right column with profile, time-series and trophic-indicator plot tabs.
ui <-fluidPage(
	tags$head(
		# Lets the app resize correctly when embedded in an iframe.
		tags$script(src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/3.5.16/iframeResizer.contentWindow.min.js",
			type="text/javascript")
	),
	# Header
	headerPanel(
		title=tags$a(href='https://deq.utah.gov/division-water-quality/',tags$img(src='deq_dwq_logo.png', height = 75, width = 75*2.85), target="_blank"),
		tags$head(tags$link(rel = "icon", type = "image/png", href = "dwq_logo_small.png"), windowTitle="Lake profile dashboard")
	),
	# Input widgets
	fluidRow(column(5),column(7,
		# Opens the external user guide in a new browser tab.
		actionButton('help', 'User guide', icon=icon('question'), onclick ="window.open('https://bookdown.org/jakevl/user_guide/user_guide.html', '_blank')",
			style='color: #fff; background-color: #337ab7; border-color: #2e6da4%')
	)),
	br(),
	fluidRow(
		# Left column: site selection via map click or table row click.
		column(5,
			conditionalPanel(condition="input.plot_tabs!='User guide'",
				tabsetPanel(id="ui_tab",
					tabPanel("Map",
						column(12,h4("Click a site or AU"),shinycssloaders::withSpinner(leaflet::leafletOutput("map", height="600px"),size=2, color="#0080b7"))
					),
					tabPanel("Table",
						column(12, h4("Click a row"), div(DT::dataTableOutput("table_input"), style = "font-size:70%"))
					)
				)
			),
			conditionalPanel(condition="input.plot_tabs=='User guide'",
				column(12)
			)
		),
		# Right column: plot tabs (outputs are rendered in the server).
		column(7,tabsetPanel(id="plot_tabs",
			tabPanel("Profile time series",
				fluidRow(column(8,
					uiOutput("date_slider"),
					radioButtons("ts_plot_type","Plot type:", choices=c("Habitable width", "Water column exceedances"), inline=T),
					#conditionalPanel(condition="input.ts_plot_type=='Heatmap'",
					#	selectInput("heatmap_param",label="Heatmap parameter:",choices=heatmap_param_choices)
					#),
					checkboxInput("show_dates", label="Show all profile dates", value=TRUE),
					#conditionalPanel(condition="input.ts_plot_type=='Heatmap'",
					#	plotOutput("heatmap")
					#),
					conditionalPanel(condition="input.ts_plot_type=='Habitable width'",
						plotOutput("hab_width")
					),
					conditionalPanel(condition="input.ts_plot_type=='Water column exceedances'",
						plotOutput("pct_exc")
					)
				))
			),
			tabPanel("Individual profiles",
				fluidRow(
					column(4, uiOutput("date_select"))
				),
				fluidRow(
					column(4,h4("Profile plot"),plotOutput("ind_prof_plot", height="500px")),
					column(8,h4("Profile data"),div(DT::dataTableOutput("profile_table"), style = "font-size:80%"))
				)
			),
			tabPanel("Trophic indicators",
				shinyWidgets::radioGroupButtons('trophic_type', 'Plot type:', choices=c('Time series','Boxplot','Scatter plot'), checkIcon = list(yes = icon("check"))),
				conditionalPanel(condition="input.trophic_type=='Time series'",
					plotlyOutput('tsi_timeseries', height="600px", width="1100px")
				),
				conditionalPanel(condition="input.trophic_type=='Boxplot'",
					plotlyOutput('tsi_boxplot', height="600px", width="900px")
				),
				conditionalPanel(condition="input.trophic_type=='Scatter plot'",
					plotOutput('tsi3d', height="600px", width="600px")
				)
			)
		))
	)
)
server <- function(input, output, session){
options(warn=-1)
# Loading modal to keep user out of trouble while map draws...
showModal(modalDialog(title="MAP LOADING - PLEASE WAIT...","Please wait for map to draw before proceeding.",size="l",footer=NULL))
# Remove modal when app is ready
observe({
req(map,mlid_param_asmnts)
removeModal()
})
# Load data
load("data/lake_data.Rdata")
# Subset polygons to lake polygons
au_poly=wqTools::au_poly
lake_aus=au_poly[au_poly$AU_Type=="Reservoir/Lake",]
# Extract site locations
prof_sites=unique(prof_asmnts_all$profile_asmnts_mlid_param[,c("ASSESS_ID","AU_NAME","IR_MLID","IR_MLNAME","IR_Lat","IR_Long")])
prof_sites$MonitoringLocationTypeName="Lake/Reservoir"
prof_sites=dplyr::rename(prof_sites, LatitudeMeasure="IR_Lat", LongitudeMeasure="IR_Long", MonitoringLocationIdentifier= "IR_MLID",MonitoringLocationName="IR_MLNAME")
prof_sites$LatitudeMeasure=wqTools::facToNum(prof_sites$LatitudeMeasure)
prof_sites$LongitudeMeasure=wqTools::facToNum(prof_sites$LongitudeMeasure)
# Extract profiles long
profiles_long=prof_asmnts_all$profiles_long
### Delete when data are fixed
profiles_long=subset(profiles_long, !(ASSESS_ID %in% c("UT-L-16020201-004_01", "UT-L-16020201-004_02") & (ActivityStartDate >= "2021-03-15") | ActivityStartDate=="2019-11-04" | ActivityStartDate=="2018-10-10")) ###drop faulty Utah Lake profiles
###
profiles_long$MonitoringLocationIdentifier=profiles_long$IR_MLID
profiles_long=unique(profiles_long[,c("DataLoggerLine","ActivityIdentifier","ActivityStartDate","R3172ParameterName","IR_Value","IR_Unit","NumericCriterion","MonitoringLocationIdentifier")])
profiles_long$ActivityStartDate=as.Date(profiles_long$ActivityStartDate,format='%Y-%m-%d')
# Remove profiles where depths are not provided
depths=profiles_long[profiles_long$R3172ParameterName=="Profile depth",]
depth_actids=unique(depths$ActivityIdentifier)
profiles_long=profiles_long[profiles_long$ActivityIdentifier %in% depth_actids,]
# Remove any sites that do not produce any valid profiles
prof_sites=prof_sites[prof_sites$MonitoringLocationIdentifier %in% profiles_long$MonitoringLocationIdentifier,]
prof_sites$QA=ifelse(grepl("Duplicate",prof_sites$MonitoringLocationName) | grepl("Replicate",prof_sites$MonitoringLocationName) | grepl("Dummy",prof_sites$MonitoringLocationName) |
grepl("replaced",prof_sites$MonitoringLocationName) | grepl("Blank",prof_sites$MonitoringLocationName) | grepl("QA",prof_sites$MonitoringLocationName) |
grepl("QC",prof_sites$MonitoringLocationName),"QA","Not QA")
prof_sites=subset(prof_sites, QA!="QA")
#prof_sites<<-prof_sites
# Extract profiles wide
profiles_wide=prof_asmnts_all$profiles_wide
profiles_wide=profiles_wide[profiles_wide$ActivityIdentifier %in% profiles_long$ActivityIdentifier,]
profiles_wide$ActivityStartDate=as.Date(profiles_wide$ActivityStartDate,format='%Y-%m-%d')
# Calc max depth for each profile
max_depth=aggregate(Depth_m~ActivityIdentifier,data=profiles_wide, FUN='max', na.rm=T)
names(max_depth)[names(max_depth)=="Depth_m"]="max_depth_m"
# Extract individual profile assessments
ind_prof_asmnts=prof_asmnts_all$profile_asmnts_individual
ind_prof_asmnts=ind_prof_asmnts[ind_prof_asmnts$ActivityIdentifier %in% profiles_long$ActivityIdentifier,]
ind_prof_asmnts$ActivityStartDate=as.Date(ind_prof_asmnts$ActivityStartDate,format='%Y-%m-%d')
ind_prof_asmnts=merge(ind_prof_asmnts,max_depth,all.x=T)
ind_prof_asmnts=within(ind_prof_asmnts,{
ph_pct_exc=pH_exc_cnt/samp_count*100
temp_pct_exc=temp_exc_cnt/samp_count*100
do_pct_exc=do_exc_cnt/samp_count*100
})
# Extract mlid/param level assessments
mlid_param_asmnts=prof_asmnts_all$profile_asmnts_mlid_param
mlid_param_asmnts=mlid_param_asmnts[,!names(mlid_param_asmnts) %in% c("IR_Lat","IR_Long","IR_MLNAME")]
names(mlid_param_asmnts)[names(mlid_param_asmnts)=='IR_Cat']='prelim_asmnt'
# Empty reactive values object
reactive_objects=reactiveValues()
# Select map set up
map = leaflet::createLeafletMap(session, 'map')
session$onFlushed(once = T, function() {
output$map <- leaflet::renderLeaflet({
buildMap(sites=prof_sites, plot_polys=TRUE, au_poly=lake_aus) %>% leaflet::showGroup('Assessment units') %>% leaflet::clearControls() %>% addMapPane("highlight", zIndex = 418)
})
})
# Table interface
output$table_input=DT::renderDataTable({
DT::datatable(mlid_param_asmnts, selection='single', rownames=FALSE, filter="top",
options = list(scrollY = '600px', paging = FALSE, scrollX=TRUE, dom="ltipr"#,
#searchCols = list(NULL,list(search=paste(reactive_objects$sel_mlid)))
)
)
})
# Map marker click (to identify selected site)
observeEvent(input$map_marker_click, {
req(profiles_long)
site_click <- input$map_marker_click
if (is.null(site_click)){return()}
siteid=site_click$id
reactive_objects$sel_mlid=siteid
reactive_objects$selected_au=as.character(mlid_param_asmnts[mlid_param_asmnts$IR_MLID==siteid,"ASSESS_ID"][1])
})
# Highlight selected marker
observeEvent(reactive_objects$sel_mlid, {
map_proxy %>%
clearGroup(group='highlight') %>%
addCircleMarkers(data=subset(prof_sites, MonitoringLocationIdentifier %in% reactive_objects$sel_mlid), lat=~LatitudeMeasure, lng=~LongitudeMeasure,
group='highlight', options = pathOptions(pane = "highlight"), radius = 20, color='chartreuse', opacity = 0.75, fillOpacity = 0.4)
})
# Map AU click (to identify selected AU and site)
observeEvent(input$map_shape_click,{
au_click = input$map_shape_click$id
if(!is.null(au_click)){
reactive_objects$selected_au=as.character(unique(au_poly$ASSESS_ID[au_poly$polyID==au_click]))
if(is.null(reactive_objects$sel_mlid)){
reactive_objects$sel_mlid=mlid_param_asmnts[mlid_param_asmnts$ASSESS_ID==reactive_objects$selected_au,"IR_MLID"][1]
}else{
if((!prof_sites[prof_sites$MonitoringLocationIdentifier==reactive_objects$sel_mlid,"ASSESS_ID"][1]==reactive_objects$selected_au) | (is.na(reactive_objects$sel_mlid))){
reactive_objects$sel_mlid=mlid_param_asmnts[mlid_param_asmnts$ASSESS_ID==reactive_objects$selected_au,"IR_MLID"][1]
}
}
}
})
# Table row click (to identify selected site & parameter)
observe({
req(input$table_input_rows_selected)
row_click=input$table_input_rows_selected
siteid=mlid_param_asmnts[row_click,"IR_MLID"]
reactive_objects$sel_param=mlid_param_asmnts[row_click,"R3172ParameterName"]
reactive_objects$sel_mlid=siteid
reactive_objects$selected_au=as.character(mlid_param_asmnts[mlid_param_asmnts$IR_MLID==siteid,"ASSESS_ID"][1])
})
# Change map zoom on table click & update selected heatmap_param to selected row param
map_proxy=leaflet::leafletProxy("map")
observeEvent(input$table_input_rows_selected,{
lat=prof_sites[prof_sites$MonitoringLocationIdentifier==reactive_objects$sel_mlid,"LatitudeMeasure"]
long=prof_sites[prof_sites$MonitoringLocationIdentifier==reactive_objects$sel_mlid,"LongitudeMeasure"]
map_proxy %>% leaflet::setView(lng=long, lat=lat, zoom=12)
#updateSelectInput(session, "heatmap_param",selected=reactive_objects$sel_param)
})
# Select profiles & date options based on selected site ID
observe({
req(reactive_objects$sel_mlid)
reactive_objects$sel_profiles=profiles_long[profiles_long$MonitoringLocationIdentifier==reactive_objects$sel_mlid,]
profile_dates=unique(reactive_objects$sel_profiles$ActivityStartDate)
profile_dates=profile_dates[order(profile_dates)]
reactive_objects$profile_dates=profile_dates
})
## Filter table to match clicked site from map
#input_table_proxy = DT::dataTableProxy('table_input')
#observeEvent(reactive_objects$sel_mlid,{
# search=as.character(reactive_objects$sel_mlid)
# input_table_proxy %>% DT::clearSearch() %>% DT::updateSearch(keywords = list(global = NULL, columns=c("","",search)))
#})
# Profile date selection
output$date_select <- renderUI({
req(reactive_objects$profile_dates)
selectInput("date_select", "Profile date:", reactive_objects$profile_dates)
})
output$date_slider <- renderUI({
req(reactive_objects$profile_dates)
date_min=min(reactive_objects$profile_dates)
date_max=max(reactive_objects$profile_dates)
sliderInput("date_slider", "Date range:", min=date_min, max=date_max, value=c(date_min,date_max))
})
# Generate selected aid
observe({
req(input$date_select)
reactive_objects$selectedActID=reactive_objects$sel_profiles[reactive_objects$sel_profiles$ActivityStartDate==input$date_select,"ActivityIdentifier"][1]
})
# Profile plot output
output$ind_prof_plot=renderPlot({
req(reactive_objects$sel_profiles,reactive_objects$selectedActID)
one_profile=reactive_objects$sel_profiles[reactive_objects$sel_profiles$ActivityIdentifier==reactive_objects$selectedActID,]
do_crit=one_profile[one_profile$R3172ParameterName=="Dissolved oxygen (DO)","NumericCriterion"][1]
temp_crit=one_profile[one_profile$R3172ParameterName=="Temperature, water","NumericCriterion"][1]
one_profile=unique(one_profile[,c("DataLoggerLine","ActivityIdentifier","ActivityStartDate","R3172ParameterName","IR_Value","IR_Unit","MonitoringLocationIdentifier")])
profilePlot(one_profile, parameter = "R3172ParameterName",
units = "IR_Unit",
depth = "Profile depth", do = "Dissolved oxygen (DO)",
temp = "Temperature, water", pH = "pH",
value_var = "IR_Value", line_no = "DataLoggerLine",
pH_crit=c(6.5,9), do_crit=do_crit, temp_crit=temp_crit)
box()
})
# Data table output
observe({
req(reactive_objects$selectedActID)
table_data=profiles_wide[profiles_wide$ActivityIdentifier==reactive_objects$selectedActID,c("IR_MLID","ActivityStartDate","Depth_m","DO_mgL","pH","Temp_degC","do_exc","pH_exc","temp_exc")]
reactive_objects$table_data=table_data[order(table_data$Depth_m),]
})
output$profile_table=DT::renderDataTable({
req(reactive_objects$table_data)
DT::datatable(reactive_objects$table_data, selection='multiple',
options = list(scrollY = '500px', paging = FALSE, scrollX = TRUE, searching=F, digits = 3)
) %>%
DT::formatRound(columns=c('Depth_m','DO_mgL','pH','Temp_degC'), digits=3) %>%
DT::formatStyle("DO_mgL", "do_exc", backgroundColor = DT::styleEqual(1, "orange")) %>%
DT::formatStyle("pH", "pH_exc", backgroundColor = DT::styleEqual(1, "orange")) %>%
DT::formatStyle("Temp_degC", "temp_exc", backgroundColor = DT::styleEqual(1, "orange"))
})
prof_table_proxy = DT::dataTableProxy('profile_table')
observe({
prof_table_proxy %>% DT::hideCols(hide=which(names(reactive_objects$table_data) %in% c("do_exc","pH_exc","temp_exc")))
})
# Extract profile assessments & profiles_wide for selected site
observe({
req(reactive_objects$sel_mlid,input$date_slider)
selected_prof_asmnts=ind_prof_asmnts[
ind_prof_asmnts$IR_MLID == reactive_objects$sel_mlid &
ind_prof_asmnts$ActivityStartDate>=input$date_slider[1] &
ind_prof_asmnts$ActivityStartDate<=input$date_slider[2]
,]
selected_prof_asmnts=selected_prof_asmnts[order(selected_prof_asmnts$ActivityStartDate),]
reactive_objects$selected_prof_asmnts=selected_prof_asmnts
reactive_objects$sel_profs_wide=profiles_wide[
profiles_wide$IR_MLID == reactive_objects$sel_mlid &
profiles_wide$ActivityStartDate>=input$date_slider[1] &
profiles_wide$ActivityStartDate<=input$date_slider[2]
,]
})
# Hab width plot output
output$hab_width=renderPlot({
req(reactive_objects$selected_prof_asmnts)
if(dim(reactive_objects$selected_prof_asmnts)[1]>0){
par(mar=c(7.1,5.1,7.1,2.1))
plot(max_hab_width~ActivityStartDate, data=reactive_objects$selected_prof_asmnts, pch=NA, cex=1.5, ylab="Width (m)", xlab="", cex.axis=1.25, cex.lab=1.5, xaxt='n',
ylim=c(0,max(reactive_objects$selected_prof_asmnts$max_depth_m,na.rm=T))
)
abline(h=3,lty=3,lwd=2,col="red")
if(input$show_dates){
axis(1, at=unique(reactive_objects$selected_prof_asmnts$ActivityStartDate), labels=unique(as.Date(reactive_objects$selected_prof_asmnts$ActivityStartDate)), par(las=2))
}else{
axis.Date(1, reactive_objects$selected_prof_asmnts$ActivityStartDate)
}
points(max_depth_m~ActivityStartDate, data=reactive_objects$selected_prof_asmnts, type='l',lty=2,lwd=2,col="blue")
points(max_hab_width~ActivityStartDate, data=reactive_objects$selected_prof_asmnts, type='b', pch=21, cex=1.5, bg="grey", cex.axis=1.25, cex.lab=1.5)
par(xpd=TRUE)
legend("topleft", inset=c(0.05,-0.3), bty='n', pch=c(NA,21),pt.bg=c(NA,'grey'),lty=c(2,1),col=c("blue","black"),lwd=c(2,1),cex=1.5, legend=c("Max depth","DO/temp habitat"))
par(xpd=FALSE)
}
})
# pct exceedance plot
output$pct_exc=renderPlot({
req(reactive_objects$selected_prof_asmnts)
if(dim(reactive_objects$selected_prof_asmnts)[1]>0){
ymax=max(5,max(max(reactive_objects$selected_prof_asmnts$do_pct_exc, na.rm=T),max(reactive_objects$selected_prof_asmnts$temp_pct_exc, na.rm=T),max(reactive_objects$selected_prof_asmnts$ph_pct_exc, na.rm=T))*1.1)
par(mar=c(7.1,5.1,7.1,2.1))
plot(do_pct_exc~ActivityStartDate, data=reactive_objects$selected_prof_asmnts,ylim=c(0,ymax), pch=24, bg="deepskyblue3", type='b', ylab="% exceedance", cex=1.5, xlab="", xaxt='n')
points(temp_pct_exc~ActivityStartDate, data=reactive_objects$selected_prof_asmnts, pch=21, bg="orange", type='b', cex=1.5)
points(ph_pct_exc~ActivityStartDate, data=reactive_objects$selected_prof_asmnts, pch=22, bg="green", type='b', cex=1.5)
if(input$show_dates){
axis(1, at=unique(reactive_objects$selected_prof_asmnts$ActivityStartDate), labels=unique(as.Date(reactive_objects$selected_prof_asmnts$ActivityStartDate)), par(las=2))
}else{
axis.Date(1, reactive_objects$selected_prof_asmnts$ActivityStartDate)
}
par(xpd=TRUE)
legend("topleft", inset=c(0.05,-0.3), bty='n',horiz=T,
legend=c("Dissolved oxygen","Temperature","pH"),
pch=c(24,21,22), pt.bg=c("deepskyblue3","orange","green"), cex=1.5)
par(xpd=FALSE)
}
})
## Profile heatmap plot
#output$heatmap=renderPlot({
# req(reactive_objects$sel_profs_wide, reactive_objects$sel_profiles)
# if(dim(reactive_objects$sel_profs_wide)[1]>0){
# if(length(unique(reactive_objects$sel_profs_wide$ActivityStartDate))==1 | dim(reactive_objects$sel_profs_wide)[1]<=2){
# plot.new()
# text(0.5,0.5,"Cannot interpolate. See individual profiles.")
# box()
# }else{
# # Define heatmap inputs based on selected parameter
# if(input$heatmap_param=="Dissolved oxygen (DO)"){
# name="Dissolved oxygen (DO)"
# parameter="DO_mgL"
# param_units="mg/L"
# param_lab="Dissolved oxygen"
# }
# if(input$heatmap_param=="pH"){
# name="pH"
# parameter="pH"
# param_units=""
# param_lab="pH"
# }
# if(input$heatmap_param=="Temperature, water"){
# name="Temperature, water"
# parameter="Temp_degC"
# param_units="deg C"
# param_lab="Temperature"
# }
# if(input$heatmap_param=="DO-temperature habitat profile width"){
# name="DO/temperature lens"
# parameter="do_temp_exc"
# param_units=""
# param_lab="DO/temp exc."
# }
# # Define criteria
# if(input$heatmap_param!="DO-temperature habitat profile width"){
# criteria=unique(reactive_objects$sel_profiles[reactive_objects$sel_profiles$R3172ParameterName==name,"NumericCriterion"])
# }else{criteria=1}
# # heat map
# if(input$show_dates){show_dates=TRUE}else{show_dates=FALSE}
# profileHeatMap(reactive_objects$sel_profs_wide,parameter=parameter,param_units=param_units,param_lab=param_lab,depth="Depth_m",depth_units="m",criteria=criteria,show_dates=show_dates)
# }
# }
#})
# Trophic indicators tab
## Extract trophic data
observeEvent(reactive_objects$selected_au, ignoreInit=T, {
#reactive_objects=list()
#reactive_objects$selected_au='UT-L-16020201-004_01'
trophic_data_flat=trophic_data[trophic_data$ASSESS_ID==reactive_objects$selected_au,]
trophic_data_flat$TSI[trophic_data_flat$TSI<1]=1
trophic_data_flat$TSI[trophic_data_flat$TSI>100]=100
trophic_data_flat$year=lubridate::year(trophic_data_flat$ActivityStartDate)
trophic_data_flat$month=lubridate::month(trophic_data_flat$ActivityStartDate)
tsi_wide=reshape2::dcast(trophic_data_flat, MonitoringLocationIdentifier+ActivityStartDate+AU_NAME+ASSESS_ID~CharacteristicName, value.var='TSI', fun.aggregate=mean, na.rm=T)
reactive_objects$trophic_data_flat=trophic_data_flat
reactive_objects$tsi_wide=dplyr::rename(tsi_wide, TSIchl='Chlorophyll a', TSIsd='Depth, Secchi disk depth', TSItp='Phosphate-phosphorus')
})
## 3D TSI plot
### plot function
## Carlson trophic state index (TSI) deviation plot.
## For each sample, plots TSI(chl a) - TSI(Secchi) against
## TSI(chl a) - TSI(total P) on fixed [-100, 100] axes, with annotated
## quadrants interpreting the deviations (particle size, P limitation, ...).
##
## Args:
##   data: data frame with numeric columns TSIchl, TSIsd and TSItp
##         (rows containing NA are dropped).
##   title: main title for the plot.
## Returns: nothing; draws on the active graphics device.
plot3dTSI <- function(data, title) {
  data <- na.omit(data)
  ChlA_Secchi <- data$TSIchl - data$TSIsd   # x-axis deviation
  ChlA_TP <- data$TSIchl - data$TSItp       # y-axis deviation
  # Empty frame; axes are drawn manually through the origin below.
  plot(NA, NA, xaxt = 'n', yaxt = 'n', ylim = c(-100, 100), xlim = c(-100, 100),
       ylab = "", xlab = "", bty = "n", main = title, cex.axis = 2, cex.lab = 1.5)
  axis(1, at = c(-50, 50), pos = 0)
  axis(1, at = seq(-75, 75, 25), pos = 0, labels = FALSE)
  axis(2, at = c(-50, 50), pos = 0, las = 1)
  # NOTE: the original drew this tick axis twice; the duplicate call was removed.
  axis(2, at = seq(-75, 75, 25), pos = 0, las = 1, labels = FALSE)
  # 1:1 reference line (TSI Secchi == TSI TP) with arrowheads at both ends.
  segments(-55, -55, 55, 55, lty = 2, lwd = 2, col = "dimgrey")
  arrows(40, -40, 52, -52, lwd = 2, col = "dimgrey", length = 0.125)
  arrows(-40, 40, -52, 52, lwd = 2, col = "dimgrey", length = 0.125)
  rect(-100, -100, 100, 100)
  points(ChlA_Secchi, ChlA_TP, pch = 21, col = "black", bg = "orange", cex = 3)
  #points(mean(ChlA_Secchi),mean(ChlA_TP),pch="+",col="darkgreen",cex=5)
  # Quadrant / margin annotations (xpd = NA permits drawing outside the box).
  par(xpd = NA)
  text(x = -75, y = 110, "Small particulates")
  arrows(-15, 110, -45, 110, lwd = 2, col = "dimgrey", length = 0.125)
  text(x = 75, y = 110, "Large particulates")
  arrows(15, 110, 45, 110, lwd = 2, col = "dimgrey", length = 0.125)
  text(x = -50, y = -110, "TSI ChlA < TSI Secchi")
  text(y = -125, x = 0, "TSI ChlA - TSI Secchi", cex = 1.5)
  text(x = -125, y = 0, "TSI ChlA - TSI TP", cex = 1.5, srt = 90)
  text(x = 50, y = -110, "TSI ChlA > TSI Secchi")
  text(x = -110, y = -50, "TSI ChlA < TSI TP", srt = 90)
  text(x = -110, y = 50, "TSI ChlA > TSI TP", srt = 90)
  text(x = 50, y = -90, "Zooplankton grazing")
  text(x = -50, y = 90, "Dissolved color/clay particles")
  text(x = 57, y = -57, "TSI Secchi < TSI TP", srt = 45)
  text(x = -57, y = 57, "TSI Secchi > TSI TP", srt = 45)
  text(x = -95, y = -65, "P surplus", srt = 90)
  text(x = -95, y = 65, "P limitation", srt = 90)
  text(x = 40, y = 40, adj = c(0, -0.5), "TSI Secchi = TSI TP", srt = 45)
  arrows(-95, -15, -95, -45, lwd = 2, col = "dimgrey", length = 0.125)
  arrows(-95, 15, -95, 45, lwd = 2, col = "dimgrey", length = 0.125)
}
output$tsi3d=renderPlot({
req(reactive_objects$tsi_wide)
plot3dTSI(reactive_objects$tsi_wide, title=reactive_objects$tsi_wide$AU_NAME[1])
})
output$tsi_boxplot=renderPlotly({
req(reactive_objects$trophic_data_flat)
title=reactive_objects$trophic_data_flat$AU_NAME[1]
au_vis=as.list(append(T, rep(F, length(unique(reactive_objects$trophic_data_flat$MonitoringLocationIdentifier)))))
site_vis=as.list(append(F, rep(T, length(unique(reactive_objects$trophic_data_flat$MonitoringLocationIdentifier)))))
plot_ly(data=reactive_objects$trophic_data_flat) %>%
add_trace(type = 'box', y = ~TSI, x=~CharacteristicName, visible=T, name='TSI') %>%
add_trace(type = 'box', y = ~TSI, x=~CharacteristicName, color=~MonitoringLocationIdentifier, visible=F) %>%
layout(title = title,
boxmode = "group",
yaxis = list(side = 'left', title = 'TSI'),
xaxis = list(title = ''),
updatemenus = list(
list(
buttons = list(
list(method = "update", label='Group to AU',
args = list(list(visible = au_vis))
),
list(method = "update", label='Split by site',
args = list(list(visible = site_vis))
)
)
)
)
) %>%
config(displaylogo = FALSE,
modeBarButtonsToRemove = c(
'sendDataToCloud',
'select2d',
'lasso2d'
)
)
})
output$tsi_timeseries=renderPlotly({
req(reactive_objects$trophic_data_flat)
title=reactive_objects$trophic_data_flat$AU_NAME[1]
param_length=length(unique(reactive_objects$trophic_data_flat$CharacteristicName))
year_vis=as.list(append(rep(T, param_length), rep(F, param_length)))
month_vis=as.list(append(rep(F, param_length), rep(T, param_length)))
plot_ly(data=reactive_objects$trophic_data_flat) %>%
add_trace(type = 'box', y = ~TSI, x=~year, color=~CharacteristicName, visible=T) %>%
add_trace(type = 'box', y = ~TSI, x=~month, color=~CharacteristicName, visible=F) %>%
layout(title = title,
boxmode = "group",
yaxis = list(side = 'left', title = 'TSI'),
xaxis = list(title = 'Year'),
updatemenus = list(
list(
buttons = list(
list(method = "update", label='Year',
args = list(list(visible = year_vis),
list(xaxis = list(title = 'Year'))
)
),
list(method = "update", label='Month',
args = list(list(visible = month_vis),
list(xaxis = list(title = 'Month'))
)
)
)
)
)
) %>%
config(displaylogo = FALSE,
modeBarButtonsToRemove = c(
'sendDataToCloud',
'select2d',
'lasso2d'
)
)
})
}
## run app
shinyApp(ui = ui, server = server)
|
/inst/lakeDashboard/app.r
|
permissive
|
utah-dwq/irTools
|
R
| false
| false
| 24,481
|
r
|
### Utah DWQ Lake Dashboard
### Jake Vander Laan, Utah DWQ, jvander@utah.gov
### Version 3.0 2022 including 2022 IR data
library(wqTools)
library(leaflet)
library(plotly)
#heatmap_param_choices=c("Dissolved oxygen (DO)","Temperature, water","pH","DO-temperature habitat profile width")
#names(heatmap_param_choices)=c("Dissolved oxygen", "Temperature", "pH", "DO/temperature lens")
ui <-fluidPage(
tags$head(
tags$script(src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/3.5.16/iframeResizer.contentWindow.min.js",
type="text/javascript")
),
# Header
headerPanel(
title=tags$a(href='https://deq.utah.gov/division-water-quality/',tags$img(src='deq_dwq_logo.png', height = 75, width = 75*2.85), target="_blank"),
tags$head(tags$link(rel = "icon", type = "image/png", href = "dwq_logo_small.png"), windowTitle="Lake profile dashboard")
),
# Input widgets
fluidRow(column(5),column(7,
actionButton('help', 'User guide', icon=icon('question'), onclick ="window.open('https://bookdown.org/jakevl/user_guide/user_guide.html', '_blank')",
style='color: #fff; background-color: #337ab7; border-color: #2e6da4%')
)),
br(),
fluidRow(
column(5,
conditionalPanel(condition="input.plot_tabs!='User guide'",
tabsetPanel(id="ui_tab",
tabPanel("Map",
column(12,h4("Click a site or AU"),shinycssloaders::withSpinner(leaflet::leafletOutput("map", height="600px"),size=2, color="#0080b7"))
),
tabPanel("Table",
column(12, h4("Click a row"), div(DT::dataTableOutput("table_input"), style = "font-size:70%"))
)
)
),
conditionalPanel(condition="input.plot_tabs=='User guide'",
column(12)
)
),
column(7,tabsetPanel(id="plot_tabs",
tabPanel("Profile time series",
fluidRow(column(8,
uiOutput("date_slider"),
radioButtons("ts_plot_type","Plot type:", choices=c("Habitable width", "Water column exceedances"), inline=T),
#conditionalPanel(condition="input.ts_plot_type=='Heatmap'",
# selectInput("heatmap_param",label="Heatmap parameter:",choices=heatmap_param_choices)
#),
checkboxInput("show_dates", label="Show all profile dates", value=TRUE),
#conditionalPanel(condition="input.ts_plot_type=='Heatmap'",
# plotOutput("heatmap")
#),
conditionalPanel(condition="input.ts_plot_type=='Habitable width'",
plotOutput("hab_width")
),
conditionalPanel(condition="input.ts_plot_type=='Water column exceedances'",
plotOutput("pct_exc")
)
))
),
tabPanel("Individual profiles",
fluidRow(
column(4, uiOutput("date_select"))
),
fluidRow(
column(4,h4("Profile plot"),plotOutput("ind_prof_plot", height="500px")),
column(8,h4("Profile data"),div(DT::dataTableOutput("profile_table"), style = "font-size:80%"))
)
),
tabPanel("Trophic indicators",
shinyWidgets::radioGroupButtons('trophic_type', 'Plot type:', choices=c('Time series','Boxplot','Scatter plot'), checkIcon = list(yes = icon("check"))),
conditionalPanel(condition="input.trophic_type=='Time series'",
plotlyOutput('tsi_timeseries', height="600px", width="1100px")
),
conditionalPanel(condition="input.trophic_type=='Boxplot'",
plotlyOutput('tsi_boxplot', height="600px", width="900px")
),
conditionalPanel(condition="input.trophic_type=='Scatter plot'",
plotOutput('tsi3d', height="600px", width="600px")
)
)
))
)
)
server <- function(input, output, session){
options(warn=-1)
# Loading modal to keep user out of trouble while map draws...
showModal(modalDialog(title="MAP LOADING - PLEASE WAIT...","Please wait for map to draw before proceeding.",size="l",footer=NULL))
# Remove modal when app is ready
observe({
req(map,mlid_param_asmnts)
removeModal()
})
# Load data
load("data/lake_data.Rdata")
# Subset polygons to lake polygons
au_poly=wqTools::au_poly
lake_aus=au_poly[au_poly$AU_Type=="Reservoir/Lake",]
# Extract site locations
prof_sites=unique(prof_asmnts_all$profile_asmnts_mlid_param[,c("ASSESS_ID","AU_NAME","IR_MLID","IR_MLNAME","IR_Lat","IR_Long")])
prof_sites$MonitoringLocationTypeName="Lake/Reservoir"
prof_sites=dplyr::rename(prof_sites, LatitudeMeasure="IR_Lat", LongitudeMeasure="IR_Long", MonitoringLocationIdentifier= "IR_MLID",MonitoringLocationName="IR_MLNAME")
prof_sites$LatitudeMeasure=wqTools::facToNum(prof_sites$LatitudeMeasure)
prof_sites$LongitudeMeasure=wqTools::facToNum(prof_sites$LongitudeMeasure)
# Extract profiles long
profiles_long=prof_asmnts_all$profiles_long
### Delete when data are fixed
profiles_long=subset(profiles_long, !(ASSESS_ID %in% c("UT-L-16020201-004_01", "UT-L-16020201-004_02") & (ActivityStartDate >= "2021-03-15") | ActivityStartDate=="2019-11-04" | ActivityStartDate=="2018-10-10")) ###drop faulty Utah Lake profiles
###
profiles_long$MonitoringLocationIdentifier=profiles_long$IR_MLID
profiles_long=unique(profiles_long[,c("DataLoggerLine","ActivityIdentifier","ActivityStartDate","R3172ParameterName","IR_Value","IR_Unit","NumericCriterion","MonitoringLocationIdentifier")])
profiles_long$ActivityStartDate=as.Date(profiles_long$ActivityStartDate,format='%Y-%m-%d')
# Remove profiles where depths are not provided
depths=profiles_long[profiles_long$R3172ParameterName=="Profile depth",]
depth_actids=unique(depths$ActivityIdentifier)
profiles_long=profiles_long[profiles_long$ActivityIdentifier %in% depth_actids,]
# Remove any sites that do not produce any valid profiles
prof_sites=prof_sites[prof_sites$MonitoringLocationIdentifier %in% profiles_long$MonitoringLocationIdentifier,]
prof_sites$QA=ifelse(grepl("Duplicate",prof_sites$MonitoringLocationName) | grepl("Replicate",prof_sites$MonitoringLocationName) | grepl("Dummy",prof_sites$MonitoringLocationName) |
grepl("replaced",prof_sites$MonitoringLocationName) | grepl("Blank",prof_sites$MonitoringLocationName) | grepl("QA",prof_sites$MonitoringLocationName) |
grepl("QC",prof_sites$MonitoringLocationName),"QA","Not QA")
prof_sites=subset(prof_sites, QA!="QA")
#prof_sites<<-prof_sites
# Extract profiles wide
profiles_wide=prof_asmnts_all$profiles_wide
profiles_wide=profiles_wide[profiles_wide$ActivityIdentifier %in% profiles_long$ActivityIdentifier,]
profiles_wide$ActivityStartDate=as.Date(profiles_wide$ActivityStartDate,format='%Y-%m-%d')
# Calc max depth for each profile
max_depth=aggregate(Depth_m~ActivityIdentifier,data=profiles_wide, FUN='max', na.rm=T)
names(max_depth)[names(max_depth)=="Depth_m"]="max_depth_m"
# Extract individual profile assessments
ind_prof_asmnts=prof_asmnts_all$profile_asmnts_individual
ind_prof_asmnts=ind_prof_asmnts[ind_prof_asmnts$ActivityIdentifier %in% profiles_long$ActivityIdentifier,]
ind_prof_asmnts$ActivityStartDate=as.Date(ind_prof_asmnts$ActivityStartDate,format='%Y-%m-%d')
ind_prof_asmnts=merge(ind_prof_asmnts,max_depth,all.x=T)
ind_prof_asmnts=within(ind_prof_asmnts,{
ph_pct_exc=pH_exc_cnt/samp_count*100
temp_pct_exc=temp_exc_cnt/samp_count*100
do_pct_exc=do_exc_cnt/samp_count*100
})
# Extract mlid/param level assessments
mlid_param_asmnts=prof_asmnts_all$profile_asmnts_mlid_param
mlid_param_asmnts=mlid_param_asmnts[,!names(mlid_param_asmnts) %in% c("IR_Lat","IR_Long","IR_MLNAME")]
names(mlid_param_asmnts)[names(mlid_param_asmnts)=='IR_Cat']='prelim_asmnt'
# Empty reactive values object
reactive_objects=reactiveValues()
# Select map set up
map = leaflet::createLeafletMap(session, 'map')
session$onFlushed(once = T, function() {
output$map <- leaflet::renderLeaflet({
buildMap(sites=prof_sites, plot_polys=TRUE, au_poly=lake_aus) %>% leaflet::showGroup('Assessment units') %>% leaflet::clearControls() %>% addMapPane("highlight", zIndex = 418)
})
})
# Table interface
output$table_input=DT::renderDataTable({
DT::datatable(mlid_param_asmnts, selection='single', rownames=FALSE, filter="top",
options = list(scrollY = '600px', paging = FALSE, scrollX=TRUE, dom="ltipr"#,
#searchCols = list(NULL,list(search=paste(reactive_objects$sel_mlid)))
)
)
})
# Map marker click (to identify selected site)
observeEvent(input$map_marker_click, {
req(profiles_long)
site_click <- input$map_marker_click
if (is.null(site_click)){return()}
siteid=site_click$id
reactive_objects$sel_mlid=siteid
reactive_objects$selected_au=as.character(mlid_param_asmnts[mlid_param_asmnts$IR_MLID==siteid,"ASSESS_ID"][1])
})
# Highlight selected marker
observeEvent(reactive_objects$sel_mlid, {
map_proxy %>%
clearGroup(group='highlight') %>%
addCircleMarkers(data=subset(prof_sites, MonitoringLocationIdentifier %in% reactive_objects$sel_mlid), lat=~LatitudeMeasure, lng=~LongitudeMeasure,
group='highlight', options = pathOptions(pane = "highlight"), radius = 20, color='chartreuse', opacity = 0.75, fillOpacity = 0.4)
})
# Map AU click (to identify selected AU and site)
observeEvent(input$map_shape_click,{
au_click = input$map_shape_click$id
if(!is.null(au_click)){
reactive_objects$selected_au=as.character(unique(au_poly$ASSESS_ID[au_poly$polyID==au_click]))
if(is.null(reactive_objects$sel_mlid)){
reactive_objects$sel_mlid=mlid_param_asmnts[mlid_param_asmnts$ASSESS_ID==reactive_objects$selected_au,"IR_MLID"][1]
}else{
if((!prof_sites[prof_sites$MonitoringLocationIdentifier==reactive_objects$sel_mlid,"ASSESS_ID"][1]==reactive_objects$selected_au) | (is.na(reactive_objects$sel_mlid))){
reactive_objects$sel_mlid=mlid_param_asmnts[mlid_param_asmnts$ASSESS_ID==reactive_objects$selected_au,"IR_MLID"][1]
}
}
}
})
# Table row click (to identify selected site & parameter)
observe({
req(input$table_input_rows_selected)
row_click=input$table_input_rows_selected
siteid=mlid_param_asmnts[row_click,"IR_MLID"]
reactive_objects$sel_param=mlid_param_asmnts[row_click,"R3172ParameterName"]
reactive_objects$sel_mlid=siteid
reactive_objects$selected_au=as.character(mlid_param_asmnts[mlid_param_asmnts$IR_MLID==siteid,"ASSESS_ID"][1])
})
# Change map zoom on table click & update selected heatmap_param to selected row param
map_proxy=leaflet::leafletProxy("map")
observeEvent(input$table_input_rows_selected,{
lat=prof_sites[prof_sites$MonitoringLocationIdentifier==reactive_objects$sel_mlid,"LatitudeMeasure"]
long=prof_sites[prof_sites$MonitoringLocationIdentifier==reactive_objects$sel_mlid,"LongitudeMeasure"]
map_proxy %>% leaflet::setView(lng=long, lat=lat, zoom=12)
#updateSelectInput(session, "heatmap_param",selected=reactive_objects$sel_param)
})
# Select profiles & date options based on selected site ID
observe({
req(reactive_objects$sel_mlid)
reactive_objects$sel_profiles=profiles_long[profiles_long$MonitoringLocationIdentifier==reactive_objects$sel_mlid,]
profile_dates=unique(reactive_objects$sel_profiles$ActivityStartDate)
profile_dates=profile_dates[order(profile_dates)]
reactive_objects$profile_dates=profile_dates
})
## Filter table to match clicked site from map
#input_table_proxy = DT::dataTableProxy('table_input')
#observeEvent(reactive_objects$sel_mlid,{
# search=as.character(reactive_objects$sel_mlid)
# input_table_proxy %>% DT::clearSearch() %>% DT::updateSearch(keywords = list(global = NULL, columns=c("","",search)))
#})
# Profile date selection
output$date_select <- renderUI({
req(reactive_objects$profile_dates)
selectInput("date_select", "Profile date:", reactive_objects$profile_dates)
})
output$date_slider <- renderUI({
req(reactive_objects$profile_dates)
date_min=min(reactive_objects$profile_dates)
date_max=max(reactive_objects$profile_dates)
sliderInput("date_slider", "Date range:", min=date_min, max=date_max, value=c(date_min,date_max))
})
# Generate selected aid
observe({
req(input$date_select)
reactive_objects$selectedActID=reactive_objects$sel_profiles[reactive_objects$sel_profiles$ActivityStartDate==input$date_select,"ActivityIdentifier"][1]
})
# Profile plot output
output$ind_prof_plot=renderPlot({
req(reactive_objects$sel_profiles,reactive_objects$selectedActID)
one_profile=reactive_objects$sel_profiles[reactive_objects$sel_profiles$ActivityIdentifier==reactive_objects$selectedActID,]
do_crit=one_profile[one_profile$R3172ParameterName=="Dissolved oxygen (DO)","NumericCriterion"][1]
temp_crit=one_profile[one_profile$R3172ParameterName=="Temperature, water","NumericCriterion"][1]
one_profile=unique(one_profile[,c("DataLoggerLine","ActivityIdentifier","ActivityStartDate","R3172ParameterName","IR_Value","IR_Unit","MonitoringLocationIdentifier")])
profilePlot(one_profile, parameter = "R3172ParameterName",
units = "IR_Unit",
depth = "Profile depth", do = "Dissolved oxygen (DO)",
temp = "Temperature, water", pH = "pH",
value_var = "IR_Value", line_no = "DataLoggerLine",
pH_crit=c(6.5,9), do_crit=do_crit, temp_crit=temp_crit)
box()
})
# Data table output
observe({
req(reactive_objects$selectedActID)
table_data=profiles_wide[profiles_wide$ActivityIdentifier==reactive_objects$selectedActID,c("IR_MLID","ActivityStartDate","Depth_m","DO_mgL","pH","Temp_degC","do_exc","pH_exc","temp_exc")]
reactive_objects$table_data=table_data[order(table_data$Depth_m),]
})
output$profile_table=DT::renderDataTable({
req(reactive_objects$table_data)
DT::datatable(reactive_objects$table_data, selection='multiple',
options = list(scrollY = '500px', paging = FALSE, scrollX = TRUE, searching=F, digits = 3)
) %>%
DT::formatRound(columns=c('Depth_m','DO_mgL','pH','Temp_degC'), digits=3) %>%
DT::formatStyle("DO_mgL", "do_exc", backgroundColor = DT::styleEqual(1, "orange")) %>%
DT::formatStyle("pH", "pH_exc", backgroundColor = DT::styleEqual(1, "orange")) %>%
DT::formatStyle("Temp_degC", "temp_exc", backgroundColor = DT::styleEqual(1, "orange"))
})
prof_table_proxy = DT::dataTableProxy('profile_table')
observe({
prof_table_proxy %>% DT::hideCols(hide=which(names(reactive_objects$table_data) %in% c("do_exc","pH_exc","temp_exc")))
})
# Extract profile assessments & profiles_wide for selected site
observe({
req(reactive_objects$sel_mlid,input$date_slider)
selected_prof_asmnts=ind_prof_asmnts[
ind_prof_asmnts$IR_MLID == reactive_objects$sel_mlid &
ind_prof_asmnts$ActivityStartDate>=input$date_slider[1] &
ind_prof_asmnts$ActivityStartDate<=input$date_slider[2]
,]
selected_prof_asmnts=selected_prof_asmnts[order(selected_prof_asmnts$ActivityStartDate),]
reactive_objects$selected_prof_asmnts=selected_prof_asmnts
reactive_objects$sel_profs_wide=profiles_wide[
profiles_wide$IR_MLID == reactive_objects$sel_mlid &
profiles_wide$ActivityStartDate>=input$date_slider[1] &
profiles_wide$ActivityStartDate<=input$date_slider[2]
,]
})
# Hab width plot output
output$hab_width=renderPlot({
req(reactive_objects$selected_prof_asmnts)
if(dim(reactive_objects$selected_prof_asmnts)[1]>0){
par(mar=c(7.1,5.1,7.1,2.1))
plot(max_hab_width~ActivityStartDate, data=reactive_objects$selected_prof_asmnts, pch=NA, cex=1.5, ylab="Width (m)", xlab="", cex.axis=1.25, cex.lab=1.5, xaxt='n',
ylim=c(0,max(reactive_objects$selected_prof_asmnts$max_depth_m,na.rm=T))
)
abline(h=3,lty=3,lwd=2,col="red")
if(input$show_dates){
axis(1, at=unique(reactive_objects$selected_prof_asmnts$ActivityStartDate), labels=unique(as.Date(reactive_objects$selected_prof_asmnts$ActivityStartDate)), par(las=2))
}else{
axis.Date(1, reactive_objects$selected_prof_asmnts$ActivityStartDate)
}
points(max_depth_m~ActivityStartDate, data=reactive_objects$selected_prof_asmnts, type='l',lty=2,lwd=2,col="blue")
points(max_hab_width~ActivityStartDate, data=reactive_objects$selected_prof_asmnts, type='b', pch=21, cex=1.5, bg="grey", cex.axis=1.25, cex.lab=1.5)
par(xpd=TRUE)
legend("topleft", inset=c(0.05,-0.3), bty='n', pch=c(NA,21),pt.bg=c(NA,'grey'),lty=c(2,1),col=c("blue","black"),lwd=c(2,1),cex=1.5, legend=c("Max depth","DO/temp habitat"))
par(xpd=FALSE)
}
})
# pct exceedance plot
output$pct_exc=renderPlot({
req(reactive_objects$selected_prof_asmnts)
if(dim(reactive_objects$selected_prof_asmnts)[1]>0){
ymax=max(5,max(max(reactive_objects$selected_prof_asmnts$do_pct_exc, na.rm=T),max(reactive_objects$selected_prof_asmnts$temp_pct_exc, na.rm=T),max(reactive_objects$selected_prof_asmnts$ph_pct_exc, na.rm=T))*1.1)
par(mar=c(7.1,5.1,7.1,2.1))
plot(do_pct_exc~ActivityStartDate, data=reactive_objects$selected_prof_asmnts,ylim=c(0,ymax), pch=24, bg="deepskyblue3", type='b', ylab="% exceedance", cex=1.5, xlab="", xaxt='n')
points(temp_pct_exc~ActivityStartDate, data=reactive_objects$selected_prof_asmnts, pch=21, bg="orange", type='b', cex=1.5)
points(ph_pct_exc~ActivityStartDate, data=reactive_objects$selected_prof_asmnts, pch=22, bg="green", type='b', cex=1.5)
if(input$show_dates){
axis(1, at=unique(reactive_objects$selected_prof_asmnts$ActivityStartDate), labels=unique(as.Date(reactive_objects$selected_prof_asmnts$ActivityStartDate)), par(las=2))
}else{
axis.Date(1, reactive_objects$selected_prof_asmnts$ActivityStartDate)
}
par(xpd=TRUE)
legend("topleft", inset=c(0.05,-0.3), bty='n',horiz=T,
legend=c("Dissolved oxygen","Temperature","pH"),
pch=c(24,21,22), pt.bg=c("deepskyblue3","orange","green"), cex=1.5)
par(xpd=FALSE)
}
})
## Profile heatmap plot
#output$heatmap=renderPlot({
# req(reactive_objects$sel_profs_wide, reactive_objects$sel_profiles)
# if(dim(reactive_objects$sel_profs_wide)[1]>0){
# if(length(unique(reactive_objects$sel_profs_wide$ActivityStartDate))==1 | dim(reactive_objects$sel_profs_wide)[1]<=2){
# plot.new()
# text(0.5,0.5,"Cannot interpolate. See individual profiles.")
# box()
# }else{
# # Define heatmap inputs based on selected parameter
# if(input$heatmap_param=="Dissolved oxygen (DO)"){
# name="Dissolved oxygen (DO)"
# parameter="DO_mgL"
# param_units="mg/L"
# param_lab="Dissolved oxygen"
# }
# if(input$heatmap_param=="pH"){
# name="pH"
# parameter="pH"
# param_units=""
# param_lab="pH"
# }
# if(input$heatmap_param=="Temperature, water"){
# name="Temperature, water"
# parameter="Temp_degC"
# param_units="deg C"
# param_lab="Temperature"
# }
# if(input$heatmap_param=="DO-temperature habitat profile width"){
# name="DO/temperature lens"
# parameter="do_temp_exc"
# param_units=""
# param_lab="DO/temp exc."
# }
# # Define criteria
# if(input$heatmap_param!="DO-temperature habitat profile width"){
# criteria=unique(reactive_objects$sel_profiles[reactive_objects$sel_profiles$R3172ParameterName==name,"NumericCriterion"])
# }else{criteria=1}
# # heat map
# if(input$show_dates){show_dates=TRUE}else{show_dates=FALSE}
# profileHeatMap(reactive_objects$sel_profs_wide,parameter=parameter,param_units=param_units,param_lab=param_lab,depth="Depth_m",depth_units="m",criteria=criteria,show_dates=show_dates)
# }
# }
#})
# Trophic indicators tab
## Extract trophic data
observeEvent(reactive_objects$selected_au, ignoreInit=T, {
#reactive_objects=list()
#reactive_objects$selected_au='UT-L-16020201-004_01'
trophic_data_flat=trophic_data[trophic_data$ASSESS_ID==reactive_objects$selected_au,]
trophic_data_flat$TSI[trophic_data_flat$TSI<1]=1
trophic_data_flat$TSI[trophic_data_flat$TSI>100]=100
trophic_data_flat$year=lubridate::year(trophic_data_flat$ActivityStartDate)
trophic_data_flat$month=lubridate::month(trophic_data_flat$ActivityStartDate)
tsi_wide=reshape2::dcast(trophic_data_flat, MonitoringLocationIdentifier+ActivityStartDate+AU_NAME+ASSESS_ID~CharacteristicName, value.var='TSI', fun.aggregate=mean, na.rm=T)
reactive_objects$trophic_data_flat=trophic_data_flat
reactive_objects$tsi_wide=dplyr::rename(tsi_wide, TSIchl='Chlorophyll a', TSIsd='Depth, Secchi disk depth', TSItp='Phosphate-phosphorus')
})
## 3D TSI plot
### plot function
## Carlson trophic state index (TSI) deviation plot.
## For each sample, plots TSI(chl a) - TSI(Secchi) against
## TSI(chl a) - TSI(total P) on fixed [-100, 100] axes, with annotated
## quadrants interpreting the deviations (particle size, P limitation, ...).
##
## Args:
##   data: data frame with numeric columns TSIchl, TSIsd and TSItp
##         (rows containing NA are dropped).
##   title: main title for the plot.
## Returns: nothing; draws on the active graphics device.
plot3dTSI <- function(data, title) {
  data <- na.omit(data)
  ChlA_Secchi <- data$TSIchl - data$TSIsd   # x-axis deviation
  ChlA_TP <- data$TSIchl - data$TSItp       # y-axis deviation
  # Empty frame; axes are drawn manually through the origin below.
  plot(NA, NA, xaxt = 'n', yaxt = 'n', ylim = c(-100, 100), xlim = c(-100, 100),
       ylab = "", xlab = "", bty = "n", main = title, cex.axis = 2, cex.lab = 1.5)
  axis(1, at = c(-50, 50), pos = 0)
  axis(1, at = seq(-75, 75, 25), pos = 0, labels = FALSE)
  axis(2, at = c(-50, 50), pos = 0, las = 1)
  # NOTE: the original drew this tick axis twice; the duplicate call was removed.
  axis(2, at = seq(-75, 75, 25), pos = 0, las = 1, labels = FALSE)
  # 1:1 reference line (TSI Secchi == TSI TP) with arrowheads at both ends.
  segments(-55, -55, 55, 55, lty = 2, lwd = 2, col = "dimgrey")
  arrows(40, -40, 52, -52, lwd = 2, col = "dimgrey", length = 0.125)
  arrows(-40, 40, -52, 52, lwd = 2, col = "dimgrey", length = 0.125)
  rect(-100, -100, 100, 100)
  points(ChlA_Secchi, ChlA_TP, pch = 21, col = "black", bg = "orange", cex = 3)
  #points(mean(ChlA_Secchi),mean(ChlA_TP),pch="+",col="darkgreen",cex=5)
  # Quadrant / margin annotations (xpd = NA permits drawing outside the box).
  par(xpd = NA)
  text(x = -75, y = 110, "Small particulates")
  arrows(-15, 110, -45, 110, lwd = 2, col = "dimgrey", length = 0.125)
  text(x = 75, y = 110, "Large particulates")
  arrows(15, 110, 45, 110, lwd = 2, col = "dimgrey", length = 0.125)
  text(x = -50, y = -110, "TSI ChlA < TSI Secchi")
  text(y = -125, x = 0, "TSI ChlA - TSI Secchi", cex = 1.5)
  text(x = -125, y = 0, "TSI ChlA - TSI TP", cex = 1.5, srt = 90)
  text(x = 50, y = -110, "TSI ChlA > TSI Secchi")
  text(x = -110, y = -50, "TSI ChlA < TSI TP", srt = 90)
  text(x = -110, y = 50, "TSI ChlA > TSI TP", srt = 90)
  text(x = 50, y = -90, "Zooplankton grazing")
  text(x = -50, y = 90, "Dissolved color/clay particles")
  text(x = 57, y = -57, "TSI Secchi < TSI TP", srt = 45)
  text(x = -57, y = 57, "TSI Secchi > TSI TP", srt = 45)
  text(x = -95, y = -65, "P surplus", srt = 90)
  text(x = -95, y = 65, "P limitation", srt = 90)
  text(x = 40, y = 40, adj = c(0, -0.5), "TSI Secchi = TSI TP", srt = 45)
  arrows(-95, -15, -95, -45, lwd = 2, col = "dimgrey", length = 0.125)
  arrows(-95, 15, -95, 45, lwd = 2, col = "dimgrey", length = 0.125)
}
output$tsi3d=renderPlot({
req(reactive_objects$tsi_wide)
plot3dTSI(reactive_objects$tsi_wide, title=reactive_objects$tsi_wide$AU_NAME[1])
})
output$tsi_boxplot=renderPlotly({
req(reactive_objects$trophic_data_flat)
title=reactive_objects$trophic_data_flat$AU_NAME[1]
au_vis=as.list(append(T, rep(F, length(unique(reactive_objects$trophic_data_flat$MonitoringLocationIdentifier)))))
site_vis=as.list(append(F, rep(T, length(unique(reactive_objects$trophic_data_flat$MonitoringLocationIdentifier)))))
plot_ly(data=reactive_objects$trophic_data_flat) %>%
add_trace(type = 'box', y = ~TSI, x=~CharacteristicName, visible=T, name='TSI') %>%
add_trace(type = 'box', y = ~TSI, x=~CharacteristicName, color=~MonitoringLocationIdentifier, visible=F) %>%
layout(title = title,
boxmode = "group",
yaxis = list(side = 'left', title = 'TSI'),
xaxis = list(title = ''),
updatemenus = list(
list(
buttons = list(
list(method = "update", label='Group to AU',
args = list(list(visible = au_vis))
),
list(method = "update", label='Split by site',
args = list(list(visible = site_vis))
)
)
)
)
) %>%
config(displaylogo = FALSE,
modeBarButtonsToRemove = c(
'sendDataToCloud',
'select2d',
'lasso2d'
)
)
})
output$tsi_timeseries=renderPlotly({
req(reactive_objects$trophic_data_flat)
title=reactive_objects$trophic_data_flat$AU_NAME[1]
param_length=length(unique(reactive_objects$trophic_data_flat$CharacteristicName))
year_vis=as.list(append(rep(T, param_length), rep(F, param_length)))
month_vis=as.list(append(rep(F, param_length), rep(T, param_length)))
plot_ly(data=reactive_objects$trophic_data_flat) %>%
add_trace(type = 'box', y = ~TSI, x=~year, color=~CharacteristicName, visible=T) %>%
add_trace(type = 'box', y = ~TSI, x=~month, color=~CharacteristicName, visible=F) %>%
layout(title = title,
boxmode = "group",
yaxis = list(side = 'left', title = 'TSI'),
xaxis = list(title = 'Year'),
updatemenus = list(
list(
buttons = list(
list(method = "update", label='Year',
args = list(list(visible = year_vis),
list(xaxis = list(title = 'Year'))
)
),
list(method = "update", label='Month',
args = list(list(visible = month_vis),
list(xaxis = list(title = 'Month'))
)
)
)
)
)
) %>%
config(displaylogo = FALSE,
modeBarButtonsToRemove = c(
'sendDataToCloud',
'select2d',
'lasso2d'
)
)
})
}
## run app
shinyApp(ui = ui, server = server)
|
# This function calculates the number of characters from the beginning before the first mismatch between two strings
#
#
.datatable.aware=TRUE #You have to declare this at https://github.com/tidyverse/dplyr/issues/548
# Count, for each element of `a`, how many leading characters match the
# beginning of the corresponding element of `b` — i.e. the length of the
# common prefix, capped at nchar(a) and nchar(b).
#
# Args:
#   a, b: character vectors (recycled to a common length by data.table()).
#   verbose: print per-iteration progress to the console?
#
# Returns: numeric vector of prefix-match counts, one per row.
firstmismatch <- function(a, b, verbose = TRUE) {
  dt <- data.table::data.table(a = a, b = b)
  dt[, counts := 0, ]        # all counts start as zero matches
  dt[, condition := TRUE, ]  # every row eligible from the start
  dt[, nchars1 := nchar(a), by = a]  # stop counting once past the end of a
  dt[, nchars2 := nchar(b), by = b]  # ...or past the end of b
  nchars_max <- max(dt$nchars1, na.rm = TRUE)
  # seq_len() instead of 1:nchars_max so an all-empty `a` skips the loop
  # rather than iterating over c(1, 0).
  for (i in seq_len(nchars_max)) {
    if (verbose) {
      cat("Letter ", i, " ", sum(dt$condition), " left to check; ")
    }
    # First i characters of a; grouped by a so each distinct string is cut once.
    dt[
      condition == TRUE,
      a_substring := substr(a, 1, i),
      by = a
    ]
    dt[
      condition == TRUE,
      a_substring_nchar := nchar(trimws(a_substring)),
      by = a_substring
    ]
    # A row stays eligible while the i-character prefix of a is still a prefix
    # of b and neither string is exhausted. Grouping by a_substring means
    # startsWith() is evaluated once per distinct search pattern.
    dt[
      condition == TRUE,
      condition := condition &               # was already eligible
        a_substring_nchar <= nchars2 &       # stop when longer than b
        nchars1 >= i &                       # stop when longer than a
        startsWith(b, a_substring[1]),       # prefix of a still starts b?
      by = a_substring
    ]
    dt[
      condition == TRUE,
      counts := counts + 1,
    ]
  }
  dt$counts
}
# Ok this piece of code counts until the first mismatch. It's pretty fast, so can do between each pair if necessary
# temp <- firstmismatch("murinduko sub-location",flatfiles_unique$name_clean)
# tail(flatfiles_unique$name_clean[order(temp)])
# Quick function to count the number of characters of overlap up front only
# b=c("murinduko hill forest","murinduko hill","forest murinduko settlement scheme")
# library(re2r)
# library(devtools)
# install_github("qinwf/re2r", build_vignettes = T)
# library(re2r)
|
/R/first_match.r
|
no_license
|
jelenavicic/MeasuringLandscape
|
R
| false
| false
| 2,303
|
r
|
# This function calculates the number of characters from the beginning before the first mismatch between two strings
#
#
.datatable.aware=TRUE #You have to declare this at https://github.com/tidyverse/dplyr/issues/548
# Count, for each element of `a`, how many leading characters match the
# beginning of the corresponding element of `b` — i.e. the length of the
# common prefix, capped at nchar(a) and nchar(b).
#
# Args:
#   a, b: character vectors (recycled to a common length by data.table()).
#   verbose: print per-iteration progress to the console?
#
# Returns: numeric vector of prefix-match counts, one per row.
firstmismatch <- function(a, b, verbose = TRUE) {
  dt <- data.table::data.table(a = a, b = b)
  dt[, counts := 0, ]        # all counts start as zero matches
  dt[, condition := TRUE, ]  # every row eligible from the start
  dt[, nchars1 := nchar(a), by = a]  # stop counting once past the end of a
  dt[, nchars2 := nchar(b), by = b]  # ...or past the end of b
  nchars_max <- max(dt$nchars1, na.rm = TRUE)
  # seq_len() instead of 1:nchars_max so an all-empty `a` skips the loop
  # rather than iterating over c(1, 0).
  for (i in seq_len(nchars_max)) {
    if (verbose) {
      cat("Letter ", i, " ", sum(dt$condition), " left to check; ")
    }
    # First i characters of a; grouped by a so each distinct string is cut once.
    dt[
      condition == TRUE,
      a_substring := substr(a, 1, i),
      by = a
    ]
    dt[
      condition == TRUE,
      a_substring_nchar := nchar(trimws(a_substring)),
      by = a_substring
    ]
    # A row stays eligible while the i-character prefix of a is still a prefix
    # of b and neither string is exhausted. Grouping by a_substring means
    # startsWith() is evaluated once per distinct search pattern.
    dt[
      condition == TRUE,
      condition := condition &               # was already eligible
        a_substring_nchar <= nchars2 &       # stop when longer than b
        nchars1 >= i &                       # stop when longer than a
        startsWith(b, a_substring[1]),       # prefix of a still starts b?
      by = a_substring
    ]
    dt[
      condition == TRUE,
      counts := counts + 1,
    ]
  }
  dt$counts
}
# Ok this piece of code counts until the first mismatch. It's pretty fast, so can do between each pair if necessary
# temp <- firstmismatch("murinduko sub-location",flatfiles_unique$name_clean)
# tail(flatfiles_unique$name_clean[order(temp)])
# Quick function to count the number of characters of overlap up front only
# b=c("murinduko hill forest","murinduko hill","forest murinduko settlement scheme")
# library(re2r)
# library(devtools)
# install_github("qinwf/re2r", build_vignettes = T)
# library(re2r)
|
#'The function pareto() will take a vector of numbers (and also titles) and will
#'display a pareto chart of the data. This chart includes a barplot of individual
#'values represented in descending order, as well as the cumulative total
#'represented by the line. The data used to represent this function is from Assignment 1.
#'
#'@param x = vector of numbers
#'
#'@return nothing, displays pareto graph
#'
#'@examples
#'limb.counts = c(15, 8, 63, 20);
#'limb.labels = c("None", "Both", "Legs ONLY", "Wheels ONLY");
#'limb.freq.df = as.data.frame(matrix(data = limb.counts/sum(limb.counts), nrow = 4, ncol = 1), row.names = limb.labels);
#'limb.freq.df
#'limb.raw = rep(limb.labels, limb.counts);
#'pareto(limb.raw)
#'
#'@export
# Draw a Pareto chart: category frequencies as bars in decreasing order,
# a cumulative-total line connecting the bar tops, and a right-hand axis
# labelled in percent of the grand total.
#
# Args:
#   x: vector of categorical observations (anything table() accepts).
#   mn: main title for the chart.
#   ...: further arguments passed on to title().
# Returns: nothing; draws on the active graphics device.
pareto <- function(x, mn = "Pareto barplot", ...) {
  xx.tab <- sort(table(x), decreasing = TRUE)  # frequencies, largest first
  cs <- cumsum(as.vector(xx.tab))              # running total for the line
  lenx <- length(xx.tab)
  bp <- barplot(xx.tab, ylim = c(0, max(cs)), las = 2)
  # Right-hand axis: 11 ticks spanning 0%..100% of the cumulative total.
  lb <- seq(0, cs[lenx], l = 11)
  axis(side = 4, at = lb, labels = paste(seq(0, 100, length = 11), "%", sep = ""),
       las = 1, line = -1, col = "Blue", col.axis = "Red")
  # Connect successive cumulative totals; the guard fixes the single-category
  # case, where the original 1:(lenx - 1) would iterate over c(1, 0).
  if (lenx > 1) {
    for (i in seq_len(lenx - 1)) {
      segments(bp[i], cs[i], bp[i + 1], cs[i + 1], col = i, lwd = 2)
    }
  }
  title(main = mn, ...)
}
|
/PackageZuyus/R/pareto.R
|
no_license
|
marissazuyus/Stat_PackageZuyus
|
R
| false
| false
| 1,203
|
r
|
#'The function pareto() will take a vector of numbers (and also titles) and will
#'display a pareto chart of the data. This chart includes a barplot of individual
#'values represented in descending order, as well as the cumulative total
#'represented by the line. The data used to represent this function is from Assignment 1.
#'
#'@param x = vector of numbers
#'
#'@return nothing, displays pareto graph
#'
#'@examples
#'limb.counts = c(15, 8, 63, 20);
#'limb.labels = c("None", "Both", "Legs ONLY", "Wheels ONLY");
#'limb.freq.df = as.data.frame(matrix(data= limb.counts/sum(limb.counts), nrow=4, ncol=1), row.names = limb.labels);
#'limb.freq.df
#'limb.raw = rep(limb.labels, limb.counts);
#'pareto(limb.raw)
#'
#'@export
# Draw a Pareto chart: a frequency barplot of the categories in `x`, sorted
# in decreasing order, overlaid with the cumulative-total line and a 0-100%
# axis on the right-hand side.
#
# Args:
#   x:   vector of (categorical) observations; frequencies are taken via table()
#   mn:  main title for the plot
#   ...: further arguments passed on to title()
# Returns: NULL, invisibly; called for its plotting side effect.
pareto <- function(x, mn = "Pareto barplot", ...) {
  freq <- sort(table(x), decreasing = TRUE)
  cum_freq <- cumsum(as.vector(freq))
  n_cat <- length(freq)
  # Bars show per-category counts; the y-axis spans the cumulative total.
  bar_mids <- barplot(freq, ylim = c(0, max(cum_freq)), las = 2)
  # Right-hand axis marks 0-100% of the cumulative total.
  # (The original used the partially-matched argument `l=`; spell out length.out.)
  ticks <- seq(0, cum_freq[n_cat], length.out = 11)
  axis(side = 4, at = ticks,
       labels = paste0(seq(0, 100, length.out = 11), "%"),
       las = 1, line = -1, col = "Blue", col.axis = "Red")
  # Connect successive cumulative totals above the bars.
  # seq_len() makes the loop empty (instead of erroring) for a single category.
  for (i in seq_len(n_cat - 1)) {
    segments(bar_mids[i], cum_freq[i], bar_mids[i + 1], cum_freq[i + 1],
             col = i, lwd = 2)
  }
  invisible(title(main = mn, ...))
}
|
/PCA_RF.R
|
no_license
|
jgshim/PONV
|
R
| false
| false
| 4,498
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.r
\name{get_request_type}
\alias{get_request_type}
\alias{is_get}
\alias{is_post}
\title{Retrieve or test request type}
\usage{
get_request_type(har_resp_obj)
is_get(har_resp_obj)
is_post(har_resp_obj)
}
\arguments{
\item{har_resp_obj}{HAR response object}
}
\description{
Retrieve or test request type
}
\seealso{
Other splash_har_helpers: \code{\link{get_content_size}},
\code{\link{get_content_type}},
\code{\link{get_har_entry}},
\code{\link{get_request_url}},
\code{\link{get_response_body}},
\code{\link{har_entry_count}}
}
|
/man/get_request_type.Rd
|
no_license
|
nikolayvoronchikhin/splashr
|
R
| false
| true
| 628
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.r
\name{get_request_type}
\alias{get_request_type}
\alias{is_get}
\alias{is_post}
\title{Retrieve or test request type}
\usage{
get_request_type(har_resp_obj)
is_get(har_resp_obj)
is_post(har_resp_obj)
}
\arguments{
\item{har_resp_obj}{HAR response object}
}
\description{
Retrieve or test request type
}
\seealso{
Other splash_har_helpers: \code{\link{get_content_size}},
\code{\link{get_content_type}},
\code{\link{get_har_entry}},
\code{\link{get_request_url}},
\code{\link{get_response_body}},
\code{\link{har_entry_count}}
}
|
# Filter the D33 and normal-tissue MAS5 expression matrices down to the
# probes (row names) present in the GSE67684 reference matrix, so that all
# data sets share the same feature space. (T/F abbreviations replaced by
# TRUE/FALSE: T and F are ordinary, reassignable variables in R.)

# D33 data
raw_data <- read.table("data/D33/processed/mas5-original.tsv",
                       sep = "\t", header = TRUE, row.names = 1)
yeoh_data <- read.table("data/GSE67684/processed/mas5_ordered.tsv",
                        sep = "\t", header = TRUE, row.names = 1)
# Keep only probes that also exist in the reference matrix.
select_logvec <- rownames(raw_data) %in% rownames(yeoh_data)
write.table(raw_data[select_logvec, ], "data/D33/processed/mas5_filtered.tsv",
            quote = FALSE, sep = "\t")

# Normal data
raw_data <- read.table("data/leuk_normal/processed/mas5-original.tsv",
                       sep = "\t", header = TRUE, row.names = 1)
yeoh_data <- read.table("data/GSE67684/processed/mas5_ordered.tsv",
                        sep = "\t", header = TRUE, row.names = 1)
select_logvec <- rownames(raw_data) %in% rownames(yeoh_data)
# Number of probes surviving the filter (auto-printed when run via Rscript).
sum(select_logvec)
write.table(raw_data[select_logvec, ], "data/leuk_normal/processed/mas5_filtered.tsv",
            quote = FALSE, sep = "\t")
|
/preprocess/process_data-d33_normal.R
|
no_license
|
dblux/relapse_prediction
|
R
| false
| false
| 923
|
r
|
# Filter the D33 and normal-tissue MAS5 expression matrices down to the
# probes (row names) present in the GSE67684 reference matrix, so that all
# data sets share the same feature space. (T/F abbreviations replaced by
# TRUE/FALSE: T and F are ordinary, reassignable variables in R.)

# D33 data
raw_data <- read.table("data/D33/processed/mas5-original.tsv",
                       sep = "\t", header = TRUE, row.names = 1)
yeoh_data <- read.table("data/GSE67684/processed/mas5_ordered.tsv",
                        sep = "\t", header = TRUE, row.names = 1)
# Keep only probes that also exist in the reference matrix.
select_logvec <- rownames(raw_data) %in% rownames(yeoh_data)
write.table(raw_data[select_logvec, ], "data/D33/processed/mas5_filtered.tsv",
            quote = FALSE, sep = "\t")

# Normal data
raw_data <- read.table("data/leuk_normal/processed/mas5-original.tsv",
                       sep = "\t", header = TRUE, row.names = 1)
yeoh_data <- read.table("data/GSE67684/processed/mas5_ordered.tsv",
                        sep = "\t", header = TRUE, row.names = 1)
select_logvec <- rownames(raw_data) %in% rownames(yeoh_data)
# Number of probes surviving the filter (auto-printed when run via Rscript).
sum(select_logvec)
write.table(raw_data[select_logvec, ], "data/leuk_normal/processed/mas5_filtered.tsv",
            quote = FALSE, sep = "\t")
|
# Fast principal component analysis for genlight objects (adegenet): an
# accelerated variant of adegenet::glPca. It materialises the full
# (individuals x loci) allele-frequency matrix and computes every pairwise
# dot product in a single BLAS call (tcrossprod) instead of looping.
#
# Arguments:
#   x              a genlight object (adegenet)
#   center         centre each locus on its mean allele frequency?
#   scale          scale each locus by its variance?
#   nf             number of principal axes to retain; if NULL, an
#                  eigenvalue barplot is drawn and the user is prompted
#                  interactively (reads one line from stdin)
#   loadings       also compute locus loadings?
#   alleleAsUnit   forwarded to glMean()/glVar()
#   returnDotProd  also return the n x n dot-product matrix?
#
# Returns a list of class "glPca" with components eig and scores, plus
# loadings and/or dotProd when requested, and the matched call.
glPcaFast <- function(x,
                      center=TRUE,
                      scale=FALSE,
                      nf=NULL,
                      loadings=TRUE,
                      alleleAsUnit=FALSE,
                      returnDotProd=FALSE){

  if(!inherits(x, "genlight")) stop("x is not a genlight object")

  # keep the original mean / var code, as it's used further down
  # and has some NA checks..
  if(center) {
    vecMeans <- glMean(x, alleleAsUnit=alleleAsUnit)
    if(any(is.na(vecMeans))) stop("NAs detected in the vector of means")
  }
  if(scale){
    vecVar <- glVar(x, alleleAsUnit=alleleAsUnit)
    if(any(is.na(vecVar))) stop("NAs detected in the vector of variances")
  }

  # convert to full data, try to keep the NA handling as similar
  # to the original as possible
  # - dividing by ploidy keeps the NAs
  mx <- t(sapply(x$gen, as.integer)) / ploidy(x)

  # handle NAs: replace each NA with its locus (column) mean so centring
  # maps it to zero, or with 0 when not centring
  NAidx <- which(is.na(mx), arr.ind = T)
  if (center) {
    mx[NAidx] <- vecMeans[NAidx[,2]]
  } else {
    mx[NAidx] <- 0
  }

  # center and scale
  mx <- scale(mx,
              center = if (center) vecMeans else F,
              scale = if (scale) vecVar else F)

  # all dot products at once using underlying BLAS
  # to support thousands of samples, this could be
  # replaced by 'Truncated SVD', but it would require more changes
  # in the code around
  allProd <- tcrossprod(mx) / nInd(x) # assume uniform weights

  ## PERFORM THE ANALYSIS ##
  ## eigenanalysis; keep only axes with numerically non-zero eigenvalues
  eigRes <- eigen(allProd, symmetric=TRUE, only.values=FALSE)
  rank <- sum(eigRes$values > 1e-12)
  eigRes$values <- eigRes$values[1:rank]
  eigRes$vectors <- eigRes$vectors[, 1:rank, drop=FALSE]

  ## scan nb of axes retained (interactive prompt when nf is NULL)
  if(is.null(nf)){
    barplot(eigRes$values, main="Eigenvalues", col=heat.colors(rank))
    cat("Select the number of axes: ")
    nf <- as.integer(readLines(n = 1))
  }

  ## rescale PCs
  res <- list()
  res$eig <- eigRes$values
  nf <- min(nf, sum(res$eig>1e-10))
  ##res$matprod <- allProd # for debugging

  ## use: li = XQU = V\Lambda^(1/2)
  eigRes$vectors <- eigRes$vectors * sqrt(nInd(x)) # D-normalize vectors
  res$scores <- sweep(eigRes$vectors[, 1:nf, drop=FALSE],2,
                      sqrt(eigRes$values[1:nf]), FUN="*")

  ## GET LOADINGS ##
  ## need to decompose X^TDV into a sum of n matrices of dim p*r
  ## but only two such matrices are represented at a time
  if(loadings){
    if(scale) {
      vecSd <- sqrt(vecVar)
    }
    res$loadings <- matrix(0, nrow=nLoc(x), ncol=nf) # create empty matrix
    ## use: c1 = X^TDV
    ## and X^TV = A_1 + ... + A_n
    ## with A_k = X_[k-]^T v[k-]
    myPloidy <- ploidy(x)
    for(k in 1:nInd(x)){
      temp <- as.integer(x@gen[[k]]) / myPloidy[k]
      if(center) {
        # NA slots are set to the locus mean first, so the subtraction
        # below maps them to zero and they do not contribute to loadings
        temp[is.na(temp)] <- vecMeans[is.na(temp)]
        temp <- temp - vecMeans
      } else {
        temp[is.na(temp)] <- 0
      }
      if(scale){
        temp <- temp/vecSd
      }
      res$loadings <- res$loadings + matrix(temp) %*% eigRes$vectors[k,
                                                                     1:nf, drop=FALSE]
    }
    res$loadings <- res$loadings / nInd(x) # don't forget the /n of X_tDV
    res$loadings <- sweep(res$loadings, 2, sqrt(eigRes$values[1:nf]),
                          FUN="/") }

  ## FORMAT OUTPUT ##
  colnames(res$scores) <- paste("PC", 1:nf, sep="")
  if(!is.null(indNames(x))){
    rownames(res$scores) <- indNames(x)
  } else {
    rownames(res$scores) <- 1:nInd(x)
  }

  if(!is.null(res$loadings)){
    colnames(res$loadings) <- paste("Axis", 1:nf, sep="")
    if(!is.null(locNames(x)) & !is.null(alleles(x))){
      rownames(res$loadings) <- paste(locNames(x),alleles(x), sep=".")
    } else {
      rownames(res$loadings) <- 1:nLoc(x)
    }
  }

  if(returnDotProd){
    res$dotProd <- allProd
    rownames(res$dotProd) <- colnames(res$dotProd) <- indNames(x)
  }

  res$call <- match.call()
  class(res) <- "glPca"

  return(res)
}
|
/program_comparison_PCA/glPcaFast_script.R
|
no_license
|
mnr006/Ritter_Defense_2019
|
R
| false
| false
| 3,921
|
r
|
# Fast principal component analysis for genlight objects (adegenet): an
# accelerated variant of adegenet::glPca. It materialises the full
# (individuals x loci) allele-frequency matrix and computes every pairwise
# dot product in a single BLAS call (tcrossprod) instead of looping.
#
# Arguments:
#   x              a genlight object (adegenet)
#   center         centre each locus on its mean allele frequency?
#   scale          scale each locus by its variance?
#   nf             number of principal axes to retain; if NULL, an
#                  eigenvalue barplot is drawn and the user is prompted
#                  interactively (reads one line from stdin)
#   loadings       also compute locus loadings?
#   alleleAsUnit   forwarded to glMean()/glVar()
#   returnDotProd  also return the n x n dot-product matrix?
#
# Returns a list of class "glPca" with components eig and scores, plus
# loadings and/or dotProd when requested, and the matched call.
glPcaFast <- function(x,
                      center=TRUE,
                      scale=FALSE,
                      nf=NULL,
                      loadings=TRUE,
                      alleleAsUnit=FALSE,
                      returnDotProd=FALSE){

  if(!inherits(x, "genlight")) stop("x is not a genlight object")

  # keep the original mean / var code, as it's used further down
  # and has some NA checks..
  if(center) {
    vecMeans <- glMean(x, alleleAsUnit=alleleAsUnit)
    if(any(is.na(vecMeans))) stop("NAs detected in the vector of means")
  }
  if(scale){
    vecVar <- glVar(x, alleleAsUnit=alleleAsUnit)
    if(any(is.na(vecVar))) stop("NAs detected in the vector of variances")
  }

  # convert to full data, try to keep the NA handling as similar
  # to the original as possible
  # - dividing by ploidy keeps the NAs
  mx <- t(sapply(x$gen, as.integer)) / ploidy(x)

  # handle NAs: replace each NA with its locus (column) mean so centring
  # maps it to zero, or with 0 when not centring
  NAidx <- which(is.na(mx), arr.ind = T)
  if (center) {
    mx[NAidx] <- vecMeans[NAidx[,2]]
  } else {
    mx[NAidx] <- 0
  }

  # center and scale
  mx <- scale(mx,
              center = if (center) vecMeans else F,
              scale = if (scale) vecVar else F)

  # all dot products at once using underlying BLAS
  # to support thousands of samples, this could be
  # replaced by 'Truncated SVD', but it would require more changes
  # in the code around
  allProd <- tcrossprod(mx) / nInd(x) # assume uniform weights

  ## PERFORM THE ANALYSIS ##
  ## eigenanalysis; keep only axes with numerically non-zero eigenvalues
  eigRes <- eigen(allProd, symmetric=TRUE, only.values=FALSE)
  rank <- sum(eigRes$values > 1e-12)
  eigRes$values <- eigRes$values[1:rank]
  eigRes$vectors <- eigRes$vectors[, 1:rank, drop=FALSE]

  ## scan nb of axes retained (interactive prompt when nf is NULL)
  if(is.null(nf)){
    barplot(eigRes$values, main="Eigenvalues", col=heat.colors(rank))
    cat("Select the number of axes: ")
    nf <- as.integer(readLines(n = 1))
  }

  ## rescale PCs
  res <- list()
  res$eig <- eigRes$values
  nf <- min(nf, sum(res$eig>1e-10))
  ##res$matprod <- allProd # for debugging

  ## use: li = XQU = V\Lambda^(1/2)
  eigRes$vectors <- eigRes$vectors * sqrt(nInd(x)) # D-normalize vectors
  res$scores <- sweep(eigRes$vectors[, 1:nf, drop=FALSE],2,
                      sqrt(eigRes$values[1:nf]), FUN="*")

  ## GET LOADINGS ##
  ## need to decompose X^TDV into a sum of n matrices of dim p*r
  ## but only two such matrices are represented at a time
  if(loadings){
    if(scale) {
      vecSd <- sqrt(vecVar)
    }
    res$loadings <- matrix(0, nrow=nLoc(x), ncol=nf) # create empty matrix
    ## use: c1 = X^TDV
    ## and X^TV = A_1 + ... + A_n
    ## with A_k = X_[k-]^T v[k-]
    myPloidy <- ploidy(x)
    for(k in 1:nInd(x)){
      temp <- as.integer(x@gen[[k]]) / myPloidy[k]
      if(center) {
        # NA slots are set to the locus mean first, so the subtraction
        # below maps them to zero and they do not contribute to loadings
        temp[is.na(temp)] <- vecMeans[is.na(temp)]
        temp <- temp - vecMeans
      } else {
        temp[is.na(temp)] <- 0
      }
      if(scale){
        temp <- temp/vecSd
      }
      res$loadings <- res$loadings + matrix(temp) %*% eigRes$vectors[k,
                                                                     1:nf, drop=FALSE]
    }
    res$loadings <- res$loadings / nInd(x) # don't forget the /n of X_tDV
    res$loadings <- sweep(res$loadings, 2, sqrt(eigRes$values[1:nf]),
                          FUN="/") }

  ## FORMAT OUTPUT ##
  colnames(res$scores) <- paste("PC", 1:nf, sep="")
  if(!is.null(indNames(x))){
    rownames(res$scores) <- indNames(x)
  } else {
    rownames(res$scores) <- 1:nInd(x)
  }

  if(!is.null(res$loadings)){
    colnames(res$loadings) <- paste("Axis", 1:nf, sep="")
    if(!is.null(locNames(x)) & !is.null(alleles(x))){
      rownames(res$loadings) <- paste(locNames(x),alleles(x), sep=".")
    } else {
      rownames(res$loadings) <- 1:nLoc(x)
    }
  }

  if(returnDotProd){
    res$dotProd <- allProd
    rownames(res$dotProd) <- colnames(res$dotProd) <- indNames(x)
  }

  res$call <- match.call()
  class(res) <- "glPca"

  return(res)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fence.lmer.R
\name{fence.lmer}
\alias{fence.lmer}
\title{Fence model selection (Linear Mixed Model)}
\usage{
fence.lmer(full, data, B = 100, grid = 101, fence = c("adaptive",
"nonadaptive"), cn = NA, REML = TRUE, bandwidth = NA,
cpus = parallel::detectCores())
}
\arguments{
\item{full}{formula of full model}
\item{data}{data}
\item{B}{number of bootstrap samples, parametric bootstrap is used}
\item{grid}{grid for c}
\item{fence}{a procedure of the fence method to be used.
It's suggested to choose the nonadaptive procedure if c is known; otherwise the adaptive procedure must be chosen}
\item{cn}{cn value for nonadaptive}
\item{REML}{Restricted Maximum Likelihood approach}
\item{bandwidth}{bandwidth for kernel smooth function}
\item{cpus}{Number of parallel computers}
}
\value{
\item{models}{list all model candidates in the model space}
\item{B}{list the number of bootstrap samples that have been used}
\item{lack_of_fit_matrix}{list a matrix of Qs for all model candidates (in columns). Each row is for each bootstrap sample}
\item{Qd_matrix}{list a matrix of QM - QM.tilde for all model candidates. Each row is for each bootstrap sample}
\item{bandwidth}{list the value of bandwidth}
\item{model_mat}{list a matrix of selected models at each c values in grid (in columns). Each row is for each bootstrap sample}
\item{freq_mat}{list a matrix of coverage probabilities (frequency/smooth_frequency) of each selected models for a given c value (index)}
\item{c}{list the adaptive choice of c value from which the parsimonious model is selected}
\item{sel_model}{list the selected (parsimonious) model given the adaptive c value}
@note The current Fence package focuses on variable selection.
However, Fence methods can be used to select other parameters of interest, e.g., tuning parameter, variance-covariance structure, etc.
}
\description{
Fence model selection (Linear Mixed Model)
}
\details{
In Jiang et. al (2008), the adaptive c value is chosen from the highest peak in the p* vs. c plot.
In Jiang et. al (2009), 95\% CI is taken into account while choosing such an adaptive choice of c.
In Thuan Nguyen et. al (2014), the adaptive c value is chosen from the first peak. This approach works better in the
moderate sample size or weak signal situations. Empirically, the first peak becomes highest peak when sample size
increases or signals become stronger
}
\examples{
require(fence)
library(snow)
#### Example 1 #####
data(iris)
full = Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width + (1|Species)
# Takes greater than 5 seconds to run
# test_af = fence.lmer(full, iris)
# test_af$c
# test_naf = fence.lmer(full, iris, fence = "nonadaptive", cn = 12)
# plot(test_af)
# test_af$sel_model
# test_naf$sel_model
}
\references{
\itemize{
\item{Jiang J., Rao J.S., Gu Z., Nguyen T. (2008), Fence Methods for Mixed Model Selection. The Annals of Statistics, 36(4): 1669-1692}
\item{Jiang J., Nguyen T., Rao J.S. (2009), A Simplified Adaptive Fence Procedure. Statistics and Probability Letters, 79, 625-629}
\item{Thuan Nguyen, Jie Peng, Jiming Jiang (2014), Fence Methods for Backcross Experiments. Statistical Computation and Simulation, 84(3), 644-662}
}
}
\author{
Jiming Jiang Jianyang Zhao J. Sunil Rao Thuan Nguyen
}
|
/man/fence.lmer.Rd
|
no_license
|
cran/fence
|
R
| false
| true
| 3,413
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fence.lmer.R
\name{fence.lmer}
\alias{fence.lmer}
\title{Fence model selection (Linear Mixed Model)}
\usage{
fence.lmer(full, data, B = 100, grid = 101, fence = c("adaptive",
"nonadaptive"), cn = NA, REML = TRUE, bandwidth = NA,
cpus = parallel::detectCores())
}
\arguments{
\item{full}{formula of full model}
\item{data}{data}
\item{B}{number of bootstrap samples, parametric bootstrap is used}
\item{grid}{grid for c}
\item{fence}{a procedure of the fence method to be used.
It's suggested to choose the nonadaptive procedure if c is known; otherwise the adaptive procedure must be chosen}
\item{cn}{cn value for nonadaptive}
\item{REML}{Restricted Maximum Likelihood approach}
\item{bandwidth}{bandwidth for kernel smooth function}
\item{cpus}{Number of parallel computers}
}
\value{
\item{models}{list all model candidates in the model space}
\item{B}{list the number of bootstrap samples that have been used}
\item{lack_of_fit_matrix}{list a matrix of Qs for all model candidates (in columns). Each row is for each bootstrap sample}
\item{Qd_matrix}{list a matrix of QM - QM.tilde for all model candidates. Each row is for each bootstrap sample}
\item{bandwidth}{list the value of bandwidth}
\item{model_mat}{list a matrix of selected models at each c values in grid (in columns). Each row is for each bootstrap sample}
\item{freq_mat}{list a matrix of coverage probabilities (frequency/smooth_frequency) of each selected models for a given c value (index)}
\item{c}{list the adaptive choice of c value from which the parsimonious model is selected}
\item{sel_model}{list the selected (parsimonious) model given the adaptive c value}
@note The current Fence package focuses on variable selection.
However, Fence methods can be used to select other parameters of interest, e.g., tuning parameter, variance-covariance structure, etc.
}
\description{
Fence model selection (Linear Mixed Model)
}
\details{
In Jiang et. al (2008), the adaptive c value is chosen from the highest peak in the p* vs. c plot.
In Jiang et. al (2009), 95\% CI is taken into account while choosing such an adaptive choice of c.
In Thuan Nguyen et. al (2014), the adaptive c value is chosen from the first peak. This approach works better in the
moderate sample size or weak signal situations. Empirically, the first peak becomes highest peak when sample size
increases or signals become stronger
}
\examples{
require(fence)
library(snow)
#### Example 1 #####
data(iris)
full = Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width + (1|Species)
# Takes greater than 5 seconds to run
# test_af = fence.lmer(full, iris)
# test_af$c
# test_naf = fence.lmer(full, iris, fence = "nonadaptive", cn = 12)
# plot(test_af)
# test_af$sel_model
# test_naf$sel_model
}
\references{
\itemize{
\item{Jiang J., Rao J.S., Gu Z., Nguyen T. (2008), Fence Methods for Mixed Model Selection. The Annals of Statistics, 36(4): 1669-1692}
\item{Jiang J., Nguyen T., Rao J.S. (2009), A Simplified Adaptive Fence Procedure. Statistics and Probability Letters, 79, 625-629}
\item{Thuan Nguyen, Jie Peng, Jiming Jiang (2014), Fence Methods for Backcross Experiments. Statistical Computation and Simulation, 84(3), 644-662}
}
}
\author{
Jiming Jiang Jianyang Zhao J. Sunil Rao Thuan Nguyen
}
|
# Getting & Cleaning Data project: merge the UCI HAR training and test sets,
# keep only mean()/std() measurements, attach descriptive activity names,
# and write a tidy data set of per-subject/per-activity averages.

# Lookup table mapping activity codes to descriptive activity names.
act_lookup <- read.table("activity_labels.txt")
names(act_lookup) <- c("activity code", "activity")

# Feature names (second column of features.txt) label the measurement columns.
feature_names <- read.table("features.txt")[, 2]

# Training split.
train_subjects <- read.table("train/subject_train.txt")
train_x <- read.table("train/X_train.txt")
train_y <- read.table("train/y_train.txt")

# Test split.
test_subjects <- read.table("test/subject_test.txt")
test_x <- read.table("test/X_test.txt")
test_y <- read.table("test/y_test.txt")

# Requirement #1: stack the two splits row-wise.
all_subjects <- rbind(train_subjects, test_subjects)
all_x <- rbind(train_x, test_x)
all_y <- rbind(train_y, test_y)

# Descriptive column labels.
names(all_subjects) <- "subject"
names(all_x) <- feature_names
names(all_y) <- "activity code"

# Requirement #2: keep only the mean() / std() measurements.
wanted <- grepl("mean\\(\\)|std\\(\\)", feature_names)
x_subset <- all_x[, wanted]

# Assemble subject, activity code, and the selected measurements.
combined <- cbind(all_subjects, all_y, x_subset)

# Requirement #3: attach descriptive activity names via merge() on the code.
combined <- merge(combined, act_lookup, by = "activity code")

# Drop the now-redundant numeric activity code column.
combined <- combined[names(combined) != "activity code"]

# The merged frame now satisfies requirements #1-#4: train/test merged,
# mean/std features only, descriptive activity and variable names.

# Requirement #5: average every variable per subject and activity.
tidy_data <- aggregate(. ~ subject + activity, data = combined, FUN = mean)

# Write the tidy data set (row.names = FALSE per submission instructions).
write.table(tidy_data, file = "tidy_data.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
mristic/getting_and_cleaning_data_project
|
R
| false
| false
| 2,116
|
r
|
# Getting & Cleaning Data project: merge the UCI HAR training and test sets,
# keep only mean()/std() measurements, attach descriptive activity names,
# and write a tidy data set of per-subject/per-activity averages.

# Lookup table mapping activity codes to descriptive activity names.
act_lookup <- read.table("activity_labels.txt")
names(act_lookup) <- c("activity code", "activity")

# Feature names (second column of features.txt) label the measurement columns.
feature_names <- read.table("features.txt")[, 2]

# Training split.
train_subjects <- read.table("train/subject_train.txt")
train_x <- read.table("train/X_train.txt")
train_y <- read.table("train/y_train.txt")

# Test split.
test_subjects <- read.table("test/subject_test.txt")
test_x <- read.table("test/X_test.txt")
test_y <- read.table("test/y_test.txt")

# Requirement #1: stack the two splits row-wise.
all_subjects <- rbind(train_subjects, test_subjects)
all_x <- rbind(train_x, test_x)
all_y <- rbind(train_y, test_y)

# Descriptive column labels.
names(all_subjects) <- "subject"
names(all_x) <- feature_names
names(all_y) <- "activity code"

# Requirement #2: keep only the mean() / std() measurements.
wanted <- grepl("mean\\(\\)|std\\(\\)", feature_names)
x_subset <- all_x[, wanted]

# Assemble subject, activity code, and the selected measurements.
combined <- cbind(all_subjects, all_y, x_subset)

# Requirement #3: attach descriptive activity names via merge() on the code.
combined <- merge(combined, act_lookup, by = "activity code")

# Drop the now-redundant numeric activity code column.
combined <- combined[names(combined) != "activity code"]

# The merged frame now satisfies requirements #1-#4: train/test merged,
# mean/std features only, descriptive activity and variable names.

# Requirement #5: average every variable per subject and activity.
tidy_data <- aggregate(. ~ subject + activity, data = combined, FUN = mean)

# Write the tidy data set (row.names = FALSE per submission instructions).
write.table(tidy_data, file = "tidy_data.txt", row.names = FALSE)
|
library(ggplot2)
# Bar chart of the values in `hebrew`, one bar per name of `prob`, with the
# x axis kept in the original ordering of names(prob) (factor levels pin it).
# NOTE(review): `prob` and `hebrew` are free variables assumed to be defined
# by earlier code not shown here -- confirm before running standalone.
qplot(factor(names(prob), levels = names(prob)), hebrew, geom = "histogram")
|
/Programming Language Detection/Experiment-2/Dataset/Train/R/probabilistic-choice-2.r
|
no_license
|
dlaststark/machine-learning-projects
|
R
| false
| false
| 94
|
r
|
library(ggplot2)
qplot(factor(names(prob), levels = names(prob)), hebrew, geom = "histogram")
|
###
### Prune tips from a phylogenetic tree and save the result.
### The tip selection is expected to come from an imported .csv file.
###
# SPECIES_TO_REMOVE: indices (or logical mask) into TREE$tip.label selecting
#                    the tips to drop.
# TREE:              a phylo object (ape).
# SAVE_FILE:         path the pruned tree is written to via write.tree().
# Returns the pruned phylo object.
Prune.Tree <- function(SPECIES_TO_REMOVE, TREE, SAVE_FILE) {
  tips_to_drop <- TREE$tip.label[SPECIES_TO_REMOVE]
  pruned <- drop.tip(TREE, tips_to_drop)
  write.tree(pruned, file = SAVE_FILE)
  return(pruned)
}
|
/DNC_Toolbox/Prune_Tree.R
|
no_license
|
Gene-Weaver/Testing-Darwin-s-Naturalization-Conundrum
|
R
| false
| false
| 253
|
r
|
###
### Prune tips from a phylogenetic tree and save the result.
### The tip selection is expected to come from an imported .csv file.
###
# SPECIES_TO_REMOVE: indices (or logical mask) into TREE$tip.label selecting
#                    the tips to drop.
# TREE:              a phylo object (ape).
# SAVE_FILE:         path the pruned tree is written to via write.tree().
# Returns the pruned phylo object.
Prune.Tree <- function(SPECIES_TO_REMOVE, TREE, SAVE_FILE) {
  tips_to_drop <- TREE$tip.label[SPECIES_TO_REMOVE]
  pruned <- drop.tip(TREE, tips_to_drop)
  write.tree(pruned, file = SAVE_FILE)
  return(pruned)
}
|
library(tidyverse)
library(plotly)
library(htmlwidgets)
library(argparser)
library(methods)

# Purpose: This script takes the RAW output from sambamba and produces summary tables and plots highlighting the uniformity of coverage
# Usage: Rscript sambamba_exon_coverage.R --args "/path_to_folder/exon_coverage"

# Command-line interface (argparser). Flag arguments default to FALSE
# unless supplied on the command line.
p <-
  arg_parser("Calculate the uniformity of coverage over multiple samples")
p <-
  add_argument(p, "--input_directory", help = "Input data directory containing the sambamba output files")
p <-
  add_argument(p, "--output_directory", help = "Output directory for results")
p <-
  add_argument(p, "--suffix_pattern", help = "Suffix pattern to match Sambamba input", default =
                 ".sambamba_output.bed")
p <-
  add_argument(p, "--group_by", help = "User provided Pan numbers to group results together in the format ParentPan=ChildPan1,ChildPan2;", default =
                 "")
p <-
  add_argument(p, "--plot_figures", help = "Plot figures (May be very slow for large data sets)", flag =
                 TRUE)
p <-
  add_argument(p, "--simple_plot_only", help = "Plot only the simplified static figure (May speed up drawing plots for large data sets)", flag =
                 TRUE)
p <-
  add_argument(p, "--no_jitter", help = "Don't plot data points on barplots (May speed up drawing plots for large data sets)", flag =
                 TRUE)

# Parsed arguments; referenced throughout the rest of the script.
args <- parse_args(p)
# Build a per-region coverage boxplot for one panel.
#
# df:       coverage tibble filtered to a single panel; must contain columns
#           region, gene and scaled_meanCoverage.
# panel:    panel (Pan number) name, used in the plot title.
# simplify: unused; kept for interface compatibility and given a default so
#           the two-argument call in the main loop no longer depends on lazy
#           evaluation of a missing argument.
#
# NOTE(review): the title and jitter option also read the globals run_name,
# num_target_regions, num_samples and args set by the surrounding script.
# Returns a ggplot object.
generate_coverage_plot <- function(df, panel, simplify = FALSE) {
  # Remove rows with NAs caused by regions not included between panels
  df <- df[complete.cases(df), ]
  # Order regions by median scaled coverage so boxplots run lowest -> highest
  df$region <-
    fct_reorder(df$region,
                df$scaled_meanCoverage,
                .fun = median,
                .desc = FALSE)
  # Boxplot of coverage per region, optionally overlaid with jittered points.
  # (The original applied an identical theme() twice; once is sufficient.)
  p <- df %>%
    ggplot(aes(x = region, y = scaled_meanCoverage)) +
    geom_boxplot(outlier.size = 0.5, aes(fill = gene)) +
    { if (args$no_jitter == FALSE) geom_jitter(color = "grey", width = 0.01, size = 1, alpha = 0.25, shape = 1) } +
    theme(
      plot.title = element_text(size = 11),
      axis.text.x = element_text(angle = 45, hjust = 1, size = 6)
    ) +
    ggtitle(
      paste0(
        "Run ",
        run_name,
        ", ",
        panel ,
        " (",
        num_target_regions,
        " target regions), Coverage over ",
        num_samples,
        " samples"
      )
    ) +
    xlab("Target Region") +
    ylab("Scaled average coverage")
  return(p)
}
# Build a simplified static coverage figure for one panel: one point per
# target region showing the mean (across samples) of the scaled coverage,
# with regions ordered from lowest to highest mean.
#
# df:    coverage tibble filtered to a single panel; must contain columns
#        region, gene, transcript, genomicCoordinates and scaled_meanCoverage.
# panel: panel (Pan number) name, used in the plot title.
#
# NOTE(review): the title also reads the globals run_name,
# num_target_regions and num_samples set by the calling loop -- confirm they
# are current if this is ever called outside that loop.
# Returns a ggplot object (the value of the final pipeline expression).
generate_simple_coverage_plot <- function(df, panel) {
  # Remove rows with NAs caused by regions not included between panels
  df <- df[complete.cases(df),]
  # Group the tibble data structure by 'region' and average over samples
  region_mean <- df %>%
    group_by(region) %>%
    summarise(
      gene = unique(gene),
      transcript = unique(transcript),
      genomicCoordinates = unique(genomicCoordinates),
      region_meanCoverage = mean(scaled_meanCoverage)
    )
  # Order region factors by region_meanCoverage to produce plot in correct order
  region_mean$region <- as.factor(region_mean$region)
  region_mean$region <-
    fct_reorder(
      region_mean$region,
      region_mean$region_meanCoverage,
      .fun = median,
      .desc = FALSE
    )
  # Plot one red point per region, percentage-style axis labelling omitted
  region_mean %>%
    ggplot(aes(x = region, y = region_meanCoverage)) +
    geom_point(col = "red",
               shape = 20,
               size = 0.1) +
    theme(
      legend.position = "none",
      plot.title = element_text(size = 11),
      axis.text.x = element_text(
        angle = 45,
        hjust = 1,
        size = 1
      )
    ) +
    ggtitle(
      paste0(
        "Run ",
        run_name,
        ", ",
        panel ,
        " (",
        num_target_regions,
        " target regions), Coverage over ",
        num_samples,
        " samples"
      )
    ) +
    xlab("Target Region") +
    ylab("Scaled average coverage")
}
# Scale a numeric vector by its root mean square: scale() with
# center = FALSE divides each column by sqrt(sum(x^2) / (n - 1)).
# as.vector() strips the matrix attributes so a plain numeric vector is
# returned, which makes coverage values comparable across samples.
scale_rms <- function(x) {
  rms_scaled <- scale(x, center = FALSE, scale = TRUE)
  as.vector(rms_scaled)
}
# Main Script:
# Get directory location from commandline - directory should contain the raw exon level coverage files
data_directory <- args$input_directory
output_directory <- args$output_directory
suffix_pattern <- args$suffix_pattern
pan_numbers_for_grouping <-
  args$group_by # Optional comma delimited string of pan numbers to group together

# Create output directory if it does not already exist
dir.create(output_directory, showWarnings = FALSE)

# Get all files matching the suffix pattern from the data directory
sambamba_files <-
  list.files(
    path = data_directory,
    pattern = paste0("*", suffix_pattern),
    full.names = TRUE
  )

# Import coverage data and add relevant sample ID to each imported row.
# simplify = FALSE keeps a named list so bind_rows() can use the file
# paths as the .id ("sample_id") column.
tbl <-
  sapply(sambamba_files ,
         read_tsv,
         col_types = "ciicicccinnc",
         simplify = FALSE) %>%
  bind_rows(.id = "sample_id")

# Simplify & cleanup sample names (strip directory and suffix)
tbl$sample_id <-
  gsub(basename(tbl$sample_id),
       pattern = suffix_pattern,
       replacement = "")

# Rename 2nd column to remove preceding '#'
colnames(tbl)[2] <- "chrom"

# Replace F1:F6 labels with meaningful names
colnames(tbl)[5:9] <-
  c("genomicCoordinates",
    "score",
    "strand",
    "gene_transcript",
    "accessionNum")

# Add new column 'region' so that each target region is represented by a unique ID
tbl$region <- paste(tbl$chrom,
                    tbl$chromStart,
                    tbl$chromEnd,
                    tbl$gene_transcript,
                    sep = ";")

# Scale coverage within each sample (RMS scaling) so samples are comparable
tbl <- tbl %>%
  group_by(sample_id) %>%
  mutate(scaled_meanCoverage = scale_rms(meanCoverage))

# Identify Run ID (text before the first underscore) and add as a column
tbl$run_name <-
  stringr::str_split(string = tbl$sample_id,
                     pattern = "_",
                     simplify = TRUE)[, 1]

# Extract gene and transcript names into separate columns:
tbl$gene <-
  stringr::str_split(
    string = tbl$gene_transcript,
    pattern = ";",
    simplify = TRUE
  )[, 1]
tbl$transcript <-
  stringr::str_split(
    string = tbl$gene_transcript,
    pattern = ";",
    simplify = TRUE
  )[, 2]

# Any SNPs referenced by their RS accession number will not have a transcript - label as 'dbSNP'
tbl$gene[tbl$transcript == ""] <- "dbSNP"

# Identify Pan number from sample name and add as additional column
tbl$pan_number <-
  stringr::str_extract(string = tbl$sample_id, pattern = "Pan[0-9]+")

# If the group_by argument is provided, collapse all child Pan numbers into
# their parent Pan number. The print() calls below trace the re-grouping.
print(pan_numbers_for_grouping)
if (pan_numbers_for_grouping != "") {
  group_by_input <- strsplit(pan_numbers_for_grouping, ";")[[1]]
  for (line in group_by_input) {
    print(line)
    parent_pan <- strsplit(line, "=")[[1]][1]
    print(parent_pan)
    child_pans <- strsplit(strsplit(line, "=")[[1]][2], ",")[[1]]
    print(child_pans)
    # Replace child terms with parent term
    print(tbl$pan_number)
    tbl$pan_number[tbl$pan_number %in% child_pans] <- parent_pan
  }
}
# Produce separate output (optional PDF, optional interactive HTML, summary
# CSV) for each run/panel combination, named "<run>_<panel>_coverage.*".
#
# Fix: the data are now restricted to the current run_name as well as the
# panel. Previously only the panel filter was applied, so when the input
# directory contained several runs, each run's output files silently
# included every run's samples for that panel.
for (run_name in unique(tbl$run_name)) {
  print(paste("Processing run name =", run_name))
  run_tbl <- tbl[tbl$run_name == run_name, ]
  # Iterate only over panels that actually occur in this run
  for (panel in unique(run_tbl$pan_number)) {
    print(paste("Processing panel number =", panel))
    df <- run_tbl[run_tbl$pan_number == panel, ]

    # Number of samples to be plotted (used by the plot titles)
    num_samples <- length(unique(df$sample_id))
    print(paste("Number of samples =", num_samples))

    # Number of target regions for this panel (used by the plot titles)
    num_target_regions <- length(unique(df$region))
    print(paste("Number of target regions =", num_target_regions))

    # Generate file name:
    filename <- paste0(run_name, "_", panel)

    # Create simple plot
    if (args$plot_figures == TRUE) {
      # Create coverage plot of means for PDF
      print("Generating simplified plot")
      simplified_plot <- generate_simple_coverage_plot(df, panel)
      # Save simplified plot to pdf:
      filepath <-
        paste0(output_directory, "/", filename, "_coverage.pdf")
      print(paste0("Saving file", filepath))
      ggsave(
        filename = filepath,
        simplified_plot,
        device = "pdf",
        width = 297,
        height = 200,
        units = "mm"
      )
    }

    # Generate interactive plot
    if (args$plot_figures == TRUE && args$simple_plot_only == FALSE) {
      # Generate static ggplot of the data
      print("Generating static ggplot")
      static_plot <- generate_coverage_plot(df, panel)
      # Add interactivity to plot:
      print("Generating Interactive plot")
      interactive_plot <- ggplotly(static_plot)
      # Save interactive plot as a single html file.
      # Fix: interactive_plot is already a plotly object; the original
      # redundantly wrapped it in ggplotly() a second time.
      filepath <-
        paste0(output_directory, '/', filename, "_coverage.html")
      print(paste0("Saving file", filepath))
      saveWidget(interactive_plot, file = filepath)
    } else {
      print("Skipping Interactive plot")
    }
    if (args$plot_figures == FALSE) {
      print("Skipping all plots")
    }

    # Save per-region summary table, ordered by mean scaled coverage
    filepath <-
      paste0(output_directory, "/", filename, "_coverage.csv")
    print(paste0("Saving file", filepath))
    summary_df <- df %>%
      group_by(region) %>%
      # Summarise data by region. `run_name` resolves to the data column;
      # `panel` has no matching column so it resolves to the loop variable.
      summarise(
        gene = unique(gene),
        run_name = unique(run_name),
        panel = unique(panel),
        transcript = unique(transcript),
        genomicCoordinates = unique(genomicCoordinates),
        accessionNum = unique(accessionNum),
        region_meanCoverage = mean(scaled_meanCoverage)
      ) %>%
      arrange(region_meanCoverage)
    print("Saving CSV file")
    write_delim(summary_df, filepath, delim = "\t")
  }
}
|
/sambamba_exon_coverage.R
|
no_license
|
moka-guys/Coverage_Uniformity_Report
|
R
| false
| false
| 10,366
|
r
|
library(tidyverse)
library(plotly)
library(htmlwidgets)
library(argparser)
library(methods)
# Purpose: This script takes the RAW output from sambamba and produces summary tables and plots highlighting the uniformity of coverage
# Usage: Rscript sambamba_exon_coverage.R --args "/path_to_folder/exon_coverage"
# Functions:
p <-
arg_parser("Calculate the uniformity of coverage over multiple samples")
p <-
add_argument(p, "--input_directory", help = "Input data directory containing the sambamba output files")
p <-
add_argument(p, "--output_directory", help = "Output directory for results")
p <-
add_argument(p, "--suffix_pattern", help = "Suffix pattern to match Sambamba input", default =
".sambamba_output.bed")
p <-
add_argument(p, "--group_by", help = "User provided Pan numbers to group results together in the format ParentPan=ChildPan1,ChildPan2;", default =
"")
p <-
add_argument(p, "--plot_figures", help = "Plot figures (May be very slow for large data sets)", flag =
TRUE)
p <-
add_argument(p, "--simple_plot_only", help = "Plot only the simplified static figure (May speed up drawing plots for large data sets)", flag =
TRUE)
p <-
add_argument(p, "--no_jitter", help = "Don't plot data points on barplots (May speed up drawing plots for large data sets)", flag =
TRUE)
args <- parse_args(p)
#' Build the detailed per-region coverage boxplot for one panel.
#'
#' @param df Data frame of per-region coverage rows for a single panel;
#'   must contain columns region, gene and scaled_meanCoverage.
#' @param panel Panel (Pan number) label, used only in the plot title.
#' @param simplify Unused; retained so existing three-argument callers
#'   keep working (lazy evaluation means two-argument calls also work).
#' @return A ggplot object: one boxplot per target region, ordered by
#'   median scaled coverage, optionally overlaid with jittered points.
#'
#' NOTE(review): relies on the globals run_name, num_target_regions,
#' num_samples and args set by the main script -- confirm before reuse.
generate_coverage_plot <- function(df, panel, simplify) {
  # Remove rows with NAs caused by regions not included between panels
  df <- df[complete.cases(df), ]
  # Reorder the region factor by median coverage so the boxplots are
  # plotted in order from lowest to highest
  df$region <-
    fct_reorder(df$region,
                df$scaled_meanCoverage,
                .fun = median,
                .desc = FALSE)
  # Plot coverage data (a series of boxplots showing coverage for each
  # region, ordered by median). The duplicated theme() call and the
  # unused rainbow() palette from the original have been removed; both
  # were dead weight with no effect on the rendered plot.
  p <- df %>%
    ggplot(aes(x = region, y = scaled_meanCoverage)) +
    geom_boxplot(outlier.size = 0.5, aes(fill = gene)) +
    { if (args$no_jitter == FALSE) geom_jitter(color = "grey", width = 0.01, size = 1, alpha = 0.25 , shape = 1 ) } +
    theme(
      plot.title = element_text(size = 11),
      axis.text.x = element_text(angle = 45, hjust = 1, size = 6)
    ) +
    ggtitle(
      paste0(
        "Run ",
        run_name,
        ", ",
        panel ,
        " (",
        num_target_regions,
        " target regions), Coverage over ",
        num_samples,
        " samples"
      )
    ) +
    xlab("Target Region") +
    ylab("Scaled average coverage")
  return(p)
}
#' Build the simplified coverage plot for one panel: a single point per
#' target region showing the mean scaled coverage across samples, ordered
#' from lowest to highest mean.
#' Returns a ggplot object (the value of the final pipe expression).
#' NOTE(review): uses the globals run_name, num_target_regions and
#' num_samples for the title -- assumes the main loop has set them.
generate_simple_coverage_plot <- function(df, panel) {
# Remove rows with NAs caused by regions not included between panels
df <- df[complete.cases(df),]
# Collapse to one row per region, carrying gene/transcript/coordinate
# annotations and the mean of the scaled coverage over all samples
region_mean <- df %>%
group_by(region) %>%
summarise(
gene = unique(gene),
transcript = unique(transcript),
genomicCoordinates = unique(genomicCoordinates),
region_meanCoverage = mean(scaled_meanCoverage)
)
# Order region factors by region_meanCoverage so the x axis runs from
# worst-covered to best-covered region
region_mean$region <- as.factor(region_mean$region)
region_mean$region <-
fct_reorder(
region_mean$region,
region_mean$region_meanCoverage,
.fun = median,
.desc = FALSE
)
# Plot one point per region; tiny point/text sizes keep large panels legible
region_mean %>%
ggplot(aes(x = region, y = region_meanCoverage)) +
geom_point(col = "red",
shape = 20,
size = 0.1) +
theme(
legend.position = "none",
plot.title = element_text(size = 11),
axis.text.x = element_text(
angle = 45,
hjust = 1,
size = 1
)
) +
ggtitle(
paste0(
"Run ",
run_name,
", ",
panel ,
" (",
num_target_regions,
" target regions), Coverage over ",
num_samples,
" samples"
)
) +
xlab("Target Region") +
ylab("Scaled average coverage")
}
# Root-mean-square scaling: divide each value by the vector's RMS
# (scale() with center = FALSE), returned as a plain vector.
# This allows easier comparison between samples.
scale_rms <- function(x) {
  as.vector(scale(x, center = FALSE, scale = TRUE))
}
# Main Script:
# Get directory location from commandline - directory should contain the raw exon level coverage files
data_directory <- args$input_directory
output_directory <- args$output_directory
suffix_pattern <- args$suffix_pattern
pan_numbers_for_grouping <-
args$group_by # Optional comma delimited string of pan numbers to group together
# Create output directory if it does not already exists
dir.create(output_directory, showWarnings = FALSE)
# Get all files with the suffix "*..refined.sambamba_output.bed" from data directory
sambamba_files <-
list.files(
path = data_directory,
pattern = paste0("*", suffix_pattern),
full.names = TRUE
)
# Import coverage data and add relevant sample ID to each imported row
tbl <-
sapply(sambamba_files ,
read_tsv,
col_types = "ciicicccinnc",
simplify = FALSE) %>%
bind_rows(.id = "sample_id")
# Simplify & cleanup sample names
tbl$sample_id <-
gsub(basename(tbl$sample_id),
pattern = suffix_pattern,
replacement = "")
# Rename 2nd column to remove proceding '#'
colnames(tbl)[2] <- "chrom"
# Replace F1:F6 labels with meaningful names
colnames(tbl)[5:9] <-
c("genomicCoordinates",
"score",
"strand",
"gene_transcript",
"accessionNum")
# Add new column 'region' so that each target region is represented by unique ID
tbl$region <- paste(tbl$chrom,
tbl$chromStart,
tbl$chromEnd,
tbl$gene_transcript,
sep = ";")
# Group the tibble data structure by samples so that average can be calculated accross samples
tbl <- tbl %>%
group_by(sample_id) %>%
mutate(scaled_meanCoverage = scale_rms(meanCoverage))
# Identify Run ID from sample name and add as additional column
tbl$run_name <-
stringr::str_split(string = tbl$sample_id,
pattern = "_",
simplify = TRUE)[, 1]
# Extract gene and transcript names into separate columns:
tbl$gene <-
stringr::str_split(
string = tbl$gene_transcript,
pattern = ";",
simplify = TRUE
)[, 1]
tbl$transcript <-
stringr::str_split(
string = tbl$gene_transcript,
pattern = ";",
simplify = TRUE
)[, 2]
# Any SNPs referenced by their RS accession number will not have a transcript - label as 'dbSNP'
tbl$gene[tbl$transcript == ""] <- "dbSNP"
# Identify Pan number from sample name and add as additional column
tbl$pan_number <-
stringr::str_extract(string = tbl$sample_id, pattern = "Pan[0-9]+")
# If commandline argument group_by is provided collapse all child Pan numbers into Parent PanNumber
print(pan_numbers_for_grouping)
if (pan_numbers_for_grouping != "") {
group_by_input <- strsplit(pan_numbers_for_grouping, ";")[[1]]
for (line in group_by_input) {
print(line)
parent_pan <- strsplit(line, "=")[[1]][1]
print(parent_pan)
child_pans <- strsplit(strsplit(line, "=")[[1]][2], ",")[[1]]
print(child_pans)
# Replace child terms with parent term
print(tbl$pan_number)
tbl$pan_number[tbl$pan_number %in% child_pans] <- parent_pan
}
}
# Produce separate output for each panel
# Extract meta data from sample name
# Main output loop: for every run and every panel, emit the requested
# plots (simplified PDF, interactive HTML) and a per-region summary table.
# NOTE(review): df is subset by panel only; the run_name loop does not
# restrict rows to the current run, so with multiple runs each panel's
# data is reprocessed per run -- confirm whether df should also filter
# on run_name.
for (run_name in unique(tbl$run_name)) {
print(paste("Processing run name =", run_name))
for (panel in unique(tbl$pan_number)) {
print(paste("Processing panel number =", panel))
df <- tbl[tbl$pan_number == panel, ]
# Update number of samples to be plotted
num_samples <- length(unique(df$sample_id))
print(paste("Number of samples =", num_samples))
# Update number of target regions for this panel
num_target_regions <- length(unique(df$region))
print(paste("Number of target regions =", num_target_regions))
# Generate file name:
filename <- paste0(run_name, "_", panel)
# Create simple plot
if (args$plot_figures == TRUE){
# Create coverage plot of means for PDF
print("Generating simplified plot")
simplified_plot <- generate_simple_coverage_plot(df, panel)
# Save simplified plot to pdf (A4 landscape dimensions in mm):
filepath <-
paste0(output_directory, "/", filename, "_coverage.pdf")
print(paste0("Saving file", filepath))
ggsave(
filename = filepath,
simplified_plot,
device = "pdf",
width = 297,
height = 200,
units = "mm"
)
}
# Generate interactive plot (skipped when only the simple plot is wanted)
if (args$plot_figures == TRUE && args$simple_plot_only == FALSE){
# Generate static plot of data for each
print("Generating static ggplot")
static_plot <- generate_coverage_plot(df, panel)
# Add interactivity to plot:
print("Generating Interactive plot")
interactive_plot <- ggplotly(static_plot)
# Save interactive plot as a single html file:
# NOTE(review): interactive_plot is already a plotly object; wrapping
# it in ggplotly() again below is redundant -- confirm intended.
filepath <-
paste0(output_directory, '/', filename, "_coverage.html")
print(paste0("Saving file", filepath))
saveWidget(ggplotly(interactive_plot), file = filepath)
}else{
print("Skipping Interactive plot")
}
if (args$plot_figures == FALSE){
print("Skipping all plots")
}
# Save table: one row per region with its mean scaled coverage,
# sorted so the worst-covered regions appear first
filepath <-
paste0(output_directory, "/", filename, "_coverage.csv")
print(paste0("Saving file", filepath))
summary_df <- df %>%
group_by(region) %>%
# Summarise data by region
summarise(
gene = unique(gene),
run_name = unique(run_name),
panel = unique(panel),
transcript = unique(transcript),
genomicCoordinates = unique(genomicCoordinates),
accessionNum = unique(accessionNum),
region_meanCoverage = mean(scaled_meanCoverage)
) %>%
arrange(region_meanCoverage)
print("Saving CSV file")
# NOTE(review): despite the .csv extension the file is tab-delimited
write_delim(summary_df, filepath, delim = "\t")
}
}
|
## This should be in base R!
## Wraps `f` in a fresh closure that simply forwards all its arguments.
.FUNCall <- function(f) {
  function(...) {
    f(...)
  }
}
|
/R/utils.R
|
no_license
|
cran/registry
|
R
| false
| false
| 74
|
r
|
## This should be in base R!
## Wraps `f` in a fresh closure that simply forwards all its arguments.
.FUNCall <- function(f) {
  function(...) {
    f(...)
  }
}
|
c44de1132ed6a6af2aba60c67446132b aim-100-6_0-yes1-3-90.qdimacs 822 2432
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Letombe/Abduction/aim-100-6_0-yes1-3-90/aim-100-6_0-yes1-3-90.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 71
|
r
|
c44de1132ed6a6af2aba60c67446132b aim-100-6_0-yes1-3-90.qdimacs 822 2432
|
## makeCacheMatrix: wrap a matrix together with a cache for its inverse.
##
## Returns a list of four accessor functions:
##   set(y)      - replace the stored matrix and invalidate the cached inverse
##   get()       - return the stored matrix
##   setinv(inv) - store a (pre-computed) value as the cached inverse
##   getinv()    - return the cached inverse (NULL until setinv is called)
makeCacheMatrix <- function(x = matrix()) {
  I <- NULL
  set <- function(y) {
    x <<- y
    I <<- NULL  # new matrix, so any cached inverse is stale
  }
  get <- function() x
  # Argument renamed from `solve` to `inv`: the old name shadowed
  # base::solve() and wrongly suggested the inverse is computed here,
  # when in fact the caller supplies an already-computed value.
  setinv <- function(inv) I <<- inv
  getinv <- function() I
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix. If an inverse is already cached it is returned
## directly (after a "getting cached data" message); otherwise it is
## computed with solve(), stored via x$setinv(), and returned.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Return a matrix that is the inverse of 'x'
  inv <- solve(x$get(), ...)
  x$setinv(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
mha595/ProgrammingAssignment2
|
R
| false
| false
| 1,203
|
r
|
## Constructor for a matrix wrapper that can memoise its inverse.
## The returned list exposes set/get for the matrix itself and
## setinv/getinv for the cached inverse. The cache starts as NULL and
## is reset to NULL whenever a new matrix is stored via set().
makeCacheMatrix <- function(x = matrix()) {
  I <- NULL
  set <- function(y) {
    x <<- y
    I <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(solve) {
    I <<- solve
  }
  getinv <- function() {
    I
  }
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve: takes the list returned by makeCacheMatrix and returns the
## inverse of its matrix.
## If the inverse has already been computed it retrieves the cached value
## (emitting a "getting cached data" message); otherwise it computes the
## inverse with solve(), caches it via x$setinv(), and returns it.
## Extra arguments in ... are passed through to solve().
cacheSolve <- function(x, ...) {
I <- x$getinv()
if(!is.null(I)) {
message("getting cached data")
return(I)
}
## Return a matrix that is the inverse of 'x'
data <- x$get()
I <- solve(data, ...)
x$setinv(I)
I
}
|
#
# xts: eXtensible time-series
#
# Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com
#
# Contributions from Joshua M. Ulrich
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# coredata method for xts objects: strip the time index and xts-specific
# attributes, returning the bare data. When `fmt` is TRUE (or a format
# string), the formatted index is preserved as names (vector case) or
# row names (matrix case) on the result.
coredata.xts <- function(x, fmt=FALSE, ...) {
x.attr <- attributes(x)
# A character fmt is installed as the object's indexFormat and then
# treated as fmt = TRUE below
if(is.character(fmt)) {
indexFormat(x) <- fmt
fmt <- TRUE
}
if(length(x) > 0 && fmt) {
# Build dimnames from the (possibly custom-formatted) index
if(!is.null(indexFormat(x))) {
x.attr$dimnames <- list(format(index(x), format=indexFormat(x)),
dimnames(x)[[2]])
indexFormat(x) <- NULL # remove before printing
} else {
x.attr$dimnames <- list(format(index(x)),dimnames(x)[[2]])
}
#attributes not to be kept
original.attr <- x.attr[!names(x.attr) %in%
c('dim','dimnames')]
# Vector results get names; matrix results keep dim/dimnames
if(is.null(dim(x))) {
xx <- structure(coredata(x), names=x.attr$dimnames[[1]])
} else {
xx <- structure(coredata(x), dim=dim(x), dimnames=x.attr$dimnames)
}
# Drop every remaining non-core attribute from the result
for(i in names(original.attr)) {
attr(xx,i) <- NULL
}
return(xx)
}
# Empty object: return a zero-length vector of the same storage mode
if(length(x) == 0) {
return(vector(storage.mode(x)))
} else
# Fast path: delegate to the compiled routine in the xts package
return(.Call("coredata_xts", x, PACKAGE="xts"))
}
# Default xcoredata method: return every attribute of x except dim and
# dimnames, i.e. the "extra" (non-core) attributes.
`xcoredata.default` <-
function(x, ...) {
  attrs <- attributes(x)
  keep <- setdiff(names(attrs), c('dim', 'dimnames'))
  attrs[keep]
}
# Generic accessor for the non-core (extra) attributes of an object;
# methods dispatch on class(x).
`xcoredata` <-
function(x,...) {
UseMethod('xcoredata')
}
# Replacement-function generic: assign a set of extra attributes to x.
`xcoredata<-` <- function(x,value) {
UseMethod('xcoredata<-')
}
# Default replacement method. Assigning NULL returns just the core data
# (coredata(x)); otherwise each element of `value` is copied onto x as an
# attribute, skipping dim and dimnames so the core shape is untouched.
`xcoredata<-.default` <- function(x,value) {
if(is.null(value)) {
return(coredata(x))
} else {
for(att in names(value)) {
if(!att %in% c('dim','dimnames'))
attr(x,att) <- value[[att]]
}
return(x)
}
}
# xtsAttributes: return the additional attributes of an xts object beyond
# the standard ones (dim, dimnames, index, class, names).
# `user` selects the view:
#   NULL  - both xts-internal and user attributes (default)
#   TRUE  - user attributes only (xts bookkeeping names excluded)
#   FALSE - xts attributes only (those listed in attr .CLASSnames)
# Returns NULL when nothing qualifies.
`xtsAttributes` <-
function(x, user=NULL) {
# get all additional attributes not standard to xts object
#stopifnot(is.xts(x))
rm.attr <- c('dim','dimnames','index','class','names')
x.attr <- attributes(x)
if(is.null(user)) {
# Both xts and user attributes
rm.attr <- c(rm.attr,'.CLASS','.CLASSnames','.ROWNAMES', '.indexCLASS', '.indexFORMAT', '.indexTZ', 'tzone', 'tclass')
xa <- x.attr[!names(x.attr) %in% rm.attr]
}
else
if(user) {
# Only user attributes: also exclude anything registered in .CLASSnames
rm.attr <- c(rm.attr,'.CLASS','.CLASSnames','.ROWNAMES', '.indexCLASS', '.indexFORMAT','.indexTZ','tzone','tclass',
x.attr$.CLASSnames)
xa <- x.attr[!names(x.attr) %in% rm.attr]
} else {
# Only xts attributes
xa <- x.attr[names(x.attr) %in% x.attr$.CLASSnames]
}
if(length(xa) == 0) return(NULL)
xa
}
# Replacement-function generic for xtsAttributes.
`xtsAttributes<-` <-
function(x,value) {
UseMethod('xtsAttributes<-')
}
# xts method: assigning NULL clears every current xts/user attribute;
# otherwise each element of `value` is set as an attribute on x, except
# the protected core and xts bookkeeping names, which are never touched.
`xtsAttributes<-.xts` <-
function(x,value) {
if(is.null(value)) {
for(nm in names(xtsAttributes(x))) {
attr(x,nm) <- NULL
}
} else
for(nv in names(value)) {
if(!nv %in% c('dim','dimnames','index','class','.CLASS','.ROWNAMES','.CLASSnames',
'.indexCLASS','.indexFORMAT','.indexTZ'))
attr(x,nv) <- value[[nv]]
}
x
}
|
/R/coredata.xts.R
|
no_license
|
DavisVaughan/xts
|
R
| false
| false
| 3,605
|
r
|
#
# xts: eXtensible time-series
#
# Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com
#
# Contributions from Joshua M. Ulrich
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
coredata.xts <- function(x, fmt=FALSE, ...) {
x.attr <- attributes(x)
if(is.character(fmt)) {
indexFormat(x) <- fmt
fmt <- TRUE
}
if(length(x) > 0 && fmt) {
if(!is.null(indexFormat(x))) {
x.attr$dimnames <- list(format(index(x), format=indexFormat(x)),
dimnames(x)[[2]])
indexFormat(x) <- NULL # remove before printing
} else {
x.attr$dimnames <- list(format(index(x)),dimnames(x)[[2]])
}
#attributes not to be kept
original.attr <- x.attr[!names(x.attr) %in%
c('dim','dimnames')]
if(is.null(dim(x))) {
xx <- structure(coredata(x), names=x.attr$dimnames[[1]])
} else {
xx <- structure(coredata(x), dim=dim(x), dimnames=x.attr$dimnames)
}
for(i in names(original.attr)) {
attr(xx,i) <- NULL
}
return(xx)
}
if(length(x) == 0) {
return(vector(storage.mode(x)))
} else
return(.Call("coredata_xts", x, PACKAGE="xts"))
}
# Default xcoredata method: return every attribute of x except dim and
# dimnames, i.e. the "extra" (non-core) attributes.
`xcoredata.default` <-
function(x, ...) {
  attrs <- attributes(x)
  keep <- setdiff(names(attrs), c('dim', 'dimnames'))
  attrs[keep]
}
`xcoredata` <-
function(x,...) {
UseMethod('xcoredata')
}
`xcoredata<-` <- function(x,value) {
UseMethod('xcoredata<-')
}
`xcoredata<-.default` <- function(x,value) {
if(is.null(value)) {
return(coredata(x))
} else {
for(att in names(value)) {
if(!att %in% c('dim','dimnames'))
attr(x,att) <- value[[att]]
}
return(x)
}
}
`xtsAttributes` <-
function(x, user=NULL) {
# get all additional attributes not standard to xts object
#stopifnot(is.xts(x))
rm.attr <- c('dim','dimnames','index','class','names')
x.attr <- attributes(x)
if(is.null(user)) {
# Both xts and user attributes
rm.attr <- c(rm.attr,'.CLASS','.CLASSnames','.ROWNAMES', '.indexCLASS', '.indexFORMAT', '.indexTZ', 'tzone', 'tclass')
xa <- x.attr[!names(x.attr) %in% rm.attr]
}
else
if(user) {
# Only user attributes
rm.attr <- c(rm.attr,'.CLASS','.CLASSnames','.ROWNAMES', '.indexCLASS', '.indexFORMAT','.indexTZ','tzone','tclass',
x.attr$.CLASSnames)
xa <- x.attr[!names(x.attr) %in% rm.attr]
} else {
# Only xts attributes
xa <- x.attr[names(x.attr) %in% x.attr$.CLASSnames]
}
if(length(xa) == 0) return(NULL)
xa
}
`xtsAttributes<-` <-
function(x,value) {
UseMethod('xtsAttributes<-')
}
`xtsAttributes<-.xts` <-
function(x,value) {
if(is.null(value)) {
for(nm in names(xtsAttributes(x))) {
attr(x,nm) <- NULL
}
} else
for(nv in names(value)) {
if(!nv %in% c('dim','dimnames','index','class','.CLASS','.ROWNAMES','.CLASSnames',
'.indexCLASS','.indexFORMAT','.indexTZ'))
attr(x,nv) <- value[[nv]]
}
x
}
|
############################
# 1 kb window
test = read.csv("tables_w1Kb/train/all_stats_propre.csv",header=F)
# 100 kb window with id
test = read.csv("tables_w100Kb/avec_id/all_stats_propre.csv")
## 20 kb window with id
# NOTE(review): each read.csv overwrites `test`; only the last file read
# (all_stats.csv, 20 kb window) is actually analysed below -- comment out
# the unwanted reads instead of relying on order.
test = read.csv("tables_w20Kb/avec_id/all_stats_propre.csv")
test = read.csv("tables_w20Kb/avec_id/all_stats.csv")
colnames(test)= c("id","chrom","position", "is_outlier", "nt_diversity","nt_diversity_per_site","D_tajima","Fu_li_D")
# SNP density: ratio of nucleotide diversity to per-site diversity;
# NA/NaN from division by zero are replaced with 0
snpdensity = test$nt_diversity/test$nt_diversity_per_site
snpdensity=replace(snpdensity,is.na(snpdensity),0)
test = cbind(test,snpdensity)
# Split rows into outlier / inlier groups on the is_outlier flag
outlier = test[which(test$is_outlier==1),]
inlier = test[which(test$is_outlier==0),]
zin = inlier[which(inlier$snpdensity > 2),]
# Wilcoxon tests of association between outlier/inlier status
# (qualitative) and the diversity statistics (quantitative)
wilcox.test(outlier$nt_diversity,inlier$nt_diversity)
wilcox.test(outlier$nt_diversity_per_site,inlier$nt_diversity_per_site)
wilcox.test(outlier$D_tajima,inlier$D_tajima)
res = t.test(outlier$nt_diversity,inlier$nt_diversity)
|
/analysis_tables.R
|
no_license
|
Grelot/aker--beetGenomeEnvironmentAssociation
|
R
| false
| false
| 1,053
|
r
|
############################
#fenetre 1kB
test = read.csv("tables_w1Kb/train/all_stats_propre.csv",header=F)
#fenetre 100kB avec id
test = read.csv("tables_w100Kb/avec_id/all_stats_propre.csv")
##fenetre 20kB avec id
test = read.csv("tables_w20Kb/avec_id/all_stats_propre.csv")
test = read.csv("tables_w20Kb/avec_id/all_stats.csv")
colnames(test)= c("id","chrom","position", "is_outlier", "nt_diversity","nt_diversity_per_site","D_tajima","Fu_li_D")
#snp density
snpdensity = test$nt_diversity/test$nt_diversity_per_site
snpdensity=replace(snpdensity,is.na(snpdensity),0)
test = cbind(test,snpdensity)
outlier = test[which(test$is_outlier==1),]
inlier = test[which(test$is_outlier==0),]
zin = inlier[which(inlier$snpdensity > 2),]
#wilcoxon tester liaison outlier/inlier (quali) versus nt_divers (quanti)
wilcox.test(outlier$nt_diversity,inlier$nt_diversity)
wilcox.test(outlier$nt_diversity_per_site,inlier$nt_diversity_per_site)
wilcox.test(outlier$D_tajima,inlier$D_tajima)
res = t.test(outlier$nt_diversity,inlier$nt_diversity)
|
\name{GPW.logrank}
\alias{GPW.logrank}
\title{Generalized Piecewise Weighted Logrank Test}
\description{
Compute the p-value based on the
generalized piecewise weighted log-rank test when the treatment
time-lag effect is present and the lag duration
varies heterogeneously from individual to individual or from study to study, within a
certain domain and following a uniform pattern.
}
\usage{GPW.logrank(data, obs.time, time.to.event, event.status, trt.group, tl, tu)
}
\arguments{
\item{data}{Data frame}
\item{obs.time}{Column name in \code{data} for the observational time.}
\item{time.to.event}{Column name in \code{data} for the event time.}
\item{event.status}{Column name in \code{data} for the event status, where
0 denotes being censored, and 1 denotes events.}
\item{trt.group}{Column name in \code{data} for the treatment group, where
0 denotes controls, and 1 denotes treated subjects.}
\item{tl}{Lower bound of delayed duration domain}
\item{tu}{Upper bound of delayed duration domain}
}
%\details{}
\value{ The p-value of the test.
}
\references{
Xu, Z., Park, Y., Zhen, B. & Zhu, B. (2017).
Achieving optimal power of logrank test with random treatment time-lag effect.
Biometrika. Under review.
Xu, Z., Zhen, B., Park, Y., & Zhu, B. (2017). Designing therapeutic cancer vaccine trials with
delayed treatment effect. Statistics in medicine, 36(4), 592-605.
}
\author{Zhenzhen Xu <Zhenzhen.Xu@fda.hhs.gov> , Boguang Zhen<Boguang.Zhen@fda.hhs.gov>,
Yongsoek Park <yongpark@pitt.edu> and Bin Zhu <bin.zhu@nih.gov>}
\examples{
data(data, package="DelayedEffect.Design")
GPW.logrank(data, "X", "time_to_event", "event_status", "Z", 30, 30*11)
}
\keyword{ test }
\seealso{\code{\link{pow.SEPPLE.plus}} }
|
/man/GPW.logrank.Rd
|
no_license
|
cran/DelayedEffect.Design
|
R
| false
| false
| 1,817
|
rd
|
\name{GPW.logrank}
\alias{GPW.logrank}
\title{Generalized Piecewise Weighted Logrank Test}
\description{
Compute the p-value based on the
generalized piecewise weighted log-rank test when the treatment
time-lag effect is present and the lag duration
varies heterogeneously from individual to individual or from study to study, within a
certain domain and following a uniform pattern.
}
\usage{GPW.logrank(data, obs.time, time.to.event, event.status, trt.group, tl, tu)
}
\arguments{
\item{data}{Data frame}
\item{obs.time}{Column name in \code{data} for the observational time.}
\item{time.to.event}{Column name in \code{data} for the event time.}
\item{event.status}{Column name in \code{data} for the event status, where
0 denotes being censored, and 1 denotes events.}
\item{trt.group}{Column name in \code{data} for the treatment group, where
0 denotes controls, and 1 denotes treated subjects.}
\item{tl}{Lower bound of delayed duration domain}
\item{tu}{Upper bound of delayed duration domain}
}
%\details{}
\value{ The p-value of the test.
}
\references{
Xu, Z., Park, Y., Zhen, B. & Zhu, B. (2017).
Achieving optimal power of logrank test with random treatment time-lag effect.
Biometrika. Under review.
Xu, Z., Zhen, B., Park, Y., & Zhu, B. (2017). Designing therapeutic cancer vaccine trials with
delayed treatment effect. Statistics in medicine, 36(4), 592-605.
}
\author{Zhenzhen Xu <Zhenzhen.Xu@fda.hhs.gov> , Boguang Zhen<Boguang.Zhen@fda.hhs.gov>,
Yongsoek Park <yongpark@pitt.edu> and Bin Zhu <bin.zhu@nih.gov>}
\examples{
data(data, package="DelayedEffect.Design")
GPW.logrank(data, "X", "time_to_event", "event_status", "Z", 30, 30*11)
}
\keyword{ test }
\seealso{\code{\link{pow.SEPPLE.plus}} }
|
# Download and unzip the UCI household power consumption data, then read
# a fixed 2880-row window (skip/nrows select two days' worth of
# minute-level readings; header=FALSE because the header row is skipped).
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url,"//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads/data.zip")
unzip(zipfile="//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads/data.zip", exdir = "//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads")
data <- read.table("//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads/household_power_consumption.txt",header=FALSE, skip=66637, sep=";", nrows=2880)
names(data)<-c("Date","Time","Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2","Sub_metering_3")
head(data)
#####plot4
# Draw a 2x2 panel of time-series plots from the global `data` frame and
# copy the screen device to plot4.png. Note: `data$Date <- ...` inside
# the function modifies a local copy only; the global is unchanged.
plot4 <- function() {
par(mfrow = c(2,2))
data$Date <- as.Date(data$Date, format = '%d/%m/%Y')
data$DateTime <- as.POSIXct(paste(data$Date, data$Time))
plot(data$DateTime, data$Global_active_power, xlab = '', ylab = 'Global Active Power', type = "l")
plot(data$DateTime, data$Voltage, xlab = 'datetime', ylab = 'Voltage', type = "l")
plot(data$DateTime, data$Sub_metering_1, xlab = '', ylab = 'Energy sub metering', type = "l")
lines(data$DateTime,data$Sub_metering_2, col="red")
lines(data$DateTime,data$Sub_metering_3, col="blue")
legend("topright", lty=c(1,1,1), bty = 'n', cex = .25, col=c("black", "red", "blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(data$DateTime, data$Global_active_power, xlab = 'datetime', ylab = 'Global_active_power', type = "l")
# dev.copy re-renders the current device into a 480x480 PNG
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()
}
plot4()
|
/plot4.R
|
no_license
|
brodo80/Exploratory-Data-Analysis-Project-1
|
R
| false
| false
| 1,517
|
r
|
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url,"//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads/data.zip")
unzip(zipfile="//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads/data.zip", exdir = "//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads")
data <- read.table("//Txdf2fpw01cbtp/txcbt-redirected/smb001/Downloads/household_power_consumption.txt",header=FALSE, skip=66637, sep=";", nrows=2880)
names(data)<-c("Date","Time","Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2","Sub_metering_3")
head(data)
#####plot4
plot4 <- function() {
par(mfrow = c(2,2))
data$Date <- as.Date(data$Date, format = '%d/%m/%Y')
data$DateTime <- as.POSIXct(paste(data$Date, data$Time))
plot(data$DateTime, data$Global_active_power, xlab = '', ylab = 'Global Active Power', type = "l")
plot(data$DateTime, data$Voltage, xlab = 'datetime', ylab = 'Voltage', type = "l")
plot(data$DateTime, data$Sub_metering_1, xlab = '', ylab = 'Energy sub metering', type = "l")
lines(data$DateTime,data$Sub_metering_2, col="red")
lines(data$DateTime,data$Sub_metering_3, col="blue")
legend("topright", lty=c(1,1,1), bty = 'n', cex = .25, col=c("black", "red", "blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(data$DateTime, data$Global_active_power, xlab = 'datetime', ylab = 'Global_active_power', type = "l")
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()
}
plot4()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_classes.R
\name{enriched_result}
\alias{enriched_result}
\title{creates enriched result}
\usage{
enriched_result(features, universe, annotation, statistics)
}
\arguments{
\item{features}{the features that were differentially expressed (see details)}
\item{universe}{all of the features that were measured}
\item{annotation}{an \linkS4class{annotation} object}
\item{statistics}{a \linkS4class{statistical_results} object}
}
\value{
enriched_result
}
\description{
given all the slots for an \linkS4class{enriched_result}, checks that all
the data is self-consistent, and creates the \code{enriched_result} object.
}
|
/man/enriched_result_constructor.Rd
|
no_license
|
ehinderer/categoryCompare2
|
R
| false
| true
| 702
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_classes.R
\name{enriched_result}
\alias{enriched_result}
\title{creates enriched result}
\usage{
enriched_result(features, universe, annotation, statistics)
}
\arguments{
\item{features}{the features that were differentially expressed (see details)}
\item{universe}{all of the features that were measured}
\item{annotation}{an \linkS4class{annotation} object}
\item{statistics}{a \linkS4class{statistical_results} object}
}
\value{
enriched_result
}
\description{
given all the slots for an \linkS4class{enriched_result}, checks that all
the data is self-consistent, and creates the \code{enriched_result} object.
}
|
################### uniform ##########################
### CLT uniform
## my Central Limit Function
## Notice that I have assigned default values which can be changed when the function is called
#' CLT Uniform
#'
#' Demonstrates the Central Limit Theorem for the uniform distribution:
#' draws `iter` samples of size `n` from Unif(a, b) and plots a histogram
#' of the sample means, overlaid with the empirical density (blue), the
#' theoretical normal curve (red, dashed) and the parent uniform density
#' (black).
#'
#' @param n size of each sample
#' @param iter number of iterations or samples taken
#' @param a lower limit of possible outcome
#' @param b upper limit of possible outcome
#'
#' @return Called for its side effect of drawing the plot; returns the
#'   (invisible) value of the final curve() call.
#' @export
#'
#' @examples mycltu(n=20,iter=10000,a=0,b=10)
mycltu=function(n=20,iter=10000,a=0,b=10){
  ## r-random sample from the uniform
  y=runif(n*iter,a,b)
  ## Place these numbers into a matrix
  ## The columns will correspond to the iteration and the rows will equal the sample size n
  ## (nrow/ncol spelled out in full; the original relied on fragile
  ## partial argument matching via nr=/nc=)
  data=matrix(y,nrow=n,ncol=iter,byrow=TRUE)
  ## apply the function mean to the columns (2) of the matrix
  ## these are placed in a vector w
  w=apply(data,2,mean)
  ## We will make a histogram of the values in w
  ## How high should we make y axis?
  ## All the values used to make a histogram are placed in param (nothing is plotted yet)
  param=hist(w,plot=FALSE)
  ## Since the histogram will be a density plot we will find the max density
  ymax=max(param$density)
  ## To be on the safe side we will add 10% more to this
  ymax=1.1*ymax
  ## Now we can make the histogram
  hist(w,freq=FALSE, ylim=c(0,ymax), main=paste("Histogram of sample mean",
  "\n", "sample size= ",n,sep=""),xlab="Sample mean")
  ## add a density curve made from the sample distribution
  lines(density(w),col="Blue",lwd=3) # add a density plot
  ## Add a theoretical normal curve: mean (a+b)/2, sd (b-a)/sqrt(12n)
  curve(dnorm(x,mean=(a+b)/2,sd=(b-a)/(sqrt(12*n))),add=TRUE,col="Red",lty=2,lwd=3) # add a theoretical curve
  ## Add the density from which the samples were taken
  curve(dunif(x,a,b),add=TRUE,lwd=4)
}
|
/R/mycltu.R
|
no_license
|
lmtrevisi/MATH4753ouTrev0002
|
R
| false
| false
| 1,801
|
r
|
################### uniform ##########################
### CLT uniform
## my Central Limit Function
## Notice that I have assigned default values which can be changed when the function is called
#' CLT Uniform
#'
#' Demonstrates the Central Limit Theorem for the uniform distribution:
#' draws `iter` samples of size `n` from Unif(a, b) and plots a histogram
#' of the sample means, overlaid with the empirical density (blue), the
#' theoretical normal curve (red, dashed) and the parent uniform density
#' (black).
#'
#' @param n size of each sample
#' @param iter number of iterations or samples taken
#' @param a lower limit of possible outcome
#' @param b upper limit of possible outcome
#'
#' @return Called for its side effect of drawing the plot; returns the
#'   (invisible) value of the final curve() call.
#' @export
#'
#' @examples mycltu(n=20,iter=10000,a=0,b=10)
mycltu=function(n=20,iter=10000,a=0,b=10){
  ## r-random sample from the uniform
  y=runif(n*iter,a,b)
  ## Place these numbers into a matrix
  ## The columns will correspond to the iteration and the rows will equal the sample size n
  ## (nrow/ncol spelled out in full; the original relied on fragile
  ## partial argument matching via nr=/nc=)
  data=matrix(y,nrow=n,ncol=iter,byrow=TRUE)
  ## apply the function mean to the columns (2) of the matrix
  ## these are placed in a vector w
  w=apply(data,2,mean)
  ## We will make a histogram of the values in w
  ## How high should we make y axis?
  ## All the values used to make a histogram are placed in param (nothing is plotted yet)
  param=hist(w,plot=FALSE)
  ## Since the histogram will be a density plot we will find the max density
  ymax=max(param$density)
  ## To be on the safe side we will add 10% more to this
  ymax=1.1*ymax
  ## Now we can make the histogram
  hist(w,freq=FALSE, ylim=c(0,ymax), main=paste("Histogram of sample mean",
  "\n", "sample size= ",n,sep=""),xlab="Sample mean")
  ## add a density curve made from the sample distribution
  lines(density(w),col="Blue",lwd=3) # add a density plot
  ## Add a theoretical normal curve: mean (a+b)/2, sd (b-a)/sqrt(12n)
  curve(dnorm(x,mean=(a+b)/2,sd=(b-a)/(sqrt(12*n))),add=TRUE,col="Red",lty=2,lwd=3) # add a theoretical curve
  ## Add the density from which the samples were taken
  curve(dunif(x,a,b),add=TRUE,lwd=4)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{make_bar_plot}
\alias{make_bar_plot}
\title{Create a Barplot of Country Obesity Rankings}
\usage{
make_bar_plot(
.region = NULL,
.year = 2016,
.income = NULL,
.sex = NULL,
.ascending = TRUE,
.n = 10
)
}
\arguments{
\item{.region}{The region input callback (character vector)}
\item{.year}{The year input callback (integer vector)}
\item{.income}{The income group callback (character vector)}
\item{.sex}{The sex group callback (scalar character)}
\item{.ascending}{a logical indicating the selection for top or bottom for
the country rankings. Also accepts a character. Defaults to TRUE.}
\item{.n}{a scalar representing the number of countries to chart.}
}
\value{
A plotly object.
}
\description{
Create a Barplot of Country Obesity Rankings
}
\examples{
make_bar_plot()
make_bar_plot(.n = 5)
make_bar_plot(.n = 5, .ascending = FALSE)
make_bar_plot(.n = 5, .ascending = "FALSE")
}
|
/man/make_bar_plot.Rd
|
permissive
|
UBC-MDS/obesity-explorer-R
|
R
| false
| true
| 992
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{make_bar_plot}
\alias{make_bar_plot}
\title{Create a Barplot of Country Obesity Rankings}
\usage{
make_bar_plot(
.region = NULL,
.year = 2016,
.income = NULL,
.sex = NULL,
.ascending = TRUE,
.n = 10
)
}
\arguments{
\item{.region}{The region input callback (character vector)}
\item{.year}{The year input callback (integer vector)}
\item{.income}{The income group callback (character vector)}
\item{.sex}{The sex group callback (scalar character)}
\item{.ascending}{a logical indicating the selection for top or bottom for
the country rankings. Also accepts a character. Defaults to TRUE.}
\item{.n}{a scalar representing the number of countries to chart.}
}
\value{
A plotly object.
}
\description{
Create a Barplot of Country Obesity Rankings
}
\examples{
make_bar_plot()
make_bar_plot(.n = 5)
make_bar_plot(.n = 5, .ascending = FALSE)
make_bar_plot(.n = 5, .ascending = "FALSE")
}
|
# ===== CHURN RATE MIXPANEL - COHORT ANALYSIS =====
# READ THE DATA
cw.churn = read.csv(file = 'D:/Audhi Aprilliant/IPB Works/Statistics Department/Data Science Practices/Projects/Cohort Analysis - Online Retail Data/Datasets/cohort2011 Wide Data.csv',
header = TRUE,
sep = ',')
View(cw.churn)
# Structure Manipulation - Column names
colnames(cw.churn) = c('Cohort','0','1','2','3','4','5','6','7','8','9','10','11')
# CHURN RATE MIXPANEL
# Walk the month columns (3..ncol) converting raw cohort counts into
# churn rates relative to the cohort's initial size (column 2).
# NOTE(review): rev() is unnecessary here -- column 2 is never modified,
# so iteration order does not matter; confirm before simplifying.
for (i in rev(3:ncol(cw.churn))){
#Calculates the retention rate
cw.churn[,i] = round(x = cw.churn[,i]/cw.churn[,2],
digits = 4)
# Turns the retention rate into the churn rate. The ifelse
# part is to avoid doing any calculations to the zeros.
cw.churn[,i] = ifelse(cw.churn[,i] != 0,
yes = 1.0 - cw.churn[,i],
no = 0 + cw.churn[,i])
}
rm(i) # Remove object i vector from environment
# Cloning the churn mixpanel
churn.avgs = cw.churn
# Converting 0.0000 to NAs
churn.avgs[churn.avgs == 0.0000] = NA
avgs.chu = round(x = apply(X = churn.avgs[,-1],
MARGIN = 2,
FUN = mean,
na.rm = TRUE),
digits = 4)
avgs.chu = c(0,avgs.chu) # Add zero for first column
# Adding the averages row to the churn mixpanel
cw.churn = rbind(cw.churn,avgs.chu)
# SAVE COHORT WIDE CHURN DATA INTO CSV FILE
write.csv(x = cw.churn,
file = 'D:/Audhi Aprilliant/IPB Works/Statistics Department/Data Science Practices/Projects/Cohort Analysis - Online Retail Data/Datasets/CW Churn Rate.csv',
row.names = FALSE)
# VISUALIZATION
# Creating 19 breaks and 20 rgb color values ranging from red to white
breaks2 = quantile(cw.churn[,3:13],
probs = seq(.05, .95, .05),
na.rm = TRUE)
colors2 = sapply(round(seq(255, 40, length.out = length(breaks2) + 1), 0),
function(x){ rgb(255,x,x, maxColorValue = 255)})
# The churn rate mixpanel
DT::datatable(cw.churn,
class = 'cell-border stripe',
rownames = FALSE,
options = list(ordering = FALSE,
dom = 't',
pageLength = 13)) %>%
formatStyle("0",
backgroundColor = 'lightgrey',
fontWeight = 'bold') %>%
formatPercentage(c(3:13),2) %>% # We don't want column 0 in %
formatStyle("1",
fontWeight = 'bold') %>%
formatStyle(names(cw.churn[c(-1,-2)]),
color = 'white',
fontWeight = 'bold',
backgroundColor = styleInterval(breaks2,colors2))
|
/R-Scripts/Total Customer/5 Churn Rate Mixpanel Customer - Cohort Analysis Online Retail Data.R
|
no_license
|
audhiaprilliant/Cohort-Analysis-Online-Retail-Data
|
R
| false
| false
| 2,737
|
r
|
# ===== CHURN RATE MIXPANEL - COHORT ANALYSIS =====
# Builds a cohort churn-rate table ("mixpanel") from wide retention counts:
# one row per acquisition cohort, columns '0'..'11' = months since acquisition.
# Appends a column-average row, writes the result to CSV, and renders a
# colour-coded HTML table with DT.
# NOTE(review): the unqualified formatStyle()/formatPercentage()/styleInterval()
# calls and the %>% pipe assume library(DT) and a pipe-providing package
# (e.g. magrittr/dplyr) are attached before this script runs -- confirm.
# READ THE DATA
cw.churn = read.csv(file = 'D:/Audhi Aprilliant/IPB Works/Statistics Department/Data Science Practices/Projects/Cohort Analysis - Online Retail Data/Datasets/cohort2011 Wide Data.csv',
                    header = TRUE,
                    sep = ',')
View(cw.churn)
# Structure Manipulation - Column names
# Column 1 = cohort label; columns '0'..'11' = counts by month offset.
colnames(cw.churn) = c('Cohort','0','1','2','3','4','5','6','7','8','9','10','11')
# CHURN RATE MIXPANEL
# Convert each month column (3..ncol) in place; column 2 (month '0', the
# cohort size) is never modified and serves as the denominator throughout.
for (i in rev(3:ncol(cw.churn))){
  #Calculates the retention rate
  cw.churn[,i] = round(x = cw.churn[,i]/cw.churn[,2],
                       digits = 4)
  # Turns the retention rate into the churn rate. The ifelse
  # part is to avoid doing any calculations to the zeros.
  cw.churn[,i] = ifelse(cw.churn[,i] != 0,
                        yes = 1.0 - cw.churn[,i],
                        no = 0 + cw.churn[,i])
}
rm(i) # Remove object i vector from environment
# Cloning the churn mixpanel
churn.avgs = cw.churn
# Converting 0.0000 to NAs
# Zeros mark cells with no observation; they are excluded from the column
# means below via na.rm = TRUE.
churn.avgs[churn.avgs == 0.0000] = NA
avgs.chu = round(x = apply(X = churn.avgs[,-1],
                           MARGIN = 2,
                           FUN = mean,
                           na.rm = TRUE),
                 digits = 4)
avgs.chu = c(0,avgs.chu) # Add zero for first column
# Adding the averages row to the churn mixpanel
cw.churn = rbind(cw.churn,avgs.chu)
# SAVE COHORT WIDE CHURN DATA INTO CSV FILE
write.csv(x = cw.churn,
          file = 'D:/Audhi Aprilliant/IPB Works/Statistics Department/Data Science Practices/Projects/Cohort Analysis - Online Retail Data/Datasets/CW Churn Rate.csv',
          row.names = FALSE)
# VISUALIZATION
# Creating 19 breaks and 20 rgb color values ranging from red to white
breaks2 = quantile(cw.churn[,3:13],
                   probs = seq(.05, .95, .05),
                   na.rm = TRUE)
colors2 = sapply(round(seq(255, 40, length.out = length(breaks2) + 1), 0),
                 function(x){ rgb(255,x,x, maxColorValue = 255)})
# The churn rate mixpanel
# Styling: month-'0' column greyed, percentages for month columns, and a
# red-to-white background gradient keyed to the churn-rate quantile breaks.
DT::datatable(cw.churn,
              class = 'cell-border stripe',
              rownames = FALSE,
              options = list(ordering = FALSE,
                             dom = 't',
                             pageLength = 13)) %>%
  formatStyle("0",
              backgroundColor = 'lightgrey',
              fontWeight = 'bold') %>%
  formatPercentage(c(3:13),2) %>% # We don't want column 0 in %
  formatStyle("1",
              fontWeight = 'bold') %>%
  formatStyle(names(cw.churn[c(-1,-2)]),
              color = 'white',
              fontWeight = 'bold',
              backgroundColor = styleInterval(breaks2,colors2))
|
# Simulate clustered survey data (100 households in each of 4 villages) and
# illustrate the intra-cluster correlation (ICC) and the resulting design
# effect, DEFF = 1 + (n - 1) * ICC, for four example survey variables.
library(lme4)      # lmer()/glmer() random-intercept models
library(sjstats)   # icc()
library(scales)    # percent axis labels
library(ggplot2)   # attached up front: the original called ggplot() before
                   # library(ggplot2), which errors in a fresh session
DEFF<-expand.grid(HH=1:100,Village=1:4)
# Gender is drawn independently of village, so its ICC should be near zero.
DEFF$Gender<-sample(c("Male","Female"),size = 400,replace=TRUE)
# Travel time varies strongly by village (means 0/60/120/180), rounded to 10s.
DEFF$TimeToHospital<-abs(round(rnorm(mean=60*(DEFF$Village-1),sd=15,n=400)/10)*10)
# Income with village-specific means, rounded to the nearest 100.
# NOTE(review): each rnorm(100, ...) is recycled over all 400 rows inside
# ifelse(); draws are reused across rows -- confirm this is intended.
DEFF$Income<-round(with(DEFF,ifelse(Village==1,rnorm(100,mean=5000,sd=1500),ifelse(Village==2,rnorm(100,mean=6500,sd=2000),
                                    ifelse(Village==3,rnorm(100,mean=6000,sd=1500),ifelse(Village==4,rnorm(100,mean=4000,sd=1500),NA)))))/100)*100
# Healthcare satisfaction, with P("Yes") decreasing across villages.
DEFF$Perception<-c(sample(c("Yes","No"),size = 100,replace=TRUE,prob=c(0.75,0.25)),sample(c("Yes","No"),size = 100,replace=TRUE,prob=c(0.6,0.4)),
                   sample(c("Yes","No"),size = 100,replace=TRUE,prob=c(0.4,0.6)),sample(c("Yes","No"),size = 100,replace=TRUE,prob=c(0.25,0.75)))
# Quick visual check of the income distribution per village.
ggplot(data=DEFF,aes(y=Income,x=1))+facet_wrap(~Village,nrow=1)+geom_boxplot()+geom_point(col="red",alpha=0.5)
# Random-intercept models with Village as the clustering factor.
fit1 <- lmer(TimeToHospital ~ 1 + (1 | Village), DEFF)
fit2 <- glmer(factor(Gender) ~ 1 + (1 | Village), DEFF,family="binomial")
fit3 <- lmer(Income ~ 1 + (1 | Village), DEFF)
fit4 <- glmer(factor(Perception) ~ 1 + (1 | Village), DEFF,family="binomial")
# BUG FIX: the original printed icc(fit0) for a model that was never fitted
# (and never printed icc(fit4)); report the ICC of each fitted model instead.
icc(fit1)
icc(fit2)
icc(fit3)
icc(fit4)
n <- 15  # illustrative cluster size for the design-effect subtitle below
# Per-variable lookup table (short name, labels, type, estimated ICC).
# NOTE(review): the name `summary` shadows base::summary for the session.
summary<-data.frame(short=c("TimeToHospital","Gender","Income","Perception"),
                    snappy=c("Time To Nearest Hospital","Gender of Respondent","Household Income","Happy With Healthcare"),
                    full=c("Time To Nearest Hospital (minutes)","Gender of Respondent","Annual Household Income ($)",
                           "Are you happy With the service provided at your local hospital?"),type=c("numeric","factor","numeric","factor"),
                    icc=c(icc(fit1)[[1]],icc(fit2)[[1]],icc(fit3)[[1]],icc(fit4)[[1]]))
variable <- "Perception"  # variable to display in the detailed faceted plot
DData<-subset(summary,short==variable)
DEFF$Village<-paste("Village",DEFF$Village)
# Histogram for numeric variables, bar chart for factors.
if(DData$type=="numeric"){
  p1<-ggplot(data=DEFF,aes_string(x=variable,y="(..count..)/sum(..count..)"))+geom_histogram(bins=20,fill="red",col="black")+
    scale_y_continuous(labels=percent)
}
if(DData$type=="factor"){
  p1<-ggplot(data=DEFF,aes_string(x=1,group=variable,fill=variable))+geom_bar(show.legend=TRUE,col="black")
}
p1<-p1+facet_wrap(~Village)+
  ylab("% of Responses")+xlab(DData$full)+
  ggtitle(DData$full,subtitle =
            paste("ICC =",round(DData$icc,2),"\nWith a cluster size of",n,"Households This Would Give A Design Effect of",
                  round(((n-1)*DData$icc)+1,1)))
p1
# Overview panels, one per variable.
plot1<-ggplot(data=DEFF,aes_string(x="TimeToHospital",y="(..count..)/sum(..count..)"))+geom_histogram(bins=40,fill="red",col="black")+ylab("")+
  ggtitle(summary$full[1])+xlab("")+
  scale_y_continuous(labels=percent)
plot2<-ggplot(data=DEFF,aes_string(y="(..count..)/sum(..count..)",x="Gender",group="Gender",fill="Gender"))+
  geom_bar(show.legend=TRUE,col="black",position="dodge")+ylab("")+
  ggtitle(summary$full[2])+xlab("")+
  scale_y_continuous(labels=percent)
plot3<-ggplot(data=DEFF,aes_string(x="Income",y="(..count..)/sum(..count..)"))+geom_histogram(bins=40,fill="red",col="black")+ylab("")+
  ggtitle(summary$full[3])+xlab("")+
  scale_y_continuous(labels=percent)
plot4<-ggplot(data=DEFF,aes_string(y="(..count..)/sum(..count..)",x="Perception",group="Perception",fill="Perception"))+
  geom_bar(show.legend=TRUE,col="black",position="dodge")+
  ggtitle(summary$full[4])+xlab("")+ylab("")+
  scale_y_continuous(labels=percent)
library(gridExtra)
startplot<-grid.arrange(plot1,plot2,plot3,plot4)
# Design effect as a function of cluster size (n = 2..50) for each variable.
enddata<-expand.grid(icc=summary$icc,n=2:50)
enddata<-merge(enddata,summary)
enddata$deff<-((enddata$n-1)*enddata$icc)+1
endplot<-ggplot(data=enddata,aes(y=deff,x=n,col=snappy,group=snappy))+geom_line(size=1)+xlab("Cluster Size")+
  ylab("Design Effect")+scale_color_discrete(name="")+scale_y_continuous(breaks=seq(0,50,by=2))
# NOTE(review): plot4 is not included in the save() list -- confirm whether
# that omission is intentional.
save(startplot,endplot,plot1,plot2,plot3,summary,DEFF,file="C:/Users/sdumb/Dropbox (SSD)/ssd-shiny-server/DEFF/shinydata.RData")
|
/DEFF/calculation.R
|
no_license
|
sdumble1/ShinyLibrary
|
R
| false
| false
| 3,988
|
r
|
# Simulate clustered survey data (100 households in each of 4 villages) and
# illustrate the intra-cluster correlation (ICC) and the resulting design
# effect, DEFF = 1 + (n - 1) * ICC, for four example survey variables.
library(lme4)      # lmer()/glmer() random-intercept models
library(sjstats)   # icc()
library(scales)    # percent axis labels
library(ggplot2)   # attached up front: the original called ggplot() before
                   # library(ggplot2), which errors in a fresh session
DEFF<-expand.grid(HH=1:100,Village=1:4)
# Gender is drawn independently of village, so its ICC should be near zero.
DEFF$Gender<-sample(c("Male","Female"),size = 400,replace=TRUE)
# Travel time varies strongly by village (means 0/60/120/180), rounded to 10s.
DEFF$TimeToHospital<-abs(round(rnorm(mean=60*(DEFF$Village-1),sd=15,n=400)/10)*10)
# Income with village-specific means, rounded to the nearest 100.
# NOTE(review): each rnorm(100, ...) is recycled over all 400 rows inside
# ifelse(); draws are reused across rows -- confirm this is intended.
DEFF$Income<-round(with(DEFF,ifelse(Village==1,rnorm(100,mean=5000,sd=1500),ifelse(Village==2,rnorm(100,mean=6500,sd=2000),
                                    ifelse(Village==3,rnorm(100,mean=6000,sd=1500),ifelse(Village==4,rnorm(100,mean=4000,sd=1500),NA)))))/100)*100
# Healthcare satisfaction, with P("Yes") decreasing across villages.
DEFF$Perception<-c(sample(c("Yes","No"),size = 100,replace=TRUE,prob=c(0.75,0.25)),sample(c("Yes","No"),size = 100,replace=TRUE,prob=c(0.6,0.4)),
                   sample(c("Yes","No"),size = 100,replace=TRUE,prob=c(0.4,0.6)),sample(c("Yes","No"),size = 100,replace=TRUE,prob=c(0.25,0.75)))
# Quick visual check of the income distribution per village.
ggplot(data=DEFF,aes(y=Income,x=1))+facet_wrap(~Village,nrow=1)+geom_boxplot()+geom_point(col="red",alpha=0.5)
# Random-intercept models with Village as the clustering factor.
fit1 <- lmer(TimeToHospital ~ 1 + (1 | Village), DEFF)
fit2 <- glmer(factor(Gender) ~ 1 + (1 | Village), DEFF,family="binomial")
fit3 <- lmer(Income ~ 1 + (1 | Village), DEFF)
fit4 <- glmer(factor(Perception) ~ 1 + (1 | Village), DEFF,family="binomial")
# BUG FIX: the original printed icc(fit0) for a model that was never fitted
# (and never printed icc(fit4)); report the ICC of each fitted model instead.
icc(fit1)
icc(fit2)
icc(fit3)
icc(fit4)
n <- 15  # illustrative cluster size for the design-effect subtitle below
# Per-variable lookup table (short name, labels, type, estimated ICC).
# NOTE(review): the name `summary` shadows base::summary for the session.
summary<-data.frame(short=c("TimeToHospital","Gender","Income","Perception"),
                    snappy=c("Time To Nearest Hospital","Gender of Respondent","Household Income","Happy With Healthcare"),
                    full=c("Time To Nearest Hospital (minutes)","Gender of Respondent","Annual Household Income ($)",
                           "Are you happy With the service provided at your local hospital?"),type=c("numeric","factor","numeric","factor"),
                    icc=c(icc(fit1)[[1]],icc(fit2)[[1]],icc(fit3)[[1]],icc(fit4)[[1]]))
variable <- "Perception"  # variable to display in the detailed faceted plot
DData<-subset(summary,short==variable)
DEFF$Village<-paste("Village",DEFF$Village)
# Histogram for numeric variables, bar chart for factors.
if(DData$type=="numeric"){
  p1<-ggplot(data=DEFF,aes_string(x=variable,y="(..count..)/sum(..count..)"))+geom_histogram(bins=20,fill="red",col="black")+
    scale_y_continuous(labels=percent)
}
if(DData$type=="factor"){
  p1<-ggplot(data=DEFF,aes_string(x=1,group=variable,fill=variable))+geom_bar(show.legend=TRUE,col="black")
}
p1<-p1+facet_wrap(~Village)+
  ylab("% of Responses")+xlab(DData$full)+
  ggtitle(DData$full,subtitle =
            paste("ICC =",round(DData$icc,2),"\nWith a cluster size of",n,"Households This Would Give A Design Effect of",
                  round(((n-1)*DData$icc)+1,1)))
p1
# Overview panels, one per variable.
plot1<-ggplot(data=DEFF,aes_string(x="TimeToHospital",y="(..count..)/sum(..count..)"))+geom_histogram(bins=40,fill="red",col="black")+ylab("")+
  ggtitle(summary$full[1])+xlab("")+
  scale_y_continuous(labels=percent)
plot2<-ggplot(data=DEFF,aes_string(y="(..count..)/sum(..count..)",x="Gender",group="Gender",fill="Gender"))+
  geom_bar(show.legend=TRUE,col="black",position="dodge")+ylab("")+
  ggtitle(summary$full[2])+xlab("")+
  scale_y_continuous(labels=percent)
plot3<-ggplot(data=DEFF,aes_string(x="Income",y="(..count..)/sum(..count..)"))+geom_histogram(bins=40,fill="red",col="black")+ylab("")+
  ggtitle(summary$full[3])+xlab("")+
  scale_y_continuous(labels=percent)
plot4<-ggplot(data=DEFF,aes_string(y="(..count..)/sum(..count..)",x="Perception",group="Perception",fill="Perception"))+
  geom_bar(show.legend=TRUE,col="black",position="dodge")+
  ggtitle(summary$full[4])+xlab("")+ylab("")+
  scale_y_continuous(labels=percent)
library(gridExtra)
startplot<-grid.arrange(plot1,plot2,plot3,plot4)
# Design effect as a function of cluster size (n = 2..50) for each variable.
enddata<-expand.grid(icc=summary$icc,n=2:50)
enddata<-merge(enddata,summary)
enddata$deff<-((enddata$n-1)*enddata$icc)+1
endplot<-ggplot(data=enddata,aes(y=deff,x=n,col=snappy,group=snappy))+geom_line(size=1)+xlab("Cluster Size")+
  ylab("Design Effect")+scale_color_discrete(name="")+scale_y_continuous(breaks=seq(0,50,by=2))
# NOTE(review): plot4 is not included in the save() list -- confirm whether
# that omission is intentional.
save(startplot,endplot,plot1,plot2,plot3,summary,DEFF,file="C:/Users/sdumb/Dropbox (SSD)/ssd-shiny-server/DEFF/shinydata.RData")
|
# Script to format the extra EMP metadata: reads per-study metadata files,
# harmonizes soil C/N column names and units across studies, and saves one
# combined metadata table.
rm(list = ls())
library(dplyr)
source('paths.r')
#load mapping file; this tells us which samples we want to look at
map <- readRDS(emp_map_clean.path)
#load the metadata files we need
study_632 <- read.csv(emp_study632_metadata.path)
study_722 <- read.csv(emp_study722_metadata.path)
study_808 <- read.csv(emp_study808_metadata.path)
study_864 <- read.csv(emp_study864_metadata.path)
study_895 <- read.csv(emp_study895_metadata.path)
study_990 <- read.csv(emp_study990_metadata.path)
study_1031 <- read.csv(emp_study1031_metadata.path)
study_1037 <- read.csv(emp_study1037_metadata.path)
study_1043 <- read.csv(emp_study1043_metadata.path)
study_1521 <- read.csv(emp_study1521_metadata.path)
# FIXME(review): study_1579 is read from the study-632 metadata file -- this
# looks like a copy/paste error; if paths.r defines emp_study1579_metadata.path,
# use it here instead.
study_1579 <- read.csv(emp_study632_metadata.path)
study_1674 <- read.csv(emp_study1674_metadata.path)
study_1702 <- read.csv(emp_study1702_metadata.path)
study_1716 <- read.csv(emp_study1716_metadata.path)
study_1747 <- read.csv(emp_study1747_metadata.path)
#make a list of the metadata files
data.list <- list(study_632, study_722, study_808, study_864, study_895, study_990,
                  study_1031, study_1037, study_1043, study_1521, study_1579, study_1674,
                  study_1702, study_1716, study_1747)
#isolate the samples we want to look at
samples <- map$`#SampleID`
for(i in seq_along(data.list)){
  data.list[[i]] <- subset(data.list[[i]], data.list[[i]]$sample_name %in% samples)
}
#make column names consistent
for(i in seq_along(data.list)){
  colnames(data.list[[i]])[colnames(data.list[[i]]) == 'carb_nitro_ratio'] <- 'c_n_ratio'
  colnames(data.list[[i]])[colnames(data.list[[i]]) == 'perc_nitrogen'] <- 'percent_n'
  colnames(data.list[[i]])[colnames(data.list[[i]]) == 'perc_total_c'] <- 'percent_c'
  # When redundant carbon columns coexist, keep only one of them.
  # BUG FIX: the original dropped columns with `[-data.list[[i]]$tot_org_carb]`,
  # which uses the column's numeric *values* as negative column indices;
  # remove the redundant column by name instead.
  if('tot_carb' %in% colnames(data.list[[i]]) && 'tot_org_carb' %in% colnames(data.list[[i]])){
    data.list[[i]]$tot_org_carb <- NULL
  }
  if('tot_carb' %in% colnames(data.list[[i]]) && 'percent_c' %in% colnames(data.list[[i]])){
    data.list[[i]]$tot_carb <- NULL
  }
}
#ensure desired columns are numerics, not factors
# BUG FIX: as.numeric() applied directly to a factor returns the level codes,
# not the underlying values; convert through as.character() first (a no-op
# for columns already read as character or numeric).
numeric_cols <- c('sample_name', 'c_n_ratio', 'percent_n', 'percent_c',
                  'water_content_soil', 'tot_nitro', 'tot_org_carb', 'tot_carb')
for(i in seq_along(data.list)){
  for(col in intersect(numeric_cols, colnames(data.list[[i]]))){
    data.list[[i]][[col]] <- as.numeric(as.character(data.list[[i]][[col]]))
  }
}
#make units consistent and update column names
#3. 808: water content: unknown units
#11. 1579: water content: unknown units
# The conversions below scale by / 1000 * 100 (i.e. g/kg -> percent,
# presumably) -- confirm source units against the EMP study documentation.
for(i in seq_along(data.list)){
  if('tot_nitro' %in% colnames(data.list[[i]])){
    data.list[[i]]$tot_nitro <- data.list[[i]]$tot_nitro / 1000 * 100
    colnames(data.list[[i]])[colnames(data.list[[i]]) == 'tot_nitro'] <- 'percent_n'
  }
  if('tot_org_carb' %in% colnames(data.list[[i]])){
    data.list[[i]]$tot_org_carb <- data.list[[i]]$tot_org_carb / 1000 * 100
    colnames(data.list[[i]])[colnames(data.list[[i]]) == 'tot_org_carb'] <- 'percent_org_c'
  }
  if('tot_carb' %in% colnames(data.list[[i]])){
    data.list[[i]]$tot_carb <- data.list[[i]]$tot_carb / 1000 * 100
    colnames(data.list[[i]])[colnames(data.list[[i]]) == 'tot_carb'] <- 'percent_c'
  }
}
#isolate the variables we are interested in
vars <- c('sample_name', 'water_content_soil', 'percent_n', 'percent_c', 'percent_org_c', 'c_n_ratio')
for(i in seq_along(data.list)){
  data.list[[i]] <- subset(data.list[[i]], select = colnames(data.list[[i]]) %in% vars)
}
#combine rows into one new dataframe
# BUG FIX: the original bound only data.list[[1]]..[[14]], silently dropping
# the 15th study; bind the whole list instead.
metadata <- dplyr::bind_rows(data.list)
# Keep only the first six columns (the variables listed in `vars`).
metadata <- metadata[,1:6]
#save metadata
saveRDS(metadata, emp_metadata.path)
|
/data_construction/EMP_data_construction/format_metadata.R
|
no_license
|
microbesatBU/NEFI_16S
|
R
| false
| false
| 4,657
|
r
|
# Script to format the extra EMP metadata: reads per-study metadata files,
# harmonizes soil C/N column names and units across studies, and saves one
# combined metadata table.
rm(list = ls())
library(dplyr)
source('paths.r')
#load mapping file; this tells us which samples we want to look at
map <- readRDS(emp_map_clean.path)
#load the metadata files we need
study_632 <- read.csv(emp_study632_metadata.path)
study_722 <- read.csv(emp_study722_metadata.path)
study_808 <- read.csv(emp_study808_metadata.path)
study_864 <- read.csv(emp_study864_metadata.path)
study_895 <- read.csv(emp_study895_metadata.path)
study_990 <- read.csv(emp_study990_metadata.path)
study_1031 <- read.csv(emp_study1031_metadata.path)
study_1037 <- read.csv(emp_study1037_metadata.path)
study_1043 <- read.csv(emp_study1043_metadata.path)
study_1521 <- read.csv(emp_study1521_metadata.path)
# FIXME(review): study_1579 is read from the study-632 metadata file -- this
# looks like a copy/paste error; if paths.r defines emp_study1579_metadata.path,
# use it here instead.
study_1579 <- read.csv(emp_study632_metadata.path)
study_1674 <- read.csv(emp_study1674_metadata.path)
study_1702 <- read.csv(emp_study1702_metadata.path)
study_1716 <- read.csv(emp_study1716_metadata.path)
study_1747 <- read.csv(emp_study1747_metadata.path)
#make a list of the metadata files
data.list <- list(study_632, study_722, study_808, study_864, study_895, study_990,
                  study_1031, study_1037, study_1043, study_1521, study_1579, study_1674,
                  study_1702, study_1716, study_1747)
#isolate the samples we want to look at
samples <- map$`#SampleID`
for(i in seq_along(data.list)){
  data.list[[i]] <- subset(data.list[[i]], data.list[[i]]$sample_name %in% samples)
}
#make column names consistent
for(i in seq_along(data.list)){
  colnames(data.list[[i]])[colnames(data.list[[i]]) == 'carb_nitro_ratio'] <- 'c_n_ratio'
  colnames(data.list[[i]])[colnames(data.list[[i]]) == 'perc_nitrogen'] <- 'percent_n'
  colnames(data.list[[i]])[colnames(data.list[[i]]) == 'perc_total_c'] <- 'percent_c'
  # When redundant carbon columns coexist, keep only one of them.
  # BUG FIX: the original dropped columns with `[-data.list[[i]]$tot_org_carb]`,
  # which uses the column's numeric *values* as negative column indices;
  # remove the redundant column by name instead.
  if('tot_carb' %in% colnames(data.list[[i]]) && 'tot_org_carb' %in% colnames(data.list[[i]])){
    data.list[[i]]$tot_org_carb <- NULL
  }
  if('tot_carb' %in% colnames(data.list[[i]]) && 'percent_c' %in% colnames(data.list[[i]])){
    data.list[[i]]$tot_carb <- NULL
  }
}
#ensure desired columns are numerics, not factors
# BUG FIX: as.numeric() applied directly to a factor returns the level codes,
# not the underlying values; convert through as.character() first (a no-op
# for columns already read as character or numeric).
numeric_cols <- c('sample_name', 'c_n_ratio', 'percent_n', 'percent_c',
                  'water_content_soil', 'tot_nitro', 'tot_org_carb', 'tot_carb')
for(i in seq_along(data.list)){
  for(col in intersect(numeric_cols, colnames(data.list[[i]]))){
    data.list[[i]][[col]] <- as.numeric(as.character(data.list[[i]][[col]]))
  }
}
#make units consistent and update column names
#3. 808: water content: unknown units
#11. 1579: water content: unknown units
# The conversions below scale by / 1000 * 100 (i.e. g/kg -> percent,
# presumably) -- confirm source units against the EMP study documentation.
for(i in seq_along(data.list)){
  if('tot_nitro' %in% colnames(data.list[[i]])){
    data.list[[i]]$tot_nitro <- data.list[[i]]$tot_nitro / 1000 * 100
    colnames(data.list[[i]])[colnames(data.list[[i]]) == 'tot_nitro'] <- 'percent_n'
  }
  if('tot_org_carb' %in% colnames(data.list[[i]])){
    data.list[[i]]$tot_org_carb <- data.list[[i]]$tot_org_carb / 1000 * 100
    colnames(data.list[[i]])[colnames(data.list[[i]]) == 'tot_org_carb'] <- 'percent_org_c'
  }
  if('tot_carb' %in% colnames(data.list[[i]])){
    data.list[[i]]$tot_carb <- data.list[[i]]$tot_carb / 1000 * 100
    colnames(data.list[[i]])[colnames(data.list[[i]]) == 'tot_carb'] <- 'percent_c'
  }
}
#isolate the variables we are interested in
vars <- c('sample_name', 'water_content_soil', 'percent_n', 'percent_c', 'percent_org_c', 'c_n_ratio')
for(i in seq_along(data.list)){
  data.list[[i]] <- subset(data.list[[i]], select = colnames(data.list[[i]]) %in% vars)
}
#combine rows into one new dataframe
# BUG FIX: the original bound only data.list[[1]]..[[14]], silently dropping
# the 15th study; bind the whole list instead.
metadata <- dplyr::bind_rows(data.list)
# Keep only the first six columns (the variables listed in `vars`).
metadata <- metadata[,1:6]
#save metadata
saveRDS(metadata, emp_metadata.path)
|
## File Name: tam_calc_prob_helper_subtract_max.R
## File Version: 0.12
# Reshape a 3-dimensional array into a matrix, hand it to the compiled
# tam_rcpp_calc_prob_subtract_max() routine (which, per its name, subtracts
# per-cell maxima -- presumably for numerical stabilization), and restore
# the original array shape.
tam_calc_prob_helper_subtract_max <- function( rr0 )
{
    dims <- dim(rr0)
    n_items <- dims[1]
    n_cat <- dims[2]
    n_points <- dims[3]
    # flatten the first two dimensions into rows; third dimension as columns
    flat <- matrix( rr0, nrow=n_items*n_cat, ncol=n_points )
    stabilized <- tam_rcpp_calc_prob_subtract_max( rr0M=flat, NI=n_items,
                        NK=n_cat, TP=n_points )
    # restore the (NI, NK, TP) array shape
    array( stabilized, dim=dims )
}
|
/R/tam_calc_prob_helper_subtract_max.R
|
no_license
|
cran/TAM
|
R
| false
| false
| 402
|
r
|
## File Name: tam_calc_prob_helper_subtract_max.R
## File Version: 0.12
# Reshape a 3-dimensional array into a matrix, hand it to the compiled
# tam_rcpp_calc_prob_subtract_max() routine (which, per its name, subtracts
# per-cell maxima -- presumably for numerical stabilization), and restore
# the original array shape.
tam_calc_prob_helper_subtract_max <- function( rr0 )
{
    dims <- dim(rr0)
    n_items <- dims[1]
    n_cat <- dims[2]
    n_points <- dims[3]
    # flatten the first two dimensions into rows; third dimension as columns
    flat <- matrix( rr0, nrow=n_items*n_cat, ncol=n_points )
    stabilized <- tam_rcpp_calc_prob_subtract_max( rr0M=flat, NI=n_items,
                        NK=n_cat, TP=n_points )
    # restore the (NI, NK, TP) array shape
    array( stabilized, dim=dims )
}
|
\docType{methods}
\name{split}
\alias{split}
\alias{split,ANY-method}
\alias{split,hyperSpec-method}
\alias{split-methods}
\title{Split a hyperSpec object according to groups
\code{split} divides the \code{hyperSpec} object into a list of
\code{hyperSpec} objects according to the groups given by \code{f}.}
\usage{
\S4method{split}{hyperSpec}(x, f, drop = TRUE,
short = NULL, user = NULL, date = NULL)
}
\arguments{
\item{x}{the \code{hyperSpec} object}
\item{f}{a factor giving the grouping (or a variable that
can be converted into a factor by \code{as.factor})}
\item{drop}{if \code{TRUE}, levels of \code{f} that do not
occur are dropped.}
\item{short,user,date}{handed to \code{\link{logentry}}}
}
\value{
A list of \code{hyperSpec} objects.
}
\description{
The \code{hyperSpec} objects in the list may be bound
together again by \code{\link{bind} ("r",
list_of_hyperSpec_objects)}.
}
\examples{
dist <- pearson.dist (chondro[[]])
dend <- hclust (dist, method = "ward")
z <- cutree (dend, h = 0.15)
clusters <- split (chondro, z)
length (clusters)
# difference in cluster mean spectra
plot (apply (clusters[[2]], 2, mean) - apply (clusters[[1]], 2, mean))
}
\author{
C. Beleites
}
\seealso{
\code{\link[base]{split}}
}
\keyword{methods}
|
/man/split.Rd
|
no_license
|
fornasaros/hyperSpec
|
R
| false
| false
| 1,278
|
rd
|
\docType{methods}
\name{split}
\alias{split}
\alias{split,ANY-method}
\alias{split,hyperSpec-method}
\alias{split-methods}
\title{Split a hyperSpec object according to groups
\code{split} divides the \code{hyperSpec} object into a list of
\code{hyperSpec} objects according to the groups given by \code{f}.}
\usage{
\S4method{split}{hyperSpec}(x, f, drop = TRUE,
short = NULL, user = NULL, date = NULL)
}
\arguments{
\item{x}{the \code{hyperSpec} object}
\item{f}{a factor giving the grouping (or a variable that
can be converted into a factor by \code{as.factor})}
\item{drop}{if \code{TRUE}, levels of \code{f} that do not
occur are dropped.}
\item{short,user,date}{handed to \code{\link{logentry}}}
}
\value{
A list of \code{hyperSpec} objects.
}
\description{
The \code{hyperSpec} objects in the list may be bound
together again by \code{\link{bind} ("r",
list_of_hyperSpec_objects)}.
}
\examples{
dist <- pearson.dist (chondro[[]])
dend <- hclust (dist, method = "ward")
z <- cutree (dend, h = 0.15)
clusters <- split (chondro, z)
length (clusters)
# difference in cluster mean spectra
plot (apply (clusters[[2]], 2, mean) - apply (clusters[[1]], 2, mean))
}
\author{
C. Beleites
}
\seealso{
\code{\link[base]{split}}
}
\keyword{methods}
|
# ======================================================================
# Plots cumulative metrics, relative to the branch-based algorithm,
# versus iterations for all input graphs, given a single algorithm
# and architecture.
# ======================================================================
source ("rvplot-inc.R")

# --- Script parameters (overridable from the calling environment) ---
assign.if.undef ("METRIC", "Time")
assign.if.undef ("SAVE.PDF", FALSE)
# Validate the requested metric before doing any work.
stopifnot (METRIC %in% c ("Time", "Mispredictions", "Branches", "Instructions"))

# --- Load and transform data ---
Data.set <- load.xform.many (ALGS, ARCHS, GRAPHS)
Data <- Data.set[["Data"]]
Summary <- Data.set[["Summary"]]
# Attach the branch-based baseline time to the per-run totals.
Totals <- merge (Data.set$Totals,
                 Summary[, c ("Comp", "Arch", "Graph", "Time.bry")],
                 by = c ("Comp", "Arch", "Graph"))

setDevHD (l = 15)
# Alternative view (kept for reference):
#   qplot (Comp, Time.bry / Time.tot, data=Totals, facets=Arch ~ Graph, colour=Alg)
Q <- set.hpcgarage.colours (
  qplot (Comp, Speedup, data = Summary, facets = Arch ~ Graph, colour = Comp) +
    geom_hline (yintercept = 1, linetype = "dashed", alpha = 0.5)
)
print (Q)

if (SAVE.PDF) {
  outfilename <- "figs/summary.pdf"
  cat (sprintf ("Saving: %s ...\n", outfilename))
  setDevHD.pdf (outfilename, l = 15)
  print (Q)
  dev.off ()
}
# eof
|
/rvplot/deprecated/plot-summary.R
|
no_license
|
ogreen/BranchlessGraphs
|
R
| false
| false
| 1,567
|
r
|
# ======================================================================
# Plots cumulative metrics, relative to the branch-based algorithm,
# versus iterations for all input graphs, given a single algorithm
# and architecture.
# ======================================================================
source ("rvplot-inc.R")

# --- Script parameters (overridable from the calling environment) ---
assign.if.undef ("METRIC", "Time")
assign.if.undef ("SAVE.PDF", FALSE)
# Validate the requested metric before doing any work.
stopifnot (METRIC %in% c ("Time", "Mispredictions", "Branches", "Instructions"))

# --- Load and transform data ---
Data.set <- load.xform.many (ALGS, ARCHS, GRAPHS)
Data <- Data.set[["Data"]]
Summary <- Data.set[["Summary"]]
# Attach the branch-based baseline time to the per-run totals.
Totals <- merge (Data.set$Totals,
                 Summary[, c ("Comp", "Arch", "Graph", "Time.bry")],
                 by = c ("Comp", "Arch", "Graph"))

setDevHD (l = 15)
# Alternative view (kept for reference):
#   qplot (Comp, Time.bry / Time.tot, data=Totals, facets=Arch ~ Graph, colour=Alg)
Q <- set.hpcgarage.colours (
  qplot (Comp, Speedup, data = Summary, facets = Arch ~ Graph, colour = Comp) +
    geom_hline (yintercept = 1, linetype = "dashed", alpha = 0.5)
)
print (Q)

if (SAVE.PDF) {
  outfilename <- "figs/summary.pdf"
  cat (sprintf ("Saving: %s ...\n", outfilename))
  setDevHD.pdf (outfilename, l = 15)
  print (Q)
  dev.off ()
}
# eof
|
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ===== Hierarchical Multinomial Logit Model in STAN =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# NOTE(review): machine-specific absolute path -- breaks on other machines.
setwd("~/tennis_analytics/projects/roland_garros_tracking_data/stan_code/prototypes/")
# helper_functions.R presumably defines softmax() used below -- confirm.
source('../src/helper_functions.R')
library(rstan)
library(bayesplot)
library(dplyr)
rstan_options(auto_write=TRUE) # writes a compiled Stan program to the disk to avoid recompiling
options(mc.cores = parallel::detectCores()) # uses multiple cores for stan
set.seed(824)
### ### ### ### ### ### ### ### ###
### ===== Simulation No. 1 =====
### ### ### ### ### ### ### ### ###
# -- simulation from:
# https://rawgit.com/rtrangucci/class_20170809/master/multinomial-logit/multinomial-logit-regression.html
# -- 3 fixed Covariates, 3 response levels.
# -- 2 Random Intercepts
N <- 2500
K <- 3 # No. classes
P <- 3 # No. predictors
# -- group size for each cluster group
J_effect1 <- 15
J_effect2 <- 10
G <- 3
# NOTE(review): G = 3 equation-level scales are drawn below but only the
# first two are used here (two grouping factors) -- confirm intent.
# Simulate the fixed-effects design matrix from N(0, 1) draws.
X <- matrix(rnorm(N * P), N, P)
# (N x P) Fixed Effects Model Matrix
beta <- cbind(matrix(rnorm((K - 1) * P), P, K - 1),0)
# (P x K) Fixed slopes. Kth column is set to 0.
alpha <- c(rnorm(K - 1), 0)
# Fixed Intercepts. Kth intercept is 0.
# -- Create random intercepts
eta_1 <- matrix(rnorm((K - 1) * J_effect1), K - 1, J_effect1)
# (K-1 x 15)
eta_2 <- matrix(rnorm((K - 1) * J_effect2), K - 1, J_effect2)
# (K-1 x 10)
# -- Initialize random intercept for each group
# (K x J) matrix
alpha_1 <- matrix(0, K, J_effect1)
alpha_2 <- matrix(0, K, J_effect2)
# Simulate sd of random effects
sigma_1 <- abs(rnorm(K - 1))
sigma_2 <- abs(rnorm(K - 1))
sigma_inter_eqn <- abs(rnorm(G))
# Scale the raw standard-normal draws (eta_*): row k is multiplied by an
# equation-level scale (sigma_inter_eqn) times a per-class scale (sigma_*).
for (k in 1:(K - 1)) {
alpha_1[k,] <- sigma_inter_eqn[1] * sigma_1[k] * eta_1[k,]
alpha_2[k,] <- sigma_inter_eqn[2] * sigma_2[k] * eta_2[k,]
}
# -- Set last row of random intercept matrix to zero.
alpha_1[K,] <- rep(0, J_effect1)
alpha_2[K,] <- rep(0, J_effect2)
# Create group labels for each observation.
idx_1 <- sample(J_effect1, N, replace = T)
idx_2 <- sample(J_effect2, N, replace = T)
# Linear predictor: fixed slopes + fixed intercepts + both random intercepts.
mu <- sweep(x = X %*% beta, MARGIN = 2, STATS = alpha, FUN = '+')
mu <- t(t(mu) + alpha_1[, idx_1] + alpha_2[, idx_2])
# Row-wise softmax turns each row of mu into class probabilities.
mu_soft <- t(apply(mu, 1, softmax))
# Draw one multinomial outcome per observation, then convert the one-hot
# column into a class label in 1..K.
y <- sapply(1:N, function(x) rmultinom(1, size = 1, prob = mu_soft[x,]))
y <- apply(y, 2, function(x) which(as.logical(x)))
## -- For organization, save all info into a single dataframe
training_data <- data.frame(cbind(X, y, idx_1, idx_2))
# -- Fit MNL Hierarchical Model
hier_mnl <- stan_model(file = 'test_hier.stan')
stan_dat <- list(
N = N,
N_1 = length(unique(training_data$idx_1)),
N_2 = length(unique(training_data$idx_2)),
y = training_data$y,
id_1 = training_data$idx_1,
id_2 = training_data$idx_2,
x1 = training_data$V1,
x2 = training_data$V2,
x3 = training_data$V3,
K = K
)
fit_hier_mnl <- sampling(hier_mnl, data = stan_dat, iter = 1000)
# -- Summary diagnostics of MCMC
summary(fit_hier_mnl)$summary %>% View()
plot(fit_hier_mnl, plotfun="trace")
# Get posterior means & medians
names(extract(fit_hier_mnl))
extract_elements <- extract(fit_hier_mnl)
### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# -- Check how good were our fixed effect estimates =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
plot(fit_hier_mnl, plotfun="hist", pars = c('B_0', 'B_1', 'B_2', 'B_3'))
plot(fit_hier_mnl, plotfun="dens", pars = c('B_0', 'B_1', 'B_2', 'B_3'))
# -- Fixed Effects MCMC samples
B_0_samples <- extract_elements$B_0
B_1_samples <- extract_elements$B_1
B_2_samples <- extract_elements$B_2
B_3_samples <- extract_elements$B_3
# Compare Fixed intercept
colMeans(B_0_samples)
apply(B_0_samples, 2, median)
alpha
# Compare Fixed Slopes
apply(B_1_samples, 2, median)
apply(B_2_samples, 2, median)
apply(B_3_samples, 2, median)
beta
### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# -- Check how good were our Random Effects estimates =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
plot(fit_hier_mnl, plotfun="dens", pars = c('v_id1'))
# -- Group 1
rand_intercept_1_samples <- extract_elements$v_id1
dim(rand_intercept_1_samples)
rbind(apply(rand_intercept_1_samples[,,1], 2, median),
apply(rand_intercept_1_samples[,,2], 2, median)
)
alpha_1
# -- Group 2
rand_intercept_2_samples <- extract_elements$v_id2
dim(rand_intercept_2_samples)
rbind(apply(rand_intercept_2_samples[,,1], 2, median),
apply(rand_intercept_2_samples[,,2], 2, median)
)
alpha_2
## - Look at log-likelihood
log_lik <- extract(fit_hier_mnl)$log_lik
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ===== Hierarchical Multinomial Logit Model in STAN No.2 =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
setwd("~/tennis_analytics/projects/roland_garros_tracking_data/stan_code/prototypes/")
source('../src/helper_functions.R')
library(rstan)
library(bayesplot)
library(dplyr)
rstan_options(auto_write=TRUE) # writes a compiled Stan program to the disk to avoid recompiling
options(mc.cores = parallel::detectCores()) # uses multiple cores for stan
set.seed(824)
### ### ### ### ### ### ### ### ###
### ===== Simulation No. 2 =====
### ### ### ### ### ### ### ### ###
# -- simulation from:
# https://rawgit.com/rtrangucci/class_20170809/master/multinomial-logit/multinomial-logit-regression.html
# -- 3 fixed Covariates, 3 response levels.
# -- 3 random intercepts varying by Age (5 groups), Ethnicity? (4 groups), Education ( 5 groups)
N <- 2000
K <- 3 # No. classes
D <- 3 # No. predictors
J_age <- 15
J_eth <- 4
J_edu <- 5
G <- 3
X <- matrix(rnorm(N * D), N, D)
# Fixed Effects Model Matrix
beta <- cbind(matrix(rnorm((K - 1) * D), D, K - 1),0)
# Fixed slopes
alpha <- c(rnorm(K - 1), 0)
# Fixed Intercepts
# -- Compute random interceps
eta_age <- matrix(rnorm((K - 1) * J_age), K - 1, J_age)
eta_eth <- matrix(rnorm((K - 1) * J_eth), K - 1, J_eth)
eta_edu <- matrix(rnorm((K - 1) * J_edu), K - 1, J_edu)
alpha_age <- matrix(0, K, J_age)
alpha_eth <- matrix(0, K, J_eth)
alpha_edu <- matrix(0, K, J_edu)
sigma_age <- abs(rnorm(K - 1))
sigma_eth <- abs(rnorm(K - 1))
sigma_edu <- abs(rnorm(K - 1))
sigma_inter_eqn <- abs(rnorm(G))
for (k in 1:(K - 1)) {
alpha_age[k,] <- sigma_inter_eqn[1] * sigma_age[k] * eta_age[k,]
alpha_eth[k,] <- sigma_inter_eqn[2] * sigma_eth[k] * eta_eth[k,]
alpha_edu[k,] <- sigma_inter_eqn[3] * sigma_edu[k] * eta_edu[k,]
}
alpha_age[K,] <- rep(0, J_age)
alpha_eth[K,] <- rep(0, J_eth)
alpha_edu[K,] <- rep(0, J_edu)
idx_age <- sample(J_age, N, replace = T)
idx_eth <- sample(J_eth, N, replace = T)
idx_edu <- sample(J_edu, N, replace = T)
mu <- sweep(x = X %*% beta, MARGIN = 2, STATS = alpha, FUN = '+')
mu <- t(t(mu) + alpha_age[, idx_age] + alpha_eth[, idx_eth] + alpha_edu[, idx_edu])
mu_soft <- t(apply(mu, 1, softmax))
y <- sapply(1:N, function(x) rmultinom(1, size = 1, prob = mu_soft[x,]))
y <- apply(y, 2, function(x) which(as.logical(x)))
## -- For organization, save all info into a single dataframe
training_data <- data.frame(cbind(X, y, idx_age, idx_eth, idx_edu))
hier_mnl <- stan_model(file = 'test_hier.stan')
stan_dat <- list(
N = N,
N_id = length(unique(training_data$idx_age)),
y = training_data$y,
id = training_data$idx_age,
age_z = training_data$V1,
age_zq = training_data$V2,
wz = training_data$V3,
K = K
)
# Fit the hierarchical MNL model for Simulation No. 2 and inspect the MCMC
# output: summary table, trace plots, marginal histograms and densities.
fit_hier_mnl <- sampling(hier_mnl, data = stan_dat, iter = 1000)
summary(fit_hier_mnl)$summary
plot(fit_hier_mnl, plotfun="trace")
plot(fit_hier_mnl, plotfun="hist")
# BUG FIX: previously plotted `test.stan`, an object never defined in this
# script; the fitted model object is `fit_hier_mnl`.
plot(fit_hier_mnl, plotfun="dens")
# compare to true values
alpha
beta
|
/projects/roland_garros_project/modelling/prototypes/stan_code/prototypes/OLD_test_hier_mnl_stan.R
|
no_license
|
petertea96/tennis_analytics
|
R
| false
| false
| 7,720
|
r
|
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ===== Hierarchical Multinomial Logit Model in STAN =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
setwd("~/tennis_analytics/projects/roland_garros_tracking_data/stan_code/prototypes/")
source('../src/helper_functions.R')
library(rstan)
library(bayesplot)
library(dplyr)
rstan_options(auto_write=TRUE) # writes a compiled Stan program to the disk to avoid recompiling
options(mc.cores = parallel::detectCores()) # uses multiple cores for stan
set.seed(824)
### ### ### ### ### ### ### ### ###
### ===== Simulation No. 1 =====
### ### ### ### ### ### ### ### ###
# -- simulation from:
# https://rawgit.com/rtrangucci/class_20170809/master/multinomial-logit/multinomial-logit-regression.html
# -- 3 fixed Covariates, 3 response levels.
# -- 2 Random Intercepts
N <- 2500
K <- 3 # No. classes
P <- 3 # No. predictors
# -- group size for each cluster group
J_effect1 <- 15
J_effect2 <- 10
G <- 3
# ???
X <- matrix(rnorm(N * P), N, P)
# (N x P) Fixed Effects Model Matrix
beta <- cbind(matrix(rnorm((K - 1) * P), P, K - 1),0)
# (P x K) Fixed slopes. Kth column is set to 0.
alpha <- c(rnorm(K - 1), 0)
# Fixed Intercepts. Kth intercept is 0.
# -- Create random intercepts
eta_1 <- matrix(rnorm((K - 1) * J_effect1), K - 1, J_effect1)
# (K-1 x 15)
eta_2 <- matrix(rnorm((K - 1) * J_effect2), K - 1, J_effect2)
# (K-1 x 10)
# -- Initialize random intercept for each group
# (K x J) matrix
alpha_1 <- matrix(0, K, J_effect1)
alpha_2 <- matrix(0, K, J_effect2)
# Simulate sd of random effects
sigma_1 <- abs(rnorm(K - 1))
sigma_2 <- abs(rnorm(K - 1))
sigma_inter_eqn <- abs(rnorm(G))
# ???
for (k in 1:(K - 1)) {
alpha_1[k,] <- sigma_inter_eqn[1] * sigma_1[k] * eta_1[k,]
alpha_2[k,] <- sigma_inter_eqn[2] * sigma_2[k] * eta_2[k,]
}
# -- Set last row of random intercept matrix to zero.
alpha_1[K,] <- rep(0, J_effect1)
alpha_2[K,] <- rep(0, J_effect2)
# Create group labels for each observation.
idx_1 <- sample(J_effect1, N, replace = T)
idx_2 <- sample(J_effect2, N, replace = T)
mu <- sweep(x = X %*% beta, MARGIN = 2, STATS = alpha, FUN = '+')
mu <- t(t(mu) + alpha_1[, idx_1] + alpha_2[, idx_2])
mu_soft <- t(apply(mu, 1, softmax))
y <- sapply(1:N, function(x) rmultinom(1, size = 1, prob = mu_soft[x,]))
y <- apply(y, 2, function(x) which(as.logical(x)))
## -- For organization, save all info into a single dataframe
training_data <- data.frame(cbind(X, y, idx_1, idx_2))
# -- Fit MNL Hierarchical Model
hier_mnl <- stan_model(file = 'test_hier.stan')
stan_dat <- list(
N = N,
N_1 = length(unique(training_data$idx_1)),
N_2 = length(unique(training_data$idx_2)),
y = training_data$y,
id_1 = training_data$idx_1,
id_2 = training_data$idx_2,
x1 = training_data$V1,
x2 = training_data$V2,
x3 = training_data$V3,
K = K
)
fit_hier_mnl <- sampling(hier_mnl, data = stan_dat, iter = 1000)
# -- Summary diagnostics of MCMC
summary(fit_hier_mnl)$summary %>% View()
plot(fit_hier_mnl, plotfun="trace")
# Get posterior means & medians
names(extract(fit_hier_mnl))
extract_elements <- extract(fit_hier_mnl)
### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# -- Check how good were our fixed effect estimates =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
plot(fit_hier_mnl, plotfun="hist", pars = c('B_0', 'B_1', 'B_2', 'B_3'))
plot(fit_hier_mnl, plotfun="dens", pars = c('B_0', 'B_1', 'B_2', 'B_3'))
# -- Fixed Effects MCMC samples
B_0_samples <- extract_elements$B_0
B_1_samples <- extract_elements$B_1
B_2_samples <- extract_elements$B_2
B_3_samples <- extract_elements$B_3
# Compare Fixed intercept
colMeans(B_0_samples)
apply(B_0_samples, 2, median)
alpha
# Compare Fixed Slopes
apply(B_1_samples, 2, median)
apply(B_2_samples, 2, median)
apply(B_3_samples, 2, median)
beta
### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# -- Check how good were our Random Effects estimates =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
plot(fit_hier_mnl, plotfun="dens", pars = c('v_id1'))
# -- Group 1
rand_intercept_1_samples <- extract_elements$v_id1
dim(rand_intercept_1_samples)
rbind(apply(rand_intercept_1_samples[,,1], 2, median),
apply(rand_intercept_1_samples[,,2], 2, median)
)
alpha_1
# -- Group 2
rand_intercept_2_samples <- extract_elements$v_id2
dim(rand_intercept_2_samples)
rbind(apply(rand_intercept_2_samples[,,1], 2, median),
apply(rand_intercept_2_samples[,,2], 2, median)
)
alpha_2
## - Look at log-likelihood
log_lik <- extract(fit_hier_mnl)$log_lik
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ===== Hierarchical Multinomial Logit Model in STAN No.2 =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
setwd("~/tennis_analytics/projects/roland_garros_tracking_data/stan_code/prototypes/")
source('../src/helper_functions.R')
library(rstan)
library(bayesplot)
library(dplyr)
rstan_options(auto_write=TRUE) # writes a compiled Stan program to the disk to avoid recompiling
options(mc.cores = parallel::detectCores()) # uses multiple cores for stan
set.seed(824)
### ### ### ### ### ### ### ### ###
### ===== Simulation No. 2 =====
### ### ### ### ### ### ### ### ###
# -- simulation from:
# https://rawgit.com/rtrangucci/class_20170809/master/multinomial-logit/multinomial-logit-regression.html
# -- 3 fixed Covariates, 3 response levels.
# -- 3 random intercepts varying by Age (5 groups), Ethnicity? (4 groups), Education ( 5 groups)
N <- 2000
K <- 3 # No. classes
D <- 3 # No. predictors
J_age <- 15
J_eth <- 4
J_edu <- 5
G <- 3
X <- matrix(rnorm(N * D), N, D)
# Fixed Effects Model Matrix
beta <- cbind(matrix(rnorm((K - 1) * D), D, K - 1),0)
# Fixed slopes
alpha <- c(rnorm(K - 1), 0)
# Fixed Intercepts
# -- Compute random interceps
eta_age <- matrix(rnorm((K - 1) * J_age), K - 1, J_age)
eta_eth <- matrix(rnorm((K - 1) * J_eth), K - 1, J_eth)
eta_edu <- matrix(rnorm((K - 1) * J_edu), K - 1, J_edu)
alpha_age <- matrix(0, K, J_age)
alpha_eth <- matrix(0, K, J_eth)
alpha_edu <- matrix(0, K, J_edu)
sigma_age <- abs(rnorm(K - 1))
sigma_eth <- abs(rnorm(K - 1))
sigma_edu <- abs(rnorm(K - 1))
sigma_inter_eqn <- abs(rnorm(G))
for (k in 1:(K - 1)) {
alpha_age[k,] <- sigma_inter_eqn[1] * sigma_age[k] * eta_age[k,]
alpha_eth[k,] <- sigma_inter_eqn[2] * sigma_eth[k] * eta_eth[k,]
alpha_edu[k,] <- sigma_inter_eqn[3] * sigma_edu[k] * eta_edu[k,]
}
alpha_age[K,] <- rep(0, J_age)
alpha_eth[K,] <- rep(0, J_eth)
alpha_edu[K,] <- rep(0, J_edu)
idx_age <- sample(J_age, N, replace = T)
idx_eth <- sample(J_eth, N, replace = T)
idx_edu <- sample(J_edu, N, replace = T)
mu <- sweep(x = X %*% beta, MARGIN = 2, STATS = alpha, FUN = '+')
mu <- t(t(mu) + alpha_age[, idx_age] + alpha_eth[, idx_eth] + alpha_edu[, idx_edu])
mu_soft <- t(apply(mu, 1, softmax))
y <- sapply(1:N, function(x) rmultinom(1, size = 1, prob = mu_soft[x,]))
y <- apply(y, 2, function(x) which(as.logical(x)))
## -- For organization, save all info into a single dataframe
training_data <- data.frame(cbind(X, y, idx_age, idx_eth, idx_edu))
hier_mnl <- stan_model(file = 'test_hier.stan')
stan_dat <- list(
N = N,
N_id = length(unique(training_data$idx_age)),
y = training_data$y,
id = training_data$idx_age,
age_z = training_data$V1,
age_zq = training_data$V2,
wz = training_data$V3,
K = K
)
# Fit the hierarchical MNL model for Simulation No. 2 and inspect the MCMC
# output: summary table, trace plots, marginal histograms and densities.
fit_hier_mnl <- sampling(hier_mnl, data = stan_dat, iter = 1000)
summary(fit_hier_mnl)$summary
plot(fit_hier_mnl, plotfun="trace")
plot(fit_hier_mnl, plotfun="hist")
# BUG FIX: previously plotted `test.stan`, an object never defined in this
# script; the fitted model object is `fit_hier_mnl`.
plot(fit_hier_mnl, plotfun="dens")
# compare to true values
alpha
beta
|
library(s2dverification)
### Name: CDORemap
### Title: Interpolates arrays with longitude and latitude dimensions using
### CDO
### Aliases: CDORemap
### Keywords: datagen
### ** Examples
## Not run:
##D # Interpolating only vectors of longitudes and latitudes
##D lon <- seq(0, 360 - 360/50, length.out = 50)
##D lat <- seq(-90, 90, length.out = 25)
##D tas2 <- CDORemap(NULL, lon, lat, 't170grid', 'bil', TRUE)
##D
##D # Minimal array interpolation
##D tas <- array(1:50, dim = c(25, 50))
##D names(dim(tas)) <- c('lat', 'lon')
##D lon <- seq(0, 360 - 360/50, length.out = 50)
##D lat <- seq(-90, 90, length.out = 25)
##D tas2 <- CDORemap(tas, lon, lat, 't170grid', 'bil', TRUE)
##D
##D # Metadata can be attached to the inputs. It will be preserved and
##D # accordingly modified.
##D tas <- array(1:50, dim = c(25, 50))
##D names(dim(tas)) <- c('lat', 'lon')
##D lon <- seq(0, 360 - 360/50, length.out = 50)
##D metadata <- list(lon = list(units = 'degrees_east'))
##D attr(lon, 'variables') <- metadata
##D lat <- seq(-90, 90, length.out = 25)
##D metadata <- list(lat = list(units = 'degrees_north'))
##D attr(lat, 'variables') <- metadata
##D metadata <- list(tas = list(dim = list(lat = list(len = 25,
##D vals = lat),
##D lon = list(len = 50,
##D vals = lon)
##D )))
##D attr(tas, 'variables') <- metadata
##D tas2 <- CDORemap(tas, lon, lat, 't170grid', 'bil', TRUE)
##D
##D # Arrays of any number of dimensions in any order can be provided.
##D num_lats <- 25
##D num_lons <- 50
##D tas <- array(1:(10*num_lats*10*num_lons*10),
##D dim = c(10, num_lats, 10, num_lons, 10))
##D names(dim(tas)) <- c('a', 'lat', 'b', 'lon', 'c')
##D lon <- seq(0, 360 - 360/num_lons, length.out = num_lons)
##D metadata <- list(lon = list(units = 'degrees_east'))
##D attr(lon, 'variables') <- metadata
##D lat <- seq(-90, 90, length.out = num_lats)
##D metadata <- list(lat = list(units = 'degrees_north'))
##D attr(lat, 'variables') <- metadata
##D metadata <- list(tas = list(dim = list(a = list(),
##D lat = list(len = num_lats,
##D vals = lat),
##D b = list(),
##D lon = list(len = num_lons,
##D vals = lon),
##D c = list()
##D )))
##D attr(tas, 'variables') <- metadata
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', TRUE)
##D # The step of permutation can be avoided but more intermediate file writes
##D # will be performed.
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE)
##D
##D # If the provided array has the longitude or latitude dimension in the
##D # right-most position, the same number of file writes will be performed,
##D # even if avoid_writes = FALSE.
##D num_lats <- 25
##D num_lons <- 50
##D tas <- array(1:(10*num_lats*10*num_lons*10),
##D dim = c(10, num_lats, 10, num_lons))
##D names(dim(tas)) <- c('a', 'lat', 'b', 'lon')
##D lon <- seq(0, 360 - 360/num_lons, length.out = num_lons)
##D metadata <- list(lon = list(units = 'degrees_east'))
##D attr(lon, 'variables') <- metadata
##D lat <- seq(-90, 90, length.out = num_lats)
##D metadata <- list(lat = list(units = 'degrees_north'))
##D attr(lat, 'variables') <- metadata
##D metadata <- list(tas = list(dim = list(a = list(),
##D lat = list(len = num_lats,
##D vals = lat),
##D b = list(),
##D lon = list(len = num_lons,
##D vals = lon)
##D )))
##D attr(tas, 'variables') <- metadata
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', TRUE)
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE)
##D
##D # An example of an interpolation from and onto a rectangular regular grid
##D num_lats <- 25
##D num_lons <- 50
##D tas <- array(1:(1*num_lats*num_lons), dim = c(num_lats, num_lons))
##D names(dim(tas)) <- c('y', 'x')
##D lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons),
##D dim = c(num_lons, num_lats))
##D metadata <- list(lon = list(units = 'degrees_east'))
##D names(dim(lon)) <- c('x', 'y')
##D attr(lon, 'variables') <- metadata
##D lat <- t(array(seq(-90, 90, length.out = num_lats),
##D dim = c(num_lats, num_lons)))
##D metadata <- list(lat = list(units = 'degrees_north'))
##D names(dim(lat)) <- c('x', 'y')
##D attr(lat, 'variables') <- metadata
##D tas2 <- CDORemap(tas, lon, lat, 'r100x50', 'bil')
##D
##D # An example of an interpolation from an irregular grid onto a gaussian grid
##D num_lats <- 25
##D num_lons <- 50
##D tas <- array(1:(10*num_lats*10*num_lons*10),
##D dim = c(10, num_lats, 10, num_lons))
##D names(dim(tas)) <- c('a', 'j', 'b', 'i')
##D lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons),
##D dim = c(num_lons, num_lats))
##D metadata <- list(lon = list(units = 'degrees_east'))
##D names(dim(lon)) <- c('i', 'j')
##D attr(lon, 'variables') <- metadata
##D lat <- t(array(seq(-90, 90, length.out = num_lats),
##D dim = c(num_lats, num_lons)))
##D metadata <- list(lat = list(units = 'degrees_north'))
##D names(dim(lat)) <- c('i', 'j')
##D attr(lat, 'variables') <- metadata
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil')
##D
##D # Again, the dimensions can be in any order
##D num_lats <- 25
##D num_lons <- 50
##D tas <- array(1:(10*num_lats*10*num_lons),
##D dim = c(10, num_lats, 10, num_lons))
##D names(dim(tas)) <- c('a', 'j', 'b', 'i')
##D lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons),
##D dim = c(num_lons, num_lats))
##D names(dim(lon)) <- c('i', 'j')
##D lat <- t(array(seq(-90, 90, length.out = num_lats),
##D dim = c(num_lats, num_lons)))
##D names(dim(lat)) <- c('i', 'j')
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil')
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE)
##D # It is possible to specify an external NetCDF file as target grid reference
##D tas2 <- CDORemap(tas, lon, lat, 'external_file.nc', 'bil')
##D
## End(Not run)
|
/data/genthat_extracted_code/s2dverification/examples/CDORemap.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 6,547
|
r
|
library(s2dverification)
### Name: CDORemap
### Title: Interpolates arrays with longitude and latitude dimensions using
### CDO
### Aliases: CDORemap
### Keywords: datagen
### ** Examples
## Not run:
##D # Interpolating only vectors of longitudes and latitudes
##D lon <- seq(0, 360 - 360/50, length.out = 50)
##D lat <- seq(-90, 90, length.out = 25)
##D tas2 <- CDORemap(NULL, lon, lat, 't170grid', 'bil', TRUE)
##D
##D # Minimal array interpolation
##D tas <- array(1:50, dim = c(25, 50))
##D names(dim(tas)) <- c('lat', 'lon')
##D lon <- seq(0, 360 - 360/50, length.out = 50)
##D lat <- seq(-90, 90, length.out = 25)
##D tas2 <- CDORemap(tas, lon, lat, 't170grid', 'bil', TRUE)
##D
##D # Metadata can be attached to the inputs. It will be preserved and
##D # accordingly modified.
##D tas <- array(1:50, dim = c(25, 50))
##D names(dim(tas)) <- c('lat', 'lon')
##D lon <- seq(0, 360 - 360/50, length.out = 50)
##D metadata <- list(lon = list(units = 'degrees_east'))
##D attr(lon, 'variables') <- metadata
##D lat <- seq(-90, 90, length.out = 25)
##D metadata <- list(lat = list(units = 'degrees_north'))
##D attr(lat, 'variables') <- metadata
##D metadata <- list(tas = list(dim = list(lat = list(len = 25,
##D vals = lat),
##D lon = list(len = 50,
##D vals = lon)
##D )))
##D attr(tas, 'variables') <- metadata
##D tas2 <- CDORemap(tas, lon, lat, 't170grid', 'bil', TRUE)
##D
##D # Arrays of any number of dimensions in any order can be provided.
##D num_lats <- 25
##D num_lons <- 50
##D tas <- array(1:(10*num_lats*10*num_lons*10),
##D dim = c(10, num_lats, 10, num_lons, 10))
##D names(dim(tas)) <- c('a', 'lat', 'b', 'lon', 'c')
##D lon <- seq(0, 360 - 360/num_lons, length.out = num_lons)
##D metadata <- list(lon = list(units = 'degrees_east'))
##D attr(lon, 'variables') <- metadata
##D lat <- seq(-90, 90, length.out = num_lats)
##D metadata <- list(lat = list(units = 'degrees_north'))
##D attr(lat, 'variables') <- metadata
##D metadata <- list(tas = list(dim = list(a = list(),
##D lat = list(len = num_lats,
##D vals = lat),
##D b = list(),
##D lon = list(len = num_lons,
##D vals = lon),
##D c = list()
##D )))
##D attr(tas, 'variables') <- metadata
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', TRUE)
##D # The step of permutation can be avoided but more intermediate file writes
##D # will be performed.
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE)
##D
##D # If the provided array has the longitude or latitude dimension in the
##D # right-most position, the same number of file writes will be performed,
##D # even if avoid_writes = FALSE.
##D num_lats <- 25
##D num_lons <- 50
##D tas <- array(1:(10*num_lats*10*num_lons*10),
##D dim = c(10, num_lats, 10, num_lons))
##D names(dim(tas)) <- c('a', 'lat', 'b', 'lon')
##D lon <- seq(0, 360 - 360/num_lons, length.out = num_lons)
##D metadata <- list(lon = list(units = 'degrees_east'))
##D attr(lon, 'variables') <- metadata
##D lat <- seq(-90, 90, length.out = num_lats)
##D metadata <- list(lat = list(units = 'degrees_north'))
##D attr(lat, 'variables') <- metadata
##D metadata <- list(tas = list(dim = list(a = list(),
##D lat = list(len = num_lats,
##D vals = lat),
##D b = list(),
##D lon = list(len = num_lons,
##D vals = lon)
##D )))
##D attr(tas, 'variables') <- metadata
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', TRUE)
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE)
##D
##D # An example of an interpolation from and onto a rectangular regular grid
##D num_lats <- 25
##D num_lons <- 50
##D tas <- array(1:(1*num_lats*num_lons), dim = c(num_lats, num_lons))
##D names(dim(tas)) <- c('y', 'x')
##D lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons),
##D dim = c(num_lons, num_lats))
##D metadata <- list(lon = list(units = 'degrees_east'))
##D names(dim(lon)) <- c('x', 'y')
##D attr(lon, 'variables') <- metadata
##D lat <- t(array(seq(-90, 90, length.out = num_lats),
##D dim = c(num_lats, num_lons)))
##D metadata <- list(lat = list(units = 'degrees_north'))
##D names(dim(lat)) <- c('x', 'y')
##D attr(lat, 'variables') <- metadata
##D tas2 <- CDORemap(tas, lon, lat, 'r100x50', 'bil')
##D
##D # An example of an interpolation from an irregular grid onto a gaussian grid
##D num_lats <- 25
##D num_lons <- 50
##D tas <- array(1:(10*num_lats*10*num_lons*10),
##D dim = c(10, num_lats, 10, num_lons))
##D names(dim(tas)) <- c('a', 'j', 'b', 'i')
##D lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons),
##D dim = c(num_lons, num_lats))
##D metadata <- list(lon = list(units = 'degrees_east'))
##D names(dim(lon)) <- c('i', 'j')
##D attr(lon, 'variables') <- metadata
##D lat <- t(array(seq(-90, 90, length.out = num_lats),
##D dim = c(num_lats, num_lons)))
##D metadata <- list(lat = list(units = 'degrees_north'))
##D names(dim(lat)) <- c('i', 'j')
##D attr(lat, 'variables') <- metadata
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil')
##D
##D # Again, the dimensions can be in any order
##D num_lats <- 25
##D num_lons <- 50
##D tas <- array(1:(10*num_lats*10*num_lons),
##D dim = c(10, num_lats, 10, num_lons))
##D names(dim(tas)) <- c('a', 'j', 'b', 'i')
##D lon <- array(seq(0, 360 - 360/num_lons, length.out = num_lons),
##D dim = c(num_lons, num_lats))
##D names(dim(lon)) <- c('i', 'j')
##D lat <- t(array(seq(-90, 90, length.out = num_lats),
##D dim = c(num_lats, num_lons)))
##D names(dim(lat)) <- c('i', 'j')
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil')
##D tas2 <- CDORemap(tas, lon, lat, 't17grid', 'bil', FALSE)
##D # It is possible to specify an external NetCDF file as target grid reference
##D tas2 <- CDORemap(tas, lon, lat, 'external_file.nc', 'bil')
##D
## End(Not run)
|
# Auto-generated regression input (valgrind test case) for the internal
# function CNull:::communities_individual_based_sampling_alpha.
# Deliberately pathological arguments -- m = NULL and a negative
# `repetitions` count -- exercising the argument-validation paths.
testlist <- list(m = NULL, repetitions = -304992360L, in_m = structure(c(2.31584307392677e+77, 9.53819811420581e+295, 1.22810536108214e+146), .Dim = c(3L, 1L )))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
# Inspect the structure of whatever the call returned.
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615786407-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 254
|
r
|
# Auto-generated regression input (valgrind test case) for the internal
# function CNull:::communities_individual_based_sampling_alpha.
# Deliberately pathological arguments -- m = NULL and a negative
# `repetitions` count -- exercising the argument-validation paths.
testlist <- list(m = NULL, repetitions = -304992360L, in_m = structure(c(2.31584307392677e+77, 9.53819811420581e+295, 1.22810536108214e+146), .Dim = c(3L, 1L )))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
# Inspect the structure of whatever the call returned.
str(result)
|
%% File Name: data.raw1.Rd
%% File Version: 0.05
%% File Last Change: 2017-01-18 18:08:37
\name{data.raw1}
\alias{data.raw1}
\docType{data}
\title{
Dataset with Raw Item Responses
}
\description{
Dataset with raw item responses
}
\usage{data(data.raw1)}
\format{
A data frame with raw item responses of 1200 persons on the following 77 items:
\code{'data.frame': 1200 obs. of 77 variables:} \cr
\code{ $ I101: num 0 0 0 2 0 0 0 0 0 0 ...} \cr
\code{ $ I102: int NA NA 2 1 2 1 3 2 NA NA ...} \cr
\code{ $ I103: int 1 1 NA NA NA NA NA NA 1 1 ...} \cr
\code{ ...} \cr
\code{ $ I179: chr "E" "C" "D" "E" ...} \cr
}
%\details{
%% ~~ If necessary, more details than the __description__ above ~~
%}
%\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
%}
%\references{
%% ~~ possibly secondary sources and usages ~~
%}
%\examples{
%data(data.raw1)
%## maybe str(data.raw1) ; plot(data.raw1) ...
%}
\keyword{datasets}
|
/man/data.raw1.Rd
|
no_license
|
SanVerhavert/sirt
|
R
| false
| false
| 961
|
rd
|
%% File Name: data.raw1.Rd
%% File Version: 0.05
%% File Last Change: 2017-01-18 18:08:37
\name{data.raw1}
\alias{data.raw1}
\docType{data}
\title{
Dataset with Raw Item Responses
}
\description{
Dataset with raw item responses
}
\usage{data(data.raw1)}
\format{
A data frame with raw item responses of 1200 persons on the following 77 items:
\code{'data.frame': 1200 obs. of 77 variables:} \cr
\code{ $ I101: num 0 0 0 2 0 0 0 0 0 0 ...} \cr
\code{ $ I102: int NA NA 2 1 2 1 3 2 NA NA ...} \cr
\code{ $ I103: int 1 1 NA NA NA NA NA NA 1 1 ...} \cr
\code{ ...} \cr
\code{ $ I179: chr "E" "C" "D" "E" ...} \cr
}
%\details{
%% ~~ If necessary, more details than the __description__ above ~~
%}
%\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
%}
%\references{
%% ~~ possibly secondary sources and usages ~~
%}
%\examples{
%data(data.raw1)
%## maybe str(data.raw1) ; plot(data.raw1) ...
%}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.BchronDensityRun.R
\name{summary.BchronDensityRun}
\alias{summary.BchronDensityRun}
\title{Summarise a Bchron density object}
\usage{
\method{summary}{BchronDensityRun}(object, prob = 0.95, ..., digits = max(3,
getOption("digits") - 3))
}
\arguments{
\item{object}{Output from a run of \code{\link{BchronDensity}}}
\item{prob}{Probability for identifying phases}
\item{...}{Other arguments (not currently supported)}
\item{digits}{Number of digits to report values}
}
\description{
Summarise a \code{\link{BchronDensity}} object
}
\seealso{
\code{\link{BchronDensity}}
}
|
/man/summary.BchronDensityRun.Rd
|
no_license
|
ercrema/Bchron
|
R
| false
| true
| 663
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.BchronDensityRun.R
\name{summary.BchronDensityRun}
\alias{summary.BchronDensityRun}
\title{Summarise a Bchron density object}
\usage{
\method{summary}{BchronDensityRun}(object, prob = 0.95, ..., digits = max(3,
getOption("digits") - 3))
}
\arguments{
\item{object}{Output from a run of \code{\link{BchronDensity}}}
\item{prob}{Probability for identifying phases}
\item{...}{Other arguments (not currently supported)}
\item{digits}{Number of digits to report values}
}
\description{
Summarise a \code{\link{BchronDensity}} object
}
\seealso{
\code{\link{BchronDensity}}
}
|
## makeCacheMatrix creates the matrix object that can cache its inverse.
## The return value is a list of four closures sharing one enclosing
## environment that holds the matrix `x` and its memoised inverse:
##   set(y)          store a new matrix and drop any cached inverse
##   get()           return the stored matrix
##   setinverse(i)   record a computed inverse in the cache
##   getinverse()    return the cached inverse, or NULL when none is cached
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(value) {
      # Replacing the matrix invalidates the cache so a stale
      # inverse is never served.
      x <<- value
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Gives the inverse of the special "matrix" produced by makeCacheMatrix.
## On a cache hit the stored inverse is returned immediately (with a status
## message); on a miss the inverse is computed with solve(), written back
## into the cache via setinverse(), and returned. Extra arguments in `...`
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
    inverse
  } else {
    message("getting cached data")
    cached
  }
}
|
/cachematrix.R
|
no_license
|
Silva-M/ProgrammingAssignment2
|
R
| false
| false
| 841
|
r
|
## makeCacheMatrix creates the matrix object that can cache its inverse.
## It returns a list of four accessor closures that share this call's
## environment (the cache):
##   set(y)        -- store a new matrix and invalidate the cached inverse
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if none cached yet
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
# Storing a new matrix resets the cache so a stale inverse is never served.
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Gives the inverse of the matrix created by makeCacheMatrix
## If the inverse has already been computed (cache hit) the stored value is
## returned immediately; otherwise it is computed with solve(), stored via
## setinverse(), and returned. Extra arguments in ... are passed to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
if (!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
#' Create an R Markdown Word Document Topic Guide
#'
#' This is a function called in the output of the yaml of the Rmd file to
#' specify using the standard DLM topic guide word document formatting.
#'
#' @param ... Arguments to be passed to `[bookdown::word_document2]`
#'
#' @return A modified `word_document2` with the standard topic guide
#'   formatting and knitr chunk defaults applied.
#' @export
#'
#' @examples
#' \dontrun{
#' output: ratlas::topicguide_docx
#' }
topicguide_docx <- function(...) {
  template <- find_resource("topicguide_docx", "template.docx")
  fmt <- bookdown::word_document2(reference_docx = template, ...)
  # knitr chunk defaults shared by every topic guide render.
  # nolint start
  chunk_defaults <- list(
    comment = "#>",
    message = FALSE,
    warning = FALSE,
    error = FALSE,
    echo = FALSE,
    cache = FALSE,
    fig.width = 8,
    fig.asp = 0.618,
    fig.ext = "png",
    fig.retina = 3,
    fig.path = "figures/",
    fig.pos = "H"
  )
  for (opt in names(chunk_defaults)) {
    fmt$knitr$opts_chunk[[opt]] <- chunk_defaults[[opt]]
  }
  # nolint end
  fmt
}
#' Create an R Markdown PDF Topic Guide
#'
#' Output format function referenced from the `output:` field of an Rmd
#' file's YAML header to apply the standard DLM topic guide PDF formatting.
#'
#' @param ... Arguments to be passed to `[bookdown::pdf_document2]`
#'
#' @return A modified `pdf_document2` with the standard tech report formatting.
#' @export
#'
#' @examples
#' \dontrun{
#' output: ratlas::topicguide_pdf
#' }
topicguide_pdf <- function(...) {
  topic_guide_template <- find_resource("topicguide_pdf", "template.tex")
  base <- bookdown::pdf_document2(template = topic_guide_template,
                                  latex_engine = "xelatex",
                                  citation_package = "biblatex",
                                  keep_tex = TRUE, number_sections = FALSE, ...)
  # Apply the house knitr chunk defaults in one pass.
  # nolint start
  chunk_defaults <- list(
    comment = "#>",
    message = FALSE,
    warning = FALSE,
    error = FALSE,
    echo = FALSE,
    cache = FALSE,
    fig.width = 8,
    fig.asp = 0.618,
    fig.ext = "pdf",
    fig.align = "center",
    fig.retina = 3,
    fig.path = "figures/",
    fig.pos = "H",
    out.extra = "",
    out.width = "100%",
    fig.show = "hold"
  )
  for (opt in names(chunk_defaults)) {
    base$knitr$opts_chunk[[opt]] <- chunk_defaults[[opt]]
  }
  # nolint end
  base$knitr$knit_hooks$plot <- hook_plot_rat
  base
}
#' Create an R Markdown PDF Document Tech Report
#'
#' This is a function called in the output of the yaml of the Rmd file to
#' specify using the standard DLM tech report pdf document formatting.
#'
#' @param apa6 Use the legacy APA6 LaTeX template instead of the default
#'   template. A logical is expected (`TRUE`/`FALSE`); the strings `"true"`
#'   and `"yes"` (case-insensitive) are also accepted as `TRUE` for
#'   backward compatibility with YAML-supplied values.
#' @param ... Arguments to be passed to `[bookdown::pdf_document2]`
#'
#' @return A modified `pdf_document2` with the standard tech report formatting.
#' @export
#'
#' @examples
#' \dontrun{
#' output: ratlas::techreport_pdf
#' }
techreport_pdf <- function(apa6 = FALSE, ...) {
  # Normalize `apa6` once. The previous implementation treated it as a
  # logical in `ifelse(apa6, ...)` but as a string in
  # `tolower(apa6) %in% c("true", "yes")`, so a string value such as
  # "true" errored before reaching the hook selection. Accept both forms
  # and use one logical consistently.
  use_apa6 <- isTRUE(apa6) ||
    (is.character(apa6) && tolower(apa6) %in% c("true", "yes"))
  template_name <- if (use_apa6) "apa6-template.tex" else "template.tex"
  tech_report_template <- find_resource("techreport", template_name)
  base <- bookdown::pdf_document2(template = tech_report_template,
                                  latex_engine = "xelatex",
                                  citation_package = "biblatex",
                                  keep_tex = TRUE, ...)
  # House knitr chunk defaults.
  # nolint start
  base$knitr$opts_chunk$comment <- "#>"
  base$knitr$opts_chunk$message <- FALSE
  base$knitr$opts_chunk$warning <- FALSE
  base$knitr$opts_chunk$error <- FALSE
  base$knitr$opts_chunk$echo <- FALSE
  base$knitr$opts_chunk$cache <- FALSE
  base$knitr$opts_chunk$fig.width <- 8
  base$knitr$opts_chunk$fig.asp <- 0.618
  base$knitr$opts_chunk$fig.ext <- "pdf"
  base$knitr$opts_chunk$fig.align <- "center"
  base$knitr$opts_chunk$fig.retina <- 3
  base$knitr$opts_chunk$fig.path <- "figures/"
  base$knitr$opts_chunk$fig.pos <- "H"
  base$knitr$opts_chunk$out.extra <- ""
  base$knitr$opts_chunk$out.width <- "100%"
  base$knitr$opts_chunk$fig.show <- "hold"
  # nolint end
  # APA6 output uses knitr's stock TeX plot hook; the default uses the
  # ratlas hook.
  if (use_apa6) {
    base$knitr$knit_hooks$plot <- knitr::hook_plot_tex
  } else {
    base$knitr$knit_hooks$plot <- hook_plot_rat
  }
  base
}
#' Create an HTML Slide Deck with R Markdown
#'
#' This is a function called in the output of the YAML of the Rmd file to
#' specify using the standard DLM tech report pdf document formatting.
#'
#' @param ... Arguments to be passed to `[xaringan::moon_reader]`. A
#'   `nature` list is merged into the ATLAS default nature settings; all
#'   other arguments are forwarded to `moon_reader` unchanged.
#'
#' @return A modified `moon_reader` with ATLAS branding applied.
#' @export
#'
#' @examples
#' \dontrun{
#' output: ratlas::slides_html
#' }
slides_html <- function(...) {
  default_nature <- list(ratio = "16:9",
                         highlightStyle = "github",
                         highlightLines = TRUE,
                         countIncrementalSlides = FALSE,
                         slideNumberFormat = "%current%")
  dots_args <- list(...)
  dots_name <- names(dots_args)
  if ("nature" %in% dots_name) {
    final_nature <- utils::modifyList(default_nature, dots_args[["nature"]])
  } else {
    final_nature <- default_nature
  }
  # Bug fix: the previous implementation captured `...` but forwarded only
  # `nature`, silently dropping every other user-supplied argument despite
  # documenting that they are passed to moon_reader. Forward the rest here.
  if (is.null(dots_name)) dots_name <- rep("", length(dots_args))
  passthrough <- dots_args[dots_name != "nature"]
  base <- do.call(xaringan::moon_reader, c(
    list(
      css = c("default", "assets/css/atlas.css", "assets/css/atlas-fonts.css"),
      lib_dir = "libs", nature = final_nature,
      includes = list(in_header = "assets/header.html")
    ),
    passthrough
  ))
  # House knitr chunk defaults.
  # nolint start
  base$knitr$opts_chunk$comment <- "#>"
  base$knitr$opts_chunk$message <- FALSE
  base$knitr$opts_chunk$warning <- FALSE
  base$knitr$opts_chunk$error <- FALSE
  base$knitr$opts_chunk$echo <- FALSE
  base$knitr$opts_chunk$cache <- FALSE
  base$knitr$opts_chunk$fig.retina <- 3
  base$knitr$opts_chunk$fig.path <- "figures/"
  # nolint end
  base
}
|
/R/render.R
|
no_license
|
esegui/ratlas
|
R
| false
| false
| 5,943
|
r
|
#' Create an R Markdown Word Document Topic Guide
#'
#' Output format function referenced from the `output:` field of an Rmd
#' file's YAML header to apply the standard DLM topic guide Word document
#' formatting.
#'
#' @param ... Arguments to be passed to `[bookdown::word_document2]`
#'
#' @return A modified `word_document2` with the standard topic guide formatting.
#' @export
#'
#' @examples
#' \dontrun{
#' output: ratlas::topicguide_docx
#' }
topicguide_docx <- function(...) {
  template <- find_resource("topicguide_docx", "template.docx")
  base <- bookdown::word_document2(reference_docx = template, ...)
  # Apply the house knitr chunk defaults in one pass.
  # nolint start
  chunk_defaults <- list(
    comment = "#>",
    message = FALSE,
    warning = FALSE,
    error = FALSE,
    echo = FALSE,
    cache = FALSE,
    fig.width = 8,
    fig.asp = 0.618,
    fig.ext = "png",
    fig.retina = 3,
    fig.path = "figures/",
    fig.pos = "H"
  )
  for (opt in names(chunk_defaults)) {
    base$knitr$opts_chunk[[opt]] <- chunk_defaults[[opt]]
  }
  # nolint end
  base
}
#' Create an R Markdown PDF Topic Guide
#'
#' Output format function referenced from the `output:` field of an Rmd
#' file's YAML header to apply the standard DLM topic guide PDF formatting.
#'
#' @param ... Arguments to be passed to `[bookdown::pdf_document2]`
#'
#' @return A modified `pdf_document2` with the standard tech report formatting.
#' @export
#'
#' @examples
#' \dontrun{
#' output: ratlas::topicguide_pdf
#' }
topicguide_pdf <- function(...) {
  topic_guide_template <- find_resource("topicguide_pdf", "template.tex")
  base <- bookdown::pdf_document2(template = topic_guide_template,
                                  latex_engine = "xelatex",
                                  citation_package = "biblatex",
                                  keep_tex = TRUE, number_sections = FALSE, ...)
  # Apply the house knitr chunk defaults in one pass.
  # nolint start
  chunk_defaults <- list(
    comment = "#>",
    message = FALSE,
    warning = FALSE,
    error = FALSE,
    echo = FALSE,
    cache = FALSE,
    fig.width = 8,
    fig.asp = 0.618,
    fig.ext = "pdf",
    fig.align = "center",
    fig.retina = 3,
    fig.path = "figures/",
    fig.pos = "H",
    out.extra = "",
    out.width = "100%",
    fig.show = "hold"
  )
  for (opt in names(chunk_defaults)) {
    base$knitr$opts_chunk[[opt]] <- chunk_defaults[[opt]]
  }
  # nolint end
  base$knitr$knit_hooks$plot <- hook_plot_rat
  base
}
#' Create an R Markdown PDF Document Tech Report
#'
#' This is a function called in the output of the yaml of the Rmd file to
#' specify using the standard DLM tech report pdf document formatting.
#'
#' @param apa6 Use the legacy APA6 LaTeX template instead of the default
#'   template. A logical is expected (`TRUE`/`FALSE`); the strings `"true"`
#'   and `"yes"` (case-insensitive) are also accepted as `TRUE` for
#'   backward compatibility with YAML-supplied values.
#' @param ... Arguments to be passed to `[bookdown::pdf_document2]`
#'
#' @return A modified `pdf_document2` with the standard tech report formatting.
#' @export
#'
#' @examples
#' \dontrun{
#' output: ratlas::techreport_pdf
#' }
techreport_pdf <- function(apa6 = FALSE, ...) {
  # Normalize `apa6` once. The previous implementation treated it as a
  # logical in `ifelse(apa6, ...)` but as a string in
  # `tolower(apa6) %in% c("true", "yes")`, so a string value such as
  # "true" errored before reaching the hook selection. Accept both forms
  # and use one logical consistently.
  use_apa6 <- isTRUE(apa6) ||
    (is.character(apa6) && tolower(apa6) %in% c("true", "yes"))
  template_name <- if (use_apa6) "apa6-template.tex" else "template.tex"
  tech_report_template <- find_resource("techreport", template_name)
  base <- bookdown::pdf_document2(template = tech_report_template,
                                  latex_engine = "xelatex",
                                  citation_package = "biblatex",
                                  keep_tex = TRUE, ...)
  # House knitr chunk defaults.
  # nolint start
  base$knitr$opts_chunk$comment <- "#>"
  base$knitr$opts_chunk$message <- FALSE
  base$knitr$opts_chunk$warning <- FALSE
  base$knitr$opts_chunk$error <- FALSE
  base$knitr$opts_chunk$echo <- FALSE
  base$knitr$opts_chunk$cache <- FALSE
  base$knitr$opts_chunk$fig.width <- 8
  base$knitr$opts_chunk$fig.asp <- 0.618
  base$knitr$opts_chunk$fig.ext <- "pdf"
  base$knitr$opts_chunk$fig.align <- "center"
  base$knitr$opts_chunk$fig.retina <- 3
  base$knitr$opts_chunk$fig.path <- "figures/"
  base$knitr$opts_chunk$fig.pos <- "H"
  base$knitr$opts_chunk$out.extra <- ""
  base$knitr$opts_chunk$out.width <- "100%"
  base$knitr$opts_chunk$fig.show <- "hold"
  # nolint end
  # APA6 output uses knitr's stock TeX plot hook; the default uses the
  # ratlas hook.
  if (use_apa6) {
    base$knitr$knit_hooks$plot <- knitr::hook_plot_tex
  } else {
    base$knitr$knit_hooks$plot <- hook_plot_rat
  }
  base
}
#' Create an HTML Slide Deck with R Markdown
#'
#' This is a function called in the output of the YAML of the Rmd file to
#' specify using the standard DLM tech report pdf document formatting.
#'
#' @param ... Arguments to be passed to `[xaringan::moon_reader]`. A
#'   `nature` list is merged into the ATLAS default nature settings; all
#'   other arguments are forwarded to `moon_reader` unchanged.
#'
#' @return A modified `moon_reader` with ATLAS branding applied.
#' @export
#'
#' @examples
#' \dontrun{
#' output: ratlas::slides_html
#' }
slides_html <- function(...) {
  default_nature <- list(ratio = "16:9",
                         highlightStyle = "github",
                         highlightLines = TRUE,
                         countIncrementalSlides = FALSE,
                         slideNumberFormat = "%current%")
  dots_args <- list(...)
  dots_name <- names(dots_args)
  if ("nature" %in% dots_name) {
    final_nature <- utils::modifyList(default_nature, dots_args[["nature"]])
  } else {
    final_nature <- default_nature
  }
  # Bug fix: the previous implementation captured `...` but forwarded only
  # `nature`, silently dropping every other user-supplied argument despite
  # documenting that they are passed to moon_reader. Forward the rest here.
  if (is.null(dots_name)) dots_name <- rep("", length(dots_args))
  passthrough <- dots_args[dots_name != "nature"]
  base <- do.call(xaringan::moon_reader, c(
    list(
      css = c("default", "assets/css/atlas.css", "assets/css/atlas-fonts.css"),
      lib_dir = "libs", nature = final_nature,
      includes = list(in_header = "assets/header.html")
    ),
    passthrough
  ))
  # House knitr chunk defaults.
  # nolint start
  base$knitr$opts_chunk$comment <- "#>"
  base$knitr$opts_chunk$message <- FALSE
  base$knitr$opts_chunk$warning <- FALSE
  base$knitr$opts_chunk$error <- FALSE
  base$knitr$opts_chunk$echo <- FALSE
  base$knitr$opts_chunk$cache <- FALSE
  base$knitr$opts_chunk$fig.retina <- 3
  base$knitr$opts_chunk$fig.path <- "figures/"
  # nolint end
  base
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.