content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
CensReg.SMN <- function(cc, x, y, LS=NULL, nu=3, delta=NULL, cens="left", dist="T", show.envelope="FALSE", error=0.0001,iter.max=300)
{
  ## Fit a univariate censored linear regression model under a
  ## scale-mixture-of-normals (SMN) error distribution via the EM
  ## algorithm (delegated to EM.Cens.Int), print estimates, standard
  ## errors and model-selection criteria, and return the fitted object.
  ##
  ## cc   : censoring indicator vector.
  ## x    : design matrix (or vector) of covariates.
  ## y    : responses (lower limits under interval censoring).
  ## LS   : upper limits -- required only for interval censoring.
  ## nu   : shape parameter(s); scalar for T/Slash/PearsonVII,
  ##        length-2 vector with entries in (0,1) for NormalC.
  ## delta: extra parameter for the Pearson VII distribution.
  ## cens : "left"/1, "right"/2 or "interval"/3.
  ## dist : "T", "Normal", "PearsonVII", "Slash" or "NormalC".
  ## show.envelope : "TRUE" draws a simulated-envelope plot via EnvelopeRMT.
  ## error, iter.max : EM convergence tolerance and iteration cap.

  # Map the symbolic censoring types onto the numeric codes used internally.
  if (cens == "left")     cens <- 1
  if (cens == "right")    cens <- 2
  if (cens == "interval") cens <- 3
  type <- dist

  # Fallback covariate labels ("x1 ", "x2 ", ...) used when x has no colnames.
  namesx <- ('x1 ')
  if (ncol(as.matrix(x)) > 1)
  {
    for (i in 2:ncol(as.matrix(x))) { namesx <- cbind(namesx, paste("x", i, " ", sep = "")) }
  }

  # ---- basic input validation -------------------------------------------
  if (ncol(as.matrix(y)) > 1) stop("Only univariate linear regression supported!")
  if (ncol(as.matrix(cc)) > 1) stop("Only univariate linear regression supported!")
  if (length(y) != nrow(as.matrix(x))) stop("X variable does not have the same number of lines than y")
  if ((length(x) == 0) || (length(y) == 0)) stop("All parameters must be provided.")
  if ((type != "T") && (type != "Normal") && (type != "PearsonVII") && (type != "Slash") && (type != "NormalC")) stop("Distribution family not supported. Check documentation!")
  if ((cens != "1") && (cens != "2") && (cens != "3")) stop("Censored type not supported. 1 for left censoring, 2 for right censoring and 3 for intervalar censoring.")
  # FIX: interval censoring needs upper limits. The original indexed LS[i]
  # even when LS was NULL, which crashed with an obscure condition error.
  if (cens == 3 && is.null(LS)) stop("LS (upper limits) must be provided for interval censoring (cens=3).")

  # Infinite limits would encode a mixed censoring scheme, which is not
  # supported. NA entries are skipped here (the original raised a
  # confusing "missing value where TRUE/FALSE needed" error on NA).
  for (i in seq_along(y))
  {
    if (cens == 3)
    {
      if (!is.na(y[i]) && !is.na(LS[i]))
      {
        if (is.infinite(y[i]) || is.infinite(LS[i])) stop("This package does not support mixed types of censoring. For left censoring use cens=1. For right censoring use cens=2. For interval censoring use cens=3.")
      }
    } else
    {
      if (!is.na(y[i]) && is.infinite(y[i])) stop("This package does not support mixed types of censoring. For left censoring use cens=1. For right censoring use cens=2. For interval censoring use cens=3.")
    }
  }

  # ---- distribution-specific checks on nu / delta -----------------------
  if (type == "T" || type == "Slash")
  {
    if (length(nu) > 1) stop("nu parameter must be a scalar")
    if (length(nu) == 0) stop("nu parameter must be provided.")
    if (nu <= 0) stop("nu parameter must be positive.")
  }
  if (type == "PearsonVII")
  {
    if (length(nu) > 1) stop("nu parameter must be a scalar")
    if (length(nu) == 0) stop("initial value for nu parameter must be provided in case of Pearson VII distribution.")
    if (nu <= 0) stop("nu parameter must be positive.")
    if (length(delta) > 1) stop("delta parameter must be a scalar")
    if (length(delta) == 0) stop("delta parameter must be provided in case of Pearson VII distribution.")
    if (delta <= 0) stop("delta parameter must be positive.")
  }
  if (type == "NormalC")
  {
    if (length(nu) != 2) stop("nu must be a bidimensional vector in case of Contaminated Normal distribution")
    if (nu[1] <= 0 || nu[1] >= 1) stop("nu[1] must lies in (0,1)")
    if (nu[2] <= 0 || nu[2] >= 1) stop("nu[2] must lies in (0,1)")
  }

  # ---- EM fit ------------------------------------------------------------
  out <- EM.Cens.Int(cc, x, y, LS, nu, delta, cens, type, error, iter.max)

  # Assemble the estimates / standard-errors table. t(t(.)) coerces a
  # plain SE vector into a column matrix so cbind() lines up correctly.
  SE <- round(t(t(out$SE)), digits = 5)
  param <- round(cbind(rbind(out$betas, out$sigma2), SE), digits = 5)
  namespar <- colnames(x)
  colx <- ncol(as.matrix(x))
  if (length(namespar) == 0) namespar <- namesx[1:colx]
  dimnames(param) <- list(c(namespar, expression(sigma^2)), c("Estimates", "SE"))

  # Family-dependent display table for the shape parameter(s).
  if (type == "PearsonVII")
  {
    sig2 <- round(t(cbind(out$nu, out$delta)), digits = 5)
    dimnames(sig2) <- list(c(expression(nu), expression(delta)), "")
  }
  if (type == "T" || type == "Slash")
  {
    nu1 <- matrix(round(out$nu, digits = 5), ncol = 1, nrow = 1)
    row.names(nu1) <- "nu"
    colnames(nu1) <- " "
  }
  if (type == "NormalC")
  {
    # as.matrix() on the length-2 nu vector already yields a column matrix.
    sig2 <- round(as.matrix(out$nu), digits = 5)
    dimnames(sig2) <- list(c(expression(nu1), expression(nu2)), "")
  }

  # ---- console report ----------------------------------------------------
  cat('\n')
  cat('-------------------------------------------\n')
  cat(' EM estimates and SE \n')
  cat('-------------------------------------------\n')
  print(param)
  if (type != "Normal")
  {
    if (type == "T" || type == "Slash")
    {
      print(nu1)
    } else
    {
      print(sig2)
    }
  }
  cat('------------------------------------------\n')
  cat('\r \n')

  # Model-selection criteria table.
  critFin <- c(out$logver, out$AIC, out$BIC, out$EDC)
  critFin <- round(t(as.matrix(critFin)), digits = 3)
  dimnames(critFin) <- list(c("Value"), c("Loglik", "AIC", "BIC", "EDC"))
  cat('\n')
  cat('Model selection criteria\n')
  cat('-------------------------------------------\n')
  print(critFin)
  cat('-------------------------------------------\n')
  cat('\r \n')

  # Optional simulated-envelope diagnostic plot.
  if (show.envelope == "TRUE")
  {
    envelop <- EnvelopeRMT(cc, x, y, LS, nu, delta, cens = cens, type = type)
  }
  out
}
|
/R/CensRegSMN.r
|
no_license
|
cran/SMNCensReg
|
R
| false
| false
| 4,480
|
r
|
CensReg.SMN <- function(cc, x, y, LS=NULL, nu=3, delta=NULL, cens="left", dist="T", show.envelope="FALSE", error=0.0001,iter.max=300)
{
# Fit a univariate censored linear regression under a scale-mixture-of-
# normals error distribution via EM (delegated to EM.Cens.Int), print
# estimates/SEs and model-selection criteria, and return the fit object.
# cc: censoring indicators; x: covariates; y: responses (lower limits for
# interval censoring); LS: upper limits (interval censoring only);
# nu/delta: shape parameters; cens: "left"/1, "right"/2, "interval"/3;
# dist: one of "T", "Normal", "PearsonVII", "Slash", "NormalC".
# Map symbolic censoring types to the numeric codes used internally.
if(cens=="left"){cens = 1}
if(cens=="right"){cens = 2}
if(cens=="interval"){cens = 3}
type = dist
# Fallback covariate labels ("x1 ", "x2 ", ...) used when x has no colnames.
namesx <- ('x1 ')
if(ncol(as.matrix(x))>1)
{
for(i in 2:ncol(as.matrix(x))){namesx <- cbind(namesx, paste("x",i," ",sep=""))}
}
# ---- input validation ----
if(ncol(as.matrix(y)) > 1) stop("Only univariate linear regression supported!")
if(ncol(as.matrix(cc)) > 1) stop("Only univariate linear regression supported!")
if( length(y) != nrow(as.matrix(x)) ) stop("X variable does not have the same number of lines than y")
if( (length(x) == 0) | (length(y) == 0) ) stop("All parameters must be provided.")
if( (type != "T") && (type != "Normal") && (type != "PearsonVII") && (type != "Slash") && (type != "NormalC")) stop("Distribution family not supported. Check documentation!")
# Reject infinite limits (they would encode a mixed censoring scheme).
# NOTE(review): when cens=3 and LS is NULL this indexing crashes -- LS is
# effectively required for interval censoring.
for(i in 1:length(y))
{
if(cens=="3")
{
if(is.na(y[i])==FALSE && is.na(LS[i])==FALSE)
{
if( (y[i]==-Inf) | (y[i]==Inf) | (LS[i]==Inf) | (LS[i]==-Inf) ) stop("This package does not support mixed types of censoring. For left censoring use cens=1. For right censoring use cens=2. For interval censoring use cens=3.")
}
}else
{
if( (y[i]==-Inf) | (y[i]==Inf) ) stop("This package does not support mixed types of censoring. For left censoring use cens=1. For right censoring use cens=2. For interval censoring use cens=3.")
}
}
# ---- distribution-specific checks on nu / delta ----
if( type == "T" | type == "Slash" )
{
if(length(nu) > 1) stop("nu parameter must be a scalar")
if(length(nu) == 0) stop("nu parameter must be provided.")
if(nu <= 0) stop("nu parameter must be positive.")
}
if( type == "PearsonVII" )
{
if(length(nu) > 1) stop("nu parameter must be a scalar")
if(length(nu) == 0) stop("initial value for nu parameter must be provided in case of Pearson VII distribution.")
if(nu <= 0) stop("nu parameter must be positive.")
if(length(delta) > 1) stop("delta parameter must be a scalar")
if(length(delta) == 0) stop("delta parameter must be provided in case of Pearson VII distribution.")
if(delta <= 0) stop("delta parameter must be positive.")
}
if(type == "NormalC")
{
if(length(nu) !=2) stop("nu must be a bidimensional vector in case of Contaminated Normal distribution")
if(nu[1] <=0 || nu[1] >= 1) stop("nu[1] must lies in (0,1)")
if(nu[2] <=0 || nu[2] >= 1) stop("nu[2] must lies in (0,1)")
}
if( (cens != "1") && (cens != "2") && (cens != "3")) stop("Censored type not supported. 1 for left censoring, 2 for right censoring and 3 for intervalar censoring.")
# ---- EM fit ----
out <- EM.Cens.Int(cc,x,y,LS,nu,delta,cens,type,error,iter.max)
# t(t(.)) coerces a plain SE vector into a column matrix for cbind().
SE <- t(out$SE)
SE <- round(t(SE),digits=5)
param <- round(cbind(rbind(out$betas,out$sigma2),SE),digits=5)
namespar <- colnames(x)
colx <- ncol(as.matrix(x))
if(length(namespar)==0)namespar <- namesx[1:colx]
dimnames(param) <- list(c(namespar,expression(sigma^2)),c("Estimates", "SE"))
# Family-dependent display table for the shape parameter(s).
if( type=="PearsonVII")
{
sig2 <- round(t(cbind(out$nu,out$delta )),digits=5)
dimnames(sig2) <- list(c(expression(nu),expression(delta)),"")
}
if( (type=="T") || (type=="Slash"))
{
nu1 <- matrix(round(out$nu,digits=5),ncol=1,nrow=1)
row.names(nu1) <- "nu"
colnames(nu1) <- " "
}
if( type=="NormalC")
{
nuf <- t(as.matrix(out$nu))
nuf <- t(nuf)
sig2 <- round(nuf,digits=5)
dimnames(sig2) <- list(c(expression(nu1),expression(nu2)),"")
}
# ---- console report ----
cat('\n')
cat('-------------------------------------------\n')
cat(' EM estimates and SE \n')
cat('-------------------------------------------\n')
print(param)
if(type!="Normal")
{
if(type=="T"|type=="Slash")
{
print(nu1)
}
else
{
print(sig2)
}
}
cat('------------------------------------------\n')
cat('\r \n')
# Model-selection criteria table.
critFin <- c(out$logver, out$AIC, out$BIC, out$EDC)
critFin <- round(t(as.matrix(critFin)),digits=3)
dimnames(critFin) <- list(c("Value"),c("Loglik", "AIC", "BIC","EDC"))
cat('\n')
cat('Model selection criteria\n')
cat('-------------------------------------------\n')
print(critFin)
cat('-------------------------------------------\n')
cat('\r \n')
# Optional simulated-envelope diagnostic plot.
if(show.envelope=="TRUE")
{
envelop <- EnvelopeRMT(cc,x,y,LS,nu,delta,cens=cens,type=type)
}
out
}
|
# Runtime dependencies for the Shiny app.
require(shiny)
require(lubridate)
# Install the openfda client from GitHub on first run if it is absent.
if (!require('openfda') ) {
devtools::install_github("ropenhealth/openfda")
library(openfda)
}
# Shared helper functions (buildURL, fda_fetch_p, extractcols, ...).
source('sourcedir.R')
simpleCap <- function(x) {
  # Lower-case the whole string, then capitalize only the first word:
  # simpleCap("HELLO WORLD") -> "Hello world".
  # Returns the transformed string.
  s <- strsplit(tolower(x), " ")[[1]]
  first <- paste0(toupper(substring(s[1], 1, 1)), substring(s[1], 2))
  rest <- paste(s[-1], collapse = " ")
  # FIX: the original unconditionally pasted `rest` on, which left a
  # trailing space for single-word input ("Hi " instead of "Hi").
  if (nchar(rest) == 0) first else paste(first, rest)
}
listtocvect <- function(s, delim=', ', trunc=25){
  # Collapse a vector (or list cell) into a single delimited string,
  # truncated to at most `trunc` characters. NULL input yields "".
  if (is.null(s)) return('')
  joined <- paste(s, collapse = delim)
  strtrim(joined, trunc)
}
listtodf <- function(lis, delim=', ', trunc=100){
# Flatten a list of list-columns (as returned by openFDA) into a
# data.frame of delimited strings, one column per element of `lis`.
# Non-list elements become empty-string columns.
# NOTE(review): the seed 'rownames' column only fixes the row count;
# out[[1]] overwrites it on the first loop iteration.
out <- data.frame(rownames=1:length(lis[[1]]), stringsAsFactors =FALSE )
for (i in seq_along(lis) )
{
if (is.list(lis[[i]]))
{
# Collapse each cell's vector into one truncated, delimited string.
tmp <- sapply(lis[[i]], function(x) listtocvect(x, delim, trunc) )
out[[i]] <- tmp
} else {
out[[i]] <- ''
}
}
# print(lis[[i]])
out <- data.frame(out, stringsAsFactors =FALSE)
# Carry the original element names over as column names.
names(out) <- names(lis)
return(out)
}
listtostring <- function(s, delim=';')
{
  # Turn a deparsed vector such as 'c("a","b")' into a plain delimited
  # string ("a;b"): strip the quotes, the c( wrapper and the closing
  # paren, then swap commas for `delim`. All patterns are literal.
  cleaned <- s
  for (pat in c('\"', 'c(', ')')) {
    cleaned <- gsub(pat, '', cleaned, fixed=TRUE)
  }
  gsub(',', delim, cleaned, fixed=TRUE)
}
getdf <- function(mydf, name, message='Empty Table')
{
  # Safely extract the data.frame stored in column `name` of `mydf`.
  # Falls back to a one-row data.frame carrying `message` whenever
  # `mydf` is not a data.frame, the column is absent, or the column is
  # not itself a data.frame.
  fallback <- data.frame(Note=message)
  if (!is.data.frame(mydf)) return(fallback)
  if (!(name %in% names(mydf))) return(fallback)
  candidate <- mydf[, name]
  if (is.data.frame(candidate)) candidate else fallback
}
#**************************************
shinyServer(function(input, output, session) {
# Shiny server for the openFDA device-report viewer. Builds openFDA API
# queries from the term inputs (t1/t2/t3) and variable inputs (v1/v2/v3),
# pages through matching reports with the 'skip' slider, and renders one
# report's sections (event, device, report text, patient, ...) as tables.
# NOTE(review): relies on helpers sourced from sourcedir.R (buildURL,
# fda_fetch_p, extractcols, get*vars, getopenfdamaxrecords, loadhelp,
# makelink, removekey, ...) which are not visible in this file.
# The slider is 1-based; the openFDA 'skip' parameter is 0-based.
getskip <- reactive({
return( input$skip-1 )
})
# Advance to the next report, clamped to the API's maximum record count.
ntext <- eventReactive( input$nextrow, {
myskip <- getskip()
mydf <- getfullquery()
numrecs <- mydf$df.meta$results$total
maxlim <- getopenfdamaxrecords( numrecs )
updateSliderInput( session, 'skip', value= min(myskip+2, maxlim), min=1, step= 1, max=maxlim)
})
# Term accessors; gett1 also touches anychanged() so dependents re-run
# whenever any search input changes.
gett1 <- function(){
anychanged()
s <- toupper( input$t1 )
return( s )
}
gett2 <- function(){
s <- toupper( input$t2 )
return( s )
}
gett3 <- function(){
s <- toupper( input$t3 )
return( s )
}
# Variable accessors (pass-through of the selectize inputs).
getv1 <- function(){
s <- ( input$v1 )
return( s )
}
getv2 <- function(){
s <- ( input$v2)
return( s )
}
getv3 <- function(){
s <- ( input$v3 )
return( s )
}
# Copy the modal's secondary inputs into the main inputs when the
# Update button fires.
updatevars <- reactive({
input$update
isolate( {
# updateTextInput(session, "v1", value=( input$v1_2 ) )
# updateTextInput(session, "t1", value= ( input$t1_2 ) )
# updateTextInput(session, "v2", value=( input$v2_2 ) )
# updateTextInput(session, "t2", value= ( input$t2_2 ) )
# updateTextInput(session, "v3", value=( input$v3_2 ) )
# updateTextInput(session, "t3", value= ( input$t3_2 ) )
updateviewerinputs(session)
})
})
# Registers a reactive dependency on all six search inputs (the local
# variables are throwaways) and dismisses any stale error alert.
anychanged <- reactive({
a <- input$t1
b <- input$v1
c <- input$t2
d <- input$v2
c <- input$t3
d <- input$v3
closeAlert(session, 'erroralert')
})
output$mymodal <- renderText({
if (input$update > 0)
{
updatevars()
toggleModal(session, 'modalExample1', 'close')
}
return('')
})
# Invisible sinks that force the next/prev eventReactives to run.
output$ntext <- renderText( {
ntext()
return('')
})
# Step back one report (never below the first record).
ptext <- eventReactive( input$prevrow, {
myskip <- getskip()
mydf <- getfullquery()
numrecs <- mydf$df.meta$results$total
maxlim <- getopenfdamaxrecords( numrecs )
updateSliderInput( session, 'skip', value= max(myskip, 1), min=1, step= 1, max=maxlim)
})
output$ptext <- renderText( {
ptext()
return('')
})
# Report number of the currently-viewed record, with a placeholder when
# the field is absent.
getreportid <- reactive({
mydf <- getquery()
tmp <- mydf$df.results
id <- tmp$report_number
if (is.null(id)){
id = 'Missing Report Number'
}
return(id)
})
# Query without 'skip' -- used for the total match count (meta data).
# With all terms empty, falls back to matching any report_number.
getfullquery <- reactive({
if ( input$t1=='' & input$t2 == '' & input$t3 == '' ){
v1 = '_exists_'
t1 = 'report_number'
v2 <- ''
t2 <- ''
v3 <- ''
t3 <- ''
} else {
v1 <- c(input$v1, input$v2, input$v3)
t1 <- c(gett1(), gett2(), gett3() )
}
myurl <- buildURL(v1, t1, limit=1, db='/device/')
mydf <- fda_fetch_p(session, myurl)
out <- c(df=mydf, url=myurl)
return(out)
})
# Same query as getfullquery but skipped to the single currently-viewed
# record; feeds every render table below.
getquery <- reactive({
if ( input$t1 == '' & input$t2 == '' & input$t3 == ''){
v1 = '_exists_'
t1 = 'report_number'
v2 <- ''
t2 <- ''
v3 <- ''
t3 <- ''
} else {
v1 <- c(input$v1, input$v2, input$v3)
t1 <- c(gett1(), gett2(), gett3() )
}
myurl <- buildURL(v1, t1, limit=1, skip=getskip(), db='/device/')
mydf <- fda_fetch_p(session, myurl)
# print('url')
out <- c(df=mydf, url=myurl )
return(out)
})
# HTML echoes of the current search variables/terms ('None' when empty).
output$v1 <- renderText({
s <- getv1()
if(s == '') {
s <- 'None'
}
out <- paste( '<br><b>Variable:<i>', s, '</i></b>' )
return(out)
})
output$v2 <- renderText({
s <- getv2()
if(s == '') {
s <- 'None'
}
out <- paste( '<b>Variable:<i>', s, '</i></b>' )
return(out)
})
output$v3 <- renderText({
s <- getv3()
if(s == '') {
s <- 'None'
}
out <- paste( '<b>Variable:<i>', s, '</i></b>' )
return(out)
})
output$t1 <- renderText({
s <- gett1()
if(s == '') {
s <- 'None'
}
out <- paste( '<br><b>Term:<i>', s, '</i></b>' )
return(out)
})
output$t2 <- renderText({
s <- gett2()
if(s == '') {
s <- 'None'
}
out <- paste( '<br><b>Term:<i>', s, '</i></b>' )
return(out)
})
output$t3 <- renderText({
s <- gett3()
if(s == '') {
s <- 'None'
}
out <- paste( '<br><b>Term:<i>', s, '</i></b>' )
return(out)
})
#EVENT**********************
output$eventtabletitle <- renderText({
s <- paste('<h4>Report Number=', getreportid(), '<br><br>Event</h4>' )
return( s )
})
output$eventtable <- renderTable({
# if (input$t1=='') {return(data.frame(Drug='Please enter drug name', Count=0))}
mydf <- getquery()$df.results
myvars <- geteventvars()
return(extractcols(mydf, myvars))
})
#Device********************************
output$deviceindextabletitle <- renderText({
s <- paste('<h4>Device Index Variables</h4>' )
s <- paste('<h4>Report Number=', getreportid(), '<br><br>Device Index Variables</h4>' )
return( s )
})
# Device tables operate on the first element of the nested 'device'
# list-column; the 'device.' prefix is stripped from the var lists.
output$deviceindex <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results$device[[1]]
# browser()
myvars <- gsub( 'device.','', getdeviceindexvars(), fixed=TRUE)
return(extractcols(mydf, myvars))
})
output$deviceidentification <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results$device
# browser()
myvars <- gsub( 'device.','', getdeviceidentificationvars(), fixed=TRUE)
return(extractcols(mydf[[1]], myvars))
})
output$deviceidentificationtabletitle <- renderText({
s <- paste('<h4>Device Identification Variables</h4>' )
return( s )
})
output$devicemodtitle <- renderText({
s <- paste('<h4>Device Model Variables</h4>' )
return( s )
})
output$devicemodel <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results$device
# browser()
myvars <- gsub( 'device.','', c( getdevicemodelvars(), getdevicagevars()), fixed=TRUE)
return(extractcols(mydf[[1]], myvars))
})
#REPORT TEXT********************************************
output$mdrtabletitle <- renderText({
s <- paste('<h4>Report Number =', getreportid(), ' <br><br>Report Text</h4>' )
return( s )
})
# Keep only the character columns of the mdr_text block.
output$mdr <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results$mdr_text[[1]]
tmp <- mydf
types <- (sapply(tmp, class))
typesval <- types[types =='character']
mydf <- tmp[ , names(typesval) ]
# mydf <- mydf[, which(mydf[,names(typesval)]!='' ) ]
return(mydf)
})
#OpenFDA****************************
output$openfdatabletitle <- renderText({
s <- paste('<h4>Report Number=', getreportid(), '<br><br>OpenFDA variable</h4>')
return( s )
})
output$openfda <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results$device[[1]]$openfda
tmp <- mydf
types <- (sapply(tmp, class))
typesval <- types[types =='character']
mydf <- tmp[ , names(typesval) ]
# mydf <- mydf[, which(mydf[,names(typesval)]!='' ) ]
return(mydf)
})
#SOURCE**************************
output$sourcetabletitle <- renderText({
s <- paste('<h4>Report Number =', getreportid(), '<br><br>Source</h4>' )
return( s )
})
# Scalar (non-nested) columns of the source variables only.
output$source <- renderTable({
mydf <- getquery()
mydf <- (mydf$df.results[, getsourcevars() ])
types <- (sapply(mydf, class))
typesval <- types[types!='data.frame' & types!='list']
# typesdf <- types[types=='data.frame']
# print(typesval)
# print('dfs')
#print(typesdf)
mydf <- mydf[ , names(typesval) ]
if ( is.data.frame( mydf ) ){
return(mydf)
} else {
return( data.frame(Note='No patient data'))
}
})
#PATIENT*********************************************
output$patienttabletitle <- renderText({
s <- paste('<h4>Report ID=', getreportid(), '<br><br>Patient</h4>' )
return( s )
})
output$patient <- renderTable({
mydf <- getquery()
mydf <- (mydf$df.results$patient)
tmp <- mydf[[1]]
types <- (sapply(tmp, class))
typesval <- types[types!='data.frame' & types!='list']
# typesdf <- types[types=='data.frame']
# print(types)
# print('dfs')
mydf <- tmp[ , names(typesval) ]
# print(head(tmp$openfda))
return(mydf)
})
#user_fi**************************
output$user_fitabletitle <- renderText({
s <- paste('<h4>Report Number =', getreportid(), '<br><br>User Facility/Importer</h4>' )
return( s )
})
output$user_fi <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results
# browser()
myvars <- getuser_fivars()
return(extractcols(mydf, myvars))
})
#user_dm**************************
output$user_dmtabletitle <- renderText({
s <- paste('<h4>Report Number =', getreportid(), '<br><br>Device Manufacturere</h4>' )
return( s )
})
output$user_dm <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results
# browser()
myvars <- getuser_dmvars()
return(extractcols(mydf, myvars))
})
#suspect**************************
output$suspecttabletitle <- renderText({
s <- paste('<h4>Report Number =', getreportid(), '<br><br>Suspect device manufacturer</h4>' )
return( s )
})
output$suspect <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results
# browser()
myvars <- getsuspectvars()
return(extractcols(mydf, myvars))
})
#keys**************************
output$keystabletitle <- renderText({
s <- paste('<h4>Report Number =', getreportid(), '<br><br>Keys and flags</h4>' )
return( s )
})
output$keys <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results
# browser()
myvars <- getkeyvars()
return(extractcols(mydf, myvars))
})
#Other**************************
output$othertabletitle <- renderText({
s <- paste('<h4>Report Number =', getreportid(), '<br><br>Patient</h4>' )
return( s )
})
# Scalar columns of the remaining ("other") variables, with empty
# values dropped.
output$other <- renderTable({
mydf <- getquery()
mydf <- (mydf$df.results[, getothervars() ])
types <- (sapply(mydf, class))
typesval <- types[types!='data.frame' & types!='list']
# typesdf <- types[types=='data.frame']
# print(typesval)
# print('dfs')
#print(typesdf)
mydf <- mydf[ , names(typesval) ]
mydf <- mydf[, which(mydf[,names(typesval)]!='' ) ]
if ( is.data.frame( mydf ) ){
return(mydf)
} else {
return( data.frame(Note='No patient data'))
}
})
#META**************************
output$querytitle <- renderText({
return( paste('<h4>Report Number =', getreportid(), '<br><br>Meta Data and Query </h4>' ))
})
# openFDA meta block (disclaimer, license, update date, paging info).
output$metatext <- renderText({
mydf <- getfullquery()
mydf2 <- getquery()
# "meta": {
# "disclaimer": "openFDA is a beta research project and not for clinical use. While we make every effort to ensure that data is accurate, you should assume all results are unvalidated.",
# "license": "http://open.fda.gov/license",
# "last_updated": "2014-08-01",
# "results": {
# "skip": 0,
# "limit": 1,
# "total": 1355
#print(mydf)
#print(link)
out <- paste(
'Disclaimer = ', mydf$df.meta$disclaimer,
'<br>License = ', mydf$df.meta$license,
'<br>Last Update=', mydf$df.meta$last_updated,
'<br>Total=', mydf$df.meta$results$total,
'<br> Limit=', mydf$df.meta$results$limit,
'<br> Skip=', mydf$df.meta$results$skip
)
#print( ('output$querytext') )
return(out)
})
output$json <- renderText({
myurl <- getquery()$url
out <- getjson( myurl )
return( out )
})
output$querytext <- renderText({
mydf2 <- getquery()
out <- paste( '<br> URL =', removekey( makelink(mydf2$url) ),
'<BR><BR><b>JSON Output = </b><BR>'
)
return(out)
})
output$reportid <- renderUI({
p( paste('Report Number=', getreportid() ) )
})
# "Viewing #k of N" banner; also re-syncs the slider bounds.
output$currec <- renderUI({
mydf <- getfullquery()
numrecs <- mydf$df.meta$results$total
maxlim <- getopenfdamaxrecords( numrecs )
updateSliderInput( session, 'skip', value=getskip()+1, min=1, step= 1, max=maxlim)
out <- paste( 'Viewing #', getskip()+1, 'of', numrecs, 'selected reports')
return(out)
})
# Reconstruct this app's own base URL from the client session data.
getcururl <- reactive({
mypath <- extractbaseurl( session$clientData$url_pathname )
s <- paste0( session$clientData$url_protocol, "//", session$clientData$url_hostname,
':',
session$clientData$url_port,
mypath )
return(s)
})
output$applinks <- renderText({
return( makeapplinks( getcururl() ) )
})
output$date1 <- renderText({
l <- getdaterangedeviceAE()
paste( '<b>', l[3] , 'from', as.Date(l[1], "%Y%m%d") ,'to', as.Date(l[2], "%Y%m%d"), '</b>')
})
# Seed the search inputs from the URL query string (?t1=...&v1=...),
# undoing the quote-escaping applied when the link was built.
geturlquery <- observe({
q <- parseQueryString(session$clientData$url_search)
t1 <- gsub('"[', '[', q$t1, fixed=TRUE)
t1 <- gsub(']"', ']', t1, fixed=TRUE)
t1 <- gsub('""', '"', t1, fixed=TRUE)
updateTextInput(session, "t1", value = t1)
updateTextInput(session, "t1_2", value = t1)
t2 <- gsub('"[', '[', q$t2, fixed=TRUE)
t2 <- gsub(']"', ']', t2, fixed=TRUE)
t2 <- gsub('""', '"', t2, fixed=TRUE)
updateTextInput(session, "t2", value = t2)
updateTextInput(session, "t2_2", value = t2)
if(!is.null(q$t3) )
{
t3 <- gsub('"[', '[', q$t3, fixed=TRUE)
t3 <- gsub(']"', ']', t3, fixed=TRUE)
t3 <- gsub('""', '"', t3, fixed=TRUE)
updateTextInput(session, "t3", value = t3)
updateTextInput(session, "t3_2", value = t3)
}
if(!is.null(q$v1) )
{
v1 <- gsub('"', '', q$v1, fixed=TRUE)
updateSelectizeInput(session, inputId = "v1", selected = v1)
updateSelectizeInput(session, inputId = "v1_2", selected = v1)
}
if(!is.null(q$v2) )
{
v2 <- gsub('"', '', q$v2, fixed=TRUE)
updateSelectizeInput(session, inputId = "v2", selected = v2)
updateSelectizeInput(session, inputId = "v2_2", selected = v2)
}
if(!is.null(q$v3) )
{
v3 <- gsub('"', '', q$v3, fixed=TRUE)
updateSelectizeInput(session, inputId = "v3", selected = v3)
updateSelectizeInput(session, inputId = "v3_2", selected = v3)
}
#
# updateNumericInput(session, "skip", value = q$skip)
# return(q)
})
# Context-sensitive help text, keyed on the active sidebar tab.
output$help <- renderUI({
# print('test')
s <- input$sidetabs
# print(s)
out <- switch(s,
'Graph Options'=loadhelp('graphoptions'),
'Data Options'=loadhelp('dataoptions'),
'Axis Options'=loadhelp('axisoptions'),
'Select Vars'= loadhelp('selectvars'),
'Load Data'= loadhelp('loaddata'),
'Overview'= loadhelp('overview'),
'Overviewside'= loadhelp('overviewside'),
'none')
return( HTML(out[[1]]) )
})
})
|
/devicereports/server.R
|
no_license
|
jonathanglevine/openfdashinyapps
|
R
| false
| false
| 15,612
|
r
|
# Runtime dependencies for the Shiny app.
require(shiny)
require(lubridate)
# Install the openfda client from GitHub on first run if it is absent.
if (!require('openfda') ) {
devtools::install_github("ropenhealth/openfda")
library(openfda)
}
# Shared helper functions (buildURL, fda_fetch_p, extractcols, ...).
source('sourcedir.R')
simpleCap <- function(x) {
# Lower-case `x`, split on spaces, and capitalize only the first word.
# NOTE(review): single-word input yields a trailing space ("Hi ").
s <- tolower(x)
s <- strsplit(s, " ")[[1]]
first <- paste(toupper(substring(s[1], 1, 1)), substring(s[1], 2),
sep = "", collapse = " ")
out <- paste( s[-1], sep = "", collapse = " ")
# Implicit return: the value of this final assignment.
out <- paste(first, out)
}
listtocvect <- function(s, delim=', ', trunc=25){
# Collapse a vector/list cell into one delimited string, truncated to
# at most `trunc` characters; NULL maps to "".
if (is.null( s) ) {
return('')
}
out <- paste0( s, sep='', collapse=delim)
out <- strtrim(out, trunc)
return(out)
}
listtodf <- function(lis, delim=', ', trunc=100){
# Flatten a list of list-columns into a data.frame of delimited strings,
# one column per element of `lis`; non-list elements become ''.
# NOTE(review): the seed 'rownames' column only fixes the row count;
# out[[1]] overwrites it on the first loop iteration.
out <- data.frame(rownames=1:length(lis[[1]]), stringsAsFactors =FALSE )
for (i in seq_along(lis) )
{
if (is.list(lis[[i]]))
{
# Collapse each cell's vector into one truncated, delimited string.
tmp <- sapply(lis[[i]], function(x) listtocvect(x, delim, trunc) )
out[[i]] <- tmp
} else {
out[[i]] <- ''
}
}
# print(lis[[i]])
out <- data.frame(out, stringsAsFactors =FALSE)
# Carry the original element names over as column names.
names(out) <- names(lis)
return(out)
}
listtostring <- function(s, delim=';')
{
# Convert a deparsed vector like 'c("a","b")' to "a;b": strip quotes,
# the c( wrapper and closing paren, then replace commas with `delim`.
myevents <- gsub('\"', '', s, fixed=TRUE)
myevents <- gsub('c(', '', myevents, fixed=TRUE)
myevents <- gsub(')', '', myevents, fixed=TRUE)
myevents <- gsub(',', delim, myevents, fixed=TRUE)
return(myevents)
}
getdf <- function(mydf, name, message='Empty Table')
{
# Safely extract the data.frame stored in column `name` of `mydf`;
# returns a one-row Note data.frame carrying `message` when mydf is not
# a data.frame, the column is missing, or it is not itself a data.frame.
# print(name)
# print(head(mydf) )
err <- data.frame( Note=message )
if ( is.data.frame(mydf) ) {
if (name %in% names(mydf) ) {
tmp <- mydf[, name]
if ( is.data.frame(tmp) ) {
return(tmp)
}
else {
return(err)
}
}
}
return( err )
}
#**************************************
shinyServer(function(input, output, session) {
getskip <- reactive({
return( input$skip-1 )
})
ntext <- eventReactive( input$nextrow, {
myskip <- getskip()
mydf <- getfullquery()
numrecs <- mydf$df.meta$results$total
maxlim <- getopenfdamaxrecords( numrecs )
updateSliderInput( session, 'skip', value= min(myskip+2, maxlim), min=1, step= 1, max=maxlim)
})
gett1 <- function(){
anychanged()
s <- toupper( input$t1 )
return( s )
}
gett2 <- function(){
s <- toupper( input$t2 )
return( s )
}
gett3 <- function(){
s <- toupper( input$t3 )
return( s )
}
getv1 <- function(){
s <- ( input$v1 )
return( s )
}
getv2 <- function(){
s <- ( input$v2)
return( s )
}
getv3 <- function(){
s <- ( input$v3 )
return( s )
}
updatevars <- reactive({
input$update
isolate( {
# updateTextInput(session, "v1", value=( input$v1_2 ) )
# updateTextInput(session, "t1", value= ( input$t1_2 ) )
# updateTextInput(session, "v2", value=( input$v2_2 ) )
# updateTextInput(session, "t2", value= ( input$t2_2 ) )
# updateTextInput(session, "v3", value=( input$v3_2 ) )
# updateTextInput(session, "t3", value= ( input$t3_2 ) )
updateviewerinputs(session)
})
})
anychanged <- reactive({
a <- input$t1
b <- input$v1
c <- input$t2
d <- input$v2
c <- input$t3
d <- input$v3
closeAlert(session, 'erroralert')
})
output$mymodal <- renderText({
if (input$update > 0)
{
updatevars()
toggleModal(session, 'modalExample1', 'close')
}
return('')
})
output$ntext <- renderText( {
ntext()
return('')
})
ptext <- eventReactive( input$prevrow, {
myskip <- getskip()
mydf <- getfullquery()
numrecs <- mydf$df.meta$results$total
maxlim <- getopenfdamaxrecords( numrecs )
updateSliderInput( session, 'skip', value= max(myskip, 1), min=1, step= 1, max=maxlim)
})
output$ptext <- renderText( {
ptext()
return('')
})
getreportid <- reactive({
mydf <- getquery()
tmp <- mydf$df.results
id <- tmp$report_number
if (is.null(id)){
id = 'Missing Report Number'
}
return(id)
})
getfullquery <- reactive({
if ( input$t1=='' & input$t2 == '' & input$t3 == '' ){
v1 = '_exists_'
t1 = 'report_number'
v2 <- ''
t2 <- ''
v3 <- ''
t3 <- ''
} else {
v1 <- c(input$v1, input$v2, input$v3)
t1 <- c(gett1(), gett2(), gett3() )
}
myurl <- buildURL(v1, t1, limit=1, db='/device/')
mydf <- fda_fetch_p(session, myurl)
out <- c(df=mydf, url=myurl)
return(out)
})
getquery <- reactive({
if ( input$t1 == '' & input$t2 == '' & input$t3 == ''){
v1 = '_exists_'
t1 = 'report_number'
v2 <- ''
t2 <- ''
v3 <- ''
t3 <- ''
} else {
v1 <- c(input$v1, input$v2, input$v3)
t1 <- c(gett1(), gett2(), gett3() )
}
myurl <- buildURL(v1, t1, limit=1, skip=getskip(), db='/device/')
mydf <- fda_fetch_p(session, myurl)
# print('url')
out <- c(df=mydf, url=myurl )
return(out)
})
output$v1 <- renderText({
s <- getv1()
if(s == '') {
s <- 'None'
}
out <- paste( '<br><b>Variable:<i>', s, '</i></b>' )
return(out)
})
output$v2 <- renderText({
s <- getv2()
if(s == '') {
s <- 'None'
}
out <- paste( '<b>Variable:<i>', s, '</i></b>' )
return(out)
})
output$v3 <- renderText({
s <- getv3()
if(s == '') {
s <- 'None'
}
out <- paste( '<b>Variable:<i>', s, '</i></b>' )
return(out)
})
output$t1 <- renderText({
s <- gett1()
if(s == '') {
s <- 'None'
}
out <- paste( '<br><b>Term:<i>', s, '</i></b>' )
return(out)
})
output$t2 <- renderText({
s <- gett2()
if(s == '') {
s <- 'None'
}
out <- paste( '<br><b>Term:<i>', s, '</i></b>' )
return(out)
})
output$t3 <- renderText({
s <- gett3()
if(s == '') {
s <- 'None'
}
out <- paste( '<br><b>Term:<i>', s, '</i></b>' )
return(out)
})
#EVENT**********************
output$eventtabletitle <- renderText({
s <- paste('<h4>Report Number=', getreportid(), '<br><br>Event</h4>' )
return( s )
})
output$eventtable <- renderTable({
# if (input$t1=='') {return(data.frame(Drug='Please enter drug name', Count=0))}
mydf <- getquery()$df.results
myvars <- geteventvars()
return(extractcols(mydf, myvars))
})
#Device********************************
output$deviceindextabletitle <- renderText({
s <- paste('<h4>Device Index Variables</h4>' )
s <- paste('<h4>Report Number=', getreportid(), '<br><br>Device Index Variables</h4>' )
return( s )
})
output$deviceindex <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results$device[[1]]
# browser()
myvars <- gsub( 'device.','', getdeviceindexvars(), fixed=TRUE)
return(extractcols(mydf, myvars))
})
output$deviceidentification <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results$device
# browser()
myvars <- gsub( 'device.','', getdeviceidentificationvars(), fixed=TRUE)
return(extractcols(mydf[[1]], myvars))
})
output$deviceidentificationtabletitle <- renderText({
s <- paste('<h4>Device Identification Variables</h4>' )
return( s )
})
output$devicemodtitle <- renderText({
s <- paste('<h4>Device Model Variables</h4>' )
return( s )
})
output$devicemodel <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results$device
# browser()
myvars <- gsub( 'device.','', c( getdevicemodelvars(), getdevicagevars()), fixed=TRUE)
return(extractcols(mydf[[1]], myvars))
})
#REPORT TEXT********************************************
output$mdrtabletitle <- renderText({
s <- paste('<h4>Report Number =', getreportid(), ' <br><br>Report Text</h4>' )
return( s )
})
output$mdr <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results$mdr_text[[1]]
tmp <- mydf
types <- (sapply(tmp, class))
typesval <- types[types =='character']
mydf <- tmp[ , names(typesval) ]
# mydf <- mydf[, which(mydf[,names(typesval)]!='' ) ]
return(mydf)
})
#OpenFDA****************************
output$openfdatabletitle <- renderText({
s <- paste('<h4>Report Number=', getreportid(), '<br><br>OpenFDA variable</h4>')
return( s )
})
output$openfda <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results$device[[1]]$openfda
tmp <- mydf
types <- (sapply(tmp, class))
typesval <- types[types =='character']
mydf <- tmp[ , names(typesval) ]
# mydf <- mydf[, which(mydf[,names(typesval)]!='' ) ]
return(mydf)
})
#SOURCE**************************
output$sourcetabletitle <- renderText({
s <- paste('<h4>Report Number =', getreportid(), '<br><br>Source</h4>' )
return( s )
})
output$source <- renderTable({
mydf <- getquery()
mydf <- (mydf$df.results[, getsourcevars() ])
types <- (sapply(mydf, class))
typesval <- types[types!='data.frame' & types!='list']
# typesdf <- types[types=='data.frame']
# print(typesval)
# print('dfs')
#print(typesdf)
mydf <- mydf[ , names(typesval) ]
if ( is.data.frame( mydf ) ){
return(mydf)
} else {
return( data.frame(Note='No patient data'))
}
})
#PATIENT*********************************************
output$patienttabletitle <- renderText({
s <- paste('<h4>Report ID=', getreportid(), '<br><br>Patient</h4>' )
return( s )
})
output$patient <- renderTable({
mydf <- getquery()
mydf <- (mydf$df.results$patient)
tmp <- mydf[[1]]
types <- (sapply(tmp, class))
typesval <- types[types!='data.frame' & types!='list']
# typesdf <- types[types=='data.frame']
# print(types)
# print('dfs')
mydf <- tmp[ , names(typesval) ]
# print(head(tmp$openfda))
return(mydf)
})
#user_fi**************************
output$user_fitabletitle <- renderText({
s <- paste('<h4>Report Number =', getreportid(), '<br><br>User Facility/Importer</h4>' )
return( s )
})
output$user_fi <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results
# browser()
myvars <- getuser_fivars()
return(extractcols(mydf, myvars))
})
#user_dm**************************
output$user_dmtabletitle <- renderText({
s <- paste('<h4>Report Number =', getreportid(), '<br><br>Device Manufacturere</h4>' )
return( s )
})
output$user_dm <- renderTable({
mydf <- getquery()
mydf <- mydf$df.results
# browser()
myvars <- getuser_dmvars()
return(extractcols(mydf, myvars))
})
#suspect**************************
output$suspecttabletitle <- renderText({
  # HTML heading for the suspect-device-manufacturer table.
  paste('<h4>Report Number =', getreportid(), '<br><br>Suspect device manufacturer</h4>' )
})
output$suspect <- renderTable({
  # Pull the suspect-device columns out of the current query result.
  results <- getquery()$df.results
  extractcols(results, getsuspectvars())
})
#keys**************************
output$keystabletitle <- renderText({
  # HTML heading for the keys-and-flags table.
  paste('<h4>Report Number =', getreportid(), '<br><br>Keys and flags</h4>' )
})
output$keys <- renderTable({
  # Pull the key/flag columns out of the current query result.
  results <- getquery()$df.results
  extractcols(results, getkeyvars())
})
#Other**************************
output$othertabletitle <- renderText({
  # HTML heading for the "Other" table.
  # Bug fix: the heading previously said 'Patient' (copy-paste from the
  # patient section); this section shows the remaining ("other") fields.
  s <- paste('<h4>Report Number =', getreportid(), '<br><br>Other</h4>' )
  return( s )
})
output$other <- renderTable({
  # Render the leftover fields of the current report, dropping nested
  # columns and empty values.
  mydf <- getquery()
  mydf <- (mydf$df.results[, getothervars() ])
  types <- (sapply(mydf, class))
  typesval <- types[types!='data.frame' & types!='list']
  mydf <- mydf[ , names(typesval) ]
  # Drop columns whose value is empty. NOTE(review): this relies on
  # df.results holding a single row (one report per view); with multiple
  # rows, which() would index the logical matrix element-wise -- confirm.
  mydf <- mydf[, which(mydf[,names(typesval)]!='' ) ]
  if ( is.data.frame( mydf ) ){
    return(mydf)
  } else {
    # Bug fix: previously said 'No patient data' here as well.
    return( data.frame(Note='No data'))
  }
})
#META**************************
output$querytitle <- renderText({
  # Heading for the meta-data / query panel.
  paste('<h4>Report Number =', getreportid(), '<br><br>Meta Data and Query </h4>' )
})
output$metatext <- renderText({
  # Summarize the openFDA meta block (disclaimer, license, freshness and
  # paging counters) as an HTML fragment.
  full_q <- getfullquery()
  # NOTE(review): the result of getquery() is unused, but calling it here
  # registers a reactive dependency; left in place to preserve behavior.
  q <- getquery()
  meta <- full_q$df.meta
  html <- paste(
    'Disclaimer = ', meta$disclaimer,
    '<br>License = ', meta$license,
    '<br>Last Update=', meta$last_updated,
    '<br>Total=', meta$results$total,
    '<br> Limit=', meta$results$limit,
    '<br> Skip=', meta$results$skip
  )
  return(html)
})
output$json <- renderText({
  # Raw JSON payload for the current query URL.
  getjson( getquery()$url )
})
output$querytext <- renderText({
  # Clickable query URL (API key removed) plus a JSON section label.
  q <- getquery()
  paste( '<br> URL =', removekey( makelink(q$url) ),
         '<BR><BR><b>JSON Output = </b><BR>'
  )
})
output$reportid <- renderUI({
  # Plain-text display of the current report number.
  p( paste('Report Number=', getreportid() ) )
})
output$currec <- renderUI({
  # Sync the record slider with the current skip value and show a
  # "Viewing #i of n" caption.
  meta <- getfullquery()
  total_recs <- meta$df.meta$results$total
  slider_max <- getopenfdamaxrecords( total_recs )
  updateSliderInput( session, 'skip', value=getskip()+1, min=1, step= 1, max=slider_max)
  paste( 'Viewing #', getskip()+1, 'of', total_recs, 'selected reports')
})
getcururl <- reactive({
  # Reconstruct the app's own base URL from the client session data.
  base_path <- extractbaseurl( session$clientData$url_pathname )
  url <- paste0( session$clientData$url_protocol, "//",
                 session$clientData$url_hostname, ':',
                 session$clientData$url_port,
                 base_path )
  return(url)
})
output$applinks <- renderText({
  # Links to companion apps, built from this app's URL.
  makeapplinks( getcururl() )
})
output$date1 <- renderText({
  # Bold date-range banner for the device adverse-event data.
  rng <- getdaterangedeviceAE()
  paste( '<b>', rng[3] , 'from', as.Date(rng[1], "%Y%m%d") ,'to', as.Date(rng[2], "%Y%m%d"), '</b>')
})
geturlquery <- observe({
  # Initialize the search inputs from the URL query string so a
  # bookmarked/shared link restores the same view.
  q <- parseQueryString(session$clientData$url_search)
  # Undo the quoting applied when search terms were embedded in the URL.
  # Factored out of three identical gsub() triples.
  unquote_term <- function(s) {
    s <- gsub('"[', '[', s, fixed=TRUE)
    s <- gsub(']"', ']', s, fixed=TRUE)
    gsub('""', '"', s, fixed=TRUE)
  }
  # Push a cleaned term into both copies of a text input.
  set_term <- function(id, value) {
    updateTextInput(session, id, value = value)
    updateTextInput(session, paste0(id, "_2"), value = value)
  }
  # t1 and t2 are always processed (as in the original behavior); t3 only
  # when present in the query string.
  set_term("t1", unquote_term(q$t1))
  set_term("t2", unquote_term(q$t2))
  if(!is.null(q$t3) )
  {
    set_term("t3", unquote_term(q$t3))
  }
  # Restore the variable selectors v1..v3 when present, stripping the
  # embedded quote characters.
  set_var <- function(param, id) {
    if (!is.null(param)) {
      v <- gsub('"', '', param, fixed=TRUE)
      updateSelectizeInput(session, inputId = id, selected = v)
      updateSelectizeInput(session, inputId = paste0(id, "_2"), selected = v)
    }
  }
  set_var(q$v1, "v1")
  set_var(q$v2, "v2")
  set_var(q$v3, "v3")
  #
  # updateNumericInput(session, "skip", value = q$skip)
  # return(q)
})
output$help <- renderUI({
  # Render the help page matching the currently selected sidebar tab;
  # fall back to the literal 'none' when the tab has no help entry.
  active_tab <- input$sidetabs
  page <- switch(active_tab,
                 'Graph Options'=loadhelp('graphoptions'),
                 'Data Options'=loadhelp('dataoptions'),
                 'Axis Options'=loadhelp('axisoptions'),
                 'Select Vars'= loadhelp('selectvars'),
                 'Load Data'= loadhelp('loaddata'),
                 'Overview'= loadhelp('overview'),
                 'Overviewside'= loadhelp('overviewside'),
                 'none')
  return( HTML(page[[1]]) )
})
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corrtests.R
\name{test2r.steigerz2}
\alias{test2r.steigerz2}
\title{Test the difference between two dependent correlations with
Steiger's z2 method}
\usage{
test2r.steigerz2(rjk, rhm, rjh, rkh, rkm, rjm, n, twotailed = TRUE)
}
\arguments{
\item{rjk}{The pearson product moment correlation that is to be tested
against \code{rhm} .}
\item{rhm}{The pearson product moment correlation that is to be tested
against \code{rjk}.}
\item{rjh}{One of the remaining four zero-order correlations among variables
j,k,h, and m.}
\item{rkh}{One of the remaining four zero-order correlations among variables
j,k,h, and m.}
\item{rkm}{One of the remaining four zero-order correlations among variables
j,k,h, and m.}
\item{rjm}{One of the remaining four zero-order correlations among variables
j,k,h, and m.}
\item{n}{Sample Size}
\item{twotailed}{The test can be two-tailed (\code{twotailed=TRUE}) or
one-tailed (\code{twotailed=FALSE}). The default is two-tailed.}
}
\value{
\item{z }{The test statistic value, a standard normal deviate (z'.)}
\item{pvalue }{the one- or two-tailed probability of the 't'.}
}
\description{
Differences in Pearson correlations are tested with Steiger's Z2 method.
The test is appropriate when the correlations are dependent. More
specifically, r(jk) is tested versus r(hm) in one sample of cases. Thus there
are four different variables involved in the analysis. The function
requires the input of the six possible bivariate Pearson product-moment
correlations among the four variables. One- and two-tailed tests are available.
}
\section{Related Functions}{
\code{test2r.steigerz2} is a member of a set of
functions that provide tests of differences between independent and
dependent correlations. The functions were inspired by the \code{paired.r}
function in the \strong{psych} package and some of the code is modeled on code
from that function. See:
\itemize{
\item
\code{\link[bcdstats:test2r.t2]{test2r.t2}}, Test two dependent correlations with the T2
method: r(yx1) vs r(yx2)
\item
\code{\link[bcdstats:test2r.mengz1]{test2r.mengz1}}, Test the difference between
two dependent correlations with the Meng z1 method: r(yx1) vs r(yx2) in one
sample of cases.
\item
\code{\link[bcdstats:test2r.steigerz1]{test2r.steigerz1}}, Test the difference between
two dependent correlations with the Steiger z1 method: r(yx1) vs r(yx2) in one
sample of cases.
\item
\code{\link[bcdstats:test2r.steigerz2]{test2r.steigerz2}}, the present
function
\item
\code{\link[bcdstats:test2r.ind]{test2r.ind}} Test two r(xy) from
Independent Groups }
}
\examples{
test2r.steigerz2(.6,.25,.4,.4,.4,.4,n=125)
test2r.steigerz2(.6,.25,.4,.4,.4,.4,n=125,twotailed=TRUE)
test2r.steigerz2(.6,.25,.4,.4,.4,.4,n=125,twotailed=FALSE)
test2r.steigerz2(.75,.50,.5,.4,.3,.76,n=100,twotailed=TRUE)
test2r.steigerz2(.75,.50,.5,.4,.3,.76,n=40,twotailed=TRUE)
}
\references{
Cheung, M. W. L., & Chan, W. (2004). Testing dependent
correlation coefficients via structural equation modeling.
\emph{Organizational Research Methods}, 7(2), 206-223. \cr Dunn, O. J., &
Clark, V. (1971). Comparison of tests of the equality of dependent
correlation coefficients. \emph{Journal of the American Statistical
Association}, 66(336), 904-908. \cr Hays, W. L. (1994). \emph{Statistics}
(5th ed.). Fort Worth: Harcourt College Publishers.\cr Hendrickson, G. F.,
Stanley, J. C., & Hills, J. R. (1970). Olkin's new formula for significance
of r13 vs. r23 compared with Hotelling's method. \emph{American Educational
Research Journal}, 7(2), 189-195. \cr Hittner, J. B., May, K., & Silver, N.
C. (2003). A Monte Carlo evaluation of tests for comparing dependent
correlations. \emph{The Journal of general psychology}, 130(2), 149-168. \cr
Howell, D. C. (2013). \emph{Statistical methods for psychology} (8th ed.).
Belmont, CA: Wadsworth Cengage Learning.\cr Meng, X. L., Rosenthal, R., &
Rubin, D. B. (1992). Comparing correlated correlation coefficients.
\emph{Psychological Bulletin}, 111(1), 172-175. \cr Neill, J. J., & Dunn, O.
J. (1975). Equality of dependent correlation coefficients.
\emph{Biometrics}, 31(2), 531-543. \cr Olkin, I., & Finn, J. D. (1990).
Testing correlated correlations. \emph{Psychological Bulletin}, 108(2),
330-333. \cr Silver, N. C., Hittner, J. B., & May, K. (2004). Testing
dependent correlations with nonoverlapping variables: A Monte Carlo
simulation. \emph{The Journal of experimental education}, 73(1), 53-69. \cr
Steiger, J. H. (1980). Tests for comparing elements of a correlation matrix.
\emph{Psychological Bulletin}, 87(2), 245-251. \cr Wilcox, R. R. (2012).
\emph{Introduction to robust estimation and hypothesis testing}
}
\seealso{
Analysts are also encouraged to explore robust methods for
evaluation of correlation comparison hypotheses. For example, see the work
of R. Wilcox (texts above and also
\emph{http://dornsife.usc.edu/labs/rwilcox/software/}).
}
\author{
Bruce Dudek \href{mailto:bruce.dudek@albany.edu}{bruce.dudek@albany.edu}
}
|
/man/test2r.steigerz2.Rd
|
no_license
|
anhnguyendepocen/bcdstats
|
R
| false
| true
| 5,061
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corrtests.R
\name{test2r.steigerz2}
\alias{test2r.steigerz2}
\title{Test the difference between two dependent correlations with
Steiger's z2 method}
\usage{
test2r.steigerz2(rjk, rhm, rjh, rkh, rkm, rjm, n, twotailed = TRUE)
}
\arguments{
\item{rjk}{The pearson product moment correlation that is to be tested
against \code{rhm} .}
\item{rhm}{The pearson product moment correlation that is to be tested
against \code{rjk}.}
\item{rjh}{One of the remaining four zero-order correlations among variables
j,k,h, and m.}
\item{rkh}{One of the remaining four zero-order correlations among variables
j,k,h, and m.}
\item{rkm}{One of the remaining four zero-order correlations among variables
j,k,h, and m.}
\item{rjm}{One of the remaining four zero-order correlations among variables
j,k,h, and m.}
\item{n}{Sample Size}
\item{twotailed}{The test can be two-tailed (\code{twotailed=TRUE}) or
one-tailed (\code{twotailed=FALSE}). The default is two-tailed.}
}
\value{
\item{z }{The test statistic value, a standard normal deviate (z'.)}
\item{pvalue }{the one- or two-tailed probability of the 't'.}
}
\description{
Differences in Pearson correlations are tested with Steiger's Z2 method.
The test is appropriate when the correlations are dependent. More
specifically, r(jk) is tested versus r(hm) in one sample of cases. Thus there
are four different variables involved in the analysis. The function
requires the input of the six possible bivariate Pearson product-moment
correlations among the four variables. One- and two-tailed tests are available.
}
\section{Related Functions}{
\code{test2r.steigerz2} is a member of a set of
functions that provide tests of differences between independent and
dependent correlations. The functions were inspired by the \code{paired.r}
function in the \strong{psych} package and some of the code is modeled on code
from that function. See:
\itemize{
\item
\code{\link[bcdstats:test2r.t2]{test2r.t2}}, Test two dependent correlations with the T2
method: r(yx1) vs r(yx2)
\item
\code{\link[bcdstats:test2r.mengz1]{test2r.mengz1}}, Test the difference between
two dependent correlations with the Meng z1 method: r(yx1) vs r(yx2) in one
sample of cases.
\item
\code{\link[bcdstats:test2r.steigerz1]{test2r.steigerz1}}, Test the difference between
two dependent correlations with the Steiger z1 method: r(yx1) vs r(yx2) in one
sample of cases.
\item
\code{\link[bcdstats:test2r.steigerz2]{test2r.steigerz2}}, the present
function
\item
\code{\link[bcdstats:test2r.ind]{test2r.ind}} Test two r(xy) from
Independent Groups }
}
\examples{
test2r.steigerz2(.6,.25,.4,.4,.4,.4,n=125)
test2r.steigerz2(.6,.25,.4,.4,.4,.4,n=125,twotailed=TRUE)
test2r.steigerz2(.6,.25,.4,.4,.4,.4,n=125,twotailed=FALSE)
test2r.steigerz2(.75,.50,.5,.4,.3,.76,n=100,twotailed=TRUE)
test2r.steigerz2(.75,.50,.5,.4,.3,.76,n=40,twotailed=TRUE)
}
\references{
Cheung, M. W. L., & Chan, W. (2004). Testing dependent
correlation coefficients via structural equation modeling.
\emph{Organizational Research Methods}, 7(2), 206-223. \cr Dunn, O. J., &
Clark, V. (1971). Comparison of tests of the equality of dependent
correlation coefficients. \emph{Journal of the American Statistical
Association}, 66(336), 904-908. \cr Hays, W. L. (1994). \emph{Statistics}
(5th ed.). Fort Worth: Harcourt College Publishers.\cr Hendrickson, G. F.,
Stanley, J. C., & Hills, J. R. (1970). Olkin's new formula for significance
of r13 vs. r23 compared with Hotelling's method. \emph{American Educational
Research Journal}, 7(2), 189-195. \cr Hittner, J. B., May, K., & Silver, N.
C. (2003). A Monte Carlo evaluation of tests for comparing dependent
correlations. \emph{The Journal of general psychology}, 130(2), 149-168. \cr
Howell, D. C. (2013). \emph{Statistical methods for psychology} (8th ed.).
Belmont, CA: Wadsworth Cengage Learning.\cr Meng, X. L., Rosenthal, R., &
Rubin, D. B. (1992). Comparing correlated correlation coefficients.
\emph{Psychological Bulletin}, 111(1), 172-175. \cr Neill, J. J., & Dunn, O.
J. (1975). Equality of dependent correlation coefficients.
\emph{Biometrics}, 31(2), 531-543. \cr Olkin, I., & Finn, J. D. (1990).
Testing correlated correlations. \emph{Psychological Bulletin}, 108(2),
330-333. \cr Silver, N. C., Hittner, J. B., & May, K. (2004). Testing
dependent correlations with nonoverlapping variables: A Monte Carlo
simulation. \emph{The Journal of experimental education}, 73(1), 53-69. \cr
Steiger, J. H. (1980). Tests for comparing elements of a correlation matrix.
\emph{Psychological Bulletin}, 87(2), 245-251. \cr Wilcox, R. R. (2012).
\emph{Introduction to robust estimation and hypothesis testing}
}
\seealso{
Analysts are also encouraged to explore robust methods for
evaluation of correlation comparison hypotheses. For example, see the work
of R. Wilcox (texts above and also
\emph{http://dornsife.usc.edu/labs/rwilcox/software/}).
}
\author{
Bruce Dudek \href{mailto:bruce.dudek@albany.edu}{bruce.dudek@albany.edu}
}
|
## Analyzing patch variability using vis_model
## written by Zach Quinlan for Ben Scott
## 10/22/2019
# READING -- libraries ------------------------------------------------------
library(tidyverse)
library(pavo)
library(phytools)
library(furrr)
library(future)
# SETTING CORES -- for parallelizing code ----------------------------------
# Use one fewer worker than the machine reports available.
num_cores <- availableCores() -1
# Set the planning strategy for how future_map() work is distributed.
# NOTE(review): `multiprocess` is deprecated in recent versions of the
# future package; `multisession` is the usual replacement -- confirm
# before upgrading the dependency.
plan(multiprocess, workers = num_cores)
# READING -- all CSV files in working directory ---------------------------
files <- dir(pattern = "*.csv")
reads <- files%>%
  purrr::map(read_csv)
# Patches shared across all measurement files.
# Bug fix: these were previously written as bare symbols
# (c(Crown, Auriculars, ...)), which fails at evaluation time with
# "object 'Crown' not found" -- patch names must be character strings.
overlap_patches <- c("Crown", "Auriculars", "Nape", "Mantle", "Back", "Rump",
                     "Dorsal.tail", "Throat", "Breast", "Side", "Belly",
                     "Undertail.coverts", "Scapulars", "Medium.Coverts",
                     "Greater.Coverts", "Primaries", "Secondaires")
# For every input file: reshape to long form, clean patch-name and species
# code typos, keep the shared patches, split species/sex, reshape back to
# wide, and run the pavo visual model per species/sex group.
vis_model <-reads%>%
  future_map(~ gather(., code, values, 2:ncol(.))%>%
               mutate(values = abs(values))%>%
               separate(code, c("patch", "speices_code"), sep = "_")%>%
               mutate(patch = case_when(patch == "Ventrail.tail" ~ "Ventral.tail",
                                        patch == "Undertail" ~ "Ventral.tail",
                                        patch == "PirFaries" ~ "Pirmaries",
                                        patch == "Wingbars" ~ "Wingbar",
                                        patch == "Beak.2" ~ "Back.2",
                                        patch == "Fantle" ~ "Mantle",
                                        patch == "FediuF.Coverts" ~ "Medium.Coverts",
                                        patch == "Mante.2" ~ "Mantle.2",
                                        patch == "RuFp" ~ "Rump",
                                        TRUE ~ as.character(patch)),
                      speices_code = case_when(speices_code == "GranSalF" ~ "GranSelF",
                                               TRUE ~ as.character(speices_code)))%>%
               # NOTE(review): at this point the patch names are VALUES of
               # the `patch` column, not columns, so this select likely
               # should be filter(patch %in% overlap_patches) -- confirm.
               dplyr::select(c(wl, overlap_patches))%>%
               separate(speices_code, c("species", "sex"), sep = -1)%>%
               spread(patch, values)%>%
               group_by(species, sex)%>%
               nest())%>%
  reduce(bind_rows)%>%
  mutate(data = future_map(data, ~ as.data.frame(.x)%>%
                             vismodel(., visual = "avg.uv", achromatic = "bt.dc")))%>%
  unnest(data)
write_csv(vis_model, "quatnum.dat")
|
/pipelines/Birds.R
|
no_license
|
Primordial-Haggis/birds_misc_R_Scripts
|
R
| false
| false
| 2,518
|
r
|
## Analyzing patch variability using vis_model
## written by Zach Quinlan for Ben Scott
## 10/22/2019
# READING -- libraries ------------------------------------------------------
library(tidyverse)
library(pavo)
library(phytools)
library(furrr)
library(future)
# SETTING CORES -- for parallelizing code ----------------------------------
# Use one fewer worker than the machine reports available.
num_cores <- availableCores() -1
# Set the planning strategy for how future_map() work is distributed.
# NOTE(review): `multiprocess` is deprecated in recent versions of the
# future package; `multisession` is the usual replacement -- confirm
# before upgrading the dependency.
plan(multiprocess, workers = num_cores)
# READING -- all CSV files in working directory ---------------------------
files <- dir(pattern = "*.csv")
reads <- files%>%
  purrr::map(read_csv)
# Patches shared across all measurement files.
# Bug fix: these were previously written as bare symbols
# (c(Crown, Auriculars, ...)), which fails at evaluation time with
# "object 'Crown' not found" -- patch names must be character strings.
overlap_patches <- c("Crown", "Auriculars", "Nape", "Mantle", "Back", "Rump",
                     "Dorsal.tail", "Throat", "Breast", "Side", "Belly",
                     "Undertail.coverts", "Scapulars", "Medium.Coverts",
                     "Greater.Coverts", "Primaries", "Secondaires")
# For every input file: reshape to long form, clean patch-name and species
# code typos, keep the shared patches, split species/sex, reshape back to
# wide, and run the pavo visual model per species/sex group.
vis_model <-reads%>%
  future_map(~ gather(., code, values, 2:ncol(.))%>%
               mutate(values = abs(values))%>%
               separate(code, c("patch", "speices_code"), sep = "_")%>%
               mutate(patch = case_when(patch == "Ventrail.tail" ~ "Ventral.tail",
                                        patch == "Undertail" ~ "Ventral.tail",
                                        patch == "PirFaries" ~ "Pirmaries",
                                        patch == "Wingbars" ~ "Wingbar",
                                        patch == "Beak.2" ~ "Back.2",
                                        patch == "Fantle" ~ "Mantle",
                                        patch == "FediuF.Coverts" ~ "Medium.Coverts",
                                        patch == "Mante.2" ~ "Mantle.2",
                                        patch == "RuFp" ~ "Rump",
                                        TRUE ~ as.character(patch)),
                      speices_code = case_when(speices_code == "GranSalF" ~ "GranSelF",
                                               TRUE ~ as.character(speices_code)))%>%
               # NOTE(review): at this point the patch names are VALUES of
               # the `patch` column, not columns, so this select likely
               # should be filter(patch %in% overlap_patches) -- confirm.
               dplyr::select(c(wl, overlap_patches))%>%
               separate(speices_code, c("species", "sex"), sep = -1)%>%
               spread(patch, values)%>%
               group_by(species, sex)%>%
               nest())%>%
  reduce(bind_rows)%>%
  mutate(data = future_map(data, ~ as.data.frame(.x)%>%
                             vismodel(., visual = "avg.uv", achromatic = "bt.dc")))%>%
  unnest(data)
write_csv(vis_model, "quatnum.dat")
|
library(rioja)
### Name: inkspot
### Title: Two-way ordered bubble plot of a species by sites data table
### Aliases: inkspot
### Keywords: hplot
### ** Examples
data(SWAP)
# Keep only the common taxa (maximum abundance above 10).
max_abund <- apply(SWAP$spec, 2, max)
common_spec <- SWAP$spec[, max_abund > 10]
# Basic plot of the data with a legend.
inkspot(common_spec, cex.axis=0.6)
# Order the sites by pH.
lake_ph <- SWAP$pH
inkspot(common_spec, lake_ph, cex.axis=0.6)
# Add an axis along the top of the plot.
inkspot(common_spec, lake_ph, x.axis.top=TRUE, cex.axis=0.6)
# Order by pH but space sites evenly to avoid label overlap.
inkspot(common_spec, lake_ph, use.rank=TRUE, x.axis.top=TRUE, cex.axis=0.6)
# Use full taxon names, widening the left margin to fit them.
previous_mar <- par("mar")
par(mar=c(3,12,2,1))
full_names <- SWAP$names[max_abund > 10, 2]
inkspot(common_spec, lake_ph, spec.names=as.character(full_names), use.rank=TRUE,
        x.axis.top=TRUE, cex.axis=0.6)
par(mar=previous_mar)
|
/data/genthat_extracted_code/rioja/examples/inkspot.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 781
|
r
|
library(rioja)
### Name: inkspot
### Title: Two-way ordered bubble plot of a species by sites data table
### Aliases: inkspot
### Keywords: hplot
### ** Examples
data(SWAP)
# Keep only the common taxa (maximum abundance above 10).
max_abund <- apply(SWAP$spec, 2, max)
common_spec <- SWAP$spec[, max_abund > 10]
# Basic plot of the data with a legend.
inkspot(common_spec, cex.axis=0.6)
# Order the sites by pH.
lake_ph <- SWAP$pH
inkspot(common_spec, lake_ph, cex.axis=0.6)
# Add an axis along the top of the plot.
inkspot(common_spec, lake_ph, x.axis.top=TRUE, cex.axis=0.6)
# Order by pH but space sites evenly to avoid label overlap.
inkspot(common_spec, lake_ph, use.rank=TRUE, x.axis.top=TRUE, cex.axis=0.6)
# Use full taxon names, widening the left margin to fit them.
previous_mar <- par("mar")
par(mar=c(3,12,2,1))
full_names <- SWAP$names[max_abund > 10, 2]
inkspot(common_spec, lake_ph, spec.names=as.character(full_names), use.rank=TRUE,
        x.axis.top=TRUE, cex.axis=0.6)
par(mar=previous_mar)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parmsurvfit.R
\docType{data}
\name{aggressive}
\alias{aggressive}
\title{Data on time until drivers honked their horn when being blocked from an intersection}
\format{A data frame with 57 rows and 2 variables:
\describe{
\item{seconds}{Number of seconds until showing signs of aggression}
\item{censor}{censoring status indicator variable (0 = censored event time, 1 = complete event time)}
}}
\source{
{https://stats.idre.ucla.edu/other/examples/alda/}
}
\usage{
aggressive
}
\description{
Diekmann et al. (1996) investigated how driver characteristics and the
social status of cars relate to aggressive driver
responses by measuring the time that elapsed between being
blocked and honking the horn. Researchers intentionally blocked
57 motorists at a green light by a Volkswagen Jetta, and recorded
the time it took for motorists to show signs of aggression.
Signs of aggression included honking their horn or beaming
the headlights at the Jetta.
}
\keyword{datasets}
|
/parmsurvfit/man/aggressive.Rd
|
no_license
|
shannonpileggi/SP18--RSurvival
|
R
| false
| true
| 1,067
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parmsurvfit.R
\docType{data}
\name{aggressive}
\alias{aggressive}
\title{Data on time until drivers honked their horn when being blocked from an intersection}
\format{A data frame with 57 rows and 2 variables:
\describe{
\item{seconds}{Number of seconds until showing signs of aggression}
\item{censor}{censoring status indicator variable (0 = censored event time, 1 = complete event time)}
}}
\source{
{https://stats.idre.ucla.edu/other/examples/alda/}
}
\usage{
aggressive
}
\description{
Diekmann et al. (1996) investigated how driver characteristics and the
social status of cars relate to aggressive driver
responses by measuring the time that elapsed between being
blocked and honking the horn. Researchers intentionally blocked
57 motorists at a green light by a Volkswagen Jetta, and recorded
the time it took for motorists to show signs of aggression.
Signs of aggression included honking their horn or beaming
the headlights at the Jetta.
}
\keyword{datasets}
|
# 26- Additional calculations for clustering.R
#
# 26-Total_Exp.R
#
# Copyright (c) 2018: Arin Shahbazian
# Licence: GPL-3
#
# Purpose: derive food-energy-intake (FEI) poverty lines per province by
# regressing per-capita expenditure on per-capita daily calories and
# predicting expenditure at a minimum calorie requirement (2100 kcal).
# NOTE(review): rm(list = ls()) wipes the caller's workspace; kept as-is
# to preserve current script behavior.
rm(list=ls())
starttime <- proc.time()
cat("\n\n================ Total =====================================\n")
library(yaml)
# Settings.yaml supplies file-system paths (e.g. HEISProcessedPath).
Settings <- yaml.load_file("Settings.yaml")
library(readxl)
library(reldist)
library(Hmisc)
library(dplyr)
library(data.table)
# Minimum daily per-capita calorie requirement anchoring the poverty
# line, plus its square for the quadratic expenditure~calorie models.
MinCalories <- 2100
MinCalories2 <- MinCalories^2
library(data.table)
# Load rural/urban household data for survey year 95 (creates the
# objects MyDataRural and MyDataUrban).
load(file = paste0(Settings$HEISProcessedPath,"Y","95","MyDataRural.rda"))
load(file = paste0(Settings$HEISProcessedPath,"Y","95","MyDataUrban.rda"))
#Sort by Province and Expenditure data
Rur <- MyDataRural[,.(Percentile=as.integer(Percentile),Per_Daily_Calories,Total_Exp_Month_Per,Total_Exp_Month_Per_nondurable,ProvinceCode,Weight)]
Urb <- MyDataUrban[,.(Percentile=as.integer(Percentile),Per_Daily_Calories,Total_Exp_Month_Per,Total_Exp_Month_Per_nondurable,ProvinceCode,Weight)]
Rur<- Rur[order(ProvinceCode,Total_Exp_Month_Per_nondurable)]
Urb<- Urb[order(ProvinceCode,Total_Exp_Month_Per_nondurable)]
#Calculate cumulative survey weights within each province
Rur$cumWeightProv <-ave(Rur$Weight, Rur$ProvinceCode, FUN=cumsum)
Urb$cumWeightProv <-ave(Urb$Weight, Urb$ProvinceCode, FUN=cumsum)
# NOTE(review): the next two lines reference Rur$cumWeight / Urb$cumWeight,
# but the column created above is cumWeightProv; data.table's `$` does not
# partially match, so this likely yields NULL. Also `by=` is not an ave()
# argument -- the list is treated as a grouping variable. Presumably
# ave(Rur$cumWeightProv, Rur$ProvinceCode, FUN=max) was intended; confirm
# before relying on rx/ux downstream.
Rur$rx<-ave(Rur$cumWeight, by=list(Rur$ProvinceCode), FUN=max)
Urb$ux<-ave(Urb$cumWeight, by=list(Urb$ProvinceCode), FUN=max)
#Calculate within-province percentiles (1..100) from the weights
Rur<- Rur[, ProvincePercentile := Rur$cumWeightProv/Rur$rx]
Rur<- Rur[, ProvincePercentile := ProvincePercentile*100]
Rur<- Rur[, ProvincePercentile := ceiling(ProvincePercentile)]
Urb<- Urb[, ProvincePercentile := Urb$cumWeightProv/Urb$ux]
Urb<- Urb[, ProvincePercentile := ProvincePercentile*100]
Urb<- Urb[, ProvincePercentile := ceiling(ProvincePercentile)]
######### calculate Rural Pov Line #########
# Work table d uses short names: pct (national percentile), cal (daily
# per-capita calories), exp (monthly per-capita expenditure), ndx
# (nondurable expenditure), prov (province code), w (survey weight),
# cumw (cumulative weight), provpct (within-province percentile).
d <- Rur[,.(Percentile=as.integer(Percentile),Per_Daily_Calories,Total_Exp_Month_Per,Total_Exp_Month_Per_nondurable,ProvinceCode,Weight,cumWeightProv,ProvincePercentile)]
setnames(d,c("pct","cal","exp","ndx","prov","w","cumw","provpct"))
# d2: restrict to the bottom 85 within-province percentiles.
d2 <- d [provpct<86]
#plot(cal~exp,data=d)
#plot(cal~exp,data=d2)
#plot(log(cal)~log(exp),data=d)
#plot(log(cal)~log(exp),data=d2)
d$cal2<-d$cal^2
d2$cal2<-d2$cal^2
# NOTE(review): dt2 is merged into Rur here, but Rur is not referenced
# again below (models run on d/d2/dx/dx2); verify the merge is needed.
load(file="dt2.rda")
Rur<-merge(Rur,dt2,by=c("ProvinceCode"),all.x = TRUE)
# Province-by-percentile means: all observations (dx) and bottom-85 (dx2).
dx <- d[,lapply(.SD, mean, na.rm=TRUE),by=.(provpct,prov)]
dx2 <- d2[,lapply(.SD, mean, na.rm=TRUE),by=.(provpct,prov)]
###########Rural-all###########
# For each province (ostan 0..30), fit expenditure~calorie models on
# household-level rural data and predict expenditure at MinCalories.
# Each prediction is written to the global environment via assign()
# under the name Rural<model#>PovLine<province>.  The summary() calls
# compute a result that is immediately discarded.
#Nonlog: quadratic model exp ~ cal + cal2 on all observations -> Rural1PovLine*
for(ostan in 0:30){
  nam <- paste0("Rur",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model1 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,d[prov==ostan]))
  summary(model1)
  nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Rural1PovLine",ostan)
  assign(nam2,nam3)
}
#log: log-log model on all observations -> Rural2PovLine*
# (zero-calorie rows are dropped first so log(cal) is defined)
d<-d[cal!=0]
for(ostan in 0:30){
  nam <- paste0("Rur",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model2 <- lm(log(exp) ~ log(cal), weights = w, data=assign(nam,d[prov==ostan]))
  summary(model2)
  nam3 <- predict(object = model2, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Rural2PovLine",ostan)
  nam3<-exp(nam3)
  assign(nam2,nam3)
}
#Nonlog-85 percent: quadratic model on bottom-85-percentile subset d2 -> Rural3PovLine*
for(ostan in 0:30){
  nam <- paste0("Rur",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model3 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,d2[prov==ostan]))
  summary(model3)
  nam3 <- predict(object = model3, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Rural3PovLine",ostan)
  assign(nam2,nam3)
}
#log-85 percent: log-log model on bottom-85-percentile subset -> Rural4PovLine*
d2<-d2[cal!=0]
for(ostan in 0:30){
  nam <- paste0("Rur",ostan)
  assign(nam,d2[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model4 <- lm(log(exp) ~ log(cal), weights = w, data=assign(nam,d2[prov==ostan]))
  summary(model4)
  nam3 <- predict(object = model4, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Rural4PovLine",ostan)
  nam3<-exp(nam3)
  assign(nam2,nam3)
}
###########Rural-percentiles###########
# Same four models, but fitted on the province-percentile MEANS (dx/dx2).
# NOTE(review): the output names Rural1PovLine*..Rural4PovLine* repeat
# those of the "Rural-all" section, so these results OVERWRITE the
# household-level ones -- confirm that is intended.
# NOTE(review): assign(nam, d[prov==ostan]) stores the household-level
# subset under Rur<ostan> while the model is actually fit on dx/dx2, so
# the stored object and the fitted data differ.
#Nonlog
for(ostan in 0:30){
  nam <- paste0("Rur",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model1 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,dx[prov==ostan]))
  summary(model1)
  nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Rural1PovLine",ostan)
  assign(nam2,nam3)
}
#log
dx<-dx[cal!=0]
for(ostan in 0:30){
  nam <- paste0("Rur",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model2 <- lm(log(exp) ~ log(cal), weights = w, data=assign(nam,dx[prov==ostan]))
  summary(model2)
  nam3 <- predict(object = model2, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Rural2PovLine",ostan)
  nam3<-exp(nam3)
  assign(nam2,nam3)
}
#Nonlog-85 percent
for(ostan in 0:30){
  nam <- paste0("Rur",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model3 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,dx2[prov==ostan]))
  summary(model3)
  nam3 <- predict(object = model3, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Rural3PovLine",ostan)
  assign(nam2,nam3)
}
#log-85 percent
dx2<-dx2[cal!=0]
for(ostan in 0:30){
  nam <- paste0("Rur",ostan)
  assign(nam,d2[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model4 <- lm(log(exp) ~ log(cal), weights = w, data=assign(nam,dx2[prov==ostan]))
  summary(model4)
  nam3 <- predict(object = model4, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Rural4PovLine",ostan)
  nam3<-exp(nam3)
  assign(nam2,nam3)
}
######### calculate Urban Pov Line #########
# Repeat the rural procedure for urban households; results are assigned
# into the global environment as Urban<model#>PovLine<province>.
d <- Urb[,.(Percentile=as.integer(Percentile),Per_Daily_Calories,Total_Exp_Month_Per,Total_Exp_Month_Per_nondurable,ProvinceCode,Weight,cumWeightProv,ProvincePercentile)]
setnames(d,c("pct","cal","exp","ndx","prov","w","cumw","provpct"))
# Bottom 85 within-province percentiles only.
d2 <- d [provpct<86]
#plot(cal~exp,data=d)
#plot(cal~exp,data=d2)
#plot(log(cal)~log(exp),data=d)
#plot(log(cal)~log(exp),data=d2)
d$cal2<-d$cal^2
d2$cal2<-d2$cal^2
# Province-by-percentile means: all observations (dx) and bottom-85 (dx2).
dx <- d[,lapply(.SD, mean, na.rm=TRUE),by=.(provpct,prov)]
dx2 <- d2[,lapply(.SD, mean, na.rm=TRUE),by=.(provpct,prov)]
############Urban-all############
#Nonlog: quadratic model on all observations -> Urban1PovLine*
for(ostan in 0:30){
  nam <- paste0("Urb",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model1 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,d[prov==ostan]))
  summary(model1)
  nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Urban1PovLine",ostan)
  assign(nam2,nam3)
}
#log: log-log model on all observations -> Urban2PovLine*
d<-d[cal!=0]
for(ostan in 0:30){
  nam <- paste0("Urb",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model2 <- lm(log(exp) ~ log(cal), weights = w, data=assign(nam,d[prov==ostan]))
  summary(model2)
  nam3 <- predict(object = model2, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Urban2PovLine",ostan)
  nam3<-exp(nam3)
  assign(nam2,nam3)
}
#Nonlog-85 percent: quadratic model on bottom-85 subset -> Urban3PovLine*
for(ostan in 0:30){
  nam <- paste0("Urb",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model3 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,d2[prov==ostan]))
  summary(model3)
  nam3 <- predict(object = model3, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Urban3PovLine",ostan)
  assign(nam2,nam3)
}
#log-85 percent: log-log model on bottom-85 subset -> Urban4PovLine*
d2<-d2[cal!=0]
for(ostan in 0:30){
  nam <- paste0("Urb",ostan)
  assign(nam,d2[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model4 <- lm(log(exp) ~ log(cal), weights = w, data=assign(nam,d2[prov==ostan]))
  summary(model4)
  nam3 <- predict(object = model4, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Urban4PovLine",ostan)
  nam3<-exp(nam3)
  assign(nam2,nam3)
}
############Urban-Percentiles############
# Same four models fitted on the province-percentile MEANS (dx/dx2).
# NOTE(review): the output names Urban1PovLine*..Urban4PovLine* repeat
# those of the "Urban-all" section, so these results OVERWRITE the
# household-level ones -- confirm that is intended.
# NOTE(review): assign(nam, d[prov==ostan]) stores the household-level
# subset while the model is fit on dx/dx2; stored object and fitted data
# differ.
#Nonlog
for(ostan in 0:30){
  nam <- paste0("Urb",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model1 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,dx[prov==ostan]))
  summary(model1)
  nam3 <- predict(object = model1, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Urban1PovLine",ostan)
  assign(nam2,nam3)
}
#log
dx<-dx[cal!=0]
for(ostan in 0:30){
  nam <- paste0("Urb",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model2 <- lm(log(exp) ~ log(cal), weights = w, data=assign(nam,dx[prov==ostan]))
  summary(model2)
  nam3 <- predict(object = model2, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Urban2PovLine",ostan)
  nam3<-exp(nam3)
  assign(nam2,nam3)
}
#Nonlog-85 percent
for(ostan in 0:30){
  nam <- paste0("Urb",ostan)
  assign(nam,d[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model3 <- lm(exp ~ cal + cal2 , weights = w, data=assign(nam,dx2[prov==ostan]))
  summary(model3)
  nam3 <- predict(object = model3, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Urban3PovLine",ostan)
  assign(nam2,nam3)
}
#log-85 percent
dx2<-dx2[cal!=0]
for(ostan in 0:30){
  nam <- paste0("Urb",ostan)
  assign(nam,d2[prov==ostan])
  # save(list=ls(pattern = nam),file = paste0(Settings$HEISProcessedPath,nam,".rda"))
  model4 <- lm(log(exp) ~ log(cal), weights = w, data=assign(nam,dx2[prov==ostan]))
  summary(model4)
  nam3 <- predict(object = model4, newdata = data.table(pct=NA,cal=MinCalories,cal2=MinCalories2,exp=NA,ndx=NA,w=NA))[[1]]
  nam2 <- paste0("Urban4PovLine",ostan)
  nam3<-exp(nam3)
  assign(nam2,nam3)
}
|
/R/Archive/Other Codes/62-FEI Poverty line for each province.R
|
no_license
|
IPRCIRI/IRHEIS
|
R
| false
| false
| 10,840
|
r
|
ο»Ώ# 26- Additional calculations for clustering.R
#
# 26-Total_Exp.R
#
# Copyright Β© 2018:Arin Shahbazian
# Licence: GPL-3
#
# NOTE(review): wiping the workspace inside a script is discouraged; kept for
# parity with the rest of this pipeline.
rm(list = ls())
starttime <- proc.time()
cat("\n\n================ Total =====================================\n")
library(yaml)
Settings <- yaml.load_file("Settings.yaml")
library(readxl)
library(reldist)
library(Hmisc)
library(dplyr)
library(data.table)  # Fix: was loaded twice in the original; once is enough.
# Minimum daily calorie requirement (and its square) used to anchor the
# food-energy-intake (FEI) poverty line.
MinCalories <- 2100
MinCalories2 <- MinCalories^2
load(file = paste0(Settings$HEISProcessedPath, "Y", "95", "MyDataRural.rda"))
load(file = paste0(Settings$HEISProcessedPath, "Y", "95", "MyDataUrban.rda"))
# Sort by Province and Expenditure data
Rur <- MyDataRural[,.(Percentile=as.integer(Percentile),Per_Daily_Calories,Total_Exp_Month_Per,Total_Exp_Month_Per_nondurable,ProvinceCode,Weight)]
Urb <- MyDataUrban[,.(Percentile=as.integer(Percentile),Per_Daily_Calories,Total_Exp_Month_Per,Total_Exp_Month_Per_nondurable,ProvinceCode,Weight)]
Rur <- Rur[order(ProvinceCode, Total_Exp_Month_Per_nondurable)]
Urb <- Urb[order(ProvinceCode, Total_Exp_Month_Per_nondurable)]
# Cumulative sampling weight within each province; because rows are sorted by
# nondurable expenditure, this traces the within-province expenditure CDF.
Rur$cumWeightProv <- ave(Rur$Weight, Rur$ProvinceCode, FUN = cumsum)
Urb$cumWeightProv <- ave(Urb$Weight, Urb$ProvinceCode, FUN = cumsum)
# Total weight per province (max of the cumulative sum).
# Fix: the original referenced Rur$cumWeight / Urb$cumWeight, which only
# resolved to cumWeightProv through data.frame partial name matching; use the
# full column name and the standard ave(x, group, FUN) form.
Rur$rx <- ave(Rur$cumWeightProv, Rur$ProvinceCode, FUN = max)
Urb$ux <- ave(Urb$cumWeightProv, Urb$ProvinceCode, FUN = max)
# Weighted within-province percentile, rounded up to 1..100.
# (Same arithmetic order as the original: divide, scale by 100, then ceiling.)
Rur[, ProvincePercentile := ceiling((cumWeightProv / rx) * 100)]
Urb[, ProvincePercentile := ceiling((cumWeightProv / ux) * 100)]
######### calculate Rural Pov Line #########
# Working table with short column names:
#   pct  = overall percentile        cal     = per-capita daily calories
#   exp  = monthly expenditure p.c.  ndx     = nondurable expenditure p.c.
#   prov = province code             w       = sampling weight
#   cumw = cumulative weight         provpct = within-province percentile
d <- Rur[,.(Percentile=as.integer(Percentile),Per_Daily_Calories,Total_Exp_Month_Per,Total_Exp_Month_Per_nondurable,ProvinceCode,Weight,cumWeightProv,ProvincePercentile)]
setnames(d,c("pct","cal","exp","ndx","prov","w","cumw","provpct"))
# Bottom-85-percent subsample (within-province percentile below 86).
d2 <- d [provpct<86]
#plot(cal~exp,data=d)
#plot(cal~exp,data=d2)
#plot(log(cal)~log(exp),data=d)
#plot(log(cal)~log(exp),data=d2)
# Squared calories for the quadratic specification.
d$cal2<-d$cal^2
d2$cal2<-d2$cal^2
# NOTE(review): "dt2.rda" is loaded from the current working directory
# (relative path) and merged into Rur, but Rur is not referenced again in the
# visible code below — confirm this merge is still needed.
load(file="dt2.rda")
Rur<-merge(Rur,dt2,by=c("ProvinceCode"),all.x = TRUE)
# Collapse to means by within-province percentile: dx for the full sample,
# dx2 for the bottom-85-percent subsample.
dx <- d[,lapply(.SD, mean, na.rm=TRUE),by=.(provpct,prov)]
dx2 <- d2[,lapply(.SD, mean, na.rm=TRUE),by=.(provpct,prov)]
########### Rural: household-level models ###########
# Four FEI specifications per province (ostan 0..30). Each loop stores the
# province subset in a global Rur<ostan> variable and the resulting poverty
# line in Rural<k>PovLine<ostan>.
# Fix: the original loops assigned Rur<ostan> twice (once outside and once
# inside the lm() call), sometimes from different tables; only the inner
# assignment survived, so each loop now subsets the correct table once.
# Quadratic model, full sample
for (ostan in 0:30) {
  prov_data <- d[prov == ostan]
  assign(paste0("Rur", ostan), prov_data)
  model1 <- lm(exp ~ cal + cal2, weights = w, data = prov_data)
  povline <- predict(object = model1,
                     newdata = data.table(pct = NA, cal = MinCalories,
                                          cal2 = MinCalories2, exp = NA,
                                          ndx = NA, w = NA))[[1]]
  assign(paste0("Rural1PovLine", ostan), povline)
}
# Log-log model, full sample (drop zero-calorie rows so log(cal) is defined)
d <- d[cal != 0]
for (ostan in 0:30) {
  prov_data <- d[prov == ostan]
  assign(paste0("Rur", ostan), prov_data)
  model2 <- lm(log(exp) ~ log(cal), weights = w, data = prov_data)
  povline <- exp(predict(object = model2,
                         newdata = data.table(pct = NA, cal = MinCalories,
                                              cal2 = MinCalories2, exp = NA,
                                              ndx = NA, w = NA))[[1]])
  assign(paste0("Rural2PovLine", ostan), povline)
}
# Quadratic model, bottom 85 percent
for (ostan in 0:30) {
  prov_data <- d2[prov == ostan]
  assign(paste0("Rur", ostan), prov_data)
  model3 <- lm(exp ~ cal + cal2, weights = w, data = prov_data)
  povline <- predict(object = model3,
                     newdata = data.table(pct = NA, cal = MinCalories,
                                          cal2 = MinCalories2, exp = NA,
                                          ndx = NA, w = NA))[[1]]
  assign(paste0("Rural3PovLine", ostan), povline)
}
# Log-log model, bottom 85 percent
d2 <- d2[cal != 0]
for (ostan in 0:30) {
  prov_data <- d2[prov == ostan]
  assign(paste0("Rur", ostan), prov_data)
  model4 <- lm(log(exp) ~ log(cal), weights = w, data = prov_data)
  povline <- exp(predict(object = model4,
                         newdata = data.table(pct = NA, cal = MinCalories,
                                              cal2 = MinCalories2, exp = NA,
                                              ndx = NA, w = NA))[[1]])
  assign(paste0("Rural4PovLine", ostan), povline)
}
########### Rural: province-percentile means ###########
# Same four FEI models, refit on dx/dx2 (per-percentile means within each
# province) instead of household-level data.
# NOTE(review): these loops overwrite the Rural<k>PovLine<ostan> variables
# produced by the household-level section above — confirm that is intended.
# Fix: the original assigned Rur<ostan> from d/d2 and then re-assigned it
# from dx/dx2 inside the lm() call; only the dx/dx2 subset survived, so we
# subset dx/dx2 once per loop.
# Quadratic model, all percentiles
for (ostan in 0:30) {
  prov_means <- dx[prov == ostan]
  assign(paste0("Rur", ostan), prov_means)
  model1 <- lm(exp ~ cal + cal2, weights = w, data = prov_means)
  povline <- predict(object = model1,
                     newdata = data.table(pct = NA, cal = MinCalories,
                                          cal2 = MinCalories2, exp = NA,
                                          ndx = NA, w = NA))[[1]]
  assign(paste0("Rural1PovLine", ostan), povline)
}
# Log-log model, all percentiles (drop zero-calorie rows so log(cal) is defined)
dx <- dx[cal != 0]
for (ostan in 0:30) {
  prov_means <- dx[prov == ostan]
  assign(paste0("Rur", ostan), prov_means)
  model2 <- lm(log(exp) ~ log(cal), weights = w, data = prov_means)
  povline <- exp(predict(object = model2,
                         newdata = data.table(pct = NA, cal = MinCalories,
                                              cal2 = MinCalories2, exp = NA,
                                              ndx = NA, w = NA))[[1]])
  assign(paste0("Rural2PovLine", ostan), povline)
}
# Quadratic model, bottom 85 percent of percentiles
for (ostan in 0:30) {
  prov_means <- dx2[prov == ostan]
  assign(paste0("Rur", ostan), prov_means)
  model3 <- lm(exp ~ cal + cal2, weights = w, data = prov_means)
  povline <- predict(object = model3,
                     newdata = data.table(pct = NA, cal = MinCalories,
                                          cal2 = MinCalories2, exp = NA,
                                          ndx = NA, w = NA))[[1]]
  assign(paste0("Rural3PovLine", ostan), povline)
}
# Log-log model, bottom 85 percent of percentiles
dx2 <- dx2[cal != 0]
for (ostan in 0:30) {
  prov_means <- dx2[prov == ostan]
  assign(paste0("Rur", ostan), prov_means)
  model4 <- lm(log(exp) ~ log(cal), weights = w, data = prov_means)
  povline <- exp(predict(object = model4,
                         newdata = data.table(pct = NA, cal = MinCalories,
                                              cal2 = MinCalories2, exp = NA,
                                              ndx = NA, w = NA))[[1]])
  assign(paste0("Rural4PovLine", ostan), povline)
}
######### calculate Urban Pov Line #########
# Rebuild the working table from the urban sample; same short column names as
# the rural section (pct, cal, exp, ndx, prov, w, cumw, provpct).
d <- Urb[,.(Percentile=as.integer(Percentile),Per_Daily_Calories,Total_Exp_Month_Per,Total_Exp_Month_Per_nondurable,ProvinceCode,Weight,cumWeightProv,ProvincePercentile)]
setnames(d,c("pct","cal","exp","ndx","prov","w","cumw","provpct"))
# Bottom-85-percent subsample (within-province percentile below 86).
d2 <- d [provpct<86]
#plot(cal~exp,data=d)
#plot(cal~exp,data=d2)
#plot(log(cal)~log(exp),data=d)
#plot(log(cal)~log(exp),data=d2)
# Squared calories for the quadratic specification.
d$cal2<-d$cal^2
d2$cal2<-d2$cal^2
# Means by within-province percentile: dx (full sample), dx2 (bottom 85%).
dx <- d[,lapply(.SD, mean, na.rm=TRUE),by=.(provpct,prov)]
dx2 <- d2[,lapply(.SD, mean, na.rm=TRUE),by=.(provpct,prov)]
############ Urban: household-level models ############
# Four FEI specifications per province (ostan 0..30). Each loop stores the
# province subset in a global Urb<ostan> variable and the resulting poverty
# line in Urban<k>PovLine<ostan>.
# Fix: the original loops assigned Urb<ostan> twice (once outside and once
# inside the lm() call), sometimes from different tables; only the inner
# assignment survived, so each loop now subsets the correct table once.
# Quadratic model, full sample
for (ostan in 0:30) {
  prov_data <- d[prov == ostan]
  assign(paste0("Urb", ostan), prov_data)
  model1 <- lm(exp ~ cal + cal2, weights = w, data = prov_data)
  povline <- predict(object = model1,
                     newdata = data.table(pct = NA, cal = MinCalories,
                                          cal2 = MinCalories2, exp = NA,
                                          ndx = NA, w = NA))[[1]]
  assign(paste0("Urban1PovLine", ostan), povline)
}
# Log-log model, full sample (drop zero-calorie rows so log(cal) is defined)
d <- d[cal != 0]
for (ostan in 0:30) {
  prov_data <- d[prov == ostan]
  assign(paste0("Urb", ostan), prov_data)
  model2 <- lm(log(exp) ~ log(cal), weights = w, data = prov_data)
  povline <- exp(predict(object = model2,
                         newdata = data.table(pct = NA, cal = MinCalories,
                                              cal2 = MinCalories2, exp = NA,
                                              ndx = NA, w = NA))[[1]])
  assign(paste0("Urban2PovLine", ostan), povline)
}
# Quadratic model, bottom 85 percent
for (ostan in 0:30) {
  prov_data <- d2[prov == ostan]
  assign(paste0("Urb", ostan), prov_data)
  model3 <- lm(exp ~ cal + cal2, weights = w, data = prov_data)
  povline <- predict(object = model3,
                     newdata = data.table(pct = NA, cal = MinCalories,
                                          cal2 = MinCalories2, exp = NA,
                                          ndx = NA, w = NA))[[1]]
  assign(paste0("Urban3PovLine", ostan), povline)
}
# Log-log model, bottom 85 percent
d2 <- d2[cal != 0]
for (ostan in 0:30) {
  prov_data <- d2[prov == ostan]
  assign(paste0("Urb", ostan), prov_data)
  model4 <- lm(log(exp) ~ log(cal), weights = w, data = prov_data)
  povline <- exp(predict(object = model4,
                         newdata = data.table(pct = NA, cal = MinCalories,
                                              cal2 = MinCalories2, exp = NA,
                                              ndx = NA, w = NA))[[1]])
  assign(paste0("Urban4PovLine", ostan), povline)
}
############ Urban: province-percentile means ############
# Same four FEI models, refit on dx/dx2 (per-percentile means within each
# province) instead of household-level data.
# NOTE(review): these loops overwrite the Urban<k>PovLine<ostan> variables
# produced by the household-level section above — confirm that is intended.
# Fix: the original assigned Urb<ostan> from d/d2 and then re-assigned it
# from dx/dx2 inside the lm() call; only the dx/dx2 subset survived, so we
# subset dx/dx2 once per loop.
# Quadratic model, all percentiles
for (ostan in 0:30) {
  prov_means <- dx[prov == ostan]
  assign(paste0("Urb", ostan), prov_means)
  model1 <- lm(exp ~ cal + cal2, weights = w, data = prov_means)
  povline <- predict(object = model1,
                     newdata = data.table(pct = NA, cal = MinCalories,
                                          cal2 = MinCalories2, exp = NA,
                                          ndx = NA, w = NA))[[1]]
  assign(paste0("Urban1PovLine", ostan), povline)
}
# Log-log model, all percentiles (drop zero-calorie rows so log(cal) is defined)
dx <- dx[cal != 0]
for (ostan in 0:30) {
  prov_means <- dx[prov == ostan]
  assign(paste0("Urb", ostan), prov_means)
  model2 <- lm(log(exp) ~ log(cal), weights = w, data = prov_means)
  povline <- exp(predict(object = model2,
                         newdata = data.table(pct = NA, cal = MinCalories,
                                              cal2 = MinCalories2, exp = NA,
                                              ndx = NA, w = NA))[[1]])
  assign(paste0("Urban2PovLine", ostan), povline)
}
# Quadratic model, bottom 85 percent of percentiles
for (ostan in 0:30) {
  prov_means <- dx2[prov == ostan]
  assign(paste0("Urb", ostan), prov_means)
  model3 <- lm(exp ~ cal + cal2, weights = w, data = prov_means)
  povline <- predict(object = model3,
                     newdata = data.table(pct = NA, cal = MinCalories,
                                          cal2 = MinCalories2, exp = NA,
                                          ndx = NA, w = NA))[[1]]
  assign(paste0("Urban3PovLine", ostan), povline)
}
# Log-log model, bottom 85 percent of percentiles
dx2 <- dx2[cal != 0]
for (ostan in 0:30) {
  prov_means <- dx2[prov == ostan]
  assign(paste0("Urb", ostan), prov_means)
  model4 <- lm(log(exp) ~ log(cal), weights = w, data = prov_means)
  povline <- exp(predict(object = model4,
                         newdata = data.table(pct = NA, cal = MinCalories,
                                              cal2 = MinCalories2, exp = NA,
                                              ndx = NA, w = NA))[[1]])
  assign(paste0("Urban4PovLine", ostan), povline)
}
|
#1
# Explore the categorical columns of the `comics` data set (assumed to be a
# data frame with factor columns `align` and `gender` — defined elsewhere).
# Print the first rows of the data
comics
# Check levels of align
levels(comics$align)
# Check the levels of gender
levels(comics$gender)
# Create a 2-way contingency table
table(comics$align, comics$gender)
|
/Exploratory Analysis in R/exploring categorical data/1.R
|
no_license
|
Pranav-Polavarapu/Datacamp-Data-Scientist-with-R-Track-
|
R
| false
| false
| 226
|
r
|
#1
# Explore the categorical columns of the `comics` data set (assumed to be a
# data frame with factor columns `align` and `gender` — defined elsewhere).
# Print the first rows of the data
comics
# Check levels of align
levels(comics$align)
# Check the levels of gender
levels(comics$gender)
# Create a 2-way contingency table
table(comics$align, comics$gender)
|
#' Plot a GGB object
#'
#' Uses image function from the \pkg{Matrix} R package.
#'
#' @param x ggb object as returned by \code{\link{ggb}}
#' @param subset which lambda values to plot, expressed as a subset of indices,
#' between 1 and \code{length(x$Sig)}
#' @param ... additional arguments to pass to \code{Matrix::image}
#' @export
plot.ggb <- function(x, subset = seq_along(x$Sig), ...) {
  stopifnot(subset %in% seq_along(x$Sig))
  # Lay the selected panels out in a near-square grid.
  # (Fix: the original computed length(x$Sig) into num_sig and immediately
  # overwrote it with length(subset); the dead assignment is removed.)
  num_sig <- length(subset)
  nrow <- floor(sqrt(num_sig))
  ncol <- ceiling(num_sig / nrow)
  a <- lapply(x$Sig[subset], image_covariance)
  for (i in seq_along(subset)) {
    # Title each panel with its (rounded) lambda and strip the axes and most
    # of the padding so the grid stays compact.
    a[[i]]$main <- list(label = as.character(round(x$lambda[subset[i]], 5)),
                        cex = 0.75)
    a[[i]]$x.scales$draw <- FALSE
    a[[i]]$y.scales$draw <- FALSE
    a[[i]]$par.settings$layout.widths = list(left.padding = -2,
                                             right.padding = -2)
    a[[i]]$par.settings$layout.heights = list(top.padding = 1,
                                              bottom.padding = 0)
  }
  if (length(a) == 1) return(print(a[[1]]))
  # Print all but the last panel with more = TRUE so lattice keeps the page
  # open; the final print() flushes it. Note the last call deliberately uses
  # the loop index i, which survives past the for loop in R.
  for (i in seq(length(subset) - 1)) {
    print(a[[i]], split = c(icolrow(i, ncol), ncol, nrow), more = TRUE, ...)
  }
  print(a[[i + 1]], split = c(icolrow(i + 1, ncol), ncol, nrow), ...)
}
icolrow <- function(i, ncol) {
  # Map a 1-based linear panel index to a (column, row) pair for lattice's
  # `split` argument, filling row-by-row like par(mfrow).
  z <- i - 1
  row0 <- floor(z / ncol)
  col0 <- z - ncol * row0
  c(col0, row0) + 1
}
#' Show the Image of a Covariance Matrix
#'
#' Thin wrapper that coerces the input with \code{Matrix::Matrix} and then
#' draws it with the \code{image} method from the \pkg{Matrix} package.
#' @param Sig covariance matrix
#' @param sub subtitle, default NULL
#' @param xlab x label, default NULL
#' @param ylab y label, default NULL
#' @param ... additional arguments to pass to \code{Matrix::image}
#' @export
image_covariance <- function(Sig, sub = NULL, xlab = NULL, ylab = NULL, ...) {
  m <- Matrix::Matrix(Sig)
  Matrix::image(m, sub = sub, xlab = xlab, ylab = ylab, ...)
}
|
/R/plot.ggb.R
|
no_license
|
jacobbien/ggb
|
R
| false
| false
| 1,985
|
r
|
#' Plot a GGB object
#'
#' Uses image function from the \pkg{Matrix} R package.
#'
#' @param x ggb object as returned by \code{\link{ggb}}
#' @param subset which lambda values to plot, expressed as a subset of indices,
#' between 1 and \code{length(x$Sig)}
#' @param ... additional arguments to pass to \code{Matrix::image}
#' @export
plot.ggb <- function(x, subset = seq_along(x$Sig), ...) {
  stopifnot(subset %in% seq_along(x$Sig))
  # Lay the selected panels out in a near-square grid.
  # (Fix: the original computed length(x$Sig) into num_sig and immediately
  # overwrote it with length(subset); the dead assignment is removed.)
  num_sig <- length(subset)
  nrow <- floor(sqrt(num_sig))
  ncol <- ceiling(num_sig / nrow)
  a <- lapply(x$Sig[subset], image_covariance)
  for (i in seq_along(subset)) {
    # Title each panel with its (rounded) lambda and strip the axes and most
    # of the padding so the grid stays compact.
    a[[i]]$main <- list(label = as.character(round(x$lambda[subset[i]], 5)),
                        cex = 0.75)
    a[[i]]$x.scales$draw <- FALSE
    a[[i]]$y.scales$draw <- FALSE
    a[[i]]$par.settings$layout.widths = list(left.padding = -2,
                                             right.padding = -2)
    a[[i]]$par.settings$layout.heights = list(top.padding = 1,
                                              bottom.padding = 0)
  }
  if (length(a) == 1) return(print(a[[1]]))
  # Print all but the last panel with more = TRUE so lattice keeps the page
  # open; the final print() flushes it. Note the last call deliberately uses
  # the loop index i, which survives past the for loop in R.
  for (i in seq(length(subset) - 1)) {
    print(a[[i]], split = c(icolrow(i, ncol), ncol, nrow), more = TRUE, ...)
  }
  print(a[[i + 1]], split = c(icolrow(i + 1, ncol), ncol, nrow), ...)
}
icolrow <- function(i, ncol) {
  # Map a 1-based linear panel index to a (column, row) pair for lattice's
  # `split` argument, filling row-by-row like par(mfrow).
  z <- i - 1
  row0 <- floor(z / ncol)
  col0 <- z - ncol * row0
  c(col0, row0) + 1
}
#' Show the Image of a Covariance Matrix
#'
#' Thin wrapper that coerces the input with \code{Matrix::Matrix} and then
#' draws it with the \code{image} method from the \pkg{Matrix} package.
#' @param Sig covariance matrix
#' @param sub subtitle, default NULL
#' @param xlab x label, default NULL
#' @param ylab y label, default NULL
#' @param ... additional arguments to pass to \code{Matrix::image}
#' @export
image_covariance <- function(Sig, sub = NULL, xlab = NULL, ylab = NULL, ...) {
  m <- Matrix::Matrix(Sig)
  Matrix::image(m, sub = sub, xlab = xlab, ylab = ylab, ...)
}
|
#' Report status of workflowr project
#'
#' \code{wflow_status} reports the analysis files that require user action.
#'
#' \code{wflow_status} reports analysis files with one of the following
#' statuses:
#'
#' \itemize{
#'
#' \item \bold{Mod}: Modified file. Any published file that has been modified
#' since the last time the HTML was published.
#'
#' \item \bold{Unp}: Unpublished file. Any tracked file whose corresponding HTML
#' is not tracked. May or may not have staged or unstaged changes.
#'
#' \item \bold{Scr}: Scratch file. Any untracked file that is not specifically
#' ignored.
#'
#' }
#'
#' \code{wflow_status} only works for workflowr projects that use Git.
#'
#' @param files character (default: NULL) The analysis file(s) to report the
#' status. By default checks the status of all analysis files. Supports
#' file \href{https://en.wikipedia.org/wiki/Glob_(programming)}{globbing}.
#' @param include_git_status logical (default: TRUE) Include the Git status of
#' the project files in the output. Note that this excludes any files in the
#' website directory, since these generated files should only be committed by
#' workflowr, and not the user.
#' @param project character (default: ".") By default the function assumes the
#' current working directory is within the project. If this is not true,
#' you'll need to provide the path to the project directory.
#'
#' @return Returns an object of class \code{wflow_status}, which is a list with
#' the following elements:
#'
#' \itemize{
#'
#' \item \bold{root}: The relative path to the root directory of the workflowr
#' project (i.e. contains the RStudio .Rproj file).
#'
#' \item \bold{analysis}: The relative path to the directory that contains
#' \code{_site.yml} and the R Markdown files.
#'
#' \item \bold{docs}: The relative path to the directory that contains the HTML
#' files and figures.
#'
#' \item \bold{git}: The relative path to the \code{.git} directory that
#' contains the history of the Git repository.
#'
#' \item \bold{site_yml}: \code{TRUE} if the configuration file \code{_site.yml}
#' has uncommitted changes, otherwise \code{FALSE}.
#'
#' \item \bold{wflow_yml}: \code{TRUE} if the configuration file
#' \code{_workflowr.yml} has uncommitted changes, otherwise \code{FALSE}. If the
#' file does not exist, the result is \code{NULL}. If the file was recently
#' deleted and not yet committed to Git, then it will be \code{TRUE}.
#'
#' \item \bold{git_status} The Git status as a \code{git_status}
#' object from the package \link{git2r} (see \code{git2r::\link[git2r]{status}}).
#'
#' \item \bold{include_git_status} The argument \code{include_git_status}
#' indicating whether the Git status should be printed along with the status of
#' the Rmd files.
#'
#' \item \bold{status}: A data frame with detailed information on the status of
#' each R Markdown file (see below).
#'
#' }
#'
#' The data frame \code{status} contains the following non-mutually exclusive
#' columns (all logical vectors):
#'
#' \itemize{
#'
#' \item \bold{ignored}: The R Markdown file has been ignored by Git according
#' to the patterns in the file \code{.gitignore}.
#'
#' \item \bold{mod_unstaged}: The R Markdown file has unstaged modifications.
#'
#' \item \bold{conflicted}: The R Markdown file has merge conflicts.
#'
#' \item \bold{mod_staged}: The R Markdown file has staged modifications.
#'
#' \item \bold{tracked}: The R Markdown file is tracked by Git.
#'
#' \item \bold{committed}: The R Markdown file has been previously committed to
#' the Git repository.
#'
#' \item \bold{published}: The corresponding HTML file has been previously
#' committed.
#'
#' \item \bold{mod_committed}: The R Markdown file has modifications that have
#' been committed since the last time the HTML was built and committed.
#'
#' \item \bold{modified}: The R Markdown file has been modified since it was
#' last published (i.e. \code{mod_unstaged} or \code{mod_staged} or
#' \code{mod_committed}).
#'
#' \item \bold{unpublished}: The R Markdown file is tracked by Git but not
#' published (i.e. the HTML has not been committed).
#'
#' \item \bold{scratch}: The R Markdown file is untracked by Git, i.e. it is
#' considered a scratch file until it is committed.
#'
#' }
#'
#' @examples
#' \dontrun{
#'
#' wflow_status()
#' # Get status of specific file(s)
#' wflow_status("analysis/file.Rmd")
#' # Save the results
#' s <- wflow_status()
#' }
#' @export
wflow_status <- function(files = NULL, include_git_status = TRUE, project = ".") {
  # Validate/normalize inputs: only Rmd files, converted to relative paths.
  files <- process_input_files(files, allow_null = TRUE, rmd_only = TRUE,
                               convert_to_relative_paths = TRUE)
  assert_is_flag(include_git_status)
  check_wd_exists()
  assert_is_single_directory(project)
  project <- absolute(project)
  # Optionally save open editor buffers before inspecting the working tree.
  if (isTRUE(getOption("workflowr.autosave"))) autosave()
  # Obtain list of workflowr paths. Throw error if no Git repository.
  o <- wflow_paths(error_git = TRUE, project = project)
  # Gather analysis files
  # (files that start with an underscore are ignored)
  files_analysis <- list.files(path = o$analysis, pattern = "^[^_].+[Rr]md$",
                               full.names = TRUE)
  files_analysis <- relative(files_analysis)
  # Restrict to the requested files (if any).
  if (!is.null(files)) {
    files_analysis <- files_analysis[match(files, files_analysis)]
  }
  if (length(files_analysis) == 0)
    stop("files did not include any analysis files")
  # Obtain status of each R Markdown file
  r <- git2r::repository(o$git)
  s <- git2r::status(r, ignored = TRUE)
  s_df <- status_to_df(s)
  # Fix file paths: git2r reports paths relative to the repo working
  # directory, so prefix it before converting to project-relative paths.
  s_df$file <- file.path(git2r::workdir(r), s_df$file)
  s_df$file <- relative(s_df$file)
  # Categorize all files by git status
  f_ignored <- s_df$file[s_df$status == "ignored"]
  f_unstaged <- s_df$file[s_df$status == "unstaged"]
  f_conflicted <- s_df$file[s_df$substatus == "conflicted"]
  f_staged <- s_df$file[s_df$status == "staged"]
  f_untracked <- s_df$file[s_df$status == "untracked"]
  # Determine status of each analysis file (i.e. Rmd) in the Git repository.
  # Each status is a logical vector.
  ignored <- files_analysis %in% f_ignored
  mod_unstaged <- files_analysis %in% f_unstaged
  conflicted <- files_analysis %in% f_conflicted
  mod_staged <- files_analysis %in% f_staged
  # Tracked = neither untracked nor ignored.
  tracked <- files_analysis %in% setdiff(files_analysis,
                                         c(f_untracked, f_ignored))
  files_committed <- get_committed_files(r)
  files_committed <- relative(files_committed)
  committed <- files_analysis %in% files_committed
  # Published = the corresponding HTML output file has been committed.
  files_html <- to_html(files_analysis, outdir = o$docs)
  published <- files_html %in% files_committed
  # If a user somehow committed the HTML file but not the source Rmd file, which
  # is impossible to do with wflow_publish(), the workflowr report will show a
  # warning. However, it will also cause an error when trying to access the date
  # of the last commit to the Rmd file
  html_only <- !committed & published
  if (any(html_only)) {
    published[html_only] <- FALSE
    html_only_files <- files_analysis[html_only]
    warning(call. = FALSE, immediate. = TRUE, wrap(
      "The following R Markdown file(s) have not been committed to the
      Git repository but their corresponding HTML file(s) have. This
      violates the reproducibility guarantee of workflowr. Please
      publish these files using wflow_publish() to fix this situation."),
      "\n\n", paste(html_only_files, collapse = "\n"))
  }
  # Do published files have subsequently committed changes?
  files_outdated <- get_outdated_files(r,
                                       absolute(files_analysis[published]),
                                       outdir = absolute(o$docs))
  files_outdated <- relative(files_outdated)
  mod_committed <- files_analysis %in% files_outdated
  # Highlevel designations
  # Status Mod: published and changed (unstaged, staged, or committed) since.
  modified <- published & (mod_unstaged | mod_staged | mod_committed)
  # Status Unp
  #
  # Unpublished file. Any tracked file whose corresponding HTML is not tracked.
  # May or may not have staged or unstaged changes.
  unpublished <- tracked & !published
  # Status Scr
  #
  # Scratch file. Any untracked file that is not specifically ignored.
  scratch <- !tracked & !ignored
  # Determine if _site.yml has been edited
  o$site_yml <- FALSE
  site_yml_path <- relative(file.path(o$analysis, "_site.yml"))
  if (site_yml_path %in% s_df$file) o$site_yml <- TRUE
  # Determine if _workflowr.yml has been edited
  # (NULL when the file does not exist; TRUE also covers a deleted-but-
  # uncommitted file, since it then still appears in the Git status.)
  o$wflow_yml <- FALSE
  wflow_yml_path <- relative(file.path(o$root, "_workflowr.yml"))
  if (!file.exists(wflow_yml_path)) o$wflow_yml <- NULL
  if (wflow_yml_path %in% s_df$file) o$wflow_yml <- TRUE
  o$status <- data.frame(ignored, mod_unstaged, conflicted, mod_staged, tracked,
                         committed, published, mod_committed, modified,
                         unpublished, scratch,
                         row.names = files_analysis)
  # Passing the Git status to print.wflow_status()
  o$include_git_status <- include_git_status
  o$git_status <- s
  class(o) <- "wflow_status"
  return(o)
}
#' @export
print.wflow_status <- function(x, ...) {
  # The legend key to explain abbreviations of file status
  key <- character()
  # Report totals
  cat(sprintf("Status of %d Rmd files\n\nTotals:\n", nrow(x$status)))
  if (sum(x$status$published) > 0 & sum(x$status$modified) > 0) {
    cat(sprintf(" %d Published (%d Modified)\n",
                sum(x$status$published), sum(x$status$modified)))
    key <- c(key, "Mod = Modified")
  } else if (sum(x$status$published) > 0) {
    cat(sprintf(" %d Published\n", sum(x$status$published)))
  }
  if (sum(x$status$unpublished) > 0) {
    cat(sprintf(" %d Unpublished\n", sum(x$status$unpublished)))
    key <- c(key, "Unp = Unpublished")
  }
  if (sum(x$status$scratch) > 0) {
    cat(sprintf(" %d Scratch\n", sum(x$status$scratch)))
    key <- c(key, "Scr = Scratch (Untracked)")
  }
  # Files needing attention, labeled by their status abbreviation (the order
  # of names(f) mirrors the order of concatenation above).
  f <- c(rownames(x$status)[x$status$modified],
         rownames(x$status)[x$status$unpublished],
         rownames(x$status)[x$status$scratch])
  names(f) <- rep(c("Mod", "Unp", "Scr"),
                  times = c(sum(x$status$modified),
                            sum(x$status$unpublished),
                            sum(x$status$scratch)))
  if (length(f) > 0) {
    cat("\nThe following Rmd files require attention:\n\n")
  }
  for (i in seq_along(f)) {
    o <- sprintf("%s %s\n", names(f)[i], f[i])
    cat(o)
  }
  if (length(f) > 0) {
    cat(sprintf("\nKey: %s\n", paste(key, collapse = ", ")))
  }
  if (x$include_git_status) {
    # Show the project-wide Git status (website output dir and ignored files
    # removed by scrub_status()).
    s <- scrub_status(x$git_status, git2r::repository(x$git), output_dir = x$docs,
                      remove_ignored = TRUE)
    s_df <- status_to_df(s)
    if (nrow(s_df) > 0) {
      # NOTE(review): paths are prefixed with x$git here, whereas
      # wflow_status() prefixes git2r::workdir(r) — confirm x$git resolves to
      # the repository root and not the .git directory.
      s_df$file <- file.path(x$git, s_df$file)
      s_df$file <- relative(s_df$file)
      cat("\nThe current Git status is:\n\n")
      # Temporarily widen the console so the status table does not wrap.
      prev <- options(width = 200)
      cat(paste(utils::capture.output(print(s_df, row.names = FALSE)), collapse = "\n"))
      options(prev)
      cat("\n")
    } else {
      cat("\nThe current Git status is: working directory clean\n")
    }
  }
  if (length(f) == 0) {
    cat("\nRmd files are up-to-date\n")
  } else {
    cat("\n")
    cat(wrap("To publish your changes as part of your website, use `wflow_publish()`."))
    cat("\n")
    cat(wrap("To commit your changes without publishing them yet, use `wflow_git_commit()`."))
    cat("\n")
  }
  if (x$site_yml) {
    site_yml_path <- relative(file.path(x$analysis, "_site.yml"))
    cat(glue::glue("\n\nThe config file {site_yml_path} has been edited.\n\n"))
  }
  if (!is.null(x$wflow_yml) && x$wflow_yml) {
    wflow_yml_path <- relative(file.path(x$root, "_workflowr.yml"))
    cat(glue::glue("\n\nThe config file {wflow_yml_path} has been edited.\n\n"))
  }
  # It's a convention for S3 print methods to invisibly return the original
  # object, e.g. base::print.summaryDefault and stats:::print.lm. I don't
  # understand why this is useful. Anyone know why?
  return(invisible(x))
}
|
/R/wflow_status.R
|
permissive
|
workflowr/workflowr
|
R
| false
| false
| 12,040
|
r
|
#' Report status of workflowr project
#'
#' \code{wflow_status} reports the analysis files that require user action.
#'
#' \code{wflow_status} reports analysis files with one of the following
#' statuses:
#'
#' \itemize{
#'
#' \item \bold{Mod}: Modified file. Any published file that has been modified
#' since the last time the HTML was published.
#'
#' \item \bold{Unp}: Unpublished file. Any tracked file whose corresponding HTML
#' is not tracked. May or may not have staged or unstaged changes.
#'
#' \item \bold{Scr}: Scratch file. Any untracked file that is not specifically
#' ignored.
#'
#' }
#'
#' \code{wflow_status} only works for workflowr projects that use Git.
#'
#' @param files character (default: NULL) The analysis file(s) to report the
#' status. By default checks the status of all analysis files. Supports
#' file \href{https://en.wikipedia.org/wiki/Glob_(programming)}{globbing}.
#' @param include_git_status logical (default: TRUE) Include the Git status of
#' the project files in the output. Note that this excludes any files in the
#' website directory, since these generated files should only be committed by
#' workflowr, and not the user.
#' @param project character (default: ".") By default the function assumes the
#' current working directory is within the project. If this is not true,
#' you'll need to provide the path to the project directory.
#'
#' @return Returns an object of class \code{wflow_status}, which is a list with
#' the following elements:
#'
#' \itemize{
#'
#' \item \bold{root}: The relative path to the root directory of the workflowr
#' project (i.e. contains the RStudio .Rproj file).
#'
#' \item \bold{analysis}: The relative path to the directory that contains
#' \code{_site.yml} and the R Markdown files.
#'
#' \item \bold{docs}: The relative path to the directory that contains the HTML
#' files and figures.
#'
#' \item \bold{git}: The relative path to the \code{.git} directory that
#' contains the history of the Git repository.
#'
#' \item \bold{site_yml}: \code{TRUE} if the configuration file \code{_site.yml}
#' has uncommitted changes, otherwise \code{FALSE}.
#'
#' \item \bold{wflow_yml}: \code{TRUE} if the configuration file
#' \code{_workflowr.yml} has uncommitted changes, otherwise \code{FALSE}. If the
#' file does not exist, the result is \code{NULL}. If the file was recently
#' deleted and not yet committed to Git, then it will be \code{TRUE}.
#'
#' \item \bold{git_status} The Git status as a \code{git_status}
#' object from the package \link{git2r} (see \code{git2r::\link[git2r]{status}}).
#'
#' \item \bold{include_git_status} The argument \code{include_git_status}
#' indicating whether the Git status should be printed along with the status of
#' the Rmd files.
#'
#' \item \bold{status}: A data frame with detailed information on the status of
#' each R Markdown file (see below).
#'
#' }
#'
#' The data frame \code{status} contains the following non-mutually exclusive
#' columns (all logical vectors):
#'
#' \itemize{
#'
#' \item \bold{ignored}: The R Markdown file has been ignored by Git according
#' to the patterns in the file \code{.gitignore}.
#'
#' \item \bold{mod_unstaged}: The R Markdown file has unstaged modifications.
#'
#' \item \bold{conflicted}: The R Markdown file has merge conflicts.
#'
#' \item \bold{mod_staged}: The R Markdown file has staged modifications.
#'
#' \item \bold{tracked}: The R Markdown file is tracked by Git.
#'
#' \item \bold{committed}: The R Markdown file has been previously committed to
#' the Git repository.
#'
#' \item \bold{published}: The corresponding HTML file has been previously
#' committed.
#'
#' \item \bold{mod_committed}: The R Markdown file has modifications that have
#' been committed since the last time the HTML was built and committed.
#'
#' \item \bold{modified}: The R Markdown file has been modified since it was
#' last published (i.e. \code{mod_unstaged} or \code{mod_staged} or
#' \code{mod_committed}).
#'
#' \item \bold{unpublished}: The R Markdown file is tracked by Git but not
#' published (i.e. the HTML has not been committed).
#'
#' \item \bold{scratch}: The R Markdown file is untracked by Git, i.e. it is
#' considered a scratch file until it is committed.
#'
#' }
#'
#' @examples
#' \dontrun{
#'
#' wflow_status()
#' # Get status of specific file(s)
#' wflow_status("analysis/file.Rmd")
#' # Save the results
#' s <- wflow_status()
#' }
#' @export
wflow_status <- function(files = NULL, include_git_status = TRUE, project = ".") {
  # Normalize user input: only Rmd files are accepted, and paths are converted
  # to be relative to the current working directory.
  files <- process_input_files(files, allow_null = TRUE, rmd_only = TRUE,
                               convert_to_relative_paths = TRUE)
  assert_is_flag(include_git_status)
  check_wd_exists()
  assert_is_single_directory(project)
  project <- absolute(project)
  # Honor the workflowr autosave option before inspecting file state.
  if (isTRUE(getOption("workflowr.autosave"))) autosave()
  # Obtain list of workflowr paths. Throw error if no Git repository.
  o <- wflow_paths(error_git = TRUE, project = project)
  # Gather analysis files
  # (files that start with an underscore are ignored)
  files_analysis <- list.files(path = o$analysis, pattern = "^[^_].+[Rr]md$",
                               full.names = TRUE)
  files_analysis <- relative(files_analysis)
  # Restrict the report to the user-requested files, if any were given.
  if (!is.null(files)) {
    files_analysis <- files_analysis[match(files, files_analysis)]
  }
  if (length(files_analysis) == 0)
    stop("files did not include any analysis files")
  # Obtain status of each R Markdown file
  r <- git2r::repository(o$git)
  s <- git2r::status(r, ignored = TRUE)
  s_df <- status_to_df(s)
  # Fix file paths: git2r reports paths relative to the repo root, so prepend
  # the working directory and then re-relativize for consistent comparison.
  s_df$file <- file.path(git2r::workdir(r), s_df$file)
  s_df$file <- relative(s_df$file)
  # Categorize all files by git status
  f_ignored <- s_df$file[s_df$status == "ignored"]
  f_unstaged <- s_df$file[s_df$status == "unstaged"]
  f_conflicted <- s_df$file[s_df$substatus == "conflicted"]
  f_staged <- s_df$file[s_df$status == "staged"]
  f_untracked <- s_df$file[s_df$status == "untracked"]
  # Determine status of each analysis file (i.e. Rmd) in the Git repository.
  # Each status is a logical vector.
  ignored <- files_analysis %in% f_ignored
  mod_unstaged <- files_analysis %in% f_unstaged
  conflicted <- files_analysis %in% f_conflicted
  mod_staged <- files_analysis %in% f_staged
  # Tracked = neither untracked nor ignored.
  tracked <- files_analysis %in% setdiff(files_analysis,
                                         c(f_untracked, f_ignored))
  files_committed <- get_committed_files(r)
  files_committed <- relative(files_committed)
  committed <- files_analysis %in% files_committed
  # An Rmd file counts as published when its corresponding HTML in the docs
  # directory has been committed.
  files_html <- to_html(files_analysis, outdir = o$docs)
  published <- files_html %in% files_committed
  # If a user somehow committed the HTML file but not the source Rmd file, which
  # is impossible to do with wflow_publish(), the workflowr report will show a
  # warning. However, it will also cause an error when trying to access the date
  # of the last commit to the Rmd file
  html_only <- !committed & published
  if (any(html_only)) {
    published[html_only] <- FALSE
    html_only_files <- files_analysis[html_only]
    warning(call. = FALSE, immediate. = TRUE, wrap(
      "The following R Markdown file(s) have not been committed to the
      Git repository but their corresponding HTML file(s) have. This
      violates the reproducibility guarantee of workflowr. Please
      publish these files using wflow_publish() to fix this situation."),
      "\n\n", paste(html_only_files, collapse = "\n"))
  }
  # Do published files have subsequently committed changes?
  files_outdated <- get_outdated_files(r,
                                       absolute(files_analysis[published]),
                                       outdir = absolute(o$docs))
  files_outdated <- relative(files_outdated)
  mod_committed <- files_analysis %in% files_outdated
  # Highlevel designations
  # Modified = published and changed in any way (unstaged, staged, or committed
  # after the HTML was last built).
  modified <- published & (mod_unstaged | mod_staged | mod_committed)
  # Status Unp
  #
  # Unpublished file. Any tracked file whose corresponding HTML is not tracked.
  # May or may not have staged or unstaged changes.
  unpublished <- tracked & !published
  # Status Scr
  #
  # Scratch file. Any untracked file that is not specifically ignored.
  scratch <- !tracked & !ignored
  # Determine if _site.yml has been edited
  o$site_yml <- FALSE
  site_yml_path <- relative(file.path(o$analysis, "_site.yml"))
  if (site_yml_path %in% s_df$file) o$site_yml <- TRUE
  # Determine if _workflowr.yml has been edited
  # (NULL when the file does not exist, so print.wflow_status can skip it)
  o$wflow_yml <- FALSE
  wflow_yml_path <- relative(file.path(o$root, "_workflowr.yml"))
  if (!file.exists(wflow_yml_path)) o$wflow_yml <- NULL
  if (wflow_yml_path %in% s_df$file) o$wflow_yml <- TRUE
  # One row per analysis file; columns are the (non-mutually-exclusive)
  # logical status flags documented in the roxygen header above.
  o$status <- data.frame(ignored, mod_unstaged, conflicted, mod_staged, tracked,
                         committed, published, mod_committed, modified,
                         unpublished, scratch,
                         row.names = files_analysis)
  # Passing the Git status to print.wflow_status()
  o$include_git_status <- include_git_status
  o$git_status <- s
  class(o) <- "wflow_status"
  return(o)
}
#' @export
print.wflow_status <- function(x, ...) {
  # S3 print method for wflow_status objects: writes a human-readable summary
  # of the Rmd file statuses (and optionally the raw Git status) to the console.
  # The legend key to explain abbreviations of file status
  key <- character()
  # Report totals
  cat(sprintf("Status of %d Rmd files\n\nTotals:\n", nrow(x$status)))
  if (sum(x$status$published) > 0 & sum(x$status$modified) > 0) {
    cat(sprintf(" %d Published (%d Modified)\n",
                sum(x$status$published), sum(x$status$modified)))
    key <- c(key, "Mod = Modified")
  } else if (sum(x$status$published) > 0) {
    cat(sprintf(" %d Published\n", sum(x$status$published)))
  }
  if (sum(x$status$unpublished) > 0) {
    cat(sprintf(" %d Unpublished\n", sum(x$status$unpublished)))
    key <- c(key, "Unp = Unpublished")
  }
  if (sum(x$status$scratch) > 0) {
    cat(sprintf(" %d Scratch\n", sum(x$status$scratch)))
    key <- c(key, "Scr = Scratch (Untracked)")
  }
  # Build a named vector of files needing attention; names carry the
  # abbreviation (Mod/Unp/Scr) printed before each file path below.
  f <- c(rownames(x$status)[x$status$modified],
         rownames(x$status)[x$status$unpublished],
         rownames(x$status)[x$status$scratch])
  names(f) <- rep(c("Mod", "Unp", "Scr"),
                  times = c(sum(x$status$modified),
                            sum(x$status$unpublished),
                            sum(x$status$scratch)))
  if (length(f) > 0) {
    cat("\nThe following Rmd files require attention:\n\n")
  }
  for (i in seq_along(f)) {
    o <- sprintf("%s %s\n", names(f)[i], f[i])
    cat(o)
  }
  if (length(f) > 0) {
    cat(sprintf("\nKey: %s\n", paste(key, collapse = ", ")))
  }
  if (x$include_git_status) {
    # Scrub the stored Git status (e.g. drop ignored files) before display.
    s <- scrub_status(x$git_status, git2r::repository(x$git), output_dir = x$docs,
                      remove_ignored = TRUE)
    s_df <- status_to_df(s)
    if (nrow(s_df) > 0) {
      s_df$file <- file.path(x$git, s_df$file)
      s_df$file <- relative(s_df$file)
      cat("\nThe current Git status is:\n\n")
      # Temporarily widen the console so long paths are not wrapped;
      # options() returns the previous values, restored right after.
      prev <- options(width = 200)
      cat(paste(utils::capture.output(print(s_df, row.names = FALSE)), collapse = "\n"))
      options(prev)
      cat("\n")
    } else {
      cat("\nThe current Git status is: working directory clean\n")
    }
  }
  if (length(f) == 0) {
    cat("\nRmd files are up-to-date\n")
  } else {
    cat("\n")
    cat(wrap("To publish your changes as part of your website, use `wflow_publish()`."))
    cat("\n")
    cat(wrap("To commit your changes without publishing them yet, use `wflow_git_commit()`."))
    cat("\n")
  }
  # Mention edited config files (site_yml / wflow_yml flags are set by
  # wflow_status(); wflow_yml is NULL when _workflowr.yml does not exist).
  if (x$site_yml) {
    site_yml_path <- relative(file.path(x$analysis, "_site.yml"))
    cat(glue::glue("\n\nThe config file {site_yml_path} has been edited.\n\n"))
  }
  if (!is.null(x$wflow_yml) && x$wflow_yml) {
    wflow_yml_path <- relative(file.path(x$root, "_workflowr.yml"))
    cat(glue::glue("\n\nThe config file {wflow_yml_path} has been edited.\n\n"))
  }
  # It's a convention for S3 print methods to invisibly return the original
  # object, e.g. base::print.summaryDefault and stats:::print.lm. I don't
  # understand why this is useful. Anyone know why?
  return(invisible(x))
}
|
#!/usr/bin/env Rscript
# Build a ggseg/ggseg3d atlas (2D + 3D) for the Schaefer 2018 100-parcel
# 7-network parcellation from the Yeo lab's FreeSurfer annotation files,
# then save all intermediate objects to an .Rdata file.
suppressPackageStartupMessages(library(dplyr))
# help from:
# https://github.com/LCBC-UiO/ggsegExtra/blob/master/vignettes/customatlas3d.Rmd
# https://github.com/ThomasYeoLab/CBIG
# not in CRAN. need to pull from github
# remotes::install_github('LCBC-UiO/ggsegExtra')
# remotes::install_github('LCBC-UiO/ggseg3d')
# need orca as an external dependency for 3d->2d
# npm install -g electron@6.1.4 orca
library(ggsegExtra)
library(ggseg3d)
library(tidyr)
# Local paths to the Yeo lab parcellation data and the output directory.
atlasdir<-"/Volumes/Hera/Datasets/YeoLabCBIG/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/FreeSurfer5.3"
outdir <- '/Volumes/Hera/Datasets/YeoLabCBIG/ggseg'
# Setting SUBJECTS_DIR is not needed when `annot_dir` is passed explicitly:
# Sys.setenv(SUBJECTS_DIR=atlasdir) # also see system("env|grep SUBJ")
# use make_aparc_2_3datlas(...,subject='fsaverage6')
# NOTE HERE -- using fsaverage5 -- maybe want 6 ?!
annote_dir <- file.path(atlasdir,'fsaverage5/label')
mesh_dt <- make_aparc_2_3datlas(annot = "Schaefer2018_100Parcels_7Networks_order",
                                annot_dir = annote_dir,
                                output_dir = outdir)
# to 2d
atlasname <- "Yeo100N7_3d"
# atlas name must end in 3d for make_ggseg3d_2_ggset!
# otherwise:
# > Errror in get_atlas "This is not a 3d atlas"
# Re-label the mesh regions by joining against ggseg's Desikan-Killiany
# labels, then re-nest into the structure a ggseg3d atlas expects.
Y100N7_3d <- mesh_dt %>%
   mutate(atlas = atlasname)%>%
   unnest(ggseg_3d) %>%
   select(-region) %>%
   left_join(select(ggseg::dk$data, hemi, region, label)) %>%
   nest_by(atlas, surf, hemi, .key = "ggseg_3d") %>%
   ggseg3d::as_ggseg3d_atlas()
# this can be true, but will still fail if atlas name is not *_3d
is_ggseg3d_atlas(Y100N7_3d) # TRUE
# using 1core so errors are easy to spot
# resolved orca "GL_INVALID_OPERATION" by using 1.1.1 appimage
Y100N7_2datlas <- make_ggseg3d_2_ggseg(output_dir="2d_orca_retry20210114",
                                       ggseg3d_atlas=Y100N7_3d,
                                       ncores=1)
# BUG: region is all NA in Y100N7_2datlas
cat("Y100N7_2datlas regions: \n")
print(Y100N7_2datlas$data$region)
#ggseg3d:::get_atlas(Y100N7_3d)
save(mesh_dt,Y100N7_3d, Y100N7_2datlas,
     file='/Volumes/Hera/Datasets/YeoLabCBIG/ggseg/Y100N7.Rdata')
|
/mkggseg_Y100N7.R
|
no_license
|
LabNeuroCogDevel/mkggseg_atlas
|
R
| false
| false
| 2,173
|
r
|
#!/usr/bin/env Rscript
# Build a ggseg/ggseg3d atlas (2D + 3D) for the Schaefer 2018 100-parcel
# 7-network parcellation from the Yeo lab's FreeSurfer annotation files,
# then save all intermediate objects to an .Rdata file.
suppressPackageStartupMessages(library(dplyr))
# help from:
# https://github.com/LCBC-UiO/ggsegExtra/blob/master/vignettes/customatlas3d.Rmd
# https://github.com/ThomasYeoLab/CBIG
# not in CRAN. need to pull from github
# remotes::install_github('LCBC-UiO/ggsegExtra')
# remotes::install_github('LCBC-UiO/ggseg3d')
# need orca as an external dependency for 3d->2d
# npm install -g electron@6.1.4 orca
library(ggsegExtra)
library(ggseg3d)
library(tidyr)
# Local paths to the Yeo lab parcellation data and the output directory.
atlasdir<-"/Volumes/Hera/Datasets/YeoLabCBIG/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/FreeSurfer5.3"
outdir <- '/Volumes/Hera/Datasets/YeoLabCBIG/ggseg'
# Setting SUBJECTS_DIR is not needed when `annot_dir` is passed explicitly:
# Sys.setenv(SUBJECTS_DIR=atlasdir) # also see system("env|grep SUBJ")
# use make_aparc_2_3datlas(...,subject='fsaverage6')
# NOTE HERE -- using fsaverage5 -- maybe want 6 ?!
annote_dir <- file.path(atlasdir,'fsaverage5/label')
mesh_dt <- make_aparc_2_3datlas(annot = "Schaefer2018_100Parcels_7Networks_order",
                                annot_dir = annote_dir,
                                output_dir = outdir)
# to 2d
atlasname <- "Yeo100N7_3d"
# atlas name must end in 3d for make_ggseg3d_2_ggset!
# otherwise:
# > Errror in get_atlas "This is not a 3d atlas"
# Re-label the mesh regions by joining against ggseg's Desikan-Killiany
# labels, then re-nest into the structure a ggseg3d atlas expects.
Y100N7_3d <- mesh_dt %>%
   mutate(atlas = atlasname)%>%
   unnest(ggseg_3d) %>%
   select(-region) %>%
   left_join(select(ggseg::dk$data, hemi, region, label)) %>%
   nest_by(atlas, surf, hemi, .key = "ggseg_3d") %>%
   ggseg3d::as_ggseg3d_atlas()
# this can be true, but will still fail if atlas name is not *_3d
is_ggseg3d_atlas(Y100N7_3d) # TRUE
# using 1core so errors are easy to spot
# resolved orca "GL_INVALID_OPERATION" by using 1.1.1 appimage
Y100N7_2datlas <- make_ggseg3d_2_ggseg(output_dir="2d_orca_retry20210114",
                                       ggseg3d_atlas=Y100N7_3d,
                                       ncores=1)
# BUG: region is all NA in Y100N7_2datlas
cat("Y100N7_2datlas regions: \n")
print(Y100N7_2datlas$data$region)
#ggseg3d:::get_atlas(Y100N7_3d)
save(mesh_dt,Y100N7_3d, Y100N7_2datlas,
     file='/Volumes/Hera/Datasets/YeoLabCBIG/ggseg/Y100N7.Rdata')
|
# MEPS summary-table script: total expenditures by source of payment and sex.
# NOTE(review): this appears to be a code-generation template -- the tokens
# `.FYC.`, `.year.`, and `.yy.` look like placeholders substituted by a
# generator before the script is run; confirm before executing as-is.
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Adjust variance estimation for strata containing a single PSU.
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Harmonize variable names across survey years (design vars, weights, ages).
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Replace negative age codes with NA, then take the latest available age.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Sex
FYC <- FYC %>%
  mutate(sex = recode_factor(SEX, .default = "Missing", .missing = "Missing",
    "1" = "Male",
    "2" = "Female"))
# Add aggregate sources of payment
if(year <= 1999)
  FYC <- FYC %>% mutate(TOTTRI.yy. = TOTCHM.yy.)
FYC <- FYC %>% mutate(
  TOTOTH.yy. = TOTOFD.yy. + TOTSTL.yy. + TOTOPR.yy. + TOTOPU.yy. + TOTOSR.yy.,
  TOTOTZ.yy. = TOTOTH.yy. + TOTWCP.yy. + TOTVA.yy.,
  TOTPTR.yy. = TOTPRV.yy. + TOTTRI.yy.)
# Complex survey design: PSU ids, strata, and person-level weights.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)
# Loop over sources of payment, estimating weighted totals by sex.
sops <- c("EXP", "SLF", "PTR", "MCR", "MCD", "OTZ")
results <- list()
for(sp in sops) {
  key <- paste0("TOT", sp)
  formula <- as.formula(sprintf("~%s.yy.", key))
  results[[key]] <- svyby(formula, FUN = svytotal, by = ~sex, design = FYCdsgn)
}
print(results)
|
/mepstrends/hc_use/json/code/r/totEXP__sop__sex__.r
|
permissive
|
RandomCriticalAnalysis/MEPS-summary-tables
|
R
| false
| false
| 1,671
|
r
|
# MEPS summary-table script: total expenditures by source of payment and sex.
# NOTE(review): this appears to be a code-generation template -- the tokens
# `.FYC.`, `.year.`, and `.yy.` look like placeholders substituted by a
# generator before the script is run; confirm before executing as-is.
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Adjust variance estimation for strata containing a single PSU.
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Harmonize variable names across survey years (design vars, weights, ages).
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Replace negative age codes with NA, then take the latest available age.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Sex
FYC <- FYC %>%
  mutate(sex = recode_factor(SEX, .default = "Missing", .missing = "Missing",
    "1" = "Male",
    "2" = "Female"))
# Add aggregate sources of payment
if(year <= 1999)
  FYC <- FYC %>% mutate(TOTTRI.yy. = TOTCHM.yy.)
FYC <- FYC %>% mutate(
  TOTOTH.yy. = TOTOFD.yy. + TOTSTL.yy. + TOTOPR.yy. + TOTOPU.yy. + TOTOSR.yy.,
  TOTOTZ.yy. = TOTOTH.yy. + TOTWCP.yy. + TOTVA.yy.,
  TOTPTR.yy. = TOTPRV.yy. + TOTTRI.yy.)
# Complex survey design: PSU ids, strata, and person-level weights.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)
# Loop over sources of payment, estimating weighted totals by sex.
sops <- c("EXP", "SLF", "PTR", "MCR", "MCD", "OTZ")
results <- list()
for(sp in sops) {
  key <- paste0("TOT", sp)
  formula <- as.formula(sprintf("~%s.yy.", key))
  results[[key]] <- svyby(formula, FUN = svytotal, by = ~sex, design = FYCdsgn)
}
print(results)
|
# Auto-generated regression test (AFL fuzzing / valgrind capture, per the file
# path): replays a previously recorded argument list against an internal CNull
# C++ entry point. The degenerate inputs (m = NULL, repetitions = 0L) suggest
# it checks for crashes/leaks rather than a meaningful statistical result.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(1.13196051499162e-72, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615833559-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 270
|
r
|
# Auto-generated regression test (AFL fuzzing / valgrind capture, per the file
# path): replays a previously recorded argument list against an internal CNull
# C++ entry point. The degenerate inputs (m = NULL, repetitions = 0L) suggest
# it checks for crashes/leaks rather than a meaningful statistical result.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(1.13196051499162e-72, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
# Association-rule mining on a movies dataset with the arules package.
#Install the R package arules
# install.packages("arules");
#load the arules package
library("arules");
#movies <- read.csv("~/programme/data_mining/data/movies.csv", sep=";")
# Read transactions in "single" format: one (id, item) pair per row;
# column 1 is the transaction id, column 2 the item (movie).
movies<-read.transactions(file="~/programme/data_mining/data/movies.csv", rm.duplicates= FALSE, format="single",sep=";",cols =c(1,2))
inspect(movies)
head(movies)
# movies$ID<-as.factor(movies$ID)
# str(movies)
summary(movies)
# Mine rules with minimum support 0.3 and minimum confidence 0.9.
basket_rules <- apriori(movies, parameter = list(sup = 0.3, conf = 0.9,target="rules"));
# Check the generated rules using inspect
inspect(basket_rules)
# NOTE(review): this final line clobbers the transactions object `movies`
# with a raw data.frame -- looks like leftover exploration; confirm intent.
movies <- read.table("~/programme/data_mining/data/movies.csv", sep=";", quote="\"")
|
/r_scripts/Movies/Movies.R
|
no_license
|
gjaehrling/gbdmp
|
R
| false
| false
| 647
|
r
|
# Association-rule mining on a movies dataset with the arules package.
#Install the R package arules
# install.packages("arules");
#load the arules package
library("arules");
#movies <- read.csv("~/programme/data_mining/data/movies.csv", sep=";")
# Read transactions in "single" format: one (id, item) pair per row;
# column 1 is the transaction id, column 2 the item (movie).
movies<-read.transactions(file="~/programme/data_mining/data/movies.csv", rm.duplicates= FALSE, format="single",sep=";",cols =c(1,2))
inspect(movies)
head(movies)
# movies$ID<-as.factor(movies$ID)
# str(movies)
summary(movies)
# Mine rules with minimum support 0.3 and minimum confidence 0.9.
basket_rules <- apriori(movies, parameter = list(sup = 0.3, conf = 0.9,target="rules"));
# Check the generated rules using inspect
inspect(basket_rules)
# NOTE(review): this final line clobbers the transactions object `movies`
# with a raw data.frame -- looks like leftover exploration; confirm intent.
movies <- read.table("~/programme/data_mining/data/movies.csv", sep=";", quote="\"")
|
# Code extracted (purl) from the ggfortify survival-curve plotting vignette.
# The `## ---- ... ----` lines are knitr chunk headers and must be preserved.
## ----global_options, include=FALSE--------------------------------------------
library(knitr)
opts_chunk$set(fig.width=6, fig.height=3, fig.path='figures/surv-', warning=FALSE)
## ---- message = FALSE, eval = requireNamespace("survival", quietly = TRUE)----
library(ggfortify)
library(survival)
# Kaplan-Meier fit of the lung data stratified by sex, plotted via autoplot.
fit <- survfit(Surv(time, status) ~ sex, data = lung)
autoplot(fit)
## ---- message = FALSE, eval = requireNamespace("survival", quietly = TRUE)----
# Customized appearance: dashed lines, custom censor marks, faceted panels.
autoplot(fit, surv.linetype = 'dashed', conf.int = FALSE,
         censor.shape = '*', censor.size = 5, facets = TRUE, ncol = 2)
autoplot(survfit(Surv(time, status) ~ 1, data = lung), surv.colour = 'orange', censor.colour = 'red')
autoplot(survfit(Surv(time, status) ~ sex, data = lung), fun = 'event')
# Survival curve derived from a Cox proportional hazards model.
d.coxph <- survfit(coxph(Surv(time, status) ~ sex, data = lung))
autoplot(d.coxph, surv.linetype = 'dashed', surv.colour = 'blue',
         conf.int.fill = 'dodgerblue3', conf.int.alpha = 0.5, censor = FALSE)
## ---- fig.width = 8, fig.height = 4, eval = requireNamespace("survival", quietly = TRUE)----
# Aalen's additive regression model, also supported by autoplot.
autoplot(aareg(Surv(time, status) ~ age + sex + ph.ecog, data = lung))
|
/inst/doc/plot_surv.R
|
no_license
|
cran/ggfortify
|
R
| false
| false
| 1,131
|
r
|
# Code extracted (purl) from the ggfortify survival-curve plotting vignette.
# The `## ---- ... ----` lines are knitr chunk headers and must be preserved.
## ----global_options, include=FALSE--------------------------------------------
library(knitr)
opts_chunk$set(fig.width=6, fig.height=3, fig.path='figures/surv-', warning=FALSE)
## ---- message = FALSE, eval = requireNamespace("survival", quietly = TRUE)----
library(ggfortify)
library(survival)
# Kaplan-Meier fit of the lung data stratified by sex, plotted via autoplot.
fit <- survfit(Surv(time, status) ~ sex, data = lung)
autoplot(fit)
## ---- message = FALSE, eval = requireNamespace("survival", quietly = TRUE)----
# Customized appearance: dashed lines, custom censor marks, faceted panels.
autoplot(fit, surv.linetype = 'dashed', conf.int = FALSE,
         censor.shape = '*', censor.size = 5, facets = TRUE, ncol = 2)
autoplot(survfit(Surv(time, status) ~ 1, data = lung), surv.colour = 'orange', censor.colour = 'red')
autoplot(survfit(Surv(time, status) ~ sex, data = lung), fun = 'event')
# Survival curve derived from a Cox proportional hazards model.
d.coxph <- survfit(coxph(Surv(time, status) ~ sex, data = lung))
autoplot(d.coxph, surv.linetype = 'dashed', surv.colour = 'blue',
         conf.int.fill = 'dodgerblue3', conf.int.alpha = 0.5, censor = FALSE)
## ---- fig.width = 8, fig.height = 4, eval = requireNamespace("survival", quietly = TRUE)----
# Aalen's additive regression model, also supported by autoplot.
autoplot(aareg(Surv(time, status) ~ age + sex + ph.ecog, data = lung))
|
library("shiny")
library("ggplot2")
library("dplyr")
# Shiny server function. Reacts to the year-range slider `input$period` by
# filtering pre-cleaned birth data and rendering (1) a data table and (2) a
# bar plot of birth counts by age and education level, faceted year x country.
# NOTE(review): the RDS file is re-read on every reactive invalidation;
# the commented-out helper below was presumably an attempt to factor that out.
function(input, output) {
  # getFilteredBirthDt <- function() {
  #   message("filtered birth dt function has been called with ", input$period)
  #
  #   readRDS("cleaned_birth_data.rds") %>%
  #     filter(year >= input$period[1] & year <= input$period[2])
  # }
  # Table of rows falling inside the selected year range.
  output$birth_dt <- renderDataTable(
    readRDS("cleaned_birth_data.rds") %>%
      filter(year >= input$period[1] & year <= input$period[2])
  )
  # Grouped bar chart: births by age, colored by education level.
  output$birth_summary_plot <- renderPlot({
    readRDS("cleaned_birth_data.rds") %>%
      filter(year >= input$period[1] & year <= input$period[2]) %>%
      ggplot(aes(x = age, y = num_birth, fill = education_level)) +
      geom_col(position = "dodge") +
      facet_grid(year ~ country) +
      theme(legend.position = "bottom", legend.direction = "vertical")
  })
}
|
/Meetup_2017_09_21/eu_births_shiny_app/server.R
|
no_license
|
judashgriff/meetup-presentations_budapest
|
R
| false
| false
| 936
|
r
|
library("shiny")
library("ggplot2")
library("dplyr")
# Shiny server function. Reacts to the year-range slider `input$period` by
# filtering pre-cleaned birth data and rendering (1) a data table and (2) a
# bar plot of birth counts by age and education level, faceted year x country.
# NOTE(review): the RDS file is re-read on every reactive invalidation;
# the commented-out helper below was presumably an attempt to factor that out.
function(input, output) {
  # getFilteredBirthDt <- function() {
  #   message("filtered birth dt function has been called with ", input$period)
  #
  #   readRDS("cleaned_birth_data.rds") %>%
  #     filter(year >= input$period[1] & year <= input$period[2])
  # }
  # Table of rows falling inside the selected year range.
  output$birth_dt <- renderDataTable(
    readRDS("cleaned_birth_data.rds") %>%
      filter(year >= input$period[1] & year <= input$period[2])
  )
  # Grouped bar chart: births by age, colored by education level.
  output$birth_summary_plot <- renderPlot({
    readRDS("cleaned_birth_data.rds") %>%
      filter(year >= input$period[1] & year <= input$period[2]) %>%
      ggplot(aes(x = age, y = num_birth, fill = education_level)) +
      geom_col(position = "dodge") +
      facet_grid(year ~ country) +
      theme(legend.position = "bottom", legend.direction = "vertical")
  })
}
|
# Instrumented wrapper around the compiled routine `_xyz_sample_int_replace`.
# Before each call it appends the arguments to the list `sample_int_replace`
# inside the global environment `data.env` -- presumably so a test harness
# (RcppDeepState, per the file path) can replay captured inputs later; confirm.
function (probabilities, n)
{
    e <- get("data.env", .GlobalEnv)
    # Record this invocation's arguments at the end of the capture list.
    e[["sample_int_replace"]][[length(e[["sample_int_replace"]]) +
        1]] <- list(probabilities = probabilities, n = n)
    .Call("_xyz_sample_int_replace", probabilities, n)
}
|
/valgrind_test_dir/sample_int_replace-test.R
|
no_license
|
akhikolla/RcppDeepStateTest
|
R
| false
| false
| 251
|
r
|
# Instrumented wrapper around the compiled routine `_xyz_sample_int_replace`.
# Before each call it appends the arguments to the list `sample_int_replace`
# inside the global environment `data.env` -- presumably so a test harness
# (RcppDeepState, per the file path) can replay captured inputs later; confirm.
function (probabilities, n)
{
    e <- get("data.env", .GlobalEnv)
    # Record this invocation's arguments at the end of the capture list.
    e[["sample_int_replace"]][[length(e[["sample_int_replace"]]) +
        1]] <- list(probabilities = probabilities, n = n)
    .Call("_xyz_sample_int_replace", probabilities, n)
}
|
# Return the elements of V lying within the closed interval [left, right].
# Elements outside the bounds are dropped; NA comparisons propagate as in
# ordinary logical subsetting.
truncatevector <- function(V, left = 0, right = 100) {
  keep <- V >= left & V <= right
  V[keep]
}
|
/R/truncatevector.R
|
no_license
|
cran/agrmt
|
R
| false
| false
| 100
|
r
|
# Return the elements of V lying within the closed interval [left, right].
# Elements outside the bounds are dropped; NA comparisons propagate as in
# ordinary logical subsetting.
truncatevector <- function(V, left = 0, right = 100) {
  keep <- V >= left & V <= right
  V[keep]
}
|
# Auto-generated regression test (AFL fuzzing / valgrind capture, per the file
# path): replays a recorded argument list against an internal CNull C++ entry
# point to check for crashes/leaks rather than a meaningful result.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.77448001762435e+180, 1.97274569258754e-154, 1.44017975025437e-303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(3L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta_interleaved_matrices,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta_interleaved_matrices/AFL_communities_individual_based_sampling_beta_interleaved_matrices/communities_individual_based_sampling_beta_interleaved_matrices_valgrind_files/1615840034-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 320
|
r
|
# Auto-generated regression test (AFL fuzzing / valgrind capture, per the file
# path): replays a recorded argument list against an internal CNull C++ entry
# point to check for crashes/leaks rather than a meaningful result.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.77448001762435e+180, 1.97274569258754e-154, 1.44017975025437e-303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(3L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta_interleaved_matrices,testlist)
str(result)
|
#' Download mitochondrial genomes from GenBank for one or more taxa.
#'
#' For each taxon, searches the NCBI nuccore database (via rentrez) for
#' mitochondrion records within the given sequence-length window, downloads
#' each matching record in GenBank format, and appends progress to log.txt.
#'
#' @param taxon Character vector of taxon names to query.
#' @param folder Optional output folder; created if it does not exist.
#' @param minlength,maxlength Sequence length bounds for the GenBank query.
#' @param custom_query Optional query suffix replacing the default filter.
#'   NOTE(review): when custom_query is used, the whole `taxon` vector is
#'   pasted into a single query (taxon[k] is not used) -- confirm intended.
#' @param setwd Optional directory in which to write log.txt.
#' @return Nothing useful; called for its file-writing side effects.
Download_mito <- function(taxon, folder=NULL, minlength=2001, maxlength=80000, custom_query=NULL, setwd=NULL){
# Log file lives in `setwd` if given, otherwise the working directory.
if(!is.null(setwd)){logfile <- paste(setwd, "/log.txt", sep="")} else {logfile <- "log.txt"}
cat(paste(Sys.time(), " - Downloading Miochondrial Genomes from GenBank\n\n"), file=logfile, sep="", append=T)
if (is.null(custom_query)){cat(paste("Search query: REPLACE_WITH_TAXA", "[Organism] AND mitochondrion[filter] AND genome AND ", minlength, ":", maxlength ,"[Sequence Length]\n\n", sep=""), file=logfile, sep="", append=T)} else {cat(paste("Search query: REPLACE_WITH_TAXA", custom_query, "\n\n", sep=""), file=logfile, sep="", append=T)}
folder_path <- ""
if(!is.null(folder)){dir.create(folder, showWarnings=F)
folder_path <- paste(folder, "/", sep="")
cat(paste("#mito_data: Folder ",folder, "\n", sep=""), file=logfile, sep="", append=T)
} else {cat(paste("#mito_data: ", "\n", sep=""), file=logfile, sep="", append=T)}
cat("Taxon\tSequences\tdownl_time\n", file=logfile, sep="", append=T)
for (k in 1:length(taxon)){
time <- Sys.time() # get time
# download IDs
if (is.null(custom_query)){
searchQ <- paste(taxon[k],"[Organism] AND mitochondrion[filter] AND genome AND ", minlength, ":", maxlength ,"[Sequence Length]", sep="")
} else {searchQ <- paste(taxon, custom_query, sep="")}
searchQ <- searchQ
search_results <- entrez_search(db="nuccore", term=searchQ, retmax=9999999)
# save genbank file!
if(length(search_results$ids)!=0){
# cat(file=...) with no content truncates/overwrites any previous file,
# so the per-record downloads below (mode="a") append into a fresh file.
cat(file=paste(folder_path, taxon[k], "_mito.gb", sep="")) # overwrite old files
for (i in 1:length(search_results$ids)){
download.file(paste("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nuccore&id=", search_results$ids[i], "&rettype=gb&retmode=text", sep=""), destfile=paste(folder_path, taxon[k], "_mito.gb", sep=""), mode="a", quiet=T)}
}
time <- Sys.time() - time
message(paste("Downloaded ", length(search_results$ids)," mitogenomes for ", taxon[k], " in ", format(time, digits=2), ".", sep=""))
cat(paste(taxon[k],"\t", length(search_results$ids), "\t", format(time, digits=2), "\n", sep=""), file=logfile, sep="", append=T)
}
cat(paste("#mito_data_end", "\n\n", sep=""), file=logfile, sep="", append=T)
#return(length(search_results$ids))
}
|
/refLib/scripts/PrimerMiner/Download_mito.R
|
no_license
|
maxfarrell/eDNAcamtrap
|
R
| false
| false
| 2,210
|
r
|
#' Download mitochondrial genomes from GenBank for one or more taxa.
#'
#' For each taxon, searches the NCBI nuccore database (via rentrez) for
#' mitochondrion records within the given sequence-length window, downloads
#' each matching record in GenBank format, and appends progress to log.txt.
#'
#' @param taxon Character vector of taxon names to query.
#' @param folder Optional output folder; created if it does not exist.
#' @param minlength,maxlength Sequence length bounds for the GenBank query.
#' @param custom_query Optional query suffix replacing the default filter.
#'   NOTE(review): when custom_query is used, the whole `taxon` vector is
#'   pasted into a single query (taxon[k] is not used) -- confirm intended.
#' @param setwd Optional directory in which to write log.txt.
#' @return Nothing useful; called for its file-writing side effects.
Download_mito <- function(taxon, folder=NULL, minlength=2001, maxlength=80000, custom_query=NULL, setwd=NULL){
# Log file lives in `setwd` if given, otherwise the working directory.
if(!is.null(setwd)){logfile <- paste(setwd, "/log.txt", sep="")} else {logfile <- "log.txt"}
cat(paste(Sys.time(), " - Downloading Miochondrial Genomes from GenBank\n\n"), file=logfile, sep="", append=T)
if (is.null(custom_query)){cat(paste("Search query: REPLACE_WITH_TAXA", "[Organism] AND mitochondrion[filter] AND genome AND ", minlength, ":", maxlength ,"[Sequence Length]\n\n", sep=""), file=logfile, sep="", append=T)} else {cat(paste("Search query: REPLACE_WITH_TAXA", custom_query, "\n\n", sep=""), file=logfile, sep="", append=T)}
folder_path <- ""
if(!is.null(folder)){dir.create(folder, showWarnings=F)
folder_path <- paste(folder, "/", sep="")
cat(paste("#mito_data: Folder ",folder, "\n", sep=""), file=logfile, sep="", append=T)
} else {cat(paste("#mito_data: ", "\n", sep=""), file=logfile, sep="", append=T)}
cat("Taxon\tSequences\tdownl_time\n", file=logfile, sep="", append=T)
for (k in 1:length(taxon)){
time <- Sys.time() # get time
# download IDs
if (is.null(custom_query)){
searchQ <- paste(taxon[k],"[Organism] AND mitochondrion[filter] AND genome AND ", minlength, ":", maxlength ,"[Sequence Length]", sep="")
} else {searchQ <- paste(taxon, custom_query, sep="")}
search_results <- entrez_search(db="nuccore", term=searchQ, retmax=9999999)
# save genbank file!
if(length(search_results$ids)!=0){
# cat(file=...) with no content truncates/overwrites any previous file,
# so the per-record downloads below (mode="a") append into a fresh file.
cat(file=paste(folder_path, taxon[k], "_mito.gb", sep="")) # overwrite old files
for (i in 1:length(search_results$ids)){
download.file(paste("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=nuccore&id=", search_results$ids[i], "&rettype=gb&retmode=text", sep=""), destfile=paste(folder_path, taxon[k], "_mito.gb", sep=""), mode="a", quiet=T)}
}
time <- Sys.time() - time
message(paste("Downloaded ", length(search_results$ids)," mitogenomes for ", taxon[k], " in ", format(time, digits=2), ".", sep=""))
cat(paste(taxon[k],"\t", length(search_results$ids), "\t", format(time, digits=2), "\n", sep=""), file=logfile, sep="", append=T)
}
cat(paste("#mito_data_end", "\n\n", sep=""), file=logfile, sep="", append=T)
#return(length(search_results$ids))
}
|
#******* Group information *******
#Group :
#Please rename this file to Group_.R
#Please write your code in each block.
#******* end of Group information *******
# Course exercise: logistic regression on the Titanic training data, with a
# 10-fold cross-validated search for the misclassification-minimizing cut-off.
#install.packages("epiDisplay")
library(epiDisplay)
library(data.table)
library(caret)
#******* 1. Download data and do some data management tasks *******
# Drop rows with missing values and keep columns 2-12 (Survived ... Embarked).
df <- na.omit(read.csv("train.csv", header = T))[, 2:12]
attach(df)
#********** end of 1. **********
#********** 2. fit logit models and report 10-fold MR **********
set.seed(87)
# 714 = number of complete cases; indices are split into 10 folds.
ten_folds <- createFolds(1:714, k = 10)
# For a given cut-off probability p, return the 10 per-fold
# misclassification rates of the logit model defined by `fmla`.
cv_n_p <- function(p){
  sapply(1:10, function(i){
    trn <- df[-ten_folds[[i]], ]
    tst <- df[ ten_folds[[i]], ]
    mdl <- glm(fmla, data = trn, family = "binomial")
    rsp <- predict(mdl, newdata = tst, type = "response")
    prd <- ifelse(rsp > p, 1, 0)
    act <- tst[, 1]
    mean(prd != act)
  })
}
# Model: class and sex with their interaction.
fmla <- Survived ~ (as.factor(Pclass) + Sex )^2
# Sweep cut-offs 0, 0.01, ..., 1 and record the mean CV misclassification rate.
mn_cv_n_p <- cbind(seq(0, 1, .01),
                   sapply(seq(0, 1, .01), FUN = function(p) mean(cv_n_p(p))))
plot(mn_cv_n_p[, 2] ~ mn_cv_n_p[, 1], type = "l",
     main = "10-folds MR", xlab = "cut-off point", ylab = "misclassification rate")
abline(v = mn_cv_n_p[which.min(mn_cv_n_p[, 2]), 1], lty = 2, col = gray(.3))
legend("topright", legend = c(paste0("minimum MR = ", round(min(mn_cv_n_p[, 2]), 4)),
                              paste0("cut-off point = ", mn_cv_n_p[which.min(mn_cv_n_p[, 2]), 1])))
#********** end of 2. **********
#********** 3. Remember Jack and Rose in the movie Titanic? **********
#********** What are their probabilities of surviving the event? **********
mdl <- glm(formula = fmla, df, family = "binomial")
# Hand-built passenger rows; fares imputed as class means, cabin unknown (NA).
Jack <- c(0, 3, "Jack", "male", 20, 0, 0, "godie", mean(df$Fare[df$Pclass==3]), NA, "S")
Rose <- c(1, 1, "Rose", "female", 17, 0, 1, "possy", mean(df$Fare[df$Pclass==1]), NA, "S")
JR <- as.data.frame(t(cbind(Jack, Rose)))
# cbind of mixed types produced characters; coerce numeric columns back.
for(j in c(1, 2, 5, 6, 7 ,9)){
  JR[, j]<- as.numeric(JR[, j])
}
names(JR) <- names(df)[1:11]
predict(mdl, newdata = JR, type = "response")
#********** end of 3. **********
#********** 4. report result of logit model using epiDisplay
epiDisplay::logistic.display(mdl)
#********** end of 4. **********
|
/2017_02/PracticalBigDataAnalysis/GroupExercise/GroupExercise2/Group_7.R
|
no_license
|
tnfsh810124/Courses
|
R
| false
| false
| 2,196
|
r
|
#******* Group information *******
#Group :
#Please rename this file to Group_.R
#Please write your code in each block.
#******* end of Group information *******
# Course exercise: logistic regression on the Titanic training data, with a
# 10-fold cross-validated search for the misclassification-minimizing cut-off.
#install.packages("epiDisplay")
library(epiDisplay)
library(data.table)
library(caret)
#******* 1. Download data and do some data management tasks *******
# Drop rows with missing values and keep columns 2-12 (Survived ... Embarked).
df <- na.omit(read.csv("train.csv", header = T))[, 2:12]
attach(df)
#********** end of 1. **********
#********** 2. fit logit models and report 10-fold MR **********
set.seed(87)
# 714 = number of complete cases; indices are split into 10 folds.
ten_folds <- createFolds(1:714, k = 10)
# For a given cut-off probability p, return the 10 per-fold
# misclassification rates of the logit model defined by `fmla`.
cv_n_p <- function(p){
  sapply(1:10, function(i){
    trn <- df[-ten_folds[[i]], ]
    tst <- df[ ten_folds[[i]], ]
    mdl <- glm(fmla, data = trn, family = "binomial")
    rsp <- predict(mdl, newdata = tst, type = "response")
    prd <- ifelse(rsp > p, 1, 0)
    act <- tst[, 1]
    mean(prd != act)
  })
}
# Model: class and sex with their interaction.
fmla <- Survived ~ (as.factor(Pclass) + Sex )^2
# Sweep cut-offs 0, 0.01, ..., 1 and record the mean CV misclassification rate.
mn_cv_n_p <- cbind(seq(0, 1, .01),
                   sapply(seq(0, 1, .01), FUN = function(p) mean(cv_n_p(p))))
plot(mn_cv_n_p[, 2] ~ mn_cv_n_p[, 1], type = "l",
     main = "10-folds MR", xlab = "cut-off point", ylab = "misclassification rate")
abline(v = mn_cv_n_p[which.min(mn_cv_n_p[, 2]), 1], lty = 2, col = gray(.3))
legend("topright", legend = c(paste0("minimum MR = ", round(min(mn_cv_n_p[, 2]), 4)),
                              paste0("cut-off point = ", mn_cv_n_p[which.min(mn_cv_n_p[, 2]), 1])))
#********** end of 2. **********
#********** 3. Remember Jack and Rose in the movie Titanic? **********
#********** What are their probabilities of surviving the event? **********
mdl <- glm(formula = fmla, df, family = "binomial")
# Hand-built passenger rows; fares imputed as class means, cabin unknown (NA).
Jack <- c(0, 3, "Jack", "male", 20, 0, 0, "godie", mean(df$Fare[df$Pclass==3]), NA, "S")
Rose <- c(1, 1, "Rose", "female", 17, 0, 1, "possy", mean(df$Fare[df$Pclass==1]), NA, "S")
JR <- as.data.frame(t(cbind(Jack, Rose)))
# cbind of mixed types produced characters; coerce numeric columns back.
for(j in c(1, 2, 5, 6, 7 ,9)){
  JR[, j]<- as.numeric(JR[, j])
}
names(JR) <- names(df)[1:11]
predict(mdl, newdata = JR, type = "response")
#********** end of 3. **********
#********** 4. report result of logit model using epiDisplay
epiDisplay::logistic.display(mdl)
#********** end of 4. **********
|
library(poker)
### Name: dotHighcardCompare
### Title: dotHighcardCompare
### Aliases: dotHighcardCompare
### ** Examples
# Each matrix row appears to be one player's 7 card ranks (2-14, ace high);
# presumably the function compares the hands by high card -- confirm against
# the poker package documentation.
dotHighcardCompare(matrix(c(2,4,5,6,7,13,14,2,3,5,6,7,13,14),2,7,byrow=TRUE))
# Identical rows: exercises the full tie-breaking path.
dotHighcardCompare(matrix(c(2,3,5,6,7,13,14,2,3,5,6,7,13,14),2,7,byrow=TRUE))
|
/data/genthat_extracted_code/poker/examples/dotHighcardCompare.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 285
|
r
|
library(poker)
### Name: dotHighcardCompare
### Title: dotHighcardCompare
### Aliases: dotHighcardCompare
### ** Examples
# Each matrix row appears to be one player's 7 card ranks (2-14, ace high);
# presumably the function compares the hands by high card -- confirm against
# the poker package documentation.
dotHighcardCompare(matrix(c(2,4,5,6,7,13,14,2,3,5,6,7,13,14),2,7,byrow=TRUE))
# Identical rows: exercises the full tie-breaking path.
dotHighcardCompare(matrix(c(2,3,5,6,7,13,14,2,3,5,6,7,13,14),2,7,byrow=TRUE))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/facebook.R
\name{fb_ads}
\alias{fb_ads}
\title{Facebook Ads API}
\usage{
fb_ads(
token,
which,
start = Sys.Date() - 31,
end = Sys.Date(),
fields = NA,
api_version = "v8.0",
process = TRUE
)
}
\arguments{
\item{token}{Character. This must be a valid access token with sufficient
privileges. Visit the Facebook API Graph Explorer to acquire one.}
\item{which}{Character vector. This is the accounts, campaigns, adsets,
or ads IDs to be queried. Remember: if report_level = "account", you must
start the ID with \code{act_}.}
\item{start}{Character. The first full day to report, in the
format "YYYY-MM-DD"}
\item{end}{Character. The last full day to report, in the
format "YYYY-MM-DD"}
\item{fields}{Character, json format. Leave \code{NA} for default fields.}
\item{api_version}{Character. Facebook API version}
\item{process}{Boolean. Process GET results to a more friendly format?}
}
\description{
This returns all available FB ads for any account, campaign, or ad set id.
For more information on Ad' API, go to the
\href{https://developers.facebook.com/docs/marketing-api/reference/adgroup}{original documentation}
}
\details{
This function was based on FBinsightsR.
}
\examples{
\dontrun{
token <- YOURTOKEN
which <- act_ADACCOUNT
# Query all ads for "which" with results in the last 10 days
ads <- fb_ads(YOURTOKEN, which, start = Sys.Date() - 10)
}
}
\seealso{
Other API:
\code{\link{bring_api}()},
\code{\link{fb_accounts}()},
\code{\link{fb_creatives}()},
\code{\link{fb_insights}()},
\code{\link{fb_posts}()},
\code{\link{fb_post}()},
\code{\link{fb_process}()},
\code{\link{fb_rf}()},
\code{\link{li_auth}()},
\code{\link{li_profile}()},
\code{\link{queryGA}()},
\code{\link{slackSend}()}
Other Facebook:
\code{\link{fb_accounts}()},
\code{\link{fb_creatives}()},
\code{\link{fb_insights}()},
\code{\link{fb_posts}()},
\code{\link{fb_post}()},
\code{\link{fb_process}()},
\code{\link{fb_rf}()}
}
\concept{API}
\concept{Facebook}
|
/man/fb_ads.Rd
|
no_license
|
alexandereric995/lares
|
R
| false
| true
| 2,051
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/facebook.R
\name{fb_ads}
\alias{fb_ads}
\title{Facebook Ads API}
\usage{
fb_ads(
token,
which,
start = Sys.Date() - 31,
end = Sys.Date(),
fields = NA,
api_version = "v8.0",
process = TRUE
)
}
\arguments{
\item{token}{Character. This must be a valid access token with sufficient
privileges. Visit the Facebook API Graph Explorer to acquire one.}
\item{which}{Character vector. This is the accounts, campaigns, adsets,
or ads IDs to be queried. Remember: if report_level = "account", you must
start the ID with \code{act_}.}
\item{start}{Character. The first full day to report, in the
format "YYYY-MM-DD"}
\item{end}{Character. The last full day to report, in the
format "YYYY-MM-DD"}
\item{fields}{Character, json format. Leave \code{NA} for default fields.}
\item{api_version}{Character. Facebook API version}
\item{process}{Boolean. Process GET results to a more friendly format?}
}
\description{
This returns all available FB ads for any account, campaign, or ad set id.
For more information on Ad' API, go to the
\href{https://developers.facebook.com/docs/marketing-api/reference/adgroup}{original documentation}
}
\details{
This function was based on FBinsightsR.
}
\examples{
\dontrun{
token <- YOURTOKEN
which <- act_ADACCOUNT
# Query all ads for "which" with results in the last 10 days
ads <- fb_ads(YOURTOKEN, which, start = Sys.Date() - 10)
}
}
\seealso{
Other API:
\code{\link{bring_api}()},
\code{\link{fb_accounts}()},
\code{\link{fb_creatives}()},
\code{\link{fb_insights}()},
\code{\link{fb_posts}()},
\code{\link{fb_post}()},
\code{\link{fb_process}()},
\code{\link{fb_rf}()},
\code{\link{li_auth}()},
\code{\link{li_profile}()},
\code{\link{queryGA}()},
\code{\link{slackSend}()}
Other Facebook:
\code{\link{fb_accounts}()},
\code{\link{fb_creatives}()},
\code{\link{fb_insights}()},
\code{\link{fb_posts}()},
\code{\link{fb_post}()},
\code{\link{fb_process}()},
\code{\link{fb_rf}()}
}
\concept{API}
\concept{Facebook}
|
logLikHmm <- function(y, par) {
  ## Log likelihood of one sequence, or a list of sequences, of
  ## observations under a hidden Markov model with discrete
  ## non-parametric emission distributions. `par` carries Rho (the
  ## emission matrix with rho_ij = P(X = x_i | S = j)), tpm (the
  ## transition probability matrix) and, optionally, ispd (the initial
  ## state probability distribution).
  # Coerce a matrix argument into the expected list-of-sequences form;
  # charList() also complains to the user about the matrix input.
  obs <- charList(y)
  # Unpack the model parameters.
  Rho <- par$Rho
  tpm <- par$tpm
  ispd <- par$ispd
  # No initial state distribution supplied: derive one from tpm.
  if (is.null(ispd)) {
    ispd <- revise.ispd(tpm = tpm)
  }
  # Align the observation values with the row names of Rho.
  Rho <- check.yval(obs, Rho)
  # One hidden state: observations are independent, so the log
  # likelihood is simply the summed log emission probabilities.
  if (length(ispd) == 1) {
    return(sum(log(ffun(obs, Rho))))
  }
  # Emission probabilities per observation, then the forward recursion;
  # llc holds the per-step likelihood contributions.
  seq.lengths <- sapply(obs, length)
  emis.probs <- ffun(obs, Rho)
  fwd <- recurse(emis.probs, tpm, ispd, seq.lengths)
  sum(log(fwd$llc))
}
|
/hmm.discnp/R/logLikHmm.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 947
|
r
|
logLikHmm <- function(y,par) {
#
# Function logLikHmm. To calculate the log likelihood of a sequence,
# or collection (list) of sequences, of observations which come
# from a hidden Markov model with discrete non-parametric observation
# distributions. These distributions are specified by a matrix
# Rho = [rho_ij] where rho_ij = P(X = x_i | S = j), X being the
# observable random variable and S being the hidden state.
# If y is a matrix, change it to a list, and put out a
# snarky message to the user.
y <- charList(y)
# Get the parameters.
Rho <- par$Rho
tpm <- par$tpm
ispd <- par$ispd
if(is.null(ispd)) {
# No initial state distribution supplied; derive one from tpm
# (see revise.ispd() for the construction used).
ispd <- revise.ispd(tpm=tpm)
}
# Make sure that the entries of the vectors in y correspond
# to the row names of Rho.
Rho <- check.yval(y,Rho)
# If K=1 do the triv thing:
# (a single hidden state makes the observations independent, so the
# log likelihood is just the summed log emission probabilities)
K <- length(ispd)
if(K==1) return(sum(log(ffun(y,Rho))))
# Emission probabilities per observation, then the forward recursion;
# rp$llc holds the per-step likelihood contributions.
lns <- sapply(y,length)
fy <- ffun(y,Rho)
rp <- recurse(fy,tpm,ispd,lns)
sum(log(rp$llc))
}
|
## Build a caching wrapper around a matrix: a list of four accessor
## functions that store the matrix together with a memoised copy of its
## inverse (NULL until cacheSolve() fills it in).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any cached inverse.
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
## Return the inverse of the special "matrix" produced by makeCacheMatrix:
## check the cache first (announcing a hit with a message), otherwise invert
## the stored matrix with solve() -- forwarding any extra arguments -- and
## memoise the result for later calls.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, then store for the next call.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
nicholas-azar/ProgrammingAssignment2
|
R
| false
| false
| 855
|
r
|
## this creates a special "matrix", which is really a list containing functions to set and get the
## matrix values and set and get the value of the inverse
#
makeCacheMatrix <- function(x = matrix()) {
  # m caches the inverse of x; NULL means "not computed yet".
  m <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates the cached inverse.
    x <<- y
    m <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  # Expose the four accessors; cacheSolve() works through this list.
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## The following function first checks to see if the inverse has already been calculated and then
## solves the inverse of the special "matrix" created by makeCacheMatrix if not
#
cacheSolve <- function(x, ...) {
  m <- x$getinverse()
  if (!is.null(m)) {
    # Cache hit: return the previously computed inverse.
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # Cache miss: invert with solve() (extra args are forwarded) and store it.
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
|
# Fuzzer-generated regression input: a 1x1 matrix holding Inf plus a
# subnormal quantile value, fed to biwavelet's internal rcpp_row_quantile.
testlist <- list(data = structure(Inf, .Dim = c(1L, 1L)), q = 1.63155298226155e-319)
# do.call unpacks the list elements as the function's arguments.
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
# Print the structure so the result's shape (or a crash) is visible.
str(result)
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610556579-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 154
|
r
|
# Fuzzer-generated regression input: a 1x1 matrix holding Inf plus a
# subnormal quantile value, fed to biwavelet's internal rcpp_row_quantile.
testlist <- list(data = structure(Inf, .Dim = c(1L, 1L)), q = 1.63155298226155e-319)
# do.call unpacks the list elements as the function's arguments.
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
# Print the structure so the result's shape (or a crash) is visible.
str(result)
|
## creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # m holds the cached inverse; NULL until set.
  m <- NULL
  set <- function(y) {
    # Swapping in a new matrix drops any stale cached inverse.
    x <<- y
    m <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  # Accessor list consumed by cacheSolve().
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
#This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
#If the inverse has already been calculated (and the matrix has not changed),
#then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  m <- x$getinverse()
  if(!is.null(m)) {
    # Cache hit: reuse the stored inverse.
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # Cache miss: compute via solve() (extra args forwarded) and memoise it.
  m <- solve(data, ...)
  x$setinverse(m)
  m
  ## Return a matrix that is the inverse of 'x'
}
|
/cachematrix.R
|
no_license
|
YuchengXie1/ProgrammingAssignment2
|
R
| false
| false
| 905
|
r
|
## creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # m holds the cached inverse; NULL until set.
  m <- NULL
  set <- function(y) {
    # Swapping in a new matrix drops any stale cached inverse.
    x <<- y
    m <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  # Accessor list consumed by cacheSolve().
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
#This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
#If the inverse has already been calculated (and the matrix has not changed),
#then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  m <- x$getinverse()
  if(!is.null(m)) {
    # Cache hit: reuse the stored inverse.
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # Cache miss: compute via solve() (extra args forwarded) and memoise it.
  m <- solve(data, ...)
  x$setinverse(m)
  m
  ## Return a matrix that is the inverse of 'x'
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 344
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 344
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query26_trivial_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 203
c no.of clauses 344
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 344
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query26_trivial_1344.qdimacs 203 344 E1 [] 0 8 195 344 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query26_trivial_1344/query26_trivial_1344.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 701
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 344
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 344
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query26_trivial_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 203
c no.of clauses 344
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 344
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query26_trivial_1344.qdimacs 203 344 E1 [] 0 8 195 344 NONE
|
#' Sina plot
#'
#' The sina plot is a data visualization chart suitable for plotting any single
#' variable in a multiclass dataset. It is an enhanced jitter strip chart,
#' where the width of the jitter is controlled by the density distribution of
#' the data within each class.
#'
#' @details There are two available ways to define the x-axis borders for the
#' samples to spread within:
#' \itemize{
#' \item{\code{method == "density"}
#'
#' A density kernel is estimated along the y-axis for every sample group. The
#' borders are then defined by the density curve. Tuning parameter
#' \code{adjust} can be used to control the density bandwidth in the same way
#' it is used in \code{\link[stats]{density}}. }
#'
#' \item{\code{method == "counts"}:
#'
#' The borders are defined by the number of samples that occupy the same bin.
#'
#' }
#' }
#'
#' @section Aesthetics:
#' \Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("geom", "point")}
#'
#' @param mapping Set of aesthetic mappings created by \code{\link[ggplot2]{aes}}
#' or \code{\link[ggplot2]{aes_}}. If specified and \code{inherit.aes = TRUE}
#' (the default), is combined with the default mapping at the top level of the
#' plot. You only need to supply mapping if there isn't a mapping defined for
#' the plot.
#'
#' @param data A data frame. If specified, overrides the default data frame
#' defined at the top level of the plot.
#'
#' @param stat The statistical transformation to use on the data for this layer,
#' as a string.
#'
#'
#' @param position Position adjustment, either as a string, or the result of a
#' call to a position adjustment function.
#'
#' @param binwidth The width of the bins. The default is to use \code{bins}
#' bins that cover the range of the data. You should always override
#' this value, exploring multiple widths to find the best to illustrate the
#' stories in your data.
#'
#' @param bins Number of bins. Overridden by binwidth. Defaults to 50.
#'
#' @param scale Logical. When set to \code{TRUE} x-coordinate widths across all
#' groups are scaled based on the densest area in the plot.
#' Default: \code{TRUE}
#'
#' @param method Choose the method to spread the samples within the same
#' bin along the x-axis. Available methods: "density", "counts" (can be
#' abbreviated, e.g. "d"). See \code{Details}.
#'
#' @param maxwidth Control the maximum width the points can spread into. Values
#' between 0 and 1.
#'
#' @param adjust Adjusts the bandwidth of the density kernel when
#' \code{method == "density"} (see \code{\link[stats]{density}}).
#'
#' @param bin_limit If the samples within the same y-axis bin are more
#' than \code{bin_limit}, the samples' X coordinates will be adjusted.
#'
#' @param ... other arguments passed on to \code{\link[ggplot2]{layer}}. There
#' are three types of arguments you can use here:
#' \itemize{
#' \item{Aesthetics: to set an aesthetic to a fixed value, like
#' \code{color = "red"} or \code{size = 3.}}
#' \item{Other arguments to the layer, for example you override the default
#' \code{stat} associated with the layer.}
#' \item{Other arguments passed on to the stat.}
#' }
#'
#' @param na.rm If \code{FALSE} (the default), removes missing values with a
#' warning. If \code{TRUE} silently removes missing values.
#'
#' @param show.legend logical. Should this layer be included in the legends?
#' \code{NA}, the default, includes if any aesthetics are mapped. \code{FALSE}
#' never includes, and \code{TRUE} always includes.
#'
#' @param inherit.aes If \code{FALSE}, overrides the default aesthetics, rather
#' than combining with them. This is most useful for helper functions that
#' define both data and aesthetics and shouldn't inherit behaviour from the
#' default plot specification, e.g. borders.
#'
#' @param geom, stat Override the default connection between \code{geom_sina}
#' and \code{stat_sina}.
#'
#' @author Nikos Sidiropoulos
#'
#' @name geom_sina
#' @rdname geom_sina
#'
#' @section Computed variables:
#'
#' \describe{
#' \item{bin_counts}{sample counts per bin per group}
#' \item{scaled}{adjusted x-coordinates}
#' }
#'
#'
#' @examples
#' ggplot(midwest, aes(state, area)) + geom_point()
#'
#' # Boxplot and Violin plots convey information on the distribution but not the
#' # number of samples, while Jitter does the opposite.
#' ggplot(midwest, aes(state, area)) + geom_violin()
#' ggplot(midwest, aes(state, area)) + geom_jitter()
#'
#' # Sina does both!
#' ggplot(midwest, aes(state, area)) + geom_violin() + geom_sina()
#'
#' p <- ggplot(midwest, aes(state, popdensity)) + scale_y_log10()
#' p + geom_sina()
#'
#' # Colour the points based on the data set's columns
#' p + geom_sina(aes(colour = inmetro))
#'
#' # Or any other way
#' cols <- midwest$popdensity > 10000
#' p + geom_sina(colour = cols + 1L)
#'
#' # Sina plots with continuous x:
#' p <- ggplot(midwest, aes(cut_width(area, 0.02), popdensity)) + scale_y_log10()
#' p + geom_sina()
#'
#'
#' ###Sample gaussian distributions
#' # Unimodal
#' a <- rnorm(500, 6, 1)
#' b <- rnorm(400, 5, 1.5)
#'
#' # Bimodal
#' c <- c(rnorm(200, 3, .7), rnorm(50, 7, 0.4))
#'
#' # Trimodal
#' d <- c(rnorm(200, 2, 0.7), rnorm(300, 5.5, 0.4), rnorm(100, 8, 0.4))
#'
#' df <- data.frame(
#' "Distribution" = c(rep("Unimodal 1", length(a)),
#' rep("Unimodal 2", length(b)),
#' rep("Bimodal", length(c)),
#' rep("Trimodal", length(d))),
#' "Value" = c(a, b, c, d))
#'
#' # Reorder levels
#' df$Distribution <- factor(df$Distribution,
#' levels(df$Distribution)[c(3, 4, 1, 2)])
#'
#' p <- ggplot(df, aes(Distribution, Value))
#' p + geom_boxplot()
#' p + geom_violin() + geom_sina()
#'
#' # By default, Sina plot scales the width of the class according to the width
#' # of the class with the highest density. Turn group-wise scaling off with:
#' p + geom_violin() + geom_sina(scale = FALSE)
NULL
#' @rdname ggforce-extensions
#' @format NULL
#' @usage NULL
#' @importFrom ggplot2 ggproto Stat
#' @importFrom plyr ddply
#' @export
# ggproto Stat powering stat_sina()/geom_sina(): bins observations along the
# y axis and jitters each point's x coordinate within borders derived from a
# density estimate or from per-bin counts.
StatSina <- ggproto("StatSina", Stat,
  required_aes = c("x", "y"),
  # xend carries the jittered x coordinate; GeomSina copies it back into x.
  default_aes = aes(xend = ..scaled..),
  setup_data = function(data, params) {
    # A continuous x with no group aesthetic is ambiguous -- refuse it.
    if (is.double(data$x) && !.has_groups(data) && any(data$x != data$x[1L])) {
      stop("Continuous x aesthetic -- did you forget aes(group=...)?",
        call. = FALSE)
    }
    data
  },
  setup_params = function(data, params) {
    #Limit maxwidth to 0.96 to leave some space between groups
    if (!is.null(params$maxwidth))
      params$maxwidth <- (min(abs(params$maxwidth), .96))
    else
      params$maxwidth <- 0.96
    # Default to 50 bins when neither binwidth nor bins was supplied.
    if (is.null(params$binwidth) && is.null(params$bins)) {
      params$bins <- 50
    }
    params
  },
  compute_panel = function(self, data, scales, binwidth = NULL, bins = NULL,
                           scale = TRUE, method = "density", maxwidth = NULL,
                           adjust = 1, bin_limit = 1, na.rm = FALSE) {
    # binwidth takes precedence over bins; 1e-8 pads the y range slightly.
    if (!is.null(binwidth))
      bins <- bin_breaks_width(scales$y$dimension() + 1e-8, binwidth)
    else
      bins <- bin_breaks_bins(scales$y$dimension() + 1e-8, bins)
    # Delegate the per-group work to compute_group() via the parent Stat.
    data <- ggproto_parent(Stat, self)$compute_panel(data, scales,
      scale = scale, method = method, maxwidth = maxwidth, adjust = adjust,
      bin_limit = bin_limit, bins = bins$breaks, na.rm = na.rm)
    #scale all bins based on their density relative to the densest bin
    if (scale) {
      group_scaling_factor <-
        ddply(data, "group", plyr::mutate,
              group_max = max(bin_counts))$group_max / max(data$bin_counts)
    } else {
      group_scaling_factor <- 1
    }
    data$scaled <- data$x + data$x_translation * group_scaling_factor
    data$x_translation <- NULL
    data
  },
  compute_group = function(data, scales, scale = TRUE, method = "density",
                           maxwidth = NULL, adjust = 1, bin_limit = 1,
                           bins = NULL, na.rm = FALSE) {
    #initialize x_translation and bin_counts to 0
    data$x_translation <- data$bin_counts <- rep(0, nrow(data))
    #if group has less than 2 points return as is
    if (nrow(data) < 2) {
      data$bin_counts <- 1
      return(data)
    }
    #per bin sample count
    bin_counts <- table(findInterval(data$y, bins))
    #per bin sample density
    if (method == "density") {
      densities <- stats::density(data$y, adjust = adjust)
      #confine the samples in a (-maxwidth/2, +maxwidth/2) area around the
      #group's center
      # NOTE(review): both branches of this if/else compute the same value,
      # so the condition has no effect; the else branch was presumably meant
      # to leave the factor at 1 when the density already fits -- TODO
      # confirm the intended behaviour before changing it.
      if (max(densities$y) > 0.5 * maxwidth)
        intra_scaling_factor <- 0.5 * maxwidth / max(densities$y)
      else
        intra_scaling_factor <- (0.5 * maxwidth) / max(densities$y)
    } else {
      #allow up to 50 samples in a bin without scaling
      # NOTE(review): as above, the two branches compute the same value.
      if (max(bin_counts) > 50 * maxwidth) {
        intra_scaling_factor <- 50 * maxwidth / max(bin_counts)
      } else
        intra_scaling_factor <- (50 * maxwidth) / max(bin_counts)
    }
    for (i in names(bin_counts)) {
      #examine bins with more than 'bin_limit' samples
      if (bin_counts[i] > bin_limit){
        cur_bin <- bins[ as.integer(i) : (as.integer(i) + 1)]
        #find samples in the current bin and translate their X coord.
        points <- findInterval(data$y, cur_bin) == 1
        #compute the border margin for the current bin.
        if (method == "density")
          xmax <- mean(densities$y[findInterval(densities$x, cur_bin) == 1])
        else
          xmax <- bin_counts[i] / 100
        #assign the samples uniformly within the specified range
        x_translation <- stats::runif(bin_counts[i], - xmax, xmax)
        #scale and store new x coordinates
        data$x_translation[points] <- x_translation * intra_scaling_factor
        #store bin counts. Used for group-wise scaling.
        data$bin_counts[points] <- bin_counts[i]
      }
    }
    data
  }
)
#' @rdname geom_sina
#' @importFrom ggplot2 layer
#' @export
stat_sina <- function(mapping = NULL, data = NULL,
                      geom = "sina", position = "identity",
                      ...,
                      binwidth = NULL,
                      bins = NULL,
                      scale = TRUE,
                      method = "density",
                      maxwidth = NULL,
                      adjust = 1,
                      bin_limit = 1,
                      na.rm = FALSE,
                      show.legend = NA,
                      inherit.aes = TRUE) {
  # Resolve (and validate) the spreading method up front; partial matches
  # such as "d" are accepted.
  method <- match.arg(method, c("density", "counts"))
  # Bundle the stat-specific settings before handing them to the layer.
  stat_params <- list(
    binwidth = binwidth,
    bins = bins,
    scale = scale,
    method = method,
    maxwidth = maxwidth,
    adjust = adjust,
    bin_limit = bin_limit,
    na.rm = na.rm,
    ...
  )
  layer(
    stat = StatSina,
    data = data,
    mapping = mapping,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = stat_params
  )
}
#' @rdname ggforce-extensions
#' @format NULL
#' @usage NULL
#' @importFrom ggplot2 ggproto GeomPoint
#' @export
# Point geom for sina plots: identical to GeomPoint except that, before
# drawing, it replaces x with the jittered coordinate StatSina stored in
# xend (wired up through StatSina's default xend = ..scaled.. aesthetic).
GeomSina <- ggproto("GeomSina", GeomPoint,
  setup_data = function(data, params) {
    transform(data, x = xend)
  }
)
#' @rdname geom_sina
#' @importFrom ggplot2 layer
#' @export
geom_sina <- function(mapping = NULL, data = NULL,
                      stat = "sina", position = "identity",
                      ...,
                      na.rm = FALSE,
                      show.legend = NA,
                      inherit.aes = TRUE) {
  # Thin wrapper: attach GeomSina to the requested stat ("sina" by
  # default) and forward everything else to ggplot2::layer().
  layer(
    geom = GeomSina,
    stat = stat,
    data = data,
    mapping = mapping,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = c(list(na.rm = na.rm), list(...))
  )
}
# Binning functions -------------------------------------------------------
bins <- function(breaks, closed = c("right", "left"),
                 fuzz = 1e-08 * stats::median(diff(breaks))) {
  # Construct a "ggplot2_bins" object: the sorted break points plus fuzzed
  # copies that guard interval membership tests against floating point
  # rounding (adapted from base::hist).
  stopifnot(is.numeric(breaks))
  closed <- match.arg(closed)
  breaks <- sort(breaks)
  n <- length(breaks)
  # Right-closed bins nudge the leftmost edge down and every other edge up;
  # left-closed bins mirror that.
  fuzzes <- if (closed == "right") {
    c(-fuzz, rep.int(fuzz, n - 1))
  } else {
    c(rep.int(-fuzz, n - 1), fuzz)
  }
  structure(
    list(
      breaks = breaks,
      fuzzy = breaks + fuzzes,
      right_closed = closed == "right"
    ),
    class = "ggplot2_bins"
  )
}
# Compute parameters -----------------------------------------------------------
# Thin wrapper around bins(); kept so the bin_breaks_*() helpers below share
# a single entry point for constructing "ggplot2_bins" objects.
bin_breaks <- function(breaks, closed = c("right", "left")) {
  bins(breaks, closed)
}
bin_breaks_width <- function(x_range, width = NULL, center = NULL,
                             boundary = NULL, closed = c("right", "left")) {
  # Build fixed-width bin breaks covering x_range. The grid is anchored by
  # either `boundary` (a bin edge) or `center` (a bin midpoint); supplying
  # both is an error.
  stopifnot(length(x_range) == 2)
  stopifnot(is.numeric(width), length(width) == 1)
  if (width <= 0) {
    stop("`binwidth` must be positive", call. = FALSE)
  }
  if (!is.null(boundary) && !is.null(center)) {
    stop("Only one of 'boundary' and 'center' may be specified.")
  }
  if (is.null(boundary)) {
    # With neither edge nor center supplied, mimic the tile layer's
    # algorithm so the min and max of the data land in the outer half of
    # their bins; otherwise derive the edge from the requested center.
    boundary <- if (is.null(center)) width / 2 else center - width / 2
  }
  # Inputs could be Dates or POSIXct, so coerce to plain numbers first.
  x_range <- as.numeric(x_range)
  width <- as.numeric(width)
  boundary <- as.numeric(boundary)
  # Left side of the left-most bin: shift the boundary down by whole widths.
  origin <- boundary + floor((x_range[1] - boundary) / width) * width
  # The (1 - 1e-08) factor avoids a spurious extra bin when the range is an
  # exact multiple of the width (e.g. origin = 0, max(x) = 20, width = 10).
  max_x <- x_range[2] + (1 - 1e-08) * width
  bin_breaks(seq(origin, max_x, width), closed = closed)
}
bin_breaks_bins <- function(x_range, bins = 30, center = NULL,
                            boundary = NULL, closed = c("right", "left")) {
  # Translate a requested number of bins into a bin width and delegate the
  # actual break construction to bin_breaks_width().
  stopifnot(length(x_range) == 2)
  bins <- as.integer(bins)
  if (bins < 1) {
    stop("Need at least one bin.", call. = FALSE)
  }
  if (bins == 1) {
    # A single bin spans the whole range, anchored at its left edge.
    width <- diff(x_range)
    boundary <- x_range[1]
  } else {
    width <- (x_range[2] - x_range[1]) / (bins - 1)
  }
  bin_breaks_width(x_range, width, boundary = boundary, center = center,
                   closed = closed)
}
.has_groups <- function(data) {
  # A group aesthetic was mapped iff the group column differs from the
  # sentinel -1L that ggplot2 assigns when no grouping is present (values
  # produced by plyr::id() are never -1L). Yields NA for 0-row data frames.
  data[["group"]][1L] != -1L
}
|
/R/sina.R
|
no_license
|
PingPi357/ggforce
|
R
| false
| false
| 14,896
|
r
|
#' Sina plot
#'
#' The sina plot is a data visualization chart suitable for plotting any single
#' variable in a multiclass dataset. It is an enhanced jitter strip chart,
#' where the width of the jitter is controlled by the density distribution of
#' the data within each class.
#'
#' @details There are two available ways to define the x-axis borders for the
#' samples to spread within:
#' \itemize{
#' \item{\code{method == "density"}
#'
#' A density kernel is estimated along the y-axis for every sample group. The
#' borders are then defined by the density curve. Tuning parameter
#' \code{adjust} can be used to control the density bandwidth in the same way
#' it is used in \code{\link[stats]{density}}. }
#'
#' \item{\code{method == "counts"}:
#'
#' The borders are defined by the number of samples that occupy the same bin.
#'
#' }
#' }
#'
#' @section Aesthetics:
#' \Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("geom", "point")}
#'
#' @param mapping Set of aesthetic mappings created by \code{\link[ggplot2]{aes}}
#' or \code{\link[ggplot2]{aes_}}. If specified and \code{inherit.aes = TRUE}
#' (the default), is combined with the default mapping at the top level of the
#' plot. You only need to supply mapping if there isn't a mapping defined for
#' the plot.
#'
#' @param data A data frame. If specified, overrides the default data frame
#' defined at the top level of the plot.
#'
#' @param stat The statistical transformation to use on the data for this layer,
#' as a string.
#'
#'
#' @param position Position adjustment, either as a string, or the result of a
#' call to a position adjustment function.
#'
#' @param binwidth The width of the bins. The default is to use \code{bins}
#' bins that cover the range of the data. You should always override
#' this value, exploring multiple widths to find the best to illustrate the
#' stories in your data.
#'
#' @param bins Number of bins. Overridden by binwidth. Defaults to 50.
#'
#' @param scale Logical. When set to \code{TRUE} x-coordinate widths across all
#' groups are scaled based on the densest area in the plot.
#' Default: \code{TRUE}
#'
#' @param method Choose the method to spread the samples within the same
#' bin along the x-axis. Available methods: "density", "counts" (can be
#' abbreviated, e.g. "d"). See \code{Details}.
#'
#' @param maxwidth Control the maximum width the points can spread into. Values
#' between 0 and 1.
#'
#' @param adjust Adjusts the bandwidth of the density kernel when
#' \code{method == "density"} (see \code{\link[stats]{density}}).
#'
#' @param bin_limit If the samples within the same y-axis bin are more
#' than \code{bin_limit}, the samples' X coordinates will be adjusted.
#'
#' @param ... other arguments passed on to \code{\link[ggplot2]{layer}}. There
#' are three types of arguments you can use here:
#' \itemize{
#' \item{Aesthetics: to set an aesthetic to a fixed value, like
#' \code{color = "red"} or \code{size = 3.}}
#' \item{Other arguments to the layer, for example you override the default
#' \code{stat} associated with the layer.}
#' \item{Other arguments passed on to the stat.}
#' }
#'
#' @param na.rm If \code{FALSE} (the default), removes missing values with a
#' warning. If \code{TRUE} silently removes missing values.
#'
#' @param show.legend logical. Should this layer be included in the legends?
#' \code{NA}, the default, includes if any aesthetics are mapped. \code{FALSE}
#' never includes, and \code{TRUE} always includes.
#'
#' @param inherit.aes If \code{FALSE}, overrides the default aesthetics, rather
#' than combining with them. This is most useful for helper functions that
#' define both data and aesthetics and shouldn't inherit behaviour from the
#' default plot specification, e.g. borders.
#'
#' @param geom, stat Override the default connection between \code{geom_sina}
#' and \code{stat_sina}.
#'
#' @author Nikos Sidiropoulos
#'
#' @name geom_sina
#' @rdname geom_sina
#'
#' @section Computed variables:
#'
#' \describe{
#' \item{bin_counts}{sample counts per bin per group}
#' \item{scaled}{adjusted x-coordinates}
#' }
#'
#'
#' @examples
#' ggplot(midwest, aes(state, area)) + geom_point()
#'
#' # Boxplot and Violin plots convey information on the distribution but not the
#' # number of samples, while Jitter does the opposite.
#' ggplot(midwest, aes(state, area)) + geom_violin()
#' ggplot(midwest, aes(state, area)) + geom_jitter()
#'
#' # Sina does both!
#' ggplot(midwest, aes(state, area)) + geom_violin() + geom_sina()
#'
#' p <- ggplot(midwest, aes(state, popdensity)) + scale_y_log10()
#' p + geom_sina()
#'
#' # Colour the points based on the data set's columns
#' p + geom_sina(aes(colour = inmetro))
#'
#' # Or any other way
#' cols <- midwest$popdensity > 10000
#' p + geom_sina(colour = cols + 1L)
#'
#' # Sina plots with continuous x:
#' p <- ggplot(midwest, aes(cut_width(area, 0.02), popdensity)) + scale_y_log10()
#' p + geom_sina()
#'
#'
#' ###Sample gaussian distributions
#' # Unimodal
#' a <- rnorm(500, 6, 1)
#' b <- rnorm(400, 5, 1.5)
#'
#' # Bimodal
#' c <- c(rnorm(200, 3, .7), rnorm(50, 7, 0.4))
#'
#' # Trimodal
#' d <- c(rnorm(200, 2, 0.7), rnorm(300, 5.5, 0.4), rnorm(100, 8, 0.4))
#'
#' df <- data.frame(
#' "Distribution" = c(rep("Unimodal 1", length(a)),
#' rep("Unimodal 2", length(b)),
#' rep("Bimodal", length(c)),
#' rep("Trimodal", length(d))),
#' "Value" = c(a, b, c, d))
#'
#' # Reorder levels
#' df$Distribution <- factor(df$Distribution,
#' levels(df$Distribution)[c(3, 4, 1, 2)])
#'
#' p <- ggplot(df, aes(Distribution, Value))
#' p + geom_boxplot()
#' p + geom_violin() + geom_sina()
#'
#' # By default, Sina plot scales the width of the class according to the width
#' # of the class with the highest density. Turn group-wise scaling off with:
#' p + geom_violin() + geom_sina(scale = FALSE)
NULL
#' @rdname ggforce-extensions
#' @format NULL
#' @usage NULL
#' @importFrom ggplot2 ggproto Stat
#' @importFrom plyr ddply
#' @export
StatSina <- ggproto("StatSina", Stat,
  required_aes = c("x", "y"),
  default_aes = aes(xend = ..scaled..),
  # Sina needs discrete classes (or explicit groups) to jitter within, so
  # reject a varying continuous x with no group aesthetic.
  setup_data = function(data, params) {
    if (is.double(data$x) && !.has_groups(data) && any(data$x != data$x[1L])) {
      stop("Continuous x aesthetic -- did you forget aes(group=...)?",
           call. = FALSE)
    }
    data
  },
  setup_params = function(data, params) {
    # Limit maxwidth to 0.96 to leave some space between groups
    if (!is.null(params$maxwidth))
      params$maxwidth <- (min(abs(params$maxwidth), .96))
    else
      params$maxwidth <- 0.96
    if (is.null(params$binwidth) && is.null(params$bins)) {
      params$bins <- 50
    }
    params
  },
  compute_panel = function(self, data, scales, binwidth = NULL, bins = NULL,
                           scale = TRUE, method = "density", maxwidth = NULL,
                           adjust = 1, bin_limit = 1, na.rm = FALSE) {
    # Bin the panel's y range once so all groups share the same breaks.
    if (!is.null(binwidth))
      bins <- bin_breaks_width(scales$y$dimension() + 1e-8, binwidth)
    else
      bins <- bin_breaks_bins(scales$y$dimension() + 1e-8, bins)
    data <- ggproto_parent(Stat, self)$compute_panel(data, scales,
      scale = scale, method = method, maxwidth = maxwidth, adjust = adjust,
      bin_limit = bin_limit, bins = bins$breaks, na.rm = na.rm)
    # Scale all groups based on their density relative to the densest group.
    if (scale) {
      group_scaling_factor <-
        ddply(data, "group", plyr::mutate,
              group_max = max(bin_counts))$group_max / max(data$bin_counts)
    } else {
      group_scaling_factor <- 1
    }
    data$scaled <- data$x + data$x_translation * group_scaling_factor
    data$x_translation <- NULL
    data
  },
  compute_group = function(data, scales, scale = TRUE, method = "density",
                           maxwidth = NULL, adjust = 1, bin_limit = 1,
                           bins = NULL, na.rm = FALSE) {
    # Initialize x_translation and bin_counts to 0
    data$x_translation <- data$bin_counts <- rep(0, nrow(data))
    # A group with fewer than 2 points cannot be spread; return as is.
    if (nrow(data) < 2) {
      data$bin_counts <- 1
      return(data)
    }
    # Per-bin sample count
    bin_counts <- table(findInterval(data$y, bins))
    if (method == "density") {
      densities <- stats::density(data$y, adjust = adjust)
      # Confine the samples to a (-maxwidth/2, maxwidth/2) band around the
      # group's center; only shrink when the density would overflow it.
      # BUG FIX: the else branch previously duplicated the if branch, which
      # also *inflated* sparse groups; no rescaling (factor 1) is intended
      # (matches upstream ggforce).
      if (max(densities$y) > 0.5 * maxwidth)
        intra_scaling_factor <- 0.5 * maxwidth / max(densities$y)
      else
        intra_scaling_factor <- 1
    } else {
      # Allow up to 50 samples in a bin without scaling.
      # BUG FIX: same duplicated-branch defect as the density case above.
      if (max(bin_counts) > 50 * maxwidth) {
        intra_scaling_factor <- 50 * maxwidth / max(bin_counts)
      } else
        intra_scaling_factor <- 1
    }
    for (i in names(bin_counts)) {
      # Only spread bins holding more than 'bin_limit' samples
      if (bin_counts[i] > bin_limit) {
        cur_bin <- bins[as.integer(i):(as.integer(i) + 1)]
        # Find samples in the current bin and translate their x coordinate.
        points <- findInterval(data$y, cur_bin) == 1
        # Compute the border margin for the current bin.
        if (method == "density")
          xmax <- mean(densities$y[findInterval(densities$x, cur_bin) == 1])
        else
          xmax <- bin_counts[i] / 100
        # Assign the samples uniformly within the allowed range
        x_translation <- stats::runif(bin_counts[i], -xmax, xmax)
        # Scale and store new x coordinates
        data$x_translation[points] <- x_translation * intra_scaling_factor
        # Store bin counts; used later for group-wise scaling.
        data$bin_counts[points] <- bin_counts[i]
      }
    }
    data
  }
)
#' @rdname geom_sina
#' @importFrom ggplot2 layer
#' @export
# Constructor for the sina stat layer; mirrors ggplot2's stat_* wrappers.
# All sina-specific knobs (binning, scaling, jitter method) are forwarded
# to StatSina via the params list.
stat_sina <- function(mapping = NULL, data = NULL,
                      geom = "sina", position = "identity",
                      ...,
                      binwidth = NULL,
                      bins = NULL,
                      scale = TRUE,
                      method = "density",
                      maxwidth = NULL,
                      adjust = 1,
                      bin_limit = 1,
                      na.rm = FALSE,
                      show.legend = NA,
                      inherit.aes = TRUE) {
  method <- match.arg(method, c("density", "counts"))
  stat_params <- list(
    binwidth = binwidth,
    bins = bins,
    scale = scale,
    method = method,
    maxwidth = maxwidth,
    adjust = adjust,
    bin_limit = bin_limit,
    na.rm = na.rm,
    ...
  )
  layer(
    data = data,
    mapping = mapping,
    stat = StatSina,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = stat_params
  )
}
#' @rdname ggforce-extensions
#' @format NULL
#' @usage NULL
#' @importFrom ggplot2 ggproto GeomPoint
#' @export
# Point geom whose x position is taken from the xend (jittered) aesthetic
# computed by StatSina.
GeomSina <- ggproto("GeomSina", GeomPoint,
  setup_data = function(data, params) {
    data$x <- data$xend
    data
  }
)
#' @rdname geom_sina
#' @importFrom ggplot2 layer
#' @export
# Convenience wrapper pairing GeomSina with the sina stat, following the
# standard ggplot2 geom_* constructor shape.
geom_sina <- function(mapping = NULL, data = NULL,
                      stat = "sina", position = "identity",
                      ...,
                      na.rm = FALSE,
                      show.legend = NA,
                      inherit.aes = TRUE) {
  layer(
    geom = GeomSina,
    stat = stat,
    data = data,
    mapping = mapping,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
}
# Binning functions -------------------------------------------------------
# Build a "ggplot2_bins" structure from a vector of break points.
# `fuzzy` carries breaks nudged by a tiny offset (adapted from base::hist)
# so that values landing exactly on a break are not misassigned by
# floating-point rounding; the nudge direction depends on which side of
# each bin is closed.
bins <- function(breaks, closed = c("right", "left"),
                 fuzz = 1e-08 * stats::median(diff(breaks))) {
  stopifnot(is.numeric(breaks))
  closed <- match.arg(closed)
  breaks <- sort(breaks)
  n <- length(breaks)
  if (closed == "right") {
    offsets <- c(-fuzz, rep.int(fuzz, n - 1))
  } else {
    offsets <- c(rep.int(-fuzz, n - 1), fuzz)
  }
  structure(
    list(
      breaks = breaks,
      fuzzy = breaks + offsets,
      right_closed = (closed == "right")
    ),
    class = "ggplot2_bins"
  )
}
# Compute parameters -----------------------------------------------------------
# Thin wrapper around bins(), kept for parity with ggplot2's internal
# binning API; performs no validation of its own.
bin_breaks <- function(breaks, closed = c("right", "left")) {
  bins(breaks, closed)
}
# Build fixed-width bin breaks covering x_range, anchored either by a bin
# 'boundary' (a break position) or a bin 'center' — mutually exclusive.
bin_breaks_width <- function(x_range, width = NULL, center = NULL,
                             boundary = NULL, closed = c("right", "left")) {
  stopifnot(length(x_range) == 2)
  stopifnot(is.numeric(width), length(width) == 1)
  if (width <= 0) {
    stop("`binwidth` must be positive", call. = FALSE)
  }
  if (!is.null(boundary) && !is.null(center)) {
    stop("Only one of 'boundary' and 'center' may be specified.")
  }
  if (is.null(boundary)) {
    # No anchor given: default to the tile layer's algorithm, which places
    # the data min and max in the outer half of their bins; otherwise
    # derive the boundary from the requested bin center.
    boundary <- if (is.null(center)) width / 2 else center - width / 2
  }
  # Inputs could be Dates or POSIXct, so coerce to numeric before doing
  # arithmetic on them.
  x_range <- as.numeric(x_range)
  width <- as.numeric(width)
  boundary <- as.numeric(boundary)
  # Left edge of the left-most bin.
  shift <- floor((x_range[1] - boundary) / width)
  origin <- boundary + shift * width
  # Small correction factor so that we don't get an extra bin when, for
  # example, origin = 0, max(x) = 20, width = 10.
  max_x <- x_range[2] + (1 - 1e-08) * width
  bin_breaks(seq(origin, max_x, width), closed = closed)
}
# Translate a requested bin *count* into a bin *width* and delegate to
# bin_breaks_width().
bin_breaks_bins <- function(x_range, bins = 30, center = NULL,
                            boundary = NULL, closed = c("right", "left")) {
  stopifnot(length(x_range) == 2)
  bins <- as.integer(bins)
  if (bins < 1) {
    stop("Need at least one bin.", call. = FALSE)
  }
  if (bins == 1) {
    width <- diff(x_range)
    boundary <- x_range[1]
  } else {
    # bins - 1 interior widths span the range; the outermost bins extend
    # half a width past it.
    width <- (x_range[2] - x_range[1]) / (bins - 1)
  }
  bin_breaks_width(x_range, width, boundary = boundary, center = center,
                   closed = closed)
}
# If no group aesthetic is specified, every value of the group column is
# -1L; if one is specified, none are (they come from plyr::id()). Returns
# NA for a 0-row data frame, since there is no first element to inspect.
.has_groups <- function(data) {
  first_group <- data$group[1L]
  first_group != -1L
}
|
# Install and load the coronavirus dataset package (one-time setup)
install.packages("coronavirus")
library(coronavirus)
update_dataset()
covid19_df <- refresh_coronavirus_jhu()
head(covid19_df)
data("coronavirus")
head(coronavirus)
library(tidyr)
library(dplyr)
# Total confirmed cases per country, most affected first
summary_df <- coronavirus %>%
  filter(type == "confirmed") %>%
  group_by(country) %>%
  summarise(total_cases = sum(cases)) %>%
  arrange(-total_cases)
summary_df %>% head(20)
library(plotly)
# Worldwide stacked-area chart of cumulative active / recovered / death counts
coronavirus %>%
  group_by(type, date) %>%
  summarise(total_cases = sum(cases)) %>%
  pivot_wider(names_from = type, values_from = total_cases) %>%
  arrange(date) %>%
  mutate(active = confirmed - death - recovered) %>%
  mutate(active_total = cumsum(active),
         recovered_total = cumsum(recovered),
         death_total = cumsum(death)) %>%
  plot_ly(x = ~ date,
          y = ~ active_total,
          name = 'Active',
          fillcolor = '#1f77b4',
          type = 'scatter',
          mode = 'none',
          stackgroup = 'one') %>%
  add_trace(y = ~ death_total,
            name = "Death",
            fillcolor = '#E41317') %>%
  add_trace(y = ~recovered_total,
            name = 'Recovered',
            fillcolor = 'forestgreen') %>%
  layout(title = "Distribution of Covid19 Cases Worldwide",
         legend = list(x = 0.1, y = 0.9),
         yaxis = list(title = "Number of Cases"),
         xaxis = list(title = "Source: Johns Hopkins University Center for Systems Science and Engineering"))
#### # Plot US data
unique(coronavirus$country)
levels(factor(coronavirus$country))
# BUG FIX: row subsetting of a data frame needs the trailing comma;
# `coronavirus[coronavirus$country == "US"]` selected columns and errored.
us <- coronavirus[coronavirus$country == "US", ]
unique(coronavirus$country)
# BUG FIX: the chart below is titled "in U.S." but previously piped the
# full worldwide table; it now starts from the `us` subset built above.
us %>%
  group_by(type, date) %>%
  summarise(total_cases = sum(cases)) %>%
  pivot_wider(names_from = type, values_from = total_cases) %>%
  arrange(date) %>%
  mutate(active = confirmed - death - recovered) %>%
  mutate(active_total = cumsum(active),
         recovered_total = cumsum(recovered),
         death_total = cumsum(death)) %>%
  plot_ly(x = ~ date,
          y = ~ active_total,
          name = 'Active',
          fillcolor = '#1f77b4',
          type = 'scatter',
          mode = 'none',
          stackgroup = 'one') %>%
  add_trace(y = ~ death_total,
            name = "Death",
            fillcolor = '#E41317') %>%
  add_trace(y = ~recovered_total,
            name = 'Recovered',
            fillcolor = 'forestgreen') %>%
  layout(title = "Distribution of Covid19 Cases in U.S.",
         legend = list(x = 0.1, y = 0.9),
         yaxis = list(title = "Number of Cases"),
         xaxis = list(title = "Source: Johns Hopkins University Center for Systems Science and Engineering"))
|
/Rscripts/COVID19.R
|
no_license
|
kahot/MarineEcologyCourse
|
R
| false
| false
| 2,737
|
r
|
# Install and load the coronavirus dataset package (one-time setup)
install.packages("coronavirus")
library(coronavirus)
update_dataset()
covid19_df <- refresh_coronavirus_jhu()
head(covid19_df)
data("coronavirus")
head(coronavirus)
library(tidyr)
library(dplyr)
# Total confirmed cases per country, most affected first
summary_df <- coronavirus %>%
  filter(type == "confirmed") %>%
  group_by(country) %>%
  summarise(total_cases = sum(cases)) %>%
  arrange(-total_cases)
summary_df %>% head(20)
library(plotly)
# Worldwide stacked-area chart of cumulative active / recovered / death counts
coronavirus %>%
  group_by(type, date) %>%
  summarise(total_cases = sum(cases)) %>%
  pivot_wider(names_from = type, values_from = total_cases) %>%
  arrange(date) %>%
  mutate(active = confirmed - death - recovered) %>%
  mutate(active_total = cumsum(active),
         recovered_total = cumsum(recovered),
         death_total = cumsum(death)) %>%
  plot_ly(x = ~ date,
          y = ~ active_total,
          name = 'Active',
          fillcolor = '#1f77b4',
          type = 'scatter',
          mode = 'none',
          stackgroup = 'one') %>%
  add_trace(y = ~ death_total,
            name = "Death",
            fillcolor = '#E41317') %>%
  add_trace(y = ~recovered_total,
            name = 'Recovered',
            fillcolor = 'forestgreen') %>%
  layout(title = "Distribution of Covid19 Cases Worldwide",
         legend = list(x = 0.1, y = 0.9),
         yaxis = list(title = "Number of Cases"),
         xaxis = list(title = "Source: Johns Hopkins University Center for Systems Science and Engineering"))
#### # Plot US data
unique(coronavirus$country)
levels(factor(coronavirus$country))
# BUG FIX: row subsetting of a data frame needs the trailing comma;
# `coronavirus[coronavirus$country == "US"]` selected columns and errored.
us <- coronavirus[coronavirus$country == "US", ]
unique(coronavirus$country)
# BUG FIX: the chart below is titled "in U.S." but previously piped the
# full worldwide table; it now starts from the `us` subset built above.
us %>%
  group_by(type, date) %>%
  summarise(total_cases = sum(cases)) %>%
  pivot_wider(names_from = type, values_from = total_cases) %>%
  arrange(date) %>%
  mutate(active = confirmed - death - recovered) %>%
  mutate(active_total = cumsum(active),
         recovered_total = cumsum(recovered),
         death_total = cumsum(death)) %>%
  plot_ly(x = ~ date,
          y = ~ active_total,
          name = 'Active',
          fillcolor = '#1f77b4',
          type = 'scatter',
          mode = 'none',
          stackgroup = 'one') %>%
  add_trace(y = ~ death_total,
            name = "Death",
            fillcolor = '#E41317') %>%
  add_trace(y = ~recovered_total,
            name = 'Recovered',
            fillcolor = 'forestgreen') %>%
  layout(title = "Distribution of Covid19 Cases in U.S.",
         legend = list(x = 0.1, y = 0.9),
         yaxis = list(title = "Number of Cases"),
         xaxis = list(title = "Source: Johns Hopkins University Center for Systems Science and Engineering"))
|
#' @description Convert edgeList to flat (0-based) indices
#' @param edgeList nx2 matrix of edges
#' @param max_n Maximum node ID.
#' @param undirected If TRUE, edges are first canonicalized via sort_ij()
#'   so both orientations of an edge map to the same flat index.
#' @noRd
ij2flat = function(edgeList, max_n, undirected = TRUE){
  # FIX: use TRUE instead of the reassignable shorthand T.
  if(missing(max_n)){ stop("max_n must be provided") }
  if(undirected){ edgeList = sort_ij(edgeList) }
  i = edgeList[,1]
  j = edgeList[,2]
  # Column-major flattening: flat = (i - 1) + (j - 1) * max_n, starting at 0.
  flat = i - 1 + (j - 1) * max_n
  return(flat)
}
#' @description Convert flat indices back to an edgeList; inverse of the
#'   column-major flattening done by `ij2flat`.
#' @param flat Flat index, as returned by `ij2flat`. Starts at 0
#' @param max_n Maximum node ID
#' @noRd
flat2ij = function(flat, max_n){
  if(missing(max_n)) stop("max_n must be included")
  # %/% is floor division, so this matches floor(flat / max_n).
  ans = cbind(i = flat %% max_n + 1,
              j = flat %/% max_n + 1)
  return(ans)
}
#' @description Canonicalize an edgelist so that i <= j on every row,
#'   swapping the endpoints of any row where column 2 is the smaller.
#' @noRd
sort_ij = function(edgeList){
  swap = edgeList[,1] > edgeList[,2]
  ans = edgeList
  ans[swap, 1] = edgeList[swap, 2]
  ans[swap, 2] = edgeList[swap, 1]
  colnames(ans) = c("i", "j")
  return(ans)
}
#' @title Get unique edges from edgelist
#' @description Return unique undirected edges from edgeList by
#'   canonicalizing each edge (i <= j), deduplicating on the flat index,
#'   and converting back to pair form.
#' @param edgeList A nx2 matrix of edges
#' @noRd
unq_edges = function(edgeList){
  n_max = max(edgeList)
  canonical = sort_ij(edgeList)
  keys = ij2flat(canonical, n_max, undirected = TRUE)
  return(flat2ij(unique(keys), n_max))
}
# Return only unique, non-self-loop samples from a vector of flat indices:
# drop diagonal entries (i == j), canonicalize orientation, deduplicate.
unq_nondiag_flat = function(flat, max_n){
  pairs = flat2ij(flat, max_n)
  off_diag = pairs[,1] != pairs[,2]
  pairs = pairs[off_diag, , drop = FALSE]
  return(unique(ij2flat(sort_ij(pairs), max_n)))
}
#' Check that none of the held-out ("missing") edges also appear in the
#' observed edge list; stops with an error if any overlap is found.
#' @noRd
checkMissingList = function(obs_edges, missing_edges, max_n){
  observed = ij2flat(obs_edges, max_n, undirected = TRUE)
  held_out = ij2flat(missing_edges, max_n, undirected = TRUE)
  if(any(held_out %in% observed))
    stop("Missing edges found in observed edges list!")
}
#' @description Sample `n` node pairs that are NOT in the undirected
#'   edgeList (and are not self-loops). Oversamples 4x per round because
#'   draws may land on observed edges or the diagonal; retries up to 10
#'   rounds before giving up.
#' @param edgeList nx2 matrix of edges
#' @param n Number of samples
#' @noRd
sample_nonEdges = function(edgeList, n = 100){
  edgeList = unq_edges(edgeList)
  max_n = max(edgeList)
  flat_index = ij2flat(sort_ij(edgeList), max_n)
  max_flat = max_n^2 - 1
  # FIX: cap the draw size so sample() cannot error when 4*n exceeds the
  # number of available flat indices.
  draw_size = min(4 * n, max_flat)
  # FIX: TRUE/FALSE instead of T/F; && instead of & for scalar conditions.
  flat_sample = sample(1:max_flat, draw_size, replace = FALSE)
  flat_sample = unq_nondiag_flat(flat_sample, max_n)
  flat_unobs = flat_sample[!(flat_sample %in% flat_index)]
  tries = 0
  while(tries < 10 && length(flat_unobs) < n){
    tries = tries + 1
    flat_sample = sample(1:max_flat, draw_size, replace = FALSE)
    flat_sample = unq_nondiag_flat(flat_sample, max_n)
    flat_unobs = flat_sample[!(flat_sample %in% flat_index)]
  }
  if(length(flat_unobs) < n){
    stop("some sort of error: failed to find missing edges in 10 tries!")
  }
  flat_use = flat_unobs[1:n]
  return(flat2ij(flat_use, max_n))
}
#' @title Random split edges
#' @param edgeList nx2 matrix of edges
#' @param nEdges Number of edges to mask
#' @param nNotEdges Number of non-edges that will be masked
#' @description Randomly selects `nEdges` to mask, which
#' will be removed from the observed edgeList and returned
#' as `masked_edges`, and `nNotEdges` pairs
#' which don't appear in the original edgeList but will be returned
#' in `masked_nonEdges`
#' @noRd
random_splitEdges = function(edgeList, nEdges, nNotEdges){
  edgeList = unq_edges(edgeList)
  nRow = nrow(edgeList)
  edge_indices = sample(1:nRow, nEdges, replace = FALSE)
  # FIX: drop = FALSE keeps matrix shape when only one row is selected
  # (or remains); previously colnames<- errored on the dropped vector.
  edge_pairs = edgeList[edge_indices, , drop = FALSE]
  reduced_edges = edgeList[-edge_indices, , drop = FALSE]
  # Non-edges are sampled against the FULL edge list, so they are true
  # non-edges even after the mask is applied.
  nonEdge_pairs = sample_nonEdges(edgeList, nNotEdges)
  colnames(reduced_edges) = c("i","j")
  colnames(edge_pairs) = c("i", "j")
  colnames(nonEdge_pairs) = c("i","j")
  ans = list(obs_edges = reduced_edges,
             masked_edges = edge_pairs,
             masked_nonEdges = nonEdge_pairs)
  return(ans)
}
# Extract the model's expected value for each edge in edgeList.
# Accepts an Rcpp_LCN model (edgeProb) or an Rcpp_BKN model (meanEdges);
# node IDs are shifted to 0-based indexing before the C++ calls.
meanEdges = function(mod, edgeList){
  zero_based = as.matrix(edgeList) - 1
  isLCN = is(mod, "Rcpp_LCN")
  if(!isLCN && !is(mod, "Rcpp_BKN"))
    stop("mod must be LCN or BKN mod")
  n_edges = nrow(edgeList)
  ans = numeric(n_edges)
  for(k in seq_len(n_edges)){
    node_i = zero_based[k, 1]
    node_j = zero_based[k, 2]
    ans[k] = if(isLCN) mod$edgeProb(node_i, node_j) else mod$meanEdges(node_i, node_j)
  }
  return(ans)
}
|
/R/latChanNet/R/edgeTools.R
|
no_license
|
pistacliffcho/LatentChannelNetworks.R
|
R
| false
| false
| 4,398
|
r
|
#' @description Convert edgeList to flat (0-based) indices
#' @param edgeList nx2 matrix of edges
#' @param max_n Maximum node ID.
#' @param undirected If TRUE, edges are first canonicalized so i <= j.
#' @noRd
ij2flat = function(edgeList, max_n, undirected = TRUE){
  # FIX: TRUE instead of the reassignable shorthand T.
  if(missing(max_n)){ stop("max_n must be provided") }
  if(undirected){ edgeList = sort_ij(edgeList) }
  i = edgeList[,1]
  j = edgeList[,2]
  # Column-major flattening: flat = (i - 1) + (j - 1) * max_n, starting at 0.
  flat = i - 1 + (j - 1) * max_n
  return(flat)
}
#' @description Convert flat indices back to an edgeList
#' @param flat Flat index, as returned by `ij2flat`. Starts at 0
#' @param max_n Maximum node ID
#' @noRd
flat2ij = function(flat, max_n){
  if(missing(max_n)){
    stop("max_n must be included")
  }
  i = flat %% max_n + 1
  j = floor(flat / max_n) + 1
  ans = cbind(i, j)
  colnames(ans) = c("i", "j")
  return(ans)
}
#' @description Canonicalize an edgelist so that i <= j on every row
#'   (FIX: the previous comment claimed i >= j, the opposite of what the
#'   code does).
#' @noRd
sort_ij = function(edgeList){
  j_isLess = edgeList[,2] < edgeList[,1]
  ans = edgeList
  ans[j_isLess, 1] = edgeList[j_isLess, 2]
  ans[j_isLess, 2] = edgeList[j_isLess, 1]
  colnames(ans) = c("i", "j")
  return(ans)
}
#' @title Get unique edges from edgelist
#' @description Return unique undirected edges from edgeList
#' @param edgeList A nx2 matrix of edges
#' @noRd
unq_edges = function(edgeList){
  max_n = max(edgeList)
  sorted_el = sort_ij(edgeList)
  flat = ij2flat(sorted_el, max_n, undirected = TRUE)
  unq_flat = unique(flat)
  ans = flat2ij(unq_flat, max_n)
  return(ans)
}
# Return only unique, non-self-loop samples from a vector of flat indices.
unq_nondiag_flat = function(flat, max_n){
  ij = flat2ij(flat, max_n)
  ij = ij[ij[,1] != ij[,2], , drop = FALSE]
  ij_sort = sort_ij(ij)
  flat = ij2flat(ij_sort, max_n)
  ans = unique(flat)
  return(ans)
}
#' Check that there are no missing edges in the original edge list;
#' stops with an error if the two sets overlap.
#' @noRd
checkMissingList = function(obs_edges, missing_edges, max_n){
  flat_edges = ij2flat(obs_edges, max_n, undirected = TRUE)
  flat_missing = ij2flat(missing_edges, max_n, undirected = TRUE)
  if(any(flat_missing %in% flat_edges))
    stop("Missing edges found in observed edges list!")
}
#' @description Sample `n` node pairs NOT in the undirected edgeList.
#'   Oversamples 4x per round; retries up to 10 rounds.
#' @param edgeList nx2 matrix of edges
#' @param n Number of samples
#' @noRd
sample_nonEdges = function(edgeList, n = 100){
  edgeList = unq_edges(edgeList)
  max_n = max(edgeList)
  flat_index = ij2flat(sort_ij(edgeList), max_n)
  max_flat = max_n^2 - 1
  # FIX: cap the draw size so sample() cannot error when 4*n > max_flat.
  draw_size = min(4 * n, max_flat)
  # FIX: FALSE instead of F; && instead of & for scalar conditions.
  flat_sample = sample(1:max_flat, draw_size, replace = FALSE)
  flat_sample = unq_nondiag_flat(flat_sample, max_n)
  flat_unobs = flat_sample[!(flat_sample %in% flat_index)]
  tries = 0
  while(tries < 10 && length(flat_unobs) < n){
    tries = tries + 1
    flat_sample = sample(1:max_flat, draw_size, replace = FALSE)
    flat_sample = unq_nondiag_flat(flat_sample, max_n)
    flat_unobs = flat_sample[!(flat_sample %in% flat_index)]
  }
  if(length(flat_unobs) < n){
    stop("some sort of error: failed to find missing edges in 10 tries!")
  }
  flat_use = flat_unobs[1:n]
  return(flat2ij(flat_use, max_n))
}
#' @title Random split edges
#' @param edgeList nx2 matrix of edges
#' @param nEdges Number of edges to mask
#' @param nNotEdges Number of non-edges that will be masked
#' @description Randomly selects `nEdges` to mask (removed from the
#'   observed list, returned as `masked_edges`) and `nNotEdges` pairs not
#'   in the original edgeList (returned as `masked_nonEdges`).
#' @noRd
random_splitEdges = function(edgeList, nEdges, nNotEdges){
  edgeList = unq_edges(edgeList)
  nRow = nrow(edgeList)
  edge_indices = sample(1:nRow, nEdges, replace = FALSE)
  # FIX: drop = FALSE keeps matrix shape when a single row is selected;
  # previously colnames<- errored on the dropped vector.
  edge_pairs = edgeList[edge_indices, , drop = FALSE]
  reduced_edges = edgeList[-edge_indices, , drop = FALSE]
  nonEdge_pairs = sample_nonEdges(edgeList, nNotEdges)
  colnames(reduced_edges) = c("i","j")
  colnames(edge_pairs) = c("i", "j")
  colnames(nonEdge_pairs) = c("i","j")
  ans = list(obs_edges = reduced_edges,
             masked_edges = edge_pairs,
             masked_nonEdges = nonEdge_pairs)
  return(ans)
}
# Extract the model's expected value for each edge: Rcpp_LCN models expose
# edgeProb(), Rcpp_BKN models expose meanEdges(); node IDs are 0-based for
# the C++ calls.
meanEdges = function(mod, edgeList){
  c_edgeList = as.matrix(edgeList) - 1
  isLCN = is(mod, "Rcpp_LCN")
  if(!isLCN && !is(mod, "Rcpp_BKN"))
    stop("mod must be LCN or BKN mod")
  ans = numeric(nrow(edgeList))
  for(i in seq_len(nrow(edgeList))){
    e = c_edgeList[i,]
    if(isLCN){ ans[i] = mod$edgeProb(e[1], e[2]) }
    else{ ans[i] = mod$meanEdges(e[1], e[2]) }
  }
  return(ans)
}
|
# Scratch script: CENTIPEDE-based TF footprint calling on ATAC-seq
# insertion tracks. Absolute paths below are machine-specific fixtures.
library(RunATAC)
library(magrittr)
# Yeast test fixtures bundled with the RunATAC package
ins_bw <- "inst/extdata/chrIV.ins.bigwig"
peaks <- read_bed("inst/extdata/chrIV_peaks.narrowPeak")
reb1 <- getMatrixByName(JASPAR2016, name=c("REB1"))
pwm <- reb1@profileMatrix
genome <- Scerevisiae
# Mouse iPSC data -- NOTE: these assignments overwrite the yeast objects
# (ins_bw, peaks, pwm, genome) set up just above.
ips_pks <- "~/polo_iPSC/ATACseq/processed_data/atac_peaks/merged_replicate_peaks/atac_iPSC_combined_replicates_peaks.narrowPeak"
ips_pks <- read_bed(ips_pks)
peaks <- ips_pks[seqnames(ips_pks) %in% "chr1"]
ins_bw <- "/Volumes/Datasets/atac_iPSC_combined_replicates.ins.bigwig"
nuc_bw <- "/Volumes/Datasets/atac_iPSC_combined_replicates.occ.bigwig"
os <- getMatrixByName(JASPAR2016, name=c("Pou5f1::Sox2"))
pwm <- os@profileMatrix
genome <- Mmusculus
min.score="75%"
bias_bw <- "/Volumes/Datasets/mm10_bias.Scores.bigwig"
#' Footprint a motif with CENTIPEDE from an ATAC insertion track.
#'
#' @param ins_bw Path/URL to a bigwig of Tn5 insertion counts.
#' @param peaks GRanges of peaks to scan for the motif.
#' @param pwm Position weight matrix of the motif.
#' @param genome BSgenome object used for motif scanning.
#' @param min_base_cov Minimum fraction of covered bases for a motif
#'   instance to be kept.
#' @param min.score Minimum PWM match score passed to the scanner.
#' @param cons_track Bigwig of conservation scores used as a prior.
#'   Generalized from a previously hard-coded mm10 phyloP URL; the default
#'   preserves the old behavior.
#' @return list(centFit = data.frame of posterior calls, countMat =
#'   filtered insertion matrix, motifPos = GRanges of the motif instances
#'   that passed the coverage filter).
fp_centipede <- function(ins_bw, peaks, pwm, genome,
                         min_base_cov = 0.1, min.score = "75%",
                         cons_track = "http://cpebrazor.ivec.org/public/listerlab/sam/polo_mm_iPSC/resources/mm10.60way.phyloP60way.bw"){
  # Get the motif match positions
  motif_pos <- motif_gr(gr = peaks, pwm = pwm, genome = genome,
                        min.score = min.score)
  # Pileup of Tn5 insertions around each motif instance
  dat <- range_summary(bigwig = ins_bw, gr = motif_pos, range = 100)
  # Filter regions with < min fraction of bases covered
  covered_bases <- dat > 0
  pass_cov <- rowSums(covered_bases) >= floor(min_base_cov * ncol(dat))
  dat <- dat[pass_cov, ]
  # Mean conservation over the motif core, used as a CENTIPEDE prior
  cons <- range_summary(bigwig = cons_track, gr = motif_pos,
                        range = ceiling(ncol(pwm) / 2))
  cons <- rowMeans(cons)
  # Priors: intercept, log PWM score, conservation (rows aligned to `dat`)
  priors <- data.frame(X = 1,
                       PWMscore = log(score(motif_pos[pass_cov])),
                       Cons = cons[pass_cov]) %>%
    as.matrix()
  # Fit the CENTIPEDE model on the profile plus its mirror image
  centFit <- fitCentipede(Xlist = list(ATAC = as.matrix(cbind(dat, dat[, ncol(dat):1]))),
                          Y = priors)
  plotProfile(centFit$LambdaParList[[1]], Mlen = 1)
  # BUG FIX: the results previously paired ALL motif positions with
  # posterior probabilities computed only for the coverage-filtered
  # subset, giving mismatched lengths; use the filtered positions so
  # centFit rows, countMat rows and motifPos stay aligned.
  kept_pos <- motif_pos[pass_cov]
  df <- data.frame(chr = seqnames(kept_pos), start = start(kept_pos),
                   end = end(kept_pos),
                   PostPr = centFit$PostPr, LogRatios = centFit$LogRatios)
  results <- list(centFit = df, countMat = dat, motifPos = kept_pos)
  return(results)
}
fp1 <- fp_centipede(ins_bw, peaks, pwm, genome)
# iPSC footprinting for Oct/Sox
library(BSgenome.Mmusculus.UCSC.mm10)
# Chr1-only iPSC ATAC peaks (machine-specific path)
ips_pks <- "~/polo_iPSC/ATACseq/processed_data/atac_peaks/merged_replicate_peaks/atac_iPSC_combined_replicates_peaks.narrowPeak"
ips_pks <- read_bed(ips_pks)
ips_pks <- ips_pks[seqnames(ips_pks) %in% "chr1"]
ips_ins <- "/Volumes/Datasets/atac_iPSC_combined_replicates.ins.bigwig"
# Oct4::Sox2 composite motif from JASPAR 2016
os <- getMatrixByName(JASPAR2016, name=c("Pou5f1::Sox2"))
os <- os@profileMatrix
os_ips_fp <- fp_centipede(ins_bw = ips_ins, peaks = ips_pks, pwm = os,
                          genome = Mmusculus, min.score="75%")
# Day-9 reprogramming sample; evaluated against ChIP-seq ground truth below
ins_bw <- "/Volumes/Datasets/atac_d9_combined_replicates.ins.bigwig"
peaks <- ips_pks
pwm <- os
sox_fl <- "~/polo_iPSC/ChIPseq/processed_data/macs_peaks_replicates/d9_sox_peaks.narrowPeak"
oct_fl <- "~/polo_iPSC/ChIPseq/processed_data/macs_peaks_replicates/d9_oct_peaks.narrowPeak"
sox_peaks <- read_bed(sox_fl)
oct_peaks <- read_bed(oct_fl)
# Keep only reciprocally overlapping Sox/Oct ChIP peaks (co-bound sites)
sox_peaks <- sox_peaks[overlapsAny(sox_peaks, oct_peaks)]
oct_peaks <- oct_peaks[overlapsAny(oct_peaks, sox_peaks)]
chip_peaks <- c(sox_peaks, oct_peaks) %>% reduce()
library(caret)
# Evaluate footprint calls against ChIP-seq peaks: motifs overlapping a
# ChIP peak are treated as true binding; motifs with CENTIPEDE posterior
# > 0.99 are the predicted set. Returns PPV/NPV/sensitivity/specificity
# plus raw counts.
test_fp <- function(peaks, ins_bw, chip_peaks, pwm){
  fp <- fp_centipede(ins_bw = ins_bw, peaks = peaks, pwm = pwm,
                     genome = Mmusculus, min.score = "75%")
  # Ground truth: motif instances overlapping a ChIP peak
  chip_hits <- overlapsAny(fp$motifPos, chip_peaks)
  # Prediction: motif instances overlapping a high-posterior footprint
  fp_hits <- overlapsAny(fp$motifPos, fp$motifPos[fp$centFit$PostPr > 0.99])
  comp <- data.frame(chip = chip_hits, atac = fp_hits)
  # Confusion-matrix cells for the predictive-value calculations
  true_pos <- sum(comp$atac & comp$chip)
  false_pos <- sum(comp$atac & !comp$chip)
  false_neg <- sum(!comp$atac & comp$chip)
  true_neg <- sum(!comp$atac & !comp$chip)
  conf_mat <- matrix(c(true_pos, false_pos, false_neg, true_neg),
                     byrow = TRUE, nrow = 2)
  res <- c(posP = posPredValue(conf_mat),
           negP = negPredValue(conf_mat),
           sens = (true_pos / (true_pos + false_neg)),
           spec = (true_neg / (true_neg + false_pos)),
           n_motif = length(fp$motifPos),
           chip_hits = sum(chip_hits),
           fp_hits = sum(fp_hits),
           overlap = true_pos)
  return(res)
}
test_1 <- test_fp(peaks, ins_bw, chip_peaks, pwm)
m2 <- "http://cpebrazor.ivec.org/public/listerlab/sam/polo_mm_iPSC/atac/IPS_05673_M2_ATAC.ins.bw"
# BUG FIX: test_fp() has signature (peaks, ins_bw, chip_peaks, pwm); the
# stray nuc_bw argument made this call error with "unused argument".
test_m2 <- test_fp(peaks, ins_bw = m2, chip_peaks = chip_peaks, pwm = pwm) # No PWM scores
# Yeast demo: footprint REB1 on the bundled chrIV fixtures
bam_fl <- system.file("extdata", "chrIV.bam", package = "RunATAC")
frags <- read_atac_frags(bam_file = bam_fl)
ins <- read_atac_insertions(bam_fl)
ins
peak_fl <- system.file("extdata", "chrIV_peaks.narrowPeak", package = "RunATAC")
peaks <- read_bed(peak_fl)
peaks
library(JASPAR2016)
library(TFBSTools)
library(BSgenome.Scerevisiae.UCSC.sacCer3)
reb1 <- getMatrixByName(JASPAR2016, name=c("REB1"))
reb1 <- reb1@profileMatrix
motif_pos <- motif_gr(gr = peaks, pwm = reb1, genome = Scerevisiae)
motif_pos
ins_bw <- system.file("extdata", "chrIV.ins.bigwig", package = "RunATAC")
dat <- range_summary(bigwig = ins_bw, gr = motif_pos)
# Calculate the Tn5 insertion sequence-bias scores
ins_pwm <- calc_ins_pwm(ins_gr = ins, genome = Scerevisiae)
range=100
regions_gr <- GenomicRanges::resize(motif_pos, width = range+(range+1),
                                    fix = 'center')
bias <- calc_ins_bias(ins_pwm = ins_pwm, regions_gr = regions_gr,
                      genome = Scerevisiae)
bias <- do.call(rbind, bias)
plot(colMeans(bias))
# Correct insertion counts for sequence bias
dat <- dat / bias
plot(colSums(dat))
anno_dat <- data.frame(X=1, PWMscore=score(motif_pos)) %>% as.matrix()
library(CENTIPEDE)
data(NRSFcuts, package='CENTIPEDE')
data("NRSF_Anno")
# BUG FIX: resolved an unmerged git conflict (<<<<<<< HEAD ... >>>>>>>
# origin/master) that made the file unparseable. Both fits from the
# origin/master side are kept so single- vs mirrored-profile calls can be
# compared.
centFit <- fitCentipede(Xlist = list(DNase=as.matrix(cbind(dat))),
                        Y = anno_dat)
centFit2 <- fitCentipede(Xlist = list(DNase=as.matrix(cbind(dat, dat))),
                         Y = anno_dat)
table(centFit$PostPr > 0.99)
table(centFit2$PostPr > 0.99)
plotProfile(centFit$LambdaParList[[1]],Mlen=1)
plot(centFit$LambdaParList$DNase[1:201, ])
# Mouse iPSC demo: Oct4::Sox2 motif footprinting on genome-wide peaks
peak_fl <- "~/polo_iPSC/ATACseq/processed_data/ATAC_peaks/merged_replicate_peaks/atac_iPSC_combined_replicates_peaks.narrowPeak"
peaks <- read_bed(peak_fl)
peaks
os <- getMatrixByName(JASPAR2016, name=c("Pou5f1::Sox2"))
os <- os@profileMatrix
library(BSgenome.Mmusculus.UCSC.mm10)
motif_pos <- motif_gr(gr = peaks, pwm = os, genome = Mmusculus)
motif_pos
ins_bw <- "/Volumes/Datasets/atac_iPSC_combined_replicates.ins.bigwig"
dat <- range_summary(bigwig = ins_bw, gr = motif_pos)
# Calculate the bias scores for the mouse genome
ins_pwm <- calc_ins_pwm(ins_gr = ins, genome = Mmusculus)
range=100
regions_gr <- GenomicRanges::resize(motif_pos, width = range+(range+1),
                                    fix = 'center')
bias <- calc_ins_bias(ins_pwm = ins_pwm, regions_gr = regions_gr,
                      genome = Mmusculus)
bias <- do.call(rbind, bias)
plot(colMeans(bias))
# Correct for bias
dat <- dat / bias
plot(colSums(dat))
anno_dat <- data.frame(X=1, PWMscore=score(motif_pos)) %>% as.matrix()
library(CENTIPEDE)
data(NRSFcuts, package='CENTIPEDE')
data("NRSF_Anno")
centFit <- fitCentipede(Xlist = list(DNase=as.matrix(cbind(dat,dat))),
                        Y = anno_dat)
plotProfile(centFit$LambdaParList[[1]],Mlen=1)
plot(centFit$LambdaParList$DNase[1:201, ])
|
/testing/centipede_footprinting.R
|
permissive
|
SamBuckberry/RunATAC
|
R
| false
| false
| 8,412
|
r
|
# Scratch script: CENTIPEDE-based TF footprint calling on ATAC-seq
# insertion tracks. Absolute paths below are machine-specific fixtures.
library(RunATAC)
library(magrittr)
# Yeast test fixtures bundled with the RunATAC package
ins_bw <- "inst/extdata/chrIV.ins.bigwig"
peaks <- read_bed("inst/extdata/chrIV_peaks.narrowPeak")
reb1 <- getMatrixByName(JASPAR2016, name=c("REB1"))
pwm <- reb1@profileMatrix
genome <- Scerevisiae
# Mouse iPSC data -- NOTE: these assignments overwrite the yeast objects
# (ins_bw, peaks, pwm, genome) set up just above.
ips_pks <- "~/polo_iPSC/ATACseq/processed_data/atac_peaks/merged_replicate_peaks/atac_iPSC_combined_replicates_peaks.narrowPeak"
ips_pks <- read_bed(ips_pks)
peaks <- ips_pks[seqnames(ips_pks) %in% "chr1"]
ins_bw <- "/Volumes/Datasets/atac_iPSC_combined_replicates.ins.bigwig"
nuc_bw <- "/Volumes/Datasets/atac_iPSC_combined_replicates.occ.bigwig"
os <- getMatrixByName(JASPAR2016, name=c("Pou5f1::Sox2"))
pwm <- os@profileMatrix
genome <- Mmusculus
min.score="75%"
bias_bw <- "/Volumes/Datasets/mm10_bias.Scores.bigwig"
#' Footprint a motif with CENTIPEDE from an ATAC insertion track.
#'
#' @param ins_bw Path/URL to a bigwig of Tn5 insertion counts.
#' @param peaks GRanges of peaks to scan for the motif.
#' @param pwm Position weight matrix of the motif.
#' @param genome BSgenome object used for motif scanning.
#' @param min_base_cov Minimum fraction of covered bases for a motif
#'   instance to be kept.
#' @param min.score Minimum PWM match score passed to the scanner.
#' @param cons_track Bigwig of conservation scores used as a prior.
#'   Generalized from a previously hard-coded mm10 phyloP URL; the default
#'   preserves the old behavior.
#' @return list(centFit = data.frame of posterior calls, countMat =
#'   filtered insertion matrix, motifPos = GRanges of the motif instances
#'   that passed the coverage filter).
fp_centipede <- function(ins_bw, peaks, pwm, genome,
                         min_base_cov = 0.1, min.score = "75%",
                         cons_track = "http://cpebrazor.ivec.org/public/listerlab/sam/polo_mm_iPSC/resources/mm10.60way.phyloP60way.bw"){
  # Get the motif match positions
  motif_pos <- motif_gr(gr = peaks, pwm = pwm, genome = genome,
                        min.score = min.score)
  # Pileup of Tn5 insertions around each motif instance
  dat <- range_summary(bigwig = ins_bw, gr = motif_pos, range = 100)
  # Filter regions with < min fraction of bases covered
  covered_bases <- dat > 0
  pass_cov <- rowSums(covered_bases) >= floor(min_base_cov * ncol(dat))
  dat <- dat[pass_cov, ]
  # Mean conservation over the motif core, used as a CENTIPEDE prior
  cons <- range_summary(bigwig = cons_track, gr = motif_pos,
                        range = ceiling(ncol(pwm) / 2))
  cons <- rowMeans(cons)
  # Priors: intercept, log PWM score, conservation (rows aligned to `dat`)
  priors <- data.frame(X = 1,
                       PWMscore = log(score(motif_pos[pass_cov])),
                       Cons = cons[pass_cov]) %>%
    as.matrix()
  # Fit the CENTIPEDE model on the profile plus its mirror image
  centFit <- fitCentipede(Xlist = list(ATAC = as.matrix(cbind(dat, dat[, ncol(dat):1]))),
                          Y = priors)
  plotProfile(centFit$LambdaParList[[1]], Mlen = 1)
  # BUG FIX: the results previously paired ALL motif positions with
  # posterior probabilities computed only for the coverage-filtered
  # subset, giving mismatched lengths; use the filtered positions so
  # centFit rows, countMat rows and motifPos stay aligned.
  kept_pos <- motif_pos[pass_cov]
  df <- data.frame(chr = seqnames(kept_pos), start = start(kept_pos),
                   end = end(kept_pos),
                   PostPr = centFit$PostPr, LogRatios = centFit$LogRatios)
  results <- list(centFit = df, countMat = dat, motifPos = kept_pos)
  return(results)
}
fp1 <- fp_centipede(ins_bw, peaks, pwm, genome)
# iPSC footprinting for Oct/Sox
library(BSgenome.Mmusculus.UCSC.mm10)
ips_pks <- "~/polo_iPSC/ATACseq/processed_data/atac_peaks/merged_replicate_peaks/atac_iPSC_combined_replicates_peaks.narrowPeak"
ips_pks <- read_bed(ips_pks)
ips_pks <- ips_pks[seqnames(ips_pks) %in% "chr1"]
ips_ins <- "/Volumes/Datasets/atac_iPSC_combined_replicates.ins.bigwig"
os <- getMatrixByName(JASPAR2016, name=c("Pou5f1::Sox2"))
os <- os@profileMatrix
os_ips_fp <- fp_centipede(ins_bw = ips_ins, peaks = ips_pks, pwm = os,
genome = Mmusculus, min.score="75%")
ins_bw <- "/Volumes/Datasets/atac_d9_combined_replicates.ins.bigwig"
peaks <- ips_pks
pwm <- os
sox_fl <- "~/polo_iPSC/ChIPseq/processed_data/macs_peaks_replicates/d9_sox_peaks.narrowPeak"
oct_fl <- "~/polo_iPSC/ChIPseq/processed_data/macs_peaks_replicates/d9_oct_peaks.narrowPeak"
sox_peaks <- read_bed(sox_fl)
oct_peaks <- read_bed(oct_fl)
sox_peaks <- sox_peaks[overlapsAny(sox_peaks, oct_peaks)]
oct_peaks <- oct_peaks[overlapsAny(oct_peaks, sox_peaks)]
chip_peaks <- c(sox_peaks, oct_peaks) %>% reduce()
library(caret)
test_fp <- function(peaks, ins_bw, chip_peaks, pwm){
fp <- fp_centipede(ins_bw = ins_bw,
peaks = peaks, pwm = pwm,
genome = Mmusculus, min.score="75%")
# Get the motifs bound by TF
chip_hits <- overlapsAny(fp$motifPos, chip_peaks)
# FP predicted binding
fp_hits <- overlapsAny(fp$motifPos, fp$motifPos[fp$centFit$PostPr > 0.99])
comp <- data.frame(chip=chip_hits, atac=fp_hits)
# Create the matrix for PPV calculation
A <- sum(comp$atac == TRUE & comp$chip == TRUE)
B <- sum(comp$atac == TRUE & comp$chip == FALSE)
C <- sum(comp$atac == FALSE & comp$chip == TRUE)
D <- sum(comp$atac == FALSE & comp$chip == FALSE)
dat <- matrix(c(A, B, C, D), byrow = TRUE, nrow = 2)
res <- c(posP=posPredValue(dat),
negP=negPredValue(dat),
sens=(A/(A+C)),
spec=(D/(D+B)),
n_motif=length(fp$motifPos),
chip_hits=sum(chip_hits),
fp_hits=sum(fp_hits),
overlap=A)
return(res)
}
test_1 <- test_fp(peaks, ins_bw, chip_peaks, pwm)
m2 <- "http://cpebrazor.ivec.org/public/listerlab/sam/polo_mm_iPSC/atac/IPS_05673_M2_ATAC.ins.bw"
test_m2 <- test_fp(peaks, ins_bw=m2, nuc_bw, chip_peaks, pwm) # No PWM scores
bam_fl <- system.file("extdata", "chrIV.bam", package = "RunATAC")
frags <- read_atac_frags(bam_file = bam_fl)
ins <- read_atac_insertions(bam_fl)
ins
peak_fl <- system.file("extdata", "chrIV_peaks.narrowPeak", package = "RunATAC")
peaks <- read_bed(peak_fl)
peaks
library(JASPAR2016)
library(TFBSTools)
library(BSgenome.Scerevisiae.UCSC.sacCer3)
reb1 <- getMatrixByName(JASPAR2016, name=c("REB1"))
reb1 <- reb1@profileMatrix
motif_pos <- motif_gr(gr = peaks, pwm = reb1, genome = Scerevisiae)
motif_pos
ins_bw <- system.file("extdata", "chrIV.ins.bigwig", package = "RunATAC")
dat <- range_summary(bigwig = ins_bw, gr = motif_pos)
# Calculate the bias scores
ins_pwm <- calc_ins_pwm(ins_gr = ins, genome = Scerevisiae)
range=100
regions_gr <- GenomicRanges::resize(motif_pos, width = range+(range+1),
fix = 'center')
bias <- calc_ins_bias(ins_pwm = ins_pwm, regions_gr = regions_gr,
genome = Scerevisiae)
bias <- do.call(rbind, bias)
plot(colMeans(bias))
# Correct for bias
dat <- dat / bias
plot(colSums(dat))
anno_dat <- data.frame(X=1, PWMscore=score(motif_pos)) %>% as.matrix()
library(CENTIPEDE)
data(NRSFcuts, package='CENTIPEDE')
data("NRSF_Anno")
<<<<<<< HEAD
centFit <- fitCentipede(Xlist = list(DNase=as.matrix(cbind(dat,dat))),
Y = anno_dat)
=======
centFit <- fitCentipede(Xlist = list(DNase=as.matrix(cbind(dat))),
Y = anno_dat)
centFit2 <- fitCentipede(Xlist = list(DNase=as.matrix(cbind(dat, dat))),
Y = anno_dat)
table(centFit$PostPr > 0.99)
table(centFit2$PostPr > 0.99)
>>>>>>> origin/master
plotProfile(centFit$LambdaParList[[1]],Mlen=1)
plot(centFit$LambdaParList$DNase[1:201, ])
peak_fl <- "~/polo_iPSC/ATACseq/processed_data/ATAC_peaks/merged_replicate_peaks/atac_iPSC_combined_replicates_peaks.narrowPeak"
peaks <- read_bed(peak_fl)
peaks
os <- getMatrixByName(JASPAR2016, name=c("Pou5f1::Sox2"))
os <- os@profileMatrix
library(BSgenome.Mmusculus.UCSC.mm10)
motif_pos <- motif_gr(gr = peaks, pwm = os, genome = Mmusculus)
motif_pos
ins_bw <- "/Volumes/Datasets/atac_iPSC_combined_replicates.ins.bigwig"
dat <- range_summary(bigwig = ins_bw, gr = motif_pos)
# Calculate the bias scores
ins_pwm <- calc_ins_pwm(ins_gr = ins, genome = Mmusculus)
range=100
regions_gr <- GenomicRanges::resize(motif_pos, width = range+(range+1),
fix = 'center')
bias <- calc_ins_bias(ins_pwm = ins_pwm, regions_gr = regions_gr,
genome = Mmusculus)
bias <- do.call(rbind, bias)
plot(colMeans(bias))
# Correct for bias
dat <- dat / bias
plot(colSums(dat))
anno_dat <- data.frame(X=1, PWMscore=score(motif_pos)) %>% as.matrix()
library(CENTIPEDE)
data(NRSFcuts, package='CENTIPEDE')
data("NRSF_Anno")
centFit <- fitCentipede(Xlist = list(DNase=as.matrix(cbind(dat,dat))),
Y = anno_dat)
plotProfile(centFit$LambdaParList[[1]],Mlen=1)
plot(centFit$LambdaParList$DNase[1:201, ])
|
## R-13μΌμ°¨(2018.1.12)
#[λ¬Έμ 155] exam.csv fileμλ νμλ€μ μνμ μκ° μμ΅λλ€. νμλ€μ SQL μ μλ₯Ό λ§λκ·Έλνλ‘ μΆλ ₯ν΄μ£ΌμΈμ.
exam
barplot(exam$grade[exam$subject == "SQL"],
names.arg = exam$name[exam$subject == "SQL"],
main = "SQL μ μ",
ylim = c(0, 100),
cex.names = 0.7,
col = topo.colors(NROW(exam$name[exam$subject == "SQL"])), density = 40)
abline(h = seq(0,100,10), lty = 3, col = "red")
box()
#μ μλ νμ΄
graphics.off()
par(mfrow=c(1,1)) # νμ΄
par(mfrow=c(2,2))
barplot(exam[exam$subject=='SQL','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='SQL','name'],
main='SQL μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='SQL','name'])))
box() # box λνλ΄λ €λ©΄
#[λ¬Έμ 156] exam.csv fileμλ νμλ€μ μνμ μκ° μμ΅λλ€. νμλ€μ R μ μλ₯Ό λ§λκ·Έλνλ‘ μΆλ ₯ν΄μ£ΌμΈμ.
barplot(exam$grade[exam$subject == "R"],
names.arg = exam$name[exam$subject == "R"],
ylim = c(0, 100),
cex.names = 0.7,
col = topo.colors(nrow(ex_158)), density = 40)
#μ μλ νμ΄
barplot(exam[exam$subject=='R','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='R','name'],
main='R μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='R','name'])))
box()
#[λ¬Έμ 157] exam.csv fileμλ νμλ€μ μνμ μκ° μμ΅λλ€. νμλ€μ PYTHON μ μλ₯Ό λ§λκ·Έλνλ‘ μΆλ ₯ν΄μ£ΌμΈμ.
barplot(exam$grade[exam$subject == "PYTHON"],
names.arg = exam$name[exam$subject == "PYTHON"],
ylim = c(0, 100),
cex.names = 0.7,
col = topo.colors(nrow(ex_158)), density = 40)
#μ μλ νμ΄
barplot(exam[exam$subject=='PYTHON','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='PYTHON','name'],
main='PYTHON μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='PYTHON','name'])))
box()
#[λ¬Έμ 158] exam.csv fileμλ νμλ€μ μνμ μκ° μμ΅λλ€. νμλ€μ κ³Όλͺ© μ΄ μ μλ₯Ό λ§λκ·Έλνλ‘ μΆλ ₯νμΈμ.
#κ·Έλ£Ήν μ§κ³κ° ꡬνλ λ°©λ²
aggregate(grade ~ name, exam, sum)
library(plyr)
ex_158 <- ddply(exam, 'name', summarise, sum_grade = sum(grade))
library(sqldf)
sqldf("select name, sum(grade) sum_grade
from exam
group by name")
library(dplyr)
exam%>%
group_by(name)%>%
summarise_at('grade', sum)
barplot(ex_158$sum_grade,
names.arg = ex_158$name,
main = "μ΄μ μ",
ylim = c(0,300),
cex.name = 0.7,
col = topo.colors(nrow(ex_158)), density = 40)
#μ μλ νμ΄
exam_t <- aggregate(grade ~ name, exam, sum)
barplot(exam_t$grade,
ylim=c(0,300),
names.arg=exam_t$name,
main='κ³Όλͺ© μ΄ μ μ',
las=2,
col=rainbow(length(exam_t$name)))
graphics.off()
par(mfrow=c(2,2))
barplot(exam[exam$subject=='SQL','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='SQL','name'],
main='SQL μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='SQL','name'])))
box()
barplot(exam[exam$subject=='R','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='R','name'],
main='R μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='R','name'])))
box()
barplot(exam[exam$subject=='PYTHON','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='PYTHON','name'],
main='PYTHON μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='PYTHON','name'])))
box()
exam_t <- aggregate(grade ~ name, exam, sum)
barplot(exam_t$grade,
ylim=c(0,300),
names.arg=exam_t$name,
main='κ³Όλͺ© μ΄ μ μ',
las=2,
col=rainbow(length(exam_t$name)))
box()
#[λ¬Έμ 159] νμλ€μ μ΄λ¦μ κΈ°μ€μΌλ‘ κ³Όλͺ©μ μλ₯Ό μ€νν λ§λκ·Έλνλ‘ μμ±νμΈμ.
#sol.1
x1 <- exam[exam$subject == "SQL", c("name","grade")]
x2 <- exam[exam$subject == "R", c("name","grade")]
x3 <- exam[exam$subject == "PYTHON", c("name","grade")]
names(x1)[2] <- "SQL"
names(x2)[2] <- "R"
names(x3)[2] <- "PYTHON"
x1;x2;x3
ex_159 <- merge(merge(x1, x2), x3)
str(ex_159)
ex_159
barplot(t(as.matrix(ex_159[,2:4])),
names.arg = ex_159[,1],
beside = FALSE,
main = "μ±μ ν",
ylim = c(0,300),
cex.names = 0.7,
col = topo.colors(3), density = 40,
legend.text = names(ex_159)[-1],
args.legend = list(cex = 0.5))
#sol.2
t_159 <- t(tapply(exam$grade, list(exam$name, exam$subject), sum))
t_159
barplot(t_159,
cex.names = 0.7, cex.axis = 0.7,
legend.text = rownames(t_159), args.legend = list(cex = 0.5))
#μ μλ νμ΄
t <- tapply(exam$grade, list(exam$subject, exam$name), sum)
bp <- barplot(t, names.arg=names(t), ylim=c(0,350),
xlab='μ΄λ¦', ylab='μ±μ ',
col=c('blue','green','purple'),
main='κ³Όλͺ©λ³ μ μ',
las=2)
legend('topright',
legend=rownames(t),
title='κ³Όλͺ©',
pch=15,
col=c('blue','green','purple'),
cex=0.9, pt.cex=1)
#[λ¬Έμ 160] νμλ€μ μ΄λ¦μ κΈ°μ€μΌλ‘ κ³Όλͺ©μ μλ₯Ό κ·Έλ£Ήν λ§λκ·Έλνλ‘ μμ±νμΈμ.
barplot(t(as.matrix(ex_159[,2:4])),
names.arg = ex_159[,1],
beside = TRUE,
main = "μ±μ ν",
ylim = c(0,100),
cex.names = 0.7,
col = topo.colors(3), density = 40,
legend.text = names(ex_159)[-1],
args.legend = list(cex = 0.5))
barplot(t_159,
beside = TRUE,
ylim = c(0,100),
cex.names = 0.7, cex.axis = 0.7,
legend.text = rownames(t_159), args.legend = list(cex = 0.5))
abline(h=seq(0,100,20), col='black',lty=3)
box()
#μ μλ νμ΄
bp <- barplot(t, names.arg=names(t), beside=TRUE, ylim=c(0,110),
xlab='μ΄λ¦', ylab='μ±μ ',
col=c('blue','green','purple'),
main='κ³Όλͺ©λ³ μ μ',
las=2)
legend('topright',
legend=rownames(t),
title='κ³Όλͺ©',
pch=15,
col=c('blue','green','purple'),
cex=0.8, pt.cex=0.6)
plot(cars, main = "Stopping Distance versus Speed")
lines(stats::lowess(cars))
#[λ¬Έμ 161] μ°½μ
건μ.csv νμΌμ λ°μ΄ν° μ€μ λ
λλ³ μΉν¨μ§ μ°½μ
건μλ₯Ό λ§λκ·Έλνλ‘ μμ±νμΈμ.
opn <- read.csv("C:/R/μ°½μ
건μ.csv", header = T, stringsAsFactors = F) cls <- read.csv("C:/R/νμ
건μ.csv", header = T, stringsAsFactors = F)
class(opn$X)
t <- tapply(opn$μΉν¨, opn$X, max)
barplot(t, main = "μΉν¨μ§ μ°½μ
건μ", ylim = c(0,1500)) ; box()
abline(h = seq(0,1400,200), lty = 3)
#μ μλ νμ΄
create_cnt <- read.csv("c:/r/μ°½μ
건μ.csv",header=T) drop_cnt <- read.csv("c:/r/νμ
건μ.csv",header=T)
create_cnt drop_cnt
barplot(create_cnt$μΉν¨,main="λ
λλ³ μΉν¨μ§ μ°½μ
건μ", names.arg=create_cnt$X,col=('blue'), ylim=c(0,1300) )
class(opn$X)
t <- tapply(opn$μΉν¨, opn$X, max)
barplot(opn$μΉν¨μ§, names.arg = opn$X, col = "skyblue",
main = "μΉν¨μ§ μ°½μ
건μ",
ylim = c(0,1400), cex.axis = 0.9, las = 1)
abline(h = seq(0,1400, 200), lty = 3, col = "red")
box(col = "orange")
#[λ¬Έμ 162] λ
λλ³ μΉν¨μ§ μ°½μ
, νμ
건μλ₯Ό κ·Έλ£Ήν λ§λκ·Έλνλ‘ μμ±νμΈμ.
ex_162 <- rbind(opn$μΉν¨μ§, cls$μΉν¨μ§)
ex_162
bp_162 <- barplot(ex_162, names.arg = opn$X, beside = T,
ylim = c(0, 4000),
col = c("orange", "green"), density = 50,
legend.text = c("μ°½μ
","νμ
"),
las = 1)
text(x = bp_162[ex_162 == max(ex_162)],
y = max(ex_162), labels = "AIλ°λ³", pos = 3, col = "red")
#μ μλ νμ΄
graphics.off()
x <- rbind(create_cnt$μΉν¨,drop_cnt$μΉν¨) x
barplot(x, main="λ
λλ³ μΉν¨μ§ μ°½μ
,νμ
", names.arg=create_cnt$X,col=c("blue","red"), ylim=c(0,4000), beside=T)
barplot(x, main="λ
λλ³ μΉν¨μ§ μ°½μ
,νμ
", names.arg=create_cnt$X,col=c("blue","red"), ylim=c(0,4000), beside=T, legend=c("μ°½μ
","νμ
") )
#[λ¬Έμ 163] 2014 λ
λ μ
μ’
λ³ μ°½μ
λΉμ¨μ μν κ·Έλνλ‘ μμ±νμΈμ.
opn_2014 <- opn[opn$X==2014, -1]
opn_2014
per <- round(opn_2014 * 100 /sum(opn_2014))
pie(as.numeric(opn_2014), labels = paste(names(opn_2014), per, '%'),
main = '2014λ
μ
μ’
λ³ μ°½μ
λΉμ¨', font.main = 11,
init.angle = 110, col = rainbow(ncol(opn_2014)))
#λ²μΈ μ°κ΅¬ : μλ£ μΆμΆ
subset(opn, X == 2014, select = -1)
filter(opn, X == 2014)[,-1]
#[λ¬Έμ 164] λ
λλ₯Ό μ
λ ₯νλ©΄ ν΄λΉ λ
λμ μν κ·Έλν μμ±ν μ μλ ν¨μλ₯Ό μμ±νμΈμ.
show_pie(2006)
show_pie <- function(y){
res <- filter(opn, X == y)[,-1]
per <- round(res*100/sum(res))
pie(as.numeric(res), labels = paste(names(res),per,'%'),
init.angle = 110, col = rainbow(ncol(res)),
main = paste(y,'λ
μ
μ’
λ³ μ°½μ
λΉμ¨'))
}
show_pie(2005)
show_pie(2006)
show_pie(2007)
show_pie(2008)
show_pie(2009)
show_pie(2010)
show_pie(2011)
show_pie(2012)
show_pie(2013)
show_pie(2014)
#μ κ°μ μ
show_pies <- function(x,y){
tmp <- y - x + 1
if(tmp%%2 == 0){
par(mfrow = c(2, tmp/2))
} else{
par(mfrow = c(2,(tmp+1)/2))
}
for(i in x:y){
res <- filter(op1, X == i)[,-1]
per <- round(res*100/sum(res))
pie(as.numeric(res), labels = paste(names(res),per,'%'),
init.angle = 110, col = rainbow(ncol(res)),
main = paste(i,'λ
μ
μ’
λ³ μ°½μ
λΉμ¨'))
box()
}
}
show_pies(2005, 2014)
round(5/2,0)
5%/%2
par(mfrow=c(1,3))
pie(as.numeric(opn_2014), labels = paste(names(opn_2014), per, '%'),
main = '2014λ
μ
μ’
λ³ μ°½μ
λΉμ¨', font.main = 11,
init.angle = 110, col = rainbow(ncol(opn_2014)))
## 13-1. κ·Έλν μ¬μ§νμΌλ‘ μ μ₯νκΈ°
library(jpeg)
jpeg("c:/r/1.jpg")
pie(as.numeric(opn_2014), labels = paste(names(opn_2014), per, '%'),
main = '2014λ
μ
μ’
λ³ μ°½μ
λΉμ¨', font.main = 11,
init.angle = 110, col = rainbow(ncol(opn_2014)))
dev.off()
jpeg("c:/r/2.jpg")
pie(as.numeric(opn_2014), labels = paste(names(opn_2014), per, '%'),
main = '2014λ
μ
μ’
λ³ μ°½μ
λΉμ¨', font.main = 11,
init.angle = 110, col = rainbow(ncol(opn_2014)))
dev.off()
## 13-2. μ°μ λ(scatter plot)
# - μ£Όμ΄μ§ λ°μ΄ν°λ₯Ό μ μΌλ‘ νμν΄μ ν©λΏλ¦¬ λ―, μκ°νν κ·Έλν
# - λ°μ΄ν°μ μ€μ κ°λ€μ΄ νμλλ―λ‘ λ°μ΄ν°μ λΆν¬λ₯Ό ν λμ νμ
ν μ μλ€.
# * db file scatter read ~ full table scan
# x - y plotting
# μ°ν¬λ(μκ΄λ)
# - μλ£μμ 2κ° νλͺ© κ°μ κ΄κ³λ₯Ό μλλ°λ μ°ν¬λκ° νΈλ¦¬νλ€.
# - μλ£μ λΆμ° μν©μ λνλ΄λ μμ κ°μΌλ‘ λ³λκ³Ό λΆν¬κ° μ£Όμ΄μ‘μ λ,
# λ³λμ΄ λΆν¬μ μ€μ¬κ°μ ν©μ΄μ§ μ λ
# * νκ·λΆμμ΄ μ¬κΈ°μ λΆν° μμλλ€
library(help = datasets) # R λ΄μ₯λ dataset list check
help(women)
Average Heights and Weights for American Women
"""
Description
This data set gives the average heights and weights for American women aged 30???39.
Format
A data frame with 15 observations on 2 variables.
[,1] height numeric Height (in)
[,2] weight numeric Weight (lbs) Details
"""
women
str(women)
plot(women$weight)
plot(women$height)
"""
1) type = p(μ ), l(μ ), b(μ ,μ ), c(bμ μ ), o(μ μμ μ ), h(μμ§μ ), s(κ³λ¨ν), S, n(λνλμ§ μμ)
2) lty : μ μ μ ν 0 ~ 6
- 0 : κ·Έλ¦¬μ§ μμ
- 1 : μ€μ (κΈ°λ³Έκ°)
- 2 : λμ(-)
- 3 : μ
- 4 : μ κ³Ό λμ
- 5 : κΈ΄ λμ
- 6 : λκ°μ λμ
3) lwd : μ μ κ΅΅κΈ°(κΈ°λ³Έκ° 1)
4) pch : μ μ μ’
λ₯(0 ~ 25)
- 0 ~ 18 : SμΈμ΄κ° μ¬μ©νλ νΉμλ¬Έμ
- 19 ~ 25 : RμΈμ΄κ° νμ₯ν νΉμλ¬Έμ
5) cex : μ μ ν¬κΈ°(κΈ°λ³Έκ° 1)
"""
plot(x = women$height, y = women$weight,
xlab = "ν€", ylab = "λͺΈλ¬΄κ²",
main = "μ¬μ±μ ν€μ λͺΈλ¬΄κ²",
sub = "λ―Έκ΅ 70λ
λ κΈ°μ€",
type = "c", lty = 3, lwd = 2, pch = 23)
## 13-3. Orange data
help("Orange")
a1 <- Orange[Orange$Tree == 1, 2:3]
a1
a2 <- Orange[Orange$Tree == 2, 2:3]
a2
a3 <- Orange[Orange$Tree == 3, 2:3]
a3
a4 <- Orange[Orange$Tree == 4, 2:3]
a4
a5 <- Orange[Orange$Tree == 5, 2:3]
a5
plot(a1$age, a1$circumference, type = 'o', pch = 1, axes = FALSE,
xlim = c(110,1600), ylim = c(30,210), col = "red", xlab = "age", ylab = "circumference",
lwd = 1)
lines(a2$age, a2$circumference, type = 'o', pch = 2, col = "blue")
lines(a3$age, a3$circumference, type = 'o', pch = 3, col = "black")
lines(a4$age, a4$circumference, type = 'o', pch = 4, col = "darkgreen")
lines(a5$age, a5$circumference, type = 'o', pch = 5, col = "orange")
axis(side = 2)
axis(side = 1)
box()
legend('topleft', legend = c('Tree1','Tree2','Tree3','Tree4','Tree5'),
col = c("red", "blue","black","darkgreen","orange"), pch = c(1:5), lty = 1)
w <- data.frame(sal = emp$SALARY * 12,
w_day = as.numeric(as.Date(Sys.Date()) - as.Date(emp$HIRE_DATE, format='%Y-%m-%d')))
plot(x = w$w_day, # xμ’ν
y = w$sal, # yμ’ν
xlab = "근무μΌμ", # xμΆ μ΄λ¦
ylab = "μ°λ΄", # yμΆ μ΄λ¦
main = "근무μΌμμ λ°λ₯Έ μ°λ΄ κ΄κ³",
sub = "empν
μ΄λΈ κΈ°μ€",
type = "p",
lty = 4,
lwd = 2,
pch = 20)
scatter.smooth(w$w_day, w$sal,
xlab="근무μΌμ", ylab="μ°λ΄", main="근무μΌμμ λ°λ₯Έ μ°λ΄ κ΄κ³",
span=2/3, degree=1,
lpars = list(col='orange',lwd=2,lty=2), pch=20,
col=ifelse(w$sal==max(w$sal),"red","blue"))
|
/R_Code/[2018.01.12]R-13μΌμ°¨.R
|
no_license
|
Trigger21/R
|
R
| false
| false
| 13,677
|
r
|
## R-13μΌμ°¨(2018.1.12)
#[λ¬Έμ 155] exam.csv fileμλ νμλ€μ μνμ μκ° μμ΅λλ€. νμλ€μ SQL μ μλ₯Ό λ§λκ·Έλνλ‘ μΆλ ₯ν΄μ£ΌμΈμ.
exam
barplot(exam$grade[exam$subject == "SQL"],
names.arg = exam$name[exam$subject == "SQL"],
main = "SQL μ μ",
ylim = c(0, 100),
cex.names = 0.7,
col = topo.colors(NROW(exam$name[exam$subject == "SQL"])), density = 40)
abline(h = seq(0,100,10), lty = 3, col = "red")
box()
#μ μλ νμ΄
graphics.off()
par(mfrow=c(1,1)) # νμ΄
par(mfrow=c(2,2))
barplot(exam[exam$subject=='SQL','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='SQL','name'],
main='SQL μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='SQL','name'])))
box() # box λνλ΄λ €λ©΄
#[λ¬Έμ 156] exam.csv fileμλ νμλ€μ μνμ μκ° μμ΅λλ€. νμλ€μ R μ μλ₯Ό λ§λκ·Έλνλ‘ μΆλ ₯ν΄μ£ΌμΈμ.
barplot(exam$grade[exam$subject == "R"],
names.arg = exam$name[exam$subject == "R"],
ylim = c(0, 100),
cex.names = 0.7,
col = topo.colors(nrow(ex_158)), density = 40)
#μ μλ νμ΄
barplot(exam[exam$subject=='R','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='R','name'],
main='R μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='R','name'])))
box()
#[λ¬Έμ 157] exam.csv fileμλ νμλ€μ μνμ μκ° μμ΅λλ€. νμλ€μ PYTHON μ μλ₯Ό λ§λκ·Έλνλ‘ μΆλ ₯ν΄μ£ΌμΈμ.
barplot(exam$grade[exam$subject == "PYTHON"],
names.arg = exam$name[exam$subject == "PYTHON"],
ylim = c(0, 100),
cex.names = 0.7,
col = topo.colors(nrow(ex_158)), density = 40)
#μ μλ νμ΄
barplot(exam[exam$subject=='PYTHON','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='PYTHON','name'],
main='PYTHON μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='PYTHON','name'])))
box()
#[λ¬Έμ 158] exam.csv fileμλ νμλ€μ μνμ μκ° μμ΅λλ€. νμλ€μ κ³Όλͺ© μ΄ μ μλ₯Ό λ§λκ·Έλνλ‘ μΆλ ₯νμΈμ.
#κ·Έλ£Ήν μ§κ³κ° ꡬνλ λ°©λ²
aggregate(grade ~ name, exam, sum)
library(plyr)
ex_158 <- ddply(exam, 'name', summarise, sum_grade = sum(grade))
library(sqldf)
sqldf("select name, sum(grade) sum_grade
from exam
group by name")
library(dplyr)
exam%>%
group_by(name)%>%
summarise_at('grade', sum)
barplot(ex_158$sum_grade,
names.arg = ex_158$name,
main = "μ΄μ μ",
ylim = c(0,300),
cex.name = 0.7,
col = topo.colors(nrow(ex_158)), density = 40)
#μ μλ νμ΄
exam_t <- aggregate(grade ~ name, exam, sum)
barplot(exam_t$grade,
ylim=c(0,300),
names.arg=exam_t$name,
main='κ³Όλͺ© μ΄ μ μ',
las=2,
col=rainbow(length(exam_t$name)))
graphics.off()
par(mfrow=c(2,2))
barplot(exam[exam$subject=='SQL','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='SQL','name'],
main='SQL μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='SQL','name'])))
box()
barplot(exam[exam$subject=='R','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='R','name'],
main='R μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='R','name'])))
box()
barplot(exam[exam$subject=='PYTHON','grade'], ylim=c(0,100),
names.arg=exam[exam$subject=='PYTHON','name'],
main='PYTHON μ μ',
las=2,
col=rainbow(length(exam[exam$subject=='PYTHON','name'])))
box()
exam_t <- aggregate(grade ~ name, exam, sum)
barplot(exam_t$grade,
ylim=c(0,300),
names.arg=exam_t$name,
main='κ³Όλͺ© μ΄ μ μ',
las=2,
col=rainbow(length(exam_t$name)))
box()
#[λ¬Έμ 159] νμλ€μ μ΄λ¦μ κΈ°μ€μΌλ‘ κ³Όλͺ©μ μλ₯Ό μ€νν λ§λκ·Έλνλ‘ μμ±νμΈμ.
#sol.1
x1 <- exam[exam$subject == "SQL", c("name","grade")]
x2 <- exam[exam$subject == "R", c("name","grade")]
x3 <- exam[exam$subject == "PYTHON", c("name","grade")]
names(x1)[2] <- "SQL"
names(x2)[2] <- "R"
names(x3)[2] <- "PYTHON"
x1;x2;x3
ex_159 <- merge(merge(x1, x2), x3)
str(ex_159)
ex_159
barplot(t(as.matrix(ex_159[,2:4])),
names.arg = ex_159[,1],
beside = FALSE,
main = "μ±μ ν",
ylim = c(0,300),
cex.names = 0.7,
col = topo.colors(3), density = 40,
legend.text = names(ex_159)[-1],
args.legend = list(cex = 0.5))
#sol.2
t_159 <- t(tapply(exam$grade, list(exam$name, exam$subject), sum))
t_159
barplot(t_159,
cex.names = 0.7, cex.axis = 0.7,
legend.text = rownames(t_159), args.legend = list(cex = 0.5))
#μ μλ νμ΄
t <- tapply(exam$grade, list(exam$subject, exam$name), sum)
bp <- barplot(t, names.arg=names(t), ylim=c(0,350),
xlab='μ΄λ¦', ylab='μ±μ ',
col=c('blue','green','purple'),
main='κ³Όλͺ©λ³ μ μ',
las=2)
legend('topright',
legend=rownames(t),
title='κ³Όλͺ©',
pch=15,
col=c('blue','green','purple'),
cex=0.9, pt.cex=1)
#[λ¬Έμ 160] νμλ€μ μ΄λ¦μ κΈ°μ€μΌλ‘ κ³Όλͺ©μ μλ₯Ό κ·Έλ£Ήν λ§λκ·Έλνλ‘ μμ±νμΈμ.
barplot(t(as.matrix(ex_159[,2:4])),
names.arg = ex_159[,1],
beside = TRUE,
main = "μ±μ ν",
ylim = c(0,100),
cex.names = 0.7,
col = topo.colors(3), density = 40,
legend.text = names(ex_159)[-1],
args.legend = list(cex = 0.5))
barplot(t_159,
beside = TRUE,
ylim = c(0,100),
cex.names = 0.7, cex.axis = 0.7,
legend.text = rownames(t_159), args.legend = list(cex = 0.5))
abline(h=seq(0,100,20), col='black',lty=3)
box()
#μ μλ νμ΄
bp <- barplot(t, names.arg=names(t), beside=TRUE, ylim=c(0,110),
xlab='μ΄λ¦', ylab='μ±μ ',
col=c('blue','green','purple'),
main='κ³Όλͺ©λ³ μ μ',
las=2)
legend('topright',
legend=rownames(t),
title='κ³Όλͺ©',
pch=15,
col=c('blue','green','purple'),
cex=0.8, pt.cex=0.6)
plot(cars, main = "Stopping Distance versus Speed")
lines(stats::lowess(cars))
#[λ¬Έμ 161] μ°½μ
건μ.csv νμΌμ λ°μ΄ν° μ€μ λ
λλ³ μΉν¨μ§ μ°½μ
건μλ₯Ό λ§λκ·Έλνλ‘ μμ±νμΈμ.
opn <- read.csv("C:/R/μ°½μ
건μ.csv", header = T, stringsAsFactors = F) cls <- read.csv("C:/R/νμ
건μ.csv", header = T, stringsAsFactors = F)
class(opn$X)
t <- tapply(opn$μΉν¨, opn$X, max)
barplot(t, main = "μΉν¨μ§ μ°½μ
건μ", ylim = c(0,1500)) ; box()
abline(h = seq(0,1400,200), lty = 3)
#μ μλ νμ΄
create_cnt <- read.csv("c:/r/μ°½μ
건μ.csv",header=T) drop_cnt <- read.csv("c:/r/νμ
건μ.csv",header=T)
create_cnt drop_cnt
barplot(create_cnt$μΉν¨,main="λ
λλ³ μΉν¨μ§ μ°½μ
건μ", names.arg=create_cnt$X,col=('blue'), ylim=c(0,1300) )
class(opn$X)
t <- tapply(opn$μΉν¨, opn$X, max)
barplot(opn$μΉν¨μ§, names.arg = opn$X, col = "skyblue",
main = "μΉν¨μ§ μ°½μ
건μ",
ylim = c(0,1400), cex.axis = 0.9, las = 1)
abline(h = seq(0,1400, 200), lty = 3, col = "red")
box(col = "orange")
#[λ¬Έμ 162] λ
λλ³ μΉν¨μ§ μ°½μ
, νμ
건μλ₯Ό κ·Έλ£Ήν λ§λκ·Έλνλ‘ μμ±νμΈμ.
ex_162 <- rbind(opn$μΉν¨μ§, cls$μΉν¨μ§)
ex_162
bp_162 <- barplot(ex_162, names.arg = opn$X, beside = T,
ylim = c(0, 4000),
col = c("orange", "green"), density = 50,
legend.text = c("μ°½μ
","νμ
"),
las = 1)
text(x = bp_162[ex_162 == max(ex_162)],
y = max(ex_162), labels = "AIλ°λ³", pos = 3, col = "red")
#μ μλ νμ΄
graphics.off()
x <- rbind(create_cnt$μΉν¨,drop_cnt$μΉν¨) x
barplot(x, main="λ
λλ³ μΉν¨μ§ μ°½μ
,νμ
", names.arg=create_cnt$X,col=c("blue","red"), ylim=c(0,4000), beside=T)
barplot(x, main="λ
λλ³ μΉν¨μ§ μ°½μ
,νμ
", names.arg=create_cnt$X,col=c("blue","red"), ylim=c(0,4000), beside=T, legend=c("μ°½μ
","νμ
") )
#[λ¬Έμ 163] 2014 λ
λ μ
μ’
λ³ μ°½μ
λΉμ¨μ μν κ·Έλνλ‘ μμ±νμΈμ.
opn_2014 <- opn[opn$X==2014, -1]
opn_2014
per <- round(opn_2014 * 100 /sum(opn_2014))
pie(as.numeric(opn_2014), labels = paste(names(opn_2014), per, '%'),
main = '2014λ
μ
μ’
λ³ μ°½μ
λΉμ¨', font.main = 11,
init.angle = 110, col = rainbow(ncol(opn_2014)))
#λ²μΈ μ°κ΅¬ : μλ£ μΆμΆ
subset(opn, X == 2014, select = -1)
filter(opn, X == 2014)[,-1]
#[λ¬Έμ 164] λ
λλ₯Ό μ
λ ₯νλ©΄ ν΄λΉ λ
λμ μν κ·Έλν μμ±ν μ μλ ν¨μλ₯Ό μμ±νμΈμ.
show_pie(2006)
show_pie <- function(y){
res <- filter(opn, X == y)[,-1]
per <- round(res*100/sum(res))
pie(as.numeric(res), labels = paste(names(res),per,'%'),
init.angle = 110, col = rainbow(ncol(res)),
main = paste(y,'λ
μ
μ’
λ³ μ°½μ
λΉμ¨'))
}
show_pie(2005)
show_pie(2006)
show_pie(2007)
show_pie(2008)
show_pie(2009)
show_pie(2010)
show_pie(2011)
show_pie(2012)
show_pie(2013)
show_pie(2014)
#μ κ°μ μ
show_pies <- function(x,y){
tmp <- y - x + 1
if(tmp%%2 == 0){
par(mfrow = c(2, tmp/2))
} else{
par(mfrow = c(2,(tmp+1)/2))
}
for(i in x:y){
res <- filter(op1, X == i)[,-1]
per <- round(res*100/sum(res))
pie(as.numeric(res), labels = paste(names(res),per,'%'),
init.angle = 110, col = rainbow(ncol(res)),
main = paste(i,'λ
μ
μ’
λ³ μ°½μ
λΉμ¨'))
box()
}
}
show_pies(2005, 2014)
round(5/2,0)
5%/%2
par(mfrow=c(1,3))
pie(as.numeric(opn_2014), labels = paste(names(opn_2014), per, '%'),
main = '2014λ
μ
μ’
λ³ μ°½μ
λΉμ¨', font.main = 11,
init.angle = 110, col = rainbow(ncol(opn_2014)))
## 13-1. κ·Έλν μ¬μ§νμΌλ‘ μ μ₯νκΈ°
library(jpeg)
jpeg("c:/r/1.jpg")
pie(as.numeric(opn_2014), labels = paste(names(opn_2014), per, '%'),
main = '2014λ
μ
μ’
λ³ μ°½μ
λΉμ¨', font.main = 11,
init.angle = 110, col = rainbow(ncol(opn_2014)))
dev.off()
jpeg("c:/r/2.jpg")
pie(as.numeric(opn_2014), labels = paste(names(opn_2014), per, '%'),
main = '2014λ
μ
μ’
λ³ μ°½μ
λΉμ¨', font.main = 11,
init.angle = 110, col = rainbow(ncol(opn_2014)))
dev.off()
## 13-2. μ°μ λ(scatter plot)
# - μ£Όμ΄μ§ λ°μ΄ν°λ₯Ό μ μΌλ‘ νμν΄μ ν©λΏλ¦¬ λ―, μκ°νν κ·Έλν
# - λ°μ΄ν°μ μ€μ κ°λ€μ΄ νμλλ―λ‘ λ°μ΄ν°μ λΆν¬λ₯Ό ν λμ νμ
ν μ μλ€.
# * db file scatter read ~ full table scan
# x - y plotting
# μ°ν¬λ(μκ΄λ)
# - μλ£μμ 2κ° νλͺ© κ°μ κ΄κ³λ₯Ό μλλ°λ μ°ν¬λκ° νΈλ¦¬νλ€.
# - μλ£μ λΆμ° μν©μ λνλ΄λ μμ κ°μΌλ‘ λ³λκ³Ό λΆν¬κ° μ£Όμ΄μ‘μ λ,
# λ³λμ΄ λΆν¬μ μ€μ¬κ°μ ν©μ΄μ§ μ λ
# * νκ·λΆμμ΄ μ¬κΈ°μ λΆν° μμλλ€
library(help = datasets) # R λ΄μ₯λ dataset list check
help(women)
Average Heights and Weights for American Women
"""
Description
This data set gives the average heights and weights for American women aged 30???39.
Format
A data frame with 15 observations on 2 variables.
[,1] height numeric Height (in)
[,2] weight numeric Weight (lbs) Details
"""
women
str(women)
plot(women$weight)
plot(women$height)
"""
1) type = p(μ ), l(μ ), b(μ ,μ ), c(bμ μ ), o(μ μμ μ ), h(μμ§μ ), s(κ³λ¨ν), S, n(λνλμ§ μμ)
2) lty : μ μ μ ν 0 ~ 6
- 0 : κ·Έλ¦¬μ§ μμ
- 1 : μ€μ (κΈ°λ³Έκ°)
- 2 : λμ(-)
- 3 : μ
- 4 : μ κ³Ό λμ
- 5 : κΈ΄ λμ
- 6 : λκ°μ λμ
3) lwd : μ μ κ΅΅κΈ°(κΈ°λ³Έκ° 1)
4) pch : μ μ μ’
λ₯(0 ~ 25)
- 0 ~ 18 : SμΈμ΄κ° μ¬μ©νλ νΉμλ¬Έμ
- 19 ~ 25 : RμΈμ΄κ° νμ₯ν νΉμλ¬Έμ
5) cex : μ μ ν¬κΈ°(κΈ°λ³Έκ° 1)
"""
plot(x = women$height, y = women$weight,
xlab = "ν€", ylab = "λͺΈλ¬΄κ²",
main = "μ¬μ±μ ν€μ λͺΈλ¬΄κ²",
sub = "λ―Έκ΅ 70λ
λ κΈ°μ€",
type = "c", lty = 3, lwd = 2, pch = 23)
## 13-3. Orange data
help("Orange")
a1 <- Orange[Orange$Tree == 1, 2:3]
a1
a2 <- Orange[Orange$Tree == 2, 2:3]
a2
a3 <- Orange[Orange$Tree == 3, 2:3]
a3
a4 <- Orange[Orange$Tree == 4, 2:3]
a4
a5 <- Orange[Orange$Tree == 5, 2:3]
a5
plot(a1$age, a1$circumference, type = 'o', pch = 1, axes = FALSE,
xlim = c(110,1600), ylim = c(30,210), col = "red", xlab = "age", ylab = "circumference",
lwd = 1)
lines(a2$age, a2$circumference, type = 'o', pch = 2, col = "blue")
lines(a3$age, a3$circumference, type = 'o', pch = 3, col = "black")
lines(a4$age, a4$circumference, type = 'o', pch = 4, col = "darkgreen")
lines(a5$age, a5$circumference, type = 'o', pch = 5, col = "orange")
axis(side = 2)
axis(side = 1)
box()
legend('topleft', legend = c('Tree1','Tree2','Tree3','Tree4','Tree5'),
col = c("red", "blue","black","darkgreen","orange"), pch = c(1:5), lty = 1)
w <- data.frame(sal = emp$SALARY * 12,
w_day = as.numeric(as.Date(Sys.Date()) - as.Date(emp$HIRE_DATE, format='%Y-%m-%d')))
plot(x = w$w_day, # xμ’ν
y = w$sal, # yμ’ν
xlab = "근무μΌμ", # xμΆ μ΄λ¦
ylab = "μ°λ΄", # yμΆ μ΄λ¦
main = "근무μΌμμ λ°λ₯Έ μ°λ΄ κ΄κ³",
sub = "empν
μ΄λΈ κΈ°μ€",
type = "p",
lty = 4,
lwd = 2,
pch = 20)
scatter.smooth(w$w_day, w$sal,
xlab="근무μΌμ", ylab="μ°λ΄", main="근무μΌμμ λ°λ₯Έ μ°λ΄ κ΄κ³",
span=2/3, degree=1,
lpars = list(col='orange',lwd=2,lty=2), pch=20,
col=ifelse(w$sal==max(w$sal),"red","blue"))
|
#' @importFrom stats setNames p.adjust
named_sort = function(x, nm){
sort(setNames(x, nm), decreasing = TRUE)
}
#' Run a GSEA by group
#'
#' @param formula a `formula` of the kind `numeric_score ~ group1 + group2 + ...` identifying the column containing the score vs the grouping variables, which will be interacted with each other
#' @param gene_identifier `character`
#' @param data `data.frame` in which the `formula` and `gene_identifier` are interpreted
#' @param set_list alternatively, a list of named and sorted input
#' @param ... passed to [fgsea::fgsea()]
#' @inheritParams fgsea::fgsea
#' @return an object of class `GroupedGSEA`, which is internally just a list [fgsea::fgsea()] objects.
#' @export
#'
#' @examples
#' data(exampleRanks, package = 'fgsea')
#' data(examplePathways, package = 'fgsea')
#' data = data.frame(score = exampleRanks, gene = names(exampleRanks), treatment = rep(c('A', 'B'), length.out = length(exampleRanks)))
#' gout = gsea_by_group(score ~ treatment, 'gene', data, pathways = examplePathways)
#' df = as.data.frame(gout)
#' plot(gout)
gsea_by_group = function(formula, gene_identifier, data, set_list, nperm = 500, maxSize = 500, ...){
if( (!missing(formula) || !missing(data) || !missing(gene_identifier)) &&
(!purrr::is_formula(formula) || !inherits(data, 'data.frame') || !is.character(gene_identifier))){
stop("If one of `formula`, `data` or `gene_identifier` is provided, then all must be provided.")
}
if(!missing(formula)){
rhs_formula = gsub("^.*~", "~", as.character(as.expression(formula)))
lhs = gsub("[ ]*~.*$", "", as.character(as.expression(formula)))
if(!(lhs %in% names(data))) stop("`data` does not have column ", lhs)
if(!(gene_identifier %in% names(data))) stop("`data` does not have column ", gene_identifier)
set_list = plyr::dlply(.data = data, formula(rhs_formula),
.fun = function(df) {
named_sort(df[[lhs]], df[[gene_identifier]])
})
}
res = purrr::map(set_list, fgsea::fgsea, nperm = nperm, maxSize = maxSize, ...)
class(res) = c('GroupedGSEA', class(res))
res
}
globalVariables(c('cluster', 'pathway', 'NES', 'ES', 'min_rank', 'pval'))
#' @describeIn gsea_by_group coerce result to a `data.frame`
#' @param x object of class `GroupedGSEA`
#' @param topn report at least this many pathways (by minimum p value across groups)
#' @param wrap_len wrap pathway titles after this many characters
#' @param p.adjust_method passed to [stats::p.adjust()]
#' @param row.names ignored
#' @param optional ignored
#' @export
as.data.frame.GroupedGSEA = function(x, row.names = NULL, optional = TRUE, ..., topn = Inf, wrap_len = 30, p.adjust_method = 'BH'){
if(length(ignored <- list(...))> 0) warning('Arguments ignored: ', names(ignored))
'%>%' = dplyr::'%>%'
gsea_groupdf = purrr::map_dfr(x, as.data.frame, .id = 'cluster')
gsea_groupdf = gsea_groupdf %>% dplyr::mutate(p.adjust = stats::p.adjust(pval, method = p.adjust_method))
gsea_groupdf = gsea_groupdf %>% dplyr::group_by(cluster) %>% dplyr::mutate(rank = rank(pval))
gsea_groupdf = gsea_groupdf %>% dplyr::group_by(pathway) %>%
dplyr::mutate(min_rank = min(rank),
NES = ifelse(!is.finite(NES), 0, NES),
signed_log10p = -log10(p.adjust)*sign(ES))
gsea_groupdf = gsea_groupdf %>% dplyr::filter(min_rank <= topn)
gsea_groupdf = dplyr::ungroup(gsea_groupdf) %>% dplyr::mutate(ID_name = stringr::str_wrap(pathway, wrap_len), cluster = factor(cluster))
gsea_groupdf
}
#' @describeIn gsea_by_group make a plot of the results
#' @export
plot.GroupedGSEA = function(x, ..., topn = 5, wrap_len = 30, p.adjust_method = 'BH'){
if(length(ignored <- list( ...))> 0) warning('Arguments ignored: ', names(ignored))
if(!requireNamespace('ggplot2')) stop('Install ggplot2')
y = as.data.frame(x, topn = topn, wrap_len = wrap_len, p.adjust_method = p.adjust_method)
plt = ggplot2::ggplot(y, mapping = ggplot2::aes(y = cluster, x = NES, fill = cut(p.adjust, c(0, .01, .05, .1, .2, 1)))) + ggplot2::geom_point(shape = 21) + ggplot2::scale_fill_brewer('FDR', direction = -1, type = 'seq', palette = 'YlOrBr') + ggplot2::facet_wrap(~ID_name) + ggplot2::theme_minimal() + ggplot2::geom_vline(xintercept = 0, lty = 2)
plt
}
filter_gsea = function(gsea_result, sets){
stopifnot(length(setdiff(sets, names(gsea_result@geneSets)))==0)
gsea_result@result = subset(gsea_result@result, ID %in% sets)
gsea_result@geneSets = gsea_result@geneSets[sets]
gsea_result
}
|
/R/gsea_methods.R
|
no_license
|
amcdavid/AMmisc
|
R
| false
| false
| 4,654
|
r
|
#' @importFrom stats setNames p.adjust
named_sort = function(x, nm){
sort(setNames(x, nm), decreasing = TRUE)
}
#' Run a GSEA by group
#'
#' @param formula a `formula` of the kind `numeric_score ~ group1 + group2 + ...` identifying the column containing the score vs the grouping variables, which will be interacted with each other
#' @param gene_identifier `character`
#' @param data `data.frame` in which the `formula` and `gene_identifier` are interpreted
#' @param set_list alternatively, a list of named and sorted input
#' @param ... passed to [fgsea::fgsea()]
#' @inheritParams fgsea::fgsea
#' @return an object of class `GroupedGSEA`, which is internally just a list [fgsea::fgsea()] objects.
#' @export
#'
#' @examples
#' data(exampleRanks, package = 'fgsea')
#' data(examplePathways, package = 'fgsea')
#' data = data.frame(score = exampleRanks, gene = names(exampleRanks), treatment = rep(c('A', 'B'), length.out = length(exampleRanks)))
#' gout = gsea_by_group(score ~ treatment, 'gene', data, pathways = examplePathways)
#' df = as.data.frame(gout)
#' plot(gout)
gsea_by_group = function(formula, gene_identifier, data, set_list, nperm = 500, maxSize = 500, ...){
if( (!missing(formula) || !missing(data) || !missing(gene_identifier)) &&
(!purrr::is_formula(formula) || !inherits(data, 'data.frame') || !is.character(gene_identifier))){
stop("If one of `formula`, `data` or `gene_identifier` is provided, then all must be provided.")
}
if(!missing(formula)){
rhs_formula = gsub("^.*~", "~", as.character(as.expression(formula)))
lhs = gsub("[ ]*~.*$", "", as.character(as.expression(formula)))
if(!(lhs %in% names(data))) stop("`data` does not have column ", lhs)
if(!(gene_identifier %in% names(data))) stop("`data` does not have column ", gene_identifier)
set_list = plyr::dlply(.data = data, formula(rhs_formula),
.fun = function(df) {
named_sort(df[[lhs]], df[[gene_identifier]])
})
}
res = purrr::map(set_list, fgsea::fgsea, nperm = nperm, maxSize = maxSize, ...)
class(res) = c('GroupedGSEA', class(res))
res
}
globalVariables(c('cluster', 'pathway', 'NES', 'ES', 'min_rank', 'pval'))
#' @describeIn gsea_by_group coerce result to a `data.frame`
#' @param x object of class `GroupedGSEA`
#' @param topn report at least this many pathways (by minimum p value across groups)
#' @param wrap_len wrap pathway titles after this many characters
#' @param p.adjust_method passed to [stats::p.adjust()]
#' @param row.names ignored
#' @param optional ignored
#' @export
as.data.frame.GroupedGSEA = function(x, row.names = NULL, optional = TRUE, ..., topn = Inf, wrap_len = 30, p.adjust_method = 'BH'){
if(length(ignored <- list(...))> 0) warning('Arguments ignored: ', names(ignored))
'%>%' = dplyr::'%>%'
gsea_groupdf = purrr::map_dfr(x, as.data.frame, .id = 'cluster')
gsea_groupdf = gsea_groupdf %>% dplyr::mutate(p.adjust = stats::p.adjust(pval, method = p.adjust_method))
gsea_groupdf = gsea_groupdf %>% dplyr::group_by(cluster) %>% dplyr::mutate(rank = rank(pval))
gsea_groupdf = gsea_groupdf %>% dplyr::group_by(pathway) %>%
dplyr::mutate(min_rank = min(rank),
NES = ifelse(!is.finite(NES), 0, NES),
signed_log10p = -log10(p.adjust)*sign(ES))
gsea_groupdf = gsea_groupdf %>% dplyr::filter(min_rank <= topn)
gsea_groupdf = dplyr::ungroup(gsea_groupdf) %>% dplyr::mutate(ID_name = stringr::str_wrap(pathway, wrap_len), cluster = factor(cluster))
gsea_groupdf
}
#' @describeIn gsea_by_group make a plot of the results
#' @export
plot.GroupedGSEA = function(x, ..., topn = 5, wrap_len = 30, p.adjust_method = 'BH'){
if(length(ignored <- list( ...))> 0) warning('Arguments ignored: ', names(ignored))
if(!requireNamespace('ggplot2')) stop('Install ggplot2')
y = as.data.frame(x, topn = topn, wrap_len = wrap_len, p.adjust_method = p.adjust_method)
plt = ggplot2::ggplot(y, mapping = ggplot2::aes(y = cluster, x = NES, fill = cut(p.adjust, c(0, .01, .05, .1, .2, 1)))) + ggplot2::geom_point(shape = 21) + ggplot2::scale_fill_brewer('FDR', direction = -1, type = 'seq', palette = 'YlOrBr') + ggplot2::facet_wrap(~ID_name) + ggplot2::theme_minimal() + ggplot2::geom_vline(xintercept = 0, lty = 2)
plt
}
filter_gsea = function(gsea_result, sets){
stopifnot(length(setdiff(sets, names(gsea_result@geneSets)))==0)
gsea_result@result = subset(gsea_result@result, ID %in% sets)
gsea_result@geneSets = gsea_result@geneSets[sets]
gsea_result
}
|
source("R/model_user.r")
#' A Class which represents a channel object
#' @export
#'
#' @slot id the id of this channel
#' @slot type the type of channel
#' @slot guild_id the id of the guild
#' @slot position sorting position of the channel
#' @slot permission_overwrites explicit permission overwrites for members and roles
#' @slot name the name of the channel (2-100 characters)
#' @slot topic the channel topic (0-1024 characters)
#' @slot nsfw whether the channel is nsfw
#' @slot last_message_id the id of the last message sent in this channel (may not point to an existing or valid message)
#' @slot bitrate the bitrate (in bits) of the voice channel
#' @slot user_limit the user limit of the voice channel
#' @slot rate_limit_per_user amount of seconds a user has to wait before sending another message
#' @slot recipients the recipients of the DM
#' @slot icon icon hash
#' @slot owner_id id of the DM creator
#' @slot application_id application id of the group DM creator if it is bot-created
#' @slot parent_id id of the parent category for a channel
#' @slot last_pin_timestamp when the last pinned message was pinned
#'
#' @section Disclaimer: Pirate's channel object won't be separated to types!
#' see https://discordapp.com/developers/docs/resources/channel#channel-object-channel-types
#' to be able to differentiate.
#' also, because of that, many fields are subject of being NULL accordingly.
#' @import fastmap
Channel <- function(data) {
value <- list(
id = data$id,
type = data$type,
guild_id = data$guild_id,
position = data$position,
permission_overwrites = data$permission_overwrites,
name = data$name,
topic = data$topic,
nsfw = data$nsfw,
last_message_id = data$last_message_id,
bitrate = data$bitrate,
user_limit = data$user_limit,
rate_limit_per_user = data$rate_limit_per_user,
recipients = data$recipients,
icon = data$icon,
owner_id = data$owner_id,
application_id = data$application_id,
parent_id = data$parent_id,
last_pin_timestamp = data$last_pin_timestamp,
messages = fastmap()
)
if (isFALSE(is.null(data$recipients))) {
value$recipients <- fastmap()
lapply(data$recipients, function(user) value$recipients$set(user$id, User(user)))
}
attr(value, "class") <- "Channel"
value
}
|
/R/model_channel.r
|
no_license
|
TheOnlyArtz/Pirate
|
R
| false
| false
| 2,310
|
r
|
source("R/model_user.r")
#' A Class which represents a channel object
#' @export
#'
#' @slot id the id of this channel
#' @slot type the type of channel
#' @slot guild_id the id of the guild
#' @slot position sorting position of the channel
#' @slot permission_overwrites explicit permission overwrites for members and roles
#' @slot name the name of the channel (2-100 characters)
#' @slot topic the channel topic (0-1024 characters)
#' @slot nsfw whether the channel is nsfw
#' @slot last_message_id the id of the last message sent in this channel (may not point to an existing or valid message)
#' @slot bitrate the bitrate (in bits) of the voice channel
#' @slot user_limit the user limit of the voice channel
#' @slot rate_limit_per_user amount of seconds a user has to wait before sending another message
#' @slot recipients the recipients of the DM
#' @slot icon icon hash
#' @slot owner_id id of the DM creator
#' @slot application_id application id of the group DM creator if it is bot-created
#' @slot parent_id id of the parent category for a channel
#' @slot last_pin_timestamp when the last pinned message was pinned
#'
#' @section Disclaimer: Pirate's channel object won't be separated to types!
#' see https://discordapp.com/developers/docs/resources/channel#channel-object-channel-types
#' to be able to differentiate.
#' also, because of that, many fields are subject of being NULL accordingly.
#' @import fastmap
Channel <- function(data) {
value <- list(
id = data$id,
type = data$type,
guild_id = data$guild_id,
position = data$position,
permission_overwrites = data$permission_overwrites,
name = data$name,
topic = data$topic,
nsfw = data$nsfw,
last_message_id = data$last_message_id,
bitrate = data$bitrate,
user_limit = data$user_limit,
rate_limit_per_user = data$rate_limit_per_user,
recipients = data$recipients,
icon = data$icon,
owner_id = data$owner_id,
application_id = data$application_id,
parent_id = data$parent_id,
last_pin_timestamp = data$last_pin_timestamp,
messages = fastmap()
)
if (isFALSE(is.null(data$recipients))) {
value$recipients <- fastmap()
lapply(data$recipients, function(user) value$recipients$set(user$id, User(user)))
}
attr(value, "class") <- "Channel"
value
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{obs.no2}
\alias{obs.no2}
\title{Observed NO2 concentrations}
\format{A data frame with 202752 rows and 8 variables:
\describe{
\item{Time}{time (POSIXct format)}
\item{ID}{station code}
\item{Station}{station name}
\item{Value}{concentration (in \eqn{\mu g/m^3}{\mug/m3})}
\item{ZoneType}{\code{"URB"} (urban), \code{"SBR"} (suburban) or \code{"RUR"} (rural)}
\item{StationType}{always \code{"BKG"} (background)}
\item{Lat}{latitude}
\item{Lon}{longitude}
}}
\source{
\url{http://www.arpa.fvg.it/}
}
\usage{
obs.no2
}
\description{
A dataset containing hourly concentrations of NO2 measured at background sites in the
region of Friuli Venezia Giulia (Italy)
}
\keyword{datasets}
|
/man/obs.no2.Rd
|
permissive
|
jobonaf/dartle
|
R
| false
| true
| 801
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{obs.no2}
\alias{obs.no2}
\title{Observed NO2 concentrations}
\format{A data frame with 202752 rows and 8 variables:
\describe{
\item{Time}{time (POSIXct format)}
\item{ID}{station code}
\item{Station}{station name}
\item{Value}{concentration (in \eqn{\mu g/m^3}{\mug/m3})}
\item{ZoneType}{\code{"URB"} (urban), \code{"SBR"} (suburban) or \code{"RUR"} (rural)}
\item{StationType}{always \code{"BKG"} (background)}
\item{Lat}{latitude}
\item{Lon}{longitude}
}}
\source{
\url{http://www.arpa.fvg.it/}
}
\usage{
obs.no2
}
\description{
A dataset containing hourly concentrations of NO2 measured at background sites in the
region of Friuli Venezia Giulia (Italy)
}
\keyword{datasets}
|
#' confusion
#'
#' Confusion matrix or (for larger number of levels) confusion table.
#'
#' @param object Optional fit object. confusion() assumes object contains holdout/validation data as `y_test` and the forecasts/classifications as `predictions` but alternative variable names can be specified with the input arguments by those names.
#' @param y_test A vector of holdout/validation data or the name in object (if fit object provided but alternative variable name required).
#' @param predictions A vector predictions or the name in object (if fit object provided but alternative variable name required).
#' @param return_xtab Logical. If TRUE, returns confusion matrix, which is a crosstable with correct predictions on the diagonal (if all levels are predicted at least once). If FALSE, returns (rectangular) table with columns for percent correct, most common misclassification, second most common misclassification, and other predictions. Defaults to TRUE (crosstable-style) only if number of levels < 6.
#' @return confusion matrix or table as specified by return_xtab.
#' @export
confusion <- function(object = NULL, y_test = NULL, predictions = NULL, return_xtab = NULL){
obj <- data.frame(y_test = if(is.null(object)) y_test else object[[if(is.null(y_test)) "y_test" else y_test]],
predictions = if(is.null(object)) predictions else object[[if(is.null(predictions)) "predictions" else predictions]],
stringsAsFactors = FALSE)
return_xtab <- if(is.null(return_xtab)) n_distinct(obj$predictions) < 6 else return_xtab
if(return_xtab){
cf <- table(obj$y_test, obj$predictions)
return(cf)
}else{
obj <- obj %>% mutate(correct = y_test == predictions)
cf <- data.frame(label = unique(obj$y_test))
# confusion
cf[["pCorrect"]] <- NA
cf[["MCE"]] <- NA # Most Common Error
cf[["pMCE"]] <- 0 # proportion that are MCE
cf[["MCE2"]] <- NA # second most common error
cf[["pMCE2"]] <- 0
cf[["pOther"]] <- 0
for(i in 1:nrow(cf)){
lab_i <- obj$y_test == cf$label[i]
Nlab_i <- sum(lab_i)
cf$pCorrect[i] <- mean(obj$y_test[lab_i] == obj$predictions[lab_i])
tab <- sort(table(obj$predictions[lab_i]), decreasing = TRUE)
tab <- tab[-which(names(tab) == cf$label[i])]
if(cf$pCorrect[i] != 1){
cf$MCE[i] <- names(tab)[1]
cf$pMCE[i] <- tab[1]/Nlab_i
if(cf$pCorrect[i] + cf$pMCE[i] != 1){
cf$MCE2[i] <- names(tab)[2]
cf$pMCE2[i] <- tab[2]/Nlab_i
cf$pOther[i] <- 1 - (cf$pCorrect[i] + cf$pMCE[i] + cf$pMCE2[i])
}
}
}
return(cf)
}
}
|
/R/confusion.R
|
no_license
|
rdrr1990/keras
|
R
| false
| false
| 2,778
|
r
|
#' confusion
#'
#' Confusion matrix or (for larger number of levels) confusion table.
#'
#' @param object Optional fit object. confusion() assumes object contains holdout/validation data as `y_test` and the forecasts/classifications as `predictions` but alternative variable names can be specified with the input arguments by those names.
#' @param y_test A vector of holdout/validation data or the name in object (if fit object provided but alternative variable name required).
#' @param predictions A vector predictions or the name in object (if fit object provided but alternative variable name required).
#' @param return_xtab Logical. If TRUE, returns confusion matrix, which is a crosstable with correct predictions on the diagonal (if all levels are predicted at least once). If FALSE, returns (rectangular) table with columns for percent correct, most common misclassification, second most common misclassification, and other predictions. Defaults to TRUE (crosstable-style) only if number of levels < 6.
#' @return confusion matrix or table as specified by return_xtab.
#' @export
confusion <- function(object = NULL, y_test = NULL, predictions = NULL, return_xtab = NULL){
obj <- data.frame(y_test = if(is.null(object)) y_test else object[[if(is.null(y_test)) "y_test" else y_test]],
predictions = if(is.null(object)) predictions else object[[if(is.null(predictions)) "predictions" else predictions]],
stringsAsFactors = FALSE)
return_xtab <- if(is.null(return_xtab)) n_distinct(obj$predictions) < 6 else return_xtab
if(return_xtab){
cf <- table(obj$y_test, obj$predictions)
return(cf)
}else{
obj <- obj %>% mutate(correct = y_test == predictions)
cf <- data.frame(label = unique(obj$y_test))
# confusion
cf[["pCorrect"]] <- NA
cf[["MCE"]] <- NA # Most Common Error
cf[["pMCE"]] <- 0 # proportion that are MCE
cf[["MCE2"]] <- NA # second most common error
cf[["pMCE2"]] <- 0
cf[["pOther"]] <- 0
for(i in 1:nrow(cf)){
lab_i <- obj$y_test == cf$label[i]
Nlab_i <- sum(lab_i)
cf$pCorrect[i] <- mean(obj$y_test[lab_i] == obj$predictions[lab_i])
tab <- sort(table(obj$predictions[lab_i]), decreasing = TRUE)
tab <- tab[-which(names(tab) == cf$label[i])]
if(cf$pCorrect[i] != 1){
cf$MCE[i] <- names(tab)[1]
cf$pMCE[i] <- tab[1]/Nlab_i
if(cf$pCorrect[i] + cf$pMCE[i] != 1){
cf$MCE2[i] <- names(tab)[2]
cf$pMCE2[i] <- tab[2]/Nlab_i
cf$pOther[i] <- 1 - (cf$pCorrect[i] + cf$pMCE[i] + cf$pMCE2[i])
}
}
}
return(cf)
}
}
|
shinyUI(
pageWithSidebar(
headerPanel("Analysis and Investigation of the PropertyPrice Register "),
sidebarPanel("filter"),
mainPanel("Main")
),
mainPanel(plotOutput("Plot_One")
)
)
|
/UI_R_APP.R
|
no_license
|
MichaelDineen97/Final_Year_Housing_Project
|
R
| false
| false
| 244
|
r
|
shinyUI(
pageWithSidebar(
headerPanel("Analysis and Investigation of the PropertyPrice Register "),
sidebarPanel("filter"),
mainPanel("Main")
),
mainPanel(plotOutput("Plot_One")
)
)
|
library(corTools)
### Name: dist
### Title: Distribution Generator
### Aliases: dist
### ** Examples
# Create some sample data, as a dataframe with a numeric column
col1 <- c("L1", "L2", "L4", "E6", "G1")
col2 <- c(1.42, 14.34, 6.43, 32.1, 15.8)
dat <- as.data.frame(cbind(col1, col2))
dat$col2 <- as.numeric(as.character(dat$col2))
dist(dat$col2, 0.05, 0.95)
# dat$col2 denotes the column of the data that the distribution will be based on
# 0.05 and 0.95 indicate that the function will return values that are smaller
# than 5% of the values, or greater than 95% of the values
# function will return values 1.42 and 32.10
|
/data/genthat_extracted_code/corTools/examples/dist.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 633
|
r
|
library(corTools)
### Name: dist
### Title: Distribution Generator
### Aliases: dist
### ** Examples
# Create some sample data, as a dataframe with a numeric column
col1 <- c("L1", "L2", "L4", "E6", "G1")
col2 <- c(1.42, 14.34, 6.43, 32.1, 15.8)
dat <- as.data.frame(cbind(col1, col2))
dat$col2 <- as.numeric(as.character(dat$col2))
dist(dat$col2, 0.05, 0.95)
# dat$col2 denotes the column of the data that the distribution will be based on
# 0.05 and 0.95 indicate that the function will return values that are smaller
# than 5% of the values, or greater than 95% of the values
# function will return values 1.42 and 32.10
|
############ Extracting dataset 2: GSE62646 ############
geo_id = 'GSE62646'
data <- getGEO(geo_id, GSEMatrix=TRUE)
# Extract gene expression data
data.exp <- as.data.frame(exprs(data[[1]]))
# Format to 5 decimals
data.exp=format(as.data.frame(data.exp), digits=5)
#############
# Annotation
#############
# Which annotation file to use?
get_annotation_db("GPL6244")
biocLite("hugene10sttranscriptcluster.db")
library(hugene10sttranscriptcluster.db)
probe_ids = rownames(data.exp)
gene_symbols = unlist(mget(probe_ids, hugene10sttranscriptclusterSYMBOL, ifnotfound=NA))
annotated = as.data.frame(cbind(probe_ids, gene_symbols))
################
# IQR filtering
################
# Merging annotation with expression data
data.exp$probe_ids <- rownames(data.exp)
data.annotated = merge(data.exp, annotated, by.x="probe_ids", by.y="probe_ids")
write.table(data.annotated, paste(geo_id,"_exp_annotated.txt", sep=""),sep="\t",row.names=F)
data.annotated = read.delim(paste(geo_id,"_exp_annotated.txt", sep=""),sep="\t",check.names=F)
# Sorting by gene symbols
data.annotated.sorted = data.annotated[order(data.annotated$gene_symbols),]
logdata = data.annotated.sorted[,!(colnames(data.annotated.sorted) %in% c("probe_ids", "gene_symbols"))]
unlogdata = 2^logdata
# Calculating IQR for all probes using unlog data
iqr <- apply(unlogdata,1,IQR)
data.iqr = cbind(data.annotated.sorted[,(colnames(data.annotated.sorted) %in% c("probe_ids", "gene_symbols"))], iqr, unlogdata)
write.table((data.iqr), paste(geo_id,"_unlog.IQR.txt", sep=""), sep="\t",row.names=F)
# Keep probe with highest iqr in case of multiple probes
names(iqr) = data.annotated.sorted$probe_ids
iqrs = split.default(iqr, data.annotated.sorted$gene_symbols)
maxes = sapply(iqrs, function(x) names(which.max(x)))
singleprobe = data.iqr[data.iqr$probe_ids %in% maxes, !(colnames(data.iqr) == "probe_ids")]
## remove row with gene symbol NA
newdata = singleprobe
write.table(newdata, paste(geo_id,"_singleprobe_unlogged.txt", sep=""),sep="\t", row.names=FALSE,quote=FALSE)
d = newdata[,!(colnames(newdata) %in% c("gene_symbols", "iqr"))]
gene_symbols <- newdata[,(colnames(newdata) %in% c("gene_symbols"))]
logd = cbind(gene_symbols, log2(d))
logd[mapply(is.infinite, logd)] <- 1024.0
write.table(logd, paste(geo_id,"_singleprobe_logged.txt", sep=""),sep="\t", row.names=FALSE,quote=FALSE)
#Set column names to gene
rownames(logd) <- logd[,1]
# Drop column "gene_symbols"
logd <- logd[,-which(names(logd) %in% c("gene_symbols"))]
# Check if column names are unique
length(rownames(logd))
length(unique(rownames(logd)))
# if not unique, make unique
rownames(logd) = make.unique(rownames(logd), sep="_")
# Transpose
logd = t(logd)
#############
# Phenotype
#############
names(pData(data[[1]]))
# ID and Target variable column
data.pheno <- as.data.frame(pData(data[[1]])[, c(2,35,37)])
names(data.pheno)[1]<-"#Identifier"
names(data.pheno)[2]<-"@Class"
names(data.pheno)[3]<-"Group"
# Edit outcome values to Case/Normal
View(data.pheno)
data.pheno$"@Class" <- gsub('STEMI', 'Case', data.pheno$"@Class")
data.pheno$"@Class" <- gsub('CAD', 'Normal', data.pheno$"@Class")
# Merge expression data with phenotype.
data.all <- cbind(data.pheno, logd)
# Only keep instances measured during admission.
data.all <- data.all[grep("^admission",data.pheno$Group),]
data.all <- subset(data.all, select = -c(Group))
# Write data to file
write.table(data.all, paste(geo_id,"_exp.csv", sep = ""),sep=",",row.names=F, quote = FALSE)
|
/r_scripts/GSE62646.R
|
permissive
|
jeyabbalas/Bayesian-Rule-Learning
|
R
| false
| false
| 3,506
|
r
|
############ Extracting dataset 2: GSE62646 ############
geo_id = 'GSE62646'
data <- getGEO(geo_id, GSEMatrix=TRUE)
# Extract gene expression data
data.exp <- as.data.frame(exprs(data[[1]]))
# Format to 5 decimals
data.exp=format(as.data.frame(data.exp), digits=5)
#############
# Annotation
#############
# Which annotation file to use?
# --- Probe annotation -------------------------------------------------------
# Map Affymetrix probe IDs to gene symbols for platform GPL6244
# (Human Gene 1.0 ST) using the Bioconductor annotation package.
# NOTE(review): biocLite() is the legacy installer (pre-BiocManager); installing
# at run time inside an analysis script is fragile -- confirm the package is
# preinstalled instead.
get_annotation_db("GPL6244")
biocLite("hugene10sttranscriptcluster.db")
library(hugene10sttranscriptcluster.db)
probe_ids = rownames(data.exp)
# mget() returns a list keyed by probe ID; unlist() flattens it to a named
# character vector, with NA for probes that have no symbol.
gene_symbols = unlist(mget(probe_ids, hugene10sttranscriptclusterSYMBOL, ifnotfound=NA))
annotated = as.data.frame(cbind(probe_ids, gene_symbols))
################
# IQR filtering
################
# Merging annotation with expression data
data.exp$probe_ids <- rownames(data.exp)
data.annotated = merge(data.exp, annotated, by.x="probe_ids", by.y="probe_ids")
# Round-trip through disk: written for the record, then re-read so column
# classes come from read.delim rather than from merge().
write.table(data.annotated, paste(geo_id,"_exp_annotated.txt", sep=""),sep="\t",row.names=F)
data.annotated = read.delim(paste(geo_id,"_exp_annotated.txt", sep=""),sep="\t",check.names=F)
# Sorting by gene symbols
data.annotated.sorted = data.annotated[order(data.annotated$gene_symbols),]
logdata = data.annotated.sorted[,!(colnames(data.annotated.sorted) %in% c("probe_ids", "gene_symbols"))]
# Expression values are log2-scaled here; un-log before computing IQR.
unlogdata = 2^logdata
# Calculating IQR for all probes using unlog data
iqr <- apply(unlogdata,1,IQR)
data.iqr = cbind(data.annotated.sorted[,(colnames(data.annotated.sorted) %in% c("probe_ids", "gene_symbols"))], iqr, unlogdata)
write.table((data.iqr), paste(geo_id,"_unlog.IQR.txt", sep=""), sep="\t",row.names=F)
# Keep probe with highest iqr in case of multiple probes
names(iqr) = data.annotated.sorted$probe_ids
# split.default() groups per-probe IQRs by gene symbol; for each gene the probe
# with the largest IQR is retained as its representative.
iqrs = split.default(iqr, data.annotated.sorted$gene_symbols)
maxes = sapply(iqrs, function(x) names(which.max(x)))
singleprobe = data.iqr[data.iqr$probe_ids %in% maxes, !(colnames(data.iqr) == "probe_ids")]
## remove row with gene symbol NA
newdata = singleprobe
write.table(newdata, paste(geo_id,"_singleprobe_unlogged.txt", sep=""),sep="\t", row.names=FALSE,quote=FALSE)
d = newdata[,!(colnames(newdata) %in% c("gene_symbols", "iqr"))]
gene_symbols <- newdata[,(colnames(newdata) %in% c("gene_symbols"))]
logd = cbind(gene_symbols, log2(d))
# NOTE(review): -Inf values (log2 of zero) are replaced with 1024.0; the intent
# of this sentinel value is not evident from this script -- confirm.
logd[mapply(is.infinite, logd)] <- 1024.0
write.table(logd, paste(geo_id,"_singleprobe_logged.txt", sep=""),sep="\t", row.names=FALSE,quote=FALSE)
#Set column names to gene
rownames(logd) <- logd[,1]
# Drop column "gene_symbols"
logd <- logd[,-which(names(logd) %in% c("gene_symbols"))]
# Check if column names are unique
length(rownames(logd))
length(unique(rownames(logd)))
# if not unique, make unique
rownames(logd) = make.unique(rownames(logd), sep="_")
# Transpose
# After t(), rows are samples and columns are genes.
logd = t(logd)
#############
# Phenotype
#############
names(pData(data[[1]]))
# ID and Target variable column
# NOTE(review): column indices 2, 35 and 37 are hard-coded for this GEO
# series' phenotype table layout -- verify against the actual pData columns.
data.pheno <- as.data.frame(pData(data[[1]])[, c(2,35,37)])
names(data.pheno)[1]<-"#Identifier"
names(data.pheno)[2]<-"@Class"
names(data.pheno)[3]<-"Group"
# Edit outcome values to Case/Normal
View(data.pheno)
data.pheno$"@Class" <- gsub('STEMI', 'Case', data.pheno$"@Class")
data.pheno$"@Class" <- gsub('CAD', 'Normal', data.pheno$"@Class")
# Merge expression data with phenotype.
# cbind() assumes data.pheno rows and logd rows are in the same sample order.
data.all <- cbind(data.pheno, logd)
# Only keep instances measured during admission.
data.all <- data.all[grep("^admission",data.pheno$Group),]
data.all <- subset(data.all, select = -c(Group))
# Write data to file
write.table(data.all, paste(geo_id,"_exp.csv", sep = ""),sep=",",row.names=F, quote = FALSE)
|
##### libraries ####
library(shiny); library(shinyjs); library(shinyalert); library(dplyr);
library(DT); library(shinyBS); library(data.table); library(markdown);
library(rio);
source('functions.R');
# Optional import/export back-ends for rio; loaded via requireNamespace() so a
# missing one does not abort app startup.
requireNamespace('readxl');
requireNamespace('feather');
requireNamespace('fst');
requireNamespace('rmatio');
requireNamespace('jsonlite');
requireNamespace('readODS');
requireNamespace('xml2');
requireNamespace('yaml');
requireNamespace('pzfx');
requireNamespace('csvy');
##### global settings ####
# Debug mode is enabled by dropping a '.debug' marker file next to app.R.
debug <- file.exists('.debug');
gitlink <- 'https://github.com/bokov/anyfile'
source('www/docs/helptext.R');
hcol <- '#008c99';
# Formats rio can import, derived from its S3 method names (.import.rio_XXX).
# FIX: escape and anchor the dots in the gsub() pattern -- the original pattern
# '.import.rio_' treated '.' as a regex wildcard and was unanchored, so it
# could in principle strip a matching run anywhere inside a method name.
formats <- gsub('^\\.import\\.rio_','',grep('^\\.import\\.rio_'
                                            ,methods(.import),value=TRUE));
# Formats to try first / last when guessing an unknown file's type, and
# formats never to auto-try (interactive-only or overly permissive readers).
tryfirst <- intersect(c('xlsx','ods','xls','xml','rdata','r','json'
                        ,'html'),formats);
trylast <- c('dat','csvy','yml');
nevertry <- c('clipboard','fortran','csv','csv2','psv','fwf','txt','eviews',trylast);
tryother <- setdiff(formats,c(tryfirst,nevertry));
tryformats <- c(tryfirst,tryother,trylast);
neverexport <- c('clipboard','sqlite');
# Formats rio can export (same S3-method scan, minus blacklisted targets),
# with the same regex-escaping fix applied.
exportformats <- setdiff(gsub('^\\.export\\.rio_'
                              ,'',grep('^\\.export\\.rio_'
                                       ,methods(.export),value=TRUE))
                         ,neverexport);
# UI ####
# Page layout: logo/title header, a file-upload row, then two initially hidden
# rows (import/convert controls and a data preview) revealed progressively by
# shinyjs show()/hide() calls in the server function.
ui <- fluidPage(
# + Head ####
tags$head(tags$link(rel="shortcut icon", href="favicon.ico")
,includeScript("ga.js"))
,includeCSS('df.css')
,useShinyjs()
,useShinyalert()
,fluidRow(
# + Title etc. ####
column(1)
,column(2,img(src='sitelogo_color.png',width='100%',maxwidth='100vw'),br()
# The Debug button exists only when the '.debug' marker file is present.
,if(debug) actionButton('debug','Debug') else c())
,column(8,h3("AnyFile",id='apptitle')
,em('A free, open-source webapp by Alex Bokov, PhD'
,'made possible by support from'
,'NIH/NCATS UL1TR001120 (IIMS) and the'
,'Long School of Medicine KL2 Award.'
,'Uses the',a('rio'
,href='https://github.com/leeper/rio')
,' library by Thomas J. Leeper, PhD.'
,' Source code available on',a('GitHub',href=gitlink
,target='_blank')))
,column(1))
,fluidRow(# + File Upload ####
column(1)
,column(10,hr()
,p("Sometimes you are provided data in an unfamiliar"
," format, or in a format that needs software"
," that you do not own, or even a format that is"
," completely unknown. ", tags$b('AnyFile')
," supports over a "
," dozen of the most common data formats and will"
," do its level best to find a way to read your"
," data, then give you a choice of formats"
," into which you can convert it.")
,fileInput("infile"
,div("Choose a file to upload and convert to a"
," format of your choice" #,HTML(' ')
)
,multiple = FALSE,width = '100%'
))
,column(1))
# + File Convert ####
# Hidden until a file is uploaded (revealed by the infile observer).
,hidden(fluidRow(column(1)
,column(10,hr()
,p("Some data formats (e.g. Excel and OpenOffice)"
," may contain multiple tables of data. Here you"
," are being asked which one to import in such a"
," case. If the one you specify is not available "
,tags$b('AnyFile')," will go back to importing"
," the first one it finds.")
,numericInput('which',span('Which sheet or table? '
,'(if in doubt, you can leave it as-is'
,' and just click the button below) '
#,HTML(' ')
)
,min=1,max=20,value=1,width='100%')
,br()
,actionButton('import','Interpret File')
# Conversion controls, hidden until a successful import.
,hidden(div(hr()
,selectInput('saveas','Convert to:'
,choices = exportformats
,selected = 'csv')
#,HTML(' ')
,actionButton('convert','Convert File')
# Hidden text input used to detect download clicks via JS onclick().
,hidden(textInput('download_clicked'
,label = '',value = ''))
,hidden(span(id='plswait'
,'Converting...'))
,hidden(
span(downloadButton(
'download','Download Converted File')
,id='downloaddiv')),id='convertdiv'))
)
,column(1),id='importdiv'))
# Collapsible preview of the imported table, hidden until import succeeds.
,hidden(fluidRow(column(1),column(10,hr(),bsCollapsePanel(span("Preview"
,icon('angle-down'))
,dataTableOutput('preview')))
,column(1),id='previewrow'))
)
# Server ####
# Server logic: gate on a user agreement, then upload -> import (rio) ->
# convert (rio export) -> download, with UI sections toggled via shinyjs.
server <- function(input, output, session) {
# reactive values ####
rv <- reactiveValues(disclaimerAgreed=F);
# user agreement ####
# Show the disclaimer modal unless running under shinytest / test mode.
if(!isTRUE(getOption("shiny.testmode"))&&!file.exists('.testmode')) {
shinyalert('User Agreement',text=helptext$disclaimer
,html=T,confirmButtonText = 'I agree',confirmButtonCol = hcol
,className = 'dfDisclaimer',closeOnEsc = FALSE
,animation = 'slide-from-top',callbackR = function() {
rv[['disclaimerAgreed']] <- TRUE;
show('infile')})} else rv[['disclaimerAgreed']] <- TRUE;
# record file info ####
# On a new upload (or agreement, or sheet-number change): remember the temp
# path and original name, reveal the import row, hide stale downstream UI.
observeEvent(c(input$infile,rv$disclaimerAgreed,input$which),{
req(input$infile$datapath,rv$disclaimerAgreed);
rv$infile <- input$infile$datapath;
rv$infilename <- input$infile$name;
show('importdiv');
hide('convertdiv');hide('downloaddiv');hide('previewrow');
});
# change in output format ####
observeEvent(input$saveas, hide('downloaddiv'));
# detect download ####
onclick('download',updateTextInput(session,'download_clicked'
,value = as.numeric(Sys.time()))
,add = TRUE);
# read with rio ####
observeEvent(input$import,{
readfile <- try(try_import(rv$infile,which=input$which),silent=TRUE);
if(is(readfile,'try-error')){
shinyalert('You have discovered an (as yet) unsupported file',
'We would appreciate it if you would submit a bug
report to https://github.com/bokov/AnyFile/issues/new
so we can figure out a way to make this app work for
your file as well.
',type='warning')
} else {
# try_import() may attach warnings to the result via comment(); surface them.
if(!is.null(comment(readfile))){
showNotification(paste0(comment(readfile),collapse=' ')
,type='warning')};
rv$readfile <- readfile;
show('convertdiv'); show('previewrow')
hide('downloaddiv');
}
});
# convert with rio ####
observeEvent(input$convert,{
show('plswait');
# Sanitize column names: unique, with underscores instead of dots.
out <- setNames(rv$readfile,nm=gsub('\\.','_'
,make.names(names(rv$readfile)
,unique = TRUE)));
# hack to avoid errors on pzfx
# pzfx (GraphPad Prism) export accepts only numeric columns.
if(input$saveas == 'pzfx') for(ii in names(out)){
if(inherits(out[[ii]],c('character','factor'))){
out[[ii]] <- as.numeric(factor(out[[ii]]));
showNotification(sprintf('Column %s converted to numeric',ii)
,type='warning');
}
}
result <- try(export(out
,file = tempfile(fileext = paste0('.',input$saveas))
,format=input$saveas));
hide('plswait');
if(is(result,'try-error')) shinyalert('Error converting file'
,as.character(result))
else {
# Serve the converted temp file under the original name + new extension.
fnicename <- paste0(tools::file_path_sans_ext(rv$infilename)
,'.',input$saveas);
output$download <- downloadHandler(filename=fnicename
,content=function(con) {
file.copy(result,con)});
show('downloaddiv');
}
})
# render datatable ####
output$preview <- renderDataTable({
DT::datatable(rv$readfile,extensions = 'Scroller'
,autoHideNavigation=T,rownames=F,fillContainer=T
,options=list(processing=T,searching=F,scroller=T
,scrollx='100%',scrolly='20vh'
,dom='Bfrtip'
))
},server=FALSE);
# debug ####
# Drops into an interactive browser; only reachable when the Debug button
# exists (i.e. '.debug' marker file present).
observeEvent(input$debug,{
browser();
});
}
# Run the application ####
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
bokov/AnyFile
|
R
| false
| false
| 9,620
|
r
|
##### libraries ####
library(shiny); library(shinyjs); library(shinyalert); library(dplyr);
library(DT); library(shinyBS); library(data.table); library(markdown);
library(rio);
source('functions.R');
# Optional import/export back-ends for rio; loaded via requireNamespace() so a
# missing one does not abort app startup.
requireNamespace('readxl');
requireNamespace('feather');
requireNamespace('fst');
requireNamespace('rmatio');
requireNamespace('jsonlite');
requireNamespace('readODS');
requireNamespace('xml2');
requireNamespace('yaml');
requireNamespace('pzfx');
requireNamespace('csvy');
##### global settings ####
# Debug mode is enabled by dropping a '.debug' marker file next to app.R.
debug <- file.exists('.debug');
gitlink <- 'https://github.com/bokov/anyfile'
source('www/docs/helptext.R');
hcol <- '#008c99';
# Formats rio can import, derived from its S3 method names (.import.rio_XXX).
# FIX: escape and anchor the dots in the gsub() pattern -- the original pattern
# '.import.rio_' treated '.' as a regex wildcard and was unanchored, so it
# could in principle strip a matching run anywhere inside a method name.
formats <- gsub('^\\.import\\.rio_','',grep('^\\.import\\.rio_'
                                            ,methods(.import),value=TRUE));
# Formats to try first / last when guessing an unknown file's type, and
# formats never to auto-try (interactive-only or overly permissive readers).
tryfirst <- intersect(c('xlsx','ods','xls','xml','rdata','r','json'
                        ,'html'),formats);
trylast <- c('dat','csvy','yml');
nevertry <- c('clipboard','fortran','csv','csv2','psv','fwf','txt','eviews',trylast);
tryother <- setdiff(formats,c(tryfirst,nevertry));
tryformats <- c(tryfirst,tryother,trylast);
neverexport <- c('clipboard','sqlite');
# Formats rio can export (same S3-method scan, minus blacklisted targets),
# with the same regex-escaping fix applied.
exportformats <- setdiff(gsub('^\\.export\\.rio_'
                              ,'',grep('^\\.export\\.rio_'
                                       ,methods(.export),value=TRUE))
                         ,neverexport);
# UI ####
# Page layout: logo/title header, a file-upload row, then two initially hidden
# rows (import/convert controls and a data preview) revealed progressively by
# shinyjs show()/hide() calls in the server function.
ui <- fluidPage(
# + Head ####
tags$head(tags$link(rel="shortcut icon", href="favicon.ico")
,includeScript("ga.js"))
,includeCSS('df.css')
,useShinyjs()
,useShinyalert()
,fluidRow(
# + Title etc. ####
column(1)
,column(2,img(src='sitelogo_color.png',width='100%',maxwidth='100vw'),br()
# The Debug button exists only when the '.debug' marker file is present.
,if(debug) actionButton('debug','Debug') else c())
,column(8,h3("AnyFile",id='apptitle')
,em('A free, open-source webapp by Alex Bokov, PhD'
,'made possible by support from'
,'NIH/NCATS UL1TR001120 (IIMS) and the'
,'Long School of Medicine KL2 Award.'
,'Uses the',a('rio'
,href='https://github.com/leeper/rio')
,' library by Thomas J. Leeper, PhD.'
,' Source code available on',a('GitHub',href=gitlink
,target='_blank')))
,column(1))
,fluidRow(# + File Upload ####
column(1)
,column(10,hr()
,p("Sometimes you are provided data in an unfamiliar"
," format, or in a format that needs software"
," that you do not own, or even a format that is"
," completely unknown. ", tags$b('AnyFile')
," supports over a "
," dozen of the most common data formats and will"
," do its level best to find a way to read your"
," data, then give you a choice of formats"
," into which you can convert it.")
,fileInput("infile"
,div("Choose a file to upload and convert to a"
," format of your choice" #,HTML(' ')
)
,multiple = FALSE,width = '100%'
))
,column(1))
# + File Convert ####
# Hidden until a file is uploaded (revealed by the infile observer).
,hidden(fluidRow(column(1)
,column(10,hr()
,p("Some data formats (e.g. Excel and OpenOffice)"
," may contain multiple tables of data. Here you"
," are being asked which one to import in such a"
," case. If the one you specify is not available "
,tags$b('AnyFile')," will go back to importing"
," the first one it finds.")
,numericInput('which',span('Which sheet or table? '
,'(if in doubt, you can leave it as-is'
,' and just click the button below) '
#,HTML(' ')
)
,min=1,max=20,value=1,width='100%')
,br()
,actionButton('import','Interpret File')
# Conversion controls, hidden until a successful import.
,hidden(div(hr()
,selectInput('saveas','Convert to:'
,choices = exportformats
,selected = 'csv')
#,HTML(' ')
,actionButton('convert','Convert File')
# Hidden text input used to detect download clicks via JS onclick().
,hidden(textInput('download_clicked'
,label = '',value = ''))
,hidden(span(id='plswait'
,'Converting...'))
,hidden(
span(downloadButton(
'download','Download Converted File')
,id='downloaddiv')),id='convertdiv'))
)
,column(1),id='importdiv'))
# Collapsible preview of the imported table, hidden until import succeeds.
,hidden(fluidRow(column(1),column(10,hr(),bsCollapsePanel(span("Preview"
,icon('angle-down'))
,dataTableOutput('preview')))
,column(1),id='previewrow'))
)
# Server ####
# Server logic: gate on a user agreement, then upload -> import (rio) ->
# convert (rio export) -> download, with UI sections toggled via shinyjs.
server <- function(input, output, session) {
# reactive values ####
rv <- reactiveValues(disclaimerAgreed=F);
# user agreement ####
# Show the disclaimer modal unless running under shinytest / test mode.
if(!isTRUE(getOption("shiny.testmode"))&&!file.exists('.testmode')) {
shinyalert('User Agreement',text=helptext$disclaimer
,html=T,confirmButtonText = 'I agree',confirmButtonCol = hcol
,className = 'dfDisclaimer',closeOnEsc = FALSE
,animation = 'slide-from-top',callbackR = function() {
rv[['disclaimerAgreed']] <- TRUE;
show('infile')})} else rv[['disclaimerAgreed']] <- TRUE;
# record file info ####
# On a new upload (or agreement, or sheet-number change): remember the temp
# path and original name, reveal the import row, hide stale downstream UI.
observeEvent(c(input$infile,rv$disclaimerAgreed,input$which),{
req(input$infile$datapath,rv$disclaimerAgreed);
rv$infile <- input$infile$datapath;
rv$infilename <- input$infile$name;
show('importdiv');
hide('convertdiv');hide('downloaddiv');hide('previewrow');
});
# change in output format ####
observeEvent(input$saveas, hide('downloaddiv'));
# detect download ####
onclick('download',updateTextInput(session,'download_clicked'
,value = as.numeric(Sys.time()))
,add = TRUE);
# read with rio ####
observeEvent(input$import,{
readfile <- try(try_import(rv$infile,which=input$which),silent=TRUE);
if(is(readfile,'try-error')){
shinyalert('You have discovered an (as yet) unsupported file',
'We would appreciate it if you would submit a bug
report to https://github.com/bokov/AnyFile/issues/new
so we can figure out a way to make this app work for
your file as well.
',type='warning')
} else {
# try_import() may attach warnings to the result via comment(); surface them.
if(!is.null(comment(readfile))){
showNotification(paste0(comment(readfile),collapse=' ')
,type='warning')};
rv$readfile <- readfile;
show('convertdiv'); show('previewrow')
hide('downloaddiv');
}
});
# convert with rio ####
observeEvent(input$convert,{
show('plswait');
# Sanitize column names: unique, with underscores instead of dots.
out <- setNames(rv$readfile,nm=gsub('\\.','_'
,make.names(names(rv$readfile)
,unique = TRUE)));
# hack to avoid errors on pzfx
# pzfx (GraphPad Prism) export accepts only numeric columns.
if(input$saveas == 'pzfx') for(ii in names(out)){
if(inherits(out[[ii]],c('character','factor'))){
out[[ii]] <- as.numeric(factor(out[[ii]]));
showNotification(sprintf('Column %s converted to numeric',ii)
,type='warning');
}
}
result <- try(export(out
,file = tempfile(fileext = paste0('.',input$saveas))
,format=input$saveas));
hide('plswait');
if(is(result,'try-error')) shinyalert('Error converting file'
,as.character(result))
else {
# Serve the converted temp file under the original name + new extension.
fnicename <- paste0(tools::file_path_sans_ext(rv$infilename)
,'.',input$saveas);
output$download <- downloadHandler(filename=fnicename
,content=function(con) {
file.copy(result,con)});
show('downloaddiv');
}
})
# render datatable ####
output$preview <- renderDataTable({
DT::datatable(rv$readfile,extensions = 'Scroller'
,autoHideNavigation=T,rownames=F,fillContainer=T
,options=list(processing=T,searching=F,scroller=T
,scrollx='100%',scrolly='20vh'
,dom='Bfrtip'
))
},server=FALSE);
# debug ####
# Drops into an interactive browser; only reachable when the Debug button
# exists (i.e. '.debug' marker file present).
observeEvent(input$debug,{
browser();
});
}
# Run the application ####
shinyApp(ui = ui, server = server)
|
#server.R
# Shiny server for exploring Seattle 911 incident-response data (Socrata SODA
# endpoint): a leaflet map driven by reactive API queries plus four static
# ggplot summaries computed once at startup.
library(dplyr)
library(httr)
library(jsonlite)
library(shiny)
library(leaflet)
library(lubridate)
library(ggplot2)
# Read in data
# setwd("~/Desktop/INFO 201/9-1-1-Incidence-Response-Data")
#source('')
#source('')
endpoint <- "https://data.seattle.gov/resource/pu5n-trf4.json?"
# NOTE(review): app token committed in source, and never actually attached to
# the requests below -- consider moving it to an environment variable and
# passing it via the $$app_token query parameter.
app.token <- "ky71McxIFKv1aPgDQr0yM0huK"
#Static data load for use with data explorations
# This request runs at app startup and blocks until it completes.
static.query.params <- list("$where" = "event_clearance_date between '2010-01-01T0:00:00' and '2017-12-31T23:59:59'")
static.response <- GET(endpoint, query = static.query.params)
static.body <- content(static.response, "text")
static.data <- fromJSON(static.body)
static.data <- flatten(static.data)
#reformat dates for use in graphs
static.data <- static.data %>% mutate(reformatted.date = ymd_hms(event_clearance_date))
static.data <- static.data %>% mutate(hour.of.day = hour(reformatted.date))
#group data by hour of event clearance
data.by.hour <- static.data %>% group_by(hour.of.day) %>% summarise(count = n())
#group data by district sector
data.by.sector <- static.data %>% group_by(district_sector) %>% summarise(count = n())
# group data by types of events
occurred.events <- group_by(static.data, event_clearance_group) %>%
summarise(count = n())
#group data by time and subgroup
time.sub <- group_by(static.data, event_clearance_group, hour.of.day) %>% summarise(count = n())
# Start shinyServer
shinyServer(function(input, output) {
# reactive function to adapt to data changes by user
# Re-queries the API whenever the year slider or subgroup selector changes.
filteredData <- reactive({
# make api request
query.params <- list("$where" = paste0("event_clearance_date between '", input$year.slider[1], "-01-01T0:00:00' and '", input$year.slider[2], "-12-31T23:59:59'"))
if(input$subgroup != "SELECT...") {
query.params <- c(query.params, event_clearance_subgroup = input$subgroup[1])
}
response <- GET(endpoint, query = query.params)
body <- content(response, "text")
yearly.data <- fromJSON(body)
yearly.data <- flatten(yearly.data)
# coerce longitude and latitude to numerics
yearly.data <- mutate(yearly.data, longitude = as.numeric(longitude), latitude = as.numeric(latitude))
#Reformat dates for easier manipulation
# NOTE(review): unlike the static load above, hour() is applied to the raw
# character event_clearance_date without ymd_hms() first -- confirm this
# parses as intended. The mutate() result is the reactive's implicit return.
yearly.data <- mutate(yearly.data, hour.of.day = hour(event_clearance_date))
})
# render map with default values
output$incident.map <- renderLeaflet({
# plot points on map
leaflet() %>%
addProviderTiles(providers$CartoDB.Positron) %>%
setView(-122.28, 47.61, zoom = 12)
})
# update map as data changes
# leafletProxy mutates the existing map widget instead of re-rendering it.
observe({
leafletProxy("incident.map", data = filteredData()) %>%
clearMarkers() %>%
addCircleMarkers(~longitude, ~latitude, radius = 4, stroke = FALSE)
})
#Line plot of event clearances by hour of day
output$timeOfDayPlot <- renderPlot({
ggplot(data.by.hour, aes(x = hour.of.day, y = count)) + geom_point() + geom_line() +
labs(x = "Hour of Day", y = "Frequency of Incidences", title = "911 Event Clearances by Hour of Day") +
xlim(0, 23) + ylim(0, 70)
})
#Histogram of events, grouped by the district in which they occurred
output$bySectorPlot <- renderPlot({
ggplot(data.by.sector, aes(x = district_sector, y = count)) + geom_bar(stat = "identity") +
labs(x = "District Sector", y = "Frequency of Incidences", title = "911 Events by District Sector")
})
output$occurredEventsPlot <- renderPlot({
ggplot(data = occurred.events, aes(x = event_clearance_group, y = count)) + geom_point(stat = "identity") +
theme_bw() +
theme(axis.text = element_text(angle = 90, hjust = 1)) +
labs(x = "Types of Events", y = "Number of Occurences", title = "Number of Occurence For Each Type Of Accidents.")
})
#Dotplot of subgroups vs time of incidence
output$subgroupVsTimePlot <- renderPlot({
ggplot(time.sub, aes(x = hour.of.day, y = event_clearance_group)) + geom_point(stat = "identity") +
theme_bw() +
labs(x = "Time of Incidence", y = "Subgroup", title = "Types of Incidence Based on Time of Occurence")
})
})
|
/server.R
|
no_license
|
alkanjon/9-1-1-Incidence-Response-Data
|
R
| false
| false
| 4,108
|
r
|
#server.R
# Shiny server for exploring Seattle 911 incident-response data (Socrata SODA
# endpoint): a leaflet map driven by reactive API queries plus four static
# ggplot summaries computed once at startup.
library(dplyr)
library(httr)
library(jsonlite)
library(shiny)
library(leaflet)
library(lubridate)
library(ggplot2)
# Read in data
# setwd("~/Desktop/INFO 201/9-1-1-Incidence-Response-Data")
#source('')
#source('')
endpoint <- "https://data.seattle.gov/resource/pu5n-trf4.json?"
# NOTE(review): app token committed in source, and never actually attached to
# the requests below -- consider moving it to an environment variable and
# passing it via the $$app_token query parameter.
app.token <- "ky71McxIFKv1aPgDQr0yM0huK"
#Static data load for use with data explorations
# This request runs at app startup and blocks until it completes.
static.query.params <- list("$where" = "event_clearance_date between '2010-01-01T0:00:00' and '2017-12-31T23:59:59'")
static.response <- GET(endpoint, query = static.query.params)
static.body <- content(static.response, "text")
static.data <- fromJSON(static.body)
static.data <- flatten(static.data)
#reformat dates for use in graphs
static.data <- static.data %>% mutate(reformatted.date = ymd_hms(event_clearance_date))
static.data <- static.data %>% mutate(hour.of.day = hour(reformatted.date))
#group data by hour of event clearance
data.by.hour <- static.data %>% group_by(hour.of.day) %>% summarise(count = n())
#group data by district sector
data.by.sector <- static.data %>% group_by(district_sector) %>% summarise(count = n())
# group data by types of events
occurred.events <- group_by(static.data, event_clearance_group) %>%
summarise(count = n())
#group data by time and subgroup
time.sub <- group_by(static.data, event_clearance_group, hour.of.day) %>% summarise(count = n())
# Start shinyServer
shinyServer(function(input, output) {
# reactive function to adapt to data changes by user
# Re-queries the API whenever the year slider or subgroup selector changes.
filteredData <- reactive({
# make api request
query.params <- list("$where" = paste0("event_clearance_date between '", input$year.slider[1], "-01-01T0:00:00' and '", input$year.slider[2], "-12-31T23:59:59'"))
if(input$subgroup != "SELECT...") {
query.params <- c(query.params, event_clearance_subgroup = input$subgroup[1])
}
response <- GET(endpoint, query = query.params)
body <- content(response, "text")
yearly.data <- fromJSON(body)
yearly.data <- flatten(yearly.data)
# coerce longitude and latitude to numerics
yearly.data <- mutate(yearly.data, longitude = as.numeric(longitude), latitude = as.numeric(latitude))
#Reformat dates for easier manipulation
# NOTE(review): unlike the static load above, hour() is applied to the raw
# character event_clearance_date without ymd_hms() first -- confirm this
# parses as intended. The mutate() result is the reactive's implicit return.
yearly.data <- mutate(yearly.data, hour.of.day = hour(event_clearance_date))
})
# render map with default values
output$incident.map <- renderLeaflet({
# plot points on map
leaflet() %>%
addProviderTiles(providers$CartoDB.Positron) %>%
setView(-122.28, 47.61, zoom = 12)
})
# update map as data changes
# leafletProxy mutates the existing map widget instead of re-rendering it.
observe({
leafletProxy("incident.map", data = filteredData()) %>%
clearMarkers() %>%
addCircleMarkers(~longitude, ~latitude, radius = 4, stroke = FALSE)
})
#Line plot of event clearances by hour of day
output$timeOfDayPlot <- renderPlot({
ggplot(data.by.hour, aes(x = hour.of.day, y = count)) + geom_point() + geom_line() +
labs(x = "Hour of Day", y = "Frequency of Incidences", title = "911 Event Clearances by Hour of Day") +
xlim(0, 23) + ylim(0, 70)
})
#Histogram of events, grouped by the district in which they occurred
output$bySectorPlot <- renderPlot({
ggplot(data.by.sector, aes(x = district_sector, y = count)) + geom_bar(stat = "identity") +
labs(x = "District Sector", y = "Frequency of Incidences", title = "911 Events by District Sector")
})
output$occurredEventsPlot <- renderPlot({
ggplot(data = occurred.events, aes(x = event_clearance_group, y = count)) + geom_point(stat = "identity") +
theme_bw() +
theme(axis.text = element_text(angle = 90, hjust = 1)) +
labs(x = "Types of Events", y = "Number of Occurences", title = "Number of Occurence For Each Type Of Accidents.")
})
#Dotplot of subgroups vs time of incidence
output$subgroupVsTimePlot <- renderPlot({
ggplot(time.sub, aes(x = hour.of.day, y = event_clearance_group)) + geom_point(stat = "identity") +
theme_bw() +
labs(x = "Time of Incidence", y = "Subgroup", title = "Types of Incidence Based on Time of Occurence")
})
})
|
# Tests for orderly remote handling: default-remote resolution, remote lookup,
# and archive pulling.
context("remote")
test_that("defaults: null", {
# With no default remote configured, both the getter and a NULL lookup must
# fail with an informative error.
path <- test_prepare_orderly_example("minimal")
expect_null(orderly_default_remote_set(NULL, path))
expect_error(orderly_default_remote_get(path, FALSE),
"default remote has not been set yet")
expect_error(get_remote(NULL, path),
"default remote has not been set yet")
})
test_that("get_remote type failure", {
# get_remote() rejects non-character identifiers and unknown remote names.
dat <- prepare_orderly_remote_example()
expect_error(get_remote(1, dat$config),
"Unknown remote type")
expect_error(get_remote("extra", dat$config),
"Unknown remote 'extra'")
})
test_that("orderly_pull_archive with wrong version", {
# Pulling a nonexistent version lists the valid versions in the error.
dat <- prepare_orderly_remote_example()
expect_error(
orderly_pull_archive("example", new_report_id(), root = dat$config,
remote = dat$remote),
paste0("Version '.+?' not found at '.+?': valid versions are:.+",
dat$id1))
})
test_that("pull dependencies", {
# Pulling a report's dependencies copies the upstream 'example' report into
# the local archive, and picks up new upstream versions on a second pull.
dat <- prepare_orderly_remote_example()
expect_log_message(
orderly_pull_dependencies("depend", root = dat$config,
remote = dat$remote),
"\\[ pull\\s+ \\] example:")
expect_equal(orderly_list_archive(dat$config),
data_frame(name = "example", id = dat$id2))
## and update
id3 <- orderly_run("example", root = dat$path_remote, echo = FALSE)
orderly_commit(id3, root = dat$path_remote)
expect_log_message(
orderly_pull_dependencies("depend", root = dat$config,
remote = dat$remote),
"\\[ pull\\s+ \\] example:")
expect_equal(orderly_list_archive(dat$config),
data_frame(name = "example", id = c(dat$id2, id3)))
})
test_that("pull dependencies with implied name", {
# When run from inside a report's src directory, the report name argument
# can be omitted and is inferred from the working directory.
dat <- prepare_orderly_remote_example()
expect_equal(nrow(orderly_list_archive(dat$config)), 0)
withr::with_dir(
file.path(dat$config$root, "src", "depend"),
orderly_pull_dependencies(remote = dat$remote))
expect_equal(nrow(orderly_list_archive(dat$config)), 1)
})
test_that("pull_dependencies counts dependencies", {
# The log message pluralizes the dependency count correctly (0 vs 1).
dat <- prepare_orderly_remote_example()
expect_log_message(
orderly_pull_dependencies("example", root = dat$config,
remote = dat$remote),
"\\[ depends\\s+ \\] example has 0 dependencies")
id <- orderly_run("example", root = dat$path_remote, echo = FALSE)
orderly_commit(id, root = dat$path_remote)
expect_log_message(
orderly_pull_dependencies("depend", root = dat$config,
remote = dat$remote),
"\\[ depends\\s+ \\] depend has 1 dependency")
})
## These need dealing with properly, but check that they trigger
## correctly here:
test_that("pull from old remote", {
# Pulling from an archive created by orderly 0.6.0 triggers a migration of
# the pulled report, and the migrated report records the current archive
# version.
oo <- options(orderly.nowarnings = TRUE)
on.exit(options(oo))
path_local <- test_prepare_orderly_example("demo")
path_remote <- unpack_reference("0.6.0")
## In order to make this work we do need to update the data table.
## This will stop being a problem shortly.
##
## Once we get a further migration we should disable importing of
## all files prior to archive version 0.6.8 because of this problem.
db_local <- orderly_db("destination", root = path_local)
db_remote <- orderly_db("destination", root = path_remote, validate = FALSE)
tbl_data <- DBI::dbReadTable(db_remote, "data")
DBI::dbWriteTable(db_local, "data", tbl_data, append = TRUE)
DBI::dbDisconnect(db_local)
DBI::dbDisconnect(db_remote)
expect_log_message(
orderly_pull_archive("minimal", root = path_local, remote = path_remote),
"^\\[ migrate")
contents <- orderly_list_archive(path_local)
expect_equal(nrow(contents), 1)
path <- file.path(path_local, "archive", "minimal", contents$id)
expect_equal(
readRDS(path_orderly_run_rds(path))$archive_version,
numeric_version(read_orderly_archive_version(path_local)))
})
## These need dealing with properly, but check that they trigger
## correctly here:
test_that("pull from new remote", {
# A report whose archive_version is newer than the installed orderly must
# refuse to import, telling the user to upgrade.
dat <- prepare_orderly_remote_example()
p <- path_orderly_run_rds(
file.path(dat$path_remote, "archive", "example", dat$id2))
d <- readRDS(p)
d$archive_version <- numeric_version("100.100.100")
saveRDS(d, p)
expect_error(
orderly_pull_archive("example", dat$id2, root = dat$path_local,
remote = dat$remote),
"Report was created with orderly more recent than this, upgrade!")
})
test_that("pull migrated archive", {
# Pull from a remote archive created by orderly 0.5.4 (migrated in place),
# then check the local archive version is upgraded and remains usable for
# running, committing, and re-migrating.
oo <- options(orderly.nowarnings = TRUE)
on.exit(options(oo))
path_local <- test_prepare_orderly_example("demo")
unlink(file.path(path_local, "archive"), recursive = TRUE)
dir.create(file.path(path_local, "archive"))
path_remote <- unpack_reference("0.5.4")
# FIX: the option name was misspelled "orderly.nmowarnings", so the inner
# with_options() call silently had no effect (the typo was masked by the
# outer options() call above). Use the correct "orderly.nowarnings".
withr::with_options(list(orderly.nowarnings = TRUE),
orderly_migrate(path_remote))
file.copy(file.path(path_local, "orderly_config.yml"),
file.path(path_remote, "orderly_config.yml"),
overwrite = TRUE)
dir.create(file.path(path_remote, "global"))
## Empty archives have a null version:
expect_equal(read_orderly_archive_version(path_local), "0.0.0")
remote <- orderly_remote_path(path_remote)
orderly_pull_archive("use_dependency", root = path_local, remote = remote)
## The archive version has been upgraded:
expect_equal(read_orderly_archive_version(path_local),
as.character(cache$current_archive_version))
expect_setequal(orderly_list_archive(path_local)$name,
c("other", "use_dependency"))
## This fails in old versions, but will work here:
id <- orderly_run("minimal", root = path_local, echo = FALSE)
orderly_commit(id, root = path_local)
expect_true(id %in% orderly_list_archive(path_local)$id)
## And this is not necessary but also fails on the previous version
## because of issues re-running migrations.
expect_silent(orderly_migrate(root = path_local))
})
test_that("silently ignore missing slack url, but resolve args", {
# Environment-variable placeholders in remote args resolve when the variable
# is set, and are silently dropped (slack_url -> NULL attribute) when unset.
path <- test_prepare_orderly_example("minimal")
append_lines(
c("remote:",
" default:",
" driver: orderly::orderly_remote_path",
" args:",
" path: $ORDERLY_UNSET_REMOTE_PATH",
" slack_url: $ORDERLY_UNSET_SLACK_URL"),
file.path(path, "orderly_config.yml"))
config <- orderly_config_$new(path)
clear_remote_cache()
remote <- withr::with_envvar(
c(ORDERLY_UNSET_REMOTE_PATH = path),
get_remote("default", config))
# The resolved remote is cached (one entry after each lookup).
expect_equal(length(orderly:::cache$remotes), 1L)
expect_null(attr(remote, "slack_url"))
expect_false(attr(remote, "primary"))
clear_remote_cache()
remote <- withr::with_envvar(
c(ORDERLY_UNSET_REMOTE_PATH = path,
ORDERLY_UNSET_SLACK_URL = "http://example.com/slack"),
get_remote("default", config))
expect_equal(length(orderly:::cache$remotes), 1L)
expect_equal(attr(remote, "slack_url"), "http://example.com/slack")
expect_false(attr(remote, "primary"))
})
test_that("get remote", {
# orderly_remote() resolves the configured default remote driver and root.
path_remote <- test_prepare_orderly_example("minimal")
path_local <- test_prepare_orderly_example("minimal")
## Configure our remote:
path_config <- file.path(path_local, "orderly_config.yml")
txt <- readLines(path_config)
writeLines(c(
txt,
"remote:",
" default:",
" driver: orderly::orderly_remote_path",
" args:",
paste(" path:", path_remote)),
path_config)
## Get our remote:
remote <- orderly_remote(root = path_local)
expect_is(remote, "orderly_remote_path")
expect_equal(remote$name, "default")
expect_true(same_path(remote$config$root, path_remote))
})
test_that("teams url can be configured and silently ignored if missing", {
# Same placeholder behavior as slack_url above, but for teams_url.
path <- test_prepare_orderly_example("minimal")
append_lines(
c("remote:",
" default:",
" driver: orderly::orderly_remote_path",
" args:",
" path: $ORDERLY_PATH",
" teams_url: $ORDERLY_TEAMS_URL"),
file.path(path, "orderly_config.yml"))
config <- orderly_config_$new(path)
clear_remote_cache()
remote <- withr::with_envvar(
c(ORDERLY_PATH = path),
get_remote("default", config))
expect_equal(length(orderly:::cache$remotes), 1L)
expect_null(attr(remote, "teams_url"))
expect_false(attr(remote, "primary"))
clear_remote_cache()
remote <- withr::with_envvar(
c(ORDERLY_PATH = path,
ORDERLY_TEAMS_URL = "http://example.com/slack"),
get_remote("default", config))
expect_equal(length(orderly:::cache$remotes), 1L)
expect_equal(attr(remote, "teams_url"), "http://example.com/slack")
expect_false(attr(remote, "primary"))
})
test_that("orderly run remote passes instance to run", {
# The instance argument is forwarded to the remote's run() method (and is
# NULL when not supplied).
path_local <- test_prepare_orderly_example("demo")
## Create a minimal remote class which will satisfy implements_remote
mock_remote <- R6::R6Class(
"orderly_mock_remote",
lock_objects = FALSE,
public = list(
list_reports = function() TRUE,
list_versions = function() TRUE,
pull = function() TRUE,
url_report = function() TRUE
)
)
## Bit of awkwardness with adding run function here. We want to mock out new
## function but can't do that inside the class.
remote <- mock_remote$new()
remote$run <- mockery::mock(TRUE, cycle = TRUE)
orderly_run_remote("minimal", remote = remote, root = path_local)
mockery::expect_called(remote$run, 1)
args <- mockery::mock_args(remote$run)[[1]]
expect_null(args$instance)
orderly_run_remote("minimal", remote = remote, root = path_local,
instance = "test")
mockery::expect_called(remote$run, 2)
args <- mockery::mock_args(remote$run)[[2]]
expect_equal(args$instance, "test")
})
test_that("orderly_bundle_(pack|import)_remote do not use root/locate", {
# root/locate are lazy stop() sentinels: if either argument were forced the
# test would error, proving the bundle functions never touch them.
skip_on_cran_windows()
path <- test_prepare_orderly_example("minimal")
remote <- orderly_remote_path(path)
temp <- tempfile()
on.exit(unlink(temp, recursive = TRUE))
dir_create(temp)
res <- withr::with_dir(
temp,
orderly_bundle_pack_remote("example", remote = remote,
root = stop("don't force me"),
locate = stop("don't force me"),
dest = "."))
expect_true(file.exists(file.path(temp, basename(res))))
expect_equal(dirname(res), ".")
ans <- orderly_bundle_run(file.path(temp, basename(res)), echo = FALSE)
withr::with_dir(
temp,
orderly_bundle_import_remote(ans$path, remote = remote,
root = stop("don't force me"),
locate = stop("don't force me")))
expect_equal(remote$list_versions("example"), ans$id)
})
test_that("orderly run remote passes ref to run", {
path <- test_prepare_orderly_git_example()
## Create a minimal remote class which will satisfy implements_remote
mock_remote <- R6::R6Class(
"orderly_mock_remote",
lock_objects = FALSE,
public = list(
list_reports = function() TRUE,
list_versions = function() TRUE,
pull = function() TRUE,
url_report = function() TRUE
)
)
## Bit of awkwardness with adding run function here. We want to mock out new
## function but can't do that inside the class.
remote <- mock_remote$new()
remote$run <- mockery::mock(TRUE, cycle = TRUE)
orderly_run_remote("minimal", remote = remote, root = path[["local"]])
mockery::expect_called(remote$run, 1)
args <- mockery::mock_args(remote$run)[[1]]
expect_null(args$ref)
orderly_run_remote("minimal", remote = remote, root = path[["local"]],
ref = "master")
mockery::expect_called(remote$run, 2)
args <- mockery::mock_args(remote$run)[[2]]
expect_match(args$ref, "[0-9a-f]{40}")
})
test_that("can get status of remote queue", {
path_local <- test_prepare_orderly_example("demo")
## Canned status payload that the mocked queue_status() will return.
status <- list(
tasks = list(
name = "slow3",
version = "20210423-143954-e634ca18",
key = "fungiform_kiwi",
status = "running"
)
)
mock_status <- mockery::mock(status)
## Create a minimal remote class which will satisfy implements_remote
mock_remote <- R6::R6Class(
"orderly_mock_remote",
lock_objects = FALSE,
public = list(
list_reports = function() TRUE,
list_versions = function() TRUE,
pull = function() TRUE,
run = function() TRUE,
url_report = function() TRUE,
queue_status = function() mock_status()
)
)
remote <- mock_remote$new()
res <- orderly_remote_status(remote = remote)
## orderly_remote_status() must call the remote's queue_status() exactly
## once and return its value unchanged.
mockery::expect_called(mock_status, 1)
expect_equal(res, status)
})
test_that("pull leaf only", {
dat <- prepare_orderly_remote_example()
id3 <- orderly_run("depend", root = dat$path_remote, echo = FALSE)
orderly_commit(id3, root = dat$path_remote)
orderly_pull_archive("depend", root = dat$config, remote = dat$remote,
recursive = FALSE)
## We only have the one archive report now
expect_equal(
orderly_list_archive(dat$config),
data_frame(name = "depend", id = id3))
## And one with metadata only
expect_equal(
orderly_list_metadata(dat$config),
data_frame(name = "example", id = dat$id2))
expect_equal(
orderly_list_metadata(dat$config, include_archive = TRUE),
data_frame(name = "example", id = dat$id2))
## But we have two in the database
con <- orderly_db("destination", dat$config)
d <- DBI::dbReadTable(con, "report_version")
DBI::dbDisconnect(con)
expect_equal(nrow(d), 2)
expect_equal(d$id, c(dat$id2, id3))
})
test_that("Can rebuild when there is a metadata store", {
dat <- prepare_orderly_remote_example()
id3 <- orderly_run("depend", root = dat$path_remote, echo = FALSE)
orderly_commit(id3, root = dat$path_remote)
orderly_pull_archive("depend", root = dat$config, remote = dat$remote,
recursive = FALSE)
orderly_rebuild(dat$config)
con <- orderly_db("destination", dat$config)
d <- DBI::dbReadTable(con, "report_version")
DBI::dbDisconnect(con)
expect_equal(nrow(d), 2)
expect_equal(d$id, c(dat$id2, id3))
})
## FIX: typo in the test description ("will" -> "with").
test_that("Can cope with pulling complete tree after metadata pulled", {
  dat <- prepare_orderly_remote_example()
  id3 <- orderly_run("depend", root = dat$path_remote, echo = FALSE)
  orderly_commit(id3, root = dat$path_remote)
  ## First pull only the leaf, so "example" arrives as metadata only ...
  orderly_pull_archive("depend", root = dat$config, remote = dat$remote,
                       recursive = FALSE)
  ## ... then pull the full archive for "example" on top of that metadata.
  orderly_pull_archive("example", root = dat$config, remote = dat$remote)
  ## The metadata-only listing is now empty (verified below); the archive
  ## copy shows up only when include_archive = TRUE.
  expect_equal(
    orderly_list_metadata(dat$config),
    data_frame(name = character(), id = character()))
  expect_equal(
    orderly_list_metadata(dat$config, include_archive = TRUE),
    data_frame(name = "example", id = dat$id2))
  ## Rebuilding the db must cope with this mixed state without error.
  orderly_rebuild(dat$config)
})
test_that("re-pulling metadata prints informative message", {
dat <- prepare_orderly_remote_example()
expect_message(
orderly_pull_metadata("example", dat$id2, root = dat$config,
remote = dat$remote),
"metadata.+fetching example:[0-9]{8}-[0-9]{6}-[[:xdigit:]]{8}")
expect_message(
orderly_pull_metadata("example", dat$id2, root = dat$config,
remote = dat$remote),
"metadata.+example:[0-9]{8}-[0-9]{6}-[[:xdigit:]]{8} already exists, skip")
})
test_that("Can't pull incompatible metadata", {
dat <- prepare_orderly_remote_example()
p <- file.path(dat$path_remote, "archive", "example", dat$id2,
"orderly_run.rds")
d <- readRDS(p)
d$archive_version <- numeric_version("1.0.0")
saveRDS(d, p)
expect_error(
orderly_pull_metadata("example", dat$id2, root = dat$config,
remote = dat$remote),
"Can't migrate metadata for 'example:.+', migrate remote or pull archive")
d$archive_version <- numeric_version("9.0.0")
saveRDS(d, p)
expect_error(
orderly_pull_metadata("example", dat$id2, root = dat$config,
remote = dat$remote),
"Report was created with orderly more recent than this, upgrade!")
})
|
/tests/testthat/test-remote.R
|
permissive
|
cran/orderly
|
R
| false
| false
| 16,083
|
r
|
context("remote")
test_that("defaults: null", {
path <- test_prepare_orderly_example("minimal")
expect_null(orderly_default_remote_set(NULL, path))
expect_error(orderly_default_remote_get(path, FALSE),
"default remote has not been set yet")
expect_error(get_remote(NULL, path),
"default remote has not been set yet")
})
test_that("get_remote type failure", {
dat <- prepare_orderly_remote_example()
expect_error(get_remote(1, dat$config),
"Unknown remote type")
expect_error(get_remote("extra", dat$config),
"Unknown remote 'extra'")
})
test_that("orderly_pull_archive with wrong version", {
dat <- prepare_orderly_remote_example()
expect_error(
orderly_pull_archive("example", new_report_id(), root = dat$config,
remote = dat$remote),
paste0("Version '.+?' not found at '.+?': valid versions are:.+",
dat$id1))
})
test_that("pull dependencies", {
dat <- prepare_orderly_remote_example()
expect_log_message(
orderly_pull_dependencies("depend", root = dat$config,
remote = dat$remote),
"\\[ pull\\s+ \\] example:")
expect_equal(orderly_list_archive(dat$config),
data_frame(name = "example", id = dat$id2))
## and update
id3 <- orderly_run("example", root = dat$path_remote, echo = FALSE)
orderly_commit(id3, root = dat$path_remote)
expect_log_message(
orderly_pull_dependencies("depend", root = dat$config,
remote = dat$remote),
"\\[ pull\\s+ \\] example:")
expect_equal(orderly_list_archive(dat$config),
data_frame(name = "example", id = c(dat$id2, id3)))
})
test_that("pull dependencies with implied name", {
dat <- prepare_orderly_remote_example()
expect_equal(nrow(orderly_list_archive(dat$config)), 0)
withr::with_dir(
file.path(dat$config$root, "src", "depend"),
orderly_pull_dependencies(remote = dat$remote))
expect_equal(nrow(orderly_list_archive(dat$config)), 1)
})
test_that("pull_dependencies counts dependencies", {
dat <- prepare_orderly_remote_example()
expect_log_message(
orderly_pull_dependencies("example", root = dat$config,
remote = dat$remote),
"\\[ depends\\s+ \\] example has 0 dependencies")
id <- orderly_run("example", root = dat$path_remote, echo = FALSE)
orderly_commit(id, root = dat$path_remote)
expect_log_message(
orderly_pull_dependencies("depend", root = dat$config,
remote = dat$remote),
"\\[ depends\\s+ \\] depend has 1 dependency")
})
## These need dealing with properly, but check that they trigger
## correctly here:
test_that("pull from old remote", {
oo <- options(orderly.nowarnings = TRUE)
on.exit(options(oo))
path_local <- test_prepare_orderly_example("demo")
path_remote <- unpack_reference("0.6.0")
## In order to make this work we do need to update the data table.
## This will stop being a problem shortly.
##
## Once we get a further migration we should disable importing of
## all files prior to archive version 0.6.8 because of this problem.
db_local <- orderly_db("destination", root = path_local)
db_remote <- orderly_db("destination", root = path_remote, validate = FALSE)
tbl_data <- DBI::dbReadTable(db_remote, "data")
DBI::dbWriteTable(db_local, "data", tbl_data, append = TRUE)
DBI::dbDisconnect(db_local)
DBI::dbDisconnect(db_remote)
expect_log_message(
orderly_pull_archive("minimal", root = path_local, remote = path_remote),
"^\\[ migrate")
contents <- orderly_list_archive(path_local)
expect_equal(nrow(contents), 1)
path <- file.path(path_local, "archive", "minimal", contents$id)
expect_equal(
readRDS(path_orderly_run_rds(path))$archive_version,
numeric_version(read_orderly_archive_version(path_local)))
})
## These need dealing with properly, but check that they trigger
## correctly here:
test_that("pull from new remote", {
dat <- prepare_orderly_remote_example()
p <- path_orderly_run_rds(
file.path(dat$path_remote, "archive", "example", dat$id2))
d <- readRDS(p)
d$archive_version <- numeric_version("100.100.100")
saveRDS(d, p)
expect_error(
orderly_pull_archive("example", dat$id2, root = dat$path_local,
remote = dat$remote),
"Report was created with orderly more recent than this, upgrade!")
})
test_that("pull migrated archive", {
  ## Suppress orderly warnings for the duration of this test.
  oo <- options(orderly.nowarnings = TRUE)
  on.exit(options(oo))
  path_local <- test_prepare_orderly_example("demo")
  unlink(file.path(path_local, "archive"), recursive = TRUE)
  dir.create(file.path(path_local, "archive"))
  ## Unpack an archive created by an old orderly version and migrate it.
  path_remote <- unpack_reference("0.5.4")
  ## BUG FIX: the option name was misspelled "orderly.nmowarnings", so
  ## warnings were not actually suppressed during the migration (compare
  ## the correct spelling used at the top of this test).
  withr::with_options(list(orderly.nowarnings = TRUE),
                      orderly_migrate(path_remote))
  file.copy(file.path(path_local, "orderly_config.yml"),
            file.path(path_remote, "orderly_config.yml"),
            overwrite = TRUE)
  dir.create(file.path(path_remote, "global"))
  ## Empty archives have a null version:
  expect_equal(read_orderly_archive_version(path_local), "0.0.0")
  remote <- orderly_remote_path(path_remote)
  orderly_pull_archive("use_dependency", root = path_local, remote = remote)
  ## The archive version has been upgraded:
  expect_equal(read_orderly_archive_version(path_local),
               as.character(cache$current_archive_version))
  expect_setequal(orderly_list_archive(path_local)$name,
                  c("other", "use_dependency"))
  ## This fails in old versions, but will work here:
  id <- orderly_run("minimal", root = path_local, echo = FALSE)
  orderly_commit(id, root = path_local)
  expect_true(id %in% orderly_list_archive(path_local)$id)
  ## And this is not necessary but also fails on the previous version
  ## because of issues re-running migrations.
  expect_silent(orderly_migrate(root = path_local))
})
test_that("silently ignore missing slack url, but resolve args", {
path <- test_prepare_orderly_example("minimal")
append_lines(
c("remote:",
" default:",
" driver: orderly::orderly_remote_path",
" args:",
" path: $ORDERLY_UNSET_REMOTE_PATH",
" slack_url: $ORDERLY_UNSET_SLACK_URL"),
file.path(path, "orderly_config.yml"))
config <- orderly_config_$new(path)
clear_remote_cache()
remote <- withr::with_envvar(
c(ORDERLY_UNSET_REMOTE_PATH = path),
get_remote("default", config))
expect_equal(length(orderly:::cache$remotes), 1L)
expect_null(attr(remote, "slack_url"))
expect_false(attr(remote, "primary"))
clear_remote_cache()
remote <- withr::with_envvar(
c(ORDERLY_UNSET_REMOTE_PATH = path,
ORDERLY_UNSET_SLACK_URL = "http://example.com/slack"),
get_remote("default", config))
expect_equal(length(orderly:::cache$remotes), 1L)
expect_equal(attr(remote, "slack_url"), "http://example.com/slack")
expect_false(attr(remote, "primary"))
})
test_that("get remote", {
path_remote <- test_prepare_orderly_example("minimal")
path_local <- test_prepare_orderly_example("minimal")
## Configure our remote:
path_config <- file.path(path_local, "orderly_config.yml")
txt <- readLines(path_config)
writeLines(c(
txt,
"remote:",
" default:",
" driver: orderly::orderly_remote_path",
" args:",
paste(" path:", path_remote)),
path_config)
## Get our remote:
remote <- orderly_remote(root = path_local)
expect_is(remote, "orderly_remote_path")
expect_equal(remote$name, "default")
expect_true(same_path(remote$config$root, path_remote))
})
test_that("teams url can be configured and silently ignored if missing", {
path <- test_prepare_orderly_example("minimal")
append_lines(
c("remote:",
" default:",
" driver: orderly::orderly_remote_path",
" args:",
" path: $ORDERLY_PATH",
" teams_url: $ORDERLY_TEAMS_URL"),
file.path(path, "orderly_config.yml"))
config <- orderly_config_$new(path)
clear_remote_cache()
remote <- withr::with_envvar(
c(ORDERLY_PATH = path),
get_remote("default", config))
expect_equal(length(orderly:::cache$remotes), 1L)
expect_null(attr(remote, "teams_url"))
expect_false(attr(remote, "primary"))
clear_remote_cache()
remote <- withr::with_envvar(
c(ORDERLY_PATH = path,
ORDERLY_TEAMS_URL = "http://example.com/slack"),
get_remote("default", config))
expect_equal(length(orderly:::cache$remotes), 1L)
expect_equal(attr(remote, "teams_url"), "http://example.com/slack")
expect_false(attr(remote, "primary"))
})
test_that("orderly run remote passes instance to run", {
path_local <- test_prepare_orderly_example("demo")
## Create a minimal remote class which will satisfy implements_remote
mock_remote <- R6::R6Class(
"orderly_mock_remote",
lock_objects = FALSE,
public = list(
list_reports = function() TRUE,
list_versions = function() TRUE,
pull = function() TRUE,
url_report = function() TRUE
)
)
## Bit of awkwardness with adding run function here. We want to mock out new
## function but can't do that inside the class.
remote <- mock_remote$new()
remote$run <- mockery::mock(TRUE, cycle = TRUE)
orderly_run_remote("minimal", remote = remote, root = path_local)
mockery::expect_called(remote$run, 1)
args <- mockery::mock_args(remote$run)[[1]]
expect_null(args$instance)
orderly_run_remote("minimal", remote = remote, root = path_local,
instance = "test")
mockery::expect_called(remote$run, 2)
args <- mockery::mock_args(remote$run)[[2]]
expect_equal(args$instance, "test")
})
test_that("orderly_bundle_(pack|import)_remote do not use root/locate", {
skip_on_cran_windows()
path <- test_prepare_orderly_example("minimal")
remote <- orderly_remote_path(path)
temp <- tempfile()
on.exit(unlink(temp, recursive = TRUE))
dir_create(temp)
res <- withr::with_dir(
temp,
orderly_bundle_pack_remote("example", remote = remote,
root = stop("don't force me"),
locate = stop("don't force me"),
dest = "."))
expect_true(file.exists(file.path(temp, basename(res))))
expect_equal(dirname(res), ".")
ans <- orderly_bundle_run(file.path(temp, basename(res)), echo = FALSE)
withr::with_dir(
temp,
orderly_bundle_import_remote(ans$path, remote = remote,
root = stop("don't force me"),
locate = stop("don't force me")))
expect_equal(remote$list_versions("example"), ans$id)
})
test_that("orderly run remote passes ref to run", {
path <- test_prepare_orderly_git_example()
## Create a minimal remote class which will satisfy implements_remote
mock_remote <- R6::R6Class(
"orderly_mock_remote",
lock_objects = FALSE,
public = list(
list_reports = function() TRUE,
list_versions = function() TRUE,
pull = function() TRUE,
url_report = function() TRUE
)
)
## Bit of awkwardness with adding run function here. We want to mock out new
## function but can't do that inside the class.
remote <- mock_remote$new()
remote$run <- mockery::mock(TRUE, cycle = TRUE)
orderly_run_remote("minimal", remote = remote, root = path[["local"]])
mockery::expect_called(remote$run, 1)
args <- mockery::mock_args(remote$run)[[1]]
expect_null(args$ref)
orderly_run_remote("minimal", remote = remote, root = path[["local"]],
ref = "master")
mockery::expect_called(remote$run, 2)
args <- mockery::mock_args(remote$run)[[2]]
expect_match(args$ref, "[0-9a-f]{40}")
})
test_that("can get status of remote queue", {
path_local <- test_prepare_orderly_example("demo")
status <- list(
tasks = list(
name = "slow3",
version = "20210423-143954-e634ca18",
key = "fungiform_kiwi",
status = "running"
)
)
mock_status <- mockery::mock(status)
## Create a minimal remote class which will satisfy implements_remote
mock_remote <- R6::R6Class(
"orderly_mock_remote",
lock_objects = FALSE,
public = list(
list_reports = function() TRUE,
list_versions = function() TRUE,
pull = function() TRUE,
run = function() TRUE,
url_report = function() TRUE,
queue_status = function() mock_status()
)
)
remote <- mock_remote$new()
res <- orderly_remote_status(remote = remote)
mockery::expect_called(mock_status, 1)
expect_equal(res, status)
})
test_that("pull leaf only", {
dat <- prepare_orderly_remote_example()
id3 <- orderly_run("depend", root = dat$path_remote, echo = FALSE)
orderly_commit(id3, root = dat$path_remote)
orderly_pull_archive("depend", root = dat$config, remote = dat$remote,
recursive = FALSE)
## We only have the one archive report now
expect_equal(
orderly_list_archive(dat$config),
data_frame(name = "depend", id = id3))
## And one with metadata only
expect_equal(
orderly_list_metadata(dat$config),
data_frame(name = "example", id = dat$id2))
expect_equal(
orderly_list_metadata(dat$config, include_archive = TRUE),
data_frame(name = "example", id = dat$id2))
## But we have two in the database
con <- orderly_db("destination", dat$config)
d <- DBI::dbReadTable(con, "report_version")
DBI::dbDisconnect(con)
expect_equal(nrow(d), 2)
expect_equal(d$id, c(dat$id2, id3))
})
test_that("Can rebuild when there is a metadata store", {
dat <- prepare_orderly_remote_example()
id3 <- orderly_run("depend", root = dat$path_remote, echo = FALSE)
orderly_commit(id3, root = dat$path_remote)
orderly_pull_archive("depend", root = dat$config, remote = dat$remote,
recursive = FALSE)
orderly_rebuild(dat$config)
con <- orderly_db("destination", dat$config)
d <- DBI::dbReadTable(con, "report_version")
DBI::dbDisconnect(con)
expect_equal(nrow(d), 2)
expect_equal(d$id, c(dat$id2, id3))
})
## FIX: typo in the test description ("will" -> "with").
test_that("Can cope with pulling complete tree after metadata pulled", {
  dat <- prepare_orderly_remote_example()
  id3 <- orderly_run("depend", root = dat$path_remote, echo = FALSE)
  orderly_commit(id3, root = dat$path_remote)
  ## First pull only the leaf, so "example" arrives as metadata only ...
  orderly_pull_archive("depend", root = dat$config, remote = dat$remote,
                       recursive = FALSE)
  ## ... then pull the full archive for "example" on top of that metadata.
  orderly_pull_archive("example", root = dat$config, remote = dat$remote)
  ## The metadata-only listing is now empty (verified below); the archive
  ## copy shows up only when include_archive = TRUE.
  expect_equal(
    orderly_list_metadata(dat$config),
    data_frame(name = character(), id = character()))
  expect_equal(
    orderly_list_metadata(dat$config, include_archive = TRUE),
    data_frame(name = "example", id = dat$id2))
  ## Rebuilding the db must cope with this mixed state without error.
  orderly_rebuild(dat$config)
})
test_that("re-pulling metadata prints informative message", {
dat <- prepare_orderly_remote_example()
expect_message(
orderly_pull_metadata("example", dat$id2, root = dat$config,
remote = dat$remote),
"metadata.+fetching example:[0-9]{8}-[0-9]{6}-[[:xdigit:]]{8}")
expect_message(
orderly_pull_metadata("example", dat$id2, root = dat$config,
remote = dat$remote),
"metadata.+example:[0-9]{8}-[0-9]{6}-[[:xdigit:]]{8} already exists, skip")
})
test_that("Can't pull incompatible metadata", {
dat <- prepare_orderly_remote_example()
p <- file.path(dat$path_remote, "archive", "example", dat$id2,
"orderly_run.rds")
d <- readRDS(p)
d$archive_version <- numeric_version("1.0.0")
saveRDS(d, p)
expect_error(
orderly_pull_metadata("example", dat$id2, root = dat$config,
remote = dat$remote),
"Can't migrate metadata for 'example:.+', migrate remote or pull archive")
d$archive_version <- numeric_version("9.0.0")
saveRDS(d, p)
expect_error(
orderly_pull_metadata("example", dat$id2, root = dat$config,
remote = dat$remote),
"Report was created with orderly more recent than this, upgrade!")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covarianza.R
\name{covarianza}
\alias{covarianza}
\title{Covarianza}
\usage{
covarianza(x, y)
}
\arguments{
\item{x}{Nombre variable X.}
\item{y}{Nombre variable Y.}
}
\description{
Calcula la covarianza entre dos variables de tipo continuas.
}
\examples{
data("albahaca")
covarianza(albahaca$produccion,albahaca$temp)
}
\references{
Casella, G. (1990). Statistical Inference. Duxbury Press.
}
|
/man/covarianza.Rd
|
no_license
|
ljofreflor/epg3308
|
R
| false
| true
| 497
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covarianza.R
\name{covarianza}
\alias{covarianza}
\title{Covarianza}
\usage{
covarianza(x, y)
}
\arguments{
\item{x}{Nombre variable X.}
\item{y}{Nombre variable Y.}
}
\description{
Calcula la covarianza entre dos variables de tipo continuas.
}
\examples{
data("albahaca")
covarianza(albahaca$produccion,albahaca$temp)
}
\references{
Casella, G. (1990). Statistical Inference. Duxbury Press.
}
|
library(dplyr)
snake_river_visits <- readRDS("D:/R_programmer/Introduction to Writing Functions in R/snake_river_visits.rds")
# # Look at the gold medals data
# gold_medals
# # Note the arguments to median()
# args(median)
# # Rewrite this function call, following best practices
# median(gold_medals, na.rm = TRUE)
# # Note the arguments to rank()
# args(rank)
# # Rewrite this function call, following best practices
# rank(-gold_medals, na.last = "keep", ties.method = "min")
###############################################################
# Your functions, from previous steps
toss_coin <- function() {
coin_sides <- c("head", "tail")
sample(coin_sides, 1)
}
# Call your function
toss_coin()
###############################################################
# Update the function to return n coin tosses
toss_coin <- function(n_flips) {
coin_sides <- c("head", "tail")
sample(coin_sides, size = n_flips, replace = TRUE)
}
# Generate 10 coin tosses
toss_coin(10)
###############################################################
# Update the function so heads have probability p_head
toss_coin <- function(n_flips, p_head) {
  ## Simulate n_flips tosses of a biased coin: "head" is drawn with
  ## probability p_head, "tail" with probability 1 - p_head.
  sides <- c("head", "tail")
  side_weights <- c(p_head, 1 - p_head)
  sample(sides, n_flips, replace = TRUE, prob = side_weights)
}
# Generate 10 coin tosses
toss_coin(10, 0.8)
###############################################################
# From previous step
run_poisson_regression <- function(data, formula) {
  ## Fit a Poisson GLM of `formula` on `data`. Data-first argument order
  ## so the function composes naturally with pipes.
  glm(formula = formula, data = data, family = poisson)
}
# Re-run the Poisson regression, using your function
model <- snake_river_visits %>%
run_poisson_regression(n_visits ~ gender + income + travel)
snake_river_explanatory <- snake_river_visits %>%
select(gender, income, travel) %>%
na.omit()
# Run this to see the predictions
snake_river_explanatory %>%
mutate(predicted_n_visits = predict(model, ., type = "response"))%>%
arrange(desc(predicted_n_visits))
###############################################################
|
/Introduction to Writing Functions in R/write_functions.R
|
no_license
|
muhammadali229/R_programmer
|
R
| false
| false
| 2,036
|
r
|
library(dplyr)
snake_river_visits <- readRDS("D:/R_programmer/Introduction to Writing Functions in R/snake_river_visits.rds")
# # Look at the gold medals data
# gold_medals
# # Note the arguments to median()
# args(median)
# # Rewrite this function call, following best practices
# median(gold_medals, na.rm = TRUE)
# # Note the arguments to rank()
# args(rank)
# # Rewrite this function call, following best practices
# rank(-gold_medals, na.last = "keep", ties.method = "min")
###############################################################
# Your functions, from previous steps
toss_coin <- function() {
coin_sides <- c("head", "tail")
sample(coin_sides, 1)
}
# Call your function
toss_coin()
###############################################################
# Update the function to return n coin tosses
toss_coin <- function(n_flips) {
coin_sides <- c("head", "tail")
sample(coin_sides, size = n_flips, replace = TRUE)
}
# Generate 10 coin tosses
toss_coin(10)
###############################################################
# Update the function so heads have probability p_head
toss_coin <- function(n_flips, p_head) {
coin_sides <- c("head", "tail")
# Define a vector of weights
weights <- c(p_head, 1 - p_head)
# Modify the sampling to be weighted
sample(coin_sides, n_flips, replace = TRUE, prob = weights)
}
# Generate 10 coin tosses
toss_coin(10, 0.8)
###############################################################
# From previous step
run_poisson_regression <- function(data, formula) {
glm(formula, data, family = poisson)
}
# Re-run the Poisson regression, using your function
model <- snake_river_visits %>%
run_poisson_regression(n_visits ~ gender + income + travel)
snake_river_explanatory <- snake_river_visits %>%
select(gender, income, travel) %>%
na.omit()
# Run this to see the predictions
snake_river_explanatory %>%
mutate(predicted_n_visits = predict(model, ., type = "response"))%>%
arrange(desc(predicted_n_visits))
###############################################################
|
source('code/functions/model.R')
# Define world ----
fuel_price = 0.80 # set fuel price
ex1 <- ex2 <- ex3 <- ex4 <- 0.10 # set ex-vessel
x <- tac.state.sx(abc.state)
x <- sim.season.area(x)
x <- f.simulation.sx(x)
write_csv(x, "output/80_10/state_super_exclusive_80_10.csv")
# notes
# no behavior change - prob of fishing remains same
# quota is equally split across all participants/areas/ports
# sequential abc
x <- tac.state.sx(abc.state.seq)
x <- sim.season.area(x)
x1 <- f.simulation.sx(x) %>% mutate(rep = 1)
write_csv(x1, "output/80_10_abc_seq/state_super_exclusive_80_10_seq.csv")
# to be run later
x2 <- f.simulation.sx(x) %>% mutate(rep = 2)
x2 = f.clean.between(x1, x2); gc()
x3 <- f.simulation.sx(x) %>% mutate(rep = 3)
x3 = f.clean.between(x2, x3); gc()
x4 <- f.simulation.sx(x) %>% mutate(rep = 4)
x4 = f.clean.between(x3, x4); gc()
x5 <- f.simulation.sx(x) %>% mutate(rep = 5)
aa = f.clean.between(x4, x5); gc()
|
/code/scenarios/80_10/state_llp_super_x.R
|
no_license
|
ben-williams/parallel_diverge
|
R
| false
| false
| 934
|
r
|
source('code/functions/model.R')
# Define world ----
fuel_price = 0.80 # set fuel price
ex1 <- ex2 <- ex3 <- ex4 <- 0.10 # set ex-vessel
x <- tac.state.sx(abc.state)
x <- sim.season.area(x)
x <- f.simulation.sx(x)
write_csv(x, "output/80_10/state_super_exclusive_80_10.csv")
# notes
# no behavior change - prob of fishing remains same
# quota is equally split across all participants/areas/ports
# sequential abc
x <- tac.state.sx(abc.state.seq)
x <- sim.season.area(x)
x1 <- f.simulation.sx(x) %>% mutate(rep = 1)
write_csv(x1, "output/80_10_abc_seq/state_super_exclusive_80_10_seq.csv")
# to be run later
x2 <- f.simulation.sx(x) %>% mutate(rep = 2)
x2 = f.clean.between(x1, x2); gc()
x3 <- f.simulation.sx(x) %>% mutate(rep = 3)
x3 = f.clean.between(x2, x3); gc()
x4 <- f.simulation.sx(x) %>% mutate(rep = 4)
x4 = f.clean.between(x3, x4); gc()
x5 <- f.simulation.sx(x) %>% mutate(rep = 5)
aa = f.clean.between(x4, x5); gc()
|
#' Graphical User Interface for Choosing HCL Color Palettes
#'
#' A graphical user interface (GUI) for viewing, manipulating, and choosing HCL
#' color palettes.
#'
#' Computes palettes based on the HCL (hue-chroma-luminance) color model (as
#' implemented by \code{\link{polarLUV}}). The GUIs interface the palette
#' functions \code{\link{qualitative_hcl}} for qualitative palettes,
#' \code{\link{sequential_hcl}} for sequential palettes with a single or
#' multiple hues, and \code{\link{diverge_hcl}} for diverging palettes (composed
#' from two single-hue sequential palettes).
#'
#' Two different GUIs are implemented and can be selected using the function
#' input argument \code{gui} (\code{"tcltk"} or \code{"shiny"}). Both GUIs
#' allow interactive modification of the arguments of the respective
#' palette-generating functions, i.e., starting/ending hue (wavelength, type of
#' color), minimal/maximal chroma (colorfulness), minimal/maximal luminance
#' (brightness, amount of gray), and a power transformations that control how
#' quickly/slowly chroma and/or luminance are changed through the palette.
#' Subsets of the parameters may not be applicable depending on the type of
#' palette chosen. See \code{\link{qualitative_hcl}} and Zeileis et al. (2009) for
#' a more detailed explanation of the different arguments. Stauffer et al.
#' (2015) provide more examples and guidance.
#'
#' Optionally, active palette can be illustrated by using a range of examples
#' such as a map, heatmap, scatter plot, perspective 3D surface etc.
#'
#' To demonstrate different types of deficiencies, the active palette may be
#' desaturated (emulating printing on a grayscale printer) and collapsed to
#' emulate different types of color-blindness (without red-green or green-blue
#' contrasts) using the \code{\link{simulate_cvd}} functions.
#'
#' @param pal function; the initial palette, see \sQuote{Value} below. Only
#' used if \code{gui = "tcltk"}.
#' @param n integer; the initial number of colors in the palette.
#' @param parent tkwin; the GUI parent window. Only used if \code{gui =
#' "tcltk"}.
#' @param gui character; GUI to use. Available options are \code{tcltk} and
#' \code{shiny}, see \sQuote{Details} below.
#' @param shiny.trace boolean, default \code{FALSE}. Used for debugging if
#' \code{gui = "shiny"}.
#' @return Returns a palette-generating function with the selected arguments.
#' Thus, the returned function takes an integer argument and returns the
#' corresponding number of HCL colors by traversing HCL space through
#' interpolation of the specified hue/chroma/luminance/power values.
#' @author Jason C. Fisher, Reto Stauffer, Achim Zeileis
#' @seealso \code{\link{simulate_cvd}}, \code{\link{desaturate}}, \code{\link{qualitative_hcl}}.
#' @references Zeileis A, Hornik K, Murrell P (2009). Escaping RGBland:
#' Selecting Colors for Statistical Graphics. \emph{Computational Statistics &
#' Data Analysis}, \bold{53}, 3259--3270.
#' \doi{10.1016/j.csda.2008.11.033}
#' Preprint available from
#' \url{https://eeecon.uibk.ac.at/~zeileis/papers/Zeileis+Hornik+Murrell-2009.pdf}.
#'
#' Stauffer R, Mayr GJ, Dabernig M, Zeileis A (2015). Somewhere over the
#' Rainbow: How to Make Effective Use of Colors in Meteorological
#' Visualizations. \emph{Bulletin of the American Meteorological Society},
#' \bold{96}(2), 203--216.
#' \doi{10.1175/BAMS-D-13-00155.1}
#' @keywords misc
#' @examples
#' if(interactive()) {
#' ## Using tcltk GUI
#' pal <- choose_palette()
#' ## or equivalently: hclwizard(gui = "tcltk")
#'
#' ## Using shiny GUI
#' pal <- hclwizard()
#' ## or equivalently: choose_palette(gui = "shiny")
#'
#' ## use resulting palette function
#' filled.contour(volcano, color.palette = pal, asp = 1)
#' }
#' @importFrom grDevices dev.cur dev.list dev.new dev.off dev.set
#' @export
choose_palette <- function(pal = diverge_hcl, n = 7L, parent = NULL, gui = "tcltk") {
  ## Dispatch to the GUI-specific implementation (choose_palette_tcltk or
  ## choose_palette_shiny), forwarding the initial palette, the palette
  ## size and the parent window.
  gui <- match.arg(gui, c("tcltk", "shiny"))
  backend <- paste0("choose_palette_", gui)
  do.call(backend, list(pal = pal, n = n, parent = parent))
}
#' @rdname choose_palette
#' @export
hclwizard <- function(n = 7L, gui = "shiny", shiny.trace = FALSE) {
  ## Launch the hclwizard GUI; `gui` selects the tcltk or shiny front end.
  gui <- match.arg(gui, c("tcltk", "shiny"))
  do.call(paste0("choose_palette_", gui),
          list(n = n, shiny.trace = shiny.trace))
}
#' @rdname choose_palette
#' @export
hcl_wizard <- function(n = 7L, gui = "shiny", shiny.trace = FALSE) {
  ## Alias for hclwizard().
  ## BUG FIX: forward the caller's shiny.trace instead of hard-coding
  ## FALSE, which made the shiny.trace argument a silent no-op.
  hclwizard(n = n, gui = gui, shiny.trace = shiny.trace)
}
# hclwizard shiny GUI for selecting color palette
choose_palette_shiny <- function(pal, shiny.trace = FALSE, n = 7L, ...) {
# Requirements for shiny application
stopifnot(requireNamespace("shiny"), requireNamespace("shinyjs"))
appDir <- system.file("hclwizard", package = "colorspace")
if (appDir == "")
stop("Could not find hclwizard app directory. Try re-installing `colorspace`.", call. = FALSE)
# Start shiny
Sys.setenv("hclwizard_Ninit"=n)
options(shiny.trace=shiny.trace)
pal <- shiny::runApp(appDir, display.mode = "normal", quiet = TRUE )
Sys.unsetenv("hclwizard_Ninit")
return(pal)
}
# tcltk GUI for selecting a color palette
choose_palette_tcltk <- function( pal = diverge_hcl, n=7L, parent = NULL, ... ) {
# Choose a file interactively
# Choose a file interactively
# Wraps tcltk's tk_getOpenFile / tk_getSaveFile dialogs. Returns the chosen
# path, or NULL (via the bare return()) when the user cancels. The last-used
# directory is remembered in `initialdir` in the enclosing environment.
ChooseFile <- function(cmd, win.title, initialfile=NULL,
defaultextension=NULL) {
filetypes <- "{{R Source Files} {.R}} {{All files} {*}}"
if (cmd == "Open") {
args <- list("tk_getOpenFile")
} else {
args <- list("tk_getSaveFile")
# Saving a .txt file switches the file-type filter to text files.
if (defaultextension == ".txt")
filetypes <- "{{Text Files} {.txt}} {{All files} {*}}"
}
args[["title"]] <- win.title
args[["parent"]] <- tt
args[["initialdir"]] <- initialdir
args[["filetypes"]] <- filetypes
if (!is.null(initialfile))
args[["initialfile"]] <- initialfile
if (!is.null(defaultextension))
args[["defaultextension"]] <- defaultextension
f <- tcltk::tclvalue(do.call(tcltk::tcl, args))
# An empty string means the dialog was cancelled.
if (!nzchar(f))
return()
# Remember the directory for the next dialog.
initialdir <<- dirname(f)
f
}
# Open palette from file
# Open palette from file
# Reads a palette function previously saved with dput() and loads its
# settings into the GUI widgets.
OpenPaletteFromFile <- function() {
f <- ChooseFile(cmd="Open", win.title="Open Palette File")
if (is.null(f))
return()
pal <- dget(file=f)
ConvertPaletteToAttributes(pal)
AssignAttributesToWidgets()
UpdateDataType()
}
# Save palette to file
# Save palette to file
# Serialises the palette-generating function for the current GUI settings
# to an R source file via dput().
SavePaletteToFile <- function() {
f <- ChooseFile(cmd="Save As", win.title="Save Palette As",
initialfile="color_palette", defaultextension=".R")
if (is.null(f))
return()
type <- as.character(tcltk::tclvalue(nature.var))
pal <- GetPalette(type, h1, h2, c1, c2, l1, l2, p1, p2, fixup)
dput(pal, file=f)
}
# Save colors to file
SaveColorsToFile <- function(type) {
type <- as.character(tcltk::tclvalue(nature.var))
pal <- GetPalette(type, h1, h2, c1, c2, l1, l2, p1, p2, fixup)
cols <- try(hex2RGB(pal(n)), silent=TRUE)
if (inherits(cols, "try-error")) {
msg <- "Palette results in invaild hexadecimal colors."
tcltk::tkmessageBox(icon="error", message=msg, title="Color Error",
parent=tt)
return()
}
f <- ChooseFile(cmd="Save As", win.title="Save Colors As",
initialfile=paste("colors_", type, sep=""),
defaultextension=".txt")
if (is.null(f))
return()
if (type == "HEX") {
writehex(cols, file=f)
} else {
if (type == "sRGB") {
cols <- as(cols, "sRGB")@coords
} else if (type == "HSV") {
cols <- as(cols, "HSV")@coords
} else if (type == "HCL") {
cols <- as(cols, "polarLUV")@coords
} else if (type == "CMYK") {
cols <- as(cols, "RGB")@coords
red <- cols[, "R"]
green <- cols[, "G"]
blue <- cols[, "B"]
black <- sapply(1:n, function(i) min(c(1 - red[i], 1 - green[i],
1 - blue[i])))
cyan <- (1 - red - black) / (1 - black)
magenta <- (1 - green - black) / (1 - black)
yellow <- (1 - blue - black) / (1 - black)
cols <- as.matrix(as.data.frame(list(C=cyan, M=black, Y=yellow,
K=black)))
}
utils::write.table(cols, file=f, quote=FALSE, row.names=FALSE, sep="\t")
}
}
# Save palette and quit
SavePalette <- function() {
type <- as.character(tcltk::tclvalue(nature.var))
pal.rtn <<- GetPalette(type, h1, h2, c1, c2, l1, l2, p1, p2, fixup)
tcltk::tclvalue(tt.done.var) <- 1
}
# Scale change
ScaleChange <- function(x, v, x.ent.var) {
if (x == get(v))
return()
assign(v, x, inherits=TRUE)
fmt <- ifelse(v %in% c("p1", "p2"), "%.1f", "%.0f")
tcltk::tclvalue(x.ent.var) <- sprintf(fmt, x)
DrawPalette(v == "n")
}
# Entry change
EntryChange <- function(v, x.lim, x.ent.var, x.scl.var) {
x <- suppressWarnings(as.numeric(tcltk::tclvalue(x.ent.var)))
if (is.na(x))
return()
if (x < x.lim[1]) {
tcltk::tclvalue(x.ent.var) <- x.lim[1]
x <- x.lim[1]
} else if (x > x.lim[2]) {
tcltk::tclvalue(x.ent.var) <- x.lim[2]
x <- x.lim[2]
}
assign(v, x, inherits=TRUE)
tcltk::tclvalue(x.scl.var) <- x
DrawPalette(v == "n")
}
# Helper function to create the hex palettes.
# Generates "n" colors from palette "pal" and manipulates them
# if desaturation or CVD simulation is required.
get_hex_colors <- function(pal,n) {
pal.cols <- pal(n)
pal.cols[is.na(pal.cols)] <- "#FFFFFF"
if (as.logical(as.integer(tcltk::tclvalue(desaturation.var))))
pal.cols <- desaturate(pal.cols)
if (as.logical(as.integer(tcltk::tclvalue(colorblind.var)))) {
type <- as.character(tcltk::tclvalue(colorblind.type.var))
pal.cols <- do.call(type,list("col"=pal.cols))
}
pal.cols
}
# Draw palette
DrawPalette <- function(is.n=FALSE) {
type <- as.character(tcltk::tclvalue(nature.var))
pal <- GetPalette(type, h1, h2, c1, c2, l1, l2, p1, p2, fixup)
if (!is.n)
tcltk::tcl(frame2.cvs, "delete", "browse")
tcltk::tcl(frame7.cvs, "delete", "pal")
# Reto, Nov 2016: outsourced
pal.cols <- get_hex_colors(pal,n)
dx <- (cvs.width - 1) / n
x2 <- 1
y1 <- 1
y2 <- cvs.height
for (i in pal.cols) {
x1 <- x2
x2 <- x1 + dx
pts <- tcltk::.Tcl.args(c(x1, y1, x2, y1, x2, y2, x1, y2))
tcltk::tkcreate(frame7.cvs, "polygon", pts, fill=i, tag="pal")
}
RegenExample(pal,n)
}
# Update data type
UpdateDataType <- function() {
type <- as.character(tcltk::tclvalue(nature.var))
if (type == "Qualitative") {
is.normal <- c(TRUE, FALSE, FALSE, FALSE, FALSE)
default.pals <<- qual.pals
} else if (type == "Sequential (single hue)") {
is.normal <- c(FALSE, TRUE, TRUE, TRUE, FALSE)
default.pals <<- seqs.pals
} else if (type == "Sequential (multiple hues)") {
is.normal <- c(TRUE, TRUE, TRUE, TRUE, TRUE)
default.pals <<- seqm.pals
} else if (type == "Diverging") {
is.normal <- c(TRUE, FALSE, TRUE, TRUE, FALSE)
default.pals <<- dive.pals
}
# Default palettes
tcltk::tcl(frame2.cvs, "delete", "default")
x1 <- 10
for (i in 1:length(default.pals)) {
# Create numeric palette parameter list, drop name
args <- as.list(as.list(default.pals[[i]][-10]))
args[['type']] <- as.character(tcltk::tclvalue(nature.var))
pal <- do.call(GetPalette, args=args)
pal.cols <- pal(5)
pal.cols[is.na(pal.cols)] <- "#FFFFFF"
y2 <- 10
for (j in pal.cols) {
x2 <- x1 + 20
y1 <- y2
y2 <- y1 + 10
pts <- tcltk::.Tcl.args(c(x1, y1, x2, y1, x2, y2, x1, y2))
tcltk::tkcreate(frame2.cvs, "polygon", pts, fill=j, tag="default")
}
x1 <- x1 + 30
}
s <- ifelse(is.normal, "normal", "disabled")
tcltk::tkconfigure(frame3.lab.2.1, state=s[1])
tcltk::tkconfigure(frame3.lab.4.1, state=s[2])
tcltk::tkconfigure(frame3.lab.6.1, state=s[3])
tcltk::tkconfigure(frame3.lab.7.1, state=s[4])
tcltk::tkconfigure(frame3.lab.8.1, state=s[5])
tcltk::tkconfigure(frame3.ent.2.3, state=s[1])
tcltk::tkconfigure(frame3.ent.4.3, state=s[2])
tcltk::tkconfigure(frame3.ent.6.3, state=s[3])
tcltk::tkconfigure(frame3.ent.7.3, state=s[4])
tcltk::tkconfigure(frame3.ent.8.3, state=s[5])
s <- ifelse(is.normal, "!disabled", "disabled")
tcltk::tcl(frame3.scl.2.2, "state", s[1])
tcltk::tcl(frame3.scl.4.2, "state", s[2])
tcltk::tcl(frame3.scl.6.2, "state", s[3])
tcltk::tcl(frame3.scl.7.2, "state", s[4])
tcltk::tcl(frame3.scl.8.2, "state", s[5])
DrawPalette()
}
# Select default palette
SelectDefaultPalette <- function(x, y) {
x <- as.numeric(x)
y <- as.numeric(y)
if (is.na(x) | is.na(y))
return()
y1 <- 5
y2 <- 65
if (y < y1 | y > y2)
return()
max.x <- length(default.pals) * 30 + 10
if (x < 5 | x > max.x)
return()
x.seq <- seq(5, max.x, by=30)
i <- findInterval(x, x.seq, rightmost.closed=TRUE)
x1 <- x.seq[i]
x2 <- x.seq[i + 1]
for (j in 1:length(vars)) {
if ( vars[j] == "name" ) next
val <- as.numeric(default.pals[[i]][j])
if (is.na(val))
val <- 0
assign(vars[j], val, inherits=TRUE)
}
AssignAttributesToWidgets()
DrawPalette()
pts <- tcltk::.Tcl.args(c(x1, y1, x2, y1, x2, y2, x1, y2) - 0.5)
tcltk::tkcreate(frame2.cvs, "polygon", pts, fill="", outline="black", tag="browse")
}
# Convert palette to attributes
ConvertPaletteToAttributes <- function(pal) {
pal.attributes <- NULL
if (inherits(pal, "function")) {
what <- c("numeric", "integer")
q.args <- c("c", "l", "start", "end")
d.args <- c("h", "c", "l", "power")
s.args <- c("h", "c.", "l", "power")
arg <- sapply(formals(pal), function(i) {if (is.call(i)) eval(i) else i})
if (!is.null(arg$fixup) && is.logical(arg$fixup))
fix.up <- as.integer(arg$fixup)
else
fix.up <- 1
if (all(sapply(q.args, function(i) inherits(arg[[i]], what)))) {
tcltk::tclvalue(nature.var) <- "Qualitative"
pal.attributes <- c(arg$start, arg$end, arg$c, NA, arg$l, NA, NA, NA, fix.up)
} else if (all(sapply(s.args, function(i) inherits(arg[[i]], what)))) {
if (length(arg$h) == 1 && length(arg$p) == 1) {
tcltk::tclvalue(nature.var) <- "Sequential (single hue)"
pal.attributes <- c(arg$h, NA, arg$c., arg$l, arg$power, NA, fix.up)
} else {
tcltk::tclvalue(nature.var) <- "Sequential (multiple hues)"
pal.attributes <- c(arg$h, arg$c., arg$l, arg$power, fix.up)
}
} else if (all(sapply(d.args, function(i) inherits(arg[[i]], what)))) {
tcltk::tclvalue(nature.var) <- "Diverging"
pal.attributes <- c(arg$h, arg$c, NA, arg$l, arg$power, NA, fix.up)
}
}
if (is.null(pal.attributes)) {
tcltk::tclvalue(nature.var) <- "Sequential (multiple hues)"
pal.attributes <- seqm.pals[[4]]
}
for (i in 1:length(vars)) {
if (is.na(pal.attributes[i]))
assign(vars[i], 0, inherits=TRUE)
else
assign(vars[i], pal.attributes[i], inherits=TRUE)
}
AssignAttributesToWidgets()
}
# Assign attributes to widgets
AssignAttributesToWidgets <- function() {
tcltk::tclvalue(h1.ent.var) <- sprintf("%.0f", h1)
tcltk::tclvalue(h2.ent.var) <- sprintf("%.0f", h2)
tcltk::tclvalue(c1.ent.var) <- sprintf("%.0f", c1)
tcltk::tclvalue(c2.ent.var) <- sprintf("%.0f", c2)
tcltk::tclvalue(l1.ent.var) <- sprintf("%.0f", l1)
tcltk::tclvalue(l2.ent.var) <- sprintf("%.0f", l2)
tcltk::tclvalue(p1.ent.var) <- sprintf("%.1f", p1)
tcltk::tclvalue(p2.ent.var) <- sprintf("%.1f", p2)
tcltk::tclvalue(h1.scl.var) <- h1
tcltk::tclvalue(h2.scl.var) <- h2
tcltk::tclvalue(c1.scl.var) <- c1
tcltk::tclvalue(c2.scl.var) <- c2
tcltk::tclvalue(l1.scl.var) <- l1
tcltk::tclvalue(l2.scl.var) <- l2
tcltk::tclvalue(p1.scl.var) <- p1
tcltk::tclvalue(p2.scl.var) <- p2
tcltk::tclvalue(fixup.var) <- fixup
}
# Show example plot
ShowExample <- function() {
if (!dev.example %in% dev.list()) {
dev.new(width=7L, height=7L)
dev.example <<- dev.cur()
}
par(oma=c(0, 0, 0, 0), mar=c(0, 0, 0, 0))
DrawPalette(is.n=TRUE)
}
# Regenerate example plot
RegenExample <- function(pal,n) {
if (dev.example %in% dev.list())
dev.set(which=dev.example)
else
return()
plot_example <- eval(parse(text=sprintf("plot_%s", tolower(tcltk::tclvalue(example.var)))))
# Reto, Nov 2016: Picking colors. For 'Example Spectrum' 100 colors
# will be choosen (overruling input "n").
if ( tcltk::tclvalue(example.var) == "Spectrum" ) n <- 100
pal.cols <- get_hex_colors(pal,n)
if (as.logical(as.integer(tcltk::tclvalue(reverse.var))))
pal.cols <- rev(pal.cols)
plot_example(pal.cols)
}
# Main program
# Initialize directory
initialdir <- getwd()
# Initialize return palette
pal.rtn <- NULL
# Initialize default palettes
default.pals <- NULL
# Initialize data for scatter plot example
xyhclust <- NULL
# Initialize data for mosaic plot example
msc.matrix <- NULL
# Flag graphics device
dev.example <- 1
# Set default and initial palettes
h1 <- h2 <- c1 <- c2 <- l1 <- l2 <- p1 <- p2 <- 0
fixup <- 1
#vars <- c("h1", "h2", "c1", "c2", "l1", "l2", "p1", "p2", "fixup")
# Load/Define palettes
vars <- vars.pal
qual.pals <- qual.pals
seqs.pals <- seqs.pals
seqm.pals <- seqm.pals
dive.pals <- dive.pals
# Set limits for palette attributes
n.lim <- c( 1, 50)
h.lim <- c(-360, 360)
c.lim <- c( 0, 100)
l.lim <- c( 0, 100)
p.lim <- c( 0, 3)
# Set dimensions on palette canvas
cvs.width <- 328 # 30 * 10 + 10 + 18
cvs.height <- 25
# Assign additional variables linked to Tk widgets
example.var <- tcltk::tclVar()
nature.var <- tcltk::tclVar()
n.scl.var <- tcltk::tclVar(n)
n.ent.var <- tcltk::tclVar(n)
h1.scl.var <- tcltk::tclVar()
h1.ent.var <- tcltk::tclVar()
h2.scl.var <- tcltk::tclVar()
h2.ent.var <- tcltk::tclVar()
c1.scl.var <- tcltk::tclVar()
c1.ent.var <- tcltk::tclVar()
c2.scl.var <- tcltk::tclVar()
c2.ent.var <- tcltk::tclVar()
l1.scl.var <- tcltk::tclVar()
l1.ent.var <- tcltk::tclVar()
l2.scl.var <- tcltk::tclVar()
l2.ent.var <- tcltk::tclVar()
p1.scl.var <- tcltk::tclVar()
p1.ent.var <- tcltk::tclVar()
p2.scl.var <- tcltk::tclVar()
p2.ent.var <- tcltk::tclVar()
fixup.var <- tcltk::tclVar(fixup)
reverse.var <- tcltk::tclVar(FALSE)
desaturation.var <- tcltk::tclVar(FALSE)
colorblind.var <- tcltk::tclVar(FALSE)
colorblind.type.var <- tcltk::tclVar("deutan")
tt.done.var <- tcltk::tclVar(0)
# Open GUI
tcltk::tclServiceMode(FALSE)
tt <- tcltk::tktoplevel()
if (!is.null(parent)) {
tcltk::tkwm.transient(tt, parent)
geo <- unlist(strsplit(as.character(tcltk::tkwm.geometry(parent)), "\\+"))
tcltk::tkwm.geometry(tt, paste("+", as.integer(geo[2]) + 25,
"+", as.integer(geo[3]) + 25, sep=""))
}
tcltk::tkwm.resizable(tt, 0, 0)
tcltk::tktitle(tt) <- "Choose Color Palette"
# Top file menu
top.menu <- tcltk::tkmenu(tt, tearoff=0)
menu.file <- tcltk::tkmenu(tt, tearoff=0)
tcltk::tkadd(top.menu, "cascade", label="File", menu=menu.file, underline=0)
tcltk::tkadd(menu.file, "command", label="Open palette", accelerator="Ctrl+O",
command=OpenPaletteFromFile)
tcltk::tkadd(menu.file, "command", label="Save palette as",
accelerator="Shift+Ctrl+S", command=SavePaletteToFile)
menu.file.colors <- tcltk::tkmenu(tt, tearoff=0)
tcltk::tkadd(menu.file.colors, "command", label="HEX",
command=function() SaveColorsToFile("HEX"))
tcltk::tkadd(menu.file.colors, "command", label="sRGB",
command=function() SaveColorsToFile("sRGB"))
tcltk::tkadd(menu.file.colors, "command", label="HSV",
command=function() SaveColorsToFile("HSV"))
tcltk::tkadd(menu.file.colors, "command", label="HCL",
command=function() SaveColorsToFile("HCL"))
tcltk::tkadd(menu.file.colors, "command", label="CMYK",
command=function() SaveColorsToFile("CMYK"))
tcltk::tkadd(menu.file, "cascade", label="Save colors as", menu=menu.file.colors)
tcltk::tkconfigure(tt, menu=top.menu)
# Frame 0, ok and cancel buttons
frame0 <- tcltk::ttkframe(tt, relief="flat")
frame0.but.3 <- tcltk::ttkbutton(frame0, width=12, text="OK", command=SavePalette)
frame0.but.4 <- tcltk::ttkbutton(frame0, width=12, text="Cancel",
command=function() {
pal.rtn <<- NULL
tcltk::tclvalue(tt.done.var) <- 1
})
tcltk::tkgrid("x", frame0.but.3, frame0.but.4, pady=c(10, 10))
tcltk::tkgrid.configure(frame0.but.3, sticky="e")
tcltk::tkgrid.configure(frame0.but.4, sticky="w", padx=c(4, 10))
tcltk::tkgrid.columnconfigure(frame0, 0, weight=1)
tcltk::tkpack(frame0, fill="x", side="bottom", anchor="e")
# Frame 1, choose nature of data
frame1 <- tcltk::ttkframe(tt, relief="flat")
frame1.lab.1 <- tcltk::ttklabel(frame1, text="The nature of your data")
frame1.box.2 <- tcltk::ttkcombobox(frame1, state="readonly", textvariable=nature.var,
values=c("Qualitative", "Sequential (single hue)",
"Sequential (multiple hues)",
"Diverging"))
tcltk::tkgrid(frame1.lab.1, frame1.box.2, pady=c(10, 0))
tcltk::tkgrid.configure(frame1.lab.1, padx=c(10, 2))
tcltk::tkgrid.configure(frame1.box.2, padx=c(0, 10), sticky="we")
tcltk::tkgrid.columnconfigure(frame1, 1, weight=1)
tcltk::tkpack(frame1, fill="x")
# Frame 2, default color schemes
frame2 <- tcltk::ttklabelframe(tt, relief="flat", borderwidth=5, padding=5,
text="Default color schemes")
frame2.cvs <- tcltk::tkcanvas(frame2, relief="flat", width=30 * 10 + 10, height=70,
background="white", confine=TRUE, closeenough=0,
borderwidth=0, highlightthickness=0)
tcltk::tkgrid(frame2.cvs, sticky="we")
tcltk::tkgrid.columnconfigure(frame2, 0, weight=1)
tcltk::tkpack(frame2, fill="x", padx=10, pady=10)
# Frame 3, color description
txt <- "Palette description: Hue, Chroma, Luminance, Power"
frame3 <- tcltk::ttklabelframe(tt, relief="flat", borderwidth=5, padding=5, text=txt)
frame3.lab.1.1 <- tcltk::ttklabel(frame3, text="H1", width=2)
frame3.lab.2.1 <- tcltk::ttklabel(frame3, text="H2", width=2)
frame3.lab.3.1 <- tcltk::ttklabel(frame3, text="C1", width=2)
frame3.lab.4.1 <- tcltk::ttklabel(frame3, text="C2", width=2)
frame3.lab.5.1 <- tcltk::ttklabel(frame3, text="L1", width=2)
frame3.lab.6.1 <- tcltk::ttklabel(frame3, text="L2", width=2)
frame3.lab.7.1 <- tcltk::ttklabel(frame3, text="P1", width=2)
frame3.lab.8.1 <- tcltk::ttklabel(frame3, text="P2", width=2)
frame3.ent.1.3 <- tcltk::ttkentry(frame3, textvariable=h1.ent.var, width=4)
frame3.ent.2.3 <- tcltk::ttkentry(frame3, textvariable=h2.ent.var, width=4)
frame3.ent.3.3 <- tcltk::ttkentry(frame3, textvariable=c1.ent.var, width=4)
frame3.ent.4.3 <- tcltk::ttkentry(frame3, textvariable=c2.ent.var, width=4)
frame3.ent.5.3 <- tcltk::ttkentry(frame3, textvariable=l1.ent.var, width=4)
frame3.ent.6.3 <- tcltk::ttkentry(frame3, textvariable=l2.ent.var, width=4)
frame3.ent.7.3 <- tcltk::ttkentry(frame3, textvariable=p1.ent.var, width=4)
frame3.ent.8.3 <- tcltk::ttkentry(frame3, textvariable=p2.ent.var, width=4)
frame3.scl.1.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=h.lim[1], to=h.lim[2],
orient="horizontal", value=h1, variable=h1.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="h1",
x.ent.var=h1.ent.var)
})
frame3.scl.2.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=h.lim[1], to=h.lim[2],
orient="horizontal", value=h2, variable=h2.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="h2",
x.ent.var=h2.ent.var)
})
frame3.scl.3.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=c.lim[1], to=c.lim[2],
orient="horizontal", value=c1, variable=c1.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="c1",
x.ent.var=c1.ent.var)
})
frame3.scl.4.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=c.lim[1], to=c.lim[2],
orient="horizontal", value=c2, variable=c2.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="c2",
x.ent.var=c2.ent.var)
})
frame3.scl.5.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=l.lim[1], to=l.lim[2],
orient="horizontal", value=l1, variable=l1.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="l1",
x.ent.var=l1.ent.var)
})
frame3.scl.6.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=l.lim[1], to=l.lim[2],
orient="horizontal", value=l2, variable=l2.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="l2",
x.ent.var=l2.ent.var)
})
frame3.scl.7.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=p.lim[1], to=p.lim[2],
orient="horizontal", value=p1, variable=p1.scl.var,
command=function(...) {
ScaleChange(x=as.numeric(...), v="p1",
x.ent.var=p1.ent.var)
})
frame3.scl.8.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=p.lim[1], to=p.lim[2],
orient="horizontal", value=p2, variable=p2.scl.var,
command=function(...) {
ScaleChange(x=as.numeric(...), v="p2",
x.ent.var=p2.ent.var)
})
tcltk::tkgrid(frame3.lab.1.1, frame3.scl.1.2, frame3.ent.1.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.2.1, frame3.scl.2.2, frame3.ent.2.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.3.1, frame3.scl.3.2, frame3.ent.3.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.4.1, frame3.scl.4.2, frame3.ent.4.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.5.1, frame3.scl.5.2, frame3.ent.5.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.6.1, frame3.scl.6.2, frame3.ent.6.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.7.1, frame3.scl.7.2, frame3.ent.7.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.8.1, frame3.scl.8.2, frame3.ent.8.3)
tcltk::tkgrid.configure(frame3.scl.1.2, frame3.scl.2.2, frame3.scl.3.2,
frame3.scl.4.2, frame3.scl.5.2, frame3.scl.6.2,
frame3.scl.7.2, frame3.scl.8.2,
sticky="we", padx=c(4, 10))
tcltk::tkgrid.columnconfigure(frame3, 1, weight=1)
tcltk::tkpack(frame3, fill="x", padx=10, pady=0)
# Frame 4, color palette fixup
frame4 <- tcltk::ttkframe(tt, relief="flat")
txt <- "Correct all colors to valid RGB color model values"
frame4.chk.1 <- tcltk::ttkcheckbutton(frame4, text=txt, variable=fixup.var,
command=function() {
fixup <<- as.integer(tcltk::tclvalue(fixup.var))
DrawPalette(is.n=TRUE)
})
tcltk::tkgrid.configure(frame4.chk.1, padx=c(12, 0), pady=c(2, 0))
tcltk::tkpack(frame4, fill="x")
# Frame 5, number of colors in palette
txt <- "Number of colors in palette"
frame5 <- tcltk::ttklabelframe(tt, relief="flat", borderwidth=5, padding=5, text=txt)
frame5.lab.1 <- tcltk::ttklabel(frame5, text="n", width=2)
frame5.ent.3 <- tcltk::ttkentry(frame5, textvariable=n.ent.var, width=4)
frame5.scl.2 <- tcltk::tkwidget(frame5, "ttk::scale", from=n.lim[1], to=n.lim[2],
orient="horizontal", value=n, variable=n.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="n",
x.ent.var=n.ent.var)
})
tcltk::tkgrid(frame5.lab.1, frame5.scl.2, frame5.ent.3)
tcltk::tkgrid.configure(frame5.scl.2, sticky="we", padx=c(4, 10))
tcltk::tkgrid.columnconfigure(frame5, 1, weight=1)
tcltk::tkpack(frame5, fill="x", padx=10, pady=10)
# Frame 6, example plots and reverse colors
frame6 <- tcltk::ttklabelframe(tt, relief="flat", borderwidth=5, padding=5,
text="Show example")
frame6.lab.1 <- tcltk::ttklabel(frame6, text="Plot type")
frame6.box.2 <- tcltk::ttkcombobox(frame6, state="readonly",
textvariable=example.var,
values=example.plots)
frame6.chk.3 <- tcltk::ttkcheckbutton(frame6, text="Reverse colors",
variable=reverse.var, command=ShowExample)
tcltk::tkgrid(frame6.lab.1, frame6.box.2, frame6.chk.3)
tcltk::tkgrid.configure(frame6.box.2, padx=c(2, 10), sticky="we")
tcltk::tkgrid.columnconfigure(frame6, 1, weight=1)
tcltk::tkpack(frame6, fill="x", padx=10, pady=0)
# Frame 7, color palette and robustness checks
frame7 <- tcltk::ttkframe(tt, relief="flat")
frame7.cvs <- tcltk::tkcanvas(frame7, relief="flat",
width=cvs.width + 1, height=cvs.height + 1,
background="black", confine=TRUE, closeenough=0,
borderwidth=0, highlightthickness=0)
tcltk::tkgrid(frame7.cvs, padx=10, pady=c(12,0))
frame7.chk.1 <- tcltk::ttkcheckbutton(frame7, text="Desaturation",
variable=desaturation.var,
command=function() DrawPalette(is.n=TRUE))
frame7.chk.2 <- tcltk::ttkcheckbutton(frame7, text="Color blindness:",
variable=colorblind.var,
command=function() DrawPalette(is.n=TRUE))
frame7.rb.3 <- tcltk::ttkradiobutton(frame7, variable=colorblind.type.var,
value="deutan", text="deutan",
command=function() DrawPalette(is.n=TRUE))
frame7.rb.4 <- tcltk::ttkradiobutton(frame7, variable=colorblind.type.var,
value="protan", text="protan",
command=function() DrawPalette(is.n=TRUE))
frame7.rb.5 <- tcltk::ttkradiobutton(frame7, variable=colorblind.type.var,
value="tritan", text="tritan",
command=function() DrawPalette(is.n=TRUE))
tcltk::tkgrid(frame7.chk.1, frame7.chk.2, frame7.rb.3, frame7.rb.4, frame7.rb.5, "x",
pady=c(2, 0), sticky="w")
tcltk::tkgrid.configure(frame7.chk.2, padx=c(7, 0))
tcltk::tkgrid.configure(frame7.cvs, columnspan=5)
tcltk::tkgrid.columnconfigure(frame7, 4, weight=1)
tcltk::tkgrid.configure(frame7.chk.1, padx=c(10, 0))
tcltk::tkpack(frame7, fill="x")
# Initial commands
ConvertPaletteToAttributes(pal)
UpdateDataType()
# Bind events
tcltk::tclServiceMode(TRUE)
tcltk::tkbind(tt, "<Control-o>", OpenPaletteFromFile)
tcltk::tkbind(tt, "<Shift-Control-S>", SavePaletteToFile)
tcltk::tkbind(frame1.box.2, "<<ComboboxSelected>>", UpdateDataType)
tcltk::tkbind(frame6.box.2, "<<ComboboxSelected>>", ShowExample)
tcltk::tkbind(frame2.cvs, "<ButtonPress>", function(x, y) SelectDefaultPalette(x, y))
tcltk::tkbind(frame3.ent.1.3, "<KeyRelease>",
function() EntryChange("h1", h.lim, h1.ent.var, h1.scl.var))
tcltk::tkbind(frame3.ent.2.3, "<KeyRelease>",
function() EntryChange("h2", h.lim, h2.ent.var, h2.scl.var))
tcltk::tkbind(frame3.ent.3.3, "<KeyRelease>",
function() EntryChange("c1", c.lim, c1.ent.var, c1.scl.var))
tcltk::tkbind(frame3.ent.4.3, "<KeyRelease>",
function() EntryChange("c2", c.lim, c2.ent.var, c2.scl.var))
tcltk::tkbind(frame3.ent.5.3, "<KeyRelease>",
function() EntryChange("l1", l.lim, l1.ent.var, l1.scl.var))
tcltk::tkbind(frame3.ent.6.3, "<KeyRelease>",
function() EntryChange("l2", l.lim, l2.ent.var, l2.scl.var))
tcltk::tkbind(frame3.ent.7.3, "<KeyRelease>",
function() EntryChange("p1", p.lim, p1.ent.var, p1.scl.var))
tcltk::tkbind(frame3.ent.8.3, "<KeyRelease>",
function() EntryChange("p2", p.lim, p2.ent.var, p2.scl.var))
tcltk::tkbind(frame5.ent.3, "<KeyRelease>",
function() EntryChange("n", n.lim, n.ent.var, n.scl.var))
tcltk::tkbind(tt, "<Destroy>", function() tcltk::tclvalue(tt.done.var) <- 1)
# GUI control
tcltk::tkfocus(tt)
tcltk::tkgrab(tt)
tcltk::tkwait.variable(tt.done.var)
tcltk::tclServiceMode(FALSE)
tcltk::tkgrab.release(tt)
tcltk::tkdestroy(tt)
tcltk::tclServiceMode(TRUE)
if (dev.example %in% dev.list())
dev.off(which=dev.example)
invisible(pal.rtn)
}
# Get color palette as function of n.
#
# Returns a palette-generating function for the given palette `type`
# ("Qualitative"/"qual", "Sequential (single hue)"/"seqs",
# "Sequential (multiple hues)"/"seqm", or "Diverging"/"dive").
# The current hue/chroma/luminance/power/fixup values are frozen into the
# returned function's formal arguments via formals<- with substitute()/alist(),
# so the result only takes `n` (plus the usual optional palette arguments).
# NOTE(review): if `type` matches none of the branches, `f` is unbound and
# the final `f` errors -- callers appear to only pass the four known types.
GetPalette <- function(type, h1, h2, c1, c2, l1, l2, p1, p2, fixup) {
  # Tk checkbutton state arrives as 0/1; palette functions expect a logical
  fixup <- as.logical(fixup)
  #type <- as.character(tcltk::tclvalue(nature.var))
  if (type %in% c("Qualitative","qual")) {
    # Qualitative: constant chroma/luminance, hues from h1 (start) to h2 (end)
    f <- qualitative_hcl
    formals(f) <- eval(substitute(alist(n=, h=hh, c=d1, l=d2, start=d3, end=d4,
                                        fixup=d5, gamma=NULL, alpha=1,
                                        palette=NULL, rev=FALSE, ...=,
                                        h1=, h2=, c1=, l1=),
                                  list(hh = c(0,360),
                                       d1=c1, d2=l1, d3=h1, d4=h2, d5=fixup)))
  } else if (type %in% c("seqs","Sequential (single hue)")) {
    # Single-hue sequential: fixed hue h1, luminance range (l1, l2), power p1
    f <- sequential_hcl
    formals(f) <- eval(substitute(alist(n=, h=d1, c=d2, l=d3, power=d4,
                                        gamma=NULL, fixup=d5, alpha=1,
                                        palette=NULL, rev=FALSE, ...=,
                                        h1=, h2=, c1=, c2=, l1=, l2=, p1=, p2=, c.=),
                                  list(d1=h1, d2=c1, d3=c(l1, l2),
                                       d4=p1, d5=fixup)))
  } else if (type %in% c("seqm","Sequential (multiple hues)")) {
    # Multi-hue sequential: hue range (h1, h2), chroma (c1, c2),
    # luminance (l1, l2), powers (p1, p2)
    f <- sequential_hcl
    formals(f) <- eval(substitute(alist(n=, h=d1, c=d2, l=d3, power=d4,
                                        gamma=NULL, fixup=d5, alpha=1,
                                        palette=NULL, rev=FALSE, ...=,
                                        h1=, h2=, c1=, c2=, l1=, l2=, p1=, p2=, c.=),
                                  list(d1=c(h1, h2), d2=c(c1, c2),
                                       d3=c(l1, l2), d4=c(p1, p2), d5=fixup)))
  } else if (type %in% c("dive","Diverging")) {
    # Diverging: two hues (h1, h2), shared maximal chroma c1,
    # luminance (l1, l2), power p1
    f <- diverge_hcl
    formals(f) <- eval(substitute(alist(n=, h=d1, c=d2, l=d3, power=d4,
                                        gamma=NULL, fixup=d5, alpha=1,
                                        palette=NULL, rev=FALSE, ...=,
                                        h1=, h2=, c1=, l1=, l2=, p1=, p2=),
                                  list(d1=c(h1, h2), d2=c1, d3=c(l1, l2),
                                       d4=p1, d5=fixup)))
  }
  f
}
|
/colorspace/R/choose_palette.R
|
no_license
|
docker-oilgains/clauswilke.dataviz.2018
|
R
| false
| false
| 36,361
|
r
|
#' Graphical User Interface for Choosing HCL Color Palettes
#'
#' A graphical user interface (GUI) for viewing, manipulating, and choosing HCL
#' color palettes.
#'
#' Computes palettes based on the HCL (hue-chroma-luminance) color model (as
#' implemented by \code{\link{polarLUV}}). The GUIs interface the palette
#' functions \code{\link{qualitative_hcl}} for qualitative palettes,
#' \code{\link{sequential_hcl}} for sequential palettes with a single or
#' multiple hues, and \code{\link{diverge_hcl}} for diverging palettes (composed
#' from two single-hue sequential palettes).
#'
#' Two different GUIs are implemented and can be selected using the function
#' input argument \code{gui} (\code{"tcltk"} or \code{"shiny"}). Both GUIs
#' allow for interactive modification of the arguments of the respective
#' palette-generating functions, i.e., starting/ending hue (wavelength, type of
#' color), minimal/maximal chroma (colorfulness), minimal/maximal luminance
#' (brightness, amount of gray), and power transformations that control how
#' quickly/slowly chroma and/or luminance are changed through the palette.
#' Subsets of the parameters may not be applicable depending on the type of
#' palette chosen. See \code{\link{qualitative_hcl}} and Zeileis et al. (2009) for
#' a more detailed explanation of the different arguments. Stauffer et al.
#' (2015) provide more examples and guidance.
#'
#' Optionally, the active palette can be illustrated by using a range of
#' examples such as a map, heatmap, scatter plot, perspective 3D surface etc.
#'
#' To demonstrate different types of deficiencies, the active palette may be
#' desaturated (emulating printing on a grayscale printer) and collapsed to
#' emulate different types of color-blindness (without red-green or green-blue
#' contrasts) using the \code{\link{simulate_cvd}} functions.
#'
#' @param pal function; the initial palette, see \sQuote{Value} below. Only
#' used if \code{gui = "tcltk"}.
#' @param n integer; the initial number of colors in the palette.
#' @param parent tkwin; the GUI parent window. Only used if \code{gui =
#' "tcltk"}.
#' @param gui character; GUI to use. Available options are \code{tcltk} and
#' \code{shiny}, see \sQuote{Details} below.
#' @param shiny.trace boolean, default \code{FALSE}. Used for debugging if
#' \code{gui = "shiny"}.
#' @return Returns a palette-generating function with the selected arguments.
#' Thus, the returned function takes an integer argument and returns the
#' corresponding number of HCL colors by traversing HCL space through
#' interpolation of the specified hue/chroma/luminance/power values.
#' @author Jason C. Fisher, Reto Stauffer, Achim Zeileis
#' @seealso \code{\link{simulate_cvd}}, \code{\link{desaturate}}, \code{\link{qualitative_hcl}}.
#' @references Zeileis A, Hornik K, Murrell P (2009). Escaping RGBland:
#' Selecting Colors for Statistical Graphics. \emph{Computational Statistics &
#' Data Analysis}, \bold{53}, 3259--3270.
#' \doi{10.1016/j.csda.2008.11.033}
#' Preprint available from
#' \url{https://eeecon.uibk.ac.at/~zeileis/papers/Zeileis+Hornik+Murrell-2009.pdf}.
#'
#' Stauffer R, Mayr GJ, Dabernig M, Zeileis A (2015). Somewhere over the
#' Rainbow: How to Make Effective Use of Colors in Meteorological
#' Visualizations. \emph{Bulletin of the American Meteorological Society},
#' \bold{96}(2), 203--216.
#' \doi{10.1175/BAMS-D-13-00155.1}
#' @keywords misc
#' @examples
#' if(interactive()) {
#' ## Using tcltk GUI
#' pal <- choose_palette()
#' ## or equivalently: hclwizard(gui = "tcltk")
#'
#' ## Using shiny GUI
#' pal <- hclwizard()
#' ## or equivalently: choose_palette(gui = "shiny")
#'
#' ## use resulting palette function
#' filled.contour(volcano, color.palette = pal, asp = 1)
#' }
#' @importFrom grDevices dev.cur dev.list dev.new dev.off dev.set
#' @export
choose_palette <- function(pal = diverge_hcl, n = 7L, parent = NULL, gui = "tcltk") {
  # Validate the requested GUI and dispatch to the matching backend
  # (choose_palette_tcltk or choose_palette_shiny), forwarding the
  # initial palette, palette size, and parent window.
  gui <- match.arg(gui, c("tcltk", "shiny"))
  backend <- paste0("choose_palette_", gui)
  do.call(backend, list(pal = pal, n = n, parent = parent))
}
#' @rdname choose_palette
#' @export
hclwizard <- function(n = 7L, gui = "shiny", shiny.trace = FALSE) {
  # Validate the requested GUI and dispatch to the matching backend,
  # forwarding the palette size and the shiny tracing flag.
  gui <- match.arg(gui, c("tcltk", "shiny"))
  backend <- paste0("choose_palette_", gui)
  do.call(backend, list(n = n, shiny.trace = shiny.trace))
}
#' @rdname choose_palette
#' @export
hcl_wizard <- function(n = 7L, gui = "shiny", shiny.trace = FALSE) {
  # Alias for hclwizard(). The previous revision hard-coded
  # shiny.trace = FALSE, silently discarding the caller's setting;
  # the argument is now forwarded.
  hclwizard(n = n, gui = gui, shiny.trace = shiny.trace)
}
# hclwizard shiny GUI for selecting a color palette.
#
# Launches the bundled "hclwizard" shiny app and returns whatever the app
# returns on exit (the selected palette). The initial palette size `n` is
# handed to the app via the "hclwizard_Ninit" environment variable.
#
# Fix: the environment variable and the global `shiny.trace` option are now
# restored via on.exit(), so they no longer leak if runApp() exits with an
# error (previously the option was never restored at all).
choose_palette_shiny <- function(pal, shiny.trace = FALSE, n = 7L, ...) {
  # Requirements for shiny application
  stopifnot(requireNamespace("shiny"), requireNamespace("shinyjs"))
  appDir <- system.file("hclwizard", package = "colorspace")
  if (appDir == "")
    stop("Could not find hclwizard app directory. Try re-installing `colorspace`.", call. = FALSE)
  # Hand over the initial palette size and enable tracing; both are global
  # state, so schedule their restoration before starting the app.
  Sys.setenv("hclwizard_Ninit" = n)
  on.exit(Sys.unsetenv("hclwizard_Ninit"), add = TRUE)
  old_opts <- options(shiny.trace = shiny.trace)
  on.exit(options(old_opts), add = TRUE)
  # Start shiny; blocks until the app is closed.
  pal <- shiny::runApp(appDir, display.mode = "normal", quiet = TRUE)
  return(pal)
}
# tcltk GUI for selecting a color palette
choose_palette_tcltk <- function( pal = diverge_hcl, n=7L, parent = NULL, ... ) {
# Choose a file interactively
ChooseFile <- function(cmd, win.title, initialfile=NULL,
defaultextension=NULL) {
filetypes <- "{{R Source Files} {.R}} {{All files} {*}}"
if (cmd == "Open") {
args <- list("tk_getOpenFile")
} else {
args <- list("tk_getSaveFile")
if (defaultextension == ".txt")
filetypes <- "{{Text Files} {.txt}} {{All files} {*}}"
}
args[["title"]] <- win.title
args[["parent"]] <- tt
args[["initialdir"]] <- initialdir
args[["filetypes"]] <- filetypes
if (!is.null(initialfile))
args[["initialfile"]] <- initialfile
if (!is.null(defaultextension))
args[["defaultextension"]] <- defaultextension
f <- tcltk::tclvalue(do.call(tcltk::tcl, args))
if (!nzchar(f))
return()
initialdir <<- dirname(f)
f
}
# Open palette from file
OpenPaletteFromFile <- function() {
f <- ChooseFile(cmd="Open", win.title="Open Palette File")
if (is.null(f))
return()
pal <- dget(file=f)
ConvertPaletteToAttributes(pal)
AssignAttributesToWidgets()
UpdateDataType()
}
# Save palette to file
SavePaletteToFile <- function() {
f <- ChooseFile(cmd="Save As", win.title="Save Palette As",
initialfile="color_palette", defaultextension=".R")
if (is.null(f))
return()
type <- as.character(tcltk::tclvalue(nature.var))
pal <- GetPalette(type, h1, h2, c1, c2, l1, l2, p1, p2, fixup)
dput(pal, file=f)
}
# Save colors to file
SaveColorsToFile <- function(type) {
type <- as.character(tcltk::tclvalue(nature.var))
pal <- GetPalette(type, h1, h2, c1, c2, l1, l2, p1, p2, fixup)
cols <- try(hex2RGB(pal(n)), silent=TRUE)
if (inherits(cols, "try-error")) {
msg <- "Palette results in invaild hexadecimal colors."
tcltk::tkmessageBox(icon="error", message=msg, title="Color Error",
parent=tt)
return()
}
f <- ChooseFile(cmd="Save As", win.title="Save Colors As",
initialfile=paste("colors_", type, sep=""),
defaultextension=".txt")
if (is.null(f))
return()
if (type == "HEX") {
writehex(cols, file=f)
} else {
if (type == "sRGB") {
cols <- as(cols, "sRGB")@coords
} else if (type == "HSV") {
cols <- as(cols, "HSV")@coords
} else if (type == "HCL") {
cols <- as(cols, "polarLUV")@coords
} else if (type == "CMYK") {
cols <- as(cols, "RGB")@coords
red <- cols[, "R"]
green <- cols[, "G"]
blue <- cols[, "B"]
black <- sapply(1:n, function(i) min(c(1 - red[i], 1 - green[i],
1 - blue[i])))
cyan <- (1 - red - black) / (1 - black)
magenta <- (1 - green - black) / (1 - black)
yellow <- (1 - blue - black) / (1 - black)
cols <- as.matrix(as.data.frame(list(C=cyan, M=black, Y=yellow,
K=black)))
}
utils::write.table(cols, file=f, quote=FALSE, row.names=FALSE, sep="\t")
}
}
# Save palette and quit
SavePalette <- function() {
type <- as.character(tcltk::tclvalue(nature.var))
pal.rtn <<- GetPalette(type, h1, h2, c1, c2, l1, l2, p1, p2, fixup)
tcltk::tclvalue(tt.done.var) <- 1
}
# Scale change
ScaleChange <- function(x, v, x.ent.var) {
if (x == get(v))
return()
assign(v, x, inherits=TRUE)
fmt <- ifelse(v %in% c("p1", "p2"), "%.1f", "%.0f")
tcltk::tclvalue(x.ent.var) <- sprintf(fmt, x)
DrawPalette(v == "n")
}
# Entry change
EntryChange <- function(v, x.lim, x.ent.var, x.scl.var) {
x <- suppressWarnings(as.numeric(tcltk::tclvalue(x.ent.var)))
if (is.na(x))
return()
if (x < x.lim[1]) {
tcltk::tclvalue(x.ent.var) <- x.lim[1]
x <- x.lim[1]
} else if (x > x.lim[2]) {
tcltk::tclvalue(x.ent.var) <- x.lim[2]
x <- x.lim[2]
}
assign(v, x, inherits=TRUE)
tcltk::tclvalue(x.scl.var) <- x
DrawPalette(v == "n")
}
# Helper function to create the hex palettes.
# Generates "n" colors from palette "pal" and manipulates them
# if desaturation or CVD simulation is required.
get_hex_colors <- function(pal,n) {
pal.cols <- pal(n)
pal.cols[is.na(pal.cols)] <- "#FFFFFF"
if (as.logical(as.integer(tcltk::tclvalue(desaturation.var))))
pal.cols <- desaturate(pal.cols)
if (as.logical(as.integer(tcltk::tclvalue(colorblind.var)))) {
type <- as.character(tcltk::tclvalue(colorblind.type.var))
pal.cols <- do.call(type,list("col"=pal.cols))
}
pal.cols
}
# Draw palette
DrawPalette <- function(is.n=FALSE) {
type <- as.character(tcltk::tclvalue(nature.var))
pal <- GetPalette(type, h1, h2, c1, c2, l1, l2, p1, p2, fixup)
if (!is.n)
tcltk::tcl(frame2.cvs, "delete", "browse")
tcltk::tcl(frame7.cvs, "delete", "pal")
# Reto, Nov 2016: outsourced
pal.cols <- get_hex_colors(pal,n)
dx <- (cvs.width - 1) / n
x2 <- 1
y1 <- 1
y2 <- cvs.height
for (i in pal.cols) {
x1 <- x2
x2 <- x1 + dx
pts <- tcltk::.Tcl.args(c(x1, y1, x2, y1, x2, y2, x1, y2))
tcltk::tkcreate(frame7.cvs, "polygon", pts, fill=i, tag="pal")
}
RegenExample(pal,n)
}
# Update data type
UpdateDataType <- function() {
type <- as.character(tcltk::tclvalue(nature.var))
if (type == "Qualitative") {
is.normal <- c(TRUE, FALSE, FALSE, FALSE, FALSE)
default.pals <<- qual.pals
} else if (type == "Sequential (single hue)") {
is.normal <- c(FALSE, TRUE, TRUE, TRUE, FALSE)
default.pals <<- seqs.pals
} else if (type == "Sequential (multiple hues)") {
is.normal <- c(TRUE, TRUE, TRUE, TRUE, TRUE)
default.pals <<- seqm.pals
} else if (type == "Diverging") {
is.normal <- c(TRUE, FALSE, TRUE, TRUE, FALSE)
default.pals <<- dive.pals
}
# Default palettes
tcltk::tcl(frame2.cvs, "delete", "default")
x1 <- 10
for (i in 1:length(default.pals)) {
# Create numeric palette parameter list, drop name
args <- as.list(as.list(default.pals[[i]][-10]))
args[['type']] <- as.character(tcltk::tclvalue(nature.var))
pal <- do.call(GetPalette, args=args)
pal.cols <- pal(5)
pal.cols[is.na(pal.cols)] <- "#FFFFFF"
y2 <- 10
for (j in pal.cols) {
x2 <- x1 + 20
y1 <- y2
y2 <- y1 + 10
pts <- tcltk::.Tcl.args(c(x1, y1, x2, y1, x2, y2, x1, y2))
tcltk::tkcreate(frame2.cvs, "polygon", pts, fill=j, tag="default")
}
x1 <- x1 + 30
}
s <- ifelse(is.normal, "normal", "disabled")
tcltk::tkconfigure(frame3.lab.2.1, state=s[1])
tcltk::tkconfigure(frame3.lab.4.1, state=s[2])
tcltk::tkconfigure(frame3.lab.6.1, state=s[3])
tcltk::tkconfigure(frame3.lab.7.1, state=s[4])
tcltk::tkconfigure(frame3.lab.8.1, state=s[5])
tcltk::tkconfigure(frame3.ent.2.3, state=s[1])
tcltk::tkconfigure(frame3.ent.4.3, state=s[2])
tcltk::tkconfigure(frame3.ent.6.3, state=s[3])
tcltk::tkconfigure(frame3.ent.7.3, state=s[4])
tcltk::tkconfigure(frame3.ent.8.3, state=s[5])
s <- ifelse(is.normal, "!disabled", "disabled")
tcltk::tcl(frame3.scl.2.2, "state", s[1])
tcltk::tcl(frame3.scl.4.2, "state", s[2])
tcltk::tcl(frame3.scl.6.2, "state", s[3])
tcltk::tcl(frame3.scl.7.2, "state", s[4])
tcltk::tcl(frame3.scl.8.2, "state", s[5])
DrawPalette()
}
# Select default palette
SelectDefaultPalette <- function(x, y) {
x <- as.numeric(x)
y <- as.numeric(y)
if (is.na(x) | is.na(y))
return()
y1 <- 5
y2 <- 65
if (y < y1 | y > y2)
return()
max.x <- length(default.pals) * 30 + 10
if (x < 5 | x > max.x)
return()
x.seq <- seq(5, max.x, by=30)
i <- findInterval(x, x.seq, rightmost.closed=TRUE)
x1 <- x.seq[i]
x2 <- x.seq[i + 1]
for (j in 1:length(vars)) {
if ( vars[j] == "name" ) next
val <- as.numeric(default.pals[[i]][j])
if (is.na(val))
val <- 0
assign(vars[j], val, inherits=TRUE)
}
AssignAttributesToWidgets()
DrawPalette()
pts <- tcltk::.Tcl.args(c(x1, y1, x2, y1, x2, y2, x1, y2) - 0.5)
tcltk::tkcreate(frame2.cvs, "polygon", pts, fill="", outline="black", tag="browse")
}
# Convert palette to attributes
  ConvertPaletteToAttributes <- function(pal) {
    # Reverse-engineer a palette function's formal arguments into the GUI's
    # flat attribute vector (h1, h2, c1, c2, l1, l2, p1, p2, fixup) and set
    # the matching "nature" combobox value. Falls back to a default
    # sequential multi-hue palette when 'pal' cannot be recognized.
    pal.attributes <- NULL
    if (inherits(pal, "function")) {
      what <- c("numeric", "integer")
      # Signature fingerprints for each palette family: a palette is
      # classified by which of these formals carry numeric defaults.
      q.args <- c("c", "l", "start", "end")
      d.args <- c("h", "c", "l", "power")
      s.args <- c("h", "c.", "l", "power")
      # Evaluate call-valued defaults (e.g. c(260, 80)) to plain values.
      arg <- sapply(formals(pal), function(i) {if (is.call(i)) eval(i) else i})
      if (!is.null(arg$fixup) && is.logical(arg$fixup))
        fix.up <- as.integer(arg$fixup)
      else
        fix.up <- 1
      # NA entries pad the vector so each family fills the same 9 slots.
      if (all(sapply(q.args, function(i) inherits(arg[[i]], what)))) {
        tcltk::tclvalue(nature.var) <- "Qualitative"
        pal.attributes <- c(arg$start, arg$end, arg$c, NA, arg$l, NA, NA, NA, fix.up)
      } else if (all(sapply(s.args, function(i) inherits(arg[[i]], what)))) {
        # Scalar hue and power means a single-hue sequential palette;
        # vectors mean the multi-hue variant.
        if (length(arg$h) == 1 && length(arg$p) == 1) {
          tcltk::tclvalue(nature.var) <- "Sequential (single hue)"
          pal.attributes <- c(arg$h, NA, arg$c., arg$l, arg$power, NA, fix.up)
        } else {
          tcltk::tclvalue(nature.var) <- "Sequential (multiple hues)"
          pal.attributes <- c(arg$h, arg$c., arg$l, arg$power, fix.up)
        }
      } else if (all(sapply(d.args, function(i) inherits(arg[[i]], what)))) {
        tcltk::tclvalue(nature.var) <- "Diverging"
        pal.attributes <- c(arg$h, arg$c, NA, arg$l, arg$power, NA, fix.up)
      }
    }
    if (is.null(pal.attributes)) {
      # Unrecognized palette: use the 4th sequential multi-hue default.
      tcltk::tclvalue(nature.var) <- "Sequential (multiple hues)"
      pal.attributes <- seqm.pals[[4]]
    }
    # Write each attribute into its closure variable; NA becomes 0.
    for (i in 1:length(vars)) {
      if (is.na(pal.attributes[i]))
        assign(vars[i], 0, inherits=TRUE)
      else
        assign(vars[i], pal.attributes[i], inherits=TRUE)
    }
    AssignAttributesToWidgets()
  }
# Assign attributes to widgets
AssignAttributesToWidgets <- function() {
tcltk::tclvalue(h1.ent.var) <- sprintf("%.0f", h1)
tcltk::tclvalue(h2.ent.var) <- sprintf("%.0f", h2)
tcltk::tclvalue(c1.ent.var) <- sprintf("%.0f", c1)
tcltk::tclvalue(c2.ent.var) <- sprintf("%.0f", c2)
tcltk::tclvalue(l1.ent.var) <- sprintf("%.0f", l1)
tcltk::tclvalue(l2.ent.var) <- sprintf("%.0f", l2)
tcltk::tclvalue(p1.ent.var) <- sprintf("%.1f", p1)
tcltk::tclvalue(p2.ent.var) <- sprintf("%.1f", p2)
tcltk::tclvalue(h1.scl.var) <- h1
tcltk::tclvalue(h2.scl.var) <- h2
tcltk::tclvalue(c1.scl.var) <- c1
tcltk::tclvalue(c2.scl.var) <- c2
tcltk::tclvalue(l1.scl.var) <- l1
tcltk::tclvalue(l2.scl.var) <- l2
tcltk::tclvalue(p1.scl.var) <- p1
tcltk::tclvalue(p2.scl.var) <- p2
tcltk::tclvalue(fixup.var) <- fixup
}
# Show example plot
ShowExample <- function() {
if (!dev.example %in% dev.list()) {
dev.new(width=7L, height=7L)
dev.example <<- dev.cur()
}
par(oma=c(0, 0, 0, 0), mar=c(0, 0, 0, 0))
DrawPalette(is.n=TRUE)
}
# Regenerate example plot
RegenExample <- function(pal,n) {
if (dev.example %in% dev.list())
dev.set(which=dev.example)
else
return()
plot_example <- eval(parse(text=sprintf("plot_%s", tolower(tcltk::tclvalue(example.var)))))
# Reto, Nov 2016: Picking colors. For 'Example Spectrum' 100 colors
# will be choosen (overruling input "n").
if ( tcltk::tclvalue(example.var) == "Spectrum" ) n <- 100
pal.cols <- get_hex_colors(pal,n)
if (as.logical(as.integer(tcltk::tclvalue(reverse.var))))
pal.cols <- rev(pal.cols)
plot_example(pal.cols)
}
# Main program
# Initialize directory
initialdir <- getwd()
# Initialize return palette
pal.rtn <- NULL
# Initialize default palettes
default.pals <- NULL
# Initialize data for scatter plot example
xyhclust <- NULL
# Initialize data for mosaic plot example
msc.matrix <- NULL
# Flag graphics device
dev.example <- 1
# Set default and initial palettes
h1 <- h2 <- c1 <- c2 <- l1 <- l2 <- p1 <- p2 <- 0
fixup <- 1
#vars <- c("h1", "h2", "c1", "c2", "l1", "l2", "p1", "p2", "fixup")
# Load/Define palettes
vars <- vars.pal
qual.pals <- qual.pals
seqs.pals <- seqs.pals
seqm.pals <- seqm.pals
dive.pals <- dive.pals
# Set limits for palette attributes
n.lim <- c( 1, 50)
h.lim <- c(-360, 360)
c.lim <- c( 0, 100)
l.lim <- c( 0, 100)
p.lim <- c( 0, 3)
# Set dimensions on palette canvas
cvs.width <- 328 # 30 * 10 + 10 + 18
cvs.height <- 25
# Assign additional variables linked to Tk widgets
example.var <- tcltk::tclVar()
nature.var <- tcltk::tclVar()
n.scl.var <- tcltk::tclVar(n)
n.ent.var <- tcltk::tclVar(n)
h1.scl.var <- tcltk::tclVar()
h1.ent.var <- tcltk::tclVar()
h2.scl.var <- tcltk::tclVar()
h2.ent.var <- tcltk::tclVar()
c1.scl.var <- tcltk::tclVar()
c1.ent.var <- tcltk::tclVar()
c2.scl.var <- tcltk::tclVar()
c2.ent.var <- tcltk::tclVar()
l1.scl.var <- tcltk::tclVar()
l1.ent.var <- tcltk::tclVar()
l2.scl.var <- tcltk::tclVar()
l2.ent.var <- tcltk::tclVar()
p1.scl.var <- tcltk::tclVar()
p1.ent.var <- tcltk::tclVar()
p2.scl.var <- tcltk::tclVar()
p2.ent.var <- tcltk::tclVar()
fixup.var <- tcltk::tclVar(fixup)
reverse.var <- tcltk::tclVar(FALSE)
desaturation.var <- tcltk::tclVar(FALSE)
colorblind.var <- tcltk::tclVar(FALSE)
colorblind.type.var <- tcltk::tclVar("deutan")
tt.done.var <- tcltk::tclVar(0)
# Open GUI
tcltk::tclServiceMode(FALSE)
tt <- tcltk::tktoplevel()
if (!is.null(parent)) {
tcltk::tkwm.transient(tt, parent)
geo <- unlist(strsplit(as.character(tcltk::tkwm.geometry(parent)), "\\+"))
tcltk::tkwm.geometry(tt, paste("+", as.integer(geo[2]) + 25,
"+", as.integer(geo[3]) + 25, sep=""))
}
tcltk::tkwm.resizable(tt, 0, 0)
tcltk::tktitle(tt) <- "Choose Color Palette"
# Top file menu
top.menu <- tcltk::tkmenu(tt, tearoff=0)
menu.file <- tcltk::tkmenu(tt, tearoff=0)
tcltk::tkadd(top.menu, "cascade", label="File", menu=menu.file, underline=0)
tcltk::tkadd(menu.file, "command", label="Open palette", accelerator="Ctrl+O",
command=OpenPaletteFromFile)
tcltk::tkadd(menu.file, "command", label="Save palette as",
accelerator="Shift+Ctrl+S", command=SavePaletteToFile)
menu.file.colors <- tcltk::tkmenu(tt, tearoff=0)
tcltk::tkadd(menu.file.colors, "command", label="HEX",
command=function() SaveColorsToFile("HEX"))
tcltk::tkadd(menu.file.colors, "command", label="sRGB",
command=function() SaveColorsToFile("sRGB"))
tcltk::tkadd(menu.file.colors, "command", label="HSV",
command=function() SaveColorsToFile("HSV"))
tcltk::tkadd(menu.file.colors, "command", label="HCL",
command=function() SaveColorsToFile("HCL"))
tcltk::tkadd(menu.file.colors, "command", label="CMYK",
command=function() SaveColorsToFile("CMYK"))
tcltk::tkadd(menu.file, "cascade", label="Save colors as", menu=menu.file.colors)
tcltk::tkconfigure(tt, menu=top.menu)
# Frame 0, ok and cancel buttons
frame0 <- tcltk::ttkframe(tt, relief="flat")
frame0.but.3 <- tcltk::ttkbutton(frame0, width=12, text="OK", command=SavePalette)
frame0.but.4 <- tcltk::ttkbutton(frame0, width=12, text="Cancel",
command=function() {
pal.rtn <<- NULL
tcltk::tclvalue(tt.done.var) <- 1
})
tcltk::tkgrid("x", frame0.but.3, frame0.but.4, pady=c(10, 10))
tcltk::tkgrid.configure(frame0.but.3, sticky="e")
tcltk::tkgrid.configure(frame0.but.4, sticky="w", padx=c(4, 10))
tcltk::tkgrid.columnconfigure(frame0, 0, weight=1)
tcltk::tkpack(frame0, fill="x", side="bottom", anchor="e")
# Frame 1, choose nature of data
frame1 <- tcltk::ttkframe(tt, relief="flat")
frame1.lab.1 <- tcltk::ttklabel(frame1, text="The nature of your data")
frame1.box.2 <- tcltk::ttkcombobox(frame1, state="readonly", textvariable=nature.var,
values=c("Qualitative", "Sequential (single hue)",
"Sequential (multiple hues)",
"Diverging"))
tcltk::tkgrid(frame1.lab.1, frame1.box.2, pady=c(10, 0))
tcltk::tkgrid.configure(frame1.lab.1, padx=c(10, 2))
tcltk::tkgrid.configure(frame1.box.2, padx=c(0, 10), sticky="we")
tcltk::tkgrid.columnconfigure(frame1, 1, weight=1)
tcltk::tkpack(frame1, fill="x")
# Frame 2, default color schemes
frame2 <- tcltk::ttklabelframe(tt, relief="flat", borderwidth=5, padding=5,
text="Default color schemes")
frame2.cvs <- tcltk::tkcanvas(frame2, relief="flat", width=30 * 10 + 10, height=70,
background="white", confine=TRUE, closeenough=0,
borderwidth=0, highlightthickness=0)
tcltk::tkgrid(frame2.cvs, sticky="we")
tcltk::tkgrid.columnconfigure(frame2, 0, weight=1)
tcltk::tkpack(frame2, fill="x", padx=10, pady=10)
# Frame 3, color description
txt <- "Palette description: Hue, Chroma, Luminance, Power"
frame3 <- tcltk::ttklabelframe(tt, relief="flat", borderwidth=5, padding=5, text=txt)
frame3.lab.1.1 <- tcltk::ttklabel(frame3, text="H1", width=2)
frame3.lab.2.1 <- tcltk::ttklabel(frame3, text="H2", width=2)
frame3.lab.3.1 <- tcltk::ttklabel(frame3, text="C1", width=2)
frame3.lab.4.1 <- tcltk::ttklabel(frame3, text="C2", width=2)
frame3.lab.5.1 <- tcltk::ttklabel(frame3, text="L1", width=2)
frame3.lab.6.1 <- tcltk::ttklabel(frame3, text="L2", width=2)
frame3.lab.7.1 <- tcltk::ttklabel(frame3, text="P1", width=2)
frame3.lab.8.1 <- tcltk::ttklabel(frame3, text="P2", width=2)
frame3.ent.1.3 <- tcltk::ttkentry(frame3, textvariable=h1.ent.var, width=4)
frame3.ent.2.3 <- tcltk::ttkentry(frame3, textvariable=h2.ent.var, width=4)
frame3.ent.3.3 <- tcltk::ttkentry(frame3, textvariable=c1.ent.var, width=4)
frame3.ent.4.3 <- tcltk::ttkentry(frame3, textvariable=c2.ent.var, width=4)
frame3.ent.5.3 <- tcltk::ttkentry(frame3, textvariable=l1.ent.var, width=4)
frame3.ent.6.3 <- tcltk::ttkentry(frame3, textvariable=l2.ent.var, width=4)
frame3.ent.7.3 <- tcltk::ttkentry(frame3, textvariable=p1.ent.var, width=4)
frame3.ent.8.3 <- tcltk::ttkentry(frame3, textvariable=p2.ent.var, width=4)
frame3.scl.1.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=h.lim[1], to=h.lim[2],
orient="horizontal", value=h1, variable=h1.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="h1",
x.ent.var=h1.ent.var)
})
frame3.scl.2.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=h.lim[1], to=h.lim[2],
orient="horizontal", value=h2, variable=h2.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="h2",
x.ent.var=h2.ent.var)
})
frame3.scl.3.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=c.lim[1], to=c.lim[2],
orient="horizontal", value=c1, variable=c1.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="c1",
x.ent.var=c1.ent.var)
})
frame3.scl.4.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=c.lim[1], to=c.lim[2],
orient="horizontal", value=c2, variable=c2.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="c2",
x.ent.var=c2.ent.var)
})
frame3.scl.5.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=l.lim[1], to=l.lim[2],
orient="horizontal", value=l1, variable=l1.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="l1",
x.ent.var=l1.ent.var)
})
frame3.scl.6.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=l.lim[1], to=l.lim[2],
orient="horizontal", value=l2, variable=l2.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="l2",
x.ent.var=l2.ent.var)
})
frame3.scl.7.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=p.lim[1], to=p.lim[2],
orient="horizontal", value=p1, variable=p1.scl.var,
command=function(...) {
ScaleChange(x=as.numeric(...), v="p1",
x.ent.var=p1.ent.var)
})
frame3.scl.8.2 <- tcltk::tkwidget(frame3, "ttk::scale", from=p.lim[1], to=p.lim[2],
orient="horizontal", value=p2, variable=p2.scl.var,
command=function(...) {
ScaleChange(x=as.numeric(...), v="p2",
x.ent.var=p2.ent.var)
})
tcltk::tkgrid(frame3.lab.1.1, frame3.scl.1.2, frame3.ent.1.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.2.1, frame3.scl.2.2, frame3.ent.2.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.3.1, frame3.scl.3.2, frame3.ent.3.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.4.1, frame3.scl.4.2, frame3.ent.4.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.5.1, frame3.scl.5.2, frame3.ent.5.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.6.1, frame3.scl.6.2, frame3.ent.6.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.7.1, frame3.scl.7.2, frame3.ent.7.3, pady=c(0, 5))
tcltk::tkgrid(frame3.lab.8.1, frame3.scl.8.2, frame3.ent.8.3)
tcltk::tkgrid.configure(frame3.scl.1.2, frame3.scl.2.2, frame3.scl.3.2,
frame3.scl.4.2, frame3.scl.5.2, frame3.scl.6.2,
frame3.scl.7.2, frame3.scl.8.2,
sticky="we", padx=c(4, 10))
tcltk::tkgrid.columnconfigure(frame3, 1, weight=1)
tcltk::tkpack(frame3, fill="x", padx=10, pady=0)
# Frame 4, color palette fixup
frame4 <- tcltk::ttkframe(tt, relief="flat")
txt <- "Correct all colors to valid RGB color model values"
frame4.chk.1 <- tcltk::ttkcheckbutton(frame4, text=txt, variable=fixup.var,
command=function() {
fixup <<- as.integer(tcltk::tclvalue(fixup.var))
DrawPalette(is.n=TRUE)
})
tcltk::tkgrid.configure(frame4.chk.1, padx=c(12, 0), pady=c(2, 0))
tcltk::tkpack(frame4, fill="x")
# Frame 5, number of colors in palette
txt <- "Number of colors in palette"
frame5 <- tcltk::ttklabelframe(tt, relief="flat", borderwidth=5, padding=5, text=txt)
frame5.lab.1 <- tcltk::ttklabel(frame5, text="n", width=2)
frame5.ent.3 <- tcltk::ttkentry(frame5, textvariable=n.ent.var, width=4)
frame5.scl.2 <- tcltk::tkwidget(frame5, "ttk::scale", from=n.lim[1], to=n.lim[2],
orient="horizontal", value=n, variable=n.scl.var,
command=function(...) {
ScaleChange(x=round(as.numeric(...)), v="n",
x.ent.var=n.ent.var)
})
tcltk::tkgrid(frame5.lab.1, frame5.scl.2, frame5.ent.3)
tcltk::tkgrid.configure(frame5.scl.2, sticky="we", padx=c(4, 10))
tcltk::tkgrid.columnconfigure(frame5, 1, weight=1)
tcltk::tkpack(frame5, fill="x", padx=10, pady=10)
# Frame 6, example plots and reverse colors
frame6 <- tcltk::ttklabelframe(tt, relief="flat", borderwidth=5, padding=5,
text="Show example")
frame6.lab.1 <- tcltk::ttklabel(frame6, text="Plot type")
frame6.box.2 <- tcltk::ttkcombobox(frame6, state="readonly",
textvariable=example.var,
values=example.plots)
frame6.chk.3 <- tcltk::ttkcheckbutton(frame6, text="Reverse colors",
variable=reverse.var, command=ShowExample)
tcltk::tkgrid(frame6.lab.1, frame6.box.2, frame6.chk.3)
tcltk::tkgrid.configure(frame6.box.2, padx=c(2, 10), sticky="we")
tcltk::tkgrid.columnconfigure(frame6, 1, weight=1)
tcltk::tkpack(frame6, fill="x", padx=10, pady=0)
# Frame 7, color palette and robustness checks
frame7 <- tcltk::ttkframe(tt, relief="flat")
frame7.cvs <- tcltk::tkcanvas(frame7, relief="flat",
width=cvs.width + 1, height=cvs.height + 1,
background="black", confine=TRUE, closeenough=0,
borderwidth=0, highlightthickness=0)
tcltk::tkgrid(frame7.cvs, padx=10, pady=c(12,0))
frame7.chk.1 <- tcltk::ttkcheckbutton(frame7, text="Desaturation",
variable=desaturation.var,
command=function() DrawPalette(is.n=TRUE))
frame7.chk.2 <- tcltk::ttkcheckbutton(frame7, text="Color blindness:",
variable=colorblind.var,
command=function() DrawPalette(is.n=TRUE))
frame7.rb.3 <- tcltk::ttkradiobutton(frame7, variable=colorblind.type.var,
value="deutan", text="deutan",
command=function() DrawPalette(is.n=TRUE))
frame7.rb.4 <- tcltk::ttkradiobutton(frame7, variable=colorblind.type.var,
value="protan", text="protan",
command=function() DrawPalette(is.n=TRUE))
frame7.rb.5 <- tcltk::ttkradiobutton(frame7, variable=colorblind.type.var,
value="tritan", text="tritan",
command=function() DrawPalette(is.n=TRUE))
tcltk::tkgrid(frame7.chk.1, frame7.chk.2, frame7.rb.3, frame7.rb.4, frame7.rb.5, "x",
pady=c(2, 0), sticky="w")
tcltk::tkgrid.configure(frame7.chk.2, padx=c(7, 0))
tcltk::tkgrid.configure(frame7.cvs, columnspan=5)
tcltk::tkgrid.columnconfigure(frame7, 4, weight=1)
tcltk::tkgrid.configure(frame7.chk.1, padx=c(10, 0))
tcltk::tkpack(frame7, fill="x")
# Initial commands
ConvertPaletteToAttributes(pal)
UpdateDataType()
# Bind events
tcltk::tclServiceMode(TRUE)
tcltk::tkbind(tt, "<Control-o>", OpenPaletteFromFile)
tcltk::tkbind(tt, "<Shift-Control-S>", SavePaletteToFile)
tcltk::tkbind(frame1.box.2, "<<ComboboxSelected>>", UpdateDataType)
tcltk::tkbind(frame6.box.2, "<<ComboboxSelected>>", ShowExample)
tcltk::tkbind(frame2.cvs, "<ButtonPress>", function(x, y) SelectDefaultPalette(x, y))
tcltk::tkbind(frame3.ent.1.3, "<KeyRelease>",
function() EntryChange("h1", h.lim, h1.ent.var, h1.scl.var))
tcltk::tkbind(frame3.ent.2.3, "<KeyRelease>",
function() EntryChange("h2", h.lim, h2.ent.var, h2.scl.var))
tcltk::tkbind(frame3.ent.3.3, "<KeyRelease>",
function() EntryChange("c1", c.lim, c1.ent.var, c1.scl.var))
tcltk::tkbind(frame3.ent.4.3, "<KeyRelease>",
function() EntryChange("c2", c.lim, c2.ent.var, c2.scl.var))
tcltk::tkbind(frame3.ent.5.3, "<KeyRelease>",
function() EntryChange("l1", l.lim, l1.ent.var, l1.scl.var))
tcltk::tkbind(frame3.ent.6.3, "<KeyRelease>",
function() EntryChange("l2", l.lim, l2.ent.var, l2.scl.var))
tcltk::tkbind(frame3.ent.7.3, "<KeyRelease>",
function() EntryChange("p1", p.lim, p1.ent.var, p1.scl.var))
tcltk::tkbind(frame3.ent.8.3, "<KeyRelease>",
function() EntryChange("p2", p.lim, p2.ent.var, p2.scl.var))
tcltk::tkbind(frame5.ent.3, "<KeyRelease>",
function() EntryChange("n", n.lim, n.ent.var, n.scl.var))
tcltk::tkbind(tt, "<Destroy>", function() tcltk::tclvalue(tt.done.var) <- 1)
# GUI control
tcltk::tkfocus(tt)
tcltk::tkgrab(tt)
tcltk::tkwait.variable(tt.done.var)
tcltk::tclServiceMode(FALSE)
tcltk::tkgrab.release(tt)
tcltk::tkdestroy(tt)
tcltk::tclServiceMode(TRUE)
if (dev.example %in% dev.list())
dev.off(which=dev.example)
invisible(pal.rtn)
}
# Get color palette as function of n
# Build a palette generator function of n for the given nature 'type',
# by taking the matching colorspace constructor and rewriting its formal
# arguments so the GUI's attribute values become the defaults. The returned
# function can therefore be called simply as f(n), and dput/dget on it
# round-trips the full parameterization.
GetPalette <- function(type, h1, h2, c1, c2, l1, l2, p1, p2, fixup) {
  fixup <- as.logical(fixup)
  #type <- as.character(tcltk::tclvalue(nature.var))
  if (type %in% c("Qualitative","qual")) {
    f <- qualitative_hcl
    # substitute() injects the current attribute values as argument
    # defaults; trailing empty formals (h1=, h2=, ...) keep the signature
    # compatible with callers that pass attributes by name.
    formals(f) <- eval(substitute(alist(n=, h=hh, c=d1, l=d2, start=d3, end=d4,
                                        fixup=d5, gamma=NULL, alpha=1,
                                        palette=NULL, rev=FALSE, ...=,
                                        h1=, h2=, c1=, l1=),
                                  list(hh = c(0,360),
                                       d1=c1, d2=l1, d3=h1, d4=h2, d5=fixup)))
  } else if (type %in% c("seqs","Sequential (single hue)")) {
    # Single-hue sequential: scalar hue and power, luminance range.
    f <- sequential_hcl
    formals(f) <- eval(substitute(alist(n=, h=d1, c=d2, l=d3, power=d4,
                                        gamma=NULL, fixup=d5, alpha=1,
                                        palette=NULL, rev=FALSE, ...=,
                                        h1=, h2=, c1=, c2=, l1=, l2=, p1=, p2=, c.=),
                                  list(d1=h1, d2=c1, d3=c(l1, l2),
                                       d4=p1, d5=fixup)))
  } else if (type %in% c("seqm","Sequential (multiple hues)")) {
    # Multi-hue sequential: hue, chroma, luminance and power all ranges.
    f <- sequential_hcl
    formals(f) <- eval(substitute(alist(n=, h=d1, c=d2, l=d3, power=d4,
                                        gamma=NULL, fixup=d5, alpha=1,
                                        palette=NULL, rev=FALSE, ...=,
                                        h1=, h2=, c1=, c2=, l1=, l2=, p1=, p2=, c.=),
                                  list(d1=c(h1, h2), d2=c(c1, c2),
                                       d3=c(l1, l2), d4=c(p1, p2), d5=fixup)))
  } else if (type %in% c("dive","Diverging")) {
    f <- diverge_hcl
    formals(f) <- eval(substitute(alist(n=, h=d1, c=d2, l=d3, power=d4,
                                        gamma=NULL, fixup=d5, alpha=1,
                                        palette=NULL, rev=FALSE, ...=,
                                        h1=, h2=, c1=, l1=, l2=, p1=, p2=),
                                  list(d1=c(h1, h2), d2=c1, d3=c(l1, l2),
                                       d4=p1, d5=fixup)))
  }
  f
}
|
# R Basics with Examples.
## Data Types
# Numeric, character and logical data types.
temperature = 27.5 # temperature in Fahrenheit
class(temperature)
RPI <- "Rensselaer Polytechnic Institue"
class(RPI)
Rpi <- 3.14159265359
class(Rpi)
isSnowing <- FALSE
class(isSnowing)
# NOTE: T is just a reassignable alias for TRUE; prefer TRUE in real code.
R <- T
class(R)
## Vectors
num_vec <- c(1,3,5,99)
class(num_vec)
cha_vec <- c("R","P","I")
class(cha_vec)
boolean_vec <- c(T,FALSE, TRUE,F) # T = TRUE and F = FALSE; you can simply use T for TRUE
class(boolean_vec)
# mixed variable types
vec_mixed <- c("RPI",1824, 3.14)
vec_mixed
class(vec_mixed) # Note: the 1824 and 3.14 are converted to characters
vec_mixed_boolean <- c(TRUE,"RPI", 1824, 3.14)
vec_mixed_boolean
class(vec_mixed_boolean)
# Logicals mixed with numerics are coerced to numeric (TRUE becomes 1).
vec_numeric_boolean <- c(TRUE,1824,3.14)
vec_numeric_boolean
class(vec_numeric_boolean)
# Named vectors: label each element with a day name.
temperature <- c(80,81.3,83,84.2,82.5)
temperature
names(temperature) <- c("Mon","Tue","Web","Thur","Fri")
temperature
# You can do the same thing by:
Week_Days <-c("Mon","Tue","Web","Thur","Fri")
names(temperature) <- Week_Days
temperature
# Indexing Vectors
# Note: indexing in R starts with 1, in python programming language indexing start with 0.
vec1 <- c('R','P','I')
vec2 <- c(1,8,2,4)
vec1[1]
vec2[2]
# Matrix
m <- c(1:10)
m
matrix(m,nrow = 2)
matrix(1:12,byrow = FALSE,nrow = 4) # filled column-by-column (the default)
matrix(1:12,byrow = TRUE,nrow = 4) # filled row-by-row
# stock prices
goog <- c(560,564,563,562,561)
msft <- c(480,482,484,483,482)
stocks <- c(goog,msft)
stocks
print(stocks)
stock.matrix <- matrix(stocks,byrow = T,nrow = 2)
stock.matrix
days <- c("Mon","Tue","Wed","Thur","Fri")
st.names <- c("goog","msft")
colnames(stock.matrix) <- days
rownames(stock.matrix) <- st.names
print(stock.matrix)
# Element-wise matrix arithmetic.
mat <- matrix(1:25,byrow = T, nrow = 5)
mat
mat*2
mat/2
mat^2
1/mat
mat > 15
mat[mat > 15]
mat + mat
mat / mat
# Column and row aggregation.
colSums(stock.matrix)
rowSums(stock.matrix)
rowMeans(stock.matrix)
# Bind the Columns
FB <- c(223,224,225,223.5,222)
tech.stocks <- rbind(stock.matrix,FB) # Row bind
tech.stocks
avg <- rowMeans(tech.stocks)
avg
tech.stocks <- cbind(tech.stocks,avg) # Column bind
tech.stocks
# Matrix slicing with [row, column].
mat <- matrix(1:50,byrow = T, nrow = 5)
mat
mat[1,] # first row with all the columns
mat[,1] # first column and all the rows
mat[1:3,]
mat[1:2,1:3]
mat[,9:10]
mat[2:3,5:6]
# Factors and categorical variables
animal <- c('d','c','d','c','c')
id <- c(1,2,3,4,5)
temps <- c('cold','med','hot','hot','hot','cold','med')
temps
fact.temps <- factor(temps, ordered = TRUE, levels = c('cold','med','hot'))
fact.temps
summary(fact.temps)
summary(temps) # compare: summary of a plain character vector is uninformative
undergrads <-c('Freshman','Junior', 'Sophomore','Junior','Senior','Sophomore','Junior','Freshman','Senior','Junior')
undergrads
factor.undergrads <-factor(undergrads,ordered = TRUE,levels = c('Freshman','Sophomore','Junior','Senior'))
factor.undergrads
summary(factor.undergrads)
# Exercise
# Row bind example (row names come from the bound variable names)
A <- c(1,2,3)
B <- c(4,5,6)
A <- rbind(A,B)
A
mat <- matrix(1:9, nrow = 3)
mat
is.matrix(mat)
mat2 <- matrix(1:25, byrow = T, nrow = 5)
mat2
mat2[2:3,2:3]
mat2[4:5,4:5]
sum(mat2)
help("runif")
# runif(n) draws n uniform random numbers on [0, 1).
u <- runif(20)
u
runif(matrix(20)) # a length-1 matrix holding 20 is coerced to n = 20
matrix(runif(20),nrow = 4)
|
/R_Basics_Teaching_Examples .R
|
no_license
|
thilankam/DataAnlatics_Lab0-R_Basics_Teaching_Examples
|
R
| false
| false
| 3,051
|
r
|
# R Basics with Examples.
## Data Types
# numerics,character and logical
temperature = 27.5 # temperature in Fahrenheit
class(temperature)
RPI <- "Rensselaer Polytechnic Institue"
class(RPI)
Rpi <- 3.14159265359
class(Rpi)
isSnowing <- FALSE
class(isSnowing)
R <- T
class(R)
## Vectors
num_vec <- c(1,3,5,99)
class(num_vec)
cha_vec <- c("R","P","I")
class(cha_vec)
boolean_vec <- c(T,FALSE, TRUE,F) # T = TRUE, you can simply use T for TURE
class(boolean_vec)
# mixed variable types
vec_mixed <- c("RPI",1824, 3.14)
vec_mixed
class(vec_mixed) # Note: the 1824 and 3.14 are converted to characters
vec_mixed_boolean <- c(TRUE,"RPI", 1824, 3.14)
vec_mixed_boolean
class(vec_mixed_boolean)
vec_numeric_boolean <- c(TRUE,1824,3.14)
vec_numeric_boolean
class(vec_numeric_boolean)
temperature <- c(80,81.3,83,84.2,82.5)
temperature
names(temperature) <- c("Mon","Tue","Web","Thur","Fri")
temperature
# You can do the same thing by:
Week_Days <-c("Mon","Tue","Web","Thur","Fri")
names(temperature) <- Week_Days
temperature
# Indexing Vectors
# Note: indexing in R starts with 1, in python programming language indexing start with 0.
vec1 <- c('R','P','I')
vec2 <- c(1,8,2,4)
vec1[1]
vec2[2]
# Matrix
m <- c(1:10)
m
matrix(m,nrow = 2)
matrix(1:12,byrow = FALSE,nrow = 4)
matrix(1:12,byrow = TRUE,nrow = 4)
# stock prices
goog <- c(560,564,563,562,561)
msft <- c(480,482,484,483,482)
stocks <- c(goog,msft)
stocks
print(stocks)
stock.matrix <- matrix(stocks,byrow = T,nrow = 2)
stock.matrix
days <- c("Mon","Tue","Wed","Thur","Fri")
st.names <- c("goog","msft")
colnames(stock.matrix) <- days
rownames(stock.matrix) <- st.names
print(stock.matrix)
mat <- matrix(1:25,byrow = T, nrow = 5)
mat
mat*2
mat/2
mat^2
1/mat
mat > 15
mat[mat > 15]
mat + mat
mat / mat
colSums(stock.matrix)
rowSums(stock.matrix)
rowMeans(stock.matrix)
# Bind the Columns
FB <- c(223,224,225,223.5,222)
tech.stocks <- rbind(stock.matrix,FB) # Row bind
tech.stocks
avg <- rowMeans(tech.stocks)
avg
tech.stocks <- cbind(tech.stocks,avg) # Column bind
tech.stocks
mat <- matrix(1:50,byrow = T, nrow = 5)
mat
mat[1,] # first row with all the columns
mat[,1] # first column and all the rows
mat[1:3,]
mat[1:2,1:3]
mat[,9:10]
mat[2:3,5:6]
# Factor and Catergorical variables
animal <- c('d','c','d','c','c')
id <- c(1,2,3,4,5)
temps <- c('cold','med','hot','hot','hot','cold','med')
temps
fact.temps <- factor(temps, ordered = TRUE, levels = c('cold','med','hot'))
fact.temps
summary(fact.temps)
summary(temps)
undergrads <-c('Freshman','Junior', 'Sophomore','Junior','Senior','Sophomore','Junior','Freshman','Senior','Junior')
undergrads
factor.undergrads <-factor(undergrads,ordered = TRUE,levels = c('Freshman','Sophomore','Junior','Senior'))
factor.undergrads
summary(factor.undergrads)
# Exercise
# Row bind example
A <- c(1,2,3)
B <- c(4,5,6)
A <- rbind(A,B)
A
mat <- matrix(1:9, nrow = 3)
mat
is.matrix(mat)
mat2 <- matrix(1:25, byrow = T, nrow = 5)
mat2
mat2[2:3,2:3]
mat2[4:5,4:5]
sum(mat2)
help("runif")
u <- runif(20)
u
runif(matrix(20))
matrix(runif(20),nrow = 4)
|
library('data.table')
library('tidyverse')
library('caret')

smallratingspath <- 'data/ml-latest-small/ratings.csv'
smallmoviespath <- 'data/ml-latest-small/movies.csv'
largeratingspath <- 'data/ml-latest/ratings.csv'
largemoviespath <- 'data/ml-latest/movies.csv'

##The path can be changed out to use the smaller or larger dataset.
## BUG FIX: the '::' separator (used by the '::'-delimited MovieLens files)
## was replaced with the two literal characters '/t' instead of a tab
## ('\t'), which fread cannot detect as a field separator.
ratings <- fread(text = gsub('::', '\t', readLines(smallratingspath)),
                 col.names = c('userId', 'movieId', 'rating', 'timestamp'))
##Again, you can choose whether to use the smaller or larger dataset.
movies <- read.csv(smallmoviespath)
colnames(movies) <- c('movieId', 'title', 'genres')
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
                                           title = as.character(title),
                                           genres = as.character(genres))
## Attach movie metadata to every rating row.
movielens <- left_join(ratings, movies, by = 'movieId')
## 'Rounding' reproduces the pre-R-3.6 sampling for reproducibility.
set.seed(1, sample.kind = 'Rounding')
##Useful function to split out dataset.
##Split a ratings data frame into train and validation sets (80/20).
##Validation rows whose movie or user is absent from the train set are
##moved back into the train set, so the model never has to predict for
##an unseen user/movie. Returns list(train, validation).
traintest <- function(train){
    test_index <- createDataPartition(y = train$rating, times = 1, p = 0.2, list = FALSE)
    ## BUG FIX: the original indexed the global 'movielens' here instead of
    ## the 'train' argument, so the function silently ignored its input.
    test <- train[test_index,]
    train <- train[-test_index,]
    ##Only include values in test set that are in train set
    validation <- test %>% semi_join(train, by = 'movieId') %>% semi_join(train, by = 'userId')
    ##Get removed rows and re-add them to the train set
    removed <- anti_join(test, validation)
    train <- rbind(train, removed)
    return (list(train, validation))
}
datasets <- traintest(movielens)
train <- datasets[[1]]
validation <- datasets[[2]]
## Save the split into the 'rda' directory. Using file.path() avoids the
## original setwd('rda')/setwd('..') dance, which left the process in the
## wrong working directory if either save() failed.
save(train, file = file.path('rda', 'train.rda'))
save(validation, file = file.path('rda', 'validation.rda'))
rm(train, validation)
|
/train-validation-split.R
|
no_license
|
sbellows1/movielens-predict
|
R
| false
| false
| 1,749
|
r
|
library('data.table')
library('tidyverse')
library('caret')
smallratingspath <- 'data/ml-latest-small/ratings.csv'
smallmoviespath <- 'data/ml-latest-small/movies.csv'
largeratingspath <- 'data/ml-latest/ratings.csv'
largemoviespath <- 'data/ml-latest/movies.csv'
##The path can be changed out to use the smaller or larger dataset.
ratings <- fread(text = gsub('::', '/t', readLines(smallratingspath)),
col.names = c('userId', 'movieId', 'rating', 'timestamp'))
##Again, you can choose whether to use the smaller or larger dataset.
movies <- read.csv(smallmoviespath)
colnames(movies) <- c('movieId', 'title', 'genres')
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = 'movieId')
set.seed(1, sample.kind = 'Rounding')
##Useful function to split out dataset.
## Split a ratings data frame into an 80% train / 20% validation pair.
##
## Bug fix: the original body partitioned the global `movielens` instead of
## the `train` argument, so the function silently ignored its input.  It now
## operates on `train` (behavior is unchanged for the existing caller, which
## passes `movielens`).
##
## Every movieId and userId in the returned validation set is guaranteed to
## also appear in the returned train set; held-out rows that fail that check
## are moved back into train so no ratings are lost.
##
## @param train data frame of ratings with at least `rating`, `movieId`,
##   and `userId` columns.
## @return list(train, validation).
traintest <- function(train){
  test_index <- createDataPartition(y = train$rating, times = 1, p = 0.2, list = FALSE)
  test <- train[test_index, ]
  train <- train[-test_index, ]
  ## Only keep validation rows whose movie AND user occur in the train set.
  validation <- test %>% semi_join(train, by = 'movieId') %>% semi_join(train, by = 'userId')
  ## Rows dropped from the validation set are re-added to the train set
  ## (anti_join matches on all common columns here, which is intended).
  removed <- anti_join(test, validation)
  train <- rbind(train, removed)
  return(list(train, validation))
}
## Build the train/validation split and persist both pieces under rda/.
datasets <- traintest(movielens)
train <- datasets[[1]]
validation <- datasets[[2]]
## Save via file.path() instead of the setwd('rda') ... setwd('..') dance:
## if save() errored mid-way, the old code left the working directory
## changed for the rest of the session.
save(train, file = file.path('rda', 'train.rda'))
save(validation, file = file.path('rda', 'validation.rda'))
## Free the large objects once they are on disk.
rm(train, validation)
|
# First R script
# Test of accented-character usage
# Working directory configuration
# Do not use spaces in the directory name
setwd("~/Projetos/Python/git/R/DSA/BigDataAnalytics/Cap02-Fundamentos")
# Names of the R contributors
contributors()
# License information
license()
# Information about the current session
sessionInfo()
# Print something to the screen (string kept verbatim; it is runtime output)
print('OlΓ‘ mundo, iniciando na caminhada para mudanΓ§a de rumo')
# Create a plot
plot(1:25)
# Install packages
install.packages('randomForest')
install.packages('ggplot2')
install.packages('dplyr')
install.packages('devtools')
# Load a package
library('ggplot2')
library("caret", lib.loc="~/anaconda3/envs/rstudio/lib/R/library")
# Unload a package
detach('ggplot2')
detach("package:caret", unload = TRUE)
# Get help, if you know the function name
help(mean)
?mean
# For more information, use the SOS package
install.packages('sos')
library('sos', lib.loc = "~/anaconda3/envs/rstudio/lib/R/library")
findFn("fread")
# If you do not know the function name
help.search('randomForest')
help.search("matplot")
??matplot
RSiteSearch('matplot')
example("matplot")
# Quit the session
q()
|
/BigDataAnalytics/Cap02-Fundamentos/01-RStudio.r
|
no_license
|
kzenrick/dsa
|
R
| false
| false
| 1,127
|
r
|
# First R script
# Test of accented-character usage
# Working directory configuration
# Do not use spaces in the directory name
setwd("~/Projetos/Python/git/R/DSA/BigDataAnalytics/Cap02-Fundamentos")
# Names of the R contributors
contributors()
# License information
license()
# Information about the current session
sessionInfo()
# Print something to the screen (string kept verbatim; it is runtime output)
print('OlΓ‘ mundo, iniciando na caminhada para mudanΓ§a de rumo')
# Create a plot
plot(1:25)
# Install packages
install.packages('randomForest')
install.packages('ggplot2')
install.packages('dplyr')
install.packages('devtools')
# Load a package
library('ggplot2')
library("caret", lib.loc="~/anaconda3/envs/rstudio/lib/R/library")
# Unload a package
detach('ggplot2')
detach("package:caret", unload = TRUE)
# Get help, if you know the function name
help(mean)
?mean
# For more information, use the SOS package
install.packages('sos')
library('sos', lib.loc = "~/anaconda3/envs/rstudio/lib/R/library")
findFn("fread")
# If you do not know the function name
help.search('randomForest')
help.search("matplot")
??matplot
RSiteSearch('matplot')
example("matplot")
# Quit the session
q()
|
# Data dump consumed by a Stan model (stan/spectral-inference).
K <- 5         # NOTE(review): scalar model constant -- semantics defined by the Stan model; confirm there
M <- 400       # length of the observation grid `p` defined below (0.05..20 in 0.05 steps)
M_pred <- 400  # length of the prediction grid `p_pred` defined below
p <-
c(0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9,
0.95, 1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9,
1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5, 2.55, 2.6, 2.65, 2.7, 2.75, 2.8, 2.85, 2.9,
2.95, 3, 3.05, 3.1, 3.15, 3.2, 3.25, 3.3, 3.35, 3.4, 3.45, 3.5, 3.55, 3.6, 3.65, 3.7, 3.75, 3.8, 3.85, 3.9,
3.95, 4, 4.05, 4.1, 4.15, 4.2, 4.25, 4.3, 4.35, 4.4, 4.45, 4.5, 4.55, 4.6, 4.65, 4.7, 4.75, 4.8, 4.85, 4.9,
4.95, 5, 5.05, 5.1, 5.15, 5.2, 5.25, 5.3, 5.35, 5.4, 5.45, 5.5, 5.55, 5.6, 5.65, 5.7, 5.75, 5.8, 5.85, 5.9,
5.95, 6, 6.05, 6.1, 6.15, 6.2, 6.25, 6.3, 6.35, 6.4, 6.45, 6.5, 6.55, 6.6, 6.65, 6.7, 6.75, 6.8, 6.85, 6.9,
6.95, 7, 7.05, 7.1, 7.15, 7.2, 7.25, 7.3, 7.35, 7.4, 7.45, 7.5, 7.55, 7.6, 7.65, 7.7, 7.75, 7.8, 7.85, 7.9,
7.95, 8, 8.05, 8.1, 8.15, 8.2, 8.25, 8.3, 8.35, 8.4, 8.45, 8.5, 8.55, 8.6, 8.65, 8.7, 8.75, 8.8, 8.85, 8.9,
8.95, 9, 9.05, 9.1, 9.15, 9.2, 9.25, 9.3, 9.35, 9.4, 9.45, 9.5, 9.55, 9.6, 9.65, 9.7, 9.75, 9.8, 9.85, 9.9,
9.95, 10, 10.05, 10.1, 10.15, 10.2, 10.25, 10.3, 10.35, 10.4, 10.45, 10.5, 10.55, 10.6, 10.65, 10.7, 10.75,
10.8, 10.85, 10.9, 10.95, 11, 11.05, 11.1, 11.15, 11.2, 11.25, 11.3, 11.35, 11.4, 11.45, 11.5, 11.55, 11.6,
11.65, 11.7, 11.75, 11.8, 11.85, 11.9, 11.95, 12, 12.05, 12.1, 12.15, 12.2, 12.25, 12.3, 12.35, 12.4, 12.45,
12.5, 12.55, 12.6, 12.65, 12.7, 12.75, 12.8, 12.85, 12.9, 12.95, 13, 13.05, 13.1, 13.15, 13.2, 13.25, 13.3,
13.35, 13.4, 13.45, 13.5, 13.55, 13.6, 13.65, 13.7, 13.75, 13.8, 13.85, 13.9, 13.95, 14, 14.05, 14.1, 14.15,
14.2, 14.25, 14.3, 14.35, 14.4, 14.45, 14.5, 14.55, 14.6, 14.65, 14.7, 14.75, 14.8, 14.85, 14.9, 14.95, 15,
15.05, 15.1, 15.15, 15.2, 15.25, 15.3, 15.35, 15.4, 15.45, 15.5, 15.55, 15.6, 15.65, 15.7, 15.75, 15.8,
15.85, 15.9, 15.95, 16, 16.05, 16.1, 16.15, 16.2, 16.25, 16.3, 16.35, 16.4, 16.45, 16.5, 16.55, 16.6, 16.65,
16.7, 16.75, 16.8, 16.85, 16.9, 16.95, 17, 17.05, 17.1, 17.15, 17.2, 17.25, 17.3, 17.35, 17.4, 17.45, 17.5,
17.55, 17.6, 17.65, 17.7, 17.75, 17.8, 17.85, 17.9, 17.95, 18, 18.05, 18.1, 18.15, 18.2, 18.25, 18.3, 18.35,
18.4, 18.45, 18.5, 18.55, 18.6, 18.65, 18.7, 18.75, 18.8, 18.85, 18.9, 18.95, 19, 19.05, 19.1, 19.15, 19.2,
19.25, 19.3, 19.35, 19.4, 19.45, 19.5, 19.55, 19.6, 19.65, 19.7, 19.75, 19.8, 19.85, 19.9, 19.95, 20)
p_pred <-
c(0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85,
0.9, 0.95, 1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85,
1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5, 2.55, 2.6, 2.65, 2.7, 2.75, 2.8, 2.85,
2.9, 2.95, 3, 3.05, 3.1, 3.15, 3.2, 3.25, 3.3, 3.35, 3.4, 3.45, 3.5, 3.55, 3.6, 3.65, 3.7, 3.75, 3.8, 3.85,
3.9, 3.95, 4, 4.05, 4.1, 4.15, 4.2, 4.25, 4.3, 4.35, 4.4, 4.45, 4.5, 4.55, 4.6, 4.65, 4.7, 4.75, 4.8, 4.85,
4.9, 4.95, 5, 5.05, 5.1, 5.15, 5.2, 5.25, 5.3, 5.35, 5.4, 5.45, 5.5, 5.55, 5.6, 5.65, 5.7, 5.75, 5.8, 5.85,
5.9, 5.95, 6, 6.05, 6.1, 6.15, 6.2, 6.25, 6.3, 6.35, 6.4, 6.45, 6.5, 6.55, 6.6, 6.65, 6.7, 6.75, 6.8, 6.85,
6.9, 6.95, 7, 7.05, 7.1, 7.15, 7.2, 7.25, 7.3, 7.35, 7.4, 7.45, 7.5, 7.55, 7.6, 7.65, 7.7, 7.75, 7.8, 7.85,
7.9, 7.95, 8, 8.05, 8.1, 8.15, 8.2, 8.25, 8.3, 8.35, 8.4, 8.45, 8.5, 8.55, 8.6, 8.65, 8.7, 8.75, 8.8, 8.85,
8.9, 8.95, 9, 9.05, 9.1, 9.15, 9.2, 9.25, 9.3, 9.35, 9.4, 9.45, 9.5, 9.55, 9.6, 9.65, 9.7, 9.75, 9.8, 9.85,
9.9, 9.95, 10, 10.05, 10.1, 10.15, 10.2, 10.25, 10.3, 10.35, 10.4, 10.45, 10.5, 10.55, 10.6, 10.65, 10.7,
10.75, 10.8, 10.85, 10.9, 10.95, 11, 11.05, 11.1, 11.15, 11.2, 11.25, 11.3, 11.35, 11.4, 11.45, 11.5, 11.55,
11.6, 11.65, 11.7, 11.75, 11.8, 11.85, 11.9, 11.95, 12, 12.05, 12.1, 12.15, 12.2, 12.25, 12.3, 12.35, 12.4,
12.45, 12.5, 12.55, 12.6, 12.65, 12.7, 12.75, 12.8, 12.85, 12.9, 12.95, 13, 13.05, 13.1, 13.15, 13.2, 13.25,
13.3, 13.35, 13.4, 13.45, 13.5, 13.55, 13.6, 13.65, 13.7, 13.75, 13.8, 13.85, 13.9, 13.95, 14, 14.05, 14.1,
14.15, 14.2, 14.25, 14.3, 14.35, 14.4, 14.45, 14.5, 14.55, 14.6, 14.65, 14.7, 14.75, 14.8, 14.85, 14.9,
14.95, 15, 15.05, 15.1, 15.15, 15.2, 15.25, 15.3, 15.35, 15.4, 15.45, 15.5, 15.55, 15.6, 15.65, 15.7, 15.75,
15.8, 15.85, 15.9, 15.95, 16, 16.05, 16.1, 16.15, 16.2, 16.25, 16.3, 16.35, 16.4, 16.45, 16.5, 16.55, 16.6,
16.65, 16.7, 16.75, 16.8, 16.85, 16.9, 16.95, 17, 17.05, 17.1, 17.15, 17.2, 17.25, 17.3, 17.35, 17.4, 17.45,
17.5, 17.55, 17.6, 17.65, 17.7, 17.75, 17.8, 17.85, 17.9, 17.95, 18, 18.05, 18.1, 18.15, 18.2, 18.25, 18.3,
18.35, 18.4, 18.45, 18.5, 18.55, 18.6, 18.65, 18.7, 18.75, 18.8, 18.85, 18.9, 18.95, 19, 19.05, 19.1, 19.15,
19.2, 19.25, 19.3, 19.35, 19.4, 19.45, 19.5, 19.55, 19.6, 19.65, 19.7, 19.75, 19.8, 19.85, 19.9, 19.95, 20)
y1 <-
c(0.000950694, 0.000100472, 0.000284786, 0.000723328, 7.59677e-05, 6.60121e-05, 0.000125453, 0.000242469, 2.32638e-06, 0.000171246, 5.62755e-05, 0.000186667,
0.000236849, 0.000728013, 9.30248e-05, 0.00103827, 0.000462487, 0.000417968, 0.000663409, 0.0004442, 0.000130493, 0.000137924, 0.000138866, 5.89177e-05,
2.41176e-05, 3.59655e-07, 8.60833e-05, 0.000763019, 0.00053405, 0.000273502, 0.000148277, 0.000450883, 6.4654e-05, 6.31175e-05, 0.000330077, 0.000345304,
1.7381e-05, 0.000460998, 0.000292563, 1.40214e-05, 0.000421933, 0.000223642, 0.000257569, 0.000789927, 6.18638e-05, 0.000364691, 0.000273004, 0.000184768,
0.000441683, 0.00010666, 7.01966e-06, 0.000593848, 0.000450213, 0.000265892, 0.000204705, 0.000191059, 0.000124129, 0.000152955, 0.000694124, 0.00080139,
0.000574785, 4.09561e-05, 0.000490065, 0.000602765, 2.94108e-05, 0.000116517, 6.59371e-05, 7.66676e-05, 0.000577547, 9.27305e-05, 8.05981e-05, 0.000391608,
0.000310647, 6.36681e-05, 0.000607592, 2.59618e-05, 0.000523886, 0.000210237, 2.97203e-05, 0.000848244, 0.000682861, 0.000196708, 3.97563e-05, 8.47971e-05,
0.000458676, 0.000570211, 0.000275825, 3.24322e-05, 5.42299e-05, 0.000173264, 0.00148345, 0.000558109, 0.000592152, 0.000566271, 8.96652e-05, 0.000712106,
0.000714956, 0.000333143, 0.000714003, 7.59696e-05, 0.000232356, 9.46624e-05, 0.0002187, 5.16105e-05, 0.000218552, 0.000319414, 0.00113855, 0.00019085, 0.000270743,
0.000809653, 0.000354291, 4.57559e-05, 0.000250445, 0.000289647, 0.000105813, 0.00020895, 0.000330067, 0.000330021, 0.000459942, 0.000256966, 0.000389509,
0.000606176, 0.000308375, 0.000205461, 7.63616e-07, 0.00017752, 0.000865931, 0.000187246, 0.000101152, 0.0006392, 0.00217055, 0.000368677, 3.32983e-05, 0.00240438,
0.000202789, 0.0002399, 0.000824301, 0.000411754, 0.000137432, 6.85329e-05, 0.00127287, 0.000510566, 0.000506751, 9.64091e-05, 0.00212425, 0.00198812, 0.000501766,
0.000268495, 0.000417285, 0.000921504, 0.000429363, 0.000450722, 9.86438e-05, 2.08455e-05, 0.000806218, 0.000128374, 0.000669037, 0.000362092, 0.000266457,
0.00123601, 0.00140207, 0.000167541, 0.000133921, 0.000981286, 0.000497034, 0.000185602, 0.000525644, 0.000820744, 0.000445152, 0.000175099, 0.000356201,
0.00057206, 9.60383e-05, 0.00170762, 0.000120999, 1.84466e-05, 0.000535107, 0.000232882, 0.000161675, 1.74486e-06, 6.55386e-05, 0.000570099, 0.00268864,
0.000162227, 0.000204169, 4.6126e-05, 0.000619391, 0.000623291, 0.00082568, 0.00329517, 4.72057e-05, 0.00204973, 0.00107278, 0.00120694, 0.000460209, 0.000441095,
0.00173734, 7.9e-05, 0.000178284, 0.00154437, 0.00152134, 0.00243212, 0.000766236, 0.000172554, 0.00200168, 0.00101942, 0.00114208, 0.00145731, 0.000343897,
0.00145234, 0.000221856, 3.46792e-05, 0.00117203, 0.00244421, 0.00137182, 0.0038075, 0.00080364, 0.00077727, 0.000370457, 0.0015418, 8.52371e-05, 0.00110611,
0.00102401, 0.000192526, 0.00153896, 0.00274202, 0.00247043, 0.00150429, 0.0010302, 0.000621385, 0.000220496, 0.00220061, 2.65343e-05, 0.00110467, 0.00126262,
0.00143174, 0.00042738, 0.000949809, 0.000756996, 0.00464559, 0.00110925, 0.000116117, 0.00664934, 0.00282585, 0.00512823, 0.00268813, 0.00128588, 0.000914513,
0.000203088, 0.00258571, 0.000843777, 0.0004431, 0.00468668, 0.000944523, 0.00192031, 0.000453996, 0.000236769, 5.05225e-05, 4.70717e-06, 0.000437896, 7.85407e-05,
0.000432231, 0.000517015, 0.00106664, 0.00111833, 0.00253613, 0.00502752, 0.00451049, 0.00233734, 0.000795093, 9.95417e-06, 0.00297299, 0.00307492, 0.00067358,
0.000822174, 0.00455754, 0.00134401, 0.00149708, 0.000385462, 0.000318975, 0.000221759, 0.00110539, 0.00272828, 0.000704706, 0.000895241, 0.000738, 0.00185644,
0.00137739, 0.000901178, 0.000196652, 0.000142674, 0.000897528, 0.000253262, 0.0010359, 0.000457437, 0.00115979, 0.00291484, 0.00032283, 0.00065205, 0.000235218,
0.00170149, 0.00118083, 0.0025975, 0.000451825, 0.0011161, 0.00214706, 0.000522623, 0.000221547, 0.000694211, 0.000419788, 7.34707e-05, 0.00134859, 0.000267664,
0.00113681, 0.000214428, 0.000190714, 7.67995e-05, 0.000198368, 0.000535419, 0.00117289, 0.000236443, 0.000140572, 1.84513e-05, 0.000272901, 0.00012438,
6.82109e-05, 0.000444841, 1.66695e-05, 8.11331e-05, 1.03227e-05, 0.000441749, 0.000239293, 6.27909e-05, 0.000195223, 0.000428386, 0.000257245, 0.000168394,
0.000186031, 8.34549e-05, 8.74632e-05, 0.000122486, 0.000118441, 5.01524e-05, 0.000161396, 0.000348425, 0.000120009, 0.00024743, 0.000144947, 0.000510039,
0.000132811, 3.53869e-05, 0.000399068, 0.000478762, 0.000570554, 0.000214899, 1.95449e-05, 0.000190127, 0.000404915, 0.000436742, 0.000147853, 4.698e-05,
8.77014e-06, 0.000632236, 0.000344514, 0.000548116, 0.000137656, 9.80768e-05, 0.000101471, 8.30364e-05, 0.000253635, 5.60513e-05, 8.63947e-05, 2.92953e-06,
8.07772e-05, 0.000212242, 3.03592e-05, 4.26439e-05, 3.00424e-05, 0.000299367, 2.22885e-06, 3.30225e-05, 4.1958e-05, 0.000206661, 0.00044979, 0.000213808,
6.01316e-05, 1.7831e-05, 0.000916718, 3.16138e-05, 9.1341e-05, 0.000526221, 9.73829e-05, 1.03217e-05, 0.000131133, 7.38675e-05, 1.62431e-05, 0.000210628,
7.53628e-05, 2.32968e-05, 3.87115e-05)
y2 <-
c(1.70903e-05, 1.43205e-06, 5.69705e-05, 1.26785e-05, 4.50302e-05, 2.39866e-05, 0.000167932, 5.63378e-05, 1.26468e-05, 5.30216e-05, 6.13271e-05, 0.000169797,
1.67203e-05, 0.000212684, 5.57558e-05, 5.975e-05, 1.43571e-05, 3.09176e-05, 2.27439e-05, 2.77333e-06, 2.00965e-05, 1.05565e-05, 0.000106833, 5.0816e-05,
0.000111036, 0.000111545, 0.000428684, 8.17717e-05, 9.36581e-05, 6.43754e-06, 0.000112246, 5.48189e-05, 0.000214686, 0.000252255, 0.000274713, 2.72032e-05,
1.29854e-06, 6.85314e-05, 7.21489e-05, 0.000559881, 0.000194007, 4.64904e-05, 5.78255e-05, 6.39679e-05, 0.000114837, 0.000113754, 5.91291e-05, 6.29467e-05,
0.000127672, 0.000204454, 0.000186891, 1.49527e-05, 5.93522e-06, 0.000271353, 2.15285e-05, 0.000140128, 2.09503e-05, 0.000197482, 1.77413e-05, 0.000120912,
0.000111355, 0.000162782, 9.06896e-05, 7.00809e-05, 7.27688e-05, 7.23059e-05, 0.000125125, 0.000493616, 8.65903e-06, 0.000123637, 0.000171635, 7.71746e-05,
0.000302026, 0.000293828, 0.000126379, 7.81437e-05, 0.000181503, 8.3496e-05, 9.21776e-05, 0.000128142, 9.49039e-05, 7.74169e-05, 3.03222e-05, 5.61261e-05,
6.95719e-05, 7.25609e-05, 9.43222e-06, 0.000337022, 0.000179926, 2.19074e-05, 0.000132887, 3.88613e-05, 0.000297456, 5.83477e-05, 0.000302062, 8.44286e-05,
0.000267442, 0.000167872, 0.000161967, 0.000363877, 3.4723e-05, 0.000171926, 0.000351901, 0.000142601, 6.54218e-05, 0.000153731, 0.000262694, 0.000156407,
0.000167841, 6.97706e-05, 0.000281245, 0.000332253, 0.000263087, 0.000690213, 7.3549e-06, 0.000258066, 0.000312366, 3.98542e-05, 3.13189e-05, 0.000283254,
4.57523e-05, 0.000307489, 0.00116695, 0.00070937, 0.00106793, 6.66689e-06, 0.000340069, 0.00113324, 0.000249947, 4.60133e-05, 0.000208328, 0.000877775, 7.94085e-06,
0.00041582, 0.000376282, 0.000140936, 0.00110134, 0.00034063, 0.000187602, 0.000132935, 7.06255e-05, 1.10714e-05, 0.00024811, 8.98584e-05, 0.000103464, 0.00021512,
0.000208232, 0.000420656, 4.804e-05, 3.05617e-05, 9.28038e-05, 5.72083e-05, 0.000264751, 7.22665e-05, 5.04768e-05, 3.77965e-05, 0.000107236, 4.5979e-05, 5.3729e-05,
0.000165742, 2.45246e-05, 7.43235e-05, 7.99276e-05, 6.47654e-08, 8.93356e-08, 0.000125654, 3.32633e-05, 2.89942e-06, 3.35838e-05, 0.000127639, 0.0001511,
2.18556e-06, 0.000124776, 5.53378e-05, 4.10249e-05, 1.35217e-05, 4.06359e-05, 4.86716e-05, 5.64223e-05, 3.61076e-06, 3.01501e-05, 1.19816e-05, 2.52752e-05,
7.21517e-05, 0.0001377, 5.05299e-06, 1.93474e-05, 3.68452e-05, 0.00022212, 2.98007e-05, 1.03303e-05, 2.90345e-05, 7.92668e-05, 6.1713e-05, 1.8952e-05, 8.69072e-05,
5.02928e-06, 6.95078e-06, 8.70415e-06, 5.37277e-05, 7.27202e-06, 9.20753e-06, 1.97884e-05, 6.59321e-05, 0.000181516, 0.000110097, 8.29987e-06, 5.00038e-05,
1.65523e-05, 1.54231e-05, 7.1877e-06, 7.86611e-06, 6.24605e-05, 2.14312e-05, 1.39855e-05, 2.37014e-05, 6.96229e-05, 2.96929e-05, 3.7715e-05, 9.03529e-06,
7.10963e-06, 9.95271e-06, 4.41317e-05, 2.68033e-05, 4.42794e-05, 8.46603e-07, 2.05626e-06, 1.02834e-05, 3.41459e-05, 3.56458e-05, 8.57779e-05, 1.62817e-05,
3.55657e-05, 1.04504e-05, 4.29853e-06, 4.81446e-05, 1.42907e-05, 4.08343e-05, 2.28943e-06, 1.90769e-05, 4.10298e-07, 3.12185e-06, 3.46577e-05, 2.7524e-05,
2.85372e-05, 7.43253e-08, 1.18828e-05, 6.1264e-05, 6.20838e-05, 5.06573e-05, 4.73079e-05, 1.35264e-05, 4.34162e-06, 6.65957e-05, 5.54485e-06, 1.52808e-05,
1.78034e-05, 2.89814e-05, 5.69149e-06, 5.4498e-05, 1.311e-05, 4.26476e-05, 7.12221e-06, 1.47295e-05, 7.40497e-06, 8.5157e-06, 2.59274e-05, 2.97215e-06, 2.717e-06,
2.10048e-06, 2.29499e-05, 6.52502e-06, 2.29313e-05, 3.03064e-05, 2.05924e-05, 2.00482e-05, 7.23307e-06, 8.58007e-06, 1.57512e-05, 1.94231e-05, 4.30304e-05,
1.32276e-05, 6.64703e-06, 4.98107e-06, 5.71107e-06, 2.38836e-05, 8.26851e-05, 4.82074e-05, 8.85362e-06, 6.09667e-05, 3.3387e-05, 5.85419e-05, 3.93812e-05,
6.56306e-06, 5.93025e-06, 5.47808e-05, 5.41627e-06, 1.03656e-05, 2.09964e-05, 1.79349e-05, 4.37059e-05, 2.96814e-05, 4.34452e-05, 1.03877e-05, 3.18537e-05,
3.56703e-05, 3.17122e-05, 5.45428e-05, 1.10964e-05, 1.44927e-05, 3.07482e-05, 3.83123e-05, 1.69249e-05, 1.48359e-05, 3.26851e-05, 2.25956e-05, 1.28708e-05,
9.69076e-06, 1.48894e-05, 4.8902e-05, 4.43178e-05, 9.36054e-06, 2.81874e-05, 1.53709e-05, 5.26779e-06, 1.85486e-05, 9.71211e-06, 7.01622e-08, 2.7211e-05,
1.56876e-05, 9.71554e-06, 0.000101066, 1.16948e-05, 7.24089e-05, 6.76482e-06, 4.54506e-06, 0.000106114, 4.50879e-06, 6.89958e-06, 2.14902e-05, 2.90355e-06,
6.56397e-06, 3.55394e-05, 3.90692e-05, 1.96225e-05, 1.10575e-05, 6.48834e-06, 3.97175e-07, 1.3113e-05, 4.34389e-05, 8.29937e-06, 1.25448e-06, 4.94788e-05,
4.76365e-05, 1.58622e-05, 6.26157e-05, 1.56639e-05, 2.36443e-05, 3.35539e-05, 3.41789e-05, 2.32713e-05, 1.43118e-06, 4.78403e-08, 1.34961e-05, 3.49098e-05,
8.19945e-06, 4.04466e-05, 7.06985e-05, 1.93617e-05, 1.14757e-05, 1.91357e-05, 0.000118972, 2.75716e-05, 6.31364e-06, 1.4942e-05, 1.53399e-05, 5.70456e-06,
1.02271e-05, 8.0915e-06, 2.55736e-05, 3.1275e-05, 1.24581e-05, 1.16267e-05, 3.60556e-05, 8.12425e-07, 3.23083e-05, 2.82164e-05, 1.36932e-05, 3.37423e-06,
2.52285e-05, 4.34179e-06, 4.57394e-05, 5.15042e-06, 1.9917e-05, 1.61678e-05, 1.42454e-05, 2.3783e-05, 2.22962e-07, 8.75658e-06, 1.1257e-05)
|
/stan/spectral-inference.data.R
|
permissive
|
numericalalgorithmsgroup/bayesian-uq
|
R
| false
| false
| 15,015
|
r
|
# Data dump consumed by a Stan model (stan/spectral-inference).
K <- 5         # NOTE(review): scalar model constant -- semantics defined by the Stan model; confirm there
M <- 400       # length of the observation grid `p` defined below (0.05..20 in 0.05 steps)
M_pred <- 400  # length of the prediction grid `p_pred` defined below
p <-
c(0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9,
0.95, 1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9,
1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5, 2.55, 2.6, 2.65, 2.7, 2.75, 2.8, 2.85, 2.9,
2.95, 3, 3.05, 3.1, 3.15, 3.2, 3.25, 3.3, 3.35, 3.4, 3.45, 3.5, 3.55, 3.6, 3.65, 3.7, 3.75, 3.8, 3.85, 3.9,
3.95, 4, 4.05, 4.1, 4.15, 4.2, 4.25, 4.3, 4.35, 4.4, 4.45, 4.5, 4.55, 4.6, 4.65, 4.7, 4.75, 4.8, 4.85, 4.9,
4.95, 5, 5.05, 5.1, 5.15, 5.2, 5.25, 5.3, 5.35, 5.4, 5.45, 5.5, 5.55, 5.6, 5.65, 5.7, 5.75, 5.8, 5.85, 5.9,
5.95, 6, 6.05, 6.1, 6.15, 6.2, 6.25, 6.3, 6.35, 6.4, 6.45, 6.5, 6.55, 6.6, 6.65, 6.7, 6.75, 6.8, 6.85, 6.9,
6.95, 7, 7.05, 7.1, 7.15, 7.2, 7.25, 7.3, 7.35, 7.4, 7.45, 7.5, 7.55, 7.6, 7.65, 7.7, 7.75, 7.8, 7.85, 7.9,
7.95, 8, 8.05, 8.1, 8.15, 8.2, 8.25, 8.3, 8.35, 8.4, 8.45, 8.5, 8.55, 8.6, 8.65, 8.7, 8.75, 8.8, 8.85, 8.9,
8.95, 9, 9.05, 9.1, 9.15, 9.2, 9.25, 9.3, 9.35, 9.4, 9.45, 9.5, 9.55, 9.6, 9.65, 9.7, 9.75, 9.8, 9.85, 9.9,
9.95, 10, 10.05, 10.1, 10.15, 10.2, 10.25, 10.3, 10.35, 10.4, 10.45, 10.5, 10.55, 10.6, 10.65, 10.7, 10.75,
10.8, 10.85, 10.9, 10.95, 11, 11.05, 11.1, 11.15, 11.2, 11.25, 11.3, 11.35, 11.4, 11.45, 11.5, 11.55, 11.6,
11.65, 11.7, 11.75, 11.8, 11.85, 11.9, 11.95, 12, 12.05, 12.1, 12.15, 12.2, 12.25, 12.3, 12.35, 12.4, 12.45,
12.5, 12.55, 12.6, 12.65, 12.7, 12.75, 12.8, 12.85, 12.9, 12.95, 13, 13.05, 13.1, 13.15, 13.2, 13.25, 13.3,
13.35, 13.4, 13.45, 13.5, 13.55, 13.6, 13.65, 13.7, 13.75, 13.8, 13.85, 13.9, 13.95, 14, 14.05, 14.1, 14.15,
14.2, 14.25, 14.3, 14.35, 14.4, 14.45, 14.5, 14.55, 14.6, 14.65, 14.7, 14.75, 14.8, 14.85, 14.9, 14.95, 15,
15.05, 15.1, 15.15, 15.2, 15.25, 15.3, 15.35, 15.4, 15.45, 15.5, 15.55, 15.6, 15.65, 15.7, 15.75, 15.8,
15.85, 15.9, 15.95, 16, 16.05, 16.1, 16.15, 16.2, 16.25, 16.3, 16.35, 16.4, 16.45, 16.5, 16.55, 16.6, 16.65,
16.7, 16.75, 16.8, 16.85, 16.9, 16.95, 17, 17.05, 17.1, 17.15, 17.2, 17.25, 17.3, 17.35, 17.4, 17.45, 17.5,
17.55, 17.6, 17.65, 17.7, 17.75, 17.8, 17.85, 17.9, 17.95, 18, 18.05, 18.1, 18.15, 18.2, 18.25, 18.3, 18.35,
18.4, 18.45, 18.5, 18.55, 18.6, 18.65, 18.7, 18.75, 18.8, 18.85, 18.9, 18.95, 19, 19.05, 19.1, 19.15, 19.2,
19.25, 19.3, 19.35, 19.4, 19.45, 19.5, 19.55, 19.6, 19.65, 19.7, 19.75, 19.8, 19.85, 19.9, 19.95, 20)
p_pred <-
c(0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85,
0.9, 0.95, 1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85,
1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5, 2.55, 2.6, 2.65, 2.7, 2.75, 2.8, 2.85,
2.9, 2.95, 3, 3.05, 3.1, 3.15, 3.2, 3.25, 3.3, 3.35, 3.4, 3.45, 3.5, 3.55, 3.6, 3.65, 3.7, 3.75, 3.8, 3.85,
3.9, 3.95, 4, 4.05, 4.1, 4.15, 4.2, 4.25, 4.3, 4.35, 4.4, 4.45, 4.5, 4.55, 4.6, 4.65, 4.7, 4.75, 4.8, 4.85,
4.9, 4.95, 5, 5.05, 5.1, 5.15, 5.2, 5.25, 5.3, 5.35, 5.4, 5.45, 5.5, 5.55, 5.6, 5.65, 5.7, 5.75, 5.8, 5.85,
5.9, 5.95, 6, 6.05, 6.1, 6.15, 6.2, 6.25, 6.3, 6.35, 6.4, 6.45, 6.5, 6.55, 6.6, 6.65, 6.7, 6.75, 6.8, 6.85,
6.9, 6.95, 7, 7.05, 7.1, 7.15, 7.2, 7.25, 7.3, 7.35, 7.4, 7.45, 7.5, 7.55, 7.6, 7.65, 7.7, 7.75, 7.8, 7.85,
7.9, 7.95, 8, 8.05, 8.1, 8.15, 8.2, 8.25, 8.3, 8.35, 8.4, 8.45, 8.5, 8.55, 8.6, 8.65, 8.7, 8.75, 8.8, 8.85,
8.9, 8.95, 9, 9.05, 9.1, 9.15, 9.2, 9.25, 9.3, 9.35, 9.4, 9.45, 9.5, 9.55, 9.6, 9.65, 9.7, 9.75, 9.8, 9.85,
9.9, 9.95, 10, 10.05, 10.1, 10.15, 10.2, 10.25, 10.3, 10.35, 10.4, 10.45, 10.5, 10.55, 10.6, 10.65, 10.7,
10.75, 10.8, 10.85, 10.9, 10.95, 11, 11.05, 11.1, 11.15, 11.2, 11.25, 11.3, 11.35, 11.4, 11.45, 11.5, 11.55,
11.6, 11.65, 11.7, 11.75, 11.8, 11.85, 11.9, 11.95, 12, 12.05, 12.1, 12.15, 12.2, 12.25, 12.3, 12.35, 12.4,
12.45, 12.5, 12.55, 12.6, 12.65, 12.7, 12.75, 12.8, 12.85, 12.9, 12.95, 13, 13.05, 13.1, 13.15, 13.2, 13.25,
13.3, 13.35, 13.4, 13.45, 13.5, 13.55, 13.6, 13.65, 13.7, 13.75, 13.8, 13.85, 13.9, 13.95, 14, 14.05, 14.1,
14.15, 14.2, 14.25, 14.3, 14.35, 14.4, 14.45, 14.5, 14.55, 14.6, 14.65, 14.7, 14.75, 14.8, 14.85, 14.9,
14.95, 15, 15.05, 15.1, 15.15, 15.2, 15.25, 15.3, 15.35, 15.4, 15.45, 15.5, 15.55, 15.6, 15.65, 15.7, 15.75,
15.8, 15.85, 15.9, 15.95, 16, 16.05, 16.1, 16.15, 16.2, 16.25, 16.3, 16.35, 16.4, 16.45, 16.5, 16.55, 16.6,
16.65, 16.7, 16.75, 16.8, 16.85, 16.9, 16.95, 17, 17.05, 17.1, 17.15, 17.2, 17.25, 17.3, 17.35, 17.4, 17.45,
17.5, 17.55, 17.6, 17.65, 17.7, 17.75, 17.8, 17.85, 17.9, 17.95, 18, 18.05, 18.1, 18.15, 18.2, 18.25, 18.3,
18.35, 18.4, 18.45, 18.5, 18.55, 18.6, 18.65, 18.7, 18.75, 18.8, 18.85, 18.9, 18.95, 19, 19.05, 19.1, 19.15,
19.2, 19.25, 19.3, 19.35, 19.4, 19.45, 19.5, 19.55, 19.6, 19.65, 19.7, 19.75, 19.8, 19.85, 19.9, 19.95, 20)
y1 <-
c(0.000950694, 0.000100472, 0.000284786, 0.000723328, 7.59677e-05, 6.60121e-05, 0.000125453, 0.000242469, 2.32638e-06, 0.000171246, 5.62755e-05, 0.000186667,
0.000236849, 0.000728013, 9.30248e-05, 0.00103827, 0.000462487, 0.000417968, 0.000663409, 0.0004442, 0.000130493, 0.000137924, 0.000138866, 5.89177e-05,
2.41176e-05, 3.59655e-07, 8.60833e-05, 0.000763019, 0.00053405, 0.000273502, 0.000148277, 0.000450883, 6.4654e-05, 6.31175e-05, 0.000330077, 0.000345304,
1.7381e-05, 0.000460998, 0.000292563, 1.40214e-05, 0.000421933, 0.000223642, 0.000257569, 0.000789927, 6.18638e-05, 0.000364691, 0.000273004, 0.000184768,
0.000441683, 0.00010666, 7.01966e-06, 0.000593848, 0.000450213, 0.000265892, 0.000204705, 0.000191059, 0.000124129, 0.000152955, 0.000694124, 0.00080139,
0.000574785, 4.09561e-05, 0.000490065, 0.000602765, 2.94108e-05, 0.000116517, 6.59371e-05, 7.66676e-05, 0.000577547, 9.27305e-05, 8.05981e-05, 0.000391608,
0.000310647, 6.36681e-05, 0.000607592, 2.59618e-05, 0.000523886, 0.000210237, 2.97203e-05, 0.000848244, 0.000682861, 0.000196708, 3.97563e-05, 8.47971e-05,
0.000458676, 0.000570211, 0.000275825, 3.24322e-05, 5.42299e-05, 0.000173264, 0.00148345, 0.000558109, 0.000592152, 0.000566271, 8.96652e-05, 0.000712106,
0.000714956, 0.000333143, 0.000714003, 7.59696e-05, 0.000232356, 9.46624e-05, 0.0002187, 5.16105e-05, 0.000218552, 0.000319414, 0.00113855, 0.00019085, 0.000270743,
0.000809653, 0.000354291, 4.57559e-05, 0.000250445, 0.000289647, 0.000105813, 0.00020895, 0.000330067, 0.000330021, 0.000459942, 0.000256966, 0.000389509,
0.000606176, 0.000308375, 0.000205461, 7.63616e-07, 0.00017752, 0.000865931, 0.000187246, 0.000101152, 0.0006392, 0.00217055, 0.000368677, 3.32983e-05, 0.00240438,
0.000202789, 0.0002399, 0.000824301, 0.000411754, 0.000137432, 6.85329e-05, 0.00127287, 0.000510566, 0.000506751, 9.64091e-05, 0.00212425, 0.00198812, 0.000501766,
0.000268495, 0.000417285, 0.000921504, 0.000429363, 0.000450722, 9.86438e-05, 2.08455e-05, 0.000806218, 0.000128374, 0.000669037, 0.000362092, 0.000266457,
0.00123601, 0.00140207, 0.000167541, 0.000133921, 0.000981286, 0.000497034, 0.000185602, 0.000525644, 0.000820744, 0.000445152, 0.000175099, 0.000356201,
0.00057206, 9.60383e-05, 0.00170762, 0.000120999, 1.84466e-05, 0.000535107, 0.000232882, 0.000161675, 1.74486e-06, 6.55386e-05, 0.000570099, 0.00268864,
0.000162227, 0.000204169, 4.6126e-05, 0.000619391, 0.000623291, 0.00082568, 0.00329517, 4.72057e-05, 0.00204973, 0.00107278, 0.00120694, 0.000460209, 0.000441095,
0.00173734, 7.9e-05, 0.000178284, 0.00154437, 0.00152134, 0.00243212, 0.000766236, 0.000172554, 0.00200168, 0.00101942, 0.00114208, 0.00145731, 0.000343897,
0.00145234, 0.000221856, 3.46792e-05, 0.00117203, 0.00244421, 0.00137182, 0.0038075, 0.00080364, 0.00077727, 0.000370457, 0.0015418, 8.52371e-05, 0.00110611,
0.00102401, 0.000192526, 0.00153896, 0.00274202, 0.00247043, 0.00150429, 0.0010302, 0.000621385, 0.000220496, 0.00220061, 2.65343e-05, 0.00110467, 0.00126262,
0.00143174, 0.00042738, 0.000949809, 0.000756996, 0.00464559, 0.00110925, 0.000116117, 0.00664934, 0.00282585, 0.00512823, 0.00268813, 0.00128588, 0.000914513,
0.000203088, 0.00258571, 0.000843777, 0.0004431, 0.00468668, 0.000944523, 0.00192031, 0.000453996, 0.000236769, 5.05225e-05, 4.70717e-06, 0.000437896, 7.85407e-05,
0.000432231, 0.000517015, 0.00106664, 0.00111833, 0.00253613, 0.00502752, 0.00451049, 0.00233734, 0.000795093, 9.95417e-06, 0.00297299, 0.00307492, 0.00067358,
0.000822174, 0.00455754, 0.00134401, 0.00149708, 0.000385462, 0.000318975, 0.000221759, 0.00110539, 0.00272828, 0.000704706, 0.000895241, 0.000738, 0.00185644,
0.00137739, 0.000901178, 0.000196652, 0.000142674, 0.000897528, 0.000253262, 0.0010359, 0.000457437, 0.00115979, 0.00291484, 0.00032283, 0.00065205, 0.000235218,
0.00170149, 0.00118083, 0.0025975, 0.000451825, 0.0011161, 0.00214706, 0.000522623, 0.000221547, 0.000694211, 0.000419788, 7.34707e-05, 0.00134859, 0.000267664,
0.00113681, 0.000214428, 0.000190714, 7.67995e-05, 0.000198368, 0.000535419, 0.00117289, 0.000236443, 0.000140572, 1.84513e-05, 0.000272901, 0.00012438,
6.82109e-05, 0.000444841, 1.66695e-05, 8.11331e-05, 1.03227e-05, 0.000441749, 0.000239293, 6.27909e-05, 0.000195223, 0.000428386, 0.000257245, 0.000168394,
0.000186031, 8.34549e-05, 8.74632e-05, 0.000122486, 0.000118441, 5.01524e-05, 0.000161396, 0.000348425, 0.000120009, 0.00024743, 0.000144947, 0.000510039,
0.000132811, 3.53869e-05, 0.000399068, 0.000478762, 0.000570554, 0.000214899, 1.95449e-05, 0.000190127, 0.000404915, 0.000436742, 0.000147853, 4.698e-05,
8.77014e-06, 0.000632236, 0.000344514, 0.000548116, 0.000137656, 9.80768e-05, 0.000101471, 8.30364e-05, 0.000253635, 5.60513e-05, 8.63947e-05, 2.92953e-06,
8.07772e-05, 0.000212242, 3.03592e-05, 4.26439e-05, 3.00424e-05, 0.000299367, 2.22885e-06, 3.30225e-05, 4.1958e-05, 0.000206661, 0.00044979, 0.000213808,
6.01316e-05, 1.7831e-05, 0.000916718, 3.16138e-05, 9.1341e-05, 0.000526221, 9.73829e-05, 1.03217e-05, 0.000131133, 7.38675e-05, 1.62431e-05, 0.000210628,
7.53628e-05, 2.32968e-05, 3.87115e-05)
y2 <-
c(1.70903e-05, 1.43205e-06, 5.69705e-05, 1.26785e-05, 4.50302e-05, 2.39866e-05, 0.000167932, 5.63378e-05, 1.26468e-05, 5.30216e-05, 6.13271e-05, 0.000169797,
1.67203e-05, 0.000212684, 5.57558e-05, 5.975e-05, 1.43571e-05, 3.09176e-05, 2.27439e-05, 2.77333e-06, 2.00965e-05, 1.05565e-05, 0.000106833, 5.0816e-05,
0.000111036, 0.000111545, 0.000428684, 8.17717e-05, 9.36581e-05, 6.43754e-06, 0.000112246, 5.48189e-05, 0.000214686, 0.000252255, 0.000274713, 2.72032e-05,
1.29854e-06, 6.85314e-05, 7.21489e-05, 0.000559881, 0.000194007, 4.64904e-05, 5.78255e-05, 6.39679e-05, 0.000114837, 0.000113754, 5.91291e-05, 6.29467e-05,
0.000127672, 0.000204454, 0.000186891, 1.49527e-05, 5.93522e-06, 0.000271353, 2.15285e-05, 0.000140128, 2.09503e-05, 0.000197482, 1.77413e-05, 0.000120912,
0.000111355, 0.000162782, 9.06896e-05, 7.00809e-05, 7.27688e-05, 7.23059e-05, 0.000125125, 0.000493616, 8.65903e-06, 0.000123637, 0.000171635, 7.71746e-05,
0.000302026, 0.000293828, 0.000126379, 7.81437e-05, 0.000181503, 8.3496e-05, 9.21776e-05, 0.000128142, 9.49039e-05, 7.74169e-05, 3.03222e-05, 5.61261e-05,
6.95719e-05, 7.25609e-05, 9.43222e-06, 0.000337022, 0.000179926, 2.19074e-05, 0.000132887, 3.88613e-05, 0.000297456, 5.83477e-05, 0.000302062, 8.44286e-05,
0.000267442, 0.000167872, 0.000161967, 0.000363877, 3.4723e-05, 0.000171926, 0.000351901, 0.000142601, 6.54218e-05, 0.000153731, 0.000262694, 0.000156407,
0.000167841, 6.97706e-05, 0.000281245, 0.000332253, 0.000263087, 0.000690213, 7.3549e-06, 0.000258066, 0.000312366, 3.98542e-05, 3.13189e-05, 0.000283254,
4.57523e-05, 0.000307489, 0.00116695, 0.00070937, 0.00106793, 6.66689e-06, 0.000340069, 0.00113324, 0.000249947, 4.60133e-05, 0.000208328, 0.000877775, 7.94085e-06,
0.00041582, 0.000376282, 0.000140936, 0.00110134, 0.00034063, 0.000187602, 0.000132935, 7.06255e-05, 1.10714e-05, 0.00024811, 8.98584e-05, 0.000103464, 0.00021512,
0.000208232, 0.000420656, 4.804e-05, 3.05617e-05, 9.28038e-05, 5.72083e-05, 0.000264751, 7.22665e-05, 5.04768e-05, 3.77965e-05, 0.000107236, 4.5979e-05, 5.3729e-05,
0.000165742, 2.45246e-05, 7.43235e-05, 7.99276e-05, 6.47654e-08, 8.93356e-08, 0.000125654, 3.32633e-05, 2.89942e-06, 3.35838e-05, 0.000127639, 0.0001511,
2.18556e-06, 0.000124776, 5.53378e-05, 4.10249e-05, 1.35217e-05, 4.06359e-05, 4.86716e-05, 5.64223e-05, 3.61076e-06, 3.01501e-05, 1.19816e-05, 2.52752e-05,
7.21517e-05, 0.0001377, 5.05299e-06, 1.93474e-05, 3.68452e-05, 0.00022212, 2.98007e-05, 1.03303e-05, 2.90345e-05, 7.92668e-05, 6.1713e-05, 1.8952e-05, 8.69072e-05,
5.02928e-06, 6.95078e-06, 8.70415e-06, 5.37277e-05, 7.27202e-06, 9.20753e-06, 1.97884e-05, 6.59321e-05, 0.000181516, 0.000110097, 8.29987e-06, 5.00038e-05,
1.65523e-05, 1.54231e-05, 7.1877e-06, 7.86611e-06, 6.24605e-05, 2.14312e-05, 1.39855e-05, 2.37014e-05, 6.96229e-05, 2.96929e-05, 3.7715e-05, 9.03529e-06,
7.10963e-06, 9.95271e-06, 4.41317e-05, 2.68033e-05, 4.42794e-05, 8.46603e-07, 2.05626e-06, 1.02834e-05, 3.41459e-05, 3.56458e-05, 8.57779e-05, 1.62817e-05,
3.55657e-05, 1.04504e-05, 4.29853e-06, 4.81446e-05, 1.42907e-05, 4.08343e-05, 2.28943e-06, 1.90769e-05, 4.10298e-07, 3.12185e-06, 3.46577e-05, 2.7524e-05,
2.85372e-05, 7.43253e-08, 1.18828e-05, 6.1264e-05, 6.20838e-05, 5.06573e-05, 4.73079e-05, 1.35264e-05, 4.34162e-06, 6.65957e-05, 5.54485e-06, 1.52808e-05,
1.78034e-05, 2.89814e-05, 5.69149e-06, 5.4498e-05, 1.311e-05, 4.26476e-05, 7.12221e-06, 1.47295e-05, 7.40497e-06, 8.5157e-06, 2.59274e-05, 2.97215e-06, 2.717e-06,
2.10048e-06, 2.29499e-05, 6.52502e-06, 2.29313e-05, 3.03064e-05, 2.05924e-05, 2.00482e-05, 7.23307e-06, 8.58007e-06, 1.57512e-05, 1.94231e-05, 4.30304e-05,
1.32276e-05, 6.64703e-06, 4.98107e-06, 5.71107e-06, 2.38836e-05, 8.26851e-05, 4.82074e-05, 8.85362e-06, 6.09667e-05, 3.3387e-05, 5.85419e-05, 3.93812e-05,
6.56306e-06, 5.93025e-06, 5.47808e-05, 5.41627e-06, 1.03656e-05, 2.09964e-05, 1.79349e-05, 4.37059e-05, 2.96814e-05, 4.34452e-05, 1.03877e-05, 3.18537e-05,
3.56703e-05, 3.17122e-05, 5.45428e-05, 1.10964e-05, 1.44927e-05, 3.07482e-05, 3.83123e-05, 1.69249e-05, 1.48359e-05, 3.26851e-05, 2.25956e-05, 1.28708e-05,
9.69076e-06, 1.48894e-05, 4.8902e-05, 4.43178e-05, 9.36054e-06, 2.81874e-05, 1.53709e-05, 5.26779e-06, 1.85486e-05, 9.71211e-06, 7.01622e-08, 2.7211e-05,
1.56876e-05, 9.71554e-06, 0.000101066, 1.16948e-05, 7.24089e-05, 6.76482e-06, 4.54506e-06, 0.000106114, 4.50879e-06, 6.89958e-06, 2.14902e-05, 2.90355e-06,
6.56397e-06, 3.55394e-05, 3.90692e-05, 1.96225e-05, 1.10575e-05, 6.48834e-06, 3.97175e-07, 1.3113e-05, 4.34389e-05, 8.29937e-06, 1.25448e-06, 4.94788e-05,
4.76365e-05, 1.58622e-05, 6.26157e-05, 1.56639e-05, 2.36443e-05, 3.35539e-05, 3.41789e-05, 2.32713e-05, 1.43118e-06, 4.78403e-08, 1.34961e-05, 3.49098e-05,
8.19945e-06, 4.04466e-05, 7.06985e-05, 1.93617e-05, 1.14757e-05, 1.91357e-05, 0.000118972, 2.75716e-05, 6.31364e-06, 1.4942e-05, 1.53399e-05, 5.70456e-06,
1.02271e-05, 8.0915e-06, 2.55736e-05, 3.1275e-05, 1.24581e-05, 1.16267e-05, 3.60556e-05, 8.12425e-07, 3.23083e-05, 2.82164e-05, 1.36932e-05, 3.37423e-06,
2.52285e-05, 4.34179e-06, 4.57394e-05, 5.15042e-06, 1.9917e-05, 1.61678e-05, 1.42454e-05, 2.3783e-05, 2.22962e-07, 8.75658e-06, 1.1257e-05)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_pycnocline.R
\name{calculate_pycnocline}
\alias{calculate_pycnocline}
\title{Calculate pycnocline depth as the maximum rate of increase in density at depths below the mixed layer depth and below the reference depth}
\usage{
calculate_pycnocline(rho, z, mld = NULL, ref.depth = 5)
}
\arguments{
\item{rho}{Numeric vector of density}
\item{z}{Numeric vector of depths}
\item{mld}{Optional mixed layer depth used to constrain the search; defaults to \code{NULL} (TODO: confirm semantics in R/calculate_pycnocline.R)}
\item{ref.depth}{Reference depth (default 5) below which the pycnocline is sought}
}
\description{
Find the depth of the pycnocline as the depth where density is changing the fastest
}
|
/man/calculate_pycnocline.Rd
|
no_license
|
sean-rohan-NOAA/TLUtilities
|
R
| false
| true
| 551
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_pycnocline.R
\name{calculate_pycnocline}
\alias{calculate_pycnocline}
\title{Calculate pycnocline depth as the maximum rate of increase in density at depths below the mixed layer depth and below the reference depth}
\usage{
calculate_pycnocline(rho, z, mld = NULL, ref.depth = 5)
}
\arguments{
\item{rho}{Numeric vector of density}
\item{z}{Numeric vector of depths}
\item{mld}{Optional mixed layer depth used to constrain the search; defaults to \code{NULL} (TODO: confirm semantics in R/calculate_pycnocline.R)}
\item{ref.depth}{Reference depth (default 5) below which the pycnocline is sought}
}
\description{
Find the depth of the pycnocline as the depth where density is changing the fastest
}
|
\name{copulaIVTS-package}
\alias{copulaIVTS-package}
\alias{copulaIVTS}
\docType{package}
\title{
Gaussian copula model for Integer-valued Time series - Maximum Likelihood estimation using the MCEM
}
\description{
The R package copulaIVTS provides all the statistical R code used to implement a Monte Carlo Expectation Maximisation algorithm for maximum likelihood estimation of the dependence parameters of a digitised Gaussian ARMA model. It is also useful for count time series.
Monte Carlo EM algorithm Code.
Code to run the Monte Carlo EM algorithm for parameter estimation of the Gaussian copula model with specified discrete marginals.
To be added to the R package shortly are two alternative estimation methods to fit the digitised Gaussian ARMA model provided in the Thesis link:
via Approximate Bayesian Computation (ABC) and
via approximate MLE via a d-vine bivariate copula representation.
}
\details{
\tabular{ll}{
Package: \tab copulaIVTS\cr
Type: \tab Package\cr
Version: \tab 1.2\cr
Date: \tab 2021-03-23\cr
License: \tab GPL (>=2) \cr
}
}
\author{
Hannah Lennon
<drhannahlennon@gmail.com>
}
\references{
Lennon H., & Yuan J., Estimation of a digitised Gaussian ARMA model by Monte Carlo Expectation Maximisation, Computational Statistics & Data Analysis 2018;8:e020683
Lennon, H., 2016. Gaussian copula modelling for integer-valued time series (Doctoral Thesis, University of Manchester).
}
\keyword{ package }
\seealso{
}
\examples{
}
|
/man/copulaIVTS-package.Rd
|
no_license
|
hlennon/copulaIVTS
|
R
| false
| false
| 1,443
|
rd
|
\name{copulaIVTS-package}
\alias{copulaIVTS-package}
\alias{copulaIVTS}
\docType{package}
\title{
Gaussian copula model for Integer-valued Time series - Maximum Likelihood estimation using the MCEM
}
\description{
The R package copulaIVTS provides all the statistical R code used to implement a Monte Carlo Expectation Maximisation algorithm for maximum likelihood estimation of the dependence parameters of a digitised Gaussian ARMA model. It is also useful for count time series.
Monte Carlo EM algorithm Code.
Code to run the Monte Carlo EM algorithm for parameter estimation of the Gaussian copula model with specified discrete marginals.
To be added to the R package shortly are two alternative estimation methods to fit the digitised Gaussian ARMA model provided in the Thesis link:
via Approximate Bayesian Computation (ABC) and
via approximate MLE via a d-vine bivariate copula representation.
}
\details{
\tabular{ll}{
Package: \tab copulaIVTS\cr
Type: \tab Package\cr
Version: \tab 1.2\cr
Date: \tab 2021-03-23\cr
License: \tab GPL (>=2) \cr
}
}
\author{
Hannah Lennon
<drhannahlennon@gmail.com>
}
\references{
Lennon H., & Yuan J., Estimation of a digitised Gaussian ARMA model by Monte Carlo Expectation Maximisation, Computational Statistics & Data Analysis 2018;8:e020683
Lennon, H., 2016. Gaussian copula modelling for integer-valued time series (Doctoral Thesis, University of Manchester).
}
\keyword{ package }
\seealso{
}
\examples{
}
|
# Exploratory Data Analysis - Plot 1.
# Downloads the UCI household power consumption data, subsets it to
# 1-2 Feb 2007, and writes a histogram of Global Active Power to plot1.png.

# Load required library (base is attached automatically; no library(base) needed)
library(data.table)
#setwd("C:\\coursera\\course4\\wk1\\project\\submit")

# download zip file
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipName <- file.path(getwd(), "input_data.zip")
download.file(url, zipName)
unzip(zipName, overwrite = TRUE)

# read input file (semicolon-delimited)
idt <- fread("household_power_consumption.txt", sep = ";")

# open the PNG device before drawing
png(file = "plot1.png", width = 480, height = 480, units = "px")

# note: must ignore NA when summing, e.g.
# sum(as.numeric(t1$Global_active_power), na.rm = TRUE)
# big object, ~150 MB in memory
object.size(idt)

# get only the data for required dates (Date column is d/m/Y text)
dt1 <- idt[(idt$Date == "1/2/2007"), ]
dt2 <- idt[(idt$Date == "2/2/2007"), ]
# merge the two days into one much smaller data set (~394168 bytes)
td <- rbind(dt1, dt2)

# Plot 1: histogram of Global Active Power
hist(as.numeric(td$Global_active_power), col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")

# close the PNG device
dev.off()
# NOTE(review): dev.set(2) after dev.off() switches to another open device if
# one exists; this looks like a leftover from interactive use -- confirm intent.
dev.set(which = 2)
|
/plot1.R
|
no_license
|
dajit/ExData_Plotting1
|
R
| false
| false
| 1,142
|
r
|
# Exploratory Data Analysis - Plot 1.
# Downloads the UCI household power consumption data, subsets it to
# 1-2 Feb 2007, and writes a histogram of Global Active Power to plot1.png.

# Load required library (base is attached automatically; no library(base) needed)
library(data.table)
#setwd("C:\\coursera\\course4\\wk1\\project\\submit")

# download zip file
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipName <- file.path(getwd(), "input_data.zip")
download.file(url, zipName)
unzip(zipName, overwrite = TRUE)

# read input file (semicolon-delimited)
idt <- fread("household_power_consumption.txt", sep = ";")

# open the PNG device before drawing
png(file = "plot1.png", width = 480, height = 480, units = "px")

# note: must ignore NA when summing, e.g.
# sum(as.numeric(t1$Global_active_power), na.rm = TRUE)
# big object, ~150 MB in memory
object.size(idt)

# get only the data for required dates (Date column is d/m/Y text)
dt1 <- idt[(idt$Date == "1/2/2007"), ]
dt2 <- idt[(idt$Date == "2/2/2007"), ]
# merge the two days into one much smaller data set (~394168 bytes)
td <- rbind(dt1, dt2)

# Plot 1: histogram of Global Active Power
hist(as.numeric(td$Global_active_power), col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")

# close the PNG device
dev.off()
# NOTE(review): dev.set(2) after dev.off() switches to another open device if
# one exists; this looks like a leftover from interactive use -- confirm intent.
dev.set(which = 2)
|
# Example script extracted from the gamlss.data package documentation.
# Loads the Reading Glasses data set and plots reading age against sex.
library(gamlss.data)
### Name: glasses
### Title: Reading Glasses Data
### Aliases: glasses
### Keywords: datasets
### ** Examples
# Load the Reading Glasses data set into the workspace.
data(glasses)
# Formula-interface plot of ageread by sex (dispatches to a boxplot when
# `sex` is a factor -- presumably it is; see ?glasses to confirm).
plot(ageread~sex, data=glasses)
|
/data/genthat_extracted_code/gamlss.data/examples/glasses.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 184
|
r
|
# Example script extracted from the gamlss.data package documentation.
# Loads the Reading Glasses data set and plots reading age against sex.
library(gamlss.data)
### Name: glasses
### Title: Reading Glasses Data
### Aliases: glasses
### Keywords: datasets
### ** Examples
# Load the Reading Glasses data set into the workspace.
data(glasses)
# Formula-interface plot of ageread by sex (dispatches to a boxplot when
# `sex` is a factor -- presumably it is; see ?glasses to confirm).
plot(ageread~sex, data=glasses)
|
## unit-tests
source("../../r-toolkit/checks.R")

# Checks that generate.outcomes() allocates units across levels in proportion
# to the supplied distribution: level i (with distr weight i) should appear
# exactly i * per_level times among the generated outcomes.
test.generate.outcomes <- function() {
  n_levels <- rpois(1, lambda = 5)
  level_ids <- 1:n_levels
  per_level <- 10
  n_units <- sum(level_ids) * per_level
  draws <- generate.outcomes(n_units, levels = 1:n_levels, distr = level_ids)
  picked <- sample(level_ids, size = 1)
  print(sprintf("Checking K=%d, N=%d units, i=%d", n_levels, n_units, picked))
  print(draws)
  CHECK_EQ(length(which(draws == picked)), picked * per_level)
}
|
/noncompliance/unit-tests.R
|
no_license
|
ptoulis/causal-effects-mechanisms
|
R
| false
| false
| 334
|
r
|
## unit-tests
source("../../r-toolkit/checks.R")

# Checks that generate.outcomes() allocates units across levels in proportion
# to the supplied distribution: level i (with distr weight i) should appear
# exactly i * per_level times among the generated outcomes.
test.generate.outcomes <- function() {
  n_levels <- rpois(1, lambda = 5)
  level_ids <- 1:n_levels
  per_level <- 10
  n_units <- sum(level_ids) * per_level
  draws <- generate.outcomes(n_units, levels = 1:n_levels, distr = level_ids)
  picked <- sample(level_ids, size = 1)
  print(sprintf("Checking K=%d, N=%d units, i=%d", n_levels, n_units, picked))
  print(draws)
  CHECK_EQ(length(which(draws == picked)), picked * per_level)
}
|
# Example script extracted from the FHDI package documentation.
# Builds a toy incomplete data set, discretises it with FHDI_CellMake, and
# estimates joint cell probabilities with FHDI_CellProb.
# NOTE: statement order matters -- the random draws below consume the RNG
# stream in sequence after set.seed(1345).
library(FHDI)
### Name: FHDI_CellProb
### Title: Joint cell probabilities for multivariate incomplete categorical
### data
### Aliases: FHDI_CellProb
### Keywords: imputation missing data joint probability cellprob EM
### algorithm
### ** Examples
### Toy Example ###
# y : trivariate variables
# r : indicator corresponding to missingness in y
set.seed(1345)
n=100
rho=0.5
# Error terms driving the four correlated outcome variables.
e1=rnorm(n,0,1)
e2=rnorm(n,0,1)
e3=rgamma(n,1,1)
e4=rnorm(n,0,sd=sqrt(3/2))
# y2 has correlation rho with y1; y3 and y4 are built from y1 and y3.
y1=1+e1
y2=2+rho*e1+sqrt(1-rho^2)*e2
y3=y1+e3
y4=-1+0.5*y3+e4
# Response indicators: r==0 marks a value as missing (set to NA below).
r1=rbinom(n,1,prob=0.6)
r2=rbinom(n,1,prob=0.7)
r3=rbinom(n,1,prob=0.8)
r4=rbinom(n,1,prob=0.9)
y1[r1==0]=NA
y2[r2==0]=NA
y3[r3==0]=NA
y4[r4==0]=NA
daty=cbind(y1,y2,y3,y4)
# Discretise each variable into k=5 categories, then estimate the joint
# cell probabilities of the resulting categorical data.
result_CM=FHDI_CellMake(daty, k=5, s_op_merge="fixed")
datz=result_CM$cell
result_CP=FHDI_CellProb(datz)
names(result_CP)
|
/data/genthat_extracted_code/FHDI/examples/FHDI_CellProb.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 839
|
r
|
# Example script extracted from the FHDI package documentation.
# Builds a toy incomplete data set, discretises it with FHDI_CellMake, and
# estimates joint cell probabilities with FHDI_CellProb.
# NOTE: statement order matters -- the random draws below consume the RNG
# stream in sequence after set.seed(1345).
library(FHDI)
### Name: FHDI_CellProb
### Title: Joint cell probabilities for multivariate incomplete categorical
### data
### Aliases: FHDI_CellProb
### Keywords: imputation missing data joint probability cellprob EM
### algorithm
### ** Examples
### Toy Example ###
# y : trivariate variables
# r : indicator corresponding to missingness in y
set.seed(1345)
n=100
rho=0.5
# Error terms driving the four correlated outcome variables.
e1=rnorm(n,0,1)
e2=rnorm(n,0,1)
e3=rgamma(n,1,1)
e4=rnorm(n,0,sd=sqrt(3/2))
# y2 has correlation rho with y1; y3 and y4 are built from y1 and y3.
y1=1+e1
y2=2+rho*e1+sqrt(1-rho^2)*e2
y3=y1+e3
y4=-1+0.5*y3+e4
# Response indicators: r==0 marks a value as missing (set to NA below).
r1=rbinom(n,1,prob=0.6)
r2=rbinom(n,1,prob=0.7)
r3=rbinom(n,1,prob=0.8)
r4=rbinom(n,1,prob=0.9)
y1[r1==0]=NA
y2[r2==0]=NA
y3[r3==0]=NA
y4[r4==0]=NA
daty=cbind(y1,y2,y3,y4)
# Discretise each variable into k=5 categories, then estimate the joint
# cell probabilities of the resulting categorical data.
result_CM=FHDI_CellMake(daty, k=5, s_op_merge="fixed")
datz=result_CM$cell
result_CP=FHDI_CellProb(datz)
names(result_CP)
|
# Entry point for the COVID monitor Shiny app: pre-fetches (and memoises) the
# input data sets, then launches the app on the port given by the PORT
# environment variable.
library(shiny)
library(shinydashboard)
library(memoise)
library(plotly)
library(EpiEstim)

# Port to serve on. Fall back to 8080 when PORT is unset/empty so the app
# still starts locally (previously as.numeric("") yielded NA and runApp failed).
port <- Sys.getenv('PORT')
if (!nzchar(port)) {
  port <- "8080"
}
cache_timeout <- 1800 #half an hour

## Functions to get the data

# Get complete data_frame from server (OWID/JHU). Results are memoised for
# `cache_timeout` seconds so repeated calls don't re-download the file.
get_data <- memoise(function(location="https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/jhu/full_data.csv") {
  download.file(location, "full_data.csv")
  data <- read.csv(file = 'full_data.csv')
  return(data)
}, cache = cachem::cache_mem(max_age = cache_timeout))

# Get complete data_frame from GUIAD. Dates arrive as d/m/Y strings; parse
# them and sort the rows chronologically before returning.
get_data_guiad <- memoise(function(location="https://raw.githubusercontent.com/GUIAD-COVID/datos-y-visualizaciones-GUIAD/master/datos/estadisticasUY.csv") {
  download.file(location, "estadisticasUY.csv")
  data <- read.csv(file = 'estadisticasUY.csv',na="N/A")
  data[,"fecha"] <- as.Date(data[,"fecha"],format="%d/%m/%Y")
  data <- data[order(data[,"fecha"]),]
  return(data)
}, cache = cachem::cache_mem(max_age = cache_timeout))

# Get the Oxford COVID policy tracker (stringency index) data.
get_stringency_data <- memoise(function(location="https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv") {
  download.file(location, "stringency.csv")
  data <- read.csv(file = 'stringency.csv')
  return(data)
}, cache = cachem::cache_mem(max_age = cache_timeout))

# Retrieve data at startup so the first page load is fast.
data <- get_data()
guiad <- get_data_guiad()
stringency_data <- get_stringency_data()

shiny::runApp(
  appDir = getwd(),
  host = '0.0.0.0',
  port = as.numeric(port)
)
|
/run.R
|
permissive
|
fgomezotero/covid-r-monitor
|
R
| false
| false
| 1,505
|
r
|
# Entry point for the COVID monitor Shiny app: pre-fetches (and memoises) the
# input data sets, then launches the app on the port given by the PORT
# environment variable.
library(shiny)
library(shinydashboard)
library(memoise)
library(plotly)
library(EpiEstim)

# Port to serve on. Fall back to 8080 when PORT is unset/empty so the app
# still starts locally (previously as.numeric("") yielded NA and runApp failed).
port <- Sys.getenv('PORT')
if (!nzchar(port)) {
  port <- "8080"
}
cache_timeout <- 1800 #half an hour

## Functions to get the data

# Get complete data_frame from server (OWID/JHU). Results are memoised for
# `cache_timeout` seconds so repeated calls don't re-download the file.
get_data <- memoise(function(location="https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/jhu/full_data.csv") {
  download.file(location, "full_data.csv")
  data <- read.csv(file = 'full_data.csv')
  return(data)
}, cache = cachem::cache_mem(max_age = cache_timeout))

# Get complete data_frame from GUIAD. Dates arrive as d/m/Y strings; parse
# them and sort the rows chronologically before returning.
get_data_guiad <- memoise(function(location="https://raw.githubusercontent.com/GUIAD-COVID/datos-y-visualizaciones-GUIAD/master/datos/estadisticasUY.csv") {
  download.file(location, "estadisticasUY.csv")
  data <- read.csv(file = 'estadisticasUY.csv',na="N/A")
  data[,"fecha"] <- as.Date(data[,"fecha"],format="%d/%m/%Y")
  data <- data[order(data[,"fecha"]),]
  return(data)
}, cache = cachem::cache_mem(max_age = cache_timeout))

# Get the Oxford COVID policy tracker (stringency index) data.
get_stringency_data <- memoise(function(location="https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv") {
  download.file(location, "stringency.csv")
  data <- read.csv(file = 'stringency.csv')
  return(data)
}, cache = cachem::cache_mem(max_age = cache_timeout))

# Retrieve data at startup so the first page load is fast.
data <- get_data()
guiad <- get_data_guiad()
stringency_data <- get_stringency_data()

shiny::runApp(
  appDir = getwd(),
  host = '0.0.0.0',
  port = as.numeric(port)
)
|
#' Get station list for a particular U.S. county.
#'
#' A wrapper to the \code{isd_stations_search} function
#' in the \code{rnoaa} package, allowing you to search by FIPS code rather than
#' having to know the latitude and longitude of the center of each county. The
#' \code{isd_stations_search} function requires a radius within which to search for
#' stations. This radius is estimated from 2010 U.S. Census Land Area data.
#'
#' @param fips A five-digit FIPS county code.
#' @inheritParams write_daily_timeseries
#'
#' @return A list with four elements. The first element, \code{stations}, is a
#'    dataframe of monitors within a calculated radius of the
#'    geographic center of the county specified by the FIPS code.
#'    This will have the same dataframe format as the output from the
#'    \code{isd_stations_search} function in the \code{rnoaa} package. The
#'    second element, \code{radius}, gives the radius (in km) within which
#'    stations were pulled from the county's geographic center.
#'    Elements \code{lat_center} and \code{lon_center} are the latitude and
#'    longitude of the county's population-weighted center.
#'
#' @examples
#' \dontrun{
#' fips_list <- isd_fips_stations(fips = "12086")
#' ids <- fips_list$stations
#' head(ids)
#' }
isd_fips_stations <- function(fips, verbose = FALSE) {
  # population-weighted center for specified county
  census_data <- countyweather::county_centers
  loc_fips <- which(census_data$fips == fips)
  lat_fips <- as.numeric(census_data[loc_fips, "latitude"])
  lon_fips <- as.numeric(census_data[loc_fips, "longitude"])
  # radius data for specified county
  radius_data <- countyweather::county_radius
  # Fix: match on the `fips` column explicitly. The original compared the
  # whole data frame (`which(radius_data == fips)`), which indexes into the
  # flattened matrix and only equals the row number by accident when the
  # fips column happens to be first.
  loc_rad <- which(radius_data$fips == fips)
  radius <- as.numeric(radius_data[loc_rad, "county_radius"])
  if (verbose) {
    message(paste0("Getting hourly weather monitors for ",
                   census_data[loc_fips, "name"]))
  }
  # Quiet wrapper so station-search messages don't clutter the console;
  # the actual result lives in $result.
  quiet_station_search <- purrr::quietly(rnoaa::isd_stations_search)
  stations <- quiet_station_search(lat = lat_fips, lon = lon_fips,
                                   radius = radius)$result
  list <- list("stations" = stations,
               "radius" = radius,
               "lat_center" = lat_fips,
               "lon_center" = lon_fips)
  return(list)
}
#' Get hourly data for a single monitor.
#'
#' Wraps the \code{isd} function from the \code{rnoaa} package and provides
#' some additional data cleaning.
#'
#' @param usaf_code A character string with a six-digit USAF code for the
#'    weather station.
#' @param wban_code A character string with a five-digit WBAN code for the
#'    weather station.
#' @param year A four-digit numeric giving the year for which to pull data.
#' @param var A character vector listing the weather variables to pull. In
#'    addition to quality flag data, choices for main weather variables to pull
#'    include \code{wind_direction}, \code{wind_speed},
#'    \code{ceiling_height}, \code{visibility_distance}, \code{temperature},
#'    \code{temperature_dewpoint} and \code{air_pressure}.
#'
#' @return This function returns the same type of dataframe as that returned
#'    by the \code{isd} function from the \code{rnoaa} package, but with the
#'    dataframe limited to the selected weather variables and cleaned a bit more.
#'
#' @references
#' For more information on this dataset, see
#' \url{ftp://ftp.ncdc.noaa.gov/pub/data/noaa/ish-format-document.pdf}.
#'
#' @examples
#' \dontrun{
#' ids <- isd_fips_stations(fips = "12086")$stations
#' kendall_station <- int_surface_data(usaf_code = ids$usaf[11],
#'                                     wban_code = ids$wban[11],
#'                                     year = 1992,
#'                                     var = c("wind_speed", "temperature"))
#' head(kendall_station)
#' }
int_surface_data <- function(usaf_code, wban_code, year, var = "all") {
  # Silence rnoaa::isd's console output; the data frame lives in $result.
  quiet_isd <- purrr::quietly(rnoaa::isd)
  isd_df <- quiet_isd(usaf = usaf_code, wban = wban_code, year = year)
  isd_df <- isd_df$result
  # select variables if `var` isn't "all"
  if (length(var) == 1 && var == "all") {
    w_vars <- colnames(isd_df)
    # NOTE(review): assumes the first 8 columns are station/date metadata and
    # every later column is a weather variable -- confirm against rnoaa::isd.
    var <- w_vars[9:length(w_vars)]
  }
  # add date time (suggested by one of the rnoaa package vignette examples for isd())
  isd_df$date_time <- lubridate::ymd_hm(sprintf("%s %s",
                                                as.character(isd_df$date),
                                                isd_df$time))
  # Always keep the station/location/time identifier columns plus `var`.
  cols <- c("usaf_station", "wban_station", "date_time",
            "latitude", "longitude")
  subset_vars <- append(cols, var)
  isd_df <- dplyr::select_(isd_df, .dots = subset_vars)
  # Columns whose maximum equals one of NOAA's missing-value sentinels
  # (99.9, 999, ..., 999999) are assumed to encode NA with that sentinel.
  # NOTE(review): `apply` coerces the mixed data frame to a character matrix
  # before taking `max`, and `max` is called without na.rm -- confirm this
  # behaves as intended when columns already contain NA values.
  na_code_vars <- colnames(isd_df)[apply(isd_df, 2, max) %in%
                                     c(99.9, 999, 999.9, 9999, 9999.9, 99999,
                                       999999)]
  for (na_var in na_code_vars) {
    isd_df[[na_var]] <- as.numeric(isd_df[[na_var]])
  }
  if (length(na_code_vars) > 0) {
    # Replace each column's sentinel (its maximum value) with NA.
    for (na_var in na_code_vars) {
      isd_df[isd_df[ , na_var] == max(isd_df[ , na_var]), na_var] <- NA
    }
  }
  return(isd_df)
}
#' Pull hourly data for multiple monitors.
#'
#' Pull all available data for all weather monitors within a calculated radius of
#' the geographic center of a U.S. county, based on the county's FIPS
#' code. The radius for each county is calculated using 2010 U.S. Census Land Area
#' data.
#'
#' @param fips A five-digit FIPS county code.
#' @param year A four-digit numeric giving the year for which to pull data.
#' @param var A character vector listing the weather variables to pull. The main
#'    available weather variables are \code{wind_direction}, \code{wind_speed},
#'    \code{ceiling_height}, \code{visibility_distance}, \code{temperature},
#'    \code{temperature_dewpoint} and \code{air_pressure}.
#'
#' @return A list with five elements. \code{ids} is a dataframe of station
#'    metadata for all avaiable stations in the given fips code. \code{df} is a
#'    data frame with hourly weather data for the given variable(s) and date
#'    range. \code{radius} is the calculated radius within which stations
#'    were pulled from the county's geographic center. Elements
#'    \code{lat_center} and \code{lon_center} are the latitude and longitude
#'    of the county's geographic center.
#'
#' @examples
#' \dontrun{
#' fips_list <- isd_monitors_data(fips = "12086", year = 1992,
#'                                var = c("wind_speed", "temperature"))
#' stationdata <- fips_list$df
#' ggplot(stationdata, aes(x = date_time, y = wind_speed)) +
#'    geom_point(alpha = 0.5, size = 0.2) +
#'    facet_wrap(~ usaf_station, ncol = 1)
#' }
isd_monitors_data <- function(fips, year, var = "all") {
  list <- isd_fips_stations(fips)
  ids <- list$stations
  radius <- list$radius
  lat_center <- list$lat_center
  lon_center <- list$lon_center
  # Pull each station's data, capturing errors instead of aborting the loop.
  safe_int <- purrr::safely(int_surface_data)
  mult_stations <- mapply(safe_int, usaf_code = ids$usaf,
                          wban_code = ids$wban,
                          year = year, var = list(var = var))
  # Successful pulls are data frames (have dimensions); errors are NULL.
  good_st <- sapply(mult_stations, function(x) !is.null(dim(x)))
  if (sum(good_st) > 0) {
    st_out_list <- lapply(which(good_st), function(x) mult_stations[[x]])
    # Columns that must be numeric so dplyr::bind_rows() does not fail with
    # "Can not automatically convert from numeric to character".
    numeric_vars <- c("usaf_station", "wban_station", "wind_direction",
                      "wind_speed", "ceiling_height", "visibility_distance",
                      "temperature", "temperature_dewpoint", "air_pressure",
                      "GF1_lowest_cloud_base_height")
    st_out_list <- lapply(st_out_list, function(x) {
      # Fix: consult each element's own columns. The original looked at
      # colnames(st_out_list[[1]]) for every element, so a station whose
      # column set differed from the first station's could be mis-converted.
      for (num_var in intersect(numeric_vars, colnames(x))) {
        x[[num_var]] <- as.numeric(x[[num_var]])
      }
      x
    })
    st_out_df <- dplyr::bind_rows(st_out_list)
  } else {
    stop("None of the stations had available data.")
  }
  list <- list("df" = st_out_df,
               "ids" = ids,
               "radius" = radius,
               "lat_center" = lat_center,
               "lon_center" = lon_center)
  return(list)
}
#' Average hourly weather data across multiple stations.
#'
#' Returns a dataframe with hourly weather averaged across
#' stations, as well as columns showing the number of stations contributing to
#' average for each variable and each hour.
#'
#' @param hourly_data A dataframe with hourly weather observations. This
#'    dataframe is returned from the \code{df} element of the function
#'    \code{isd_monitors_data}.
#'
#' @return A data frame with one row per \code{date_time}: the across-station
#'    mean of each weather variable plus, for each variable, an
#'    \code{<var>_reporting} column giving the number of stations with a
#'    non-missing value at that hour.
#'
#' @importFrom dplyr %>%
ave_hourly <- function(hourly_data) {
  # Build a single station id and drop per-station metadata columns,
  # leaving only date_time, id, and the weather variables.
  df <- dplyr::mutate_(hourly_data, id = ~ paste0(usaf_station, wban_station))
  df <- dplyr::select_(df, .dots = c("-usaf_station", "-wban_station",
                                     "-latitude", "-longitude"))
  all_cols <- colnames(df)
  not_vars <- c("date_time", "id")
  g_cols <- all_cols[!all_cols %in% not_vars]
  # Long format -> per-hour, per-variable mean across stations -> wide format.
  averaged_data <- tidyr::gather_(data = df, key_col = "key",
                                  value_col = "value", gather_cols = g_cols) %>%
    dplyr::group_by_(~ date_time, ~ key) %>%
    dplyr::summarize_(mean = ~ mean(value, na.rm = TRUE)) %>%
    tidyr::spread_(key_col = "key", value_col = "mean")
  # Same reshape, but counting non-missing observations per hour/variable;
  # keys get a "_reporting" suffix so they don't collide with the means.
  n_reporting <- tidyr::gather_(data = df, key_col = "key", value_col = "value",
                                gather_cols = g_cols) %>%
    dplyr::group_by_(~ date_time, ~ key) %>%
    dplyr::summarise_(n_reporting = ~ sum(!is.na(value))) %>%
    dplyr::mutate_(key = ~ paste(key, "reporting", sep = "_")) %>%
    tidyr::spread_(key_col = "key", value_col = "n_reporting")
  # Combine means and reporting counts on date_time and return a plain,
  # ungrouped data.frame.
  averaged_data <- dplyr::left_join(averaged_data, n_reporting,
                                    by = "date_time")
  averaged_data <- dplyr::ungroup(averaged_data)
  averaged_data <- as.data.frame(averaged_data)
  return(averaged_data)
}
#' Filter NOAA ISD stations based on "coverage" requirements, and calculate
#' coverage and statistical information for each station-variable combination.
#'
#' Filters available weather stations based on a specified
#' minimum coverage (i.e., percent non-missing hourly observations). Weather
#' stations with non-missing data for fewer days than specified by
#' \code{coverage} will be excluded from the county average.
#'
#' @param fips A character string giving the five-digit U.S. FIPS
#'    county code of the county for which the user wants to pull weather data.
#' @param hourly_data A dataframe as returned by the \code{df} element from an
#'    \code{isd_monitors_data} call.
#' @param coverage A numeric value in the range of 0 to 1 that specifies the
#'    desired percentage coverage for each weather variable (i.e., what percent
#'    of each weather variable must be non-missing to include the data from a
#'    station when calculating hourly values averaged across stations).
#'
#' @return A list with two elements: \code{df} and \code{stations}. \code{df} is
#'    a dataframe of hourly weather data filtered based on the specified
#'    coverage, as well as columns (\code{"var"_reporting}) for each weather
#'    variable showing the number of stations contributing to the average for that
#'    variable for each hour. The second element, \code{stations}, is a dataframe
#'    giving statistical information for stations that meet the specified coverage
#'    requirements. The column \code{station} gives the station id (USAF and
#'    WBAN identification numbers pasted together, separated by "-"). Note: One
#'    of these identification ids is sometimes missing. For example, a value in
#'    \code{station} might be \code{722029-NA}. The column \code{var}
#'    gives the weather variable associated with the row of statistical values
#'    for each station and variable combination. \code{calc_coverage} gives the
#'    percentage coverage for each station-weather variable combination. These
#'    values will all be greater than or equal to the specified \code{coverage}
#'    value. \code{standard_dev} gives the standard deviation of values for each
#'    station-weather variable combination. \code{max} and \code{min} give the
#'    minimum and maximum values, and \code{range} gives the range of values in
#'    each station-weather variable combination. These last four statistical
#'    calculations (\code{standard_dev}, \code{max}, \code{min}, and
#'    \code{range}) are only included for the seven core hourly weather variables,
#'    which include \code{"wind_direction"}, \code{"wind_speed"},
#'    \code{"ceiling_height"}, \code{"visibility_distance"}, \code{"temperature"},
#'    \code{"temperature_dewpoint"}, and \code{"air_pressure"}. (The values of
#'    these columns are set to \code{NA} for other variables, such as quality
#'    flag data.)
#'
#' @importFrom dplyr %>%
filter_hourly <- function(fips, hourly_data, coverage = NULL) {
  # Default: no coverage requirement (every station passes).
  if (is.null(coverage)) {
    coverage <- 0
  }
  # Weather-variable columns = everything except id/location/time columns.
  all_cols <- colnames(hourly_data)
  not_vars <- c("usaf_station", "wban_station", "date_time", "latitude",
                "longitude")
  g_cols <- all_cols[!all_cols %in% not_vars]
  group_cols <- c("station", "key")
  # Per station-variable pair: coverage plus summary statistics.
  # suppressing "NAs introduced by coercion" warning message
  df <- suppressWarnings(hourly_data %>%
    tidyr::unite_(col = "station", from = c("usaf_station", "wban_station"),
                  sep = "-") %>%
    dplyr::select_(quote(-date_time), quote(-latitude), quote(-longitude)) %>%
    tidyr::gather_(key_col = "key", value_col = "value", gather_cols = g_cols) %>%
    dplyr::group_by_(.dots = group_cols) %>%
    dplyr::mutate_(value = ~ as.numeric(value)) %>%
    dplyr::summarize_(calc_coverage = ~ mean(!is.na(value)),
                      standard_dev = ~ sd(value, na.rm = TRUE),
                      min = ~ min(value, na.rm = TRUE),
                      max = ~ max(value, na.rm = TRUE),
                      range = ~ max - min))
  weather_vars <- c("wind_direction", "wind_speed", "ceiling_height",
                    "visibility_distance", "temperature",
                    "temperature_dewpoint", "air_pressure")
  # Summary statistics are only meaningful for the core weather variables;
  # blank them out for quality-flag and other non-core columns.
  flag_vars <- df[!df$key %in% weather_vars, "key"]$key
  if (length(flag_vars) != 0) {
    for (i in 1:length(flag_vars)) {
      df[which(df$key == flag_vars[i]), ]$standard_dev <- NA
      df[which(df$key == flag_vars[i]), ]$min <- NA
      df[which(df$key == flag_vars[i]), ]$max <- NA
      df[which(df$key == flag_vars[i]), ]$range <- NA
    }
  }
  group_cols <- c("date_time", "key")
  # Fail early if no station-variable combination meets the coverage cutoff.
  test <- df %>%
    dplyr::filter_(~ calc_coverage >= coverage)
  if (nrow(test) == 0) {
    stop(paste0("Unable to pull weather data for FIPS code ", fips,
                " for the specified percent coverage, year(s), and/or",
                " weather variables."))
  }
  # Keep only observations belonging to station-variable pairs that meet
  # the coverage cutoff, grouped by hour and variable.
  filtered <- hourly_data %>%
    tidyr::unite_(col = "station", from = c("usaf_station", "wban_station"),
                  sep = "-") %>%
    dplyr::select_(quote(-latitude), quote(-longitude)) %>%
    tidyr::gather_(key_col = "key", value_col = "value", gather_cols = g_cols) %>%
    dplyr::left_join(df, by = c("station", "key")) %>%
    dplyr::filter_(~ calc_coverage >= coverage) %>%
    dplyr::group_by_(.dots = group_cols)
  # Per-station statistics table for the surviving stations.
  stations <- filtered %>%
    dplyr::ungroup() %>%
    dplyr::select_(quote(-date_time), quote(-value)) %>%
    dplyr::distinct()
  colnames(stations)[2] <- "var"
  # Number of stations reporting each variable at each hour, with keys
  # suffixed "_reporting" so they don't collide with the means...
  df2 <- filtered %>%
    dplyr::summarize_(n_reporting = ~ sum(!is.na(value))) %>%
    dplyr::mutate_(key = ~ paste(key, "reporting", sep = "_")) %>%
    tidyr::spread_(key_col = "key", value_col = "n_reporting")
  # ...and the across-station hourly mean of each variable.
  df3 <- filtered %>%
    dplyr::summarize_(value = ~ mean(as.numeric(value), na.rm = TRUE)) %>%
    tidyr::spread_(key_col = "key", value_col = "value")
  out <- dplyr::full_join(df3, df2, by = "date_time")
  list <- list("df" = out,
               "stations" = stations)
  return(list)
}
#' Plot hourly weather stations for a particular county.
#'
#' Produces a \code{ggplot} object mapping stations that contribute
#' to the weather data returned by \code{hourly_data}.
#'
#' @inheritParams daily_stationmap
#' @param hourly_data A list returned from the function \code{hourly_df}.
#'
#' @return A \code{ggplot} object mapping all weather stations for a particular
#' county that satisfy the conditions present in \code{hourly_df}'s
#' arguments (year(s), coverage, and/or weather variables). Because hourly
#' data is pulled by radius from each county's geograph center, this plot
#' includes the calculated radius from which stations are pulled for that
#' county. This radius
#' is calculated for each county using 2010 U.S. Census Land Area data.
#' 2011 U.S. Census cartographic boundary shapefiles are used to proved
#' county outlines, included on this plot as well. Note: Because stations
#' are pulled within a radius from the county's geographic center, depending
#' on the shape of the county, weather stations from outside the county's
#' boundaries may sometimes be providing data for that county and some
#' weather stations within the county may not be included.
#'
#' @examples
#' \dontrun{
#' hourly_data <- hourly_df(fips = "12086", year = 1992,
#' var = c("wind_speed", "temperature"))
#' hourly_stationmap("12086", hourly_data)
#' }
#'
#' @importFrom dplyr %>%
hourly_stationmap <- function(fips, hourly_data, point_color = "firebrick",
                              fill_color = "lightgrey",
                              point_size = 2, station_label = FALSE) {
  # Look up the county name for the plot title. Match the FIPS code exactly:
  # the previous grepl() call did substring matching and could hit any county
  # whose FIPS code merely *contained* `fips`.
  census_data <- countyweather::county_centers
  row_num <- which(census_data$fips == fips)
  title <- as.character(census_data[row_num, "name"])

  # Census row for this county; supplies the state abbreviation that
  # tigris::counties() needs.
  loc_census <- census_data %>%
    dplyr::rename(fc = fips) %>%
    dplyr::filter(fc == fips)

  # County outline as an sf object. The last three digits of a five-digit
  # FIPS code identify the county within its state.
  suppressMessages(
    county_sf <- tigris::counties(state = loc_census$state,
                                  cb = TRUE,
                                  class = "sf") %>%
      dplyr::filter(COUNTYFP == stringr::str_sub(fips, 3, 5))
  )
  map <- ggplot2::ggplot() +
    ggplot2::geom_sf(data = county_sf, color = fill_color)

  # (A large block of commented-out ggmap/raster plotting code that predated
  # the sf-based map above was removed here; see version control history.)

  # One row per unique station, ordered north-to-south so that legend order
  # matches the on-map order when labels are shown.
  station_df <- hourly_data$station_df %>%
    dplyr::tbl_df() %>%
    dplyr::filter_(~ !duplicated(station)) %>%
    dplyr::arrange_(~ dplyr::desc(lat)) %>%
    dplyr::rename(name = station_name)
  name_levels <- unique(station_df$name)
  station_df <- station_df %>%
    dplyr::mutate_(name = ~ factor(name, levels = name_levels))

  if (station_label) {
    # Color-code each station and identify it in an (untitled) legend.
    map_out <- map +
      ggplot2::geom_point(data = station_df,
                          ggplot2::aes_(~ lon, ~ lat,
                                        fill = ~ name),
                          colour = "black",
                          size = point_size,
                          shape = 21) +
      ggplot2::ggtitle(title) +
      ggplot2::theme_void() +
      ggplot2::theme(legend.title = ggplot2::element_blank())
  } else {
    # Plot every station in a single color with no legend.
    map_out <- map +
      ggplot2::geom_point(data = station_df,
                          ggplot2::aes_(~ lon, ~ lat),
                          colour = point_color,
                          size = point_size) +
      ggplot2::theme_void() +
      ggplot2::ggtitle(title)
  }
  return(map_out)
}
|
/R/hourly_helpers.R
|
no_license
|
leighseverson/countyweather
|
R
| false
| false
| 25,124
|
r
|
#' Get station list for a particular U.S. county.
#'
#' A wrapper to the \code{isd_stations_search} function
#' in the \code{rnoaa} package, allowing you to search by FIPS code rather than
#' having to know the latitude and longitude of the center of each county. The
#' \code{isd_stations_search} function requires a radius within which to search for
#' stations. This radius is estimated from 2010 U.S. Census Land Area data.
#'
#' @param fips A five-digit FIPS county code.
#' @inheritParams write_daily_timeseries
#'
#' @return A list with four elements. The first element, \code{stations}, is a
#' dataframe of monitors within a calculated radius of the
#' geographic center of the county specified by the FIPS code.
#' This will have the same dataframe format as the output from the
#' \code{isd_stations_search} function in the \code{rnoaa} package. The
#' second element, \code{radius}, gives the radius (in km) within which
#' stations were pulled from the county's geographic center.
#' Elements \code{lat_center} and \code{lon_center} are the latitude and
#' longitude of the county's population-weighted center.
#'
#' @examples
#' \dontrun{
#' fips_list <- isd_fips_stations(fips = "12086")
#' ids <- fips_list$stations
#' head(ids)
#' }
isd_fips_stations <- function(fips, verbose = FALSE) {
  # Population-weighted center for the specified county.
  census_data <- countyweather::county_centers
  loc_fips <- which(census_data$fips == fips)
  lat_fips <- as.numeric(census_data[loc_fips, "latitude"])
  lon_fips <- as.numeric(census_data[loc_fips, "longitude"])

  # Search radius (km) for the specified county, estimated from 2010
  # U.S. Census Land Area data.
  radius_data <- countyweather::county_radius
  # Match on the `fips` column explicitly. The previous
  # `which(radius_data == fips)` compared every cell of the data frame and
  # returned a *linear* (column-major) index, which was then used as a row
  # index -- correct only by accident when the match fell in column 1.
  loc_rad <- which(radius_data$fips == fips)
  radius <- as.numeric(radius_data[loc_rad, "county_radius"])

  if (verbose) {
    message(paste0("Getting hourly weather monitors for ",
                   census_data[loc_fips, "name"]))
  }

  # quietly() captures rnoaa's console chatter; only the result is kept.
  quiet_station_search <- purrr::quietly(rnoaa::isd_stations_search)
  stations <- quiet_station_search(lat = lat_fips, lon = lon_fips,
                                   radius = radius)$result

  # (Renamed from `list` to avoid shadowing base::list.)
  out <- list("stations" = stations,
              "radius" = radius,
              "lat_center" = lat_fips,
              "lon_center" = lon_fips)
  return(out)
}
#' Get hourly data for a single monitor.
#'
#' Wraps the \code{isd} function from the \code{rnoaa} package and provides
#' some additional data cleaning.
#'
#' @param usaf_code A character string with a six-digit USAF code for the
#' weather station.
#' @param wban_code A character string with a five-digit WBAN code for the
#' weather station.
#' @param year A four-digit numeric giving the year for which to pull data.
#' @param var A character vector listing the weather variables to pull. In
#' addition quality flag data, choices for main weather variables to pull
#' include \code{wind_direction}, \code{wind_speed},
#' \code{ceiling_height}, \code{visibility_distance}, \code{temperature},
#' \code{temperature_dewpoint} and \code{air_pressure}.
#'
#' @return This function returns the same type of dataframe as that returned
#' by the \code{isd} function from the \code{rnoaa} package, but with the
#' dataframe limited to the selected weather variables and cleaned a bit more.
#'
#' @references
#' For more information on this dataset, see
#' \url{ftp://ftp.ncdc.noaa.gov/pub/data/noaa/ish-format-document.pdf}.
#'
#' @examples
#' \dontrun{
#' ids <- isd_fips_stations(fips = "12086")$stations
#' kendall_station <- int_surface_data(usaf_code = ids$usaf[11],
#' wban_code = ids$wban[11],
#' year = 1992,
#' var = c("wind_speed", "temperature"))
#' head(kendall_station)
#' }
int_surface_data <- function(usaf_code, wban_code, year, var = "all") {
  # quietly() captures rnoaa's console output; only the data frame is kept.
  quiet_isd <- purrr::quietly(rnoaa::isd)
  isd_df <- quiet_isd(usaf = usaf_code, wban = wban_code, year = year)
  isd_df <- isd_df$result
  # select variables if `var` isn't "all"; assumes columns 9 onward of the
  # isd() result are the weather variables (1-8 being station/date
  # bookkeeping) -- TODO confirm against the current rnoaa::isd() layout
  if (length(var) == 1 && var == "all") {
    w_vars <- colnames(isd_df)
    var <- w_vars[9:length(w_vars)]
  }
  # add date time (suggested by one of the rnoaa package vignette examples for isd())
  isd_df$date_time <- lubridate::ymd_hm(sprintf("%s %s",
                                                as.character(isd_df$date),
                                                isd_df$time))
  # keep station identifiers, time stamp, coordinates, and requested variables
  cols <- c("usaf_station", "wban_station", "date_time",
            "latitude", "longitude")
  subset_vars <- append(cols, var)
  isd_df <- dplyr::select_(isd_df, .dots = subset_vars)
  # Columns whose maximum equals an ISD missing-data sentinel (999, 9999, ...).
  # NOTE(review): apply() coerces the data frame to a character matrix, so
  # `max` here is a *character* maximum and the %in% test happens after string
  # conversion. This works for the usual sentinel values but is fragile, and
  # it assumes the column maximum actually is the sentinel -- verify.
  na_code_vars <- colnames(isd_df)[apply(isd_df, 2, max) %in%
                                     c(99.9, 999, 999.9, 9999, 9999.9, 99999,
                                       999999)]
  # convert flagged columns to numeric so the sentinel can be compared below
  for (na_var in na_code_vars) {
    isd_df[[na_var]] <- as.numeric(isd_df[[na_var]])
  }
  # replace the sentinel (taken to be the column maximum) with NA
  if (length(na_code_vars) > 0) {
    for (na_var in na_code_vars) {
      isd_df[isd_df[ , na_var] == max(isd_df[ , na_var]), na_var] <- NA
    }
  }
  return(isd_df)
}
#' Pull hourly data for multiple monitors.
#'
#' Pull all available data for all weather monitors within a calculated radius of
#' the geographic center of a U.S. county, based on the county's FIPS
#' code. The radius for each county is calculated using 2010 U.S. Census Land Area
#' data.
#'
#' @param fips A five-digit FIPS county code.
#' @param year A four-digit numeric giving the year for which to pull data.
#' @param var A character vector listing the weather variables to pull. The main
#' available weather variables are \code{wind_direction}, \code{wind_speed},
#' \code{ceiling_height}, \code{visibility_distance}, \code{temperature},
#' \code{temperature_dewpoint} and \code{air_pressure}.
#'
#' @return A list with five elements. \code{ids} is a dataframe of station
#'   metadata for all available stations in the given fips code. \code{df} is a
#' data frame with hourly weather data for the given variable(s) and date
#' range. \code{radius} is the calculated radius within which stations
#' were pulled from the county's geographic center. Elements
#' \code{lat_center} and \code{lon_center} are the latitude and longitude
#' of the county's geographic center.
#'
#' @examples
#' \dontrun{
#' fips_list <- isd_monitors_data(fips = "12086", year = 1992,
#' var = c("wind_speed", "temperature"))
#' stationdata <- fips_list$df
#' ggplot(stationdata, aes(x = date_time, y = wind_speed)) +
#' geom_point(alpha = 0.5, size = 0.2) +
#' facet_wrap(~ usaf_station, ncol = 1)
#' }
isd_monitors_data <- function(fips, year, var = "all") {
  # Station metadata plus search radius and county center for this FIPS code.
  # (Renamed from `list` to avoid shadowing base::list.)
  fips_stations <- isd_fips_stations(fips)
  ids <- fips_stations$stations
  radius <- fips_stations$radius
  lat_center <- fips_stations$lat_center
  lon_center <- fips_stations$lon_center

  # safely() keeps one failing station from aborting the whole pull; failed
  # pulls come back as error objects instead of data frames.
  safe_int <- purrr::safely(int_surface_data)
  mult_stations <- mapply(safe_int, usaf_code = ids$usaf,
                          wban_code = ids$wban,
                          year = year, var = list(var = var))

  # A successful pull is the only element with dimensions (a data frame).
  # vapply instead of sapply guarantees a logical vector even for 0 stations.
  good_st <- vapply(mult_stations, function(x) !is.null(dim(x)), logical(1))
  if (sum(good_st) > 0) {
    st_out_list <- lapply(which(good_st), function(x) mult_stations[[x]])
    # Columns that rnoaa may return as character for some stations but
    # numeric for others; they must be coerced to numeric before
    # bind_rows(), which otherwise errors with
    # "Can not automatically convert from numeric to character".
    numeric_vars <- c("wind_direction", "ceiling_height",
                      "visibility_distance", "temperature",
                      "temperature_dewpoint", "air_pressure",
                      "GF1_lowest_cloud_base_height", "wind_speed")
    st_out_list <- lapply(st_out_list, function(x) {
      x$usaf_station <- as.numeric(x$usaf_station)
      x$wban_station <- as.numeric(x$wban_station)
      # Convert based on each data frame's *own* columns. The previous code
      # checked the column names of the first station only, which errored
      # when a later station lacked one of those columns (and skipped
      # character columns that only later stations had).
      for (v in intersect(numeric_vars, colnames(x))) {
        x[[v]] <- as.numeric(x[[v]])
      }
      return(x)
    })
    st_out_df <- dplyr::bind_rows(st_out_list)
  } else {
    stop("None of the stations had available data.")
  }

  out <- list("df" = st_out_df,
              "ids" = ids,
              "radius" = radius,
              "lat_center" = lat_center,
              "lon_center" = lon_center)
  return(out)
}
#' Average hourly weather data across multiple stations.
#'
#' Returns a dataframe with hourly weather averaged across
#' stations, as well as columns showing the number of stations contributing to
#' average for each variable and each hour.
#'
#' @param hourly_data A dataframe with hourly weather observations. This
#' dataframe is returned from the \code{df} element of the function
#' \code{isd_monitors_data}.
#'
#' @importFrom dplyr %>%
ave_hourly <- function(hourly_data) {
  # One id per monitor (USAF and WBAN codes pasted together); the station id
  # columns and coordinates are then dropped so only `date_time`, `id`, and
  # the weather-variable columns remain.
  df <- dplyr::mutate_(hourly_data, id = ~ paste0(usaf_station, wban_station))
  df <- dplyr::select_(df, .dots = c("-usaf_station", "-wban_station",
                                     "-latitude", "-longitude"))
  all_cols <- colnames(df)
  not_vars <- c("date_time", "id")
  # weather-variable columns to reshape into long (key/value) format
  g_cols <- all_cols[!all_cols %in% not_vars]
  # Mean across stations for every hour/variable pair, reshaped back to one
  # column per weather variable. (Uses the deprecated standard-evaluation
  # verbs gather_/group_by_/spread_ consistently with the rest of this file.)
  averaged_data <- tidyr::gather_(data = df, key_col = "key",
                                  value_col = "value", gather_cols = g_cols) %>%
    dplyr::group_by_(~ date_time, ~ key) %>%
    dplyr::summarize_(mean = ~ mean(value, na.rm = TRUE)) %>%
    tidyr::spread_(key_col = "key", value_col = "mean")
  # Number of stations with a non-missing value for each hour/variable pair,
  # exposed as one "<var>_reporting" column per weather variable.
  n_reporting <- tidyr::gather_(data = df, key_col = "key", value_col = "value",
                                gather_cols = g_cols) %>%
    dplyr::group_by_(~ date_time, ~ key) %>%
    dplyr::summarise_(n_reporting = ~ sum(!is.na(value))) %>%
    dplyr::mutate_(key = ~ paste(key, "reporting", sep = "_")) %>%
    tidyr::spread_(key_col = "key", value_col = "n_reporting")
  # join the station counts onto the hourly means by time stamp
  averaged_data <- dplyr::left_join(averaged_data, n_reporting,
                                    by = "date_time")
  averaged_data <- dplyr::ungroup(averaged_data)
  # return a plain data.frame for a stable return type
  averaged_data <- as.data.frame(averaged_data)
  return(averaged_data)
}
#' Filter NOAA ISD stations based on "coverage" requirements, and calculate
#' coverage and statistical information for each station-variable combination.
#'
#' Filters available weather stations based on a specified
#' minimum coverage (i.e., percent non-missing hourly observations). Weather
#' stations with non-missing data for fewer days than specified by
#' \code{coverage} will be excluded from the county average.
#'
#' @param fips A character string giving the five-digit U.S. FIPS
#' county code of the county for which the user wants to pull weather data.
#' @param hourly_data A dataframe as returned by the \code{df} element from an
#' \code{isd_monitors_data} call.
#' @param coverage A numeric value in the range of 0 to 1 that specifies the
#' desired percentage coverage for each weather variable (i.e., what percent
#' of each weather variable must be non-missing to include the data from a
#' station when calculating hourly values averaged across stations).
#'
#' @return A list with two elements: \code{df} and \code{stations}. \code{df} is
#'   a dataframe of hourly weather data filtered based on the specified
#' coverage, as well as columns (\code{"var"_reporting}) for each weather
#' variable showing the number of stations contributing to the average for that
#' variable for each hour. The second element, \code{stations}, is a dataframe
#' giving statistical information for stations that meet the specified coverage
#' requirements. The column \code{station} gives the station id (USAF and
#' WBAN identification numbers pasted together, separated by "-"). Note: One
#' of these identification ids is sometimes missing. For example, a value in
#' \code{station} might be \code{722029-NA}. The column \code{var}
#' gives the weather variable associated with the row of statistical values
#' for each station and variable combination. \code{calc_coverage} gives the
#' percentage coverage for each station-weather variable combination. These
#' values will all be greater than or equal to the specified \code{coverage}
#' value. \code{standard_dev} gives the standard deviation of values for each
#' station-weather variable combination. \code{max} and \code{min} give the
#' minimum and maximum values, and \code{range} gives the range of values in
#' each station-weather variable combination. These last four statistical
#' calculations (\code{standard_dev}, \code{max}, \code{min}, and
#' \code{range}) are only included for the seven core hourly weather variables,
#' which include \code{"wind_direction"}, \code{"wind_speed"},
#' \code{"ceiling_height"}, \code{"visibility_distance"}, \code{"temperature"},
#' \code{"temperature_dewpoint"}, and \code{"air_pressure"}. (The values of
#' these columns are set to \code{NA} for other variables, such as quality
#' flag data.)
#'
#' @importFrom dplyr %>%
filter_hourly <- function(fips, hourly_data, coverage = NULL) {
  # default: no coverage requirement (keep every station)
  if (is.null(coverage)) {
    coverage <- 0
  }
  all_cols <- colnames(hourly_data)
  not_vars <- c("usaf_station", "wban_station", "date_time", "latitude",
                "longitude")
  # weather-variable columns to reshape into long (key/value) format
  g_cols <- all_cols[!all_cols %in% not_vars]
  group_cols <- c("station", "key")
  # Per station-variable statistics: coverage (share of non-missing hours),
  # sd, min, max, and range. The station id is "usaf-wban".
  # suppressing "NAs introduced by coercion" warning message
  df <- suppressWarnings(hourly_data %>%
    tidyr::unite_(col = "station", from = c("usaf_station", "wban_station"),
                  sep = "-") %>%
    dplyr::select_(quote(-date_time), quote(-latitude), quote(-longitude)) %>%
    tidyr::gather_(key_col = "key", value_col = "value", gather_cols = g_cols) %>%
    dplyr::group_by_(.dots = group_cols) %>%
    dplyr::mutate_(value = ~ as.numeric(value)) %>%
    dplyr::summarize_(calc_coverage = ~ mean(!is.na(value)),
                      standard_dev = ~ sd(value, na.rm = TRUE),
                      min = ~ min(value, na.rm = TRUE),
                      max = ~ max(value, na.rm = TRUE),
                      range = ~ max - min))
  # The statistics only make sense for the seven core weather variables;
  # blank them out for everything else (e.g. quality-flag columns).
  weather_vars <- c("wind_direction", "wind_speed", "ceiling_height",
                    "visibility_distance", "temperature",
                    "temperature_dewpoint", "air_pressure")
  flag_vars <- df[!df$key %in% weather_vars, "key"]$key
  if (length(flag_vars) != 0) {
    for (i in 1:length(flag_vars)) {
      df[which(df$key == flag_vars[i]), ]$standard_dev <- NA
      df[which(df$key == flag_vars[i]), ]$min <- NA
      df[which(df$key == flag_vars[i]), ]$max <- NA
      df[which(df$key == flag_vars[i]), ]$range <- NA
    }
  }
  group_cols <- c("date_time", "key")
  # Fail early with an informative error if nothing meets the coverage bar.
  test <- df %>%
    dplyr::filter_(~ calc_coverage >= coverage)
  if (nrow(test) == 0) {
    stop(paste0("Unable to pull weather data for FIPS code ", fips,
                " for the specified percent coverage, year(s), and/or",
                " weather variables."))
  }
  # Long-format hourly data joined to the per-station statistics, keeping
  # only station-variable pairs that meet the coverage requirement, grouped
  # by hour and variable for the summaries below.
  filtered <- hourly_data %>%
    tidyr::unite_(col = "station", from = c("usaf_station", "wban_station"),
                  sep = "-") %>%
    dplyr::select_(quote(-latitude), quote(-longitude)) %>%
    tidyr::gather_(key_col = "key", value_col = "value", gather_cols = g_cols) %>%
    dplyr::left_join(df, by = c("station", "key")) %>%
    dplyr::filter_(~ calc_coverage >= coverage) %>%
    dplyr::group_by_(.dots = group_cols)
  # One row per surviving station-variable pair, with its statistics.
  stations <- filtered %>%
    dplyr::ungroup() %>%
    dplyr::select_(quote(-date_time), quote(-value)) %>%
    dplyr::distinct()
  # rename the key column to "var" for the documented return format
  colnames(stations)[2] <- "var"
  # Station counts per hour/variable, as "<var>_reporting" columns.
  df2 <- filtered %>%
    dplyr::summarize_(n_reporting = ~ sum(!is.na(value))) %>%
    dplyr::mutate_(key = ~ paste(key, "reporting", sep = "_")) %>%
    tidyr::spread_(key_col = "key", value_col = "n_reporting")
  # Cross-station hourly means, one column per weather variable.
  df3 <- filtered %>%
    dplyr::summarize_(value = ~ mean(as.numeric(value), na.rm = TRUE)) %>%
    tidyr::spread_(key_col = "key", value_col = "value")
  out <- dplyr::full_join(df3, df2, by = "date_time")
  list <- list("df" = out,
               "stations" = stations)
  return(list)
}
#' Plot hourly weather stations for a particular county.
#'
#' Produces a \code{ggplot} object mapping stations that contribute
#' to the weather data returned by \code{hourly_data}.
#'
#' @inheritParams daily_stationmap
#' @param hourly_data A list returned from the function \code{hourly_df}.
#'
#' @return A \code{ggplot} object mapping all weather stations for a particular
#' county that satisfy the conditions present in \code{hourly_df}'s
#' arguments (year(s), coverage, and/or weather variables). Because hourly
#'   data is pulled by radius from each county's geographic center, this plot
#' includes the calculated radius from which stations are pulled for that
#' county. This radius
#' is calculated for each county using 2010 U.S. Census Land Area data.
#'   2011 U.S. Census cartographic boundary shapefiles are used to provide
#' county outlines, included on this plot as well. Note: Because stations
#' are pulled within a radius from the county's geographic center, depending
#' on the shape of the county, weather stations from outside the county's
#' boundaries may sometimes be providing data for that county and some
#' weather stations within the county may not be included.
#'
#' @examples
#' \dontrun{
#' hourly_data <- hourly_df(fips = "12086", year = 1992,
#' var = c("wind_speed", "temperature"))
#' hourly_stationmap("12086", hourly_data)
#' }
#'
#' @importFrom dplyr %>%
hourly_stationmap <- function(fips, hourly_data, point_color = "firebrick",
                              fill_color = "lightgrey",
                              point_size = 2, station_label = FALSE) {
  # Look up the county name for the plot title. Match the FIPS code exactly:
  # the previous grepl() call did substring matching and could hit any county
  # whose FIPS code merely *contained* `fips`.
  census_data <- countyweather::county_centers
  row_num <- which(census_data$fips == fips)
  title <- as.character(census_data[row_num, "name"])

  # Census row for this county; supplies the state abbreviation that
  # tigris::counties() needs.
  loc_census <- census_data %>%
    dplyr::rename(fc = fips) %>%
    dplyr::filter(fc == fips)

  # County outline as an sf object. The last three digits of a five-digit
  # FIPS code identify the county within its state.
  suppressMessages(
    county_sf <- tigris::counties(state = loc_census$state,
                                  cb = TRUE,
                                  class = "sf") %>%
      dplyr::filter(COUNTYFP == stringr::str_sub(fips, 3, 5))
  )
  map <- ggplot2::ggplot() +
    ggplot2::geom_sf(data = county_sf, color = fill_color)

  # (A large block of commented-out ggmap/raster plotting code that predated
  # the sf-based map above was removed here; see version control history.)

  # One row per unique station, ordered north-to-south so that legend order
  # matches the on-map order when labels are shown.
  station_df <- hourly_data$station_df %>%
    dplyr::tbl_df() %>%
    dplyr::filter_(~ !duplicated(station)) %>%
    dplyr::arrange_(~ dplyr::desc(lat)) %>%
    dplyr::rename(name = station_name)
  name_levels <- unique(station_df$name)
  station_df <- station_df %>%
    dplyr::mutate_(name = ~ factor(name, levels = name_levels))

  if (station_label) {
    # Color-code each station and identify it in an (untitled) legend.
    map_out <- map +
      ggplot2::geom_point(data = station_df,
                          ggplot2::aes_(~ lon, ~ lat,
                                        fill = ~ name),
                          colour = "black",
                          size = point_size,
                          shape = 21) +
      ggplot2::ggtitle(title) +
      ggplot2::theme_void() +
      ggplot2::theme(legend.title = ggplot2::element_blank())
  } else {
    # Plot every station in a single color with no legend.
    map_out <- map +
      ggplot2::geom_point(data = station_df,
                          ggplot2::aes_(~ lon, ~ lat),
                          colour = point_color,
                          size = point_size) +
      ggplot2::theme_void() +
      ggplot2::ggtitle(title)
  }
  return(map_out)
}
|
# =============================================================================
# Moment <-> parameter conversions for the negative binomial, geometric, and
# binomial distributions (parameterized as in R's dnbinom/dgeom/dbinom).
# =============================================================================

# Negative binomial: size from (mean, variance); requires variance > mean.
dnbinom.size <- function(mean, variance) {
  mean^2 / (variance - mean)
}

# Negative binomial: success probability from (mean, variance).
dnbinom.prob <- function(mean, variance) {
  mean / variance
}

# Negative binomial: mean from (size, prob).
dnbinom.mean <- function(size, prob) {
  size / prob - size
}

# Negative binomial: variance from (size, prob).
dnbinom.variance <- function(size, prob) {
  (size - prob * size) / prob^2
}

# Geometric: success probability giving the specified mean.
dgeom.prob <- function(mean) {
  1 / (1 + mean)
}

# Geometric: mean from the success probability.
dgeom.mean <- function(prob) {
  (1 - prob) / prob
}

# Geometric: variance from the success probability.
dgeom.variance <- function(prob) {
  (1 - prob) / prob^2
}

# Binomial: size from (mean, variance); requires variance < mean.
dbinom.size <- function(mean, variance) {
  mean^2 / (mean - variance)
}

# Binomial: success probability from (mean, variance).
dbinom.prob <- function(mean, variance) {
  (mean - variance) / mean
}

# Binomial: mean from (size, prob).
dbinom.mean <- function(size, prob) {
  size * prob
}

# Binomial: variance from (size, prob).
dbinom.variance <- function(size, prob) {
  size * prob * (1 - prob)
}
|
/R/dnbinom.R
|
no_license
|
ataudt/aneufinder
|
R
| false
| false
| 1,019
|
r
|
# ============================================================================
# Functions for a Negative Binomial to transform (mean,variance)<->(size,prob)
# (plus the analogous geometric and binomial conversions; parameterizations
# match R's dnbinom/dgeom/dbinom)
# ============================================================================
# NB size parameter from (mean, variance); requires variance > mean
dnbinom.size <- function(mean, variance) {
  return(mean^2 / (variance - mean))
}
# NB success probability from (mean, variance)
dnbinom.prob <- function(mean, variance) {
  return(mean/variance)
}
# NB mean from (size, prob)
dnbinom.mean <- function(size, prob) {
  return(size/prob - size)
}
# NB variance from (size, prob)
dnbinom.variance <- function(size, prob) {
  return( (size - prob*size) / prob^2 )
}
# geometric success probability giving the specified mean
dgeom.prob <- function(mean) {
  return( 1/(1+mean) )
}
# geometric mean from the success probability
dgeom.mean <- function(prob) {
  return( (1-prob)/prob )
}
# geometric variance from the success probability
dgeom.variance <- function(prob) {
  return( (1-prob)/prob^2 )
}
# binomial size from (mean, variance); requires variance < mean
dbinom.size <- function(mean, variance) {
  return( mean^2/(mean-variance) )
}
# binomial success probability from (mean, variance)
dbinom.prob <- function(mean, variance) {
  return( (mean-variance)/mean )
}
# binomial mean from (size, prob)
dbinom.mean <- function(size, prob) {
  return( size*prob )
}
# binomial variance from (size, prob)
dbinom.variance <- function(size, prob) {
  return( size*prob * (1-prob) )
}
|
##' @title LINKAGES kill function
##' @author Ann Raiho
##'
##' @param max.ind maximum number of individuals
##' @param nspec number of species
##' @param ntrees number of trees of each species
##' @param slta parameter to calculate crown area from diameter
##' @param sltb parameter to calculate crown area from diameter
##' @param agemx max age
##' @param sprtmn minimum diameter for a stump to sprout
##' @param sprtmx maximum diameter for a stump to sprout
##' @param tl leaf litter quality class
##' @param rtst root-shoot ratio for each species
##' @param fwt leaf weight per unit crown area
##' @param frt foliage retention time
##' @param ncohrt number of cohorts
##' @param nogro flags slow growing individuals
##' @param iage age of each individual
##' @param dbh diameter of each individual
##' @param ksprt flags if stump can sprout
##'
##' @description KILL KILLS TREES BY AGE DEPENDENT MORTALITY (ONLY 1%
##' REACH MAXIMUM AGE) AND AGE INDEPENDENT MORTALITY (PROBABILITY OF
##' SURVIVING 10 CONSECUTIVE YEARS OF SLOW GROWTH (SEE GROW) = 1%).
##' DECISIONS ON WHETHER OR NOT TO KILL A TREE ARE PARTLY BASED ON
##' RANDOM NUMBERS SUPPLIED BY rand.
##' KILL ALSO CALCULATES LITTER AMOUNTS, WHICH ARE DECAYED IN
##' SUBROUTINE DECOMP.
##'
##' @return dbh diameter of each individual
##' @return ntrees number of trees of each species
##' @return iage age of each individual
##' @return nogro flags individuals growing slowly
##' @return ncohrt number of cohorts
##' @return tyl total yearly litter,
##' @return ksprt flags stumps that could sprout
##'
kill <- function(nspec, ntrees,slta,sltb,dbh,agemx,ksprt,sprtmn,sprtmx,iage,
                 nogro,tl,rtst,fwt,max.ind,frt){
  # dbh/iage/nogro hold all individuals grouped by species; [nl, nu] is the
  # slice belonging to the current species, tracked via knt.
  knt = 0
  nu = 0
  # initialize yearly litter accumulator; indices used below:
  # 1-12 leaf litter by quality class, 13 roots, 14/15 small/large wood,
  # 16 twigs, 17 total leaf litter, 18 grand total
  tyl = matrix(0,1,20)
  # initialize plot basal area
  ba = 0
  # begin main killing loop: over species, then individuals of that species
  for(i in 1:nspec){
    if(ntrees[i]==0) next
    nl = knt + 1
    nu = ntrees[i] + knt
    for(k in nl:nu){
      # leaf production for this tree from crown area and leaf weight
      # (original comment said "tree/ha"; presumably t/ha -- confirm units)
      folw = ((slta[i] + sltb[i] * dbh[k]) / 2) ^ 2 * 3.14 * fwt[i] * .000012
      # accumulate plot basal area
      ba = ba + .0314 * (dbh[k]*.5) ^ 2
      # age-dependent mortality: per-year probability chosen so only ~1% of
      # trees reach the species' maximum age (4.605 = -ln(0.01)); trees are
      # also culled whenever a species exceeds 1000 individuals
      yfl = runif(1,0,1) # pexp(agemx[i],1/(agemx[i]/2)) 4.605/agemx[i] iage[k] > runif(1,(agemx[i]-100),agemx[i])
      if(yfl <= 4.605/agemx[i] | ntrees[i] > 1000) {
        ntrees[i] = ntrees[i] - 1
        # if the dead tree's diameter allows stump sprouting, count it
        if(dbh[k]>sprtmn[i] & dbh[k]<sprtmx[i]) ksprt[i] = ksprt[i] + 1
        # woody litter (t/ha); bd is wood bulk density
        bd = .60
        # NOTE(review): this branch splits small/large wood at dbh .1 while
        # the slow-growth branch below uses 10 -- one of the two thresholds
        # looks like a typo; confirm against the original LINKAGES source.
        if(dbh[k] <= .1) tyl[14] = tyl[14] + bd * (.00143 * dbh[k] ^ 2.393)
        if(dbh[k] > .1) tyl[15] = tyl[15] + bd * (.00143 * dbh[k] ^ 2.393)
        # flag dead tree; compacted away below
        dbh[k] = -1
      } else {
        # age-independent mortality: a tree flagged as slow growing survives
        # each year with probability .368 (so ~1% survive 10 straight years)
        if(nogro[k]<=-2){
          yfl = runif(1,0,1)
          if(yfl <= .368){
            ntrees[i] = ntrees[i] - 1
            # if the dead tree's diameter allows stump sprouting, count it
            if(dbh[k]>sprtmn[i] & dbh[k]<sprtmx[i]) ksprt[i] = ksprt[i] + 1
            # woody litter (t/ha), split between small (<=10) and large wood
            bd = .60
            if(dbh[k]<=10) tyl[14] = tyl[14] + bd * (.00143 * dbh[k] ^ 2.393)
            if(dbh[k]>10) tyl[15] = tyl[15] + bd * (.00143 * dbh[k] ^ 2.393)
            # flag dead tree
            dbh[k] = -1
          }
        }
      }
      # leaf litter by quality class (t/ha): halved if the tree is slow
      # growing but didn't die; if the tree died, total leaf biomass (scaled
      # by foliage retention time) is returned to the soil
      L = tl[i]
      if(nogro[k] == -2 & dbh[k] > -1) folw = folw*.5
      if(dbh[k] < 0) folw = folw * frt[i]
      tyl[L] = tyl[L] + folw
      # root litter (t/ha)
      tyl[13] = tyl[13] + 1.3 * folw * rtst[i]
    }
    knt = nu
  }
  # total leaf litter (t/ha)
  tyl[17] = tyl[17] + sum(tyl[1:12])
  # twig litter (t/ha), proportional to plot basal area
  tyl[16] = ba/333
  # total litter (t/ha)
  tyl[18] = sum(tyl[13:17])
  # compact dbh/iage/nogro in place to drop trees flagged dead (-1);
  # a zero diameter marks the end of the occupied slots
  k = 0
  ntot = 0
  for(i in 1:max.ind){
    if(dbh[i]==0) {
      ntot = k
      break
    }
    if(dbh[i]<0){
      next
    }
    k = k+1
    dbh[k] = dbh[i]
    iage[k] = iage[i]
    nogro[k] = nogro[i]
    ntot = k
  }
  # zero out the stale tail left behind by the compaction above
  if(k!=nu){
    ntot1 = k+1
    if(ntot1 > max.ind) print("too many trees -- kill")
    # eliminate dead trees
    dbh[ntot1:nu] = 0
    iage[ntot1:nu] = 0
    nogro[ntot1:nu] = 0
  }
  # if(length(which(dbh>0)) < sum(ntrees)){
  #   ntrees[4] <- ntrees[4] - length(ntot1:nu) + 1
  # }
  # if(ntrees[4]<0) ntrees[4]=0
  # NOTE(review): debugging leftover -- drops into an interactive browser()
  # when the live-tree count disagrees with ntrees; consider stop() instead
  if(length(which(dbh>0)) != sum(ntrees)) browser()
  return(list(ntrees = ntrees, dbh = dbh, iage = iage, nogro = nogro,
              tyl = tyl, ksprt = ksprt))
}
#
#
#
#
#
# kill.opt <- function(nspec, ntrees,slta,sltb,dbh,agemx,ksprt,sprtmn,sprtmx,iage,
# nogro,tl,rtst,fwt,max.ind,frt){
# knt = 0
# nu = 0
# #initialize litter
# tyl = matrix(0,1,20)
#
# #initialize plot basal area
# ba = 0
#
# #begin main killing loop
# for(i in 1:nspec){
# if(ntrees[i]==0) next
# nl = knt + 1
# nu = ntrees[i] + knt
# #for(k in nl:nu){
# #calculate leaf production (tree/ha)
# folw = ((slta[i] + sltb[i] * dbh[nl:nu]) / 2) ^ 2 * 3.14 * fwt[i] * .000012
#
# #calculate basal area
# ba = ba + .0314 * (dbh[nl:nu]*.5) ^ 2
#
# #kill trees based on probability that only 1% reach max age
# yfl = runif(length(nl:nu),0,1) # pexp(agemx[i],1/(agemx[i]/2)) 4.605/agemx[i] iage[k] > runif(1,(agemx[i]-100),agemx[i])
# # if(yfl <= 4.605/agemx[i]) {
# ntrees[i] = ntrees[i] - length(which(yfl <= 4.605/agemx[i]))
#
# #check to see if dead tree can stump sprout increment skprt if tree can sprout
# ksprt.vec = ifelse(dbh[nl:nu]>sprtmn[i] & dbh[nl:nu]<sprtmx[i], ksprt[i] + 1,ksprt[i])
#
# #calculate woody litter in t/ha
# bd = .60
# tyl[14] = sum(tyl[14] + bd * (.00143 * dbh[dbh<=.1] ^ 2.393))
# tyl[15] = sum(tyl[15] + bd * (.00143 * dbh[dbh>.1] ^ 2.393))
#
# #flag dead trees
# dbh[which(yfl <= 4.605/agemx[i])] = -1
# #} else {
#
# if(nogro[nu:nl]<=-2){
# yfl = runif(1,0,1)
# if(yfl <= .368){
# ntrees[i] = ntrees[i] - 1
#
# #check to see if dead tree can sump sprout increment skprt if tree can sprout
# if(dbh[k]>sprtmn[i] & dbh[k]<sprtmx[i]) ksprt[i] = ksprt[i] + 1
#
# #calculate woody litter in t/ha
# bd = .60
# if(dbh[k]<=10) tyl[14] = tyl[14] + bd * (.00143 * dbh[k] ^ 2.393)
# if(dbh[k]>10) tyl[15] = tyl[15] + bd * (.00143 * dbh[k] ^ 2.393)
#
# #flag dead trees
# dbh[k] = -1
# }
# }
#
# }
# #calculate leaf litter by quality class in t/ha if the tree is slow growing but didn't di, leaf litter is halved
# #if the tree died, total leaf biomass is returned to the soil
# L = tl[i]
# if(nogro[k] == -2 & dbh[k] > -1) folw = folw*.5
# if(dbh[k] < 0) folw = folw * frt[i]
# tyl[L] = tyl[L] + folw
# #calculate root litter (t/ha)
# tyl[13] = tyl[13] + 1.3 * folw * rtst[i]
# }
# knt = nu
# }
# #calculate total leaf litter (t/ha)
# tyl[17] = tyl[17] + sum(tyl[1:12])
#
# #calculate twig litter in t/ha
# tyl[16] = ba/333
#
# #calculate total litter (t/ha)
# tyl[18] = sum(tyl[13:17])
#
# #rewrite diameters and ages to eliminate dead trees
# k = 0
# ntot = 0
# for(i in 1:max.ind){
# if(dbh[i]==0) {
# ntot = k
# break
# }
# if(dbh[i]<0){
# next
# }
# k = k+1
# dbh[k] = dbh[i]
# iage[k] = iage[i]
# nogro[k] = nogro[i]
# ntot = k
# }
#
# if(k!=nu){
# ntot1 = k+1
# if(ntot1 > max.ind) print("too many trees -- kill")
#
# #eliminate dead trees
# dbh[ntot1:nu] = 0
# iage[ntot1:nu] = 0
# nogro[ntot1:nu] = 0
# }
#
# # if(length(which(dbh>0)) < sum(ntrees)){
# # ntrees[4] <- ntrees[4] - length(ntot1:nu) + 1
# #
# # }
# # if(ntrees[4]<0) ntrees[4]=0cd
# if(length(which(dbh>0)) != sum(ntrees)) browser()
#
# return(list(ntrees = ntrees, dbh = dbh, iage = iage, nogro = nogro,
# tyl = tyl, ksprt = ksprt))
#
# }
|
/R/kill.R
|
no_license
|
sativa/linkages_package
|
R
| false
| false
| 8,402
|
r
|
##' @title LINKAGES kill function
##' @author Ann Raiho
##'
##' @param max.ind maximum number of individuals
##' @param nspec number of species
##' @param ntrees number of trees of each species
##' @param slta parameter to calculate crown area from diameter
##' @param sltb parameter to calculate crown area from diameter
##' @param agemx max age
##' @param sprtmn minimum diameter for a stump to sprout
##' @param sprtmx maximum diameter for a stump to sprout
##' @param tl leaf litter quality class
##' @param rtst root-shoot ratio for each species
##' @param fwt leaf weight per unit crown area
##' @param frt foliage retention time
##' @param ncohrt number of cohorts
##' @param nogro flags slow growing individuals
##' @param iage age of each individual
##' @param dbh diameter of each individual
##' @param ksprt flags if stump can sprout
##'
##' @description KILL KILLS TREES BY AGE DEPENDENT MORTALITY (ONLY 1%
##' REACH MAXIMUM AGE) AND AGE INDEPENDENT MORTALITY (PROBABILITY OF
##' SURVIVING 10 CONSECUTIVE YEARS OF SLOW GROWTH (SEE GROW) = 1%).
##' DECISIONS ON WHETHER OR NOT TO KILL A TREE ARE PARTLY BASED ON
##' RANDOM NUMBERS SUPPLIED BY rand.
##' KILL ALSO CALCULATES LITTER AMOUNTS, WHICH ARE DECAYED IN
##' SUBROUTINE DECOMP.
##'
##' @return dbh diameter of each individual
##' @return ntrees number of trees of each species
##' @return iage age of each individual
##' @return nogro flags individuals growing slowly
##' @return ncohrt number of cohorts
##' @return tyl total yearly litter,
##' @return ksprt flags stumps that could sprout
##'
kill <- function(nspec, ntrees,slta,sltb,dbh,agemx,ksprt,sprtmn,sprtmx,iage,
nogro,tl,rtst,fwt,max.ind,frt){
knt = 0
nu = 0
#initialize litter
tyl = matrix(0,1,20)
#initialize plot basal area
ba = 0
#begin main killing loop
for(i in 1:nspec){
if(ntrees[i]==0) next
nl = knt + 1
nu = ntrees[i] + knt
for(k in nl:nu){
#calculate leaf production (tree/ha)
folw = ((slta[i] + sltb[i] * dbh[k]) / 2) ^ 2 * 3.14 * fwt[i] * .000012
#calculate basal area
ba = ba + .0314 * (dbh[k]*.5) ^ 2
#kill trees based on probability that only 1% reach max age
yfl = runif(1,0,1) # pexp(agemx[i],1/(agemx[i]/2)) 4.605/agemx[i] iage[k] > runif(1,(agemx[i]-100),agemx[i])
if(yfl <= 4.605/agemx[i] | ntrees[i] > 1000) {
ntrees[i] = ntrees[i] - 1
#check to see if dead tree can stump sprout increment skprt if tree can sprout
if(dbh[k]>sprtmn[i] & dbh[k]<sprtmx[i]) ksprt[i] = ksprt[i] + 1
#calculate woody litter in t/ha
bd = .60
if(dbh[k] <= .1) tyl[14] = tyl[14] + bd * (.00143 * dbh[k] ^ 2.393)
if(dbh[k] > .1) tyl[15] = tyl[15] + bd * (.00143 * dbh[k] ^ 2.393)
#flag dead trees
dbh[k] = -1
} else {
if(nogro[k]<=-2){
yfl = runif(1,0,1)
if(yfl <= .368){
ntrees[i] = ntrees[i] - 1
#check to see if dead tree can sump sprout increment skprt if tree can sprout
if(dbh[k]>sprtmn[i] & dbh[k]<sprtmx[i]) ksprt[i] = ksprt[i] + 1
#calculate woody litter in t/ha
bd = .60
if(dbh[k]<=10) tyl[14] = tyl[14] + bd * (.00143 * dbh[k] ^ 2.393)
if(dbh[k]>10) tyl[15] = tyl[15] + bd * (.00143 * dbh[k] ^ 2.393)
#flag dead trees
dbh[k] = -1
}
}
}
#calculate leaf litter by quality class in t/ha if the tree is slow growing but didn't di, leaf litter is halved
#if the tree died, total leaf biomass is returned to the soil
L = tl[i]
if(nogro[k] == -2 & dbh[k] > -1) folw = folw*.5
if(dbh[k] < 0) folw = folw * frt[i]
tyl[L] = tyl[L] + folw
#calculate root litter (t/ha)
tyl[13] = tyl[13] + 1.3 * folw * rtst[i]
}
knt = nu
}
#calculate total leaf litter (t/ha)
tyl[17] = tyl[17] + sum(tyl[1:12])
#calculate twig litter in t/ha
tyl[16] = ba/333
#calculate total litter (t/ha)
tyl[18] = sum(tyl[13:17])
#rewrite diameters and ages to eliminate dead trees
k = 0
ntot = 0
for(i in 1:max.ind){
if(dbh[i]==0) {
ntot = k
break
}
if(dbh[i]<0){
next
}
k = k+1
dbh[k] = dbh[i]
iage[k] = iage[i]
nogro[k] = nogro[i]
ntot = k
}
if(k!=nu){
ntot1 = k+1
if(ntot1 > max.ind) print("too many trees -- kill")
#eliminate dead trees
dbh[ntot1:nu] = 0
iage[ntot1:nu] = 0
nogro[ntot1:nu] = 0
}
# if(length(which(dbh>0)) < sum(ntrees)){
# ntrees[4] <- ntrees[4] - length(ntot1:nu) + 1
#
# }
# if(ntrees[4]<0) ntrees[4]=0cd
if(length(which(dbh>0)) != sum(ntrees)) browser()
return(list(ntrees = ntrees, dbh = dbh, iage = iage, nogro = nogro,
tyl = tyl, ksprt = ksprt))
}
#
#
#
#
#
# kill.opt <- function(nspec, ntrees,slta,sltb,dbh,agemx,ksprt,sprtmn,sprtmx,iage,
# nogro,tl,rtst,fwt,max.ind,frt){
# knt = 0
# nu = 0
# #initialize litter
# tyl = matrix(0,1,20)
#
# #initialize plot basal area
# ba = 0
#
# #begin main killing loop
# for(i in 1:nspec){
# if(ntrees[i]==0) next
# nl = knt + 1
# nu = ntrees[i] + knt
# #for(k in nl:nu){
# #calculate leaf production (tree/ha)
# folw = ((slta[i] + sltb[i] * dbh[nl:nu]) / 2) ^ 2 * 3.14 * fwt[i] * .000012
#
# #calculate basal area
# ba = ba + .0314 * (dbh[nl:nu]*.5) ^ 2
#
# #kill trees based on probability that only 1% reach max age
# yfl = runif(length(nl:nu),0,1) # pexp(agemx[i],1/(agemx[i]/2)) 4.605/agemx[i] iage[k] > runif(1,(agemx[i]-100),agemx[i])
# # if(yfl <= 4.605/agemx[i]) {
# ntrees[i] = ntrees[i] - length(which(yfl <= 4.605/agemx[i]))
#
# #check to see if dead tree can stump sprout increment skprt if tree can sprout
# ksprt.vec = ifelse(dbh[nl:nu]>sprtmn[i] & dbh[nl:nu]<sprtmx[i], ksprt[i] + 1,ksprt[i])
#
# #calculate woody litter in t/ha
# bd = .60
# tyl[14] = sum(tyl[14] + bd * (.00143 * dbh[dbh<=.1] ^ 2.393))
# tyl[15] = sum(tyl[15] + bd * (.00143 * dbh[dbh>.1] ^ 2.393))
#
# #flag dead trees
# dbh[which(yfl <= 4.605/agemx[i])] = -1
# #} else {
#
# if(nogro[nu:nl]<=-2){
# yfl = runif(1,0,1)
# if(yfl <= .368){
# ntrees[i] = ntrees[i] - 1
#
# #check to see if dead tree can sump sprout increment skprt if tree can sprout
# if(dbh[k]>sprtmn[i] & dbh[k]<sprtmx[i]) ksprt[i] = ksprt[i] + 1
#
# #calculate woody litter in t/ha
# bd = .60
# if(dbh[k]<=10) tyl[14] = tyl[14] + bd * (.00143 * dbh[k] ^ 2.393)
# if(dbh[k]>10) tyl[15] = tyl[15] + bd * (.00143 * dbh[k] ^ 2.393)
#
# #flag dead trees
# dbh[k] = -1
# }
# }
#
# }
# #calculate leaf litter by quality class in t/ha if the tree is slow growing but didn't di, leaf litter is halved
# #if the tree died, total leaf biomass is returned to the soil
# L = tl[i]
# if(nogro[k] == -2 & dbh[k] > -1) folw = folw*.5
# if(dbh[k] < 0) folw = folw * frt[i]
# tyl[L] = tyl[L] + folw
# #calculate root litter (t/ha)
# tyl[13] = tyl[13] + 1.3 * folw * rtst[i]
# }
# knt = nu
# }
# #calculate total leaf litter (t/ha)
# tyl[17] = tyl[17] + sum(tyl[1:12])
#
# #calculate twig litter in t/ha
# tyl[16] = ba/333
#
# #calculate total litter (t/ha)
# tyl[18] = sum(tyl[13:17])
#
# #rewrite diameters and ages to eliminate dead trees
# k = 0
# ntot = 0
# for(i in 1:max.ind){
# if(dbh[i]==0) {
# ntot = k
# break
# }
# if(dbh[i]<0){
# next
# }
# k = k+1
# dbh[k] = dbh[i]
# iage[k] = iage[i]
# nogro[k] = nogro[i]
# ntot = k
# }
#
# if(k!=nu){
# ntot1 = k+1
# if(ntot1 > max.ind) print("too many trees -- kill")
#
# #eliminate dead trees
# dbh[ntot1:nu] = 0
# iage[ntot1:nu] = 0
# nogro[ntot1:nu] = 0
# }
#
# # if(length(which(dbh>0)) < sum(ntrees)){
# # ntrees[4] <- ntrees[4] - length(ntot1:nu) + 1
# #
# # }
# # if(ntrees[4]<0) ntrees[4]=0cd
# if(length(which(dbh>0)) != sum(ntrees)) browser()
#
# return(list(ntrees = ntrees, dbh = dbh, iage = iage, nogro = nogro,
# tyl = tyl, ksprt = ksprt))
#
# }
|
### Jeroen Roelofs
### January 08 2015
# Value replacement function
cloud2NA <- function(x, y) {
  # Mask out every element of `x` whose companion value in the cloud mask
  # `y` is non-zero, returning the masked copy of `x`.
  replace(x, y != 0, NA)
}
|
/R/cloud2NA.R
|
no_license
|
jdh009/Lesson4exercise4
|
R
| false
| false
| 128
|
r
|
### Jeroen Roelofs
### January 08 2015
# Value replacement function
cloud2NA <- function(x, y){
x[y != 0] <- NA
return(x)
}
|
library(jpeg)
# Read the image and split it into its red, green and blue channels.
lin<-readJPEG("C:/Users/Por/Desktop/R/lin.jpg")
r<-lin[,,1]
g<-lin[,,2]
b<-lin[,,3]
# Singular value decomposition of each channel.
r.svd<-svd(r)
g.svd<-svd(g)
b.svd<-svd(b)
rgb.svds<-list(r.svd,g.svd,b.svd)
# Reconstruct rank-j approximations for j = 100, 200, 300, 400 and write
# each compressed image back to disk.
for (j in seq.int(100,400,length.out = 4))
{
  # Rank-j reconstruction of each channel: U_j %*% diag(d_j) %*% t(V_j);
  # simplify = 'array' stacks the three channels back into an RGB array.
  a<-sapply(rgb.svds,function(i){
    lin.compress<-i$u[,1:j]%*%diag(i$d[1:j])%*%t(i$v[,1:j])
  },simplify = 'array')
  writeJPEG(a,paste('C:/Users/Por/Desktop/R/','lin_svd_rank',round(j,0),'.jpg',sep=''))
}
|
/SC2019Lab-11-η³ζη³-16081085.R
|
no_license
|
anhnguyendepocen/SC2019-assignments
|
R
| false
| false
| 443
|
r
|
library(jpeg)
lin<-readJPEG("C:/Users/Por/Desktop/R/lin.jpg")
r<-lin[,,1]
g<-lin[,,2]
b<-lin[,,3]
r.svd<-svd(r)
g.svd<-svd(g)
b.svd<-svd(b)
rgb.svds<-list(r.svd,g.svd,b.svd)
for (j in seq.int(100,400,length.out = 4))
{
a<-sapply(rgb.svds,function(i){
lin.compress<-i$u[,1:j]%*%diag(i$d[1:j])%*%t(i$v[,1:j])
},simplify = 'array')
writeJPEG(a,paste('C:/Users/Por/Desktop/R/','lin_svd_rank',round(j,0),'.jpg',sep=''))
}
|
# Author: Robert J. Hijmans, r.hijmans@gmail.com
# Date : September 2009
# Version 0.9
# Licence GPL v3
if (!isGeneric("distance")) {
	setGeneric("distance", function(x, ...)
		standardGeneric("distance"))
}

# distance, RasterLayer method.
# For every NA cell of `x`, computes the distance to the nearest non-NA cell
# (non-NA cells get 0).  With doEdge=TRUE only the inner boundary cells of the
# non-NA area are used as candidate target points, which shrinks the point set.
# Works in memory when possible, otherwise streams the result block-by-block
# to `filename`.  Distances are great-circle when the layer looks lon/lat.
setMethod('distance', signature(x='RasterLayer'),
function(x, filename='', doEdge=TRUE, ...) {
	if (doEdge) {
		r <- boundaries(x, classes=FALSE, type='inner', progress=.progress(...))
		pts <- try( rasterToPoints(r, fun=function(z){ z>0 } )[,1:2, drop=FALSE] )
	} else {
		pts <- try( rasterToPoints(x)[,1:2, drop=FALSE] )
	}
	# inherits() is the robust test for try() failure; comparing class(pts)
	# with == breaks under if() when an object carries more than one class.
	if (inherits(pts, "try-error")) {
		# could not hold the points in memory; fall back to the row-wise algorithm
		return( .distanceRows(x, filename=filename, ...) )
	}
	if (nrow(pts) == 0) {
		stop('RasterLayer has no NA cells (for which to compute a distance)')
	}
	out <- raster(x)
	filename <- trim(filename)
	if (couldBeLonLat(x)) {
		longlat=TRUE
	} else {
		longlat=FALSE
	}
	if (canProcessInMemory(out, 6)) {
		pb <- pbCreate(3, label='distance', ...)
		x <- values(x)
		i <- which(is.na(x))
		if (length(i) < 1) {
			stop('raster has no NA values to compute distance to')
		}
		pbStep(pb)
		# non-NA cells get distance 0; NA cells get distance to nearest point
		x[] <- 0
		xy <- xyFromCell(out, i)
		x[i] <- .Call("distanceToNearestPoint", xy, pts, as.integer(longlat), PACKAGE='raster')
		pbStep(pb)
		out <- setValues(out, x)
		if (filename != '') {
			out <- writeRaster(out, filename=filename, ...)
		}
		pbStep(pb)
		pbClose(pb)
		return(out)
	}
	# out-of-memory path: process the raster in row blocks
	out <- writeStart(out, filename=filename, ...)
	tr <- blockSize(out)
	pb <- pbCreate(tr$n, label='distance', ...)
	# x coordinates repeat for every row of a block; y is filled per block
	xy <- cbind(rep(xFromCol(out, 1:ncol(out)), tr$nrows[1]), NA)
	for (i in 1:tr$n) {
		if (i == tr$n) {
			# last block may be shorter than the others
			xy <- xy[1:(ncol(out)*tr$nrows[i]), ]
		}
		xy[,2] <- rep(yFromRow(out, tr$row[i]:(tr$row[i]+tr$nrows[i]-1)), each=ncol(out))
		vals <- getValues(x, tr$row[i], tr$nrows[i])
		j <- which(is.na(vals))
		vals[] <- 0
		if (length(j) > 0) {
			vals[j] <- .Call("distanceToNearestPoint", xy[j,,drop=FALSE], pts, as.integer(longlat), PACKAGE='raster')
		}
		out <- writeValues(out, vals, tr$row[i])
		pbStep(pb)
	}
	pbClose(pb)
	out <- writeStop(out)
	return(out)
}
)
|
/raster/R/distance.R
|
no_license
|
radfordneal/R-package-mods
|
R
| false
| false
| 2,204
|
r
|
# Author: Robert J. Hijmans, r.hijmans@gmail.com
# Date : September 2009
# Version 0.9
# Licence GPL v3
if (!isGeneric("distance")) {
setGeneric("distance", function(x, ...)
standardGeneric("distance"))
}
setMethod('distance', signature(x='RasterLayer'),
function(x, filename='', doEdge=TRUE, ...) {
if (doEdge) {
r <- boundaries(x, classes=FALSE, type='inner', progress=.progress(...))
pts <- try( rasterToPoints(r, fun=function(z){ z>0 } )[,1:2, drop=FALSE] )
} else {
pts <- try( rasterToPoints(x)[,1:2, drop=FALSE] )
}
if (class(pts) == "try-error") {
return( .distanceRows(x, filename=filename, ...) )
}
if (nrow(pts) == 0) {
stop('RasterLayer has no NA cells (for which to compute a distance)')
}
out <- raster(x)
filename <- trim(filename)
if (couldBeLonLat(x)) {
longlat=TRUE
} else {
longlat=FALSE
}
if (canProcessInMemory(out, 6)) {
pb <- pbCreate(3, label='distance', ...)
x <- values(x)
i <- which(is.na(x))
if (length(i) < 1) {
stop('raster has no NA values to compute distance to')
}
pbStep(pb)
x[] <- 0
xy <- xyFromCell(out, i)
x[i] <- .Call("distanceToNearestPoint", xy, pts, as.integer(longlat), PACKAGE='raster')
pbStep(pb)
out <- setValues(out, x)
if (filename != '') {
out <- writeRaster(out, filename=filename, ...)
}
pbStep(pb)
pbClose(pb)
return(out)
}
out <- writeStart(out, filename=filename, ...)
tr <- blockSize(out)
pb <- pbCreate(tr$n, label='distance', ...)
xy <- cbind(rep(xFromCol(out, 1:ncol(out)), tr$nrows[1]), NA)
for (i in 1:tr$n) {
if (i == tr$n) {
xy <- xy[1:(ncol(out)*tr$nrows[i]), ]
}
xy[,2] <- rep(yFromRow(out, tr$row[i]:(tr$row[i]+tr$nrows[i]-1)), each=ncol(out))
vals <- getValues(x, tr$row[i], tr$nrows[i])
j <- which(is.na(vals))
vals[] <- 0
if (length(j) > 0) {
vals[j] <- .Call("distanceToNearestPoint", xy[j,,drop=FALSE], pts, as.integer(longlat), PACKAGE='raster')
}
out <- writeValues(out, vals, tr$row[i])
pbStep(pb)
}
pbClose(pb)
out <- writeStop(out)
return(out)
}
)
|
/plot6.R
|
no_license
|
yiptsangkin/Rproject2
|
R
| false
| false
| 964
|
r
| ||
library(shiny)
library(leaflet)
library(RColorBrewer)
options(java.parameters = "-Xmx4g" )
library(readxl)
library(XLConnect)
library(plyr)
library(dplyr)
library(data.table)
library(foreign)
library(ggplot2)
library(psych)
# Read the petrol-station price sheet (header starts at row 5 of the xls).
df <- readWorksheetFromFile("data/preciosEESS_es.xls", sheet=1, startRow = 5)
load(file="data/gasoC") # load precomputed nearby-station data
load(file="data/preciosG") # load external price file
### Format and clean the data to improve regression results:
### decimal commas -> dots, drop rows with missing coordinates or prices.
df$cod <- as.integer(substr(df$CΓ³digo.postal, 0, 2))
df$Precio.gasolina.95 <- as.numeric(gsub(",", ".", df$Precio.gasolina.95))
df$Precio.gasolina.98 <- as.numeric(gsub(",", ".", df$Precio.gasolina.98))
df$Precio.gasΓ³leo.A <- as.numeric(gsub(",", ".", df$Precio.gasΓ³leo.A))
df$Longitud <- as.numeric(gsub(",", ".", df$Longitud))
df$Latitud <- as.numeric(gsub(",", ".", df$Latitud))
df <- df[!is.na(df$Longitud), ]
df <- df[!is.na(df$Latitud), ]
df <- df[!is.na(df$Precio.gasolina.95), ]
df <- df[!is.na(df$Precio.gasΓ³leo.A), ]
### Build the province drop-down choices from the Geoportal data frame.
codN <- factor(unlist(df[,"Provincia"]))
codN <- as.list(levels(codN))
codN <- c("TODAS", codN)
# Province codes matching the drop-down order.
codP <- list(1,2,3,4,33,5,6,7,8,9,10,11,39,12,51,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,29,52,30,31,32,35,34,36,26,37,38,40,41,42,43,44,45,46,47,48,49,50)
# Brand ("rotulo") drop-down choices.
rotN <- factor(unlist(df[,"RΓ³tulo"]))
rotN <- as.list(levels(rotN))
rotN <- c("TODAS", rotN)
# Number of stations.  NOTE(review): both plyr and dplyr are attached, so
# which count() this resolves to depends on load order -- confirm.
z<-as.integer(count(df))
### Load a data frame with the selected columns from the Geoportal sheet;
### prices are scaled to integer thousandths of a euro.
allG <- as.data.frame("")
allG[1:(z), "G95"] <- (df[,"Precio.gasolina.95"]) * 1000
allG[,"G95"] <-gsub(",","",allG[,"G95"])
allG[,"G95"] <-as.integer(allG[,"G95"])
allG[1:(z), "G98"] <- (df[,"Precio.gasolina.98"]) * 1000
allG[,"G98"] <-gsub(",","",allG[,"G98"])
allG[,"G98"] <-as.integer(allG[,"G98"])
allG[1:(z), "GA"] <- (df[,"Precio.gasΓ³leo.A"]) * 1000
allG[,"GA"] <-gsub(",","",allG[,"GA"])
allG[,"GA"] <-as.integer(allG[,"GA"])
allG[1:z, "latitude"] <- (df[,"Latitud"])
allG[1:z, "longitude"] <- (df[,"Longitud"])
allG[1:z, "cod"] <- (df[,"cod"])
allG[1:z, "rot"] <- df[,"RΓ³tulo"]
allG[1:z, "dir"] <- df[,"DirecciΓ³n"]
allG <- allG[1:z,]
# Assign each station a sequential identifier.
for(i in 1:nrow(allG)) {
  allG[i,"codeG"] <- i
}
|
/global.R
|
no_license
|
jadeagustin/EESS_Pricing
|
R
| false
| false
| 2,247
|
r
|
library(shiny)
library(leaflet)
library(RColorBrewer)
options(java.parameters = "-Xmx4g" )
library(readxl)
library(XLConnect)
library(plyr)
library(dplyr)
library(data.table)
library(foreign)
library(ggplot2)
library(psych)
df <- readWorksheetFromFile("data/preciosEESS_es.xls", sheet=1, startRow = 5)
load(file="data/gasoC") #cargamos las cercanias
load(file="data/preciosG") #cargamos archivo de precios externo
### Formateamos y limpiamos datos para mejorar resultados de regresiΓ³n
df$cod <- as.integer(substr(df$CΓ³digo.postal, 0, 2))
df$Precio.gasolina.95 <- as.numeric(gsub(",", ".", df$Precio.gasolina.95))
df$Precio.gasolina.98 <- as.numeric(gsub(",", ".", df$Precio.gasolina.98))
df$Precio.gasΓ³leo.A <- as.numeric(gsub(",", ".", df$Precio.gasΓ³leo.A))
df$Longitud <- as.numeric(gsub(",", ".", df$Longitud))
df$Latitud <- as.numeric(gsub(",", ".", df$Latitud))
df <- df[!is.na(df$Longitud), ]
df <- df[!is.na(df$Latitud), ]
df <- df[!is.na(df$Precio.gasolina.95), ]
df <- df[!is.na(df$Precio.gasΓ³leo.A), ]
### Montamos el desplegable con las provincias en base al DF de Geoportal
codN <- factor(unlist(df[,"Provincia"]))
codN <- as.list(levels(codN))
codN <- c("TODAS", codN)
codP <- list(1,2,3,4,33,5,6,7,8,9,10,11,39,12,51,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,29,52,30,31,32,35,34,36,26,37,38,40,41,42,43,44,45,46,47,48,49,50)
rotN <- factor(unlist(df[,"RΓ³tulo"]))
rotN <- as.list(levels(rotN))
rotN <- c("TODAS", rotN)
z<-as.integer(count(df))
### Cargamos dataframe con datos elegidos del excel de geoportal
allG <- as.data.frame("")
allG[1:(z), "G95"] <- (df[,"Precio.gasolina.95"]) * 1000
allG[,"G95"] <-gsub(",","",allG[,"G95"])
allG[,"G95"] <-as.integer(allG[,"G95"])
allG[1:(z), "G98"] <- (df[,"Precio.gasolina.98"]) * 1000
allG[,"G98"] <-gsub(",","",allG[,"G98"])
allG[,"G98"] <-as.integer(allG[,"G98"])
allG[1:(z), "GA"] <- (df[,"Precio.gasΓ³leo.A"]) * 1000
allG[,"GA"] <-gsub(",","",allG[,"GA"])
allG[,"GA"] <-as.integer(allG[,"GA"])
allG[1:z, "latitude"] <- (df[,"Latitud"])
allG[1:z, "longitude"] <- (df[,"Longitud"])
allG[1:z, "cod"] <- (df[,"cod"])
allG[1:z, "rot"] <- df[,"RΓ³tulo"]
allG[1:z, "dir"] <- df[,"DirecciΓ³n"]
allG <- allG[1:z,]
for(i in 1:nrow(allG)) {
allG[i,"codeG"] <- i
}
|
#setwd("~/Desktop/PSM/Spring 2019/Multivariate-Stats/Exam 2")
# 1 -- one-way ANOVA of productivity improvement by treatment level,
#      with Tukey HSD pairwise comparisons.
# NOTE(review): rm(list=ls()) wipes the user's workspace; avoid in shared scripts.
rm(list=ls())
X <- read.csv("./data/prodimp.csv",header = TRUE) #Get data
library(ggplot2) #Q plots
qplot(X$level,X$productivity, main="qPlot of Productivity Data", xlab="Treatment", ylab="Productivity Improvement")
png("prodq.png")
qplot(X$level,X$productivity, main="qPlot of Productivity Data", xlab="Treatment", ylab="Productivity Improvement")
dev.off()
#fit the model (level still numeric here)
fit<- aov(X$productivity~X$level)
summary(fit)
#Factor the treatment variable and then recompute the ANOVA
X$level <- factor(X$level)
fit2<- aov(X$productivity~X$level)
summary(fit2)
#make the Tukey HSD simultaneous 95% confidence intervals
TukeyHSD(fit2, ordered = FALSE, conf.level = 0.95)
plot(TukeyHSD(fit2, ordered = FALSE, conf.level = 0.95))
png("prodtukey.png")
plot(TukeyHSD(fit2, ordered = FALSE, conf.level = 0.95))
dev.off()
#What are the group means for comparison
(m1=mean(X$productivity[which(X$level==1)]))
(m2=mean(X$productivity[which(X$level==2)]))
(m3=mean(X$productivity[which(X$level==3)]))
#******************************************************************#
# 2 -- Hotelling T^2 control chart and pairwise confidence ellipses.
rm(list=ls())
X <- read.table("./data/T5-14.dat",header = FALSE)
X <- as.matrix(X)
first <- X[1:30,] #Just the good (in-spec) data; mean/covariance come from it
n <- nrow(first)
p <- ncol(first)
#Basic set of computations. Should probably just write a module for these
#xbar = sample mean, D = centered data, S = sample covariance
(xbar <- t(matrix(1,ncol=n) %*% first)/n)
(D <- first - matrix(1,nrow=n) %*% t(xbar))
(S <- (n-1)^(-1) * t(D)%*%D)
Sinv <- solve(S)
#Mahalanobis T^2 for EVERY row of X, using the in-spec mean and covariance
d <- c()
for (i in 1:length(X[,1])){
  d[i] <- (X[i,]-t(xbar))%*%Sinv%*%t(X[i,]-t(xbar))
}
#Chi-square control limits at 95% and 99%
UCL95 <- qchisq(.95,df=p)
UCL99 <- qchisq(.99,df=p)
#For The In Spec Data Points
#################################################################
plot(x=1:n,y=d[1:n],ylim=c(0,max(UCL99,ceiling(max(d[1:n])))),main="In Spec T2", xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
png("inspec.png")
plot(x=1:n,y=d[1:n],ylim=c(0,max(UCL99,ceiling(max(d[1:n])))),main="In Spec T2",xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
dev.off()
####################################################################
#Same chart for all the data (in-spec + later samples)
###################################################################
plot(x=1:length(d),y=d,ylim=c(0,max(UCL99,ceiling(max(d)))),main="All data T2", xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
png("outspec.png")
plot(x=1:length(d),y=d,ylim=c(0,max(UCL99,ceiling(max(d)))),main="All data T2", xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
dev.off()
#Find the out-of-control data points (above the 99% limit)
(which(d>UCL99))
#One-sample T^2 test of H0: mu = 0 on the in-spec data
alpha=.05
mu0 <- matrix(c(0,0,0,0,0,0),ncol=1) #Zero vector
(T2 <- n*t(xbar-mu0)%*%Sinv%*%(xbar-mu0))
(Tcrit <- p*(n-1)*qf(1-alpha,p,n-p)/(n-p))
if(T2>Tcrit){
  print("Reject H0")
} else{
  print("Fail to Reject")
}
library(plotrix)
#Pairwise 99% confidence ellipses, using ALL the data.
#Ellipse axes come from the eigendecomposition of the 2x2 covariance;
#half-lengths are sqrt(chi^2_crit * eigenvalue), orientation from the
#leading eigenvector.
dat <- X
# variables 1 vs 2
X <- as.matrix(dat[,1:2])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_2',xlab='X_1',xlim=c(-2,1),ylim=c(-1,.75))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
             sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
png("12.png")
plot(0,pch='',ylab='X_2',xlab='X_1',xlim=c(-2,1),ylim=c(-1,.75))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
             sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
dev.off()
# variables 1 vs 3
X <- as.matrix(dat[,c(1,3)])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_3',xlab='X_1',xlim=c(-1.5,.5),ylim=c(-1.25,1.25))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
             sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
# variables 1 vs 4
X <- as.matrix(dat[,c(1,4)])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_4',xlab='X_1',xlim=c(-1.25,.5),ylim=c(-1.25,1.25))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
             sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
# variables 1 vs 5
X <- as.matrix(dat[,c(1,5)])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_5',xlab='X_1',xlim=c(-1.25,.5),ylim=c(-1,2.25))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
             sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
# variables 1 vs 6
X <- as.matrix(dat[,c(1,6)])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_6',xlab='X_1',xlim=c(-1.25,.5),ylim=c(-1,1))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
             sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
png("16.png")
plot(0,pch='',ylab='X_6',xlab='X_1',xlim=c(-1.25,.5),ylim=c(-1,1))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
             sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
dev.off()
# variables 2 vs 6
X <- as.matrix(dat[,c(2,6)])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_6',xlab='X_2',xlim=c(-1.25,.75),ylim=c(-.75,.85))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
             sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
###################################################################
#3
rm(list=ls())
#Given calls
library(reshape2)
dat.fill <- read.csv("./data/BHO_Medication_Fill_Data__2010-2014.csv",header=TRUE)
tmp <- dcast(dat.fill, OMH.Region + Year + Quarter + Age.Group ~ Description.of.Metric)
dat <- tmp[,c(1:4,6,8,10)]
names(dat) <- c(names(dat)[1:4],"Mood30","Psychotrop30","Antipsych30")
head(dat)
# Look at dem histograms,
### Mood
hist(dat[,5],breaks=20,main="Mood30",xlab="% Refill")
png("moodhist.png")
hist(dat[,5],breaks=20,main="Mood30",xlab="% Refill")
dev.off()
### Psycho
hist(dat[,6],breaks=20,main="Psych30",xlab="% Refill")
png("psychhist.png")
hist(dat[,6],breaks=20,main="Psych30",xlab="% Refill")
dev.off()
## Anti
hist(dat[,7],breaks=20,main="Anti30",xlab="% Refill")
png("antihist.png")
hist(dat[,7],breaks=20,main="Anti30",xlab="% Refill")
dev.off()
#Q plot time
#QQ Mood
xmood <- sort(dat$Mood30)
n <- length(xmood)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xmood,pch=19)
png("moodq.png")
plot(q,xmood,pch=19)
dev.off()
#Compute rq statistic from text
q.bar <- mean(q)
x.bar <- mean(xmood)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqmood <- sum((xmood-x.bar)*(q-q.bar))/(sqrt(sum((xmood-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqmood>critval95){
print("Its Normal Yo")
} else{
print("Nice try bro")
}
## QQ Psycho
xpsych <- sort(dat$Psychotrop30)
n <- length(xpsych)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xpsych,pch=19)
png("psychq.png")
plot(q,xpsych,pch=19)
dev.off()
#Compute rq statistic from text
q.bar <- mean(q)
x.bar <- mean(xpsych)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqpsych <- sum((xpsych-x.bar)*(q-q.bar))/(sqrt(sum((xpsych-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqpsych>critval95){
print("Its Normal Yo")
} else{
print("Nice try bro")
}
## QQ Anti
xanti <- sort(dat$Antipsych30)
n <- length(xanti)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xanti,pch=19)
png("antiq.png")
plot(q,xanti,pch=19)
dev.off()
#Compute rq statistic from text
q.bar <- mean(q)
x.bar <- mean(xanti)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqanti <- sum((xanti-x.bar)*(q-q.bar))/(sqrt(sum((xanti-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqanti>critval95){
print("Its Normal Yo")
} else{
print("Nice try bro")
}
#Make the Chi Square plot now
n <- nrow(dat)
p <- ncol(dat)
reduced <- as.matrix(dat[,5:7])
(xbar <- t(matrix(1,ncol=n) %*% reduced)/n)
(D <- reduced - matrix(1,nrow=n) %*% t(xbar))
(S <- (n-1)^(-1) * t(D)%*%D)
Sinv <- solve(S)
d <- c()
for (i in 1:length(reduced[,1])){
d[i] <- (reduced[i,]-t(xbar))%*%Sinv%*%t(reduced[i,]-t(xbar))
}
chisq.quantiles <- qchisq(((1:n)-0.5)/n,df=3)
f1<- summary(lm(sort(d)~chisq.quantiles))
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f1$coefficients[1],f1$coefficients[2],col="red")
png("originalchi.png")
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f1$coefficients[1],f1$coefficients[2],col="red")
dev.off()
#Looks like we have an outlier
which.max(d)
((dat[30,7]-mean(dat[,7]))/sqrt(var(dat[,7])))
#Because we cant see the data, lets just drop it from the analysis
newTest <- as.matrix(dat[,5:7])
newTest <- newTest[-30,]
#new Chi square
n <- nrow(newTest)
p <- ncol(newTest)
(xbar <- t(matrix(1,ncol=n) %*% newTest)/n)
(D <- newTest - matrix(1,nrow=n) %*% t(xbar))
(S <- (n-1)^(-1) * t(D)%*%D)
Sinv <- solve(S)
d <- c()
for (i in 1:length(newTest[,1])){
d[i] <- (newTest[i,]-t(xbar))%*%Sinv%*%t(newTest[i,]-t(xbar))
}
chisq.quantiles <- qchisq(((1:n)-0.5)/n,df=3)
f2<-summary(lm(sort(d)~chisq.quantiles))
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f2$coefficients[1],f2$coefficients[2],col="red")
png("newchi.png")
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f2$coefficients[1],f2$coefficients[2],col="red")
dev.off()
#Better, lets check the qq stats
# Rerun the univariate QQ correlation test on each of the three columns of
# the outlier-free matrix newTest (Mood, Psychotropic, Antipsychotic).
# NewQQ Mood
xmood <- sort(newTest[,1])
n <- length(xmood)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xmood,pch=19)
png("newmoodq.png")
plot(q,xmood,pch=19)
dev.off()
q.bar <- mean(q)
x.bar <- mean(xmood)
critval95 <- 0.9913 # from table 4.2 p. 181
# Correlation coefficient between ordered data and normal quantiles.
(rqmood <- sum((xmood-x.bar)*(q-q.bar))/(sqrt(sum((xmood-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqmood>critval95){
  print("Its Normal Yo")
} else{
  print("Nice try bro")
}
# New QQ Psycho
xpsych <- sort(newTest[,2])
n <- length(xpsych)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xpsych,pch=19)
png("newpsychq.png")
plot(q,xpsych,pch=19)
dev.off()
q.bar <- mean(q)
x.bar <- mean(xpsych)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqpsych <- sum((xpsych-x.bar)*(q-q.bar))/(sqrt(sum((xpsych-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqpsych>critval95){
  print("Its Normal Yo")
} else{
  print("Nice try bro")
}
# New QQ Anti
xanti <- sort(newTest[,3])
n <- length(xanti)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xanti,pch=19)
png("newantiq.png")
plot(q,xanti,pch=19)
dev.off()
q.bar <- mean(q)
x.bar <- mean(xanti)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqanti <- sum((xanti-x.bar)*(q-q.bar))/(sqrt(sum((xanti-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqanti>critval95){
  print("Its Normal Yo")
} else{
  print("Nice try bro")
}
# Box-Cox profile log-likelihood for one candidate transformation parameter.
#
# lambda: candidate Box-Cox power; lambda == 0 means the log transform.
# n:      number of observations in x (kept as an explicit argument for
#         backward compatibility with existing callers).
# x:      strictly positive numeric data vector.
#
# Returns the (unnormalized) profile log-likelihood; larger is better.
# The variance term omits the usual 1/n factor, which only shifts the curve
# by a constant and does not move the maximizing lambda.
l.lambda <- function(lambda,n,x){
  # The Box-Cox family requires positive data (log(x) in the Jacobian term);
  # fail loudly instead of silently producing NaN/-Inf.
  stopifnot(is.numeric(x), all(x > 0))
  if(lambda==0){
    x.lambda <- log(x)
  } else {
    x.lambda <- (x^lambda - 1)/lambda
  }
  (-n/2) * log(sum((x.lambda - mean(x.lambda))^2)) +
    (lambda - 1) * sum(log(x))
}
# Profile the Box-Cox log-likelihood for the Psych30 column over a grid of
# candidate lambdas and apply the maximizing transform.
n <- length(newTest[,2])
lambdas <- seq(from=-1,to=10,by=.01)
# vapply over the grid instead of growing a vector inside a for loop.
l.lambdas <- vapply(lambdas, function(lam) l.lambda(lam, n, newTest[,2]), numeric(1))
plot(lambdas,l.lambdas)
# which.max() always yields a single index, so pstar stays a scalar even if
# floating-point ties make several grid values equal to the maximum
# (the original which(l.lambdas == max(l.lambdas)) could return a vector).
(pstar <- lambdas[which.max(l.lambdas)]) # this is the transformation we should use
newPsych <- (-1+newTest[,2]^pstar)/pstar
# QQ check of the Box-Cox transformed Psych30 column.
xpsych <- sort(newPsych)
n <- length(xpsych)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xpsych,pch=19)
png("scalepq.png")
plot(q,xpsych,pch=19)
dev.off()
q.bar <- mean(q)
x.bar <- mean(xpsych)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqpsych2 <- sum((xpsych-x.bar)*(q-q.bar))/(sqrt(sum((xpsych-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqpsych2>critval95){
  print("Its Normal Yo")
} else{
  print("Nice try bro")
}
##Now its normal. Good.
##Now that psych is good, do anti
# Same Box-Cox grid search for the Anti30 column.
n <- nrow(as.matrix(newTest[,3]))
lambdas <- seq(from=-1,to=10,by=.01)
l.lambdas <- c()
for (i in 1:length(lambdas)){
  l.lambdas[i] <- l.lambda(lambdas[i],n,newTest[,3])
}
plot(lambdas,l.lambdas)
# NOTE(review): which(x == max(x)) can return several indices on float
# ties; which.max() would guarantee a scalar astar.
(astar=lambdas[which(l.lambdas==max(l.lambdas))]) # this is the transformation we should use
newAnti = (-1+newTest[,3]^astar)/astar
# QQ check of the transformed Anti30 column.
xanti <- sort(newAnti)
n <- length(xanti)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xanti,pch=19)
png("scaleaq.png")
plot(q,xanti,pch=19)
dev.off()
q.bar <- mean(q)
x.bar <- mean(xanti)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqanti2 <- sum((xanti-x.bar)*(q-q.bar))/(sqrt(sum((xanti-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqanti2>critval95){
  print("Its Normal Yo")
} else{
  print("Nice try bro")
}
#Its not quite perfect, but is as good as we can get (log is much worse)
############
#Recombine our best attempts at normalcy
# Columns: untransformed Mood, Box-Cox Psych, Box-Cox Anti.
bestTry = matrix(c(newTest[,1],newPsych, newAnti),ncol=3)
#Make the chi-square on this data
n <- nrow(bestTry)
p <- ncol(bestTry)
# Mean vector, centered matrix, covariance, and Mahalanobis distances as before.
(xbar <- t(matrix(1,ncol=n) %*% bestTry)/n)
(D <- bestTry - matrix(1,nrow=n) %*% t(xbar))
(S <- (n-1)^(-1) * t(D)%*%D)
Sinv <- solve(S)
d <- c()
for (i in 1:length(bestTry[,1])){
  d[i] <- (bestTry[i,]-t(xbar))%*%Sinv%*%t(bestTry[i,]-t(xbar))
}
chisq.quantiles <- qchisq(((1:n)-0.5)/n,df=3)
f3 <- summary(lm(sort(d)~chisq.quantiles))
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f3$coefficients[1],f3$coefficients[2],col="red")
png("bestchi.png")
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f3$coefficients[1],f3$coefficients[2],col="red")
dev.off()
#This seems to be the best in terms of linarity, so we continue.
## Now that the data is scaled again, we do our test.
# Hotelling T2 test of H0: mu = mu0 on the transformed scale.
n <- nrow(bestTry)
p <- ncol(bestTry)
alpha=.05
#Base null mu
mu0 <- matrix(c(75,75,75), ncol=1)
mu0[2] <-(-1+mu0[2]^pstar)/pstar #Transform to new scale
mu0[3] <-(-1+mu0[3]^astar)/astar # Transform to its new scale
#Do the test
# T2 statistic and its F-based critical value p(n-1)/(n-p) * F_{p,n-p}.
(T2 <- n*t(xbar-mu0)%*%Sinv%*%(xbar-mu0))
(Tcrit <- p*(n-1)*qf(1-alpha,p,n-p)/(n-p))
if(T2>Tcrit){
  print("Reject H0")
} else{
  print("Fail to Reject")
}
################################################################
#4
# Problem 4: disinfection byproduct data (TTHM / HAA5) control charts.
# NOTE(review): rm(list=ls()) wipes the whole workspace — an anti-pattern
# in shared scripts; kept here as-is since later code assumes a clean slate.
rm(list=ls())
dat1 <- read.csv("./data/TTHM.csv",header=TRUE)
dat2 <- read.csv("./data/HAA5.csv",header=TRUE)
# Inner join on the first column of dat1 (presumably the date; confirm).
dat <- merge(dat1,dat2,by=names(dat1)[1],all=FALSE)
dat <- dat[,c(1:4,7)]
names(dat) <- c("Date","Year","Quarter","TTHM","HAA5")
head(dat)
#Histograms
hist(dat$TTHM)
png("thist.png")
hist(dat$TTHM)
dev.off()
hist(dat$HAA5)
png("hhist.png")
hist(dat$HAA5)
dev.off()
n <- nrow(dat)
#Xbar Charts
# Individuals chart: center line at the mean, 3-sigma control limits;
# the lower limit is floored at 0 when drawn.
center <- mean(dat$TTHM)
UL <- center + 3*sd(dat$TTHM)
LL <- center - 3*sd(dat$TTHM)
plot(x=1:n,dat$TTHM,type="l",ylim=c(0,110),xlab="Observation Number",ylab="TTHM")
abline(h=center, col="green");abline(h=UL, col="red");abline(h=max(0,LL), col="red")
png("tthmxbar.png")
plot(x=1:n,dat$TTHM,type="l",ylim=c(0,110),xlab="Observation Number",ylab="TTHM")
abline(h=center, col="green");abline(h=UL, col="red");abline(h=max(0,LL), col="red")
dev.off()
center <- mean(dat$HAA5)
UL <- center + 3*sd(dat$HAA5)
LL <- center - 3*sd(dat$HAA5)
plot(x=1:n,dat$HAA5,type="l",ylim=c(0,110),xlab="Observation Number",ylab="HAA5")
abline(h=center, col="green");abline(h=UL, col="red");abline(h=max(0,LL), col="red")
png("haa5xbar.png")
plot(x=1:n,dat$HAA5,type="l",ylim=c(0,110),xlab="Observation Number",ylab="HAA5")
abline(h=center, col="green");abline(h=UL, col="red");abline(h=max(0,LL), col="red")
dev.off()
library(plotrix)
#Ellipse Chart
# 99% chi-square(2) control ellipse for the bivariate (TTHM, HAA5) data.
X <- as.matrix(dat[,4:5])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
# Orientation of the major axis from the leading eigenvector of S.
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_2',xlab='X_1',xlim=c(-5,100),ylim=c(-5,90))
points(X)
# Semi-axis lengths sqrt(c2 * eigenvalue_i).
lengths <- c(sqrt(c2*eigen(S)$values[1]),
             sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
png("waterelip.png")
plot(0,pch='',ylab='HAA5',xlab='TTHM',xlim=c(-5,100),ylim=c(-5,90))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
             sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
dev.off()
# Sinv is kept: the Hotelling T2 test further down reuses it.
Sinv <- solve(S)
# Squared Mahalanobis distance of every row of X from the sample mean.
# stats::mahalanobis() replaces the former row-by-row loop that grew d
# with c(); unname() matches the original unnamed numeric vector.
d <- unname(mahalanobis(X, c(X.mean), S))
#Chi Cutoffs
# 95% and 99% chi-square control limits for the T2 (distance) chart.
UCL95 <- qchisq(.95,df=p)
UCL99 <- qchisq(.99,df=p)
#For The In Spec Data Points
#################################################################
plot(x=1:n,y=d[1:n],ylim=c(0,max(UCL99,ceiling(max(d[1:n])))),main="In Spec T2", xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
png("watert.png")
plot(x=1:n,y=d[1:n],ylim=c(0,max(UCL99,ceiling(max(d[1:n])))),main="In Spec T2",xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
dev.off()
# Hotelling T2 test of H0: mu = (80, 60)'.
n <- nrow(dat)
p <- 2
alpha=.05
#Base null mu
mu0 <- matrix(c(80,60), ncol=1)
#Do the test
# T2 statistic and F-based critical value p(n-1)/(n-p) * F_{p,n-p}.
(T2 <- n*t(X.mean-mu0)%*%Sinv%*%(X.mean-mu0))
(Tcrit <- p*(n-1)*qf(1-alpha,p,n-p)/(n-p))
if(T2>Tcrit){
  print("Reject H0")
} else{
  print("Fail to Reject")
}
|
/Exam 2/Exam 2.R
|
no_license
|
justinhood/Multivariate-Stats
|
R
| false
| false
| 18,166
|
r
|
#setwd("~/Desktop/PSM/Spring 2019/Multivariate-Stats/Exam 2")
# 1
rm(list=ls())
X <- read.csv("./data/prodimp.csv",header = TRUE) #Get data
library(ggplot2) #Q plots
qplot(X$level,X$productivity, main="qPlot of Productivity Data", xlab="Treatment", ylab="Productivity Improvement")
png("prodq.png")
qplot(X$level,X$productivity, main="qPlot of Productivity Data", xlab="Treatment", ylab="Productivity Improvement")
dev.off()
#make the mdoel
fit<- aov(X$productivity~X$level)
summary(fit)
#Factor and then recompute
X$level <- factor(X$level)
fit2<- aov(X$productivity~X$level)
summary(fit2)
#make the intervals
TukeyHSD(fit2, ordered = FALSE, conf.level = 0.95)
plot(TukeyHSD(fit2, ordered = FALSE, conf.level = 0.95))
png("prodtukey.png")
plot(TukeyHSD(fit2, ordered = FALSE, conf.level = 0.95))
dev.off()
#What are the means for comparison
(m1=mean(X$productivity[which(X$level==1)]))
(m2=mean(X$productivity[which(X$level==2)]))
(m3=mean(X$productivity[which(X$level==3)]))
#******************************************************************#
# 2
rm(list=ls())
X <- read.table("./data/T5-14.dat",header = FALSE)
X <- as.matrix(X)
first <- X[1:30,] #Just the good data
n <- nrow(first)
p <- ncol(first)
#Basic set of computations. Should probably just write a module for these
(xbar <- t(matrix(1,ncol=n) %*% first)/n)
(D <- first - matrix(1,nrow=n) %*% t(xbar))
(S <- (n-1)^(-1) * t(D)%*%D)
Sinv <- solve(S)
#D's
d <- c()
for (i in 1:length(X[,1])){
d[i] <- (X[i,]-t(xbar))%*%Sinv%*%t(X[i,]-t(xbar))
}
#Chi Cutoffs
UCL95 <- qchisq(.95,df=p)
UCL99 <- qchisq(.99,df=p)
#For The In Spec Data Points
#################################################################
plot(x=1:n,y=d[1:n],ylim=c(0,max(UCL99,ceiling(max(d[1:n])))),main="In Spec T2", xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
png("inspec.png")
plot(x=1:n,y=d[1:n],ylim=c(0,max(UCL99,ceiling(max(d[1:n])))),main="In Spec T2",xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
dev.off()
####################################################################
#For the Out of Spec?
###################################################################
plot(x=1:length(d),y=d,ylim=c(0,max(UCL99,ceiling(max(d)))),main="All data T2", xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
png("outspec.png")
plot(x=1:length(d),y=d,ylim=c(0,max(UCL99,ceiling(max(d)))),main="All data T2", xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
dev.off()
#Find the bad data points
(which(d>UCL99))
#T test time
alpha=.05
mu0 <- matrix(c(0,0,0,0,0,0),ncol=1) #Zero vector
(T2 <- n*t(xbar-mu0)%*%Sinv%*%(xbar-mu0))
(Tcrit <- p*(n-1)*qf(1-alpha,p,n-p)/(n-p))
if(T2>Tcrit){
print("Reject H0")
} else{
print("Fail to Reject")
}
library(plotrix)
#Do the plots with all the data
dat <- X
# 1v2
X <- as.matrix(dat[,1:2])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_2',xlab='X_1',xlim=c(-2,1),ylim=c(-1,.75))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
png("12.png")
plot(0,pch='',ylab='X_2',xlab='X_1',xlim=c(-2,1),ylim=c(-1,.75))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
dev.off()
#1v3
X <- as.matrix(dat[,c(1,3)])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_3',xlab='X_1',xlim=c(-1.5,.5),ylim=c(-1.25,1.25))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
# 1v4
X <- as.matrix(dat[,c(1,4)])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_4',xlab='X_1',xlim=c(-1.25,.5),ylim=c(-1.25,1.25))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
#1v5
X <- as.matrix(dat[,c(1,5)])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_5',xlab='X_1',xlim=c(-1.25,.5),ylim=c(-1,2.25))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
#1v6
X <- as.matrix(dat[,c(1,6)])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_6',xlab='X_1',xlim=c(-1.25,.5),ylim=c(-1,1))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
png("16.png")
plot(0,pch='',ylab='X_6',xlab='X_1',xlim=c(-1.25,.5),ylim=c(-1,1))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
dev.off()
#2v6
X <- as.matrix(dat[,c(2,6)])
n <- nrow(X)
p <- ncol(X)
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
c2 <- qchisq(1-alpha,df=2)
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_6',xlab='X_2',xlim=c(-1.25,.75),ylim=c(-.75,.85))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
###################################################################
#3
rm(list=ls())
#Given calls
library(reshape2)
dat.fill <- read.csv("./data/BHO_Medication_Fill_Data__2010-2014.csv",header=TRUE)
tmp <- dcast(dat.fill, OMH.Region + Year + Quarter + Age.Group ~ Description.of.Metric)
dat <- tmp[,c(1:4,6,8,10)]
names(dat) <- c(names(dat)[1:4],"Mood30","Psychotrop30","Antipsych30")
head(dat)
# Look at dem histograms,
### Mood
hist(dat[,5],breaks=20,main="Mood30",xlab="% Refill")
png("moodhist.png")
hist(dat[,5],breaks=20,main="Mood30",xlab="% Refill")
dev.off()
### Psycho
hist(dat[,6],breaks=20,main="Psych30",xlab="% Refill")
png("psychhist.png")
hist(dat[,6],breaks=20,main="Psych30",xlab="% Refill")
dev.off()
## Anti
hist(dat[,7],breaks=20,main="Anti30",xlab="% Refill")
png("antihist.png")
hist(dat[,7],breaks=20,main="Anti30",xlab="% Refill")
dev.off()
#Q plot time
#QQ Mood
xmood <- sort(dat$Mood30)
n <- length(xmood)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xmood,pch=19)
png("moodq.png")
plot(q,xmood,pch=19)
dev.off()
#Compute rq statistic from text
q.bar <- mean(q)
x.bar <- mean(xmood)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqmood <- sum((xmood-x.bar)*(q-q.bar))/(sqrt(sum((xmood-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqmood>critval95){
print("Its Normal Yo")
} else{
print("Nice try bro")
}
## QQ Psycho
xpsych <- sort(dat$Psychotrop30)
n <- length(xpsych)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xpsych,pch=19)
png("psychq.png")
plot(q,xpsych,pch=19)
dev.off()
#Compute rq statistic from text
q.bar <- mean(q)
x.bar <- mean(xpsych)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqpsych <- sum((xpsych-x.bar)*(q-q.bar))/(sqrt(sum((xpsych-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqpsych>critval95){
print("Its Normal Yo")
} else{
print("Nice try bro")
}
## QQ Anti
xanti <- sort(dat$Antipsych30)
n <- length(xanti)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xanti,pch=19)
png("antiq.png")
plot(q,xanti,pch=19)
dev.off()
#Compute rq statistic from text
q.bar <- mean(q)
x.bar <- mean(xanti)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqanti <- sum((xanti-x.bar)*(q-q.bar))/(sqrt(sum((xanti-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqanti>critval95){
print("Its Normal Yo")
} else{
print("Nice try bro")
}
#Make the Chi Square plot now
n <- nrow(dat)
p <- ncol(dat)
reduced <- as.matrix(dat[,5:7])
(xbar <- t(matrix(1,ncol=n) %*% reduced)/n)
(D <- reduced - matrix(1,nrow=n) %*% t(xbar))
(S <- (n-1)^(-1) * t(D)%*%D)
Sinv <- solve(S)
d <- c()
for (i in 1:length(reduced[,1])){
d[i] <- (reduced[i,]-t(xbar))%*%Sinv%*%t(reduced[i,]-t(xbar))
}
chisq.quantiles <- qchisq(((1:n)-0.5)/n,df=3)
f1<- summary(lm(sort(d)~chisq.quantiles))
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f1$coefficients[1],f1$coefficients[2],col="red")
png("originalchi.png")
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f1$coefficients[1],f1$coefficients[2],col="red")
dev.off()
#Looks like we have an outlier
which.max(d)
((dat[30,7]-mean(dat[,7]))/sqrt(var(dat[,7])))
#Because we cant see the data, lets just drop it from the analysis
newTest <- as.matrix(dat[,5:7])
newTest <- newTest[-30,]
#new Chi square
n <- nrow(newTest)
p <- ncol(newTest)
(xbar <- t(matrix(1,ncol=n) %*% newTest)/n)
(D <- newTest - matrix(1,nrow=n) %*% t(xbar))
(S <- (n-1)^(-1) * t(D)%*%D)
Sinv <- solve(S)
d <- c()
for (i in 1:length(newTest[,1])){
d[i] <- (newTest[i,]-t(xbar))%*%Sinv%*%t(newTest[i,]-t(xbar))
}
chisq.quantiles <- qchisq(((1:n)-0.5)/n,df=3)
f2<-summary(lm(sort(d)~chisq.quantiles))
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f2$coefficients[1],f2$coefficients[2],col="red")
png("newchi.png")
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f2$coefficients[1],f2$coefficients[2],col="red")
dev.off()
#Better, lets check the qq stats
# NewQQ Mood
xmood <- sort(newTest[,1])
n <- length(xmood)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xmood,pch=19)
png("newmoodq.png")
plot(q,xmood,pch=19)
dev.off()
q.bar <- mean(q)
x.bar <- mean(xmood)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqmood <- sum((xmood-x.bar)*(q-q.bar))/(sqrt(sum((xmood-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqmood>critval95){
print("Its Normal Yo")
} else{
print("Nice try bro")
}
# New QQ Psycho
xpsych <- sort(newTest[,2])
n <- length(xpsych)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xpsych,pch=19)
png("newpsychq.png")
plot(q,xpsych,pch=19)
dev.off()
q.bar <- mean(q)
x.bar <- mean(xpsych)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqpsych <- sum((xpsych-x.bar)*(q-q.bar))/(sqrt(sum((xpsych-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqpsych>critval95){
print("Its Normal Yo")
} else{
print("Nice try bro")
}
# New QQ Anti
xanti <- sort(newTest[,3])
n <- length(xanti)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xanti,pch=19)
png("newantiq.png")
plot(q,xanti,pch=19)
dev.off()
q.bar <- mean(q)
x.bar <- mean(xanti)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqanti <- sum((xanti-x.bar)*(q-q.bar))/(sqrt(sum((xanti-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqanti>critval95){
print("Its Normal Yo")
} else{
print("Nice try bro")
}
#Psych and Anti still not quite normal. Look at power scaling
##Mood is good, alter Psych
# Box-Cox profile log-likelihood for a single candidate lambda.
# lambda == 0 corresponds to the log transform; otherwise the usual
# (x^lambda - 1)/lambda power transform is applied before scoring.
# Larger return values indicate a better-fitting transformation.
l.lambda <- function(lambda,n,x){
  transformed <- if (lambda == 0) log(x) else (x^lambda - 1) / lambda
  centered <- transformed - mean(transformed)
  # Profile likelihood: variance term plus the Jacobian of the transform.
  jacobian <- (lambda - 1) * sum(log(x))
  (-n / 2) * log(sum(centered^2)) + jacobian
}
n <- nrow(as.matrix(newTest[,2]))
lambdas <- seq(from=-1,to=10,by=.01)
l.lambdas <- c()
for (i in 1:length(lambdas)){
l.lambdas[i] <- l.lambda(lambdas[i],n,newTest[,2])
}
plot(lambdas,l.lambdas)
(pstar=lambdas[which(l.lambdas==max(l.lambdas))]) # this is the transformation we should use
newPsych=(-1+newTest[,2]^pstar)/pstar
xpsych <- sort(newPsych)
n <- length(xpsych)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xpsych,pch=19)
png("scalepq.png")
plot(q,xpsych,pch=19)
dev.off()
q.bar <- mean(q)
x.bar <- mean(xpsych)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqpsych2 <- sum((xpsych-x.bar)*(q-q.bar))/(sqrt(sum((xpsych-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqpsych2>critval95){
print("Its Normal Yo")
} else{
print("Nice try bro")
}
##Now its normal. Good.
##Now that psych is good, do anti
n <- nrow(as.matrix(newTest[,3]))
lambdas <- seq(from=-1,to=10,by=.01)
l.lambdas <- c()
for (i in 1:length(lambdas)){
l.lambdas[i] <- l.lambda(lambdas[i],n,newTest[,3])
}
plot(lambdas,l.lambdas)
(astar=lambdas[which(l.lambdas==max(l.lambdas))]) # this is the transformation we should use
newAnti = (-1+newTest[,3]^astar)/astar
xanti <- sort(newAnti)
n <- length(xanti)
problevels <- ((1:n)-0.5)/n
q <- qnorm(problevels)
plot(q,xanti,pch=19)
png("scaleaq.png")
plot(q,xanti,pch=19)
dev.off()
q.bar <- mean(q)
x.bar <- mean(xanti)
critval95 <- 0.9913 # from table 4.2 p. 181
(rqanti2 <- sum((xanti-x.bar)*(q-q.bar))/(sqrt(sum((xanti-x.bar)^2))*sqrt(sum((q-q.bar)^2))))
if(rqanti2>critval95){
print("Its Normal Yo")
} else{
print("Nice try bro")
}
#Its not quite perfect, but is as good as we can get (log is much worse)
############
#Recombine our best attempts at normalcy
bestTry = matrix(c(newTest[,1],newPsych, newAnti),ncol=3)
#Make the chi-square on this data
n <- nrow(bestTry)
p <- ncol(bestTry)
(xbar <- t(matrix(1,ncol=n) %*% bestTry)/n)
(D <- bestTry - matrix(1,nrow=n) %*% t(xbar))
(S <- (n-1)^(-1) * t(D)%*%D)
Sinv <- solve(S)
d <- c()
for (i in 1:length(bestTry[,1])){
d[i] <- (bestTry[i,]-t(xbar))%*%Sinv%*%t(bestTry[i,]-t(xbar))
}
chisq.quantiles <- qchisq(((1:n)-0.5)/n,df=3)
f3 <- summary(lm(sort(d)~chisq.quantiles))
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f3$coefficients[1],f3$coefficients[2],col="red")
png("bestchi.png")
plot(y=sort(d),x=chisq.quantiles,pch=19)
abline(f3$coefficients[1],f3$coefficients[2],col="red")
dev.off()
#This seems to be the best in terms of linarity, so we continue.
## Now that the data is scaled again, we do our test.
n <- nrow(bestTry)
p <- ncol(bestTry)
alpha=.05
#Base null mu
mu0 <- matrix(c(75,75,75), ncol=1)
mu0[2] <-(-1+mu0[2]^pstar)/pstar #Transform to new scale
mu0[3] <-(-1+mu0[3]^astar)/astar # Transform to its new scale
#Do the test
(T2 <- n*t(xbar-mu0)%*%Sinv%*%(xbar-mu0))
(Tcrit <- p*(n-1)*qf(1-alpha,p,n-p)/(n-p))
if(T2>Tcrit){
print("Reject H0")
} else{
print("Fail to Reject")
}
################################################################
#4
rm(list=ls())
dat1 <- read.csv("./data/TTHM.csv",header=TRUE)
dat2 <- read.csv("./data/HAA5.csv",header=TRUE)
dat <- merge(dat1,dat2,by=names(dat1)[1],all=FALSE)
dat <- dat[,c(1:4,7)]
names(dat) <- c("Date","Year","Quarter","TTHM","HAA5")
head(dat)
#Histograms
hist(dat$TTHM)
png("thist.png")
hist(dat$TTHM)
dev.off()
hist(dat$HAA5)
png("hhist.png")
hist(dat$HAA5)
dev.off()
n <- nrow(dat)
#Xbar Charts
center <- mean(dat$TTHM)
UL <- center + 3*sd(dat$TTHM)
LL <- center - 3*sd(dat$TTHM)
plot(x=1:n,dat$TTHM,type="l",ylim=c(0,110),xlab="Observation Number",ylab="TTHM")
abline(h=center, col="green");abline(h=UL, col="red");abline(h=max(0,LL), col="red")
png("tthmxbar.png")
plot(x=1:n,dat$TTHM,type="l",ylim=c(0,110),xlab="Observation Number",ylab="TTHM")
abline(h=center, col="green");abline(h=UL, col="red");abline(h=max(0,LL), col="red")
dev.off()
# ---- Individuals control chart for HAA5 ----
# Center line at the sample mean, control limits at +/- 3 standard deviations.
# The lower limit is clipped at 0 when drawn (concentrations cannot be negative).
center <- mean(dat$HAA5)
UL <- center + 3*sd(dat$HAA5)
LL <- center - 3*sd(dat$HAA5)
plot(x=1:n,dat$HAA5,type="l",ylim=c(0,110),xlab="Observation Number",ylab="HAA5")
abline(h=center, col="green");abline(h=UL, col="red");abline(h=max(0,LL), col="red")
# Re-draw the same chart into a PNG device.
png("haa5xbar.png")
plot(x=1:n,dat$HAA5,type="l",ylim=c(0,110),xlab="Observation Number",ylab="HAA5")
abline(h=center, col="green");abline(h=UL, col="red");abline(h=max(0,LL), col="red")
dev.off()
library(plotrix)
#Ellipse Chart
# ---- Bivariate control ellipse ----
# Columns 4:5 of `dat` — presumably TTHM and HAA5 (the PNG axis labels below
# suggest so) — TODO confirm against the data dictionary.
X <- as.matrix(dat[,4:5])
n <- nrow(X)
p <- ncol(X)
# Sample mean vector (p x 1) and covariance matrix computed via matrix algebra.
X.mean <- t(matrix(1,ncol=n) %*% X)/n
D <- X - matrix(1,nrow=n) %*% t(X.mean)
S <- (n-1)^(-1) * t(D)%*%D
alpha <- 0.01
# Chi-square quantile (df = 2) sizes the 99% probability ellipse.
c2 <- qchisq(1-alpha,df=2)
# Orientation of the major axis from the first eigenvector of S.
angle <- atan(eigen(S)$vectors[2,1]/eigen(S)$vectors[1,1]) # sohcahtoa
plot(0,pch='',ylab='X_2',xlab='X_1',xlim=c(-5,100),ylim=c(-5,90))
points(X)
# Semi-axis lengths scale with the square roots of the eigenvalues of S.
lengths <- c(sqrt(c2*eigen(S)$values[1]),
sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
# Same ellipse chart written to PNG.
png("waterelip.png")
plot(0,pch='',ylab='HAA5',xlab='TTHM',xlim=c(-5,100),ylim=c(-5,90))
points(X)
lengths <- c(sqrt(c2*eigen(S)$values[1]),
sqrt(c2*eigen(S)$values[2]))
draw.ellipse(x=X.mean[1,1],y=X.mean[2,1],a=lengths[1],b=lengths[2],angle=angle,deg=FALSE)
dev.off()
# ---- T2 chart: squared Mahalanobis distance of each observation ----
Sinv <- solve(S)
d <- c()
for (i in 1:length(X[,1])){
d[i] <- (X[i,]-t(X.mean))%*%Sinv%*%t(X[i,]-t(X.mean))
}
#Chi Cutoffs
# Chi-square control limits at the 95% and 99% levels (df = p).
UCL95 <- qchisq(.95,df=p)
UCL99 <- qchisq(.99,df=p)
#For The In Spec Data Points
#################################################################
plot(x=1:n,y=d[1:n],ylim=c(0,max(UCL99,ceiling(max(d[1:n])))),main="In Spec T2", xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
# Same T2 chart written to PNG.
png("watert.png")
plot(x=1:n,y=d[1:n],ylim=c(0,max(UCL99,ceiling(max(d[1:n])))),main="In Spec T2",xlab="Sample",ylab="T2")
abline(h=UCL95,lty=3, col='blue')
abline(h=UCL99, lty=1, col='red')
dev.off()
# ---- One-sample Hotelling T2 test of H0: mu = (80, 60) ----
n <- nrow(dat)
p <- 2
alpha=.05
#Base null mu
mu0 <- matrix(c(80,60), ncol=1)
#Do the test
# T2 statistic and the F-based critical value p(n-1)/(n-p) * F_{1-alpha; p, n-p}.
(T2 <- n*t(X.mean-mu0)%*%Sinv%*%(X.mean-mu0))
(Tcrit <- p*(n-1)*qf(1-alpha,p,n-p)/(n-p))
if(T2>Tcrit){
print("Reject H0")
} else{
print("Fail to Reject")
}
|
# Load required packages.
library(httr)
library(jsonlite)
library(lubridate)
library(magrittr)
library(stringr)
library(tidyverse)

options(stringsAsFactors = FALSE)

### Retrieve data from the first event page
print("retrieve data from the 1st page for cocktails events")

# Initiate a tracker to log the work activities.
tracker <- c()

# Record the time when this batch of work began.
current_time <- as.character(Sys.time())
tracker <- c(tracker, current_time)
print(current_time)

# NOTE(review): the API token is hard-coded; prefer reading it from an
# environment variable (e.g. Sys.getenv("EVENTBRITE_TOKEN")) so it is not
# committed to version control.
token <- "H5ZTGFKD2N3DJCLBGIQX"

# Set the API path to search for cocktail events in New York City.
call_1 <- paste("https://www.eventbriteapi.com/v3/events/search/?q=cocktail&location.address=new+york+city&token=", token, sep = "")
get_events <- GET(call_1)
get_events_text <- content(get_events, "text")
get_events_json <- fromJSON(get_events_text, flatten = TRUE)
first_events_df <- as.data.frame(get_events_json)  # page 1 of the results
tracker <- c(tracker, "page_1")

### Specify columns of interest in the dataframe
select_col <- c("pagination.page_number", "pagination.page_size", "pagination.page_count", "events.id", "events.url", "events.organization_id", "events.created", "events.changed", "events.published", "events.status", "events.currency", "events.listed", "events.shareable", "events.online_event", "events.tx_time_limit", "events.locale", "events.is_series", "events.is_series_parent", "events.inventory_type", "events.is_reserved_seating", "events.show_pick_a_seat", "events.show_colors_in_seatmap_thumbnail", "events.source", "events.is_free", "events.summary", "events.organizer_id", "events.venue_id", "events.category_id", "events.subcategory_id", "events.format_id", "events.is_externally_ticketed", "events.name.text", "events.description.text", "events.end.timezone", "events.end.utc", "events.end.utc", "events.logo.original.width", "events.logo.original.height", "location.latitude", "location.longitude", "location.augmented_location.country", "location.address")

### Loop through the remaining pages, accumulating one data frame per page
# Subset the first page to the columns of interest.
first_events_df <- first_events_df[ , select_col]

# Total number of pages in this daily iteration.
pages <- get_events_json$pagination$page_count

# BUG FIX: the original loop assigned
#   get_events_df <- rbind(first_events_df, get_events_df_next)
# on EVERY iteration, overwriting the accumulator and silently dropping all
# pages except the first and the last. Pages are now collected in a
# preallocated list and bound once after the loop. Also guard pages < 2:
# `2:pages` counts DOWN (2, 1) when pages == 1.
page_dfs <- vector("list", max(pages, 1L))
page_dfs[[1]] <- first_events_df
if (pages >= 2) {
  for (i in 2:pages) {
    call_next <- paste("https://www.eventbriteapi.com/v3/events/search/?q=cocktail&location.address=new+york+city&token=", token, "&page=", i, sep = "")
    get_events_next <- GET(call_next)
    get_events_text_next <- content(get_events_next, "text")
    get_events_json_next <- fromJSON(get_events_text_next, flatten = TRUE)
    get_events_df_next <- as.data.frame(get_events_json_next)
    page_dfs[[i]] <- get_events_df_next[ , select_col]
    tracker <- c(tracker, paste("page_", i, sep = ""))
  }
}
get_events_df <- do.call(rbind, page_dfs)

### Remove duplicated events & store csv files
# Remove duplicated events by event ID.
df_clean <- get_events_df[!duplicated(get_events_df$events.id), ]
# Change event names to lower case.
df_clean$events.name.text <- tolower(df_clean$events.name.text)
# Sanity check: keep only events whose description mentions 'cocktail'.
df_clean <- df_clean[grep("cocktail", df_clean$events.description.text), ]
df_clean$label <- "cocktail"
# Build output file names stamped with the batch start time.
current_dataframe_name <- paste("df_cocktail", current_time, ".csv", sep = "")
write.csv(df_clean, current_dataframe_name)
tracker_name <- paste("cocktail tracker", current_time, ".txt", sep = "")
write.table(tracker, file = tracker_name, sep = " ", row.names = TRUE, col.names = NA)
print("cocktail events all done!!")
|
/eventbrite_data_event_analysis/eventbrite-api-tracker&script/cocktails.R
|
no_license
|
eddiecylin/Data-science-projects
|
R
| false
| false
| 3,652
|
r
|
# Load required packages.
library(httr)
library(jsonlite)
library(lubridate)
library(magrittr)
library(stringr)
library(tidyverse)

options(stringsAsFactors = FALSE)

### Retrieve data from the first event page
print("retrieve data from the 1st page for cocktails events")

# Initiate a tracker to log the work activities.
tracker <- c()

# Record the time when this batch of work began.
current_time <- as.character(Sys.time())
tracker <- c(tracker, current_time)
print(current_time)

# NOTE(review): the API token is hard-coded; prefer reading it from an
# environment variable (e.g. Sys.getenv("EVENTBRITE_TOKEN")) so it is not
# committed to version control.
token <- "H5ZTGFKD2N3DJCLBGIQX"

# Set the API path to search for cocktail events in New York City.
call_1 <- paste("https://www.eventbriteapi.com/v3/events/search/?q=cocktail&location.address=new+york+city&token=", token, sep = "")
get_events <- GET(call_1)
get_events_text <- content(get_events, "text")
get_events_json <- fromJSON(get_events_text, flatten = TRUE)
first_events_df <- as.data.frame(get_events_json)  # page 1 of the results
tracker <- c(tracker, "page_1")

### Specify columns of interest in the dataframe
select_col <- c("pagination.page_number", "pagination.page_size", "pagination.page_count", "events.id", "events.url", "events.organization_id", "events.created", "events.changed", "events.published", "events.status", "events.currency", "events.listed", "events.shareable", "events.online_event", "events.tx_time_limit", "events.locale", "events.is_series", "events.is_series_parent", "events.inventory_type", "events.is_reserved_seating", "events.show_pick_a_seat", "events.show_colors_in_seatmap_thumbnail", "events.source", "events.is_free", "events.summary", "events.organizer_id", "events.venue_id", "events.category_id", "events.subcategory_id", "events.format_id", "events.is_externally_ticketed", "events.name.text", "events.description.text", "events.end.timezone", "events.end.utc", "events.end.utc", "events.logo.original.width", "events.logo.original.height", "location.latitude", "location.longitude", "location.augmented_location.country", "location.address")

### Loop through the remaining pages, accumulating one data frame per page
# Subset the first page to the columns of interest.
first_events_df <- first_events_df[ , select_col]

# Total number of pages in this daily iteration.
pages <- get_events_json$pagination$page_count

# BUG FIX: the original loop assigned
#   get_events_df <- rbind(first_events_df, get_events_df_next)
# on EVERY iteration, overwriting the accumulator and silently dropping all
# pages except the first and the last. Pages are now collected in a
# preallocated list and bound once after the loop. Also guard pages < 2:
# `2:pages` counts DOWN (2, 1) when pages == 1.
page_dfs <- vector("list", max(pages, 1L))
page_dfs[[1]] <- first_events_df
if (pages >= 2) {
  for (i in 2:pages) {
    call_next <- paste("https://www.eventbriteapi.com/v3/events/search/?q=cocktail&location.address=new+york+city&token=", token, "&page=", i, sep = "")
    get_events_next <- GET(call_next)
    get_events_text_next <- content(get_events_next, "text")
    get_events_json_next <- fromJSON(get_events_text_next, flatten = TRUE)
    get_events_df_next <- as.data.frame(get_events_json_next)
    page_dfs[[i]] <- get_events_df_next[ , select_col]
    tracker <- c(tracker, paste("page_", i, sep = ""))
  }
}
get_events_df <- do.call(rbind, page_dfs)

### Remove duplicated events & store csv files
# Remove duplicated events by event ID.
df_clean <- get_events_df[!duplicated(get_events_df$events.id), ]
# Change event names to lower case.
df_clean$events.name.text <- tolower(df_clean$events.name.text)
# Sanity check: keep only events whose description mentions 'cocktail'.
df_clean <- df_clean[grep("cocktail", df_clean$events.description.text), ]
df_clean$label <- "cocktail"
# Build output file names stamped with the batch start time.
current_dataframe_name <- paste("df_cocktail", current_time, ".csv", sep = "")
write.csv(df_clean, current_dataframe_name)
tracker_name <- paste("cocktail tracker", current_time, ".txt", sep = "")
write.table(tracker, file = tracker_name, sep = " ", row.names = TRUE, col.names = NA)
print("cocktail events all done!!")
|
#!/usr/bin/env R
# t-SNE/UMAP embedding, re-clustering and marker detection for a Seurat object.
library(dplyr)
library(tibble)
library(Seurat)

s_o <- readRDS('s_o.clustered.RDS')

# BUG FIX: in the original file this call was broken across two lines in the
# middle of the identifier `check_duplicates` ("check_dupli" / "cates"),
# which is a parse error; rejoined here.
s_o <- RunTSNE(s_o, dims.use = 1:45, max_iter = 20000, perplexity = 5,
               check_duplicates = FALSE, theta = 0.05)
s_o <- FindNeighbors(s_o, dims = 1:45)
s_o <- FindClusters(s_o, resolution = 12)

# t-SNE plots: clusters, then selected neural marker genes.
pdf('tsne.pdf', width = 8, height = 4)
DimPlot(s_o, reduction = "tsne", label = TRUE)
dev.off()
pdf('tsne-features.pdf', width = 8, height = 4)
FeaturePlot(s_o, features = c("SoxN", "D", "vnd", "wor", "pros"))
dev.off()

# Checkpoint the object before the (slow) marker search.
saveRDS(s_o, 's_o.bythebook.RDS')

# Positive markers only, with minimum expression fraction and log-FC cutoffs.
s_o.markers <- FindAllMarkers(s_o, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
write.table(s_o.markers, 'so_markers.bythebook.tsv', sep = '\t')
write.csv(s_o.markers, 'ClusterMap/allen.markers.csv')

pdf('violin_bythebook.pdf')
RidgePlot(s_o, features = c("SoxN", "D"), ncol = 2)
dev.off()

# UMAP embedding and plots.
s_o <- RunUMAP(s_o, dims = 1:45)
pdf('umap.pdf', width = 8, height = 4)
DimPlot(s_o, reduction = 'umap', label = TRUE) + NoLegend()
dev.off()
pdf('umap-features.pdf', width = 8, height = 8)
FeaturePlot(s_o, features = c("SoxN", "D", "pros"), ncol = 2)
dev.off()

# Final checkpoint including the UMAP embedding.
saveRDS(s_o, 's_o.bythebook.RDS')
|
/Computation/Rustbucket Analyses/ekd40_tmp/02_bythebook.R
|
permissive
|
edridgedsouza/mphil-thesis
|
R
| false
| false
| 1,118
|
r
|
#!/usr/bin/env R
# t-SNE/UMAP embedding, re-clustering and marker detection for a Seurat object.
library(dplyr)
library(tibble)
library(Seurat)

s_o <- readRDS('s_o.clustered.RDS')

# BUG FIX: in the original file this call was broken across two lines in the
# middle of the identifier `check_duplicates` ("check_dupli" / "cates"),
# which is a parse error; rejoined here.
s_o <- RunTSNE(s_o, dims.use = 1:45, max_iter = 20000, perplexity = 5,
               check_duplicates = FALSE, theta = 0.05)
s_o <- FindNeighbors(s_o, dims = 1:45)
s_o <- FindClusters(s_o, resolution = 12)

# t-SNE plots: clusters, then selected neural marker genes.
pdf('tsne.pdf', width = 8, height = 4)
DimPlot(s_o, reduction = "tsne", label = TRUE)
dev.off()
pdf('tsne-features.pdf', width = 8, height = 4)
FeaturePlot(s_o, features = c("SoxN", "D", "vnd", "wor", "pros"))
dev.off()

# Checkpoint the object before the (slow) marker search.
saveRDS(s_o, 's_o.bythebook.RDS')

# Positive markers only, with minimum expression fraction and log-FC cutoffs.
s_o.markers <- FindAllMarkers(s_o, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
write.table(s_o.markers, 'so_markers.bythebook.tsv', sep = '\t')
write.csv(s_o.markers, 'ClusterMap/allen.markers.csv')

pdf('violin_bythebook.pdf')
RidgePlot(s_o, features = c("SoxN", "D"), ncol = 2)
dev.off()

# UMAP embedding and plots.
s_o <- RunUMAP(s_o, dims = 1:45)
pdf('umap.pdf', width = 8, height = 4)
DimPlot(s_o, reduction = 'umap', label = TRUE) + NoLegend()
dev.off()
pdf('umap-features.pdf', width = 8, height = 8)
FeaturePlot(s_o, features = c("SoxN", "D", "pros"), ncol = 2)
dev.off()

# Final checkpoint including the UMAP embedding.
saveRDS(s_o, 's_o.bythebook.RDS')
|
polyvars <- function(X, degr = 2, raw = FALSE)
{
  # Compute polynomials of the vectors within a matrix or data frame.
  # Contrary to poly(), on which it is based, this function only computes
  # polynomials separately for each vector of the provided matrix,
  # e.g. x, x^2, x^3, and not combinations such as xy, x^2y and so on.
  #
  # Author: Daniel Borcard, December 2014, March 2017
  # License: GPL2
  #
  # Usage
  # -----
  # polyvars(X = rawdatamatrix, degr = 3, raw = FALSE)
  #   (the original header advertised "polymatrix", which does not match
  #    the function name; corrected here)
  #
  # Arguments
  # ---------
  # X:    a matrix or data frame containing quantitative variables
  # degr: the degree to which the variables must be raised. Default: 2
  # raw:  logical; if TRUE raw polynomials are computed directly from
  #       the raw variables. If FALSE (default), orthogonal polynomials
  #       are computed.
  #
  # Value
  # -----
  # A data frame containing the polynomials. Each variable appears in
  # turn, followed by its polynomial terms (columns named v.1, v.2, ...).
  #
  # Details
  # -------
  # When raw = FALSE, the polynomial terms of each variable are orthogonal
  # among themselves, but not to the terms of the other variables.

  ## Reject non-numeric columns.
  ## (BUG FIX: the original used apply(X, 2, class), which first coerces a
  ## data frame to a matrix; a single factor/character column then makes
  ## every column report "character" and factor columns were never detected
  ## as "factor". Checking each column directly avoids the coercion.)
  col.is.numeric <- vapply(as.data.frame(X), is.numeric, logical(1))
  if (any(!col.is.numeric))
    stop("No factor or character variables allowed.", call. = FALSE)

  ## Store or assign variable names.
  if (!is.null(colnames(X))) {
    var.names <- colnames(X)
  } else {
    var.names <- paste("v", seq_len(ncol(X)), sep = "")
  }

  ## Compute the polynomial terms, one variable at a time.
  X.poly <- matrix(0, nrow(X), ncol(X) * degr)
  for (i in seq_len(ncol(X))) {
    target.cols <- ((i - 1) * degr + 1):(i * degr)
    X.poly[, target.cols] <- poly(X[, i], degr, raw = raw)
  }

  ## Warn when the expanded matrix has at least as many columns as there
  ## are residual degrees of freedom.
  if ((ncol(X) * degr) > (nrow(X) - 1)) {
    cat("\n------------------------------------------------------------------")
    cat("\nWARNING: the number of polynomial terms is equal to or larger than")
    cat("\nthe number of observations.")
    cat("\n------------------------------------------------------------------\n")
  }

  ## Build column names of the form <variable>.<degree>.
  indices <- rep(seq_len(degr), ncol(X))
  name.stems <- rep(var.names, each = degr)
  colnames(X.poly) <- paste(name.stems, indices, sep = ".")

  as.data.frame(X.poly)
}
## Examples
## Construction of a fictitious matrix of 5 observations and 4 variables:
# env <- matrix(1:20, 5)
## Computation of orthogonal polynomials of degree 3:
# env.ortho.deg3 <- polymatrix(env, degr = 3)
## Computation of a matrix of raw polynomials of degree 4:
# env.raw.deg4 <- polymatrix(env, degr = 4, raw = TRUE)
|
/Functions/polyvars.R
|
no_license
|
2015qyliang/Numerical_Ecology_with_R-Second
|
R
| false
| false
| 2,816
|
r
|
polyvars <- function(X, degr = 2, raw = FALSE)
{
  # Compute polynomials of the vectors within a matrix or data frame.
  # Contrary to poly(), on which it is based, this function only computes
  # polynomials separately for each vector of the provided matrix,
  # e.g. x, x^2, x^3, and not combinations such as xy, x^2y and so on.
  #
  # Author: Daniel Borcard, December 2014, March 2017
  # License: GPL2
  #
  # Usage
  # -----
  # polyvars(X = rawdatamatrix, degr = 3, raw = FALSE)
  #   (the original header advertised "polymatrix", which does not match
  #    the function name; corrected here)
  #
  # Arguments
  # ---------
  # X:    a matrix or data frame containing quantitative variables
  # degr: the degree to which the variables must be raised. Default: 2
  # raw:  logical; if TRUE raw polynomials are computed directly from
  #       the raw variables. If FALSE (default), orthogonal polynomials
  #       are computed.
  #
  # Value
  # -----
  # A data frame containing the polynomials. Each variable appears in
  # turn, followed by its polynomial terms (columns named v.1, v.2, ...).
  #
  # Details
  # -------
  # When raw = FALSE, the polynomial terms of each variable are orthogonal
  # among themselves, but not to the terms of the other variables.

  ## Reject non-numeric columns.
  ## (BUG FIX: the original used apply(X, 2, class), which first coerces a
  ## data frame to a matrix; a single factor/character column then makes
  ## every column report "character" and factor columns were never detected
  ## as "factor". Checking each column directly avoids the coercion.)
  col.is.numeric <- vapply(as.data.frame(X), is.numeric, logical(1))
  if (any(!col.is.numeric))
    stop("No factor or character variables allowed.", call. = FALSE)

  ## Store or assign variable names.
  if (!is.null(colnames(X))) {
    var.names <- colnames(X)
  } else {
    var.names <- paste("v", seq_len(ncol(X)), sep = "")
  }

  ## Compute the polynomial terms, one variable at a time.
  X.poly <- matrix(0, nrow(X), ncol(X) * degr)
  for (i in seq_len(ncol(X))) {
    target.cols <- ((i - 1) * degr + 1):(i * degr)
    X.poly[, target.cols] <- poly(X[, i], degr, raw = raw)
  }

  ## Warn when the expanded matrix has at least as many columns as there
  ## are residual degrees of freedom.
  if ((ncol(X) * degr) > (nrow(X) - 1)) {
    cat("\n------------------------------------------------------------------")
    cat("\nWARNING: the number of polynomial terms is equal to or larger than")
    cat("\nthe number of observations.")
    cat("\n------------------------------------------------------------------\n")
  }

  ## Build column names of the form <variable>.<degree>.
  indices <- rep(seq_len(degr), ncol(X))
  name.stems <- rep(var.names, each = degr)
  colnames(X.poly) <- paste(name.stems, indices, sep = ".")

  as.data.frame(X.poly)
}
## Examples
## Construction of a fictitious matrix of 5 observations and 4 variables:
# env <- matrix(1:20, 5)
## Computation of orthogonal polynomials of degree 3:
# env.ortho.deg3 <- polymatrix(env, degr = 3)
## Computation of a matrix of raw polynomials of degree 4:
# env.raw.deg4 <- polymatrix(env, degr = 4, raw = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exa.readData.R
\name{exa.readData}
\alias{exa.readData}
\title{Low level method to send a query to EXASOL and retrieve a result set.}
\usage{
exa.readData(con, sql, ...)
}
\arguments{
\item{con}{An open EXASOL connection object, as returned by \code{exa.connect()}.}
\item{sql}{A character string containing the SQL statement to execute.}
\item{...}{Further options to be passed to `as.data.frame()`.}
}
\value{
A data.frame containing the result set of the query.
}
\description{
The function sends an SQL statement via an open connection to an EXASOL DB
and retrieves a result set. The result set is at C++ level fetched via JSON over Websockets,
parsed and handed over to R as a list of vectors, that is then converted to a data.frame.
}
\examples{
\dontrun{
library(exasol6)
con <- exa.connect("192.168.137.8", 8563, "R", "sys", "exasol")
exa.readData(con, "SELECT CURRENT_TIMESTAMP")
df <- exa.readData(con, "SELECT * FROM R.FLIGHTS LIMIT 100",
row.names = c(1:100),
stringsAsFactors=FALSE)
}
}
\author{
Marcel Boldt <marcel.boldt@exasol.com>
}
|
/man/exa.readData.Rd
|
permissive
|
marcelboldt/r-exasol6
|
R
| false
| true
| 1,117
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exa.readData.R
\name{exa.readData}
\alias{exa.readData}
\title{Low level method to send a query to EXASOL and retrieve a result set.}
\usage{
exa.readData(con, sql, ...)
}
\arguments{
\item{con}{An open EXASOL connection object, as returned by \code{exa.connect()}.}
\item{sql}{A character string containing the SQL statement to execute.}
\item{...}{Further options to be passed to `as.data.frame()`.}
}
\value{
A data.frame containing the result set of the query.
}
\description{
The function sends an SQL statement via an open connection to an EXASOL DB
and retrieves a result set. The result set is at C++ level fetched via JSON over Websockets,
parsed and handed over to R as a list of vectors, that is then converted to a data.frame.
}
\examples{
\dontrun{
library(exasol6)
con <- exa.connect("192.168.137.8", 8563, "R", "sys", "exasol")
exa.readData(con, "SELECT CURRENT_TIMESTAMP")
df <- exa.readData(con, "SELECT * FROM R.FLIGHTS LIMIT 100",
row.names = c(1:100),
stringsAsFactors=FALSE)
}
}
\author{
Marcel Boldt <marcel.boldt@exasol.com>
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bin_cumulative.R
\name{bin_cumulative}
\alias{bin_cumulative}
\title{Binomial Cumulative}
\usage{
bin_cumulative(trials, prob)
}
\arguments{
\item{trials}{Number of trials (numeric)}
\item{prob}{Probability of success (real)}
}
\value{
Returns a data.frame of the number of successes and their probabilities as well as the cumulative probability up to a certain number of successes
}
\description{
Computes the binomial distribution of a certain number of successes over a given number of trials as well as the cumulative probability up to that number of successes
}
\examples{
dis2 <- bin_cumulative(trials = 5, prob = 0.5)
dis2
1 0 0.03125 0.03125
2 1 0.15625 0.18750
3 2 0.31250 0.50000
4 3 0.31250 0.81250
5 4 0.15625 0.96875
6 5 0.03125 1.00000
plot(dis2)
Returns a plot of the cumulative probability versus number of successes
}
|
/binomial/man/bin_cumulative.Rd
|
no_license
|
stat133-sp19/hw-stat133-TheGoldenKyle
|
R
| false
| true
| 993
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bin_cumulative.R
\name{bin_cumulative}
\alias{bin_cumulative}
\title{Binomial Cumulative}
\usage{
bin_cumulative(trials, prob)
}
\arguments{
\item{trials}{Number of trials (numeric)}
\item{prob}{Probability of success (real)}
}
\value{
Returns a data.frame of the number of successes and their probabilities as well as the cumulative probability up to a certain number of successes
}
\description{
Computes the binomial distribution of a certain number of successes over a given number of trials as well as the cumulative probability up to that number of successes
}
\examples{
dis2 <- bin_cumulative(trials = 5, prob = 0.5)
dis2
1 0 0.03125 0.03125
2 1 0.15625 0.18750
3 2 0.31250 0.50000
4 3 0.31250 0.81250
5 4 0.15625 0.96875
6 5 0.03125 1.00000
plot(dis2)
Returns a plot of the cumulative probability versus number of successes
}
|
#' Add/remove foreign keys
#'
#' @description `dm_add_fk()` marks the specified columns as the foreign key of table `table` with
#' respect to the primary key of table `ref_table`.
#' If `check == TRUE`, then it will first check if the values in columns `columns` are a subset
#' of the values of the primary key in table `ref_table`.
#'
#' @section Compound keys:
#'
#' Currently, keys consisting of more than one column are not supported.
#' [This feature](https://github.com/krlmlr/dm/issues/3) is planned for dm 0.2.0.
#' The syntax of these functions will be extended but will remain compatible
#' with current semantics.
#'
#' @inheritParams dm_add_pk
#' @param columns For `dm_add_fk()`: The columns of `table` which are to become the foreign key columns that
#' reference the primary key of `ref_table`.
#'
#' For `dm_rm_fk()`: The columns of `table` that should no longer be referencing the primary key of `ref_table`.
#' If `NULL`, all columns will be evaluated.
#' @param ref_table For `dm_add_fk()`: The table which `table` will be referencing.
#' This table needs to have a primary key set.
#'
#' For `dm_rm_fk()`: The table that `table` is referencing.
#' @param check Boolean, if `TRUE`, a check will be performed to determine if the values of
#' `column` are a subset of the values of the primary key column of `ref_table`.
#'
#' @family foreign key functions
#'
#' @rdname dm_add_fk
#'
#' @return For `dm_add_fk()`: An updated `dm` with an additional foreign key relation.
#'
#' @export
#' @examples
#' if (rlang::is_installed("nycflights13")) {
#' nycflights_dm <- dm(
#' planes = nycflights13::planes,
#' flights = nycflights13::flights
#' )
#' } else {
#' message("Using mock-up data, install the nycflights13 package to fix.")
#' nycflights_dm <- dm(
#' planes = tibble(tailnum = character()),
#' flights = tibble(tailnum = character())
#' )
#' }
#'
#' nycflights_dm %>%
#' dm_draw()
#'
#' nycflights_dm %>%
#' dm_add_pk(planes, tailnum) %>%
#' dm_add_fk(flights, tailnum, planes) %>%
#' dm_draw()
# Validate inputs, then delegate to dm_add_fk_impl(); see the roxygen block
# above for the full contract.
dm_add_fk <- function(dm, table, columns, ref_table, check = FALSE) {
check_not_zoomed(dm)
# Capture the bare (unquoted) table names supplied via tidy evaluation.
table_name <- as_name(ensym(table))
ref_table_name <- as_name(ensym(ref_table))
check_correct_input(dm, c(table_name, ref_table_name), 2L)
# Single FK column only — compound keys are not yet supported (see roxygen).
column_name <- as_name(ensym(columns))
check_col_input(dm, table_name, column_name)
# The referenced table must have a primary key to point the FK at.
ref_column_name <- dm_get_pk_impl(dm, ref_table_name)
if (is_empty(ref_column_name)) {
abort_ref_tbl_has_no_pk(ref_table_name)
}
# Optional referential-integrity check: every value in the FK column must
# also occur in the referenced primary key column.
if (check) {
tbl_obj <- dm_get_tables(dm)[[table_name]]
ref_tbl_obj <- dm_get_tables(dm)[[ref_table_name]]
if (!is_subset(tbl_obj, !!column_name, ref_tbl_obj, !!ref_column_name)) {
abort_not_subset_of(table_name, column_name, ref_table_name, ref_column_name)
}
}
dm_add_fk_impl(dm, table_name, column_name, ref_table_name)
}
# Record a foreign key (table.column -> ref_table's PK) in the dm definition.
# FK entries are stored in the `fks` slot of the REFERENCED table's row.
dm_add_fk_impl <- function(dm, table, column, ref_table) {
  def <- dm_get_def(dm)
  ref_idx <- which(def$table == ref_table)
  current_fks <- def$fks[[ref_idx]]

  # Does this exact (child table, column) reference already exist?
  same_column <- !is.na(vctrs::vec_match(current_fks$column, list(column)))
  duplicate <- current_fks$table == table & same_column
  if (any(duplicate)) {
    # Strict mode treats a re-added FK as an error; otherwise it is a no-op.
    if (dm_is_strict_keys(dm)) {
      abort_fk_exists(table, column, ref_table)
    }
    return(dm)
  }

  def$fks[[ref_idx]] <- vctrs::vec_rbind(current_fks, new_fk(table, list(column)))
  new_dm3(def)
}
#' Check if foreign keys exists
#'
#' `dm_has_fk()` checks if a foreign key reference exists between two tables in a `dm`.
#'
#' @inheritParams dm_add_fk
#' @param ref_table The table to be checked if it is referred to.
#'
#' @return A boolean value: `TRUE` if a reference from `table` to `ref_table` exists, `FALSE` otherwise.
#'
#' @family foreign key functions
#'
#' @export
#' @examples
#' dm_nycflights13() %>%
#' dm_has_fk(flights, airports)
#' dm_nycflights13() %>%
#' dm_has_fk(airports, flights)
# Public wrapper: resolve the unquoted table arguments, then delegate.
dm_has_fk <- function(dm, table, ref_table) {
  check_not_zoomed(dm)
  table_name <- as_name(ensym(table))
  ref_table_name <- as_name(ensym(ref_table))
  dm_has_fk_impl(dm, table_name, ref_table_name)
}
# TRUE iff at least one FK column links table_name to ref_table_name.
dm_has_fk_impl <- function(dm, table_name, ref_table_name) {
  fk_cols <- dm_get_fk_impl(dm, table_name, ref_table_name)
  has_length(fk_cols)
}
#' Foreign key column names
#'
#' @description `dm_get_fk()` returns the names of the
#' columns marked as foreign key of table `table` with respect to table `ref_table` within a [`dm`] object.
#' If no foreign key is set between the tables, an empty character vector is returned.
#'
#' @section Compound keys:
#'
#' Currently, keys consisting of more than one column are not supported.
#' [This feature](https://github.com/krlmlr/dm/issues/3) is planned for dm 0.2.0.
#' Therefore the function may return vectors of length greater than one in the future.
#'
#' @inheritParams dm_has_fk
#' @param ref_table The table that is referenced from `table`.
#'
#' @family foreign key functions
#'
#' @return A list of character vectors with the column name(s) of `table`,
#' pointing to the primary key of `ref_table`.
#'
#' @export
#' @examples
#' dm_nycflights13() %>%
#' dm_get_fk(flights, airports)
#' dm_nycflights13(cycle = TRUE) %>%
#' dm_get_fk(flights, airports)
# Public wrapper: resolve the unquoted table arguments, fetch the FK
# column name(s), and wrap them in the keys class for pretty printing.
dm_get_fk <- function(dm, table, ref_table) {
  check_not_zoomed(dm)
  tbl_name <- as_name(ensym(table))
  ref_tbl_name <- as_name(ensym(ref_table))
  fk_cols <- dm_get_fk_impl(dm, tbl_name, ref_tbl_name)
  new_keys(fk_cols)
}
# Return the FK column name(s) of table_name pointing at ref_table_name
# (empty when no such FK exists).
dm_get_fk_impl <- function(dm, table_name, ref_table_name) {
  check_correct_input(dm, c(table_name, ref_table_name), 2L)
  all_fks <- dm_get_data_model_fks(dm)
  relevant <- all_fks$table == table_name & all_fks$ref == ref_table_name
  all_fks$column[relevant]
}
#' Get foreign key constraints
#'
#' Get a summary of all foreign key relations in a [`dm`].
#'
#' @section Compound keys:
#'
#' Currently, keys consisting of more than one column are not supported.
#' [This feature](https://github.com/krlmlr/dm/issues/3) is planned for dm 0.2.0.
#' Therefore the `child_fk_cols` column may contain vectors of length greater than one.
#'
#' @return A tibble with the following columns:
#' \describe{
#' \item{`child_table`}{child table,}
#' \item{`child_fk_cols`}{foreign key column in child table,}
#' \item{`parent_table`}{parent table.}
#' }
#'
#' @inheritParams dm_has_fk
#'
#' @family foreign key functions
#'
#' @examples
#' dm_get_all_fks(dm_nycflights13())
#' @export
# Public wrapper: summarize all FK relations, with the FK columns wrapped
# in the keys class for pretty printing.
dm_get_all_fks <- function(dm) {
  check_not_zoomed(dm)
  summary_tbl <- dm_get_all_fks_impl(dm)
  mutate(summary_tbl, child_fk_cols = new_keys(child_fk_cols))
}
# Tabulate every FK as (child_table, child_fk_cols, parent_table),
# sorted for stable output.
dm_get_all_fks_impl <- function(dm) {
  fks <- dm_get_data_model_fks(dm)
  renamed <- select(fks, child_table = table, child_fk_cols = column, parent_table = ref)
  arrange(renamed, child_table, child_fk_cols)
}
#' Remove the reference(s) from one [`dm`] table to another
#'
#' @description `dm_rm_fk()` can remove either one reference between two tables, or all references at once, if argument `columns = NULL`.
#' All arguments may be provided quoted or unquoted.
#'
#' @rdname dm_add_fk
#'
#' @family foreign key functions
#'
#' @return For `dm_rm_fk()`: An updated `dm` without the given foreign key relation.
#'
#' @export
#' @examples
#'
#' dm_nycflights13(cycle = TRUE) %>%
#' dm_rm_fk(flights, dest, airports) %>%
#' dm_draw()
# Remove one FK (or all FKs, when columns = NULL) from `table` to `ref_table`.
dm_rm_fk <- function(dm, table, columns, ref_table) {
check_not_zoomed(dm)
# Capture `columns` as a quosure so that a missing argument (an error) can
# be distinguished from an explicit NULL (= remove ALL FKs between the tables).
column_quo <- enquo(columns)
if (quo_is_missing(column_quo)) {
abort_rm_fk_col_missing()
}
table_name <- as_name(ensym(table))
ref_table_name <- as_name(ensym(ref_table))
check_correct_input(dm, c(table_name, ref_table_name), 2L)
# Existing FK columns between the two tables; error if there are none.
fk_cols <- dm_get_fk_impl(dm, table_name, ref_table_name)
if (is_empty(fk_cols)) {
# FIXME: Simplify, check is already done in dm_rm_fk_impl()
abort_is_not_fkc(table_name, fk_cols, ref_table_name)
}
if (quo_is_null(column_quo)) {
# NULL means: drop every FK pointing from `table` to `ref_table`.
cols <- fk_cols
} else {
# FIXME: Add tidyselect support
cols <- as_name(ensym(columns))
}
dm_rm_fk_impl(dm, table_name, cols, ref_table_name)
}
# Drop the FK entries for (table_name, cols) from the referenced table's
# fks slot; error if nothing matched.
dm_rm_fk_impl <- function(dm, table_name, cols, ref_table_name) {
  # FIXME: compound keys
  cols <- as.list(cols)
  def <- dm_get_def(dm)
  ref_idx <- which(def$table == ref_table_name)
  fks <- def$fks[[ref_idx]]

  # Keep entries that either belong to a different child table or reference
  # none of the given columns.
  keep <- fks$table != table_name | is.na(vctrs::vec_match(fks$column, cols))
  if (all(keep)) {
    abort_is_not_fkc(table_name, cols, ref_table_name)
  }

  def$fks[[ref_idx]] <- fks[keep, ]
  new_dm3(def)
}
#' Foreign key candidates
#'
#' @description \lifecycle{questioning}
#'
#' Determine which columns would be good candidates to be used as foreign keys of a table,
#' to reference the primary key column of another table of the [`dm`] object.
#'
#' @inheritParams dm_add_fk
#' @param table The table whose columns should be tested for suitability as foreign keys.
#' @param ref_table A table with a primary key.
#'
#' @details `dm_enum_fk_candidates()` first checks if `ref_table` has a primary key set,
#' if not, an error is thrown.
#'
#' If `ref_table` does have a primary key, then a join operation will be tried using
#' that key as the `by` argument of join() to match it to each column of `table`.
#' Attempting to join incompatible columns triggers an error.
#'
#' The outcome of the join operation determines the value of the `why` column in the result:
#'
#' - an empty value for a column of `table` that is a suitable foreign key candidate
#' - the count and percentage of missing matches for a column that is not suitable
#' - the error message triggered for unsuitable candidates that may include the types of mismatched columns
#'
#' @section Life cycle:
#' These functions are marked "questioning" because we are not yet sure about
#' the interface, in particular if we need both `dm_enum...()` and `enum...()`
#' variants.
#' Changing the interface later seems harmless because these functions are
#' most likely used interactively.
#'
#' @return A tibble with the following columns:
#' \describe{
#' \item{`columns`}{columns of `table`,}
#' \item{`candidate`}{boolean: are these columns a candidate for a foreign key,}
#'   \item{`why`}{if not a candidate for a foreign key, explanation for this.}
#' }
#'
#' @family foreign key functions
#'
#' @examples
#' dm_nycflights13() %>%
#' dm_enum_fk_candidates(flights, airports)
#'
#' dm_nycflights13() %>%
#' dm_zoom_to(flights) %>%
#' enum_fk_candidates(airports)
#' @export
# Tests every column of `table` as a candidate FK into the primary key of
# `ref_table` (see the roxygen block above for the result format).
dm_enum_fk_candidates <- function(dm, table, ref_table) {
  check_not_zoomed(dm)
  # FIXME: with "direct" filter maybe no check necessary: but do we want to check
  # for tables retrieved with `tbl()` or with `dm_get_tables()[[table_name]]`
  check_no_filter(dm)
  table_name <- as_string(ensym(table))
  ref_table_name <- as_string(ensym(ref_table))
  check_correct_input(dm, c(table_name, ref_table_name), 2L)
  ref_tbl_pk <- dm_get_pk_impl(dm, ref_table_name)
  ref_tbl <- tbl(dm, ref_table_name)
  tbl <- tbl(dm, table_name)
  # Delegate to the shared implementation, then rename/wrap for the
  # documented output format.
  enum_fk_candidates_impl(table_name, tbl, ref_table_name, ref_tbl, ref_tbl_pk) %>%
    rename(columns = column) %>%
    mutate(columns = new_keys(columns))
}
#' @details `enum_fk_candidates()` works like `dm_enum_fk_candidates()` with the zoomed table as `table`.
#'
#' @rdname dm_enum_fk_candidates
#' @param zoomed_dm A `dm` with a zoomed table.
#' @export
# Zoomed-table variant of dm_enum_fk_candidates(): the zoomed table of
# `zoomed_dm` plays the role of `table`.
enum_fk_candidates <- function(zoomed_dm, ref_table) {
  check_zoomed(zoomed_dm)
  check_no_filter(zoomed_dm)
  table_name <- orig_name_zoomed(zoomed_dm)
  ref_table_name <- as_string(ensym(ref_table))
  check_correct_input(zoomed_dm, ref_table_name)
  ref_tbl_pk <- dm_get_pk_impl(zoomed_dm, ref_table_name)
  ref_tbl <- dm_get_tables_impl(zoomed_dm)[[ref_table_name]]
  enum_fk_candidates_impl(table_name, get_zoomed_tbl(zoomed_dm), ref_table_name, ref_tbl, ref_tbl_pk) %>%
    rename(columns = column) %>%
    mutate(columns = new_keys(columns))
}
# Internal workhorse for (dm_)enum_fk_candidates().
#
# For each column of `tbl`, check_fk() returns "" when the column is a valid
# FK candidate into `ref_tbl`'s primary key, or an explanatory message
# otherwise. Results are ordered: candidates first, then by the leading
# mismatch count embedded in `why`, then by column name.
enum_fk_candidates_impl <- function(table_name, tbl, ref_table_name, ref_tbl, ref_tbl_pk) {
  if (is_empty(ref_tbl_pk)) {
    abort_ref_tbl_has_no_pk(ref_table_name)
  }
  tbl_colnames <- colnames(tbl)
  tibble(
    column = tbl_colnames,
    why = map_chr(column, ~ check_fk(tbl, table_name, .x, ref_tbl, ref_table_name, ref_tbl_pk))
  ) %>%
    # An empty `why` marks a candidate; the comparison is already logical,
    # so the former ifelse(..., TRUE, FALSE) wrapper was redundant.
    mutate(candidate = (why == "")) %>%
    select(column, candidate, why) %>%
    # Extract the leading mismatch count (if any) for a sensible sort order.
    mutate(arrange_col = as.integer(gsub("(^[0-9]*).*$", "\\1", why))) %>%
    arrange(desc(candidate), arrange_col, column) %>%
    select(-arrange_col)
}
# Checks whether column `colname` of `t1` is a valid FK into PK `pk` of `t2`.
# Returns "" on success, an explanatory message (mismatch statistics or the
# join error text) otherwise. Works for both local tibbles and lazy
# (dbplyr) tables; `collect()` pulls the small summary at the end.
check_fk <- function(t1, t1_name, colname, t2, t2_name, pk) {
  t1_join <- t1 %>% select(value = !!sym(colname))
  t2_join <- t2 %>%
    select(value = !!sym(pk)) %>%
    mutate(match = 1L)
  res_tbl <- tryCatch(
    left_join(t1_join, t2_join, by = "value") %>%
      # if value is NULL, this also counts as a match -- consistent with fk semantics
      mutate(mismatch_or_null = if_else(is.na(match), value, NULL)) %>%
      safe_count(mismatch_or_null) %>%
      ungroup() %>% # dbplyr problem?
      # Total number of unmatched (non-NA) values across all rows.
      mutate(n_mismatch = sum(if_else(is.na(mismatch_or_null), 0L, n), na.rm = TRUE)) %>%
      mutate(n_total = sum(n, na.rm = TRUE)) %>%
      arrange(desc(n)) %>%
      filter(!is.na(mismatch_or_null)) %>%
      # Keep just enough distinct offenders for the "..."-capped message.
      head(MAX_COMMAS + 1L) %>%
      collect(),
    error = identity
  )
  # return error message if error occurred (possibly types didn't match etc.)
  if (is_condition(res_tbl)) {
    return(conditionMessage(res_tbl))
  }
  n_mismatch <- pull(head(res_tbl, 1), n_mismatch)
  # return empty character if candidate
  if (is_empty(n_mismatch)) {
    return("")
  }
  # calculate percentage and compose detailed description for missing values
  n_total <- pull(head(res_tbl, 1), n_total)
  percentage_missing <- as.character(round((n_mismatch / n_total) * 100, 1))
  vals_extended <- res_tbl %>%
    mutate(num_mismatch = paste0(mismatch_or_null, " (", n, ")")) %>%
    # FIXME: this fails on SQLite, why?
    # mutate(num_mismatch = glue("{as.character(mismatch_or_null)} ({as.character(n)})")) %>%
    pull()
  vals_formatted <- commas(format(vals_extended, trim = TRUE, justify = "none"), capped = TRUE)
  glue(
    "{as.character(n_mismatch)} entries ({percentage_missing}%) of ",
    "{tick(glue('{t1_name}${colname}'))} not in {tick(glue('{t2_name}${pk}'))}: {vals_formatted}"
  )
}
# Errors ------------------------------------------------------------------
# Signal a classed error: the given columns already form a FK between the tables.
abort_fk_exists <- function(child_table_name, colnames, parent_table_name) {
  msg <- error_txt_fk_exists(child_table_name, colnames, parent_table_name)
  abort(msg, .subclass = dm_error_full("fk_exists"))
}
# Message text for abort_fk_exists().
# BUG FIX: "alreay" -> "already" in the user-facing error message.
error_txt_fk_exists <- function(child_table_name, colnames, parent_table_name) {
  glue(
    "({commas(tick(colnames))}) is already a foreign key of table ",
    "{tick(child_table_name)} into table {tick(parent_table_name)}."
  )
}
# Signal a classed error: the given columns are not a FK between the tables.
abort_is_not_fkc <- function(child_table_name, colnames,
                             parent_table_name) {
  msg <- error_txt_is_not_fkc(child_table_name, colnames, parent_table_name)
  abort(msg, .subclass = dm_error_full("is_not_fkc"))
}
# Message text for abort_is_not_fkc().
error_txt_is_not_fkc <- function(child_table_name, colnames,
                                 parent_table_name) {
  glue(
    "({commas(tick(colnames))}) is not a foreign key of table {tick(child_table_name)} into table {tick(parent_table_name)}."
  )
}
# Signal a classed error: `columns` was not supplied to dm_rm_fk().
abort_rm_fk_col_missing <- function() {
  msg <- error_txt_rm_fk_col_missing()
  abort(msg, .subclass = dm_error_full("rm_fk_col_missing"))
}
# Message text for abort_rm_fk_col_missing().
error_txt_rm_fk_col_missing <- function() {
  "Parameter `columns` has to be set. Pass `NULL` for removing all references."
}
|
/R/foreign-keys.R
|
permissive
|
pat-s/dm
|
R
| false
| false
| 15,397
|
r
|
#' Add/remove foreign keys
#'
#' @description `dm_add_fk()` marks the specified columns as the foreign key of table `table` with
#' respect to the primary key of table `ref_table`.
#' If `check == TRUE`, then it will first check if the values in columns `columns` are a subset
#' of the values of the primary key in table `ref_table`.
#'
#' @section Compound keys:
#'
#' Currently, keys consisting of more than one column are not supported.
#' [This feature](https://github.com/krlmlr/dm/issues/3) is planned for dm 0.2.0.
#' The syntax of these functions will be extended but will remain compatible
#' with current semantics.
#'
#' @inheritParams dm_add_pk
#' @param columns For `dm_add_fk()`: The columns of `table` which are to become the foreign key columns that
#' reference the primary key of `ref_table`.
#'
#' For `dm_rm_fk()`: The columns of `table` that should no longer be referencing the primary key of `ref_table`.
#' If `NULL`, all columns will be evaluated.
#' @param ref_table For `dm_add_fk()`: The table which `table` will be referencing.
#' This table needs to have a primary key set.
#'
#' For `dm_rm_fk()`: The table that `table` is referencing.
#' @param check Boolean, if `TRUE`, a check will be performed to determine if the values of
#' `column` are a subset of the values of the primary key column of `ref_table`.
#'
#' @family foreign key functions
#'
#' @rdname dm_add_fk
#'
#' @return For `dm_add_fk()`: An updated `dm` with an additional foreign key relation.
#'
#' @export
#' @examples
#' if (rlang::is_installed("nycflights13")) {
#' nycflights_dm <- dm(
#' planes = nycflights13::planes,
#' flights = nycflights13::flights
#' )
#' } else {
#' message("Using mock-up data, install the nycflights13 package to fix.")
#' nycflights_dm <- dm(
#' planes = tibble(tailnum = character()),
#' flights = tibble(tailnum = character())
#' )
#' }
#'
#' nycflights_dm %>%
#' dm_draw()
#'
#' nycflights_dm %>%
#' dm_add_pk(planes, tailnum) %>%
#' dm_add_fk(flights, tailnum, planes) %>%
#' dm_draw()
# Marks `columns` of `table` as a foreign key into the primary key of
# `ref_table`; optionally verifies the subset relation first (`check`).
dm_add_fk <- function(dm, table, columns, ref_table, check = FALSE) {
  check_not_zoomed(dm)
  table_name <- as_name(ensym(table))
  ref_table_name <- as_name(ensym(ref_table))
  check_correct_input(dm, c(table_name, ref_table_name), 2L)
  column_name <- as_name(ensym(columns))
  check_col_input(dm, table_name, column_name)
  ref_column_name <- dm_get_pk_impl(dm, ref_table_name)
  if (is_empty(ref_column_name)) {
    # A FK must point at a primary key; bail out if none is set.
    abort_ref_tbl_has_no_pk(ref_table_name)
  }
  if (check) {
    tbl_obj <- dm_get_tables(dm)[[table_name]]
    ref_tbl_obj <- dm_get_tables(dm)[[ref_table_name]]
    # Verify the FK values are a subset of the referenced PK values.
    if (!is_subset(tbl_obj, !!column_name, ref_tbl_obj, !!ref_column_name)) {
      abort_not_subset_of(table_name, column_name, ref_table_name, ref_column_name)
    }
  }
  dm_add_fk_impl(dm, table_name, column_name, ref_table_name)
}
# Internal: append the FK entry to the parent table's `fks` tibble in the
# dm definition; a duplicate FK errors only in "strict keys" mode.
dm_add_fk_impl <- function(dm, table, column, ref_table) {
  def <- dm_get_def(dm)
  i <- which(def$table == ref_table)
  fks <- def$fks[[i]]
  # Does an identical FK (same child table and same columns) already exist?
  existing <- fks$table == table & !is.na(vctrs::vec_match(fks$column, list(column)))
  if (any(existing)) {
    if (dm_is_strict_keys(dm)) {
      abort_fk_exists(table, column, ref_table)
    }
    # Lenient mode: re-adding an existing FK is a no-op.
    return(dm)
  }
  def$fks[[i]] <- vctrs::vec_rbind(
    fks,
    new_fk(table, list(column))
  )
  new_dm3(def)
}
#' Check if foreign keys exists
#'
#' `dm_has_fk()` checks if a foreign key reference exists between two tables in a `dm`.
#'
#' @inheritParams dm_add_fk
#' @param ref_table The table to be checked if it is referred to.
#'
#' @return A boolean value: `TRUE` if a reference from `table` to `ref_table` exists, `FALSE` otherwise.
#'
#' @family foreign key functions
#'
#' @export
#' @examples
#' dm_nycflights13() %>%
#' dm_has_fk(flights, airports)
#' dm_nycflights13() %>%
#' dm_has_fk(airports, flights)
# TRUE if a foreign key from `table` into `ref_table` exists in `dm`.
dm_has_fk <- function(dm, table, ref_table) {
  check_not_zoomed(dm)
  table_name <- as_name(ensym(table))
  ref_table_name <- as_name(ensym(ref_table))
  dm_has_fk_impl(dm, table_name, ref_table_name)
}
# Internal: a FK exists iff dm_get_fk_impl() returns at least one column.
dm_has_fk_impl <- function(dm, table_name, ref_table_name) {
  fk_cols <- dm_get_fk_impl(dm, table_name, ref_table_name)
  has_length(fk_cols)
}
#' Foreign key column names
#'
#' @description `dm_get_fk()` returns the names of the
#' columns marked as foreign key of table `table` with respect to table `ref_table` within a [`dm`] object.
#' If no foreign key is set between the tables, an empty character vector is returned.
#'
#' @section Compound keys:
#'
#' Currently, keys consisting of more than one column are not supported.
#' [This feature](https://github.com/krlmlr/dm/issues/3) is planned for dm 0.2.0.
#' Therefore the function may return vectors of length greater than one in the future.
#'
#' @inheritParams dm_has_fk
#' @param ref_table The table that is referenced from `table`.
#'
#' @family foreign key functions
#'
#' @return A list of character vectors with the column name(s) of `table`,
#' pointing to the primary key of `ref_table`.
#'
#' @export
#' @examples
#' dm_nycflights13() %>%
#' dm_get_fk(flights, airports)
#' dm_nycflights13(cycle = TRUE) %>%
#' dm_get_fk(flights, airports)
# Returns the FK column name(s) of `table` referencing `ref_table`,
# wrapped in the package's key class; empty if no FK is set.
dm_get_fk <- function(dm, table, ref_table) {
  check_not_zoomed(dm)
  table_name <- as_name(ensym(table))
  ref_table_name <- as_name(ensym(ref_table))
  new_keys(dm_get_fk_impl(dm, table_name, ref_table_name))
}
# Internal: look up the FK columns for the (child, parent) table pair.
dm_get_fk_impl <- function(dm, table_name, ref_table_name) {
  check_correct_input(dm, c(table_name, ref_table_name), 2L)
  fks <- dm_get_data_model_fks(dm)
  is_pair <- fks$table == table_name & fks$ref == ref_table_name
  fks$column[is_pair]
}
#' Get foreign key constraints
#'
#' Get a summary of all foreign key relations in a [`dm`].
#'
#' @section Compound keys:
#'
#' Currently, keys consisting of more than one column are not supported.
#' [This feature](https://github.com/krlmlr/dm/issues/3) is planned for dm 0.2.0.
#' Therefore the `child_fk_cols` column may contain vectors of length greater than one.
#'
#' @return A tibble with the following columns:
#' \describe{
#' \item{`child_table`}{child table,}
#' \item{`child_fk_cols`}{foreign key column in child table,}
#' \item{`parent_table`}{parent table.}
#' }
#'
#' @inheritParams dm_has_fk
#'
#' @family foreign key functions
#'
#' @examples
#' dm_get_all_fks(dm_nycflights13())
#' @export
# Public wrapper: returns all foreign key relations of `dm` as a tibble,
# with `child_fk_cols` wrapped in the package's key class for pretty printing.
dm_get_all_fks <- function(dm) {
  check_not_zoomed(dm)
  dm_get_all_fks_impl(dm) %>%
    mutate(child_fk_cols = new_keys(child_fk_cols))
}
# Internal: extract the raw FK metadata and normalize column names to
# (child_table, child_fk_cols, parent_table), sorted for stable output.
dm_get_all_fks_impl <- function(dm) {
  dm_get_data_model_fks(dm) %>%
    select(child_table = table, child_fk_cols = column, parent_table = ref) %>%
    arrange(child_table, child_fk_cols)
}
#' Remove the reference(s) from one [`dm`] table to another
#'
#' @description `dm_rm_fk()` can remove either one reference between two tables, or all references at once, if argument `columns = NULL`.
#' All arguments may be provided quoted or unquoted.
#'
#' @rdname dm_add_fk
#'
#' @family foreign key functions
#'
#' @return For `dm_rm_fk()`: An updated `dm` without the given foreign key relation.
#'
#' @export
#' @examples
#'
#' dm_nycflights13(cycle = TRUE) %>%
#' dm_rm_fk(flights, dest, airports) %>%
#' dm_draw()
# Removes one (or, if `columns = NULL`, all) foreign key reference(s)
# from `table` into `ref_table`.
dm_rm_fk <- function(dm, table, columns, ref_table) {
  check_not_zoomed(dm)
  # Capture `columns` lazily so a missing argument can be distinguished
  # from an explicit NULL (NULL means "remove all references").
  column_quo <- enquo(columns)
  if (quo_is_missing(column_quo)) {
    abort_rm_fk_col_missing()
  }
  table_name <- as_name(ensym(table))
  ref_table_name <- as_name(ensym(ref_table))
  check_correct_input(dm, c(table_name, ref_table_name), 2L)
  fk_cols <- dm_get_fk_impl(dm, table_name, ref_table_name)
  if (is_empty(fk_cols)) {
    # No FK between these tables at all -> classed error.
    # FIXME: Simplify, check is already done in dm_rm_fk_impl()
    abort_is_not_fkc(table_name, fk_cols, ref_table_name)
  }
  if (quo_is_null(column_quo)) {
    # NULL: remove every FK column pointing from `table` to `ref_table`.
    cols <- fk_cols
  } else {
    # FIXME: Add tidyselect support
    cols <- as_name(ensym(columns))
  }
  dm_rm_fk_impl(dm, table_name, cols, ref_table_name)
}
# Internal: drop the FK rows matching `cols` from the `fks` entry of the
# parent table (`ref_table_name`) in the dm definition.
dm_rm_fk_impl <- function(dm, table_name, cols, ref_table_name) {
  # FIXME: compound keys
  cols <- as.list(cols)
  def <- dm_get_def(dm)
  i <- which(def$table == ref_table_name)
  fks <- def$fks[[i]]
  # Keep FK rows that either belong to another child table or whose
  # column is not among the ones being removed.
  ii <- fks$table != table_name | is.na(vctrs::vec_match(fks$column, cols))
  if (all(ii)) {
    # Nothing matched: the requested FK does not exist.
    abort_is_not_fkc(table_name, cols, ref_table_name)
  }
  fks <- fks[ii, ]
  def$fks[[i]] <- fks
  new_dm3(def)
}
#' Foreign key candidates
#'
#' @description \lifecycle{questioning}
#'
#' Determine which columns would be good candidates to be used as foreign keys of a table,
#' to reference the primary key column of another table of the [`dm`] object.
#'
#' @inheritParams dm_add_fk
#' @param table The table whose columns should be tested for suitability as foreign keys.
#' @param ref_table A table with a primary key.
#'
#' @details `dm_enum_fk_candidates()` first checks if `ref_table` has a primary key set,
#' if not, an error is thrown.
#'
#' If `ref_table` does have a primary key, then a join operation will be tried using
#' that key as the `by` argument of join() to match it to each column of `table`.
#' Attempting to join incompatible columns triggers an error.
#'
#' The outcome of the join operation determines the value of the `why` column in the result:
#'
#' - an empty value for a column of `table` that is a suitable foreign key candidate
#' - the count and percentage of missing matches for a column that is not suitable
#' - the error message triggered for unsuitable candidates that may include the types of mismatched columns
#'
#' @section Life cycle:
#' These functions are marked "questioning" because we are not yet sure about
#' the interface, in particular if we need both `dm_enum...()` and `enum...()`
#' variants.
#' Changing the interface later seems harmless because these functions are
#' most likely used interactively.
#'
#' @return A tibble with the following columns:
#' \describe{
#' \item{`columns`}{columns of `table`,}
#' \item{`candidate`}{boolean: are these columns a candidate for a foreign key,}
#'   \item{`why`}{if not a candidate for a foreign key, explanation for this.}
#' }
#'
#' @family foreign key functions
#'
#' @examples
#' dm_nycflights13() %>%
#' dm_enum_fk_candidates(flights, airports)
#'
#' dm_nycflights13() %>%
#' dm_zoom_to(flights) %>%
#' enum_fk_candidates(airports)
#' @export
# Tests every column of `table` as a candidate FK into the primary key of
# `ref_table` (see the roxygen block above for the result format).
dm_enum_fk_candidates <- function(dm, table, ref_table) {
  check_not_zoomed(dm)
  # FIXME: with "direct" filter maybe no check necessary: but do we want to check
  # for tables retrieved with `tbl()` or with `dm_get_tables()[[table_name]]`
  check_no_filter(dm)
  table_name <- as_string(ensym(table))
  ref_table_name <- as_string(ensym(ref_table))
  check_correct_input(dm, c(table_name, ref_table_name), 2L)
  ref_tbl_pk <- dm_get_pk_impl(dm, ref_table_name)
  ref_tbl <- tbl(dm, ref_table_name)
  tbl <- tbl(dm, table_name)
  # Delegate to the shared implementation, then rename/wrap for the
  # documented output format.
  enum_fk_candidates_impl(table_name, tbl, ref_table_name, ref_tbl, ref_tbl_pk) %>%
    rename(columns = column) %>%
    mutate(columns = new_keys(columns))
}
#' @details `enum_fk_candidates()` works like `dm_enum_fk_candidates()` with the zoomed table as `table`.
#'
#' @rdname dm_enum_fk_candidates
#' @param zoomed_dm A `dm` with a zoomed table.
#' @export
# Zoomed-table variant of dm_enum_fk_candidates(): the zoomed table of
# `zoomed_dm` plays the role of `table`.
enum_fk_candidates <- function(zoomed_dm, ref_table) {
  check_zoomed(zoomed_dm)
  check_no_filter(zoomed_dm)
  table_name <- orig_name_zoomed(zoomed_dm)
  ref_table_name <- as_string(ensym(ref_table))
  check_correct_input(zoomed_dm, ref_table_name)
  ref_tbl_pk <- dm_get_pk_impl(zoomed_dm, ref_table_name)
  ref_tbl <- dm_get_tables_impl(zoomed_dm)[[ref_table_name]]
  enum_fk_candidates_impl(table_name, get_zoomed_tbl(zoomed_dm), ref_table_name, ref_tbl, ref_tbl_pk) %>%
    rename(columns = column) %>%
    mutate(columns = new_keys(columns))
}
# Internal workhorse for (dm_)enum_fk_candidates().
#
# For each column of `tbl`, check_fk() returns "" when the column is a valid
# FK candidate into `ref_tbl`'s primary key, or an explanatory message
# otherwise. Results are ordered: candidates first, then by the leading
# mismatch count embedded in `why`, then by column name.
enum_fk_candidates_impl <- function(table_name, tbl, ref_table_name, ref_tbl, ref_tbl_pk) {
  if (is_empty(ref_tbl_pk)) {
    abort_ref_tbl_has_no_pk(ref_table_name)
  }
  tbl_colnames <- colnames(tbl)
  tibble(
    column = tbl_colnames,
    why = map_chr(column, ~ check_fk(tbl, table_name, .x, ref_tbl, ref_table_name, ref_tbl_pk))
  ) %>%
    # An empty `why` marks a candidate; the comparison is already logical,
    # so the former ifelse(..., TRUE, FALSE) wrapper was redundant.
    mutate(candidate = (why == "")) %>%
    select(column, candidate, why) %>%
    # Extract the leading mismatch count (if any) for a sensible sort order.
    mutate(arrange_col = as.integer(gsub("(^[0-9]*).*$", "\\1", why))) %>%
    arrange(desc(candidate), arrange_col, column) %>%
    select(-arrange_col)
}
# Checks whether column `colname` of `t1` is a valid FK into PK `pk` of `t2`.
# Returns "" on success, an explanatory message (mismatch statistics or the
# join error text) otherwise. Works for both local tibbles and lazy
# (dbplyr) tables; `collect()` pulls the small summary at the end.
check_fk <- function(t1, t1_name, colname, t2, t2_name, pk) {
  t1_join <- t1 %>% select(value = !!sym(colname))
  t2_join <- t2 %>%
    select(value = !!sym(pk)) %>%
    mutate(match = 1L)
  res_tbl <- tryCatch(
    left_join(t1_join, t2_join, by = "value") %>%
      # if value is NULL, this also counts as a match -- consistent with fk semantics
      mutate(mismatch_or_null = if_else(is.na(match), value, NULL)) %>%
      safe_count(mismatch_or_null) %>%
      ungroup() %>% # dbplyr problem?
      # Total number of unmatched (non-NA) values across all rows.
      mutate(n_mismatch = sum(if_else(is.na(mismatch_or_null), 0L, n), na.rm = TRUE)) %>%
      mutate(n_total = sum(n, na.rm = TRUE)) %>%
      arrange(desc(n)) %>%
      filter(!is.na(mismatch_or_null)) %>%
      # Keep just enough distinct offenders for the "..."-capped message.
      head(MAX_COMMAS + 1L) %>%
      collect(),
    error = identity
  )
  # return error message if error occurred (possibly types didn't match etc.)
  if (is_condition(res_tbl)) {
    return(conditionMessage(res_tbl))
  }
  n_mismatch <- pull(head(res_tbl, 1), n_mismatch)
  # return empty character if candidate
  if (is_empty(n_mismatch)) {
    return("")
  }
  # calculate percentage and compose detailed description for missing values
  n_total <- pull(head(res_tbl, 1), n_total)
  percentage_missing <- as.character(round((n_mismatch / n_total) * 100, 1))
  vals_extended <- res_tbl %>%
    mutate(num_mismatch = paste0(mismatch_or_null, " (", n, ")")) %>%
    # FIXME: this fails on SQLite, why?
    # mutate(num_mismatch = glue("{as.character(mismatch_or_null)} ({as.character(n)})")) %>%
    pull()
  vals_formatted <- commas(format(vals_extended, trim = TRUE, justify = "none"), capped = TRUE)
  glue(
    "{as.character(n_mismatch)} entries ({percentage_missing}%) of ",
    "{tick(glue('{t1_name}${colname}'))} not in {tick(glue('{t2_name}${pk}'))}: {vals_formatted}"
  )
}
# Errors ------------------------------------------------------------------
# Signal a classed error: the given columns already form a FK between the tables.
abort_fk_exists <- function(child_table_name, colnames, parent_table_name) {
  msg <- error_txt_fk_exists(child_table_name, colnames, parent_table_name)
  abort(msg, .subclass = dm_error_full("fk_exists"))
}
# Message text for abort_fk_exists().
# BUG FIX: "alreay" -> "already" in the user-facing error message.
error_txt_fk_exists <- function(child_table_name, colnames, parent_table_name) {
  glue(
    "({commas(tick(colnames))}) is already a foreign key of table ",
    "{tick(child_table_name)} into table {tick(parent_table_name)}."
  )
}
# Signal a classed error: the given columns are not a FK between the tables.
abort_is_not_fkc <- function(child_table_name, colnames,
                             parent_table_name) {
  msg <- error_txt_is_not_fkc(child_table_name, colnames, parent_table_name)
  abort(msg, .subclass = dm_error_full("is_not_fkc"))
}
# Message text for abort_is_not_fkc().
error_txt_is_not_fkc <- function(child_table_name, colnames,
                                 parent_table_name) {
  glue(
    "({commas(tick(colnames))}) is not a foreign key of table {tick(child_table_name)} into table {tick(parent_table_name)}."
  )
}
# Signal a classed error: `columns` was not supplied to dm_rm_fk().
abort_rm_fk_col_missing <- function() {
  msg <- error_txt_rm_fk_col_missing()
  abort(msg, .subclass = dm_error_full("rm_fk_col_missing"))
}
# Message text for abort_rm_fk_col_missing().
error_txt_rm_fk_col_missing <- function() {
  "Parameter `columns` has to be set. Pass `NULL` for removing all references."
}
|
# Monte-Carlo null distribution of the U statistic (promoter-usage
# dissimilarity) and an approximate upper-tail p-value via a Beta fit.
#
# Args:
#   C:      number of conditions (column groups of the contingency table).
#   P:      number of promoters (rows per condition).
#   B:      number of Monte-Carlo replicates.
#   mu:     numeric vector of P promoter means.
#   cutoff: U threshold at which to evaluate the upper-tail p-value.
#   pvalue: if FALSE, return the B simulated U values instead of a p-value.
#   seed:   RNG seed for reproducibility.
#   phi:    numeric vector of P dispersions; (near-)zero => Poisson sampling.
#
# Returns: numeric vector of length B (pvalue = FALSE), or a single p-value.
gen.U <- function(C, P, B = 1000, mu, cutoff, pvalue = TRUE, seed = 1, phi) {
  lambda <- mu[rep(1:P, C)] # replicate promoter means across conditions
  phi <- phi[rep(1:P, C)]
  size <- 1 / phi
  set.seed(seed)
  # Negative binomial when overdispersion is present, Poisson otherwise.
  # (if/else also covers sum(phi) == 1e-12 exactly, which previously left
  # Z undefined because two disjoint one-sided ifs were used.)
  if (sum(phi) > 1e-12) {
    Z <- matrix(rnbinom(n = C * P * B, mu = lambda, size = size), nrow = C * P)
  } else {
    Z <- matrix(rpois(C * P * B, lambda = lambda), nrow = C * P)
  }
  S <- colSums(Z)    # per-replicate totals
  Pij <- t(t(Z) / S) # cell proportions
  # Per-condition marginal proportions (C x B).
  cond_marg <- matrix(NA_real_, nrow = C, ncol = B)
  for (k in seq_len(C)) cond_marg[k, ] <- t(t(colSums(Z[seq(1 + P * (k - 1), P * k), ])) / S)
  # Per-promoter marginal proportions (P x B).
  prom_marg <- matrix(NA_real_, nrow = P, ncol = B)
  for (k in seq_len(P)) prom_marg[k, ] <- t(t(colSums(Z[seq(k, C * P - P + k, P), ])) / S)
  RR <- prom_marg[rep(1:P, C), ]
  CC <- cond_marg[rep(1:C, rep(P, C)), ]
  # Numerator: mutual-information-style divergence of cells from marginals.
  temp <- Pij * log((RR * CC) / Pij)
  temp[is.nan(temp)] <- 0
  U.num <- colSums(temp)
  # Denominator: entropy of the promoter marginals.
  temp2 <- prom_marg * log(prom_marg)
  temp2[is.nan(temp2)] <- 0
  U.den <- colSums(temp2)
  U <- U.num / U.den
  # Clamp degenerate replicates (0/0, negative ratios, division by zero).
  U[is.nan(U)] <- 0
  U[U < 0] <- 0
  U[U == Inf] <- 0
  if (!pvalue) return(U)
  if (sum(U) < 1e-12) return(1) # degenerate case: all simulated U are 0
  # Method-of-moments Beta fit to the simulated U values.
  xbar <- mean(U, na.rm = TRUE)
  vbar <- var(U, na.rm = TRUE)
  alpha <- xbar * ((xbar * (1 - xbar)) / vbar - 1)
  beta <- (1 - xbar) * ((xbar * (1 - xbar)) / vbar - 1)
  if (alpha < 0 | beta < 0) return(xbar) # happens if U is only 0 or only 1
  # BUG FIX: epsilon was 1e6, which made `cutoff - epsilon` hugely negative
  # and the p-value at cutoff == 1 trivially 1; a small offset just below
  # the Beta support boundary was clearly intended.
  epsilon <- 1e-6
  if (cutoff == 1) {
    return(pbeta(q = cutoff - epsilon, shape1 = alpha, shape2 = beta, lower.tail = FALSE))
  }
  pbeta(q = cutoff, shape1 = alpha, shape2 = beta, lower.tail = FALSE)
}
|
/R/gen.U.R
|
no_license
|
edimont/CAGExploreR
|
R
| false
| false
| 1,299
|
r
|
# Monte-Carlo null distribution of the U statistic (promoter-usage
# dissimilarity) and an approximate upper-tail p-value via a Beta fit.
#
# Args:
#   C:      number of conditions (column groups of the contingency table).
#   P:      number of promoters (rows per condition).
#   B:      number of Monte-Carlo replicates.
#   mu:     numeric vector of P promoter means.
#   cutoff: U threshold at which to evaluate the upper-tail p-value.
#   pvalue: if FALSE, return the B simulated U values instead of a p-value.
#   seed:   RNG seed for reproducibility.
#   phi:    numeric vector of P dispersions; (near-)zero => Poisson sampling.
#
# Returns: numeric vector of length B (pvalue = FALSE), or a single p-value.
gen.U <- function(C, P, B = 1000, mu, cutoff, pvalue = TRUE, seed = 1, phi) {
  lambda <- mu[rep(1:P, C)] # replicate promoter means across conditions
  phi <- phi[rep(1:P, C)]
  size <- 1 / phi
  set.seed(seed)
  # Negative binomial when overdispersion is present, Poisson otherwise.
  # (if/else also covers sum(phi) == 1e-12 exactly, which previously left
  # Z undefined because two disjoint one-sided ifs were used.)
  if (sum(phi) > 1e-12) {
    Z <- matrix(rnbinom(n = C * P * B, mu = lambda, size = size), nrow = C * P)
  } else {
    Z <- matrix(rpois(C * P * B, lambda = lambda), nrow = C * P)
  }
  S <- colSums(Z)    # per-replicate totals
  Pij <- t(t(Z) / S) # cell proportions
  # Per-condition marginal proportions (C x B).
  cond_marg <- matrix(NA_real_, nrow = C, ncol = B)
  for (k in seq_len(C)) cond_marg[k, ] <- t(t(colSums(Z[seq(1 + P * (k - 1), P * k), ])) / S)
  # Per-promoter marginal proportions (P x B).
  prom_marg <- matrix(NA_real_, nrow = P, ncol = B)
  for (k in seq_len(P)) prom_marg[k, ] <- t(t(colSums(Z[seq(k, C * P - P + k, P), ])) / S)
  RR <- prom_marg[rep(1:P, C), ]
  CC <- cond_marg[rep(1:C, rep(P, C)), ]
  # Numerator: mutual-information-style divergence of cells from marginals.
  temp <- Pij * log((RR * CC) / Pij)
  temp[is.nan(temp)] <- 0
  U.num <- colSums(temp)
  # Denominator: entropy of the promoter marginals.
  temp2 <- prom_marg * log(prom_marg)
  temp2[is.nan(temp2)] <- 0
  U.den <- colSums(temp2)
  U <- U.num / U.den
  # Clamp degenerate replicates (0/0, negative ratios, division by zero).
  U[is.nan(U)] <- 0
  U[U < 0] <- 0
  U[U == Inf] <- 0
  if (!pvalue) return(U)
  if (sum(U) < 1e-12) return(1) # degenerate case: all simulated U are 0
  # Method-of-moments Beta fit to the simulated U values.
  xbar <- mean(U, na.rm = TRUE)
  vbar <- var(U, na.rm = TRUE)
  alpha <- xbar * ((xbar * (1 - xbar)) / vbar - 1)
  beta <- (1 - xbar) * ((xbar * (1 - xbar)) / vbar - 1)
  if (alpha < 0 | beta < 0) return(xbar) # happens if U is only 0 or only 1
  # BUG FIX: epsilon was 1e6, which made `cutoff - epsilon` hugely negative
  # and the p-value at cutoff == 1 trivially 1; a small offset just below
  # the Beta support boundary was clearly intended.
  epsilon <- 1e-6
  if (cutoff == 1) {
    return(pbeta(q = cutoff - epsilon, shape1 = alpha, shape2 = beta, lower.tail = FALSE))
  }
  pbeta(q = cutoff, shape1 = alpha, shape2 = beta, lower.tail = FALSE)
}
|
# This file contains code for:
# 1. Reading the Individual Household Electric Power Consumption Data Set
#    from "household_power_consumption.txt"
#    File URL: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# 2. Merging and converting the Date and Time columns into one column "Date_full".
# 3. Subsetting the data to only the dates 2007-02-01 and 2007-02-02.
# 4. Producing a line graph of "Date_full" vs "Global_active_power".
# 5. Exporting the graph to the plot2.png file.

# Load "household_power_consumption.txt" data into R ("?" encodes missing values)
powerData <- read.table(file = "household_power_consumption.txt",
                        header = TRUE, sep = ";", na.strings = "?",
                        nrows = 2075259, comment.char = "")

# Merge and convert the Date and Time columns into one column "Date_full".
# paste() joins the two fields with a single space, so the format string
# carries a matching space between %Y and %H for clarity.
powerData$Date_full <- strptime(paste(powerData$Date, powerData$Time),
                                "%d/%m/%Y %H:%M:%S")

# Subset powerData to the dates 2007-02-01 and 2007-02-02, dropping the
# original Date and Time columns (columns 1 and 2).
powerDataSubset <- subset(powerData,
                          powerData$Date_full <= "2007-02-02 23:59:59" &
                            powerData$Date_full >= "2007-02-01 00:00:00",
                          select = c(-1, -2))

# Open graphic device (PNG file)
png(file = "plot2.png", height = 480, width = 480)

# Plot a line graph of Date_full vs Global_active_power.
# BUG FIX: y-axis label read "Glocal" instead of "Global".
plot(x = powerDataSubset$Date_full,
     y = powerDataSubset$Global_active_power,
     ylab = "Global Active Power (kilowatts)",
     xlab = "",
     type = "l")

# Close graphic device (PNG file)
dev.off()
|
/plot2.R
|
no_license
|
7afiz/ExData_Plotting1
|
R
| false
| false
| 1,661
|
r
|
# This file contains code for:
# 1. Reading the Individual Household Electric Power Consumption Data Set
#    from "household_power_consumption.txt"
#    File URL: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# 2. Merging and converting the Date and Time columns into one column "Date_full".
# 3. Subsetting the data to only the dates 2007-02-01 and 2007-02-02.
# 4. Producing a line graph of "Date_full" vs "Global_active_power".
# 5. Exporting the graph to the plot2.png file.

# Load "household_power_consumption.txt" data into R ("?" encodes missing values)
powerData <- read.table(file = "household_power_consumption.txt",
                        header = TRUE, sep = ";", na.strings = "?",
                        nrows = 2075259, comment.char = "")

# Merge and convert the Date and Time columns into one column "Date_full".
# paste() joins the two fields with a single space, so the format string
# carries a matching space between %Y and %H for clarity.
powerData$Date_full <- strptime(paste(powerData$Date, powerData$Time),
                                "%d/%m/%Y %H:%M:%S")

# Subset powerData to the dates 2007-02-01 and 2007-02-02, dropping the
# original Date and Time columns (columns 1 and 2).
powerDataSubset <- subset(powerData,
                          powerData$Date_full <= "2007-02-02 23:59:59" &
                            powerData$Date_full >= "2007-02-01 00:00:00",
                          select = c(-1, -2))

# Open graphic device (PNG file)
png(file = "plot2.png", height = 480, width = 480)

# Plot a line graph of Date_full vs Global_active_power.
# BUG FIX: y-axis label read "Glocal" instead of "Global".
plot(x = powerDataSubset$Date_full,
     y = powerDataSubset$Global_active_power,
     ylab = "Global Active Power (kilowatts)",
     xlab = "",
     type = "l")

# Close graphic device (PNG file)
dev.off()
|
# Eigen-analysis of a projection matrix A: dominant eigenvalue (lambda1),
# stable stage distribution, sensitivities, elasticities, reproductive
# values, and damping ratio.
#
# Args:
#   A:    square numeric projection matrix.
#   zero: if TRUE, sensitivities at structural zeros of A are set to 0.
#
# Returns: list(lambda1, stable.stage, sensitivities, elasticities,
#   repro.value, damping.ratio). If the eigenvector matrix is singular,
#   sensitivities/elasticities/repro.value come back NA-filled.
eigen.analysis <- function(A, zero = TRUE)
{
  ev <- eigen(A)
  # R sorts eigenvalues in decreasing order of Mod(values); the dominant one
  # is usually first, except for imprimitive matrices with ties in modulus,
  # so pick the eigenvalue with the largest real part explicitly.
  lmax <- which.max(Re(ev$values))
  lambda <- Re(ev$values[lmax])
  # Damping ratio: dominant modulus over the second-largest *distinct*
  # modulus; rle() + round() collapses ties from imprimitive matrices.
  dr <- rle(round(Mod(ev$values), 5))$values
  dr <- dr[1] / dr[2]
  W <- ev$vectors
  w <- abs(Re(W[, lmax]))
  # If W is singular, return NAs rather than stopping (friendlier inside
  # loops and bootstrap runs).
  V <- try(Conj(solve(W)), silent = TRUE)
  # IDIOM FIX: use inherits() instead of class(V) == "try-error", which is
  # fragile whenever class() returns a vector of length > 1.
  if (inherits(V, "try-error")) {
    eigen.analysis <- list(lambda1 = lambda, stable.stage = w / sum(w),
                           sensitivities = A * NA, elasticities = A * NA,
                           repro.value = w * NA, damping.ratio = dr)
  }
  else {
    v <- abs(Re(V[lmax, ]))
    # Sensitivity matrix: outer product of reproductive values and the
    # stable stage vector.
    s <- v %o% w
    if (zero) {
      s[A == 0] <- 0
    }
    e <- s * A / lambda
    x <- dimnames(A)
    dimnames(s) <- x
    names(w) <- x[[1]]
    names(v) <- x[[1]]
    eigen.analysis <- list(lambda1 = lambda, stable.stage = w / sum(w),
                           sensitivities = s, elasticities = e,
                           repro.value = v / v[1], damping.ratio = dr)
  }
  eigen.analysis
}
|
/R/eigen.analysis.R
|
no_license
|
kostask84/popbio
|
R
| false
| false
| 1,464
|
r
|
# Eigen-analysis of a projection matrix A: dominant eigenvalue (lambda1),
# stable stage distribution, sensitivities, elasticities, reproductive
# values, and damping ratio.
#
# Args:
#   A:    square numeric projection matrix.
#   zero: if TRUE, sensitivities at structural zeros of A are set to 0.
#
# Returns: list(lambda1, stable.stage, sensitivities, elasticities,
#   repro.value, damping.ratio). If the eigenvector matrix is singular,
#   sensitivities/elasticities/repro.value come back NA-filled.
eigen.analysis <- function(A, zero = TRUE)
{
  ev <- eigen(A)
  # R sorts eigenvalues in decreasing order of Mod(values); the dominant one
  # is usually first, except for imprimitive matrices with ties in modulus,
  # so pick the eigenvalue with the largest real part explicitly.
  lmax <- which.max(Re(ev$values))
  lambda <- Re(ev$values[lmax])
  # Damping ratio: dominant modulus over the second-largest *distinct*
  # modulus; rle() + round() collapses ties from imprimitive matrices.
  dr <- rle(round(Mod(ev$values), 5))$values
  dr <- dr[1] / dr[2]
  W <- ev$vectors
  w <- abs(Re(W[, lmax]))
  # If W is singular, return NAs rather than stopping (friendlier inside
  # loops and bootstrap runs).
  V <- try(Conj(solve(W)), silent = TRUE)
  # IDIOM FIX: use inherits() instead of class(V) == "try-error", which is
  # fragile whenever class() returns a vector of length > 1.
  if (inherits(V, "try-error")) {
    eigen.analysis <- list(lambda1 = lambda, stable.stage = w / sum(w),
                           sensitivities = A * NA, elasticities = A * NA,
                           repro.value = w * NA, damping.ratio = dr)
  }
  else {
    v <- abs(Re(V[lmax, ]))
    # Sensitivity matrix: outer product of reproductive values and the
    # stable stage vector.
    s <- v %o% w
    if (zero) {
      s[A == 0] <- 0
    }
    e <- s * A / lambda
    x <- dimnames(A)
    dimnames(s) <- x
    names(w) <- x[[1]]
    names(v) <- x[[1]]
    eigen.analysis <- list(lambda1 = lambda, stable.stage = w / sum(w),
                           sensitivities = s, elasticities = e,
                           repro.value = v / v[1], damping.ratio = dr)
  }
  eigen.analysis
}
|
\name{print.character.table}
\alias{print.character.table}
\title{Prints a character.table object}
\usage{
\method{print}{character.table} (x, digits = 2,
percent = NULL, zero.replace = NULL, ...)
}
\arguments{
\item{x}{The character.table object}
\item{digits}{Integer values specifying the number of
digits to be printed.}
\item{percent}{logical. If \code{TRUE} output given as
percent. If \code{FALSE} the output is proportion. If
\code{NULL} uses the value from
\code{\link[qdap]{termco}}. Only used if \code{label} is
\code{TRUE}.}
\item{zero.replace}{Value to replace 0 values with. If
\code{NULL} uses the value from
\code{\link[qdap]{termco}}. Only used if \code{label} is
\code{TRUE}.}
\item{\ldots}{ignored}
}
\description{
Prints a character.table object.
}
|
/man/print.character.table.Rd
|
no_license
|
abresler/qdap
|
R
| false
| false
| 814
|
rd
|
\name{print.character.table}
\alias{print.character.table}
\title{Prints a character.table object}
\usage{
\method{print}{character.table} (x, digits = 2,
percent = NULL, zero.replace = NULL, ...)
}
\arguments{
\item{x}{The character.table object}
\item{digits}{Integer values specifying the number of
digits to be printed.}
\item{percent}{logical. If \code{TRUE} output given as
percent. If \code{FALSE} the output is proportion. If
\code{NULL} uses the value from
\code{\link[qdap]{termco}}. Only used if \code{label} is
\code{TRUE}.}
\item{zero.replace}{Value to replace 0 values with. If
\code{NULL} uses the value from
\code{\link[qdap]{termco}}. Only used if \code{label} is
\code{TRUE}.}
\item{\ldots}{ignored}
}
\description{
Prints a character.table object.
}
|
# Cargar librerías --------------------------------------------------------
# BUG FIX: this header line was missing its leading "#", which made the
# whole script fail to parse.
library(tidyverse)
library(extrafont)
# loadfonts()

# Cargar y procesar datos -------------------------------------------------
dat <- read.csv2("Libro2.csv")
# BUG FIX: View(datos) referenced an undefined object; `dat` (just read
# above) was clearly intended.
View(dat)
dat <- read.csv2("Libro2.csv") %>%
  pivot_longer(cols = -c(1:2),
               names_to = "Lesion",
               values_to = "Valor") %>%
  mutate(Mes = factor(Mes, levels = c("Enero", "Febrero", "Marzo", "Abril", "Mayo",
                                      "Junio", "Julio", "Agosto", "Septiembre",
                                      "Octubre", "Noviembre", "Diciembre")))
# Label positions: total per month plus a fixed radial offset of 500.
datos_la <- dat %>%
  group_by(Mes) %>%
  summarise(n = sum(Valor) + 500)
getwd()

# probando ----------------------------------------------------------------
# NOTE(review): `datos` is never defined in this script, so this block
# always errored when run; kept commented out pending clarification.
# kok <- datos %>%
#   pivot_longer(cols = -c(1:3),
#                names_to = "Lesion",
#                values_to = "Valor")

# Visualización -----------------------------------------------------------
# One label angle per month so text follows the polar layout.
myAng <- seq(-15, -345, length.out = 12)
p <- ggplot() +
  geom_col(
    data = dat,
    aes(x = Mes, y = Valor, fill = Lesion),
    width = 1,
    colour = "grey33",
    size = 0.3,
    alpha = 0.7
  ) +
  geom_text(
    data = datos_la,
    aes(label = Mes, x = Mes, y = n),
    angle = myAng,
    size = 2,
    fontface = "bold"
  ) +
  coord_polar() +
  scale_fill_manual(values = c("#bfe6b4",
                               "#c2a1c7",
                               "#64b8c0",
                               "#e5a28f")) +
  labs(
    # NOTE(review): "\f" is a form-feed escape; a "\n" line break before
    # "ficticios" was probably intended -- confirm with the author.
    title = "Consecuencias de Accidentes de Tránsito en Chile\ficticios",
    x = "",
    y = "",
    fill = "",
    caption = "@sporella"
  ) +
  theme(
    text = element_text(family = "Gill Sans MT Condensed"),
    legend.position = "bottom",
    axis.text.x = element_blank(),
    panel.grid = element_line(linetype = "dotted", colour = "grey50"),
    plot.background = element_rect(fill = "grey92"),
    legend.background = element_rect(fill = "grey92"),
    panel.background = element_rect(fill = "grey92"),
    panel.spacing = unit(1, "mm"),
    plot.title = element_text(family = "Gill Sans MT Condensed", hjust = 0.5)
  ) +
  guides(fill = guide_legend(
    nrow = 2,
    keywidth = unit(3, "mm"),
    keyheight = unit(3, "mm")
  ))
p
|
/Q2/A.R
|
no_license
|
Brandon780/scripts_
|
R
| false
| false
| 2,277
|
r
|
# Cargar librerías --------------------------------------------------------
# BUG FIX: this header line was missing its leading "#", which made the
# whole script fail to parse.
library(tidyverse)
library(extrafont)
# loadfonts()

# Cargar y procesar datos -------------------------------------------------
dat <- read.csv2("Libro2.csv")
# BUG FIX: View(datos) referenced an undefined object; `dat` (just read
# above) was clearly intended.
View(dat)
dat <- read.csv2("Libro2.csv") %>%
  pivot_longer(cols = -c(1:2),
               names_to = "Lesion",
               values_to = "Valor") %>%
  mutate(Mes = factor(Mes, levels = c("Enero", "Febrero", "Marzo", "Abril", "Mayo",
                                      "Junio", "Julio", "Agosto", "Septiembre",
                                      "Octubre", "Noviembre", "Diciembre")))
# Label positions: total per month plus a fixed radial offset of 500.
datos_la <- dat %>%
  group_by(Mes) %>%
  summarise(n = sum(Valor) + 500)
getwd()

# probando ----------------------------------------------------------------
# NOTE(review): `datos` is never defined in this script, so this block
# always errored when run; kept commented out pending clarification.
# kok <- datos %>%
#   pivot_longer(cols = -c(1:3),
#                names_to = "Lesion",
#                values_to = "Valor")

# Visualización -----------------------------------------------------------
# One label angle per month so text follows the polar layout.
myAng <- seq(-15, -345, length.out = 12)
p <- ggplot() +
  geom_col(
    data = dat,
    aes(x = Mes, y = Valor, fill = Lesion),
    width = 1,
    colour = "grey33",
    size = 0.3,
    alpha = 0.7
  ) +
  geom_text(
    data = datos_la,
    aes(label = Mes, x = Mes, y = n),
    angle = myAng,
    size = 2,
    fontface = "bold"
  ) +
  coord_polar() +
  scale_fill_manual(values = c("#bfe6b4",
                               "#c2a1c7",
                               "#64b8c0",
                               "#e5a28f")) +
  labs(
    # NOTE(review): "\f" is a form-feed escape; a "\n" line break before
    # "ficticios" was probably intended -- confirm with the author.
    title = "Consecuencias de Accidentes de Tránsito en Chile\ficticios",
    x = "",
    y = "",
    fill = "",
    caption = "@sporella"
  ) +
  theme(
    text = element_text(family = "Gill Sans MT Condensed"),
    legend.position = "bottom",
    axis.text.x = element_blank(),
    panel.grid = element_line(linetype = "dotted", colour = "grey50"),
    plot.background = element_rect(fill = "grey92"),
    legend.background = element_rect(fill = "grey92"),
    panel.background = element_rect(fill = "grey92"),
    panel.spacing = unit(1, "mm"),
    plot.title = element_text(family = "Gill Sans MT Condensed", hjust = 0.5)
  ) +
  guides(fill = guide_legend(
    nrow = 2,
    keywidth = unit(3, "mm"),
    keyheight = unit(3, "mm")
  ))
p
|
# Exploratory subsetting of the US states dataset: extract the rows for
# the North East region (state.region == 1) in two equivalent ways.
getwd()
# NOTE(review): hard-coded absolute setwd() is machine-specific; prefer
# project-relative paths.
setwd('C:/Users/cdivy/Desktop/Coursework/DAND/Term_2/EDA_using_R')
# Read stateData into a variable called statesInfo (a dataframe)
statesInfo <- read.csv('stateData.csv')
# Retrieve the subset of data for North East states (like Connecticut),
# i.e. rows where state.region == 1.
stateSubset <- subset(statesInfo,state.region==1)
head(stateSubset,2)
dim(stateSubset)
# Another way to subset the dataframe - dataSet[ROWS, COLUMNS].
# Leaving COLUMNS blank keeps all columns.
stateSubsetBracket <-statesInfo[statesInfo$state.region ==1, ]
head(stateSubsetBracket,2)
dim(stateSubsetBracket)
|
/R_Practice_code/State_data_analysis/state_data_analysis.r
|
no_license
|
divyachandramouli/Practice_code
|
R
| false
| false
| 582
|
r
|
getwd()
# NOTE(review): hard-coded absolute Windows path -- this script only runs on
# the author's machine; prefer project-relative paths or here::here().
setwd('C:/Users/cdivy/Desktop/Coursework/DAND/Term_2/EDA_using_R')
# Read stateData into a variable called statesInfo (a dataframe)
statesInfo <- read.csv('stateData.csv')
#Retrieve subset of data for North east states , states like Connecticut with state.region = 1
stateSubset <- subset(statesInfo,state.region==1)
head(stateSubset,2)
dim(stateSubset)
# Another way to subset the dataframe - dataSet[ROWS,COLUMNS]
# To get all columns, leave it blank
# Produces the same rows as `stateSubset`, built with bracket indexing.
stateSubsetBracket <-statesInfo[statesInfo$state.region ==1, ]
head(stateSubsetBracket,2)
dim(stateSubsetBracket)
|
#' Plot the parition of variance in a simulation response for each measure
#'
#' For each measure, writes one PDF bar chart showing the first-order (Si)
#' and total-order (STi) eFAST sensitivity index of every parameter side by
#' side, with upper error bars built from the supplied confidence intervals.
#' Requires the optional gplots package; if it is not installed, a message is
#' printed and nothing is plotted.
#'
#' NOTE(review): si and sti are indexed below as \code{si[, , MEASURE]}, and
#' errors_si as \code{errors_si[, MEASURE]}, so these are 3-D / 2-D arrays
#' rather than the plain vectors the parameter docs describe -- confirm and
#' update the documentation accordingly.
#'
#' @param RESULTS_FILE_PATH Where the eFAST results were saved to
#' @param PARAMETERS Simulation parameters being explored
#' @param si Vector of Si values calculated in eFAST for all parameters
#' @param sti Vector of STi values calculated in eFAST for all parameters
#' @param errors_si Vector of confidence intervals for Si values for all
#' parameters
#' @param errors_sti Vector of confidence intervals for STi values for all
#' parameters
#' @param MEASURES Simulation output measures
#' @param TIMEPOINT Timepoint being analysed
#' @param TIMEPOINTSCALE Scale in which the timepoints are measures
#'
#' @export
efast_graph_Results <- function(RESULTS_FILE_PATH, PARAMETERS, si, sti,
                                errors_si, errors_sti, MEASURES, TIMEPOINT,
                                TIMEPOINTSCALE) {
  if (requireNamespace("gplots", quietly = TRUE)) {
    # One colour per index type: Si black, STi grey.
    colors <- c("black", "grey50")
    for (MEASURE in seq(length(MEASURES))) {
      # Output filename / title differ only by the optional timepoint suffix.
      if (is.null(TIMEPOINT)) {
        GRAPHFILE <- paste(RESULTS_FILE_PATH, "/", MEASURES[MEASURE], ".pdf",
                           sep = "")
        GRAPHTITLE <- paste("Partitioning of Variance in Simulation Results
                            using eFAST \n Measure: ", MEASURES[MEASURE],
                            sep = "")
      } else {
        GRAPHFILE <- paste(RESULTS_FILE_PATH, "/", MEASURES[MEASURE], "_",
                           TIMEPOINT, ".pdf", sep = "")
        GRAPHTITLE <- paste("Partitioning of Variance in Simulation Results
                            using eFAST \n Measure: ", MEASURES[MEASURE],
                            ". Timepoint: ", TIMEPOINT, " ",
                            TIMEPOINTSCALE, sep = "")
      }
      pdf(GRAPHFILE)
      # Label x-positions: presumably every third slot centres a label under
      # each pair of side-by-side bars -- confirm against barplot2 spacing.
      labelspacing <- seq(2, (length(PARAMETERS) * 3), 3)
      # DATA TO GRAPH RETRIEVES THE PARAMETERS,
      # si AND sti TO BE GRAPHED FROM THE MAIN RESULT SET
      data_to_graph <- data.frame(cbind(si[, , MEASURE], sti[, , MEASURE]),
                                  check.names = FALSE)
      # CONSTRUCT THE ERROR BAR
      high_si <- data_to_graph[, 1] + errors_si[, MEASURE]
      high_sti <- data_to_graph[, 2] + errors_sti[, MEASURE]
      # COMBINE
      errors_high <- cbind(high_si, high_sti)
      colnames(data_to_graph) <- c("Si", "STi")
      # Wide bottom margin to make room for the rotated parameter labels.
      par(mar = c(9, 4, 4, 2) + 0.1)
      # ci.l equals the bar heights themselves, so only the upper whisker of
      # each confidence interval is visible.
      gplots::barplot2(t(data_to_graph), names.arg = PARAMETERS, beside = TRUE,
                       main = GRAPHTITLE,
                       ylim = c(0, 1.0),
                       ylab = "eFAST Sensitivity", col = colors, xaxt = "n",
                       plot.ci = TRUE, ci.u = t(errors_high),
                       ci.l = t(data_to_graph))
      # TEXT SIZE CONTROLLED BY CEX.AXIS
      axis(1, at = labelspacing, labels = PARAMETERS, las = 2, cex.axis = 0.6)
      legend("topleft", title = NULL, c("Si", "STi"), fill = colors)
      dev.off()
    }
    print(paste("Graphs Output to ", RESULTS_FILE_PATH, sep = ""))
  } else {
    print("Use of efast_graph_Results requires the gplots library")
  }
}
#' Plot the Si value for all parameters for multiple simulation timepoints
#'
#' Permits easy comparison of when a parameter may become more influential
#' than others throughout a simulation timecourse
#'
#' Reads one eFAST result CSV per timepoint, collects the "<MEASURE>_Si"
#' column from each, and plots one line per parameter over time. The final
#' entry of \code{PARAMETERS} is treated as the eFAST "dummy" parameter and
#' is excluded from both the plot and the legend.
#'
#' @param FILEPATH Where the eFAST results have been stored
#' @param PARAMETERS Names of simulation parameters being explored
#' @param MEASURES Names of simulation output responses
#' @param EFASTRESULTFILENAME Name of the CSV file output by eFAST Analysis,
#' containing all the Si and STi values
#' @param TIMEPOINTS Timepoints to include in this analysis
#' @param TIMEPOINTSCALE Scale in which the timepoints are measured
#'
#' @export
ploteFASTSiFromTimepointFiles <- function(FILEPATH, PARAMETERS, MEASURES,
                                          EFASTRESULTFILENAME, TIMEPOINTS,
                                          TIMEPOINTSCALE) {
  for (m in 1:length(MEASURES)) {
    MEASURE <- MEASURES[m]
    # Add si onto the measure to get this from the result set
    MEASURELABEL <- paste(MEASURE, "_Si", sep = "")
    si_measureresults <- data.frame()
    for (i in 1:length(TIMEPOINTS)) {
      hour <- TIMEPOINTS[i]
      # Add the timepoint onto the end of the filename
      efastresultfilename_format <- check_file_extension(EFASTRESULTFILENAME)
      EFASTRESULTFILENAME_FULL <- paste(substr(EFASTRESULTFILENAME, 0,
                                               nchar(EFASTRESULTFILENAME) - 4),
                                        "_", hour, ".",
                                        efastresultfilename_format, sep = "")
      # READ IN THE TIMEPOINT DATA
      efast_results <- read.csv(paste(FILEPATH, "/", EFASTRESULTFILENAME_FULL,
                                      sep = ""), header = T)
      # One row per timepoint: the hour followed by the Si of every parameter.
      TIMERESULT <- data.frame(hour, t(efast_results[, MEASURELABEL]))
      si_measureresults <- rbind(si_measureresults, TIMERESULT)
    }
    colnames(si_measureresults) <- c(TIMEPOINTSCALE, PARAMETERS)
    # PLOT THE GRAPH
    GRAPHFILE <- paste(FILEPATH, "/", MEASURE, "_OT.pdf", sep = "")
    pdf(GRAPHFILE, width = 7, height = 7.8)
    GRAPHTITLE <- paste("eFAST First Order Sensitivity Indexes Over Simulation
                        Time\nCell Response Measure: ", MEASURE, sep = "")
    # plot() itself draws parameter 1 (column 2) with pch = 1.
    plot(TIMEPOINTS, si_measureresults[, 2], main = GRAPHTITLE, type = "o",
         lty = 1, ylim = c(0, 1), pch = 1, xaxt = "n", xlab = TIMEPOINTSCALE,
         ylab = "eFAST First-Order Sensitivity Index (Si)")
    # -1 TO EXCLUDE DUMMY
    # BUGFIX: `2:length(PARAMETERS) - 1` parses as `(2:length(PARAMETERS)) - 1`
    # i.e. 1:(n-1), which re-drew parameter 1 and did NOT exclude the dummy.
    # Parenthesised so parameters 2..(n-1) are drawn, with pch matching l.
    for (l in 2:(length(PARAMETERS) - 1)) {
      lines(TIMEPOINTS, si_measureresults[, l + 1], type = "o", lty = 5,
            pch = l)
    }
    axis(1, at = seq(as.numeric(min(TIMEPOINTS)), as.numeric(max(TIMEPOINTS)),
                     by = as.numeric(max(TIMEPOINTS)) / length(TIMEPOINTS)))
    # BUGFIX: pch was `1:length(PARAMETERS) - 1` = 0:(n-1) -- one symbol too
    # many and offset by one from the plotted lines; parenthesised to align
    # legend symbols (1..n-1) with the pch values actually plotted.
    legend("topleft", inset = .0, title = "Parameter",
           PARAMETERS[1:(length(PARAMETERS) - 1)],
           pch = 1:(length(PARAMETERS) - 1), cex = 0.75)
    dev.off()
  }
}
|
/R/efast_plotting.R
|
no_license
|
jc1571/spartan
|
R
| false
| false
| 6,063
|
r
|
#' Plot the parition of variance in a simulation response for each measure
#'
#' For each measure, writes one PDF bar chart showing the first-order (Si)
#' and total-order (STi) eFAST sensitivity index of every parameter side by
#' side, with upper error bars built from the supplied confidence intervals.
#' Requires the optional gplots package; if it is not installed, a message is
#' printed and nothing is plotted.
#'
#' NOTE(review): si and sti are indexed below as \code{si[, , MEASURE]}, and
#' errors_si as \code{errors_si[, MEASURE]}, so these are 3-D / 2-D arrays
#' rather than the plain vectors the parameter docs describe -- confirm and
#' update the documentation accordingly.
#'
#' @param RESULTS_FILE_PATH Where the eFAST results were saved to
#' @param PARAMETERS Simulation parameters being explored
#' @param si Vector of Si values calculated in eFAST for all parameters
#' @param sti Vector of STi values calculated in eFAST for all parameters
#' @param errors_si Vector of confidence intervals for Si values for all
#' parameters
#' @param errors_sti Vector of confidence intervals for STi values for all
#' parameters
#' @param MEASURES Simulation output measures
#' @param TIMEPOINT Timepoint being analysed
#' @param TIMEPOINTSCALE Scale in which the timepoints are measures
#'
#' @export
efast_graph_Results <- function(RESULTS_FILE_PATH, PARAMETERS, si, sti,
                                errors_si, errors_sti, MEASURES, TIMEPOINT,
                                TIMEPOINTSCALE) {
  if (requireNamespace("gplots", quietly = TRUE)) {
    # One colour per index type: Si black, STi grey.
    colors <- c("black", "grey50")
    for (MEASURE in seq(length(MEASURES))) {
      # Output filename / title differ only by the optional timepoint suffix.
      if (is.null(TIMEPOINT)) {
        GRAPHFILE <- paste(RESULTS_FILE_PATH, "/", MEASURES[MEASURE], ".pdf",
                           sep = "")
        GRAPHTITLE <- paste("Partitioning of Variance in Simulation Results
                            using eFAST \n Measure: ", MEASURES[MEASURE],
                            sep = "")
      } else {
        GRAPHFILE <- paste(RESULTS_FILE_PATH, "/", MEASURES[MEASURE], "_",
                           TIMEPOINT, ".pdf", sep = "")
        GRAPHTITLE <- paste("Partitioning of Variance in Simulation Results
                            using eFAST \n Measure: ", MEASURES[MEASURE],
                            ". Timepoint: ", TIMEPOINT, " ",
                            TIMEPOINTSCALE, sep = "")
      }
      pdf(GRAPHFILE)
      # Label x-positions: presumably every third slot centres a label under
      # each pair of side-by-side bars -- confirm against barplot2 spacing.
      labelspacing <- seq(2, (length(PARAMETERS) * 3), 3)
      # DATA TO GRAPH RETRIEVES THE PARAMETERS,
      # si AND sti TO BE GRAPHED FROM THE MAIN RESULT SET
      data_to_graph <- data.frame(cbind(si[, , MEASURE], sti[, , MEASURE]),
                                  check.names = FALSE)
      # CONSTRUCT THE ERROR BAR
      high_si <- data_to_graph[, 1] + errors_si[, MEASURE]
      high_sti <- data_to_graph[, 2] + errors_sti[, MEASURE]
      # COMBINE
      errors_high <- cbind(high_si, high_sti)
      colnames(data_to_graph) <- c("Si", "STi")
      # Wide bottom margin to make room for the rotated parameter labels.
      par(mar = c(9, 4, 4, 2) + 0.1)
      # ci.l equals the bar heights themselves, so only the upper whisker of
      # each confidence interval is visible.
      gplots::barplot2(t(data_to_graph), names.arg = PARAMETERS, beside = TRUE,
                       main = GRAPHTITLE,
                       ylim = c(0, 1.0),
                       ylab = "eFAST Sensitivity", col = colors, xaxt = "n",
                       plot.ci = TRUE, ci.u = t(errors_high),
                       ci.l = t(data_to_graph))
      # TEXT SIZE CONTROLLED BY CEX.AXIS
      axis(1, at = labelspacing, labels = PARAMETERS, las = 2, cex.axis = 0.6)
      legend("topleft", title = NULL, c("Si", "STi"), fill = colors)
      dev.off()
    }
    print(paste("Graphs Output to ", RESULTS_FILE_PATH, sep = ""))
  } else {
    print("Use of efast_graph_Results requires the gplots library")
  }
}
#' Plot the Si value for all parameters for multiple simulation timepoints
#'
#' Permits easy comparison of when a parameter may become more influential
#' than others throughout a simulation timecourse
#'
#' Reads one eFAST result CSV per timepoint, collects the "<MEASURE>_Si"
#' column from each, and plots one line per parameter over time. The final
#' entry of \code{PARAMETERS} is treated as the eFAST "dummy" parameter and
#' is excluded from both the plot and the legend.
#'
#' @param FILEPATH Where the eFAST results have been stored
#' @param PARAMETERS Names of simulation parameters being explored
#' @param MEASURES Names of simulation output responses
#' @param EFASTRESULTFILENAME Name of the CSV file output by eFAST Analysis,
#' containing all the Si and STi values
#' @param TIMEPOINTS Timepoints to include in this analysis
#' @param TIMEPOINTSCALE Scale in which the timepoints are measured
#'
#' @export
ploteFASTSiFromTimepointFiles <- function(FILEPATH, PARAMETERS, MEASURES,
                                          EFASTRESULTFILENAME, TIMEPOINTS,
                                          TIMEPOINTSCALE) {
  for (m in 1:length(MEASURES)) {
    MEASURE <- MEASURES[m]
    # Add si onto the measure to get this from the result set
    MEASURELABEL <- paste(MEASURE, "_Si", sep = "")
    si_measureresults <- data.frame()
    for (i in 1:length(TIMEPOINTS)) {
      hour <- TIMEPOINTS[i]
      # Add the timepoint onto the end of the filename
      efastresultfilename_format <- check_file_extension(EFASTRESULTFILENAME)
      EFASTRESULTFILENAME_FULL <- paste(substr(EFASTRESULTFILENAME, 0,
                                               nchar(EFASTRESULTFILENAME) - 4),
                                        "_", hour, ".",
                                        efastresultfilename_format, sep = "")
      # READ IN THE TIMEPOINT DATA
      efast_results <- read.csv(paste(FILEPATH, "/", EFASTRESULTFILENAME_FULL,
                                      sep = ""), header = T)
      # One row per timepoint: the hour followed by the Si of every parameter.
      TIMERESULT <- data.frame(hour, t(efast_results[, MEASURELABEL]))
      si_measureresults <- rbind(si_measureresults, TIMERESULT)
    }
    colnames(si_measureresults) <- c(TIMEPOINTSCALE, PARAMETERS)
    # PLOT THE GRAPH
    GRAPHFILE <- paste(FILEPATH, "/", MEASURE, "_OT.pdf", sep = "")
    pdf(GRAPHFILE, width = 7, height = 7.8)
    GRAPHTITLE <- paste("eFAST First Order Sensitivity Indexes Over Simulation
                        Time\nCell Response Measure: ", MEASURE, sep = "")
    # plot() itself draws parameter 1 (column 2) with pch = 1.
    plot(TIMEPOINTS, si_measureresults[, 2], main = GRAPHTITLE, type = "o",
         lty = 1, ylim = c(0, 1), pch = 1, xaxt = "n", xlab = TIMEPOINTSCALE,
         ylab = "eFAST First-Order Sensitivity Index (Si)")
    # -1 TO EXCLUDE DUMMY
    # BUGFIX: `2:length(PARAMETERS) - 1` parses as `(2:length(PARAMETERS)) - 1`
    # i.e. 1:(n-1), which re-drew parameter 1 and did NOT exclude the dummy.
    # Parenthesised so parameters 2..(n-1) are drawn, with pch matching l.
    for (l in 2:(length(PARAMETERS) - 1)) {
      lines(TIMEPOINTS, si_measureresults[, l + 1], type = "o", lty = 5,
            pch = l)
    }
    axis(1, at = seq(as.numeric(min(TIMEPOINTS)), as.numeric(max(TIMEPOINTS)),
                     by = as.numeric(max(TIMEPOINTS)) / length(TIMEPOINTS)))
    # BUGFIX: pch was `1:length(PARAMETERS) - 1` = 0:(n-1) -- one symbol too
    # many and offset by one from the plotted lines; parenthesised to align
    # legend symbols (1..n-1) with the pch values actually plotted.
    legend("topleft", inset = .0, title = "Parameter",
           PARAMETERS[1:(length(PARAMETERS) - 1)],
           pch = 1:(length(PARAMETERS) - 1), cex = 0.75)
    dev.off()
  }
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(shinythemes)
library(leaflet)
library(geojsonio)
library(plotly)
library(reshape2)
# require(devtools)
# install_github("ramnathv/rCharts")
# NOTE(review): prefer library(rCharts) -- require() returns FALSE instead of
# erroring when the package is missing, deferring the failure to first use.
require(rCharts)
#### Load in the data ####
# extract_data.R presumably defines covid19_cases, aggregated_confirmed /
# aggregated_deaths / aggregated_recover and Time.default used below -- confirm.
source('extract_data.R')
worldcountry = geojson_read("custom.geo.json", what = "sp")
# ISO country-code lookup; keep the 3- and 2-letter codes, renamed for joining.
countrycode <- read.csv('country code.csv') %>%
  select(name, alpha.3, alpha.2) %>%
  rename(Countrycode = alpha.3,
         Country = name)
covid19_cases %>%
  left_join(countrycode, by = 'Country') -> covid19_cases
# Keep only polygons for which we have case data, then reorder the case rows
# to match the polygon order so leaflet colours/labels line up row-by-row.
plot_map <- worldcountry[worldcountry$adm0_a3 %in% covid19_cases$Countrycode, ]
covid19_cases = covid19_cases[match(plot_map$adm0_a3, covid19_cases$Countrycode),] %>%
  as.data.frame()
# top 10 countries--------------------
# Dot chart of the ten countries with the largest value of one indicator.
# `type` is "Confirmed", "Deaths" or "Recovered" -- both a column of the
# global `covid19_cases` data frame and the x-axis label. Returns a ggplot.
top10plot <- function(type){
  # One fixed colour per indicator (anything else falls through to blue).
  point_colour <- ifelse(type == 'Confirmed', 'darkseagreen4',
                         ifelse(type == 'Deaths', 'goldenrod1', 'deepskyblue'))
  ranked <- covid19_cases[, c('Country', type)]
  colnames(ranked) <- c('Country', 'Cases')
  # Keep the ten largest counts, then order the factor so the axis is sorted.
  ranked <- ranked[order(ranked$Cases, decreasing = T), ] %>%
    top_n(10, Cases)
  ranked$Country <- reorder(ranked$Country, ranked$Cases)
  ggplot(ranked, aes(x = Cases, y = Country)) +
    geom_point(size = 5, color = point_colour) +
    theme_bw() +
    theme(panel.grid.major.x = element_blank(),
          panel.grid.minor.x = element_blank(),
          panel.grid.major.y = element_line(color = 'grey60', linetype = 'dashed'),
          axis.text.y.left = element_text(size = 11, face = 'italic')) +
    xlab(type) +
    ylab('')
}
# time series ################
# Build a long-format (date, variable, value) data frame of Confirmed/Deaths/
# Recover counts for one country, either cumulative or as daily differences.
# Relies on the globals aggregated_confirmed / aggregated_deaths /
# aggregated_recover and Time.default loaded from extract_data.R.
form_timeseriesdata <- function(Country, Type){
  if(Type == 'Cumulative'){
    # Cumulative series starts at the first reporting date, 2020-01-22.
    date.df <- data.frame(date = seq.Date(from = as.Date('2020-01-22'),
                                          to = Time.default, by = 'day'))
    cases.df <- data.frame(Confirmed = as.numeric(aggregated_confirmed %>%
                                                    filter(`Country/Region` == Country)%>%
                                                    select(-1)),
                           Deaths = as.numeric(aggregated_deaths %>%
                                                 filter(`Country/Region` == Country)%>%
                                                 select(-1)),
                           Recover = as.numeric(aggregated_recover %>%
                                                  filter(`Country/Region` == Country)%>%
                                                  select(-1)))
  }
  else{
    # Daily-new series: diff() shortens each series by one, so start one day
    # later than the cumulative series.
    date.df <- data.frame(date = seq.Date(from = as.Date('2020-01-23'),
                                          to = Time.default, by = 'day'))
    cases.df <- data.frame(Confirmed = diff(as.numeric(aggregated_confirmed %>%
                                                         filter(`Country/Region` == Country)%>%
                                                         select(-1))),
                           Deaths = diff(as.numeric(aggregated_deaths %>%
                                                      filter(`Country/Region` == Country)%>%
                                                      select(-1))),
                           Recover = diff(as.numeric(aggregated_recover %>%
                                                       filter(`Country/Region` == Country)%>%
                                                       select(-1))))
  }
  # Wide -> long: one row per (date, series) pair for plotting.
  df <- bind_cols(date.df, cases.df)
  df <- df %>% reshape2::melt(id = 'date')
  return(df)
}
# Scratch chart built at app start-up.
# NOTE(review): try1/n1 are never used by the app -- the server builds its own
# chart in renderChart2; consider removing this to avoid work on launch.
form_timeseriesdata('United States', 'Daily New') -> try1
n1 <- nPlot(value ~ date, group = 'variable', type = 'lineWithFocusChart', data = try1)
n1$xAxis(tickFormat="#!function(d) {return d3.time.format('%Y-%m-%d')(new Date( d * 86400000 ));}!#" )
n1$params$width = 501
# Define UI for application ##-----------
# Fixed-top navbar with three tabs: 'Home' (landing banner, intro text and
# navigation buttons), 'Data on Map' (leaflet map with an on-demand choropleth,
# global totals and a top-10 panel) and 'Timeline for Covid-19' (per-country
# cumulative / daily-new nvd3 line chart with country + display-mode controls).
ui <- navbarPage(title = 'Worldwide Covid-19 Data',
id = 'navBar',
position = 'fixed-top',
selected = 'home',
collapsible = TRUE,
theme = shinytheme('cyborg'),
windowTitle = 'CIDER Covid-19 Dashboard',
header = tags$style(type = 'text/css',
"body {padding-top:70px;}"),
footer = includeHTML('./www/footer.html'),
tags$head(includeCSS('mystyle.css')),
tags$link(href = 'https://fonts.googleapis.com/css?family=Cinzel Decorative', rel='stylesheet'),
####### Home #######
tabPanel(title = 'Home',
value = 'home',
icon = icon('home'),
# background-----
fluidRow(
HTML("
<section class = 'banner'>
<h2 class='parallax'> COVID-19 VISUALIZATION </h2>
<p class='parallax_description'> A tool to look up COVID-19 statistics at country level. </p>
</section>")
),
# introduction ------------
tags$hr(),
fluidRow(
column(3),
column(6, br(), br(), h1('INTRODUCTION', style = 'text-align: center;'), br(),
h5('The COVID-19 pandemic, also known as the coronavirus pandemic,
is an ongoing pandemic of coronavirus disease 2019 (COVID-19) caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2),
first identified in December 2019 in Wuhan, China.
The World Health Organization declared the outbreak a Public Health Emergency of International Concern in January 2020 and a pandemic in March 2020.',
style = 'text-align: center;')
),
column(3)
),
br(),
br(),
tags$hr(),
# navigation --------------
fluidRow(
column(3),
column(6,
br(), br(), h1('What you will find here', style = 'text-align: center;'), br(),
h5('The Covid-19 data at country level will be presented from two dimensions of space and time',
style = 'text-align: center;'),
br(), br(),
tags$div(align = 'center',
tags$div(class = 'center', id = 'map-1',
style = 'display: inline-block; width: 225px',
icon('map-marked-alt'),
br(),
br(),
tags$a(class = 'btn',
onclick="$('li:eq(3) a').tab('show')",
'View data on map')),
tags$div(class = 'vertical'),
tags$div(class = 'center',
style = 'display: inline-block; width: 225px',
icon('clock'),
br(),
br(),
tags$a(class = 'btn',
onclick="$('li:eq(4) a').tab('show')",
'View timeline graph')
)
)),
column(3)
)
),
# Map -----------
tabPanel(title = 'Data on Map',
id = 'map',
icon = icon('globe-asia'),
fluidRow(tags$div(class = 'Map',
leafletOutput('Map', width = '100%', height = '100%'),
absolutePanel(class = 'apanel', top = 30, left = 50, draggable = TRUE,
width = 300, height = 'auto',
h3('Global Cases', align = 'left', class = 'Global_num'),
h4(textOutput('Globalconfirm'), align = 'left', class = 'Global_num'),
h5(textOutput('Globaldeaths'), align = 'left', class = 'Global_num'),
h6(textOutput('Globalrecover'), align = 'left', class = 'Global_num'),
p(em(paste('Cut off at', as.character(Time.default))), align = 'left'),
br()),
absolutePanel(class = 'apanel', top = 30, right = 10, draggable = TRUE,
width = 350, height = 'auto',
selectInput(inputId = 'Index', label = 'Choose an indicator',
choices = list('Confirmed Cases' = 'Confirmed',
'Death Cases' = 'Deaths',
'Recover Cases' = 'Recovered')),
h3('Top 10 Countries', align = 'right', class = 'Global_num'),
plotOutput('topten'),
br(),
actionButton('Draw', label = 'Click me to see choropleth', width = '100%'),
checkboxInput('legend', 'Show Legend', TRUE, width = '100%'),
helpText(strong('Instruction:'), br(),
'Step1: Choose the indicator you are concerned. Then number of top 10 countries shows',
br(),
'Step2: Click the button above to see choropleth on map',
br(),
'Step3: Move the cursor on the country area to see specific numbers')
)
)
)
),
###### Timeline #########
tabPanel(title = 'Timeline for Covid-19',
id = 'timeline',
icon = icon('chart-line'),
sidebarLayout(sidebarPanel = sidebarPanel(h3('Control Widget'),
selectInput('Countryselect', label = 'Choose a country', choices = sort(covid19_cases$Country)),
radioButtons('displaymethod', 'Choose a display method', choices = c('Cumulative', 'Daily New')),
helpText(strong('Instruction:'), br(), 'Step1: Choose the country you want to look through', br(),
'Step2: Choose the way data display, cumulative or daily new cases?', br(),
'Step3: Interact with chart, click button on right top to see Confirme, Deaths or Recover number', br(),
'Step4: Click the mini graph below and choose a time interval. Then you can drag it and focus on any specific period')
),
mainPanel = mainPanel(h2(textOutput('Title')),
br(),
br(),
fluidRow(tags$div(class = 'linegrapharea',
showOutput('linegraph', 'nvd3')
)
)
)
)
)
)
# Define server -----------------
server <- function(input, output, session) {
  ####### Reactive Data ###########
  ####### Color for Map ###########
  # Quantile palette over the selected indicator; hues match top10plot().
  colmap <- reactive({
    if(input$Index == 'Confirmed'){colorQuantile('Greens', domain = covid19_cases[, input$Index])}
    else if(input$Index == 'Deaths'){colorQuantile('Oranges', domain = covid19_cases[, input$Index])}
    else {colorQuantile('Blues', domain = covid19_cases[, input$Index])}
  })
  ####### Map Output ##################
  # Base map only; polygons are added on demand via leafletProxy below.
  output$Map <- renderLeaflet({
    leaflet(plot_map) %>%
      addProviderTiles(provider = providers$CartoDB.Positron) %>%
      setView(-0, 30, zoom = 3)
  })
  # Redraw the choropleth for the currently selected indicator whenever the
  # user clicks the 'Draw' button. Rows of covid19_cases were aligned to the
  # plot_map polygon order at load time, so positional indexing matches.
  observeEvent(input$Draw, {
    pal <- colmap()
    leafletProxy('Map', data = plot_map) %>%
      clearShapes() %>%
      addPolygons(color = 'white',
                  weight = 1,
                  smoothFactor = 0.1,
                  fillOpacity = 0.5,
                  fillColor = ~pal(covid19_cases[, input$Index]),
                  # NOTE(review): sprintf '%i' requires an integer column --
                  # confirm the case counts are integers, not doubles.
                  label = sprintf('<strong>%s %s:</strong><br/>%i', covid19_cases$Country, input$Index, covid19_cases[, input$Index]) %>% lapply(htmltools::HTML),
                  highlightOptions = highlightOptions(color = 'black', weight = 2, bringToFront = T),
                  labelOptions = labelOptions(
                    style = list("font-weight" = "normal", padding = "3px 8px"),
                    textsize = "15px", direction = "auto")) %>%
      addMiniMap(position = 'bottomleft')-> proxy
    proxy %>% clearControls()
    if (input$legend){
      proxy %>%
        addLegend(position = 'bottomright',
                  pal = pal,
                  values = covid19_cases[, input$Index],
                  opacity = 0.8)
    }
  })
  ######### Top 10 groph ########
  output$topten <- renderPlot({
    top10plot(input$Index)
  })
  ######### text ########
  # Global totals shown in the draggable panel on the map tab.
  output$Globalconfirm <- renderText({
    paste(prettyNum(sum(covid19_cases$Confirmed), big.mark = ','), 'Confirmed')
  })
  output$Globaldeaths <- renderText({
    paste(prettyNum(sum(covid19_cases$Deaths), big.mark = ','), 'Deaths')
  })
  output$Globalrecover <- renderText({
    paste(prettyNum(sum(covid19_cases$Recovered), big.mark = ','), 'Recover')
  })
  # timeline output------------
  output$Title <- renderText({
    paste('Timelines of Covid 19 in', input$Countryselect, '(', input$displaymethod, ')')
  })
  # NOTE(review): this reactive is never used -- output$linegraph below calls
  # form_timeseriesdata() directly; either use dat_timeline() there (to get
  # reactive caching) or remove this definition.
  dat_timeline <- reactive({
    form_timeseriesdata(input$Countryselect, input$displaymethod)
  })
  output$linegraph <- renderChart2({
    p <- nPlot(value ~ date, group = 'variable', type = 'lineWithFocusChart', data = form_timeseriesdata(input$Countryselect, input$displaymethod),
               width = session$clientData[["output_plot1_width"]], height = session$clientData[["output_plot1_height"]])
    p$xAxis(tickFormat="#!function(d) {return d3.time.format('%Y-%m-%d')(new Date( d * 86400000 ));}!#")
    # p$params$width <- 1200
    # p$params$height <- 800
    return(p)
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
GuiHAO-GARY/COVID-19-country-level-data
|
R
| false
| false
| 17,220
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(shinythemes)
library(leaflet)
library(geojsonio)
library(plotly)
library(reshape2)
# require(devtools)
# install_github("ramnathv/rCharts")
require(rCharts)
#### Load in the data ####
source('extract_data.R')
worldcountry = geojson_read("custom.geo.json", what = "sp")
countrycode <- read.csv('country code.csv') %>%
select(name, alpha.3, alpha.2) %>%
rename(Countrycode = alpha.3,
Country = name)
covid19_cases %>%
left_join(countrycode, by = 'Country') -> covid19_cases
plot_map <- worldcountry[worldcountry$adm0_a3 %in% covid19_cases$Countrycode, ]
covid19_cases = covid19_cases[match(plot_map$adm0_a3, covid19_cases$Countrycode),] %>%
as.data.frame()
# top 10 countries--------------------
# Dot chart of the ten countries with the largest value of one indicator.
# `type` is "Confirmed", "Deaths" or "Recovered" -- both a column of the
# global `covid19_cases` data frame and the x-axis label. Returns a ggplot.
top10plot <- function(type){
  # One fixed colour per indicator (anything else falls through to blue).
  point_colour <- ifelse(type == 'Confirmed', 'darkseagreen4',
                         ifelse(type == 'Deaths', 'goldenrod1', 'deepskyblue'))
  ranked <- covid19_cases[, c('Country', type)]
  colnames(ranked) <- c('Country', 'Cases')
  # Keep the ten largest counts, then order the factor so the axis is sorted.
  ranked <- ranked[order(ranked$Cases, decreasing = T), ] %>%
    top_n(10, Cases)
  ranked$Country <- reorder(ranked$Country, ranked$Cases)
  ggplot(ranked, aes(x = Cases, y = Country)) +
    geom_point(size = 5, color = point_colour) +
    theme_bw() +
    theme(panel.grid.major.x = element_blank(),
          panel.grid.minor.x = element_blank(),
          panel.grid.major.y = element_line(color = 'grey60', linetype = 'dashed'),
          axis.text.y.left = element_text(size = 11, face = 'italic')) +
    xlab(type) +
    ylab('')
}
# time series ################
# Build a long-format (date, variable, value) data frame of Confirmed/Deaths/
# Recover counts for one country, either cumulative or as daily differences.
# Relies on the globals aggregated_confirmed / aggregated_deaths /
# aggregated_recover and Time.default loaded from extract_data.R.
form_timeseriesdata <- function(Country, Type){
  if(Type == 'Cumulative'){
    # Cumulative series starts at the first reporting date, 2020-01-22.
    date.df <- data.frame(date = seq.Date(from = as.Date('2020-01-22'),
                                          to = Time.default, by = 'day'))
    cases.df <- data.frame(Confirmed = as.numeric(aggregated_confirmed %>%
                                                    filter(`Country/Region` == Country)%>%
                                                    select(-1)),
                           Deaths = as.numeric(aggregated_deaths %>%
                                                 filter(`Country/Region` == Country)%>%
                                                 select(-1)),
                           Recover = as.numeric(aggregated_recover %>%
                                                  filter(`Country/Region` == Country)%>%
                                                  select(-1)))
  }
  else{
    # Daily-new series: diff() shortens each series by one, so start one day
    # later than the cumulative series.
    date.df <- data.frame(date = seq.Date(from = as.Date('2020-01-23'),
                                          to = Time.default, by = 'day'))
    cases.df <- data.frame(Confirmed = diff(as.numeric(aggregated_confirmed %>%
                                                         filter(`Country/Region` == Country)%>%
                                                         select(-1))),
                           Deaths = diff(as.numeric(aggregated_deaths %>%
                                                      filter(`Country/Region` == Country)%>%
                                                      select(-1))),
                           Recover = diff(as.numeric(aggregated_recover %>%
                                                       filter(`Country/Region` == Country)%>%
                                                       select(-1))))
  }
  # Wide -> long: one row per (date, series) pair for plotting.
  df <- bind_cols(date.df, cases.df)
  df <- df %>% reshape2::melt(id = 'date')
  return(df)
}
# Scratch chart built at app start-up.
# NOTE(review): try1/n1 are never used by the app -- the server builds its own
# chart in renderChart2; consider removing this to avoid work on launch.
form_timeseriesdata('United States', 'Daily New') -> try1
n1 <- nPlot(value ~ date, group = 'variable', type = 'lineWithFocusChart', data = try1)
n1$xAxis(tickFormat="#!function(d) {return d3.time.format('%Y-%m-%d')(new Date( d * 86400000 ));}!#" )
n1$params$width = 501
# Define UI for application ##-----------
ui <- navbarPage(title = 'Worldwide Covid-19 Data',
id = 'navBar',
position = 'fixed-top',
selected = 'home',
collapsible = TRUE,
theme = shinytheme('cyborg'),
windowTitle = 'CIDER Covid-19 Dashboard',
header = tags$style(type = 'text/css',
"body {padding-top:70px;}"),
footer = includeHTML('./www/footer.html'),
tags$head(includeCSS('mystyle.css')),
tags$link(href = 'https://fonts.googleapis.com/css?family=Cinzel Decorative', rel='stylesheet'),
####### Home #######
tabPanel(title = 'Home',
value = 'home',
icon = icon('home'),
# background-----
fluidRow(
HTML("
<section class = 'banner'>
<h2 class='parallax'> COVID-19 VISUALIZATION </h2>
<p class='parallax_description'> A tool to look up COVID-19 statistics at country level. </p>
</section>")
),
# introduction ------------
tags$hr(),
fluidRow(
column(3),
column(6, br(), br(), h1('INTRODUCTION', style = 'text-align: center;'), br(),
h5('The COVID-19 pandemic, also known as the coronavirus pandemic,
is an ongoing pandemic of coronavirus disease 2019 (COVID-19) caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2),
first identified in December 2019 in Wuhan, China.
The World Health Organization declared the outbreak a Public Health Emergency of International Concern in January 2020 and a pandemic in March 2020.',
style = 'text-align: center;')
),
column(3)
),
br(),
br(),
tags$hr(),
# navigation --------------
fluidRow(
column(3),
column(6,
br(), br(), h1('What you will find here', style = 'text-align: center;'), br(),
h5('The Covid-19 data at country level will be presented from two dimensions of space and time',
style = 'text-align: center;'),
br(), br(),
tags$div(align = 'center',
tags$div(class = 'center', id = 'map-1',
style = 'display: inline-block; width: 225px',
icon('map-marked-alt'),
br(),
br(),
tags$a(class = 'btn',
onclick="$('li:eq(3) a').tab('show')",
'View data on map')),
tags$div(class = 'vertical'),
tags$div(class = 'center',
style = 'display: inline-block; width: 225px',
icon('clock'),
br(),
br(),
tags$a(class = 'btn',
onclick="$('li:eq(4) a').tab('show')",
'View timeline graph')
)
)),
column(3)
)
),
# Map -----------
tabPanel(title = 'Data on Map',
id = 'map',
icon = icon('globe-asia'),
fluidRow(tags$div(class = 'Map',
leafletOutput('Map', width = '100%', height = '100%'),
absolutePanel(class = 'apanel', top = 30, left = 50, draggable = TRUE,
width = 300, height = 'auto',
h3('Global Cases', align = 'left', class = 'Global_num'),
h4(textOutput('Globalconfirm'), align = 'left', class = 'Global_num'),
h5(textOutput('Globaldeaths'), align = 'left', class = 'Global_num'),
h6(textOutput('Globalrecover'), align = 'left', class = 'Global_num'),
p(em(paste('Cut off at', as.character(Time.default))), align = 'left'),
br()),
absolutePanel(class = 'apanel', top = 30, right = 10, draggable = TRUE,
width = 350, height = 'auto',
selectInput(inputId = 'Index', label = 'Choose an indicator',
choices = list('Confirmed Cases' = 'Confirmed',
'Death Cases' = 'Deaths',
'Recover Cases' = 'Recovered')),
h3('Top 10 Countries', align = 'right', class = 'Global_num'),
plotOutput('topten'),
br(),
actionButton('Draw', label = 'Click me to see choropleth', width = '100%'),
checkboxInput('legend', 'Show Legend', TRUE, width = '100%'),
helpText(strong('Instruction:'), br(),
'Step1: Choose the indicator you are concerned. Then number of top 10 countries shows',
br(),
'Step2: Click the button above to see choropleth on map',
br(),
'Step3: Move the cursor on the country area to see specific numbers')
)
)
)
),
###### Timeline #########
tabPanel(title = 'Timeline for Covid-19',
id = 'timeline',
icon = icon('chart-line'),
sidebarLayout(sidebarPanel = sidebarPanel(h3('Control Widget'),
selectInput('Countryselect', label = 'Choose a country', choices = sort(covid19_cases$Country)),
radioButtons('displaymethod', 'Choose a display method', choices = c('Cumulative', 'Daily New')),
helpText(strong('Instruction:'), br(), 'Step1: Choose the country you want to look through', br(),
'Step2: Choose the way data display, cumulative or daily new cases?', br(),
'Step3: Interact with chart, click button on right top to see Confirme, Deaths or Recover number', br(),
'Step4: Click the mini graph below and choose a time interval. Then you can drag it and focus on any specific period')
),
mainPanel = mainPanel(h2(textOutput('Title')),
br(),
br(),
fluidRow(tags$div(class = 'linegrapharea',
showOutput('linegraph', 'nvd3')
)
)
)
)
)
)
# Define server -----------------
server <- function(input, output, session) {
####### Reactive Data ###########
####### Color for Map ###########
colmap <- reactive({
if(input$Index == 'Confirmed'){colorQuantile('Greens', domain = covid19_cases[, input$Index])}
else if(input$Index == 'Deaths'){colorQuantile('Oranges', domain = covid19_cases[, input$Index])}
else {colorQuantile('Blues', domain = covid19_cases[, input$Index])}
})
####### Map Output ##################
output$Map <- renderLeaflet({
leaflet(plot_map) %>%
addProviderTiles(provider = providers$CartoDB.Positron) %>%
setView(-0, 30, zoom = 3)
})
observeEvent(input$Draw, {
pal <- colmap()
leafletProxy('Map', data = plot_map) %>%
clearShapes() %>%
addPolygons(color = 'white',
weight = 1,
smoothFactor = 0.1,
fillOpacity = 0.5,
fillColor = ~pal(covid19_cases[, input$Index]),
label = sprintf('<strong>%s %s:</strong><br/>%i', covid19_cases$Country, input$Index, covid19_cases[, input$Index]) %>% lapply(htmltools::HTML),
highlightOptions = highlightOptions(color = 'black', weight = 2, bringToFront = T),
labelOptions = labelOptions(
style = list("font-weight" = "normal", padding = "3px 8px"),
textsize = "15px", direction = "auto")) %>%
addMiniMap(position = 'bottomleft')-> proxy
proxy %>% clearControls()
if (input$legend){
proxy %>%
addLegend(position = 'bottomright',
pal = pal,
values = covid19_cases[, input$Index],
opacity = 0.8)
}
})
######### Top 10 groph ########
output$topten <- renderPlot({
top10plot(input$Index)
})
######### text ########
output$Globalconfirm <- renderText({
paste(prettyNum(sum(covid19_cases$Confirmed), big.mark = ','), 'Confirmed')
})
output$Globaldeaths <- renderText({
paste(prettyNum(sum(covid19_cases$Deaths), big.mark = ','), 'Deaths')
})
output$Globalrecover <- renderText({
paste(prettyNum(sum(covid19_cases$Recovered), big.mark = ','), 'Recover')
})
# timeline output------------
output$Title <- renderText({
paste('Timelines of Covid 19 in', input$Countryselect, '(', input$displaymethod, ')')
})
dat_timeline <- reactive({
form_timeseriesdata(input$Countryselect, input$displaymethod)
})
output$linegraph <- renderChart2({
p <- nPlot(value ~ date, group = 'variable', type = 'lineWithFocusChart', data = form_timeseriesdata(input$Countryselect, input$displaymethod),
width = session$clientData[["output_plot1_width"]], height = session$clientData[["output_plot1_height"]])
p$xAxis(tickFormat="#!function(d) {return d3.time.format('%Y-%m-%d')(new Date( d * 86400000 ));}!#")
# p$params$width <- 1200
# p$params$height <- 800
return(p)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
# DESCRIPTION -------------------------------------------------------------------------------------------------
# Author: Elizabeth Hutchins
# Date: May 14th, 2020
#Purpose: summarize DE results
#Imports:
#
#Exports:
# Figure 3
# load packages -------------------------------------------------------------------------------------------------
library(tidyverse) #basis for data manipulation
library(ggpubr) #publication ready plots
library(UpSetR) #venn diagram alternative
library(ggrepel) #adds non-overlapping text to ggplot
library(EnhancedVolcano) #enhanced volcano plots
library(ComplexHeatmap) #for matrix function going into upset plot
library(devtools) #load R scripts from github
# NOTE(review): source_gist() downloads and executes remote code at run time;
# pin a gist revision (or vendor the script) for reproducibility and safety.
source_gist("c579c4ddc06fd2ceb121e690dd9df186") #color palettes
#color palettes
myPalette <- getPalette(21, "multi")
# getPalette() presumably opens a plotting device to preview the palette;
# dev.off() closes it -- confirm.
dev.off()
names(myPalette) <- NULL
diseasePalette <- myPalette[c(1:7)]
names(diseasePalette) <- c("Healthy Control", "Genetic Unaffected",
                           "Idiopathic PD", "Genetic PD",
                           "Prodromal", "SWEDD", "Other ND")
# NOTE(review): "Prodromal" and "SWEDD" each appear twice in the names below
# (once via diseasePalette, once added again), so name-based lookup returns
# the FIRST match and the later colours are unreachable by name -- confirm.
genDiseasePalette <- myPalette[c(1:15)]
names(genDiseasePalette) <- c(names(diseasePalette),
                              "GBA+ Affected", "GBA+ Unaffected",
                              "LRRK2+ Affected", "LRRK2+ Unaffected",
                              "Prodromal", "GBA+ Prodromal",
                              "SWEDD", "GBA+ SWEDD")
# import gene names -------------------------------------------------------------------------------------------------
# Gene annotation: keep id/name/type columns, de-duplicated, for joining onto
# the DE result tables below.
tmpG <- read_tsv("sourceFiles/GRCh38_GENCODE29_geneInfo.txt",
                 skip = 1)
genes.anno <- tmpG[,c("gene_id", "gene_name", "gene_type")]
genes.anno <- unique(genes.anno)
#load metadata ----------------------------------------------------------------------------------------------
meta <- read_csv("data/megaMetaTable.csv")
# Keep only samples that passed QC.
meta <- subset(meta, QCflagIR3 == "pass")
names(meta)
# Combined genetic + disease status label, with several string normalisations.
meta$GenDisease_Status <- paste(meta$Genetic_Status, meta$Disease_Status, sep = " ")
meta$GenDisease_Status <- gsub("Genetic PD", "Affected", meta$GenDisease_Status)
meta$GenDisease_Status <- gsub("Genetic ", "", meta$GenDisease_Status)
meta$GenDisease_Status <- gsub("LRRK2-/SNCA-/GBA- ", "", meta$GenDisease_Status)
unique(meta$GenDisease_Status)
#load DE lists ------------------------------------------------------------------------------------------------
#grab file names
fileNames <- list.files(path = "analysis/DE/GBA_LRRK2_idio_sizeMatched", full.names = TRUE, recursive = TRUE, pattern = "_full.tsv$")
fileNames <- append(fileNames[c(1, 6, 8)], "analysis/DE/allPD_vs_allCon_sizeMatched/PD_full.tsv")
fileNames <- append(fileNames, "analysis/DE/HC_prodromal_SWEDD/Prodromal_HC_full.tsv")
fileNames <- append(fileNames, "analysis/DE/HC_prodromal_SWEDD/SWEDD_HC_full.tsv")
fileNames <- append(fileNames, "data/PDBP_case_vs_control_limmaResults.tsv")
#read in files
files <- lapply(fileNames, function(x) read_tsv(x))
# Attach gene annotation to every result table (right_join keeps all DE rows).
files <- lapply(files, function(x) right_join(genes.anno, x))
head(files[[7]])
compName <- sapply(str_split(fileNames, "/"), '[', 4)
compName <- gsub("_full.tsv", "", compName)
compName <- gsub(".tsv", "", compName)
compName[4] <- "PPMI_All"
compName[7] <- "PDBP_All"
#make volcano plots ------------------------------------------------------------------------------------------------
#output volcano plot
plotTitles <- gsub("_", " versus ", compName)
plotTitles <- gsub("GBA", "GBA+ ", plotTitles)
plotTitles <- gsub("LRRK2", "LRRK2+ ", plotTitles)
plotTitles <- gsub("Un", "Unaffected", plotTitles)
plotTitles <- gsub("Idio", "Idiopathic ", plotTitles)
plotTitles <- gsub("HC", "Healthy Control", plotTitles)
plotTitles <- gsub("^PD$", "All PD versus All Unaffected", plotTitles)
plotTitles[7] <- "PDBP Cohort: Case versus Control"
plotTitles[4] <- "PPMI Cohort: Case versus Control"
plotTitles
createVolcano <- function(df, plotTitle, geneLabels) {
  # Build a volcano plot of DE results with fixed significance thresholds
  # (adjusted p < 0.05, |logFC| > 0.1). Only genes listed in geneLabels are
  # labelled; labels are boxed, bold, and connected to their points.
  volcano <- EnhancedVolcano(
    df,
    lab = df$gene_name,
    selectLab = geneLabels,
    boxedLabels = TRUE,
    labFace = "bold",
    drawConnectors = TRUE,
    x = 'logFC',
    y = 'adj.P.Val',
    title = plotTitle,
    ylab = bquote(~-Log[10]~adjusted~italic(P)),
    legendLabels = c('NS', bquote(~Log[2]~ 'fold change'), 'Adjusted p-value',
                     bquote('Adjusted p-value and ' ~Log[2]~ 'fold change')),
    pCutoff = 0.05,
    FCcutoff = 0.1,
    pointSize = 2.5,
    labSize = 3,
    legendPosition = "bottom"
  )
  # Enlarge the title and drop EnhancedVolcano's default subtitle.
  volcano +
    theme(plot.title = element_text(size = 20),
          plot.subtitle = element_blank())
}
# Render one plain volcano plot per comparison and save each as an SVG.
volPlots <- list()
dir.create("Figures/volcanoPlots")
x <- 1
while(x <= length(plotTitles)){
# NOTE(review): `myGenes` is only defined further down in this file (in the
# PD-risk section) — running this script top-to-bottom in a fresh session
# fails here. Confirm intended execution order or move the definition up.
df <- files[[x]] %>%
left_join( myGenes) %>%
mutate(category = factor(category, levels = c("other", "PD_risk")))
df$category[is.na(df$category)] <- "other"
df <- arrange(df, category)
riskgenes <- df %>%
filter(category == "PD_risk" & abs(logFC) > 0.1 & adj.P.Val < 0.05) %>%
transmute(gene_name = gene_name)
# NOTE(review): `df` and `riskgenes` computed above are not used here — the
# plot is built from the raw files[[x]] with an empty label set. Presumably
# leftover from the PD-risk variant of this loop; verify before removing.
volPlot <- createVolcano(files[[x]], plotTitles[[x]], "")
volPlots[[x]] <- volPlot
ggsave(paste("Figures/volcanoPlots/", compName[[x]], "_volcano.svg", sep = ""),
volPlot,
dpi = 600, width = 8, height = 8)
x <- x + 1
}
# volcano plots with the PD risk genes: Nails et al ---------------------------------------------------------------------------------
# create plots with PD risk genes labeled
riskPlot <- read_tsv("data/Nalls_2019_genes_manhattan.txt", col_names = "gene_name")
plotGenes <- unique(riskPlot$gene_name)
plotGenes <- append(plotGenes, c("GBAP1"))
createVolcanoCustom <- function(df, plotTitle, keyvals.colour, geneLabels) {
  # Volcano plot variant with caller-supplied per-point colours
  # (keyvals.colour: named colour vector, names become legend keys).
  # Thresholds match createVolcano: adjusted p < 0.05, |logFC| > 0.1.
  volcano <- EnhancedVolcano(
    df,
    lab = df$gene_name,
    selectLab = geneLabels,
    boxedLabels = TRUE,
    drawConnectors = TRUE,
    x = 'logFC',
    y = 'adj.P.Val',
    title = plotTitle,
    ylab = bquote(~-Log[10]~adjusted~italic(P)),
    colCustom = keyvals.colour,
    #shapeCustom = keyvals.shape,
    colAlpha = 0.6,
    pCutoff = 0.05,
    FCcutoff = 0.1,
    pointSize = 2.5,
    labSize = 3,
    legendPosition = "bottom"
  )
  # Enlarge the title and drop EnhancedVolcano's default subtitle.
  volcano +
    theme(plot.title = element_text(size = 20),
          plot.subtitle = element_blank())
}
volRiskPlots <- list()
myGenes <- data.frame(gene_name = plotGenes,
category = "PD_risk")
dir.create("Figures/PD_risk")
PPMI <- files[[4]] %>%
left_join( myGenes) %>%
mutate(category = factor(category, levels = c("other", "PD_risk")))
PPMI$category[is.na(PPMI$category)] <- "other"
PPMI <- arrange(PPMI, category)
riskgenes <- PPMI %>% filter(category == "PD_risk" & abs(logFC) > 0.1 & adj.P.Val < 0.05) %>%
transmute(gene_name = gene_name)
geneLabels <- riskgenes$gene_name
x <- 1
while(x <= length(compName)){
df <- files[[x]] %>%
left_join( myGenes) %>%
mutate(category = factor(category, levels = c("other", "PD_risk")))
df$category[is.na(df$category)] <- "other"
df <- arrange(df, category)
riskgenes <- df %>%
filter(category == "PD_risk" & abs(logFC) > 0.1 & adj.P.Val < 0.05) %>%
transmute(gene_name = gene_name)
#custom key values for enhanced volcano plot
keyvals <- rep('gray60', nrow(df))
names(keyvals) <- rep('other', nrow(df))
# modify keyvals for genes in PD risk category
keyvals[which(df$category == "PD_risk")] <- '#440154FF'
names(keyvals)[which(df$category == "PD_risk")] <- 'PD_risk'
unique(names(keyvals))
volPlot <- createVolcanoCustom(df, plotTitles[[x]], keyvals, geneLabels)
volPlot <- volPlot +
geom_rug(data = df %>%
filter(category == "PD_risk" & logFC > 0.1 & adj.P.Val < 0.05),
aes(y = -log10(adj.P.Val), color = category),
inherit.aes = FALSE,
sides = "r", outside = FALSE) +
geom_rug(data = df %>%
filter(category == "PD_risk" & logFC < -0.1 & adj.P.Val < 0.05),
aes(y = -log10(adj.P.Val), color = category),
inherit.aes = FALSE,
sides = "l", outside = FALSE)
volRiskPlots[[x]] <- volPlot
ggsave(paste("Figures/volcanoPlots/", compName[[x]], "_volcano_PDrisk.svg", sep = ""),
volPlot,
dpi = 600, width = 8, height = 8)
x <- x + 1
}
|
/figures/Figure3.R
|
no_license
|
zlpsophina/ppmi-rnaseq-wb-paper
|
R
| false
| false
| 8,654
|
r
|
# DESCRIPTION -------------------------------------------------------------------------------------------------
# Author: Elizabeth Hutchins
# Date: May 14th, 2020
#Purpose: summarize DE results
#Imports:
#
#Exports:
# Figure 3
# load packages -------------------------------------------------------------------------------------------------
library(tidyverse) #basis for data manipulation
library(ggpubr) #publication ready plots
library(UpSetR) #venn diagram alternative
library(ggrepel) #adds non-overlapping text to ggplot
library(EnhancedVolcano) #enhanced volcano plots
library(ComplexHeatmap) #for matrix function going into upset plot
library(devtools) #load R scripts from github
source_gist("c579c4ddc06fd2ceb121e690dd9df186") #color palettes
#color palettes
myPalette <- getPalette(21, "multi")
dev.off()
names(myPalette) <- NULL
diseasePalette <- myPalette[c(1:7)]
names(diseasePalette) <- c("Healthy Control", "Genetic Unaffected",
"Idiopathic PD", "Genetic PD",
"Prodromal", "SWEDD", "Other ND")
genDiseasePalette <- myPalette[c(1:15)]
names(genDiseasePalette) <- c(names(diseasePalette),
"GBA+ Affected", "GBA+ Unaffected",
"LRRK2+ Affected", "LRRK2+ Unaffected",
"Prodromal", "GBA+ Prodromal",
"SWEDD", "GBA+ SWEDD")
# import gene names -------------------------------------------------------------------------------------------------
tmpG <- read_tsv("sourceFiles/GRCh38_GENCODE29_geneInfo.txt",
skip = 1)
genes.anno <- tmpG[,c("gene_id", "gene_name", "gene_type")]
genes.anno <- unique(genes.anno)
#load metadata ----------------------------------------------------------------------------------------------
meta <- read_csv("data/megaMetaTable.csv")
meta <- subset(meta, QCflagIR3 == "pass")
names(meta)
meta$GenDisease_Status <- paste(meta$Genetic_Status, meta$Disease_Status, sep = " ")
meta$GenDisease_Status <- gsub("Genetic PD", "Affected", meta$GenDisease_Status)
meta$GenDisease_Status <- gsub("Genetic ", "", meta$GenDisease_Status)
meta$GenDisease_Status <- gsub("LRRK2-/SNCA-/GBA- ", "", meta$GenDisease_Status)
unique(meta$GenDisease_Status)
#load DE lists ------------------------------------------------------------------------------------------------
#grab file names
fileNames <- list.files(path = "analysis/DE/GBA_LRRK2_idio_sizeMatched", full.names = TRUE, recursive = TRUE, pattern = "_full.tsv$")
fileNames <- append(fileNames[c(1, 6, 8)], "analysis/DE/allPD_vs_allCon_sizeMatched/PD_full.tsv")
fileNames <- append(fileNames, "analysis/DE/HC_prodromal_SWEDD/Prodromal_HC_full.tsv")
fileNames <- append(fileNames, "analysis/DE/HC_prodromal_SWEDD/SWEDD_HC_full.tsv")
fileNames <- append(fileNames, "data/PDBP_case_vs_control_limmaResults.tsv")
#read in files
files <- lapply(fileNames, function(x) read_tsv(x))
files <- lapply(files, function(x) right_join(genes.anno, x))
head(files[[7]])
compName <- sapply(str_split(fileNames, "/"), '[', 4)
compName <- gsub("_full.tsv", "", compName)
compName <- gsub(".tsv", "", compName)
compName[4] <- "PPMI_All"
compName[7] <- "PDBP_All"
#make volcano plots ------------------------------------------------------------------------------------------------
#output volcano plot
plotTitles <- gsub("_", " versus ", compName)
plotTitles <- gsub("GBA", "GBA+ ", plotTitles)
plotTitles <- gsub("LRRK2", "LRRK2+ ", plotTitles)
plotTitles <- gsub("Un", "Unaffected", plotTitles)
plotTitles <- gsub("Idio", "Idiopathic ", plotTitles)
plotTitles <- gsub("HC", "Healthy Control", plotTitles)
plotTitles <- gsub("^PD$", "All PD versus All Unaffected", plotTitles)
plotTitles[7] <- "PDBP Cohort: Case versus Control"
plotTitles[4] <- "PPMI Cohort: Case versus Control"
plotTitles
createVolcano <- function(df, plotTitle, geneLabels) {
EnhancedVolcano(df,
lab = df$gene_name,
selectLab = geneLabels,
boxedLabels = TRUE,
labFace = "bold",
drawConnectors = TRUE,
x = 'logFC',
y = 'adj.P.Val',
title = plotTitle,
ylab = bquote(~-Log[10]~adjusted~italic(P)),
legendLabels = c('NS', bquote(~Log[2]~ 'fold change'), 'Adjusted p-value',
bquote('Adjusted p-value and ' ~Log[2]~ 'fold change')),
pCutoff = 0.05,
FCcutoff = 0.1,
pointSize = 2.5,
labSize = 3,
legendPosition = "bottom") +
theme(plot.title = element_text(size = 20),
plot.subtitle = element_blank())
}
volPlots <- list()
dir.create("Figures/volcanoPlots")
x <- 1
while(x <= length(plotTitles)){
df <- files[[x]] %>%
left_join( myGenes) %>%
mutate(category = factor(category, levels = c("other", "PD_risk")))
df$category[is.na(df$category)] <- "other"
df <- arrange(df, category)
riskgenes <- df %>%
filter(category == "PD_risk" & abs(logFC) > 0.1 & adj.P.Val < 0.05) %>%
transmute(gene_name = gene_name)
volPlot <- createVolcano(files[[x]], plotTitles[[x]], "")
volPlots[[x]] <- volPlot
ggsave(paste("Figures/volcanoPlots/", compName[[x]], "_volcano.svg", sep = ""),
volPlot,
dpi = 600, width = 8, height = 8)
x <- x + 1
}
# volcano plots with the PD risk genes: Nails et al ---------------------------------------------------------------------------------
# create plots with PD risk genes labeled
riskPlot <- read_tsv("data/Nalls_2019_genes_manhattan.txt", col_names = "gene_name")
plotGenes <- unique(riskPlot$gene_name)
plotGenes <- append(plotGenes, c("GBAP1"))
createVolcanoCustom <- function(df, plotTitle, keyvals.colour, geneLabels) {
EnhancedVolcano(df,
lab = df$gene_name,
selectLab = geneLabels,
boxedLabels = TRUE,
drawConnectors = TRUE,
x = 'logFC',
y = 'adj.P.Val',
title = plotTitle,
ylab = bquote(~-Log[10]~adjusted~italic(P)),
colCustom = keyvals.colour,
#shapeCustom = keyvals.shape,
colAlpha = 0.6,
pCutoff = 0.05,
FCcutoff = 0.1,
pointSize = 2.5,
labSize = 3,
legendPosition = "bottom") +
theme(plot.title = element_text(size = 20),
plot.subtitle = element_blank())
}
volRiskPlots <- list()
myGenes <- data.frame(gene_name = plotGenes,
category = "PD_risk")
dir.create("Figures/PD_risk")
PPMI <- files[[4]] %>%
left_join( myGenes) %>%
mutate(category = factor(category, levels = c("other", "PD_risk")))
PPMI$category[is.na(PPMI$category)] <- "other"
PPMI <- arrange(PPMI, category)
riskgenes <- PPMI %>% filter(category == "PD_risk" & abs(logFC) > 0.1 & adj.P.Val < 0.05) %>%
transmute(gene_name = gene_name)
geneLabels <- riskgenes$gene_name
x <- 1
while(x <= length(compName)){
df <- files[[x]] %>%
left_join( myGenes) %>%
mutate(category = factor(category, levels = c("other", "PD_risk")))
df$category[is.na(df$category)] <- "other"
df <- arrange(df, category)
riskgenes <- df %>%
filter(category == "PD_risk" & abs(logFC) > 0.1 & adj.P.Val < 0.05) %>%
transmute(gene_name = gene_name)
#custom key values for enhanced volcano plot
keyvals <- rep('gray60', nrow(df))
names(keyvals) <- rep('other', nrow(df))
# modify keyvals for genes in PD risk category
keyvals[which(df$category == "PD_risk")] <- '#440154FF'
names(keyvals)[which(df$category == "PD_risk")] <- 'PD_risk'
unique(names(keyvals))
volPlot <- createVolcanoCustom(df, plotTitles[[x]], keyvals, geneLabels)
volPlot <- volPlot +
geom_rug(data = df %>%
filter(category == "PD_risk" & logFC > 0.1 & adj.P.Val < 0.05),
aes(y = -log10(adj.P.Val), color = category),
inherit.aes = FALSE,
sides = "r", outside = FALSE) +
geom_rug(data = df %>%
filter(category == "PD_risk" & logFC < -0.1 & adj.P.Val < 0.05),
aes(y = -log10(adj.P.Val), color = category),
inherit.aes = FALSE,
sides = "l", outside = FALSE)
volRiskPlots[[x]] <- volPlot
ggsave(paste("Figures/volcanoPlots/", compName[[x]], "_volcano_PDrisk.svg", sep = ""),
volPlot,
dpi = 600, width = 8, height = 8)
x <- x + 1
}
|
#' @author Adam Wheeler adam.wheeler@thermofisher.com
#' @description Acceptance test for \code{logOut()}.
context("Tests for logOut")
# `con` (a live API connection) is expected to be provided by the suite setup.
test_that(paste("test logOut() on semantic version:", con$coreApi$semVer), {
# Logging out of the session should report "Success".
expect_equivalent(logOut(con$coreApi)$success, "Success")
})
|
/tests/testthat/acceptance_tests/test-logOut.R
|
no_license
|
AmundsenJunior/pfsrsdk
|
R
| false
| false
| 263
|
r
|
#' @author Adam Wheeler adam.wheeler@thermofisher.com
#' @description \code Tests for logOut
context("Tests for logOut")
test_that(paste("test logOut() on semantic version:", con$coreApi$semVer), {
expect_equivalent(logOut(con$coreApi)$success, "Success")
})
|
########################## DEV VERSION
# This alignment has ----- exactly at the primer limits. Could be good.
# Probably don't need to use primers at all and don't need to extract subseqs, only seqs, in correct orientation.
# How to extract ref, trim others and keep track of counts for each.
# Also, should alignment be across all seqs of a marker? Prob not. then becomes run dependent. Do within sample only.
# need to rework blastHit table to separate hits on forward and reverse strand.
# TODO: could use same method to improve MID hits and check both ends are marked. (or use different MIDs for each end).
#topHits <- getTopBlastHits("blastOut.markers.tab")
#topHits$strand <- ifelse(topHits$s.end > topHits$s.start, 1,2)
#fMarkerMap <- split(as.character(topHits$query[topHits$strand==1]), topHits$subject[topHits$strand==1])
#rMarkerMap <- split(as.character(topHits$query[topHits$strand==2]), topHits$subject[topHits$strand==2])
#
# Build a count table of aligned sequence variants for one marker/sample pair.
#
# Collects the reads assigned to `thisSample` that hit `thisMarker` on either
# strand, extracts them in marker orientation from the "inputSeqs" blast
# database via fastacmd (strand 2 hits are reverse-complemented by fastacmd),
# aligns the unique reads together with the marker reference using muscle,
# trims the alignment to the span covered by the reference, and tallies read
# counts per trimmed sub-sequence.
#
# Args:
#   thisMarker  marker name; a key of fMarkerMap/rMarkerMap and the name used
#               for the reference sequence in the alignment.
#   thisSample  sample (MID) name; a key of sampleMap.
#   sampleMap   list: sample name -> character vector of read ids.
#   fMarkerMap  list: marker -> read ids hit on the forward strand.
#   rMarkerMap  list: marker -> read ids hit on the reverse strand.
#               BUGFIX: this parameter used to be spelled `rmarkerMap` and was
#               never read — the body referenced a free variable `rMarkerMap`
#               instead. The parameter is now actually used; positional
#               callers are unaffected.
#   markerSeq   reference sequence (single string) for thisMarker.
#
# Returns a data.frame with columns alignedVar (gapped variant), count, and
# var (ungapped variant), sorted by decreasing count; an empty data.frame
# when no reads are found.
#
# Relies on globals `fastacmdPath` and `musclePath`, and on seqinr's
# read.fasta/write.fasta/s2c. Writes intermediate fasta files into the
# working directory.
getSubSeqsTable <- function(thisMarker, thisSample, sampleMap, fMarkerMap, rMarkerMap, markerSeq) {
  varCountTable <- data.frame()
  # Reads for this sample that hit this marker, split by strand of the hit.
  fPairSeqList <- intersect(sampleMap[[thisSample]], fMarkerMap[[thisMarker]])
  rPairSeqList <- intersect(sampleMap[[thisSample]], rMarkerMap[[thisMarker]])
  # Build a fastacmd call extracting the given ids from the "inputSeqs" blast
  # db on the requested strand (1 = plus, 2 = minus) into fileName.
  extractRawSeqsCommand <- function(idList, strand=1, fileName) {
    queryList <- paste(idList , collapse=",")
    fastacmdCommand <- paste(fastacmdPath, "-p F -t T -d", "inputSeqs" , "-S", strand, "-o", fileName, "-s", queryList)
    return(fastacmdCommand)
  }
  fRawSeqs <- rRawSeqs <- list()
  # BUGFIX: the old guard tested length(paste(ids, collapse=",")) > 0, which
  # is always TRUE (paste(character(0), collapse=",") yields "", a length-1
  # vector), so fastacmd could be invoked with an empty query. Guard on the
  # id lists themselves instead.
  if(length(fPairSeqList) > 0) {
    fRawSeqFileName <- "fRawSeqExtract.fasta"
    system( extractRawSeqsCommand(idList=fPairSeqList, strand=1, fileName=fRawSeqFileName) )
    fRawSeqs <- read.fasta(fRawSeqFileName , as.string=T)
  }
  if(length(rPairSeqList) > 0) {
    rRawSeqFileName <- "rRawSeqExtract.fasta"
    # strand 2: fastacmd reverse-complements, so all reads end up in marker orientation.
    system( extractRawSeqsCommand(idList=rPairSeqList, strand=2, fileName=rRawSeqFileName) )
    rRawSeqs <- read.fasta(rRawSeqFileName , as.string=T)
  }
  rawSeqs <- c(fRawSeqs ,rRawSeqs )
  if(length(rawSeqs) < 1) {
    return(varCountTable)  # nothing extracted: empty result
  }
  # Collapse identical reads; rawCount = number of reads per unique sequence.
  rawSeqCountTable <- as.data.frame(table(unlist(rawSeqs)))
  names(rawSeqCountTable) <- c("rawSeq", "rawCount")
  # Write reference + unique variants (each variant named by its own sequence)...
  rawVariantFile <- paste("test", thisMarker, thisSample, "raw.variants.fasta",sep=".")
  write.fasta(as.list(c(markerSeq,as.character(rawSeqCountTable$rawSeq))) ,c(thisMarker,as.character(rawSeqCountTable$rawSeq)),file=rawVariantFile ,open="w")
  # ...and align them all against the reference with muscle.
  rawAlignFile <- paste("test", thisMarker, thisSample, "raw.align.fasta",sep=".")
  muscleCommand <- paste(musclePath, "-in", rawVariantFile , "-out", rawAlignFile , "-diags -quiet" )
  system(muscleCommand)
  # Trim every aligned sequence to the columns spanned by the reference.
  rawAlignment <- read.fasta(rawAlignFile, as.string=T) # do not use read.alignment() - broken
  alignedMarkerSeq <- s2c(rawAlignment[[thisMarker]])
  subStart <- min(grep("-",alignedMarkerSeq ,invert=T))
  subEnd <- max(grep("-",alignedMarkerSeq ,invert=T))
  alignedSubSeqs <- lapply(rawAlignment, FUN=function(x) substr(x[1], subStart, subEnd))
  subAlignFile <- paste("test", thisMarker, thisSample, "sub.align.fasta",sep=".")
  write.fasta(alignedSubSeqs , names(alignedSubSeqs ), file=subAlignFile )
  # Sum read counts per trimmed sub-sequence (trimming can merge variants that
  # only differed outside the reference span).
  alignedSubTable <- data.frame(rawSeq = names(alignedSubSeqs ) , subSeq= as.character(unlist(alignedSubSeqs )))
  combTable <- merge(rawSeqCountTable ,alignedSubTable , by="rawSeq", all.x=T)
  varCount <- by(combTable, as.character(combTable$subSeq), FUN=function(x) sum(x$rawCount))
  varCountTable <- data.frame(alignedVar=names(varCount), count=as.numeric(varCount))
  varCountTable$var <- gsub("-","",varCountTable$alignedVar)
  varCountTable <- varCountTable[order(varCountTable$count,decreasing=T),]
  return(varCountTable)
}
#############################
# pre-specify marker reference sequence.
#markerList <- read.fasta("HLA_MARKERS.fasta",as.string=T)
################
# Default/generic definition of mlgt(): the base implementation just returns
# the object's attributes; real behaviour comes from S4 methods (see the
# "mlgtDesign" method registered at the bottom of this file).
mlgt <- function(object) attributes(object)
setGeneric("mlgt")
# mlgt method for "mlgtDesign" objects.
#
# Pipeline: parse blast hits of reads vs. markers (split by strand), parse
# blast hits of reads vs. MID tags, keep reads tagged on BOTH ends, then for
# every marker x sample pair build a variant count table via getSubSeqsTable(),
# assign allele names to (up to) the top 3 variants, and accumulate per-marker
# and per-run summary tables. Returns a new "mlgtResult" object.
#
# Expects blast output files "blastOut.markers.tab" and "blastOut.rTags.tab"
# in the working directory, plus a global `markerList` of reference sequences.
mlgt.mlgtDesign <- function(designObject) {
# Reads hitting each marker, split by hit strand.
topHits <- getTopBlastHits("blastOut.markers.tab")
topHits$strand <- ifelse(topHits$s.end > topHits$s.start, 1,2)
fMarkerMap <- split(as.character(topHits$query[topHits$strand==1]), topHits$subject[topHits$strand==1])
rMarkerMap <- split(as.character(topHits$query[topHits$strand==2]), topHits$subject[topHits$strand==2])
## NEED TO MAKE SAMPLEMAP WITH HITS TO MID IN BOTH FORWARD AND REVERSE STRANDS like marker hits are split.
## Requires retention of 2 blast hits per sequence.
topSampleHits <- read.delim("blastOut.rTags.tab", header=F)
names(topSampleHits ) <- c("query", "subject", "percentId", "aliLength", "mismatches", "gapOpenings", "q.start","q.end", "s.start","s.end", "p_value", "e_value")
topSampleHits$strand <- ifelse(topSampleHits$s.end > topSampleHits$s.start, 1,2)
fSampleMap <- split(as.character(topSampleHits$query[topSampleHits$strand==1]), topSampleHits$subject[topSampleHits$strand==1])
rSampleMap <- split(as.character(topSampleHits$query[topSampleHits$strand==2]), topSampleHits$subject[topSampleHits$strand==2])
# Combine sample maps: keep only sequences with MID hits in both orientations.
pairedSampleMap <- lapply(names(fSampleMap), FUN=function(x) intersect(fSampleMap[[x]], rSampleMap[[x]]))
names(pairedSampleMap) <- names(fSampleMap)
##########ITERATIONS
markerSampleList <- list()
runSummaryTable <- data.frame()
alleleDb <- list()
for(thisMarker in names(designObject@markers)) {
#for(thisMarker in names(markerMap)) {
#for(thisMarker in names(markerMap)[1:2]) { # temp to finish off
cat(paste(thisMarker,"\n"))
#thisMarker <- "DQA1_E2"
## might need to combine all these to return a single item.
summaryList <- list()
summaryTable <- data.frame()
markerSequenceCount <- list("noSeq"=0) # BUG? requires some data otherwise won't sum properly with localSequenceCount.
alleleList <- list()
variantList <- list()
alleleCount <- 1
# NOTE(review): `markerList` is not an argument or local — relies on a global
# (see commented read.fasta("HLA_MARKERS.fasta") above). Confirm it is loaded.
markerSeq <- unlist(getSequence(markerList[[thisMarker]],as.string=T))
for(thisSample in designObject@samples) {
#for(thisSample in names(pairedSampleMap)[1:4]) {
#print(thisSample)
# NOTE(review): `markerMap` is not defined anywhere in this function — only
# fMarkerMap/rMarkerMap are. Presumably this should be the union of forward
# and reverse hits for the marker; verify before running.
testPairSeqList <- intersect(pairedSampleMap[[thisSample]], markerMap[[thisMarker]])
#testPairSeqList <- intersect(sampleMap[[thisSample]], markerMap[[thisMarker]])
seqTable <- data.frame()
localAlleleNames <- c("NA","NA","NA")
localAlleleFreqs <- c(0,0,0)
## go through all seq's mapped to this marker/sample pair.
## extract the corresponding sequence delimited by the top blast hits on the primers. IS THIS THE BEST WAY?
## Simple improvement: minimum blast hit length to primer to keep.
## internal Function
# Append an all-zero summary row for this marker/sample pair (no sequences).
recordNoSeqs <- function(summaryTable) { # to record no seqs before skipping out.
summaryRow <- data.frame(marker=thisMarker, sample=thisSample, numbSeqs=0,numbVars=0,
varName.1="NA", varFreq.1= 0,
varName.2="NA", varFreq.2= 0,
varName.3="NA", varFreq.3= 0)
summaryTable <- rbind(summaryTable, summaryRow)
return(summaryTable)
}
if(length(testPairSeqList) < 1) {
#summaryList[[thisMarker]][[thisSample]] <- NA
summaryTable <- recordNoSeqs(summaryTable)
next ; # skip to next sample
}
# NOTE(review): `rmarkerMap` (lower-case m) is not defined in this scope —
# only `rMarkerMap` is. R's lazy evaluation masks the error for as long as
# the callee never evaluates that argument; presumably `rMarkerMap` was
# intended. Verify against getSubSeqsTable's signature.
seqTable <- getSubSeqsTable(thisMarker, thisSample, pairedSampleMap, fMarkerMap,rmarkerMap, markerSeq)
# if no sequences returned, nothing to process.
if(nrow(seqTable) < 1 ) {
summaryTable <- recordNoSeqs(summaryTable)
#summaryList[[thisMarker]][[thisSample]] <- NA
next ; # go to next sample.
}
#localSequenceMap <- split(seqTable[,2], seqTable[,1])
#localSequenceCount <- lapply(localSequenceMap , length) # list named by sequence with counts.
#localSequenceCount <- localSequenceCount[order(as.numeric(localSequenceCount), decreasing=T)]
## test if variants are novel.
## Give allele names?
## Do with first three for now.
# Name up to the three most frequent variants; novel variants get a fresh
# "<marker>.<n>" allele name, previously seen ones reuse their name.
alToRecord <- min(3,nrow(seqTable))
if(alToRecord > 0) {
for (a in 1:alToRecord ) {
if(is.null(variantList[[seqTable$var[a]]])) { # novel
alleleName <- paste(thisMarker, alleleCount,sep=".")
variantList[[seqTable$var[a]]] <- alleleName
localAlleleNames[a] <- alleleName
localAlleleFreqs[a] <- seqTable$count[a]
alleleCount <- alleleCount + 1
} else { # pre-existing alllele
localAlleleNames[a] <- variantList[[seqTable$var[a]]]
localAlleleFreqs[a] <- seqTable$count[a]
}
}
}
# sequence correction?
# compile stats
if(nrow(seqTable) >0 ) { # cannot allow assignment from empty list as messes up class of list for remaining iterations
summaryList[[thisMarker]] <- list()
summaryList[[thisMarker]][[thisSample]] <- seqTable
}
summaryRow <- data.frame(marker=thisMarker, sample=thisSample, numbSeqs=sum(seqTable$count),numbVars=nrow(seqTable),
varName.1=localAlleleNames[1], varFreq.1= localAlleleFreqs[1],
varName.2=localAlleleNames[2], varFreq.2= localAlleleFreqs[2],
varName.3=localAlleleNames[3], varFreq.3= localAlleleFreqs[3])
summaryTable <- rbind(summaryTable, summaryRow)
#sequence count across samples?
# need to sum from summaryTable or from summaryList.
#markerSequenceCount <-
#as.list(colSums(merge(m, n, all = TRUE), na.rm = TRUE)) # not working
# Accumulate per-variant read counts across samples for this marker.
localSequenceCount <- as.list(seqTable$count)
names(localSequenceCount) <- seqTable$var
markerSequenceCount <- as.list(colSums(merge(markerSequenceCount , localSequenceCount, all = TRUE), na.rm = TRUE))
# might need to instantiate the markerSequenceCount if empty.
} # end of sample loop
markerSampleList[[thisMarker]] <- summaryTable
# One run-summary row per marker: totals plus variant/allele length ranges.
runSummaryRow <- data.frame(marker=thisMarker, assignedSeqs=sum(summaryTable$numbSeqs), assignedVariants=sum(summaryTable$numbVars),
minVariantLength=min(nchar(names(markerSequenceCount))),
maxVariantLength=max(nchar(names(markerSequenceCount))),
minAlleleLength=min(nchar(names(variantList))), maxAlleleLength=max(nchar(names(variantList))))
runSummaryTable <- rbind(runSummaryTable, runSummaryRow)
if(length(variantList) > 0) {
alleleDb[[thisMarker]] <- variantList # LATER: separate lists for alleles and variants?
}
} # end of marker loop
localMlgtResult <- new("mlgtResult", designObject, runSummaryTable=runSummaryTable , alleleDb=alleleDb, markerSampleList=markerSampleList)
return(localMlgtResult)
} # end of mlgt function
###########################
# Register mlgt.mlgtDesign as the "mlgt" method for class "mlgtDesign".
setMethod("mlgt","mlgtDesign", definition=mlgt.mlgtDesign)
|
/old_mlgt.R
|
no_license
|
davetgerrard/mlgt
|
R
| false
| false
| 11,617
|
r
|
########################## DEV VERSION
# This alignment has ----- exactly at the primer limits. Could be good.
# Probably don't need to use primers at all and don't need to extract subseqs, only seqs, in correct orientation.
# How to extract ref, trim others and keep track of counts for each.
# Also, should alignment be across all seqs of a marker? Prob not. then becomes run dependent. Do within sample only.
# need to rework blastHit table to separate hits on forward and reverse strand.
# TODO: could use same method to improve MID hits and check both ends are marked. (or use different MIDs for each end).
#topHits <- getTopBlastHits("blastOut.markers.tab")
#topHits$strand <- ifelse(topHits$s.end > topHits$s.start, 1,2)
#fMarkerMap <- split(as.character(topHits$query[topHits$strand==1]), topHits$subject[topHits$strand==1])
#rMarkerMap <- split(as.character(topHits$query[topHits$strand==2]), topHits$subject[topHits$strand==2])
#
getSubSeqsTable <- function(thisMarker, thisSample, sampleMap, fMarkerMap,rmarkerMap, markerSeq) {
varCountTable <- data.frame()
#thisMarker <- "DPA1_E2"
#thisSample <- "MID-1"
#intersect(sampleMap[[thisSample]], markerMap[[thisMarker]])
fPairSeqList <- intersect(sampleMap[[thisSample]], fMarkerMap[[thisMarker]]) # fMarkerMap[[thisMarker]]
rPairSeqList <- intersect(sampleMap[[thisSample]], rMarkerMap[[thisMarker]])
#intersect( fMarkerMap[[thisMarker]], rMarkerMap[[thisMarker]])
# extract raw seqs from blastdb for forward hits. # THIS FUNCT CURRENTLY UNUSED BECAUSE NEED TO ASSESS IF ANYTHING TO QUERY BEFORE RUNNING fastacmd
extractRawSeqsCommand <- function(idList,strand=1, fileName) {
queryList <- paste(idList , collapse=",")
fastacmdCommand <- paste(fastacmdPath, "-p F -t T -d", "inputSeqs" , "-S", strand, "-o", fileName, "-s", queryList)
return(fastacmdCommand)
}
fRawSeqs <- rRawSeqs <- list()
queryList <- paste(fPairSeqList , collapse=",")
if(length(queryList) > 0) {
fRawSeqFileName <- "fRawSeqExtract.fasta"
strand <- 1
fastacmdCommand <- paste(fastacmdPath, "-p F -t T -d", "inputSeqs" , "-S", strand, "-o", fRawSeqFileName, "-s", queryList)
system(fastacmdCommand)
fRawSeqs <- read.fasta(fRawSeqFileName , as.string=T)
}
queryList <- paste(rPairSeqList , collapse=",")
if(length(queryList) > 0) {
rRawSeqFileName <- "rRawSeqExtract.fasta"
strand <- 2
fastacmdCommand <- paste(fastacmdPath, "-p F -t T -d", "inputSeqs" , "-S", strand, "-o", rRawSeqFileName, "-s", queryList)
system(fastacmdCommand)
rRawSeqs <- read.fasta(rRawSeqFileName , as.string=T)
}
# system( extractRawSeqsCommand(idList=fPairSeqList ,strand=1, fileName=fRawSeqFileName ) )
# rRawSeqFileName <- "rRawSeqExtract.fasta"
# system( extractRawSeqsCommand(idList=rPairSeqList ,strand=2, fileName=rRawSeqFileName ) )
# Make file of unique seqs. Can name with sequence
### STRAND!!! - Done!
rawSeqs <- c(fRawSeqs ,rRawSeqs )
if(length(rawSeqs) < 1) {
return(varCountTable)
}
rawSeqCountTable <- as.data.frame(table(unlist(rawSeqs)))
names(rawSeqCountTable) <- c("rawSeq", "rawCount")
rawVariantFile <- paste("test", thisMarker, thisSample, "raw.variants.fasta",sep=".") #"localVariants.fasta"
#rawVariantFile <- paste(runPath, rawVariantFileName, sep="/")
write.fasta(as.list(c(markerSeq,as.character(rawSeqCountTable$rawSeq))) ,c(thisMarker,as.character(rawSeqCountTable$rawSeq)),file=rawVariantFile ,open="w")
# Align all seqs with reference
rawAlignFile <- paste("test", thisMarker, thisSample, "raw.align.fasta",sep=".") #"localAlign.fasta"
#rawAlignFile <- paste(runPath, rawAlignFileName, sep="/")
muscleCommand <- paste(musclePath, "-in", rawVariantFile , "-out", rawAlignFile , "-diags -quiet" )
system(muscleCommand)
# Extract portion corresponding to reference.
rawAlignment <- read.fasta(rawAlignFile, as.string=T) # do not use read.alignment() - broken
alignedMarkerSeq <- s2c(rawAlignment[[thisMarker]])
subStart <- min(grep("-",alignedMarkerSeq ,invert=T))
subEnd <- max(grep("-",alignedMarkerSeq ,invert=T))
alignedSubSeqs <- lapply(rawAlignment, FUN=function(x) substr(x[1], subStart, subEnd))
subAlignFile <- paste("test", thisMarker, thisSample, "sub.align.fasta",sep=".") #"localAlign.fasta"
#subAlignFile <- paste(runPath, subAlignFileName , sep="/")
write.fasta(alignedSubSeqs , names(alignedSubSeqs ), file=subAlignFile )
alignedSubTable <- data.frame(rawSeq = names(alignedSubSeqs ) , subSeq= as.character(unlist(alignedSubSeqs )))
# R-apply count of each seq. There may be some duplicated subSeqs.
combTable <- merge(rawSeqCountTable ,alignedSubTable , by="rawSeq", all.x=T)
varCount <- by(combTable, as.character(combTable$subSeq), FUN=function(x) sum(x$rawCount))
varCountTable <- data.frame(alignedVar=names(varCount), count=as.numeric(varCount))
varCountTable$var <- gsub("-","",varCountTable$alignedVar)
varCountTable <- varCountTable[order(varCountTable$count,decreasing=T),]
# Make unique list, summing counts where same seq found. (easier in table than list).
return(varCountTable)
}
#############################
# pre-specify marker reference sequence.
#markerList <- read.fasta("HLA_MARKERS.fasta",as.string=T)
################
# Default body for the mlgt() generic: simply returns the object's attributes.
# NOTE(review): an unusual default for a workflow-level generic — presumably a
# placeholder so setGeneric() below has a function to promote; confirm no
# caller relies on this default before changing it.
mlgt <- function(object) attributes(object)
# Promote mlgt() to an S4 generic so class-specific methods (e.g. for
# "mlgtDesign", registered further down) can dispatch on the argument class.
setGeneric("mlgt")
# S4 workhorse behind mlgt() for "mlgtDesign" objects.
#
# Pipeline: (1) read BLAST hit tables produced by earlier steps
# ("blastOut.markers.tab" via getTopBlastHits(), "blastOut.rTags.tab" read
# directly); (2) split hits into forward-/reverse-strand maps per marker and
# per sample MID; (3) for every marker/sample pair, extract, align and count
# sub-sequences via getSubSeqsTable(); (4) assign allele names to the first
# few (up to three) variants per pair, coining new names for novel variants;
# (5) return everything wrapped in a new "mlgtResult" object.
#
# NOTE(review): this function reads several names it does not define:
# `markerMap` (used in the sample loop), `markerList`, `getTopBlastHits()`
# and `getSubSeqsTable()` must exist in the package namespace or calling
# environment. Confirm `markerMap` in particular — only fMarkerMap and
# rMarkerMap are built here.
mlgt.mlgtDesign <- function(designObject) {
topHits <- getTopBlastHits("blastOut.markers.tab")
# strand: 1 = hit on forward strand (subject coords ascending), 2 = reverse
topHits$strand <- ifelse(topHits$s.end > topHits$s.start, 1,2)
fMarkerMap <- split(as.character(topHits$query[topHits$strand==1]), topHits$subject[topHits$strand==1])
rMarkerMap <- split(as.character(topHits$query[topHits$strand==2]), topHits$subject[topHits$strand==2])
## NEED TO MAKE SAMPLEMAP WITH HITS TO MID IN BOTH FORWARD AND REVERSE STRANDS like marker hits are split.
## Requires retention of 2 blast hits per sequence.
topSampleHits <- read.delim("blastOut.rTags.tab", header=F)
names(topSampleHits ) <- c("query", "subject", "percentId", "aliLength", "mismatches", "gapOpenings", "q.start","q.end", "s.start","s.end", "p_value", "e_value")
topSampleHits$strand <- ifelse(topSampleHits$s.end > topSampleHits$s.start, 1,2)
fSampleMap <- split(as.character(topSampleHits$query[topSampleHits$strand==1]), topSampleHits$subject[topSampleHits$strand==1])
rSampleMap <- split(as.character(topSampleHits$query[topSampleHits$strand==2]), topSampleHits$subject[topSampleHits$strand==2])
# combine sampleMaps to keep only sequences with MIDs in both orientations.
pairedSampleMap <- lapply(names(fSampleMap), FUN=function(x) intersect(fSampleMap[[x]], rSampleMap[[x]]))
names(pairedSampleMap) <- names(fSampleMap)
##########ITERATIONS
markerSampleList <- list()
runSummaryTable <- data.frame()
alleleDb <- list()
for(thisMarker in names(designObject@markers)) {
cat(paste(thisMarker,"\n"))
## per-marker accumulators
summaryList <- list()
summaryTable <- data.frame()
markerSequenceCount <- list("noSeq"=0) # BUG? requires some data otherwise won't sum properly with localSequenceCount.
alleleList <- list()
variantList <- list()
alleleCount <- 1
markerSeq <- unlist(getSequence(markerList[[thisMarker]],as.string=T))
for(thisSample in designObject@samples) {
# sequences assigned to BOTH this sample (by MID) and this marker
testPairSeqList <- intersect(pairedSampleMap[[thisSample]], markerMap[[thisMarker]])
seqTable <- data.frame()
localAlleleNames <- c("NA","NA","NA")
localAlleleFreqs <- c(0,0,0)
## go through all seq's mapped to this marker/sample pair.
## extract the corresponding sequence delimited by the top blast hits on the primers. IS THIS THE BEST WAY?
## Simple improvement: minimum blast hit length to primer to keep.
## internal helper: append an all-zero summary row for a marker/sample
## pair that yielded no usable sequences, before skipping to next sample.
recordNoSeqs <- function(summaryTable) {
summaryRow <- data.frame(marker=thisMarker, sample=thisSample, numbSeqs=0,numbVars=0,
varName.1="NA", varFreq.1= 0,
varName.2="NA", varFreq.2= 0,
varName.3="NA", varFreq.3= 0)
summaryTable <- rbind(summaryTable, summaryRow)
return(summaryTable)
}
if(length(testPairSeqList) < 1) {
summaryTable <- recordNoSeqs(summaryTable)
next ; # skip to next sample
}
# FIX: was `rmarkerMap` (undefined; case typo for rMarkerMap built above)
seqTable <- getSubSeqsTable(thisMarker, thisSample, pairedSampleMap, fMarkerMap, rMarkerMap, markerSeq)
# if no sequences returned, nothing to process.
if(nrow(seqTable) < 1 ) {
summaryTable <- recordNoSeqs(summaryTable)
next ; # go to next sample.
}
## test if variants are novel and give allele names.
## Do with first three for now.
alToRecord <- min(3,nrow(seqTable))
if(alToRecord > 0) {
for (a in 1:alToRecord ) {
if(is.null(variantList[[seqTable$var[a]]])) { # novel variant: coin a new allele name
alleleName <- paste(thisMarker, alleleCount,sep=".")
variantList[[seqTable$var[a]]] <- alleleName
localAlleleNames[a] <- alleleName
localAlleleFreqs[a] <- seqTable$count[a]
alleleCount <- alleleCount + 1
} else { # pre-existing allele
localAlleleNames[a] <- variantList[[seqTable$var[a]]]
localAlleleFreqs[a] <- seqTable$count[a]
}
}
}
# compile stats
if(nrow(seqTable) >0 ) { # cannot allow assignment from empty list as messes up class of list for remaining iterations
summaryList[[thisMarker]] <- list()
summaryList[[thisMarker]][[thisSample]] <- seqTable
}
summaryRow <- data.frame(marker=thisMarker, sample=thisSample, numbSeqs=sum(seqTable$count),numbVars=nrow(seqTable),
varName.1=localAlleleNames[1], varFreq.1= localAlleleFreqs[1],
varName.2=localAlleleNames[2], varFreq.2= localAlleleFreqs[2],
varName.3=localAlleleNames[3], varFreq.3= localAlleleFreqs[3])
summaryTable <- rbind(summaryTable, summaryRow)
# accumulate per-variant counts across samples for this marker;
# merge + colSums sums counts for variants seen in both lists.
localSequenceCount <- as.list(seqTable$count)
names(localSequenceCount) <- seqTable$var
markerSequenceCount <- as.list(colSums(merge(markerSequenceCount , localSequenceCount, all = TRUE), na.rm = TRUE))
} # end of sample loop
markerSampleList[[thisMarker]] <- summaryTable
runSummaryRow <- data.frame(marker=thisMarker, assignedSeqs=sum(summaryTable$numbSeqs), assignedVariants=sum(summaryTable$numbVars),
minVariantLength=min(nchar(names(markerSequenceCount))),
maxVariantLength=max(nchar(names(markerSequenceCount))),
minAlleleLength=min(nchar(names(variantList))), maxAlleleLength=max(nchar(names(variantList))))
runSummaryTable <- rbind(runSummaryTable, runSummaryRow)
if(length(variantList) > 0) {
alleleDb[[thisMarker]] <- variantList # LATER: separate lists for alleles and variants?
}
} # end of marker loop
localMlgtResult <- new("mlgtResult", designObject, runSummaryTable=runSummaryTable , alleleDb=alleleDb, markerSampleList=markerSampleList)
return(localMlgtResult)
} # end of mlgt function
###########################
# Register mlgt.mlgtDesign as the S4 method dispatched by mlgt() on
# objects of class "mlgtDesign".
setMethod("mlgt","mlgtDesign", definition=mlgt.mlgtDesign)
|
# Build the package startup banner: a figlet-style "GA" logo with the
# installed package version, plus a citation hint on a second element.
# Pure text construction — no side effects; returns a character vector.
GAStartupMessage <- function()
{
  # Banner body: ASCII art followed by the current installed version number.
  banner <- paste0(
" ____ _
/ ___| / \\ Genetic
| | _ / _ \\ Algorithms
| |_| |/ ___ \\
\\____/_/ \\_\\ version ", packageVersion("GA"))
  citation_hint <- "\nType 'citation(\"GA\")' for citing this R package in publications."
  c(banner, citation_hint)
}
# Package attach hook: run by R when the package is loaded via library().
# Unlocks a namespace binding and prints the startup banner.
.onAttach <- function(lib, pkg)
{
# unlock .ga.default variable allowing its modification
# (bindings in a sealed package namespace are locked by default)
unlockBinding(".ga.default", asNamespace("GA"))
# startup message
msg <- GAStartupMessage()
if(!interactive())
# non-interactive sessions get a plain one-line version string
# instead of the multi-line ASCII-art banner
msg[1] <- paste("Package 'GA' version", packageVersion("GA"))
packageStartupMessage(msg)
invisible()
}
|
/R/zzz.R
|
no_license
|
luca-scr/GA
|
R
| false
| false
| 655
|
r
|
# Build the package startup banner (duplicate of the definition earlier in
# this dump): figlet-style "GA" logo + installed version + citation hint.
# Returns a character vector; no side effects.
GAStartupMessage <- function()
{
# Startup message obtained as
# > figlet GA
msg <- c(paste0(
" ____ _
/ ___| / \\ Genetic
| | _ / _ \\ Algorithms
| |_| |/ ___ \\
\\____/_/ \\_\\ version ", packageVersion("GA")),
"\nType 'citation(\"GA\")' for citing this R package in publications.")
return(msg)
}
# Package attach hook (duplicate of the definition earlier in this dump):
# unlocks the .ga.default binding and prints the startup banner.
.onAttach <- function(lib, pkg)
{
# unlock .ga.default variable allowing its modification
# (namespace bindings are locked by default once the package is sealed)
unlockBinding(".ga.default", asNamespace("GA"))
# startup message
msg <- GAStartupMessage()
if(!interactive())
# replace the ASCII-art banner with a plain version line when non-interactive
msg[1] <- paste("Package 'GA' version", packageVersion("GA"))
packageStartupMessage(msg)
invisible()
}
|
# Fuzzer-generated regression input for the internal C++ routine
# blorr:::blr_pairs_cpp (valgrind test case). The x vector mixes zeros,
# extreme magnitudes, denormals and NaN; y is all zeros. Note x and y have
# different lengths — presumably intentional, to exercise the routine's
# handling of unequal input vectors (TODO confirm against the C++ source).
testlist <- list(x = c(0, 0, 0, 0, 5.09911082268234e+182, 5.77636807742164e-275, NaN, -8.1542028248312e-280, -3.85185988649917e-34, -3.74931033101325e-253, -1.05168902913543e-176, 9.34704639616146e-307, 8.3975159971137e-315, 0, -2.37001899157646e+306, NaN, 1.03211490097508e-296, 5.2181291954879e+279, -2.42208305489817e+24, -1.34765550947704e+28, 7.23149031820527e-308, 2.63543261013288e-82, NaN, -9.40591696517169e+24, 1.00496080260073e+180, -2.09594355232913e+294, 3.06571532920587e-115, -4.03496294643223e-87, 2.54672248582809e-313, 1.42760928130527e+181, 7.23149031820522e-308, 5.77636583357087e-275), y = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Call the unexported C++ entry point with the crafted arguments.
result <- do.call(blorr:::blr_pairs_cpp,testlist)
# Print the structure of the result (the test passes if no crash occurs).
str(result)
|
/blorr/inst/testfiles/blr_pairs_cpp/libFuzzer_blr_pairs_cpp/blr_pairs_cpp_valgrind_files/1609956273-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 982
|
r
|
# Duplicate of the fuzzer regression script above: crafted x (extremes/NaN)
# and all-zero y of different lengths fed to blorr:::blr_pairs_cpp.
testlist <- list(x = c(0, 0, 0, 0, 5.09911082268234e+182, 5.77636807742164e-275, NaN, -8.1542028248312e-280, -3.85185988649917e-34, -3.74931033101325e-253, -1.05168902913543e-176, 9.34704639616146e-307, 8.3975159971137e-315, 0, -2.37001899157646e+306, NaN, 1.03211490097508e-296, 5.2181291954879e+279, -2.42208305489817e+24, -1.34765550947704e+28, 7.23149031820527e-308, 2.63543261013288e-82, NaN, -9.40591696517169e+24, 1.00496080260073e+180, -2.09594355232913e+294, 3.06571532920587e-115, -4.03496294643223e-87, 2.54672248582809e-313, 1.42760928130527e+181, 7.23149031820522e-308, 5.77636583357087e-275), y = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke the internal C++ function and show its result structure.
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quipu.R
\docType{data}
\name{allele.freqs}
\alias{allele.freqs}
\title{Sample allele frequencies}
\format{
Tabular format. The records represent unique SSR alleles with their assigned frequencies.
Frequencies were derived from the sample data and are just for illustrative purposes.
\itemize{
\item{"marker"} {Marker name}
\item{"marker_size"} {Marker size}
\item{"frequency"} {A fraction between 0 and 1.}
}
}
\description{
Sample allele frequencies
}
\keyword{datasets}
|
/man/allele.freqs.Rd
|
permissive
|
c5sire/quipu
|
R
| false
| true
| 554
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quipu.R
\docType{data}
\name{allele.freqs}
\alias{allele.freqs}
\title{Sample allele frequencies}
\format{
Tabular format. The records represent unique SSR alleles with their assigned frequencies.
Frequencies were derived from the sample data and are just for illustrative purposes.
\itemize{
\item{"marker"} {Marker name}
\item{"marker_size"} {Marker size}
\item{"frequency"} {A fraction between 0 and 1.}
}
}
\description{
Sample allele frequencies
}
\keyword{datasets}
|
## ---------------------------
##
## Script name: `rule add_metadata`
##
## Purpose of script: read in file with given accession and add in the requisite metadata
##
## Author: Will Hannon
##
## Date Created: 2020-06-26
##
## Copyright (c) Will Hannon, 2020
## Email: wwh22@uw.edu
##
## ---------------------------
# FIX: library() errors immediately if tidyverse is missing;
# require() would only return FALSE and fail later with confusing errors.
library(tidyverse)
## ---------------------------
## ==== Get file paths from snakemake object ==== ##
vcf.filepath = snakemake@input[[1]]
metadata.filepath = snakemake@params[[1]]
## ==== Boolean to see if file is empty and there are no columns ==== ##
info = file.info(vcf.filepath)
empty = (info$size == 0)
## ==== Import Metadata, same table used to import samples from `Run Selector w/ minimal changes` ==== ##
#
# Add in any constraints on metadata here.
#
# Download and format metadata. But first, check if the vcf is empty..
if (!empty){
metadata.df = read_csv(metadata.filepath) %>% rename(Accession = "Run")
## ==== Import and process the vcf -> txt files ==== ##
# names for the pipe-delimited subfields of the SnpEff `ANN` annotation
cols_into = c("ALT_Allele",
"Effect",
"Putative_Impact",
"Gene_Name",
"Gene_ID",
"Feature_Type",
"Feature_ID",
"Transcript_Biotype",
"Rank",
"HGVS.c",
"HGVS.p",
"cDNA_position",
"CDS_position",
"Protein_position",
"Distance_to_feature",
"Errors")
# extract sample name / aligner / caller encoded in the file name
# as <name>.<aligner>.<caller>....
sample.info = strsplit(basename(vcf.filepath), "[.]")[[1]]
sample.name = sample.info[1]
sample.aligner = sample.info[2]
sample.caller = sample.info[3]
# different extraction depending on columns in the original VCF file
if (sample.caller == "lofreq"){
# read in vcf table
sample.df = read_tsv(vcf.filepath) %>%
# split the ANN field into its subfields
separate(col = ANN, sep = "\\|", remove = F, into = cols_into) %>%
select(POS, REF, ALT,
AF, DP,
Effect, Gene_Name, AA_Change="HGVS.p") %>%
# clean amino acid change column (drop the leading "p." prefix)
mutate(AA_Change = gsub("^p\\.", "", AA_Change)) %>%
# simplify the effect column; unmatched effects become NA
mutate(Effect = case_when(Effect == "missense_variant" ~ "Missense",
Effect == "synonymous_variant" ~ "Synonymous",
Effect == "upstream_gene_variant" ~ "Synonymous",
Effect == "downstream_gene_variant" ~ "Synonymous",
Effect == "stop_retained_variant" ~ "Synonymous",
Effect == "stop_lost" ~ "Nonsense",
Effect == "start_lost" ~ "Nonsense",
Effect == "stop_gained" ~ "Nonsense")) %>%
# add sample ID
mutate(Accession = sample.name) %>%
mutate(Caller = sample.caller) %>%
mutate(Aligner = sample.aligner)
} else if (sample.caller == "varscan"){
# read in vcf table
sample.df = read_tsv(vcf.filepath) %>%
# split the ANN field into its subfields
separate(col = ANN, sep = "\\|", remove = F, into = cols_into) %>%
select(POS, REF, ALT,
AF="Sample1.FREQ", DP="Sample1.DP",
Effect, Gene_Name, AA_Change="HGVS.p") %>%
# clean amino acid change column
mutate(AA_Change = gsub("^p\\.", "", AA_Change)) %>%
# clean allele freq column (varscan reports e.g. "12.5%")
mutate(AF = gsub("%", "", AF)) %>%
# simplify the effect column; unmatched effects become NA
mutate(Effect = case_when(Effect == "missense_variant" ~ "Missense",
Effect == "synonymous_variant" ~ "Synonymous",
Effect == "upstream_gene_variant" ~ "Synonymous",
Effect == "downstream_gene_variant" ~ "Synonymous",
Effect == "stop_retained_variant" ~ "Synonymous",
Effect == "stop_lost" ~ "Nonsense",
Effect == "start_lost" ~ "Nonsense",
Effect == "stop_gained" ~ "Nonsense")) %>%
# add sample ID
mutate(Accession = sample.name) %>%
mutate(Caller = sample.caller) %>%
mutate(Aligner = sample.aligner)
# convert percentage string to a fraction in [0, 1]
sample.df$AF = as.numeric(sample.df$AF) / 100
} else if (sample.caller == "ivar"){
# read in vcf table; ivar output has its own AA columns instead of ANN
sample.df = read_tsv(vcf.filepath) %>%
mutate(Effect = if_else(REF_AA == ALT_AA, "Synonymous", "Missense"),
AA_Change = paste0(REF_AA, "-", ALT_AA),
Gene_Name = NA) %>%
select(POS, REF, ALT,
AF="ALT_FREQ", DP="TOTAL_DP",
Effect, Gene_Name, AA_Change) %>%
# add sample ID
mutate(Accession = sample.name) %>%
mutate(Caller = sample.caller) %>%
mutate(Aligner = sample.aligner) %>%
distinct()
} else {
# FIX: fail fast with a clear message; previously an unrecognized caller
# left sample.df undefined and the join below failed with an opaque
# "object 'sample.df' not found" error.
stop("Unrecognized variant caller in file name: ", sample.caller)
}
## ==== Join the variant data with the metadata by 'Run' id ==== ##
# Join with metadata
sample.df = left_join(sample.df, metadata.df, by = "Accession")
# Write out to a file.
# FIX: removed the misleading sep="\t" — write.csv() ignores a user-supplied
# 'sep' (with a warning) and always writes comma-separated output.
write.csv(sample.df, file = snakemake@output[[1]], row.names = FALSE)
} else{
# Write an empty output if file is empty
cat(NULL,file=snakemake@output[[1]])
}
## ==== END ==== ##
|
/workflow/scripts/add_metadata.R
|
permissive
|
jbloomlab/SARS-CoV-2_bottleneck
|
R
| false
| false
| 5,425
|
r
|
## ---------------------------
##
## Script name: `rule add_metadata`
##
## Purpose of script: read in file with given accession and add in the requisite metadata
##
## Author: Will Hannon
##
## Date Created: 2020-06-26
##
## Copyright (c) Will Hannon, 2020
## Email: wwh22@uw.edu
##
## ---------------------------
## (Duplicate copy of the add_metadata script appearing earlier in this dump.)
require(tidyverse)
## ---------------------------
## ==== Get file paths from snakemake object ==== ##
vcf.filepath = snakemake@input[[1]]
metadata.filepath = snakemake@params[[1]]
## ==== Boolean to see if file is empty and there are no columns ==== ##
info = file.info(vcf.filepath)
empty = (info$size == 0)
## ==== Import Metadata, same table used to import samples from `Run Selector w/ minimal changes` ==== ##
#
# Add in any constraints on metadata here.
#
# Download and format metadata. But first, check if the vcf is empty..
if (!empty){
metadata.df = read_csv(metadata.filepath) %>% rename(Accession = "Run")
## ==== Import and process the vcf -> txt files ==== ##
# names for the pipe-delimited subfields of the `ANN` annotation column
cols_into = c("ALT_Allele",
"Effect",
"Putative_Impact",
"Gene_Name",
"Gene_ID",
"Feature_Type",
"Feature_ID",
"Transcript_Biotype",
"Rank",
"HGVS.c",
"HGVS.p",
"cDNA_position",
"CDS_position",
"Protein_position",
"Distance_to_feature",
"Errors")
# extract sample name / aligner / caller encoded in the filepath
sample.info = strsplit(basename(vcf.filepath), "[.]")[[1]]
sample.name = sample.info[1]
sample.aligner = sample.info[2]
sample.caller = sample.info[3]
# different extraction depending on columns in the original VCF file
# NOTE(review): if sample.caller matches none of the branches below,
# sample.df is never created and the left_join further down errors.
if (sample.caller == "lofreq"){
# read in vcf table
sample.df = read_tsv(vcf.filepath) %>%
# split into cols with regex
separate(col = ANN, sep = "\\|", remove = F, into = cols_into) %>%
select(POS, REF, ALT,
AF, DP,
Effect, Gene_Name, AA_Change="HGVS.p") %>%
# clean amino acid change column (drop the leading "p." prefix)
mutate(AA_Change = gsub("^p\\.", "", AA_Change)) %>%
# simplify the effect column; unmatched effects become NA
mutate(Effect = case_when(Effect == "missense_variant" ~ "Missense",
Effect == "synonymous_variant" ~ "Synonymous",
Effect == "upstream_gene_variant" ~ "Synonymous",
Effect == "downstream_gene_variant" ~ "Synonymous",
Effect == "stop_retained_variant" ~ "Synonymous",
Effect == "stop_lost" ~ "Nonsense",
Effect == "start_lost" ~ "Nonsense",
Effect == "stop_gained" ~ "Nonsense")) %>%
# add sample ID
mutate(Accession = sample.name) %>%
mutate(Caller = sample.caller) %>%
mutate(Aligner = sample.aligner)
} else if (sample.caller == "varscan"){
# read in vcf table
sample.df = read_tsv(vcf.filepath) %>%
# split into cols with regex
separate(col = ANN, sep = "\\|", remove = F, into = cols_into) %>%
select(POS, REF, ALT,
AF="Sample1.FREQ", DP="Sample1.DP",
Effect, Gene_Name, AA_Change="HGVS.p") %>%
# clean amino acid change column
mutate(AA_Change = gsub("^p\\.", "", AA_Change)) %>%
# clean allele freq column (strip the "%" suffix)
mutate(AF = gsub("%", "", AF)) %>%
# simplify the effect column; unmatched effects become NA
mutate(Effect = case_when(Effect == "missense_variant" ~ "Missense",
Effect == "synonymous_variant" ~ "Synonymous",
Effect == "upstream_gene_variant" ~ "Synonymous",
Effect == "downstream_gene_variant" ~ "Synonymous",
Effect == "stop_retained_variant" ~ "Synonymous",
Effect == "stop_lost" ~ "Nonsense",
Effect == "start_lost" ~ "Nonsense",
Effect == "stop_gained" ~ "Nonsense")) %>%
# add sample ID
mutate(Accession = sample.name) %>%
mutate(Caller = sample.caller) %>%
mutate(Aligner = sample.aligner)
# convert percentage string to a fraction in [0, 1]
sample.df$AF = as.numeric(sample.df$AF) / 100
} else if (sample.caller == "ivar"){
# read in vcf table; ivar output carries its own AA columns instead of ANN
sample.df = read_tsv(vcf.filepath) %>%
mutate(Effect = if_else(REF_AA == ALT_AA, "Synonymous", "Missense"),
AA_Change = paste0(REF_AA, "-", ALT_AA),
Gene_Name = NA) %>%
select(POS, REF, ALT,
AF="ALT_FREQ", DP="TOTAL_DP",
Effect, Gene_Name, AA_Change) %>%
# add sample ID
mutate(Accession = sample.name) %>%
mutate(Caller = sample.caller) %>%
mutate(Aligner = sample.aligner) %>%
distinct()
}
## ==== Join the variant data with the metadata by 'Run' id ==== ##
# Join with metadata
sample.df = left_join(sample.df, metadata.df, by = "Accession")
# Write out to a file
# NOTE(review): write.csv() ignores a user-supplied 'sep' (with a warning)
# and always writes commas — the sep="\t" below has no effect.
write.csv(sample.df, file = snakemake@output[[1]], row.names=FALSE, sep="\t")
} else{
# Write an empty output if file is empty
cat(NULL,file=snakemake@output[[1]])
}
## ==== END ==== ##
|
library(tidyr)
library(dplyr)
library(ggplot2)
# Plots CIs for the quantile.
# Reads three precomputed tradeoff result tables and, for each language,
# writes a memory-vs-quantile figure with lower confidence bounds
# (dotted / dashed) and the empirical curve (solid), annotated with p-values.
# NOTE(review): all three tables drop Type == "GROUND" rows at load time,
# so the geom_text layer below that filters Type == "GROUND" draws nothing.
fullData_ConfidenceLowerBound = read.csv("../../../results/tradeoff/listener-curve-binomial-confidence-bound-quantile-noAssumption-equalOrBigger.tsv", sep="\t") %>% filter(Type != "GROUND")
fullData_ConfidenceLowerBound_05 = read.csv("../../../results/tradeoff/listener-curve-binomial-confidence-bound-quantile-noAssumption-05-equalOrBigger.tsv", sep="\t") %>% filter(Type != "GROUND")
fullData_BinomialTest = read.csv("../../../results/tradeoff/listener-curve-binomial-test-equalOrBigger.tsv", sep="\t") %>% filter(Type != "GROUND")
# Build and save the per-language plot; returns the ggplot object.
memListenerSurpPlot_onlyWordForms_boundedVocab = function(language) {
# subset each table to the requested language
data = fullData_ConfidenceLowerBound %>% filter(Language == language)
data2 = fullData_BinomialTest %>% filter(Language == language)
data3 = fullData_ConfidenceLowerBound_05 %>% filter(Language == language)
plot = ggplot(data, aes(x=Memory, y=LowerConfidenceBound, fill=Type, color=Type))
plot = plot + geom_line(size=1, linetype="dotted")
plot = plot + geom_line(data=data3, size=2, linetype="dashed")
plot = plot + geom_line(data=data2, aes(x=Memory, y=BetterEmpirical), size=2)
# format p-values for annotation; very small values become "p<0.00001"
data2 = data2 %>% mutate(pValue_print = ifelse(round(pValue,5) == 0, "p<0.00001", paste("p=", round(pValue,5), sep="")))
# annotate every 9th position to avoid label overplotting
plot = plot + geom_text(data=data2 %>% filter(Position %% 9 == 0, Type == "REAL_REAL"), aes(x=Memory, y=BetterEmpirical+0.1, label=pValue_print), size=3)
# NOTE(review): dead layer — GROUND rows were filtered out at load time above
plot = plot + geom_text(data=data2 %>% filter(Position %% 9 == 0, Type == "GROUND"), aes(x=Memory, y=BetterEmpirical+0.05, label=pValue_print), size=3)
plot = plot + theme_classic()
plot = plot + theme(legend.position="none")
plot = plot + ylim(0,1.1)
plot = plot + ylab("Quantile")
plot = plot + xlab("Memory")
ggsave(plot, file=paste("figures/",language,"-listener-surprisal-memory-QUANTILES_onlyWordForms_boundedVocab_REAL-equalOrBigger.pdf", sep=""), height=3.5, width=4.5)
return(plot)
}
# Generate one figure per language listed in languages.tsv.
languages = read.csv("languages.tsv", sep="\t")
for(language in languages$Language) {
cat(language, "\n")
memListenerSurpPlot_onlyWordForms_boundedVocab(language)
}
|
/code/analysis/visualize_neural/NOT_USED/plot_wordsOnly_quantile_onlyREAL_equalOrBigger.R
|
no_license
|
m-hahn/memory-surprisal
|
R
| false
| false
| 2,278
|
r
|
library(tidyr)
library(dplyr)
library(ggplot2)
# Plots CIs for the quantile.
# (Duplicate copy of the plotting script appearing earlier in this dump.)
# NOTE(review): GROUND rows are removed at load time, making the
# Type == "GROUND" annotation layer inside the function a no-op.
fullData_ConfidenceLowerBound = read.csv("../../../results/tradeoff/listener-curve-binomial-confidence-bound-quantile-noAssumption-equalOrBigger.tsv", sep="\t") %>% filter(Type != "GROUND")
fullData_ConfidenceLowerBound_05 = read.csv("../../../results/tradeoff/listener-curve-binomial-confidence-bound-quantile-noAssumption-05-equalOrBigger.tsv", sep="\t") %>% filter(Type != "GROUND")
fullData_BinomialTest = read.csv("../../../results/tradeoff/listener-curve-binomial-test-equalOrBigger.tsv", sep="\t") %>% filter(Type != "GROUND")
# Build and save the per-language memory-vs-quantile figure.
memListenerSurpPlot_onlyWordForms_boundedVocab = function(language) {
data = fullData_ConfidenceLowerBound %>% filter(Language == language)
data2 = fullData_BinomialTest %>% filter(Language == language)
data3 = fullData_ConfidenceLowerBound_05 %>% filter(Language == language)
plot = ggplot(data, aes(x=Memory, y=LowerConfidenceBound, fill=Type, color=Type))
plot = plot + geom_line(size=1, linetype="dotted")
plot = plot + geom_line(data=data3, size=2, linetype="dashed")
plot = plot + geom_line(data=data2, aes(x=Memory, y=BetterEmpirical), size=2)
# format p-values; tiny values print as "p<0.00001"
data2 = data2 %>% mutate(pValue_print = ifelse(round(pValue,5) == 0, "p<0.00001", paste("p=", round(pValue,5), sep="")))
plot = plot + geom_text(data=data2 %>% filter(Position %% 9 == 0, Type == "REAL_REAL"), aes(x=Memory, y=BetterEmpirical+0.1, label=pValue_print), size=3)
# dead layer: no GROUND rows survive the load-time filter
plot = plot + geom_text(data=data2 %>% filter(Position %% 9 == 0, Type == "GROUND"), aes(x=Memory, y=BetterEmpirical+0.05, label=pValue_print), size=3)
plot = plot + theme_classic()
plot = plot + theme(legend.position="none")
plot = plot + ylim(0,1.1)
plot = plot + ylab("Quantile")
plot = plot + xlab("Memory")
ggsave(plot, file=paste("figures/",language,"-listener-surprisal-memory-QUANTILES_onlyWordForms_boundedVocab_REAL-equalOrBigger.pdf", sep=""), height=3.5, width=4.5)
return(plot)
}
# Iterate over languages listed in languages.tsv, producing one PDF each.
languages = read.csv("languages.tsv", sep="\t")
for(language in languages$Language) {
cat(language, "\n")
memListenerSurpPlot_onlyWordForms_boundedVocab(language)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/style_guides.R
\name{create_style_guide}
\alias{create_style_guide}
\title{Create a style guide}
\usage{
create_style_guide(initialize = initialize_attributes, line_break = NULL,
space = NULL, token = NULL, indention = NULL,
use_raw_indention = FALSE, reindention = tidyverse_reindention())
}
\arguments{
\item{initialize}{A function that initializes various variables on each
level of nesting.}
\item{line_break}{A list of transformer functions that manipulate line_break
information.}
\item{space}{A list of transformer functions that manipulate spacing
information.}
\item{token}{A list of transformer functions that manipulate token text.}
\item{indention}{A list of transformer functions that manipulate indention.}
\item{use_raw_indention}{Boolean indicating whether or not the raw indention
should be used.}
\item{reindention}{A list of parameters for regex re-indention, most
conveniently constructed using \code{\link[=specify_reindention]{specify_reindention()}}.}
}
\description{
This is a helper function to create a style guide, which is technically
speaking a named list of groups of transformer functions where each
transformer function corresponds to one styling rule. The output of this
function can be used as an argument for \code{style} in top level functions
like \code{\link[=style_text]{style_text()}} and friends.
}
|
/man/create_style_guide.Rd
|
no_license
|
applied-statistic-using-r/styler
|
R
| false
| true
| 1,428
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/style_guides.R
\name{create_style_guide}
\alias{create_style_guide}
\title{Create a style guide}
\usage{
create_style_guide(initialize = initialize_attributes, line_break = NULL,
space = NULL, token = NULL, indention = NULL,
use_raw_indention = FALSE, reindention = tidyverse_reindention())
}
\arguments{
\item{initialize}{A function that initializes various variables on each
level of nesting.}
\item{line_break}{A list of transformer functions that manipulate line_break
information.}
\item{space}{A list of transformer functions that manipulate spacing
information.}
\item{token}{A list of transformer functions that manipulate token text.}
\item{indention}{A list of transformer functions that manipulate indention.}
\item{use_raw_indention}{Boolean indicating whether or not the raw indention
should be used.}
\item{reindention}{A list of parameters for regex re-indention, most
conveniently constructed using \code{\link[=specify_reindention]{specify_reindention()}}.}
}
\description{
This is a helper function to create a style guide, which is technically
speaking a named list of groups of transformer functions where each
transformer function corresponds to one styling rule. The output of this
function can be used as an argument for \code{style} in top level functions
like \code{\link[=style_text]{style_text()}} and friends.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-movie.R
\docType{data}
\name{movie_508}
\alias{movie_508}
\title{A Nightmare on Elm Street}
\format{
igraph object
}
\source{
https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3
https://www.imdb.com/title/tt0087800
}
\usage{
movie_508
}
\description{
Interactions of characters in the movie "A Nightmare on Elm Street" (1984)
}
\details{
The networks were built with a movie script parser. Even after multiple manual checks, the data set can still contain minor errors (e.g. typos in character names or wrongly parsed names). This may require some additional manual checks before using the data. Please report any such issues (https://github.com/schochastics/networkdata/issues/)
}
\references{
Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, CΓ©sar, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3
}
\keyword{datasets}
|
/man/movie_508.Rd
|
permissive
|
schochastics/networkdata
|
R
| false
| true
| 1,033
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-movie.R
\docType{data}
\name{movie_508}
\alias{movie_508}
\title{A Nightmare on Elm Street}
\format{
igraph object
}
\source{
https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3
https://www.imdb.com/title/tt0087800
}
\usage{
movie_508
}
\description{
Interactions of characters in the movie "A Nightmare on Elm Street" (1984)
}
\details{
The networks were built with a movie script parser. Even after multiple manual checks, the data set can still contain minor errors (e.g. typos in character names or wrongly parsed names). This may require some additional manual checks before using the data. Please report any such issues (https://github.com/schochastics/networkdata/issues/)
}
\references{
Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, CΓ©sar, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/color.spectro.R
\name{color.spectro}
\alias{color.spectro}
\title{Highlight spectrogram regions}
\usage{
color.spectro(wave, wl = 512, wn = "hanning", ovlp = 70,
dB = "max0", collevels = NULL, selec.col = "red2", col.clm = NULL,
base.col = "black", bg.col = "white", cexlab = 1, cexaxis = 1, tlab = "Time (s)",
flab = "Frequency (kHz)", title = NULL, axisX = TRUE, axisY = TRUE,
flim = NULL, rm.zero = FALSE, X = NULL, fast.spec = FALSE, t.mar = NULL, f.mar = NULL,
interactive = NULL, add = FALSE)
}
\arguments{
\item{wave}{A 'wave' object produced by \code{\link[tuneR]{readWave}} or similar functions.}
\item{wl}{A numeric vector of length 1 specifying the window length of the spectrogram. Default
is 512.}
\item{wn}{Character vector of length 1 specifying window name. Default is
"hanning". See function \code{\link[seewave]{ftwindow}} for more options.}
\item{ovlp}{Numeric vector of length 1 specifying the percent overlap between two
consecutive windows, as in \code{\link[seewave]{spectro}}. Default is 70.}
\item{dB}{Character vector of length 1 controlling the amplitude weights as in
\code{\link[seewave]{spectro}}. Default is 'max0'.}
\item{collevels}{Numeric. Levels used to partition amplitude range as in \code{\link[seewave]{spectro}}.
Default is \code{NULL}.}
\item{selec.col}{Character vector of length 1 specifying the color to be used to highlight selection.
See 'col.clm' for specifying unique colors for each selection. Default is 'red2'. Ignored if 'col.clm'
and 'X' are provided.}
\item{col.clm}{Character vector of length 1 indicating the name of the column in 'X' that contains the
color names for each selection. Ignored if \code{X == NULL} or \code{interactive != NULL}. Default is \code{NULL}.}
\item{base.col}{Character vector of length 1 specifying the color of the background spectrogram.
Default is 'black'.}
\item{bg.col}{Character vector of length 1 specifying the background color for both base
and highlighted spectrograms. Default is 'white'.}
\item{cexlab}{Numeric vector of length 1 specifying the relative size of axis
labels. See \code{\link[seewave]{spectro}}. Default is 1.}
\item{cexaxis}{Numeric vector of length 1 specifying the relative size of axis. See
\code{\link[seewave]{spectro}}. Default is 1.}
\item{tlab}{Character vector of length 1 specifying the label of the time axis.}
\item{flab}{Character vector of length 1 specifying the label of the frequency axis.}
\item{title}{Logical argument to add a title to individual spectrograms.
Default is \code{TRUE}.}
\item{axisX}{Logical to control whether time axis is plotted. Default is \code{TRUE}.}
\item{axisY}{Logical to control whether frequency axis is plotted. Default is \code{TRUE}.}
\item{flim}{A numeric vector of length 2 for the frequency limit (in kHz) of
the spectrogram, as in \code{\link[seewave]{spectro}}. Default is \code{NULL}.}
\item{rm.zero}{Logical indicated if the 0 at the start of the time axis should be removed. Default is \code{FALSE}.}
\item{X}{Optional. Data frame containing columns for start and end time of signals ('start' and 'end') and low and high frequency ('bottom.freq' and 'top.freq').}
\item{fast.spec}{Logical. If \code{TRUE} then image function is used internally to create spectrograms, which substantially
increases performance (much faster), although some options become unavailable, as collevels, and sc (amplitude scale).
This option is indicated for signals with high background noise levels. Palette colors \code{\link[monitoR]{gray.1}}, \code{\link[monitoR]{gray.2}},
\code{\link[monitoR]{gray.3}}, \code{\link[monitoR]{topo.1}} and \code{\link[monitoR]{rainbow.1}} (which should be imported from the package monitoR) seem
to work better with 'fast' spectrograms. Palette colors \code{\link[monitoR]{gray.1}}, \code{\link[monitoR]{gray.2}},
\code{\link[monitoR]{gray.3}} offer
decreasing darkness levels.}
\item{t.mar}{Numeric vector of length 1. Specifies the margins adjacent to the start and end points to be added when highlighting selection. Default is \code{NULL}.}
\item{f.mar}{Numeric vector of length 1. Specifies the margins adjacent to the low and high frequencies to be added when highlighting selection. Default is \code{NULL}.}
\item{interactive}{Numeric. Allow user to interactively select the signals to be highlighted by clicking
on the graphic device. Users must select the opposite corners of a square delimiting the spectrogram region
to be highlighted. Controls the number of signals that users would be able to select (2 clicks per signal).}
\item{add}{Logical. If \code{TRUE} new highlighting can be applied to the current plot (which means
that the function with \code{add = FALSE} should be run first). Default is \code{FALSE}.}
}
\value{
A plot is produced in the graphic device.
}
\description{
\code{color.spectro} highlights spectrogram regions specified by users
}
\details{
This function highlights regions of the spectrogram with different colors. The regions to be
highlighted can be provided in a selection table (as the example data 'lbh_selec_table') or interactively ('interactive' argument).
}
\examples{
\dontrun{
data(list = c("Phae.long1", "lbh_selec_table"))
writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav")) #save sound files
# subset selection table
st <- lbh_selec_table[lbh_selec_table$sound.files == "Phae.long1.wav",]
# read wave file as an R object
sgnl <- tuneR::readWave(file.path(tempdir(), st$sound.files[1]))
# create color column
st$colors <- c("red2", "blue", "green")
# highlight selections
color.spectro(wave = sgnl, wl = 300, ovlp = 90, flim = c(1, 8.6), collevels = seq(-90, 0, 5),
dB = "B", X = st, col.clm = "colors", base.col = "skyblue", t.mar = 0.07, f.mar = 0.1,
interactive = NULL)
# interactive (selected manually: you have to select them by clicking on the spectrogram)
color.spectro(wave = sgnl, wl = 300, ovlp = 90, flim = c(1, 8.6), collevels = seq(-90, 0, 5),
dB = "B", col.clm = "colors", t.mar = 0.07, f.mar = 1, interactive = 2)
}
}
\references{
{Araya-Salas, M., & Smith-Vidaurre, G. (2017). warbleR: An R package to streamline analysis of animal acoustic signals. Methods in Ecology and Evolution, 8(2), 184-191.}
}
\seealso{
\code{\link{trackfreqs}} for creating spectrograms to visualize
frequency measurements by \code{\link{specan}}, \code{\link{snrspecs}} for
creating spectrograms to optimize noise margins used in \code{\link{sig2noise}}
Other spectrogram creators:
\code{\link{dfDTW}()},
\code{\link{dfts}()},
\code{\link{ffDTW}()},
\code{\link{ffts}()},
\code{\link{multi_DTW}()},
\code{\link{phylo_spectro}()},
\code{\link{snrspecs}()},
\code{\link{sp.en.ts}()},
\code{\link{specreator}()},
\code{\link{trackfreqs}()}
}
\author{
Marcelo Araya-Salas (\email{marceloa27@gmail.com}) and Grace Smith Vidaurre
}
\concept{spectrogram creators}
|
/man/color.spectro.Rd
|
no_license
|
edwbaker/warbleR
|
R
| false
| true
| 6,952
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/color.spectro.R
\name{color.spectro}
\alias{color.spectro}
\title{Highlight spectrogram regions}
\usage{
color.spectro(wave, wl = 512, wn = "hanning", ovlp = 70,
dB = "max0", collevels = NULL, selec.col = "red2", col.clm = NULL,
base.col = "black", bg.col = "white", cexlab = 1, cexaxis = 1, tlab = "Time (s)",
flab = "Frequency (kHz)", title = NULL, axisX = TRUE, axisY = TRUE,
flim = NULL, rm.zero = FALSE, X = NULL, fast.spec = FALSE, t.mar = NULL, f.mar = NULL,
interactive = NULL, add = FALSE)
}
\arguments{
\item{wave}{A 'wave' object produced by \code{\link[tuneR]{readWave}} or similar functions.}
\item{wl}{A numeric vector of length 1 specifying the window length of the spectrogram. Default
is 512.}
\item{wn}{Character vector of length 1 specifying window name. Default is
"hanning". See function \code{\link[seewave]{ftwindow}} for more options.}
\item{ovlp}{Numeric vector of length 1 specifying the percent overlap between two
consecutive windows, as in \code{\link[seewave]{spectro}}. Default is 70.}
\item{dB}{Character vector of length 1 controlling the amplitude weights as in
\code{\link[seewave]{spectro}}. Default is 'max0'.}
\item{collevels}{Numeric. Levels used to partition amplitude range as in \code{\link[seewave]{spectro}}.
Default is \code{NULL}.}
\item{selec.col}{Character vector of length 1 specifying the color to be used to highlight selection.
See 'col.clm' for specifying unique colors for each selection. Default is 'red2'. Ignored if 'col.clm'
and 'X' are provided.}
\item{col.clm}{Character vector of length 1 indicating the name of the column in 'X' that contains the
color names for each selection. Ignored if \code{X == NULL} or \code{interactive != NULL}. Default is \code{NULL}.}
\item{base.col}{Character vector of length 1 specifying the color of the background spectrogram.
Default is 'black'.}
\item{bg.col}{Character vector of length 1 specifying the background color for both base
and highlighted spectrograms. Default is 'white'.}
\item{cexlab}{Numeric vector of length 1 specifying the relative size of axis
labels. See \code{\link[seewave]{spectro}}. Default is 1.}
\item{cexaxis}{Numeric vector of length 1 specifying the relative size of axis. See
\code{\link[seewave]{spectro}}. Default is 1.}
\item{tlab}{Character vector of length 1 specifying the label of the time axis.}
\item{flab}{Character vector of length 1 specifying the label of the frequency axis.}
\item{title}{Logical argument to add a title to individual spectrograms.
Default is \code{TRUE}.}
\item{axisX}{Logical to control whether time axis is plotted. Default is \code{TRUE}.}
\item{axisY}{Logical to control whether frequency axis is plotted. Default is \code{TRUE}.}
\item{flim}{A numeric vector of length 2 for the frequency limit (in kHz) of
the spectrogram, as in \code{\link[seewave]{spectro}}. Default is \code{NULL}.}
\item{rm.zero}{Logical indicated if the 0 at the start of the time axis should be removed. Default is \code{FALSE}.}
\item{X}{Optional. Data frame containing columns for start and end time of signals ('start' and 'end') and low and high frequency ('bottom.freq' and 'top.freq').}
\item{fast.spec}{Logical. If \code{TRUE} then image function is used internally to create spectrograms, which substantially
increases performance (much faster), although some options become unavailable, as collevels, and sc (amplitude scale).
This option is indicated for signals with high background noise levels. Palette colors \code{\link[monitoR]{gray.1}}, \code{\link[monitoR]{gray.2}},
\code{\link[monitoR]{gray.3}}, \code{\link[monitoR]{topo.1}} and \code{\link[monitoR]{rainbow.1}} (which should be imported from the package monitoR) seem
to work better with 'fast' spectrograms. Palette colors \code{\link[monitoR]{gray.1}}, \code{\link[monitoR]{gray.2}},
\code{\link[monitoR]{gray.3}} offer
decreasing darkness levels.}
\item{t.mar}{Numeric vector of length 1. Specifies the margins adjacent to the start and end points to be added when highlighting selection. Default is \code{NULL}.}
\item{f.mar}{Numeric vector of length 1. Specifies the margins adjacent to the low and high frequencies to be added when highlighting selection. Default is \code{NULL}.}
\item{interactive}{Numeric. Allow user to interactively select the signals to be highlighted by clicking
on the graphic device. Users must select the opposite corners of a square delimiting the spectrogram region
to be highlighted. Controls the number of signals that users would be able to select (2 clicks per signal).}
\item{add}{Logical. If \code{TRUE} new highlighting can be applied to the current plot (which means
that the function with \code{add = FALSE} should be run first). Default is \code{FALSE}.}
}
\value{
A plot is produced in the graphic device.
}
\description{
\code{color.spectro} highlights spectrogram regions specified by users
}
\details{
This function highlights regions of the spectrogram with different colors. The regions to be
highlighted can be provided in a selection table (as the example data 'lbh_selec_table') or interactively ('interactive' argument).
}
\examples{
\dontrun{
data(list = c("Phae.long1", "lbh_selec_table"))
writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav")) #save sound files
# subset selection table
st <- lbh_selec_table[lbh_selec_table$sound.files == "Phae.long1.wav",]
# read wave file as an R object
sgnl <- tuneR::readWave(file.path(tempdir(), st$sound.files[1]))
# create color column
st$colors <- c("red2", "blue", "green")
# highlight selections
color.spectro(wave = sgnl, wl = 300, ovlp = 90, flim = c(1, 8.6), collevels = seq(-90, 0, 5),
dB = "B", X = st, col.clm = "colors", base.col = "skyblue", t.mar = 0.07, f.mar = 0.1,
interactive = NULL)
# interactive (selected manually: you have to select them by clicking on the spectrogram)
color.spectro(wave = sgnl, wl = 300, ovlp = 90, flim = c(1, 8.6), collevels = seq(-90, 0, 5),
dB = "B", col.clm = "colors", t.mar = 0.07, f.mar = 1, interactive = 2)
}
}
\references{
{Araya-Salas, M., & Smith-Vidaurre, G. (2017). warbleR: An R package to streamline analysis of animal acoustic signals. Methods in Ecology and Evolution, 8(2), 184-191.}
}
\seealso{
\code{\link{trackfreqs}} for creating spectrograms to visualize
frequency measurements by \code{\link{specan}}, \code{\link{snrspecs}} for
creating spectrograms to optimize noise margins used in \code{\link{sig2noise}}
Other spectrogram creators:
\code{\link{dfDTW}()},
\code{\link{dfts}()},
\code{\link{ffDTW}()},
\code{\link{ffts}()},
\code{\link{multi_DTW}()},
\code{\link{phylo_spectro}()},
\code{\link{snrspecs}()},
\code{\link{sp.en.ts}()},
\code{\link{specreator}()},
\code{\link{trackfreqs}()}
}
\author{
Marcelo Araya-Salas (\email{marceloa27@gmail.com}) and Grace Smith Vidaurre
}
\concept{spectrogram creators}
|
# Multiple imputation of the 'capacidades' and 'vulnerabilidades' data sets
# using predictive mean matching (PMM) from the mice package.
# NOTE(review): both data sets are assumed to be created by ../utils.R or to
# already exist in the workspace -- confirm before running.
library(mice)
source("../utils.R")

# --- capacidades ------------------------------------------------------------
# Inspect the missing-data pattern before imputing.
md.pattern(capacidades)
# 5 imputed data sets, PMM method, 10 iterations, fixed seed so the
# imputations are reproducible.
# NOTE(review): 'c' shadows base::c() from here on -- consider renaming.
c <- mice(data = capacidades, m=5, method = "pmm", maxit = 10, seed = 500)
# Extract each of the five completed (imputed) data sets.
c1 <- complete(c,1)
c2 <- complete(c,2)
c3 <- complete(c,3)
c4 <- complete(c,4)
c5 <- complete(c,5)
# Predictor matrix: which variables were used to impute which.
c_predictor_matrix <- c$predictorMatrix
# Chain means/variances, printed to eyeball convergence of the sampler.
c$chainMean
c$chainVar
# Imputed values for each variable that had missing entries.
imp_c1 <- c$imp$complejidad_2014
imp_c2 <- c$imp$flexibilidad_financiera
imp_c3 <- c$imp$capacidad_inversion
imp_c4 <- c$imp$n_usuarios
imp_c5 <- c$imp$pct_viv_con_internet
imp_c6 <- c$imp$densidad_camas
imp_c7 <- c$imp$densidad_hospitales
imp_c8 <- c$imp$densidad_medicos

# --- vulnerabilidades -------------------------------------------------------
# Same workflow as above, applied to the vulnerability indicators.
md.pattern(vulnerabilidades)
v <- mice(data = vulnerabilidades, m = 5, method = "pmm", maxit = 10, seed = 500)
v1 <- complete(v,1)
v2 <- complete(v,2)
v3 <- complete(v,3)
v4 <- complete(v,4)
v5 <- complete(v,5)
imp_v1 <- v$imp$ic_rezedu_porcentaje
imp_v2 <- v$imp$ic_asalud_porcentaje
imp_v3 <- v$imp$ic_segsoc_porcentaje
imp_v4 <- v$imp$ic_cv_porcentaje
imp_v5 <- v$imp$ic_sbv_porcentaje
imp_v6 <- v$imp$pobreza_porcentaje
imp_v7 <- v$imp$vul_ing_porcentaje
v_predictor_matrix <- v$predictorMatrix
v$chainMean
v$chainVar

# NOTE(review): 'imp_tot2' and 'col' are not defined anywhere in this script
# (this line looks copied from the mice vignette) -- it will fail as written.
stripplot(y1 ~ .imp, data = imp_tot2, jit = TRUE, col = col, xlab = "imputation Number")

# 2. Capping
# For missing values that lie outside the 1.5 * IQR limits, we could cap it by replacing those
# observations outside the lower limit with the value of 5th %ile and those that lie above the
# upper limit, with the value of 95th %ile. Below is a sample code that achieves this.
#x <- vulnerabilidades$brecha_horas_cuidados
#qnt <- quantile(x, probs=c(.25, .75), na.rm = T)
#caps <- quantile(x, probs=c(.05, .95), na.rm = T)
#H <- 1.5 * IQR(x, na.rm = T)
#x[x < (qnt[1] - H)] <- caps[1]
#x[x > (qnt[2] + H)] <- caps[2]

# Example PCA with tidyverse (kept for reference, commented out).
#inform_pca <- select_if(inform_mun,is.numeric) %>%
# nest() %>%
# mutate(pca = map(data, ~ prcomp(.x,
# center = TRUE, scale = TRUE)),
# pca_aug = map2(pca, data, ~augment(.x, data = .y)))
|
/notebooks/pruebas_input_agg.R
|
no_license
|
plataformapreventiva/inform-mexico
|
R
| false
| false
| 1,996
|
r
|
# Multiple imputation of the 'capacidades' and 'vulnerabilidades' data sets
# using predictive mean matching (PMM) from the mice package.
# NOTE(review): both data sets are assumed to be created by ../utils.R or to
# already exist in the workspace -- confirm before running.
library(mice)
source("../utils.R")

# --- capacidades ------------------------------------------------------------
# Inspect the missing-data pattern before imputing.
md.pattern(capacidades)
# 5 imputed data sets, PMM method, 10 iterations, fixed seed so the
# imputations are reproducible.
# NOTE(review): 'c' shadows base::c() from here on -- consider renaming.
c <- mice(data = capacidades, m=5, method = "pmm", maxit = 10, seed = 500)
# Extract each of the five completed (imputed) data sets.
c1 <- complete(c,1)
c2 <- complete(c,2)
c3 <- complete(c,3)
c4 <- complete(c,4)
c5 <- complete(c,5)
# Predictor matrix: which variables were used to impute which.
c_predictor_matrix <- c$predictorMatrix
# Chain means/variances, printed to eyeball convergence of the sampler.
c$chainMean
c$chainVar
# Imputed values for each variable that had missing entries.
imp_c1 <- c$imp$complejidad_2014
imp_c2 <- c$imp$flexibilidad_financiera
imp_c3 <- c$imp$capacidad_inversion
imp_c4 <- c$imp$n_usuarios
imp_c5 <- c$imp$pct_viv_con_internet
imp_c6 <- c$imp$densidad_camas
imp_c7 <- c$imp$densidad_hospitales
imp_c8 <- c$imp$densidad_medicos

# --- vulnerabilidades -------------------------------------------------------
# Same workflow as above, applied to the vulnerability indicators.
md.pattern(vulnerabilidades)
v <- mice(data = vulnerabilidades, m = 5, method = "pmm", maxit = 10, seed = 500)
v1 <- complete(v,1)
v2 <- complete(v,2)
v3 <- complete(v,3)
v4 <- complete(v,4)
v5 <- complete(v,5)
imp_v1 <- v$imp$ic_rezedu_porcentaje
imp_v2 <- v$imp$ic_asalud_porcentaje
imp_v3 <- v$imp$ic_segsoc_porcentaje
imp_v4 <- v$imp$ic_cv_porcentaje
imp_v5 <- v$imp$ic_sbv_porcentaje
imp_v6 <- v$imp$pobreza_porcentaje
imp_v7 <- v$imp$vul_ing_porcentaje
v_predictor_matrix <- v$predictorMatrix
v$chainMean
v$chainVar

# NOTE(review): 'imp_tot2' and 'col' are not defined anywhere in this script
# (this line looks copied from the mice vignette) -- it will fail as written.
stripplot(y1 ~ .imp, data = imp_tot2, jit = TRUE, col = col, xlab = "imputation Number")

# 2. Capping
# For missing values that lie outside the 1.5 * IQR limits, we could cap it by replacing those
# observations outside the lower limit with the value of 5th %ile and those that lie above the
# upper limit, with the value of 95th %ile. Below is a sample code that achieves this.
#x <- vulnerabilidades$brecha_horas_cuidados
#qnt <- quantile(x, probs=c(.25, .75), na.rm = T)
#caps <- quantile(x, probs=c(.05, .95), na.rm = T)
#H <- 1.5 * IQR(x, na.rm = T)
#x[x < (qnt[1] - H)] <- caps[1]
#x[x > (qnt[2] + H)] <- caps[2]

# Example PCA with tidyverse (kept for reference, commented out).
#inform_pca <- select_if(inform_mun,is.numeric) %>%
# nest() %>%
# mutate(pca = map(data, ~ prcomp(.x,
# center = TRUE, scale = TRUE)),
# pca_aug = map2(pca, data, ~augment(.x, data = .y)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tcgacompare.R
\name{tcgaCompare}
\alias{tcgaCompare}
\title{Compare mutation load against TCGA cohorts}
\usage{
tcgaCompare(
maf,
capture_size = NULL,
tcga_capture_size = 50,
cohortName = NULL,
tcga_cohorts = NULL,
primarySite = FALSE,
col = c("gray70", "black"),
bg_col = c("#EDF8B1", "#2C7FB8"),
medianCol = "red",
decreasing = FALSE,
logscale = TRUE,
rm_hyper = FALSE
)
}
\arguments{
\item{maf}{\code{\link{MAF}} object(s) generated by \code{\link{read.maf}}}
\item{capture_size}{capture size for input MAF in MBs. Default NULL. If provided plot will be scaled to mutations per mb. TCGA capture size is assumed to be 50mb.}
\item{tcga_capture_size}{capture size for TCGA cohort in MB. Default 50}
\item{cohortName}{name for the input MAF cohort. Default "Input"}
\item{tcga_cohorts}{restrict tcga data to these cohorts.}
\item{primarySite}{If TRUE uses primary site of cancer as labels instead of TCGA project IDs. Default FALSE.}
\item{col}{color vector for length 2 TCGA cohorts and input MAF cohort. Default gray70 and black.}
\item{bg_col}{background color. Default'#EDF8B1', '#2C7FB8'}
\item{medianCol}{color for median line. Default red.}
\item{decreasing}{Default FALSE. Cohorts are arranged in increasing mutation burden.}
\item{logscale}{Default TRUE}
\item{rm_hyper}{Remove hyper mutated samples (outliers)? Default FALSE}
}
\value{
data.table with median mutations per cohort
}
\description{
Compares mutation load in input MAF against all of 33 TCGA cohorts derived from MC3 project.
}
\examples{
laml.maf <- system.file("extdata", "tcga_laml.maf.gz", package = "maftools")
laml <- read.maf(maf = laml.maf)
tcgaCompare(maf = laml, cohortName = "AML")
}
|
/man/tcgaCompare.Rd
|
permissive
|
kaigu1990/maftools
|
R
| false
| true
| 1,781
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tcgacompare.R
\name{tcgaCompare}
\alias{tcgaCompare}
\title{Compare mutation load against TCGA cohorts}
\usage{
tcgaCompare(
maf,
capture_size = NULL,
tcga_capture_size = 50,
cohortName = NULL,
tcga_cohorts = NULL,
primarySite = FALSE,
col = c("gray70", "black"),
bg_col = c("#EDF8B1", "#2C7FB8"),
medianCol = "red",
decreasing = FALSE,
logscale = TRUE,
rm_hyper = FALSE
)
}
\arguments{
\item{maf}{\code{\link{MAF}} object(s) generated by \code{\link{read.maf}}}
\item{capture_size}{capture size for input MAF in MBs. Default NULL. If provided plot will be scaled to mutations per mb. TCGA capture size is assumed to be 50mb.}
\item{tcga_capture_size}{capture size for TCGA cohort in MB. Default 50}
\item{cohortName}{name for the input MAF cohort. Default "Input"}
\item{tcga_cohorts}{restrict tcga data to these cohorts.}
\item{primarySite}{If TRUE uses primary site of cancer as labels instead of TCGA project IDs. Default FALSE.}
\item{col}{color vector for length 2 TCGA cohorts and input MAF cohort. Default gray70 and black.}
\item{bg_col}{background color. Default'#EDF8B1', '#2C7FB8'}
\item{medianCol}{color for median line. Default red.}
\item{decreasing}{Default FALSE. Cohorts are arranged in increasing mutation burden.}
\item{logscale}{Default TRUE}
\item{rm_hyper}{Remove hyper mutated samples (outliers)? Default FALSE}
}
\value{
data.table with median mutations per cohort
}
\description{
Compares mutation load in input MAF against all of 33 TCGA cohorts derived from MC3 project.
}
\examples{
laml.maf <- system.file("extdata", "tcga_laml.maf.gz", package = "maftools")
laml <- read.maf(maf = laml.maf)
tcgaCompare(maf = laml, cohortName = "AML")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfareporting_functions.R
\name{floodlightActivities.delete}
\alias{floodlightActivities.delete}
\title{Deletes an existing floodlight activity.}
\usage{
floodlightActivities.delete(profileId, id)
}
\arguments{
\item{profileId}{User profile ID associated with this request}
\item{id}{Floodlight activity ID}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/dfatrafficking
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/dfatrafficking"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/doubleclick-advertisers/reporting/}{Google Documentation}
}
|
/googledfareportingv24.auto/man/floodlightActivities.delete.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 918
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfareporting_functions.R
\name{floodlightActivities.delete}
\alias{floodlightActivities.delete}
\title{Deletes an existing floodlight activity.}
\usage{
floodlightActivities.delete(profileId, id)
}
\arguments{
\item{profileId}{User profile ID associated with this request}
\item{id}{Floodlight activity ID}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/dfatrafficking
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/dfatrafficking"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/doubleclick-advertisers/reporting/}{Google Documentation}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/breusch_pagan.R
\name{breusch_pagan}
\alias{breusch_pagan}
\title{Breusch-Pagan Test for Heteroskedasticity in a Linear Regression Model}
\usage{
breusch_pagan(mainlm, auxdesign = NA, koenker = TRUE, statonly = FALSE)
}
\arguments{
\item{mainlm}{Either an object of \code{\link[base]{class}} \code{"lm"}
(e.g., generated by \code{\link[stats]{lm}}), or
a list of two objects: a response vector and a design matrix. The objects
are assumed to be in that order, unless they are given the names
\code{"X"} and \code{"y"} to distinguish them. The design matrix passed
in a list must begin with a column of ones if an intercept is to be
included in the linear model. The design matrix passed in a list should
not contain factors, as all columns are treated 'as is'. For tests that
use ordinary least squares residuals, one can also pass a vector of
residuals in the list, which should either be the third object or be
named \code{"e"}.}
\item{auxdesign}{A \code{\link[base]{data.frame}} or
\code{\link[base]{matrix}} representing an auxiliary design matrix
containing exogenous variables that (under the alternative hypothesis) are
related to error variance, or a character "fitted.values" indicating
that the fitted \eqn{\hat{y}_i} values from OLS should be used.
If set to \code{NA} (the default), the
design matrix of the original regression model is used. An intercept
is included in the auxiliary regression even if the first column of
\code{auxdesign} is not a vector of ones.}
\item{koenker}{A logical. Should studentising modification of
\insertCite{Koenker81;textual}{skedastic} be implemented? Defaults to
\code{TRUE}; if \code{FALSE}, the original form of the test proposed by
\insertCite{Breusch79;textual}{skedastic} is used.}
\item{statonly}{A logical. If \code{TRUE}, only the test statistic value
is returned, instead of an object of \code{\link[base]{class}}
\code{"htest"}. Defaults to \code{FALSE}.}
}
\value{
An object of \code{\link[base]{class}} \code{"htest"}. If object
is not assigned, its attributes are displayed in the console as a
\code{\link[tibble]{tibble}} using \code{\link[broom]{tidy}}.
}
\description{
This function implements the popular method of
\insertCite{Breusch79;textual}{skedastic} for testing for
heteroskedasticity in a linear regression model, with or without the
studentising modification of \insertCite{Koenker81;textual}{skedastic}.
}
\details{
The Breusch-Pagan Test entails fitting an auxiliary regression
model in which the response variable is the vector of squared residuals
from the original model and the design matrix \eqn{Z} consists of one or
more exogenous variables that are suspected of being related to the error
variance. In the absence of prior information on a possible choice of
\eqn{Z}, one would typically use the explanatory variables from the
original model. Under the null hypothesis of homoskedasticity, the
distribution of the test statistic is asymptotically chi-squared with
\code{parameter} degrees of freedom. The test is right-tailed.
}
\examples{
mtcars_lm <- lm(mpg ~ wt + qsec + am, data = mtcars)
breusch_pagan(mtcars_lm)
breusch_pagan(mtcars_lm, koenker = FALSE)
# Same as first example
mtcars_list <- list("y" = mtcars$mpg, "X" = cbind(1, mtcars$wt, mtcars$qsec, mtcars$am))
breusch_pagan(mtcars_list)
}
\references{
{\insertAllCited{}}
}
\seealso{
\code{\link[lmtest:bptest]{lmtest::bptest}}, which performs exactly
the same test as this function; \code{\link[car:ncvTest]{car::ncvTest}},
which is not the same test but is implemented in
\code{\link{cook_weisberg}}; \code{\link{white}}, which is a special
case of the Breusch-Pagan Test.
}
|
/man/breusch_pagan.Rd
|
no_license
|
tjfarrar/skedastic
|
R
| false
| true
| 3,757
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/breusch_pagan.R
\name{breusch_pagan}
\alias{breusch_pagan}
\title{Breusch-Pagan Test for Heteroskedasticity in a Linear Regression Model}
\usage{
breusch_pagan(mainlm, auxdesign = NA, koenker = TRUE, statonly = FALSE)
}
\arguments{
\item{mainlm}{Either an object of \code{\link[base]{class}} \code{"lm"}
(e.g., generated by \code{\link[stats]{lm}}), or
a list of two objects: a response vector and a design matrix. The objects
are assumed to be in that order, unless they are given the names
\code{"X"} and \code{"y"} to distinguish them. The design matrix passed
in a list must begin with a column of ones if an intercept is to be
included in the linear model. The design matrix passed in a list should
not contain factors, as all columns are treated 'as is'. For tests that
use ordinary least squares residuals, one can also pass a vector of
residuals in the list, which should either be the third object or be
named \code{"e"}.}
\item{auxdesign}{A \code{\link[base]{data.frame}} or
\code{\link[base]{matrix}} representing an auxiliary design matrix
containing exogenous variables that (under the alternative hypothesis) are
related to error variance, or a character "fitted.values" indicating
that the fitted \eqn{\hat{y}_i} values from OLS should be used.
If set to \code{NA} (the default), the
design matrix of the original regression model is used. An intercept
is included in the auxiliary regression even if the first column of
\code{auxdesign} is not a vector of ones.}
\item{koenker}{A logical. Should studentising modification of
\insertCite{Koenker81;textual}{skedastic} be implemented? Defaults to
\code{TRUE}; if \code{FALSE}, the original form of the test proposed by
\insertCite{Breusch79;textual}{skedastic} is used.}
\item{statonly}{A logical. If \code{TRUE}, only the test statistic value
is returned, instead of an object of \code{\link[base]{class}}
\code{"htest"}. Defaults to \code{FALSE}.}
}
\value{
An object of \code{\link[base]{class}} \code{"htest"}. If object
is not assigned, its attributes are displayed in the console as a
\code{\link[tibble]{tibble}} using \code{\link[broom]{tidy}}.
}
\description{
This function implements the popular method of
\insertCite{Breusch79;textual}{skedastic} for testing for
heteroskedasticity in a linear regression model, with or without the
studentising modification of \insertCite{Koenker81;textual}{skedastic}.
}
\details{
The Breusch-Pagan Test entails fitting an auxiliary regression
model in which the response variable is the vector of squared residuals
from the original model and the design matrix \eqn{Z} consists of one or
more exogenous variables that are suspected of being related to the error
variance. In the absence of prior information on a possible choice of
\eqn{Z}, one would typically use the explanatory variables from the
original model. Under the null hypothesis of homoskedasticity, the
distribution of the test statistic is asymptotically chi-squared with
\code{parameter} degrees of freedom. The test is right-tailed.
}
\examples{
mtcars_lm <- lm(mpg ~ wt + qsec + am, data = mtcars)
breusch_pagan(mtcars_lm)
breusch_pagan(mtcars_lm, koenker = FALSE)
# Same as first example
mtcars_list <- list("y" = mtcars$mpg, "X" = cbind(1, mtcars$wt, mtcars$qsec, mtcars$am))
breusch_pagan(mtcars_list)
}
\references{
{\insertAllCited{}}
}
\seealso{
\code{\link[lmtest:bptest]{lmtest::bptest}}, which performs exactly
the same test as this function; \code{\link[car:ncvTest]{car::ncvTest}},
which is not the same test but is implemented in
\code{\link{cook_weisberg}}; \code{\link{white}}, which is a special
case of the Breusch-Pagan Test.
}
|
# -----------------------------------------------------
# Homework 08
# 22 Mar 2021
# EKG
# -----------------------------------------------------
#
# Library ----------------------------------
library(ggplot2)
library(tidyr)
# Creating a Fake Data Set ----------------------------------
# based on the idea that whistle modulation increases as
# a function of number of tour boats present.
# data derived from: Perez-Ortega, Betzi, et al. "Dolphin-Watching Boats Affect Whistle Frequency Modulation in Bottlenose Dolphins." Frontiers in Marine Science 8 (2021): 102.
# NOTE(review): no set.seed() call, so the rnorm() draws -- and the ANOVA
# output quoted in the comments below -- are not reproducible across runs.
taxiBoat <- rnorm(n=8,mean=25.46,sd=27.10)
head(taxiBoat)
tourBoat <- rnorm(n=8,mean=234.10,sd=232)
head(tourBoat)
dataFrame <- data.frame(taxiBoat,tourBoat)
head(dataFrame)
# using tidyr::gather() to reshape wide -> long: one row per observation,
# with the group label (Boat.Type) and the measurement (PFC.Inflection.Pts).
# NOTE(review): gather() is superseded by pivot_longer(), but works as used.
orgFrame <- gather(dataFrame,Boat.Type,PFC.Inflection.Pts,
taxiBoat:tourBoat)
# Calculate Stuff ----------------------------------
# one-way ANOVA: does the mean inflection-point count differ by boat type?
ANOmodel <- aov(PFC.Inflection.Pts~Boat.Type,data=orgFrame)
print(ANOmodel)
print(summary(ANOmodel))
# Example output from one run (from a run with larger n than the n=8 above):
# print(summary(ANOmodel))
# Df Sum Sq Mean Sq F value Pr(>F)
# Boat.Type 1 4376550 4376550 174.5 <2e-16 ***
# Residuals 482 12086821 25076
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
z <- summary(ANOmodel)
str(z)
# group means per boat type
aggregate(PFC.Inflection.Pts~Boat.Type,data=orgFrame,FUN=mean)
unlist(z)
unlist(z)[7]
# NOTE(review): positions 7 and 9 of unlist(z) happen to hold the F value
# and Pr(>F) for this one-predictor model; this positional indexing is
# fragile and would break if the model formula changed.
ANOsum <- list(Fval=unlist(z)[7],probF=unlist(z)[9])
ANOsum
# Example output from one run:
# ANOsum
# $Fval
# F value1
# 174.5287
# $probF
# Pr(>F)1
# 3.174175e-34
# Plot Results ----------------------------------
# boxplot of inflection points by boat type, filled by group
ANOPlot <- ggplot(data=orgFrame,
aes(x=Boat.Type,
y=PFC.Inflection.Pts,
fill=Boat.Type)) +
geom_boxplot()
print(ANOPlot)
# Homework Questions ----------------------------------
# Now begin adjusting the means of the different groups. Given the sample sizes you have chosen, how small can the differences between the groups be (the "effect size") for you to still detect a significant pattern (p < 0.05)?
# this was the smallest difference while still detecting a significant pattern
# NOTE(review): these draws only redefine taxiBoat/tourBoat; the data frame
# and model are not rebuilt here -- the quoted output came from re-running
# the pipeline above with these values.
taxiBoat <- rnorm(n=242,mean=25.46,sd=27.10)
tourBoat <- rnorm(n=242,mean=29,sd=28)
# > print(summary(ANOmodel))
# Df Sum Sq Mean Sq F value Pr(>F)
# Boat.Type 1 4467 4467 5.465 0.0198 *
# Residuals 482 393982 817
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Alternatively, for the effect sizes you originally hypothesized, what is the minimum sample size you would need in order to detect a statistically significant effect? Again, run the model a few times with the same parameter set to get a feeling for the effect of random variation in the data.
# this was the smallest sample size while still detecting a significant pattern
taxiBoat <- rnorm(n=8,mean=25.46,sd=27.10)
tourBoat <- rnorm(n=8,mean=234.10,sd=232)
# print(summary(ANOmodel))
# Df Sum Sq Mean Sq F value Pr(>F)
# Boat.Type 1 262947 262947 5.11 0.0403 *
# Residuals 14 720448 51461
|
/Homework_08.R
|
permissive
|
emmagagne/GagneBio381
|
R
| false
| false
| 3,223
|
r
|
# -----------------------------------------------------
# Homework 08
# 22 Mar 2021
# EKG
# -----------------------------------------------------
#
# Library ----------------------------------
library(ggplot2)
library(tidyr)
# Creating a Fake Data Set ----------------------------------
# based on the idea that whistle modulation increases as
# a function of number of tour boats present.
# data derived from: Perez-Ortega, Betzi, et al. "Dolphin-Watching Boats Affect Whistle Frequency Modulation in Bottlenose Dolphins." Frontiers in Marine Science 8 (2021): 102.
# NOTE(review): no set.seed() call, so the rnorm() draws -- and the ANOVA
# output quoted in the comments below -- are not reproducible across runs.
taxiBoat <- rnorm(n=8,mean=25.46,sd=27.10)
head(taxiBoat)
tourBoat <- rnorm(n=8,mean=234.10,sd=232)
head(tourBoat)
dataFrame <- data.frame(taxiBoat,tourBoat)
head(dataFrame)
# using tidyr::gather() to reshape wide -> long: one row per observation,
# with the group label (Boat.Type) and the measurement (PFC.Inflection.Pts).
# NOTE(review): gather() is superseded by pivot_longer(), but works as used.
orgFrame <- gather(dataFrame,Boat.Type,PFC.Inflection.Pts,
taxiBoat:tourBoat)
# Calculate Stuff ----------------------------------
# one-way ANOVA: does the mean inflection-point count differ by boat type?
ANOmodel <- aov(PFC.Inflection.Pts~Boat.Type,data=orgFrame)
print(ANOmodel)
print(summary(ANOmodel))
# Example output from one run (from a run with larger n than the n=8 above):
# print(summary(ANOmodel))
# Df Sum Sq Mean Sq F value Pr(>F)
# Boat.Type 1 4376550 4376550 174.5 <2e-16 ***
# Residuals 482 12086821 25076
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
z <- summary(ANOmodel)
str(z)
# group means per boat type
aggregate(PFC.Inflection.Pts~Boat.Type,data=orgFrame,FUN=mean)
unlist(z)
unlist(z)[7]
# NOTE(review): positions 7 and 9 of unlist(z) happen to hold the F value
# and Pr(>F) for this one-predictor model; this positional indexing is
# fragile and would break if the model formula changed.
ANOsum <- list(Fval=unlist(z)[7],probF=unlist(z)[9])
ANOsum
# Example output from one run:
# ANOsum
# $Fval
# F value1
# 174.5287
# $probF
# Pr(>F)1
# 3.174175e-34
# Plot Results ----------------------------------
# boxplot of inflection points by boat type, filled by group
ANOPlot <- ggplot(data=orgFrame,
aes(x=Boat.Type,
y=PFC.Inflection.Pts,
fill=Boat.Type)) +
geom_boxplot()
print(ANOPlot)
# Homework Questions ----------------------------------
# Now begin adjusting the means of the different groups. Given the sample sizes you have chosen, how small can the differences between the groups be (the "effect size") for you to still detect a significant pattern (p < 0.05)?
# this was the smallest difference while still detecting a significant pattern
# NOTE(review): these draws only redefine taxiBoat/tourBoat; the data frame
# and model are not rebuilt here -- the quoted output came from re-running
# the pipeline above with these values.
taxiBoat <- rnorm(n=242,mean=25.46,sd=27.10)
tourBoat <- rnorm(n=242,mean=29,sd=28)
# > print(summary(ANOmodel))
# Df Sum Sq Mean Sq F value Pr(>F)
# Boat.Type 1 4467 4467 5.465 0.0198 *
# Residuals 482 393982 817
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Alternatively, for the effect sizes you originally hypothesized, what is the minimum sample size you would need in order to detect a statistically significant effect? Again, run the model a few times with the same parameter set to get a feeling for the effect of random variation in the data.
# this was the smallest sample size while still detecting a significant pattern
taxiBoat <- rnorm(n=8,mean=25.46,sd=27.10)
tourBoat <- rnorm(n=8,mean=234.10,sd=232)
# print(summary(ANOmodel))
# Df Sum Sq Mean Sq F value Pr(>F)
# Boat.Type 1 262947 262947 5.11 0.0403 *
# Residuals 14 720448 51461
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{collapseVector}
\alias{collapseVector}
\title{collapse a character vector to SQL multiquery thing}
\usage{
collapseVector(charvec)
}
\arguments{
\item{charvec}{a character vector}
}
\description{
cook a character vector of baz, quux, quux, ... into format suitable for
SELECT * FROM foo WHERE bar IN ("baz", "quux", "quuux")
}
\details{
A simple string-formatting helper: quotes each element of the vector and
joins them with commas so the result can be spliced into an SQL
\code{IN (...)} clause.
}
|
/man/collapseVector.Rd
|
permissive
|
retaj/cb2tools
|
R
| false
| true
| 529
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{collapseVector}
\alias{collapseVector}
\title{collapse a character vector to SQL multiquery thing}
\usage{
collapseVector(charvec)
}
\arguments{
\item{charvec}{a character vector}
}
\description{
cook a character vector of baz, quux, quux, ... into format suitable for
SELECT * FROM foo WHERE bar IN ("baz", "quux", "quuux")
}
\details{
A simple string-formatting helper: quotes each element of the vector and
joins them with commas so the result can be spliced into an SQL
\code{IN (...)} clause.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\docType{data}
\name{RP3_MDB_USRPWD}
\alias{RP3_MDB_USRPWD}
\title{Global variable}
\format{
An object of class \code{NULL} of length 0.
}
\usage{
RP3_MDB_USRPWD
}
\description{
Global variable
}
\author{
mvaniterson
}
\keyword{data}
|
/BBMRIomics/man/RP3_MDB_USRPWD.Rd
|
no_license
|
bbmri-nl/BBMRIomics
|
R
| false
| true
| 318
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\docType{data}
\name{RP3_MDB_USRPWD}
\alias{RP3_MDB_USRPWD}
\title{Global variable}
\format{
An object of class \code{NULL} of length 0.
}
\usage{
RP3_MDB_USRPWD
}
\description{
Global variable
}
\author{
mvaniterson
}
\keyword{data}
|
#' Import all test-set TopDom regions into a single data.frame
#'
#' This script will read all available \file{topdomData/*/*.rds}
#' files, extract the TopDom regions for the test samples (but not the
#' reference) and convert to a data.frame with additional columns on parameter
#' settings and RNG seeds appended. All these data.frames are stacked into one
#' big data.frame which is saved to a \file{*,test-topdom.rds} file in the
#' `path` folder.
#'
#' @param pattern (character string) A regular expression of the subfolders
#' in folder `path` to import.
#'
#' @param path (character string) The root folder of the overlap score data.
#'
#' @param skip (logical) If TRUE, already existing results are returned, otherwise not.
#'
#' @param save_individual (logical) If TRUE, also the imported data.frames of
#' the subfolders are saved in individual \file{*,test-topdom.rds} files in the
#' `path` folder.
#'
#' @return The pathname of the saved \file{*,test-topdom.rds} file in the
#' `path` folder.
#'
#' @importFrom utils file_test
#' @importFrom future.apply future_lapply
#' @export
import_topdom_regions <- function(pattern = paste(c("human,HAP1,unique", "bin_size=.*", "partition_by=cells_by_half", "min_cell_size=2", "window_size=.*", "test=.*", "mainseed=0xBEEF"), collapse = ","), path = "topdomData", skip = TRUE, save_individual = TRUE) {
  stopifnot(file_test("-d", path))
  stopifnot(is.character(pattern), length(pattern) == 1L,
            !is.na(pattern), nzchar(pattern))

  ## Derive the output filename from the fixed components of the pattern;
  ## components containing a literal "*" are regex wildcards and are dropped.
  parts <- strsplit(pattern, split = ",", fixed = TRUE)[[1]]
  parts <- grep("*", parts, fixed = TRUE, invert = TRUE, value = TRUE)
  filename <- sprintf("%s,test-topdom.rds", paste(parts, collapse = ","))
  pathname <- file.path(path, filename)
  if (file_test("-f", pathname)) {
    if (skip) return(pathname)
    stop("File already exists: ", sQuote(pathname))
  }

  ## All subfolders of 'path' whose names match the pattern
  sets <- dir(path, pattern = pattern, no.. = TRUE)
  sets <- sets[file_test("-d", file.path(path, sets))]
  message("Number of sets: ", length(sets))
  stopifnot(length(sets) > 0L)

  data <- list()
  for (kk in seq_along(sets)) {
    set <- sets[kk]
    pathname_kk <- file.path(path, sprintf("%s,test-topdom.rds", set))
    message(sprintf("Set #%d (%s) of %d ...", kk, set, length(sets)))

    ## Reuse a previously imported per-set file, if one exists
    if (file_test("-f", pathname_kk)) {
      data_kk <- read_rds(pathname_kk)
      data[[kk]] <- data_kk
      message(sprintf("Set #%d (%s) of %d ... already done", kk, set, length(sets)))
      next
    }

    ## Import all *.rds files of this set (in parallel) and stack them
    pathnames <- dir(file.path(path, set), pattern = "[.]rds$", recursive = TRUE, full.names = TRUE)
    data_kk <- future_lapply(pathnames, FUN = read_topdom_regions)
    data_kk <- do.call(rbind, data_kk)
    print(data_kk)  # progress/debug output
    if (save_individual) save_rds(data_kk, pathname_kk)
    data[[kk]] <- data_kk
    message(sprintf("Set #%d (%s) of %d ... saved", kk, set, length(sets)))
  }

  ## Stack all sets and order the rows deterministically
  data <- do.call(rbind, data)
  o <- with(data, order(chr, bin_size, fraction, window_size, seed, from.id))
  data <- data[o, ]
  save_rds(data, pathname)
  pathname
}
|
/R/import_topdom_regions.R
|
no_license
|
HenrikBengtsson/TopDomStudy
|
R
| false
| false
| 3,024
|
r
|
#' Import all test-set TopDom regions into a single data.frame
#'
#' This script will read all available \file{topdomData/*/*.rds}
#' files, extract the TopDom regions for the test samples (but not the
#' reference) and convert to a data.frame with additional columns on parameter
#' settings and RNG seeds appended. All these data.frames are stacked into one
#' big data.frame which is saved to a \file{*,test-topdom.rds} file in the
#' `path` folder.
#'
#' @param pattern (character string) A regular expression of the subfolders
#' in folder `path` to import.
#'
#' @param path (character string) The root folder of the overlap score data.
#'
#' @param skip (logical) If TRUE, already existing results are returned, otherwise not.
#'
#' @param save_individual (logical) If TRUE, also the imported data.frames of
#' the subfolders are saved in individual \file{*,test-topdom.rds} files in the
#' `path` folder.
#'
#' @return The pathname of the saved \file{*,test-topdom.rds} file in the
#' `path` folder.
#'
#' @importFrom utils file_test
#' @importFrom future.apply future_lapply
#' @export
import_topdom_regions <- function(pattern = paste(c("human,HAP1,unique", "bin_size=.*", "partition_by=cells_by_half", "min_cell_size=2", "window_size=.*", "test=.*", "mainseed=0xBEEF"), collapse = ","), path = "topdomData", skip = TRUE, save_individual = TRUE) {
  stopifnot(file_test("-d", path))
  stopifnot(is.character(pattern), length(pattern) == 1L,
            !is.na(pattern), nzchar(pattern))

  ## Derive the output filename from the fixed components of the pattern;
  ## components containing a literal "*" are regex wildcards and are dropped.
  parts <- strsplit(pattern, split = ",", fixed = TRUE)[[1]]
  parts <- grep("*", parts, fixed = TRUE, invert = TRUE, value = TRUE)
  filename <- sprintf("%s,test-topdom.rds", paste(parts, collapse = ","))
  pathname <- file.path(path, filename)
  if (file_test("-f", pathname)) {
    if (skip) return(pathname)
    stop("File already exists: ", sQuote(pathname))
  }

  ## All subfolders of 'path' whose names match the pattern
  sets <- dir(path, pattern = pattern, no.. = TRUE)
  sets <- sets[file_test("-d", file.path(path, sets))]
  message("Number of sets: ", length(sets))
  stopifnot(length(sets) > 0L)

  data <- list()
  for (kk in seq_along(sets)) {
    set <- sets[kk]
    pathname_kk <- file.path(path, sprintf("%s,test-topdom.rds", set))
    message(sprintf("Set #%d (%s) of %d ...", kk, set, length(sets)))

    ## Reuse a previously imported per-set file, if one exists
    if (file_test("-f", pathname_kk)) {
      data_kk <- read_rds(pathname_kk)
      data[[kk]] <- data_kk
      message(sprintf("Set #%d (%s) of %d ... already done", kk, set, length(sets)))
      next
    }

    ## Import all *.rds files of this set (in parallel) and stack them
    pathnames <- dir(file.path(path, set), pattern = "[.]rds$", recursive = TRUE, full.names = TRUE)
    data_kk <- future_lapply(pathnames, FUN = read_topdom_regions)
    data_kk <- do.call(rbind, data_kk)
    print(data_kk)  # progress/debug output
    if (save_individual) save_rds(data_kk, pathname_kk)
    data[[kk]] <- data_kk
    message(sprintf("Set #%d (%s) of %d ... saved", kk, set, length(sets)))
  }

  ## Stack all sets and order the rows deterministically
  data <- do.call(rbind, data)
  o <- with(data, order(chr, bin_size, fraction, window_size, seed, from.id))
  data <- data[o, ]
  save_rds(data, pathname)
  pathname
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.