content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{adverse_events} \alias{adverse_events} \title{Adverse events} \format{A data.table object with one row for each adverse event and with columns: \describe{ \item{ae_name}{Name of adverse event.} \item{ae_abb}{Abbreviation for the adverse event.} }} \usage{ adverse_events } \description{ Dataset of adverse events included in the model. } \examples{ print(adverse_events) } \keyword{datasets}
/man/adverse_events.Rd
no_license
InnovationValueInitiative/IVI-NSCLC
R
false
true
502
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{adverse_events} \alias{adverse_events} \title{Adverse events} \format{A data.table object with one row for each adverse event and with columns: \describe{ \item{ae_name}{Name of adverse event.} \item{ae_abb}{Abbreviation for the adverse event.} }} \usage{ adverse_events } \description{ Dataset of adverse events included in the model. } \examples{ print(adverse_events) } \keyword{datasets}
###################################################### -*- mode: r -*- ##### ## Scenario setup for Iterated Race (irace). ############################################################################ ## To use the default value of a parameter of iRace, simply do not set ## the parameter (comment it out in this file, and do not give any ## value on the command line). ## File that contains the description of the parameters of the target ## algorithm. parameterFile = "./parameters.txt" ## Directory where the programs will be run. execDir = "/Users/richard/Developer/evolution/cga/forked/CGP.jl" ## File to save tuning results as an R dataset, either absolute path or ## relative to execDir. logFile = "./irace.Rdata" ## Previously saved log file to recover the execution of irace, either ## absolute path or relative to the current directory. If empty or NULL, ## recovery is not performed. # recoveryFile = "" ## Directory where training instances are located; either absolute path or ## relative to current directory. If no trainInstancesFiles is provided, ## all the files in trainInstancesDir will be listed as instances. trainInstancesDir = "" ## File that contains a list of training instances and optionally ## additional parameters for them. If trainInstancesDir is provided, irace ## will search for the files in this folder. trainInstancesFile = "instances-list.txt" ## File that contains a set of initial configurations. If empty or NULL, ## all initial configurations are randomly generated. # configurationsFile = "" ## File that contains a list of logical expressions that cannot be TRUE ## for any evaluated configuration. If empty or NULL, do not use forbidden ## expressions. # forbiddenFile = "" ## Script called for each configuration that executes the target algorithm ## to be tuned. See templates. targetRunner = "./target-runner" ## Number of times to retry a call to targetRunner if the call failed. # targetRunnerRetries = 0 ## Optional data passed to targetRunner. 
This is ignored by the default ## targetRunner function, but it may be used by custom targetRunner ## functions to pass persistent data around. # targetRunnerData = "" ## Optional R function to provide custom parallelization of targetRunner. # targetRunnerParallel = "" ## Optional script or R function that provides a numeric value for each ## configuration. See templates/target-evaluator.tmpl # targetEvaluator = "" ## Maximum number of runs (invocations of targetRunner) that will be ## performed. It determines the maximum budget of experiments for the ## tuning. maxExperiments = 10000 ## Maximum total execution time in seconds for the executions of ## targetRunner. targetRunner must return two values: cost and time. # maxTime = 0 ## Fraction (smaller than 1) of the budget used to estimate the mean ## computation time of a configuration. Only used when maxTime > 0 # budgetEstimation = 0.02 ## Maximum number of decimal places that are significant for numerical ## (real) parameters. digits = 1 ## Debug level of the output of irace. Set this to 0 to silence all debug ## messages. Higher values provide more verbose debug messages. # debugLevel = 0 ## Number of iterations. # nbIterations = 0 ## Number of runs of the target algorithm per iteration. # nbExperimentsPerIteration = 0 ## Randomly sample the training instances or use them in the order given. # sampleInstances = 1 ## Statistical test used for elimination. Default test is always F-test ## unless capping is enabled, in which case the default test is t-test. ## Valid values are: F-test (Friedman test), t-test (pairwise t-tests with ## no correction), t-test-bonferroni (t-test with Bonferroni's correction ## for multiple comparisons), t-test-holm (t-test with Holm's correction ## for multiple comparisons). # testType = "F-test" ## Number of instances evaluated before the first elimination test. It ## must be a multiple of eachTest. # firstTest = 5 ## Number of instances evaluated between elimination tests. 
# eachTest = 1 ## Minimum number of configurations needed to continue the execution of ## each race (iteration). # minNbSurvival = 0 ## Number of configurations to be sampled and evaluated at each iteration. # nbConfigurations = 0 ## Parameter used to define the number of configurations sampled and ## evaluated at each iteration. mu = 10 ## Confidence level for the elimination test. # confidence = 0.95 ## If the target algorithm is deterministic, configurations will be ## evaluated only once per instance. # deterministic = 0 ## Seed of the random number generator (by default, generate a random ## seed). # seed = NA ## Number of calls to targetRunner to execute in parallel. Values 0 or 1 ## mean no parallelization. # parallel = 0 ## Enable/disable load-balancing when executing experiments in parallel. ## Load-balancing makes better use of computing resources, but increases ## communication overhead. If this overhead is large, disabling ## load-balancing may be faster. # loadBalancing = 1 ## Enable/disable MPI. Use Rmpi to execute targetRunner in parallel ## (parameter parallel is the number of slaves). # mpi = 0 ## Specify how irace waits for jobs to finish when targetRunner submits ## jobs to a batch cluster: sge, pbs, torque or slurm. targetRunner must ## submit jobs to the cluster using, for example, qsub. # batchmode = 0 ## Enable/disable the soft restart strategy that avoids premature ## convergence of the probabilistic model. # softRestart = 1 ## Soft restart threshold value for numerical parameters. If NA, NULL or ## "", it is computed as 10^-digits. # softRestartThreshold = "" ## Directory where testing instances are located, either absolute or ## relative to current directory. # testInstancesDir = "" ## File containing a list of test instances and optionally additional ## parameters for them. # testInstancesFile = "" ## Number of elite configurations returned by irace that will be tested if ## test instances are provided. 
# testNbElites = 1 ## Enable/disable testing the elite configurations found at each ## iteration. # testIterationElites = 0 ## Enable/disable elitist irace. # elitist = 1 ## Number of instances added to the execution list before previous ## instances in elitist irace. # elitistNewInstances = 1 ## In elitist irace, maximum number per race of elimination tests that do ## not eliminate a configuration. Use 0 for no limit. # elitistLimit = 2 ## User-defined R function that takes a configuration generated by irace ## and repairs it. # repairConfiguration = "" ## Enable the use of adaptive capping, a technique designed for minimizing ## the computation time of configurations. This is only available when ## elitist is active. # capping = 0 ## Measure used to obtain the execution bound from the performance of the ## elite configurations: median, mean, worst, best. # cappingType = "median" ## Method to calculate the mean performance of elite configurations: ## candidate or instance. # boundType = "candidate" ## Maximum execution bound for targetRunner. It must be specified when ## capping is enabled. # boundMax = 0 ## Precision used for calculating the execution time. It must be specified ## when capping is enabled. # boundDigits = 0 ## Penalization constant for timed out executions (executions that reach ## boundMax execution time). # boundPar = 1 ## Replace the configuration cost of bounded executions with boundMax. # boundAsTimeout = 1 ## Percentage of the configuration budget used to perform a postselection ## race of the best configurations of each iteration after the execution ## of irace. # postselection = 0 ## END of scenario file ############################################################################
/irace_tuning/scenario.txt
permissive
d6y/CGP.jl
R
false
false
7,738
txt
###################################################### -*- mode: r -*- ##### ## Scenario setup for Iterated Race (irace). ############################################################################ ## To use the default value of a parameter of iRace, simply do not set ## the parameter (comment it out in this file, and do not give any ## value on the command line). ## File that contains the description of the parameters of the target ## algorithm. parameterFile = "./parameters.txt" ## Directory where the programs will be run. execDir = "/Users/richard/Developer/evolution/cga/forked/CGP.jl" ## File to save tuning results as an R dataset, either absolute path or ## relative to execDir. logFile = "./irace.Rdata" ## Previously saved log file to recover the execution of irace, either ## absolute path or relative to the current directory. If empty or NULL, ## recovery is not performed. # recoveryFile = "" ## Directory where training instances are located; either absolute path or ## relative to current directory. If no trainInstancesFiles is provided, ## all the files in trainInstancesDir will be listed as instances. trainInstancesDir = "" ## File that contains a list of training instances and optionally ## additional parameters for them. If trainInstancesDir is provided, irace ## will search for the files in this folder. trainInstancesFile = "instances-list.txt" ## File that contains a set of initial configurations. If empty or NULL, ## all initial configurations are randomly generated. # configurationsFile = "" ## File that contains a list of logical expressions that cannot be TRUE ## for any evaluated configuration. If empty or NULL, do not use forbidden ## expressions. # forbiddenFile = "" ## Script called for each configuration that executes the target algorithm ## to be tuned. See templates. targetRunner = "./target-runner" ## Number of times to retry a call to targetRunner if the call failed. # targetRunnerRetries = 0 ## Optional data passed to targetRunner. 
This is ignored by the default ## targetRunner function, but it may be used by custom targetRunner ## functions to pass persistent data around. # targetRunnerData = "" ## Optional R function to provide custom parallelization of targetRunner. # targetRunnerParallel = "" ## Optional script or R function that provides a numeric value for each ## configuration. See templates/target-evaluator.tmpl # targetEvaluator = "" ## Maximum number of runs (invocations of targetRunner) that will be ## performed. It determines the maximum budget of experiments for the ## tuning. maxExperiments = 10000 ## Maximum total execution time in seconds for the executions of ## targetRunner. targetRunner must return two values: cost and time. # maxTime = 0 ## Fraction (smaller than 1) of the budget used to estimate the mean ## computation time of a configuration. Only used when maxTime > 0 # budgetEstimation = 0.02 ## Maximum number of decimal places that are significant for numerical ## (real) parameters. digits = 1 ## Debug level of the output of irace. Set this to 0 to silence all debug ## messages. Higher values provide more verbose debug messages. # debugLevel = 0 ## Number of iterations. # nbIterations = 0 ## Number of runs of the target algorithm per iteration. # nbExperimentsPerIteration = 0 ## Randomly sample the training instances or use them in the order given. # sampleInstances = 1 ## Statistical test used for elimination. Default test is always F-test ## unless capping is enabled, in which case the default test is t-test. ## Valid values are: F-test (Friedman test), t-test (pairwise t-tests with ## no correction), t-test-bonferroni (t-test with Bonferroni's correction ## for multiple comparisons), t-test-holm (t-test with Holm's correction ## for multiple comparisons). # testType = "F-test" ## Number of instances evaluated before the first elimination test. It ## must be a multiple of eachTest. # firstTest = 5 ## Number of instances evaluated between elimination tests. 
# eachTest = 1 ## Minimum number of configurations needed to continue the execution of ## each race (iteration). # minNbSurvival = 0 ## Number of configurations to be sampled and evaluated at each iteration. # nbConfigurations = 0 ## Parameter used to define the number of configurations sampled and ## evaluated at each iteration. mu = 10 ## Confidence level for the elimination test. # confidence = 0.95 ## If the target algorithm is deterministic, configurations will be ## evaluated only once per instance. # deterministic = 0 ## Seed of the random number generator (by default, generate a random ## seed). # seed = NA ## Number of calls to targetRunner to execute in parallel. Values 0 or 1 ## mean no parallelization. # parallel = 0 ## Enable/disable load-balancing when executing experiments in parallel. ## Load-balancing makes better use of computing resources, but increases ## communication overhead. If this overhead is large, disabling ## load-balancing may be faster. # loadBalancing = 1 ## Enable/disable MPI. Use Rmpi to execute targetRunner in parallel ## (parameter parallel is the number of slaves). # mpi = 0 ## Specify how irace waits for jobs to finish when targetRunner submits ## jobs to a batch cluster: sge, pbs, torque or slurm. targetRunner must ## submit jobs to the cluster using, for example, qsub. # batchmode = 0 ## Enable/disable the soft restart strategy that avoids premature ## convergence of the probabilistic model. # softRestart = 1 ## Soft restart threshold value for numerical parameters. If NA, NULL or ## "", it is computed as 10^-digits. # softRestartThreshold = "" ## Directory where testing instances are located, either absolute or ## relative to current directory. # testInstancesDir = "" ## File containing a list of test instances and optionally additional ## parameters for them. # testInstancesFile = "" ## Number of elite configurations returned by irace that will be tested if ## test instances are provided. 
# testNbElites = 1 ## Enable/disable testing the elite configurations found at each ## iteration. # testIterationElites = 0 ## Enable/disable elitist irace. # elitist = 1 ## Number of instances added to the execution list before previous ## instances in elitist irace. # elitistNewInstances = 1 ## In elitist irace, maximum number per race of elimination tests that do ## not eliminate a configuration. Use 0 for no limit. # elitistLimit = 2 ## User-defined R function that takes a configuration generated by irace ## and repairs it. # repairConfiguration = "" ## Enable the use of adaptive capping, a technique designed for minimizing ## the computation time of configurations. This is only available when ## elitist is active. # capping = 0 ## Measure used to obtain the execution bound from the performance of the ## elite configurations: median, mean, worst, best. # cappingType = "median" ## Method to calculate the mean performance of elite configurations: ## candidate or instance. # boundType = "candidate" ## Maximum execution bound for targetRunner. It must be specified when ## capping is enabled. # boundMax = 0 ## Precision used for calculating the execution time. It must be specified ## when capping is enabled. # boundDigits = 0 ## Penalization constant for timed out executions (executions that reach ## boundMax execution time). # boundPar = 1 ## Replace the configuration cost of bounded executions with boundMax. # boundAsTimeout = 1 ## Percentage of the configuration budget used to perform a postselection ## race of the best configurations of each iteration after the execution ## of irace. # postselection = 0 ## END of scenario file ############################################################################
library(tidyverse) library(leaflet) #Tri des données net <- function(data){ museefreq = read_csv(paste('DATA/',data, sep=''), locale=locale()) date = museefreq$year[1] museefreq <- museefreq %>% filter(country == 'France', status == 'open') %>% select(!c(website, phone, fax, description, tags, year)) if (date != 2018){ museefreq <- museefreq %>% select(id,stats) } pdate <- paste('payant', date, sep = '.') gdate <- paste('gratuit',date, sep ='.') tdate <-paste('total',date, sep ='.') museefreq <- museefreq %>% separate(stats, c(pdate, gdate, 'label-date'), sep = ';') %>% select(!`label-date`) museefreq[,pdate]<- as.numeric(apply(as.matrix(museefreq[,pdate]),1, substring,first=8)) museefreq[,gdate] <- as.numeric(apply(as.matrix(museefreq[,gdate]),1, substring,first=9)) museefreq[,tdate] <- museefreq[,pdate] + museefreq[,gdate] return(museefreq) } recupdata <- function(){ lmusee = paste('frequentation-musees-de-france-',2018:2013,'.csv',sep = '') list.musee = lapply(lmusee, net) museefreq <- list.musee[[1]] for (i in 2:(length(list.musee))){ museefreq <- inner_join(museefreq, list.musee[[i]]) } museefreq <- museefreq %>% filter(lon > (-20) & lon < 25) loc_musee <- read_delim(paste('DATA/','liste-et-localisation-des-musees-de-france.csv', sep=''),delim = ';') loc_musee <- loc_musee %>% select(c(2,6,8,14,15)) musee <- inner_join(loc_musee, museefreq, by = c('ref_musee'='id')) return(musee) } #Carte testurl <- function(texte){ for (chaine in strsplit(texte, " ")[[1]]){ if(grepl("^http" ,chaine)){return(chaine)} if(grepl("^www.",chaine)){return(paste("http://", chaine, sep=""))} } return(F) } fpopup <- function(rdt){ rdt = data.frame(t(rdt)) popup = " <head> <style> .leaflet-container a{ text-decoration:none; color:#0078A8} a:hover{color:Black} h4{color:none;} #txt{margin-left:5px;} #ad, #txt{ display: inline-block; vertical-align: top; margin-bottom: 0;} .leaflet-container p{color:#293133;} </style> <meta charset='UTF-8'> </head><body>" title = paste('<h4>', 
as.character(rdt$name), "</h4>") if (testurl(rdt$sitweb) != F){ popup = popup %>% paste("<a href='", testurl(rdt$sitweb), "' target=_blank>", title, "</a>", sep ="") }else{ popup = popup %>% paste(title) } popup = popup %>% paste('<p id="ad"><strong>Adresse:</strong></p>') rdt = unite(rdt, "ad", number, street, sep=' ',na.rm = T) if(is.na(rdt$postal_code)){ popup = popup %>% paste('<p id="txt">', rdt$ad , "<br>", rdt$city,"</p>", sep="") }else{ popup = popup %>% paste('<p id="txt">', rdt$ad , "<br>", rdt$postal_code, " ", rdt$city,"</p>", sep="") } popup = popup %>% paste('<p><strong>Téléphone: </strong>', rdt$telephone1, "</p>", sep="") popup = popup %>% paste('<p><strong>Total visiteur en 2018: </strong>', rdt$total.2018, "</p></body>", sep="") return(popup) } ## Geocode if (!(require(jsonlite))) install.packages("jsonlite") mygeocode <- function(adresses){ # adresses est un vecteur contenant toutes les adresses sous forme de chaine de caracteres nominatim_osm <- function(address = NULL){ ## details: http://wiki.openstreetmap.org/wiki/Nominatim ## fonction nominatim_osm proposée par D.Kisler if(suppressWarnings(is.null(address))) return(data.frame()) tryCatch( d <- jsonlite::fromJSON( gsub('\\@addr\\@', gsub('\\s+', '\\%20', address), 'http://nominatim.openstreetmap.org/search/@addr@?format=json&addressdetails=0&limit=1') ), error = function(c) return(data.frame()) ) if(length(d) == 0) return(data.frame()) return(c(as.numeric(d$lon), as.numeric(d$lat))) } tableau <- t(sapply(adresses,nominatim_osm)) colnames(tableau) <- c("lon","lat") return(tableau) }
/APP/SCRIPT/rmusee.R
no_license
caillardc/VisuMarineClement
R
false
false
3,826
r
library(tidyverse) library(leaflet) #Tri des données net <- function(data){ museefreq = read_csv(paste('DATA/',data, sep=''), locale=locale()) date = museefreq$year[1] museefreq <- museefreq %>% filter(country == 'France', status == 'open') %>% select(!c(website, phone, fax, description, tags, year)) if (date != 2018){ museefreq <- museefreq %>% select(id,stats) } pdate <- paste('payant', date, sep = '.') gdate <- paste('gratuit',date, sep ='.') tdate <-paste('total',date, sep ='.') museefreq <- museefreq %>% separate(stats, c(pdate, gdate, 'label-date'), sep = ';') %>% select(!`label-date`) museefreq[,pdate]<- as.numeric(apply(as.matrix(museefreq[,pdate]),1, substring,first=8)) museefreq[,gdate] <- as.numeric(apply(as.matrix(museefreq[,gdate]),1, substring,first=9)) museefreq[,tdate] <- museefreq[,pdate] + museefreq[,gdate] return(museefreq) } recupdata <- function(){ lmusee = paste('frequentation-musees-de-france-',2018:2013,'.csv',sep = '') list.musee = lapply(lmusee, net) museefreq <- list.musee[[1]] for (i in 2:(length(list.musee))){ museefreq <- inner_join(museefreq, list.musee[[i]]) } museefreq <- museefreq %>% filter(lon > (-20) & lon < 25) loc_musee <- read_delim(paste('DATA/','liste-et-localisation-des-musees-de-france.csv', sep=''),delim = ';') loc_musee <- loc_musee %>% select(c(2,6,8,14,15)) musee <- inner_join(loc_musee, museefreq, by = c('ref_musee'='id')) return(musee) } #Carte testurl <- function(texte){ for (chaine in strsplit(texte, " ")[[1]]){ if(grepl("^http" ,chaine)){return(chaine)} if(grepl("^www.",chaine)){return(paste("http://", chaine, sep=""))} } return(F) } fpopup <- function(rdt){ rdt = data.frame(t(rdt)) popup = " <head> <style> .leaflet-container a{ text-decoration:none; color:#0078A8} a:hover{color:Black} h4{color:none;} #txt{margin-left:5px;} #ad, #txt{ display: inline-block; vertical-align: top; margin-bottom: 0;} .leaflet-container p{color:#293133;} </style> <meta charset='UTF-8'> </head><body>" title = paste('<h4>', 
as.character(rdt$name), "</h4>") if (testurl(rdt$sitweb) != F){ popup = popup %>% paste("<a href='", testurl(rdt$sitweb), "' target=_blank>", title, "</a>", sep ="") }else{ popup = popup %>% paste(title) } popup = popup %>% paste('<p id="ad"><strong>Adresse:</strong></p>') rdt = unite(rdt, "ad", number, street, sep=' ',na.rm = T) if(is.na(rdt$postal_code)){ popup = popup %>% paste('<p id="txt">', rdt$ad , "<br>", rdt$city,"</p>", sep="") }else{ popup = popup %>% paste('<p id="txt">', rdt$ad , "<br>", rdt$postal_code, " ", rdt$city,"</p>", sep="") } popup = popup %>% paste('<p><strong>Téléphone: </strong>', rdt$telephone1, "</p>", sep="") popup = popup %>% paste('<p><strong>Total visiteur en 2018: </strong>', rdt$total.2018, "</p></body>", sep="") return(popup) } ## Geocode if (!(require(jsonlite))) install.packages("jsonlite") mygeocode <- function(adresses){ # adresses est un vecteur contenant toutes les adresses sous forme de chaine de caracteres nominatim_osm <- function(address = NULL){ ## details: http://wiki.openstreetmap.org/wiki/Nominatim ## fonction nominatim_osm proposée par D.Kisler if(suppressWarnings(is.null(address))) return(data.frame()) tryCatch( d <- jsonlite::fromJSON( gsub('\\@addr\\@', gsub('\\s+', '\\%20', address), 'http://nominatim.openstreetmap.org/search/@addr@?format=json&addressdetails=0&limit=1') ), error = function(c) return(data.frame()) ) if(length(d) == 0) return(data.frame()) return(c(as.numeric(d$lon), as.numeric(d$lat))) } tableau <- t(sapply(adresses,nominatim_osm)) colnames(tableau) <- c("lon","lat") return(tableau) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/biomart.R \name{getTSS} \alias{getTSS} \title{getTSS to fetch GENCODE gene annotation (transcripts level) from Bioconductor package biomaRt If upstream and downstream are specified in TSS list, promoter regions of GENCODE gene will be generated.} \usage{ getTSS(genome = "hg38", TSS = list(upstream = NULL, downstream = NULL)) } \arguments{ \item{genome}{Which genome build will be used: hg38 (default) or hg19.} \item{TSS}{A list. Contains upstream and downstream like TSS=list(upstream, downstream). When upstream and downstream is specified, coordinates of promoter regions with gene annotation will be generated.} } \value{ GENCODE gene annotation if TSS is not specified. Coordinates of GENCODE gene promoter regions if TSS is specified. } \description{ getTSS to fetch GENCODE gene annotation (transcripts level) from Bioconductor package biomaRt If upstream and downstream are specified in TSS list, promoter regions of GENCODE gene will be generated. } \examples{ # get GENCODE gene annotation (transcripts level) \dontrun{ getTSS <- getTSS() getTSS <- getTSS(genome.build = "hg38", TSS=list(upstream=1000, downstream=1000)) } }
/man/getTSS.Rd
no_license
daniel615212950/TCGAbiolinks
R
false
true
1,224
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/biomart.R \name{getTSS} \alias{getTSS} \title{getTSS to fetch GENCODE gene annotation (transcripts level) from Bioconductor package biomaRt If upstream and downstream are specified in TSS list, promoter regions of GENCODE gene will be generated.} \usage{ getTSS(genome = "hg38", TSS = list(upstream = NULL, downstream = NULL)) } \arguments{ \item{genome}{Which genome build will be used: hg38 (default) or hg19.} \item{TSS}{A list. Contains upstream and downstream like TSS=list(upstream, downstream). When upstream and downstream is specified, coordinates of promoter regions with gene annotation will be generated.} } \value{ GENCODE gene annotation if TSS is not specified. Coordinates of GENCODE gene promoter regions if TSS is specified. } \description{ getTSS to fetch GENCODE gene annotation (transcripts level) from Bioconductor package biomaRt If upstream and downstream are specified in TSS list, promoter regions of GENCODE gene will be generated. } \examples{ # get GENCODE gene annotation (transcripts level) \dontrun{ getTSS <- getTSS() getTSS <- getTSS(genome.build = "hg38", TSS=list(upstream=1000, downstream=1000)) } }
## setting the data folder as Working Directory #setwd("C:/Users/Americo/Estudos/Coursera/Getting and Cleaning Data/Project/Dados") # ### Reading the Test Dataset # fileConn <- file("X_test.txt") testX<-read.fwf(fileConn, widths=c(16), sep = "\n") # > str(testX) #'data.frame': 2947 obs. of 1 variable: # $ V1: num 0.257 0.286 0.275 0.27 0.275 ... # fileConn <- file("Y_test.txt") testY<-read.fwf(fileConn, widths=c(1), sep = "\n") #> str(testY) #'data.frame': 2947 obs. of 1 variable: # $ V1: int 5 5 5 5 5 5 5 5 5 5 ... # fileConn <- file("subject_test.txt") testSubject<-read.fwf(fileConn, widths=c(2), sep = "\n") #> str(testSubject) #'data.frame': 2947 obs. of 1 variable: # $ V1: int 2 2 2 2 2 2 2 2 2 2 ... # ### Merging the Test Dataset in one data frame using cbind # test <- cbind(testSubject,testY,testX) #> str(test) #'data.frame': 2947 obs. of 3 variables: # $ V1: int 2 2 2 2 2 2 2 2 2 2 ... #$ V1: int 5 5 5 5 5 5 5 5 5 5 ... #$ V1: num 0.257 0.286 0.275 0.27 0.275 ... # # ### Reading the Train Dataset # fileConn <- file("X_train.txt") trainX<-read.fwf(fileConn, widths=c(16), sep = "\n") #> str(trainX) #'data.frame': 7352 obs. of 1 variable: # $ V1: num 0.289 0.278 0.28 0.279 0.277 ... # fileConn <- file("Y_train.txt") trainY<-read.fwf(fileConn, widths=c(1), sep = "\n") #> str(trainY) #'data.frame': 7352 obs. of 1 variable: # $ V1: int 5 5 5 5 5 5 5 5 5 5 ... # fileConn <- file("subject_train.txt") trainSubject<-read.fwf(fileConn, widths=c(2), sep = "\n") #> str(trainSubject) #'data.frame': 7352 obs. of 1 variable: # $ V1: int 1 1 1 1 1 1 1 1 1 1 ... # ### Merging the Train Dataset in one data frame using cbind # train <- cbind(trainSubject,trainY,trainX) #> str(train) #'data.frame': 7352 obs. of 3 variables: #$ V1: int 1 1 1 1 1 1 1 1 1 1 ... #$ V1: int 5 5 5 5 5 5 5 5 5 5 ... #$ V1: num 0.289 0.278 0.28 0.279 0.277 ... # ### Merging the Test and Train using rbind # alldata <- rbind(train,test) #> str(alldata) #'data.frame': 10299 obs. 
of 3 variables: # $ V1: int 1 1 1 1 1 1 1 1 1 1 ... #$ V1: int 5 5 5 5 5 5 5 5 5 5 ... #$ V1: num 0.289 0.278 0.28 0.279 0.277 ... # # Changing variables named V1 to more significant names names(alldata) <- c('Id','Activity','Measurement') # library(plyr) ## Sorting the data to put all measurements in order of subjects and activities alldatasorted <- arrange(alldata, Id, Activity) # ## Calculating mean and standard deviation by subject and activity, using ddply databySubjectActivity <- ddply(alldata, .(Id, Activity), summarize, mean=mean(Measurement), std_dev=sd(Measurement)) #> str(databySubjectActivity) #'data.frame': 180 obs. of 4 variables: # $ Id : int 1 1 1 1 1 1 2 2 2 2 ... #$ Activity: int 1 2 3 4 5 6 1 2 3 4 ... #$ mean : num 0.277 0.255 0.289 0.261 0.279 ... #$ std_dev : num 0.04639 0.04996 0.08416 0.06333 0.00799 ... # # There are six lines for each subject in the data.frame, corresponding to activities: it is the Long Form tidy data set # ## Converting activities in lines into columns in a new data.frame # The final data.frame is tidy considering each Subject in one single line # It is possible because the set of activities is fixed in 6 items # ## The operation is done by using reshape to convert data in lines into columns tidydata <- reshape(databySubjectActivity, idvar="Id", timevar="Activity", direction="wide") # #> str(tidydata) #'data.frame': 30 obs. of 13 variables: # $ Id : int 1 2 3 4 5 6 7 8 9 10 ... #$ mean.1 : num 0.277 0.276 0.276 0.279 0.278 ... #$ std_dev.1: num 0.0464 0.0513 0.0475 0.0292 0.0508 ... #$ mean.2 : num 0.255 0.247 0.261 0.271 0.268 ... #$ std_dev.2: num 0.05 0.0595 0.077 0.0626 0.0953 ... #$ mean.3 : num 0.289 0.278 0.292 0.28 0.294 ... #$ std_dev.3: num 0.0842 0.0803 0.0808 0.0789 0.1027 ... #$ mean.4 : num 0.261 0.277 0.257 0.272 0.274 ... #$ std_dev.4: num 0.0633 0.0231 0.0866 0.0313 0.0266 ... #$ mean.5 : num 0.279 0.278 0.28 0.28 0.283 ... #$ std_dev.5: num 0.00799 0.01406 0.02518 0.02843 0.03481 ... 
#$ mean.6 : num 0.222 0.281 0.276 0.264 0.278 ... #$ std_dev.6: num 0.1689 0.0242 0.0141 0.0884 0.0322 ... #- attr(*, "reshapeWide")=List of 5 #..$ v.names: NULL #..$ timevar: chr "Activity" #..$ idvar : chr "Id" #..$ times : int 1 2 3 4 5 6 #..$ varying: chr [1:2, 1:6] "mean.1" "std_dev.1" "mean.2" "std_dev.2" ... # ## Last operation is to put meaningful names to the variables names(tidydata) <- c("Id","mean_WALKING","stddev_WALKING","mean_WALKING_UPSTAIRS","stddev_WALKING_UPSTAIRS","mean_WALKING_DOWNSTAIRS","stddev_WALKING_DOWNSTAIRS","mean_SITTING","stddev_SITTING","mean_STANDING","stddev_STANDING","mean_LAYING","stddev_LAYING") #> str(tidydata) #'data.frame': 30 obs. of 13 variables: # $ Id : int 1 2 3 4 5 6 7 8 9 10 ... #$ mean_WALKING : num 0.277 0.276 0.276 0.279 0.278 ... #$ stddev_WALKING : num 0.0464 0.0513 0.0475 0.0292 0.0508 ... #$ mean_WALKING_UPSTAIRS : num 0.255 0.247 0.261 0.271 0.268 ... #$ stddev_WALKING_UPSTAIRS : num 0.05 0.0595 0.077 0.0626 0.0953 ... #$ mean_WALKING_DOWNSTAIRS : num 0.289 0.278 0.292 0.28 0.294 ... # ## tidydata contains 30 lines, one for each subject, with the mean and standard deviation ## for each of 6 activities measured ## ## To generate the text file to be uploaded write.table(tidydata, file = "tidydata.txt", row.names = FALSE)
/run_analysis.R
no_license
AmericoKutomi/GetandCleaningProject
R
false
false
5,402
r
## Getting and Cleaning Data course project.
## Reads the UCI HAR test and train files (subject id, activity code, and the
## first fixed-width measurement value per row), merges them, summarises mean
## and standard deviation per subject/activity, reshapes to one row per
## subject, and writes the tidy result to "tidydata.txt".
## The data folder is assumed to be the current working directory.

library(plyr)

## Read one fixed-width column (one value per line) from a file.
read_fixed_column <- function(path, width) {
  read.fwf(path, widths = c(width), sep = "\n")
}

### Test dataset: measurement (16 chars), activity code (1 char), subject (2 chars)
testX       <- read_fixed_column("X_test.txt", 16)
testY       <- read_fixed_column("Y_test.txt", 1)
testSubject <- read_fixed_column("subject_test.txt", 2)

### Merge the test columns into one data frame
test <- cbind(testSubject, testY, testX)

### Train dataset, read the same way
trainX       <- read_fixed_column("X_train.txt", 16)
trainY       <- read_fixed_column("Y_train.txt", 1)
trainSubject <- read_fixed_column("subject_train.txt", 2)

### Merge the train columns into one data frame
train <- cbind(trainSubject, trainY, trainX)

### Stack train on top of test and give the columns meaningful names
alldata <- rbind(train, test)
names(alldata) <- c('Id', 'Activity', 'Measurement')

## Sorted view: all measurements ordered by subject and activity
alldatasorted <- arrange(alldata, Id, Activity)

## Mean and standard deviation by subject and activity (long-form tidy set:
## six rows per subject, one per activity)
databySubjectActivity <- ddply(alldata, .(Id, Activity), summarize,
                               mean = mean(Measurement),
                               std_dev = sd(Measurement))

## Wide form: one row per subject; possible because the activity set is
## fixed at 6 items, so each subject yields exactly 12 summary columns.
tidydata <- reshape(databySubjectActivity, idvar = "Id",
                    timevar = "Activity", direction = "wide")

## Meaningful column names for the six activities
names(tidydata) <- c("Id",
                     "mean_WALKING", "stddev_WALKING",
                     "mean_WALKING_UPSTAIRS", "stddev_WALKING_UPSTAIRS",
                     "mean_WALKING_DOWNSTAIRS", "stddev_WALKING_DOWNSTAIRS",
                     "mean_SITTING", "stddev_SITTING",
                     "mean_STANDING", "stddev_STANDING",
                     "mean_LAYING", "stddev_LAYING")

## Generate the text file to be uploaded
write.table(tidydata, file = "tidydata.txt", row.names = FALSE)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ReadPubMed.R
\name{GetPubMedRelated}
\alias{GetPubMedRelated}
\title{Retrieve related articles from PubMed using PubMed ID's}
\usage{
GetPubMedRelated(
  id,
  database = "pubmed",
  batch.mode = TRUE,
  max.results = 10,
  return.sim.scores = FALSE,
  return.related.ids = FALSE
)
}
\arguments{
\item{id}{either a character vector of PubMed ID's or a BibEntry object,
which is expected to have at least some entries with
\code{eprinttype = "pubmed"} and eprint field specifying a PubMed ID.}

\item{database}{string; the Entrez database to search}

\item{batch.mode}{logical; if \code{TRUE}, the PubMed IDs in \code{id} are
combined by Entrez when searching for linked IDs so that only one set of
linked IDs is returned.  If \code{FALSE}, a separate set of linked IDs is
obtained and returned for each ID in \code{id}.}

\item{max.results}{numeric vector; the maximum number of results to return
if \code{batch.mode} \code{TRUE}; or if \code{batch.mode} is \code{FALSE},
this should have the same length as \code{id} with each element giving the
maximum number of results to return for the corresponding ID.}

\item{return.sim.scores}{logical; Entrez returns a similarity score with
each returned citation giving a measure of how similar the returned entry
is to the ones specified by the query.  If \code{TRUE} these scores are
added to the returned BibEntry object in a field called \sQuote{score}.}

\item{return.related.ids}{logical; should the original PubMed ID(s) that a
returned entry is related to be stored in a field called
\sQuote{PMIDrelated}.}
}
\value{
an object of class BibEntry.
}
\description{
Searches PubMed for articles related to a set of PubMed ID's using NCBI's
E-Utilities.
} \examples{ if (interactive() && !httr::http_error("https://eutils.ncbi.nlm.nih.gov/")){ file.name <- system.file("Bib", "RJC.bib", package="RefManageR") bib <- ReadBib(file.name) bib <- LookupPubMedID(bib[[101:102]]) toBiblatex(GetPubMedRelated(bib, batch.mode = TRUE, max.results = 2, return.sim.scores = TRUE, return.related.ids = TRUE)) GetPubMedRelated(bib, batch.mode = FALSE, max.results = c(2, 2)) } } \references{ \url{https://www.ncbi.nlm.nih.gov/books/NBK25500/} } \seealso{ Other pubmed: \code{\link{GetPubMedByID}()}, \code{\link{LookupPubMedID}()}, \code{\link{ReadCrossRef}()}, \code{\link{ReadPubMed}()} } \concept{pubmed} \keyword{database}
/man/GetPubMedRelated.Rd
no_license
cran/RefManageR
R
false
true
2,518
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ReadPubMed.R \name{GetPubMedRelated} \alias{GetPubMedRelated} \title{Retrieve related articles from PubMed using PubMed ID's} \usage{ GetPubMedRelated( id, database = "pubmed", batch.mode = TRUE, max.results = 10, return.sim.scores = FALSE, return.related.ids = FALSE ) } \arguments{ \item{id}{either a character vector of PubMed ID's or a BibEntry object, which is expected to have at least some entries with \code{eprinttype = "pubmed"} and eprint field specifying a PubMed ID.} \item{database}{string; the Entrez database to search} \item{batch.mode}{logical; if \code{TRUE}, the PubMed IDs in \code{id} are combined by Entrez when searching for linked IDs so that only one set of linked IDs is returned. If \code{FALSE}, a set of linked IDs is obtained for each ID in \code{id}. will be returned} \item{max.results}{numeric vector; the maximum number of results to return if \code{batch.mode} \code{TRUE}; or if \code{batch.mode} is \code{FALSE}, this should have the same length as \code{id} with each element giving the maximum number of results to return for the corresponding ID.} \item{return.sim.scores}{logical; Entrez returns a similarity score with each returned citation giving a measure of how similar the returned entry is to the ones specified by the query. If \code{TRUE} these scores are added to the returned BibEntry object in a field called \sQuote{score}.} \item{return.related.ids}{logical; should the original PubMed ID(s) that a returned entry is related to be stored in a field called \sQuote{PMIDrelated}.} } \value{ an object of class BibEntry. } \description{ Searches PubMed for articles related to a set of PubMed ID's using NCBI's E-Utilities. 
} \examples{ if (interactive() && !httr::http_error("https://eutils.ncbi.nlm.nih.gov/")){ file.name <- system.file("Bib", "RJC.bib", package="RefManageR") bib <- ReadBib(file.name) bib <- LookupPubMedID(bib[[101:102]]) toBiblatex(GetPubMedRelated(bib, batch.mode = TRUE, max.results = 2, return.sim.scores = TRUE, return.related.ids = TRUE)) GetPubMedRelated(bib, batch.mode = FALSE, max.results = c(2, 2)) } } \references{ \url{https://www.ncbi.nlm.nih.gov/books/NBK25500/} } \seealso{ Other pubmed: \code{\link{GetPubMedByID}()}, \code{\link{LookupPubMedID}()}, \code{\link{ReadCrossRef}()}, \code{\link{ReadPubMed}()} } \concept{pubmed} \keyword{database}
#'Hierarchical ordered density clustering (HODC) Algorithm with input generated by DPdensity
#'@param pvalue a vector of p-values obtained from large scale statistical hypothesis testing
#'@param v number of iterations set for DPM fitting by "DPdensity"
#'@param DPM.mcmc list of MCMC settings passed on to \code{DPdensity}
#'(\code{nburn}, \code{nsave}, \code{nskip}, \code{ndisplay})
#'@param DPM.prior list of prior specifications passed on to \code{DPdensity}
#'@details Without the information of networking, we can have an approximation to the marginal density by DPM model fitting on \strong{r}. Suppose the number of finite mixture normals is equal to L_0+L_1, which means the number of classes we have, we apply HODC algorithm in partitioning the $L_0$ and $L_1$ components into two classes.
#'For this function, the input is generated by DPdensity.
#'@return a list of HODC algorithm returned parameters.
#'\describe{
#'\item{mean}{the means of each of two cluster for every DPM fitting by "DPdensity"
#'\describe{
#'\item{mu0}{the means of the cluster with smaller mean}
#'\item{mu1}{the means of the cluster with larger mean}
#'}
#'}
#'\item{variance}{the variance of each of two cluster for every DPM fitting by "DPdensity"
#'\describe{
#'\item{var0}{the variances of the cluster with smaller mean}
#'\item{var1}{the variances of the cluster with larger mean}
#'}
#'}
#'\item{probability}{the probability of each of two cluster for every DPM fitting by "DPdensity"
#'(note: stored under the list element spelled \code{probility})
#'\describe{
#'\item{pro0}{the probabilities of the cluster with smaller mean}
#'\item{pro1}{the probabilities of the cluster with larger mean}
#'}
#'}
#'\item{classification}{The classification corresponding to each cluster for every DPM fitting by "DPdensity"}
#'}
#'@examples
#'\dontrun{
#'###random make the density
#'rstat=c(rnorm(50,mean=1),rnorm(50,mean=2),rnorm(100,mean=4),rnorm(100,mean=8))
#'###transformed into pvalue
#'pvalue=pnorm(-rstat)
#'dpdensityHODC=DPM.HODC(v=5,pvalue)
#'}
#'@export
DPM.HODC<-function(v,pvalue,DPM.mcmc=list(nburn=2000,nsave=1,nskip=0,ndisplay=10),DPM.prior=list(a0=2,b0=1,m2=rep(0,1),s2=diag(100000,1),
                                                                                                psiinv2=solve(diag(0.5,1)),
                                                                                                nu1=4,nu2=4,tau1=1,tau2=100)){
  results<-list()
  # transform p-values to test statistics (Transfer is a package-internal helper)
  rstat=Transfer(pvalue)
  #nburn <-2000
  #nsave <-1
  #nskip <-0
  #ndisplay <-10
  #mcmc <- list(nburn=nburn,nsave=nsave,nskip=nskip,ndisplay=ndisplay)
  mcmc<-DPM.mcmc
  #prior1 <- list(alpha=0.5,m1=0.2,nu1=100,psiinv1=0.1,k0=100)
  #prior2 <- list(a0=2,b0=1,m2=rep(0,1),s2=diag(100000,1),
  #               psiinv2=solve(diag(0.5,1)),
  #               nu1=4,nu2=4,tau1=1,tau2=100)
  # initial DPM fit with the full burn-in; subsequent iterations continue
  # from this chain state (status=FALSE below)
  fit<-DPpackage::DPdensity(rstat,prior=DPM.prior,mcmc=mcmc,status=TRUE)
  for(oo in 1:v){
    cat("iter: ", oo,"\n")
    utils::flush.console()
    # continuation runs: no burn-in, one saved sample per iteration
    nburn <-0
    nsave <-1
    nskip <-0
    ndisplay <-10
    mcmc.conti <- list(nburn=nburn,nsave=nsave,nskip=nskip,ndisplay=ndisplay)
    #prior1 <- list(alpha=0.5,m1=0.2,nu1=100,psiinv1=0.1,k0=100)
    #prior2 <- list(a0=2,b0=1,m2=rep(0,1),s2=diag(100000,1),
    #               psiinv2=solve(diag(0.5,1)),
    #               nu1=4,nu2=4,tau1=1,tau2=100)
    fit<-DPpackage::DPdensity(rstat,prior=DPM.prior,mcmc=mcmc.conti,state=fit$state,status=FALSE)
    ##calculate cluster
    ss<-fit$state$ss            # per-observation cluster labels
    num<-fit$state$ncluster     # number of occupied clusters this iteration
    # NOTE(review): dividing by the cluster count rather than length(ss)
    # means prop are not proportions; they are only used as relative
    # weights below (rescaled by scale(..., scale=sum(...))), so the
    # constant cancels -- confirm this was intended.
    prop<-table(fit$state$ss)/num
    mu<-fit$state$muclus[1:num]
    sigma<-fit$state$sigmaclus[1:num]
    # order clusters by decreasing mean; newm/news/newp follow that order
    index<-order(mu,decreasing=T)
    newm<-mu[index]
    news<-sigma[index]
    newp<-prop[index]
    # with 1 or 2 clusters the split is trivial; labels become 0/1 directly
    if(num==2 | num==1){results[[oo]]<-ss-1}else{
      #hh1<-1/2/sqrt(pi*news[1])+1/2/sqrt(pi*news[2])-2*stats::dnorm(newm[1],newm[2],sd=sqrt(news[1]+news[2]))
      #hh2<-1/2/sqrt(pi*news[2])+1/2/sqrt(pi*news[3])-2*stats::dnorm(newm[2],newm[3],sd=sqrt(news[2]+news[3]))
      # pairwise normal-overlap matrix between ordered cluster components
      clmatrix<-matrix(rep(0,num*num),ncol=num,nrow=num)
      for(i in 1:num){
        for(j in 1:num){
          clmatrix[i,j]<-stats::dnorm(newm[i],newm[j],sd=sqrt(news[i]+news[j]))
        }
      }
      temp1<-0
      temp2<-0
      com1<-0
      com2<-0
      res1<-0
      res2<-0
      final1<-0
      final2<-0
      latent<-0
      latent[1]<-0
      # contrast vectors comparing "merge with running group" (final1)
      # versus "start comparing with the next component" (final2)
      final1<-c(1,-1,rep(0,num-2))
      final2<-c(0,-1,1,rep(0,num-3))
      # HODC sweep: decide for each component whether it joins the
      # left (high-mean) group; latent[k]==1 marks a merge
      for(iii in 1:(num-2)){
        dec1<-t(final1)%*%clmatrix%*%final1
        dec2<-t(final2)%*%clmatrix%*%final2
        if(dec1>dec2){latent[iii+1]<-1
        }else{
          latent[iii+1]<-0
        }
        if(latent[iii+1]==1){
          # merged: rebuild the weighted contrasts around the last cut point
          ddd<-max(which(latent==0))
          temp1<-rep(0,num)
          temp1[1:ddd]<-1
          com1<-temp1*newp
          com1<-scale(com1,center=F,scale=sum(com1))
          temp1<-rep(0,num)
          temp1[(ddd+1):(iii+1)]<--1
          res1<-temp1*newp
          res1<-scale(res1,center=F,scale=sum(abs(res1)))
          final1<-com1+res1
          temp2<-rep(0,num)
          temp2[(ddd+1):(iii+1)]<--1
          res2<-temp2*newp
          res2<-scale(res2,center=F,scale=sum(abs(res2)))
          com2<-rep(0,num)
          com2[iii+2]<-1
          final2<-com2+res2
        }else{
          # not merged: reset contrasts to compare the next pair
          temp1<-rep(0,num)
          temp1[1:iii]<-1
          com1<-temp1*newp
          com1<-scale(com1,center=F,scale=sum(com1))
          res1<-rep(0,num)
          res1[iii+1]<--1
          final1<-com1+res1
          final2<-rep(0,num)
          final2[iii+1]<--1
          final2[iii+2]<-1
        }
      }
      latent[num]<-1
      # last cut point defines the 0/1 partition of the ordered clusters
      uuu<-max(which(latent==0))
      ss[ss %in% index[(uuu+1):num]]<-0
      ss[ss %in% index[1:uuu]]<-1
      results[[oo]]<-ss
    }
  }
  # one row of 0/1 labels per DPdensity iteration
  dpdensitycluster<-do.call(rbind,results)
  # per-iteration summaries of the two groups on the transformed statistics
  mu0=sapply(1:v, function(kk) return(mean(rstat[which(dpdensitycluster[kk,]==0)])))
  mu1=sapply(1:v, function(kk) return(mean(rstat[which(dpdensitycluster[kk,]==1)])))
  var0=sapply(1:v, function(kk) return(stats::var(rstat[which(dpdensitycluster[kk,]==0)])))
  var1=sapply(1:v, function(kk) return(stats::var(rstat[which(dpdensitycluster[kk,]==1)])))
  # NOTE(review): pro0/pro1 divide a group *mean* by the sample size; for a
  # cluster probability one would expect a count divided by length(pvalue).
  # Verify against the intended definition before relying on these values.
  pro0=sapply(1:v, function(kk) return(mean(rstat[which(dpdensitycluster[kk,]==0)])/length(pvalue)))
  pro1=sapply(1:v, function(kk) return(mean(rstat[which(dpdensitycluster[kk,]==1)])/length(pvalue)))
  # if one group was empty in an iteration (variance NA), borrow the other's
  for (kk in 1:v){
    if (is.na(var0[kk])){var0[kk]=var1[kk]
    }else if (is.na(var1[kk])){var1[kk]=var0[kk]}
  }
  # assemble the return list (element name "probility" kept for
  # backward compatibility -- callers rely on this spelling)
  DPdensityHODC=list()
  DPdensityHODC$mean$mu0=mu0
  DPdensityHODC$mean$mu1=mu1
  DPdensityHODC$variance$var0=var0
  DPdensityHODC$variance$var1=var1
  DPdensityHODC$probility$pro0=pro0
  DPdensityHODC$probility$pro1=pro1
  DPdensityHODC$classification=dpdensitycluster
  return(DPdensityHODC)
}
/R/DPM.HODC.R
no_license
cran/BANFF
R
false
false
6,608
r
#'Hierachical ordered density clustering (HODC) Algorithm with input generated by DPdensity #'@param pvalue a vector of p-values obtained from large scale statistical hypothesis testing #'@param v number of iterations set for DPM fitting by "DPdensity" #'@param DPM.mcmc list #'@param DPM.prior list #'@details Without the information of networking, we can have an approximation to the marginal density by DPM model fitting on \strong{r}. Suppose the number of finite mixture normals is equal to L_0+L_1, which means the number of classes we have, we apply HODC algorithm in partitioning the $L_0$ and $L_1$ components into two classes. #'For this function, the input is generated by Mclust #'@return a list of HODC algorithm returned parameters. #'\describe{ #'\item{mean}{the means of each of two cluster for every DPM fitting by "DPdensity" #'\describe{ #'\item{mu0}{the means of the cluster with smaller mean} #'\item{mu1}{the means of the cluster with larger mean} #'} #'} #'\item{variance}{the variance of each of two cluster for every DPM fitting by "DPdensity" #'\describe{ #'\item{var0}{the variances of the cluster with smaller mean} #'\item{var1}{the variances of the cluster with larger mean} #'} #'} #'\item{probability}{the probability of each of two cluster for every DPM fitting by "DPdensity" #'\describe{ #'\item{pro0}{the probabilities of the cluster with smaller mean} #'\item{pro1}{the probabilities of the cluster with larger mean} #'} #'} #'\item{classification}{The classification corresponding to each cluster for every DPM fitting by "DPdensity"} #'} #'@examples #'\dontrun{ #'###random make the density #'rstat=c(rnorm(50,mean=1),rnorm(50,mean=2),rnorm(100,mean=4),rnorm(100,mean=8)) #'###transformed into pvalue #'pvalue=pnorm(-rstat) #'dpdensityHODC=DPM.HODC(v=5,pvalue) #'} #'@export DPM.HODC<-function(v,pvalue,DPM.mcmc=list(nburn=2000,nsave=1,nskip=0,ndisplay=10),DPM.prior=list(a0=2,b0=1,m2=rep(0,1),s2=diag(100000,1), psiinv2=solve(diag(0.5,1)), 
nu1=4,nu2=4,tau1=1,tau2=100)){ results<-list() rstat=Transfer(pvalue) #nburn <-2000 #nsave <-1 #nskip <-0 #ndisplay <-10 #mcmc <- list(nburn=nburn,nsave=nsave,nskip=nskip,ndisplay=ndisplay) mcmc<-DPM.mcmc #prior1 <- list(alpha=0.5,m1=0.2,nu1=100,psiinv1=0.1,k0=100) #prior2 <- list(a0=2,b0=1,m2=rep(0,1),s2=diag(100000,1), # psiinv2=solve(diag(0.5,1)), # nu1=4,nu2=4,tau1=1,tau2=100) fit<-DPpackage::DPdensity(rstat,prior=DPM.prior,mcmc=mcmc,status=TRUE) for(oo in 1:v){ cat("iter: ", oo,"\n") utils::flush.console() nburn <-0 nsave <-1 nskip <-0 ndisplay <-10 mcmc.conti <- list(nburn=nburn,nsave=nsave,nskip=nskip,ndisplay=ndisplay) #prior1 <- list(alpha=0.5,m1=0.2,nu1=100,psiinv1=0.1,k0=100) #prior2 <- list(a0=2,b0=1,m2=rep(0,1),s2=diag(100000,1), # psiinv2=solve(diag(0.5,1)), # nu1=4,nu2=4,tau1=1,tau2=100) fit<-DPpackage::DPdensity(rstat,prior=DPM.prior,mcmc=mcmc.conti,state=fit$state,status=FALSE) ##calculate cluster ss<-fit$state$ss num<-fit$state$ncluster prop<-table(fit$state$ss)/num mu<-fit$state$muclus[1:num] sigma<-fit$state$sigmaclus[1:num] index<-order(mu,decreasing=T) newm<-mu[index] news<-sigma[index] newp<-prop[index] if(num==2 | num==1){results[[oo]]<-ss-1}else{ #hh1<-1/2/sqrt(pi*news[1])+1/2/sqrt(pi*news[2])-2*stats::dnorm(newm[1],newm[2],sd=sqrt(news[1]+news[2])) #hh2<-1/2/sqrt(pi*news[2])+1/2/sqrt(pi*news[3])-2*stats::dnorm(newm[2],newm[3],sd=sqrt(news[2]+news[3])) clmatrix<-matrix(rep(0,num*num),ncol=num,nrow=num) for(i in 1:num){ for(j in 1:num){ clmatrix[i,j]<-stats::dnorm(newm[i],newm[j],sd=sqrt(news[i]+news[j])) } } temp1<-0 temp2<-0 com1<-0 com2<-0 res1<-0 res2<-0 final1<-0 final2<-0 latent<-0 latent[1]<-0 final1<-c(1,-1,rep(0,num-2)) final2<-c(0,-1,1,rep(0,num-3)) for(iii in 1:(num-2)){ dec1<-t(final1)%*%clmatrix%*%final1 dec2<-t(final2)%*%clmatrix%*%final2 if(dec1>dec2){latent[iii+1]<-1 }else{ latent[iii+1]<-0 } if(latent[iii+1]==1){ ddd<-max(which(latent==0)) temp1<-rep(0,num) temp1[1:ddd]<-1 com1<-temp1*newp 
com1<-scale(com1,center=F,scale=sum(com1)) temp1<-rep(0,num) temp1[(ddd+1):(iii+1)]<--1 res1<-temp1*newp res1<-scale(res1,center=F,scale=sum(abs(res1))) final1<-com1+res1 temp2<-rep(0,num) temp2[(ddd+1):(iii+1)]<--1 res2<-temp2*newp res2<-scale(res2,center=F,scale=sum(abs(res2))) com2<-rep(0,num) com2[iii+2]<-1 final2<-com2+res2 }else{ temp1<-rep(0,num) temp1[1:iii]<-1 com1<-temp1*newp com1<-scale(com1,center=F,scale=sum(com1)) res1<-rep(0,num) res1[iii+1]<--1 final1<-com1+res1 final2<-rep(0,num) final2[iii+1]<--1 final2[iii+2]<-1 } } latent[num]<-1 uuu<-max(which(latent==0)) ss[ss %in% index[(uuu+1):num]]<-0 ss[ss %in% index[1:uuu]]<-1 results[[oo]]<-ss } } dpdensitycluster<-do.call(rbind,results) mu0=sapply(1:v, function(kk) return(mean(rstat[which(dpdensitycluster[kk,]==0)]))) mu1=sapply(1:v, function(kk) return(mean(rstat[which(dpdensitycluster[kk,]==1)]))) var0=sapply(1:v, function(kk) return(stats::var(rstat[which(dpdensitycluster[kk,]==0)]))) var1=sapply(1:v, function(kk) return(stats::var(rstat[which(dpdensitycluster[kk,]==1)]))) pro0=sapply(1:v, function(kk) return(mean(rstat[which(dpdensitycluster[kk,]==0)])/length(pvalue))) pro1=sapply(1:v, function(kk) return(mean(rstat[which(dpdensitycluster[kk,]==1)])/length(pvalue))) for (kk in 1:v){ if (is.na(var0[kk])){var0[kk]=var1[kk] }else if (is.na(var1[kk])){var1[kk]=var0[kk]} } DPdensityHODC=list() DPdensityHODC$mean$mu0=mu0 DPdensityHODC$mean$mu1=mu1 DPdensityHODC$variance$var0=var0 DPdensityHODC$variance$var1=var1 DPdensityHODC$probility$pro0=pro0 DPdensityHODC$probility$pro1=pro1 DPdensityHODC$classification=dpdensitycluster return(DPdensityHODC) }
#' printing robotstxt_text
#'
#' Print method for robots.txt text objects. Shows (at most) the first 50
#' lines of the robots.txt file; when download problems were recorded on the
#' object, it also prints the request/download URLs, the problems, and the
#' attribute names. Returns its input invisibly.
#'
#' @param x character vector aka robotstxt$text to be printed
#' @param ... goes down the sink
#' @export
print.robotstxt_text <- function(x, ...){

  # show the beginning of the robots.txt file
  cat("[robots.txt]\n--------------------------------------\n\n")
  txt_lines <- unlist(strsplit(x, "\n"))
  cat(head(txt_lines, 50L), sep = "\n")
  cat("\n\n\n")

  # mark truncation when the file is longer than what was shown
  if ( length(txt_lines) > 50 ) {
    cat("[...]\n\n")
  }

  # report any problems recorded during download
  probs <- attr(x, "problems")
  if ( length(probs) > 0 ) {
    cat("[events]\n--------------------------------------\n\n")

    cat("requested: ", attr(x, "request")$request$url, "\n")
    cat("downloaded: ", attr(x, "request")$url, "\n\n")

    cat(utils::capture.output(print(probs)), sep = "\n")

    cat("[attributes]\n--------------------------------------\n\n")
    cat(names(attributes(x)), sep = ", ")
  }

  cat("\n")

  # return input invisibly so the method pipes/chains cleanly
  invisible(x)
}
/R/print_robotstxt_text.R
permissive
ropensci/robotstxt
R
false
false
911
r
#' printing robotstxt_text #' @param x character vector aka robotstxt$text to be printed #' @param ... goes down the sink #' @export print.robotstxt_text <- function(x, ...){ # rpint part of the robots.txt file cat("[robots.txt]\n--------------------------------------\n\n") tmp <- unlist(strsplit(x, "\n")) cat(tmp[seq_len(min(length(tmp), 50))], sep ="\n") cat("\n\n\n") if(length(tmp) > 50){ cat("[...]\n\n") } # print problems problems <- attr(x, "problems") if ( length(problems) > 0){ cat("[events]\n--------------------------------------\n\n") cat("requested: ", attr(x, "request")$request$url, "\n") cat("downloaded: ", attr(x, "request")$url, "\n\n") cat(utils::capture.output(print(problems)), sep="\n") cat("[attributes]\n--------------------------------------\n\n") cat(names(attributes(x)), sep=", ") } cat("\n") # return invisible(x) }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render.r
\name{render_page}
\alias{render_page}
\alias{data_template}
\title{Render page with template}
\usage{
render_page(pkg = ".", name, data, path = "", depth = NULL, quiet = FALSE)

data_template(pkg = ".", depth = 0L)
}
\arguments{
\item{pkg}{Path to package to document.}

\item{name}{Name of the template (e.g. "home", "vignette", "news")}

\item{data}{Data for the template. This is automatically supplemented with
three lists:
\itemize{
\item \code{site}: \code{title} and path to \code{root}.
\item \code{yaml}: the \code{template} key from \verb{_pkgdown.yml}.
\item \code{package}: package metadata including \code{name} and \code{version}.
}

See the full contents by running \code{\link[=data_template]{data_template()}}.}

\item{path}{Location to create file; relative to destination directory.
If \code{""} (the default), prints to standard out.}

\item{depth}{Depth of path relative to base directory.}

\item{quiet}{If \code{quiet}, will suppress output messages}
}
\description{
Each page is composed of four templates: "head", "header", "content", and
"footer". Each of these templates is rendered using the \code{data}, and
then assembled into an overall page using the "layout" template.
}
/man/render_page.Rd
permissive
isabella232/pkgdown
R
false
true
1,292
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/render.r \name{render_page} \alias{render_page} \alias{data_template} \title{Render page with template} \usage{ render_page(pkg = ".", name, data, path = "", depth = NULL, quiet = FALSE) data_template(pkg = ".", depth = 0L) } \arguments{ \item{pkg}{Path to package to document.} \item{name}{Name of the template (e.g. "home", "vignette", "news")} \item{data}{Data for the template. This is automatically supplemented with three lists: \itemize{ \item \code{site}: \code{title} and path to \code{root}. \item \code{yaml}: the \code{template} key from \verb{_pkgdown.yml}. \item \code{package}: package metadata including \code{name} and\code{version}. } See the full contents by running \code{\link[=data_template]{data_template()}}.} \item{path}{Location to create file; relative to destination directory. If \code{""} (the default), prints to standard out.} \item{depth}{Depth of path relative to base directory.} \item{quiet}{If \code{quiet}, will suppress output messages} } \description{ Each page is composed of four templates: "head", "header", "content", and "footer". Each of these templates is rendered using the \code{data}, and then assembled into an overall page using the "layout" template. }
## Augment the crime training data ("train.csv") with a daily temperature
## column fetched from Weather Underground (weatherData package) for the
## SFO station, then write the result to "trainweather.csv".
install.packages('weatherData')
library(weatherData)
# example API call
temps = getWeatherForDate("SFO", "2014-05-05")
train <- read.csv("train.csv")
# globals acting as a one-entry cache so each distinct date is downloaded
# only once (assumes rows with the same date are contiguous -- TODO confirm)
current_date = ""
current_temp = 0
z = ""
# download the info only if it was not already downloaded
train$Weather <- sapply(train$Dates, FUN=function(h) {
  # keep only the "YYYY-MM-DD" part of the timestamp
  h <- substr(h, 1, 10)
  if (h != current_date) {
    assign("current_date", h, envir = .GlobalEnv)
    z <- getWeatherForDate("SFO", current_date)[1,3]
    assign("current_temp", z, envir = .GlobalEnv)
    return (z)
  }
  return (current_temp)
})
# pull the temperatures out of their list wrappers
train$Weather <- sapply(train$Weather, FUN=function(h) {as.numeric(unlist(h))})
# turn errors into "NA" -- CAREFUL: the errors mean the connection failed,
# not that the data was actually missing!
train$Weather <- sapply(train$Weather, FUN=function(h) {
  # zero-length element == failed download for that row
  if (length(h)<=0) {
    return ("NA")
  }
  return (h)
})
# quote is so it only puts quotation marks around cols 3 and 6,
# which are description and result
write.csv(train, file = "trainweather.csv", quote =c(3,6), row.names=FALSE)
/agregar temp.R
no_license
jgalloni/tpDatos
R
false
false
1,073
r
install.packages('weatherData') library(weatherData) # ejemplo temps = getWeatherForDate("SFO", "2014-05-05") train <- read.csv("train.csv") current_date = "" current_temp = 0 z = "" #descarga la info si no habia sido descargada ya train$Weather <- sapply(train$Dates, FUN=function(h) { h <- substr(h, 1, 10) if (h != current_date) { assign("current_date", h, envir = .GlobalEnv) z <- getWeatherForDate("SFO", current_date)[1,3] assign("current_temp", z, envir = .GlobalEnv) return (z) } return (current_temp) }) #saca las temps de su lista train$Weather <- sapply(train$Weather, FUN=function(h) {as.numeric(unlist(h))}) #transforma los errores en "NA" OJO los errores son prqeu no se pudo conectar, no porque el dato no estuviera! train$Weather <- sapply(train$Weather, FUN=function(h) { if (length(h)<=0) { return ("NA") } return (h) }) write.csv(train, file = "trainweather.csv", quote =c(3,6), row.names=FALSE) #quote es para que le ponga comillas solo a las cols 3 y 6 son desc y resultdo
library(markovchain)

### Name: name<-
### Title: Method to set name of markovchain object
### Aliases: name<- name<-,markovchain-method

### ** Examples

# Build a 3-state Markov chain; transitionMatrix is row-stochastic
# (each row sums to 1) with rows/columns labelled by the state names.
statesNames <- c("a", "b", "c")
markovB <- new("markovchain", states = statesNames,
               transitionMatrix = matrix(c(0.2, 0.5, 0.3,
                                           0, 1, 0,
                                           0.1, 0.8, 0.1),
                                         nrow = 3, byrow = TRUE,
                                         dimnames=list(statesNames,statesNames)),
               name = "A markovchain Object"
)
# Use the name<- replacement method to overwrite the chain's name slot.
name(markovB) <- "dangerous mc"
/data/genthat_extracted_code/markovchain/examples/setName.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
489
r
library(markovchain) ### Name: name<- ### Title: Method to set name of markovchain object ### Aliases: name<- name<-,markovchain-method ### ** Examples statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) name(markovB) <- "dangerous mc"
## Estimate COVID-19 case subreporting from Uruguayan public data by fitting
## Bayesian models for three outcomes (deaths, critical/ICU, severe/hospital)
## to reported case dynamics, and write one CSV of estimates per outcome.
library(dplyr)
library(tidyr)
library(ggplot2)
library(lubridate)
library(rriskDistributions)
library(stringr)
library(greta)
library(greta.gp)
# project helpers: caseDetails_2_dynamics, onset2Outcome, get_fitting_data,
# run_bayesian_model are defined in these sourced files
source("./functions_auxiliary.R")
source("./functions_analysis_subreporting.R")
source("./bayesian_subreporting_fitting.R")

# analysis window and 5-year age bins
minDate <- "2020-10-01"
maxDate <- "2021-01-30"
ageBins <- c("0-4", "5-9", "10-14", "15-19", "20-24", "25-29", "30-34",
             "35-39", "40-44", "45-49", "50-54", "55-59", "60-64", "65-69",
             "70-74", "75-79", "80-84", "85-89", "90+")

# parse per-case records and aggregate into daily outcome dynamics
# restricted to the analysis window
outcomeDf <- read.csv("../data/public_uruguay_data/processedData.csv",
                      stringsAsFactors=FALSE) %>%
  dplyr::mutate(., dateSevere=lubridate::date(dateSevere),
                dateCritical=lubridate::date(dateCritical),
                dateSymptoms=lubridate::date(dateSymptoms)) %>%
  caseDetails_2_dynamics(., ageBins=ageBins, minDate=minDate, maxDate=maxDate)

# remove first 10 days of deaths, which are from patients from
# previous of this period
# NOTE(review): the code actually zeroes only the first 8/4/3 days for
# deaths/critical/severe -- confirm against the comment above.
outcomeDf$deaths[c(1:8)] <- 0
outcomeDf$critical[c(1:4)] <- 0
outcomeDf$severe[c(1:3)] <- 0

# parameters for fitting: baseline outcome percentage (mean and CI)
# for each outcome type
deathMean <- 0.68
deathIC <- c(0.51, 0.90)
criticalMean <- 1.28
criticalIC <- c(0.96, 1.7)
severeMean <- 3.3
severeIC <- c(2.6, 4.3)
onset2Deathquartiles <- c(8, 18, 24) # data used to fit the delay curve
onset2ICUquartiles <- c(5, 9, 11) # data used to fit the delay curve
onset2Hospquartiles <- c(2, 4, 8) # data used to fit the delay curve

# Fit to deaths
delay_fun_death <- onset2Outcome(onset2Deathquartiles)
deathDf <- dplyr::mutate(outcomeDf, newOutcome=deaths, newCases=casesTot)
fittingData_death <- get_fitting_data(deathDf, delay_fun_death,
                                      baselineOutcomeProp=deathMean)
predictionDeaths <- run_bayesian_model(fittingData_death,
                                       percentageOutcome=deathMean,
                                       percentageOutcomeRange=deathIC)
write.csv(predictionDeaths, "../results/2_estimate_subreporting_deaths.csv",
          row.names=FALSE)

# Fit to critical cases
delay_fun_crit <- onset2Outcome(onset2ICUquartiles)
criticalDf <- dplyr::mutate(outcomeDf, newOutcome=critical, newCases=casesTot)
#
fittingData_crit <- get_fitting_data(criticalDf, delay_fun_crit,
                                     baselineOutcomeProp=criticalMean)
predictionCritical <- run_bayesian_model(fittingData_crit,
                                         percentageOutcome=criticalMean,
                                         percentageOutcomeRange=criticalIC)
write.csv(predictionCritical, "../results/2_estimate_subreporting_critical.csv",
          row.names=FALSE)

# Fit to severe cases
delay_fun_hosp <- onset2Outcome(onset2Hospquartiles)
severeDf <- dplyr::mutate(outcomeDf, newOutcome=severe, newCases=casesTot)
#
fittingData_severe <- get_fitting_data(severeDf, delay_fun_hosp,
                                       baselineOutcomeProp=severeMean)
predictionSevere <- run_bayesian_model(fittingData_severe,
                                       percentageOutcome=severeMean,
                                       percentageOutcomeRange=severeIC)
write.csv(predictionSevere, "../results/2_estimate_subreporting_severe.csv",
          row.names=FALSE)
/age_stratified_subreporting/code/2_estimate_subreporting.R
no_license
dherrera1911/age_stratified_COVID_subreporting
R
false
false
3,193
r
library(dplyr) library(tidyr) library(ggplot2) library(lubridate) library(rriskDistributions) library(stringr) library(greta) library(greta.gp) source("./functions_auxiliary.R") source("./functions_analysis_subreporting.R") source("./bayesian_subreporting_fitting.R") minDate <- "2020-10-01" maxDate <- "2021-01-30" ageBins <- c("0-4", "5-9", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40-44", "45-49", "50-54", "55-59", "60-64", "65-69", "70-74", "75-79", "80-84", "85-89", "90+") outcomeDf <- read.csv("../data/public_uruguay_data/processedData.csv", stringsAsFactors=FALSE) %>% dplyr::mutate(., dateSevere=lubridate::date(dateSevere), dateCritical=lubridate::date(dateCritical), dateSymptoms=lubridate::date(dateSymptoms)) %>% caseDetails_2_dynamics(., ageBins=ageBins, minDate=minDate, maxDate=maxDate) # remove first 10 days of deaths, which are from patients from # previous of this period outcomeDf$deaths[c(1:8)] <- 0 outcomeDf$critical[c(1:4)] <- 0 outcomeDf$severe[c(1:3)] <- 0 # parameters for fitting deathMean <- 0.68 deathIC <- c(0.51, 0.90) criticalMean <- 1.28 criticalIC <- c(0.96, 1.7) severeMean <- 3.3 severeIC <- c(2.6, 4.3) onset2Deathquartiles <- c(8, 18, 24) # datos para ajustar curva de delay onset2ICUquartiles <- c(5, 9, 11) # datos para ajustar curva de delay onset2Hospquartiles <- c(2, 4, 8) # datos para ajustar curva de delay # Fit to deaths delay_fun_death <- onset2Outcome(onset2Deathquartiles) deathDf <- dplyr::mutate(outcomeDf, newOutcome=deaths, newCases=casesTot) fittingData_death <- get_fitting_data(deathDf, delay_fun_death, baselineOutcomeProp=deathMean) predictionDeaths <- run_bayesian_model(fittingData_death, percentageOutcome=deathMean, percentageOutcomeRange=deathIC) write.csv(predictionDeaths, "../results/2_estimate_subreporting_deaths.csv", row.names=FALSE) # Fit to critical cases delay_fun_crit <- onset2Outcome(onset2ICUquartiles) criticalDf <- dplyr::mutate(outcomeDf, newOutcome=critical, newCases=casesTot) # fittingData_crit 
<- get_fitting_data(criticalDf, delay_fun_crit, baselineOutcomeProp=criticalMean) predictionCritical <- run_bayesian_model(fittingData_crit, percentageOutcome=criticalMean, percentageOutcomeRange=criticalIC) write.csv(predictionCritical, "../results/2_estimate_subreporting_critical.csv", row.names=FALSE) # Fit to severe cases delay_fun_hosp <- onset2Outcome(onset2Hospquartiles) severeDf <- dplyr::mutate(outcomeDf, newOutcome=severe, newCases=casesTot) # fittingData_severe <- get_fitting_data(severeDf, delay_fun_hosp, baselineOutcomeProp=severeMean) predictionSevere <- run_bayesian_model(fittingData_severe, percentageOutcome=severeMean, percentageOutcomeRange=severeIC) write.csv(predictionSevere, "../results/2_estimate_subreporting_severe.csv", row.names=FALSE)
# ui.r -- Shiny UI definition: a sidebar for data upload and run controls,
# plus a main panel that shows the resulting imaging plot.
library(shiny)

# Sidebar: sample-data shortcut, upload widgets, grouping selector,
# run trigger, and a text slot for variable names.
input_panel <- sidebarPanel(
  actionButton("sample", label = "Use Sample Data"),
  h3("Or use your own data"),
  fileInput("image", label = "Upload image file (.nii)"),
  fileInput("mask", label = "Upload Mask file (.nii)"),
  fileInput("xmat", label = "Upload X_matrix data (.csv)", accept = c('.csv')),
  # checkboxGroupInput("options", label = h3("Options"),
  #                    choices = list("Imputation" = 1, "Baseline Adjustment" = 2, "Covariates" = 3, "..." = 4),
  #                    selected = 0),
  radioButtons("group_var", "Select grouping variable",
               choices = c("please upload X_matrix data first")),
  actionButton("run", "Run!"),
  h3("Output"),
  # h5("Variables"),
  textOutput("var_names")
)

# Main panel: rendered imaging plot and a download control for saving it.
output_panel <- mainPanel(
  h3("Imaging plots"),
  downloadButton("downloadPlot", label = "Save Image", class = NULL),
  plotOutput("plot1")
)

# The value of ui.r (its last expression) is the page itself.
fluidPage(
  sidebarLayout(
    input_panel,
    output_panel
  )
)
/inst/shiny_apps/myapp/ui.r
permissive
seonjoo/neurorct
R
false
false
1,013
r
# ui.r -- Shiny UI definition: a sidebar for data upload and run controls,
# plus a main panel that shows the resulting imaging plot.
library(shiny)

# Sidebar: sample-data shortcut, upload widgets, grouping selector,
# run trigger, and a text slot for variable names.
input_panel <- sidebarPanel(
  actionButton("sample", label = "Use Sample Data"),
  h3("Or use your own data"),
  fileInput("image", label = "Upload image file (.nii)"),
  fileInput("mask", label = "Upload Mask file (.nii)"),
  fileInput("xmat", label = "Upload X_matrix data (.csv)", accept = c('.csv')),
  # checkboxGroupInput("options", label = h3("Options"),
  #                    choices = list("Imputation" = 1, "Baseline Adjustment" = 2, "Covariates" = 3, "..." = 4),
  #                    selected = 0),
  radioButtons("group_var", "Select grouping variable",
               choices = c("please upload X_matrix data first")),
  actionButton("run", "Run!"),
  h3("Output"),
  # h5("Variables"),
  textOutput("var_names")
)

# Main panel: rendered imaging plot and a download control for saving it.
output_panel <- mainPanel(
  h3("Imaging plots"),
  downloadButton("downloadPlot", label = "Save Image", class = NULL),
  plotOutput("plot1")
)

# The value of ui.r (its last expression) is the page itself.
fluidPage(
  sidebarLayout(
    input_panel,
    output_panel
  )
)
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{annotatedAlignment}
\alias{annotatedAlignment}
\title{Construct an \sQuote{annotatedAlignment}.}
\usage{
annotatedAlignment(alnpath, annopath, type, ...)
}
\arguments{
\item{alnpath}{Path to a directory containing mercator segments, a
single file in mfa (multi fasta) or maf (multiple alignment file)
format containing the genome alignment.}

\item{annopath}{Path to annotation files.}

\item{type}{Type of annotation. Supported file types are \emph{gff},
\emph{ptt}, \emph{ftb} (NCBI feature table), \emph{genbank}/\emph{gbk},
and \emph{table} (custom tab-delimited annotation files, minimally
containing \emph{start} and \emph{end} values).}

\item{...}{Optionally \code{features} (default: CDS, RNA), \code{seqid}
(sequence identifier, e.g. NCBI accession numbers), and \code{sep}.}
}
\description{
Construct an \sQuote{annotatedAlignment}.
}
\seealso{
\code{\link{mercator}}, \code{\link{alignSegments}},
\code{\linkS4class{annotatedAlignment}}.
}
/man/annotatedAlignment.Rd
no_license
promodel/genoslideR
R
false
false
1,010
rd
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{annotatedAlignment}
\alias{annotatedAlignment}
\title{Construct an \sQuote{annotatedAlignment}.}
\usage{
annotatedAlignment(alnpath, annopath, type, ...)
}
\arguments{
\item{alnpath}{Path to a directory containing mercator segments, a
single file in mfa (multi fasta) or maf (multiple alignment file)
format containing the genome alignment.}

\item{annopath}{Path to annotation files.}

\item{type}{Type of annotation. Supported file types are \emph{gff},
\emph{ptt}, \emph{ftb} (NCBI feature table), \emph{genbank}/\emph{gbk},
and \emph{table} (custom tab-delimited annotation files, minimally
containing \emph{start} and \emph{end} values).}

\item{...}{Optionally \code{features} (default: CDS, RNA), \code{seqid}
(sequence identifier, e.g. NCBI accession numbers), and \code{sep}.}
}
\description{
Construct an \sQuote{annotatedAlignment}.
}
\seealso{
\code{\link{mercator}}, \code{\link{alignSegments}},
\code{\linkS4class{annotatedAlignment}}.
}
# Derv1: compute the first derivative (score vector) of the penalized
# log-likelihood with respect to the coefficient vector and store it as
# "Derv1.pen" in the supplied environment.
#
# All inputs and the output live inside `penden.env`:
#   reads : "tilde.PSI.d.D" (basis/design matrix),
#           "ck.val"        (current coefficient vector),
#           "DD"            (number of coefficients; row count of result),
#           "DDD.sum"       (matrix applied to ck.val for the second term)
#   writes: "Derv1.pen"     (DD x 1 matrix)
#
# The kronecker() call replicates the fitted values
# tilde.PSI.d.D %*% ck.val across as many columns as tilde.PSI.d.D has,
# so the element-wise division gives psi_j(x_i) / f(x_i); colSums() then
# sums over observations for each basis function j, and the final term
# subtracts DDD.sum %*% ck.val.
# NOTE(review): the statistical interpretation (score of a penalized
# likelihood) is inferred from the structure of the expression -- confirm
# against the pencopula package documentation.
Derv1 <- function(penden.env) {
  assign("Derv1.pen",
         matrix(colSums(get("tilde.PSI.d.D",penden.env)/kronecker(get("tilde.PSI.d.D",penden.env)%*%get("ck.val",penden.env), matrix(1,1,dim(get("tilde.PSI.d.D",penden.env))[2]))),get("DD",penden.env),1)-get("DDD.sum",penden.env)%*%get("ck.val",penden.env),
         penden.env)
}
/R/Derv1.R
no_license
cran/pencopula
R
false
false
316
r
# Derv1: compute the first derivative (score vector) of the penalized
# log-likelihood with respect to the coefficient vector and store it as
# "Derv1.pen" in the supplied environment.
#
# All inputs and the output live inside `penden.env`:
#   reads : "tilde.PSI.d.D" (basis/design matrix),
#           "ck.val"        (current coefficient vector),
#           "DD"            (number of coefficients; row count of result),
#           "DDD.sum"       (matrix applied to ck.val for the second term)
#   writes: "Derv1.pen"     (DD x 1 matrix)
#
# The kronecker() call replicates the fitted values
# tilde.PSI.d.D %*% ck.val across as many columns as tilde.PSI.d.D has,
# so the element-wise division gives psi_j(x_i) / f(x_i); colSums() then
# sums over observations for each basis function j, and the final term
# subtracts DDD.sum %*% ck.val.
# NOTE(review): the statistical interpretation (score of a penalized
# likelihood) is inferred from the structure of the expression -- confirm
# against the pencopula package documentation.
Derv1 <- function(penden.env) {
  assign("Derv1.pen",
         matrix(colSums(get("tilde.PSI.d.D",penden.env)/kronecker(get("tilde.PSI.d.D",penden.env)%*%get("ck.val",penden.env), matrix(1,1,dim(get("tilde.PSI.d.D",penden.env))[2]))),get("DD",penden.env),1)-get("DDD.sum",penden.env)%*%get("ck.val",penden.env),
         penden.env)
}
## Credit-card customer churn analysis (EDX final project).
## Loads the Kaggle BankChurners data, explores each feature, builds
## dummy-encoded features, and compares kNN (two scalings) against
## C5.0 decision trees (plain and boosted).

############################################################
######################## | Data Loading | ##################
############################################################

# Importing the csv downloaded from
# https://www.kaggle.com/sakshigoyal7/credit-card-customers.
# It will also be provided in my GitHub repo
# (https://github.com/damtaba/EDX-Final-Project) along with the rest of the files
data <- read.csv(file="BankChurners.csv")

# There are two variables that were calculated later and are not part of the
# original base. We'll remove them
data$Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1 <- NULL
data$Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2 <- NULL

str(data)

############################################################
######################## | Packages | ######################
############################################################

# Package names we'll need
# FIX: "fastDummies" was listed twice; the duplicate entry was removed.
packages <- c("tidyverse",
              "rmarkdown",
              "fastDummies",
              "caret",
              "knitr",
              "gmodels",
              "C50",
              "lubridate"
              )

# Install packages not yet installed
installed_packages <- packages %in% rownames(installed.packages())
if (any(installed_packages == FALSE)) {
  install.packages(packages[!installed_packages])
}

# Packages loading
invisible(lapply(packages, library, character.only = TRUE))

############################################################
###################### | Data Analysis | ###################
############################################################

########## Attrition_Flag
data %>%
  group_by(Attrition_Flag) %>%
  summarize(n=n()) %>%
  mutate(Participation=n/sum(n)) %>%
  kable()

########## Customer_Age
summary(data$Customer_Age)

# First approach
data %>%
  ggplot(aes(x=Customer_Age)) +
  geom_bar() +
  ggtitle("Distribution of Customer's Ages") +
  xlab("Age") +
  ylab("n")

# Graphically, we can anticipate that the age distribution resembles a
# normal distribution
shapiro.test(sample(data$Customer_Age,5000)) # Shapiro-Wilk normality test (max sample size 5000)

########## Gender
# At first glance it does not seem that gender can be a good predictor;
# it is almost evenly distributed
CrossTable(data$Gender,data$Attrition_Flag,prop.chisq = FALSE)

########## Dependent_count
# FIX: this summary previously repeated Customer_Age (copy-paste error);
# it now summarises the section's own variable.
summary(data$Dependent_count)
CrossTable(data$Dependent_count,data$Attrition_Flag,prop.chisq = FALSE,prop.t = FALSE,prop.c = FALSE)

########## Education_Level
data %>%
  group_by(Education_Level) %>%
  summarize(Q=n()) %>%
  mutate(porc=Q/sum(Q)) %>%
  arrange(desc(porc)) %>%
  ggplot(aes(x="",y=Q,fill=Education_Level)) +
  geom_bar(stat="identity", width=1)+
  coord_polar("y", start=0)+ # Converting to pie chart
  theme_void() # Cleaning the graph (FIX: theme_void must be called; adding the bare function errors)

data %>%
  group_by(Education_Level) %>%
  summarize(Q=n()) %>%
  mutate(Participation=Q/sum(Q)) %>%
  arrange(desc(Participation)) %>%
  kable()

########## Marital_Status
data %>%
  group_by(Marital_Status) %>%
  summarize(n=n()) %>%
  ungroup() %>%
  mutate(Participation=n/sum(n)) %>%
  arrange(desc(Participation)) %>%
  kable()

data %>%
  group_by(Marital_Status,Attrition_Flag) %>%
  summarize(n=n()) %>%
  # FIX: parentheses were misplaced -- `nsmall = 2` was being passed to
  # mutate() (creating a stray constant column) and round() got no digits.
  mutate(Participation=format(round(n/sum(n)*100, 2), nsmall = 2)) %>%
  ggplot(aes(x=Marital_Status,y=Participation,fill=Attrition_Flag))+
  geom_bar(stat = "identity")+
  geom_text(aes(x=Marital_Status,y=Participation,label=Participation,vjust=-2))+
  ggtitle("Percentage of participation of the type of client by Marital Status")

########## Income_Category
data %>%
  group_by(Income_Category) %>%
  summarize(n=n()) %>%
  ungroup() %>%
  mutate(Participation=n/sum(n)) %>%
  arrange(desc(Participation)) %>%
  kable()

data %>%
  group_by(Attrition_Flag,Income_Category) %>%
  summarize(Q=n()) %>%
  ungroup() %>%
  group_by(Income_Category) %>%
  mutate(Participation=Q/sum(Q)) %>%
  mutate(Participation=format(round(Participation*100,digits = 0),digits = 4)) %>%
  ggplot(aes(x=Income_Category, y = Participation, fill = Attrition_Flag))+
  geom_bar(stat="identity")+
  coord_flip()+
  ggtitle("Percentage of Income_Category by Attrition_Flag")+
  geom_text(aes(x=Income_Category, y = Participation,label=Participation),hjust=-0.25)

########## Card_Category
data %>%
  group_by(Card_Category,Attrition_Flag) %>%
  summarize(Q=n()) %>%
  mutate(Participation=Q/sum(Q)) %>%
  mutate(Participation=format(round(Participation,digits = 4),digits = 4)) %>%
  ggplot(aes(x=Card_Category,y=Participation,fill=Attrition_Flag))+
  geom_col()+
  ggtitle("Card_Category by Attrition_Flag")+
  geom_text(aes(x=Card_Category, y = Participation,label=Participation,vjust=-0.45))+
  facet_wrap(~ Card_Category)

data %>%
  group_by(Card_Category) %>%
  summarize(Q=n()) %>%
  mutate(Participation=Q/sum(Q)) %>%
  kable()

########## Months_on_book
data %>%
  group_by(Months_on_book) %>%
  summarize(n=n()) %>%
  arrange(desc(Months_on_book)) %>%
  ggplot(aes(x=Months_on_book,y=n)) +
  geom_line()+
  geom_col(alpha=0.5)+
  ggtitle("Months on book")

########## Total_Relationship_Count
data %>%
  group_by(Total_Relationship_Count) %>%
  summarize(n=n()) %>%
  ggplot(aes(Total_Relationship_Count,n))+
  geom_point()+
  geom_line()+
  ggtitle("Total Relationship Count")

########## Months_Inactive_12_mon
data %>%
  group_by(Months_Inactive_12_mon) %>%
  summarize(n=n()) %>%
  kable()

data %>%
  group_by(Months_Inactive_12_mon,Attrition_Flag) %>%
  summarize(n=n()) %>%
  ggplot(aes(Months_Inactive_12_mon,n,fill=Attrition_Flag))+
  geom_col(position="fill")+
  geom_line()

########## Contacts_Count_12_mon
data %>%
  group_by(Contacts_Count_12_mon) %>%
  summarize(n=n()) %>%
  mutate(Participation=n/sum(n)) %>%
  arrange(Contacts_Count_12_mon)

########## Credit_Limit
data %>%
  group_by(Attrition_Flag,Credit_Limit) %>%
  summarize(n=n()) %>%
  ggplot(aes(x=Credit_Limit,y=n,fill=Attrition_Flag)) +
  geom_col(width = 2500)+
  ylim(0,25)+
  ggtitle("Composition of Credit_Limit by Attrition_Flag")

########## Total_Revolving_Bal
summary(data$Total_Revolving_Bal)

data %>%
  group_by(Total_Revolving_Bal) %>%
  summarize(n=n()) %>%
  mutate(porc=n/sum(n)) %>%
  arrange(desc(porc))

data %>%
  group_by(Total_Revolving_Bal) %>%
  summarize(n=n()) %>%
  ggplot(aes(x=Total_Revolving_Bal,y=n))+
  geom_col(width = 100)+
  ggtitle("Distribution of Customers by Total Revolving Limit")

########## Avg_Open_To_Buy
data %>%
  group_by(Avg_Open_To_Buy) %>%
  summarize(n=n()) %>%
  mutate(porc=n/sum(n)) %>%
  ggplot(aes(Avg_Open_To_Buy,n)) +
  geom_col(width = 1000)+
  ggtitle("Average Open to Buy")

########## Total_Amt_Chng_Q4_Q1
data %>%
  group_by(Total_Amt_Chng_Q4_Q1) %>%
  summarize(n=n()) %>%
  mutate(porc=n/sum(n)) %>%
  ggplot(aes(Total_Amt_Chng_Q4_Q1,n))+
  geom_col(width = 0.1)+
  ggtitle("Total_Amt_Chng_Q4_Q1")

########## Total_Trans_Amt
data %>%
  group_by(Total_Trans_Amt,Attrition_Flag) %>%
  summarize(Q=n()) %>%
  mutate(porc=Q/sum(Q)) %>%
  arrange(desc(porc)) %>%
  ggplot(aes(Total_Trans_Amt,Q,fill=Attrition_Flag))+
  geom_col(width = 500)+
  ggtitle("Total_Trans_Amt by Attrition_Flag")

########## Total_Trans_Ct
data %>%
  group_by(Total_Trans_Ct,Attrition_Flag) %>%
  summarize(Q=n()) %>%
  mutate(porc=Q/sum(Q)) %>%
  arrange(desc(porc)) %>%
  ggplot(aes(Total_Trans_Ct,Q,fill=Attrition_Flag))+
  geom_col()+
  ggtitle("Total_Trans_Ct by Attrition_Flag")

########## Total_Ct_Chng_Q4_Q1
data %>%
  group_by(Total_Ct_Chng_Q4_Q1,Attrition_Flag) %>%
  summarize(Q=n()) %>%
  mutate(porc=Q/sum(Q)) %>%
  arrange(desc(porc)) %>%
  ggplot(aes(Total_Ct_Chng_Q4_Q1,Q,fill=Attrition_Flag))+
  geom_col()+
  ylim(0,100)+
  xlim(0,2)+
  ggtitle("Total_Ct_Chng_Q4_Q1 by Attrition_Flag")

########## Avg_Utilization_Ratio
data %>%
  group_by(Avg_Utilization_Ratio,Attrition_Flag) %>%
  summarize(Q=n()) %>%
  mutate(porc=Q/sum(Q)) %>%
  arrange(desc(porc)) %>%
  ggplot(aes(Avg_Utilization_Ratio,Q,fill=Attrition_Flag))+
  geom_col(width = 0.2)+
  ggtitle("Distribution of Avg_Utilization_Ratio by Attrition_Flag")

############################################################
############# | Pre-processing the dataset | ##############
############################################################

# Making the dummy features
data_flags <- dummy_cols(data,remove_selected_columns = TRUE,select_columns = c("Gender","Education_Level","Marital_Status","Income_Category","Card_Category"))

# Factor variables that are mutually exclusive can be separated into n-1
# dummy variables. This means, if it is not any of the above, it is by
# default the one that remains, so one dummy per factor is dispensable.
data_flags <- data_flags %>%
  mutate(Churn_Customers= ifelse(Attrition_Flag =="Attrited Customer",1,0)) %>%
  select(-Attrition_Flag,
         -Gender_M,
         -Education_Level_Doctorate,
         -Marital_Status_Unknown,
         -`Income_Category_$120K +`,
         -Card_Category_Platinum,
         -CLIENTNUM # Does not have any information apart from the ID
         )

str(data_flags)

#### Correlations
correlationMatrix <- cor(data_flags[,1:32],method="pearson")
highlyCorrelatedNames <- findCorrelation(correlationMatrix, cutoff=0.5,names=TRUE)
highlyCorrelated <- findCorrelation(correlationMatrix, cutoff=0.5,names=FALSE)
data_model <- data_flags[,-highlyCorrelated]
kable(highlyCorrelatedNames,col.names = "This are the variables highly correlated")
str(data_flags)

############################################################
####################### | Models | #########################
############################################################

# Using a 50/50 division of the data
# NOTE(review): the partition indices come from data_model (correlated
# columns removed) but the train/test sets are taken from data_flags, so
# the correlation-based feature selection is effectively unused. Kept
# as-is because all column indices below (1:32 features, 33 = label)
# assume the data_flags layout -- confirm intent before changing.
index_test <- createDataPartition(data_model$Churn_Customers,p=0.5,list=FALSE)
test_set <- data_flags[index_test,]
train_set <- data_flags[-index_test,]

################### Knn

# Creating the function that normalizes features to the [0, 1] range
normalize <- function(x){
  return((x-min(x))/(max(x)-min(x)))}

# Creating both datasets with the new scale
train_features_n <- as.data.frame(lapply(train_set[1:32],normalize))
test_features_n <- as.data.frame(lapply(test_set[1:32],normalize))

# Making the model
model_knn_n <- train(train_features_n,as.factor(train_set[,33]),method = "knn")

# Using this model to predict
y_hat_knn_n_model <- predict(model_knn_n,test_features_n)

# Checking the proportion of right cases
Mean_knn_n <- mean(test_set$Churn_Customers==y_hat_knn_n_model)

# Creating a table to fill it with the results
Results_table <- data.frame(Model=character(),Mean=numeric(),stringsAsFactors = FALSE)

# Creating the list for this model
Results_knn_n <- list("Knn normalized",Mean_knn_n)

# Adding this model's results to the table
Results_table[1,] <- Results_knn_n
Results_knn_n

### z-score
# Scaling the data
train_features_s <- as.data.frame(scale(train_set[,1:32]))
test_features_s <- as.data.frame(scale(test_set[,1:32]))

# Making the model
model_knn_s <- train(train_features_s[,1:32],as.factor(train_set[,33]),method = "knn")

# Using the model to predict
y_hat_knn_s_model <- predict(model_knn_s,test_features_s)

# Checking the proportion of right cases
Mean_kss_s <- mean(test_set$Churn_Customers==y_hat_knn_s_model)

# Creating the list for this model
Results_knn_s <- list("Knn with scale z-score",Mean_kss_s)

# Adding this model's results to the table
Results_table[2,] <- Results_knn_s
Results_knn_s

###### Decision Tree C5.0
# Knn needed numerical features. Now that that is not needed, some of
# them should be factors.

# Transforming some features into factors in the train set
# (backtick-quoted columns are renamed to syntactic names and the
# originals dropped below)
train_set_C5.0 <- train_set %>%
  mutate(
    Gender_F = as.factor(Gender_F),
    Education_Level_College= as.factor(Education_Level_College),
    Education_Level_Graduate=as.factor(Education_Level_Graduate),
    Education_Level_High_school = as.factor(`Education_Level_High School`),
    Education_Level_Post_Graduate = as.factor(`Education_Level_Post-Graduate`),
    Education_Level_Uneducated = as.factor(Education_Level_Uneducated),
    Education_Level_Unknown=as.factor(Education_Level_Unknown),
    Marital_Status_Divorced=as.factor(Marital_Status_Divorced),
    Marital_Status_Married = as.factor(Marital_Status_Married),
    Marital_Status_Single=as.factor(Marital_Status_Single),
    Income_Category_40K_60K= as.factor(`Income_Category_$40K - $60K`),
    Income_Category_60K_80K = as.factor(`Income_Category_$60K - $80K`),
    Income_Category_80K_120K=as.factor(`Income_Category_$80K - $120K`),
    Income_Category_Less_than_40K = as.factor(`Income_Category_Less than $40K`),
    Income_Category_Unknown = as.factor(Income_Category_Unknown),
    Card_Category_Blue = as.factor(Card_Category_Blue),
    Card_Category_Gold = as.factor(Card_Category_Gold),
    Card_Category_Silver = as.factor(Card_Category_Silver),
    Churn_Customers = as.factor(Churn_Customers)
  ) %>%
  select(
    -`Education_Level_High School`,
    -`Education_Level_Post-Graduate`,
    -`Income_Category_$40K - $60K`,
    -`Income_Category_$60K - $80K`,
    -`Income_Category_$80K - $120K`,
    -`Income_Category_Less than $40K`
  )

# Transforming some features into factors in the test set
test_set_C5.0 <- test_set %>%
  mutate(
    Gender_F = as.factor(Gender_F),
    Education_Level_College= as.factor(Education_Level_College),
    Education_Level_Graduate=as.factor(Education_Level_Graduate),
    Education_Level_High_school = as.factor(`Education_Level_High School`),
    Education_Level_Post_Graduate = as.factor(`Education_Level_Post-Graduate`),
    Education_Level_Uneducated = as.factor(Education_Level_Uneducated),
    Education_Level_Unknown=as.factor(Education_Level_Unknown),
    Marital_Status_Divorced=as.factor(Marital_Status_Divorced),
    Marital_Status_Married = as.factor(Marital_Status_Married),
    Marital_Status_Single=as.factor(Marital_Status_Single),
    Income_Category_40K_60K= as.factor(`Income_Category_$40K - $60K`),
    Income_Category_60K_80K = as.factor(`Income_Category_$60K - $80K`),
    Income_Category_80K_120K=as.factor(`Income_Category_$80K - $120K`),
    Income_Category_Less_than_40K = as.factor(`Income_Category_Less than $40K`),
    Income_Category_Unknown = as.factor(Income_Category_Unknown),
    Card_Category_Blue = as.factor(Card_Category_Blue),
    Card_Category_Gold = as.factor(Card_Category_Gold),
    Card_Category_Silver = as.factor(Card_Category_Silver),
    Churn_Customers = as.factor(Churn_Customers)
  ) %>%
  select(
    -`Education_Level_High School`,
    -`Education_Level_Post-Graduate`,
    -`Income_Category_$40K - $60K`,
    -`Income_Category_$60K - $80K`,
    -`Income_Category_$80K - $120K`,
    -`Income_Category_Less than $40K`
  )

# Making the model
model_c5o <- C5.0(Churn_Customers ~.,data=train_set_C5.0)

# Using the model to predict
y_hat_c50 <- predict(model_c5o,test_set_C5.0)

# Summary of the model
summ_c50 <- summary(model_c5o)

# Checking the proportion of right cases
mean_c50 <- mean(y_hat_c50==test_set_C5.0$Churn_Customers)

# Creating the list for this model
Results_c50 <- list("C5.0 Decision Tree",mean_c50)

# Adding this model's results to the table
Results_table[3,] <- Results_c50
Results_c50

# Boosting
# Making the model (20 boosting iterations)
model_c5o_boosted <- C5.0(Churn_Customers ~.,data=train_set_C5.0,trials = 20)

# Using the model to predict
y_hat_c50_boosted <- predict(model_c5o_boosted,test_set_C5.0)

# Summary of the model
summ_c50_boosted <- summary(model_c5o_boosted)

# Checking the proportion of right cases
mean_c50_boosted <- mean(y_hat_c50_boosted==test_set_C5.0$Churn_Customers)

# Creating the list for this model
Results_c50_boosted <- list("C5.0 Decision Tree Boosted",mean_c50_boosted)

# Adding this model's results to the table
Results_table[4,] <- Results_c50_boosted
Results_c50_boosted

############################################################
####################### | Results | #########################
############################################################

Results_table %>%
  mutate("Correct predictions" = round(Results_table$Mean,digits = 4)) %>%
  select(-Mean) %>%
  kable()
/R Script.R
no_license
damtaba/EDX-Final-Project
R
false
false
16,578
r
############################################################ ######################## | Data Loading | ################## ############################################################ #Importing de csv downloaded from https://www.kaggle.com/sakshigoyal7/credit-card-customers. #It will also be provided in my GitHub repo(https://github.com/damtaba/EDX-Final-Project) along with the rest of your files data<- read.csv(file="BankChurners.csv") #There are two variables that were calculated later and are not part of the original base. We'll remove them data$Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_1 <- NULL data$Naive_Bayes_Classifier_Attrition_Flag_Card_Category_Contacts_Count_12_mon_Dependent_count_Education_Level_Months_Inactive_12_mon_2 <- NULL str(data) ############################################################ ######################## | Packages | ###################### ############################################################ # Package names we'll need packages <- c("tidyverse", "rmarkdown", "fastDummies", "caret", "fastDummies", "knitr", "gmodels", "C50", "lubridate" ) # Install packages not yet installed installed_packages <- packages %in% rownames(installed.packages()) if (any(installed_packages == FALSE)) { install.packages(packages[!installed_packages]) } # Packages loading invisible(lapply(packages, library, character.only = TRUE)) ############################################################ ###################### | Data Analysis | ################### ############################################################ ########## Attrition_Flag data %>% group_by(Attrition_Flag) %>% summarize(n=n()) %>% mutate(Participation=n/sum(n)) %>% kable() ########## Customer_Age summary(data$Customer_Age) #Fist aproach data %>% ggplot(aes(x=Customer_Age)) + geom_bar() + ggtitle("Distribution of Customer's Ages") + xlab("Age") + ylab("n") # Graphically, we can anticipate that the age 
distribution resembles a normal distribution shapiro.test(sample(data$Customer_Age,5000)) #Shapiro-Wilk Test, a test ########## Gender #At first glance it does not seem that gender can be a good predictor; is almost evenly distributed CrossTable(data$Gender,data$Attrition_Flag,prop.chisq = FALSE) ########## Dependent_count summary(data$Customer_Age) CrossTable(data$Dependent_count,data$Attrition_Flag,prop.chisq = FALSE,prop.t = FALSE,prop.c = FALSE) ########## Education_Level data %>% group_by(Education_Level) %>% summarize(Q=n()) %>% mutate(porc=Q/sum(Q)) %>% arrange(desc(porc)) %>% ggplot(aes(x="",y=Q,fill=Education_Level)) + geom_bar(stat="identity", width=1)+ coord_polar("y", start=0)+ #Converting to pie chart theme_void #Cleaning the graph data %>% group_by(Education_Level) %>% summarize(Q=n()) %>% mutate(Participation=Q/sum(Q)) %>% arrange(desc(Participation)) %>% kable() ########## Marital_Status data %>% group_by(Marital_Status) %>% summarize(n=n()) %>% ungroup() %>% mutate(Participation=n/sum(n)) %>% arrange(desc(Participation)) %>% kable() data %>% group_by(Marital_Status,Attrition_Flag) %>% summarize(n=n()) %>% mutate(Participation=format(round(n/sum(n)*100), 2), nsmall = 2) %>% ggplot(aes(x=Marital_Status,y=Participation,fill=Attrition_Flag))+ geom_bar(stat = "identity")+ geom_text(aes(x=Marital_Status,y=Participation,label=Participation,vjust=-2))+ ggtitle("Percentage of participation of the type of client by Marital Status") ########## Income_Category data %>% group_by(Income_Category) %>% summarize(n=n()) %>% ungroup() %>% mutate(Participation=n/sum(n)) %>% arrange(desc(Participation)) %>% kable() data %>% group_by(Attrition_Flag,Income_Category) %>% summarize(Q=n()) %>% ungroup() %>% group_by(Income_Category) %>% mutate(Participation=Q/sum(Q)) %>% mutate(Participation=format(round(Participation*100,digits = 0),digits = 4)) %>% ggplot(aes(x=Income_Category, y = Participation, fill = Attrition_Flag))+ geom_bar(stat="identity")+ coord_flip()+ 
ggtitle("Percentage of Income_Category by Attrition_Flag")+ geom_text(aes(x=Income_Category, y = Participation,label=Participation),hjust=-0.25) ########## Card_Category data %>% group_by(Card_Category,Attrition_Flag) %>% summarize(Q=n()) %>% mutate(Participation=Q/sum(Q)) %>% mutate(Participation=format(round(Participation,digits = 4),digits = 4)) %>% ggplot(aes(x=Card_Category,y=Participation,fill=Attrition_Flag))+ geom_col()+ ggtitle("Card_Category by Attrition_Flag")+ geom_text(aes(x=Card_Category, y = Participation,label=Participation,vjust=-0.45))+ facet_wrap(~ Card_Category) data %>% group_by(Card_Category) %>% summarize(Q=n()) %>% mutate(Participation=Q/sum(Q)) %>% kable() ########## Months_on_book data %>% group_by(Months_on_book) %>% summarize(n=n()) %>% arrange(desc(Months_on_book)) %>% ggplot(aes(x=Months_on_book,y=n)) + geom_line()+ geom_col(alpha=0.5)+ ggtitle("Months on book") ########## Total_Relationship_Count data %>% group_by(Total_Relationship_Count) %>% summarize(n=n()) %>% ggplot(aes(Total_Relationship_Count,n))+ geom_point()+ geom_line()+ ggtitle("Total Relationship Count") ########## Months_Inactive_12_mon data %>% group_by(Months_Inactive_12_mon) %>% summarize(n=n()) %>% kable() data %>% group_by(Months_Inactive_12_mon,Attrition_Flag) %>% summarize(n=n()) %>% ggplot(aes(Months_Inactive_12_mon,n,fill=Attrition_Flag))+ geom_col(position="fill")+ geom_line() ########## Contacts_Count_12_mon data %>% group_by(Contacts_Count_12_mon) %>% summarize(n=n()) %>% mutate(Participation=n/sum(n)) %>% arrange(Contacts_Count_12_mon) ########## Credit_Limit data %>% group_by(Attrition_Flag,Credit_Limit) %>% summarize(n=n()) %>% ggplot(aes(x=Credit_Limit,y=n,fill=Attrition_Flag)) + geom_col(width = 2500)+ ylim(0,25)+ ggtitle("Composition of Credit_Limit by Attrition_Flag") ########## Total_Revolving_Bal summary(data$Total_Revolving_Bal) data %>% group_by(Total_Revolving_Bal) %>% summarize(n=n()) %>% mutate(porc=n/sum(n)) %>% arrange(desc(porc)) data %>% 
group_by(Total_Revolving_Bal) %>% summarize(n=n()) %>% ggplot(aes(x=Total_Revolving_Bal,y=n))+ geom_col(width = 100)+ ggtitle("Distribution of Customers by Total Revolving Limit") ########## Avg_Open_To_Buy data %>% group_by(Avg_Open_To_Buy) %>% summarize(n=n()) %>% mutate(porc=n/sum(n)) %>% ggplot(aes(Avg_Open_To_Buy,n)) + geom_col(width = 1000)+ ggtitle("Average Open to Buy") ########## Total_Amt_Chng_Q4_Q1 data %>% group_by(Total_Amt_Chng_Q4_Q1) %>% summarize(n=n()) %>% mutate(porc=n/sum(n)) %>% ggplot(aes(Total_Amt_Chng_Q4_Q1,n))+ geom_col(width = 0.1)+ ggtitle("Total_Amt_Chng_Q4_Q1") ########## Total_Trans_Amt data %>% group_by(Total_Trans_Amt,Attrition_Flag) %>% summarize(Q=n()) %>% mutate(porc=Q/sum(Q)) %>% arrange(desc(porc)) %>% ggplot(aes(Total_Trans_Amt,Q,fill=Attrition_Flag))+ geom_col(width = 500)+ ggtitle("Total_Trans_Amt by Attrition_Flag") ########## Total_Trans_Ct data %>% group_by(Total_Trans_Ct,Attrition_Flag) %>% summarize(Q=n()) %>% mutate(porc=Q/sum(Q)) %>% arrange(desc(porc)) %>% ggplot(aes(Total_Trans_Ct,Q,fill=Attrition_Flag))+ geom_col()+ ggtitle("Total_Trans_Ct by Attrition_Flag") ########## Total_Ct_Chng_Q4_Q1 data %>% group_by(Total_Ct_Chng_Q4_Q1,Attrition_Flag) %>% summarize(Q=n()) %>% mutate(porc=Q/sum(Q)) %>% arrange(desc(porc)) %>% ggplot(aes(Total_Ct_Chng_Q4_Q1,Q,fill=Attrition_Flag))+ geom_col()+ ylim(0,100)+ xlim(0,2)+ ggtitle("Total_Ct_Chng_Q4_Q1 by Attrition_Flag") ########## Avg_Utilization_Ratio data %>% group_by(Avg_Utilization_Ratio,Attrition_Flag) %>% summarize(Q=n()) %>% mutate(porc=Q/sum(Q)) %>% arrange(desc(porc)) %>% ggplot(aes(Avg_Utilization_Ratio,Q,fill=Attrition_Flag))+ geom_col(width = 0.2)+ ggtitle("Distribution of Avg_Utilization_Ratio by Attrition_Flag") ############################################################ ############# | Pre-processing the dataset | ############## ############################################################ #Making the dummy's features data_flags <- 
dummy_cols(data,remove_selected_columns = TRUE,select_columns = c("Gender","Education_Level","Marital_Status","Income_Category","Card_Category")) #Factor variables that are mutually exclusive can be separated into n-1 dummy variables. #This means, if it is not any of the above, it is by default the one that remains, being one of the dispensable #dummys variables data_flags <- data_flags %>% mutate(Churn_Customers= ifelse(Attrition_Flag =="Attrited Customer",1,0)) %>% select(-Attrition_Flag, -Gender_M, -Education_Level_Doctorate, -Marital_Status_Unknown, -`Income_Category_$120K +`, -Card_Category_Platinum, -CLIENTNUM #Does not have any information apart fromthe ID ) str(data_flags) #### Correlations correlationMatrix <- cor(data_flags[,1:32],method="pearson") highlyCorrelatedNames <- findCorrelation(correlationMatrix, cutoff=0.5,names=TRUE) highlyCorrelated <- findCorrelation(correlationMatrix, cutoff=0.5,names=FALSE) data_model <- data_flags[,-highlyCorrelated] kable(highlyCorrelatedNames,col.names = "This are the variables highly correlated") str(data_flags) ############################################################ ####################### | Models | ######################### ############################################################ #Using a 50/50 division of the data index_test <- createDataPartition(data_model$Churn_Customers,p=0.5,list=FALSE) test_set <- data_flags[index_test,] train_set <- data_flags[-index_test,] ################### Knn #Creating the function that normalizes features normalize <- function(x){ return((x-min(x))/(max(x)-min(x)))} #Creating both datasets with the new scale train_features_n <- as.data.frame(lapply(train_set[1:32],normalize)) test_features_n <- as.data.frame(lapply(test_set[1:32],normalize)) #Making the model model_knn_n <- train(train_features_n,as.factor(train_set[,33]),method = "knn") #Using this model to predict y_hat_knn_n_model <- predict(model_knn_n,test_features_n) #Checking the proportion of right cases Mean_knn_n <- 
mean(test_set$Churn_Customers==y_hat_knn_n_model) #Creating a table to fill it with the results Results_table <- data.frame(Model=character(),Mean=numeric(),stringsAsFactors = FALSE) #Creating the list for this model Results_knn_n <- list("Knn normalized",Mean_knn_n) # Adding this model's results to the table Results_table[1,] <- Results_knn_n Results_knn_n ###z-score # Scaling the data train_features_s <- as.data.frame(scale(train_set[,1:32])) test_features_s <- as.data.frame(scale(test_set[,1:32])) #Making the model model_knn_s <- train(train_features_s[,1:32],as.factor(train_set[,33]),method = "knn") #Using the model to predict y_hat_knn_s_model <- predict(model_knn_s,test_features_s) #Checking the proportion of right cases Mean_kss_s <- mean(test_set$Churn_Customers==y_hat_knn_s_model) #Creating the list for this model Results_knn_s <- list("Knn with scale z-score",Mean_kss_s) # Adding this model's results to the table Results_table[2,] <-Results_knn_s Results_knn_s ###### Decision Tree C5.0 # Knn needed numerical features. 
Now that that is not needed, some of them should be factors #Transforming into factorial some features in the train set train_set_C5.0 <- train_set %>% mutate( Gender_F = as.factor(Gender_F), Education_Level_College= as.factor(Education_Level_College), Education_Level_Graduate=as.factor(Education_Level_Graduate), Education_Level_High_school = as.factor(`Education_Level_High School`), Education_Level_Post_Graduate = as.factor(`Education_Level_Post-Graduate`), Education_Level_Uneducated = as.factor(Education_Level_Uneducated), Education_Level_Unknown=as.factor(Education_Level_Unknown), Marital_Status_Divorced=as.factor(Marital_Status_Divorced), Marital_Status_Married = as.factor(Marital_Status_Married), Marital_Status_Single=as.factor(Marital_Status_Single), Income_Category_40K_60K= as.factor(`Income_Category_$40K - $60K`), Income_Category_60K_80K = as.factor(`Income_Category_$60K - $80K`), Income_Category_80K_120K=as.factor(`Income_Category_$80K - $120K`), Income_Category_Less_than_40K = as.factor(`Income_Category_Less than $40K`), Income_Category_Unknown = as.factor(Income_Category_Unknown), Card_Category_Blue = as.factor(Card_Category_Blue), Card_Category_Gold = as.factor(Card_Category_Gold), Card_Category_Silver = as.factor(Card_Category_Silver), Churn_Customers = as.factor(Churn_Customers) ) %>% select( -`Education_Level_High School`, -`Education_Level_Post-Graduate`, -`Income_Category_$40K - $60K`, -`Income_Category_$60K - $80K`, -`Income_Category_$80K - $120K`, -`Income_Category_Less than $40K` ) #Transforming into factorial some features in the test set test_set_C5.0 <- test_set %>% mutate( Gender_F = as.factor(Gender_F), Education_Level_College= as.factor(Education_Level_College), Education_Level_Graduate=as.factor(Education_Level_Graduate), Education_Level_High_school = as.factor(`Education_Level_High School`), Education_Level_Post_Graduate = as.factor(`Education_Level_Post-Graduate`), Education_Level_Uneducated = as.factor(Education_Level_Uneducated), 
Education_Level_Unknown=as.factor(Education_Level_Unknown), Marital_Status_Divorced=as.factor(Marital_Status_Divorced), Marital_Status_Married = as.factor(Marital_Status_Married), Marital_Status_Single=as.factor(Marital_Status_Single), Income_Category_40K_60K= as.factor(`Income_Category_$40K - $60K`), Income_Category_60K_80K = as.factor(`Income_Category_$60K - $80K`), Income_Category_80K_120K=as.factor(`Income_Category_$80K - $120K`), Income_Category_Less_than_40K = as.factor(`Income_Category_Less than $40K`), Income_Category_Unknown = as.factor(Income_Category_Unknown), Card_Category_Blue = as.factor(Card_Category_Blue), Card_Category_Gold = as.factor(Card_Category_Gold), Card_Category_Silver = as.factor(Card_Category_Silver), Churn_Customers = as.factor(Churn_Customers) ) %>% select( -`Education_Level_High School`, -`Education_Level_Post-Graduate`, -`Income_Category_$40K - $60K`, -`Income_Category_$60K - $80K`, -`Income_Category_$80K - $120K`, -`Income_Category_Less than $40K` ) #Making the model model_c5o <- C5.0(Churn_Customers ~.,data=train_set_C5.0) #Using the model to predict y_hat_c50 <- predict(model_c5o,test_set_C5.0) # Summary of the model summ_c50 <- summary(model_c5o) #Checking the proportion of right cases mean_c50 <- mean(y_hat_c50==test_set_C5.0$Churn_Customers) #Creating the list for this model Results_c50 <- list("C5.0 Decision Tree",mean_c50) # Adding this model's results to the table Results_table[3,] <- Results_c50 Results_c50 # Boosting #Making the model model_c5o_boosted <- C5.0(Churn_Customers ~.,data=train_set_C5.0,trials = 20) #Using the model to predict y_hat_c50_boosted <- predict(model_c5o_boosted,test_set_C5.0) # Summary of the model summ_c50_boosted <- summary(model_c5o_boosted) #Checking the proportion of right cases mean_c50_boosted <- mean(y_hat_c50_boosted==test_set_C5.0$Churn_Customers) #Creating the list for this model Results_c50_boosted <- list("C5.0 Decision Tree Boosted",mean_c50_boosted) # Adding this model's results to the 
table Results_table[4,] <- Results_c50_boosted Results_c50_boosted ############################################################ ####################### | Results | ######################### ############################################################ Results_table %>% mutate("Correct predictions" = round(Results_table$Mean,digits = 4)) %>% select(-Mean) %>% kable()
library(ggtree) nwk<- "/Users/ryannguyen/Desktop/Math_127/Phylogenetics/tree2.nwk" tree<- read.tree(nwk) ggtree(tree, color="firebrick", lbranch.length='none') + geom_tiplab() +geom_tippoint() ggsave("plswork.png")
/Phylogenetics/Make_tree.R
no_license
rynguyen2018/Math127
R
false
false
217
r
library(ggtree) nwk<- "/Users/ryannguyen/Desktop/Math_127/Phylogenetics/tree2.nwk" tree<- read.tree(nwk) ggtree(tree, color="firebrick", lbranch.length='none') + geom_tiplab() +geom_tippoint() ggsave("plswork.png")
library(radiomics) ### Name: discretizeImage ### Title: Image Discretization. ### Aliases: discretizeImage ### ** Examples image(psf) image(discretizeImage(psf, n_grey=5, verbose=F)) image(tumor) image(discretizeImage(tumor, n_grey=8, verbose=F))
/data/genthat_extracted_code/radiomics/examples/discretizeImage.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
255
r
library(radiomics) ### Name: discretizeImage ### Title: Image Discretization. ### Aliases: discretizeImage ### ** Examples image(psf) image(discretizeImage(psf, n_grey=5, verbose=F)) image(tumor) image(discretizeImage(tumor, n_grey=8, verbose=F))
# data wrangling for eddy data # version 1.0 # alistair mcconnell seasons_13 = c(76, 141, 230, 281) seasons_14 = c(89, 129, 225, 268) seasons_15 = c(104, 147, 226, 275) Oesterlien_C_R_2013[Oesterlien_C_R_2013 == -10000] <- NA Oesterlien_C_R_2013[Oesterlien_C_R_2013 == -9999] <- NA Oesterlien_C_R_2013[Oesterlien_C_R_2013 < -1000] <- NA Oesterlien_C_R_2014[Oesterlien_C_R_2014 == -10000] <- NA Oesterlien_C_R_2014[Oesterlien_C_R_2014 == -9999] <- NA Oesterlien_C_R_2015[Oesterlien_C_R_2015 == -10000] <- NA Oesterlien_C_R_2015[Oesterlien_C_R_2015 == -9999] <- NA ### means daily13_mean <- aggregate(daily13, by = list(daily13$DoY), FUN = mean, na.rm = T) daily14_mean <- aggregate(daily14, by = list(daily14$DoY), FUN = mean, na.rm = T) daily15_mean <- aggregate(daily15, by = list(daily15$DoY), FUN = mean, na.rm = T) ### 2013 Oesterlien_C_R_2013$season <- "winter" Oesterlien_C_R_2013 <- transform(Oesterlien_C_R_2013, season = ifelse(DoY >= 76, "spring", season)) Oesterlien_C_R_2013 <- transform(Oesterlien_C_R_2013, season = ifelse(DoY >= 141, "summer", season)) Oesterlien_C_R_2013 <- transform(Oesterlien_C_R_2013, season = ifelse(DoY >= 230, "autumn", season)) Oesterlien_C_R_2013 <- transform(Oesterlien_C_R_2013, season = ifelse(DoY >= 281, "winter", season)) daily13_sum$season <- "winter" daily13_sum <- transform(daily13_sum, season = ifelse(DoY >= 76, "spring", season)) daily13_sum <- transform(daily13_sum, season = ifelse(DoY >= 141, "summer", season)) daily13_sum <- transform(daily13_sum, season = ifelse(DoY >= 230, "autumn", season)) daily13_sum <- transform(daily13_sum, season = ifelse(DoY >= 281, "winter", season)) CX13$GPP_f <- CX13$GPP_f * -1 CX13$season <- "winter" CX13 <- transform(CX13, season = ifelse(DoY >= 76, "spring", season)) CX13 <- transform(CX13, season = ifelse(DoY >= 141, "summer", season)) CX13 <- transform(CX13, season = ifelse(DoY >= 230, "autumn", season)) CX13 <- transform(CX13, season = ifelse(DoY >= 281, "winter", season)) x13$season <- "winter" 
x13 <- transform(x13, season = ifelse(DoY >= 76, "spring", season)) x13 <- transform(x13, season = ifelse(DoY >= 141, "summer", season)) x13 <- transform(x13, season = ifelse(DoY >= 230, "autumn", season)) x13 <- transform(x13, season = ifelse(DoY >= 281, "winter", season)) x14$season <- "winter" x14 <- transform(x14, season = ifelse(DoY >= 89, "spring", season)) x14 <- transform(x14, season = ifelse(DoY >= 129, "summer", season)) x14 <- transform(x14, season = ifelse(DoY >= 225, "autumn", season)) x14 <- transform(x14, season = ifelse(DoY >= 268, "winter", season)) daily14_sum$season <- "winter" daily14_sum <- transform(daily14_sum, season = ifelse(DoY >= 89, "spring", season)) daily14_sum <- transform(daily14_sum, season = ifelse(DoY >= 129, "summer", season)) daily14_sum <- transform(daily14_sum, season = ifelse(DoY >= 225, "autumn", season)) daily14_sum <- transform(daily14_sum, season = ifelse(DoY >= 268, "winter", season)) daily15_sum$season <- "winter" daily15_sum <- transform(daily15_sum, season = ifelse(DoY >= 104, "spring", season)) daily15_sum <- transform(daily15_sum, season = ifelse(DoY >= 147, "summer", season)) daily15_sum <- transform(daily15_sum, season = ifelse(DoY >= 226, "autumn", season)) daily15_sum <- transform(daily15_sum, season = ifelse(DoY >= 275, "winter", season)) x15$season <- "winter" x15 <- transform(x15, season = ifelse(DoY >= 104, "spring", season)) x15 <- transform(x15, season = ifelse(DoY >= 147, "summer", season)) x15 <- transform(x15, season = ifelse(DoY >= 226, "autumn", season)) x15 <- transform(x15, season = ifelse(DoY >= 275, "winter", season)) met13_daily$season <- "winter" met13_daily <- transform(met13_daily, season = ifelse(DoY >= 76, "spring", season)) met13_daily <- transform(met13_daily, season = ifelse(DoY >= 141, "summer", season)) met13_daily <- transform(met13_daily, season = ifelse(DoY >= 230, "autumn", season)) met13_daily <- transform(met13_daily, season = ifelse(DoY >= 281, "winter", season)) met13$season <- 
"winter" met13 <- transform(met13, season = ifelse(DoY >= 76, "spring", season)) met13 <- transform(met13, season = ifelse(DoY >= 141, "summer", season)) met13 <- transform(met13, season = ifelse(DoY >= 230, "autumn", season)) met13 <- transform(met13, season = ifelse(DoY >= 281, "winter", season)) daily13_mean$season <- "winter" daily13_mean <- transform(daily13_mean, season = ifelse(DoY >= 76, "spring", season)) daily13_mean <- transform(daily13_mean, season = ifelse(DoY >= 141, "summer", season)) daily13_mean <- transform(daily13_mean, season = ifelse(DoY >= 230, "autumn", season)) daily13_mean <- transform(daily13_mean, season = ifelse(DoY >= 281, "winter", season)) ### 2014 Oesterlien_C_R_2014$season <- "winter" Oesterlien_C_R_2014 <- transform(Oesterlien_C_R_2014, season = ifelse(DoY >= 89, "spring", season)) Oesterlien_C_R_2014 <- transform(Oesterlien_C_R_2014, season = ifelse(DoY >= 129, "summer", season)) Oesterlien_C_R_2014 <- transform(Oesterlien_C_R_2014, season = ifelse(DoY >= 225, "autumn", season)) Oesterlien_C_R_2014 <- transform(Oesterlien_C_R_2014, season = ifelse(DoY >= 268, "winter", season)) met14_daily$season <- "winter" met14_daily <- transform(met14_daily, season = ifelse(DoY >= 89, "spring", season)) met14_daily <- transform(met14_daily, season = ifelse(DoY >= 129, "summer", season)) met14_daily <- transform(met14_daily, season = ifelse(DoY >= 225, "autumn", season)) met14_daily <- transform(met14_daily, season = ifelse(DoY >= 268, "winter", season)) met14$season <- "winter" met14 <- transform(met14, season = ifelse(DoY >= 89, "spring", season)) met14 <- transform(met14, season = ifelse(DoY >= 129, "summer", season)) met14 <- transform(met14, season = ifelse(DoY >= 225, "autumn", season)) met14 <- transform(met14, season = ifelse(DoY >= 268, "winter", season)) daily14_mean$season <- "winter" daily14_mean <- transform(daily14_mean, season = ifelse(DoY >= 89, "spring", season)) daily14_mean <- transform(daily14_mean, season = ifelse(DoY >= 
129, "summer", season)) daily14_mean <- transform(daily14_mean, season = ifelse(DoY >= 225, "autumn", season)) daily14_mean <- transform(daily14_mean, season = ifelse(DoY >= 268, "winter", season)) ### 2015 Oesterlien_C_R_2015$season <- "winter" Oesterlien_C_R_2015 <- transform(Oesterlien_C_R_2015, season = ifelse(DoY >= 104, "spring", season)) Oesterlien_C_R_2015 <- transform(Oesterlien_C_R_2015, season = ifelse(DoY >= 147, "summer", season)) Oesterlien_C_R_2015 <- transform(Oesterlien_C_R_2015, season = ifelse(DoY >= 226, "autumn", season)) Oesterlien_C_R_2015 <- transform(Oesterlien_C_R_2015, season = ifelse(DoY >= 275, "winter", season)) met15_daily$season <- "winter" met15_daily <- transform(met15_daily, season = ifelse(DoY >= 104, "spring", season)) met15_daily <- transform(met15_daily, season = ifelse(DoY >= 147, "summer", season)) met15_daily <- transform(met15_daily, season = ifelse(DoY >= 226, "autumn", season)) met15_daily <- transform(met15_daily, season = ifelse(DoY >= 275, "winter", season)) met15$season <- "winter" met15 <- transform(met15, season = ifelse(DoY >= 104, "spring", season)) met15 <- transform(met15, season = ifelse(DoY >= 147, "summer", season)) met15 <- transform(met15, season = ifelse(DoY >= 226, "autumn", season)) met15 <- transform(met15, season = ifelse(DoY >= 275, "winter", season)) daily15_mean$season <- "winter" daily15_mean <- transform(daily15_mean, season = ifelse(DoY >= 104, "spring", season)) daily15_mean <- transform(daily15_mean, season = ifelse(DoY >= 147, "summer", season)) daily15_mean <- transform(daily15_mean, season = ifelse(DoY >= 226, "autumn", season)) daily15_mean <- transform(daily15_mean, season = ifelse(DoY >= 275, "winter", season))
/data_wrangling_and_tidying.R
no_license
amccnnll/oesterlien
R
false
false
7,742
r
# data wrangling for eddy data # version 1.0 # alistair mcconnell seasons_13 = c(76, 141, 230, 281) seasons_14 = c(89, 129, 225, 268) seasons_15 = c(104, 147, 226, 275) Oesterlien_C_R_2013[Oesterlien_C_R_2013 == -10000] <- NA Oesterlien_C_R_2013[Oesterlien_C_R_2013 == -9999] <- NA Oesterlien_C_R_2013[Oesterlien_C_R_2013 < -1000] <- NA Oesterlien_C_R_2014[Oesterlien_C_R_2014 == -10000] <- NA Oesterlien_C_R_2014[Oesterlien_C_R_2014 == -9999] <- NA Oesterlien_C_R_2015[Oesterlien_C_R_2015 == -10000] <- NA Oesterlien_C_R_2015[Oesterlien_C_R_2015 == -9999] <- NA ### means daily13_mean <- aggregate(daily13, by = list(daily13$DoY), FUN = mean, na.rm = T) daily14_mean <- aggregate(daily14, by = list(daily14$DoY), FUN = mean, na.rm = T) daily15_mean <- aggregate(daily15, by = list(daily15$DoY), FUN = mean, na.rm = T) ### 2013 Oesterlien_C_R_2013$season <- "winter" Oesterlien_C_R_2013 <- transform(Oesterlien_C_R_2013, season = ifelse(DoY >= 76, "spring", season)) Oesterlien_C_R_2013 <- transform(Oesterlien_C_R_2013, season = ifelse(DoY >= 141, "summer", season)) Oesterlien_C_R_2013 <- transform(Oesterlien_C_R_2013, season = ifelse(DoY >= 230, "autumn", season)) Oesterlien_C_R_2013 <- transform(Oesterlien_C_R_2013, season = ifelse(DoY >= 281, "winter", season)) daily13_sum$season <- "winter" daily13_sum <- transform(daily13_sum, season = ifelse(DoY >= 76, "spring", season)) daily13_sum <- transform(daily13_sum, season = ifelse(DoY >= 141, "summer", season)) daily13_sum <- transform(daily13_sum, season = ifelse(DoY >= 230, "autumn", season)) daily13_sum <- transform(daily13_sum, season = ifelse(DoY >= 281, "winter", season)) CX13$GPP_f <- CX13$GPP_f * -1 CX13$season <- "winter" CX13 <- transform(CX13, season = ifelse(DoY >= 76, "spring", season)) CX13 <- transform(CX13, season = ifelse(DoY >= 141, "summer", season)) CX13 <- transform(CX13, season = ifelse(DoY >= 230, "autumn", season)) CX13 <- transform(CX13, season = ifelse(DoY >= 281, "winter", season)) x13$season <- "winter" 
x13 <- transform(x13, season = ifelse(DoY >= 76, "spring", season)) x13 <- transform(x13, season = ifelse(DoY >= 141, "summer", season)) x13 <- transform(x13, season = ifelse(DoY >= 230, "autumn", season)) x13 <- transform(x13, season = ifelse(DoY >= 281, "winter", season)) x14$season <- "winter" x14 <- transform(x14, season = ifelse(DoY >= 89, "spring", season)) x14 <- transform(x14, season = ifelse(DoY >= 129, "summer", season)) x14 <- transform(x14, season = ifelse(DoY >= 225, "autumn", season)) x14 <- transform(x14, season = ifelse(DoY >= 268, "winter", season)) daily14_sum$season <- "winter" daily14_sum <- transform(daily14_sum, season = ifelse(DoY >= 89, "spring", season)) daily14_sum <- transform(daily14_sum, season = ifelse(DoY >= 129, "summer", season)) daily14_sum <- transform(daily14_sum, season = ifelse(DoY >= 225, "autumn", season)) daily14_sum <- transform(daily14_sum, season = ifelse(DoY >= 268, "winter", season)) daily15_sum$season <- "winter" daily15_sum <- transform(daily15_sum, season = ifelse(DoY >= 104, "spring", season)) daily15_sum <- transform(daily15_sum, season = ifelse(DoY >= 147, "summer", season)) daily15_sum <- transform(daily15_sum, season = ifelse(DoY >= 226, "autumn", season)) daily15_sum <- transform(daily15_sum, season = ifelse(DoY >= 275, "winter", season)) x15$season <- "winter" x15 <- transform(x15, season = ifelse(DoY >= 104, "spring", season)) x15 <- transform(x15, season = ifelse(DoY >= 147, "summer", season)) x15 <- transform(x15, season = ifelse(DoY >= 226, "autumn", season)) x15 <- transform(x15, season = ifelse(DoY >= 275, "winter", season)) met13_daily$season <- "winter" met13_daily <- transform(met13_daily, season = ifelse(DoY >= 76, "spring", season)) met13_daily <- transform(met13_daily, season = ifelse(DoY >= 141, "summer", season)) met13_daily <- transform(met13_daily, season = ifelse(DoY >= 230, "autumn", season)) met13_daily <- transform(met13_daily, season = ifelse(DoY >= 281, "winter", season)) met13$season <- 
"winter" met13 <- transform(met13, season = ifelse(DoY >= 76, "spring", season)) met13 <- transform(met13, season = ifelse(DoY >= 141, "summer", season)) met13 <- transform(met13, season = ifelse(DoY >= 230, "autumn", season)) met13 <- transform(met13, season = ifelse(DoY >= 281, "winter", season)) daily13_mean$season <- "winter" daily13_mean <- transform(daily13_mean, season = ifelse(DoY >= 76, "spring", season)) daily13_mean <- transform(daily13_mean, season = ifelse(DoY >= 141, "summer", season)) daily13_mean <- transform(daily13_mean, season = ifelse(DoY >= 230, "autumn", season)) daily13_mean <- transform(daily13_mean, season = ifelse(DoY >= 281, "winter", season)) ### 2014 Oesterlien_C_R_2014$season <- "winter" Oesterlien_C_R_2014 <- transform(Oesterlien_C_R_2014, season = ifelse(DoY >= 89, "spring", season)) Oesterlien_C_R_2014 <- transform(Oesterlien_C_R_2014, season = ifelse(DoY >= 129, "summer", season)) Oesterlien_C_R_2014 <- transform(Oesterlien_C_R_2014, season = ifelse(DoY >= 225, "autumn", season)) Oesterlien_C_R_2014 <- transform(Oesterlien_C_R_2014, season = ifelse(DoY >= 268, "winter", season)) met14_daily$season <- "winter" met14_daily <- transform(met14_daily, season = ifelse(DoY >= 89, "spring", season)) met14_daily <- transform(met14_daily, season = ifelse(DoY >= 129, "summer", season)) met14_daily <- transform(met14_daily, season = ifelse(DoY >= 225, "autumn", season)) met14_daily <- transform(met14_daily, season = ifelse(DoY >= 268, "winter", season)) met14$season <- "winter" met14 <- transform(met14, season = ifelse(DoY >= 89, "spring", season)) met14 <- transform(met14, season = ifelse(DoY >= 129, "summer", season)) met14 <- transform(met14, season = ifelse(DoY >= 225, "autumn", season)) met14 <- transform(met14, season = ifelse(DoY >= 268, "winter", season)) daily14_mean$season <- "winter" daily14_mean <- transform(daily14_mean, season = ifelse(DoY >= 89, "spring", season)) daily14_mean <- transform(daily14_mean, season = ifelse(DoY >= 
129, "summer", season)) daily14_mean <- transform(daily14_mean, season = ifelse(DoY >= 225, "autumn", season)) daily14_mean <- transform(daily14_mean, season = ifelse(DoY >= 268, "winter", season)) ### 2015 Oesterlien_C_R_2015$season <- "winter" Oesterlien_C_R_2015 <- transform(Oesterlien_C_R_2015, season = ifelse(DoY >= 104, "spring", season)) Oesterlien_C_R_2015 <- transform(Oesterlien_C_R_2015, season = ifelse(DoY >= 147, "summer", season)) Oesterlien_C_R_2015 <- transform(Oesterlien_C_R_2015, season = ifelse(DoY >= 226, "autumn", season)) Oesterlien_C_R_2015 <- transform(Oesterlien_C_R_2015, season = ifelse(DoY >= 275, "winter", season)) met15_daily$season <- "winter" met15_daily <- transform(met15_daily, season = ifelse(DoY >= 104, "spring", season)) met15_daily <- transform(met15_daily, season = ifelse(DoY >= 147, "summer", season)) met15_daily <- transform(met15_daily, season = ifelse(DoY >= 226, "autumn", season)) met15_daily <- transform(met15_daily, season = ifelse(DoY >= 275, "winter", season)) met15$season <- "winter" met15 <- transform(met15, season = ifelse(DoY >= 104, "spring", season)) met15 <- transform(met15, season = ifelse(DoY >= 147, "summer", season)) met15 <- transform(met15, season = ifelse(DoY >= 226, "autumn", season)) met15 <- transform(met15, season = ifelse(DoY >= 275, "winter", season)) daily15_mean$season <- "winter" daily15_mean <- transform(daily15_mean, season = ifelse(DoY >= 104, "spring", season)) daily15_mean <- transform(daily15_mean, season = ifelse(DoY >= 147, "summer", season)) daily15_mean <- transform(daily15_mean, season = ifelse(DoY >= 226, "autumn", season)) daily15_mean <- transform(daily15_mean, season = ifelse(DoY >= 275, "winter", season))
/pointsproblem.R
no_license
kjg612/EDAHomework
R
false
false
1,898
r
# =================================== # Compare spatial summaries across systems # Load all NHLD pointsummaries data # Luke Loken, Feb 2018 # =================================== rm(list = ls()) library(gtools) library(tidyr) library(dplyr) library(ggplot2) library(gridExtra) setwd("E:/Git_Repo/NHLDLakes") asinTransform <- function(p) { asin(sqrt(p)) } j <- readRDS(file='Data/FlameStatsLagosChemAllWide.rds') #Subset or select which columns to use... goodvars<-c("TempC", "SPCuScm", "fDOMRFU", "pH","TurbFNU", "ODOmgL", "CO2uM", "CH4uM", "ChlARFU", "BGAPCRFU") # goodvars<-c("TempC", "SPCuScm", "fDOMRFU", "pH","TurbFNU", "ODOmgL", "CO2uM", "CH4uM", "ChlARFU", "BGAPCRFU", "ChlAugL", "BGAPCgL") shortnames<-c("Temp", "SPC", "fDOM", "pH", "Turb", "DO", "CO2", "CH4", "ChlA", "BGA") goodvars_pixels<-paste(goodvars, 'pixels', sep='_') goodvars_points<-paste(goodvars, 'points', sep='_') CVstats <- c('CV', 'MADMOverMedian', 'QuartileDispersion', 'SDL', 'skewness', 'shape') CVstats_short<-c('CV','skewness') Tablestats<-c('MaxMinusMin', 'sd', 'mad', 'CV', 'MADMOverMedian', 'QuartileDispersion', 'SDL', 'skewness', 'shape') SemiVars <- c('TmpC_h', 'SPCScm_h', 'fDOMRFU_h', 'pH_h', 'TrbFNU_h', 'ODOmgL_h', 'CO2M_h', 'CH4M_h', 'ChlARFU_h', 'BGAPCRFU_h') SemiRange_columns<-paste(SemiVars, 'points', 'SemiRange', sep='_') SemiRangeRatio_columns<-paste(SemiVars, 'points', 'SemiRangeOverCutoff', sep='_') SemiSillPercent_columns<-paste(SemiVars, 'points', 'SillPercent', sep='_') SemiTotalSill_columns<-paste(SemiVars, 'points', 'SillTotal', sep='_') SemiPSill_columns<-paste(SemiVars, 'points', 'PSill', sep='_') Table_columns<-c(paste(goodvars_points, Tablestats[1], sep="_"), paste(goodvars_points, Tablestats[2], sep="_"), paste(goodvars_points, Tablestats[3], sep="_"), paste(goodvars_points, Tablestats[4], sep="_"), paste(goodvars_points, Tablestats[5], sep="_"), paste(goodvars_points, Tablestats[6], sep="_"), paste(goodvars_points, Tablestats[7], sep="_"), paste(goodvars_points, 
Tablestats[8], sep="_"), paste(goodvars_points, Tablestats[9], sep="_"), SemiRange_columns, SemiRangeRatio_columns, SemiTotalSill_columns, SemiPSill_columns, SemiSillPercent_columns) Stats<-j[Table_columns] StatsMeans<- Stats %>% summarize_all(mean, na.rm=T) %>% as.numeric() %>% signif(digits=2) %>% matrix(nrow=14, ncol=10, byrow=T) StatsMins<- Stats %>% summarize_all(min, na.rm=T)%>% as.numeric() %>% signif(digits=2) %>% matrix(nrow=14, ncol=10, byrow=T) StatsMaxs<- Stats %>% summarize_all(max, na.rm=T) %>% as.numeric() %>% signif(digits=2) %>% matrix(nrow=14, ncol=10, byrow=T) StatsMatrix<-matrix(paste(StatsMins, ' to ', StatsMaxs, sep=''), nrow=14, ncol=10) table_SH<-as.data.frame(matrix(nrow=28, ncol=10)) table_SH[seq(1,27,2),]<-StatsMeans table_SH[seq(2,28,2),]<-StatsMatrix names(table_SH)<-c(shortnames) table_SH$Stat<-'' table_SH$Stat[seq(1,27,2)]<-c('Range (Max - Min)', 'SD', 'MAD', 'CV', 'MAD/Median', 'QuartileDispersion', 'SDL', 'Skewness', 'EVD-shape', 'SemiRange', 'SemiRangeRatio', 'Total Sill', 'P-sill', 'Sill %') table_SH_out<-table_SH[,c(11,1:10)] write.table(table_SH_out, file='Data/AcrossLake_SpatialHeterogeneity_Summary.csv', sep=',', row.names=F) # ############## # plotting # ############## #Set universal colors box widths, others boxwex=0.6 colors<-c('#377eb8', '#e41a1c', '#4daf4a') colorbyvar<-colors[c(1,1,1,1,1,2,2,2,2,2)] # Boxplots of spatial variability across variables using pixels png("Figures/Boxplots/CV_andothers_BoxplotsAmongVariablesPixels.png", res=200, width=4.5,height=12, units="in") par(mfrow=c(length(CVstats) ,1)) par(mar=c(1.5,3,.5,.5), oma=c(2.5,1,2,0)) par(mgp=c(2, .5, 0)) ymin<-c(rep(0,4),-5,-1.5) ymax<-c(0.5,0.2,0.2,0.2,5,1.5) nu <- 1 for (nu in 1:length(CVstats)){ boxplot(j[paste(goodvars_pixels, CVstats[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar) axis(1, labels=shortnames, at=1:length(shortnames)) if (CVstats[nu]=='MADMOverMedian'){ mtext('MAD/Median', 2, 2) } else { 
mtext(CVstats[nu], 2, 2)} abline(h=0, lty=3) boxplot(j[paste(goodvars_pixels, CVstats[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar, add=T) } mtext('Spatial stats across lakes and variable types (pixels)', 3, 0, outer=T) mtext('Variable', 1, .5, outer=T) dev.off() # Boxplots of spatial variability across variables using points png("Figures/Boxplots/CV_andothers_BoxplotsAmongVariablesPoints.png", res=200, width=4.5,height=12, units="in") par(mfrow=c(length(CVstats),1)) par(mar=c(1.5,3,.5,.5), oma=c(2.5,1,2,0)) par(mgp=c(2, .5, 0)) ymin<-c(rep(0,4),-5,-1.6) ymax<-c(1.1,0.75,0.75,0.45,5,1.6) nu <- 1 for (nu in 1:length(CVstats)){ boxplot(j[paste(goodvars_points, CVstats[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar) axis(1, labels=shortnames, at=1:length(shortnames)) if (CVstats[nu]=='MADMOverMedian'){ mtext('MAD/Median', 2, 2) } else { mtext(CVstats[nu], 2, 2)} abline(h=0, lty=3) boxplot(j[paste(goodvars_points, CVstats[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar, add=T) } mtext('Spatial stats across lakes and variable types (points)', 3, 0, outer=T) mtext('Variable', 1, .5, outer=T) dev.off() # Boxplots of spatial variability across variables using points just CV and skewness png("Figures/Boxplots/CV_skewness_BoxplotsAmongVariablesPoints.png", res=200, width=4.5,height=4, units="in") par(mfrow=c(length(CVstats_short),1)) par(mar=c(1.5,3,.5,.5), oma=c(2.5,1,0,0)) par(mgp=c(2, .5, 0), tck=-0.04) ymin<-c(0,-5) ymax<-c(1.1,5) nu <- 1 for (nu in 1:length(CVstats_short)){ boxplot(j[paste(goodvars_points, CVstats_short[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar, cex.axis=0.8, cex=0.6, las=1) if (nu==2){abline(h=c(-2,2), lty=3)} axis(1, labels=shortnames, at=1:length(shortnames), cex=0.6, cex.axis=0.6) mtext(CVstats_short[nu], 2, 2) abline(h=0, lty=3) 
boxplot(j[paste(goodvars_points, CVstats_short[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar, add=T, cex.axis=0.8, cex=.6, las=1) } # mtext('Spatial stats across lakes and variable types (points)', 3, 0, outer=T) mtext('Variable', 1, .5, outer=T) dev.off() # Boxplots of semivariance ranges across variables using points png("Figures/Boxplots/SemiVarRanges_BoxplotsAmongVariablesPoints.png", res=200, width=5,height=3, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,2000) boxplot(j[SemiRange_columns], ylim=ylim, ylab='', names=NA , boxwex=boxwex, col=colorbyvar, cex=0.5, pch=16, yaxt='n') axis(2, at=seq(0,2000,500), cex.axis=.7) axis(1, labels=shortnames, at=1:length(shortnames), cex.axis=.7) mtext('Semivariance range (m)', 2, 1.5) abline(h=0, lty=3) mtext('Variable', 1, 0, outer=T) dev.off() # Boxplots of semivariance ranges across variables using points png("Figures/Boxplots/SemiVarRanges_BoxplotsAmongVariablesPointsNoLGR.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,2000) boxplot(j[SemiRange_columns[-c(7:8)]], ylim=ylim, ylab='', names=NA , boxwex=boxwex, col=colorbyvar[-c(7:8)], cex=0.5, pch=16, yaxt='n') axis(2, at=seq(0,2000,500), cex.axis=.7) axis(1, labels=shortnames[-c(7:8)], at=1:length(shortnames[-c(7:8)]), cex.axis=.7) mtext('Semivariance range (m)', 2, 1.5) abline(h=0, lty=3) mtext('Variable', 1, 0, outer=T) dev.off() # Boxplots of semivariance range ratios across variables using points png("Figures/Boxplots/SemiVarRangeRatios_BoxplotsAmongVariablesPoints.png", res=200, width=5,height=3, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,1) boxplot(j[SemiRangeRatio_columns], ylim=ylim, ylab='', names=NA , boxwex=boxwex, col=colorbyvar, cex=0.5, pch=16, yaxt='n') axis(2, at=seq(0,1,0.5), 
cex.axis=.7, las=1) axis(1, labels=shortnames, at=1:length(shortnames), cex.axis=.7) mtext('Semivariance range ratio', 2, 1.5) abline(h=c(0,1), lty=3) mtext('Variable', 1, 0, outer=T) dev.off() # Boxplots of semivariance range ratios across variables using points png("Figures/Boxplots/SemiVarRangeRatios_BoxplotsAmongVariablesPointsNoLGR.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,1) boxplot(j[SemiRangeRatio_columns[-c(7:8)]], ylim=ylim, ylab='', names=NA , boxwex=boxwex, col=colorbyvar[-c(7:8)], cex=0.5, pch=16, yaxt='n') axis(2, at=seq(0,1,.5), cex.axis=.7, las=1) axis(1, labels=shortnames[-c(7:8)], at=1:length(shortnames[-c(7:8)]), cex.axis=.7) mtext('Semivariance range ratios', 2, 1.5) abline(h=c(0,1), lty=3) mtext('Variable', 1, 0, outer=T) dev.off() #With arcsin sqrt transform # Boxplots of semivariance range ratios across variables using points png("Figures/Boxplots/SemiVarRangeRatiosArcSinTran_BoxplotsAmongVariablesPoints.png", res=200, width=5,height=3, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,1) boxplot(asinTransform(j[SemiRangeRatio_columns]), ylab='', names=NA , boxwex=boxwex, col=colorbyvar, cex=0.5, pch=16, yaxt='n') axis(2, cex.axis=.7, las=1) axis(1, labels=shortnames, at=1:length(shortnames), cex.axis=.7) mtext('Semivariance range ratio', 2, 1.5) abline(h=c(0,pi/2), lty=3) mtext('Variable', 1, 0, outer=T) dev.off() # Boxplots of semivariance range ratios arcsin transform across variables using points png("Figures/Boxplots/SemiVarRangeRatiosArcSinTran_BoxplotsAmongVariablesPointsNoLGR.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,1) boxplot(asinTransform(j[SemiRangeRatio_columns[-c(7:8)]]), ylab='', names=NA , boxwex=boxwex, col=colorbyvar[-c(7:8)], cex=0.5, pch=16, yaxt='n') 
axis(2, cex.axis=.7, las=1) axis(1, labels=shortnames[-c(7:8)], at=1:length(shortnames[-c(7:8)]), cex.axis=.7) mtext('Arcsin(sqrt(Semivariance range ratios))', 2, 1.5) abline(h=c(0,pi/2), lty=3) mtext('Variable', 1, 0, outer=T) dev.off() # Convert data for violin plots vdata <- j %>% dplyr::select(SemiRangeRatio_columns) %>% gather(key=variable, value=rangeratio) vdata$VariableShort <- shortnames[match(vdata$variable, SemiRangeRatio_columns)] vdata$VariableShort = factor(vdata$VariableShort, shortnames) rdata <- j %>% dplyr::select(SemiRange_columns) %>% gather(key=variable, value=semivarrange) rdata$VariableShort <- shortnames[match(rdata$variable, SemiRange_columns)] rdata$VariableShort = factor(rdata$VariableShort, shortnames) # Violin plot semivariance range ratios variables using points png("Figures/Boxplots/SemiVarRangeRatios_ViolinplotsAmongVariablesPoints.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) p1 <- ggplot(vdata, aes(x=VariableShort, y=rangeratio, fill=VariableShort, color=VariableShort)) + scale_y_continuous(limits = c(0, 1)) + labs(x = "", y='Semivariance range ratio') + geom_violin(alpha=0.15, na.rm=T) + # geom_boxplot(width=0.5, color='black', notch=T) + scale_fill_manual(values=colorbyvar) + scale_color_manual(values=colorbyvar) + geom_jitter(width=0.1, size=0.5, alpha=1) + stat_summary(fun.y=median, geom="point", size=3, color='black', shape=18) + theme_bw() + theme(legend.position="none") print(p1) dev.off() # Violin plot semivariance range ratios variables using points no LGR png("Figures/Boxplots/SemiVarRangeRatios_ViolinplotsAmongVariablesPointsNoLGR.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) p1 <- ggplot(vdata[-which(vdata$VariableShort %in% c('CO2', 'CH4')),], aes(x=VariableShort, y=rangeratio, fill=VariableShort, color=VariableShort)) + 
scale_y_continuous(limits = c(0, 1)) + labs(x = "", y='Semivariance range ratio') + geom_violin(alpha=0.15, na.rm=T) + # geom_boxplot(width=0.5, color='black', notch=T) + scale_fill_manual(values=colorbyvar[-c(7:8)]) + scale_color_manual(values=colorbyvar[-c(7:8)]) + geom_jitter(width=0.1, size=0.5, alpha=1) + stat_summary(fun.y=median, geom="point", size=3, color='black', shape=18) + theme_bw() + theme(legend.position="none") print(p1) dev.off() # Violin plot semivariance range ratios variables using points no LGR png("Figures/Boxplots/SemiVarRange_ViolinplotsAmongVariablesPointsNoLGR.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) p1 <- ggplot(rdata[-which(rdata$VariableShort %in% c('CO2', 'CH4')),], aes(x=VariableShort, y=semivarrange, fill=VariableShort, color=VariableShort)) + scale_y_continuous(limits=c(0,2000)) + labs(x = "", y='Semivariance range (m)') + geom_violin(alpha=0.15, na.rm=T, trim=F) + # geom_boxplot(width=0.5, color='black', notch=T) + scale_fill_manual(values=colorbyvar[-c(7:8)]) + scale_color_manual(values=colorbyvar[-c(7:8)]) + geom_jitter(width=0.1, size=0.5, alpha=1) + stat_summary(fun.y=median, geom="point", size=3, color='black', shape=18) + theme_bw() + theme(legend.position="none") print(p1) dev.off()
/R/CompareSpatialHeterogeneity_v2.R
no_license
lukeloken/NHLDLakes
R
false
false
13,763
r
# =================================== # Compare spatial summaries across systems # Load all NHLD pointsummaries data # Luke Loken, Feb 2018 # =================================== rm(list = ls()) library(gtools) library(tidyr) library(dplyr) library(ggplot2) library(gridExtra) setwd("E:/Git_Repo/NHLDLakes") asinTransform <- function(p) { asin(sqrt(p)) } j <- readRDS(file='Data/FlameStatsLagosChemAllWide.rds') #Subset or select which columns to use... goodvars<-c("TempC", "SPCuScm", "fDOMRFU", "pH","TurbFNU", "ODOmgL", "CO2uM", "CH4uM", "ChlARFU", "BGAPCRFU") # goodvars<-c("TempC", "SPCuScm", "fDOMRFU", "pH","TurbFNU", "ODOmgL", "CO2uM", "CH4uM", "ChlARFU", "BGAPCRFU", "ChlAugL", "BGAPCgL") shortnames<-c("Temp", "SPC", "fDOM", "pH", "Turb", "DO", "CO2", "CH4", "ChlA", "BGA") goodvars_pixels<-paste(goodvars, 'pixels', sep='_') goodvars_points<-paste(goodvars, 'points', sep='_') CVstats <- c('CV', 'MADMOverMedian', 'QuartileDispersion', 'SDL', 'skewness', 'shape') CVstats_short<-c('CV','skewness') Tablestats<-c('MaxMinusMin', 'sd', 'mad', 'CV', 'MADMOverMedian', 'QuartileDispersion', 'SDL', 'skewness', 'shape') SemiVars <- c('TmpC_h', 'SPCScm_h', 'fDOMRFU_h', 'pH_h', 'TrbFNU_h', 'ODOmgL_h', 'CO2M_h', 'CH4M_h', 'ChlARFU_h', 'BGAPCRFU_h') SemiRange_columns<-paste(SemiVars, 'points', 'SemiRange', sep='_') SemiRangeRatio_columns<-paste(SemiVars, 'points', 'SemiRangeOverCutoff', sep='_') SemiSillPercent_columns<-paste(SemiVars, 'points', 'SillPercent', sep='_') SemiTotalSill_columns<-paste(SemiVars, 'points', 'SillTotal', sep='_') SemiPSill_columns<-paste(SemiVars, 'points', 'PSill', sep='_') Table_columns<-c(paste(goodvars_points, Tablestats[1], sep="_"), paste(goodvars_points, Tablestats[2], sep="_"), paste(goodvars_points, Tablestats[3], sep="_"), paste(goodvars_points, Tablestats[4], sep="_"), paste(goodvars_points, Tablestats[5], sep="_"), paste(goodvars_points, Tablestats[6], sep="_"), paste(goodvars_points, Tablestats[7], sep="_"), paste(goodvars_points, 
Tablestats[8], sep="_"), paste(goodvars_points, Tablestats[9], sep="_"), SemiRange_columns, SemiRangeRatio_columns, SemiTotalSill_columns, SemiPSill_columns, SemiSillPercent_columns) Stats<-j[Table_columns] StatsMeans<- Stats %>% summarize_all(mean, na.rm=T) %>% as.numeric() %>% signif(digits=2) %>% matrix(nrow=14, ncol=10, byrow=T) StatsMins<- Stats %>% summarize_all(min, na.rm=T)%>% as.numeric() %>% signif(digits=2) %>% matrix(nrow=14, ncol=10, byrow=T) StatsMaxs<- Stats %>% summarize_all(max, na.rm=T) %>% as.numeric() %>% signif(digits=2) %>% matrix(nrow=14, ncol=10, byrow=T) StatsMatrix<-matrix(paste(StatsMins, ' to ', StatsMaxs, sep=''), nrow=14, ncol=10) table_SH<-as.data.frame(matrix(nrow=28, ncol=10)) table_SH[seq(1,27,2),]<-StatsMeans table_SH[seq(2,28,2),]<-StatsMatrix names(table_SH)<-c(shortnames) table_SH$Stat<-'' table_SH$Stat[seq(1,27,2)]<-c('Range (Max - Min)', 'SD', 'MAD', 'CV', 'MAD/Median', 'QuartileDispersion', 'SDL', 'Skewness', 'EVD-shape', 'SemiRange', 'SemiRangeRatio', 'Total Sill', 'P-sill', 'Sill %') table_SH_out<-table_SH[,c(11,1:10)] write.table(table_SH_out, file='Data/AcrossLake_SpatialHeterogeneity_Summary.csv', sep=',', row.names=F) # ############## # plotting # ############## #Set universal colors box widths, others boxwex=0.6 colors<-c('#377eb8', '#e41a1c', '#4daf4a') colorbyvar<-colors[c(1,1,1,1,1,2,2,2,2,2)] # Boxplots of spatial variability across variables using pixels png("Figures/Boxplots/CV_andothers_BoxplotsAmongVariablesPixels.png", res=200, width=4.5,height=12, units="in") par(mfrow=c(length(CVstats) ,1)) par(mar=c(1.5,3,.5,.5), oma=c(2.5,1,2,0)) par(mgp=c(2, .5, 0)) ymin<-c(rep(0,4),-5,-1.5) ymax<-c(0.5,0.2,0.2,0.2,5,1.5) nu <- 1 for (nu in 1:length(CVstats)){ boxplot(j[paste(goodvars_pixels, CVstats[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar) axis(1, labels=shortnames, at=1:length(shortnames)) if (CVstats[nu]=='MADMOverMedian'){ mtext('MAD/Median', 2, 2) } else { 
mtext(CVstats[nu], 2, 2)} abline(h=0, lty=3) boxplot(j[paste(goodvars_pixels, CVstats[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar, add=T) } mtext('Spatial stats across lakes and variable types (pixels)', 3, 0, outer=T) mtext('Variable', 1, .5, outer=T) dev.off() # Boxplots of spatial variability across variables using points png("Figures/Boxplots/CV_andothers_BoxplotsAmongVariablesPoints.png", res=200, width=4.5,height=12, units="in") par(mfrow=c(length(CVstats),1)) par(mar=c(1.5,3,.5,.5), oma=c(2.5,1,2,0)) par(mgp=c(2, .5, 0)) ymin<-c(rep(0,4),-5,-1.6) ymax<-c(1.1,0.75,0.75,0.45,5,1.6) nu <- 1 for (nu in 1:length(CVstats)){ boxplot(j[paste(goodvars_points, CVstats[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar) axis(1, labels=shortnames, at=1:length(shortnames)) if (CVstats[nu]=='MADMOverMedian'){ mtext('MAD/Median', 2, 2) } else { mtext(CVstats[nu], 2, 2)} abline(h=0, lty=3) boxplot(j[paste(goodvars_points, CVstats[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar, add=T) } mtext('Spatial stats across lakes and variable types (points)', 3, 0, outer=T) mtext('Variable', 1, .5, outer=T) dev.off() # Boxplots of spatial variability across variables using points just CV and skewness png("Figures/Boxplots/CV_skewness_BoxplotsAmongVariablesPoints.png", res=200, width=4.5,height=4, units="in") par(mfrow=c(length(CVstats_short),1)) par(mar=c(1.5,3,.5,.5), oma=c(2.5,1,0,0)) par(mgp=c(2, .5, 0), tck=-0.04) ymin<-c(0,-5) ymax<-c(1.1,5) nu <- 1 for (nu in 1:length(CVstats_short)){ boxplot(j[paste(goodvars_points, CVstats_short[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar, cex.axis=0.8, cex=0.6, las=1) if (nu==2){abline(h=c(-2,2), lty=3)} axis(1, labels=shortnames, at=1:length(shortnames), cex=0.6, cex.axis=0.6) mtext(CVstats_short[nu], 2, 2) abline(h=0, lty=3) 
boxplot(j[paste(goodvars_points, CVstats_short[nu], sep="_")], ylim=c(ymin[nu], ymax[nu]) , ylab='', names=NA , boxwex=boxwex, col=colorbyvar, add=T, cex.axis=0.8, cex=.6, las=1) } # mtext('Spatial stats across lakes and variable types (points)', 3, 0, outer=T) mtext('Variable', 1, .5, outer=T) dev.off() # Boxplots of semivariance ranges across variables using points png("Figures/Boxplots/SemiVarRanges_BoxplotsAmongVariablesPoints.png", res=200, width=5,height=3, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,2000) boxplot(j[SemiRange_columns], ylim=ylim, ylab='', names=NA , boxwex=boxwex, col=colorbyvar, cex=0.5, pch=16, yaxt='n') axis(2, at=seq(0,2000,500), cex.axis=.7) axis(1, labels=shortnames, at=1:length(shortnames), cex.axis=.7) mtext('Semivariance range (m)', 2, 1.5) abline(h=0, lty=3) mtext('Variable', 1, 0, outer=T) dev.off() # Boxplots of semivariance ranges across variables using points png("Figures/Boxplots/SemiVarRanges_BoxplotsAmongVariablesPointsNoLGR.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,2000) boxplot(j[SemiRange_columns[-c(7:8)]], ylim=ylim, ylab='', names=NA , boxwex=boxwex, col=colorbyvar[-c(7:8)], cex=0.5, pch=16, yaxt='n') axis(2, at=seq(0,2000,500), cex.axis=.7) axis(1, labels=shortnames[-c(7:8)], at=1:length(shortnames[-c(7:8)]), cex.axis=.7) mtext('Semivariance range (m)', 2, 1.5) abline(h=0, lty=3) mtext('Variable', 1, 0, outer=T) dev.off() # Boxplots of semivariance range ratios across variables using points png("Figures/Boxplots/SemiVarRangeRatios_BoxplotsAmongVariablesPoints.png", res=200, width=5,height=3, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,1) boxplot(j[SemiRangeRatio_columns], ylim=ylim, ylab='', names=NA , boxwex=boxwex, col=colorbyvar, cex=0.5, pch=16, yaxt='n') axis(2, at=seq(0,1,0.5), 
cex.axis=.7, las=1) axis(1, labels=shortnames, at=1:length(shortnames), cex.axis=.7) mtext('Semivariance range ratio', 2, 1.5) abline(h=c(0,1), lty=3) mtext('Variable', 1, 0, outer=T) dev.off() # Boxplots of semivariance range ratios across variables using points png("Figures/Boxplots/SemiVarRangeRatios_BoxplotsAmongVariablesPointsNoLGR.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,1) boxplot(j[SemiRangeRatio_columns[-c(7:8)]], ylim=ylim, ylab='', names=NA , boxwex=boxwex, col=colorbyvar[-c(7:8)], cex=0.5, pch=16, yaxt='n') axis(2, at=seq(0,1,.5), cex.axis=.7, las=1) axis(1, labels=shortnames[-c(7:8)], at=1:length(shortnames[-c(7:8)]), cex.axis=.7) mtext('Semivariance range ratios', 2, 1.5) abline(h=c(0,1), lty=3) mtext('Variable', 1, 0, outer=T) dev.off() #With arcsin sqrt transform # Boxplots of semivariance range ratios across variables using points png("Figures/Boxplots/SemiVarRangeRatiosArcSinTran_BoxplotsAmongVariablesPoints.png", res=200, width=5,height=3, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,1) boxplot(asinTransform(j[SemiRangeRatio_columns]), ylab='', names=NA , boxwex=boxwex, col=colorbyvar, cex=0.5, pch=16, yaxt='n') axis(2, cex.axis=.7, las=1) axis(1, labels=shortnames, at=1:length(shortnames), cex.axis=.7) mtext('Semivariance range ratio', 2, 1.5) abline(h=c(0,pi/2), lty=3) mtext('Variable', 1, 0, outer=T) dev.off() # Boxplots of semivariance range ratios arcsin transform across variables using points png("Figures/Boxplots/SemiVarRangeRatiosArcSinTran_BoxplotsAmongVariablesPointsNoLGR.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) ylim<-c(0,1) boxplot(asinTransform(j[SemiRangeRatio_columns[-c(7:8)]]), ylab='', names=NA , boxwex=boxwex, col=colorbyvar[-c(7:8)], cex=0.5, pch=16, yaxt='n') 
axis(2, cex.axis=.7, las=1) axis(1, labels=shortnames[-c(7:8)], at=1:length(shortnames[-c(7:8)]), cex.axis=.7) mtext('Arcsin(sqrt(Semivariance range ratios))', 2, 1.5) abline(h=c(0,pi/2), lty=3) mtext('Variable', 1, 0, outer=T) dev.off() # Convert data for violin plots vdata <- j %>% dplyr::select(SemiRangeRatio_columns) %>% gather(key=variable, value=rangeratio) vdata$VariableShort <- shortnames[match(vdata$variable, SemiRangeRatio_columns)] vdata$VariableShort = factor(vdata$VariableShort, shortnames) rdata <- j %>% dplyr::select(SemiRange_columns) %>% gather(key=variable, value=semivarrange) rdata$VariableShort <- shortnames[match(rdata$variable, SemiRange_columns)] rdata$VariableShort = factor(rdata$VariableShort, shortnames) # Violin plot semivariance range ratios variables using points png("Figures/Boxplots/SemiVarRangeRatios_ViolinplotsAmongVariablesPoints.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) p1 <- ggplot(vdata, aes(x=VariableShort, y=rangeratio, fill=VariableShort, color=VariableShort)) + scale_y_continuous(limits = c(0, 1)) + labs(x = "", y='Semivariance range ratio') + geom_violin(alpha=0.15, na.rm=T) + # geom_boxplot(width=0.5, color='black', notch=T) + scale_fill_manual(values=colorbyvar) + scale_color_manual(values=colorbyvar) + geom_jitter(width=0.1, size=0.5, alpha=1) + stat_summary(fun.y=median, geom="point", size=3, color='black', shape=18) + theme_bw() + theme(legend.position="none") print(p1) dev.off() # Violin plot semivariance range ratios variables using points no LGR png("Figures/Boxplots/SemiVarRangeRatios_ViolinplotsAmongVariablesPointsNoLGR.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) p1 <- ggplot(vdata[-which(vdata$VariableShort %in% c('CO2', 'CH4')),], aes(x=VariableShort, y=rangeratio, fill=VariableShort, color=VariableShort)) + 
scale_y_continuous(limits = c(0, 1)) + labs(x = "", y='Semivariance range ratio') + geom_violin(alpha=0.15, na.rm=T) + # geom_boxplot(width=0.5, color='black', notch=T) + scale_fill_manual(values=colorbyvar[-c(7:8)]) + scale_color_manual(values=colorbyvar[-c(7:8)]) + geom_jitter(width=0.1, size=0.5, alpha=1) + stat_summary(fun.y=median, geom="point", size=3, color='black', shape=18) + theme_bw() + theme(legend.position="none") print(p1) dev.off() # Violin plot semivariance range ratios variables using points no LGR png("Figures/Boxplots/SemiVarRange_ViolinplotsAmongVariablesPointsNoLGR.png", res=200, width=4,height=2.5, units="in") par(mfrow=c(1,1)) par(mar=c(1.5,2,.5,.5), oma=c(1,1,0,0)) par(mgp=c(2, .3, 0), tck=-0.02) p1 <- ggplot(rdata[-which(rdata$VariableShort %in% c('CO2', 'CH4')),], aes(x=VariableShort, y=semivarrange, fill=VariableShort, color=VariableShort)) + scale_y_continuous(limits=c(0,2000)) + labs(x = "", y='Semivariance range (m)') + geom_violin(alpha=0.15, na.rm=T, trim=F) + # geom_boxplot(width=0.5, color='black', notch=T) + scale_fill_manual(values=colorbyvar[-c(7:8)]) + scale_color_manual(values=colorbyvar[-c(7:8)]) + geom_jitter(width=0.1, size=0.5, alpha=1) + stat_summary(fun.y=median, geom="point", size=3, color='black', shape=18) + theme_bw() + theme(legend.position="none") print(p1) dev.off()
set.seed(123) K <- 2; p <- 2 X <- as.matrix(faithful) # Obtain initial memberships based on the K-means algorithm id.km <- kmeans(X, K)$cluster # Run the EM algorithm for a Manly mixture model based on K-means solution la <- matrix(0.1, K, p) B <- Manly.EM(X, id.km, la) Manly.plot(X, model = B, var1 = 1, x.mar = 1, xaxs="i", xaxt="n", xlab="", main = "")
/demo/DensityPlot.R
no_license
cran/ManlyMix
R
false
false
363
r
set.seed(123) K <- 2; p <- 2 X <- as.matrix(faithful) # Obtain initial memberships based on the K-means algorithm id.km <- kmeans(X, K)$cluster # Run the EM algorithm for a Manly mixture model based on K-means solution la <- matrix(0.1, K, p) B <- Manly.EM(X, id.km, la) Manly.plot(X, model = B, var1 = 1, x.mar = 1, xaxs="i", xaxt="n", xlab="", main = "")
\name{gframe} \alias{gframe} \title{gframe is a title-decorated ggroup box container} \usage{ gframe(text = "", pos = 0, horizontal = TRUE, spacing = 2, use.scrollwindow = FALSE, container = NULL, ..., width = NULL, height = NULL, ext.args = NULL) } \arguments{ \item{text}{label text} \item{pos}{position of label. Ignored?} \item{horizontal}{left or right (default), or top to bottom (\code{horizontal=FALSE})} \item{spacing}{Margin around each child component in pixels. Can be a single number, in which case it is equal pixel space around each child. But for gWidgetsWWW2 one can specify a vector with recycling like function(top, right=top, bottom=top, left=right). A typical pattern is c(5,5,0,0), as otherwise there are 10 = 5 + 5 pixels between adjoing children. To get padding just around interior of box, pass in a value through ext.args, as in \code{ext.args=list(bodyPadding=10)}.} \item{use.scrollwindow}{logical. If given, scrollbars will appear} \item{container}{A parent container. In \pkg{gWidgetsWWW2} a parent container is not optional (though it can be substituted with the \code{parent} argument in some circumstances). The parent specifies the widget heirarchy and the \code{...} argument is used to pass along arguments to layout the child component in the parent container. Typically, these are passed to the \code{add} method of the parent container.} \item{...}{passed to \code{ggroup}} \item{width}{width in pixels of component. Sizing in \pkg{gWidgetsWWW2} is sometimes necessary as the arguments \code{expand} and \code{fill} are not well implemented.} \item{height}{height in pixels of the component.} \item{ext.args}{The contructors of \pkg{gWidgetsWWW2} ultimately call an Ext constructor. The options passed to the Ext constructor may be added to or overridden by use of this argument. 
Values are passed in as named list components and with values converted into JavaScript objects by \code{asJSObject}.} } \value{ an \code{GContainer} object } \description{ Use \code{svalue<-} to adjust the title } \examples{ w <- gwindow() g <- gframe("Label", cont=w) b <- gbutton("insider frame", cont=g) svalue(g) <- "new label" } \seealso{ \code{\link{ggroup}} }
/man/gframe.Rd
no_license
kecoli/gWidgetsWWW2
R
false
false
2,287
rd
\name{gframe} \alias{gframe} \title{gframe is a title-decorated ggroup box container} \usage{ gframe(text = "", pos = 0, horizontal = TRUE, spacing = 2, use.scrollwindow = FALSE, container = NULL, ..., width = NULL, height = NULL, ext.args = NULL) } \arguments{ \item{text}{label text} \item{pos}{position of label. Ignored?} \item{horizontal}{left or right (default), or top to bottom (\code{horizontal=FALSE})} \item{spacing}{Margin around each child component in pixels. Can be a single number, in which case it is equal pixel space around each child. But for gWidgetsWWW2 one can specify a vector with recycling like function(top, right=top, bottom=top, left=right). A typical pattern is c(5,5,0,0), as otherwise there are 10 = 5 + 5 pixels between adjoing children. To get padding just around interior of box, pass in a value through ext.args, as in \code{ext.args=list(bodyPadding=10)}.} \item{use.scrollwindow}{logical. If given, scrollbars will appear} \item{container}{A parent container. In \pkg{gWidgetsWWW2} a parent container is not optional (though it can be substituted with the \code{parent} argument in some circumstances). The parent specifies the widget heirarchy and the \code{...} argument is used to pass along arguments to layout the child component in the parent container. Typically, these are passed to the \code{add} method of the parent container.} \item{...}{passed to \code{ggroup}} \item{width}{width in pixels of component. Sizing in \pkg{gWidgetsWWW2} is sometimes necessary as the arguments \code{expand} and \code{fill} are not well implemented.} \item{height}{height in pixels of the component.} \item{ext.args}{The contructors of \pkg{gWidgetsWWW2} ultimately call an Ext constructor. The options passed to the Ext constructor may be added to or overridden by use of this argument. 
Values are passed in as named list components and with values converted into JavaScript objects by \code{asJSObject}.} } \value{ an \code{GContainer} object } \description{ Use \code{svalue<-} to adjust the title } \examples{ w <- gwindow() g <- gframe("Label", cont=w) b <- gbutton("insider frame", cont=g) svalue(g) <- "new label" } \seealso{ \code{\link{ggroup}} }
###################### # Overfitting and Neural Nets ####################### library(rgl) library(NeuralNetTools) library(ggplot2) library(readr) library(dplyr) library(caret) library(nnet) library(doMC) # Benchmarking function=MAE mae<-function(a,b){mean(abs(a-b),na.rm=TRUE)} # Genrating the data set.seed(1) n<-200 # sample size x1<-runif(n) x2<-runif(n) x3<-runif(n) x4<-runif(n) x5<-runif(n) x6<-runif(n) x7<-runif(n) x8<-runif(n) x9<-runif(n) x10<-runif(n) x11<-runif(n) x12<-runif(n) x13<-runif(n) x14<-runif(n) x15<-runif(n) ruido<-rnorm(n)/4 # the added error y<-4+3*x1+ruido # y explained just by x1 plot(x1,y) plot3d(x1,x2,y) datos<-data.frame(cbind(y,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15)) datos.train<-datos[1:(n/2),] datos.test<-datos[((n/2)+1):n,] plot(datos.train$x1,datos.train$y) suave<-lowess(datos.train$x1,datos.train$y) lines(suave,col='green',lwd=3) # True MAE mae(0,ruido) # Trying a simple liear model ajus.lin<-lm(y ~ ., data=datos.train) summary(ajus.lin) pred <- predict(ajus.lin, newdata=datos.test) plot(datos.test$y, pred, xlab='Real', ylab='Predicho', cex=0.5, pch=1) abline(0,1,col='green') elmae<-mae(datos.test$y,pred) title(paste('MAE =',round(elmae,2))) #### Training the NNET model set.seed(1000) ajus.nnet <- nnet(y ~ ., data=datos.train, size=4, decay=0, maxit=3000, linout=1, trace=TRUE) ajus.nnet plotnet(ajus.nnet) summary(ajus.nnet) ##### To look at the results of the model # Using the training dataset pred <- predict(ajus.nnet, newdata=datos.train) plot(datos.train$y, pred, xlab='Real', ylab='Predicho', cex=0.5, pch=1) abline(0,1,col='green') elmae<-mae(datos.train$y,pred) title(paste('MAE =',round(elmae,2))) # Using the Testing dataset pred <- predict(ajus.nnet, newdata=datos.test) plot(datos.test$y, pred, xlab='Real', ylab='Predicho', cex=0.5, pch=1) abline(0,1,col='green') elmae<-mae(datos.test$y,pred) title(paste('MAE =',round(elmae,2))) ############ # What is the fitted function ???? 
for (lavar in 1:(ncol(datos)-1)) { datos.marg<-datos.test datos.marg[,-(lavar+1)]<-0 # no change in these variables datos.marg[,(lavar+1)]<-seq(from=-0,to=1,length.out=n/2) # change in here pred <- predict(ajus.nnet, newdata=datos.marg) plot(datos.marg[,lavar+1],pred,ylim=range(y)) title(paste('Var x',as.character(lavar),sep='')) } # Weird things happen with NN fit undato<-datos.test[1,] undato[,2:16]<-0;undato[,14]<-1 # bad undato[,2:16]<-0;undato[,14]<-1;undato[,15]<-1 # bad undato[,2:16]<-0;undato[,2]<-1 # good undato[,2:16]<-runif(15) # let us see pred.nn <- predict(ajus.nnet, newdata=undato) pred.lin <- predict(ajus.lin, newdata=undato) pred.nn pred.lin #### let us generate many samples N<-1000 mat<-matrix(runif(15*N),N,15) manydatos<-data.frame(mat);names(manydatos)<-colnames(datos)[2:16] pred.nn <- predict(ajus.nnet, newdata=manydatos) plot(pred.nn) abline(4,0,col='green');abline(7,0,col='green') ############################################### # Grid Search and Hiperparametrization Approach ############################################### maeSummary <- function (data, lev = NULL, model = NULL) { out <- mae(data$obs, data$pred) names(out) <- "MAE" out } # Training fitControl <- trainControl(method = "repeatedcv", number = 2, repeats = 10, summaryFunction = maeSummary) escen<-expand.grid(.size=seq(from=1,to=21,by=3),.decay=c(0,0.1,0.2,0.3)) model <- train(y~., data=datos.train, method='nnet', trace = TRUE,tuneGrid=escen,trControl = fitControl,maxit=3000,linout=1, metric = "MAE",maximize=FALSE) plot(model) # importance of variables import<-varImp(model, scale = FALSE) plot(import) ## extracting the best model best.model<-model$finalModel best.model summary(best.model) plotnet(best.model) ##### To look at the results of the model pred <- predict(best.model, newdata=datos.test) plot(datos.test$y, pred, xlab='Real', ylab='Predicho', cex=0.5, pch=1) abline(0,1,col='green') elmae<-mae(datos.test$y,pred) title(paste('MAE =',round(elmae,2))) # # What is the fitted 
function ???? for (lavar in 1:(ncol(datos)-1)) { datos.marg<-datos.test datos.marg[,-(lavar+1)]<-0 # no change in these variables datos.marg[,(lavar+1)]<-seq(from=-0,to=1,length.out=n/2) # change in here pred <- predict(best.model, newdata=datos.marg) plot(datos.marg[,lavar+1],pred,ylim=range(y)) title(paste('Var x',as.character(lavar),sep='')) } ##### let us generate many samples N<-1000 mat<-matrix(runif(15*N),N,15) manydatos<-data.frame(mat);names(manydatos)<-colnames(datos)[2:16] pred.nn <- predict(best.model, newdata=manydatos) plot(pred.nn) abline(4,0,col='green');abline(7,0,col='green')
/teorica/Overfitting Avoiding With NN ultimo 2019R.R
permissive
DiegoKoz/EEA2019
R
false
false
4,737
r
###################### # Overfitting and Neural Nets ####################### library(rgl) library(NeuralNetTools) library(ggplot2) library(readr) library(dplyr) library(caret) library(nnet) library(doMC) # Benchmarking function=MAE mae<-function(a,b){mean(abs(a-b),na.rm=TRUE)} # Genrating the data set.seed(1) n<-200 # sample size x1<-runif(n) x2<-runif(n) x3<-runif(n) x4<-runif(n) x5<-runif(n) x6<-runif(n) x7<-runif(n) x8<-runif(n) x9<-runif(n) x10<-runif(n) x11<-runif(n) x12<-runif(n) x13<-runif(n) x14<-runif(n) x15<-runif(n) ruido<-rnorm(n)/4 # the added error y<-4+3*x1+ruido # y explained just by x1 plot(x1,y) plot3d(x1,x2,y) datos<-data.frame(cbind(y,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15)) datos.train<-datos[1:(n/2),] datos.test<-datos[((n/2)+1):n,] plot(datos.train$x1,datos.train$y) suave<-lowess(datos.train$x1,datos.train$y) lines(suave,col='green',lwd=3) # True MAE mae(0,ruido) # Trying a simple liear model ajus.lin<-lm(y ~ ., data=datos.train) summary(ajus.lin) pred <- predict(ajus.lin, newdata=datos.test) plot(datos.test$y, pred, xlab='Real', ylab='Predicho', cex=0.5, pch=1) abline(0,1,col='green') elmae<-mae(datos.test$y,pred) title(paste('MAE =',round(elmae,2))) #### Training the NNET model set.seed(1000) ajus.nnet <- nnet(y ~ ., data=datos.train, size=4, decay=0, maxit=3000, linout=1, trace=TRUE) ajus.nnet plotnet(ajus.nnet) summary(ajus.nnet) ##### To look at the results of the model # Using the training dataset pred <- predict(ajus.nnet, newdata=datos.train) plot(datos.train$y, pred, xlab='Real', ylab='Predicho', cex=0.5, pch=1) abline(0,1,col='green') elmae<-mae(datos.train$y,pred) title(paste('MAE =',round(elmae,2))) # Using the Testing dataset pred <- predict(ajus.nnet, newdata=datos.test) plot(datos.test$y, pred, xlab='Real', ylab='Predicho', cex=0.5, pch=1) abline(0,1,col='green') elmae<-mae(datos.test$y,pred) title(paste('MAE =',round(elmae,2))) ############ # What is the fitted function ???? 
for (lavar in 1:(ncol(datos)-1)) { datos.marg<-datos.test datos.marg[,-(lavar+1)]<-0 # no change in these variables datos.marg[,(lavar+1)]<-seq(from=-0,to=1,length.out=n/2) # change in here pred <- predict(ajus.nnet, newdata=datos.marg) plot(datos.marg[,lavar+1],pred,ylim=range(y)) title(paste('Var x',as.character(lavar),sep='')) } # Weird things happen with NN fit undato<-datos.test[1,] undato[,2:16]<-0;undato[,14]<-1 # bad undato[,2:16]<-0;undato[,14]<-1;undato[,15]<-1 # bad undato[,2:16]<-0;undato[,2]<-1 # good undato[,2:16]<-runif(15) # let us see pred.nn <- predict(ajus.nnet, newdata=undato) pred.lin <- predict(ajus.lin, newdata=undato) pred.nn pred.lin #### let us generate many samples N<-1000 mat<-matrix(runif(15*N),N,15) manydatos<-data.frame(mat);names(manydatos)<-colnames(datos)[2:16] pred.nn <- predict(ajus.nnet, newdata=manydatos) plot(pred.nn) abline(4,0,col='green');abline(7,0,col='green') ############################################### # Grid Search and Hiperparametrization Approach ############################################### maeSummary <- function (data, lev = NULL, model = NULL) { out <- mae(data$obs, data$pred) names(out) <- "MAE" out } # Training fitControl <- trainControl(method = "repeatedcv", number = 2, repeats = 10, summaryFunction = maeSummary) escen<-expand.grid(.size=seq(from=1,to=21,by=3),.decay=c(0,0.1,0.2,0.3)) model <- train(y~., data=datos.train, method='nnet', trace = TRUE,tuneGrid=escen,trControl = fitControl,maxit=3000,linout=1, metric = "MAE",maximize=FALSE) plot(model) # importance of variables import<-varImp(model, scale = FALSE) plot(import) ## extracting the best model best.model<-model$finalModel best.model summary(best.model) plotnet(best.model) ##### To look at the results of the model pred <- predict(best.model, newdata=datos.test) plot(datos.test$y, pred, xlab='Real', ylab='Predicho', cex=0.5, pch=1) abline(0,1,col='green') elmae<-mae(datos.test$y,pred) title(paste('MAE =',round(elmae,2))) # # What is the fitted 
function ???? for (lavar in 1:(ncol(datos)-1)) { datos.marg<-datos.test datos.marg[,-(lavar+1)]<-0 # no change in these variables datos.marg[,(lavar+1)]<-seq(from=-0,to=1,length.out=n/2) # change in here pred <- predict(best.model, newdata=datos.marg) plot(datos.marg[,lavar+1],pred,ylim=range(y)) title(paste('Var x',as.character(lavar),sep='')) } ##### let us generate many samples N<-1000 mat<-matrix(runif(15*N),N,15) manydatos<-data.frame(mat);names(manydatos)<-colnames(datos)[2:16] pred.nn <- predict(best.model, newdata=manydatos) plot(pred.nn) abline(4,0,col='green');abline(7,0,col='green')
########################################################################################## ############################## HELPER FUNCTIONS ############################## ########################################################################################## # function foldList sums all the entries in a list with their previous entries # and returns a list with each entry as the sum of those previous entries foldList <- function(list){ sum_so_far = 0 folded_list = c(sum_so_far) n = length(list) for (i in 1:n){ sum_so_far = folded_list[i] + list[i] folded_list= c(folded_list, sum_so_far) } return(folded_list[-1]) } fb_plot <- function(dat, acc, col1, col2){ total_followed = tail(foldList(dat$Followed), n = 1) total_followers = tail(dat$Followers, n = 1) - dat$Followers[1] fbr = total_followers/total_followed plot(log(foldList(Followed)) ~ Date, ann = FALSE, dat,type = "l", col = col1, lwd = 3) lines(log((Followers - Followers[1])) ~ Date, dat, col = col2, lwd = 3) title = "Follow back rate," title(main = paste(title, acc), xlab = "date", ylab = "log(users)", sub = paste("follow-back ratio =", round(fbr,3))) legend("topleft", legend = c( "Followed","Followers"), col = c(col1, col2), lwd = 4, lty= 1:1) f = paste(acc, ".png", sep = "") dev.copy(jpeg,filename=paste("figs/", f, sep = ""),width = 500, height = 500); dev.off (); } ########################################################################################## ############################## DATA ############################## ########################################################################################## # reading in the data lhu_dat = read.csv("data/LHU_stats.csv", header = T) mmo_dat = read.csv("data/MMO_stats.csv", header = T) ftm_dat = read.csv("data/FTM_stats.csv", header = T) dru_dat = read.csv("data/DRU_stats.csv", header = T) lmo_dat = read.csv("data/LMO_stats.csv", header = T) # mmo_dat has the largest interval of observations n = length(mmo_dat$Date) # creating dates mmo_dat$Date <- 
as.Date(mmo_dat$Date, "%m/%d/%Y") lhu_dat$Date <- as.Date(lhu_dat$Date, "%m/%d/%Y") ftm_dat$Date <- as.Date(ftm_dat$Date, "%m/%d/%Y") dru_dat$Date <- as.Date(dru_dat$Date, "%m/%d/%Y") lmo_dat$Date <- as.Date(lmo_dat$Date, "%m/%d/%Y") ########################################################################################## ############################## FOLLOWER GAINS ############################## ########################################################################################## # Plotting the data in fancy Instagram colours plot(Followers ~ Date, mmo_dat,type = "l", ann = FALSE, col = 'darkgoldenrod1', lwd = 3, ylim = c(0,35000)) lines(Followers ~ Date, lhu_dat,type = "l", col = 'orangered', lwd = 3) lines(Followers ~ Date, ftm_dat,type = "l", col = 'magenta', lwd = 3) lines(Followers ~ Date, dru_dat,type = "l", col = 'purple', lwd = 3) lines(Followers ~ Date, lmo_dat,type = "l", col = 'blue2', lwd = 3) title(main="Follower growth Aug 28, 2015 - Sept 7, 2016", xlab="date", ylab="followers", lwd = 3) grid(nx = NULL, col = "lightgray", lty = "dotted",lwd = par("lwd"), equilogs = TRUE) legend("topleft", legend = c( "@MMO","@LHU", "@FMT", "@DRU", "@LMO"), col = c('darkgoldenrod1', 'orangered', 'magenta', "purple", "blue2"), lwd = 4, lty= 1:1) # save the fig dev.copy(jpeg,filename="figs/growth.png",width = 1000, height = 500); dev.off (); ########################################################################################## ############################## FOLLOW BACK RATIO ############################## ########################################################################################## # plotting visualization of follow-back ratios fb_plot(mmo_dat,'@MMO', 'darkgoldenrod1', 'orange') fb_plot(lhu_dat,'@LHU', 'orangered', 'red3') fb_plot(dru_dat,'@DRU', 'purple', 'purple3') fb_plot(ftm_dat,'@FTM', 'magenta', 'magenta3') fb_plot(lmo_dat,'@LMO', 'blue', 'blue3') # plotting daily follower gains plot(Gains ~ Date, mmo_dat,type = "l", ann = FALSE, col = 
'darkgoldenrod1', lwd = 3) abline(mean(dru_dat$Gains),0, col = "orange") title(main="Daily follower gains, @MMO", xlab="date", ylab="followers", lwd = 3) ## TODO: figure out a way to smmoth data to give observations for each day ## TODO: do a plot of followback ratio
/instagrowth.R
no_license
evmarts/insta_analytics
R
false
false
4,383
r
########################################################################################## ############################## HELPER FUNCTIONS ############################## ########################################################################################## # function foldList sums all the entries in a list with their previous entries # and returns a list with each entry as the sum of those previous entries foldList <- function(list){ sum_so_far = 0 folded_list = c(sum_so_far) n = length(list) for (i in 1:n){ sum_so_far = folded_list[i] + list[i] folded_list= c(folded_list, sum_so_far) } return(folded_list[-1]) } fb_plot <- function(dat, acc, col1, col2){ total_followed = tail(foldList(dat$Followed), n = 1) total_followers = tail(dat$Followers, n = 1) - dat$Followers[1] fbr = total_followers/total_followed plot(log(foldList(Followed)) ~ Date, ann = FALSE, dat,type = "l", col = col1, lwd = 3) lines(log((Followers - Followers[1])) ~ Date, dat, col = col2, lwd = 3) title = "Follow back rate," title(main = paste(title, acc), xlab = "date", ylab = "log(users)", sub = paste("follow-back ratio =", round(fbr,3))) legend("topleft", legend = c( "Followed","Followers"), col = c(col1, col2), lwd = 4, lty= 1:1) f = paste(acc, ".png", sep = "") dev.copy(jpeg,filename=paste("figs/", f, sep = ""),width = 500, height = 500); dev.off (); } ########################################################################################## ############################## DATA ############################## ########################################################################################## # reading in the data lhu_dat = read.csv("data/LHU_stats.csv", header = T) mmo_dat = read.csv("data/MMO_stats.csv", header = T) ftm_dat = read.csv("data/FTM_stats.csv", header = T) dru_dat = read.csv("data/DRU_stats.csv", header = T) lmo_dat = read.csv("data/LMO_stats.csv", header = T) # mmo_dat has the largest interval of observations n = length(mmo_dat$Date) # creating dates mmo_dat$Date <- 
as.Date(mmo_dat$Date, "%m/%d/%Y") lhu_dat$Date <- as.Date(lhu_dat$Date, "%m/%d/%Y") ftm_dat$Date <- as.Date(ftm_dat$Date, "%m/%d/%Y") dru_dat$Date <- as.Date(dru_dat$Date, "%m/%d/%Y") lmo_dat$Date <- as.Date(lmo_dat$Date, "%m/%d/%Y") ########################################################################################## ############################## FOLLOWER GAINS ############################## ########################################################################################## # Plotting the data in fancy Instagram colours plot(Followers ~ Date, mmo_dat,type = "l", ann = FALSE, col = 'darkgoldenrod1', lwd = 3, ylim = c(0,35000)) lines(Followers ~ Date, lhu_dat,type = "l", col = 'orangered', lwd = 3) lines(Followers ~ Date, ftm_dat,type = "l", col = 'magenta', lwd = 3) lines(Followers ~ Date, dru_dat,type = "l", col = 'purple', lwd = 3) lines(Followers ~ Date, lmo_dat,type = "l", col = 'blue2', lwd = 3) title(main="Follower growth Aug 28, 2015 - Sept 7, 2016", xlab="date", ylab="followers", lwd = 3) grid(nx = NULL, col = "lightgray", lty = "dotted",lwd = par("lwd"), equilogs = TRUE) legend("topleft", legend = c( "@MMO","@LHU", "@FMT", "@DRU", "@LMO"), col = c('darkgoldenrod1', 'orangered', 'magenta', "purple", "blue2"), lwd = 4, lty= 1:1) # save the fig dev.copy(jpeg,filename="figs/growth.png",width = 1000, height = 500); dev.off (); ########################################################################################## ############################## FOLLOW BACK RATIO ############################## ########################################################################################## # plotting visualization of follow-back ratios fb_plot(mmo_dat,'@MMO', 'darkgoldenrod1', 'orange') fb_plot(lhu_dat,'@LHU', 'orangered', 'red3') fb_plot(dru_dat,'@DRU', 'purple', 'purple3') fb_plot(ftm_dat,'@FTM', 'magenta', 'magenta3') fb_plot(lmo_dat,'@LMO', 'blue', 'blue3') # plotting daily follower gains plot(Gains ~ Date, mmo_dat,type = "l", ann = FALSE, col = 
'darkgoldenrod1', lwd = 3) abline(mean(dru_dat$Gains),0, col = "orange") title(main="Daily follower gains, @MMO", xlab="date", ylab="followers", lwd = 3) ## TODO: figure out a way to smmoth data to give observations for each day ## TODO: do a plot of followback ratio
library(ClusterR) ### Name: Cluster_Medoids ### Title: Partitioning around medoids ### Aliases: Cluster_Medoids ### ** Examples data(dietary_survey_IBS) dat = dietary_survey_IBS[, -ncol(dietary_survey_IBS)] dat = center_scale(dat) cm = Cluster_Medoids(dat, clusters = 3, distance_metric = 'euclidean', swap_phase = TRUE)
/data/genthat_extracted_code/ClusterR/examples/Cluster_Medoids.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
333
r
library(ClusterR) ### Name: Cluster_Medoids ### Title: Partitioning around medoids ### Aliases: Cluster_Medoids ### ** Examples data(dietary_survey_IBS) dat = dietary_survey_IBS[, -ncol(dietary_survey_IBS)] dat = center_scale(dat) cm = Cluster_Medoids(dat, clusters = 3, distance_metric = 'euclidean', swap_phase = TRUE)
#Global Environment match.score <- 100 #assign operator match.score #print score assign("match.score",200) #assign function match.score #print score #Custom Environment match.environment <- new.env() #create custom environment parent.env(match.environment) match.environment$'match.score' <- 300 #assign cust env variable match.environment$'match.score' #print cust env variable #Operator #Arithmatic 10 + 5 10 - 5 10 / 5 10 * 3 10 ^ 5 format(10 ** 5, scientific = FALSE) 10 %% 3 10 %/% 3 1 / 0 #Math Function factorial(5) log(2 , base = 10) #special constant pi #Special Number -1 / 0 1 / 0 Inf + 5 is.finite(1/0) is.infinite(1/0) is.nan(NA+5) #Logical #Vectorized operation student.marks <- c(10,20,30,40) student.marks mean(student.marks) sqrt(student.marks) student.marks <- student.marks + 5 student.marks >= 25
/R_Learn/R_Operator.R
no_license
durga-nayak/R_Workspace
R
false
false
824
r
#Global Environment match.score <- 100 #assign operator match.score #print score assign("match.score",200) #assign function match.score #print score #Custom Environment match.environment <- new.env() #create custom environment parent.env(match.environment) match.environment$'match.score' <- 300 #assign cust env variable match.environment$'match.score' #print cust env variable #Operator #Arithmatic 10 + 5 10 - 5 10 / 5 10 * 3 10 ^ 5 format(10 ** 5, scientific = FALSE) 10 %% 3 10 %/% 3 1 / 0 #Math Function factorial(5) log(2 , base = 10) #special constant pi #Special Number -1 / 0 1 / 0 Inf + 5 is.finite(1/0) is.infinite(1/0) is.nan(NA+5) #Logical #Vectorized operation student.marks <- c(10,20,30,40) student.marks mean(student.marks) sqrt(student.marks) student.marks <- student.marks + 5 student.marks >= 25
library(shiny) library(shinyBS) source("functions.R") predButton <- function(id) { actionButton(id, label = "", width = "100%", style ="background-color: #ffffff; border-color: #2e6da4; height:40px") } ui = fluidPage(style ="background-color: #e6f7ff", tags$script(HTML("$(function(){ $(document).keyup(function(e) { if (e.keyCode == 72 && e.ctrlKey && e.altKey) { $('#button1').click() } }); })")), tags$script(HTML("$(function(){ $(document).keyup(function(e) { if (e.keyCode == 74 && e.ctrlKey && e.altKey) { $('#button2').click() } }); })")), tags$script(HTML("$(function(){ $(document).keyup(function(e) { if (e.keyCode == 75 && e.ctrlKey && e.altKey) { $('#button3').click() } }); })")), tags$script(HTML("$(function(){ $(document).keyup(function(e) { if (e.keyCode == 76 && e.ctrlKey && e.altKey) { $('#button4').click() } }); })")), fluidRow( column(12, p(h1(strong("Computer-Aided Writing"), style = "color: #2e6da4")) ) ), fluidRow( column(3, h4(strong("Smart Prediction:")), #br(), div(tags$i("Hot Key:"), align = "right"), predButton("button1"), br(), div(tags$i("Ctrl + Alt + h"), align = "right"), predButton("button2"), br(), div(tags$i("Ctrl + Alt + j"), align = "right"), predButton("button3"), br(), div(tags$i("Ctrl + Alt + k"), align = "right"), predButton("button4"), br(), div(tags$i("Ctrl + Alt + l"), align = "right"), br() ), column(9, br(), splitLayout( textInput("tool", label = NULL, value = "", placeholder = "Search", width = "100%"), htmlOutput("dictionary"), bsPopover("dict", "Look it up on dictionary.com"), bsPopover("thes", "Find a synonym on thesaurus.com") ), fluidRow( column(9, textAreaInput("inText", label = NULL, value = "", width = "100%", height = NULL, cols = NULL, rows = 10, placeholder = "Enter text here", resize = NULL) ) ), div("Created by ", tags$a(href="https://github.com/QLab-Engineering/Computer-Aided-Writing-App", "Francis Labrecque"), align = "right") ) ) ) server <- function(input, output, session) { pred_word <- 
reactive(predWordOut(readInput(input$inText))) observeEvent(input$button1, { value <- paste(input$inText, pred_word()[1], " ", sep = "") updateTextAreaInput(session, "inText", value = value) }) observeEvent(input$inText,{ label = pred_word()[1] updateActionButton(session, "button1", label = label) }) observeEvent(input$button2, { value <- paste(input$inText, pred_word()[2], " ", sep = "") updateTextAreaInput(session, "inText", value = value) }) observeEvent(input$inText,{ label = pred_word()[2] updateActionButton(session, "button2", label = label) }) observeEvent(input$button3, { value <- paste(input$inText, pred_word()[3], " ", sep = "") updateTextAreaInput(session, "inText", value = value) }) observeEvent(input$inText,{ label = pred_word()[3] updateActionButton(session, "button3", label = label) }) observeEvent(input$button4, { value <- paste(input$inText, pred_word()[4], " ", sep = "") updateTextAreaInput(session, "inText", value = value) }) observeEvent(input$inText,{ label = pred_word()[4] updateActionButton(session, "button4", label = label) }) observeEvent(input$inText,{ value = word(trimws(input$inText), -1) updateTextInput(session, "tool", value = value) }) output$dictionary <- renderUI({ tagList( tags$button(id = "dict", class = "btn action-button", onclick = paste0("window.open('https://www.dictionary.com/browse/", input$tool, "', '_blank')"), style = "width:34px; padding:0px; border-color: #cccccc", tags$img(src = "dictionary.jpg", width = "32px", margin = "0px", padding = "0px", height = "32px") ), tags$button(id = "thes", class = "btn action-button", onclick = paste0("window.open('https://www.thesaurus.com/browse/", input$tool, "', '_blank')"), style = "width:34px; padding:0px; border-color: #cccccc", tags$img(src = "thesaurus.png", width = "32px", margin = "0px", padding = "0px", height = "32px") ) ) }) } shinyApp(ui = ui, server = server)
/app.R
no_license
QLab-Engineering/Computer-Aided-Writing-App
R
false
false
5,407
r
library(shiny) library(shinyBS) source("functions.R") predButton <- function(id) { actionButton(id, label = "", width = "100%", style ="background-color: #ffffff; border-color: #2e6da4; height:40px") } ui = fluidPage(style ="background-color: #e6f7ff", tags$script(HTML("$(function(){ $(document).keyup(function(e) { if (e.keyCode == 72 && e.ctrlKey && e.altKey) { $('#button1').click() } }); })")), tags$script(HTML("$(function(){ $(document).keyup(function(e) { if (e.keyCode == 74 && e.ctrlKey && e.altKey) { $('#button2').click() } }); })")), tags$script(HTML("$(function(){ $(document).keyup(function(e) { if (e.keyCode == 75 && e.ctrlKey && e.altKey) { $('#button3').click() } }); })")), tags$script(HTML("$(function(){ $(document).keyup(function(e) { if (e.keyCode == 76 && e.ctrlKey && e.altKey) { $('#button4').click() } }); })")), fluidRow( column(12, p(h1(strong("Computer-Aided Writing"), style = "color: #2e6da4")) ) ), fluidRow( column(3, h4(strong("Smart Prediction:")), #br(), div(tags$i("Hot Key:"), align = "right"), predButton("button1"), br(), div(tags$i("Ctrl + Alt + h"), align = "right"), predButton("button2"), br(), div(tags$i("Ctrl + Alt + j"), align = "right"), predButton("button3"), br(), div(tags$i("Ctrl + Alt + k"), align = "right"), predButton("button4"), br(), div(tags$i("Ctrl + Alt + l"), align = "right"), br() ), column(9, br(), splitLayout( textInput("tool", label = NULL, value = "", placeholder = "Search", width = "100%"), htmlOutput("dictionary"), bsPopover("dict", "Look it up on dictionary.com"), bsPopover("thes", "Find a synonym on thesaurus.com") ), fluidRow( column(9, textAreaInput("inText", label = NULL, value = "", width = "100%", height = NULL, cols = NULL, rows = 10, placeholder = "Enter text here", resize = NULL) ) ), div("Created by ", tags$a(href="https://github.com/QLab-Engineering/Computer-Aided-Writing-App", "Francis Labrecque"), align = "right") ) ) ) server <- function(input, output, session) { pred_word <- 
reactive(predWordOut(readInput(input$inText))) observeEvent(input$button1, { value <- paste(input$inText, pred_word()[1], " ", sep = "") updateTextAreaInput(session, "inText", value = value) }) observeEvent(input$inText,{ label = pred_word()[1] updateActionButton(session, "button1", label = label) }) observeEvent(input$button2, { value <- paste(input$inText, pred_word()[2], " ", sep = "") updateTextAreaInput(session, "inText", value = value) }) observeEvent(input$inText,{ label = pred_word()[2] updateActionButton(session, "button2", label = label) }) observeEvent(input$button3, { value <- paste(input$inText, pred_word()[3], " ", sep = "") updateTextAreaInput(session, "inText", value = value) }) observeEvent(input$inText,{ label = pred_word()[3] updateActionButton(session, "button3", label = label) }) observeEvent(input$button4, { value <- paste(input$inText, pred_word()[4], " ", sep = "") updateTextAreaInput(session, "inText", value = value) }) observeEvent(input$inText,{ label = pred_word()[4] updateActionButton(session, "button4", label = label) }) observeEvent(input$inText,{ value = word(trimws(input$inText), -1) updateTextInput(session, "tool", value = value) }) output$dictionary <- renderUI({ tagList( tags$button(id = "dict", class = "btn action-button", onclick = paste0("window.open('https://www.dictionary.com/browse/", input$tool, "', '_blank')"), style = "width:34px; padding:0px; border-color: #cccccc", tags$img(src = "dictionary.jpg", width = "32px", margin = "0px", padding = "0px", height = "32px") ), tags$button(id = "thes", class = "btn action-button", onclick = paste0("window.open('https://www.thesaurus.com/browse/", input$tool, "', '_blank')"), style = "width:34px; padding:0px; border-color: #cccccc", tags$img(src = "thesaurus.png", width = "32px", margin = "0px", padding = "0px", height = "32px") ) ) }) } shinyApp(ui = ui, server = server)
# TODO(AW) make this reproducible! ## a script to investigate the coverage of my confidence set procedure ## HIV dataset ##### first: run bash script ### HIV dataset setwd("Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/") source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/miscellaneous_phylogenetics_programs.R") ################################################ #################### n=20 ################################################ trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated20/split/trees/") length(trees) ## should be 20000 for (tree in trees) { path <- paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated20/split/trees/", tree, sep="") tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated20/trees/", tree,sep=""), row.names = F, quote = F, col.names = F) } ### create 1000 batches of 20 trees, run the program on each batch, see if covers batches <- 1000 containsTruth <- rep(NA, batches) containsTruthAbs <- rep(NA, batches); mu0Abs <- c(0.002854010, 0.0379729) batchSize <- length(trees)/ batches trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated20/trees/", full.names = T) pts <- matrix(NA, nrow=batches, ncol=2) pts_true <- matrix(NA, nrow=batches, ncol=2) covs <- list() for (batchNumber in 1:batches) { ### create the batch path <- trees[(batchNumber - 1)*batchSize + 1] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = F) for (j in 2:batchSize) { 
path <- trees[(batchNumber - 1)*batchSize + j] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = T) } ### run the script to get the log maps, and load them system('java -jar coverage_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/maps.R") ### get the log map of the true tree system('java -jar coverage_truth_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/map_truth.R") ### check if true tree is in confidence set observations <- matrix(NA, ncol = length(tree1coveragesim), nrow = batchSize) for (i in 1:batchSize) { observations[i, ] <- get(paste("tree", i, "coveragesim", sep="")) } mu0 <- trueTreeLogMap xbar = apply(observations, 2, mean) S = cov(observations) n=batchSize; m=length(tree1coveragesim) containsTruth[batchNumber] <- t(xbar - mu0)%*%solve(S)%*%(xbar - mu0) containsTruthAbs[batchNumber] <- t(xbar - mu0Abs)%*%solve(S)%*%(xbar - mu0Abs) # pts[batchNumber, ] <- xbar # pts_true[batchNumber, ] <- mu0 # covs[[batchNumber]] <- S } n=batchSize; m=length(tree1coveragesim) c(mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.90, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.95, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.99, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.999, m, n-m))) containsTruth_HIV_20 <- containsTruth ################################################ #################### n=50 ################################################ trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated50/split/trees/") length(trees) ## should be 50000 for (tree in trees) { path <- paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree 
Inference Analysis/Coverage/coverage_hiv/generated50/split/trees/", tree, sep="") tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated50/trees/", tree,sep=""), row.names = F, quote = F, col.names = F) } batches <- 1000 containsTruth <- rep(NA, batches) containsTruthAbs <- rep(NA, batches); mu0Abs <- c(0.002854010, 0.0379729) batchSize <- length(trees)/ batches trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated50/trees/", full.names = T) for (batchNumber in 1:batches) { path <- trees[(batchNumber - 1)*batchSize + 1] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = F) for (j in 2:batchSize) { path <- trees[(batchNumber - 1)*batchSize + j] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = T) } system('java -jar coverage_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/maps.R") system('java -jar coverage_truth_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/map_truth.R") observations <- matrix(NA, ncol = length(tree1coveragesim), nrow = batchSize) for (i in 1:batchSize) { observations[i, ] <- get(paste("tree", i, "coveragesim", sep="")) } mu0 <- trueTreeLogMap xbar = apply(observations, 2, mean) S = cov(observations) n=batchSize; m=length(tree1coveragesim) containsTruth[batchNumber] <- t(xbar - mu0)%*%solve(S)%*%(xbar - mu0) 
containsTruthAbs[batchNumber] <- t(xbar - mu0Abs)%*%solve(S)%*%(xbar - mu0Abs) } n=batchSize; m=length(tree1coveragesim) c(mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.90, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.95, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.99, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.999, m, n-m))) containsTruth_HIV_50 <- containsTruth ################################################ #################### n=100 ################################################ trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated100/split/trees/") length(trees) ## should be 100000 for (tree in trees) { path <- paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated100/split/trees/", tree, sep="") tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated100/trees/", tree,sep=""), row.names = F, quote = F, col.names = F) } batches <- 1000 containsTruth <- rep(NA, batches) containsTruthAbs <- rep(NA, batches); mu0Abs <- c(0.002854010, 0.0379729) batchSize <- length(trees)/ batches trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated100/trees/", full.names = T) for (batchNumber in 1:batches) { ## skip 725 & 815, not working? 
path <- trees[(batchNumber - 1)*batchSize + 1] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = F) for (j in 2:batchSize) { path <- trees[(batchNumber - 1)*batchSize + j] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = T) } system('java -jar coverage_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/maps.R") system('java -jar coverage_truth_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/map_truth.R") observations <- matrix(NA, ncol = length(tree1coveragesim), nrow = batchSize) for (i in 1:batchSize) { observations[i, ] <- get(paste("tree", i, "coveragesim", sep="")) } mu0 <- trueTreeLogMap xbar = apply(observations, 2, mean) S = cov(observations) n=batchSize; m=length(tree1coveragesim) containsTruth[batchNumber] <- t(xbar - mu0)%*%solve(S)%*%(xbar - mu0) containsTruthAbs[batchNumber] <- t(xbar - mu0Abs)%*%solve(S)%*%(xbar - mu0Abs) } n=batchSize; m=length(tree1coveragesim) containsTruth_HIV_100 <- containsTruth m=2 n=20 100*c(mean(containsTruth_HIV_20 < (m*(n-1))/(n*(n-m))*qf(0.90, m, n-m)), mean(containsTruth_HIV_20 < (m*(n-1))/(n*(n-m))*qf(0.95, m, n-m)), mean(containsTruth_HIV_20 < (m*(n-1))/(n*(n-m))*qf(0.99, m, n-m)), mean(containsTruth_HIV_20 < (m*(n-1))/(n*(n-m))*qf(0.999, m, n-m))) n=50 100*c(mean(containsTruth_HIV_50 < (m*(n-1))/(n*(n-m))*qf(0.90, m, n-m)), mean(containsTruth_HIV_50 < (m*(n-1))/(n*(n-m))*qf(0.95, m, n-m)), mean(containsTruth_HIV_50 < (m*(n-1))/(n*(n-m))*qf(0.99, m, n-m)), mean(containsTruth_HIV_50 < (m*(n-1))/(n*(n-m))*qf(0.999, 
m, n-m))) n=100 100*c(mean(containsTruth_HIV_100 < (m*(n-1))/(n*(n-m))*qf(0.90, m, n-m)), mean(containsTruth_HIV_100 < (m*(n-1))/(n*(n-m))*qf(0.95, m, n-m)), mean(containsTruth_HIV_100 < (m*(n-1))/(n*(n-m))*qf(0.99, m, n-m)), mean(containsTruth_HIV_100 < (m*(n-1))/(n*(n-m))*qf(0.999, m, n-m))) ### Old stuff from debugging ## Soln turned out to be brackets error. Doh! # ### sim 1 # lim1 <- 0.5 # plot(0,0,xlim = c(-lim1, lim1), ylim=c(-lim1, lim1)) # points(observations) # points(xbar[1], xbar[2], pch=16) # points(mu0[1], mu0[2], col="red") # grid1 <- cbind(rep(seq(min(observations[,1]), max(observations[,1]), length=100), each=100), # rep(seq(min(observations[,2]), max(observations[,2]), length=100), 100)) # grid1 <- cbind(rep(seq(-lim1, lim1, length=100), each=100), # rep(seq(-lim1, lim1, length=100), 100)) # for (i in 1:dim(grid1)[1]) { # tmp <- t(xbar - grid1[i, ])%*%solve(S)%*%(xbar - grid1[i, ]) # if (tmp < (m*(n-1))*qf(0.95, m, n-m)/n*(n-m)) points(grid1[i, 1], grid1[i, 2], col="grey") # if (tmp < qf(0.95, m, n-m)) points(grid1[i, 1], grid1[i, 2], col="blue") # } # ## conf int way too large # sqrt(m*(n-1)*qf(alpha, m, n-m)/(n*(n-m))) # # dim((xbar - grid1)%*%solve(S)%*%t(xbar - grid1)) # # points(grid1) # ### # plot(pts, cex=0.5, pch=16) # points(pts_true, cex=0.5, pch=16, col="blue") # points(0.002854010, 0.0379729, col="red",pch=16) ## true mean # points(apply(pts, 2, mean)[1],apply(pts, 2, mean)[2], col="green", pch=16)
/Coverage/hiv/hiv_R.R
no_license
adw96/ConfidenceSets
R
false
false
11,245
r
# TODO(AW) make this reproducible! ## a script to investigate the coverage of my confidence set procedure ## HIV dataset ##### first: run bash script ### HIV dataset setwd("Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/") source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/miscellaneous_phylogenetics_programs.R") ################################################ #################### n=20 ################################################ trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated20/split/trees/") length(trees) ## should be 20000 for (tree in trees) { path <- paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated20/split/trees/", tree, sep="") tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated20/trees/", tree,sep=""), row.names = F, quote = F, col.names = F) } ### create 1000 batches of 20 trees, run the program on each batch, see if covers batches <- 1000 containsTruth <- rep(NA, batches) containsTruthAbs <- rep(NA, batches); mu0Abs <- c(0.002854010, 0.0379729) batchSize <- length(trees)/ batches trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated20/trees/", full.names = T) pts <- matrix(NA, nrow=batches, ncol=2) pts_true <- matrix(NA, nrow=batches, ncol=2) covs <- list() for (batchNumber in 1:batches) { ### create the batch path <- trees[(batchNumber - 1)*batchSize + 1] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = F) for (j in 2:batchSize) { 
path <- trees[(batchNumber - 1)*batchSize + j] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = T) } ### run the script to get the log maps, and load them system('java -jar coverage_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/maps.R") ### get the log map of the true tree system('java -jar coverage_truth_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/map_truth.R") ### check if true tree is in confidence set observations <- matrix(NA, ncol = length(tree1coveragesim), nrow = batchSize) for (i in 1:batchSize) { observations[i, ] <- get(paste("tree", i, "coveragesim", sep="")) } mu0 <- trueTreeLogMap xbar = apply(observations, 2, mean) S = cov(observations) n=batchSize; m=length(tree1coveragesim) containsTruth[batchNumber] <- t(xbar - mu0)%*%solve(S)%*%(xbar - mu0) containsTruthAbs[batchNumber] <- t(xbar - mu0Abs)%*%solve(S)%*%(xbar - mu0Abs) # pts[batchNumber, ] <- xbar # pts_true[batchNumber, ] <- mu0 # covs[[batchNumber]] <- S } n=batchSize; m=length(tree1coveragesim) c(mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.90, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.95, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.99, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.999, m, n-m))) containsTruth_HIV_20 <- containsTruth ################################################ #################### n=50 ################################################ trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated50/split/trees/") length(trees) ## should be 50000 for (tree in trees) { path <- paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree 
Inference Analysis/Coverage/coverage_hiv/generated50/split/trees/", tree, sep="") tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated50/trees/", tree,sep=""), row.names = F, quote = F, col.names = F) } batches <- 1000 containsTruth <- rep(NA, batches) containsTruthAbs <- rep(NA, batches); mu0Abs <- c(0.002854010, 0.0379729) batchSize <- length(trees)/ batches trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated50/trees/", full.names = T) for (batchNumber in 1:batches) { path <- trees[(batchNumber - 1)*batchSize + 1] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = F) for (j in 2:batchSize) { path <- trees[(batchNumber - 1)*batchSize + j] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = T) } system('java -jar coverage_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/maps.R") system('java -jar coverage_truth_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/map_truth.R") observations <- matrix(NA, ncol = length(tree1coveragesim), nrow = batchSize) for (i in 1:batchSize) { observations[i, ] <- get(paste("tree", i, "coveragesim", sep="")) } mu0 <- trueTreeLogMap xbar = apply(observations, 2, mean) S = cov(observations) n=batchSize; m=length(tree1coveragesim) containsTruth[batchNumber] <- t(xbar - mu0)%*%solve(S)%*%(xbar - mu0) 
containsTruthAbs[batchNumber] <- t(xbar - mu0Abs)%*%solve(S)%*%(xbar - mu0Abs) } n=batchSize; m=length(tree1coveragesim) c(mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.90, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.95, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.99, m, n-m)), mean(containsTruth < (m*(n-1))/(n*(n-m))*qf(0.999, m, n-m))) containsTruth_HIV_50 <- containsTruth ################################################ #################### n=100 ################################################ trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated100/split/trees/") length(trees) ## should be 100000 for (tree in trees) { path <- paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated100/split/trees/", tree, sep="") tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated100/trees/", tree,sep=""), row.names = F, quote = F, col.names = F) } batches <- 1000 containsTruth <- rep(NA, batches) containsTruthAbs <- rep(NA, batches); mu0Abs <- c(0.002854010, 0.0379729) batchSize <- length(trees)/ batches trees <- list.files(path = "/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/coverage_hiv/generated100/trees/", full.names = T) for (batchNumber in 1:batches) { ## skip 725 & 815, not working? 
path <- trees[(batchNumber - 1)*batchSize + 1] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = F) for (j in 2:batchSize) { path <- trees[(batchNumber - 1)*batchSize + j] tree1 <- as.character(read.table(path)[1,1]) write.table(convert_tree(tree1), paste("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/test1.txt"), row.names = F, quote = F, col.names = F, append = T) } system('java -jar coverage_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/maps.R") system('java -jar coverage_truth_java.jar') source("/Users/adw96/Documents/Phylogenetics/Tree Inference/Tree Inference Analysis/Coverage/generated/map_truth.R") observations <- matrix(NA, ncol = length(tree1coveragesim), nrow = batchSize) for (i in 1:batchSize) { observations[i, ] <- get(paste("tree", i, "coveragesim", sep="")) } mu0 <- trueTreeLogMap xbar = apply(observations, 2, mean) S = cov(observations) n=batchSize; m=length(tree1coveragesim) containsTruth[batchNumber] <- t(xbar - mu0)%*%solve(S)%*%(xbar - mu0) containsTruthAbs[batchNumber] <- t(xbar - mu0Abs)%*%solve(S)%*%(xbar - mu0Abs) } n=batchSize; m=length(tree1coveragesim) containsTruth_HIV_100 <- containsTruth m=2 n=20 100*c(mean(containsTruth_HIV_20 < (m*(n-1))/(n*(n-m))*qf(0.90, m, n-m)), mean(containsTruth_HIV_20 < (m*(n-1))/(n*(n-m))*qf(0.95, m, n-m)), mean(containsTruth_HIV_20 < (m*(n-1))/(n*(n-m))*qf(0.99, m, n-m)), mean(containsTruth_HIV_20 < (m*(n-1))/(n*(n-m))*qf(0.999, m, n-m))) n=50 100*c(mean(containsTruth_HIV_50 < (m*(n-1))/(n*(n-m))*qf(0.90, m, n-m)), mean(containsTruth_HIV_50 < (m*(n-1))/(n*(n-m))*qf(0.95, m, n-m)), mean(containsTruth_HIV_50 < (m*(n-1))/(n*(n-m))*qf(0.99, m, n-m)), mean(containsTruth_HIV_50 < (m*(n-1))/(n*(n-m))*qf(0.999, 
m, n-m))) n=100 100*c(mean(containsTruth_HIV_100 < (m*(n-1))/(n*(n-m))*qf(0.90, m, n-m)), mean(containsTruth_HIV_100 < (m*(n-1))/(n*(n-m))*qf(0.95, m, n-m)), mean(containsTruth_HIV_100 < (m*(n-1))/(n*(n-m))*qf(0.99, m, n-m)), mean(containsTruth_HIV_100 < (m*(n-1))/(n*(n-m))*qf(0.999, m, n-m))) ### Old stuff from debugging ## Soln turned out to be brackets error. Doh! # ### sim 1 # lim1 <- 0.5 # plot(0,0,xlim = c(-lim1, lim1), ylim=c(-lim1, lim1)) # points(observations) # points(xbar[1], xbar[2], pch=16) # points(mu0[1], mu0[2], col="red") # grid1 <- cbind(rep(seq(min(observations[,1]), max(observations[,1]), length=100), each=100), # rep(seq(min(observations[,2]), max(observations[,2]), length=100), 100)) # grid1 <- cbind(rep(seq(-lim1, lim1, length=100), each=100), # rep(seq(-lim1, lim1, length=100), 100)) # for (i in 1:dim(grid1)[1]) { # tmp <- t(xbar - grid1[i, ])%*%solve(S)%*%(xbar - grid1[i, ]) # if (tmp < (m*(n-1))*qf(0.95, m, n-m)/n*(n-m)) points(grid1[i, 1], grid1[i, 2], col="grey") # if (tmp < qf(0.95, m, n-m)) points(grid1[i, 1], grid1[i, 2], col="blue") # } # ## conf int way too large # sqrt(m*(n-1)*qf(alpha, m, n-m)/(n*(n-m))) # # dim((xbar - grid1)%*%solve(S)%*%t(xbar - grid1)) # # points(grid1) # ### # plot(pts, cex=0.5, pch=16) # points(pts_true, cex=0.5, pch=16, col="blue") # points(0.002854010, 0.0379729, col="red",pch=16) ## true mean # points(apply(pts, 2, mean)[1],apply(pts, 2, mean)[2], col="green", pch=16)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllClasses.R \docType{class} \name{CountDgeResult-class} \alias{CountDgeResult-class} \title{Object that contains count data, dgeTables, and sigFilter} \description{ Object that contains count data, dgeTables, and sigFilter } \section{Slots}{ \describe{ \item{\code{dgeTables}}{A list of dgeTable} \item{\code{sigFilter}}{Significantly regulated gene filter} }} \note{ The object is used only for inheritance }
/man/CountDgeResult-class.Rd
no_license
bedapub/ribiosNGS
R
false
true
492
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllClasses.R \docType{class} \name{CountDgeResult-class} \alias{CountDgeResult-class} \title{Object that contains count data, dgeTables, and sigFilter} \description{ Object that contains count data, dgeTables, and sigFilter } \section{Slots}{ \describe{ \item{\code{dgeTables}}{A list of dgeTable} \item{\code{sigFilter}}{Significantly regulated gene filter} }} \note{ The object is used only for inheritance }
# Layouts #' Extracts numeric values #' @param value Value to be converted to numeric #' @return Numeric value get_numeric <- function(value) as.numeric(gsub("([0-9]+).*$", "\\1", value)) #' Creates div containing children elements of sidebar panel #' #' @param ... Container's children elements #' @param width Width of sidebar panel container as relative value #' @rdname sidebar_layout #' @export sidebar_panel <- function(..., width = 1) { list(children = div(...), width = get_numeric(width)) } #' Creates div containing children elements of main panel #' #' @param ... Container's children elements #' @param width Width of main panel container as relative value #' @rdname sidebar_layout #' @export main_panel <- function(..., width = 3) { list(children = div(...), width = get_numeric(width)) } #' Creates grid layout composed of sidebar and main panels #' #' @param sidebar_panel Sidebar panel component #' @param main_panel Main panel component #' @param mirrored If TRUE sidebar is located on the right side, #' if FALSE - on the left side (default) #' @param min_height Sidebar layout container keeps the minimum height, if #' specified. It should be formatted as a string with css units #' @param container_style CSS declarations for grid container #' @param area_styles List of CSS declarations for each grid area inside #' @param sidebarPanel same as \code{sidebar_panel} #' @param mainPanel same as \code{main_panel} #' @param position vector with position of sidebar elements in order sidebar, main #' @param fluid TRUE to use fluid layout; FALSE to use fixed layout. 
#' #' @return Container with sidebar and main panels #' @examples #' if (interactive()){ #' library(shiny) #' library(shiny.semantic) #' ui <- semanticPage( #' titlePanel("Hello Shiny!"), #' sidebar_layout( #' sidebar_panel( #' shiny.semantic::sliderInput("obs", #' "Number of observations:", #' min = 0, #' max = 1000, #' value = 500), #' width = 3 #' ), #' main_panel( #' plotOutput("distPlot"), #' width = 4 #' ), #' mirrored = TRUE #' ) #' ) #' server <- function(input, output) { #' output$distPlot <- renderPlot({ #' hist(rnorm(input$obs)) #' }) #' } #' shinyApp(ui, server) #' } #' @rdname sidebar_layout #' @export sidebar_layout <- function(sidebar_panel, main_panel, mirrored = FALSE, min_height = "auto", container_style = "", area_styles = list( sidebar_panel = "", main_panel = "")) { sidebar_children <- sidebar_panel$children main_children <- main_panel$children sidebar_width <- sidebar_panel$width main_width <- main_panel$width # set normal or mirrored sidebar layout layout_areas <- c("sidebar_panel", "main_panel") layout_cols <- c(glue::glue("{sidebar_width}fr"), glue::glue("{main_width}fr")) layout <- grid_template(default = list( areas = rbind(if(mirrored) rev(layout_areas) else layout_areas), cols_width = if(mirrored) rev(layout_cols) else layout_cols )) # grid container's default styling container_style <- glue::glue(" gap: 15px; height: auto; min-height: {min_height}; {container_style} ") # grid container's children default styling area_styles <- list( sidebar_panel = glue::glue(" background-color: #f5f5f5; border-radius: 5px; box-shadow: 0 1px 3px rgba(0,0,0,0.4); display: flex; flex-direction: column; min-width: 160px; padding: 10px; {area_styles$sidebar_panel} "), main_panel = glue::glue(" background-color: #fff; border-radius: 5px; box-shadow: 0 1px 3px rgba(0,0,0,0.4); display: flex; flex-direction: column; min-width: 160px; padding: 10px; {area_styles$main_panel} ") ) grid( grid_template = layout, container_style, area_styles, sidebar_panel = 
sidebar_children, main_panel = main_children ) } #' @rdname sidebar_layout sidebarPanel <- function(..., width = 6) { sidebar_panel(..., width = width) } #' @rdname sidebar_layout mainPanel <- function(..., width = 10) { main_panel(..., width = width) } #' @rdname sidebar_layout sidebarLayout <- function(sidebarPanel, mainPanel, position = c("left", "right"), fluid = TRUE) { sidebar_layout ( sidebar_panel, main_panel, mirrored = position == "right" ) } #' Split layout #' #' Lays out elements horizontally, dividing the available horizontal space into #' equal parts (by default) or specified by parameters. #' #' @param ... Unnamed arguments will become child elements of the layout. #' @param cell_widths Character or numeric vector indicating the widths of the #' individual cells. Recycling will be used if needed. #' @param cell_args character with additional attributes that should be used for #' each cell of the layout. #' @param style character with style of outer box surrounding all elements #' @param cellWidths same as \code{cell_widths} #' @param cellArgs same as \code{cell_args} #' #' @return split layout grid object #' @export #' #' @rdname split_layout #' #' @examples #' if (interactive()) { #' #' Server code used for all examples #' server <- function(input, output) { #' output$plot1 <- renderPlot(plot(cars)) #' output$plot2 <- renderPlot(plot(pressure)) #' output$plot3 <- renderPlot(plot(AirPassengers)) #' } #' #' Equal sizing #' ui <- semanticPage( #' split_layout( #' plotOutput("plot1"), #' plotOutput("plot2") #' ) #' ) #' shinyApp(ui, server) #' #' Custom widths #' ui <- semanticPage( #' split_layout(cell_widths = c("25%", "75%"), #' plotOutput("plot1"), #' plotOutput("plot2") #' ) #' ) #' shinyApp(ui, server) #' #' All cells at 300 pixels wide, with cell padding #' #' and a border around everything #' ui <- semanticPage( #' split_layout( #' cell_widths = 300, #' cell_args = "padding: 6px;", #' style = "border: 1px solid silver;", #' plotOutput("plot1"), 
#' plotOutput("plot2"), #' plotOutput("plot3") #' ) #' ) #' shinyApp(ui, server) #' } split_layout <- function(..., cell_widths = NULL, cell_args = "", style = NULL){ if (class(cell_args) == "list") stop("In this implementation of `split_layout` cell_args must be character with style css") ui_elements <- list(...) n_elems <- length(ui_elements) columns <- paste0("col", seq(1, n_elems)) names(ui_elements) <- columns if (is.null(cell_widths)) cell_widths <- rep("1fr", n_elems) layout <- grid_template( default = list( areas = rbind(columns), cols_width = cell_widths ) ) container_style <- if (is.null(style)) "background: #d8d8d8; margin: 5px;" else style area_styles <- as.list(rep(cell_args, n_elems)) names(area_styles) <- columns args_list <- ui_elements args_list$grid_template <- layout args_list$container_style <- container_style args_list$area_styles <- area_styles do.call(grid, args_list) } #' @export #' @rdname split_layout splitLayout <- function(..., cellWidths = NULL, cellArgs = "", style = NULL) { split_layout(..., cell_widths = cellWidths, cell_args = cellArgs, style = style) } #' Vertical layout #' #' Lays out elements vertically, one by one below one another. #' #' @param ... Unnamed arguments will become child elements of the layout. #' @param rows_heights Character or numeric vector indicating the widths of the #' individual cells. Recycling will be used if needed. #' @param cell_args character with additional attributes that should be used for #' each cell of the layout. 
#' @param adjusted_to_page if TRUE it adjust elements position in equal spaces to #' the size of the page #' @param fluid not supported yet (here for consistency with \code{shiny}) #' #' @return vertical layout grid object #' @export #' @rdname vertical_layout #' @examples #' if (interactive()) { #' ui <- semanticPage( #' verticalLayout( #' a(href="http://example.com/link1", "Link One"), #' a(href="http://example.com/link2", "Link Two"), #' a(href="http://example.com/link3", "Link Three") #' ) #' ) #' shinyApp(ui, server = function(input, output) { }) #' } #' if (interactive()) { #' ui <- semanticPage( #' vertical_layout(h1("Title"), h4("Subtitle"), p("paragraph"), h3("footer")) #' ) #' shinyApp(ui, server = function(input, output) { }) #' } vertical_layout <- function(..., rows_heights = NULL, cell_args = "", adjusted_to_page = TRUE) { ui_elements <- list(...) n_elems <- length(ui_elements) rows <- paste0("row", seq(1, n_elems)) names(ui_elements) <- rows if (is.null(rows_heights)) rows_heights <- rep("auto", n_elems) if (length(rows_heights) == 1) rows_heights <- rep(rows_heights, n_elems) layout <- grid_template( default = list( areas = t(rbind(rows)), rows_height = rows_heights, cols_width = c("auto") ) ) area_styles <- as.list(rep(cell_args, n_elems)) names(area_styles) <- rows args_list <- ui_elements args_list$grid_template <- layout args_list$area_styles <- area_styles args_list$container_style <- if (adjusted_to_page) "" else "align-content: start;" do.call(grid, args_list) } #' @export #' @rdname vertical_layout verticalLayout <- function(..., fluid = NULL) { if (!is.null(fluid)) warn_unsupported_args(c("fluid")) vertical_layout(..., adjusted_to_page = FALSE) } #' Flow layout #' #' Lays out elements in a left-to-right, top-to-bottom arrangement. #' The elements on a given row will be top-aligned with each other. #' #' The width of the elements and spacing between them is configurable. 
#' Lengths can be given as numeric values (interpreted as pixels) #' or character values (interpreted as CSS lengths). #' With the default settings this layout closely resembles the `flowLayout` #' from Shiny. #' #' @param ... Unnamed arguments will become child elements of the layout. #' Named arguments will become HTML attributes on the outermost tag. #' @param cell_args Any additional attributes that should be used for each cell #' of the layout. #' @param min_cell_width The minimum width of the cells. #' @param max_cell_width The maximum width of the cells. #' @param column_gap The spacing between columns. #' @param row_gap The spacing between rows. #' #' @md #' @export #' @rdname flow_layout #' #' @examples #' if (interactive()) { #' ui <- semanticPage( #' flow_layout( #' numericInput("rows", "How many rows?", 5), #' selectInput("letter", "Which letter?", LETTERS), #' sliderInput("value", "What value?", 0, 100, 50) #' ) #' ) #' shinyApp(ui, server = function(input, output) {}) #' } flow_layout <- function(..., cell_args = list(), min_cell_width = "208px", max_cell_width = "1fr",column_gap = "12px", row_gap = "0px") { if(max_cell_width != "1fr") max_cell_width <- validateCssUnit(max_cell_width) container_style <- glue::glue( "display: grid;", "grid-template-columns: repeat(auto-fill, minmax({validateCssUnit(min_cell_width)}, {max_cell_width}));", "column-gap: {shiny::validateCssUnit(column_gap)};", "row-gap: {shiny::validateCssUnit(row_gap)};" ) item_style <- "align-self: start;" args <- split_args(...) children <- lapply(args$positional, function(child) { do.call(shiny::tags$div, c(style = item_style, cell_args, list(child))) }) attributes <- args$named do.call(shiny::tags$div, c(style = container_style, attributes, children)) } #' @param cellArgs Same as `cell_args`. #' #' @md #' @export #' @rdname flow_layout flowLayout <- function(..., cellArgs = list()) { flow_layout(..., cell_args = cellArgs) }
/R/layouts.R
permissive
pepecebrian/shiny.semantic
R
false
false
12,141
r
# Layouts #' Extracts numeric values #' @param value Value to be converted to numeric #' @return Numeric value get_numeric <- function(value) as.numeric(gsub("([0-9]+).*$", "\\1", value)) #' Creates div containing children elements of sidebar panel #' #' @param ... Container's children elements #' @param width Width of sidebar panel container as relative value #' @rdname sidebar_layout #' @export sidebar_panel <- function(..., width = 1) { list(children = div(...), width = get_numeric(width)) } #' Creates div containing children elements of main panel #' #' @param ... Container's children elements #' @param width Width of main panel container as relative value #' @rdname sidebar_layout #' @export main_panel <- function(..., width = 3) { list(children = div(...), width = get_numeric(width)) } #' Creates grid layout composed of sidebar and main panels #' #' @param sidebar_panel Sidebar panel component #' @param main_panel Main panel component #' @param mirrored If TRUE sidebar is located on the right side, #' if FALSE - on the left side (default) #' @param min_height Sidebar layout container keeps the minimum height, if #' specified. It should be formatted as a string with css units #' @param container_style CSS declarations for grid container #' @param area_styles List of CSS declarations for each grid area inside #' @param sidebarPanel same as \code{sidebar_panel} #' @param mainPanel same as \code{main_panel} #' @param position vector with position of sidebar elements in order sidebar, main #' @param fluid TRUE to use fluid layout; FALSE to use fixed layout. 
#' #' @return Container with sidebar and main panels #' @examples #' if (interactive()){ #' library(shiny) #' library(shiny.semantic) #' ui <- semanticPage( #' titlePanel("Hello Shiny!"), #' sidebar_layout( #' sidebar_panel( #' shiny.semantic::sliderInput("obs", #' "Number of observations:", #' min = 0, #' max = 1000, #' value = 500), #' width = 3 #' ), #' main_panel( #' plotOutput("distPlot"), #' width = 4 #' ), #' mirrored = TRUE #' ) #' ) #' server <- function(input, output) { #' output$distPlot <- renderPlot({ #' hist(rnorm(input$obs)) #' }) #' } #' shinyApp(ui, server) #' } #' @rdname sidebar_layout #' @export sidebar_layout <- function(sidebar_panel, main_panel, mirrored = FALSE, min_height = "auto", container_style = "", area_styles = list( sidebar_panel = "", main_panel = "")) { sidebar_children <- sidebar_panel$children main_children <- main_panel$children sidebar_width <- sidebar_panel$width main_width <- main_panel$width # set normal or mirrored sidebar layout layout_areas <- c("sidebar_panel", "main_panel") layout_cols <- c(glue::glue("{sidebar_width}fr"), glue::glue("{main_width}fr")) layout <- grid_template(default = list( areas = rbind(if(mirrored) rev(layout_areas) else layout_areas), cols_width = if(mirrored) rev(layout_cols) else layout_cols )) # grid container's default styling container_style <- glue::glue(" gap: 15px; height: auto; min-height: {min_height}; {container_style} ") # grid container's children default styling area_styles <- list( sidebar_panel = glue::glue(" background-color: #f5f5f5; border-radius: 5px; box-shadow: 0 1px 3px rgba(0,0,0,0.4); display: flex; flex-direction: column; min-width: 160px; padding: 10px; {area_styles$sidebar_panel} "), main_panel = glue::glue(" background-color: #fff; border-radius: 5px; box-shadow: 0 1px 3px rgba(0,0,0,0.4); display: flex; flex-direction: column; min-width: 160px; padding: 10px; {area_styles$main_panel} ") ) grid( grid_template = layout, container_style, area_styles, sidebar_panel = 
sidebar_children, main_panel = main_children ) } #' @rdname sidebar_layout sidebarPanel <- function(..., width = 6) { sidebar_panel(..., width = width) } #' @rdname sidebar_layout mainPanel <- function(..., width = 10) { main_panel(..., width = width) } #' @rdname sidebar_layout sidebarLayout <- function(sidebarPanel, mainPanel, position = c("left", "right"), fluid = TRUE) { sidebar_layout ( sidebar_panel, main_panel, mirrored = position == "right" ) } #' Split layout #' #' Lays out elements horizontally, dividing the available horizontal space into #' equal parts (by default) or specified by parameters. #' #' @param ... Unnamed arguments will become child elements of the layout. #' @param cell_widths Character or numeric vector indicating the widths of the #' individual cells. Recycling will be used if needed. #' @param cell_args character with additional attributes that should be used for #' each cell of the layout. #' @param style character with style of outer box surrounding all elements #' @param cellWidths same as \code{cell_widths} #' @param cellArgs same as \code{cell_args} #' #' @return split layout grid object #' @export #' #' @rdname split_layout #' #' @examples #' if (interactive()) { #' #' Server code used for all examples #' server <- function(input, output) { #' output$plot1 <- renderPlot(plot(cars)) #' output$plot2 <- renderPlot(plot(pressure)) #' output$plot3 <- renderPlot(plot(AirPassengers)) #' } #' #' Equal sizing #' ui <- semanticPage( #' split_layout( #' plotOutput("plot1"), #' plotOutput("plot2") #' ) #' ) #' shinyApp(ui, server) #' #' Custom widths #' ui <- semanticPage( #' split_layout(cell_widths = c("25%", "75%"), #' plotOutput("plot1"), #' plotOutput("plot2") #' ) #' ) #' shinyApp(ui, server) #' #' All cells at 300 pixels wide, with cell padding #' #' and a border around everything #' ui <- semanticPage( #' split_layout( #' cell_widths = 300, #' cell_args = "padding: 6px;", #' style = "border: 1px solid silver;", #' plotOutput("plot1"), 
#' plotOutput("plot2"), #' plotOutput("plot3") #' ) #' ) #' shinyApp(ui, server) #' } split_layout <- function(..., cell_widths = NULL, cell_args = "", style = NULL){ if (class(cell_args) == "list") stop("In this implementation of `split_layout` cell_args must be character with style css") ui_elements <- list(...) n_elems <- length(ui_elements) columns <- paste0("col", seq(1, n_elems)) names(ui_elements) <- columns if (is.null(cell_widths)) cell_widths <- rep("1fr", n_elems) layout <- grid_template( default = list( areas = rbind(columns), cols_width = cell_widths ) ) container_style <- if (is.null(style)) "background: #d8d8d8; margin: 5px;" else style area_styles <- as.list(rep(cell_args, n_elems)) names(area_styles) <- columns args_list <- ui_elements args_list$grid_template <- layout args_list$container_style <- container_style args_list$area_styles <- area_styles do.call(grid, args_list) } #' @export #' @rdname split_layout splitLayout <- function(..., cellWidths = NULL, cellArgs = "", style = NULL) { split_layout(..., cell_widths = cellWidths, cell_args = cellArgs, style = style) } #' Vertical layout #' #' Lays out elements vertically, one by one below one another. #' #' @param ... Unnamed arguments will become child elements of the layout. #' @param rows_heights Character or numeric vector indicating the widths of the #' individual cells. Recycling will be used if needed. #' @param cell_args character with additional attributes that should be used for #' each cell of the layout. 
#' @param adjusted_to_page if TRUE it adjust elements position in equal spaces to #' the size of the page #' @param fluid not supported yet (here for consistency with \code{shiny}) #' #' @return vertical layout grid object #' @export #' @rdname vertical_layout #' @examples #' if (interactive()) { #' ui <- semanticPage( #' verticalLayout( #' a(href="http://example.com/link1", "Link One"), #' a(href="http://example.com/link2", "Link Two"), #' a(href="http://example.com/link3", "Link Three") #' ) #' ) #' shinyApp(ui, server = function(input, output) { }) #' } #' if (interactive()) { #' ui <- semanticPage( #' vertical_layout(h1("Title"), h4("Subtitle"), p("paragraph"), h3("footer")) #' ) #' shinyApp(ui, server = function(input, output) { }) #' } vertical_layout <- function(..., rows_heights = NULL, cell_args = "", adjusted_to_page = TRUE) { ui_elements <- list(...) n_elems <- length(ui_elements) rows <- paste0("row", seq(1, n_elems)) names(ui_elements) <- rows if (is.null(rows_heights)) rows_heights <- rep("auto", n_elems) if (length(rows_heights) == 1) rows_heights <- rep(rows_heights, n_elems) layout <- grid_template( default = list( areas = t(rbind(rows)), rows_height = rows_heights, cols_width = c("auto") ) ) area_styles <- as.list(rep(cell_args, n_elems)) names(area_styles) <- rows args_list <- ui_elements args_list$grid_template <- layout args_list$area_styles <- area_styles args_list$container_style <- if (adjusted_to_page) "" else "align-content: start;" do.call(grid, args_list) } #' @export #' @rdname vertical_layout verticalLayout <- function(..., fluid = NULL) { if (!is.null(fluid)) warn_unsupported_args(c("fluid")) vertical_layout(..., adjusted_to_page = FALSE) } #' Flow layout #' #' Lays out elements in a left-to-right, top-to-bottom arrangement. #' The elements on a given row will be top-aligned with each other. #' #' The width of the elements and spacing between them is configurable. 
#' Lengths can be given as numeric values (interpreted as pixels) #' or character values (interpreted as CSS lengths). #' With the default settings this layout closely resembles the `flowLayout` #' from Shiny. #' #' @param ... Unnamed arguments will become child elements of the layout. #' Named arguments will become HTML attributes on the outermost tag. #' @param cell_args Any additional attributes that should be used for each cell #' of the layout. #' @param min_cell_width The minimum width of the cells. #' @param max_cell_width The maximum width of the cells. #' @param column_gap The spacing between columns. #' @param row_gap The spacing between rows. #' #' @md #' @export #' @rdname flow_layout #' #' @examples #' if (interactive()) { #' ui <- semanticPage( #' flow_layout( #' numericInput("rows", "How many rows?", 5), #' selectInput("letter", "Which letter?", LETTERS), #' sliderInput("value", "What value?", 0, 100, 50) #' ) #' ) #' shinyApp(ui, server = function(input, output) {}) #' } flow_layout <- function(..., cell_args = list(), min_cell_width = "208px", max_cell_width = "1fr",column_gap = "12px", row_gap = "0px") { if(max_cell_width != "1fr") max_cell_width <- validateCssUnit(max_cell_width) container_style <- glue::glue( "display: grid;", "grid-template-columns: repeat(auto-fill, minmax({validateCssUnit(min_cell_width)}, {max_cell_width}));", "column-gap: {shiny::validateCssUnit(column_gap)};", "row-gap: {shiny::validateCssUnit(row_gap)};" ) item_style <- "align-self: start;" args <- split_args(...) children <- lapply(args$positional, function(child) { do.call(shiny::tags$div, c(style = item_style, cell_args, list(child))) }) attributes <- args$named do.call(shiny::tags$div, c(style = container_style, attributes, children)) } #' @param cellArgs Same as `cell_args`. #' #' @md #' @export #' @rdname flow_layout flowLayout <- function(..., cellArgs = list()) { flow_layout(..., cell_args = cellArgs) }
testlist <- list(x = integer(0), y = c(1768257321L, 676545880L, 1344299887L, 1853060140L, 1668247155L, 1948268086L, -11777024L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)) result <- do.call(diffrprojects:::dist_mat_absolute,testlist) str(result)
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609963011-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
451
r
testlist <- list(x = integer(0), y = c(1768257321L, 676545880L, 1344299887L, 1853060140L, 1668247155L, 1948268086L, -11777024L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)) result <- do.call(diffrprojects:::dist_mat_absolute,testlist) str(result)
testlist <- list(id = integer(0), x = c(-1.77548498085422e-64, -1.77548498085422e-64, -1.77548498085422e-64, -1.77548498085422e-64, -1.77548498085422e-64, -1.77548498085422e-64, -1.77537878441694e-64, -1.77548498085422e-64, -1.77548498085422e-64, 3.78621301386797e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0)) result <- do.call(ggforce:::enclose_points,testlist) str(result)
/ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1609956136-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
482
r
testlist <- list(id = integer(0), x = c(-1.77548498085422e-64, -1.77548498085422e-64, -1.77548498085422e-64, -1.77548498085422e-64, -1.77548498085422e-64, -1.77548498085422e-64, -1.77537878441694e-64, -1.77548498085422e-64, -1.77548498085422e-64, 3.78621301386797e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0)) result <- do.call(ggforce:::enclose_points,testlist) str(result)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/batchQC.R \name{batchQC_analyze} \alias{batchQC_analyze} \title{Checks for presence of batch effect and reports whether the batch needs to be adjusted} \usage{ batchQC_analyze(data.matrix, batch, mod = NULL) } \arguments{ \item{data.matrix}{Given data or simulated data from rnaseq_sim()} \item{batch}{Batch covariate} \item{mod}{Model matrix for outcome of interest and other covariates besides batch} } \value{ pca Principal Components Analysis object of the data } \description{ Checks for presence of batch effect and reports whether the batch needs to be adjusted } \examples{ nbatch <- 3 ncond <- 2 npercond <- 10 data.matrix <- rnaseq_sim(ngenes=50, nbatch=nbatch, ncond=ncond, npercond=npercond, ggstep=5, bbstep=15000, ccstep=10000, bvarstep=2, seed=1234) batch <- rep(1:nbatch, each=ncond*npercond) condition <- rep(rep(1:ncond, each=npercond), nbatch) pdata <- data.frame(batch, condition) modmatrix = model.matrix(~as.factor(condition), data=pdata) batchQC_analyze(data.matrix, batch, mod=modmatrix) }
/man/batchQC_analyze.Rd
no_license
dfjenkins3/BatchQC
R
false
true
1,108
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/batchQC.R \name{batchQC_analyze} \alias{batchQC_analyze} \title{Checks for presence of batch effect and reports whether the batch needs to be adjusted} \usage{ batchQC_analyze(data.matrix, batch, mod = NULL) } \arguments{ \item{data.matrix}{Given data or simulated data from rnaseq_sim()} \item{batch}{Batch covariate} \item{mod}{Model matrix for outcome of interest and other covariates besides batch} } \value{ pca Principal Components Analysis object of the data } \description{ Checks for presence of batch effect and reports whether the batch needs to be adjusted } \examples{ nbatch <- 3 ncond <- 2 npercond <- 10 data.matrix <- rnaseq_sim(ngenes=50, nbatch=nbatch, ncond=ncond, npercond=npercond, ggstep=5, bbstep=15000, ccstep=10000, bvarstep=2, seed=1234) batch <- rep(1:nbatch, each=ncond*npercond) condition <- rep(rep(1:ncond, each=npercond), nbatch) pdata <- data.frame(batch, condition) modmatrix = model.matrix(~as.factor(condition), data=pdata) batchQC_analyze(data.matrix, batch, mod=modmatrix) }
\name{rlbmat} \alias{rlbmat} \title{Block Uniform Logical Matrix Deviates} \description{ Generate a uniform logical matrix deviate with a possibly overlapping block structure. } \usage{ rlbmat(npat = 4, rows = 20, cols = 12, over = 4, noise = 0.01, prob = 0.8, perfect = FALSE) } \arguments{ \item{npat}{number of patterns.} \item{rows}{number of rows per pattern.} \item{cols}{number of columns per pattern.} \item{over}{number of additional columns per pattern that overlap.} \item{noise}{the probability of observing a one in the background (non-pattern) matrix.} \item{prob}{the probability of observing \code{TRUE} in a pattern.} \item{perfect}{option for overlap of the first and the last pattern.} } \details{ Implements a test case for \code{proximus}. } \value{ A logical matrix } \author{Christian Buchta} %\note{} \seealso{ \code{\link{lmplot}} and \code{\link{clmplot}} for plotting a logical matrix } \examples{ x <- rlbmat() lmplot(x) } \keyword{cluster}
/man/rlbmat.Rd
no_license
cran/cba
R
false
false
1,010
rd
\name{rlbmat} \alias{rlbmat} \title{Block Uniform Logical Matrix Deviates} \description{ Generate a uniform logical matrix deviate with a possibly overlapping block structure. } \usage{ rlbmat(npat = 4, rows = 20, cols = 12, over = 4, noise = 0.01, prob = 0.8, perfect = FALSE) } \arguments{ \item{npat}{number of patterns.} \item{rows}{number of rows per pattern.} \item{cols}{number of columns per pattern.} \item{over}{number of additional columns per pattern that overlap.} \item{noise}{the probability of observing a one in the background (non-pattern) matrix.} \item{prob}{the probability of observing \code{TRUE} in a pattern.} \item{perfect}{option for overlap of the first and the last pattern.} } \details{ Implements a test case for \code{proximus}. } \value{ A logical matrix } \author{Christian Buchta} %\note{} \seealso{ \code{\link{lmplot}} and \code{\link{clmplot}} for plotting a logical matrix } \examples{ x <- rlbmat() lmplot(x) } \keyword{cluster}
daily_cases_specimen_state_plot <- function(ds) { nds <- create_state_specimen_collection(ds) mov_avg <- seven_day_average(nds$NEW_CASES) g_title <- paste('Daily COVID-19 Cases by Specimen Collection Date in Tennessee') fdate <- format(nds$DATE, '%Y-%m-%d') export <- list(xval = fdate, yval = nds$NEW_CASES, barcolor = 'rgba(0, 182, 199, 0.5)', fillcolor = 'rgba(0, 182, 199, 0.25)', movingAverage = mov_avg, movingLineColor = 'rgb(0, 164, 179)', gtitle = g_title, ytitle = "New Cases (Specimen Collection Date)", type = "specimen") return(export) }
/plots/state/new_cases_specimen_state.R
no_license
adrianjselva/TN-COVID-19
R
false
false
718
r
daily_cases_specimen_state_plot <- function(ds) { nds <- create_state_specimen_collection(ds) mov_avg <- seven_day_average(nds$NEW_CASES) g_title <- paste('Daily COVID-19 Cases by Specimen Collection Date in Tennessee') fdate <- format(nds$DATE, '%Y-%m-%d') export <- list(xval = fdate, yval = nds$NEW_CASES, barcolor = 'rgba(0, 182, 199, 0.5)', fillcolor = 'rgba(0, 182, 199, 0.25)', movingAverage = mov_avg, movingLineColor = 'rgb(0, 164, 179)', gtitle = g_title, ytitle = "New Cases (Specimen Collection Date)", type = "specimen") return(export) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \name{ChaffClim} \alias{ChaffClim} \title{Daily climate data from 1965 to 2012.} \format{ A data frame with 17,520 rows and 3 variables. \describe{ \item{Date}{Date when climate was recorded (dd/mm/yyyy).} \item{Rain}{Average daily rainfall data in mm.} \item{Temp}{Maximum daily temperature in degrees centigrade.} } } \description{ Maximum daily temperature and average rainfall data from 1965 to 2012. Coincides with biological data from \code{\link{Chaff}}. }
/man/ChaffClim.Rd
no_license
LiamDBailey/climwin
R
false
true
563
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \name{ChaffClim} \alias{ChaffClim} \title{Daily climate data from 1965 to 2012.} \format{ A data frame with 17,520 rows and 3 variables. \describe{ \item{Date}{Date when climate was recorded (dd/mm/yyyy).} \item{Rain}{Average daily rainfall data in mm.} \item{Temp}{Maximum daily temperature in degrees centigrade.} } } \description{ Maximum daily temperature and average rainfall data from 1965 to 2012. Coincides with biological data from \code{\link{Chaff}}. }
context("get_kgram_freqs") test_that("return value has the correct structure", { f <- get_kgram_freqs(text = "some text", N = 3, dict = c("some", "text"), .preprocess = identity, EOS = "") expect_true(is.list(f)) expect_true(length(f) == 3) expect_true(all( c("N", "dict", ".preprocess", "EOS") %in% names(attributes(f)) )) expect_true(is.integer(attr(f, "N"))) expect_true(is.character(attr(f, "dict"))) expect_true(is.character(attr(f, "EOS"))) expect_true( is.function(attr(f, ".preprocess")) ) expect_identical(f[[1]], as_tibble(f[[1]])) }) test_that("input `N <= 0` produces error", { expect_error(get_kgram_freqs(text = "some text", N = 0, dict = c("some", "text"), .preprocess = identity, EOS = "") ) expect_error(get_kgram_freqs(text = "some text", N = -1, dict = c("some", "text"), .preprocess = identity, EOS = "") ) }) test_that("correct 1-gram and 2-gram counts on simple input", { input <- c("a a b a", "a b b a", "a c b", "b c a a b") dict <- c("a", "b") # N.B: 0L, 3L and 4L represent <BOS>, <EOS> and <UNK> respectively. expected_1grams <- tibble(w2 = c(1L, 2L, 3L, 4L), n = c(8L, 6L, 4L, 2L) ) %>% arrange(w2) expected_2grams <- tibble( w1 = c(0L, 0L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 4L, 4L), w2 = c(1L, 2L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L), n = c(3L, 1L, 2L, 3L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 1L) ) %>% arrange(w1, w2) get_kgram_freqs(text = input, N = 2, dict = dict, .preprocess = identity, EOS = "") -> freqs actual_1grams <- arrange(freqs[[1]], w2) actual_2grams <- arrange(freqs[[2]], w1, w2) expect_identical(expected_1grams, actual_1grams) expect_identical(expected_2grams, actual_2grams) }) test_that("correct 1-gram and 2-gram with some preprocessing", { input <- c("a A b A", "a B b a", "a C B", "b c A a b") dict <- c("a", "b") # N.B: 0L, 3L and 4L represent <BOS>, <EOS> and <UNK> respectively. 
expected_1grams <- tibble(w2 = c(1L, 2L, 3L, 4L), n = c(8L, 6L, 4L, 2L) ) %>% arrange(w2) expected_2grams <- tibble( w1 = c(0L, 0L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 4L, 4L), w2 = c(1L, 2L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L), n = c(3L, 1L, 2L, 3L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 1L) ) %>% arrange(w1, w2) get_kgram_freqs(text = input, N = 2, dict = dict, .preprocess = tolower, EOS = "") -> freqs actual_1grams <- arrange(freqs[[1]], w2) actual_2grams <- arrange(freqs[[2]], w1, w2) expect_identical(expected_1grams, actual_1grams) expect_identical(expected_2grams, actual_2grams) }) test_that("correct 1-gram and 2-gram counts with EOS token", { input <- c("/ a a b a / a b b a / a c b / b c a a b /") dict <- c("a", "b") # N.B: 0L, 3L and 4L represent <BOS>, <EOS> and <UNK> respectively. expected_1grams <- tibble(w2 = c(1L, 2L, 3L, 4L), n = c(8L, 6L, 4L, 2L) ) %>% arrange(w2) expected_2grams <- tibble( w1 = c(0L, 0L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 4L, 4L), w2 = c(1L, 2L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L), n = c(3L, 1L, 2L, 3L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 1L) ) %>% arrange(w1, w2) get_kgram_freqs(text = input, N = 2, dict = dict, .preprocess = identity, EOS = "/") -> freqs actual_1grams <- arrange(freqs[[1]], w2) actual_2grams <- arrange(freqs[[2]], w1, w2) expect_identical(expected_1grams, actual_1grams) expect_identical(expected_2grams, actual_2grams) })
/tests/testthat/test-get_kgram_freqs.R
no_license
minghao2016/sbo
R
false
false
4,429
r
context("get_kgram_freqs") test_that("return value has the correct structure", { f <- get_kgram_freqs(text = "some text", N = 3, dict = c("some", "text"), .preprocess = identity, EOS = "") expect_true(is.list(f)) expect_true(length(f) == 3) expect_true(all( c("N", "dict", ".preprocess", "EOS") %in% names(attributes(f)) )) expect_true(is.integer(attr(f, "N"))) expect_true(is.character(attr(f, "dict"))) expect_true(is.character(attr(f, "EOS"))) expect_true( is.function(attr(f, ".preprocess")) ) expect_identical(f[[1]], as_tibble(f[[1]])) }) test_that("input `N <= 0` produces error", { expect_error(get_kgram_freqs(text = "some text", N = 0, dict = c("some", "text"), .preprocess = identity, EOS = "") ) expect_error(get_kgram_freqs(text = "some text", N = -1, dict = c("some", "text"), .preprocess = identity, EOS = "") ) }) test_that("correct 1-gram and 2-gram counts on simple input", { input <- c("a a b a", "a b b a", "a c b", "b c a a b") dict <- c("a", "b") # N.B: 0L, 3L and 4L represent <BOS>, <EOS> and <UNK> respectively. expected_1grams <- tibble(w2 = c(1L, 2L, 3L, 4L), n = c(8L, 6L, 4L, 2L) ) %>% arrange(w2) expected_2grams <- tibble( w1 = c(0L, 0L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 4L, 4L), w2 = c(1L, 2L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L), n = c(3L, 1L, 2L, 3L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 1L) ) %>% arrange(w1, w2) get_kgram_freqs(text = input, N = 2, dict = dict, .preprocess = identity, EOS = "") -> freqs actual_1grams <- arrange(freqs[[1]], w2) actual_2grams <- arrange(freqs[[2]], w1, w2) expect_identical(expected_1grams, actual_1grams) expect_identical(expected_2grams, actual_2grams) }) test_that("correct 1-gram and 2-gram with some preprocessing", { input <- c("a A b A", "a B b a", "a C B", "b c A a b") dict <- c("a", "b") # N.B: 0L, 3L and 4L represent <BOS>, <EOS> and <UNK> respectively. 
expected_1grams <- tibble(w2 = c(1L, 2L, 3L, 4L), n = c(8L, 6L, 4L, 2L) ) %>% arrange(w2) expected_2grams <- tibble( w1 = c(0L, 0L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 4L, 4L), w2 = c(1L, 2L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L), n = c(3L, 1L, 2L, 3L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 1L) ) %>% arrange(w1, w2) get_kgram_freqs(text = input, N = 2, dict = dict, .preprocess = tolower, EOS = "") -> freqs actual_1grams <- arrange(freqs[[1]], w2) actual_2grams <- arrange(freqs[[2]], w1, w2) expect_identical(expected_1grams, actual_1grams) expect_identical(expected_2grams, actual_2grams) }) test_that("correct 1-gram and 2-gram counts with EOS token", { input <- c("/ a a b a / a b b a / a c b / b c a a b /") dict <- c("a", "b") # N.B: 0L, 3L and 4L represent <BOS>, <EOS> and <UNK> respectively. expected_1grams <- tibble(w2 = c(1L, 2L, 3L, 4L), n = c(8L, 6L, 4L, 2L) ) %>% arrange(w2) expected_2grams <- tibble( w1 = c(0L, 0L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 4L, 4L), w2 = c(1L, 2L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L), n = c(3L, 1L, 2L, 3L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 1L) ) %>% arrange(w1, w2) get_kgram_freqs(text = input, N = 2, dict = dict, .preprocess = identity, EOS = "/") -> freqs actual_1grams <- arrange(freqs[[1]], w2) actual_2grams <- arrange(freqs[[2]], w1, w2) expect_identical(expected_1grams, actual_1grams) expect_identical(expected_2grams, actual_2grams) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/igo.R \name{rebuild_igo_tagger} \alias{rebuild_igo_tagger} \title{Rebuild Igo tagger} \usage{ rebuild_igo_tagger(data_dir = .pkgenv[["igodic"]]) } \arguments{ \item{data_dir}{Character vector.} } \value{ The stored tagger instance is returned invisibly. } \description{ Rebuild an instance of igo tagger using provided options. The igo tagger instance is stored in the package internal environment. } \keyword{internal}
/man/rebuild_igo_tagger.Rd
permissive
paithiov909/rjavacmecab
R
false
true
498
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/igo.R \name{rebuild_igo_tagger} \alias{rebuild_igo_tagger} \title{Rebuild Igo tagger} \usage{ rebuild_igo_tagger(data_dir = .pkgenv[["igodic"]]) } \arguments{ \item{data_dir}{Character vector.} } \value{ The stored tagger instance is returned invisibly. } \description{ Rebuild an instance of igo tagger using provided options. The igo tagger instance is stored in the package internal environment. } \keyword{internal}
## A pair of functions that cache the inverse of a matrix ## This function creates a special "matrix" object that can cache its inverse. makeCacheMatrix <- function(x = matrix()) { inversion <- NULL set <- function(y) { x <<- y inversion <<- NULL } get <- function() x setinverse <- function(solve) inversion <<- solve getinverse <- function() inversion list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. ## If the inverse has already been calculated (and the matrix has not changed), ## then the cachesolve will retrieve the inverse from the cache. cacheSolve <- function(x, ...) { inversion <- x$getinverse() if(!is.null(inversion)) { message("getting cached data") return(inversion) } data <- x$get() inversion <- solve(data, ...) x$setinverse(inversion) inversion }
/cachematrix.R
no_license
osoitzvdf/ProgrammingAssignment2
R
false
false
964
r
## A pair of functions that cache the inverse of a matrix ## This function creates a special "matrix" object that can cache its inverse. makeCacheMatrix <- function(x = matrix()) { inversion <- NULL set <- function(y) { x <<- y inversion <<- NULL } get <- function() x setinverse <- function(solve) inversion <<- solve getinverse <- function() inversion list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. ## If the inverse has already been calculated (and the matrix has not changed), ## then the cachesolve will retrieve the inverse from the cache. cacheSolve <- function(x, ...) { inversion <- x$getinverse() if(!is.null(inversion)) { message("getting cached data") return(inversion) } data <- x$get() inversion <- solve(data, ...) x$setinverse(inversion) inversion }
#' Generic Wrapper function for running dimensionality reduction #' @details Wrapper function to run one of the available dimensionality #' reduction algorithms integrated within SCTK from \code{\link{scaterPCA}}, #' \code{\link{runSeuratPCA}}, \code{\link{runSeuratICA}}, \code{\link{runTSNE}}, #' \code{\link{runSeuratTSNE}}, \code{\link{runUMAP}} and #' \code{\link{runSeuratUMAP}}. Users can use an assay by specifying #' \code{useAssay}, use the assay in an altExp by specifying both #' \code{useAltExp} and \code{useAssay}, or use a low-dimensionality #' representation by specifying \code{useReducedDim}. #' @param inSCE Input \linkS4class{SingleCellExperiment} object. #' @param method One from \code{"scaterPCA"}, \code{"seuratPCA"}, #' \code{"seuratICA"}, \code{"rTSNE"}, \code{"seuratTSNE"}, \code{"scaterUMAP"} #' and \code{"seuratUMAP"}. #' @param useAssay Assay to use for computation. If \code{useAltExp} is #' specified, \code{useAssay} has to exist in #' \code{assays(altExp(inSCE, useAltExp))}. Default \code{"counts"}. #' @param useAltExp The subset to use for computation, usually for the #' selected variable features. Default \code{NULL}. #' @param useReducedDim The low dimension representation to use for embedding #' computation. Default \code{NULL}. #' @param reducedDimName The name of the result matrix. Required. #' @param useFeatureSubset Subset of feature to use for dimension reduction. A #' character string indicating a \code{rowData} variable that stores the logical #' vector of HVG selection, or a vector that can subset the rows of #' \code{inSCE}. Default \code{NULL}. #' @param scale Logical scalar, whether to standardize the expression values. #' Default \code{TRUE}. #' @param nComponents Specify the number of dimensions to compute with the #' selected method in case of PCA/ICA and the number of components to #' use in the case of TSNE/UMAP methods. #' @param seed Random seed for reproducibility of results. 
#' Default \code{NULL} will use global seed in use by the R environment. #' @param ... The other arguments for running a specific algorithm. Please refer #' to the one you use. #' @return The input \linkS4class{SingleCellExperiment} object with #' \code{reducedDim} updated with the result. #' @export #' @examples #' data(scExample, package = "singleCellTK") #' sce <- subsetSCECols(sce, colData = "type != 'EmptyDroplet'") #' sce <- runNormalization(sce, useAssay = "counts", #' outAssayName = "logcounts", #' normalizationMethod = "logNormCounts") #' sce <- runDimReduce(inSCE = sce, method = "scaterPCA", #' useAssay = "logcounts", scale = TRUE, #' reducedDimName = "PCA") runDimReduce <- function(inSCE, method = c("scaterPCA", "seuratPCA", "seuratICA", "rTSNE", "seuratTSNE", "scaterUMAP", "seuratUMAP"), useAssay = NULL, useReducedDim = NULL, useAltExp = NULL, reducedDimName = method, nComponents = 20, useFeatureSubset = NULL, scale = FALSE, seed = NULL, ...) { method <- match.arg(method) args <- list(...) if (method %in% c("scaterPCA", "seuratPCA", "seuratICA") & !is.null(useReducedDim)) { stop("`useReducedDim` is not allowed for linear dimension reduction.") } if (method == "scaterPCA") { inSCE <- scaterPCA(inSCE = inSCE, useAssay = useAssay, useAltExp = useAltExp, reducedDimName = reducedDimName, nComponents = nComponents, useFeatureSubset = useFeatureSubset, scale = scale, seed = seed, ...) } else if (method == "scaterUMAP") { inSCE <- runUMAP(inSCE = inSCE, useAssay = useAssay, useAltExp = useAltExp, useReducedDim = useReducedDim, useFeatureSubset = useFeatureSubset, scale = scale, reducedDimName = reducedDimName, seed = seed, ...) } else if (method == "rTSNE") { inSCE <- runTSNE(inSCE = inSCE, useAssay = useAssay, useAltExp = useAltExp, useReducedDim = useReducedDim, useFeatureSubset = useFeatureSubset, scale = scale, reducedDimName = reducedDimName, seed = seed, ...) 
} else { # Seurat part # TODO: Honestly, the input checks should have been implemented for # functions being wrapped because they are being exposed to users as well. # We should not being performing redundant checks when wrapping them again. useMat <- .selectSCEMatrix(inSCE, useAssay = useAssay, useReducedDim = useReducedDim, useAltExp = useAltExp, returnMatrix = FALSE) useAssay <- useMat$names$useAssay if (!is.null(useAltExp)) { tempSCE <- SingleCellExperiment::altExp(inSCE, useAltExp) } else if (!is.null(useAssay)) { tempSCE <- inSCE } if (method %in% c("seuratPCA", "seuratICA")) { ## SeuratPCA/ICA if (method == "seuratPCA") { message(paste0(date(), " ... Computing Seurat PCA.")) tempSCE <- runSeuratPCA(tempSCE, useAssay = useAssay, reducedDimName = reducedDimName, nPCs = nComponents, useFeatureSubset = useFeatureSubset, scale = scale, seed = seed, ...) } else if (method == "seuratICA") { message(paste0(date(), " ... Computing Seurat ICA.")) tempSCE <- runSeuratICA(tempSCE, useAssay = useAssay, reducedDimName = reducedDimName, nics = nComponents, useFeatureSubset = useFeatureSubset, scale = scale, seed = seed, ...) } seuratObj <- tempSCE@metadata$seurat if (!is.null(useAltExp)) { altExp(inSCE, useAltExp)@metadata$seurat <- seuratObj } else if (!is.null(useAssay)) { inSCE@metadata$seurat <- seuratObj } } else { ## SeuratUMAP/TSNE if (is.null(useReducedDim)) { ### using assay if (!"useReduction" %in% names(args)) { stop("Must specify `useReduction` when using `useAssay` in seuratUMAP/TSNE") } if (args$useReduction == "pca") { message(paste0(date(), " ... Computing Seurat PCA.")) tempSCE <- runSeuratPCA(inSCE = tempSCE, useAssay = useAssay, reducedDimName = paste0(useAssay, "_seuratPCA"), useFeatureSubset = useFeatureSubset, seed = seed) } else if (args$useReduction == "ica") { message(paste0(date(), " ... 
Computing Seurat ICA.")) tempSCE <- runSeuratICA(inSCE = tempSCE, useAssay = useAssay, reducedDimName = paste0(useAssay, "_seuratICA"), useFeatureSubset = useFeatureSubset, seed = seed) } if (method == "seuratUMAP") { message(paste0(date(), " ... Computing Seurat UMAP.")) tempSCE <- runSeuratUMAP(inSCE = tempSCE, reducedDimName = reducedDimName, seed = seed, ...) } else { message(paste0(date(), " ... Computing Seurat tSNE.")) tempSCE <- runSeuratTSNE(inSCE = tempSCE, reducedDimName = reducedDimName, seed = seed, ...) } } else { ### using external reducedDim if (!is.null(args$useReduction)) { stop("Cannot specify `useReduction` when using `useReducedDim` in seuratUMAP/TSNE") } tempSCE <- inSCE seuratObj <- convertSCEToSeurat(inSCE) tempSCE@metadata$seurat$obj <- seuratObj reDim <- SingleCellExperiment::reducedDim(inSCE, useReducedDim) colnames(reDim) <- paste0(useReducedDim, "_", seq_len(length(colnames(reDim)))) rownames(reDim) <- gsub('_', '-', rownames(reDim)) key <- gsub('_', '', useReducedDim) # hard-code "pca" tempSCE@metadata$seurat$obj@reductions$pca <- Seurat::CreateDimReducObject(embeddings = reDim, key = paste0(key, "_"), assay = "RNA") if (method == "seuratUMAP") { # hard-code useReduction="pca" message(paste0(date(), " ... Computing Seurat UMAP.")) tempSCE <- runSeuratUMAP(inSCE = tempSCE, useReduction = "pca", reducedDimName = reducedDimName, seed = seed, ...) } else { # hard-code useReduction="pca" message(paste0(date(), " ... Computing Seurat tSNE.")) tempSCE <- runSeuratTSNE(inSCE = tempSCE, useReduction = "pca", reducedDimName = reducedDimName, seed = seed, ...) } } } SingleCellExperiment::reducedDim(inSCE, reducedDimName) <- SingleCellExperiment::reducedDim(tempSCE, reducedDimName) } return(inSCE) }
/R/runDimReduce.R
permissive
mvfki/singleCellTK
R
false
false
9,510
r
#' Generic Wrapper function for running dimensionality reduction #' @details Wrapper function to run one of the available dimensionality #' reduction algorithms integrated within SCTK from \code{\link{scaterPCA}}, #' \code{\link{runSeuratPCA}}, \code{\link{runSeuratICA}}, \code{\link{runTSNE}}, #' \code{\link{runSeuratTSNE}}, \code{\link{runUMAP}} and #' \code{\link{runSeuratUMAP}}. Users can use an assay by specifying #' \code{useAssay}, use the assay in an altExp by specifying both #' \code{useAltExp} and \code{useAssay}, or use a low-dimensionality #' representation by specifying \code{useReducedDim}. #' @param inSCE Input \linkS4class{SingleCellExperiment} object. #' @param method One from \code{"scaterPCA"}, \code{"seuratPCA"}, #' \code{"seuratICA"}, \code{"rTSNE"}, \code{"seuratTSNE"}, \code{"scaterUMAP"} #' and \code{"seuratUMAP"}. #' @param useAssay Assay to use for computation. If \code{useAltExp} is #' specified, \code{useAssay} has to exist in #' \code{assays(altExp(inSCE, useAltExp))}. Default \code{"counts"}. #' @param useAltExp The subset to use for computation, usually for the #' selected variable features. Default \code{NULL}. #' @param useReducedDim The low dimension representation to use for embedding #' computation. Default \code{NULL}. #' @param reducedDimName The name of the result matrix. Required. #' @param useFeatureSubset Subset of feature to use for dimension reduction. A #' character string indicating a \code{rowData} variable that stores the logical #' vector of HVG selection, or a vector that can subset the rows of #' \code{inSCE}. Default \code{NULL}. #' @param scale Logical scalar, whether to standardize the expression values. #' Default \code{TRUE}. #' @param nComponents Specify the number of dimensions to compute with the #' selected method in case of PCA/ICA and the number of components to #' use in the case of TSNE/UMAP methods. #' @param seed Random seed for reproducibility of results. 
#' Default \code{NULL} will use global seed in use by the R environment. #' @param ... The other arguments for running a specific algorithm. Please refer #' to the one you use. #' @return The input \linkS4class{SingleCellExperiment} object with #' \code{reducedDim} updated with the result. #' @export #' @examples #' data(scExample, package = "singleCellTK") #' sce <- subsetSCECols(sce, colData = "type != 'EmptyDroplet'") #' sce <- runNormalization(sce, useAssay = "counts", #' outAssayName = "logcounts", #' normalizationMethod = "logNormCounts") #' sce <- runDimReduce(inSCE = sce, method = "scaterPCA", #' useAssay = "logcounts", scale = TRUE, #' reducedDimName = "PCA") runDimReduce <- function(inSCE, method = c("scaterPCA", "seuratPCA", "seuratICA", "rTSNE", "seuratTSNE", "scaterUMAP", "seuratUMAP"), useAssay = NULL, useReducedDim = NULL, useAltExp = NULL, reducedDimName = method, nComponents = 20, useFeatureSubset = NULL, scale = FALSE, seed = NULL, ...) { method <- match.arg(method) args <- list(...) if (method %in% c("scaterPCA", "seuratPCA", "seuratICA") & !is.null(useReducedDim)) { stop("`useReducedDim` is not allowed for linear dimension reduction.") } if (method == "scaterPCA") { inSCE <- scaterPCA(inSCE = inSCE, useAssay = useAssay, useAltExp = useAltExp, reducedDimName = reducedDimName, nComponents = nComponents, useFeatureSubset = useFeatureSubset, scale = scale, seed = seed, ...) } else if (method == "scaterUMAP") { inSCE <- runUMAP(inSCE = inSCE, useAssay = useAssay, useAltExp = useAltExp, useReducedDim = useReducedDim, useFeatureSubset = useFeatureSubset, scale = scale, reducedDimName = reducedDimName, seed = seed, ...) } else if (method == "rTSNE") { inSCE <- runTSNE(inSCE = inSCE, useAssay = useAssay, useAltExp = useAltExp, useReducedDim = useReducedDim, useFeatureSubset = useFeatureSubset, scale = scale, reducedDimName = reducedDimName, seed = seed, ...) 
} else { # Seurat part # TODO: Honestly, the input checks should have been implemented for # functions being wrapped because they are being exposed to users as well. # We should not being performing redundant checks when wrapping them again. useMat <- .selectSCEMatrix(inSCE, useAssay = useAssay, useReducedDim = useReducedDim, useAltExp = useAltExp, returnMatrix = FALSE) useAssay <- useMat$names$useAssay if (!is.null(useAltExp)) { tempSCE <- SingleCellExperiment::altExp(inSCE, useAltExp) } else if (!is.null(useAssay)) { tempSCE <- inSCE } if (method %in% c("seuratPCA", "seuratICA")) { ## SeuratPCA/ICA if (method == "seuratPCA") { message(paste0(date(), " ... Computing Seurat PCA.")) tempSCE <- runSeuratPCA(tempSCE, useAssay = useAssay, reducedDimName = reducedDimName, nPCs = nComponents, useFeatureSubset = useFeatureSubset, scale = scale, seed = seed, ...) } else if (method == "seuratICA") { message(paste0(date(), " ... Computing Seurat ICA.")) tempSCE <- runSeuratICA(tempSCE, useAssay = useAssay, reducedDimName = reducedDimName, nics = nComponents, useFeatureSubset = useFeatureSubset, scale = scale, seed = seed, ...) } seuratObj <- tempSCE@metadata$seurat if (!is.null(useAltExp)) { altExp(inSCE, useAltExp)@metadata$seurat <- seuratObj } else if (!is.null(useAssay)) { inSCE@metadata$seurat <- seuratObj } } else { ## SeuratUMAP/TSNE if (is.null(useReducedDim)) { ### using assay if (!"useReduction" %in% names(args)) { stop("Must specify `useReduction` when using `useAssay` in seuratUMAP/TSNE") } if (args$useReduction == "pca") { message(paste0(date(), " ... Computing Seurat PCA.")) tempSCE <- runSeuratPCA(inSCE = tempSCE, useAssay = useAssay, reducedDimName = paste0(useAssay, "_seuratPCA"), useFeatureSubset = useFeatureSubset, seed = seed) } else if (args$useReduction == "ica") { message(paste0(date(), " ... 
Computing Seurat ICA.")) tempSCE <- runSeuratICA(inSCE = tempSCE, useAssay = useAssay, reducedDimName = paste0(useAssay, "_seuratICA"), useFeatureSubset = useFeatureSubset, seed = seed) } if (method == "seuratUMAP") { message(paste0(date(), " ... Computing Seurat UMAP.")) tempSCE <- runSeuratUMAP(inSCE = tempSCE, reducedDimName = reducedDimName, seed = seed, ...) } else { message(paste0(date(), " ... Computing Seurat tSNE.")) tempSCE <- runSeuratTSNE(inSCE = tempSCE, reducedDimName = reducedDimName, seed = seed, ...) } } else { ### using external reducedDim if (!is.null(args$useReduction)) { stop("Cannot specify `useReduction` when using `useReducedDim` in seuratUMAP/TSNE") } tempSCE <- inSCE seuratObj <- convertSCEToSeurat(inSCE) tempSCE@metadata$seurat$obj <- seuratObj reDim <- SingleCellExperiment::reducedDim(inSCE, useReducedDim) colnames(reDim) <- paste0(useReducedDim, "_", seq_len(length(colnames(reDim)))) rownames(reDim) <- gsub('_', '-', rownames(reDim)) key <- gsub('_', '', useReducedDim) # hard-code "pca" tempSCE@metadata$seurat$obj@reductions$pca <- Seurat::CreateDimReducObject(embeddings = reDim, key = paste0(key, "_"), assay = "RNA") if (method == "seuratUMAP") { # hard-code useReduction="pca" message(paste0(date(), " ... Computing Seurat UMAP.")) tempSCE <- runSeuratUMAP(inSCE = tempSCE, useReduction = "pca", reducedDimName = reducedDimName, seed = seed, ...) } else { # hard-code useReduction="pca" message(paste0(date(), " ... Computing Seurat tSNE.")) tempSCE <- runSeuratTSNE(inSCE = tempSCE, useReduction = "pca", reducedDimName = reducedDimName, seed = seed, ...) } } } SingleCellExperiment::reducedDim(inSCE, reducedDimName) <- SingleCellExperiment::reducedDim(tempSCE, reducedDimName) } return(inSCE) }
# == title
# Empty Annotation
#
# == param
# -which Whether it is a column annotation or a row annotation?
# -border Whether draw borders of the annotation region?
# -zoom If it is true and when the heatmap is split, the empty annotation slices will have
#       equal height or width, and you can see the correspondance between the annotation slices
#       and the original heatmap slices.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == details
# It creates an empty annotation and holds space, later users can add graphics
# by `decorate_annotation`. This function is useful when users have difficulty to
# implement `AnnotationFunction` object.
#
# In following example, an empty annotation is first created and later points are added:
#
#     m = matrix(rnorm(100), 10)
#     ht = Heatmap(m, top_annotation = HeatmapAnnotation(pt = anno_empty()))
#     ht = draw(ht)
#     co = column_order(ht)[[1]]
#     pt_value = 1:10
#     decorate_annotation("pt", {
#         pushViewport(viewport(xscale = c(0.5, ncol(mat)+0.5), yscale = range(pt_value)))
#         grid.points(seq_len(ncol(mat)), pt_value[co], pch = 16, default.units = "native")
#         grid.yaxis()
#         popViewport()
#     })
#
# And it is similar as using `anno_points`:
#
#     Heatmap(m, top_annotation = HeatmapAnnotation(pt = anno_points(pt_value)))
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#empty-annotation
#
# == examples
# anno = anno_empty()
# draw(anno, test = "anno_empty")
# anno = anno_empty(border = FALSE)
# draw(anno, test = "anno_empty without border")
anno_empty = function(which = c("column", "row"), border = TRUE, zoom = FALSE,
    width = NULL, height = NULL) {

    # When called from inside HeatmapAnnotation(), the annotation direction has
    # already been decided and is stored in the package environment; only fall
    # back to the `which` argument for standalone calls.
    which = if(is.null(.ENV$current_annotation_which)) {
        match.arg(which)[1]
    } else {
        .ENV$current_annotation_which
    }

    # default extent orthogonal to the heatmap is 1 cm
    anno_size = anno_width_and_height(which, width, height, unit(1, "cm"))

    # The drawing function intentionally draws nothing except an optional
    # border; users are expected to add graphics later via decorate_annotation().
    fun = function(index) {
        if(border) {
            grid.rect()
        }
    }

    # NOTE: `border` and `zoom` must keep these exact names — var_import
    # captures them by name for the annotation's drawing environment.
    AnnotationFunction(
        fun = fun,
        n = NA,
        fun_name = "anno_empty",
        which = which,
        var_import = list(border, zoom),
        subset_rule = list(),
        subsetable = TRUE,
        height = anno_size$height,
        width = anno_size$width,
        show_name = FALSE
    )
}

# == title
# Subset the Matrix by Rows
#
# == param
# -x A matrix.
# -i The row indices.
#
# == details
# Mainly used for constructing the `AnnotationFunction-class` object.
#
subset_matrix_by_row = function(x, i) {
    # drop = FALSE keeps the result a matrix even for a single row
    x[i, , drop = FALSE]
}

# == title
# Subset the vector
#
# == param
# -x A vector.
# -i The indices.
#
# == details
# Mainly used for constructing the `AnnotationFunction-class` object.
#
subset_vector = function(x, i) {
    x[i]
}

# == title
# Simple Annotation
#
# == param
# -x The value vector. The value can be a vector or a matrix. The length of the vector
#    or the nrow of the matrix is taken as the number of the observations of the annotation.
#    The value can be numeric or character and NA value is allowed.
# -col Color that maps to ``x``. If ``x`` is numeric and needs a continuous mapping, ``col``
#    should be a color mapping function which accepts a vector of values and returns a
#    vector of colors. Normally it is generated by `circlize::colorRamp2`.
# If ``x`` is discrete
# (numeric or character) and needs a discrete color mapping, ``col`` should be a vector of
# colors with levels in ``x`` as vector names. If ``col`` is not specified, the color mapping
# is randomly generated by ``ComplexHeatmap:::default_col``.
# -na_col Color for NA value.
# -which Whether it is a column annotation or a row annotation?
# -border Whether draw borders of the annotation region?
# -gp Graphic parameters for grid borders. The ``fill`` parameter is disabled.
# -pch Points/symbols that are added on top of the annotation grids. The value can be numeric
#      or single letters. It can be a vector if ``x`` is a vector and a matrix if ``x`` is a matrix.
#      No points are drawn if the corresponding values are NA.
# -pt_size Size of the points/symbols. It should be a `grid::unit` object. If ``x`` is a vector,
#      the value of ``pt_size`` can be a vector, while if ``x`` is a matrix, ``pt_size`` can
#      only be a single value.
# -pt_gp Graphic parameters for points/symbols. The length setting is same as ``pt_size``.
#      If ``pch`` is set as letters, the fontsize should be set as ``pt_gp = gpar(fontsize = ...)``.
# -simple_anno_size size of the simple annotation.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == details
# The "simple annotation" is the most widely used annotation type which is heatmap-like, where
# the grid colors correspond to the values. `anno_simple` also supports to add points/symbols
# on top of the grids where the it can be normal point (when ``pch`` is set as numbers) or letters (when
# ``pch`` is set as single letters).
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#simple-annotation-as-an-annotation-function
#
# == example
# anno = anno_simple(1:10)
# draw(anno, test = "a numeric vector")
#
# anno = anno_simple(cbind(1:10, 10:1))
# draw(anno, test = "a matrix")
#
# anno = anno_simple(1:10, pch = c(1:4, NA, 6:8, NA, 10))
# draw(anno, test = "pch has NA values")
#
# anno = anno_simple(1:10, pch = c(rep("A", 5), rep(NA, 5)))
# draw(anno, test = "pch has NA values")
#
# pch = matrix(1:20, nc = 2)
# pch[sample(length(pch), 10)] = NA
# anno = anno_simple(cbind(1:10, 10:1), pch = pch)
# draw(anno, test = "matrix, pch is a matrix with NA values")
anno_simple = function(x, col, na_col = "grey",
    which = c("column", "row"), border = FALSE, gp = gpar(col = NA),
    pch = NULL, pt_size = unit(1, "snpc")*0.8, pt_gp = gpar(),
    simple_anno_size = ht_opt$simple_anno_size,
    width = NULL, height = NULL) {

    # inherit the annotation direction set by HeatmapAnnotation(), if any
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
    } else {
        which = .ENV$current_annotation_which
    }

    # a one-column matrix/data frame is treated the same as a plain vector
    if(is.data.frame(x)) x = as.matrix(x)
    if(is.matrix(x)) {
        if(ncol(x) == 1) {
            x = x[, 1]
        }
    }
    input_is_matrix = is.matrix(x)

    # for matrix input, every column gets one simple_anno_size-wide track
    anno_size = anno_width_and_height(which, width, height,
        simple_anno_size*ifelse(input_is_matrix, ncol(x), 1))

    if(missing(col)) {
        col = default_col(x)
    }

    # accept a named color vector (discrete), a color mapping function
    # (continuous, e.g. circlize::colorRamp2) or a ready-made ColorMapping
    if(is.atomic(col)) {
        color_mapping = ColorMapping(name = "foo", colors = col, na_col = na_col)
    } else if(is.function(col)) {
        color_mapping = ColorMapping(name = "foo", col_fun = col, na_col = na_col)
    } else if(inherits(col, "ColorMapping")) {
        color_mapping = col
    } else {
        stop_wrap("`col` should be a named vector/a color mapping function/a ColorMapping object.")
    }

    value = x
    gp = subset_gp(gp, 1) # gp controls border

    if(is.matrix(value)) {
        n = nrow(value)
        nr = n
        nc = ncol(value)
    } else {
        n = length(value)
        nr = n
        nc = 1
    }

    # normalize pch/pt_size/pt_gp so the drawing closures can index them
    # uniformly: per-cell matrix for matrix input, per-observation vector else
    if(!is.null(pch)) {
        if(input_is_matrix) {
            pch = normalize_graphic_param_to_mat(pch, ifelse(is.matrix(x), ncol(x), 1), n, "pch")
            # a single size is shared by all tracks, scaled by the track count
            pt_size = pt_size[1]*(1/nc)
            pt_gp = subset_gp(pt_gp, 1)
        } else {
            if(length(pch) == 1) pch = rep(pch, n)
            if(length(pt_size) == 1) pt_size = rep(pt_size, n)
            pt_gp = recycle_gp(pt_gp, n)
        }
    }

    # Drawing closure for row annotations. `index` is the (possibly reordered/
    # subset) observation index of the current heatmap slice; observations are
    # laid out top-to-bottom. Variables referenced here (value, gp, border,
    # color_mapping, pt_*) are re-bound via var_import below.
    row_fun = function(index) {
        n = length(index)
        y = (n - seq_len(n) + 0.5) / n
        if(is.matrix(value)) {
            nc = ncol(value)
            pch = pch[index, , drop = FALSE]
            for(i in seq_len(nc)) {
                fill = map_to_colors(color_mapping, value[index, i])
                grid.rect(x = (i-0.5)/nc, y, height = 1/n, width = 1/nc,
                    gp = do.call("gpar", c(list(fill = fill), gp)))
                if(!is.null(pch)) {
                    # only draw symbols where pch is not NA
                    l = !is.na(pch[, i])
                    if(any(l)) {
                        grid.points(x = rep((i-0.5)/nc, sum(l)), y = y[l], pch = pch[l, i],
                            size = {if(length(pt_size) == 1) pt_size else pt_size[i]},
                            gp = subset_gp(pt_gp, i))
                    }
                }
            }
        } else {
            fill = map_to_colors(color_mapping, value[index])
            grid.rect(x = 0.5, y, height = 1/n, width = 1,
                gp = do.call("gpar", c(list(fill = fill), gp)))
            if(!is.null(pch)) {
                pch = pch[index]
                pt_size = pt_size[index]
                pt_gp = subset_gp(pt_gp, index)
                l = !is.na(pch)
                if(any(l)) {
                    grid.points(x = rep(0.5, sum(l)), y = y[l], pch = pch[l],
                        size = pt_size[l], gp = subset_gp(pt_gp, which(l)))
                }
            }
        }
        if(border) grid.rect(gp = gpar(fill = "transparent"))
    }
    # Drawing closure for column annotations; observations run left-to-right
    # and matrix tracks are stacked top-to-bottom.
    column_fun = function(index) {
        n = length(index)
        x = (seq_len(n) - 0.5) / n
        if(is.matrix(value)) {
            nc = ncol(value)
            pch = pch[index, , drop = FALSE]
            for(i in seq_len(nc)) {
                fill = map_to_colors(color_mapping, value[index, i])
                grid.rect(x, y = (nc-i +0.5)/nc, width = 1/n, height = 1/nc,
                    gp = do.call("gpar", c(list(fill = fill), gp)))
                if(!is.null(pch)){
                    l = !is.na(pch[, i])
                    if(any(l)) {
                        grid.points(x[l], y = rep((nc-i +0.5)/nc, sum(l)), pch = pch[l, i],
                            size = {if(length(pt_size) == 1) pt_size else pt_size[i]},
                            gp = subset_gp(pt_gp, i))
                    }
                }
            }
        } else {
            fill = map_to_colors(color_mapping, value[index])
            grid.rect(x, y = 0.5, width = 1/n, height = 1,
                gp = do.call("gpar", c(list(fill = fill), gp)))
            if(!is.null(pch)) {
                pch = pch[index]
                pt_size = pt_size[index]
                pt_gp = subset_gp(pt_gp, index)
                l = !is.na(pch)
                if(any(l)) {
                    grid.points(x[l], y = rep(0.5, sum(l)), pch = pch[l],
                        size = pt_size[l], gp = subset_gp(pt_gp, which(l)))
                }
            }
        }
        if(border) grid.rect(gp = gpar(fill = "transparent"))
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_simple",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = c(0.5, nc + 0.5),
        var_import = list(value, gp, border, color_mapping, pt_gp, pt_size, pch)
    )

    # subset rules tell the heatmap machinery how to slice the imported
    # variables when the annotation itself is subset
    anno@subset_rule = list()
    if(input_is_matrix) {
        anno@subset_rule$value = subset_matrix_by_row
        if(!is.null(pch)) {
            anno@subset_rule$pch = subset_matrix_by_row
        }
    } else {
        anno@subset_rule$value = subset_vector
        if(!is.null(pch)) {
            anno@subset_rule$pch = subset_vector
            anno@subset_rule$pt_size = subset_vector
            anno@subset_rule$pt_gp = subset_gp
        }
    }

    anno@subsetable = TRUE

    return(anno)
}

# == title
# Image Annotation
#
# == param
# -image A vector of file paths of images. The format of the image is inferred from the suffix name of the image file.
#        NA values or empty strings in the vector means no image to drawn.
# -which Whether it is a column annotation or a row annotation?
# -border Whether draw borders of the annotation region?
# -gp Graphic parameters for annotation grids. If the image has transparent background, the ``fill`` parameter
#     can be used to control the background color in the annotation grids.
# -space The space around the image to the annotation grid borders. The value should be a `grid::unit` object.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == details
# This function supports image formats in ``png``, ``svg``, ``pdf``, ``eps``, ``jpeg/jpg``, ``tiff``.
# ``png``, ``jpeg/jpg`` and ``tiff`` images are imported by `png::readPNG`, `jpeg::readJPEG` and
# `tiff::readTIFF`, and drawn by `grid::grid.raster`. ``svg`` images are firstly reformatted by ``rsvg::rsvg_svg``
# and then imported by `grImport2::readPicture` and drawn by `grImport2::grid.picture`. ``pdf`` and ``eps``
# images are imported by `grImport::PostScriptTrace` and `grImport::readPicture`, later drawn by `grImport::grid.picture`.
#
# Different image formats can be mixed in the ``image`` vector.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#image-annotation
#
# == example
# # download the free icons from https://github.com/Keyamoon/IcoMoon-Free
# \dontrun{
# image = sample(dir("~/Downloads/IcoMoon-Free-master/PNG/64px", full.names = TRUE), 10)
# anno = anno_image(image)
# draw(anno, test = "png")
# image[1:5] = ""
# anno = anno_image(image)
# draw(anno, test = "some of png")
# }
anno_image = function(image, which = c("column", "row"), border = TRUE,
    gp = gpar(fill = NA, col = NA), space = unit(1, "mm"),
    width = NULL, height = NULL) {

    # NA entries and whitespace-only strings mean "no image for this slot"
    image[is.na(image)] = ""
    l = grepl("^\\s*$", image)
    image[l] = ""

    allowed_image_type = c("png", "svg", "pdf", "eps", "jpeg", "jpg", "tiff")
    if(inherits(image, "character")) { ## they are file path
        # image type is inferred from the file-name suffix
        image_type = tolower(gsub("^.*\\.(\\w+)$", "\\1", image))
        if(!all(image_type[image_type != ""] %in% allowed_image_type)) {
            stop_wrap("image file should be of png/svg/pdf/eps/jpeg/jpg/tiff.")
        }
    } else {
        stop_wrap("`image` should be a vector of path.")
    }

    # Read all images up front. image_class records which backend each
    # element must be drawn with later (grid.raster vs grid.picture).
    n_image = length(image)
    image_list = vector("list", n_image)
    image_class = vector("character", n_image)
    for(i in seq_along(image)) {
        if(image[i] == "") {
            image_list[[i]] = NA
            image_class[i] = NA
        } else if(image_type[i] == "png") {
            if(!requireNamespace("png")) {
                stop_wrap("Need png package to read png images.")
            }
            image_list[[i]] = png::readPNG(image[i])
            image_class[i] = "raster"
        } else if(image_type[i] %in% c("jpeg", "jpg")) {
            if(!requireNamespace("jpeg")) {
                stop_wrap("Need jpeg package to read jpeg/jpg images.")
            }
            image_list[[i]] = jpeg::readJPEG(image[i])
            image_class[i] = "raster"
        } else if(image_type[i] == "tiff") {
            if(!requireNamespace("tiff")) {
                stop_wrap("Need tiff package to read tiff images.")
            }
            image_list[[i]] = tiff::readTIFF(image[i])
            image_class[i] = "raster"
        } else if(image_type[i] %in% c("pdf", "eps")) {
            if(!requireNamespace("grImport")) {
                stop_wrap("Need grImport package to read pdf/eps images.")
            }
            temp_file = tempfile()
            getFromNamespace("PostScriptTrace", ns = "grImport")(image[[i]], temp_file)
            image_list[[i]] = grImport::readPicture(temp_file)
            file.remove(temp_file)
            image_class[i] = "grImport::Picture"
        } else if(image_type[i] == "svg") {
            if(!requireNamespace("grImport2")) {
                stop_wrap("Need grImport2 package to read svg images.")
            }
            temp_file = tempfile()
            # get it work on bioconductor build server
            oe = try(getFromNamespace("rsvg_svg", ns = "rsvg")(image[i], temp_file))
            if(inherits(oe, "try-error")) {
                stop_wrap("Need rsvg package to convert svg images.")
            }
            image_list[[i]] = grImport2::readPicture(temp_file)
            file.remove(temp_file)
            image_class[i] = "grImport2::Picture"
        }
    }

    # height/width aspect ratio of every image; 1 for empty slots
    yx_asp = sapply(image_list, function(x) {
        if(inherits(x, "array")) {
            nrow(x)/ncol(x)
        } else if(inherits(x, "Picture")) {
            max(x@summary@yscale)/max(x@summary@xscale)
        } else {
            1
        }
    })

    # inherit the annotation direction set by HeatmapAnnotation(), if any
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
    } else {
        which = .ENV$current_annotation_which
    }

    space = space[1]

    anno_size = anno_width_and_height(which, width, height, unit(1, "cm"))

    gp = recycle_gp(gp, n_image)

    column_fun = function(index) {
        n = length(index)
        pushViewport(viewport())
        # aspect ratio (h/w) of one grid cell, minus the surrounding space
        asp = convertHeight(unit(1, "npc") - space*2, "mm", valueOnly = TRUE)/convertWidth(unit(1/n, "npc") - space*2, "mm", valueOnly = TRUE)
        grid.rect(x = (1:n - 0.5)/n, width = 1/n, gp = subset_gp(gp, index))
        for(i in seq_len(n)) {
            if(identical(image_list[[ index[i] ]], NA)) next
            if(yx_asp[ index[i] ] > asp) {
                # image is relatively taller than the cell: fix the height and
                # scale the width by w/h = 1/yx_asp to preserve the aspect
                # ratio. (Bug fix: this previously multiplied by yx_asp, which
                # widened tall images; the row_fun branch below has always
                # used 1/yx_asp.)
                height = unit(1, "npc") - space*2
                width = convertHeight(height, "mm")*(1/yx_asp[ index[i] ])
            } else {
                # image is relatively wider: fix the width, derive the height
                width = unit(1/n, "npc") - space*2
                height = yx_asp[ index[i] ]*convertWidth(width, "mm")
            }
            if(image_class[ index[i] ] == "raster") {
                grid.raster(image_list[[ index[i] ]], x = (i-0.5)/n, width = width, height = height)
            } else if(image_class[ index[i] ] == "grImport::Picture") {
                grid.picture = getFromNamespace("grid.picture", ns = "grImport")
                grid.picture(image_list[[ index[i] ]], x = (i-0.5)/n, width = width, height = height)
            } else if(image_class[ index[i] ] == "grImport2::Picture") {
                grid.picture = getFromNamespace("grid.picture", ns = "grImport2")
                grid.picture(image_list[[ index[i] ]], x = (i-0.5)/n, width = width, height = height)
            }
        }
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    row_fun = function(index) {
        n = length(index)
        pushViewport(viewport())
        asp = convertHeight(unit(1/n, "npc") - space*2, "mm", valueOnly = TRUE)/convertWidth(unit(1, "npc") - space*2, "mm", valueOnly = TRUE)
        grid.rect(y = (n - 1:n + 0.5)/n, height = 1/n, gp = subset_gp(gp, index))
        for(i in seq_len(n)) {
            if(identical(image_list[[ index[i] ]], NA)) next
            if(yx_asp[ index[i] ] > asp) {
                height = unit(1/n, "npc") - space*2
                width = convertHeight(height, "mm")*(1/yx_asp[ index[i] ])
            } else {
                width = unit(1, "npc") - space*2
                height = yx_asp[ index[i] ]*convertWidth(width, "mm")
            }
            if(image_class[ index[i] ] == "raster") {
                grid.raster(image_list[[ index[i] ]], y = (n - i + 0.5)/n, width = width, height = height)
            } else if(image_class[ index[i] ] == "grImport::Picture") {
                grid.picture = getFromNamespace("grid.picture", ns = "grImport")
                grid.picture(image_list[[ index[i] ]], y = (n - i + 0.5)/n, width = width, height = height)
            } else if(image_class[ index[i] ] == "grImport2::Picture") {
                grid.picture = getFromNamespace("grid.picture", ns = "grImport2")
                grid.picture(image_list[[ index[i] ]], y = (n - i + 0.5)/n, width = width, height = height)
            }
        }
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_image",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n_image,
        data_scale = c(0.5, 1.5),
        var_import = list(gp, border, space, yx_asp, image_list, image_class)
    )

    anno@subset_rule$gp = subset_vector
    anno@subset_rule$image_list = subset_vector
    anno@subset_rule$image_class = subset_vector

    anno@subsetable = TRUE

    return(anno)
}

# == title
# The Default Parameters for Annotation Axis
#
# == param
# -which Whether it is for column annotation or row annotation?
#
# == details
# There are following parameters for the annotation axis:
#
# -at The breaks of axis. By default it is automatically inferred.
# -labels The corresponding axis labels.
# -labels_rot The rotation of the axis labels.
# -gp Graphic parameters of axis labels. The value should be a `grid::gpar` object.
# -side If it is for column annotation, the value should only be one of ``left`` and ``right``. If
#       it is for row annotation, the value should only be one of ``top`` and ``bottom``.
# -facing Whether the axis faces to the outside of the annotation region or inside.
# Sometimes when
# appending more than one heatmaps, the axes of column annotations of one heatmap might
# overlap to the neighbouring heatmap, setting ``facing`` to ``inside`` may avoid it.
# -direction The direction of the axis. Value should be "normal" or "reverse".
#
# All the parameters are passed to `annotation_axis_grob` to construct an axis grob.
#
# == example
# default_axis_param("column")
# default_axis_param("row")
default_axis_param = function(which) {
    # Column annotations carry a vertical value axis on the left; row
    # annotations carry a horizontal value axis at the bottom with the
    # labels rotated 90 degrees.
    list(
        at = NULL,
        labels = NULL,
        labels_rot = ifelse(which == "column", 0, 90),
        gp = gpar(fontsize = 8),
        side = ifelse(which == "column", "left", "bottom"),
        facing = "outside",
        direction = "normal"
    )
}

# Merge user-supplied axis settings into the defaults for the given
# annotation direction and validate the axis side.
#
# axis_param: named list of overrides (see default_axis_param()).
# which: "row" or "column".
# Returns the completed parameter list; raises an error for an invalid side.
validate_axis_param = function(axis_param, which) {
    dft = default_axis_param(which)
    for(nm in names(axis_param)) {
        dft[[nm]] = axis_param[[nm]]
    }
    # A row annotation's value axis runs horizontally, so it can only sit on
    # the top or bottom; a column annotation's axis runs vertically, so it
    # can only sit on the left or right.
    if(which == "row") {
        if(dft$side %in% c("left", "right")) {
            stop_wrap("axis side can only be set to 'top' or 'bottom' for row annotations.")
        }
    }
    if(which == "column") {
        if(dft$side %in% c("top", "bottom")) {
            # Bug fix: this message previously said "for row annotations",
            # which named the wrong branch and confused users.
            stop_wrap("axis side can only be set to 'left' or 'right' for column annotations.")
        }
    }
    return(dft)
}

# Build the axis grob for an annotation from a (possibly partial) axis_param
# list and the data range. Breaks/labels are inferred from the range when not
# supplied explicitly.
construct_axis_grob = function(axis_param, which, data_scale) {
    axis_param_default = default_axis_param(which)
    for(nm in setdiff(names(axis_param_default), names(axis_param))) {
        axis_param[[nm]] = axis_param_default[[nm]]
    }
    if(is.null(axis_param$at)) {
        at = pretty_breaks(data_scale)
        axis_param$at = at
        axis_param$labels = at
    }
    if(is.null(axis_param$labels)) {
        axis_param$labels = axis_param$at
    }
    axis_param$scale = data_scale
    axis_grob = do.call(annotation_axis_grob, axis_param)
    return(axis_grob)
}

# == title
# Points Annotation
#
# == param
# -x The value vector. The value can be a vector or a matrix. The length of the vector
#    or the number of rows of the matrix is taken as the number of the observations of the annotation.
# -which Whether it is a column annotation or a row annotation?
# -border Whether draw borders of the annotation region?
# -gp Graphic parameters for points. The length of each graphic parameter can be 1, length of ``x`` if ``x``
#     is a vector, or number of columns of ``x`` if ``x`` is a matrix.
# -pch Point type. The length setting is the same as ``gp``.
# -size Point size, the value should be a `grid::unit` object. The length setting is the same as ``gp``.
# -ylim Data ranges. By default it is ``range(x)``.
# -extend The extension to both side of ``ylim``. The value is a percent value corresponding to ``ylim[2] - ylim[1]``.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
# -... Other arguments.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#points-annotation
#
# == example
# anno = anno_points(runif(10))
# draw(anno, test = "anno_points")
# anno = anno_points(matrix(runif(20), nc = 2), pch = 1:2)
# draw(anno, test = "matrix")
anno_points = function(x, which = c("column", "row"), border = TRUE,
    gp = gpar(), pch = 16, size = unit(2, "mm"),
    ylim = NULL, extend = 0.05, axis = TRUE,
    axis_param = default_axis_param(which),
    width = NULL, height = NULL, ...) {

    # reject arguments removed in earlier refactors with an explicit message
    other_args = list(...)
    if(length(other_args)) {
        if("axis_gp" %in% names(other_args)) {
            stop_wrap("`axis_gp` is removed from the arguments. Use `axis_param = list(gp = ...)` instead.")
        }
        if("axis_direction" %in% names(other_args)) {
            stop_wrap("`axis_direction` is not supported any more.")
        }
    }
    # undocumented escape hatch: when pch_as_image = TRUE, `pch` holds paths
    # to png files that are drawn as rasters instead of point symbols
    if("pch_as_image" %in% names(other_args)) {
        pch_as_image = other_args$pch_as_image
    } else {
        pch_as_image = FALSE
    }

    # For standalone calls a null graphics device is opened so that unit
    # arithmetic below has a device to work against (presumably; see dev.null
    # in this package), and closed again on exit via `ef`.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    # a one-column matrix/data frame behaves like a plain vector
    if(is.data.frame(x)) x = as.matrix(x)
    if(is.matrix(x)) {
        if(ncol(x) == 1) {
            x = x[, 1]
        }
    }
    input_is_matrix = is.matrix(x)

    anno_size = anno_width_and_height(which, width, height, unit(1, "cm"))

    if(is.matrix(x)) {
        n = nrow(x)
        nr = n
        nc = ncol(x)
    } else {
        n = length(x)
        nr = n
        nc = 1
    }

    # recycle graphics parameters: per-column for matrix input,
    # per-observation for vector input
    if(input_is_matrix) {
        gp = recycle_gp(gp, nc)
        if(length(pch) == 1) pch = rep(pch, nc)
        if(length(size) == 1) size = rep(size, nc)
    } else if(is.atomic(x)) {
        gp = recycle_gp(gp, n)
        if(length(pch) == 1) pch = rep(pch, n)
        if(length(size) == 1) size = rep(size, n)
    }

    if(is.null(ylim)) {
        data_scale = range(x, na.rm = TRUE)
    } else {
        data_scale = ylim
    }
    # extend the value range on both sides so points at the extremes are
    # not clipped by the viewport border
    data_scale = data_scale + c(-extend, extend)*(data_scale[2] - data_scale[1])

    value = x

    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, data_scale) else NULL

    # Drawing closure for row annotations. `k` is the slice index out of `N`
    # slices of a split heatmap; the axis is drawn only on the outermost
    # slice for the configured side.
    row_fun = function(index, k = 1, N = 1) {
        n = length(index)
        if(axis_param$direction == "reverse") {
            value = data_scale[2] - value + data_scale[1]
        }
        pushViewport(viewport(xscale = data_scale, yscale = c(0.5, n+0.5)))
        if(is.matrix(value)) {
            for(i in seq_len(ncol(value))) {
                grid.points(value[index, i], n - seq_along(index) + 1,
                    gp = subset_gp(gp, i), default.units = "native",
                    pch = pch[i], size = size[i])
            }
        } else {
            if(pch_as_image) {
                # pch entries are png file paths; draw each as a raster
                for(ii in seq_along(index)) {
                    pch_image = png::readPNG(pch[ index[ii] ])
                    grid.raster(pch_image, y = n - ii + 1, x = value[ index[ii] ],
                        default.units = "native", width = size[ index[ii] ],
                        height = size[ index[ii] ]*(nrow(pch_image)/ncol(pch_image)))
                }
            } else {
                grid.points(value[index], n - seq_along(index) + 1,
                    gp = subset_gp(gp, index), default.units = "native",
                    pch = pch[index], size = size[index])
            }
        }
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    # Drawing closure for column annotations; same logic, axes transposed.
    column_fun = function(index, k = 1, N = 1) {
        n = length(index)
        if(axis_param$direction == "reverse") {
            value = data_scale[2] - value + data_scale[1]
        }
        pushViewport(viewport(yscale = data_scale, xscale = c(0.5, n+0.5)))
        if(is.matrix(value)) {
            for(i in seq_len(ncol(value))) {
                grid.points(seq_along(index), value[index, i],
                    gp = subset_gp(gp, i), default.units = "native",
                    pch = pch[i], size = size[i])
            }
        } else {
            if(pch_as_image) {
                for(ii in seq_along(index)) {
                    pch_image = png::readPNG(pch[ index[ii] ])
                    grid.raster(pch_image, x = ii, value[ index[ii] ],
                        default.units = "native", width = size[ index[ii] ],
                        height = size[ index[ii] ]*(nrow(pch_image)/ncol(pch_image)))
                }
            } else {
                grid.points(seq_along(index), value[index],
                    gp = subset_gp(gp, index), default.units = "native",
                    pch = pch[index], size = size[index])
            }
        }
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_points",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = data_scale,
        var_import = list(value, gp, border, pch, size, axis, axis_param,
            axis_grob, data_scale, pch_as_image)
    )

    # how imported variables are sliced when the annotation is subset
    anno@subset_rule$gp = subset_vector
    if(input_is_matrix) {
        anno@subset_rule$value = subset_matrix_by_row
        if(ncol(value) > 1) {
            # per-column gp cannot be subset by observation index
            anno@subset_rule$gp = NULL
        }
    } else {
        anno@subset_rule$value = subset_vector
        anno@subset_rule$gp = subset_gp
        anno@subset_rule$size = subset_vector
        anno@subset_rule$pch = subset_vector
    }

    anno@subsetable = TRUE

    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# Extend the annotation's bounding box so an outside-facing axis grob is not
# clipped. `extended` is c(bottom, left, top, right) in mm; only the slot on
# the axis side is updated.
update_anno_extend = function(anno, axis_grob, axis_param) {
    extended = anno@extended
    if(is.null(axis_grob)) {
        return(extended)
    }
    if(axis_param$facing == "outside") {
        if(axis_param$side == "left") {
            extended[2] = convertWidth(grobWidth(axis_grob), "mm")
        } else if(axis_param$side == "right") {
            extended[4] = convertWidth(grobWidth(axis_grob), "mm")
        } else if(axis_param$side == "top") {
            extended[3] = convertHeight(grobHeight(axis_grob), "mm")
        } else if(axis_param$side == "bottom") {
            extended[1] = convertHeight(grobHeight(axis_grob), "mm")
        }
    }
    return(extended)
}

# == title
# Lines Annotation
#
# == param
# -x The value vector. The value can be a vector or a matrix. The length of the vector
#    or the number of rows of the matrix is taken as the number of the observations of the annotation.
# -which Whether it is a column annotation or a row annotation?
# -border Whether draw borders of the annotation region?
# -gp Graphic parameters for lines. The length of each graphic parameter can be 1, or number of columns of ``x`` if ``x`` is a matrix.
# -add_points Whether to add points on the lines?
# -smooth If it is ``TRUE``, smoothing by `stats::loess` is performed. If it is ``TRUE``, ``add_points`` is set to ``TRUE`` by default.
# -pch Point type. The length setting is the same as ``gp``.
# -size Point size, the value should be a `grid::unit` object. The length setting is the same as ``gp``.
# -pt_gp Graphic parameters for points. The length setting is the same as ``gp``.
# -ylim Data ranges. By default it is ``range(x)``.
# -extend The extension to both side of ``ylim``. The value is a percent value corresponding to ``ylim[2] - ylim[1]``.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#lines-annotation
#
# == example
# anno = anno_lines(runif(10))
# draw(anno, test = "anno_lines")
# anno = anno_lines(cbind(c(1:5, 1:5), c(5:1, 5:1)), gp = gpar(col = 2:3))
# draw(anno, test = "matrix")
# anno = anno_lines(cbind(c(1:5, 1:5), c(5:1, 5:1)), gp = gpar(col = 2:3),
#     add_points = TRUE, pt_gp = gpar(col = 5:6), pch = c(1, 16))
# draw(anno, test = "matrix")
anno_lines = function(x, which = c("column", "row"), border = TRUE, gp = gpar(),
    add_points = smooth, smooth = FALSE, pch = 16, size = unit(2, "mm"), pt_gp = gpar(),
    ylim = NULL, extend = 0.05, axis = TRUE, axis_param = default_axis_param(which),
    width = NULL, height = NULL) {

    # For standalone calls, open a null device so unit arithmetic has a
    # device to work against; `ef` closes it again on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    # a one-column matrix/data frame behaves like a plain vector
    if(is.data.frame(x)) x = as.matrix(x)
    if(is.matrix(x)) {
        if(ncol(x) == 1) {
            x = x[, 1]
        }
    }
    input_is_matrix = is.matrix(x)

    anno_size = anno_width_and_height(which, width, height, unit(1, "cm"))

    if(is.matrix(x)) {
        n = nrow(x)
        nr = n
        nc = ncol(x)
    } else {
        n = length(x)
        nr = n
        nc = 1
    }

    # line parameters are per column; point parameters are per column for
    # matrix input and per observation for vector input
    if(input_is_matrix) {
        gp = recycle_gp(gp, nc)
        pt_gp = recycle_gp(pt_gp, nc)
        if(length(pch) == 1) pch = rep(pch, nc)
        if(length(size) == 1) size = rep(size, nc)
    } else if(is.atomic(x)) {
        gp = recycle_gp(gp, 1)
        pt_gp = recycle_gp(pt_gp, n)
        if(length(pch) == 1) pch = rep(pch, n)
        if(length(size) == 1) size = rep(size, n)
    }

    if(is.null(ylim)) {
        data_scale = range(x, na.rm = TRUE)
    } else {
        data_scale = ylim
    }
    # extend the value range so lines at the extremes are not clipped
    data_scale = data_scale + c(-extend, extend)*(data_scale[2] - data_scale[1])

    value = x

    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, data_scale) else NULL

    # Drawing closure for row annotations. `k` is the slice index out of `N`
    # heatmap slices; the axis is only drawn on the outermost slice.
    row_fun = function(index, k = 1, N = 1) {
        n = length(index)
        if(axis_param$direction == "reverse") {
            value = data_scale[2] - value + data_scale[1]
        }
        pushViewport(viewport(xscale = data_scale, yscale = c(0.5, n+0.5)))
        if(is.matrix(value)) {
            for(i in seq_len(ncol(value))) {
                x = n - seq_along(index) + 1
                y = value[index, i]
                if(smooth) {
                    # loess-smooth the series and draw the fitted curve
                    fit = loess(y ~ x)
                    x2 = seq(x[1], x[length(x)], length = 100)
                    y2 = predict(fit, x2)
                    grid.lines(y2, x2, gp = subset_gp(gp, i), default.units = "native")
                } else {
                    grid.lines(y, x, gp = subset_gp(gp, i), default.units = "native")
                }
                # add_points can be one flag per column or a single flag
                if(length(add_points) == ncol(value)) {
                    if(add_points[i]) {
                        grid.points(y, x, gp = subset_gp(pt_gp, i), default.units = "native",
                            pch = pch[i], size = size[i])
                    }
                } else {
                    if(add_points) {
                        grid.points(y, x, gp = subset_gp(pt_gp, i), default.units = "native",
                            pch = pch[i], size = size[i])
                    }
                }
            }
        } else {
            x = n - seq_along(index) + 1
            y = value[index]
            if(smooth) {
                fit = loess(y ~ x)
                x2 = seq(x[1], x[length(x)], length = 100)
                y2 = predict(fit, x2)
                grid.lines(y2, x2, gp = gp, default.units = "native")
            } else {
                grid.lines(y, x, gp = gp, default.units = "native")
            }
            if(add_points) {
                grid.points(y, x, gp = subset_gp(pt_gp, index), default.units = "native",
                    pch = pch[index], size = size[index])
            }
        }
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    # Drawing closure for column annotations; same logic, axes transposed.
    column_fun = function(index, k = 1, N = 1) {
        n = length(index)
        if(axis_param$direction == "reverse") {
            value = data_scale[2] - value + data_scale[1]
        }
        pushViewport(viewport(yscale = data_scale, xscale = c(0.5, n+0.5)))
        if(is.matrix(value)) {
            for(i in seq_len(ncol(value))) {
                x = seq_along(index)
                y = value[index, i]
                if(smooth) {
                    fit = loess(y ~ x)
                    x2 = seq(x[1], x[length(x)], length = 100)
                    y2 = predict(fit, x2)
                    grid.lines(x2, y2, gp = subset_gp(gp, i), default.units = "native")
                } else {
                    grid.lines(x, y, gp = subset_gp(gp, i), default.units = "native")
                }
                if(length(add_points) == ncol(value)) {
                    if(add_points[i]) {
                        grid.points(x, y, gp = subset_gp(pt_gp, i), default.units = "native",
                            pch = pch[i], size = size[i])
                    }
                } else {
                    if(add_points) {
                        grid.points(x, y, gp = subset_gp(pt_gp, i), default.units = "native",
                            pch = pch[i], size = size[i])
                    }
                }
            }
        } else {
            x = seq_along(index)
            y = value[index]
            if(smooth) {
                fit = loess(y ~ x)
                x2 = seq(x[1], x[length(x)], length = 100)
                y2 = predict(fit, x2)
                grid.lines(x2, y2, gp = gp, default.units = "native")
            } else {
                grid.lines(x, y, gp = gp, default.units = "native")
            }
            if(add_points) {
                grid.points(seq_along(index), value[index], gp = subset_gp(pt_gp, index),
                    default.units = "native", pch = pch[index], size = size[index])
            }
        }
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        # Bug fix: this was previously "anno_points", which mislabelled the
        # annotation (e.g. in introspection and error reporting).
        fun_name = "anno_lines",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = data_scale,
        var_import = list(value, gp, border, pch, size, pt_gp, axis, axis_param,
            axis_grob, data_scale, add_points, smooth)
    )

    # how imported variables are sliced when the annotation is subset
    anno@subset_rule$gp = subset_vector
    if(input_is_matrix) {
        anno@subset_rule$value = subset_matrix_by_row
        if(ncol(value) > 1) {
            # per-column gp cannot be subset by observation index
            anno@subset_rule$gp = NULL
        }
    } else {
        anno@subset_rule$value = subset_vector
        anno@subset_rule$gp = subset_gp
        anno@subset_rule$pt_gp = subset_gp
        anno@subset_rule$size = subset_vector
        anno@subset_rule$pch = subset_vector
    }

    anno@subsetable = TRUE

    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# == title
# Barplot
# == title
# Barplot Annotation
#
# == param
# -x The value vector. The value can be a vector or a matrix. The length of the vector
#    or the number of rows of the matrix is taken as the number of the observations of the annotation.
#    If ``x`` is a matrix, the barplots will be represented as stacked barplots.
# -baseline baseline of bars. The value should be "min" or "max", or a numeric value. It is enforced to be zero
#           for stacked barplots.
# -which Whether it is a column annotation or a row annotation?
# -border Whether to draw borders of the annotation region?
# -bar_width Relative width of the bars. The value should be smaller than one.
# -gp Graphic parameters for bars. The length of each graphic parameter can be 1, length of ``x`` if ``x``
#     is a vector, or number of columns of ``x`` if ``x`` is a matrix.
# -ylim Data ranges. By default it is ``range(x)`` if ``x`` is a vector, or ``range(rowSums(x))`` if ``x`` is a matrix.
# -extend The extension to both side of ``ylim``. The value is a percent value corresponding to ``ylim[2] - ylim[1]``.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
# -... Other arguments (only used to catch removed legacy arguments).
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#barplot_annotation
#
# == example
# anno = anno_barplot(1:10)
# draw(anno, test = "a vector")
#
# m = matrix(runif(4*10), nc = 4)
# m = t(apply(m, 1, function(x) x/sum(x)))
# anno = anno_barplot(m, gp = gpar(fill = 2:5), bar_width = 1, height = unit(6, "cm"))
# draw(anno, test = "proportion matrix")
anno_barplot = function(x, baseline = 0, which = c("column", "row"), border = TRUE, bar_width = 0.6,
    gp = gpar(fill = "#CCCCCC"), ylim = NULL, extend = 0.05, axis = TRUE,
    axis_param = default_axis_param(which),
    width = NULL, height = NULL, ...) {

    # Reject legacy arguments that were replaced by `axis_param`.
    other_args = list(...)
    if(length(other_args)) {
        if("axis_gp" %in% names(other_args)) {
            stop_wrap("`axis_gp` is removed from the arguments. Use `axis_param = list(gp = ...)` instead.")
        }
        if("axis_side" %in% names(other_args)) {
            stop_wrap("`axis_side` is removed from the arguments. Use `axis_param = list(side = ...)` instead.")
        }
        if("axis_direction" %in% names(other_args)) {
            stop_wrap("`axis_direction` is not supported any more.")
        }
    }

    if(inherits(x, "list")) x = do.call("cbind", x)
    if(inherits(x, "data.frame")) x = as.matrix(x)
    if(inherits(x, "matrix")) {
        # Stacked bars only make sense when each row has a consistent sign.
        sg = apply(x, 1, function(xx) all(sign(xx) %in% c(1, 0)) || all(sign(xx) %in% c(-1, 0)))
        if(!all(sg)) {
            stop_wrap("Since `x` is a matrix, the sign of each row should be either all positive or all negative.")
        }
    }

    # convert everything to matrix
    if(is.null(dim(x))) x = matrix(x, ncol = 1)
    nc = ncol(x)
    if(missing(gp)) {
        # default: evenly spaced greys for the stacked categories
        gp = gpar(fill = grey(seq(0, 1, length = nc+2))[-c(1, nc+2)])
    }
    # For stacked bars the data range is the range of the row sums.
    data_scale = range(rowSums(x, na.rm = TRUE), na.rm = TRUE)
    if(!is.null(ylim)) data_scale = ylim

    # Adjust the data scale according to the requested baseline so that bars
    # always have room to grow away from the baseline.
    if(baseline == "min") {
        data_scale = data_scale + c(0, extend)*(data_scale[2] - data_scale[1])
        baseline = min(x)
    } else if(baseline == "max") {
        data_scale = data_scale + c(-extend, 0)*(data_scale[2] - data_scale[1])
        baseline = max(x)
    } else {
        if(is.numeric(baseline)) {
            if(baseline == 0 && all(abs(rowSums(x) - 1) < 1e-6)) {
                # proportions: fix the scale to [0, 1]
                data_scale = c(0, 1)
            } else if(baseline <= data_scale[1]) {
                data_scale = c(baseline, extend*(data_scale[2] - baseline) + data_scale[2])
            } else if(baseline >= data_scale[2]) {
                data_scale = c(-extend*(baseline - data_scale[1]) + data_scale[1], baseline)
            } else {
                data_scale = data_scale + c(-extend, extend)*(data_scale[2] - data_scale[1])
            }
        }
    }

    # Resolve `which` from the package environment when called inside
    # HeatmapAnnotation(); otherwise open a temporary null device for size
    # calculations, closed via on.exit().
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(1, "cm"))

    # gp recycles per observation for simple bars, per category for stacked bars.
    if(nc == 1) {
        gp = recycle_gp(gp, nrow(x))
    } else {
        gp = recycle_gp(gp, nc)
    }

    value = x
    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, data_scale) else NULL

    # Drawing function for row annotations; `index` selects the observations of
    # slice `k` out of `N` slices.
    row_fun = function(index, k = 1, N = 1) {
        n = length(index)
        if(axis_param$direction == "reverse") {
            # keep the original values; the flipped copy is used for positions
            value_origin = value
            value = data_scale[2] - value + data_scale[1]
            baseline = data_scale[2] - baseline + data_scale[1]
        }
        pushViewport(viewport(xscale = data_scale, yscale = c(0.5, n+0.5)))
        if(ncol(value) == 1) {
            # simple bars: each bar grows from the baseline
            width = value[index] - baseline
            x_coor = width/2+baseline
            grid.rect(x = x_coor, y = n - seq_along(index) + 1, width = abs(width), height = 1*bar_width, default.units = "native", gp = subset_gp(gp, index))
        } else {
            # stacked bars: each category is offset by the cumulative sum of the
            # previous categories
            for(i in seq_len(ncol(value))) {
                if(axis_param$direction == "normal") {
                    width = abs(value[index, i])
                    x_coor = rowSums(value[index, seq_len(i-1), drop = FALSE]) + width/2
                    grid.rect(x = x_coor, y = n - seq_along(index) + 1, width = abs(width), height = 1*bar_width, default.units = "native", gp = subset_gp(gp, i))
                } else {
                    width = value_origin[index, i] # the original width
                    x_coor = rowSums(value_origin[index, seq_len(i-1), drop = FALSE]) + width/2 # distance to the right
                    x_coor = data_scale[2] - x_coor + data_scale[1]
                    grid.rect(x = x_coor, y = n - seq_along(index) + 1, width = abs(width), height = 1*bar_width, default.units = "native", gp = subset_gp(gp, i))
                }
            }
        }
        # Draw the axis only on the slice it belongs to.
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    # Drawing function for column annotations; mirror of row_fun with x/y swapped.
    column_fun = function(index, k = 1, N = 1) {
        n = length(index)
        if(axis_param$direction == "reverse") {
            value_origin = value
            value = data_scale[2] - value + data_scale[1]
            baseline = data_scale[2] - baseline + data_scale[1]
        }
        pushViewport(viewport(yscale = data_scale, xscale = c(0.5, n+0.5)))
        if(ncol(value) == 1) {
            height = value[index] - baseline
            y_coor = height/2+baseline
            grid.rect(y = y_coor, x = seq_along(index), height = abs(height), width = 1*bar_width, default.units = "native", gp = subset_gp(gp, index))
        } else {
            for(i in seq_len(ncol(value))) {
                if(axis_param$direction == "normal") {
                    height = value[index, i]
                    y_coor = rowSums(value[index, seq_len(i-1), drop = FALSE]) + height/2
                    grid.rect(y = y_coor, x = seq_along(index), height = abs(height), width = 1*bar_width, default.units = "native", gp = subset_gp(gp, i))
                } else {
                    height = value_origin[index, i]
                    y_coor = rowSums(value_origin[index, seq_len(i-1), drop = FALSE]) + height/2
                    y_coor = data_scale[2] - y_coor + data_scale[1]
                    grid.rect(y = y_coor, x = seq_along(index), height = abs(height), width = 1*bar_width, default.units = "native", gp = subset_gp(gp, i))
                }
            }
        }
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }
    n = nrow(value)
    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_barplot",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = data_scale,
        var_import = list(value, gp, border, bar_width, baseline, axis, axis_param, axis_grob, data_scale)
    )

    # Subsetting: values subset by row; gp only subsets per observation when the
    # bars are not stacked (for stacked bars gp maps to categories).
    anno@subset_rule$value = subset_matrix_by_row
    if(ncol(value) == 1) {
        anno@subset_rule$gp = subset_gp
    }

    anno@subsetable = TRUE
    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}
# == title
# Boxplot Annotation
#
# == param
# -x A matrix or a list. If ``x`` is a matrix and if ``which`` is ``column``, statistics for boxplots
#    are calculated by columns, if ``which`` is ``row``, the calculation is done by rows.
# -which Whether it is a column annotation or a row annotation?
# -border Whether to draw borders of the annotation region?
# -gp Graphic parameters for the boxes. The length of the graphic parameters should be one or the number of observations.
# -ylim Data ranges.
# -extend The extension to both side of ``ylim``. The value is a percent value corresponding to ``ylim[2] - ylim[1]``.
# -outline Whether to draw outliers of boxplots?
# -box_width Relative width of boxes. The value should be smaller than one.
# -pch Point style (for outliers).
# -size Point size (for outliers).
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
# -... Other arguments (only used to catch removed legacy arguments).
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#box-annotation
#
# == example
# set.seed(123)
# m = matrix(rnorm(100), 10)
# anno = anno_boxplot(m, height = unit(4, "cm"))
# draw(anno, test = "anno_boxplot")
# anno = anno_boxplot(m, height = unit(4, "cm"), gp = gpar(fill = 1:10))
# draw(anno, test = "anno_boxplot with gp")
anno_boxplot = function(x, which = c("column", "row"), border = TRUE,
    gp = gpar(fill = "#CCCCCC"), ylim = NULL, extend = 0.05, outline = TRUE, box_width = 0.6,
    pch = 1, size = unit(2, "mm"), axis = TRUE, axis_param = default_axis_param(which),
    width = NULL, height = NULL, ...) {

    # Reject legacy arguments that were replaced by `axis_param`.
    other_args = list(...)
    if(length(other_args)) {
        if("axis_gp" %in% names(other_args)) {
            stop_wrap("`axis_gp` is removed from the arguments. Use `axis_param = list(gp = ...)` instead.")
        }
        if("axis_side" %in% names(other_args)) {
            stop_wrap("`axis_side` is removed from the arguments. Use `axis_param = list(side = ...)` instead.")
        }
        if("axis_direction" %in% names(other_args)) {
            stop_wrap("`axis_direction` is not supported any more.")
        }
    }

    # Resolve `which` from the package environment when called inside
    # HeatmapAnnotation(); otherwise open a temporary null device, closed via on.exit().
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(2, "cm"))

    ## convert matrix all to list (or data frame)
    # One boxplot per list element; row annotations box-plot the matrix rows.
    if(is.matrix(x)) {
        if(which == "column") {
            value = as.data.frame(x)
        } else if(which == "row") {
            value = as.data.frame(t(x))
        }
    } else {
        value = x
    }

    if(is.null(ylim)) {
        if(!outline) {
            # without outliers the whiskers define the data range
            boxplot_stats = boxplot(value, plot = FALSE)$stats
            data_scale = range(boxplot_stats)
        } else {
            data_scale = range(value, na.rm = TRUE)
        }
    } else {
        data_scale = ylim
    }
    data_scale = data_scale + c(-extend, extend)*(data_scale[2] - data_scale[1])

    n = length(value)
    gp = recycle_gp(gp, n)
    if(length(pch) == 1) pch = rep(pch, n)
    if(length(size) == 1) size = rep(size, n)

    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, data_scale) else NULL

    # Drawing function for row annotations. boxplot_stats rows are, in order:
    # lower whisker, lower hinge, median, upper hinge, upper whisker.
    row_fun = function(index, k = 1, N = 1) {
        if(axis_param$direction == "reverse") {
            value = lapply(value, function(x) data_scale[2] - x + data_scale[1])
        }
        n_all = length(value)
        value = value[index]
        boxplot_stats = boxplot(value, plot = FALSE)$stats
        n = length(index)
        gp = subset_gp(gp, index)
        pch = pch[index]
        size = size[index]

        pushViewport(viewport(xscale = data_scale, yscale = c(0.5, n+0.5)))
        # box body (between the hinges)
        grid.rect(x = boxplot_stats[2, ], y = n - seq_along(index) + 1,
            height = 1*box_width, width = boxplot_stats[4, ] - boxplot_stats[2, ],
            just = "left", default.units = "native", gp = gp)
        # upper whisker cap, whisker lines, lower whisker cap and median line
        grid.segments(boxplot_stats[5, ], n - seq_along(index) + 1 - 0.5*box_width,
            boxplot_stats[5, ], n - seq_along(index) + 1 + 0.5*box_width,
            default.units = "native", gp = gp)
        grid.segments(boxplot_stats[5, ], n - seq_along(index) + 1,
            boxplot_stats[4, ], n - seq_along(index) + 1, default.units = "native", gp = gp)
        grid.segments(boxplot_stats[1, ], n - seq_along(index) + 1,
            boxplot_stats[2, ], n - seq_along(index) + 1, default.units = "native", gp = gp)
        grid.segments(boxplot_stats[1, ], n - seq_along(index) + 1 - 0.5*box_width,
            boxplot_stats[1, ], n - seq_along(index) + 1 + 0.5*box_width,
            default.units = "native", gp = gp)
        grid.segments(boxplot_stats[3, ], n - seq_along(index) + 1 - 0.5*box_width,
            boxplot_stats[3, ], n - seq_along(index) + 1 + 0.5*box_width,
            default.units = "native", gp = gp)
        if(outline) {
            # draw points beyond the whiskers
            for(i in seq_along(value)) {
                l1 = value[[i]] > boxplot_stats[5,i]
                l1[is.na(l1)] = FALSE
                if(sum(l1)) grid.points(y = rep(n - i + 1, sum(l1)), x = value[[i]][l1],
                    default.units = "native", gp = subset_gp(gp, i), pch = pch[i], size = size[i])
                l2 = value[[i]] < boxplot_stats[1,i]
                l2[is.na(l2)] = FALSE
                if(sum(l2)) grid.points(y = rep(n - i + 1, sum(l2)), x = value[[i]][l2],
                    default.units = "native", gp = subset_gp(gp, i), pch = pch[i], size = size[i])
            }
        }
        # Draw the axis only on the slice it belongs to.
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    # Drawing function for column annotations; mirror of row_fun with x/y swapped.
    column_fun = function(index, k = 1, N = 1) {
        if(axis_param$direction == "reverse") {
            value = lapply(value, function(x) data_scale[2] - x + data_scale[1])
        }
        value = value[index]
        boxplot_stats = boxplot(value, plot = FALSE)$stats
        n = length(index)
        gp = subset_gp(gp, index)
        pch = pch[index]
        size = size[index]
        pushViewport(viewport(xscale = c(0.5, n+0.5), yscale = data_scale))
        grid.rect(x = seq_along(index), y = boxplot_stats[2, ],
            height = boxplot_stats[4, ] - boxplot_stats[2, ], width = 1*box_width,
            just = "bottom", default.units = "native", gp = gp)
        grid.segments(seq_along(index) - 0.5*box_width, boxplot_stats[5, ],
            seq_along(index) + 0.5*box_width, boxplot_stats[5, ],
            default.units = "native", gp = gp)
        grid.segments(seq_along(index), boxplot_stats[5, ],
            seq_along(index), boxplot_stats[4, ], default.units = "native", gp = gp)
        grid.segments(seq_along(index), boxplot_stats[1, ],
            seq_along(index), boxplot_stats[2, ], default.units = "native", gp = gp)
        grid.segments(seq_along(index) - 0.5*box_width, boxplot_stats[1, ],
            seq_along(index) + 0.5*box_width, boxplot_stats[1, ],
            default.units = "native", gp = gp)
        grid.segments(seq_along(index) - 0.5*box_width, boxplot_stats[3, ],
            seq_along(index) + 0.5*box_width, boxplot_stats[3, ],
            default.units = "native", gp = gp)
        if(outline) {
            for(i in seq_along(value)) {
                l1 = value[[i]] > boxplot_stats[5,i]
                l1[is.na(l1)] = FALSE
                if(sum(l1)) grid.points(x = rep(i, sum(l1)), y = value[[i]][l1],
                    default.units = "native", gp = subset_gp(gp, i), pch = pch[i], size = size[i])
                l2 = value[[i]] < boxplot_stats[1,i]
                l2[is.na(l2)] = FALSE
                if(sum(l2)) grid.points(x = rep(i, sum(l2)), y = value[[i]][l2],
                    default.units = "native", gp = subset_gp(gp, i), pch = pch[i], size = size[i])
            }
        }
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }
    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_boxplot",
        which = which,
        n = n,
        width = anno_size$width,
        height = anno_size$height,
        data_scale = data_scale,
        var_import = list(value, gp, border, box_width, axis, axis_param,
            axis_grob, data_scale, pch, size, outline)
    )

    # All per-observation slots are subsettable.
    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp
    anno@subset_rule$pch = subset_vector
    anno@subset_rule$size = subset_vector

    anno@subsetable = TRUE
    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#histogram-annotation
#
# == example
# m = matrix(rnorm(1000), nc = 10)
# anno = anno_histogram(t(m), which = "row")
# draw(anno, test = "row histogram")
# anno = anno_histogram(t(m), which = "row", gp = gpar(fill = 1:10))
# draw(anno, test = "row histogram with color")
# anno = anno_histogram(t(m), which = "row", n_breaks = 20)
# draw(anno, test = "row histogram with color")
anno_histogram = function(x, which = c("column", "row"), n_breaks = 11, border = FALSE,
    gp = gpar(fill = "#CCCCCC"),
    axis = TRUE, axis_param = default_axis_param(which),
    width = NULL, height = NULL) {

    # Resolve `which` from the package environment when called inside
    # HeatmapAnnotation(); otherwise open a temporary null device, closed via on.exit().
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(4, "cm"))

    ## convert matrix all to list (or data frame)
    # One histogram per list element; row annotations work on matrix rows.
    if(is.matrix(x)) {
        if(which == "column") {
            value = as.data.frame(x)
        } else if(which == "row") {
            value = as.data.frame(t(x))
        }
    } else {
        value = x
    }

    n = length(value)
    # Use a shared break grid for all histograms so they are comparable.
    x_range =range(unlist(value), na.rm = TRUE)
    histogram_stats = lapply(value, hist, plot = FALSE, breaks = seq(x_range[1], x_range[2], length = n_breaks))
    histogram_breaks = lapply(histogram_stats, function(x) x$breaks)
    histogram_counts = lapply(histogram_stats, function(x) x$counts)
    # x: shared break range (slightly extended); y: shared max count (with headroom)
    xscale = range(unlist(histogram_breaks), na.rm = TRUE)
    xscale = xscale + c(-0.025, 0.025)*(xscale[2] - xscale[1])
    yscale = c(0, max(unlist(histogram_counts)))
    yscale[2] = yscale[2]*1.05

    gp = recycle_gp(gp, n)

    # The axis always maps to the break scale, so a reversed direction is not supported.
    axis_param$direction = "normal"
    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, xscale) else NULL

    # Drawing function for row annotations: one sub-viewport (horizontal strip)
    # per observation, bars drawn as rectangles between consecutive breaks.
    row_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        n = length(index)
        histogram_breaks = histogram_breaks[index]
        histogram_counts = histogram_counts[index]
        gp = subset_gp(gp, index)
        for(i in seq_len(n)) {
            n_breaks = length(histogram_breaks[[i]])
            pushViewport(viewport(x = unit(0, "npc"), y = unit((n-i)/n, "npc"), height = unit(1/n, "npc"), just = c("left", "bottom"), xscale = xscale, yscale = yscale))
            grid.rect(x = histogram_breaks[[i]][-1], y = 0, width = histogram_breaks[[i]][-1] - histogram_breaks[[i]][-n_breaks], height = histogram_counts[[i]], just = c("right", "bottom"), default.units = "native", gp = subset_gp(gp, i))
            popViewport()
        }
        pushViewport(viewport(xscale = xscale))
        # Draw the axis only on the slice it belongs to.
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    # Drawing function for column annotations: vertical strips, axes swapped.
    column_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        # swap x and y scales for the vertical orientation
        foo = yscale
        yscale = xscale
        xscale = foo
        histogram_breaks = histogram_breaks[index]
        histogram_counts = histogram_counts[index]
        n = length(index)
        gp = subset_gp(gp, index)
        for(i in seq_len(n)) {
            n_breaks = length(histogram_breaks[[i]])
            pushViewport(viewport(y = unit(0, "npc"), x = unit(i/n, "npc"), width = unit(1/n, "npc"), just = c("right", "bottom"), xscale = xscale, yscale = yscale))
            grid.rect(y = histogram_breaks[[i]][-1], x = 0, height = histogram_breaks[[i]][-1] - histogram_breaks[[i]][-n_breaks], width = histogram_counts[[i]], just = c("left", "top"), default.units = "native", gp = subset_gp(gp, i))
            popViewport()
        }
        pushViewport(viewport(yscale = yscale))
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }
    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_histogram",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = xscale,
        var_import = list(value, gp, border, axis, axis_param, axis_grob, xscale, yscale, histogram_breaks, histogram_counts)
    )

    # Per-observation slots are subsettable (value and the precomputed stats).
    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp
    anno@subset_rule$histogram_breaks = subset_vector
    anno@subset_rule$histogram_counts = subset_vector

    anno@subsetable = TRUE
    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}
# == title
# Density Annotation
#
# == param
# -x A matrix or a list. If ``x`` is a matrix and if ``which`` is ``column``, statistics for boxplots
#    are calculated by columns, if ``which`` is ``row``, the calculation is done by rows.
# -which Whether it is a column annotation or a row annotation?
# -type Type of graphics to represent density distribution. "lines" for normal density plot; "violin" for violin plot
#       and "heatmap" for heatmap visualization of density distribution.
# -xlim Range on x-axis.
# -heatmap_colors A vector of colors for interpolating density values.
# -joyplot_scale Relative height of density distribution. A value higher than 1 increases the height of the density
#                distribution and the plot will represented as so-called "joyplot".
# -border Whether to draw borders of the annotation region?
# -gp Graphic parameters for the density shapes. The length of the graphic parameters should be one or the number of observations.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#density-annotation
#
# == example
# m = matrix(rnorm(100), 10)
# anno = anno_density(m, which = "row")
# draw(anno, test = "normal density")
# anno = anno_density(m, which = "row", type = "violin")
# draw(anno, test = "violin")
# anno = anno_density(m, which = "row", type = "heatmap")
# draw(anno, test = "heatmap")
# anno = anno_density(m, which = "row", type = "heatmap",
#     heatmap_colors = c("white", "orange"))
# draw(anno, test = "heatmap, colors")
anno_density = function(x, which = c("column", "row"),
    type = c("lines", "violin", "heatmap"),
    xlim = NULL, heatmap_colors = rev(brewer.pal(name = "RdYlBu", n = 11)),
    joyplot_scale = 1, border = TRUE, gp = gpar(fill = "#CCCCCC"),
    axis = TRUE, axis_param = default_axis_param(which),
    width = NULL, height = NULL) {

    # Resolve `which` from the package environment when called inside
    # HeatmapAnnotation(); otherwise open a temporary null device, closed via on.exit().
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(4, "cm"))

    ## convert matrix all to list (or data frame)
    # One density curve per list element; row annotations work on matrix rows.
    if(is.matrix(x)) {
        if(which == "column") {
            value = as.data.frame(x)
        } else if(which == "row") {
            value = as.data.frame(t(x))
        }
    } else {
        value = x
    }

    n = length(value)
    gp = recycle_gp(gp, n)
    type = match.arg(type)[1]

    n_all = length(value)
    # Precompute the kernel density estimates once, outside the drawing functions.
    density_stats = lapply(value, density, na.rm = TRUE)
    density_x = lapply(density_stats, function(x) x$x)
    density_y = lapply(density_stats, function(x) x$y)
    min_density_x = min(unlist(density_x))
    max_density_x = max(unlist(density_x))

    if(is.null(xlim)) {
        xscale = range(unlist(density_x), na.rm = TRUE)
    } else {
        # Clip each curve to xlim and close the polygon by dropping the ends to 0.
        xscale = xlim
        for(i in seq_len(n)) {
            l = density_x[[i]] >= xscale[1] & density_x[[i]] <= xscale[2]
            density_x[[i]] = density_x[[i]][l]
            density_y[[i]] = density_y[[i]][l]
            density_x[[i]] = c(density_x[[i]][ 1 ], density_x[[i]], density_x[[i]][ length(density_x[[i]]) ])
            density_y[[i]] = c(0, density_y[[i]], 0)
        }
    }
    # Set up the cross scale depending on the representation type.
    if(type == "lines") {
        xscale = xscale + c(-0.025, 0.025)*(xscale[2] - xscale[1])
        yscale = c(0, max(unlist(density_y)))
        yscale[2] = yscale[2]*1.05
    } else if(type == "violin") {
        # symmetric scale because the curve is mirrored
        xscale = xscale + c(-0.025, 0.025)*(xscale[2] - xscale[1])
        yscale = max(unlist(density_y))
        yscale = c(-yscale*1.05, yscale*1.05)
    } else if(type == "heatmap") {
        # density mapped to color instead of a coordinate
        yscale = c(0, 1)
        min_y = min(unlist(density_y))
        max_y = max(unlist(density_y))
        col_fun = colorRamp2(seq(min_y, max_y, length = length(heatmap_colors)), heatmap_colors)
    }

    # The axis always maps to the value scale, so a reversed direction is not supported.
    axis_param$direction = "normal"
    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, xscale) else NULL

    # Drawing function for row annotations: one horizontal strip per observation.
    row_fun = function(index, k = 1, N = 1) {
        n = length(index)
        value = value[index]
        gp = subset_gp(gp, index)
        density_x = density_x[index]
        density_y = density_y[index]
        for(i in seq_len(n)) {
            pushViewport(viewport(x = unit(0, "npc"), y = unit((n-i)/n, "npc"), just = c("left", "bottom"), height = unit(1/n, "npc"), xscale = xscale, yscale = yscale))
            if(type == "lines") {
                grid.polygon(x = density_x[[i]], y = density_y[[i]]*joyplot_scale, default.units = "native", gp = subset_gp(gp, i))
            } else if(type == "violin") {
                # mirrored density polygon plus a minimal box stat overlay
                grid.polygon(x = c(density_x[[i]], rev(density_x[[i]])), y = c(density_y[[i]], -rev(density_y[[i]])), default.units = "native", gp = subset_gp(gp, i))
                box_stat = boxplot(value[[i]], plot = FALSE)$stat
                grid.lines(box_stat[1:2, 1], c(0, 0), default.units = "native", gp = subset_gp(gp, i))
                grid.lines(box_stat[4:5, 1], c(0, 0), default.units = "native", gp = subset_gp(gp, i))
                grid.points(box_stat[3, 1], 0, default.units = "native", pch = 3, size = unit(1, "mm"), gp = subset_gp(gp, i))
            } else if(type == "heatmap") {
                # cell color = mean density between consecutive grid points;
                # the two outer rects pad the strip to the global x range
                n_breaks = length(density_x[[i]])
                grid.rect(x = density_x[[i]][-1], y = 0, width = density_x[[i]][-1] - density_x[[i]][-n_breaks], height = 1, just = c("right", "bottom"), default.units = "native", gp = gpar(fill = col_fun((density_y[[i]][-1] + density_y[[i]][-n_breaks])/2), col = NA))
                grid.rect(x = density_x[[i]][1], y = 0, width = density_x[[i]][1] - min_density_x, height = 1, just = c("right", "bottom"), default.units = "native", gp = gpar(fill = col_fun(0), col = NA))
                grid.rect(x = density_x[[i]][n_breaks], y = 0, width = max_density_x - density_x[[i]][n_breaks], height = 1, just = c("left", "bottom"), default.units = "native", gp = gpar(fill = col_fun(0), col = NA))
            }
            popViewport()
        }
        pushViewport(viewport(xscale = xscale))
        # Draw the axis only on the slice it belongs to.
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    # Drawing function for column annotations: vertical strips, axes swapped.
    column_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        # swap x and y scales, then recompute them for the vertical orientation
        foo = yscale
        yscale = xscale
        xscale = foo
        density_x = density_x[index]
        density_y = density_y[index]
        yscale = range(unlist(density_x), na.rm = TRUE)
        yscale = yscale + c(0, 0.05)*(yscale[2] - yscale[1])
        if(type == "lines") {
            xscale = c(0, max(unlist(density_y)))
            xscale[2] = xscale[2]*1.05
        } else if(type == "violin") {
            xscale = max(unlist(density_y))
            xscale = c(-xscale*1.05, xscale*1.05)
        } else if(type == "heatmap") {
            yscale = range(unlist(density_x), na.rm = TRUE)
            xscale = c(0, 1)
            min_y = min(unlist(density_y))
            max_y = max(unlist(density_y))
            col_fun = colorRamp2(seq(min_y, max_y, length = length(heatmap_colors)), heatmap_colors)
        }
        n = length(index)
        gp = subset_gp(gp, index)
        for(i in rev(seq_len(n))) {
            pushViewport(viewport(y = unit(0, "npc"), x = unit(i/n, "npc"), width = unit(1/n, "npc"), just = c("right", "bottom"), xscale = xscale, yscale = yscale))
            if(type == "lines") {
                grid.polygon(y = density_x[[i]], x = density_y[[i]]*joyplot_scale, default.units = "native", gp = subset_gp(gp, i))
            } else if(type == "violin") {
                grid.polygon(y = c(density_x[[i]], rev(density_x[[i]])), x = c(density_y[[i]], -rev(density_y[[i]])), default.units = "native", gp = subset_gp(gp, i))
                box_stat = boxplot(value[[i]], plot = FALSE)$stat
                grid.lines(y = box_stat[1:2, 1], x = c(0, 0), default.units = "native", gp = subset_gp(gp, i))
                grid.lines(y = box_stat[4:5, 1], x = c(0, 0), default.units = "native", gp = subset_gp(gp, i))
                grid.points(y = box_stat[3, 1], x = 0, default.units = "native", pch = 3, size = unit(1, "mm"), gp = subset_gp(gp, i))
            } else if(type == "heatmap") {
                n_breaks = length(density_x[[i]])
                grid.rect(y = density_x[[i]][-1], x = 0, height = density_x[[i]][-1] - density_x[[i]][-n_breaks], width = 1, just = c("left", "top"), default.units = "native", gp = gpar(fill = col_fun((density_y[[i]][-1] + density_y[[i]][-n_breaks])/2), col = NA))
                grid.rect(y = density_x[[i]][1], x = 0, height = density_x[[i]][1] - min_density_x, width = 1, just = c("left", "top"), default.units = "native", gp = gpar(fill = col_fun(0), col = NA))
                grid.rect(y = density_x[[i]][n_breaks], x = 0, height = max_density_x - density_x[[i]][n_breaks], width = 1, just = c("left", "bottom"), default.units = "native", gp = gpar(fill = col_fun(0), col = NA))
            }
            popViewport()
        }
        pushViewport(viewport(yscale = yscale))
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }
    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_density",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = xscale,
        var_import = list(value, gp, border, type, axis, axis_param, axis_grob, xscale, yscale,
            density_x, density_y, min_density_x, max_density_x, joyplot_scale, heatmap_colors)
    )
    # The color mapping function only exists for the heatmap representation.
    if(type == "heatmap") {
        anno@var_env$col_fun = col_fun
    }

    # Per-observation slots are subsettable (value and the precomputed curves).
    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp
    anno@subset_rule$density_x = subset_vector
    anno@subset_rule$density_y = subset_vector

    anno@subsetable = TRUE
    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}
update_anno_extend(anno, axis_grob, axis_param) return(anno) } # == title # Text Annotation # # == param # -x A vector of text. # -which Whether it is a column annotation or a row annotation? # -gp Graphic parameters. # -rot Rotation of the text, pass to `grid::grid.text`. # -just Justification of text, pass to `grid::grid.text`. # -offset Depracated, use ``location`` instead. # -location Position of the text. By default ``rot``, ``just`` and ``location`` are automatically # inferred according to whether it is a row annotation or column annotation. The value # of ``location`` should be a `grid::unit` object, normally in ``npc`` unit. E.g. ``unit(0, 'npc')`` # means the most left of the annotation region and ``unit(1, 'npc')`` means the most right of # the annotation region. # -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation. # -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation. # # == value # An annotation function which can be used in `HeatmapAnnotation`. 
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#text-annotation
#
# == example
# anno = anno_text(month.name)
# draw(anno, test = "month names")
# anno = anno_text(month.name, gp = gpar(fontsize = 16))
# draw(anno, test = "month names with fontsize")
# anno = anno_text(month.name, gp = gpar(fontsize = 1:12+4))
# draw(anno, test = "month names with changing fontsize")
# anno = anno_text(month.name, which = "row")
# draw(anno, test = "month names on rows")
# anno = anno_text(month.name, location = 0, rot = 45,
#     just = "left", gp = gpar(col = 1:12))
# draw(anno, test = "with rotations")
# anno = anno_text(month.name, location = 1,
#     rot = 45, just = "right", gp = gpar(fontsize = 1:12+4))
# draw(anno, test = "with rotations")
anno_text = function(x, which = c("column", "row"), gp = gpar(),
    rot = guess_rot(), just = guess_just(),
    offset = guess_location(), location = guess_location(),
    width = NULL, height = NULL) {

    # `ef` is the clean-up callback run by on.exit(). When the function is
    # called standalone (not from inside HeatmapAnnotation()), a null
    # graphics device is opened so that the text-size calculations below
    # have a device to work on; `ef` then closes it again.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        # called from inside HeatmapAnnotation()/rowAnnotation(); the
        # orientation was already decided there
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    n = length(x)
    gp = recycle_gp(gp, n)

    # The defaults of `rot`/`just`/`location` depend on `which`. Since R
    # evaluates default arguments lazily (in the function's own frame),
    # these helpers can be defined here, after `which` is finalized, and
    # still serve as the formal defaults above.
    guess_rot = function() {
        ifelse(which == "column", 90, 0)
    }
    guess_just = function() {
        ifelse(which == "column", "right", "left")
    }
    guess_location = function() {
        unit(ifelse(which == "column", 1, 0), "npc")
    }

    rot = rot[1] %% 360
    just = just[1]

    # `offset` is kept only for backward compatibility; it maps to `location`.
    if(!missing(offset)) {
        warning_wrap("`offset` is deprecated, use `location` instead.")
        if(missing(location)) {
            location = offset
        }
    }
    location = location[1]
    if(!inherits(location, "unit")) {
        location = unit(location, "npc")
    }

    # For column annotations, shift `location` by half a character height
    # (projected through the rotation angle) so that rotated text is
    # anchored visually at the requested position.
    if(which == "column") {
        if("right" %in% just) {
            if(rot < 180) {
                location = location - 0.5*grobHeight(textGrob("A", gp = gp))*abs(cos(rot/180*pi))
            } else {
                location = location + 0.5*grobHeight(textGrob("A", gp = gp))*abs(cos(rot/180*pi))
            }
        } else if("left" %in% just) {
            if(rot < 180) {
                location = location + 0.5*grobHeight(textGrob("A", gp = gp))*abs(cos(rot/180*pi))
            } else {
                location = location - 0.5*grobHeight(textGrob("A", gp = gp))*abs(cos(rot/180*pi))
            }
        }
    }

    # The annotation extent orthogonal to the heatmap is derived from the
    # maximal text extent, projected through the rotation angle.
    if(which == "column") {
        if(missing(height)) {
            height = max_text_width(x, gp = gp)*abs(sin(rot/180*pi)) + grobHeight(textGrob("A", gp = gp))*abs(cos(rot/180*pi))
            height = convertHeight(height, "mm")
        }
        if(missing(width)) {
            width = unit(1, "npc")
        }
    }
    if(which == "row") {
        if(missing(width)) {
            width = max_text_width(x, gp = gp)*abs(cos(rot/180*pi)) + grobHeight(textGrob("A", gp = gp))*abs(sin(rot/180*pi))
            width = convertWidth(width, "mm")
        }
        if(missing(height)) {
            height = unit(1, "npc")
        }
    }
    anno_size = list(width = width, height = height)

    value = x

    # Drawing function for row annotations; `index` holds the (possibly
    # reordered) row indices of the current heatmap slice.
    row_fun = function(index) {
        n = length(index)
        gp = subset_gp(gp, index)
        # `border`/`fill` in `gp` trigger a background rectangle per label;
        # gp2 translates them to rect graphics parameters.
        gp2 = gp
        if("border" %in% names(gp2)) gp2$col = gp2$border
        if("fill" %in% names(gp2)) {
            if(!"border" %in% names(gp2)) gp2$col = gp2$fill
        }
        if(any(c("border", "fill") %in% names(gp2))) {
            grid.rect(y = (n - seq_along(index) + 0.5)/n, height = 1/n, gp = gp2)
        }
        # rows are laid out top-to-bottom, hence the (n - i + 0.5)/n positions
        grid.text(value[index], location, (n - seq_along(index) + 0.5)/n, gp = gp,
            just = just, rot = rot)
        # if(add_lines) {
        #     if(n > 1) {
        #         grid.segments(0, (n - seq_along(index)[-n])/n, 1, (n - seq_along(index)[-n])/n, default.units = "native")
        #     }
        # }
    }

    # Column counterpart of row_fun; extra arguments appear unused here but
    # keep the signature compatible with how annotation functions are called.
    column_fun = function(index, k = NULL, N = NULL, vp_name = NULL) {
        n = length(index)
        gp = subset_gp(gp, index)
        gp2 = gp
        if("border" %in% names(gp2)) gp2$col = gp2$border
        if("fill" %in% names(gp2)) {
            if(!"border" %in% names(gp2)) gp2$col = gp2$fill
        }
        if(any(c("border", "fill") %in% names(gp2))) {
            grid.rect(x = (seq_along(index) - 0.5)/n, width = 1/n, gp = gp2)
        }
        grid.text(value[index], (seq_along(index) - 0.5)/n, location, gp = gp,
            just = just, rot = rot)
    }

    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_text",
        which = which,
        width = width,
        height = height,
        n = n,
        var_import = list(value, gp, just, rot, location),
        show_name = FALSE
    )

    # subsetting rules so the annotation can be sliced together with the heatmap
    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp

    anno@subsetable = TRUE

    return(anno)
}

# == title
# Joyplot Annotation
#
# == param
# -x A matrix or a list. If ``x`` is a matrix or a data frame, columns correspond to observations.
# -which Whether it is a column annotation or a row annotation?
# -gp Graphic parameters for the boxes. The length of the graphic parameters should be one or the number of observations.
# -scale Relative height of the curve. A value higher than 1 increases the height of the curve.
# -transparency Transparency of the filled colors. Value should be between 0 and 1.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#joyplot-annotation
#
# == example
# m = matrix(rnorm(1000), nc = 10)
# lt = apply(m, 2, function(x) data.frame(density(x)[c("x", "y")]))
# anno = anno_joyplot(lt, width = unit(4, "cm"), which = "row")
# draw(anno, test = "joyplot")
# anno = anno_joyplot(lt, width = unit(4, "cm"), which = "row", gp = gpar(fill = 1:10))
# draw(anno, test = "joyplot + col")
# anno = anno_joyplot(lt, width = unit(4, "cm"), which = "row", scale = 1)
# draw(anno, test = "joyplot + scale")
#
# m = matrix(rnorm(5000), nc = 50)
# lt = apply(m, 2, function(x) data.frame(density(x)[c("x", "y")]))
# anno = anno_joyplot(lt, width = unit(4, "cm"), which = "row", gp = gpar(fill = NA), scale = 4)
# draw(anno, test = "joyplot")
anno_joyplot = function(x, which = c("column", "row"), gp = gpar(fill = "#000000"),
    scale = 2, transparency = 0.6,
    axis = TRUE, axis_param = default_axis_param(which),
    width = NULL, height = NULL) {

    # When called standalone (outside HeatmapAnnotation()), open a null
    # graphics device for the size calculations and close it on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(4, "cm"))

    ## convert matrix all to list (or data frame)
    # Normalize the input into a list of two-column (x, y) matrices:
    # - matrix/data frame: each column becomes one curve, x = 1..nrow
    # - list of equal-length atomic vectors: x = 1..length
    # - list of two-column matrices: used as is
    if(is.matrix(x) || is.data.frame(x)) {
        value = vector("list", ncol(x))
        for(i in seq_len(ncol(x))) {
            value[[i]] = cbind(seq_len(nrow(x)), x[, i])
        }
    } else if(inherits(x, "list")){
        if(all(sapply(x, is.atomic))) {
            if(length(unique(sapply(x, length))) == 1) {
                value = vector("list", length(x))
                for(i in seq_len(length(x))) {
                    value[[i]] = cbind(seq_along(x[[i]]), x[[i]])
                }
            } else {
                stop_wrap("Since x is a list, x need to be a list of two-column matrices.")
            }
        } else {
            value = x
        }
    } else {
        stop_wrap("The input should be a list of two-column matrices or a matrix/data frame.")
    }

    # shared x scale over all curves, expanded by 2.5% on both sides
    xscale = range(lapply(value, function(x) x[, 1]), na.rm = TRUE)
    xscale = xscale + c(-0.025, 0.025)*(xscale[2] - xscale[1])
    # shared y scale; curves are filled from the baseline 0 upwards
    yscale = range(lapply(value, function(x) x[, 2]), na.rm = TRUE)
    yscale[1] = 0
    yscale[2] = yscale[2]*1.05

    n = length(value)
    if(!"fill" %in% names(gp)) {
        gp$fill = "#000000"
    }
    gp = recycle_gp(gp, n)
    gp$fill = add_transparency(gp$fill, transparency)

    # reversed axis direction is not supported for joyplots
    axis_param$direction = "normal"
    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, xscale) else NULL

    # Drawing function for row annotations. `k` is the index of the current
    # heatmap slice, `N` the total number of slices; the axis is drawn only
    # on the outermost slice.
    row_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        n = length(index)
        gp = subset_gp(gp, index)
        for(i in seq_len(n)) {
            # one viewport per curve, stacked top to bottom; curves may
            # overflow their strip when `scale` > 1 (the joyplot effect)
            pushViewport(viewport(x = unit(0, "npc"), y = unit((n-i)/n, "npc"),
                just = c("left", "bottom"), height = unit(1/n, "npc"),
                xscale = xscale, yscale = yscale))
            x0 = value[[i]][, 1]
            y0 = value[[i]][, 2]*scale
            # close the polygon down to the baseline y = 0
            x0 = c(x0[1], x0, x0[length(x0)])
            y0 = c(0, y0, 0)
            # fill without outline, then draw the outline separately
            gppp = subset_gp(gp, i); gppp$col = NA
            grid.polygon(x = x0, y = y0, default.units = "native", gp = gppp)
            grid.lines(x = x0, y = y0, default.units = "native", gp = subset_gp(gp, i))
            popViewport()
        }
        pushViewport(viewport(xscale = xscale))
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        popViewport()
    }

    # Column counterpart: same logic with x and y roles swapped.
    column_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        foo = yscale
        yscale = xscale
        xscale = foo
        n = length(index)
        gp = subset_gp(gp, index)
        for(i in seq_len(n)) {
            pushViewport(viewport(y = unit(0, "npc"), x = unit(i/n, "npc"),
                width = unit(1/n, "npc"), just = c("right", "bottom"),
                xscale = xscale, yscale = yscale))
            x0 = value[[i]][, 2]*scale
            y0 = value[[i]][ ,1]
            x0 = c(0, x0, 0)
            y0 = c(y0[1], y0, y0[length(y0)])
            gppp = subset_gp(gp, i); gppp$col = NA
            grid.polygon(y = y0, x = x0, default.units = "native", gp = gppp)
            grid.lines(y = y0, x = x0, default.units = "native", gp = subset_gp(gp, i))
            popViewport()
        }
        pushViewport(viewport(yscale = yscale))
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        popViewport()
    }

    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_joyplot",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = xscale,
        var_import = list(value, gp, axis, axis_param, axis_grob, scale, yscale, xscale)
    )

    # subsetting rules so the annotation can be sliced together with the heatmap
    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp

    anno@subsetable = TRUE
    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# == title
# Horizon chart Annotation
#
# == param
# -x A matrix or a list. If ``x`` is a matrix or a data frame, columns correspond to observations.
# -which Whether it is a column annotation or a row annotation?
# -gp Graphic parameters for the boxes. The length of the graphic parameters should be one or the number of observations.
#     There are two unstandard parameters specificly for horizon chart: ``pos_fill`` and ``neg_fill`` controls the filled
#     color for positive values and negative values.
# -n_slice Number of slices on y-axis.
# -slice_size Height of the slice. If the value is not ``NULL``, ``n_slice`` will be recalculated.
# -negative_from_top Whether the areas for negative values start from the top or the bottom of the plotting region?
# -normalize Whether normalize ``x`` by max(abs(x)).
# -gap Gap size of neighbouring horizon chart.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == detail
# Horizon chart as row annotation is only supported.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#horizon-chart-annotation
#
# == example
# lt = lapply(1:20, function(x) cumprod(1 + runif(1000, -x/100, x/100)) - 1)
# anno = anno_horizon(lt, which = "row")
# draw(anno, test = "horizon chart")
# anno = anno_horizon(lt, which = "row",
#     gp = gpar(pos_fill = "orange", neg_fill = "darkgreen"))
# draw(anno, test = "horizon chart, col")
# anno = anno_horizon(lt, which = "row", negative_from_top = TRUE)
# draw(anno, test = "horizon chart + negative_from_top")
# anno = anno_horizon(lt, which = "row", gap = unit(1, "mm"))
# draw(anno, test = "horizon chart + gap")
# anno = anno_horizon(lt, which = "row",
#     gp = gpar(pos_fill = rep(c("orange", "red"), each = 10),
#         neg_fill = rep(c("darkgreen", "blue"), each = 10)))
# draw(anno, test = "horizon chart, col")
anno_horizon = function(x, which = c("column", "row"),
    gp = gpar(pos_fill = "#D73027", neg_fill = "#313695"),
    n_slice = 4, slice_size = NULL, negative_from_top = FALSE,
    normalize = TRUE, gap = unit(0, "mm"),
    axis = TRUE, axis_param = default_axis_param(which),
    width = NULL, height = NULL) {

    # When called standalone (outside HeatmapAnnotation()), open a null
    # graphics device for the size calculations and close it on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(4, "cm"))

    ## convert matrix all to list (or data frame)
    # Normalize the input into a list of two-column (x, y) matrices, the
    # same convention as in anno_joyplot().
    if(is.matrix(x) || is.data.frame(x)) {
        value = vector("list", ncol(x))
        for(i in seq_len(ncol(x))) {
            value[[i]] = cbind(seq_len(nrow(x)), x[, i])
        }
    } else if(inherits(x, "list")){
        if(all(sapply(x, is.atomic))) {
            if(length(unique(sapply(x, length))) == 1) {
                value = vector("list", length(x))
                for(i in seq_len(length(x))) {
                    value[[i]] = cbind(seq_along(x[[i]]), x[[i]])
                }
            } else {
                stop_wrap("Since x is a list, x need to be a list of two-column matrices.")
            }
        } else {
            value = x
        }
    } else {
        stop_wrap("The input should be a list of two-column matrices or a matrix/data frame.")
    }

    # Fill defaults for the two non-standard gpar fields; a plain `fill` is
    # only honoured when all values share one sign, otherwise it is ignored
    # and the default two-color scheme is restored.
    if(is.null(gp$pos_fill)) gp$pos_fill = "#D73027"
    if(is.null(gp$neg_fill)) gp$neg_fill = "#313695"

    if("fill" %in% names(gp)) {
        foo = unlist(lapply(value, function(x) x[, 2]))
        if(all(foo >= 0)) {
            gp$pos_fill = gp$fill
        } else if(all(foo <= 0)) {
            gp$neg_fill = gp$fill
        } else {
            gp = gpar(pos_fill = "#D73027", neg_fill = "#313695")
        }
    }

    if(which == "column") {
        stop_wrap("anno_horizon() does not support column annotation.")
    }

    # optionally rescale every track to [-1, 1] by its own max absolute value
    if(normalize) {
        value = lapply(value, function(m) {
            m[, 2] = m[, 2]/max(abs(m[, 2]))
            m
        })
    }

    n = length(value)
    xscale = range(lapply(value, function(x) x[, 1]), na.rm = TRUE)
    yscale = range(lapply(value, function(x) abs(x[, 2])), na.rm = TRUE)

    # reversed axis direction is not supported for horizon charts
    axis_param$direction = "normal"
    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, xscale) else NULL

    # Drawing function for row annotations. `k`/`N` are the slice index and
    # slice count; the axis is drawn only on the outermost slice.
    row_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        # derive slice_size from n_slice (or vice versa); slice_size wins
        if(is.null(slice_size)) {
            slice_size = yscale[2]/n_slice
        }
        n_slice = ceiling(yscale[2]/slice_size)
        n = length(index)
        gp = subset_gp(gp, index)
        for(i in seq_len(n)) {
            # one strip per track, stacked top to bottom, shrunk by `gap`
            pushViewport(viewport(x = unit(0, "npc"), y = unit((n-i)/n, "npc"),
                just = c("left", "bottom"), height = unit(1/n, "npc") - gap))
            sgp = subset_gp(gp, i)
            horizon_chart(value[[i]][, 1], value[[i]][, 2], n_slice = n_slice, slice_size = slice_size,
                negative_from_top = negative_from_top,
                pos_fill = sgp$pos_fill, neg_fill = sgp$neg_fill)
            grid.rect(gp = gpar(fill = "transparent"))
            popViewport()
        }
        pushViewport(viewport(xscale = xscale))
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        popViewport()
    }

    # column annotation is rejected above; this stub only keeps the
    # row/column dispatch below uniform
    column_fun = function(index) {

    }

    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_horizon",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = xscale,
        var_import = list(value, gp, axis, axis_param, axis_grob, n_slice, slice_size, negative_from_top, xscale, yscale, gap)
    )

    # subsetting rules so the annotation can be sliced together with the heatmap
    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp

    anno@subsetable = TRUE
    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# Draw a single horizon chart in the current viewport. The y range is cut
# into `n_slice` bands of height `slice_size`; every band is folded down
# onto the [0, slice_size] strip and filled with an increasingly saturated
# color, positive and negative values separately.
horizon_chart = function(x, y, n_slice = 4, slice_size,
    pos_fill = "#D73027", neg_fill = "#313695",
    negative_from_top = FALSE) {

    if(missing(slice_size)) {
        slice_size = max(abs(y))/n_slice
    }
    n_slice = ceiling(max(abs(y))/slice_size)

    # all-zero input: nothing to draw
    if(n_slice == 0) {
        return(invisible(NULL))
    }

    # deeper slices get darker fills
    pos_col_fun = colorRamp2(c(0, n_slice), c("white", pos_fill))
    neg_col_fun = colorRamp2(c(0, n_slice), c("white", neg_fill))

    pushViewport(viewport(xscale = range(x), yscale = c(0, slice_size)))
    for(i in seq_len(n_slice)) {
        # l1: points inside slice i, l2: below it (masked), l3: above it (clipped)
        l1 = y >= (i-1)*slice_size & y < i*slice_size
        l2 = y < (i-1)*slice_size
        l3 = y >= i*slice_size
        if(any(l1)) {
            x2 = x
            y2 = y
            y2[l1] = y2[l1] - slice_size*(i-1)
            y2[l3] = slice_size
            # NAs split the curve into separate polygons in add_horizon_polygon()
            x2[l2] = NA
            y2[l2] = NA
            add_horizon_polygon(x2, y2, gp = gpar(fill = pos_col_fun(i), col = NA), default.units = "native")
        }
    }
    # repeat for the negative part, mirrored to positive values
    y = -y
    for(i in seq_len(n_slice)) {
        l1 = y >= (i-1)*slice_size & y < i*slice_size
        l2 = y < (i-1)*slice_size
        l3 = y >= i*slice_size
        if(any(l1)) {
            x2 = x
            y2 = y
            y2[l1] = y2[l1] - slice_size*(i-1)
            y2[l3] = slice_size
            x2[l2] = NA
            y2[l2] = NA
            add_horizon_polygon(x2, y2, slice_size = slice_size, from_top = negative_from_top,
                gp = gpar(fill = neg_col_fun(i), col = NA), default.units = "native")
        }
    }
    popViewport()
}

# x and y may contain NA, split x and y by NA gaps, align the bottom to y = 0
# (or hang the polygons from the top of the slice when `from_top` is TRUE).
add_horizon_polygon = function(x, y, slice_size = NULL, from_top = FALSE, ...) {
    ltx = split_vec_by_NA(x)
    lty = split_vec_by_NA(y)

    for(i in seq_along(ltx)) {
        x0 = ltx[[i]]
        y0 = lty[[i]]
        if(from_top) {
            x0 = c(x0[1], x0, x0[length(x0)])
            y0 = c(slice_size, slice_size - y0, slice_size)
        } else {
            x0 = c(x0[1], x0, x0[length(x0)])
            y0 = c(0, y0, 0)
        }
        grid.polygon(x0, y0, ...)
} } # https://stat.ethz.ch/pipermail/r-help/2010-April/237031.html split_vec_by_NA = function(x) { idx = 1 + cumsum(is.na(x)) not.na = !is.na(x) split(x[not.na], idx[not.na]) } # == title # Points as Row Annotation # # == param # -... pass to `anno_points`. # # == details # A wrapper of `anno_points` with pre-defined ``which`` to ``row``. # # You can directly use `anno_points` for row annotation if you call it in `rowAnnotation`. # # == value # See help page of `anno_points`. # row_anno_points = function(...) { if(exists(".__under_SingleAnnotation__", envir = parent.frame())) { message_wrap("From version 1.99.0, you can directly use `anno_points()` for row annotation if you call it in `rowAnnotation()`.") } anno_points(..., which = "row") } # == title # Barplots as Row Annotation # # == param # -... pass to `anno_barplot`. # # == details # A wrapper of `anno_barplot` with pre-defined ``which`` to ``row``. # # You can directly use `anno_barplot` for row annotation if you call it in `rowAnnotation`. # # == value # See help page of `anno_barplot`. # row_anno_barplot = function(...) { if(exists(".__under_SingleAnnotation__", envir = parent.frame())) { message_wrap("From version 1.99.0, you can directly use `anno_barplot()` for row annotation if you call it in `rowAnnotation()`.") } anno_barplot(..., which = "row") } # == title # Boxplots as Row Annotation # # == param # -... pass to `anno_boxplot`. # # == details # A wrapper of `anno_boxplot` with pre-defined ``which`` to ``row``. # # You can directly use `anno_boxplot` for row annotation if you call it in `rowAnnotation`. # # == value # See help page of `anno_boxplot`. # row_anno_boxplot = function(...) { if(exists(".__under_SingleAnnotation__", envir = parent.frame())) { message_wrap("From version 1.99.0, you can directly use `anno_boxplot()` for row annotation if you call it in `rowAnnotation()`.") } anno_boxplot(..., which = "row") } # == title # Histograms as Row Annotation # # == param # -... 
pass to `anno_histogram`. # # == details # A wrapper of `anno_histogram` with pre-defined ``which`` to ``row``. # # You can directly use `anno_histogram` for row annotation if you call it in `rowAnnotation`. # # == value # See help page of `anno_histogram`. # row_anno_histogram = function(...) { if(exists(".__under_SingleAnnotation__", envir = parent.frame())) { message_wrap("From version 1.99.0, you can directly use `anno_histogram()` for row annotation if you call it in `rowAnnotation()`.") } anno_histogram(..., which = "row") } # == title # Density as Row Annotation # # == param # -... pass to `anno_density`. # # == details # A wrapper of `anno_density` with pre-defined ``which`` to ``row``. # # You can directly use `anno_density` for row annotation if you call it in `rowAnnotation`. # # == value # See help page of `anno_density`. # row_anno_density = function(...) { if(exists(".__under_SingleAnnotation__", envir = parent.frame())) { message_wrap("From version 1.99.0, you can directly use `anno_density()` for row annotation if you call it in `rowAnnotation()`.") } anno_density(..., which = "row") } # == title # Text as Row Annotation # # == param # -... pass to `anno_text`. # # == details # A wrapper of `anno_text` with pre-defined ``which`` to ``row``. # # You can directly use `anno_text` for row annotation if you call it in `rowAnnotation`. # # == value # See help page of `anno_text`. # row_anno_text = function(...) { if(exists(".__under_SingleAnnotation__", envir = parent.frame())) { message_wrap("From version 1.99.0, you can directly use `anno_text()` for row annotation if you call it in `rowAnnotation()`.") } anno_text(..., which = "row") } # == title # Link annotation with labels # # == param # -at Numeric index from the original matrix. # -labels Corresponding labels. # -which Whether it is a column annotation or a row annotation? # -side Side of the labels. 
If it is a column annotation, valid values are "top" and "bottom"; # If it is a row annotation, valid values are "left" and "right". # -lines_gp Please use ``link_gp`` instead. # -link_gp Graphic settings for the segments. # -labels_gp Graphic settings for the labels. # -labels_rot Rotations of labels, scalar. # -padding Padding between neighbouring labels in the plot. # -link_width Width of the segments. # -link_height Similar as ``link_width``, used for column annotation. # -extend By default, the region for the labels has the same width (if it is a column annotation) or # same height (if it is a row annotation) as the heatmap. The size can be extended by this options. # The value can be a proportion number or a `grid::unit` object. The length can be either one or two. # # == details # Sometimes there are many rows or columns in the heatmap and we want to mark some of the rows. # This annotation function is used to mark these rows and connect labels and corresponding rows # with links. # # == value # An annotation function which can be used in `HeatmapAnnotation`. 
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#mark-annotation
#
# == example
# anno = anno_mark(at = c(1:4, 20, 60, 97:100), labels = month.name[1:10], which = "row")
# draw(anno, index = 1:100, test = "anno_mark")
#
# m = matrix(1:1000, byrow = TRUE, nr = 100)
# anno = anno_mark(at = c(1:4, 20, 60, 97:100), labels = month.name[1:10], which = "row")
# Heatmap(m, cluster_rows = FALSE, cluster_columns = FALSE) + rowAnnotation(mark = anno)
# Heatmap(m) + rowAnnotation(mark = anno)
anno_mark = function(at, labels, which = c("column", "row"),
    side = ifelse(which == "column", "top", "right"),
    lines_gp = gpar(), labels_gp = gpar(),
    labels_rot = ifelse(which == "column", 90, 0),
    padding = unit(1, "mm"), link_width = unit(5, "mm"), link_height = link_width,
    link_gp = lines_gp,
    extend = unit(0, "mm")) {

    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
    } else {
        which = .ENV$current_annotation_which
    }

    # FIX: convert a logical `at` BEFORE the numeric check. Previously the
    # `is.logical()` branch sat after the `!is.numeric()` stop and was
    # unreachable, so logical indices were always rejected.
    if(is.logical(at)) at = which(at)

    if(!is.numeric(at)) {
        stop_wrap(paste0("`at` should be numeric ", which, " index corresponding to the matrix."))
    }

    # silent recycling/NA labels would produce a misleading plot
    if(length(at) != length(labels)) {
        stop_wrap("Length of `labels` should be the same as the length of `at`.")
    }

    n = length(at)
    if(n < 1) {
        # nothing to mark: fall back to an empty annotation
        return(anno_empty(which = which, border = FALSE))
    }
    link_gp = recycle_gp(link_gp, n)
    labels_gp = recycle_gp(labels_gp, n)

    # keep at/labels/gpar all sorted by position
    od = order(at)
    at = at[od]
    labels = labels[od]
    link_gp = subset_gp(link_gp, od)
    labels_gp = subset_gp(labels_gp, od)

    # lookup tables: label -> rank (for gpar subsetting) and
    # position -> label (for restricting to the current slice)
    labels2index = structure(seq_along(at), names = as.character(labels))
    at2labels = structure(labels, names = at)

    if(length(extend) == 1) extend = rep(extend, 2)
    if(length(extend) > 2) extend = extend[1:2]
    if(!inherits(extend, "unit")) extend = unit(extend, "npc")

    # reserve room for the links plus the widest/tallest label
    if(which == "row") {
        height = unit(1, "npc")
        width = link_width + max_text_width(labels, gp = labels_gp, rot = labels_rot)
    } else {
        # FIX: the reserved height for a column annotation is derived from
        # `link_height` (previously `link_width`, which disagreed with the
        # drawing code below whenever the two differ).
        height = link_height + max_text_height(labels, gp = labels_gp, rot = labels_rot)
        width = unit(1, "npc")
    }

    # optional externally-injected positions/scale (kept in var_import)
    .pos = NULL
    .scale = NULL

    labels_rot = labels_rot %% 360

    # a bare number for `padding` means "this many character heights"
    if(!inherits(padding, "unit")) {
        padding = convertHeight(padding*grobHeight(textGrob("a", gp = subset_gp(labels_gp, 1))), "mm")
    }

    # Drawing function for row annotations; labels are vertically spread by
    # smartAlign() so they do not overlap, then connected to their rows by
    # three-segment links.
    row_fun = function(index) {

        if(is_RStudio_current_dev()) {
            if(ht_opt$message) {
                message_wrap("It seems you are using RStudio IDE. `anno_mark()` needs to work with the physical size of the graphics device. It only generates correct plot in the figure panel, while in the zoomed plot (by clicking the icon 'Zoom') or in the exported plot (by clicking the icon 'Export'), the connection to heatmap rows/columns might be wrong. You can directly use e.g. pdf() to save the plot into a file.\n\nUse `ht_opt$message = FALSE` to turn off this message.")
            }
        }

        n = length(index)

        # restrict at/labels to the rows present in this slice
        at = intersect(index, at)
        if(length(at) == 0) {
            return(NULL)
        }
        labels = rev(at2labels[as.character(at)])
        labels_gp = subset_gp(labels_gp, labels2index[as.character(labels)])
        link_gp = subset_gp(link_gp, labels2index[as.character(labels)])

        if(is.null(.scale)) {
            .scale = c(0.5, n+0.5)
        }

        pushViewport(viewport(xscale = c(0, 1), yscale = .scale))
        if(inherits(extend, "unit")) extend = convertHeight(extend, "native", valueOnly = TRUE)
        # label extent along the y direction depends on the rotation
        if(labels_rot %in% c(90, 270)) {
            text_height = convertHeight(text_width(labels, gp = labels_gp) + padding, "native", valueOnly = TRUE)
        } else {
            text_height = convertHeight(text_height(labels, gp = labels_gp) + padding, "native", valueOnly = TRUE)
        }
        if(is.null(.pos)) {
            i2 = rev(which(index %in% at))
            pos = n-i2+1 # position of rows
        } else {
            pos = .pos[rev(which(index %in% at))]
        }
        # spread overlapping labels inside the (possibly extended) scale
        h1 = pos - text_height*0.5
        h2 = pos + text_height*0.5
        pos_adjusted = smartAlign(h1, h2, c(.scale[1] - extend[1], .scale[2] + extend[2]))
        h = (pos_adjusted[, 1] + pos_adjusted[, 2])/2

        n2 = length(labels)
        if(side == "right") {
            if(labels_rot == 90) {
                just = c("center", "top")
            } else if(labels_rot == 270) {
                just = c("center", "bottom")
            } else if(labels_rot > 90 & labels_rot < 270 ) {
                just = c("right", "center")
            } else {
                just = c("left", "center")
            }
        } else {
            if(labels_rot == 90) {
                just = c("center", "bottom")
            } else if(labels_rot == 270) {
                just = c("center", "top")
            } else if(labels_rot > 90 & labels_rot < 270 ) {
                just = c("left", "center")
            } else {
                just = c("right", "center")
            }
        }
        # draw the labels, then the three segments of each link:
        # row -> 1/3, 1/3 -> 2/3 (diagonal to the label), 2/3 -> label
        if(side == "right") {
            grid.text(labels, rep(link_width, n2), h, default.units = "native", gp = labels_gp, rot = labels_rot, just = just)
            link_width = link_width - unit(1, "mm")
            grid.segments(unit(rep(0, n2), "npc"), pos, rep(link_width*(1/3), n2), pos, default.units = "native", gp = link_gp)
            grid.segments(rep(link_width*(1/3), n2), pos, rep(link_width*(2/3), n2), h, default.units = "native", gp = link_gp)
            grid.segments(rep(link_width*(2/3), n2), h, rep(link_width, n2), h, default.units = "native", gp = link_gp)
        } else {
            grid.text(labels, unit(1, "npc")-rep(link_width, n2), h, default.units = "native", gp = labels_gp, rot = labels_rot, just = just)
            link_width = link_width - unit(1, "mm")
            grid.segments(unit(rep(1, n2), "npc"), pos, unit(1, "npc")-rep(link_width*(1/3), n2), pos, default.units = "native", gp = link_gp)
            grid.segments(unit(1, "npc")-rep(link_width*(1/3), n2), pos, unit(1, "npc")-rep(link_width*(2/3), n2), h, default.units = "native", gp = link_gp)
            grid.segments(unit(1, "npc")-rep(link_width*(2/3), n2), h, unit(1, "npc")-rep(link_width, n2), h, default.units = "native", gp = link_gp)
        }
        upViewport()
    }

    # Column counterpart of row_fun: identical logic with x/y swapped.
    column_fun = function(index) {

        if(is_RStudio_current_dev()) {
            if(ht_opt$message) {
                message_wrap("It seems you are using RStudio IDE. `anno_mark()` needs to work with the physical size of the graphics device. It only generates correct plot in the figure panel, while in the zoomed plot (by clicking the icon 'Zoom') or in the exported plot (by clicking the icon 'Export'), the connection to heatmap rows/columns might be wrong. You can directly use e.g. pdf() to save the plot into a file.\n\nUse `ht_opt$message = FALSE` to turn off this message.")
            }
        }

        n = length(index)

        # restrict at/labels to the columns present in this slice
        at = intersect(index, at)
        if(length(at) == 0) {
            return(NULL)
        }
        labels = at2labels[as.character(at)]
        labels_gp = subset_gp(labels_gp, labels2index[as.character(labels)])
        link_gp = subset_gp(link_gp, labels2index[as.character(labels)])

        if(is.null(.scale)) {
            .scale = c(0.5, n+0.5)
        }

        pushViewport(viewport(yscale = c(0, 1), xscale = .scale))
        if(inherits(extend, "unit")) extend = convertWidth(extend, "native", valueOnly = TRUE)
        if(labels_rot %in% c(0, 180)) {
            text_height = convertWidth(text_width(labels, gp = labels_gp) + padding, "native", valueOnly = TRUE)
        } else {
            text_height = convertWidth(text_height(labels, gp = labels_gp) + padding, "native", valueOnly = TRUE)
        }
        if(is.null(.pos)) {
            i2 = which(index %in% at)
            pos = i2 # position of columns
        } else {
            pos = .pos[which(index %in% at)]
        }
        h1 = pos - text_height*0.5
        h2 = pos + text_height*0.5
        pos_adjusted = smartAlign(h1, h2, c(.scale[1] - extend[1], .scale[2] + extend[2]))
        h = (pos_adjusted[, 1] + pos_adjusted[, 2])/2

        n2 = length(labels)
        if(side == "top") {
            if(labels_rot == 0) {
                just = c("center", "bottom")
            } else if(labels_rot == 180) {
                just = c("center", "top")
            } else if(labels_rot > 0 & labels_rot < 180 ) {
                just = c("left", "center")
            } else {
                just = c("right", "center")
            }
        } else {
            if(labels_rot == 0) {
                just = c("center", "top")
            } else if(labels_rot == 180) {
                just = c("center", "bottom")
            } else if(labels_rot > 0 & labels_rot < 180 ) {
                just = c("right", "center")
            } else {
                just = c("left", "center")
            }
        }
        if(side == "top") {
            grid.text(labels, h, rep(link_height, n2), default.units = "native", gp = labels_gp, rot = labels_rot, just = just)
            link_height = link_height - unit(1, "mm")
            grid.segments(pos, unit(rep(0, n2), "npc"), pos, rep(link_height*(1/3), n2), default.units = "native", gp = link_gp)
            grid.segments(pos, rep(link_height*(1/3), n2), h, rep(link_height*(2/3), n2), default.units = "native", gp = link_gp)
            # FIX: the last segment repeated the unit `n` times (number of
            # all columns) instead of `n2` (number of labels); grid then
            # recycled the coordinates and drew spurious segments whenever
            # n != n2.
            grid.segments(h, rep(link_height*(2/3), n2), h, rep(link_height, n2), default.units = "native", gp = link_gp)
        } else {
            grid.text(labels, h, unit(1, "npc")-rep(link_height, n2), default.units = "native", gp = labels_gp, rot = labels_rot, just = just)
            link_height = link_height - unit(1, "mm")
            grid.segments(pos, unit(rep(1, n2), "npc"), pos, unit(1, "npc")-rep(link_height*(1/3), n2), default.units = "native", gp = link_gp)
            grid.segments(pos, unit(1, "npc")-rep(link_height*(1/3), n2), h, unit(1, "npc")-rep(link_height*(2/3), n2), default.units = "native", gp = link_gp)
            grid.segments(h, unit(1, "npc")-rep(link_height*(2/3), n2), h, unit(1, "npc")-rep(link_height, n2), default.units = "native", gp = link_gp)
        }
        upViewport()
    }

    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_mark",
        which = which,
        width = width,
        height = height,
        n = -1,
        var_import = list(at, labels2index, at2labels, link_gp, labels_gp, labels_rot,
            padding, .pos, .scale, side, link_width, link_height, extend),
        show_name = FALSE
    )

    # subsetting keeps only the marks whose positions survive the slice
    anno@subset_rule$at = subset_by_intersect
    anno@subsetable = TRUE

    # remember the original call arguments (after ordering by `at`)
    attr(anno, "called_args") = list(
        at = at,
        labels = labels,
        which = which,
        side = side,
        labels_gp = labels_gp,
        labels_rot = labels_rot,
        padding = padding,
        link_width = link_width,
        link_height = link_height,
        link_gp = link_gp,
        extend = extend
    )

    return(anno)
}

# subset rule used by anno_mark(): keep the positions present in the slice
subset_by_intersect = function(x, i) {
    intersect(x, i)
}

# == title
# Link Annotation
#
# == param
# -... Pass to `anno_zoom`.
#
# == details
# This function is the same as `anno_zoom`. It links subsets of rows or columns to a list of graphic regions.
#
anno_link = function(...) {
    anno_zoom(...)
}

# == title
# Summary Annotation
#
# == param
# -which Whether it is a column annotation or a row annotation?
# -border Whether draw borders of the annotation region?
# -bar_width Relative width of the bars. The value should be smaller than one.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -ylim Data ranges. ``ylim`` for barplot is enforced to be ``c(0, 1)``.
# -extend The extension to both side of ``ylim``. The value is a percent value corresponding to ``ylim[2] - ylim[1]``. This argument is only for boxplot.
# -outline Whether draw outline of boxplots?
# -box_width Relative width of boxes. The value should be smaller than one.
# -pch Point style.
# -size Point size.
# -gp Graphic parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == detail
# ``anno_summary`` is a special annotation function that it only works for one-column or one-row heatmap.
# It shows the summary of the values in the heatmap. If the values in the heatmap is discrete,
# the proportion of each level (the sum is normalized to 1) is visualized as stacked barplot. If the heatmap
# is split into multiple slices, multiple bars are put in the annotation. If the value is continuous, boxplot is used.
#
# In the barplot, the color schema is used as the same as the heatmap, while for the boxplot, the color needs
# to be controlled by ``gp``.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#summary-annotation
#
# == example
# ha = HeatmapAnnotation(summary = anno_summary(height = unit(4, "cm")))
# v = sample(letters[1:2], 50, replace = TRUE)
# split = sample(letters[1:2], 50, replace = TRUE)
# Heatmap(v, top_annotation = ha, width = unit(1, "cm"), split = split)
#
# ha = HeatmapAnnotation(summary = anno_summary(gp = gpar(fill = 2:3), height = unit(4, "cm")))
# v = rnorm(50)
# Heatmap(v, top_annotation = ha, width = unit(1, "cm"), split = split)
anno_summary = function(which = c("column", "row"), border = TRUE, bar_width = 0.8,
    axis = TRUE, axis_param = default_axis_param(which),
    ylim = NULL, extend = 0.05, outline = TRUE, box_width = 0.6,
    pch = 1, size = unit(2, "mm"), gp = gpar(),
    width = NULL, height = NULL) {

    # When called outside HeatmapAnnotation(), `which` is resolved from the
    # argument and a null device is opened so that unit conversions are possible;
    # `ef` is the cleanup hook run by on.exit().
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        # Inside HeatmapAnnotation(), the orientation is imposed globally.
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(2, "cm"))

    axis_param = validate_axis_param(axis_param, which)
    # For barplots the data scale is always c(0, 1) (proportions); a user-supplied
    # `ylim` overrides it (only meaningful for the boxplot case).
    if(is.null(ylim)) {
        axis_grob = if(axis) construct_axis_grob(axis_param, which, c(0, 1)) else NULL
    } else {
        axis_grob = if(axis) construct_axis_grob(axis_param, which, ylim) else NULL
    }

    # Drawing function used when the annotation is a row annotation.
    # NOTE(review): the heatmap object is fetched from the caller's frame stack
    # (parent.frame(7)); this depends on the exact call depth inside the drawing
    # pipeline — confirm before refactoring.
    row_fun = function(index) {
        ht = get("object", envir = parent.frame(7))
        mat = ht@matrix
        cm = ht@matrix_color_mapping
        order_list = ht@column_order_list
        ng = length(order_list)
        if(cm@type == "discrete") {
            # One stacked bar per heatmap slice: proportions of each level.
            tl = lapply(order_list, function(od) table(mat[1, od]))
            tl = lapply(tl, function(x) x/sum(x))
            pushViewport(viewport(yscale = c(0.5, ng+0.5), xscale = c(0, 1)))
            for(i in 1:ng) {
                x = i
                y = cumsum(tl[[i]])
                grid.rect(y, x, height = bar_width, width = tl[[i]], just = "right",
                    gp = gpar(fill = map_to_colors(cm, names(y))), default.units = "native")
            }
            if(axis) grid.draw(axis_grob)
            if(border) grid.rect(gp = gpar(fill = "transparent"))
            popViewport()
        } else {
            # NOTE(review): the continuous (boxplot) case is not implemented for
            # the row direction in this block — nothing is drawn here.
        }
    }
    # Drawing function used when the annotation is a column annotation.
    column_fun = function(index) {
        ht = get("object", envir = parent.frame(7))
        mat = ht@matrix
        cm = ht@matrix_color_mapping
        order_list = ht@row_order_list
        ng = length(order_list)
        if(cm@type == "discrete") {
            # NOTE(review): this `ylim` guard exists only in column_fun, not in
            # row_fun above — possibly intentional, verify upstream.
            if(!is.null(ylim)) {
                stop_wrap("For discrete matrix, `ylim` is not allowed to set. It is always c(0, 1).")
            }
            tl = lapply(order_list, function(od) table(mat[od, 1]))
            tl = lapply(tl, function(x) x/sum(x))
            pushViewport(viewport(xscale = c(0.5, ng+0.5), yscale = c(0, 1)))
            for(i in 1:ng) {
                x = i
                y = cumsum(tl[[i]])
                grid.rect(x, y, width = bar_width, height = tl[[i]], just = "top",
                    gp = gpar(fill = map_to_colors(cm, names(y))), default.units = "native")
            }
            if(axis) grid.draw(axis_grob)
            if(border) grid.rect(gp = gpar(fill = "transparent"))
            popViewport()
        } else {
            # Continuous values: one boxplot per slice.
            vl = lapply(order_list, function(od) mat[od, 1])
            nv = length(vl)
            if(is.null(ylim)) {
                if(!outline) {
                    # Without outliers, the scale is based on the whisker range only.
                    boxplot_stats = boxplot(vl, plot = FALSE)$stats
                    data_scale = range(boxplot_stats)
                } else {
                    data_scale = range(vl, na.rm = TRUE)
                }
            } else {
                data_scale = ylim
            }
            # Extend the data range on both sides by the `extend` fraction.
            data_scale = data_scale + c(-extend, extend)*(data_scale[2] - data_scale[1])
            if(is.null(ylim)) {
                # Rebuild the axis because the scale was computed from the data.
                axis_param = validate_axis_param(axis_param, which)
                axis_grob = if(axis) construct_axis_grob(axis_param, which, data_scale) else NULL
            }
            gp = recycle_gp(gp, nv)
            if(length(pch) == 1) pch = rep(pch, nv)
            if(length(size) == 1) size = rep(size, nv)
            pushViewport(viewport(xscale = c(0.5, ng+0.5), yscale = data_scale))
            for(i in 1:ng) {
                x = i
                v = vl[[i]]
                grid.boxplot(v, pos = x, box_width = box_width, gp = subset_gp(gp, i),
                    pch = pch, size = size, outline = outline)
            }
            if(axis) grid.draw(axis_grob)
            if(border) grid.rect(gp = gpar(fill = "transparent"))
            popViewport()
        }
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }
    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_summary",
        which = which,
        width = width,
        height = height,
        var_import = list(bar_width, border, axis, axis_grob, axis_param, which,
            ylim, extend, outline, box_width, pch, size, gp),
        n = 1,
        show_name = FALSE
    )
    # The summary annotation always corresponds to the whole (single) heatmap
    # column/row, so it cannot be subset.
    anno@subsetable = FALSE
    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# == title
# Block annotation
#
# == param
# -gp Graphic parameters.
# -labels Labels put on blocks.
# -labels_gp Graphic parameters for labels.
# -labels_rot Rotation for labels.
# -labels_offset Positions of the labels. It controls offset on y-directions for column annotation and on x-direction for row annotation.
# -labels_just Justification of the labels.
# -which Is it a row annotation or a column annotation?
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
# -show_name Whether show annotation name.
#
# == details
# The block annotation is used for representing slices. The length of all arguments should be 1 or the number of slices.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#block-annotation
#
# == example
# Heatmap(matrix(rnorm(100), 10),
#     top_annotation = HeatmapAnnotation(foo = anno_block(gp = gpar(fill = 2:4),
#         labels = c("group1", "group2", "group3"), labels_gp = gpar(col = "white"))),
#     column_km = 3,
#     left_annotation = rowAnnotation(foo = anno_block(gp = gpar(fill = 2:4),
#         labels = c("group1", "group2", "group3"), labels_gp = gpar(col = "white"))),
#     row_km = 3)
anno_block = function(gp = gpar(), labels = NULL, labels_gp = gpar(),
    labels_rot = ifelse(which == "row", 90, 0),
    labels_offset = unit(0.5, "npc"), labels_just = "center",
    which = c("column", "row"), width = NULL, height = NULL, show_name = FALSE) {

    # Orientation is taken from the globally pushed annotation direction if the
    # function is constructed inside HeatmapAnnotation().
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
    } else {
        which = .ENV$current_annotation_which
    }

    if(length(labels)) {
        # Auto-size the annotation so the labels fit, but only when the user did
        # not pass width/height explicitly (hence the missing() checks).
        if(which == "column") {
            if(missing(height)) {
                height = grobHeight(textGrob(labels, rot = labels_rot, gp = labels_gp))
                height = height + unit(5, "mm")
            } else {
                if(!inherits(height, "unit")) {
                    stop_wrap("Since you specified `height`, the value should be `unit` object.")
                }
            }
        } else {
            if(missing(width)) {
                width = grobWidth(textGrob(labels, rot = labels_rot, gp = labels_gp))
                width = width + unit(5, "mm")
            } else {
                if(!inherits(width, "unit")) {
                    stop_wrap("Since you specified `width`, the value should be `unit` object.")
                }
            }
        }
    }

    anno_size = anno_width_and_height(which, width, height, unit(5, "mm"))

    # The drawing function is called once per slice: `k` is the slice index and
    # `n` is the total number of slices; graphic parameters are recycled per slice.
    fun = function(index, k, n) {
        gp = subset_gp(recycle_gp(gp, n), k)
        grid.rect(gp = gp)
        if(length(labels)) {
            if(length(labels) != n) {
                stop_wrap("Length of `labels` should be as same as number of slices.")
            }
            label = labels[k]
            labels_gp = subset_gp(recycle_gp(labels_gp, n), k)
            x = y = unit(0.5, "npc")
            # The offset applies along the direction orthogonal to the slices.
            if(which == "column") y = labels_offset
            if(which == "row") x = labels_offset
            grid.text(label, x = x, y = y, gp = labels_gp, rot = labels_rot, just = labels_just)
        }
    }
    anno = AnnotationFunction(
        fun = fun,
        n = NA,
        fun_name = "anno_block",
        which = which,
        var_import = list(gp, labels, labels_gp, labels_rot, labels_offset, labels_just, which),
        subset_rule = list(),
        subsetable = TRUE,
        height = anno_size$height,
        width = anno_size$width,
        show_name = show_name
    )
    return(anno)
}

# == title
# Zoom annotation
#
# == param
# -align_to It defines how the boxes correspond to the rows or the columns in the heatmap.
#     If the value is a list of indices, each box corresponds to the rows or columns with indices
#     in one vector in the list. If the value is a categorical variable (e.g. a factor or a character vector)
#     that has the same length as the rows or columns in the heatmap, each box corresponds to the rows/columns
#     in each level in the categorical variable.
# -panel_fun A self-defined function that defines how to draw graphics in the box. The function must have
#     a ``index`` argument which is the indices for the rows/columns that the box corresponds to. It can
#     have second argument ``nm`` which is the "name" of the selected part in the heatmap. The corresponding
#     value for ``nm`` comes from ``align_to`` if it is specified as a categorical variable or a list with names.
# -which Whether it is a column annotation or a row annotation?
# -side Side of the boxes. If it is a column annotation, valid values are "top" and "bottom";
#     If it is a row annotation, valid values are "left" and "right".
# -size The size of boxes. It can be pure numeric that they are treated as relative fractions of the total
#     height/width of the heatmap. The value of ``size`` can also be absolute units.
# -gap Gaps between boxes.
# -link_gp Graphic settings for the segments.
# -link_width Width of the segments.
# -link_height Similar as ``link_width``, used for column annotation.
# -extend By default, the region for the labels has the same width (if it is a column annotation) or
#     same height (if it is a row annotation) as the heatmap. The size can be extended by this option.
#     The value can be a proportion number or a `grid::unit` object. The length can be either one or two.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
# -internal_line Internally used.
#
# == details
# `anno_zoom` creates several plotting regions (boxes) which can be corresponded to subsets of rows/columns in the
# heatmap.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#zoom-annotation
#
# == example
# set.seed(123)
# m = matrix(rnorm(100*10), nrow = 100)
# subgroup = sample(letters[1:3], 100, replace = TRUE, prob = c(1, 5, 10))
# rg = range(m)
# panel_fun = function(index, nm) {
#     pushViewport(viewport(xscale = rg, yscale = c(0, 2)))
#     grid.rect()
#     grid.xaxis(gp = gpar(fontsize = 8))
#     grid.boxplot(m[index, ], pos = 1, direction = "horizontal")
#     grid.text(paste("distribution of group", nm), mean(rg), y = 1.9,
#         just = "top", default.units = "native", gp = gpar(fontsize = 10))
#     popViewport()
# }
# anno = anno_zoom(align_to = subgroup, which = "row", panel_fun = panel_fun,
#     size = unit(2, "cm"), gap = unit(1, "cm"), width = unit(4, "cm"))
# Heatmap(m, right_annotation = rowAnnotation(foo = anno), row_split = subgroup)
#
anno_zoom = function(align_to, panel_fun = function(index, nm = NULL) { grid.rect() },
    which = c("column", "row"), side = ifelse(which == "column", "top", "right"),
    size = NULL, gap = unit(1, "mm"),
    link_width = unit(5, "mm"), link_height = link_width, link_gp = gpar(),
    extend = unit(0, "mm"), width = NULL, height = NULL, internal_line = TRUE) {

    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
    } else {
        which = .ENV$current_annotation_which
    }

    anno_size = anno_width_and_height(which, width, height, unit(2, "cm") + link_width)

    # align_to should be
    # 1. a vector of class labels that the length should be same as the nrow of the matrix
    # 2. a list of numeric indices
    if(is.list(align_to)) {
        if(!any(sapply(align_to, is.numeric))) {
            stop_wrap(paste0("`at` should be numeric ", which, " index corresponding to the matrix."))
        }
    }

    .pos = NULL # position of the rows

    # Ensure panel_fun accepts a second `nm` argument even if the user defined
    # it with only the `index` argument.
    if(length(as.list(formals(panel_fun))) == 1) {
        formals(panel_fun) = alist(index = , nm = NULL)
    }

    # `extend` is normalized to a 2-element unit (one value per side).
    if(length(extend) == 1) extend = rep(extend, 2)
    if(length(extend) > 2) extend = extend[1:2]
    if(!inherits(extend, "unit")) extend = unit(extend, "npc")

    # anno_zoom is always executed in one-slice mode (which means multiple slices
    # are treated as one big slice)
    row_fun = function(index) {

        if(is_RStudio_current_dev()) {
            if(ht_opt$message) {
                message_wrap("It seems you are using RStudio IDE. `anno_zoom()`/`anno_link()` needs to work with the physical size of the graphics device. It only generates correct plot in the figure panel, while in the zoomed plot (by clicking the icon 'Zoom') or in the exported plot (by clicking the icon 'Export'), the connection to heatmap rows/columns might be wrong. You can directly use e.g. pdf() to save the plot into a file.\n\nUse `ht_opt$message = FALSE` to turn off this message.")
            }
        }

        n = length(index)

        # A categorical `align_to` is converted into a named list of index vectors,
        # one element per level, ordered by first appearance in `index`.
        if(is.atomic(align_to)) {
            if(length(setdiff(align_to, index)) == 0 && !any(duplicated(align_to))) {
                align_to = list(align_to)
            } else {
                if(length(align_to) != n) {
                    stop_wrap("If `align_to` is a vector with group labels, the length should be the same as the number of rows in the heatmap.")
                }
                lnm = as.character(unique(align_to[index]))
                align_to = as.list(tapply(seq_along(align_to), align_to, function(x) x))
                align_to = align_to[lnm]
            }
        }

        ## adjust index order
        align_to = lapply(align_to, function(x) intersect(index, x))
        nrl = sapply(align_to, length)

        # For each group, compute the runs of consecutive positions (start `s`,
        # end `e`) so that one link polygon can be drawn per contiguous run.
        align_to_df = lapply(align_to, function(x) {
            ind = which(index %in% x)
            n = length(ind)
            s = NULL
            e = NULL
            s[1] = ind[1]
            if(n > 1) {
                ind2 = which(ind[2:n] - ind[1:(n-1)] > 1)
                if(length(ind2)) s = c(s, ind[ ind2 + 1 ])
                k = length(s)
                e[k] = ind[length(ind)]
                if(length(ind2)) e[1:(k-1)] = ind[1:(n-1)][ ind2 ]
            } else {
                e = ind[1]
            }
            data.frame(s = s, e = e)
        })

        # pos is from top to bottom
        if(is.null(.pos)) {
            pos = (n:1 - 0.5)/n # position of rows
        } else {
            pos = .pos
        }
        .scale = c(0, 1)

        pushViewport(viewport(xscale = c(0, 1), yscale = .scale))

        if(inherits(extend, "unit")) extend = convertHeight(extend, "native", valueOnly = TRUE)

        # the position of boxes initially are put evenly
        # add the gap
        n_boxes = length(align_to)
        if(length(gap) == 1) gap = rep(gap, n_boxes)
        if(is.null(size)) size = nrl
        if(length(size) == 1) size = rep(size, length(align_to))
        if(length(size) != length(align_to)) {
            stop_wrap("Length of `size` should be the same as the number of groups of indices.")
        }
        if(!inherits(size, "unit")) {
            # Relative sizes: boxes tile the full extended scale, proportional
            # to `size`, with gaps subtracted.
            size_is_unit = FALSE
            if(n_boxes == 1) {
                h = data.frame(bottom = .scale[1] - extend[1], top = .scale[2] + extend[2])
            } else {
                size = as.numeric(size)
                gap = convertHeight(gap, "native", valueOnly = TRUE)
                box_height = size/sum(size) * (1 + sum(extend) - sum(gap[1:(n_boxes-1)]))
                h = data.frame(
                    top = cumsum(box_height) + cumsum(gap) - gap[length(gap)] - extend[1]
                )
                h$bottom = h$top - box_height
                h = 1 - h[, 2:1]
                colnames(h) = c("top", "bottom")
            }
        } else {
            # Absolute sizes: center each box on its group's rows, then resolve
            # overlaps with smartAlign2().
            size_is_unit = TRUE
            box_height = size
            box_height2 = box_height # box_height2 adds the gap
            for(i in 1:n_boxes) {
                if(i == 1 || i == n_boxes) {
                    if(n_boxes > 1) {
                        box_height2[i] = box_height2[i] + gap[i]*0.5
                    }
                } else {
                    box_height2[i] = box_height2[i] + gap[i]
                }
            }
            box_height2 = convertHeight(box_height2, "native", valueOnly = TRUE)
            # the original positions of boxes
            mean_pos = sapply(align_to_df, function(df) mean((pos[df[, 1]] + pos[df[, 2]])/2))
            h1 = mean_pos - box_height2*0.5
            h2 = mean_pos + box_height2*0.5
            h = smartAlign2(rev(h1), rev(h2), c(.scale[1] - extend[1], .scale[2] + extend[2]))
            colnames(h) = c("bottom", "top")
            h = h[nrow(h):1, , drop = FALSE]
            # recalculate h to remove gaps
            gap_height = convertHeight(gap, "native", valueOnly = TRUE)
            if(n_boxes > 1) {
                for(i in 1:n_boxes) {
                    if(i == 1) {
                        h[i, "bottom"] = h[i, "bottom"] + gap_height[i]/2
                    } else if(i == n_boxes) {
                        h[i, "top"] = h[i, "top"] - gap_height[i]/2
                    } else {
                        h[i, "bottom"] = h[i, "bottom"] + gap_height[i]/2
                        h[i, "top"] = h[i, "top"] - gap_height[i]/2
                    }
                }
            }
        }
        popViewport()

        # draw boxes
        if(side == "right") {
            pushViewport(viewport(x = link_width, just = "left", width = anno_size$width - link_width))
        } else {
            pushViewport(viewport(x = 0, just = "left", width = anno_size$width - link_width))
        }
        for(i in 1:n_boxes) {
            current_vp_name = current.viewport()$name
            pushViewport(viewport(y = (h[i, "top"] + h[i, "bottom"])/2,
                height = h[i, "top"] - h[i, "bottom"], default.units = "native"))
            if(is.function(panel_fun)) panel_fun(align_to[[i]], names(align_to)[i])
            popViewport()
            # Guard: panel_fun must restore the viewport stack it pushed.
            if(current.viewport()$name != current_vp_name) {
                stop_wrap("If you push viewports `panel_fun`, you need to pop all them out.")
            }
        }
        popViewport()

        # draw the links
        if(is.null(link_gp$fill)) link_gp$fill = NA
        link_gp = recycle_gp(link_gp, n_boxes)
        if(side == "right") {
            pushViewport(viewport(x = unit(0, "npc"), just = "left", width = link_width))
        } else {
            pushViewport(viewport(x = unit(1, "npc"), just = "right", width = link_width))
        }
        for(i in 1:n_boxes) {
            df = align_to_df[[i]]
            for(j in 1:nrow(df)) { # draw each polygon
                if(!internal_line) {
                    # Filled polygon without inner border: fill with a border-less
                    # gp, then draw the outline only along the outer edges.
                    link_gp3 = link_gp2 = link_gp
                    link_gp2$col = link_gp$fill
                    link_gp2$lty = NULL
                    link_gp3$fill = NA
                    if(side == "right") {
                        grid.polygon(unit.c(unit(c(0, 0), "npc"), rep(link_width, 2)),
                            c(pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"], h[i, "bottom"]),
                            default.units = "native", gp = subset_gp(link_gp2, i))
                        grid.lines(unit.c(link_width, unit(c(0, 0), "npc"), link_width),
                            c(h[i, "bottom"], pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"]),
                            default.units = "native", gp = subset_gp(link_gp3, i))
                    } else {
                        grid.polygon(unit.c(rep(link_width, 2), unit(c(0, 0), "npc")),
                            c(pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"], h[i, "bottom"]),
                            default.units = "native", gp = subset_gp(link_gp2, i))
                        grid.lines(unit.c(unit(0, "npc"), rep(link_width, 2), unit(0, "npc")),
                            c(h[i, "bottom"], pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"]),
                            default.units = "native", gp = subset_gp(link_gp3, i))
                    }
                } else {
                    if(side == "right") {
                        grid.polygon(unit.c(unit(c(0, 0), "npc"), rep(link_width, 2)),
                            c(pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"], h[i, "bottom"]),
                            default.units = "native", gp = subset_gp(link_gp, i))
                    } else {
                        grid.polygon(unit.c(rep(link_width, 2), unit(c(0, 0), "npc")),
                            c(pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"], h[i, "bottom"]),
                            default.units = "native", gp = subset_gp(link_gp, i))
                    }
                }
            }
        }
        popViewport()
    }

    # Column version of row_fun: identical logic with x/y, width/height,
    # left/right vs top/bottom swapped.
    column_fun = function(index) {
        if(is_RStudio_current_dev()) {
            if(ht_opt$message) {
                message_wrap("It seems you are using RStudio IDE. `anno_zoom()`/`anno_link()` needs to work with the physical size of the graphics device. It only generates correct plot in the figure panel, while in the zoomed plot (by clicking the icon 'Zoom') or in the exported plot (by clicking the icon 'Export'), the connection to heatmap rows/columns might be wrong. You can directly use e.g. pdf() to save the plot into a file.\n\nUse `ht_opt$message = FALSE` to turn off this message.")
            }
        }

        n = length(index)

        if(is.atomic(align_to)) {
            if(length(setdiff(align_to, index)) == 0 && !any(duplicated(align_to))) {
                align_to = list(align_to)
            } else {
                if(length(align_to) != n) {
                    stop_wrap("If `align_to` is a vector with group labels, the length should be the same as the number of columns in the heatmap.")
                }
                lnm = as.character(unique(align_to[index]))
                align_to = as.list(tapply(seq_along(align_to), align_to, function(x) x))
                align_to = align_to[lnm]
            }
        }

        align_to = lapply(align_to, function(x) intersect(index, x))
        nrl = sapply(align_to, length)

        # Runs of consecutive column positions per group.
        align_to_df = lapply(align_to, function(x) {
            ind = which(index %in% x)
            n = length(ind)
            s = NULL
            e = NULL
            s[1] = ind[1]
            if(n > 1) {
                ind2 = which(ind[2:n] - ind[1:(n-1)] > 1)
                if(length(ind2)) s = c(s, ind[ ind2 + 1 ])
                k = length(s)
                e[k] = ind[length(ind)]
                if(length(ind2)) e[1:(k-1)] = ind[1:(n-1)][ ind2 ]
            } else {
                e = ind[1]
            }
            data.frame(s = s, e = e)
        })

        if(is.null(.pos)) {
            pos = (1:n - 0.5)/n
        } else {
            pos = .pos
        }
        .scale = c(0, 1)

        pushViewport(viewport(yscale = c(0, 1), xscale = .scale))

        if(inherits(extend, "unit")) extend = convertWidth(extend, "native", valueOnly = TRUE)

        # the position of boxes initially are put evenly
        # add the gap
        n_boxes = length(align_to)
        if(length(gap) == 1) gap = rep(gap, n_boxes)
        if(is.null(size)) size = nrl
        if(length(size) == 1) size = rep(size, length(align_to))
        if(length(size) != length(align_to)) {
            stop_wrap("Length of `size` should be the same as the number of groups of indices.")
        }
        if(!inherits(size, "unit")) {
            size_is_unit = FALSE
            if(n_boxes == 1) {
                h = data.frame(left = .scale[1] - extend[1], right = .scale[2] + extend[2])
            } else {
                size = as.numeric(size)
                gap = convertWidth(gap, "native", valueOnly = TRUE)
                box_width = size/sum(size) * (1 + sum(extend) - sum(gap[1:(n_boxes-1)]))
                h = data.frame(
                    right = cumsum(box_width) + cumsum(gap) - gap[length(gap)] - extend[1]
                )
                h$left = h$right - box_width
            }
        } else {
            size_is_unit = TRUE
            box_width = size
            box_width2 = box_width
            for(i in 1:n_boxes) {
                if(i == 1 || i == n_boxes) {
                    if(n_boxes > 1) {
                        box_width2[i] = box_width2[i] + gap[i]*0.5
                    }
                } else {
                    box_width2[i] = box_width2[i] + gap[i]
                }
            }
            box_width2 = convertWidth(box_width2, "native", valueOnly = TRUE)
            # the original positions of boxes
            mean_pos = sapply(align_to_df, function(df) mean((pos[df[, 1]] + pos[df[, 2]])/2))
            h1 = mean_pos - box_width2*0.5
            h2 = mean_pos + box_width2*0.5
            h = smartAlign2(h1, h2, c(.scale[1] - extend[1], .scale[2] + extend[2]))
            colnames(h) = c("left", "right")
            # recalculate h to remove gaps
            gap_width = convertWidth(gap, "native", valueOnly = TRUE)
            if(n_boxes > 1) {
                for(i in 1:n_boxes) {
                    if(i == 1) {
                        h[i, "left"] = h[i, "left"] + gap_width[i]/2
                    } else if(i == n_boxes) {
                        h[i, "right"] = h[i, "right"] - gap_width[i]/2
                    } else {
                        h[i, "left"] = h[i, "left"] + gap_width[i]/2
                        h[i, "right"] = h[i, "right"] - gap_width[i]/2
                    }
                }
            }
        }
        popViewport()

        # draw boxes
        if(side == "top") {
            pushViewport(viewport(y = link_height, just = "bottom", height = anno_size$height - link_height))
        } else {
            pushViewport(viewport(y = 0, just = "bottom", height = anno_size$height - link_height))
        }
        for(i in 1:n_boxes) {
            current_vp_name = current.viewport()$name
            pushViewport(viewport(x = (h[i, "right"] + h[i, "left"])/2,
                width = h[i, "right"] - h[i, "left"], default.units = "native"))
            if(is.function(panel_fun)) panel_fun(align_to[[i]], names(align_to)[i])
            popViewport()
            if(current.viewport()$name != current_vp_name) {
                stop_wrap("If you push viewports `panel_fun`, you need to pop all them out.")
            }
        }
        popViewport()

        # draw the links
        if(is.null(link_gp$fill)) link_gp$fill = NA
        link_gp = recycle_gp(link_gp, n_boxes)
        if(side == "top") {
            pushViewport(viewport(y = unit(0, "npc"), just = "bottom", height = link_height))
        } else {
            pushViewport(viewport(y = unit(1, "npc"), just = "top", height = link_height))
        }
        for(i in 1:n_boxes) {
            df = align_to_df[[i]]
            for(j in 1:nrow(df)) { # draw each polygon
                if(!internal_line) {
                    link_gp3 = link_gp2 = link_gp
                    link_gp2$col = link_gp$fill
                    link_gp2$lty = NULL
                    link_gp3$fill = NA
                    if(side == "top") {
                        grid.polygon(
                            c(pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"], h[i, "right"]),
                            unit.c(unit(c(0, 0), "npc"), rep(link_width, 2)),
                            default.units = "native", gp = subset_gp(link_gp2, i))
                        grid.lines(
                            c(h[i, "right"], pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"]),
                            unit.c(link_width,unit(c(0, 0), "npc"), link_width),
                            default.units = "native", gp = subset_gp(link_gp3, i))
                    } else {
                        grid.polygon(
                            c(pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"], h[i, "right"]),
                            unit.c(rep(link_width, 2), unit(c(0, 0), "npc")),
                            default.units = "native", gp = subset_gp(link_gp2, i))
                        grid.lines(
                            c(h[i, "right"], pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"]),
                            unit.c(unit(0, "npc"), rep(link_width, 2), unit(0, "npc")),
                            default.units = "native", gp = subset_gp(link_gp3, i))
                    }
                } else {
                    if(side == "top") {
                        grid.polygon(
                            c(pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"], h[i, "right"]),
                            unit.c(unit(c(0, 0), "npc"), rep(link_width, 2)),
                            default.units = "native", gp = subset_gp(link_gp, i))
                    } else {
                        grid.polygon(
                            c(pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"], h[i, "right"]),
                            unit.c(rep(link_width, 2), unit(c(0, 0), "npc")),
                            default.units = "native", gp = subset_gp(link_gp, i))
                    }
                }
            }
        }
        popViewport()
    }

    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_zoom",
        which = which,
        height = anno_size$height,
        width = anno_size$width,
        n = -1,
        var_import = list(align_to, .pos, gap, size, panel_fun, side, anno_size, extend,
            link_width, link_height, link_gp, internal_line),
        show_name = FALSE
    )

    # Subsetting keeps, per group, only the indices that survive the subset and
    # drops groups that become empty.
    anno@subset_rule$align_to = function(x, i) {
        if(is.atomic(x)) {
            x[i]
        } else {
            x = lapply(x, function(x) intersect(x, i))
            x = x[sapply(x, length) > 0]
        }
    }
    anno@subsetable = TRUE

    return(anno)
}
/R/AnnotationFunction-function.R
permissive
j3gu/ComplexHeatmap
R
false
false
131,093
r
# == title
# Empty Annotation
#
# == param
# -which Whether it is a column annotation or a row annotation?
# -border Whether to draw borders of the annotation region?
# -zoom If it is true and when the heatmap is split, the empty annotation slices will have
#       equal height or width, and you can see the correspondance between the annotation slices
#       and the original heatmap slices.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == details
# It creates an empty annotation and holds space, later users can add graphics
# by `decorate_annotation`. This function is useful when users have difficulty to
# implement `AnnotationFunction` object.
#
# In following example, an empty annotation is first created and later points are added:
#
#     m = matrix(rnorm(100), 10)
#     ht = Heatmap(m, top_annotation = HeatmapAnnotation(pt = anno_empty()))
#     ht = draw(ht)
#     co = column_order(ht)[[1]]
#     pt_value = 1:10
#     decorate_annotation("pt", {
#         pushViewport(viewport(xscale = c(0.5, ncol(mat)+0.5), yscale = range(pt_value)))
#         grid.points(seq_len(ncol(mat)), pt_value[co], pch = 16, default.units = "native")
#         grid.yaxis()
#         popViewport()
#     })
#
# And it is similar as using `anno_points`:
#
#     Heatmap(m, top_annotation = HeatmapAnnotation(pt = anno_points(pt_value)))
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#empty-annotation
#
# == examples
# anno = anno_empty()
# draw(anno, test = "anno_empty")
# anno = anno_empty(border = FALSE)
# draw(anno, test = "anno_empty without border")
anno_empty = function(which = c("column", "row"), border = TRUE, zoom = FALSE,
    width = NULL, height = NULL) {

    # Honor the globally pushed annotation direction when constructed inside
    # HeatmapAnnotation(); otherwise resolve `which` from the argument.
    pushed_which = .ENV$current_annotation_which
    if(is.null(pushed_which)) {
        which = match.arg(which)[1]
    } else {
        which = pushed_which
    }

    anno_size = anno_width_and_height(which, width, height, unit(1, "cm"))

    # The drawing function does nothing except optionally framing the region;
    # users later fill the space via decorate_annotation().
    draw_fun = function(index) {
        if(border) grid.rect()
    }

    return(AnnotationFunction(
        fun = draw_fun,
        n = NA,
        fun_name = "anno_empty",
        which = which,
        var_import = list(border, zoom),
        subset_rule = list(),
        subsetable = TRUE,
        height = anno_size$height,
        width = anno_size$width,
        show_name = FALSE
    ))
}

# == title
# Subset the Matrix by Rows
#
# == param
# -x A matrix.
# -i The row indices.
#
# == details
# Mainly used for constructing the `AnnotationFunction-class` object. Keeps the
# matrix structure even when a single row is selected.
#
subset_matrix_by_row = function(x, i) {
    x[i, , drop = FALSE]
}

# == title
# Subset the vector
#
# == param
# -x A vector.
# -i The indices.
#
# == details
# Mainly used for constructing the `AnnotationFunction-class` object.
#
subset_vector = function(x, i) {
    x[i]
}

# == title
# Simple Annotation
#
# == param
# -x The value vector. The value can be a vector or a matrix. The length of the vector
#     or the nrow of the matrix is taken as the number of the observations of the annotation.
#     The value can be numeric or character and NA value is allowed.
# -col Color that maps to ``x``. If ``x`` is numeric and needs a continuous mapping, ``col``
#     should be a color mapping function which accepts a vector of values and returns a
#     vector of colors. Normally it is generated by `circlize::colorRamp2`.
#     If ``x`` is discrete
#     (numeric or character) and needs a discrete color mapping, ``col`` should be a vector of
#     colors with levels in ``x`` as vector names. If ``col`` is not specified, the color mapping
#     is randomly generated by ``ComplexHeatmap:::default_col``.
# -na_col Color for NA value.
# -which Whether it is a column annotation or a row annotation?
# -border Whether to draw borders of the annotation region?
# -gp Graphic parameters for grid borders. The ``fill`` parameter is disabled.
# -pch Points/symbols that are added on top of the annotation grids. The value can be numeric
#     or single letters. It can be a vector if ``x`` is a vector and a matrix if ``x`` is a matrix.
#     No points are drawn if the corresponding values are NA.
# -pt_size Size of the points/symbols. It should be a `grid::unit` object. If ``x`` is a vector,
#     the value of ``pt_size`` can be a vector, while if ``x`` is a matrix, ``pt_size`` can
#     only be a single value.
# -pt_gp Graphic parameters for points/symbols. The length setting is same as ``pt_size``.
#     If ``pch`` is set as letters, the fontsize should be set as ``pt_gp = gpar(fontsize = ...)``.
# -simple_anno_size size of the simple annotation.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == details
# The "simple annotation" is the most widely used annotation type which is heatmap-like, where
# the grid colors correspond to the values. `anno_simple` also supports adding points/symbols
# on top of the grids, where it can be a normal point (when ``pch`` is set as numbers) or letters (when
# ``pch`` is set as single letters).
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#simple-annotation-as-an-annotation-function
#
# == example
# anno = anno_simple(1:10)
# draw(anno, test = "a numeric vector")
#
# anno = anno_simple(cbind(1:10, 10:1))
# draw(anno, test = "a matrix")
#
# anno = anno_simple(1:10, pch = c(1:4, NA, 6:8, NA, 10))
# draw(anno, test = "pch has NA values")
#
# anno = anno_simple(1:10, pch = c(rep("A", 5), rep(NA, 5)))
# draw(anno, test = "pch has NA values")
#
# pch = matrix(1:20, nc = 2)
# pch[sample(length(pch), 10)] = NA
# anno = anno_simple(cbind(1:10, 10:1), pch = pch)
# draw(anno, test = "matrix, pch is a matrix with NA values")
anno_simple = function(x, col, na_col = "grey",
    which = c("column", "row"), border = FALSE, gp = gpar(col = NA),
    pch = NULL, pt_size = unit(1, "snpc")*0.8, pt_gp = gpar(),
    simple_anno_size = ht_opt$simple_anno_size,
    width = NULL, height = NULL) {

    # Resolve the annotation direction (globally pushed value wins).
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
    } else {
        which = .ENV$current_annotation_which
    }

    # Normalize input: data frames become matrices; a one-column matrix is
    # degraded to a plain vector.
    if(is.data.frame(x)) x = as.matrix(x)
    if(is.matrix(x)) {
        if(ncol(x) == 1) {
            x = x[, 1]
        }
    }
    input_is_matrix = is.matrix(x)

    # A matrix input gets one simple-annotation track per column.
    anno_size = anno_width_and_height(which, width, height,
        simple_anno_size*ifelse(input_is_matrix, ncol(x), 1))

    if(missing(col)) {
        col = default_col(x)
    }

    # Accept a named color vector (discrete), a color mapping function
    # (continuous) or a ready-made ColorMapping object.
    if(is.atomic(col)) {
        color_mapping = ColorMapping(name = "foo", colors = col, na_col = na_col)
    } else if(is.function(col)) {
        color_mapping = ColorMapping(name = "foo", col_fun = col, na_col = na_col)
    } else if(inherits(col, "ColorMapping")) {
        color_mapping = col
    } else {
        stop_wrap("`col` should be a named vector/a color mapping function/a ColorMapping object.")
    }

    value = x
    gp = subset_gp(gp, 1) # gp controls border

    if(is.matrix(value)) {
        n = nrow(value)
        nr = n
        nc = ncol(value)
    } else {
        n = length(value)
        # NOTE(review): `nr` is assigned here but not referenced later in this
        # function.
        nr = n
        nc = 1
    }

    if(!is.null(pch)) {
        if(input_is_matrix) {
            # For a matrix, pch is normalized to an n x nc matrix and point size
            # is scaled down by the number of tracks.
            pch = normalize_graphic_param_to_mat(pch, ifelse(is.matrix(x), ncol(x), 1), n, "pch")
            pt_size = pt_size[1]*(1/nc)
            pt_gp = subset_gp(pt_gp, 1)
        } else {
            # For a vector, recycle scalar pch/pt_size/pt_gp to length n.
            if(length(pch) == 1) pch = rep(pch, n)
            if(length(pt_size) == 1) pt_size = rep(pt_size, n)
            pt_gp = recycle_gp(pt_gp, n)
        }
    }

    # Row-annotation drawing: observations run top-to-bottom.
    row_fun = function(index) {
        n = length(index)
        y = (n - seq_len(n) + 0.5) / n
        if(is.matrix(value)) {
            nc = ncol(value)
            pch = pch[index, , drop = FALSE]
            for(i in seq_len(nc)) {
                fill = map_to_colors(color_mapping, value[index, i])
                grid.rect(x = (i-0.5)/nc, y, height = 1/n, width = 1/nc,
                    gp = do.call("gpar", c(list(fill = fill), gp)))
                if(!is.null(pch)) {
                    # Only draw points where pch is not NA.
                    l = !is.na(pch[, i])
                    if(any(l)) {
                        grid.points(x = rep((i-0.5)/nc, sum(l)), y = y[l], pch = pch[l, i],
                            size = {if(length(pt_size) == 1) pt_size else pt_size[i]},
                            gp = subset_gp(pt_gp, i))
                    }
                }
            }
        } else {
            fill = map_to_colors(color_mapping, value[index])
            grid.rect(x = 0.5, y, height = 1/n, width = 1,
                gp = do.call("gpar", c(list(fill = fill), gp)))
            if(!is.null(pch)) {
                pch = pch[index]
                pt_size = pt_size[index]
                pt_gp = subset_gp(pt_gp, index)
                l = !is.na(pch)
                if(any(l)) {
                    grid.points(x = rep(0.5, sum(l)), y = y[l], pch = pch[l],
                        size = pt_size[l], gp = subset_gp(pt_gp, which(l)))
                }
            }
        }
        if(border) grid.rect(gp = gpar(fill = "transparent"))
    }
    # Column-annotation drawing: observations run left-to-right; tracks are
    # stacked top-to-bottom.
    column_fun = function(index) {
        n = length(index)
        x = (seq_len(n) - 0.5) / n
        if(is.matrix(value)) {
            nc = ncol(value)
            pch = pch[index, , drop = FALSE]
            for(i in seq_len(nc)) {
                fill = map_to_colors(color_mapping, value[index, i])
                grid.rect(x, y = (nc-i +0.5)/nc, width = 1/n, height = 1/nc,
                    gp = do.call("gpar", c(list(fill = fill), gp)))
                if(!is.null(pch)){
                    l = !is.na(pch[, i])
                    if(any(l)) {
                        grid.points(x[l], y = rep((nc-i +0.5)/nc, sum(l)), pch = pch[l, i],
                            size = {if(length(pt_size) == 1) pt_size else pt_size[i]},
                            gp = subset_gp(pt_gp, i))
                    }
                }
            }
        } else {
            fill = map_to_colors(color_mapping, value[index])
            grid.rect(x, y = 0.5, width = 1/n, height = 1,
                gp = do.call("gpar", c(list(fill = fill), gp)))
            if(!is.null(pch)) {
                pch = pch[index]
                pt_size = pt_size[index]
                pt_gp = subset_gp(pt_gp, index)
                l = !is.na(pch)
                if(any(l)) {
                    grid.points(x[l], y = rep(0.5, sum(l)), pch = pch[l],
                        size = pt_size[l], gp = subset_gp(pt_gp, which(l)))
                }
            }
        }
        if(border) grid.rect(gp = gpar(fill = "transparent"))
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }
    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_simple",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = c(0.5, nc + 0.5),
        var_import = list(value, gp, border, color_mapping, pt_gp, pt_size, pch)
    )
    # Subsetting rules: matrix values subset by row, vector values by index;
    # the point parameters follow the vector case.
    anno@subset_rule = list()
    if(input_is_matrix) {
        anno@subset_rule$value = subset_matrix_by_row
        if(!is.null(pch)) {
            anno@subset_rule$pch = subset_matrix_by_row
        }
    } else {
        anno@subset_rule$value = subset_vector
        if(!is.null(pch)) {
            anno@subset_rule$pch = subset_vector
            anno@subset_rule$pt_size = subset_vector
            anno@subset_rule$pt_gp = subset_gp
        }
    }
    anno@subsetable = TRUE

    return(anno)
}

# == title
# Image Annotation
#
# == param
# -image A vector of file paths of images. The format of the image is inferred from the suffix name of the image file.
#     NA values or empty strings in the vector means no image to drawn.
# -which Whether it is a column annotation or a row annotation?
# -border Whether to draw borders of the annotation region?
# -gp Graphic parameters for annotation grids. If the image has transparent background, the ``fill`` parameter
#     can be used to control the background color in the annotation grids.
# -space The space around the image to the annotation grid borders. The value should be a `grid::unit` object.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == details
# This function supports image formats in ``png``, ``svg``, ``pdf``, ``eps``, ``jpeg/jpg``, ``tiff``.
# ``png``, ``jpeg/jpg`` and ``tiff`` images are imported by `png::readPNG`, `jpeg::readJPEG` and
# `tiff::readTIFF`, and drawn by `grid::grid.raster`. ``svg`` images are firstly reformatted by ``rsvg::rsvg_svg``
# and then imported by `grImport2::readPicture` and drawn by `grImport2::grid.picture`. ``pdf`` and ``eps``
# images are imported by `grImport::PostScriptTrace` and `grImport::readPicture`, later drawn by `grImport::grid.picture`.
#
# Different image formats can be mixed in the ``image`` vector.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#image-annotation
#
# == example
# # download the free icons from https://github.com/Keyamoon/IcoMoon-Free
# \dontrun{
# image = sample(dir("~/Downloads/IcoMoon-Free-master/PNG/64px", full.names = TRUE), 10)
# anno = anno_image(image)
# draw(anno, test = "png")
# image[1:5] = ""
# anno = anno_image(image)
# draw(anno, test = "some of png")
# }
anno_image = function(image, which = c("column", "row"), border = TRUE,
    gp = gpar(fill = NA, col = NA), space = unit(1, "mm"),
    width = NULL, height = NULL) {

    # NA or whitespace-only entries mean "draw nothing" in that slot.
    image[is.na(image)] = ""
    l = grepl("^\\s*$", image)
    image[l] = ""

    allowed_image_type = c("png", "svg", "pdf", "eps", "jpeg", "jpg", "tiff")
    if(inherits(image, "character")) { ## they are file path
        # infer the format from the file suffix
        image_type = tolower(gsub("^.*\\.(\\w+)$", "\\1", image))
        if(!all(image_type[image_type != ""] %in% allowed_image_type)) {
            stop_wrap("image file should be of png/svg/pdf/eps/jpeg/jpg/tiff.")
        }
    } else {
        stop_wrap("`image` should be a vector of path.")
    }

    # Read every image once up front; `image_class` records which drawing
    # backend each entry needs later ("raster" or a grImport(2) Picture).
    n_image = length(image)
    image_list = vector("list", n_image)
    image_class = vector("character", n_image)
    for(i in seq_along(image)) {
        if(image[i] == "") {
            image_list[[i]] = NA
            image_class[i] = NA
        } else if(image_type[i] == "png") {
            if(!requireNamespace("png")) {
                stop_wrap("Need png package to read png images.")
            }
            image_list[[i]] = png::readPNG(image[i])
            image_class[i] = "raster"
        } else if(image_type[i] %in% c("jpeg", "jpg")) {
            if(!requireNamespace("jpeg")) {
                stop_wrap("Need jpeg package to read jpeg/jpg images.")
            }
            image_list[[i]] = jpeg::readJPEG(image[i])
            image_class[i] = "raster"
        } else if(image_type[i] == "tiff") {
            if(!requireNamespace("tiff")) {
                stop_wrap("Need tiff package to read tiff images.")
            }
            image_list[[i]] = tiff::readTIFF(image[i])
            image_class[i] = "raster"
        } else if(image_type[i] %in% c("pdf", "eps")) {
            if(!requireNamespace("grImport")) {
                stop_wrap("Need grImport package to read pdf/eps images.")
            }
            temp_file = tempfile()
            getFromNamespace("PostScriptTrace", ns = "grImport")(image[[i]], temp_file)
            image_list[[i]] = grImport::readPicture(temp_file)
            file.remove(temp_file)
            image_class[i] = "grImport::Picture"
        } else if(image_type[i] == "svg") {
            if(!requireNamespace("grImport2")) {
                stop_wrap("Need grImport2 package to read svg images.")
            }
            # if(!requireNamespace("rsvg")) {
            #   stop_wrap("Need rsvg package to convert svg images.")
            # }
            temp_file = tempfile()
            # get it work on bioconductor build server
            oe = try(getFromNamespace("rsvg_svg", ns = "rsvg")(image[i], temp_file))
            if(inherits(oe, "try-error")) {
                stop_wrap("Need rsvg package to convert svg images.")
            }
            image_list[[i]] = grImport2::readPicture(temp_file)
            file.remove(temp_file)
            image_class[i] = "grImport2::Picture"
        }
    }

    # height/width aspect ratio of every image, used to fit each image into
    # its annotation cell without distortion.
    yx_asp = sapply(image_list, function(x) {
        if(inherits(x, "array")) {
            nrow(x)/ncol(x)
        } else if(inherits(x, "Picture")) {
            max(x@summary@yscale)/max(x@summary@xscale)
        } else {
            1
        }
    })

    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
    } else {
        which = .ENV$current_annotation_which
    }

    space = space[1]

    anno_size = anno_width_and_height(which, width, height, unit(1, "cm"))

    gp = recycle_gp(gp, n_image)

    column_fun = function(index) {
        n = length(index)
        pushViewport(viewport())
        # cell aspect ratio (height/width) of one annotation cell, in mm
        asp = convertHeight(unit(1, "npc") - space*2, "mm", valueOnly = TRUE)/convertWidth(unit(1/n, "npc") - space*2, "mm", valueOnly = TRUE)
        grid.rect(x = (1:n - 0.5)/n, width = 1/n, gp = subset_gp(gp, index))
        for(i in seq_len(n)) {
            if(identical(image_list[[ index[i] ]], NA)) next
            if(yx_asp[ index[i] ] > asp) {
                # image is relatively taller than the cell: fill the height and
                # derive the width from the aspect ratio (width = height * x/y).
                # Note: using `height * (1/yx_asp)` here; multiplying by `yx_asp`
                # (as the previous code did) inverted the aspect ratio because
                # yx_asp is height/width.
                height = unit(1, "npc") - space*2
                width = convertHeight(height, "mm")*(1/yx_asp[ index[i] ])
            } else {
                # image is relatively wider: fill the width instead
                width = unit(1/n, "npc") - space*2
                height = yx_asp[ index[i] ]*convertWidth(width, "mm")
            }
            if(image_class[ index[i] ] == "raster") {
                grid.raster(image_list[[ index[i] ]], x = (i-0.5)/n, width = width, height = height)
            } else if(image_class[ index[i] ] == "grImport::Picture") {
                grid.picture = getFromNamespace("grid.picture", ns = "grImport")
                grid.picture(image_list[[ index[i] ]], x = (i-0.5)/n, width = width, height = height)
            } else if(image_class[ index[i] ] == "grImport2::Picture") {
                grid.picture = getFromNamespace("grid.picture", ns = "grImport2")
                grid.picture(image_list[[ index[i] ]], x = (i-0.5)/n, width = width, height = height)
            }
        }
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    row_fun = function(index) {
        n = length(index)
        pushViewport(viewport())
        asp = convertHeight(unit(1/n, "npc") - space*2, "mm", valueOnly = TRUE)/convertWidth(unit(1, "npc") - space*2, "mm", valueOnly = TRUE)
        grid.rect(y = (n - 1:n + 0.5)/n, height = 1/n, gp = subset_gp(gp, index))
        for(i in seq_len(n)) {
            if(identical(image_list[[ index[i] ]], NA)) next
            if(yx_asp[ index[i] ] > asp) {
                height = unit(1/n, "npc") - space*2
                width = convertHeight(height, "mm")*(1/yx_asp[ index[i] ])
            } else {
                width = unit(1, "npc") - space*2
                height = yx_asp[ index[i] ]*convertWidth(width, "mm")
            }
            if(image_class[ index[i] ] == "raster") {
                grid.raster(image_list[[ index[i] ]], y = (n - i + 0.5)/n, width = width, height = height)
            } else if(image_class[ index[i] ] == "grImport::Picture") {
                grid.picture = getFromNamespace("grid.picture", ns = "grImport")
                grid.picture(image_list[[ index[i] ]], y = (n - i + 0.5)/n, width = width, height = height)
            } else if(image_class[ index[i] ] == "grImport2::Picture") {
                grid.picture = getFromNamespace("grid.picture", ns = "grImport2")
                grid.picture(image_list[[ index[i] ]], y = (n - i + 0.5)/n, width = width, height = height)
            }
        }
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }

    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_image",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n_image,
        data_scale = c(0.5, 1.5),
        var_import = list(gp, border, space, yx_asp, image_list, image_class)
    )

    anno@subset_rule$gp = subset_vector
    anno@subset_rule$image_list = subset_vector
    anno@subset_rule$image_class = subset_vector

    anno@subsetable = TRUE

    return(anno)
}
# Sometimes when
# appending more than one heatmaps, the axes of column annotations of one heatmap might
# overlap to the neighbouring heatmap, setting ``facing`` to ``inside`` may avoid it.
# -direction The direction of the axis. Value should be "normal" or "reverse".
#
# All the parameters are passed to `annotation_axis_grob` to construct an axis grob.
#
# == example
# default_axis_param("column")
# default_axis_param("row")
default_axis_param = function(which) {
    # Defaults: a column annotation gets a vertical axis on the left with
    # horizontal labels; a row annotation gets a horizontal axis at the
    # bottom with rotated labels.
    list(
        at = NULL,
        labels = NULL,
        labels_rot = ifelse(which == "column", 0, 90),
        gp = gpar(fontsize = 8),
        side = ifelse(which == "column", "left", "bottom"),
        facing = "outside",
        direction = "normal"
    )
}

# Merge user-supplied axis parameters over the defaults and reject sides
# that are geometrically impossible for the given orientation.
validate_axis_param = function(axis_param, which) {
    dft = default_axis_param(which)
    for(nm in names(axis_param)) {
        dft[[nm]] = axis_param[[nm]]
    }
    if(which == "row") {
        # a row annotation's axis runs along the values (horizontal), so it
        # can only sit above or below the annotation
        if(dft$side %in% c("left", "right")) {
            stop_wrap("axis side can only be set to 'top' or 'bottom' for row annotations.")
        }
    }
    if(which == "column") {
        # a column annotation's axis is vertical, so it can only sit to the
        # left or right of the annotation
        if(dft$side %in% c("top", "bottom")) {
            # fixed: the message previously said "for row annotations"
            stop_wrap("axis side can only be set to 'left' or 'right' for column annotations.")
        }
    }
    return(dft)
}

# Build the axis grob from a (possibly partial) axis_param list; missing
# entries fall back to the defaults and breaks are auto-computed from the
# data scale when not given.
construct_axis_grob = function(axis_param, which, data_scale) {
    axis_param_default = default_axis_param(which)
    for(nm in setdiff(names(axis_param_default), names(axis_param))) {
        axis_param[[nm]] = axis_param_default[[nm]]
    }
    if(is.null(axis_param$at)) {
        at = pretty_breaks(data_scale)
        axis_param$at = at
        axis_param$labels = at
    }
    if(is.null(axis_param$labels)) {
        axis_param$labels = axis_param$at
    }
    axis_param$scale = data_scale
    axis_grob = do.call(annotation_axis_grob, axis_param)
    return(axis_grob)
}

# == title
# Points Annotation
#
# == param
# -x The value vector. The value can be a vector or a matrix. The length of the vector
#    or the number of rows of the matrix is taken as the number of the observations of the annotation.
# -which Whether it is a column annotation or a row annotation?
# -border Whether draw borders of the annotation region?
# -gp Graphic parameters for points. The length of each graphic parameter can be 1, length of ``x`` if ``x``
#     is a vector, or number of columns of ``x`` is ``x`` is a matrix.
# -pch Point type. The length setting is the same as ``gp``.
# -size Point size, the value should be a `grid::unit` object. The length setting is the same as ``gp``.
# -ylim Data ranges. By default it is ``range(x)``.
# -extend The extension to both side of ``ylim``. The value is a percent value corresponding to ``ylim[2] - ylim[1]``.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
# -... Other arguments.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#points-annotation
#
# == example
# anno = anno_points(runif(10))
# draw(anno, test = "anno_points")
# anno = anno_points(matrix(runif(20), nc = 2), pch = 1:2)
# draw(anno, test = "matrix")
anno_points = function(x, which = c("column", "row"), border = TRUE,
    gp = gpar(), pch = 16, size = unit(2, "mm"),
    ylim = NULL, extend = 0.05, axis = TRUE,
    axis_param = default_axis_param(which),
    width = NULL, height = NULL, ...) {

    # Reject arguments from older API versions with a pointer to the new one.
    other_args = list(...)
    if(length(other_args)) {
        if("axis_gp" %in% names(other_args)) {
            stop_wrap("`axis_gp` is removed from the arguments. Use `axis_param = list(gp = ...)` instead.")
        }
        if("axis_direction" %in% names(other_args)) {
            stop_wrap("`axis_direction` is not supported any more.")
        }
    }
    # undocumented option: `pch` entries are PNG file paths drawn as rasters
    if("pch_as_image" %in% names(other_args)) {
        pch_as_image = other_args$pch_as_image
    } else {
        pch_as_image = FALSE
    }

    # Outside of HeatmapAnnotation, unit conversions below need an open
    # device; open a null device and close it again on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    # single-column matrices behave like plain vectors
    if(is.data.frame(x)) x = as.matrix(x)
    if(is.matrix(x)) {
        if(ncol(x) == 1) {
            x = x[, 1]
        }
    }
    input_is_matrix = is.matrix(x)

    anno_size = anno_width_and_height(which, width, height, unit(1, "cm"))

    if(is.matrix(x)) {
        n = nrow(x)
        nr = n
        nc = ncol(x)
    } else {
        n = length(x)
        nr = n
        nc = 1
    }

    # Recycle graphic settings: per-column for matrix input,
    # per-observation for vector input.
    if(input_is_matrix) {
        gp = recycle_gp(gp, nc)
        if(length(pch) == 1) pch = rep(pch, nc)
        if(length(size) == 1) size = rep(size, nc)
    } else if(is.atomic(x)) {
        gp = recycle_gp(gp, n)
        if(length(pch) == 1) pch = rep(pch, n)
        if(length(size) == 1) size = rep(size, n)
    }

    if(is.null(ylim)) {
        data_scale = range(x, na.rm = TRUE)
    } else {
        data_scale = ylim
    }
    # pad the value range on both sides so points do not touch the border
    data_scale = data_scale + c(-extend, extend)*(data_scale[2] - data_scale[1])
    value = x

    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, data_scale) else NULL

    # `k` is the index of the current slice and `N` the total number of
    # slices; they decide which slice carries the axis.
    row_fun = function(index, k = 1, N = 1) {
        n = length(index)
        if(axis_param$direction == "reverse") {
            # mirror the values so the axis appears reversed
            value = data_scale[2] - value + data_scale[1]
        }
        pushViewport(viewport(xscale = data_scale, yscale = c(0.5, n+0.5)))
        if(is.matrix(value)) {
            for(i in seq_len(ncol(value))) {
                grid.points(value[index, i], n - seq_along(index) + 1, gp = subset_gp(gp, i),
                    default.units = "native", pch = pch[i], size = size[i])
            }
        } else {
            if(pch_as_image) {
                # each point is an image file; keep its aspect ratio
                for(ii in seq_along(index)) {
                    pch_image = png::readPNG(pch[ index[ii] ])
                    grid.raster(pch_image, y = n - ii + 1, x = value[ index[ii] ], default.units = "native",
                        width = size[ index[ii] ], height = size[ index[ii] ]*(nrow(pch_image)/ncol(pch_image)))
                }
            } else {
                grid.points(value[index], n - seq_along(index) + 1, gp = subset_gp(gp, index),
                    default.units = "native", pch = pch[index], size = size[index])
            }
        }
        # draw the axis only on the outermost slice for its side
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    column_fun = function(index, k = 1, N = 1) {
        n = length(index)
        if(axis_param$direction == "reverse") {
            value = data_scale[2] - value + data_scale[1]
        }
        pushViewport(viewport(yscale = data_scale, xscale = c(0.5, n+0.5)))
        if(is.matrix(value)) {
            for(i in seq_len(ncol(value))) {
                grid.points(seq_along(index), value[index, i], gp = subset_gp(gp, i),
                    default.units = "native", pch = pch[i], size = size[i])
            }
        } else {
            if(pch_as_image) {
                for(ii in seq_along(index)) {
                    pch_image = png::readPNG(pch[ index[ii] ])
                    grid.raster(pch_image, x = ii, value[ index[ii] ], default.units = "native",
                        width = size[ index[ii] ], height = size[ index[ii] ]*(nrow(pch_image)/ncol(pch_image)))
                }
            } else {
                grid.points(seq_along(index), value[index], gp = subset_gp(gp, index),
                    default.units = "native", pch = pch[index], size = size[index])
            }
        }
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }

    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_points",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = data_scale,
        var_import = list(value, gp, border, pch, size, axis, axis_param, axis_grob, data_scale, pch_as_image)
    )

    anno@subset_rule$gp = subset_vector
    if(input_is_matrix) {
        anno@subset_rule$value = subset_matrix_by_row
        if(ncol(value) > 1) {
            # gp is per-column for multi-column input, so row subsetting
            # must leave it untouched
            anno@subset_rule$gp = NULL
        }
    } else {
        anno@subset_rule$value = subset_vector
        anno@subset_rule$gp = subset_gp
        anno@subset_rule$size = subset_vector
        anno@subset_rule$pch = subset_vector
    }

    anno@subsetable = TRUE

    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# Enlarge the annotation's extension slots (bottom/left/top/right, in mm)
# to make room for an outside-facing axis on the corresponding side.
update_anno_extend = function(anno, axis_grob, axis_param) {
    extended = anno@extended
    if(is.null(axis_grob)) {
        return(extended)
    }
    if(axis_param$facing == "outside") {
        if(axis_param$side == "left") {
            extended[2] = convertWidth(grobWidth(axis_grob), "mm")
        } else if(axis_param$side == "right") {
            extended[4] = convertWidth(grobWidth(axis_grob), "mm")
        } else if(axis_param$side == "top") {
            extended[3] = convertHeight(grobHeight(axis_grob), "mm")
        } else if(axis_param$side == "bottom") {
            extended[1] = convertHeight(grobHeight(axis_grob), "mm")
        }
    }
    return(extended)
}

# == title
# Lines Annotation
#
# == param
# -x The value vector. The value can be a vector or a matrix. The length of the vector
#    or the number of rows of the matrix is taken as the number of the observations of the annotation.
# -which Whether it is a column annotation or a row annotation?
# -border Whether draw borders of the annotation region?
# -gp Graphic parameters for lines. The length of each graphic parameter can be 1, or number of columns of ``x`` is ``x`` is a matrix.
# -add_points Whether to add points on the lines?
# -smooth If it is ``TRUE``, smoothing by `stats::loess` is performed. If it is ``TRUE``, ``add_points`` is set to ``TRUE`` by default.
# -pch Point type. The length setting is the same as ``gp``.
# -size Point size, the value should be a `grid::unit` object. The length setting is the same as ``gp``.
# -pt_gp Graphic parameters for points. The length setting is the same as ``gp``.
# -ylim Data ranges. By default it is ``range(x)``.
# -extend The extension to both side of ``ylim``. The value is a percent value corresponding to ``ylim[2] - ylim[1]``.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#lines-annotation
#
# == example
# anno = anno_lines(runif(10))
# draw(anno, test = "anno_lines")
# anno = anno_lines(cbind(c(1:5, 1:5), c(5:1, 5:1)), gp = gpar(col = 2:3))
# draw(anno, test = "matrix")
# anno = anno_lines(cbind(c(1:5, 1:5), c(5:1, 5:1)), gp = gpar(col = 2:3),
#   add_points = TRUE, pt_gp = gpar(col = 5:6), pch = c(1, 16))
# draw(anno, test = "matrix")
anno_lines = function(x, which = c("column", "row"), border = TRUE,
    gp = gpar(), add_points = smooth, smooth = FALSE,
    pch = 16, size = unit(2, "mm"), pt_gp = gpar(),
    ylim = NULL, extend = 0.05, axis = TRUE,
    axis_param = default_axis_param(which),
    width = NULL, height = NULL) {

    # Unit conversions below need an open device; outside of
    # HeatmapAnnotation open a null device and close it again on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    # single-column matrices behave like plain vectors
    if(is.data.frame(x)) x = as.matrix(x)
    if(is.matrix(x)) {
        if(ncol(x) == 1) {
            x = x[, 1]
        }
    }
    input_is_matrix = is.matrix(x)

    anno_size = anno_width_and_height(which, width, height, unit(1, "cm"))

    if(is.matrix(x)) {
        n = nrow(x)
        nr = n
        nc = ncol(x)
    } else {
        n = length(x)
        nr = n
        nc = 1
    }

    # Line settings are per-column; point settings for vector input are
    # per-observation.
    if(input_is_matrix) {
        gp = recycle_gp(gp, nc)
        pt_gp = recycle_gp(pt_gp, nc)
        if(length(pch) == 1) pch = rep(pch, nc)
        if(length(size) == 1) size = rep(size, nc)
    } else if(is.atomic(x)) {
        gp = recycle_gp(gp, 1)
        pt_gp = recycle_gp(pt_gp, n)
        if(length(pch) == 1) pch = rep(pch, n)
        if(length(size) == 1) size = rep(size, n)
    }

    if(is.null(ylim)) {
        data_scale = range(x, na.rm = TRUE)
    } else {
        data_scale = ylim
    }
    # pad the value range on both sides
    data_scale = data_scale + c(-extend, extend)*(data_scale[2] - data_scale[1])
    value = x

    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, data_scale) else NULL

    # `k`/`N`: index of the current slice and total number of slices,
    # used to draw the axis only on the outermost slice.
    row_fun = function(index, k = 1, N = 1) {
        n = length(index)
        if(axis_param$direction == "reverse") {
            value = data_scale[2] - value + data_scale[1]
        }
        pushViewport(viewport(xscale = data_scale, yscale = c(0.5, n+0.5)))
        if(is.matrix(value)) {
            for(i in seq_len(ncol(value))) {
                x = n - seq_along(index) + 1
                y = value[index, i]
                if(smooth) {
                    # loess-smooth the series and draw the fitted curve
                    fit = loess(y ~ x)
                    x2 = seq(x[1], x[length(x)], length = 100)
                    y2 = predict(fit, x2)
                    grid.lines(y2, x2, gp = subset_gp(gp, i), default.units = "native")
                } else {
                    grid.lines(y, x, gp = subset_gp(gp, i), default.units = "native")
                }
                # add_points may be a single flag or one flag per column
                if(length(add_points) == ncol(value)) {
                    if(add_points[i]) {
                        grid.points(y, x, gp = subset_gp(pt_gp, i), default.units = "native",
                            pch = pch[i], size = size[i])
                    }
                } else {
                    if(add_points) {
                        grid.points(y, x, gp = subset_gp(pt_gp, i), default.units = "native",
                            pch = pch[i], size = size[i])
                    }
                }
            }
        } else {
            x = n - seq_along(index) + 1
            y = value[index]
            if(smooth) {
                fit = loess(y ~ x)
                x2 = seq(x[1], x[length(x)], length = 100)
                y2 = predict(fit, x2)
                grid.lines(y2, x2, gp = gp, default.units = "native")
            } else {
                grid.lines(y, x, gp = gp, default.units = "native")
            }
            if(add_points) {
                grid.points(y, x, gp = subset_gp(pt_gp, index), default.units = "native",
                    pch = pch[index], size = size[index])
            }
        }
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    column_fun = function(index, k = 1, N = 1) {
        n = length(index)
        if(axis_param$direction == "reverse") {
            value = data_scale[2] - value + data_scale[1]
        }
        pushViewport(viewport(yscale = data_scale, xscale = c(0.5, n+0.5)))
        if(is.matrix(value)) {
            for(i in seq_len(ncol(value))) {
                x = seq_along(index)
                y = value[index, i]
                if(smooth) {
                    fit = loess(y ~ x)
                    x2 = seq(x[1], x[length(x)], length = 100)
                    y2 = predict(fit, x2)
                    grid.lines(x2, y2, gp = subset_gp(gp, i), default.units = "native")
                } else {
                    grid.lines(x, y, gp = subset_gp(gp, i), default.units = "native")
                }
                if(length(add_points) == ncol(value)) {
                    if(add_points[i]) {
                        grid.points(x, y, gp = subset_gp(pt_gp, i), default.units = "native",
                            pch = pch[i], size = size[i])
                    }
                } else {
                    if(add_points) {
                        grid.points(x, y, gp = subset_gp(pt_gp, i), default.units = "native",
                            pch = pch[i], size = size[i])
                    }
                }
            }
        } else {
            x = seq_along(index)
            y = value[index]
            if(smooth) {
                fit = loess(y ~ x)
                x2 = seq(x[1], x[length(x)], length = 100)
                y2 = predict(fit, x2)
                grid.lines(x2, y2, gp = gp, default.units = "native")
            } else {
                grid.lines(x, y, gp = gp, default.units = "native")
            }
            if(add_points) {
                grid.points(seq_along(index), value[index], gp = subset_gp(pt_gp, index),
                    default.units = "native", pch = pch[index], size = size[index])
            }
        }
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }

    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        # fixed: was mislabeled "anno_points", which misidentified the
        # annotation type in messages and introspection
        fun_name = "anno_lines",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = data_scale,
        var_import = list(value, gp, border, pch, size, pt_gp, axis, axis_param,
            axis_grob, data_scale, add_points, smooth)
    )

    anno@subset_rule$gp = subset_vector
    if(input_is_matrix) {
        anno@subset_rule$value = subset_matrix_by_row
        if(ncol(value) > 1) {
            # per-column gp must not be subset by rows
            anno@subset_rule$gp = NULL
        }
    } else {
        anno@subset_rule$value = subset_vector
        anno@subset_rule$gp = subset_gp
        anno@subset_rule$pt_gp = subset_gp
        anno@subset_rule$size = subset_vector
        anno@subset_rule$pch = subset_vector
    }

    anno@subsetable = TRUE

    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# == title
# Barplot
Annotation # # == param # -x The value vector. The value can be a vector or a matrix. The length of the vector # or the number of rows of the matrix is taken as the number of the observations of the annotation. # If ``x`` is a vector, the barplots will be represented as stacked barplots. # -baseline baseline of bars. The value should be "min" or "max", or a numeric value. It is enforced to be zero # for stacked barplots. # -which Whether it is a column annotation or a row annotation? # -border Wether draw borders of the annotation region? # -bar_width Relative width of the bars. The value should be smaller than one. # -gp Graphic parameters for points. The length of each graphic parameter can be 1, length of ``x`` if ``x`` # is a vector, or number of columns of ``x`` is ``x`` is a matrix. # -ylim Data ranges. By default it is ``range(x)`` if ``x`` is a vector, or ``range(rowSums(x))`` if ``x`` is a matrix. # -extend The extension to both side of ``ylim``. The value is a percent value corresponding to ``ylim[2] - ylim[1]``. # -axis Whether to add axis? # -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters. # -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation. # -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation. # -... Other arguments. # # == value # An annotation function which can be used in `HeatmapAnnotation`. 
# # == seealso # https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#barplot_annotation # # == example # anno = anno_barplot(1:10) # draw(anno, test = "a vector") # # m = matrix(runif(4*10), nc = 4) # m = t(apply(m, 1, function(x) x/sum(x))) # anno = anno_barplot(m, gp = gpar(fill = 2:5), bar_width = 1, height = unit(6, "cm")) # draw(anno, test = "proportion matrix") anno_barplot = function(x, baseline = 0, which = c("column", "row"), border = TRUE, bar_width = 0.6, gp = gpar(fill = "#CCCCCC"), ylim = NULL, extend = 0.05, axis = TRUE, axis_param = default_axis_param(which), width = NULL, height = NULL, ...) { other_args = list(...) if(length(other_args)) { if("axis_gp" %in% names(other_args)) { stop_wrap("`axis_gp` is removed from the arguments. Use `axis_param = list(gp = ...)` instead.") } if("axis_side" %in% names(other_args)) { stop_wrap("`axis_side` is removed from the arguments. Use `axis_param = list(side = ...)` instead.") } if("axis_direction" %in% names(other_args)) { stop_wrap("`axis_direction` is not supported any more.") } } if(inherits(x, "list")) x = do.call("cbind", x) if(inherits(x, "data.frame")) x = as.matrix(x) if(inherits(x, "matrix")) { sg = apply(x, 1, function(xx) all(sign(xx) %in% c(1, 0)) || all(sign(xx) %in% c(-1, 0))) if(!all(sg)) { stop_wrap("Since `x` is a matrix, the sign of each row should be either all positive or all negative.") } } # convert everything to matrix if(is.null(dim(x))) x = matrix(x, ncol = 1) nc = ncol(x) if(missing(gp)) { gp = gpar(fill = grey(seq(0, 1, length = nc+2))[-c(1, nc+2)]) } data_scale = range(rowSums(x, na.rm = TRUE), na.rm = TRUE) if(!is.null(ylim)) data_scale = ylim if(baseline == "min") { data_scale = data_scale + c(0, extend)*(data_scale[2] - data_scale[1]) baseline = min(x) } else if(baseline == "max") { data_scale = data_scale + c(-extend, 0)*(data_scale[2] - data_scale[1]) baseline = max(x) } else { if(is.numeric(baseline)) { if(baseline == 0 && all(abs(rowSums(x) - 1) 
< 1e-6)) { data_scale = c(0, 1) } else if(baseline <= data_scale[1]) { data_scale = c(baseline, extend*(data_scale[2] - baseline) + data_scale[2]) } else if(baseline >= data_scale[2]) { data_scale = c(-extend*(baseline - data_scale[1]) + data_scale[1], baseline) } else { data_scale = data_scale + c(-extend, extend)*(data_scale[2] - data_scale[1]) } } } ef = function() NULL if(is.null(.ENV$current_annotation_which)) { which = match.arg(which)[1] dev.null() ef = dev.off2 } else { which = .ENV$current_annotation_which } on.exit(ef()) anno_size = anno_width_and_height(which, width, height, unit(1, "cm")) if(nc == 1) { gp = recycle_gp(gp, nrow(x)) } else { gp = recycle_gp(gp, nc) } value = x axis_param = validate_axis_param(axis_param, which) axis_grob = if(axis) construct_axis_grob(axis_param, which, data_scale) else NULL row_fun = function(index, k = 1, N = 1) { n = length(index) if(axis_param$direction == "reverse") { value_origin = value value = data_scale[2] - value + data_scale[1] baseline = data_scale[2] - baseline + data_scale[1] } pushViewport(viewport(xscale = data_scale, yscale = c(0.5, n+0.5))) if(ncol(value) == 1) { width = value[index] - baseline x_coor = width/2+baseline grid.rect(x = x_coor, y = n - seq_along(index) + 1, width = abs(width), height = 1*bar_width, default.units = "native", gp = subset_gp(gp, index)) } else { for(i in seq_len(ncol(value))) { if(axis_param$direction == "normal") { width = abs(value[index, i]) x_coor = rowSums(value[index, seq_len(i-1), drop = FALSE]) + width/2 grid.rect(x = x_coor, y = n - seq_along(index) + 1, width = abs(width), height = 1*bar_width, default.units = "native", gp = subset_gp(gp, i)) } else { width = value_origin[index, i] # the original width x_coor = rowSums(value_origin[index, seq_len(i-1), drop = FALSE]) + width/2 #distance to the right x_coor = data_scale[2] - x_coor + data_scale[1] grid.rect(x = x_coor, y = n - seq_along(index) + 1, width = abs(width), height = 1*bar_width, default.units = "native", gp 
= subset_gp(gp, i)) } } } if(axis_param$side == "top") { if(k > 1) axis = FALSE } else if(axis_param$side == "bottom") { if(k < N) axis = FALSE } if(axis) grid.draw(axis_grob) if(border) grid.rect(gp = gpar(fill = "transparent")) popViewport() } column_fun = function(index, k = 1, N = 1) { n = length(index) if(axis_param$direction == "reverse") { value_origin = value value = data_scale[2] - value + data_scale[1] baseline = data_scale[2] - baseline + data_scale[1] } pushViewport(viewport(yscale = data_scale, xscale = c(0.5, n+0.5))) if(ncol(value) == 1) { height = value[index] - baseline y_coor = height/2+baseline grid.rect(y = y_coor, x = seq_along(index), height = abs(height), width = 1*bar_width, default.units = "native", gp = subset_gp(gp, index)) } else { for(i in seq_len(ncol(value))) { if(axis_param$direction == "normal") { height = value[index, i] y_coor = rowSums(value[index, seq_len(i-1), drop = FALSE]) + height/2 grid.rect(y = y_coor, x = seq_along(index), height = abs(height), width = 1*bar_width, default.units = "native", gp = subset_gp(gp, i)) } else { height = value_origin[index, i] y_coor = rowSums(value_origin[index, seq_len(i-1), drop = FALSE]) + height/2 y_coor = data_scale[2] - y_coor + data_scale[1] grid.rect(y = y_coor, x = seq_along(index), height = abs(height), width = 1*bar_width, default.units = "native", gp = subset_gp(gp, i)) } } } if(axis_param$side == "left") { if(k > 1) axis = FALSE } else if(axis_param$side == "right") { if(k < N) axis = FALSE } if(axis) grid.draw(axis_grob) if(border) grid.rect(gp = gpar(fill = "transparent")) popViewport() } if(which == "row") { fun = row_fun } else if(which == "column") { fun = column_fun } n = nrow(value) anno = AnnotationFunction( fun = fun, fun_name = "anno_barplot", which = which, width = anno_size$width, height = anno_size$height, n = n, data_scale = data_scale, var_import = list(value, gp, border, bar_width, baseline, axis, axis_param, axis_grob, data_scale) ) anno@subset_rule$value = 
subset_matrix_by_row
    # Only when there is a single bar per row can the graphic parameters be
    # subsetted one-to-one with the rows.
    if(ncol(value) == 1) {
        anno@subset_rule$gp = subset_gp
    }
    anno@subsetable = TRUE
    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# == title
# Boxplot Annotation
#
# == param
# -x A matrix or a list. If ``x`` is a matrix and if ``which`` is ``column``, statistics for boxplots
#    are calculated by columns, if ``which`` is ``row``, the calculation is done by rows.
# -which Whether it is a column annotation or a row annotation?
# -border Whether draw borders of the annotation region?
# -gp Graphic parameters for the boxes. The length of the graphic parameters should be one or the number of observations.
# -ylim Data ranges.
# -extend The extension to both side of ``ylim``. The value is a percent value corresponding to ``ylim[2] - ylim[1]``.
# -outline Whether draw outline of boxplots?
# -box_width Relative width of boxes. The value should be smaller than one.
# -pch Point style.
# -size Point size.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
# -... Other arguments.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#box-annotation
#
# == example
# set.seed(123)
# m = matrix(rnorm(100), 10)
# anno = anno_boxplot(m, height = unit(4, "cm"))
# draw(anno, test = "anno_boxplot")
# anno = anno_boxplot(m, height = unit(4, "cm"), gp = gpar(fill = 1:10))
# draw(anno, test = "anno_boxplot with gp")
anno_boxplot = function(x, which = c("column", "row"), border = TRUE,
    gp = gpar(fill = "#CCCCCC"), ylim = NULL, extend = 0.05, outline = TRUE,
    box_width = 0.6, pch = 1, size = unit(2, "mm"), axis = TRUE,
    axis_param = default_axis_param(which),
    width = NULL, height = NULL, ...) {

    # Reject arguments that were removed from the public API so callers get an
    # actionable message instead of a silently ignored argument.
    other_args = list(...)
    if(length(other_args)) {
        if("axis_gp" %in% names(other_args)) {
            stop_wrap("`axis_gp` is removed from the arguments. Use `axis_param = list(gp = ...)` instead.")
        }
        if("axis_side" %in% names(other_args)) {
            stop_wrap("`axis_side` is removed from the arguments. Use `axis_param = list(side = ...)` instead.")
        }
        if("axis_direction" %in% names(other_args)) {
            stop_wrap("`axis_direction` is not supported any more.")
        }
    }

    # When called outside a heatmap, open a null device so that unit
    # calculations work; `ef` restores the device state on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(2, "cm"))

    ## convert matrix all to list (or data frame)
    if(is.matrix(x)) {
        if(which == "column") {
            value = as.data.frame(x)
        } else if(which == "row") {
            value = as.data.frame(t(x))
        }
    } else {
        value = x
    }

    if(is.null(ylim)) {
        if(!outline) {
            # Without outliers, scale only to the whiskers.
            boxplot_stats = boxplot(value, plot = FALSE)$stats
            data_scale = range(boxplot_stats)
        } else {
            data_scale = range(value, na.rm = TRUE)
        }
    } else {
        data_scale = ylim
    }
    data_scale = data_scale + c(-extend, extend)*(data_scale[2] - data_scale[1])

    n = length(value)

    gp = recycle_gp(gp, n)
    if(length(pch) == 1) pch = rep(pch, n)
    if(length(size) == 1) size = rep(size, n)

    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, data_scale) else NULL

    # Drawing function used when the annotation is attached to heatmap rows.
    row_fun = function(index, k = 1, N = 1) {
        if(axis_param$direction == "reverse") {
            value = lapply(value, function(x) data_scale[2] - x + data_scale[1])
        }

        n_all = length(value)
        value = value[index]
        boxplot_stats = boxplot(value, plot = FALSE)$stats

        n = length(index)
        gp = subset_gp(gp, index)
        pch = pch[index]
        size = size[index]

        pushViewport(viewport(xscale = data_scale, yscale = c(0.5, n+0.5)))
        # Box body between the lower and upper hinge.
        grid.rect(x = boxplot_stats[2, ], y = n - seq_along(index) + 1,
            height = 1*box_width, width = boxplot_stats[4, ] - boxplot_stats[2, ],
            just = "left", default.units = "native", gp = gp)
        # Whisker caps, whisker lines and the median segment.
        grid.segments(boxplot_stats[5, ], n - seq_along(index) + 1 - 0.5*box_width,
            boxplot_stats[5, ], n - seq_along(index) + 1 + 0.5*box_width,
            default.units = "native", gp = gp)
        grid.segments(boxplot_stats[5, ], n - seq_along(index) + 1,
            boxplot_stats[4, ], n - seq_along(index) + 1,
            default.units = "native", gp = gp)
        grid.segments(boxplot_stats[1, ], n - seq_along(index) + 1,
            boxplot_stats[2, ], n - seq_along(index) + 1,
            default.units = "native", gp = gp)
        grid.segments(boxplot_stats[1, ], n - seq_along(index) + 1 - 0.5*box_width,
            boxplot_stats[1, ], n - seq_along(index) + 1 + 0.5*box_width,
            default.units = "native", gp = gp)
        grid.segments(boxplot_stats[3, ], n - seq_along(index) + 1 - 0.5*box_width,
            boxplot_stats[3, ], n - seq_along(index) + 1 + 0.5*box_width,
            default.units = "native", gp = gp)
        if(outline) {
            # Draw points beyond the whiskers as outliers.
            for(i in seq_along(value)) {
                l1 = value[[i]] > boxplot_stats[5,i]
                l1[is.na(l1)] = FALSE
                if(sum(l1)) grid.points(y = rep(n - i + 1, sum(l1)), x = value[[i]][l1],
                    default.units = "native", gp = subset_gp(gp, i), pch = pch[i], size = size[i])
                l2 = value[[i]] < boxplot_stats[1,i]
                l2[is.na(l2)] = FALSE
                if(sum(l2)) grid.points(y = rep(n - i + 1, sum(l2)), x = value[[i]][l2],
                    default.units = "native", gp = subset_gp(gp, i), pch = pch[i], size = size[i])
            }
        }
        # With a split heatmap, the axis is only drawn once on the outer slice.
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    # Drawing function used when the annotation is attached to heatmap columns.
    column_fun = function(index, k = 1, N = 1) {
        if(axis_param$direction == "reverse") {
            value = lapply(value, function(x) data_scale[2] - x + data_scale[1])
        }

        value = value[index]
        boxplot_stats = boxplot(value, plot = FALSE)$stats
        n = length(index)
        gp = subset_gp(gp, index)
        pch = pch[index]
        size = size[index]

        pushViewport(viewport(xscale = c(0.5, n+0.5), yscale = data_scale))
        grid.rect(x = seq_along(index), y = boxplot_stats[2, ],
            height = boxplot_stats[4, ] - boxplot_stats[2, ], width = 1*box_width,
            just = "bottom", default.units = "native", gp = gp)
        grid.segments(seq_along(index) - 0.5*box_width, boxplot_stats[5, ],
            seq_along(index) + 0.5*box_width, boxplot_stats[5, ],
            default.units = "native", gp = gp)
        grid.segments(seq_along(index), boxplot_stats[5, ],
            seq_along(index), boxplot_stats[4, ],
            default.units = "native", gp = gp)
        grid.segments(seq_along(index), boxplot_stats[1, ],
            seq_along(index), boxplot_stats[2, ],
            default.units = "native", gp = gp)
        grid.segments(seq_along(index) - 0.5*box_width, boxplot_stats[1, ],
            seq_along(index) + 0.5*box_width, boxplot_stats[1, ],
            default.units = "native", gp = gp)
        grid.segments(seq_along(index) - 0.5*box_width, boxplot_stats[3, ],
            seq_along(index) + 0.5*box_width, boxplot_stats[3, ],
            default.units = "native", gp = gp)
        if(outline) {
            for(i in seq_along(value)) {
                l1 = value[[i]] > boxplot_stats[5,i]
                l1[is.na(l1)] = FALSE
                if(sum(l1)) grid.points(x = rep(i, sum(l1)), y = value[[i]][l1],
                    default.units = "native", gp = subset_gp(gp, i), pch = pch[i], size = size[i])
                l2 = value[[i]] < boxplot_stats[1,i]
                l2[is.na(l2)] = FALSE
                if(sum(l2)) grid.points(x = rep(i, sum(l2)), y = value[[i]][l2],
                    default.units = "native", gp = subset_gp(gp, i), pch = pch[i], size = size[i])
            }
        }
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_boxplot",
        which = which,
        n = n,
        width = anno_size$width,
        height = anno_size$height,
        data_scale = data_scale,
        var_import = list(value, gp, border, box_width, axis, axis_param,
            axis_grob, data_scale, pch, size, outline)
    )

    # Rules describing how imported variables are subsetted when the heatmap
    # is split or reordered.
    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp
    anno@subset_rule$pch = subset_vector
    anno@subset_rule$size = subset_vector

    anno@subsetable = TRUE

    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# == title
# Histogram Annotation
#
# == param
# -x A matrix or a list. If ``x`` is a matrix and if ``which`` is ``column``, statistics for boxplots
#    are calculated by columns, if ``which`` is ``row``, the calculation is done by rows.
# -which Whether it is a column annotation or a row annotation?
# -n_breaks Number of breaks for calculating histogram.
# -border Whether draw borders of the annotation region?
# -gp Graphic parameters for the boxes. The length of the graphic parameters should be one or the number of observations.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#histogram-annotation
#
# == example
# m = matrix(rnorm(1000), nc = 10)
# anno = anno_histogram(t(m), which = "row")
# draw(anno, test = "row histogram")
# anno = anno_histogram(t(m), which = "row", gp = gpar(fill = 1:10))
# draw(anno, test = "row histogram with color")
# anno = anno_histogram(t(m), which = "row", n_breaks = 20)
# draw(anno, test = "row histogram with color")
anno_histogram = function(x, which = c("column", "row"), n_breaks = 11,
    border = FALSE, gp = gpar(fill = "#CCCCCC"),
    axis = TRUE, axis_param = default_axis_param(which),
    width = NULL, height = NULL) {

    # When called outside a heatmap, open a null device so that unit
    # calculations work; `ef` restores the device state on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(4, "cm"))

    ## convert matrix all to list (or data frame)
    if(is.matrix(x)) {
        if(which == "column") {
            value = as.data.frame(x)
        } else if(which == "row") {
            value = as.data.frame(t(x))
        }
    } else {
        value = x
    }

    n = length(value)
    # All histograms share the same break positions so they are comparable.
    x_range = range(unlist(value), na.rm = TRUE)
    histogram_stats = lapply(value, hist, plot = FALSE,
        breaks = seq(x_range[1], x_range[2], length = n_breaks))
    histogram_breaks = lapply(histogram_stats, function(x) x$breaks)
    histogram_counts = lapply(histogram_stats, function(x) x$counts)

    xscale = range(unlist(histogram_breaks), na.rm = TRUE)
    xscale = xscale + c(-0.025, 0.025)*(xscale[2] - xscale[1])
    yscale = c(0, max(unlist(histogram_counts)))
    yscale[2] = yscale[2]*1.05

    gp = recycle_gp(gp, n)

    # Reversed axes are not supported for histograms.
    axis_param$direction = "normal"
    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, xscale) else NULL

    # Drawing function used when the annotation is attached to heatmap rows.
    row_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        n = length(index)
        histogram_breaks = histogram_breaks[index]
        histogram_counts = histogram_counts[index]
        gp = subset_gp(gp, index)
        for(i in seq_len(n)) {
            n_breaks = length(histogram_breaks[[i]])
            # One sub-viewport per row, stacked from top to bottom.
            pushViewport(viewport(x = unit(0, "npc"), y = unit((n-i)/n, "npc"),
                height = unit(1/n, "npc"), just = c("left", "bottom"),
                xscale = xscale, yscale = yscale))
            grid.rect(x = histogram_breaks[[i]][-1], y = 0,
                width = histogram_breaks[[i]][-1] - histogram_breaks[[i]][-n_breaks],
                height = histogram_counts[[i]],
                just = c("right", "bottom"), default.units = "native",
                gp = subset_gp(gp, i))
            popViewport()
        }
        pushViewport(viewport(xscale = xscale))
        # With a split heatmap, the axis is only drawn once on the outer slice.
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    # Drawing function used when the annotation is attached to heatmap columns.
    column_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        # Swap x/y scales because the histograms are rotated for columns.
        foo = yscale
        yscale = xscale
        xscale = foo
        histogram_breaks = histogram_breaks[index]
        histogram_counts = histogram_counts[index]
        n = length(index)
        gp = subset_gp(gp, index)
        for(i in seq_len(n)) {
            n_breaks = length(histogram_breaks[[i]])
            pushViewport(viewport(y = unit(0, "npc"), x = unit(i/n, "npc"),
                width = unit(1/n, "npc"), just = c("right", "bottom"),
                xscale = xscale, yscale = yscale))
            grid.rect(y = histogram_breaks[[i]][-1], x = 0,
                height = histogram_breaks[[i]][-1] - histogram_breaks[[i]][-n_breaks],
                width = histogram_counts[[i]],
                just = c("left", "top"), default.units = "native",
                gp = subset_gp(gp, i))
            popViewport()
        }
        pushViewport(viewport(yscale = yscale))
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_histogram",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = xscale,
        var_import = list(value, gp, border, axis, axis_param, axis_grob,
            xscale, yscale, histogram_breaks, histogram_counts)
    )

    # Rules describing how imported variables are subsetted on heatmap split.
    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp
    anno@subset_rule$histogram_breaks = subset_vector
    anno@subset_rule$histogram_counts = subset_vector

    anno@subsetable = TRUE

    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# == title
# Density Annotation
#
# == param
# -x A matrix or a list. If ``x`` is a matrix and if ``which`` is ``column``, statistics for boxplots
#    are calculated by columns, if ``which`` is ``row``, the calculation is done by rows.
# -which Whether it is a column annotation or a row annotation?
# -type Type of graphics to represent density distribution. "lines" for normal density plot;
#    "violine" for violin plot and "heatmap" for heatmap visualization of density distribution.
# -xlim Range on x-axis.
# -heatmap_colors A vector of colors for interpolating density values.
# -joyplot_scale Relative height of density distribution. A value higher than 1 increases the height
#    of the density distribution and the plot will represented as so-called "joyplot".
# -border Whether draw borders of the annotation region?
# -gp Graphic parameters for the boxes. The length of the graphic parameters should be one or the number of observations.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#density-annotation
#
# == example
# m = matrix(rnorm(100), 10)
# anno = anno_density(m, which = "row")
# draw(anno, test = "normal density")
# anno = anno_density(m, which = "row", type = "violin")
# draw(anno, test = "violin")
# anno = anno_density(m, which = "row", type = "heatmap")
# draw(anno, test = "heatmap")
# anno = anno_density(m, which = "row", type = "heatmap",
#     heatmap_colors = c("white", "orange"))
# draw(anno, test = "heatmap, colors")
anno_density = function(x, which = c("column", "row"),
    type = c("lines", "violin", "heatmap"),
    xlim = NULL,
    heatmap_colors = rev(brewer.pal(name = "RdYlBu", n = 11)),
    joyplot_scale = 1, border = TRUE,
    gp = gpar(fill = "#CCCCCC"),
    axis = TRUE, axis_param = default_axis_param(which),
    width = NULL, height = NULL) {

    # When called outside a heatmap, open a null device so that unit
    # calculations work; `ef` restores the device state on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(4, "cm"))

    ## convert matrix all to list (or data frame)
    if(is.matrix(x)) {
        if(which == "column") {
            value = as.data.frame(x)
        } else if(which == "row") {
            value = as.data.frame(t(x))
        }
    } else {
        value = x
    }

    n = length(value)
    gp = recycle_gp(gp, n)

    type = match.arg(type)[1]

    n_all = length(value)
    # Pre-compute the density curves once; the drawing functions only subset.
    density_stats = lapply(value, density, na.rm = TRUE)
    density_x = lapply(density_stats, function(x) x$x)
    density_y = lapply(density_stats, function(x) x$y)
    min_density_x = min(unlist(density_x))
    max_density_x = max(unlist(density_x))
    if(is.null(xlim)) {
        xscale = range(unlist(density_x), na.rm = TRUE)
    } else {
        xscale = xlim
        # Clip each curve to `xlim` and close the polygon at the boundaries.
        for(i in seq_len(n)) {
            l = density_x[[i]] >= xscale[1] & density_x[[i]] <= xscale[2]
            density_x[[i]] = density_x[[i]][l]
            density_y[[i]] = density_y[[i]][l]
            density_x[[i]] = c(density_x[[i]][ 1 ], density_x[[i]], density_x[[i]][ length(density_x[[i]]) ])
            density_y[[i]] = c(0, density_y[[i]], 0)
        }
    }
    if(type == "lines") {
        xscale = xscale + c(-0.025, 0.025)*(xscale[2] - xscale[1])
        yscale = c(0, max(unlist(density_y)))
        yscale[2] = yscale[2]*1.05
    } else if(type == "violin") {
        xscale = xscale + c(-0.025, 0.025)*(xscale[2] - xscale[1])
        yscale = max(unlist(density_y))
        yscale = c(-yscale*1.05, yscale*1.05)
    } else if(type == "heatmap") {
        yscale = c(0, 1)
        min_y = min(unlist(density_y))
        max_y = max(unlist(density_y))
        # Map density values to colors over the full range of all curves.
        col_fun = colorRamp2(seq(min_y, max_y, length = length(heatmap_colors)), heatmap_colors)
    }

    # Reversed axes are not supported for density annotations.
    axis_param$direction = "normal"
    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, xscale) else NULL

    # Drawing function used when the annotation is attached to heatmap rows.
    row_fun = function(index, k = 1, N = 1) {
        n = length(index)
        value = value[index]
        gp = subset_gp(gp, index)
        density_x = density_x[index]
        density_y = density_y[index]
        for(i in seq_len(n)) {
            pushViewport(viewport(x = unit(0, "npc"), y = unit((n-i)/n, "npc"),
                just = c("left", "bottom"), height = unit(1/n, "npc"),
                xscale = xscale, yscale = yscale))
            if(type == "lines") {
                grid.polygon(x = density_x[[i]], y = density_y[[i]]*joyplot_scale,
                    default.units = "native", gp = subset_gp(gp, i))
            } else if(type == "violin") {
                # Mirror the curve around zero and overlay box statistics.
                grid.polygon(x = c(density_x[[i]], rev(density_x[[i]])),
                    y = c(density_y[[i]], -rev(density_y[[i]])),
                    default.units = "native", gp = subset_gp(gp, i))
                box_stat = boxplot(value[[i]], plot = FALSE)$stat
                grid.lines(box_stat[1:2, 1], c(0, 0), default.units = "native", gp = subset_gp(gp, i))
                grid.lines(box_stat[4:5, 1], c(0, 0), default.units = "native", gp = subset_gp(gp, i))
                grid.points(box_stat[3, 1], 0, default.units = "native", pch = 3,
                    size = unit(1, "mm"), gp = subset_gp(gp, i))
            } else if(type == "heatmap") {
                n_breaks = length(density_x[[i]])
                grid.rect(x = density_x[[i]][-1], y = 0,
                    width = density_x[[i]][-1] - density_x[[i]][-n_breaks], height = 1,
                    just = c("right", "bottom"), default.units = "native",
                    gp = gpar(fill = col_fun((density_y[[i]][-1] + density_y[[i]][-n_breaks])/2), col = NA))
                # Pad both ends with zero-density color so all rows span the
                # same x-range.
                grid.rect(x = density_x[[i]][1], y = 0,
                    width = density_x[[i]][1] - min_density_x, height = 1,
                    just = c("right", "bottom"), default.units = "native",
                    gp = gpar(fill = col_fun(0), col = NA))
                grid.rect(x = density_x[[i]][n_breaks], y = 0,
                    width = max_density_x - density_x[[i]][n_breaks], height = 1,
                    just = c("left", "bottom"), default.units = "native",
                    gp = gpar(fill = col_fun(0), col = NA))
            }
            popViewport()
        }
        pushViewport(viewport(xscale = xscale))
        # With a split heatmap, the axis is only drawn once on the outer slice.
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    # Drawing function used when the annotation is attached to heatmap columns.
    column_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        # Swap x/y scales because the curves are rotated for columns.
        foo = yscale
        yscale = xscale
        xscale = foo
        density_x = density_x[index]
        density_y = density_y[index]
        yscale = range(unlist(density_x), na.rm = TRUE)
        yscale = yscale + c(0, 0.05)*(yscale[2] - yscale[1])
        if(type == "lines") {
            xscale = c(0, max(unlist(density_y)))
            xscale[2] = xscale[2]*1.05
        } else if(type == "violin") {
            xscale = max(unlist(density_y))
            xscale = c(-xscale*1.05, xscale*1.05)
        } else if(type == "heatmap") {
            yscale = range(unlist(density_x), na.rm = TRUE)
            xscale = c(0, 1)
            min_y = min(unlist(density_y))
            max_y = max(unlist(density_y))
            col_fun = colorRamp2(seq(min_y, max_y, length = length(heatmap_colors)), heatmap_colors)
        }
        n = length(index)
        gp = subset_gp(gp, index)
        for(i in rev(seq_len(n))) {
            pushViewport(viewport(y = unit(0, "npc"), x = unit(i/n, "npc"),
                width = unit(1/n, "npc"), just = c("right", "bottom"),
                xscale = xscale, yscale = yscale))
            if(type == "lines") {
                grid.polygon(y = density_x[[i]], x = density_y[[i]]*joyplot_scale,
                    default.units = "native", gp = subset_gp(gp, i))
            } else if(type == "violin") {
                grid.polygon(y = c(density_x[[i]], rev(density_x[[i]])),
                    x = c(density_y[[i]], -rev(density_y[[i]])),
                    default.units = "native", gp = subset_gp(gp, i))
                box_stat = boxplot(value[[i]], plot = FALSE)$stat
                grid.lines(y = box_stat[1:2, 1], x = c(0, 0), default.units = "native", gp = subset_gp(gp, i))
                grid.lines(y = box_stat[4:5, 1], x = c(0, 0), default.units = "native", gp = subset_gp(gp, i))
                grid.points(y = box_stat[3, 1], x = 0, default.units = "native", pch = 3,
                    size = unit(1, "mm"), gp = subset_gp(gp, i))
            } else if(type == "heatmap") {
                n_breaks = length(density_x[[i]])
                grid.rect(y = density_x[[i]][-1], x = 0,
                    height = density_x[[i]][-1] - density_x[[i]][-n_breaks], width = 1,
                    just = c("left", "top"), default.units = "native",
                    gp = gpar(fill = col_fun((density_y[[i]][-1] + density_y[[i]][-n_breaks])/2), col = NA))
                grid.rect(y = density_x[[i]][1], x = 0,
                    height = density_x[[i]][1] - min_density_x, width = 1,
                    just = c("left", "top"), default.units = "native",
                    gp = gpar(fill = col_fun(0), col = NA))
                grid.rect(y = density_x[[i]][n_breaks], x = 0,
                    height = max_density_x - density_x[[i]][n_breaks], width = 1,
                    just = c("left", "bottom"), default.units = "native",
                    gp = gpar(fill = col_fun(0), col = NA))
            }
            popViewport()
        }
        pushViewport(viewport(yscale = yscale))
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        if(border) grid.rect(gp = gpar(fill = "transparent"))
        popViewport()
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_density",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = xscale,
        var_import = list(value, gp, border, type, axis, axis_param, axis_grob,
            xscale, yscale, density_x, density_y, min_density_x, max_density_x,
            joyplot_scale, heatmap_colors)
    )
    # The color mapping function only exists in heatmap mode.
    if(type == "heatmap") {
        anno@var_env$col_fun = col_fun
    }

    # Rules describing how imported variables are subsetted on heatmap split.
    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp
    anno@subset_rule$density_x = subset_vector
    anno@subset_rule$density_y = subset_vector

    anno@subsetable = TRUE

    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# == title
# Text Annotation
#
# == param
# -x A vector of text.
# -which Whether it is a column annotation or a row annotation?
# -gp Graphic parameters.
# -rot Rotation of the text, pass to `grid::grid.text`.
# -just Justification of text, pass to `grid::grid.text`.
# -offset Deprecated, use ``location`` instead.
# -location Position of the text. By default ``rot``, ``just`` and ``location`` are automatically
#    inferred according to whether it is a row annotation or column annotation. The value
#    of ``location`` should be a `grid::unit` object, normally in ``npc`` unit. E.g. ``unit(0, 'npc')``
#    means the most left of the annotation region and ``unit(1, 'npc')`` means the most right of
#    the annotation region.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#text-annotation
#
# == example
# anno = anno_text(month.name)
# draw(anno, test = "month names")
# anno = anno_text(month.name, gp = gpar(fontsize = 16))
# draw(anno, test = "month names with fontsize")
# anno = anno_text(month.name, gp = gpar(fontsize = 1:12+4))
# draw(anno, test = "month names with changing fontsize")
# anno = anno_text(month.name, which = "row")
# draw(anno, test = "month names on rows")
# anno = anno_text(month.name, location = 0, rot = 45,
#     just = "left", gp = gpar(col = 1:12))
# draw(anno, test = "with rotations")
# anno = anno_text(month.name, location = 1,
#     rot = 45, just = "right", gp = gpar(fontsize = 1:12+4))
# draw(anno, test = "with rotations")
anno_text = function(x, which = c("column", "row"), gp = gpar(),
    rot = guess_rot(), just = guess_just(),
    offset = guess_location(), location = guess_location(),
    width = NULL, height = NULL) {

    # When called outside a heatmap, open a null device so that text-size
    # calculations work; `ef` restores the device state on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    n = length(x)
    gp = recycle_gp(gp, n)

    # NOTE: these helpers are used as *default* argument values above.
    # R's lazy evaluation of defaults makes that work: the defaults are only
    # evaluated here in the function frame, after the helpers are defined.
    guess_rot = function() {
        ifelse(which == "column", 90, 0)
    }

    guess_just = function() {
        ifelse(which == "column", "right", "left")
    }

    guess_location = function() {
        unit(ifelse(which == "column", 1, 0), "npc")
    }

    rot = rot[1] %% 360
    just = just[1]
    # Backward compatibility: `offset` was renamed to `location`.
    if(!missing(offset)) {
        warning_wrap("`offset` is deprecated, use `location` instead.")
        if(missing(location)) {
            location = offset
        }
    }
    location = location[1]
    if(!inherits(location, "unit")) {
        location = unit(location, "npc")
    }

    # Nudge the anchor by half a character height so rotated text does not
    # visually overflow the annotation region.
    if(which == "column") {
        if("right" %in% just) {
            if(rot < 180) {
                location = location - 0.5*grobHeight(textGrob("A", gp = gp))*abs(cos(rot/180*pi))
            } else {
                location = location + 0.5*grobHeight(textGrob("A", gp = gp))*abs(cos(rot/180*pi))
            }
        } else if("left" %in% just) {
            if(rot < 180) {
                location = location + 0.5*grobHeight(textGrob("A", gp = gp))*abs(cos(rot/180*pi))
            } else {
                location = location - 0.5*grobHeight(textGrob("A", gp = gp))*abs(cos(rot/180*pi))
            }
        }
    }

    # Default annotation size: just large enough for the (possibly rotated)
    # longest label.
    if(which == "column") {
        if(missing(height)) {
            height = max_text_width(x, gp = gp)*abs(sin(rot/180*pi)) + grobHeight(textGrob("A", gp = gp))*abs(cos(rot/180*pi))
            height = convertHeight(height, "mm")
        }
        if(missing(width)) {
            width = unit(1, "npc")
        }
    }
    if(which == "row") {
        if(missing(width)) {
            width = max_text_width(x, gp = gp)*abs(cos(rot/180*pi)) + grobHeight(textGrob("A", gp = gp))*abs(sin(rot/180*pi))
            width = convertWidth(width, "mm")
        }
        if(missing(height)) {
            height = unit(1, "npc")
        }
    }

    anno_size = list(width = width, height = height)

    value = x

    # Drawing function used when the annotation is attached to heatmap rows.
    row_fun = function(index) {
        n = length(index)
        gp = subset_gp(gp, index)
        # Non-standard gp fields: `border` draws a cell outline, `fill` fills
        # the background behind each label.
        gp2 = gp
        if("border" %in% names(gp2)) gp2$col = gp2$border
        if("fill" %in% names(gp2)) {
            if(!"border" %in% names(gp2)) gp2$col = gp2$fill
        }
        if(any(c("border", "fill") %in% names(gp2))) {
            grid.rect(y = (n - seq_along(index) + 0.5)/n, height = 1/n, gp = gp2)
        }
        grid.text(value[index], location, (n - seq_along(index) + 0.5)/n, gp = gp,
            just = just, rot = rot)
        # if(add_lines) {
        #     if(n > 1) {
        #         grid.segments(0, (n - seq_along(index)[-n])/n, 1, (n - seq_along(index)[-n])/n, default.units = "native")
        #     }
        # }
    }
    # Drawing function used when the annotation is attached to heatmap columns.
    column_fun = function(index, k = NULL, N = NULL, vp_name = NULL) {
        n = length(index)
        gp = subset_gp(gp, index)
        gp2 = gp
        if("border" %in% names(gp2)) gp2$col = gp2$border
        if("fill" %in% names(gp2)) {
            if(!"border" %in% names(gp2)) gp2$col = gp2$fill
        }
        if(any(c("border", "fill") %in% names(gp2))) {
            grid.rect(x = (seq_along(index) - 0.5)/n, width = 1/n, gp = gp2)
        }
        grid.text(value[index], (seq_along(index) - 0.5)/n, location, gp = gp,
            just = just, rot = rot)
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_text",
        which = which,
        width = width,
        height = height,
        n = n,
        var_import = list(value, gp, just, rot, location),
        show_name = FALSE
    )

    # Rules describing how imported variables are subsetted on heatmap split.
    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp

    anno@subsetable = TRUE

    return(anno)
}

# == title
# Joyplot Annotation
#
# == param
# -x A matrix or a list. If ``x`` is a matrix or a data frame, columns correspond to observations.
# -which Whether it is a column annotation or a row annotation?
# -gp Graphic parameters for the boxes. The length of the graphic parameters should be one or the number of observations.
# -scale Relative height of the curve. A value higher than 1 increases the height of the curve.
# -transparency Transparency of the filled colors. Value should be between 0 and 1.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#joyplot-annotation
#
# == example
# m = matrix(rnorm(1000), nc = 10)
# lt = apply(m, 2, function(x) data.frame(density(x)[c("x", "y")]))
# anno = anno_joyplot(lt, width = unit(4, "cm"), which = "row")
# draw(anno, test = "joyplot")
# anno = anno_joyplot(lt, width = unit(4, "cm"), which = "row", gp = gpar(fill = 1:10))
# draw(anno, test = "joyplot + col")
# anno = anno_joyplot(lt, width = unit(4, "cm"), which = "row", scale = 1)
# draw(anno, test = "joyplot + scale")
#
# m = matrix(rnorm(5000), nc = 50)
# lt = apply(m, 2, function(x) data.frame(density(x)[c("x", "y")]))
# anno = anno_joyplot(lt, width = unit(4, "cm"), which = "row", gp = gpar(fill = NA), scale = 4)
# draw(anno, test = "joyplot")
anno_joyplot = function(x, which = c("column", "row"), gp = gpar(fill = "#000000"),
    scale = 2, transparency = 0.6,
    axis = TRUE, axis_param = default_axis_param(which),
    width = NULL, height = NULL) {

    # When called outside a heatmap, open a null device so that unit
    # calculations work; `ef` restores the device state on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(4, "cm"))

    ## convert matrix all to list (or data frame)
    # Normalize all accepted inputs to a list of two-column matrices
    # (column 1 = x position, column 2 = curve height).
    if(is.matrix(x) || is.data.frame(x)) {
        value = vector("list", ncol(x))
        for(i in seq_len(ncol(x))) {
            value[[i]] = cbind(seq_len(nrow(x)), x[, i])
        }
    } else if(inherits(x, "list")){
        if(all(sapply(x, is.atomic))) {
            if(length(unique(sapply(x, length))) == 1) {
                value = vector("list", length(x))
                for(i in seq_len(length(x))) {
                    value[[i]] = cbind(seq_along(x[[i]]), x[[i]])
                }
            } else {
                stop_wrap("Since x is a list, x need to be a list of two-column matrices.")
            }
        } else {
            value = x
        }
    } else {
        stop_wrap("The input should be a list of two-column matrices or a matrix/data frame.")
    }

    # `range()` flattens list input recursively, so this spans all curves.
    xscale = range(lapply(value, function(x) x[, 1]), na.rm = TRUE)
    xscale = xscale + c(-0.025, 0.025)*(xscale[2] - xscale[1])
    yscale = range(lapply(value, function(x) x[, 2]), na.rm = TRUE)
    yscale[1] = 0
    yscale[2] = yscale[2]*1.05

    n = length(value)
    if(!"fill" %in% names(gp)) {
        gp$fill = "#000000"
    }
    gp = recycle_gp(gp, n)
    gp$fill = add_transparency(gp$fill, transparency)

    # Reversed axes are not supported for joyplots.
    axis_param$direction = "normal"
    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, xscale) else NULL

    # Drawing function used when the annotation is attached to heatmap rows.
    row_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        n = length(index)
        gp = subset_gp(gp, index)
        for(i in seq_len(n)) {
            # One sub-viewport per curve; curves may overflow upwards when
            # `scale` > 1, which produces the joyplot overlap effect.
            pushViewport(viewport(x = unit(0, "npc"), y = unit((n-i)/n, "npc"),
                just = c("left", "bottom"), height = unit(1/n, "npc"),
                xscale = xscale, yscale = yscale))
            x0 = value[[i]][, 1]
            y0 = value[[i]][, 2]*scale
            # Close the polygon down to the baseline at both ends.
            x0 = c(x0[1], x0, x0[length(x0)])
            y0 = c(0, y0, 0)
            gppp = subset_gp(gp, i); gppp$col = NA
            grid.polygon(x = x0, y = y0, default.units = "native", gp = gppp)
            grid.lines(x = x0, y = y0, default.units = "native", gp = subset_gp(gp, i))
            popViewport()
        }
        pushViewport(viewport(xscale = xscale))
        # With a split heatmap, the axis is only drawn once on the outer slice.
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        popViewport()
    }
    # Drawing function used when the annotation is attached to heatmap columns.
    column_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        # Swap x/y scales because the curves are rotated for columns.
        foo = yscale
        yscale = xscale
        xscale = foo
        n = length(index)
        gp = subset_gp(gp, index)
        for(i in seq_len(n)) {
            pushViewport(viewport(y = unit(0, "npc"), x = unit(i/n, "npc"),
                width = unit(1/n, "npc"), just = c("right", "bottom"),
                xscale = xscale, yscale = yscale))
            x0 = value[[i]][, 2]*scale
            y0 = value[[i]][ ,1]
            x0 = c(0, x0, 0)
            y0 = c(y0[1], y0, y0[length(y0)])
            gppp = subset_gp(gp, i); gppp$col = NA
            grid.polygon(y = y0, x = x0, default.units = "native", gp = gppp)
            grid.lines(y = y0, x = x0, default.units = "native", gp = subset_gp(gp, i))
            popViewport()
        }
        pushViewport(viewport(yscale = yscale))
        if(axis_param$side == "left") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "right") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        popViewport()
    }
    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_joyplot",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = xscale,
        var_import = list(value, gp, axis, axis_param, axis_grob, scale,
            yscale, xscale)
    )

    # Rules describing how imported variables are subsetted on heatmap split.
    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp

    anno@subsetable = TRUE

    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# == title
# Horizon chart Annotation
#
# == param
# -x A matrix or a list. If ``x`` is a matrix or a data frame, columns correspond to observations.
# -which Whether it is a column annotation or a row annotation?
# -gp Graphic parameters for the boxes. The length of the graphic parameters should be one or the number of observations.
#    There are two unstandard parameters specificly for horizon chart: ``pos_fill`` and ``neg_fill`` controls the filled
#    color for positive values and negative values.
# -n_slice Number of slices on y-axis.
# -slice_size Height of the slice. If the value is not ``NULL``, ``n_slice`` will be recalculated.
# -negative_from_top Whether the areas for negative values start from the top or the bottom of the plotting region?
# -normalize Whether normalize ``x`` by max(abs(x)).
# -gap Gap size of neighbouring horizon chart.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == detail
# Horizon chart as row annotation is only supported.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#horizon-chart-annotation
#
# == example
# lt = lapply(1:20, function(x) cumprod(1 + runif(1000, -x/100, x/100)) - 1)
# anno = anno_horizon(lt, which = "row")
# draw(anno, test = "horizon chart")
# anno = anno_horizon(lt, which = "row",
#     gp = gpar(pos_fill = "orange", neg_fill = "darkgreen"))
# draw(anno, test = "horizon chart, col")
# anno = anno_horizon(lt, which = "row", negative_from_top = TRUE)
# draw(anno, test = "horizon chart + negative_from_top")
# anno = anno_horizon(lt, which = "row", gap = unit(1, "mm"))
# draw(anno, test = "horizon chart + gap")
# anno = anno_horizon(lt, which = "row",
#     gp = gpar(pos_fill = rep(c("orange", "red"), each = 10),
#               neg_fill = rep(c("darkgreen", "blue"), each = 10)))
# draw(anno, test = "horizon chart, col")
anno_horizon = function(x, which = c("column", "row"),
    gp = gpar(pos_fill = "#D73027", neg_fill = "#313695"),
    n_slice = 4, slice_size = NULL,
    negative_from_top = FALSE, normalize = TRUE, gap = unit(0, "mm"),
    axis = TRUE, axis_param = default_axis_param(which),
    width = NULL, height = NULL) {

    # Resolve annotation direction from the calling context (HeatmapAnnotation)
    # or from the `which` argument; a null device is opened for off-screen
    # size calculations and closed on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(4, "cm"))

    ## convert matrix all to list (or data frame)
    # After this branch, `value` is a list of two-column matrices
    # (column 1 = x, column 2 = y) — one matrix per track.
    if(is.matrix(x) || is.data.frame(x)) {
        value = vector("list", ncol(x))
        for(i in seq_len(ncol(x))) {
            value[[i]] = cbind(seq_len(nrow(x)), x[, i])
        }
    } else if(inherits(x, "list")){
        if(all(sapply(x, is.atomic))) {
            if(length(unique(sapply(x, length))) == 1) {
                value = vector("list", length(x))
                for(i in seq_len(length(x))) {
                    value[[i]] = cbind(seq_along(x[[i]]), x[[i]])
                }
            } else {
                stop_wrap("Since x is a list, x need to be a list of two-column matrices.")
            }
        } else {
            value = x
        }
    } else {
        stop_wrap("The input should be a list of two-column matrices or a matrix/data frame.")
    }

    if(is.null(gp$pos_fill)) gp$pos_fill = "#D73027"
    if(is.null(gp$neg_fill)) gp$neg_fill = "#313695"

    # If a plain `fill` was given, honor it only when the data is entirely
    # non-negative (or entirely non-positive); for mixed-sign data fall back
    # to the default two-color scheme.
    if("fill" %in% names(gp)) {
        foo = unlist(lapply(value, function(x) x[, 2]))
        if(all(foo >= 0)) {
            gp$pos_fill = gp$fill
        } else if(all(foo <= 0)) {
            gp$neg_fill = gp$fill
        } else {
            gp = gpar(pos_fill = "#D73027", neg_fill = "#313695")
        }
    }

    if(which == "column") {
        stop_wrap("anno_horizon() does not support column annotation.")
    }

    # Optionally scale each track to [-1, 1] by its own max absolute value.
    if(normalize) {
        value = lapply(value, function(m) {
            m[, 2] = m[, 2]/max(abs(m[, 2]))
            m
        })
    }

    n = length(value)
    xscale = range(lapply(value, function(x) x[, 1]), na.rm = TRUE)
    # yscale is computed on |y| because positive and negative halves are
    # folded into the same slice range.
    yscale = range(lapply(value, function(x) abs(x[, 2])), na.rm = TRUE)

    axis_param$direction = "normal"
    axis_param = validate_axis_param(axis_param, which)
    axis_grob = if(axis) construct_axis_grob(axis_param, which, xscale) else NULL

    # Drawing function (row annotation only); `k`/`N` are slice index/count,
    # used to draw the axis only on the outermost slice.
    row_fun = function(index, k = 1, N = 1) {
        n_all = length(value)
        value = value[index]
        if(is.null(slice_size)) {
            slice_size = yscale[2]/n_slice
        }
        n_slice = ceiling(yscale[2]/slice_size)
        n = length(index)
        gp = subset_gp(gp, index)
        for(i in seq_len(n)) {
            # one horizontal strip per track, separated by `gap`
            pushViewport(viewport(x = unit(0, "npc"), y = unit((n-i)/n, "npc"),
                just = c("left", "bottom"), height = unit(1/n, "npc") - gap))
            sgp = subset_gp(gp, i)
            horizon_chart(value[[i]][, 1], value[[i]][, 2], n_slice = n_slice, slice_size = slice_size,
                negative_from_top = negative_from_top,
                pos_fill = sgp$pos_fill, neg_fill = sgp$neg_fill)
            grid.rect(gp = gpar(fill = "transparent"))
            popViewport()
        }
        pushViewport(viewport(xscale = xscale))
        if(axis_param$side == "top") {
            if(k > 1) axis = FALSE
        } else if(axis_param$side == "bottom") {
            if(k < N) axis = FALSE
        }
        if(axis) grid.draw(axis_grob)
        popViewport()
    }

    # Intentionally empty: column annotation is rejected above.
    column_fun = function(index) {
    }

    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_horizon",
        which = which,
        width = anno_size$width,
        height = anno_size$height,
        n = n,
        data_scale = xscale,
        var_import = list(value, gp, axis, axis_param, axis_grob, n_slice,
            slice_size, negative_from_top, xscale, yscale, gap)
    )

    anno@subset_rule$value = subset_vector
    anno@subset_rule$gp = subset_gp
    anno@subsetable = TRUE
    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# Draw a single horizon chart in the current viewport: the y-range is cut
# into `n_slice` bands of height `slice_size`; each band is overlaid at the
# baseline with an increasingly saturated fill (positive values in
# `pos_fill`, negative values mirrored and drawn in `neg_fill`).
horizon_chart = function(x, y, n_slice = 4, slice_size,
    pos_fill = "#D73027", neg_fill = "#313695",
    negative_from_top = FALSE) {

    if(missing(slice_size)) {
        slice_size = max(abs(y))/n_slice
    }
    n_slice = ceiling(max(abs(y))/slice_size)
    if(n_slice == 0) {
        return(invisible(NULL))
    }

    # deeper slices get darker colors
    pos_col_fun = colorRamp2(c(0, n_slice), c("white", pos_fill))
    neg_col_fun = colorRamp2(c(0, n_slice), c("white", neg_fill))

    pushViewport(viewport(xscale = range(x), yscale = c(0, slice_size)))
    # positive half: for slice i, points inside the band are shifted down to
    # the baseline, points above are clamped, points below are blanked (NA).
    for(i in seq_len(n_slice)) {
        l1 = y >= (i-1)*slice_size & y < i*slice_size
        l2 = y < (i-1)*slice_size
        l3 = y >= i*slice_size
        if(any(l1)) {
            x2 = x
            y2 = y
            y2[l1] = y2[l1] - slice_size*(i-1)
            y2[l3] = slice_size
            x2[l2] = NA
            y2[l2] = NA
            add_horizon_polygon(x2, y2, gp = gpar(fill = pos_col_fun(i), col = NA), default.units = "native")
        }
    }
    # negative half: mirror y and repeat, optionally hanging from the top.
    y = -y
    for(i in seq_len(n_slice)) {
        l1 = y >= (i-1)*slice_size & y < i*slice_size
        l2 = y < (i-1)*slice_size
        l3 = y >= i*slice_size
        if(any(l1)) {
            x2 = x
            y2 = y
            y2[l1] = y2[l1] - slice_size*(i-1)
            y2[l3] = slice_size
            x2[l2] = NA
            y2[l2] = NA
            add_horizon_polygon(x2, y2, slice_size = slice_size, from_top = negative_from_top,
                gp = gpar(fill = neg_col_fun(i), col = NA), default.units = "native")
        }
    }
    popViewport()
}

# x and y may contain NA, split x and y by NA gaps, align the bottom to y = 0
# (or to y = slice_size when `from_top` is TRUE, flipping the polygon).
add_horizon_polygon = function(x, y, slice_size = NULL, from_top = FALSE, ...) {
    ltx = split_vec_by_NA(x)
    lty = split_vec_by_NA(y)

    for(i in seq_along(ltx)) {
        x0 = ltx[[i]]
        y0 = lty[[i]]
        if(from_top) {
            x0 = c(x0[1], x0, x0[length(x0)])
            y0 = c(slice_size, slice_size - y0, slice_size)
        } else {
            x0 = c(x0[1], x0, x0[length(x0)])
            y0 = c(0, y0, 0)
        }
        grid.polygon(x0, y0, ...)
    }
}

# Split a vector into runs of consecutive non-NA values.
# https://stat.ethz.ch/pipermail/r-help/2010-April/237031.html
split_vec_by_NA = function(x) {
    idx = 1 + cumsum(is.na(x))
    not.na = !is.na(x)
    split(x[not.na], idx[not.na])
}

# == title
# Points as Row Annotation
#
# == param
# -... pass to `anno_points`.
#
# == details
# A wrapper of `anno_points` with pre-defined ``which`` to ``row``.
#
# You can directly use `anno_points` for row annotation if you call it in `rowAnnotation`.
#
# == value
# See help page of `anno_points`.
#
row_anno_points = function(...) {
    # `.__under_SingleAnnotation__` is set when called from inside
    # SingleAnnotation; in that case remind the user that the row_anno_*
    # wrappers are no longer needed.
    if(exists(".__under_SingleAnnotation__", envir = parent.frame())) {
        message_wrap("From version 1.99.0, you can directly use `anno_points()` for row annotation if you call it in `rowAnnotation()`.")
    }
    anno_points(..., which = "row")
}

# == title
# Barplots as Row Annotation
#
# == param
# -... pass to `anno_barplot`.
#
# == details
# A wrapper of `anno_barplot` with pre-defined ``which`` to ``row``.
#
# You can directly use `anno_barplot` for row annotation if you call it in `rowAnnotation`.
#
# == value
# See help page of `anno_barplot`.
#
row_anno_barplot = function(...) {
    if(exists(".__under_SingleAnnotation__", envir = parent.frame())) {
        message_wrap("From version 1.99.0, you can directly use `anno_barplot()` for row annotation if you call it in `rowAnnotation()`.")
    }
    anno_barplot(..., which = "row")
}

# == title
# Boxplots as Row Annotation
#
# == param
# -... pass to `anno_boxplot`.
#
# == details
# A wrapper of `anno_boxplot` with pre-defined ``which`` to ``row``.
#
# You can directly use `anno_boxplot` for row annotation if you call it in `rowAnnotation`.
#
# == value
# See help page of `anno_boxplot`.
#
row_anno_boxplot = function(...) {
    if(exists(".__under_SingleAnnotation__", envir = parent.frame())) {
        message_wrap("From version 1.99.0, you can directly use `anno_boxplot()` for row annotation if you call it in `rowAnnotation()`.")
    }
    anno_boxplot(..., which = "row")
}

# == title
# Histograms as Row Annotation
#
# == param
# -... pass to `anno_histogram`.
#
# == details
# A wrapper of `anno_histogram` with pre-defined ``which`` to ``row``.
#
# You can directly use `anno_histogram` for row annotation if you call it in `rowAnnotation`.
#
# == value
# See help page of `anno_histogram`.
#
row_anno_histogram = function(...) {
    if(exists(".__under_SingleAnnotation__", envir = parent.frame())) {
        message_wrap("From version 1.99.0, you can directly use `anno_histogram()` for row annotation if you call it in `rowAnnotation()`.")
    }
    anno_histogram(..., which = "row")
}

# == title
# Density as Row Annotation
#
# == param
# -... pass to `anno_density`.
#
# == details
# A wrapper of `anno_density` with pre-defined ``which`` to ``row``.
#
# You can directly use `anno_density` for row annotation if you call it in `rowAnnotation`.
#
# == value
# See help page of `anno_density`.
#
row_anno_density = function(...) {
    if(exists(".__under_SingleAnnotation__", envir = parent.frame())) {
        message_wrap("From version 1.99.0, you can directly use `anno_density()` for row annotation if you call it in `rowAnnotation()`.")
    }
    anno_density(..., which = "row")
}

# == title
# Text as Row Annotation
#
# == param
# -... pass to `anno_text`.
#
# == details
# A wrapper of `anno_text` with pre-defined ``which`` to ``row``.
#
# You can directly use `anno_text` for row annotation if you call it in `rowAnnotation`.
#
# == value
# See help page of `anno_text`.
#
row_anno_text = function(...) {
    if(exists(".__under_SingleAnnotation__", envir = parent.frame())) {
        message_wrap("From version 1.99.0, you can directly use `anno_text()` for row annotation if you call it in `rowAnnotation()`.")
    }
    anno_text(..., which = "row")
}

# == title
# Link annotation with labels
#
# == param
# -at Numeric index from the original matrix.
# -labels Corresponding labels.
# -which Whether it is a column annotation or a row annotation?
# -side Side of the labels. If it is a column annotation, valid values are "top" and "bottom";
#     If it is a row annotation, valid values are "left" and "right".
# -lines_gp Please use ``link_gp`` instead.
# -link_gp Graphic settings for the segments.
# -labels_gp Graphic settings for the labels.
# -labels_rot Rotations of labels, scalar.
# -padding Padding between neighbouring labels in the plot.
# -link_width Width of the segments.
# -link_height Similar as ``link_width``, used for column annotation.
# -extend By default, the region for the labels has the same width (if it is a column annotation) or
#     same height (if it is a row annotation) as the heatmap. The size can be extended by this options.
#     The value can be a proportion number or a `grid::unit` object. The length can be either one or two.
#
# == details
# Sometimes there are many rows or columns in the heatmap and we want to mark some of the rows.
# This annotation function is used to mark these rows and connect labels and corresponding rows
# with links.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#mark-annotation
#
# == example
# anno = anno_mark(at = c(1:4, 20, 60, 97:100), labels = month.name[1:10], which = "row")
# draw(anno, index = 1:100, test = "anno_mark")
#
# m = matrix(1:1000, byrow = TRUE, nr = 100)
# anno = anno_mark(at = c(1:4, 20, 60, 97:100), labels = month.name[1:10], which = "row")
# Heatmap(m, cluster_rows = FALSE, cluster_columns = FALSE) + rowAnnotation(mark = anno)
# Heatmap(m) + rowAnnotation(mark = anno)
anno_mark = function(at, labels, which = c("column", "row"),
    side = ifelse(which == "column", "top", "right"),
    lines_gp = gpar(), labels_gp = gpar(),
    labels_rot = ifelse(which == "column", 90, 0),
    padding = unit(1, "mm"),
    link_width = unit(5, "mm"), link_height = link_width,
    link_gp = lines_gp,
    extend = unit(0, "mm")) {

    # Resolve annotation direction from the calling context (HeatmapAnnotation)
    # or from the `which` argument.
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
    } else {
        which = .ENV$current_annotation_which
    }

    # FIX: convert a logical index to numeric positions BEFORE the numeric
    # check. Previously this conversion came after the `!is.numeric(at)` stop
    # and was unreachable, so logical `at` always errored.
    if(is.logical(at)) at = which(at)
    if(!is.numeric(at)) {
        stop_wrap(paste0("`at` should be numeric ", which, " index corresponding to the matrix."))
    }

    n = length(at)
    if(n < 1) {
        # nothing to mark: degrade to an empty, border-less annotation
        return(anno_empty(which = which, border = FALSE))
    }

    link_gp = recycle_gp(link_gp, n)
    labels_gp = recycle_gp(labels_gp, n)

    # sort marks by position so labels/gp stay aligned to `at`
    od = order(at)
    at = at[od]
    labels = labels[od]
    link_gp = subset_gp(link_gp, od)
    labels_gp = subset_gp(labels_gp, od)

    # lookup tables: label -> position in `at`, and matrix index -> label
    labels2index = structure(seq_along(at), names = as.character(labels))
    at2labels = structure(labels, names = at)

    if(length(extend) == 1) extend = rep(extend, 2)
    if(length(extend) > 2) extend = extend[1:2]
    if(!inherits(extend, "unit")) extend = unit(extend, "npc")

    # annotation size: link segment plus the widest/tallest label
    if(which == "row") {
        height = unit(1, "npc")
        width = link_width + max_text_width(labels, gp = labels_gp, rot = labels_rot)
    } else {
        height = link_width + max_text_height(labels, gp = labels_gp, rot = labels_rot)
        width = unit(1, "npc")
    }

    .pos = NULL
    .scale = NULL

    labels_rot = labels_rot %% 360

    # numeric padding is interpreted as a multiple of one line of text
    if(!inherits(padding, "unit")) {
        padding = convertHeight(padding*grobHeight(textGrob("a", gp = subset_gp(labels_gp, 1))), "mm")
    }

    # a map between row index and positions
    # pos_map =

    # Draw labels + 3-part link segments for a row annotation. Positions are
    # de-overlapped with smartAlign() within the (possibly extended) scale.
    row_fun = function(index) {
        if(is_RStudio_current_dev()) {
            if(ht_opt$message) {
                message_wrap("It seems you are using RStudio IDE. `anno_mark()` needs to work with the physical size of the graphics device. It only generates correct plot in the figure panel, while in the zoomed plot (by clicking the icon 'Zoom') or in the exported plot (by clicking the icon 'Export'), the connection to heatmap rows/columns might be wrong. You can directly use e.g. pdf() to save the plot into a file.\n\nUse `ht_opt$message = FALSE` to turn off this message.")
            }
        }

        n = length(index)
        # adjust at and labels: keep only marks present in this slice
        at = intersect(index, at)
        if(length(at) == 0) {
            return(NULL)
        }
        labels = rev(at2labels[as.character(at)])
        labels_gp = subset_gp(labels_gp, labels2index[as.character(labels)])
        link_gp = subset_gp(link_gp, labels2index[as.character(labels)])

        if(is.null(.scale)) {
            .scale = c(0.5, n+0.5)
        }

        pushViewport(viewport(xscale = c(0, 1), yscale = .scale))
        if(inherits(extend, "unit")) extend = convertHeight(extend, "native", valueOnly = TRUE)
        # label extent along the heatmap axis depends on rotation
        if(labels_rot %in% c(90, 270)) {
            text_height = convertHeight(text_width(labels, gp = labels_gp) + padding, "native", valueOnly = TRUE)
        } else {
            text_height = convertHeight(text_height(labels, gp = labels_gp) + padding, "native", valueOnly = TRUE)
        }
        if(is.null(.pos)) {
            i2 = rev(which(index %in% at))
            pos = n-i2+1 # position of rows
        } else {
            pos = .pos[rev(which(index %in% at))]
        }
        h1 = pos - text_height*0.5
        h2 = pos + text_height*0.5
        pos_adjusted = smartAlign(h1, h2, c(.scale[1] - extend[1], .scale[2] + extend[2]))
        h = (pos_adjusted[, 1] + pos_adjusted[, 2])/2

        n2 = length(labels)
        if(side == "right") {
            if(labels_rot == 90) {
                just = c("center", "top")
            } else if(labels_rot == 270) {
                just = c("center", "bottom")
            } else if(labels_rot > 90 & labels_rot < 270 ) {
                just = c("right", "center")
            } else {
                just = c("left", "center")
            }
        } else {
            if(labels_rot == 90) {
                just = c("center", "bottom")
            } else if(labels_rot == 270) {
                just = c("center", "top")
            } else if(labels_rot > 90 & labels_rot < 270 ) {
                just = c("left", "center")
            } else {
                just = c("right", "center")
            }
        }
        # each link is drawn as three segments: horizontal stub at the row,
        # diagonal to the (de-overlapped) label position, horizontal stub at the label
        if(side == "right") {
            grid.text(labels, rep(link_width, n2), h, default.units = "native", gp = labels_gp, rot = labels_rot, just = just)
            link_width = link_width - unit(1, "mm")
            grid.segments(unit(rep(0, n2), "npc"), pos, rep(link_width*(1/3), n2), pos, default.units = "native", gp = link_gp)
            grid.segments(rep(link_width*(1/3), n2), pos, rep(link_width*(2/3), n2), h, default.units = "native", gp = link_gp)
            grid.segments(rep(link_width*(2/3), n2), h, rep(link_width, n2), h, default.units = "native", gp = link_gp)
        } else {
            grid.text(labels, unit(1, "npc")-rep(link_width, n2), h, default.units = "native", gp = labels_gp, rot = labels_rot, just = just)
            link_width = link_width - unit(1, "mm")
            grid.segments(unit(rep(1, n2), "npc"), pos, unit(1, "npc")-rep(link_width*(1/3), n2), pos, default.units = "native", gp = link_gp)
            grid.segments(unit(1, "npc")-rep(link_width*(1/3), n2), pos, unit(1, "npc")-rep(link_width*(2/3), n2), h, default.units = "native", gp = link_gp)
            grid.segments(unit(1, "npc")-rep(link_width*(2/3), n2), h, unit(1, "npc")-rep(link_width, n2), h, default.units = "native", gp = link_gp)
        }
        upViewport()
    }

    # Same logic as row_fun, transposed for a column annotation.
    column_fun = function(index) {
        if(is_RStudio_current_dev()) {
            if(ht_opt$message) {
                message_wrap("It seems you are using RStudio IDE. `anno_mark()` needs to work with the physical size of the graphics device. It only generates correct plot in the figure panel, while in the zoomed plot (by clicking the icon 'Zoom') or in the exported plot (by clicking the icon 'Export'), the connection to heatmap rows/columns might be wrong. You can directly use e.g. pdf() to save the plot into a file.\n\nUse `ht_opt$message = FALSE` to turn off this message.")
            }
        }

        n = length(index)
        # adjust at and labels
        at = intersect(index, at)
        if(length(at) == 0) {
            return(NULL)
        }
        labels = at2labels[as.character(at)]
        labels_gp = subset_gp(labels_gp, labels2index[as.character(labels)])
        link_gp = subset_gp(link_gp, labels2index[as.character(labels)])

        if(is.null(.scale)) {
            .scale = c(0.5, n+0.5)
        }

        pushViewport(viewport(yscale = c(0, 1), xscale = .scale))
        if(inherits(extend, "unit")) extend = convertWidth(extend, "native", valueOnly = TRUE)
        if(labels_rot %in% c(0, 180)) {
            text_height = convertWidth(text_width(labels, gp = labels_gp) + padding, "native", valueOnly = TRUE)
        } else {
            text_height = convertWidth(text_height(labels, gp = labels_gp) + padding, "native", valueOnly = TRUE)
        }
        if(is.null(.pos)) {
            i2 = which(index %in% at)
            pos = i2 # position of rows
        } else {
            pos = .pos[which(index %in% at)]
        }
        h1 = pos - text_height*0.5
        h2 = pos + text_height*0.5
        pos_adjusted = smartAlign(h1, h2, c(.scale[1] - extend[1], .scale[2] + extend[2]))
        h = (pos_adjusted[, 1] + pos_adjusted[, 2])/2

        n2 = length(labels)
        if(side == "top") {
            if(labels_rot == 0) {
                just = c("center", "bottom")
            } else if(labels_rot == 180) {
                just = c("center", "top")
            } else if(labels_rot > 0 & labels_rot < 180 ) {
                just = c("left", "center")
            } else {
                just = c("right", "center")
            }
        } else {
            if(labels_rot == 0) {
                just = c("center", "top")
            } else if(labels_rot == 180) {
                just = c("center", "bottom")
            } else if(labels_rot > 0 & labels_rot < 180 ) {
                just = c("right", "center")
            } else {
                just = c("left", "center")
            }
        }
        if(side == "top") {
            grid.text(labels, h, rep(link_height, n2), default.units = "native", gp = labels_gp, rot = labels_rot, just = just)
            link_height = link_height - unit(1, "mm")
            grid.segments(pos, unit(rep(0, n2), "npc"), pos, rep(link_height*(1/3), n2), default.units = "native", gp = link_gp)
            grid.segments(pos, rep(link_height*(1/3), n2), h, rep(link_height*(2/3), n2), default.units = "native", gp = link_gp)
            # FIX: was `rep(link_height, n)`; every sibling call uses n2 (the
            # number of visible labels). With n != n2, grid.segments recycled
            # the shorter coordinate vectors and drew spurious segments.
            grid.segments(h, rep(link_height*(2/3), n2), h, rep(link_height, n2), default.units = "native", gp = link_gp)
        } else {
            grid.text(labels, h, unit(1, "npc")-rep(link_height, n2), default.units = "native", gp = labels_gp, rot = labels_rot, just = just)
            link_height = link_height - unit(1, "mm")
            grid.segments(pos, unit(rep(1, n2), "npc"), pos, unit(1, "npc")-rep(link_height*(1/3), n2), default.units = "native", gp = link_gp)
            grid.segments(pos, unit(1, "npc")-rep(link_height*(1/3), n2), h, unit(1, "npc")-rep(link_height*(2/3), n2), default.units = "native", gp = link_gp)
            grid.segments(h, unit(1, "npc")-rep(link_height*(2/3), n2), h, unit(1, "npc")-rep(link_height, n2), default.units = "native", gp = link_gp)
        }
        upViewport()
    }

    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_mark",
        which = which,
        width = width,
        height = height,
        n = -1,
        var_import = list(at, labels2index, at2labels, link_gp, labels_gp,
            labels_rot, padding, .pos, .scale, side, link_width, link_height, extend),
        show_name = FALSE
    )

    anno@subset_rule$at = subset_by_intersect
    anno@subsetable = TRUE

    # keep the original call arguments so the annotation can be reconstructed
    attr(anno, "called_args") = list(
        at = at,
        labels = labels,
        which = which,
        side = side,
        labels_gp = labels_gp,
        labels_rot = labels_rot,
        padding = padding,
        link_width = link_width,
        link_height = link_height,
        link_gp = link_gp,
        extend = extend
    )

    return(anno)
}

# Subset rule for anno_mark's `at`: keep only marks inside the selection.
subset_by_intersect = function(x, i) {
    intersect(x, i)
}

# == title
# Link Annotation
#
# == param
# -... Pass to `anno_zoom`.
#
# == details
# This function is the same as `anno_zoom`. It links subsets of rows or columns to a list of graphic regions.
#
anno_link = function(...) {
    anno_zoom(...)
}

# == title
# Summary Annotation
#
# == param
# -which Whether it is a column annotation or a row annotation?
# -border Whether draw borders of the annotation region?
# -bar_width Relative width of the bars. The value should be smaller than one.
# -axis Whether to add axis?
# -axis_param parameters for controlling axis. See `default_axis_param` for all possible settings and default parameters.
# -ylim Data ranges. ``ylim`` for barplot is enforced to be ``c(0, 1)``.
# -extend The extension to both side of ``ylim``. The value is a percent value corresponding to ``ylim[2] - ylim[1]``. This argument is only for boxplot.
# -outline Whether draw outline of boxplots?
# -box_width Relative width of boxes. The value should be smaller than one.
# -pch Point style.
# -size Point size.
# -gp Graphic parameters.
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
#
# == detail
# ``anno_summary`` is a special annotation function that it only works for one-column or one-row heatmap.
# It shows the summary of the values in the heatmap. If the values in the heatmap is discrete,
# the proportion of each level (the sum is normalized to 1) is visualized as stacked barplot. If the heatmap
# is split into multiple slices, multiple bars are put in the annotation. If the value is continuous, boxplot is used.
#
# In the barplot, the color schema is used as the same as the heatmap, while for the boxplot, the color needs
# to be controlled by ``gp``.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#summary-annotation
#
# == example
# ha = HeatmapAnnotation(summary = anno_summary(height = unit(4, "cm")))
# v = sample(letters[1:2], 50, replace = TRUE)
# split = sample(letters[1:2], 50, replace = TRUE)
# Heatmap(v, top_annotation = ha, width = unit(1, "cm"), split = split)
#
# ha = HeatmapAnnotation(summary = anno_summary(gp = gpar(fill = 2:3), height = unit(4, "cm")))
# v = rnorm(50)
# Heatmap(v, top_annotation = ha, width = unit(1, "cm"), split = split)
#
anno_summary = function(which = c("column", "row"), border = TRUE, bar_width = 0.8,
    axis = TRUE, axis_param = default_axis_param(which),
    ylim = NULL, extend = 0.05, outline = TRUE, box_width = 0.6,
    pch = 1, size = unit(2, "mm"), gp = gpar(),
    width = NULL, height = NULL) {

    # Resolve annotation direction; a null device is opened for off-screen
    # size calculations and closed on exit.
    ef = function() NULL
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
        dev.null()
        ef = dev.off2
    } else {
        which = .ENV$current_annotation_which
    }
    on.exit(ef())

    anno_size = anno_width_and_height(which, width, height, unit(2, "cm"))

    axis_param = validate_axis_param(axis_param, which)
    if(is.null(ylim)) {
        # discrete barplot scale is always c(0, 1); the continuous case
        # rebuilds the axis grob from the data range inside the drawing function
        axis_grob = if(axis) construct_axis_grob(axis_param, which, c(0, 1)) else NULL
    } else {
        axis_grob = if(axis) construct_axis_grob(axis_param, which, ylim) else NULL
    }

    # Drawing function for a row annotation (one-row heatmap).
    # NOTE(review): the Heatmap object is pulled from the caller with
    # parent.frame(7), which assumes a fixed call depth inside the drawing
    # pipeline — fragile, but relied on by the package internals.
    row_fun = function(index) {
        ht = get("object", envir = parent.frame(7))
        mat = ht@matrix
        cm = ht@matrix_color_mapping
        order_list = ht@column_order_list
        ng = length(order_list)
        if(cm@type == "discrete") {
            # one stacked horizontal bar of level proportions per slice
            tl = lapply(order_list, function(od) table(mat[1, od]))
            tl = lapply(tl, function(x) x/sum(x))
            pushViewport(viewport(yscale = c(0.5, ng+0.5), xscale = c(0, 1)))
            for(i in 1:ng) {
                x = i
                y = cumsum(tl[[i]])
                grid.rect(y, x, height = bar_width, width = tl[[i]], just = "right",
                    gp = gpar(fill = map_to_colors(cm, names(y))), default.units = "native")
            }
            if(axis) grid.draw(axis_grob)
            if(border) grid.rect(gp = gpar(fill = "transparent"))
            popViewport()
        } else {
            # continuous row-annotation case is intentionally not implemented
        }
    }

    # Drawing function for a column annotation (one-column heatmap).
    column_fun = function(index) {
        ht = get("object", envir = parent.frame(7))
        mat = ht@matrix
        cm = ht@matrix_color_mapping
        order_list = ht@row_order_list
        ng = length(order_list)
        if(cm@type == "discrete") {
            if(!is.null(ylim)) {
                stop_wrap("For discrete matrix, `ylim` is not allowed to set. It is always c(0, 1).")
            }
            # one stacked vertical bar of level proportions per slice
            tl = lapply(order_list, function(od) table(mat[od, 1]))
            tl = lapply(tl, function(x) x/sum(x))
            pushViewport(viewport(xscale = c(0.5, ng+0.5), yscale = c(0, 1)))
            for(i in 1:ng) {
                x = i
                y = cumsum(tl[[i]])
                grid.rect(x, y, width = bar_width, height = tl[[i]], just = "top",
                    gp = gpar(fill = map_to_colors(cm, names(y))), default.units = "native")
            }
            if(axis) grid.draw(axis_grob)
            if(border) grid.rect(gp = gpar(fill = "transparent"))
            popViewport()
        } else {
            # continuous values: one boxplot per slice
            vl = lapply(order_list, function(od) mat[od, 1])
            nv = length(vl)
            if(is.null(ylim)) {
                if(!outline) {
                    # without outliers, scale to the whisker range only
                    boxplot_stats = boxplot(vl, plot = FALSE)$stats
                    data_scale = range(boxplot_stats)
                } else {
                    data_scale = range(vl, na.rm = TRUE)
                }
            } else {
                data_scale = ylim
            }
            data_scale = data_scale + c(-extend, extend)*(data_scale[2] - data_scale[1])
            if(is.null(ylim)) {
                # rebuild the axis to match the data-driven scale
                axis_param = validate_axis_param(axis_param, which)
                axis_grob = if(axis) construct_axis_grob(axis_param, which, data_scale) else NULL
            }
            gp = recycle_gp(gp, nv)
            if(length(pch) == 1) pch = rep(pch, nv)
            if(length(size) == 1) size = rep(size, nv)
            pushViewport(viewport(xscale = c(0.5, ng+0.5), yscale = data_scale))
            for(i in 1:ng) {
                x = i
                v = vl[[i]]
                # FIX: pch/size are recycled to one value per box above, so
                # they must be indexed per box here; previously the whole
                # vectors were passed, ignoring the per-slice settings.
                grid.boxplot(v, pos = x, box_width = box_width, gp = subset_gp(gp, i),
                    pch = pch[i], size = size[i], outline = outline)
            }
            if(axis) grid.draw(axis_grob)
            if(border) grid.rect(gp = gpar(fill = "transparent"))
            popViewport()
        }
    }

    if(which == "row") {
        fun = row_fun
    } else if(which == "column") {
        fun = column_fun
    }

    anno = AnnotationFunction(
        fun = fun,
        fun_name = "anno_summary",
        which = which,
        # FIX: use the validated/defaulted sizes. `anno_size` was computed
        # above but then ignored (raw `width`/`height`, possibly NULL, were
        # passed), unlike every other anno_*() in this file.
        width = anno_size$width,
        height = anno_size$height,
        var_import = list(bar_width, border, axis, axis_grob, axis_param, which,
            ylim, extend, outline, box_width, pch, size, gp),
        n = 1,
        show_name = FALSE
    )

    anno@subsetable = FALSE
    anno@extended = update_anno_extend(anno, axis_grob, axis_param)

    return(anno)
}

# == title
# Block annotation
#
# == param
# -gp Graphic parameters.
# -labels Labels put on blocks.
# -labels_gp Graphic parameters for labels.
# -labels_rot Rotation for labels.
# -labels_offset Positions of the labels. It controls offset on y-directions for column annotation and on x-direction for row annotation.
# -labels_just Justification of the labels.
# -which Is it a row annotation or a column annotation?
# -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.
# -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.
# -show_name Whether show annotation name.
#
# == details
# The block annotation is used for representing slices. The length of all arguments should be 1 or the number of slices.
#
# == value
# An annotation function which can be used in `HeatmapAnnotation`.
#
# == seealso
# https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#block-annotation
#
# == example
# Heatmap(matrix(rnorm(100), 10),
#     top_annotation = HeatmapAnnotation(foo = anno_block(gp = gpar(fill = 2:4),
#         labels = c("group1", "group2", "group3"), labels_gp = gpar(col = "white"))),
#     column_km = 3,
#     left_annotation = rowAnnotation(foo = anno_block(gp = gpar(fill = 2:4),
#         labels = c("group1", "group2", "group3"), labels_gp = gpar(col = "white"))),
#     row_km = 3)
anno_block = function(gp = gpar(), labels = NULL, labels_gp = gpar(),
    labels_rot = ifelse(which == "row", 90, 0),
    labels_offset = unit(0.5, "npc"), labels_just = "center",
    which = c("column", "row"), width = NULL, height = NULL, show_name = FALSE) {

    # Resolve annotation direction from the calling context (HeatmapAnnotation)
    # or from the `which` argument.
    if(is.null(.ENV$current_annotation_which)) {
        which = match.arg(which)[1]
    } else {
        which = .ENV$current_annotation_which
    }

    # When labels are drawn, default the annotation size to the label extent
    # plus 5 mm, unless an explicit unit was supplied.
    if(length(labels)) {
        if(which == "column") {
            # FIX: was `missing(height)`; with the documented default
            # `height = NULL`, explicitly passing NULL hit the "you specified
            # `height`" error. `is.null()` treats an explicit NULL like the
            # default, and behaves identically for all other call patterns.
            if(is.null(height)) {
                height = grobHeight(textGrob(labels, rot = labels_rot, gp = labels_gp))
                height = height + unit(5, "mm")
            } else {
                if(!inherits(height, "unit")) {
                    stop_wrap("Since you specified `height`, the value should be `unit` object.")
                }
            }
        } else {
            # FIX: same `missing()` -> `is.null()` change as for `height`.
            if(is.null(width)) {
                width = grobWidth(textGrob(labels, rot = labels_rot, gp = labels_gp))
                width = width + unit(5, "mm")
            } else {
                if(!inherits(width, "unit")) {
                    stop_wrap("Since you specified `width`, the value should be `unit` object.")
                }
            }
        }
    }

    anno_size = anno_width_and_height(which, width, height, unit(5, "mm"))

    # Drawing function; called once per slice with `k` = slice index and
    # `n` = total number of slices. All graphic parameters are recycled to
    # `n` and indexed by `k`.
    fun = function(index, k, n) {
        gp = subset_gp(recycle_gp(gp, n), k)
        grid.rect(gp = gp)
        if(length(labels)) {
            if(length(labels) != n) {
                stop_wrap("Length of `labels` should be as same as number of slices.")
            }
            label = labels[k]
            labels_gp = subset_gp(recycle_gp(labels_gp, n), k)
            x = y = unit(0.5, "npc")
            # the offset applies along the annotation's thin direction
            if(which == "column") y = labels_offset
            if(which == "row") x = labels_offset
            grid.text(label, x = x, y = y, gp = labels_gp, rot = labels_rot, just = labels_just)
        }
    }

    anno = AnnotationFunction(
        fun = fun,
        n = NA,
        fun_name = "anno_block",
        which = which,
        var_import = list(gp, labels, labels_gp, labels_rot, labels_offset, labels_just, which),
        subset_rule = list(),
        subsetable = TRUE,
        height = anno_size$height,
        width = anno_size$width,
        show_name = show_name
    )

    return(anno)
}

# == title
# Zoom annotation
#
# == param
# -align_to It defines how the boxes correspond to the rows or the columns in the heatmap.
#     If the value is a list of indices, each box corresponds to the rows or columns with indices
#     in one vector in the list. If the value is a categorical variable (e.g. a factor or a character vector)
#     that has the same length as the rows or columns in the heatmap, each box corresponds to the rows/columns
#     in each level in the categorical variable.
# -panel_fun A self-defined function that defines how to draw graphics in the box. The function must have
#     a ``index`` argument which is the indices for the rows/columns that the box corresponds to. It can
#     have second argument ``nm`` which is the "name" of the selected part in the heatmap. The corresponding
#     value for ``nm`` comes from ``align_to`` if it is specified as a categorical variable or a list with names.
# -which Whether it is a column annotation or a row annotation?
# -side Side of the boxes. If it is a column annotation, valid values are "top" and "bottom";
#     If it is a row annotation, valid values are "left" and "right".
# -size The size of boxes. It can be pure numeric that they are treated as relative fractions of the total
#     height/width of the heatmap. The value of ``size`` can also be absolute units.
# -gap Gaps between boxes.
# -link_gp Graphic settings for the segments.
# -link_width Width of the segments.
# -link_height Similar as ``link_width``, used for column annotation.
# -extend By default, the region for the labels has the same width (if it is a column annotation) or
#     same height (if it is a row annotation) as the heatmap. The size can be extended by this options.
# The value can be a proportion number or a `grid::unit` object. The length can be either one or two. # -width Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation. # -height Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation. # -internal_line Internally used. # # == details # `anno_zoom` creates several plotting regions (boxes) which can be corresponded to subsets of rows/columns in the # heatmap. # # == value # An annotation function which can be used in `HeatmapAnnotation`. # # == seealso # https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#zoom-annotation # # == example # set.seed(123) # m = matrix(rnorm(100*10), nrow = 100) # subgroup = sample(letters[1:3], 100, replace = TRUE, prob = c(1, 5, 10)) # rg = range(m) # panel_fun = function(index, nm) { # pushViewport(viewport(xscale = rg, yscale = c(0, 2))) # grid.rect() # grid.xaxis(gp = gpar(fontsize = 8)) # grid.boxplot(m[index, ], pos = 1, direction = "horizontal") # grid.text(paste("distribution of group", nm), mean(rg), y = 1.9, # just = "top", default.units = "native", gp = gpar(fontsize = 10)) # popViewport() # } # anno = anno_zoom(align_to = subgroup, which = "row", panel_fun = panel_fun, # size = unit(2, "cm"), gap = unit(1, "cm"), width = unit(4, "cm")) # Heatmap(m, right_annotation = rowAnnotation(foo = anno), row_split = subgroup) # anno_zoom = function(align_to, panel_fun = function(index, nm = NULL) { grid.rect() }, which = c("column", "row"), side = ifelse(which == "column", "top", "right"), size = NULL, gap = unit(1, "mm"), link_width = unit(5, "mm"), link_height = link_width, link_gp = gpar(), extend = unit(0, "mm"), width = NULL, height = NULL, internal_line = TRUE) { if(is.null(.ENV$current_annotation_which)) { which = match.arg(which)[1] } else { which = .ENV$current_annotation_which } anno_size = anno_width_and_height(which, width, 
height, unit(2, "cm") + link_width) # align_to should be # 1. a vector of class labels that the length should be same as the nrow of the matrix # 2. a list of numeric indices if(is.list(align_to)) { if(!any(sapply(align_to, is.numeric))) { stop_wrap(paste0("`at` should be numeric ", which, " index corresponding to the matrix.")) } } .pos = NULL # position of the rows if(length(as.list(formals(panel_fun))) == 1) { formals(panel_fun) = alist(index = , nm = NULL) } if(length(extend) == 1) extend = rep(extend, 2) if(length(extend) > 2) extend = extend[1:2] if(!inherits(extend, "unit")) extend = unit(extend, "npc") # anno_zoom is always executed in one-slice mode (which means mulitple slices # are treated as one big slilce) row_fun = function(index) { if(is_RStudio_current_dev()) { if(ht_opt$message) { message_wrap("It seems you are using RStudio IDE. `anno_zoom()`/`anno_link()` needs to work with the physical size of the graphics device. It only generates correct plot in the figure panel, while in the zoomed plot (by clicking the icon 'Zoom') or in the exported plot (by clicking the icon 'Export'), the connection to heatmap rows/columns might be wrong. You can directly use e.g. 
pdf() to save the plot into a file.\n\nUse `ht_opt$message = FALSE` to turn off this message.") } } n = length(index) if(is.atomic(align_to)) { if(length(setdiff(align_to, index)) == 0 && !any(duplicated(align_to))) { align_to = list(align_to) } else { if(length(align_to) != n) { stop_wrap("If `align_to` is a vector with group labels, the length should be the same as the number of rows in the heatmap.") } lnm = as.character(unique(align_to[index])) align_to = as.list(tapply(seq_along(align_to), align_to, function(x) x)) align_to = align_to[lnm] } } ## adjust index order align_to = lapply(align_to, function(x) intersect(index, x)) nrl = sapply(align_to, length) align_to_df = lapply(align_to, function(x) { ind = which(index %in% x) n = length(ind) s = NULL e = NULL s[1] = ind[1] if(n > 1) { ind2 = which(ind[2:n] - ind[1:(n-1)] > 1) if(length(ind2)) s = c(s, ind[ ind2 + 1 ]) k = length(s) e[k] = ind[length(ind)] if(length(ind2)) e[1:(k-1)] = ind[1:(n-1)][ ind2 ] } else { e = ind[1] } data.frame(s = s, e = e) }) # pos is from top to bottom if(is.null(.pos)) { pos = (n:1 - 0.5)/n # position of rows } else { pos = .pos } .scale = c(0, 1) pushViewport(viewport(xscale = c(0, 1), yscale = .scale)) if(inherits(extend, "unit")) extend = convertHeight(extend, "native", valueOnly = TRUE) # the position of boxes initially are put evenly # add the gap n_boxes = length(align_to) if(length(gap) == 1) gap = rep(gap, n_boxes) if(is.null(size)) size = nrl if(length(size) == 1) size = rep(size, length(align_to)) if(length(size) != length(align_to)) { stop_wrap("Length of `size` should be the same as the number of groups of indices.") } if(!inherits(size, "unit")) { size_is_unit = FALSE if(n_boxes == 1) { h = data.frame(bottom = .scale[1] - extend[1], top = .scale[2] + extend[2]) } else { size = as.numeric(size) gap = convertHeight(gap, "native", valueOnly = TRUE) box_height = size/sum(size) * (1 + sum(extend) - sum(gap[1:(n_boxes-1)])) h = data.frame( top = cumsum(box_height) + 
cumsum(gap) - gap[length(gap)] - extend[1] ) h$bottom = h$top - box_height h = 1 - h[, 2:1] colnames(h) = c("top", "bottom") } } else { size_is_unit = TRUE box_height = size box_height2 = box_height # box_height2 adds the gap for(i in 1:n_boxes) { if(i == 1 || i == n_boxes) { if(n_boxes > 1) { box_height2[i] = box_height2[i] + gap[i]*0.5 } } else { box_height2[i] = box_height2[i] + gap[i] } } box_height2 = convertHeight(box_height2, "native", valueOnly = TRUE) # the original positions of boxes mean_pos = sapply(align_to_df, function(df) mean((pos[df[, 1]] + pos[df[, 2]])/2)) h1 = mean_pos - box_height2*0.5 h2 = mean_pos + box_height2*0.5 h = smartAlign2(rev(h1), rev(h2), c(.scale[1] - extend[1], .scale[2] + extend[2])) colnames(h) = c("bottom", "top") h = h[nrow(h):1, , drop = FALSE] # recalcualte h to remove gaps gap_height = convertHeight(gap, "native", valueOnly = TRUE) if(n_boxes > 1) { for(i in 1:n_boxes) { if(i == 1) { h[i, "bottom"] = h[i, "bottom"] + gap_height[i]/2 } else if(i == n_boxes) { h[i, "top"] = h[i, "top"] - gap_height[i]/2 } else { h[i, "bottom"] = h[i, "bottom"] + gap_height[i]/2 h[i, "top"] = h[i, "top"] - gap_height[i]/2 } } } } popViewport() # draw boxes if(side == "right") { pushViewport(viewport(x = link_width, just = "left", width = anno_size$width - link_width)) } else { pushViewport(viewport(x = 0, just = "left", width = anno_size$width - link_width)) } for(i in 1:n_boxes) { current_vp_name = current.viewport()$name pushViewport(viewport(y = (h[i, "top"] + h[i, "bottom"])/2, height = h[i, "top"] - h[i, "bottom"], default.units = "native")) if(is.function(panel_fun)) panel_fun(align_to[[i]], names(align_to)[i]) popViewport() if(current.viewport()$name != current_vp_name) { stop_wrap("If you push viewports `panel_fun`, you need to pop all them out.") } } popViewport() # draw the links if(is.null(link_gp$fill)) link_gp$fill = NA link_gp = recycle_gp(link_gp, n_boxes) if(side == "right") { pushViewport(viewport(x = unit(0, "npc"), just = 
"left", width = link_width)) } else { pushViewport(viewport(x = unit(1, "npc"), just = "right", width = link_width)) } for(i in 1:n_boxes) { df = align_to_df[[i]] for(j in 1:nrow(df)) { # draw each polygon if(!internal_line) { link_gp3 = link_gp2 = link_gp link_gp2$col = link_gp$fill link_gp2$lty = NULL link_gp3$fill = NA if(side == "right") { grid.polygon(unit.c(unit(c(0, 0), "npc"), rep(link_width, 2)), c(pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"], h[i, "bottom"]), default.units = "native", gp = subset_gp(link_gp2, i)) grid.lines(unit.c(link_width, unit(c(0, 0), "npc"), link_width), c(h[i, "bottom"], pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"]), default.units = "native", gp = subset_gp(link_gp3, i)) } else { grid.polygon(unit.c(rep(link_width, 2), unit(c(0, 0), "npc")), c(pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"], h[i, "bottom"]), default.units = "native", gp = subset_gp(link_gp2, i)) grid.lines(unit.c(unit(0, "npc"), rep(link_width, 2), unit(0, "npc")), c(h[i, "bottom"], pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"]), default.units = "native", gp = subset_gp(link_gp3, i)) } } else { if(side == "right") { grid.polygon(unit.c(unit(c(0, 0), "npc"), rep(link_width, 2)), c(pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"], h[i, "bottom"]), default.units = "native", gp = subset_gp(link_gp, i)) } else { grid.polygon(unit.c(rep(link_width, 2), unit(c(0, 0), "npc")), c(pos[df[j, 2]] - 0.5/n, pos[df[j, 1]] + 0.5/n, h[i, "top"], h[i, "bottom"]), default.units = "native", gp = subset_gp(link_gp, i)) } } } } popViewport() } column_fun = function(index) { if(is_RStudio_current_dev()) { if(ht_opt$message) { message_wrap("It seems you are using RStudio IDE. `anno_zoom()`/`anno_link()` needs to work with the physical size of the graphics device. 
It only generates correct plot in the figure panel, while in the zoomed plot (by clicking the icon 'Zoom') or in the exported plot (by clicking the icon 'Export'), the connection to heatmap rows/columns might be wrong. You can directly use e.g. pdf() to save the plot into a file.\n\nUse `ht_opt$message = FALSE` to turn off this message.") } } n = length(index) if(is.atomic(align_to)) { if(length(setdiff(align_to, index)) == 0 && !any(duplicated(align_to))) { align_to = list(align_to) } else { if(length(align_to) != n) { stop_wrap("If `align_to` is a vector with group labels, the length should be the same as the number of columns in the heatmap.") } lnm = as.character(unique(align_to[index])) align_to = as.list(tapply(seq_along(align_to), align_to, function(x) x)) align_to = align_to[lnm] } } align_to = lapply(align_to, function(x) intersect(index, x)) nrl = sapply(align_to, length) align_to_df = lapply(align_to, function(x) { ind = which(index %in% x) n = length(ind) s = NULL e = NULL s[1] = ind[1] if(n > 1) { ind2 = which(ind[2:n] - ind[1:(n-1)] > 1) if(length(ind2)) s = c(s, ind[ ind2 + 1 ]) k = length(s) e[k] = ind[length(ind)] if(length(ind2)) e[1:(k-1)] = ind[1:(n-1)][ ind2 ] } else { e = ind[1] } data.frame(s = s, e = e) }) if(is.null(.pos)) { pos = (1:n - 0.5)/n } else { pos = .pos } .scale = c(0, 1) pushViewport(viewport(yscale = c(0, 1), xscale = .scale)) if(inherits(extend, "unit")) extend = convertWidth(extend, "native", valueOnly = TRUE) # the position of boxes initially are put evenly # add the gap n_boxes = length(align_to) if(length(gap) == 1) gap = rep(gap, n_boxes) if(is.null(size)) size = nrl if(length(size) == 1) size = rep(size, length(align_to)) if(length(size) != length(align_to)) { stop_wrap("Length of `size` should be the same as the number of groups of indices.") } if(!inherits(size, "unit")) { size_is_unit = FALSE if(n_boxes == 1) { h = data.frame(left = .scale[1] - extend[1], right = .scale[2] + extend[2]) } else { size = as.numeric(size) 
gap = convertWidth(gap, "native", valueOnly = TRUE) box_width = size/sum(size) * (1 + sum(extend) - sum(gap[1:(n_boxes-1)])) h = data.frame( right = cumsum(box_width) + cumsum(gap) - gap[length(gap)] - extend[1] ) h$left = h$right - box_width } } else { size_is_unit = TRUE box_width = size box_width2 = box_width for(i in 1:n_boxes) { if(i == 1 || i == n_boxes) { if(n_boxes > 1) { box_width2[i] = box_width2[i] + gap[i]*0.5 } } else { box_width2[i] = box_width2[i] + gap[i] } } box_width2 = convertWidth(box_width2, "native", valueOnly = TRUE) # the original positions of boxes mean_pos = sapply(align_to_df, function(df) mean((pos[df[, 1]] + pos[df[, 2]])/2)) h1 = mean_pos - box_width2*0.5 h2 = mean_pos + box_width2*0.5 h = smartAlign2(h1, h2, c(.scale[1] - extend[1], .scale[2] + extend[2])) colnames(h) = c("left", "right") # recalcualte h to remove gaps gap_width = convertWidth(gap, "native", valueOnly = TRUE) if(n_boxes > 1) { for(i in 1:n_boxes) { if(i == 1) { h[i, "left"] = h[i, "left"] + gap_width[i]/2 } else if(i == n_boxes) { h[i, "right"] = h[i, "right"] - gap_width[i]/2 } else { h[i, "left"] = h[i, "left"] + gap_width[i]/2 h[i, "right"] = h[i, "right"] - gap_width[i]/2 } } } } popViewport() # draw boxes if(side == "top") { pushViewport(viewport(y = link_height, just = "bottom", height = anno_size$height - link_height)) } else { pushViewport(viewport(y = 0, just = "bottom", height = anno_size$height - link_height)) } for(i in 1:n_boxes) { current_vp_name = current.viewport()$name pushViewport(viewport(x = (h[i, "right"] + h[i, "left"])/2, width = h[i, "right"] - h[i, "left"], default.units = "native")) if(is.function(panel_fun)) panel_fun(align_to[[i]], names(align_to)[i]) popViewport() if(current.viewport()$name != current_vp_name) { stop_wrap("If you push viewports `panel_fun`, you need to pop all them out.") } } popViewport() # draw the links if(is.null(link_gp$fill)) link_gp$fill = NA link_gp = recycle_gp(link_gp, n_boxes) if(side == "top") { 
pushViewport(viewport(y = unit(0, "npc"), just = "bottom", height = link_height)) } else { pushViewport(viewport(y = unit(1, "npc"), just = "top", height = link_height)) } for(i in 1:n_boxes) { df = align_to_df[[i]] for(j in 1:nrow(df)) { # draw each polygon if(!internal_line) { link_gp3 = link_gp2 = link_gp link_gp2$col = link_gp$fill link_gp2$lty = NULL link_gp3$fill = NA if(side == "top") { grid.polygon( c(pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"], h[i, "right"]), unit.c(unit(c(0, 0), "npc"), rep(link_width, 2)), default.units = "native", gp = subset_gp(link_gp2, i)) grid.lines( c(h[i, "right"], pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"]), unit.c(link_width,unit(c(0, 0), "npc"), link_width), default.units = "native", gp = subset_gp(link_gp3, i)) } else { grid.polygon( c(pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"], h[i, "right"]), unit.c(rep(link_width, 2), unit(c(0, 0), "npc")), default.units = "native", gp = subset_gp(link_gp2, i)) grid.lines( c(h[i, "right"], pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"]), unit.c(unit(0, "npc"), rep(link_width, 2), unit(0, "npc")), default.units = "native", gp = subset_gp(link_gp3, i)) } } else { if(side == "top") { grid.polygon( c(pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"], h[i, "right"]), unit.c(unit(c(0, 0), "npc"), rep(link_width, 2)), default.units = "native", gp = subset_gp(link_gp, i)) } else { grid.polygon( c(pos[df[j, 2]] + 0.5/n, pos[df[j, 1]] - 0.5/n, h[i, "left"], h[i, "right"]), unit.c(rep(link_width, 2), unit(c(0, 0), "npc")), default.units = "native", gp = subset_gp(link_gp, i)) } } } } popViewport() } if(which == "row") { fun = row_fun } else if(which == "column") { fun = column_fun } anno = AnnotationFunction( fun = fun, fun_name = "anno_zoom", which = which, height = anno_size$height, width = anno_size$width, n = -1, var_import = list(align_to, .pos, gap, size, panel_fun, side, anno_size, extend, link_width, link_height, link_gp, 
internal_line), show_name = FALSE ) anno@subset_rule$align_to = function(x, i) { if(is.atomic(x)) { x[i] } else { x = lapply(x, function(x) intersect(x, i)) x = x[sapply(x, length) > 0] } } anno@subsetable = TRUE return(anno) }
library(dplyr) library(data.table) #downloading files temp<-tempfile() download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", temp) unzip(temp,list=TRUE) #reading files into R, setting column names and labels Features<-read.table(unzip(temp,"UCI HAR Dataset/features.txt")) TestSet<-read.table(unzip(temp,"UCI HAR Dataset/test/X_test.txt")) TestLabels<-read.table(unzip(temp,"UCI HAR Dataset/test/y_test.txt")) SubjectTest<-read.table(unzip(temp,"UCI HAR Dataset/test/subject_test.txt")) colnames(TestSet)<-t(Features[2]) TestSet$activity<-TestLabels[,1] TestSet$volunteer<-SubjectTest[,1] TrainSet<-read.table(unzip(temp,"UCI HAR Dataset/train/X_train.txt")) TrainLabels<-read.table(unzip(temp,"UCI HAR Dataset/train/y_train.txt")) SubjectTrain<-read.table(unzip(temp,"UCI HAR Dataset/train/subject_train.txt")) colnames(TrainSet)<-t(Features[2]) TrainSet$activity<-TrainLabels[,1] TrainSet$volunteer<-SubjectTrain[,1] #merging test and train sets, deleting duplicated columns Set<-rbind(TrainSet,TestSet) doubles<-duplicated(colnames(Set)) Set<-Set[,!doubles] #extracting the measurements on mean and standard deviation ColMean<-grep("mean()",names(Set),value=FALSE,fixed=TRUE) #including columns 471:477 because they also contain means ColMean<-append(Mean,471:477) SetMean<- Set[ColMean] ColStDev<-grep("std()",names(Set),value=FALSE) SetStDev<-Set[ColStDev] #naming activities Set$activity[Set$activity == 1] <- "Walking" Set$activity[Set$activity == 2] <- "Walking Upstairs" Set$activity[Set$activity == 3] <- "Walking Downstairs" Set$activity[Set$activity == 4] <- "Sitting" Set$activity[Set$activity == 5] <- "Standing" Set$activity[Set$activity == 6] <- "Laying" #naming volunteers Set$volunteer[Set$volunteer == 1] <- "Volunteer 1" Set$volunteer[Set$volunteer == 2] <- "Volunteer 2" Set$volunteer[Set$volunteer == 3] <- "Volunteer 3" Set$volunteer[Set$volunteer == 4] <- "Volunteer 4" Set$volunteer[Set$volunteer == 5] <- "Volunteer 5" 
Set$volunteer[Set$volunteer == 6] <- "Volunteer 6" Set$volunteer[Set$volunteer == 7] <- "Volunteer 7" Set$volunteer[Set$volunteer == 8] <- "Volunteer 8" Set$volunteer[Set$volunteer == 9] <- "Volunteer 9" Set$volunteer[Set$volunteer == 10] <- "Volunteer 10" Set$volunteer[Set$volunteer == 11] <- "Volunteer 11" Set$volunteer[Set$volunteer == 12] <- "Volunteer 12" Set$volunteer[Set$volunteer == 13] <- "Volunteer 13" Set$volunteer[Set$volunteer == 14] <- "Volunteer 14" Set$volunteer[Set$volunteer == 15] <- "Volunteer 15" Set$volunteer[Set$volunteer == 16] <- "Volunteer 16" Set$volunteer[Set$volunteer == 17] <- "Volunteer 17" Set$volunteer[Set$volunteer == 18] <- "Volunteer 18" Set$volunteer[Set$volunteer == 19] <- "Volunteer 19" Set$volunteer[Set$volunteer == 20] <- "Volunteer 20" Set$volunteer[Set$volunteer == 21] <- "Volunteer 21" Set$volunteer[Set$volunteer == 22] <- "Volunteer 22" Set$volunteer[Set$volunteer == 23] <- "Volunteer 23" Set$volunteer[Set$volunteer == 24] <- "Volunteer 24" Set$volunteer[Set$volunteer == 25] <- "Volunteer 25" Set$volunteer[Set$volunteer == 26] <- "Volunteer 26" Set$volunteer[Set$volunteer == 27] <- "Volunteer 27" Set$volunteer[Set$volunteer == 28] <- "Volunteer 28" Set$volunteer[Set$volunteer == 29] <- "Volunteer 29" Set$volunteer[Set$volunteer == 30] <- "Volunteer 30" #fixing column names names(Set) <- gsub("Acc", "Accelerator", names(Set)) names(Set) <- gsub("Mag", "Magnitude", names(Set)) names(Set) <- gsub("Gyro", "Gyroscope", names(Set)) names(Set) <- gsub("^t", "Time", names(Set)) names(Set) <- gsub("^f", "Frequency", names(Set)) #writing down the final tidy data set, taking the mean of every column broken down by volunteer and activity Set.dt<-data.table(Set) TidyData <- Set.dt[, lapply(.SD, mean), by = 'volunteer,activity'] write.table(TidyData, file = "TidyData.txt", row.names = FALSE)
/run_analysis.R
no_license
AnaBayes/Getting-and-Cleaning-Data-Project
R
false
false
3,944
r
library(dplyr) library(data.table) #downloading files temp<-tempfile() download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", temp) unzip(temp,list=TRUE) #reading files into R, setting column names and labels Features<-read.table(unzip(temp,"UCI HAR Dataset/features.txt")) TestSet<-read.table(unzip(temp,"UCI HAR Dataset/test/X_test.txt")) TestLabels<-read.table(unzip(temp,"UCI HAR Dataset/test/y_test.txt")) SubjectTest<-read.table(unzip(temp,"UCI HAR Dataset/test/subject_test.txt")) colnames(TestSet)<-t(Features[2]) TestSet$activity<-TestLabels[,1] TestSet$volunteer<-SubjectTest[,1] TrainSet<-read.table(unzip(temp,"UCI HAR Dataset/train/X_train.txt")) TrainLabels<-read.table(unzip(temp,"UCI HAR Dataset/train/y_train.txt")) SubjectTrain<-read.table(unzip(temp,"UCI HAR Dataset/train/subject_train.txt")) colnames(TrainSet)<-t(Features[2]) TrainSet$activity<-TrainLabels[,1] TrainSet$volunteer<-SubjectTrain[,1] #merging test and train sets, deleting duplicated columns Set<-rbind(TrainSet,TestSet) doubles<-duplicated(colnames(Set)) Set<-Set[,!doubles] #extracting the measurements on mean and standard deviation ColMean<-grep("mean()",names(Set),value=FALSE,fixed=TRUE) #including columns 471:477 because they also contain means ColMean<-append(Mean,471:477) SetMean<- Set[ColMean] ColStDev<-grep("std()",names(Set),value=FALSE) SetStDev<-Set[ColStDev] #naming activities Set$activity[Set$activity == 1] <- "Walking" Set$activity[Set$activity == 2] <- "Walking Upstairs" Set$activity[Set$activity == 3] <- "Walking Downstairs" Set$activity[Set$activity == 4] <- "Sitting" Set$activity[Set$activity == 5] <- "Standing" Set$activity[Set$activity == 6] <- "Laying" #naming volunteers Set$volunteer[Set$volunteer == 1] <- "Volunteer 1" Set$volunteer[Set$volunteer == 2] <- "Volunteer 2" Set$volunteer[Set$volunteer == 3] <- "Volunteer 3" Set$volunteer[Set$volunteer == 4] <- "Volunteer 4" Set$volunteer[Set$volunteer == 5] <- "Volunteer 5" 
Set$volunteer[Set$volunteer == 6] <- "Volunteer 6" Set$volunteer[Set$volunteer == 7] <- "Volunteer 7" Set$volunteer[Set$volunteer == 8] <- "Volunteer 8" Set$volunteer[Set$volunteer == 9] <- "Volunteer 9" Set$volunteer[Set$volunteer == 10] <- "Volunteer 10" Set$volunteer[Set$volunteer == 11] <- "Volunteer 11" Set$volunteer[Set$volunteer == 12] <- "Volunteer 12" Set$volunteer[Set$volunteer == 13] <- "Volunteer 13" Set$volunteer[Set$volunteer == 14] <- "Volunteer 14" Set$volunteer[Set$volunteer == 15] <- "Volunteer 15" Set$volunteer[Set$volunteer == 16] <- "Volunteer 16" Set$volunteer[Set$volunteer == 17] <- "Volunteer 17" Set$volunteer[Set$volunteer == 18] <- "Volunteer 18" Set$volunteer[Set$volunteer == 19] <- "Volunteer 19" Set$volunteer[Set$volunteer == 20] <- "Volunteer 20" Set$volunteer[Set$volunteer == 21] <- "Volunteer 21" Set$volunteer[Set$volunteer == 22] <- "Volunteer 22" Set$volunteer[Set$volunteer == 23] <- "Volunteer 23" Set$volunteer[Set$volunteer == 24] <- "Volunteer 24" Set$volunteer[Set$volunteer == 25] <- "Volunteer 25" Set$volunteer[Set$volunteer == 26] <- "Volunteer 26" Set$volunteer[Set$volunteer == 27] <- "Volunteer 27" Set$volunteer[Set$volunteer == 28] <- "Volunteer 28" Set$volunteer[Set$volunteer == 29] <- "Volunteer 29" Set$volunteer[Set$volunteer == 30] <- "Volunteer 30" #fixing column names names(Set) <- gsub("Acc", "Accelerator", names(Set)) names(Set) <- gsub("Mag", "Magnitude", names(Set)) names(Set) <- gsub("Gyro", "Gyroscope", names(Set)) names(Set) <- gsub("^t", "Time", names(Set)) names(Set) <- gsub("^f", "Frequency", names(Set)) #writing down the final tidy data set, taking the mean of every column broken down by volunteer and activity Set.dt<-data.table(Set) TidyData <- Set.dt[, lapply(.SD, mean), by = 'volunteer,activity'] write.table(TidyData, file = "TidyData.txt", row.names = FALSE)
library(tidyverse) library(here) #set working directory here(setwd("~/PSY6422_Final_Project")) #load data file df <- read.csv(here(setwd("~/PSY6422_Final_Project"),"data","disability_degree.csv")) head(df, 3) #tidying up the data #selecting target data for the visualization through - df <- df %>% select(Year,Disabled.with.Degree.or.equivalent,Non.disabled.with.Degree.or.equivalent) #Renaming/simplifying column names colnames(df)<- c("year","d.degree","nd.degree") #displaying the data head(df) #creating a new dataframe by grouping the data to create a column based on disability status disability_grouped <- data.frame(x = with(df, c(year)), y = with(df, c(d.degree, nd.degree)), group = rep(c("Disabled", "Non-Disabled"), each=7)) # displaying the new dataframe head(disability_grouped) #Creating a scatter plot to show the difference between groups over time ggplot(data = disability_grouped, mapping = aes(x = x, y = y, col = group)) + geom_point(shape=18, alpha = 0.8, size = 4) + scale_y_continuous(limits=c(10, 40), breaks= seq(10, 40, 5)) + scale_color_manual(values=c("#FAC218", "#905FD0")) + scale_fill_discrete(name = "Disability Status") + theme(axis.text.x = element_text(angle = 42.5, vjust = 1, hjust = 1), plot.title = element_text(hjust = 0.5)) + labs(color = "Disability status", x = "Year", y = "Percentage of sample with degree (%)", title = "Degree attainment by disability status", caption = "Source: Office of national statistics") ggsave("figs/disability_degree2021-05-24.png")
/disability_script_PSY6422.R
no_license
ejbennett/PSY6422-Visualization-Project
R
false
false
1,755
r
library(tidyverse) library(here) #set working directory here(setwd("~/PSY6422_Final_Project")) #load data file df <- read.csv(here(setwd("~/PSY6422_Final_Project"),"data","disability_degree.csv")) head(df, 3) #tidying up the data #selecting target data for the visualization through - df <- df %>% select(Year,Disabled.with.Degree.or.equivalent,Non.disabled.with.Degree.or.equivalent) #Renaming/simplifying column names colnames(df)<- c("year","d.degree","nd.degree") #displaying the data head(df) #creating a new dataframe by grouping the data to create a column based on disability status disability_grouped <- data.frame(x = with(df, c(year)), y = with(df, c(d.degree, nd.degree)), group = rep(c("Disabled", "Non-Disabled"), each=7)) # displaying the new dataframe head(disability_grouped) #Creating a scatter plot to show the difference between groups over time ggplot(data = disability_grouped, mapping = aes(x = x, y = y, col = group)) + geom_point(shape=18, alpha = 0.8, size = 4) + scale_y_continuous(limits=c(10, 40), breaks= seq(10, 40, 5)) + scale_color_manual(values=c("#FAC218", "#905FD0")) + scale_fill_discrete(name = "Disability Status") + theme(axis.text.x = element_text(angle = 42.5, vjust = 1, hjust = 1), plot.title = element_text(hjust = 0.5)) + labs(color = "Disability status", x = "Year", y = "Percentage of sample with degree (%)", title = "Degree attainment by disability status", caption = "Source: Office of national statistics") ggsave("figs/disability_degree2021-05-24.png")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extract_results.R \name{get_data_names} \alias{get_data_names} \title{Get data name out of model name} \usage{ get_data_names(model_name, model_type = "ts_notselected") } \arguments{ \item{model_name}{model name, string to process} \item{model_type}{"ts_notselected", "ts_selected", "lda_selected"} } \value{ data name } \description{ Get data name out of model name }
/man/get_data_names.Rd
permissive
weecology/MATSS-LDATS
R
false
true
448
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extract_results.R \name{get_data_names} \alias{get_data_names} \title{Get data name out of model name} \usage{ get_data_names(model_name, model_type = "ts_notselected") } \arguments{ \item{model_name}{model name, string to process} \item{model_type}{"ts_notselected", "ts_selected", "lda_selected"} } \value{ data name } \description{ Get data name out of model name }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/puzzle.R \name{puzzle} \alias{puzzle} \title{Demonstrate the chessboard with labeling points.} \usage{ puzzle() } \value{ The chessboard } \description{ Demonstrate the chessboard with labeling points. } \examples{ puzzle() }
/man/puzzle.Rd
permissive
Hochia/orientchessr
R
false
true
304
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/puzzle.R \name{puzzle} \alias{puzzle} \title{Demonstrate the chessboard with labeling points.} \usage{ puzzle() } \value{ The chessboard } \description{ Demonstrate the chessboard with labeling points. } \examples{ puzzle() }
x<-100:10 print(x) #print first element of x print(x[1]) #Print first and fourth element print(x[c(1,4)])
/vectordata4.R
no_license
ashishjsharda/R
R
false
false
108
r
x<-100:10 print(x) #print first element of x print(x[1]) #Print first and fourth element print(x[c(1,4)])
##Loading the Data household <- read.table("household_power_consumption.txt", header=TRUE, sep = ";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric')) df <- household ##Transforming the date df$Date <- as.Date(df$Date, "%d/%m/%Y") ##Filtering the specificis dates df <- subset(df,Date >= "2007-2-1" & Date <= "2007-2-2") ##Creating the datetime column datetime <- paste(df$Date, df$Time) df <- cbind(datetime,df) df$datetime <- as.POSIXct(datetime) ##Second plot plot(df$Global_active_power ~ df$datetime, type="l", ylab = "Global Active Power (Kilowatts)", xlab = "") ## Save file and close device dev.copy(png,"plot2.png", width=480, height=480) dev.off()
/plot2.R
no_license
felipeferraz14/ExData_Plotting1
R
false
false
787
r
##Loading the Data household <- read.table("household_power_consumption.txt", header=TRUE, sep = ";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric')) df <- household ##Transforming the date df$Date <- as.Date(df$Date, "%d/%m/%Y") ##Filtering the specificis dates df <- subset(df,Date >= "2007-2-1" & Date <= "2007-2-2") ##Creating the datetime column datetime <- paste(df$Date, df$Time) df <- cbind(datetime,df) df$datetime <- as.POSIXct(datetime) ##Second plot plot(df$Global_active_power ~ df$datetime, type="l", ylab = "Global Active Power (Kilowatts)", xlab = "") ## Save file and close device dev.copy(png,"plot2.png", width=480, height=480) dev.off()
#' Magic Number: Points to Millimeters #' @noRd PT_TO_MM <- 0.352778 charopts <- function(x) { paste(sprintf("\\code{\"%s\"}", x), collapse = ", ") } # copied from ggplot2 "%||%" <- function(a, b) { if (!is.null(a)) a else b } # copied from ggplot2 ggname <- function(prefix, grob) { grob$name <- grid::grobName(grob, prefix) grob } rd_optlist <- function(x) { paste0("\\code{\"", as.character(x), "\"}", collapse = ", ") } check_pal_n <- function(n, max_n) { if (n > max_n) { warning("This manual palette can handle a maximum of ", max_n, " values.", "You have supplied ", n, ".") } }
/R/utils.R
no_license
mnel/ggthemes
R
false
false
621
r
#' Magic Number: Points to Millimeters #' @noRd PT_TO_MM <- 0.352778 charopts <- function(x) { paste(sprintf("\\code{\"%s\"}", x), collapse = ", ") } # copied from ggplot2 "%||%" <- function(a, b) { if (!is.null(a)) a else b } # copied from ggplot2 ggname <- function(prefix, grob) { grob$name <- grid::grobName(grob, prefix) grob } rd_optlist <- function(x) { paste0("\\code{\"", as.character(x), "\"}", collapse = ", ") } check_pal_n <- function(n, max_n) { if (n > max_n) { warning("This manual palette can handle a maximum of ", max_n, " values.", "You have supplied ", n, ".") } }
################### library(igraph) test1 <- delete.edges (mails.important.g, which (E(mails.important.g)$date > as.Date("2000-01-01"))) test1 <- delete.vertices(test1, which(degree(test1) < 1)) plot(test1, edge.label=NA, vertex.label=NA, vertex.size=6) test1 <- delete.edges (mails.important.g, which (E(mails.important.g)$date > as.Date("2001-01-01"))) test1 <- delete.vertices(test1, which(degree(test1) < 1)) plot(test1, edge.label=NA, vertex.label=NA, vertex.size=6) test1 <- delete.edges (mails.important.g, which (E(mails.important.g)$date > as.Date("2002-01-01"))) test1 <- delete.vertices(test1, which(degree(test1) < 1)) plot(test1, edge.label=NA, vertex.label=NA, vertex.size=6) test1 <- delete.edges (mails.important.g, which (E(mails.important.g)$date > as.Date("2003-01-01"))) test1 <- delete.vertices(test1, which(degree(test1) < 1)) plot(test1, edge.label=NA, vertex.label=NA, vertex.size=6) library(ndtv) library(network) library(intergraph) detach("package:arcdiagram") detach("package:igraph") mails.important <- mails.important net3 <- network(mails.important, matrix.type ="edgelist", weighted = TRUE , directed=FALSE) set.vertex.attribute(net3, "attr1", attr1[,1]) get.vertex.attribute(net3, "attr1") set.vertex.attribute(net3, "attr2", attr2[,1]) get.vertex.attribute(net3, "attr2") set.vertex.attribute(net3, "attr3", attr3[,1]) get.vertex.attribute(net3, "attr3") set.vertex.attribute(net3, "attr4", attr4[,1]) get.vertex.attribute(net3, "attr4") set.vertex.attribute(net3, "domain", domain) get.vertex.attribute(net3, "domain") all_p <- as.numeric(as.Date(mails.important[,5])) all <- match(all_p, unique(all_p)) vs <- data.frame(onset=1, terminus=max(all), vertex.id=1:14) es <- data.frame(onset=all, terminus=max(all), head=as.matrix(net3, matrix.type="edgelist")[,1], tail=as.matrix(net3, matrix.type="edgelist")[,2]) net3.dyn <- networkDynamic(base.net=net3, edge.spells=es, vertex.spells=vs) plot(network.extract(net3.dyn, at=15), vertex.col="domain") slice.par <- 
list(start = 1, end = max(all), interval = 10, aggregate.dur = 10, rule = "any") render.par <- list(tween.frames = 10, show.time = TRUE) plot.par <- list(mar = c(0, 0, 0, 0)) ## # compute.animation(net3.dyn, animation.mode = "kamadakawai", slice.par=slice.par) # filmstrip(net3.dyn, displaylabels=F, mfrow=c(2, 3), slice.par=slice.par) ## library(RColorBrewer) colors = brewer.pal(length(levels(as.factor(domain))), "Set1") animation <- render.d3movie(net3.dyn, usearrows = T, displaylabels = F, label = net3 %v% "domain", bg = "#ffffff", output.mode = "htmlWidget", render.par = render.par, vertex.col = colors[match(domain, unique(domain))], vertex.cex = 0.5, #function(slice){ degree(slice)/2.5}, vertex.tooltip = paste("<b>Name:</b>", (net3.dyn %v% "vertex.names") , "<br>", "<b>Type:</b>", (net3.dyn %v% "type.label")), edge.cex = 17, edge.lwd = (net3.dyn %e% "weight")/3, edge.col = 'lightgrey', edge.tooltip = paste("<b>Edge type:</b>", (net3.dyn %e% "type"), "<br>", "<b>Edge weight:</b>", (net3.dyn %e% "weight" ))) # vertex.tooltip = function(slice) {paste('name:',slice%v%'vertex.names','<br>','status:', slice%v%'testatus')}) animation library("scatterplot3d") timePrism(net3.dyn, at = c(1:500), displaylabels = FALSE, planes = TRUE) library(tsna) plot(tErgmStats(net3.dyn,'edges', start = 0, end = max(all), time.interval = 10)) plot(hist(edgeDuration(net3.dyn))) plot(tEdgeFormation(net3.dyn)) plot(tSnaStats(net3.dyn,'gtrans')) path <- tPath(net3.dyn,v = 13, graph.step.time=1) plot(path, edge.lwd = 2) plotPaths(net3.dyn, path, label.cex=NA)
/Empirical/r/doc-R/Social/ergm/divers/dynamic/dynamic.r
no_license
ygtfrdes/Program
R
false
false
4,157
r
###################
# Exploratory script: visualise the evolution of an email network over time,
# first as static yearly snapshots (igraph), then as an animated dynamic
# network (ndtv/networkDynamic).
# NOTE(review): relies on objects created elsewhere (mails.important.g,
# mails.important, attr1..attr4, domain) -- this script is not self-contained.
library(igraph)

# Snapshot up to 2000: drop edges dated after the cut-off, remove isolated
# vertices, and plot the remaining graph.
test1 <- delete.edges (mails.important.g, which (E(mails.important.g)$date > as.Date("2000-01-01")))
test1 <- delete.vertices(test1, which(degree(test1) < 1))
plot(test1, edge.label=NA, vertex.label=NA, vertex.size=6)

# Snapshot up to 2001.
test1 <- delete.edges (mails.important.g, which (E(mails.important.g)$date > as.Date("2001-01-01")))
test1 <- delete.vertices(test1, which(degree(test1) < 1))
plot(test1, edge.label=NA, vertex.label=NA, vertex.size=6)

# Snapshot up to 2002.
test1 <- delete.edges (mails.important.g, which (E(mails.important.g)$date > as.Date("2002-01-01")))
test1 <- delete.vertices(test1, which(degree(test1) < 1))
plot(test1, edge.label=NA, vertex.label=NA, vertex.size=6)

# Snapshot up to 2003.
test1 <- delete.edges (mails.important.g, which (E(mails.important.g)$date > as.Date("2003-01-01")))
test1 <- delete.vertices(test1, which(degree(test1) < 1))
plot(test1, edge.label=NA, vertex.label=NA, vertex.size=6)

# Switch tool stacks: ndtv/network clash with igraph's namespace, so igraph
# (and arcdiagram) are detached before building `network` objects.
library(ndtv)
library(network)
library(intergraph)
detach("package:arcdiagram")
detach("package:igraph")

# NOTE(review): self-assignment is a no-op; presumably a leftover from an
# earlier conversion step.
mails.important <- mails.important

# Build an undirected, weighted network from the edge list.
net3 <- network(mails.important, matrix.type ="edgelist", weighted = TRUE , directed=FALSE)

# Attach vertex attributes; the get.* calls just echo them for inspection.
# NOTE(review): attr1..attr4 / domain semantics are defined elsewhere -- not
# visible from this script.
set.vertex.attribute(net3, "attr1", attr1[,1])
get.vertex.attribute(net3, "attr1")
set.vertex.attribute(net3, "attr2", attr2[,1])
get.vertex.attribute(net3, "attr2")
set.vertex.attribute(net3, "attr3", attr3[,1])
get.vertex.attribute(net3, "attr3")
set.vertex.attribute(net3, "attr4", attr4[,1])
get.vertex.attribute(net3, "attr4")
set.vertex.attribute(net3, "domain", domain)
get.vertex.attribute(net3, "domain")

# Map each edge's date (column 5) to a compact integer "time step":
# identical dates share a step, first occurrence order defines the index.
all_p <- as.numeric(as.Date(mails.important[,5]))
all <- match(all_p, unique(all_p))

# Vertex spells: all 14 vertices exist for the whole observation window.
# NOTE(review): the vertex count 14 is hard-coded -- verify against the data.
vs <- data.frame(onset=1, terminus=max(all), vertex.id=1:14)
# Edge spells: each edge appears at its time step and persists to the end.
es <- data.frame(onset=all, terminus=max(all), head=as.matrix(net3, matrix.type="edgelist")[,1], tail=as.matrix(net3, matrix.type="edgelist")[,2])
net3.dyn <- networkDynamic(base.net=net3, edge.spells=es, vertex.spells=vs)

# Quick static check of the dynamic network at time 15, coloured by domain.
plot(network.extract(net3.dyn, at=15), vertex.col="domain")

# Animation parameters: 10-step slices aggregated over 10 time units.
slice.par <- list(start = 1, end = max(all), interval = 10, aggregate.dur = 10, rule = "any")
render.par <- list(tween.frames = 10, show.time = TRUE)
plot.par <- list(mar = c(0, 0, 0, 0))

##
# compute.animation(net3.dyn, animation.mode = "kamadakawai", slice.par=slice.par)
# filmstrip(net3.dyn, displaylabels=F, mfrow=c(2, 3), slice.par=slice.par)
##

# One colour per domain level for the animation.
library(RColorBrewer)
colors = brewer.pal(length(levels(as.factor(domain))), "Set1")

# Render the interactive HTML animation; tooltips carry vertex/edge metadata.
animation <- render.d3movie(net3.dyn, usearrows = T, displaylabels = F, label = net3 %v% "domain", bg = "#ffffff", output.mode = "htmlWidget", render.par = render.par, vertex.col = colors[match(domain, unique(domain))], vertex.cex = 0.5, #function(slice){ degree(slice)/2.5},
                            vertex.tooltip = paste("<b>Name:</b>", (net3.dyn %v% "vertex.names") , "<br>", "<b>Type:</b>", (net3.dyn %v% "type.label")), edge.cex = 17, edge.lwd = (net3.dyn %e% "weight")/3, edge.col = 'lightgrey', edge.tooltip = paste("<b>Edge type:</b>", (net3.dyn %e% "type"), "<br>", "<b>Edge weight:</b>", (net3.dyn %e% "weight" )))
# vertex.tooltip = function(slice) {paste('name:',slice%v%'vertex.names','<br>','status:', slice%v%'testatus')})
animation

# 3D "time prism" view of the network over the first 500 time points.
library("scatterplot3d")
timePrism(net3.dyn, at = c(1:500), displaylabels = FALSE, planes = TRUE)

# Temporal summary statistics (tsna): edge counts, durations, formation
# rate, and transitivity over time.
library(tsna)
plot(tErgmStats(net3.dyn,'edges', start = 0, end = max(all), time.interval = 10))
plot(hist(edgeDuration(net3.dyn)))
plot(tEdgeFormation(net3.dyn))
plot(tSnaStats(net3.dyn,'gtrans'))

# Temporal reachability path from vertex 13, then plot it.
path <- tPath(net3.dyn,v = 13, graph.step.time=1)
plot(path, edge.lwd = 2)
plotPaths(net3.dyn, path, label.cex=NA)
/Tema 2/SMV/SVM(1).R
no_license
Ocnarf2070/AprendizajeComputacional
R
false
false
11,535
r
##load libraries needed library(ggplot2) library(gridBase) library(gridExtra) setwd('/archive/beier_d/dave_data/dave_muga_0420') ##testing on one hom_data <- read.csv('smo_tens.mm10_2000kb_1000kb.bed', sep = '\t') goodChrOrder <- paste("chr",c(1:19,"X","Y"),sep="") hom_data$chr <- factor(hom_data$chr,levels=goodChrOrder) hom_data ggplot(data=hom_data,aes(x=start, y=ten_count),binwidth=1e5 ) + geom_bar(stat="identity", colour="black") + facet_grid( ~ chr, scales="free", space="free_x") + theme_bw() + theme(axis.text.x = element_blank()) ##loop through files filename <- dir('/archive/beier_d/dave_data/dave_muga_0420', pattern ="kb.bed") for(i in 1:length(filename)){ ##read in hom data hom_data <- read.csv(filename[i], sep ='\t') ##reorder chromosome, if using 'chr' goodChrOrder <- paste("chr",c(1:19,"X","Y"),sep="") hom_data$chr <- factor(hom_data$chr,levels=goodChrOrder) ##bar graph ggplot(data=hom_data,aes(x=start, y=ten_count),binwidth=1e5 ) + geom_bar(stat="identity", colour="black") + facet_grid( ~ chr, scales="free", space="free_x") + theme_bw() + theme(axis.text.x = element_blank()) gene_pdf <- gsub(".bed",".pdf",filename[i]) ggsave(gene_pdf, width = 20) }
/r_scripts/dave_muga_graphing_0420.R
no_license
atimms/ratchet_scripts
R
false
false
1,210
r
##load libraries needed library(ggplot2) library(gridBase) library(gridExtra) setwd('/archive/beier_d/dave_data/dave_muga_0420') ##testing on one hom_data <- read.csv('smo_tens.mm10_2000kb_1000kb.bed', sep = '\t') goodChrOrder <- paste("chr",c(1:19,"X","Y"),sep="") hom_data$chr <- factor(hom_data$chr,levels=goodChrOrder) hom_data ggplot(data=hom_data,aes(x=start, y=ten_count),binwidth=1e5 ) + geom_bar(stat="identity", colour="black") + facet_grid( ~ chr, scales="free", space="free_x") + theme_bw() + theme(axis.text.x = element_blank()) ##loop through files filename <- dir('/archive/beier_d/dave_data/dave_muga_0420', pattern ="kb.bed") for(i in 1:length(filename)){ ##read in hom data hom_data <- read.csv(filename[i], sep ='\t') ##reorder chromosome, if using 'chr' goodChrOrder <- paste("chr",c(1:19,"X","Y"),sep="") hom_data$chr <- factor(hom_data$chr,levels=goodChrOrder) ##bar graph ggplot(data=hom_data,aes(x=start, y=ten_count),binwidth=1e5 ) + geom_bar(stat="identity", colour="black") + facet_grid( ~ chr, scales="free", space="free_x") + theme_bw() + theme(axis.text.x = element_blank()) gene_pdf <- gsub(".bed",".pdf",filename[i]) ggsave(gene_pdf, width = 20) }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.fb.R
\docType{data}
\name{read.fb}
\alias{read.fb}
\alias{char_cols}
\alias{read_header}
\alias{read.fb}
\title{FB file reader}
\format{An object of class \code{list} of length 52.}
\usage{
char_cols

read_header(file_path)

read.fb(file_path)
}
\arguments{
\item{file_path}{Path to fb .csv file.}
}
\value{
A data.table object that is also class 'fb.'
}
\description{
Read in funding bucket files via data.table::fread and convert them to the
"fb" class. The header is stored as attributes. Applies attributes based on
the file header information.
}
\note{
Could format the columns coming in as factors if that were desirable.
}
\examples{
\dontrun{
require(data.table)
folder_path <- 'C:/Users/dalrymplej/Dropbox/EToPac/data-sets'
file_name <- 'fb contract 10_01_14_to_07_31_15 run 10_19_15.csv'
file_name <- 'fb direct 10_01_14_to_07_31_15 run 10_19_15.csv'
file_path <- file.path(folder_path, file_name)
my_fb_dt <- read.fb(file_path)
lapply(my_fb_dt, class)
attributes(my_fb_dt)
}
}
\keyword{datasets}
/man/read.fb.Rd
permissive
JamesDalrymple/cmhmisc
R
false
true
1,095
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.fb.R
\docType{data}
\name{read.fb}
\alias{read.fb}
\alias{char_cols}
\alias{read_header}
\alias{read.fb}
\title{FB file reader}
\format{An object of class \code{list} of length 52.}
\usage{
char_cols

read_header(file_path)

read.fb(file_path)
}
\arguments{
\item{file_path}{Path to fb .csv file.}
}
\value{
A data.table object that is also class 'fb.'
}
\description{
Read in funding bucket files via data.table::fread and convert them to the
"fb" class. The header is stored as attributes. Applies attributes based on
the file header information.
}
\note{
Could format the columns coming in as factors if that were desirable.
}
\examples{
\dontrun{
require(data.table)
folder_path <- 'C:/Users/dalrymplej/Dropbox/EToPac/data-sets'
file_name <- 'fb contract 10_01_14_to_07_31_15 run 10_19_15.csv'
file_name <- 'fb direct 10_01_14_to_07_31_15 run 10_19_15.csv'
file_path <- file.path(folder_path, file_name)
my_fb_dt <- read.fb(file_path)
lapply(my_fb_dt, class)
attributes(my_fb_dt)
}
}
\keyword{datasets}
setwd("/home/alejandro/Documents/MsC Bioinformatics/Advanced Bioinformatics/") # Read all the output files from stringtie bis1 = read.delim(file = "bis1_t_data.ctab", header = TRUE) bis2 = read.delim(file = "bis2_t_data.ctab", header = TRUE) bis3 = read.delim(file = "bis2_t_data.ctab", header = TRUE) control1 = read.delim(file = "control1_t_data.ctab", header = TRUE) control2 = read.delim(file = "control2_t_data.ctab", header = TRUE) control3 = read.delim(file = "control3_t_data.ctab", header = TRUE) # Store as genenames all the gene_name column that contains an actual gene name genenames = bis1$gene_name[bis1$gene_name != "."] # Store as expression values all the bis1FPKM = bis1$FPKM[bis1$gene_name != "."] bis2FPKM = bis2$FPKM[bis2$gene_name != "."] bis3FPKM = bis2$FPKM[bis3$gene_name != "."] control1FPKM = control1$FPKM[control1$gene_name != "."] control2FPKM = control2$FPKM[control2$gene_name != "."] control3FPKM = control3$FPKM[control3$gene_name != "."] # Create a dataframe with the expression lvl in the cells with samples as columns and genes as rows. exp_data = data.frame(bis1FPKM, bis2FPKM, bis3FPKM, control1FPKM, control2FPKM, control3FPKM, row.names = genenames) # Calculate standard deviation between samples for each gene. And filter out those which have a sd < 1. sds = apply(exp_data, 1 , sd) idxs = which(sds < 1) exp_data = exp_data[-idxs, ]
/StringtieToCluster.R
no_license
AlFontal/pyomics
R
false
false
1,386
r
setwd("/home/alejandro/Documents/MsC Bioinformatics/Advanced Bioinformatics/") # Read all the output files from stringtie bis1 = read.delim(file = "bis1_t_data.ctab", header = TRUE) bis2 = read.delim(file = "bis2_t_data.ctab", header = TRUE) bis3 = read.delim(file = "bis2_t_data.ctab", header = TRUE) control1 = read.delim(file = "control1_t_data.ctab", header = TRUE) control2 = read.delim(file = "control2_t_data.ctab", header = TRUE) control3 = read.delim(file = "control3_t_data.ctab", header = TRUE) # Store as genenames all the gene_name column that contains an actual gene name genenames = bis1$gene_name[bis1$gene_name != "."] # Store as expression values all the bis1FPKM = bis1$FPKM[bis1$gene_name != "."] bis2FPKM = bis2$FPKM[bis2$gene_name != "."] bis3FPKM = bis2$FPKM[bis3$gene_name != "."] control1FPKM = control1$FPKM[control1$gene_name != "."] control2FPKM = control2$FPKM[control2$gene_name != "."] control3FPKM = control3$FPKM[control3$gene_name != "."] # Create a dataframe with the expression lvl in the cells with samples as columns and genes as rows. exp_data = data.frame(bis1FPKM, bis2FPKM, bis3FPKM, control1FPKM, control2FPKM, control3FPKM, row.names = genenames) # Calculate standard deviation between samples for each gene. And filter out those which have a sd < 1. sds = apply(exp_data, 1 , sd) idxs = which(sds < 1) exp_data = exp_data[-idxs, ]
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/support.R \name{reorder_rt_ifr} \alias{reorder_rt_ifr} \title{Reorder Rt or IFR trajectories} \usage{ reorder_rt_ifr(x, rank) } \arguments{ \item{x}{An \code{Rt_trajectories} or \code{IFR_t_trajectories} object, as returned by \code{\link[=carehomes_Rt_trajectories]{carehomes_Rt_trajectories()}} or \link{carehomes_ifr_t_trajectories} respectively.} \item{rank}{A vector of ranks to reorder by} } \value{ An \code{Rt_trajectories} or \code{IFR_t_trajectories} object with appropriately reordered elements } \description{ Reorder Rt or IFR trajectories }
/man/reorder_rt_ifr.Rd
permissive
gsarfo-boateng/sircovid
R
false
true
634
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/support.R \name{reorder_rt_ifr} \alias{reorder_rt_ifr} \title{Reorder Rt or IFR trajectories} \usage{ reorder_rt_ifr(x, rank) } \arguments{ \item{x}{An \code{Rt_trajectories} or \code{IFR_t_trajectories} object, as returned by \code{\link[=carehomes_Rt_trajectories]{carehomes_Rt_trajectories()}} or \link{carehomes_ifr_t_trajectories} respectively.} \item{rank}{A vector of ranks to reorder by} } \value{ An \code{Rt_trajectories} or \code{IFR_t_trajectories} object with appropriately reordered elements } \description{ Reorder Rt or IFR trajectories }
# 8/16/2017 Preprocess gse77164 # Results: # Youngest person is 15, so doesn't cover puberty # well enough to answer my question # Excellent coverage of 18-22yo #....................# ##### Load Stuff ##### #....................# setwd("/labs/khatrilab/ebongen/sexDifferences2.0/") source("00_tools/general_GEMfunx.R") source("~/Tools/Graphing Scripts/quickPlots.R") source("00_tools/plot_funx.R") setwd("0_datasets/2_age/prePuberty/gse77164_nepal/") #..................# ##### Download ##### #..................# gse77164 = getGEOData("GSE77164") gse77164 = gse77164$originalData$GSE77164 gse77164$rawPheno = gse77164$pheno gse77164$pheno = cleanUpPheno(gse77164$rawPheno, T) View(gse77164$pheno) hist(gse77164$pheno$age) # Most of the samples are between 18-22 # Check expr boxplot(gse77164$expr[,1:5]) min(gse77164$expr) # positive value # Check keys # They're fine gse77164$keys[1:10] #..........................# ##### Quick Timecourse ##### #..........................# load("/labs/khatrilab/ebongen/sexDifferences2.0/1_metaAnaly/sexMetaObj.RData") pdf("gse77164_timecourse_blueFemales.pdf") timecourse_iSEXS(gse77164, sexMetaObj$filterResults$FDR0.05_es0.4_nStudies2_looaFALSE_hetero0, "age", "female", main = "GSE77164 - full iSEXS") timecourse_iSEXS(gse77164, sexMetaObj$filterResults$autosomeOnly, "age", "female", main = "GSE77164 - Autosomal iSEXS") dev.off()
/downloadData/gse77164_preProcess.R
permissive
ebongen/iSEXS_cellReports2019
R
false
false
1,386
r
# 8/16/2017 Preprocess gse77164 # Results: # Youngest person is 15, so doesn't cover puberty # well enough to answer my question # Excellent coverage of 18-22yo #....................# ##### Load Stuff ##### #....................# setwd("/labs/khatrilab/ebongen/sexDifferences2.0/") source("00_tools/general_GEMfunx.R") source("~/Tools/Graphing Scripts/quickPlots.R") source("00_tools/plot_funx.R") setwd("0_datasets/2_age/prePuberty/gse77164_nepal/") #..................# ##### Download ##### #..................# gse77164 = getGEOData("GSE77164") gse77164 = gse77164$originalData$GSE77164 gse77164$rawPheno = gse77164$pheno gse77164$pheno = cleanUpPheno(gse77164$rawPheno, T) View(gse77164$pheno) hist(gse77164$pheno$age) # Most of the samples are between 18-22 # Check expr boxplot(gse77164$expr[,1:5]) min(gse77164$expr) # positive value # Check keys # They're fine gse77164$keys[1:10] #..........................# ##### Quick Timecourse ##### #..........................# load("/labs/khatrilab/ebongen/sexDifferences2.0/1_metaAnaly/sexMetaObj.RData") pdf("gse77164_timecourse_blueFemales.pdf") timecourse_iSEXS(gse77164, sexMetaObj$filterResults$FDR0.05_es0.4_nStudies2_looaFALSE_hetero0, "age", "female", main = "GSE77164 - full iSEXS") timecourse_iSEXS(gse77164, sexMetaObj$filterResults$autosomeOnly, "age", "female", main = "GSE77164 - Autosomal iSEXS") dev.off()
#' Retreive Screenings Available at a Cineplex City #' #' @param city_xml an xml nodeset; output of `get_city()` #' #' @return data frame with screening details for a given city that can be used as input for `filter_screenings()`. #' @export #' #' @examples #' my_screenings_details <- get_screenings(city_xml = my_city) get_screenings <- function(city_xml) { if (is.null(city_xml) | base::missing(city_xml)) { stop("`city_xml` can't be empty or missing. See examples in `?get_city`.", call. = FALSE) } n_movies <- length(city_xml %>% rvest::html_nodes(".movie-schedule--details")) screenings_details <- dplyr::tibble() # get screening attributes (by screening, not my movie) for (i in 1:n_movies){ german_title <- c( city_xml[[i]] %>% rvest::html_node(".filmInfoLink") %>% # take first only (possible second node has no text) rvest::html_text() ) dates <- c( city_xml[[i]] %>% rvest::html_nodes(".schedule__date") %>% rvest::html_attr("datetime") ) times <- c( city_xml[[i]] %>% rvest::html_nodes(".movie-schedule--performances--all ") %>% # other one not unique rvest::html_nodes(".schedule__time") %>% rvest::html_text() ) release_types <- c( city_xml[[i]] %>% ##TODO: split 2D/3D from OmU/OV? rvest::html_nodes(".movie-schedule--performances--all") %>% rvest::html_nodes(".performance-holder") %>% rvest::html_attr("data-release-type") ##TODO: collect lots then unique to see factors ) sites <- c( city_xml[[i]] %>% rvest::html_nodes(".movie-schedule--performances--all") %>% rvest::html_nodes(".performance-holder") %>% rvest::html_attr("data-site") ) # ##TODO: presence vs. value; might need to add "attempt()" or something like this? 
# accessibility_icons <- c( # city_xml[[i]] %>% # rvest::html_nodes(".performance-date-block") %>% # rvest::html_nodes(".schedule__location") %>% # rvest::html_nodes(".icon-wheelchair_alt") %>% # rvest::html_attr("class") # ) screenings_details <- dplyr::bind_rows( screenings_details, dplyr::tibble(german_title = german_title, dates = dates, times = times, release_types = release_types, sites = sites) # , accessibility_icons = accessibility_icons ) } return(screenings_details) }
/R/get_screenings.R
permissive
sowla/kino
R
false
false
2,421
r
#' Retrieve Screenings Available at a Cineplex City
#'
#' @param city_xml an xml nodeset; output of `get_city()`
#'
#' @return data frame with screening details for a given city that can be used as input for `filter_screenings()`.
#' @export
#'
#' @examples
#' my_screenings_details <- get_screenings(city_xml = my_city)
get_screenings <- function(city_xml) {

  # Bug fix: check missing() FIRST and use short-circuit `||`. The original
  # `is.null(city_xml) | base::missing(city_xml)` evaluated is.null() even
  # when the argument was absent, erroring with an unhelpful message.
  if (base::missing(city_xml) || is.null(city_xml)) {
    stop("`city_xml` can't be empty or missing. See examples in `?get_city`.",
         call. = FALSE)
  }

  n_movies <- length(city_xml %>% rvest::html_nodes(".movie-schedule--details"))

  # Collect one tibble per movie in a pre-allocated list and bind once at the
  # end -- avoids growing a data frame inside the loop. seq_len() also handles
  # n_movies == 0 correctly (1:0 would iterate twice).
  per_movie <- vector("list", n_movies)

  # get screening attributes (by screening, not by movie)
  for (i in seq_len(n_movies)) {
    # German title: first matching node only (a possible second node is empty).
    german_title <- c(
      city_xml[[i]] %>%
        rvest::html_node(".filmInfoLink") %>%
        rvest::html_text()
    )

    dates <- c(
      city_xml[[i]] %>%
        rvest::html_nodes(".schedule__date") %>%
        rvest::html_attr("datetime")
    )

    # "--all" container used because the other time node is not unique.
    times <- c(
      city_xml[[i]] %>%
        rvest::html_nodes(".movie-schedule--performances--all ") %>%
        rvest::html_nodes(".schedule__time") %>%
        rvest::html_text()
    )

    release_types <- c(
      city_xml[[i]] %>%   ##TODO: split 2D/3D from OmU/OV?
        rvest::html_nodes(".movie-schedule--performances--all") %>%
        rvest::html_nodes(".performance-holder") %>%
        rvest::html_attr("data-release-type")
      ##TODO: collect lots then unique to see factors
    )

    sites <- c(
      city_xml[[i]] %>%
        rvest::html_nodes(".movie-schedule--performances--all") %>%
        rvest::html_nodes(".performance-holder") %>%
        rvest::html_attr("data-site")
    )

    # ##TODO: presence vs. value; might need to add "attempt()" or similar?
    # accessibility_icons <- c(
    #   city_xml[[i]] %>%
    #     rvest::html_nodes(".performance-date-block") %>%
    #     rvest::html_nodes(".schedule__location") %>%
    #     rvest::html_nodes(".icon-wheelchair_alt") %>%
    #     rvest::html_attr("class")
    # )

    per_movie[[i]] <- dplyr::tibble(
      german_title = german_title,
      dates = dates,
      times = times,
      release_types = release_types,
      sites = sites
      # , accessibility_icons = accessibility_icons
    )
  }

  # bind_rows() of an empty list still yields an empty tibble, matching the
  # original behaviour when no movies are found.
  screenings_details <- dplyr::bind_rows(per_movie)

  return(screenings_details)
}
#!/usr/bin/RScript h <- function(x, theta) { x %*% t(theta) } err <- function(x, y, theta) { m <- nrow(x) y.model <- h(x, theta) error <- sum((y - y.model)^2)*1/m return(error) } grad <- function(x,y,theta) { m <- nrow(x) gradient <- 1/m * t(x) %*% ( h(x,theta) - y ) t(gradient) } grad.descent <- function(x, y, maxit, alpha, lambda) { n <- ncol(x) m <- nrow(x) theta <- matrix(rep(1,n), nrow = 1 ) reg <- t(rbind(0, as.matrix(rep(lambda/m,n-1))) ) for(i in 1: maxit) { # print(err(x, y, theta)) theta <- theta - alpha * ( grad(x, y, theta) + reg * theta ) } return(theta) } test <- function() { set.seed(1) "gen" <- function(x){ return(30*x^2 + 5*x + 10 + rnorm(length(x))*0.1) } source('featureCook.R') X <- as.matrix(runif(10)) Y <- gen(X) X <- featureNormalize(X) Y <- featureNormalize(Y) X.m <- mapFeature(X, 2) theta <- grad.descent(X.m, Y, 2000, 0.01, 0.1) X.plot <- as.matrix(seq(-1.5,1.5, length.out = 300)) Y.plot <- h(mapFeature(X.plot,2), theta) plot(X,Y) lines(X.plot,Y.plot) } test_real <- function() { source('load_data.R') degree <- 5 x.m <- mapFeature(x.n, degree) y.m <- y.n theta <- grad.descent(x.m, y.m, 5000, 0.05, 0.8) source('plot_data.R') plot_hypothesis(theta, degree, x.n, y.n) points(xtest.n, ytest.n, col="blue") }
/ml/ex5/grad_desc.R
no_license
alexeyche/alexeyche-junk
R
false
false
1,445
r
#!/usr/bin/RScript h <- function(x, theta) { x %*% t(theta) } err <- function(x, y, theta) { m <- nrow(x) y.model <- h(x, theta) error <- sum((y - y.model)^2)*1/m return(error) } grad <- function(x,y,theta) { m <- nrow(x) gradient <- 1/m * t(x) %*% ( h(x,theta) - y ) t(gradient) } grad.descent <- function(x, y, maxit, alpha, lambda) { n <- ncol(x) m <- nrow(x) theta <- matrix(rep(1,n), nrow = 1 ) reg <- t(rbind(0, as.matrix(rep(lambda/m,n-1))) ) for(i in 1: maxit) { # print(err(x, y, theta)) theta <- theta - alpha * ( grad(x, y, theta) + reg * theta ) } return(theta) } test <- function() { set.seed(1) "gen" <- function(x){ return(30*x^2 + 5*x + 10 + rnorm(length(x))*0.1) } source('featureCook.R') X <- as.matrix(runif(10)) Y <- gen(X) X <- featureNormalize(X) Y <- featureNormalize(Y) X.m <- mapFeature(X, 2) theta <- grad.descent(X.m, Y, 2000, 0.01, 0.1) X.plot <- as.matrix(seq(-1.5,1.5, length.out = 300)) Y.plot <- h(mapFeature(X.plot,2), theta) plot(X,Y) lines(X.plot,Y.plot) } test_real <- function() { source('load_data.R') degree <- 5 x.m <- mapFeature(x.n, degree) y.m <- y.n theta <- grad.descent(x.m, y.m, 5000, 0.05, 0.8) source('plot_data.R') plot_hypothesis(theta, degree, x.n, y.n) points(xtest.n, ytest.n, col="blue") }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gaugings_cabecera.R
\docType{data}
\name{gaugings_cabecera}
\alias{gaugings_cabecera}
\title{Gaugings at Cabecera (direct stage discharge measurement)}
\format{
A data.frame with 3 columns:
\describe{
\item{dates}{dates in POSIXct format}
\item{h}{Water level (m)}
\item{q}{discharge (m3/s)}
}
}
\source{
\url{http://agua.unorte.edu.uy/}
}
\usage{
gaugings_cabecera
}
\description{
Gaugings at Cabecera (direct stage discharge measurement)
}
\keyword{datasets}
/man/gaugings_cabecera.Rd
no_license
rafaelnavas/SanAntonioData
R
false
true
545
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gaugings_cabecera.R
\docType{data}
\name{gaugings_cabecera}
\alias{gaugings_cabecera}
\title{Gaugings at Cabecera (direct stage discharge measurement)}
\format{
A data.frame with 3 columns:
\describe{
\item{dates}{dates in POSIXct format}
\item{h}{Water level (m)}
\item{q}{discharge (m3/s)}
}
}
\source{
\url{http://agua.unorte.edu.uy/}
}
\usage{
gaugings_cabecera
}
\description{
Gaugings at Cabecera (direct stage discharge measurement)
}
\keyword{datasets}
AddCaseButton <- function(label=gettext("Add", domain = "R-RQDA")){ AddCasB <- gbutton(label,handler=function(h,...) { CaseName <- ginput(gettext("Enter new Case Name. ", domain = "R-RQDA"), icon="info") if (!is.na(CaseName)) { Encoding(CaseName) <- "UTF-8" AddCase(CaseName) CaseNamesUpdate() enabled(button$profmatB) <- TRUE idx <- as.character(which(.rqda$.CasesNamesWidget[] %in% CaseName) -1) ## note the position, before manipulation of items path <-gtkTreePathNewFromString(idx) gtkTreeViewScrollToCell(.rqda$.CasesNamesWidget$widget, path,use.align=TRUE,row.align = 0.05) } } ) assign("AddCasB",AddCasB,envir=button) enabled(AddCasB) <- FALSE AddCasB } DeleteCaseButton <- function(label=gettext("Delete", domain = "R-RQDA")){ DelCasB <- gbutton(label, handler=function(h,...) { del <- gconfirm(gettext("Really delete the Case?", domain = "R-RQDA"),icon="question") if (isTRUE(del)){ SelectedCase <- svalue(.rqda$.CasesNamesWidget) Encoding(SelectedCase) <- "UTF-8" caseid <- rqda_sel(sprintf("select id from cases where name='%s'", enc(SelectedCase)))$id rqda_exe(sprintf("update cases set status=0 where name='%s'", enc(SelectedCase))) ## set status in table freecode to 0 rqda_exe(sprintf("update caselinkage set status=0 where caseid=%i",caseid)) ## set status in table caselinkage to 0 CaseNamesUpdate() .rqda$.FileofCase[] <- NULL } } ) assign("DelCasB",DelCasB,envir=button) enabled(DelCasB) <- FALSE DelCasB } Case_RenameButton <- function(label=gettext("Rename", domain = "R-RQDA"),CaseNamesWidget=.rqda$.CasesNamesWidget,...) { ## rename of selected case. CasRenB <- gbutton(label,handler=function(h,...) { selectedCaseName <- svalue(CaseNamesWidget) ## get the new file names NewName <- ginput(gettext("Enter new Case name. 
", domain = "R-RQDA"), text=selectedCaseName, icon="info") if (!identical(NewName, character(0))) { if (!is.na(NewName)){ rename(selectedCaseName,NewName,"cases") CaseNamesUpdate() } } } ) assign("CasRenB",CasRenB,envir=button) enabled(CasRenB) <- FALSE CasRenB } CaseMark_Button<-function(label=gettext("Mark", domain = "R-RQDA")){ CasMarB <- gbutton(label,handler=function(h,...) { MarkCaseFun() UpdateFileofCaseWidget() } ) assign("CasMarB",CasMarB,envir=button) enabled(CasMarB) <- FALSE CasMarB } MarkCaseFun <- function(){ if (is_projOpen(envir=.rqda,conName="qdacon")) { con <- .rqda$qdacon tryCatch({ ans <- mark(get(".openfile_gui",envir=.rqda),fore.col=NULL,back.col=.rqda$back.col,addButton=FALSE) if (ans$start != ans$end){ ## when selected no text, makes on sense to do anything. SelectedCase <- svalue(.rqda$.CasesNamesWidget) SelectedCase <- enc(SelectedCase,encoding="UTF-8") currentCid <- rqda_sel(sprintf("select id from cases where name='%s'", SelectedCase))[,1] SelectedFile <- svalue(.rqda$.root_edit) ##Encoding(SelectedFile) <- "UTF-8" SelectedFile <- enc(SelectedFile,encoding="UTF-8") currentFid <- rqda_sel(sprintf("select id from source where name='%s'", SelectedFile))[,1] ## Query of caselinkage ExistLinkage <- rqda_sel(sprintf("select rowid, selfirst, selend,status from caselinkage where caseid=%i and fid=%i and status=1",currentCid,currentFid)) DAT <- data.frame(caseid=currentCid,fid=currentFid, selfirst=ans$start,selend=ans$end,status=1, owner=.rqda$owner,date=date(),memo="") if (nrow(ExistLinkage)==0){ ## if there are no relevant caselinkage, write the caselinkage table success <- rqda_wrt("caselinkage", DAT) if (!success) gmessage(gettext("Fail to write to database.", domain = "R-RQDA")) } else { Relations <- apply(ExistLinkage,1,FUN=function(x) relation(x[c("selfirst","selend")],c(ans$start,ans$end))) ExistLinkage$Relation <- sapply(Relations,FUN=function(x)x$Relation) if (!any(ExistLinkage$Relation=="exact")){ ## if there are exact caselinkage, 
skip; if no exact linkage then continue ExistLinkage$WhichMin <- sapply(Relations,FUN=function(x)x$WhichMin) ExistLinkage$Start <- sapply(Relations,FUN=function(x)x$UnionIndex[1]) ExistLinkage$End <- sapply(Relations,FUN=function(x)x$UnionIndex[2]) if (all(ExistLinkage$Relation=="proximity")){ success <- rqda_wrt("caselinkage", DAT) if (!success) gmessage(gettext("Fail to write to database.", domain = "R-RQDA")) } else { del1 <- ExistLinkage$WhichMin==2 & ExistLinkage$Relation =="inclusion"; del1[is.na(del1)] <- FALSE del2 <- ExistLinkage$Relation =="overlap"; del2[is.na(del2)] <- FALSE del <- (del1 | del2) if (any(del)){ Sel <- c(min(ExistLinkage$Start[del]), max(ExistLinkage$End[del])) memo <- rqda_sel(sprintf("select memo from caselinkage where rowid in (%s)", paste(ExistLinkage$rowid[del],collapse=",",sep="")))$memo memo <- paste(memo,collapse="",sep="") rqda_exe(sprintf("delete from caselinkage where rowid in (%s)", paste(ExistLinkage$rowid[del],collapse=",",sep=""))) DAT <- data.frame(caseid=currentCid,fid=currentFid, selfirst=Sel[1],selend=Sel[2],status=1, owner=.rqda$owner,date=date(),memo=memo) success <- rqda_wrt("caselinkage", DAT) if (!success) gmessage(gettext("Fail to write to database.", domain = "R-RQDA")) } } } } } },error=function(e){} ) } } CaseUnMark_Button<-function(label=gettext("Unmark", domain = "R-RQDA")){ CasUnMarB <- gbutton(label, handler=function(h,...) { con <- .rqda$qdacon W <- .rqda$.openfile_gui$widget ## get the widget for file display. If it does not exist, then return NULL. sel_index <- tryCatch(sindex(W,includeAnchor=FALSE),error=function(e) {}) ## if the not file is open, unmark doesn't work. 
if (!is.null(sel_index)) { SelectedCase <- svalue(.rqda$.CasesNamesWidget) if (length(SelectedCase)==0) {gmessage(gettext("Select a case first.", domain = "R-RQDA"),con=TRUE)} else{ SelectedCase <- enc(SelectedCase,"UTF-8") caseid <- rqda_sel(sprintf("select id from cases where name='%s'",SelectedCase))[,1] SelectedFile <- svalue(.rqda$.root_edit) SelectedFile <- enc(SelectedFile,"UTF-8") currentFid <- rqda_sel(sprintf("select id from source where name='%s'", SelectedFile))[,1] codings_index <- rqda_sel(sprintf("select rowid, caseid, fid, selfirst, selend from caselinkage where caseid=%i and fid=%i", caseid, currentFid)) ## should only work with those related to current case and current file. rowid <- codings_index$rowid[(codings_index$selfirst >= sel_index$startN) & (codings_index$selend <= sel_index$endN)] if (is.numeric(rowid)) for (j in rowid) { rqda_exe(sprintf("update caselinkage set status=0 where rowid=%i", j)) } coding.idx <- rqda_sel(sprintf("select selfirst,selend from coding where fid=%i and status=1",currentFid)) anno.idx <- rqda_sel(sprintf("select position from annotation where fid=%i and status=1",currentFid))$position allidx <- unlist(coding.idx,anno.idx) if (!is.null(allidx)){ startN<- sel_index$startN + sum(allidx <= sel_index$startN) endN <- sel_index$endN + sum(allidx <= sel_index$endN) } ## better to get around the loop by sqlite condition expression. ClearMark(W,min=startN,max=endN,clear.fore.col = FALSE, clear.back.col = TRUE) ## even for the non-current code. can improve. } } UpdateFileofCaseWidget() } ) assign("CasUnMarB",CasUnMarB,envir=button) enabled(CasUnMarB) <- FALSE CasUnMarB } CaseAttribute_Button <- function(label=gettext("Attribute", domain = "R-RQDA")){ CasAttrB <- gbutton(text=label, handler = function(h, ...) 
{ SelectedCase <- svalue(.rqda$.CasesNamesWidget) if (length(SelectedCase!=0)){ SelectedCase <- enc(SelectedCase,"UTF-8") caseid <- rqda_sel(sprintf("select id from cases where status=1 and name='%s'",SelectedCase))[,1] CaseAttrFun(caseId=caseid,title=SelectedCase) }}) assign("CasAttrB", CasAttrB, envir=button) enabled(button$CasAttrB) <- FALSE CasAttrB } prof_mat_Button <- function(label="prof_mat"){ profmatB <- gbutton(text=label, handler = function(h, ...) { prof_mat(case_names = gselect.list(.rqda$.CasesNamesWidget[], multiple = TRUE, x= getOption("widgetCoordinate")[1])) }) assign("profmatB", profmatB, envir=button) profmatB } GetCaseNamesWidgetMenu <- function() { CaseNamesWidgetMenu <- list() CaseNamesWidgetMenu[[1]] <- gaction(gettext("Add File(s)", domain = "R-RQDA"), handler =function(h, ...) { if (is_projOpen(envir = .rqda, conName = "qdacon", message = FALSE)) { SelectedCase <- svalue(.rqda$.CasesNamesWidget) SelectedCase <- enc(SelectedCase,"UTF-8") caseid <- rqda_sel(sprintf("select id from cases where status=1 and name='%s'",SelectedCase))[,1] freefile <- rqda_sel("select name, id, file from source where status=1") fileofcase <- rqda_sel(sprintf("select fid from caselinkage where status=1 and caseid=%i",caseid)) Encoding(freefile[['name']]) <- Encoding(freefile[['file']]) <- "UTF-8" if (nrow(fileofcase)!=0){ fileoutofcase <- subset(freefile,!(freefile$id %in% fileofcase$fid)) } else fileoutofcase <- freefile if (length(fileoutofcase[['name']])==0) gmessage(gettext("All files are linked with this case.", domain = "R-RQDA"), cont=TRUE) else { ##Selected <- select.list(fileoutofcase[['name']],multiple=TRUE) CurrentFrame <- sys.frame(sys.nframe()) ## sys.frame(): get the frame of n ## nframe(): get n of current frame ## The value of them depends on where they evaluated, should not placed inside RunOnSelected() RunOnSelected(fileoutofcase[['name']],multiple=TRUE,enclos=CurrentFrame,expr={ if (length(Selected)> 0) { Encoding(Selected) <- "UTF-8" fid <- 
fileoutofcase[fileoutofcase$name %in% Selected,"id"] selend <- nchar(fileoutofcase[fileoutofcase$name %in% Selected,"file"]) Dat <- data.frame(caseid=caseid,fid=fid,selfirst=0,selend=selend,status=1,owner=.rqda$owner,date=date(),memo=NA) rqda_wrt("caselinkage", Dat) UpdateFileofCaseWidget() }}) } } }) CaseNamesWidgetMenu[[2]] <- gaction(gettext("Add New File to Selected Case", domain = "R-RQDA"), handler =function(h, ...) { AddNewFileFunOfCase() }) CaseNamesWidgetMenu[[3]] <- gaction(gettext("Case Memo", domain = "R-RQDA"), handler =function(h, ...) { if (is_projOpen(envir=.rqda,conName="qdacon")) { MemoWidget(gettext("Case", domain = "R-RQDA"),.rqda$.CasesNamesWidget,"cases") ## see CodeCatButton.R for definition of MemoWidget } }) CaseNamesWidgetMenu[[4]] <- gaction(gettext("Show Cases with Memo Only", domain = "R-RQDA"), handler =function(h, ...) { if (is_projOpen(envir=.rqda,conName="qdacon")) { cnames <- rqda_sel("select name from cases where memo is not null")$name if (!is.null(cnames)) cnames <- enc(cnames,"UTF-8") .rqda$.CasesNamesWidget[] <- cnames } }) CaseNamesWidgetMenu[[5]] <- gaction(gettext("Add/modify Attributes...", domain = "R-RQDA"), handler =function(h, ...) { if (is_projOpen(envir=.rqda,conName="qdacon")) { SelectedCase <- svalue(.rqda$.CasesNamesWidget) if (length(SelectedCase!=0)){ SelectedCase <- enc(SelectedCase,"UTF-8") caseid <- rqda_sel(sprintf("select id from cases where status=1 and name='%s'",SelectedCase))[,1] CaseAttrFun(caseId=caseid,title=SelectedCase) } } }) CaseNamesWidgetMenu[[6]] <- gaction(gettext("View Attributes", domain = "R-RQDA"), handler =function(h, ...) { if (is_projOpen(envir=.rqda,conName="qdacon")) { viewCaseAttr() } }) CaseNamesWidgetMenu[[7]] <- gaction(gettext("Export Case Attributes", domain = "R-RQDA"), handler =function(h, ...) 
{ if (is_projOpen(envir=.rqda,conName="qdacon")) { fName <- gfile(type='save',filter=list("csv"=list(pattern=c("*.csv")))) Encoding(fName) <- "UTF-8" if (length(grep(".csv$",fName))==0) fName <- sprintf("%s.csv",fName) write.csv(getAttr("case"), row.names=FALSE, file=fName, na="") } }) CaseNamesWidgetMenu[[8]] <- gaction(gettext("Sort All by Created Time", domain = "R-RQDA"), handler =function(h, ...) { CaseNamesUpdate(.rqda$.CasesNamesWidget,sortByTime = TRUE) }) search_lst <- vector("list", 4) search_lst[[1]] <- gaction("Google", handler =function(h, ...) { KeyWord <- svalue(.rqda$.CasesNamesWidget) if (length(KeyWord)!=0){ KeyWord <- iconv(KeyWord, from="UTF-8") browseURL(sprintf("http://www.google.com/search?q=%s",KeyWord)) } }) search_lst[[2]] <- gaction("Yahoo", handler =function(h, ...) { KeyWord <- svalue(.rqda$.CasesNamesWidget) if (length(KeyWord)!=0){ KeyWord <- iconv(KeyWord, from="UTF-8") browseURL(sprintf("http://search.yahoo.com/search;_ylt=A0oGkmFV.CZJNssAOK.l87UF?p=%s&ei=UTF-8&iscqry=&fr=sfp&fr2=sfp" ,KeyWord)) } }) search_lst[[3]] <- gaction("Baidu", handler =function(h, ...) { KeyWord <- svalue(.rqda$.CasesNamesWidget) if (length(KeyWord)!=0){ KeyWord <- iconv(KeyWord, from="UTF-8",to="CP936") ## should be in CP936 to work properly. browseURL(sprintf("http://www.baidu.com/s?wd=%s",paste("%",paste(charToRaw(KeyWord),sep="",collapse="%"),sep="",collapse=""))) } }) search_lst[[4]] <- gaction("Sogou", handler =function(h, ...) { KeyWord <- svalue(.rqda$.CasesNamesWidget) if (length(KeyWord)!=0){ KeyWord <- iconv(KeyWord, from="UTF-8",to="CP936")## should be in CP936 to work properly. browseURL(sprintf("http://www.sogou.com/sohu?query=%s",paste("%",paste(charToRaw(KeyWord),sep="",collapse="%"),sep="",collapse=""))) } }) CaseNamesWidgetMenu[[gettext("Web Search", domain = "R-RQDA")]] <- search_lst CaseNamesWidgetMenu } ## pop-up menu of .rqda$.FileofCase GetFileofCaseWidgetMenu <- function() { FileofCaseWidgetMenu <- list() ## not used yet. 
FileofCaseWidgetMenu[[1]] <- gaction(gettext("Add To File Category ...", domain = "R-RQDA"), handler =function(h, ...) { if (is_projOpen(envir = .rqda, conName = "qdacon", message = FALSE)) { AddToFileCategory(Widget=.rqda$.FileofCase,updateWidget=FALSE) } }) FileofCaseWidgetMenu[[2]] <- gaction(gettext("Drop Selected File(s)", domain = "R-RQDA"), handler =function(h, ...) { if (is_projOpen(envir = .rqda, conName = "qdacon", message = FALSE)) { FileOfCat <- svalue(.rqda$.FileofCase) if ((NumofSelected <- length(FileOfCat)) ==0) { gmessage(gettext("Please select the Files you want to delete.", domain = "R-RQDA"),con=TRUE)} else { ## Give a confirm msg del <- gconfirm(sprintf(gettext("Delete %i file(s) from this category. Are you sure?", domain = "R-RQDA"),NumofSelected),con=TRUE,icon="question") if (isTRUE(del)){ SelectedCase <- svalue(.rqda$.CasesNamesWidget) ## Encoding(SelectedCase) <- Encoding(FileOfCat)<- "UTF-8" SelectedCase <- enc(SelectedCase,"UTF-8") FileOfCat <- enc(FileOfCat,"UTF-8") caseid <- rqda_sel(sprintf("select id from cases where status=1 and name='%s'",SelectedCase))[,1] for (i in FileOfCat){ fid <- rqda_sel(sprintf("select id from source where status=1 and name='%s'",i))[,1] rqda_exe(sprintf("update caselinkage set status=0 where caseid=%i and fid=%i",caseid,fid)) } ## update Widget UpdateFileofCaseWidget() } } } }) FileofCaseWidgetMenu[[3]] <- gaction(gettext("Delete Selected File(s)", domain = "R-RQDA"), handler =function(h, ...) 
{ if (is_projOpen(envir=.rqda,conName="qdacon")) { SelectedFile <- svalue(.rqda$.FileofCase) Encoding(SelectedFile) <- "UTF-8" for (i in SelectedFile){ fid <- rqda_sel( sprintf("select id from source where name='%s'",i))$id rqda_exe( sprintf("update source set status=0 where name='%s'",i)) rqda_exe( sprintf("update caselinkage set status=0 where fid=%i",fid)) rqda_exe( sprintf("update treefile set status=0 where fid=%i",fid)) rqda_exe( sprintf("update coding set status=0 where fid=%i",fid)) } .rqda$.FileofCase[] <- setdiff(.rqda$.FileofCase[],SelectedFile) } }) FileofCaseWidgetMenu[[4]] <- gaction(gettext("Edit Selected File", domain = "R-RQDA"), handler =function(h, ...) { EditFileFun(FileNameWidget=.rqda$.FileofCase) }) FileofCaseWidgetMenu[[5]] <- gaction(gettext("File Memo", domain = "R-RQDA"), handler =function(h, ...) { MemoWidget(gettext("File", domain = "R-RQDA"),.rqda$.FileofCase,"source") }) FileofCaseWidgetMenu[[6]] <- gaction(gettext("Rename selected File", domain = "R-RQDA"), handler =function(h, ...) { if (is_projOpen(envir=.rqda,conName="qdacon")) { selectedFN <- svalue(.rqda$.FileofCase) if (length(selectedFN)==0){ gmessage(gettext("Select a file first.", domain = "R-RQDA"),icon="error",con=TRUE) } else { NewFileName <- ginput(gettext("Enter new file name. ", domain = "R-RQDA"),text=selectedFN, icon="info") if (!is.na(NewFileName)) { Encoding(NewFileName) <- "UTF-8" rename(selectedFN,NewFileName,"source") Fnames <- .rqda$.FileofCase[] Fnames[Fnames==selectedFN] <- NewFileName .rqda$.FileofCase[] <- Fnames } }} }) FileofCaseWidgetMenu[[7]] <- gaction(gettext("Search Files within Selected Case", domain = "R-RQDA"), handler =function(h, ...) 
{ if (is_projOpen(envir = .rqda, conName = "qdacon", message = FALSE)) { pattern <- ifelse(is.null(.rqda$lastsearch),"file like '%%'",.rqda$lastsearch) pattern <- ginput(gettext("Please input a search pattern.", domain = "R-RQDA"),text=pattern) if (!is.na(pattern)){ Fid <- getFileIds("case") tryCatch(searchFiles(pattern,Fid=Fid,Widget=".FileofCase",is.UTF8=TRUE),error=function(e) gmessage(gettext("Error~~~.", domain = "R-RQDA")),con=TRUE) assign("lastsearch",pattern,envir=.rqda) } } }) show_lst <- vector("list", 3) show_lst[[1]] <- gaction(gettext("Show All by Sorted by Imported Time", domain = "R-RQDA"), handler =function(h, ...) { ## UpdateFileofCaseWidget() if (is_projOpen(envir=.rqda,conName="qdacon")) { fid <- getFileIds(condition="case",type="all") FileNameWidgetUpdate(FileNamesWidget=.rqda$.FileofCase,FileId=fid) } }) show_lst[[2]] <- gaction(gettext("Show Coded Files Only (sorted)", domain = "R-RQDA"), handler =function(h, ...) { if (is_projOpen(envir=.rqda,conName="qdacon")) { fid <- getFileIds(condition="case",type="coded") FileNameWidgetUpdate(FileNamesWidget=.rqda$.FileofCase,FileId=fid) } }) show_lst[[3]] <- gaction(gettext("Show Uncoded Files Only (sorted)", domain = "R-RQDA"), handler =function(h, ...) { if (is_projOpen(envir=.rqda,conName="qdacon")) { fid <- getFileIds(condition="case",type="uncoded") FileNameWidgetUpdate(FileNamesWidget=.rqda$.FileofCase,FileId=fid) } }) FileofCaseWidgetMenu[[gettext("Show ...", domain = "R-RQDA")]] <- show_lst FileofCaseWidgetMenu[[9]] <- gaction(gettext("Show Selected File Property", domain = "R-RQDA"), handler =function(h, ...) { if (is_projOpen(envir = .rqda, conName = "qdacon", message = FALSE)) { ShowFileProperty(Fid=getFileIds("case","selected")) } }) FileofCaseWidgetMenu }
/R/CaseButton.R
no_license
Proloy2018/RQDA
R
false
false
20,677
r
## CaseButton.R -- gWidgets/RGtk2 buttons and pop-up menus for managing "cases"
## in an RQDA project.  All database access goes through the project helpers
## rqda_sel()/rqda_exe()/rqda_wrt(); deletion is soft (rows get status=0).
## Button objects are stored in the `button` environment via assign() so other
## parts of the GUI can enable/disable them.

## Button: prompt for a new case name, store it, refresh the case list, and
## scroll the list so the new entry is visible.
AddCaseButton <- function(label=gettext("Add", domain = "R-RQDA")){
    AddCasB <- gbutton(label,handler=function(h,...) {
        CaseName <- ginput(gettext("Enter new Case Name. ", domain = "R-RQDA"), icon="info")
        if (!is.na(CaseName)) {
            Encoding(CaseName) <- "UTF-8"
            AddCase(CaseName)
            CaseNamesUpdate()
            enabled(button$profmatB) <- TRUE
            ## GTK tree paths are 0-based, hence the -1.
            idx <- as.character(which(.rqda$.CasesNamesWidget[] %in% CaseName) -1) ## note the position, before manipulation of items
            path <-gtkTreePathNewFromString(idx)
            gtkTreeViewScrollToCell(.rqda$.CasesNamesWidget$widget, path,use.align=TRUE,row.align = 0.05)
        }
    }
    )
    assign("AddCasB",AddCasB,envir=button)
    enabled(AddCasB) <- FALSE ## disabled until a project is open
    AddCasB
}

## Button: soft-delete the selected case and its file linkages after a
## confirmation dialog.
DeleteCaseButton <- function(label=gettext("Delete", domain = "R-RQDA")){
    DelCasB <- gbutton(label, handler=function(h,...) {
        del <- gconfirm(gettext("Really delete the Case?", domain = "R-RQDA"),icon="question")
        if (isTRUE(del)){
            SelectedCase <- svalue(.rqda$.CasesNamesWidget)
            Encoding(SelectedCase) <- "UTF-8"
            caseid <- rqda_sel(sprintf("select id from cases where name='%s'", enc(SelectedCase)))$id
            rqda_exe(sprintf("update cases set status=0 where name='%s'", enc(SelectedCase))) ## set status in table cases to 0
            rqda_exe(sprintf("update caselinkage set status=0 where caseid=%i",caseid)) ## set status in table caselinkage to 0
            CaseNamesUpdate()
            .rqda$.FileofCase[] <- NULL ## clear the file-of-case list
        }
    }
    )
    assign("DelCasB",DelCasB,envir=button)
    enabled(DelCasB) <- FALSE
    DelCasB
}

## Button: rename the selected case (updates the cases table via rename()).
Case_RenameButton <- function(label=gettext("Rename", domain = "R-RQDA"),CaseNamesWidget=.rqda$.CasesNamesWidget,...)
{
    ## rename of selected case.
    CasRenB <- gbutton(label,handler=function(h,...) {
        selectedCaseName <- svalue(CaseNamesWidget)
        ## get the new case name from the user
        NewName <- ginput(gettext("Enter new Case name. ", domain = "R-RQDA"), text=selectedCaseName, icon="info")
        ## ginput returns character(0) when cancelled, NA when closed
        if (!identical(NewName, character(0))) {
            if (!is.na(NewName)){
                rename(selectedCaseName,NewName,"cases")
                CaseNamesUpdate()
            }
        }
    }
    )
    assign("CasRenB",CasRenB,envir=button)
    enabled(CasRenB) <- FALSE
    CasRenB
}

## Button: link the highlighted text of the open file to the selected case.
CaseMark_Button<-function(label=gettext("Mark", domain = "R-RQDA")){
    CasMarB <- gbutton(label,handler=function(h,...) {
        MarkCaseFun()
        UpdateFileofCaseWidget()
    }
    )
    assign("CasMarB",CasMarB,envir=button)
    enabled(CasMarB) <- FALSE
    CasMarB
}

## Core worker for CaseMark_Button: records the selected text span of the open
## file as a caselinkage row for the selected case, merging it with existing
## overlapping/inclusive spans so linkages stay non-redundant.
MarkCaseFun <- function(){
    if (is_projOpen(envir=.rqda,conName="qdacon")) {
        con <- .rqda$qdacon
        tryCatch({
            ans <- mark(get(".openfile_gui",envir=.rqda),fore.col=NULL,back.col=.rqda$back.col,addButton=FALSE)
            if (ans$start != ans$end){
                ## when no text is selected, it makes no sense to do anything.
                SelectedCase <- svalue(.rqda$.CasesNamesWidget)
                SelectedCase <- enc(SelectedCase,encoding="UTF-8")
                currentCid <- rqda_sel(sprintf("select id from cases where name='%s'", SelectedCase))[,1]
                SelectedFile <- svalue(.rqda$.root_edit)
                ##Encoding(SelectedFile) <- "UTF-8"
                SelectedFile <- enc(SelectedFile,encoding="UTF-8")
                currentFid <- rqda_sel(sprintf("select id from source where name='%s'", SelectedFile))[,1]
                ## Query of caselinkage: active spans already linking this case and file
                ExistLinkage <- rqda_sel(sprintf("select rowid, selfirst, selend,status from caselinkage where caseid=%i and fid=%i and status=1",currentCid,currentFid))
                DAT <- data.frame(caseid=currentCid,fid=currentFid, selfirst=ans$start,selend=ans$end,status=1, owner=.rqda$owner,date=date(),memo="")
                if (nrow(ExistLinkage)==0){
                    ## if there are no relevant caselinkage, write the caselinkage table
                    success <- rqda_wrt("caselinkage", DAT)
                    if (!success) gmessage(gettext("Fail to write to database.", domain = "R-RQDA"))
                } else {
                    ## classify how the new span relates to each existing span
                    Relations <- apply(ExistLinkage,1,FUN=function(x) relation(x[c("selfirst","selend")],c(ans$start,ans$end)))
                    ExistLinkage$Relation <- sapply(Relations,FUN=function(x)x$Relation)
                    if (!any(ExistLinkage$Relation=="exact")){
                        ## if there are exact caselinkage, skip; if no exact linkage then continue
                        ExistLinkage$WhichMin <- sapply(Relations,FUN=function(x)x$WhichMin)
                        ExistLinkage$Start <- sapply(Relations,FUN=function(x)x$UnionIndex[1])
                        ExistLinkage$End <- sapply(Relations,FUN=function(x)x$UnionIndex[2])
                        if (all(ExistLinkage$Relation=="proximity")){
                            ## disjoint from everything: just add the new span
                            success <- rqda_wrt("caselinkage", DAT)
                            if (!success) gmessage(gettext("Fail to write to database.", domain = "R-RQDA"))
                        } else {
                            ## merge: drop spans the new one overlaps or contains,
                            ## then write one span covering their union.
                            del1 <- ExistLinkage$WhichMin==2 & ExistLinkage$Relation =="inclusion"; del1[is.na(del1)] <- FALSE
                            del2 <- ExistLinkage$Relation =="overlap"; del2[is.na(del2)] <- FALSE
                            del <- (del1 | del2)
                            if (any(del)){
                                Sel <- c(min(ExistLinkage$Start[del]), max(ExistLinkage$End[del]))
                                ## concatenate memos of the spans being replaced
                                memo <- rqda_sel(sprintf("select memo from caselinkage where rowid in (%s)", paste(ExistLinkage$rowid[del],collapse=",",sep="")))$memo
                                memo <- paste(memo,collapse="",sep="")
                                rqda_exe(sprintf("delete from caselinkage where rowid in (%s)", paste(ExistLinkage$rowid[del],collapse=",",sep="")))
                                DAT <- data.frame(caseid=currentCid,fid=currentFid, selfirst=Sel[1],selend=Sel[2],status=1, owner=.rqda$owner,date=date(),memo=memo)
                                success <- rqda_wrt("caselinkage", DAT)
                                if (!success) gmessage(gettext("Fail to write to database.", domain = "R-RQDA"))
                            }
                        }
                    }
                }
            }
        },error=function(e){}
        )
    }
}

## Button: remove case linkages whose span lies inside the current selection of
## the open file, and clear the highlight in the text widget.
CaseUnMark_Button<-function(label=gettext("Unmark", domain = "R-RQDA")){
    CasUnMarB <- gbutton(label, handler=function(h,...) {
        con <- .rqda$qdacon
        W <- .rqda$.openfile_gui$widget
        ## get the widget for file display. If it does not exist, then return NULL.
        sel_index <- tryCatch(sindex(W,includeAnchor=FALSE),error=function(e) {})
        ## if no file is open, unmark doesn't work.
        if (!is.null(sel_index)) {
            SelectedCase <- svalue(.rqda$.CasesNamesWidget)
            if (length(SelectedCase)==0) {gmessage(gettext("Select a case first.", domain = "R-RQDA"),con=TRUE)}
            else{
                SelectedCase <- enc(SelectedCase,"UTF-8")
                caseid <- rqda_sel(sprintf("select id from cases where name='%s'",SelectedCase))[,1]
                SelectedFile <- svalue(.rqda$.root_edit)
                SelectedFile <- enc(SelectedFile,"UTF-8")
                currentFid <- rqda_sel(sprintf("select id from source where name='%s'", SelectedFile))[,1]
                codings_index <- rqda_sel(sprintf("select rowid, caseid, fid, selfirst, selend from caselinkage where caseid=%i and fid=%i", caseid, currentFid))
                ## should only work with those related to current case and current file.
                rowid <- codings_index$rowid[(codings_index$selfirst >= sel_index$startN) & (codings_index$selend <= sel_index$endN)]
                if (is.numeric(rowid)) for (j in rowid) { rqda_exe(sprintf("update caselinkage set status=0 where rowid=%i", j)) }
                ## translate stored (stripped) positions back to widget positions
                coding.idx <- rqda_sel(sprintf("select selfirst,selend from coding where fid=%i and status=1",currentFid))
                anno.idx <- rqda_sel(sprintf("select position from annotation where fid=%i and status=1",currentFid))$position
                ## NOTE(review): unlist(coding.idx, anno.idx) passes anno.idx as the
                ## 'recursive' argument, so annotation positions are silently ignored;
                ## presumably unlist(c(coding.idx, anno.idx)) was intended -- confirm.
                allidx <- unlist(coding.idx,anno.idx)
                if (!is.null(allidx)){
                    startN<- sel_index$startN + sum(allidx <= sel_index$startN)
                    endN <- sel_index$endN + sum(allidx <= sel_index$endN)
                }
                ## better to get around the loop by sqlite condition expression.
                ClearMark(W,min=startN,max=endN,clear.fore.col = FALSE, clear.back.col = TRUE)
                ## even for the non-current code. can improve.
            }
        }
        UpdateFileofCaseWidget()
    }
    )
    assign("CasUnMarB",CasUnMarB,envir=button)
    enabled(CasUnMarB) <- FALSE
    CasUnMarB
}

## Button: open the attribute editor for the selected case.
CaseAttribute_Button <- function(label=gettext("Attribute", domain = "R-RQDA")){
    CasAttrB <- gbutton(text=label, handler = function(h, ...)
    {
        SelectedCase <- svalue(.rqda$.CasesNamesWidget)
        ## NOTE(review): length(SelectedCase!=0) is the length of a logical vector
        ## and is >= 1 even with no selection; length(SelectedCase)!=0 was
        ## presumably intended -- confirm before changing.
        if (length(SelectedCase!=0)){
            SelectedCase <- enc(SelectedCase,"UTF-8")
            caseid <- rqda_sel(sprintf("select id from cases where status=1 and name='%s'",SelectedCase))[,1]
            CaseAttrFun(caseId=caseid,title=SelectedCase)
        }})
    assign("CasAttrB", CasAttrB, envir=button)
    enabled(button$CasAttrB) <- FALSE
    CasAttrB
}

## Button: compute a profile matrix for the cases chosen in a selection dialog.
prof_mat_Button <- function(label="prof_mat"){
    profmatB <- gbutton(text=label, handler = function(h, ...)
    {
        prof_mat(case_names = gselect.list(.rqda$.CasesNamesWidget[], multiple = TRUE, x= getOption("widgetCoordinate")[1]))
    })
    assign("profmatB", profmatB, envir=button)
    profmatB
}

## Build the right-click pop-up menu for the case-names widget.  Returns a
## named/indexed list of gaction objects (plus a nested "Web Search" submenu).
GetCaseNamesWidgetMenu <- function() {
    CaseNamesWidgetMenu <- list()
    ## Link already-imported file(s) to the selected case (full-file spans).
    CaseNamesWidgetMenu[[1]] <- gaction(gettext("Add File(s)", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir = .rqda, conName = "qdacon", message = FALSE)) {
            SelectedCase <- svalue(.rqda$.CasesNamesWidget)
            SelectedCase <- enc(SelectedCase,"UTF-8")
            caseid <- rqda_sel(sprintf("select id from cases where status=1 and name='%s'",SelectedCase))[,1]
            freefile <- rqda_sel("select name, id, file from source where status=1")
            fileofcase <- rqda_sel(sprintf("select fid from caselinkage where status=1 and caseid=%i",caseid))
            Encoding(freefile[['name']]) <- Encoding(freefile[['file']]) <- "UTF-8"
            ## offer only the files not yet linked to this case
            if (nrow(fileofcase)!=0){
                fileoutofcase <- subset(freefile,!(freefile$id %in% fileofcase$fid))
            } else fileoutofcase <- freefile
            if (length(fileoutofcase[['name']])==0) gmessage(gettext("All files are linked with this case.", domain = "R-RQDA"), cont=TRUE)
            else {
                ##Selected <- select.list(fileoutofcase[['name']],multiple=TRUE)
                CurrentFrame <- sys.frame(sys.nframe())
                ## sys.frame(): get the frame of n
                ## nframe(): get n of current frame
                ## Their values depend on where they are evaluated, so they should
                ## not be placed inside RunOnSelected()
                RunOnSelected(fileoutofcase[['name']],multiple=TRUE,enclos=CurrentFrame,expr={
                    if (length(Selected)> 0) {
                        Encoding(Selected) <- "UTF-8"
                        fid <- fileoutofcase[fileoutofcase$name %in% Selected,"id"]
                        ## link the whole file: span runs from 0 to nchar(file)
                        selend <- nchar(fileoutofcase[fileoutofcase$name %in% Selected,"file"])
                        Dat <- data.frame(caseid=caseid,fid=fid,selfirst=0,selend=selend,status=1,owner=.rqda$owner,date=date(),memo=NA)
                        rqda_wrt("caselinkage", Dat)
                        UpdateFileofCaseWidget()
                    }})
            }
        }
    })
    CaseNamesWidgetMenu[[2]] <- gaction(gettext("Add New File to Selected Case", domain = "R-RQDA"), handler =function(h, ...)
    {
        AddNewFileFunOfCase()
    })
    CaseNamesWidgetMenu[[3]] <- gaction(gettext("Case Memo", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir=.rqda,conName="qdacon")) {
            MemoWidget(gettext("Case", domain = "R-RQDA"),.rqda$.CasesNamesWidget,"cases")
            ## see CodeCatButton.R for definition of MemoWidget
        }
    })
    CaseNamesWidgetMenu[[4]] <- gaction(gettext("Show Cases with Memo Only", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir=.rqda,conName="qdacon")) {
            cnames <- rqda_sel("select name from cases where memo is not null")$name
            if (!is.null(cnames)) cnames <- enc(cnames,"UTF-8")
            .rqda$.CasesNamesWidget[] <- cnames
        }
    })
    CaseNamesWidgetMenu[[5]] <- gaction(gettext("Add/modify Attributes...", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir=.rqda,conName="qdacon")) {
            SelectedCase <- svalue(.rqda$.CasesNamesWidget)
            ## NOTE(review): same length(SelectedCase!=0) guard issue as in
            ## CaseAttribute_Button -- confirm.
            if (length(SelectedCase!=0)){
                SelectedCase <- enc(SelectedCase,"UTF-8")
                caseid <- rqda_sel(sprintf("select id from cases where status=1 and name='%s'",SelectedCase))[,1]
                CaseAttrFun(caseId=caseid,title=SelectedCase)
            }
        }
    })
    CaseNamesWidgetMenu[[6]] <- gaction(gettext("View Attributes", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir=.rqda,conName="qdacon")) {
            viewCaseAttr()
        }
    })
    CaseNamesWidgetMenu[[7]] <- gaction(gettext("Export Case Attributes", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir=.rqda,conName="qdacon")) {
            fName <- gfile(type='save',filter=list("csv"=list(pattern=c("*.csv"))))
            Encoding(fName) <- "UTF-8"
            ## NOTE(review): the unescaped dot in ".csv$" matches any character
            ## before "csv"; "\\.csv$" would be the strict pattern.
            if (length(grep(".csv$",fName))==0) fName <- sprintf("%s.csv",fName)
            write.csv(getAttr("case"), row.names=FALSE, file=fName, na="")
        }
    })
    CaseNamesWidgetMenu[[8]] <- gaction(gettext("Sort All by Created Time", domain = "R-RQDA"), handler =function(h, ...)
    {
        CaseNamesUpdate(.rqda$.CasesNamesWidget,sortByTime = TRUE)
    })
    ## "Web Search" submenu: look up the selected case name in a search engine.
    search_lst <- vector("list", 4)
    search_lst[[1]] <- gaction("Google", handler =function(h, ...)
    {
        KeyWord <- svalue(.rqda$.CasesNamesWidget)
        if (length(KeyWord)!=0){
            KeyWord <- iconv(KeyWord, from="UTF-8")
            browseURL(sprintf("http://www.google.com/search?q=%s",KeyWord))
        }
    })
    search_lst[[2]] <- gaction("Yahoo", handler =function(h, ...)
    {
        KeyWord <- svalue(.rqda$.CasesNamesWidget)
        if (length(KeyWord)!=0){
            KeyWord <- iconv(KeyWord, from="UTF-8")
            browseURL(sprintf("http://search.yahoo.com/search;_ylt=A0oGkmFV.CZJNssAOK.l87UF?p=%s&ei=UTF-8&iscqry=&fr=sfp&fr2=sfp" ,KeyWord))
        }
    })
    search_lst[[3]] <- gaction("Baidu", handler =function(h, ...)
    {
        KeyWord <- svalue(.rqda$.CasesNamesWidget)
        if (length(KeyWord)!=0){
            KeyWord <- iconv(KeyWord, from="UTF-8",to="CP936")
            ## should be in CP936 to work properly.
            browseURL(sprintf("http://www.baidu.com/s?wd=%s",paste("%",paste(charToRaw(KeyWord),sep="",collapse="%"),sep="",collapse="")))
        }
    })
    search_lst[[4]] <- gaction("Sogou", handler =function(h, ...)
    {
        KeyWord <- svalue(.rqda$.CasesNamesWidget)
        if (length(KeyWord)!=0){
            KeyWord <- iconv(KeyWord, from="UTF-8",to="CP936")## should be in CP936 to work properly.
            browseURL(sprintf("http://www.sogou.com/sohu?query=%s",paste("%",paste(charToRaw(KeyWord),sep="",collapse="%"),sep="",collapse="")))
        }
    })
    CaseNamesWidgetMenu[[gettext("Web Search", domain = "R-RQDA")]] <- search_lst
    CaseNamesWidgetMenu
}

## pop-up menu of .rqda$.FileofCase
## Build the right-click pop-up menu for the files-of-case widget.
GetFileofCaseWidgetMenu <- function() {
    FileofCaseWidgetMenu <- list()
    ## not used yet.
    FileofCaseWidgetMenu[[1]] <- gaction(gettext("Add To File Category ...", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir = .rqda, conName = "qdacon", message = FALSE)) {
            AddToFileCategory(Widget=.rqda$.FileofCase,updateWidget=FALSE)
        }
    })
    ## Drop: unlink the files from this case (caselinkage status=0), keep files.
    FileofCaseWidgetMenu[[2]] <- gaction(gettext("Drop Selected File(s)", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir = .rqda, conName = "qdacon", message = FALSE)) {
            FileOfCat <- svalue(.rqda$.FileofCase)
            if ((NumofSelected <- length(FileOfCat)) ==0) {
                gmessage(gettext("Please select the Files you want to delete.", domain = "R-RQDA"),con=TRUE)}
            else {
                ## Give a confirm msg
                del <- gconfirm(sprintf(gettext("Delete %i file(s) from this category. Are you sure?", domain = "R-RQDA"),NumofSelected),con=TRUE,icon="question")
                if (isTRUE(del)){
                    SelectedCase <- svalue(.rqda$.CasesNamesWidget)
                    ## Encoding(SelectedCase) <- Encoding(FileOfCat)<- "UTF-8"
                    SelectedCase <- enc(SelectedCase,"UTF-8")
                    FileOfCat <- enc(FileOfCat,"UTF-8")
                    caseid <- rqda_sel(sprintf("select id from cases where status=1 and name='%s'",SelectedCase))[,1]
                    for (i in FileOfCat){
                        fid <- rqda_sel(sprintf("select id from source where status=1 and name='%s'",i))[,1]
                        rqda_exe(sprintf("update caselinkage set status=0 where caseid=%i and fid=%i",caseid,fid))
                    }
                    ## update Widget
                    UpdateFileofCaseWidget()
                }
            }
        }
    })
    ## Delete: soft-delete the files themselves plus all dependent rows.
    FileofCaseWidgetMenu[[3]] <- gaction(gettext("Delete Selected File(s)", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir=.rqda,conName="qdacon")) {
            SelectedFile <- svalue(.rqda$.FileofCase)
            Encoding(SelectedFile) <- "UTF-8"
            for (i in SelectedFile){
                fid <- rqda_sel( sprintf("select id from source where name='%s'",i))$id
                rqda_exe( sprintf("update source set status=0 where name='%s'",i))
                rqda_exe( sprintf("update caselinkage set status=0 where fid=%i",fid))
                rqda_exe( sprintf("update treefile set status=0 where fid=%i",fid))
                rqda_exe( sprintf("update coding set status=0 where fid=%i",fid))
            }
            .rqda$.FileofCase[] <- setdiff(.rqda$.FileofCase[],SelectedFile)
        }
    })
    FileofCaseWidgetMenu[[4]] <- gaction(gettext("Edit Selected File", domain = "R-RQDA"), handler =function(h, ...)
    {
        EditFileFun(FileNameWidget=.rqda$.FileofCase)
    })
    FileofCaseWidgetMenu[[5]] <- gaction(gettext("File Memo", domain = "R-RQDA"), handler =function(h, ...)
    {
        MemoWidget(gettext("File", domain = "R-RQDA"),.rqda$.FileofCase,"source")
    })
    FileofCaseWidgetMenu[[6]] <- gaction(gettext("Rename selected File", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir=.rqda,conName="qdacon")) {
            selectedFN <- svalue(.rqda$.FileofCase)
            if (length(selectedFN)==0){
                gmessage(gettext("Select a file first.", domain = "R-RQDA"),icon="error",con=TRUE)
            } else {
                NewFileName <- ginput(gettext("Enter new file name. ", domain = "R-RQDA"),text=selectedFN, icon="info")
                if (!is.na(NewFileName)) {
                    Encoding(NewFileName) <- "UTF-8"
                    rename(selectedFN,NewFileName,"source")
                    ## keep the widget list in sync with the renamed entry
                    Fnames <- .rqda$.FileofCase[]
                    Fnames[Fnames==selectedFN] <- NewFileName
                    .rqda$.FileofCase[] <- Fnames
                }
            }}
    })
    FileofCaseWidgetMenu[[7]] <- gaction(gettext("Search Files within Selected Case", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir = .rqda, conName = "qdacon", message = FALSE)) {
            ## pre-fill the dialog with the last pattern used in this session
            pattern <- ifelse(is.null(.rqda$lastsearch),"file like '%%'",.rqda$lastsearch)
            pattern <- ginput(gettext("Please input a search pattern.", domain = "R-RQDA"),text=pattern)
            if (!is.na(pattern)){
                Fid <- getFileIds("case")
                tryCatch(searchFiles(pattern,Fid=Fid,Widget=".FileofCase",is.UTF8=TRUE),error=function(e) gmessage(gettext("Error~~~.", domain = "R-RQDA")),con=TRUE)
                assign("lastsearch",pattern,envir=.rqda)
            }
        }
    })
    ## "Show ..." submenu: filter the file list by coded/uncoded status.
    show_lst <- vector("list", 3)
    show_lst[[1]] <- gaction(gettext("Show All by Sorted by Imported Time", domain = "R-RQDA"), handler =function(h, ...)
    {
        ## UpdateFileofCaseWidget()
        if (is_projOpen(envir=.rqda,conName="qdacon")) {
            fid <- getFileIds(condition="case",type="all")
            FileNameWidgetUpdate(FileNamesWidget=.rqda$.FileofCase,FileId=fid)
        }
    })
    show_lst[[2]] <- gaction(gettext("Show Coded Files Only (sorted)", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir=.rqda,conName="qdacon")) {
            fid <- getFileIds(condition="case",type="coded")
            FileNameWidgetUpdate(FileNamesWidget=.rqda$.FileofCase,FileId=fid)
        }
    })
    show_lst[[3]] <- gaction(gettext("Show Uncoded Files Only (sorted)", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir=.rqda,conName="qdacon")) {
            fid <- getFileIds(condition="case",type="uncoded")
            FileNameWidgetUpdate(FileNamesWidget=.rqda$.FileofCase,FileId=fid)
        }
    })
    FileofCaseWidgetMenu[[gettext("Show ...", domain = "R-RQDA")]] <- show_lst
    FileofCaseWidgetMenu[[9]] <- gaction(gettext("Show Selected File Property", domain = "R-RQDA"), handler =function(h, ...)
    {
        if (is_projOpen(envir = .rqda, conName = "qdacon", message = FALSE)) {
            ShowFileProperty(Fid=getFileIds("case","selected"))
        }
    })
    FileofCaseWidgetMenu
}
### -----------------------------MICE.IMPUTE.PMM-------------------------
#'Imputation by predictive mean matching
#'
#'Imputes univariate missing data using predictive mean matching
#'
#'Imputation of \code{y} by predictive mean matching, based on Rubin (1987, p.
#'168, formulas a and b). The procedure is as follows: \enumerate{ \item
#'Estimate beta and sigma by linear regression \item Draw beta* and sigma* from
#'the proper posterior \item Compute predicted values for \code{yobs}beta and
#'\code{ymis}beta* \item For each \code{ymis}, find the observation with
#'closest predicted value, and take its observed value in \code{y} as the
#'imputation. \item If there is more than one candidate, make a random draw
#'among them. Note: The matching is done on predicted \code{y}, NOT on
#'observed \code{y}. }
#'
#'@note \code{mice.impute.pmm2()} was used in \code{mice 2.13} and after as
#'a faster alternative to \code{mice.impute.pmm()}.
#'Starting with \code{mice 2.14}, \code{mice.impute.pmm()} has been
#'replaced by \code{mice.impute.pmm2()}. The \code{mice.impute.pmm2()}
#'function will be deprecated in future versions of \pkg{mice}.
#'
#'@aliases mice.impute.pmm pmm
#'@param y Numeric vector with incomplete data
#'@param ry Response pattern of \code{y} (\code{TRUE}=observed,
#'\code{FALSE}=missing)
#'@param x Design matrix with \code{length(y)} rows and \code{p} columns
#'containing complete covariates.
#'@param ... Other named arguments, passed on to \code{.norm.draw()} and
#'\code{.pmm.match()}.
#'@return Numeric vector of length \code{sum(!ry)} with imputations
#'@author Stef van Buuren, Karin Groothuis-Oudshoorn, 2000, 2012
#'@references Little, R.J.A. (1988), Missing data adjustments in large surveys
#'(with discussion), Journal of Business Economics and Statistics, 6, 287--301.
#'
#'Rubin, D.B. (1987). Multiple imputation for nonresponse in surveys. New York:
#'Wiley.
#'
#'Van Buuren, S., Brand, J.P.L., Groothuis-Oudshoorn C.G.M., Rubin, D.B. (2006)
#'Fully conditional specification in multivariate imputation. \emph{Journal of
#'Statistical Computation and Simulation}, \bold{76}, 12, 1049--1064.
#'
#'Van Buuren, S., Groothuis-Oudshoorn, K. (2011). \code{mice}: Multivariate
#'Imputation by Chained Equations in \code{R}. \emph{Journal of Statistical
#'Software}, \bold{45}(3), 1-67. \url{http://www.jstatsoft.org/v45/i03/}
#'@keywords datagen
#'@export
mice.impute.pmm <- function(y, ry, x, ...) {
    ## Design matrix with an explicit intercept column.
    design <- cbind(1, as.matrix(x))
    ## Factors are matched on their integer codes; imputations are still drawn
    ## from the original y, so factor levels are preserved.
    ynumeric <- if (is.factor(y)) as.integer(y) else y
    ## Draw regression parameters from the proper posterior
    ## (.norm.draw is an internal mice helper).
    parm <- .norm.draw(ynumeric, ry, design, ...)
    ## Deliberate asymmetry (since 10/2/2010): the observed cases are predicted
    ## with the *estimated* coefficients and the missing cases with the *drawn*
    ## coefficients, which creates between-imputation variability even in the
    ## one-predictor case.
    pred.obs <- design[ry, ] %*% parm$coef
    pred.mis <- design[!ry, ] %*% parm$beta
    ## For every missing case, find the observed cases with the closest
    ## predictions, sample one donor at random (default pool of 3), and
    ## return that donor's observed y.  Matching is on yhat, NOT on y.
    apply(as.array(pred.mis), 1, .pmm.match, yhat = pred.obs, y = y[ry], ...)
}

# -------------------------.PMM.MATCH--------------------------------
#' Finds an imputed value from matches in the predictive metric
#'
#' This function finds matches among the observed data in the predictive
#' mean metric. It selects the \code{donors} closest matches, randomly
#' samples one of the donors, and returns the observed value of the
#' match.
#' #'@aliases .pmm.match #'@param z A scalar containing the predicted value for the current case #'to be imputed. #'@param yhat A vector containing the predicted values for all cases with an observed #'outcome. #'@param y A vector of \code{length(yhat)} elements containing the observed outcome #'@param donors The size of the donor pool among which a draw is made. The default is #'\code{donors = 3}. Setting \code{donors = 1} always selects the closest match. Values #'between 3 and 10 provide the best results. #'@param \dots Other parameters (not used). #'@return A scalar containing the observed value of the selected donor. #'@author Stef van Buuren #'@references #'Schenker N \& Taylor JMG (1996) Partially parametric techniques #'for multiple imputation. \emph{Computational Statistics and Data Analysis}, 22, 425-446. #' #'Little RJA (1988) Missing-data adjustments in large surveys (with discussion). #'\emph{Journal of Business Economics and Statistics}, 6, 287-301. #' #'@export .pmm.match <- function(z, yhat = yhat, y = y, donors = 3, ...) { d <- abs(yhat - z) f <- d > 0 a1 <- ifelse(any(f), min(d[f]), 1) d <- d + runif(length(d), 0, a1/10^10) if (donors == 1) return(y[which.min(d)]) ds <- sort.int(d, partial = donors) m <- sample(y[d <= ds[donors]], 1) return(m) } ### -----------------------------MICE.IMPUTE.PMM2------------------------ A faster version of mice.impute.pmm() mice.impute.pmm2 <- function(y, ry, x, ...) { mess <- "Method 'pmm2' is replaced by method 'pmm'" stop(mess) }
/R/mice.impute.pmm.r
no_license
RossBoylan/mice
R
false
false
5,651
r
### -----------------------------MICE.IMPUTE.PMM------------------------- #'Imputation by predictive mean matching #' #'Imputes univariate missing data using predictive mean matching #' #'Imputation of \code{y} by predictive mean matching, based on Rubin (1987, p. #'168, formulas a and b). The procedure is as follows: \enumerate{ \item #'Estimate beta and sigma by linear regression \item Draw beta* and sigma* from #'the proper posterior \item Compute predicted values for \code{yobs}beta and #'\code{ymis}beta* \item For each \code{ymis}, find the observation with #'closest predicted value, and take its observed value in \code{y} as the #'imputation. \item If there is more than one candidate, make a random draw #'among them. Note: The matching is done on predicted \code{y}, NOT on #'observed \code{y}. } #' #'@note \code{mice.impute.pmm2()} was used in \code{mice 2.13} and #'after a faster alternative to \code{mice.impute.pmm()}. #'Starting with \code{mice 2.14}, \code{mice.impute.pmm()} has been #'replaced by \code{mice.impute.pmm2()}. The \code{mice.impute.pmm2()} #'function will be depricated in future versions of \pkg{mice}. #' #'@aliases mice.impute.pmm pmm #'@param y Numeric vector with incomplete data #'@param ry Response pattern of \code{y} (\code{TRUE}=observed, #'\code{FALSE}=missing) #'@param x Design matrix with \code{length(y)} rows and \code{p} columns #'containing complete covariates. #'@param ... Other named arguments. #'@return Numeric vector of length \code{sum(!ry)} with imputations #'@author Stef van Buuren, Karin Groothuis-Oudshoorn, 2000, 2012 #'@references Little, R.J.A. (1988), Missing data adjustments in large surveys #'(with discussion), Journal of Business Economics and Statistics, 6, 287--301. #' #'Rubin, D.B. (1987). Multiple imputation for nonresponse in surveys. New York: #'Wiley. #' #'Van Buuren, S., Brand, J.P.L., Groothuis-Oudshoorn C.G.M., Rubin, D.B. (2006) #'Fully conditional specification in multivariate imputation. 
\emph{Journal of #'Statistical Computation and Simulation}, \bold{76}, 12, 1049--1064. #' #'Van Buuren, S., Groothuis-Oudshoorn, K. (2011). \code{mice}: Multivariate #'Imputation by Chained Equations in \code{R}. \emph{Journal of Statistical #'Software}, \bold{45}(3), 1-67. \url{http://www.jstatsoft.org/v45/i03/} #'@keywords datagen #'@export mice.impute.pmm <- function(y, ry, x, ...) # Imputation of y by predictive mean matching, based on # Rubin (p. 168, formulas a and b). # The procedure is as follows: # 1. Draw beta and sigma from the proper posterior # 2. Compute predicted values for yobs and ymis # 3. For each ymis, find the three observations with closest predicted value, # sample one randomly, and take its observed y as the imputation. # NOTE: The matching is on yhat, NOT on y, which deviates from formula b. # ry=TRUE if y observed, ry=FALSE if y missing # # Authors: S. van Buuren and K. Groothuis-Oudshoorn # Version 10/2/2010: yhatobs is calculated using the estimated # rather than the drawn regression weights # this creates between imputation variability # for the one-predictor case # Version 06/12/2010 A random draw is made from the closest THREE donors. # Version 25/04/2012 Extended to work with factors # version 31/10/2012 Using faster pmm2 { x <- cbind(1, as.matrix(x)) ynum <- y if (is.factor(y)) ynum <- as.integer(y) ## added 25/04/2012 parm <- .norm.draw(ynum, ry, x, ...) ## bug fix 10apr2013 yhatobs <- x[ry, ] %*% parm$coef yhatmis <- x[!ry, ] %*% parm$beta return(apply(as.array(yhatmis), 1, .pmm.match, yhat = yhatobs, y = y[ry], ...)) } # -------------------------.PMM.MATCH-------------------------------- #' Finds an imputed value from matches in the predictive metric #' #' This function finds matches among the observed data in the predictive #' mean metric. It selects the \code{donors} closest matches, randomly #' samples one of the donors, and returns the observed value of the #' match. 
#' #'@aliases .pmm.match #'@param z A scalar containing the predicted value for the current case #'to be imputed. #'@param yhat A vector containing the predicted values for all cases with an observed #'outcome. #'@param y A vector of \code{length(yhat)} elements containing the observed outcome #'@param donors The size of the donor pool among which a draw is made. The default is #'\code{donors = 3}. Setting \code{donors = 1} always selects the closest match. Values #'between 3 and 10 provide the best results. #'@param \dots Other parameters (not used). #'@return A scalar containing the observed value of the selected donor. #'@author Stef van Buuren #'@references #'Schenker N \& Taylor JMG (1996) Partially parametric techniques #'for multiple imputation. \emph{Computational Statistics and Data Analysis}, 22, 425-446. #' #'Little RJA (1988) Missing-data adjustments in large surveys (with discussion). #'\emph{Journal of Business Economics and Statistics}, 6, 287-301. #' #'@export .pmm.match <- function(z, yhat = yhat, y = y, donors = 3, ...) { d <- abs(yhat - z) f <- d > 0 a1 <- ifelse(any(f), min(d[f]), 1) d <- d + runif(length(d), 0, a1/10^10) if (donors == 1) return(y[which.min(d)]) ds <- sort.int(d, partial = donors) m <- sample(y[d <= ds[donors]], 1) return(m) } ### -----------------------------MICE.IMPUTE.PMM2------------------------ A faster version of mice.impute.pmm() mice.impute.pmm2 <- function(y, ry, x, ...) { mess <- "Method 'pmm2' is replaced by method 'pmm'" stop(mess) }
\name{plotmat} \alias{plotmat} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Plot a matrix } \description{ Plot an image of a matrix using ggplot2 } \usage{ plotmat(mat, color = "green", title = NULL, args = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{mat}{Matrix to plot} \item{color}{Color scheme: "green", "red", or "wes"} \item{title}{optional plot title} \item{args}{optional additional ggplot arguments} } \value{ sends image to active graphics device or outputs a ggplot object } \author{ Evan Poworoznek } \note{ Uses reshape2::melt which may be aliased with reshape::melt } \examples{ mat = diag(1:9 - 5) plotmat(mat) }
/man/plotmat.Rd
no_license
poworoznek/infinitefactor
R
false
false
705
rd
\name{plotmat} \alias{plotmat} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Plot a matrix } \description{ Plot an image of a matrix using ggplot2 } \usage{ plotmat(mat, color = "green", title = NULL, args = NULL) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{mat}{Matrix to plot} \item{color}{Color scheme: "green", "red", or "wes"} \item{title}{optional plot title} \item{args}{optional additional ggplot arguments} } \value{ sends image to active graphics device or outputs a ggplot object } \author{ Evan Poworoznek } \note{ Uses reshape2::melt which may be aliased with reshape::melt } \examples{ mat = diag(1:9 - 5) plotmat(mat) }
#' @title Download and Create a List of Raster Stack Objects From CRU CL2.0 Climatology Variables #' #'@description This function automates downloading and importing CRU CL2.0 #'climatology data into R and creates a list of raster stacks of the data. If #'requested, minimum and maximum temperature may also be automatically #'calculated as described in the data readme.txt file. #' #'Nomenclature and units from readme.txt: #'\describe{ #'\item{pre}{precipitation (millimetres/month)} #' \describe{ #' \item{cv}{cv of precipitation (percent)} #' } #'\item{rd0}{wet-days (no days with >0.1mm rain per month)} #'\item{tmp}{mean temperature (degrees Celsius)} #'\item{dtr}{mean diurnal temperature range (degrees Celsius)} #'\item{reh}{relative humidity (percent)} #'\item{sunp}{sunshine (percent of maximum possible (percent of day length))} #'\item{frs}{ground-frost (number of days with ground-frost per month)} #'\item{wnd}{10 metre windspeed (metres/second)} #'\item{elv}{elevation (automatically converted to metres)} #'} #'For more information see the description of the data provided by CRU, #'\url{https://crudata.uea.ac.uk/cru/data/hrg/tmc/readme.txt} #' #' @details This function generates a data.frame object in R with the following #' possible fields as specified by the user: #' @param pre Logical. Fetch precipitation (millimetres/month) from server and #' return in a raster stack? Defaults to FALSE. #' @param pre_cv Logical. Fetch cv of precipitation (percent) from server and #' return in a raster stack? Defaults to FALSE. #' @param rd0 Logical. Fetch wet-days (number days with >0.1 millimetres rain #' per month) and return in a raster stack? Defaults to FALSE. #' @param dtr Logical. Fetch mean diurnal temperature range (degrees Celsius) #' and return it in a raster stack? Defaults to FALSE. #' @param tmp Logical. Fetch temperature (degrees Celsius) and return it in the #' raster stack? Defaults to FALSE. #' @param tmn Logical. 
Calculate minimum temperature values (degrees Celsius) #' and return it in a raster stack? Defaults to FALSE. #' @param tmx Logical. Calculate maximum temperature (degrees Celsius) and #' return it in a raster stack? Defaults to FALSE. #' @param reh Logical. Fetch relative humidity and return it in a raster stack? #' Defaults to FALSE. #' @param sunp Logical. Fetch sunshine, percent of maximum possible (percent of #' day length) and return it in raster stack? Defaults to FALSE. #' @param frs Logical. Fetch ground-frost records (number of days with ground- #' frost per month) and return it in raster stack? Defaults to FALSE. #' @param wnd Logical. Fetch 10m wind speed (metres/second) and return it in the #' raster stack? Defaults to FALSE. #' @param elv Logical. Fetch elevation (converted to metres) and return it in a #' raster layer object? Defaults to FALSE. #' #' @examples #' # Download data and create a raster stack of precipitation and temperature #' \dontrun{ #' CRU_pre_tmp <- create_CRU_stack(pre = TRUE, tmp = TRUE) #'} #' @seealso #' \code{\link{create_CRU_df}} #' #' @note #' This package automatically converts elevation values from kilometres to #' metres. #' #' This package crops all spatial outputs to an extent of ymin = -60, ymax = 85, #' xmin = -180, xmax = 180. Note that the original wind data include land area #' for parts of Antarctica, these data are excluded in the raster stacks #' generated by this function. 
#' #' @export create_CRU_stack <- function(pre = FALSE, pre_cv = FALSE, rd0 = FALSE, tmp = FALSE, dtr = FALSE, reh = FALSE, tmn = FALSE, tmx = FALSE, sunp = FALSE, frs = FALSE, wnd = FALSE, elv = FALSE) { cache_dir <- tempdir() if (!isTRUE(pre) & !isTRUE(pre_cv) & !isTRUE(rd0) & !isTRUE(tmp) & !isTRUE(dtr) & !isTRUE(reh) & !isTRUE(tmn) & !isTRUE(tmx) & !isTRUE(sunp) & !isTRUE(frs) & !isTRUE(wnd) & !isTRUE(elv)) { stop("You must select at least one parameter for download.") } wrld <- raster::raster( nrows = 930, ncols = 2160, ymn = -65, ymx = 90, xmn = -180, xmx = 180 ) wrld[] <- NA month_names <- c("jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec") # Create raster objects using cellFromXY and generate a raster stack # create.stack takes pre, tmp, tmn and tmx and creates a raster # object stack of 12 month data .get_CRU(pre, pre_cv, rd0, tmp, dtr, reh, tmn, tmx, sunp, frs, wnd, elv, cache_dir) files <- list.files(cache_dir, pattern = ".dat.gz$", full.names = TRUE) CRU_stack_list <- plyr::llply(.fun = .create_stack, files, wrld, month_names, pre, pre_cv, .progress = "text") names(CRU_stack_list) <- substr(basename(files), 12, 14) # cacluate tmn ------------------------------------------------------------- if (isTRUE(tmn)) { CRU_stack_list$tmn <- CRU_stack_list$tmp - (0.5 * CRU_stack_list$dtr) } # cacluate tmx ------------------------------------------------------------- if (isTRUE(tmx)) { CRU_stack_list$tmx <- CRU_stack_list$tmp + (0.5 * CRU_stack_list$dtr) } # cleanup if tmn/tmx specified but tmp/dtr not ----------------------------- if (!isTRUE(tmp) | !isTRUE(dtr) & isTRUE(tmx) | isTRUE(tmn)) { CRU_stack_list[which(names(CRU_stack_list) %in% c("tmp", "dtr"))] <- NULL } if (!isTRUE(dtr) & isTRUE(tmx) | isTRUE(tmn)) { CRU_stack_list[which(names(CRU_stack_list) %in% "dtr")] <- NULL } if (!isTRUE(tmp) & isTRUE(tmx) | isTRUE(tmn)) { CRU_stack_list[which(names(CRU_stack_list) %in% "tmp")] <- NULL } return(CRU_stack_list) } #' @noRd 
.create_stack <- function(files, wrld, month_names, pre, pre_cv) { wvar <- utils::read.table(files, header = FALSE, colClasses = "numeric") cells <- raster::cellFromXY(wrld, wvar[, c(2, 1)]) if (ncol(wvar) == 14) { for (j in 3:14) { wrld[cells] <- wvar[, j] if (j == 3) { y <- wrld } else y <- raster::stack(y, wrld) } names(y) <- month_names } else if (ncol(wvar) == 26) { if (isTRUE(pre) & isTRUE(pre_cv)) { for (k in 3:26) { wrld[cells] <- wvar[, k] if (k == 3) { y <- wrld } else y <- raster::stack(y, wrld) } names(y) <- c(month_names, paste0("pre_cv_", month_names)) } else if (isTRUE(pre)) { for (k in 3:14) { wrld[cells] <- wvar[, k] if (k == 3) { y <- wrld } else y <- raster::stack(y, wrld) } names(y) <- month_names } else if (isTRUE(pre_cv)) { for (k in 15:26) { wrld[cells] <- wvar[, k] if (k == 15) { y <- wrld } else y <- raster::stack(y, wrld) } names(y) <- paste0("pre_cv_", month_names) } } else if (ncol(wvar) == 3) { wrld[cells] <- wvar[, 3] * 1000 y <- wrld names(y) <- "elv" } y <- raster::crop(y, raster::extent(-180, 180, -60, 85)) return(y) }
/R/create_CRU_stack.R
permissive
ldecicco-USGS/getCRUCLdata
R
false
false
7,635
r
#' @title Download and Create a List of Raster Stack Objects From CRU CL2.0 Climatology Variables #' #'@description This function automates downloading and importing CRU CL2.0 #'climatology data into R and creates a list of raster stacks of the data. If #'requested, minimum and maximum temperature may also be automatically #'calculated as described in the data readme.txt file. #' #'Nomenclature and units from readme.txt: #'\describe{ #'\item{pre}{precipitation (millimetres/month)} #' \describe{ #' \item{cv}{cv of precipitation (percent)} #' } #'\item{rd0}{wet-days (no days with >0.1mm rain per month)} #'\item{tmp}{mean temperature (degrees Celsius)} #'\item{dtr}{mean diurnal temperature range (degrees Celsius)} #'\item{reh}{relative humidity (percent)} #'\item{sunp}{sunshine (percent of maximum possible (percent of day length))} #'\item{frs}{ground-frost (number of days with ground-frost per month)} #'\item{wnd}{10 metre windspeed (metres/second)} #'\item{elv}{elevation (automatically converted to metres)} #'} #'For more information see the description of the data provided by CRU, #'\url{https://crudata.uea.ac.uk/cru/data/hrg/tmc/readme.txt} #' #' @details This function generates a data.frame object in R with the following #' possible fields as specified by the user: #' @param pre Logical. Fetch precipitation (millimetres/month) from server and #' return in a raster stack? Defaults to FALSE. #' @param pre_cv Logical. Fetch cv of precipitation (percent) from server and #' return in a raster stack? Defaults to FALSE. #' @param rd0 Logical. Fetch wet-days (number days with >0.1 millimetres rain #' per month) and return in a raster stack? Defaults to FALSE. #' @param dtr Logical. Fetch mean diurnal temperature range (degrees Celsius) #' and return it in a raster stack? Defaults to FALSE. #' @param tmp Logical. Fetch temperature (degrees Celsius) and return it in the #' raster stack? Defaults to FALSE. #' @param tmn Logical. 
Calculate minimum temperature values (degrees Celsius) #' and return it in a raster stack? Defaults to FALSE. #' @param tmx Logical. Calculate maximum temperature (degrees Celsius) and #' return it in a raster stack? Defaults to FALSE. #' @param reh Logical. Fetch relative humidity and return it in a raster stack? #' Defaults to FALSE. #' @param sunp Logical. Fetch sunshine, percent of maximum possible (percent of #' day length) and return it in raster stack? Defaults to FALSE. #' @param frs Logical. Fetch ground-frost records (number of days with ground- #' frost per month) and return it in raster stack? Defaults to FALSE. #' @param wnd Logical. Fetch 10m wind speed (metres/second) and return it in the #' raster stack? Defaults to FALSE. #' @param elv Logical. Fetch elevation (converted to metres) and return it in a #' raster layer object? Defaults to FALSE. #' #' @examples #' # Download data and create a raster stack of precipitation and temperature #' \dontrun{ #' CRU_pre_tmp <- create_CRU_stack(pre = TRUE, tmp = TRUE) #'} #' @seealso #' \code{\link{create_CRU_df}} #' #' @note #' This package automatically converts elevation values from kilometres to #' metres. #' #' This package crops all spatial outputs to an extent of ymin = -60, ymax = 85, #' xmin = -180, xmax = 180. Note that the original wind data include land area #' for parts of Antarctica, these data are excluded in the raster stacks #' generated by this function. 
#' #' @export create_CRU_stack <- function(pre = FALSE, pre_cv = FALSE, rd0 = FALSE, tmp = FALSE, dtr = FALSE, reh = FALSE, tmn = FALSE, tmx = FALSE, sunp = FALSE, frs = FALSE, wnd = FALSE, elv = FALSE) { cache_dir <- tempdir() if (!isTRUE(pre) & !isTRUE(pre_cv) & !isTRUE(rd0) & !isTRUE(tmp) & !isTRUE(dtr) & !isTRUE(reh) & !isTRUE(tmn) & !isTRUE(tmx) & !isTRUE(sunp) & !isTRUE(frs) & !isTRUE(wnd) & !isTRUE(elv)) { stop("You must select at least one parameter for download.") } wrld <- raster::raster( nrows = 930, ncols = 2160, ymn = -65, ymx = 90, xmn = -180, xmx = 180 ) wrld[] <- NA month_names <- c("jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec") # Create raster objects using cellFromXY and generate a raster stack # create.stack takes pre, tmp, tmn and tmx and creates a raster # object stack of 12 month data .get_CRU(pre, pre_cv, rd0, tmp, dtr, reh, tmn, tmx, sunp, frs, wnd, elv, cache_dir) files <- list.files(cache_dir, pattern = ".dat.gz$", full.names = TRUE) CRU_stack_list <- plyr::llply(.fun = .create_stack, files, wrld, month_names, pre, pre_cv, .progress = "text") names(CRU_stack_list) <- substr(basename(files), 12, 14) # cacluate tmn ------------------------------------------------------------- if (isTRUE(tmn)) { CRU_stack_list$tmn <- CRU_stack_list$tmp - (0.5 * CRU_stack_list$dtr) } # cacluate tmx ------------------------------------------------------------- if (isTRUE(tmx)) { CRU_stack_list$tmx <- CRU_stack_list$tmp + (0.5 * CRU_stack_list$dtr) } # cleanup if tmn/tmx specified but tmp/dtr not ----------------------------- if (!isTRUE(tmp) | !isTRUE(dtr) & isTRUE(tmx) | isTRUE(tmn)) { CRU_stack_list[which(names(CRU_stack_list) %in% c("tmp", "dtr"))] <- NULL } if (!isTRUE(dtr) & isTRUE(tmx) | isTRUE(tmn)) { CRU_stack_list[which(names(CRU_stack_list) %in% "dtr")] <- NULL } if (!isTRUE(tmp) & isTRUE(tmx) | isTRUE(tmn)) { CRU_stack_list[which(names(CRU_stack_list) %in% "tmp")] <- NULL } return(CRU_stack_list) } #' @noRd 
.create_stack <- function(files, wrld, month_names, pre, pre_cv) { wvar <- utils::read.table(files, header = FALSE, colClasses = "numeric") cells <- raster::cellFromXY(wrld, wvar[, c(2, 1)]) if (ncol(wvar) == 14) { for (j in 3:14) { wrld[cells] <- wvar[, j] if (j == 3) { y <- wrld } else y <- raster::stack(y, wrld) } names(y) <- month_names } else if (ncol(wvar) == 26) { if (isTRUE(pre) & isTRUE(pre_cv)) { for (k in 3:26) { wrld[cells] <- wvar[, k] if (k == 3) { y <- wrld } else y <- raster::stack(y, wrld) } names(y) <- c(month_names, paste0("pre_cv_", month_names)) } else if (isTRUE(pre)) { for (k in 3:14) { wrld[cells] <- wvar[, k] if (k == 3) { y <- wrld } else y <- raster::stack(y, wrld) } names(y) <- month_names } else if (isTRUE(pre_cv)) { for (k in 15:26) { wrld[cells] <- wvar[, k] if (k == 15) { y <- wrld } else y <- raster::stack(y, wrld) } names(y) <- paste0("pre_cv_", month_names) } } else if (ncol(wvar) == 3) { wrld[cells] <- wvar[, 3] * 1000 y <- wrld names(y) <- "elv" } y <- raster::crop(y, raster::extent(-180, 180, -60, 85)) return(y) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_income_pay_village.R \docType{data} \name{income_pay_village} \alias{income_pay_village} \title{Dataset of average income and consumption of village 1985s-2003s in chapter11} \format{a dataframe with 19 rows and 4 variables \describe{ \item{t}{year} \item{income}{average pure income per year} \item{expend}{average expenditure per year} \item{cpi}{consumption price index} }} \usage{ income_pay_village } \description{ A dataset containing t,income,expend and cpi 4 variables of 19 objects } \keyword{datasets}
/man/income_pay_village.Rd
no_license
zhaoxue-xmu/RDA
R
false
true
600
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_income_pay_village.R \docType{data} \name{income_pay_village} \alias{income_pay_village} \title{Dataset of average income and consumption of village 1985s-2003s in chapter11} \format{a dataframe with 19 rows and 4 variables \describe{ \item{t}{year} \item{income}{average pure income per year} \item{expend}{average expenditure per year} \item{cpi}{consumption price index} }} \usage{ income_pay_village } \description{ A dataset containing t,income,expend and cpi 4 variables of 19 objects } \keyword{datasets}
# Building a Prod-Ready, Robust Shiny Application. # # README: each step of the dev files is optional, and you don't have to # fill every dev scripts before getting started. # 01_start.R should be filled at start. # 02_dev.R should be used to keep track of your development during the project. # 03_deploy.R should be used once you need to deploy your app. # # ######################################## #### CURRENT FILE: ON START SCRIPT ##### ######################################## ## Fill the DESCRIPTION ---- ## Add meta data about your application golem::fill_desc( pkg_name = "energyuse", # The Name of the package containing the App pkg_title = "My Energy Usage", # The Title of the package containing the App pkg_description = "My energy usage dashboard.", # The Description of the package containing the App author_first_name = "Matthew", # Your First Name author_last_name = "Henderson", # Your Last Name author_email = "matthew.james.henderson@gmail.com", # Your Email repo_url = "https://github.com/MHenderson/energyuse" # The URL of the GitHub Repo (optional) ) ## Set {golem} options ---- golem::set_golem_options() ## Create Common Files ---- ## See ?usethis for more information usethis::use_mit_license( copyright_holder = "Matthew Henderson" ) # You can set another license here usethis::use_readme_rmd( open = FALSE ) usethis::use_code_of_conduct() usethis::use_lifecycle_badge( "Experimental" ) usethis::use_news_md( open = FALSE ) ## Use git ---- usethis::use_git() ## Init Testing Infrastructure ---- ## Create a template for tests golem::use_recommended_tests() ## Use Recommended Packages ---- golem::use_recommended_deps() ## Favicon ---- # If you want to change the favicon (default is golem's one) golem::remove_favicon() golem::use_favicon() # path = "path/to/ico". Can be an online file. ## Add helper functions ---- golem::use_utils_ui() golem::use_utils_server() # You're now set! ---- # go to dev/02_dev.R rstudioapi::navigateToFile( "dev/02_dev.R" )
/dev/01_start.R
permissive
MHenderson/energy-use
R
false
false
2,009
r
# Building a Prod-Ready, Robust Shiny Application. # # README: each step of the dev files is optional, and you don't have to # fill every dev scripts before getting started. # 01_start.R should be filled at start. # 02_dev.R should be used to keep track of your development during the project. # 03_deploy.R should be used once you need to deploy your app. # # ######################################## #### CURRENT FILE: ON START SCRIPT ##### ######################################## ## Fill the DESCRIPTION ---- ## Add meta data about your application golem::fill_desc( pkg_name = "energyuse", # The Name of the package containing the App pkg_title = "My Energy Usage", # The Title of the package containing the App pkg_description = "My energy usage dashboard.", # The Description of the package containing the App author_first_name = "Matthew", # Your First Name author_last_name = "Henderson", # Your Last Name author_email = "matthew.james.henderson@gmail.com", # Your Email repo_url = "https://github.com/MHenderson/energyuse" # The URL of the GitHub Repo (optional) ) ## Set {golem} options ---- golem::set_golem_options() ## Create Common Files ---- ## See ?usethis for more information usethis::use_mit_license( copyright_holder = "Matthew Henderson" ) # You can set another license here usethis::use_readme_rmd( open = FALSE ) usethis::use_code_of_conduct() usethis::use_lifecycle_badge( "Experimental" ) usethis::use_news_md( open = FALSE ) ## Use git ---- usethis::use_git() ## Init Testing Infrastructure ---- ## Create a template for tests golem::use_recommended_tests() ## Use Recommended Packages ---- golem::use_recommended_deps() ## Favicon ---- # If you want to change the favicon (default is golem's one) golem::remove_favicon() golem::use_favicon() # path = "path/to/ico". Can be an online file. ## Add helper functions ---- golem::use_utils_ui() golem::use_utils_server() # You're now set! ---- # go to dev/02_dev.R rstudioapi::navigateToFile( "dev/02_dev.R" )
# due to the file size, I will only import the lines that are required for each plotting # open a connection con<-file("household_power_consumption.txt") open(con) # read exact positions from file power_Data<-read.table(con,skip=66637,nrow=2880,sep=";") # close the connection close(con) # proper labelling names colnames(power_Data) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage", "Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3") # set par() order top left->right -> bottom left->right par(mfrow=c(2,2)) # set suitable margin # par("mar") par(mar=c(4,4,4,2)) # graph 1 (topleft) plot(rownames(power_Data), power_Data$Global_active_power, type="l", xaxt="n", xlab="", ylab="Global Active Power (kilowatts)", cex.lab=0.7, cex.axis=0.7 ) axis(1,at=seq(0,2880,1440),labels=c("Thu","Fri","Sat"),cex.axis=0.7) # graph 2 (topright) plot(rownames(power_Data), power_Data$Voltage, type="l", xaxt="n", xlab="datetime", ylab="Voltage", cex.lab=0.7, cex.axis=0.7 ) axis(1,at=seq(0,2880,1440),labels=c("Thu","Fri","Sat"),cex.axis=0.7) # graph 3 (bottom left) plot(rownames(power_Data), power_Data$Sub_metering_1, type="n", xaxt="n", xlab="", yaxt="n", ylab="Energy sub metering", cex.axis=0.7, cex.lab=0.7) # amend to x & y axis axis(1,at=seq(0,2880,1440),labels=c("Thu","Fri","Sat"),cex.axis=0.7) axis(2,at=seq(0,30,10),labels=c(0,10,20,30),cex.axis=0.7) # line filled for each sub meter with different colours; lines(rownames(power_Data),power_Data$Sub_metering_1) lines(rownames(power_Data),power_Data$Sub_metering_2,col="red") lines(rownames(power_Data),power_Data$Sub_metering_3,col="blue") # attach legend legend("topright",col=c("black","red","blue"), lty=1,bty="n",cex=0.6,colnames(power_Data[7:9])) # graph 4 (bottom right) plot(rownames(power_Data), power_Data$Global_reactive_power, type="l", xaxt="n", xlab="datetime", ylab="Global_reactive_power", cex.lab=0.7, cex.axis=0.7 ) axis(1,at=seq(0,2880,1440),labels=c("Thu","Fri","Sat"),cex.axis=0.7) # 
output into png dev.copy(png,file="plot4.png",width = 480, height = 480) dev.off
/plot4.R
no_license
helenh2/ExData_Plotting1
R
false
false
2,266
r
# plot4.R -- draws the 2x2 panel figure (active power, voltage, sub-metering,
# reactive power) for 2007-02-01/02 of the UCI household power data set and
# saves it to plot4.png.

# Due to the file size, only the rows required for the plot are imported:
# rows 66638..69517 of the file hold the two days of interest.
# Open a connection, read the exact slice, then close the connection.
con <- file("household_power_consumption.txt")
open(con)
power_Data <- read.table(con, skip = 66637, nrow = 2880, sep = ";")
close(con)

# The header row was skipped along with the leading rows, so restore the
# proper column names by hand.
colnames(power_Data) <- c("Date", "Time", "Global_active_power",
                          "Global_reactive_power", "Voltage",
                          "Global_intensity", "Sub_metering_1",
                          "Sub_metering_2", "Sub_metering_3")

# FIX: plot against an explicit numeric minute index (1..2880) instead of
# rownames(power_Data), which is a character vector.
idx <- seq_len(nrow(power_Data))

# 2x2 layout filled top-left -> top-right -> bottom-left -> bottom-right,
# with margins tightened so four panels fit.
par(mfrow = c(2, 2))
par(mar = c(4, 4, 4, 2))

# Panel 1 (top left): global active power.
plot(idx, power_Data$Global_active_power, type = "l", xaxt = "n", xlab = "",
     ylab = "Global Active Power (kilowatts)", cex.lab = 0.7, cex.axis = 0.7)
axis(1, at = seq(0, 2880, 1440), labels = c("Thu", "Fri", "Sat"), cex.axis = 0.7)

# Panel 2 (top right): voltage.
plot(idx, power_Data$Voltage, type = "l", xaxt = "n", xlab = "datetime",
     ylab = "Voltage", cex.lab = 0.7, cex.axis = 0.7)
axis(1, at = seq(0, 2880, 1440), labels = c("Thu", "Fri", "Sat"), cex.axis = 0.7)

# Panel 3 (bottom left): empty frame, then the three sub-meters overlaid.
plot(idx, power_Data$Sub_metering_1, type = "n", xaxt = "n", xlab = "",
     yaxt = "n", ylab = "Energy sub metering", cex.axis = 0.7, cex.lab = 0.7)
# Custom x and y axes.
axis(1, at = seq(0, 2880, 1440), labels = c("Thu", "Fri", "Sat"), cex.axis = 0.7)
axis(2, at = seq(0, 30, 10), labels = c(0, 10, 20, 30), cex.axis = 0.7)
# One line per sub-meter, in different colours.
lines(idx, power_Data$Sub_metering_1)
lines(idx, power_Data$Sub_metering_2, col = "red")
lines(idx, power_Data$Sub_metering_3, col = "blue")
# FIX: pass the labels via the named `legend` argument instead of relying on
# legend()'s fallback that reinterprets a character `y` as the labels.
legend("topright", legend = colnames(power_Data[7:9]),
       col = c("black", "red", "blue"), lty = 1, bty = "n", cex = 0.6)

# Panel 4 (bottom right): global reactive power.
plot(idx, power_Data$Global_reactive_power, type = "l", xaxt = "n",
     xlab = "datetime", ylab = "Global_reactive_power",
     cex.lab = 0.7, cex.axis = 0.7)
axis(1, at = seq(0, 2880, 1440), labels = c("Thu", "Fri", "Sat"), cex.axis = 0.7)

# Copy the screen device to a 480x480 PNG file.
dev.copy(png, file = "plot4.png", width = 480, height = 480)
# BUG FIX: the original ended with `dev.off` (no parentheses), which merely
# prints the function object and never closes the PNG device, so plot4.png
# was left open/incomplete.  The device must actually be closed:
dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bikm1_MLBM_Binary_functions.R
\name{BinBlocICL_MLBM}
\alias{BinBlocICL_MLBM}
\title{BinBlocICL_MLBM function for computation of the ICL criterion in the MLBM}
\usage{
BinBlocICL_MLBM(a,b,x,y,z1,v1,w1)
}
\arguments{
\item{a}{a hyperparameter for priors on the mixing proportions. By default, a=4.}
\item{b}{a hyperparameter for the prior on the Bernoulli parameter. By default, b=1.}
\item{x}{binary matrix of observations (first matrix).}
\item{y}{binary matrix of observations (second matrix).}
\item{z1}{a numeric vector specifying the class of rows.}
\item{v1}{a numeric vector specifying the class of columns (first matrix).}
\item{w1}{a numeric vector specifying the class of columns (second matrix).}
}
\value{
a value of the ICL criterion.
}
\description{
Computes the Integrated Completed Likelihood (ICL) criterion for binary
data in the Multiple Latent Block Model (MLBM), given the row partition and
the two column partitions.
}
\examples{
require(bikm1)
set.seed(42)
n=200
J=120
K=120
g=2
h=2
l=2
theta=list()
theta$pi_g=1/g *matrix(1,g,1)
theta$rho_h=1/h *matrix(1,h,1)
theta$tau_l=1/l *matrix(1,l,1)
theta$alpha_gh=matrix(runif(4),ncol=h)
theta$beta_gl=matrix(runif(4),ncol=l)
data=BinBlocRnd_MLBM(n,J,K,theta)
res=BIKM1_MLBM_Binary(data$x,data$y,2,2,2,4,init_choice='smallVBayes')
BinBlocICL_MLBM(a=4,b=1,data$x,data$y, data$xrow,data$xcolx,data$xcoly)
}
/man/BinBlocICL_MLBM-proc.Rd
no_license
cran/bikm1
R
false
true
1,341
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bikm1_MLBM_Binary_functions.R
\name{BinBlocICL_MLBM}
\alias{BinBlocICL_MLBM}
\title{BinBlocICL_MLBM function for computation of the ICL criterion in the MLBM}
\usage{
BinBlocICL_MLBM(a,b,x,y,z1,v1,w1)
}
\arguments{
\item{a}{a hyperparameter for priors on the mixing proportions. By default, a=4.}
\item{b}{a hyperparameter for the prior on the Bernoulli parameter. By default, b=1.}
\item{x}{binary matrix of observations (first matrix).}
\item{y}{binary matrix of observations (second matrix).}
\item{z1}{a numeric vector specifying the class of rows.}
\item{v1}{a numeric vector specifying the class of columns (first matrix).}
\item{w1}{a numeric vector specifying the class of columns (second matrix).}
}
\value{
a value of the ICL criterion.
}
\description{
Computes the Integrated Completed Likelihood (ICL) criterion for binary
data in the Multiple Latent Block Model (MLBM), given the row partition and
the two column partitions.
}
\examples{
require(bikm1)
set.seed(42)
n=200
J=120
K=120
g=2
h=2
l=2
theta=list()
theta$pi_g=1/g *matrix(1,g,1)
theta$rho_h=1/h *matrix(1,h,1)
theta$tau_l=1/l *matrix(1,l,1)
theta$alpha_gh=matrix(runif(4),ncol=h)
theta$beta_gl=matrix(runif(4),ncol=l)
data=BinBlocRnd_MLBM(n,J,K,theta)
res=BIKM1_MLBM_Binary(data$x,data$y,2,2,2,4,init_choice='smallVBayes')
BinBlocICL_MLBM(a=4,b=1,data$x,data$y, data$xrow,data$xcoly)
}
## Regression test: mixed-model fits must be invariant to the choice of
## factor contrast coding (treatment vs. SAS), and lsmeans must agree too.
require(lmerTest)
## Load the `tree` data set shipped with lmerTest's test data.
load(system.file("testdata", "tree.RData", package="lmerTest"))

## Carrots data: the same random-slope model fitted under two contrast codings.
modelCarrots.treat <- lme4::lmer(Preference ~ sens2*sens1*Homesize*Age +
                                   (1 | product) + (1 + sens1 + sens2 | Consumer),
                                 data=carrots,
                                 contrasts = list(Homesize = "contr.treatment",
                                                  Age = "contr.treatment"))

modelCarrots.sas <- lme4::lmer(Preference ~ sens2*sens1*Homesize*Age +
                                 (1 | product) + (1 + sens1 + sens2 | Consumer),
                               data=carrots,
                               contrasts = list(Homesize = "contr.SAS",
                                                Age = "contr.SAS"))

## Historically an error was produced here; no longer in lme4 1.1-8, so the
## assertions below are kept commented out for reference.
# tools::assertError(stopifnot(all.equal(logLik(modelCarrots.treat),
#                                        logLik(modelCarrots.sas))))
#
# tools::assertError(stopifnot(all.equal(VarCorr(modelCarrots.treat),
#                                        VarCorr(modelCarrots.sas), tol = 1e-5)))

## Ham data: same crossed-random-effects model under both codings.
modelHam.sas <- lmer(Informed.liking ~ Product*Information*Gender*Age +
                       (1 | Consumer) + (1 | Product:Consumer) +
                       (1 | Information:Consumer),
                     data=ham,
                     contrasts = list(Product = "contr.SAS",
                                      Information = "contr.SAS",
                                      Gender = "contr.SAS"))

modelHam.treat <- lmer(Informed.liking ~ Product*Information*Gender*Age +
                         (1 | Consumer) + (1 | Product:Consumer) +
                         (1 | Information:Consumer),
                       data=ham,
                       contrasts = list(Product = "contr.treatment",
                                        Information = "contr.treatment",
                                        Gender = "contr.treatment"))

## Log-likelihood and variance components must not depend on the coding.
stopifnot(all.equal(logLik(modelHam.sas), logLik(modelHam.treat)))

stopifnot(all.equal(VarCorr(modelHam.sas), VarCorr(modelHam.treat),
                    tol = 1e-3))

## Check that lsmeans are the same whether the contrasts for the models are
## different (treatment vs. SAS coding) on the `tree` data.
lmer4 <- lmer(increase ~ treat + (1|block), data = tree,
              contrasts = list(treat = "contr.treatment"))

lmer5 <- lmer(increase ~ treat+ (1|block), data = tree,
              contrasts = list(treat = "contr.SAS"))

## NOTE(review): this result is only printed, not asserted with stopifnot(),
## so a mismatch would not fail the test run -- confirm whether intentional.
all.equal(lsmeans(lmer4), lsmeans(lmer5), tol = 1e-3)
/lmerTest/tests/testContrasts.R
no_license
ingted/R-Examples
R
false
false
2,788
r
## Regression test: mixed-model fits must be invariant to the choice of
## factor contrast coding (treatment vs. SAS), and lsmeans must agree too.
require(lmerTest)
## Load the `tree` data set shipped with lmerTest's test data.
load(system.file("testdata", "tree.RData", package="lmerTest"))

## Carrots data: the same random-slope model fitted under two contrast codings.
modelCarrots.treat <- lme4::lmer(Preference ~ sens2*sens1*Homesize*Age +
                                   (1 | product) + (1 + sens1 + sens2 | Consumer),
                                 data=carrots,
                                 contrasts = list(Homesize = "contr.treatment",
                                                  Age = "contr.treatment"))

modelCarrots.sas <- lme4::lmer(Preference ~ sens2*sens1*Homesize*Age +
                                 (1 | product) + (1 + sens1 + sens2 | Consumer),
                               data=carrots,
                               contrasts = list(Homesize = "contr.SAS",
                                                Age = "contr.SAS"))

## Historically an error was produced here; no longer in lme4 1.1-8, so the
## assertions below are kept commented out for reference.
# tools::assertError(stopifnot(all.equal(logLik(modelCarrots.treat),
#                                        logLik(modelCarrots.sas))))
#
# tools::assertError(stopifnot(all.equal(VarCorr(modelCarrots.treat),
#                                        VarCorr(modelCarrots.sas), tol = 1e-5))))
# ver1.0
### ----- Chapter 4 ---- ###
# Picks up from the point where the analysis manager tidied up the data
# received from the chairman.
menus <- read.csv(file.choose(), stringsAsFactors = FALSE, colClasses = c("factor","Date","numeric")) # choose /Chapter04/menus.csv
# menus <- read.csv("Chapter04/menus.csv", stringsAsFactors = FALSE, colClasses = c("factor","Date","numeric"))
# The three columns each hold a different kind of data:
# the item column is a category, the date column is a Date, and the sales
# column is numeric.  The types are fixed while reading the file
# (they could also be set afterwards).

# Prepare for data manipulation.
library(dplyr)
# Check the column names.
menus %>% names   # same as names(menus)
# First rows of the data.
menus %>% head    # head(menus)

# Code 04-01
# Prepare for time-series plotting.
library(ggplot2)
# Extract the sales of rice balls (삼각김밥).
onigiri <- menus %>% filter (품목== "삼각김밥")
# Time-series plot (x axis declared as dates with scale_x_date).
ggplot(onigiri, aes(날짜, 매상)) + geom_line() + scale_x_date() + ggtitle("삼각김밥 매상")
# Using a pipe instead:
# onigiri %>% ggplot(aes(날짜, 매상)) + geom_line() + scale_x_date() + ggtitle("おにぎりの매상")

# Code04-02
# Fried-rice sales.
rice <- menus %>% filter (품목 == "볶음밥")
# Time-series plot.
rice %>% ggplot( aes(날짜, 매상)) + geom_line() + scale_x_date() + ggtitle("볶음밥 매상")

# Code04-03
# Extract the noodle dishes.
noodles <- menus %>% filter (품목 %in% c( "스파게티", "비빔국수", "우동","짬뽕", "라면"))
# One time-series panel per noodle dish.
noodles %>% ggplot(aes(날짜, 매상))+geom_line()+ facet_wrap (~품목) + ggtitle("면종류 매상")

# Code-4-04
# Correlation coefficients.
# Install the package used for reshaping the data.
install.packages("tidyr")
library(tidyr)
# Reshape the data so a correlation matrix can be computed
# (one column per item).
noodles2 <- menus %>% filter (품목 %in% c("삼각김밥", "된장국", "카레", "국밥","스파게티", "비빔국수", "우동", "짬뽕", "라면")) %>% spread (품목, 매상)
# Inspect the first rows.
head (noodles2)
# Correlation matrix (-1 drops the date column).
noodles2 [, -1] %>% cor   #cor (noodles2 [, -1])
## The following works only on non-Windows OSes:
# noodles %>% select (-날짜) %>% cor
# Scatter-plot matrix (the first column is the date, so it is excluded).
noodles2[, -1] %>% pairs   #pairs ( noodles2[, -1] )

# Code 04-5
# Scatter plot of udon vs. rice balls.
udon <- menus %>% filter (품목 %in% c("삼각김밥", "우동")) %>% spread (품목, 매상)
udon %>% ggplot(aes(우동, 삼각김밥)) + geom_point() + ggtitle("우동과 삼각김밥의 분포도")

# Code 04-06
# Scatter plot of rice balls vs. milk, with a least-squares line.
milk <- menus %>% filter (품목 %in% c("삼각김밥", "우유")) %>% spread (품목, 매상)
milk %>% ggplot(aes(삼각김밥 , 우유)) + geom_point(size = 2, color = "grey50") + geom_smooth(method = "lm", se = FALSE) + ggtitle("삼각김밥과 우유의 분포도")

# Code 04-07
# Milk sales.
milk2 <-menus %>% filter (품목 == "우유")
# Time-series plot.
milk2 %>% ggplot(aes(날짜, 매상)) + geom_line() + scale_x_date() + ggtitle("우유 매상")

# Code 04-08  Is there a correlation or not?
# Height and annual-income data.
heights <- read.csv(file.choose())   # choose Chapter04/heights.csv
# heights <- read.csv("Chapter04/heights.csv")
# The correlation is small.
heights %>% cor   # cor (heights)
ggplot(heights, aes( 키,연수입)) + geom_point() + ggtitle("신장과 연수입의 상관?")
heights %>% ggplot(aes( 키,연수입)) + geom_point() + ggtitle("신장과 연수입의 상관")

# Code-04-09
# Data with zero linear correlation (but a clear relationship).
xy <- data.frame (X = -10:10, Y = (-10:10)^2)
xy %>% ggplot(aes (x = X, y = Y)) + geom_point(size =2 )+ ggtitle("상관 관계가 있는 것 같지만?")
# ggplot(xy, aes (x = X, y = Y)) + geom_point(size =2 )+ ggtitle("상관 관계가 있는 것 같지만?")

# Regression analysis.
# Read the ice-cream sales data into `icecream`.
# Code04-09
icecream <- read.csv(file.choose())   # choose Chapter04/icecream.csv
# icecream <- read.csv("Chapter04/icecream.csv")
icecream %>% head   #head (icecream)

# Scatter plot of temperature vs. units sold.
icecream %>% ggplot(aes(기온, 판매수)) + geom_point(size = 2)
# ggplot(icecream, aes(기온, 판매수)) + geom_point(size = 2)

# Correlation coefficient.
icecream %>% select (판매수, 기온) %>% cor

# Slope and intercept with lm().
lm(판매수 ~ 기온, data = icecream)
# Using a pipe:
# icecream %>% lm(판매수 ~ 기온, data = .)

# Scatter plot with the regression line added.
ggplot(icecream, aes(기온, 판매수)) + geom_point(size = 2) + geom_smooth(method = "lm", se = FALSE)
# Using a pipe:
# icecream %>% ggplot(aes(기온, 판매수)) + geom_point(size = 2) + geom_smooth(method = "lm", se = FALSE)

# Test of the null hypothesis "the slope is 0", plus the R-squared.
summary (lm(판매수 ~ 기온, data = icecream))
# Using a pipe:
# icecream %>% lm(판매수 ~ 기온, data = .) %>% summary
#
/StoryTellingA/Chapter04/Chap04.R
no_license
mahesh122000/R
R
false
false
5,195
r
# ver1.0
### ----- Chapter 4 ---- ###
# Picks up from the point where the analysis manager tidied up the data
# received from the chairman.
menus <- read.csv(file.choose(), stringsAsFactors = FALSE, colClasses = c("factor","Date","numeric")) # choose /Chapter04/menus.csv
# menus <- read.csv("Chapter04/menus.csv", stringsAsFactors = FALSE, colClasses = c("factor","Date","numeric"))
# The three columns each hold a different kind of data:
# the item column is a category, the date column is a Date, and the sales
# column is numeric.  The types are fixed while reading the file
# (they could also be set afterwards).

# Prepare for data manipulation.
library(dplyr)
# Check the column names.
menus %>% names   # same as names(menus)
# First rows of the data.
menus %>% head    # head(menus)

# Code 04-01
# Prepare for time-series plotting.
library(ggplot2)
# Extract the sales of rice balls (삼각김밥).
onigiri <- menus %>% filter (품목== "삼각김밥")
# Time-series plot (x axis declared as dates with scale_x_date).
ggplot(onigiri, aes(날짜, 매상)) + geom_line() + scale_x_date() + ggtitle("삼각김밥 매상")
# Using a pipe instead:
# onigiri %>% ggplot(aes(날짜, 매상)) + geom_line() + scale_x_date() + ggtitle("おにぎりの매상")

# Code04-02
# Fried-rice sales.
rice <- menus %>% filter (품목 == "볶음밥")
# Time-series plot.
rice %>% ggplot( aes(날짜, 매상)) + geom_line() + scale_x_date() + ggtitle("볶음밥 매상")

# Code04-03
# Extract the noodle dishes.
noodles <- menus %>% filter (품목 %in% c( "스파게티", "비빔국수", "우동","짬뽕", "라면"))
# One time-series panel per noodle dish.
noodles %>% ggplot(aes(날짜, 매상))+geom_line()+ facet_wrap (~품목) + ggtitle("면종류 매상")

# Code-4-04
# Correlation coefficients.
# Install the package used for reshaping the data.
install.packages("tidyr")
library(tidyr)
# Reshape the data so a correlation matrix can be computed
# (one column per item).
noodles2 <- menus %>% filter (품목 %in% c("삼각김밥", "된장국", "카레", "국밥","스파게티", "비빔국수", "우동", "짬뽕", "라면")) %>% spread (품목, 매상)
# Inspect the first rows.
head (noodles2)
# Correlation matrix (-1 drops the date column).
noodles2 [, -1] %>% cor   #cor (noodles2 [, -1])
## The following works only on non-Windows OSes:
# noodles %>% select (-날짜) %>% cor
# Scatter-plot matrix (the first column is the date, so it is excluded).
noodles2[, -1] %>% pairs   #pairs ( noodles2[, -1] )

# Code 04-5
# Scatter plot of udon vs. rice balls.
udon <- menus %>% filter (품목 %in% c("삼각김밥", "우동")) %>% spread (품목, 매상)
udon %>% ggplot(aes(우동, 삼각김밥)) + geom_point() + ggtitle("우동과 삼각김밥의 분포도")

# Code 04-06
# Scatter plot of rice balls vs. milk, with a least-squares line.
milk <- menus %>% filter (품목 %in% c("삼각김밥", "우유")) %>% spread (품목, 매상)
milk %>% ggplot(aes(삼각김밥 , 우유)) + geom_point(size = 2, color = "grey50") + geom_smooth(method = "lm", se = FALSE) + ggtitle("삼각김밥과 우유의 분포도")

# Code 04-07
# Milk sales.
milk2 <-menus %>% filter (품목 == "우유")
# Time-series plot.
milk2 %>% ggplot(aes(날짜, 매상)) + geom_line() + scale_x_date() + ggtitle("우유 매상")

# Code 04-08  Is there a correlation or not?
# Height and annual-income data.
heights <- read.csv(file.choose())   # choose Chapter04/heights.csv
# heights <- read.csv("Chapter04/heights.csv")
# The correlation is small.
heights %>% cor   # cor (heights)
ggplot(heights, aes( 키,연수입)) + geom_point() + ggtitle("신장과 연수입의 상관?")
heights %>% ggplot(aes( 키,연수입)) + geom_point() + ggtitle("신장과 연수입의 상관")

# Code-04-09
# Data with zero linear correlation (but a clear relationship).
xy <- data.frame (X = -10:10, Y = (-10:10)^2)
xy %>% ggplot(aes (x = X, y = Y)) + geom_point(size =2 )+ ggtitle("상관 관계가 있는 것 같지만?")
# ggplot(xy, aes (x = X, y = Y)) + geom_point(size =2 )+ ggtitle("상관 관계가 있는 것 같지만?")

# Regression analysis.
# Read the ice-cream sales data into `icecream`.
# Code04-09
icecream <- read.csv(file.choose())   # choose Chapter04/icecream.csv
# icecream <- read.csv("Chapter04/icecream.csv")
icecream %>% head   #head (icecream)

# Scatter plot of temperature vs. units sold.
icecream %>% ggplot(aes(기온, 판매수)) + geom_point(size = 2)
# ggplot(icecream, aes(기온, 판매수)) + geom_point(size = 2)

# Correlation coefficient.
icecream %>% select (판매수, 기온) %>% cor

# Slope and intercept with lm().
lm(판매수 ~ 기온, data = icecream)
# Using a pipe:
# icecream %>% lm(판매수 ~ 기온, data = .)

# Scatter plot with the regression line added.
ggplot(icecream, aes(기온, 판매수)) + geom_point(size = 2) + geom_smooth(method = "lm", se = FALSE)
# Using a pipe:
# icecream %>% ggplot(aes(기온, 판매수)) + geom_point(size = 2) + geom_smooth(method = "lm", se = FALSE)

# Test of the null hypothesis "the slope is 0", plus the R-squared.
summary (lm(판매수 ~ 기온, data = icecream))
# Using a pipe:
# icecream %>% lm(판매수 ~ 기온, data = .) %>% summary
#
# Upload helpers for behavioral-observation tables collected on tablets.
# Each function builds an "INSERT ... SELECT ... WHERE NOT EXISTS" statement
# by pasting R values directly into SQL text and executes it with DBI's
# dbGetQuery(); the WHERE NOT EXISTS clause makes every insert idempotent.
# The trailing gsub() calls rewrite empty strings ('') and the 'NULL'/'NA'
# placeholders into bare SQL NULLs before the statement is run.
#
# NOTE(review): building SQL by string concatenation is injection-prone and
# breaks on values containing single quotes; parameterized queries (e.g.
# DBI::sqlInterpolate / dbBind) would be safer -- flagged, not changed here.
# NOTE(review): fixHeader() is defined elsewhere in this project; it appears
# to normalize column names for use as SQL identifiers -- confirm.

# Replace the commas inside a timestamp string with spaces so the database
# accepts it as a timestamp literal.
timeFormat <- function(stamp){
  paste(unlist(strsplit(stamp, ",")), collapse=" ")
}

# Map NA to the literal string 'NULL' (used for unquoted numeric SQL fields);
# non-NA values pass through unchanged.
naFormat <- function(v){
  ifelse(is.na(v),'NULL',v)
}

# Insert one row per session into main_tables.list_sessions, skipping rows
# whose (device_ID, session_start_time) pair is already present.
uploadSessionsTable <- function(sessionsTable, con){
  for (i in 1:nrow(sessionsTable)){
    command <- paste("INSERT INTO main_tables.list_sessions(device_ID, session_start_time, session_end_time, group_ID, pin_code_name, layout_info_json_version, behaviors_json_version, gps_on, compass_on, map_mode_on, physical_contact_threshold) SELECT '",as.character(sessionsTable[i,]$device_ID),"', '",timeFormat(as.character(sessionsTable[i,]$session_start_timeStamp)),"', '",timeFormat(as.character(sessionsTable[i,]$session_end_timeStamp)),"', '", as.character(sessionsTable[i,]$group_ID),"', '", as.character(sessionsTable[i,]$pin_code_name),"', '", as.character(sessionsTable[i,]$layout_info_json_version),"', '", as.character(sessionsTable[i,]$behaviors_json_version),"', '", as.character(sessionsTable[i,]$gps_on),"', '", as.character(sessionsTable[i,]$compass_on),"', '", as.character(sessionsTable[i,]$map_mode_on),"', '", as.character(sessionsTable[i,]$physical_contact_threshold),"' WHERE NOT EXISTS (SELECT 1 from main_tables.list_sessions WHERE device_ID='",as.character(sessionsTable[i,]$device_ID),"' AND session_start_time='",timeFormat(as.character(sessionsTable[i,]$session_start_timeStamp)),"');", sep="")
    # Normalize empty / placeholder values to SQL NULL before executing.
    command <- gsub("''", "NULL", command)
    #command <- gsub("NA", "NULL", command)
    command <- gsub("'NULL'", "NULL", command)
    command <- gsub("'NA'", "NULL", command)
    dbGetQuery(con, command)
  }
}

# Insert one row per focal follow into main_tables.list_focals, keyed on
# (device_ID, focal_start_time).
uploadFocalsTable <- function(focalsTable, con){
  for (i in 1:nrow(focalsTable)){
    # NOTE(review): focal_end_timeStamp is NOT passed through timeFormat(),
    # unlike the other timestamps in this file -- confirm this is intended.
    command <- paste("INSERT INTO main_tables.list_focals(device_ID, session_start_time, focal_start_time, focal_end_time, set_duration, set_scan_interval, focal_individual_ID) SELECT '",as.character(focalsTable[i,]$device_ID),"', '",timeFormat(as.character(focalsTable[i,]$session_start_timeStamp)),"', '",timeFormat(as.character(focalsTable[i,]$focal_start_timeStamp)),"', '", as.character(focalsTable[i,]$focal_end_timeStamp),"', '", as.character(focalsTable[i,]$focal_set_duration),"', '", as.character(focalsTable[i,]$focal_set_scan_interval),"', '", as.character(focalsTable[i,]$focal_individual_ID),"' WHERE NOT EXISTS (SELECT 1 from main_tables.list_focals WHERE device_ID='",as.character(focalsTable[i,]$device_ID),"' AND focal_start_time='",timeFormat(as.character(focalsTable[i,]$focal_start_timeStamp)),"');", sep="")
    command <- gsub("''", "NULL", command)
    #command <- gsub("NA", "NULL", command)
    command <- gsub("'NULL'", "NULL", command)
    command <- gsub("'NA'", "NULL", command)
    dbGetQuery(con, command)
  }
}

# Insert behavior events into main_tables.list_behaviors.  The data-dependent
# middle columns come from the table's own headers; columns whose original
# name ends in "*" are multi-valued and are excluded here (they are handled
# by uploadBehaviorsIntermediateTables below).
uploadBehaviorsTable <- function(behaviorsTable, con){
  # Data-dependent columns: drop the 6 fixed leading and 5 fixed trailing ones.
  temp1 <- names(behaviorsTable)[-c(1:6, (ncol(behaviorsTable)-4):ncol(behaviorsTable))]
  temp2 <- fixHeader(temp1)
  # A trailing "*" (regexpr position == nchar) marks a multi-valued column.
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))]
  } else {
    tableHeaders <- temp2
  }
  for (i in 1:nrow(behaviorsTable)){
    # Row values for the kept columns, aligned to tableHeaders order.
    temp3 <- unlist(as.list(behaviorsTable[i,7:(ncol(behaviorsTable)-5)]))[match(tableHeaders, temp2)]
    command <- paste("INSERT INTO main_tables.list_behaviors(device_ID, focal_start_time, behavior_time, actor, subject, ", paste(tableHeaders, collapse=", "), ", comment, latitude, longitude, gps_horizontal_precision, altitude) SELECT '",as.character(behaviorsTable[i,]$device_ID),"', '",timeFormat(as.character(behaviorsTable[i,]$focal_start_timeStamp)),"', '", timeFormat(as.character(behaviorsTable[i,]$behavior_timeStamp)),"', '", as.character(behaviorsTable[i,]$actor),"', '", as.character(behaviorsTable[i,]$subject),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'"),", '", as.character(behaviorsTable[i,]$comment),"', ", naFormat(as.character(behaviorsTable[i,]$latitude)),", ", naFormat(as.character(behaviorsTable[i,]$longitude)),", ", naFormat(as.character(behaviorsTable[i,]$gps_horizontal_precision)),", ", naFormat(as.character(behaviorsTable[i,]$altitude))," WHERE NOT EXISTS (SELECT 1 from main_tables.list_behaviors WHERE device_ID='",as.character(behaviorsTable[i,]$device_ID),"' AND behavior_time='",timeFormat(as.character(behaviorsTable[i,]$behavior_timeStamp)),"' AND actor='",as.character(behaviorsTable[i,]$actor),"' AND subject='",as.character(behaviorsTable[i,]$subject),"');", sep="")
    command <- gsub("''", "NULL", command)
    #command <- gsub("NA", "NULL", command)
    command <- gsub("'NULL'", "NULL", command)
    command <- gsub("'NA'", "NULL", command)
    dbGetQuery(con, command)
  }
}

# Insert one row per scan event into main_tables.list_scans, keyed on
# (device_ID, scan_time).
uploadScansTable <- function(scansTable, con){
  # NOTE(review): tableHeaders is computed but never used in this function.
  tableHeaders <- fixHeader(names(scansTable))
  for (i in 1:nrow(scansTable)){
    command <- paste("INSERT INTO main_tables.list_scans(device_ID, focal_start_time, scan_time, latitude, longitude, gps_horizontal_precision, altitude, compass_bearing) SELECT '",as.character(scansTable[i,]$device_ID),"', '",timeFormat(as.character(scansTable[i,]$focal_start_timeStamp)),"', '", timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"', ", naFormat(as.character(scansTable[i,]$latitude)),", ", naFormat(as.character(scansTable[i,]$longitude)),", ", naFormat(as.character(scansTable[i,]$gps_horizontal_precision)),", ", naFormat(as.character(scansTable[i,]$altitude)),", ", naFormat(as.character(scansTable[i,]$compass_bearing))," WHERE NOT EXISTS (SELECT 1 from main_tables.list_scans WHERE device_ID='",as.character(scansTable[i,]$device_ID),"' AND scan_time ='",timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"');", sep="")
    command <- gsub("''", "NULL", command)
    #command <- gsub("NA", "NULL", command)
    command <- gsub("'NULL'", "NULL", command)
    command <- gsub("'NA'", "NULL", command)
    dbGetQuery(con, command)
  }
}

# Insert per-individual scan records into main_tables.scan_data.  As in
# uploadBehaviorsTable, "*"-suffixed (multi-valued) columns are skipped here.
uploadScanData <- function(scansTable, con){
  temp1 <- names(scansTable)[-c(1:5, (ncol(scansTable)-7):ncol(scansTable))]
  temp2 <- fixHeader(temp1)
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))]
  } else {
    tableHeaders <- temp2
  }
  for (i in 1:nrow(scansTable)){
    temp3 <- unlist(as.list(scansTable[i,6:(ncol(scansTable)-8)]))[match(tableHeaders, temp2)]
    command <- paste("INSERT INTO main_tables.scan_data(device_ID, scan_time, scanned_individual_ID, ", paste(tableHeaders, collapse=", "), ", x_position, y_position) SELECT '",as.character(scansTable[i,]$device_ID),"', '", timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"', '", as.character(scansTable[i,]$scanned_individual_ID),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'"),", ", as.character(scansTable[i,]$x_position),", ", as.character(scansTable[i,]$y_position)," WHERE NOT EXISTS (SELECT 1 from main_tables.scan_data WHERE device_ID='",as.character(scansTable[i,]$device_ID),"' AND scan_time='",timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"' AND scanned_individual_ID ='",as.character(scansTable[i,]$scanned_individual_ID),"');", sep="")
    command <- gsub("''", "NULL", command)
    #command <- gsub("NA", "NULL", command)
    command <- gsub("'NULL'", "NULL", command)
    command <- gsub("'NA'", "NULL", command)
    dbGetQuery(con, command)
  }
}

# Insert per-scan variables into main_tables.scan_variables; the dedup key
# includes the first variable column (column 5).
uploadScanVariables <- function(scanVarsTable, con){
  temp1 <- names(scanVarsTable)[-(1:4)]
  temp2 <- fixHeader(temp1)
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))]
  } else {
    tableHeaders <- temp2
  }
  for (i in 1:nrow(scanVarsTable)){
    temp3 <- unlist(as.list(scanVarsTable[i,5:ncol(scanVarsTable)]))[match(tableHeaders, temp2)]
    command <- paste("INSERT INTO main_tables.scan_variables(device_ID, scan_time, ", paste(tableHeaders, collapse=", "), ") SELECT '",as.character(scanVarsTable[i,]$device_ID),"', '", timeFormat(as.character(scanVarsTable[i,]$scan_timeStamp)),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'")," WHERE NOT EXISTS (SELECT 1 from main_tables.scan_variables WHERE device_ID='",as.character(scanVarsTable[i,]$device_ID),"' AND scan_time='",timeFormat(as.character(scanVarsTable[i,]$scan_timeStamp)),"' AND scanVars ='", as.character(scanVarsTable[i,5]),"');", sep="")
    command <- gsub("''", "NULL", command)
    #command <- gsub("NA", "NULL", command)
    command <- gsub("'NULL'", "NULL", command)
    command <- gsub("'NA'", "NULL", command)
    dbGetQuery(con, command)
  }
}

# Insert continuously-recorded focal variables into
# main_tables.continuous_focal_variables, keyed on device, focal start time
# and the first variable column (column 4).
uploadContinuousVariables <- function(continuousVarsTable, con){
  temp1 <- names(continuousVarsTable)[-(1:3)]
  temp2 <- fixHeader(temp1)
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))]
  } else {
    tableHeaders <- temp2
  }
  for (i in 1:nrow(continuousVarsTable)){
    temp3 <- unlist(as.list(continuousVarsTable[i,4:ncol(continuousVarsTable)]))[match(tableHeaders, temp2)]
    command <- paste("INSERT INTO main_tables.continuous_focal_variables(device_ID, focal_start_time, ", paste(tableHeaders, collapse=", "), ") SELECT '",as.character(continuousVarsTable[i,]$device_ID),"', '", timeFormat(as.character(continuousVarsTable[i,]$focal_start_timeStamp)),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'")," WHERE NOT EXISTS (SELECT 1 from main_tables.continuous_focal_variables WHERE device_ID='",as.character(continuousVarsTable[i,]$device_ID),"' AND focal_start_time ='",timeFormat(as.character(continuousVarsTable[i,]$focal_start_timeStamp)),"' AND continuousVars ='", as.character(continuousVarsTable[i,4]),"');", sep="")
    command <- gsub("''", "NULL", command)
    #command <- gsub("NA", "NULL", command)
    command <- gsub("'NULL'", "NULL", command)
    command <- gsub("'NA'", "NULL", command)
    dbGetQuery(con, command)
  }
}

# Insert per-focal variables into main_tables.focal_variables (same pattern
# as uploadContinuousVariables, different target table and key column name).
uploadFocalVariables <- function(focalVarsTable, con){
  temp1 <- names(focalVarsTable)[-(1:3)]
  temp2 <- fixHeader(temp1)
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))]
  } else {
    tableHeaders <- temp2
  }
  for (i in 1:nrow(focalVarsTable)){
    temp3 <- unlist(as.list(focalVarsTable[i,4:ncol(focalVarsTable)]))[match(tableHeaders, temp2)]
    command <- paste("INSERT INTO main_tables.focal_variables(device_ID, focal_start_time, ", paste(tableHeaders, collapse=", "), ") SELECT '",as.character(focalVarsTable[i,]$device_ID),"', '", timeFormat(as.character(focalVarsTable[i,]$focal_start_timeStamp)),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'")," WHERE NOT EXISTS (SELECT 1 from main_tables.focal_variables WHERE device_ID='",as.character(focalVarsTable[i,]$device_ID),"' AND focal_start_time ='",timeFormat(as.character(focalVarsTable[i,]$focal_start_timeStamp)),"' AND focalVars ='", as.character(focalVarsTable[i,4]),"');", sep="")
    command <- gsub("''", "NULL", command)
    #command <- gsub("NA", "NULL", command)
    command <- gsub("'NULL'", "NULL", command)
    command <- gsub("'NA'", "NULL", command)
    dbGetQuery(con, command)
  }
}

# Insert per-session (daily) variables into main_tables.session_variables,
# keyed on device, session start time and the first variable column.
uploadSessionVariables <- function(sessionVarsTable, con){
  temp1 <- names(sessionVarsTable)[-(1:2)]
  temp2 <- fixHeader(temp1)
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))]
  } else {
    tableHeaders <- temp2
  }
  for (i in 1:nrow(sessionVarsTable)){
    temp3 <- unlist(as.list(sessionVarsTable[i,3:ncol(sessionVarsTable)]))[match(tableHeaders, temp2)]
    command <- paste("INSERT INTO main_tables.session_variables(device_ID, session_start_time, ", paste(tableHeaders, collapse=", "), ") SELECT '",as.character(sessionVarsTable[i,]$device_ID),"', '", timeFormat(as.character(sessionVarsTable[i,]$session_start_timeStamp)),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'")," WHERE NOT EXISTS (SELECT 1 from main_tables.session_variables WHERE device_ID='",as.character(sessionVarsTable[i,]$device_ID),"' AND session_start_time ='",timeFormat(as.character(sessionVarsTable[i,]$session_start_timeStamp)),"' AND dayVars ='", as.character(sessionVarsTable[i,3]),"');", sep="")
    command <- gsub("''", "NULL", command)
    #command <- gsub("NA", "NULL", command)
    command <- gsub("'NULL'", "NULL", command)
    command <- gsub("'NA'", "NULL", command)
    dbGetQuery(con, command)
  }
}

# Insert background-tap events into main_tables.list_background_taps.
uploadBackgroundTapsTable <- function(backgroundTapsTable, con){
  # NOTE(review): tableHeaders is computed but never used in this function.
  tableHeaders <- fixHeader(names(backgroundTapsTable))
  for (i in 1:nrow(backgroundTapsTable)){
    command <- paste("INSERT INTO main_tables.list_background_taps(device_ID, focal_start_time, tap_time, description, latitude, longitude, gps_horizontal_precision, altitude) SELECT '",as.character(backgroundTapsTable[i,]$device_ID),"', '", timeFormat(as.character(backgroundTapsTable[i,]$focal_start_timeStamp)),"', '", timeFormat(as.character(backgroundTapsTable[i,]$backgroundTap_timeStamp)),"', '", as.character(backgroundTapsTable[i,]$description),"', ", naFormat(as.character(backgroundTapsTable[i,]$latitude)),", ", naFormat(as.character(backgroundTapsTable[i,]$longitude)),", ", naFormat(as.character(backgroundTapsTable[i,]$gps_horizontal_precision)),", ", naFormat(as.character(backgroundTapsTable[i,]$altitude))," WHERE NOT EXISTS (SELECT 1 from main_tables.list_background_taps WHERE device_ID='",as.character(backgroundTapsTable[i,]$device_ID),"' AND focal_start_time ='",timeFormat(as.character(backgroundTapsTable[i,]$focal_start_timeStamp)),"' AND tap_time ='",timeFormat(as.character(backgroundTapsTable[i,]$backgroundTap_timeStamp)),"');", sep="")
    command <- gsub("''", "NULL", command)
    #command <- gsub("NA", "NULL", command)
    command <- gsub("'NULL'", "NULL", command)
    command <- gsub("'NA'", "NULL", command)
    dbGetQuery(con, command)
  }
}

# Insert free-text comments into main_tables.list_comments.
uploadCommentTable <- function(commentsTable, con){
  # NOTE(review): tableHeaders is computed but never used in this function.
  tableHeaders <- fixHeader(names(commentsTable))
  for (i in 1:nrow(commentsTable)){
    # Crude quote-escaping: single quotes in comment text are replaced with
    # backticks so the pasted SQL does not break.
    temp <- gsub("'", "`", as.character(commentsTable[i,]$comment_text))
    command <- paste("INSERT INTO main_tables.list_comments(device_ID, focal_start_time, comment_time, comment) SELECT '",as.character(commentsTable[i,]$device_ID),"', '", timeFormat(as.character(commentsTable[i,]$focal_start_timeStamp)),"', '", timeFormat(as.character(commentsTable[i,]$comment_timeStamp)),"', '", temp,"' WHERE NOT EXISTS (SELECT 1 from main_tables.list_comments WHERE device_ID='",as.character(commentsTable[i,]$device_ID),"' AND focal_start_time ='",timeFormat(as.character(commentsTable[i,]$focal_start_timeStamp)),"' AND comment_time ='",timeFormat(as.character(commentsTable[i,]$comment_timeStamp)),"');", sep="")
    command <- gsub("''", "NULL", command)
    #command <- gsub("NA", "NULL", command)
    command <- gsub("'NULL'", "NULL", command)
    command <- gsub("'NA'", "NULL", command)
    dbGetQuery(con, command)
  }
}

# For every "*"-suffixed (multi-valued, semicolon-separated) scan column,
# explode each cell into one row per value in the per-column accessory table
# accessory_tables.scan_data_<column>.
uploadScansIntermediateTables <- function(scansTable, con){
  temp1 <- names(scansTable)[-c(1:5, (ncol(scansTable)-7):ncol(scansTable))]
  temp2 <- fixHeader(temp1)
  # Only the "*"-suffixed columns are processed here.
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))]
    for (i in 1:nrow(scansTable)){
      temp3 <- unlist(as.list(scansTable[i,6:(ncol(scansTable)-8)]))[match(tableHeaders, temp2)]
      for(j in 1:length(tableHeaders)){
        # Split the semicolon-separated cell into its individual values.
        vecValues <- unlist(strsplit(as.character(temp3[j]), ";"))
        if(length(na.omit(vecValues))>0){
          for(k in 1:length(vecValues)){
            command <- paste0("INSERT INTO accessory_tables.scan_data_", tableHeaders[j],"(device_ID, scan_time, scanned_individual_ID, ", tableHeaders[j],") SELECT '",as.character(scansTable[i,]$device_ID),"', '", timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"', '", as.character(scansTable[i,]$scanned_individual_ID),"', '", vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.scan_data_",tableHeaders[j]," WHERE device_ID='",as.character(scansTable[i,]$device_ID),"' AND scan_time='",timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"' AND scanned_individual_ID ='",as.character(scansTable[i,]$scanned_individual_ID),"' AND ",tableHeaders[j]," ='",vecValues[k],"');")
            command <- gsub("''", "NULL", command)
            #command <- gsub("NA", "NULL", command)
            command <- gsub("'NULL'", "NULL", command)
            command <- gsub("'NA'", "NULL", command)
            dbGetQuery(con, command)
          }
        }
      }
    }
  }
}

# Explode multi-valued behavior columns into
# accessory_tables.list_behaviors_<column> (same pattern as above).
uploadBehaviorsIntermediateTables <- function(behaviorsTable, con){
  temp1 <- names(behaviorsTable)[-c(1:6, (ncol(behaviorsTable)-4):ncol(behaviorsTable))]
  temp2 <- fixHeader(temp1)
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))]
    for (i in 1:nrow(behaviorsTable)){
      temp3 <- unlist(as.list(behaviorsTable[i,7:(ncol(behaviorsTable)-5)]))[match(tableHeaders, temp2)]
      for(j in 1:length(tableHeaders)){
        vecValues <- unlist(strsplit(as.character(temp3[j]), ";"))
        if(length(na.omit(vecValues))>0){
          for(k in 1:length(vecValues)){
            command <- paste0("INSERT INTO accessory_tables.list_behaviors_", tableHeaders[j],"(device_ID, behavior_time, actor, subject, ", tableHeaders[j],") SELECT '",as.character(behaviorsTable[i,]$device_ID),"', '", timeFormat(as.character(behaviorsTable[i,]$behavior_timeStamp)),"', '", as.character(behaviorsTable[i,]$actor),"', '", as.character(behaviorsTable[i,]$subject),"', '", vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.list_behaviors_",tableHeaders[j]," WHERE device_ID='",as.character(behaviorsTable[i,]$device_ID),"' AND behavior_time='",timeFormat(as.character(behaviorsTable[i,]$behavior_timeStamp)),"' AND actor='",as.character(behaviorsTable[i,]$actor),"' AND subject='",as.character(behaviorsTable[i,]$subject),"' AND ",tableHeaders[j]," ='",vecValues[k],"');")
            command <- gsub("''", "NULL", command)
            #command <- gsub("NA", "NULL", command)
            command <- gsub("'NULL'", "NULL", command)
            command <- gsub("'NA'", "NULL", command)
            dbGetQuery(con, command)
          }
        }
      }
    }
  }
}

# Explode multi-valued scan variables into
# accessory_tables.scan_variables_<column>.
uploadscanVarsIntermediateTables <- function(scanVarsTable, con){
  temp1 <- names(scanVarsTable)[-(1:4)]
  temp2 <- fixHeader(temp1)
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))]
    for (i in 1:nrow(scanVarsTable)){
      temp3 <- unlist(as.list(scanVarsTable[i,5:ncol(scanVarsTable)]))[match(tableHeaders, temp2)]
      for(j in 1:length(tableHeaders)){
        vecValues <- unlist(strsplit(as.character(temp3[j]), ";"))
        if(length(na.omit(vecValues))>0){
          for(k in 1:length(vecValues)){
            command <- paste0("INSERT INTO accessory_tables.scan_variables_", tableHeaders[j],"(device_ID, scan_time, scanVars, ", tableHeaders[j],") SELECT '",as.character(scanVarsTable[i,]$device_ID),"', '", timeFormat(as.character(scanVarsTable[i,]$scan_timeStamp)),"', '",as.character(scanVarsTable[i,]$scanVars),"', '", vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.scan_variables_",tableHeaders[j]," WHERE device_ID='",as.character(scanVarsTable[i,]$device_ID),"' AND scan_time='",timeFormat(as.character(scanVarsTable[i,]$scan_timeStamp)),"' AND scanVars='",as.character(scanVarsTable[i,]$scanVars),"' AND ",tableHeaders[j]," ='",vecValues[k],"');")
            command <- gsub("''", "NULL", command)
            #command <- gsub("NA", "NULL", command)
            command <- gsub("'NULL'", "NULL", command)
            command <- gsub("'NA'", "NULL", command)
            dbGetQuery(con, command)
          }
        }
      }
    }
  }
}

# Explode multi-valued focal variables into
# accessory_tables.focal_variables_<column>.
uploadfocalVarsIntermediateTables <- function(focalVarsTable, con){
  temp1 <- names(focalVarsTable)[-(1:3)]
  temp2 <- fixHeader(temp1)
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))]
    for (i in 1:nrow(focalVarsTable)){
      temp3 <- unlist(as.list(focalVarsTable[i,4:ncol(focalVarsTable)]))[match(tableHeaders, temp2)]
      for(j in 1:length(tableHeaders)){
        vecValues <- unlist(strsplit(as.character(temp3[j]), ";"))
        if(length(na.omit(vecValues))>0){
          for(k in 1:length(vecValues)){
            command <- paste0("INSERT INTO accessory_tables.focal_variables_", tableHeaders[j],"(device_ID, focal_start_time, focalVars, ", tableHeaders[j],") SELECT '", as.character(focalVarsTable[i,]$device_ID),"', '", timeFormat(as.character(focalVarsTable[i,]$focal_start_timeStamp)),"', '", as.character(focalVarsTable[i,]$focalVars),"', '", vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.focal_variables_",tableHeaders[j]," WHERE device_ID='",as.character(focalVarsTable[i,]$device_ID),"' AND focal_start_time='",timeFormat(as.character(focalVarsTable[i,]$focal_start_timeStamp)),"' AND focalVars='",as.character(focalVarsTable[i,]$focalVars),"' AND ",tableHeaders[j]," ='",vecValues[k],"');")
            command <- gsub("''", "NULL", command)
            #command <- gsub("NA", "NULL", command)
            command <- gsub("'NULL'", "NULL", command)
            command <- gsub("'NA'", "NULL", command)
            dbGetQuery(con, command)
          }
        }
      }
    }
  }
}

# Explode multi-valued continuous focal variables into
# accessory_tables.continuous_focal_variables_<column>.
uploadContinuousVarsIntermediateTables <- function(continuousVarsTable, con){
  temp1 <- names(continuousVarsTable)[-(1:3)]
  temp2 <- fixHeader(temp1)
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))]
    for (i in 1:nrow(continuousVarsTable)){
      temp3 <- unlist(as.list(continuousVarsTable[i,4:ncol(continuousVarsTable)]))[match(tableHeaders, temp2)]
      for(j in 1:length(tableHeaders)){
        vecValues <- unlist(strsplit(as.character(temp3[j]), ";"))
        if(length(na.omit(vecValues))>0){
          for(k in 1:length(vecValues)){
            command <- paste0("INSERT INTO accessory_tables.continuous_focal_variables_", tableHeaders[j],"(device_ID, focal_start_time, continuousVars, ", tableHeaders[j],") SELECT '", as.character(continuousVarsTable[i,]$device_ID),"', '", timeFormat(as.character(continuousVarsTable[i,]$focal_start_timeStamp)),"', '", as.character(continuousVarsTable[i,]$continuousVars),"', '", vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.continuous_focal_variables_",tableHeaders[j]," WHERE device_ID='",as.character(continuousVarsTable[i,]$device_ID),"' AND focal_start_time='",timeFormat(as.character(continuousVarsTable[i,]$focal_start_timeStamp)),"' AND continuousVars ='",as.character(continuousVarsTable[i,]$continuousVars),"' AND ",tableHeaders[j]," ='",vecValues[k],"');")
            command <- gsub("''", "NULL", command)
            #command <- gsub("NA", "NULL", command)
            command <- gsub("'NULL'", "NULL", command)
            command <- gsub("'NA'", "NULL", command)
            dbGetQuery(con, command)
          }
        }
      }
    }
  }
}

# Explode multi-valued session (daily) variables into
# accessory_tables.session_variables_<column>.
uploadSessionVarsIntermediateTables <- function(sessionVarsTable, con){
  temp1 <- names(sessionVarsTable)[-(1:2)]
  temp2 <- fixHeader(temp1)
  if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){
    tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))]
    for (i in 1:nrow(sessionVarsTable)){
      temp3 <- unlist(as.list(sessionVarsTable[i,3:ncol(sessionVarsTable)]))[match(tableHeaders, temp2)]
      for(j in 1:length(tableHeaders)){
        vecValues <- unlist(strsplit(as.character(temp3[j]), ";"))
        if(length(na.omit(vecValues))>0){
          for(k in 1:length(vecValues)){
            command <- paste0("INSERT INTO accessory_tables.session_variables_", tableHeaders[j],"(device_ID, session_start_time, dayVars, ", tableHeaders[j],") SELECT '",as.character(sessionVarsTable[i,]$device_ID),"', '", timeFormat(as.character(sessionVarsTable[i,]$session_start_timeStamp)),"', '", as.character(sessionVarsTable[i,]$dayVars),"', '", vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.session_variables_",tableHeaders[j]," WHERE device_ID='",as.character(sessionVarsTable[i,]$device_ID),"' AND session_start_time='",timeFormat(as.character(sessionVarsTable[i,]$session_start_timeStamp)),"' AND dayVars ='",as.character(sessionVarsTable[i,]$dayVars),"' AND ",tableHeaders[j]," ='",vecValues[k],"');")
            command <- gsub("''", "NULL", command)
            #command <- gsub("NA", "NULL", command)
            command <- gsub("'NULL'", "NULL", command)
            command <- gsub("'NA'", "NULL", command)
            dbGetQuery(con, command)
          }
        }
      }
    }
  }
}
/postgres_upload.R
no_license
FosseyFund/AOToolBox
R
false
false
24,470
r
timeFormat <- function(stamp){ paste(unlist(strsplit(stamp, ",")), collapse=" ") } naFormat <- function(v){ ifelse(is.na(v),'NULL',v) } uploadSessionsTable <- function(sessionsTable, con){ for (i in 1:nrow(sessionsTable)){ command <- paste("INSERT INTO main_tables.list_sessions(device_ID, session_start_time, session_end_time, group_ID, pin_code_name, layout_info_json_version, behaviors_json_version, gps_on, compass_on, map_mode_on, physical_contact_threshold) SELECT '",as.character(sessionsTable[i,]$device_ID),"', '",timeFormat(as.character(sessionsTable[i,]$session_start_timeStamp)),"', '",timeFormat(as.character(sessionsTable[i,]$session_end_timeStamp)),"', '", as.character(sessionsTable[i,]$group_ID),"', '", as.character(sessionsTable[i,]$pin_code_name),"', '", as.character(sessionsTable[i,]$layout_info_json_version),"', '", as.character(sessionsTable[i,]$behaviors_json_version),"', '", as.character(sessionsTable[i,]$gps_on),"', '", as.character(sessionsTable[i,]$compass_on),"', '", as.character(sessionsTable[i,]$map_mode_on),"', '", as.character(sessionsTable[i,]$physical_contact_threshold),"' WHERE NOT EXISTS (SELECT 1 from main_tables.list_sessions WHERE device_ID='",as.character(sessionsTable[i,]$device_ID),"' AND session_start_time='",timeFormat(as.character(sessionsTable[i,]$session_start_timeStamp)),"');", sep="") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } uploadFocalsTable <- function(focalsTable, con){ for (i in 1:nrow(focalsTable)){ command <- paste("INSERT INTO main_tables.list_focals(device_ID, session_start_time, focal_start_time, focal_end_time, set_duration, set_scan_interval, focal_individual_ID) SELECT '",as.character(focalsTable[i,]$device_ID),"', '",timeFormat(as.character(focalsTable[i,]$session_start_timeStamp)),"', '",timeFormat(as.character(focalsTable[i,]$focal_start_timeStamp)),"', '", 
as.character(focalsTable[i,]$focal_end_timeStamp),"', '", as.character(focalsTable[i,]$focal_set_duration),"', '", as.character(focalsTable[i,]$focal_set_scan_interval),"', '", as.character(focalsTable[i,]$focal_individual_ID),"' WHERE NOT EXISTS (SELECT 1 from main_tables.list_focals WHERE device_ID='",as.character(focalsTable[i,]$device_ID),"' AND focal_start_time='",timeFormat(as.character(focalsTable[i,]$focal_start_timeStamp)),"');", sep="") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } uploadBehaviorsTable <- function(behaviorsTable, con){ temp1 <- names(behaviorsTable)[-c(1:6, (ncol(behaviorsTable)-4):ncol(behaviorsTable))] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))] } else { tableHeaders <- temp2 } for (i in 1:nrow(behaviorsTable)){ temp3 <- unlist(as.list(behaviorsTable[i,7:(ncol(behaviorsTable)-5)]))[match(tableHeaders, temp2)] command <- paste("INSERT INTO main_tables.list_behaviors(device_ID, focal_start_time, behavior_time, actor, subject, ", paste(tableHeaders, collapse=", "), ", comment, latitude, longitude, gps_horizontal_precision, altitude) SELECT '",as.character(behaviorsTable[i,]$device_ID),"', '",timeFormat(as.character(behaviorsTable[i,]$focal_start_timeStamp)),"', '", timeFormat(as.character(behaviorsTable[i,]$behavior_timeStamp)),"', '", as.character(behaviorsTable[i,]$actor),"', '", as.character(behaviorsTable[i,]$subject),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'"),", '", as.character(behaviorsTable[i,]$comment),"', ", naFormat(as.character(behaviorsTable[i,]$latitude)),", ", naFormat(as.character(behaviorsTable[i,]$longitude)),", ", naFormat(as.character(behaviorsTable[i,]$gps_horizontal_precision)),", ", naFormat(as.character(behaviorsTable[i,]$altitude))," WHERE NOT 
EXISTS (SELECT 1 from main_tables.list_behaviors WHERE device_ID='",as.character(behaviorsTable[i,]$device_ID),"' AND behavior_time='",timeFormat(as.character(behaviorsTable[i,]$behavior_timeStamp)),"' AND actor='",as.character(behaviorsTable[i,]$actor),"' AND subject='",as.character(behaviorsTable[i,]$subject),"');", sep="") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } uploadScansTable <- function(scansTable, con){ tableHeaders <- fixHeader(names(scansTable)) for (i in 1:nrow(scansTable)){ command <- paste("INSERT INTO main_tables.list_scans(device_ID, focal_start_time, scan_time, latitude, longitude, gps_horizontal_precision, altitude, compass_bearing) SELECT '",as.character(scansTable[i,]$device_ID),"', '",timeFormat(as.character(scansTable[i,]$focal_start_timeStamp)),"', '", timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"', ", naFormat(as.character(scansTable[i,]$latitude)),", ", naFormat(as.character(scansTable[i,]$longitude)),", ", naFormat(as.character(scansTable[i,]$gps_horizontal_precision)),", ", naFormat(as.character(scansTable[i,]$altitude)),", ", naFormat(as.character(scansTable[i,]$compass_bearing))," WHERE NOT EXISTS (SELECT 1 from main_tables.list_scans WHERE device_ID='",as.character(scansTable[i,]$device_ID),"' AND scan_time ='",timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"');", sep="") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } uploadScanData <- function(scansTable, con){ temp1 <- names(scansTable)[-c(1:5, (ncol(scansTable)-7):ncol(scansTable))] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))] } else { tableHeaders <- temp2 } for (i 
in 1:nrow(scansTable)){ temp3 <- unlist(as.list(scansTable[i,6:(ncol(scansTable)-8)]))[match(tableHeaders, temp2)] command <- paste("INSERT INTO main_tables.scan_data(device_ID, scan_time, scanned_individual_ID, ", paste(tableHeaders, collapse=", "), ", x_position, y_position) SELECT '",as.character(scansTable[i,]$device_ID),"', '", timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"', '", as.character(scansTable[i,]$scanned_individual_ID),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'"),", ", as.character(scansTable[i,]$x_position),", ", as.character(scansTable[i,]$y_position)," WHERE NOT EXISTS (SELECT 1 from main_tables.scan_data WHERE device_ID='",as.character(scansTable[i,]$device_ID),"' AND scan_time='",timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"' AND scanned_individual_ID ='",as.character(scansTable[i,]$scanned_individual_ID),"');", sep="") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } uploadScanVariables <- function(scanVarsTable, con){ temp1 <- names(scanVarsTable)[-(1:4)] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))] } else { tableHeaders <- temp2 } for (i in 1:nrow(scanVarsTable)){ temp3 <- unlist(as.list(scanVarsTable[i,5:ncol(scanVarsTable)]))[match(tableHeaders, temp2)] command <- paste("INSERT INTO main_tables.scan_variables(device_ID, scan_time, ", paste(tableHeaders, collapse=", "), ") SELECT '",as.character(scanVarsTable[i,]$device_ID),"', '", timeFormat(as.character(scanVarsTable[i,]$scan_timeStamp)),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'")," WHERE NOT EXISTS (SELECT 1 from main_tables.scan_variables WHERE device_ID='",as.character(scanVarsTable[i,]$device_ID),"' AND 
scan_time='",timeFormat(as.character(scanVarsTable[i,]$scan_timeStamp)),"' AND scanVars ='", as.character(scanVarsTable[i,5]),"');", sep="") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } uploadContinuousVariables <- function(continuousVarsTable, con){ temp1 <- names(continuousVarsTable)[-(1:3)] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))] } else { tableHeaders <- temp2 } for (i in 1:nrow(continuousVarsTable)){ temp3 <- unlist(as.list(continuousVarsTable[i,4:ncol(continuousVarsTable)]))[match(tableHeaders, temp2)] command <- paste("INSERT INTO main_tables.continuous_focal_variables(device_ID, focal_start_time, ", paste(tableHeaders, collapse=", "), ") SELECT '",as.character(continuousVarsTable[i,]$device_ID),"', '", timeFormat(as.character(continuousVarsTable[i,]$focal_start_timeStamp)),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'")," WHERE NOT EXISTS (SELECT 1 from main_tables.continuous_focal_variables WHERE device_ID='",as.character(continuousVarsTable[i,]$device_ID),"' AND focal_start_time ='",timeFormat(as.character(continuousVarsTable[i,]$focal_start_timeStamp)),"' AND continuousVars ='", as.character(continuousVarsTable[i,4]),"');", sep="") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } uploadFocalVariables <- function(focalVarsTable, con){ temp1 <- names(focalVarsTable)[-(1:3)] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))] } else { tableHeaders <- temp2 } for (i in 1:nrow(focalVarsTable)){ temp3 <- 
unlist(as.list(focalVarsTable[i,4:ncol(focalVarsTable)]))[match(tableHeaders, temp2)] command <- paste("INSERT INTO main_tables.focal_variables(device_ID, focal_start_time, ", paste(tableHeaders, collapse=", "), ") SELECT '",as.character(focalVarsTable[i,]$device_ID),"', '", timeFormat(as.character(focalVarsTable[i,]$focal_start_timeStamp)),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'")," WHERE NOT EXISTS (SELECT 1 from main_tables.focal_variables WHERE device_ID='",as.character(focalVarsTable[i,]$device_ID),"' AND focal_start_time ='",timeFormat(as.character(focalVarsTable[i,]$focal_start_timeStamp)),"' AND focalVars ='", as.character(focalVarsTable[i,4]),"');", sep="") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } uploadSessionVariables <- function(sessionVarsTable, con){ temp1 <- names(sessionVarsTable)[-(1:2)] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[-which(nchar(temp1)==regexpr("[*]", temp1))] } else { tableHeaders <- temp2 } for (i in 1:nrow(sessionVarsTable)){ temp3 <- unlist(as.list(sessionVarsTable[i,3:ncol(sessionVarsTable)]))[match(tableHeaders, temp2)] command <- paste("INSERT INTO main_tables.session_variables(device_ID, session_start_time, ", paste(tableHeaders, collapse=", "), ") SELECT '",as.character(sessionVarsTable[i,]$device_ID),"', '", timeFormat(as.character(sessionVarsTable[i,]$session_start_timeStamp)),"', ", paste0("'",paste(as.character(temp3), collapse="', '"),"'")," WHERE NOT EXISTS (SELECT 1 from main_tables.session_variables WHERE device_ID='",as.character(sessionVarsTable[i,]$device_ID),"' AND session_start_time ='",timeFormat(as.character(sessionVarsTable[i,]$session_start_timeStamp)),"' AND dayVars ='", as.character(sessionVarsTable[i,3]),"');", sep="") command <- gsub("''", "NULL", command) #command <- 
gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } uploadBackgroundTapsTable <- function(backgroundTapsTable, con){ tableHeaders <- fixHeader(names(backgroundTapsTable)) for (i in 1:nrow(backgroundTapsTable)){ command <- paste("INSERT INTO main_tables.list_background_taps(device_ID, focal_start_time, tap_time, description, latitude, longitude, gps_horizontal_precision, altitude) SELECT '",as.character(backgroundTapsTable[i,]$device_ID),"', '", timeFormat(as.character(backgroundTapsTable[i,]$focal_start_timeStamp)),"', '", timeFormat(as.character(backgroundTapsTable[i,]$backgroundTap_timeStamp)),"', '", as.character(backgroundTapsTable[i,]$description),"', ", naFormat(as.character(backgroundTapsTable[i,]$latitude)),", ", naFormat(as.character(backgroundTapsTable[i,]$longitude)),", ", naFormat(as.character(backgroundTapsTable[i,]$gps_horizontal_precision)),", ", naFormat(as.character(backgroundTapsTable[i,]$altitude))," WHERE NOT EXISTS (SELECT 1 from main_tables.list_background_taps WHERE device_ID='",as.character(backgroundTapsTable[i,]$device_ID),"' AND focal_start_time ='",timeFormat(as.character(backgroundTapsTable[i,]$focal_start_timeStamp)),"' AND tap_time ='",timeFormat(as.character(backgroundTapsTable[i,]$backgroundTap_timeStamp)),"');", sep="") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } uploadCommentTable <- function(commentsTable, con){ tableHeaders <- fixHeader(names(commentsTable)) for (i in 1:nrow(commentsTable)){ temp <- gsub("'", "`", as.character(commentsTable[i,]$comment_text)) command <- paste("INSERT INTO main_tables.list_comments(device_ID, focal_start_time, comment_time, comment) SELECT '",as.character(commentsTable[i,]$device_ID),"', '", timeFormat(as.character(commentsTable[i,]$focal_start_timeStamp)),"', '", 
timeFormat(as.character(commentsTable[i,]$comment_timeStamp)),"', '", temp,"' WHERE NOT EXISTS (SELECT 1 from main_tables.list_comments WHERE device_ID='",as.character(commentsTable[i,]$device_ID),"' AND focal_start_time ='",timeFormat(as.character(commentsTable[i,]$focal_start_timeStamp)),"' AND comment_time ='",timeFormat(as.character(commentsTable[i,]$comment_timeStamp)),"');", sep="") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } uploadScansIntermediateTables <- function(scansTable, con){ temp1 <- names(scansTable)[-c(1:5, (ncol(scansTable)-7):ncol(scansTable))] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))] for (i in 1:nrow(scansTable)){ temp3 <- unlist(as.list(scansTable[i,6:(ncol(scansTable)-8)]))[match(tableHeaders, temp2)] for(j in 1:length(tableHeaders)){ vecValues <- unlist(strsplit(as.character(temp3[j]), ";")) if(length(na.omit(vecValues))>0){ for(k in 1:length(vecValues)){ command <- paste0("INSERT INTO accessory_tables.scan_data_", tableHeaders[j],"(device_ID, scan_time, scanned_individual_ID, ", tableHeaders[j],") SELECT '",as.character(scansTable[i,]$device_ID),"', '", timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"', '", as.character(scansTable[i,]$scanned_individual_ID),"', '", vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.scan_data_",tableHeaders[j]," WHERE device_ID='",as.character(scansTable[i,]$device_ID),"' AND scan_time='",timeFormat(as.character(scansTable[i,]$scan_timeStamp)),"' AND scanned_individual_ID ='",as.character(scansTable[i,]$scanned_individual_ID),"' AND ",tableHeaders[j]," ='",vecValues[k],"');") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", 
command) dbGetQuery(con, command) } } } } } } uploadBehaviorsIntermediateTables <- function(behaviorsTable, con){ temp1 <- names(behaviorsTable)[-c(1:6, (ncol(behaviorsTable)-4):ncol(behaviorsTable))] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))] for (i in 1:nrow(behaviorsTable)){ temp3 <- unlist(as.list(behaviorsTable[i,7:(ncol(behaviorsTable)-5)]))[match(tableHeaders, temp2)] for(j in 1:length(tableHeaders)){ vecValues <- unlist(strsplit(as.character(temp3[j]), ";")) if(length(na.omit(vecValues))>0){ for(k in 1:length(vecValues)){ command <- paste0("INSERT INTO accessory_tables.list_behaviors_", tableHeaders[j],"(device_ID, behavior_time, actor, subject, ", tableHeaders[j],") SELECT '",as.character(behaviorsTable[i,]$device_ID),"', '", timeFormat(as.character(behaviorsTable[i,]$behavior_timeStamp)),"', '", as.character(behaviorsTable[i,]$actor),"', '", as.character(behaviorsTable[i,]$subject),"', '", vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.list_behaviors_",tableHeaders[j]," WHERE device_ID='",as.character(behaviorsTable[i,]$device_ID),"' AND behavior_time='",timeFormat(as.character(behaviorsTable[i,]$behavior_timeStamp)),"' AND actor='",as.character(behaviorsTable[i,]$actor),"' AND subject='",as.character(behaviorsTable[i,]$subject),"' AND ",tableHeaders[j]," ='",vecValues[k],"');") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } } } } } uploadscanVarsIntermediateTables <- function(scanVarsTable, con){ temp1 <- names(scanVarsTable)[-(1:4)] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))] for (i in 1:nrow(scanVarsTable)){ temp3 <- 
unlist(as.list(scanVarsTable[i,5:ncol(scanVarsTable)]))[match(tableHeaders, temp2)] for(j in 1:length(tableHeaders)){ vecValues <- unlist(strsplit(as.character(temp3[j]), ";")) if(length(na.omit(vecValues))>0){ for(k in 1:length(vecValues)){ command <- paste0("INSERT INTO accessory_tables.scan_variables_", tableHeaders[j],"(device_ID, scan_time, scanVars, ", tableHeaders[j],") SELECT '",as.character(scanVarsTable[i,]$device_ID),"', '", timeFormat(as.character(scanVarsTable[i,]$scan_timeStamp)),"', '",as.character(scanVarsTable[i,]$scanVars),"', '", vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.scan_variables_",tableHeaders[j]," WHERE device_ID='",as.character(scanVarsTable[i,]$device_ID),"' AND scan_time='",timeFormat(as.character(scanVarsTable[i,]$scan_timeStamp)),"' AND scanVars='",as.character(scanVarsTable[i,]$scanVars),"' AND ",tableHeaders[j]," ='",vecValues[k],"');") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } } } } } uploadfocalVarsIntermediateTables <- function(focalVarsTable, con){ temp1 <- names(focalVarsTable)[-(1:3)] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))] for (i in 1:nrow(focalVarsTable)){ temp3 <- unlist(as.list(focalVarsTable[i,4:ncol(focalVarsTable)]))[match(tableHeaders, temp2)] for(j in 1:length(tableHeaders)){ vecValues <- unlist(strsplit(as.character(temp3[j]), ";")) if(length(na.omit(vecValues))>0){ for(k in 1:length(vecValues)){ command <- paste0("INSERT INTO accessory_tables.focal_variables_", tableHeaders[j],"(device_ID, focal_start_time, focalVars, ", tableHeaders[j],") SELECT '", as.character(focalVarsTable[i,]$device_ID),"', '", timeFormat(as.character(focalVarsTable[i,]$focal_start_timeStamp)),"', '", as.character(focalVarsTable[i,]$focalVars),"', '", 
vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.focal_variables_",tableHeaders[j]," WHERE device_ID='",as.character(focalVarsTable[i,]$device_ID),"' AND focal_start_time='",timeFormat(as.character(focalVarsTable[i,]$focal_start_timeStamp)),"' AND focalVars='",as.character(focalVarsTable[i,]$focalVars),"' AND ",tableHeaders[j]," ='",vecValues[k],"');") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } } } } } uploadContinuousVarsIntermediateTables <- function(continuousVarsTable, con){ temp1 <- names(continuousVarsTable)[-(1:3)] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))] for (i in 1:nrow(continuousVarsTable)){ temp3 <- unlist(as.list(continuousVarsTable[i,4:ncol(continuousVarsTable)]))[match(tableHeaders, temp2)] for(j in 1:length(tableHeaders)){ vecValues <- unlist(strsplit(as.character(temp3[j]), ";")) if(length(na.omit(vecValues))>0){ for(k in 1:length(vecValues)){ command <- paste0("INSERT INTO accessory_tables.continuous_focal_variables_", tableHeaders[j],"(device_ID, focal_start_time, continuousVars, ", tableHeaders[j],") SELECT '", as.character(continuousVarsTable[i,]$device_ID),"', '", timeFormat(as.character(continuousVarsTable[i,]$focal_start_timeStamp)),"', '", as.character(continuousVarsTable[i,]$continuousVars),"', '", vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.continuous_focal_variables_",tableHeaders[j]," WHERE device_ID='",as.character(continuousVarsTable[i,]$device_ID),"' AND focal_start_time='",timeFormat(as.character(continuousVarsTable[i,]$focal_start_timeStamp)),"' AND continuousVars ='",as.character(continuousVarsTable[i,]$continuousVars),"' AND ",tableHeaders[j]," ='",vecValues[k],"');") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", 
command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } } } } } uploadSessionVarsIntermediateTables <- function(sessionVarsTable, con){ temp1 <- names(sessionVarsTable)[-(1:2)] temp2 <- fixHeader(temp1) if(length(which(nchar(temp1)==regexpr("[*]", temp1)))>0){ tableHeaders <- temp2[which(nchar(temp1)==regexpr("[*]", temp1))] for (i in 1:nrow(sessionVarsTable)){ temp3 <- unlist(as.list(sessionVarsTable[i,3:ncol(sessionVarsTable)]))[match(tableHeaders, temp2)] for(j in 1:length(tableHeaders)){ vecValues <- unlist(strsplit(as.character(temp3[j]), ";")) if(length(na.omit(vecValues))>0){ for(k in 1:length(vecValues)){ command <- paste0("INSERT INTO accessory_tables.session_variables_", tableHeaders[j],"(device_ID, session_start_time, dayVars, ", tableHeaders[j],") SELECT '",as.character(sessionVarsTable[i,]$device_ID),"', '", timeFormat(as.character(sessionVarsTable[i,]$session_start_timeStamp)),"', '", as.character(sessionVarsTable[i,]$dayVars),"', '", vecValues[k],"' WHERE NOT EXISTS (SELECT 1 from accessory_tables.session_variables_",tableHeaders[j]," WHERE device_ID='",as.character(sessionVarsTable[i,]$device_ID),"' AND session_start_time='",timeFormat(as.character(sessionVarsTable[i,]$session_start_timeStamp)),"' AND dayVars ='",as.character(sessionVarsTable[i,]$dayVars),"' AND ",tableHeaders[j]," ='",vecValues[k],"');") command <- gsub("''", "NULL", command) #command <- gsub("NA", "NULL", command) command <- gsub("'NULL'", "NULL", command) command <- gsub("'NA'", "NULL", command) dbGetQuery(con, command) } } } } } }
utils::globalVariables("density") #' @title Plot frequencies of variables #' @name plot_frq #' #' @description Plot frequencies of a variable as bar graph, histogram, box plot etc. #' #' @note This function only works with variables with integer values (or numeric #' factor levels), i.e. scales / centered variables #' with fractional part may result in unexpected behaviour. #' #' @param ... Optional, unquoted names of variables that should be selected for #' further processing. Required, if \code{data} is a data frame (and no #' vector) and only selected variables from \code{data} should be processed. #' You may also use functions like \code{:} or tidyselect's #' \code{\link[tidyselect]{select_helpers}}. #' @param sort.frq Determines whether categories should be sorted #' according to their frequencies or not. Default is \code{"none"}, so #' categories are not sorted by frequency. Use \code{"asc"} or #' \code{"desc"} for sorting categories ascending or descending order. #' @param geom.colors User defined color for geoms, e.g. \code{geom.colors = "#0080ff"}. #' @param errorbar.color Color of confidence interval bars (error bars). #' Only applies to \code{type = "bar"}. In case of dot plots, error bars #' will have same colors as dots (see \code{geom.colors}). #' @param show.mean Logical, if \code{TRUE}, a vertical line in histograms #' is drawn to indicate the mean value of the variables. Only #' applies to histogram-charts. #' @param show.mean.val Logical, if \code{TRUE} (default), the mean value #' is printed to the vertical line that indicates the variable's #' mean. Only applies to histogram-charts. #' @param show.sd Logical, if \code{TRUE}, the standard deviation #' is annotated as shaded rectangle around the mean intercept #' line. Only applies to histogram-charts. #' @param mean.line.type Numeric value, indicating the linetype of the mean #' intercept line. Only applies to histogram-charts and #' when \code{show.mean = TRUE}. 
#' @param mean.line.size Numeric, size of the mean intercept line. Only #' applies to histogram-charts and when \code{show.mean = TRUE}. #' @param normal.curve Logical, if \code{TRUE}, a normal curve, which is adjusted to the data, #' is plotted over the histogram or density plot. Default is #' \code{FALSE}. Only applies when histograms or density plots are plotted (see \code{type}). #' @param normal.curve.color Color of the normal curve line. Only #' applies if \code{normal.curve = TRUE}. #' @param normal.curve.size Numeric, size of the normal curve line. Only #' applies if \code{normal.curve = TRUE}. #' @param normal.curve.alpha Transparancy level (alpha value) of the normal curve. Only #' applies if \code{normal.curve = TRUE}. #' @param xlim Numeric vector of length two, defining lower and upper axis limits #' of the x scale. By default, this argument is set to \code{NULL}, i.e. the #' x-axis fits to the required range of the data. #' @param axis.title Character vector of length one or two (depending on #' the plot function and type), used as title(s) for the x and y axis. #' If not specified, a default labelling is chosen. #' \strong{Note:} Some plot types do not support this argument. In such #' cases, use the return value and add axis titles manually with #' \code{\link[ggplot2]{labs}}, e.g.: \code{$plot.list[[1]] + labs(x = ...)} #' #' @inheritParams plot_scatter #' @inheritParams plot_grpfrq #' @inheritParams tab_xtab #' #' @return A ggplot-object. 
#' #' @examples #' library(sjlabelled) #' data(efc) #' data(iris) #' #' # simple plots, two different notations #' plot_frq(iris, Species) #' plot_frq(efc$tot_sc_e) #' #' # boxplot #' plot_frq(efc$e17age, type = "box") #' #' if (require("dplyr")) { #' # histogram, pipe-workflow #' efc %>% #' dplyr::select(e17age, c160age) %>% #' plot_frq(type = "hist", show.mean = TRUE) #' #' # bar plot(s) #' plot_frq(efc, e42dep, c172code) #' } #' #' if (require("dplyr") && require("gridExtra")) { #' # grouped data frame, all panels in one plot #' efc %>% #' group_by(e42dep) %>% #' plot_frq(c161sex) %>% #' plot_grid() #' } #' #' library(sjmisc) #' # grouped variable #' ageGrp <- group_var(efc$e17age) #' ageGrpLab <- group_labels(efc$e17age) #' plot_frq(ageGrp, title = get_label(efc$e17age), axis.labels = ageGrpLab) #' #' # plotting confidence intervals. expand grid and v/hjust for text labels #' plot_frq( #' efc$e15relat, type = "dot", show.ci = TRUE, sort.frq = "desc", #' coord.flip = TRUE, expand.grid = TRUE, vjust = "bottom", hjust = "left" #' ) #' #' # histogram with overlayed normal curve #' plot_frq(efc$c160age, type = "h", show.mean = TRUE, show.mean.val = TRUE, #' normal.curve = TRUE, show.sd = TRUE, normal.curve.color = "blue", #' normal.curve.size = 3, ylim = c(0,50)) #' @import ggplot2 #' @importFrom sjstats weighted_sd #' @importFrom sjmisc group_labels group_var to_value frq #' @importFrom sjlabelled set_labels drop_labels #' @importFrom stats na.omit sd weighted.mean dnorm #' @importFrom rlang .data #' @export plot_frq <- function(data, ..., title = "", weight.by = NULL, title.wtd.suffix = NULL, sort.frq = c("none", "asc", "desc"), type = c("bar", "dot", "histogram", "line", "density", "boxplot", "violin"), geom.size = NULL, geom.colors = "#336699", errorbar.color = "darkred", axis.title = NULL, axis.labels = NULL, xlim = NULL, ylim = NULL, wrap.title = 50, wrap.labels = 20, grid.breaks = NULL, expand.grid = FALSE, show.values = TRUE, show.n = TRUE, show.prc = TRUE, 
show.axis.values = TRUE, show.ci = FALSE, show.na = FALSE, show.mean = FALSE, show.mean.val = TRUE, show.sd = TRUE, drop.empty = TRUE, mean.line.type = 2, mean.line.size = 0.5, inner.box.width = 0.15, inner.box.dotsize = 3, normal.curve = FALSE, normal.curve.color = "red", normal.curve.size = 0.8, normal.curve.alpha = 0.4, auto.group = NULL, coord.flip = FALSE, vjust = "bottom", hjust = "center", y.offset = NULL) { # Match arguments ----- type <- match.arg(type) sort.frq <- match.arg(sort.frq) plot_data <- get_dplyr_dot_data(data, dplyr::quos(...)) if (!is.data.frame(plot_data)) { plot_data <- data.frame(plot_data, stringsAsFactors = FALSE) colnames(plot_data) <- deparse(substitute(data)) } pl <- NULL if (inherits(plot_data, "grouped_df")) { # get grouped data grps <- get_grouped_data(plot_data) # now plot everything for (i in seq_len(nrow(grps))) { # copy back labels to grouped data frame tmp <- sjlabelled::copy_labels(grps$data[[i]], data) # prepare argument list, including title tmp.title <- get_grouped_plottitle(plot_data, grps, i, sep = "\n") # plot plots <- lapply(colnames(tmp), function(.d) { plot_frq_helper( var.cnt = tmp[[.d]], title = tmp.title, weight.by = weight.by, title.wtd.suffix, sort.frq, type, geom.size, geom.colors, errorbar.color, axis.title, axis.labels, xlim, ylim, wrap.title, wrap.labels, grid.breaks, expand.grid, show.values, show.n, show.prc, show.axis.values, show.ci, show.na, show.mean, show.mean.val, show.sd, drop.empty, mean.line.type, mean.line.size, inner.box.width, inner.box.dotsize, normal.curve, normal.curve.color, normal.curve.size, normal.curve.alpha, auto.group, coord.flip, vjust, hjust, y.offset, var.name = .d ) }) # add plots, check for NULL results pl <- c(pl, plots) } } else { pl <- lapply(colnames(plot_data), function(.d) { plot_frq_helper( var.cnt = plot_data[[.d]], title, weight.by = weight.by, title.wtd.suffix, sort.frq, type, geom.size, geom.colors, errorbar.color, axis.title, axis.labels, xlim, ylim, wrap.title, 
wrap.labels, grid.breaks, expand.grid, show.values, show.n, show.prc, show.axis.values, show.ci, show.na, show.mean, show.mean.val, show.sd, drop.empty, mean.line.type, mean.line.size, inner.box.width, inner.box.dotsize, normal.curve, normal.curve.color, normal.curve.size, normal.curve.alpha, auto.group, coord.flip, vjust, hjust, y.offset, var.name = .d ) }) if (length(pl) == 1) pl <- pl[[1]] } pl } plot_frq_helper <- function( var.cnt, title, weight.by, title.wtd.suffix, sort.frq, type, geom.size, geom.colors, errorbar.color, axis.title, axis.labels, xlim, ylim, wrap.title, wrap.labels, grid.breaks, expand.grid, show.values, show.n, show.prc, show.axis.values, show.ci, show.na, show.mean, show.mean.val, show.sd, drop.empty, mean.line.type, mean.line.size, inner.box.width, inner.box.dotsize, normal.curve, normal.curve.color, normal.curve.size, normal.curve.alpha, auto.group, coord.flip, vjust, hjust, y.offset, var.name = NULL) { # remove empty value-labels if (drop.empty) { var.cnt <- sjlabelled::drop_labels(var.cnt) } # try to find some useful default offsets for textlabels, # depending on plot range and flipped coordinates if (is.null(y.offset)) { # get maximum y-pos y.offset <- ceiling(max(table(var.cnt)) / 100) if (coord.flip) { if (missing(vjust)) vjust <- "center" if (missing(hjust)) hjust <- "bottom" if (hjust == "bottom") y_offset <- y.offset else if (hjust == "top") y_offset <- -y.offset else y_offset <- 0 } else { if (vjust == "bottom") y_offset <- y.offset else if (vjust == "top") y_offset <- -y.offset else y_offset <- 0 } } else { y_offset <- y.offset } if (is.null(axis.title)) axis.title <- sjlabelled::get_label(var.cnt, def.value = var.name) if (is.null(title)) title <- sjlabelled::get_label(var.cnt, def.value = var.name) # remove titles if empty if (!is.null(axis.title) && axis.title == "") axis.title <- NULL if (!is.null(title) && title == "") title <- NULL # check color argument if (length(geom.colors) > 1) geom.colors <- geom.colors[1] # default 
grid-expansion if (isTRUE(expand.grid) || (missing(expand.grid) && type == "histogram")) { expand.grid <- waiver() } else { expand.grid <- c(0, 0) } # for histograms or density plots... xv <- sjmisc::to_value(stats::na.omit(var.cnt)) # check for nice bin-width defaults if (type %in% c("histogram", "density") && !is.null(geom.size) && geom.size < round(diff(range(xv)) / 40)) message("Using very small binwidth. Consider adjusting `geom.size` argument.") # create second data frame hist.dat <- data.frame(xv) # check default geom.size ----- if (is.null(geom.size)) { geom.size <- dplyr::case_when( type == "bar" ~ .7, type == "dot" ~ 2.5, type == "density" ~ ceiling(diff(range(xv)) / 40), type == "histogram" ~ ceiling(diff(range(xv)) / 40), type == "line" ~ .8, type == "boxplot" ~ .3, type == "violin" ~ .3, TRUE ~ .7 ) } # check whether variable should be auto-grouped ----- if (!is.null(auto.group) && length(unique(var.cnt)) >= auto.group) { message(sprintf( "`%s` has %i unique values and was grouped...", var.name, length(unique(var.cnt)) )) } if (!is.null(weight.by)) { dat <- data.frame( var.cnt = var.cnt, weight.by = weight.by, stringsAsFactors = FALSE ) } else { dat <- data.frame( var.cnt = var.cnt, stringsAsFactors = FALSE ) } # create frequency data frame ----- df.frq <- suppressMessages(sjmisc::frq( x = dat, "var.cnt", sort.frq = sort.frq, weights = "weight.by", auto.grp = auto.group, show.na = show.na )) mydat <- df.frq[[1]] # remove empty if (drop.empty) mydat <- mydat[mydat$frq > 0, ] # add confindence intervals for frequencies total_n = sum(mydat$frq) rel_frq <- as.numeric(mydat$frq / total_n) ci <- 1.96 * suppressWarnings(sqrt(rel_frq * (1 - rel_frq) / total_n)) mydat$upper.ci <- total_n * (rel_frq + ci) mydat$lower.ci <- total_n * (rel_frq - ci) mydat$rel.upper.ci <- rel_frq + ci mydat$rel.lower.ci <- rel_frq - ci # any labels detected? 
if (!is.null(mydat$label) && is.null(axis.labels) && !all(stats::na.omit(mydat$label) == "<none>")) axis.labels <- mydat$label else if (is.null(axis.labels)) axis.labels <- mydat$val # wrap labels axis.labels <- sjmisc::word_wrap(axis.labels, wrap.labels) # define text label position if (show.ci) mydat$label.pos <- mydat$upper.ci else mydat$label.pos <- mydat$frq # Trim labels and title to appropriate size ----- # check length of diagram title and split longer string into new lines # every 50 chars if (!is.null(title)) { # if we have weighted values, say that in diagram's title if (!is.null(title.wtd.suffix)) title <- paste(title, title.wtd.suffix, sep = "") title <- sjmisc::word_wrap(title, wrap.title) } # check length of x-axis title and split longer string into new lines # every 50 chars if (!is.null(axis.title)) axis.title <- sjmisc::word_wrap(axis.title, wrap.title) # count variable may not be a factor! if (is.factor(var.cnt) || is.character(var.cnt)) { var.cnt <- sjmisc::to_value(var.cnt, keep.labels = F) } # If we have a histogram, caluclate means of groups if (is.null(weight.by)) { mittelwert <- mean(var.cnt, na.rm = TRUE) stddev <- stats::sd(var.cnt, na.rm = TRUE) } else { mittelwert <- stats::weighted.mean(var.cnt, weight.by, na.rm = TRUE) stddev <- sjstats::weighted_sd(var.cnt, weights = weight.by) } # If we have boxplots, use different data frame structure if (type == "boxplot" || type == "violin") { mydat <- stats::na.omit(data.frame(cbind( grp = 1, frq = var.cnt, val = var.cnt ))) mydat$grp <- as.factor(mydat$grp) } # Prepare bar charts trimViolin <- FALSE lower_lim <- 0 # calculate upper y-axis-range # if we have a fixed value, use this one here if (!is.null(ylim) && length(ylim) == 2) { lower_lim <- ylim[1] upper_lim <- ylim[2] } else { # if we have boxplots, we have different ranges, so we can adjust # the y axis if (type == "boxplot" || type == "violin") { # use an extra standard-deviation as limits for the y-axis when we have boxplots lower_lim 
<- min(var.cnt, na.rm = TRUE) - floor(stats::sd(var.cnt, na.rm = TRUE)) upper_lim <- max(var.cnt, na.rm = TRUE) + ceiling(stats::sd(var.cnt, na.rm = TRUE)) # make sure that the y-axis is not below zero if (lower_lim < 0) { lower_lim <- 0 trimViolin <- TRUE } } else if (type == "histogram") { # what is the maximum values after binning for histograms? hist.grp.cnt <- ceiling(diff(range(var.cnt, na.rm = T)) / geom.size) # ... or the amount of max. answers per category # add 10% margin to upper limit upper_lim <- max(pretty(table( sjmisc::group_var( var.cnt, size = "auto", n = hist.grp.cnt, append = FALSE ) ) * 1.1)) } else { if (show.ci) upper_lim <- max(pretty(mydat$upper.ci * 1.1)) else upper_lim <- max(pretty(mydat$frq * 1.1)) } } # If we want to include NA, use raw percentages as valid percentages if (show.na) mydat$valid.prc <- mydat$raw.prc # don't display value labels when we have boxplots or violin plots if (type == "boxplot" || type == "violin") show.values <- FALSE if (show.values) { # here we have counts and percentages if (show.prc && show.n) { if (coord.flip) { ggvaluelabels <- geom_text( label = sprintf("%i (%.01f%%)", mydat$frq, mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else { ggvaluelabels <- geom_text( label = sprintf("%i\n(%.01f%%)", mydat$frq, mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } } else if (show.n) { # here we have counts, without percentages ggvaluelabels <- geom_text( label = sprintf("%i", mydat$frq), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else if (show.prc) { # here we have counts, without percentages ggvaluelabels <- geom_text( label = sprintf("%.01f%%", mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else { # no labels ggvaluelabels <- geom_text(aes(y = .data$frq), label = "") } } else { # no labels ggvaluelabels <- geom_text(aes(y = .data$frq), label = "") } # Set up grid breaks 
maxx <- if (is.numeric(mydat$val)) max(mydat$val) + 1 else nrow(mydat) if (is.null(grid.breaks)) { gridbreaks <- waiver() histgridbreaks <- waiver() } else { gridbreaks <- c(seq(lower_lim, upper_lim, by = grid.breaks)) histgridbreaks <- c(seq(lower_lim, maxx, by = grid.breaks)) } # set Y-axis, depending on the calculated upper y-range. # It either corresponds to the maximum amount of cases in the data set # (length of var) or to the highest count of var's categories. if (show.axis.values) { yscale <- scale_y_continuous( limits = c(lower_lim, upper_lim), expand = expand.grid, breaks = gridbreaks ) } else { yscale <- scale_y_continuous( limits = c(lower_lim, upper_lim), expand = expand.grid, breaks = gridbreaks, labels = NULL ) } # bar and dot plot start here! ----- if (type == "bar" || type == "dot") { # define geom if (type == "bar") { geob <- geom_bar(stat = "identity", width = geom.size, fill = geom.colors) } else if (type == "dot") { geob <- geom_point(size = geom.size, colour = geom.colors) } # as factor, but preserve order mydat$val <- factor(mydat$val, levels = unique(mydat$val)) # mydat is a data frame that only contains one variable (var). # Must be declared as factor, so the bars are central aligned to # each x-axis-break. baseplot <- ggplot(mydat, aes(x = .data$val, y = .data$frq)) + geob + yscale + # remove guide / legend guides(fill = FALSE) + # show absolute and percentage value of each bar. ggvaluelabels + # print value labels to the x-axis. # If argument "axis.labels" is NULL, the category numbers (1 to ...) # appear on the x-axis scale_x_discrete(labels = axis.labels) # add error bars if (show.ci) { ebcol <- ifelse(type == "dot", geom.colors, errorbar.color) # print confidence intervalls (error bars) baseplot <- baseplot + geom_errorbar(aes_string(ymin = "lower.ci", ymax = "upper.ci"), colour = ebcol, width = 0) } # check whether coordinates should be flipped, i.e. 
# swap x and y axis if (coord.flip) baseplot <- baseplot + coord_flip() # Start box plot here ----- } else if (type == "boxplot" || type == "violin") { # setup base plot baseplot <- ggplot(mydat, aes_string(x = "grp", y = "frq")) # and x-axis scalex <- scale_x_discrete(labels = "") if (type == "boxplot") { baseplot <- baseplot + geom_boxplot(width = geom.size, fill = geom.colors, notch = show.ci) } else { baseplot <- baseplot + geom_violin(trim = trimViolin, width = geom.size, fill = geom.colors) # if we have a violin plot, add an additional boxplot inside to show # more information if (show.ci) { baseplot <- baseplot + geom_boxplot(width = inner.box.width, fill = "white", notch = TRUE) } else { baseplot <- baseplot + geom_boxplot(width = inner.box.width, fill = "white") } } # if we have boxplots or violon plots, also add a point that indicates # the mean value # different fill colours, because violin boxplots have white background fcsp <- ifelse(type == "boxplot", "white", "black") baseplot <- baseplot + stat_summary(fun.y = "mean", geom = "point", shape = 21, size = inner.box.dotsize, fill = fcsp) # no additional labels for the x- and y-axis, only diagram title baseplot <- baseplot + yscale + scalex # Start density plot here ----- } else if (type == "density") { # First, plot histogram with density curve baseplot <- ggplot(hist.dat, aes(x = .data$xv)) + geom_histogram(aes(y = stat(density)), binwidth = geom.size, fill = geom.colors) + # transparent density curve above bars geom_density(aes(y = stat(density)), fill = "cornsilk", alpha = 0.3) + # remove margins from left and right diagram side scale_x_continuous(expand = expand.grid, breaks = histgridbreaks, limits = xlim) # check whether user wants to overlay the histogram # with a normal curve if (normal.curve) { baseplot <- baseplot + stat_function( fun = dnorm, args = list( mean = mean(hist.dat$xv), sd = stats::sd(hist.dat$xv) ), colour = normal.curve.color, size = normal.curve.size, alpha = normal.curve.alpha 
) } } else { # Since the density curve shows no absolute numbers (counts) on the # y-axis, have also the opportunity to plot "real" histrograms with # counts on the y-axis if (type == "histogram") { # original data needed for normal curve baseplot <- ggplot(mydat) + # second data frame mapped to the histogram geom geom_histogram(data = hist.dat, aes(x = .data$xv), binwidth = geom.size, fill = geom.colors) } else { baseplot <- ggplot(mydat, aes(x = .data$val, y = .data$frq)) + geom_area(alpha = 0.3) + geom_line(size = geom.size, colour = geom.colors) + ggvaluelabels } # check whether user wants to overlay the histogram # with a normal curve if (normal.curve) { baseplot <- baseplot + stat_function( fun = function(xx, mean, sd, n) { n * stats::dnorm(x = xx, mean = mean, sd = sd) }, args = with(mydat, c( mean = mittelwert, sd = stddev, n = length(var.cnt) )), colour = normal.curve.color, size = normal.curve.size, alpha = normal.curve.alpha ) } # if we have a histogram, add mean-lines if (show.mean) { baseplot <- baseplot + # vertical lines indicating the mean geom_vline(xintercept = mittelwert, linetype = mean.line.type, size = mean.line.size) # check whether meanvalue should be shown. if (show.mean.val) { baseplot <- baseplot + # use annotation instead of geomtext, because we need mean value only printed once annotate( "text", x = mittelwert, y = upper_lim, parse = TRUE, label = paste( "italic(bar(x)) == ", round(mittelwert, 1), "~~italic(s) == ", round(stddev, 1) ), vjust = "top", hjust = "top" ) } # check whether the user wants to plot standard deviation area if (show.sd) { baseplot <- baseplot + # first draw shaded rectangle. 
these are by default in grey colour with very high transparancy annotate("rect", xmin = mittelwert - stddev, xmax = mittelwert + stddev, ymin = 0, ymax = c(upper_lim), fill = "grey70", alpha = 0.2) + # draw border-lines for shaded rectangle geom_vline(xintercept = mittelwert - stddev, linetype = 3, size = mean.line.size, alpha = 0.7) + geom_vline(xintercept = mittelwert + stddev, linetype = 3, size = mean.line.size, alpha = 0.7) } } # show absolute and percentage value of each bar. baseplot <- baseplot + yscale + # continuous x-scale for histograms scale_x_continuous(limits = xlim, expand = expand.grid, breaks = histgridbreaks) } # set axes text and baseplot <- baseplot + labs(title = title, x = axis.title, y = NULL) # Check whether ggplot object should be returned or plotted baseplot }
/R/plot_frq.R
no_license
januz/sjPlot
R
false
false
25,666
r
utils::globalVariables("density") #' @title Plot frequencies of variables #' @name plot_frq #' #' @description Plot frequencies of a variable as bar graph, histogram, box plot etc. #' #' @note This function only works with variables with integer values (or numeric #' factor levels), i.e. scales / centered variables #' with fractional part may result in unexpected behaviour. #' #' @param ... Optional, unquoted names of variables that should be selected for #' further processing. Required, if \code{data} is a data frame (and no #' vector) and only selected variables from \code{data} should be processed. #' You may also use functions like \code{:} or tidyselect's #' \code{\link[tidyselect]{select_helpers}}. #' @param sort.frq Determines whether categories should be sorted #' according to their frequencies or not. Default is \code{"none"}, so #' categories are not sorted by frequency. Use \code{"asc"} or #' \code{"desc"} for sorting categories ascending or descending order. #' @param geom.colors User defined color for geoms, e.g. \code{geom.colors = "#0080ff"}. #' @param errorbar.color Color of confidence interval bars (error bars). #' Only applies to \code{type = "bar"}. In case of dot plots, error bars #' will have same colors as dots (see \code{geom.colors}). #' @param show.mean Logical, if \code{TRUE}, a vertical line in histograms #' is drawn to indicate the mean value of the variables. Only #' applies to histogram-charts. #' @param show.mean.val Logical, if \code{TRUE} (default), the mean value #' is printed to the vertical line that indicates the variable's #' mean. Only applies to histogram-charts. #' @param show.sd Logical, if \code{TRUE}, the standard deviation #' is annotated as shaded rectangle around the mean intercept #' line. Only applies to histogram-charts. #' @param mean.line.type Numeric value, indicating the linetype of the mean #' intercept line. Only applies to histogram-charts and #' when \code{show.mean = TRUE}. 
#' @param mean.line.size Numeric, size of the mean intercept line. Only #' applies to histogram-charts and when \code{show.mean = TRUE}. #' @param normal.curve Logical, if \code{TRUE}, a normal curve, which is adjusted to the data, #' is plotted over the histogram or density plot. Default is #' \code{FALSE}. Only applies when histograms or density plots are plotted (see \code{type}). #' @param normal.curve.color Color of the normal curve line. Only #' applies if \code{normal.curve = TRUE}. #' @param normal.curve.size Numeric, size of the normal curve line. Only #' applies if \code{normal.curve = TRUE}. #' @param normal.curve.alpha Transparancy level (alpha value) of the normal curve. Only #' applies if \code{normal.curve = TRUE}. #' @param xlim Numeric vector of length two, defining lower and upper axis limits #' of the x scale. By default, this argument is set to \code{NULL}, i.e. the #' x-axis fits to the required range of the data. #' @param axis.title Character vector of length one or two (depending on #' the plot function and type), used as title(s) for the x and y axis. #' If not specified, a default labelling is chosen. #' \strong{Note:} Some plot types do not support this argument. In such #' cases, use the return value and add axis titles manually with #' \code{\link[ggplot2]{labs}}, e.g.: \code{$plot.list[[1]] + labs(x = ...)} #' #' @inheritParams plot_scatter #' @inheritParams plot_grpfrq #' @inheritParams tab_xtab #' #' @return A ggplot-object. 
#' #' @examples #' library(sjlabelled) #' data(efc) #' data(iris) #' #' # simple plots, two different notations #' plot_frq(iris, Species) #' plot_frq(efc$tot_sc_e) #' #' # boxplot #' plot_frq(efc$e17age, type = "box") #' #' if (require("dplyr")) { #' # histogram, pipe-workflow #' efc %>% #' dplyr::select(e17age, c160age) %>% #' plot_frq(type = "hist", show.mean = TRUE) #' #' # bar plot(s) #' plot_frq(efc, e42dep, c172code) #' } #' #' if (require("dplyr") && require("gridExtra")) { #' # grouped data frame, all panels in one plot #' efc %>% #' group_by(e42dep) %>% #' plot_frq(c161sex) %>% #' plot_grid() #' } #' #' library(sjmisc) #' # grouped variable #' ageGrp <- group_var(efc$e17age) #' ageGrpLab <- group_labels(efc$e17age) #' plot_frq(ageGrp, title = get_label(efc$e17age), axis.labels = ageGrpLab) #' #' # plotting confidence intervals. expand grid and v/hjust for text labels #' plot_frq( #' efc$e15relat, type = "dot", show.ci = TRUE, sort.frq = "desc", #' coord.flip = TRUE, expand.grid = TRUE, vjust = "bottom", hjust = "left" #' ) #' #' # histogram with overlayed normal curve #' plot_frq(efc$c160age, type = "h", show.mean = TRUE, show.mean.val = TRUE, #' normal.curve = TRUE, show.sd = TRUE, normal.curve.color = "blue", #' normal.curve.size = 3, ylim = c(0,50)) #' @import ggplot2 #' @importFrom sjstats weighted_sd #' @importFrom sjmisc group_labels group_var to_value frq #' @importFrom sjlabelled set_labels drop_labels #' @importFrom stats na.omit sd weighted.mean dnorm #' @importFrom rlang .data #' @export plot_frq <- function(data, ..., title = "", weight.by = NULL, title.wtd.suffix = NULL, sort.frq = c("none", "asc", "desc"), type = c("bar", "dot", "histogram", "line", "density", "boxplot", "violin"), geom.size = NULL, geom.colors = "#336699", errorbar.color = "darkred", axis.title = NULL, axis.labels = NULL, xlim = NULL, ylim = NULL, wrap.title = 50, wrap.labels = 20, grid.breaks = NULL, expand.grid = FALSE, show.values = TRUE, show.n = TRUE, show.prc = TRUE, 
show.axis.values = TRUE, show.ci = FALSE, show.na = FALSE, show.mean = FALSE, show.mean.val = TRUE, show.sd = TRUE, drop.empty = TRUE, mean.line.type = 2, mean.line.size = 0.5, inner.box.width = 0.15, inner.box.dotsize = 3, normal.curve = FALSE, normal.curve.color = "red", normal.curve.size = 0.8, normal.curve.alpha = 0.4, auto.group = NULL, coord.flip = FALSE, vjust = "bottom", hjust = "center", y.offset = NULL) { # Match arguments ----- type <- match.arg(type) sort.frq <- match.arg(sort.frq) plot_data <- get_dplyr_dot_data(data, dplyr::quos(...)) if (!is.data.frame(plot_data)) { plot_data <- data.frame(plot_data, stringsAsFactors = FALSE) colnames(plot_data) <- deparse(substitute(data)) } pl <- NULL if (inherits(plot_data, "grouped_df")) { # get grouped data grps <- get_grouped_data(plot_data) # now plot everything for (i in seq_len(nrow(grps))) { # copy back labels to grouped data frame tmp <- sjlabelled::copy_labels(grps$data[[i]], data) # prepare argument list, including title tmp.title <- get_grouped_plottitle(plot_data, grps, i, sep = "\n") # plot plots <- lapply(colnames(tmp), function(.d) { plot_frq_helper( var.cnt = tmp[[.d]], title = tmp.title, weight.by = weight.by, title.wtd.suffix, sort.frq, type, geom.size, geom.colors, errorbar.color, axis.title, axis.labels, xlim, ylim, wrap.title, wrap.labels, grid.breaks, expand.grid, show.values, show.n, show.prc, show.axis.values, show.ci, show.na, show.mean, show.mean.val, show.sd, drop.empty, mean.line.type, mean.line.size, inner.box.width, inner.box.dotsize, normal.curve, normal.curve.color, normal.curve.size, normal.curve.alpha, auto.group, coord.flip, vjust, hjust, y.offset, var.name = .d ) }) # add plots, check for NULL results pl <- c(pl, plots) } } else { pl <- lapply(colnames(plot_data), function(.d) { plot_frq_helper( var.cnt = plot_data[[.d]], title, weight.by = weight.by, title.wtd.suffix, sort.frq, type, geom.size, geom.colors, errorbar.color, axis.title, axis.labels, xlim, ylim, wrap.title, 
wrap.labels, grid.breaks, expand.grid, show.values, show.n, show.prc, show.axis.values, show.ci, show.na, show.mean, show.mean.val, show.sd, drop.empty, mean.line.type, mean.line.size, inner.box.width, inner.box.dotsize, normal.curve, normal.curve.color, normal.curve.size, normal.curve.alpha, auto.group, coord.flip, vjust, hjust, y.offset, var.name = .d ) }) if (length(pl) == 1) pl <- pl[[1]] } pl } plot_frq_helper <- function( var.cnt, title, weight.by, title.wtd.suffix, sort.frq, type, geom.size, geom.colors, errorbar.color, axis.title, axis.labels, xlim, ylim, wrap.title, wrap.labels, grid.breaks, expand.grid, show.values, show.n, show.prc, show.axis.values, show.ci, show.na, show.mean, show.mean.val, show.sd, drop.empty, mean.line.type, mean.line.size, inner.box.width, inner.box.dotsize, normal.curve, normal.curve.color, normal.curve.size, normal.curve.alpha, auto.group, coord.flip, vjust, hjust, y.offset, var.name = NULL) { # remove empty value-labels if (drop.empty) { var.cnt <- sjlabelled::drop_labels(var.cnt) } # try to find some useful default offsets for textlabels, # depending on plot range and flipped coordinates if (is.null(y.offset)) { # get maximum y-pos y.offset <- ceiling(max(table(var.cnt)) / 100) if (coord.flip) { if (missing(vjust)) vjust <- "center" if (missing(hjust)) hjust <- "bottom" if (hjust == "bottom") y_offset <- y.offset else if (hjust == "top") y_offset <- -y.offset else y_offset <- 0 } else { if (vjust == "bottom") y_offset <- y.offset else if (vjust == "top") y_offset <- -y.offset else y_offset <- 0 } } else { y_offset <- y.offset } if (is.null(axis.title)) axis.title <- sjlabelled::get_label(var.cnt, def.value = var.name) if (is.null(title)) title <- sjlabelled::get_label(var.cnt, def.value = var.name) # remove titles if empty if (!is.null(axis.title) && axis.title == "") axis.title <- NULL if (!is.null(title) && title == "") title <- NULL # check color argument if (length(geom.colors) > 1) geom.colors <- geom.colors[1] # default 
grid-expansion if (isTRUE(expand.grid) || (missing(expand.grid) && type == "histogram")) { expand.grid <- waiver() } else { expand.grid <- c(0, 0) } # for histograms or density plots... xv <- sjmisc::to_value(stats::na.omit(var.cnt)) # check for nice bin-width defaults if (type %in% c("histogram", "density") && !is.null(geom.size) && geom.size < round(diff(range(xv)) / 40)) message("Using very small binwidth. Consider adjusting `geom.size` argument.") # create second data frame hist.dat <- data.frame(xv) # check default geom.size ----- if (is.null(geom.size)) { geom.size <- dplyr::case_when( type == "bar" ~ .7, type == "dot" ~ 2.5, type == "density" ~ ceiling(diff(range(xv)) / 40), type == "histogram" ~ ceiling(diff(range(xv)) / 40), type == "line" ~ .8, type == "boxplot" ~ .3, type == "violin" ~ .3, TRUE ~ .7 ) } # check whether variable should be auto-grouped ----- if (!is.null(auto.group) && length(unique(var.cnt)) >= auto.group) { message(sprintf( "`%s` has %i unique values and was grouped...", var.name, length(unique(var.cnt)) )) } if (!is.null(weight.by)) { dat <- data.frame( var.cnt = var.cnt, weight.by = weight.by, stringsAsFactors = FALSE ) } else { dat <- data.frame( var.cnt = var.cnt, stringsAsFactors = FALSE ) } # create frequency data frame ----- df.frq <- suppressMessages(sjmisc::frq( x = dat, "var.cnt", sort.frq = sort.frq, weights = "weight.by", auto.grp = auto.group, show.na = show.na )) mydat <- df.frq[[1]] # remove empty if (drop.empty) mydat <- mydat[mydat$frq > 0, ] # add confindence intervals for frequencies total_n = sum(mydat$frq) rel_frq <- as.numeric(mydat$frq / total_n) ci <- 1.96 * suppressWarnings(sqrt(rel_frq * (1 - rel_frq) / total_n)) mydat$upper.ci <- total_n * (rel_frq + ci) mydat$lower.ci <- total_n * (rel_frq - ci) mydat$rel.upper.ci <- rel_frq + ci mydat$rel.lower.ci <- rel_frq - ci # any labels detected? 
if (!is.null(mydat$label) && is.null(axis.labels) && !all(stats::na.omit(mydat$label) == "<none>")) axis.labels <- mydat$label else if (is.null(axis.labels)) axis.labels <- mydat$val # wrap labels axis.labels <- sjmisc::word_wrap(axis.labels, wrap.labels) # define text label position if (show.ci) mydat$label.pos <- mydat$upper.ci else mydat$label.pos <- mydat$frq # Trim labels and title to appropriate size ----- # check length of diagram title and split longer string into new lines # every 50 chars if (!is.null(title)) { # if we have weighted values, say that in diagram's title if (!is.null(title.wtd.suffix)) title <- paste(title, title.wtd.suffix, sep = "") title <- sjmisc::word_wrap(title, wrap.title) } # check length of x-axis title and split longer string into new lines # every 50 chars if (!is.null(axis.title)) axis.title <- sjmisc::word_wrap(axis.title, wrap.title) # count variable may not be a factor! if (is.factor(var.cnt) || is.character(var.cnt)) { var.cnt <- sjmisc::to_value(var.cnt, keep.labels = F) } # If we have a histogram, caluclate means of groups if (is.null(weight.by)) { mittelwert <- mean(var.cnt, na.rm = TRUE) stddev <- stats::sd(var.cnt, na.rm = TRUE) } else { mittelwert <- stats::weighted.mean(var.cnt, weight.by, na.rm = TRUE) stddev <- sjstats::weighted_sd(var.cnt, weights = weight.by) } # If we have boxplots, use different data frame structure if (type == "boxplot" || type == "violin") { mydat <- stats::na.omit(data.frame(cbind( grp = 1, frq = var.cnt, val = var.cnt ))) mydat$grp <- as.factor(mydat$grp) } # Prepare bar charts trimViolin <- FALSE lower_lim <- 0 # calculate upper y-axis-range # if we have a fixed value, use this one here if (!is.null(ylim) && length(ylim) == 2) { lower_lim <- ylim[1] upper_lim <- ylim[2] } else { # if we have boxplots, we have different ranges, so we can adjust # the y axis if (type == "boxplot" || type == "violin") { # use an extra standard-deviation as limits for the y-axis when we have boxplots lower_lim 
<- min(var.cnt, na.rm = TRUE) - floor(stats::sd(var.cnt, na.rm = TRUE)) upper_lim <- max(var.cnt, na.rm = TRUE) + ceiling(stats::sd(var.cnt, na.rm = TRUE)) # make sure that the y-axis is not below zero if (lower_lim < 0) { lower_lim <- 0 trimViolin <- TRUE } } else if (type == "histogram") { # what is the maximum values after binning for histograms? hist.grp.cnt <- ceiling(diff(range(var.cnt, na.rm = T)) / geom.size) # ... or the amount of max. answers per category # add 10% margin to upper limit upper_lim <- max(pretty(table( sjmisc::group_var( var.cnt, size = "auto", n = hist.grp.cnt, append = FALSE ) ) * 1.1)) } else { if (show.ci) upper_lim <- max(pretty(mydat$upper.ci * 1.1)) else upper_lim <- max(pretty(mydat$frq * 1.1)) } } # If we want to include NA, use raw percentages as valid percentages if (show.na) mydat$valid.prc <- mydat$raw.prc # don't display value labels when we have boxplots or violin plots if (type == "boxplot" || type == "violin") show.values <- FALSE if (show.values) { # here we have counts and percentages if (show.prc && show.n) { if (coord.flip) { ggvaluelabels <- geom_text( label = sprintf("%i (%.01f%%)", mydat$frq, mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else { ggvaluelabels <- geom_text( label = sprintf("%i\n(%.01f%%)", mydat$frq, mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } } else if (show.n) { # here we have counts, without percentages ggvaluelabels <- geom_text( label = sprintf("%i", mydat$frq), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else if (show.prc) { # here we have counts, without percentages ggvaluelabels <- geom_text( label = sprintf("%.01f%%", mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else { # no labels ggvaluelabels <- geom_text(aes(y = .data$frq), label = "") } } else { # no labels ggvaluelabels <- geom_text(aes(y = .data$frq), label = "") } # Set up grid breaks 
maxx <- if (is.numeric(mydat$val)) max(mydat$val) + 1 else nrow(mydat) if (is.null(grid.breaks)) { gridbreaks <- waiver() histgridbreaks <- waiver() } else { gridbreaks <- c(seq(lower_lim, upper_lim, by = grid.breaks)) histgridbreaks <- c(seq(lower_lim, maxx, by = grid.breaks)) } # set Y-axis, depending on the calculated upper y-range. # It either corresponds to the maximum amount of cases in the data set # (length of var) or to the highest count of var's categories. if (show.axis.values) { yscale <- scale_y_continuous( limits = c(lower_lim, upper_lim), expand = expand.grid, breaks = gridbreaks ) } else { yscale <- scale_y_continuous( limits = c(lower_lim, upper_lim), expand = expand.grid, breaks = gridbreaks, labels = NULL ) } # bar and dot plot start here! ----- if (type == "bar" || type == "dot") { # define geom if (type == "bar") { geob <- geom_bar(stat = "identity", width = geom.size, fill = geom.colors) } else if (type == "dot") { geob <- geom_point(size = geom.size, colour = geom.colors) } # as factor, but preserve order mydat$val <- factor(mydat$val, levels = unique(mydat$val)) # mydat is a data frame that only contains one variable (var). # Must be declared as factor, so the bars are central aligned to # each x-axis-break. baseplot <- ggplot(mydat, aes(x = .data$val, y = .data$frq)) + geob + yscale + # remove guide / legend guides(fill = FALSE) + # show absolute and percentage value of each bar. ggvaluelabels + # print value labels to the x-axis. # If argument "axis.labels" is NULL, the category numbers (1 to ...) # appear on the x-axis scale_x_discrete(labels = axis.labels) # add error bars if (show.ci) { ebcol <- ifelse(type == "dot", geom.colors, errorbar.color) # print confidence intervalls (error bars) baseplot <- baseplot + geom_errorbar(aes_string(ymin = "lower.ci", ymax = "upper.ci"), colour = ebcol, width = 0) } # check whether coordinates should be flipped, i.e. 
# swap x and y axis if (coord.flip) baseplot <- baseplot + coord_flip() # Start box plot here ----- } else if (type == "boxplot" || type == "violin") { # setup base plot baseplot <- ggplot(mydat, aes_string(x = "grp", y = "frq")) # and x-axis scalex <- scale_x_discrete(labels = "") if (type == "boxplot") { baseplot <- baseplot + geom_boxplot(width = geom.size, fill = geom.colors, notch = show.ci) } else { baseplot <- baseplot + geom_violin(trim = trimViolin, width = geom.size, fill = geom.colors) # if we have a violin plot, add an additional boxplot inside to show # more information if (show.ci) { baseplot <- baseplot + geom_boxplot(width = inner.box.width, fill = "white", notch = TRUE) } else { baseplot <- baseplot + geom_boxplot(width = inner.box.width, fill = "white") } } # if we have boxplots or violon plots, also add a point that indicates # the mean value # different fill colours, because violin boxplots have white background fcsp <- ifelse(type == "boxplot", "white", "black") baseplot <- baseplot + stat_summary(fun.y = "mean", geom = "point", shape = 21, size = inner.box.dotsize, fill = fcsp) # no additional labels for the x- and y-axis, only diagram title baseplot <- baseplot + yscale + scalex # Start density plot here ----- } else if (type == "density") { # First, plot histogram with density curve baseplot <- ggplot(hist.dat, aes(x = .data$xv)) + geom_histogram(aes(y = stat(density)), binwidth = geom.size, fill = geom.colors) + # transparent density curve above bars geom_density(aes(y = stat(density)), fill = "cornsilk", alpha = 0.3) + # remove margins from left and right diagram side scale_x_continuous(expand = expand.grid, breaks = histgridbreaks, limits = xlim) # check whether user wants to overlay the histogram # with a normal curve if (normal.curve) { baseplot <- baseplot + stat_function( fun = dnorm, args = list( mean = mean(hist.dat$xv), sd = stats::sd(hist.dat$xv) ), colour = normal.curve.color, size = normal.curve.size, alpha = normal.curve.alpha 
) } } else { # Since the density curve shows no absolute numbers (counts) on the # y-axis, have also the opportunity to plot "real" histrograms with # counts on the y-axis if (type == "histogram") { # original data needed for normal curve baseplot <- ggplot(mydat) + # second data frame mapped to the histogram geom geom_histogram(data = hist.dat, aes(x = .data$xv), binwidth = geom.size, fill = geom.colors) } else { baseplot <- ggplot(mydat, aes(x = .data$val, y = .data$frq)) + geom_area(alpha = 0.3) + geom_line(size = geom.size, colour = geom.colors) + ggvaluelabels } # check whether user wants to overlay the histogram # with a normal curve if (normal.curve) { baseplot <- baseplot + stat_function( fun = function(xx, mean, sd, n) { n * stats::dnorm(x = xx, mean = mean, sd = sd) }, args = with(mydat, c( mean = mittelwert, sd = stddev, n = length(var.cnt) )), colour = normal.curve.color, size = normal.curve.size, alpha = normal.curve.alpha ) } # if we have a histogram, add mean-lines if (show.mean) { baseplot <- baseplot + # vertical lines indicating the mean geom_vline(xintercept = mittelwert, linetype = mean.line.type, size = mean.line.size) # check whether meanvalue should be shown. if (show.mean.val) { baseplot <- baseplot + # use annotation instead of geomtext, because we need mean value only printed once annotate( "text", x = mittelwert, y = upper_lim, parse = TRUE, label = paste( "italic(bar(x)) == ", round(mittelwert, 1), "~~italic(s) == ", round(stddev, 1) ), vjust = "top", hjust = "top" ) } # check whether the user wants to plot standard deviation area if (show.sd) { baseplot <- baseplot + # first draw shaded rectangle. 
these are by default in grey colour with very high transparancy annotate("rect", xmin = mittelwert - stddev, xmax = mittelwert + stddev, ymin = 0, ymax = c(upper_lim), fill = "grey70", alpha = 0.2) + # draw border-lines for shaded rectangle geom_vline(xintercept = mittelwert - stddev, linetype = 3, size = mean.line.size, alpha = 0.7) + geom_vline(xintercept = mittelwert + stddev, linetype = 3, size = mean.line.size, alpha = 0.7) } } # show absolute and percentage value of each bar. baseplot <- baseplot + yscale + # continuous x-scale for histograms scale_x_continuous(limits = xlim, expand = expand.grid, breaks = histgridbreaks) } # set axes text and baseplot <- baseplot + labs(title = title, x = axis.title, y = NULL) # Check whether ggplot object should be returned or plotted baseplot }
# FUNCTION TO BUILD THE CORE DATASET
#
# Arguments:
#   eins      - character vector of nonprofit EINs to sample
#               (NULL = all filers found in the index)
#   index     - database of all electronic filers provided by the IRS
#               (NULL = rebuild it via buildIndex())
#   years     - vector of which filing years to collect
#   form.type - which type(s) of form to collect ("990" and/or "990EZ")
#   modules   - what sections of data to build ("all" currently expands
#               to "basic"; more modules are planned, see commented sources)
#
# Value: a data frame with one row per successfully scraped filing,
#        or NULL when nothing was scraped.

buildCore <- function( eins=NULL, index=NULL, years, form.type=c("990","990EZ"), modules="all" )
{

	library( dplyr )
	library( xml2 )
	# library( R.utils )

	# LOAD ALL REQUIRED FUNCTIONS
	source("https://raw.githubusercontent.com/lecy/Open-Data-for-Nonprofit-Research/master/Helper_Functions/buildIndex.R")
	source("https://raw.githubusercontent.com/lecy/Open-Data-for-Nonprofit-Research/master/Helper_Functions/scrapeXML.R")
	source("https://raw.githubusercontent.com/lecy/Open-Data-for-Nonprofit-Research/master/Helper_Functions/getBasicInfo.R")
	# source("https://raw.githubusercontent.com/lecy/Open-Data-for-Nonprofit-Research/master/Helper%20Functions/getRevExp.R")
	# source("https://raw.githubusercontent.com/lecy/Open-Data-for-Nonprofit-Research/master/Helper%20Functions/getMission.R")

	# BUILD NECESSARY RESOURCES
	if( is.null(index) ){ index <- buildIndex() }
	if( is.null(eins) ){ eins <- unique( index$EIN ) }

	# identical() instead of == : safe if a caller ever passes a vector of modules
	if( identical(modules, "all") ){ modules <- c("basic") }
	# { modules <- c("basic","revexp","mission"...) }

	# SUBSET INDEX FILE BY SPECIFIED YEARS AND FORMS
	these <- index[ index$EIN %in% eins &
	                index$FilingYear %in% years &
	                index$FormType %in% form.type, "URL" ]

	# NEED THIS TO BUILD CONSISTENT DATA.FRAMES WHEN VARIABLES ARE NOT PRESENT
	# http://stackoverflow.com/questions/16951080/can-list-objects-be-created-in-r-that-name-themselves-based-on-input-object-name
	namedList <- function(...){
		names <- as.list(substitute(list(...)))[-1L]
		result <- list(...)
		names(result) <- names
		result[sapply(result, function(x){ length(x) == 0 })] <- NA
		result[sapply(result, is.null)] <- NA
		result
	}

	# seq_along() is safe when `these` is empty (1:length(x) would iterate
	# over c(1, 0)); scraped filings are collected in a preallocated list and
	# bound once at the end, avoiding the O(n^2) cost of growing the result
	# with bind_rows() inside the loop
	results <- vector( "list", length(these) )
	for( i in seq_along(these) )
	{
		results[[i]] <- scrapeXML( these[i], form.type, modules )
	}

	results <- results[ ! sapply( results, is.null ) ]
	if( length(results) == 0 ){ return( NULL ) }  # same as the original: NULL when no filings scraped

	# need to clean up variable types
	core <- bind_rows( results )
	return( core )
}
/Helper_Functions/buildCore.R
no_license
shommazumder/Open-Data-for-Nonprofit-Research
R
false
false
2,306
r
# FUNCTION TO BUILD THE CORE DATASET # # Arguments: # ein - character vector of nonprofits to sample # years - vector of which years to collect # form.type - which type of data to collect # modules - what sections of data to build # index - database of all electronic filers provided by the IRS buildCore <- function( eins=NULL, index=NULL, years, form.type=c("990","990EZ"), modules="all" ) { library( dplyr ) library( xml2 ) # library( R.utils ) # LOAD ALL REQUIRED FUNCTIONS source("https://raw.githubusercontent.com/lecy/Open-Data-for-Nonprofit-Research/master/Helper_Functions/buildIndex.R") source("https://raw.githubusercontent.com/lecy/Open-Data-for-Nonprofit-Research/master/Helper_Functions/scrapeXML.R") source("https://raw.githubusercontent.com/lecy/Open-Data-for-Nonprofit-Research/master/Helper_Functions/getBasicInfo.R") # source("https://raw.githubusercontent.com/lecy/Open-Data-for-Nonprofit-Research/master/Helper%20Functions/getRevExp.R") # source("https://raw.githubusercontent.com/lecy/Open-Data-for-Nonprofit-Research/master/Helper%20Functions/getMission.R") # BUILD NECESSARY RESOURCES if( is.null(index) ) { index <- buildIndex() } if( is.null(eins) ) { eins <- unique( index$EIN ) } if( modules == "all" ) { modules <- c("basic") } # { modules <- c("basic","revexp","mission"...) } # SUBSET INDEX FILE BY SPECIFIED YEARS AND FORMS these <- index[ index$EIN %in% eins & index$FilingYear %in% years & index$FormType %in% form.type , "URL" ] # NEED THIS TO BUILD CONSISTENT DATA.FRAMES WHEN VARIABLES ARE NOT PRESENT # http://stackoverflow.com/questions/16951080/can-list-objects-be-created-in-r-that-name-themselves-based-on-input-object-name namedList <- function(...){ names <- as.list(substitute(list(...)))[-1L] result <- list(...) names(result) <- names result[sapply(result, function(x){length(x)==0})] <- NA result[sapply(result, is.null)] <- NA result } core <- NULL for( i in 1:length(these) ) { one.npo <- scrapeXML( these[i], form.type, modules ) if( ! 
is.null(one.npo) ) { core <- bind_rows( core, one.npo ) } } # need to clean up variable types return( core ) }
## Wrapper for running simulations of the storage effect
## model with variable resource.
## Author: Andrew Tredennick
## Email: atredenn@gmail.com

# clear the workspace (this is a standalone HPC batch script)
rm(list=ls())

## Read the resource-variability index for this job from the command line;
## the value arrives as "-<number>", so strip the leading dash before coercing
args <- commandArgs(trailingOnly = F)
myargument <- args[length(args)]
myargument <- sub("-","",myargument)
resource <- as.numeric(myargument)

####
####  Initial conditions and global variables ------------------------
####
maxTime <- 2500
burn.in <- 1000
DNR <- c(D=c(1,1),N=c(1,1),R=100)      # initial dormant biomass, live biomass, resource
Rmu <- 2                               # mean resource pulse (on log scale)
Rsd_vec <- seq(0,3,by=0.25)
Rsd <- Rsd_vec[resource]               # std dev of resource pulses (on log scale) for this job
sigE <- c(0, 0.4, 1, 2.5, 5, 7.5, 10)  # environmental cue variability
rho_vec <- seq(-1,1,by=0.25)           # environmental cue correlation between species

parms <- list(
  r = c(5,5),           # max growth rate for genotype A and a
  k1 = c(20,20),        # right offset for growth rates
  k2 = c(0.08,0.08),    # rates at which max is approached
  mN = c(0.5,0.5),      # live biomass loss (mortality) rates
  mD = c(0.001, 0.001)  # dormant biomass loss (mortality) rates
)

resource_sims <- length(Rsd_vec)
env_cue_sims <- length(sigE)
rho_sims <- length(rho_vec)
sims_per_level <- 100

####
####  Load relevant libraries and source the model --------------------
####
library('deSolve')
library('mvtnorm')
source("semi_discrete_consumer_resource_fxns.R")

####
####  Loop through simulation sets -----------------------------------
####
# seed row of NAs; stripped off again before saving
out_sims_all <- data.frame(sim=NA, rho=NA, sig_e=NA, sig_r=NA,
                           sd_n=NA, mu_n=NA, cv_n=NA, buffer=NA)

simTime <- seq(1,maxTime,by=1)  # vector of time steps to get output from deSolve

for(cue in 1:env_cue_sims){
  for(rho in 1:rho_sims){
    for(sim in 1:sims_per_level){
      # environmental germination cues for the two species
      # (gVec1/gVec2 are read by the sourced model functions)
      gVec <- getG(sigE = sigE[cue], rho = rho_vec[rho], nTime = maxTime)
      gVec1 <- gVec[,1]
      gVec2 <- gVec[,2]

      # Set random resource fluctuations (read by the sourced model functions)
      Rvector <- rlnorm(maxTime,Rmu,Rsd)

      # Run the model
      output = as.data.frame(ode(y = DNR, times = simTime, func = updateDNR,
                                 parms = parms,
                                 events = list(func = gfun, times=simTime)))

      # Collect outputs over the post-burn-in window
      biomass_sd <- sd(rowSums(output[burn.in:maxTime,2:3]))
      biomass_mu <- mean(rowSums(output[burn.in:maxTime,2:3]))
      biomass_cv <- biomass_sd/biomass_mu
      resource_cv <- sd(output[burn.in:maxTime,4])/mean(output[burn.in:maxTime,4])
      buffer <- biomass_cv/resource_cv

      out_sims <- data.frame(sim=NA, rho=NA, sig_e=NA, sig_r=NA,
                             sd_n=NA, mu_n=NA, cv_n=NA, buffer=NA)
      out_sims$sim <- sim
      out_sims$rho <- rho_vec[rho]
      out_sims$sig_e <- sigE[cue]
      # BUG FIX: was `Rsd[resource]` -- Rsd is already the scalar
      # Rsd_vec[resource], so indexing it a second time recorded NA
      # for every job with resource > 1
      out_sims$sig_r <- Rsd
      out_sims$sd_n <- biomass_sd
      out_sims$mu_n <- biomass_mu
      out_sims$cv_n <- biomass_cv
      out_sims$buffer <- buffer
      out_sims_all <- rbind(out_sims_all, out_sims)
    } #next simulation
  } #next rho
} #next cue variability level

####
####  Save output ------------------------------------------
####
out_sims_all <- out_sims_all[2:nrow(out_sims_all),]  # drop the NA seed row
out_file <- paste("storage_effect_simulation_output_resource", resource, ".rds", sep="")
saveRDS(out_sims_all, out_file)
/modelCode/oldVersions/wrapper_storage_effect_simulations_hpc.R
permissive
atredennick/Coexistence-Stability
R
false
false
3,469
r
## Wrapper for running simulations of the storage effect ## model with variable resource. ## Author: Andrew Tredennick ## Email: atredenn@gmail.com # clear the workspace rm(list=ls()) ## Set do_year for validation from command line prompt args <- commandArgs(trailingOnly = F) myargument <- args[length(args)] myargument <- sub("-","",myargument) resource <- as.numeric(myargument) #### #### Initial conditions and global variables ------------------------ #### maxTime <- 2500 burn.in <- 1000 DNR <- c(D=c(1,1),N=c(1,1),R=100) Rmu <- 2 #mean resource pulse (on log scale) Rsd_vec <- seq(0,3,by=0.25) Rsd <- Rsd_vec[resource] #std dev of resource pulses (on log scale) sigE <- c(0, 0.4, 1, 2.5, 5, 7.5, 10) #environmental cue variability rho_vec <- seq(-1,1,by=0.25) #environmental cue correlation between species parms <- list( r = c(5,5), #max growth rate for genotype A and a k1 = c(20,20), #right offset for growth rates k2 = c(0.08,0.08), #rates at which max is approached mN = c(0.5,0.5), #live biomass loss (mortality) rates mD = c(0.001, 0.001) #dormant biomass loss (mortality) rates ) resource_sims <- length(Rsd_vec) env_cue_sims <- length(sigE) rho_sims <- length(rho_vec) sims_per_level <- 100 #### #### Load relevant libraries and source the model -------------------- #### library('deSolve') library('mvtnorm') source("semi_discrete_consumer_resource_fxns.R") #### #### Loop through simulation sets ----------------------------------- #### out_sims_all <- data.frame(sim=NA, rho=NA, sig_e=NA, sig_r=NA, sd_n=NA, mu_n=NA, cv_n=NA, buffer=NA) simTime <- seq(1,maxTime,by=1) #creates a vector of time steps to get output from deSolve for(cue in 1:env_cue_sims){ for(rho in 1:rho_sims){ for(sim in 1:sims_per_level){ gVec <- getG(sigE = sigE[cue], rho = rho_vec[rho], nTime = maxTime) gVec1 <- gVec[,1] gVec2 <- gVec[,2] # Set random resource fluctuations Rvector <- rlnorm(maxTime,Rmu,Rsd) # Run the model output = as.data.frame(ode(y = DNR, times = simTime, func = updateDNR, parms = 
parms, events = list(func = gfun, times=simTime))) # Collect outputs biomass_sd <- sd(rowSums(output[burn.in:maxTime,2:3])) biomass_mu <- mean(rowSums(output[burn.in:maxTime,2:3])) biomass_cv <- biomass_sd/biomass_mu resource_cv <- sd(output[burn.in:maxTime,4])/mean(output[burn.in:maxTime,4]) buffer <- biomass_cv/resource_cv out_sims <- data.frame(sim=NA, rho=NA, sig_e=NA, sig_r=NA, sd_n=NA, mu_n=NA, cv_n=NA, buffer=NA) out_sims$sim <- sim out_sims$rho <- rho_vec[rho] out_sims$sig_e <- sigE[cue] out_sims$sig_r <- Rsd[resource] out_sims$sd_n <- biomass_sd out_sims$mu_n <- biomass_mu out_sims$cv_n <- biomass_cv out_sims$buffer <- buffer out_sims_all <- rbind(out_sims_all, out_sims) } #next simulation } #next rho } #next cue variability level #### #### Save output ------------------------------------------ #### out_sims_all <- out_sims_all[2:nrow(out_sims_all),] out_file <- paste("storage_effect_simulation_output_resource", resource, ".rds", sep="") saveRDS(out_sims_all, out_file)
# Install and load packages package_names <- c("survey","dplyr","foreign","devtools") lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x)) lapply(package_names, require, character.only=T) install_github("e-mitchell/meps_r_pkg/MEPS") library(MEPS) options(survey.lonely.psu="adjust") # Load FYC file FYC <- read.xport('C:/MEPS/.FYC..ssp'); year <- .year. FYC <- FYC %>% mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>% mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X)) FYC$ind = 1 # Adults advised to quit smoking if(year == 2002) FYC <- FYC %>% rename(ADNSMK42 = ADDSMK42) FYC <- FYC %>% mutate( adult_nosmok = recode_factor(ADNSMK42, .default = "Missing", .missing = "Missing", "1" = "Told to quit", "2" = "Not told to quit", "3" = "Had no visits in the last 12 months", "-9" = "Not ascertained", "-1" = "Inapplicable")) # Poverty status if(year == 1996) FYC <- FYC %>% rename(POVCAT96 = POVCAT) FYC <- FYC %>% mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing", "1" = "Negative or poor", "2" = "Near-poor", "3" = "Low income", "4" = "Middle income", "5" = "High income")) SAQdsgn <- svydesign( id = ~VARPSU, strata = ~VARSTR, weights = ~SAQWT.yy.F, data = FYC, nest = TRUE) results <- svyby(~adult_nosmok, FUN = svytotal, by = ~poverty, design = subset(SAQdsgn, ADSMOK42==1 & CHECK53==1)) print(results)
/mepstrends/hc_care/json/code/r/totPOP__poverty__adult_nosmok__.r
permissive
RandomCriticalAnalysis/MEPS-summary-tables
R
false
false
1,550
r
# Install and load packages package_names <- c("survey","dplyr","foreign","devtools") lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x)) lapply(package_names, require, character.only=T) install_github("e-mitchell/meps_r_pkg/MEPS") library(MEPS) options(survey.lonely.psu="adjust") # Load FYC file FYC <- read.xport('C:/MEPS/.FYC..ssp'); year <- .year. FYC <- FYC %>% mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>% mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X)) FYC$ind = 1 # Adults advised to quit smoking if(year == 2002) FYC <- FYC %>% rename(ADNSMK42 = ADDSMK42) FYC <- FYC %>% mutate( adult_nosmok = recode_factor(ADNSMK42, .default = "Missing", .missing = "Missing", "1" = "Told to quit", "2" = "Not told to quit", "3" = "Had no visits in the last 12 months", "-9" = "Not ascertained", "-1" = "Inapplicable")) # Poverty status if(year == 1996) FYC <- FYC %>% rename(POVCAT96 = POVCAT) FYC <- FYC %>% mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing", "1" = "Negative or poor", "2" = "Near-poor", "3" = "Low income", "4" = "Middle income", "5" = "High income")) SAQdsgn <- svydesign( id = ~VARPSU, strata = ~VARSTR, weights = ~SAQWT.yy.F, data = FYC, nest = TRUE) results <- svyby(~adult_nosmok, FUN = svytotal, by = ~poverty, design = subset(SAQdsgn, ADSMOK42==1 & CHECK53==1)) print(results)
## File Name: pmle_eval_prior_deriv.R ## File Version: 0.05 pmle_eval_prior_deriv <- function(prior, pars, h=1e-4, eps=1e-100) { abs_par <- abs(pars) hvec <- h * ifelse(abs_par > 1, abs_par, 1) priorval1 <- pmle_eval_prior(pars=pars+hvec, prior=prior) priorval2 <- pmle_eval_prior(pars=pars-hvec, prior=prior) prior_grad <- ( priorval1 - priorval2 ) / ( 2 * hvec ) return(prior_grad) }
/R/pmle_eval_prior_deriv.R
no_license
alexanderrobitzsch/LAM
R
false
false
411
r
## File Name: pmle_eval_prior_deriv.R ## File Version: 0.05 pmle_eval_prior_deriv <- function(prior, pars, h=1e-4, eps=1e-100) { abs_par <- abs(pars) hvec <- h * ifelse(abs_par > 1, abs_par, 1) priorval1 <- pmle_eval_prior(pars=pars+hvec, prior=prior) priorval2 <- pmle_eval_prior(pars=pars-hvec, prior=prior) prior_grad <- ( priorval1 - priorval2 ) / ( 2 * hvec ) return(prior_grad) }
library(ElemStatLearn) data(vowel.train) data(vowel.test) vowel.train$y <- factor(vowel.train$y) vowel.test$y <- factor(vowel.test$y) set.seed(33833) # Fit a random forest predictor relating the factor variable y to the remaining variables. a <- randomForest(y ~ ., data = vowel.train, importance = FALSE) b <- varImp(a) order(b)
/RandomForest.R
no_license
JDeanThomas/Caret-ML
R
false
false
333
r
library(ElemStatLearn) data(vowel.train) data(vowel.test) vowel.train$y <- factor(vowel.train$y) vowel.test$y <- factor(vowel.test$y) set.seed(33833) # Fit a random forest predictor relating the factor variable y to the remaining variables. a <- randomForest(y ~ ., data = vowel.train, importance = FALSE) b <- varImp(a) order(b)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DBResult.R \name{dbColumnInfo} \alias{dbColumnInfo} \title{Information about result types} \usage{ dbColumnInfo(res, ...) } \arguments{ \item{res}{An object inheriting from \linkS4class{DBIResult}.} \item{...}{Other arguments passed on to methods.} } \value{ A data.frame with one row per output field in \code{res}. Methods MUST include \code{name}, \code{field.type} (the SQL type), and \code{data.type} (the R data type) columns, and MAY contain other database specific information like scale and precision or whether the field can store \code{NULL}s. } \description{ Produces a data.frame that describes the output of a query. The data.frame should have as many rows as there are output fields in the result set, and each column in the data.frame should describe an aspect of the result set field (field name, type, etc.) } \examples{ con <- dbConnect(RSQLite::SQLite(), ":memory:") rs <- dbSendQuery(con, "SELECT 1 AS a, 2 AS b") dbColumnInfo(rs) dbFetch(rs) dbClearResult(rs) dbDisconnect(con) } \seealso{ Other DBIResult generics: \code{\link{DBIResult-class}}, \code{\link{dbBind}}, \code{\link{dbClearResult}}, \code{\link{dbFetch}}, \code{\link{dbGetInfo}}, \code{\link{dbGetRowCount}}, \code{\link{dbGetRowsAffected}}, \code{\link{dbGetStatement}}, \code{\link{dbHasCompleted}}, \code{\link{dbIsValid}}, \code{\link{dbQuoteIdentifier}}, \code{\link{dbQuoteLiteral}}, \code{\link{dbQuoteString}}, \code{\link{dbUnquoteIdentifier}} } \concept{DBIResult generics}
/man/dbColumnInfo.Rd
no_license
jimhester/DBI
R
false
true
1,573
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DBResult.R \name{dbColumnInfo} \alias{dbColumnInfo} \title{Information about result types} \usage{ dbColumnInfo(res, ...) } \arguments{ \item{res}{An object inheriting from \linkS4class{DBIResult}.} \item{...}{Other arguments passed on to methods.} } \value{ A data.frame with one row per output field in \code{res}. Methods MUST include \code{name}, \code{field.type} (the SQL type), and \code{data.type} (the R data type) columns, and MAY contain other database specific information like scale and precision or whether the field can store \code{NULL}s. } \description{ Produces a data.frame that describes the output of a query. The data.frame should have as many rows as there are output fields in the result set, and each column in the data.frame should describe an aspect of the result set field (field name, type, etc.) } \examples{ con <- dbConnect(RSQLite::SQLite(), ":memory:") rs <- dbSendQuery(con, "SELECT 1 AS a, 2 AS b") dbColumnInfo(rs) dbFetch(rs) dbClearResult(rs) dbDisconnect(con) } \seealso{ Other DBIResult generics: \code{\link{DBIResult-class}}, \code{\link{dbBind}}, \code{\link{dbClearResult}}, \code{\link{dbFetch}}, \code{\link{dbGetInfo}}, \code{\link{dbGetRowCount}}, \code{\link{dbGetRowsAffected}}, \code{\link{dbGetStatement}}, \code{\link{dbHasCompleted}}, \code{\link{dbIsValid}}, \code{\link{dbQuoteIdentifier}}, \code{\link{dbQuoteLiteral}}, \code{\link{dbQuoteString}}, \code{\link{dbUnquoteIdentifier}} } \concept{DBIResult generics}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/jointPipeline.R \name{jointPipeline-shiny} \alias{jointPipeline-shiny} \alias{jointPipelineOutput} \alias{renderJointPipeline} \title{Shiny bindings for jointPipeline} \usage{ jointPipelineOutput(outputId, width = "100\%", height = "400px") renderJointPipeline(expr, env = parent.frame(), quoted = FALSE) } \arguments{ \item{outputId}{output variable to read from} \item{width, height}{Must be a valid CSS unit (like \code{'100\%'}, \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a string and have \code{'px'} appended.} \item{expr}{An expression that generates a jointPipeline} \item{env}{The environment in which to evaluate \code{expr}.} \item{quoted}{Is \code{expr} a quoted expression (with \code{quote()})? This is useful if you want to save an expression in a variable.} } \description{ Output and render functions for using jointPipeline within Shiny applications and interactive Rmd documents. }
/man/jointPipeline-shiny.Rd
no_license
harveyl888/pipelineR
R
false
true
1,011
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/jointPipeline.R \name{jointPipeline-shiny} \alias{jointPipeline-shiny} \alias{jointPipelineOutput} \alias{renderJointPipeline} \title{Shiny bindings for jointPipeline} \usage{ jointPipelineOutput(outputId, width = "100\%", height = "400px") renderJointPipeline(expr, env = parent.frame(), quoted = FALSE) } \arguments{ \item{outputId}{output variable to read from} \item{width, height}{Must be a valid CSS unit (like \code{'100\%'}, \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a string and have \code{'px'} appended.} \item{expr}{An expression that generates a jointPipeline} \item{env}{The environment in which to evaluate \code{expr}.} \item{quoted}{Is \code{expr} a quoted expression (with \code{quote()})? This is useful if you want to save an expression in a variable.} } \description{ Output and render functions for using jointPipeline within Shiny applications and interactive Rmd documents. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot_kurt} \alias{plot_kurt} \title{Plot Kurtosis} \usage{ plot_kurt(data, std = FALSE) } \arguments{ \item{data}{Dataframe of results.} \item{std}{Logical. If \code{TRUE}, standardized indirect effect. If \code{FALSE}, unstandardized indirect effect.} } \description{ Plot Kurtosis } \seealso{ Other plotting functions: \code{\link{plot_bias}()}, \code{\link{plot_miss}()}, \code{\link{plot_power}()}, \code{\link{plot_rmse}()}, \code{\link{plot_type1}()} } \author{ Ivan Jacob Agaloos Pesigan } \concept{plotting functions} \keyword{plot}
/man/plot_kurt.Rd
permissive
jeksterslabds/jeksterslabRmedsimple
R
false
true
634
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{plot_kurt} \alias{plot_kurt} \title{Plot Kurtosis} \usage{ plot_kurt(data, std = FALSE) } \arguments{ \item{data}{Dataframe of results.} \item{std}{Logical. If \code{TRUE}, standardized indirect effect. If \code{FALSE}, unstandardized indirect effect.} } \description{ Plot Kurtosis } \seealso{ Other plotting functions: \code{\link{plot_bias}()}, \code{\link{plot_miss}()}, \code{\link{plot_power}()}, \code{\link{plot_rmse}()}, \code{\link{plot_type1}()} } \author{ Ivan Jacob Agaloos Pesigan } \concept{plotting functions} \keyword{plot}
#' @title binomial variable #' @description return the binomial variables #' @param trials the number of tirals #' @param prob the probability of a single success #' @return a list that contains the number of tirals and the probability of a single success #' @export #' @examples #' #' bin1 <- bin_variable(trials = 10, p = 0.3) #' bin_variable <- function(trials, prob) { x <- list(trials=trials, prob=prob) class(x) <- "binvar" return(x) } #' @export print.binvar <- function(x,...) { cat('"Binomial Variable"', "\n") cat("\n") cat('Parameters', "\n") cat("- number of trials:", x$trials, "\n") cat("- prob of success:", x$prob, "\n") invisible(x) } #' @export summary.binvar <- function(x,...) { mean <- aux_mean(x$trials,x$prob) variance <- aux_variance(x$trials,x$prob) mode <- aux_mode(x$trials,x$prob) skewness <- aux_skewness(x$trials,x$prob) kurtosis <- aux_kurtosis(x$trials,x$prob) x <- list(trials=x$trials,prob=x$prob, mean=mean,variance=variance, mode=mode,skewness=skewness, kurtosis=kurtosis) class(x) <- "summary.binvar" return(x) } #' @export print.summary.binvar <- function(x,...) { cat('"Summary Binomial"', "\n") cat("\n") cat('Parameters', "\n") cat("- number of trials:", x$trials, "\n") cat("- prob of success:", x$prob, "\n") cat("\n") cat('Measures', "\n") cat('- mean:',x$mean, "\n") cat('- variance:',x$variance, "\n") cat('- mode:',x$mode,"\n") cat('- skewness:',x$skewness, "\n") cat('- kurtosis:',x$kurtosis, "\n") invisible(x) }
/assignment/workout03/binomial/R/bin_variabable.R
no_license
stat133-sp19/hw-stat133-Jay1995712
R
false
false
1,574
r
#' @title binomial variable #' @description return the binomial variables #' @param trials the number of tirals #' @param prob the probability of a single success #' @return a list that contains the number of tirals and the probability of a single success #' @export #' @examples #' #' bin1 <- bin_variable(trials = 10, p = 0.3) #' bin_variable <- function(trials, prob) { x <- list(trials=trials, prob=prob) class(x) <- "binvar" return(x) } #' @export print.binvar <- function(x,...) { cat('"Binomial Variable"', "\n") cat("\n") cat('Parameters', "\n") cat("- number of trials:", x$trials, "\n") cat("- prob of success:", x$prob, "\n") invisible(x) } #' @export summary.binvar <- function(x,...) { mean <- aux_mean(x$trials,x$prob) variance <- aux_variance(x$trials,x$prob) mode <- aux_mode(x$trials,x$prob) skewness <- aux_skewness(x$trials,x$prob) kurtosis <- aux_kurtosis(x$trials,x$prob) x <- list(trials=x$trials,prob=x$prob, mean=mean,variance=variance, mode=mode,skewness=skewness, kurtosis=kurtosis) class(x) <- "summary.binvar" return(x) } #' @export print.summary.binvar <- function(x,...) { cat('"Summary Binomial"', "\n") cat("\n") cat('Parameters', "\n") cat("- number of trials:", x$trials, "\n") cat("- prob of success:", x$prob, "\n") cat("\n") cat('Measures', "\n") cat('- mean:',x$mean, "\n") cat('- variance:',x$variance, "\n") cat('- mode:',x$mode,"\n") cat('- skewness:',x$skewness, "\n") cat('- kurtosis:',x$kurtosis, "\n") invisible(x) }
initMatrixWithNEHTrue <- read.csv(file="config_0_mean.csv", header=FALSE, sep=",") initMatrixWithNEHFalse <- read.csv(file="config_1_mean.csv", header=FALSE, sep=",") iteration <- c(initMatrixWithNEHTrue[1]$V1) jpeg('result_time_mean_iteration.jpg') yrange <- range(initMatrixWithNEHTrue[2]$V2, initMatrixWithNEHFalse[2]$V2) xrange <- range(initMatrixWithNEHTrue[3]$V3, initMatrixWithNEHFalse[3]$V3) plot(iteration, initMatrixWithNEHTrue[2]$V2, type="l", ylim=yrange, col="blue", ann=FALSE) lines(iteration, initMatrixWithNEHFalse[2]$V2, type="l", col="green") title(main='Update mit Elitelösung 50 Jobs', col.main="red", font.main=4) title(xlab='Iteration') title(ylab='durchschnittliche Fertigstellungszeit') legend("topright", legend=c("true", "false"), col=c("blue", "green"), lty=1:1, cex=0.8) jpeg('result_time_mean_calculationtime.jpg') plot(initMatrixWithNEHTrue[3]$V3, initMatrixWithNEHTrue[2]$V2, type="l", ylim=yrange, xlim=xrange, col="blue", ann=FALSE) lines(initMatrixWithNEHFalse[3]$V3, initMatrixWithNEHFalse[2]$V2, type="l", col="green") box() title(main='Update mit Elitelösung 50 Jobs', col.main="red", font.main=4) title(xlab='Laufzeit in s') title(ylab='durchschnittliche Fertigstellungszeit') legend("topright", legend=c("true", "false"), col=c("blue", "green"), lty=1:1, cex=0.8)
/withEliteSolution/old/meanGraph.R
no_license
auryn31/acoflowshop
R
false
false
1,340
r
initMatrixWithNEHTrue <- read.csv(file="config_0_mean.csv", header=FALSE, sep=",") initMatrixWithNEHFalse <- read.csv(file="config_1_mean.csv", header=FALSE, sep=",") iteration <- c(initMatrixWithNEHTrue[1]$V1) jpeg('result_time_mean_iteration.jpg') yrange <- range(initMatrixWithNEHTrue[2]$V2, initMatrixWithNEHFalse[2]$V2) xrange <- range(initMatrixWithNEHTrue[3]$V3, initMatrixWithNEHFalse[3]$V3) plot(iteration, initMatrixWithNEHTrue[2]$V2, type="l", ylim=yrange, col="blue", ann=FALSE) lines(iteration, initMatrixWithNEHFalse[2]$V2, type="l", col="green") title(main='Update mit Elitelösung 50 Jobs', col.main="red", font.main=4) title(xlab='Iteration') title(ylab='durchschnittliche Fertigstellungszeit') legend("topright", legend=c("true", "false"), col=c("blue", "green"), lty=1:1, cex=0.8) jpeg('result_time_mean_calculationtime.jpg') plot(initMatrixWithNEHTrue[3]$V3, initMatrixWithNEHTrue[2]$V2, type="l", ylim=yrange, xlim=xrange, col="blue", ann=FALSE) lines(initMatrixWithNEHFalse[3]$V3, initMatrixWithNEHFalse[2]$V2, type="l", col="green") box() title(main='Update mit Elitelösung 50 Jobs', col.main="red", font.main=4) title(xlab='Laufzeit in s') title(ylab='durchschnittliche Fertigstellungszeit') legend("topright", legend=c("true", "false"), col=c("blue", "green"), lty=1:1, cex=0.8)
library(readr) library(dplyr) library(forcats) # manipulate factors psy_test <- read_csv("./data/psy_test.csv") item <- read_csv("data/item.csv", col_types = cols( item_num=col_character(), question_num=col_character() )) # variable information vars <- colnames(psy_test) vars_en <- c("time", "age", "gender", "college", "dept", "grade", 1:62, "fb1", "fb2", "fb3") var_info <- as_data_frame(cbind(vars, vars_en)) %>% left_join(item, by= c("vars_en"="question_num")) %>% select(vars, vars_en, construct_en, type, question) # converting to factors colnames(psy_test) <- vars_en psy_test <- psy_test %>% mutate(gender=factor(gender), college=factor(college), dept=factor(dept), grade=factor(grade)) %>% mutate(gender=fct_recode(gender, "male"="男", "female"="女", "other"="其他")) %>% mutate(grade=fct_recode(grade, "first"="一", "first"="一年級", "second"="二", "third"="三", "forth"="四", "suspend" = "休學", "NoDegree" = "未畢\\肄業", "graduated" = "畢\\肄業")) %>% mutate(college=fct_recode(college, "靜宜大學"="靜宜", "靜宜大學"="?宜", "臺灣大學"="ntu", "臺灣大學"="Ntu", "臺灣大學"="NTU", "臺灣大學"="台大", "臺灣大學"="台灣大學", "臺灣大學"="國立台灣大學", "臺灣大學"="國立臺灣大學", "臺灣大學"="臺灣大學與中興大學 看你想要哪一個", "台北大學"="國立台北大學", "台北大學"="國立臺北大學", "清華大學"="清大", "清華大學"="國立清華大學", "輔仁大學"="輔仁", "臺灣師範大學"="國立臺灣師範大學", "臺灣師範大學"="師大", "臺灣師範大學"="台師大", "台北科技大學"="北科", "中原大學"="中原", "中原大學"="中原大旋", "中國醫藥大學"="中國醫", "中興大學"="中興", "文化大學"="文化", "世新大學"="世新", "臺北醫學大學"="北醫", "臺北醫學大學"="台北醫學大學", "臺北市立大學"="台北市立大學", "國立臺北護理健康大學"="北護", "國立臺北護理健康大學"="國北護", "國立臺灣藝術大學"="台灣藝術大學", "國立臺灣藝術大學"="台藝大", "國立臺灣藝術大學"="國立台灣藝術大學", "國立臺灣體育運動大學"="台灣體育運動大學", "成功大學"="成大", "成功大學"="國立成功大學", "亞洲大學"="亞洲", "東吳大學"="東吳", "東海大學"="東海", "屏東科技大學"="屏科", "政治大學"="政大", "陽明大學"="陽明", "暨南大學"="國立暨南國際大學", "淡江大學"="淡江", "義守大學"="義守", "實踐大學"="實踐", "彰化師範大學"="彰師大", "實踐大學"="實踐", "輔仁大學"="輔大", "輔仁大學"="輔仁", "未知"="無", "未知"="是", "未知"="某某大學", "未知"="0")) %>% mutate(dept=fct_recode(dept, "大氣系"="AS", "經濟系"="Econ", "經濟系"="Economics", "經濟系"="經濟", "經濟系"="經濟學", "經濟系"="經濟學系", "電機與電子系"="Electrical Engineering", "電機與電子系"="電機", "電機與電子系"="電子工程系", 
"電機與電子系"="電機系", "心理相關科系"="psychology", "心理相關科系"="臨床心理學系", "心理相關科系"="諮商與工商心理學系", "心理相關科系"="心理學系", "心理相關科系"="心理與諮商學系", "心理相關科系"="心理系,行銷系與財金系你 你也可以選一個", "心理相關科系"="心理系", "心理相關科系"="心理", "社會社工社教相關科系"="社會學系", "社會社工社教相關科系"="社會福利系", "社會社工社教相關科系"="社會系", "社會社工社教相關科系"="社會工作學系", "社會社工社教相關科系"="社會工作", "社會社工社教相關科系"="社會", "社會社工社教相關科系"="社教系", "社會社工社教相關科系"="社教", "社會社工社教相關科系"="社工系", "社會社工社教相關科系"="人文社會學院學士班", "社會社工社教相關科系"="人文社會", "人類學系"="人類學", "工程相關科系"="土木工程", "工程相關科系"="土木工程學系", "工程相關科系"="工程系統科學", "工程相關科系"="工程科學系", "工程相關科系"="工程與系統科學系", "工程相關科系"="化學工程與材料工程學系", "工程相關科系"="材料科學工程學系", "工程相關科系"="通訊工程", "工程相關科系"="造船及海洋工程系", "工程相關科系"="機械工程系", "工程相關科系"="機械工程學系", "工程相關科系"="機械與機電工程學系", "工程相關科系"="機械", "工程相關科系"="工科系", "工程相關科系"="生工系", "工程相關科系"="材料系", "工程相關科系"="動力機械系", "資訊相關科系"="資工", "資訊相關科系"="資工系", "資訊相關科系"="資訊工程", "資訊相關科系"="資訊工程學系", "資訊相關科系"="資訊經營系", "資訊相關科系"="資訊管理", "資訊相關科系"="資管", "資訊相關科系"="資管系", "設計相關科系"="工業設計", "設計相關科系"="商品設計", "中文系"="中文", "中文系"="國文系", "中文系"="中國文學系", "法律相關科系"="法律學系", "法律相關科系"="法律系", "法律相關科系"="法律", "法律相關科系"="中葡法律", "教育相關科系"="人發", "教育相關科系"="公領", "教育相關科系"="教育科技學系", "教育相關科系"="舞蹈學系、特殊教育研究所", "教育相關科系"="國際漢語教育", "教育相關科系"="特殊教育學系", "教育相關科系"="特殊教育系", "教育相關科系"="幼兒與家庭教育學系", "教育相關科系"="特教系", "教育相關科系"="特教", "公衛系"="公衛", "化學系"="化學", "食品相關科系"="水產食品科學系", "食品相關科系"="食品科學", "食品相關科系"="食品科學系", "牙醫系"="牙醫", "牙醫系"="牙醫學系", "外文相關科系"="外文", "外文相關科系"="外文系", "外文相關科系"="外國語文學系", "外文相關科系"="應用外文系", "外文相關科系"="應用英語系", "外文相關科系"="應用日語系", "外文相關科系"="英語系", "外文相關科系"="英語文系", "外文相關科系"="應英系", "外文相關科系"="西班牙語文學系", "外文相關科系"="西文", "外文相關科系"="西文系", "外文相關科系"="西班牙文學", "外文相關科系"="西班牙文學系", "外文相關科系"="日文系", "外文相關科系"="日文", "生物相關科系"="生化科技", "生物相關科系"="生命科學系", "生物相關科系"="生物醫學系", "生物相關科系"="生科", "生物相關科系"="生科院學士班", "生物相關科系"="動物科學技術學系", "傳播相關科系"="資訊傳播學系", "傳播相關科系"="新聞傳播", "傳播相關科系"="傳播與科技學系", "傳播相關科系"="生物產業傳播暨發展學系", "傳播相關科系"="生傳系", "傳播相關科系"="生傳", "傳播相關科系"="圖傳", "傳播相關科系"="新聞", "傳播相關科系"="媒體", "傳播相關科系"="傳播與科技學系", "傳播相關科系"="廣電系", "管理相關科系"="工業工程與工程管理", "管理相關科系"="工業工程與經營管理", "管理相關科系"="企業管理", "管理相關科系"="企管", "管理相關科系"="保險金融管理系", "管理相關科系"="科管", 
"管理相關科系"="經管", "管理相關科系"="資訊管理", "管理相關科系"="資管", "管理相關科系"="資管系", "管理相關科系"="運動管理學系", "管理相關科系"="運輸與物流管理學系", "管理相關科系"="管理科學系", "管理相關科系"="醫務管理學系", "物理系"="物理", "文化相關科系"="民俗技藝", "文化相關科系"="公共與文化事務學系", "藝術相關科系"="表藝所", "藝術相關科系"="舞蹈系", "藝術相關科系"="應用美術", "藝術相關科系"="美術", "藝術相關科系"="戲劇", "藝術相關科系"="戲劇系", "藝術相關科系"="戲劇學系", "公衛系"="公衛", "財金系"="財務金融學系", "財金系"="財金", "財金系"="金融系", "財金系"="金融", "政治系"="政治學系", "國企系"="國企", "國企系"="國際企業學系", "園藝系"="園藝", "會計系"="會計", "會計系"="會計學系", "經濟系"="政治學系", "經濟系"="經濟學", "經濟系"="經濟", "經濟系"="經濟學系", "農業化學系"="農化", "治療相關科系"="語言治療與聽力", "治療相關科系"="物理治療", "治療相關科系"="物理治療系", "治療相關科系"="語言治療與聽力學系", "治療相關科系"="職能治療", "治療相關科系"="職能治療學系", "治療相關科系"="聽力與語言治療", "治療相關科系"="聽語系", "數學系"="數學", "歷史系"="歷史", "歷史系"="歷史與地理學系", "歷史系"="歷史 社工", "獸醫系"="獸醫", "藥用化妝品系"="藥用化妝品", "藥用化妝品系"="藥用化妝品學系", "獸醫系"="獸醫", "體育系"="體育學系")) # remove duplicated & NA rows dupli_index <- duplicated(psy_test[,-1]) ## return rows that are duplicated by comparing all variables except datetime #dupli_rows <- psy_test[dupli_index,] psy_test <- psy_test[!dupli_index, ] ## rows not duplicated NA_row <- is.na(psy_test$`3`) psy_test <- psy_test[!NA_row,] # write to rds: R's data specific data storage write_rds(psy_test, "./data/psy_test_parsed.rds") write_rds(var_info, "./data/item_construct.rds")
/rscript/data_import_cleaning.R
no_license
liao961120/psytesting.github.io
R
false
false
14,415
r
library(readr) library(dplyr) library(forcats) # manipulate factors psy_test <- read_csv("./data/psy_test.csv") item <- read_csv("data/item.csv", col_types = cols( item_num=col_character(), question_num=col_character() )) # variable information vars <- colnames(psy_test) vars_en <- c("time", "age", "gender", "college", "dept", "grade", 1:62, "fb1", "fb2", "fb3") var_info <- as_data_frame(cbind(vars, vars_en)) %>% left_join(item, by= c("vars_en"="question_num")) %>% select(vars, vars_en, construct_en, type, question) # converting to factors colnames(psy_test) <- vars_en psy_test <- psy_test %>% mutate(gender=factor(gender), college=factor(college), dept=factor(dept), grade=factor(grade)) %>% mutate(gender=fct_recode(gender, "male"="男", "female"="女", "other"="其他")) %>% mutate(grade=fct_recode(grade, "first"="一", "first"="一年級", "second"="二", "third"="三", "forth"="四", "suspend" = "休學", "NoDegree" = "未畢\\肄業", "graduated" = "畢\\肄業")) %>% mutate(college=fct_recode(college, "靜宜大學"="靜宜", "靜宜大學"="?宜", "臺灣大學"="ntu", "臺灣大學"="Ntu", "臺灣大學"="NTU", "臺灣大學"="台大", "臺灣大學"="台灣大學", "臺灣大學"="國立台灣大學", "臺灣大學"="國立臺灣大學", "臺灣大學"="臺灣大學與中興大學 看你想要哪一個", "台北大學"="國立台北大學", "台北大學"="國立臺北大學", "清華大學"="清大", "清華大學"="國立清華大學", "輔仁大學"="輔仁", "臺灣師範大學"="國立臺灣師範大學", "臺灣師範大學"="師大", "臺灣師範大學"="台師大", "台北科技大學"="北科", "中原大學"="中原", "中原大學"="中原大旋", "中國醫藥大學"="中國醫", "中興大學"="中興", "文化大學"="文化", "世新大學"="世新", "臺北醫學大學"="北醫", "臺北醫學大學"="台北醫學大學", "臺北市立大學"="台北市立大學", "國立臺北護理健康大學"="北護", "國立臺北護理健康大學"="國北護", "國立臺灣藝術大學"="台灣藝術大學", "國立臺灣藝術大學"="台藝大", "國立臺灣藝術大學"="國立台灣藝術大學", "國立臺灣體育運動大學"="台灣體育運動大學", "成功大學"="成大", "成功大學"="國立成功大學", "亞洲大學"="亞洲", "東吳大學"="東吳", "東海大學"="東海", "屏東科技大學"="屏科", "政治大學"="政大", "陽明大學"="陽明", "暨南大學"="國立暨南國際大學", "淡江大學"="淡江", "義守大學"="義守", "實踐大學"="實踐", "彰化師範大學"="彰師大", "實踐大學"="實踐", "輔仁大學"="輔大", "輔仁大學"="輔仁", "未知"="無", "未知"="是", "未知"="某某大學", "未知"="0")) %>% mutate(dept=fct_recode(dept, "大氣系"="AS", "經濟系"="Econ", "經濟系"="Economics", "經濟系"="經濟", "經濟系"="經濟學", "經濟系"="經濟學系", "電機與電子系"="Electrical Engineering", "電機與電子系"="電機", "電機與電子系"="電子工程系", 
"電機與電子系"="電機系", "心理相關科系"="psychology", "心理相關科系"="臨床心理學系", "心理相關科系"="諮商與工商心理學系", "心理相關科系"="心理學系", "心理相關科系"="心理與諮商學系", "心理相關科系"="心理系,行銷系與財金系你 你也可以選一個", "心理相關科系"="心理系", "心理相關科系"="心理", "社會社工社教相關科系"="社會學系", "社會社工社教相關科系"="社會福利系", "社會社工社教相關科系"="社會系", "社會社工社教相關科系"="社會工作學系", "社會社工社教相關科系"="社會工作", "社會社工社教相關科系"="社會", "社會社工社教相關科系"="社教系", "社會社工社教相關科系"="社教", "社會社工社教相關科系"="社工系", "社會社工社教相關科系"="人文社會學院學士班", "社會社工社教相關科系"="人文社會", "人類學系"="人類學", "工程相關科系"="土木工程", "工程相關科系"="土木工程學系", "工程相關科系"="工程系統科學", "工程相關科系"="工程科學系", "工程相關科系"="工程與系統科學系", "工程相關科系"="化學工程與材料工程學系", "工程相關科系"="材料科學工程學系", "工程相關科系"="通訊工程", "工程相關科系"="造船及海洋工程系", "工程相關科系"="機械工程系", "工程相關科系"="機械工程學系", "工程相關科系"="機械與機電工程學系", "工程相關科系"="機械", "工程相關科系"="工科系", "工程相關科系"="生工系", "工程相關科系"="材料系", "工程相關科系"="動力機械系", "資訊相關科系"="資工", "資訊相關科系"="資工系", "資訊相關科系"="資訊工程", "資訊相關科系"="資訊工程學系", "資訊相關科系"="資訊經營系", "資訊相關科系"="資訊管理", "資訊相關科系"="資管", "資訊相關科系"="資管系", "設計相關科系"="工業設計", "設計相關科系"="商品設計", "中文系"="中文", "中文系"="國文系", "中文系"="中國文學系", "法律相關科系"="法律學系", "法律相關科系"="法律系", "法律相關科系"="法律", "法律相關科系"="中葡法律", "教育相關科系"="人發", "教育相關科系"="公領", "教育相關科系"="教育科技學系", "教育相關科系"="舞蹈學系、特殊教育研究所", "教育相關科系"="國際漢語教育", "教育相關科系"="特殊教育學系", "教育相關科系"="特殊教育系", "教育相關科系"="幼兒與家庭教育學系", "教育相關科系"="特教系", "教育相關科系"="特教", "公衛系"="公衛", "化學系"="化學", "食品相關科系"="水產食品科學系", "食品相關科系"="食品科學", "食品相關科系"="食品科學系", "牙醫系"="牙醫", "牙醫系"="牙醫學系", "外文相關科系"="外文", "外文相關科系"="外文系", "外文相關科系"="外國語文學系", "外文相關科系"="應用外文系", "外文相關科系"="應用英語系", "外文相關科系"="應用日語系", "外文相關科系"="英語系", "外文相關科系"="英語文系", "外文相關科系"="應英系", "外文相關科系"="西班牙語文學系", "外文相關科系"="西文", "外文相關科系"="西文系", "外文相關科系"="西班牙文學", "外文相關科系"="西班牙文學系", "外文相關科系"="日文系", "外文相關科系"="日文", "生物相關科系"="生化科技", "生物相關科系"="生命科學系", "生物相關科系"="生物醫學系", "生物相關科系"="生科", "生物相關科系"="生科院學士班", "生物相關科系"="動物科學技術學系", "傳播相關科系"="資訊傳播學系", "傳播相關科系"="新聞傳播", "傳播相關科系"="傳播與科技學系", "傳播相關科系"="生物產業傳播暨發展學系", "傳播相關科系"="生傳系", "傳播相關科系"="生傳", "傳播相關科系"="圖傳", "傳播相關科系"="新聞", "傳播相關科系"="媒體", "傳播相關科系"="傳播與科技學系", "傳播相關科系"="廣電系", "管理相關科系"="工業工程與工程管理", "管理相關科系"="工業工程與經營管理", "管理相關科系"="企業管理", "管理相關科系"="企管", "管理相關科系"="保險金融管理系", "管理相關科系"="科管", 
"管理相關科系"="經管", "管理相關科系"="資訊管理", "管理相關科系"="資管", "管理相關科系"="資管系", "管理相關科系"="運動管理學系", "管理相關科系"="運輸與物流管理學系", "管理相關科系"="管理科學系", "管理相關科系"="醫務管理學系", "物理系"="物理", "文化相關科系"="民俗技藝", "文化相關科系"="公共與文化事務學系", "藝術相關科系"="表藝所", "藝術相關科系"="舞蹈系", "藝術相關科系"="應用美術", "藝術相關科系"="美術", "藝術相關科系"="戲劇", "藝術相關科系"="戲劇系", "藝術相關科系"="戲劇學系", "公衛系"="公衛", "財金系"="財務金融學系", "財金系"="財金", "財金系"="金融系", "財金系"="金融", "政治系"="政治學系", "國企系"="國企", "國企系"="國際企業學系", "園藝系"="園藝", "會計系"="會計", "會計系"="會計學系", "經濟系"="政治學系", "經濟系"="經濟學", "經濟系"="經濟", "經濟系"="經濟學系", "農業化學系"="農化", "治療相關科系"="語言治療與聽力", "治療相關科系"="物理治療", "治療相關科系"="物理治療系", "治療相關科系"="語言治療與聽力學系", "治療相關科系"="職能治療", "治療相關科系"="職能治療學系", "治療相關科系"="聽力與語言治療", "治療相關科系"="聽語系", "數學系"="數學", "歷史系"="歷史", "歷史系"="歷史與地理學系", "歷史系"="歷史 社工", "獸醫系"="獸醫", "藥用化妝品系"="藥用化妝品", "藥用化妝品系"="藥用化妝品學系", "獸醫系"="獸醫", "體育系"="體育學系")) # remove duplicated & NA rows dupli_index <- duplicated(psy_test[,-1]) ## return rows that are duplicated by comparing all variables except datetime #dupli_rows <- psy_test[dupli_index,] psy_test <- psy_test[!dupli_index, ] ## rows not duplicated NA_row <- is.na(psy_test$`3`) psy_test <- psy_test[!NA_row,] # write to rds: R's data specific data storage write_rds(psy_test, "./data/psy_test_parsed.rds") write_rds(var_info, "./data/item_construct.rds")
#!/usr/bin/env Rscript " Analysis of 10x Genomics Chromium single cell RNA-seq data using Seurat (version 3.0) starting with Cell Ranger output. Basic workflow steps: 1 - create - import counts matrix, perform initial QC, and calculate various variance metrics (slowest step) 2 - cluster - perform clustering based on number of PCs 3 - identify - identify clusters based on specified clustering/resolution (higher resolution for more clusters) Additional optional steps: combine - merge multiple samples/libraries (no batch correction) integrate - perform integration (batch correction) across multiple sample batches de - differential expression between samples/libraries within clusters Usage: scrna-10x-seurat-3.R create <analysis_dir> <sample_name> <sample_dir> [--min_genes=<n> --max_genes=<n> --mt=<n>] scrna-10x-seurat-3.R cluster <analysis_dir> <num_dim> scrna-10x-seurat-3.R identify <analysis_dir> <resolution> scrna-10x-seurat-3.R combine <analysis_dir> <sample_analysis_dir>... scrna-10x-seurat-3.R integrate <analysis_dir> <num_dim> <batch_analysis_dir>... 
scrna-10x-seurat-3.R de <analysis_dir> <resolution> scrna-10x-seurat-3.R --help Options: --min_genes=<n> cutoff for minimum number of genes per cell (2nd percentile if not specified) --max_genes=<n> cutoff for maximum number of genes per cell (98th percentile if not specified) --mt=<n> cutoff for mitochondrial genes percentage per cell [default: 10] -h, --help show this screen " -> doc # ========== functions ========== # load dependencies load_libraries = function() { message("\n\n ========== load libraries ========== \n\n") suppressPackageStartupMessages({ library(magrittr) library(glue) library(Seurat) library(future) library(Matrix) library(tidyverse) library(data.table) library(cowplot) library(scales) library(pheatmap) library(RColorBrewer) library(ggsci) library(eulerr) library(UpSetR) }) } # create a single Seurat object from multiple 10x Cell Ranger outputs # takes vector of one or more sample_names and sample_dirs # can work with multiple samples, but the appropriate way is to use "combine" with objects that are pre-filtered load_sample_counts_matrix = function(sample_names, sample_dirs) { message("\n\n ========== import cell ranger counts matrix ========== \n\n") merged_counts_matrix = NULL for (i in 1:length(sample_names)) { sample_name = sample_names[i] sample_dir = sample_dirs[i] message("loading counts matrix for sample: ", sample_name) # check if sample dir is valid if (!dir.exists(sample_dir)) stop(glue("dir {sample_dir} does not exist")) # determine counts matrix directory (HDF5 is not the preferred option) # "filtered_gene_bc_matrices" for single library # "filtered_gene_bc_matrices_mex" for aggregated # Cell Ranger 3.0: "genes" has been replaced by "features" to account for feature barcoding # Cell Ranger 3.0: the matrix and barcode files are now gzipped data_dir = glue("{sample_dir}/outs") if (!dir.exists(data_dir)) stop(glue("dir {sample_dir} does not contain outs directory")) data_dir = list.files(path = data_dir, pattern = "matrix.mtx", 
full.names = TRUE, recursive = TRUE) data_dir = str_subset(data_dir, "filtered_.*_bc_matri")[1] data_dir = dirname(data_dir) if (!dir.exists(data_dir)) stop(glue("dir {sample_dir} does not contain matrix.mtx")) message("loading counts matrix dir: ", data_dir) counts_matrix = Read10X(data_dir) message(glue("library {sample_name} cells: {ncol(counts_matrix)}")) message(glue("library {sample_name} genes: {nrow(counts_matrix)}")) message(" ") # log to file write(glue("library {sample_name} cells: {ncol(counts_matrix)}"), file = "create.log", append = TRUE) write(glue("library {sample_name} genes: {nrow(counts_matrix)}"), file = "create.log", append = TRUE) # clean up counts matrix to make it more readable counts_matrix = counts_matrix[sort(rownames(counts_matrix)), ] colnames(counts_matrix) = str_c(sample_name, ":", colnames(counts_matrix)) # combine current matrix with previous if (i == 1) { # skip if there is no previous matrix merged_counts_matrix = counts_matrix } else { # check if genes are the same for current and previous matrices if (!identical(rownames(merged_counts_matrix), rownames(counts_matrix))) { # generate a warning, since this is probably a mistake warning("counts matrix genes are not the same for different libraries") Sys.sleep(1) # get common genes common_genes = intersect(rownames(merged_counts_matrix), rownames(counts_matrix)) common_genes = sort(common_genes) message("num genes for previous libraries: ", length(rownames(merged_counts_matrix))) message("num genes for current library: ", length(rownames(counts_matrix))) message("num genes in common: ", length(common_genes)) # exit if the number of overlapping genes is too few if (length(common_genes) < (length(rownames(counts_matrix)) * 0.9)) stop("libraries have too few genes in common") # subset current and previous matrix to overlapping genes merged_counts_matrix = merged_counts_matrix[common_genes, ] counts_matrix = counts_matrix[common_genes, ] } # combine current matrix with previous 
merged_counts_matrix = cbind(merged_counts_matrix, counts_matrix) Sys.sleep(1) } } # create a Seurat object s_obj = create_seurat_obj(counts_matrix = merged_counts_matrix) return(s_obj) } # convert a sparse matrix of counts to a Seurat object and generate some QC plots create_seurat_obj = function(counts_matrix, proj_name = NULL, sample_dir = NULL) { message("\n\n ========== save raw counts matrix ========== \n\n") # log to file write(glue("input cells: {ncol(counts_matrix)}"), file = "create.log", append = TRUE) write(glue("input genes: {nrow(counts_matrix)}"), file = "create.log", append = TRUE) # remove cells with few genes (there is an additional filter in CreateSeuratObject) counts_matrix = counts_matrix[, Matrix::colSums(counts_matrix) >= 250] # remove genes without counts (there is an additional filter in CreateSeuratObject) counts_matrix = counts_matrix[Matrix::rowSums(counts_matrix) >= 5, ] # log to file write(glue("detectable cells: {ncol(counts_matrix)}"), file = "create.log", append = TRUE) write(glue("detectable genes: {nrow(counts_matrix)}"), file = "create.log", append = TRUE) # save counts matrix as a standard text file and gzip # counts_matrix_filename = "counts.raw.txt" # write.table(as.matrix(counts_matrix), file = counts_matrix_filename, quote = FALSE, sep = "\t", col.names = NA) # system(paste0("gzip ", counts_matrix_filename)) # save counts matrix as a csv file (to be consistent with the rest of the tables) raw_data = counts_matrix %>% as.matrix() %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene) write_csv(raw_data, path = "counts.raw.csv.gz") rm(raw_data) message("\n\n ========== create seurat object ========== \n\n") if (is.null(proj_name)) { # if name is not set, then it's a manually merged counts matrix s_obj = CreateSeuratObject(counts = counts_matrix, min.cells = 5, min.features = 250, project = "proj", names.field = 1, names.delim = ":") rm(counts_matrix) } else if (proj_name == "aggregated") { # multiple libraries 
combined using Cell Ranger (cellranger aggr) # setup taking into consideration aggregated names delimiter s_obj = CreateSeuratObject(counts = counts_matrix, min.cells = 5, min.features = 250, project = proj_name, names.field = 2, names.delim = "-") # import cellranger aggr sample sheet sample_sheet_csv = paste0(sample_dir, "/outs/aggregation_csv.csv") sample_sheet = read.csv(sample_sheet_csv, stringsAsFactors = FALSE) message("samples: ", paste(sample_sheet[, 1], collapse=", ")) # change s_obj@meta.data$orig.ident sample identities from numbers to names s_obj[["orig.ident"]][, 1] = factor(sample_sheet[s_obj[["orig.ident"]][, 1], 1]) # set s_obj@ident to the new s_obj@meta.data$orig.ident s_obj = set_identity(seurat_obj = s_obj, group_var = "orig.ident") } else { stop("project name set to unknown value") } message(glue("imported cells: {ncol(s_obj)}")) message(glue("imported genes: {nrow(s_obj)}")) message(" ") # log to file write(glue("imported cells: {ncol(s_obj)}"), file = "create.log", append = TRUE) write(glue("imported genes: {nrow(s_obj)}"), file = "create.log", append = TRUE) # rename nCount_RNA and nFeature_RNA slots to make them more clear s_obj$num_UMIs = s_obj$nCount_RNA s_obj$num_genes = s_obj$nFeature_RNA # nFeature_RNA and nCount_RNA are automatically calculated for every object by Seurat # calculate the percentage of mitochondrial genes here and store it in percent.mito using the AddMetaData mt_genes = grep("^MT-", rownames(GetAssayData(s_obj)), ignore.case = TRUE, value = TRUE) percent_mt = Matrix::colSums(GetAssayData(s_obj)[mt_genes, ]) / Matrix::colSums(GetAssayData(s_obj)) percent_mt = round(percent_mt * 100, digits = 3) # add columns to object@meta.data, and is a great place to stash QC stats s_obj = AddMetaData(s_obj, metadata = percent_mt, col.name = "pct_mito") message("\n\n ========== nFeature_RNA/nCount_RNA/percent_mito plots ========== \n\n") # create a named color scheme to ensure names and colors are in the proper order sample_names = 
s_obj$orig.ident %>% as.character() %>% sort() %>% unique() colors_samples_named = colors_samples[1:length(sample_names)] names(colors_samples_named) = sample_names vln_theme = theme( axis.title.x = element_blank(), axis.title.y = element_blank(), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1), legend.position = "none" ) suppressMessages({ dist_unfilt_nft_plot = VlnPlot( s_obj, features = "num_genes", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_unfilt_nct_plot = VlnPlot( s_obj, features = "num_UMIs", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_unfilt_pmt_plot = VlnPlot( s_obj, features = "pct_mito", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_unfilt_plot = plot_grid(dist_unfilt_nft_plot, dist_unfilt_nct_plot, dist_unfilt_pmt_plot, ncol = 3) ggsave("qc.distribution.unfiltered.png", plot = dist_unfilt_plot, width = 10, height = 6, units = "in") }) Sys.sleep(1) cor_ncr_nfr_plot = FeatureScatter( s_obj, feature1 = "num_UMIs", feature2 = "num_genes", group.by = "orig.ident", cols = colors_samples ) + theme(aspect.ratio = 1) cor_ncr_pmt_plot = FeatureScatter( s_obj, feature1 = "num_UMIs", feature2 = "pct_mito", group.by = "orig.ident", cols = colors_samples ) + theme(aspect.ratio = 1) cor_nfr_pmt_plot = FeatureScatter( s_obj, feature1 = "num_genes", feature2 = "pct_mito", group.by = "orig.ident", cols = colors_samples ) + theme(aspect.ratio = 1) cor_unfilt_plot = plot_grid(cor_ncr_nfr_plot, cor_ncr_pmt_plot, cor_nfr_pmt_plot, ncol = 3) ggsave("qc.correlations.unfiltered.png", plot = cor_unfilt_plot, width = 18, height = 5, units = "in") Sys.sleep(1) # check distribution of gene counts and mitochondrial percentage low_quantiles = 
c(0.05, 0.02, 0.01, 0.001) high_quantiles = c(0.95, 0.98, 0.99, 0.999) message("num genes low percentiles:") s_obj$num_genes %>% quantile(low_quantiles) %>% round(1) %>% print() message(" ") message("num genes high percentiles:") s_obj$num_genes %>% quantile(high_quantiles) %>% round(1) %>% print() message(" ") message("pct mito high percentiles:") s_obj$pct_mito %>% quantile(high_quantiles) %>% round(1) %>% print() message(" ") # save unfiltered cell metadata s_obj@meta.data %>% rownames_to_column("cell") %>% as_tibble() %>% mutate(sample_name = orig.ident) %>% write_excel_csv(path = "metadata.unfiltered.csv") return(s_obj) } # filter data by number of genes and mitochondrial percentage filter_data = function(seurat_obj, min_genes = NULL, max_genes = NULL, max_mt = 10) { s_obj = seurat_obj message("\n\n ========== filter data matrix ========== \n\n") # log the unfiltered gene numbers to file write(glue("unfiltered min genes: {min(s_obj$num_genes)}"), file = "create.log", append = TRUE) write(glue("unfiltered max genes: {max(s_obj$num_genes)}"), file = "create.log", append = TRUE) write(glue("unfiltered mean num genes: {round(mean(s_obj$num_genes), 3)}"), file = "create.log", append = TRUE) write(glue("unfiltered median num genes: {median(s_obj$num_genes)}"), file = "create.log", append = TRUE) # convert arguments to integers (command line arguments end up as characters) min_genes = as.numeric(min_genes) max_genes = as.numeric(max_genes) max_mt = as.numeric(max_mt) # default cutoffs (gene numbers rounded to nearest 10) # as.numeric() converts NULLs to 0 length numerics, so can't use is.null() if (!length(min_genes)) min_genes = s_obj$num_genes %>% quantile(0.02, names = FALSE) %>% round(-1) if (!length(max_genes)) max_genes = s_obj$num_genes %>% quantile(0.98, names = FALSE) %>% round(-1) if (!length(max_mt)) max_mt = 10 message(glue("min genes cutoff: {min_genes}")) message(glue("max genes cutoff: {max_genes}")) message(glue("max mitochondrial percentage cutoff: 
{max_mt}")) message(" ") # log the cutoffs to file write(glue("min genes cutoff: {min_genes}"), file = "create.log", append = TRUE) write(glue("max genes cutoff: {max_genes}"), file = "create.log", append = TRUE) write(glue("max mitochondrial percentage cutoff: {max_mt}"), file = "create.log", append = TRUE) message(glue("imported cells: {ncol(s_obj)}")) message(glue("imported genes: {nrow(s_obj)}")) # filter cells_subset = seurat_obj@meta.data %>% rownames_to_column("cell") %>% filter(nFeature_RNA > min_genes & nFeature_RNA < max_genes & pct_mito < max_mt) %>% pull(cell) s_obj = subset(s_obj, cells = cells_subset) message("filtered cells: ", ncol(s_obj)) message("filtered genes: ", nrow(s_obj)) # create a named color scheme to ensure names and colors are in the proper order sample_names = s_obj$orig.ident %>% as.character() %>% sort() %>% unique() colors_samples_named = colors_samples[1:length(sample_names)] names(colors_samples_named) = sample_names vln_theme = theme( axis.title.x = element_blank(), axis.title.y = element_blank(), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1), legend.position = "none" ) suppressMessages({ dist_filt_nft_plot = VlnPlot( s_obj, features = "num_genes", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_filt_nct_plot = VlnPlot( s_obj, features = "num_UMIs", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_filt_pmt_plot = VlnPlot( s_obj, features = "pct_mito", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_filt_plot = plot_grid(dist_filt_nft_plot, dist_filt_nct_plot, dist_filt_pmt_plot, ncol = 3) ggsave("qc.distribution.filtered.png", plot = dist_filt_plot, width = 10, height = 6, units = "in") }) Sys.sleep(1) # 
after removing unwanted cells from the dataset, normalize the data # LogNormalize: # - normalizes the gene expression measurements for each cell by the total expression # - multiplies this by a scale factor (10,000 by default) # - log-transforms the result s_obj = NormalizeData(s_obj, normalization.method = "LogNormalize", scale.factor = 10000, verbose = FALSE) # save counts matrix as a basic gzipped text file # object@data stores normalized and log-transformed single cell expression # used for visualizations, such as violin and feature plots, most diff exp tests, finding high-variance genes counts_norm = GetAssayData(s_obj) %>% as.matrix() %>% round(3) counts_norm = counts_norm %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene) write_csv(counts_norm, path = "counts.normalized.csv.gz") # log to file write(glue("filtered cells: {ncol(s_obj)}"), file = "create.log", append = TRUE) write(glue("filtered genes: {nrow(s_obj)}"), file = "create.log", append = TRUE) write(glue("filtered mean num genes: {round(mean(s_obj$num_genes), 3)}"), file = "create.log", append = TRUE) write(glue("filtered median num genes: {median(s_obj$num_genes)}"), file = "create.log", append = TRUE) return(s_obj) } # merge multiple Seurat objects combine_seurat_obj = function(original_wd, sample_analysis_dirs) { if (length(sample_analysis_dirs) < 2) stop("must have at least 2 samples to merge") message("\n\n ========== combine samples ========== \n\n") seurat_obj_list = list() for (i in 1:length(sample_analysis_dirs)) { sample_analysis_dir = sample_analysis_dirs[i] sample_analysis_dir = glue("{original_wd}/{sample_analysis_dir}") sample_seurat_rds = glue("{sample_analysis_dir}/seurat_obj.rds") # check if analysis dir is valid if (!dir.exists(sample_analysis_dir)) stop(glue("dir {sample_analysis_dir} does not exist")) # check if seurat object exists if (!file.exists(sample_seurat_rds)) stop(glue("seurat object rds {sample_seurat_rds} does not exist")) # load seurat object 
seurat_obj_list[[i]] = readRDS(sample_seurat_rds) # clean up object seurat_obj_list[[i]]@assays$RNA@var.features = vector() seurat_obj_list[[i]]@assays$RNA@scale.data = matrix() seurat_obj_list[[i]]@reductions = list() seurat_obj_list[[i]]@meta.data = seurat_obj_list[[i]]@meta.data %>% select(-starts_with("snn_res")) # print single sample sample stats # sample_name = seurat_obj_list[[i]]@meta.data[1, "orig.ident"] %>% as.character() sample_name = seurat_obj_list[[i]]$orig.ident[1] %>% as.character() message(glue("sample {sample_name} dir: {basename(sample_analysis_dir)}")) write(glue("sample {sample_name} dir: {basename(sample_analysis_dir)}"), file = "create.log", append = TRUE) message(glue("sample {sample_name} cells: {ncol(seurat_obj_list[[i]])}")) write(glue("sample {sample_name} cells: {ncol(seurat_obj_list[[i]])}"), file = "create.log", append = TRUE) message(glue("sample {sample_name} genes: {nrow(seurat_obj_list[[i]])}")) write(glue("sample {sample_name} genes: {nrow(seurat_obj_list[[i]])}"), file = "create.log", append = TRUE) message(" ") } # merge merged_obj = merge(seurat_obj_list[[1]], seurat_obj_list[2:length(seurat_obj_list)]) rm(seurat_obj_list) # print combined sample stats message(glue("combined unfiltered cells: {ncol(merged_obj)}")) write(glue("combined unfiltered cells: {ncol(merged_obj)}"), file = "create.log", append = TRUE) message(glue("combined unfiltered genes: {nrow(merged_obj)}")) write(glue("combined unfiltered genes: {nrow(merged_obj)}"), file = "create.log", append = TRUE) # filter poorly expressed genes (detected in less than 10 cells) filtered_genes = Matrix::rowSums(GetAssayData(merged_obj, assay = "RNA", slot = "counts") > 0) min_cells = 10 if (ncol(merged_obj) > 100000) { min_cells = 50 } filtered_genes = filtered_genes[filtered_genes >= min_cells] %>% names() %>% sort() merged_obj = subset(merged_obj, features = filtered_genes) # print combined sample stats message(glue("combined cells: {ncol(merged_obj)}")) 
write(glue("combined cells: {ncol(merged_obj)}"), file = "create.log", append = TRUE) message(glue("combined genes: {nrow(merged_obj)}")) write(glue("combined genes: {nrow(merged_obj)}"), file = "create.log", append = TRUE) # print gene/cell minimum cutoffs min_cells = Matrix::rowSums(GetAssayData(merged_obj, assay = "RNA", slot = "counts") > 0) %>% min() min_genes = Matrix::colSums(GetAssayData(merged_obj, assay = "RNA", slot = "counts") > 0) %>% min() message(glue("min cells per gene: {min_cells}")) write(glue("min cells per gene: {min_cells}"), file = "create.log", append = TRUE) message(glue("min genes per cell: {min_genes}")) write(glue("min genes per cell: {min_genes}"), file = "create.log", append = TRUE) # check that the full counts table is small enough to fit into an R matrix (max around 100k x 21k) num_matrix_elements = GetAssayData(merged_obj, assay = "RNA", slot = "counts") %>% length() if (num_matrix_elements < 2^31) { # save raw counts matrix counts_raw = GetAssayData(merged_obj, assay = "RNA", slot = "counts") %>% as.matrix() counts_raw = counts_raw %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene) # write_csv(counts_raw, path = "counts.raw.csv.gz") fwrite(counts_raw, file = "counts.raw.csv", sep = ",") R.utils::gzip("counts.raw.csv") rm(counts_raw) # save counts matrix as a basic gzipped text file counts_norm = GetAssayData(merged_obj, assay = "RNA") %>% as.matrix() %>% round(3) counts_norm = counts_norm %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene) # write_csv(counts_norm, path = "counts.normalized.csv.gz") fwrite(counts_norm, file = "counts.normalized.csv", sep = ",") R.utils::gzip("counts.normalized.csv") rm(counts_norm) } # create a named color scheme to ensure names and colors are in the proper order sample_names = merged_obj$orig.ident %>% as.character() %>% sort() %>% unique() colors_samples_named = colors_samples[1:length(sample_names)] names(colors_samples_named) = sample_names vln_theme = theme( 
axis.title.x = element_blank(), axis.title.y = element_blank(), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1), legend.position = "none" ) suppressMessages({ dist_nft_plot = VlnPlot( merged_obj, features = "num_genes", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_nct_plot = VlnPlot( merged_obj, features = "num_UMIs", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_pmt_plot = VlnPlot( merged_obj, features = "pct_mito", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_plot = plot_grid(dist_nft_plot, dist_nct_plot, dist_pmt_plot, ncol = 3) ggsave("qc.distribution.png", plot = dist_plot, width = 20, height = 6, units = "in") }) Sys.sleep(1) return(merged_obj) } # integrate multiple Seurat objects integrate_seurat_obj = function(original_wd, sample_analysis_dirs, num_dim) { # check if the inputs seems reasonable if (length(sample_analysis_dirs) < 2) stop("must have at least 2 samples to merge") num_dim = as.integer(num_dim) if (num_dim < 5) stop("too few dims: ", num_dim) if (num_dim > 50) stop("too many dims: ", num_dim) message("\n\n ========== integrate samples ========== \n\n") seurat_obj_list = list() var_genes_list = list() exp_genes = c() for (i in 1:length(sample_analysis_dirs)) { sample_analysis_dir = sample_analysis_dirs[i] sample_analysis_dir = glue("{original_wd}/{sample_analysis_dir}") sample_seurat_rds = glue("{sample_analysis_dir}/seurat_obj.rds") # check if analysis dir is valid if (!dir.exists(sample_analysis_dir)) stop(glue("dir {sample_analysis_dir} does not exist")) # check if seurat object exists if (!file.exists(sample_seurat_rds)) stop(glue("seurat object rds {sample_seurat_rds} does not exist")) # load seurat object 
seurat_obj_list[[i]] = readRDS(sample_seurat_rds) # sample_name = seurat_obj_list[[i]]@meta.data[1, "orig.ident"] %>% as.character() sample_name = seurat_obj_list[[i]]$orig.ident[1] %>% as.character() # clean up object seurat_obj_list[[i]]@assays$RNA@scale.data = matrix() seurat_obj_list[[i]]@reductions = list() seurat_obj_list[[i]]@meta.data = seurat_obj_list[[i]]@meta.data %>% select(-starts_with("snn_res")) # save expressed genes keeping only genes present in all the datasets (for genes to integrate in IntegrateData) if (length(exp_genes) > 0) { exp_genes = intersect(exp_genes, rownames(seurat_obj_list[[i]])) %>% sort() } else { exp_genes = rownames(seurat_obj_list[[i]]) } # save variable genes var_genes_list[[sample_name]] = VariableFeatures(seurat_obj_list[[i]]) # print single sample sample stats message(glue("sample {sample_name} dir: {basename(sample_analysis_dir)}")) write(glue("sample {sample_name} dir: {basename(sample_analysis_dir)}"), file = "create.log", append = TRUE) message(glue("sample {sample_name} cells: {ncol(seurat_obj_list[[i]])}")) write(glue("sample {sample_name} cells: {ncol(seurat_obj_list[[i]])}"), file = "create.log", append = TRUE) message(glue("sample {sample_name} genes: {nrow(seurat_obj_list[[i]])}")) write(glue("sample {sample_name} genes: {nrow(seurat_obj_list[[i]])}"), file = "create.log", append = TRUE) message(" ") } # euler plot of variable gene overlaps (becomes unreadable and can take days for many overlaps) if (length(var_genes_list) < 8) { colors_euler = colors_samples[1:length(var_genes_list)] euler_fit = euler(var_genes_list, shape = "ellipse") euler_plot = plot(euler_fit, fills = list(fill = colors_euler, alpha = 0.7), edges = list(col = colors_euler)) png("variance.vargenes.euler.png", res = 200, width = 5, height = 5, units = "in") print(euler_plot) dev.off() } # upset plot of variable gene overlaps png("variance.vargenes.upset.png", res = 200, width = 8, height = 5, units = "in") upset(fromList(var_genes_list), nsets 
= 50, nintersects = 15, order.by = "freq", mb.ratio = c(0.5, 0.5)) dev.off() message("\n\n ========== Seurat::FindIntegrationAnchors() ========== \n\n") # find the integration anchors anchors = FindIntegrationAnchors(object.list = seurat_obj_list, anchor.features = 2000, dims = 1:num_dim) rm(seurat_obj_list) message("\n\n ========== Seurat::IntegrateData() ========== \n\n") # integrating all genes may cause issues and may not add any relevant information # integrated_obj = IntegrateData(anchorset = anchors, dims = 1:num_dim, features.to.integrate = exp_genes) integrated_obj = IntegrateData(anchorset = anchors, dims = 1:num_dim) rm(anchors) # after running IntegrateData, the Seurat object will contain a new Assay with the integrated expression matrix # the original (uncorrected values) are still stored in the object in the “RNA” assay # switch to integrated assay DefaultAssay(integrated_obj) = "integrated" # print integrated sample stats message(glue("integrated unfiltered cells: {ncol(integrated_obj)}")) write(glue("integrated unfiltered cells: {ncol(integrated_obj)}"), file = "create.log", append = TRUE) message(glue("integrated unfiltered genes: {nrow(integrated_obj)}")) write(glue("integrated unfiltered genes: {nrow(integrated_obj)}"), file = "create.log", append = TRUE) # filter poorly expressed genes (detected in less than 10 cells) filtered_genes = Matrix::rowSums(GetAssayData(integrated_obj, assay = "RNA", slot = "counts") > 0) min_cells = 10 if (ncol(integrated_obj) > 100000) { min_cells = 50 } filtered_genes = filtered_genes[filtered_genes >= min_cells] %>% names() %>% sort() integrated_obj = subset(integrated_obj, features = filtered_genes) # print integrated sample stats message(glue("integrated cells: {ncol(integrated_obj)}")) write(glue("integrated cells: {ncol(integrated_obj)}"), file = "create.log", append = TRUE) message(glue("integrated genes: {nrow(GetAssayData(integrated_obj, assay = 'RNA'))}")) write(glue("integrated genes: 
{nrow(GetAssayData(integrated_obj, assay = 'RNA'))}"), file = "create.log", append = TRUE) # print gene/cell minumum cutoffs min_cells = Matrix::rowSums(GetAssayData(integrated_obj, assay = "RNA", slot = "counts") > 0) %>% min() min_genes = Matrix::colSums(GetAssayData(integrated_obj, assay = "RNA", slot = "counts") > 0) %>% min() message(glue("min cells per gene: {min_cells}")) write(glue("min cells per gene: {min_cells}"), file = "create.log", append = TRUE) message(glue("min genes per cell: {min_genes}")) write(glue("min genes per cell: {min_genes}"), file = "create.log", append = TRUE) # check that the full counts table is small enough to fit into an R matrix (max around 100k x 21k) num_matrix_elements = GetAssayData(integrated_obj, assay = "RNA", slot = "counts") %>% length() if (num_matrix_elements < 2^31) { # save raw counts matrix counts_raw = GetAssayData(integrated_obj, assay = "RNA", slot = "counts") %>% as.matrix() counts_raw = counts_raw %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene) # write_csv(counts_raw, path = "counts.raw.csv.gz") fwrite(counts_raw, file = "counts.raw.csv", sep = ",") R.utils::gzip("counts.raw.csv") rm(counts_raw) # save normalized counts matrix counts_norm = GetAssayData(integrated_obj, assay = "RNA") %>% as.matrix() %>% round(3) counts_norm = counts_norm %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene) # write_csv(counts_norm, path = "counts.normalized.csv.gz") fwrite(counts_norm, file = "counts.normalized.csv", sep = ",") R.utils::gzip("counts.normalized.csv") rm(counts_norm) # save integrated counts matrix # counts_int = GetAssayData(integrated_obj, assay = "integrated") %>% as.matrix() %>% round(3) # counts_int = counts_int %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene) # write_csv(counts_int, path = "counts.integrated.csv.gz") # rm(counts_int) } vln_theme = theme( axis.title.x = element_blank(), axis.title.y = element_blank(), axis.text.x = element_text(angle = 
90, vjust = 0.5, hjust = 1),
    legend.position = "none"
)

# per-sample QC violin plots of the per-cell metrics (genes, UMIs, pct mito)
suppressMessages({
    dist_nft_plot = VlnPlot(
        integrated_obj, features = "num_genes", group.by = "orig.ident", pt.size = 0.1,
        sort = TRUE, combine = TRUE, cols = colors_samples
    ) + scale_y_continuous(labels = comma) + vln_theme
    dist_nct_plot = VlnPlot(
        integrated_obj, features = "num_UMIs", group.by = "orig.ident", pt.size = 0.1,
        sort = TRUE, combine = TRUE, cols = colors_samples
    ) + scale_y_continuous(labels = comma) + vln_theme
    dist_pmt_plot = VlnPlot(
        integrated_obj, features = "pct_mito", group.by = "orig.ident", pt.size = 0.1,
        sort = TRUE, combine = TRUE, cols = colors_samples
    ) + scale_y_continuous(labels = comma) + vln_theme
    dist_plot = plot_grid(dist_nft_plot, dist_nct_plot, dist_pmt_plot, ncol = 3)
    ggsave("qc.distribution.png", plot = dist_plot, width = 20, height = 6, units = "in")
})
Sys.sleep(1)

# return the integrated (and gene-filtered) Seurat object
return(integrated_obj)

}

# calculate various variance metrics and perform basic analysis
# PC selection approaches:
# - PCHeatmap - more supervised, exploring PCs to determine relevant sources of heterogeneity
# - PCElbowPlot - heuristic that is commonly used and can be calculated instantly
# - JackStrawPlot - implements a statistical test based on a random null model, but is time-consuming
# jackStraw procedure is very slow, so skip for large projects (>10,000 cells)
# returns the Seurat object with variable features, scaled data, PCA (and optionally JackStraw) results added;
# also writes variance tables/plots into the current working directory as a side effect
calculate_variance = function(seurat_obj, jackstraw_max_cells = 10000) {

    s_obj = seurat_obj

    message("\n\n ========== Seurat::FindVariableGenes() ========== \n\n")

    # identify features that are outliers on a 'mean variability plot'
    # Seurat v3 implements an improved method based on a variance stabilizing transformation ("vst")
    s_obj = FindVariableFeatures(s_obj, selection.method = "vst", nfeatures = 2000, verbose = FALSE)

    # export highly variable feature information (mean, variance, variance standardized)
    hvf_tbl = HVFInfo(s_obj) %>% round(3) %>% rownames_to_column("gene") %>% arrange(-variance.standardized)
    write_excel_csv(hvf_tbl, path = "variance.csv")

    # plot variance (label the 30 most variable genes)
    var_plot = VariableFeaturePlot(s_obj, pt.size = 0.5)
    var_plot = LabelPoints(var_plot, points = head(hvf_tbl$gene, 30), repel = TRUE, xnudge = 0, ynudge = 0)
    ggsave("variance.features.png", plot = var_plot, width = 12, height = 5, units = "in")

    message("\n\n ========== Seurat::ScaleData() ========== \n\n")

    # regress out unwanted sources of variation
    # regressing uninteresting sources of variation can improve dimensionality reduction and clustering
    # could include technical noise, batch effects, biological sources of variation (cell cycle stage)
    # scaled z-scored residuals of these models are stored in scale.data slot
    # used for dimensionality reduction and clustering
    # RegressOut function has been deprecated, and replaced with the vars.to.regress argument in ScaleData
    # s_obj = ScaleData(s_obj, features = rownames(s_obj), vars.to.regress = c("num_UMIs", "pct_mito"), verbose = FALSE)
    s_obj = ScaleData(s_obj, vars.to.regress = c("num_UMIs", "pct_mito"), verbose = FALSE)

    message("\n\n ========== Seurat::PCA() ========== \n\n")

    # use fewer PCs for small datasets
    num_pcs = 50
    if (ncol(s_obj) < 100) num_pcs = 20
    if (ncol(s_obj) < 25) num_pcs = 5

    # PCA on the scaled data
    # PCA calculation stored in object[["pca"]]
    s_obj = RunPCA(s_obj, assay = "RNA", features = VariableFeatures(s_obj), npcs = num_pcs, verbose = FALSE)

    # plot the output of PCA analysis (shuffle cells so any one group does not appear overrepresented due to ordering)
    pca_plot = DimPlot(
        s_obj, cells = sample(colnames(s_obj)), group.by = "orig.ident", reduction = "pca",
        pt.size = 0.5, cols = colors_samples
    ) + theme(aspect.ratio = 1)
    ggsave("variance.pca.png", plot = pca_plot, width = 8, height = 6, units = "in")

    message("\n\n ========== Seurat::DimHeatmap() ========== \n\n")

    # PCHeatmap (former) allows for easy exploration of the primary sources of heterogeneity in a dataset
    if (num_pcs > 15) {
        png("variance.pca.heatmap.png", res = 300, width = 10, height = 16, units = "in")
        DimHeatmap(s_obj, reduction = "pca", dims = 1:15, nfeatures = 20, cells = 250, fast = TRUE)
        dev.off()
    }

    message("\n\n ========== Seurat::PCElbowPlot() ========== \n\n")

    # a more ad hoc method for determining PCs to use, draw cutoff where there is a clear elbow in the graph
    elbow_plot = ElbowPlot(s_obj, reduction = "pca", ndims = num_pcs)
    ggsave("variance.pca.elbow.png", plot = elbow_plot, width = 8, height = 5, units = "in")

    # resampling test inspired by the jackStraw procedure - very slow, so skip for large projects (>10,000 cells)
    if (ncol(s_obj) < jackstraw_max_cells) {

        message("\n\n ========== Seurat::JackStraw() ========== \n\n")

        # determine statistical significance of PCA scores
        s_obj = JackStraw(s_obj, assay = "RNA", reduction = "pca", dims = num_pcs, verbose = FALSE)

        # compute Jackstraw scores significance
        s_obj = ScoreJackStraw(s_obj, reduction = "pca", dims = 1:num_pcs, do.plot = FALSE)

        # plot the results of the JackStraw analysis for PCA significance
        # significant PCs will show a strong enrichment of genes with low p-values (solid curve above the dashed line)
        jackstraw_plot = JackStrawPlot(s_obj, reduction = "pca", dims = 1:num_pcs) + guides(col = guide_legend(ncol = 2))
        ggsave("variance.pca.jackstraw.png", plot = jackstraw_plot, width = 12, height = 6, units = "in")

    }

    return(s_obj)

}

# calculate various variance metrics and perform basic analysis (integrated analysis workflow)
# specify neighbors for UMAP (default is 30 in Seurat 2 and 3 pre-release)
# num_dim: number of principal components to compute/use (must be in [5, 50])
calculate_variance_integrated = function(seurat_obj, num_dim, num_neighbors = 30) {

    s_obj = seurat_obj

    # validate the requested number of dimensions
    num_dim = as.integer(num_dim)
    if (num_dim < 5) stop("too few dims: ", num_dim)
    if (num_dim > 50) stop("too many dims: ", num_dim)

    message("\n\n ========== Seurat::ScaleData() ========== \n\n")

    # no vars.to.regress here: the integrated assay values are already corrected
    # s_obj = ScaleData(s_obj, features = rownames(s_obj), verbose = FALSE)
    s_obj = ScaleData(s_obj, verbose = FALSE)

    message("\n\n ========== Seurat::PCA() ========== \n\n")

    # PCA on the scaled data
    s_obj =
RunPCA(s_obj, npcs = num_dim, verbose = FALSE)

    # plot the output of PCA analysis (shuffle cells so any one group does not appear overrepresented due to ordering)
    pca_plot = DimPlot(
        s_obj, reduction = "pca", cells = sample(colnames(s_obj)), group.by = "orig.ident",
        pt.size = 0.5, cols = colors_samples
    ) + theme(aspect.ratio = 1)
    ggsave("variance.pca.png", plot = pca_plot, width = 10, height = 6, units = "in")

    message("\n\n ========== Seurat::RunTSNE() ========== \n\n")

    # use tSNE as a tool to visualize, not for clustering directly on tSNE components
    # cells within the graph-based clusters determined above should co-localize on the tSNE plot
    # fix: Seurat v3 RunTSNE() takes `dims`, not the v2 `dims.use` argument
    # (with `dims.use`, the value fell into `...` and the default `dims = 1:5` was silently used)
    s_obj = RunTSNE(s_obj, reduction = "pca", dims = 1:num_dim)

    # reduce point size for larger datasets
    dr_pt_size = get_dr_point_size(s_obj)

    # tSNE using original sample names (shuffle cells so any one group does not appear overrepresented due to ordering)
    s_obj = set_identity(seurat_obj = s_obj, group_var = "orig.ident")
    plot_tsne = DimPlot(s_obj, reduction = "tsne", cells = sample(colnames(s_obj)), pt.size = dr_pt_size, cols = colors_samples) + theme(aspect.ratio = 1)
    ggsave(glue("dr.tsne.{num_dim}.sample.png"), plot = plot_tsne, width = 10, height = 6, units = "in")
    Sys.sleep(1)
    ggsave(glue("dr.tsne.{num_dim}.sample.pdf"), plot = plot_tsne, width = 10, height = 6, units = "in")
    Sys.sleep(1)

    message("\n\n ========== Seurat::RunUMAP() ========== \n\n")

    # runs the Uniform Manifold Approximation and Projection (UMAP) dimensional reduction technique
    s_obj = RunUMAP(s_obj, reduction = "pca", dims = 1:num_dim, n.neighbors = num_neighbors, verbose = FALSE)

    # UMAP using original sample names (shuffle cells so any one group does not appear overrepresented due to ordering)
    s_obj = set_identity(seurat_obj = s_obj, group_var = "orig.ident")
    plot_umap = DimPlot(s_obj, reduction = "umap", cells = sample(colnames(s_obj)), pt.size = dr_pt_size, cols = colors_samples) + theme(aspect.ratio = 1)
    ggsave(glue("dr.umap.{num_dim}.sample.png"), plot = plot_umap, width = 10, height = 6, units = "in")
    Sys.sleep(1)
    ggsave(glue("dr.umap.{num_dim}.sample.pdf"), plot = plot_umap, width = 10, height = 6, units = "in")
    Sys.sleep(1)

    # export the per-cell metadata table (includes tSNE/UMAP coordinates)
    save_metadata(seurat_obj = s_obj)

    return(s_obj)

}

# determine point size for tSNE/UMAP plots (smaller for larger datasets)
get_dr_point_size = function(seurat_obj) {
    pt_size = 1.8
    if (ncol(seurat_obj) > 1000) pt_size = 1.2
    if (ncol(seurat_obj) > 5000) pt_size = 1.0
    if (ncol(seurat_obj) > 10000) pt_size = 0.8
    if (ncol(seurat_obj) > 25000) pt_size = 0.6
    return(pt_size)
}

# perform graph-based clustering and tSNE
# specify neighbors for UMAP and FindNeighbors (default is 30 in Seurat 2 and 3 pre-release)
# returns the Seurat object with tSNE/UMAP reductions and one "snn_res.*" metadata column per
# retained clustering resolution; writes plots and a metadata table as a side effect
calculate_clusters = function(seurat_obj, num_dim, num_neighbors = 30) {

    # check if number of dimensions seems reasonable
    if (num_dim < 5) stop("too few dims: ", num_dim)
    if (num_dim > 50) stop("too many dims: ", num_dim)

    s_obj = seurat_obj

    message("\n\n ========== Seurat::RunTSNE() ========== \n\n")

    # use tSNE as a tool to visualize, not for clustering directly on tSNE components
    # cells within the graph-based clusters determined above should co-localize on the tSNE plot
    # fix: use the Seurat v3 `dims` argument (the v2 `dims.use` was silently ignored via `...`)
    s_obj = RunTSNE(s_obj, reduction = "pca", dims = 1:num_dim)

    # reduce point size for larger datasets
    dr_pt_size = get_dr_point_size(s_obj)

    # tSNE using original sample names (shuffle cells so any one group does not appear overrepresented due to ordering)
    s_obj = set_identity(seurat_obj = s_obj, group_var = "orig.ident")
    plot_tsne = DimPlot(s_obj, reduction = "tsne", cells = sample(colnames(s_obj)), pt.size = dr_pt_size, cols = colors_samples) + theme(aspect.ratio = 1)
    ggsave(glue("dr.tsne.{num_dim}.sample.png"), plot = plot_tsne, width = 10, height = 6, units = "in")
    Sys.sleep(1)
    ggsave(glue("dr.tsne.{num_dim}.sample.pdf"), plot = plot_tsne, width = 10, height = 6, units = "in")
    Sys.sleep(1)

    message("\n\n ========== Seurat::RunUMAP() ========== \n\n")

    # runs the Uniform Manifold Approximation and Projection (UMAP) dimensional reduction technique
    s_obj = RunUMAP(s_obj, reduction = "pca", dims = 1:num_dim, n.neighbors = num_neighbors, verbose = FALSE)

    # UMAP using original sample names (shuffle cells so any one group does not appear overrepresented due to ordering)
    s_obj = set_identity(seurat_obj = s_obj, group_var = "orig.ident")
    plot_umap = DimPlot(s_obj, reduction = "umap", cells = sample(colnames(s_obj)), pt.size = dr_pt_size, cols = colors_samples) + theme(aspect.ratio = 1)
    ggsave(glue("dr.umap.{num_dim}.sample.png"), plot = plot_umap, width = 10, height = 6, units = "in")
    Sys.sleep(1)
    ggsave(glue("dr.umap.{num_dim}.sample.pdf"), plot = plot_umap, width = 10, height = 6, units = "in")
    Sys.sleep(1)

    message("\n\n ========== Seurat::FindNeighbors() ========== \n\n")

    message("assay: ", DefaultAssay(s_obj))
    message("num dims: ", num_dim)

    # construct a Shared Nearest Neighbor (SNN) Graph for a given dataset
    s_obj = FindNeighbors(
        s_obj, dims = 1:num_dim, k.param = num_neighbors,
        graph.name = "snn", compute.SNN = TRUE, force.recalc = TRUE
    )

    message("\n\n ========== Seurat::FindClusters() ========== \n\n")

    message("initial metadata fields: ", str_c(colnames(s_obj@meta.data), collapse = ", "))

    # resolutions for graph-based clustering
    # increased resolution values lead to more clusters (recommendation: 0.6-1.2 for 3K cells, 2-4 for 33K cells)
    res_range = seq(0.1, 2.5, 0.1)
    if (ncol(s_obj) > 1000) res_range = c(res_range, 3, 4, 5, 6, 7, 8, 9)

    # algorithm: 1 = original Louvain; 2 = Louvain with multilevel refinement; 3 = SLM
    # identify clusters of cells by SNN modularity optimization based clustering algorithm
    s_obj = FindClusters(s_obj, algorithm = 3, resolution = res_range, graph.name = "snn", verbose = FALSE)

    # remove "seurat_clusters" column that is added automatically (added in v3 late dev version)
    s_obj@meta.data = s_obj@meta.data %>% select(-seurat_clusters)

    message("new metadata fields: ", str_c(colnames(s_obj@meta.data), collapse = ", "))

    # create a separate sub-directory for cluster resolution plots
    clusters_dir = "clusters-resolutions"
    if (!dir.exists(clusters_dir)) dir.create(clusters_dir)

    # for calculated cluster resolutions: remove redundant (same number of clusters), rename, and plot
    res_cols = str_subset(colnames(s_obj@meta.data), "snn_res")
    res_cols = sort(res_cols)
    res_num_clusters_prev = 1
    for (res in res_cols) {

        # proceed if current resolution has more clusters than previous and less than the color scheme length
        res_vector = s_obj@meta.data[, res] %>% as.character()
        res_num_clusters_cur = res_vector %>% n_distinct()
        if (res_num_clusters_cur > res_num_clusters_prev && res_num_clusters_cur < length(colors_clusters)) {

            # check if the resolution still has original labels (characters starting with 0)
            if (min(res_vector) == "0") {
                # convert to character vector
                s_obj@meta.data[, res] = as.character(s_obj@meta.data[, res])
                # relabel identities so they start with 1 and not 0
                s_obj@meta.data[, res] = as.numeric(s_obj@meta.data[, res]) + 1
                # pad with 0s to avoid sorting issues
                s_obj@meta.data[, res] = str_pad(s_obj@meta.data[, res], width = 2, side = "left", pad = "0")
                # pad with "C" to avoid downstream numeric conversions
                s_obj@meta.data[, res] = str_c("C", s_obj@meta.data[, res])
                # encode as a factor
                s_obj@meta.data[, res] = factor(s_obj@meta.data[, res])
            }

            # resolution value based on resolution column name
            res_val = sub("snn_res\\.", "", res)
            # plot file name
            res_str = gsub("\\.", "", res)
            dr_filename = glue("{clusters_dir}/dr.{DefaultAssay(s_obj)}.{num_dim}.{res_str}.clust{res_num_clusters_cur}")
            s_obj = plot_clusters(seurat_obj = s_obj, resolution = res_val, filename_base = dr_filename)

            # add blank line to make output easier to read
            message(" ")

        } else {

            # remove resolution if the number of clusters is same as previous
            s_obj@meta.data = s_obj@meta.data %>% select(-one_of(res))

        }

        # update resolution cluster count for next iteration
        res_num_clusters_prev = res_num_clusters_cur

    }

    message("updated metadata fields: ",
str_c(colnames(s_obj@meta.data), collapse = ", "))

    # export the per-cell metadata table (includes tSNE/UMAP coordinates)
    save_metadata(seurat_obj = s_obj)

    return(s_obj)

}

# compile all cell metadata into a single table
# writes "metadata.csv" (cell barcode, all meta.data columns, sample_name, tSNE/UMAP coordinates)
save_metadata = function(seurat_obj) {

    s_obj = seurat_obj

    # cell-level metadata with the sample name duplicated into an explicit column
    metadata_tbl = s_obj@meta.data %>% rownames_to_column("cell") %>% as_tibble() %>% mutate(sample_name = orig.ident)
    # tSNE and UMAP embeddings, rounded for a compact text export
    tsne_tbl = s_obj[["tsne"]]@cell.embeddings %>% round(3) %>% as.data.frame() %>% rownames_to_column("cell")
    umap_tbl = s_obj[["umap"]]@cell.embeddings %>% round(3) %>% as.data.frame() %>% rownames_to_column("cell")
    cells_metadata = metadata_tbl %>% full_join(tsne_tbl, by = "cell") %>% full_join(umap_tbl, by = "cell")
    cells_metadata = cells_metadata %>% arrange(cell)
    write_excel_csv(cells_metadata, path = "metadata.csv")

}

# plot tSNE with color-coded clusters at specified resolution
# returns the Seurat object with identities set to the requested resolution
plot_clusters = function(seurat_obj, resolution, filename_base) {

    s_obj = seurat_obj

    # set identities based on specified resolution
    s_obj = set_identity(seurat_obj = s_obj, group_var = resolution)

    # print stats
    num_clusters = Idents(s_obj) %>% as.character() %>% n_distinct()
    message("resolution: ", resolution)
    message("num clusters: ", num_clusters)

    # generate plot if there is a reasonable number of clusters
    if (num_clusters > 1 && num_clusters < length(colors_clusters)) {

        # shuffle cells so they appear randomly and one group does not show up on top
        plot_tsne = DimPlot(
            s_obj, reduction = "tsne", cells = sample(colnames(s_obj)),
            pt.size = get_dr_point_size(s_obj), cols = colors_clusters
        ) + theme(aspect.ratio = 1)
        ggsave(glue("{filename_base}.tsne.png"), plot = plot_tsne, width = 9, height = 6, units = "in")
        Sys.sleep(1)
        ggsave(glue("{filename_base}.tsne.pdf"), plot = plot_tsne, width = 9, height = 6, units = "in")
        Sys.sleep(1)
        plot_umap = DimPlot(
            s_obj, reduction = "umap", cells = sample(colnames(s_obj)),
            pt.size = get_dr_point_size(s_obj), cols = colors_clusters
        ) + theme(aspect.ratio = 1)
        ggsave(glue("{filename_base}.umap.png"), plot = plot_umap, width = 9, height = 6, units = "in")
        Sys.sleep(1)
        ggsave(glue("{filename_base}.umap.pdf"), plot = plot_umap, width = 9, height = 6, units = "in")
        Sys.sleep(1)

        # clean up the stray plot file some ggplot devices leave behind
        if (file.exists("Rplots.pdf")) file.remove("Rplots.pdf")

    }

    return(s_obj)

}

# check grouping variable/resolution against existing meta data columns
# accepts either a full column name or a bare resolution value ("0.5" -> "snn_res.0.5");
# stops with an error for unknown variables
check_group_var = function(seurat_obj, group_var) {

    s_obj = seurat_obj

    # check if the grouping variable is one of meta data columns
    if (!(group_var %in% colnames(s_obj@meta.data))) {
        # check if grouping variable is the resolution value (X.X instead of res.X.X)
        res_column = str_c("snn_res.", group_var)
        if (res_column %in% colnames(s_obj@meta.data)) {
            group_var = res_column
        } else {
            stop("unknown grouping variable: ", group_var)
        }
    }

    return(group_var)

}

# set identity based on a specified variable/resolution
set_identity = function(seurat_obj, group_var) {

    s_obj = seurat_obj

    # resolve the grouping variable to an actual metadata column name
    group_var = check_group_var(seurat_obj = s_obj, group_var = group_var)

    # set identities based on selected grouping variable
    message("setting grouping variable: ", group_var)
    Idents(s_obj) = group_var

    return(s_obj)

}

# plot a set of genes
# generates tSNE/UMAP feature plots, a dot plot, violin plots, and per-cluster bar plots
# for the given genes, saved under the given filename prefix
plot_genes = function(seurat_obj, genes, filename_base) {

    # color gradient for FeaturePlot-based plots
    gradient_colors = c("gray85", "red2")

    # switch to "RNA" assay from potentially "integrated"
    DefaultAssay(seurat_obj) = "RNA"

    # tSNE plots color-coded by expression level (should be square to match the original tSNE plots)
    feat_plot = FeaturePlot(
        seurat_obj, features = genes, reduction = "tsne", cells = sample(colnames(seurat_obj)),
        pt.size = 0.5, cols = gradient_colors, ncol = 4
    )
    ggsave(glue("{filename_base}.tsne.png"), plot = feat_plot, width = 16, height = 10, units = "in")
    ggsave(glue("{filename_base}.tsne.pdf"), plot = feat_plot, width = 16, height = 10, units = "in")

    # UMAP plots color-coded by expression level (should be square to match the original tSNE plots)
    feat_plot = FeaturePlot(
        seurat_obj, features = genes, reduction = "umap", cells = sample(colnames(seurat_obj)),
        pt.size = 0.5, cols = gradient_colors, ncol = 4
    )
    ggsave(glue("{filename_base}.umap.png"), plot = feat_plot, width = 16, height = 10, units = "in")
    ggsave(glue("{filename_base}.umap.pdf"), plot = feat_plot, width = 16, height = 10, units = "in")

    # dot plot visualization
    dot_plot = DotPlot(seurat_obj, features = genes, dot.scale = 12, cols = gradient_colors)
    ggsave(glue("{filename_base}.dotplot.png"), plot = dot_plot, width = 20, height = 8, units = "in")
    ggsave(glue("{filename_base}.dotplot.pdf"), plot = dot_plot, width = 20, height = 8, units = "in")

    # gene violin plots (size.use below 0.2 doesn't seem to make a difference)
    # skip PDF since every cell has to be plotted and they become too big
    vln_plot = VlnPlot(seurat_obj, features = genes, pt.size = 0.1, combine = TRUE, cols = colors_clusters, ncol = 4)
    ggsave(glue("{filename_base}.violin.png"), plot = vln_plot, width = 16, height = 10, units = "in")

    # expression levels per cluster for bar plots (averaging and output are in non-log space)
    cluster_avg_exp = AverageExpression(seurat_obj, assay = "RNA", features = genes, verbose = FALSE)[["RNA"]]
    cluster_avg_exp_long = cluster_avg_exp %>% rownames_to_column("gene") %>% gather(cluster, avg_exp, -gene)

    # bar plots
    # create a named color scheme to ensure names and colors are in the proper order
    clust_names = levels(seurat_obj)
    color_scheme_named = colors_clusters[1:length(clust_names)]
    names(color_scheme_named) = clust_names
    barplot_plot = ggplot(cluster_avg_exp_long, aes(x = cluster, y = avg_exp, fill = cluster)) +
        geom_col(color = "black") +
        theme(legend.position = "none") +
        scale_fill_manual(values = color_scheme_named) +
        scale_y_continuous(expand = c(0, 0)) +
        theme_cowplot() +
        facet_wrap(~ gene, ncol = 4, scales = "free")
    ggsave(glue("{filename_base}.barplot.png"), plot = barplot_plot, width = 16, height = 10, units = "in")
    ggsave(glue("{filename_base}.barplot.pdf"), plot = barplot_plot, width = 16, height = 10, units = "in")

}

# calculate cluster stats (number of
# cells, average expression, cell-gene matrix)
# writes per-cluster summary tables and mean-expression matrices for the current identities,
# labeled with the given `label`
calculate_cluster_stats = function(seurat_obj, label) {

    message("\n\n ========== calculate cluster stats ========== \n\n")

    message("cluster names: ", str_c(levels(seurat_obj), collapse = ", "))

    # compile relevant cell metadata into a single table
    seurat_obj$cluster = Idents(seurat_obj)
    metadata_tbl = seurat_obj@meta.data %>% rownames_to_column("cell") %>% as_tibble() %>%
        select(cell, num_UMIs, num_genes, pct_mito, sample_name = orig.ident, cluster)
    tsne_tbl = seurat_obj[["tsne"]]@cell.embeddings %>% round(3) %>% as.data.frame() %>% rownames_to_column("cell")
    umap_tbl = seurat_obj[["umap"]]@cell.embeddings %>% round(3) %>% as.data.frame() %>% rownames_to_column("cell")
    cells_metadata = metadata_tbl %>% full_join(tsne_tbl, by = "cell") %>% full_join(umap_tbl, by = "cell")
    cells_metadata = cells_metadata %>% arrange(cell)
    write_excel_csv(cells_metadata, path = glue("metadata.{label}.csv"))

    # get number of cells split by cluster and by sample
    summary_cluster_sample = cells_metadata %>%
        select(cluster, sample_name) %>%
        mutate(num_cells_total = n()) %>%
        group_by(sample_name) %>%
        mutate(num_cells_sample = n()) %>%
        group_by(cluster) %>%
        mutate(num_cells_cluster = n()) %>%
        group_by(cluster, sample_name) %>%
        mutate(num_cells_cluster_sample = n()) %>%
        ungroup() %>%
        distinct() %>%
        mutate(
            pct_cells_cluster = num_cells_cluster / num_cells_total,
            pct_cells_cluster_sample = num_cells_cluster_sample / num_cells_sample
        ) %>%
        mutate(
            pct_cells_cluster = round(pct_cells_cluster * 100, 1),
            pct_cells_cluster_sample = round(pct_cells_cluster_sample * 100, 1)
        ) %>%
        arrange(cluster, sample_name)

    # get number of cells split by cluster (ignore samples)
    summary_cluster = summary_cluster_sample %>% select(-contains("sample")) %>% distinct()
    write_excel_csv(summary_cluster, path = glue("summary.{label}.csv"))

    # gene expression for an "average" cell in each identity class (averaging and output are in non-log space)
    cluster_avg_exp = AverageExpression(seurat_obj, assay = "RNA", verbose = FALSE)[["RNA"]]
    cluster_avg_exp = cluster_avg_exp %>% round(3) %>% rownames_to_column("gene") %>% arrange(gene)
    write_excel_csv(cluster_avg_exp, path = glue("expression.mean.{label}.csv"))
    Sys.sleep(1)

    # export results split by sample if multiple samples are present
    num_samples = cells_metadata %>% pull(sample_name) %>% n_distinct()
    if (num_samples > 1) {

        # number of cells split by cluster and by sample
        write_excel_csv(summary_cluster_sample, path = glue("summary.{label}.per-sample.csv"))

        # cluster averages split by sample
        sample_avg_exp = AverageExpression(seurat_obj, assay = "RNA", add.ident = "orig.ident", verbose = FALSE)[["RNA"]]
        sample_avg_exp = sample_avg_exp %>% round(3) %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene)
        write_excel_csv(sample_avg_exp, path = glue("expression.mean.{label}.per-sample.csv"))

    }

}

# calculate cluster markers (compared to all other cells) and plot top ones
# tests:
# - roc: ROC test returns the classification power (ranging from 0 - random, to 1 - perfect)
# - wilcox: Wilcoxon rank sum test (default in Seurat 2)
# - bimod: Likelihood-ratio test for single cell gene expression (McDavid, Bioinformatics, 2013) (default in Seurat 1)
# - tobit: Tobit-test for differential gene expression (Trapnell, Nature Biotech, 2014)
# - MAST: GLM-framework that treates cellular detection rate as a covariate (Finak, Genome Biology, 2015)
# pairwise option compares each cluster to each of the other clusters to yield markers that are both local and global
calculate_cluster_markers = function(seurat_obj, label, test, pairwise = FALSE) {

    message("\n\n ========== calculate cluster markers ========== \n\n")

    message("cluster set: ", label)
    message("marker test: ", test)

    # get cluster names
    clusters = Idents(seurat_obj) %>% as.character() %>% unique() %>% sort()
    # use only clusters with more than 10 cells
    # NOTE(review): this indexes the sorted cluster names with a mask built from table(Idents(...)),
    # which is ordered by factor levels - assumes the two orders coincide; verify when levels are not sorted
    clusters = clusters[table(Idents(seurat_obj)) > 10]

    if (!pairwise) {

        # standard cluster markers calculation
        markers_dir = "markers-global"

        # capture output to avoid excessive warnings
        markers_log = capture.output({
            all_markers = FindAllMarkers(
                seurat_obj, assay = "RNA", test.use = test, logfc.threshold = log(1.2),
                min.pct = 0.2, only.pos = FALSE, min.diff.pct = -Inf, verbose = FALSE
            )
        }, type = "message")

        # do some light filtering and clean up (ROC test returns slightly different output)
        if (test == "roc") {
            all_markers = all_markers %>%
                select(cluster, gene, logFC = avg_logFC, myAUC, power) %>%
                filter(power > 0.4) %>%
                mutate(logFC = round(logFC, 5), myAUC = round(myAUC, 5), power = round(power, 5)) %>%
                arrange(cluster, -power)
            top_markers = all_markers %>% filter(logFC > 0)
            top_markers = top_markers %>% group_by(cluster) %>% top_n(50, power) %>% ungroup()
        } else {
            all_markers = all_markers %>%
                select(cluster, gene, logFC = avg_logFC, p_val, p_val_adj) %>%
                filter(p_val_adj < 0.001) %>%
                mutate(logFC = round(logFC, 5)) %>%
                arrange(cluster, p_val_adj, p_val)
            top_markers = all_markers %>% filter(logFC > 0)
            top_markers = top_markers %>% group_by(cluster) %>% top_n(50, logFC) %>% ungroup()
        }

    } else {

        # pairwise (each cluster versus each other cluster) cluster markers calculation
        markers_dir = "markers-pairwise"

        # initialize empty results tibble
        unfiltered_markers = tibble(
            cluster = character(), cluster2 = character(), gene = character(),
            logFC = numeric(), p_val = numeric(), p_val_adj = numeric()
        )

        # check each cluster combination
        for (cluster1 in clusters) {
            for (cluster2 in setdiff(clusters, cluster1)) {

                # find differentially expressed genes between two specific clusters
                # low fold change cutoff to maximize chance of appearing in all comparisons
                # capture output to avoid excessive warnings
                markers_log = capture.output({
                    cur_markers = FindMarkers(
                        seurat_obj, assay = "RNA", ident.1 = cluster1, ident.2 = cluster2, test.use = test,
                        logfc.threshold = log(1.1), min.pct = 0.1, only.pos = TRUE, min.diff.pct = -Inf, verbose = FALSE
                    )
                }, type = "message")

                # clean up markers table (would need to be modified for "roc" test)
                cur_markers = cur_markers %>%
                    rownames_to_column("gene") %>%
                    mutate(cluster = cluster1) %>%
                    mutate(cluster2 = cluster2) %>%
                    filter(p_val_adj < 0.01) %>%
                    mutate(logFC = round(avg_logFC, 5)) %>%
                    select(one_of(colnames(unfiltered_markers)))

                # add current cluster combination genes to the table of all markers
                unfiltered_markers = bind_rows(unfiltered_markers, cur_markers)

            }
        }

        # adjust test name for output
        test = glue("pairwise.{test}")

        # sort the markers to make the table more readable
        unfiltered_markers = unfiltered_markers %>%
            distinct() %>%
            add_count(cluster, gene) %>%
            rename(cluster_gene_n = n) %>%
            arrange(cluster, gene, cluster2)

        # filter for genes that are significant compared to all other clusters
        all_markers = unfiltered_markers %>%
            filter(cluster_gene_n == (length(clusters) - 1)) %>%
            select(-cluster_gene_n)

        # extract the lowest and highest fold changes and p-values
        all_markers = all_markers %>%
            group_by(cluster, gene) %>%
            summarize_at(
                c("logFC", "p_val", "p_val_adj"),
                list(min = min, max = max)
            ) %>%
            ungroup() %>%
            arrange(cluster, -logFC_min)
        top_markers = all_markers %>% group_by(cluster) %>% top_n(50, logFC_min) %>% ungroup()

    }

    # create a separate sub-directory for all markers
    if (!dir.exists(markers_dir)) dir.create(markers_dir)

    # filename prefix
    filename_base = glue("{markers_dir}/markers.{label}.{test}")

    # save unfiltered markers for pairwise comparisons
    if (pairwise) {
        unfiltered_markers_csv = glue("{filename_base}.unfiltered.csv")
        message("unfiltered markers: ", unfiltered_markers_csv)
        write_excel_csv(unfiltered_markers, path = unfiltered_markers_csv)
        Sys.sleep(1)
    }

    all_markers_csv = glue("{filename_base}.all.csv")
    message("all markers: ", all_markers_csv)
    write_excel_csv(all_markers, path = all_markers_csv)
    Sys.sleep(1)

    top_markers_csv = glue("{filename_base}.top.csv")
    message("top markers: ", top_markers_csv)
    write_excel_csv(top_markers, path = top_markers_csv)
    Sys.sleep(1)
    # plot cluster markers heatmap
    plot_cluster_markers(seurat_obj, markers_tbl = all_markers, num_genes = c(5, 10, 20), filename_base = filename_base)

    # plot top cluster markers for each cluster
    for (cluster_name in clusters) {
        filename_cluster_base = glue("{markers_dir}/markers.{label}-{cluster_name}.{test}")
        cluster_markers = top_markers %>% filter(cluster == cluster_name)
        # only plot clusters with a meaningful number of markers (top 12 genes are shown)
        if (nrow(cluster_markers) > 9) {
            Sys.sleep(1)
            top_cluster_markers = cluster_markers %>% head(12) %>% pull(gene)
            plot_genes(seurat_obj, genes = top_cluster_markers, filename_base = filename_cluster_base)
        }
    }

}

# generate cluster markers heatmap
# markers_tbl: marker table from calculate_cluster_markers() (global or pairwise format)
# num_genes: vector of "top N genes per cluster" settings; one heatmap is produced per value
plot_cluster_markers = function(seurat_obj, markers_tbl, num_genes, filename_base) {

    # adjust pairwise clusters to match the standard format
    if ("logFC_min" %in% colnames(markers_tbl)) {
        markers_tbl = markers_tbl %>% mutate(logFC = logFC_min)
    }

    # keep only the top cluster for each gene so each gene appears once
    markers_tbl = markers_tbl %>% filter(logFC > 0)
    markers_tbl = markers_tbl %>% group_by(gene) %>% top_n(1, logFC) %>% slice(1) %>% ungroup()

    num_clusters = Idents(seurat_obj) %>% as.character() %>% n_distinct()

    # per-cluster mean expression of the marker genes, log-transformed
    marker_genes = markers_tbl %>% pull(gene) %>% unique() %>% sort()
    cluster_avg_exp = AverageExpression(seurat_obj, assay = "RNA", features = marker_genes, verbose = FALSE)[["RNA"]]
    cluster_avg_exp = cluster_avg_exp %>% as.matrix() %>% log1p()
    # drop genes with no signal in any cluster
    cluster_avg_exp = cluster_avg_exp[rowSums(cluster_avg_exp) > 0, ]

    # heatmap settings
    hm_colors = colorRampPalette(c("#053061", "#FFFFFF", "#E41A1C"))(51)
    # scale the plot width with the number of clusters
    hm_width = ( num_clusters / 2 ) + 2

    for (ng in num_genes) {

        hm_base = glue("{filename_base}.heatmap.top{ng}")

        markers_top_tbl = markers_tbl %>% group_by(cluster) %>% top_n(ng, logFC) %>% ungroup()
        markers_top_tbl = markers_top_tbl %>% arrange(cluster, -logFC)

        # generate the scaled expression matrix and save the text version
        hm_mat = cluster_avg_exp[markers_top_tbl$gene, ]
        # z-score each gene (row) across clusters
        hm_mat = hm_mat %>% t() %>% scale() %>% t()
        hm_mat %>% round(3) %>% as_tibble(rownames
vars.to.regress = c("num_UMIs", "pct_mito")) # iterate through sample/library combinations (relevant if more than two) group_combinations = combn(levels(clust_obj), m = 2, simplify = TRUE) for (combination_num in 1:ncol(group_combinations)) { # determine combination g1 = group_combinations[1, combination_num] g2 = group_combinations[2, combination_num] comparison_label = glue("{g1}-vs-{g2}") message(glue("comparison: {clust_name} {g1} vs {g2}")) filename_label = glue("{de_dir}/de.{label}-{clust_name}.{comparison_label}.{test}") # find differentially expressed genes (default Wilcoxon rank sum test) de_genes = FindMarkers(clust_obj, ident.1 = g1, ident.2 = g2, assay = "RNA", test.use = test, logfc.threshold = log(1), min.pct = 0.1, only.pos = FALSE, print.bar = FALSE) # perform some light filtering and clean up de_genes = de_genes %>% rownames_to_column("gene") %>% mutate(cluster = clust_name, group1 = g1, group2 = g2, de_test = test) %>% select(cluster, group1, group2, de_test, gene, logFC = avg_logFC, p_val, p_val_adj) %>% mutate( logFC = round(logFC, 3), p_val = if_else(p_val < 0.00001, p_val, round(p_val, 5)), p_val_adj = if_else(p_val_adj < 0.00001, p_val_adj, round(p_val_adj, 5)) ) %>% arrange(p_val_adj, p_val) message(glue("{comparison_label} num genes: {nrow(de_genes)}")) # save stats table write_excel_csv(de_genes, path = glue("{filename_label}.csv")) # add cluster genes to all genes de_all_genes_tbl = bind_rows(de_all_genes_tbl, de_genes) # heatmap of top genes if (nrow(de_genes) > 5) { top_de_genes = de_genes %>% top_n(num_de_genes, -p_val_adj) %>% arrange(logFC) %>% pull(gene) plot_hm = DoHeatmap(clust_obj, features = top_de_genes, assay = "RNA", slot = "scale.data") heatmap_prefix = glue("{filename_label}.heatmap.top{num_de_genes}") ggsave(glue("{heatmap_prefix}.png"), plot = plot_hm, width = 15, height = 10, units = "in") Sys.sleep(1) ggsave(glue("{heatmap_prefix}.pdf"), plot = plot_hm, width = 15, height = 10, units = "in") Sys.sleep(1) } } } else { 
message("skip cluster: ", clust_name) } message(" ") } # save stats table write_excel_csv(de_all_genes_tbl, path = glue("{de_dir}/de.{label}.{group_var}.{test}.all.csv")) de_all_genes_tbl = de_all_genes_tbl %>% filter(p_val_adj < 0.01) write_excel_csv(de_all_genes_tbl, path = glue("{de_dir}/de.{label}.{group_var}.{test}.sig.csv")) } # ========== main ========== # output width options(width = 120) # print warnings as they occur options(warn = 1) # default type for the bitmap devices such as png (should default to "cairo") options(bitmapType = "cairo") # retrieve the command-line arguments suppressPackageStartupMessages(library(docopt)) opts = docopt(doc) # show docopt options # print(opts) # dependencies load_libraries() # set number of cores for parallel package (will use all available cores by default) options(mc.cores = 4) # evaluate Seurat R expressions asynchronously when possible (such as ScaleData) using future package plan("multiprocess", workers = 4) # increase the limit of the data to be shuttled between the processes from default 500MB to 50GB options(future.globals.maxSize = 50e9) # global settings colors_samples = c(brewer.pal(5, "Set1"), brewer.pal(8, "Dark2"), pal_igv("default")(51)) colors_clusters = c(pal_d3("category10")(10), pal_d3("category20b")(20), pal_igv()(51), pal_igv(alpha = 0.6)(51)) # analysis info analysis_step = "unknown" out_dir = opts$analysis_dir # create analysis directory if starting new analysis or exit if analysis already exists if (opts$create || opts$combine || opts$integrate) { if (opts$create) analysis_step = "create" if (opts$combine) analysis_step = "combine" if (opts$integrate) analysis_step = "integrate" message(glue("\n\n ========== started analysis step {analysis_step} for {out_dir} ========== \n\n")) if (dir.exists(out_dir)) { stop(glue("output analysis dir {out_dir} already exists")) } else { dir.create(out_dir) } # original working dir (before moving to analysis dir) original_wd = getwd() } # set analysis directory 
as working directory if (dir.exists(out_dir)) { setwd(out_dir) } else { stop(glue("output analysis dir {out_dir} does not exist")) } # check which command was used if (opts$create) { # log to file write(glue("analysis: {out_dir}"), file = "create.log", append = TRUE) write(glue("seurat version: {packageVersion('Seurat')}"), file = "create.log", append = TRUE) # create new seurat object based on input sample names and sample directories # can work with multiple samples, but the appropriate way is to use "combine" with objects that are pre-filtered seurat_obj = load_sample_counts_matrix(opts$sample_name, opts$sample_dir) # filter by number of genes and mitochondrial genes percentage (optional parameters) seurat_obj = filter_data(seurat_obj, min_genes = opts$min_genes, max_genes = opts$max_genes, max_mt = opts$mt) # calculate various variance metrics seurat_obj = calculate_variance(seurat_obj) saveRDS(seurat_obj, file = "seurat_obj.rds") } else if (opts$combine) { # merge multiple samples/libraries based on previous analysis directories seurat_obj = combine_seurat_obj(original_wd = original_wd, sample_analysis_dirs = opts$sample_analysis_dir) saveRDS(seurat_obj, file = "seurat_obj.rds") # calculate various variance metrics seurat_obj = calculate_variance(seurat_obj) saveRDS(seurat_obj, file = "seurat_obj.rds") } else if (opts$integrate) { # run integration seurat_obj = integrate_seurat_obj(original_wd, sample_analysis_dirs = opts$batch_analysis_dir, num_dim = opts$num_dim) seurat_obj = calculate_variance_integrated(seurat_obj, num_dim = opts$num_dim) saveRDS(seurat_obj, file = "seurat_obj.rds") } else { # all commands besides "create" and "cca" start with an existing seurat object if (file.exists("seurat_obj.rds")) { message("loading seurat_obj") seurat_obj = readRDS("seurat_obj.rds") } else { stop("seurat obj does not already exist (run 'create' step first)") } if (opts$cluster) { analysis_step = "cluster" message(glue("\n\n ========== started analysis step 
{analysis_step} for {out_dir} ========== \n\n")) # determine clusters seurat_obj = calculate_clusters(seurat_obj, num_dim = as.integer(opts$num_dim)) saveRDS(seurat_obj, file = "seurat_obj.rds") } if (opts$identify || opts$de) { # set resolution in the seurat object group_var = check_group_var(seurat_obj = seurat_obj, group_var = opts$resolution) seurat_obj = set_identity(seurat_obj = seurat_obj, group_var = group_var) # use a grouping-specific sub-directory for all output grouping_label = gsub("\\.", "", group_var) num_clusters = Idents(seurat_obj) %>% as.character() %>% n_distinct() clust_label = glue("clust{num_clusters}") res_dir = glue("clusters-{grouping_label}-{clust_label}") if (!dir.exists(res_dir)) dir.create(res_dir) setwd(res_dir) if (opts$identify) { analysis_step = "identify" message(glue("\n\n ========== started analysis step {analysis_step} for {out_dir} ========== \n\n")) # create tSNE plot (should already exist in the main directory) dr_filename = glue("dr.{grouping_label}.{clust_label}") seurat_obj = plot_clusters(seurat_obj, resolution = opts$resolution, filename_base = dr_filename) # cluster stat tables (number of cells and average expression) calculate_cluster_stats(seurat_obj, label = clust_label) # calculate and plot standard cluster markers calculate_cluster_markers(seurat_obj, label = clust_label, test = "roc") calculate_cluster_markers(seurat_obj, label = clust_label, test = "wilcox") calculate_cluster_markers(seurat_obj, label = clust_label, test = "MAST") # calculate and plot pairwise cluster markers (very slow, so skip for high number of clusters) num_clusters = Idents(seurat_obj) %>% as.character() %>% n_distinct() if (num_clusters < 20) { calculate_cluster_markers(seurat_obj, label = clust_label, test = "wilcox", pairwise = TRUE) calculate_cluster_markers(seurat_obj, label = clust_label, test = "MAST", pairwise = TRUE) } } # differential expression if (opts$de) { analysis_step = "diff" message(glue("\n\n ========== started analysis 
step {analysis_step} for {out_dir} ========== \n\n")) calculate_cluster_de_genes(seurat_obj, label = clust_label, test = "wilcox") calculate_cluster_de_genes(seurat_obj, label = clust_label, test = "MAST") } } } message(glue("\n\n ========== finished analysis step {analysis_step} for {out_dir} ========== \n\n")) # delete Rplots.pdf if (file.exists("Rplots.pdf")) file.remove("Rplots.pdf") # end
/scripts/scrna-10x-seurat-3.R
no_license
xiaocong3333/genomics
R
false
false
74,805
r
#!/usr/bin/env Rscript " Analysis of 10x Genomics Chromium single cell RNA-seq data using Seurat (version 3.0) starting with Cell Ranger output. Basic workflow steps: 1 - create - import counts matrix, perform initial QC, and calculate various variance metrics (slowest step) 2 - cluster - perform clustering based on number of PCs 3 - identify - identify clusters based on specified clustering/resolution (higher resolution for more clusters) Additional optional steps: combine - merge multiple samples/libraries (no batch correction) integrate - perform integration (batch correction) across multiple sample batches de - differential expression between samples/libraries within clusters Usage: scrna-10x-seurat-3.R create <analysis_dir> <sample_name> <sample_dir> [--min_genes=<n> --max_genes=<n> --mt=<n>] scrna-10x-seurat-3.R cluster <analysis_dir> <num_dim> scrna-10x-seurat-3.R identify <analysis_dir> <resolution> scrna-10x-seurat-3.R combine <analysis_dir> <sample_analysis_dir>... scrna-10x-seurat-3.R integrate <analysis_dir> <num_dim> <batch_analysis_dir>... 
scrna-10x-seurat-3.R de <analysis_dir> <resolution> scrna-10x-seurat-3.R --help Options: --min_genes=<n> cutoff for minimum number of genes per cell (2nd percentile if not specified) --max_genes=<n> cutoff for maximum number of genes per cell (98th percentile if not specified) --mt=<n> cutoff for mitochondrial genes percentage per cell [default: 10] -h, --help show this screen " -> doc # ========== functions ========== # load dependencies load_libraries = function() { message("\n\n ========== load libraries ========== \n\n") suppressPackageStartupMessages({ library(magrittr) library(glue) library(Seurat) library(future) library(Matrix) library(tidyverse) library(data.table) library(cowplot) library(scales) library(pheatmap) library(RColorBrewer) library(ggsci) library(eulerr) library(UpSetR) }) } # create a single Seurat object from multiple 10x Cell Ranger outputs # takes vector of one or more sample_names and sample_dirs # can work with multiple samples, but the appropriate way is to use "combine" with objects that are pre-filtered load_sample_counts_matrix = function(sample_names, sample_dirs) { message("\n\n ========== import cell ranger counts matrix ========== \n\n") merged_counts_matrix = NULL for (i in 1:length(sample_names)) { sample_name = sample_names[i] sample_dir = sample_dirs[i] message("loading counts matrix for sample: ", sample_name) # check if sample dir is valid if (!dir.exists(sample_dir)) stop(glue("dir {sample_dir} does not exist")) # determine counts matrix directory (HDF5 is not the preferred option) # "filtered_gene_bc_matrices" for single library # "filtered_gene_bc_matrices_mex" for aggregated # Cell Ranger 3.0: "genes" has been replaced by "features" to account for feature barcoding # Cell Ranger 3.0: the matrix and barcode files are now gzipped data_dir = glue("{sample_dir}/outs") if (!dir.exists(data_dir)) stop(glue("dir {sample_dir} does not contain outs directory")) data_dir = list.files(path = data_dir, pattern = "matrix.mtx", 
full.names = TRUE, recursive = TRUE) data_dir = str_subset(data_dir, "filtered_.*_bc_matri")[1] data_dir = dirname(data_dir) if (!dir.exists(data_dir)) stop(glue("dir {sample_dir} does not contain matrix.mtx")) message("loading counts matrix dir: ", data_dir) counts_matrix = Read10X(data_dir) message(glue("library {sample_name} cells: {ncol(counts_matrix)}")) message(glue("library {sample_name} genes: {nrow(counts_matrix)}")) message(" ") # log to file write(glue("library {sample_name} cells: {ncol(counts_matrix)}"), file = "create.log", append = TRUE) write(glue("library {sample_name} genes: {nrow(counts_matrix)}"), file = "create.log", append = TRUE) # clean up counts matrix to make it more readable counts_matrix = counts_matrix[sort(rownames(counts_matrix)), ] colnames(counts_matrix) = str_c(sample_name, ":", colnames(counts_matrix)) # combine current matrix with previous if (i == 1) { # skip if there is no previous matrix merged_counts_matrix = counts_matrix } else { # check if genes are the same for current and previous matrices if (!identical(rownames(merged_counts_matrix), rownames(counts_matrix))) { # generate a warning, since this is probably a mistake warning("counts matrix genes are not the same for different libraries") Sys.sleep(1) # get common genes common_genes = intersect(rownames(merged_counts_matrix), rownames(counts_matrix)) common_genes = sort(common_genes) message("num genes for previous libraries: ", length(rownames(merged_counts_matrix))) message("num genes for current library: ", length(rownames(counts_matrix))) message("num genes in common: ", length(common_genes)) # exit if the number of overlapping genes is too few if (length(common_genes) < (length(rownames(counts_matrix)) * 0.9)) stop("libraries have too few genes in common") # subset current and previous matrix to overlapping genes merged_counts_matrix = merged_counts_matrix[common_genes, ] counts_matrix = counts_matrix[common_genes, ] } # combine current matrix with previous 
merged_counts_matrix = cbind(merged_counts_matrix, counts_matrix) Sys.sleep(1) } } # create a Seurat object s_obj = create_seurat_obj(counts_matrix = merged_counts_matrix) return(s_obj) } # convert a sparse matrix of counts to a Seurat object and generate some QC plots create_seurat_obj = function(counts_matrix, proj_name = NULL, sample_dir = NULL) { message("\n\n ========== save raw counts matrix ========== \n\n") # log to file write(glue("input cells: {ncol(counts_matrix)}"), file = "create.log", append = TRUE) write(glue("input genes: {nrow(counts_matrix)}"), file = "create.log", append = TRUE) # remove cells with few genes (there is an additional filter in CreateSeuratObject) counts_matrix = counts_matrix[, Matrix::colSums(counts_matrix) >= 250] # remove genes without counts (there is an additional filter in CreateSeuratObject) counts_matrix = counts_matrix[Matrix::rowSums(counts_matrix) >= 5, ] # log to file write(glue("detectable cells: {ncol(counts_matrix)}"), file = "create.log", append = TRUE) write(glue("detectable genes: {nrow(counts_matrix)}"), file = "create.log", append = TRUE) # save counts matrix as a standard text file and gzip # counts_matrix_filename = "counts.raw.txt" # write.table(as.matrix(counts_matrix), file = counts_matrix_filename, quote = FALSE, sep = "\t", col.names = NA) # system(paste0("gzip ", counts_matrix_filename)) # save counts matrix as a csv file (to be consistent with the rest of the tables) raw_data = counts_matrix %>% as.matrix() %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene) write_csv(raw_data, path = "counts.raw.csv.gz") rm(raw_data) message("\n\n ========== create seurat object ========== \n\n") if (is.null(proj_name)) { # if name is not set, then it's a manually merged counts matrix s_obj = CreateSeuratObject(counts = counts_matrix, min.cells = 5, min.features = 250, project = "proj", names.field = 1, names.delim = ":") rm(counts_matrix) } else if (proj_name == "aggregated") { # multiple libraries 
combined using Cell Ranger (cellranger aggr) # setup taking into consideration aggregated names delimiter s_obj = CreateSeuratObject(counts = counts_matrix, min.cells = 5, min.features = 250, project = proj_name, names.field = 2, names.delim = "-") # import cellranger aggr sample sheet sample_sheet_csv = paste0(sample_dir, "/outs/aggregation_csv.csv") sample_sheet = read.csv(sample_sheet_csv, stringsAsFactors = FALSE) message("samples: ", paste(sample_sheet[, 1], collapse=", ")) # change s_obj@meta.data$orig.ident sample identities from numbers to names s_obj[["orig.ident"]][, 1] = factor(sample_sheet[s_obj[["orig.ident"]][, 1], 1]) # set s_obj@ident to the new s_obj@meta.data$orig.ident s_obj = set_identity(seurat_obj = s_obj, group_var = "orig.ident") } else { stop("project name set to unknown value") } message(glue("imported cells: {ncol(s_obj)}")) message(glue("imported genes: {nrow(s_obj)}")) message(" ") # log to file write(glue("imported cells: {ncol(s_obj)}"), file = "create.log", append = TRUE) write(glue("imported genes: {nrow(s_obj)}"), file = "create.log", append = TRUE) # rename nCount_RNA and nFeature_RNA slots to make them more clear s_obj$num_UMIs = s_obj$nCount_RNA s_obj$num_genes = s_obj$nFeature_RNA # nFeature_RNA and nCount_RNA are automatically calculated for every object by Seurat # calculate the percentage of mitochondrial genes here and store it in percent.mito using the AddMetaData mt_genes = grep("^MT-", rownames(GetAssayData(s_obj)), ignore.case = TRUE, value = TRUE) percent_mt = Matrix::colSums(GetAssayData(s_obj)[mt_genes, ]) / Matrix::colSums(GetAssayData(s_obj)) percent_mt = round(percent_mt * 100, digits = 3) # add columns to object@meta.data, and is a great place to stash QC stats s_obj = AddMetaData(s_obj, metadata = percent_mt, col.name = "pct_mito") message("\n\n ========== nFeature_RNA/nCount_RNA/percent_mito plots ========== \n\n") # create a named color scheme to ensure names and colors are in the proper order sample_names = 
s_obj$orig.ident %>% as.character() %>% sort() %>% unique() colors_samples_named = colors_samples[1:length(sample_names)] names(colors_samples_named) = sample_names vln_theme = theme( axis.title.x = element_blank(), axis.title.y = element_blank(), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1), legend.position = "none" ) suppressMessages({ dist_unfilt_nft_plot = VlnPlot( s_obj, features = "num_genes", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_unfilt_nct_plot = VlnPlot( s_obj, features = "num_UMIs", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_unfilt_pmt_plot = VlnPlot( s_obj, features = "pct_mito", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_unfilt_plot = plot_grid(dist_unfilt_nft_plot, dist_unfilt_nct_plot, dist_unfilt_pmt_plot, ncol = 3) ggsave("qc.distribution.unfiltered.png", plot = dist_unfilt_plot, width = 10, height = 6, units = "in") }) Sys.sleep(1) cor_ncr_nfr_plot = FeatureScatter( s_obj, feature1 = "num_UMIs", feature2 = "num_genes", group.by = "orig.ident", cols = colors_samples ) + theme(aspect.ratio = 1) cor_ncr_pmt_plot = FeatureScatter( s_obj, feature1 = "num_UMIs", feature2 = "pct_mito", group.by = "orig.ident", cols = colors_samples ) + theme(aspect.ratio = 1) cor_nfr_pmt_plot = FeatureScatter( s_obj, feature1 = "num_genes", feature2 = "pct_mito", group.by = "orig.ident", cols = colors_samples ) + theme(aspect.ratio = 1) cor_unfilt_plot = plot_grid(cor_ncr_nfr_plot, cor_ncr_pmt_plot, cor_nfr_pmt_plot, ncol = 3) ggsave("qc.correlations.unfiltered.png", plot = cor_unfilt_plot, width = 18, height = 5, units = "in") Sys.sleep(1) # check distribution of gene counts and mitochondrial percentage low_quantiles = 
c(0.05, 0.02, 0.01, 0.001) high_quantiles = c(0.95, 0.98, 0.99, 0.999) message("num genes low percentiles:") s_obj$num_genes %>% quantile(low_quantiles) %>% round(1) %>% print() message(" ") message("num genes high percentiles:") s_obj$num_genes %>% quantile(high_quantiles) %>% round(1) %>% print() message(" ") message("pct mito high percentiles:") s_obj$pct_mito %>% quantile(high_quantiles) %>% round(1) %>% print() message(" ") # save unfiltered cell metadata s_obj@meta.data %>% rownames_to_column("cell") %>% as_tibble() %>% mutate(sample_name = orig.ident) %>% write_excel_csv(path = "metadata.unfiltered.csv") return(s_obj) } # filter data by number of genes and mitochondrial percentage filter_data = function(seurat_obj, min_genes = NULL, max_genes = NULL, max_mt = 10) { s_obj = seurat_obj message("\n\n ========== filter data matrix ========== \n\n") # log the unfiltered gene numbers to file write(glue("unfiltered min genes: {min(s_obj$num_genes)}"), file = "create.log", append = TRUE) write(glue("unfiltered max genes: {max(s_obj$num_genes)}"), file = "create.log", append = TRUE) write(glue("unfiltered mean num genes: {round(mean(s_obj$num_genes), 3)}"), file = "create.log", append = TRUE) write(glue("unfiltered median num genes: {median(s_obj$num_genes)}"), file = "create.log", append = TRUE) # convert arguments to integers (command line arguments end up as characters) min_genes = as.numeric(min_genes) max_genes = as.numeric(max_genes) max_mt = as.numeric(max_mt) # default cutoffs (gene numbers rounded to nearest 10) # as.numeric() converts NULLs to 0 length numerics, so can't use is.null() if (!length(min_genes)) min_genes = s_obj$num_genes %>% quantile(0.02, names = FALSE) %>% round(-1) if (!length(max_genes)) max_genes = s_obj$num_genes %>% quantile(0.98, names = FALSE) %>% round(-1) if (!length(max_mt)) max_mt = 10 message(glue("min genes cutoff: {min_genes}")) message(glue("max genes cutoff: {max_genes}")) message(glue("max mitochondrial percentage cutoff: 
{max_mt}")) message(" ") # log the cutoffs to file write(glue("min genes cutoff: {min_genes}"), file = "create.log", append = TRUE) write(glue("max genes cutoff: {max_genes}"), file = "create.log", append = TRUE) write(glue("max mitochondrial percentage cutoff: {max_mt}"), file = "create.log", append = TRUE) message(glue("imported cells: {ncol(s_obj)}")) message(glue("imported genes: {nrow(s_obj)}")) # filter cells_subset = seurat_obj@meta.data %>% rownames_to_column("cell") %>% filter(nFeature_RNA > min_genes & nFeature_RNA < max_genes & pct_mito < max_mt) %>% pull(cell) s_obj = subset(s_obj, cells = cells_subset) message("filtered cells: ", ncol(s_obj)) message("filtered genes: ", nrow(s_obj)) # create a named color scheme to ensure names and colors are in the proper order sample_names = s_obj$orig.ident %>% as.character() %>% sort() %>% unique() colors_samples_named = colors_samples[1:length(sample_names)] names(colors_samples_named) = sample_names vln_theme = theme( axis.title.x = element_blank(), axis.title.y = element_blank(), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1), legend.position = "none" ) suppressMessages({ dist_filt_nft_plot = VlnPlot( s_obj, features = "num_genes", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_filt_nct_plot = VlnPlot( s_obj, features = "num_UMIs", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_filt_pmt_plot = VlnPlot( s_obj, features = "pct_mito", group.by = "orig.ident", pt.size = 0.1, sort = TRUE, combine = TRUE, cols = colors_samples_named ) + scale_y_continuous(labels = comma) + vln_theme dist_filt_plot = plot_grid(dist_filt_nft_plot, dist_filt_nct_plot, dist_filt_pmt_plot, ncol = 3) ggsave("qc.distribution.filtered.png", plot = dist_filt_plot, width = 10, height = 6, units = "in") }) Sys.sleep(1) # 
after removing unwanted cells from the dataset, normalize the data # LogNormalize: # - normalizes the gene expression measurements for each cell by the total expression # - multiplies this by a scale factor (10,000 by default) # - log-transforms the result s_obj = NormalizeData(s_obj, normalization.method = "LogNormalize", scale.factor = 10000, verbose = FALSE) # save counts matrix as a basic gzipped text file # object@data stores normalized and log-transformed single cell expression # used for visualizations, such as violin and feature plots, most diff exp tests, finding high-variance genes counts_norm = GetAssayData(s_obj) %>% as.matrix() %>% round(3) counts_norm = counts_norm %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene) write_csv(counts_norm, path = "counts.normalized.csv.gz") # log to file write(glue("filtered cells: {ncol(s_obj)}"), file = "create.log", append = TRUE) write(glue("filtered genes: {nrow(s_obj)}"), file = "create.log", append = TRUE) write(glue("filtered mean num genes: {round(mean(s_obj$num_genes), 3)}"), file = "create.log", append = TRUE) write(glue("filtered median num genes: {median(s_obj$num_genes)}"), file = "create.log", append = TRUE) return(s_obj) } # merge multiple Seurat objects combine_seurat_obj = function(original_wd, sample_analysis_dirs) { if (length(sample_analysis_dirs) < 2) stop("must have at least 2 samples to merge") message("\n\n ========== combine samples ========== \n\n") seurat_obj_list = list() for (i in 1:length(sample_analysis_dirs)) { sample_analysis_dir = sample_analysis_dirs[i] sample_analysis_dir = glue("{original_wd}/{sample_analysis_dir}") sample_seurat_rds = glue("{sample_analysis_dir}/seurat_obj.rds") # check if analysis dir is valid if (!dir.exists(sample_analysis_dir)) stop(glue("dir {sample_analysis_dir} does not exist")) # check if seurat object exists if (!file.exists(sample_seurat_rds)) stop(glue("seurat object rds {sample_seurat_rds} does not exist")) # load seurat object 
seurat_obj_list[[i]] = readRDS(sample_seurat_rds) # clean up object seurat_obj_list[[i]]@assays$RNA@var.features = vector() seurat_obj_list[[i]]@assays$RNA@scale.data = matrix() seurat_obj_list[[i]]@reductions = list() seurat_obj_list[[i]]@meta.data = seurat_obj_list[[i]]@meta.data %>% select(-starts_with("snn_res")) # print single sample sample stats # sample_name = seurat_obj_list[[i]]@meta.data[1, "orig.ident"] %>% as.character() sample_name = seurat_obj_list[[i]]$orig.ident[1] %>% as.character() message(glue("sample {sample_name} dir: {basename(sample_analysis_dir)}")) write(glue("sample {sample_name} dir: {basename(sample_analysis_dir)}"), file = "create.log", append = TRUE) message(glue("sample {sample_name} cells: {ncol(seurat_obj_list[[i]])}")) write(glue("sample {sample_name} cells: {ncol(seurat_obj_list[[i]])}"), file = "create.log", append = TRUE) message(glue("sample {sample_name} genes: {nrow(seurat_obj_list[[i]])}")) write(glue("sample {sample_name} genes: {nrow(seurat_obj_list[[i]])}"), file = "create.log", append = TRUE) message(" ") } # merge merged_obj = merge(seurat_obj_list[[1]], seurat_obj_list[2:length(seurat_obj_list)]) rm(seurat_obj_list) # print combined sample stats message(glue("combined unfiltered cells: {ncol(merged_obj)}")) write(glue("combined unfiltered cells: {ncol(merged_obj)}"), file = "create.log", append = TRUE) message(glue("combined unfiltered genes: {nrow(merged_obj)}")) write(glue("combined unfiltered genes: {nrow(merged_obj)}"), file = "create.log", append = TRUE) # filter poorly expressed genes (detected in less than 10 cells) filtered_genes = Matrix::rowSums(GetAssayData(merged_obj, assay = "RNA", slot = "counts") > 0) min_cells = 10 if (ncol(merged_obj) > 100000) { min_cells = 50 } filtered_genes = filtered_genes[filtered_genes >= min_cells] %>% names() %>% sort() merged_obj = subset(merged_obj, features = filtered_genes) # print combined sample stats message(glue("combined cells: {ncol(merged_obj)}")) 
write(glue("combined cells: {ncol(merged_obj)}"), file = "create.log", append = TRUE)
  message(glue("combined genes: {nrow(merged_obj)}"))
  write(glue("combined genes: {nrow(merged_obj)}"), file = "create.log", append = TRUE)

  # print gene/cell minimum cutoffs observed in the combined object
  min_cells = Matrix::rowSums(GetAssayData(merged_obj, assay = "RNA", slot = "counts") > 0) %>% min()
  min_genes = Matrix::colSums(GetAssayData(merged_obj, assay = "RNA", slot = "counts") > 0) %>% min()
  message(glue("min cells per gene: {min_cells}"))
  write(glue("min cells per gene: {min_cells}"), file = "create.log", append = TRUE)
  message(glue("min genes per cell: {min_genes}"))
  write(glue("min genes per cell: {min_genes}"), file = "create.log", append = TRUE)

  # check that the full counts table is small enough to fit into an R matrix (max around 100k x 21k)
  # (a dense R matrix is limited to 2^31 - 1 elements)
  num_matrix_elements = GetAssayData(merged_obj, assay = "RNA", slot = "counts") %>% length()
  if (num_matrix_elements < 2^31) {

    # save raw counts matrix as a gzipped csv (fwrite + gzip is much faster than write_csv for big tables)
    counts_raw = GetAssayData(merged_obj, assay = "RNA", slot = "counts") %>% as.matrix()
    counts_raw = counts_raw %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene)
    # write_csv(counts_raw, path = "counts.raw.csv.gz")
    fwrite(counts_raw, file = "counts.raw.csv", sep = ",")
    R.utils::gzip("counts.raw.csv")
    rm(counts_raw)

    # save normalized counts matrix as a basic gzipped text file (values rounded to 3 digits)
    counts_norm = GetAssayData(merged_obj, assay = "RNA") %>% as.matrix() %>% round(3)
    counts_norm = counts_norm %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene)
    # write_csv(counts_norm, path = "counts.normalized.csv.gz")
    fwrite(counts_norm, file = "counts.normalized.csv", sep = ",")
    R.utils::gzip("counts.normalized.csv")
    rm(counts_norm)

  }

  # create a named color scheme to ensure names and colors are in the proper order
  sample_names = merged_obj$orig.ident %>% as.character() %>% sort() %>% unique()
  colors_samples_named = colors_samples[1:length(sample_names)]
  names(colors_samples_named) = sample_names

  # shared styling for the per-sample QC violin plots below
  vln_theme = theme(
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1),
    legend.position = "none"
  )
  suppressMessages({
    dist_nft_plot = VlnPlot(
      merged_obj, features = "num_genes", group.by = "orig.ident", pt.size = 0.1,
      sort = TRUE, combine = TRUE, cols = colors_samples_named
    ) + scale_y_continuous(labels = comma) + vln_theme
    dist_nct_plot = VlnPlot(
      merged_obj, features = "num_UMIs", group.by = "orig.ident", pt.size = 0.1,
      sort = TRUE, combine = TRUE, cols = colors_samples_named
    ) + scale_y_continuous(labels = comma) + vln_theme
    dist_pmt_plot = VlnPlot(
      merged_obj, features = "pct_mito", group.by = "orig.ident", pt.size = 0.1,
      sort = TRUE, combine = TRUE, cols = colors_samples_named
    ) + scale_y_continuous(labels = comma) + vln_theme
    dist_plot = plot_grid(dist_nft_plot, dist_nct_plot, dist_pmt_plot, ncol = 3)
    ggsave("qc.distribution.png", plot = dist_plot, width = 20, height = 6, units = "in")
  })
  # brief pause so ggsave finishes writing before the function returns
  Sys.sleep(1)

  return(merged_obj)

}

# integrate multiple Seurat objects (Seurat v3 anchor-based integration workflow)
# original_wd: directory that the per-sample analysis dirs are relative to
# sample_analysis_dirs: vector of per-sample analysis dirs, each containing a seurat_obj.rds
# num_dim: number of dimensions for FindIntegrationAnchors/IntegrateData (must be 5-50)
# returns the integrated Seurat object with DefaultAssay set to "integrated"
integrate_seurat_obj = function(original_wd, sample_analysis_dirs, num_dim) {

  # check if the inputs seem reasonable
  if (length(sample_analysis_dirs) < 2) stop("must have at least 2 samples to merge")
  num_dim = as.integer(num_dim)
  if (num_dim < 5) stop("too few dims: ", num_dim)
  if (num_dim > 50) stop("too many dims: ", num_dim)

  message("\n\n ========== integrate samples ========== \n\n")

  seurat_obj_list = list()
  var_genes_list = list()
  exp_genes = c()
  for (i in 1:length(sample_analysis_dirs)) {

    sample_analysis_dir = sample_analysis_dirs[i]
    sample_analysis_dir = glue("{original_wd}/{sample_analysis_dir}")
    sample_seurat_rds = glue("{sample_analysis_dir}/seurat_obj.rds")

    # check if analysis dir is valid
    if (!dir.exists(sample_analysis_dir)) stop(glue("dir {sample_analysis_dir} does not exist"))
    # check if seurat object exists
    if (!file.exists(sample_seurat_rds)) stop(glue("seurat object rds {sample_seurat_rds} does not exist"))

    # load seurat object
    seurat_obj_list[[i]] = readRDS(sample_seurat_rds)
    # sample_name = seurat_obj_list[[i]]@meta.data[1, "orig.ident"] %>% as.character()
    sample_name = seurat_obj_list[[i]]$orig.ident[1] %>% as.character()

    # clean up object: drop scaled data, reductions, and old clustering columns to reduce memory use
    seurat_obj_list[[i]]@assays$RNA@scale.data = matrix()
    seurat_obj_list[[i]]@reductions = list()
    seurat_obj_list[[i]]@meta.data = seurat_obj_list[[i]]@meta.data %>% select(-starts_with("snn_res"))

    # save expressed genes keeping only genes present in all the datasets (for genes to integrate in IntegrateData)
    if (length(exp_genes) > 0) {
      exp_genes = intersect(exp_genes, rownames(seurat_obj_list[[i]])) %>% sort()
    } else {
      exp_genes = rownames(seurat_obj_list[[i]])
    }

    # save variable genes (used below for the overlap plots)
    var_genes_list[[sample_name]] = VariableFeatures(seurat_obj_list[[i]])

    # print single sample stats
    message(glue("sample {sample_name} dir: {basename(sample_analysis_dir)}"))
    write(glue("sample {sample_name} dir: {basename(sample_analysis_dir)}"), file = "create.log", append = TRUE)
    message(glue("sample {sample_name} cells: {ncol(seurat_obj_list[[i]])}"))
    write(glue("sample {sample_name} cells: {ncol(seurat_obj_list[[i]])}"), file = "create.log", append = TRUE)
    message(glue("sample {sample_name} genes: {nrow(seurat_obj_list[[i]])}"))
    write(glue("sample {sample_name} genes: {nrow(seurat_obj_list[[i]])}"), file = "create.log", append = TRUE)
    message(" ")

  }

  # euler plot of variable gene overlaps (becomes unreadable and can take days for many overlaps)
  if (length(var_genes_list) < 8) {
    colors_euler = colors_samples[1:length(var_genes_list)]
    euler_fit = euler(var_genes_list, shape = "ellipse")
    euler_plot = plot(euler_fit, fills = list(fill = colors_euler, alpha = 0.7), edges = list(col = colors_euler))
    png("variance.vargenes.euler.png", res = 200, width = 5, height = 5, units = "in")
    print(euler_plot)
    dev.off()
  }

  # upset plot of variable gene overlaps (scales better than the euler plot for many samples)
  png("variance.vargenes.upset.png", res = 200, width = 8, height = 5, units = "in")
  upset(fromList(var_genes_list), nsets
= 50, nintersects = 15, order.by = "freq", mb.ratio = c(0.5, 0.5))
  dev.off()

  message("\n\n ========== Seurat::FindIntegrationAnchors() ========== \n\n")

  # find the integration anchors across all samples
  anchors = FindIntegrationAnchors(object.list = seurat_obj_list, anchor.features = 2000, dims = 1:num_dim)
  rm(seurat_obj_list)

  message("\n\n ========== Seurat::IntegrateData() ========== \n\n")

  # integrating all genes may cause issues and may not add any relevant information
  # integrated_obj = IntegrateData(anchorset = anchors, dims = 1:num_dim, features.to.integrate = exp_genes)
  integrated_obj = IntegrateData(anchorset = anchors, dims = 1:num_dim)
  rm(anchors)

  # after running IntegrateData, the Seurat object will contain a new Assay with the integrated expression matrix
  # the original (uncorrected values) are still stored in the object in the "RNA" assay

  # switch to integrated assay
  DefaultAssay(integrated_obj) = "integrated"

  # print integrated sample stats (before gene filtering)
  message(glue("integrated unfiltered cells: {ncol(integrated_obj)}"))
  write(glue("integrated unfiltered cells: {ncol(integrated_obj)}"), file = "create.log", append = TRUE)
  message(glue("integrated unfiltered genes: {nrow(integrated_obj)}"))
  write(glue("integrated unfiltered genes: {nrow(integrated_obj)}"), file = "create.log", append = TRUE)

  # filter poorly expressed genes (detected in less than 10 cells; 50 for very large datasets)
  filtered_genes = Matrix::rowSums(GetAssayData(integrated_obj, assay = "RNA", slot = "counts") > 0)
  min_cells = 10
  if (ncol(integrated_obj) > 100000) { min_cells = 50 }
  filtered_genes = filtered_genes[filtered_genes >= min_cells] %>% names() %>% sort()
  integrated_obj = subset(integrated_obj, features = filtered_genes)

  # print integrated sample stats (after gene filtering)
  message(glue("integrated cells: {ncol(integrated_obj)}"))
  write(glue("integrated cells: {ncol(integrated_obj)}"), file = "create.log", append = TRUE)
  message(glue("integrated genes: {nrow(GetAssayData(integrated_obj, assay = 'RNA'))}"))
  write(glue("integrated genes: {nrow(GetAssayData(integrated_obj, assay = 'RNA'))}"), file = "create.log", append = TRUE)

  # print gene/cell minimum cutoffs observed after filtering
  min_cells = Matrix::rowSums(GetAssayData(integrated_obj, assay = "RNA", slot = "counts") > 0) %>% min()
  min_genes = Matrix::colSums(GetAssayData(integrated_obj, assay = "RNA", slot = "counts") > 0) %>% min()
  message(glue("min cells per gene: {min_cells}"))
  write(glue("min cells per gene: {min_cells}"), file = "create.log", append = TRUE)
  message(glue("min genes per cell: {min_genes}"))
  write(glue("min genes per cell: {min_genes}"), file = "create.log", append = TRUE)

  # check that the full counts table is small enough to fit into an R matrix (max around 100k x 21k)
  num_matrix_elements = GetAssayData(integrated_obj, assay = "RNA", slot = "counts") %>% length()
  if (num_matrix_elements < 2^31) {

    # save raw counts matrix
    counts_raw = GetAssayData(integrated_obj, assay = "RNA", slot = "counts") %>% as.matrix()
    counts_raw = counts_raw %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene)
    # write_csv(counts_raw, path = "counts.raw.csv.gz")
    fwrite(counts_raw, file = "counts.raw.csv", sep = ",")
    R.utils::gzip("counts.raw.csv")
    rm(counts_raw)

    # save normalized counts matrix
    counts_norm = GetAssayData(integrated_obj, assay = "RNA") %>% as.matrix() %>% round(3)
    counts_norm = counts_norm %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene)
    # write_csv(counts_norm, path = "counts.normalized.csv.gz")
    fwrite(counts_norm, file = "counts.normalized.csv", sep = ",")
    R.utils::gzip("counts.normalized.csv")
    rm(counts_norm)

    # save integrated counts matrix
    # counts_int = GetAssayData(integrated_obj, assay = "integrated") %>% as.matrix() %>% round(3)
    # counts_int = counts_int %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene)
    # write_csv(counts_int, path = "counts.integrated.csv.gz")
    # rm(counts_int)

  }

  # NOTE(review): unlike the merged-object QC plots, these violin plots use the unnamed
  # colors_samples vector (not a named scheme), so sample/color pairing depends on order -- confirm
  vln_theme = theme(
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1),
    legend.position = "none"
  )
  suppressMessages({
    dist_nft_plot = VlnPlot(
      integrated_obj, features = "num_genes", group.by = "orig.ident", pt.size = 0.1,
      sort = TRUE, combine = TRUE, cols = colors_samples
    ) + scale_y_continuous(labels = comma) + vln_theme
    dist_nct_plot = VlnPlot(
      integrated_obj, features = "num_UMIs", group.by = "orig.ident", pt.size = 0.1,
      sort = TRUE, combine = TRUE, cols = colors_samples
    ) + scale_y_continuous(labels = comma) + vln_theme
    dist_pmt_plot = VlnPlot(
      integrated_obj, features = "pct_mito", group.by = "orig.ident", pt.size = 0.1,
      sort = TRUE, combine = TRUE, cols = colors_samples
    ) + scale_y_continuous(labels = comma) + vln_theme
    dist_plot = plot_grid(dist_nft_plot, dist_nct_plot, dist_pmt_plot, ncol = 3)
    ggsave("qc.distribution.png", plot = dist_plot, width = 20, height = 6, units = "in")
  })
  Sys.sleep(1)

  return(integrated_obj)

}

# calculate various variance metrics and perform basic analysis
# PC selection approaches:
# - PCHeatmap - more supervised, exploring PCs to determine relevant sources of heterogeneity
# - PCElbowPlot - heuristic that is commonly used and can be calculated instantly
# - JackStrawPlot - implements a statistical test based on a random null model, but is time-consuming
# jackStraw procedure is very slow, so skip for large projects (>10,000 cells)
# seurat_obj: Seurat object (single-sample workflow; uses the "RNA" assay)
# jackstraw_max_cells: skip the JackStraw test when the object has at least this many cells
calculate_variance = function(seurat_obj, jackstraw_max_cells = 10000) {

  s_obj = seurat_obj

  message("\n\n ========== Seurat::FindVariableGenes() ========== \n\n")

  # identify features that are outliers on a 'mean variability plot'
  # Seurat v3 implements an improved method based on a variance stabilizing transformation ("vst")
  s_obj = FindVariableFeatures(s_obj, selection.method = "vst", nfeatures = 2000, verbose = FALSE)

  # export highly variable feature information (mean, variance, variance standardized)
  hvf_tbl = HVFInfo(s_obj) %>% round(3) %>% rownames_to_column("gene") %>% arrange(-variance.standardized)
  write_excel_csv(hvf_tbl, path =
"variance.csv") # plot variance var_plot = VariableFeaturePlot(s_obj, pt.size = 0.5) var_plot = LabelPoints(var_plot, points = head(hvf_tbl$gene, 30), repel = TRUE, xnudge = 0, ynudge = 0) ggsave("variance.features.png", plot = var_plot, width = 12, height = 5, units = "in") message("\n\n ========== Seurat::ScaleData() ========== \n\n") # regress out unwanted sources of variation # regressing uninteresting sources of variation can improve dimensionality reduction and clustering # could include technical noise, batch effects, biological sources of variation (cell cycle stage) # scaled z-scored residuals of these models are stored in scale.data slot # used for dimensionality reduction and clustering # RegressOut function has been deprecated, and replaced with the vars.to.regress argument in ScaleData # s_obj = ScaleData(s_obj, features = rownames(s_obj), vars.to.regress = c("num_UMIs", "pct_mito"), verbose = FALSE) s_obj = ScaleData(s_obj, vars.to.regress = c("num_UMIs", "pct_mito"), verbose = FALSE) message("\n\n ========== Seurat::PCA() ========== \n\n") # use fewer PCs for small datasets num_pcs = 50 if (ncol(s_obj) < 100) num_pcs = 20 if (ncol(s_obj) < 25) num_pcs = 5 # PCA on the scaled data # PCA calculation stored in object[["pca"]] s_obj = RunPCA(s_obj, assay = "RNA", features = VariableFeatures(s_obj), npcs = num_pcs, verbose = FALSE) # plot the output of PCA analysis (shuffle cells so any one group does not appear overrepresented due to ordering) pca_plot = DimPlot( s_obj, cells = sample(colnames(s_obj)), group.by = "orig.ident", reduction = "pca", pt.size = 0.5, cols = colors_samples ) + theme(aspect.ratio = 1) ggsave("variance.pca.png", plot = pca_plot, width = 8, height = 6, units = "in") message("\n\n ========== Seurat::DimHeatmap() ========== \n\n") # PCHeatmap (former) allows for easy exploration of the primary sources of heterogeneity in a dataset if (num_pcs > 15) { png("variance.pca.heatmap.png", res = 300, width = 10, height = 16, units = "in") 
DimHeatmap(s_obj, reduction = "pca", dims = 1:15, nfeatures = 20, cells = 250, fast = TRUE) dev.off() } message("\n\n ========== Seurat::PCElbowPlot() ========== \n\n") # a more ad hoc method for determining PCs to use, draw cutoff where there is a clear elbow in the graph elbow_plot = ElbowPlot(s_obj, reduction = "pca", ndims = num_pcs) ggsave("variance.pca.elbow.png", plot = elbow_plot, width = 8, height = 5, units = "in") # resampling test inspired by the jackStraw procedure - very slow, so skip for large projects (>10,000 cells) if (ncol(s_obj) < jackstraw_max_cells) { message("\n\n ========== Seurat::JackStraw() ========== \n\n") # determine statistical significance of PCA scores s_obj = JackStraw(s_obj, assay = "RNA", reduction = "pca", dims = num_pcs, verbose = FALSE) # compute Jackstraw scores significance s_obj = ScoreJackStraw(s_obj, reduction = "pca", dims = 1:num_pcs, do.plot = FALSE) # plot the results of the JackStraw analysis for PCA significance # significant PCs will show a strong enrichment of genes with low p-values (solid curve above the dashed line) jackstraw_plot = JackStrawPlot(s_obj, reduction = "pca", dims = 1:num_pcs) + guides(col = guide_legend(ncol = 2)) ggsave("variance.pca.jackstraw.png", plot = jackstraw_plot, width = 12, height = 6, units = "in") } return(s_obj) } # calculate various variance metrics and perform basic analysis (integrated analysis workflow) # specify neighbors for UMAP (default is 30 in Seurat 2 and 3 pre-release) calculate_variance_integrated = function(seurat_obj, num_dim, num_neighbors = 30) { s_obj = seurat_obj num_dim = as.integer(num_dim) if (num_dim < 5) stop("too few dims: ", num_dim) if (num_dim > 50) stop("too many dims: ", num_dim) message("\n\n ========== Seurat::ScaleData() ========== \n\n") # s_obj = ScaleData(s_obj, features = rownames(s_obj), verbose = FALSE) s_obj = ScaleData(s_obj, verbose = FALSE) message("\n\n ========== Seurat::PCA() ========== \n\n") # PCA on the scaled data s_obj = 
RunPCA(s_obj, npcs = num_dim, verbose = FALSE) # plot the output of PCA analysis (shuffle cells so any one group does not appear overrepresented due to ordering) pca_plot = DimPlot( s_obj, reduction = "pca", cells = sample(colnames(s_obj)), group.by = "orig.ident", pt.size = 0.5, cols = colors_samples ) + theme(aspect.ratio = 1) ggsave("variance.pca.png", plot = pca_plot, width = 10, height = 6, units = "in") message("\n\n ========== Seurat::RunTSNE() ========== \n\n") # use tSNE as a tool to visualize, not for clustering directly on tSNE components # cells within the graph-based clusters determined above should co-localize on the tSNE plot s_obj = RunTSNE(s_obj, reduction = "pca", dims.use = 1:num_dim) # reduce point size for larger datasets dr_pt_size = get_dr_point_size(s_obj) # tSNE using original sample names (shuffle cells so any one group does not appear overrepresented due to ordering) s_obj = set_identity(seurat_obj = s_obj, group_var = "orig.ident") plot_tsne = DimPlot(s_obj, reduction = "tsne", cells = sample(colnames(s_obj)), pt.size = dr_pt_size, cols = colors_samples) + theme(aspect.ratio = 1) ggsave(glue("dr.tsne.{num_dim}.sample.png"), plot = plot_tsne, width = 10, height = 6, units = "in") Sys.sleep(1) ggsave(glue("dr.tsne.{num_dim}.sample.pdf"), plot = plot_tsne, width = 10, height = 6, units = "in") Sys.sleep(1) message("\n\n ========== Seurat::RunUMAP() ========== \n\n") # runs the Uniform Manifold Approximation and Projection (UMAP) dimensional reduction technique s_obj = RunUMAP(s_obj, reduction = "pca", dims = 1:num_dim, n.neighbors = num_neighbors, verbose = FALSE) # tSNE using original sample names (shuffle cells so any one group does not appear overrepresented due to ordering) s_obj = set_identity(seurat_obj = s_obj, group_var = "orig.ident") plot_umap = DimPlot(s_obj, reduction = "umap", cells = sample(colnames(s_obj)), pt.size = dr_pt_size, cols = colors_samples) + theme(aspect.ratio = 1) ggsave(glue("dr.umap.{num_dim}.sample.png"), plot 
= plot_umap, width = 10, height = 6, units = "in")
  Sys.sleep(1)
  ggsave(glue("dr.umap.{num_dim}.sample.pdf"), plot = plot_umap, width = 10, height = 6, units = "in")
  Sys.sleep(1)

  # export the cell metadata table (includes tSNE/UMAP coordinates)
  save_metadata(seurat_obj = s_obj)

  return(s_obj)

}

# determine point size for tSNE/UMAP plots (smaller for larger datasets)
# returns a scalar point size based on the number of cells in the object
get_dr_point_size = function(seurat_obj) {

  pt_size = 1.8
  if (ncol(seurat_obj) > 1000) pt_size = 1.2
  if (ncol(seurat_obj) > 5000) pt_size = 1.0
  if (ncol(seurat_obj) > 10000) pt_size = 0.8
  if (ncol(seurat_obj) > 25000) pt_size = 0.6
  return(pt_size)

}

# perform graph-based clustering and tSNE
# specify neighbors for UMAP and FindNeighbors (default is 30 in Seurat 2 and 3 pre-release)
# num_dim: number of PCs to use for tSNE/UMAP/SNN graph (must be 5-50)
calculate_clusters = function(seurat_obj, num_dim, num_neighbors = 30) {

  # check if number of dimensions seems reasonable
  if (num_dim < 5) stop("too few dims: ", num_dim)
  if (num_dim > 50) stop("too many dims: ", num_dim)

  s_obj = seurat_obj

  message("\n\n ========== Seurat::RunTSNE() ========== \n\n")

  # use tSNE as a tool to visualize, not for clustering directly on tSNE components
  # cells within the graph-based clusters determined above should co-localize on the tSNE plot
  # NOTE(review): Seurat v3 RunTSNE() takes `dims`, not the v2-style `dims.use` -- confirm
  s_obj = RunTSNE(s_obj, reduction = "pca", dims.use = 1:num_dim)

  # reduce point size for larger datasets
  dr_pt_size = get_dr_point_size(s_obj)

  # tSNE using original sample names (shuffle cells so any one group does not appear overrepresented due to ordering)
  s_obj = set_identity(seurat_obj = s_obj, group_var = "orig.ident")
  plot_tsne = DimPlot(s_obj, reduction = "tsne", cells = sample(colnames(s_obj)), pt.size = dr_pt_size, cols = colors_samples) + theme(aspect.ratio = 1)
  ggsave(glue("dr.tsne.{num_dim}.sample.png"), plot = plot_tsne, width = 10, height = 6, units = "in")
  Sys.sleep(1)
  ggsave(glue("dr.tsne.{num_dim}.sample.pdf"), plot = plot_tsne, width = 10, height = 6, units = "in")
  Sys.sleep(1)

  message("\n\n ========== Seurat::RunUMAP() ========== \n\n")

  # runs the Uniform Manifold Approximation and Projection (UMAP) dimensional reduction technique
  s_obj = RunUMAP(s_obj, reduction = "pca", dims = 1:num_dim, n.neighbors = num_neighbors, verbose = FALSE)

  # UMAP using original sample names (shuffle cells so any one group does not appear overrepresented due to ordering)
  s_obj = set_identity(seurat_obj = s_obj, group_var = "orig.ident")
  plot_umap = DimPlot(s_obj, reduction = "umap", cells = sample(colnames(s_obj)), pt.size = dr_pt_size, cols = colors_samples) + theme(aspect.ratio = 1)
  ggsave(glue("dr.umap.{num_dim}.sample.png"), plot = plot_umap, width = 10, height = 6, units = "in")
  Sys.sleep(1)
  ggsave(glue("dr.umap.{num_dim}.sample.pdf"), plot = plot_umap, width = 10, height = 6, units = "in")
  Sys.sleep(1)

  message("\n\n ========== Seurat::FindNeighbors() ========== \n\n")

  message("assay: ", DefaultAssay(s_obj))
  message("num dims: ", num_dim)

  # construct a Shared Nearest Neighbor (SNN) Graph for a given dataset
  s_obj = FindNeighbors(
    s_obj, dims = 1:num_dim, k.param = num_neighbors,
    graph.name = "snn", compute.SNN = TRUE, force.recalc = TRUE
  )

  message("\n\n ========== Seurat::FindClusters() ========== \n\n")

  message("initial metadata fields: ", str_c(colnames(s_obj@meta.data), collapse = ", "))

  # resolutions for graph-based clustering
  # increased resolution values lead to more clusters (recommendation: 0.6-1.2 for 3K cells, 2-4 for 33K cells)
  res_range = seq(0.1, 2.5, 0.1)
  if (ncol(s_obj) > 1000) res_range = c(res_range, 3, 4, 5, 6, 7, 8, 9)

  # algorithm: 1 = original Louvain; 2 = Louvain with multilevel refinement; 3 = SLM
  # identify clusters of cells by SNN modularity optimization based clustering algorithm
  s_obj = FindClusters(s_obj, algorithm = 3, resolution = res_range, graph.name = "snn", verbose = FALSE)

  # remove "seurat_clusters" column that is added automatically (added in v3 late dev version)
  s_obj@meta.data = s_obj@meta.data %>% select(-seurat_clusters)

  message("new metadata fields: ", str_c(colnames(s_obj@meta.data), collapse = ", "))

  # create a separate sub-directory for cluster resolution plots
  clusters_dir = "clusters-resolutions"
  if (!dir.exists(clusters_dir)) dir.create(clusters_dir)

  # for calculated cluster resolutions: remove redundant (same number of clusters), rename, and plot
  res_cols = str_subset(colnames(s_obj@meta.data), "snn_res")
  res_cols = sort(res_cols)
  res_num_clusters_prev = 1
  for (res in res_cols) {

    # proceed if current resolution has more clusters than previous and less than the color scheme length
    res_vector = s_obj@meta.data[, res] %>% as.character()
    res_num_clusters_cur = res_vector %>% n_distinct()
    if (res_num_clusters_cur > res_num_clusters_prev && res_num_clusters_cur < length(colors_clusters)) {

      # check if the resolution still has original labels (characters starting with 0)
      if (min(res_vector) == "0") {
        # convert to character vector
        s_obj@meta.data[, res] = as.character(s_obj@meta.data[, res])
        # relabel identities so they start with 1 and not 0
        s_obj@meta.data[, res] = as.numeric(s_obj@meta.data[, res]) + 1
        # pad with 0s to avoid sorting issues
        s_obj@meta.data[, res] = str_pad(s_obj@meta.data[, res], width = 2, side = "left", pad = "0")
        # pad with "C" to avoid downstream numeric conversions
        s_obj@meta.data[, res] = str_c("C", s_obj@meta.data[, res])
        # encode as a factor
        s_obj@meta.data[, res] = factor(s_obj@meta.data[, res])
      }

      # resolution value based on resolution column name
      res_val = sub("snn_res\\.", "", res)
      # plot file name
      res_str = gsub("\\.", "", res)
      dr_filename = glue("{clusters_dir}/dr.{DefaultAssay(s_obj)}.{num_dim}.{res_str}.clust{res_num_clusters_cur}")
      s_obj = plot_clusters(seurat_obj = s_obj, resolution = res_val, filename_base = dr_filename)

      # add blank line to make output easier to read
      message(" ")

    } else {

      # remove resolution if the number of clusters is same as previous
      s_obj@meta.data = s_obj@meta.data %>% select(-one_of(res))

    }

    # update resolution cluster count for next iteration
    res_num_clusters_prev = res_num_clusters_cur

  }

  message("updated metadata fields: ",
str_c(colnames(s_obj@meta.data), collapse = ", "))

  # export the updated cell metadata table
  save_metadata(seurat_obj = s_obj)

  return(s_obj)

}

# compile all cell metadata (QC stats, cluster columns, tSNE/UMAP coordinates) into a single csv table
save_metadata = function(seurat_obj) {

  s_obj = seurat_obj

  metadata_tbl = s_obj@meta.data %>% rownames_to_column("cell") %>% as_tibble() %>% mutate(sample_name = orig.ident)
  tsne_tbl = s_obj[["tsne"]]@cell.embeddings %>% round(3) %>% as.data.frame() %>% rownames_to_column("cell")
  umap_tbl = s_obj[["umap"]]@cell.embeddings %>% round(3) %>% as.data.frame() %>% rownames_to_column("cell")
  cells_metadata = metadata_tbl %>% full_join(tsne_tbl, by = "cell") %>% full_join(umap_tbl, by = "cell")
  cells_metadata = cells_metadata %>% arrange(cell)
  write_excel_csv(cells_metadata, path = "metadata.csv")

}

# plot tSNE/UMAP with color-coded clusters at specified resolution
# returns the object with identities set to the specified resolution
plot_clusters = function(seurat_obj, resolution, filename_base) {

  s_obj = seurat_obj

  # set identities based on specified resolution
  s_obj = set_identity(seurat_obj = s_obj, group_var = resolution)

  # print stats
  num_clusters = Idents(s_obj) %>% as.character() %>% n_distinct()
  message("resolution: ", resolution)
  message("num clusters: ", num_clusters)

  # generate plot if there is a reasonable number of clusters
  if (num_clusters > 1 && num_clusters < length(colors_clusters)) {

    # shuffle cells so they appear randomly and one group does not show up on top
    plot_tsne = DimPlot(
      s_obj, reduction = "tsne", cells = sample(colnames(s_obj)),
      pt.size = get_dr_point_size(s_obj), cols = colors_clusters
    ) + theme(aspect.ratio = 1)
    ggsave(glue("{filename_base}.tsne.png"), plot = plot_tsne, width = 9, height = 6, units = "in")
    Sys.sleep(1)
    ggsave(glue("{filename_base}.tsne.pdf"), plot = plot_tsne, width = 9, height = 6, units = "in")
    Sys.sleep(1)
    plot_umap = DimPlot(
      s_obj, reduction = "umap", cells = sample(colnames(s_obj)),
      pt.size = get_dr_point_size(s_obj), cols = colors_clusters
    ) + theme(aspect.ratio = 1)
    ggsave(glue("{filename_base}.umap.png"), plot = plot_umap, width = 9, height = 6, units = "in")
    Sys.sleep(1)
    ggsave(glue("{filename_base}.umap.pdf"), plot = plot_umap, width = 9, height = 6, units = "in")
    Sys.sleep(1)

    # remove the stray device file that base plotting can leave behind
    if (file.exists("Rplots.pdf")) file.remove("Rplots.pdf")

  }

  return(s_obj)

}

# check grouping variable/resolution against existing meta data columns
# accepts either a full column name or a bare resolution value ("0.5" -> "snn_res.0.5")
check_group_var = function(seurat_obj, group_var) {

  s_obj = seurat_obj

  # check if the grouping variable is one of meta data columns
  if (!(group_var %in% colnames(s_obj@meta.data))) {
    # check if grouping variable is the resolution value (X.X instead of res.X.X)
    res_column = str_c("snn_res.", group_var)
    if (res_column %in% colnames(s_obj@meta.data)) {
      group_var = res_column
    } else {
      stop("unknown grouping variable: ", group_var)
    }
  }

  return(group_var)

}

# set identity based on a specified variable/resolution
set_identity = function(seurat_obj, group_var) {

  s_obj = seurat_obj

  group_var = check_group_var(seurat_obj = s_obj, group_var = group_var)

  # set identities based on selected grouping variable
  message("setting grouping variable: ", group_var)
  Idents(s_obj) = group_var

  return(s_obj)

}

# plot a set of genes (feature plots, dot plot, violin plots, and per-cluster bar plots)
plot_genes = function(seurat_obj, genes, filename_base) {

  # color gradient for FeaturePlot-based plots
  gradient_colors = c("gray85", "red2")

  # switch to "RNA" assay from potentially "integrated"
  DefaultAssay(seurat_obj) = "RNA"

  # tSNE plots color-coded by expression level (should be square to match the original tSNE plots)
  feat_plot = FeaturePlot(
    seurat_obj, features = genes, reduction = "tsne",
    cells = sample(colnames(seurat_obj)), pt.size = 0.5, cols = gradient_colors, ncol = 4
  )
  ggsave(glue("{filename_base}.tsne.png"), plot = feat_plot, width = 16, height = 10, units = "in")
  ggsave(glue("{filename_base}.tsne.pdf"), plot = feat_plot, width = 16, height = 10, units = "in")

  # UMAP plots color-coded by expression level (should be square to match the original tSNE plots)
  feat_plot = FeaturePlot(
    seurat_obj, features = genes, reduction = "umap",
    cells = sample(colnames(seurat_obj)), pt.size = 0.5, cols = gradient_colors, ncol = 4
  )
  ggsave(glue("{filename_base}.umap.png"), plot = feat_plot, width = 16, height = 10, units = "in")
  ggsave(glue("{filename_base}.umap.pdf"), plot = feat_plot, width = 16, height = 10, units = "in")

  # dot plot visualization
  dot_plot = DotPlot(seurat_obj, features = genes, dot.scale = 12, cols = gradient_colors)
  ggsave(glue("{filename_base}.dotplot.png"), plot = dot_plot, width = 20, height = 8, units = "in")
  ggsave(glue("{filename_base}.dotplot.pdf"), plot = dot_plot, width = 20, height = 8, units = "in")

  # gene violin plots (size.use below 0.2 doesn't seem to make a difference)
  # skip PDF since every cell has to be plotted and they become too big
  vln_plot = VlnPlot(seurat_obj, features = genes, pt.size = 0.1, combine = TRUE, cols = colors_clusters, ncol = 4)
  ggsave(glue("{filename_base}.violin.png"), plot = vln_plot, width = 16, height = 10, units = "in")

  # expression levels per cluster for bar plots (averaging and output are in non-log space)
  cluster_avg_exp = AverageExpression(seurat_obj, assay = "RNA", features = genes, verbose = FALSE)[["RNA"]]
  cluster_avg_exp_long = cluster_avg_exp %>% rownames_to_column("gene") %>% gather(cluster, avg_exp, -gene)

  # bar plots
  # create a named color scheme to ensure names and colors are in the proper order
  clust_names = levels(seurat_obj)
  color_scheme_named = colors_clusters[1:length(clust_names)]
  names(color_scheme_named) = clust_names
  # NOTE(review): theme_cowplot() is a complete theme added after theme(legend.position = "none"),
  # so the legend setting is likely reset -- confirm intended legend behavior
  barplot_plot = ggplot(cluster_avg_exp_long, aes(x = cluster, y = avg_exp, fill = cluster)) +
    geom_col(color = "black") +
    theme(legend.position = "none") +
    scale_fill_manual(values = color_scheme_named) +
    scale_y_continuous(expand = c(0, 0)) +
    theme_cowplot() +
    facet_wrap(~ gene, ncol = 4, scales = "free")
  ggsave(glue("{filename_base}.barplot.png"), plot = barplot_plot, width = 16, height = 10, units = "in")
  ggsave(glue("{filename_base}.barplot.pdf"), plot = barplot_plot, width = 16, height = 10, units = "in")

}

# calculate cluster stats (number of
# cells, average expression, cell-gene matrix)
# seurat_obj: Seurat object with identities set to the cluster set of interest
# label: string used in all output file names
calculate_cluster_stats = function(seurat_obj, label) {

  message("\n\n ========== calculate cluster stats ========== \n\n")

  message("cluster names: ", str_c(levels(seurat_obj), collapse = ", "))

  # compile relevant cell metadata into a single table
  seurat_obj$cluster = Idents(seurat_obj)
  metadata_tbl = seurat_obj@meta.data %>%
    rownames_to_column("cell") %>%
    as_tibble() %>%
    select(cell, num_UMIs, num_genes, pct_mito, sample_name = orig.ident, cluster)
  tsne_tbl = seurat_obj[["tsne"]]@cell.embeddings %>% round(3) %>% as.data.frame() %>% rownames_to_column("cell")
  umap_tbl = seurat_obj[["umap"]]@cell.embeddings %>% round(3) %>% as.data.frame() %>% rownames_to_column("cell")
  cells_metadata = metadata_tbl %>% full_join(tsne_tbl, by = "cell") %>% full_join(umap_tbl, by = "cell")
  cells_metadata = cells_metadata %>% arrange(cell)
  write_excel_csv(cells_metadata, path = glue("metadata.{label}.csv"))

  # get number of cells split by cluster and by sample (counts and percentages)
  summary_cluster_sample = cells_metadata %>%
    select(cluster, sample_name) %>%
    mutate(num_cells_total = n()) %>%
    group_by(sample_name) %>%
    mutate(num_cells_sample = n()) %>%
    group_by(cluster) %>%
    mutate(num_cells_cluster = n()) %>%
    group_by(cluster, sample_name) %>%
    mutate(num_cells_cluster_sample = n()) %>%
    ungroup() %>%
    distinct() %>%
    mutate(
      pct_cells_cluster = num_cells_cluster / num_cells_total,
      pct_cells_cluster_sample = num_cells_cluster_sample / num_cells_sample
    ) %>%
    mutate(
      pct_cells_cluster = round(pct_cells_cluster * 100, 1),
      pct_cells_cluster_sample = round(pct_cells_cluster_sample * 100, 1)
    ) %>%
    arrange(cluster, sample_name)

  # get number of cells split by cluster (ignore samples)
  summary_cluster = summary_cluster_sample %>% select(-contains("sample")) %>% distinct()
  write_excel_csv(summary_cluster, path = glue("summary.{label}.csv"))

  # gene expression for an "average" cell in each identity class (averaging and output are in non-log space)
  cluster_avg_exp = AverageExpression(seurat_obj, assay = "RNA", verbose = FALSE)[["RNA"]]
  cluster_avg_exp = cluster_avg_exp %>% round(3) %>% rownames_to_column("gene") %>% arrange(gene)
  write_excel_csv(cluster_avg_exp, path = glue("expression.mean.{label}.csv"))
  Sys.sleep(1)

  # export results split by sample if multiple samples are present
  num_samples = cells_metadata %>% pull(sample_name) %>% n_distinct()
  if (num_samples > 1) {

    # number of cells split by cluster and by sample
    write_excel_csv(summary_cluster_sample, path = glue("summary.{label}.per-sample.csv"))

    # cluster averages split by sample
    sample_avg_exp = AverageExpression(seurat_obj, assay = "RNA", add.ident = "orig.ident", verbose = FALSE)[["RNA"]]
    sample_avg_exp = sample_avg_exp %>% round(3) %>% as.data.frame() %>% rownames_to_column("gene") %>% arrange(gene)
    write_excel_csv(sample_avg_exp, path = glue("expression.mean.{label}.per-sample.csv"))

  }

}

# calculate cluster markers (compared to all other cells) and plot top ones
# tests:
# - roc: ROC test returns the classification power (ranging from 0 - random, to 1 - perfect)
# - wilcox: Wilcoxon rank sum test (default in Seurat 2)
# - bimod: Likelihood-ratio test for single cell gene expression (McDavid, Bioinformatics, 2013) (default in Seurat 1)
# - tobit: Tobit-test for differential gene expression (Trapnell, Nature Biotech, 2014)
# - MAST: GLM-framework that treats cellular detection rate as a covariate (Finak, Genome Biology, 2015)
# pairwise option compares each cluster to each of the other clusters to yield markers that are both local and global
calculate_cluster_markers = function(seurat_obj, label, test, pairwise = FALSE) {

  message("\n\n ========== calculate cluster markers ========== \n\n")

  message("cluster set: ", label)
  message("marker test: ", test)

  # get cluster names
  clusters = Idents(seurat_obj) %>% as.character() %>% unique() %>% sort()
  # use only clusters with more than 10 cells
  # NOTE(review): table(Idents(...)) is ordered by factor level while `clusters` was
  # re-sorted as character; the two orderings may disagree for some label schemes -- confirm
  clusters = clusters[table(Idents(seurat_obj)) > 10]

  if (!pairwise) {

    # standard
# cluster markers calculation (each cluster versus all other cells)
    markers_dir = "markers-global"

    # capture output to avoid excessive warnings
    markers_log = capture.output({
      all_markers = FindAllMarkers(
        seurat_obj, assay = "RNA", test.use = test, logfc.threshold = log(1.2),
        min.pct = 0.2, only.pos = FALSE, min.diff.pct = -Inf, verbose = FALSE
      )
    }, type = "message")

    # do some light filtering and clean up (ROC test returns slightly different output)
    if (test == "roc") {
      all_markers = all_markers %>%
        select(cluster, gene, logFC = avg_logFC, myAUC, power) %>%
        filter(power > 0.4) %>%
        mutate(logFC = round(logFC, 5), myAUC = round(myAUC, 5), power = round(power, 5)) %>%
        arrange(cluster, -power)
      top_markers = all_markers %>% filter(logFC > 0)
      top_markers = top_markers %>% group_by(cluster) %>% top_n(50, power) %>% ungroup()
    } else {
      all_markers = all_markers %>%
        select(cluster, gene, logFC = avg_logFC, p_val, p_val_adj) %>%
        filter(p_val_adj < 0.001) %>%
        mutate(logFC = round(logFC, 5)) %>%
        arrange(cluster, p_val_adj, p_val)
      top_markers = all_markers %>% filter(logFC > 0)
      top_markers = top_markers %>% group_by(cluster) %>% top_n(50, logFC) %>% ungroup()
    }

  } else {

    # pairwise (each cluster versus each other cluster) cluster markers calculation
    markers_dir = "markers-pairwise"

    # initialize empty results tibble
    unfiltered_markers = tibble(
      cluster = character(), cluster2 = character(), gene = character(),
      logFC = numeric(), p_val = numeric(), p_val_adj = numeric()
    )

    # check each cluster combination
    for (cluster1 in clusters) {
      for (cluster2 in setdiff(clusters, cluster1)) {

        # find differentially expressed genes between two specific clusters
        # low fold change cutoff to maximize chance of appearing in all comparisons
        # capture output to avoid excessive warnings
        markers_log = capture.output({
          cur_markers = FindMarkers(
            seurat_obj, assay = "RNA", ident.1 = cluster1, ident.2 = cluster2, test.use = test,
            logfc.threshold = log(1.1), min.pct = 0.1, only.pos = TRUE, min.diff.pct = -Inf, verbose = FALSE
          )
        }, type = "message")

        # clean up markers table (would need to be modified for "roc" test)
        cur_markers = cur_markers %>%
          rownames_to_column("gene") %>%
          mutate(cluster = cluster1) %>%
          mutate(cluster2 = cluster2) %>%
          filter(p_val_adj < 0.01) %>%
          mutate(logFC = round(avg_logFC, 5)) %>%
          select(one_of(colnames(unfiltered_markers)))

        # add current cluster combination genes to the table of all markers
        unfiltered_markers = bind_rows(unfiltered_markers, cur_markers)

      }
    }

    # adjust test name for output
    test = glue("pairwise.{test}")

    # sort the markers to make the table more readable
    unfiltered_markers = unfiltered_markers %>%
      distinct() %>%
      add_count(cluster, gene) %>%
      rename(cluster_gene_n = n) %>%
      arrange(cluster, gene, cluster2)

    # filter for genes that are significant compared to all other clusters
    all_markers = unfiltered_markers %>%
      filter(cluster_gene_n == (length(clusters) - 1)) %>%
      select(-cluster_gene_n)

    # extract the lowest and highest fold changes and p-values across all pairwise comparisons
    all_markers = all_markers %>%
      group_by(cluster, gene) %>%
      summarize_at(
        c("logFC", "p_val", "p_val_adj"),
        list(min = min, max = max)
      ) %>%
      ungroup() %>%
      arrange(cluster, -logFC_min)
    top_markers = all_markers %>% group_by(cluster) %>% top_n(50, logFC_min) %>% ungroup()

  }

  # create a separate sub-directory for all markers
  if (!dir.exists(markers_dir)) dir.create(markers_dir)

  # filename prefix
  filename_base = glue("{markers_dir}/markers.{label}.{test}")

  # save unfiltered markers for pairwise comparisons
  if (pairwise) {
    unfiltered_markers_csv = glue("{filename_base}.unfiltered.csv")
    message("unfiltered markers: ", unfiltered_markers_csv)
    write_excel_csv(unfiltered_markers, path = unfiltered_markers_csv)
    Sys.sleep(1)
  }

  all_markers_csv = glue("{filename_base}.all.csv")
  message("all markers: ", all_markers_csv)
  write_excel_csv(all_markers, path = all_markers_csv)
  Sys.sleep(1)

  top_markers_csv = glue("{filename_base}.top.csv")
  message("top markers: ", top_markers_csv)
  write_excel_csv(top_markers, path = top_markers_csv)
  Sys.sleep(1)

  # plot cluster markers heatmap
  plot_cluster_markers(seurat_obj, markers_tbl = all_markers, num_genes = c(5, 10, 20), filename_base = filename_base)

  # plot top cluster markers for each cluster
  for (cluster_name in clusters) {
    filename_cluster_base = glue("{markers_dir}/markers.{label}-{cluster_name}.{test}")
    cluster_markers = top_markers %>% filter(cluster == cluster_name)
    if (nrow(cluster_markers) > 9) {
      Sys.sleep(1)
      top_cluster_markers = cluster_markers %>% head(12) %>% pull(gene)
      plot_genes(seurat_obj, genes = top_cluster_markers, filename_base = filename_cluster_base)
    }
  }

}

# generate cluster markers heatmap (average expression per cluster, row-scaled)
plot_cluster_markers = function(seurat_obj, markers_tbl, num_genes, filename_base) {

  # adjust pairwise clusters to match the standard format
  if ("logFC_min" %in% colnames(markers_tbl)) { markers_tbl = markers_tbl %>% mutate(logFC = logFC_min) }

  # keep only the top cluster for each gene so each gene appears once
  markers_tbl = markers_tbl %>% filter(logFC > 0)
  markers_tbl = markers_tbl %>% group_by(gene) %>% top_n(1, logFC) %>% slice(1) %>% ungroup()

  num_clusters = Idents(seurat_obj) %>% as.character() %>% n_distinct()

  marker_genes = markers_tbl %>% pull(gene) %>% unique() %>% sort()
  cluster_avg_exp = AverageExpression(seurat_obj, assay = "RNA", features = marker_genes, verbose = FALSE)[["RNA"]]
  cluster_avg_exp = cluster_avg_exp %>% as.matrix() %>% log1p()
  cluster_avg_exp = cluster_avg_exp[rowSums(cluster_avg_exp) > 0, ]

  # heatmap settings
  hm_colors = colorRampPalette(c("#053061", "#FFFFFF", "#E41A1C"))(51)
  hm_width = ( num_clusters / 2 ) + 2

  for (ng in num_genes) {

    hm_base = glue("{filename_base}.heatmap.top{ng}")
    markers_top_tbl = markers_tbl %>% group_by(cluster) %>% top_n(ng, logFC) %>% ungroup()
    markers_top_tbl = markers_top_tbl %>% arrange(cluster, -logFC)

    # generate the scaled expression matrix and save the text version
    hm_mat = cluster_avg_exp[markers_top_tbl$gene, ]
    hm_mat = hm_mat %>% t() %>% scale() %>% t()
    hm_mat %>% round(3) %>% as_tibble(rownames
= "gene") %>% write_excel_csv(path = glue("{hm_base}.csv")) Sys.sleep(1) # set outliers to 95th percentile to yield a more balanced color scale scale_cutoff = as.numeric(quantile(abs(hm_mat), 0.95)) hm_mat[hm_mat > scale_cutoff] = scale_cutoff hm_mat[hm_mat < -scale_cutoff] = -scale_cutoff # generate the heatmap ph_obj = pheatmap( hm_mat, scale = "none", color = hm_colors, border_color = NA, cluster_rows = FALSE, cluster_cols = FALSE, fontsize = 10, fontsize_row = 8, fontsize_col = 12, show_colnames = TRUE, main = glue("Cluster Markers: Top {ng}") ) png(glue("{hm_base}.png"), width = hm_width, height = 10, units = "in", res = 300) grid::grid.newpage() grid::grid.draw(ph_obj$gtable) dev.off() Sys.sleep(1) pdf(glue("{hm_base}.pdf"), width = hm_width, height = 10) grid::grid.newpage() grid::grid.draw(ph_obj$gtable) dev.off() Sys.sleep(1) } } # calculate differentially expressed genes within each cluster calculate_cluster_de_genes = function(seurat_obj, label, test, group_var = "orig.ident") { message("\n\n ========== calculate cluster DE genes ========== \n\n") # create a separate sub-directory for differential expression results de_dir = glue("diff-expression-{group_var}") if (!dir.exists(de_dir)) dir.create(de_dir) # common settings num_de_genes = 50 # results table de_all_genes_tbl = tibble() # get DE genes for each cluster clusters = levels(seurat_obj) for (clust_name in clusters) { message(glue("calculating DE genes for cluster {clust_name}")) # subset to the specific cluster clust_obj = subset(seurat_obj, idents = clust_name) # revert back to the grouping variable sample/library labels Idents(clust_obj) = group_var message("cluster cells: ", ncol(clust_obj)) message("cluster groups: ", paste(levels(clust_obj), collapse = ", ")) # continue if cluster has multiple groups and more than 10 cells in each group if (n_distinct(Idents(clust_obj)) > 1 && min(table(Idents(clust_obj))) > 10) { # scale data for heatmap clust_obj = ScaleData(clust_obj, assay = "RNA", 
vars.to.regress = c("num_UMIs", "pct_mito")) # iterate through sample/library combinations (relevant if more than two) group_combinations = combn(levels(clust_obj), m = 2, simplify = TRUE) for (combination_num in 1:ncol(group_combinations)) { # determine combination g1 = group_combinations[1, combination_num] g2 = group_combinations[2, combination_num] comparison_label = glue("{g1}-vs-{g2}") message(glue("comparison: {clust_name} {g1} vs {g2}")) filename_label = glue("{de_dir}/de.{label}-{clust_name}.{comparison_label}.{test}") # find differentially expressed genes (default Wilcoxon rank sum test) de_genes = FindMarkers(clust_obj, ident.1 = g1, ident.2 = g2, assay = "RNA", test.use = test, logfc.threshold = log(1), min.pct = 0.1, only.pos = FALSE, print.bar = FALSE) # perform some light filtering and clean up de_genes = de_genes %>% rownames_to_column("gene") %>% mutate(cluster = clust_name, group1 = g1, group2 = g2, de_test = test) %>% select(cluster, group1, group2, de_test, gene, logFC = avg_logFC, p_val, p_val_adj) %>% mutate( logFC = round(logFC, 3), p_val = if_else(p_val < 0.00001, p_val, round(p_val, 5)), p_val_adj = if_else(p_val_adj < 0.00001, p_val_adj, round(p_val_adj, 5)) ) %>% arrange(p_val_adj, p_val) message(glue("{comparison_label} num genes: {nrow(de_genes)}")) # save stats table write_excel_csv(de_genes, path = glue("{filename_label}.csv")) # add cluster genes to all genes de_all_genes_tbl = bind_rows(de_all_genes_tbl, de_genes) # heatmap of top genes if (nrow(de_genes) > 5) { top_de_genes = de_genes %>% top_n(num_de_genes, -p_val_adj) %>% arrange(logFC) %>% pull(gene) plot_hm = DoHeatmap(clust_obj, features = top_de_genes, assay = "RNA", slot = "scale.data") heatmap_prefix = glue("{filename_label}.heatmap.top{num_de_genes}") ggsave(glue("{heatmap_prefix}.png"), plot = plot_hm, width = 15, height = 10, units = "in") Sys.sleep(1) ggsave(glue("{heatmap_prefix}.pdf"), plot = plot_hm, width = 15, height = 10, units = "in") Sys.sleep(1) } } } else { 
message("skip cluster: ", clust_name) } message(" ") } # save stats table write_excel_csv(de_all_genes_tbl, path = glue("{de_dir}/de.{label}.{group_var}.{test}.all.csv")) de_all_genes_tbl = de_all_genes_tbl %>% filter(p_val_adj < 0.01) write_excel_csv(de_all_genes_tbl, path = glue("{de_dir}/de.{label}.{group_var}.{test}.sig.csv")) } # ========== main ========== # output width options(width = 120) # print warnings as they occur options(warn = 1) # default type for the bitmap devices such as png (should default to "cairo") options(bitmapType = "cairo") # retrieve the command-line arguments suppressPackageStartupMessages(library(docopt)) opts = docopt(doc) # show docopt options # print(opts) # dependencies load_libraries() # set number of cores for parallel package (will use all available cores by default) options(mc.cores = 4) # evaluate Seurat R expressions asynchronously when possible (such as ScaleData) using future package plan("multiprocess", workers = 4) # increase the limit of the data to be shuttled between the processes from default 500MB to 50GB options(future.globals.maxSize = 50e9) # global settings colors_samples = c(brewer.pal(5, "Set1"), brewer.pal(8, "Dark2"), pal_igv("default")(51)) colors_clusters = c(pal_d3("category10")(10), pal_d3("category20b")(20), pal_igv()(51), pal_igv(alpha = 0.6)(51)) # analysis info analysis_step = "unknown" out_dir = opts$analysis_dir # create analysis directory if starting new analysis or exit if analysis already exists if (opts$create || opts$combine || opts$integrate) { if (opts$create) analysis_step = "create" if (opts$combine) analysis_step = "combine" if (opts$integrate) analysis_step = "integrate" message(glue("\n\n ========== started analysis step {analysis_step} for {out_dir} ========== \n\n")) if (dir.exists(out_dir)) { stop(glue("output analysis dir {out_dir} already exists")) } else { dir.create(out_dir) } # original working dir (before moving to analysis dir) original_wd = getwd() } # set analysis directory 
as working directory if (dir.exists(out_dir)) { setwd(out_dir) } else { stop(glue("output analysis dir {out_dir} does not exist")) } # check which command was used if (opts$create) { # log to file write(glue("analysis: {out_dir}"), file = "create.log", append = TRUE) write(glue("seurat version: {packageVersion('Seurat')}"), file = "create.log", append = TRUE) # create new seurat object based on input sample names and sample directories # can work with multiple samples, but the appropriate way is to use "combine" with objects that are pre-filtered seurat_obj = load_sample_counts_matrix(opts$sample_name, opts$sample_dir) # filter by number of genes and mitochondrial genes percentage (optional parameters) seurat_obj = filter_data(seurat_obj, min_genes = opts$min_genes, max_genes = opts$max_genes, max_mt = opts$mt) # calculate various variance metrics seurat_obj = calculate_variance(seurat_obj) saveRDS(seurat_obj, file = "seurat_obj.rds") } else if (opts$combine) { # merge multiple samples/libraries based on previous analysis directories seurat_obj = combine_seurat_obj(original_wd = original_wd, sample_analysis_dirs = opts$sample_analysis_dir) saveRDS(seurat_obj, file = "seurat_obj.rds") # calculate various variance metrics seurat_obj = calculate_variance(seurat_obj) saveRDS(seurat_obj, file = "seurat_obj.rds") } else if (opts$integrate) { # run integration seurat_obj = integrate_seurat_obj(original_wd, sample_analysis_dirs = opts$batch_analysis_dir, num_dim = opts$num_dim) seurat_obj = calculate_variance_integrated(seurat_obj, num_dim = opts$num_dim) saveRDS(seurat_obj, file = "seurat_obj.rds") } else { # all commands besides "create" and "cca" start with an existing seurat object if (file.exists("seurat_obj.rds")) { message("loading seurat_obj") seurat_obj = readRDS("seurat_obj.rds") } else { stop("seurat obj does not already exist (run 'create' step first)") } if (opts$cluster) { analysis_step = "cluster" message(glue("\n\n ========== started analysis step 
{analysis_step} for {out_dir} ========== \n\n")) # determine clusters seurat_obj = calculate_clusters(seurat_obj, num_dim = as.integer(opts$num_dim)) saveRDS(seurat_obj, file = "seurat_obj.rds") } if (opts$identify || opts$de) { # set resolution in the seurat object group_var = check_group_var(seurat_obj = seurat_obj, group_var = opts$resolution) seurat_obj = set_identity(seurat_obj = seurat_obj, group_var = group_var) # use a grouping-specific sub-directory for all output grouping_label = gsub("\\.", "", group_var) num_clusters = Idents(seurat_obj) %>% as.character() %>% n_distinct() clust_label = glue("clust{num_clusters}") res_dir = glue("clusters-{grouping_label}-{clust_label}") if (!dir.exists(res_dir)) dir.create(res_dir) setwd(res_dir) if (opts$identify) { analysis_step = "identify" message(glue("\n\n ========== started analysis step {analysis_step} for {out_dir} ========== \n\n")) # create tSNE plot (should already exist in the main directory) dr_filename = glue("dr.{grouping_label}.{clust_label}") seurat_obj = plot_clusters(seurat_obj, resolution = opts$resolution, filename_base = dr_filename) # cluster stat tables (number of cells and average expression) calculate_cluster_stats(seurat_obj, label = clust_label) # calculate and plot standard cluster markers calculate_cluster_markers(seurat_obj, label = clust_label, test = "roc") calculate_cluster_markers(seurat_obj, label = clust_label, test = "wilcox") calculate_cluster_markers(seurat_obj, label = clust_label, test = "MAST") # calculate and plot pairwise cluster markers (very slow, so skip for high number of clusters) num_clusters = Idents(seurat_obj) %>% as.character() %>% n_distinct() if (num_clusters < 20) { calculate_cluster_markers(seurat_obj, label = clust_label, test = "wilcox", pairwise = TRUE) calculate_cluster_markers(seurat_obj, label = clust_label, test = "MAST", pairwise = TRUE) } } # differential expression if (opts$de) { analysis_step = "diff" message(glue("\n\n ========== started analysis 
step {analysis_step} for {out_dir} ========== \n\n")) calculate_cluster_de_genes(seurat_obj, label = clust_label, test = "wilcox") calculate_cluster_de_genes(seurat_obj, label = clust_label, test = "MAST") } } } message(glue("\n\n ========== finished analysis step {analysis_step} for {out_dir} ========== \n\n")) # delete Rplots.pdf if (file.exists("Rplots.pdf")) file.remove("Rplots.pdf") # end
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/multi_chan.R \name{k_mcnn} \alias{k_mcnn} \title{k_mcnn} \usage{ k_mcnn(in_dim = 10000, in_length = 100, embed_dim = 128, sp_drop = 0.2, filter_sizes = c(1, 2, 3, 5), num_filters = 32, out_dim = 1, out_fun = "sigmoid", ...) } \arguments{ \item{in_dim}{Number of total vocabulary/words used} \item{in_length}{Length of the input sequences} \item{embed_dim}{Number of word vectors} \item{sp_drop}{Spatial Dropout after Embedding} \item{filter_sizes}{Filter sizes (windows)} \item{num_filters}{Number of filters per layer} \item{out_dim}{Number of neurons of the output layer} \item{out_fun}{Output activation function} \item{...}{Extra arguments} } \value{ model } \description{ get Keras Multi Channel CNN model } \examples{ Architecture is taken from [Code](https://www.kaggle.com/yekenot/textcnn-2d-convolution) max_features = 100000 maxlen = 200 embed_size = 300 filter_sizes = [1,2,3,5] num_filters = 32 inp = Input(shape=(maxlen, )) x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp) x = SpatialDropout1D(0.4)(x) x = Reshape((maxlen, embed_size, 1))(x) conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embed_size), kernel_initializer='normal', activation='elu')(x) conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embed_size), kernel_initializer='normal', activation='elu')(x) conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embed_size), kernel_initializer='normal', activation='elu')(x) conv_3 = Conv2D(num_filters, kernel_size=(filter_sizes[3], embed_size), kernel_initializer='normal', activation='elu')(x) maxpool_0 = MaxPool2D(pool_size=(maxlen - filter_sizes[0] + 1, 1))(conv_0) maxpool_1 = MaxPool2D(pool_size=(maxlen - filter_sizes[1] + 1, 1))(conv_1) maxpool_2 = MaxPool2D(pool_size=(maxlen - filter_sizes[2] + 1, 1))(conv_2) maxpool_3 = MaxPool2D(pool_size=(maxlen - filter_sizes[3] + 1, 1))(conv_3) z = Concatenate(axis=1)([maxpool_0, maxpool_1, 
maxpool_2, maxpool_3]) z = Flatten()(z) z = Dropout(0.1)(z) outp = Dense(6, activation="sigmoid")(z) model = Model(inputs=inp, outputs=outp) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) }
/man/k_mcnn.Rd
no_license
systats/tidykeras
R
false
true
2,347
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/multi_chan.R \name{k_mcnn} \alias{k_mcnn} \title{k_mcnn} \usage{ k_mcnn(in_dim = 10000, in_length = 100, embed_dim = 128, sp_drop = 0.2, filter_sizes = c(1, 2, 3, 5), num_filters = 32, out_dim = 1, out_fun = "sigmoid", ...) } \arguments{ \item{in_dim}{Number of total vocabulary/words used} \item{in_length}{Length of the input sequences} \item{embed_dim}{Number of word vectors} \item{sp_drop}{Spatial Dropout after Embedding} \item{filter_sizes}{Filter sizes (windows)} \item{num_filters}{Number of filters per layer} \item{out_dim}{Number of neurons of the output layer} \item{out_fun}{Output activation function} \item{...}{Extra arguments} } \value{ model } \description{ get Keras Multi Channel CNN model } \examples{ Architecture is taken from [Code](https://www.kaggle.com/yekenot/textcnn-2d-convolution) max_features = 100000 maxlen = 200 embed_size = 300 filter_sizes = [1,2,3,5] num_filters = 32 inp = Input(shape=(maxlen, )) x = Embedding(max_features, embed_size, weights=[embedding_matrix])(inp) x = SpatialDropout1D(0.4)(x) x = Reshape((maxlen, embed_size, 1))(x) conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embed_size), kernel_initializer='normal', activation='elu')(x) conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embed_size), kernel_initializer='normal', activation='elu')(x) conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embed_size), kernel_initializer='normal', activation='elu')(x) conv_3 = Conv2D(num_filters, kernel_size=(filter_sizes[3], embed_size), kernel_initializer='normal', activation='elu')(x) maxpool_0 = MaxPool2D(pool_size=(maxlen - filter_sizes[0] + 1, 1))(conv_0) maxpool_1 = MaxPool2D(pool_size=(maxlen - filter_sizes[1] + 1, 1))(conv_1) maxpool_2 = MaxPool2D(pool_size=(maxlen - filter_sizes[2] + 1, 1))(conv_2) maxpool_3 = MaxPool2D(pool_size=(maxlen - filter_sizes[3] + 1, 1))(conv_3) z = Concatenate(axis=1)([maxpool_0, maxpool_1, 
maxpool_2, maxpool_3]) z = Flatten()(z) z = Dropout(0.1)(z) outp = Dense(6, activation="sigmoid")(z) model = Model(inputs=inp, outputs=outp) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) }
#' module_gcam.usa_LA101.EIA_SEDS
#'
#' Produce two output tables from the EIA state energy database:
#' \itemize{
#' \item{L101.inEIA_EJ_state_S_F: Energy data by GCAM sector and fuel, state, and year; energy units in EJ, years from 1971-2010, includes only rows that have a defined sector and fuel}
#' \item{L101.EIA_use_all_Bbtu: Energy data by EIA sector and fuel code, GCAM sector and fuel, MSN, state, and year; energy units in Billion BTU, years from 1960-2011, includes all original data}
#' }
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L101.EIA_use_all_Bbtu}, \code{L101.inEIA_EJ_state_S_F}. The corresponding file in the
#' original data system was \code{LA101.EIA_SEDS.R} (gcam-usa level1).
#' @details See above
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter mutate select
#' @importFrom tidyr gather spread fill
#' @author AS April 2017
module_gcam.usa_LA101.EIA_SEDS <- function(command, ...) {
  if(command == driver.DECLARE_INPUTS) {
    return(c(FILE = "gcam-usa/EIA_SEDS_fuels",
             FILE = "gcam-usa/EIA_SEDS_sectors",
             FILE = "gcam-usa/EIA_use_all_Bbtu",
             FILE = "gcam-usa/A_fuel_conv"))
  } else if(command == driver.DECLARE_OUTPUTS) {
    return(c("L101.EIA_use_all_Bbtu",
             "L101.inEIA_EJ_state_S_F"))
  } else if(command == driver.MAKE) {

    year <- value <- Data_Status <- State <- MSN <- GCAM_fuel <- GCAM_sector <-
      state <- sector <- fuel <- conv_Bbtu_EJ <- EIA_fuel <- EIA_sector <-
      description.x <- description.y <- NULL # silence package check.

    all_data <- list(...)[[1]]

    # Load required inputs
    EIA_SEDS_fuels <- get_data(all_data, "gcam-usa/EIA_SEDS_fuels")
    EIA_SEDS_sectors <- get_data(all_data, "gcam-usa/EIA_SEDS_sectors")
    EIA_use_all_Bbtu <- get_data(all_data, "gcam-usa/EIA_use_all_Bbtu")
    A_fuel_conv <- get_data(all_data, "gcam-usa/A_fuel_conv")

    # ===================================================
    # Prep for output tables - add columns for GCAM sector and fuel names, using the substrings of the Mnemonic Series Name (MSN) code, and filter out U.S.
    EIA_use_all_Bbtu %>%
      gather_years %>%
      mutate(EIA_fuel = substr(MSN, 1, 2)) %>% # First and second digits of MSN is energy code
      mutate(EIA_sector = substr(MSN, 3, 4)) %>% # Third and fourth digits of MSN is sector code
      left_join(EIA_SEDS_fuels, by = "EIA_fuel") %>%
      left_join(EIA_SEDS_sectors, by = "EIA_sector") %>%
      filter(State != "US") %>%
      mutate(state = State, fuel = GCAM_fuel, sector = GCAM_sector) ->
      Bbtu_with_GCAM_names

    # Create 1 of the 2 output tables: narrow years from 1971-2010, convert billion BTU to EJ (fuel specific), remove rows that have no defined sector or fuel name
    Bbtu_with_GCAM_names %>%
      select(state, sector, fuel, year, value) %>%
      filter(year %in% HISTORICAL_YEARS, !is.na(fuel), !is.na(sector)) %>%
      left_join(A_fuel_conv, by = "fuel") %>%
      mutate(value = value * conv_Bbtu_EJ) %>%
      group_by(state, sector, fuel, year) %>%
      summarise(value = sum(value)) %>%
      arrange(fuel, sector) %>%
      ungroup() ->
      L101.inEIA_EJ_state_S_F

    # Create other output table: leave units as billion BTU, getting rid of missing values: prior to 1980, lots are missing. These data are only used for state-wise allocations
    # NOTE(review): mixing positive and negative selections in select() errors
    # in recent tidyselect versions; verify against the dplyr version pinned
    # by this project.
    Bbtu_with_GCAM_names %>%
      select(Data_Status, state, MSN, year, value, EIA_fuel, EIA_sector, sector, fuel, -State, -description.x, -description.y) %>%
      arrange(Data_Status, state, MSN, EIA_fuel, EIA_sector, sector, fuel, -year) -> # Year needs to be in descending order to use fill function
      Bbtu_with_GCAM_names_intermediate

    # To create this second output table, I need to split the dataframe and recombine
    # NOTE(review): fill() is applied ungrouped with default .direction = "down";
    # confirm values cannot carry across state/MSN group boundaries.
    Bbtu_with_GCAM_names_intermediate %>%
      filter(year %in% 1971:2011) %>% # Custom year range, want to keep NAs in 1960-1970
      fill(value) %>% # Replace NAs in 1971-1979 with values from one year more recent
      bind_rows(filter(Bbtu_with_GCAM_names_intermediate, year %in% 1960:1970)) %>% # Reattaching 1960-1970 rows
      arrange(Data_Status, state, MSN, EIA_fuel, EIA_sector, sector, fuel, -year) ->
      L101.EIA_use_all_Bbtu

    # ===================================================

    # Attach metadata (title, units, provenance) expected by the gcamdata driver
    L101.EIA_use_all_Bbtu %>%
      add_title("State Energy Data in Bbtu by Year, GCAM-Sector, and GCAM-Fuel") %>%
      add_units("Billion BTU") %>%
      add_comments("GCAM sector and fuel names were added, NAs for years 1971-1979 were replaced with most recent year's data available") %>%
      add_legacy_name("L101.EIA_use_all_Bbtu") %>%
      add_precursors("gcam-usa/EIA_use_all_Bbtu", "gcam-usa/EIA_SEDS_fuels", "gcam-usa/EIA_SEDS_sectors") %>%
      add_flags(FLAG_PROTECT_FLOAT) ->
      L101.EIA_use_all_Bbtu

    L101.inEIA_EJ_state_S_F %>%
      add_title("State Energy Data in EJ by Year, GCAM-Sector, and GCAM-Fuel") %>%
      add_units("EJ") %>%
      add_comments("GCAM sector and fuel names were added, units converted to EJ, data with no GCAM fuel or sector name removed") %>%
      add_legacy_name("L101.inEIA_EJ_state_S_F") %>%
      add_precursors("gcam-usa/EIA_use_all_Bbtu", "gcam-usa/EIA_SEDS_fuels", "gcam-usa/EIA_SEDS_sectors", "gcam-usa/A_fuel_conv") ->
      L101.inEIA_EJ_state_S_F

    return_data(L101.EIA_use_all_Bbtu, L101.inEIA_EJ_state_S_F)
  } else {
    stop("Unknown command")
  }
}
/R/zchunk_LA101.EIA_SEDS.R
permissive
qingyihou/gcamdata
R
false
false
5,679
r
#' module_gcam.usa_LA101.EIA_SEDS
#'
#' Produce two output tables from the EIA state energy database:
#' \itemize{
#' \item{L101.inEIA_EJ_state_S_F: Energy data by GCAM sector and fuel, state, and year; energy units in EJ, years from 1971-2010, includes only rows that have a defined sector and fuel}
#' \item{L101.EIA_use_all_Bbtu: Energy data by EIA sector and fuel code, GCAM sector and fuel, MSN, state, and year; energy units in Billion BTU, years from 1960-2011, includes all original data}
#' }
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L101.EIA_use_all_Bbtu}, \code{L101.inEIA_EJ_state_S_F}. The corresponding file in the
#' original data system was \code{LA101.EIA_SEDS.R} (gcam-usa level1).
#' @details See above
#' @importFrom assertthat assert_that
#' @importFrom dplyr filter mutate select
#' @importFrom tidyr gather spread fill
#' @author AS April 2017
module_gcam.usa_LA101.EIA_SEDS <- function(command, ...) {
  if(command == driver.DECLARE_INPUTS) {
    return(c(FILE = "gcam-usa/EIA_SEDS_fuels",
             FILE = "gcam-usa/EIA_SEDS_sectors",
             FILE = "gcam-usa/EIA_use_all_Bbtu",
             FILE = "gcam-usa/A_fuel_conv"))
  } else if(command == driver.DECLARE_OUTPUTS) {
    return(c("L101.EIA_use_all_Bbtu",
             "L101.inEIA_EJ_state_S_F"))
  } else if(command == driver.MAKE) {

    year <- value <- Data_Status <- State <- MSN <- GCAM_fuel <- GCAM_sector <-
      state <- sector <- fuel <- conv_Bbtu_EJ <- EIA_fuel <- EIA_sector <-
      description.x <- description.y <- NULL # silence package check.

    all_data <- list(...)[[1]]

    # Load required inputs
    EIA_SEDS_fuels <- get_data(all_data, "gcam-usa/EIA_SEDS_fuels")
    EIA_SEDS_sectors <- get_data(all_data, "gcam-usa/EIA_SEDS_sectors")
    EIA_use_all_Bbtu <- get_data(all_data, "gcam-usa/EIA_use_all_Bbtu")
    A_fuel_conv <- get_data(all_data, "gcam-usa/A_fuel_conv")

    # ===================================================
    # Prep for output tables - add columns for GCAM sector and fuel names, using the substrings of the Mnemonic Series Name (MSN) code, and filter out U.S.
    EIA_use_all_Bbtu %>%
      gather_years %>%
      mutate(EIA_fuel = substr(MSN, 1, 2)) %>% # First and second digits of MSN is energy code
      mutate(EIA_sector = substr(MSN, 3, 4)) %>% # Third and fourth digits of MSN is sector code
      left_join(EIA_SEDS_fuels, by = "EIA_fuel") %>%
      left_join(EIA_SEDS_sectors, by = "EIA_sector") %>%
      filter(State != "US") %>%
      mutate(state = State, fuel = GCAM_fuel, sector = GCAM_sector) ->
      Bbtu_with_GCAM_names

    # Create 1 of the 2 output tables: narrow years from 1971-2010, convert billion BTU to EJ (fuel specific), remove rows that have no defined sector or fuel name
    Bbtu_with_GCAM_names %>%
      select(state, sector, fuel, year, value) %>%
      filter(year %in% HISTORICAL_YEARS, !is.na(fuel), !is.na(sector)) %>%
      left_join(A_fuel_conv, by = "fuel") %>%
      mutate(value = value * conv_Bbtu_EJ) %>%
      group_by(state, sector, fuel, year) %>%
      summarise(value = sum(value)) %>%
      arrange(fuel, sector) %>%
      ungroup() ->
      L101.inEIA_EJ_state_S_F

    # Create other output table: leave units as billion BTU, getting rid of missing values: prior to 1980, lots are missing. These data are only used for state-wise allocations
    # NOTE(review): mixing positive and negative selections in select() errors
    # in recent tidyselect versions; verify against the dplyr version pinned
    # by this project.
    Bbtu_with_GCAM_names %>%
      select(Data_Status, state, MSN, year, value, EIA_fuel, EIA_sector, sector, fuel, -State, -description.x, -description.y) %>%
      arrange(Data_Status, state, MSN, EIA_fuel, EIA_sector, sector, fuel, -year) -> # Year needs to be in descending order to use fill function
      Bbtu_with_GCAM_names_intermediate

    # To create this second output table, I need to split the dataframe and recombine
    # NOTE(review): fill() is applied ungrouped with default .direction = "down";
    # confirm values cannot carry across state/MSN group boundaries.
    Bbtu_with_GCAM_names_intermediate %>%
      filter(year %in% 1971:2011) %>% # Custom year range, want to keep NAs in 1960-1970
      fill(value) %>% # Replace NAs in 1971-1979 with values from one year more recent
      bind_rows(filter(Bbtu_with_GCAM_names_intermediate, year %in% 1960:1970)) %>% # Reattaching 1960-1970 rows
      arrange(Data_Status, state, MSN, EIA_fuel, EIA_sector, sector, fuel, -year) ->
      L101.EIA_use_all_Bbtu

    # ===================================================

    # Attach metadata (title, units, provenance) expected by the gcamdata driver
    L101.EIA_use_all_Bbtu %>%
      add_title("State Energy Data in Bbtu by Year, GCAM-Sector, and GCAM-Fuel") %>%
      add_units("Billion BTU") %>%
      add_comments("GCAM sector and fuel names were added, NAs for years 1971-1979 were replaced with most recent year's data available") %>%
      add_legacy_name("L101.EIA_use_all_Bbtu") %>%
      add_precursors("gcam-usa/EIA_use_all_Bbtu", "gcam-usa/EIA_SEDS_fuels", "gcam-usa/EIA_SEDS_sectors") %>%
      add_flags(FLAG_PROTECT_FLOAT) ->
      L101.EIA_use_all_Bbtu

    L101.inEIA_EJ_state_S_F %>%
      add_title("State Energy Data in EJ by Year, GCAM-Sector, and GCAM-Fuel") %>%
      add_units("EJ") %>%
      add_comments("GCAM sector and fuel names were added, units converted to EJ, data with no GCAM fuel or sector name removed") %>%
      add_legacy_name("L101.inEIA_EJ_state_S_F") %>%
      add_precursors("gcam-usa/EIA_use_all_Bbtu", "gcam-usa/EIA_SEDS_fuels", "gcam-usa/EIA_SEDS_sectors", "gcam-usa/A_fuel_conv") ->
      L101.inEIA_EJ_state_S_F

    return_data(L101.EIA_use_all_Bbtu, L101.inEIA_EJ_state_S_F)
  } else {
    stop("Unknown command")
  }
}
##' Modifies 'data' by adding new values supplied in recodeFileName
##'
##' recodeFileName is expected to have columns
##' c(lookupVariable1,lookupOperator1, lookupValue1,
##' lookupVariable2,lookupOperator2, lookupValue2,
##' lookupVariable3,lookupOperator3, lookupValue3, newVariable, newValue)
##'
##' Within the column 'newVariable', replace values that
##' meet the condition[i] defined by all 'lookupOperator[i]' and 'lookupValue[i]' combinations
##' within column 'lookupVariable[i]' with the value 'newValue'.
##' If any 'lookupVariable[i]' is NA, then sets condition[i] to equal TRUE
##'
##' Note that lookupVariable can be the same as newVariable.
##'
##' Note that I have not yet figured out how best to ensure that there are no extra whitespaces in the
##' data set that is being recoded.
##'
##' @param recodeFileName name of table with recode instructions
##' @param data existing data.frame
##' @return modified data.frame
masterRecoder <- function(recodeFileName, data){
  validFromNames <- names(data)
  import <- readRecode(recodeFileName, validFromNames)
  if( !is.null(import)){
    # apply each recode instruction (one row of the recode table) in order
    for(i in seq_len(nrow(import))){
      #Make replacements
      #print(import[i,])
      col.to <- import$newVariable[i]
      ##colsFrom <- paste("col.from",c(1,2,3), sep="")
      # pull the three lookup variable / operator / value triplets for this row
      lookupVariableColNames <- paste("lookupVariable", 1:3, sep="")
      lookupVariableValues <- as.list(import[i,lookupVariableColNames])
      lookupOperatorColNames <- paste("lookupOperator", 1:3, sep="")
      OperatorValues <- as.list(import[i,lookupOperatorColNames])
      lookupValueColNames <- paste("lookupValue", 1:3, sep="")
      lookupValueValues <- as.list(import[i,lookupValueColNames])
      criteria <- vector("list", 3)
      for(j in 1:3) {
        if(is.na(lookupVariableValues[[j]])) {
          # set criteria[j] to TRUE if there is no lookupVariable specified
          criteria[[j]] <- TRUE
        } else {
          # determine subset of records where criteria[j] applies;
          # the operator string (e.g. "==", ">=") is resolved via match.fun
          criteria[[j]] <- !is.na(data[[lookupVariableValues[[j]]]]) & # Note that invalid values in the data frame need to be coded as "NA"
            match.fun(OperatorValues[[j]])(data[[lookupVariableValues[[j]]]], lookupValueValues[[j]])
        }
      }
      # a row is recoded only when all three conditions hold
      rows <- criteria[[1]] & criteria[[2]] & criteria[[3]]
      data[rows,col.to] <- import$newValue[i]
    }
  }
  data
}

##' Utility function to read/process recodeFileName for masterRecoder
##'
##' @param recodeFileName name of table with recode instructions
##' @param validFromNames vector of existing variable names in the data frame to be recoded
##' @return data.frame with columns c(lookupVariable1,lookupOperator1, lookupValue1,
##' lookupVariable2,lookupOperator2, lookupValue2,
##' lookupVariable3,lookupOperator3, lookupValue3, newVariable, newValue)
##' or NULL when the file is missing or empty
readRecode <- function(recodeFileName, validFromNames){
  if(file.exists(recodeFileName)){
    import <- read.csv(recodeFileName, header=TRUE, stringsAsFactors=FALSE, strip.white=TRUE)
    if(nrow(import)> 0 ){
      #Check columns names for import are right
      expectedColumns<- c("lookupVariable1","lookupOperator1","lookupValue1",
                          "lookupVariable2","lookupOperator2","lookupValue2",
                          "lookupVariable3","lookupOperator3","lookupValue3",
                          "newVariable", "newValue")
      nameIsOK <- names(import) %in% expectedColumns
      if(any(!nameIsOK))
        stop("Incorrect name in lookup table for ", recodeFileName, "--> ",
             paste(names(import)[!nameIsOK], collapse=", "))
      #Check values of lookupVariable[i] are in list of allowed variable names
      lookupVariableColNames <- paste("lookupVariable",c(1,2,3), sep="")
      for(colname in lookupVariableColNames) {
        # empty cells mean "no condition"; normalize them to NA
        import[colname][import[colname] == ""] <- NA
        # NOTE(review): import[colname] is a one-column data.frame; %in% relies
        # on match() coercing it — confirm this behaves as intended.
        nameIsOK <- import[colname] %in% validFromNames | is.na(import[colname])
        if(any(!nameIsOK))
          stop("Names in the recode file do not match variable names in the dataset. ",
               "Column: ", colname,
               "Variable name: ", paste(import[colname][!nameIsOK], collapse=", "))
      }
    } else {
      import <- NULL
    }
  } else {
    import <- NULL
  }
  import
}
/masterCoder.R
no_license
ntanners/r_code
R
false
false
4,320
r
##' Modifies 'data' by adding new values supplied in recodeFileName
##'
##' recodeFileName is expected to have columns
##' c(lookupVariable1,lookupOperator1, lookupValue1,
##' lookupVariable2,lookupOperator2, lookupValue2,
##' lookupVariable3,lookupOperator3, lookupValue3, newVariable, newValue)
##'
##' Within the column 'newVariable', replace values that
##' meet the condition[i] defined by all 'lookupOperator[i]' and 'lookupValue[i]' combinations
##' within column 'lookupVariable[i]' with the value 'newValue'.
##' If any 'lookupVariable[i]' is NA, then sets condition[i] to equal TRUE
##'
##' Note that lookupVariable can be the same as newVariable.
##'
##' Note that I have not yet figured out how best to ensure that there are no extra whitespaces in the
##' data set that is being recoded.
##'
##' @param recodeFileName name of table with recode instructions
##' @param data existing data.frame
##' @return modified data.frame
masterRecoder <- function(recodeFileName, data){
  validFromNames <- names(data)
  import <- readRecode(recodeFileName, validFromNames)
  if( !is.null(import)){
    # apply each recode instruction (one row of the recode table) in order
    for(i in seq_len(nrow(import))){
      #Make replacements
      #print(import[i,])
      col.to <- import$newVariable[i]
      ##colsFrom <- paste("col.from",c(1,2,3), sep="")
      # pull the three lookup variable / operator / value triplets for this row
      lookupVariableColNames <- paste("lookupVariable", 1:3, sep="")
      lookupVariableValues <- as.list(import[i,lookupVariableColNames])
      lookupOperatorColNames <- paste("lookupOperator", 1:3, sep="")
      OperatorValues <- as.list(import[i,lookupOperatorColNames])
      lookupValueColNames <- paste("lookupValue", 1:3, sep="")
      lookupValueValues <- as.list(import[i,lookupValueColNames])
      criteria <- vector("list", 3)
      for(j in 1:3) {
        if(is.na(lookupVariableValues[[j]])) {
          # set criteria[j] to TRUE if there is no lookupVariable specified
          criteria[[j]] <- TRUE
        } else {
          # determine subset of records where criteria[j] applies;
          # the operator string (e.g. "==", ">=") is resolved via match.fun
          criteria[[j]] <- !is.na(data[[lookupVariableValues[[j]]]]) & # Note that invalid values in the data frame need to be coded as "NA"
            match.fun(OperatorValues[[j]])(data[[lookupVariableValues[[j]]]], lookupValueValues[[j]])
        }
      }
      # a row is recoded only when all three conditions hold
      rows <- criteria[[1]] & criteria[[2]] & criteria[[3]]
      data[rows,col.to] <- import$newValue[i]
    }
  }
  data
}

##' Utility function to read/process recodeFileName for masterRecoder
##'
##' @param recodeFileName name of table with recode instructions
##' @param validFromNames vector of existing variable names in the data frame to be recoded
##' @return data.frame with columns c(lookupVariable1,lookupOperator1, lookupValue1,
##' lookupVariable2,lookupOperator2, lookupValue2,
##' lookupVariable3,lookupOperator3, lookupValue3, newVariable, newValue)
##' or NULL when the file is missing or empty
readRecode <- function(recodeFileName, validFromNames){
  if(file.exists(recodeFileName)){
    import <- read.csv(recodeFileName, header=TRUE, stringsAsFactors=FALSE, strip.white=TRUE)
    if(nrow(import)> 0 ){
      #Check columns names for import are right
      expectedColumns<- c("lookupVariable1","lookupOperator1","lookupValue1",
                          "lookupVariable2","lookupOperator2","lookupValue2",
                          "lookupVariable3","lookupOperator3","lookupValue3",
                          "newVariable", "newValue")
      nameIsOK <- names(import) %in% expectedColumns
      if(any(!nameIsOK))
        stop("Incorrect name in lookup table for ", recodeFileName, "--> ",
             paste(names(import)[!nameIsOK], collapse=", "))
      #Check values of lookupVariable[i] are in list of allowed variable names
      lookupVariableColNames <- paste("lookupVariable",c(1,2,3), sep="")
      for(colname in lookupVariableColNames) {
        # empty cells mean "no condition"; normalize them to NA
        import[colname][import[colname] == ""] <- NA
        # NOTE(review): import[colname] is a one-column data.frame; %in% relies
        # on match() coercing it — confirm this behaves as intended.
        nameIsOK <- import[colname] %in% validFromNames | is.na(import[colname])
        if(any(!nameIsOK))
          stop("Names in the recode file do not match variable names in the dataset. ",
               "Column: ", colname,
               "Variable name: ", paste(import[colname][!nameIsOK], collapse=", "))
      }
    } else {
      import <- NULL
    }
  } else {
    import <- NULL
  }
  import
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{subsetES} \alias{subsetES} \title{Subsets es, if rows or columns are not specified, all are retained} \usage{ subsetES(es, columns = c(), rows = c()) } \arguments{ \item{es}{ExpressionSet object.#'} \item{columns}{List of specified columns' indices (optional), indices start from 0#'} \item{rows}{List of specified rows' indices (optional), indices start from 0} } \value{ `es`'s subset } \description{ Subsets es, if rows or columns are not specified, all are retained }
/man/subsetES.Rd
no_license
bhvbhushan/phantasus
R
false
true
566
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{subsetES} \alias{subsetES} \title{Subsets es, if rows or columns are not specified, all are retained} \usage{ subsetES(es, columns = c(), rows = c()) } \arguments{ \item{es}{ExpressionSet object.#'} \item{columns}{List of specified columns' indices (optional), indices start from 0#'} \item{rows}{List of specified rows' indices (optional), indices start from 0} } \value{ `es`'s subset } \description{ Subsets es, if rows or columns are not specified, all are retained }
"rateratio.midp" <- function(x, y = NULL, conf.level = 0.95, rev = c("neither", "rows", "columns", "both"), verbose = FALSE){ if(is.matrix(x) && !is.null(y)){stop("y argument should be NULL")} if(is.null(y)){ x <- ratetable(x, rev = rev) } else { xn <- substitute(x) yn <- substitute(y) x <- ratetable(x, y, rev = rev) colnames(x) <- c(xn, yn) } tmx <- table.margins(x)[,-3] alpha <- 1 - conf.level nr <- nrow(x) est <- matrix(NA, nr, 3) est[1,1] <- 1 for(i in 2:nr){ aa <- x[i,1]; bb <- x[1,1]; pt1 <- x[i,2]; pt0 <- x[1,2] pt <- pt0 + pt1 mm <- aa + bb s.irr <- uniroot(function(s) (dbinom(aa, mm, s)/2 + pbinom(aa-1, mm, s)) - 0.5, c(0, 1))$root irr <- (s.irr / (1 - s.irr)) * (pt0 / pt1) s.lower <- uniroot(function(s) 1 - (dbinom(aa, mm, s)/2 + pbinom(aa-1, mm, s)) - alpha/2, c(0,1))$root irr.lower <- (s.lower/ (1 - s.lower)) * (pt0 / pt1) s.upper <- uniroot(function(s) dbinom(aa, mm, s)/2 + pbinom(aa-1, mm, s) - alpha/2, c(0,1))$root irr.upper <- (s.upper/ (1 - s.upper)) * (pt0 / pt1) est[i,] <- c(irr, irr.lower, irr.upper) } pval <- rate2by2.test(x)$p.value colnames(est) <- c("estimate", "lower", "upper") rownames(est) <- rownames(x) cn2 <- paste("rate ratio with", paste(100*conf.level, "%", sep=""), "C.I.") names(dimnames(est)) <- c(names(dimnames(x))[1], cn2) rr <- list(x = x, data = tmx, measure = est, conf.level = conf.level, p.value = pval ) rrs <- list(data = tmx, measure = est, p.value = pval ) attr(rr, "method") <- "Median unbiased estimate & mid-p exact CI" attr(rrs, "method") <- "Median unbiased estimate & mid-p exact CI" if(verbose==FALSE) { rrs } else rr }
/epitools/R/rateratio.midp.R
no_license
ingted/R-Examples
R
false
false
1,921
r
"rateratio.midp" <- function(x, y = NULL, conf.level = 0.95, rev = c("neither", "rows", "columns", "both"), verbose = FALSE){ if(is.matrix(x) && !is.null(y)){stop("y argument should be NULL")} if(is.null(y)){ x <- ratetable(x, rev = rev) } else { xn <- substitute(x) yn <- substitute(y) x <- ratetable(x, y, rev = rev) colnames(x) <- c(xn, yn) } tmx <- table.margins(x)[,-3] alpha <- 1 - conf.level nr <- nrow(x) est <- matrix(NA, nr, 3) est[1,1] <- 1 for(i in 2:nr){ aa <- x[i,1]; bb <- x[1,1]; pt1 <- x[i,2]; pt0 <- x[1,2] pt <- pt0 + pt1 mm <- aa + bb s.irr <- uniroot(function(s) (dbinom(aa, mm, s)/2 + pbinom(aa-1, mm, s)) - 0.5, c(0, 1))$root irr <- (s.irr / (1 - s.irr)) * (pt0 / pt1) s.lower <- uniroot(function(s) 1 - (dbinom(aa, mm, s)/2 + pbinom(aa-1, mm, s)) - alpha/2, c(0,1))$root irr.lower <- (s.lower/ (1 - s.lower)) * (pt0 / pt1) s.upper <- uniroot(function(s) dbinom(aa, mm, s)/2 + pbinom(aa-1, mm, s) - alpha/2, c(0,1))$root irr.upper <- (s.upper/ (1 - s.upper)) * (pt0 / pt1) est[i,] <- c(irr, irr.lower, irr.upper) } pval <- rate2by2.test(x)$p.value colnames(est) <- c("estimate", "lower", "upper") rownames(est) <- rownames(x) cn2 <- paste("rate ratio with", paste(100*conf.level, "%", sep=""), "C.I.") names(dimnames(est)) <- c(names(dimnames(x))[1], cn2) rr <- list(x = x, data = tmx, measure = est, conf.level = conf.level, p.value = pval ) rrs <- list(data = tmx, measure = est, p.value = pval ) attr(rr, "method") <- "Median unbiased estimate & mid-p exact CI" attr(rrs, "method") <- "Median unbiased estimate & mid-p exact CI" if(verbose==FALSE) { rrs } else rr }
sink("diagnostics/lmer.txt") library(lme4) library(RPostgreSQL) drv <- dbDriver("PostgreSQL") con <- dbConnect(drv, dbname="cricket") query <- dbSendQuery(con, " select distinct r.game_id, r.year, r.field as field, r.team_name as team, r.opponent_name as opponent, ln(r.team_score) as gs, (year-2019) as w from super_smash.results r where r.year between 2021 and 2021 ;") sg <- fetch(query,n=-1) dim(sg) games <- sg[rep(row.names(sg), sg$w), ] dim(games) #games <- as.data.frame(do.call("rbind",gw)) #rm(sg) attach(games) pll <- list() # Fixed parameters field <- as.factor(field) #field <- relevel(field, ref = "neutral") #game_length <- as.factor(game_length) fp <- data.frame(field) #,game_length) fpn <- names(fp) # Random parameters game_id <- as.factor(game_id) #contrasts(game_id) <- 'contr.sum' offense <- as.factor(team) #contrasts(offense) <- 'contr.sum' defense <- as.factor(opponent) #contrasts(defense) <- 'contr.sum' rp <- data.frame(offense,defense) rpn <- names(rp) for (n in fpn) { df <- fp[[n]] level <- as.matrix(attributes(df)$levels) parameter <- rep(n,nrow(level)) type <- rep("fixed",nrow(level)) pll <- c(pll,list(data.frame(parameter,type,level))) } for (n in rpn) { df <- rp[[n]] level <- as.matrix(attributes(df)$levels) parameter <- rep(n,nrow(level)) type <- rep("random",nrow(level)) pll <- c(pll,list(data.frame(parameter,type,level))) } # Model parameters parameter_levels <- as.data.frame(do.call("rbind",pll)) dbWriteTable(con,c("super_smash","_parameter_levels"),parameter_levels,row.names=TRUE) g <- cbind(fp,rp) g$gs <- gs g$w <- w detach(games) dim(g) model <- gs ~ field+(1|offense)+(1|defense)+(1|game_id) #model <- gs ~ field+offense+defense+(1|game_id) fit <- lmer(model, data=g, verbose=TRUE, weights=w) fit summary(fit) # List of data frames # Fixed factors f <- fixef(fit) fn <- names(f) # Random factors r <- ranef(fit) rn <- names(r) results <- list() for (n in fn) { df <- f[[n]] factor <- n level <- n type <- "fixed" estimate <- df results <- 
c(results,list(data.frame(factor,type,level,estimate))) } for (n in rn) { df <- r[[n]] factor <- rep(n,nrow(df)) type <- rep("random",nrow(df)) level <- row.names(df) estimate <- df[,1] results <- c(results,list(data.frame(factor,type,level,estimate))) } combined <- as.data.frame(do.call("rbind",results)) dbWriteTable(con,c("super_smash","_basic_factors"),as.data.frame(combined),row.names=TRUE) quit("no")
/super_smash/sos/lmer.R
no_license
octonion/cricket
R
false
false
2,503
r
sink("diagnostics/lmer.txt") library(lme4) library(RPostgreSQL) drv <- dbDriver("PostgreSQL") con <- dbConnect(drv, dbname="cricket") query <- dbSendQuery(con, " select distinct r.game_id, r.year, r.field as field, r.team_name as team, r.opponent_name as opponent, ln(r.team_score) as gs, (year-2019) as w from super_smash.results r where r.year between 2021 and 2021 ;") sg <- fetch(query,n=-1) dim(sg) games <- sg[rep(row.names(sg), sg$w), ] dim(games) #games <- as.data.frame(do.call("rbind",gw)) #rm(sg) attach(games) pll <- list() # Fixed parameters field <- as.factor(field) #field <- relevel(field, ref = "neutral") #game_length <- as.factor(game_length) fp <- data.frame(field) #,game_length) fpn <- names(fp) # Random parameters game_id <- as.factor(game_id) #contrasts(game_id) <- 'contr.sum' offense <- as.factor(team) #contrasts(offense) <- 'contr.sum' defense <- as.factor(opponent) #contrasts(defense) <- 'contr.sum' rp <- data.frame(offense,defense) rpn <- names(rp) for (n in fpn) { df <- fp[[n]] level <- as.matrix(attributes(df)$levels) parameter <- rep(n,nrow(level)) type <- rep("fixed",nrow(level)) pll <- c(pll,list(data.frame(parameter,type,level))) } for (n in rpn) { df <- rp[[n]] level <- as.matrix(attributes(df)$levels) parameter <- rep(n,nrow(level)) type <- rep("random",nrow(level)) pll <- c(pll,list(data.frame(parameter,type,level))) } # Model parameters parameter_levels <- as.data.frame(do.call("rbind",pll)) dbWriteTable(con,c("super_smash","_parameter_levels"),parameter_levels,row.names=TRUE) g <- cbind(fp,rp) g$gs <- gs g$w <- w detach(games) dim(g) model <- gs ~ field+(1|offense)+(1|defense)+(1|game_id) #model <- gs ~ field+offense+defense+(1|game_id) fit <- lmer(model, data=g, verbose=TRUE, weights=w) fit summary(fit) # List of data frames # Fixed factors f <- fixef(fit) fn <- names(f) # Random factors r <- ranef(fit) rn <- names(r) results <- list() for (n in fn) { df <- f[[n]] factor <- n level <- n type <- "fixed" estimate <- df results <- 
c(results,list(data.frame(factor,type,level,estimate))) } for (n in rn) { df <- r[[n]] factor <- rep(n,nrow(df)) type <- rep("random",nrow(df)) level <- row.names(df) estimate <- df[,1] results <- c(results,list(data.frame(factor,type,level,estimate))) } combined <- as.data.frame(do.call("rbind",results)) dbWriteTable(con,c("super_smash","_basic_factors"),as.data.frame(combined),row.names=TRUE) quit("no")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scAPAList.R \name{set_scAPAList} \alias{set_scAPAList} \title{scAPAList Constructor} \usage{ set_scAPAList(.cells.counts = data.frame(), .clus.counts = data.frame(), .cluster.anot = data.frame(), .row.Data = data.frame(), .down.seq = data.frame()) } \arguments{ \item{.cells.counts}{a data.frame, first column "Peak_ID", is the ID of the peak. Other columns (numeric) are cell barcodes, cells - read counts.} \item{.clus.counts}{If available, the sum of the counts for each peak ID from the clusters to be analyzed. Data.frame, the first column "Peak_ID", is the ID of the peak. Other columns are the clusters, cells have the sum of the counts for each cluster for each peak ID.} \item{.cluster.anot}{A Data.frame, the first column is the cell barcode as it appears in the column names of cells.counts. The second column is the cluster name corresponding to each cell barcode.} \item{.row.Data}{Data.frame, details regarding the peaks (e.g genomic location, gene ID)} \item{.down.seq}{Data.frame, the first column is the peak ID, the second is the genomic sequence 200 nt downstream to it. For filtering internal priming suspected peaks.} } \description{ A list-based S4 class for storing read counts and associated information from single-cell RNA sequencing experiments. For downstream Alternative Polyadenylation analysis. }
/scAPA/man/set_scAPAList.Rd
permissive
eldadshulman/scAPA-1
R
false
true
1,416
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scAPAList.R \name{set_scAPAList} \alias{set_scAPAList} \title{scAPAList Constructor} \usage{ set_scAPAList(.cells.counts = data.frame(), .clus.counts = data.frame(), .cluster.anot = data.frame(), .row.Data = data.frame(), .down.seq = data.frame()) } \arguments{ \item{.cells.counts}{a data.frame, first column "Peak_ID", is the ID of the peak. Other columns (numeric) are cell barcodes, cells - read counts.} \item{.clus.counts}{If available, the sum of the counts for each peak ID from the clusters to be analyzed. Data.frame, the first column "Peak_ID", is the ID of the peak. Other columns are the clusters, cells have the sum of the counts for each cluster for each peak ID.} \item{.cluster.anot}{A Data.frame, the first column is the cell barcode as it appears in the column names of cells.counts. The second column is the cluster name corresponding to each cell barcode.} \item{.row.Data}{Data.frame, details regarding the peaks (e.g genomic location, gene ID)} \item{.down.seq}{Data.frame, the first column is the peak ID, the second is the genomic sequence 200 nt downstream to it. For filtering internal priming suspected peaks.} } \description{ A list-based S4 class for storing read counts and associated information from single-cell RNA sequencing experiments. For downstream Alternative Polyadenylation analysis. }
library(svIDE) ### Name: getFunctions ### Title: Get all functions in a given environment ### Aliases: getFunctions ### Keywords: utilities ### ** Examples getFunctions(1) # Functions defined in .GlobalEnv length(getFunctions(length(search()))) # Number of functions in package:base
/data/genthat_extracted_code/svIDE/examples/getFunctions.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
292
r
library(svIDE) ### Name: getFunctions ### Title: Get all functions in a given environment ### Aliases: getFunctions ### Keywords: utilities ### ** Examples getFunctions(1) # Functions defined in .GlobalEnv length(getFunctions(length(search()))) # Number of functions in package:base
library(tidyverse) library(ggraph) library(igraph) # read in edit_thai_dishes df <- read_csv("edit_thai_dishes.csv") # create edge d1 <- data.frame(from="Shared dishes", to=c("Curries", "Soups", "Salads", "Fried and stir-fried dishes", "Deep-fried dishes", "Grilled dishes", "Steamed or blanched dishes", "Stewed dishes", "Dipping sauces and pastes", "Misc Shared")) d2 <- df %>% select(minor_grouping, Thai_name) %>% slice(54:254) %>% rename( from = minor_grouping, to = Thai_name ) edges <- rbind(d1, d2) # create vertices # one line per object of hierarchy vertices = data.frame( name = unique(c(as.character(edges$from), as.character(edges$to))) ) # add column with group of each name vertices$group = edges$from[ match(vertices$name, edges$to)] # add label data and calculate ANGLE of labels vertices$id=NA myleaves=which(is.na(match(vertices$name, edges$from))) nleaves=length(myleaves) vertices$id[myleaves] = seq(1:nleaves) vertices$angle = 360 / nleaves * vertices$id + 90 # Works the best, angles: 90 to 450 # calculate the alignment of labels: right or left vertices$hjust<-ifelse( vertices$angle < 275, 1, 0) # flip angle BY to make them Readable vertices$angle<-ifelse(vertices$angle < 275, vertices$angle+180, vertices$angle) # plot dendrogram (shared dishes) shared_dishes_graph <- graph_from_data_frame(edges) ggraph(shared_dishes_graph, layout = "dendrogram", circular = TRUE) + geom_edge_diagonal(aes(edge_colour = edges$from), label_dodge = NULL) + geom_node_text(aes(x = x*1.15, y=y*1.15, filter = leaf, label=name, angle = vertices$angle, hjust= vertices$hjust, colour= vertices$group), size=2.7, alpha=1) + geom_node_point(color = "whitesmoke") + #scale_color_manual(values = c("red", "orange", "blue", "yellow", "green", "purple", "dodgerblue", "black", "pink", "white")) + #scale_edge_color_manual(values = c("red", "orange", "blue", "yellow", "green", "purple", "dodgerblue", "black", "pink", "white", "white")) + theme( plot.background = element_rect(fill = '#343d46'), 
panel.background = element_rect(fill = '#343d46'), legend.position = 'none', plot.title = element_text(colour = 'whitesmoke', face = 'bold', size = 25), plot.subtitle = element_text(colour = 'whitesmoke', margin = margin(0,0,30,0), size = 20), plot.caption = element_text(color = 'whitesmoke', face = 'italic') ) + labs( title = 'Thai Food is Best Shared', subtitle = '201 Ways to Make Friends', caption = 'Data: Wikipedia | Graphic: @paulapivat' ) + #expand_limits(x = c(-1.5, 1.5), y = c(-0.8, 0.8)) + expand_limits(x = c(-1.5, 1.5), y = c(-1.5, 1.5)) + coord_flip() + annotate("text", x = 0.4, y = 0.45, label = "Steamed", color = "#F564E3") + annotate("text", x = 0.2, y = 0.5, label = "Grilled", color = "#00BA38") + annotate("text", x = -0.2, y = 0.5, label = "Deep-Fried", color = "#DE8C00") + annotate("text", x = -0.4, y = 0.1, label = "Fried &\n Stir-Fried", color = "#7CAE00") + annotate("text", x = -0.3, y = -0.4, label = "Salads", color = "#00B4F0") + annotate("text", x = -0.05, y = -0.5, label = "Soups", color = "#C77CFF") + annotate("text", x = 0.3, y = -0.5, label = "Curries", color = "#F8766D") + annotate("text", x = 0.5, y = -0.1, label = "Misc", color = "#00BFC4") + annotate("text", x = 0.5, y = 0.1, label = "Sauces\nPastes", color = "#B79F00")
/web_scraping/circular_dendro_thai_dishes.R
permissive
PaulApivat/thai_dishes
R
false
false
3,574
r
library(tidyverse) library(ggraph) library(igraph) # read in edit_thai_dishes df <- read_csv("edit_thai_dishes.csv") # create edge d1 <- data.frame(from="Shared dishes", to=c("Curries", "Soups", "Salads", "Fried and stir-fried dishes", "Deep-fried dishes", "Grilled dishes", "Steamed or blanched dishes", "Stewed dishes", "Dipping sauces and pastes", "Misc Shared")) d2 <- df %>% select(minor_grouping, Thai_name) %>% slice(54:254) %>% rename( from = minor_grouping, to = Thai_name ) edges <- rbind(d1, d2) # create vertices # one line per object of hierarchy vertices = data.frame( name = unique(c(as.character(edges$from), as.character(edges$to))) ) # add column with group of each name vertices$group = edges$from[ match(vertices$name, edges$to)] # add label data and calculate ANGLE of labels vertices$id=NA myleaves=which(is.na(match(vertices$name, edges$from))) nleaves=length(myleaves) vertices$id[myleaves] = seq(1:nleaves) vertices$angle = 360 / nleaves * vertices$id + 90 # Works the best, angles: 90 to 450 # calculate the alignment of labels: right or left vertices$hjust<-ifelse( vertices$angle < 275, 1, 0) # flip angle BY to make them Readable vertices$angle<-ifelse(vertices$angle < 275, vertices$angle+180, vertices$angle) # plot dendrogram (shared dishes) shared_dishes_graph <- graph_from_data_frame(edges) ggraph(shared_dishes_graph, layout = "dendrogram", circular = TRUE) + geom_edge_diagonal(aes(edge_colour = edges$from), label_dodge = NULL) + geom_node_text(aes(x = x*1.15, y=y*1.15, filter = leaf, label=name, angle = vertices$angle, hjust= vertices$hjust, colour= vertices$group), size=2.7, alpha=1) + geom_node_point(color = "whitesmoke") + #scale_color_manual(values = c("red", "orange", "blue", "yellow", "green", "purple", "dodgerblue", "black", "pink", "white")) + #scale_edge_color_manual(values = c("red", "orange", "blue", "yellow", "green", "purple", "dodgerblue", "black", "pink", "white", "white")) + theme( plot.background = element_rect(fill = '#343d46'), 
panel.background = element_rect(fill = '#343d46'), legend.position = 'none', plot.title = element_text(colour = 'whitesmoke', face = 'bold', size = 25), plot.subtitle = element_text(colour = 'whitesmoke', margin = margin(0,0,30,0), size = 20), plot.caption = element_text(color = 'whitesmoke', face = 'italic') ) + labs( title = 'Thai Food is Best Shared', subtitle = '201 Ways to Make Friends', caption = 'Data: Wikipedia | Graphic: @paulapivat' ) + #expand_limits(x = c(-1.5, 1.5), y = c(-0.8, 0.8)) + expand_limits(x = c(-1.5, 1.5), y = c(-1.5, 1.5)) + coord_flip() + annotate("text", x = 0.4, y = 0.45, label = "Steamed", color = "#F564E3") + annotate("text", x = 0.2, y = 0.5, label = "Grilled", color = "#00BA38") + annotate("text", x = -0.2, y = 0.5, label = "Deep-Fried", color = "#DE8C00") + annotate("text", x = -0.4, y = 0.1, label = "Fried &\n Stir-Fried", color = "#7CAE00") + annotate("text", x = -0.3, y = -0.4, label = "Salads", color = "#00B4F0") + annotate("text", x = -0.05, y = -0.5, label = "Soups", color = "#C77CFF") + annotate("text", x = 0.3, y = -0.5, label = "Curries", color = "#F8766D") + annotate("text", x = 0.5, y = -0.1, label = "Misc", color = "#00BFC4") + annotate("text", x = 0.5, y = 0.1, label = "Sauces\nPastes", color = "#B79F00")
v <- rep(0, as.numeric(ncol(CovidConfirmedCases))) for (i in 1:nrow(CovidConfirmedCases)){ j = 0 cases = CovidConfirmedCases[i,ncol(CovidConfirmedCases)] if (cases != 0){ while (cases/2 < CovidConfirmedCases[i,ncol(CovidConfirmedCases)-j]) { j = j + 1 } days = j } else { days = 0 } v[i] <- days } CovidConfirmedCasesRate <- cbind(CovidConfirmedCases,v)
/covid19/www/4_load_external_data/07_CovidConfirmedCasesRate.R
permissive
SpencerButt/CHAD
R
false
false
495
r
v <- rep(0, as.numeric(ncol(CovidConfirmedCases))) for (i in 1:nrow(CovidConfirmedCases)){ j = 0 cases = CovidConfirmedCases[i,ncol(CovidConfirmedCases)] if (cases != 0){ while (cases/2 < CovidConfirmedCases[i,ncol(CovidConfirmedCases)-j]) { j = j + 1 } days = j } else { days = 0 } v[i] <- days } CovidConfirmedCasesRate <- cbind(CovidConfirmedCases,v)
## Step 1: Build and format ring width data ## This script takes the ring width files (rwl format) and the stand-level inputs from the year the trees were cored and formats the data ## to run in the STAN ring width model. In getting to the correct format, this script also matches cores with tree species, ## DBH, and location within the plot. The CSV file should be in the same format as that of the sample CSV file available at (INSERT REPO URL). build_data <- function(site, dvers, mvers, prefix, census_site, cutoff = 1900){ # Prepare workspace # Had to comment these out for submitting on ND campus cluster (stupid issues with loading packages) #library(plotrix) #library(dplR) #library(fields) #library(reshape2) #library(dplyr) #library(plyr) #library(ggplot2) # Create save folders for data site_dir <- file.path('sites',site) if (!file.exists(file.path(site_dir,'runs'))) dir.create(file.path(site_dir,'runs')) if (!file.exists(file.path(site_dir,'runs',paste0(mvers,'_',dvers)))) dir.create(file.path(site_dir,'runs',paste0(mvers,'_',dvers))) if (!file.exists(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'output'))) dir.create(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'output')) if (!file.exists(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'input'))) dir.create(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'input')) if (!file.exists(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'figures'))) dir.create(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'figures')) rwl_dir = file.path('sites',site,'data','raw','rwl') meta_loc = file.path('sites',site,'data','raw',paste0(site,'_treeMeta_',dvers,'.csv')) RDS_loc = file.path(site_dir,'runs',paste0(mvers,'_',dvers),'input',paste0('tree_data_', site ,'_STAN_',mvers,'_', dvers, '.RDS')) if (census_site){ census_loc = file.path('sites',site,'data','raw',paste0(site,'_census_',dvers,'.csv')) } ################################################# ################ 1. 
Extract data ################ ################################################# # Load CSV with tree data treeMeta = read.csv(meta_loc, stringsAsFactors = FALSE) len = nchar(treeMeta$ID[1]) # List of files with tree ring increments, extract only RWL files rwFiles <- list.files(rwl_dir) rwFiles <- rwFiles[grep(".rwl$", rwFiles)] rwData <- list() for(fn in rwFiles) { id <- gsub(".rw", "", fn) # Insert the contents of each file into the rwData list rwData[[id]] <- t((read.tucson(file.path(rwl_dir, fn)))) # rows are tree, cols are times } # Bind into one dataframe and insert core IDs into the matrix # Dimensions: core x year incr = ldply(rwData, rbind) incr = incr[,c(".id", sort(colnames(incr)[2:ncol(incr)]))] rownames(incr) = as.vector(unlist(lapply(rwData, rownames))) incr[,1] = rownames(incr) # Create incr_data data frame incr_data = melt(incr) colnames(incr_data) = c('id', 'year', 'incr') incr_data$year = as.vector(incr_data$year) # Remove NA values right away to quicken processing (only keep the series for which trees were alive/existed) # Also remove all increment values for before the determined cut off year, which has a default value of 1900 # Also remove all data that comes after the first year of full data (some sites have multiple years of coring) incr_data = incr_data %>% mutate(year = as.numeric(year)) %>% filter(!is.na(incr), year >= cutoff) # Assign core ID to each core incr_data$id = substr(incr_data$id, 1, len) # Assign plot to each core in data frame (if only one plot, all 1) if (!('site' %in% colnames(treeMeta))){ incr_data$plot = rep(1, nrow(incr_data)) }else{ incr_data$plot = sapply(1:length(incr_data$id), function(ind){ choose = treeMeta$site[which(treeMeta$ID == incr_data$id[ind])] return(as.numeric(substr(choose, nchar(prefix)+1, nchar(choose)))) }) } # Assign stat IDs treeMeta = treeMeta %>% filter(ID %in% unique(incr_data$id)) treeMeta$stat_id = seq(1,nrow(treeMeta)) incr_data$stat_id = as.numeric(plyr::mapvalues(incr_data$id, from = 
treeMeta$ID, to = treeMeta$stat_id, warn_missing = FALSE)) ######################################################### ################ 2. Organize census data ################ ######################################################### # Load and organize census data if census available if (census_site){ census = read.csv(census_loc, stringsAsFactors = FALSE) %>% filter(species %in% unique(treeMeta$species)) census_long = melt(census, id=c('site', 'ID', 'species', 'distance', 'finalCond')) colnames(census_long) = c('site', 'ID', 'species', 'distance','finalCond','year', 'dbh') # remove NA years and reformat census year column census_long = census_long %>% filter(!is.na(dbh)) %>% mutate(year = substr(year, 2, 3)) %>% mutate(year = ifelse(as.numeric(year) < 30, paste0('20',year), paste0('19',year))) # get census years census_years = as.numeric(sort(unique(census_long$year))) # create a list of all trees that are present in available data allTrees = full_join(census, treeMeta, by = c('ID')) %>% mutate(taxon = ifelse(is.na(species.x), species.y, species.x), distance = ifelse(is.na(distance.y), distance.x, distance.y), plot = as.numeric(substr(ID,nchar(prefix)+1,nchar(prefix)+1))) %>% select(ID, stat_id, taxon, distance, plot) allTrees$stat_id[which(is.na(allTrees$stat_id))] = seq(nrow(treeMeta)+1, nrow(treeMeta)+length(which(is.na(allTrees$stat_id)))) census_long$stat_id = as.numeric(plyr::mapvalues(census_long$ID, from = allTrees$ID, to = allTrees$stat_id, warn_missing = FALSE)) } ############################################################ ################ 3. 
Organize increment data ################ ############################################################ # Assign species to each observation incr_data$taxon = sapply(c(1:length(incr_data$id)), function(ind){ treeMeta$species[which(treeMeta$ID == incr_data$id[ind])] }) # Find year range for data year_end = max(as.numeric(incr_data$year), na.rm=TRUE) year_start = min(as.numeric(incr_data$year), na.rm=TRUE) years = seq(year_start, year_end) # Make units of time in "years since first recorded tree ring" incr_data$year = as.numeric(incr_data$year) - year_start + 1 # Order by tree and year incr_data = incr_data %>% arrange(stat_id, year) # Data frame of the first and last year of observation for each tree # First year for all years is the first year of data (which will tend to be the cutoff year) year_idx = data.frame(stat_id = as.numeric(aggregate(year~stat_id, data=incr_data, FUN=min, na.rm=TRUE)[,1]), year_end=as.numeric(aggregate(year~stat_id, incr_data, max)[,2])) year_idx$year_start = rep(1, length(year_idx$stat_id)) ######################################################### ################ 4. Organize RW DBH data ################ ######################################################### # create dataframe that gives year index corresponding to RW DBH measurement pdbh = aggregate(year~stat_id+plot+id, incr_data, max, na.rm=TRUE) %>% arrange(stat_id) pdbh$dbh = as.numeric(plyr::mapvalues(pdbh$id, from = treeMeta$ID, to = treeMeta$dbh, warn_missing = FALSE)) pdbh$distance = as.numeric(plyr::mapvalues(pdbh$id, from = treeMeta$ID, to = treeMeta$distance, warn_missing = FALSE)) pdbh$taxon = plyr::mapvalues(pdbh$id, from = treeMeta$ID, to = treeMeta$species, warn_missing = FALSE) ##################################################################### ################ 5. 
Organize RW only Model estimates ################ ##################################################################### # create dataframe that would hold all RW values we need to estimate based only on RW data # AKA loop through each tree and create a row of info for each RW value we need to estimate X_data = data.frame(meas=numeric(0), stat_id=numeric(0), year=numeric(0)) n = 1 for (tree in 1:length(year_idx$stat_id)){ stat = year_idx$stat_id[tree] year = seq(year_idx$year_start[tree], year_idx$year_end[tree]) meas = seq(n, n+length(year)-1) n = n + length(year) X_data = rbind(X_data, data.frame(meas=meas, stat_id=rep(stat, length(year)), year=year)) } ######################################################################### ################ 6. Organize RW + Census Model estimates ################ ######################################################################### if (census_site){ # Need to adjust dates of census data census_long$year = as.numeric(census_long$year) - year_start + 1 # create dataframe that would hold all RW values we need to estimate based on both RW + Census data # AKA loop through all trees and create a row of info for each RW value we need to estimate X_data_C = data.frame(meas=numeric(0), stat_id=numeric(0), year=numeric(0)) n = 1 for (tree in 1:length(allTrees$stat_id)){ stat = allTrees$stat_id[tree] # check to see if available in RW data in.RW = ifelse(stat %in% year_idx$stat_id, TRUE, FALSE) # check to see if in census in.C = ifelse(stat %in% census_long$stat_id, TRUE, FALSE) # if in both, determine latest date with available data # if RW ends before the last census measurement, we need to enable smoothing # last year will be either: # (1) year before first census without that tree if it is not in all censuses or # (2) last year with recorded data if after last census if (in.RW & in.C){ # first year is always first year of available data (tends to be cutoff) firstyr = 1 # what was the last census this tree was in? 
lastCensus = max(which(which(years %in% census_years) %in% as.numeric(census_long$year[which(census_long$stat_id == stat)]))) lastData = year_idx$year_end[year_idx$stat_id == stat] # if RW ends after census, then we just use last RW year if ((which(years %in% census_years)[lastCensus]) < lastData){ lastyr = lastData # we will stochastically pick death year for trees in processing, always run up until first year of coring # which in this case will just be the last year of data }else{ # if in last census, use last year of data (deal with in processing) if(lastCensus == length(census_years)){ lastyr = length(years) # if not in last census, use year before first census where the tree is missing and mark for smoothing }else{ lastyr = which(years == (census_years[lastCensus+1] - 1)) census_long$finalCond[which(census_long$stat_id == stat)] = 'dead' } } } # if only in RW, use year_idx if (in.RW & !in.C){ firstyr = 1 lastyr = year_idx$year_end[year_idx$stat_id == stat] } # if only in census, last year is either: # (1) year before first census date without that tree if it is not in all censuses or # (2) final year if in last census (will use firstCond later to determine if smoothing needed) # this allows us to perform mortality smoothing in processing the model output if (!in.RW & in.C){ # first year is going to be the first year with any data (tends to be cutoff) firstyr = 1 # determine the last year with census data lastCensus = max(which(which(years %in% census_years) %in% unique(census_long$year[which(census_long$stat_id == stat)]))) # if tree was in most recent census if (lastCensus == length(census_years)){ lastyr = length(years) # otherwise, run it until the year before the next census and mark for smoothing }else{ lastyr = which(years == (census_years[lastCensus+1] - 1)) census_long$finalCond[which(census_long$stat_id == stat)] = 'dead' } } if (!in.RW & !in.C){ print(paste(tree, 'is missing from both datasets')) next } # add those estimates needed for this stat ID 
year = seq(firstyr, lastyr) meas = seq(n, n+length(year)-1) n = n + length(year) X_data_C = rbind(X_data_C, data.frame(meas=meas, stat_id=rep(stat, length(year)), year=year)) } } ###################################################################################### ################ 7. Save variables as needed for model and processing ################ ###################################################################################### # if there are stat IDs without data, we just need to make sure they are removed pdbh = pdbh %>% filter(stat_id %in% unique(X_data$stat_id)) # obtain required values for rw model and processing # Number of measurements N_Xobs = nrow(incr_data) # Number of years where we have RW data N_years = length(years) # Number of trees with RW data N_Tr = nrow(pdbh) # Number of values to estimate with RW only model N_X = nrow(X_data) if (census_site){ # Number of trees that will need to be estimated N_C = length(allTrees$stat_id) # if there are stat IDs without data, we just need to make sure they are removed allTrees = allTrees %>% filter(stat_id %in% unique(X_data_C$stat_id)) # Number of census DBH measurements N_Dobs = nrow(census_long) # Number of values to estimate with the RW + Census model N_X_C = length(X_data_C$meas) # Dataframe that lists first and last RW estimate index for each tree for RW only model idx_C = data.frame(stat_id = allTrees$stat_id) idx_C$firstidx = sapply(c(1:length(idx_C$stat_id)), function(ind){ val = min(which((X_data_C$stat_id == idx_C$stat_id[ind]))) if (val == -Inf){ return(NA) }else{ return(val) } }) idx_C$lastidx = sapply(c(1:length(idx_C$stat_id)), function(ind){ val = max(which((X_data_C$stat_id == idx_C$stat_id[ind]))) if (val == -Inf){ return(NA) }else{ return(val) } }) # Maps diameter values to their respective RW estimate index Dobs2X = sapply(c(1:length(census_long$site)), function(ind){ inds = which(((X_data_C$stat_id == census_long$stat_id[ind]) & (X_data_C$year == census_long$year[ind]))) return(inds) 
}) # Determine log of all census diameter measurements to fit with RW STAN model logDobs = log(census_long$dbh) # Maps RW diameter measurements to respective RW estimate index for RW + Census model Tr2X_C = sapply((1:length(pdbh$stat_id)), function(ind){ which((X_data_C$stat_id == pdbh$stat_id[ind]) & (X_data_C$year == pdbh$year[ind])) }) # Maps RW estimates to year index and stat_id for RW + Census model X2year_C = X_data_C$year X2C = X_data_C$stat_id # Maps observed values of RW to respective RW estimate index for RW + Census model Xobs2X_C = sapply((1:length(incr_data$id)), function(ind){ which((X_data_C$stat_id == incr_data$stat_id[ind]) & (X_data_C$year == incr_data$year[ind])) }) # Data frame that contains all census measurements/data Dobs = census_long } # Maps RW diameter measurements to respective RW estimate index for RW only model Tr2X = sapply((1:length(pdbh$stat_id)), function(ind){ which((X_data$stat_id == pdbh$stat_id[ind]) & (X_data$year == pdbh$year[ind])) }) # Maps RW estimates to year index and stat_ids for RW only model X2year = X_data$year X2Tr = X_data$stat_id # Data frame that lists first and last RW estimate index for each tree for RW only model idx_Tr = year_idx idx_Tr$firstidx = sapply(c(1:length(idx_Tr$stat_id)), function(ind){ min(which((X_data$stat_id == idx_Tr$stat_id[ind]))) }) idx_Tr$lastidx = sapply(c(1:length(idx_Tr$stat_id)), function(ind){ max(which((X_data$stat_id == idx_Tr$stat_id[ind]))) }) idx_Tr = idx_Tr %>% dplyr::select(stat_id, firstidx, lastidx) # Maps observed values of RW to respective RW estimate index for RW only model Xobs2X = sapply((1:length(incr_data$id)), function(ind){ which((X_data$stat_id == incr_data$stat_id[ind]) & (X_data$year == incr_data$year[ind])) }) # Determine log of all RW diameter measurements to fit with RW STAN model logTr = log(pdbh$dbh) # Determine log of all RW increment measurements to fit with RW STAN model Xobs = incr_data$incr Xobs[Xobs==0] = 0.0001 logXobs = log(Xobs) # Larger save 
matrices # All RW measurement data Xobs = incr_data # All RW diameter measurement data Tr = pdbh ##################################################### ################ 4. Save as RDS file ################ ##################################################### if (census_site){ saveRDS(list(N_Xobs = N_Xobs, N_years = N_years, N_X = N_X, N_X_C = N_X_C, N_Tr = N_Tr, N_Dobs = N_Dobs, N_C = N_C, Tr2X = Tr2X, Tr2X_C = Tr2X_C, X2C = X2C, X2Tr = X2Tr, X2year = X2year, X2year_C = X2year_C, idx_C = idx_C, idx_Tr = idx_Tr, Dobs2X = Dobs2X, Xobs2X = Xobs2X, Xobs2X_C = Xobs2X_C, logDobs = logDobs, logTr = logTr, logXobs = logXobs, Dobs = Dobs, Tr = Tr, Xobs = Xobs, allTrees = allTrees, years = years ), file=RDS_loc) }else{ saveRDS(list(N_Xobs = N_Xobs, N_years = N_years, N_X = N_X, N_Tr = N_Tr, Tr2X = Tr2X, X2Tr = X2Tr, X2year = X2year, idx_Tr = idx_Tr, Xobs2X = Xobs2X, logTr = logTr, logXobs = logXobs, Xobs = Xobs, Tr = Tr, years = years ), file=RDS_loc) } ############################################### ################ 5. 
Check data ################ ############################################### # Organize diameter estimates trees = unique(incr_data$stat_id) D0s = rep(NA,length(trees)) D = rep(NA, length(incr_data$id)) for (t in seq_along(trees)){ stat = trees[t] yrs = rev(seq(1,max((incr_data %>% filter(stat_id == stat))$year))) Dlast = Tr$dbh[which(Tr$stat_id == stat)] D[which((incr_data$stat_id == stat) & (incr_data$year == yrs[1]))] = Dlast Dnow = Dlast incr_now = min(incr_data$incr[which((incr_data$stat_id == stat) & (incr_data$year == yrs[1]))]) # loop through years and calculate approximate diameter value for (y in 2:length(yrs)){ yrnow = yrs[y] Dnow = Dnow - (2 * incr_now / 10) # if year has increment data we need to save it if (length(which((incr_data$stat_id == stat) & (incr_data$year == yrnow))) > 0){ D[which((incr_data$stat_id == stat) & (incr_data$year == yrnow))] = Dnow incr_now = min(incr_data$incr[which((incr_data$stat_id == stat) & (incr_data$year == yrnow))]) }else{ incr_now = median(incr_data$incr) } } # get approximate D0 value for tree D0s[t] = Dnow - (2 * incr_now / 10) } incr_data$D = D D0s = data.frame(D0 = D0s, stat_id = trees) # check range of predicted D0s if (min(D0s$D0) < -30 | max(D0s$D0) > 80) print('warning: estimated D0 value outside of model range') # PLOT 1: Look at values of D0 to see if diameter prior range is reasonable pl1 = ggplot(D0s) + geom_histogram(aes(x = D0), binwidth = 1) + labs(x = 'Estimated D0 value', title = paste0('Histogram of D0 Values (', min(D0s$D0), ', ', max(D0s$D0), ')')) + xlim(-40,100) + geom_vline(xintercept = 80, color = 'red') + geom_vline(xintercept = -30, color = 'red') ggsave(pl1, filename = file.path(site_dir,'runs',paste0(mvers,'_',dvers),'figures','D0_histogram.jpg')) # PLOT 2: Check to make sure growth makes sense pl2 = ggplot(incr_data) + geom_line(aes(x = year, y = D, group = stat_id, color = stat_id)) + facet_wrap(~as.factor(taxon)) + labs(y = 'diameter (cm)', title = 'Species Growth over Time') + 
theme(legend.position = 'none') ggsave(pl2, filename = file.path(site_dir,'runs',paste0(mvers,'_',dvers),'figures','species_growth_check.jpg')) # Now, we need to check the D0 assumption for the census data if (census_site){ # Then, we are going to determine what the D0 value would be in 1960 for each of the trees # based on their most recent census measurement medInc = median(incr_data$incr) # Let's just consider those trees without RW data because we already checked the other trees census_only = allTrees$stat_id[which(!(allTrees$stat_id %in% unique(incr_data$stat_id)))] D0s_census = rep(NA, length(census_only)) for (t in seq_along(census_only)){ stat = census_only[t] datanow = census_long %>% filter(stat_id == stat) firstyr = min((X_data_C %>% filter(stat_id == stat))$year) if (length(datanow$site) < 1) next # Find earliest census measurement C1 = datanow$dbh[which.min(datanow$year)] C1year = min(as.numeric(datanow$year)) # Approximate a D0 value in first year based on average increment nyears = C1year - firstyr D0s_census[t] = C1 - (2 * nyears * medInc / 10) } D0s_census = data.frame(D0 = D0s_census, stat_id = census_only) # check range of predicted D0s if (min(D0s_census$D0) < -30 | max(D0s_census$D0) > 80) print('warning: estimated D0 value outside of model range for census') pl3 = ggplot(D0s_census) + geom_histogram(aes(x = D0), binwidth = 1) + labs(x = 'Estimated D0 value', title = paste0('Histogram of D0 Values - Census (', min(D0s_census$D0), ', ', max(D0s_census$D0), ')')) + xlim(-40,100) + geom_vline(xintercept = 80, color = 'red') + geom_vline(xintercept = -30, color = 'red') ggsave(pl3, filename = file.path(site_dir,'runs',paste0(mvers,'_',dvers),'figures','D0_histogram_census.jpg')) } }
/R/build_data.R
no_license
PalEON-Project/RW-2-BIO
R
false
false
24,195
r
## Step 1: Build and format ring width data ## This script takes the ring width files (rwl format) and the stand-level inputs from the year the trees were cored and formats the data ## to run in the STAN ring width model. In getting to the correct format, this script also matches cores with tree species, ## DBH, and location within the plot. The CSV file should be in the same format as that of the sample CSV file available at (INSERT REPO URL). build_data <- function(site, dvers, mvers, prefix, census_site, cutoff = 1900){ # Prepare workspace # Had to comment these out for submitting on ND campus cluster (stupid issues with loading packages) #library(plotrix) #library(dplR) #library(fields) #library(reshape2) #library(dplyr) #library(plyr) #library(ggplot2) # Create save folders for data site_dir <- file.path('sites',site) if (!file.exists(file.path(site_dir,'runs'))) dir.create(file.path(site_dir,'runs')) if (!file.exists(file.path(site_dir,'runs',paste0(mvers,'_',dvers)))) dir.create(file.path(site_dir,'runs',paste0(mvers,'_',dvers))) if (!file.exists(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'output'))) dir.create(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'output')) if (!file.exists(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'input'))) dir.create(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'input')) if (!file.exists(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'figures'))) dir.create(file.path(site_dir,'runs',paste0(mvers,'_',dvers),'figures')) rwl_dir = file.path('sites',site,'data','raw','rwl') meta_loc = file.path('sites',site,'data','raw',paste0(site,'_treeMeta_',dvers,'.csv')) RDS_loc = file.path(site_dir,'runs',paste0(mvers,'_',dvers),'input',paste0('tree_data_', site ,'_STAN_',mvers,'_', dvers, '.RDS')) if (census_site){ census_loc = file.path('sites',site,'data','raw',paste0(site,'_census_',dvers,'.csv')) } ################################################# ################ 1. 
Extract data ################ ################################################# # Load CSV with tree data treeMeta = read.csv(meta_loc, stringsAsFactors = FALSE) len = nchar(treeMeta$ID[1]) # List of files with tree ring increments, extract only RWL files rwFiles <- list.files(rwl_dir) rwFiles <- rwFiles[grep(".rwl$", rwFiles)] rwData <- list() for(fn in rwFiles) { id <- gsub(".rw", "", fn) # Insert the contents of each file into the rwData list rwData[[id]] <- t((read.tucson(file.path(rwl_dir, fn)))) # rows are tree, cols are times } # Bind into one dataframe and insert core IDs into the matrix # Dimensions: core x year incr = ldply(rwData, rbind) incr = incr[,c(".id", sort(colnames(incr)[2:ncol(incr)]))] rownames(incr) = as.vector(unlist(lapply(rwData, rownames))) incr[,1] = rownames(incr) # Create incr_data data frame incr_data = melt(incr) colnames(incr_data) = c('id', 'year', 'incr') incr_data$year = as.vector(incr_data$year) # Remove NA values right away to quicken processing (only keep the series for which trees were alive/existed) # Also remove all increment values for before the determined cut off year, which has a default value of 1900 # Also remove all data that comes after the first year of full data (some sites have multiple years of coring) incr_data = incr_data %>% mutate(year = as.numeric(year)) %>% filter(!is.na(incr), year >= cutoff) # Assign core ID to each core incr_data$id = substr(incr_data$id, 1, len) # Assign plot to each core in data frame (if only one plot, all 1) if (!('site' %in% colnames(treeMeta))){ incr_data$plot = rep(1, nrow(incr_data)) }else{ incr_data$plot = sapply(1:length(incr_data$id), function(ind){ choose = treeMeta$site[which(treeMeta$ID == incr_data$id[ind])] return(as.numeric(substr(choose, nchar(prefix)+1, nchar(choose)))) }) } # Assign stat IDs treeMeta = treeMeta %>% filter(ID %in% unique(incr_data$id)) treeMeta$stat_id = seq(1,nrow(treeMeta)) incr_data$stat_id = as.numeric(plyr::mapvalues(incr_data$id, from = 
treeMeta$ID, to = treeMeta$stat_id, warn_missing = FALSE)) ######################################################### ################ 2. Organize census data ################ ######################################################### # Load and organize census data if census available if (census_site){ census = read.csv(census_loc, stringsAsFactors = FALSE) %>% filter(species %in% unique(treeMeta$species)) census_long = melt(census, id=c('site', 'ID', 'species', 'distance', 'finalCond')) colnames(census_long) = c('site', 'ID', 'species', 'distance','finalCond','year', 'dbh') # remove NA years and reformat census year column census_long = census_long %>% filter(!is.na(dbh)) %>% mutate(year = substr(year, 2, 3)) %>% mutate(year = ifelse(as.numeric(year) < 30, paste0('20',year), paste0('19',year))) # get census years census_years = as.numeric(sort(unique(census_long$year))) # create a list of all trees that are present in available data allTrees = full_join(census, treeMeta, by = c('ID')) %>% mutate(taxon = ifelse(is.na(species.x), species.y, species.x), distance = ifelse(is.na(distance.y), distance.x, distance.y), plot = as.numeric(substr(ID,nchar(prefix)+1,nchar(prefix)+1))) %>% select(ID, stat_id, taxon, distance, plot) allTrees$stat_id[which(is.na(allTrees$stat_id))] = seq(nrow(treeMeta)+1, nrow(treeMeta)+length(which(is.na(allTrees$stat_id)))) census_long$stat_id = as.numeric(plyr::mapvalues(census_long$ID, from = allTrees$ID, to = allTrees$stat_id, warn_missing = FALSE)) } ############################################################ ################ 3. 
Organize increment data ################ ############################################################ # Assign species to each observation incr_data$taxon = sapply(c(1:length(incr_data$id)), function(ind){ treeMeta$species[which(treeMeta$ID == incr_data$id[ind])] }) # Find year range for data year_end = max(as.numeric(incr_data$year), na.rm=TRUE) year_start = min(as.numeric(incr_data$year), na.rm=TRUE) years = seq(year_start, year_end) # Make units of time in "years since first recorded tree ring" incr_data$year = as.numeric(incr_data$year) - year_start + 1 # Order by tree and year incr_data = incr_data %>% arrange(stat_id, year) # Data frame of the first and last year of observation for each tree # First year for all years is the first year of data (which will tend to be the cutoff year) year_idx = data.frame(stat_id = as.numeric(aggregate(year~stat_id, data=incr_data, FUN=min, na.rm=TRUE)[,1]), year_end=as.numeric(aggregate(year~stat_id, incr_data, max)[,2])) year_idx$year_start = rep(1, length(year_idx$stat_id)) ######################################################### ################ 4. Organize RW DBH data ################ ######################################################### # create dataframe that gives year index corresponding to RW DBH measurement pdbh = aggregate(year~stat_id+plot+id, incr_data, max, na.rm=TRUE) %>% arrange(stat_id) pdbh$dbh = as.numeric(plyr::mapvalues(pdbh$id, from = treeMeta$ID, to = treeMeta$dbh, warn_missing = FALSE)) pdbh$distance = as.numeric(plyr::mapvalues(pdbh$id, from = treeMeta$ID, to = treeMeta$distance, warn_missing = FALSE)) pdbh$taxon = plyr::mapvalues(pdbh$id, from = treeMeta$ID, to = treeMeta$species, warn_missing = FALSE) ##################################################################### ################ 5. 
Organize RW only Model estimates ################ ##################################################################### # create dataframe that would hold all RW values we need to estimate based only on RW data # AKA loop through each tree and create a row of info for each RW value we need to estimate X_data = data.frame(meas=numeric(0), stat_id=numeric(0), year=numeric(0)) n = 1 for (tree in 1:length(year_idx$stat_id)){ stat = year_idx$stat_id[tree] year = seq(year_idx$year_start[tree], year_idx$year_end[tree]) meas = seq(n, n+length(year)-1) n = n + length(year) X_data = rbind(X_data, data.frame(meas=meas, stat_id=rep(stat, length(year)), year=year)) } ######################################################################### ################ 6. Organize RW + Census Model estimates ################ ######################################################################### if (census_site){ # Need to adjust dates of census data census_long$year = as.numeric(census_long$year) - year_start + 1 # create dataframe that would hold all RW values we need to estimate based on both RW + Census data # AKA loop through all trees and create a row of info for each RW value we need to estimate X_data_C = data.frame(meas=numeric(0), stat_id=numeric(0), year=numeric(0)) n = 1 for (tree in 1:length(allTrees$stat_id)){ stat = allTrees$stat_id[tree] # check to see if available in RW data in.RW = ifelse(stat %in% year_idx$stat_id, TRUE, FALSE) # check to see if in census in.C = ifelse(stat %in% census_long$stat_id, TRUE, FALSE) # if in both, determine latest date with available data # if RW ends before the last census measurement, we need to enable smoothing # last year will be either: # (1) year before first census without that tree if it is not in all censuses or # (2) last year with recorded data if after last census if (in.RW & in.C){ # first year is always first year of available data (tends to be cutoff) firstyr = 1 # what was the last census this tree was in? 
lastCensus = max(which(which(years %in% census_years) %in% as.numeric(census_long$year[which(census_long$stat_id == stat)]))) lastData = year_idx$year_end[year_idx$stat_id == stat] # if RW ends after census, then we just use last RW year if ((which(years %in% census_years)[lastCensus]) < lastData){ lastyr = lastData # we will stochastically pick death year for trees in processing, always run up until first year of coring # which in this case will just be the last year of data }else{ # if in last census, use last year of data (deal with in processing) if(lastCensus == length(census_years)){ lastyr = length(years) # if not in last census, use year before first census where the tree is missing and mark for smoothing }else{ lastyr = which(years == (census_years[lastCensus+1] - 1)) census_long$finalCond[which(census_long$stat_id == stat)] = 'dead' } } } # if only in RW, use year_idx if (in.RW & !in.C){ firstyr = 1 lastyr = year_idx$year_end[year_idx$stat_id == stat] } # if only in census, last year is either: # (1) year before first census date without that tree if it is not in all censuses or # (2) final year if in last census (will use firstCond later to determine if smoothing needed) # this allows us to perform mortality smoothing in processing the model output if (!in.RW & in.C){ # first year is going to be the first year with any data (tends to be cutoff) firstyr = 1 # determine the last year with census data lastCensus = max(which(which(years %in% census_years) %in% unique(census_long$year[which(census_long$stat_id == stat)]))) # if tree was in most recent census if (lastCensus == length(census_years)){ lastyr = length(years) # otherwise, run it until the year before the next census and mark for smoothing }else{ lastyr = which(years == (census_years[lastCensus+1] - 1)) census_long$finalCond[which(census_long$stat_id == stat)] = 'dead' } } if (!in.RW & !in.C){ print(paste(tree, 'is missing from both datasets')) next } # add those estimates needed for this stat ID 
year = seq(firstyr, lastyr) meas = seq(n, n+length(year)-1) n = n + length(year) X_data_C = rbind(X_data_C, data.frame(meas=meas, stat_id=rep(stat, length(year)), year=year)) } } ###################################################################################### ################ 7. Save variables as needed for model and processing ################ ###################################################################################### # if there are stat IDs without data, we just need to make sure they are removed pdbh = pdbh %>% filter(stat_id %in% unique(X_data$stat_id)) # obtain required values for rw model and processing # Number of measurements N_Xobs = nrow(incr_data) # Number of years where we have RW data N_years = length(years) # Number of trees with RW data N_Tr = nrow(pdbh) # Number of values to estimate with RW only model N_X = nrow(X_data) if (census_site){ # Number of trees that will need to be estimated N_C = length(allTrees$stat_id) # if there are stat IDs without data, we just need to make sure they are removed allTrees = allTrees %>% filter(stat_id %in% unique(X_data_C$stat_id)) # Number of census DBH measurements N_Dobs = nrow(census_long) # Number of values to estimate with the RW + Census model N_X_C = length(X_data_C$meas) # Dataframe that lists first and last RW estimate index for each tree for RW only model idx_C = data.frame(stat_id = allTrees$stat_id) idx_C$firstidx = sapply(c(1:length(idx_C$stat_id)), function(ind){ val = min(which((X_data_C$stat_id == idx_C$stat_id[ind]))) if (val == -Inf){ return(NA) }else{ return(val) } }) idx_C$lastidx = sapply(c(1:length(idx_C$stat_id)), function(ind){ val = max(which((X_data_C$stat_id == idx_C$stat_id[ind]))) if (val == -Inf){ return(NA) }else{ return(val) } }) # Maps diameter values to their respective RW estimate index Dobs2X = sapply(c(1:length(census_long$site)), function(ind){ inds = which(((X_data_C$stat_id == census_long$stat_id[ind]) & (X_data_C$year == census_long$year[ind]))) return(inds) 
}) # Determine log of all census diameter measurements to fit with RW STAN model logDobs = log(census_long$dbh) # Maps RW diameter measurements to respective RW estimate index for RW + Census model Tr2X_C = sapply((1:length(pdbh$stat_id)), function(ind){ which((X_data_C$stat_id == pdbh$stat_id[ind]) & (X_data_C$year == pdbh$year[ind])) }) # Maps RW estimates to year index and stat_id for RW + Census model X2year_C = X_data_C$year X2C = X_data_C$stat_id # Maps observed values of RW to respective RW estimate index for RW + Census model Xobs2X_C = sapply((1:length(incr_data$id)), function(ind){ which((X_data_C$stat_id == incr_data$stat_id[ind]) & (X_data_C$year == incr_data$year[ind])) }) # Data frame that contains all census measurements/data Dobs = census_long } # Maps RW diameter measurements to respective RW estimate index for RW only model Tr2X = sapply((1:length(pdbh$stat_id)), function(ind){ which((X_data$stat_id == pdbh$stat_id[ind]) & (X_data$year == pdbh$year[ind])) }) # Maps RW estimates to year index and stat_ids for RW only model X2year = X_data$year X2Tr = X_data$stat_id # Data frame that lists first and last RW estimate index for each tree for RW only model idx_Tr = year_idx idx_Tr$firstidx = sapply(c(1:length(idx_Tr$stat_id)), function(ind){ min(which((X_data$stat_id == idx_Tr$stat_id[ind]))) }) idx_Tr$lastidx = sapply(c(1:length(idx_Tr$stat_id)), function(ind){ max(which((X_data$stat_id == idx_Tr$stat_id[ind]))) }) idx_Tr = idx_Tr %>% dplyr::select(stat_id, firstidx, lastidx) # Maps observed values of RW to respective RW estimate index for RW only model Xobs2X = sapply((1:length(incr_data$id)), function(ind){ which((X_data$stat_id == incr_data$stat_id[ind]) & (X_data$year == incr_data$year[ind])) }) # Determine log of all RW diameter measurements to fit with RW STAN model logTr = log(pdbh$dbh) # Determine log of all RW increment measurements to fit with RW STAN model Xobs = incr_data$incr Xobs[Xobs==0] = 0.0001 logXobs = log(Xobs) # Larger save 
matrices # All RW measurement data Xobs = incr_data # All RW diameter measurement data Tr = pdbh ##################################################### ################ 4. Save as RDS file ################ ##################################################### if (census_site){ saveRDS(list(N_Xobs = N_Xobs, N_years = N_years, N_X = N_X, N_X_C = N_X_C, N_Tr = N_Tr, N_Dobs = N_Dobs, N_C = N_C, Tr2X = Tr2X, Tr2X_C = Tr2X_C, X2C = X2C, X2Tr = X2Tr, X2year = X2year, X2year_C = X2year_C, idx_C = idx_C, idx_Tr = idx_Tr, Dobs2X = Dobs2X, Xobs2X = Xobs2X, Xobs2X_C = Xobs2X_C, logDobs = logDobs, logTr = logTr, logXobs = logXobs, Dobs = Dobs, Tr = Tr, Xobs = Xobs, allTrees = allTrees, years = years ), file=RDS_loc) }else{ saveRDS(list(N_Xobs = N_Xobs, N_years = N_years, N_X = N_X, N_Tr = N_Tr, Tr2X = Tr2X, X2Tr = X2Tr, X2year = X2year, idx_Tr = idx_Tr, Xobs2X = Xobs2X, logTr = logTr, logXobs = logXobs, Xobs = Xobs, Tr = Tr, years = years ), file=RDS_loc) } ############################################### ################ 5. 
Check data ################ ############################################### # Organize diameter estimates trees = unique(incr_data$stat_id) D0s = rep(NA,length(trees)) D = rep(NA, length(incr_data$id)) for (t in seq_along(trees)){ stat = trees[t] yrs = rev(seq(1,max((incr_data %>% filter(stat_id == stat))$year))) Dlast = Tr$dbh[which(Tr$stat_id == stat)] D[which((incr_data$stat_id == stat) & (incr_data$year == yrs[1]))] = Dlast Dnow = Dlast incr_now = min(incr_data$incr[which((incr_data$stat_id == stat) & (incr_data$year == yrs[1]))]) # loop through years and calculate approximate diameter value for (y in 2:length(yrs)){ yrnow = yrs[y] Dnow = Dnow - (2 * incr_now / 10) # if year has increment data we need to save it if (length(which((incr_data$stat_id == stat) & (incr_data$year == yrnow))) > 0){ D[which((incr_data$stat_id == stat) & (incr_data$year == yrnow))] = Dnow incr_now = min(incr_data$incr[which((incr_data$stat_id == stat) & (incr_data$year == yrnow))]) }else{ incr_now = median(incr_data$incr) } } # get approximate D0 value for tree D0s[t] = Dnow - (2 * incr_now / 10) } incr_data$D = D D0s = data.frame(D0 = D0s, stat_id = trees) # check range of predicted D0s if (min(D0s$D0) < -30 | max(D0s$D0) > 80) print('warning: estimated D0 value outside of model range') # PLOT 1: Look at values of D0 to see if diameter prior range is reasonable pl1 = ggplot(D0s) + geom_histogram(aes(x = D0), binwidth = 1) + labs(x = 'Estimated D0 value', title = paste0('Histogram of D0 Values (', min(D0s$D0), ', ', max(D0s$D0), ')')) + xlim(-40,100) + geom_vline(xintercept = 80, color = 'red') + geom_vline(xintercept = -30, color = 'red') ggsave(pl1, filename = file.path(site_dir,'runs',paste0(mvers,'_',dvers),'figures','D0_histogram.jpg')) # PLOT 2: Check to make sure growth makes sense pl2 = ggplot(incr_data) + geom_line(aes(x = year, y = D, group = stat_id, color = stat_id)) + facet_wrap(~as.factor(taxon)) + labs(y = 'diameter (cm)', title = 'Species Growth over Time') + 
theme(legend.position = 'none') ggsave(pl2, filename = file.path(site_dir,'runs',paste0(mvers,'_',dvers),'figures','species_growth_check.jpg')) # Now, we need to check the D0 assumption for the census data if (census_site){ # Then, we are going to determine what the D0 value would be in 1960 for each of the trees # based on their most recent census measurement medInc = median(incr_data$incr) # Let's just consider those trees without RW data because we already checked the other trees census_only = allTrees$stat_id[which(!(allTrees$stat_id %in% unique(incr_data$stat_id)))] D0s_census = rep(NA, length(census_only)) for (t in seq_along(census_only)){ stat = census_only[t] datanow = census_long %>% filter(stat_id == stat) firstyr = min((X_data_C %>% filter(stat_id == stat))$year) if (length(datanow$site) < 1) next # Find earliest census measurement C1 = datanow$dbh[which.min(datanow$year)] C1year = min(as.numeric(datanow$year)) # Approximate a D0 value in first year based on average increment nyears = C1year - firstyr D0s_census[t] = C1 - (2 * nyears * medInc / 10) } D0s_census = data.frame(D0 = D0s_census, stat_id = census_only) # check range of predicted D0s if (min(D0s_census$D0) < -30 | max(D0s_census$D0) > 80) print('warning: estimated D0 value outside of model range for census') pl3 = ggplot(D0s_census) + geom_histogram(aes(x = D0), binwidth = 1) + labs(x = 'Estimated D0 value', title = paste0('Histogram of D0 Values - Census (', min(D0s_census$D0), ', ', max(D0s_census$D0), ')')) + xlim(-40,100) + geom_vline(xintercept = 80, color = 'red') + geom_vline(xintercept = -30, color = 'red') ggsave(pl3, filename = file.path(site_dir,'runs',paste0(mvers,'_',dvers),'figures','D0_histogram_census.jpg')) } }
# Math 189, Winter 16, Final Project # Sherry Diep, sadiep@ucsd.edu # Sandra Hui, s3hui@ucsd.edu # David Lee, dal002@ucsd.edu # Irving Valles, ivalles@ucsd.edu # Albert Xu, a8xu@uscd.edu # Mark Yee, mjyee@ucsd.edu setwd("Data") library(reshape2) library(party) library(caret) library(randomForest) ######################## # set up input data sets ######################## # read in training data and clean it up event_type <- read.csv("event_type.csv", header=T, stringsAsFactors=F) event_type$event_type <- as.numeric(gsub("event_type ", "", event_type$event_type)) event_type <- event_type[order(event_type$id),] event_type <- dcast(event_type, id ~ as.character(event_type)) colnames(event_type) <- c("id", paste("event_type_", colnames(event_type)[2:length(colnames(event_type))], sep="")) log_feature <- read.csv("log_feature.csv", header=T, stringsAsFactors=F) log_feature$log_feature<- as.numeric(gsub("feature ", "", log_feature$log_feature)) log_feature <- log_feature[order(log_feature$id),] log_feature <- dcast(log_feature, id ~ as.character(log_feature) + as.character(volume)) # count (volume field + log_feature) field as one feature colnames(log_feature) <- c("id", paste("log_feature_", colnames(log_feature)[2:length(colnames(log_feature))], sep="")) resource_type <- read.csv("resource_type.csv", header=T, stringsAsFactors=F) resource_type$resource_type <- as.numeric(gsub("resource_type ", "", resource_type$resource_type)) resource_type <- resource_type[order(resource_type$id),] resource_type <- dcast(resource_type, id ~ as.character(resource_type)) colnames(resource_type) <- c("id", paste("resource_type_", colnames(resource_type)[2:length(colnames(resource_type))], sep="")) severity_type <- read.csv("severity_type.csv", header=T, stringsAsFactors=F) severity_type$severity_type <- as.numeric(gsub("severity_type ", "", severity_type$severity_type)) severity_type <- severity_type[order(severity_type$id),] severity_type <- dcast(severity_type, id ~ 
as.character(severity_type)) colnames(severity_type) <- c("id", paste("severity_type_", colnames(severity_type)[2:length(colnames(severity_type))], sep="")) train <- read.csv("train.csv", header=T, stringsAsFactors=F) train$location <- as.numeric(gsub("location ", "", train$location)) train <- train[order(train$id),] # merge by id mergedCols <- merge(merge(merge(event_type, log_feature, by="id", all=T), resource_type, by="id", all=T), severity_type, by="id", all=T) mergedTraining <- merge(train, mergedCols, id="id", all.x=T) # remove cols that are all NA mergedTraining <- mergedTraining[,colSums(is.na(mergedTraining)) < nrow(mergedTraining)] # set all present values to 1, missing values to 0 mergedTraining[,4:ncol(mergedTraining)][!is.na(mergedTraining[,4:ncol(mergedTraining)])] <- 1 mergedTraining[,4:ncol(mergedTraining)][is.na(mergedTraining[,4:ncol(mergedTraining)])] <- 0 write.table(mergedTraining, file="merged_training.csv", sep=",", row.names=F, col.names=T, quote=F) ####################################### # predict fault severity from test data ####################################### # read in test data, and merge it with the data from the other files test <- read.csv("test.csv", header=T, stringsAsFactors=F) test$location <- as.numeric(gsub("location ", "", test$location)) mergedTest <- merge(test, mergedCols, id="id", all.x=T) # remove cols that are only NAs mergedTest <- mergedTest[,colSums(is.na(mergedTest)) < nrow(mergedTest)] # set all present values to 1, missing values to 0 mergedTest[,4:ncol(mergedTest)][!is.na(mergedTest[,4:ncol(mergedTest)])] <- 1 mergedTest[,4:ncol(mergedTest)][is.na(mergedTest[,4:ncol(mergedTest)])] <- 0 # only keep columns that are in the training set and the test set commonCols <- colnames(mergedTraining)[colnames(mergedTraining) %in% colnames(mergedTest)] mergedTraining <- mergedTraining[,c("fault_severity", commonCols)] mergedTest <- mergedTest[,commonCols] # the fault_severity depends on every variable except 
fault_severity, id, and location frmla <- as.formula(paste("as.factor(fault_severity) ~ ", paste(colnames(mergedTraining)[4:ncol(mergedTraining)], collapse="+"), sep="")) # get an example of one tree using all variables ct <- ctree(frmla, data=mergedTraining, controls=cforest_control(mincriterion=0, mtry=NULL)) # takes ~10 mins png("../exampleTree.png", width=3600, height=1000, units="px") plot(ct) dev.off() # use a forest of trees to make the predictions # mincriterion=0 forces large trees to be grown # mtry=NULL forces bagging cf <- cforest(frmla, data=mergedTraining, controls=cforest_control(mincriterion=0, mtry=NULL)) mergedTest$pred_fault_severity <- predict(cf, newdata=mergedTest) # takes ~12 mins write.table(mergedTest, file="results.csv", sep=",", quote=F, col.names=T, row.names=F) ############### # check results ############### # check results with out of bag error oobError <- caret:::cforestStats(cf) # takes ~5 mins print(oobError) # Accuracy Kappa # 0.6963826 0.3436285 # check against another randomForest package randForRes <- randomForest(frmla, data=mergedTraining) # takes 2.5 hrs randForRes_pred <- predict(randForRes, newdata=mergedTest) summary(randForRes_pred) # 0 1 2 NA's # 1 2 0 11168 # sanity check: compare fault severity fractions between training and test datasets comp <- NULL comp$test <- summary(mergedTest$pred_fault_severity) comp$train <- summary(as.factor(train$fault_severity)) comp <- as.data.frame(comp) comp$level <- rownames(comp) comp_frac <- NULL comp_frac$test <- comp$test / sum(comp$test) comp_frac$train <- comp$train / sum(comp$train) comp_frac$level <- comp$level comp_frac <- as.data.frame(comp_frac) comp_frac_melt <- melt(comp_frac) comp_frac_melt$variable <- c("Test", "Test", "Test", "Train", "Train", "Train") p_comp_frac <- ggplot(comp_frac_melt, aes(x=level, y=value, fill=variable)) + geom_bar(stat="identity", position="dodge") + theme_bw() + labs(x="Fault Severity", y="Fraction of Cases", fill="Data set", title="Fault 
Severity Between Data Sets") ggsave("p_comp_frac.png")
/final/Final-Report-Hui-Sandra.R
no_license
albertxu1994/math189_winter2016
R
false
false
6,062
r
# Math 189, Winter 16, Final Project # Sherry Diep, sadiep@ucsd.edu # Sandra Hui, s3hui@ucsd.edu # David Lee, dal002@ucsd.edu # Irving Valles, ivalles@ucsd.edu # Albert Xu, a8xu@uscd.edu # Mark Yee, mjyee@ucsd.edu setwd("Data") library(reshape2) library(party) library(caret) library(randomForest) ######################## # set up input data sets ######################## # read in training data and clean it up event_type <- read.csv("event_type.csv", header=T, stringsAsFactors=F) event_type$event_type <- as.numeric(gsub("event_type ", "", event_type$event_type)) event_type <- event_type[order(event_type$id),] event_type <- dcast(event_type, id ~ as.character(event_type)) colnames(event_type) <- c("id", paste("event_type_", colnames(event_type)[2:length(colnames(event_type))], sep="")) log_feature <- read.csv("log_feature.csv", header=T, stringsAsFactors=F) log_feature$log_feature<- as.numeric(gsub("feature ", "", log_feature$log_feature)) log_feature <- log_feature[order(log_feature$id),] log_feature <- dcast(log_feature, id ~ as.character(log_feature) + as.character(volume)) # count (volume field + log_feature) field as one feature colnames(log_feature) <- c("id", paste("log_feature_", colnames(log_feature)[2:length(colnames(log_feature))], sep="")) resource_type <- read.csv("resource_type.csv", header=T, stringsAsFactors=F) resource_type$resource_type <- as.numeric(gsub("resource_type ", "", resource_type$resource_type)) resource_type <- resource_type[order(resource_type$id),] resource_type <- dcast(resource_type, id ~ as.character(resource_type)) colnames(resource_type) <- c("id", paste("resource_type_", colnames(resource_type)[2:length(colnames(resource_type))], sep="")) severity_type <- read.csv("severity_type.csv", header=T, stringsAsFactors=F) severity_type$severity_type <- as.numeric(gsub("severity_type ", "", severity_type$severity_type)) severity_type <- severity_type[order(severity_type$id),] severity_type <- dcast(severity_type, id ~ 
as.character(severity_type)) colnames(severity_type) <- c("id", paste("severity_type_", colnames(severity_type)[2:length(colnames(severity_type))], sep="")) train <- read.csv("train.csv", header=T, stringsAsFactors=F) train$location <- as.numeric(gsub("location ", "", train$location)) train <- train[order(train$id),] # merge by id mergedCols <- merge(merge(merge(event_type, log_feature, by="id", all=T), resource_type, by="id", all=T), severity_type, by="id", all=T) mergedTraining <- merge(train, mergedCols, id="id", all.x=T) # remove cols that are all NA mergedTraining <- mergedTraining[,colSums(is.na(mergedTraining)) < nrow(mergedTraining)] # set all present values to 1, missing values to 0 mergedTraining[,4:ncol(mergedTraining)][!is.na(mergedTraining[,4:ncol(mergedTraining)])] <- 1 mergedTraining[,4:ncol(mergedTraining)][is.na(mergedTraining[,4:ncol(mergedTraining)])] <- 0 write.table(mergedTraining, file="merged_training.csv", sep=",", row.names=F, col.names=T, quote=F) ####################################### # predict fault severity from test data ####################################### # read in test data, and merge it with the data from the other files test <- read.csv("test.csv", header=T, stringsAsFactors=F) test$location <- as.numeric(gsub("location ", "", test$location)) mergedTest <- merge(test, mergedCols, id="id", all.x=T) # remove cols that are only NAs mergedTest <- mergedTest[,colSums(is.na(mergedTest)) < nrow(mergedTest)] # set all present values to 1, missing values to 0 mergedTest[,4:ncol(mergedTest)][!is.na(mergedTest[,4:ncol(mergedTest)])] <- 1 mergedTest[,4:ncol(mergedTest)][is.na(mergedTest[,4:ncol(mergedTest)])] <- 0 # only keep columns that are in the training set and the test set commonCols <- colnames(mergedTraining)[colnames(mergedTraining) %in% colnames(mergedTest)] mergedTraining <- mergedTraining[,c("fault_severity", commonCols)] mergedTest <- mergedTest[,commonCols] # the fault_severity depends on every variable except 
fault_severity, id, and location frmla <- as.formula(paste("as.factor(fault_severity) ~ ", paste(colnames(mergedTraining)[4:ncol(mergedTraining)], collapse="+"), sep="")) # get an example of one tree using all variables ct <- ctree(frmla, data=mergedTraining, controls=cforest_control(mincriterion=0, mtry=NULL)) # takes ~10 mins png("../exampleTree.png", width=3600, height=1000, units="px") plot(ct) dev.off() # use a forest of trees to make the predictions # mincriterion=0 forces large trees to be grown # mtry=NULL forces bagging cf <- cforest(frmla, data=mergedTraining, controls=cforest_control(mincriterion=0, mtry=NULL)) mergedTest$pred_fault_severity <- predict(cf, newdata=mergedTest) # takes ~12 mins write.table(mergedTest, file="results.csv", sep=",", quote=F, col.names=T, row.names=F) ############### # check results ############### # check results with out of bag error oobError <- caret:::cforestStats(cf) # takes ~5 mins print(oobError) # Accuracy Kappa # 0.6963826 0.3436285 # check against another randomForest package randForRes <- randomForest(frmla, data=mergedTraining) # takes 2.5 hrs randForRes_pred <- predict(randForRes, newdata=mergedTest) summary(randForRes_pred) # 0 1 2 NA's # 1 2 0 11168 # sanity check: compare fault severity fractions between training and test datasets comp <- NULL comp$test <- summary(mergedTest$pred_fault_severity) comp$train <- summary(as.factor(train$fault_severity)) comp <- as.data.frame(comp) comp$level <- rownames(comp) comp_frac <- NULL comp_frac$test <- comp$test / sum(comp$test) comp_frac$train <- comp$train / sum(comp$train) comp_frac$level <- comp$level comp_frac <- as.data.frame(comp_frac) comp_frac_melt <- melt(comp_frac) comp_frac_melt$variable <- c("Test", "Test", "Test", "Train", "Train", "Train") p_comp_frac <- ggplot(comp_frac_melt, aes(x=level, y=value, fill=variable)) + geom_bar(stat="identity", position="dodge") + theme_bw() + labs(x="Fault Severity", y="Fraction of Cases", fill="Data set", title="Fault 
Severity Between Data Sets") ggsave("p_comp_frac.png")
r0.1<-5.121 ##r0=given rinf.1<-6.179 ## rinfinity=given lambda.1<-0.229 ##lambda=given k.1<-0.025 ##kappa = given (mean reversion speed) sigma.1<-126/12 ##sigma per period vol.1<-0.0126*(sqrt(1/12))*100 ##volatility per period theta<-rinf.1+(lambda.1/k.1) ##theta=rinf+lambda/k steps1=30 ##number of periods vasicektree1<-matrix(0,steps1+1,steps1+1) ##matrix to save the interest rates in vasicektree1[1,1]<-r0.1 ##r0 vasicektree1[1,2]<-r0.1+k.1*(theta-r0.1)/12+vol.1 ##first period up value vasicektree1[2,2]<-r0.1+k.1*(theta-r0.1)/12-vol.1 ##first period down value middleexpec<-r0.1+k.1*(theta-r0.1)/12 ##expected value after first time step vasicektree1[2,3]<-middleexpec+k.1*(theta-middleexpec)/12 ##expected value after second time step (recombined rate for period 2) secmiddleup<-vasicektree1[1,2]+k.1*(theta-vasicektree1[1,2])/12 ##expected rate after second step for up value secmiddledown<-vasicektree1[2,2]+k.1*(theta-vasicektree1[2,2])/12 ##expected rate after second step for down value ##Calulation of probability and r(uu) and r(dd) using quadratic equation factorisation: ##Using factorisation: delta=b^2-4ac && values=(-b+/-Sqrt(delta))/2a aup<-secmiddleup-vasicektree1[2,3] bup<-((vasicektree1[2,3]-secmiddleup)^2)-(2*(secmiddleup^2))+(2*secmiddleup*vasicektree1[2,3]-(vol.1^2)) cup<-((secmiddleup-vasicektree1[2,3])*((secmiddleup)^2))-(((vasicektree1[2,3]-secmiddleup)^2)*secmiddleup)+(vasicektree1[2,3]*(vol.1^2)) deltaup<-(bup^2)-(4*aup*cup) ##For Up value taking the maximum irup<-max((-bup+sqrt(deltaup))/(2*aup),(-bup-sqrt(deltaup))/(2*aup)) vasicektree1[1,3]<-irup adown<-vasicektree1[2,3]-secmiddledown bdown<-(vol.1^2)-((vasicektree1[2,3]-secmiddledown)^2)-(2*vasicektree1[2,3]*secmiddledown)+(2*(secmiddledown^2)) cdown<-(secmiddledown*((vasicektree1[2,3]-secmiddledown)^2))+((secmiddledown^2)*(vasicektree1[2,3]-secmiddledown))-(vasicektree1[2,3]*(vol.1^2)) deltadown<-(bdown^2)-(4*adown*cdown) ##For Up value taking the minimum 
irdown<-min((-bdown+sqrt(deltadown))/(2*adown),(-bdown-sqrt(deltadown))/(2*adown)) vasicektree1[3,3]<-irdown for (i in 4:(steps1+1)){ currentrow1=1 if((i %% 2)==0){ # for even columns for(j in 2:(i-2)){ ##rates using 50% probabilities to find the center of tree vasicektree1[j,i]=vasicektree1[j,i-1]+k.1*(theta-vasicektree1[j,i-1])/12+(vol.1) vasicektree1[j+1,i]=vasicektree1[j,i-1]+k.1*(theta-vasicektree1[j,i-1])/12-(vol.1) } for(m in seq(1,i,(i-1))){ middleup=vasicektree1[1,(i-1)]+k.1*(theta-vasicektree1[1,(i-1)])/12 ##expected rate after prev step for up value ##Solving the quadratic equation: aupl = middleup - vasicektree1[2,i] bupl = ((vasicektree1[2,i]-middleup)^2)-(2*(middleup^2))+(2*middleup*vasicektree1[2,i]-(vol.1^2)) cupl = ((middleup-vasicektree1[2,i])*((middleup)^2))-(((vasicektree1[2,i]-middleup)^2)*middleup)+(vasicektree1[2,i]*(vol.1^2)) deltaupl = (bupl^2)-(4*aupl*cupl) irupl = max((-bupl+sqrt(deltaupl))/(2*aupl),(-bupl-sqrt(deltaupl))/(2*aupl)) middledown=vasicektree1[(i-1),(i-1)]+k.1*(theta-vasicektree1[(i-1),(i-1)])/12 ##expected rate after prev step for down value ##Solving the quadratic equation: adownl = vasicektree1[(i-1),i]-middledown bdownl = (vol.1^2)-((vasicektree1[(i-1),i]-middledown)^2)-(2*vasicektree1[(i-1),i]*middledown)+(2*(middledown^2)) cdownl = (middledown*((vasicektree1[(i-1),i]-middledown)^2))+((middledown^2)*(vasicektree1[(i-1),i]-middledown))-(vasicektree1[(i-1),i]*(vol.1^2)) deltadownl = (bdownl^2)-(4*adownl*cdownl) irdownl = min((-bdownl+sqrt(deltadownl))/(2*adownl),(-bdownl-sqrt(deltadownl))/(2*adownl)) ##rates using the new probabilities vasicektree1[1,i]=irupl vasicektree1[i,i]=irdownl } } else { #odd columns for (n in 2:(i-1)){ middleexpec = vasicektree1[(n-1),(i-2)]+k.1*(theta-vasicektree1[(n-1),(i-2)])/12 ##expected value after first time step vasicektree1[n,i]=middleexpec+k.1*(theta-middleexpec)/12 ##expected value after second time step (recombined rate for period n+1) } for(w in seq(1,i,(i-1))){ 
middleup=vasicektree1[1,(i-1)]+k.1*(theta-vasicektree1[1,(i-1)])/12 ##expected rate after prev step for up value ##Solving the quadratic equation: aupl = middleup - vasicektree1[2,i] bupl = ((vasicektree1[2,i]-middleup)^2)-(2*(middleup^2))+(2*middleup*vasicektree1[2,i]-(vol.1^2)) cupl = ((middleup-vasicektree1[2,i])*((middleup)^2))-(((vasicektree1[2,i]-middleup)^2)*middleup)+(vasicektree1[2,i]*(vol.1^2)) deltaupl = (bupl^2)-(4*aupl*cupl) irupl = max((-bupl+sqrt(deltaupl))/(2*aupl),(-bupl-sqrt(deltaupl))/(2*aupl)) middledown=vasicektree1[(i-1),(i-1)]+k.1*(theta-vasicektree1[(i-1),(i-1)])/12 ##expected rate after prev step for down value ##Solving the quadratic equation: adownl = vasicektree1[(i-1),i]-middledown bdownl = (vol.1^2)-((vasicektree1[(i-1),i]-middledown)^2)-(2*vasicektree1[(i-1),i]*middledown)+(2*(middledown^2)) cdownl = (middledown*((vasicektree1[(i-1),i]-middledown)^2))+((middledown^2)*(vasicektree1[(i-1),i]-middledown))-(vasicektree1[(i-1),i]*(vol.1^2)) deltadownl = (bdownl^2)-(4*adownl*cdownl) irdownl = min((-bdownl+sqrt(deltadownl))/(2*adownl),(-bdownl-sqrt(deltadownl))/(2*adownl)) ##rates using the new probabilities vasicektree1[1,i]=irupl vasicektree1[i,i]=irdownl } } vasicektree1 } write.csv(vasicektree1,file="Vasicek Tree.csv") ##saving the matrix as CSV file
/vasicek.R
no_license
Karagul/Vasicektree
R
false
false
5,678
r
r0.1<-5.121 ##r0=given rinf.1<-6.179 ## rinfinity=given lambda.1<-0.229 ##lambda=given k.1<-0.025 ##kappa = given (mean reversion speed) sigma.1<-126/12 ##sigma per period vol.1<-0.0126*(sqrt(1/12))*100 ##volatility per period theta<-rinf.1+(lambda.1/k.1) ##theta=rinf+lambda/k steps1=30 ##number of periods vasicektree1<-matrix(0,steps1+1,steps1+1) ##matrix to save the interest rates in vasicektree1[1,1]<-r0.1 ##r0 vasicektree1[1,2]<-r0.1+k.1*(theta-r0.1)/12+vol.1 ##first period up value vasicektree1[2,2]<-r0.1+k.1*(theta-r0.1)/12-vol.1 ##first period down value middleexpec<-r0.1+k.1*(theta-r0.1)/12 ##expected value after first time step vasicektree1[2,3]<-middleexpec+k.1*(theta-middleexpec)/12 ##expected value after second time step (recombined rate for period 2) secmiddleup<-vasicektree1[1,2]+k.1*(theta-vasicektree1[1,2])/12 ##expected rate after second step for up value secmiddledown<-vasicektree1[2,2]+k.1*(theta-vasicektree1[2,2])/12 ##expected rate after second step for down value ##Calulation of probability and r(uu) and r(dd) using quadratic equation factorisation: ##Using factorisation: delta=b^2-4ac && values=(-b+/-Sqrt(delta))/2a aup<-secmiddleup-vasicektree1[2,3] bup<-((vasicektree1[2,3]-secmiddleup)^2)-(2*(secmiddleup^2))+(2*secmiddleup*vasicektree1[2,3]-(vol.1^2)) cup<-((secmiddleup-vasicektree1[2,3])*((secmiddleup)^2))-(((vasicektree1[2,3]-secmiddleup)^2)*secmiddleup)+(vasicektree1[2,3]*(vol.1^2)) deltaup<-(bup^2)-(4*aup*cup) ##For Up value taking the maximum irup<-max((-bup+sqrt(deltaup))/(2*aup),(-bup-sqrt(deltaup))/(2*aup)) vasicektree1[1,3]<-irup adown<-vasicektree1[2,3]-secmiddledown bdown<-(vol.1^2)-((vasicektree1[2,3]-secmiddledown)^2)-(2*vasicektree1[2,3]*secmiddledown)+(2*(secmiddledown^2)) cdown<-(secmiddledown*((vasicektree1[2,3]-secmiddledown)^2))+((secmiddledown^2)*(vasicektree1[2,3]-secmiddledown))-(vasicektree1[2,3]*(vol.1^2)) deltadown<-(bdown^2)-(4*adown*cdown) ##For Up value taking the minimum 
irdown<-min((-bdown+sqrt(deltadown))/(2*adown),(-bdown-sqrt(deltadown))/(2*adown)) vasicektree1[3,3]<-irdown for (i in 4:(steps1+1)){ currentrow1=1 if((i %% 2)==0){ # for even columns for(j in 2:(i-2)){ ##rates using 50% probabilities to find the center of tree vasicektree1[j,i]=vasicektree1[j,i-1]+k.1*(theta-vasicektree1[j,i-1])/12+(vol.1) vasicektree1[j+1,i]=vasicektree1[j,i-1]+k.1*(theta-vasicektree1[j,i-1])/12-(vol.1) } for(m in seq(1,i,(i-1))){ middleup=vasicektree1[1,(i-1)]+k.1*(theta-vasicektree1[1,(i-1)])/12 ##expected rate after prev step for up value ##Solving the quadratic equation: aupl = middleup - vasicektree1[2,i] bupl = ((vasicektree1[2,i]-middleup)^2)-(2*(middleup^2))+(2*middleup*vasicektree1[2,i]-(vol.1^2)) cupl = ((middleup-vasicektree1[2,i])*((middleup)^2))-(((vasicektree1[2,i]-middleup)^2)*middleup)+(vasicektree1[2,i]*(vol.1^2)) deltaupl = (bupl^2)-(4*aupl*cupl) irupl = max((-bupl+sqrt(deltaupl))/(2*aupl),(-bupl-sqrt(deltaupl))/(2*aupl)) middledown=vasicektree1[(i-1),(i-1)]+k.1*(theta-vasicektree1[(i-1),(i-1)])/12 ##expected rate after prev step for down value ##Solving the quadratic equation: adownl = vasicektree1[(i-1),i]-middledown bdownl = (vol.1^2)-((vasicektree1[(i-1),i]-middledown)^2)-(2*vasicektree1[(i-1),i]*middledown)+(2*(middledown^2)) cdownl = (middledown*((vasicektree1[(i-1),i]-middledown)^2))+((middledown^2)*(vasicektree1[(i-1),i]-middledown))-(vasicektree1[(i-1),i]*(vol.1^2)) deltadownl = (bdownl^2)-(4*adownl*cdownl) irdownl = min((-bdownl+sqrt(deltadownl))/(2*adownl),(-bdownl-sqrt(deltadownl))/(2*adownl)) ##rates using the new probabilities vasicektree1[1,i]=irupl vasicektree1[i,i]=irdownl } } else { #odd columns for (n in 2:(i-1)){ middleexpec = vasicektree1[(n-1),(i-2)]+k.1*(theta-vasicektree1[(n-1),(i-2)])/12 ##expected value after first time step vasicektree1[n,i]=middleexpec+k.1*(theta-middleexpec)/12 ##expected value after second time step (recombined rate for period n+1) } for(w in seq(1,i,(i-1))){ 
middleup=vasicektree1[1,(i-1)]+k.1*(theta-vasicektree1[1,(i-1)])/12 ##expected rate after prev step for up value ##Solving the quadratic equation: aupl = middleup - vasicektree1[2,i] bupl = ((vasicektree1[2,i]-middleup)^2)-(2*(middleup^2))+(2*middleup*vasicektree1[2,i]-(vol.1^2)) cupl = ((middleup-vasicektree1[2,i])*((middleup)^2))-(((vasicektree1[2,i]-middleup)^2)*middleup)+(vasicektree1[2,i]*(vol.1^2)) deltaupl = (bupl^2)-(4*aupl*cupl) irupl = max((-bupl+sqrt(deltaupl))/(2*aupl),(-bupl-sqrt(deltaupl))/(2*aupl)) middledown=vasicektree1[(i-1),(i-1)]+k.1*(theta-vasicektree1[(i-1),(i-1)])/12 ##expected rate after prev step for down value ##Solving the quadratic equation: adownl = vasicektree1[(i-1),i]-middledown bdownl = (vol.1^2)-((vasicektree1[(i-1),i]-middledown)^2)-(2*vasicektree1[(i-1),i]*middledown)+(2*(middledown^2)) cdownl = (middledown*((vasicektree1[(i-1),i]-middledown)^2))+((middledown^2)*(vasicektree1[(i-1),i]-middledown))-(vasicektree1[(i-1),i]*(vol.1^2)) deltadownl = (bdownl^2)-(4*adownl*cdownl) irdownl = min((-bdownl+sqrt(deltadownl))/(2*adownl),(-bdownl-sqrt(deltadownl))/(2*adownl)) ##rates using the new probabilities vasicektree1[1,i]=irupl vasicektree1[i,i]=irdownl } } vasicektree1 } write.csv(vasicektree1,file="Vasicek Tree.csv") ##saving the matrix as CSV file
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reg_AC.R \name{AC} \alias{AC} \title{Ji and Gallo's Agreement Coefficient (AC)} \usage{ AC(data = NULL, obs, pred, tidy = FALSE, na.rm = TRUE) } \arguments{ \item{data}{(Optional) argument to call an existing data frame containing the data.} \item{obs}{Vector with observed values (numeric).} \item{pred}{Vector with predicted values (numeric).} \item{tidy}{Logical operator (TRUE/FALSE) to decide the type of return. TRUE returns a data.frame, FALSE returns a list; Default : FALSE.} \item{na.rm}{Logic argument to remove rows with missing values (NA). Default is na.rm = TRUE.} } \value{ an object of class \code{numeric} within a \code{list} (if tidy = FALSE) or within a \verb{data frame} (if tidy = TRUE). } \description{ It estimates the agreement coefficient suggested by Ji & Gallo (2006) for a continuous predicted-observed dataset. } \details{ The Ji and Gallo's AC measures general agreement, including both accuracy and precision. It is normalized, dimensionless, positively bounded (-infinity;1), and symmetric. For the formula and more details, see \href{https://adriancorrendo.github.io/metrica/articles/available_metrics_regression.html}{online-documentation} } \examples{ \donttest{ set.seed(1) X <- rnorm(n = 100, mean = 0, sd = 10) Y <- rnorm(n = 100, mean = 0, sd = 9) df <- data.frame(obs = X, pred = Y) AC(df, obs = X, pred = Y) } } \references{ Ji & Gallo (2006). An agreement coefficient for image comparison. \emph{Photogramm. Eng. Remote Sensing 7, 823–833} \doi{10.14358/PERS.72.7.823} }
/man/AC.Rd
permissive
adriancorrendo/metrica
R
false
true
1,599
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reg_AC.R \name{AC} \alias{AC} \title{Ji and Gallo's Agreement Coefficient (AC)} \usage{ AC(data = NULL, obs, pred, tidy = FALSE, na.rm = TRUE) } \arguments{ \item{data}{(Optional) argument to call an existing data frame containing the data.} \item{obs}{Vector with observed values (numeric).} \item{pred}{Vector with predicted values (numeric).} \item{tidy}{Logical operator (TRUE/FALSE) to decide the type of return. TRUE returns a data.frame, FALSE returns a list; Default : FALSE.} \item{na.rm}{Logic argument to remove rows with missing values (NA). Default is na.rm = TRUE.} } \value{ an object of class \code{numeric} within a \code{list} (if tidy = FALSE) or within a \verb{data frame} (if tidy = TRUE). } \description{ It estimates the agreement coefficient suggested by Ji & Gallo (2006) for a continuous predicted-observed dataset. } \details{ The Ji and Gallo's AC measures general agreement, including both accuracy and precision. It is normalized, dimensionless, positively bounded (-infinity;1), and symmetric. For the formula and more details, see \href{https://adriancorrendo.github.io/metrica/articles/available_metrics_regression.html}{online-documentation} } \examples{ \donttest{ set.seed(1) X <- rnorm(n = 100, mean = 0, sd = 10) Y <- rnorm(n = 100, mean = 0, sd = 9) df <- data.frame(obs = X, pred = Y) AC(df, obs = X, pred = Y) } } \references{ Ji & Gallo (2006). An agreement coefficient for image comparison. \emph{Photogramm. Eng. Remote Sensing 7, 823–833} \doi{10.14358/PERS.72.7.823} }
#library(testthat) f1 <- function() { x <- 1 } test_that("duplicatesToFiles() works", { f <- kwb.code:::duplicatesToFiles expect_error(f()) root <- if ("tests" %in% dir()) "./tests/testthat/" else "." #writeLines("f1 <- function(x) x + 1", file.path(root, "script.R")) capture.output(trees <- kwb.code::parse_scripts(root = root)) fun_duplicates <- data.frame( script = "test-function-duplicatesToFiles.R", functionName = "f1" ) capture.output(path <- f(trees, fun_duplicates, function_name = "f1")) expect_true(file.exists(path)) expect_true(length(dir(path, "^f1")) > 0L) })
/tests/testthat/test-function-duplicatesToFiles.R
permissive
KWB-R/kwb.code
R
false
false
626
r
#library(testthat) f1 <- function() { x <- 1 } test_that("duplicatesToFiles() works", { f <- kwb.code:::duplicatesToFiles expect_error(f()) root <- if ("tests" %in% dir()) "./tests/testthat/" else "." #writeLines("f1 <- function(x) x + 1", file.path(root, "script.R")) capture.output(trees <- kwb.code::parse_scripts(root = root)) fun_duplicates <- data.frame( script = "test-function-duplicatesToFiles.R", functionName = "f1" ) capture.output(path <- f(trees, fun_duplicates, function_name = "f1")) expect_true(file.exists(path)) expect_true(length(dir(path, "^f1")) > 0L) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mask.R \name{prefixMask} \alias{prefixMask} \title{Mask SigDF by probe ID prefix} \usage{ prefixMask(sdf, prefixes = NULL, invert = FALSE) } \arguments{ \item{sdf}{SigDF} \item{prefixes}{prefix characters} \item{invert}{use the complement set} } \value{ SigDF } \description{ Mask SigDF by probe ID prefix } \examples{ sdf <- resetMask(sesameDataGet("MM285.1.SigDF")) sum(prefixMask(sdf, c("ctl","rs"))$mask) sum(prefixMask(sdf, c("ctl"))$mask) sum(prefixMask(sdf, c("ctl","rs","ch"))$mask) }
/man/prefixMask.Rd
permissive
zwdzwd/sesame
R
false
true
573
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mask.R \name{prefixMask} \alias{prefixMask} \title{Mask SigDF by probe ID prefix} \usage{ prefixMask(sdf, prefixes = NULL, invert = FALSE) } \arguments{ \item{sdf}{SigDF} \item{prefixes}{prefix characters} \item{invert}{use the complement set} } \value{ SigDF } \description{ Mask SigDF by probe ID prefix } \examples{ sdf <- resetMask(sesameDataGet("MM285.1.SigDF")) sum(prefixMask(sdf, c("ctl","rs"))$mask) sum(prefixMask(sdf, c("ctl"))$mask) sum(prefixMask(sdf, c("ctl","rs","ch"))$mask) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/y2x_isp.f.R \name{y2x_isp.f} \alias{y2x_isp.f} \title{squared diameter using interpolating splines} \usage{ y2x_isp.f(x, x.grd, y.grd, ...) } \arguments{ \item{x}{relative height} \item{x.grd}{relative heights for interpolation} \item{y.grd}{diameter of taper curve at relative heights \code{x.grd} for interpolation} \item{...}{not currently used} } \value{ squared estimated diameter based on natural interpolating spline (\code{\link[stats]{splinefun}}) } \description{ Internal function not usually called by users } \author{ Edgar Kublin }
/man/y2x_isp.f.Rd
no_license
jonibio/TapeR
R
false
true
627
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/y2x_isp.f.R \name{y2x_isp.f} \alias{y2x_isp.f} \title{squared diameter using interpolating splines} \usage{ y2x_isp.f(x, x.grd, y.grd, ...) } \arguments{ \item{x}{relative height} \item{x.grd}{relative heights for interpolation} \item{y.grd}{diameter of taper curve at relative heights \code{x.grd} for interpolation} \item{...}{not currently used} } \value{ squared estimated diameter based on natural interpolating spline (\code{\link[stats]{splinefun}}) } \description{ Internal function not usually called by users } \author{ Edgar Kublin }