blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3fd1064ee14eb4ed618a2801ef34741c891a93b9 | 4b9b5ad086aaa3f8a904e4d4a6d780650c2720c4 | /R/estimateSeqDepth.R | fa58e0569ae3b0061984f16ecbf09b1a8d3ec538 | [] | no_license | sunhaifeng123/exomePeak2 | f73cb398c8ae3562218ec9116acc410eadadaada | 791e74286a9dcba947a3e7a832641f0b621f6c3c | refs/heads/master | 2020-05-31T05:13:36.001059 | 2019-06-04T04:27:14 | 2019-06-04T04:27:14 | 190,114,784 | 0 | 0 | null | 2019-06-04T02:27:41 | 2019-06-04T02:27:40 | null | UTF-8 | R | false | false | 3,300 | r | estimateSeqDepth.R | #' @title Estimate the Sequencing Depth Size Factors for Peak Statistics Quantification.
#'
#' @description \code{estimateSeqDepth} estimates sequencing depth size factors for each MeRIP-seq sample.
#' Under default setting, the sequencing depth are estimated by the robust estimator defined in package DESeq2.
#' i.e. the median of the ratios to the row geometric means.
#'
#' @details The function takes the input of a \code{\link{summarizedExomePeak}} object,
#' and it estimates the sequencing depth size factors by the columns of its \link{assay}.
#'
#' @param sep a \code{\link{summarizedExomePeak}} object.
#' @param from a \code{character} specify the subset of features for sequencing depth estimation, can be one of \code{c("Control", "Modification", "Both")}.
#'
#' \describe{
#' \item{\strong{\code{Control}}}{
#' The sequencing depths are estimated from the background control regions. This could make the IP/input LFC estimates become a rescaled version of the real modification proportion.
#' }
#'
#' \item{\strong{\code{Modification}}}{
#' The sequencing depths are estimated from the modification peaks/sites.
#' }
#'
#' \item{\strong{\code{Both}}}{
#' The sequencing depths are estimated from both the control and modification features.
#' }
#' }
#'
#' Under default setting, the sequencing depth factors are estimated from the background control regions.
#'
#' @param ... inherited from \code{\link{estimateSizeFactorsForMatrix}}.
#'
#' @examples
#'
#' library(TxDb.Hsapiens.UCSC.hg19.knownGene)
#' library(BSgenome.Hsapiens.UCSC.hg19)
#'
#' aln <- scanMeripBAM(
#' bam_ip = c("IP_rep1.bam",
#' "IP_rep2.bam",
#' "IP_rep3.bam"),
#' bam_input = c("input_rep1.bam",
#' "input_rep2.bam",
#' "input_rep3.bam"),
#' paired_end = TRUE
#' )
#'
#'sep <- exomePeakCalling(merip_bams = aln,
#' txdb = TxDb.Hsapiens.UCSC.hg19.knownGene,
#' bsgenome = Hsapiens)
#'
#'sep <- estimateSeqDepth(sep)
#'
#' @seealso \code{\link{normalizeGC}}
#'
#' @return This function will return a \code{\link{summarizedExomePeak}} object containing newly estimated sequencing depth size factors.
#'
#' @importFrom DESeq2 estimateSizeFactorsForMatrix
#'
#' @docType methods
#'
#' @name estimateSeqDepth
#'
#' @rdname estimateSeqDepth
#'
#' @export
#'
setMethod("estimateSeqDepth",
          "SummarizedExomePeak",
          function(sep,
                   from = c("Control", "Modification", "Both"),
                   ...) {
  # Resolve which feature subset the size factors are estimated from;
  # errors informatively on anything outside the documented choices.
  from <- match.arg(from)

  if (from == "Control") {
    # Background control regions are labelled "control" in the row names.
    control_peaks_indx <- grepl("control", rownames(sep))
    if (sum(control_peaks_indx) == 0) {
      # No control rows found: fall back to estimating from all rows.
      warning("Cannot find control peaks, the size factors are estimated using the modification containing peaks.", call. = FALSE, immediate. = TRUE)
      # FIX: forward `...` to estimateSizeFactorsForMatrix as documented
      # (the original accepted `...` but never passed it on).
      sep$sizeFactor <- estimateSizeFactorsForMatrix(assay(sep), ...)
    } else {
      sep$sizeFactor <- estimateSizeFactorsForMatrix(assay(sep[control_peaks_indx, ]), ...)
    }
  }

  if (from == "Modification") {
    # Modification peaks/sites are labelled "mod" in the row names.
    mod_peaks_indx <- grepl("mod", rownames(sep))
    sep$sizeFactor <- estimateSizeFactorsForMatrix(assay(sep[mod_peaks_indx, ]), ...)
  }

  if (from == "Both") {
    # Use every feature (control and modification) for the estimate.
    sep$sizeFactor <- estimateSizeFactorsForMatrix(assay(sep), ...)
  }

  return(sep)
})
|
3181af9d5aff255ce365ba9cdc815f528a515e40 | 2fe4c16e0377a99e198ab04d5c378ca247ae4329 | /Rscript/R/mzkit/R/precursor_type.R | 60e84622b6b01474f3c48de6cfef6e48246ff171 | [
"MIT"
] | permissive | xieguigang/mzkit | 1964d28b0fad5f6d44950fdccdd4a70877f75c29 | 6391304b550f7e4b8bb6097a6fb1c0d3b6785ef1 | refs/heads/master | 2023-08-31T06:51:55.354166 | 2023-08-30T08:56:32 | 2023-08-30T08:56:32 | 86,005,665 | 37 | 11 | MIT | 2023-03-14T14:18:44 | 2017-03-23T23:03:07 | Visual Basic .NET | UTF-8 | R | false | false | 3,525 | r | precursor_type.R | #Region "Microsoft.ROpen::9d6946b34200dec0b29bee369766ffa4, R\precursor_type.R"
# Summaries:
# PrecursorType <- function() {...
# adduct.mz <- function(mass, adduct, charge) {...
# adduct.mz.general <- function(mass, adduct, charge) {# Evaluate the formula expression to weightsif (!is.numeric(adducts)) {...
# reverse.mass <- function(precursorMZ, M, charge, adduct) {...
# reverse.mass.general <- function(precursorMZ, M, charge, adduct) {# Evaluate the formula expression to weightsif (!is.numeric(adducts)) {...
# .addKey <- function(type, charge, M, adducts) {# Evaluate the formula expression to weightsif (!is.numeric(adducts)) {...
#End Region
# https://github.com/xieguigang/MassSpectrum-toolkits/blob/6f4284a0d537d86c112877243d9e3b8d9d35563f/DATA/ms2_math-core/Ms1/PrecursorType.vb
#' The precursor type data model
#'
#' @details This helper function returns a list, with members:
#' \enumerate{
#' \item \code{mz} Calculate mass \code{m/z} value with
#' given adduct and charge values.
#' \item \code{mass} Calculate mass value from given
#' \code{m/z} with given adduct and charge, etc.
#' \item \code{new} Create a new mass and \code{m/z}
#' calculator from given adduct info
#' }
#'
PrecursorType <- function() {
  # Evaluates an adduct formula expression (e.g. a precursor-type string) to a
  # molecular weight. `Eval` and `MolWeight` are project helpers defined
  # elsewhere in this package.
  .eval <- Eval(MolWeight)$Eval

  #' Calculate m/z
  #'
  #' @param mass Molecule weight
  #' @param adduct Adduct mass (numeric)
  #' @param charge Precursor charge value
  #'
  #' @return The m/z value of the precursor ion: (mass + adduct) / |charge|.
  adduct.mz <- function(mass, adduct, charge) {
    (mass + adduct) / abs(charge)
  }

  # As adduct.mz, but also accepts the adduct as a (non-numeric) formula
  # expression which is evaluated to a weight first.
  adduct.mz.general <- function(mass, adduct, charge) {
    # BUG FIX: the original tested and assigned an undefined variable
    # `adducts`, so a non-numeric adduct either errored or was silently
    # ignored. Evaluate the actual `adduct` argument instead.
    if (!is.numeric(adduct)) {
      adduct <- .eval(adduct)
    }
    adduct.mz(mass, adduct, charge)
  }

  #' Calculate mass from m/z
  #'
  #' @param precursorMZ MS/MS precursor adduct ion m/z
  #' @param M The number of molecules forming the precursor adduct ion
  #' @param charge Net charge of the ion
  #' @param adduct Adduct mass (numeric)
  #'
  #' @return The molecule mass: (m/z * |charge| - adduct) / M.
  reverse.mass <- function(precursorMZ, M, charge, adduct) {
    (precursorMZ * abs(charge) - adduct) / M
  }

  # As reverse.mass, but also accepts the adduct as a formula expression.
  reverse.mass.general <- function(precursorMZ, M, charge, adduct) {
    # BUG FIX: same undefined-`adducts` defect as adduct.mz.general.
    if (!is.numeric(adduct)) {
      adduct <- .eval(adduct)
    }
    reverse.mass(precursorMZ, M, charge, adduct)
  }

  #' Construct a \code{PrecursorType} model
  #'
  #' @param type Full name of the precursor type
  #' @param charge The ion charge value, no sign required
  #' @param M The number of the target molecule
  #' @param adducts The precursor adducts formula expression (or a numeric weight)
  .addKey <- function(type, charge, M, adducts) {
    # Evaluate the formula expression to a weight if needed.
    if (!is.numeric(adducts)) {
      adducts <- .eval(adducts)
    }
    # Closures capturing M / charge / adducts for this precursor type.
    calc_mass <- function(precursorMZ) {
      reverse.mass(precursorMZ, M, charge, adducts)
    }
    calc_mz <- function(mass) {
      adduct.mz(mass * M, adducts, charge)
    }
    new("PrecursorType",
        Name = type,
        calc = calc_mass,
        charge = charge,
        M = M,
        adduct = adducts,
        cal.mz = calc_mz)
  }

  # Public interface of the model.
  list(mz = adduct.mz.general,
       mass = reverse.mass.general,
       new = .addKey)
}
|
5fab472043256899b4ca21ebe64151d52d28bb61 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/detrendeR/examples/detrendeR-package.Rd.R | 703835332ce660abd3039c20202bf133d4351e39 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 249 | r | detrendeR-package.Rd.R | library(detrendeR)
### Name: detrendeR-package
### Title: detrendeR - A Graphical User Interface to process and visualize
###   tree-ring data using R
### Aliases: detrendeR-package detrendeR
### Keywords: package
### ** Examples
# Launches the detrendeR graphical user interface; interactive (opens a
# Tcl/Tk window), so this call returns nothing useful non-interactively.
detrender()
|
bf62e5fc39c50c68a88a3cd08f83668664bebebb | fc406111d61c870776c2ffc1ce050ea0c59244fb | /R/UKBB_formatting_cancer.r | 1681480c4a6d650f0bc2daf8292daa421abf779f | [
"MIT"
] | permissive | mightyphil2000/UkbCancerMortality | 1628e0df24dfe15a778dcbdc82106ce369f00ea1 | 2aeed302eb530ad11a0d0c95c0957652a660f058 | refs/heads/main | 2023-08-22T01:16:42.217612 | 2021-10-20T13:47:10 | 2021-10-20T13:47:10 | 351,126,217 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 106,658 | r | UKBB_formatting_cancer.r | #' @importFrom magrittr %>%
# Derive lung cancer case-indicator columns from the UK Biobank registry data.
#
# NOTE(review): operates on a globally bound phenotype data frame `bd` and the
# helper `UKBcancerFunc()`, both defined elsewhere in this package -- confirm
# they are in scope before calling. Cancer-registry ICD10 codes are held in
# columns "f.40006.*", ICD9 codes in "f.40013.*", and self-reported cancers in
# "f.20001.*".
#
# Returns `bd` with columns appended: per-instance lung/other ICD9 and ICD10
# flags (added by UKBcancerFunc), `lungPrevelent` / `otherPrevelent`
# (1 for cases, NA otherwise) and `lungSelfreport`.
lung_cancer_function <- function() {
  # library() rather than require(): fail immediately and loudly if a package
  # is missing instead of silently continuing and erroring later.
  library(tidyr)
  library(dplyr)

  # Distinct ICD10 codes across all registry instances; keep malignant "C"
  # codes only (D and O codes are captured by the "other" indicator so they
  # can be excluded from controls).
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- allICD10[!is.na(allICD10)]
  allICD10 <- allICD10[!duplicated(allICD10)]
  allICD10 <- grep("C", allICD10, value = TRUE)

  # ICD10 C340-C349: malignant neoplasm of bronchus and lung.
  toMatch <- c("C340", "C341", "C342", "C343", "C348", "C349")
  lungICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 codes across all registry instances.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- allICD9[!is.na(allICD9)]
  allICD9 <- allICD9[!duplicated(allICD9)]

  # ICD9 1622-1629: malignant neoplasm of trachea, bronchus and lung.
  toMatch <- c("1622", "1623", "1624", "1625", "1628", "1629")
  lungICD9 <- unique(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))
  lungICD9 <- as.integer(lungICD9)

  # Flag site-specific and "other" cancers under both coding systems.
  bd <- UKBcancerFunc(dat = bd, cancerCode = lungICD9, sitename = "lungICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = lungICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = lungICD10, sitename = "lungICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = lungICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")

  # Prevalent cases: flagged in the "<site>ICD91"/"<site>ICD101" columns
  # created by UKBcancerFunc (presumably the first registry instance -- TODO
  # confirm against UKBcancerFunc). Cases are 1; everyone else is left NA.
  bd$lungPrevelent <- ifelse(!is.na(bd$lungICD101), 1, NA)
  bd$lungPrevelent <- ifelse(!is.na(bd$lungICD91), 1, bd$lungPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)

  # NOTE(review): this flags ANY self-reported cancer (codes 1001-99999), not
  # lung specifically, and NA instance fields propagate NA through the chain
  # -- confirm this matches the intended definition.
  bd$lungSelfreport <- ifelse(between(bd$f.20001.0.0, 1001, 99999), 1,
                       ifelse(between(bd$f.20001.0.1, 1001, 99999), 1,
                       ifelse(between(bd$f.20001.0.2, 1001, 99999), 1,
                       ifelse(between(bd$f.20001.0.3, 1001, 99999), 1,
                       ifelse(between(bd$f.20001.0.4, 1001, 99999), 1,
                       ifelse(between(bd$f.20001.0.5, 1001, 99999), 1,
                       ifelse(between(bd$f.20001.1.0, 1001, 99999), 1,
                       ifelse(between(bd$f.20001.1.1, 1001, 99999), 1,
                       ifelse(between(bd$f.20001.1.2, 1001, 99999), 1,
                       ifelse(between(bd$f.20001.1.3, 1001, 99999), 1,
                       ifelse(between(bd$f.20001.1.4, 1001, 99999), 1,
                       ifelse(between(bd$f.20001.1.5, 1001, 99999), 1, 0))))))))))))

  return(bd)
}
# Derive acute lymphoblastic leukaemia case-indicator columns from the UK
# Biobank registry data.
#
# NOTE(review): operates on a globally bound phenotype data frame `bd` and the
# helper `UKBcancerFunc()`, both defined elsewhere -- confirm they are in
# scope. Registry ICD10 codes: "f.40006.*"; ICD9: "f.40013.*"; self-reported
# cancers: "f.20001.*".
#
# @param icd9,icd10 Unused; accepted for call-site compatibility.
# Returns `bd` with acute_lymph_leuk/other indicator columns appended.
acute_lymphoblastic_leukemia_function <- function(icd9 = NULL, icd10 = NULL) {
  # Distinct ICD10 codes; malignant "C" codes only (D and O codes fall into
  # the "other" indicator so they can be excluded from controls).
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- allICD10[!is.na(allICD10)]
  allICD10 <- allICD10[!duplicated(allICD10)]
  allICD10 <- grep("C", allICD10, value = TRUE)

  # ICD10 C910: acute lymphoblastic leukaemia.
  toMatch <- c("C910")
  acute_lymph_leukICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 codes; 2040 = acute lymphoid leukaemia.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- allICD9[!is.na(allICD9)]
  allICD9 <- allICD9[!duplicated(allICD9)]
  toMatch <- c("2040")
  acute_lymph_leukICD9 <- unique(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))
  acute_lymph_leukICD9 <- as.integer(acute_lymph_leukICD9)

  # Flag site-specific and "other" cancers under both coding systems.
  bd <- UKBcancerFunc(dat = bd, cancerCode = acute_lymph_leukICD9, sitename = "acute_lymph_leukICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = acute_lymph_leukICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = acute_lymph_leukICD10, sitename = "acute_lymph_leukICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = acute_lymph_leukICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")

  # Overall (prevalent) cases: flagged under either coding system in the
  # "<site>ICD91"/"<site>ICD101" columns created by UKBcancerFunc.
  # Cases are 1; everyone else is left NA.
  bd$acute_lymph_leukPrevelent <- ifelse(!is.na(bd$acute_lymph_leukICD101), 1, NA)
  bd$acute_lymph_leukPrevelent <- ifelse(!is.na(bd$acute_lymph_leukICD91), 1, bd$acute_lymph_leukPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)

  print("getting self-report cancers...")
  # NOTE(review): flags ANY self-reported cancer (codes 1001-99999), not ALL
  # specifically; NA instance fields propagate NA through the chain --
  # confirm this is intended.
  bd$acute_lymph_leukSelfreport <- ifelse(between(bd$f.20001.0.0, 1001, 99999), 1,
                                   ifelse(between(bd$f.20001.0.1, 1001, 99999), 1,
                                   ifelse(between(bd$f.20001.0.2, 1001, 99999), 1,
                                   ifelse(between(bd$f.20001.0.3, 1001, 99999), 1,
                                   ifelse(between(bd$f.20001.0.4, 1001, 99999), 1,
                                   ifelse(between(bd$f.20001.0.5, 1001, 99999), 1,
                                   ifelse(between(bd$f.20001.1.0, 1001, 99999), 1,
                                   ifelse(between(bd$f.20001.1.1, 1001, 99999), 1,
                                   ifelse(between(bd$f.20001.1.2, 1001, 99999), 1,
                                   ifelse(between(bd$f.20001.1.3, 1001, 99999), 1,
                                   ifelse(between(bd$f.20001.1.4, 1001, 99999), 1,
                                   ifelse(between(bd$f.20001.1.5, 1001, 99999), 1, 0))))))))))))

  return(bd)
}
# Derive endometrial cancer case-indicator columns from the UK Biobank
# registry data.
#
# NOTE(review): operates on a globally bound phenotype data frame `bd` and the
# helper `UKBcancerFunc()`, both defined elsewhere -- confirm they are in
# scope. Registry ICD10 codes: "f.40006.*"; ICD9: "f.40013.*"; self-reported
# cancers: "f.20001.*".
#
# @param icd9,icd10 Unused; accepted for call-site compatibility.
# Returns `bd` with endometrial/other indicator columns appended.
endometrial_cancer_function <- function(icd9 = NULL, icd10 = NULL) {
  # Distinct ICD10 codes; malignant "C" codes only (D and O codes fall into
  # the "other" indicator so they can be excluded from controls).
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- allICD10[!is.na(allICD10)]
  allICD10 <- allICD10[!duplicated(allICD10)]
  allICD10 <- grep("C", allICD10, value = TRUE)

  # ICD10 C541: malignant neoplasm of endometrium.
  toMatch <- c("C541")
  endometrialICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 codes; 1820 = malignant neoplasm of corpus uteri.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- allICD9[!is.na(allICD9)]
  allICD9 <- allICD9[!duplicated(allICD9)]
  toMatch <- c(1820)
  endometrialICD9 <- unique(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))
  endometrialICD9 <- as.integer(endometrialICD9)

  # Flag site-specific and "other" cancers under both coding systems.
  bd <- UKBcancerFunc(dat = bd, cancerCode = endometrialICD9, sitename = "endometrialICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = endometrialICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = endometrialICD10, sitename = "endometrialICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = endometrialICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")

  # Prevalent cases: flagged under either coding system in the
  # "<site>ICD91"/"<site>ICD101" columns created by UKBcancerFunc.
  # Cases are 1; everyone else is left NA.
  bd$endometrialPrevelent <- ifelse(!is.na(bd$endometrialICD101), 1, NA)
  bd$endometrialPrevelent <- ifelse(!is.na(bd$endometrialICD91), 1, bd$endometrialPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)

  print("getting self-report endometrials...")
  # NOTE(review): flags ANY self-reported cancer (codes 1001-99999), not
  # endometrial specifically; NA instance fields propagate NA -- confirm.
  bd$endometrialSelfreport <- ifelse(between(bd$f.20001.0.0, 1001, 99999), 1,
                              ifelse(between(bd$f.20001.0.1, 1001, 99999), 1,
                              ifelse(between(bd$f.20001.0.2, 1001, 99999), 1,
                              ifelse(between(bd$f.20001.0.3, 1001, 99999), 1,
                              ifelse(between(bd$f.20001.0.4, 1001, 99999), 1,
                              ifelse(between(bd$f.20001.0.5, 1001, 99999), 1,
                              ifelse(between(bd$f.20001.1.0, 1001, 99999), 1,
                              ifelse(between(bd$f.20001.1.1, 1001, 99999), 1,
                              ifelse(between(bd$f.20001.1.2, 1001, 99999), 1,
                              ifelse(between(bd$f.20001.1.3, 1001, 99999), 1,
                              ifelse(between(bd$f.20001.1.4, 1001, 99999), 1,
                              ifelse(between(bd$f.20001.1.5, 1001, 99999), 1, 0))))))))))))

  return(bd)
}
# bd4<-bd
# Derive acute myeloid leukaemia (AML) case-indicator columns from the UK
# Biobank registry data.
#
# NOTE(review): operates on a globally bound phenotype data frame `bd` and the
# helper `UKBcancerFunc()`, both defined elsewhere -- confirm they are in
# scope. Registry ICD10 codes: "f.40006.*"; ICD9: "f.40013.*"; self-reported
# cancers: "f.20001.*".
#
# @param icd9,icd10 Unused; accepted for call-site compatibility.
# Returns `bd` with aml/other indicator columns appended.
aml_cancer_function <- function(icd9 = NULL, icd10 = NULL) {
  # Distinct ICD10 codes; malignant "C" codes only (D and O codes fall into
  # the "other" indicator so they can be excluded from controls).
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- allICD10[!is.na(allICD10)]
  allICD10 <- allICD10[!duplicated(allICD10)]
  allICD10 <- grep("C", allICD10, value = TRUE)

  # ICD10 C920: acute myeloid leukaemia.
  toMatch <- c("C920")
  amlICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 codes; 2050 = acute myeloid leukaemia.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- allICD9[!is.na(allICD9)]
  allICD9 <- allICD9[!duplicated(allICD9)]
  toMatch <- c(2050)
  amlICD9 <- unique(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))
  amlICD9 <- as.integer(amlICD9)

  # Flag site-specific and "other" cancers under both coding systems.
  bd <- UKBcancerFunc(dat = bd, cancerCode = amlICD9, sitename = "amlICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = amlICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = amlICD10, sitename = "amlICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = amlICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")

  # Prevalent cases: flagged under either coding system in the
  # "<site>ICD91"/"<site>ICD101" columns created by UKBcancerFunc.
  # Cases are 1; everyone else is left NA.
  bd$amlPrevelent <- ifelse(!is.na(bd$amlICD101), 1, NA)
  bd$amlPrevelent <- ifelse(!is.na(bd$amlICD91), 1, bd$amlPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)

  print("getting self-report amls...")
  # NOTE(review): flags ANY self-reported cancer (codes 1001-99999), not AML
  # specifically; NA instance fields propagate NA -- confirm.
  bd$amlSelfreport <- ifelse(between(bd$f.20001.0.0, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.0.1, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.0.2, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.0.3, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.0.4, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.0.5, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.0, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.1, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.2, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.3, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.4, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.5, 1001, 99999), 1, 0))))))))))))

  return(bd)
}
# bd4<-bd
# Derive stomach cancer case-indicator columns from the UK Biobank registry
# data.
#
# NOTE(review): operates on a globally bound phenotype data frame `bd` and the
# helper `UKBcancerFunc()`, both defined elsewhere -- confirm they are in
# scope. Registry ICD10 codes: "f.40006.*"; ICD9: "f.40013.*"; self-reported
# cancers: "f.20001.*".
#
# @param icd9,icd10 Unused; accepted for call-site compatibility.
# Returns `bd` with stomach/other indicator columns appended.
stomach_cancer_function <- function(icd9 = NULL, icd10 = NULL) {
  # Distinct ICD10 codes; malignant "C" codes only (D and O codes fall into
  # the "other" indicator so they can be excluded from controls).
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- allICD10[!is.na(allICD10)]
  allICD10 <- allICD10[!duplicated(allICD10)]
  allICD10 <- grep("C", allICD10, value = TRUE)

  # ICD10 C160-C169: malignant neoplasm of stomach (there is no C167).
  toMatch <- c("C160", "C161", "C162", "C163", "C164", "C165", "C166", "C168", "C169")
  stomachICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 codes; 151x = malignant neoplasm of stomach.
  # NOTE(review): only the 151x subcodes listed below are matched -- confirm
  # the omission of e.g. 1511/1513/1516/1518 is intentional.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- allICD9[!is.na(allICD9)]
  allICD9 <- allICD9[!duplicated(allICD9)]
  toMatch <- c(1510, 1512, 1514, 1515, 1519)
  stomachICD9 <- unique(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))
  stomachICD9 <- as.integer(stomachICD9)

  # Flag site-specific and "other" cancers under both coding systems.
  bd <- UKBcancerFunc(dat = bd, cancerCode = stomachICD9, sitename = "stomachICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = stomachICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = stomachICD10, sitename = "stomachICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = stomachICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")

  # Prevalent cases: flagged under either coding system in the
  # "<site>ICD91"/"<site>ICD101" columns created by UKBcancerFunc.
  # Cases are 1; everyone else is left NA.
  bd$stomachPrevelent <- ifelse(!is.na(bd$stomachICD101), 1, NA)
  bd$stomachPrevelent <- ifelse(!is.na(bd$stomachICD91), 1, bd$stomachPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)

  print("getting self-report stomachs...")
  # NOTE(review): flags ANY self-reported cancer (codes 1001-99999), not
  # stomach specifically; NA instance fields propagate NA -- confirm.
  bd$stomachSelfreport <- ifelse(between(bd$f.20001.0.0, 1001, 99999), 1,
                          ifelse(between(bd$f.20001.0.1, 1001, 99999), 1,
                          ifelse(between(bd$f.20001.0.2, 1001, 99999), 1,
                          ifelse(between(bd$f.20001.0.3, 1001, 99999), 1,
                          ifelse(between(bd$f.20001.0.4, 1001, 99999), 1,
                          ifelse(between(bd$f.20001.0.5, 1001, 99999), 1,
                          ifelse(between(bd$f.20001.1.0, 1001, 99999), 1,
                          ifelse(between(bd$f.20001.1.1, 1001, 99999), 1,
                          ifelse(between(bd$f.20001.1.2, 1001, 99999), 1,
                          ifelse(between(bd$f.20001.1.3, 1001, 99999), 1,
                          ifelse(between(bd$f.20001.1.4, 1001, 99999), 1,
                          ifelse(between(bd$f.20001.1.5, 1001, 99999), 1, 0))))))))))))

  return(bd)
}
# bd4<-bd
# Derive pancreatic cancer case-indicator columns from the UK Biobank registry
# data.
#
# NOTE(review): operates on a globally bound phenotype data frame `bd` and the
# helper `UKBcancerFunc()`, both defined elsewhere -- confirm they are in
# scope. Registry ICD10 codes: "f.40006.*"; ICD9: "f.40013.*"; self-reported
# cancers: "f.20001.*".
#
# @param icd9,icd10 Unused; accepted for call-site compatibility.
# Returns `bd` with pancreatic/other indicator columns appended.
pancreatic_cancer_function <- function(icd9 = NULL, icd10 = NULL) {
  # Distinct ICD10 codes; malignant "C" codes only (D and O codes fall into
  # the "other" indicator so they can be excluded from controls).
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- allICD10[!is.na(allICD10)]
  allICD10 <- allICD10[!duplicated(allICD10)]
  allICD10 <- grep("C", allICD10, value = TRUE)

  # ICD10 C250-C259: malignant neoplasm of pancreas.
  toMatch <- c("C250", "C251", "C252", "C253", "C254", "C257", "C258", "C259")
  pancreaticICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 codes; 157x = malignant neoplasm of pancreas.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- allICD9[!is.na(allICD9)]
  allICD9 <- allICD9[!duplicated(allICD9)]
  toMatch <- c(1570, 1571, 1572, 1573, 1574, 1578, 1579)
  pancreaticICD9 <- unique(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))
  pancreaticICD9 <- as.integer(pancreaticICD9)

  # Flag site-specific and "other" cancers under both coding systems.
  bd <- UKBcancerFunc(dat = bd, cancerCode = pancreaticICD9, sitename = "pancreaticICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = pancreaticICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = pancreaticICD10, sitename = "pancreaticICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = pancreaticICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")

  # Prevalent cases: flagged under either coding system in the
  # "<site>ICD91"/"<site>ICD101" columns created by UKBcancerFunc.
  # Cases are 1; everyone else is left NA.
  bd$pancreaticPrevelent <- ifelse(!is.na(bd$pancreaticICD101), 1, NA)
  bd$pancreaticPrevelent <- ifelse(!is.na(bd$pancreaticICD91), 1, bd$pancreaticPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)

  print("getting self-report pancreatics...")
  # NOTE(review): flags ANY self-reported cancer (codes 1001-99999), not
  # pancreatic specifically; NA instance fields propagate NA -- confirm.
  bd$pancreaticSelfreport <- ifelse(between(bd$f.20001.0.0, 1001, 99999), 1,
                             ifelse(between(bd$f.20001.0.1, 1001, 99999), 1,
                             ifelse(between(bd$f.20001.0.2, 1001, 99999), 1,
                             ifelse(between(bd$f.20001.0.3, 1001, 99999), 1,
                             ifelse(between(bd$f.20001.0.4, 1001, 99999), 1,
                             ifelse(between(bd$f.20001.0.5, 1001, 99999), 1,
                             ifelse(between(bd$f.20001.1.0, 1001, 99999), 1,
                             ifelse(between(bd$f.20001.1.1, 1001, 99999), 1,
                             ifelse(between(bd$f.20001.1.2, 1001, 99999), 1,
                             ifelse(between(bd$f.20001.1.3, 1001, 99999), 1,
                             ifelse(between(bd$f.20001.1.4, 1001, 99999), 1,
                             ifelse(between(bd$f.20001.1.5, 1001, 99999), 1, 0))))))))))))

  return(bd)
}
# bd1<-bd
# Derive kidney cancer case-indicator columns from the UK Biobank registry
# data.
#
# NOTE(review): operates on a globally bound phenotype data frame `bd` and the
# helper `UKBcancerFunc()`, both defined elsewhere -- confirm they are in
# scope. Registry ICD10 codes: "f.40006.*"; ICD9: "f.40013.*"; self-reported
# cancers: "f.20001.*".
#
# @param icd9,icd10 Unused; accepted for call-site compatibility.
# Returns `bd` with kidney/other indicator columns appended.
kidney_cancer_function <- function(icd9 = NULL, icd10 = NULL) {
  # Distinct ICD10 codes; malignant "C" codes only (D and O codes fall into
  # the "other" indicator so they can be excluded from controls).
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- allICD10[!is.na(allICD10)]
  allICD10 <- allICD10[!duplicated(allICD10)]
  allICD10 <- grep("C", allICD10, value = TRUE)

  # ICD10 C64: malignant neoplasm of kidney, except renal pelvis
  # (substring match, so any C64x subcode is captured).
  toMatch <- c("C64")
  kidneyICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 codes; 1890 = malignant neoplasm of kidney.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- allICD9[!is.na(allICD9)]
  allICD9 <- allICD9[!duplicated(allICD9)]
  toMatch <- 1890
  kidneyICD9 <- unique(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))
  kidneyICD9 <- as.integer(kidneyICD9)

  # Flag site-specific and "other" cancers under both coding systems.
  bd <- UKBcancerFunc(dat = bd, cancerCode = kidneyICD9, sitename = "kidneyICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = kidneyICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = kidneyICD10, sitename = "kidneyICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = kidneyICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")

  # Prevalent cases: flagged under either coding system in the
  # "<site>ICD91"/"<site>ICD101" columns created by UKBcancerFunc.
  # Cases are 1; everyone else is left NA.
  bd$kidneyPrevelent <- ifelse(!is.na(bd$kidneyICD101), 1, NA)
  bd$kidneyPrevelent <- ifelse(!is.na(bd$kidneyICD91), 1, bd$kidneyPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)

  print("getting self-report kidneys...")
  # NOTE(review): flags ANY self-reported cancer (codes 1001-99999), not
  # kidney specifically; NA instance fields propagate NA -- confirm.
  bd$kidneySelfreport <- ifelse(between(bd$f.20001.0.0, 1001, 99999), 1,
                         ifelse(between(bd$f.20001.0.1, 1001, 99999), 1,
                         ifelse(between(bd$f.20001.0.2, 1001, 99999), 1,
                         ifelse(between(bd$f.20001.0.3, 1001, 99999), 1,
                         ifelse(between(bd$f.20001.0.4, 1001, 99999), 1,
                         ifelse(between(bd$f.20001.0.5, 1001, 99999), 1,
                         ifelse(between(bd$f.20001.1.0, 1001, 99999), 1,
                         ifelse(between(bd$f.20001.1.1, 1001, 99999), 1,
                         ifelse(between(bd$f.20001.1.2, 1001, 99999), 1,
                         ifelse(between(bd$f.20001.1.3, 1001, 99999), 1,
                         ifelse(between(bd$f.20001.1.4, 1001, 99999), 1,
                         ifelse(between(bd$f.20001.1.5, 1001, 99999), 1, 0))))))))))))

  return(bd)
}
# Derive cervical cancer case-indicator columns from the UK Biobank registry
# data.
#
# NOTE(review): operates on a globally bound phenotype data frame `bd` and the
# helper `UKBcancerFunc()`, both defined elsewhere -- confirm they are in
# scope. Registry ICD10 codes: "f.40006.*"; ICD9: "f.40013.*"; self-reported
# cancers: "f.20001.*".
#
# Returns `bd` with cervical/other indicator columns appended.
cervical_cancer_function <- function() {
  # Distinct ICD10 codes; malignant "C" codes only (D and O codes fall into
  # the "other" indicator so they can be excluded from controls).
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- allICD10[!is.na(allICD10)]
  allICD10 <- allICD10[!duplicated(allICD10)]
  allICD10 <- grep("C", allICD10, value = TRUE)

  # ICD10 C530/C531/C538/C539: malignant neoplasm of cervix uteri.
  toMatch <- c("C530", "C531", "C538", "C539")
  cervicalICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 codes; 180x = malignant neoplasm of cervix uteri.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- allICD9[!is.na(allICD9)]
  allICD9 <- allICD9[!duplicated(allICD9)]
  toMatch <- c(1800, 1801, 1808, 1809)
  cervicalICD9 <- unique(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))
  cervicalICD9 <- as.integer(cervicalICD9)

  # Flag site-specific and "other" cancers under both coding systems.
  bd <- UKBcancerFunc(dat = bd, cancerCode = cervicalICD9, sitename = "cervicalICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = cervicalICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = cervicalICD10, sitename = "cervicalICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = cervicalICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")

  # Prevalent cases: flagged under either coding system in the
  # "<site>ICD91"/"<site>ICD101" columns created by UKBcancerFunc.
  # Cases are 1; everyone else is left NA.
  bd$cervicalPrevelent <- ifelse(!is.na(bd$cervicalICD101), 1, NA)
  bd$cervicalPrevelent <- ifelse(!is.na(bd$cervicalICD91), 1, bd$cervicalPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)

  print("getting self-report cancers...")
  # NOTE(review): flags ANY self-reported cancer (codes 1001-99999), not
  # cervical specifically; NA instance fields propagate NA -- confirm.
  bd$cervicalSelfreport <- ifelse(between(bd$f.20001.0.0, 1001, 99999), 1,
                           ifelse(between(bd$f.20001.0.1, 1001, 99999), 1,
                           ifelse(between(bd$f.20001.0.2, 1001, 99999), 1,
                           ifelse(between(bd$f.20001.0.3, 1001, 99999), 1,
                           ifelse(between(bd$f.20001.0.4, 1001, 99999), 1,
                           ifelse(between(bd$f.20001.0.5, 1001, 99999), 1,
                           ifelse(between(bd$f.20001.1.0, 1001, 99999), 1,
                           ifelse(between(bd$f.20001.1.1, 1001, 99999), 1,
                           ifelse(between(bd$f.20001.1.2, 1001, 99999), 1,
                           ifelse(between(bd$f.20001.1.3, 1001, 99999), 1,
                           ifelse(between(bd$f.20001.1.4, 1001, 99999), 1,
                           ifelse(between(bd$f.20001.1.5, 1001, 99999), 1, 0))))))))))))

  return(bd)
}
# Derive overall (any malignant site) cancer case-indicator columns from the
# UK Biobank registry data.
#
# NOTE(review): operates on a globally bound phenotype data frame `bd` and the
# helper `UKBcancerFunc()`, both defined elsewhere -- confirm they are in
# scope. Registry ICD10 codes: "f.40006.*"; ICD9: "f.40013.*"; self-reported
# cancers: "f.20001.*". Non-melanoma skin cancer (C44 / ICD9 173) is
# intentionally retained here; see overall_cancer_exclc44_function for the
# variant that excludes it.
#
# Returns `bd` with all/other indicator columns appended.
overall_cancer_function <- function() {
  # Distinct ICD10 codes; malignant "C" codes only (D and O codes fall into
  # the "other" indicator so they can be excluded from controls).
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- allICD10[!is.na(allICD10)]
  allICD10 <- allICD10[!duplicated(allICD10)]
  allICD10 <- grep("C", allICD10, value = TRUE)

  # Distinct ICD9 codes; 140-208 covers all malignant neoplasms.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- allICD9[!is.na(allICD9)]
  allICD9 <- allICD9[!duplicated(allICD9)]
  # Same consecutive range the original spelled out literally ("140".."208").
  x <- as.character(140:208)
  allICD9 <- grep(paste0(x, collapse = "|"), as.character(allICD9), value = TRUE)
  allICD9 <- as.integer(allICD9)

  # Flag all-cancer and "other" cancers under both coding systems.
  bd <- UKBcancerFunc(dat = bd, cancerCode = allICD9, sitename = "allICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = allICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = allICD10, sitename = "allICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = allICD10, sitename = "otherICD10", other = TRUE)

  # Prevalent cases: flagged under either coding system in the
  # "<site>ICD91"/"<site>ICD101" columns created by UKBcancerFunc.
  # Cases are 1; everyone else is left NA.
  bd$allPrevelent <- ifelse(!is.na(bd$allICD101), 1, NA)
  bd$allPrevelent <- ifelse(!is.na(bd$allICD91), 1, bd$allPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)

  # Any self-reported cancer (codes 1001-99999) across the 12 instance fields.
  # NOTE(review): NA instance fields propagate NA through the chain -- confirm.
  bd$allSelfreport <- ifelse(between(bd$f.20001.0.0, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.0.1, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.0.2, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.0.3, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.0.4, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.0.5, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.0, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.1, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.2, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.3, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.4, 1001, 99999), 1,
                      ifelse(between(bd$f.20001.1.5, 1001, 99999), 1, 0))))))))))))

  return(bd)
}
# exclude non-melanoma skin cancer
overall_cancer_exclc44_function<-function(){
  ## Flag overall malignant cancer EXCLUDING non-melanoma skin cancer
  ## (ICD10 C44 / ICD9 173) in the UK Biobank cancer registry, plus
  ## self-reported cancers used to exclude participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*;
  # D and O codes are left to the "other" category (carcinoma in situ etc.)
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  # Drop non-melanoma skin cancer (C44*)
  allICD10 <- grep("C44", allICD10, value = T, invert = T)

  # Distinct ICD9 registry codes observed in f.40013.*, restricted to the
  # malignant-neoplasm chapter (prefixes 140-208; in-situ codes excluded)
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  malignant_prefixes <- as.character(140:208)
  allICD9 <- grep(paste0(malignant_prefixes, collapse = "|"), as.character(allICD9), value = TRUE)
  # Drop non-melanoma skin cancer (173x).
  # BUG FIX: this exclusion previously ran AFTER as.integer(), and grep()
  # coerced the result back to character, so this function passed a character
  # cancerCode to UKBcancerFunc while every other function passes integer.
  # The exclusion now happens on the character vector, then we convert once.
  allICD9 <- grep("173", allICD9, value = TRUE, invert = TRUE)
  allICD9 <- as.integer(allICD9)

  # Case and other-cancer columns (allICD91, allICD101, otherICD91, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = allICD9, sitename = "allICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = allICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = allICD10, sitename = "allICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = allICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$allPrevelent <- ifelse(!is.na(bd$allICD101) | !is.na(bd$allICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$allSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                             ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
# df_split<-separate_cancers()
# df_split<-format_columns()
# df_split<-generate_incident_flag()
# df_split<-generate_behaviour_flag()
# df_split<-generate_controls()
# df_split<-generate_incident_cases()
# bc<-tidy_up()
brain_cancer_function<-function(){
  ## Flag brain cancer (ICD10 C71*; ICD9 191*) cases in the UK Biobank cancer
  ## registry, plus self-reported cancer and "other cancer" flags used to
  ## exclude participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  brainICD10 <- unique(grep("C71", allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  # NOTE(review): unanchored substring match -- "191" would also hit codes
  # merely containing 191; assumed safe for the registry code set.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  brainICD9 <- as.integer(grep("191", allICD9, value = TRUE))

  # Case and other-cancer columns (brainICD91, brainICD101, otherICD91, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = brainICD9, sitename = "brainICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = brainICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = brainICD10, sitename = "brainICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = brainICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$brainPrevelent <- ifelse(!is.na(bd$brainICD101) | !is.na(bd$brainICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$brainSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                               ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
breast_cancer_function<-function(){
  ## Flag breast cancer (ICD10 C50x; ICD9 174x) cases in the UK Biobank
  ## cancer registry, plus self-reported cancer and "other cancer" flags used
  ## to exclude participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  # NOTE(review): C507 (axillary tail) is absent from this list -- confirm
  # the omission is intentional before relying on it.
  toMatch <- c("C500", "C501", "C502", "C503", "C504", "C505", "C506", "C508", "C509")
  breastICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  breastICD9 <- as.integer(grep(paste(1740:1749, collapse = "|"), allICD9, value = TRUE))

  # Case and other-cancer columns (breastICD91, breastICD101, otherICD91, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = breastICD9, sitename = "breastICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = breastICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = breastICD10, sitename = "breastICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = breastICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$breastPrevelent <- ifelse(!is.na(bd$breastICD101) | !is.na(bd$breastICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$breastSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                                ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
melanoma_cancer_function<-function(){
  ## Flag malignant melanoma of skin (ICD10 C43x; ICD9 172x) cases in the UK
  ## Biobank cancer registry, plus self-reported cancer and "other cancer"
  ## flags used to exclude participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  skinICD10 <- unique(grep(paste(paste0("C43", 0:9), collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  skinICD9 <- as.integer(grep(paste(1720:1729, collapse = "|"), allICD9, value = TRUE))

  # Case and other-cancer columns (skinICD91, skinICD101, otherICD91, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = skinICD9, sitename = "skinICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = skinICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = skinICD10, sitename = "skinICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = skinICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$skinPrevelent <- ifelse(!is.na(bd$skinICD101) | !is.na(bd$skinICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$skinSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                              ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
prostate_cancer_function<-function(){
  ## Flag prostate cancer (ICD10 C61; ICD9 185) cases in the UK Biobank
  ## cancer registry, plus self-reported cancer and "other cancer" flags used
  ## to exclude participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  prostateICD10 <- unique(grep("C61", allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  # NOTE(review): unanchored substring match -- "185" would also hit codes
  # merely containing 185; assumed safe for the registry code set.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  prostateICD9 <- as.integer(grep("185", allICD9, value = TRUE))

  # Case and other-cancer columns (prostateICD91, prostateICD101, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = prostateICD9, sitename = "prostateICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = prostateICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = prostateICD10, sitename = "prostateICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = prostateICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$prostatePrevelent <- ifelse(!is.na(bd$prostateICD101) | !is.na(bd$prostateICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$prostateSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                                  ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
pharynx_cancer_function<-function(){
  ## Flag pharyngeal cancer cases in the UK Biobank cancer registry, plus
  ## self-reported cancer and "other cancer" flags used to exclude
  ## participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  toMatch <- c("C01", "C024", "C051", "C052", "C058", "C059", "C090", "C091", "C098", "C099", "C100", "C101", "C102", "C103", "C104", "C108", "C109", "C12", "C130", "C131", "C132", "C139", "C140", "C142")
  pharynxICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("1410", "1453", "1455", "1460", "1461")
  pharynxICD9 <- as.integer(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))

  # Case and other-cancer columns (pharynxICD91, pharynxICD101, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = pharynxICD9, sitename = "pharynxICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = pharynxICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = pharynxICD10, sitename = "pharynxICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = pharynxICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$pharynxPrevelent <- ifelse(!is.na(bd$pharynxICD101) | !is.na(bd$pharynxICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$pharynxSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                                 ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
ovarian_cancer_function<-function(){
  ## Flag ovarian cancer (ICD10 C56; ICD9 1830) cases in the UK Biobank
  ## cancer registry, plus self-reported cancer and "other cancer" flags used
  ## to exclude participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  ovarianICD10 <- unique(grep("C56", allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  ovarianICD9 <- as.integer(grep("1830", allICD9, value = TRUE))

  # Case and other-cancer columns (ovarianICD91, ovarianICD101, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = ovarianICD9, sitename = "ovarianICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = ovarianICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = ovarianICD10, sitename = "ovarianICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = ovarianICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$ovarianPrevelent <- ifelse(!is.na(bd$ovarianICD101) | !is.na(bd$ovarianICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$ovarianSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                                 ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
oropharyngeal_cancer_function<-function(){
  ## Flag combined oral-cavity + pharynx (oropharyngeal) cancer cases in the
  ## UK Biobank cancer registry, plus self-reported cancer and "other cancer"
  ## flags used to exclude participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  # Union of the oral-cavity and pharynx code lists used elsewhere in this file
  toMatch <- c("C003", "C004", "C005", "C006", "C009", "C020", "C021", "C022", "C023", "C028", "C029", "C030", "C031", "C039", "C040", "C041", "C048", "C049", "C050", "C060", "C061", "C062", "C068", "C069", "C01", "C024", "C051", "C052", "C058", "C059", "C090", "C091", "C098", "C099", "C100", "C101", "C102", "C103", "C104", "C108", "C109", "C12", "C130", "C131", "C132", "C139", "C140", "C142")
  oral_pharynxICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("1412", "1413", "1419", "1430", "1431", "1449", "1450", "1451", "1452")
  oral_pharynxICD9 <- as.integer(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))

  # Case and other-cancer columns (oral_pharynxICD91, oral_pharynxICD101, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = oral_pharynxICD9, sitename = "oral_pharynxICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = oral_pharynxICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = oral_pharynxICD10, sitename = "oral_pharynxICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = oral_pharynxICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$oral_pharynxPrevelent <- ifelse(!is.na(bd$oral_pharynxICD101) | !is.na(bd$oral_pharynxICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$oral_pharynxSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                                      ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
oral_cancer_function<-function(){
  ## Flag oral-cavity cancer cases in the UK Biobank cancer registry, plus
  ## self-reported cancer and "other cancer" flags used to exclude
  ## participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  toMatch <- c("C003", "C004", "C005", "C006", "C009", "C020", "C021", "C022", "C023", "C028", "C029", "C030", "C031", "C039", "C040", "C041", "C048", "C049", "C050", "C060", "C061", "C062", "C068", "C069")
  oral_cavityICD10 <- unique(grep(paste(toMatch, collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("1412", "1413", "1419", "1430", "1431", "1449", "1450", "1451", "1452")
  oral_cavityICD9 <- as.integer(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))

  # Case and other-cancer columns (oral_cavityICD91, oral_cavityICD101, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = oral_cavityICD9, sitename = "oral_cavityICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = oral_cavityICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = oral_cavityICD10, sitename = "oral_cavityICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = oral_cavityICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$oral_cavityPrevelent <- ifelse(!is.na(bd$oral_cavityICD101) | !is.na(bd$oral_cavityICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$oral_cavitySelfreport <- ifelse(rowSums(in_range) > 0, 1,
                                     ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
oesophageal_cancer_function<-function(){
  ## Flag oesophageal cancer (ICD10 C15*; ICD9 150*) cases in the UK Biobank
  ## cancer registry, plus self-reported cancer and "other cancer" flags used
  ## to exclude participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  oesophICD10 <- unique(grep("C15", allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  # NOTE(review): unanchored substring match -- "150" would also hit codes
  # merely containing 150; assumed safe for the registry code set.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  oesophICD9 <- as.integer(grep("150", allICD9, value = TRUE))

  # Case and other-cancer columns (oesophICD91, oesophICD101, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = oesophICD9, sitename = "oesophICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = oesophICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = oesophICD10, sitename = "oesophICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = oesophICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$oesophPrevelent <- ifelse(!is.na(bd$oesophICD101) | !is.na(bd$oesophICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$oesophSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                                ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
melanoma_plus_other_malignant_skin_cancer_function<-function(){
  ## Flag melanoma plus other malignant neoplasms of skin (ICD10 C43/C44;
  ## ICD9 172/173) cases in the UK Biobank cancer registry, plus
  ## self-reported cancer and "other cancer" flags used to exclude
  ## participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  mmplus_skinICD10 <- unique(grep("C43|C44", allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  mmplus_skinICD9 <- as.integer(grep("172|173", allICD9, value = TRUE))

  # Case and other-cancer columns (mmplus_skinICD91, mmplus_skinICD101, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = mmplus_skinICD9, sitename = "mmplus_skinICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = mmplus_skinICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = mmplus_skinICD10, sitename = "mmplus_skinICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = mmplus_skinICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$mmplus_skinPrevelent <- ifelse(!is.na(bd$mmplus_skinICD101) | !is.na(bd$mmplus_skinICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$mmplus_skinSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                                     ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
nonmelanoma_skin_cancer_function<-function(){
  ## Flag non-melanoma skin cancer (ICD10 C44*; ICD9 173*) cases in the UK
  ## Biobank cancer registry, plus self-reported cancer and "other cancer"
  ## flags used to exclude participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  nm_skinICD10 <- unique(grep("C44", allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  # NOTE(review): unanchored substring match -- "173" would also hit codes
  # merely containing 173; assumed safe for the registry code set.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  nm_skinICD9 <- as.integer(grep("173", allICD9, value = TRUE))

  # Case and other-cancer columns (nm_skinICD91, nm_skinICD101, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = nm_skinICD9, sitename = "nm_skinICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = nm_skinICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = nm_skinICD10, sitename = "nm_skinICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = nm_skinICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$nm_skinPrevelent <- ifelse(!is.na(bd$nm_skinICD101) | !is.na(bd$nm_skinICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$nm_skinSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                                 ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
bladder_cancer_function<-function(){
  ## Flag bladder cancer (ICD10 C67x; ICD9 188x) cases in the UK Biobank
  ## cancer registry, plus self-reported cancer and "other cancer" flags used
  ## to exclude participants from controls.
  ## Works on the global data frame `bd`; requires dplyr and UKBcancerFunc()
  ## in the calling environment. Returns the augmented `bd`.

  # Distinct malignant (C-code) ICD10 registry codes observed in f.40006.*
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = F)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  allICD10 <- grep("C", allICD10, value = T)
  bladderICD10 <- unique(grep(paste(paste0("C67", 0:9), collapse = "|"), allICD10, value = TRUE))

  # Distinct ICD9 registry codes observed in f.40013.*
  # NOTE(review): the ICD9 list skips 1881, 1883, 1885 and 1887 -- confirm
  # these odd-numbered bladder-wall subcodes are excluded deliberately.
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = F)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c(1880, 1882, 1884, 1886, 1888, 1889)
  bladderICD9 <- as.integer(grep(paste(toMatch, collapse = "|"), allICD9, value = TRUE))

  # Case and other-cancer columns (bladderICD91, bladderICD101, ...)
  bd <- UKBcancerFunc(dat = bd, cancerCode = bladderICD9, sitename = "bladderICD9", other = F, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = bladderICD9, sitename = "otherICD9", other = T, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = bladderICD10, sitename = "bladderICD10", other = F)
  bd <- UKBcancerFunc(dat = bd, cancerCode = bladderICD10, sitename = "otherICD10", other = T)
  print("functions complete!")

  # Prevalence flags: 1 if any registry diagnosis (ICD10 or ICD9), else NA
  bd$bladderPrevelent <- ifelse(!is.na(bd$bladderICD101) | !is.na(bd$bladderICD91), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101) | !is.na(bd$otherICD91), 1, NA)

  print("getting self-report cancers...")
  # Self-reported cancer (f.20001, instances 0-1, array slots 0-5): 1 if ANY
  # slot holds a cancer code (1001-99999).
  # BUG FIX: the old nested-ifelse chain returned NA as soon as an earlier
  # slot was NA, so a self-report present only in a later slot/instance was
  # flagged NA instead of 1.
  sr_cols <- paste0("f.20001.", rep(0:1, each = 6), ".", rep(0:5, times = 2))
  sr <- vapply(sr_cols, function(cn) as.numeric(bd[[cn]]), numeric(nrow(bd)))
  in_range <- !is.na(sr) & sr >= 1001 & sr <= 99999
  # NA kept for participants with no self-report data at all
  bd$bladderSelfreport <- ifelse(rowSums(in_range) > 0, 1,
                                 ifelse(rowSums(!is.na(sr)) == 0, NA, 0))
  return(bd)
}
myeloid_leukemia_function<-function(){
  ## Flag myeloid leukaemia cases (ICD-10 C92 / ICD-9 205) in the global UK
  ## Biobank data frame `bd` from registry fields f.40006.* / f.40013.*, plus
  ## a flag for any self-reported cancer (field 20001). Returns `bd` with the
  ## new columns added (case columns created by the project UKBcancerFunc).
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  # Keep only malignant "C" codes; D and O codes fall into "other" so they can
  # be excluded from controls.
  allICD10 <- grep("^C", allICD10, value = TRUE)
  toMatch <- c("C92")
  # Anchored pattern avoids substring false positives.
  myel_leukICD10 <- unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD10, value = TRUE))
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("205")
  # Anchoring matters for numeric ICD-9 codes: unanchored "205" would also hit
  # any code merely containing those digits (e.g. 2205).
  myel_leukICD9 <- as.integer(unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD9, value = TRUE)))
  bd <- UKBcancerFunc(dat = bd, cancerCode = myel_leukICD9, sitename = "myel_leukICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = myel_leukICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = myel_leukICD10, sitename = "myel_leukICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = myel_leukICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")
  # Overall prevalent flags: 1 if a case in either coding system, else NA.
  bd$myel_leukPrevelent <- ifelse(!is.na(bd$myel_leukICD101), 1, NA)
  bd$myel_leukPrevelent <- ifelse(!is.na(bd$myel_leukICD91), 1, bd$myel_leukPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)
  print("getting self-report cancers...")
  # Any self-reported cancer across all 12 field-20001 slots.
  # BUG FIX: the old nested ifelse chain propagated NA from earlier slots, so
  # later slots were never consulted and most non-cases were NA instead of 0.
  self_cols <- c(paste0("f.20001.0.", 0:5), paste0("f.20001.1.", 0:5))
  any_self <- Reduce(`|`, lapply(bd[self_cols], function(v) !is.na(v) & between(v, 1001, 99999)))
  bd$myel_leukSelfreport <- ifelse(any_self, 1, 0)
  return(bd)
}
haematological_cancer_function<-function(){
  ## Flag haematological cancer cases (ICD-10 C81-C96 / ICD-9 200-208) in the
  ## global UK Biobank data frame `bd` from registry fields f.40006.* /
  ## f.40013.*, plus a flag for any self-reported cancer (field 20001).
  ## Returns `bd` with the new columns added.
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  # Keep only malignant "C" codes; D and O codes fall into "other" so they can
  # be excluded from controls.
  allICD10 <- grep("^C", allICD10, value = TRUE)
  toMatch <- c("C81", "C82", "C83", "C84", "C85", "C86", "C87", "C88", "C89", "C90", "C91", "C92", "C93", "C94", "C95", "C96")
  # Anchored pattern avoids substring false positives.
  haemICD10 <- unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD10, value = TRUE))
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("200", "201", "202", "203", "204", "205", "206", "207", "208")
  # Anchoring matters for numeric ICD-9 codes: unanchored "204" would also hit
  # codes merely containing those digits (e.g. 2204).
  haemICD9 <- as.integer(unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD9, value = TRUE)))
  bd <- UKBcancerFunc(dat = bd, cancerCode = haemICD9, sitename = "haemICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = haemICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = haemICD10, sitename = "haemICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = haemICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")
  # Overall prevalent flags: 1 if a case in either coding system, else NA.
  bd$haemPrevelent <- ifelse(!is.na(bd$haemICD101), 1, NA)
  bd$haemPrevelent <- ifelse(!is.na(bd$haemICD91), 1, bd$haemPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)
  print("getting self-report cancers...")
  # Any self-reported cancer across all 12 field-20001 slots.
  # BUG FIX: the old nested ifelse chain propagated NA from earlier slots, so
  # later slots were never consulted and most non-cases were NA instead of 0.
  self_cols <- c(paste0("f.20001.0.", 0:5), paste0("f.20001.1.", 0:5))
  any_self <- Reduce(`|`, lapply(bd[self_cols], function(v) !is.na(v) & between(v, 1001, 99999)))
  bd$haemSelfreport <- ifelse(any_self, 1, 0)
  return(bd)
}
head_and_neck_cancer<-function(){
  ## Flag head-and-neck cancer cases in the global UK Biobank data frame `bd`
  ## from registry ICD-10 (f.40006.*) and ICD-9 (f.40013.*) codes, plus a
  ## flag for any self-reported cancer (field 20001). Returns `bd` with the
  ## new columns added.
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  # Keep only malignant "C" codes; D and O codes fall into "other" so they can
  # be excluded from controls.
  allICD10 <- grep("^C", allICD10, value = TRUE)
  toMatch <- c("C003", "C004", "C005", "C006", "C009", "C01", "C020", "C021", "C022", "C023", "C024", "C028", "C029", "C030", "C031", "C039", "C040", "C041", "C048", "C049", "C050", "C051", "C052", "C058", "C059", "C060", "C061", "C062", "C068", "C069", "C090", "C091", "C098", "C099", "C100", "C101", "C102", "C103", "C104", "C108", "C109", "C12", "C130", "C131", "C132", "C139", "C140", "C142", "C320", "C321", "C322", "C323", "C328", "C329")
  # Anchored pattern avoids substring false positives.
  headneckICD10 <- unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD10, value = TRUE))
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("1410", "1412", "1413", "1419", "1430", "1431", "1449", "1450", "1451", "1452", "1453", "1455", "1460", "1461", "1610")
  # Anchoring matters for numeric ICD-9 codes (substring matches would be
  # false positives).
  headneckICD9 <- as.integer(unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD9, value = TRUE)))
  bd <- UKBcancerFunc(dat = bd, cancerCode = headneckICD9, sitename = "headneckICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = headneckICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = headneckICD10, sitename = "headneckICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = headneckICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")
  # Overall prevalent flags: 1 if a case in either coding system, else NA.
  bd$headneckPrevelent <- ifelse(!is.na(bd$headneckICD101), 1, NA)
  bd$headneckPrevelent <- ifelse(!is.na(bd$headneckICD91), 1, bd$headneckPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)
  print("getting self-report cancers...")
  # Any self-reported cancer across all 12 field-20001 slots.
  # BUG FIX: the old nested ifelse chain propagated NA from earlier slots, so
  # later slots were never consulted and most non-cases were NA instead of 0.
  self_cols <- c(paste0("f.20001.0.", 0:5), paste0("f.20001.1.", 0:5))
  any_self <- Reduce(`|`, lapply(bd[self_cols], function(v) !is.na(v) & between(v, 1001, 99999)))
  bd$headneckSelfreport <- ifelse(any_self, 1, 0)
  return(bd)
}
larynx_cancer<-function(){
  ## Flag larynx cancer cases (ICD-10 C320-C329 / ICD-9 1610) in the global UK
  ## Biobank data frame `bd` from registry fields f.40006.* / f.40013.*, plus
  ## a flag for any self-reported cancer (field 20001). Returns `bd` with the
  ## new columns added.
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  # Keep only malignant "C" codes; D and O codes fall into "other" so they can
  # be excluded from controls.
  allICD10 <- grep("^C", allICD10, value = TRUE)
  toMatch <- c("C320", "C321", "C322", "C323", "C328", "C329")
  # Anchored pattern avoids substring false positives.
  larynxICD10 <- unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD10, value = TRUE))
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("1610")
  # Anchoring matters for numeric ICD-9 codes (substring matches would be
  # false positives).
  larynxICD9 <- as.integer(unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD9, value = TRUE)))
  bd <- UKBcancerFunc(dat = bd, cancerCode = larynxICD9, sitename = "larynxICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = larynxICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = larynxICD10, sitename = "larynxICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = larynxICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")
  # Overall prevalent flags: 1 if a case in either coding system, else NA.
  bd$larynxPrevelent <- ifelse(!is.na(bd$larynxICD101), 1, NA)
  bd$larynxPrevelent <- ifelse(!is.na(bd$larynxICD91), 1, bd$larynxPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)
  print("getting self-report cancers...")
  # Any self-reported cancer across all 12 field-20001 slots.
  # BUG FIX: the old nested ifelse chain propagated NA from earlier slots, so
  # later slots were never consulted and most non-cases were NA instead of 0.
  self_cols <- c(paste0("f.20001.0.", 0:5), paste0("f.20001.1.", 0:5))
  any_self <- Reduce(`|`, lapply(bd[self_cols], function(v) !is.na(v) & between(v, 1001, 99999)))
  bd$larynxSelfreport <- ifelse(any_self, 1, 0)
  return(bd)
}
leukemia_function<-function(){
  ## Flag leukaemia cases (ICD-10 C91-C95 / ICD-9 204-208) in the global UK
  ## Biobank data frame `bd` from registry fields f.40006.* / f.40013.*, plus
  ## a flag for any self-reported cancer (field 20001). Returns `bd` with the
  ## new columns added.
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  # Keep only malignant "C" codes; D and O codes fall into "other" so they can
  # be excluded from controls.
  allICD10 <- grep("^C", allICD10, value = TRUE)
  toMatch <- c("C91", "C92", "C93", "C94", "C95")
  # Anchored pattern avoids substring false positives.
  leukICD10 <- unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD10, value = TRUE))
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("204", "205", "206", "207", "208")
  # Anchoring matters for numeric ICD-9 codes: unanchored "204" would also hit
  # codes merely containing those digits (e.g. 2204).
  leukICD9 <- as.integer(unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD9, value = TRUE)))
  bd <- UKBcancerFunc(dat = bd, cancerCode = leukICD9, sitename = "leukICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = leukICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = leukICD10, sitename = "leukICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = leukICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")
  # Overall prevalent flags: 1 if a case in either coding system, else NA.
  bd$leukPrevelent <- ifelse(!is.na(bd$leukICD101), 1, NA)
  bd$leukPrevelent <- ifelse(!is.na(bd$leukICD91), 1, bd$leukPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)
  print("getting self-report cancers...")
  # Any self-reported cancer across all 12 field-20001 slots.
  # BUG FIX: the old nested ifelse chain propagated NA from earlier slots, so
  # later slots were never consulted and most non-cases were NA instead of 0.
  self_cols <- c(paste0("f.20001.0.", 0:5), paste0("f.20001.1.", 0:5))
  any_self <- Reduce(`|`, lapply(bd[self_cols], function(v) !is.na(v) & between(v, 1001, 99999)))
  bd$leukSelfreport <- ifelse(any_self, 1, 0)
  return(bd)
}
liver_bile_cancer_function<-function(){
  ## Flag liver/intrahepatic bile duct cancer cases (ICD-10 C22 / ICD-9 155)
  ## in the global UK Biobank data frame `bd` from registry fields f.40006.* /
  ## f.40013.*, plus a flag for any self-reported cancer (field 20001).
  ## Returns `bd` with the new columns added.
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  # Keep only malignant "C" codes; D and O codes fall into "other" so they can
  # be excluded from controls.
  allICD10 <- grep("^C", allICD10, value = TRUE)
  toMatch <- c("C22")
  # Anchored pattern avoids substring false positives.
  liver_bileICD10 <- unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD10, value = TRUE))
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("155")
  # Anchoring matters for numeric ICD-9 codes: unanchored "155" would also hit
  # codes merely containing those digits (e.g. 2155).
  liver_bileICD9 <- as.integer(unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD9, value = TRUE)))
  bd <- UKBcancerFunc(dat = bd, cancerCode = liver_bileICD9, sitename = "liver_bileICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = liver_bileICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = liver_bileICD10, sitename = "liver_bileICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = liver_bileICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")
  # Overall prevalent flags: 1 if a case in either coding system, else NA.
  bd$liver_bilePrevelent <- ifelse(!is.na(bd$liver_bileICD101), 1, NA)
  bd$liver_bilePrevelent <- ifelse(!is.na(bd$liver_bileICD91), 1, bd$liver_bilePrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)
  print("getting self-report cancers...")
  # Any self-reported cancer across all 12 field-20001 slots.
  # BUG FIX: the old nested ifelse chain propagated NA from earlier slots, so
  # later slots were never consulted and most non-cases were NA instead of 0.
  self_cols <- c(paste0("f.20001.0.", 0:5), paste0("f.20001.1.", 0:5))
  any_self <- Reduce(`|`, lapply(bd[self_cols], function(v) !is.na(v) & between(v, 1001, 99999)))
  bd$liver_bileSelfreport <- ifelse(any_self, 1, 0)
  return(bd)
}
liver_cell_cancer_function<-function(){
  ## Flag hepatocellular carcinoma cases (ICD-10 C220 / ICD-9 1550) in the
  ## global UK Biobank data frame `bd` from registry fields f.40006.* /
  ## f.40013.*, plus a flag for any self-reported cancer (field 20001).
  ## Returns `bd` with the new columns added.
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  # Keep only malignant "C" codes; D and O codes fall into "other" so they can
  # be excluded from controls.
  allICD10 <- grep("^C", allICD10, value = TRUE)
  toMatch <- c("C220")
  # Anchored pattern avoids substring false positives.
  liver_cellICD10 <- unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD10, value = TRUE))
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("1550")
  # Anchoring matters for numeric ICD-9 codes (substring matches would be
  # false positives).
  liver_cellICD9 <- as.integer(unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD9, value = TRUE)))
  bd <- UKBcancerFunc(dat = bd, cancerCode = liver_cellICD9, sitename = "liver_cellICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = liver_cellICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = liver_cellICD10, sitename = "liver_cellICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = liver_cellICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")
  # Overall prevalent flags: 1 if a case in either coding system, else NA.
  bd$liver_cellPrevelent <- ifelse(!is.na(bd$liver_cellICD101), 1, NA)
  bd$liver_cellPrevelent <- ifelse(!is.na(bd$liver_cellICD91), 1, bd$liver_cellPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)
  print("getting self-report cancers...")
  # Any self-reported cancer across all 12 field-20001 slots.
  # BUG FIX: the old nested ifelse chain propagated NA from earlier slots, so
  # later slots were never consulted and most non-cases were NA instead of 0.
  self_cols <- c(paste0("f.20001.0.", 0:5), paste0("f.20001.1.", 0:5))
  any_self <- Reduce(`|`, lapply(bd[self_cols], function(v) !is.na(v) & between(v, 1001, 99999)))
  bd$liver_cellSelfreport <- ifelse(any_self, 1, 0)
  return(bd)
}
lymphoid_leukemia_function<-function(){
  ## Flag lymphoid leukaemia cases (ICD-10 C91 / ICD-9 204) in the global UK
  ## Biobank data frame `bd` from registry fields f.40006.* / f.40013.*, plus
  ## a flag for any self-reported cancer (field 20001). Returns `bd` with the
  ## new columns added.
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  # Keep only malignant "C" codes; D and O codes fall into "other" so they can
  # be excluded from controls.
  allICD10 <- grep("^C", allICD10, value = TRUE)
  toMatch <- c("C91")
  # Anchored pattern avoids substring false positives.
  lymph_leukICD10 <- unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD10, value = TRUE))
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("204")
  # Anchoring matters for numeric ICD-9 codes: unanchored "204" would also hit
  # codes merely containing those digits (e.g. 2204).
  lymph_leukICD9 <- as.integer(unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD9, value = TRUE)))
  bd <- UKBcancerFunc(dat = bd, cancerCode = lymph_leukICD9, sitename = "lymph_leukICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = lymph_leukICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = lymph_leukICD10, sitename = "lymph_leukICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = lymph_leukICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")
  # Overall prevalent flags: 1 if a case in either coding system, else NA.
  bd$lymph_leukPrevelent <- ifelse(!is.na(bd$lymph_leukICD101), 1, NA)
  bd$lymph_leukPrevelent <- ifelse(!is.na(bd$lymph_leukICD91), 1, bd$lymph_leukPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)
  print("getting self-report cancers...")
  # Any self-reported cancer across all 12 field-20001 slots.
  # BUG FIX: the old nested ifelse chain propagated NA from earlier slots, so
  # later slots were never consulted and most non-cases were NA instead of 0.
  self_cols <- c(paste0("f.20001.0.", 0:5), paste0("f.20001.1.", 0:5))
  any_self <- Reduce(`|`, lapply(bd[self_cols], function(v) !is.na(v) & between(v, 1001, 99999)))
  bd$lymph_leukSelfreport <- ifelse(any_self, 1, 0)
  return(bd)
}
multiple_myeloma_function<-function(){
  ## Flag multiple myeloma cases (ICD-10 C900 / ICD-9 2030) in the global UK
  ## Biobank data frame `bd` from registry fields f.40006.* / f.40013.*, plus
  ## a flag for any self-reported cancer (field 20001). Returns `bd` with the
  ## new columns added.
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  # Keep only malignant "C" codes; D and O codes fall into "other" so they can
  # be excluded from controls.
  allICD10 <- grep("^C", allICD10, value = TRUE)
  toMatch <- c("C900")
  # Anchored pattern avoids substring false positives.
  mult_myelICD10 <- unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD10, value = TRUE))
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c("2030")
  # Anchoring matters for numeric ICD-9 codes (substring matches would be
  # false positives).
  mult_myelICD9 <- as.integer(unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD9, value = TRUE)))
  bd <- UKBcancerFunc(dat = bd, cancerCode = mult_myelICD9, sitename = "mult_myelICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = mult_myelICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = mult_myelICD10, sitename = "mult_myelICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = mult_myelICD10, sitename = "otherICD10", other = TRUE)
  print("functions complete!")
  # Overall prevalent flags: 1 if a case in either coding system, else NA.
  bd$mult_myelPrevelent <- ifelse(!is.na(bd$mult_myelICD101), 1, NA)
  bd$mult_myelPrevelent <- ifelse(!is.na(bd$mult_myelICD91), 1, bd$mult_myelPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)
  print("getting self-report cancers...")
  # Any self-reported cancer across all 12 field-20001 slots.
  # BUG FIX: the old nested ifelse chain propagated NA from earlier slots, so
  # later slots were never consulted and most non-cases were NA instead of 0.
  self_cols <- c(paste0("f.20001.0.", 0:5), paste0("f.20001.1.", 0:5))
  any_self <- Reduce(`|`, lapply(bd[self_cols], function(v) !is.na(v) & between(v, 1001, 99999)))
  bd$mult_myelSelfreport <- ifelse(any_self, 1, 0)
  return(bd)
}
colorectal_cancer_function<-function(){
  ## Flag colorectal cancer cases (ICD-10 C18x/C19/C20, ICD-9 153x) in the
  ## global UK Biobank data frame `bd` from registry fields f.40006.* /
  ## f.40013.*, plus a flag for any self-reported cancer (field 20001).
  ## Returns `bd` with the new columns added.
  allICD10 <- unlist(bd %>% select(starts_with("f.40006.")), use.names = FALSE)
  allICD10 <- unique(allICD10[!is.na(allICD10)])
  # Keep only malignant "C" codes; D and O codes fall into "other" so they can
  # be excluded from controls.
  allICD10 <- grep("^C", allICD10, value = TRUE)
  toMatch <- c("C180", "C181", "C182", "C183", "C184", "C185", "C186", "C187", "C188", "C189", "C19", "C20")
  # Anchored pattern avoids substring false positives.
  colorectalICD10 <- unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD10, value = TRUE))
  allICD9 <- unlist(bd %>% select(starts_with("f.40013.")), use.names = FALSE)
  allICD9 <- unique(allICD9[!is.na(allICD9)])
  toMatch <- c(1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, 1538, 1539)
  # Anchoring matters for numeric ICD-9 codes (substring matches would be
  # false positives).
  colorectalICD9 <- as.integer(unique(grep(paste0("^(", paste(toMatch, collapse = "|"), ")"), allICD9, value = TRUE)))
  bd <- UKBcancerFunc(dat = bd, cancerCode = colorectalICD9, sitename = "colorectalICD9", other = FALSE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = colorectalICD9, sitename = "otherICD9", other = TRUE, cancer_col = "f.40013.")
  bd <- UKBcancerFunc(dat = bd, cancerCode = colorectalICD10, sitename = "colorectalICD10", other = FALSE)
  bd <- UKBcancerFunc(dat = bd, cancerCode = colorectalICD10, sitename = "otherICD10", other = TRUE)
  # Overall prevalent flags: 1 if a case in either coding system, else NA.
  bd$colorectalPrevelent <- ifelse(!is.na(bd$colorectalICD101), 1, NA)
  bd$colorectalPrevelent <- ifelse(!is.na(bd$colorectalICD91), 1, bd$colorectalPrevelent)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD101), 1, NA)
  bd$otherPrevelent <- ifelse(!is.na(bd$otherICD91), 1, bd$otherPrevelent)
  # Any self-reported cancer across all 12 field-20001 slots.
  # BUG FIX: the old nested ifelse chain propagated NA from earlier slots, so
  # later slots were never consulted and most non-cases were NA instead of 0.
  self_cols <- c(paste0("f.20001.0.", 0:5), paste0("f.20001.1.", 0:5))
  any_self <- Reduce(`|`, lapply(bd[self_cols], function(v) !is.na(v) & between(v, 1001, 99999)))
  bd$colorectalSelfreport <- ifelse(any_self, 1, 0)
  return(bd)
}
format_smoking<-function(){
  # Label smoking status (field 20116) for instances 0-2 and rename the
  # baseline column to "smoking". Returns the modified `bd`.
  smoke_levels <- c(-3, 0, 1, 2)
  smoke_labels <- c("Prefer not to answer", "Never", "Previous", "Current")
  for (inst in 0:2) {
    col <- paste0("f.20116.", inst, ".0")
    bd[[col]] <- ordered(bd[[col]], levels = smoke_levels, labels = smoke_labels)
  }
  names(bd)[names(bd) == "f.20116.0.0"] <- "smoking"
  return(bd)
}
format_sex<-function(){
  # Recode sex (field 31) from 0/1 to "Female"/"Male" character values.
  # The column is deliberately NOT renamed (renaming lines were commented out
  # in the original). Returns the modified `bd`.
  sex_levels <- c(0, 1)
  sex_labels <- c("Female", "Male")
  bd[["f.31.0.0"]] <- as.character(ordered(bd[["f.31.0.0"]], levels = sex_levels, labels = sex_labels))
  return(bd)
}
format_behaviour<-function(){
  # Recode tumour behaviour (field 40012, instances 0-16) from the integer
  # coding to its text labels, stored as character (matching the original,
  # which wrapped ordered() in as.character()). Replaces 17 copy-pasted
  # assignment lines (plus a commented-out duplicate block) with a loop.
  # Returns the modified `bd`.
  lvl.0039 <- c(-1,0,1,2,3,5,6,9)
  lbl.0039 <- c("Malignant","Benign","Uncertain whether benign or malignant","Carcinoma in situ","Malignant, primary site","Malignant, microinvasive","Malignant, metastatic site","Malignant, uncertain whether primary or metastatic site")
  for (inst in 0:16) {
    col <- paste0("f.40012.", inst, ".0")
    bd[[col]] <- as.character(ordered(bd[[col]], levels = lvl.0039, labels = lbl.0039))
  }
  return(bd)
}
format_date_enrollment<-function(){
  # Parse the enrolment consent date (field 200) into Date class.
  # The column is deliberately not renamed (rename was commented out in the
  # original). Returns the modified `bd`.
  bd[["f.200.0.0"]] <- as.Date(bd[["f.200.0.0"]])
  return(bd)
}
format_date_of_attending_assessment_centre<-function(){
  # Parse assessment-centre attendance dates (field 53, instances 0-3) into
  # Date class. Returns the modified `bd`.
  for (inst in 0:3) {
    col <- paste0("f.53.", inst, ".0")
    bd[[col]] <- as.Date(bd[[col]])
  }
  return(bd)
}
format_date_of_death<-function(){
  # Parse dates of death (field 40000, instances 0-1) into Date class and
  # rename the primary column for readability. Returns the modified `bd`.
  for (inst in 0:1) {
    col <- paste0("f.40000.", inst, ".0")
    bd[[col]] <- as.Date(bd[[col]])
  }
  names(bd)[names(bd) == "f.40000.0.0"] <- "date_of_death_40000"
  return(bd)
}
# unique(bd$f.40005.0.0)
format_date_diagnosis<-function(){
  # Parse all cancer-diagnosis date columns (field 40005, instances 0-16)
  # into Date class. Replaces 17 copy-pasted as.Date() lines with a loop.
  # Returns the modified `bd`.
  for (inst in 0:16) {
    col <- paste0("f.40005.", inst, ".0")
    bd[[col]] <- as.Date(bd[[col]])
  }
  return(bd)
}
genotyping_batch<-function(){
  # Rename the genotyping batch column (field 22000) to a readable name.
  # Returns the modified `bd`.
  batch_col <- names(bd) == "f.22000.0.0"
  names(bd)[batch_col] <- "genotyping_batch"
  return(bd)
}
format_nsaids_baseline_6154<-function(){
  # Data-Field 6154: Medication for pain relief, constipation, heartburn
  # ("Do you regularly take any of the following?", multiple answers allowed).
  # Labels all 18 array columns (instances 0-2 x slots 0-5) and derives
  # nsaid_baseline_f_6154 ("yes"/"no"): regular aspirin or ibuprofen use at
  # baseline (instance 0). Returns the modified `bd`.
  lvl.100628 <- c(-7,-3,-1,1,2,3,4,5,6)
  lbl.100628 <- c("None of the above","Prefer not to answer","Do not know","Aspirin","Ibuprofen (e.g. Nurofen)","Paracetamol","Ranitidine (e.g. Zantac)","Omeprazole (e.g. Zanprol)","Laxatives (e.g. Dulcolax, Senokot)")
  for (inst in 0:2) {
    for (slot in 0:5) {
      col <- paste0("f.6154.", inst, ".", slot)
      bd[[col]] <- ordered(bd[[col]], levels = lvl.100628, labels = lbl.100628)
    }
  }
  # Vectorised replacement for the original per-row for loop, which printed
  # every row index and iterated over nrow(bd) rows in interpreted code.
  # Also fixes a latent bug: rows where all six answers were non-NA and none
  # was an NSAID previously stayed NA instead of "no".
  nsaid_labels <- c("Aspirin", "Ibuprofen (e.g. Nurofen)")
  base_cols <- paste0("f.6154.0.", 0:5)
  takes_nsaid <- Reduce(`|`, lapply(bd[base_cols], function(v) !is.na(v) & v %in% nsaid_labels))
  bd$nsaid_baseline_f_6154 <- ifelse(takes_nsaid, "yes", "no")
  return(bd)
}
# table(bd1$nsaid_baseline_f_6154,bd1$nsaid_baseline_f_20003)
# table(bd1$aspirin_baseline_f_6154,bd1$aspirin_baseline_f_20003)
format_aspirin_baseline_6154<-function(){
  # Derives aspirin_baseline_f_6154 ("yes"/"no"): regular aspirin use at
  # baseline from field 6154 (columns are expected to be labelled already by
  # format_nsaids_baseline_6154 -- TODO confirm call order). Also labels the
  # pilot-study field 10004. Returns the modified `bd`.
  # Vectorised replacement for the original per-row for loop (which printed
  # every row index). Also fixes a latent bug: rows where all six answers
  # were non-NA and none was "Aspirin" previously stayed NA instead of "no".
  base_cols <- paste0("f.6154.0.", 0:5)
  takes_aspirin <- Reduce(`|`, lapply(bd[base_cols], function(v) !is.na(v) & v == "Aspirin"))
  bd$aspirin_baseline_f_6154 <- ifelse(takes_aspirin, "yes", "no")
  # Pilot-study field 10004 value labels.
  lvl.100688 <- c(-7,-3,-1,1,2,3,4,5)
  lbl.100688 <- c("None of the above","Prefer not to answer","Do not know","Aspirin","Ibuprofen (e.g. Nurofen)","Paracetamol","Codeine","Ranitidine (e.g. Zantac)")
  for (slot in 0:4) {
    col <- paste0("f.10004.0.", slot)
    bd[[col]] <- ordered(bd[[col]], levels = lvl.100688, labels = lbl.100688)
  }
  return(bd)
}
med_codings<-function(){
  # Load UK Biobank data-coding 4 (medication codes) and keep only the
  # rows whose meaning mentions an NSAID. Returns a data frame with the
  # original header columns (notably "coding" and "meaning").
  raw_lines <- readLines("~/UKBB_cancer_outcomes/coding4.tsv")
  coding_tbl <- data.frame(do.call(rbind, strsplit(raw_lines, split = "\t")))
  # the first row of the file holds the header; promote it to names
  names(coding_tbl) <- paste(coding_tbl[1, ])
  coding_tbl <- coding_tbl[2:nrow(coding_tbl), ]
  # NSAID generic names per https://www.nhs.uk/conditions/nsaids/
  nsaid_names <- c("ibuprofen", "naproxen", "diclofenac", "celecoxib",
                   "mefenamic acid", "etoricoxib", "indomethacin", "aspirin")
  # NOTE(review): grep() is case-sensitive; this assumes the meanings in
  # coding4.tsv are lower case -- confirm against the file.
  matched_rows <- unique(unlist(lapply(nsaid_names,
                                       function(drug) grep(drug, coding_tbl$meaning))))
  coding_tbl[matched_rows, ]
}
format_nsaids_baseline_20003<-function(){
  # Any NSAID at baseline from the verbal-interview medication field
  # (f.20003, Treatment/medication code). Operates on the global `bd`
  # and returns it with nsaid_baseline_f_20003 added ("yes"/"no").
  #
  # Field background: a trained nurse recorded regular prescription
  # medications at the baseline interview. The field covers regular
  # treatments only (not short courses, over-the-counter medicines,
  # vitamins or supplements); doses/formulations were not recorded and
  # free-text entries were coded afterwards where possible.
  #
  # Performance fix vs. the previous version: the full column subset
  # (Temp <- bd[, Names]) was rebuilt inside the loop on every one of
  # the rows (quadratic copying) and each row index was printed. The
  # subset is now taken once and the row scan is vectorised. The
  # yes/no coding is unchanged: `%in%` never returns NA, so every row
  # gets a definite value. (An earlier commented-out merge-based
  # implementation was removed.)
  Med2 <- med_codings()
  nsaid_codings <- as.numeric(Med2$coding)
  # fixed = TRUE so the "." in the field id is matched literally,
  # not as a regex wildcard
  Names <- names(bd)[grep("20003.0", names(bd), fixed = TRUE)]
  med_cols <- bd[, Names]
  any_nsaid <- apply(med_cols, 1, function(row) any(row %in% nsaid_codings))
  bd$nsaid_baseline_f_20003 <- ifelse(any_nsaid, "yes", "no")
  return(bd)
}
format_aspirin_baseline_20003<-function(){
  # Aspirin at baseline from the verbal-interview medication field
  # (f.20003), using the aspirin-containing entries of data-coding 4.
  # Operates on the global `bd` and returns it with
  # aspirin_baseline_f_20003 added ("yes"/"no").
  #
  # Performance fix, mirroring format_nsaids_baseline_20003(): the
  # column subset is taken once outside the row scan instead of being
  # rebuilt (and the row index printed) on every iteration.
  Med2 <- med_codings()
  aspirin_codings <- as.numeric(Med2$coding[grep("aspirin", Med2$meaning)])
  # fixed = TRUE so the "." in the field id is matched literally
  Names <- names(bd)[grep("20003.0", names(bd), fixed = TRUE)]
  med_cols <- bd[, Names]
  has_aspirin <- apply(med_cols, 1, function(row) any(row %in% aspirin_codings))
  bd$aspirin_baseline_f_20003 <- ifelse(has_aspirin, "yes", "no")
  return(bd)
}
cleanup_names<-function(){
  # Reduce the global df_split to identifiers, raw UKBB fields (f.*)
  # and the derived incident/overall outcome columns, dropping the
  # intermediate flag and case columns created during derivation.
  kept <- df_split %>%
    select("projectID", "geneticID",
           starts_with("f."), starts_with("incident"), starts_with("overall"))
  intermediates <- c("incident.flag", "overall_cases", "overall_cases2")
  kept[, setdiff(names(kept), intermediates)]
}
# Derive incident and overall lung-cancer case/control outcomes from a
# wide UK Biobank extract.
#
# dat: one row per participant; lung and other cancer diagnoses arrive
#      packed as single "/"-separated strings of the form
#      ICD_code/date_of_diagnosis/histology_code/behaviour_code/age_at_diagnosis,
#      alongside enrolment date (f.200.0.0) and self-report columns.
#
# Returns dat (as "df_split") with derived columns added, chiefly
# incident_lung_cancer and overall_lung_cancer coded
# 1 = control, 2 = case, NA = neither (excluded from analyses).
lung_cancer_function2<-function(dat=NULL){
# NOTE(review): setwd() changes the session working directory as a side
# effect and is never restored -- confirm nothing downstream relies on it.
setwd("~/UKBB_cancer_outcomes")
######################################################### Format results #################################################################################
# 1. split the cancer diagnoses: #ICD_code/date_of_diagnosis/histology_code/behaviour_code/age_at_diagnosis
# 2. format columns
# 3. generate incidence of cancer flag
# 4. generate tumour behaviour flag
# 5. define controls
# 6. define incident cases
# 7. define overall cases
# 8. tidy up data
library(tidyr); library(dplyr)
#1. separate the cancer data into columns
print("performing task 1. separation...")
Df<-dat
Df2 <-separate(Df, lungICD91, into = c("lung.ICD9.1", "lung.ICD9.date.diagnosis.1", "lung.ICD9.histology.1", "lung.ICD9.behaviour.1", "lung.ICD9.age_diagnosis.1"), sep = "/")
Df3 <-separate(Df2, lungICD92, into = c("lung.ICD9.2", "lung.ICD9.date.diagnosis.2", "lung.ICD9.histology.2", "lung.ICD9.behaviour.2", "lung.ICD9.age_diagnosis.2"), sep = "/")
Df4 <-separate(Df3, lungICD101, into = c("lung.ICD10.1", "lung.ICD10.date.diagnosis.1", "lung.ICD10.histology.1", "lung.ICD10.behaviour.1", "lung.ICD10.age_diagnosis.1"), sep = "/")
Df5 <-separate(Df4, lungICD102, into = c("lung.ICD10.2", "lung.ICD10.date.diagnosis.2", "lung.ICD10.histology.2", "lung.ICD10.behaviour.2", "lung.ICD10.age_diagnosis.2"), sep = "/")
Df6 <-separate(Df5, lungICD103, into = c("lung.ICD10.3", "lung.ICD10.date.diagnosis.3", "lung.ICD10.histology.3", "lung.ICD10.behaviour.3", "lung.ICD10.age_diagnosis.3"), sep = "/")
Df7 <-separate(Df6, otherICD91, into = c("other.ICD9.1", "other.ICD9.date.diagnosis.1", "other.ICD9.histology.1", "other.ICD9.behaviour.1", "other.ICD9.age_diagnosis.1"), sep = "/")
Df8 <-separate(Df7, otherICD92, into = c("other.ICD9.2", "other.ICD9.date.diagnosis.2", "other.ICD9.histology.2", "other.ICD9.behaviour.2", "other.ICD9.age_diagnosis.2"), sep = "/")
Df9 <-separate(Df8, otherICD93, into = c("other.ICD9.3", "other.ICD9.date.diagnosis.3", "other.ICD9.histology.3", "other.ICD9.behaviour.3", "other.ICD9.age_diagnosis.3"), sep = "/")
Df10 <-separate(Df9, otherICD94, into = c("other.ICD9.4", "other.ICD9.date.diagnosis.4", "other.ICD9.histology.4", "other.ICD9.behaviour.4", "other.ICD9.age_diagnosis.4"), sep = "/")
Df11 <-separate(Df10, otherICD95, into = c("other.ICD9.5", "other.ICD9.date.diagnosis.5", "other.ICD9.histology.5", "other.ICD9.behaviour.5", "other.ICD9.age_diagnosis.5"), sep = "/")
Df12 <-separate(Df11, otherICD96, into = c("other.ICD9.6", "other.ICD9.date.diagnosis.6", "other.ICD9.histology.6", "other.ICD9.behaviour.6", "other.ICD9.age_diagnosis.6"), sep = "/")
Df13 <-separate(Df12, otherICD97, into = c("other.ICD9.7", "other.ICD9.date.diagnosis.7", "other.ICD9.histology.7", "other.ICD9.behaviour.7", "other.ICD9.age_diagnosis.7"), sep = "/")
Df14 <-separate(Df13, otherICD98, into = c("other.ICD9.8", "other.ICD9.date.diagnosis.8", "other.ICD9.histology.8", "other.ICD9.behaviour.8", "other.ICD9.age_diagnosis.8"), sep = "/")
Df15 <-separate(Df14, otherICD101, into = c("other.ICD10.1", "other.ICD10.date.diagnosis.1", "other.ICD10.histology.1", "other.ICD10.behaviour.1", "other.ICD10.age_diagnosis.1"), sep = "/")
Df16 <-separate(Df15, otherICD102, into = c("other.ICD10.2", "other.ICD10.date.diagnosis.2", "other.ICD10.histology.2", "other.ICD10.behaviour.2", "other.ICD10.age_diagnosis.2"), sep = "/")
Df17 <-separate(Df16, otherICD103, into = c("other.ICD10.3", "other.ICD10.date.diagnosis.3", "other.ICD10.histology.3", "other.ICD10.behaviour.3", "other.ICD10.age_diagnosis.3"), sep = "/")
Df18 <-separate(Df17, otherICD104, into = c("other.ICD10.4", "other.ICD10.date.diagnosis.4", "other.ICD10.histology.4", "other.ICD10.behaviour.4", "other.ICD10.age_diagnosis.4"), sep = "/")
Df19 <-separate(Df18, otherICD105, into = c("other.ICD10.5", "other.ICD10.date.diagnosis.5", "other.ICD10.histology.5", "other.ICD10.behaviour.5", "other.ICD10.age_diagnosis.5"), sep = "/")
Df20 <-separate(Df19, otherICD106, into = c("other.ICD10.6", "other.ICD10.date.diagnosis.6", "other.ICD10.histology.6", "other.ICD10.behaviour.6", "other.ICD10.age_diagnosis.6"), sep = "/")
df_split <-separate(Df20, otherICD107, into = c("other.ICD10.7", "other.ICD10.date.diagnosis.7", "other.ICD10.histology.7", "other.ICD10.behaviour.7", "other.ICD10.age_diagnosis.7"), sep = "/")
# drop the intermediate copies (all local to this call)
rm(list = c("Df2", "Df3", "Df4", "Df5", "Df6", "Df7", "Df8", "Df9", "Df10", "Df11", "Df12", "Df13", "Df14", "Df15", "Df16", "Df17", "Df18", "Df19", "Df20"))
# str() does print inside a function (it writes via cat), so these two
# calls emit a structure dump of the before/after data frames
str(Df, list.len=ncol(Df))
str(df_split, list.len=ncol(df_split))
# rm(Df)
# 2. format the columns
print("2. formatting columns...")
df_split$enroll <-as.Date(df_split$f.200.0.0)
# any column whose name contains "date" becomes Date; "age" becomes numeric
date <- grepl("date", names(df_split))
df_split[,date] <-lapply(df_split[, date, drop=FALSE], as.Date)
age <- grepl("age", names(df_split))
df_split[,age] <-lapply(df_split[, age, drop=FALSE], as.numeric)
# 3. generate incident cancer flags: incidence is classed as cancer cases diagnosed after enrolment to UKBB (var: f.200.0.0, Date of consenting to join UK Biobank)
print("generating incidence flag")
# Get earliest date for the cancer (only need the first instance)
# pmin over the first lung ICD9/ICD10 dates; na.rm = T means a date in
# either coding system suffices (NA only when both are missing)
df_split <- df_split %>% mutate(earliest_date = pmin(lung.ICD9.date.diagnosis.1, lung.ICD10.date.diagnosis.1, na.rm =T))
# ">=" means a diagnosis on the enrolment day itself counts as incident;
# rows with no lung diagnosis date stay NA
df_split$incident.flag <- ifelse(df_split$earliest_date >= df_split$enroll,1,0)
# NOTE(review): table() inside a function is computed but not
# auto-printed; wrap in print() if these counts should be visible
table(df_split$incident.flag)
# 4. generate behaviour flag: only using codes: 3, 6, 7, & 9 see below
# behaviour levels: "Malignant, primary site","Malignant, microinvasive","Malignant, metastatic site","Malignant, uncertain whether primary or metastatic site"
# restrict to "Malignant, primary site"
# vast majority of cancers are "Malignant, primary site"
table(df_split$lung.ICD9.behaviour.1)
table(df_split$lung.ICD9.behaviour.2)
table(df_split$lung.ICD10.behaviour.1)
table(df_split$lung.ICD10.behaviour.2)
table(df_split$lung.ICD10.behaviour.3)
print("generating behaviour flag")
# NOTE(review): ifelse() propagates NA, so participants whose behaviour
# columns are all NA (no lung diagnosis) get behaviour.flag = NA, not 0;
# the flag is 0 only when every behaviour value is present and non-primary
df_split$behaviour.flag <-
ifelse(
df_split$lung.ICD9.behaviour.1 == "Malignant, primary site" |
df_split$lung.ICD9.behaviour.2 == "Malignant, primary site" |
df_split$lung.ICD10.behaviour.1 == "Malignant, primary site" |
df_split$lung.ICD10.behaviour.2 == "Malignant, primary site" |
df_split$lung.ICD10.behaviour.3 == "Malignant, primary site"
,1,0)
# df_split$behaviour.flag <-
#   ifelse(
#     df_split$lung.ICD9.behaviour.1 == "Malignant, primary site" |
#       df_split$lung.ICD9.behaviour.1 == "Malignant, microinvasive" |
#       df_split$lung.ICD9.behaviour.1 == "Malignant, metastatic site" |
#       df_split$lung.ICD9.behaviour.1 == "Malignant, uncertain whether primary or metastatic site" |
#       df_split$lung.ICD9.behaviour.2 == "Malignant, primary site" |
#       df_split$lung.ICD9.behaviour.2 == "Malignant, microinvasive" |
#       df_split$lung.ICD9.behaviour.2 == "Malignant, metastatic site" |
#       df_split$lung.ICD9.behaviour.2 == "Malignant, uncertain whether primary or metastatic site" |
#       df_split$lung.ICD10.behaviour.1 == "Malignant, primary site" |
#       df_split$lung.ICD10.behaviour.1 == "Malignant, microinvasive" |
#       df_split$lung.ICD10.behaviour.1 == "Malignant, metastatic site" |
#       df_split$lung.ICD10.behaviour.1 == "Malignant, uncertain whether primary or metastatic site" |
#       df_split$lung.ICD10.behaviour.2 == "Malignant, primary site" |
#       df_split$lung.ICD10.behaviour.2 == "Malignant, microinvasive" |
#       df_split$lung.ICD10.behaviour.2 == "Malignant, metastatic site" |
#       df_split$lung.ICD10.behaviour.2 == "Malignant, uncertain whether primary or metastatic site" |
#       df_split$lung.ICD10.behaviour.3 == "Malignant, primary site" |
#       df_split$lung.ICD10.behaviour.3 == "Malignant, microinvasive" |
#       df_split$lung.ICD10.behaviour.3 == "Malignant, metastatic site" |
#       df_split$lung.ICD10.behaviour.3 == "Malignant, uncertain whether primary or metastatic site",
#     1,
#     0
#   )
table(df_split$behaviour.flag)
# 5. generate controls: controls are participants that do not have a cancer of interest code or any other cancer code including ICD10:D codes
# controls also have no self-report of cancers
print("defining controls...")
# control flags (controls =1, others =0)
df_split$controls <- ifelse(
is.na(df_split$lung.ICD9.1) &
is.na(df_split$lung.ICD9.2) &
is.na(df_split$other.ICD9.1) &
is.na(df_split$other.ICD9.2) &
is.na(df_split$other.ICD9.3) &
is.na(df_split$other.ICD9.4) &
is.na(df_split$other.ICD9.5) &
is.na(df_split$other.ICD9.6) &
is.na(df_split$other.ICD9.7) &
is.na(df_split$other.ICD9.8) &
is.na(df_split$lung.ICD10.1) &
is.na(df_split$lung.ICD10.2) &
is.na(df_split$lung.ICD10.3) &
is.na(df_split$other.ICD10.1) &
is.na(df_split$other.ICD10.2) &
is.na(df_split$other.ICD10.3) &
is.na(df_split$other.ICD10.4) &
is.na(df_split$other.ICD10.5) &
is.na(df_split$other.ICD10.6) &
is.na(df_split$other.ICD10.7) &
is.na(df_split$lungSelfreport) &
is.na(df_split$lungPrevelent) &
is.na(df_split$otherPrevelent),
1,
0
)
# no-op inside a function (result discarded, not printed)
names(df_split)
table(df_split$controls)
# 5. generate incident cases: participants who have a cancer of interest code diagnosed after enrolment
# Self report not included (some report NA but have a diagnosis, and some report cancer but these are carcinoma in situ ICD10:D codes)
print("defining incident cases...")
# define incident cases (2=cases)
df_split$cases <- ifelse(df_split$incident.flag ==1 & df_split$behaviour.flag ==1, 2, 0) # cases
table(df_split$cases)
# make incident lung cancer outcome
df_split$incident_lung_cancer <- NA
df_split$incident_lung_cancer <- ifelse(df_split$controls==1, 1, ifelse(df_split$cases==2, 2, NA))
table(df_split$incident_lung_cancer)
# 6. make overall cancer outcome (these are cases diagnosed both before and after enrolment)
print("generating overall cancer cases")
# define overall cases (2=cases)
#df_split$overall_cases <- ifelse(df_split$lungPrevelent ==1 & df_split$behaviour.flag ==1, 2, 0) # cases
#table(df_split$overall_cases); table(df_split$overall_cases2)
df_split$overall_cases <- ifelse(
!is.na(df_split$lung.ICD9.1) |
!is.na(df_split$lung.ICD9.2) |
!is.na(df_split$lung.ICD10.1) |
!is.na(df_split$lung.ICD10.2) |
!is.na(df_split$lung.ICD10.3),
2,
0
)
df_split$overall_cases2 <- ifelse(df_split$overall_cases ==2 & df_split$behaviour.flag ==1, 2, 0) # cases
table(df_split$overall_cases); table(df_split$overall_cases2)
# make overall lung cancer outcome
df_split$overall_lung_cancer <- NA
df_split$overall_lung_cancer <- ifelse(df_split$controls==1, 1, ifelse(df_split$overall_cases2==2, 2, NA))
return(df_split)
# NOTE(review): everything below return() is unreachable; kept as
# reference for the ID-linking / BOLT-LMM export steps.
# table(df_split$overall_lung_cancer)
# # 7. Numbers and tidying
# bc <- df_split[,c("projectID", "f.31.0.0", "incident_lung_cancer", "overall_lung_cancer")]
# #cases =2, controls =1
# print("numbers for incident cancer")
# table(bc$incident_lung_cancer)
# print("numbers for overall cancer")
# table(bc$overall_lung_cancer)
# # formatting for BOLT LMM pipeline:
# # a. link with genetic IEU IDs
# library(readr)
# linker <- read_csv("../linker.csv") #cols = ieu, app
# print("linking IDs")
# bc <- merge(bc, linker, by.x = "projectID", by.y = "app") #not all match re-do the numbers
# #cases =2, controls =1
# print("numbers for incident cancer")
# table(bc$incident_lung_cancer)
# print("numbers for overall cancer")
# table(bc$overall_lung_cancer)
# #These files should be space delimited text files.
# #• The first two columns must be FID and IID (the PLINK identifiers of an individual); any number of columns may follow.
# #• Values in the column should be numeric.
# #• Case/control phenotypes should be encoded as 1=unaffected (control), 2=affected (case).
# bc$FID <-bc$ieu
# bc$IID <-bc$ieu
# ## Now merge with genetic samples and covariates (complete cases) to get actual case/control numbers for the GWAS
# sample <- read.table("../sample.txt", header=T, stringsAsFactors=F)
# covars <- read.table("../covariates.txt", header=T, stringsAsFactors=F)
# df <- merge(bc, sample, by.x = "FID", by.y = "FID")
# df <- merge(df, covars, by.x = "FID", by.y = "FID")
# inc_df <- subset(df, df$incident_lung_cancer !="NA" & sex.y !="NA")
# overall_df <- subset(df, df$overall_lung_cancer !="NA" & sex.y !="NA")
# print("numbers for incident cancer")
# table(inc_df$incident_lung_cancer)
# print("numbers for overall cancer")
# table(overall_df$overall_lung_cancer)
# write.table(bc[,c("FID", "IID", "incident_lung_cancer", "overall_lung_cancer")], file="../UKBB_lung_cancer.txt", sep=" ", row.names = F, quote = F)
}
49a248716bbf713fc078bf432a23a1733c37bf61 | b44b55d4c9326d2808a6bd207ffe6efdf2098e03 | /working-script.R | cb121ec8f0bad56a1c080003d5c435c341a13d5b | [
"MIT"
] | permissive | WL-Biol185-ShinyProjects/General-Election-Project | 574ece93b768929618ecf6f166f481138164b623 | d5f206d95518c54d23afca7b8938557448d9d778 | refs/heads/master | 2020-08-15T13:59:23.059791 | 2019-12-12T18:56:55 | 2019-12-12T18:56:55 | 215,354,244 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,715 | r | working-script.R | # Data Manipulation; Thursday 10-24-2019
#From Matt's Push at 12:34PM:
library(ggplot2)
library(dplyr)
library(leaflet)
# Pull in the raw county-level presidential returns.
# NOTE(review): countypres_2000_2016 is not created in this script; it
# is assumed to already be loaded in the session -- confirm the load step.
# Delete unwanted columns & rename dataset
countyData <- select(countypres_2000_2016, -c(office, version))
# Replace every NA in the whole data frame with the string "other".
# NOTE(review): this touches *all* columns -- any numeric column that
# contains NA is coerced to character here. Confirm this is intended.
countyData[is.na(countyData)] <- "other"
View(countyData)
#Making "green" = "other" under "party" column
countyData$party[countyData$party=="green"] <- "other"
#Playing around with data visualization using ggplot2 functions
# NOTE(review): density plots over categorical axes (party, state_po)
# are exploratory sketches, not final figures.
countyData %>%
  ggplot(aes(party, fill = state_po)) + geom_density(alpha = 0.5)
countyData %>%
  ggplot(aes(state_po, fill = party)) + geom_density(alpha = 0.2)
countyData %>%
  ggplot(aes(party)) + geom_density()
#Let's try a barplot
countyData %>%
  ggplot(aes(state_po, party)) + geom_bar(stat = 'identity')
# Data Manipulation; Tuesday 10-29-2019
# I am going to try and filter the "state" categorical variables so that only east/west/central states are shown
eastCoast <- filter(countyData, state_po %in% c("ME", "NH", "MA", "RI", "CT", "NY", "NJ", "DE", "MD", "VA", "NC", "SC", "GA", "FL"))
#West Coast & extraneous (non-contiguous) states
westCoast <- filter(countyData, state_po %in% c("CA", "OR", "WA", "AK", "HI"))
# rest of the states
otherStates <- filter(countyData, state_po %in% c("ID", "NV", "AZ", "UT", "WY", "CO", "NM", "MT", "ND", "SD", "MN", "IA", "WI", "NE", "KS", "MO", "IL", "TX", "OK", "AR", "LA", "MS", "TN", "AL", "KY", "IN", "OH", "MI", "WV", "PA", "VT"))
eastCoast %>%
  ggplot(aes(state_po, fill = party)) + geom_density(alpha = 0.3)
# Here I try a count plot instead
eastCoast %>%
  ggplot(aes(state_po, party)) + geom_count()
eastCoast %>%
  ggplot(aes(state_po)) + geom_bar()
|
1fd54aa7c113fe9dc6cc06dcee1c39af7e2fb65f | c023b43276dd1f2092b443394d94453e4dbc1493 | /man/BSSprep.Rd | adf4c425dd0855dfd1b38bb569b8b145866551f1 | [] | no_license | cran/BSSprep | d69f49cbe88e6bd79371783eb6decaa602bef0ce | c280051ff82c7934640d194bbe2b67ae83d3f2cd | refs/heads/master | 2023-03-25T01:48:42.328824 | 2021-03-29T08:32:16 | 2021-03-29T08:32:16 | 352,696,913 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,434 | rd | BSSprep.Rd | \name{BSSprep}
\alias{BSSprep}
\title{
Whitening of Multivariate Data
}
\description{
A function for data whitening.
}
\usage{
BSSprep(X)
}
\arguments{
\item{X}{A numeric matrix. Missing values are not allowed.}
}
\details{
A \eqn{p}-variate \eqn{{\bf Y}}{Y} with \eqn{T} observations is whitened, i.e. \eqn{{\bf Y}={\bf S}^{-1/2}({\bf X}_t - \frac{1}{T}\sum_{t=1}^T {\bf X}_{t})}{Y = S^(-1/2)*(X_t - (1/T)*sum_t(X_t))}, \if{html}{for \eqn{t = 1, \ldots, T},}
where \eqn{{\bf S}}{S} is the sample covariance matrix of \eqn{{\bf X}}{X}.
This is often need as a preprocessing step like in almost all blind source separation (BSS) methods. The function is implemented using C++ and returns the whitened data matrix as well as the ingredients to back transform.
}
\value{
A list containing the following components:
\item{Y }{The whitened data matrix.}
\item{X.C }{The mean-centered data matrix.}
\item{COV.sqrt.i }{The inverse square root of the covariance matrix of X.}
\item{MEAN }{Mean vector of X.}
}
\author{
Markus Matilainen, Klaus Nordhausen
}
\examples{
n <- 100
X <- matrix(rnorm(10*n) - 1, nrow = n, ncol = 10)
res1 <- BSSprep(X)
res1$Y # The whitened matrix
colMeans(res1$Y) # should be close to zero
cov(res1$Y) # should be close to the identity matrix
res1$MEAN # Should hover around -1 for all 10 columns
}
\keyword{ multivariate }
\keyword{ ts } |
2720fdaa3c2b0144d4ffa100aa35db601a92889d | fa795a12f18341e713d9142893f62a0625a9838a | /2-create-set-list-template.R | 45cc7280d93d91fcf1823c9faa25225997318f2c | [] | no_license | loosely-covered/set_list_ty_caton_2018_06 | 3030eb3d6ba8f8cd1105174b6d01ecbcdca182de | 7fec26b2f7c246f792f1f4cfe14e618ed81fdafc | refs/heads/master | 2022-11-08T06:24:04.990359 | 2020-03-02T22:14:30 | 2020-03-02T22:14:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,682 | r | 2-create-set-list-template.R | library(googlesheets)
library(httr)
library(rvest)
library(stringr)
library(tidyverse)

# Authenticate with Google Sheets and list the available workbooks
# (the outer parentheses auto-print the listing).
(my_sheets <- gs_ls())

# Open the Loosely Covered set-list workbook and pick the gig tab.
set_lists <- gs_title("LC Gigs Set Lists")
gig_name <- "Master"

# Sanitise artist/title text for songlyrics.com URLs and local file
# names: slashes become dashes, ampersands become "n", periods vanish.
# Artists drop apostrophes entirely while titles turn them into dashes.
# NOTE(review): songlyrics.com titles usually drop apostrophes too --
# confirm the dash form resolves for titles containing apostrophes.
sanitise <- function(text, apostrophe_replacement) {
  text %>%
    str_replace_all("/", "-") %>%
    str_replace_all("’", apostrophe_replacement) %>%
    str_replace_all("'", apostrophe_replacement) %>%
    str_replace_all("&", "n") %>%
    str_replace_all("\\.", "")
}

set_list <- set_lists %>%
  gs_read(ws = gig_name) %>%
  mutate(
    Artist       = sanitise(Artist, ""),
    Title        = sanitise(Title, "-"),
    Artist_Title = str_to_lower(str_c("https://www.songlyrics.com/", Artist, "/", Title, "-lyrics/")),
    Artist_Title = str_replace_all(Artist_Title, "\\s+", "-"),
    Title_dash   = str_replace_all(Title, " ", "-")
  )

# Write the gig-specific _bookdown.yml for the lead-sheet book:
# header, one rmd entry per song, then the closing bracket and output dir.
yml_header <- str_c('book_filename: "', gig_name, '"\n', 'rmd_files: [')
write(yml_header, file = "_bookdown.yml")
song_entries <- paste0('\t"lead_sheets/',
                       set_list$Title_dash,
                       '.md",')
write(song_entries, file = "_bookdown.yml", append = TRUE)
yml_footer <- paste0('\n]\n',
                     'output_dir: docs')
write(yml_footer, file = "_bookdown.yml", append = TRUE)
|
a35764212ad18dcf417402383b0d4debf300f32e | 4fd86d6cbff5c05e5edddd137208755d03bb8f26 | /Analyses/BEST - Derivation Opportunitions Mean RT.r | 203f2c406b94cff0c329eaa9502fcc6a6a4f69c8 | [] | no_license | Sean-Hughes/Relational_Complexity_Derivation | df94a74f7f5b0397bcd7b4a6fa3ce46388d39ab5 | 28538c896db7e3943317508d1ec8c74ea15c7782 | refs/heads/master | 2020-09-15T10:34:41.531672 | 2016-09-30T08:04:45 | 2016-09-30T08:04:45 | 65,979,157 | 2 | 1 | null | 2016-09-29T21:01:53 | 2016-08-18T08:22:34 | R | UTF-8 | R | false | false | 11,554 | r | BEST - Derivation Opportunitions Mean RT.r | ########################################################################
# Automated reporting of Bayesian Estimation Supersedes the t Test
# (BEST: Kruschke, 2013), a Bayesian alternative to the t test.
# Author: Ian Hussey (ian.hussey@ugent.be)
# see github.com/ianhussey/automatedreporting
# Thanks to John Kruschke for feedback on how to report results and
# to Mike Meredith for help with the inner workings of the BEST package.
# License: GPLv3+
# Version: 1.0
# Model to describe data
# means of both conditions (μ1, μ2),
# SDs of both conditions (σ1, σ2),
# shared normality parameter (ν).
# Prior distribution
# Kruschke (2013) describes a specific broad/vague default prior,
# HOWEVER THE BEST PACKAGE USED HERE EMPLOYS A DIFFERENT DEFAULT PRIOR TO THAT DESCRIBED IN THE 2013 ARTICLE.
# Kruschke (2016, personal communication) argues that both are equally broad and vague.
# For each sample yi in (y1, y2),
# μi = normal(M = mean(yi), SD = sd(yi)*5))
# σi = gamma(Mo = sd(yi), SD = sd(yi)*5)
# ν = gamma(M = 30, SD = 30)
# Assumptions of script
# 1. Comparison value (compVal) between conditions = 0
# 2. ROPE is placed on effect size (rather than mean group difference)
# 3. Decision making regarding whether the effect size's HDI includes zero assumes a unimodal plot/single interval,
# however this should be checked against the plot.
########################################################################
# Clean the workspace
# NOTE(review): rm(list = ls()) only clears objects, not loaded
# packages or options; restarting R is the stronger reset. Kept to
# preserve the original interactive workflow.
rm(list=ls())

########################################################################
## Dependencies

library(BEST)
library(dplyr)
library(reshape2)

########################################################################
# Specific data, variables, and parameters of test

# labels used when assembling the reported text
DV_name <- "mean RTs in the derivation opportunities task"
condition_a_name <- "the low condition"
condition_b_name <- "the high condition"
analysis_file_name <- "BEST - deriv opps RTs.RData"
output_file_name <- "BEST output - deriv opps RTs.txt"
ROPE <- c(-0.2, 0.2) # region of practical equivalence (ROPE) for assessing group equality.

# working directory where output will be saved
setwd("~/Git/Derivation study/Analyses/")

# Data acquisition
data_df <-
  read.csv("~/Git/Derivation study/Data processing/processed data for analysis.csv") %>%
  filter(exclude == FALSE) # exclude participants who met any of the three mastery criteria

# BEST test
# Fix: the data frame was previously attach()ed to the search path and
# never detach()ed, leaving its columns masking other objects for the
# rest of the session. Columns are now referenced explicitly instead.
BEST <- BESTmcmc(data_df$deriv_opps_rt_mean[data_df$condition == "low"],   # SET THE DV AND CONDITION NAMES HERE
                 data_df$deriv_opps_rt_mean[data_df$condition == "high"],  # SET THE DV AND CONDITION NAMES HERE
                 burnInSteps = 1000,     # Increase this if convergence is insufficient
                 numSavedSteps = 1e+05,  # Increase this or thinSteps if effective sample size is insufficient
                 thinSteps = 1)
########################################################################
# save to/read from disk

# persist the MCMC object so the (slow) sampling step can be skipped
# on subsequent runs
save(BEST, file = analysis_file_name)

# Load previously saved analysis from disk
#load(file = analysis_file_name)

########################################################################
# tidy up output

# Summarise the posterior, then reshape into a plain data frame with
# the parameter names as a column, rounded values, and readable names
# in place of the awkward "%InROPE" / "%>compVal" columns.
posterior_summary <- as.data.frame(summary(BEST, ROPEeff = ROPE))
posterior_summary <- tibble::rownames_to_column(posterior_summary)
posterior_summary <- dplyr::mutate(posterior_summary,
                                   mode  = round(mode, 2),
                                   HDIlo = round(HDIlo, 2),
                                   HDIup = round(HDIup, 2),
                                   percent_greater_than_zero = round(`%>compVal`, 2),
                                   percent_in_rope = round(`%InROPE`, 2))
BEST_output_df <- dplyr::select(posterior_summary, -`%InROPE`, -`%>compVal`)
########################################################################
## MCMC convergence and n.eff assessment
# Convert the strings returned by print(BEST) into a usable data frame.
# This is a bit hacky but it works: rows 4-8 of the printed output hold
# the five parameter lines, which are split on whitespace.
# NB!! this is dependent on the width of the RStudio console being
# adequately wide to print all columns on one row, even though this
# printing is not shown -- the is.na() guard below detects the failure.
n_eff_strings <-
  capture.output(print(BEST)) %>% # capture print as variable
  as.data.frame() %>% # convert to data frame for easier subsetting
  tibble::rownames_to_column() %>% # convert rowname to column for subsetting
  dplyr::filter(rowname > 3) %>% # trim top and bottom rows
  dplyr::filter(rowname <= 8) %>%
  dplyr::select(-rowname)
colnames(n_eff_strings) <- "strings"
# Split each captured line into named columns, then reduce to two
# booleans: all Rhat <= 1.05 (convergence) and all n.eff > 10,000
# (sufficient effective sample size).
MCMC_checks <-
  reshape2::colsplit(string = n_eff_strings$strings,
                     pattern = "\\s+", # treat one or more spaces as a column break (uses regular expressions)
                     names = c("parameter", "mean", "sd", "median", "HDIlo", "HDIup", "Rhat", "n.eff")) %>%
  dplyr::select(parameter, Rhat, n.eff) %>%
  dplyr::mutate(Rhat_sufficient = ifelse(Rhat > 1.05, 0, 1), # insufficient convergence if above threshold
                n_eff_sufficient = ifelse(n.eff <= 10000, 0, 1)) %>% # insufficient effective sample size if at or below threshold
  dplyr::summarize(Rhat_sufficient = as.logical(min(Rhat_sufficient)),
                   n_eff_sufficient = as.logical(min(n_eff_sufficient)))
# NA in either check means the string parsing failed (console too narrow)
if(is.na(MCMC_checks[1,1]) | is.na(MCMC_checks[1,2])) print("************** \n ERROR: the console width is to narrow to print the results correctly! \n **************")

########################################################################
# View results

# full output (auto-printed when run interactively at top level)
BEST_output_df
# plot the posterior distributions with the ROPE marked
plotAll(BEST, ROPEeff = ROPE, showCurve = TRUE)
########################################################################
## extract individual variables for easier printing

# pull a single statistic for a single parameter row out of the tidied
# posterior summary
extract_stat <- function(parameter, statistic) {
  BEST_output_df[BEST_output_df$rowname == parameter, statistic]
}

MCMC_convergence <- MCMC_checks$Rhat_sufficient
MCMC_effective_n <- MCMC_checks$n_eff_sufficient

es_mode     <- extract_stat("effSz", "mode")
es_hdi_low  <- extract_stat("effSz", "HDIlo")
es_hdi_high <- extract_stat("effSz", "HDIup")
es_in_rope  <- extract_stat("effSz", "percent_in_rope")

# group means, rounded for reporting
m_condition_a <- round(extract_stat("mu1", "mean"), 2)
m_condition_b <- round(extract_stat("mu2", "mean"), 2)
########################################################################
# construct strings from output

# MCMC diagnostics: report the first problem found (convergence before
# effective sample size), otherwise the all-clear message. Scalar
# if/else replaces the previous nested ifelse() calls.
if (MCMC_convergence == FALSE) {
  MCMC_checks_string <- "The MCMC chains did not converge well. NB 'burnInSteps' SHOULD BE INCREASED AND THE TEST RE-RUN."
} else if (MCMC_effective_n == FALSE) {
  MCMC_checks_string <- "The effective sample size was insufficient for one or more parameter. NB 'numSavedSteps' OR 'thinSteps' SHOULD BE INCREASED AND THE TEST RE-RUN."
} else {
  MCMC_checks_string <- "The MCMC chains converged well and had an effective sample size (ESS) greater than 10,000 for all parameters."
}

# interpret the modal effect size against Cohen's (1988) benchmarks
# (fixes the misspelling "negligable" in the reported text)
if (abs(es_mode) < 0.2) {
  es_size <- "negligible"
} else if (abs(es_mode) < 0.5) {
  es_size <- "small"
} else if (abs(es_mode) < 0.8) {
  es_size <- "medium"
} else {
  es_size <- "large"
}

# assess if >= 95% of credible effect sizes are inside the ROPE
equality_boolean <- if (es_in_rope >= 95) 1 else 0

# assess if the 95% HDI includes the zero point: the endpoints have
# opposite signs exactly when their product is negative
es_hid_includes_zero <- if ((es_hdi_low * es_hdi_high) < 0) "included zero" else "did not include zero"

# Three-way decision: credibly equal, credibly different, or undecided.
# NB even when differences are credible, >= 95% of effect sizes in the
# ROPE still means the effect is small enough to treat the groups as
# equal. (Fixes "RDI" -> "HDI" and the missing word "fell" in the
# reported text.)
if (equality_boolean == 1) {
  conclusions <- "Given that more than 95% of estimated effect sizes fell within the ROPE, the posterior distribution therefore indicated that the groups were credibly equal. "
} else if (es_hid_includes_zero == "did not include zero") {
  conclusions <- "Given that less than 95% of estimated effect sizes fell within the ROPE and the 95% HDI did not include zero, the posterior distribution therefore indicated that credible differences existed between the groups. "
} else {
  conclusions <- "Although the 95% HDI included zero, less than 95% of estimated effect sizes fell within the ROPE. As such, the posterior distribution indicated that there was great uncertainty about the magnitude of difference between the two conditions, which were neither credibly different nor credibly equal. "
}
########################################################################
# combine all output into a natural langauge string
BEST_parameters <- sprintf("Bayesian analysis (Kruschke, 2013) was used to compare differences in %s between %s and %s. The analysis accommodated the possibility of outliers by using t distributions to describe the data, and allowed for different variances across the groups. Specifically, the model employed 5 parameters to describe the data: the means of both conditions (μ1, μ2), the standard deviations of both conditions (σ1, σ2), and a shared normality parameter (ν). We employed the default prior, which is a noncommittal prior intended to have minimal impact on the posterior distribution. Specifically, for sample yi in (y1, y2), μi = normal(M = mean(yi), SD = sd(yi)*5)), σi = gamma(Mo = sd(yi), SD = sd(yi)*5), ν = gamma(M = 30, SD = 30). The posterior distribution was represented by Markov Chain Monte Carlo (MCMC) simulation methods (see Kruschke, 2013). For decision-making purposes, a region of practical equivalence (ROPE: Kruschke, 2011) for negligible effect size was defined (-0.2 < d < 0.2; Cohen, 1988). ",
DV_name,
condition_a_name,
condition_b_name)
BEST_text <- sprintf("%s The posterior distributions showed the modal estimate of the %s was %s for %s and %s for %s. The modal estimated effect size was %s (Cohen, 1988) with a 95%% Highest Density Interval that %s, Mo d = %s, 95%% HDI [%s, %s]. %s %% of estimated effect sizes fell within the ROPE. %s",
MCMC_checks_string,
DV_name,
m_condition_a,
condition_a_name,
m_condition_b,
condition_b_name,
es_size,
es_hid_includes_zero,
es_mode,
es_hdi_low,
es_hdi_high,
es_in_rope,
conclusions)
########################################################################
# write data to disk
sink(output_file_name)
cat(BEST_parameters)
cat("\n\n")
cat(BEST_text)
sink()
|
69e6f656750f4acba8d42a43f52128fa701df43c | 61bf1ab8d9bad7c74f0f73bddb34b787449684a3 | /scripts/functions.R | 695f4baf1782329ef04d6050e346462c0b763bd4 | [] | no_license | mhkhan27/AFG_2020_Surge | cdcd9784c323a2066fd5361e27f5e230249233f1 | 9caa9b974204ee1ea5f6232650679a4b8cb82027 | refs/heads/master | 2022-11-10T19:04:05.431137 | 2020-06-30T06:21:17 | 2020-06-30T06:21:17 | 272,364,231 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 818 | r | functions.R | survey_weight <- function(df,pop,df_strata,sf_strata,sf_pop){
sf_with_weights<- df %>%
group_by(!!sym(df_strata)) %>%
summarise(sample_strata_num=n()) %>%
inner_join(pop, by=c("list_displacement"= "strata"))%>% mutate(
sample_global = sum(sample_strata_num),
pop_global=sum(!!sym(sf_pop)),
survey_weight= (!!sym(sf_pop)/pop_global)/(sample_strata_num/sample_global)
)
}
survey_weight2 <- function(df,pop,df_strata,sf_strata,sf_pop){
sf_with_weights<- df %>%
group_by(!!sym(df_strata)) %>%
summarise(sample_strata_num=n()) %>%
inner_join(pop, by=c("Region..name."= "strata"))%>% mutate(
sample_global = sum(sample_strata_num),
pop_global=sum(!!sym(sf_pop)),
survey_weight= (!!sym(sf_pop)/pop_global)/(sample_strata_num/sample_global)
)
}
|
9f63ca5879e04d7c23c59675cfa7b015d0209330 | 12030eac89a13f1cb31549addb14eda03593d3b9 | /Donations Under 1k & NonIndividuals.R | 2dc9adb2a0ebf29643ac379012346daa1a98204a | [] | no_license | SethuO/Sethu-Odayappan | b76ff7de40bff860b564affc64677d08416e85ce | f8a727ee53310f2a23f3c989fbde1d81af6e3c98 | refs/heads/master | 2022-04-18T13:26:46.673454 | 2020-04-17T16:39:23 | 2020-04-17T16:39:23 | 256,570,389 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,911 | r | Donations Under 1k & NonIndividuals.R | library(tidyverse)
library(mosaic)
library(ggformula)
library(openintro)
library(clipr)
library(data1135)
Senate_Full_Contribution_Data <- read_excel("Campaign Donations Project/Senate Full Contribution Data.xlsx")
names(Senate_Full_Contribution_Data)
newdata <-filter(Senate_Full_Contribution_Data, Amount<=1000)
View(newdata)
#Prop From NonIndividuals
favstats(Amount~(Record_Type_Description=="Individual"), data=newdata)
gf_boxplot(Amount~(Record_Type_Description=="Individual"), data=newdata, bins=15)
#Feeney NonIndividuals
newdata6 <-filter(Senate_Full_Contribution_Data, Amount<=1000, Recipient=="Feeney, Paul")
favstats(Amount~(Record_Type_Description=="Individual"), data=newdata6)
#Total Donations By Sex
favstats(Amount~Sex, data=newdata)
gf_boxplot(~Amount| Sex, data=newdata, bins=15)
#Total Donations By Race
favstats(Amount~Race, data=newdata)
gf_boxplot(~Amount| Race, data=newdata, bins=15)
newdata2 <-filter(Senate_Full_Contribution_Data, Amount<=1000, Record_Type_Description == "Individual")
#Individual Donations by Sex
favstats(Amount~Sex, data=newdata2)
gf_boxplot(~Amount| Sex, data=newdata, bins=15)
newdata4 <-filter(Senate_Full_Contribution_Data, Amount<=1000,Amount>=500, Record_Type_Description == "Individual")
#Individual Donations Over $500 by Sex
favstats(Amount~Sex, data=newdata4)
#Individual Donations By Race
favstats(Amount~Race, data=newdata2)
gf_boxplot(~Amount| Race, data=newdata2, bins=15)
#Individual Donations by Recipient
favstats(Amount~Recipient, data=newdata2)
#Why are Barry Finegold's Donations so high?
newdata3 <-filter(Senate_Full_Contribution_Data, Amount<=1000, Recipient== "Finegold, Barry R.")
view(newdata3)
favstats(Amount~(Record_Type_Description=="Individual"), data=newdata3)
favstats(Amount~(Recipient== "Finegold, Barry R."), data=newdata2)
|
bbc17a6f3cc44205f7d9d994db324ea2acd31e91 | 337b492630de294eba762f728dc2b506af2c43bd | /R/sumSpeciesList.R | adda1df510d11fa8bffc89aed0ac6821f8726e7b | [
"MIT"
] | permissive | KateMMiller/forestMIDN | 316caa5381d257bcdfe401074d9f99b0cb8ea6f9 | 729a6b2bbffd8c6c532a6eb3a874a59eceefe698 | refs/heads/main | 2023-07-26T05:40:31.545355 | 2023-07-14T13:30:21 | 2023-07-14T13:30:21 | 157,909,348 | 0 | 0 | NOASSERTION | 2023-02-09T18:09:32 | 2018-11-16T18:58:18 | HTML | UTF-8 | R | false | false | 8,378 | r | sumSpeciesList.R | #' @include joinLocEvent.R
#' @include joinAdditionalSpecies.R
#' @include joinMicroShrubData.R
#' @include joinQuadSpecies.R
#' @include joinRegenData.R
#' @include joinTreeData.R
#' @include joinTreeVineSpecies.R
#' @include prepTaxa.R
#'
#' @title sumSpeciesList: summarize a species list for each plot visit
#'
#' @importFrom dplyr arrange group_by filter full_join left_join select summarize
#' @importFrom magrittr %>%
#' @importFrom purrr reduce
#'
#' @description This function summarizes all species data collected in a plot visit, including live trees,
#' microplots, quadrats, and additional species lists.
#'
#' @param park Combine data from all parks or one or more parks at a time. Valid inputs:
#' \describe{
#' \item{"all"}{Includes all parks in the network}
#' \item{"APCO"}{Appomattox Court House NHP only}
#' \item{"ASIS"}{Assateague Island National Seashore}
#' \item{"BOWA"}{Booker T. Washington NM only}
#' \item{"COLO"}{Colonial NHP only}
#' \item{"FRSP"}{Fredericksburg & Spotsylvania NMP only}
#' \item{"GETT"}{Gettysburg NMP only}
#' \item{"GEWA"}{George Washington Birthplace NM only}
#' \item{"HOFU"}{Hopewell Furnace NHS only}
#' \item{"PETE"}{Petersburg NBP only}
#' \item{"RICH"}{Richmond NB only}
#' \item{"SAHI"}{Sagamore Hill NHS only}
#' \item{"THST"}{Thomas Stone NHS only}
#' \item{"VAFO"}{Valley Forge NHP only}}
#'
#' @param from Year to start analysis, ranging from 2007 to current year
#' @param to Year to stop analysis, ranging from 2007 to current year
#'
#' @param QAQC Allows you to remove or include QAQC events.
#' \describe{
#' \item{FALSE}{Default. Only returns visits that are not QAQC visits}
#' \item{TRUE}{Returns all visits, including QAQC visits}}
#'
#' @param locType Allows you to only include plots that are part of the GRTS sample design or
#' include all plots, such as deer exclosures.
#' \describe{
#' \item{"VS"}{Only include plots that are part of the Vital Signs GRTS sample design}
#' \item{"all"}{Include all plots, such as plots in deer exclosures or test plots.}}
#'
#' @param eventType Allows you to include only complete sampling events or all sampling events
#' \describe{
#' \item{"complete"}{Default. Only include sampling events for a plot that are complete.}
#' \item{"all}{Include all plot events with a record in tblCOMN.Event, including plots missing most of the data
#' associated with that event (eg COLO-380-2018). This feature is currently hard-coded in the function.}}
#'
#' @param panels Allows you to select individual panels from 1 to 4. Default is all 4 panels (1:4).
#' If more than one panel is selected, specify by c(1, 3), for example.
#'
#' @param speciesType Allows you to filter on native, exotic or include all species.
#' \describe{
#' \item{"all"}{Default. Returns all species.}
#' \item{"native"}{Returns native species only}
#' \item{"exotic"}{Returns exotic species only}
#' \item{"invasive"}{Returns species on the Indicator Invasive List}
#' }
#'
#' @param ... Other arguments passed to function.
#'
#' @return Returns a dataframe with species list for each plot.
#'
#' @examples
#' \dontrun{
#' importData()
#'
#' # Compile number of invasive species found per plot in most recent survey for all parks
#' inv_spp <- sumSppList(speciesType = 'invasive', from = 2015, to = 2018)
#' inv_spp$present <- ifelse(is.na(inv_spp$ScientificName), 0, 1)
#' num_inv_per_plot <- inv_spp %>% group_by(Plot_Name) %>% summarize(numspp = sum(present, na.rm = T))
#'
#' # Compile species list for FRSP in 2019
#' FRSP_spp <- sumSppList(park = 'FRSP', from = 2019, speciesType = 'all')
#'
#' }
#'
#' @export
#'
#------------------------
# Joins quadrat tables and filters by park, year, and plot/visit type
#------------------------
sumSpeciesList <- function(park = 'all', from = 2007, to = as.numeric(format(Sys.Date(), "%Y")),
QAQC = FALSE, panels = 1:4,
locType = c('VS', 'all'), eventType = c('complete', 'all'),
speciesType = c('all', 'native', 'exotic', 'invasive'), ...){
# Match args and class
park <- match.arg(park, several.ok = TRUE,
c("all", "APCO", "ASIS", "BOWA", "COLO", "FRSP", "GETT", "GEWA", "HOFU", "PETE",
"RICH", "SAHI", "THST", "VAFO"))
stopifnot(class(from) == "numeric", from >= 2007)
stopifnot(class(to) == "numeric", to >= 2007)
stopifnot(class(QAQC) == 'logical')
stopifnot(panels %in% c(1, 2, 3, 4))
locType <- match.arg(locType)
eventType <- match.arg(eventType)
speciesType <- match.arg(speciesType)
options(scipen = 100)
# Set up data
arglist <- list(park = park, from = from, to = to, QAQC = QAQC, panels = panels,
locType = locType, eventType = eventType)
plot_events <- plot_events <- joinLocEvent(park = park, from = from, to = to, QAQC = QAQC, panels = panels,
locType = locType, eventType = eventType, output = 'verbose', ...) %>%
select(Plot_Name, Network, ParkUnit, ParkSubUnit, PlotTypeCode, PanelCode, PlotCode, PlotID,
EventID, SampleYear, SampleDate, cycle, IsQAQC)
if(nrow(plot_events) == 0){stop("Function returned 0 rows. Check that park and years specified have plot visits.")}
taxa_wide <- prepTaxa()
# Trees
tree_spp <- do.call(joinTreeData, c(arglist, list(status = 'live', speciesType = speciesType)))
tree_sum <- tree_spp %>% group_by(Plot_Name, PlotID, EventID, IsQAQC, SampleYear, TSN, ScientificName) %>%
summarize(BA_cm2 = sum(BA_cm2, na.rm = TRUE),
DBH_mean = mean(DBHcm, na.rm = TRUE),
tree_stems = sum(num_stems),
.groups = 'drop') %>%
filter(ScientificName != "None present")
# Regen
regen_spp <- do.call(joinRegenData,
c(arglist, list(canopyForm = "all", speciesType = speciesType)))
regen_sum <- regen_spp %>% select(Plot_Name, PlotID, EventID, IsQAQC, SampleYear, TSN, ScientificName, seed_den,
sap_den, stock) %>%
filter(ScientificName != "None present")
# Shrubs
shrubs <- do.call(joinMicroShrubData, c(arglist, list(speciesType = speciesType, valueType = 'midpoint')))
shrub_sum <- shrubs %>% select(Plot_Name, PlotID, EventID, IsQAQC, SampleYear,
TSN, ScientificName, shrub_avg_cov, shrub_pct_freq) %>%
filter(ScientificName != "None present")
# Quad species
quadspp <- suppressWarnings(do.call(joinQuadSpecies,
c(arglist, list(speciesType = speciesType,
valueType = 'averages',
returnNoCover = TRUE)))
)
quad_sum <- quadspp %>% select(Plot_Name, PlotID, EventID, IsQAQC, SampleYear, TSN,
ScientificName, quad_avg_cov, quad_pct_freq) %>%
filter(ScientificName != "None present")
# Additional Species
addspp <- do.call(joinAdditionalSpecies, c(arglist, list(speciesType = speciesType)))
addspp_sum <- addspp %>% select(Plot_Name, PlotID, EventID, IsQAQC, SampleYear, TSN,
ScientificName, addspp_present) %>%
filter(ScientificName != "None present")
sppdata_list <- list(tree_sum, regen_sum, shrub_sum, quad_sum, addspp_sum)
spp_comb <- sppdata_list %>% reduce(full_join,
by = c("Plot_Name", "PlotID", "EventID", "IsQAQC",
"SampleYear", "TSN", "ScientificName"))
spp_evs <- left_join(plot_events,
spp_comb, by = intersect(names(plot_events), names(spp_comb)))
spp_evs$ScientificName[is.na(spp_evs$ScientificName)] <- "None present"
na_cols <- c("BA_cm2", "DBH_mean", "tree_stems", "sap_den", "seed_den", "stock",
"shrub_avg_cov", "shrub_pct_freq",
"quad_avg_cov", "quad_pct_freq", "addspp_present")
spp_evs[, na_cols][is.na(spp_evs[, na_cols])] <- 0
spp_final <- spp_evs %>% arrange(Plot_Name, SampleYear, IsQAQC, ScientificName)
return(data.frame(spp_final))
} # end of function
|
4483e5aa2f63635701438fa9119928b8e3a2d0fb | 93248e00dd1111b8638cf7824ba97625819fe9d5 | /s_outlook_ghg.R | d71250666249b279cf36786ea08db2021140240d | [] | no_license | EBukin/GHGoutlook | 779e1dfa7baa72a0db5cd80ecb26035bac1d8c2e | d4c5a82b262718a6f3d91dc9aa113ff1ca6c70bd | refs/heads/master | 2021-01-24T18:46:41.281043 | 2017-10-02T20:26:43 | 2017-10-02T20:49:57 | 84,472,377 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 29,593 | r | s_outlook_ghg.R | #' ---
#' title: "Estimating GHG emission based on the OECD-FAO Outlook projections"
#' author: Eduard Bukin
#' date: 14 March 2017
#' output:
#' prettydoc::html_pretty:
#' toc: yes
#' theme: architect
#' ---
#' *****
#' # Process
#'
#' The purpose of this document is to explain the process of reproducing the GHG
#' emissions data from FAOSTAT, using the numbers of the OECD-FAO Agricultural
#' Outlook as the activity data.
#'
#' The process is structured around particular domains, data for which has to be reproduced.
#'
#' For the domains GE, GM, GU, GP and GR emissions are reproduced based on the
#' projected activity data, emissions related to other domains are treated separately
#' and data is reproduced based on various assumptions:
#' * GB, GH and GA domains are projected as a constant share of total emissions,
#' assuming a share based on the 5-year average share in the last known historical
#' period.
#' * GY domain emissions data are approximated based on the area and yields of
#' crops relevant to the consumption of nitrogenous fertilizers.
#' * GV domain data is kept at a constant level, as is assumed in
#' FAOSTAT. Alternatively, we test a situation where emissions from the organic
#' soils change at the same rate as the area utilised for
#' palm oil production.
#'
#' Below, we elaborate more explicitely on the methodology of the GHG estimation
#' for different domains.
#'
#' # Setup
#'
#' Installing packages
#+results='hide', message = FALSE, warning = FALSE
# Packages required by the analysis: install any that are missing, then attach
# all of them.
packs <- c("plyr", "tidyverse", "dplyr", "tidyr", "readxl", "stringr",
           "DT", "rmarkdown", "gridExtra", "grid", "ggplot2", "ggthemes",
           "scales", "devtools", "gridGraphics")
# install.packages() is vectorised, so all missing packages can be installed
# in a single call rather than one lapply() iteration per package.
missing_packs <- setdiff(packs, installed.packages()[, 1])
if (length(missing_packs) > 0) {
  install.packages(missing_packs, dependencies = TRUE)
}
# require() returns FALSE instead of signalling an error when a package cannot
# be attached, so check the results explicitly and fail loudly: the rest of
# the script depends on every one of these packages.
loaded <- vapply(packs, require, logical(1), character.only = TRUE)
if (!all(loaded)) {
  stop("Failed to load packages: ", paste(packs[!loaded], collapse = ", "),
       call. = FALSE)
}
#' Making sure that the number of digits displayed is large enough.
# Avoid scientific notation when printing large numbers.
options(scipen = 20)
# Source every locally developed helper function stored in the R/ directory.
for (fun_file in list.files("R/", pattern = "*.R")) {
  source(str_c("R/", fun_file))
}
#' # Loading data
#'
#' ## Outlook data
#'
#' First we load all outlook data. If there is no data savein the Rdata file we reload all data from the CSV file.
#'
# Outlook baseline data: restore the cached `ol` data frame when a cache file
# exists; otherwise read the baseline CSV and build the cache.
olRDFile <- "data/outlook.Rdata"
if (file.exists(olRDFile)) {
  # Cache hit: restores `ol` saved by a previous run.
  load(file = olRDFile)
} else {
  # No cache yet: read the baseline CSV, falling back to the local copy when
  # the master path is unavailable (a OneDrive location was used previously).
  olFile <- "C:/2017/Master/BaselineOutput.csv"
  if (!file.exists(olFile)) {
    olFile <- "data/base17.csv"
  }
  ol <- load_troll_csv(olFile, d.source = "") %>%
    select(AreaCode, ItemCode, ElementCode, Year, Value)
  save(ol, file = olRDFile)
}
#' ## FAOSTAT data
#'
#' Next step is loading all FAOSTAT data. Since FAOSTAT data combines data from
#' multiple domains, we load it all into one .Rdata file. In case there is no such file,
#' we reload all data from each domain-specific file and save it in the R data file for further use.
#'
# FAOSTAT GHG emissions bulk data: read all domain CSVs once, cache the
# combined result (`fs`, plus the `its`/`els` lookup tables) in a single
# .Rdata file, and restore from that cache on subsequent runs.
fsRDFile <- "data/all_fs_emissions.Rdata"
if(!file.exists(fsRDFile)) {
  # One bulk CSV per FAOSTAT emissions domain (agriculture and land use).
  files <-
    c("data/Emissions_Agriculture_Agriculture_total_E_All_Data_(Norm).csv",
      "data/Emissions_Agriculture_Burning_crop_residues_E_All_Data_(Norm).csv",
      "data/Emissions_Agriculture_Burning_Savanna_E_All_Data_(Norm).csv",
      "data/Emissions_Agriculture_Crop_Residues_E_All_Data_(Norm).csv",
      "data/Emissions_Agriculture_Cultivated_Organic_Soils_E_All_Data_(Norm).csv",
      "data/Emissions_Agriculture_Enteric_Fermentation_E_All_Data_(Norm).csv",
      "data/Emissions_Agriculture_Manure_applied_to_soils_E_All_Data_(Norm).csv",
      "data/Emissions_Agriculture_Manure_left_on_pasture_E_All_Data_(Norm).csv",
      "data/Emissions_Agriculture_Manure_Management_E_All_Data_(Norm).csv",
      "data/Emissions_Agriculture_Rice_Cultivation_E_All_Data_(Norm).csv",
      "data/Emissions_Land_Use_Burning_Biomass_E_All_Data_(Norm).csv",
      "data/Emissions_Land_Use_Cropland_E_All_Data_(Norm).csv",
      "data/Emissions_Land_Use_Forest_Land_E_All_Data_(Norm).csv",
      "data/Emissions_Land_Use_Grassland_E_All_Data_(Norm).csv",
      "data/Emissions_Land_Use_Land_Use_Total_E_All_Data_(Norm).csv")
  # Two-letter domain codes, positionally matched to `files` above.
  domains <- c("GT", "GB", "GH", "GA", "GV", "GE", "GU",
               "GP", "GM", "GR", "GI", "GC", "GF", "GG", "GL")
  # Read every file that exists on disk and tag its rows with the matching
  # domain code; files that are absent are silently skipped (the function
  # returns NULL for them, which ddply drops).
  fs <-
    ddply(tibble(files, domains),
          .(files),
          function(x) {
            if(file.exists(x$files)) {
              read.fs.bulk(x$files) %>%
                mutate(Domain = as.character(x$domains))
            }
          }) %>% tbl_df()
  # Lookup table: element names and units per domain.
  els <- fs %>%
    select(Domain, ElementCode, ElementName, Unit) %>%
    distinct()
  # Lookup table: item names per domain.
  its <- fs %>%
    select(Domain, ItemCode, ItemName) %>%
    distinct()
  # Keep only the columns used downstream.
  fs <-
    fs %>%
    select(Domain, AreaCode, ItemCode, ElementCode,
           Year, Value, Unit, ElementName, ItemName)
  save(fs, its, els, file = fsRDFile)
} else {
  # Cache hit: restores `fs`, `its` and `els` saved by a previous run.
  load("data/all_fs_emissions.Rdata")
}
#'
#' ## Mapping tables
#'
#' Besides data from Outlook and FAOSTAT, we also need specific mapping tables
#' which explain mappings from FAOSTAT to Outlook areas and items.
#'
# Items mapping table: links FAOSTAT item codes to Outlook item codes and
# records the sign with which each item enters an aggregation.
itemsMTFile <- "mappingTables/fs_outlook_items_mt.csv"
items_mt_spec <- cols(
  ItemCode = col_integer(),
  OutlookItemCode = col_character(),
  ItemCodeAggSign = col_character()
)
itemsMT <- read_csv(itemsMTFile, col_types = items_mt_spec)
#'
#' Table `elementsMT` describes mapping and adjustment of elements from FAOSTAT to outlook.
#'
# Elements mapping table: maps FAOSTAT element codes to Outlook element codes
# together with the numeric adjustment applied during conversion.
elementsMTFile <- "mappingTables/fs_outlook_elements_mt.csv"
elements_mt_spec <- cols(
  Domain = col_character(),
  ItemCode = col_character(),
  ElementCode = col_integer(),
  OutlookElementCode = col_character(),
  OutlookAdjustment = col_double()
)
elementsMT <- read_csv(elementsMTFile, col_types = elements_mt_spec)
#' Table `emissionsMT` describes mapping and assumption behind projection of the
#' implied emissions factor for the years of projection.
# Emissions mapping table: defines, per domain, which activity element drives
# each emission element and the lag assumption (EFLag) used when projecting
# the implied emission factor, plus the GHG involved.
emissionsMTFile <- "mappingTables/fs_outlook_emissions_mt.csv"
emissions_mt_spec <- cols(
  Domain = col_character(),
  Emissions = col_integer(),
  OutlookEmissions = col_character(),
  ActivityElement = col_character(),
  EFLag = col_integer(),
  GHG = col_character()
)
emissionsMT <- read_csv(emissionsMTFile, col_types = emissions_mt_spec)
#'
#' # Implementing the process
#'
#' ## Reproducing GR, GE, GU, GP and GM
#'
#' Domains discussed in this part are estimated based on the activity data
#' projected in the OECD-FAO Agricultural Outlook. These domains are:
#'
#' * GR - Rice cultivation
#' * GE - Enteric fermentation
#' * GM - Manure Management
#' * GU - Manure applied to soils
#' * GP - Manure left of pastures
#'
#' The overall process consists of several important steps. All steps are
#' organised in the body of the function `outlook_emissions`. This function
#' uses FAOSTAT data, outlook data and the previously loaded mapping tables
#' to reproduce emissions for the pre-defined domain. The steps of
#' reproduction are the following:
#'
#' 1. Mapping FAOSTAT areas to the outlook regions, re-estimating activity data
#' and emissions respectively. Mapping the FAOSTAT activity data to the
#' outlook activity data, aggregating FAOSTAT items to the outlook items
#' and re-estimating emissions and activity data according to the aggregations.
#' This is done with the `map_fs_data` function, which uses the items and elements
#' mapping tables and FAOSTAT data filtered to one domain. Internally, the function
#' uses `map_fs2ol` and `agg_ol_regions`, which aggregate the
#' FAOSTAT data to the outlook structure. In the mapping process, some of the
#' items and elements may be aggregated by subtracting one from another, as
#' specified in the mapping tables.
#'
#' 3. Adjusting outlook activity data to the baseline level derived from the
#' FAOSTAT historical data. This step is part of the `outlook_emissions`
#' function, where mapped FAOSTAT data is used to subset the outlook data
#' to the items and elements relevant for one domain with the function
#' `subset_outlook`. After subsetting, we apply the function `adjust_outlook_activity`
#' in order to adjust activity data from the outlook to the levels of the
#' FAOSTAT in the historical period.
#'
#' 4. At the next step we `reestimate_emissions` data based on the activity
#' data, if such was prepared in the Outlook data.
#'
#' 5. In some cases, for some items and elements, the outlook does not have any
#' activity data. In such cases, we estimate the emissions for the
#' missing items and elements combinations based on the constant share of
#' these items and elements in the known and estimated emissions.
#' The constant share is assumed based on the 5-year average share calculated
#' over the last available years. This step is made with the function `estimate_missing_emissions`.
#'
#' 6. At the next step we convert all GHG to the GHG expressed in the CO2
#' equivalent with the function `convert_ghg`.
#'
#' 7. After the numbers are re-estimated in the previous steps, we aggregate regions
#' relevant to the outlook, such as the "Big five" region, Cosimo and Aglink
#' regions and the World total. The regional aggregation is made using the
#' function `agg_ol_regions`.
#'
#'
#' We perform all of the above-explained calculations for one domain at a time. That allows
#' us to apply the same functions and approaches to every domain, maintaining
#' methodological consistency.
#'
#' Reproducing data.
# Reproduce emissions for the five activity-driven domains, one domain at a
# time, with the same `outlook_emissions` routine described in the narrative
# above:
gm <- outlook_emissions(fs, ol, DomainName = "GM")  # Manure Management
ge <- outlook_emissions(fs, ol, DomainName = "GE")  # Enteric Fermentation
gu <- outlook_emissions(fs, ol, DomainName = "GU")  # Manure applied to soils
gp <- outlook_emissions(fs, ol, DomainName = "GP")  # Manure left on pasture
gr <- outlook_emissions(fs, ol, DomainName = "GR")  # Rice Cultivation
#'
#' ## Reproducing GV
#'
#' For the GV - Cultivated Organic Soils domain we repeat the last known values.
# GV (Cultivated Organic Soils) history: taken from the FAOSTAT Agriculture
# total (GT) domain, mapped to the Outlook structure, restricted to the GV
# item and to individual Outlook countries.
gv_fs <-
  fs %>%
  filter(Year %in% c(2000:2016), Domain == "GT") %>%
  map_fs_data(., fsYears = c(2000:2016)) %>%
  filter(ItemCode == "GV") %>%
  filter(AreaCode %in% get_ol_countries())
# Keep only the last available historical year; taking the mean over all
# non-Value dimensions is effectively a pass-through for a single year
# (kept for symmetry with the multi-year averaging used for other domains).
gv <-
  gv_fs %>%
  filter(Year %in% (max(Year))) %>%
  mutate(Year = max(Year)) %>%
  group_by_(.dots = names(.)[!names(.) %in% c("Value")]) %>%
  summarise(Value = mean(Value)) %>%
  ungroup()
# Expanding projected emissions for the projected period: repeat the last
# observed value for every year up to 2030, prepend the historical series,
# and label everything in this object as the "Outlook" source.
gv <-
  ldply((max(gv$Year) + 1):2030, function(x) {
    gv %>%
      mutate(Year = x)
  }) %>%
  tbl_df() %>%
  bind_rows(gv_fs) %>%
  mutate(d.source = "Outlook")
# Final assembly: append the raw FAOSTAT history once more under its original
# source label, duplicate the Outlook series as "no adj. Outlook", and
# aggregate to all Outlook regions.
# NOTE(review): gv_fs ends up bound twice (once relabelled "Outlook" above,
# once here with its original d.source). This mirrors the GF and ol_lu blocks
# below, but confirm the duplication is intended.
gv <-
  gv %>%
  bind_rows(gv_fs) %>%
  bind_rows(gv %>% filter(d.source == "Outlook") %>% mutate(d.source = "no adj. Outlook"))%>%
  arrange(Domain, AreaCode, ItemCode, ElementCode, Year) %>%
  agg_all_ol_regions()
#' ## Reproducing GB, GH and GA
#'
#' Reproducing emissions for the domains Burning crop residues, Burning Savanna
#' and Crop residues. To reproduce emissions for these domains we use
#' the constant share of the emissions from these domains in the estimatable
#' emissions from agriculture and continue this trend into the future.
#'
#' Projecting of these domains is made based on the total aggregates of all
#' estimated domains and Agriculture total domain.
#'
#'
# Partial agriculture total: stack the five activity-driven domains and
# aggregate across GHGs and across domains to a total-emissions series.
gtpart <-
  bind_rows(list(gm, ge, gu, gp, gr)) %>%
  agg_ghg_domains %>%
  agg_total_emissions
# Agriculture total (GT): the remaining GT items are projected from the
# aggregate above as a constant share (useActivity = FALSE); the five
# directly reproduced domains and GV are dropped from that result and
# replaced by their own series before readable names are attached.
gt <-
  outlook_emissions(fs,
                    gtpart %>% filter(d.source == "Outlook"),
                    DomainName = "GT", useActivity = FALSE) %>%
  filter(!ItemCode %in% c("GM", "GE", "GU", "GP", "GR", "GV")) %>%
  bind_rows(gtpart, gv) %>%
  join_names()
#' ## Reproducing GI, GC, GG and GF domains
#'
#' When reproducing data for the GI - Burning Biomass, GC - Cropland and GG - Grassland
#' domains, we assume that the values of emissions remain constant at the level
#' of the last 5 years' average. Below we reproduce that.
# Number of additional trailing years included when averaging the last
# historical observations (see the `(max(Year) - nYears + 1):max(Year)`
# filter below). NOTE(review): nYears = 4 yields a 4-year window, while the
# narrative above mentions a 5-year average -- confirm the intended window.
nYears <- max(5 - 1, 0)
# Last year of the projection horizon. (Was `lastYear = 2030`; use `<-` for
# top-level assignment, consistent with the rest of the script.)
lastYear <- 2030
# Reproducing emissions for the GI, GC, GG
# Historical FAOSTAT land-use (GL) data mapped to the Outlook structure,
# restricted to individual Outlook countries.
ol_lu_fs <-
  fs %>%
  filter(Year %in% c(2000:2016), Domain == "GL") %>%
  map_fs_data(., fsYears = c(2000:2016)) %>%
  filter(AreaCode %in% get_ol_countries())
# Average the trailing window of historical years and collapse it onto the
# last year, averaging over every dimension except Value.
ol_lu <-
  ol_lu_fs %>%
  filter(Year %in% (max(Year) - nYears + 1):max(Year)) %>%
  mutate(Year = max(Year)) %>%
  group_by_(.dots = names(.)[!names(.) %in% c("Value")]) %>%
  summarise(Value = mean(Value)) %>%
  ungroup()
# Expanding projected emissions for the projected period: repeat the
# averaged value for every year up to lastYear, prepend the history, label
# the series "Outlook", and drop GF (forest land is handled separately below).
ol_lu <-
  ldply((max(ol_lu$Year) + 1):lastYear, function(x) {
    ol_lu %>%
      mutate(Year = x)
  }) %>%
  tbl_df() %>%
  bind_rows(ol_lu_fs) %>%
  mutate(d.source = "Outlook") %>%
  arrange(Domain, AreaCode, ItemCode, ElementCode, Year) %>%
  filter(ItemCode != "GF")
# Duplicate the series as "no adj. Outlook" and append the raw FAOSTAT
# history under its original source label.
ol_lu <-
  ol_lu %>%
  mutate(d.source = "no adj. Outlook") %>%
  bind_rows(ol_lu)%>%
  bind_rows(ol_lu_fs)
#' For the domain GF - Forestland we continue the last known value into the future.
# Reproducing emissions for the GF
# Historical GF (forest land) series from the FAOSTAT land-use (GL) domain.
# NOTE(review): the name `gf_sf` presumably stands for the FAOSTAT series
# (cf. `gv_fs` above); kept as is because it is reused within this block.
gf_sf <-
  fs %>%
  filter(Year %in% c(2000:2016), Domain == "GL") %>%
  map_fs_data(., fsYears = c(2000:2016)) %>%
  filter(ItemCode == "GF")
# Last available historical value per series; the single-year mean is
# effectively a pass-through (kept for symmetry with the multi-year
# averaging used for GI/GC/GG).
gf <-
  gf_sf %>%
  filter(AreaCode %in% get_ol_countries()) %>%
  filter(Year %in% (max(Year))) %>%
  mutate(Year = max(Year)) %>%
  group_by_(.dots = names(.)[!names(.) %in% c("Value")]) %>%
  summarise(Value = mean(Value)) %>%
  ungroup()
# Expanding projected emissions for the projected period: repeat the last
# value for every year up to lastYear, prepend the history, label "Outlook".
gf <-
  ldply((max(gf$Year) + 1):lastYear, function(x) {
    gf %>%
      mutate(Year = x)
  }) %>%
  tbl_df() %>%
  bind_rows(gf_sf) %>%
  mutate(d.source = "Outlook") %>%
  arrange(Domain, AreaCode, ItemCode, ElementCode, Year)
# Duplicate the series as "no adj. Outlook" and append the raw FAOSTAT
# history under its original source label.
gf <-
  gf %>%
  mutate(d.source = "no adj. Outlook") %>%
  bind_rows(gf) %>%
  bind_rows(gf_sf)
#' Combining Landuse total emissions
# Land-use total: stack the forest-land series (gf) with the other land-use
# domains (ol_lu), aggregate to every Outlook region, and attach readable
# area/item/element names.
lu <- join_names(agg_all_ol_regions(bind_rows(gf, ol_lu)))
#' Combining and exporting all emissions not adjusted data
#' At this stage all computed work was over and a person in ESS started his analysis.
# Subset used for the South-East Asia analysis: the adjusted ("Outlook")
# series for the world, the rest-of-world aggregate, the SEA aggregate and
# the individual SEA countries.
sea_focus_areas <- c("WLD", "RestOfTheWorld", "OutlookSEAsia", "KHM",
                     "IDN", "LAO", "MYS", "MMR", "PHL", "THA", "VNM")
seaData <- bind_rows(lu, gt) %>%
  filter(d.source == "Outlook", AreaCode %in% sea_focus_areas)
#' ## Adjusting organic soils and cropland --------------------------------------
#'
#' Below are the development stages of the work which were included for analysis by the colleague in ESS.
#'
#' This adjustment is made manually in the file, which we furtherly loaded to
#' the main data.
#'
#' Export relevant data for adjustment into a file
# Export the Malaysia/Indonesia organic-soil (GV) and cropland (GC) emission
# series together with Outlook palm-oil harvested area (item PL, element AH),
# in wide format (one column per year), for manual adjustment in a
# spreadsheet. NB: the "adjustmetns" directory name is spelled this way on
# disk.
palm_activity <- ol %>%
  filter(AreaCode %in% c("MYS", "IDN"), ItemCode == "PL", ElementCode == "AH")
seaData %>%
  filter(AreaCode %in% c("MYS", "IDN"), ItemCode %in% c("GV", "GC")) %>%
  bind_rows(palm_activity) %>%
  spread(Year, Value) %>%
  write_csv("adjustmetns/baseOrganicSoilsCroplandAdjustmens.csv")
# #' Loading manually adjusted organic soils and cropland data
# seaAdjData_part1 <-
# seaData %>%
# filter(! (AreaCode %in% c("MYS", "IDN") & ItemCode %in% c("GV", "GC"))) %>%
# bind_rows(read_csv("adjustmetns/AdjustedOrganicSoilsCroplandAdjustmens.csv") %>%
# gather(Year, Value, 9:length(.)) %>%
# mutate(Year = as.integer(Year),
# Value = as.numeric(Value))) %>%
# filter(AreaCode != "OutlookSEAsia") %>%
# bind_rows(agg_all_ol_regions(.) %>%
# filter(AreaCode == "OutlookSEAsia")) %>%
# mutate(d.source = "Outlook organic cropland and forest")
#' ## Adjusting organic soils, cropland and forestland
#'
#' To adjust forest land data we need to manipulate data from the forest land domain directly.
#'
# Exporting forest data for manual fixup
# Malaysia/Indonesia forest series (items FO and FC) from the FAOSTAT GF
# domain, together with the Outlook palm-oil harvested area (PL/AH) and the
# projected GF emissions from seaData, reshaped wide (one column per year)
# for manual adjustment. NB: the "adjustmetns" directory and the
# "baseForestdjustmens" file name are spelled this way on disk.
fs %>%
  filter(Year %in% c(2000:2016), Domain == "GF") %>%
  map_fs_data(., fsYears = c(2000:2016)) %>%
  filter((AreaCode %in% c("MYS", "IDN") & ItemCode %in% c("FO", "FC"))) %>%
  bind_rows(ol %>% filter(AreaCode %in% c("MYS", "IDN"), ItemCode == "PL", ElementCode == "AH")) %>%
  bind_rows(seaData %>%
              filter(AreaCode %in% c("MYS", "IDN"),
                     ItemCode %in% c("GF"))) %>%
  spread(Year, Value) %>%
  arrange(AreaCode, ItemCode, ElementCode) %>%
  write_csv("adjustmetns/baseForestdjustmens.csv")
#' Loading manually adjusted organic soils and cropland data and forest land data
# Merge the manually adjusted MYS/IDN series back into the SEA data set:
# drop the unadjusted GV/GC/GF rows for those two countries, replace them
# with the spreadsheet-adjusted values (organic soils/cropland plus forest),
# and rebuild the SEA aggregate from the country rows.
# NOTE(review): `gather(Year, Value, 9:length(.))` assumes the year columns
# start at column 9 of the adjustment files -- verify if their layout changes.
seaAdjData_part1 <-
  seaData %>%
  filter(! (AreaCode %in% c("MYS", "IDN") & ItemCode %in% c("GV", "GC", "GF"))) %>%
  bind_rows(read_csv("adjustmetns/AdjustedOrganicSoilsCroplandAdjustmens.csv") %>%
              gather(Year, Value, 9:length(.)) %>%
              mutate(Year = as.integer(Year),
                     Value = as.numeric(Value))) %>%
  bind_rows(read_csv("adjustmetns/AdjustedForest.csv") %>%
              filter(ItemCode == "GF") %>%
              gather(Year, Value, 9:length(.)) %>%
              mutate(Year = as.integer(Year),
                     Value = as.numeric(Value))) %>%
  filter(AreaCode != "OutlookSEAsia") %>%
  bind_rows(agg_all_ol_regions(.) %>%
              filter(AreaCode == "OutlookSEAsia")) %>%
  mutate(d.source = "Outlook organic cropland forest")
#' Preparing forest data as a reference
#' FOREST FOR THE BASE and Extra PARTS -
#'
#'
# Reference forest series: FO and FC items from the FAOSTAT GF domain
# (excluding the Area element), labelled as "Outlook".
gf_Extra <-
  fs %>%
  filter(Year %in% c(2000:2016), Domain == "GF") %>%
  map_fs_data(., fsYears = c(2000:2016)) %>%
  filter(ItemCode %in% c("FO", "FC"), ElementCode != "Area") %>%
  mutate(d.source = "Outlook")
#' Expanding data with the last available values
# Build a complete 2000-2030 grid of every observed series, join the known
# values onto it, and carry the last observed value forward within each
# series via fill().
gf_Extra <-
  ldply(c(2000:2030),
        function(x) {
          gf_Extra %>%
            select(Domain, AreaCode, ItemCode, ElementCode, d.source) %>%
            distinct() %>%
            mutate(Year = x)}) %>%
  tbl_df %>%
  left_join(gf_Extra, by = c("Domain", "AreaCode", "ItemCode", "ElementCode", "d.source", "Year")) %>%
  group_by(Domain, AreaCode, ItemCode, ElementCode, d.source) %>%
  arrange(Domain, AreaCode, ItemCode, ElementCode, d.source, Year) %>%
  fill(Value) %>%
  ungroup()
#' Adding data from the adjustment table
# Replace the MYS/IDN forest rows with the manually adjusted FO/FC values,
# relabel that variant "Outlook organic cropland forest", and keep the
# original series alongside it before attaching readable names.
gf_Extra <-
  gf_Extra %>%
  filter(!AreaCode %in% c("MYS", "IDN")) %>%
  # spread(Year, Value) %>%
  bind_rows(
    read_csv("adjustmetns/AdjustedForest.csv") %>%
      filter(ItemCode %in% c("FO", "FC")) %>%
      gather(Year, Value, 9:length(.)) %>%
      mutate(Year = as.integer(Year),
             Value = as.numeric(Value)) %>%
      select(Domain, AreaCode, ItemCode, ElementCode, d.source, Year, Value)) %>%
  mutate(d.source = "Outlook organic cropland forest") %>%
  bind_rows(gf_Extra) %>%
  join_names()
# South-East Asia aggregate of the forest reference series.
gf_Extra_sea <-
  gf_Extra %>%
  agg_ol_regions(., regionVar = "OutlookSEAsia") %>%
  filter(AreaCode == "OutlookSEAsia")
#' Adding other extra things such as activity data
# Activity data for the SEA aggregate: activity elements (LI, CI, AH) from
# the reproduced GR and GM domains, plus palm-oil harvested area (PL/AH)
# aggregated from the raw Outlook data; all labelled "Outlook".
SEA_activity <-
  bind_rows(list(gm, ge, gu, gp, gr)) %>%
  filter(d.source == "Outlook", AreaCode == "OutlookSEAsia") %>%
  filter(ElementCode %in% c("LI", "CI", "AH"), Domain %in% c("GR", "GM")) %>%
  bind_rows(
    ol %>%
      agg_ol_regions(., regionVar = "OutlookSEAsia") %>%
      filter(AreaCode == "OutlookSEAsia",
             ItemCode == "PL",
             ElementCode == "AH")) %>%
  mutate(d.source = "Outlook")
#' The same activity data for each individual SEA country.
# NOTE(review): the agg_ol_regions() call below looks copied from the block
# above -- the subsequent filter keeps individual countries only, so the
# aggregation appears redundant; confirm it does not alter the rows kept.
SEA_separate_activity <-
  bind_rows(list(gm, ge, gu, gp, gr)) %>%
  filter(d.source == "Outlook", AreaCode %in% c("LAO", "VNM", "KHM", "IDN", "MYS", "PHL", "THA", "MMR")) %>%
  filter(ElementCode %in% c("LI", "CI", "AH"), Domain %in% c("GR", "GM")) %>%
  bind_rows(
    ol %>%
      agg_ol_regions(., regionVar = "OutlookSEAsia") %>%
      filter(AreaCode %in% c("LAO", "VNM", "KHM", "IDN", "MYS", "PHL", "THA", "MMR"),
             ItemCode == "PL",
             ElementCode == "AH"))%>%
  mutate(d.source = "Outlook")
#' Exporting data for SEA total only
# Pool the adjusted SEA series, clip to the 2000-2026 window, and switch Year
# to character so period labels such as "2014-2016" can be mixed in below.
export <- bind_rows(seaData, seaAdjData_part1, gf_Extra_sea)
export <- filter(export, Year >= 2000, Year < 2027)
export <- mutate(export, Year = as.character(Year))
# Add a "2014-2016" base-period average to the export, then restrict it to the
# SEA regional CO2-eq emission rows and append the regional activity series.
export <-
  export %>%
  # Relabel the base-period years so they collapse into one group below.
  mutate(#Year = ifelse(Year %in% as.character(c(2001:2010)), "2001-2010", Year),
         Year = ifelse(Year %in% as.character(c(2014:2016)), "2014-2016", Year)) %>%
  # Group by every column except Value ("." is the piped data frame, so
  # names(.) are its current column names). group_by_ is deprecated dplyr;
  # kept as-is to match the rest of the script.
  group_by_(.dots = names(.)[!names(.) %in% c("Value")]) %>%
  summarise(Value = mean(Value)) %>%  # no na.rm: an NA year makes the mean NA
  # Keep only the period averages ("2001-2010" matches nothing while its
  # relabelling above stays commented out) ...
  filter(Year %in% c("2001-2010", "2014-2016")) %>%
  # ... and re-attach the untouched per-year rows from the original export.
  bind_rows(export) %>%
  # filter(Year %in% c("2001-2010", "2014-2016", "2026")) %>%
  filter(AreaCode == "OutlookSEAsia", ElementCode == "Emissions_CO2Eq") %>%
  ungroup() %>%
  bind_rows(SEA_activity %>% mutate(Year = as.character(Year)) %>% join_names())
# Pull the 2014-2016 average burning emissions (savanna = GH, biomass = GI)
# out as reference columns, keyed by area/item/element/source, so they can
# later overwrite the per-year values of those same items.
BurningSavanna <- export %>%
  filter(Year == "2014-2016", ItemCode == "GH") %>%
  select(AreaCode, ItemCode, ElementCode, d.source, Savanna = Value)
BurningBiomass <- export %>%
  filter(Year == "2014-2016", ItemCode == "GI") %>%
  select(AreaCode, ItemCode, ElementCode, d.source, Biomass = Value)
#' Writing all: flatten the SEA-total export to one row per (Item, Element)
#' with years as columns and write it to a dated CSV under output/.
export %>%
  left_join(BurningSavanna , by = c("AreaCode", "ItemCode", "ElementCode", "d.source")) %>%
  left_join(BurningBiomass , by = c("AreaCode", "ItemCode", "ElementCode", "d.source")) %>%
  # Replace every per-year GH/GI value with its 2014-2016 average, i.e. the
  # burning series are deliberately flattened to a constant in the output.
  mutate(Value = ifelse(ItemCode == "GH", Savanna, Value ),
         Value = ifelse(ItemCode == "GI", Biomass, Value )) %>%
  select(-Biomass, -Savanna) %>%
  # slice(c(3, 71))
  # Long-to-wide: one column per Year label.
  spread(Year, Value) %>%
  # right_join against this template restricts the output to the listed
  # (ItemCode, ElementCode) pairs; pairs absent from the data still appear,
  # with NA in every year column.
  right_join(tibble(ItemCode = c("MK", "BV", "SH", "PT", "PK", "RI", "PL", "GH",
                                 "GB", "GI", "GU", "GP", "GM", "GE", "GA",
                                 "GY", "GR", "GV", "GC", "GG", "FO", "FC",
                                 "GF"),
                    ElementCode = c("CI", "LI", "LI", "LI", "LI", "AH", "AH",
                                    "Emissions_CO2Eq", "Emissions_CO2Eq", "Emissions_CO2Eq",
                                    "Emissions_CO2Eq", "Emissions_CO2Eq", "Emissions_CO2Eq",
                                    "Emissions_CO2Eq", "Emissions_CO2Eq", "Emissions_CO2Eq",
                                    "Emissions_CO2Eq", "Emissions_CO2Eq", "Emissions_CO2Eq",
                                    "Emissions_CO2Eq", "Emissions_CO2Eq", "Emissions_CO2Eq",
                                    "Emissions_CO2Eq"))) %>%
  # Drop the code columns (the joined *Name columns from join_names() remain)
  # and put the headline years first.
  select(-ItemCode, -ElementCode, -Unit) %>%
  arrange(d.source) %>%
  select(AreaCode, ElementName, Domain, ItemName, d.source, `2016`, `2026`, everything()) %>%
  # NOTE(review): unlike the per-country export below, this filename has no
  # "_" between "Base_2" and the date -- confirm that is intended.
  write_csv(str_c("output/SEA_total_prelim_adjusted_Base_2", Sys.Date(),".csv"))
#' Exporting data for all SEA countries individually (no totals)
# Same pooling/clipping as the regional-total export: adjusted SEA series,
# years 2000-2026, Year as character so period labels can be mixed in.
export2 <- bind_rows(seaData, seaAdjData_part1, gf_Extra_sea)
export2 <- filter(export2, Year >= 2000, Year < 2027)
export2 <- mutate(export2, Year = as.character(Year))
# Add a "2014-2016" base-period average, keep the CO2-eq emission rows,
# append the per-country activity series, and restrict everything to the
# eight individual SEA countries.
export2 <-
  export2 %>%
  # Relabel the base-period years so they collapse into one group below.
  mutate(#Year = ifelse(Year %in% as.character(c(2001:2010)), "2001-2010", Year),
         Year = ifelse(Year %in% as.character(c(2014:2016)), "2014-2016", Year)) %>%
  # Group by every column except Value ("." is the piped data frame).
  # group_by_ is deprecated dplyr; kept as-is to match the rest of the script.
  group_by_(.dots = names(.)[!names(.) %in% c("Value")]) %>%
  summarise(Value = mean(Value)) %>%  # no na.rm: an NA year makes the mean NA
  # Keep only the period averages ("2001-2010" matches nothing while its
  # relabelling above stays commented out) ...
  filter(Year %in% c("2001-2010", "2014-2016")) %>%
  # ... and re-attach the untouched per-year rows.
  bind_rows(export2) %>%
  # filter(Year %in% c("2001-2010", "2014-2016", "2026")) %>%
  filter(ElementCode == "Emissions_CO2Eq") %>%
  ungroup() %>%
  bind_rows(SEA_separate_activity %>% mutate(Year = as.character(Year)) %>% join_names()) %>%
  filter(AreaCode %in% c("LAO", "VNM", "KHM", "IDN", "MYS", "PHL", "THA", "MMR"))
# Per-country 2014-2016 average burning emissions (savanna = GH, biomass = GI),
# extracted as reference columns for overwriting the per-year values of those
# items in the write-out below. Reuses the BurningSavanna/BurningBiomass names
# from the regional-total export above.
BurningSavanna <- export2 %>%
  filter(Year == "2014-2016", ItemCode == "GH") %>%
  select(AreaCode, ItemCode, ElementCode, d.source, Savanna = Value)
BurningBiomass <- export2 %>%
  filter(Year == "2014-2016", ItemCode == "GI") %>%
  select(AreaCode, ItemCode, ElementCode, d.source, Biomass = Value)
# Writing all: flatten the per-country export to rows of (Area, Item, Element)
# with years as columns and write it to a dated CSV under output/.
export2 %>%
  left_join(BurningSavanna , by = c("AreaCode", "ItemCode", "ElementCode", "d.source")) %>%
  left_join(BurningBiomass , by = c("AreaCode", "ItemCode", "ElementCode", "d.source")) %>%
  # Replace every per-year GH/GI value with its 2014-2016 average, i.e. the
  # burning series are deliberately flattened to a constant in the output.
  mutate(Value = ifelse(ItemCode == "GH", Savanna, Value ),
         Value = ifelse(ItemCode == "GI", Biomass, Value )) %>%
  select(-Biomass, -Savanna) %>%
  # slice(c(6166, 6167))
  # Long-to-wide: one column per Year label.
  spread(Year, Value) %>%
  # right_join against this template restricts the output to the listed
  # (ItemCode, ElementCode) pairs; each pair is matched by every country, and
  # pairs absent from the data still appear, with NA in every year column.
  right_join(tibble(ItemCode = c("MK", "BV", "SH", "PT", "PK", "RI", "PL", "GH",
                                 "GB", "GI", "GU", "GP", "GM", "GE", "GA",
                                 "GY", "GR", "GV", "GC", "GG", "FO", "FC",
                                 "GF"),
                    ElementCode = c("CI", "LI", "LI", "LI", "LI", "AH", "AH",
                                    "Emissions_CO2Eq", "Emissions_CO2Eq", "Emissions_CO2Eq",
                                    "Emissions_CO2Eq", "Emissions_CO2Eq", "Emissions_CO2Eq",
                                    "Emissions_CO2Eq", "Emissions_CO2Eq", "Emissions_CO2Eq",
                                    "Emissions_CO2Eq", "Emissions_CO2Eq", "Emissions_CO2Eq",
                                    "Emissions_CO2Eq", "Emissions_CO2Eq", "Emissions_CO2Eq",
                                    "Emissions_CO2Eq"))) %>%
  # Drop the code columns (the joined *Name columns from join_names() remain)
  # and put the headline years first.
  select(-ItemCode, -ElementCode, -Unit) %>%
  arrange(AreaCode, d.source) %>%
  select(AreaCode, ElementName, Domain, ItemName, d.source, `2016`, `2026`, everything()) %>%
  write_csv(str_c("output/SEA_total_prelim_adjusted_countries_Base_2_", Sys.Date(),".csv"))
#
# write_csv(seaData, "output/SEA_data_prelim.csv")
#
# write_csv(, "output/SEA_Adjusted_data_prelim.csv")
# QA of some selected numbers
# gt %>%
# filter(AreaCode == "VNM") %>%
# plot_group(n_page = 12,
# groups_var = c("ElementCode"),
# plots_var = "ItemCode" )
# gtt %>%
# filter(AreaCode == "OutlookSEAsia", ElementCode == "Emissions_CO2Eq") %>%
# plot_group(n_page = 6,
# groups_var = c("ElementCode"),
# plots_var = "ItemCode" )
# # QUALITY ASSURANCE
# plot_group(gm ,
# n_page = 6,
# groups_var = c("ElementCode"),
# plots_var = "ItemCode"
# )
# Exporting numbers
# gt %>%
# mutate(AreaCode2 = AreaCode) %>%
# filter(d.source == "Faostat" & Year <= 2014 |
# d.source == "Outlook" & Year > 2014 ) %>%
# arrange(Domain, AreaCode, ItemCode, ElementCode, d.source, Year) %>%
# write.csv(file = "output/preliminatyData_new.csv")
#' ## QA of the adjusted activity data
#+echo = FALSE, results = 'hide', message = FALSE
# QAData <-
# bind_rows(activity,
# activity %>%
# select(AreaCode, ItemCode, ElementCode, Year) %>%
# distinct() %>%
# left_join(fsol) %>%
# filter(!is.na(Value)),
# olSubset %>%
# mutate(d.source = "old_Outlook") %>%
# right_join(activity %>%
# select(AreaCode, ItemCode, ElementCode, Year) %>%
# distinct())%>%
# filter(!is.na(Value))) %>%
# filter(AreaCode %in% c("WLD", "RestOfTheWorld", "OutlookSEAsia", "CHN", "KHM",
# "IDN", "LAO", "MYS", "MMR", "PHL", "THA", "VNM"))
# plot_group(filter(QAData, AreaCode %in% c("WLD", "RestOfTheWorld", "OutlookSEAsia", "CHN")),
# n_page = 4,
# groups_var = c("ElementCode", "ItemCode"),
# plots_var = "AreaCode"
# )
#
# plot_group(filter(QAData, AreaCode %in% c("KHM", "IDN", "LAO", "MYS")),
# n_page = 4,
# groups_var = c("ElementCode", "ItemCode"),
# plots_var = "AreaCode"
# )
#
# plot_group(filter(QAData, AreaCode %in% c("MMR", "PHL", "THA", "VNM")),
# n_page = 4,
# groups_var = c("ElementCode", "ItemCode"),
# plots_var = "AreaCode"
# )
#
#'
#'
#' # Annexes
#'
#' ## Function `map_fs2ol` for mapping FAOSTAT data to the Outlook countries and regions
#+code=readLines("r/map_fs2ol.R")
#' ## Function `agg_ol_regions` for aggregating Outlook countries to the regions
#+code=readLines("r/agg_ol_regions.R")
#' ## Mapping tables from FAOSTAT countries to Outlook countries and regions
#+echo=FALSE
# options(markdown.HTML.header = system.file('misc', 'datatables.html', package = 'knitr'))
# areaMT <- read_csv("mappingTables/faostat_areas_outlook_areas.csv",
# col_types = cols(
# AreaCode = col_integer(),
# AreaName = col_character(),
# OutlookAreaCode = col_character(),
# OutlookAreaName = col_character(),
# OutlookStatus = col_character(),
# OutlookSubRegion = col_character(),
# OutlookBigRegion = col_character(),
# OutlookSuperRegion = col_character(),
# OutlookSEAsia = col_character()
# ))
#
# # Changing encoding
# Encoding(areaMT$AreaName) <- "latin1"
# Encoding(areaMT$OutlookAreaName) <- "latin1"
# # Printing the table
# datatable(areaMT,
# rownames=FALSE,
# colnames =
# c("FS Code", "FS Name", "Outlook Code", "Outlook name",
# "Status", "Sub Regions", "Big Five", "Super Region",
# "Southeast Asia"))
#
#
# #' ## Mapping table for mapping FAOSTAT items to the Outlook
# #+echo=FALSE
# datatable(itemsMT, style = 'bootstrap', rownames=FALSE)
#
# #' ## Mapping table for mapping FAOSTAT elements to the Outlook
# #+echo=FALSE
# datatable(elementsMT, style = 'bootstrap', rownames=FALSE)
#
|
dd329e6eb773d07ba352e8b339d278f90da44f92 | 098841409c03478ddae35c4cdf6367cfd65fa3bf | /diff/code/sc_10x_5cl/01_all.R | 95d2bde30ac6f282477488a97f020f3b0de0e471 | [] | no_license | wangdi2016/imputationBenchmark | 0281746b482788c347faf9d96e8288639ba388a6 | 0881121444975cd0a3ee2ce69aaec46c3acd7791 | refs/heads/master | 2023-07-29T10:16:14.004610 | 2021-09-09T20:00:43 | 2021-09-09T20:00:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,558 | r | 01_all.R | library(data.table)
source('/home-4/whou10@jhu.edu/scratch/Wenpin/resource/function.R')
method = commandArgs(trailingOnly = T)[1]
# method='magic'
bulk = readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/data/bulkrna/cellbench/GSE86337_processed_count.rds')
g = fread('/home-4/whou10@jhu.edu/scratch/Wenpin/resource/gencode.v19.annotation.gtf',data.table = F)
g <- g[g[,3]=='gene',]
gn <- gsub('\"','',sub(' gene_name ','',sapply(g[,9],function(i) strsplit(i,';')[[1]][5])))
gl <- g[,5]-g[,4]+1
names(gl) <- gn
gl <- gl/1000
bulk <- bulk[row.names(bulk) %in% names(gl),]
bulk <- bulk/gl[row.names(bulk)]
lib <- colSums(bulk)/1e6
bulk <- t(t(bulk)/lib)
bulk <- log2(bulk + 1) ## TPM
colnames(bulk) = sub('_.*','',colnames(bulk))
bulk <- sapply(unique(colnames(bulk)),function(i) rowMeans(bulk[,colnames(bulk)==i]))
sexpr = readRDS(paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/procimpute/cellbench/',method,'/sc_10x_5cl.rds'))
example = readRDS(paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/procimpute/cellbench/saver/sc_10x_5cl.rds'))
colnames(sexpr) = colnames(example)
cl = sub('.*:','',colnames(sexpr))
intgene = intersect(rownames(bulk),rownames(sexpr))
bulk = bulk[intgene,]
sexpr = sexpr[intgene,]
get_scCellType_bulkCellType_cor <- function(ct1, ct2, bulkDiff){
imp1 = sexpr[, which(cl==ct1)]
imp2 = sexpr[, which(cl==ct2)]
corvec = NULL
corvec <- sapply(1:ncol(imp1),function(i) {
print(i)
sapply(1:ncol(imp2), function(j) {
cor((imp1[,i] - imp2[,j]), bulkDiff,method='spearman')
})
})
as.vector(corvec)
}
v = sapply(1:(ncol(bulk)-1), function(i){
sapply((i+1):ncol(bulk), function(j){
cn = paste0(colnames(bulk)[i],'_',colnames(bulk)[j])
tmp <- get_scCellType_bulkCellType_cor(ct1=colnames(bulk)[i], ct2=colnames(bulk)[j], bulkDiff = bulk[,colnames(bulk)[i]]-bulk[,colnames(bulk)[j]])
names(tmp)=cn
tmp
})
})
for (i in 1:length(v)){
if (!is.list(v[[i]])) v[[i]] = list(as.vector(v[[i]]))
}
cn = NULL
for (i in 1:(ncol(bulk)-1)){
for (j in (i+1):ncol(bulk)){
cn = c(cn,paste0(colnames(bulk)[i],'_',colnames(bulk)[j]))
}
}
for (i in 1:length(v)){
names(v[[i]]) = cn[1:length(v[[i]])]
if (i!=length(v)) cn = cn[(length(v[[i]])+1):length(cn)]
}
tmp = c(v[[1]],v[[2]],v[[3]],v[[4]])
dir.create('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/diff/result/sc_10x_5cl/',recursive = T, showWarnings = F)
saveRDS(tmp,paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/diff/result/sc_10x_5cl/',method,'.rds'))
|
bfef32a21433a3453bbc4839c4d668242d85806a | 9f269bd0f27e52e915ce43784ae84bd7cb698493 | /R/plotFunctions.R | 99406a415fcdc1eb6121a40e3b3e2f748b2581b5 | [] | no_license | andykrause/hpiR | f229296480dd36e446bc940639d28518ba08938f | faf67c70b82dd51ca5b056b074404312db49de15 | refs/heads/master | 2022-10-05T12:46:31.373709 | 2021-12-20T17:54:36 | 2021-12-20T17:54:36 | 111,461,682 | 16 | 8 | null | 2022-09-27T03:15:35 | 2017-11-20T20:53:53 | R | UTF-8 | R | false | false | 17,216 | r | plotFunctions.R | #'
#' Plot method for `hpiindex` object
#'
#' Specific plotting method for hpiindex objects
#'
#' @param x Object to plot of class `hpiindex``
#' @param show_imputed default = FALSE; highlight the imputed points
#' @param smooth default = FALSE; plot the smoothed index
#' @param ... Additional Arguments
#' @import ggplot2
#' @method plot hpiindex
#' @return `plotindex` object inheriting from a ggplot object
#' @examples
#'
#' # Load data
#' data(ex_sales)
#'
#' # With a raw transaction data.frame
#' rt_data <- rtCreateTrans(trans_df = ex_sales,
#' prop_id = 'pinx',
#' trans_id = 'sale_id',
#' price = 'sale_price',
#' periodicity = 'monthly',
#' date = 'sale_date')
#'
#' # Create model object
#' hpi_model <- hpiModel(model_type = 'rt',
#' hpi_df = rt_data,
#' estimator = 'base',
#' log_dep = TRUE)
#'
#' # Create Index
#' hpi_index <- modelToIndex(hpi_model,
#' max_period = 84)
#'
#' # Make Plot
#' plot(hpi_index)
#'
#' @export
plot.hpiindex <- function(x,
show_imputed=FALSE,
smooth=FALSE,
...){
## Extract Data
hpi_data <- data.frame(x=x$period,
y=as.numeric(x$value),
imp=x$imputed,
stringsAsFactors=FALSE)
## Make the base plot object
gg_obj <- ggplot(hpi_data, aes_string(x="x", y="y")) +
geom_line(size=1.1, color='gray40') +
ylab("Index Value\n") +
xlab('\nTime Period')
if (show_imputed){
hpi_data$imp <- ifelse(hpi_data$imp, 1, 0)
gg_obj <- gg_obj +
geom_point(data=hpi_data,
aes_string(x="x", y="y",
color="as.factor(imp)",
size="imp")) +
scale_color_manual(values=c('black', 'red')) +
theme(legend.position="none")
}
if (smooth){
if ('smooth' %in% names(x)){
sm_data <- data.frame(x=x$period,
y=as.numeric(x$smooth),
stringsAsFactors=FALSE)
gg_obj <- gg_obj +
geom_line(data=sm_data,
aes_string(x="x", y="y"),
size=1.3,
linetype=1,
color='red')
} else {
message('No smoothed index (index_obj$smooth) present.\n')
}
}
# Return Values
structure(gg_obj, class = c('plotindex', class(gg_obj)))
}
#'
#' Plot method for `hpi` object
#'
#' Specific plotting method for hpi objects
#'
#' @method plot hpi
#' @param x Object to plot of class `hpi`
#' @param ... Additional Arguments
#' @return `plotindex` object inheriting from a ggplot object
#' @importFrom graphics plot
#' @section Further Details:
#' Additional argument can include those argument for `plot.hpindex``
#' @examples
#'
#' # Load data
#' data(ex_sales)
#'
#' # Create index with raw transaction data
#' rt_index <- rtIndex(trans_df = ex_sales,
#' periodicity = 'monthly',
#' min_date = '2010-06-01',
#' max_date = '2015-11-30',
#' adj_type = 'clip',
#' date = 'sale_date',
#' price = 'sale_price',
#' trans_id = 'sale_id',
#' prop_id = 'pinx',
#' estimator = 'robust',
#' log_dep = TRUE,
#' trim_model = TRUE,
#' max_period = 48,
#' smooth = FALSE)
#'
#' # Plot data
#' plot(rt_index)
#' plot(rt_index, smooth = TRUE)
#'
#' @export
plot.hpi <- function(x,
...){
plot(x$index, ...)
}
#'
#' Plot method for `indexvolatility` object
#'
#' Specific plotting method for indexvolatility objects
#'
#' @method plot indexvolatility
#' @param x Object to plot of class `indexvolatility``
#' @param ... Additional Arguments
#' @return `plotvolatility` object inheriting from a ggplot object
#' @import ggplot2
#' @examples
#'
#' # Load Data
#' data(ex_sales)
#'
#' # Create index with raw transaction data
#' rt_index <- rtIndex(trans_df = ex_sales,
#' periodicity = 'monthly',
#' min_date = '2010-06-01',
#' max_date = '2015-11-30',
#' adj_type = 'clip',
#' date = 'sale_date',
#' price = 'sale_price',
#' trans_id = 'sale_id',
#' prop_id = 'pinx',
#' estimator = 'robust',
#' log_dep = TRUE,
#' trim_model = TRUE,
#' max_period = 48,
#' smooth = FALSE)
#'
#' # Calculate Volatility
#' index_vol <- calcVolatility(index = rt_index,
#' window = 3)
#'
#' # Make Plot
#' plot(index_vol)
#'
#' @export
plot.indexvolatility <- function(x, ...){
# Set up dimensions
data_df <- data.frame(time_period=1:length(attr(x, 'orig')),
volatility = c(rep(NA_integer_, attr(x, 'window')),
as.numeric(x$roll)),
stringsAsFactors=FALSE)
# Plot base volatility
vol_plot <- ggplot(data_df, aes_string(x="time_period", y="volatility")) +
geom_line(color='navy', size=2) +
ylab('Volatility\n') +
xlab('\nTime Period') +
geom_hline(yintercept = x$mean, size=1, linetype = 2, color='gray50') +
geom_hline(yintercept = x$median, size=1, linetype = 3, color='gray50' )
# Return Plot
structure(vol_plot, class = c('plotvolatility', class(vol_plot)))
}
#'
#' Plot method for `hpiaccuracy` object
#'
#' Specific plotting method for hpiaccuracy objects
#'
#' @method plot hpiaccuracy
#' @param x Object to plot of class `hpiaccuracy``
#' @param return_plot default = FALSE; Return the plot to the function call
#' @param do_plot default = FALSE; Execute plotting to terminal/console
#' @param use_log_error [FALSE] Use the log error?
#' @param ... Additional Arguments
#' @return `plotaccuracy` object inheriting from a ggplot object
#' @import ggplot2
#' @importFrom stats quantile
#' @importFrom graphics plot
#' @importFrom gridExtra grid.arrange
#' @examples
#'
#' # Load Data
#' data(ex_sales)
#'
#' # Create Index
#' rt_index <- rtIndex(trans_df = ex_sales,
#' periodicity = 'monthly',
#' min_date = '2010-06-01',
#' max_date = '2015-11-30',
#' adj_type = 'clip',
#' date = 'sale_date',
#' price = 'sale_price',
#' trans_id = 'sale_id',
#' prop_id = 'pinx',
#' estimator = 'robust',
#' log_dep = TRUE,
#' trim_model = TRUE,
#' max_period = 48,
#' smooth = FALSE)
#'
#' # Calculate insample accuracy
#' hpi_accr <- calcAccuracy(hpi_obj = rt_index,
#' test_type = 'rt',
#' test_method = 'insample')
#'
#' # Make Plot
#' plot(hpi_accr)
#'
#' @export
plot.hpiaccuracy <- function(x,
return_plot = FALSE,
do_plot = TRUE,
use_log_error = FALSE,
...){
if (use_log_error) x$error <- x$log_error
# Get period count
p_cnt <- length(unique(x$pred_period))
# Make the absolute box plot
bar_abs <- ggplot(x, aes_string(x="as.factor(pred_period)",
y="abs(error)"), alpha=.5) +
geom_boxplot(fill='lightblue') +
coord_cartesian(ylim=c(0, quantile(abs(x$error),.99))) +
ylab('Absolute Error') +
xlab('Time Period')
# Make the magnitude box plot
bar_mag <- ggplot(x, aes_string(x="as.factor(pred_period)",
y="error"), alpha=.5) +
geom_boxplot(fill='salmon') +
coord_cartesian(ylim=c(stats::quantile(x$error, .01),
stats::quantile(x$error, .99))) +
ylab('Error') +
xlab('Time Period')
# Adjust axis if too many periods
if (p_cnt > 12){
breaks <- seq(from=min(x$pred_period),
to=max(x$pred_period),
length.out=12)
bar_abs <- bar_abs +
scale_x_discrete(breaks=breaks)
bar_mag <- bar_mag +
scale_x_discrete(breaks=breaks)
}
# Make absolute density plot
dens_abs <- ggplot(x, aes_string(x="abs(error)"), alpha=.5) +
geom_density(fill='lightblue') +
coord_cartesian(xlim=c(0, stats::quantile(abs(x$error),.99))) +
xlab('Absolute Error') +
ylab('Density of Error')
# Make magnitude density plot
dens_mag <- ggplot(x, aes_string(x="error"), alpha=.5) +
geom_density(fill='salmon') +
coord_cartesian(xlim=c(stats::quantile(x$error, .01),
stats::quantile(x$error, .99))) +
xlab('Error') +
ylab('Density of Error')
# Combine
full_plot <- gridExtra::grid.arrange(bar_abs, bar_mag, dens_abs, dens_mag,
nrow = 2)
# Plot
if (do_plot) plot(full_plot)
# Return or plot
if (return_plot){
return(structure(full_plot, class = c('plotaccuracy', class(full_plot))))
}
}
#'
#' Plot method for `seriesaccuracy` object
#'
#' Specific plotting method for seriesaccuracy objects
#'
#' @method plot seriesaccuracy
#' @param x Object of class `hpiaccuracy``
#' @param return_plot default = FALSE; Return the plot to the function call
#' @param ... Additional argument (passed to `plot.hpiaccuracy()``)
#' @return `plotaccuracy` object inheriting from a ggplot object
#' @import ggplot2
#' @importFrom graphics plot
#' @examples
#'
#' # Load data
#' data(ex_sales)
#'
#' # Create index
#' rt_index <- rtIndex(trans_df = ex_sales,
#' periodicity = 'monthly',
#' min_date = '2010-06-01',
#' max_date = '2015-11-30',
#' adj_type = 'clip',
#' date = 'sale_date',
#' price = 'sale_price',
#' trans_id = 'sale_id',
#' prop_id = 'pinx',
#' estimator = 'robust',
#' log_dep = TRUE,
#' trim_model = TRUE,
#' max_period = 48,
#' smooth = FALSE)
#'
#' # Create Series (Suppressing messages do to small sample size of this example)
#' suppressMessages(
#' hpi_series <- createSeries(hpi_obj = rt_index,
#' train_period = 12))
#'
#' # Calculate insample accuracy
#' hpi_series_accr <- calcSeriesAccuracy(series_obj = hpi_series,
#' test_type = 'rt',
#' test_method = 'insample')
#' # Make Plot
#' plot(hpi_series_accr)
#'
#' @export
plot.seriesaccuracy <- function(x,
return_plot = FALSE,
...){
class(x) <- c('hpiaccuracy', 'data.frame')
plot(x, return_plot=return_plot, do_plot=FALSE, ...)
}
#'
#' Plot method for `serieshpi` object
#'
#' Specific plotting method for serieshpi objects
#'
#' @method plot serieshpi
#' @param x Object of class `serieshpi`
#' @param smooth default = FALSE; plot the smoothed object
#' @param ... Additional Arguments`
#' @return `plotseries` object inheriting from a ggplot object
#' @import ggplot2
#' @importFrom purrr map
#' @examples
#'
#' # Load data
#' data(ex_sales)
#'
#' # Create index
#' rt_index <- rtIndex(trans_df = ex_sales,
#' periodicity = 'monthly',
#' min_date = '2010-06-01',
#' max_date = '2015-11-30',
#' adj_type = 'clip',
#' date = 'sale_date',
#' price = 'sale_price',
#' trans_id = 'sale_id',
#' prop_id = 'pinx',
#' estimator = 'robust',
#' log_dep = TRUE,
#' trim_model = TRUE,
#' max_period = 48,
#' smooth = FALSE)
#'
#' # Create Series (Suppressing messages do to small sample size of this example)
#' suppressMessages(
#' hpi_series <- createSeries(hpi_obj = rt_index,
#' train_period = 12))
#'
#' # Make Plot
#' plot(hpi_series)
#'
#' @export
plot.serieshpi<- function(x,
smooth = FALSE,
...){
# Extract the indexes
indexes_. <- purrr::map(.x=x$hpis,
.f = function(x) x$index)
# Get the longest
largest <- indexes_.[[length(indexes_.)]]
# Set the value field
if (smooth && 'smooth' %in% names(largest)){
index_name <- 'smooth'
} else {
index_name <- 'value'
}
# Create blank_df
blank_df <- data.frame(time_period = 1:length(largest[[index_name]]),
value=seq(min(largest[[index_name]]),
max(largest[[index_name]]),
length.out=length(largest[[index_name]])),
stringsAsFactors=FALSE)
# Plot canvas
series_plot <- ggplot(blank_df,
aes_string(x="time_period", y="value"))
# Plot each of the non-terminal indexes
for(i in 1:length(indexes_.)){
data_df <- data.frame(x=1:length(indexes_.[[i]][[index_name]]),
y=as.numeric(indexes_.[[i]][[index_name]]),
stringsAsFactors=FALSE)
series_plot <- series_plot + geom_line(data=data_df,
aes_string(x="x",y="y"),
color='gray70')
}
# Add the terminal index
data_df <- data.frame(x=1:length(indexes_.[[length(indexes_.)]][[index_name]]),
y=as.numeric(indexes_.[[length(indexes_.)]][[index_name]]),
stringsAsFactors=FALSE)
series_plot <- series_plot + geom_line(data=data_df,
aes_string(x="x",y="y"),
color='red',
size=2) +
ylab('Index Value\n') +
xlab('\nTime Period')
structure(series_plot, class = c('plotseries', class(series_plot)))
}
#'
#' Plot method for `seriesrevision` object
#'
#' Specific plotting method for seriesrevision objects
#'
#' @method plot seriesrevision
#' @param x Object to plot of class `seriesrevision`
#' @param measure default = 'median'; Metric to plot ('median' or 'mean')
#' @param ... Additional Arguments
#' @return `plotrevision` object inheriting from a ggplot object
#' @import ggplot2
#' @importFrom magrittr %>%
#' @importFrom dplyr mutate
#' @examples
#'
#' # Load example sales
#' data(ex_sales)
#'
#' # Create Index
#' rt_index <- rtIndex(trans_df = ex_sales,
#' periodicity = 'monthly',
#' min_date = '2010-06-01',
#' max_date = '2015-11-30',
#' adj_type = 'clip',
#' date = 'sale_date',
#' price = 'sale_price',
#' trans_id = 'sale_id',
#' prop_id = 'pinx',
#' estimator = 'robust',
#' log_dep = TRUE,
#' trim_model = TRUE,
#' max_period = 48,
#' smooth = FALSE)
#'
#' # Create Series (Suppressing messages do to small sample size of this example)
#' suppressMessages(
#' hpi_series <- createSeries(hpi_obj = rt_index,
#' train_period = 12))
#'
#' # Calculate revision
#' series_rev <- calcRevision(series_obj = hpi_series)
#'
#' # Make Plot
#' plot(series_rev)
#'
#' @export
plot.seriesrevision <- function(x,
measure = 'median',
...){
# Make Data
plot_data <- x$period
if (measure == 'median'){
plot_data$revision <- plot_data$median
yint <- x$median
y_lab <- 'Median Revision\n'
} else {
plot_data$revision <- plot_data$mean
yint <- x$mean
y_lab <- 'Mean Revision\n'
}
# Create Plot
plot_data <- plot_data %>%
dplyr::mutate(col = ifelse(.data$revision > 0, 1, 0))
rev_plot <- ggplot(plot_data, aes_string(x="period",
y="revision",
fill="as.factor(col)",
alpha=.5)) +
geom_bar(stat='identity') +
scale_fill_manual(values=c('red', 'blue')) +
geom_hline(yintercept = yint, size=1, linetype = 2) +
ylab(y_lab) +
xlab('\nTime Period') +
theme(legend.position='none',
legend.title = element_blank())
structure(rev_plot, class = c('plotrevision', class(rev_plot)))
}
|
5efcdea0dd0d673386e7bf446f54fa5008c6d6d1 | f8ef4b37c45195001a543cd9f498d43d34c5f784 | /man/sdr_list_tables.Rd | 54238c49fcfb74af1970bc410c8b173b4b550984 | [
"MIT"
] | permissive | thomascrines/sdrUpload | cb1a751ec70844e2704e3e2b06b1f9dc23322d93 | 24a4b1034253070e4b6c54e92225b8c07dc20361 | refs/heads/master | 2020-07-11T04:58:58.064063 | 2019-10-21T14:45:32 | 2019-10-21T14:45:32 | 204,450,762 | 1 | 0 | MIT | 2019-10-21T14:43:35 | 2019-08-26T10:18:23 | R | UTF-8 | R | false | true | 595 | rd | sdr_list_tables.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sdr_list_tables.R
\name{sdr_list_tables}
\alias{sdr_list_tables}
\title{List tables in a SQL Server database}
\usage{
sdr_list_tables(database, server)
}
\arguments{
\item{database}{\code{string}. A SQL Server database name.}
\item{server}{\code{string}. A SQL Server database server.}
}
\value{
\code{null}
}
\description{
\code{sdr_list_tables} lists all base tables in a SQL Server database on a specified server.
}
\examples{
\dontrun{
sdr_list_tables(database = "DatabaseName", server = "DatabaseServer")
}
}
|
b82c1096dc60106e2779ecf26bb3b58677394f9f | 6c7f767493c3716844b44deee8460000646a463f | /man/track_distribution.Rd | 4ea084c2584a9652da6a00d65c7323c6b57dfca3 | [] | no_license | TylerGrantSmith/heimdallr | 709ff4277c87ca3dc8a5baa945043584a5286db7 | 4656486a93fb2996389961f173bafaeb9231d4e6 | refs/heads/master | 2020-04-06T17:21:53.601814 | 2018-11-21T21:28:32 | 2018-11-21T21:28:32 | 157,656,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 420 | rd | track_distribution.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracker.R
\name{track_distribution}
\alias{track_distribution}
\title{Title}
\usage{
track_distribution(m, ..., distance_method = "jensen-shannon",
method = "fix", options = list(), focus = m$watch_vars, role = NULL,
initialized = FALSE, skip = FALSE, id = rand_id("track_distribution"))
}
\arguments{
\item{id}{}
}
\description{
Title
}
|
476043a49c65ba6f9e9addb6f2aa4dee0dbf5f99 | 5f7cc5800d5ab0a867ca84a4e5b007622ed4eefd | /Plot4.R | 6c5c5fc3b3efd4067f38cf8445236b8a8a4f89fb | [] | no_license | nmouquet/ExData_Plotting1 | e6ac7e7cf88331c33119790e1057473ac817e2ec | 123e21dfce00f50c3ee954c6c2f93d911bf50ee9 | refs/heads/master | 2020-06-02T18:34:33.258712 | 2017-06-12T17:32:42 | 2017-06-12T17:32:42 | 94,102,510 | 0 | 0 | null | 2017-06-12T14:00:58 | 2017-06-12T14:00:57 | null | UTF-8 | R | false | false | 2,275 | r | Plot4.R | #Exploratory Data Analysis Course 1 : PLOT4
# Exploratory Data Analysis Course 1 : PLOT 4
# Reads the UCI household power consumption file, keeps the two target days,
# and draws a 2x2 panel of time-series plots saved to plot4.png.
# (The workspace-clearing rm(list = ls(all = TRUE)) was removed: clearing the
# user's session from inside a script is an anti-pattern.)
library(lubridate)
library(parallel)
library(dplyr)
# Create a new column "exact_time" with time/date in POSIXlt.
# NOTE: the original used tz = "US", which is not a valid Olson time zone and
# forces a platform-dependent fallback; "UTC" keeps parsing and the interval
# comparisons below internally consistent.
data_hpc <- read.table("household_power_consumption.txt",sep=";",header=TRUE)
data_hpc <- mutate(data_hpc,exact_time=paste(as.character(Date),as.character(Time)))
data_hpc$exact_time <- strptime(data_hpc$exact_time, "%d/%m/%Y %H:%M:%S",tz = "UTC")
# Subset the time series to the dates 2007-02-01 and 2007-02-02.
# The upper bound is strict ("<"): with "<=" the observation stamped exactly
# 2007-02-03 00:00:00 would leak into the subset.
sub_data_hpc <- subset(data_hpc, (data_hpc$exact_time>=strptime("2007/02/01 00:00:00","%Y/%m/%d %H:%M:%S",tz = "UTC")) & (data_hpc$exact_time<strptime("2007/02/03 00:00:00","%Y/%m/%d %H:%M:%S",tz = "UTC")))
# Recode the "?" placeholders used for missing values as NA (columns 3 to 9).
for (i in 3:9) {sub_data_hpc[,i][sub_data_hpc[,i] %in% "?"] <- NA}
# Convert the measurement columns to numeric values.
sub_data_hpc$Global_active_power <- as.numeric(as.character(sub_data_hpc$Global_active_power))
sub_data_hpc$Global_reactive_power <- as.numeric(as.character(sub_data_hpc$Global_reactive_power))
sub_data_hpc$Voltage <- as.numeric(as.character(sub_data_hpc$Voltage))
sub_data_hpc$Sub_metering_1 <- as.numeric(as.character(sub_data_hpc$Sub_metering_1))
sub_data_hpc$Sub_metering_2 <- as.numeric(as.character(sub_data_hpc$Sub_metering_2))
sub_data_hpc$Sub_metering_3 <- as.numeric(as.character(sub_data_hpc$Sub_metering_3))
# Draw Plot #4: four panels (active power, voltage, sub-metering, reactive power).
par(mfrow=c(2,2))
with(sub_data_hpc, plot(exact_time,Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab=""))
with(sub_data_hpc, plot(exact_time,Voltage,type="l",ylab="Voltage",xlab="datetime"))
with(sub_data_hpc, plot(exact_time,Sub_metering_1,type="l",ylab="Energy sub metering",xlab=""))
with(sub_data_hpc, points(exact_time,Sub_metering_2,type="l",col="red"))
with(sub_data_hpc, points(exact_time,Sub_metering_3,type="l",col="blue"))
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"),
       col = c("black","red","blue"),
       lty = 1,
       bty = "n",
       pt.cex = 2,
       text.col = "black",
       horiz = FALSE
)
with(sub_data_hpc, plot(exact_time,Global_reactive_power,type="l",ylab="Global_reactive_power",xlab="datetime"))
# Copy the on-screen device to a 480x480 PNG file.
dev.copy(png,file = "plot4.png", bg = "transparent",width=480,height=480)
dev.off()
|
fe939a604860955144d8cbf237347bb0d801d825 | 8f789aa5b5e5c0054f8d501dbb00f22a70c7a22a | /man/getOrthologFromMatrix.Rd | 62ab25f156b98b8538fe08822a5b3f67f68d0ba9 | [] | no_license | ddiez/rTRM | 3458ea89798a374736f19df59e8be64ddc480e9b | 8603c339ee67a94958d2be6f8cd8b7c6bb55f7f2 | refs/heads/master | 2021-01-10T20:30:21.929922 | 2015-10-09T03:14:59 | 2015-10-09T03:14:59 | 37,570,884 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 625 | rd | getOrthologFromMatrix.Rd | \name{getOrthologFromMatrix}
\alias{getOrthologFromMatrix}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Obtain gene identifiers for a target organism associated with a list of PWMs.
}
\description{
Obtain gene identifiers for a target organism associated with a list of PWMs.
}
\usage{
getOrthologFromMatrix(filter, organism = "human", dbname = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{filter}{
vector of matrices to filter results.
}
\item{organism}{
target organism.
}
\item{dbname}{
database; usually does not need to be specified.
}
}
\author{
Diego Diez
} |
d873c5ed8171363d66d78abf30ca523a3672bd53 | c24367bb6cf9cc60f07e8a8c8430d6b54c36e53b | /man/az_role_definition.Rd | 2ff88325bd7cd41b5d5055e007a4666f3d132fa8 | [] | no_license | cran/AzureRMR | 794ed550c5a4c90d2b9e60459f62dffc6bbc1375 | 1b3163abef8d3c0374239a1a19cc00fde732c140 | refs/heads/master | 2021-10-24T12:44:42.805161 | 2021-10-23T04:30:02 | 2021-10-23T04:30:02 | 160,071,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,482 | rd | az_role_definition.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/az_role.R
\docType{class}
\name{az_role_definition}
\alias{az_role_definition}
\title{Azure role definition class}
\format{
An R6 object of class \code{az_role_definition}.
}
\description{
Azure role definition class
}
\section{Fields}{
\itemize{
\item \code{id}: The full resource ID for this role definition.
\item \code{type}: The resource type for a role definition. Always \code{Microsoft.Authorization/roleDefinitions}.
\item \code{name}: A GUID that identifies this role definition.
\item \code{properties}: Properties for the role definition.
}
}
\section{Methods}{
This class has no methods.
}
\section{Initialization}{
The recommended way to create new instances of this class is via the \link{get_role_definition} method for subscription, resource group and resource objects.
Technically role assignments and role definitions are Azure \emph{resources}, and could be implemented as subclasses of \code{az_resource}. AzureRMR treats them as distinct, due to limited RBAC functionality currently supported. In particular, role definitions are read-only: you can retrieve a definition, but not modify it, nor create new definitions.
}
\seealso{
\link{get_role_definition}, \link{get_role_assignment}, \link{az_role_assignment}
\href{https://docs.microsoft.com/en-us/azure/role-based-access-control/overview}{Overview of role-based access control}
}
|
1dd96eefd731380fe58b89931c59bff5ed8d4f02 | fd570307c637f9101ab25a223356ec32dacbff0a | /src-local/specpr/src.specpr/gould/tocent.r | 1248cd4d51facef240e77ae2583c0fd2e8b3e580 | [] | no_license | ns-bak/tetracorder-tutorial | 3ab4dd14950eff0d63429291c648820fb14bb4cb | fd07c008100f6021c293ce3c1f69584cc35de98a | refs/heads/master | 2022-07-30T06:04:07.138507 | 2021-01-03T22:19:09 | 2021-01-03T22:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 282 | r | tocent.r | subroutine tocent(down,across,dncm,accm,isyst)
#
# converts coordinates to centimeters
#
integer*4 isyst
include "../common/plot02"
if (isyst<=0) {
isyst=-isyst
dncm=down
accm=across
} else {
dncm=(down-dmin)*dscale
accm=(across-amin)*ascale
}
return
end
|
9b65288c1f1085fc414a6afb5d541683420e0879 | ec9243d22d65142081fc6a79ee60542482e33f03 | /PA1_template.R | 711f62d6175f2704fb31fb1f09420268a7ccb23e | [] | no_license | mrogman/RepData_PeerAssessment1 | d499f8177fb159999e0846054517b51f4a495e37 | bf8ef160ca0d06405da2b858b4aecd2e1dc5264f | refs/heads/master | 2016-09-06T16:56:55.433777 | 2016-02-22T03:51:57 | 2016-02-22T03:51:57 | 40,827,328 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,279 | r | PA1_template.R | library(ggplot2)
## Load data: unzip the archive once if needed, then read the activity file.
if(!file.exists('activity.csv')) unzip('activity.zip')
monitorData <- read.csv('activity.csv')
## Histogram of total steps per day, plus their mean and median.
totalSteps <- tapply(monitorData$steps, monitorData$date, sum, na.rm=TRUE)
plot <- qplot(totalSteps, xlab='Total number of steps per day', binwidth=500)
print(plot)
stepsMean <- mean(totalSteps)
stepsMedian <- median(totalSteps)
## Average number of steps within each 5-minute interval, across all days.
averageStepsPerInterval <- aggregate(x=list(steps=monitorData$steps), by=list(interval=monitorData$interval), mean, na.rm=TRUE)
plot2 <- ggplot(averageStepsPerInterval, aes(x=interval, y=steps)) +
    geom_line() +
    xlab("Interval (5 min)") +
    ylab("Average number of steps")
print(plot2)
## Interval with the highest average number of steps.
max5min <- averageStepsPerInterval[which.max(averageStepsPerInterval$steps),]
## Total number of entries with missing step counts.
totalNaSteps <- sum(is.na(monitorData$steps))
## Impute: copy the data and replace each missing step count with the
## all-days mean for that 5-minute interval.
filledMonitorData <- monitorData
for (i in seq_len(nrow(filledMonitorData))) {
    if (is.na(filledMonitorData$steps[i])) {
        filledMonitorData$steps[i] <- averageStepsPerInterval[which(filledMonitorData$interval[i]==averageStepsPerInterval$interval),]$steps
    }
}
# Sanity check: should be zero after imputation.
filledTotalNaSteps <- sum(is.na(filledMonitorData$steps))
## Histogram, mean and median of total steps per day on the imputed data.
totalStepsFilled <- tapply(filledMonitorData$steps, filledMonitorData$date, sum)
plot3 <- qplot(totalStepsFilled, binwidth=500, xlab="Total number of steps per day")
print(plot3)
stepsfilledMean <- mean(totalStepsFilled)
stepsfilledMedian <- median(totalStepsFilled)
## New factor with two levels: weekday & weekend.
daysofweek <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
filledMonitorData$dayOfWeek <- factor((weekdays(as.Date(filledMonitorData$date)) %in% daysofweek),
    levels=c(FALSE, TRUE), labels=c('weekend', 'weekday'))
## Average steps per interval with weekend/weekday facets.
averageSteps <- aggregate(steps ~ interval + dayOfWeek, data=filledMonitorData, mean)
## BUG FIX: plot the per-interval averages (averageSteps, computed above and
## previously unused) instead of the raw observations (filledMonitorData).
plot4 <- ggplot(averageSteps, aes(interval, steps)) +
    geom_line() +
    facet_grid(dayOfWeek ~ .) +
    xlab("Interval (5 min)") +
    ylab("Number of steps (mean)")
print(plot4)
|
f6e8f73b7c70b8b2becc4afad89cc1a75e0fd8f2 | 05b091c00769c0db9190e1de3dc5d39ccd5d04a5 | /man/txx_text_to_xml.Rd | 038efe3ef1f52e628037e9fb72438091a2a5f516 | [
"MIT"
] | permissive | michael-ccccc/textured | 3feb7ee788eecac70b5aee9272165c0107259f59 | 35631cd95bb780926080f5213c710848f99fc4af | refs/heads/main | 2023-07-15T17:29:52.623753 | 2021-08-27T14:24:41 | 2021-08-27T14:24:41 | 312,844,984 | 0 | 0 | NOASSERTION | 2021-04-13T16:04:41 | 2020-11-14T15:32:10 | R | UTF-8 | R | false | true | 1,612 | rd | txx_text_to_xml.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/txx_text_to_xml.R
\name{txx_text_to_xml}
\alias{txx_text_to_xml}
\title{Transform semi-structured text into an XML structure}
\usage{
txx_text_to_xml(strings, tags)
}
\arguments{
\item{strings}{A vector of character strings containing the semi-structured
text. Each string should represent a single entry (e.g. a single letter).}
\item{tags}{The character strings that identify that a section has started,
e.g. "Diagnosis:" or "SUMMARY:". These may vary from letter to letter;
include all variants. Order matters: tags are searched for in the order
given, so when one tag is contained within a longer tag, put the longer
tag first so that it is matched first.}
}
\value{
A vector of character strings with the text transformed into an
XML structure.
}
\description{
Transforms a character string into an XML structure by identifying (known)
words or phrases that indicate that a new section has started. These words
or phrases (known as tags) need to be entered beforehand. Outputs can then
be analyzed with other packages such as \code{XML} or \code{xml2} to extract
the sections of interest.
}
\examples{
txx_text_to_xml(strings = "Name: Alice Age:40 Address:43 Maple Street",
tags = c("Name:", "Age:", "Address:"))
txx_text_to_xml(strings =
c("Name: Alice Age:40 Address:43 Maple Street",
"Name: Bob Address: 44 Maple Street Age:41 Weight:100kg"),
tags = c("Name:", "Age:", "Address:", "Weight:"))
}
|
a87fdcdb539d5687e7faad2cd942638e8838224b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/lgarch/examples/mlgarch.Rd.R | 9ba4d5654ced8c7128d880169a159a4bca12dbcd | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 922 | r | mlgarch.Rd.R | library(lgarch)
### Name: mlgarch
### Title: Estimate a multivariate CCC-log-GARCH(1,1) model
### Aliases: mlgarch
### Keywords: Statistical Models Time Series Financial Econometrics
### ** Examples
## Reproducible simulation: 1000 draws from a bivariate
## CCC-log-GARCH(1,1) at the default parameter values.
set.seed(123)
y <- mlgarchSim(1000)
## Fit a bivariate CCC-log-GARCH(1,1) to the simulated series
## and display the estimation results.
fit <- mlgarch(y)
print(fit)
## Inspect the fitted model: coefficients, Gaussian log-likelihoods
## (zeros excluded) for the CCC-log-GARCH model and for its VARMA
## representation, and the variance-covariance matrix of the estimates.
coef(fit)
logLik(fit)
logLik(fit, varma=TRUE)
vcov(fit)
## Extract and plot the fitted conditional standard deviations,
## then the standardised residuals.
cond_sd <- fitted(fit)
plot(cond_sd)
std_resid <- residuals(fit)
plot(std_resid)
|
2517b685ef1c2b7d5c98f7ea70ae80720cb30239 | c091056e779a3ea6686fbf9200a9e1ba1edef1e9 | /R/vortex.R | 2e5fd714646790cc262311eefbd963b47600d49c | [] | no_license | AnthonyTedde/vortex | 7b2c75575b2fd1c9cbaad933897b75e6c4bf0a84 | 4c4e8e37bcf15aacba6638a2c9f7e420f65c1b8f | refs/heads/master | 2020-07-15T13:57:19.914012 | 2019-09-05T13:16:12 | 2019-09-05T13:16:12 | 205,578,701 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 64 | r | vortex.R | #' @keywords internal
"_PACKAGE"
# The roxygen directive below adds importFrom(purrr, "%>%") to the NAMESPACE,
# making purrr's pipe operator available inside this package. NULL merely
# gives roxygen2 an object to attach the directive to; it has no runtime effect.
#' @importFrom purrr %>%
NULL
|
7594efb9a6c3c5ad8a37dbe4d4b11969185e16b1 | 5e62d24701b872e0ba10efc19aa3a0452e932765 | /man/readsToStartOrEnd.Rd | 79a7e3453c81719720f98ffd2e8dd063e53c835f | [] | no_license | alenzhao/RiboProfiling | 76d8d08868199b0eda562c167acd41919e5b3323 | 94b63319204230e92083e01fd5967cde27fddb1d | refs/heads/master | 2021-01-11T21:36:41.826469 | 2017-01-11T07:49:26 | 2017-01-11T07:49:26 | 78,817,285 | 1 | 0 | null | 2017-01-13T05:09:08 | 2017-01-13T05:09:08 | null | UTF-8 | R | false | true | 969 | rd | readsToStartOrEnd.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readsToStartOrEnd.R
\name{readsToStartOrEnd}
\alias{readsToStartOrEnd}
\title{Reads in GAlignments converted to either Read Start (5') or End (3') Positions}
\usage{
readsToStartOrEnd(aln, what)
}
\arguments{
\item{aln}{A GAlignments object of the BAM mapping file.}
\item{what}{A character object. Either "start" (the default) or "end"
for read start or read end.}
}
\value{
A GRanges object containing either the read start or end
genomic positions.
}
\description{
Reads in GAlignments converted to either Read Start (5') or End (3') Positions
}
\examples{
#read the BAM file into a GAlignments object using
#GenomicAlignments::readGAlignments
#the GAlignments object should be similar to ctrlGAlignments object
data(ctrlGAlignments)
aln <- ctrlGAlignments
#transform the GAlignments object into a GRanges object (faster processing)
alnGRanges <- readsToStartOrEnd(aln, what = "end")
}
|
da7f90c458342034b66e6228048baa7480541fc9 | 2bd58ca05fce7e7fd7013872e7d15f9ffd3da25f | /ui.R | 7d66a7a066d368b55c14480216f617681385d021 | [] | no_license | Yihuiz/BMI-Calculator | 22e19885e7d3ed67eb5f5417f99eaaeb28624195 | 4e31daa67c5882f028fe2b0ff8a5c52a367acaf8 | refs/heads/master | 2021-01-24T09:04:40.494584 | 2016-10-06T15:40:14 | 2016-10-06T15:40:14 | 69,820,891 | 0 | 0 | null | 2016-10-06T15:44:34 | 2016-10-02T20:41:18 | HTML | UTF-8 | R | false | false | 1,056 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that calculates your Body Mass Index(BMI)
# Assemble the page from separately built pieces: an input sidebar
# (height/weight fields plus a submit button) and an output panel
# (computed BMI, its status, and a small reference table).
bmi_inputs <- sidebarPanel(
  numericInput('height', 'Height (m)', 1.65, min = 0, max = 3, step = 0.01),
  numericInput('weight', 'Weight (kg)', 55, min = 0, max = 300, step = 0.5),
  submitButton('Submit')
)
bmi_outputs <- mainPanel(
  h3('Your BMI'),
  verbatimTextOutput("result"),
  h3('Your BMI Status'),
  verbatimTextOutput('status'),
  h3('BMI reference value'),
  h4('<18.5 | Underweight'),
  h4('18.5~24.9 | Normal'),
  h4('>=25.0 | Overweight')
)
# The shinyUI(...) value must be the last expression in ui.R.
shinyUI(
  pageWithSidebar(
    headerPanel("BMI Caculator"),
    bmi_inputs,
    bmi_outputs
  )
)
|
f4f8caced44dadeb75caf4d1e326d90abdcba040 | eb8c51814e306ea1f8a0c10bc86dc2112e83360c | /analysis/R/extract.date.sub.R | 408d3676d0f2c4b3be6a9d656ea6328abd343051 | [] | no_license | gilmore-lab/moco-psychophysics-child-adult | e1f437cfc18f04160632846b7eceb34c750637e7 | 829e8472d17158e76eea667ea14e5326eb5d70bd | refs/heads/master | 2021-06-07T13:38:33.281338 | 2021-04-09T15:21:45 | 2021-04-09T15:21:45 | 154,509,872 | 0 | 0 | null | 2020-07-01T17:48:08 | 2018-10-24T13:53:16 | HTML | UTF-8 | R | false | false | 246 | r | extract.date.sub.R | extract.date.sub <- function(fn="160402105932.csv"){
fn = basename(fn)
test.date <- substr(fn, 1, 6)
sub.num <- substr(fn, 7, 10)
block <- substr(fn, 11, 11)
speed <- substr(fn, 12, 12)
return(list(test.date, sub.num, block, speed))
} |
75cf8c90285c3b5370673bb0ac6ba30ff0bccd22 | ffb2418b096271c5b29821344e47269d6fe4d192 | /R/method-from-call.r | cbd463b4874a85f514ddcb3c47304d3b13920d12 | [] | no_license | hadley/pryr | ed001475a186a0125136d40fd2ecaace230ae194 | 860500b7ff9951441822bf046b2b8665113f2276 | refs/heads/master | 2023-04-05T06:00:42.153084 | 2023-01-18T13:54:12 | 2023-01-18T13:54:12 | 7,491,765 | 188 | 35 | null | 2023-03-18T16:58:03 | 2013-01-07T23:19:25 | R | UTF-8 | R | false | false | 1,093 | r | method-from-call.r | #' Given a function class, find correspoding S4 method
#'
#' @param call unquoted function call
#' @param env environment in which to look for function definition
#' @export
#' @examples
#' library(stats4)
#'
#' # From example(mle)
#' y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
#' nLL <- function(lambda) -sum(dpois(y, lambda, log = TRUE))
#' fit <- mle(nLL, start = list(lambda = 5), nobs = length(y))
#'
#' method_from_call(summary(fit))
#' method_from_call(coef(fit))
#' method_from_call(length(fit))
method_from_call <- function(call, env = parent.frame()) {
  # Normalise the unevaluated call so arguments are matched against the
  # generic's formal argument names.
  call <- standardise_call(substitute(call), env)
  generic <- as.character(call[[1]])
  # All named formal arguments of the S4 generic ("..." never dispatches).
  g_args <- setdiff(names(formals(methods::getGeneric(generic))), "...")
  # Evaluate only the arguments actually supplied, in the caller's environment.
  # FIX: spell out `envir =` -- the original passed `env = env`, which only
  # worked through partial matching of eval()'s `envir` argument.
  args_uneval <- as.list(call[intersect(g_args, names(call))])
  args <- lapply(args_uneval, eval, envir = env)
  classes <- lapply(args, class)
  # Arguments that were not supplied dispatch on the pseudo-class "missing".
  missing <- setdiff(g_args, names(classes))
  if (length(missing) > 0) {
    classes[missing] <- rep("missing", length(missing))
  }
  # The S4 method selected for this combination of argument classes.
  methods::selectMethod(generic, classes)
}
|
79366ebf9e4d7b42b2f56c884d3ce88b622159fd | 9fc708f1637c232f41d35ee6331985b205194c28 | /funNewsQ_no_reest.R | 5834de0641409111ff6923cb4194cc54707ada35 | [] | no_license | guilbran/EM-transcription | 0c00923abd62ec20958049e0cfe3eca4c57bd113 | 867c9f96192fb550bf51c29a89307864ceb3fcca | refs/heads/master | 2021-03-19T11:26:26.041648 | 2017-10-16T18:10:41 | 2017-10-16T18:10:41 | 106,282,455 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,232 | r | funNewsQ_no_reest.R | library(R.matlab)
library(matlab)
library(readxl)
library(zoo)
# Load the parameter structure produced by the EM step (a MATLAB .mat file)
# and unwrap the 1x1 array element into a plain R list -- TODO confirm shape.
P <- readMat("arquivos pra fç EMstep/P.mat")
P <- P$P[,,1]
# Absolute, machine-specific paths to the data and block-structure workbooks;
# consider making these relative or configurable.
P$DataFile <- "C:\\Users\\daiane.mattos\\Desktop\\NowCastingOxfordReplicationWeb\\Data\\Data 2010-01-21 Back oil15.xls"
P$BlockFile <- "C:\\Users\\daiane.mattos\\Desktop\\NowCastingOxfordReplicationWeb\\Data\\RNBen.xls"
# Compute the sequence of nowcast "news" updates for a target quarterly series
# from a mixed-frequency (monthly + quarterly) dynamic factor model, WITHOUT
# re-estimating the model at each vintage: the factor model is estimated once,
# at the first evaluation vintage, via an EM_DFM_SS* routine.
#
# P: list of settings read from the EM-step .mat file, augmented with
#   DataFile/BlockFile  paths to the data and block-loading workbooks
#   StartEst            c(year, month) where the estimation sample starts
#   StartEv/EndEv       c(year, month) evaluation window for the vintages
#   Qnews               c(year, month) of the quarter being nowcast
#   SerNews             name(s) of the target series
#   method/idio/restr/r/p/DF/SL  model configuration flags
#
# Returns a list with old/new forecasts per vintage, the news decomposition by
# series and by group, Kalman gains, and the augmented P.
#
# NOTE(review): this is a fairly literal MATLAB translation; it relies on
# helpers from package 'matlab' (zeros, ones, find, mod) and on project
# functions EM_DFM_SS* and News_DFM_ML defined elsewhere.
funNewsQ_no_reest <- function(P){
  StartEst <- P$StartEst
  Qnews <- P$Qnews
  SerNews <- P$SerNews
  StartEv <- P$StartEv
  EndEv <- P$EndEv
  # NOTE(review): this creates a variable literally named "P.max_iter" (a
  # MATLAB-style leftover) that is never used below; presumably P$max_iter
  # was intended -- confirm.
  P.max_iter <- 500
  fcstH <- 1
  # %--------------------------------------------------------------------------
  # % Loading monthly data
  # %--------------------------------------------------------------------------
  DataFile <- P$DataFile
  BlockFile <- P$BlockFile
  # Block-loading structure for the monthly series (BlockFile sheet 1);
  # column 4 flags which series to keep.
  a <- data.frame(read_excel(BlockFile, sheet = 1))
  ListM <- a[,4]
  ListM <- find(ListM)
  Blocks <- a[,4:ncol(a)]
  Blocks <- Blocks[ListM,]
  # Monthly series metadata (DataFile sheet 3): groups, names, transformation
  # codes and unbalancedness (publication-lag) patterns.
  aa <- data.frame(read_excel(DataFile, sheet = 3))
  a <- aa
  a[,2:3] <- NaN
  b <- aa[,-1]
  b[,-c(1:2)] <- NA
  b <- b[2:nrow(b),]
  GroupM <- b[ListM,2]
  SeriesM <- b[ListM,3]
  # Transformation
  TransfM <- a[ListM,4:5]
  # unbalancedeness patterns
  UnbM <- a[ListM,6:11]
  # Monthly observations (DataFile sheet 1) and their dates.
  a <- data.frame(read_excel(DataFile, sheet = 1, skip = 3, col_names = F)[,-1])
  b <- data.frame(read_excel(DataFile, sheet = 1, col_names = T))
  b[3:nrow(b),2:ncol(b)] <- NA
  DataM <- a[,ListM]
  # if strcmp(version('-release'),'2006b')
  DatesM <- as.Date(data.frame(b[3:nrow(b),1])[,1])
  # else
  #   DatesM <- datenum(b(4:end,1));
  # end
  # Year/month of every date (column names kept from the original: Portuguese
  # "ano" = year, "mes" = month).
  DatesMV <- data.frame(ano = as.numeric(substr(DatesM,1,4)),
                        mes = as.numeric(substr(DatesM,6,7)))
  TT <- length(DatesM)
  # MoM transformations
  # Code 1 in column 1 of TransfM = take 100*log; code 1 in column 2 = first
  # difference (first row becomes NaN).
  DataMM <- DataM
  DataMM[,c(TransfM[,1] == 1)] <- 100*log(DataMM[,c(TransfM[,1] == 1)])
  DataMM[2:nrow(DataMM),c(TransfM[,2] == 1)] <- DataMM[2:nrow(DataMM),c(TransfM[,2] == 1)] - DataMM[1:(nrow(DataMM)-1),c(TransfM[,2] == 1)]
  DataMM[1,c(TransfM[,2] == 1)] <- NaN
  # Survey series are kept in levels (untransformed) when P$SL == 1.
  GroupSurveys <- c('ECSurv','ECSurvNom','PMI','PMInom')
  if(P$SL == 1){
    DataMM[,GroupM %in% GroupSurveys] <- DataM[,GroupM %in% GroupSurveys];
  }
  # Pad the monthly panel with NaN rows up to the full date range TT.
  DataMTrf <- DataMM
  tM <- nrow(DataMTrf)
  nM <- ncol(DataMTrf)
  x <- matrix(NaN, ncol = nM, nrow = TT-tM)
  colnames(x) <- colnames(DataMTrf)
  DataMTrf <- rbind(DataMTrf,x)
  x <- matrix(NaN, ncol = nM, nrow = TT-tM)
  colnames(x) <- colnames(DataM)
  DataMM <- rbind(DataMM, x)
  # %--------------------------------------------------------------------------
  # % Loading quarterly data
  # %--------------------------------------------------------------------------
  # Same steps as for the monthly panel, now for the quarterly sheets.
  a <- data.frame(read_excel(BlockFile, sheet = 2, col_names = T))
  ListQ <- a[,4]
  ListQ <- find(ListQ)
  BlocksQ <- a[,4:ncol(a)]
  BlocksQ <- BlocksQ[ListQ,]
  aa <- data.frame(read_excel(DataFile, sheet = 4))
  a <- aa
  a[,2:3] <- NaN
  b <- aa[,-1]
  b[,-c(1:2)] <- NA
  b <- b[2:nrow(b),]
  GroupQ <- b[ListQ,2]
  SeriesQ <- b[ListQ,3]
  # Transformation
  Transf <- a[ListQ,4:5]
  # unbalancedeness patterns
  UnbQ <- a[ListQ,6:11]
  a <- data.frame(read_excel(DataFile, sheet = 2, skip = 3, col_names = F)[,-1])
  b <- data.frame(read_excel(DataFile, sheet = 2, col_names = T))
  b[3:nrow(b),2:ncol(b)] <- NA
  DataQ <- a[,ListQ]
  DataQTrf <- data.frame(DataQ)
  DataQTrf[,Transf[,1] == 1] <- log(DataQTrf[,Transf[,1] == 1])
  DataQTrf[2:nrow(DataQTrf),Transf[,2] == 1] <- 100*(DataQTrf[2:nrow(DataQTrf),Transf[,2] == 1] - DataQTrf[1:(nrow(DataQTrf)-1),Transf[,2] == 1])
  DataQTrf[1,Transf[,2] == 1] <- NaN
  # quarterly at monthly frequency
  # kronecker with c(NaN,NaN,1) expands each quarterly row into three monthly
  # rows (NaN, NaN, value): the value sits in the last month of the quarter.
  DataQMTrf <- kronecker(as.matrix(DataQTrf),c(NaN,NaN,1))
  tQ <- nrow(DataQMTrf)
  nQ <- ncol(DataQMTrf)
  x <- matrix(NaN, ncol = nQ, nrow = TT-tQ)
  colnames(x) <- colnames(DataQMTrf)
  DataQMTrf <- rbind(DataQMTrf,x)
  # %--------------------------------------------------------------------------
  # % complete dataset
  # %--------------------------------------------------------------------------
  Data <- cbind(DataMTrf,DataQMTrf)
  Series <- rbind(SeriesM,SeriesQ)
  Group <- rbind(GroupM,GroupQ)
  UnbPatt <- rbind(UnbM,UnbQ)
  P$blocks <- rbind(Blocks,BlocksQ)
  # Trim everything to start at the estimation start date.
  iEst <- find(DatesMV[,1] == StartEst[1] & DatesMV[,2] == StartEst[2])
  # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  Data <- Data[iEst:nrow(Data),]
  Dates <- DatesM[iEst:length(DatesM)]
  DatesV <- DatesMV[iEst:nrow(DatesMV),]
  idxM <- t(1:nM)
  idxQ <- t((nM+1):(nM+nQ))
  # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  DataMM <- DataMM[iEst:nrow(DataMM),]
  nVar <- nM+nQ
  # %--------------------------------------------------------------------------
  # % unbalancedeness patterns
  # %--------------------------------------------------------------------------
  # Build, for each month-within-quarter (1..3) and each of two intra-month
  # vintages, a 0/NaN mask marking observations not yet published at the end
  # of the sample.
  nn <- min(UnbPatt)
  nn <- min(nn,0)
  UnbPattM1_1 <- zeros(12-nn,nVar)
  UnbPattM1_2 <- zeros(12-nn,nVar)
  UnbPattM2_1 <- zeros(12-nn,nVar)
  UnbPattM2_2 <- zeros(12-nn,nVar)
  UnbPattM3_1 <- zeros(12-nn,nVar)
  UnbPattM3_2 <- zeros(12-nn,nVar)
  nUnb <- 12-nn
  # NOTE(review): the loop runs over series 1:(nVar-2) -- the last two series
  # never receive a mask -- and the inner guard compares the series index i
  # with nUnb-1, a mask length; both look suspicious, confirm against the
  # original MATLAB source.
  for(i in 1:(nVar-2)){
    UnbPattM1_1[(nrow(UnbPattM1_1)-UnbPatt[i,1]+1+nn):nrow(UnbPattM1_1),i] <- NaN
    UnbPattM2_1[(nrow(UnbPattM2_1)-UnbPatt[i,2]+1+nn):nrow(UnbPattM2_1),i] <- NaN
    UnbPattM3_1[(nrow(UnbPattM3_1)-UnbPatt[i,3]+1+nn):nrow(UnbPattM3_1),i] <- NaN
    if(i < (nUnb-1)){
      UnbPattM1_2[(nrow(UnbPattM1_2)-UnbPatt[i,4]+1+nn):nrow(UnbPattM1_2),i] <- NaN
      UnbPattM2_2[(nrow(UnbPattM2_2)-UnbPatt[i,5]+1+nn):nrow(UnbPattM2_2),i] <- NaN
      UnbPattM3_2[(nrow(UnbPattM3_2)-UnbPatt[i,6]+1+nn):nrow(UnbPattM3_2),i] <- NaN
    }
  }
  # %--------------------------------------------------------------------------
  # % restrictions
  # %--------------------------------------------------------------------------
  # Aggregation constraints linking the quarterly series to the monthly
  # factors (presumably the Mariano-Murasawa (1,2,3,2,1) scheme -- confirm).
  P$nQ <- nQ
  P$Rconstr <- matrix(c(2, -1, 0, 0, 0,
                        3, 0, -1, 0, 0,
                        2, 0, 0, -1, 0,
                        1, 0, 0, 0, -1), byrow = T, ncol = 5, nrow = 4)
  P$q <- zeros(4,1)
  P$restr <- '_restrMQ'
  # %--------------------------------------------------------------------------
  # % out-of-sample evaluation
  # %--------------------------------------------------------------------------
  iS <- find(DatesV[,1] == StartEv[1] & DatesV[,2] == StartEv[2])
  iE <- find(DatesV[,1] == EndEv[1] & DatesV[,2] == EndEv[2])
  iQ <- find(DatesV[,1] == Qnews[1] & DatesV[,2] == Qnews[2])
  iSer <- find(Series %in% SerNews)
  # Month within the quarter (1, 2 or 3) of every date.
  Month <- mod(DatesV[,2],3)
  Month[Month == 0] <- 3
  P$i_idio <- rbind(ones(nM,1),zeros(nQ,1)) == 1
  Month_i <- Month[iS-1]
  # second unbalancedeness pattern
  # Build the initial (pre-evaluation) vintage X_old: mask the not yet
  # published tail of the panel, then pad with NaN up to the forecast horizon.
  eval(parse(text = paste0('UnbP = UnbPattM',Month_i,'_2')))
  X <- Data[1:(iS-1-nn),]
  temp <- X[(nrow(X)-nUnb+1):nrow(X),]
  temp[is.nan(UnbP)] <- NaN
  X[(nrow(X)-nUnb+1):nrow(X),] <- temp
  x <- matrix(NaN, nrow = max(0,(fcstH+1)*3-Month_i+nn), ncol = nM+nQ)
  colnames(x) <- colnames(X)
  X_old <- rbind(X,x)
  # Result containers: two vintages per evaluation month, hence 2*(iE-iS+1)
  # rows. NOTE: SerNews is reused here as a numeric results matrix; from this
  # point on it no longer holds the target-series name read from P.
  OldFcst <- zeros(2*(iE-iS+1),1);
  NewFcst <- zeros(2*(iE-iS+1),1);
  GroupNews <- zeros(2*(iE-iS+1),length(unique(Group)));
  SerNews <- zeros(2*(iE-iS+1),nM+nQ);
  Gain <- zeros(2*(iE-iS+1),nM+nQ);
  # NOTE(review): Actual, Fcst and Filt are filled inside the loop below but
  # are never initialised here (unlike the matrices above); as written their
  # first use should fail with "object not found" -- confirm.
  for(i in iS:iE){
    Date_i <- DatesV[i,]
    Month_i <- Month[i];
    message(paste0('Computing the news for the vintages: y', DatesV[i,1],' m', DatesV[i,2]))
    # first unbalancedeness pattern
    # NOTE(review): 'paste' below looks like a typo for 'parse' (compare the
    # parallel eval(parse(...)) statements above and below); as written this
    # eval() is a no-op and UnbP keeps its previous value -- confirm.
    eval(paste(text = paste0('UnbP = UnbPattM',Month_i,'_1;')))
    X <- Data[1:(i-nn),]
    temp <- X[(nrow(X)-nUnb+1):nrow(X),]
    temp[is.nan(UnbP)] <- NaN
    X[(nrow(X)-nUnb+1):nrow(X),] <- temp
    x <- matrix(NaN, nrow = max(0,(fcstH+1)*3-Month_i+nn), ncol = nM+nQ)
    colnames(x) <- colnames(X)
    X_new <- rbind(X,x)
    # Align the old and new vintages on the same number of rows.
    T_o <- nrow(X_old)
    T_n <- nrow(X_new)
    x <- matrix(NaN, nrow = T_n-T_o, ncol = nM+nQ)
    colnames(x) <- colnames(X_old)
    X_old <- rbind(X_old,x)
    # Estimate the factor model once, at the first vintage only (hence the
    # "_no_reest" in this function's name).
    if(i == iS){
      eval(parse(text = paste0('R_new <- EM_DFM_SS',P$method,P$idio,P$restr,'(X_new,P)')))
      R_new$Groups <- Group
      R_new$Series <- Series
    }
    # News decomposition for the first intra-month release.
    # (translated from Portuguese: "attention here, buddy")
    out <- News_DFM_ML(X_old,X_new,R_new,iQ,iSer)
    OldFcst[2*(i-iS)+1,1] <- out$OldFcst
    NewFcst[2*(i-iS)+1,1] <- out$NewFcst
    GroupNews[2*(i-iS)+1,] <- out$GroupNews
    SerNews[2*(i-iS)+1,] <- out$SerNews
    gainT <- out$gainT
    serGainT <- out$serGainT
    Actual[,2*(i-iS)+1] <- out$Actual
    Fcst[,2*(i-iS)+1] <- out$Fcst
    Filt[,2*(i-iS)+1] <- out$Filt
    Gain[2*(i-iS)+1,Series %in% serGainT] <- gainT
    X_old <- X_new
    # second unbalancedeness pattern
    eval(parse(text = paste0('UnbP <- UnbPattM',Month_i,'_2')))
    X <- Data[1:(i-nn),]
    temp <- X[(nrow(X)-nUnb+1):nrow(X),]
    temp[is.nan(UnbP)] <- NaN
    X[(nrow(X)-nUnb+1):nrow(X),] <- temp
    x <- matrix(NaN, nrow = max(0,(fcstH+1)*3-Month_i+nn), ncol = nM+nQ)
    colnames(x) <- colnames(X)
    X_new <- rbind(X,x)
    # News decomposition for the second intra-month release.
    # (translated from Portuguese: "attention here again, buddy")
    out2 <- News_DFM_ML(X_old,X_new,R_new,iQ,iSer)
    OldFcst[2*(i-iS)+2,1] <- out2$OldFcst
    NewFcst[2*(i-iS)+2,1] <- out2$NewFcst
    GroupNews[2*(i-iS)+2,] <- out2$GroupNews
    SerNews[2*(i-iS)+2,] <- out2$SerNews
    gainT <- out2$gainT
    serGainT <- out2$serGainT
    Actual[,2*(i-iS)+2] <- out2$Actual
    Fcst[,2*(i-iS)+2] <- out2$Fcst
    Filt[,2*(i-iS)+2] <- out2$Filt
    Gain[2*(i-iS)+2,Series %in% serGainT] <- gainT
    X_old <- X_new
  }
  # Dates of the vintages (two rows per evaluation month).
  DatesNews <- matrix(NaN, nrow = length(OldFcst), ncol = 2)
  DatesNews[seq(1,nrow(DatesNews), by = 2),] <- DatesV[iS:iE,]
  # NOTE(review): duplicated statement -- the second fill presumably should
  # target the even rows, i.e. seq(2, nrow(DatesNews), by = 2) -- confirm.
  DatesNews[seq(1,nrow(DatesNews), by = 2),] <- DatesV[iS:iE,]
  GroupNames <- t(unique(Group))
  TrueSer <- Data[iQ,iSer]
  # check whether the new forecast is equal to the old forecast plus the news
  check <- NewFcst-OldFcst-matrix(rowSums(GroupNews))
  datafile <- paste0('news',P$DF,P$method,P$idio,paste0(P$r, collapse =""),P$p)
  # datafile <- strrep(datafile,' ','_');
  # output
  list(OldFcst = OldFcst,NewFcst = NewFcst, TrueSer = TrueSer, DatesNews = DatesNews, GroupNews = GroupNews,
       SerNews = SerNews, GroupNames = GroupNames, Gain = Gain, Fcst = Fcst, Actual = Actual, Filt = Filt,
       Series = Series, Group = Group, P = P)
}
027d7880714ad627137265eef79cc201d5ff06fe | d8cdb87d2ccefc0db15239edba9a15fbb509f51c | /plot2.R | da1b5bdf9d93884f76ea5ead971568386980f375 | [] | no_license | Nalpin/ExData_Plotting1 | d187c4755cefec6d2bf3263b93af8306b3ff96fe | c9ef1ec2fbfca8fb43a7d21a88702f91738b1ef0 | refs/heads/master | 2021-01-18T05:42:45.284154 | 2015-03-08T22:58:05 | 2015-03-08T22:58:05 | 31,752,891 | 0 | 0 | null | 2015-03-06T04:43:18 | 2015-03-06T04:43:16 | null | UTF-8 | R | false | false | 698 | r | plot2.R | require(sqldf)
# Exploratory Data Analysis -- Plot 2: Global Active Power over time for
# 2007-02-01 and 2007-02-02, saved to plot2.png.
# (library() is used instead of require(): a missing package should be an
# error, not a silent FALSE.)
library(sqldf)      # read.csv.sql: filter rows while reading from disk
library(lubridate)  # dmy_hms: parse the date/time stamps
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
fzip <- "household_power_consumption.zip"
file <- "household_power_consumption.txt"
# Download and unpack the raw data only if it is not already present.
if (!file.exists(fzip)) {
  download.file(url, fzip)
  unzip(fzip)
}
# Read just the two days of interest straight from the file.
ds <- read.csv.sql(file,
                   "select * from file where Date in ('1/2/2007','2/2/2007')",
                   sep = ";")
# Force English day/month names regardless of the system locale.
Sys.setlocale("LC_TIME", "English")
# BUG FIX: the original referenced a non-existent data frame `df` here;
# the data read above is in `ds`.
ds$datetime <- dmy_hms(paste(ds$Date, ds$Time))
plot(ds$datetime, ds$Global_active_power,
     xlab="", ylab="Global Active Power (kilowatts)", type="n")
lines(ds$datetime, ds$Global_active_power, type="l")
# Save png image
dev.copy(png, file = "plot2.png")
dev.off()
797a487392dd5fa53830605c32ae514993cec3a5 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /diceR/inst/testfiles/indicator_matrix/libFuzzer_indicator_matrix/indicator_matrix_valgrind_files/1609959746-test.R | ade1c07aedb8292c855e74db77caeb7c17149bd9 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 524 | r | 1609959746-test.R | testlist <- list(x = c(4.44380721892337e+252, 8.0930792450553e+175, 1.75261887579858e+243, 6.22211717938606e-109, 3.62473289151349e+228, 1.62618103126837e-260, 2.11451614301046e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(diceR:::indicator_matrix,testlist)
str(result) |
96220cb58b8460176f11adee35c86abc313da266 | b1a584edd3b74bdff1eed9dda86adb5a214c47e0 | /chap3.R | 49c868874149ac4f4d535e246ed7465a5d03277b | [] | no_license | sakura2014/PisaDataAnalysisManual | 94ad1a8266c742592a2f37993dfb9e373172ef59 | 79e857794780a91b26d28565f97f44fc1f1ce4db | refs/heads/master | 2020-08-03T12:11:43.076228 | 2019-10-14T13:35:15 | 2019-10-14T13:35:15 | 211,748,889 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 308 | r | chap3.R | # table 3.2
# table 3.2: ten evenly spaced heights, each with frequency weight 10
x <- seq(from = 160, to = 178, by = 2)
w <- rep.int(10, 10)
# p.50 -- sample standard deviation of the raw values
sd(x)
# Weighted sum of squared deviations about the (weighted) mean.
# NOTE(review): `w` is not passed to weighted.mean(), so this is the plain
# mean; with equal weights the value is unchanged -- confirm intent.
wss <- sum(w * (x - weighted.mean(x))^2)
sqrt(wss / length(x))         # (i)   divisor n
sqrt(wss / (length(x) - 1))   # (ii)  divisor n - 1
sqrt(wss / sum(w))            # (iii) divisor total weight
sqrt(wss / (sum(w) - 1))      # (iv)  divisor total weight - 1
|
322d21c9d0a1149150f2afe7982d90a1640e2dc5 | 0a88827f7304d4a3e9e8624cd1f15d3f18d9d000 | /R/search_any_match_paged.R | 82baa6353c5c8319bac8e7ff84b4c92a43dbf44d | [
"MIT"
] | permissive | ropensci/ritis | 51b7dd2afb9cedb434f9c024daacab96f7c85da0 | 95f190cd328c80e75aa9619f04db7cd1016f7e9c | refs/heads/master | 2023-05-23T19:49:34.994109 | 2022-09-28T05:31:44 | 2022-09-28T05:31:44 | 2,693,028 | 14 | 3 | null | 2018-05-20T15:34:10 | 2011-11-02T04:52:30 | R | UTF-8 | R | false | false | 1,244 | r | search_any_match_paged.R | #' Search for any matched page
#'
#' @export
#' @inheritParams accepted_names
#' @inheritParams any_match_count
#' @return a data.frame
#' @param pagesize An integer containing the page size (numeric)
#' @param pagenum An integer containing the page number (numeric)
#' @param ascend A boolean containing true for ascending sort order or false
#' for descending (logical)
#' @return a data.frame
#' @seealso \code{\link{search_anymatch}}
#' @examples \dontrun{
#' search_any_match_paged(x=202385, pagesize=100, pagenum=1, ascend=FALSE)
#' search_any_match_paged(x="Zy", pagesize=100, pagenum=1, ascend=FALSE)
#' }
search_any_match_paged <- function(x, pagesize = NULL, pagenum = NULL, ascend = NULL, wt = "json", raw = FALSE, ...) {
  # Assemble the query parameters; tc() is an internal helper that presumably
  # drops NULL entries so omitted arguments are not sent -- confirm.
  args <- tc(list(srchKey=x, pageSize=pagesize, pageNum=pagenum, ascend=ascend))
  # Call the ITIS "searchForAnyMatchPaged" endpoint.
  out <- itis_GET("searchForAnyMatchPaged", args, wt, ...)
  # Return the raw payload untouched when the caller asked for it, or when
  # XML was requested (no tidying is performed for XML).
  if (raw || wt == "xml") return(out)
  x <- parse_raw(out)$anyMatchList
  # Flatten the nested common-name records; dr_op() appears to drop the
  # listed column(s) -- confirm. Prefix with "common_" to avoid name
  # collisions with the top-level columns.
  tmp <- dr_op(bindlist(x$commonNameList.commonNames), "class")
  names(tmp) <- paste0("common_", names(tmp))
  # Bind the flattened common names alongside the remaining top-level
  # columns; warnings from the cbind (e.g. row recycling) are suppressed.
  x <- suppressWarnings(
    cbind(
      dr_op(x, c("commonNameList.commonNames", "commonNameList.class", "commonNameList.tsn", "class")),
      tmp
    )
  )
  # Return as a tibble.
  tibble::as_tibble(x)
}
|
7282e65b3f1a35efbe3b2f0623f02bfb21531d35 | d229a05252283ef814cf87c04cff054022c25293 | /ScrapingScripts/Scraping99Acres.R | f85e15b06b4bda635b811bc13fb2e8ffcfeb0120 | [] | no_license | thecomeonman/HouseLeadsAutomation | 462fbaeb0e1d2abcb56207b3b31b99807283f55a | d0ee03277a579a4be000f29cdf3db1ce45371746 | refs/heads/master | 2020-12-30T13:38:49.681696 | 2018-09-01T07:27:38 | 2018-09-01T07:27:38 | 91,234,533 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 12,566 | r | Scraping99Acres.R | # rm(list = ls())
library(googlesheets)
library(data.table)
library(dplyr)
library(rjson)

# Workbook name, result worksheet name, and the token identifying this
# portal's URLs in the shared QueryURLs worksheet.
cFileName <- 'Automated House Leads 2018'
cResultsSheetName <- '99Acres'
cSearchURLPattern <- '99acres'

# Getting details from the Google sheet
# (this section is the same across the per-website scraping scripts)
# =============================================================================

# Open the workbook.
AHH <- gs_title(cFileName)

# Search URLs configured for this run, restricted to this website's URLs.
gsWebpageSearch <- AHH %>% gs_read(ws = "QueryURLs")
vcWebpageSearch <- gsWebpageSearch$URL
vcWebpageSearch <- vcWebpageSearch[
  grepl(
    tolower(vcWebpageSearch),
    pattern = cSearchURLPattern
  )
]

# Listings scraped on previous runs (so they are not scraped again).
gsListings <- AHH %>% gs_read(ws = cResultsSheetName)
vcAlreadySeenListings <- gsListings$ZZURL

# Automated filters to apply to newly scraped listings.
gsFilters <- AHH %>% gs_read(ws = "99AcresFilters")
# Getting the list of properties to scrape by scraping each search URL
# =============================================================================

# Listing URLs collected across all searches.
vcURLsToScrape <- c()

# Index of the search URL currently being processed.
i <- 1
repeat {

  if (i > length(vcWebpageSearch)) {
    break
  }

  print(paste('Searching', vcWebpageSearch[i]))

  # A search may span several result pages; keep fetching pages until a page
  # stops contributing new listing URLs.
  iPageNo <- 1
  vcURLsToScrapeFromThisPage <- c()
  repeat {

    print(paste('Page number', iPageNo))

    # Substitute the current page number into the search URL and download it.
    vcWebpage <- readLines(
      paste0(
        gsub(
          x = vcWebpageSearch[i],
          pattern = 'ffid-.*?\\?',
          replacement = paste0('ffid-page-', iPageNo, '?')
        )
      )
    )

    # Keep only the lines that actually carry search results.
    vcWebpageListings <- vcWebpage[
      grepl(x = vcWebpage, pattern = 'data-propPos')
    ]

    # The listing markup appears in (at least) four different shapes; each
    # shape is parsed separately below.

    # Shape 1: quoted href ( href="..." ).
    vcWebpageListingsHREF <- vcWebpageListings[
      grepl(x = vcWebpageListings, pattern = 'href="')
    ]

    # No results at all on this page: stop paging through this search.
    if (length(vcWebpageListings) == 0) {
      break
    }

    vcWebpageListingsHREF <- gsub(
      x = vcWebpageListingsHREF, pattern = '.*href="', replacement = ''
    )
    vcWebpageListingsHREF <- gsub(
      x = vcWebpageListingsHREF, pattern = '".*', replacement = ''
    )

    # Shape 2: unquoted href ( href=/... ).
    vcWebpageListingsHREF2 <- vcWebpageListings[
      grepl(x = vcWebpageListings, pattern = 'href=/')
    ]
    vcWebpageListingsHREF2 <- gsub(
      x = vcWebpageListingsHREF2, pattern = '.*href=', replacement = ''
    )
    vcWebpageListingsHREF2 <- gsub(
      x = vcWebpageListingsHREF2, pattern = ' itemprop.*', replacement = ''
    )
    vcWebpageListingsHREF2 <- gsub(
      x = vcWebpageListingsHREF2, pattern = ' data-fsl.*', replacement = ''
    )

    # Shape 3: target=_blank anchors.
    # NOTE(review): the first assignment below is immediately overwritten by
    # the second one, and vcWebpageListingsBlank is never added to the combined
    # results further down -- this shape looks like dead code carried over from
    # another scraper; kept as-is to preserve behaviour.
    vcWebpageListingsBlank <- vcWebpageListings[
      grepl(x = vcWebpageListings, pattern = 'target=_blank ')
    ]
    vcWebpageListingsBlank <- vcWebpageListings[
      !grepl(x = vcWebpageListings, pattern = 'href')
    ]
    vcWebpageListingsBlank <- gsub(
      x = vcWebpageListingsBlank, pattern = 'href=', replacement = ''
    )
    vcWebpageListingsBlank <- gsub(
      x = vcWebpageListingsBlank, pattern = ' itemprop.*', replacement = ''
    )

    # Shape 4: listings without any href attribute at all.
    vcWebpageListingsNoHREF <- vcWebpageListings[
      !grepl(x = vcWebpageListings, pattern = 'href')
    ]
    vcWebpageListingsNoHREF <- gsub(
      x = vcWebpageListingsNoHREF, pattern = '*.a data-propPos=', replacement = ''
    )
    vcWebpageListingsNoHREF <- gsub(
      x = vcWebpageListingsNoHREF, pattern = '" data-fsl.*', replacement = ''
    )

    # Used below to detect whether this page contributed anything new.
    iPreviousLength <- length(vcURLsToScrapeFromThisPage)
    vcURLsToScrapeFromThisPage <- c(
      vcURLsToScrapeFromThisPage,
      vcWebpageListingsHREF,
      vcWebpageListingsHREF2,
      vcWebpageListingsNoHREF
    )
    vcURLsToScrapeFromThisPage <- unique(vcURLsToScrapeFromThisPage)

    # Zero new entries means there are no more result pages for this search.
    if (length(vcURLsToScrapeFromThisPage) == iPreviousLength) {
      break
    }

    # Next result page.
    iPageNo <- iPageNo + 1
  }

  # Append this search's results to the overall collection.
  vcURLsToScrape <- c(
    vcURLsToScrape,
    vcURLsToScrapeFromThisPage
  )

  # Next search URL.
  i <- i + 1
}
rm(vcWebpage)
# Scraping details of the properties
# =============================================================================

# The scraped hrefs are relative; prefix the site root (and de-duplicate).
vcURLsToScrape <- paste0('http://99acres.com/', unique(vcURLsToScrape))

# Drop the listings that were already scraped on a previous run.
vcURLsToScrape <- setdiff(
  vcURLsToScrape,
  vcAlreadySeenListings
)

# Scrape only if anything new is left.
if (length(vcURLsToScrape) > 0) {

  # One long data.table of (Category, Value, ZZURL) rows per listing, stacked.
  dtListings <- rbindlist(
    lapply(
      vcURLsToScrape,
      function(cListing) {

        print(paste('Scraping', cListing))
        vcWebpage <- readLines(cListing)

        # Locate this property's facts table; skip the listing when the
        # marker is absent (layout changed / listing removed).
        iStartingIndex <- which(grepl(
          x = vcWebpage,
          pattern = 'pdMainFacts type2'
        ))
        if (length(iStartingIndex) == 0) {
          return(data.table())
        }
        iEndingIndex <- which(grepl(
          x = vcWebpage[iStartingIndex:length(vcWebpage)],
          pattern = '</table>'
        ))[1]
        iEndingIndex <- iStartingIndex + iEndingIndex - 1
        vcTable <- vcWebpage[iStartingIndex:iEndingIndex]

        # Split the table markup into individual cells.
        vcTable <- unlist(strsplit(
          unlist(
            strsplit(
              vcTable,
              '<tr>'
            )
          ),
          '<td>'
        ))
        vcTable[length(vcTable)] <- gsub(
          x = vcTable[length(vcTable)],
          pattern = '</table>.*',
          replacement = ''
        )
        vcTable <- grep(
          x = vcTable,
          pattern = 'span.*id',
          value = T
        )

        # Extract (Category, Value) pairs: the span id carries the field
        # name, the span text carries the value.
        dtTemp <- data.table(
          Category = gsub(
            x = vcTable,
            pattern = '.*span.*id=\"(.*?)\">.*',
            replacement = '\\1'
          ),
          Value = gsub(
            x = gsub(
              x = vcTable,
              pattern = '.*\">',
              replacement = ''
            ),
            pattern = '<.*',
            replacement = ''
          )
        )
        dtTemp <- dtTemp[!grepl(x = Category, pattern = '>|<')]

        # Amenities live one line below each 'amnIcons' marker.
        vcFacilities <- vcWebpage[
          grep(
            vcWebpage,
            pattern = 'amnIcons'
          ) + 1
        ]
        vcFacilities <- gsub(
          x = vcFacilities,
          pattern = '</div>',
          replacement = ''
        )
        vcFacilities <- gsub(
          x = vcFacilities,
          pattern = '<div.*>',
          replacement = ''
        )
        vcFacilities <- gsub(
          x = vcFacilities,
          pattern = ' *$|^ *',
          replacement = ''
        )
        vcFacilities <- paste(
          vcFacilities[vcFacilities != ''],
          collapse = ', '
        )
        dtTemp <- rbind(
          dtTemp,
          data.table(
            Category = 'Facilities',
            Value = vcFacilities
          )
        )

        setDT(dtTemp)
        # Tag every row with the listing it came from.
        dtTemp[, ZZURL := cListing]
        dtTemp
      }
    ),
    fill = T
  )
}
# Cleaning the data
# =============================================================================
if (exists('dtListings')) {

  # Make the website's field names syntactically valid R column names.
  setnames(
    dtListings,
    make.names(colnames(dtListings))
  )

  # Trim/collapse whitespace in the values; strip spaces and punctuation from
  # the category names.
  dtListings[, Value := gsub(x = Value, pattern = '^ *| *$', replacement = '')]
  dtListings[, Value := gsub(x = Value, pattern = ' +', replacement = ' ')]
  dtListings[, Category := gsub(x = Category, pattern = ' |[[:punct:]]', replacement = '')]

  # Long -> wide: one row per listing, one column per category.
  dtListings <- dcast(dtListings, ZZURL ~ Category, value.var = 'Value')
  setDT(dtListings)
  setnames(
    dtListings,
    make.names(colnames(dtListings))
  )

  # Residual junk in the rent column that the generic cleaning misses.
  dtListings[, Rent := pdPrice2]
  dtListings[, pdPrice2 := NULL]
  dtListings[, Rent := gsub(x = Rent, pattern = '.*;', replacement = '')]
  dtListings[, Rent := gsub(x = Rent, pattern = ' |[[:punct:]]|[[:alpha:]]', replacement = '')]
}

# Automatic processing of the data
# (this section is the same across the per-website scraping scripts)
# =============================================================================
if (exists('dtListings')) {

  # Status columns filled in by the automated filters below.
  dtListings[, ZZStatus := '']
  dtListings[, ZZComments := '']

  dtFilters <- data.table(gsFilters)
  if (nrow(dtFilters) > 0) {
    for (i in seq(nrow(dtFilters))) {
      # Apply a filter only when the column it targets exists in the data.
      if (dtFilters[i, Column] %in% colnames(dtListings)) {
        dtListings[
          grep(
            x = get(dtFilters[i, Column]),
            pattern = dtFilters[i, Value],
            invert = dtFilters[i, Invert]
          ),
          c(
            'ZZCalledBy',
            'ZZStatus',
            'ZZComments'
          ) := list(
            'Automated',
            paste0(ZZStatus, dtFilters[i, Status], '; '),
            paste0(ZZComments, dtFilters[i, Comment], '; ')
          )
        ]
      }
    }
  }
  rm(dtFilters)
}

# Uploading details of the properties back to the Google sheet
# (this section is the same across the per-website scraping scripts)
# =============================================================================
if (exists('dtListings')) {

  # Stack the previously uploaded rows with the new ones.
  # NOTE(review): rbind() with fill = T relies on data.table's rbind method
  # being dispatched for this mixed data.frame/data.table call; kept as in
  # the original.
  dtListings <- rbind(
    data.frame(gsListings),
    dtListings,
    fill = T
  )
  setDT(dtListings)

  # Put the user-entered ZZ* columns last.
  setcolorder(
    dtListings,
    c(
      grep(colnames(dtListings), pattern = '^ZZ', value = T, invert = T),
      grep(colnames(dtListings), pattern = '^ZZ', value = T)
    )
  )

  # Error values are easier on the eye as blanks.
  dtListings[dtListings == 'NULL'] <- ''
  dtListings[dtListings == 'NA'] <- ''
  dtListings[is.na(dtListings)] <- ''

  # Replace the worksheet via rename-create-delete so that any columns added
  # during this run are picked up.
  # AHH %>%
  #    gs_ws_delete(ws = cResultsSheetName)
  AHH <- gs_ws_rename(AHH, from = cResultsSheetName, to = 'temp')
  AHH <- gs_title(cFileName)
  AHH %>%
    gs_ws_new(
      ws_title = cResultsSheetName,
      input = dtListings,
      trim = TRUE,
      verbose = FALSE
    )
  AHH %>%
    gs_ws_delete(ws = 'temp')
}
b046e013f6371a70b6757d989bc945cde6ce4584 | 29598a51ee27ff9713ec3ee0745c3fb266d383e7 | /app/modals.R | db3e381dbf98d0808b6b431ea6492d1f8ee965e0 | [] | no_license | alemenze/magic-firetool | ecc6d9526c7061ccf3905e4cce1522d37848246d | 55c27bffd0218bcd0efe48cb23bd3b59f25042f1 | refs/heads/main | 2023-04-03T12:28:30.153363 | 2021-04-15T12:49:48 | 2021-04-15T12:49:48 | 341,429,589 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,053 | r | modals.R | # Input data
########################################################################################################
# Modal pop-ups showing an example image for each plot type in the app.
#
# Every "*_example" button opens exactly the same kind of dialog (a large,
# easily dismissed modal containing one centered image from www/), so the
# dialog construction is factored into a single helper instead of being
# repeated verbatim for all fifteen observers.

# Show a large, dismissable modal containing a single centered example image.
# img_src: file name of the image, resolved against the app's www/ folder.
showExampleModal <- function(img_src) {
  showModal(modalDialog(
    column(12, tags$img(src = img_src), align = 'center', hr()),
    easyClose = TRUE,
    footer = modalButton("Close"),
    size = 'l'
  ))
}

# Input data
########################################################################################################
observeEvent(input$hitcount_example, showExampleModal('feature_count_img.png'))
observeEvent(input$metadata_example, showExampleModal('meta_img.png'))

# Clustering
########################################################################################################
observeEvent(input$pca_example, showExampleModal('pca.png'))
observeEvent(input$dm_example, showExampleModal('dm.png'))
observeEvent(input$eigen_example, showExampleModal('eigencor.png'))

# Comparisons
########################################################################################################
observeEvent(input$volcano_example, showExampleModal('volcano.png'))
observeEvent(input$venn_example, showExampleModal('venn.png'))
observeEvent(input$upset_example, showExampleModal('upset.png'))

# Genes
########################################################################################################
observeEvent(input$heatmap_example, showExampleModal('heatmap.png'))
observeEvent(input$box_example, showExampleModal('boxplot.png'))
observeEvent(input$violin_example, showExampleModal('violinplot.png'))

# Pathways
########################################################################################################
observeEvent(input$dotplot_example, showExampleModal('gsea_dotplot.png'))
observeEvent(input$enrich_example, showExampleModal('gsea_emap.png'))
observeEvent(input$gsea_example, showExampleModal('gseaplot.png'))
observeEvent(input$pathviewer_example, showExampleModal('pathview.png'))
|
ed035ca7b3b3e22c291119aced2028bc3caad120 | 74b64ba7ef0800cb858dfa17ca6641514d0de7cc | /MobileInsurancePrediction.R | 4a9a3142d2d9e6813cc9edc58918c871d4ffd530 | [] | no_license | abhi117a/R | 7ad877cd9354129f946ed2f5c42f5355a48f7191 | 9f193c9b7ad4abb9cc669e3849ab91a323d2c54e | refs/heads/master | 2020-04-09T11:14:03.265723 | 2018-01-11T15:20:44 | 2018-01-11T15:20:44 | 68,149,562 | 2 | 2 | null | 2017-12-08T18:39:06 | 2016-09-13T21:36:28 | R | UTF-8 | R | false | false | 8,889 | r | MobileInsurancePrediction.R | setwd("C:/Users/admin/Documents")
# Read the raw training data (tab separated) and give the columns generic
# names: X0..X84 for the predictors plus Y for the target (86 columns).
mobileInsurance <- read.csv("mobileInsuranceTrain.csv", sep = "\t")
ncol(mobileInsurance)
colnames(mobileInsurance) <- c(paste0("X", 0:84), "Y")
str(mobileInsurance)
# ---------------------------------------------------------------------------
# Quick visual inspection
# ---------------------------------------------------------------------------
library(ggplot2)
ggplot(mobileInsurance) +
  geom_bar(aes(x = mobileInsurance$X0), fill = "red")

# Baseline linear model over every predictor (diagnostic plots + summary).
linModel <- lm(Y ~ ., data = mobileInsurance)
plot(linModel)
summary(linModel)

# ---------------------------------------------------------------------------
# Train / test split (80 / 20, stratified on Y by sample.split)
# ---------------------------------------------------------------------------
library(caTools)
split <- sample.split(mobileInsurance$Y, SplitRatio = 0.8)
mobileInsuranceTrain <- subset(mobileInsurance, split == T)
nrow(mobileInsuranceTrain)
mobileInsuranceTest <- subset(mobileInsurance, split == F)
nrow(mobileInsuranceTest)

# ---------------------------------------------------------------------------
# Models
# ---------------------------------------------------------------------------

# SVM (default radial kernel) -- ~87% accuracy reported on earlier runs.
library(e1071)
SVMmodel <- svm(Y ~ ., data = mobileInsuranceTrain)
summary(SVMmodel)
svmPred <- predict(SVMmodel, mobileInsuranceTest)
# NOTE(review): thresholding at 49 looks suspicious for a 0/1 target --
# possibly meant to be 0.49; kept as-is to preserve behaviour.
svmPred1 <- ifelse(svmPred > 49, 1, 0)
table(mobileInsuranceTest$Y, svmPred1)

# Grid search over epsilon / cost.
# NOTE(review): tuning is performed on the *test* set here, which leaks test
# data into model selection; consider tuning on the training set instead.
tuneResult <- tune(svm, Y ~ ., data = mobileInsuranceTest,
                   ranges = list(epsilon = seq(0, 1, 0.1), cost = 2^(2:9))
)
print(tuneResult)
# best performance: MSE = 8.371412, RMSE = 2.89, epsilon 1e-04, cost 4
# Draw the tuning graph.
plot(tuneResult)

# Random forest -- ~93% accuracy reported.
# NOTE(review): the model object shadows the randomForest() function name.
library(randomForest)
randomForest <- randomForest(Y ~ ., data = mobileInsuranceTrain)
RandomPred <- predict(randomForest, mobileInsuranceTest[-86])
table(mobileInsuranceTest$Y, RandomPred)

# Naive Bayes (variable name "navie" kept from the original).
navie <- naiveBayes(Y ~ ., data = mobileInsuranceTrain)
naivPred <- predict(navie, mobileInsuranceTest[-86])
table(mobileInsuranceTest$Y, naivPred)

# Gradient boosting (gbm), evaluated with test-set AUC.
library(gbm)
modelGB <- gbm(formula = Y ~ .,
               distribution = "bernoulli",
               data = mobileInsuranceTrain,
               n.trees = 70,
               interaction.depth = 5,
               shrinkage = 0.3,
               bag.fraction = 0.5,
               train.fraction = 1.0,
               n.cores = NULL)
print(modelGB)
preds <- predict(modelGB, newdata = mobileInsuranceTest[-86], n.trees = 70)
labels <- mobileInsuranceTest$Y
cvAUC::AUC(predictions = preds, labels = labels)

# XGBoost (~74.6% AUC reported on earlier runs).
library(xgboost)
library(Matrix)
train.mx <- sparse.model.matrix(Y ~ ., mobileInsuranceTrain)
test.mx <- sparse.model.matrix(Y ~ ., mobileInsuranceTest)
dtrain <- xgb.DMatrix(train.mx, label = mobileInsuranceTrain[, "Y"])
dtest <- xgb.DMatrix(test.mx, label = mobileInsuranceTest[, "Y"])
train.gdbt <- xgb.train(params = list(objective = "binary:logistic",
                                      #num_class = 2,
                                      #eval_metric = "mlogloss",
                                      eta = 0.3,
                                      max_depth = 5,
                                      subsample = 1,
                                      colsample_bytree = 0.5),
                        data = dtrain,
                        nrounds = 70,
                        watchlist = list(train = dtrain, test = dtest))
# Predictions and AUC on the test set.
preds <- predict(train.gdbt, newdata = dtest)
labels <- mobileInsuranceTest[, "Y"]
cvAUC::AUC(predictions = preds, labels = labels)

# Most important features according to XGBoost.
library(Ckmeans.1d.dp)
names <- dimnames(data.matrix(mobileInsuranceTrain[, -1]))[[2]]
importance_matrix <- xgb.importance(names, model = train.gdbt)
xgb.plot.importance(importance_matrix[1:50, ])

# Neural net on a handful of the top features.
library(neuralnet)
nn <- neuralnet(Y ~ X48 + X60 + X23 + X2 + X31, mobileInsuranceTrain,
                hidden = c(3, 5), linear.output = FALSE)
#############################################################################
# Convert every column of the data set (predictors X0..X84 and the target Y)
# to a factor.
#
# The original version did this with one `as.factor()` statement per column
# and contained copy/paste typos that silently corrupted the data:
#   X9  was overwritten with a factor of X8  (X8  itself stayed unconverted),
#   X14 was overwritten with a factor of X15 (X15 stayed unconverted),
#   X81 was overwritten with a factor of X82 (X82 stayed unconverted).
# Converting each column from its own values in a loop fixes those three
# columns and removes the duplication.
vcColunasParaFator <- c(paste0("X", 0:84), "Y")
for (coluna in vcColunasParaFator) {
  mobileInsurance[[coluna]] <- as.factor(mobileInsurance[[coluna]])
}
ecd1ef69ec51749232306602ebccf8bc92ac914e | ca4106f2732e98c5cd7c23f07c1e3086bced8c03 | /man/blomar.Rd | ecd662b0300bac7ab202f197c807f1020b52a2bc | [] | no_license | cran/timsac | e1c8e170e55de5d065bc6b36842d6ed1902c5c68 | 160f1c2693f6c1e19dfd143ce0bf769b0d4f9f4b | refs/heads/master | 2023-07-19T17:52:59.522078 | 2023-07-12T22:50:02 | 2023-07-13T03:38:55 | 17,700,506 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,169 | rd | blomar.Rd | \name{blomar}
\alias{blomar}
\alias{print.blomar}
\title{Bayesian Method of Locally Stationary Multivariate AR Model Fitting}
\description{
Locally fit multivariate autoregressive models to non-stationary time series
by a Bayesian procedure.
}
\usage{
blomar(y, max.order = NULL, span)
}
\arguments{
\item{y}{A multivariate time series.}
\item{max.order}{upper limit of the order of AR model, less than or equal to
\eqn{n/2d} where \eqn{n} is the length and \eqn{d} is the dimension of the
time series \code{y}.
Default is \eqn{min(2 \sqrt{n}, n/2d)}{min(2*sqrt(n), n/2d)}.}
  \item{span}{length of the basic local span. Let \eqn{m} denote \code{max.order}.
    If \eqn{n-m-1} is less than or equal to \code{span}, or if \eqn{n-m-1-}\code{span}
    is less than \eqn{2md}, then \code{span} is set to \eqn{n-m}.}
}
\value{
\item{mean}{mean.}
\item{var}{variance.}
\item{bweight}{Bayesian weight.}
\item{aic}{AIC with respect to the present data.}
\item{arcoef}{AR coefficients. \code{arcoef[[m]][i,j,k]} shows the value of
\eqn{i}-th row, \eqn{j}-th column, \eqn{k}-th order of \eqn{m}-th model.}
\item{v}{innovation variance.}
\item{eaic}{equivalent AIC of Bayesian model.}
\item{init}{start point of the data fitted to the current model.}
\item{end}{end point of the data fitted to the current model.}
}
\details{
The basic AR model is given by
\deqn{y(t) = A(1)y(t-1) + A(2)y(t-2) + \ldots + A(p)y(t-p) + u(t),}
where \eqn{p} is order of the AR model and \eqn{u(t)} is innovation variance
\code{v}.
}
\references{
G.Kitagawa and H.Akaike (1978)
A Procedure for the Modeling of Non-stationary Time Series.
Ann. Inst. Statist. Math., 30, B, 351--363.
H.Akaike (1978)
A Bayesian Extension of the Minimum AIC Procedure of Autoregressive Model
Fitting. Research Memo. No. 126. The Institute of Statistical Mathematics.
H.Akaike, G.Kitagawa, E.Arahata and F.Tada (1979)
\emph{Computer Science Monograph, No.11, Timsac78.}
The Institute of Statistical Mathematics.
}
\examples{
data(Amerikamaru)
blomar(Amerikamaru, max.order = 10, span = 300)
}
\keyword{ts}
|
e6539013e14a308d9e06df9955b8c15711b4909c | 8ee9e15003aa31d0533d708e3faf3235bc2922ee | /AnaliseDeRiscoDeCredito.r | 199d11b1256e466f7abe3bca4cebdc9b08ce6cf4 | [] | no_license | gabrielalvesfortunato/ProjetoAnaliseDeRiscoDeCredito | 53049ac2172fb69e0fe5c23212cef440f54867a5 | 72b06dbfb29e6b5f025b162c985366a31981f590 | refs/heads/master | 2021-03-28T13:04:38.907677 | 2020-03-17T02:57:19 | 2020-03-17T02:57:19 | 247,865,337 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,442 | r | AnaliseDeRiscoDeCredito.r | ####### PROJETO DE ANALISE DE RISCO DE CREDITO #########
# Load the packages used throughout the analysis
library(ggplot2)
library(readr)
# Read the raw credit data set (readr::read_csv)
dadosCredito <- read_csv("credit_dataset.csv")
View(dadosCredito)
str(dadosCredito)
# Helper: convert the given columns of a data frame to factors
# Convert the named columns of `dataframe` to factors; every other column is
# left untouched. Returns the modified copy (the caller's object is not
# changed, as usual in R).
toFactors <- function(dataframe, variaveis) {
  for (nome_coluna in variaveis) {
    dataframe[[nome_coluna]] <- as.factor(dataframe[[nome_coluna]])
  }
  dataframe
}
# Categorical columns of the credit data set.
variaveisFatores <- c(
  "credit.rating", "account.balance", "previous.credit.payment.status",
  "credit.purpose", "savings", "employment.duration", "installment.rate",
  "marital.status", "guarantor", "residence.duration", "current.assets",
  "other.credits", "apartment.type", "bank.credits", "occupation", "dependents",
  "telephone", "foreign.worker"
)
dadosCredito <- toFactors(dadosCredito, variaveisFatores)
str(dadosCredito)

#### EXPLORATORY DATA ANALYSIS ####

# --- Credit duration in months ----------------------------------------------
estatisticasDuracao <- summary(dadosCredito$credit.duration.months)
amplitudeDuracao <- (max(dadosCredito$credit.duration.months) -
                       min(dadosCredito$credit.duration.months))
desvioPadraoDuracao <- sd(dadosCredito$credit.duration.months)
duracaoMinima <- min(dadosCredito$credit.duration.months)
duracaoMaxima <- max(dadosCredito$credit.duration.months)
duracaoMedia <- mean(dadosCredito$credit.duration.months)
estatisticasDuracao
amplitudeDuracao
desvioPadraoDuracao
duracaoMaxima
duracaoMinima
duracaoMedia

# Bar chart of the duration variable.
?ggplot
ggplot(dadosCredito, aes(x = dadosCredito$credit.duration.months)) +
  geom_bar(colour = "red") +
  xlab("Duraçao do Credito em Meses") +
  ylab("Contagem")
# Box plot of the duration variable.
boxplot(dadosCredito$credit.duration.months, main = "Duração do credito em Meses")

# --- Requested credit amount -------------------------------------------------
estatisticasQuantCredito <- summary(dadosCredito$credit.amount)
amplitudeQuantCredito <- (max(dadosCredito$credit.amount) -
                            min(dadosCredito$credit.amount))
desvioPadraoQuantCredito <- sd(dadosCredito$credit.amount)
quantidadeMinimaDeCredito <- min(dadosCredito$credit.amount)
quantidadeMaximaDeCredito <- max(dadosCredito$credit.amount)
quantidadeMediaDeCredito <- mean(dadosCredito$credit.amount)
estatisticasQuantCredito
amplitudeQuantCredito
desvioPadraoQuantCredito
quantidadeMinimaDeCredito
quantidadeMaximaDeCredito
quantidadeMediaDeCredito

# Histogram of the credit-amount variable.
ggplot(dadosCredito, aes(x = dadosCredito$credit.amount)) +
  geom_histogram(colour = "black", binwidth = 1000) +
  xlab("Quantidade de credito solicitado") +
  ylab("Frequência")
# Box plot of the credit-amount variable.
boxplot(dadosCredito$credit.amount, main = "BoxPlot Quantidade de Crédito Solicitado")

# --- Applicant age -----------------------------------------------------------
estatisticasIdade <- summary(dadosCredito$age)
amplitudeIdade <- (max(dadosCredito$age) -
                     min(dadosCredito$age))
desvioPadraoIdade <- sd(dadosCredito$age)
idadeMinima <- min(dadosCredito$age)
idadeMaxima <- max(dadosCredito$age)
idadeMedia <- mean(dadosCredito$age)
estatisticasIdade
amplitudeIdade
desvioPadraoIdade
idadeMinima
idadeMedia
idadeMaxima

# Bar chart of the age variable.
ggplot(dadosCredito, aes(x = dadosCredito$age)) +
  geom_bar(colour = "red") +
  xlab("Idade do Solicitante") +
  ylab("Frequencia")
# Box plot of the age variable.
boxplot(dadosCredito$age, main = "BoxPlot da Variavel Idade")

# --- Requested amount vs. age ------------------------------------------------
ggplot(dadosCredito, aes(x = dadosCredito$age, y = dadosCredito$credit.amount)) +
  geom_point(shape = 1, aes(color = age)) +
  xlab("Idade") +
  ylab("Quantia de Credito") +
  geom_smooth(method = "lm", color = "red")

# --- Requested amount vs. credit duration ------------------------------------
ggplot(dadosCredito, aes(x = dadosCredito$credit.amount, y = dadosCredito$credit.duration.months)) +
  geom_point(shape = 1, aes(color = credit.duration.months)) +
  xlab("Quantia de Credito") +
  ylab("Duração do Credito (em meses)") +
  geom_smooth(method = "lm", color = "red")

# Continuous columns to be standardised before modelling.
variaveisNumericas <- c("age", "credit.amount", "credit.duration.months")
# Standardise (z-score: centre and scale) the named columns of `df`.
# Note: scale() returns a one-column matrix, so the affected columns become
# matrix columns -- exactly as in the original implementation.
scale.features <- function(df, variaveis) {
  for (nome_coluna in variaveis) {
    df[[nome_coluna]] <- scale(df[[nome_coluna]], center = TRUE, scale = TRUE)
  }
  df
}
# Standardise the continuous columns before modelling.
dadosCredito <- scale.features(dadosCredito, variaveisNumericas)
View(dadosCredito)
####### SPLITTING THE DATA INTO TRAINING AND TEST SETS #######
# Randomly split `dataFrame` into a 70% training set and a 30% test set.
#
# dataFrame: the data.frame to split.
# seed:      optional integer; when given, makes the split reproducible.
#
# Returns list(trainSet = ..., testSet = ...).
#
# Fix vs. the original: the row count was taken from the *global*
# `dadosCredito` instead of the `dataFrame` argument, which breaks (or
# silently mis-splits) any call on a different data set.
# NOTE(review): this definition masks base::split(); kept for compatibility
# with the calls below, but worth renaming eventually.
split <- function(dataFrame, seed = NULL) {
  if (!is.null(seed)) set.seed(seed)
  index <- seq_len(nrow(dataFrame))
  trainIndex <- sample(index, trunc(length(index) * 0.7))
  list(
    trainSet = dataFrame[trainIndex, ],
    testSet = dataFrame[-trainIndex, ]
  )
}
# Build the training and test sets.
splits <- split(dadosCredito)
dadosTreino <- splits$trainSet
dadosTeste <- splits$testSet
View(splits)

####### FEATURE SELECTION WITH RANDOM FOREST #######
library(randomForest)
featureSelection_rf <- randomForest(credit.rating ~ .,
                                    data = dadosCredito,
                                    ntree = 100,
                                    nodesize = 10,
                                    importance = TRUE)
varImpPlot(featureSelection_rf)
# From the variable-importance plot, the most relevant variables at a first
# pass are: account.balance, credit.duration.months,
# previous.credit.payment.status, credit.amount, savings, age.

#### BUILDING THE CLASSIFICATION MODELS ####
# Random forest without the least-important variables.
modeloRandomForest_v1 <- randomForest(credit.rating ~ .
                                      - residence.duration
                                      - dependents
                                      - installment.rate
                                      - foreign.worker
                                      - telephone
                                      - employment.duration
                                      - marital.status
                                      - apartment.type,
                                      data = dadosTreino,
                                      ntree = 100,
                                      nodesize = 10)
# Print the fitted model.
print(modeloRandomForest_v1)
# An OOB error rate of 23.86% was observed, i.e. about 76.14% accuracy.
# NOTE(review): this value is hard coded from one particular run and will
# not track the model above if the data or random seed changes.
accuracia <- 100 - 23.86
accuracia

#### SCORING ####
# Predictions on the held-out test set (observed vs. predicted).
previsoes <- data.frame(observado = dadosTeste$credit.rating,
                        previsto = predict(modeloRandomForest_v1, newdata = dadosTeste)
)
View(previsoes)

### ROC CURVE FOR MODEL EVALUATION ###
library(ROCR)
# Class probabilities and true labels.
class1 <- predict(modeloRandomForest_v1, newdata = dadosTeste, type = "prob")
class2 <- dadosTeste$credit.rating
# ROC curve: true-positive rate vs. false-positive rate.
prediction <- prediction(class1[, 2], class2)
performance <- performance(prediction, "tpr", "fpr")
plot(performance, col = rainbow(10))
|
065fd39818de002ded53609e78b90cccd91d74cd | f2778192f431b0c3dfcd070cd12033c4831ac485 | /InnovaAnalyserSelection.R | 4104bb1cd42730f2d7b006fec8571db56a89bad9 | [] | no_license | MathotM/ValidationCode | 4e7c9b7d50ecb69107ad35a85e6b772e3bcbd55f | d0033ddebd40a8be3ac76f2f18b9c0e1863850b5 | refs/heads/master | 2020-03-20T00:41:57.498111 | 2018-06-14T14:37:52 | 2018-06-14T14:37:52 | 137,053,044 | 0 | 1 | null | 2018-06-14T14:37:53 | 2018-06-12T10:02:38 | R | UTF-8 | R | false | false | 1,634 | r | InnovaAnalyserSelection.R | # # function for selection in Manure Data and selection on time basis
# Select the rows of the compiled Innova analyser file that fall inside a
# time window.
#
# Reads <PathRawDataInnovaAnalyser>/<InnovaAnalyserCompiledDataName> (a
# semicolon-separated CSV with ',' as the decimal mark), parses its 'Time'
# column ("ymd hms" text) into POSIXct in 'TimeZone', and keeps only the rows
# whose Time lies strictly between StartData and EndData. The window bounds
# may be given either as POSIXct or as "ymd hms" strings.
#
# Returns the filtered data frame, or NA when the file could not be read
# (downstream code tests for NA).
#
# Changes vs. the original:
#  * the self-referential defaults (e.g. PathRawDataInnovaAnalyser =
#    PathRawDataInnovaAnalyser) were dropped: such a default can never be
#    evaluated ("promise already under evaluation"), so the arguments were
#    effectively mandatory anyway;
#  * class(x) == "try-error" / class(x) == "character" comparisons were
#    replaced by inherits()/is.character(): class() may return a vector of
#    length > 1 (e.g. POSIXct), which makes `if (class(x) == ...)` an error
#    on current R.
InnovaAnalyserSelectionFct <- function(
  PathRawDataInnovaAnalyser,
  InnovaAnalyserCompiledDataName,  # e.g. "InnovaAnalyserCompiled.csv"
  StartData,
  EndData,
  TimeZone
) {
  # NOTE(review): loading a package from inside a function changes the
  # caller's search path; kept for compatibility with the calling scripts.
  library(lubridate)

  print("Opening of data compiled")
  # try() so a missing/unreadable file yields NA instead of aborting.
  InnovaAnalyserSelectedData.df <- try(
    read.csv(
      file = paste(PathRawDataInnovaAnalyser, InnovaAnalyserCompiledDataName, sep = "/"),
      sep = ";", dec = ",", header = TRUE, stringsAsFactors = FALSE
    ),
    silent = TRUE
  )

  # Time setting: parse the Time column in the requested time zone.
  if (!inherits(InnovaAnalyserSelectedData.df, "try-error")) {
    InnovaAnalyserSelectedData.df$Time <- as_datetime(
      ymd_hms(InnovaAnalyserSelectedData.df$Time, tz = TimeZone),
      tz = TimeZone
    )
  }

  # Time boundaries: accept either POSIXct or "ymd hms" strings.
  if (is.character(StartData)) {
    StartData <- as_datetime(ymd_hms(StartData, tz = TimeZone), tz = TimeZone)
  }
  if (is.character(EndData)) {
    EndData <- as_datetime(ymd_hms(EndData, tz = TimeZone), tz = TimeZone)
  }
  print("Format Time of data compiled")

  if (!inherits(InnovaAnalyserSelectedData.df, "try-error")) {
    # Keep only the rows strictly inside (StartData, EndData).
    InnovaAnalyserSelectedData.df <- InnovaAnalyserSelectedData.df[
      InnovaAnalyserSelectedData.df$Time > StartData &
        InnovaAnalyserSelectedData.df$Time < EndData,
    ]
  } else {
    # Reading failed: signal it with NA, as the callers expect.
    InnovaAnalyserSelectedData.df <- NA
  }
  return(InnovaAnalyserSelectedData.df)
}
|
4e45986d54f6f9a5a61d68bdc59615745d69f2e4 | d03e45ed3bbe167143c6919120cb30a7361c1645 | /tests/testthat/test-names.R | 7514ce337714b79ea7cb9ef186c25292d7ed0922 | [
"MIT"
] | permissive | thomasp85/farver | 5a56d7937a295b1b10ef756f4f589e02e8001c1f | 9bc85a6fd839dc6d2d919c772feee40740afe53d | refs/heads/main | 2022-11-13T13:24:58.345474 | 2022-07-06T17:48:32 | 2022-07-06T17:48:32 | 125,286,884 | 106 | 13 | NOASSERTION | 2022-09-20T06:00:02 | 2018-03-14T23:32:11 | R | UTF-8 | R | false | false | 757 | r | test-names.R | names <- c("#404040", "#8FBC8F", "#FFFFE0", "#7AC5CD", "#66CDAA", "#1E90FF",
"#CDC0B0", "#CD0000", "#7A67EE", "#FFFACD")
cols <- decode_colour(names)
cols_named <- cols
rownames(cols_named) <- names
codes_named <- names
names(codes_named) <- names
test_that("names gets transfered", {
expect_equal(names(encode_colour(cols_named)), names)
expect_null(names(encode_colour(cols)))
expect_equal(rownames(decode_colour(codes_named)), names)
expect_null(rownames(decode_colour(names)))
expect_equal(rownames(convert_colour(cols_named, 'rgb', 'lab')), names)
expect_null(rownames(convert_colour(cols, 'rgb', 'lab')))
col_dist <- compare_colour(cols, cols_named, 'rgb')
expect_equal(dimnames(col_dist), list(NULL, names))
})
|
562b3df2d298a2b248229e0b0650bc4799d6a442 | d53ad1327c7481e52f9cfc4644b836a6754f89a0 | /R/forest_rfe.R | 45533ba27b02d7b3d2866d7443c860b07a95b2a1 | [] | no_license | talegari/forager | 5aa152f65c4596c7d1161694a8aab40225950973 | f3963444886afac85d252d6c0b5455426361a7f3 | refs/heads/master | 2020-03-19T20:40:45.641914 | 2019-03-09T19:25:26 | 2019-03-09T19:25:26 | 136,911,591 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,519 | r | forest_rfe.R | #' @name forest_rfe
#' @title lightweight implementation of RFE using ranger
#' @description For datasets with large number of predictors, this
#' implementation has these modifications to regular recursive feature
#' elimination procedure:
#'
#' \itemize{
#'
#' \item Use oob prediction error as a proxy to model performance.
#'
#' \item Build forests \code{\link[ranger]{ranger}} on samples of data and
#' average variable importance and oob prediction error.
#'
#' }
#'
#' For a comprehensive RFE procedure with resampling, use
#' \code{\link[caret]{rfe}}
#' @references \itemize{
#'
#' \item
#' \href{https://topepo.github.io/caret/recursive-feature-elimination.html}{RFE
#' using caret}
#'
#' \item
#' \href{http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFE.html}{RFE
#' using scikit learn}
#'
#' }
#' @param dataset (object inheriting data.frame class) A dataframe
#' @param responseVarName (string) Name of the response variable
#' @param sizes (integer vector) Vector of number of variables. When missing, sizes will be sequence of nc/2^i where the sequnce ranges from nc(number of columns) to 2.
#' @param sampleprop (A real number between 0 and 1 or a vector) Proportion of observations. If not a single number and sizes is specified, this vector should have same length as sizes.
#' per sample
#' @param nsamples (positive integer or a vector) Number of samples. If not a single number and sizes is specified, this vector should have same length as sizes.
#' @param seed (positive integer) Seed
#' @param ... Arguments to be passed to \code{\link[ranger]{ranger}}
#' @return A list with:
#'
#' \itemize{
#'
#' \item (rfeTable) A tibble with three columns:
#'
#' \itemize{
#'
#' \item size: Number of variables used \item ooberror: Out-of-box error of
#' the forest
#'
#' \item varimp: A list-column where each item is a data.frame with
#' variable names and importance
#'
#' }
#'
#' \item (oobchangeTable) A dataframe with five columns sorted by absolute value of the variable 'oepc'.
#'
#' \itemize{
#'
#' \item variable: Name of the variable that got removed at some stage
#'
#' \item size: Number of variables that were considered before removing the variable
#'
#' \item reducedSize: Number of the variables at next stage. Gives an idea of how many variables were reduced at that stage.
#'
#' \item oepc: OOB error percentage change
#'
#' \item importance: Importance of the variable at the stage when the variable was decided to be removed.
#'
#' }
#' }
#' @examples
#' temp <- forest_rfe(iris, "Species")
#' temp
#'
#' temp <- forest_rfe(iris
#' , "Species"
#' , sizes = c(4,2)
#' , sampleprop = c(0.2, 0.3)
#' , nsamples = c(20, 30)
#' )
#' temp
#'
#' temp <- forest_rfe(iris
#' , "Species"
#' , sizes = c(4,2)
#' , sampleprop = 0.1
#' , nsamples = c(20, 30)
#' )
#' temp
#'
#' temp <- forest_rfe(iris
#' , "Species"
#' , sizes = c(4,2)
#' , sampleprop = c(0.2, 0.3)
#' , nsamples = 10
#' )
#' temp
#'
#' temp <- forest_rfe(iris
#' , "Species"
#' , sizes = c(4,2)
#' , sampleprop = c(0.2, 0.3)
#' , nsamples = 10
#' , mtry = list(3, 2)
#' , num.trees = list(500, 1000)
#' , case.weights = replicate(2, runif(150), simplify = FALSE)
#' )
#' temp
#'
#' @export
forest_rfe <- function(dataset
, responseVarName
, sizes
, sampleprop = 0.2
, nsamples = 10
, seed = 1
, ...
){
# assertions ----
assertthat::assert_that(inherits(dataset, "data.frame"))
assertthat::assert_that(!is.null(colnames(dataset)))
assertthat::assert_that(assertthat::is.string(responseVarName))
assertthat::assert_that(responseVarName %in% colnames(dataset))
nc <- ncol(dataset)
if(!missing(sizes)){
assertthat::assert_that(all(sapply(sizes, assertthat::is.count)))
assertthat::assert_that(length(sizes) == dplyr::n_distinct(sizes))
sizes <- sort(sizes, decreasing = TRUE)
assertthat::assert_that(all(sizes <= (nc - 1)))
assertthat::assert_that((nc - 1) %in% sizes)
assertthat::assert_that(length(sizes) == length(sampleprop) ||
length(sampleprop) == 1
)
if(length(sampleprop) == 1){
sampleprop <- rep(sampleprop, length(sizes))
}
assertthat::assert_that(length(sizes) == length(nsamples) ||
length(nsamples) == 1
)
if(length(nsamples) == 1){
nsamples <- rep(nsamples, length(sizes))
}
assertthat::assert_that(
all(sapply(sampleprop, function(x) dplyr::between(x, 1e-8, 1)))
)
assertthat::assert_that(
all(sapply(nsamples, function(x) assertthat::is.count(x)))
)
} else {
sizes <- unique(ceiling(sapply(0:floor(log(nc - 1, 2)), function(x) (nc - 1)/2^x)))
assertthat::assert_that(dplyr::between(sampleprop, 1e-8, 1))
assertthat::assert_that(assertthat::assert_that(assertthat::is.count(nsamples)))
sampleprop <- rep(sampleprop, length(sizes))
nsamples <- rep(nsamples, length(sizes))
}
arguments <- list(...)
if(length(arguments) > 0){
assertthat::assert_that(
all(sapply(arguments, function(x) inherits(x, "list")))
)
arguments <- lapply(arguments, function(x) rep_len(x, length(sizes)))
}
assertthat::assert_that(assertthat::assert_that(assertthat::is.count(seed)))
# setup ----
nr <- nrow(dataset)
dataset <- data.table::copy(dataset)
data.table::setDT(dataset)
predictorNames <- setdiff(colnames(dataset), responseVarName)
if(is.null(arguments[["importance"]])){
arguments[["importance"]] <- as.list(rep("impurity", length(sizes)))
}
if(is.null(arguments[["write.forest"]])){
arguments[["write.forest"]] <- as.list(rep(FALSE, length(sizes)))
}
# given a resample index, extractImp return the vector of variable importance and oobError
extractImp <- function(resampleIndex, iter){
arguments_local <- lapply(arguments, function(x) `[[`(x, iter))
resampledData <- dataset[resampleIndex, ]
if(!is.null(arguments_local[["case.weights"]])){
arguments_local[["case.weights"]] <-
arguments_local[["case.weights"]][resampleIndex]
model <- do.call(
ranger::ranger
, c(list(data = resampledData
, dependent.variable.name = responseVarName
)
, arguments_local
)
)
} else {
model <- do.call(
ranger::ranger
, c(list(data = resampledData
, dependent.variable.name = responseVarName
)
, arguments_local
)
)
}
return(list(model[["variable.importance"]], model[["prediction.error"]]))
}
# All topvars for first iteration
topVarsList <- vector("list", length = length(sizes))
names(topVarsList) <- as.character(sizes)
topVarsList[[as.character(sizes[1])]] <-
data.frame(variable = setdiff(colnames(dataset), responseVarName)
, value = 1
)
oobErrorsList <- numeric(length = length(sizes))
names(oobErrorsList) <- as.character(sizes)
# loop over sizes ----
for(asizeIndex in 1:length(sizes)){
set.seed(seed)
seeds <- sample.int(1e6, nsamples[asizeIndex])
# choose only the required columns
removeVars <-
setdiff(predictorNames
, topVarsList[[as.character(sizes[max(1, asizeIndex - 1)])]][["variable"]][1:sizes[asizeIndex]]
)
if(length(removeVars) > 0){
suppressWarnings(dataset[, c(removeVars) := NULL])
}
imps <- vector("list", nsamples[asizeIndex])
oobErrors <- numeric(length = nsamples[asizeIndex])
# compute importance over bootstraps
for(i in 1:(nsamples[asizeIndex])){
set.seed(seeds[[i]])
extracted <- extractImp(sample.int(nr, floor(sampleprop[asizeIndex] * nr))
, asizeIndex
)
imps[[i]] <- extracted[[1]]
oobErrors <- extracted[[2]]
}
# get overall importance
imps <- lapply(imps, function(x) data.frame(variable = names(x), value = x))
merger <- function(x, y){
suppressWarnings(
merge(x
, y
, by = "variable"
, all = TRUE
, incomparables = NA
)
)
}
# compute average of importances over bootstraps and create a dataframe
varImp <- Reduce(merger, imps)
varImpSummed <- sort(matrixStats::rowMedians(as.matrix(varImp[, -1]), na.rm = TRUE)
, decreasing = TRUE
)
topVars <- data.frame(
variable = as.character(varImp[,1][order(varImpSummed ,decreasing = TRUE)])
, importance = sort(varImpSummed, decreasing = TRUE)
)
topVarsList[[as.character(sizes[asizeIndex])]] <- topVars
oobErrorsList[as.character(sizes[asizeIndex])] <- stats::median(oobErrors, na.rm = TRUE)
message("size: "
, sizes[asizeIndex]
, " , "
, "oobError: "
, round(oobErrorsList[as.character(sizes[asizeIndex])], 2)
)
}
# return ----
rfeTable <- tibble::tibble(size = as.integer(sizes)
, ooberror = oobErrorsList
, varimp = topVarsList
, sampleprop = sampleprop
, nsamples = as.integer(nsamples)
)
rfeTableu <- rfeTable[nrow(rfeTable):1, ]
varRemoved <- function(df1, df2){
setdiff(df1[["variable"]], df2[["variable"]])
}
computeOobErrorChange <- function(i){
variables <- varRemoved(rfeTableu[["varimp"]][[i + 1]], rfeTableu[["varimp"]][[i]])
variable <- NULL
data.frame(
variable = variables
, size = rep_len(rfeTableu[["size"]][(i + 1)], length(variables))
, reducedSize = rep_len(rfeTableu[["size"]][i], length(variables))
, oepc = (rfeTableu[["ooberror"]][i] - rfeTableu[["ooberror"]][i + 1]) %>%
magrittr::divide_by((rfeTableu[["ooberror"]][i + 1] + 1e-8))
, importance = subset(rfeTableu[["varimp"]][[i + 1]], variable %in% variables)[["importance"]]
)
}
oobchangeTable <- data.table::rbindlist(lapply(1:(nrow(rfeTable) - 1)
, computeOobErrorChange
)
)
return(list(rfeTable = rfeTableu
, oobchangeTable = oobchangeTable[order(abs(oobchangeTable[["oepc"]])
, oobchangeTable[["importance"]]
, decreasing = TRUE
)
, ]
)
)
}
|
dffec282eec7993544a6128eeee9f27a2930d7e7 | b57fd22ab01e94b1d5b95b236b79812320da2eca | /R/loglik_R.R | 8b571acc99bfcbe3f9ac32b6c60863c6d6b6075e | [] | no_license | linogaliana/gravity | 3da73fdd69101a1f65199c8ae8cc945316025a6e | a51bfee1d5f5d1eab83b929bb474604581af9e38 | refs/heads/master | 2021-05-23T12:06:01.968282 | 2020-04-13T21:05:26 | 2020-04-13T21:05:26 | 253,277,885 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,109 | r | loglik_R.R | loglik_ZIP_R <- function(params, X, Z, Y,
weights = NULL,
offsetx = NULL,
offsetz = NULL,
link = c("probit","logit")) {
link <- match.arg(link)
if (missing(weights)) weights <- rep(1,nrow(X))
if (missing(offsetx)) offsetx <- rep(0,nrow(X))
if (missing(offsetz)) offsetz <- rep(0,nrow(Z))
kx <- ncol(X)
kz <- ncol(Z)
linkinv <- make.link(link)$linkinv
mu <- as.vector(exp(X %*% params[1:kx] + offsetx))
phi <- as.vector(linkinv(Z %*% params[(kx + 1):(kx +
kz)] + offsetz))
loglik0 <- log(phi + exp(log(1 - phi) - mu))
loglik1 <- log(1 - phi) + dpois(Y, lambda = mu, log = TRUE)
Y0 <- (Y==0)
loglik <- sum(weights[Y0] * loglik0[Y0]) + sum(weights[!Y0] *
loglik1[!Y0])
loglik
}
loglik_ZINB_R <- function(params, X, Z, Y,
weights = NULL,
offsetx = NULL,
offsetz = NULL,
link = c("probit","logit")){
if (missing(weights)) weights <- rep(1,nrow(X))
if (missing(offsetx)) offsetx <- rep(0,nrow(X))
if (missing(offsetz)) offsetz <- rep(0,nrow(Z))
link <- match.arg(link)
kx <- ncol(X)
kz <- ncol(Z)
linkinv <- make.link(link)$linkinv
mu <- as.vector(exp(X %*% params[1:kx] + offsetx))
phi <- as.vector(linkinv(Z %*% params[(kx + 1):(kx +
kz)] + offsetz))
theta <- exp(params[(kx + kz) + 1])
loglik0 <- log(phi + exp(log(1 - phi) + suppressWarnings(dnbinom(0,
size = theta, mu = mu, log = TRUE))))
loglik1 <- log(1 - phi) + suppressWarnings(dnbinom(Y,
size = theta, mu = mu, log = TRUE))
Y0 <- (Y==0)
loglik <- sum(weights[Y0] * loglik0[Y0]) + sum(weights[!Y0] *
loglik1[!Y0])
loglik
}
|
fd6afbf970b4a2780bb6fdda5870bb8929f93f9d | d6f8b486ca99ed3c4a9bfe52acf11b56ee08774f | /final/log_fold_change_DESeq.R | 69dfff66d4c06e4e81ace85a98ba21019dcdf64e | [] | no_license | je-yang/crispr-deeplearning | 1c4686b8372f42a3dc7b663b5cc6bca16e4b7fc9 | 1d3534478d419acc2019535eaaec24c141ed762c | refs/heads/master | 2020-03-22T22:38:24.738209 | 2018-09-10T18:32:49 | 2018-09-10T18:32:49 | 140,762,561 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,125 | r | log_fold_change_DESeq.R | source("https://bioconductor.org/biocLite.R")
biocLite("DESeq2")
library(DESeq2)
############################
#computing log2 fold changes with DESeq2
file = "/Users/JYang/Desktop/Stanford/20140519_ricintilingFinal_readcounts.csv"
data = read.csv(file, header = TRUE, stringsAsFactors=FALSE, row.names = "name")
#get columns of interesultst
#dCas9 VP64
listnames1 <- colnames(data)[grepl('dCas9.VP64', colnames(data))]
index1 <- match(listnames1, names(data))
index1 <- sort(c(index1))
dCas9_CP64 <- data[ , index1]
#scFV VP64
listnames2 <- colnames(data)[grepl('scFV.VP64', colnames(data))]
index2 <- match(listnames2, names(data))
index2 <- sort(c(index2))
scFV_VP64 <- data[ , index2]
#create combined dataframe
dataset <- cbind(dCas9_CP64, scFV_VP64)
#split new dataset to separate ricin and cycled
col_names <- colnames(dataset)
condition <- c()
for(i in 1:length(col_names)){
if(grepl('ricin', col_names[i])){
condition <- c(condition, 'ricin')
}
else{
condition <- c(condition, 'cycled')
}
}
condition
coldata <- cbind(col_names, condition)
counts <- dataset
coldata <- as.matrix(coldata, row.names = 'col_names')
head(counts,2)
coldata
#check that row and columns match
all(rownames(coldata) %in% colnames(counts))
all(rownames(coldata) == colnames(counts))
library("DESeq2")
DESeq_data <- DESeqDataSetFromMatrix(countData = counts,
colData = coldata,
design = ~ condition)
DESeq_data
DESeq_data1 <- DESeq(DESeq_data)
results <- result(DESeq_data1)
results
results <- result(DESeq_data1, name="condition_ricin_vs_cycled")
results <- result(DESeq_data1, contrast=c("condition","ricin","cycled"))
results
log2fc <- cbind(rownames(results), results$log2FoldChange)
colnames(log2fc) <- c('qname', "log2fc")
head(log2fc)
log2fc_df <- data.frame(log2fc)
write.csv(log2fc, file = "/Users/JYang/Desktop/Stanford/log2fc.csv")
log2fc_df <- read.csv(file = "/Users/JYang/Desktop/Stanford/log2fc.csv", header =TRUE)
#check where mean and median are distributed
mean(log2fc_df$log2fc, na.rm=TRUE)
median(log2fc_df$log2fc, na.rm=TRUE)
|
8944af21bcc9c04e4bdd2bc7de3e47c98d4fc13e | 1934db2e5de7bb67e2b461ab577b8d523a58d384 | /inst/scripts/Cap3.R | d2bb2ea101c8970f2bd91fe6db73638980413b9f | [] | no_license | cran/labstatR | 5cc96f139886925fd3ebcda5a5885462ca6b484e | 3477e6221a091a393f094c0252287e947da330ba | refs/heads/master | 2022-08-27T16:50:32.935156 | 2022-08-08T17:30:06 | 2022-08-08T17:30:06 | 17,696,966 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,115 | r | Cap3.R | #-*- R -*-
##########################################################
### ###
### Script tratti da `Laboratorio di statistica con R' ###
### ###
### Stefano M. Iacus & Guido Masaratto ###
### ###
### CAPITOLO 3 ###
##########################################################
require(labstatR)
### Sez 3.1 ANALISI DI DIPENDENZA: LA CONNESSIONE
x<- c("O","O","S","B","S","O","B","B","S",
"B","O","B","B","O","S")
y<- c("O","B","B","B","S","S","O","O","B",
"B","O","S","B","S","B")
x <- ordered(x, levels=c("S","B","O"))
y <- ordered(y, levels=c("S","B","O"))
table(x,y)
tab <- matrix(c(1,1,2,3,3,1,0,2,2),3,3)
tab
rownames(tab)
rownames(tab) <- c("S","B","O")
tab
colnames(tab) <- c("S","B","O")
tab
table(x,y) -> tabella
tabella
# condizionate di Y ad X
tabella[1,] # Y | X=S
tabella[2,] # Y | X=B
tabella[3,] # Y | X=O
# condizionate di X ad Y
tabella[,1] # X | Y=S
tabella[,2] # X | Y=B
tabella[,3] # X | Y=O
tabella
margin.table(tabella,1)
margin.table(tabella,2)
tabella[3,]/sum(tabella[3,])
tabella[,1]/sum(tabella[,1])
tab2 <- tabella
tab2[1,] <- tab2[1,]/sum(tab2[1,])
tab2[2,] <- tab2[2,]/sum(tab2[2,])
tab2[3,] <- tab2[3,]/sum(tab2[3,])
print(tab2,digits=2)
tab3 <- tabella
tab3[,1] <- tab3[,1]/sum(tab3[,1])
tab3[,2] <- tab3[,2]/sum(tab3[,2])
tab3[,3] <- tab3[,3]/sum(tab3[,3])
print(tab3,digits=2)
# distribuzione doppia relativa
prop.table(tabella)
# marginali relative di Y condizionate ad X
prop.table(tabella,1)
# marginali relative di X condizionate ad Y
prop.table(tabella,2)
summary(tabella)
str(summary(tabella))
chi2(x,y)
### Sez 3.1.1 RAPPRESENTAZIONI GRAFICHE DI TABELLE
x <- c("O","O","S","B","S","O","B","B","S",
"B","O","B","B","O","S")
y <- c("O","B","B","B","S","S","O","O","B",
"B","O","S","B","S","B")
x <- ordered(x, levels=c("S","B","O"))
y <- ordered(y, levels=c("S","B","O"))
table(x,y)
bubbleplot(table(x,y),main="Musica versus Pittura")
table(x,y) -> mytab
mytab
str(mytab)
dimnames(mytab)
names(dimnames(mytab))
load("dati1.rda")
bubbleplot(table(dati$Z,dati$X), joint=FALSE,
main="Z dato X")
bubbleplot(table(dati$X,dati$Z), joint=FALSE,
main="X dato Z")
bubbleplot(table(dati$Z,dati$X), main = "Z versus X")
### Sez 3.1.2 IL CASO DEL TITANIC
data(Titanic)
str(Titanic)
Titanic
apply(Titanic,c(2,3),sum)
# Dipendenza dal sesso
as.table(apply(Titanic,c(2,4),sum)) -> tabsex
tabsex
summary(tabsex)$statistic/2201
# Dipendenza dall'eta'
as.table(apply(Titanic,c(3,4),sum)) -> tabage
tabage
summary(tabage)$statistic/2201
# Dipendenza dalla classe di imbarco
as.table(apply(Titanic,c(1,4),sum)) -> tabclass
tabclass
summary(tabclass)$statistic/2201
# Effetto della classe di imbarco senza l'equipaggio
apply(Titanic,c(1,4),sum) -> tabclass
tabclass <- as.table(tabclass[1:3,])
tabclass
summary(tabclass)$statistic/sum(tabclass)
as.table(apply(Titanic,c(1,4),sum)) -> tabclass
t(tabclass)
bubbleplot(tabclass, main="Distribuzione dei sopravvissuti per classe")
### Sez 3.1.3 IL PARADOSSO DI SIMPSON (I)
x <- c( rep(TRUE,160), rep(FALSE,40), rep(TRUE,170),
rep(FALSE,30), rep(TRUE,15), rep(FALSE,85),
rep(TRUE,100), rep(FALSE,300))
y <- c( rep("A",200), rep("B",200), rep("A",100),
rep("B",400))
z <- c( rep(1,400), rep(2,500) )
simpson <- data.frame( trattamento = y, decesso = x, ospedale = z )
table(simpson)
table(simpson) -> tab
osp1 <- tab[,,1]
osp1
osp2 <- tab[,,2]
osp2
osp1[1,] <- osp1[1,]/sum(osp1[1,])
osp1[2,] <- osp1[2,]/sum(osp1[2,])
osp1 # tabella delle condizionate
osp2[1,] <- osp2[1,]/sum(osp2[1,])
osp2[2,] <- osp2[2,]/sum(osp2[2,])
osp2 # tabella delle condizionate
table(simpson) -> tab
apply(tab,c(1,2),sum)
apply(table(simpson),c(1,2),sum) -> ritab
prop.table(ritab,1)
### Sez 3.2 DIPENDENZA IN MEDIA
scricciolo <- c(19.85, 20.05, 20.25, 20.85, 20.85, 20.85,
21.05, 21.05, 21.05, 21.25, 21.45, 22.05,
22.05, 22.05, 22.25)
pettirosso <- c(21.05, 21.85, 22.05, 22.05, 22.05, 22.25,
22.45, 22.45, 22.65, 23.05, 23.05, 23.05,
23.05, 23.05, 23.25, 23.85)
boxplot(scricciolo,pettirosso, names=c("scricciolo", "pettirosso"))
summary(scricciolo)
summary(pettirosso)
sqrt(sigma2(scricciolo))
sqrt(sigma2(pettirosso))
lunghezza <- c(scricciolo, pettirosso)
plot(rep(1,length(scricciolo)),scricciolo,xaxt="n",
xlim=c(0,3),ylim=c(18,25),xlab="",ylab="lunghezza")
axis(1,c(1,2),c("scricciolo","pettirosso"))
points(rep(2,length(pettirosso)),pettirosso)
abline(h=mean(lunghezza))
points(1,mean(scricciolo),pch=4, cex=4, lwd=1.5)
points(2,mean(pettirosso),pch=4, cex=4, lwd=1.5)
ospite <- c(rep(1,length(scricciolo)), rep(2,length(pettirosso)))
eta(ospite,lunghezza)
x <- c(rep(1,10),rep(0,23), rep(2,15))
y <- c(rnorm(10,mean=7),rnorm(23,mean=19),
rnorm(15,mean=17))
eta(x,y)
y <- c(rnorm(10,mean=8),rnorm(23,mean=7),
rnorm(15,mean=6.5))
eta(x,y)
t(as.table(apply(Titanic,c(1,4),sum))) -> tabclass
tabclass
tabclass[1,]/(tabclass[1,]+tabclass[2,]) -> reg
reg
plot(reg,axes=FALSE,type="b")
abline(h=1490/2201, lty=2)
axis(1,1:length(reg),names(reg))
axis(2)
box()
n <- sum(tabclass)
md <- 1490/n
sy <- md*(1-md)
nx <- apply(tabclass,2,sum)
sm <- sum( (reg-md)^2 * nx) / n
sm/sy
eta(dati$X,dati$W)
eta(dati$Y,dati$W)
eta(dati$Z,dati$W)
### Sez 3.3.1 I GRAFICI DI DISPERSIONE E LA COVARIANZA
x <- c(2,3,4,2,5,4,5,3,4,1)
y <- c(5,4,3,6,2,5,3,5,3,3)
plot(x, y, axes=FALSE)
axis(1,c(mean(x),0:6),
c(expression(bar(x)),0:6))
axis(2,c(mean(y),0,1,2,3,5,6),
c(expression(bar(y)),0,1,2,3,5,6))
box()
lines(c(2,2,0), c(0,5,5), lty=2)
points(2,5, pch = 3, cex = 3, col = "red", lty=2)
lines(c(3.3,3.3,0), c(0,3.9,3.9), lty=3)
text(3.6, 3.9, expression((list(bar(x)[n],bar(y)[n]))))
points(mean(x), mean(y), pch = 4, cex = 3, col = "red")
COV(x,y)
cov(x,y)
x <- c(-2, -1, 0, 0, 1, 2)
y <- c(4, 1, 0, 0, 1, 4)
plot(x,y, main="parabola")
cor(x,y)
table(x,y)
summary(table(x,y))
### Sez 3.3.2 LA RETTA DI REGRESSIONE
x <- c(11,8,28,17,9,4,28,5,12,23,6,24,18,21,6,22,
27,17,27,6,29,9,3,12,9,23,5,27,20,13)
y <- c(28,21,63,42,28,2,80,19,33,60,14,58,54,67,
18,64,65,68,77, 17,95,12,1,30,34,67,20,75,59,55)
plot(x,y)
cor(x,y)
lm(y~x)
lm(y~x) -> model
plot(x,y)
abline(model, col="red", lwd=2)
text(10, 80, expression(y[i]==0.349 + 2.805*x[i]))
### Sez 3.3.3 PREVISIONI
predict(model, data.frame(x=50))
predict(model, data.frame(x=70))
predict(model, data.frame(x=c(50,70)))
predict(model, data.frame(x))
### Sez 3.3.4 BONTA' DI ADATTAMENTO
predict(model,data.frame(x)) -> yy
sum((yy-y)^2)/length(y)
var(y)*(length(y)-1)/length(y)
summary(model)
### Sez 3.3.5 EFFETTO DEGLI OUTLIER SULLA RETTA DI REGRESSIONE
x <- c(1,1,2,2)
y <- c(4,3,3,2)
cor(x,y)
lm(y~x) -> model
model
abline(model)
summary(model)
# aggiunta di un outlier
x <- c(x,8)
y <- c(y,8)
cor(x,y)
lm(y~x) -> model
model
abline(model)
summary(model)
### Sez CAMBIAMENTI DI SCALA
x <- c(75,76,77,78,79,80,81)
y <- c(21,15.5,11.7,10.7,9.2,8.9,8)
cor(x,y)
plot(x,y,xlab="anni",ylab="incidenza")
lm(y~x) -> model
abline(model)
model
plot(model)
cor(x,log(y))
plot(x,log(y))
lm(log(y)~x) -> model2
abline(model2)
model2
plot(model2)
predict(model,data.frame(x=85))
predict(model2,data.frame(x=85)) -> z
z
exp(z)
x <- c(33,49,65,33,79,49,93)
y <- c(5.3,14.5,21.21,6.5,38.45,11.23,50.42)
cor(x,y)
plot(x,y)
lm(y~x) -> model
abline(model)
model
cor(x,sqrt(y))
plot(x,sqrt(y))
lm(sqrt(y)~x) -> model2
abline(model2)
model2
### Sez 3.4 DALLA REGRESSIONE LINEARE A QUELLA NON PARAMETRICA
data(cars)
attach(cars)
plot(speed, dist)
lines(ksmooth(speed, dist, "normal", bandwidth=2))
lines(ksmooth(speed, dist, "normal", bandwidth=5),lty=3)
lines(ksmooth(speed, dist, "normal", bandwidth=10))
detach()
data(cars)
attach(cars)
plot(cars)
lines(lowess(cars))
lines(lowess(cars, f=.2), lty = 3)
legend(5, 120, c(paste("f = ", c("2/3", ".2"))), lty = c(1:3))
detach()
# EOF Cap3.R
|
689870d851c98a37084eb8bec5650b9bc017e185 | a8d513835f48f0b339a764b94b3f9cde7b8f178a | /model.R | cf9cfe22443a1864a522fcd55727fdbaf71cc678 | [] | no_license | rasyidstat/har | ebd1c76f0320f396aee7cd4d896008243e30babc | b296a94cf5ec9a286f6620074e26a733ed2c1bc4 | refs/heads/master | 2021-07-06T10:29:08.022630 | 2018-09-26T14:39:00 | 2018-09-26T14:39:00 | 149,958,300 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,431 | r | model.R | library(tidymodels)
library(tidyverse)
library(data.table)
library(e1071)
library(glue)
# read data
har <- fread("data/extracted_vals.csv")
har <- har %>%
mutate(V271 = as.factor(V271))
# preprocess (Z normalization)
har_rec <- recipe(V271 ~ ., data = har) %>%
step_center(all_predictors()) %>%
step_scale(all_predictors())
har_scaled <- prep(har_rec, training = har, retain = TRUE)
# train-test splitting (80:20, stratified)
set.seed(2019)
har_split <- initial_split(har, prop = 0.8, strata = "V271")
har_train <- bake(har_scaled, training(har_split))
har_test <- bake(har_scaled, testing(har_split))
# model
if (file.exists("output/model_svm.rds")) {
model_svm <- read_rds("output/model_svm.rds")
} else {
model_svm <- svm(V271 ~ ., data = har_train)
saveRDS(model_svm, "output/model_svm.rds")
}
# generate prediction for train and test
train_pred <- predict(model_svm)
test_pred <- predict(model_svm, newdata = har_test)
# save prediction
res <- data.frame(truth = har_train$V271,
predicted = train_pred) %>%
add_column(type = "train") %>%
bind_rows(
data.frame(truth = har_test$V271,
predicted = test_pred) %>%
add_column(type = "test")
) %>%
group_by(type) %>%
nest()
write_rds(res, "output/prediction_svm.rds")
# write session info (for reproducibility)
writeLines(capture.output(sessionInfo()), glue("output/session_info_{format(Sys.Date(), '%Y%m%d')}.txt"))
|
c9ff003ce01cc7d1a009ef6202bd4dad2e5bb832 | 8516967f77aa4437daa77f038444e6929e4dfd35 | /R/triplepara.R | aadabd1fc645a49ba0e2198e168b9ef81052aec7 | [] | no_license | bomeara/phybase | 52a7c5872809f7dff283f4e8c8a0d3693cfe2d85 | 05949f49b2bac58de81113e81d6515c20340142d | refs/heads/master | 2016-08-11T20:09:33.013441 | 2016-02-02T21:29:32 | 2016-02-02T21:29:32 | 50,945,737 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 551 | r | triplepara.R | triplepara<-function(inode,jnode,nodematrix,nspecies)
{
par<-rep(0,4)
height1<-node.height(inode, nodematrix, nspecies)
height2<-node.height(jnode, nodematrix, nspecies)
if(height1 < height2)
{
par[1] <- height2-height1
par[2] <- height1
par[3] <- nodematrix[jnode,5]
par[4] <- nodematrix[inode,5]
}
else if(height1 > height2)
{
par[1] <- height1-height2
par[2] <- height2
par[3] <- nodematrix[inode,5]
par[4] <- nodematrix[jnode,5]
}
else
{
warnings("something is wrong in triplepara")
}
par
} |
342bf7f8b143d8118e6046c35f5db9320ead0d6a | 5f6e952a9855d68d3381e6d65958bb9737660def | /Probability_distribution_practice.R | f1e17e208ebd4ddddfd885fac65bb1cdb5300384 | [] | no_license | bsteuerman832/R_Programs | ebd221a454f2beedfcbeefcf2d95ab6b93a5fdad | 67ba9b160109ac24948445dc5a57d0dfd5913760 | refs/heads/main | 2023-07-08T05:20:32.550276 | 2021-08-10T17:07:12 | 2021-08-10T17:07:12 | 388,488,618 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,565 | r | Probability_distribution_practice.R | library(dplyr)
library(ggplot2)
set.seed(595)
dice <- data.frame(n =
c(2,3,3,4,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,7,7,8,8,8,8,8,9,9,9,9,10,10,10,11,11,12))
rolls_100 <- dice %>% sample_n(100, replace = TRUE)
barplot(table(rolls_100), main = "Rolling a dice 100 times",
ylab = "Frequency",
xlab = "Dice roll")
# Uniform distribution notes:
# Min and max wait times for back-up that happens every 30 min
min <- 0
max <- 30
# Calculate probability of waiting less than 5 mins
prob_less_than_5 <- punif(5, min, max)
prob_less_than_5
# Calculate probability of waiting 10-20 mins
prob_between_10_and_20 <- punif(20, min, max) - punif(10, min, max)
prob_between_10_and_20
rbinom(10, 1, 0.5) # Flip a coin ten times
dbinom(9,10,0.5) # Flip a coin ten times, chance 9 are heads
pbinom(4,10,0.5) # Flip a coin ten times, chance at most 4 are heads
pbinom(4,10, 0.5, lower.tail = FALSE) # Flip a coin ten times, chance more than 4 are heads
# Normal distribution stuff:
# Let's take a dist w/ a mean of 5000 and a SD of 2000
# Probability of < 7500
pnorm(7500, mean = 5000, sd =2000)
# Probability of between 3000 and 7000
pnorm(7000, mean = 5000, sd =2000) - pnorm(3000, mean = 5000, sd =2000)
#Poisson distribution:
#Events are random but hover around a certain lambda value:
dpois(5, lambda = 8) # Avg = 8, probability the value comes out to 5
ppois(5, lambda = 8) # Avg = 8, probability the value is <= 5
ppois(10, lambda = 8, lower.tail = FALSE) # Avg = 8, probability the value is > 10) |
4049b379969e5139120cde6c9e4fa284e8a64ade | 6711d1ffe24e21bb5e9f56826e834323531e904b | /ManuscriptScripts/AOP-Net-Script 8-Topological Sorting.R | 3df421a4bdcc433e6671e7cbf2c102fc69564670 | [] | no_license | npollesch/AOPNet | 1659c7288a5d296f2c2101ca9736d94b34d44dbb | b214a3c2acf5c50458933900b16f409a6aa2e08b | refs/heads/master | 2021-06-21T00:14:50.860745 | 2021-01-08T16:36:12 | 2021-01-08T16:36:12 | 161,541,713 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,212 | r | AOP-Net-Script 8-Topological Sorting.R | #libraries
library(igraph)
library(prodlim)
library(RColorBrewer)
library(autoimage)
#Directories
workingDir<-"C:\\Users\\obrienja\\Documents\\GitHub\\AOPWiki\\R_files_Jason\\"
#load source of functions
source(paste(workingDir,"AOP-Net-Functions.R",sep="")) #imports custom functions
### IMPORTANT: this script relies on objects created in other scripts. Please run the following other scripts to create the required objects:
### 1) "AOP-Net-1-XML Parse.R" to create raw data files
### 2) "AOP-Net-2-Build Network.R" to create iGraph object from AOPwiki data
### 3) "AOP-Net-3-Adjacent vs NonAdjacent.R" identifies non-adjacent KERs and creates adjacent-only network
### 4) "AOP-Net-4-Components.R" identifies strong and weak components and created "contracted" network
### 5) "AOP-Net-5-Linear AOPs.R" identifies all linear aops
### 6) "AOP-Net-6-Connectivity.R" AOP occurence and edge connectivity
# Toplogical sorting of subgraph made from MIE/AO pair high number
# of laops and WITHOUT strong components (MIE/AO pair 201/341)
# subgraph called sub_lNoS created in AOP-Net-5-Linear AOPs.R script
g<-sub_lNoS
### Plot unsorted
# reusbale plot layout
set.seed(3)
layout.g<-layout_with_graphopt(g, charge=0.07)
V(g)$plotX<-layout.g[,1]
V(g)$plotY<-layout.g[,2]
# plot options
vCol<-rep("white",length(V(g)))
vCol[V(g)$KE_KED=="MIE"]<-"green"
vCol[V(g)$KE_KED=="AO"]<-"red"
eCol<-rep("grey40", length(E(g)))
eCol[E(g)$adjacency=="non-adjacent"]<-hsv(0.085, 1, 0.95)
# plot
plotLay<-cbind(V(g)$plotX,V(g)$plotY)
par(mar=c(0,0,0,0))
plot(g, vertex.size=15, vertex.color=vCol,
edge.width=4, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3,
layout=plotLay)
reset.par()
### Topo sorted plot
# topo sort and generate plot layout
topoLay<-topo.lay(g)
V(g)$topoX<-topoLay[,1]
V(g)$topoY<-topoLay[,2]
# plot
plotLay<-cbind(V(g)$topoX,V(g)$topoY)
textLay<-plotLay
textLay[,1]<-textLay[,1]+1
par(mar=c(0,0,0,0))
plot(g, vertex.size=12, vertex.color=vCol, vertex.label.cex=0.8,
edge.width=3, edge.color=eCol, edge.arrow.size=0.5, edge.arrow.width=2, edge.curved=1,
layout=plotLay)
reset.par()
### Shortest Path (regardlesss of any other attribute)
sCol<-short.path.edge.color(g,
fromnode=V(g)[V(g)$ID=="201"],
tonode=V(g)[V(g)$ID=="341"],
loc=F,
clr=hsv(0.6,0.4,1),
nonclr="transparent",
weight=NA,
all=T)
# unsorted
plotLay<-cbind(V(g)$plotX,V(g)$plotY)
par(mar=c(0,0,0,0))
plot(g, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0),vertex.label=NA,
edge.width=20, edge.color=sCol, edge.arrow.size=0,
layout=plotLay)
plot(g, vertex.size=15, vertex.color=vCol,
edge.width=4, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3,
layout=plotLay, add=TRUE)
reset.par()
# sorted
plotLay<-cbind(V(g)$topoX,V(g)$topoY)
par(mar=c(0,0,0,0))
plot(g, vertex.size=10, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0), vertex.label=NA,
edge.width=20, edge.color=sCol, edge.arrow.size=0, edge.curved=1,
layout=plotLay)
plot(g, vertex.size=10, vertex.color=vCol, vertex.label.cex=0.8,
edge.width=3, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3, edge.curved=1,
layout=plotLay, add=TRUE)
reset.par()
### Shortest path for adjacent edges only
subAdj<-subgraph.edges(g, E(g)[E(g)$adjacency=="adjacent"])
spAdj<-all_shortest_paths(subAdj, from= V(subAdj)[V(subAdj)$ID=="201"], to=V(subAdj)[V(subAdj)$ID=="341"], mode="out")
# There are 5 shortest paths of equal length
# generate different colours for each of the 5 paths
spCol<-c(hsv(0.5,0.4,0.2),
hsv(0.5,0.4,0.4),
hsv(0.5,0.4,0.6),
hsv(0.5,0.4,0.8),
hsv(0.5,0.4,1))
spSize<-c(26,23,19,16,13)
# plot unsorted
plotLay<-cbind(V(subAdj)$plotX,V(subAdj)$plotY)
par(mar=c(0,0,0,0))
for(i in 1: length(spAdj[[1]])){
ssG<-subgraph.edges(subAdj, eids=E(subAdj, path=spAdj[[1]][[i]]))
seCol<-rep(hsv(1,1,1,alpha=0), length(E(subAdj)))
seCol[E(subAdj)$ID%in%E(ssG)$ID]<-spCol[i]
plot(subAdj, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0),vertex.label=NA,
edge.width=spSize[i], edge.color=seCol, edge.arrow.size=0,
layout=plotLay, add=if(i>1){TRUE}else{FALSE})
}
plot(g, vertex.size=15, vertex.color=vCol,
edge.width=4, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3,
layout=plotLay, add=TRUE)
reset.par()
#plot sorted
plotLay<-cbind(V(subAdj)$topoX,V(subAdj)$topoY)
par(mar=c(0,0,0,0))
for(i in 1: length(spAdj[[1]])){
ssG<-subgraph.edges(subAdj, eids=E(subAdj, path=spAdj[[1]][[i]]))
seCol<-rep(hsv(1,1,1,alpha=0), length(E(subAdj)))
seCol[E(subAdj)$ID%in%E(ssG)$ID]<-spCol[i]
plot(subAdj, vertex.size=10, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0),vertex.label=NA,
edge.width=spSize[i], edge.color=seCol, edge.arrow.size=0, edge.curved=1,
layout=plotLay, add=if(i>1){TRUE}else{FALSE})
}
plot(g, vertex.size=10, vertex.color=vCol, vertex.label.cex=0.8,
edge.width=3, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3, edge.curved=1,
layout=plotLay, add=TRUE)
reset.par()
### Shortest path analysis based on WOE
wScores<-data.frame(w=c("High","Moderate","Low","Not Specified"), score=c(1, 2, 3, 3))
wWeight<-wScores$score[match(E(g)$woe, wScores$w)]
sCol<-short.path.edge.color(g,
fromnode=V(g)[V(g)$ID=="201"],
tonode=V(g)[V(g)$ID=="341"],
loc=F,
clr=hsv(0.5,0.4,1),
nonclr="transparent",
weight=wWeight,
all=T)
# unsorted
plotLay<-cbind(V(g)$plotX,V(g)$plotY)
par(mar=c(0,0,0,0))
plot(g, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0),vertex.label=NA,
edge.width=20, edge.color=sCol, edge.arrow.size=0,
layout=plotLay)
plot(g, vertex.size=15, vertex.color=vCol,
edge.width=4, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3,
layout=plotLay, add=TRUE)
reset.par()
# plot sorted
plotLay<-cbind(V(g)$topoX,V(g)$topoY)
par(mar=c(0,0,0,0))
plot(g, vertex.size=10, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0), vertex.label=NA,
edge.width=17, edge.color=sCol, edge.arrow.size=0, edge.curved=1,
layout=plotLay)
plot(g, vertex.size=10, vertex.color=vCol, vertex.label.cex=0.8,
edge.width=3, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3, edge.curved=1,
layout=plotLay, add=TRUE)
reset.par()
### Shortest path analysis based on quantitave understanding ONLY
wScores<-data.frame(w=c("High","Moderate","Low","Not Specified"), score=c(1, 2, 3, 3))
qWeight<-wScores$score[match(E(g)$quant, wScores$w)]
sCol<-short.path.edge.color(g,
fromnode=V(g)[V(g)$ID=="201"],
tonode=V(g)[V(g)$ID=="341"],
loc=F,
clr=hsv(0.5,0.4,1),
nonclr="transparent",
weight=qWeight,
all=T)
# plot unsorted
plotLay<-cbind(V(g)$plotX,V(g)$plotY)
par(mar=c(0,0,0,0))
plot(g, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0),vertex.label=NA,
edge.width=20, edge.color=sCol, edge.arrow.size=0,
layout=plotLay)
plot(g, vertex.size=15, vertex.color=vCol,
edge.width=4, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3,
layout=plotLay, add=TRUE)
reset.par()
# plot sorted
plotLay<-cbind(V(g)$topoX,V(g)$topoY)
par(mar=c(0,0,0,0))
plot(g, vertex.size=10, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0), vertex.label=NA,
edge.width=12, edge.color=sCol, edge.arrow.size=0, edge.curved=1,
layout=plotLay)
plot(g, vertex.size=10, vertex.color=vCol, vertex.label.cex=0.8,
edge.width=3, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3, edge.curved=1,
layout=plotLay, add=TRUE)
reset.par()
### Shortest path analysis based on BOTH quantitative understanding AND adjacent KERs
subAdj<-subgraph.edges(g, E(g)[E(g)$adjacency=="adjacent"])
qWeight<-wScores$score[match(E(subAdj)$quant, wScores$w)]
sCol<-short.path.edge.color(subAdj,
fromnode=V(subAdj)[V(subAdj)$ID=="201"],
tonode=V(subAdj)[V(subAdj)$ID=="341"],
loc=F,
clr=hsv(0.25,0.4,0.7),
nonclr="transparent",
weight=qWeight,
all=T)
# plot unsorted
plotLay<-cbind(V(subAdj)$plotX,V(subAdj)$plotY)
par(mar=c(0,0,0,0))
plot(subAdj, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0),vertex.label=NA,
edge.width=20, edge.color=sCol, edge.arrow.size=0,
layout=plotLay)
plot(g, vertex.size=15, vertex.color=vCol,
edge.width=4, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3,
layout=plotLay, add=TRUE)
reset.par()
# plot sorted
plotLay<-cbind(V(subAdj)$topoX,V(subAdj)$topoY)
par(mar=c(0,0,0,0))
plot(subAdj, vertex.size=10, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0), vertex.label=NA,
edge.width=12, edge.color=sCol, edge.arrow.size=0, edge.curved=1,
layout=plotLay)
plot(g, vertex.size=10, vertex.color=vCol, vertex.label.cex=0.8,
edge.width=3, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3, edge.curved=1,
layout=plotLay, add=TRUE)
reset.par()
#conclusion: there are many paths with equal weight
### Shortest path analysis based on BOTH WOE AND adjacent KERs
subAdj<-subgraph.edges(g, E(g)[E(g)$adjacency=="adjacent"])
wWeight<-wScores$score[match(E(subAdj)$woe, wScores$w)]
sCol<-short.path.edge.color(subAdj,
fromnode=V(subAdj)[V(subAdj)$ID=="201"],
tonode=V(subAdj)[V(subAdj)$ID=="341"],
loc=F,
clr=hsv(0.75,0.5,1),
nonclr="transparent",
weight=wWeight,
all=T)
# plot unsorted
plotLay<-cbind(V(subAdj)$plotX,V(subAdj)$plotY)
par(mar=c(0,0,0,0))
plot(subAdj, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0),vertex.label=NA,
edge.width=20, edge.color=sCol, edge.arrow.size=0,
layout=plotLay)
plot(g, vertex.size=15, vertex.color=vCol,
edge.width=4, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3,
layout=plotLay, add=TRUE)
reset.par()
# plot sorted
plotLay<-cbind(V(subAdj)$topoX,V(subAdj)$topoY)
par(mar=c(0,0,0,0))
plot(subAdj, vertex.size=10, vertex.color= rgb(1,1,1,alpha=0), vertex.frame.color= rgb(1,1,1,alpha=0), vertex.label=NA,
edge.width=17, edge.color=sCol, edge.arrow.size=0, edge.curved=1,
layout=plotLay)
plot(g, vertex.size=10, vertex.color=vCol, vertex.label.cex=0.8,
edge.width=3, edge.color=eCol, edge.arrow.size=0.4, edge.arrow.width=3, edge.curved=1,
layout=plotLay, add=TRUE)
reset.par()
#conclusion: one unique shortest path with best WOE using adj KERs only
### Shortest path analysis based on BOTH WOE AND adjacent KERs
### AND NORMALIZING FOR LENGTH
subAdj<-subgraph.edges(g, E(g)[E(g)$adjacency=="adjacent"])
laopsAdj<-all_simple_paths(subAdj,
from=V(subAdj)[V(subAdj)$ID=="201"],
to=V(subAdj)[V(subAdj)$ID=="341"],
mode="out")
# 6 Laops
#determine average WoE for each path
wWeight<-wScores$score[match(E(subAdj)$woe, wScores$w)]
avgWoe<-vector()
for(i in 1: length(laopsAdj)){
mW<-mean(wScores$score[match(E(subAdj, path=laopsAdj[[i]])$woe,wScores$w)])
avgWoe<-c(avgWoe, mW)
}
# path with lowest (best) average WoE score
laopsAdj[which(avgWoe==min(avgWoe))]
# Results: same as shortest path based on un-normlaized WoE
|
13af4f5728cc909a4a5c9237d9346e7a12f7063f | 7280a7a89922c54fb582a78cb72a50bef1450945 | /stdVarCompIxJxK.R | a43a974172c3bc1acd126272f1b71163739aa03c | [] | no_license | edwardmurphy/R-scripts | 1f139218e5a7a828551fb5c734fa07812fcfe273 | 6afb6513f3c7e38f1112ededb239510c14dfecd2 | refs/heads/master | 2021-01-13T08:05:38.558126 | 2016-10-24T22:02:39 | 2016-10-24T22:02:39 | 71,745,617 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,308 | r | stdVarCompIxJxK.R | library(lattice)
#############################################################################
##### Script to perform ANOVA and lme modeling (id.) for qual/validation data
##### from DOE using i assays/j levels/k injections
#############################################################################
#############################################################################
#### Data must be in varCompAnalysisR.txt file in working dir with
#### the following headers (col order can be arbitrary, as cols are called
#### by name):
#### column 1: Assay
#### column 2: Level (Low, Mid, High)
#### column 3: Replicate
#### column 4: Injection
#### column 5: Result
#### Missing data should be filled in prior to reading with 0 (zero)
#### If 0 (zero) is a legitimate value, change script to read dummy value
#### chosen
#### Need expected results in expected.txt file in working dir with
#### expected results for each level in a column vector
#############################################################################
stdVarComp <- function ( ) {
# Read in data, clean up, factorize levels appropriately
data<-read.table("varCompAnalysisR.txt",header=T)
data[data==0]<-NA #missing data
data$Assay<-as.factor(data$Assay)
levels(data$Assay)<-c("Assay1","Assay2","Assay3")
data$Level<-as.factor(data$Level)
levels(data$Level)<-c("Low","Mid","High")
data$Replicate<-as.factor(data$Replicate)
data$Injection<-as.factor(data$Injection)
# Read in expected results
expect <- as.vector(read.table("expected.txt"))
# plot results using lattice plot
xyplot(Result~Replicate|Assay,data=data,groups=data$Level,
layout=c(nlevels(data$Assay,1),aspect=1,type = "p",cex=1,#pch=c(0,1,2),
panel = function(x, ...) {
panel.xyplot(x,...)
panel.abline(h=c(LowLevel,MidLevel,HighLevel),lty=2)
# how to generalize adding lines???
}
)
# create recovery tables for each replicate
# view recovery to nearest 0.1%
# create array to hold recovery results by replicate
# first dimension (row) holds assay result
# second dimension (col) holds replicate result
# third dimension (holds level result)
# so RecReplicate[,,1] is all recovery results for level 1
# create 2 arrays- one with unrounded results, the other with rounded results (for display)
RecReplicate <- array(dim=c(nlevels(data$Assay),nlevels(data$Replicate),nlevels(data$Level)))
for (i in 1:nlevels(data$Level)){
for (j in 1:nlevels(data$Assay)){
RecReplicate[j,,i] <- with (subset(data,Level==levels(data$Level)[[i]]&Assay==levels(data$Assay)[[j]]),
tapply(Result, Replicate, mean)/expect[i,1] * 100)
}
}
RecReplicateRound <- round(RecReplicate,1)
# create recovery table for each assay
# row contains assay mean
# col contains level
# so first row contains the mean results for assay 1 at each level
RecAssay <- apply(RecReplicate,c(1,3),mean)
RecAssayRound <- round(RecAssay,1)
# create recovery table for each level
RecLevel <- apply(RecAssay,2,mean)
RecLevelRound <- round(RecLevel,1)
# perform ANOVA
for (i in 1:nlevels(data$Level)) {
aov.mod<-aov(Result~1+Error(Assay/Replicate),data=subset(data,data$Level=="Low"))
str(summary(aov.mod))
|
0a83aac4e2ced60510e0c42966fd5611dcf6dda7 | 811e464b5e76dd2b4cb489c185671337c6671937 | /02_Getting_and_Cleaning_Data/week3/q1.R | 1ca3219976d38f1e6b46df221b413e95cd8bd096 | [] | no_license | fhyme/Coursera_Data_Science | 7966427d64870cd3a95fdcd2cdc42a9cad7a990d | 37aa27d717a16959a1e3f760ee1748e71305ffa5 | refs/heads/master | 2021-01-01T18:49:27.249860 | 2015-04-12T14:02:51 | 2015-04-12T14:02:51 | 33,064,716 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 110 | r | q1.R | dt<-read.csv("getdata-data-ss06hid.csv")
# Logical index over the ACS housing data frame `dt` (read on the previous line):
# TRUE where ACR == 3 and AGS == 6.
# NOTE(review): per the ACS codebook these codes presumably mean a lot of 10+
# acres with $10,000+ in agricultural sales -- confirm against the survey docs.
# Rows where ACR or AGS is NA yield NA in this vector.
agricultureLogical<-(dt$ACR==3 & dt$AGS==6)
# Row positions of the TRUE entries (which() silently drops the NA entries).
which(agricultureLogical)
fc7aa8ef04778b5263aedb14fcb5a2dc44260249 | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Introductory_Statistics_by_Douglas_S_Shafer_And_Zhiyi_Zhang/CH9/EX9.2/Ex9_2.R | 6e7353d57c325a0538a4b442ca3800299331cb14 | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 507 | r | Ex9_2.R | #Page 450
# Two-sample z-test for a difference in means (textbook Example 9.2, p. 450):
# H0: mu1 - mu2 = 0 versus Ha: mu1 - mu2 > 0 at significance level alpha = 0.01.
n1 <- 174   # sample size, group 1
n2 <- 355   # sample size, group 2
x1 <- 3.51  # sample mean, group 1
x2 <- 3.24  # sample mean, group 2
s1 <- 0.51  # sample standard deviation, group 1
s2 <- 0.52  # sample standard deviation, group 2
alpha <- 0.01
# Squared standard error of (x1 - x2); `^` replaces the original non-standard
# `**` operator (parsed by R but undocumented and discouraged).
m <- (((s1^2)/n1)+((s2^2)/n2))
# Observed test statistic.
z <- ((x1-x2)/sqrt(m))
print(z)
# Upper-tail critical value for the one-sided test.
z0 <- qnorm(alpha,lower.tail = FALSE)
print(z0)
# Plot the standard normal density with the rejection region shaded in gray
# and the observed statistic marked as a red point.
x <- seq(-6,6,length=500)
y <- dnorm(x,mean=0,sd=1)
plot(x,y,type="l",lwd=2,col="black")
x <- seq(z0,6,length=500)
y <- dnorm(x,mean=0,sd=1)
polygon(c(z0,x,6),c(0,y,0),col="gray")
points(z,0,pch=19,col="red",cex=1.5)
# Reject H0 when the statistic exceeds the critical value.
if(z0<z){
  print("REJECT NULL HYPOTHESIS")
}else{
  print("ACCEPT NULL HYPOTHESIS")
}
|
56414e09327efa9400a31b38582fcf88cf359a9f | 2a3fcd5b705d1703ba2bdf2a22b65e7ed6090d4e | /cachematrix.R | ceee5809cac7fb565b5960dbc3ef630171d80911 | [] | no_license | SeaRhythms/ProgrammingAssignment2 | 045f65b2617421e067a1a282a08b91126f82839f | ba29fb965e1010ff017dd850bfd28f5d435c6452 | refs/heads/master | 2020-04-06T04:36:38.039536 | 2014-04-26T20:49:43 | 2014-04-26T20:49:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 894 | r | cachematrix.R | ## Fnctions for cacheing results of potentially time-consuming
## matrix computations.
## Create a special "matrix" object that caches its inverse
## Return a list that contains the names of 4 functions
## that operate on that object
## Build a caching wrapper around a matrix.
## The returned object is a list of four closures sharing one enclosing
## environment: `set`/`get` replace/read the wrapped matrix, while
## `setMatrix`/`getMatrix` store/read the cached inverse.  Replacing the
## matrix via `set` invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    # Swap in a new matrix and drop any stale cached inverse.
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setMatrix <- function(matrix) {
    cached <<- matrix
  }
  getMatrix <- function() {
    cached
  }
  list(
    set = set,
    get = get,
    setMatrix = setMatrix,
    getMatrix = getMatrix
  )
}
## Compute the inverse of a square matrix
## If the inverse was previously calculated then retrieve it from cache
## Return the inverse of the special "matrix" object built by makeCacheMatrix.
## On the first call the inverse is computed with solve() and written back
## into the object's cache; subsequent calls return the cached copy
## (announced via a message) without recomputing.  Extra arguments in `...`
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getMatrix()
  if (!is.null(cached)) {
    # Cache hit: skip the (potentially expensive) inversion.
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert, remember the result on the object, and return it.
  inv <- solve(x$get(), ...)
  x$setMatrix(inv)
  inv
}
|
8a93476b6899ea1c6562bc777d76aa72cc4e91e9 | f96eb44b1e125d937564f481f51c420a1c088496 | /_src/plots_nowcasting.R | 8a6dc8f3c33a2e211856d2753975e7d3dd478fba | [] | no_license | andrebida/covid19br.github.io | 51d206fd22eae29dec4fdb150119356eec441158 | 1fbe60c696a621053f0d1ddd1a3fc7c7627fab19 | refs/heads/master | 2022-10-09T15:31:45.680608 | 2020-05-22T17:23:36 | 2020-05-22T17:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,029 | r | plots_nowcasting.R | # libraries
library(ggplot2)
library(dplyr)
library(tidyr)
# Formatting parameters shared by all plots
source("funcoes.R") # plot.formatos comes along from this file
# local testing: define the vars manually, e.g.
# adm <- "municipio"
# sigla.adm <- "SP"
# directory the data tables are read from; `adm` and `sigla.adm` must already
# be in scope (this script is sourced with them defined by the caller)
data.dir <- paste0("../dados/", adm, "_", sigla.adm, "/", "tabelas_nowcasting_para_grafico/")
# output directory, split into subfolders per administrative unit
output.dir <- paste0("../web/", adm, "_", sigla.adm, "/")
if (!dir.exists(output.dir)) dir.create(output.dir)
# check which nowcasting outputs exist for this unit (covid/srag cases, deaths)
# NOTE(review): existe.nowcasting2() is presumably defined in funcoes.R -- confirm
existe.covid <- existe.nowcasting2(adm = adm, sigla.adm = sigla.adm, tipo = "covid")
existe.srag <- existe.nowcasting2(adm = adm, sigla.adm = sigla.adm, tipo = "srag")
existe.ob.covid <- existe.nowcasting2(adm = adm, sigla.adm = sigla.adm, tipo = "obitos_covid")
existe.ob.srag <- existe.nowcasting2(adm = adm, sigla.adm = sigla.adm, tipo = "obitos_srag")
existe.ob.srag.proaim <- existe.nowcasting2(adm = adm, sigla.adm = sigla.adm, tipo = "obitos_srag_proaim")
#############
## COVID ####
#############
# COVID cases: load the nowcasting tables, build the four plots, write tables.
if (existe.covid) {
data.covid <- get.data.base2(adm, sigla.adm, "covid")
df.covid.diario <- read.csv(paste0(data.dir, "nowcasting_diario_covid_", data.covid, ".csv"))
df.covid.cum <- read.csv(paste0(data.dir, "nowcasting_acumulado_covid_", data.covid, ".csv"))
df.td.covid <- read.csv(paste0(data.dir, "tempo_duplicacao_covid_", data.covid, ".csv"))
df.re.covid <- read.csv(paste0(data.dir, "r_efetivo_covid_", data.covid, ".csv"))
# PLOTS ####
### daily
## number of new cases, observed and by nowcasting,
## with a moving-average line
plot.nowcast.covid <- plot.nowcast.diario(df.covid.diario)
### cumulative
plot.nowcast.cum.covid <- plot.nowcast.acumulado(df.covid.cum)
### doubling time
plot.tempo.dupl.covid <- plot.tempo.dupl(df.td.covid)
### effective R
plot.estimate.R0.covid <- plot.estimate.R0(df.re.covid)
# TABLES ####
## table holding the min and max of the nowcast, doubling time, and effective R
tabelas.web(sigla.adm,
output.dir,
tipo = "covid",
df.covid.cum,
df.td.covid,
df.re.covid)
} else {
# no COVID nowcasting available: keep the plot objects NULL so downstream
# report code can test for their presence
plot.nowcast.covid <- NULL
plot.nowcast.cum.covid <- NULL
plot.estimate.R0.covid <- NULL
plot.tempo.dupl.covid <- NULL
}
############
## SRAG ####
############
# SRAG (severe acute respiratory illness) cases: same pipeline as COVID,
# with an extra date filter applied before the doubling-time / R plots.
if (existe.srag) {
data.srag <- get.data.base2(adm, sigla.adm, "srag")
df.srag.diario <- read.csv(paste0(data.dir, "nowcasting_diario_srag_", data.srag, ".csv"))
df.srag.cum <- read.csv(paste0(data.dir, "nowcasting_acumulado_srag_", data.srag, ".csv"))
df.td.srag <- read.csv(paste0(data.dir, "tempo_duplicacao_srag_", data.srag, ".csv"))
df.re.srag <- read.csv(paste0(data.dir, "r_efetivo_srag_", data.srag, ".csv"))
# PLOTS ####
### daily
## number of new cases, observed and by nowcasting,
## with a moving-average line
plot.nowcast.srag <- plot.nowcast.diario(df.srag.diario)
### cumulative
plot.nowcast.cum.srag <- plot.nowcast.acumulado(df.srag.cum)
### doubling time
# doing the date filter by hand here for everyone; it could move out later
# since it is already in the nowcasting repo
# R: oops, it could not after all
df.td.srag <- df.td.srag %>%
filter(data > "2020-03-15")
df.re.srag <- df.re.srag %>%
filter(data > "2020-03-15")
plot.tempo.dupl.srag <- plot.tempo.dupl(df.td.srag)
### effective R
plot.estimate.R0.srag <- plot.estimate.R0(df.re.srag)
# TABLES ####
tabelas.web(sigla.adm,
output.dir,
tipo = "srag",
df.srag.cum,
df.td.srag,
df.re.srag)
} else {
# no SRAG nowcasting available: keep the plot objects NULL for downstream checks
plot.nowcast.srag <- NULL
plot.nowcast.cum.srag <- NULL
plot.estimate.R0.srag <- NULL
plot.tempo.dupl.srag <- NULL
}
#####################
## OBITOS COVID ####
#####################
# COVID deaths: daily, cumulative and doubling-time outputs (no effective-R
# estimate for deaths); axis labels relabeled for death counts.
if (existe.ob.covid) {
data.ob.covid <- get.data.base2(adm, sigla.adm, "obitos_covid")
df.ob.covid.diario <- read.csv(paste0(data.dir, "nowcasting_diario_obitos_covid_", data.ob.covid, ".csv"))
df.ob.covid.cum <- read.csv(paste0(data.dir, "nowcasting_acumulado_obitos_covid_", data.ob.covid, ".csv"))
df.td.ob.covid <- read.csv(paste0(data.dir, "tempo_duplicacao_obitos_covid_", data.ob.covid, ".csv"))
### daily
## number of new deaths, observed and by nowcasting,
## with a moving-average line
plot.nowcast.ob.covid <- plot.nowcast.diario(df.ob.covid.diario) +
xlab("Dia") +
ylab("Número de novos óbitos")
### cumulative
plot.nowcast.cum.ob.covid <- plot.nowcast.acumulado(df.ob.covid.cum) +
xlab("Dia") +
ylab("Número acumulado de óbitos")
### doubling time
plot.tempo.dupl.ob.covid <- plot.tempo.dupl(df.td.ob.covid)
# TABLES ####
tabelas.web(sigla.adm,
output.dir,
tipo = "obitos_covid",
df.ob.covid.cum,
df.td.ob.covid)
} else {
# no COVID death nowcasting available: keep the plot objects NULL
plot.nowcast.ob.covid <- NULL
plot.nowcast.cum.ob.covid <- NULL
plot.tempo.dupl.ob.covid <- NULL
}
####################
## OBITOS SRAG ####
####################
# SRAG deaths: same structure as the COVID deaths section above.
if (existe.ob.srag) {
data.ob.srag <- get.data.base2(adm, sigla.adm, "obitos_srag")
df.ob.srag.diario <- read.csv(paste0(data.dir, "nowcasting_diario_obitos_srag_", data.ob.srag, ".csv"))
df.ob.srag.cum <- read.csv(paste0(data.dir, "nowcasting_acumulado_obitos_srag_", data.ob.srag, ".csv"))
df.td.ob.srag <- read.csv(paste0(data.dir, "tempo_duplicacao_obitos_srag_", data.ob.srag, ".csv"))
### daily
## number of new deaths, observed and by nowcasting,
## with a moving-average line
plot.nowcast.ob.srag <- plot.nowcast.diario(df.ob.srag.diario) +
xlab("Dia") +
ylab("Número de novos óbitos")
### cumulative
plot.nowcast.cum.ob.srag <- plot.nowcast.acumulado(df.ob.srag.cum) +
xlab("Dia") +
ylab("Número acumulado de óbitos")
### doubling time
plot.tempo.dupl.ob.srag <- plot.tempo.dupl(df.td.ob.srag)
# TABLES ####
tabelas.web(sigla.adm,
output.dir,
tipo = "obitos_srag",
df.ob.srag.cum,
df.td.ob.srag)
} else {
# no SRAG death nowcasting available: keep the plot objects NULL
plot.nowcast.ob.srag <- NULL
plot.nowcast.cum.ob.srag <- NULL
plot.tempo.dupl.ob.srag <- NULL
}
#########################
# OBITOS SRAG PROAIM ####
#########################
# SRAG deaths from the PROAIM registry: same structure as the sections above,
# reading the "_proaim_" variants of the nowcasting tables.
if (existe.ob.srag.proaim) {
data.ob.srag.proaim <- get.data.base2(adm, sigla.adm, "obitos_srag_proaim")
df.ob.srag.diario.proaim <- read.csv(paste0(data.dir, "nowcasting_diario_obitos_srag_proaim_",
data.ob.srag.proaim, ".csv"))
df.ob.srag.cum.proaim <- read.csv(paste0(data.dir, "nowcasting_acumulado_obitos_srag_proaim_",
data.ob.srag.proaim, ".csv"))
df.td.ob.srag.proaim <- read.csv(paste0(data.dir, "tempo_duplicacao_obitos_srag_proaim_", data.ob.srag.proaim, ".csv"))
### daily
## number of new deaths, observed and by nowcasting,
## with a moving-average line
plot.nowcast.ob.srag.proaim <- plot.nowcast.diario(df.ob.srag.diario.proaim) +
xlab("Dia") +
ylab("Número de novos óbitos")
### cumulative
plot.nowcast.cum.ob.srag.proaim <- plot.nowcast.acumulado(df.ob.srag.cum.proaim) +
xlab("Dia") +
ylab("Número acumulado de óbitos")
### doubling time
plot.tempo.dupl.ob.srag.proaim <- plot.tempo.dupl(df.td.ob.srag.proaim)
# TABLES ####
tabelas.web(sigla.adm,
output.dir,
tipo = "obitos_srag_proaim",
df.ob.srag.cum.proaim,
df.td.ob.srag.proaim)
} else {
# no PROAIM SRAG death nowcasting available: keep the plot objects NULL
plot.nowcast.ob.srag.proaim <- NULL
plot.nowcast.cum.ob.srag.proaim <- NULL
plot.tempo.dupl.ob.srag.proaim <- NULL
}
|
ecd2e3bcce689eea63f914c71c97cf5fdab58d86 | de340ab665b7e8b673bfae9ec26c570b6d412717 | /man/chromoR-package.Rd | d441a808f8a2c6326fc76d6b585ad26e13210c82 | [] | no_license | cran/chromoR | f23937255b3eb4b9d8e83a0075dc5a0b4bf62944 | 202b4fc1f036eeb299b4ba946d64d585969bbf79 | refs/heads/master | 2021-03-12T20:24:51.312408 | 2014-02-06T00:00:00 | 2014-02-06T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 936 | rd | chromoR-package.Rd | \name{chromoR-package}
\alias{chromoR-package}
\alias{chromoR}
\docType{package}
\title{
Analysis of chromosomal interactions data (Hi-C data)
}
\description{
ChromoR combines wavelet-based change point detection with Bayes factors
to support correction, segmentation, and comparison of Hi-C contact maps.
It provides a user-friendly software solution that addresses the entire statistical
pipeline required for the analysis of chromosomal interaction data.
}
\details{
\tabular{ll}{
Package: \tab chromoR\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-02-07\cr
License: \tab GPL-2\cr
}
For an easy start with chromoR, check out the documentation and examples for
correctCIM, compareCIM and segmentCIM
See also http://www.cl.cam.ac.uk/~ys388/chromoR/ for more examples and data sets.
}
\author{
Yoli Shavit <ys388@cam.ac.uk>
}
\references{
http://www.cl.cam.ac.uk/~ys388/chromoR/
}
\keyword{ package }
|
4b98f910396d0b9a6cb9415065d00d80543a1a48 | bacba0d49109344c7f303269a9d991be2840297f | /src/burn_glass_validation/openjobs_profile/openjobs_profiling.R | 0ad87b75b986bd6cf62d6b55a3744c628f23e1db | [] | no_license | uva-bi-sdad/stem_edu | 6dbf0d814fec5d5fcf6234f0533ae5db63322307 | 24d56f420d254379a729fb7c73847f19f21e5930 | refs/heads/master | 2020-05-23T09:22:56.901163 | 2019-08-08T19:10:35 | 2019-08-08T19:10:35 | 186,703,989 | 4 | 1 | null | 2019-09-30T19:14:39 | 2019-05-14T21:36:30 | R | UTF-8 | R | false | false | 161 | r | openjobs_profiling.R | #script for open jobs data profiling
library(DataExplorer)
library(data.table)
# Fast CSV read of the parsed open-jobs postings (working copy).
ojobs <- fread('./data/stem_edu/working/allOpenjobsParsed.csv')
# Quick profiling summary: row/column counts, column types, and missingness.
introduce(ojobs)
|
024d5f8806f60d664e500cb12a0e1219d9583516 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/fastR/examples/utilities.Rd.R | d7f17c022a6c4ba1f314d3c10863b5d3d45a4d86 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 261 | r | utilities.Rd.R | library(fastR)
### Name: utilities
### Title: Utilities bills
### Aliases: utilities utilities2
### Keywords: datasets
### ** Examples
data(utilities); data(utilities2)
# Gas bill versus mean temperature: raw bill and per-day bill
# (lattice scatterplots from the fastR example data sets).
xyplot(gasbill ~ temp, data=utilities)
xyplot(gasbillpday ~ temp, data=utilities2)
|
56dfd2a7f554e5251079a9ca4c5ccf19cc9d607b | b5f3a9bdc6a63b03bac5cda027efc7457bba2789 | /code/homoscedastic_model_holdout/functions_AR.R | 5a21f8120457c9bb7dff5276b57e453172a2fe7f | [] | no_license | philawhite/Pollution_state_modeling_code | 367ad452a08354b622ba26febe6c7d7a14c43626 | 2cb5242e7f13f97766bfdee68a187358bc1fc076 | refs/heads/master | 2020-04-02T18:55:32.320379 | 2019-03-08T16:19:24 | 2019-03-08T16:19:24 | 154,717,599 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,008 | r | functions_AR.R | ################################################
################ coverage Function ###########
################################################
# Empirical coverage indicator: returns 1 when `val` lies inside the central
# (1 - alp) interval of the sample draws `x`, and 0 otherwise.  Used to score
# interval coverage of posterior predictions.
in_function = function(x,val,alp=0.1){
  bounds = quantile(x, probs = c(alp / 2, 1 - alp / 2))
  inside = (val <= bounds[2]) & (val >= bounds[1])
  1 * inside
}
# Stationarity check for an AR process with coefficients `coef` at the given
# positive integer `lags`: builds the characteristic polynomial
# 1 - sum_k coef[k] * z^lags[k] and requires every root to lie strictly
# outside the unit circle.
is.stationary = function(coef,lags){
  char_poly = numeric(max(lags) + 1)
  char_poly[1] = 1
  char_poly[lags + 1] = -coef
  roots = polyroot(char_poly)
  all(Mod(roots) > 1)
}
# Box-Cox power transform of `dat` with parameter `lam1`, applied after
# adding the offset `off` (guards against zeros).  lam1 = 0 gives the log
# transform; otherwise ((dat + off)^lam1 - 1) / lam1.
box_tran <- function(dat,lam1,off = 0){
  shifted <- dat + off
  if (lam1 == 0) {
    # limiting case of the power transform as lam1 -> 0
    log(shifted)
  } else {
    (shifted^lam1 - 1) / lam1
  }
}
# Inverse of box_tran(): maps a transformed value back to the original scale
# and removes the offset `off`.  lam1 = 0 inverts the log transform;
# otherwise inverts the power transform via (dat * lam1 + 1)^(1 / lam1).
inv_box_tran <- function(dat,lam1,off = 0){
  if (lam1 == 0) {
    exp(dat) - off
  } else {
    (dat * lam1 + 1)^(1 / lam1) - off
  }
}
################################################
################ Gibbs Functions #############
################################################
# Gibbs update for the precision matrix of the site-level regression
# coefficients of series 1 (O3; bet1 is used with Z1 in MCMC_mexico).
# Full conditional: Wishart with ns + nu_b1 degrees of freedom and scale
# solve(M_b1 + S), where S sums the outer products of the ns coefficient
# rows centered at the population mean bet10.
# NOTE(review): rwish() is supplied elsewhere (presumably MCMCpack) -- confirm.
Sig10b_update = function( bet1, bet10, M_b1, nu_b1 , ns){
  p = length(bet10)
  S = matrix(0, nrow = p, ncol = p)
  for (j in seq_len(ns)) {
    dev = bet1[j, ] - bet10
    S = S + dev %*% t(dev)
  }
  return( rwish(ns + nu_b1, solve(M_b1 + S)) )
}
# Gibbs update for the precision matrix of the site-level regression
# coefficients of series 2 (PM10; bet2 is used with Z2 in MCMC_mexico).
# Wishart full conditional with ns + nu_b2 degrees of freedom and scale
# solve(M_b2 + S), S being the sum of centered outer products.
# NOTE(review): rwish() is supplied elsewhere (presumably MCMCpack) -- confirm.
Sig20b_update = function( bet2, bet20, M_b2, nu_b2 , ns){
  p = length(bet20)
  S = matrix(0, nrow = p, ncol = p)
  for (j in seq_len(ns)) {
    dev = bet2[j, ] - bet20
    S = S + dev %*% t(dev)
  }
  return( rwish(ns + nu_b2, solve(M_b2 + S)) )
}
# Gibbs update for the precision matrix of the site-level autoregressive
# (lag) coefficients of series 1 (gam1 multiplies the O3 lag matrix L1).
# Wishart full conditional with ns + nu_g1 degrees of freedom and scale
# solve(M_g1 + S), S being the sum of centered outer products.
# NOTE(review): rwish() is supplied elsewhere (presumably MCMCpack) -- confirm.
Sig10g_update = function( gam1, gam10, M_g1, nu_g1 , ns){
  q = length(gam10)
  S = matrix(0, nrow = q, ncol = q)
  for (j in seq_len(ns)) {
    dev = gam1[j, ] - gam10
    S = S + dev %*% t(dev)
  }
  return( rwish(ns + nu_g1, solve(M_g1 + S)) )
}
# Gibbs update for the precision matrix of the site-level autoregressive
# (lag) coefficients of series 2 (gam2 multiplies the PM10 lag matrix L2).
# Wishart full conditional with ns + nu_g2 degrees of freedom and scale
# solve(M_g2 + S), S being the sum of centered outer products.
# NOTE(review): rwish() is supplied elsewhere (presumably MCMCpack) -- confirm.
Sig20g_update = function( gam2, gam20, M_g2, nu_g2 , ns){
  q = length(gam20)
  S = matrix(0, nrow = q, ncol = q)
  for (j in seq_len(ns)) {
    dev = gam2[j, ] - gam20
    S = S + dev %*% t(dev)
  }
  return( rwish(ns + nu_g2, solve(M_g2 + S)) )
}
################################################
################ Data
################################################
################################################################
################ Mexico all data
################################################################
MCMC_mexico = function(dat,dat_dec ,lags1,lags2 ,reps ,burn ,ns , nt,
lam = 0,seed = 1,n_hold = n_hold){
set.seed(seed)
hold_ind = sample( nt * ns , n_hold )
hold_ind = hold_ind[order(dat$loc_ind[hold_ind],dat$time_ind[hold_ind])]
hold_dat = dat[hold_ind,c("obs_ind","Hour","time_ind","loc_ind","PM10","O3")]
lag_times1 <- lag_times2 <- vector(mode = "list",length = n_hold)
dat$O3[hold_ind] = NA
dat$PM10[hold_ind] = NA
for(i in 1:n_hold){
lag_poss = hold_dat$time_ind[i] + lags1
idx = which(lag_poss <= nt)
lag_times1[[i]] = lag_poss[idx]
}
for(i in 1:n_hold){
lag_poss = hold_dat$time_ind[i] + lags2
idx = which(lag_poss <= nt)
lag_times2[[i]] = lag_poss[idx]
}
Z1_loc = lapply(1:ns,function(x){ sqrt(dat$O3[dat$loc_ind == x]) } )
Z1_time = lapply(1:nt,function(x){ sqrt(dat$O3[dat$time_ind == x]) } )
Z1_all = sqrt(dat$O3)
Z1_loc_old = lapply(1:ns,function(x){ sqrt(dat_dec$O3[dat_dec$loc_ind == x]) } )
Z1_old = sqrt(dat_dec$O3)
Z2_loc = lapply(1:ns,function(x){ box_tran(dat$PM10[dat$loc_ind == x],lam) } )
Z2_time = lapply(1:nt,function(x){ box_tran(dat$PM10[dat$time_ind == x],lam) } )
Z2_all = box_tran(dat$PM10,lam)
Z2_loc_old = lapply(1:ns,function(x){ box_tran(dat_dec$PM10[dat_dec$loc_ind == x],lam) } )
Z2_old = box_tran(dat_dec$PM10,lam)
X_all = as.matrix(cbind(1,scale(dat[,c("RH","TMP")],scale=FALSE)))
X_all = X_all[c(1:(ns*24),1:(nt*ns - ns*24)),]
X_loc = lapply(1:ns,function(x){ X_all[dat$loc_ind == x,] } )
XtX_loc = lapply(X_loc,function(X) t(X) %*% X )
X_time = lapply(1:nt,function(x){ X_all[dat$time_ind == x,] } )
p = ncol(X_all)
n_lags1 = length(lags1)
n_lags2 = length(lags2)
L_all1 = matrix(0,ncol=length(lags1),nrow=(ns*nt))
L_all2 = matrix(0,ncol=length(lags2),nrow=(ns*nt))
for(i in 1:(nt*ns)){
t_ind = dat$time_ind[i]
s_ind = dat$loc_ind[i]
lag_ind1 = (t_ind - lags1)
lag_ind2 = (t_ind - lags2)
idx1_2017 = which(1:nt %in% lag_ind1)
idx2_2017 = which(1:nt %in% lag_ind2)
idx1_2016 = which((-nt_dec + 1):0 %in% lag_ind1)
idx2_2016 = which((-nt_dec + 1):0 %in% lag_ind2)
if(length(idx1_2016) == 0){
L_all1[i,] = Z1_loc[[s_ind]][idx1_2017]
} else if(length(idx1_2017) ==0 ) {
L_all1[i,] = Z1_loc_old[[s_ind]][idx1_2016]
} else{
L_all1[i,] = c(Z1_loc_old[[s_ind]][idx1_2016],Z1_loc[[s_ind]][idx1_2017])
}
if(length(idx2_2016) == 0){
L_all2[i,] = Z2_loc[[s_ind]][idx2_2017]
} else if(length(idx2_2017) == 0 ) {
L_all2[i,] = Z2_loc_old[[s_ind]][idx2_2016]
} else{
L_all2[i,] = c(Z2_loc_old[[s_ind]][idx2_2016],Z2_loc[[s_ind]][idx2_2017])
}
}
L_all1 = L_all1[,n_lags1:1]
L_all2 = L_all2[,n_lags2:1]
L_loc1 = lapply(1:ns,function(x){ L_all1[dat$loc_ind == x,] } )
L_time1 = lapply(1:nt,function(x){ L_all1[dat$time_ind == x,] } )
L_loc2 = lapply(1:ns,function(x){ L_all2[dat$loc_ind == x,] } )
L_time2 = lapply(1:nt,function(x){ L_all2[dat$time_ind == x,] } )
S_b1_inv = solve(1e3 * diag(p))
S_b2_inv = solve(1e3 * diag(p))
m_b1 = rep(0,p)
m_b2 = rep(0,p)
Smb1 = S_b1_inv %*% m_b1
Smb2 = S_b2_inv %*% m_b2
S_g1_inv = solve(1e3 * diag(n_lags1))
S_g2_inv = solve(1e3 * diag(n_lags2))
m_g1 = rep(0,n_lags1)
m_g2 = rep(0,n_lags2)
Smg1 = S_g1_inv %*% m_g1
Smg2 = S_g2_inv %*% m_g2
as1 = 1
as2 = 1
bs1 = 1
bs2 = 1
preds1 = matrix(0,ncol = n_hold,nrow = (reps + burn))
preds2 = matrix(0,ncol = n_hold,nrow = (reps + burn))
bet1 = vector(mode= "list",length=(reps+burn)) ; bet1[[1]] = matrix(0,ncol=p,nrow=ns)
bet2 = vector(mode= "list",length=(reps+burn)) ; bet2[[1]] = matrix(0,ncol=p,nrow=ns)
bet10 = matrix(0,ncol = p,nrow= (reps +burn))
bet20 = matrix(0,ncol = p,nrow= (reps +burn))
gam1 = vector(mode= "list",length=(reps+burn)) ; gam1[[1]] = matrix(0,ncol=n_lags1,nrow=ns)
gam2 = vector(mode= "list",length=(reps+burn)) ; gam2[[1]] = matrix(0,ncol=n_lags2,nrow=ns)
Sig_inv_b1 = vector(mode= "list",length=(reps+burn)) ; Sig_inv_b1[[1]] = 1e-3*diag(p)
Sig_inv_b2 = vector(mode= "list",length=(reps+burn)) ; Sig_inv_b2[[1]] = 1e-3*diag(p)
Sig_inv_g1 = vector(mode= "list",length=(reps+burn)) ; Sig_inv_g1[[1]] = 1e-3*diag(n_lags1)
Sig_inv_g2 = vector(mode= "list",length=(reps+burn)) ; Sig_inv_g2[[1]] = 1e-3*diag(n_lags2)
gam10 = matrix(0,ncol = n_lags1,nrow= (reps +burn))
gam20 = matrix(0,ncol = n_lags2,nrow= (reps +burn))
sig21 = numeric(reps + burn) ; sig21[1] = 1
sig22 = numeric(reps + burn) ; sig22[1] = 1
tau21 = numeric(reps + burn) ; tau21[1] = 1
tau22 = numeric(reps + burn) ; tau22[1] = 1
V1 = matrix(0,ncol = ns,nrow= (reps +burn))
V2 = matrix(0,ncol = ns,nrow= (reps +burn))
a12 = numeric(reps + burn)
a11 = rep(1,reps+burn)
m1 = mean(Z1_all,na.rm = TRUE)
m2 = mean(Z2_all,na.rm = TRUE)
for(i in 1:n_hold){
s_idx = hold_dat$loc_ind[i]
t_idx = hold_dat$time_ind[i]
imp1 = m1
Z1_loc[[s_idx]][t_idx] = imp1
Z1_time[[t_idx]][s_idx] = imp1
imp2 = m2
Z2_loc[[s_idx]][t_idx] = imp2
Z2_time[[t_idx]][s_idx] = imp2
n_l1 = length(lag_times1[[i]])
n_l2 = length(lag_times2[[i]])
if(n_l1 > 0 ){
for(j in 1:n_l1){
t_ind = lag_times1[[i]][j]
L_loc1[[s_idx]][t_ind,j] = imp1
L_time1[[t_ind]][s_idx,j] = imp1
}
for(j in 1:n_l2){
t_ind = lag_times2[[i]][j]
L_loc2[[s_idx]][t_ind,j] = imp2
L_time2[[t_ind]][s_idx,j] = imp2
}
}
}
LtL_loc1 = lapply(L_loc1,function(X) t(X) %*% X )
LtL_loc2 = lapply(L_loc2,function(X) t(X) %*% X )
st = proc.time()
for(i in 2:(reps + burn)){
############## Likelihood Variance
sig21[i] = sig21_update(Z1_loc, X_loc,bet1[[i-1]], L_loc1, gam1[[i-1]], a11[i-1],V1[i-1,],as1,bs1,ns,nt)
sig22[i] = sig22_update(Z2_loc, X_loc,bet2[[i-1]], L_loc2,gam2[[i-1]], a12[i-1],V1[i-1,],V2[i-1,],as2,bs2,ns,nt)
############## Update Beta
bet1[[i]] = bet1_update(Z1_loc,X_loc,L_loc1,Sig_inv_b1[[i-1]],bet10[i-1,], gam1[[i-1]],
a11[i-1],V1[i-1,], sig21[i], XtX_loc , p,ns,nt)
bet2[[i]] = bet2_update(Z2_loc,X_loc,L_loc2,Sig_inv_b2[[i-1]] ,bet20[i-1,], gam2[[i-1]],
a12[i-1], V1[i-1,],V2[i-1,],sig22[i],XtX_loc , p,ns,nt)
bet10[i,] = bet10_update(Sig_inv_b1[[i-1]],bet1[[i]],S_b1_inv, Smb1, ns)
bet20[i,] = bet20_update(Sig_inv_b2[[i-1]],bet2[[i]] ,S_b2_inv, Smb2, ns)
Sig_inv_b1[[i]] = Sig10b_update( bet1[[i]], bet10[i,], 1e-3 * diag(p) , p+1 , ns)
Sig_inv_b2[[i]] = Sig20b_update( bet2[[i]], bet20[i,], 1e-3 * diag(p) , p+1 , ns)
############## Update Gamma
gam1[[i]] = gam1_update(Z1_loc,X_loc,L_loc1,Sig_inv_g1[[i-1]] ,gam10[i-1,],
bet1[[i]], a11[i-1], V1[i-1,], sig21[i],LtL_loc1 , n_lags1,ns,nt)
gam2[[i]] = gam2_update(Z2_loc,X_loc,L_loc2,Sig_inv_g2[[i-1]] ,gam20[i-1,],
bet2[[i]], a12[i-1], V1[i-1,], V2[i-1,],sig22[i],LtL_loc2 , n_lags2,ns,nt)
gam10[i,] = gam10_update(Sig_inv_g1[[i-1]],gam1[[i]],S_g1_inv, Smg1, ns)
gam20[i,] = gam20_update(Sig_inv_g2[[i-1]],gam2[[i]],S_g2_inv, Smg2, ns)
Sig_inv_g1[[i]] = Sig10g_update( gam1[[i]], gam10[i,], 1e-3 * diag(n_lags1) , n_lags1 + 1 , ns)
Sig_inv_g2[[i]] = Sig20g_update( gam2[[i]], gam20[i,], 1e-3 * diag(n_lags2) , n_lags2 + 1 , ns)
############## Update V
V1[i,] = V1_update(Z1_time,Z2_time, X_time, L_time1, L_time2, a_11 = a11[i-1], a_12 = a12[i-1],
V2[i-1,], sig21 = sig21[i], sig22 = sig22[i], tau21 = tau21[i-1],bet1[[i]],
bet2[[i]], gam1[[i]],gam2[[i]],Q, nt, ns)
V1[i,] = scale(V1[i,],scale=FALSE)
V2[i,] = V2_update(Z2_time, X_time, L_time2, a_12 = a12[i-1],V1[i,],
sig22 = sig22[i], tau22 = tau22[i-1],bet2[[i]],gam2[[i]],Q, nt, ns)
V2[i,] = scale(V2[i,],scale=FALSE)
tau21[i] = tau21_update(Q, V1[i,], a_t1 = 1, b_t1 = 1, ns)
tau22[i] = tau22_update(Q, V2[i,], a_t2 = 1, b_t2 = 1, ns)
# a12[i] = a12[i-1]
a12[i] = a12_update( Z2_loc, X_loc, L_loc2,bet2[[i]], gam2[[i]], V1[i,], V2[i,],
sig22[i], m = 0, s2 = 1, ns, nt)
############## Impute missing data
for(j in 1:n_hold){
# if( (j == 1) | (s_idx != hold_dat$loc_ind[j]) ){
s_idx = hold_dat$loc_ind[j]
X_temp = X_loc[[s_idx]]
Z1_temp = Z1_loc[[s_idx]]
Z2_temp = Z2_loc[[s_idx]]
L1_temp = L_loc1[[s_idx]]
L2_temp = L_loc2[[s_idx]]
bet1_temp = bet1[[i]][s_idx,]
gam1_temp = gam1[[i]][s_idx,]
bet2_temp = bet2[[i]][s_idx,]
gam2_temp = gam2[[i]][s_idx,]
# }
n_lagsj1 = length(lag_times1[[j]])
n_lagsj2 = length(lag_times2[[j]])
t_idx = hold_dat$time_ind[j]
# imp1 = Z1_update(s_idx,t_idx,Z1_loc,X_loc, L_loc1,bet1[[i]],gam1[[i]],1,
# V1,sig21[i],n_lagsj, lags1)
# imp2 = Z2_update(s_idx,t_idx,Z2_loc,X_loc, L_loc2,bet2[[i]],gam2[[i]],a12[i],
# V1,V2,sig22[i],n_lagsj, lags2)
imp1 = max(0,Z1_update_alt(t_idx,Z1_temp,X_temp, L1_temp,bet1_temp,gam1_temp,1,
V1[s_idx],sig21[i],n_lagsj1, lags1))
imp2 = Z2_update_alt(t_idx,Z2_temp,X_temp, L2_temp,bet2_temp,gam2_temp,a12[i],
V1[s_idx],V2[s_idx],sig22[i],n_lagsj2, lags2)
Z1_loc[[s_idx]][t_idx] = imp1
Z1_time[[t_idx]][s_idx] = imp1
Z2_loc[[s_idx]][t_idx] = imp2
Z2_time[[t_idx]][s_idx] = imp2
# n_l = length(lag_times[[j]])
# if(n_l > 0 ){
# for(k in 1:n_l){
# t_ind = lag_times[[j]][k]
# L_loc1[[s_idx]][t_ind,k] = imp1
# L_time1[[t_ind]][s_idx,k] = imp1
# L_loc2[[s_idx]][t_ind,k] = imp2
# L_time2[[t_ind]][s_idx,k] = imp2
#
# }
# }
n_l1 = length(lag_times1[[j]])
n_l2 = length(lag_times2[[j]])
if(n_l1 > 0 ){
for(k in 1:n_l1){
t_ind = lag_times1[[j]][k]
L_loc1[[s_idx]][t_ind,k] = imp1
L_time1[[t_ind]][s_idx,k] = imp1
}
}
if(n_l2 > 0 ){
for(k in 1:n_l2){
t_ind = lag_times2[[j]][k]
L_loc2[[s_idx]][t_ind,k] = imp2
L_time2[[t_ind]][s_idx,k] = imp2
}
}
preds1[i,j] = imp1
preds2[i,j] = imp2
}
LtL_loc1 = lapply(L_loc1,function(X) t(X) %*% X )
LtL_loc2 = lapply(L_loc2,function(X) t(X) %*% X )
time_its <- (proc.time() - st)[3] / (i)
time_used <- round((proc.time() - st)[3]/(60),digits=4)
time_left <- round(time_its * (reps +burn- i )/(60),digits=4)
cat("\r", i, " of ", reps + burn,"||| Time left: ",floor(time_left/60),
" hours",time_left%%60," minutes")
flush.console()
}
return(list(sig21 = sig21[-(1:burn)], sig22 = sig22[-(1:burn)],bet1 = bet1[-(1:burn)],
bet2 = bet2[-(1:burn)],bet10 = bet10[-(1:burn),],bet20 = bet20[-(1:burn),],
Sig_inv_b1 = Sig_inv_b1[-(1:burn)],Sig_inv_b2 = Sig_inv_b2[-(1:burn)],
gam1 = gam1[-(1:burn)],gam2 = gam2[-(1:burn)],gam10 = gam10[-(1:burn),],
gam20 = gam20[-(1:burn),], Sig_inv_g1 = Sig_inv_g1[-(1:burn)],
Sig_inv_g2 = Sig_inv_g2[-(1:burn)],V1 = V1[-(1:burn),],V2 = V2[-(1:burn),],
tau21 = tau21[-(1:burn)],tau22 = tau22[-(1:burn)],a12 = a12[-(1:burn)] ,
preds1 = preds1[-(1:burn),] , preds2 = preds2[-(1:burn),],hold_dat = hold_dat))
}
|
7d7bb4aa0c9fe06e74f1852782c3936408065259 | 2ba98fc0f29719fbb49b1dcf698011a29ed6cb30 | /Daman.R | c804b1b7ccd05ad80e6037d5d3bdc35be6039848 | [] | no_license | cstawitz/uw_tutorial | 7e7de17cd8a9c578ebac9e67318738e0cf9ef8b5 | 2f730483a08e92bb205f99ec31dded8cfaac6aa0 | refs/heads/master | 2023-03-19T02:53:48.624501 | 2023-03-13T18:27:40 | 2023-03-13T18:27:40 | 61,586,127 | 0 | 0 | null | 2017-01-19T00:47:17 | 2016-06-20T23:01:01 | R | UTF-8 | R | false | false | 11 | r | Daman.R | x <- 2 + 2 |
cbf651145a3bae84229a1ba257295dfcd46d3770 | 1cfed2d6b645f5e4f9593473616cde41af48bffe | /01 Data/Load_Data.R | 528fd908fa958a990d9eafcfc0943f9f89474e18 | [] | no_license | ryudkin0/DV_RProject3 | 4341565e67aaf9eec163e6f5086427c533aebaed | 8af763b8a54ed016c363d2b141c803c64af83bac | refs/heads/master | 2016-09-06T09:21:17.143827 | 2015-03-05T02:40:44 | 2015-03-05T02:40:44 | 31,520,240 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 996 | r | Load_Data.R | MBSDF <- data.frame(fromJSON(getURL(URLencode('129.152.144.84:5001/rest/native/?query="select * from MBS order by id"'),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_ry2634', PASS='orcl_ry2634', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE)))
MBS2DF <- data.frame(fromJSON(getURL(URLencode('129.152.144.84:5001/rest/native/?query="select * from MBS2 order by id"'),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_ry2634', PASS='orcl_ry2634', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE)))
UNEMPLOYMENTDF <- data.frame(fromJSON(getURL(URLencode('129.152.144.84:5001/rest/native/?query="select * from UNEMPLOYMENT"'),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521:ORCL', USER='C##cs329e_ry2634', PASS='orcl_ry2634', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE)))
|
1c4b5dcba62edd98376a774af77c2d49c2c81dd3 | b8e469c65a43a79f299a628a37444006769fda7f | /explore_data.R | ca961c1a72d21354ca3b524aaf67d4956e251b46 | [] | no_license | sachiwije/AFL_fitzRoy | 05cdea778cf022b9d94aa829062401e1fe6b01f8 | ced051088b943c96f9bead84a6e41283dd72fb1e | refs/heads/main | 2023-04-01T15:01:23.119734 | 2021-03-25T09:39:31 | 2021-03-25T09:39:31 | 351,365,262 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,751 | r | explore_data.R |
## Challenge to scrape AFLW stats from web and do data viz
## 26 March 2021
## Burnet Coding and Software Club 2021
#setwd("/Users/sachintha/projects/AFL_fitzRoy/")
#install.packages("fitzRoy")
# https://jimmyday12.github.io/fitzRoy/articles/womens-stats.html
#---------------
library(fitzRoy)
library(tidyverse)
#---------------
#---------------
# fetch data
#Wok with player stats
#Lest see what the fetch_player_stats is about
?fetch_player_stats
#Provides Individual Player Statistics for AFL game
#Get stats form 2017 - 2020
# this is not working all_seasons <- fetch_player_stats(season = c(2017,2020), comp = "AFLW")
season_2017 <- fetch_player_stats(season = 2017, comp = "AFLW")
season_2018 <- fetch_player_stats(season = 2018, comp = "AFLW")
season_2019 <- fetch_player_stats(season = 2019, comp = "AFLW")
season_2020 <- fetch_player_stats(season = 2020, comp = "AFLW")
#lets just wok on the 2020 player stat
colnames(season_2020)
#check the names of each round
unique(season_2020$round.name)
#see clubs played home
unique(season_2020$home.team.club.name)
#see clubs played away
unique(season_2020$away.team.club.name)
#See team names of players
unique(season_2020$team.name)
#based on the colnames colum 16 was player 1st name and 17 was last name
#lets combne them for easier analysis
season_2020 <- unite(season_2020, player_full_name, 16:17, remove = FALSE )
View(season_2020)
richmond_2020 <- filter(.data = season_2020,season_2020$team.name=="Richmond")
richmond_2020$shotEfficiency
plot_rich_ef_shot <- richmond_2020 %>%
ggplot(mapping = aes(x = player_full_name, y = metresGained )) +
geom_col() + coord_flip()
plot_rich_ef_shot
ggsave("figures/metersGained_richmond.pdf", plot_rich_ef_shot)
## END
|
b3633822b2351d26107a7d51f516b7e39fa9e5d8 | ce68a85c4a6c5d474a6a574c612df3a8eb6685f7 | /book/packt/R.Object-oriented.Programming/6682OS_03_Codes/chapter3/chapter_3_ex4.R | f6ae06bc3e6aa550fe2d2999529f4a3575249835 | [] | no_license | xenron/sandbox-da-r | c325b63114a1bf17d8849f076bfba22b6bdb34a3 | c217fdddc26ed523b3860e2000afc699afac55a2 | refs/heads/master | 2020-04-06T06:58:17.049181 | 2016-08-24T06:16:32 | 2016-08-24T06:16:32 | 60,466,314 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 63 | r | chapter_3_ex4.R | getwd()
d <- getwd()
d
setwd('/tmp/examples/csv')
getwd()
|
65b9a043ca045eb5c0ba545a64c5f7c440bce264 | 5b44b2990fa14d92e35ac6f53cf0a3e323433e58 | /R/Ontario_PC_MappeR.R | e0b7167fe1169375f2e3c7bd74a63c77febe2fe7 | [] | no_license | benyamindsmith/PostalCodeLocaleMappeR | fe9254a5beabe664d2a3f701d1ddf588100d0815 | 84148fb7fa7de4c5c2bd746fc56c570a255ce56a | refs/heads/master | 2020-11-26T16:10:32.854514 | 2019-12-25T18:18:44 | 2019-12-25T18:18:44 | 229,134,905 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 842 | r | Ontario_PC_MappeR.R |
Ontario_PC_MappeR<-function(addresses){
require(readr)
require(stringr)
##First lets attach the Ontario Postal Codes Data set##
Ontario_ds<-read.csv(url("https://raw.githubusercontent.com/benyamindsmith/PostalCodeLocaleMappeR/master/Ontario%20Postal%20Code%20Dataset.csv?token=ALCCTHSDAGXRODXBPOQHSRC6AJDAS"))
##Now lets get our required functions
get_postal_codes<-function(x){
str_extract_all(x,
"[ABCEGHJKLMNPRSTVXY]\\d[ABCEGHJ-NPRSTV-Z][ ]?\\d[ABCEGHJ-NPRSTV-Z]\\d")
}
##Lets extract the Postal Codes
pc<-get_postal_codes(addresses)
##Get FSAs
fsa<-str_extract_all(pc,"[A-Z][0-9][A-Z]")
fsa<-unlist(fsa)
##Now match
ind<- match(fsa,Ontario_ds$Area.Code)
##Get result
locale<-sapply(ind,function(x) Ontario_ds$Locale[x])
##Print result
locale
}
|
ebc99489f020f093f9e10ea5e607b22c18de1e9f | ea1adeeeb764355ecbc55f08545952e1ac614d66 | /day12-prof.R | 5fc7a4b16888855abe8c97617fdf753475309e11 | [] | no_license | nalsalam/AdventOfCode2019 | 4f48b8db2e8a70a82adc6d70974b42acc94f54a9 | 9390714cb3ed27052fb76f110b872c89f4907f44 | refs/heads/master | 2020-11-24T07:19:20.774225 | 2019-12-23T20:47:21 | 2019-12-23T20:47:21 | 228,026,421 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,243 | r | day12-prof.R | library(tidyverse)
# library(profvis)
position <- c(-1, 2, 4, 3)
velocity <- rep(0, 4)
gravity <- function(position, velocity) {
rowSums(sign(-outer(position, position, FUN = "-")))
}
gravity_fast <- function(position, velocity) {
.rowSums(sign(-outer(position, position, FUN = "-")), 4, 4, na.rm = FALSE)
}
outer_diff_fst <- function(X, Y) {
Y <- rep.int(position, rep.int(4, 4))
X <- rep(position, times = 1)
robj <- `-`(X, Y)
dim(robj) <- c(4, 4)
robj
}
bench::mark(
outer(position, position, FUN = "-"),
outer_diff_fst(position)
) %>% View() # total time 93 vs. 43 ms
gravity_faster <- function(position, velocity) {
.rowSums(sign(-outer_diff_fst(position)), 4, 4, na.rm = FALSE)
}
# gravity_faster(position, velocity)
bench::mark(
gravity(position, velocity),
gravity_fast(position, velocity),
gravity_faster(position, velocity)
) %>% View() # 207, 103, 43 -- Nice!
moons_upd <- function(x, x_v) {
moon_scan %>%
mutate(
x_v = x_v + gravity(x, x_v) #,
# y_v = y_v + gravity(y, y_v),
# z_v = z_v + gravity(z, z_v)
) %>%
mutate(
x = x + x_v # ,
# y = y + y_v,
# z = z + z_v
)
}
moons_upd(position, velocity)
moon_scan <-
tibble(x = NA_real_, y = NA_real_, z = NA_real_) %>%
add_row(x=7, y=10, z=17) %>%
add_row(x=-2, y=7, z=0) %>%
add_row(x=12, y=5, z=12) %>%
add_row(x=5, y=-8, z=6) %>%
slice(-1) %>%
mutate(
x_v = 0, y_v = 0, z_v = 0
)
moon_scan_start <- moon_scan
sim_moons <- function() {
i <- 1
repeat {
moon_scan <-
moon_scan %>%
mutate(
x_v = x_v + gravity(x, x_v),
y_v = y_v + gravity(y, y_v),
z_v = z_v + gravity(z, z_v)
) %>%
mutate(
x = x + x_v,
y = y + y_v,
z = z + z_v
) # %>%
# mutate(
# pot = abs(x) + abs(y) + abs(z),
# kin = abs(x_v) + abs(y_v) + abs(z_v),
# tot = pot * kin
# )
if(all(moon_scan_start == moon_scan) || i >= 1000) {
steps <- i
break()
}
i <- i + 1
}
return(i)
}
moon_scan$x_v <- moon_scan$x_v + gravity(moon_scan$x, moon_scan$x_v)
typeof(mutate)
dplyr:::mutate # not .Primitive
class(dplyr:::mutate)
typeof(all)
all # .Primitive, i.e. C++
|
b0ef6c3e8571841410dbe0339d34020040c18f12 | 83fec32b0e9f9f113f105271adb8d49e062d9909 | /geologyGeometry/tutorials/c/oriMCMCInference.R | ddb379bc2976897a00f54ee4d2643f9e2f7e78ba | [
"Apache-2.0"
] | permissive | nicolasmroberts/InternalStructureMtEdgar_PR2021 | e0982fa25ad8247573649ff780381742ce461163 | 01ebb627aedb2b8f7dab6ce8f3bc53b545c6df4d | refs/heads/master | 2023-03-17T14:18:24.883512 | 2021-03-16T19:59:57 | 2021-03-16T19:59:57 | 348,474,436 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,015 | r | oriMCMCInference.R |
# Copyright 2016 Joshua R. Davis
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
### INTRODUCTION ###
# This tutorial demonstrates Markov chain Monte Carlo (MCMC) simulation for
# orientational data. It is not self-contained. Rather, it is intended to be
# studied immediately after orientation inference tutorial about the western
# Idaho shear zone foliation-lineations. Like all tutorials in tutorialsC,
# this tutorial requires compilation of the C part of our R library.
# Warning: It is not easy for R to stop C code while it is running. Pressing
# the Stop button in RStudio may not immediately stop the program. Eventually
# an interface may appear, giving you the option of killing R entirely. So
# activate a C routine only if you're sure that you want to.
### PRELIMINARY WORK ###
# You are expected to have run the orientation inference tutorial about the
# western Idaho shear zone foliation-lineations immediately before this
# tutorial. That tutorial loads a data set and computes some predictions.
# The new thing here is: Execute the following line of code to load the C part
# of our library.
source("libraryC/all.R")
### CREDIBLE REGION ###
# Remember that the sample size is n = 23 and the Fisher concentration tensor
# K-hat has eigenvalues 33, 11, 0.000003. Based on the numerical experiments
# reported by Davis and Titus (2017), we proceed by Markov chain Monte Carlo
# simulation. The number of MCMC samples collected is 100 * 10,000 = 1,000,000.
wiszMCMC <- oricWrappedTrivariateNormalMCMCInference(wiszData$rotation, group=oriLineInPlaneGroup, numCollection=100)
# Although the MCMC credible region is computed based on all 1,000,000
# samples, only 10,000 of those samples are passed back to us for inspection.
# We're supposed to check that they form a tight, ellipsoidally shaped cloud.
# Yep.
oriEqualAnglePlot(wiszMCMC$ms, group=oriLineInPlaneGroup, simplePoints=TRUE)
rotEqualVolumePlot(wiszMCMC$ms, simplePoints=TRUE)
# Here are those samples in equal-area. The foliation poles are nearly
# horizontal and the lineation directions are nearly vertical.
lineEqualAreaPlot(c(lapply(wiszMCMC$ms, function(r) r[1,]), lapply(wiszMCMC$ms, function(r) r[2,])), shapes=c("."))
# Here's the ellipsoidal 95% credible region, containing the middle 95% of the
# MCMC samples. It is much closer to spherical than the bootstrap confidence
# region is. It is also larger.
rotEllipsoidPlot(wiszMCMC$ms, wiszMCMC$mBar, wiszMCMC$leftCovarInv, wiszMCMC$q095^2, simplePoints=TRUE, numNonAdapt=5)
### HYPOTHESIS TESTS ###
# This plot shows the predicted foliation-lineations missing the 95% credible
# region.
monoPredictions <- oriNearestRepresentatives(monoPredictions, wiszMCMC$mBar, group=oriLineInPlaneGroup)
rotEllipsoidPlot(monoPredictions, wiszMCMC$mBar, wiszMCMC$leftCovarInv, wiszMCMC$q095^2, numNonAdapt=4, simplePoints=TRUE)
# In other words, if we did a hypothesis test with any one of these
# predictions as the hypothesized mean, then that hypothesis would be rejected
# with a p-value less than 0.05. More precisely, here is the range of
# p-values attained. (The particular numbers you see will depend on exactly
# how your MCMC went.)
range(sapply(monoPredictions, wiszMCMC$pvalue))
# So we reject the entire proposed class of deformations, as an explanation
# for these data.
### CONCLUSION ###
# In this problem, MCMC simulation produces results similar to, but not
# identical to, those of the bootstrapping simulation.
|
7fc4a1a215f5562575eb5783e52fecef8602f10d | 63d97198709f3368d1c6d36739442efa699fe61d | /advanced algorithm/round3/k-server-analysis-master/data/tests/case222.rd | 5e8f8e9d51bd1665460c50bd067765172605df65 | [] | no_license | tawlas/master_2_school_projects | f6138d5ade91e924454b93dd8f4902ca5db6fd3c | 03ce4847155432053d7883f3b5c2debe9fbe1f5f | refs/heads/master | 2023-04-16T15:25:09.640859 | 2021-04-21T03:11:04 | 2021-04-21T03:11:04 | 360,009,035 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 64,187 | rd | case222.rd | 36
1 [33, 3, 18] 14 14 14 18 18
2 [33, 3, 19] 1 2 16 2 20
3 [33, 3, 18] 1 2 18 2 22
4 [33, 3, 19] 1 2 20 2 24
5 [33, 3, 18] 1 2 22 2 26
6 [33, 3, 19] 1 2 24 2 28
7 [33, 3, 18] 1 2 26 2 30
8 [33, 3, 19] 1 2 28 2 32
9 [33, 3, 18] 1 2 30 2 34
10 [33, 3, 19] 1 2 32 2 36
11 [33, 3, 18] 1 2 34 2 38
12 [33, 3, 19] 1 2 36 2 40
13 [33, 3, 18] 1 2 38 2 42
14 [19, 3, 18] 14 2 40 2 44
15 [19, 29, 18] 10 10 50 18 62
16 [19, 30, 18] 1 2 52 2 64
17 [19, 29, 18] 1 2 54 2 66
18 [19, 30, 18] 1 2 56 2 68
19 [19, 29, 18] 1 2 58 2 70
20 [19, 30, 18] 1 2 60 2 72
21 [19, 29, 18] 1 2 62 2 74
22 [19, 30, 18] 1 2 64 2 76
23 [19, 29, 18] 1 2 66 2 78
24 [19, 30, 18] 1 2 68 2 80
25 [19, 29, 18] 1 2 70 2 82
26 [19, 30, 18] 1 2 72 2 84
27 [19, 29, 18] 1 2 74 2 86
28 [19, 30, 18] 1 2 76 2 88
29 [19, 29, 18] 1 2 78 2 90
30 [19, 30, 18] 1 2 80 2 92
31 [19, 29, 18] 1 2 82 2 94
32 [19, 30, 18] 1 2 84 2 96
33 [19, 29, 18] 1 2 86 2 98
34 [19, 30, 18] 1 2 88 2 100
35 [29, 30, 18] 10 1 89 1 101
36 [29, 6, 18] 12 24 113 24 125
37 [29, 7, 18] 1 2 115 2 127
38 [29, 6, 18] 1 2 117 2 129
39 [29, 7, 18] 1 2 119 2 131
40 [29, 6, 18] 1 2 121 2 133
41 [29, 7, 18] 1 2 123 2 135
42 [29, 6, 18] 1 2 125 2 137
43 [29, 7, 18] 1 2 127 2 139
44 [29, 6, 18] 1 2 129 2 141
45 [29, 7, 18] 1 2 131 2 143
46 [29, 6, 18] 1 2 133 2 145
47 [29, 7, 18] 1 2 135 2 147
48 [29, 6, 18] 1 2 137 2 149
49 [29, 7, 18] 1 2 139 2 151
50 [29, 6, 18] 1 2 141 2 153
51 [29, 7, 18] 1 2 143 2 155
52 [29, 6, 18] 1 2 145 2 157
53 [29, 7, 18] 1 2 147 2 159
54 [29, 6, 18] 1 2 149 2 161
55 [29, 7, 18] 1 2 151 2 163
56 [29, 6, 18] 1 2 153 2 165
57 [29, 7, 18] 1 2 155 2 167
58 [29, 6, 18] 1 2 157 2 169
59 [29, 6, 7] 11 0 157 2 171
60 [18, 6, 7] 11 14 171 14 185
61 [19, 6, 7] 1 2 173 2 187
62 [18, 6, 7] 1 2 175 2 189
63 [19, 6, 7] 1 2 177 2 191
64 [18, 6, 7] 1 2 179 2 193
65 [19, 6, 7] 1 2 181 2 195
66 [18, 6, 7] 1 2 183 2 197
67 [19, 6, 7] 1 2 185 2 199
68 [18, 6, 7] 1 2 187 2 201
69 [19, 6, 7] 1 2 189 2 203
70 [18, 6, 7] 1 2 191 2 205
71 [19, 6, 7] 1 2 193 2 207
72 [18, 6, 7] 1 2 195 2 209
73 [19, 6, 7] 1 2 197 2 211
74 [18, 6, 7] 1 2 199 2 213
75 [19, 6, 7] 1 2 201 2 215
76 [18, 6, 7] 1 2 203 2 217
77 [19, 6, 7] 1 2 205 2 219
78 [18, 6, 7] 1 2 207 2 221
79 [19, 6, 7] 1 2 209 2 223
80 [18, 6, 7] 1 2 211 2 225
81 [19, 6, 7] 1 2 213 2 227
82 [18, 6, 7] 1 2 215 2 229
83 [19, 6, 7] 1 2 217 2 231
84 [19, 6, 18] 11 0 217 0 231
85 [30, 6, 18] 11 22 239 22 253
86 [31, 6, 18] 1 2 241 2 255
87 [30, 6, 18] 1 2 243 2 257
88 [31, 6, 18] 1 2 245 2 259
89 [30, 6, 18] 1 2 247 2 261
90 [31, 6, 18] 1 2 249 2 263
91 [30, 6, 18] 1 2 251 2 265
92 [31, 6, 18] 1 2 253 2 267
93 [30, 6, 18] 1 2 255 2 269
94 [31, 6, 18] 1 2 257 2 271
95 [30, 6, 18] 1 2 259 2 273
96 [31, 6, 18] 1 2 261 2 275
97 [30, 6, 18] 1 2 263 2 277
98 [31, 6, 18] 1 2 265 2 279
99 [30, 6, 18] 1 2 267 2 281
100 [31, 6, 18] 1 2 269 2 283
101 [30, 6, 18] 1 2 271 2 285
102 [31, 6, 18] 1 2 273 2 287
103 [30, 6, 18] 1 2 275 2 289
104 [31, 6, 18] 1 2 277 2 291
105 [30, 6, 18] 1 2 279 2 293
106 [31, 6, 18] 1 2 281 2 295
107 [30, 6, 18] 1 2 283 2 297
108 [30, 31, 18] 11 0 283 2 299
109 [30, 31, 6] 12 14 297 14 313
110 [30, 31, 7] 1 2 299 2 315
111 [30, 31, 6] 1 2 301 2 317
112 [30, 31, 7] 1 2 303 2 319
113 [30, 31, 6] 1 2 305 2 321
114 [30, 31, 7] 1 2 307 2 323
115 [30, 31, 6] 1 2 309 2 325
116 [30, 31, 7] 1 2 311 2 327
117 [30, 31, 6] 1 2 313 2 329
118 [30, 31, 7] 1 2 315 2 331
119 [30, 31, 6] 1 2 317 2 333
120 [30, 31, 7] 1 2 319 2 335
121 [30, 31, 6] 1 2 321 2 337
122 [30, 31, 7] 1 2 323 2 339
123 [30, 31, 6] 1 2 325 2 341
124 [30, 31, 7] 1 2 327 2 343
125 [30, 31, 6] 1 2 329 2 345
126 [30, 31, 7] 1 2 331 2 347
127 [30, 31, 6] 1 2 333 2 349
128 [30, 31, 7] 1 2 335 2 351
129 [30, 31, 6] 1 2 337 2 353
130 [30, 31, 7] 1 2 339 2 355
131 [30, 6, 7] 11 2 341 2 357
132 [18, 6, 7] 12 22 363 22 379
133 [19, 6, 7] 1 2 365 2 381
134 [18, 6, 7] 1 2 367 2 383
135 [19, 6, 7] 1 2 369 2 385
136 [18, 6, 7] 1 2 371 2 387
137 [19, 6, 7] 1 2 373 2 389
138 [18, 6, 7] 1 2 375 2 391
139 [19, 6, 7] 1 2 377 2 393
140 [18, 6, 7] 1 2 379 2 395
141 [19, 6, 7] 1 2 381 2 397
142 [18, 6, 7] 1 2 383 2 399
143 [19, 6, 7] 1 2 385 2 401
144 [18, 6, 7] 1 2 387 2 403
145 [19, 6, 7] 1 2 389 2 405
146 [18, 6, 7] 1 2 391 2 407
147 [19, 6, 7] 1 2 393 2 409
148 [18, 6, 7] 1 2 395 2 411
149 [19, 6, 7] 1 2 397 2 413
150 [18, 6, 7] 1 2 399 2 415
151 [19, 6, 7] 1 2 401 2 417
152 [18, 6, 7] 1 2 403 2 419
153 [19, 6, 7] 1 2 405 2 421
154 [18, 6, 7] 1 2 407 2 423
155 [19, 6, 7] 1 2 409 2 425
156 [19, 6, 18] 11 0 409 0 425
157 [19, 30, 18] 12 14 423 14 439
158 [19, 31, 18] 1 2 425 2 441
159 [19, 30, 18] 1 2 427 2 443
160 [19, 31, 18] 1 2 429 2 445
161 [19, 30, 18] 1 2 431 2 447
162 [19, 31, 18] 1 2 433 2 449
163 [19, 30, 18] 1 2 435 2 451
164 [19, 31, 18] 1 2 437 2 453
165 [19, 30, 18] 1 2 439 2 455
166 [19, 31, 18] 1 2 441 2 457
167 [19, 30, 18] 1 2 443 2 459
168 [19, 31, 18] 1 2 445 2 461
169 [19, 30, 18] 1 2 447 2 463
170 [19, 31, 18] 1 2 449 2 465
171 [19, 30, 18] 1 2 451 2 467
172 [19, 31, 18] 1 2 453 2 469
173 [19, 30, 18] 1 2 455 2 471
174 [19, 31, 18] 1 2 457 2 473
175 [19, 30, 18] 1 2 459 2 475
176 [19, 31, 18] 1 2 461 2 477
177 [19, 30, 18] 1 2 463 2 479
178 [19, 31, 18] 1 2 465 2 481
179 [30, 31, 18] 11 2 467 2 483
180 [30, 6, 18] 11 22 489 22 505
181 [30, 7, 18] 1 2 491 2 507
182 [30, 6, 18] 1 2 493 2 509
183 [30, 7, 18] 1 2 495 2 511
184 [30, 6, 18] 1 2 497 2 513
185 [30, 7, 18] 1 2 499 2 515
186 [30, 6, 18] 1 2 501 2 517
187 [30, 7, 18] 1 2 503 2 519
188 [30, 6, 18] 1 2 505 2 521
189 [30, 7, 18] 1 2 507 2 523
190 [30, 6, 18] 1 2 509 2 525
191 [30, 7, 18] 1 2 511 2 527
192 [30, 6, 18] 1 2 513 2 529
193 [30, 7, 18] 1 2 515 2 531
194 [30, 6, 18] 1 2 517 2 533
195 [30, 7, 18] 1 2 519 2 535
196 [30, 6, 18] 1 2 521 2 537
197 [30, 7, 18] 1 2 523 2 539
198 [30, 6, 18] 1 2 525 2 541
199 [30, 7, 18] 1 2 527 2 543
200 [30, 6, 18] 1 2 529 2 545
201 [30, 7, 18] 1 2 531 2 547
202 [30, 6, 18] 1 2 533 2 549
203 [30, 6, 7] 11 0 533 2 551
204 [18, 6, 7] 12 14 547 14 565
205 [19, 6, 7] 1 2 549 2 567
206 [18, 6, 7] 1 2 551 2 569
207 [19, 6, 7] 1 2 553 2 571
208 [18, 6, 7] 1 2 555 2 573
209 [19, 6, 7] 1 2 557 2 575
210 [18, 6, 7] 1 2 559 2 577
211 [19, 6, 7] 1 2 561 2 579
212 [18, 6, 7] 1 2 563 2 581
213 [19, 6, 7] 1 2 565 2 583
214 [18, 6, 7] 1 2 567 2 585
215 [19, 6, 7] 1 2 569 2 587
216 [18, 6, 7] 1 2 571 2 589
217 [19, 6, 7] 1 2 573 2 591
218 [18, 6, 7] 1 2 575 2 593
219 [19, 6, 7] 1 2 577 2 595
220 [18, 6, 7] 1 2 579 2 597
221 [19, 6, 7] 1 2 581 2 599
222 [18, 6, 7] 1 2 583 2 601
223 [19, 6, 7] 1 2 585 2 603
224 [18, 6, 7] 1 2 587 2 605
225 [19, 6, 7] 1 2 589 2 607
226 [18, 6, 7] 1 2 591 2 609
227 [19, 6, 7] 1 2 593 2 611
228 [19, 6, 18] 11 0 593 0 611
229 [30, 6, 18] 11 22 615 22 633
230 [31, 6, 18] 1 2 617 2 635
231 [30, 6, 18] 1 2 619 2 637
232 [31, 6, 18] 1 2 621 2 639
233 [30, 6, 18] 1 2 623 2 641
234 [31, 6, 18] 1 2 625 2 643
235 [30, 6, 18] 1 2 627 2 645
236 [31, 6, 18] 1 2 629 2 647
237 [30, 6, 18] 1 2 631 2 649
238 [31, 6, 18] 1 2 633 2 651
239 [30, 6, 18] 1 2 635 2 653
240 [31, 6, 18] 1 2 637 2 655
241 [30, 6, 18] 1 2 639 2 657
242 [31, 6, 18] 1 2 641 2 659
243 [30, 6, 18] 1 2 643 2 661
244 [31, 6, 18] 1 2 645 2 663
245 [30, 6, 18] 1 2 647 2 665
246 [31, 6, 18] 1 2 649 2 667
247 [30, 6, 18] 1 2 651 2 669
248 [31, 6, 18] 1 2 653 2 671
249 [30, 6, 18] 1 2 655 2 673
250 [31, 6, 18] 1 2 657 2 675
251 [30, 6, 18] 1 2 659 2 677
252 [30, 31, 18] 11 0 659 2 679
253 [30, 31, 6] 12 14 673 14 693
254 [30, 31, 7] 1 2 675 2 695
255 [30, 31, 6] 1 2 677 2 697
256 [30, 31, 7] 1 2 679 2 699
257 [30, 31, 6] 1 2 681 2 701
258 [30, 31, 7] 1 2 683 2 703
259 [30, 31, 6] 1 2 685 2 705
260 [30, 31, 7] 1 2 687 2 707
261 [30, 31, 6] 1 2 689 2 709
262 [30, 31, 7] 1 2 691 2 711
263 [30, 31, 6] 1 2 693 2 713
264 [30, 31, 7] 1 2 695 2 715
265 [30, 31, 6] 1 2 697 2 717
266 [30, 31, 7] 1 2 699 2 719
267 [30, 31, 6] 1 2 701 2 721
268 [30, 31, 7] 1 2 703 2 723
269 [30, 31, 6] 1 2 705 2 725
270 [30, 31, 7] 1 2 707 2 727
271 [30, 31, 6] 1 2 709 2 729
272 [30, 31, 7] 1 2 711 2 731
273 [30, 31, 6] 1 2 713 2 733
274 [30, 31, 7] 1 2 715 2 735
275 [30, 6, 7] 11 2 717 2 737
276 [18, 6, 7] 12 22 739 22 759
277 [19, 6, 7] 1 2 741 2 761
278 [18, 6, 7] 1 2 743 2 763
279 [19, 6, 7] 1 2 745 2 765
280 [18, 6, 7] 1 2 747 2 767
281 [19, 6, 7] 1 2 749 2 769
282 [18, 6, 7] 1 2 751 2 771
283 [19, 6, 7] 1 2 753 2 773
284 [18, 6, 7] 1 2 755 2 775
285 [19, 6, 7] 1 2 757 2 777
286 [18, 6, 7] 1 2 759 2 779
287 [19, 6, 7] 1 2 761 2 781
288 [18, 6, 7] 1 2 763 2 783
289 [19, 6, 7] 1 2 765 2 785
290 [18, 6, 7] 1 2 767 2 787
291 [19, 6, 7] 1 2 769 2 789
292 [18, 6, 7] 1 2 771 2 791
293 [19, 6, 7] 1 2 773 2 793
294 [18, 6, 7] 1 2 775 2 795
295 [19, 6, 7] 1 2 777 2 797
296 [18, 6, 7] 1 2 779 2 799
297 [19, 6, 7] 1 2 781 2 801
298 [18, 6, 7] 1 2 783 2 803
299 [19, 6, 7] 1 2 785 2 805
300 [19, 6, 18] 11 0 785 0 805
301 [19, 30, 18] 12 14 799 14 819
302 [19, 31, 18] 1 2 801 2 821
303 [19, 30, 18] 1 2 803 2 823
304 [19, 31, 18] 1 2 805 2 825
305 [19, 30, 18] 1 2 807 2 827
306 [19, 31, 18] 1 2 809 2 829
307 [19, 30, 18] 1 2 811 2 831
308 [19, 31, 18] 1 2 813 2 833
309 [19, 30, 18] 1 2 815 2 835
310 [19, 31, 18] 1 2 817 2 837
311 [19, 30, 18] 1 2 819 2 839
312 [19, 31, 18] 1 2 821 2 841
313 [19, 30, 18] 1 2 823 2 843
314 [19, 31, 18] 1 2 825 2 845
315 [19, 30, 18] 1 2 827 2 847
316 [19, 31, 18] 1 2 829 2 849
317 [19, 30, 18] 1 2 831 2 851
318 [19, 31, 18] 1 2 833 2 853
319 [19, 30, 18] 1 2 835 2 855
320 [19, 31, 18] 1 2 837 2 857
321 [19, 30, 18] 1 2 839 2 859
322 [19, 31, 18] 1 2 841 2 861
323 [30, 31, 18] 11 2 843 2 863
324 [30, 6, 18] 11 22 865 22 885
325 [30, 7, 18] 1 2 867 2 887
326 [30, 6, 18] 1 2 869 2 889
327 [30, 7, 18] 1 2 871 2 891
328 [30, 6, 18] 1 2 873 2 893
329 [30, 7, 18] 1 2 875 2 895
330 [30, 6, 18] 1 2 877 2 897
331 [30, 7, 18] 1 2 879 2 899
332 [30, 6, 18] 1 2 881 2 901
333 [30, 7, 18] 1 2 883 2 903
334 [30, 6, 18] 1 2 885 2 905
335 [30, 7, 18] 1 2 887 2 907
336 [30, 6, 18] 1 2 889 2 909
337 [30, 7, 18] 1 2 891 2 911
338 [30, 6, 18] 1 2 893 2 913
339 [30, 7, 18] 1 2 895 2 915
340 [30, 6, 18] 1 2 897 2 917
341 [30, 7, 18] 1 2 899 2 919
342 [30, 6, 18] 1 2 901 2 921
343 [30, 7, 18] 1 2 903 2 923
344 [30, 6, 18] 1 2 905 2 925
345 [30, 7, 18] 1 2 907 2 927
346 [30, 6, 18] 1 2 909 2 929
347 [30, 6, 7] 11 0 909 2 931
348 [18, 6, 7] 12 14 923 14 945
349 [19, 6, 7] 1 2 925 2 947
350 [18, 6, 7] 1 2 927 2 949
351 [19, 6, 7] 1 2 929 2 951
352 [18, 6, 7] 1 2 931 2 953
353 [19, 6, 7] 1 2 933 2 955
354 [18, 6, 7] 1 2 935 2 957
355 [19, 6, 7] 1 2 937 2 959
356 [18, 6, 7] 1 2 939 2 961
357 [19, 6, 7] 1 2 941 2 963
358 [18, 6, 7] 1 2 943 2 965
359 [19, 6, 7] 1 2 945 2 967
360 [18, 6, 7] 1 2 947 2 969
361 [19, 6, 7] 1 2 949 2 971
362 [18, 6, 7] 1 2 951 2 973
363 [19, 6, 7] 1 2 953 2 975
364 [18, 6, 7] 1 2 955 2 977
365 [19, 6, 7] 1 2 957 2 979
366 [18, 6, 7] 1 2 959 2 981
367 [19, 6, 7] 1 2 961 2 983
368 [18, 6, 7] 1 2 963 2 985
369 [19, 6, 7] 1 2 965 2 987
370 [18, 6, 7] 1 2 967 2 989
371 [19, 6, 7] 1 2 969 2 991
372 [19, 6, 18] 11 0 969 0 991
373 [30, 6, 18] 11 22 991 22 1013
374 [31, 6, 18] 1 2 993 2 1015
375 [30, 6, 18] 1 2 995 2 1017
376 [31, 6, 18] 1 2 997 2 1019
377 [30, 6, 18] 1 2 999 2 1021
378 [31, 6, 18] 1 2 1001 2 1023
379 [30, 6, 18] 1 2 1003 2 1025
380 [31, 6, 18] 1 2 1005 2 1027
381 [30, 6, 18] 1 2 1007 2 1029
382 [31, 6, 18] 1 2 1009 2 1031
383 [30, 6, 18] 1 2 1011 2 1033
384 [31, 6, 18] 1 2 1013 2 1035
385 [30, 6, 18] 1 2 1015 2 1037
386 [31, 6, 18] 1 2 1017 2 1039
387 [30, 6, 18] 1 2 1019 2 1041
388 [31, 6, 18] 1 2 1021 2 1043
389 [30, 6, 18] 1 2 1023 2 1045
390 [31, 6, 18] 1 2 1025 2 1047
391 [30, 6, 18] 1 2 1027 2 1049
392 [31, 6, 18] 1 2 1029 2 1051
393 [30, 6, 18] 1 2 1031 2 1053
394 [31, 6, 18] 1 2 1033 2 1055
395 [30, 6, 18] 1 2 1035 2 1057
396 [30, 31, 18] 11 0 1035 2 1059
397 [30, 31, 6] 12 14 1049 14 1073
398 [30, 31, 7] 1 2 1051 2 1075
399 [30, 31, 6] 1 2 1053 2 1077
400 [30, 31, 7] 1 2 1055 2 1079
401 [30, 31, 6] 1 2 1057 2 1081
402 [30, 31, 7] 1 2 1059 2 1083
403 [30, 31, 6] 1 2 1061 2 1085
404 [30, 31, 7] 1 2 1063 2 1087
405 [30, 31, 6] 1 2 1065 2 1089
406 [30, 31, 7] 1 2 1067 2 1091
407 [30, 31, 6] 1 2 1069 2 1093
408 [30, 31, 7] 1 2 1071 2 1095
409 [30, 31, 6] 1 2 1073 2 1097
410 [30, 31, 7] 1 2 1075 2 1099
411 [30, 31, 6] 1 2 1077 2 1101
412 [30, 31, 7] 1 2 1079 2 1103
413 [30, 31, 6] 1 2 1081 2 1105
414 [30, 31, 7] 1 2 1083 2 1107
415 [30, 31, 6] 1 2 1085 2 1109
416 [30, 31, 7] 1 2 1087 2 1111
417 [30, 31, 6] 1 2 1089 2 1113
418 [30, 31, 7] 1 2 1091 2 1115
419 [30, 6, 7] 11 2 1093 2 1117
420 [18, 6, 7] 12 22 1115 22 1139
421 [19, 6, 7] 1 2 1117 2 1141
422 [18, 6, 7] 1 2 1119 2 1143
423 [19, 6, 7] 1 2 1121 2 1145
424 [18, 6, 7] 1 2 1123 2 1147
425 [19, 6, 7] 1 2 1125 2 1149
426 [18, 6, 7] 1 2 1127 2 1151
427 [19, 6, 7] 1 2 1129 2 1153
428 [18, 6, 7] 1 2 1131 2 1155
429 [19, 6, 7] 1 2 1133 2 1157
430 [18, 6, 7] 1 2 1135 2 1159
431 [19, 6, 7] 1 2 1137 2 1161
432 [18, 6, 7] 1 2 1139 2 1163
433 [19, 6, 7] 1 2 1141 2 1165
434 [18, 6, 7] 1 2 1143 2 1167
435 [19, 6, 7] 1 2 1145 2 1169
436 [18, 6, 7] 1 2 1147 2 1171
437 [19, 6, 7] 1 2 1149 2 1173
438 [18, 6, 7] 1 2 1151 2 1175
439 [19, 6, 7] 1 2 1153 2 1177
440 [18, 6, 7] 1 2 1155 2 1179
441 [19, 6, 7] 1 2 1157 2 1181
442 [18, 6, 7] 1 2 1159 2 1183
443 [19, 6, 7] 1 2 1161 2 1185
444 [19, 6, 18] 11 0 1161 0 1185
445 [19, 30, 18] 12 14 1175 14 1199
446 [19, 31, 18] 1 2 1177 2 1201
447 [19, 30, 18] 1 2 1179 2 1203
448 [19, 31, 18] 1 2 1181 2 1205
449 [19, 30, 18] 1 2 1183 2 1207
450 [19, 31, 18] 1 2 1185 2 1209
451 [19, 30, 18] 1 2 1187 2 1211
452 [19, 31, 18] 1 2 1189 2 1213
453 [19, 30, 18] 1 2 1191 2 1215
454 [19, 31, 18] 1 2 1193 2 1217
455 [19, 30, 18] 1 2 1195 2 1219
456 [19, 31, 18] 1 2 1197 2 1221
457 [19, 30, 18] 1 2 1199 2 1223
458 [19, 31, 18] 1 2 1201 2 1225
459 [19, 30, 18] 1 2 1203 2 1227
460 [19, 31, 18] 1 2 1205 2 1229
461 [19, 30, 18] 1 2 1207 2 1231
462 [19, 31, 18] 1 2 1209 2 1233
463 [19, 30, 18] 1 2 1211 2 1235
464 [19, 31, 18] 1 2 1213 2 1237
465 [19, 30, 18] 1 2 1215 2 1239
466 [19, 31, 18] 1 2 1217 2 1241
467 [30, 31, 18] 11 2 1219 2 1243
468 [30, 6, 18] 11 22 1241 22 1265
469 [30, 7, 18] 1 2 1243 2 1267
470 [30, 6, 18] 1 2 1245 2 1269
471 [30, 7, 18] 1 2 1247 2 1271
472 [30, 6, 18] 1 2 1249 2 1273
473 [30, 7, 18] 1 2 1251 2 1275
474 [30, 6, 18] 1 2 1253 2 1277
475 [30, 7, 18] 1 2 1255 2 1279
476 [30, 6, 18] 1 2 1257 2 1281
477 [30, 7, 18] 1 2 1259 2 1283
478 [30, 6, 18] 1 2 1261 2 1285
479 [30, 7, 18] 1 2 1263 2 1287
480 [30, 6, 18] 1 2 1265 2 1289
481 [30, 7, 18] 1 2 1267 2 1291
482 [30, 6, 18] 1 2 1269 2 1293
483 [30, 7, 18] 1 2 1271 2 1295
484 [30, 6, 18] 1 2 1273 2 1297
485 [30, 7, 18] 1 2 1275 2 1299
486 [30, 6, 18] 1 2 1277 2 1301
487 [30, 7, 18] 1 2 1279 2 1303
488 [30, 6, 18] 1 2 1281 2 1305
489 [30, 7, 18] 1 2 1283 2 1307
490 [30, 6, 18] 1 2 1285 2 1309
491 [30, 6, 7] 11 0 1285 2 1311
492 [18, 6, 7] 12 14 1299 14 1325
493 [19, 6, 7] 1 2 1301 2 1327
494 [18, 6, 7] 1 2 1303 2 1329
495 [19, 6, 7] 1 2 1305 2 1331
496 [18, 6, 7] 1 2 1307 2 1333
497 [19, 6, 7] 1 2 1309 2 1335
498 [18, 6, 7] 1 2 1311 2 1337
499 [19, 6, 7] 1 2 1313 2 1339
500 [18, 6, 7] 1 2 1315 2 1341
501 [19, 6, 7] 1 2 1317 2 1343
502 [18, 6, 7] 1 2 1319 2 1345
503 [19, 6, 7] 1 2 1321 2 1347
504 [18, 6, 7] 1 2 1323 2 1349
505 [19, 6, 7] 1 2 1325 2 1351
506 [18, 6, 7] 1 2 1327 2 1353
507 [19, 6, 7] 1 2 1329 2 1355
508 [18, 6, 7] 1 2 1331 2 1357
509 [19, 6, 7] 1 2 1333 2 1359
510 [18, 6, 7] 1 2 1335 2 1361
511 [19, 6, 7] 1 2 1337 2 1363
512 [18, 6, 7] 1 2 1339 2 1365
513 [19, 6, 7] 1 2 1341 2 1367
514 [18, 6, 7] 1 2 1343 2 1369
515 [19, 6, 7] 1 2 1345 2 1371
516 [19, 6, 18] 11 0 1345 0 1371
517 [30, 6, 18] 11 22 1367 22 1393
518 [31, 6, 18] 1 2 1369 2 1395
519 [30, 6, 18] 1 2 1371 2 1397
520 [31, 6, 18] 1 2 1373 2 1399
521 [30, 6, 18] 1 2 1375 2 1401
522 [31, 6, 18] 1 2 1377 2 1403
523 [30, 6, 18] 1 2 1379 2 1405
524 [31, 6, 18] 1 2 1381 2 1407
525 [30, 6, 18] 1 2 1383 2 1409
526 [31, 6, 18] 1 2 1385 2 1411
527 [30, 6, 18] 1 2 1387 2 1413
528 [31, 6, 18] 1 2 1389 2 1415
529 [30, 6, 18] 1 2 1391 2 1417
530 [31, 6, 18] 1 2 1393 2 1419
531 [30, 6, 18] 1 2 1395 2 1421
532 [31, 6, 18] 1 2 1397 2 1423
533 [30, 6, 18] 1 2 1399 2 1425
534 [31, 6, 18] 1 2 1401 2 1427
535 [30, 6, 18] 1 2 1403 2 1429
536 [31, 6, 18] 1 2 1405 2 1431
537 [30, 6, 18] 1 2 1407 2 1433
538 [31, 6, 18] 1 2 1409 2 1435
539 [30, 6, 18] 1 2 1411 2 1437
540 [30, 31, 18] 11 0 1411 2 1439
541 [30, 31, 6] 12 14 1425 14 1453
542 [30, 31, 7] 1 2 1427 2 1455
543 [30, 31, 6] 1 2 1429 2 1457
544 [30, 31, 7] 1 2 1431 2 1459
545 [30, 31, 6] 1 2 1433 2 1461
546 [30, 31, 7] 1 2 1435 2 1463
547 [30, 31, 6] 1 2 1437 2 1465
548 [30, 31, 7] 1 2 1439 2 1467
549 [30, 31, 6] 1 2 1441 2 1469
550 [30, 31, 7] 1 2 1443 2 1471
551 [30, 31, 6] 1 2 1445 2 1473
552 [30, 31, 7] 1 2 1447 2 1475
553 [30, 31, 6] 1 2 1449 2 1477
554 [30, 31, 7] 1 2 1451 2 1479
555 [30, 31, 6] 1 2 1453 2 1481
556 [30, 31, 7] 1 2 1455 2 1483
557 [30, 31, 6] 1 2 1457 2 1485
558 [30, 31, 7] 1 2 1459 2 1487
559 [30, 31, 6] 1 2 1461 2 1489
560 [30, 31, 7] 1 2 1463 2 1491
561 [30, 31, 6] 1 2 1465 2 1493
562 [30, 31, 7] 1 2 1467 2 1495
563 [30, 6, 7] 11 2 1469 2 1497
564 [18, 6, 7] 12 22 1491 22 1519
565 [19, 6, 7] 1 2 1493 2 1521
566 [18, 6, 7] 1 2 1495 2 1523
567 [19, 6, 7] 1 2 1497 2 1525
568 [18, 6, 7] 1 2 1499 2 1527
569 [19, 6, 7] 1 2 1501 2 1529
570 [18, 6, 7] 1 2 1503 2 1531
571 [19, 6, 7] 1 2 1505 2 1533
572 [18, 6, 7] 1 2 1507 2 1535
573 [19, 6, 7] 1 2 1509 2 1537
574 [18, 6, 7] 1 2 1511 2 1539
575 [19, 6, 7] 1 2 1513 2 1541
576 [18, 6, 7] 1 2 1515 2 1543
577 [19, 6, 7] 1 2 1517 2 1545
578 [18, 6, 7] 1 2 1519 2 1547
579 [19, 6, 7] 1 2 1521 2 1549
580 [18, 6, 7] 1 2 1523 2 1551
581 [19, 6, 7] 1 2 1525 2 1553
582 [18, 6, 7] 1 2 1527 2 1555
583 [19, 6, 7] 1 2 1529 2 1557
584 [18, 6, 7] 1 2 1531 2 1559
585 [19, 6, 7] 1 2 1533 2 1561
586 [18, 6, 7] 1 2 1535 2 1563
587 [19, 6, 7] 1 2 1537 2 1565
588 [19, 6, 18] 11 0 1537 0 1565
589 [19, 30, 18] 12 14 1551 14 1579
590 [19, 31, 18] 1 2 1553 2 1581
591 [19, 30, 18] 1 2 1555 2 1583
592 [19, 31, 18] 1 2 1557 2 1585
593 [19, 30, 18] 1 2 1559 2 1587
594 [19, 31, 18] 1 2 1561 2 1589
595 [19, 30, 18] 1 2 1563 2 1591
596 [19, 31, 18] 1 2 1565 2 1593
597 [19, 30, 18] 1 2 1567 2 1595
598 [19, 31, 18] 1 2 1569 2 1597
599 [19, 30, 18] 1 2 1571 2 1599
600 [19, 31, 18] 1 2 1573 2 1601
601 [19, 30, 18] 1 2 1575 2 1603
602 [19, 31, 18] 1 2 1577 2 1605
603 [19, 30, 18] 1 2 1579 2 1607
604 [19, 31, 18] 1 2 1581 2 1609
605 [19, 30, 18] 1 2 1583 2 1611
606 [19, 31, 18] 1 2 1585 2 1613
607 [19, 30, 18] 1 2 1587 2 1615
608 [19, 31, 18] 1 2 1589 2 1617
609 [19, 30, 18] 1 2 1591 2 1619
610 [19, 31, 18] 1 2 1593 2 1621
611 [30, 31, 18] 11 2 1595 2 1623
612 [30, 6, 18] 11 22 1617 22 1645
613 [30, 7, 18] 1 2 1619 2 1647
614 [30, 6, 18] 1 2 1621 2 1649
615 [30, 7, 18] 1 2 1623 2 1651
616 [30, 6, 18] 1 2 1625 2 1653
617 [30, 7, 18] 1 2 1627 2 1655
618 [30, 6, 18] 1 2 1629 2 1657
619 [30, 7, 18] 1 2 1631 2 1659
620 [30, 6, 18] 1 2 1633 2 1661
621 [30, 7, 18] 1 2 1635 2 1663
622 [30, 6, 18] 1 2 1637 2 1665
623 [30, 7, 18] 1 2 1639 2 1667
624 [30, 6, 18] 1 2 1641 2 1669
625 [30, 7, 18] 1 2 1643 2 1671
626 [30, 6, 18] 1 2 1645 2 1673
627 [30, 7, 18] 1 2 1647 2 1675
628 [30, 6, 18] 1 2 1649 2 1677
629 [30, 7, 18] 1 2 1651 2 1679
630 [30, 6, 18] 1 2 1653 2 1681
631 [30, 7, 18] 1 2 1655 2 1683
632 [30, 6, 18] 1 2 1657 2 1685
633 [30, 7, 18] 1 2 1659 2 1687
634 [30, 6, 18] 1 2 1661 2 1689
635 [30, 6, 7] 11 0 1661 2 1691
636 [18, 6, 7] 12 14 1675 14 1705
637 [19, 6, 7] 1 2 1677 2 1707
638 [18, 6, 7] 1 2 1679 2 1709
639 [19, 6, 7] 1 2 1681 2 1711
640 [18, 6, 7] 1 2 1683 2 1713
641 [19, 6, 7] 1 2 1685 2 1715
642 [18, 6, 7] 1 2 1687 2 1717
643 [19, 6, 7] 1 2 1689 2 1719
644 [18, 6, 7] 1 2 1691 2 1721
645 [19, 6, 7] 1 2 1693 2 1723
646 [18, 6, 7] 1 2 1695 2 1725
647 [19, 6, 7] 1 2 1697 2 1727
648 [18, 6, 7] 1 2 1699 2 1729
649 [19, 6, 7] 1 2 1701 2 1731
650 [18, 6, 7] 1 2 1703 2 1733
651 [19, 6, 7] 1 2 1705 2 1735
652 [18, 6, 7] 1 2 1707 2 1737
653 [19, 6, 7] 1 2 1709 2 1739
654 [18, 6, 7] 1 2 1711 2 1741
655 [19, 6, 7] 1 2 1713 2 1743
656 [18, 6, 7] 1 2 1715 2 1745
657 [19, 6, 7] 1 2 1717 2 1747
658 [18, 6, 7] 1 2 1719 2 1749
659 [19, 6, 7] 1 2 1721 2 1751
660 [19, 6, 18] 11 0 1721 0 1751
661 [30, 6, 18] 11 22 1743 22 1773
662 [31, 6, 18] 1 2 1745 2 1775
663 [30, 6, 18] 1 2 1747 2 1777
664 [31, 6, 18] 1 2 1749 2 1779
665 [30, 6, 18] 1 2 1751 2 1781
666 [31, 6, 18] 1 2 1753 2 1783
667 [30, 6, 18] 1 2 1755 2 1785
668 [31, 6, 18] 1 2 1757 2 1787
669 [30, 6, 18] 1 2 1759 2 1789
670 [31, 6, 18] 1 2 1761 2 1791
671 [30, 6, 18] 1 2 1763 2 1793
672 [31, 6, 18] 1 2 1765 2 1795
673 [30, 6, 18] 1 2 1767 2 1797
674 [31, 6, 18] 1 2 1769 2 1799
675 [30, 6, 18] 1 2 1771 2 1801
676 [31, 6, 18] 1 2 1773 2 1803
677 [30, 6, 18] 1 2 1775 2 1805
678 [31, 6, 18] 1 2 1777 2 1807
679 [30, 6, 18] 1 2 1779 2 1809
680 [31, 6, 18] 1 2 1781 2 1811
681 [30, 6, 18] 1 2 1783 2 1813
682 [31, 6, 18] 1 2 1785 2 1815
683 [30, 6, 18] 1 2 1787 2 1817
684 [30, 31, 18] 11 0 1787 2 1819
685 [30, 31, 6] 12 14 1801 14 1833
686 [30, 31, 7] 1 2 1803 2 1835
687 [30, 31, 6] 1 2 1805 2 1837
688 [30, 31, 7] 1 2 1807 2 1839
689 [30, 31, 6] 1 2 1809 2 1841
690 [30, 31, 7] 1 2 1811 2 1843
691 [30, 31, 6] 1 2 1813 2 1845
692 [30, 31, 7] 1 2 1815 2 1847
693 [30, 31, 6] 1 2 1817 2 1849
694 [30, 31, 7] 1 2 1819 2 1851
695 [30, 31, 6] 1 2 1821 2 1853
696 [30, 31, 7] 1 2 1823 2 1855
697 [30, 31, 6] 1 2 1825 2 1857
698 [30, 31, 7] 1 2 1827 2 1859
699 [30, 31, 6] 1 2 1829 2 1861
700 [30, 31, 7] 1 2 1831 2 1863
701 [30, 31, 6] 1 2 1833 2 1865
702 [30, 31, 7] 1 2 1835 2 1867
703 [30, 31, 6] 1 2 1837 2 1869
704 [30, 31, 7] 1 2 1839 2 1871
705 [30, 31, 6] 1 2 1841 2 1873
706 [30, 31, 7] 1 2 1843 2 1875
707 [30, 6, 7] 11 2 1845 2 1877
708 [18, 6, 7] 12 22 1867 22 1899
709 [19, 6, 7] 1 2 1869 2 1901
710 [18, 6, 7] 1 2 1871 2 1903
711 [19, 6, 7] 1 2 1873 2 1905
712 [18, 6, 7] 1 2 1875 2 1907
713 [19, 6, 7] 1 2 1877 2 1909
714 [18, 6, 7] 1 2 1879 2 1911
715 [19, 6, 7] 1 2 1881 2 1913
716 [18, 6, 7] 1 2 1883 2 1915
717 [19, 6, 7] 1 2 1885 2 1917
718 [18, 6, 7] 1 2 1887 2 1919
719 [19, 6, 7] 1 2 1889 2 1921
720 [18, 6, 7] 1 2 1891 2 1923
721 [19, 6, 7] 1 2 1893 2 1925
722 [18, 6, 7] 1 2 1895 2 1927
723 [19, 6, 7] 1 2 1897 2 1929
724 [18, 6, 7] 1 2 1899 2 1931
725 [19, 6, 7] 1 2 1901 2 1933
726 [18, 6, 7] 1 2 1903 2 1935
727 [19, 6, 7] 1 2 1905 2 1937
728 [18, 6, 7] 1 2 1907 2 1939
729 [19, 6, 7] 1 2 1909 2 1941
730 [18, 6, 7] 1 2 1911 2 1943
731 [19, 6, 7] 1 2 1913 2 1945
732 [19, 6, 18] 11 0 1913 0 1945
733 [19, 30, 18] 12 14 1927 14 1959
734 [19, 31, 18] 1 2 1929 2 1961
735 [19, 30, 18] 1 2 1931 2 1963
736 [19, 31, 18] 1 2 1933 2 1965
737 [19, 30, 18] 1 2 1935 2 1967
738 [19, 31, 18] 1 2 1937 2 1969
739 [19, 30, 18] 1 2 1939 2 1971
740 [19, 31, 18] 1 2 1941 2 1973
741 [19, 30, 18] 1 2 1943 2 1975
742 [19, 31, 18] 1 2 1945 2 1977
743 [19, 30, 18] 1 2 1947 2 1979
744 [19, 31, 18] 1 2 1949 2 1981
745 [19, 30, 18] 1 2 1951 2 1983
746 [19, 31, 18] 1 2 1953 2 1985
747 [19, 30, 18] 1 2 1955 2 1987
748 [19, 31, 18] 1 2 1957 2 1989
749 [19, 30, 18] 1 2 1959 2 1991
750 [19, 31, 18] 1 2 1961 2 1993
751 [19, 30, 18] 1 2 1963 2 1995
752 [19, 31, 18] 1 2 1965 2 1997
753 [19, 30, 18] 1 2 1967 2 1999
754 [19, 31, 18] 1 2 1969 2 2001
755 [30, 31, 18] 11 2 1971 2 2003
756 [30, 6, 18] 11 22 1993 22 2025
757 [30, 7, 18] 1 2 1995 2 2027
758 [30, 6, 18] 1 2 1997 2 2029
759 [30, 7, 18] 1 2 1999 2 2031
760 [30, 6, 18] 1 2 2001 2 2033
761 [30, 7, 18] 1 2 2003 2 2035
762 [30, 6, 18] 1 2 2005 2 2037
763 [30, 7, 18] 1 2 2007 2 2039
764 [30, 6, 18] 1 2 2009 2 2041
765 [30, 7, 18] 1 2 2011 2 2043
766 [30, 6, 18] 1 2 2013 2 2045
767 [30, 7, 18] 1 2 2015 2 2047
768 [30, 6, 18] 1 2 2017 2 2049
769 [30, 7, 18] 1 2 2019 2 2051
770 [30, 6, 18] 1 2 2021 2 2053
771 [30, 7, 18] 1 2 2023 2 2055
772 [30, 6, 18] 1 2 2025 2 2057
773 [30, 7, 18] 1 2 2027 2 2059
774 [30, 6, 18] 1 2 2029 2 2061
775 [30, 7, 18] 1 2 2031 2 2063
776 [30, 6, 18] 1 2 2033 2 2065
777 [30, 7, 18] 1 2 2035 2 2067
778 [30, 6, 18] 1 2 2037 2 2069
779 [30, 6, 7] 11 0 2037 2 2071
780 [18, 6, 7] 12 14 2051 14 2085
781 [19, 6, 7] 1 2 2053 2 2087
782 [18, 6, 7] 1 2 2055 2 2089
783 [19, 6, 7] 1 2 2057 2 2091
784 [18, 6, 7] 1 2 2059 2 2093
785 [19, 6, 7] 1 2 2061 2 2095
786 [18, 6, 7] 1 2 2063 2 2097
787 [19, 6, 7] 1 2 2065 2 2099
788 [18, 6, 7] 1 2 2067 2 2101
789 [19, 6, 7] 1 2 2069 2 2103
790 [18, 6, 7] 1 2 2071 2 2105
791 [19, 6, 7] 1 2 2073 2 2107
792 [18, 6, 7] 1 2 2075 2 2109
793 [19, 6, 7] 1 2 2077 2 2111
794 [18, 6, 7] 1 2 2079 2 2113
795 [19, 6, 7] 1 2 2081 2 2115
796 [18, 6, 7] 1 2 2083 2 2117
797 [19, 6, 7] 1 2 2085 2 2119
798 [18, 6, 7] 1 2 2087 2 2121
799 [19, 6, 7] 1 2 2089 2 2123
800 [18, 6, 7] 1 2 2091 2 2125
801 [19, 6, 7] 1 2 2093 2 2127
802 [18, 6, 7] 1 2 2095 2 2129
803 [19, 6, 7] 1 2 2097 2 2131
804 [19, 6, 18] 11 0 2097 0 2131
805 [30, 6, 18] 11 22 2119 22 2153
806 [31, 6, 18] 1 2 2121 2 2155
807 [30, 6, 18] 1 2 2123 2 2157
808 [31, 6, 18] 1 2 2125 2 2159
809 [30, 6, 18] 1 2 2127 2 2161
810 [31, 6, 18] 1 2 2129 2 2163
811 [30, 6, 18] 1 2 2131 2 2165
812 [31, 6, 18] 1 2 2133 2 2167
813 [30, 6, 18] 1 2 2135 2 2169
814 [31, 6, 18] 1 2 2137 2 2171
815 [30, 6, 18] 1 2 2139 2 2173
816 [31, 6, 18] 1 2 2141 2 2175
817 [30, 6, 18] 1 2 2143 2 2177
818 [31, 6, 18] 1 2 2145 2 2179
819 [30, 6, 18] 1 2 2147 2 2181
820 [31, 6, 18] 1 2 2149 2 2183
821 [30, 6, 18] 1 2 2151 2 2185
822 [31, 6, 18] 1 2 2153 2 2187
823 [30, 6, 18] 1 2 2155 2 2189
824 [31, 6, 18] 1 2 2157 2 2191
825 [30, 6, 18] 1 2 2159 2 2193
826 [31, 6, 18] 1 2 2161 2 2195
827 [30, 6, 18] 1 2 2163 2 2197
828 [30, 31, 18] 11 0 2163 2 2199
829 [30, 31, 6] 12 14 2177 14 2213
830 [30, 31, 7] 1 2 2179 2 2215
831 [30, 31, 6] 1 2 2181 2 2217
832 [30, 31, 7] 1 2 2183 2 2219
833 [30, 31, 6] 1 2 2185 2 2221
834 [30, 31, 7] 1 2 2187 2 2223
835 [30, 31, 6] 1 2 2189 2 2225
836 [30, 31, 7] 1 2 2191 2 2227
837 [30, 31, 6] 1 2 2193 2 2229
838 [30, 31, 7] 1 2 2195 2 2231
839 [30, 31, 6] 1 2 2197 2 2233
840 [30, 31, 7] 1 2 2199 2 2235
841 [30, 31, 6] 1 2 2201 2 2237
842 [30, 31, 7] 1 2 2203 2 2239
843 [30, 31, 6] 1 2 2205 2 2241
844 [30, 31, 7] 1 2 2207 2 2243
845 [30, 31, 6] 1 2 2209 2 2245
846 [30, 31, 7] 1 2 2211 2 2247
847 [30, 31, 6] 1 2 2213 2 2249
848 [30, 31, 7] 1 2 2215 2 2251
849 [30, 31, 6] 1 2 2217 2 2253
850 [30, 31, 7] 1 2 2219 2 2255
851 [30, 6, 7] 11 2 2221 2 2257
852 [18, 6, 7] 12 22 2243 22 2279
853 [19, 6, 7] 1 2 2245 2 2281
854 [18, 6, 7] 1 2 2247 2 2283
855 [19, 6, 7] 1 2 2249 2 2285
856 [18, 6, 7] 1 2 2251 2 2287
857 [19, 6, 7] 1 2 2253 2 2289
858 [18, 6, 7] 1 2 2255 2 2291
859 [19, 6, 7] 1 2 2257 2 2293
860 [18, 6, 7] 1 2 2259 2 2295
861 [19, 6, 7] 1 2 2261 2 2297
862 [18, 6, 7] 1 2 2263 2 2299
863 [19, 6, 7] 1 2 2265 2 2301
864 [18, 6, 7] 1 2 2267 2 2303
865 [19, 6, 7] 1 2 2269 2 2305
866 [18, 6, 7] 1 2 2271 2 2307
867 [19, 6, 7] 1 2 2273 2 2309
868 [18, 6, 7] 1 2 2275 2 2311
869 [19, 6, 7] 1 2 2277 2 2313
870 [18, 6, 7] 1 2 2279 2 2315
871 [19, 6, 7] 1 2 2281 2 2317
872 [18, 6, 7] 1 2 2283 2 2319
873 [19, 6, 7] 1 2 2285 2 2321
874 [18, 6, 7] 1 2 2287 2 2323
875 [19, 6, 7] 1 2 2289 2 2325
876 [19, 6, 18] 11 0 2289 0 2325
877 [19, 30, 18] 12 14 2303 14 2339
878 [19, 31, 18] 1 2 2305 2 2341
879 [19, 30, 18] 1 2 2307 2 2343
880 [19, 31, 18] 1 2 2309 2 2345
881 [19, 30, 18] 1 2 2311 2 2347
882 [19, 31, 18] 1 2 2313 2 2349
883 [19, 30, 18] 1 2 2315 2 2351
884 [19, 31, 18] 1 2 2317 2 2353
885 [19, 30, 18] 1 2 2319 2 2355
886 [19, 31, 18] 1 2 2321 2 2357
887 [19, 30, 18] 1 2 2323 2 2359
888 [19, 31, 18] 1 2 2325 2 2361
889 [19, 30, 18] 1 2 2327 2 2363
890 [19, 31, 18] 1 2 2329 2 2365
891 [19, 30, 18] 1 2 2331 2 2367
892 [19, 31, 18] 1 2 2333 2 2369
893 [19, 30, 18] 1 2 2335 2 2371
894 [19, 31, 18] 1 2 2337 2 2373
895 [19, 30, 18] 1 2 2339 2 2375
896 [19, 31, 18] 1 2 2341 2 2377
897 [19, 30, 18] 1 2 2343 2 2379
898 [19, 31, 18] 1 2 2345 2 2381
899 [30, 31, 18] 11 2 2347 2 2383
900 [30, 6, 18] 11 22 2369 22 2405
901 [30, 7, 18] 1 2 2371 2 2407
902 [30, 6, 18] 1 2 2373 2 2409
903 [30, 7, 18] 1 2 2375 2 2411
904 [30, 6, 18] 1 2 2377 2 2413
905 [30, 7, 18] 1 2 2379 2 2415
906 [30, 6, 18] 1 2 2381 2 2417
907 [30, 7, 18] 1 2 2383 2 2419
908 [30, 6, 18] 1 2 2385 2 2421
909 [30, 7, 18] 1 2 2387 2 2423
910 [30, 6, 18] 1 2 2389 2 2425
911 [30, 7, 18] 1 2 2391 2 2427
912 [30, 6, 18] 1 2 2393 2 2429
913 [30, 7, 18] 1 2 2395 2 2431
914 [30, 6, 18] 1 2 2397 2 2433
915 [30, 7, 18] 1 2 2399 2 2435
916 [30, 6, 18] 1 2 2401 2 2437
917 [30, 7, 18] 1 2 2403 2 2439
918 [30, 6, 18] 1 2 2405 2 2441
919 [30, 7, 18] 1 2 2407 2 2443
920 [30, 6, 18] 1 2 2409 2 2445
921 [30, 7, 18] 1 2 2411 2 2447
922 [30, 6, 18] 1 2 2413 2 2449
923 [30, 6, 7] 11 0 2413 2 2451
924 [18, 6, 7] 12 14 2427 14 2465
925 [19, 6, 7] 1 2 2429 2 2467
926 [18, 6, 7] 1 2 2431 2 2469
927 [19, 6, 7] 1 2 2433 2 2471
928 [18, 6, 7] 1 2 2435 2 2473
929 [19, 6, 7] 1 2 2437 2 2475
930 [18, 6, 7] 1 2 2439 2 2477
931 [19, 6, 7] 1 2 2441 2 2479
932 [18, 6, 7] 1 2 2443 2 2481
933 [19, 6, 7] 1 2 2445 2 2483
934 [18, 6, 7] 1 2 2447 2 2485
935 [19, 6, 7] 1 2 2449 2 2487
936 [18, 6, 7] 1 2 2451 2 2489
937 [19, 6, 7] 1 2 2453 2 2491
938 [18, 6, 7] 1 2 2455 2 2493
939 [19, 6, 7] 1 2 2457 2 2495
940 [18, 6, 7] 1 2 2459 2 2497
941 [19, 6, 7] 1 2 2461 2 2499
942 [18, 6, 7] 1 2 2463 2 2501
943 [19, 6, 7] 1 2 2465 2 2503
944 [18, 6, 7] 1 2 2467 2 2505
945 [19, 6, 7] 1 2 2469 2 2507
946 [18, 6, 7] 1 2 2471 2 2509
947 [19, 6, 7] 1 2 2473 2 2511
948 [19, 6, 18] 11 0 2473 0 2511
949 [30, 6, 18] 11 22 2495 22 2533
950 [31, 6, 18] 1 2 2497 2 2535
951 [30, 6, 18] 1 2 2499 2 2537
952 [31, 6, 18] 1 2 2501 2 2539
953 [30, 6, 18] 1 2 2503 2 2541
954 [31, 6, 18] 1 2 2505 2 2543
955 [30, 6, 18] 1 2 2507 2 2545
956 [31, 6, 18] 1 2 2509 2 2547
957 [30, 6, 18] 1 2 2511 2 2549
958 [31, 6, 18] 1 2 2513 2 2551
959 [30, 6, 18] 1 2 2515 2 2553
960 [31, 6, 18] 1 2 2517 2 2555
961 [30, 6, 18] 1 2 2519 2 2557
962 [31, 6, 18] 1 2 2521 2 2559
963 [30, 6, 18] 1 2 2523 2 2561
964 [31, 6, 18] 1 2 2525 2 2563
965 [30, 6, 18] 1 2 2527 2 2565
966 [31, 6, 18] 1 2 2529 2 2567
967 [30, 6, 18] 1 2 2531 2 2569
968 [31, 6, 18] 1 2 2533 2 2571
969 [30, 6, 18] 1 2 2535 2 2573
970 [31, 6, 18] 1 2 2537 2 2575
971 [30, 6, 18] 1 2 2539 2 2577
972 [30, 31, 18] 11 0 2539 2 2579
973 [30, 31, 6] 12 14 2553 14 2593
974 [30, 31, 7] 1 2 2555 2 2595
975 [30, 31, 6] 1 2 2557 2 2597
976 [30, 31, 7] 1 2 2559 2 2599
977 [30, 31, 6] 1 2 2561 2 2601
978 [30, 31, 7] 1 2 2563 2 2603
979 [30, 31, 6] 1 2 2565 2 2605
980 [30, 31, 7] 1 2 2567 2 2607
981 [30, 31, 6] 1 2 2569 2 2609
982 [30, 31, 7] 1 2 2571 2 2611
983 [30, 31, 6] 1 2 2573 2 2613
984 [30, 31, 7] 1 2 2575 2 2615
985 [30, 31, 6] 1 2 2577 2 2617
986 [30, 31, 7] 1 2 2579 2 2619
987 [30, 31, 6] 1 2 2581 2 2621
988 [30, 31, 7] 1 2 2583 2 2623
989 [30, 31, 6] 1 2 2585 2 2625
990 [30, 31, 7] 1 2 2587 2 2627
991 [30, 31, 6] 1 2 2589 2 2629
992 [30, 31, 7] 1 2 2591 2 2631
993 [30, 31, 6] 1 2 2593 2 2633
994 [30, 31, 7] 1 2 2595 2 2635
995 [30, 6, 7] 11 2 2597 2 2637
996 [18, 6, 7] 12 22 2619 22 2659
997 [19, 6, 7] 1 2 2621 2 2661
998 [18, 6, 7] 1 2 2623 2 2663
999 [19, 6, 7] 1 2 2625 2 2665
1000 [18, 6, 7] 1 2 2627 2 2667
1001 [19, 6, 7] 1 2 2629 2 2669
1002 [18, 6, 7] 1 2 2631 2 2671
1003 [19, 6, 7] 1 2 2633 2 2673
1004 [18, 6, 7] 1 2 2635 2 2675
1005 [19, 6, 7] 1 2 2637 2 2677
1006 [18, 6, 7] 1 2 2639 2 2679
1007 [19, 6, 7] 1 2 2641 2 2681
1008 [18, 6, 7] 1 2 2643 2 2683
1009 [19, 6, 7] 1 2 2645 2 2685
1010 [18, 6, 7] 1 2 2647 2 2687
1011 [19, 6, 7] 1 2 2649 2 2689
1012 [18, 6, 7] 1 2 2651 2 2691
1013 [19, 6, 7] 1 2 2653 2 2693
1014 [18, 6, 7] 1 2 2655 2 2695
1015 [19, 6, 7] 1 2 2657 2 2697
1016 [18, 6, 7] 1 2 2659 2 2699
1017 [19, 6, 7] 1 2 2661 2 2701
1018 [18, 6, 7] 1 2 2663 2 2703
1019 [19, 6, 7] 1 2 2665 2 2705
1020 [19, 6, 18] 11 0 2665 0 2705
1021 [19, 30, 18] 12 14 2679 14 2719
1022 [19, 31, 18] 1 2 2681 2 2721
1023 [19, 30, 18] 1 2 2683 2 2723
1024 [19, 31, 18] 1 2 2685 2 2725
1025 [19, 30, 18] 1 2 2687 2 2727
1026 [19, 31, 18] 1 2 2689 2 2729
1027 [19, 30, 18] 1 2 2691 2 2731
1028 [19, 31, 18] 1 2 2693 2 2733
1029 [19, 30, 18] 1 2 2695 2 2735
1030 [19, 31, 18] 1 2 2697 2 2737
1031 [19, 30, 18] 1 2 2699 2 2739
1032 [19, 31, 18] 1 2 2701 2 2741
1033 [19, 30, 18] 1 2 2703 2 2743
1034 [19, 31, 18] 1 2 2705 2 2745
1035 [19, 30, 18] 1 2 2707 2 2747
1036 [19, 31, 18] 1 2 2709 2 2749
1037 [19, 30, 18] 1 2 2711 2 2751
1038 [19, 31, 18] 1 2 2713 2 2753
1039 [19, 30, 18] 1 2 2715 2 2755
1040 [19, 31, 18] 1 2 2717 2 2757
1041 [19, 30, 18] 1 2 2719 2 2759
1042 [19, 31, 18] 1 2 2721 2 2761
1043 [30, 31, 18] 11 2 2723 2 2763
1044 [30, 6, 18] 11 22 2745 22 2785
1045 [30, 7, 18] 1 2 2747 2 2787
1046 [30, 6, 18] 1 2 2749 2 2789
1047 [30, 7, 18] 1 2 2751 2 2791
1048 [30, 6, 18] 1 2 2753 2 2793
1049 [30, 7, 18] 1 2 2755 2 2795
1050 [30, 6, 18] 1 2 2757 2 2797
1051 [30, 7, 18] 1 2 2759 2 2799
1052 [30, 6, 18] 1 2 2761 2 2801
1053 [30, 7, 18] 1 2 2763 2 2803
1054 [30, 6, 18] 1 2 2765 2 2805
1055 [30, 7, 18] 1 2 2767 2 2807
1056 [30, 6, 18] 1 2 2769 2 2809
1057 [30, 7, 18] 1 2 2771 2 2811
1058 [30, 6, 18] 1 2 2773 2 2813
1059 [30, 7, 18] 1 2 2775 2 2815
1060 [30, 6, 18] 1 2 2777 2 2817
1061 [30, 7, 18] 1 2 2779 2 2819
1062 [30, 6, 18] 1 2 2781 2 2821
1063 [30, 7, 18] 1 2 2783 2 2823
1064 [30, 6, 18] 1 2 2785 2 2825
1065 [30, 7, 18] 1 2 2787 2 2827
1066 [30, 6, 18] 1 2 2789 2 2829
1067 [30, 6, 7] 11 0 2789 2 2831
1068 [18, 6, 7] 12 14 2803 14 2845
1069 [19, 6, 7] 1 2 2805 2 2847
1070 [18, 6, 7] 1 2 2807 2 2849
1071 [19, 6, 7] 1 2 2809 2 2851
1072 [18, 6, 7] 1 2 2811 2 2853
1073 [19, 6, 7] 1 2 2813 2 2855
1074 [18, 6, 7] 1 2 2815 2 2857
1075 [19, 6, 7] 1 2 2817 2 2859
1076 [18, 6, 7] 1 2 2819 2 2861
1077 [19, 6, 7] 1 2 2821 2 2863
1078 [18, 6, 7] 1 2 2823 2 2865
1079 [19, 6, 7] 1 2 2825 2 2867
1080 [18, 6, 7] 1 2 2827 2 2869
1081 [19, 6, 7] 1 2 2829 2 2871
1082 [18, 6, 7] 1 2 2831 2 2873
1083 [19, 6, 7] 1 2 2833 2 2875
1084 [18, 6, 7] 1 2 2835 2 2877
1085 [19, 6, 7] 1 2 2837 2 2879
1086 [18, 6, 7] 1 2 2839 2 2881
1087 [19, 6, 7] 1 2 2841 2 2883
1088 [18, 6, 7] 1 2 2843 2 2885
1089 [19, 6, 7] 1 2 2845 2 2887
1090 [18, 6, 7] 1 2 2847 2 2889
1091 [19, 6, 7] 1 2 2849 2 2891
1092 [19, 6, 18] 11 0 2849 0 2891
1093 [30, 6, 18] 11 22 2871 22 2913
1094 [31, 6, 18] 1 2 2873 2 2915
1095 [30, 6, 18] 1 2 2875 2 2917
1096 [31, 6, 18] 1 2 2877 2 2919
1097 [30, 6, 18] 1 2 2879 2 2921
1098 [31, 6, 18] 1 2 2881 2 2923
1099 [30, 6, 18] 1 2 2883 2 2925
1100 [31, 6, 18] 1 2 2885 2 2927
1101 [30, 6, 18] 1 2 2887 2 2929
1102 [31, 6, 18] 1 2 2889 2 2931
1103 [30, 6, 18] 1 2 2891 2 2933
1104 [31, 6, 18] 1 2 2893 2 2935
1105 [30, 6, 18] 1 2 2895 2 2937
1106 [31, 6, 18] 1 2 2897 2 2939
1107 [30, 6, 18] 1 2 2899 2 2941
1108 [31, 6, 18] 1 2 2901 2 2943
1109 [30, 6, 18] 1 2 2903 2 2945
1110 [31, 6, 18] 1 2 2905 2 2947
1111 [30, 6, 18] 1 2 2907 2 2949
1112 [31, 6, 18] 1 2 2909 2 2951
1113 [30, 6, 18] 1 2 2911 2 2953
1114 [31, 6, 18] 1 2 2913 2 2955
1115 [30, 6, 18] 1 2 2915 2 2957
1116 [30, 31, 18] 11 0 2915 2 2959
1117 [30, 31, 6] 12 14 2929 14 2973
1118 [30, 31, 7] 1 2 2931 2 2975
1119 [30, 31, 6] 1 2 2933 2 2977
1120 [30, 31, 7] 1 2 2935 2 2979
1121 [30, 31, 6] 1 2 2937 2 2981
1122 [30, 31, 7] 1 2 2939 2 2983
1123 [30, 31, 6] 1 2 2941 2 2985
1124 [30, 31, 7] 1 2 2943 2 2987
1125 [30, 31, 6] 1 2 2945 2 2989
1126 [30, 31, 7] 1 2 2947 2 2991
1127 [30, 31, 6] 1 2 2949 2 2993
1128 [30, 31, 7] 1 2 2951 2 2995
1129 [30, 31, 6] 1 2 2953 2 2997
1130 [30, 31, 7] 1 2 2955 2 2999
1131 [30, 31, 6] 1 2 2957 2 3001
1132 [30, 31, 7] 1 2 2959 2 3003
1133 [30, 31, 6] 1 2 2961 2 3005
1134 [30, 31, 7] 1 2 2963 2 3007
1135 [30, 31, 6] 1 2 2965 2 3009
1136 [30, 31, 7] 1 2 2967 2 3011
1137 [30, 31, 6] 1 2 2969 2 3013
1138 [30, 31, 7] 1 2 2971 2 3015
1139 [30, 6, 7] 11 2 2973 2 3017
1140 [18, 6, 7] 12 22 2995 22 3039
1141 [19, 6, 7] 1 2 2997 2 3041
1142 [18, 6, 7] 1 2 2999 2 3043
1143 [19, 6, 7] 1 2 3001 2 3045
1144 [18, 6, 7] 1 2 3003 2 3047
1145 [19, 6, 7] 1 2 3005 2 3049
1146 [18, 6, 7] 1 2 3007 2 3051
1147 [19, 6, 7] 1 2 3009 2 3053
1148 [18, 6, 7] 1 2 3011 2 3055
1149 [19, 6, 7] 1 2 3013 2 3057
1150 [18, 6, 7] 1 2 3015 2 3059
1151 [19, 6, 7] 1 2 3017 2 3061
1152 [18, 6, 7] 1 2 3019 2 3063
1153 [19, 6, 7] 1 2 3021 2 3065
1154 [18, 6, 7] 1 2 3023 2 3067
1155 [19, 6, 7] 1 2 3025 2 3069
1156 [18, 6, 7] 1 2 3027 2 3071
1157 [19, 6, 7] 1 2 3029 2 3073
1158 [18, 6, 7] 1 2 3031 2 3075
1159 [19, 6, 7] 1 2 3033 2 3077
1160 [18, 6, 7] 1 2 3035 2 3079
1161 [19, 6, 7] 1 2 3037 2 3081
1162 [18, 6, 7] 1 2 3039 2 3083
1163 [19, 6, 7] 1 2 3041 2 3085
1164 [19, 6, 18] 11 0 3041 0 3085
1165 [19, 30, 18] 12 14 3055 14 3099
1166 [19, 31, 18] 1 2 3057 2 3101
1167 [19, 30, 18] 1 2 3059 2 3103
1168 [19, 31, 18] 1 2 3061 2 3105
1169 [19, 30, 18] 1 2 3063 2 3107
1170 [19, 31, 18] 1 2 3065 2 3109
1171 [19, 30, 18] 1 2 3067 2 3111
1172 [19, 31, 18] 1 2 3069 2 3113
1173 [19, 30, 18] 1 2 3071 2 3115
1174 [19, 31, 18] 1 2 3073 2 3117
1175 [19, 30, 18] 1 2 3075 2 3119
1176 [19, 31, 18] 1 2 3077 2 3121
1177 [19, 30, 18] 1 2 3079 2 3123
1178 [19, 31, 18] 1 2 3081 2 3125
1179 [19, 30, 18] 1 2 3083 2 3127
1180 [19, 31, 18] 1 2 3085 2 3129
1181 [19, 30, 18] 1 2 3087 2 3131
1182 [19, 31, 18] 1 2 3089 2 3133
1183 [19, 30, 18] 1 2 3091 2 3135
1184 [19, 31, 18] 1 2 3093 2 3137
1185 [19, 30, 18] 1 2 3095 2 3139
1186 [19, 31, 18] 1 2 3097 2 3141
1187 [30, 31, 18] 11 2 3099 2 3143
1188 [30, 6, 18] 11 22 3121 22 3165
1189 [30, 7, 18] 1 2 3123 2 3167
1190 [30, 6, 18] 1 2 3125 2 3169
1191 [30, 7, 18] 1 2 3127 2 3171
1192 [30, 6, 18] 1 2 3129 2 3173
1193 [30, 7, 18] 1 2 3131 2 3175
1194 [30, 6, 18] 1 2 3133 2 3177
1195 [30, 7, 18] 1 2 3135 2 3179
1196 [30, 6, 18] 1 2 3137 2 3181
1197 [30, 7, 18] 1 2 3139 2 3183
1198 [30, 6, 18] 1 2 3141 2 3185
1199 [30, 7, 18] 1 2 3143 2 3187
1200 [30, 6, 18] 1 2 3145 2 3189
1201 [30, 7, 18] 1 2 3147 2 3191
1202 [30, 6, 18] 1 2 3149 2 3193
1203 [30, 7, 18] 1 2 3151 2 3195
1204 [30, 6, 18] 1 2 3153 2 3197
1205 [30, 7, 18] 1 2 3155 2 3199
1206 [30, 6, 18] 1 2 3157 2 3201
1207 [30, 7, 18] 1 2 3159 2 3203
1208 [30, 6, 18] 1 2 3161 2 3205
1209 [30, 7, 18] 1 2 3163 2 3207
1210 [30, 6, 18] 1 2 3165 2 3209
1211 [30, 6, 7] 11 0 3165 2 3211
1212 [18, 6, 7] 12 14 3179 14 3225
1213 [19, 6, 7] 1 2 3181 2 3227
1214 [18, 6, 7] 1 2 3183 2 3229
1215 [19, 6, 7] 1 2 3185 2 3231
1216 [18, 6, 7] 1 2 3187 2 3233
1217 [19, 6, 7] 1 2 3189 2 3235
1218 [18, 6, 7] 1 2 3191 2 3237
1219 [19, 6, 7] 1 2 3193 2 3239
1220 [18, 6, 7] 1 2 3195 2 3241
1221 [19, 6, 7] 1 2 3197 2 3243
1222 [18, 6, 7] 1 2 3199 2 3245
1223 [19, 6, 7] 1 2 3201 2 3247
1224 [18, 6, 7] 1 2 3203 2 3249
1225 [19, 6, 7] 1 2 3205 2 3251
1226 [18, 6, 7] 1 2 3207 2 3253
1227 [19, 6, 7] 1 2 3209 2 3255
1228 [18, 6, 7] 1 2 3211 2 3257
1229 [19, 6, 7] 1 2 3213 2 3259
1230 [18, 6, 7] 1 2 3215 2 3261
1231 [19, 6, 7] 1 2 3217 2 3263
1232 [18, 6, 7] 1 2 3219 2 3265
1233 [19, 6, 7] 1 2 3221 2 3267
1234 [18, 6, 7] 1 2 3223 2 3269
1235 [19, 6, 7] 1 2 3225 2 3271
1236 [19, 6, 18] 11 0 3225 0 3271
1237 [30, 6, 18] 11 22 3247 22 3293
1238 [31, 6, 18] 1 2 3249 2 3295
1239 [30, 6, 18] 1 2 3251 2 3297
1240 [31, 6, 18] 1 2 3253 2 3299
1241 [30, 6, 18] 1 2 3255 2 3301
1242 [31, 6, 18] 1 2 3257 2 3303
1243 [30, 6, 18] 1 2 3259 2 3305
1244 [31, 6, 18] 1 2 3261 2 3307
1245 [30, 6, 18] 1 2 3263 2 3309
1246 [31, 6, 18] 1 2 3265 2 3311
1247 [30, 6, 18] 1 2 3267 2 3313
1248 [31, 6, 18] 1 2 3269 2 3315
1249 [30, 6, 18] 1 2 3271 2 3317
1250 [31, 6, 18] 1 2 3273 2 3319
1251 [30, 6, 18] 1 2 3275 2 3321
1252 [31, 6, 18] 1 2 3277 2 3323
1253 [30, 6, 18] 1 2 3279 2 3325
1254 [31, 6, 18] 1 2 3281 2 3327
1255 [30, 6, 18] 1 2 3283 2 3329
1256 [31, 6, 18] 1 2 3285 2 3331
1257 [30, 6, 18] 1 2 3287 2 3333
1258 [31, 6, 18] 1 2 3289 2 3335
1259 [30, 6, 18] 1 2 3291 2 3337
1260 [30, 31, 18] 11 0 3291 2 3339
1261 [30, 31, 6] 12 14 3305 14 3353
1262 [30, 31, 7] 1 2 3307 2 3355
1263 [30, 31, 6] 1 2 3309 2 3357
1264 [30, 31, 7] 1 2 3311 2 3359
1265 [30, 31, 6] 1 2 3313 2 3361
1266 [30, 31, 7] 1 2 3315 2 3363
1267 [30, 31, 6] 1 2 3317 2 3365
1268 [30, 31, 7] 1 2 3319 2 3367
1269 [30, 31, 6] 1 2 3321 2 3369
1270 [30, 31, 7] 1 2 3323 2 3371
1271 [30, 31, 6] 1 2 3325 2 3373
1272 [30, 31, 7] 1 2 3327 2 3375
1273 [30, 31, 6] 1 2 3329 2 3377
1274 [30, 31, 7] 1 2 3331 2 3379
1275 [30, 31, 6] 1 2 3333 2 3381
1276 [30, 31, 7] 1 2 3335 2 3383
1277 [30, 31, 6] 1 2 3337 2 3385
1278 [30, 31, 7] 1 2 3339 2 3387
1279 [30, 31, 6] 1 2 3341 2 3389
1280 [30, 31, 7] 1 2 3343 2 3391
1281 [30, 31, 6] 1 2 3345 2 3393
1282 [30, 31, 7] 1 2 3347 2 3395
1283 [30, 6, 7] 11 2 3349 2 3397
1284 [18, 6, 7] 12 22 3371 22 3419
1285 [19, 6, 7] 1 2 3373 2 3421
1286 [18, 6, 7] 1 2 3375 2 3423
1287 [19, 6, 7] 1 2 3377 2 3425
1288 [18, 6, 7] 1 2 3379 2 3427
1289 [19, 6, 7] 1 2 3381 2 3429
1290 [18, 6, 7] 1 2 3383 2 3431
1291 [19, 6, 7] 1 2 3385 2 3433
1292 [18, 6, 7] 1 2 3387 2 3435
1293 [19, 6, 7] 1 2 3389 2 3437
1294 [18, 6, 7] 1 2 3391 2 3439
1295 [19, 6, 7] 1 2 3393 2 3441
1296 [18, 6, 7] 1 2 3395 2 3443
1297 [19, 6, 7] 1 2 3397 2 3445
1298 [18, 6, 7] 1 2 3399 2 3447
1299 [19, 6, 7] 1 2 3401 2 3449
1300 [18, 6, 7] 1 2 3403 2 3451
1301 [19, 6, 7] 1 2 3405 2 3453
1302 [18, 6, 7] 1 2 3407 2 3455
1303 [19, 6, 7] 1 2 3409 2 3457
1304 [18, 6, 7] 1 2 3411 2 3459
1305 [19, 6, 7] 1 2 3413 2 3461
1306 [18, 6, 7] 1 2 3415 2 3463
1307 [19, 6, 7] 1 2 3417 2 3465
1308 [19, 6, 18] 11 0 3417 0 3465
1309 [19, 30, 18] 12 14 3431 14 3479
1310 [19, 31, 18] 1 2 3433 2 3481
1311 [19, 30, 18] 1 2 3435 2 3483
1312 [19, 31, 18] 1 2 3437 2 3485
1313 [19, 30, 18] 1 2 3439 2 3487
1314 [19, 31, 18] 1 2 3441 2 3489
1315 [19, 30, 18] 1 2 3443 2 3491
1316 [19, 31, 18] 1 2 3445 2 3493
1317 [19, 30, 18] 1 2 3447 2 3495
1318 [19, 31, 18] 1 2 3449 2 3497
1319 [19, 30, 18] 1 2 3451 2 3499
1320 [19, 31, 18] 1 2 3453 2 3501
1321 [19, 30, 18] 1 2 3455 2 3503
1322 [19, 31, 18] 1 2 3457 2 3505
1323 [19, 30, 18] 1 2 3459 2 3507
1324 [19, 31, 18] 1 2 3461 2 3509
1325 [19, 30, 18] 1 2 3463 2 3511
1326 [19, 31, 18] 1 2 3465 2 3513
1327 [19, 30, 18] 1 2 3467 2 3515
1328 [19, 31, 18] 1 2 3469 2 3517
1329 [19, 30, 18] 1 2 3471 2 3519
1330 [19, 31, 18] 1 2 3473 2 3521
1331 [30, 31, 18] 11 2 3475 2 3523
1332 [30, 6, 18] 11 22 3497 22 3545
1333 [30, 7, 18] 1 2 3499 2 3547
1334 [30, 6, 18] 1 2 3501 2 3549
1335 [30, 7, 18] 1 2 3503 2 3551
1336 [30, 6, 18] 1 2 3505 2 3553
1337 [30, 7, 18] 1 2 3507 2 3555
1338 [30, 6, 18] 1 2 3509 2 3557
1339 [30, 7, 18] 1 2 3511 2 3559
1340 [30, 6, 18] 1 2 3513 2 3561
1341 [30, 7, 18] 1 2 3515 2 3563
1342 [30, 6, 18] 1 2 3517 2 3565
1343 [30, 7, 18] 1 2 3519 2 3567
1344 [30, 6, 18] 1 2 3521 2 3569
1345 [30, 7, 18] 1 2 3523 2 3571
1346 [30, 6, 18] 1 2 3525 2 3573
1347 [30, 7, 18] 1 2 3527 2 3575
1348 [30, 6, 18] 1 2 3529 2 3577
1349 [30, 7, 18] 1 2 3531 2 3579
1350 [30, 6, 18] 1 2 3533 2 3581
1351 [30, 7, 18] 1 2 3535 2 3583
1352 [30, 6, 18] 1 2 3537 2 3585
1353 [30, 7, 18] 1 2 3539 2 3587
1354 [30, 6, 18] 1 2 3541 2 3589
1355 [30, 6, 7] 11 0 3541 2 3591
1356 [18, 6, 7] 12 14 3555 14 3605
1357 [19, 6, 7] 1 2 3557 2 3607
1358 [18, 6, 7] 1 2 3559 2 3609
1359 [19, 6, 7] 1 2 3561 2 3611
1360 [18, 6, 7] 1 2 3563 2 3613
1361 [19, 6, 7] 1 2 3565 2 3615
1362 [18, 6, 7] 1 2 3567 2 3617
1363 [19, 6, 7] 1 2 3569 2 3619
1364 [18, 6, 7] 1 2 3571 2 3621
1365 [19, 6, 7] 1 2 3573 2 3623
1366 [18, 6, 7] 1 2 3575 2 3625
1367 [19, 6, 7] 1 2 3577 2 3627
1368 [18, 6, 7] 1 2 3579 2 3629
1369 [19, 6, 7] 1 2 3581 2 3631
1370 [18, 6, 7] 1 2 3583 2 3633
1371 [19, 6, 7] 1 2 3585 2 3635
1372 [18, 6, 7] 1 2 3587 2 3637
1373 [19, 6, 7] 1 2 3589 2 3639
1374 [18, 6, 7] 1 2 3591 2 3641
1375 [19, 6, 7] 1 2 3593 2 3643
1376 [18, 6, 7] 1 2 3595 2 3645
1377 [19, 6, 7] 1 2 3597 2 3647
1378 [18, 6, 7] 1 2 3599 2 3649
1379 [19, 6, 7] 1 2 3601 2 3651
1380 [19, 6, 18] 11 0 3601 0 3651
1381 [30, 6, 18] 11 22 3623 22 3673
1382 [31, 6, 18] 1 2 3625 2 3675
1383 [30, 6, 18] 1 2 3627 2 3677
1384 [31, 6, 18] 1 2 3629 2 3679
1385 [30, 6, 18] 1 2 3631 2 3681
1386 [31, 6, 18] 1 2 3633 2 3683
1387 [30, 6, 18] 1 2 3635 2 3685
1388 [31, 6, 18] 1 2 3637 2 3687
1389 [30, 6, 18] 1 2 3639 2 3689
1390 [31, 6, 18] 1 2 3641 2 3691
1391 [30, 6, 18] 1 2 3643 2 3693
1392 [31, 6, 18] 1 2 3645 2 3695
1393 [30, 6, 18] 1 2 3647 2 3697
1394 [31, 6, 18] 1 2 3649 2 3699
1395 [30, 6, 18] 1 2 3651 2 3701
1396 [31, 6, 18] 1 2 3653 2 3703
1397 [30, 6, 18] 1 2 3655 2 3705
1398 [31, 6, 18] 1 2 3657 2 3707
1399 [30, 6, 18] 1 2 3659 2 3709
1400 [31, 6, 18] 1 2 3661 2 3711
1401 [30, 6, 18] 1 2 3663 2 3713
1402 [31, 6, 18] 1 2 3665 2 3715
1403 [30, 6, 18] 1 2 3667 2 3717
1404 [30, 31, 18] 11 0 3667 2 3719
1405 [30, 31, 6] 12 14 3681 14 3733
1406 [30, 31, 7] 1 2 3683 2 3735
1407 [30, 31, 6] 1 2 3685 2 3737
1408 [30, 31, 7] 1 2 3687 2 3739
1409 [30, 31, 6] 1 2 3689 2 3741
1410 [30, 31, 7] 1 2 3691 2 3743
1411 [30, 31, 6] 1 2 3693 2 3745
1412 [30, 31, 7] 1 2 3695 2 3747
1413 [30, 31, 6] 1 2 3697 2 3749
1414 [30, 31, 7] 1 2 3699 2 3751
1415 [30, 31, 6] 1 2 3701 2 3753
1416 [30, 31, 7] 1 2 3703 2 3755
1417 [30, 31, 6] 1 2 3705 2 3757
1418 [30, 31, 7] 1 2 3707 2 3759
1419 [30, 31, 6] 1 2 3709 2 3761
1420 [30, 31, 7] 1 2 3711 2 3763
1421 [30, 31, 6] 1 2 3713 2 3765
1422 [30, 31, 7] 1 2 3715 2 3767
1423 [30, 31, 6] 1 2 3717 2 3769
1424 [30, 31, 7] 1 2 3719 2 3771
1425 [30, 31, 6] 1 2 3721 2 3773
1426 [30, 31, 7] 1 2 3723 2 3775
1427 [30, 6, 7] 11 2 3725 2 3777
1428 [18, 6, 7] 12 22 3747 22 3799
1429 [19, 6, 7] 1 2 3749 2 3801
1430 [18, 6, 7] 1 2 3751 2 3803
1431 [19, 6, 7] 1 2 3753 2 3805
1432 [18, 6, 7] 1 2 3755 2 3807
1433 [19, 6, 7] 1 2 3757 2 3809
1434 [18, 6, 7] 1 2 3759 2 3811
1435 [19, 6, 7] 1 2 3761 2 3813
1436 [18, 6, 7] 1 2 3763 2 3815
1437 [19, 6, 7] 1 2 3765 2 3817
1438 [18, 6, 7] 1 2 3767 2 3819
1439 [19, 6, 7] 1 2 3769 2 3821
1440 [18, 6, 7] 1 2 3771 2 3823
1441 [19, 6, 7] 1 2 3773 2 3825
1442 [18, 6, 7] 1 2 3775 2 3827
1443 [19, 6, 7] 1 2 3777 2 3829
1444 [18, 6, 7] 1 2 3779 2 3831
1445 [19, 6, 7] 1 2 3781 2 3833
1446 [18, 6, 7] 1 2 3783 2 3835
1447 [19, 6, 7] 1 2 3785 2 3837
1448 [18, 6, 7] 1 2 3787 2 3839
1449 [19, 6, 7] 1 2 3789 2 3841
1450 [18, 6, 7] 1 2 3791 2 3843
1451 [19, 6, 7] 1 2 3793 2 3845
1452 [19, 6, 18] 11 0 3793 0 3845
1453 [19, 30, 18] 12 14 3807 14 3859
1454 [19, 31, 18] 1 2 3809 2 3861
1455 [19, 30, 18] 1 2 3811 2 3863
1456 [19, 31, 18] 1 2 3813 2 3865
1457 [19, 30, 18] 1 2 3815 2 3867
1458 [19, 31, 18] 1 2 3817 2 3869
1459 [19, 30, 18] 1 2 3819 2 3871
1460 [19, 31, 18] 1 2 3821 2 3873
1461 [19, 30, 18] 1 2 3823 2 3875
1462 [19, 31, 18] 1 2 3825 2 3877
1463 [19, 30, 18] 1 2 3827 2 3879
1464 [19, 31, 18] 1 2 3829 2 3881
1465 [19, 30, 18] 1 2 3831 2 3883
1466 [19, 31, 18] 1 2 3833 2 3885
1467 [19, 30, 18] 1 2 3835 2 3887
1468 [19, 31, 18] 1 2 3837 2 3889
1469 [19, 30, 18] 1 2 3839 2 3891
1470 [19, 31, 18] 1 2 3841 2 3893
1471 [19, 30, 18] 1 2 3843 2 3895
1472 [19, 31, 18] 1 2 3845 2 3897
1473 [19, 30, 18] 1 2 3847 2 3899
1474 [19, 31, 18] 1 2 3849 2 3901
1475 [30, 31, 18] 11 2 3851 2 3903
1476 [30, 6, 18] 11 22 3873 22 3925
1477 [30, 7, 18] 1 2 3875 2 3927
1478 [30, 6, 18] 1 2 3877 2 3929
1479 [30, 7, 18] 1 2 3879 2 3931
1480 [30, 6, 18] 1 2 3881 2 3933
1481 [30, 7, 18] 1 2 3883 2 3935
1482 [30, 6, 18] 1 2 3885 2 3937
1483 [30, 7, 18] 1 2 3887 2 3939
1484 [30, 6, 18] 1 2 3889 2 3941
1485 [30, 7, 18] 1 2 3891 2 3943
1486 [30, 6, 18] 1 2 3893 2 3945
1487 [30, 7, 18] 1 2 3895 2 3947
1488 [30, 6, 18] 1 2 3897 2 3949
1489 [30, 7, 18] 1 2 3899 2 3951
1490 [30, 6, 18] 1 2 3901 2 3953
1491 [30, 7, 18] 1 2 3903 2 3955
1492 [30, 6, 18] 1 2 3905 2 3957
1493 [30, 7, 18] 1 2 3907 2 3959
1494 [30, 6, 18] 1 2 3909 2 3961
1495 [30, 7, 18] 1 2 3911 2 3963
1496 [30, 6, 18] 1 2 3913 2 3965
1497 [30, 7, 18] 1 2 3915 2 3967
1498 [30, 6, 18] 1 2 3917 2 3969
1499 [30, 6, 7] 11 0 3917 2 3971
1500 [18, 6, 7] 12 14 3931 14 3985
1501 [19, 6, 7] 1 2 3933 2 3987
1502 [18, 6, 7] 1 2 3935 2 3989
1503 [19, 6, 7] 1 2 3937 2 3991
1504 [18, 6, 7] 1 2 3939 2 3993
1505 [19, 6, 7] 1 2 3941 2 3995
1506 [18, 6, 7] 1 2 3943 2 3997
1507 [19, 6, 7] 1 2 3945 2 3999
1508 [18, 6, 7] 1 2 3947 2 4001
1509 [19, 6, 7] 1 2 3949 2 4003
1510 [18, 6, 7] 1 2 3951 2 4005
1511 [19, 6, 7] 1 2 3953 2 4007
1512 [18, 6, 7] 1 2 3955 2 4009
1513 [19, 6, 7] 1 2 3957 2 4011
1514 [18, 6, 7] 1 2 3959 2 4013
1515 [19, 6, 7] 1 2 3961 2 4015
1516 [18, 6, 7] 1 2 3963 2 4017
1517 [19, 6, 7] 1 2 3965 2 4019
1518 [18, 6, 7] 1 2 3967 2 4021
1519 [19, 6, 7] 1 2 3969 2 4023
1520 [18, 6, 7] 1 2 3971 2 4025
1521 [19, 6, 7] 1 2 3973 2 4027
1522 [18, 6, 7] 1 2 3975 2 4029
1523 [19, 6, 7] 1 2 3977 2 4031
1524 [19, 6, 18] 11 0 3977 0 4031
1525 [30, 6, 18] 11 22 3999 22 4053
1526 [31, 6, 18] 1 2 4001 2 4055
1527 [30, 6, 18] 1 2 4003 2 4057
1528 [31, 6, 18] 1 2 4005 2 4059
1529 [30, 6, 18] 1 2 4007 2 4061
1530 [31, 6, 18] 1 2 4009 2 4063
1531 [30, 6, 18] 1 2 4011 2 4065
1532 [31, 6, 18] 1 2 4013 2 4067
1533 [30, 6, 18] 1 2 4015 2 4069
1534 [31, 6, 18] 1 2 4017 2 4071
1535 [30, 6, 18] 1 2 4019 2 4073
1536 [31, 6, 18] 1 2 4021 2 4075
1537 [30, 6, 18] 1 2 4023 2 4077
1538 [31, 6, 18] 1 2 4025 2 4079
1539 [30, 6, 18] 1 2 4027 2 4081
1540 [31, 6, 18] 1 2 4029 2 4083
1541 [30, 6, 18] 1 2 4031 2 4085
1542 [31, 6, 18] 1 2 4033 2 4087
1543 [30, 6, 18] 1 2 4035 2 4089
1544 [31, 6, 18] 1 2 4037 2 4091
1545 [30, 6, 18] 1 2 4039 2 4093
1546 [31, 6, 18] 1 2 4041 2 4095
1547 [30, 6, 18] 1 2 4043 2 4097
1548 [30, 31, 18] 11 0 4043 2 4099
1549 [30, 31, 6] 12 14 4057 14 4113
1550 [30, 31, 7] 1 2 4059 2 4115
1551 [30, 31, 6] 1 2 4061 2 4117
1552 [30, 31, 7] 1 2 4063 2 4119
1553 [30, 31, 6] 1 2 4065 2 4121
1554 [30, 31, 7] 1 2 4067 2 4123
1555 [30, 31, 6] 1 2 4069 2 4125
1556 [30, 31, 7] 1 2 4071 2 4127
1557 [30, 31, 6] 1 2 4073 2 4129
1558 [30, 31, 7] 1 2 4075 2 4131
1559 [30, 31, 6] 1 2 4077 2 4133
1560 [30, 31, 7] 1 2 4079 2 4135
1561 [30, 31, 6] 1 2 4081 2 4137
1562 [30, 31, 7] 1 2 4083 2 4139
1563 [30, 31, 6] 1 2 4085 2 4141
1564 [30, 31, 7] 1 2 4087 2 4143
1565 [30, 31, 6] 1 2 4089 2 4145
1566 [30, 31, 7] 1 2 4091 2 4147
1567 [30, 31, 6] 1 2 4093 2 4149
1568 [30, 31, 7] 1 2 4095 2 4151
1569 [30, 31, 6] 1 2 4097 2 4153
1570 [30, 31, 7] 1 2 4099 2 4155
1571 [30, 6, 7] 11 2 4101 2 4157
1572 [18, 6, 7] 12 22 4123 22 4179
1573 [19, 6, 7] 1 2 4125 2 4181
1574 [18, 6, 7] 1 2 4127 2 4183
1575 [19, 6, 7] 1 2 4129 2 4185
1576 [18, 6, 7] 1 2 4131 2 4187
1577 [19, 6, 7] 1 2 4133 2 4189
1578 [18, 6, 7] 1 2 4135 2 4191
1579 [19, 6, 7] 1 2 4137 2 4193
1580 [18, 6, 7] 1 2 4139 2 4195
1581 [19, 6, 7] 1 2 4141 2 4197
1582 [18, 6, 7] 1 2 4143 2 4199
1583 [19, 6, 7] 1 2 4145 2 4201
1584 [18, 6, 7] 1 2 4147 2 4203
1585 [19, 6, 7] 1 2 4149 2 4205
1586 [18, 6, 7] 1 2 4151 2 4207
1587 [19, 6, 7] 1 2 4153 2 4209
1588 [18, 6, 7] 1 2 4155 2 4211
1589 [19, 6, 7] 1 2 4157 2 4213
1590 [18, 6, 7] 1 2 4159 2 4215
1591 [19, 6, 7] 1 2 4161 2 4217
1592 [18, 6, 7] 1 2 4163 2 4219
1593 [19, 6, 7] 1 2 4165 2 4221
1594 [18, 6, 7] 1 2 4167 2 4223
1595 [19, 6, 7] 1 2 4169 2 4225
1596 [19, 6, 18] 11 0 4169 0 4225
1597 [19, 30, 18] 12 14 4183 14 4239
1598 [19, 31, 18] 1 2 4185 2 4241
1599 [19, 30, 18] 1 2 4187 2 4243
1600 [19, 31, 18] 1 2 4189 2 4245
1601 [19, 30, 18] 1 2 4191 2 4247
1602 [19, 31, 18] 1 2 4193 2 4249
1603 [19, 30, 18] 1 2 4195 2 4251
1604 [19, 31, 18] 1 2 4197 2 4253
1605 [19, 30, 18] 1 2 4199 2 4255
1606 [19, 31, 18] 1 2 4201 2 4257
1607 [19, 30, 18] 1 2 4203 2 4259
1608 [19, 31, 18] 1 2 4205 2 4261
1609 [19, 30, 18] 1 2 4207 2 4263
1610 [19, 31, 18] 1 2 4209 2 4265
1611 [19, 30, 18] 1 2 4211 2 4267
1612 [19, 31, 18] 1 2 4213 2 4269
1613 [19, 30, 18] 1 2 4215 2 4271
1614 [19, 31, 18] 1 2 4217 2 4273
1615 [19, 30, 18] 1 2 4219 2 4275
1616 [19, 31, 18] 1 2 4221 2 4277
1617 [19, 30, 18] 1 2 4223 2 4279
1618 [19, 31, 18] 1 2 4225 2 4281
1619 [30, 31, 18] 11 2 4227 2 4283
1620 [30, 6, 18] 11 22 4249 22 4305
1621 [30, 7, 18] 1 2 4251 2 4307
1622 [30, 6, 18] 1 2 4253 2 4309
1623 [30, 7, 18] 1 2 4255 2 4311
1624 [30, 6, 18] 1 2 4257 2 4313
1625 [30, 7, 18] 1 2 4259 2 4315
1626 [30, 6, 18] 1 2 4261 2 4317
1627 [30, 7, 18] 1 2 4263 2 4319
1628 [30, 6, 18] 1 2 4265 2 4321
1629 [30, 7, 18] 1 2 4267 2 4323
1630 [30, 6, 18] 1 2 4269 2 4325
1631 [30, 7, 18] 1 2 4271 2 4327
1632 [30, 6, 18] 1 2 4273 2 4329
1633 [30, 7, 18] 1 2 4275 2 4331
1634 [30, 6, 18] 1 2 4277 2 4333
1635 [30, 7, 18] 1 2 4279 2 4335
1636 [30, 6, 18] 1 2 4281 2 4337
1637 [30, 7, 18] 1 2 4283 2 4339
1638 [30, 6, 18] 1 2 4285 2 4341
1639 [30, 7, 18] 1 2 4287 2 4343
1640 [30, 6, 18] 1 2 4289 2 4345
1641 [30, 7, 18] 1 2 4291 2 4347
1642 [30, 6, 18] 1 2 4293 2 4349
1643 [30, 6, 7] 11 0 4293 2 4351
1644 [18, 6, 7] 12 14 4307 14 4365
1645 [19, 6, 7] 1 2 4309 2 4367
1646 [18, 6, 7] 1 2 4311 2 4369
1647 [19, 6, 7] 1 2 4313 2 4371
1648 [18, 6, 7] 1 2 4315 2 4373
1649 [19, 6, 7] 1 2 4317 2 4375
1650 [18, 6, 7] 1 2 4319 2 4377
1651 [19, 6, 7] 1 2 4321 2 4379
1652 [18, 6, 7] 1 2 4323 2 4381
1653 [19, 6, 7] 1 2 4325 2 4383
1654 [18, 6, 7] 1 2 4327 2 4385
1655 [19, 6, 7] 1 2 4329 2 4387
1656 [18, 6, 7] 1 2 4331 2 4389
1657 [19, 6, 7] 1 2 4333 2 4391
1658 [18, 6, 7] 1 2 4335 2 4393
1659 [19, 6, 7] 1 2 4337 2 4395
1660 [18, 6, 7] 1 2 4339 2 4397
1661 [19, 6, 7] 1 2 4341 2 4399
1662 [18, 6, 7] 1 2 4343 2 4401
1663 [19, 6, 7] 1 2 4345 2 4403
1664 [18, 6, 7] 1 2 4347 2 4405
1665 [19, 6, 7] 1 2 4349 2 4407
1666 [18, 6, 7] 1 2 4351 2 4409
1667 [19, 6, 7] 1 2 4353 2 4411
1668 [19, 6, 18] 11 0 4353 0 4411
1669 [30, 6, 18] 11 22 4375 22 4433
1670 [31, 6, 18] 1 2 4377 2 4435
1671 [30, 6, 18] 1 2 4379 2 4437
1672 [31, 6, 18] 1 2 4381 2 4439
1673 [30, 6, 18] 1 2 4383 2 4441
1674 [31, 6, 18] 1 2 4385 2 4443
1675 [30, 6, 18] 1 2 4387 2 4445
1676 [31, 6, 18] 1 2 4389 2 4447
1677 [30, 6, 18] 1 2 4391 2 4449
1678 [31, 6, 18] 1 2 4393 2 4451
1679 [30, 6, 18] 1 2 4395 2 4453
1680 [31, 6, 18] 1 2 4397 2 4455
1681 [30, 6, 18] 1 2 4399 2 4457
1682 [31, 6, 18] 1 2 4401 2 4459
1683 [30, 6, 18] 1 2 4403 2 4461
1684 [31, 6, 18] 1 2 4405 2 4463
1685 [30, 6, 18] 1 2 4407 2 4465
1686 [31, 6, 18] 1 2 4409 2 4467
1687 [30, 6, 18] 1 2 4411 2 4469
1688 [31, 6, 18] 1 2 4413 2 4471
1689 [30, 6, 18] 1 2 4415 2 4473
1690 [31, 6, 18] 1 2 4417 2 4475
1691 [30, 6, 18] 1 2 4419 2 4477
1692 [30, 31, 18] 11 0 4419 2 4479
1693 [30, 31, 6] 12 14 4433 14 4493
1694 [30, 31, 7] 1 2 4435 2 4495
1695 [30, 31, 6] 1 2 4437 2 4497
1696 [30, 31, 7] 1 2 4439 2 4499
1697 [30, 31, 6] 1 2 4441 2 4501
1698 [30, 31, 7] 1 2 4443 2 4503
1699 [30, 31, 6] 1 2 4445 2 4505
1700 [30, 31, 7] 1 2 4447 2 4507
1701 [30, 31, 6] 1 2 4449 2 4509
1702 [30, 31, 7] 1 2 4451 2 4511
1703 [30, 31, 6] 1 2 4453 2 4513
1704 [30, 31, 7] 1 2 4455 2 4515
1705 [30, 31, 6] 1 2 4457 2 4517
1706 [30, 31, 7] 1 2 4459 2 4519
1707 [30, 31, 6] 1 2 4461 2 4521
1708 [30, 31, 7] 1 2 4463 2 4523
1709 [30, 31, 6] 1 2 4465 2 4525
1710 [30, 31, 7] 1 2 4467 2 4527
1711 [30, 31, 6] 1 2 4469 2 4529
1712 [30, 31, 7] 1 2 4471 2 4531
1713 [30, 31, 6] 1 2 4473 2 4533
1714 [30, 31, 7] 1 2 4475 2 4535
1715 [30, 6, 7] 11 2 4477 2 4537
1716 [18, 6, 7] 12 22 4499 22 4559
1717 [19, 6, 7] 1 2 4501 2 4561
1718 [18, 6, 7] 1 2 4503 2 4563
1719 [19, 6, 7] 1 2 4505 2 4565
1720 [18, 6, 7] 1 2 4507 2 4567
1721 [19, 6, 7] 1 2 4509 2 4569
1722 [18, 6, 7] 1 2 4511 2 4571
1723 [19, 6, 7] 1 2 4513 2 4573
1724 [18, 6, 7] 1 2 4515 2 4575
1725 [19, 6, 7] 1 2 4517 2 4577
1726 [18, 6, 7] 1 2 4519 2 4579
1727 [19, 6, 7] 1 2 4521 2 4581
1728 [18, 6, 7] 1 2 4523 2 4583
1729 [19, 6, 7] 1 2 4525 2 4585
1730 [18, 6, 7] 1 2 4527 2 4587
1731 [19, 6, 7] 1 2 4529 2 4589
1732 [18, 6, 7] 1 2 4531 2 4591
1733 [19, 6, 7] 1 2 4533 2 4593
1734 [18, 6, 7] 1 2 4535 2 4595
1735 [19, 6, 7] 1 2 4537 2 4597
1736 [18, 6, 7] 1 2 4539 2 4599
1737 [19, 6, 7] 1 2 4541 2 4601
1738 [18, 6, 7] 1 2 4543 2 4603
1739 [19, 6, 7] 1 2 4545 2 4605
1740 [19, 6, 18] 11 0 4545 0 4605
1741 [19, 30, 18] 12 14 4559 14 4619
1742 [19, 31, 18] 1 2 4561 2 4621
1743 [19, 30, 18] 1 2 4563 2 4623
1744 [19, 31, 18] 1 2 4565 2 4625
1745 [19, 30, 18] 1 2 4567 2 4627
1746 [19, 31, 18] 1 2 4569 2 4629
1747 [19, 30, 18] 1 2 4571 2 4631
1748 [19, 31, 18] 1 2 4573 2 4633
1749 [19, 30, 18] 1 2 4575 2 4635
1750 [19, 31, 18] 1 2 4577 2 4637
1751 [19, 30, 18] 1 2 4579 2 4639
1752 [19, 31, 18] 1 2 4581 2 4641
1753 [19, 30, 18] 1 2 4583 2 4643
1754 [19, 31, 18] 1 2 4585 2 4645
1755 [19, 30, 18] 1 2 4587 2 4647
1756 [19, 31, 18] 1 2 4589 2 4649
1757 [19, 30, 18] 1 2 4591 2 4651
1758 [19, 31, 18] 1 2 4593 2 4653
1759 [19, 30, 18] 1 2 4595 2 4655
1760 [19, 31, 18] 1 2 4597 2 4657
1761 [19, 30, 18] 1 2 4599 2 4659
1762 [19, 31, 18] 1 2 4601 2 4661
1763 [30, 31, 18] 11 2 4603 2 4663
1764 [30, 6, 18] 11 22 4625 22 4685
1765 [30, 7, 18] 1 2 4627 2 4687
1766 [30, 6, 18] 1 2 4629 2 4689
1767 [30, 7, 18] 1 2 4631 2 4691
1768 [30, 6, 18] 1 2 4633 2 4693
1769 [30, 7, 18] 1 2 4635 2 4695
1770 [30, 6, 18] 1 2 4637 2 4697
1771 [30, 7, 18] 1 2 4639 2 4699
1772 [30, 6, 18] 1 2 4641 2 4701
1773 [30, 7, 18] 1 2 4643 2 4703
1774 [30, 6, 18] 1 2 4645 2 4705
1775 [30, 7, 18] 1 2 4647 2 4707
1776 [30, 6, 18] 1 2 4649 2 4709
1777 [30, 7, 18] 1 2 4651 2 4711
1778 [30, 6, 18] 1 2 4653 2 4713
1779 [30, 7, 18] 1 2 4655 2 4715
1780 [30, 6, 18] 1 2 4657 2 4717
1781 [30, 7, 18] 1 2 4659 2 4719
1782 [30, 6, 18] 1 2 4661 2 4721
1783 [30, 7, 18] 1 2 4663 2 4723
1784 [30, 6, 18] 1 2 4665 2 4725
1785 [30, 7, 18] 1 2 4667 2 4727
1786 [30, 6, 18] 1 2 4669 2 4729
1787 [30, 6, 7] 11 0 4669 2 4731
1788 [18, 6, 7] 12 14 4683 14 4745
1789 [19, 6, 7] 1 2 4685 2 4747
1790 [18, 6, 7] 1 2 4687 2 4749
1791 [19, 6, 7] 1 2 4689 2 4751
1792 [18, 6, 7] 1 2 4691 2 4753
1793 [19, 6, 7] 1 2 4693 2 4755
1794 [18, 6, 7] 1 2 4695 2 4757
1795 [19, 6, 7] 1 2 4697 2 4759
1796 [18, 6, 7] 1 2 4699 2 4761
1797 [19, 6, 7] 1 2 4701 2 4763
1798 [18, 6, 7] 1 2 4703 2 4765
1799 [19, 6, 7] 1 2 4705 2 4767
1800 [18, 6, 7] 1 2 4707 2 4769
1801 [19, 6, 7] 1 2 4709 2 4771
1802 [18, 6, 7] 1 2 4711 2 4773
1803 [19, 6, 7] 1 2 4713 2 4775
1804 [18, 6, 7] 1 2 4715 2 4777
1805 [19, 6, 7] 1 2 4717 2 4779
1806 [18, 6, 7] 1 2 4719 2 4781
1807 [19, 6, 7] 1 2 4721 2 4783
1808 [18, 6, 7] 1 2 4723 2 4785
1809 [19, 6, 7] 1 2 4725 2 4787
1810 [18, 6, 7] 1 2 4727 2 4789
1811 [19, 6, 7] 1 2 4729 2 4791
1812 [19, 6, 18] 11 0 4729 0 4791
1813 [30, 6, 18] 11 22 4751 22 4813
1814 [31, 6, 18] 1 2 4753 2 4815
1815 [30, 6, 18] 1 2 4755 2 4817
1816 [31, 6, 18] 1 2 4757 2 4819
1817 [30, 6, 18] 1 2 4759 2 4821
1818 [31, 6, 18] 1 2 4761 2 4823
1819 [30, 6, 18] 1 2 4763 2 4825
1820 [31, 6, 18] 1 2 4765 2 4827
1821 [30, 6, 18] 1 2 4767 2 4829
1822 [31, 6, 18] 1 2 4769 2 4831
1823 [30, 6, 18] 1 2 4771 2 4833
1824 [31, 6, 18] 1 2 4773 2 4835
1825 [30, 6, 18] 1 2 4775 2 4837
1826 [31, 6, 18] 1 2 4777 2 4839
1827 [30, 6, 18] 1 2 4779 2 4841
1828 [31, 6, 18] 1 2 4781 2 4843
1829 [30, 6, 18] 1 2 4783 2 4845
1830 [31, 6, 18] 1 2 4785 2 4847
1831 [30, 6, 18] 1 2 4787 2 4849
1832 [31, 6, 18] 1 2 4789 2 4851
1833 [30, 6, 18] 1 2 4791 2 4853
1834 [31, 6, 18] 1 2 4793 2 4855
1835 [30, 6, 18] 1 2 4795 2 4857
1836 [30, 31, 18] 11 0 4795 2 4859
1837 [30, 31, 6] 12 14 4809 14 4873
1838 [30, 31, 7] 1 2 4811 2 4875
1839 [30, 31, 6] 1 2 4813 2 4877
1840 [30, 31, 7] 1 2 4815 2 4879
1841 [30, 31, 6] 1 2 4817 2 4881
1842 [30, 31, 7] 1 2 4819 2 4883
1843 [30, 31, 6] 1 2 4821 2 4885
1844 [30, 31, 7] 1 2 4823 2 4887
1845 [30, 31, 6] 1 2 4825 2 4889
1846 [30, 31, 7] 1 2 4827 2 4891
1847 [30, 31, 6] 1 2 4829 2 4893
1848 [30, 31, 7] 1 2 4831 2 4895
1849 [30, 31, 6] 1 2 4833 2 4897
1850 [30, 31, 7] 1 2 4835 2 4899
1851 [30, 31, 6] 1 2 4837 2 4901
1852 [30, 31, 7] 1 2 4839 2 4903
1853 [30, 31, 6] 1 2 4841 2 4905
1854 [30, 31, 7] 1 2 4843 2 4907
1855 [30, 31, 6] 1 2 4845 2 4909
1856 [30, 31, 7] 1 2 4847 2 4911
1857 [30, 31, 6] 1 2 4849 2 4913
1858 [30, 31, 7] 1 2 4851 2 4915
1859 [30, 6, 7] 11 2 4853 2 4917
1860 [18, 6, 7] 12 22 4875 22 4939
1861 [19, 6, 7] 1 2 4877 2 4941
1862 [18, 6, 7] 1 2 4879 2 4943
1863 [19, 6, 7] 1 2 4881 2 4945
1864 [18, 6, 7] 1 2 4883 2 4947
1865 [19, 6, 7] 1 2 4885 2 4949
1866 [18, 6, 7] 1 2 4887 2 4951
1867 [19, 6, 7] 1 2 4889 2 4953
1868 [18, 6, 7] 1 2 4891 2 4955
1869 [19, 6, 7] 1 2 4893 2 4957
1870 [18, 6, 7] 1 2 4895 2 4959
1871 [19, 6, 7] 1 2 4897 2 4961
1872 [18, 6, 7] 1 2 4899 2 4963
1873 [19, 6, 7] 1 2 4901 2 4965
1874 [18, 6, 7] 1 2 4903 2 4967
1875 [19, 6, 7] 1 2 4905 2 4969
1876 [18, 6, 7] 1 2 4907 2 4971
1877 [19, 6, 7] 1 2 4909 2 4973
1878 [18, 6, 7] 1 2 4911 2 4975
1879 [19, 6, 7] 1 2 4913 2 4977
1880 [18, 6, 7] 1 2 4915 2 4979
1881 [19, 6, 7] 1 2 4917 2 4981
1882 [18, 6, 7] 1 2 4919 2 4983
1883 [19, 6, 7] 1 2 4921 2 4985
1884 [19, 6, 18] 11 0 4921 0 4985
1885 [19, 30, 18] 12 14 4935 14 4999
1886 [19, 31, 18] 1 2 4937 2 5001
1887 [19, 30, 18] 1 2 4939 2 5003
1888 [19, 31, 18] 1 2 4941 2 5005
1889 [19, 30, 18] 1 2 4943 2 5007
1890 [19, 31, 18] 1 2 4945 2 5009
1891 [19, 30, 18] 1 2 4947 2 5011
1892 [19, 31, 18] 1 2 4949 2 5013
1893 [19, 30, 18] 1 2 4951 2 5015
1894 [19, 31, 18] 1 2 4953 2 5017
1895 [19, 30, 18] 1 2 4955 2 5019
1896 [19, 31, 18] 1 2 4957 2 5021
1897 [19, 30, 18] 1 2 4959 2 5023
1898 [19, 31, 18] 1 2 4961 2 5025
1899 [19, 30, 18] 1 2 4963 2 5027
1900 [19, 31, 18] 1 2 4965 2 5029
1901 [19, 30, 18] 1 2 4967 2 5031
1902 [19, 31, 18] 1 2 4969 2 5033
1903 [19, 30, 18] 1 2 4971 2 5035
1904 [19, 31, 18] 1 2 4973 2 5037
1905 [19, 30, 18] 1 2 4975 2 5039
1906 [19, 31, 18] 1 2 4977 2 5041
1907 [30, 31, 18] 11 2 4979 2 5043
1908 [30, 6, 18] 11 22 5001 22 5065
1909 [30, 7, 18] 1 2 5003 2 5067
1910 [30, 6, 18] 1 2 5005 2 5069
1911 [30, 7, 18] 1 2 5007 2 5071
1912 [30, 6, 18] 1 2 5009 2 5073
1913 [30, 7, 18] 1 2 5011 2 5075
1914 [30, 6, 18] 1 2 5013 2 5077
1915 [30, 7, 18] 1 2 5015 2 5079
1916 [30, 6, 18] 1 2 5017 2 5081
1917 [30, 7, 18] 1 2 5019 2 5083
1918 [30, 6, 18] 1 2 5021 2 5085
1919 [30, 7, 18] 1 2 5023 2 5087
1920 [30, 6, 18] 1 2 5025 2 5089
1921 [30, 7, 18] 1 2 5027 2 5091
1922 [30, 6, 18] 1 2 5029 2 5093
1923 [30, 7, 18] 1 2 5031 2 5095
1924 [30, 6, 18] 1 2 5033 2 5097
1925 [30, 7, 18] 1 2 5035 2 5099
1926 [30, 6, 18] 1 2 5037 2 5101
1927 [30, 7, 18] 1 2 5039 2 5103
1928 [30, 6, 18] 1 2 5041 2 5105
1929 [30, 7, 18] 1 2 5043 2 5107
1930 [30, 6, 18] 1 2 5045 2 5109
1931 [30, 6, 7] 11 0 5045 2 5111
1932 [18, 6, 7] 12 14 5059 14 5125
1933 [19, 6, 7] 1 2 5061 2 5127
1934 [18, 6, 7] 1 2 5063 2 5129
1935 [19, 6, 7] 1 2 5065 2 5131
1936 [18, 6, 7] 1 2 5067 2 5133
1937 [19, 6, 7] 1 2 5069 2 5135
1938 [18, 6, 7] 1 2 5071 2 5137
1939 [19, 6, 7] 1 2 5073 2 5139
1940 [18, 6, 7] 1 2 5075 2 5141
1941 [19, 6, 7] 1 2 5077 2 5143
1942 [18, 6, 7] 1 2 5079 2 5145
1943 [19, 6, 7] 1 2 5081 2 5147
1944 [18, 6, 7] 1 2 5083 2 5149
1945 [19, 6, 7] 1 2 5085 2 5151
1946 [18, 6, 7] 1 2 5087 2 5153
1947 [19, 6, 7] 1 2 5089 2 5155
1948 [18, 6, 7] 1 2 5091 2 5157
1949 [19, 6, 7] 1 2 5093 2 5159
1950 [18, 6, 7] 1 2 5095 2 5161
1951 [19, 6, 7] 1 2 5097 2 5163
1952 [18, 6, 7] 1 2 5099 2 5165
1953 [19, 6, 7] 1 2 5101 2 5167
1954 [18, 6, 7] 1 2 5103 2 5169
1955 [19, 6, 7] 1 2 5105 2 5171
1956 [19, 6, 18] 11 0 5105 0 5171
1957 [30, 6, 18] 11 22 5127 22 5193
1958 [31, 6, 18] 1 2 5129 2 5195
1959 [30, 6, 18] 1 2 5131 2 5197
1960 [31, 6, 18] 1 2 5133 2 5199
1961 [30, 6, 18] 1 2 5135 2 5201
1962 [31, 6, 18] 1 2 5137 2 5203
1963 [30, 6, 18] 1 2 5139 2 5205
1964 [31, 6, 18] 1 2 5141 2 5207
1965 [30, 6, 18] 1 2 5143 2 5209
1966 [31, 6, 18] 1 2 5145 2 5211
1967 [30, 6, 18] 1 2 5147 2 5213
1968 [31, 6, 18] 1 2 5149 2 5215
1969 [30, 6, 18] 1 2 5151 2 5217
1970 [31, 6, 18] 1 2 5153 2 5219
1971 [30, 6, 18] 1 2 5155 2 5221
1972 [31, 6, 18] 1 2 5157 2 5223
1973 [30, 6, 18] 1 2 5159 2 5225
1974 [31, 6, 18] 1 2 5161 2 5227
1975 [30, 6, 18] 1 2 5163 2 5229
1976 [31, 6, 18] 1 2 5165 2 5231
1977 [30, 6, 18] 1 2 5167 2 5233
1978 [31, 6, 18] 1 2 5169 2 5235
1979 [30, 6, 18] 1 2 5171 2 5237
1980 [30, 31, 18] 11 0 5171 2 5239
1981 [30, 31, 6] 12 14 5185 14 5253
1982 [30, 31, 7] 1 2 5187 2 5255
1983 [30, 31, 6] 1 2 5189 2 5257
1984 [30, 31, 7] 1 2 5191 2 5259
1985 [30, 31, 6] 1 2 5193 2 5261
1986 [30, 31, 7] 1 2 5195 2 5263
1987 [30, 31, 6] 1 2 5197 2 5265
1988 [30, 31, 7] 1 2 5199 2 5267
1989 [30, 31, 6] 1 2 5201 2 5269
1990 [30, 31, 7] 1 2 5203 2 5271
1991 [30, 31, 6] 1 2 5205 2 5273
1992 [30, 31, 7] 1 2 5207 2 5275
1993 [30, 31, 6] 1 2 5209 2 5277
1994 [30, 31, 7] 1 2 5211 2 5279
1995 [30, 31, 6] 1 2 5213 2 5281
1996 [30, 31, 7] 1 2 5215 2 5283
1997 [30, 31, 6] 1 2 5217 2 5285
1998 [30, 31, 7] 1 2 5219 2 5287
1999 [30, 31, 6] 1 2 5221 2 5289
2000 [30, 31, 7] 1 2 5223 2 5291
2001 [30, 31, 6] 1 2 5225 2 5293
2002 [30, 31, 7] 1 2 5227 2 5295
2003 [30, 6, 7] 11 2 5229 2 5297
5229 5297 4470
|
3816117f0c4b93577849eab47cc52e0073efa346 | 6f83ecfc6399fb31f10f74e8bf819f1e7042e80e | /R/01.3-checks.R | c7162815fae2f8bc53a55f1b38fe9e6c567bb4d3 | [
"MIT"
] | permissive | jessesadler/debvctrs | d510edfd21d968d5df7ba237c7503552154cec8a | d3a04b446c5f20b34a5044b5b52530d4adcb2d23 | refs/heads/main | 2022-05-04T02:22:29.272112 | 2020-06-29T19:38:15 | 2020-06-29T19:38:15 | 207,156,132 | 27 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,865 | r | 01.3-checks.R | ## Checks ##
# lsd checks --------------------------------------------------------------
#' Checks for deb_lsd functions
#'
#' @description
#' Checks made:
#' - That `l`, `s`, and `d` are numeric
#' - That they are the same length, length 1, or all length 0
#' @keywords internal
lsd_check <- function(l, s, d) {
  # Each unit must be numeric; vectors that are entirely NA are allowed
  # through (they are treated as missing values, not a type error).
  check_unit_numeric <- function(value, label) {
    if (!all(rlang::are_na(value))) {
      if (!is.numeric(value)) {
        stop(call. = FALSE, paste0("`", label, "` must be a numeric vector."))
      }
    }
  }
  check_unit_numeric(l, "l")
  check_unit_numeric(s, "s")
  check_unit_numeric(d, "d")

  # The three units must have compatible sizes: all equal, scalar
  # (recyclable), or all zero-length.
  lengths <- c(vec_size(l), vec_size(s), vec_size(d))

  # A total size of 1 or 2 means some units are zero-length while others
  # are not — the caller forgot a value (or should have used 0).
  total <- sum(lengths)
  if (total == 1L || total == 2L) {
    stop(call. = FALSE,
         paste0("`l`, `s`, and `d` must all have values. ",
                "You may have forgotten a value or need to use 0."))
  }

  # Aside from scalars (which recycle), only one distinct length is allowed
  non_scalar <- lengths[lengths != 1L]
  if (length(unique(non_scalar)) > 1L) {
    stop(call. = FALSE,
         "`l`, `s`, and `d` must be vectors of equal length or length 1.")
  }
}
# bases check -------------------------------------------------------------
#' Check that bases are natural numbers
#'
#' Check that bases are natural numbers (whole number greater than 0).
#' From integer docs and SO: https://stackoverflow.com/a/4562291
#' @keywords internal
is_natural <- function(x, tol = .Machine$double.eps^0.5) {
  # Natural number test, elementwise: strictly positive AND within floating-
  # point tolerance of a whole number (see ?integer and
  # https://stackoverflow.com/a/4562291).
  is_positive <- x > tol
  is_whole <- abs(x - round(x)) < tol
  is_positive & is_whole
}
#' Checks for bases attribute
#'
#' @description
#' Check that:
#' - Bases are numeric vector of length 2
#' - Cannot have NA values
#' - Must be natural (whole) numbers greater that 0
#' @keywords internal
bases_check <- function(bases) {
  # Shape check: exactly two numeric values (a NULL input fails the
  # is.numeric() test and is rejected here as well).
  if (!(is.numeric(bases) && vec_size(bases) == 2L && !is.null(bases))) {
    stop(call. = FALSE, "`bases` must be a numeric vector of length 2.")
  }
  # Missing bases would make every unit conversion undefined
  if (any(rlang::are_na(bases))) {
    stop(call. = FALSE, "`bases` cannot be `NA`.")
  }
  # Each base must be a whole number greater than zero
  if (!all(is_natural(bases))) {
    stop(call. = FALSE, "`bases` must be natural numbers greater than zero.")
  }
}
# Bases equivalent --------------------------------------------------------
#' Check that bases are equal for two deb-style vectors
#'
#' Used to ensure that deb_lsd and deb_decimal vectors with different bases
#' cannot be combined except explicitly with `deb_convert_bases()`.
#' @keywords internal
bases_equal <- function(x, y) {
  # Vectors measured in different bases cannot be combined implicitly;
  # callers must convert explicitly with `deb_convert_bases()` first.
  same_bases <- identical(deb_bases(x), deb_bases(y))
  if (!same_bases) {
    stop(call. = FALSE,
         paste0("`bases` attributes must be equal to combine <deb_lsd> ",
                "or <deb_decimal> vectors."))
  }
}
}
|
0a231e05573ba253e2a02f1c2d7c4fdc91422217 | 85f0d52239c8ca6e8ec46a1ad6a3d6055c3f8f52 | /R/utils.R | 525410da84ccfa6ffb654b2ab947aca4772ba736 | [
"LicenseRef-scancode-public-domain"
] | permissive | halpo/pivot | 053daa31119d753955dcaef5551361787c7102e4 | 2dd0f1bfbfbbd6eed0b9e53cc035e11bf4b6ef15 | refs/heads/master | 2020-03-10T14:08:25.490882 | 2019-08-23T19:11:18 | 2019-08-23T19:11:18 | 129,418,480 | 10 | 2 | null | 2019-04-24T21:04:18 | 2018-04-13T15:08:47 | R | UTF-8 | R | false | false | 1,556 | r | utils.R | find_connection <- function(x)UseMethod('find_connection')
find_connection.op <- function(x){
assert_that(inherits(x, 'op'))
op <- x
x <- op$x
while (!is.null(x)) {
if (inherits(x, 'tbl_lazy')) {
return(x$src$con)
} else
if (inherits(x, 'op')) {
x <- x$x
} else {
stop( "Could not find base table to infer con from. "
, "Final op$x...$x value was "
, paste(class(x), collapse="/")
)
}
}
stop("Could not find a valid connection.")
}
# A lazy table stores its connection directly on its source
find_connection.tbl_lazy <- function(x) {
  x$src$con
}
# Query the distinct values of `key` from `data` and apply the tidyselect
# expressions in `...` to choose/rename which of those values become pivot
# levels. Returns a named character vector (names = output level names).
#
# NOTE(review): the `con` argument is accepted but never used in this body —
# presumably kept for signature symmetry with callers; verify before removing.
#' @importFrom magrittr %>%
get_pivot_levels <- function(data, key, ..., con=find_connection(data)){
  # Capture the key column and the level-selection expressions unevaluated
  key <- rlang::enquo(key)
  dots <- rlang::quos(...)
  # Distinct key values are pulled from the (possibly remote) table, then
  # tidyselect filters/renames them according to `...`
  data %>% dplyr::ungroup() %>% dplyr::select(!!key) %>%
    dplyr::distinct() %>%
    dplyr::pull(!!key) %>%
    as.character() %>%
    tidyselect::vars_select(!!!dots)
}
#' @export
# Fixed S3 method name: the original was `levels.op_pilot`, a typo that
# prevented `levels()` from ever dispatching here for <op_pivot> objects
# (and the assertion below would have rejected any actual <op_pilot> input).
levels.op_pivot <- function(x) {
  assert_that(inherits(x, "op_pivot"))
  con <- find_connection(x)
  assert_that(inherits(con, "DBIConnection"))
  levels_op_pivot(op = x, con = con)
}
# Resolve the pivot levels for an <op_pivot>:
#   1. if levels were supplied explicitly when the op was built, use them;
#   2. if every selection expression in `op$dots` is a bare name (no calls),
#      the level names can be read off without touching the database;
#   3. otherwise evaluate the tidyselect helpers against the distinct key
#      values queried from the source table.
levels_op_pivot <-
  function( op
          , con = find_connection(op)
          ){
    assert_that( inherits(op , 'op_pivot') )
    # Case 1: explicit levels stored on the op
    if (!is.null(op$args$levels))
        return(op$args$levels)
    # Case 2: all dots are bare column names — deparse them directly
    if (!any(purrr::map_lgl(op$dots, rlang::quo_is_call)))
        return (purrr::map_chr(op$dots, rlang::quo_text))
    # Case 3: need the actual key values from the database
    get_pivot_levels(op$x, !!op$args$key, !!!op$dots, con=con)
}
194d3d58819d591b231cbfa97ce4ee5a45091156 | a3501805f3b884d8d6bbd22d1bc47fa4358180fb | /Exploratory_Data_Analysis/Course_Project_1/plot2.R | 6671274b826e88afca0b372cd5c758f0517209f5 | [] | no_license | hhstkh/datasciencecoursera | d183a49bef089a784ae59ddfdd6ff9b143a0ca4f | a23a517c9cdf49adf889796db64d998dd7fd3087 | refs/heads/master | 2020-05-18T02:55:08.457333 | 2015-09-27T19:20:15 | 2015-09-27T19:20:15 | 26,918,620 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 716 | r | plot2.R | require(sqldf)
require(lubridate)
## The file exists in current directory
file <- c("household_power_consumption.txt")
## Read data from the dates 2007-02-01 and 2007-02-02
data_subset <- read.csv.sql(file, header = T, sep=";", sql = "select * from file where (Date == '1/2/2007' OR Date == '2/2/2007')" )
## Add new column concat Date and Time
data_subset$DateAndTime <- paste(data_subset$Date, data_subset$Time)
## Convert to Date type
data_subset$DateAndTime <- dmy_hms(data_subset$DateAndTime)
## Set up png
png(file = "./plot2.png", width = 480, height = 480, units = "px")
plot(data_subset$DateAndTime, data_subset$Global_active_power, type = "l", xlab = "", ylab="Global Active Power (kilowatts)")
dev.off() |
7af211718345e7d9c3ee216dcecd316dadec4f76 | 33dab35365d69701913f9f86ca7be6a2fca96d50 | /R/leaflet_tiles.R | d29a2ff72d3448ac918559e6af867506b3b2e5a3 | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | r-stata/rstatatools | 98a92fd128211b75ba11d66c2ebfb69cf346a1ca | 3a1ce18107533eea58fa98c65ba443d8f0ec6c5e | refs/heads/main | 2023-02-03T09:43:22.568582 | 2020-12-16T13:48:37 | 2020-12-16T13:48:37 | 321,953,931 | 1 | 4 | null | null | null | null | UTF-8 | R | false | false | 6,896 | r | leaflet_tiles.R | #' @title Load Tiandi Map to leaflet
#'
#' @description Simple function like addTiles()
#' @import leaflet
#'
#' @param map A leaflet object.
#' @param type A character value to set type of Tiandi map tiles. Options are "normal", "satellite", "terrain".
#' @param ... Other parameters passed to the addTiles function.
#'
#' @examples
#' library(leaflet)
#' library(leafem)
#' library(rstatatools)
#' library(sf)
#' if(interactive()){
#' leaflet() %>%
#' tdtmap(type = "terrain") %>%
#' addFeatures(locsf, weight = 0.1, radius = 0.1)
#' }
#' @export
tdtmap <- function (map, type = "normal", ..., key = "93724b915d1898d946ca7dc7b765dda5") {
  stopifnot(type %in% c("normal", "satellite", "terrain"))
  # `key` generalizes the previously hard-coded Tianditu API token; it sits
  # after `...` so it can only be passed by name, keeping old positional
  # calls working unchanged.
  layer <- switch(type,
    normal    = "vec_w",
    satellite = "img_w",
    terrain   = "ter_w"
  )
  url <- paste0("http://t1.tianditu.com/DataServer?T=", layer,
                "&X={x}&Y={y}&L={z}&tk=", key)
  leaflet::addTiles(map, url,
                    leaflet::tileOptions(tileSize = 256, minZoom = 3, maxZoom = 17, zoomOffset = 1), ...)
}
#' @title Load Tiandi Map annotation to leaflet
#'
#' @description Simple function like addTiles()
#' @import leaflet
#'
#' @param map A leaflet object.
#' @param ... Other parameters passed to the addTiles function.
#'
#' @examples
#' library(leaflet)
#' library(leafem)
#' library(rstatatools)
#' library(sf)
#' if(interactive()){
#' leaflet() %>%
#' tdtmap(type = "terrain") %>%
#' tdtmap_annotion() %>%
#' addFeatures(locsf, weight = 0.1, radius = 0.1)
#' }
#'
#' @export
tdtmap_annotion <- function (map, ..., key = "93724b915d1898d946ca7dc7b765dda5") {
  # Transparent Tianditu annotation (label) overlay, layer code "cia_w".
  # `key` generalizes the previously hard-coded API token; it is named-only
  # (after `...`) so existing calls are unaffected.
  url <- paste0("http://t1.tianditu.com/DataServer?T=cia_w&X={x}&Y={y}&L={z}&tk=", key)
  leaflet::addTiles(map, url,
                    leaflet::tileOptions(tileSize = 256, minZoom = 3, maxZoom = 17), ...)
}
#' @title Load GaoDe Map to leaflet
#'
#' @description Simple function like addTiles()
#' @import leaflet
#'
#' @param map A leaflet object.
#' @param type A character value to set type of Gaode map tiles. Options are "normal" and "satellite".
#' @param ... Other parameters passed to the addTiles function.
#'
#' @examples
#' library(leaflet)
#' library(leafem)
#' library(rstatatools)
#' library(sf)
#' if(interactive()){
#' leaflet() %>%
#' gdmap(type = "satellite") %>%
#' addFeatures(locsf, weight = 0.1, radius = 0.1)
#' }
#'
#' @export
gdmap <- function (map, type = "normal", ...) {
  stopifnot(type %in% c("normal", "satellite"))
  # Pick the Gaode (AutoNavi) tile template for the requested style
  url <- switch(type,
    normal    = "http://webrd01.is.autonavi.com/appmaptile?lang=zh_cn&size=1&scale=1&style=8&x={x}&y={y}&z={z}",
    satellite = "http://webst01.is.autonavi.com/appmaptile?style=6&x={x}&y={y}&z={z}"
  )
  leaflet::addTiles(map, url,
                    leaflet::tileOptions(tileSize = 256, minZoom = 3, maxZoom = 17), ...)
}
#' @title Load GaoDe Map annotation to leaflet
#'
#' @description Simple function like addTiles()
#' @import leaflet
#'
#' @param map A leaflet object.
#' @param ... Other parameters passed to the addTiles function.
#'
#' @examples
#' library(leaflet)
#' library(leafem)
#' library(rstatatools)
#' library(sf)
#' if(interactive()){
#' leaflet() %>%
#' gdmap(type = "satellite") %>%
#' gdmap_annotion() %>%
#' addFeatures(locsf, weight = 0.1, radius = 0.1)
#' }
#'
#' @export
gdmap_annotion <- function (map, ...)
{
  # Gaode label/annotation overlay (style=8 tile set)
  annotation_url <- "http://webst01.is.autonavi.com/appmaptile?style=8&x={x}&y={y}&z={z}"
  leaflet::addTiles(map, annotation_url,
                    leaflet::tileOptions(tileSize = 256, minZoom = 3, maxZoom = 17), ...)
}
#' @title Load Geoq Map to leaflet
#'
#' @description Simple function like addTiles()
#' @import leaflet
#'
#' @param map A leaflet object.
#' @param type A character value to set type of Geoq map tiles. Options are "normal", "PurplishBlue", "Gray", "Warm", "ENG", "LabelAndBoundaryLine", "Subway", "WorldHydroMap", "Gray_OnlySymbol", "Gray_Reference", "PurplishBlue_OnlySymbol", "PurplishBlue_Reference", "Warm_OnlySymbol", "Warm_Reference".
#' @param ... Other parameters passed to the addTiles function.
#'
#' @examples
#' library(leaflet)
#' library(leafem)
#' library(rstatatools)
#' library(sf)
#' if(interactive()){
#' leaflet() %>%
#' geoqmap(type = "ENG") %>%
#' addFeatures(locsf, weight = 0.1, radius = 0.1)
#' }
#'
#' @export
geoqmap <- function (map, type = "normal", ...) {
  # Lookup table of Geoq tile templates, keyed by `type` — replaces a
  # 13-branch if chain with a single data-driven selection. The set of
  # accepted types and their URLs is unchanged.
  urls <- c(
    normal = "http://map.geoq.cn/ArcGIS/rest/services/ChinaOnlineCommunity/MapServer/tile/{z}/{y}/{x}",
    PurplishBlue = "http://map.geoq.cn/ArcGIS/rest/services/ChinaOnlineStreetPurplishBlue/MapServer/tile/{z}/{y}/{x}",
    Gray = "http://map.geoq.cn/ArcGIS/rest/services/ChinaOnlineStreetGray/MapServer/tile/{z}/{y}/{x}",
    Warm = "http://map.geoq.cn/ArcGIS/rest/services/ChinaOnlineStreetWarm/MapServer/tile/{z}/{y}/{x}",
    ENG = "http://map.geoq.cn/ArcGIS/rest/services/ChinaOnlineCommunityENG/MapServer/tile/{z}/{y}/{x}",
    LabelAndBoundaryLine = "http://thematic.geoq.cn/arcgis/rest/services/ThematicMaps/administrative_division_boundaryandlabel/MapServer/tile/{z}/{y}/{x}",
    Subway = "http://thematic.geoq.cn/arcgis/rest/services/ThematicMaps/subway/MapServer/tile/{z}/{y}/{x}",
    WorldHydroMap = "http://thematic.geoq.cn/arcgis/rest/services/ThematicMaps/WorldHydroMap/MapServer/tile/{z}/{y}/{x}",
    Gray_OnlySymbol = "http://thematic.geoq.cn/arcgis/rest/services/StreetThematicMaps/Gray_OnlySymbol/MapServer/tile/{z}/{y}/{x}",
    Gray_Reference = "http://thematic.geoq.cn/arcgis/rest/services/StreetThematicMaps/Gray_Reference/MapServer/tile/{z}/{y}/{x}",
    PurplishBlue_OnlySymbol = "http://thematic.geoq.cn/arcgis/rest/services/StreetThematicMaps/PurplishBlue_OnlySymbol/MapServer/tile/{z}/{y}/{x}",
    PurplishBlue_Reference = "http://thematic.geoq.cn/arcgis/rest/services/StreetThematicMaps/PurplishBlue_Reference/MapServer/tile/{z}/{y}/{x}",
    Warm_OnlySymbol = "http://thematic.geoq.cn/arcgis/rest/services/StreetThematicMaps/Warm_OnlySymbol/MapServer/tile/{z}/{y}/{x}",
    Warm_Reference = "http://thematic.geoq.cn/arcgis/rest/services/StreetThematicMaps/Warm_Reference/MapServer/tile/{z}/{y}/{x}"
  )
  stopifnot(type %in% names(urls))
  leaflet::addTiles(map, urls[[type]],
                    leaflet::tileOptions(tileSize = 256, minZoom = 3, maxZoom = 17), ...)
}
|
bc921f937544adf73eb31ae31a7af3b16fb3e11b | 01507ea904d24afb4f8773e1dc20385309b19fde | /R/stream.R | aebc0c3a7361e2e65408ab49bea2447d745fcbe7 | [] | no_license | Exilehope/fasster | 61c5ef4d859983e76ecf66544139e66f841b2610 | ec1ce55f8780516c3e69858befcfe2b223a62c25 | refs/heads/master | 2020-03-26T22:33:26.010101 | 2018-08-16T02:34:32 | 2018-08-16T02:34:32 | 145,468,667 | 1 | 0 | null | 2018-08-20T20:44:15 | 2018-08-20T20:44:15 | null | UTF-8 | R | false | false | 1,666 | r | stream.R | #' @export
stream.FASSTER <- function(object, data, ...){
  # Update a fitted FASSTER model with a new batch of observations (`data`)
  # by re-running the Kalman filter on just the new rows, then appending the
  # results (states, residuals, fitted values) to the stored model.

  # Rebuild the specials environment so the model's special terms
  # (seasonality, switching, etc.) are evaluated against the new data
  specials <- child_env(caller_env())
  specials <- new_specials_env(
    !!!fasster_specials,
    .env = specials,
    .bury = FALSE,
    .vals = list(
      .data = data,
      .specials = specials
    )
  )

  # Regression matrix X and response for the new observations only
  X <- parse_model_rhs(model_rhs(object%@%"model"), data = data, specials = specials)$args %>%
    unlist(recursive = FALSE) %>%
    reduce(`+`) %>%
    .$X
  response <- eval_tidy(model_lhs(object%@%"model"), data = data)

  # Filter the new observations starting from the stored "future" model
  # (which carries the prior m0/C0 from the previous fit)
  dlmModel <- object$dlm_future
  dlmModel$X <- X
  filtered <- dlmFilter(response, dlmModel)
  # dlmFilter drops to a vector when the state is 1-dimensional
  if(!is.matrix(filtered$a)){
    filtered$a <- matrix(filtered$a)
  }

  # Rebuild return structure: append new-batch results to the stored history
  dlmModel$X <- rbind(object$dlm$X, X)
  resid <- c(object$residuals, filtered$y - filtered$f)
  states <- rbind(object$states, filtered$a)
  fitted <- c(object$fitted, invert_transformation(object%@%"transformation")(filtered$f))

  # Re-estimate observation variance from the full residual history
  filtered$mod$V <- resid %>%
    as.numeric() %>%
    var(na.rm = TRUE)

  # Build the model to start the next forecast/stream from: posterior
  # covariance of the last filtered state becomes the new prior C0
  modFuture <- dlmModel
  lastObsIndex <- NROW(filtered$m)
  modFuture$C0 <- with(filtered, dlmSvd2var(
    U.C[[lastObsIndex]],
    D.C[lastObsIndex, ]
  ))
  # State-noise covariance W re-estimated from one-step state innovations
  wt <- states[seq_len(NROW(states) - 1) + 1, ] - states[seq_len(NROW(states) - 1), ]%*%t(dlmModel$GG)
  modFuture$W <- var(wt)
  modFuture$m0 <- filtered$m %>% tail(1) %>% as.numeric()

  # NOTE(review): this writes `object$dlmModel` although `object$dlm` is read
  # above, and `object$resid` although `object$residuals` is read above —
  # presumably these should match the fields read; confirm against fasster().
  object$dlmModel <- dlmModel
  object$dlm_future <- modFuture
  object$resid <- resid
  object$states <- states
  object$fitted <- fitted
  object$index <- c(object$index, data %>% .[[expr_text(index(data))]])
  object
}
|
4e79d2b6620731f87e38c88b0e456f79035ea97a | d453aa8c92d024fe938adec79fd777af5324eebd | /Rscripts/gene_body_coverage.R | aa9dc6b1f53eae3385a4ce9f4c6c34999dfe058b | [
"MIT"
] | permissive | felixgrunberger/microbepore | f9f276927c243cad3fd459e073928dfdb1bff2d5 | 3ff6657d58ea86e56949db7dd6e93ebdc791e1b3 | refs/heads/master | 2023-05-28T09:49:08.013451 | 2021-06-14T12:24:57 | 2021-06-14T12:24:57 | 365,993,950 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 12,961 | r | gene_body_coverage.R | # >> gene body coverage << #
# load libraries ----
library(here)
source(here("Rscripts/load_libraries.R"))
# functions & defs ----
# Read one strand-specific bedtools-coverage file and rescale each gene's
# positions to a relative 5´->3´ percentage (column `perc_pos`, 0-100).
# The strand label ("plus"/"minus") is parsed from the file name, which is
# expected to look like <name>.<strand>.coverage; minus-strand genes get the
# reversed scale so that 0 is always the 5´ end.
filter_bed_files <- function(input_file) {
  name_stub <- str_split_fixed(string = input_file, pattern = "\\.coverage", n = 2)[, 1]
  file_strand <- str_split_fixed(name_stub, "\\.", 2)[, 2]

  cov_tbl <- suppressMessages(
    vroom(input_file,
          col_names = c("seqid", "TSS", "TTS", "gene", "rel_pos", "counts"),
          num_threads = 8, progress = FALSE)
  ) %>%
    as_tibble()

  # Direction of the 0-100 scale depends on the strand of this file
  pos_range <- if (file_strand == "plus") c(0, 100) else c(100, 0)

  # Output stays grouped by gene, matching downstream expectations
  cov_tbl %>%
    group_by(gene) %>%
    mutate(perc_pos = round(scales::rescale(rel_pos, to = pos_range), digits = 0))
}
# Combine plus- and minus-strand coverage tables and summarise the meta-gene
# coverage profile across relative gene-body positions (0-100%).
#
# input_plus / input_minus: outputs of filter_bed_files() for the two strands.
# Returns one row per relative position with the mean and minimum normalised
# coverage, plus overall median, IQR and quartile coefficient of variation
# (QCoV = IQR / median) of the profile.
merge_bed_files <- function(input_plus, input_minus){
  # Silence per-group summarise() messages for this call only; the original
  # set the option globally and never restored the user's setting.
  old_opts <- options(dplyr.summarise.inform = FALSE)
  on.exit(options(old_opts), add = TRUE)
  rbind(input_plus, input_minus) %>%
    group_by(gene) %>%
    # normalise each gene's coverage to 0-100 so genes are comparable
    mutate(perc_coverage = scales::rescale(counts, to = c(0,100))) %>%
    group_by(perc_pos, gene) %>%
    summarise(C_mean = mean(perc_coverage),
              C_max = max(perc_coverage),
              C_min = min(perc_coverage)) %>%
    group_by(perc_pos) %>%
    summarise(C_mean_sum = mean(C_mean),
              C_m_sum = min(C_mean)) %>%
    # spread statistics of the whole profile
    mutate(median_C = median(C_mean_sum),
           IQR = IQR(C_mean_sum),
           QCoV = IQR/median_C)
}
# Read paired (minus/plus strand) bedtools-coverage files from `folder`,
# normalise them per gene, and return one combined table across all datasets.
#
# folder:              directory holding *.coverage files (searched recursively)
# minimum_wanted_seqs: minimum per-gene coverage required to keep a gene
# output:              "normal"    -> meta-gene profile (merge_bed_files)
#                      "genesizes" -> profile stratified by gene-length class
modify_coverage_files <- function(folder, minimum_wanted_seqs = 10, output = c("normal", "genesizes")){
  # Resolve the mode up front; the original compared `output` (a length-2
  # default vector) with `==` inside if(), which errors in R >= 4.2.
  output <- match.arg(output)
  # Escape the dot so only true ".coverage" files match
  coverage_files <- list.files(folder, recursive = TRUE, pattern = "\\.coverage$")
  # Alphabetical listing puts each dataset's "minus" file (odd index) before
  # its "plus" file (even index); seq_along() is safe for an empty folder.
  plus_c_files <- coverage_files[seq_along(coverage_files) %% 2 == 0]
  minus_c_files <- coverage_files[seq_along(coverage_files) %% 2 == 1]
  # Dataset name = 3rd path component up to "_fu", with any extension dropped
  dataset_names <- str_split_fixed(str_split_fixed(str_split_fixed(plus_c_files, "\\/", n = 3)[,3],"_fu",2)[,1], "\\.", 2)[,1]
  # Accumulate per-dataset tables in a list and bind once at the end
  # (avoids the quadratic rbind-in-a-loop of the original)
  results <- vector("list", length(plus_c_files))
  for(i in seq_along(plus_c_files)){
    tic("normalise coverage files")
    print(paste0("file number ", i, " of ", length(plus_c_files)))
    if(output == "normal"){
      # coverage to normalised meta-gene coverage
      p_t <- filter_bed_files(paste0(folder, plus_c_files[i])) %>%
        group_by(gene) %>% dplyr::filter(min(counts) >= minimum_wanted_seqs) %>% ungroup()
      m_t <- filter_bed_files(paste0(folder, minus_c_files[i])) %>%
        group_by(gene) %>% dplyr::filter(min(counts) >= minimum_wanted_seqs) %>% ungroup()
      results[[i]] <- merge_bed_files(p_t, m_t) %>%
        mutate(dataset = dataset_names[i])
    }else{
      # same, but stratified by gene-length class
      p_t <- filter_bed_files_size(paste0(folder, plus_c_files[i])) %>%
        group_by(gene) %>% dplyr::filter(min(counts) >= minimum_wanted_seqs) %>% ungroup()
      m_t <- filter_bed_files_size(paste0(folder, minus_c_files[i])) %>%
        group_by(gene) %>% dplyr::filter(min(counts) >= minimum_wanted_seqs) %>% ungroup()
      results[[i]] <- merge_bed_files_sizes(p_t, m_t) %>%
        mutate(dataset = dataset_names[i])
    }
    toc()
  }
  # rbindlist keeps the data.table return class of the original implementation
  rbindlist(results)
}
# Collapse nearby 5´/3´ end positions into clusters and keep, per gene, the
# single best-supported site.
#
# inputdf:      table of candidate end positions with `cov`, `gene`,
#               `id_name` and `strand` columns
# selected_end: unquoted column holding the end coordinate (e.g. TSS or TTS)
# merge_w:      positions within this many bases of the previous one are
#               merged into the same cluster
# cov_min:      minimum read support required to keep a site
keep_highest_site <- function(inputdf, selected_end, merge_w = 20, cov_min = 3) {
  inputdf %>%
    # one row per unique end position, ordered along the genome
    distinct({{selected_end}}, .keep_all = TRUE) %>%
    arrange({{selected_end}}) %>%
    # a position starts a new cluster when it lies beyond the previous
    # position's reach (previous coordinate + merge_w)
    mutate(
      prev_reach = lag({{selected_end}}, default = 1) + as.integer(merge_w),
      cluster_id = 1 + cumsum(ifelse(prev_reach >= {{selected_end}}, 0, 1))
    ) %>%
    # within each cluster keep the best-covered site, subject to cov_min
    dplyr::group_by(cluster_id) %>%
    dplyr::filter(cov == max(cov),
                  cov >= cov_min) %>%
    ungroup() %>%
    # and finally one site per gene: the one with maximal coverage
    group_by(gene) %>%
    dplyr::slice(which.max(cov)) %>%
    ungroup() %>%
    dplyr::select(id_name, {{selected_end}}, strand)
}
# Like filter_bed_files(), but additionally drops low-coverage genes and
# attaches a gene-length class (`read_group`) from the global `ecoli_gff`
# annotation table (must be loaded in the session).
#
# input_file: strand-specific bedtools-coverage file (<name>.<strand>.coverage)
# min_wanted: minimum coverage a gene must have at every position to be kept
filter_bed_files_size <- function(input_file, min_wanted = 10){
  # strand label ("plus"/"minus") is encoded in the file name
  strand_f <- str_split_fixed(str_split_fixed(string = input_file, pattern = "\\.coverage", n = 2)[,1], "\\.", 2)[,2]
  suppressMessages(vroom(input_file,col_names = c("seqid","TSS", "TTS", "gene","rel_pos", "counts"), num_threads = 8, progress = F)) %>%
    as_tibble() %>%
    mutate(strand = strand_f) %>%
    group_by(gene) %>%
    # 0-100% of gene length, reversed for minus-strand genes so 0 is the 5´ end
    mutate(perc_pos = ifelse(strand == "plus", round(scales::rescale(rel_pos, to=c(0,100)), digits = 0),
                             round(scales::rescale(rel_pos, to=c(100,0)), digits= 0))) %>%
    dplyr::select(-strand) %>%
    group_by(gene) %>%
    # drop genes whose coverage falls below the threshold anywhere
    dplyr::filter(min(counts) >= min_wanted) %>%
    ungroup() %>%
    # attach gene length (`width`) from the annotation
    left_join(ecoli_gff %>% dplyr::select(id_name, width) %>% dplyr::rename(gene = id_name), by = c("gene")) %>%
    # length classes; NOTE(review): both the (1000,1500] and (1500,2000] bins
    # are labelled "sub1500" -- downstream factor levels define only four
    # classes, but confirm the 1500-2000 bin was meant to share this label.
    mutate(read_group = ifelse(width <= 500, "sub500",
                               ifelse(width > 500 & width <= 1000, "sub1000",
                                      ifelse(width > 1000 & width <= 1500, "sub1500",
                                             ifelse(width > 1500 & width <= 2000, "sub1500",
                                                    ifelse(width > 2000, "big2000",NA))))))
}
# Strand-merged meta-gene coverage profile, stratified by gene-length class
# (`read_group`), plus the number of distinct genes contributing per class.
# NOTE(review): this sets dplyr.summarise.inform globally without restoring
# it; consider saving the old options() value and restoring via on.exit().
merge_bed_files_sizes <- function(input_plus, input_minus){
  # Suppress summarise info
  options(dplyr.summarise.inform = FALSE)
  f <- rbind(input_plus, input_minus) %>%
    group_by(gene) %>%
    # normalise each gene's coverage to 0-100 so genes are comparable
    mutate(perc_coverage = scales::rescale(counts, to = c(0,100))) %>%
    group_by(perc_pos, gene,read_group) %>%
    summarise(C_mean = mean(perc_coverage),
              C_max = max(perc_coverage),
              C_min = min(perc_coverage)) %>%
    group_by(perc_pos,read_group) %>%
    summarise(C_mean_sum = mean(C_mean),
              C_m_sum = min(C_mean)) %>%
    ungroup() %>%
    group_by(read_group) %>%
    # spread statistics per length class (QCoV = IQR / median)
    mutate(median_C = median(C_mean_sum),
           IQR = IQR(C_mean_sum),
           QCoV = IQR/median_C)
  # number of distinct genes contributing to each length class
  gene_per_group <- rbind(input_plus, input_minus) %>%
    distinct(gene, read_group) %>%
    group_by(read_group) %>%
    summarise(n = n())
  f2 <- left_join(f, gene_per_group, by = "read_group")
  return(f2)
}
# load & tidy data ----
## prepare 5´-3´end tables for coverage calculations ====
### primary 5´end ####
dir <- here()
# Primary TSS per CDS; for direct-RNA runs (sample prefix "RNA") the 5´ end is
# shifted by 12 nt -- presumably to correct for bases lost at read starts in
# that protocol; confirm against the library-prep description.
tss <- vroom(file = paste0(dir,"/tables/tss_tables/tss_data_untrimmed.tsv"), num_threads = 8, progress = F) %>%
  mutate(mode = str_sub(sample, 1,3),
         TSS = ifelse(mode == "RNA" & strand == "+", TSS - 12,
                      ifelse(mode == "RNA" & strand == "-", TSS + 12,TSS))) %>%
  dplyr::filter(TSS_type == "primary", type == "CDS") %>%
  keep_highest_site(inputdf = .,selected_end = TSS)
### primary 3´end ####
# Primary TTS per CDS, collapsed to the best-supported site per gene
tts <- vroom(file = paste0(dir,"/tables/tts_tables/tts_data_trimmed.tsv"), num_threads = 8, progress = F) %>%
  mutate(mode = str_sub(sample, 1,3)) %>%
  dplyr::filter(TTS_type == "primary", type == "CDS") %>%
  keep_highest_site(inputdf = .,selected_end = TTS)
### find genes with an annotated primary 5´ and 3´ end ####
# seqnames uses the first seqid of ecoli_gff -- assumes a single-replicon
# (E. coli) reference.
w <- left_join(tss, tts, by = c("id_name", "strand")) %>%
  dplyr::filter(!is.na(TSS), !is.na(TTS)) %>%
  mutate(seqnames = ecoli_gff$seqid[1]) %>%
  dplyr::select(seqnames, TSS, TTS, id_name, strand)
### write strand-split bed-like files for use with `bedtools coverage`
fwrite(w %>% dplyr::filter(strand == "+") %>% dplyr::select(-strand),
       paste0(dir,"/tables/transcript_tables/transcripts.plus.bedgraph"), sep = "\t", col.names = F, quote = F)
fwrite(w %>% dplyr::filter(strand == "-") %>% dplyr::select(-strand),
       paste0(dir,"/tables/transcript_tables/transcripts.minus.bedgraph"), sep = "\t", col.names = F, quote = F)
## read in files from bedtools coverage ====
### full-length > polyA-trimmed > polyA & SSP adapter trimmed > clipping removed > stranded ####
# Meta-gene coverage for the fully trimmed pipeline output
cov_trimmed <- modify_coverage_files(folder = paste0(dir, "/data/coverage_data/coverage_data_pychopper_auto_cutadapt_SSP_clipped_stranded/"),
                                     output = "normal")
### notrimming > stranded ####
# Same, for raw (untrimmed) reads, to compare trimming effects
cov_untrimmed <- modify_coverage_files(folder = paste0(dir,"/data/coverage_data/coverage_data_notrimming_stranded/"),
                                       output = "normal")
### merge datasets ####
# Combine both methods and map dataset ids to sample names via the global
# `old_new` lookup table (rows without a mapping are dropped)
cov_sets <- rbind(cov_untrimmed %>% mutate(method = "untrimmed"),
                  cov_trimmed %>% mutate(method = "trimmed")) %>%
  dplyr::left_join(old_new, by = c("dataset" = "old_name")) %>%
  mutate(sample = new_name) %>%
  dplyr::select(-new_name, -dataset) %>%
  dplyr::filter(!is.na(sample))
## calculate QCoV stratified by gene size
cov_trimmed_sizes <- modify_coverage_files(folder = paste0(dir, "/data/coverage_data/coverage_data_pychopper_auto_cutadapt_SSP_clipped_stranded/"),
                                           output = "genesizes")
# PLOTS ----
## reorder levels ====
# Fix factor orderings so samples/methods appear in a controlled order in the
# figures; `bc_to_sample` is a global barcode-to-sample lookup table.
cov_sets$sample <- factor(cov_sets$sample,
                          levels = (bc_to_sample$sample[c(1,10,11,8,9,2,4,3,5,6,7)]))
cov_sets$method <- factor(cov_sets$method,
                          levels = rev(c("untrimmed", "trimmed")))
# NOTE(review): `cov_trimmed_sizes` as returned by modify_coverage_files()
# carries a `dataset` column, not `sample` -- confirm a rename/join happens
# before this point, otherwise this assignment will fail.
cov_trimmed_sizes$sample <- factor(cov_trimmed_sizes$sample,
                                   levels = rev(bc_to_sample$sample[c(1,10,11,8,9,2,4,3,5,6,7)]))
cov_trimmed_sizes$read_group <- factor(cov_trimmed_sizes$read_group,
                                       levels = (c("sub500",
                                                   "sub1000",
                                                   "sub1500",
                                                   "big2000")))
## plotting ====
### Gene body coverage - Fig. 4A ####
ggplot(data = cov_sets ,
aes(x = perc_pos, y = C_mean_sum)) +
geom_line(size = 1.2,aes(linetype = method), color = "black") +
facet_grid(cols = vars(sample)) +
scale_x_continuous(limits = c(0,100), expand = c(0,0)) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
geom_ribbon(aes(fill = method, ymin = 0, ymax = C_mean_sum), alpha = 0.5, color = NA) +
scale_fill_manual(values = rev(c("#AD9D86","#A1CEC2"))) +
theme_Publication_white() +
theme(panel.grid.major.x = element_blank(),
panel.grid.major.y = element_line(color = "black")) +
ylab("Mean gene body coverage (%)") +
xlab("Relative 5´to 3´gene body position (%)")
### Gene body coverage - Fig. 4C ####
cov_sets %>%
group_by(sample,method) %>%
summarise(QCoV = max(QCoV)) %>%
mutate(mode = str_sub(sample, 1,3),
sample = factor(sample, levels = rev(bc_to_sample$sample[c(1,10,11,8,9,2,4,3,5,6,7)]))) %>%
ggplot() +
geom_bar(aes(y = sample, x = QCoV, group = method, fill = method),position = position_dodge(),
stat = "identity", color = "black") +
theme_Publication_white() +
theme(panel.grid.major.y = element_blank(),
panel.grid.major.x = element_line(color = "black", linetype = "dashed")) +
scale_x_continuous(limits = c(0.0,0.3, expand = c(0,0))) +
scale_fill_manual(values = c("#AD9D86","#A1CEC2")) +
ylab("") +
xlab("QCoV")
### Cov5 - Fig. 4D ####
cov_sets %>%
dplyr::filter(perc_pos <= 10, method == "trimmed") %>%
dplyr::group_by(sample) %>%
summarise(prime5 = mean(C_mean_sum, na.rm = T)/median_C*100) %>%
distinct(sample, .keep_all = T) %>%
mutate(mode = str_sub(sample, 1,3),
sample = factor(sample, levels = rev(bc_to_sample$sample[c(1,10,11,8,9,2,4,3,5,6,7)]))) %>%
ggplot() +
geom_bar(aes(y = sample, x = prime5 - 100, fill = mode),
stat = "identity", color = "black") +
scale_x_continuous(limits = c(-45,45), expand = c(0,0)) +
theme_Publication_white() +
scale_fill_manual(values = cbf1[c(2,5,3)]) +
theme(panel.grid.major.y = element_blank(),
panel.grid.major.x = element_line(color = "black", linetype = "dashed")) +
ylab("") +
xlab("CoV5")
### Cov3 - Fig. 4E ####
cov_sets %>%
dplyr::filter(perc_pos >= 90, method == "trimmed") %>%
dplyr::group_by(sample) %>%
summarise(prime3 = mean(C_mean_sum, na.rm = T)/median_C*100) %>%
distinct(sample, .keep_all = T) %>%
mutate(mode = str_sub(sample, 1,3),
sample = factor(sample, levels = rev(bc_to_sample$sample[c(1,10,11,8,9,2,4,3,5,6,7)]))) %>%
ggplot() +
geom_bar(aes(y = sample, x = prime3 - 100, fill = mode),
stat = "identity", color = "black") +
scale_x_continuous(limits = c(-45,45), expand = c(0,0)) +
theme_Publication_white() +
scale_fill_manual(values = cbf1[c(2,5,3)]) +
theme(panel.grid.major.y = element_blank(),
panel.grid.major.x = element_line(color = "black", linetype = "dashed")) +
ylab("") +
xlab("CoV3")
### QCoV gene size - Fig. 4F ####
ggplot(data = cov_trimmed_sizes %>% mutate(mode = str_sub(sample,1,3)) %>% distinct(sample, read_group, QCoV,n, .keep_all =T),
aes(x = QCoV, y = sample, fill = mode, factor = read_group)) +
geom_bar(stat = "identity", position = position_dodge(), color = "black",
aes(alpha = read_group),size = 0.5) +
scale_alpha_manual(values = rev(c(0,0.33,0.66,1))) +
scale_x_continuous(limits = c(0.0,1.5),expand = c(0,0)) +
theme_Publication_white() +
scale_fill_manual(values = cbf1[c(2,5,3)])
|
65c3dc368a991ab594ef5c81ac314a8e75e38464 | 70412a43e78946f75c14a05d79e23a08636ba625 | /Specializations/perceptual_decision_making/resources/ObsModel_L2.R | 5e1b83acdd101331a9664554c6e3a278a8362711 | [] | no_license | danieljwilson/CMMC-2018 | 94de25ec725b331477d6d38349de3db540d51354 | 8450f092c81f25a056e0de607f05cd79421271e8 | refs/heads/master | 2020-03-22T19:45:07.769122 | 2018-08-04T16:57:08 | 2018-08-04T16:57:08 | 140,547,285 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,870 | r | ObsModel_L2.R | ## Observer model - skeleton code with a suggested structure
library(stats)
library(plyr)
library(Hmisc)
rm(list=ls())
setwd()
# PC_MAFC computes the predicted proportion correct for a single signal level
# in an M-alternative forced-choice (M-AFC) task with equal-variance Gaussian
# signal and noise distributions.
#
# mu_signal: mean of the signal distribution
# mu_noise:  mean of the noise distributions (assumed <= mu_signal)
# sigma:     common standard deviation
# M:         number of alternatives (was hard-coded to 4; default preserves
#            the original behaviour)
# Returns the predicted proportion correct (a scalar in [1/M, 1]).
PC_MAFC<-function(mu_signal,mu_noise=0,sigma=1,M=4){
  # Integrand: density of the signal variate times the probability that all
  # M - 1 noise variates fall below it (standard M-AFC integral)
  MAFCmax<-function(x,mean1=0,mean2=0,sd1=1,sd2=1,M=2) dnorm(x,mean=mean1,sd=sd1)*(pnorm(x,mean=mean2,sd=sd2))^(M-1)
  # The integral has no closed form; integrate numerically over +/- 5 SD
  # around the two means (assumes mu_noise <= mu_signal)
  lowlim<-mu_noise-5*sigma
  uplim<-mu_signal+5*sigma
  PC<-integrate(MAFCmax,
                lowlim,
                uplim,
                mean1=mu_signal,
                mean2=mu_noise,
                sd1=sigma,
                sd2=sigma,
                M=M)
  PC_MAFCval<-PC$value
  return(PC_MAFCval)
}
# Write the error function that returns the deviance for a single subject
# Skeleton: error function for a single subject's psychometric data.
# Intended to compute and return the deviance of `PFdata` under the parameter
# vector `parms` (for use with optim/optimise).
# NOTE: `MyDeviance` is not defined -- this is deliberately left as an
# exercise; the function will error until the body is completed.
MyModel<-function(parms,PFdata){
  return(MyDeviance)
}
# Write a function that finds the best-fitting parameter(s) for a single subject: in other words: find the MLE parameters for MyModel, using optim or optimise. Having a function that does this will make it easier to efficiently fit multiple subjects in one go (rather than a for-loop)---have a look at the apply family of functions.
# However, you don't have to write a specific function and you could simply embed an 'optim(ize)' call in a for-loop that cycles through each participant separately.
# Load the data file and fit the model (see previous comment).
# Plot the results: observed data and model predictions superimposed. Do this separately for each subject. Better still: create a single figure with 8 subplots. |
7a4f4f054268bbff26976136396574facb9fc565 | e12f2926d8521776c8a906f5b1c800c51d57de9c | /Project1Code.R | e1c25386ec444004c2c81f769b9a630d669210fa | [] | no_license | bryanluna/RepData_PeerAssessment1 | 3a8aa710dba5d9e968d392f3f75dbf78fd2ec436 | c9f5ca232ee7ffb841dd1fd614b02553eb559389 | refs/heads/master | 2021-01-23T01:46:13.827666 | 2017-05-31T01:25:47 | 2017-05-31T01:25:47 | 92,877,257 | 0 | 0 | null | 2017-05-30T21:21:38 | 2017-05-30T21:21:37 | null | UTF-8 | R | false | false | 4,131 | r | Project1Code.R | #Read in the CSV data, assuming the file is already downloaded and unzipped in the wd
data <- read.csv("activity.csv")
#Transform data for analysis
##Convert dates to date format
data$date <- as.POSIXct(data$date, format="%Y-%m-%d")
##Add day of week to data
data <- data.frame(date=data$date,
weekday=tolower(weekdays(data$date)),
steps=data$steps,
interval=data$interval)
##Name each day weekday or weekend
data <- cbind(data, daytype=ifelse(data$weekday == "saturday" |
data$weekday == "sunday", "weekend", "weekday"))
##Create final data frame for analysis
activity <- data.frame(date=data$date,
weekday=data$weekday,
daytype=data$daytype,
interval=data$interval,
steps=data$steps)
##Print first couple rows of final data frame
head(activity)
#What is mean total number of steps taken per day?
##Calculate the total number of steps taken per day
sum_data <- aggregate(activity$steps, by=list(activity$date), FUN=sum, na.rm=TRUE)
names(sum_data) <- c("date", "steps")
##Make a histogram of the total number of steps taken each day
hist(sum_data$steps,
breaks=seq(from=0, to=25000, by=2500),
xlab="Total Steps",
ylim=c(0, 20),
main="Histogram of Total Steps Each Day")
##Calculate and report the mean and median of the total number of steps taken per day
mean <- mean(sum_data$steps)
median <-median(sum_data$steps)
#What is the average daily activity pattern?
##Make a time series plot of the 5-minute interval and the average number of steps taken, averaged across all days
mean_data <- aggregate(activity$steps,
by=list(activity$interval),
FUN=mean,
na.rm=TRUE)
names(mean_data) <- c("interval", "mean")
plot(mean_data$interval,
mean_data$mean,
type="l",
lwd=2,
xlab="Interval",
ylab="Average number of steps",
main="Time-series of the average number of steps per intervals")
##Which 5-minute interval, on average across all the days in the dataset, contains the maximum number of steps?
max_interval <- mean_data[which.max(mean_data$mean),1]
#Imputing missing values
##Calculate and report the total number of missing values in the dataset (i.e. the total number of rows with NAs)
NA_count <- sum(is.na(activity$steps))
##Devise a strategy for filling in all of the missing values in the dataset.
na_pos <- which(is.na(activity$steps))
mean_vec <- rep(mean(activity$steps, na.rm=TRUE), times=length(na_pos))
##Create a new dataset that is equal to the original dataset but with the missing data filled in.
activity[na_pos, "steps"] <- mean_vec
##Make a histogram of the total number of steps taken each day and calculate and report the mean and median total number of steps taken per day.
sum_data <- aggregate(activity$steps, by=list(activity$date), FUN=sum)
names(sum_data) <- c("date", "steps")
hist(sum_data$steps,
breaks=seq(from=0, to=25000, by=2500),
xlab="Total Steps",
ylim=c(0, 30),
main="Histogram of Total Steps Each Day\nwith NAs Replaced by Mean of Steps")
mean2 <- mean(sum_data$steps)
median2 <-median(sum_data$steps)
#Are there differences in activity patterns between weekdays and weekends?
##Create a new factor variable in the dataset with two levels - “weekdays” and “weekend”
###This was already done when orignally transforming data for analysis
##Make a panel plot containing a time series plot of the 5- minute interval and the average number of steps taken, averaged across all weekday days or weekend days.
library(lattice)
mean_data <- aggregate(activity$steps,
by=list(activity$daytype,
activity$weekday, activity$interval), mean)
names(mean_data) <- c("daytype", "weekday", "interval", "mean")
xyplot(mean ~ interval | daytype, mean_data,
type="l",
lwd=1,
xlab="Interval",
ylab="Number of steps",
layout=c(1,2))
|
76e179a6a00582854e7dd7fe07b5dd271efd38c8 | 12ae74bd0ba9d5494d7301b521b45d1bfa5ff84a | /man/equal_length.Rd | 82a59960d1bf09f59e26b6a1cce10a65924c0fbd | [] | no_license | cran/do | 62b609a0f0cc0f0c0cc879adb821b1d9d95b6632 | fa0d7c8f9799326ffa6f0763f490c2873597131b | refs/heads/master | 2021-08-15T11:59:00.793187 | 2021-08-03T10:40:02 | 2021-08-03T10:40:02 | 206,034,685 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 727 | rd | equal_length.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/equal_length.R
\name{equal_length}
\alias{equal_length}
\title{Equal Length}
\usage{
equal_length(x, suffix = " ", nchar, colname = FALSE, rowname = FALSE)
}
\arguments{
\item{x}{can be number, strings, verctors, dataframe or matrix.}
\item{suffix}{suffix}
\item{nchar}{maximum length}
\item{colname}{a logical value, default is FALSE}
\item{rowname}{a logical value, default is FALSE}
}
\value{
equal length results
}
\description{
Equal Length
}
\examples{
a=c(123,1,24,5,1.22554)
equal_length(a,0)
df = data.frame(
a=c(12,1,1.23),
b=c('a','abcd','d')
)
equal_length(x = df,suffix = 'x')
equal_length(x = df,suffix = 0,nchar =5)
}
|
225d61bace05b9f86a4b811016ad121fc30fecfa | 9cd088408e0d37dfd0d8df7ad3a793d48aefc7e5 | /inst/check.R | b7b49babf73e35e704111ec151874cd5ed064ce6 | [] | no_license | mdbrown/TreatmentSelection | 2efad91fae21c091c2b8361a3deb8f9667c1121d | 3cc9f213edd10e09f2801ea30b73bff66a4655ff | refs/heads/master | 2021-04-22T12:08:34.659741 | 2017-08-16T23:13:00 | 2017-08-16T23:13:00 | 12,115,030 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,486 | r | check.R |
# Simulate survival data with a single continuous marker and a randomised
# binary treatment.
#
# nn:       number of subjects
# mu,Sigma: mean and SD of the marker Y
# beta:     log hazard ratio for the marker
# beta2:    log hazard ratio for treatment
# beta3:    log hazard ratio for the marker-by-treatment interaction
# lam0:     baseline hazard rate
# cens.lam: rate of (optional) exponential censoring; 0 = administrative only
# time.max: administrative censoring / end-of-follow-up time
# Returns a data.frame with columns xi (observed time), di (failure
# indicator), Y (marker) and trt (treatment arm).
SIM.data.singleMarker <- function(nn,
                                  mu = 0,
                                  Sigma = 1,
                                  beta = log(3),
                                  beta2 = log(.5),
                                  beta3 = log(2),
                                  lam0 = .1,
                                  cens.lam = 0,
                                  time.max = 5) {
  # Marker and 50/50 randomised treatment assignment
  Y <- rnorm(nn, mu, Sigma)
  trt <- rbinom(nn, size = 1, prob = .5)
  # Linear predictor with marker-by-treatment interaction
  lin_pred <- Y * beta + trt * beta2 + Y * trt * beta3
  # True event times: Gumbel error on the log scale, baseline rate lam0
  gumbel_err <- log(-log(runif(nn)))
  event_time <- exp(gumbel_err - lin_pred) / lam0
  # Censoring: administrative at time.max, optionally exponential as well
  cens_time <- rep(time.max, nn)
  if (cens.lam > 0) {
    cens_time <- rexp(nn, rate = cens.lam)
  }
  cens_time <- pmin(cens_time, time.max)
  # Observed follow-up time and failure indicator
  xi <- pmin(event_time, cens_time)
  di <- ifelse(event_time == xi, 1, 0)
  # cbind keeps every column numeric, matching the original return structure
  result <- as.data.frame(cbind(xi, di, Y, trt))
  names(result) <- c("xi", "di", "Y", "trt")
  result
}
# Quick manual check of the survival treatment-selection workflow.
surv_tsdata <- SIM.data.singleMarker(1000)
# NOTE(review): data() reloads the packaged `surv_tsdata`, discarding the
# simulation above -- confirm which dataset is actually intended here.
data(surv_tsdata)
# Fit a treatment-selection model for marker Y at prediction time 1
ts_surv <- trtsel(Surv(time = xi, event = di)~Y*trt,
                  treatment.name = "trt",
                  prediction.time = 1,
                  data = surv_tsdata)
plot(ts_surv, bootstraps = 10)
calibrate(ts_surv)
evaluate(ts_surv, bootstraps = 10)
TreatmentSelection::evaluate(ts_surv, bootstraps = 50,bias.correct = FALSE)
|
b67c105bd0977ae67c8bceea96cfd157b06f2778 | c7efbf534a3a3ebb3f58bc60fd9875f1ae73a6a5 | /02b_terraclim_map.R | 073d14a3bb6931a9a9d4db33e689da1231b07e2a | [] | no_license | CaitLittlef/fia-regen | ed20fecc4ec3c4110ed7552e7b2e8b1e2c52a805 | f7c17cdc6481b4f17021c1c747c5ea83dff1404a | refs/heads/master | 2021-06-22T09:20:33.476516 | 2020-12-18T14:01:32 | 2020-12-18T14:01:32 | 165,902,272 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,142 | r | 02b_terraclim_map.R | ## Map climatic conditions in study area
## set directory for terraclimate
tc.dir <- "C:/Users/clittlef/Google Drive/2RMRS/fia-regen/data/terraclimate/"
## Load TerraClimate 1981-2010 normals, clipped to the Interior West states.
## Fix: the original piped into plot(), so `def` and `precip` captured the
## (invisible NULL) return value of plot() instead of the raster; load first,
## then plot separately.
def <- raster(paste0(tc.dir,"def.1981.2010.tif")) %>% crop(IntWsts) %>% mask(IntWsts)
plot(def)
tmax <- raster(paste0(tc.dir, "tmax.1981.2010.tif")) %>% crop(IntWsts) %>% mask(IntWsts) #%>% plot()
precip <- raster(paste0(tc.dir, "ppt.1981.2010.tif")) %>% crop(IntWsts) %>% mask(IntWsts)
plot(precip)
## Find years that have strong spatial graident in deficit
# Temporarily re-set working directory to list all files (May - Sept)
# Load, crop, and mask those rasters.
setwd(paste0(tc.dir,"/def_z/"))
def.tifs = list.files(pattern="*5.9.tif", full.names = TRUE)
def.stack <- stack(def.tifs)
wd <- setwd("C:/Users/clittlef/Google Drive/2RMRS/fia-regen/data") # If working with/within drive
def.stack <- def.stack %>% crop(IntWsts) %>% mask(IntWsts)
# Alt: alt, use sequential lapply, tho won't work with mask.
# def.list <- lapply(def.tifs, raster)
# def.list <- lapply(def.list, crop, y = IntWsts) # For lapply, specify 2nd var in fun as y.
# # For whatever reason, cannot apply mask with lapply, so do in loop
# def.list.2 <- list()
# for(i in (1:length(def.list))){
# def.list.2[[i]] <- def.list[[i]] %>% mask(IntWsts)
# }
# def.list <- def.list.2 ; rm(def.list.2)
# Rename; have run full dates then crop 2 digits off of right
names(def.stack) <- paste0("def", right(c(1981:2017),2))
#############################################WHICH YRS?########################
# ...select 10-12 and 15-17 as years with big spatial variability in deficit
plot(def.stack$def15)
plot(def.stack$def99)
# Alt, could unlist and assign names to each separate raster.
# Take max from those series of yrs:
# NOTE(review): the overlay() calls below are commented out, so def9395,
# def1012 and def1517 are undefined at this point; the plot() calls that
# follow will error unless these are uncommented (or the objects remain in
# the workspace from a previous run).
# def9395 <- overlay(def.stack$def93, def.stack$def94, def.stack$def95,
#                    fun=function(x){ max(x,na.rm=T)})
# def1012 <- overlay(def.stack$def10, def.stack$def11, def.stack$def12,
#                    fun=function(x){ max(x,na.rm=T)})
# def1517 <- overlay(def.stack$def15, def.stack$def16, def.stack$def17,
#                    fun=function(x){ max(x,na.rm=T)})
plot(def9395)
plot(def1012)
plot(def1517)
# zoom(def1517)
#############################################PLOTTING########################
# Turn deficit raster into table (function defiend in 00_setup)
def.data <- gplot_data(def.stack$def15)
def.data <- gplot_data(def.stack$def16)
def.data <- gplot_data(def.stack$def17)
def.data <- gplot_data(def9395)
def.data <- gplot_data(def1012)
def.data <- gplot_data(def1517)
# What should the limits when plotting be?
min(def.data$value[is.finite(def.data$value)], na.rm =TRUE) # 1997: 02.85; 1998: -3.22; 2012: -2.54; 2017: -3.12
max(def.data$value[is.finite(def.data$value)], na.rm =TRUE) # 1997: 0.92; 1998: 1.75; 2012: 5.41; 2017: 3
# Turn hillshade raster into table (function defined in 00_setup)
# hill.data <- gplot_data(hill)
# Then do somethign with this:
# annotate(geom = 'raster', x = hill.data$x, y = hill.data$y,
# fill = scales::colour_ramp(c("light grey", "dark grey"))(hill.data$value),
# interpolate = TRUE) +
## For overlaying 2 rasters, use annotate and geom_raster to control both colors.
# ref re: plotting rasters in ggplot
# https://stackoverflow.com/questions/47116217/overlay-raster-layer-on-map-in-ggplot2-in-r
# Here, can turn on/off hillshade
# For pix min max, load this (from 05_spp_models_brt_pixel_track.R):
pixels <- read.csv("loc.pixels.csv")
rownames(pixels) <- c("pix.min.1012",
"pix.max.1012",
"pix.min.1517",
"pix.max.1517")
display.brewer.pal(8, "Dark2")
dev.off()
par(mfrow=c(1,1))
def.data <- gplot_data(def.stack$def15); yrlabel <- 2015; p15 <- ggplot() +
# def.data <- gplot_data(def.stack$def16); yrlabel <- 2016; p16 <- ggplot() +
# def.data <- gplot_data(def.stack$def17); yrlabel <- 2017; p17 <- ggplot() +
geom_raster(data = def.data, aes(x = x, y = y, fill = value), interpolate = TRUE) +
# geom_tile(data = def.data, aes(x = x, y = y, fill = value)) +
# geom_sf(data = nonIntWest.aea, color = "#808B96", fill = "white") +
# geom_sf(data = IntWsts.aea, color = "#808B96", fill = NA) +
geom_sf(data = nonIntWest, color = "#808B96", fill = "white") +
geom_sf(data = IntWsts, color = "#808B96", fill = NA) +
# geom_point(data = pixels["pix.min.1012",], aes(x=x, y=y), color = palette[5], size = 5) +
# geom_point(data = pixels["pix.max.1012",], aes(x=x, y=y), color = palette[3], size = 5) +
# geom_point(data = pixels["pix.min.1517",], aes(x=x, y=y), color = palette[1], size = 5) +
# geom_point(data = pixels["pix.max.1517",], aes(x=x, y=y), color = palette[4], size = 5) +
scale_fill_gradient2("CMD\nanomaly",
# low = palette[8], mid = "white", high = "#145adb", #high = palette[4],
# low = "#145adb", mid = "white", high = palette[2],
low = palette[3], mid = "white", high = palette[2],
midpoint = 0,
limits = c(-3.5,3.5), # 2015
# limits = c(-1,5.5), # 2016
# limits = c(13,19), # 2017
na.value = NA) +
# na.value = "#EAECEE")+ # sets background IntW states pale grey
coord_sf(xlim = c(-121, -100), ylim = c(30, 50), expand = FALSE) +
theme_bw(base_size = 18) +
# theme(panel.grid.major = element_line(color = "#808B96"), # blend lat/long into background
theme(panel.grid.major = element_blank(), # blend lat/long into background
panel.border = element_rect(fill = NA, color = "black", size = 0.5),
panel.background = element_rect(fill = "#EAECEE"),
axis.title = element_blank(),
legend.background = element_rect(fill = "white", color = "black", size = 0,5),
# legend.title = element_blank(),
legend.justification=c(0,0), # defines which side oflegend .position coords refer to
legend.position=c(0,0),
legend.text=element_text(size=10),
legend.title = element_text(size=12),
# plot.margin=unit(c(0.5,1.5,1.5,1.5),"cm")) + # top, right, bottom, left
plot.margin=unit(c(0.5,1.25,0.5,0.5),"cm")) + # top, right, bottom, left
# annotate("text", x = -120.5, y = 49.5, label = "2010-2012", hjust = 0)
annotate("text", x = -120.5, y = 49.5, label = paste0(yrlabel), hjust = 0) #+
# coord_equal()
# coord_map("albers",lat0=39, lat1=45)
dev.off()
p15
p16
p17
temp <- 2015
temp <- 2016
temp <- 2017
# pdf(paste0(out.dir, "def_map_", temp, "_", currentDate,".pdf"),
png(paste0(out.dir, "def_map_", temp, "_", currentDate,".png"),
# width = 6, height = 8, units = "cm", res = 300)
width = 475, height = 600, units = "px", pointsize = 12)
# width = 3, height = 4)
p15; dev.off()
p16; dev.off()
p17; dev.off()
#################################################non-ggplot maps#####################3
## Goal: project maps. Folks recommend ggplot-similar tmap.
# refs:
# https://geocompr.robinlovelace.net/adv-map.html
# see colors with this:
# tmaptools::palette_explorer()
# install.packages("tmap")
# install.packages("shinyjs")
library(tmap)
library(shinyjs)
# FIXME: cannot add coordaintes. crs still stuck in m and tm_grid shows as such.
# tm_graticules, which should add coords doesn't seem to exist anymore.
## Pick Albers equal area projection & transform all data.
aea.proj <- "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-110 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m"
IntWsts.aea <- st_transform(IntWsts, crs = aea.proj)
nonIntWest.aea <- st_transform(nonIntWest, crs = aea.proj)
def15.aea <- projectRaster(def.stack$def15, crs = aea.proj)
hill.aea <- projectRaster(hill, crs = aea.proj)
plot(st_geometry(IntWsts))
plot(st_geometry(IntWsts.aea))
plot(st_geometry(nonIntWest))
plot(st_geometry(nonIntWest.aea))
plot(def.stack$def15)
plot(def15.aea)
plot(hill.aea)
## I'll want to control bounding box of map.
# Use IntW, expanded slightly.
# https://www.jla-data.net/eng/adjusting-bounding-box-of-a-tmap-map/
(bbox <- st_bbox(IntWsts.aea))
bbox_new <- bbox
bbox_new[1] <- (bbox[1] - 20000) #xmin
bbox_new[3] <- (bbox[3] + 20000) #xmas
bbox_new[2] <- (bbox[2] - 20000) #ymin
bbox_new[4] <- (bbox[4] + 20000) #ymax
bbox_new <- bbox_new %>% # take the bounding box ...
st_as_sfc() # ... and make it a sf polygon
## Create map. Cannot figure out how to get negative values on bottom in legend.
map <- # by default, set master to establish bbox, projection, etc (default = 1st raster)
tm_shape(IntWsts.aea, is.master = TRUE, bbox = bbox_new) + # master to rule bbox, proj
tm_fill("grey40") + # for holes in raster
# # add in hillshade for study area first with continuous grey gradient
# tm_shape(hill.aea) + tm_raster(palette = "Greys", style = "cont") +
# add in deficit values with reverse palette; may make transparent with alpha
tm_shape(def15.aea) + tm_raster(palette = "-RdYlBu",
style = "cont",
title = "CMD\nanomaly") +#, alpha = 0.85) +
# add in non-Interior West states with light grey fill
tm_shape(nonIntWest.aea) + tm_borders(lwd=1.5) + tm_fill("gray90") +
# add in Interior West states with no fill
tm_shape(IntWsts.aea) + tm_borders(lwd=1.5) +
tm_layout(legend.show = TRUE,
legend.position = c(0.01, 0.01),
legend.bg.color = "white",
legend.title.size = 0.8,
legend.text.size = 0.6,
legend.frame = TRUE) ; map
## Save as pdf by version
# v <- 1
pdf(paste0(out.dir, "def_map_2015_v",v, "_", currentDate,".pdf"),
width = 3, height = 4) ; v <- v+1
map
dev.off()
# Coordinates? tm_graticules() no longer seems to exist. Can't figure out lat/long.
# https://geocompr.github.io/post/2019/tmap-grid/
#################################################STUDY SITES#####################3
## map of study sites
# Build one point table of FIA plots: plots present only in data.pipo are
# ponderosa pine (PIPO), plots only in data.psme are Douglas-fir (PSME),
# plots in both get "both species".
temp.pipo <- data.pipo %>% dplyr::select(UNIQUEID, LAT_FS, LON_FS) %>%
  rename(x = LON_FS, y = LAT_FS) %>%
  mutate(pipo = "pipo")
temp.psme <- data.psme %>% dplyr::select(UNIQUEID, LAT_FS, LON_FS) %>%
  rename(x = LON_FS, y = LAT_FS) %>%
  mutate(psme = "psme")
# BUG FIX: labels were swapped. is.na(pipo) means the plot appeared ONLY in
# the PSME (Douglas-fir) table, and is.na(psme) means it appeared ONLY in the
# PIPO (ponderosa) table; the original assigned "ponderosa" and "Douglas-fir"
# the other way around, mislabeling every single-species point in the legend.
temp <- full_join(temp.pipo, temp.psme, by = c("UNIQUEID", "x", "y")) %>%
  mutate(sp = ifelse(is.na(pipo), "Douglas-fir", ifelse(is.na(psme), "ponderosa", "both species")))
# Order so "both" are plotted on top
temp <- arrange(temp, desc(sp))
# dummy raster to cover up coordinate lines; plot this as single color raster
# dummy <- def.data %>% dplyr::select(x, y, value)
# dummy$value <- ifelse(is.na(dummy$value, 1, NA))
# ^ nevermind. unnecessary if panel.grid.major= element_blank()
display.brewer.pal(8, "Dark2")
dev.off()
par(mfrow=c(1,1))
# Map of FIA study-site locations over the Interior West states.
p <- ggplot() +
  # geom_raster(data = dummy, aes(x = x, y = y, fill = value), interpolate = TRUE) +
  scale_fill_gradient(low = "#EAECEE", high = "#EAECEE", na.value ="#EAECEE", guide = FALSE) +
  geom_sf(data = nonIntWest, color = "#808B96", fill = "white") +
  geom_sf(data = IntWsts, color = "#808B96", fill = NA) +
  # geom_sf(data = IntWsts, color = "#808B96", fill = "#EAECEE", na.value = NA) +
  geom_point(data = temp, aes(x=x, y=y, color = sp), size = 3, alpha = 0.5) +
  scale_color_manual("FIA plots used", values = c(palette[1], palette[2], palette[3])) +
  coord_sf(xlim = c(-121, -100), ylim = c(30, 50), expand = FALSE) +
  theme_bw(base_size = 12) +
  theme(panel.grid.major = element_blank(), # blend lat/long into background
        panel.border = element_rect(fill = NA, color = "black", size = 0.5),
        panel.background = element_rect(fill = "#EAECEE"),
        axis.title = element_blank(),
        # BUG FIX: was `size = 0,5`, which passed size = 0 plus a stray
        # positional argument 5; the intended border width is 0.5.
        legend.background = element_rect(fill = "white", color = "black", size = 0.5),
        # legend.title = element_blank(),
        legend.justification=c(0,0), # which corner the legend.position coords refer to
        legend.position=c(0,0),
        legend.text=element_text(size=10),
        legend.title = element_text(size=12),
        plot.margin=unit(c(0.5,1.25,0.5,0.5),"cm")) # top, right, bottom, left
# Close the color-palette preview device, then write the map to disk.
dev.off()
p
# BUG FIX: the original opened the png device and then immediately opened the
# pdf device on top of it, so only the pdf ever received the plot -- the .png
# file was left empty and its device leaked. Each format now gets its own
# open/print/close cycle. print() is used so the plot renders even when the
# script is source()'d.
png(paste0(out.dir,"FIA_plots_used_",currentDate,".png"),
    width = 475, height = 600, units = "px")
print(p)
dev.off()
pdf(paste0(out.dir,"FIA_plots_used_",currentDate,".pdf"),
    width = 3, height = 5)
print(p)
dev.off()
########################################ENVI AMPLITUDE#####################
## What's the envi amplitude over which pipo optimum (14-19 degrees C) occurs?
# Interactive 3D scatter of TMAX vs latitude vs elevation for ponderosa plots.
# NOTE(review): this reassigns `p` (previously the ggplot map object) to a
# plotly object -- any later code expecting the ggplot will get this instead.
p <- plot_ly(data.pipo, x = ~tmax.tc, y = ~LAT_FS, z = ~ELEV, color = ~tmax.tc) %>%
  add_markers() %>%
  layout(scene = list(xaxis = list(title = 'TMAX'),
                      yaxis = list(title = 'LAT'),
                      zaxis = list(title = 'ELEV')))
p
# Plots within the 14-19 C optimum; the recorded extremes below are the
# latitude/elevation ranges at the warm (>18) and cool (<15) ends.
temp <- data.pipo %>% filter(tmax.tc >14 & tmax.tc <19)
min(temp$LAT_FS[temp$tmax.tc >18]) # 32.45029
max(temp$LAT_FS[temp$tmax.tc <15]) # 47.70303
min(temp$ELEV[temp$tmax.tc >18]) # 6248
max(temp$ELEV[temp$tmax.tc <15]) # 9143
|
ac4573c070faf4a1bcf40f3bceed3e94870c9456 | 9cf8c603d52ed07b978d7b4dc65adac2fb898a25 | /man/speed2run.Rd | aadc4dfa649ed03052c958b6b5a63e8455b36989 | [] | no_license | molsysbio/speed2 | 876f95868ba90de24f5d3b3f9c9660ebaaa9f481 | e28311ee3033e57590b6a0089219090186983a6d | refs/heads/master | 2021-04-16T09:25:04.612340 | 2020-04-26T05:44:36 | 2020-04-26T05:44:36 | 249,345,480 | 6 | 3 | null | null | null | null | UTF-8 | R | false | true | 1,489 | rd | speed2run.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/speed2.R
\name{speed2run}
\alias{speed2run}
\title{Test pathway gene signature enrichment using SPEED2}
\usage{
speed2run(genes, background_genes = c(), shuffle = F, custom_signatures = NULL)
}
\arguments{
\item{genes}{list of genes in Entrez or gene symbols format to test
for enrichment in SPEED2. Maximum 500 genes.}
\item{background_genes}{(optional) list of background genes in Entrez or gene
symbol from which \code{genes} were selected. If not provided, the full
set of SPEED2 genes are used.}
\item{shuffle}{(optional) shuffle identities of genes in SPEED2, for control
experiments.}
\item{custom_signatures}{(optional, advanced) user provided custom pathway
gene signatures to use instead of SPEED2, provided as a tibble with the
following columns: p_id (pathway id), g_id (gene id), zrank_signed_mean
(average normalized z score across many experiments, between -1 and +1),
P.bates (p-value associated with zrank_signed_mean). Users are expected
to handle Entrez conversion and background gene list themselves.}
}
\value{
List with four items: \code{df_stattest} a tibble with enrichment
scores, \code{df_rankcoords} coordinates for GSEA plot (see publication),
and two lists with unmatched items in \code{genes} and
\code{background_genes}.
}
\description{
Test pathway gene signature enrichment using SPEED2
}
\examples{
ret = speed2run(genes=speed2:::speed2_signatures$g_id[1:50])
}
|
522a87e37308903778ff8570e60ba3fb379cc9ca | 536c0ec981961c33343d13eb339963f6c0274be9 | /run_analysis.R | 228ccb02b621dcd92b0ecd998197aa0a79b9ce0b | [] | no_license | alanc1988/getting_and_cleaning_data | d814bc5bd1f5b6ac5966c6f322b69b15a81c277c | ca54ee6fd4e6fe4283ca465a160f7ba0d87e8700 | refs/heads/master | 2021-01-23T11:34:49.424346 | 2014-07-27T20:07:16 | 2014-07-27T20:07:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,730 | r | run_analysis.R | ####################################################################################################################
## PROJECT TASK
####################################################################################################################
# You should create one R script called run_analysis.R that does the following.
# Merges the training and the test sets to create one data set.
# Extracts only the measurements on the mean and standard deviation for each measurement.
# Uses descriptive activity names to name the activities in the data set
# Appropriately labels the data set with descriptive variable names.
# Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
####################################################################################################################
# Reset workspace and console; Hmisc is loaded for its inc() assignment helper.
# NOTE(review): options(warn=-1) silences ALL warnings for the session.
rm(list=ls())
cat("\014")
library(Hmisc);
setwd("~/../Desktop/Getting and Cleaning Data/UCI HAR Dataset")
options(warn=-1)
print("starting run_analysis.R")
print("loading training and test data sets into memory...")
# Raw UCI HAR files: feature matrices (X), activity ids (y), subject ids.
X_train <- read.table("train/X_train.txt")
y_train <- read.table("train/y_train.txt")
subject_train <- read.table("train/subject_train.txt")
X_test <- read.table("test/X_test.txt")
y_test <- read.table("test/y_test.txt")
subject_test <- read.table("test/subject_test.txt")
print("data loaded, re-formatting...")
dist_subj <- unique(tmp <- rbind(subject_train,subject_test))$subject
no_subj <- nrow(tmp)
activity_labels <- read.csv("activity_labels.txt", sep="", header=FALSE)
features <- read.table("features.txt")[,2]
names(activity_labels) <- c("id", "activity")
no_act <- nrow(activity_labels)
names(subject_test) <- names(subject_train) <- "subject"
names(X_test) <- names(X_train) <- features
names(y_test) <- names(y_train) <- "activity"
# Keep only mean/std measurement columns.
X_train <- X_train[,grepl("mean|std", features)]
X_test <- X_test[,grepl("mean|std", features)]
# Replace numeric activity ids with their descriptive labels.
y_train[,1] <- sapply(y_train[,1],function(x){subset(activity_labels,id == x)$activity;})
y_test[,1] <- sapply(y_test[,1],function(x){subset(activity_labels,id == x)$activity;})
print("constructing table...")
train <- cbind(as.data.frame(subject_train), y_train, X_train)
test <- cbind(as.data.frame(subject_test), y_test, X_test)
no_cols <- length(cmb_data <- rbind(test, train))
cmb_data$activity <- as.factor(cmb_data$activity)
cmb_data$subject <- as.factor(cmb_data$subject)
# NOTE(review): the loop below substitutes id digits with labels again, but the
# activity column was already relabeled by the sapply() calls above -- this
# pass appears redundant. inc(i) <- 1 is Hmisc's increment-assignment.
i = 1
for (j in activity_labels$activity) {
  cmb_data$activity <- gsub(i, j, cmb_data$activity)
  inc(i) <- 1
}
# Average every variable per (activity, subject) group.
# NOTE(review): `<-` inside the by-list assigns globals `activity`/`subject`
# and leaves the group columns unnamed (Group.1/Group.2); `=` was likely meant.
output <- aggregate(cmb_data, by=list(activity <- cmb_data$activity, subject <- cmb_data$subject), mean)
print("writing table out to disk")
write.table(output, "output.txt")
print("run complete. All is well")
|
bd709d5f234f1da415088d0e6c115b2aa5094b9e | c1a2389bad4647a37b28a34a46266cf4b3a970c4 | /src/pws_areas.R | 2c922da4c66c4c46c460ad9bd2fcbf216d3808ef | [] | no_license | ucd-cwee/water-geo | 2e5798e6e70cd3d86df71fe6b30a6f42b889ceb4 | 8d0bf9c8a0d4636382a5ba87496fc3b1e13e4fd8 | refs/heads/master | 2021-04-09T10:51:13.602707 | 2018-09-24T22:28:24 | 2018-09-24T22:28:24 | 125,431,520 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 633 | r | pws_areas.R |
library(tidyverse)
library(httr)
library(sf)
library(units)
# download data ----------------------------------------------------------------
# California Water Service Areas
source("src/get_data/cal_water_service_areas.R")
cal_water_sa <- st_read("data/water_service_areas/service_areas_valid.shp")
# Calculate areas --------------------------------------------------------------
cal_water_sa_sqkm <- cal_water_sa %>%
group_by(pwsid, pws_name = name) %>%
summarise() %>%
ungroup() %>%
mutate(sq_km = set_units(st_area(geometry), km^2)) %>%
st_set_geometry(NULL)
write_csv(cal_water_sa_sqkm, "data/pws_sqkm.csv")
|
8abfe2b1588983efdbd9fbdc1f710e53cd39bbee | 530d2ac1c29a5b939e889efaa5ff1ce88f21e5a3 | /dplyr_practice.R | b105244136b7945751cd9e6d42e3e308547a7bc6 | [] | no_license | uw-ischool-info-201a-2019-autumn/bxie-lab-04 | fc2c8bb1590ab6ff1f8d1e0fe0f2fda690d3bc7a | 21337424f54ec38ed6e5058906af08c712b0d693 | refs/heads/master | 2020-08-23T13:02:53.578757 | 2019-10-21T22:06:14 | 2019-10-21T22:06:14 | 216,622,418 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,510 | r | dplyr_practice.R | # dplyr practice with "Seatbelts" data
# objective: Get practice with common dplyr functions
# and familiarize with dplyr workflow
################################################
# STEP 1: install and load dplyr
################################################
# Install dplyr only if it is not already available, then attach it.
# requireNamespace() checks availability without attaching the package;
# require() is discouraged for dependency loading because it attaches the
# package as a side effect and merely returns FALSE on failure.
if (!requireNamespace("dplyr", quietly = TRUE)) {
  install.packages("dplyr")
}
library(dplyr)
################################################
# STEP 2: Load dataset
################################################
# TODO: Go to Kaggle link (included below) and do the following:
# What is the dataset about? When and where was this data collected?
# What is a row?
# What is a column? Which 2 columns store data about alcohol consumption?
# How might somebody want to use this data?
# Link to dataset: https://www.kaggle.com/uciml/student-alcohol-consumption
# TODO: Download the data (you'll need to create an account) and store
# "student-mat.csv" in the `data/` directory. (Ignore the other files)
# TODO: read the data from data/student-mat.csv into a variable `df`
# handle strings so they are not factors
# You may need to set the working directory first
# TODO: View the dataset. Is it what you expect? Is there missing data?
################################################
# STEP 3: Analyze data
################################################
# TODO: select columns related to age, address, weekday consumption,
# weekend consumption, and number of absences. Store these 5 columns in
# a variable named `df_select`.
# TODO: filter dataframe to only get students in rural areas and store in
# variable `df_rural`. How many responses are from students in rural areas?
# TODO: Use mutate() the dataframe df to include a new column "total_alc" which is
# the sum of weekday and weekend consumption ratings.
# Be sure to update `df` to include this column
# TODO: arrange() student responses from lowest to highest by total consumption
# View the results
# TODO: arrange() student responses from oldest to youngest
# Tip: add a `-` before a column name to sort it in descending order
# TODO: Use summarize() to get average/mean absences (as variable `avg_absences``)
# and median age (as variable `median_age`)
# TODO: Use group_by() to Group students by sex and age
# then use summarize to get the following summary information:
# - mean_alc: mean total alcohol rating
# - mean_absences: mean number of absences
# - frequency: number of responses in that group (using function `n()`)
# View the results
|
02043948433bb01ad7859005d56f827a13c6dd39 | 1325a43cd89cb9c35b8bda36dff84698bccb1808 | /plot4.R | e03f760e482e9f61b056abaa6ce36fb3dcccab02 | [] | no_license | MicheleVNG/exploratory-data-analysis-final | 4bcc97eb9225bfa0b1fd3b9b405da96fec49bcc0 | 769f610218548abf28e0640a61bcbd1454c3c510 | refs/heads/master | 2021-01-11T14:31:01.159812 | 2017-02-11T11:50:20 | 2017-02-11T11:50:20 | 80,150,078 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 797 | r | plot4.R | # Read the data
# Load the NEI emissions data and the source classification code table.
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")
# Plot 4
# Across the United States, how have emissions from coal combustion-related
# sources changed from 1999–2008?
library(dplyr)
library(ggplot2)
SCC <- SCC[, c("SCC", "Short.Name")]
graphData <- NEI[, c("SCC", "Emissions", "year")]
graphData <- merge(graphData, SCC, by = "SCC")
# NOTE(review): the alternation "Comb /|Coal" matches any name containing
# "Comb /" OR any name containing "Coal" -- the latter also catches
# non-combustion coal sources. If only coal *combustion* is intended,
# something like grepl("Comb.*Coal", ...) may be what was meant; confirm
# against the SCC Short.Name values.
graphData <- graphData %>% subset(grepl("Comb /|Coal", graphData$Short.Name))
# Rescale tons -> thousands of tons for the axis label below.
graphData$Emissions <- graphData$Emissions / 1000
png("plot4.png")
g <- ggplot(data = graphData, mapping = aes(as.factor(year), Emissions))
# NOTE(review): this relies on top-level auto-printing (Rscript); wrap in
# print() if the script is ever source()'d.
g + geom_col() +
  labs(x = "Year", y = "Total Emissions (thousands of tons)",
       title = "Emissions from coal combustion-related\nsources in the United States")
dev.off()
6df8e751ed2fa7a57e83c6a51a0951f615599e0b | 9892013459fd70fa37a79bbcaca7f8669e5209c4 | /Input/checkload_input.R | a12d9305ab8266c76145e7b3c392386658b48d49 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | cybeco/cybersecandcyberinsurance | 5b24cd89f3966ffa2c333188a93c5f652ac7a408 | fa91a0aa4c45160c64e2595e2e8c8f5f389eb3d9 | refs/heads/master | 2022-11-20T21:20:14.248877 | 2020-07-09T08:00:18 | 2020-07-09T08:00:18 | 277,143,135 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,860 | r | checkload_input.R | #! Rscript checkload_input.R --
#### THE SCRIPT CHECKS AND LOADS THE INPUT ----
# For each of the input parameters provided by the Toolbox,
# the script does the following:
# 1 - Check whether the input values have valid values.
# 2 - Load this input as variables in the R environment.
# This is done with the "checkload" functions definded bellow.
#### THE SCRIPT DEFINES FUNCTIONS FOR CHECKING AND LOADING THE INPUT
# Validate a 0/1 toolbox flag and convert it to logical (1 -> TRUE, 0 -> FALSE).
# Errors with the caller's expression name when the value is anything else.
# Replaces the original's misuse of vectorized ifelse() (whose result was
# discarded and whose final else-branch was unreachable) with scalar if/stop.
checkLoadBooleanToLogical <- function(input) {
  if (!(input %in% c(0, 1))) {
    stop(substitute(input), " is not valid")
  }
  input == 1
}
# Validate that `input` is one of the allowed codes in `integer_vector` and
# map it to logical: FALSE when it equals the "not selected" sentinel
# `no_value`, TRUE for any other allowed code. `no_value` itself must be a
# member of `integer_vector`. Scalar if/stop replaces the original's
# discarded-result ifelse() validation.
checkLoadIntegerListToLogical <- function(input, integer_vector, no_value) {
  if (!(input %in% integer_vector)) {
    stop(substitute(input), " is not valid")
  }
  if (!(no_value %in% integer_vector)) {
    stop("error in integer vector or no_value")
  }
  input != no_value
}
# Validate that `input` is an allowed code in `integer_vector` and translate
# it to the corresponding entry of `factor_vector` (same length, no
# duplicates in either vector). anyDuplicated() returns 0 when there are no
# duplicates, hence the `> 0` tests. Scalar if/stop replaces the original's
# ifelse()-based validation.
checkLoadIntegerListToFactor <- function(input, integer_vector, factor_vector) {
  if (!(input %in% integer_vector)) {
    stop(substitute(input), " is not valid")
  }
  if (length(factor_vector) != length(integer_vector) ||
      anyDuplicated(integer_vector) > 0 ||
      anyDuplicated(factor_vector) > 0) {
    stop("error in integer vector or factor vector")
  }
  factor_vector[which(integer_vector == input)]
}
# Validate a numeric that must be non-negative or the sentinel -1
# ("no value", mapped to 0), and truncate it to a whole number with floor().
# Uses scalar &&/|| instead of the original's vectorized &/| inside a
# discarded ifelse().
checkLoadNonNegInteger <- function(input) {
  if (!(is.numeric(input) && (input == -1 || input >= 0))) {
    stop(substitute(input), " is not valid")
  }
  if (input == -1) {
    input <- 0
  }
  floor(input)
}
# Validate a percentage in [0, 1] (or the sentinel -1 for "no value", mapped
# to 0) and round it to 4 decimal places. Scalar if/stop replaces the
# original's discarded-result ifelse() validation.
checkLoadPercentage <- function(input) {
  if (!(is.numeric(input) && (input == -1 || (input >= 0 && input <= 1)))) {
    stop(substitute(input), " is not valid")
  }
  if (input == -1) {
    input <- 0
  }
  round(input, 4)
}
# Validate that `input` is numeric and return it unchanged. Scalar if/stop
# replaces the original's discarded-result ifelse() validation.
checkLoadNumeric <- function(input) {
  if (!is.numeric(input)) {
    stop(substitute(input), " is not valid")
  }
  input
}
#### THE SCRIPT CHECKS THE INPUT AND LOADS IT IN THE R ENVIRONMENT ----
# Each assignment below validates a toolbox-supplied `input_*` global and
# loads it under its internal name. The `input_*` variables must already be
# defined (they come from the Toolbox, outside this script).
# expert_mode <- checkLoadBooleanToLogical(input_expert)
exact_statistics <- checkLoadBooleanToLogical(input_exact_statistics)
feature_company_type <- checkLoadIntegerListToFactor(input_company,
                                                     c(1),
                                                     c(1))
# Assets
asset_facilities_included <- checkLoadBooleanToLogical(input_facilities)
asset_facilities_money <- checkLoadNonNegInteger(input_facilities_value)
asset_it_included <- checkLoadBooleanToLogical(input_it_infrastructure)
asset_num_computers <- checkLoadNonNegInteger(input_computers)
asset_num_servers <- checkLoadNonNegInteger(input_servers)
asset_pii_included <- checkLoadBooleanToLogical(input_personal_information)
asset_pii_num_records <- checkLoadNonNegInteger(input_personal_information_records)
asset_pii_num_records_business <- checkLoadNonNegInteger(input_personal_information_records_business)
# Features
feature_turnover_included <- checkLoadBooleanToLogical(input_turnover)
feature_turnover_money <- checkLoadNonNegInteger(input_turnover_value)
feature_employees_included <- checkLoadBooleanToLogical(input_employees)
feature_employees_num <- checkLoadNonNegInteger(input_employees_number)
# Impacts (which loss categories the analysis should include)
impacts_to_equipment_included <- checkLoadBooleanToLogical(input_impacts_to_equipment)
impacts_to_market_share_included <- checkLoadBooleanToLogical(input_impacts_to_market_share)
impacts_to_availability_included <- checkLoadBooleanToLogical(input_impacts_to_availability)
impacts_to_records_exposed_included <- checkLoadBooleanToLogical(input_impacts_to_records_exposed)
impacts_to_business_info_included <- checkLoadBooleanToLogical(input_impacts_to_business_info)
impacts_postincident_costs_included <- checkLoadBooleanToLogical(input_recovery)
### NON-INTENTIONAL THREATS ###
# Environmental threats #
# Fire
envthreat_fire_included <- checkLoadBooleanToLogical(input_threat_fire)
# Flood
envthreat_flood_included <- checkLoadBooleanToLogical(input_threat_flood)
# Accidental threats #
# Employee error
accthreat_employee_error_included <- checkLoadBooleanToLogical(input_threat_employee_error)
# Misconfiguration
accthreat_misconfiguration_included <- checkLoadBooleanToLogical(input_threat_misconfiguration)
# Non-targeted threats #
# Computer virus
ntathreat_virus_included <- checkLoadBooleanToLogical(input_nontarg_threat_virus)
# Ransomware
ntathreat_ransomware_included <- checkLoadBooleanToLogical(input_nontarg_threat_ransomware)
### INTENTIONAL THREATS ###
# DDoS
tarthreat_dos_included <- checkLoadBooleanToLogical(input_targ_threat_dos)
# Data manipulation
tarthreat_dataman_included <- checkLoadBooleanToLogical(input_targ_threat_data_manipulation)
# Social engineering attack (includes data exfiltration of PII and business records)
tarthreat_social_enginerring_included <- checkLoadBooleanToLogical(input_targ_threat_social_engineering)
# Data exfiltration
tarthreat_dataexf_included <- checkLoadBooleanToLogical(input_targ_threat_data_exfiltration)
# Data business exfiltration
tarthreat_dataexf_business_included <- checkLoadBooleanToLogical(input_targ_threat_data_business_exfiltration)
# Targeted malware
tarthreat_malware_included <- checkLoadBooleanToLogical(input_targ_threat_malware)
### ACTORS ###
# Actor inputs use code 1 = included, 4 = not included ("no value" sentinel).
# Competitor (COMPEET)
thactor_competitor_included <- checkLoadIntegerListToLogical(input_actor_competitor, c(1,4),4)
# Hacktivist (ANTONYMOUS)
thactor_hacktivists_included <- checkLoadIntegerListToLogical(input_actor_hacktivist,c(1,4),4)
# Hacktivist (ANTONYMOUS) likelihood (1 -> 0 is "code 1 maps to factor 0"; see helper)
thactor_hacktivists_likelihood <- checkLoadIntegerListToFactor(input_actor_hacktivist, c(1,4), c(0,1))
# Cybercriminals (CYBEGANSTA)
thactor_cybercriminal_included <- checkLoadIntegerListToLogical(input_actor_cyber_criminal,c(1,4),4)
# Cybercriminals (CYBEGANSTA) likelihood
thactor_cybercriminals_likelihood <- checkLoadIntegerListToFactor(input_actor_cyber_criminal, c(1,4), c(0,1))
# Modern Republic (MR)
thactor_mr_included <- checkLoadIntegerListToLogical(input_actor_mr, c(1,4),4)
# Validate the firewall/gateway inputs (results discarded; these calls are
# used purely for their stop-on-invalid side effect).
checkLoadBooleanToLogical(input_technical_gateways)
checkLoadBooleanToLogical(input_technical_gateways_compliance)
checkLoadBooleanToLogical(input_technical_gateways_implementation)
# Security-control "options" encoding used throughout this file:
#   c(1)   -> control is installed and mandatory (compliance required)
#   c(0,1) -> control available; adopting it is a decision variable
#   c(0)   -> control not available/selected
# "capex" is the acquisition cost, charged only when the control is not yet
# implemented (implementation == 0); already-implemented controls cost 0.
# Sprk (Fire protection)
techctrl_sprk_protection_options <- if (input_technical_sprk_protection == 1 &
                                        input_technical_sprk_protection_compliance == 1 ) {
  c(1)
} else if (input_technical_sprk_protection == 1 &
           input_technical_sprk_protection_compliance == 0) {
  c(0,1)
} else if (input_technical_sprk_protection == 0 ) {
  c(0)
} else {
  stop("security control input error in sprk protection")
}
# Sprk (Fire protection capex)
techctrl_sprk_protection_capex <- if (input_technical_sprk_protection_implementation == 0 ) {
  checkLoadNonNegInteger(input_technical_sprk_protection_capex)
} else if (input_technical_sprk_protection_implementation == 1 ) {
  0
} else {
  stop("security control input error in sprk protection capex")
}
# Firewall
techctrl_fwallgways_options <- if (input_technical_gateways == 1 &
                                   input_technical_gateways_compliance == 1 ) {
  c(1)
} else if (input_technical_gateways == 1 &
           input_technical_gateways_compliance == 0) {
  c(0,1)
} else if (input_technical_gateways == 0 ) {
  c(0)
} else {
  stop("security control input is not defined correctly")
}
# Firewall capex
techctrl_fwallgways_capex <- if (input_technical_gateways_implementation == 0 ) {
  checkLoadNonNegInteger(input_technical_gateways_capex)
} else if (input_technical_gateways_implementation == 1 ) {
  0
} else {
  stop("security control input is not defined correctly")
}
# Firewall opex
# techctrl_fwallgways_opex <- checkLoadNonNegInteger(input_technical_gateways_opex) # not sure whether this is used
# Validate the flood-door inputs (stop-on-invalid side effect only).
checkLoadBooleanToLogical(input_technical_fd)
checkLoadBooleanToLogical(input_technical_fd_compliance)
checkLoadBooleanToLogical(input_technical_fd_implementation)
# FD (Flood doors) -- same options/capex encoding as the controls above.
techctrl_fd_options <- if (input_technical_fd == 1 & input_technical_fd_compliance == 1 ) {
  c(1)
} else if (input_technical_fd == 1 & input_technical_fd_compliance == 0) {
  c(0,1)
} else if (input_technical_fd == 0 ) {
  c(0)
} else {
  stop("security control error fd")
}
# FD (Flood doors) capex
techctrl_fd_capex <- if (input_technical_fd_implementation == 0 ) {
  checkLoadNonNegInteger(input_technical_fd_capex)
} else if (input_technical_fd_implementation == 1 ) {
  0
} else {
  stop("security control error fd capex")
}
# Validate the DDoS-protection inputs (stop-on-invalid side effect only).
checkLoadBooleanToLogical(input_technical_ddos_prot)
checkLoadBooleanToLogical(input_technical_ddos_prot_compliance)
checkLoadBooleanToLogical(input_technical_ddos_prot_implementation)
# DDoS protection
techctrl_ddos_prot_options <- if (input_technical_ddos_prot == 1 & input_technical_ddos_prot_compliance == 1 ) {
  c(1)
} else if (input_technical_ddos_prot == 1 & input_technical_ddos_prot_compliance == 0) {
  c(0,1)
} else if (input_technical_ddos_prot == 0 ) {
  c(0)
} else {
  stop("security control error ddos_prot")
}
# DDoS protection capex
techctrl_ddos_prot_capex <- if (input_technical_ddos_prot_implementation == 0 ) {
  checkLoadNonNegInteger(input_technical_ddos_prot_capex)
} else if (input_technical_ddos_prot_implementation == 1 ) {
  0
} else {
  stop("security control error ddos_prot capex")
}
# Validate the secure-configuration inputs (stop-on-invalid side effect only).
checkLoadBooleanToLogical(input_technical_configuration)
checkLoadBooleanToLogical(input_technical_configuration_compliance)
checkLoadBooleanToLogical(input_technical_configuration_implementation)
# Secconfig (secure configuration) -- same options/capex encoding as above.
techctrl_secconfig_options <- if (input_technical_configuration == 1 &
                                  input_technical_configuration_compliance == 1 ) {
  c(1)
} else if (input_technical_configuration == 1 &
           input_technical_configuration_compliance == 0) {
  c(0,1)
} else if (input_technical_configuration == 0 ) {
  c(0)
} else {
  stop("security control input error secconfig")
}
# Secconfig capex
techctrl_secconfig_capex <- if (input_technical_configuration_implementation == 0 ) {
  checkLoadNonNegInteger(input_technical_configuration_capex)
} else if (input_technical_configuration_implementation == 1 ) {
  0
} else {
  stop("security control input error secconfig capex")
}
# Secconfig opex
# techctrl_secconfig_opex <- checkLoadNonNegInteger(input_technical_configuration_opex) # not sure whether this is used
# Validate the access-control inputs (stop-on-invalid side effect only).
checkLoadBooleanToLogical(input_technical_access)
checkLoadBooleanToLogical(input_technical_access_compliance)
checkLoadBooleanToLogical(input_technical_access_implementation)
# Access control system (ACS)
techctrl_acctrl_options <- if (input_technical_access == 1 &
                               input_technical_access_compliance == 1 ) {
  c(1)
} else if (input_technical_access == 1 &
           input_technical_access_compliance == 0) {
  c(0,1)
} else if (input_technical_access == 0 ) {
  c(0)
} else {
  stop("security control input error acs")
}
# Access control system (ACS) capex
techctrl_acctrl_capex <- if (input_technical_access_implementation == 0 ) {
  checkLoadNonNegInteger(input_technical_access_capex)
} else if (input_technical_access_implementation == 1 ) {
  0
} else {
  stop("security control input error acs capex")
}
# Access control system (ACS) opex
# techctrl_acctrl_opex <- checkLoadNonNegInteger(input_technical_access_opex) # not sure whether this is used
# Validate the malware-protection inputs (stop-on-invalid side effect only).
checkLoadBooleanToLogical(input_technical_malware)
checkLoadBooleanToLogical(input_technical_malware_compliance)
checkLoadBooleanToLogical(input_technical_malware_implementation)
# Malware protection -- same options/capex encoding as the controls above.
techctrl_malwprot_options <- if (input_technical_malware == 1 & input_technical_malware_compliance == 1 ) {
  c(1)
} else if (input_technical_malware == 1 &
           input_technical_malware_compliance == 0) {
  c(0,1)
} else if (input_technical_malware == 0 ) {
  c(0)
} else {
  stop("security control input error malware")
}
# Malware protection capex
techctrl_malwprot_capex <- if (input_technical_malware_implementation == 0 ) {
  checkLoadNonNegInteger(input_technical_malware_capex)
} else if (input_technical_malware_implementation == 1 ) {
  0
} else {
  stop("security control input error malware capex")
}
# Malware opex
# techctrl_malwprot_opex <- checkLoadNonNegInteger(input_technical_malware_opex) # not sure whether this is used
# Validate the patch/vulnerability-management inputs (stop-on-invalid only).
checkLoadBooleanToLogical(input_non_technical_patch_vulnerability)
checkLoadBooleanToLogical(input_non_technical_patch_vulnerability_compliance)
checkLoadBooleanToLogical(input_non_technical_patch_vulnerability_implementation)
# Patch vulnerability management (PVM)
proctrl_patchvul_options <- if (input_non_technical_patch_vulnerability == 1 &
                                input_non_technical_patch_vulnerability_compliance == 1 ) {
  c(1)
} else if (input_non_technical_patch_vulnerability == 1 &
           input_non_technical_patch_vulnerability_compliance == 0) {
  c(0,1)
} else if (input_non_technical_patch_vulnerability == 0 ) {
  c(0)
} else {
  stop("security control input is not defined correctly")
}
# Patch vulnerability management (PVM) capex
proctrl_patchvul_capex <- if (input_non_technical_patch_vulnerability_implementation == 0 ) {
  checkLoadNonNegInteger(input_non_technical_patch_vulnerability_capex)
} else if (input_non_technical_patch_vulnerability_implementation == 1 ) {
  0
} else {
  stop("security control input is not defined correctly")
}
# Patch vulnerability management (PVM) opex
# proctrl_patchvul_opex <- checkLoadNonNegInteger(input_non_technical_patch_vulnerability_opex) # not sure whether this is used
# Physical hazard protection (currently disabled)
# checkLoadBooleanToLogical(input_physical_hazard_protection)
# checkLoadBooleanToLogical(input_physical_hazard_protection_compliance)
# checkLoadBooleanToLogical(input_physical_hazard_protection_implementation)
# Validate the IDS inputs (stop-on-invalid side effect only).
checkLoadBooleanToLogical(input_technical_ids)
checkLoadBooleanToLogical(input_technical_ids_compliance)
checkLoadBooleanToLogical(input_technical_ids_implementation)
# Intrusion Detection System (IDS)
techctrl_ids_options <- if (input_technical_ids == 1 & input_technical_ids_compliance == 1 ) {
  c(1)
} else if (input_technical_ids == 1 & input_technical_ids_compliance == 0) {
  c(0,1)
} else if (input_technical_ids == 0 ) {
  c(0)
} else {
  stop("security control error ids")
}
# IDS capex
techctrl_ids_capex <- if (input_technical_ids_implementation == 0 ) {
  checkLoadNonNegInteger(input_technical_ids_capex)
} else if (input_technical_ids_implementation == 1 ) {
  0
} else {
  stop("security control error ids capex")
}
# Insurance products
# Each product: validate its flags, build the options vector (same c(0)/c(1)/
# c(0,1) encoding as the security controls), then load price and the coverage
# percentage for each insured impact.
# Conventional
checkLoadBooleanToLogical(input_insurance_conventional_equipment)
checkLoadBooleanToLogical(input_insurance_conventional_compliance)
checkLoadBooleanToLogical(input_insurance_conventional_implementation)
insurance_conventional_options <- if (input_insurance_conventional_equipment == 1 &&
                                      input_insurance_conventional_compliance == 1 ) {
  c(1)
} else if (input_insurance_conventional_equipment == 1 &&
           input_insurance_conventional_compliance == 0) {
  c(0,1)
} else if (input_insurance_conventional_equipment == 0 ) {
  c(0)
} else {
  stop("security control input error conventional insurance")
}
insurance_conventional_price <- checkLoadNonNegInteger(input_insurance_conventional_price)
insurance_conventional_equipment_coverage <- checkLoadPercentage(input_insurance_conventional_equipment_coverage)
# Cyber1 -- all of its coverage flags must be set consistently (all 1 or all 0).
checkLoadBooleanToLogical(input_insurance_cyber1_market_share)
checkLoadBooleanToLogical(input_insurance_cyber1_exfiltration)
checkLoadBooleanToLogical(input_insurance_cyber1_business_info)
checkLoadBooleanToLogical(input_insurance_cyber1_compliance)
checkLoadBooleanToLogical(input_insurance_cyber1_implementation)
insurance_cyber1_options <- if (input_insurance_cyber1_market_share == 1 &&
                                input_insurance_cyber1_exfiltration == 1 &&
                                input_insurance_cyber1_business_info == 1 &&
                                input_insurance_cyber1_compliance == 1 ) {
  c(1)
} else if (input_insurance_cyber1_market_share == 1 &&
           input_insurance_cyber1_exfiltration == 1 &&
           input_insurance_cyber1_business_info == 1 &&
           input_insurance_cyber1_compliance == 0) {
  c(0,1)
} else if (input_insurance_cyber1_market_share == 0 &&
           input_insurance_cyber1_exfiltration == 0 &&
           input_insurance_cyber1_business_info == 0 ) {
  c(0)
} else {
  stop("security control input error cyber1 insurance")
}
insurance_cyber1_price <- checkLoadNonNegInteger(input_insurance_cyber1_price)
insurance_cyber1_market_share_coverage <- checkLoadPercentage(input_insurance_cyber1_market_share_coverage)
insurance_cyber1_exfiltration_coverage <- checkLoadPercentage(input_insurance_cyber1_exfiltration_coverage)
insurance_cyber1_business_info_coverage <- checkLoadPercentage(input_insurance_cyber1_business_info_coverage)
# Cyber2 -- like Cyber1 plus availability coverage.
checkLoadBooleanToLogical(input_insurance_cyber2_market_share)
checkLoadBooleanToLogical(input_insurance_cyber2_availability)
checkLoadBooleanToLogical(input_insurance_cyber2_exfiltration)
checkLoadBooleanToLogical(input_insurance_cyber2_business_info)
checkLoadBooleanToLogical(input_insurance_cyber2_compliance)
checkLoadBooleanToLogical(input_insurance_cyber2_implementation)
insurance_cyber2_options <- if (input_insurance_cyber2_market_share == 1 &&
                                input_insurance_cyber2_availability == 1 &&
                                input_insurance_cyber2_exfiltration == 1 &&
                                input_insurance_cyber2_business_info == 1 &&
                                input_insurance_cyber2_compliance == 1) {
  c(1)
} else if (input_insurance_cyber2_market_share == 1 &&
           input_insurance_cyber2_availability == 1 &&
           input_insurance_cyber2_exfiltration == 1 &&
           input_insurance_cyber2_business_info == 1 &&
           input_insurance_cyber2_compliance == 0) {
  c(0,1)
} else if (input_insurance_cyber2_market_share == 0 &&
           input_insurance_cyber2_availability == 0 &&
           input_insurance_cyber2_exfiltration == 0 &&
           input_insurance_cyber2_business_info == 0) {
  c(0)
} else {
  stop("security control input error cyber2 insurance")
}
insurance_cyber2_price <- checkLoadNonNegInteger(input_insurance_cyber2_price)
insurance_cyber2_market_share_coverage <- checkLoadPercentage(input_insurance_cyber2_market_share_coverage)
insurance_cyber2_availability_coverage <- checkLoadPercentage(input_insurance_cyber2_availability_coverage)
insurance_cyber2_exfiltration_coverage <- checkLoadPercentage(input_insurance_cyber2_exfiltration_coverage)
insurance_cyber2_business_info_coverage <- checkLoadPercentage(input_insurance_cyber2_business_info_coverage)
# Constraint budget (1 = budget constraint active, 3 = no constraint)
constraint_budget_included <- checkLoadIntegerListToLogical(input_budget, c(1,3),3)
# Constraint budget type
constraint_budget_type <- checkLoadIntegerListToFactor(input_budget, c(1,3), c(1,3))
# Constraint budget money
constraint_budget_money <- checkLoadNonNegInteger(input_budget_total_value)
# Utility defender rho
# utility_defender_rho <- checkLoadNumeric(input_utility_defender_rho)
# Utility defender coef
# utility_defender_coef_exp <- checkLoadNumeric(input_utility_defender_coef_exp)
# NOTE(review): rho and coef_exp are plain globals (not input_* variables);
# they must be defined before this script is sourced.
utility_rho <- checkLoadNumeric(rho)
utility_coef_exp<- checkLoadNumeric(coef_exp)
# Cybersecurity team hourly rate
cybersecurity_team_hourly_rate <- checkLoadNumeric(input_cybersecurity_team_hourly_rate)
# Fines
fines <- checkLoadNonNegInteger(input_fines)
8f2b1882d57b1c485461c1a8602692b665ae6553 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.compute/man/imagebuilder_create_infrastructure_configuration.Rd | 7098ebcf64b4c2621d9ba4e8b737416b3af986eb | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 2,895 | rd | imagebuilder_create_infrastructure_configuration.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imagebuilder_operations.R
\name{imagebuilder_create_infrastructure_configuration}
\alias{imagebuilder_create_infrastructure_configuration}
\title{Creates a new infrastructure configuration}
\usage{
imagebuilder_create_infrastructure_configuration(
name,
description = NULL,
instanceTypes = NULL,
instanceProfileName,
securityGroupIds = NULL,
subnetId = NULL,
logging = NULL,
keyPair = NULL,
terminateInstanceOnFailure = NULL,
snsTopicArn = NULL,
resourceTags = NULL,
instanceMetadataOptions = NULL,
tags = NULL,
clientToken
)
}
\arguments{
\item{name}{[required] The name of the infrastructure configuration.}
\item{description}{The description of the infrastructure configuration.}
\item{instanceTypes}{The instance types of the infrastructure configuration. You can specify
one or more instance types to use for this build. The service will pick
one of these instance types based on availability.}
\item{instanceProfileName}{[required] The instance profile to associate with the instance used to customize
your Amazon EC2 AMI.}
\item{securityGroupIds}{The security group IDs to associate with the instance used to customize
your Amazon EC2 AMI.}
\item{subnetId}{The subnet ID in which to place the instance used to customize your
Amazon EC2 AMI.}
\item{logging}{The logging configuration of the infrastructure configuration.}
\item{keyPair}{The key pair of the infrastructure configuration. You can use this to
log on to and debug the instance used to create your image.}
\item{terminateInstanceOnFailure}{The terminate instance on failure setting of the infrastructure
configuration. Set to false if you want Image Builder to retain the
instance used to configure your AMI if the build or test phase of your
workflow fails.}
\item{snsTopicArn}{The Amazon Resource Name (ARN) for the SNS topic to which we send image
build event notifications.
EC2 Image Builder is unable to send notifications to SNS topics that are
encrypted using keys from other accounts. The key that is used to
encrypt the SNS topic must reside in the account that the Image Builder
service runs under.}
\item{resourceTags}{The tags attached to the resource created by Image Builder.}
\item{instanceMetadataOptions}{The instance metadata options that you can set for the HTTP requests
that pipeline builds use to launch EC2 build and test instances.}
\item{tags}{The tags of the infrastructure configuration.}
\item{clientToken}{[required] The idempotency token used to make this request idempotent.}
}
\description{
Creates a new infrastructure configuration. An infrastructure configuration defines the environment in which your image will be built and tested.
See \url{https://www.paws-r-sdk.com/docs/imagebuilder_create_infrastructure_configuration/} for full documentation.
}
\keyword{internal}
|
59dafc113a32779dad74f3e509353538e51ccbb2 | a1c8a2a809e397237b184eac8972d36753156165 | /1Voloskov124_Zad1.R | f9964f3ec54ec335985e0a266f83fd675b2be452 | [] | no_license | VoloskovAlex/MathMod | c2c8333821c75a6d5965a47057d086681a89019e | 3291a37e1a77a90ae0da9c95a3610aef6fba779a | refs/heads/master | 2023-04-10T19:29:07.371391 | 2021-04-22T10:45:47 | 2021-04-22T10:45:47 | 355,607,492 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,298 | r | 1Voloskov124_Zad1.R | #ЗАДАНИЕ 1: для региона 79 - Еврейская АО рассчитайте урожайность пшеницы в 2013 году,
#взяв для рассчета средние суммы активных температур за текущий год,
#с 14 ближайших метеостанций но убирая из рассчета активных температур дни с температурой выше 30 градусов
# Столица Биробиджан latitude = 48.7928, longitude = 132.924
#Установка рабочей директории
rm(list=ls())
setwd("D:/R_voloskov/MathMod")
getwd()
#Выбираем пакеты
library (tidyverse)
library(rnoaa)
library(lubridate)
# устанавливаем список метеостанций
station_data = ghcnd_stations()
write.csv(station_data,file = "station_data.csv")
station_data=read.csv("station_data.csv")
#После получения списка всех станций, получаем список станций ближайших
# к столице нашего региона,
#создав таблицу с именем региона и координатами его столицы
Birobijan = data.frame(id="Birobijan", latitude = 48.7928, longitude = 132.924)
Birobijan_around = meteo_nearby_stations(lat_lon_df = Birobijan,
station_data = station_data,
limit = 14,
var=c("TAVG"),
year_min = 2012, year_max = 2014)
#Birobijan_around это список единственным элементом которого является таблица, содержащая идентификаторы метеостанций отсортированных по их
# удалленности от столицы, первым элементом таблицы будет идентификатор метеостанции Биробиджана, получим его
Birobijan_id=Birobijan_around[["Birobijan"]][["id"]][1]
summary(Birobijan_id)
# для получения таблицы со всеми метеостанциями вокруг столицы
# необходимо выбрать целиком первый объект из списка
Birobijan_table=Birobijan_around[[1]]
summary(Birobijan_table)
# в таблице Birobijan_table оказалось 14 объектов, ранжированных по расстоянию от столицы
#сформируем список необходимых станций
Birobijan_stations=Birobijan_table
str(Birobijan_stations)
# список содержит 14 метеостанций расположенных вблизи Биробиджана выведем индетификаторы отфильрованных метеостанций
Birobijan_stations$id
# скачаем погодые данных для наших метеостанций
# чтобы получить все данные с 1 метеостанции используем команду meteo_tidy_ghcnd
all_Birobijan_data=meteo_tidy_ghcnd(stationid = Birobijan_id)
summary(all_Birobijan_data)
# создать цикл, в котором бы скачивались нужные данные для всех метеостанций
# cоздадим объект, куда скачаем все данные всех метеостанций
all_Birobijan_meteodata = data.frame()
# создаем цикл для наших 14 метеостанций
stations_names=Birobijan_stations$id
stations_names=stations_names[1:14]
for (sname in stations_names)
{ one_meteo=meteo_tidy_ghcnd( stationid = sname,
date_min = "2013-01-01",
date_max = "2013-12-31")
station_vars=names(one_meteo)
if (!("tavg" %in% station_vars)){
if(!("tmax"%in% station_vars)){
next()
}
one_meteo=one_meteo %>% mutate(tavg=(tmax+tmin)/2)}
one_meteo=one_meteo %>% select(id,date,tavg)
one_meteo = one_meteo %>% mutate(tavg=tavg/10)
all_Birobijan_meteodata=rbind(all_Birobijan_meteodata, one_meteo)}
# записываем полученные результаты
write.csv(all_Birobijan_meteodata,"all_Birobijan_meteodata.csv")
# считываем данные
all_Birobijan_meteodata=read.csv("all_Birobijan_meteodata.csv")
str(all_Birobijan_meteodata)
# добавим год, месяц, день
all_Birobijan_meteodata=all_Birobijan_meteodata %>% mutate(year=year(date),
month=month(date),
day=day(date))
# превратим NA в 0 и где tavg<5 и tavg>30
all_Birobijan_meteodata[is.na(all_Birobijan_meteodata$tavg),"tavg"] = 0
all_Birobijan_meteodata[all_Birobijan_meteodata$tavg<5, "tavg"] = 0
all_Birobijan_meteodata[all_Birobijan_meteodata$tavg>30, "tavg"] = 0
summary(all_Birobijan_meteodata)
# сгруппируем метеостанции по id, месяцам и проссумируем темперетатуру
# по этим группам, затем сгурппируем данные по месяцам и найдем среднее по месяцам для всех метеостанций
group_meteodata =all_Birobijan_meteodata %>% group_by(id,year,month)
sumT_group_meteodata = group_meteodata %>% summarise(tsum=sum(tavg))
groups_month=sumT_group_meteodata%>%group_by(month)
sumT_month=groups_month%>%summarise(St=mean(tsum))
# Подготовка к расчету по формуле Урожая ##
# Ввод констант
afi = c(0.000,0.000,0.000,32.110,26.310,25.640,23.200,18.730,16.300,13.830,0.000,0.000)
bfi = c(0.000,0.000,0.000,11.300,9.260,9.030,8.160,6.590,5.730,4.870,0.000,0.000)
di = c(0.000,0.000,0.000,0.330,1.000,1.000,1.000,0.320,0.000,0.000,0.000,0.000)
y = 1.0
Kf = 300
Qj = 1600
Lj = 2.2
Ej = 25
# Рассчитаем Fi по месяцаv
sumT_month =sumT_month %>% mutate(Fi = afi+bfi*y*St)
#Рассчитаем Yi
sumT_month = sumT_month %>% mutate( Yi = ((Fi*di)*Kf)/(Qj*Lj*(100-Ej)))
## Расчитываем урожай
Yield = (sum(sumT_month$Yi))
Yield
#Результат 16,9 ц/га |
04f3bd4e5e4b3154c6fd73da652d9be9afec5da5 | 326e472f315f247c0258a683456980fe2fda56bd | /Slogans/slogans.R | 4c6185113a39b24eeb56234c053b6682cd58fc7b | [] | no_license | kms6bn/kms6bn.github.io | 425d5741c8fe11e3086d40bb5e22f78c248ede54 | b73bd85a1e92d0036f3248591d72edc12dbc58ce | refs/heads/master | 2021-11-26T01:52:02.501443 | 2020-03-11T21:10:55 | 2020-03-11T21:10:55 | 58,340,725 | 0 | 0 | null | 2018-03-31T18:40:48 | 2016-05-09T01:43:20 | HTML | UTF-8 | R | false | false | 4,288 | r | slogans.R | library(tm)
library(topicmodels)
setwd("~/Documents/MSDS/dataViz/Slogans")
slogans = read.csv("SlogansClean.csv", header=TRUE)
#write.csv(slogans, file = "SlogansClean.csv", row.names = FALSE)
#get lemma
adorn <- function(text) {
require(httr)
require(XML)
url <- "http://devadorner.northwestern.edu/maserver/partofspeechtagger"
response <- GET(url,query=list(text=text, media="xml",
xmlOutputType="outputPlainXML",
corpusConfig="ncf", # Nineteenth Century Fiction
includeInputText="false", outputReg="true"))
doc <- content(response,type="text/xml")
words <- doc["//adornedWord"]
xmlToDataFrame(doc,nodes=words)
}
first = VCorpus(DataframeSource(slogans["Slogan"]))
sloganCorpus2 = lapply(first,function(x) adorn(as.character(x)))
lemma = slogans["Slogan"]
for (i in as.numeric(names(sloganCorpus2))){
lemma$lemma[i] = paste(sloganCorpus2[i][[1]][4][[1]], collapse = ' ')
}
lemma$lemma <- gsub('\\|', ' ', lemma$lemma)
lemma$lemma[146] = "compassionate conservative"
lemma$lemma[113] = "nixon be one the"
lemma$lemma[144] = "lead for the new millennium"
lemma$lemma[179] = "new possibility . real lead ."
lemma$lemma[95] = "a time for great"
lemma$lemma[123] = "return integrity to the white home"
lemma$lemma[18] = "a home divide against itself can stand"
lemma$lemma[15] = "America for the American"
lemma$lemma[4] = "Hurray , Hurray , the country be rise ' vote for clay and Frelinghuysen !"
sloganCorpus = VCorpus(DataframeSource(lemma["lemma"]))
slogan.clean = tm_map(sloganCorpus, stripWhitespace)
slogan.clean = tm_map(slogan.clean, removeNumbers)
slogan.clean = tm_map(slogan.clean, removePunctuation)
slogan.clean = tm_map(slogan.clean, content_transformer(tolower))
#slogan.clean = tm_map(slogan.clean, removeWords, stopwords("english"))
#slogan.clean = tm_map(slogan.clean, stemDocument)
slogan.clean.tf = DocumentTermMatrix(slogan.clean, control = list(weighting = weightTfIdf))
# frequent terms
findFreqTerms(slogan.clean.tf)
m <- as.matrix(slogan.clean.tf)
frequency <- colSums(m)
frequency <- sort(frequency, decreasing=TRUE)
# word cloud
library(wordcloud)
words <- names(frequency)
wordcloud(words[1:75], frequency[1:75])
# remove empty documents
row.sums = apply(slogan.clean.tf, 1, sum)
slogan = sloganCorpus[row.sums > 0]
slogan.clean.tf = slogan.clean.tf[row.sums > 0,]
row.sums[row.sums==0]
# topic modeling (do after names are removed)
#topic.model = LDA(slogan.clean.tf, 4)
#terms(topic.model, 5)[,1:4]
#cosine similarity
library(lsa)
dtmat = as.matrix(slogan.clean.tf)
row.names(dtmat) <- slogans$Slogan
distance = cosine(t(dtmat))
#remove slogans with no similarity
distance = distance[ rowSums(distance)!=1, ]
distance = distance[ ,colSums(distance)!=0]
#document clustering - heirarchically
distance = dist(distance)
hclusters = hclust(distance)
#plot(hclusters)
# try clustering documents into 10 clusters using kmeans
kmeans.clusters = kmeans(slogan.clean.tf, 50)
clustered.kmeans = split(slogans, kmeans.clusters$cluster)
# inspect a couple clusters
inspect(clustered.kmeans[[1]])
inspect(clustered.kmeans[[2]])
#plot in D3
library(networkD3)
radialNetwork(as.radialNetwork(hclusters))
dendroNetwork(hclusters, treeOrientation = "vertical")
diagonalNetwork(as.radialNetwork(hclusters), fontFamily = "Helvetica")
saveNetwork(diagonalNetwork(as.radialNetwork(hclusters)), "diagonal.html", selfcontained = TRUE)
###############################
#aggregate by president
result <- aggregate(Slogan~Candidate,paste,collapse=",",data=slogans)
sloganCorpus = VCorpus(DataframeSource(result["Slogan"]))
slogan.clean = tm_map(sloganCorpus, stripWhitespace)
slogan.clean = tm_map(slogan.clean, removeNumbers)
slogan.clean = tm_map(slogan.clean, removePunctuation)
slogan.clean = tm_map(slogan.clean, content_transformer(tolower))
slogan.clean = tm_map(slogan.clean, removeWords, stopwords("english"))
slogan.clean = tm_map(slogan.clean, stemDocument)
slogan.clean.tf = DocumentTermMatrix(slogan.clean, control = list(weighting = weightTf))
#distances = dist(slogan.clean.tf)
#distances = as.matrix(distances)
#row.names(distances) <- result$Candidate
distances = dist(distances)
hclusters = hclust(distances)
plot(hclusters)
|
e2d5fe5217e92f2b44fd82ec5a41e13fd4cafab8 | b2a5ba2ef17b82043ffb0e92556b83ad7520a79f | /twitter_analysis.R | aa18bc55bc5f1584fdbcf44fd451a396f06c1e7f | [] | no_license | amrrs/TwitterAnalysis-in-R | d6456b471de679e5e72ae506f70a541c1ca33760 | c1bfc2933592839ea8f20b6675556b3040016983 | refs/heads/master | 2021-01-10T13:44:05.451506 | 2015-10-31T16:48:34 | 2015-10-31T16:48:34 | 45,309,688 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,722 | r | twitter_analysis.R | library(twitteR)
library(lubridate)
library(ggplot2)
library(tm)
library(wordcloud)
consumerKey='x'
consumerSecret='x'
accesstoken ='x'
tokensecret = 'x'
#establishing connection with Twitter api
setup_twitter_oauth(consumerKey,
consumerSecret,
accesstoken,
tokensecret)
#searchTwitter gives only a week's data
#cdTweets <- searchTwitter('from:cardekho', n=5000 ,since = '2014-09-01', until = '2015-10-27')
cdTweets2 <- userTimeline('cardekho',n=3200,includeRts = T)
cdtwt2 <- twListToDF(cdTweets2)
View(cdtwt2)
setwd('E:\\DM\\cardekho')
#write.csv(cdtwt2,'cardekho_tweets.csv',row.names = F)
#cdtwt2 <- read.csv('cardekho_tweets.csv',header = T,stringsAsFactors = F)
#extracting one-year data
cardekho <- cdtwt2[1:2220,]
#saving a copy
write.csv(cardekho,'cardekho_twt1yr.csv',row.names = F)
View(cardekho)
cardekho <- read.csv('cardekho_twt1yr.csv',header = T,stringsAsFactors = F)
#removing extra columns
cardekho <- cardekho[,c(1,3,4,5,12,13)]
#date-time changing timezone to IST
cardekho$dt <- with_tz(ymd_hms(cardekho$created),'Asia/Calcutta')
#calculating time difference between two tweets
for(i in 1:2219){
cardekho$timedif[i] = round(as.numeric(difftime(cardekho$dt[i],cardekho$dt[i+1],units='mins')),2)
cardekho$timedif[2220] = 0
}
#cardekho <- read.csv('cardekho_update2.csv',header = T)
cardekho$weekday = weekdays(cardekho$dt)
cardekho$weekday = as.factor(cardekho$weekday)
print(levels(cardekho$weekday))
cardekho$weekday = factor(cardekho$weekday,levels(cardekho$weekday)[c(4,2,6,7,5,1,3)])
#extracting time
cardekho$time = hms(sapply(strsplit(as.character(cardekho$dt)," "),'[',2))
#labeling time period of day
cardekho$tinterval[cardekho$time>hms('05:00:01')&cardekho$time<hms('09:30:00')] = 'morning'
cardekho$tinterval[cardekho$time>hms('09:30:01')&cardekho$time<hms('11:30:00')]= 'forenoon'
cardekho$tinterval[cardekho$time>hms('11:30:01')&cardekho$time<hms('13:30:00')] = 'midday'
cardekho$tinterval[cardekho$time>hms('13:30:01')&cardekho$time<hms('16:00:00')]= 'afternoon'
cardekho$tinterval[cardekho$time>hms('16:00:01')&cardekho$time<hms('19:00:00')] = 'evening'
cardekho$tinterval[cardekho$time>hms('19:00:01')&cardekho$time<hms('21:00:00')]= 'late evening'
cardekho$tinterval[cardekho$time>hms('21:00:01')] = 'night'
cardekho$tinterval[cardekho$time<hms('05:00:00')]= 'late night'
cardekho$tinterval = as.factor(cardekho$tinterval)
print(levels(cardekho$tinterval))
cardekho$tinterval = factor(cardekho$tinterval,levels(cardekho$tinterval)[c(7,3,6,1,2,4,8,5)])
#extracting original tweets from overall tweets
cardekho_og <- cardekho[cardekho$isRetweet=='FALSE',]
View(cardekho_og)
#visualization
x = as.data.frame(round(prop.table(table(cardekho$weekday)),2))
ggplot(x)+geom_bar(aes(x= x$Var1,y=x$Freq),stat = "identity") +
xlab('Weekdays') + ylab('Frequency') +
ggtitle('Amount of Tweets in a Week')
weekday_rtcount = aggregate(cardekho_og$retweetCount~cardekho_og$weekday,data=cardekho_og,FUN=mean)
ggplot(weekday_rtcount)+geom_bar(aes(x= weekday_rtcount$`cardekho_og$weekday`,y=weekday_rtcount$`cardekho_og$retweetCount`),stat = "identity") +
xlab('Day of the Week') + ylab('Avg. no. of RTs per tweet') +
ggtitle('RTs per tweet in a Week')
round(prop.table(table(cardekho$tinterval)),3)
y = as.data.frame(round(prop.table(table(cardekho$tinterval)),3))
ggplot(y)+geom_bar(aes(x= y$Var1,y=y$Freq),stat = "identity") +
xlab('Time in Day') + ylab('Frequency') +
ggtitle('Amount of Tweets in a Day')
tint_rtcount = aggregate(cardekho_og$retweetCount~cardekho_og$tinterval,data=cardekho_og,FUN=mean)
ggplot(tint_rtcount)+geom_bar(aes(x= tint_rtcount$`cardekho_og$tinterval`,y=tint_rtcount$`cardekho_og$retweetCount`),stat = "identity") +
xlab('Time Interval in the day') + ylab('Avg. no. of RTs per tweet') +
ggtitle('RTs per tweet')
ggplot(as.data.frame(cardekho$timedif[cardekho$timedif<1200])) + geom_histogram(aes(x=cardekho$timedif[cardekho$timedif<1200],fill = ..count..),binwidth = 5) +
xlab('Time difference between tweets(in mins)') +
ylab('No. of tweets') +
ggtitle('Frequency of Tweets')
ggplot(as.data.frame(cardekho$timedif[cardekho$timedif<120])) + geom_histogram(aes(x=cardekho$timedif[cardekho$timedif<120],fill = ..count..),binwidth = 5) +
xlab('Time difference between tweets(in mins)') +
ylab('No. of tweets') +
ggtitle('Frequency of Tweets (witin 2 hrs)')
#visualizing tweet vs RT
ggplot(cardekho) + geom_bar(aes(x=factor(1),y = ((..count..)/sum(..count..))*100,fill=factor(cardekho$isRetweet)),width=1) +
coord_polar(theta = 'y') +
xlab('Percentage') +
ylab(' ') +
guides(fill=guide_legend(title='Is Retweet?')) +
ggtitle('Original Tweets vs Retweets')
#extracting top-performed original tweets
cardekho_toppers <- cardekho_og[cardekho_og$retweetCount>4,]
#mining the content of top-performing tweets
txt <- Corpus(VectorSource(cardekho_toppers$text))
txt <- tm_map(txt,tolower)
txt <- tm_map(txt,removePunctuation)
txt <- tm_map(txt,removeNumbers)
stpwords <- c(stopwords('english'),'the','\n','us','cardekho')
txt <- tm_map(txt,removeWords,stpwords)
txt <- sapply(1:22, function(x){gsub("http\\w+ *", "", txt[x]$content)})
txt <- gsub("\n\\w+ *", "", txt)
wordcloud(txt,scale = c(3, 0.1),min.freq = 2,colors = c('blue','green','red'),random.color =T,max.words = 50)
cwTweets2 <- userTimeline('carwale',n=3200,includeRts = T)
cwtwt2 <- twListToDF(cwTweets2)
View(cwtwt2)
setwd('E:\\DM\\cardekho')
write.csv(cwtwt2,'carwale_tweets.csv',row.names = F)
cwtwt2 <- read.csv('carwale_tweets.csv',header = T,stringsAsFactors = F)
#extracting one-year data
carwale <- cwtwt2[1:2560,]
#saving a copy
write.csv(carwale,'carwale_twt1yr.csv',row.names = F)
View(carwale)
carwale <- read.csv('carwale_twt1yr.csv',header = T,stringsAsFactors = F)
#removing extra columns
carwale <- carwale[,c(1,3,4,5,12,13)]
#date-time changing timezone to IST
carwale$dt <- with_tz(ymd_hms(carwale$created),'Asia/Calcutta')
#calculating time difference between two tweets
for(i in 1:2559){
carwale$timedif[i] = round(as.numeric(difftime(carwale$dt[i],carwale$dt[i+1],units='mins')),2)
carwale$timedif[2560] = 0
}
carwale$weekday = weekdays(carwale$dt)
carwale$weekday = as.factor(carwale$weekday)
print(levels(carwale$weekday))
carwale$weekday = factor(carwale$weekday,levels(carwale$weekday)[c(4,2,6,7,5,1,3)])
x = as.data.frame(round(prop.table(table(carwale$weekday)),2))
ggplot(x)+geom_bar(aes(x= x$Var1,y=x$Freq),stat = "identity") +
xlab('Weekdays') + ylab('Frequency') +
ggtitle('Amount of Tweets in a Week - Carwale')
weekday_rtcount = aggregate(carwale$retweetCount~carwale$weekday,data=carwale,FUN=mean)
ggplot(weekday_rtcount)+geom_bar(aes(x= weekday_rtcount$`carwale$weekday`,y=weekday_rtcount$`carwale$retweetCount`),stat = "identity") +
xlab('Day of the Week') + ylab('Avg. no. of RTs per tweet') +
ggtitle('RTs per tweet in a Week - Carwale')
carwale$time = hms(sapply(strsplit(as.character(carwale$dt)," "),'[',2))
carwale$tinterval[carwale$time>hms('05:00:01')&carwale$time<hms('09:30:00')] = 'morning'
carwale$tinterval[carwale$time>hms('09:30:01')&carwale$time<hms('11:30:00')]= 'forenoon'
carwale$tinterval[carwale$time>hms('11:30:01')&carwale$time<hms('13:30:00')] = 'midday'
carwale$tinterval[carwale$time>hms('13:30:01')&carwale$time<hms('16:00:00')]= 'afternoon'
carwale$tinterval[carwale$time>hms('16:00:01')&carwale$time<hms('19:00:00')] = 'evening'
carwale$tinterval[carwale$time>hms('19:00:01')&carwale$time<hms('21:00:00')]= 'late evening'
carwale$tinterval[carwale$time>hms('21:00:01')] = 'night'
carwale$tinterval[carwale$time<hms('05:00:00')]= 'late night'
carwale$tinterval = as.factor(carwale$tinterval)
print(levels(carwale$tinterval))
carwale$tinterval = factor(carwale$tinterval,levels(carwale$tinterval)[c(7,3,6,1,2,4,8,5)])
write.csv(carwale,'carwale_update2.csv',row.names =F)
round(prop.table(table(carwale$tinterval)),3)
y = as.data.frame(round(prop.table(table(carwale$tinterval)),3))
ggplot(y)+geom_bar(aes(x= y$Var1,y=y$Freq),stat = "identity") +
xlab('Time in Day') + ylab('Frequency') +
ggtitle('Amount of Tweets in a Day - Carwale')
tint_rtcount = aggregate(carwale$retweetCount~carwale$tinterval,data=carwale,FUN=mean)
ggplot(tint_rtcount)+geom_bar(aes(x= tint_rtcount$`carwale$tinterval`,y=tint_rtcount$`carwale$retweetCount`),stat = "identity") +
xlab('Time Interval in the day') + ylab('Avg. no. of RTs per tweet') +
ggtitle('RTs per tweet - Carwale')
ggplot(as.data.frame(carwale$timedif[carwale$timedif<1200])) + geom_histogram(aes(x=carwale$timedif[carwale$timedif<1200],fill = ..count..),binwidth = 5) +
xlab('Time difference between tweets(in mins)') +
ylab('No. of tweets') +
ggtitle('Frequency of Tweets - Carwale')
ggplot(as.data.frame(carwale$timedif[carwale$timedif<120])) + geom_histogram(aes(x=carwale$timedif[carwale$timedif<120],fill = ..count..),binwidth = 5) +
xlab('Time difference between tweets(in mins)') +
ylab('No. of tweets') +
ggtitle('Frequency of Tweets (witin 2 hrs) - Carwale')
#visualizing no. of retweets
ggplot(carwale) + geom_bar(aes(x=factor(1),y = ((..count..)/sum(..count..))*100,fill=factor(carwale$isRetweet)),width=1) +
coord_polar(theta = 'y') +
xlab('Percentage') +
ylab(' ') +
guides(fill=guide_legend(title='Is Retweet?')) +
ggtitle('Original Tweets vs Retweets - Carwale')
|
4903f058d46792dc998669c9ce06b638e6505eed | ec277669a23616385befd5333bd546bdca31d618 | /server.R | 642ab93b0ac35910e84edaac28d2158d21951003 | [] | no_license | svobodnik86/Developping-Data-Products | ada455341edbbd89917c5be56c26ad9a288aff55 | 33cddcb5038caadaca7b5727a88a7c1d4be02f02 | refs/heads/master | 2020-12-25T11:06:10.734987 | 2016-07-21T11:46:26 | 2016-07-21T11:46:26 | 63,859,838 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,284 | r | server.R | library(shiny)
# preprocessing of data
data("presidents")
presidentNames <-c("Harry S. Truman","Harry S. Truman","Harry S. Truman","Harry S. Truman","Harry S. Truman","Harry S. Truman","Harry S. Truman","Harry S. Truman","Dwight D. Eisenhower","Dwight D. Eisenhower","Dwight D. Eisenhower","Dwight D. Eisenhower","Dwight D. Eisenhower","Dwight D. Eisenhower","Dwight D. Eisenhower","Dwight D. Eisenhower","John F. Kennedy","John F. Kennedy","John F. Kennedy","Lyndon B. Johnson","Lyndon B. Johnson","Lyndon B. Johnson","Lyndon B. Johnson","Lyndon B. Johnson","Richard Nixon","Richard Nixon","Richard Nixon","Richard Nixon","Richard Nixon","Richard Nixon")
Q1<-c()
Q2<-c()
Q3<-c()
Q4<-c()
year<-c()
year=1945
for(i in 1:30){
year[i]<-1945+i-1
Q1[i]<-presidents[i]
Q2[i]<-presidents[i+1]
Q3[i]<-presidents[i+2]
Q4[i]<-presidents[i+3]
}
presidentsDF <- data.frame(year,Q1,Q2,Q3,Q4, presidentNames,row.names = NULL)
# check whether NAs are reported for quarters after president change (see Roosevelt, Kennedy, Nixon)
# 1st QT 1945 NA, 3rd and 4th QTs 1974
presidentsDF$Q3[30]<-NA
presidentsDF$Q4[30]<-NA
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$text2 <- renderText({
radio <- input$radio
presidentElect <- input$selectPres
if (radio==1){
paste(" President", input$selectPres, "reached the highest approval of", calculateApproval(presidentElect),
"% of American people in ", calculateMinMaxPeriod(presidentElect), ".")
}else{
paste(" President", input$selectPres, "reached the lowest approval of", calculateApproval(presidentElect),
"% of American people in ", calculateMinMaxPeriod(presidentElect), ".")
}
})
output$photo <- renderImage({
filename <- paste((input$selectPres), ".jpg",sep='')
list(src = filename,contentType = 'image/jpg',
height = 300)
}, deleteFile = FALSE)
#output$documentation <- renderMarkdown(file = "App-Presidents/readme.md")
output$documentation <- renderText({
readLines("readme.html")
})
calculateApproval <- function(president){
if (input$radio == 1){
max(presidentsDF[presidentsDF$presidentNames==input$selectPres,2:5], na.rm=TRUE)}
else {
min(presidentsDF[presidentsDF$presidentNames==input$selectPres,2:5], na.rm=TRUE)
}
}
calculateMinMaxPeriod <- function(president){
minmax <- c()
extreme <- calculateApproval(president)
electionYear <- min(presidentsDF[presidentsDF$presidentNames==president,1])
lastYear <- max(presidentsDF[presidentsDF$presidentNames==president,1])
for (year in electionYear:lastYear){
for (i in 2:5){
maxVal <- presidentsDF[presidentsDF$year==year,i]
if (!is.na(maxVal)){
if (maxVal == extreme){
if (length(minmax)==0){
minmax <- paste(names(presidentsDF[i]), "of" , year)
}else {
minmax <- c(paste(minmax,"and", names(presidentsDF[i]), "of" , year))
}
}
}
}
}
minmax
}
})
|
ca40a22f4975bbd966cd6051ed15d6a737c8fb57 | ab00bc7e17121d2dcf3741dc9f650a4e76ed4a44 | /R/tbl.R | f1412a44e611dca9bd702102766ec32ab2021f4a | [
"MIT"
] | permissive | tidyverse/dplyr | 9b7fdc07e6a70bc8e802094e2e2a127af22bcc02 | cf8031d00f406c6dc5d483d7e9e34639df797b81 | refs/heads/main | 2023-09-01T03:52:50.608019 | 2023-08-25T13:42:29 | 2023-08-25T13:42:29 | 6,427,813 | 3,290 | 1,982 | NOASSERTION | 2023-09-09T20:14:25 | 2012-10-28T13:39:17 | R | UTF-8 | R | false | false | 2,198 | r | tbl.R | #' Create a table from a data source
#'
#' This is a generic method that dispatches based on the first argument.
#'
#' @param src A data source
#' @param ... Other arguments passed on to the individual methods
#' @export
tbl <- function(src, ...) {
UseMethod("tbl")
}
#' Create a "tbl" object
#'
#' `tbl()` is the standard constructor for tbls. `as.tbl()` coerces,
#' and `is.tbl()` tests.
#'
#' @keywords internal
#' @export
#' @param subclass name of subclass. "tbl" is an abstract base class, so you
#' must supply this value. `tbl_` is automatically prepended to the
#' class name
#' @param ... For `tbl()`, other fields used by class. For `as.tbl()`,
#' other arguments passed to methods.
make_tbl <- function(subclass, ...) {
subclass <- paste0("tbl_", subclass)
structure(list(...), class = c(subclass, "tbl"))
}
#' @rdname tbl
#' @param x Any object
#' @export
is.tbl <- function(x) inherits(x, "tbl")
tbl_vars_dispatch <- function(x) {
UseMethod("tbl_vars")
}
new_sel_vars <- function(vars, group_vars) {
structure(
vars,
groups = group_vars,
class = c("dplyr_sel_vars", "character")
)
}
#' List variables provided by a tbl.
#'
#' `tbl_vars()` returns all variables while `tbl_nongroup_vars()`
#' returns only non-grouping variables. The `groups` attribute
#' of the object returned by `tbl_vars()` is a character vector of the
#' grouping columns.
#'
#' @export
#' @param x A tbl object
#' @seealso [group_vars()] for a function that returns grouping
#' variables.
#' @keywords internal
tbl_vars <- function(x) {
return(new_sel_vars(tbl_vars_dispatch(x), group_vars(x)))
# For roxygen and static analysis
UseMethod("tbl_vars")
}
#' @export
tbl_vars.data.frame <- function(x) {
names(x)
}
#' @rdname tbl_vars
#' @export
tbl_nongroup_vars <- function(x) {
setdiff(tbl_vars(x), group_vars(x))
}
is_sel_vars <- function(x) {
inherits(x, "dplyr_sel_vars")
}
#' @export
print.dplyr_sel_vars <- function(x, ...) {
cat("<dplyr:::vars>\n")
print(unstructure(x))
groups <- attr(x, "groups")
if (length(groups)) {
cat("Groups:\n")
print(groups)
}
invisible(x)
}
unstructure <- function(x) {
attributes(x) <- NULL
x
}
|
55fa9bdba1c1004656b0756a04bdbec063936995 | ee70f1012f4e76828c71d8484b3ef38983f25ad7 | /R-scripts/SF9_lumpy investment.R | 99d6e17705e969c1f1a2d2a92278b2060a89036e | [
"MIT"
] | permissive | fhaegner/Mak-h-ro | 688639c5d132a5c654cda90f12213ee3ee37bf6e | bc4bc292adc52a4842b83a90763021438b08d014 | refs/heads/main | 2023-04-16T23:45:17.085664 | 2022-08-25T18:53:45 | 2022-08-25T18:53:45 | 484,345,505 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,512 | r | SF9_lumpy investment.R | #call rm() function to remove all objects
rm(list = ls())
#set working directory
setwd("C:/Users/...")
#get packages
library(tidyverse) #contains ggplot2, dplyr, tidyr, readr, purr, tibble, stringr, forcats, rlang, lubridate, pillar
#get data
#SF9 uses run-0
run <- read_csv("C:/Users/.../run-0.csv")
keep <- c("10", "19", "27") #randomly chosen
mtm <- run[run$id %in% keep, ]
################################################################################
#Generate plot
################################################################################
g <-
g <-
ggplot(mtm, aes(time, investmentPeriod, group = id)) +
geom_line(aes(linetype=factor(id)),size = 0.7) +
#scale_y_continuous(expand = c(0,0)) +
scale_y_continuous(limits = c(0,90), expand = c(0, 0)) +
scale_linetype_manual(values = c("dashed", "solid", "dotted")) +
labs(y = "Investment in nominal units",
x = "Time in periods",
linetype = "Firm id") +
theme_bw() +
theme(legend.position = c(0.89, 0.86),
text = element_text(family = "Arial", size = 14),
axis.text = element_text(size = 12),
axis.title.y = element_text(vjust = 2.5)) +
guides(linetype = guide_legend(override.aes = list(size = 0.75), keywidth = 3)) +
xlim(200,300)
#save graph in working directory
cairo_pdf("SF9_lumpy_investment.pdf", width=8, height=6)
#jpeg(filename = "SF9_lumpy_investment.jpeg", width = 888, height = 688, quality = 100)
print(g)
dev.off() |
1a2df445f915aa143e5c860f271a2a5aefaac916 | 1e8e3135aa4556919d433c3ff8d86b71d85f72c3 | /dev/dev_gen_install_cap.R | 71b128d6121f4695dd37645cc1b8df0df0f68120 | [
"MIT"
] | permissive | dreamRs/rte.data | 3183c33345ef6887721fd8278d3f5feee2cd4db3 | 06738a25803b30dc2586f1e051e6e8af6dcac334 | refs/heads/master | 2022-11-04T19:43:46.023893 | 2022-10-03T09:56:58 | 2022-10-03T09:56:58 | 125,424,537 | 8 | 5 | NOASSERTION | 2019-06-17T07:35:48 | 2018-03-15T20:49:00 | R | UTF-8 | R | false | false | 3,879 | r | dev_gen_install_cap.R |
# ------------------------------------------------------------------------
#
# Title : Generation Installed Capacities
# By : VP
# Date : 2018-03-27
#
# ------------------------------------------------------------------------
# Packages ----------------------------------------------------------------
library( rte.data )
library( ggplot2 )
library( data.table )
# Funs --------------------------------------------------------------------
capitalize <- function(x) {
lo <- substring(text = x, first = 2)
up <- substring(text = x, first = 1, last = 1)
up <- toupper(up)
lo <- tolower(lo)
lo <- gsub(pattern = "_", replacement = " ", x = lo)
paste0(up, lo)
}
# API key -----------------------------------------------------------------
set_key(
api = "generation_installed_capacities",
key = "BASE64KEY=="
)
# Datas -------------------------------------------------------------------
gen_inst <- get_open_api(api = "generation_installed_capacities", resource = "capacities_cpc")
str(gen_inst)
gen_inst
# saveRDS(object = gen_inst, file = "dev/gen_inst.rds")
# par dep
gen_inst[department_code != "FR", list(value = sum(value, na.rm = TRUE)), by = list(department_code)][order(-value)]
# par dep et hydro
gen_inst[department_code != "FR" & production_type == "HYDRO",
list(value = sum(value, na.rm = TRUE)),
by = list(department_code)][order(-value)]
# type max par dep
gen_inst[department_code != "FR",
.SD[which.max(value)],
by = list(department_code)][order(-value)]
# capacities_per_production_unit ------------------------------------------
gen_inst_unit <- get_open_api(api = "generation_installed_capacities", resource = "capacities_per_production_unit", raw = FALSE)
str(gen_inst_unit, max.level = 2)
gen_inst_unit
table(gen_inst_unit$type)
# saveRDS(object = gen_inst_unit, file = "dev/gen_inst_unit.rds")
gen_inst_unit[type %chin% c("HYDRO_RUN_OF_RIVER_AND_POUNDAGE", "HYDRO_WATER_RESERVOIR"), type := "HYDRO"]
gen_inst_unit_a <- gen_inst_unit[, list(N = .N), by = type]
gen_inst_unit_a <- gen_inst_unit_a[order(N, decreasing = FALSE)]
gen_inst_unit_a[, type := factor(type, levels = type, labels = rte.data:::capitalize(type))]
gen_inst_unit_a[, P := round(N / sum(N) * 100)]
gen_inst_unit_a
ggplot(data = gen_inst_unit_a) +
geom_segment(aes(x = type, xend = type, y = 0, yend = N), color = "#666666") +
geom_point(aes(x = type, y = N), color = "#112446", size = 5) +
coord_flip() + theme_minimal() +
labs(
x = NULL, y = "Number of unit",
title = "Installed capacity",
subtitle = "per production type"
)
# capacities_per_production_type -----------------------------------------
gen_inst_type <- get_open_api(api = "generation_installed_capacities", resource = "capacities_per_production_type", raw = FALSE)
str(gen_inst_type, max.level = 2)
gen_inst_type
# saveRDS(object = gen_inst_type, file = "dev/gen_inst_type.rds")
gen_inst_type <- readRDS("dev/gen_inst_type.rds")
gen_inst_type[type %chin% c("HYDRO_RUN_OF_RIVER_AND_POUNDAGE", "HYDRO_WATER_RESERVOIR"), type := "HYDRO"]
gen_inst_type[type %chin% c("WIND_ONSHORE", "WIND_OFFSHORE"), type := "WIND"]
gen_inst_type <- gen_inst_type[, list(value = sum(value)), by = list(type)]
gen_inst_type <- gen_inst_type[order(value, decreasing = FALSE)]
gen_inst_type[, type := factor(type, levels = type, labels = capitalize(type))]
gen_inst_type
ggplot(data = gen_inst_type) +
geom_col(aes(x = type, y = value)) +
coord_flip() + theme_minimal() +
labs(x = NULL)
ggplot(data = gen_inst_type) +
geom_segment(aes(x = type, xend = type, y = 0, yend = value), color = "#666666") +
geom_point(aes(x = type, y = value), color = "#112446", size = 5) +
coord_flip() + theme_minimal() +
labs(
x = NULL, y = "In MW",
title = "Installed capacity",
subtitle = "per production type"
)
|
eee049bb08ef5aa4c1a8740bc6fdb2aae330f278 | c315e8d1fdcf23086841a7d9cfb48ceba0b0b357 | /R/GenSeries.R | 4c0458a6ac34415d4459ba4ee6eb869601993076 | [] | no_license | cran/s2dverification | c4d5caa8b518356b095c8768a7aadfe6c8da5a64 | a772070454789d66328916463f91d306f1df0a3b | refs/heads/master | 2022-05-22T11:13:13.975865 | 2022-04-20T07:10:06 | 2022-04-20T07:10:06 | 19,931,684 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,040 | r | GenSeries.R | #'Generates An AR1 Time Series
#'
#'This function generates AR1 processes containing n data points, where alpha
#'is the autocorrelation at lag 1, and the mean and standard deviation are
#'specified by the mean and std arguments.
#'
#'@param n Length of the timeseries to be generated.
#'@param alpha Autocorrelation at lag 1.
#'@param mean Mean of the data.
#'@param std Standard deviation of the data.
#'
#'@return AR1 timeseries.
#'
#'@keywords datagen
#'@author History:\cr
#'0.1 - 2012-04 (L. Auger) - Original code\cr
#'1.0 - 2012-04 (N. Manubens) - Formatting to CRAN
#'@examples
#'series <- GenSeries(1000, 0.35, 2, 1)
#'plot(series, type = 'l')
#'
#'@importFrom stats rnorm
#'@export
GenSeries <- function(n, alpha, mean, std) {
res <- vector("numeric", n)
x <- mean
stdterm <- std * (sqrt(1 - alpha ^ 2) / (1 - alpha))
for (i in 1:100) {
x <- alpha * x + (1 - alpha) * rnorm(1, mean, stdterm)
}
for (i in 1:n) {
x <- alpha * x + (1 - alpha) * rnorm(1, mean, stdterm)
res[i] <- x
}
res
}
|
add63743602154411bcc7df5d286dde3a8fbcdac | ec99462f759e09f5d82f76b9071e6e11e69b5272 | /Calc Clonality_SI_R20_R50 on many files.R | 0e670b50b692c323b2916880436c1abee9240dfc | [] | no_license | michellemiron/Immune-repertoire-analysis | 534cfa2d0ef3bb98f5402ff42939aa3093489b27 | bc41d14e7b6e21d607b80c081fe82003f0f2ce63 | refs/heads/master | 2021-09-20T10:56:44.715766 | 2018-08-08T12:29:12 | 2018-08-08T12:29:12 | 37,478,269 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,737 | r | Calc Clonality_SI_R20_R50 on many files.R | #This script calculates clonality on n number of files in a given directory
# Each file has TCR sequencing data for one sample
# Input is the directory with the files
# Output is a list of dataframes with clonality calculations
# TCR files are .txt files
# File is set up so a column is the counts and each row is a different sequence;
# only the counts column is needed for these calculations.
# Below are the functions used to calculate clonality and related indices.
# NOTE(review): rm(list=ls()) wipes the caller's entire workspace whenever
# this script is sourced -- an anti-pattern; prefer running the script in a
# fresh R session instead.
rm(list=ls())
# Read strings as character rather than factor (this is the default in R >= 4.0).
options(stringsAsFactors=FALSE)
# Scale each column of `data` so that it sums to 1 (i.e. convert raw counts
# to per-column frequencies). Works on a numeric data frame or matrix and
# returns an object of the same shape/class.
normalize <- function(data) {
  # sweep() divides every column by its sum in one vectorized pass,
  # replacing the original per-column loop; it also handles zero-column
  # input, where the old `1:ncol(data)` iteration would have errored.
  sweep(data, 2, colSums(data), "/")
}
# Shannon entropy (base 2) of a vector of non-negative weights.
# Returns NA for invalid input: any negative weight, or a non-positive total.
shannon.entropy <- function(p) {
  total <- sum(p)
  if (min(p) < 0 || total <= 0) {
    return(NA)
  }
  # Keep only positive weights (0 * log(0) is treated as 0) and
  # normalise them to probabilities before summing.
  probs <- p[p > 0] / total
  -sum(probs * log2(probs))
}
# Clonality = 1 - observed entropy / maximum possible entropy, reported to
# 3 significant digits: 0 for a perfectly even repertoire, approaching 1
# when a few clones dominate.
Clonality <- function(p) {
  n.clones <- length(p[p > 0])
  observed <- shannon.entropy(p)
  # Maximum entropy for n.clones equally frequent clones.
  max.possible <- -log2(1 / n.clones)
  signif(1 - observed / max.possible, 3)
}
# Simpson's index: the probability that two reads drawn at random belong to
# the same clone. Computed over the positive counts only; ranges from 1/n
# (perfectly even repertoire) up to 1 (monoclonal).
calcSI <- function(vals) {
  positive <- vals[vals > 0]
  freqs <- positive / sum(positive)
  sum(freqs^2)
}
# R20: the fraction of (positive-count) clones that, taken from most to
# least abundant, stay within the first 20% of cumulative reads.
calcr20 <- function(X) {
  counts <- sort(X, decreasing = TRUE)  # sort() also drops NAs
  counts <- counts[counts > 0]
  cum.share <- cumsum(counts) / sum(counts)
  sum(cum.share <= 0.2) / length(counts)
}
# R50: the fraction of (positive-count) clones that, taken from most to
# least abundant, together account for no more than 50% of total reads.
calcr50 <- function(X) {
  counts <- sort(X, decreasing = TRUE)  # sort() also drops NAs
  counts <- counts[counts > 0]
  cum.share <- cumsum(counts) / sum(counts)
  sum(cum.share <= 0.5) / length(counts)
}
# Input your path to files here:
File_path <- "/Users/michellemiron/Desktop/TCR data/All TCR data/All Reps pooled/"
# Get a list of all files in the directory.
# NOTE(review): `pattern` is a regular expression, so "*.txt" is not a
# literal glob; "\\.txt$" would match only names ending in ".txt".
files <- list.files(path=File_path, pattern="*.txt")
# NOTE(review): the next two assignments look like interactive-debugging
# leftovers; `file` is never used by the lapply() call further below,
# which passes each element of `files` to the function directly.
file <- files[[1]]
file <- "D229_BM_CD4+CD69-rep1_2.txt"
# Function to calculate clonality on a given file;
# output is a one-row data frame with the statistics and the file name.
outputclonality_data <- function(file) {
  # Read one TCR count file (relative to the global File_path) and return a
  # one-row data frame of diversity statistics -- clonality, Simpson's
  # index, R20, R50 and the number of unique clones -- tagged with the
  # file name.
  path <- paste0(File_path, file)  # File_path already ends in "/"
  tcr <- read.table(path, header = TRUE)
  counts <- tcr[, 2]               # second column holds the read counts
  counts.df <- as.data.frame(counts)
  freqs <- normalize(counts.df)
  entropy <- shannon.entropy(freqs)  # computed but not included in the output
  data.frame(
    file,
    clonalitycalc = Clonality(freqs),
    SI = calcSI(counts),
    R20 = calcr20(counts),
    R50 = calcr50(counts),
    NumberUniqueClones = nrow(counts.df)
  )
}
# Apply function to all files in a given directory
data_compiled_list <- lapply(files, outputclonality_data)
# Stack the per-file one-row data frames into a single table.
# NOTE(review): the chained assignment also creates an unused global
# `my.matrix`; `do.call("rbind", ...)` on its own would suffice.
data_compiled_table <- my.matrix<-do.call("rbind", data_compiled_list)
# Destination for the combined results (written as CSV despite the
# .txt extension).
Path_save = "/Users/michellemiron/Desktop/TCR data/All TCR data/All Reps pooled/results/"
file_output <- paste(Path_save,"clonality_R20_R50_SI_CloneNum.txt", sep = "")
write.csv(data_compiled_table, file=file_output)
|
0315cc27d5f2341787430f6f627493b7228be045 | c8cde1ea449bbd1c26454124a96e97df954ed319 | /man/e22-rand-method.Rd | 681034ea35d6e9f07bbf1cc48b9058e0b7db3151 | [] | no_license | cran/Umpire | b5785399ca942ecdf80cb369aa973b0df38230e0 | 46c5342920eda3414b38f3a1c46658db11a4c4fb | refs/heads/master | 2021-07-03T21:09:38.015302 | 2020-11-10T20:10:06 | 2020-11-10T20:10:06 | 96,940,613 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 975 | rd | e22-rand-method.Rd | \name{rand-method}
\alias{rand}
\alias{rand-method}
\alias{rand,ANY-method}
\docType{methods}
\title{Method "rand"}
\description{
\code{rand} is a generic function used to produce random vectors from the
distribution defined by various objects. The generic function invokes particular
\code{\link{methods}} which depend on the \code{\link{class}} of the first
argument.
}
\usage{
\S4method{rand}{ANY}(object, n, \dots)
}
\arguments{
\item{object}{an object from which random numbers from a distribution is
desired}
\item{n}{numeric scalar specifying quantity of random numbers}
\item{\dots}{additional arguments affecting the random numbers produced}
}
\value{
The form of the value returned by \code{rand} depends on the
class of its argument. See the documentation of the particular methods
for details of what is produced by that method.
}
\author{
Kevin R. Coombes \email{krc@silicovore.com}
}
\keyword{methods}
|
eb453b055ed725c2b2e582e991dd12b4fed710cb | a8d29e7f2b23bc097b75f0c9807cef6b74cbd7ea | /cachematrix.R | f9eb655c4f2f0e62c7d268fb1e0908161d08c371 | [] | no_license | natarajnaikar/ProgrammingAssignment2 | 761bc72d14827a3daa34d32f96993d44ecc8f9c8 | e2cbddeb3a7f45f56d1433ddde33e8328f393565 | refs/heads/master | 2021-01-24T20:59:45.718695 | 2015-06-21T13:20:38 | 2015-06-21T13:20:38 | 37,768,866 | 0 | 0 | null | 2015-06-20T12:18:27 | 2015-06-20T12:18:27 | null | UTF-8 | R | false | false | 1,743 | r | cachematrix.R | ##Below function inverses the passed function and stores in the memory
##.i.e.basically it creates the matrix object to cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Build a special "matrix" object that can cache its inverse alongside
  # the matrix itself. Returns a list of accessor functions:
  #   set(y)       - store a new matrix and invalidate any cached inverse
  #   get()        - return the stored matrix
  #   setmatrix(i) - cache a computed inverse
  #   getmatrix()  - return the cached inverse (NULL if not yet computed)
  cached.inverse <- NULL
  set <- function(y) {
    # `<<-` writes to this closure's environment, not the caller's,
    # so each object created by makeCacheMatrix keeps its own state.
    x <<- y
    cached.inverse <<- NULL
  }
  get <- function() x
  setmatrix <- function(solve) cached.inverse <<- solve
  getmatrix <- function() cached.inverse
  list(set = set,
       get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## Compute the inverse of the special "matrix" produced by makeCacheMatrix().
## If the inverse has already been calculated (and the matrix has not been
## changed since), the cached inverse is returned directly, with a message;
## otherwise it is computed with solve(), stored in the cache, and returned.
## `x` is the list of accessors from makeCacheMatrix(); `...` is forwarded
## to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix()
  # A non-NULL cache means the inverse was computed earlier: reuse it.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert the stored matrix and remember the result.
  inverse <- solve(x$get(), ...)
  x$setmatrix(inverse)
  inverse
}
|
43990359be32ee4bcb7e750983b1900eac94ddb6 | 2ab1fde88fa973454caad091e7a135a674aa1222 | /man/NLSS_sum.Rd | 291adb4749c5476cd43964c465235b4195171fa4 | [] | no_license | benwu233/nlss | c964e47e4adbc16939d11cc94408b4a58af9711a | 69e5472cc26a1d514ad659d10f2f2a38b55e3d4e | refs/heads/master | 2023-02-15T04:09:29.105167 | 2023-02-13T13:46:54 | 2023-02-13T13:46:54 | 197,650,756 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 507 | rd | NLSS_sum.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NLSS_sum.R
\name{NLSS_sum}
\alias{NLSS_sum}
\title{Summary of the MCMC result for NLSS}
\usage{
NLSS_sum(res, th = 0.95, nstart = 1, nend = 1)
}
\arguments{
\item{res}{result from the function NLSS}
\item{th}{numeric threshold used when summarizing the MCMC samples (default \code{0.95})}
\item{nstart}{integer start position over the MCMC samples used by the summary (default \code{1})}
\item{nend}{integer end position over the MCMC samples used by the summary (default \code{1})}
}
\description{
The function summarizes the MCMC result and returns the posterior mean
of A, the posterior mode of S, beta coefficient (frequency of each discrete value of S
among the MCMC samples) and the log-likelihood trace.
}
|
55cd76dadd0d512543e5a25319655bfe6a9282ef | fa0b440a3718858249cb7ef9d441da7363152f45 | /man/id.chol.Rd | a24b8f5ee0a59111c7e297b90e16525c29ac137d | [
"MIT"
] | permissive | alexanderlange53/svars | a44b4ee4faeb6e29d9606402821afaa77afd62ad | c51f597b87ef62394be9bcab90a4209b63285e54 | refs/heads/master | 2023-02-17T23:48:25.882026 | 2023-01-30T19:48:28 | 2023-01-30T19:48:28 | 76,240,281 | 34 | 20 | NOASSERTION | 2020-01-06T12:13:40 | 2016-12-12T09:11:07 | R | UTF-8 | R | false | true | 2,197 | rd | id.chol.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/id.chol.R
\name{id.chol}
\alias{id.chol}
\title{Recursive identification of SVAR models via Cholesky decomposition}
\usage{
id.chol(x, order_k = NULL)
}
\arguments{
\item{x}{An object of class 'vars', 'vec2var', 'nlVar'. Estimated VAR object}
\item{order_k}{Vector. Vector of characters or integers specifying the assumed structure of the recursive causality. Change the causal ordering in the instantaneous effects without permuting variables and re-estimating the VAR model.}
}
\value{
A list of class "svars" with elements
\item{B}{Estimated structural impact matrix B, i.e. unique decomposition of the covariance matrix of reduced form residuals}
\item{n}{Number of observations}
\item{method}{Method applied for identification}
\item{order_k}{Ordering of the variables as assumed for recursive causality}
\item{A_hat}{Estimated VAR parameter}
\item{type}{Type of the VAR model, e.g. 'const'}
\item{y}{Data matrix}
\item{p}{Number of lags}
\item{K}{Dimension of the VAR}
\item{VAR}{Estimated input VAR object}
}
\description{
Given an estimated VAR model, this function uses the Cholesky decomposition to identify the structural impact matrix B of the corresponding SVAR model
\deqn{y_t=c_t+A_1 y_{t-1}+...+A_p y_{t-p}+u_t
=c_t+A_1 y_{t-1}+...+A_p y_{t-p}+B \epsilon_t.}
Matrix B corresponds to the decomposition of the least squares covariance matrix \eqn{\Sigma_u=B\Lambda_t B'}.
}
\examples{
\donttest{
# data contains quarterly observations from 1965Q1 to 2008Q3
# x = output gap
# pi = inflation
# i = interest rates
set.seed(23211)
v1 <- vars::VAR(USA, lag.max = 10, ic = "AIC" )
x1 <- id.chol(v1)
x2 <- id.chol(v1, order_k = c("pi", "x", "i")) ## order_k = c(2,1,3)
summary(x1)
# impulse response analysis
i1 <- irf(x1, n.ahead = 30)
i2 <- irf(x2, n.ahead = 30)
plot(i1, scales = 'free_y')
plot(i2, scales = 'free_y')
}
}
\references{
Luetkepohl, H., 2005. New introduction to multiple time series analysis, Springer-Verlag, Berlin.
}
\seealso{
For alternative identification approaches see \code{\link{id.st}}, \code{\link{id.cvm}}, \code{\link{id.cv}}, \code{\link{id.dc}} or \code{\link{id.ngml}}
}
|
7ea0552219e75027cc437dcf0f29f01fc31aec60 | 6280fb08d623464fc6da54d7be5a5de51397b295 | /figure/plot1.R | d58e9a4d930c49a431297fe0afabc0cf8b6c2ec4 | [] | no_license | wraphaeljr/ExData_Plotting1 | d9ec8c4f09c5298f3bdec175506c5027ff30d29a | 9c8b4df4755adb00ba0aa609ec1aa4b3dc499608 | refs/heads/master | 2021-01-21T03:08:32.334394 | 2015-06-05T02:48:38 | 2015-06-05T02:48:38 | 36,903,496 | 0 | 0 | null | 2015-06-05T00:42:56 | 2015-06-05T00:42:54 | null | UTF-8 | R | false | false | 1,001 | r | plot1.R | data <- read.csv2("household_power_consumption.txt")
# Keep only the two target days: 1 Feb 2007 and 2 Feb 2007.
select.data <- data[(data$Date == "1/2/2007" | data$Date == "2/2/2007"), ]
rm(data)  # drop the full dataset; only the two-day subset is needed
# read.csv2 leaves the measurement columns as character/factor; convert them
# all in one pass instead of seven copy/pasted statements. Non-numeric
# entries (the dataset's "?" missing-value code) become NA via coercion,
# exactly as before.
numeric_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
                  "Global_intensity", "Sub_metering_1", "Sub_metering_2",
                  "Sub_metering_3")
select.data[numeric_cols] <- lapply(select.data[numeric_cols],
                                    function(v) as.numeric(as.character(v)))
# Merge Date and Time into a single datetime, then drop the Time column.
select.data$Date <- strptime(paste(select.data$Date, select.data$Time),
                             "%d/%m/%Y %H:%M:%S")
select.data$Time <- NULL
# Plot 1: histogram of global active power, written to plot1.png.
png("plot1.png")
hist(select.data$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off() |
db73481699699d5c12675794927544382ab840cc | c1ce9fcfb9389b87bbe42391bf9b1f47d467f5a5 | /R_4.4_1_to_3.r | 6aecd274d6b2a037bb4cbcf8dd199aa7b44f5d46 | [] | no_license | prithviadhi/DSC-ACC-RV | 9323b1ba979b27c08145984535e46069533e88eb | 103ae02c937eb63061c0a85d2ba7d8a77c08ac91 | refs/heads/master | 2020-04-06T23:03:40.073928 | 2018-11-16T11:15:03 | 2018-11-16T11:15:03 | 157,856,626 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,747 | r | R_4.4_1_to_3.r |
# Week-2.
# Prithvi Adhikarla.
# 4.4
1.
my_variable <- 10
my_variable
2.
install.packages("tidyverse")
library(tidyverse)
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy))
3.
filter(mpg, cyl = 8)
4.
filter(diamonds, carat > 3)
# -------------------------------------------------------------
# 5.2.4
Find all flights that ...
1.1.had an arrival delay of two or more hours.
install.packages("tidyverse")
install.packages("/Users/padhikarla/Downloads/nycflights13_1.0.0.tar.gz", repos=NULL, method="libcurl")
library(nycflights13)
df_arr_delay_2hrs <- filter(flights, arr_delay>=120)
1.2. Flew to Houston (IAH or HOU)
filter(flights, dest %in% c('IAH', 'HOU'))
1.3. Were operated by United, American, or Delta
filter(flights, carrier %in% c('UA', 'AA', 'DL'))
1.4. Departed in summer (July, August, and September)
filter(flights, month %in% c(7,8,9))
1.5. Arrived more than two hours late, but didn’t leave late
filter(flights, arr_delay>=120 & dep_delay<=0)
1.6. Were delayed by at least an hour, but made up over 30 minutes in flight
df_made_up_late_time <- filter(flights, dep_delay>=60 & dep_delay - arr_delay > 30)
1.7. Departed between midnight and 6am (inclusive)
(filter(flights, sched_dep_time>=0000 & sched_dep_time <= 0600))
# 5.2.4
2.Another useful dplyr filtering helper is between(). What does it do? Can you use it to simplify the code needed to answer the previous challenges?
filter(flights, between(month, 7, 9))
filter(flights, between(sched_dep_time, 0000, 0600))
3. How many flights have a missing dep_time? What other variables are missing? What might these rows represent?
filter(flights,is.na(dep_time))
8255 flights.
# Below shows the column names that have missing values.
colnames(flights)[ apply(flights, 2, anyNA) ]
Q: What might these rows represent?
A: I am guessing thse are charter flights departed from NY Airport.
4. Why is NA ^ 0 not missing? Why is NA | TRUE not missing? Why is FALSE & NA not missing?
Can you figure out the general rule? (NA * 0 is a tricky counterexample!)
4A: NA^0 = 1, because anything raised to the power of zero is 1.
NA | TRUE = TRUE; as a general rule of thumb, if one operand of an OR is TRUE, the result is TRUE regardless of the other operand.
FALSE & NA = FALSE; as a general rule of thumb, if one operand of an AND is FALSE, the result is FALSE regardless of the other operand.
NA * 0 = NA; although anything times zero is usually zero, the missing value could stand for Inf or -Inf, and Inf * 0 is NaN, so R cannot safely return 0 here.
# -------------------------------------------------------------------------------------------
5.3.1 How could you use arrange() to sort all missing values to the start? (Hint: use is.na()).
arrange(flights,desc(is.na(dep_time)))
5.3.2 Sort flights to find the most delayed flights. Find the flights that left earliest.
(arrange(flights,desc(arr_delay)))
(arrange(flights, sched_dep_time, dep_delay))
5.3.3 Sort flights to find the fastest flights.
df_flights_with_speed <- arrange(flights %>% mutate(speed = distance/air_time), desc(speed))
5.3.4 Which flights travelled the longest? Which travelled the shortest?
Answer: I do not think we can compute this exactly with the information available.
Sorting by distance would not fully satisfy the requirement, as "distance" is only the distance between airports,
when in fact a flight can be in the air longer and travel farther because of bad weather, rerouting, etc.
# ---------------------------------------------------------------------------------------------
5.4.1 Brainstorm as many ways as possible to select dep_time, dep_delay, arr_time, and arr_delay from flights.
select(flights, dep_time, dep_delay, arr_time, arr_delay)
select(flights, 4, 6, 7, 9)
select(flights, starts_with("dep_"), starts_with("arr_")) |
2c109addf1980b2a395b14788689d45c7f5ed2f5 | 7309103dd69b32622c9d93977d3e41a99fdae52a | /assets/2019-Machine Learning Techniques for Code Smell Detection- A Systematic Literature Review and Meta-Analysis/WithinVSCross-Forestplot.R | 015dc15fc47a3eb98b07db7eb4fb51abd6581ced | [] | no_license | anhquandlqb2001/code_smell_py | 7f75abc63db1152a9a04453d102acf51835a01b0 | 5cc63869b72987addaf5147bf79b999de69a5360 | refs/heads/master | 2023-05-06T21:00:39.729396 | 2021-05-22T07:28:29 | 2021-05-22T07:28:29 | 369,744,436 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,220 | r | WithinVSCross-Forestplot.R |
# Forest plot comparing within-project vs cross-project totals.
library(rmeta)       # meta.MH(), meta.colors()
library(forestplot)
library(readxl)

# Load the analysis sheet; View()/names() are interactive sanity checks.
wc_data <- read_excel("FPlot_analysis.xlsx", sheet = 3)
View(wc_data)
names(wc_data)

# attach() exposes the sheet's columns (Study, WithinProject, CrossProject,
# ...) as bare names for the calls below.
attach(wc_data)

# Mantel-Haenszel fixed-effect meta-analysis, one stratum per study.
mh_fit <- meta.MH(withintotal, crosstotal, WithinProject, CrossProject,
                  names = Study, data = wc_data)

# Left-hand label columns: study name, raw counts, and odds ratios, with an
# overall Mantel-Haenszel summary row at the bottom.
label_text <- cbind(
  c("Study", mh_fit$names, NA, "Summary"),
  c("Within", WithinProject, NA, NA),
  c("Cross", CrossProject, NA, NA),
  c("OR", format(exp(mh_fit$logOR), digits = 2), NA,
    format(exp(mh_fit$logMH), digits = 2))
)

# Point estimates and +/- 2 SE interval bounds on the log-OR scale; the NA
# entries align with the header row and the spacer before the summary.
est <- c(NA, mh_fit$logOR, NA, mh_fit$logMH)
lower <- est - c(NA, mh_fit$selogOR, NA, mh_fit$selogMH) * 2
upper <- est + c(NA, mh_fit$selogOR, NA, mh_fit$selogMH) * 2

forestplot(label_text, est, lower, upper,
           is.summary = c(TRUE, rep(FALSE, 6), TRUE),
           clip = c(log(0.1), log(2.5)),
           xlog = TRUE,
           new_page = TRUE,
           digitsize = 0.9,
           boxsize = 1,
           graphwidth = unit(3, "inches"),
           col = meta.colors(box = "royalblue",
                             line = "darkblue", summary = "royalblue"))
|
364b1b4b7336c66060d6160cdf83b50b265d77a4 | 56c984e900b090ce16517938ff2b3feacabc2108 | /R/scripts/csbc_pson/study_summaries/project_summaries_syn7349759.R | 8c9c0a35721b4ef032fa97bd7c3f06fc73a9eadc | [
"Apache-2.0"
] | permissive | avanlinden/syndccutils | 3306cc039f7fa8a2714024c510d85a32c83b5ff1 | e852d282daafaad1c48cfe3066a4970706496123 | refs/heads/master | 2022-12-19T09:10:39.931285 | 2020-03-17T17:33:54 | 2020-03-17T17:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,504 | r | project_summaries_syn7349759.R | source("R/charts.R")
source("R/tables.R")
source("R/synapse_helpers.R")
source("R/utils.R")
#This works on the CSBC PSOC site
# Script/template to create summary tables and charts for a "study"
# Log in to Synapse (credentials must already be available to synLogin()).
synLogin()
# When TRUE, the generated tables/charts are uploaded back to Synapse;
# set to FALSE for a local dry run.
update_remote <- TRUE
# Config ------------------------------------------------------------------
synproject_id <- "syn7315805" # Synapse project for project Center
parent_id <- "syn11738140" # Center 'Reporting' folder where files should be stored
master_fileview_id <- "syn11448522" # Synapse fileview associated with project
tool_fileview_id <- "syn11957355"
# Collect data ------------------------------------------------------------
# Pull the master fileview as a data frame (one row per data file).
fileview_df <- get_table_df(master_fileview_id)
##Start with scRNA seq project
source_id <- "syn11448532" # Synapse folder associated with project
# Assays by patient -------------------------------------------------------
# Output file names for the HTML artifacts, prefixed with the source
# folder's Synapse ID so outputs from different folders don't collide.
table_filename <- glue::glue("{source_id}_DataFileCountsByAssay.html",
                             source_id = source_id)
files_by_assay_and_diagnosis_table_filename <- glue::glue("{source_id}_DataFileCountsByAssayAndDiagnosis.html",
                                                          source_id = source_id)
assay_by_diagnosis_chart_filename <- glue::glue("{source_id}_AssayDataFilesByDiagnosis.html",
                                                source_id = source_id)
patient_by_diagnosis_chart_filename <- glue::glue("{source_id}_PatientsByDiagnosis.html",
                                                  source_id = source_id)
# Count distinct files ("id"), patients ("individualID") and specimens
# ("specimenID") for every assay/diagnosis pair in `view_df`, then attach
# an HTML query link back to the source fileview (`table_id`) for each row.
summarize_datafiles_by_assay_and_diagnosis <- function(view_df, table_id) {
  # Columns whose distinct values are counted within each group.
  count_cols <- c("id", "individualID",
                  "specimenID")
  view_df %>%
    group_by(assay, diagnosis) %>%
    summarise_at(count_cols, n_distinct) %>%
    # rowwise() so build_tablequery() sees one assay/diagnosis pair at a time.
    rowwise() %>%
    mutate(sourceFileview = table_id,
           query = build_tablequery(sourceFileview, assay, diagnosis)) %>%
    # Render the query as an HTML link column, then drop the raw query text.
    add_queryview_column(format = "html") %>%
    select(-query)
}
# create and save table
##datafile_counts_by_assay <- fileview_df %>%
##  summarize_datafiles_by_assay(master_fileview_id)
##datafile_counts_by_assay_dt <- datafile_counts_by_assay %>%
##  format_summarytable_columns("assay") %>%
##  as_datatable()
# Build the assay-by-diagnosis summary and format it as an HTML datatable.
datafile_counts_by_assay_and_diagnosis <- fileview_df %>%
  summarize_datafiles_by_assay_and_diagnosis(master_fileview_id)
datafile_counts_by_assay_and_diagnosis_dt <- datafile_counts_by_assay_and_diagnosis %>%
  format_summarytable_columns(c("assay", "diagnosis")) %>%
  as_datatable()
##syn_dt_entity <- datafile_counts_by_assay_dt %>%
##  save_datatable(parent_id, table_filename, .)
# Upload the datatable to the Reporting folder (skipped in dry runs).
if (update_remote) {
  syn_file_by_assay_and_diagnosis_dt_entity <- datafile_counts_by_assay_and_diagnosis_dt %>%
    save_datatable(parent_id, files_by_assay_and_diagnosis_table_filename, .)
}
# Chart of sample counts per tumorType/egfrStatus, keyed on individualID.
chart<-plot_sample_counts_by_annotationkey_2d(fileview_df,sample_key='individualID',annotation_keys=c(tumorType='Tumor Type',egfrStatus='EGFR Status'))
if (update_remote) {
  syn_entity <- save_chart(parent_id, patient_by_diagnosis_chart_filename, chart)
}
# Files by category -------------------------------------------------------
chart_filename <- glue::glue("{source_id}_AllFilesByCategory.html",
                             source_id = source_id)
# Annotation keys to chart, mapped to their human-readable axis labels.
categories <- list(assay = "Assay", diagnosis = "Diagnosis",
                   species = "Species",
                   organ = "Organ", tissue = "Tissue",
                   dataType = "Data Type")
# For each annotation key, draw one stacked bar of file counts and turn it
# into an interactive plotly panel; the panels are then laid out side by
# side with a shared y axis. map2() iterates over the labels (categories)
# and their names (the annotation keys) together.
chart <- categories %>%
  map2(.y = names(.), function(annotation_prettykey, annotation_key) {
    p <- fileview_df %>%
      group_by(.dots = annotation_key) %>%
      tally() %>%
      ggplot(aes(x = 1, y = n)) +
      geom_col(aes_string(fill = annotation_key),
               colour = "white") +
      scale_fill_viridis_d() +
      xlab(annotation_prettykey) +
      ylab("Number of Files") +
      theme_minimal() +
      # A single stacked bar per panel: hide the meaningless x axis marks.
      theme(axis.text.x = element_blank(),
            axis.ticks.x = element_blank()) +
      guides(fill = FALSE)
    # Panel width scales with the number of categories being plotted.
    ggplotly(p, tooltip = c("y", "fill"),
             width = 100 * length(categories) + 50,
             height = 300)
  }) %>%
  subplot(shareY = TRUE, titleX = TRUE) %>%
  layout(showlegend = FALSE,
         font = list(family = "Roboto, Open Sans, sans-serif"))
# chart
if (update_remote) {
  syn_entity <- save_chart(parent_id, chart_filename, chart)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.