content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/invoicer_bill_client.R
\name{invoicer_bill_client}
\alias{invoicer_bill_client}
\title{Generate and send any outstanding invoices to the client}
\usage{
invoicer_bill_client(client = "iFixit",
first_bill_date = as.Date("2018-09-24"), billing_period = 14)
}
\arguments{
\item{client}{client name}
\item{first_bill_date}{date to send first bill}
\item{billing_period}{length of billing period (in days)}
}
\description{
Generate and send any outstanding invoices to the client
}
| /man/invoicer_bill_client.Rd | no_license | anthonypileggi/invoicer | R | false | true | 559 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/invoicer_bill_client.R
\name{invoicer_bill_client}
\alias{invoicer_bill_client}
\title{Generate and send any outstanding invoices to the client}
\usage{
invoicer_bill_client(client = "iFixit",
first_bill_date = as.Date("2018-09-24"), billing_period = 14)
}
\arguments{
\item{client}{client name}
\item{first_bill_date}{date to send first bill}
\item{billing_period}{length of billing period (in days)}
}
\description{
Generate and send any outstanding invoices to the client
}
|
\name{components}
\alias{components}
\alias{smallComps}
\alias{removeComps}
\alias{combineComps}
\alias{suggestCompCombis}
\title{Functions to assess and refine ALS components}
\description{One of the inherent drawbacks of the MCR-ALS method is that
in the vast majority of cases there is no one unique set of components
describing the data, a situation known as "rotational ambiguity". This
implies that in some cases a spectrum of a chemical compound can be
described by a linear combination of two ALS components. This can
sometimes be recognised by looking at elution profiles. In addition,
in cases where the number of components is too large, some components
may only describe noise or very small and irrelevant features. The
functions described here allow one to find which components only
correspond with minor features, to remove components, and to merge
components.
}
\usage{
smallComps(obj, Ithresh)
removeComps(obj, toRemove, ...)
combineComps(obj, compList, weights, ...)
suggestCompCombis(obj, indices, Ithresh = 0, corthresh = 0.9,
clusterHeight = 0.6)
}
\arguments{
\item{obj}{The R object containing the als model}
\item{Ithresh}{Intensity cutoff: all components with a maximal intensity
(in the elution profiles) below this value will be termed "small".}
\item{toRemove}{The indices of the components to remove from the ALS
model. A new call to \code{doALS} will be done with the smaller set
of components.}
\item{\dots}{Additional arguments to \code{doALS}, e.g. \code{maxiter
= 1} if no full set of iterations is required.}
\item{compList}{A list indicating which components need to be
combined. Using \code{list(1, c(2, 3), 4)} will lead to a
three-component model, where components 1 and 4 are unchanged and
components 2 and 3 are combined.}
\item{weights}{Weights for the components to be combined. If not
provided, equal weights will be assumed.}
\item{indices}{A list indicating in which (groups of) samples
correlations will be calculated. See details.}
\item{corthresh}{Correlation threshold: components with elution
profiles showing a higher correlation than this threshold may be
candidates for merging.}
\item{clusterHeight}{Similarity threshold at which to cut the
dendrogram (see details).}
}
\details{Function \code{suggestCompCombis} checks correlations in
elution profiles that could point to a situation where one chemical
compound is described by two or more ALS components. For every sample
in which this correlation is higher than the threshold, a "hit" will
be recorded for these two components. After checking all samples and
all combinations, the hit matrix will be used as a similarity measure
in a hierarchical clustering. The dendrogram will be cut at a specific
height, leading to groups of components, sometimes containing more
than one element. In such a case, these components could be considered
for merging.
If injections of pure standards are present, they probably should not
be used in isolation to check for coelution; rather, suggestions for
combined components can be validated looking at the elution profiles
of the standards.
}
\value{Functions \code{removeComps} and \code{combineComps} return
\code{ALS} objects with fewer components than the original
object. Function \code{smallComps} returns a list of two elements:
\item{smallComps}{the indices of the small components}
\item{maxCvalues}{the maximal values found in the concentration
profiles across all samples for each of the components.}
}
\author{Ron Wehrens}
\examples{
data(tea)
new.lambdas <- seq(260, 500, by = 2)
tea <- lapply(tea.raw, preprocess)
tea.split <- splitTimeWindow(tea, c(12, 14), overlap = 10)
Xl <- tea.split[[3]]
Xl.opa <- opa(Xl, 10)
Xl.als <- doALS(Xl, Xl.opa)
smallC <- smallComps(Xl.als, 5)
smallC
Xl.als2 <- removeComps(Xl.als, smallC$smallComps)
summary(Xl.als)
summary(Xl.als2)
## smaller models, but with a higher fit error...
## another way to decrease the number of components, this example
## not particularly deep, just to show how it can be done:
Xl.als3 <- combineComps(Xl.als, list(1, 2, 3:4, 5, c(6, 10), 6, 7:9))
summary(Xl.als3)
}
\keyword{manip}
| /man/components.Rd | no_license | forked-packages/alsace | R | false | false | 4,253 | rd | \name{components}
\alias{components}
\alias{smallComps}
\alias{removeComps}
\alias{combineComps}
\alias{suggestCompCombis}
\title{Functions to assess and refine ALS components}
\description{One of the inherent drawbacks of the MCR-ALS method is that
in the vast majority of cases there is no one unique set of components
describing the data, a situation known as "rotational ambiguity". This
implies that in some cases a spectrum of a chemical compound can be
described by a linear combination of two ALS components. This can
sometimes be recognised by looking at elution profiles. In addition,
in cases where the number of components is too large, some components
may only describe noise or very small and irrelevant features. The
functions described here allow one to find which components only
correspond with minor features, to remove components, and to merge
components.
}
\usage{
smallComps(obj, Ithresh)
removeComps(obj, toRemove, ...)
combineComps(obj, compList, weights, ...)
suggestCompCombis(obj, indices, Ithresh = 0, corthresh = 0.9,
clusterHeight = 0.6)
}
\arguments{
\item{obj}{The R object containing the als model}
\item{Ithresh}{Intensity cutoff: all components with a maximal intensity
(in the elution profiles) below this value will be termed "small".}
\item{toRemove}{The indices of the components to remove from the ALS
model. A new call to \code{doALS} will be done with the smaller set
of components.}
\item{\dots}{Additional arguments to \code{doALS}, e.g. \code{maxiter
= 1} if no full set of iterations is required.}
\item{compList}{A list indicating which components need to be
combined. Using \code{list(1, c(2, 3), 4)} will lead to a
three-component model, where components 1 and 4 are unchanged and
components 2 and 3 are combined.}
\item{weights}{Weights for the components to be combined. If not
provided, equal weights will be assumed.}
\item{indices}{A list indicating in which (groups of) samples
correlations will be calculated. See details.}
\item{corthresh}{Correlation threshold: components with elution
profiles showing a higher correlation than this threshold may be
candidates for merging.}
\item{clusterHeight}{Similarity threshold at which to cut the
dendrogram (see details).}
}
\details{Function \code{suggestCompCombis} checks correlations in
elution profiles that could point to a situation where one chemical
compound is described by two or more ALS components. For every sample
in which this correlation is higher than the threshold, a "hit" will
be recorded for these two components. After checking all samples and
all combinations, the hit matrix will be used as a similarity measure
in a hierarchical clustering. The dendrogram will be cut at a specific
height, leading to groups of components, sometimes containing more
than one element. In such a case, these components could be considered
for merging.
If injections of pure standards are present, they probably should not
be used in isolation to check for coelution; rather, suggestions for
combined components can be validated looking at the elution profiles
of the standards.
}
\value{Functions \code{removeComps} and \code{combineComps} return
\code{ALS} objects with fewer components than the original
object. Function \code{smallComps} returns a list of two elements:
\item{smallComps}{the indices of the small components}
\item{maxCvalues}{the maximal values found in the concentration
profiles across all samples for each of the components.}
}
\author{Ron Wehrens}
\examples{
data(tea)
new.lambdas <- seq(260, 500, by = 2)
tea <- lapply(tea.raw, preprocess)
tea.split <- splitTimeWindow(tea, c(12, 14), overlap = 10)
Xl <- tea.split[[3]]
Xl.opa <- opa(Xl, 10)
Xl.als <- doALS(Xl, Xl.opa)
smallC <- smallComps(Xl.als, 5)
smallC
Xl.als2 <- removeComps(Xl.als, smallC$smallComps)
summary(Xl.als)
summary(Xl.als2)
## smaller models, but with a higher fit error...
## another way to decrease the number of components, this example
## not particularly deep, just to show how it can be done:
Xl.als3 <- combineComps(Xl.als, list(1, 2, 3:4, 5, c(6, 10), 6, 7:9))
summary(Xl.als3)
}
\keyword{manip}
|
#install.packages("shiny")
library(shiny)
# Define UI for dataset viewer app: a dataset selector plus a numeric
# "number of observations" control, feeding a summary and a data table ----
ui <- fluidPage(
  # App title ----
  titlePanel("Shiny Text"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: Selector for choosing dataset (all three are built-in R datasets) ----
      selectInput(inputId = "dataset",
                  label = "Choose a dataset:",
                  choices = c("rock", "pressure", "cars")),
      # Input: Numeric entry for number of obs to view ----
      numericInput(inputId = "obs",
                   label = "Number of observations to view:",
                   value = 10)
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: Verbatim text for data summary ----
      verbatimTextOutput("summary"),
      # Output: HTML table with requested number of observations ----
      tableOutput("view")
    )
  )
)
# Define server logic to summarise and display the selected dataset ----
server <- function(input, output) {
  # Return the requested dataset; reactive() caches the lookup so both
  # outputs below share one evaluation per change of input$dataset ----
  datasetInput <- reactive({
    switch(input$dataset,
           "rock" = rock,
           "pressure" = pressure,
           "cars" = cars)
  })
  # Generate a summary of the dataset ----
  output$summary <- renderPrint({
    dataset <- datasetInput()
    summary(dataset)
  })
  # Show the first "n" observations ----
  output$view <- renderTable({
    head(datasetInput(), n = input$obs)
  })
}
shinyApp(ui, server)
| /Examples/app.R | no_license | jcksac/Rshiny | R | false | false | 1,498 | r | #install.packages("shiny")
library(shiny)
# Define UI for dataset viewer app: a dataset selector plus a numeric
# "number of observations" control, feeding a summary and a data table ----
ui <- fluidPage(
  # App title ----
  titlePanel("Shiny Text"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: Selector for choosing dataset (all three are built-in R datasets) ----
      selectInput(inputId = "dataset",
                  label = "Choose a dataset:",
                  choices = c("rock", "pressure", "cars")),
      # Input: Numeric entry for number of obs to view ----
      numericInput(inputId = "obs",
                   label = "Number of observations to view:",
                   value = 10)
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: Verbatim text for data summary ----
      verbatimTextOutput("summary"),
      # Output: HTML table with requested number of observations ----
      tableOutput("view")
    )
  )
)
# Define server logic to summarise and display the selected dataset ----
server <- function(input, output) {
  # Return the requested dataset; reactive() caches the lookup so both
  # outputs below share one evaluation per change of input$dataset ----
  datasetInput <- reactive({
    switch(input$dataset,
           "rock" = rock,
           "pressure" = pressure,
           "cars" = cars)
  })
  # Generate a summary of the dataset ----
  output$summary <- renderPrint({
    dataset <- datasetInput()
    summary(dataset)
  })
  # Show the first "n" observations ----
  output$view <- renderTable({
    head(datasetInput(), n = input$obs)
  })
}
shinyApp(ui, server)
|
#!/usr/bin/env Rscript
## THIS SCRIPT EXPECTS THAT YOU HAVE PROCESSED SAMPLES WITH THE PIPELINE: /home/apa/local/bin/chipseq-tertiary.
## Thus, in the dataroot/ location (specified below), each sample name will be its own subdirectory.
## Within each of these subdirs, there will be a "bams" directory containing "pooled.bam".
## 'dataroot' and IP-input sample name pairs will be given below, and the corresponding pooled.bams used.
## It is OK to give IPs without inputs -- some protocols like ATAC, Nexus have no inputs -- the RData objects will lack the LFC matrices, that's all.
##
## See args below for inputs.
## Outputs: a directory tree containing heatmap matrix RData objects, also a small RData file with a list object, showing the final tree and its contents.
## Within the tree root, each bed file gets its own directory.
## Within bed directories, each value of K gets a subdir.
## Within these subdirs, each BAM file gets its own RData object.
## So, final paths like output_root/bed_file/k_value/sample_name.coverage_matrix_data.RData
##
## Finally, each RData file contains one object 'coverage.matrix.data', with this structure:
## coverage.matrix.data <- list(
## matrix=list( # list of matrices of IP signal and transformations thereof. Float values have been rounded to 3 places.
## raw=matrix(...), # Raw CoverageView::cov.matrix() output (transposed), such that rows = peaks, cols = coverageView bins.
## logrpm=matrix(...), # log2-adjusted-RPM, that is, log2(1E6*(raw+1)/M), where 'M' is the value of 'aligns' below.
## pctmax=matrix(...), # raw/max(raw). To curb outliers, max(raw) is actually quantile(raw,0.99), and final matrix values > 1 have been thresholded down to 1.
## zscore=matrix(...) # the logrpm matrix, as row z-scores, i.e. t(scale(t(logrpm))).
## ),
## LFC.matrix=list( # An empty list, OR, if N inputs supplied, then a list of N matrices, one for each input (usually, 0 or 1 inputs given).
## input.i=matrix(...), # this "logrpm" - input i "logrpm"
## ..., # ...
## input.N=matrix(...) # this "logrpm" - input N "logrpm"
## ),
## aligns=aligns, # N alignments in IP bam (samtools view IP.bam | wc -l)
## bam=bams, # /path/to/IP.bam
## bed=bed, # BED file supplied for CoverageView::cov.matrix()
## inputs=inputs, # vector of /path/to/input.bam(s), one for each matrix in "LFC.matrix". Or, empty.
## RData=outfiles # /path/to/this.RData, just for reference.
## )
## SETUP
source("/home/apa/apa_tools.R")  # in-house helpers: named.vector, new.list, read.bed, random.string, write.heatmap.bed, coverage.matrix.*
ca <- commandArgs(trailing=TRUE)
bedlist <- ca[1]   # headered 2-col file with col 1 = name, col 2 = path/to/bedfile
ip.inp <- ca[2]    # headered 2-col file: col 1 = IP sample name; col 2 = input sample name (if any), colnames = "IP", "Input".
dataroot <- ca[3]  # all data should be systematically arranged within this location (expecting chipseq_tertiary pipeline output structure)
## multiple dataroots (as comma-sep string) can be given, and will be searched. *** HOWEVER: sample names must be unique across all these locations!! ***
outpath <- ca[4]   # directory where cov.mats will be written
genome <- ca[5]    # genome label, e.g. "mm10": what genome are these peaks from, so we know if flanked peaks run off the end of the chromosome.
kval <- ca[6]      # peak +- flank value (from midpoint), e.g. 5000 to make heatmaps from peak-mids+-5000. Can be CSV, e.g. 1000,2000,5000 for multiple k.
cores <- ca[7]     # N cores for CoverageView matrix generation
clobber <- as.logical(ca[8])  # TRUE = overwrite existing RData objects (NA if argument omitted)
## bedlist="b4.beds"; ip.inp="b4.ipi.txt"; dataroot="~/cbio.bdk.113/data/output,~/cbio.bdk.107/data"; outpath="~/cbio.bdk.113/data/cov.mats"; genome="mm10"; kval="5000"; cores=40
message("Validating arguments...")
## Split comma-separated dataroots; each must exist and is normalized to end with "/"
dataroot <- unlist(strsplit(dataroot,","))
D <- length(dataroot)
for (i in 1:D) {
  if (!dir.exists(dataroot[i])) stop("Data root location '",dataroot[i],"' does not exist!\n")
  if (!grepl("/$",dataroot[i])) dataroot[i] <- paste0(dataroot[i],"/")
}
if (!grepl("/$",outpath)) outpath <- paste0(outpath,"/")
## Master RData outputs; the ".tmp." variants are fallback filenames used when
## new results cannot be merged into an existing collection (see merge logic below)
beds.RData <- paste0(outpath,"cov.mat.beds.RData")
paths.RData <- paste0(outpath,"cov.mat.paths.RData")
tempstring <- paste0(random.string(10),".tmp.")
beds.RData.tmp <- sub("RData",paste0(tempstring,"RData"),beds.RData)
paths.RData.tmp <- sub("RData",paste0(tempstring,"RData"),paths.RData)
kval <- as.numeric(unlist(strsplit(kval,",")))
K <- length(kval)
if (any(is.na(kval))) stop("Some 'kval' values were not numeric!\n")
cores <- as.numeric(cores)
if (is.na(cores)) stop("Cores value '",cores,"' was not numeric!\n")
## Process IP-input pairings
message("Checking IP/input pairings...")
ip.inp <- read.delim(ip.inp, as.is=TRUE)
inp.given <- ip.inp[,2]!=""           # which IPs have an input sample listed
inp.given[is.na(inp.given)] <- FALSE  # treat NA (empty cell) the same as ""
any.inp <- any(inp.given)
dup <- duplicated(ip.inp)
if (any(dup)) {
  message(paste(sum(dup),"duplicated IP-input pairings removed!\n"))
  ip.inp <- ip.inp[!dup,]
}
if (any.inp) ip.inp[ip.inp[,2]=="",2] <- NA  # normalize "no input" to NA
## READ FINAL PEAKLISTS FOR HEATMAPS
## GENERATE HEATMAP VERSION(S) FOR EACH BED
message("Reading bed files...")
beds1 <- read.delim(bedlist, as.is=TRUE)
if (any(duplicated(beds1[,1]))) stop("Bed file names (column 1 of bed list) are not unique! Halting.\n")
beds1 <- cbind(beds1, FULL=NA)  # FULL = absolute path of each bed, resolved below via readlink
for (i in 1:nrow(beds1)) beds1$FULL[i] <- system(paste0("bash -c 'readlink -f ",beds1[i,2],"'"),intern=TRUE)
beds <- named.vector(beds1[,2], beds1[,1])
beds <- lapply(lapply(beds, read.bed), '[', , 1:6) # read only first 6 cols // should die if any missing files
sapply(beds, nrow)  # NOTE(review): result not captured; presumably an interactive row-count check
cov.mat.beds <- beds
cov.mat.paths <- beds
## Named index vectors for apply-type calls
bedi <- 1:length(beds); names(bedi) <- names(beds)
ki <- 1:K; names(ki) <- paste0("k",kval/1E3) # "5000" named "k5", e.g.
## GET IP AND INPUT BAM PATHS
## MATCH IPS WITH INPUTS
message("Checking bam files...")
## Locate the unique dataroot containing <sample>/bams/pooled.bam.
## Returns that dataroot (trailing slash included); stops with an error if the
## sample is found in zero, or in more than one, of the configured dataroots.
source.sample <- function(sample) {
  candidate.bams <- paste0(dataroot, sample, "/bams/pooled.bam")
  hits <- dataroot[file.exists(candidate.bams)]  # file.exists() is vectorized
  n.hits <- length(hits)
  if (n.hits == 0) {
    stop(paste0("Sample '", sample, "' was NOT found in the following locations: ", paste(dataroot, collapse="', '"), ". Halting.\n"))
  } else if (n.hits > 1) {
    stop(paste0("Sample '", sample, "' was found in multiple locations: '", paste(hits, collapse="', '"), "'. Halting.\n"))
  } else {
    hits
  }
}
get.aligns <- function(x) {
  ## 'x' is a /path/to/bam.  Returns the total alignment count for the bam
  ## (sum of column 3, "mapped reads", of its samtools-idxstats file), or NA
  ## if the count cannot be obtained.  Used downstream for RPM conversion.
  idx <- sub("bam$","idxstats.txt",x)
  if (!file.exists(idx)) {
    message(paste0("expected idxstats file '",idx,"' not found, creating...\n"))
    ## idxstats requires a .bai index; build it first if absent
    if (!file.exists(paste0(x,'.bai'))) system(paste("samtools index",x))
    system(paste("samtools idxstats",x,">",idx))
  }
  ## Sum the mapped-read column in R instead of shelling out to
  ## "bash -c 'paste ... | bc'": removes the bash/bc dependency and replaces
  ## the original's scalar misuse of ifelse() with direct NA propagation.
  ## Any read/parse failure (e.g. idxstats file still missing) yields NA.
  N <- tryCatch(
    suppressWarnings(sum(as.numeric(read.delim(idx, header=FALSE, as.is=TRUE)[[3]]))),
    error=function(e) NA_real_)
  N
}
## IP bams: one pooled.bam per unique IP sample; source.sample() stops unless
## exactly one dataroot contains the sample
bams <- sapply(unique(ip.inp$IP), function(x) paste0(source.sample(x),x,"/bams/pooled.bam") )
names(bams) <- unique(ip.inp$IP)
## Input bams
if (any.inp) {
  bams.inp <- unique(ip.inp$Input[!is.na(ip.inp$Input)])
  bams.inp <- named.vector(sapply(bams.inp, function(x) paste0(source.sample(x),x,"/bams/pooled.bam") ), bams.inp)
} else {
  bams.inp <- c()
}
## Name vectors
samples <- names(bams)
if (any.inp) inputs <- names(bams.inp)
## Named index vectors for apply-type calls
bami <- 1:length(bams); names(bami) <- names(bams)
if (any.inp) {
  inpbami <- 1:length(bams.inp)
  names(inpbami) <- names(bams.inp)
}
## Were BAM filenames successfully reconstructed
ok <- file.exists(bams)
if (any.inp) ok.inp <- file.exists(bams.inp)
## Aligned read counts per BAM (for RPM conversion)
aligns <- named.vector(sapply(bams, get.aligns), names(bams))
if (any.inp) aligns.inp <- named.vector(sapply(bams.inp, get.aligns), names(bams.inp))
## CHECK: are all BAM files found, have known N alignments
message("IP BAM status:")
print(data.frame(ok,aligns,bams))
message("Input BAM status:")
if (any.inp) print(data.frame(ok.inp,aligns.inp,bams.inp))
## Fail fast: RPM conversion downstream is impossible without alignment counts
if (any(is.na(aligns))) stop("Some IP bams could not be counted! Cannot proceed.\n")
if (any.inp) if (any(is.na(aligns.inp))) stop("Some input bams could not be counted! Cannot proceed.\n")
## Just before takeoff (after everything else has been vetted):
## Make heatmap versions of bed files (convert peaks to windows of fixed length; remove any that run off ends of chroms)
message("Everything checks out, now making expanded bed files...")
bed.paths <- new.list(names(bedi))
for (i in 1:length(bedi)) {
  cov.mat.beds[[i]] <- cov.mat.paths[[i]] <- new.list(names(ki)) # to start; more added below
  orig <- beds[[i]]
  nco <- ifelse(ncol(orig)>6,6,ncol(orig))
  colnames(orig)[1:nco] <- c("Chr","Start","End","Name","Score","Strand")[1:nco] # prevents breaking if input bed had > 6 cols
  for (k in ki) {
    message(names(bedi)[i]," ",kval[k])
    ## Create cov.mat output location: outpath/<bed name>/<k label>/
    cov.mat.paths[[i]][[k]] <- paste0(outpath,names(beds)[i],"/",names(ki)[k]) # initially; later gets replaced with IP/input list
    system(paste0("mkdir -p ",cov.mat.paths[[i]][[k]])) # output cov.mat RData object locations
    if (!dir.exists(cov.mat.paths[[i]][[k]])) stop("Failed to create coverage matrix output location '",cov.mat.paths[[i]][[k]],"'!\n")
    ## Create heatmap bed: fixed windows of peak midpoint +/- kval[k]
    bed.paths[[i]][[k]] <- paste0(cov.mat.paths[[i]][[k]],"/",names(beds)[i],".",names(ki)[k],".bed")
    cov.mat.beds[[i]][[k]] <- write.heatmap.bed(beds[[i]], bed.paths[[i]][[k]], genome, window=kval[k]*2)
    orig <- cbind(orig, cov.mat.beds[[i]][[k]]$OK)  # record per-k pass/fail flags on the original coords
    colnames(orig)[ncol(orig)] <- paste0("OK.",names(ki)[k])
    cov.mat.beds[[i]][[k]] <- cov.mat.beds[[i]][[k]][cov.mat.beds[[i]][[k]]$OK,1:6] # drop any peaks that were dropped in the output
    message("Wrote: ",bed.paths[[i]][[k]])
  }
  cov.mat.beds[[i]]$original <- orig # original unfiltered bed coords, also has "OK" columns for each K value, indicating if that coord made it to that k-bed file
  cov.mat.paths[[i]]$original <- orig
  cov.mat.beds[[i]]$input <- beds1$FULL[i] # path to original input bed file
  cov.mat.paths[[i]]$input <- beds1$FULL[i]
}
if (!file.exists(beds.RData)) {
  save(cov.mat.beds, file=beds.RData)
} else {
  ## adding to existing collection; ensure additions are valid
  beds.merge.ok <- coverage.matrix.RData.validate(cov.mat.beds, beds.RData)
  if (beds.merge.ok) {
    ## merge new data into existing RData file
    coverage.matrix.RData.validate(cov.mat.beds, beds.RData, combine=TRUE)
  } else {
    ## incompatible: write to a uniquely-named temp file instead of clobbering
    message("Current 'cov.mat.beds' dataset incompatible with existing! Saving to '",beds.RData.tmp,"'.\n")
    save(cov.mat.beds, file=beds.RData.tmp)
  }
}
## GENERATE COVERAGE MATRICES
## EACH BED FILE GETS ONE COLUMN PER BAM
library(CoverageView)
message("\nGenerating matrices...")
## Get ordered bam vectors in IP-input table row order
IP.bam <- match(ip.inp$IP, names(bams))
if (any.inp) {
  inp.bam <- match(ip.inp$Input, names(bams.inp))
  has.inp <- !is.na(inp.bam)  # which ip.inp rows actually have an input sample
} else {
  has.inp <- rep(FALSE,length(IP.bam))
}
## For each bed file, for each k value, for each IP-input pairing (rows in ip.inp), compute one RData object
#cov.mat.paths <- cov.mat.beds
for (i in bedi) {
  for (k in ki) {
    bed <- bed.paths[[i]][[k]]
    out <- cov.mat.paths[[i]][[k]] # save off location value, then replace with IP/input list below
    if (any.inp) {
      bam.list <- list(IP=bams[IP.bam],input=bams.inp)
    } else {
      bam.list <- list(IP=bams[IP.bam])
    }
    ## Blank every defined entry to NA: these become placeholders that are
    ## overwritten with real RData paths by the three generate calls below
    cov.mat.paths[[i]][[k]] <- lapply(bam.list, function(x){ x[!is.na(x)]=NA; x }) # initialize as all NA
    ## generate input matrices FIRST (the IP-with-input calls below need the input RData paths)
    if (length(bams.inp)>0) {
      group <- paste0("Bed: ",names(bedi)[i],", k: ",names(ki)[k],", inputs")
      message(group)
      cov.mat.paths[[i]][[k]]$input <- coverage.matrix.generate(bams.inp, bed, out, 100, cores, clobber=clobber, skip=TRUE) # writes RData objects
      message(group," complete!\n")
    }
    ## generate IP matrices SECOND: requires RData objects from inputs
    ## IPs with input first
    if (sum(has.inp)>0) {
      group <- paste0("Bed: ",names(bedi)[i],", k: ",names(ki)[k],", IPs with inputs")
      message(group)
      cov.mat.paths[[i]][[k]]$IP[ has.inp] <- coverage.matrix.generate(bams[ has.inp], bed, out, 100, cores, cov.mat.paths[[i]][[k]]$input[inp.bam[has.inp]], clobber=clobber, skip=TRUE) # writes RData objects
      message(group," complete!\n")
    }
    ## IPs with no input second
    if (sum(!has.inp)>0) {
      group <- paste0("Bed: ",names(bedi)[i],", k: ",names(ki)[k],", IPs with no inputs")
      message(group)
      cov.mat.paths[[i]][[k]]$IP[!has.inp] <- coverage.matrix.generate(bams[!has.inp], bed, out, 100, cores, clobber=clobber, skip=TRUE) # writes RData objects
      message(group," complete!\n")
    }
  }
}
if (!file.exists(paths.RData)) {
  save(cov.mat.paths, file=paths.RData)
} else {
  ## adding to existing collection; ensure additions are valid
  paths.merge.ok <- coverage.matrix.RData.validate(cov.mat.paths, paths.RData)
  if (paths.merge.ok) {
    ## merge new data into existing RData file
    message("Current 'cov.mat.paths' dataset is compatible with the existing one!\n")
    coverage.matrix.RData.validate(cov.mat.paths, paths.RData, combine=TRUE)
  } else {
    ## incompatible: write to a uniquely-named temp file instead of clobbering
    message("Current 'cov.mat.paths' dataset incompatible with existing! Saving to '",paths.RData.tmp,"'.\n")
    save(cov.mat.paths, file=paths.RData.tmp)
  }
}
message(paste0("Matrix creation complete!\nSee: ",paths.RData))
| /boneyard/prepareHeatmapMatrices | no_license | zm-git-dev/apa_bin | R | false | false | 13,985 | #!/usr/bin/env Rscript
## THIS SCRIPT EXPECTS THAT YOU HAVE PROCESSED SAMPLES WITH THE PIPELINE: /home/apa/local/bin/chipseq-tertiary.
## Thus, in the dataroot/ location (specified below), each sample name will be its own subdirectory.
## Within each of these subdirs, there will be a "bams" directory containing "pooled.bam".
## 'dataroot' and IP-input sample name pairs will be given below, and the corresponding pooled.bams used.
## It is OK to give IPs without inputs -- some protocols like ATAC, Nexus have no inputs -- the RData objects will lack the LFC matrices, that's all.
##
## See args below for inputs.
## Outputs: a directory tree containing heatmap matrix RData objects, also a small RData file with a list object, showing the final tree and its contents.
## Within the tree root, each bed file gets its own directory.
## Within bed directories, each value of K gets a subdir.
## Within these subdirs, each BAM file gets its own RData object.
## So, final paths like output_root/bed_file/k_value/sample_name.coverage_matrix_data.RData
##
## Finally, each RData file contains one object 'coverage.matrix.data', with this structure:
## coverage.matrix.data <- list(
## matrix=list( # list of matrices of IP signal and transformations thereof. Float values have been rounded to 3 places.
## raw=matrix(...), # Raw CoverageView::cov.matrix() output (transposed), such that rows = peaks, cols = coverageView bins.
## logrpm=matrix(...), # log2-adjusted-RPM, that is, log2(1E6*(raw+1)/M), where 'M' is the value of 'aligns' below.
## pctmax=matrix(...), # raw/max(raw). To curb outliers, max(raw) is actually quantile(raw,0.99), and final matrix values > 1 have been thresholded down to 1.
## zscore=matrix(...) # the logrpm matrix, as row z-scores, i.e. t(scale(t(logrpm))).
## ),
## LFC.matrix=list( # An empty list, OR, if N inputs supplied, then a list of N matrices, one for each input (usually, 0 or 1 inputs given).
## input.i=matrix(...), # this "logrpm" - input i "logrpm"
## ..., # ...
## input.N=matrix(...) # this "logrpm" - input N "logrpm"
## ),
## aligns=aligns, # N alignments in IP bam (samtools view IP.bam | wc -l)
## bam=bams, # /path/to/IP.bam
## bed=bed, # BED file supplied for CoverageView::cov.matrix()
## inputs=inputs, # vector of /path/to/input.bam(s), one for each matrix in "LFC.matrix". Or, empty.
## RData=outfiles # /path/to/this.RData, just for reference.
## )
## SETUP
source("/home/apa/apa_tools.R")  # in-house helpers: named.vector, new.list, read.bed, random.string, write.heatmap.bed, coverage.matrix.*
ca <- commandArgs(trailing=TRUE)
bedlist <- ca[1]   # headered 2-col file with col 1 = name, col 2 = path/to/bedfile
ip.inp <- ca[2]    # headered 2-col file: col 1 = IP sample name; col 2 = input sample name (if any), colnames = "IP", "Input".
dataroot <- ca[3]  # all data should be systematically arranged within this location (expecting chipseq_tertiary pipeline output structure)
## multiple dataroots (as comma-sep string) can be given, and will be searched. *** HOWEVER: sample names must be unique across all these locations!! ***
outpath <- ca[4]   # directory where cov.mats will be written
genome <- ca[5]    # genome label, e.g. "mm10": what genome are these peaks from, so we know if flanked peaks run off the end of the chromosome.
kval <- ca[6]      # peak +- flank value (from midpoint), e.g. 5000 to make heatmaps from peak-mids+-5000. Can be CSV, e.g. 1000,2000,5000 for multiple k.
cores <- ca[7]     # N cores for CoverageView matrix generation
clobber <- as.logical(ca[8])  # TRUE = overwrite existing RData objects (NA if argument omitted)
## bedlist="b4.beds"; ip.inp="b4.ipi.txt"; dataroot="~/cbio.bdk.113/data/output,~/cbio.bdk.107/data"; outpath="~/cbio.bdk.113/data/cov.mats"; genome="mm10"; kval="5000"; cores=40
message("Validating arguments...")
## Split comma-separated dataroots; each must exist and is normalized to end with "/"
dataroot <- unlist(strsplit(dataroot,","))
D <- length(dataroot)
for (i in 1:D) {
  if (!dir.exists(dataroot[i])) stop("Data root location '",dataroot[i],"' does not exist!\n")
  if (!grepl("/$",dataroot[i])) dataroot[i] <- paste0(dataroot[i],"/")
}
if (!grepl("/$",outpath)) outpath <- paste0(outpath,"/")
## Master RData outputs; the ".tmp." variants are fallback filenames used when
## new results cannot be merged into an existing collection (see merge logic below)
beds.RData <- paste0(outpath,"cov.mat.beds.RData")
paths.RData <- paste0(outpath,"cov.mat.paths.RData")
tempstring <- paste0(random.string(10),".tmp.")
beds.RData.tmp <- sub("RData",paste0(tempstring,"RData"),beds.RData)
paths.RData.tmp <- sub("RData",paste0(tempstring,"RData"),paths.RData)
kval <- as.numeric(unlist(strsplit(kval,",")))
K <- length(kval)
if (any(is.na(kval))) stop("Some 'kval' values were not numeric!\n")
cores <- as.numeric(cores)
if (is.na(cores)) stop("Cores value '",cores,"' was not numeric!\n")
## Process IP-input pairings
message("Checking IP/input pairings...")
ip.inp <- read.delim(ip.inp, as.is=TRUE)
inp.given <- ip.inp[,2]!=""           # which IPs have an input sample listed
inp.given[is.na(inp.given)] <- FALSE  # treat NA (empty cell) the same as ""
any.inp <- any(inp.given)
dup <- duplicated(ip.inp)
if (any(dup)) {
  message(paste(sum(dup),"duplicated IP-input pairings removed!\n"))
  ip.inp <- ip.inp[!dup,]
}
if (any.inp) ip.inp[ip.inp[,2]=="",2] <- NA  # normalize "no input" to NA
## READ FINAL PEAKLISTS FOR HEATMAPS
## GENERATE HEATMAP VERSION(S) FOR EACH BED
message("Reading bed files...")
beds1 <- read.delim(bedlist, as.is=TRUE)
if (any(duplicated(beds1[,1]))) stop("Bed file names (column 1 of bed list) are not unique! Halting.\n")
beds1 <- cbind(beds1, FULL=NA)  # FULL = absolute path of each bed, resolved below via readlink
for (i in 1:nrow(beds1)) beds1$FULL[i] <- system(paste0("bash -c 'readlink -f ",beds1[i,2],"'"),intern=TRUE)
beds <- named.vector(beds1[,2], beds1[,1])
beds <- lapply(lapply(beds, read.bed), '[', , 1:6) # read only first 6 cols // should die if any missing files
sapply(beds, nrow)  # NOTE(review): result not captured; presumably an interactive row-count check
cov.mat.beds <- beds
cov.mat.paths <- beds
## Named index vectors for apply-type calls
bedi <- 1:length(beds); names(bedi) <- names(beds)
ki <- 1:K; names(ki) <- paste0("k",kval/1E3) # "5000" named "k5", e.g.
## GET IP AND INPUT BAM PATHS
## MATCH IPS WITH INPUTS
message("Checking bam files...")
## Locate the unique dataroot containing <sample>/bams/pooled.bam.
## Returns that dataroot (trailing slash included); stops with an error if the
## sample is found in zero, or in more than one, of the configured dataroots.
source.sample <- function(sample) {
  candidate.bams <- paste0(dataroot, sample, "/bams/pooled.bam")
  hits <- dataroot[file.exists(candidate.bams)]  # file.exists() is vectorized
  n.hits <- length(hits)
  if (n.hits == 0) {
    stop(paste0("Sample '", sample, "' was NOT found in the following locations: ", paste(dataroot, collapse="', '"), ". Halting.\n"))
  } else if (n.hits > 1) {
    stop(paste0("Sample '", sample, "' was found in multiple locations: '", paste(hits, collapse="', '"), "'. Halting.\n"))
  } else {
    hits
  }
}
get.aligns <- function(x) {
  ## 'x' is a /path/to/bam.  Returns the total alignment count for the bam
  ## (sum of column 3, "mapped reads", of its samtools-idxstats file), or NA
  ## if the count cannot be obtained.  Used downstream for RPM conversion.
  idx <- sub("bam$","idxstats.txt",x)
  if (!file.exists(idx)) {
    message(paste0("expected idxstats file '",idx,"' not found, creating...\n"))
    ## idxstats requires a .bai index; build it first if absent
    if (!file.exists(paste0(x,'.bai'))) system(paste("samtools index",x))
    system(paste("samtools idxstats",x,">",idx))
  }
  ## Sum the mapped-read column in R instead of shelling out to
  ## "bash -c 'paste ... | bc'": removes the bash/bc dependency and replaces
  ## the original's scalar misuse of ifelse() with direct NA propagation.
  ## Any read/parse failure (e.g. idxstats file still missing) yields NA.
  N <- tryCatch(
    suppressWarnings(sum(as.numeric(read.delim(idx, header=FALSE, as.is=TRUE)[[3]]))),
    error=function(e) NA_real_)
  N
}
## IP bams: resolve each unique IP sample to its pooled BAM path
bams <- sapply(unique(ip.inp$IP), function(x) paste0(source.sample(x),x,"/bams/pooled.bam") )
names(bams) <- unique(ip.inp$IP)
## Input bams
if (any.inp) {
    bams.inp <- unique(ip.inp$Input[!is.na(ip.inp$Input)])
    bams.inp <- named.vector(sapply(bams.inp, function(x) paste0(source.sample(x),x,"/bams/pooled.bam") ), bams.inp)
} else {
    bams.inp <- c()
}
## Name vectors
samples <- names(bams)
if (any.inp) inputs <- names(bams.inp)
## Named index vectors for apply-type calls (seq_along is safe for empty vectors)
bami <- seq_along(bams); names(bami) <- names(bams)
if (any.inp) {
    inpbami <- seq_along(bams.inp)
    names(inpbami) <- names(bams.inp)
}
## Were BAM filenames successfully reconstructed
ok <- file.exists(bams)
if (any.inp) ok.inp <- file.exists(bams.inp)
## Aligned read counts per BAM (for RPM conversion)
aligns <- named.vector(sapply(bams, get.aligns), names(bams))
if (any.inp) aligns.inp <- named.vector(sapply(bams.inp, get.aligns), names(bams.inp))
## CHECK: are all BAM files found, have known N alignments
message("IP BAM status:")
print(data.frame(ok,aligns,bams))
message("Input BAM status:")
if (any.inp) print(data.frame(ok.inp,aligns.inp,bams.inp))
if (any(is.na(aligns))) stop("Some IP bams could not be counted! Cannot proceed.\n")
if (any.inp) if (any(is.na(aligns.inp))) stop("Some input bams could not be counted! Cannot proceed.\n")
## Just before takeoff (after everything else has been vetted):
## Make heatmap versions of bed files (convert peaks to windows of fixed length; remove any that run off ends of chroms)
message("Everything checks out, now making expanded bed files...")
bed.paths <- new.list(names(bedi))
for (i in seq_along(bedi)) {  # seq_along(): safe for an empty bed list
    cov.mat.beds[[i]] <- cov.mat.paths[[i]] <- new.list(names(ki)) # to start; more added below
    orig <- beds[[i]]
    nco <- min(ncol(orig), 6)  # min() instead of scalar ifelse()
    colnames(orig)[1:nco] <- c("Chr","Start","End","Name","Score","Strand")[1:nco] # prevents breaking if input bed had > 6 cols
    for (k in ki) {
        message(names(bedi)[i]," ",kval[k])
        ## Create cov.mat output location
        cov.mat.paths[[i]][[k]] <- paste0(outpath,names(beds)[i],"/",names(ki)[k]) # initially; later gets replaced with IP/input list
        system(paste0("mkdir -p ",cov.mat.paths[[i]][[k]])) # output cov.mat RData object locations
        if (!dir.exists(cov.mat.paths[[i]][[k]])) stop("Failed to create coverage matrix output location '",cov.mat.paths[[i]][[k]],"'!\n")
        ## Create heatmap bed (each peak re-centered into a fixed window of 2*k bp)
        bed.paths[[i]][[k]] <- paste0(cov.mat.paths[[i]][[k]],"/",names(beds)[i],".",names(ki)[k],".bed")
        cov.mat.beds[[i]][[k]] <- write.heatmap.bed(beds[[i]], bed.paths[[i]][[k]], genome, window=kval[k]*2)
        orig <- cbind(orig, cov.mat.beds[[i]][[k]]$OK)
        colnames(orig)[ncol(orig)] <- paste0("OK.",names(ki)[k])
        cov.mat.beds[[i]][[k]] <- cov.mat.beds[[i]][[k]][cov.mat.beds[[i]][[k]]$OK,1:6] # drop any peaks that were dropped in the output
        message("Wrote: ",bed.paths[[i]][[k]])
    }
    cov.mat.beds[[i]]$original <- orig # original unfiltered bed coords, also has "OK" columns for each K value, indicating if that coord made it to that k-bed file
    cov.mat.paths[[i]]$original <- orig
    cov.mat.beds[[i]]$input <- beds1$FULL[i] # path to original input bed file
    cov.mat.paths[[i]]$input <- beds1$FULL[i]
}
if (!file.exists(beds.RData)) {
    save(cov.mat.beds, file=beds.RData)
} else {
    ## adding to existing collection; ensure additions are valid
    beds.merge.ok <- coverage.matrix.RData.validate(cov.mat.beds, beds.RData)
    if (beds.merge.ok) {
        ## merge new data into existing RData file
        coverage.matrix.RData.validate(cov.mat.beds, beds.RData, combine=TRUE)
    } else {
        message("Current 'cov.mat.beds' dataset incompatible with existing! Saving to '",beds.RData.tmp,"'.\n")
        save(cov.mat.beds, file=beds.RData.tmp)
    }
}
## GENERATE COVERAGE MATRICES
## EACH BED FILE GETS ONE COLUMN PER BAM
library(CoverageView)
message("\nGenerating matrices...")
## Get ordered bam vectors in IP-input table row order
IP.bam <- match(ip.inp$IP, names(bams))
if (any.inp) {
    inp.bam <- match(ip.inp$Input, names(bams.inp))
    has.inp <- !is.na(inp.bam)   # which IP rows have a matched input sample
} else {
    has.inp <- rep(FALSE,length(IP.bam))
}
## For each bed file, for each k value, for each IP-input pairing (rows in ip.inp), compute one RData object
#cov.mat.paths <- cov.mat.beds
for (i in bedi) {
    for (k in ki) {
        bed <- bed.paths[[i]][[k]]
        out <- cov.mat.paths[[i]][[k]] # save off location value, then replace with IP/input list below
        if (any.inp) {
            bam.list <- list(IP=bams[IP.bam],input=bams.inp)
        } else {
            bam.list <- list(IP=bams[IP.bam])
        }
        cov.mat.paths[[i]][[k]] <- lapply(bam.list, function(x){ x[] <- NA; x }) # initialize as all NA (original set only the non-NA entries to NA -- same net result, stated directly)
        ## generate input matrices FIRST
        if (length(bams.inp)>0) {
            group <- paste0("Bed: ",names(bedi)[i],", k: ",names(ki)[k],", inputs")
            message(group)
            cov.mat.paths[[i]][[k]]$input <- coverage.matrix.generate(bams.inp, bed, out, 100, cores, clobber=clobber, skip=TRUE) # writes RData objects
            message(group," complete!\n")
        }
        ## generate IP matrices SECOND: requires RData objects from inputs
        ## IPs with input first
        if (sum(has.inp)>0) {
            group <- paste0("Bed: ",names(bedi)[i],", k: ",names(ki)[k],", IPs with inputs")
            message(group)
            cov.mat.paths[[i]][[k]]$IP[ has.inp] <- coverage.matrix.generate(bams[ has.inp], bed, out, 100, cores, cov.mat.paths[[i]][[k]]$input[inp.bam[has.inp]], clobber=clobber, skip=TRUE) # writes RData objects
            message(group," complete!\n")
        }
        ## IPs with no input second
        if (sum(!has.inp)>0) {
            group <- paste0("Bed: ",names(bedi)[i],", k: ",names(ki)[k],", IPs with no inputs")
            message(group)
            cov.mat.paths[[i]][[k]]$IP[!has.inp] <- coverage.matrix.generate(bams[!has.inp], bed, out, 100, cores, clobber=clobber, skip=TRUE) # writes RData objects
            message(group," complete!\n")
        }
    }
}
if (!file.exists(paths.RData)) {
    save(cov.mat.paths, file=paths.RData)
} else {
    ## adding to existing collection; ensure additions are valid
    paths.merge.ok <- coverage.matrix.RData.validate(cov.mat.paths, paths.RData)
    if (paths.merge.ok) {
        ## merge new data into existing RData file
        message("Current 'cov.mat.paths' dataset is compatible with the existing one!\n")
        coverage.matrix.RData.validate(cov.mat.paths, paths.RData, combine=TRUE)
    } else {
        message("Current 'cov.mat.paths' dataset incompatible with existing! Saving to '",paths.RData.tmp,"'.\n")
        save(cov.mat.paths, file=paths.RData.tmp)
    }
}
message(paste0("Matrix creation complete!\nSee: ",paths.RData))
| |
#' Read SDA output into R list
#'
#' \code{load_results} reads the parameter estimates saved by an SDA run into a structured R list
#'
#'
#' @param results_folder string; path to folder containing estimates, same as the string you passed to '--out' when running SDA
#'
#' @param iteration integer; iteration number of save to load, defaults to maximum
#'
#' @param data_path string; path to location data was saved (using export_data). Used to reload gene and individual names
#'
#' @return A list of matrices
#'
#'
#' @export
#' @import data.table
load_results <- function(results_folder, iteration=NULL, data_path=NULL) {
  # Get highest iteration if not specified.
  # Save directories are named "it<N>"; compare the numbers numerically --
  # max() on the character strings would sort lexicographically (e.g. "9" > "10").
  if (is.null(iteration)) {
    its <- gsub("it", "", basename(grep("it[0-9]", list.dirs(gsub("[.]$", "", results_folder)), value = TRUE)))
    iteration <- max(as.numeric(its))
  }
  folder <- paste0(results_folder, "/it", iteration)
  # Fail fast if the iteration folder is missing.  (This check was previously
  # wrapped in try(), which swallowed the stop() and let execution continue.)
  if (!dir.exists(folder)) stop(paste0("Folder '", folder, "' not found."))
  out <- list()
  # read each file in folder as item in list
  files <- list.files(folder)
  for (file in files) {
    out[[file]] <- as.matrix(fread(paste0(folder, "/", file)))
  }
  out1 <- reformat_data(out) # collect X matrices together, same for S, B
  out1$n$iterations <- iteration
  out1$free_energy <- out$free_energy
  out1$miss <- out$miss
  # if log saved, extract PIP fraction < 0.5
  # (stray `sep=" "` argument removed: paste0() has no sep argument, so it was
  # silently pasted onto the end of the command string)
  if (file.exists(paste0(results_folder, "/log.txt"))) {
    out1$pip_fraction <- fread(cmd = paste0("grep -o '[0]) : [0-9].*[0-9]*' ", results_folder, "/log.txt"))[1:iteration]$V3
  } else {
    message(paste0("Log file containing pip fraction per iteration not found: '", results_folder, "/log.txt'"))
  }
  # if command.txt saved, extract arguments used
  if (file.exists(paste0(results_folder, "/command.txt"))) {
    command <- readLines(paste0(results_folder, "/command.txt"), warn = FALSE)
    command <- strsplit(command, "--")[[1]][-1] # split into argument-value strings; drop leading string (location of sda)
    command <- regmatches(command, regexpr(" ", command), invert = TRUE) # split argument name from value
    values <- unlist(command)[c(FALSE, TRUE)]
    values <- gsub(" $", "", values) # remove trailing space
    names(values) <- unlist(command)[c(TRUE, FALSE)]
    out1$command_arguments <- as.data.table(t(values))
  } else {
    message(paste0("Command file containing SDA run parameters not found: '", results_folder, "/command.txt'"))
  }
  # if dimnames saved, add back in
  # NOTE(review): when data_path is NULL this relies on out1$command_arguments
  # having been populated above (i.e. on command.txt existing) -- confirm.
  if (is.null(data_path)) {
    dimnames_file <- paste0(tools::file_path_sans_ext(out1$command_arguments$data), "_dimnames.rds")
  } else {
    dimnames_file <- paste0(data_path, basename(tools::file_path_sans_ext(out1$command_arguments$data)), "_dimnames.rds")
  }
  if (file.exists(dimnames_file)) {
    original_dimnames <- readRDS(dimnames_file)
    # add individual names to scores
    rownames(out1$scores) <- original_dimnames[[1]]
    # add gene names to loadings & pips; seq_along() fixes the original
    # `for (i in length(...))`, which visited only the LAST element and left
    # earlier loading/PIP matrices unnamed
    for (i in seq_along(out1$loadings)) {
      colnames(out1$loadings[[i]]) <- original_dimnames[[2]]
      colnames(out1$pips[[i]]) <- original_dimnames[[2]]
    }
  } else {
    message(paste0("File containing gene and individual lables not found: '", dimnames_file, "'"))
  }
  return(out1)
}
reformat_data <- function(out) {
  # Reorganize the flat list of matrices read from an SDA iteration folder
  # into a structured list: scores (A), loadings (X*), PIPs (S*) and context
  # scores (B*), plus dimension bookkeeping under $n.
  matrix_names <- names(out)
  est <- list()
  est$scores <- out$A
  est$n <- list()
  est$n$individuals <- nrow(est$scores) # number individuals
  est$n$components <- ncol(est$scores) # number components
  est$n$omics <- length(grep("X[0-9]?", matrix_names)) # num_X_mats
  stopifnot(length(grep("S[0-9]?", matrix_names)) == est$n$omics) # check all pips are loaded
  stopifnot(est$n$omics != 0) # at least one loading matrix
  est$n$context_matrices <- length(grep("B[0-9]?", matrix_names)) # number of B matrices
  est$loadings <- list()
  est$pips <- list() # fixed: was initialized on 'out' (a dead store) instead of 'est'
  if (est$n$context_matrices > 0) {
    est$context_scores <- list() # fixed: ditto; only created when B matrices exist
  }
  for (d in seq_len(est$n$omics)) {
    est$loadings[[d]] <- out[[paste0("X", d)]]
    est$pips[[d]] <- out[[paste0("S", d)]]
    if (est$n$context_matrices > 0) {
      est$context_scores[[d]] <- out[[paste0("B", d)]]
    }
  }
  return(est)
}
| /R/load_results.R | permissive | marchinilab/SDAtools | R | false | false | 4,154 | r | #' Read SDA output into R list
#'
#' \code{load_results} simulate digital gene expression matrix containing count data from given parameters
#'
#'
#' @param results_folder string; path to folder containing estimates, same as the string you passed to '--out' when running SDA
#'
#' @param iteration integer; iteration number of save to load, defaults to maximum
#'
#' @param data_path string; path to location data was saved (using export_data). Used to reload gene and individual names
#'
#' @return A list of matrices
#'
#'
#' @export
#' @import data.table
load_results <- function(results_folder, iteration=NULL, data_path=NULL) {
  # Get highest iteration if not specified.
  # Save directories are named "it<N>"; compare the numbers numerically --
  # max() on the character strings would sort lexicographically (e.g. "9" > "10").
  if (is.null(iteration)) {
    its <- gsub("it", "", basename(grep("it[0-9]", list.dirs(gsub("[.]$", "", results_folder)), value = TRUE)))
    iteration <- max(as.numeric(its))
  }
  folder <- paste0(results_folder, "/it", iteration)
  # Fail fast if the iteration folder is missing.  (This check was previously
  # wrapped in try(), which swallowed the stop() and let execution continue.)
  if (!dir.exists(folder)) stop(paste0("Folder '", folder, "' not found."))
  out <- list()
  # read each file in folder as item in list
  files <- list.files(folder)
  for (file in files) {
    out[[file]] <- as.matrix(fread(paste0(folder, "/", file)))
  }
  out1 <- reformat_data(out) # collect X matrices together, same for S, B
  out1$n$iterations <- iteration
  out1$free_energy <- out$free_energy
  out1$miss <- out$miss
  # if log saved, extract PIP fraction < 0.5
  # (stray `sep=" "` argument removed: paste0() has no sep argument, so it was
  # silently pasted onto the end of the command string)
  if (file.exists(paste0(results_folder, "/log.txt"))) {
    out1$pip_fraction <- fread(cmd = paste0("grep -o '[0]) : [0-9].*[0-9]*' ", results_folder, "/log.txt"))[1:iteration]$V3
  } else {
    message(paste0("Log file containing pip fraction per iteration not found: '", results_folder, "/log.txt'"))
  }
  # if command.txt saved, extract arguments used
  if (file.exists(paste0(results_folder, "/command.txt"))) {
    command <- readLines(paste0(results_folder, "/command.txt"), warn = FALSE)
    command <- strsplit(command, "--")[[1]][-1] # split into argument-value strings; drop leading string (location of sda)
    command <- regmatches(command, regexpr(" ", command), invert = TRUE) # split argument name from value
    values <- unlist(command)[c(FALSE, TRUE)]
    values <- gsub(" $", "", values) # remove trailing space
    names(values) <- unlist(command)[c(TRUE, FALSE)]
    out1$command_arguments <- as.data.table(t(values))
  } else {
    message(paste0("Command file containing SDA run parameters not found: '", results_folder, "/command.txt'"))
  }
  # if dimnames saved, add back in
  # NOTE(review): when data_path is NULL this relies on out1$command_arguments
  # having been populated above (i.e. on command.txt existing) -- confirm.
  if (is.null(data_path)) {
    dimnames_file <- paste0(tools::file_path_sans_ext(out1$command_arguments$data), "_dimnames.rds")
  } else {
    dimnames_file <- paste0(data_path, basename(tools::file_path_sans_ext(out1$command_arguments$data)), "_dimnames.rds")
  }
  if (file.exists(dimnames_file)) {
    original_dimnames <- readRDS(dimnames_file)
    # add individual names to scores
    rownames(out1$scores) <- original_dimnames[[1]]
    # add gene names to loadings & pips; seq_along() fixes the original
    # `for (i in length(...))`, which visited only the LAST element and left
    # earlier loading/PIP matrices unnamed
    for (i in seq_along(out1$loadings)) {
      colnames(out1$loadings[[i]]) <- original_dimnames[[2]]
      colnames(out1$pips[[i]]) <- original_dimnames[[2]]
    }
  } else {
    message(paste0("File containing gene and individual lables not found: '", dimnames_file, "'"))
  }
  return(out1)
}
reformat_data <- function(out) {
  # Reorganize the flat list of matrices read from an SDA iteration folder
  # into a structured list: scores (A), loadings (X*), PIPs (S*) and context
  # scores (B*), plus dimension bookkeeping under $n.
  matrix_names <- names(out)
  est <- list()
  est$scores <- out$A
  est$n <- list()
  est$n$individuals <- nrow(est$scores) # number individuals
  est$n$components <- ncol(est$scores) # number components
  est$n$omics <- length(grep("X[0-9]?", matrix_names)) # num_X_mats
  stopifnot(length(grep("S[0-9]?", matrix_names)) == est$n$omics) # check all pips are loaded
  stopifnot(est$n$omics != 0) # at least one loading matrix
  est$n$context_matrices <- length(grep("B[0-9]?", matrix_names)) # number of B matrices
  est$loadings <- list()
  est$pips <- list() # fixed: was initialized on 'out' (a dead store) instead of 'est'
  if (est$n$context_matrices > 0) {
    est$context_scores <- list() # fixed: ditto; only created when B matrices exist
  }
  for (d in seq_len(est$n$omics)) {
    est$loadings[[d]] <- out[[paste0("X", d)]]
    est$pips[[d]] <- out[[paste0("S", d)]]
    if (est$n$context_matrices > 0) {
      est$context_scores[[d]] <- out[[paste0("B", d)]]
    }
  }
  return(est)
}
|
rm(list=ls())  # NOTE(review): clearing the workspace inside a script is discouraged; kept for backward compatibility
source('mainFunctions_sub.R')
#downloaded from:https://static-content.springer.com/esm/art%3A10.1038%2Fs41586-020-2119-x/MediaObjects/41586_2020_2119_MOESM5_ESM.zip
#Unzip and `rename craniofacial EFP *`
## Per-tissue UC region list produced upstream (path defined in mainFunctions_sub.R)
UC_all <- readRDS(UC_in_matrix_ls_file)
feDMR_dir <- '../downstream/input/mouse_analysis/FeDMR/'
FeDMR_olap <- data.table()
for (ts in names(UC_all)) {
  feDMR_ts <- GRanges()
  # read in feDMR regions (all files whose names match this tissue)
  for (tsv_in in dir(feDMR_dir, pattern=ts)) {
    feDMR_ts <- c(feDMR_ts, makeGRangesFromDataFrame(fread(paste0(feDMR_dir,tsv_in)), keep.extra.columns=TRUE))  # TRUE, not T (T is reassignable)
  }
  # keep tissue-specific feDMRs only
  feDMR_ts <- feDMR_ts[feDMR_ts$tissue_specificity==1]
  UC_01 <- fread(paste0(dir_out_cluster01,ts,'.csv'))
  ## Overlaps of (all analyzed UC regions), (UC>=0.1 regions), and (MML-only UC>=0.1 regions) with feDMRs
  olap_all <- findOverlaps(UC_all[[ts]], feDMR_ts)
  olap_UC_01 <- findOverlaps(convert_GR(UC_01$regions), feDMR_ts)
  olap_UC_01_MML <- findOverlaps(convert_GR(UC_01[region_type=='MML only']$regions), feDMR_ts)
  FeDMR_olap <- rbind(FeDMR_olap,
                      data.table(tissue=ts,
                                 total_UC_analyzed=length(UC_all[[ts]]),
                                 total_UC_01=nrow(UC_01),
                                 total_UC_01_MML=sum(UC_01$region_type=='MML only', na.rm = TRUE),  # TRUE, not T
                                 olap_UC_01_MML_FeDMR=length(unique(queryHits(olap_UC_01_MML))),
                                 overlap_total_UC_feDMR=length(unique(queryHits(olap_all))),
                                 overlap_UC_01_feDMR=length(unique(queryHits(olap_UC_01))),
                                 FeDMR_covered_all_UC=length(unique(subjectHits(olap_all))),
                                 total_FeDMR=length(feDMR_ts),
                                 FeDMR_covered_uc_01=length(unique(subjectHits(olap_UC_01))),
                                 FeDMR_covered_uc_01_MML=length(unique(subjectHits(olap_UC_01_MML)))
                      ))
}
## Proportions: how much of the UC-covered feDMR set the 0.1-threshold set retains, and overall feDMR coverage
FeDMR_olap$proportion_overlap_uc_01 <- FeDMR_olap$FeDMR_covered_uc_01/FeDMR_olap$FeDMR_covered_all_UC
FeDMR_olap$proportion_overlap_uc_all <- FeDMR_olap$FeDMR_covered_all_UC/FeDMR_olap$total_FeDMR
saveRDS(FeDMR_olap,'../downstream/output/mouse_analysis/Ecker_comparison/FeDMR_olap_tissue_specific.rds')
write.csv(FeDMR_olap,'../downstream/output/mouse_analysis/Ecker_comparison/FeDMR_olap_tissue_specific.csv',row.names = F) | /code_not_in_paper/mouse_MML_analysis/FeDMR_overlap.R | no_license | yuqifang94/ASE | R | false | false | 2,262 | r | rm(list=ls())
source('mainFunctions_sub.R')
#downloaded from:https://static-content.springer.com/esm/art%3A10.1038%2Fs41586-020-2119-x/MediaObjects/41586_2020_2119_MOESM5_ESM.zip
#Unzip and `rename craniofacial EFP *`
## Per-tissue UC region list produced upstream (path defined in mainFunctions_sub.R)
UC_all <- readRDS(UC_in_matrix_ls_file)
feDMR_dir <- '../downstream/input/mouse_analysis/FeDMR/'
FeDMR_olap <- data.table()
for (ts in names(UC_all)) {
  feDMR_ts <- GRanges()
  # read in feDMR regions (all files whose names match this tissue)
  for (tsv_in in dir(feDMR_dir, pattern=ts)) {
    feDMR_ts <- c(feDMR_ts, makeGRangesFromDataFrame(fread(paste0(feDMR_dir,tsv_in)), keep.extra.columns=TRUE))  # TRUE, not T (T is reassignable)
  }
  # keep tissue-specific feDMRs only
  feDMR_ts <- feDMR_ts[feDMR_ts$tissue_specificity==1]
  UC_01 <- fread(paste0(dir_out_cluster01,ts,'.csv'))
  ## Overlaps of (all analyzed UC regions), (UC>=0.1 regions), and (MML-only UC>=0.1 regions) with feDMRs
  olap_all <- findOverlaps(UC_all[[ts]], feDMR_ts)
  olap_UC_01 <- findOverlaps(convert_GR(UC_01$regions), feDMR_ts)
  olap_UC_01_MML <- findOverlaps(convert_GR(UC_01[region_type=='MML only']$regions), feDMR_ts)
  FeDMR_olap <- rbind(FeDMR_olap,
                      data.table(tissue=ts,
                                 total_UC_analyzed=length(UC_all[[ts]]),
                                 total_UC_01=nrow(UC_01),
                                 total_UC_01_MML=sum(UC_01$region_type=='MML only', na.rm = TRUE),  # TRUE, not T
                                 olap_UC_01_MML_FeDMR=length(unique(queryHits(olap_UC_01_MML))),
                                 overlap_total_UC_feDMR=length(unique(queryHits(olap_all))),
                                 overlap_UC_01_feDMR=length(unique(queryHits(olap_UC_01))),
                                 total_FeDMR=length(feDMR_ts),
                                 FeDMR_covered_all_UC=length(unique(subjectHits(olap_all))),
                                 FeDMR_covered_uc_01=length(unique(subjectHits(olap_UC_01))),
                                 FeDMR_covered_uc_01_MML=length(unique(subjectHits(olap_UC_01_MML)))
                      ))
}
## Proportions: how much of the UC-covered feDMR set the 0.1-threshold set retains, and overall feDMR coverage
FeDMR_olap$proportion_overlap_uc_01 <- FeDMR_olap$FeDMR_covered_uc_01/FeDMR_olap$FeDMR_covered_all_UC
FeDMR_olap$proportion_overlap_uc_all <- FeDMR_olap$FeDMR_covered_all_UC/FeDMR_olap$total_FeDMR
saveRDS(FeDMR_olap,'../downstream/output/mouse_analysis/Ecker_comparison/FeDMR_olap_tissue_specific.rds')
write.csv(FeDMR_olap,'../downstream/output/mouse_analysis/Ecker_comparison/FeDMR_olap_tissue_specific.csv',row.names = F) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/install.R
\name{py_install}
\alias{py_install}
\title{Install Python packages}
\usage{
py_install(
packages,
envname = NULL,
method = c("auto", "virtualenv", "conda"),
conda = "auto",
python_version = NULL,
pip = FALSE,
...
)
}
\arguments{
\item{packages}{A vector of Python packages to install.}
\item{envname}{The name, or full path, of the environment in which Python
packages are to be installed. When \code{NULL} (the default), the active
environment as set by the \code{RETICULATE_PYTHON_ENV} variable will be used;
if that is unset, then the \code{r-reticulate} environment will be used.}
\item{method}{Installation method. By default, "auto" automatically finds a
method that will work in the local environment. Change the default to force
a specific installation method. Note that the "virtualenv" method is not
available on Windows.}
\item{conda}{The path to a \code{conda} executable. Use \code{"auto"} to allow \code{reticulate} to
automatically find an appropriate \code{conda} binary. See \strong{Finding Conda} for more details.}
\item{python_version}{The requested Python version. Ignored when attempting
to install with a Python virtual environment.}
\item{pip}{Boolean; use \code{pip} for package installation? This is only relevant
when Conda environments are used, as otherwise packages will be installed
from the Conda repositories.}
\item{...}{Additional arguments passed to \code{\link[=conda_install]{conda_install()}}
or \code{\link[=virtualenv_install]{virtualenv_install()}}.}
}
\description{
Install Python packages into a virtual environment or Conda environment.
}
\details{
On Linux and OS X the "virtualenv" method will be used by default
("conda" will be used if virtualenv isn't available). On Windows, the
"conda" method is always used.
}
\seealso{
\link{conda-tools}, \link{virtualenv-tools}
}
| /man/py_install.Rd | permissive | nb786/reticulate | R | false | true | 1,928 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/install.R
\name{py_install}
\alias{py_install}
\title{Install Python packages}
\usage{
py_install(
packages,
envname = NULL,
method = c("auto", "virtualenv", "conda"),
conda = "auto",
python_version = NULL,
pip = FALSE,
...
)
}
\arguments{
\item{packages}{A vector of Python packages to install.}
\item{envname}{The name, or full path, of the environment in which Python
packages are to be installed. When \code{NULL} (the default), the active
environment as set by the \code{RETICULATE_PYTHON_ENV} variable will be used;
if that is unset, then the \code{r-reticulate} environment will be used.}
\item{method}{Installation method. By default, "auto" automatically finds a
method that will work in the local environment. Change the default to force
a specific installation method. Note that the "virtualenv" method is not
available on Windows.}
\item{conda}{The path to a \code{conda} executable. Use \code{"auto"} to allow \code{reticulate} to
automatically find an appropriate \code{conda} binary. See \strong{Finding Conda} for more details.}
\item{python_version}{The requested Python version. Ignored when attempting
to install with a Python virtual environment.}
\item{pip}{Boolean; use \code{pip} for package installation? This is only relevant
when Conda environments are used, as otherwise packages will be installed
from the Conda repositories.}
\item{...}{Additional arguments passed to \code{\link[=conda_install]{conda_install()}}
or \code{\link[=virtualenv_install]{virtualenv_install()}}.}
}
\description{
Install Python packages into a virtual environment or Conda environment.
}
\details{
On Linux and OS X the "virtualenv" method will be used by default
("conda" will be used if virtualenv isn't available). On Windows, the
"conda" method is always used.
}
\seealso{
\link{conda-tools}, \link{virtualenv-tools}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hbfm_functions.R
\docType{data}
\name{gene.mat}
\alias{gene.mat}
\title{Example gene expression matrix}
\format{A matrix with 10 rows and 100 columns}
\usage{
gene.mat
}
\description{
An example dataset consisting of gene expression counts for G = 10 genes (rows)
across N = 100 cells (columns)
}
\examples{
data(gene.mat)
rownames(gene.mat)
colnames(gene.mat)
}
\keyword{datasets}
| /man/gene.mat.Rd | no_license | mnsekula/hbfm | R | false | true | 461 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hbfm_functions.R
\docType{data}
\name{gene.mat}
\alias{gene.mat}
\title{Example gene expression matrix}
\format{A matrix with 10 rows and 100 columns}
\usage{
gene.mat
}
\description{
An example dataset consisting of gene expression counts for G = 10 genes (rows)
across N = 100 cells (columns)
}
\examples{
data(gene.mat)
rownames(gene.mat)
colnames(gene.mat)
}
\keyword{datasets}
|
#' @title Update the inputs when an event occurs
#'
#' @description When an event occurs in the GenEst GUI, the input values may
#' need to be updated. This function contains all of the possible updates
#' based on the event options (or lacks any updates if the event doesn't
#' require any).
#'
#' @param eventName Character name of the event. One of "clear_all",
#' "file_SE", "file_SE_clear", "file_CP", "file_CP_clear", "file_SS",
#' "file_SS_clear", "file_DWP", "file_DWP_clear", "file_CO",
#' "file_CO_clear", "class", "obsSE", "predsSE", "run_SE", "run_SE_clear",
#' "outSEclass", "outSEp", "outSEk", "ltp", "fta", "predsCP", "run_CP",
#' "run_CP_clear", "outCPclass", "outCPdist", "outCPl", "outCPs",
#' "run_M", "run_M_clear", "split_M", "split_M_clear", "transpose_split",
#' "run_g", "run_g_clear", or "outgclass".
#'
#' @param rv Reactive values list for the GenEst GUI.
#'
#' @param input \code{input} list for the GenEst GUI.
#'
#' @param session Environment for the GenEst GUI.
#'
#' @export
#'
update_input <- function(eventName, rv, input, session){
eventOptions <- c("clear_all", "file_SE", "file_SE_clear", "file_CP",
"file_CP_clear", "file_SS", "file_SS_clear", "file_DWP",
"file_DWP_clear", "file_CO", "file_CO_clear", "class",
"obsSE", "predsSE", "run_SE", "run_SE_clear",
"outSEclass", "outSEp", "outSEk", "ltp", "fta", "predsCP",
"run_CP", "run_CP_clear", "outCPclass", "outCPdist",
"outCPl", "outCPs", "run_M", "run_M_clear", "split_M",
"split_M_clear", "transpose_split",
"run_g", "run_g_clear", "outgclass",
"load_RP", "load_RPbat", "load_cleared", "load_PV",
"load_trough", "load_powerTower", "load_mock")
if (missing(eventName) || (eventName %in% eventOptions) == FALSE){
stop("eventName missing or not in list of available eventNames")
}
if(eventName == "clear_all" | grepl("load_", eventName)){
toReset <- c("file_SE", "predsSE", "obsSE", "outSEp", "outSEk",
"outSEclass", "DWPCol", "split_SS", "split_CO",
"modelChoices_SE1", "outgclass","file_CP", "predsCP", "ltp",
"fta", "outCPl", "outCPs", "outCPdist", "outCPclass",
"modelChoices_CP1", "file_SS", "file_DWP", "file_CO", "COdate",
"sizeCol")
lapply(toReset, reset)
# scc <- rv$colNames_size
# if (is.null(scc)){
# scc <- ""
# }
# scs <- rv$sizeCol # why would the sizeCol not be cleared when all the data sets are?
# if (is.null(scc)){
# scs <- ""
# }
updateSelectizeInput(session, "predsSE", choices = "")
updateSelectizeInput(session, "obsSE", choices = "")
# updateSelectizeInput(session, "class", choices = scc, selected = scs)
# NOTE: the commented-out line glued the previous sizes onto the size menu
updateSelectizeInput(session, "class", choices = "")
updateSelectizeInput(session, "modelChoices_SE1", choices = "")
updateSelectizeInput(session, "split_SS", choices = "")
updateSelectizeInput(session, "split_CO", choices = "")
updateSelectizeInput(session, "outSEp", choices = "")
updateSelectizeInput(session, "outSEk", choices = "")
updateSelectizeInput(session, "outSEclass", choices = "")
updateSelectizeInput(session, "outgclass", choices = "")
updateSelectizeInput(session, "predsCP", choices = "")
updateSelectizeInput(session, "ltp", choices = "")
updateSelectizeInput(session, "fta", choices = "")
updateSelectizeInput(session, "modelChoices_CP1", choices = "")
updateSelectizeInput(session, "outCPl", choices = "")
updateSelectizeInput(session, "outCPs", choices = "")
updateSelectizeInput(session, "outCPdist", choices = "")
updateSelectizeInput(session, "outCPclass", choices = "")
updateSelectizeInput(session, "COdate", choices = "")
updateNumericInput(session, "gSearchInterval", value = NULL)
updateNumericInput(session, "gSearchMax", value = NULL)
}
if (eventName == "file_SE"){
updateSelectizeInput(session, "predsSE", choices = rv$colNames_SE_preds)
updateSelectizeInput(session, "obsSE", choices = rv$colNames_SE_obs)
updateSelectizeInput(session, "class", choices = rv$colNames_size,
selected = rv$sizeCol)
updateTabsetPanel(session, "LoadedDataViz", "Searcher Efficiency")
if (rv$nsizeclasses > 1){
updateSelectizeInput(session, "DWPCol", selected = " ")
}
}
if (eventName == "file_SE_clear"){
toReset <- c("file_SE", "predsSE", "obsSE", "outSEp", "outSEk",
"outSEclass", "DWPCol", "split_SS", "split_CO",
"modelChoices_SE1", "outgclass")
lapply(toReset, reset)
scc <- rv$colNames_size
scs <- rv$sizeCol
if (is.null(scc)){
scs <- ""
scc <- ""
}
updateSelectizeInput(session, "predsSE", choices = "")
updateSelectizeInput(session, "obsSE", choices = "")
updateSelectizeInput(session, "class", choices = scc, selected = scs)
updateSelectizeInput(session, "modelChoices_SE1", choices = "")
updateSelectizeInput(session, "split_SS", choices = "")
updateSelectizeInput(session, "split_CO", choices = "")
updateSelectizeInput(session, "outSEp", choices = "")
updateSelectizeInput(session, "outSEk", choices = "")
updateSelectizeInput(session, "outSEclass", choices = "")
updateSelectizeInput(session, "outgclass", choices = "")
updateSelectizeInput(session, "DWPCol", choices = "")
updateTabsetPanel(session, "LoadedDataViz", "Searcher Efficiency")
}
if (eventName == "file_CP"){
updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds)
updateSelectizeInput(session, "ltp", choices = rv$colNames_ltp)
updateSelectizeInput(session, "fta", choices = rv$colNames_fta)
updateSelectizeInput(session, "class", choices = rv$colNames_size,
selected = rv$sizeCol
)
updateTabsetPanel(session, "LoadedDataViz", "Carcass Persistence")
}
if (eventName == "file_CP_clear"){
toReset <- c("file_CP", "predsCP", "ltp", "fta", "outCPl", "outCPs",
"outCPdist", "outCPclass", "modelChoices_CP1",
"split_SS", "split_CO", "outgclass")
lapply(toReset, reset)
scc <- rv$colNames_size
scs <- rv$sizeCol
if (is.null(scc)){
scs <- ""
scc <- ""
}
updateSelectizeInput(session, "predsCP", choices = "")
updateSelectizeInput(session, "ltp", choices = "")
updateSelectizeInput(session, "fta", choices = "")
updateSelectizeInput(session, "class", choices = scc, selected = scs)
updateSelectizeInput(session, "modelChoices_CP1", choices = "")
updateSelectizeInput(session, "outCPl", choices = "")
updateSelectizeInput(session, "outCPs", choices = "")
updateSelectizeInput(session, "outCPdist", choices = "")
updateSelectizeInput(session, "outCPclass", choices = "")
updateSelectizeInput(session, "split_SS", choices = "")
updateSelectizeInput(session, "split_CO", choices = "")
updateSelectizeInput(session, "outgclass", choices = "")
updateTabsetPanel(session, "LoadedDataViz", "Carcass Persistence")
}
if (eventName == "file_SS"){
updateNumericInput(session, "gSearchInterval", value = rv$SS[["I"]])
updateNumericInput(session, "gSearchMax", value = rv$SS[["span"]])
updateSelectizeInput(session, "split_SS", choices = rv$splittable_SS)
updateTabsetPanel(session, "LoadedDataViz", "Search Schedule")
}
if (eventName == "file_SS_clear"){
toReset <- c("file_SS", "gSearchInterval", "gSearchMax",
"split_SS", "split_CO", "outgclass")
lapply(toReset, reset)
updateSelectizeInput(session, "split_SS", choices = "")
updateSelectizeInput(session, "split_CO", choices = "")
updateSelectizeInput(session, "outgclass", choices = "")
updateTabsetPanel(session, "LoadedDataViz", "Search Schedule")
}
if (eventName == "file_DWP"){
updateSelectizeInput(session, "DWPCol", choices = rv$colNames_DWP)
if (length(rv$colNames_DWP) == 1){
updateSelectizeInput(session, "DWPCol", selected = rv$colNames_DWP)
}
if (rv$nsizeclasses > 1){
updateSelectizeInput(session, "DWPCol", selected = rv$colNames_DWP[1])
}
updateTabsetPanel(session, "LoadedDataViz", "Density Weighted Proportion")
}
if (eventName == "file_DWP_clear"){
toReset <- c("file_DWP", "DWPCol", "split_SS", "split_CO")
lapply(toReset, reset)
updateSelectizeInput(session, "DWPCol", choices = "")
updateSelectizeInput(session, "split_SS", choices = "")
updateSelectizeInput(session, "split_CO", choices = "")
updateTabsetPanel(session, "LoadedDataViz", "Density Weighted Proportion")
}
if (eventName == "file_CO"){
updateSelectizeInput(session, "COdate", choices = rv$colNames_COdates)
if (length(rv$colNames_COdates) == 1){
updateSelectizeInput(session, "COdate", choices = rv$colNames_COdates,
selected = rv$colNames_COdates)
}
updateSelectizeInput(session, "class", choices = rv$colNames_size,
selected = rv$sizeCol)
updateTabsetPanel(session, "LoadedDataViz", "Carcass Observations")
}
if (eventName == "file_CO_clear"){
toReset <- c("file_CO", "COdate", "split_SS", "split_CO")
lapply(toReset, reset)
scc <- rv$colNames_size
scs <- rv$sizeCol
if (is.null(scc)){
scs <- ""
scc <- ""
}
updateSelectizeInput(session, "COdate", choices = "")
updateSelectizeInput(session, "split_SS", choices = "")
updateSelectizeInput(session, "split_CO", choices = "")
updateTabsetPanel(session, "LoadedDataViz", "Carcass Observations")
}
if (grepl("load_", eventName)){
updateSelectizeInput(session, "predsSE", choices = rv$colNames_SE_preds)
updateSelectizeInput(session, "obsSE", choices = rv$colNames_SE_obs)
updateSelectizeInput(session, "class", choices = rv$colNames_size,
selected = rv$sizeCol)
updateNumericInput(session, "gSearchInterval", value = rv$SS[["I"]])
updateNumericInput(session, "gSearchMax", value = rv$SS[["span"]])
updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds)
updateSelectizeInput(session, "ltp", choices = rv$colNames_ltp)
updateSelectizeInput(session, "fta", choices = rv$colNames_fta)
updateSelectizeInput(session, "class", choices = rv$colNames_size)
updateSelectizeInput(session, "DWPCol", choices = rv$colNames_DWP)
if (length(rv$colNames_DWP) == 1){
updateSelectizeInput(session, "DWPCol", selected = rv$colNames_DWP)
}
if (rv$nsizeclasses > 1){
updateSelectizeInput(session, "DWPCol", selected = rv$colNames_DWP[1])
}
updateSelectizeInput(session, "COdate", choices = rv$colNames_COdates)
if (length(rv$colNames_COdates) == 1){
updateSelectizeInput(session, "COdate",
choices = rv$colNames_COdates, selected = rv$colNames_COdates
)
}
updateSelectizeInput(session, "class", choices = rv$colNames_size,
selected = rv$sizeCol
)
if (rv$nsizeclasses > 1){
updateSelectizeInput(session, "DWPCol", selected = " ")
}
updateNavbarPage(session, "GenEstApp", selected = "Data Input")
updateTabsetPanel(session, "LoadedDataViz", "Carcass Persistence")
updateTabsetPanel(session, "LoadedDataViz", "Search Schedule")
updateTabsetPanel(session, "LoadedDataViz", "Density Weighted Proportion")
updateTabsetPanel(session, "LoadedDataViz", "Carcass Observations")
updateTabsetPanel(session, "LoadedDataViz", "Searcher Efficiency")
}
if (eventName == "class"){
toReset <- c(
"outCPl", "outCPs", "outCPdist", "outsizeclassCP", "modelChoices_CP1",
"outSEp", "outSEk", "outsizeclassSE", "modelChoices_SE1",
"DWPCol", "split_SS", "split_CO", "outgclass")
lapply(toReset, reset)
updateSelectizeInput(session, "predsSE", choices = rv$colNames_SE_preds,
selected = input$predsSE)
updateSelectizeInput(session, "obsSE", choices = rv$colNames_SE_obs,
selected = input$obsSE)
updateSelectizeInput(session, "ltp", choices = rv$colNames_CP_nosel,
selected = input$ltp)
updateSelectizeInput(session, "fta", choices = rv$colNames_CP_nosel,
selected = input$fta)
updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds,
selected = input$predsCP)
updateSelectizeInput(session, "DWPCol", choices = rv$colNames_DWP,
selected = rv$DWPCol)
updateSelectizeInput(session, "class", choices = rv$colNames_size,
selected = rv$sizeCol)
}
if (eventName == "obsSE"){
updateSelectizeInput(session, "predsSE", choices = rv$colNames_SE_preds,
selected = rv$preds_SE)
updateSelectizeInput(session, "obsSE", choices = rv$colNames_SE_obs,
selected = rv$obsCols_SE)
updateSelectizeInput(session, "class", choices = rv$colNames_size,
selected = rv$sizeCol
)
}
if (eventName == "predsSE"){
updateSelectizeInput(session, "obsSE", choices = rv$colNames_SE_obs,
selected = rv$obsCols_SE)
updateSelectizeInput(session, "predsSE", choices = rv$colNames_SE_preds,
selected = rv$preds_SE)
updateSelectizeInput(session, "class", choices = rv$colNames_size,
selected = rv$sizeCol
)
}
if (eventName == "ltp"){
updateSelectizeInput(session, "fta", choices = rv$colNames_fta,
selected = rv$fta)
updateSelectizeInput(session, "ltp", choices = rv$colNames_ltp,
selected = rv$ltp)
updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds,
selected = rv$preds_CP)
updateSelectizeInput(session, "class", choices = rv$colNames_size,
selected = rv$sizeCol
)
}
if (eventName == "fta"){
updateSelectizeInput(session, "fta", choices = rv$colNames_fta,
selected = rv$fta)
updateSelectizeInput(session, "ltp", choices = rv$colNames_ltp,
selected = rv$ltp)
updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds,
selected = rv$preds_CP)
updateSelectizeInput(session, "class", choices = rv$colNames_size,
selected = rv$sizeCol
)
}
if (eventName == "predsCP"){
updateSelectizeInput(session, "fta", choices = rv$colNames_fta,
selected = rv$fta)
updateSelectizeInput(session, "ltp", choices = rv$colNames_ltp,
selected = rv$ltp)
updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds,
selected = rv$preds_CP)
updateSelectizeInput(session, "class", choices = rv$colNames_size,
selected = rv$sizeCol
)
}
if (eventName == "run_SE"){
updateTabsetPanel(session, "analyses_SE", "Model Comparison")
updateSelectizeInput(session, "outSEp", choices = rv$modNames_SEp)
updateSelectizeInput(session, "outSEk", choices = rv$modNames_SEk)
updateSelectizeInput(session, "outSEclass", choices = rv$sizeclasses)
updateSelectizeInput(session, "DWPCol", choices = rv$colNames_DWP,
selected = rv$DWPCol)
if (length(rv$colNames_DWP) == 1){
updateSelectizeInput(session, "DWPCol", selected = rv$colNames_DWP)
}
reset("outgclass")
updateSelectizeInput(session, "split_SS", choices = "")
updateSelectizeInput(session, "split_CO", choices = "")
updateSelectizeInput(session, "outgclass", choices = "")
}
if (eventName == "run_SE_clear"){
toReset <- c("outSEp", "outSEk", "outsizeclassSE", #DWPCol, # reset DWPCol after run_SE_clear?
"split_SS", "split_CO", "modelChoices_SE1", "outgclass")
lapply(toReset, reset)
updateSelectizeInput(session, "modelChoices_SE1", choices = "")
updateSelectizeInput(session, "split_SS", choices = "")
updateSelectizeInput(session, "split_CO", choices = "")
updateSelectizeInput(session, "outSEp", choices = "")
updateSelectizeInput(session, "outSEk", choices = "")
updateSelectizeInput(session, "outSEclass", choices = "")
updateSelectizeInput(session, "outgclass", choices = "")
}
if (eventName == "outSEclass"){
updateSelectizeInput(session, "outSEp", choices = rv$modNames_SEp)
updateSelectizeInput(session, "outSEk", choices = rv$modNames_SEk)
}
if (eventName == "run_CP"){
toReset <- c("outgclass")#, "gSearchInterval", "gSearchMax")
lapply(toReset, reset)
updateTabsetPanel(session, "analyses_CP", "Model Comparison")
updateSelectizeInput(session, "outCPl", choices = rv$modNames_CPl)
updateSelectizeInput(session, "outCPs", choices = rv$modNames_CPs)
updateSelectizeInput(session, "outCPdist", choices = rv$modNames_CPdist)
updateSelectizeInput(session, "outCPclass", choices = rv$sizeclasses)
updateSelectizeInput(session, "split_SS", choices = "")
updateSelectizeInput(session, "split_CO", choices = "")
updateSelectizeInput(session, "outgclass", choices = "")
}
if (eventName == "run_CP_clear"){
toReset <- c("outCPl", "outCPs", "outCPdist", "outsizeclassCP",
"split_SS", "split_CO", "modelChoices_CP1", "outgclass")
lapply(toReset, reset)
updateSelectizeInput(session, "modelChoices_CP1", choices = "")
updateSelectizeInput(session, "split_SS", choices = "")
updateSelectizeInput(session, "split_CO", choices = "")
updateSelectizeInput(session, "outCPl", choices = "")
updateSelectizeInput(session, "outCPs", choices = "")
updateSelectizeInput(session, "outCPdist", choices = "")
updateSelectizeInput(session, "outCPclass", choices = "")
updateSelectizeInput(session, "outgclass", choices = "")
}
if (eventName == "outCPclass"){
updateSelectizeInput(session, "outCPl", choices = rv$modNames_CPl)
updateSelectizeInput(session, "outCPs", choices = rv$modNames_CPs)
updateSelectizeInput(session, "outCPdist", choices = rv$modNames_CPdist)
}
if (eventName == "run_g"){
updateSelectizeInput(session, "outgclass", choices = rv$sizeclasses_g)
updateTabsetPanel(session, "analyses_g", "Summary")
}
if (eventName == "run_g_clear"){
reset("outgclass")
updateSelectizeInput(session, "outgclass", choices = "")
}
if (eventName == "run_M"){
updateNumericInput(session, "frac", value = rv$frac)
updateSelectizeInput(session, "split_SS", choices = rv$splittable_SS)
updateSelectizeInput(session, "split_CO", choices = rv$colNames_CO)
}
if (eventName == "run_M_clear"){
reset("split_SS")
reset("split_CO")
updateSelectizeInput(session, "split_SS", choices = "")
updateSelectizeInput(session, "split_CO", choices = "")
}
if (eventName == "split_M_clear"){
reset("split_SS")
reset("split_CO")
updateSelectizeInput(session, "split_SS", choices = rv$splittable_SS)
updateSelectizeInput(session, "split_CO", choices = rv$colNames_CO)
}
}
| /R/app_update_input.R | permissive | atredennick/GenEst | R | false | false | 18,883 | r | #' @title Update the inputs when an event occurs
#'
#' @description When an event occurs in the GenEst GUI, the input values may
#' need to be updated. This function contains all of the possible updates
#' based on the event options (or lacks any updates if the event doesn't
#' require any).
#'
#' @param eventName Character name of the event. One of "clear_all",
#' "file_SE", "file_SE_clear", "file_CP", "file_CP_clear", "file_SS",
#' "file_SS_clear", "file_DWP", "file_DWP_clear", "file_CO",
#' "file_CO_clear", "class", "obsSE", "predsSE", "run_SE", "run_SE_clear",
#' "outSEclass", "outSEp", "outSEk", "ltp", "fta", "predsCP", "run_CP",
#' "run_CP_clear", "outCPclass", "outCPdist", "outCPl", "outCPs",
#' "run_M", "run_M_clear", "split_M", "split_M_clear", "transpose_split",
#' "run_g", "run_g_clear", or "outgclass".
#'
#' @param rv Reactive values list for the GenEst GUI.
#'
#' @param input \code{input} list for the GenEst GUI.
#'
#' @param session Environment for the GenEst GUI.
#'
#' @export
#'
update_input <- function(eventName, rv, input, session){
  ## Whitelist of recognized event names; anything else is a programming
  ## error, caught immediately below.
  eventOptions <- c("clear_all", "file_SE", "file_SE_clear", "file_CP",
                    "file_CP_clear", "file_SS", "file_SS_clear", "file_DWP",
                    "file_DWP_clear", "file_CO", "file_CO_clear", "class",
                    "obsSE", "predsSE", "run_SE", "run_SE_clear",
                    "outSEclass", "outSEp", "outSEk", "ltp", "fta", "predsCP",
                    "run_CP", "run_CP_clear", "outCPclass", "outCPdist",
                    "outCPl", "outCPs", "run_M", "run_M_clear", "split_M",
                    "split_M_clear", "transpose_split",
                    "run_g", "run_g_clear", "outgclass",
                    "load_RP", "load_RPbat", "load_cleared", "load_PV",
                    "load_trough", "load_powerTower", "load_mock")
  if (missing(eventName) || (eventName %in% eventOptions) == FALSE){
    stop("eventName missing or not in list of available eventNames")
  }
  ## Full reset: clearing everything, or loading one of the example data
  ## sets (the "load_*" events), wipes every widget back to a blank state.
  ## Note the "load_*" events also fall through to the grepl("load_")
  ## branch further down, which repopulates choices from the new data.
  if(eventName == "clear_all" | grepl("load_", eventName)){
    toReset <- c("file_SE", "predsSE", "obsSE", "outSEp", "outSEk",
                 "outSEclass", "DWPCol", "split_SS", "split_CO",
                 "modelChoices_SE1", "outgclass","file_CP", "predsCP", "ltp",
                 "fta", "outCPl", "outCPs", "outCPdist", "outCPclass",
                 "modelChoices_CP1", "file_SS", "file_DWP", "file_CO", "COdate",
                 "sizeCol")
    lapply(toReset, reset)
    # scc <- rv$colNames_size
    # if (is.null(scc)){
    #   scc <- ""
    # }
    # scs <- rv$sizeCol # why would the sizeCol not be cleared when all the data sets are?
    # if (is.null(scc)){
    #   scs <- ""
    # }
    updateSelectizeInput(session, "predsSE", choices = "")
    updateSelectizeInput(session, "obsSE", choices = "")
    # updateSelectizeInput(session, "class", choices = scc, selected = scs)
    # NOTE: the commented-out line glued the previous sizes onto the size menu
    updateSelectizeInput(session, "class", choices = "")
    updateSelectizeInput(session, "modelChoices_SE1", choices = "")
    updateSelectizeInput(session, "split_SS", choices = "")
    updateSelectizeInput(session, "split_CO", choices = "")
    updateSelectizeInput(session, "outSEp", choices = "")
    updateSelectizeInput(session, "outSEk", choices = "")
    updateSelectizeInput(session, "outSEclass", choices = "")
    updateSelectizeInput(session, "outgclass", choices = "")
    updateSelectizeInput(session, "predsCP", choices = "")
    updateSelectizeInput(session, "ltp", choices = "")
    updateSelectizeInput(session, "fta", choices = "")
    updateSelectizeInput(session, "modelChoices_CP1", choices = "")
    updateSelectizeInput(session, "outCPl", choices = "")
    updateSelectizeInput(session, "outCPs", choices = "")
    updateSelectizeInput(session, "outCPdist", choices = "")
    updateSelectizeInput(session, "outCPclass", choices = "")
    updateSelectizeInput(session, "COdate", choices = "")
    updateNumericInput(session, "gSearchInterval", value = NULL)
    updateNumericInput(session, "gSearchMax", value = NULL)
  }
  ## Searcher Efficiency file uploaded: populate SE column pickers.
  if (eventName == "file_SE"){
    updateSelectizeInput(session, "predsSE", choices = rv$colNames_SE_preds)
    updateSelectizeInput(session, "obsSE", choices = rv$colNames_SE_obs)
    updateSelectizeInput(session, "class", choices = rv$colNames_size,
      selected = rv$sizeCol)
    updateTabsetPanel(session, "LoadedDataViz", "Searcher Efficiency")
    if (rv$nsizeclasses > 1){
      updateSelectizeInput(session, "DWPCol", selected = " ")
    }
  }
  ## Searcher Efficiency file cleared: blank out everything downstream of SE.
  if (eventName == "file_SE_clear"){
    toReset <- c("file_SE", "predsSE", "obsSE", "outSEp", "outSEk",
                 "outSEclass", "DWPCol", "split_SS", "split_CO",
                 "modelChoices_SE1", "outgclass")
    lapply(toReset, reset)
    ## Keep the size-class menu alive if size columns are still known from
    ## another data file; otherwise blank it.
    scc <- rv$colNames_size
    scs <- rv$sizeCol
    if (is.null(scc)){
      scs <- ""
      scc <- ""
    }
    updateSelectizeInput(session, "predsSE", choices = "")
    updateSelectizeInput(session, "obsSE", choices = "")
    updateSelectizeInput(session, "class", choices = scc, selected = scs)
    updateSelectizeInput(session, "modelChoices_SE1", choices = "")
    updateSelectizeInput(session, "split_SS", choices = "")
    updateSelectizeInput(session, "split_CO", choices = "")
    updateSelectizeInput(session, "outSEp", choices = "")
    updateSelectizeInput(session, "outSEk", choices = "")
    updateSelectizeInput(session, "outSEclass", choices = "")
    updateSelectizeInput(session, "outgclass", choices = "")
    updateSelectizeInput(session, "DWPCol", choices = "")
    updateTabsetPanel(session, "LoadedDataViz", "Searcher Efficiency")
  }
  ## Carcass Persistence file uploaded: populate CP column pickers.
  if (eventName == "file_CP"){
    updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds)
    updateSelectizeInput(session, "ltp", choices = rv$colNames_ltp)
    updateSelectizeInput(session, "fta", choices = rv$colNames_fta)
    updateSelectizeInput(session, "class", choices = rv$colNames_size,
      selected = rv$sizeCol
    )
    updateTabsetPanel(session, "LoadedDataViz", "Carcass Persistence")
  }
  ## Carcass Persistence file cleared: blank out everything downstream of CP.
  if (eventName == "file_CP_clear"){
    toReset <- c("file_CP", "predsCP", "ltp", "fta", "outCPl", "outCPs",
                 "outCPdist", "outCPclass", "modelChoices_CP1",
                 "split_SS", "split_CO", "outgclass")
    lapply(toReset, reset)
    ## As in file_SE_clear: retain size-class choices if still available.
    scc <- rv$colNames_size
    scs <- rv$sizeCol
    if (is.null(scc)){
      scs <- ""
      scc <- ""
    }
    updateSelectizeInput(session, "predsCP", choices = "")
    updateSelectizeInput(session, "ltp", choices = "")
    updateSelectizeInput(session, "fta", choices = "")
    updateSelectizeInput(session, "class", choices = scc, selected = scs)
    updateSelectizeInput(session, "modelChoices_CP1", choices = "")
    updateSelectizeInput(session, "outCPl", choices = "")
    updateSelectizeInput(session, "outCPs", choices = "")
    updateSelectizeInput(session, "outCPdist", choices = "")
    updateSelectizeInput(session, "outCPclass", choices = "")
    updateSelectizeInput(session, "split_SS", choices = "")
    updateSelectizeInput(session, "split_CO", choices = "")
    updateSelectizeInput(session, "outgclass", choices = "")
    updateTabsetPanel(session, "LoadedDataViz", "Carcass Persistence")
  }
  ## Search Schedule file uploaded: seed the generic-g schedule inputs.
  if (eventName == "file_SS"){
    updateNumericInput(session, "gSearchInterval", value = rv$SS[["I"]])
    updateNumericInput(session, "gSearchMax", value = rv$SS[["span"]])
    updateSelectizeInput(session, "split_SS", choices = rv$splittable_SS)
    updateTabsetPanel(session, "LoadedDataViz", "Search Schedule")
  }
  ## Search Schedule file cleared.
  if (eventName == "file_SS_clear"){
    toReset <- c("file_SS", "gSearchInterval", "gSearchMax",
                 "split_SS", "split_CO", "outgclass")
    lapply(toReset, reset)
    updateSelectizeInput(session, "split_SS", choices = "")
    updateSelectizeInput(session, "split_CO", choices = "")
    updateSelectizeInput(session, "outgclass", choices = "")
    updateTabsetPanel(session, "LoadedDataViz", "Search Schedule")
  }
  ## Density Weighted Proportion file uploaded: auto-select when there is
  ## only one candidate column, or default to the first with multiple
  ## size classes.
  if (eventName == "file_DWP"){
    updateSelectizeInput(session, "DWPCol", choices = rv$colNames_DWP)
    if (length(rv$colNames_DWP) == 1){
      updateSelectizeInput(session, "DWPCol", selected = rv$colNames_DWP)
    }
    if (rv$nsizeclasses > 1){
      updateSelectizeInput(session, "DWPCol", selected = rv$colNames_DWP[1])
    }
    updateTabsetPanel(session, "LoadedDataViz", "Density Weighted Proportion")
  }
  ## Density Weighted Proportion file cleared.
  if (eventName == "file_DWP_clear"){
    toReset <- c("file_DWP", "DWPCol", "split_SS", "split_CO")
    lapply(toReset, reset)
    updateSelectizeInput(session, "DWPCol", choices = "")
    updateSelectizeInput(session, "split_SS", choices = "")
    updateSelectizeInput(session, "split_CO", choices = "")
    updateTabsetPanel(session, "LoadedDataViz", "Density Weighted Proportion")
  }
  ## Carcass Observations file uploaded: populate date and size pickers.
  if (eventName == "file_CO"){
    updateSelectizeInput(session, "COdate", choices = rv$colNames_COdates)
    if (length(rv$colNames_COdates) == 1){
      updateSelectizeInput(session, "COdate", choices = rv$colNames_COdates,
        selected = rv$colNames_COdates)
    }
    updateSelectizeInput(session, "class", choices = rv$colNames_size,
      selected = rv$sizeCol)
    updateTabsetPanel(session, "LoadedDataViz", "Carcass Observations")
  }
  ## Carcass Observations file cleared.
  if (eventName == "file_CO_clear"){
    toReset <- c("file_CO", "COdate", "split_SS", "split_CO")
    lapply(toReset, reset)
    ## NOTE(review): scc/scs are computed here but never used in this
    ## branch (unlike file_SE_clear / file_CP_clear) -- apparently dead
    ## code; confirm before removing.
    scc <- rv$colNames_size
    scs <- rv$sizeCol
    if (is.null(scc)){
      scs <- ""
      scc <- ""
    }
    updateSelectizeInput(session, "COdate", choices = "")
    updateSelectizeInput(session, "split_SS", choices = "")
    updateSelectizeInput(session, "split_CO", choices = "")
    updateTabsetPanel(session, "LoadedDataViz", "Carcass Observations")
  }
  ## Example-data load events: repopulate every picker from the freshly
  ## loaded data (runs after the blanking done in the clear_all | load_
  ## branch above) and cycle the visualization tabs so each re-renders.
  if (grepl("load_", eventName)){
    updateSelectizeInput(session, "predsSE", choices = rv$colNames_SE_preds)
    updateSelectizeInput(session, "obsSE", choices = rv$colNames_SE_obs)
    updateSelectizeInput(session, "class", choices = rv$colNames_size,
      selected = rv$sizeCol)
    updateNumericInput(session, "gSearchInterval", value = rv$SS[["I"]])
    updateNumericInput(session, "gSearchMax", value = rv$SS[["span"]])
    updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds)
    updateSelectizeInput(session, "ltp", choices = rv$colNames_ltp)
    updateSelectizeInput(session, "fta", choices = rv$colNames_fta)
    updateSelectizeInput(session, "class", choices = rv$colNames_size)
    updateSelectizeInput(session, "DWPCol", choices = rv$colNames_DWP)
    if (length(rv$colNames_DWP) == 1){
      updateSelectizeInput(session, "DWPCol", selected = rv$colNames_DWP)
    }
    if (rv$nsizeclasses > 1){
      updateSelectizeInput(session, "DWPCol", selected = rv$colNames_DWP[1])
    }
    updateSelectizeInput(session, "COdate", choices = rv$colNames_COdates)
    if (length(rv$colNames_COdates) == 1){
      updateSelectizeInput(session, "COdate",
        choices = rv$colNames_COdates, selected = rv$colNames_COdates
      )
    }
    updateSelectizeInput(session, "class", choices = rv$colNames_size,
      selected = rv$sizeCol
    )
    if (rv$nsizeclasses > 1){
      updateSelectizeInput(session, "DWPCol", selected = " ")
    }
    updateNavbarPage(session, "GenEstApp", selected = "Data Input")
    updateTabsetPanel(session, "LoadedDataViz", "Carcass Persistence")
    updateTabsetPanel(session, "LoadedDataViz", "Search Schedule")
    updateTabsetPanel(session, "LoadedDataViz", "Density Weighted Proportion")
    updateTabsetPanel(session, "LoadedDataViz", "Carcass Observations")
    updateTabsetPanel(session, "LoadedDataViz", "Searcher Efficiency")
  }
  ## Size-class column changed: invalidate all fitted-model outputs while
  ## preserving the user's current column selections.
  if (eventName == "class"){
    ## NOTE(review): "outsizeclassCP"/"outsizeclassSE" appear only in reset
    ## calls; the selectize inputs updated elsewhere are named "outCPclass"
    ## and "outSEclass". Possible ID typo (reset of a nonexistent input is
    ## a silent no-op in shinyjs) -- confirm against the UI definition.
    toReset <- c(
      "outCPl", "outCPs", "outCPdist", "outsizeclassCP", "modelChoices_CP1",
      "outSEp", "outSEk", "outsizeclassSE", "modelChoices_SE1",
      "DWPCol", "split_SS", "split_CO", "outgclass")
    lapply(toReset, reset)
    updateSelectizeInput(session, "predsSE", choices = rv$colNames_SE_preds,
      selected = input$predsSE)
    updateSelectizeInput(session, "obsSE", choices = rv$colNames_SE_obs,
      selected = input$obsSE)
    updateSelectizeInput(session, "ltp", choices = rv$colNames_CP_nosel,
      selected = input$ltp)
    updateSelectizeInput(session, "fta", choices = rv$colNames_CP_nosel,
      selected = input$fta)
    updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds,
      selected = input$predsCP)
    updateSelectizeInput(session, "DWPCol", choices = rv$colNames_DWP,
      selected = rv$DWPCol)
    updateSelectizeInput(session, "class", choices = rv$colNames_size,
      selected = rv$sizeCol)
  }
  ## SE column pickers changed: re-sync choices/selections from rv so the
  ## remaining menus only offer compatible columns.
  if (eventName == "obsSE"){
    updateSelectizeInput(session, "predsSE", choices = rv$colNames_SE_preds,
      selected = rv$preds_SE)
    updateSelectizeInput(session, "obsSE", choices = rv$colNames_SE_obs,
      selected = rv$obsCols_SE)
    updateSelectizeInput(session, "class", choices = rv$colNames_size,
      selected = rv$sizeCol
    )
  }
  if (eventName == "predsSE"){
    updateSelectizeInput(session, "obsSE", choices = rv$colNames_SE_obs,
      selected = rv$obsCols_SE)
    updateSelectizeInput(session, "predsSE", choices = rv$colNames_SE_preds,
      selected = rv$preds_SE)
    updateSelectizeInput(session, "class", choices = rv$colNames_size,
      selected = rv$sizeCol
    )
  }
  ## CP column pickers changed (last time present / first time absent /
  ## predictors): same re-sync pattern for the CP menus.
  if (eventName == "ltp"){
    updateSelectizeInput(session, "fta", choices = rv$colNames_fta,
      selected = rv$fta)
    updateSelectizeInput(session, "ltp", choices = rv$colNames_ltp,
      selected = rv$ltp)
    updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds,
      selected = rv$preds_CP)
    updateSelectizeInput(session, "class", choices = rv$colNames_size,
      selected = rv$sizeCol
    )
  }
  if (eventName == "fta"){
    updateSelectizeInput(session, "fta", choices = rv$colNames_fta,
      selected = rv$fta)
    updateSelectizeInput(session, "ltp", choices = rv$colNames_ltp,
      selected = rv$ltp)
    updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds,
      selected = rv$preds_CP)
    updateSelectizeInput(session, "class", choices = rv$colNames_size,
      selected = rv$sizeCol
    )
  }
  if (eventName == "predsCP"){
    updateSelectizeInput(session, "fta", choices = rv$colNames_fta,
      selected = rv$fta)
    updateSelectizeInput(session, "ltp", choices = rv$colNames_ltp,
      selected = rv$ltp)
    updateSelectizeInput(session, "predsCP", choices = rv$colNames_CP_preds,
      selected = rv$preds_CP)
    updateSelectizeInput(session, "class", choices = rv$colNames_size,
      selected = rv$sizeCol
    )
  }
  ## SE models fitted: expose model-output selectors and clear g outputs.
  if (eventName == "run_SE"){
    updateTabsetPanel(session, "analyses_SE", "Model Comparison")
    updateSelectizeInput(session, "outSEp", choices = rv$modNames_SEp)
    updateSelectizeInput(session, "outSEk", choices = rv$modNames_SEk)
    updateSelectizeInput(session, "outSEclass", choices = rv$sizeclasses)
    updateSelectizeInput(session, "DWPCol", choices = rv$colNames_DWP,
      selected = rv$DWPCol)
    if (length(rv$colNames_DWP) == 1){
      updateSelectizeInput(session, "DWPCol", selected = rv$colNames_DWP)
    }
    reset("outgclass")
    updateSelectizeInput(session, "split_SS", choices = "")
    updateSelectizeInput(session, "split_CO", choices = "")
    updateSelectizeInput(session, "outgclass", choices = "")
  }
  ## SE model results cleared.
  if (eventName == "run_SE_clear"){
    ## NOTE(review): "outsizeclassSE" -- see the ID-typo note in the
    ## "class" branch above.
    toReset <- c("outSEp", "outSEk", "outsizeclassSE", #DWPCol, # reset DWPCol after run_SE_clear?
                 "split_SS", "split_CO", "modelChoices_SE1", "outgclass")
    lapply(toReset, reset)
    updateSelectizeInput(session, "modelChoices_SE1", choices = "")
    updateSelectizeInput(session, "split_SS", choices = "")
    updateSelectizeInput(session, "split_CO", choices = "")
    updateSelectizeInput(session, "outSEp", choices = "")
    updateSelectizeInput(session, "outSEk", choices = "")
    updateSelectizeInput(session, "outSEclass", choices = "")
    updateSelectizeInput(session, "outgclass", choices = "")
  }
  ## SE output size class changed: refresh the p/k model-name menus.
  if (eventName == "outSEclass"){
    updateSelectizeInput(session, "outSEp", choices = rv$modNames_SEp)
    updateSelectizeInput(session, "outSEk", choices = rv$modNames_SEk)
  }
  ## CP models fitted: expose model-output selectors and clear g outputs.
  if (eventName == "run_CP"){
    toReset <- c("outgclass")#, "gSearchInterval", "gSearchMax")
    lapply(toReset, reset)
    updateTabsetPanel(session, "analyses_CP", "Model Comparison")
    updateSelectizeInput(session, "outCPl", choices = rv$modNames_CPl)
    updateSelectizeInput(session, "outCPs", choices = rv$modNames_CPs)
    updateSelectizeInput(session, "outCPdist", choices = rv$modNames_CPdist)
    updateSelectizeInput(session, "outCPclass", choices = rv$sizeclasses)
    updateSelectizeInput(session, "split_SS", choices = "")
    updateSelectizeInput(session, "split_CO", choices = "")
    updateSelectizeInput(session, "outgclass", choices = "")
  }
  ## CP model results cleared.
  if (eventName == "run_CP_clear"){
    ## NOTE(review): "outsizeclassCP" -- see the ID-typo note in the
    ## "class" branch above.
    toReset <- c("outCPl", "outCPs", "outCPdist", "outsizeclassCP",
                 "split_SS", "split_CO", "modelChoices_CP1", "outgclass")
    lapply(toReset, reset)
    updateSelectizeInput(session, "modelChoices_CP1", choices = "")
    updateSelectizeInput(session, "split_SS", choices = "")
    updateSelectizeInput(session, "split_CO", choices = "")
    updateSelectizeInput(session, "outCPl", choices = "")
    updateSelectizeInput(session, "outCPs", choices = "")
    updateSelectizeInput(session, "outCPdist", choices = "")
    updateSelectizeInput(session, "outCPclass", choices = "")
    updateSelectizeInput(session, "outgclass", choices = "")
  }
  ## CP output size class changed: refresh the l/s/dist model-name menus.
  if (eventName == "outCPclass"){
    updateSelectizeInput(session, "outCPl", choices = rv$modNames_CPl)
    updateSelectizeInput(session, "outCPs", choices = rv$modNames_CPs)
    updateSelectizeInput(session, "outCPdist", choices = rv$modNames_CPdist)
  }
  ## Generic detection probability (g) estimated.
  if (eventName == "run_g"){
    updateSelectizeInput(session, "outgclass", choices = rv$sizeclasses_g)
    updateTabsetPanel(session, "analyses_g", "Summary")
  }
  if (eventName == "run_g_clear"){
    reset("outgclass")
    updateSelectizeInput(session, "outgclass", choices = "")
  }
  ## Mortality (M) estimated: expose the split selectors.
  if (eventName == "run_M"){
    updateNumericInput(session, "frac", value = rv$frac)
    updateSelectizeInput(session, "split_SS", choices = rv$splittable_SS)
    updateSelectizeInput(session, "split_CO", choices = rv$colNames_CO)
  }
  if (eventName == "run_M_clear"){
    reset("split_SS")
    reset("split_CO")
    updateSelectizeInput(session, "split_SS", choices = "")
    updateSelectizeInput(session, "split_CO", choices = "")
  }
  ## Splits cleared but M results kept: restore the split choice menus.
  if (eventName == "split_M_clear"){
    reset("split_SS")
    reset("split_CO")
    updateSelectizeInput(session, "split_SS", choices = rv$splittable_SS)
    updateSelectizeInput(session, "split_CO", choices = rv$colNames_CO)
  }
}
|
#' Test availability of web resources concurrently
#' @description
#' An analogue of \code{file.exists} for URLs: issues HEAD requests
#' for all URLs at once and reports which respond successfully.
#' @param url
#' A character vector listing URLs.
#' @param timeout
#' A non-negative number giving the time in seconds to wait for results.
#' @param con_total,con_host
#' Positive integers indicating the maximum number of concurrent
#' HTTP requests, in total and for any given host.
#' @return
#' A logical vector indexing the elements of \code{url}
#' with response status code 200 (success); requests that fail or
#' time out are reported as \code{FALSE}.
#' @noRd
url.exists <- function(url,
                       timeout = getOption("timeout", 60),
                       con_total = getOption("con_total", 100L),
                       con_host = getOption("con_host", 6L)) {
  stopifnot(requireNamespace("curl"))
  ## Accumulator, keyed by URL; entries flip to TRUE in the callback.
  ok <- logical(length(url))
  names(ok) <- url
  record <- function(resp) {
    ok[[resp[["url"]]]] <<- identical(resp[["status_code"]], 200L)
  }
  pool <- curl::new_pool(total_con = con_total, host_con = con_host)
  ## Queue one HEAD request (nobody = TRUE) per URL; each needs its own
  ## fresh handle.
  for (u in url) {
    curl::curl_fetch_multi(u, done = record,
                           handle = curl::new_handle(nobody = TRUE),
                           pool = pool)
  }
  curl::multi_run(timeout = timeout, poll = FALSE, pool = pool)
  ok
}
#' Download web resources concurrently
#' @description
#' A wrapper function using \code{download.file(method = "libcurl")}
#' to download files in batches.
#' @param url
#' A character vector listing URLs.
#' @param destfile
#' A character vector listing paths where downloaded files should be saved.
#' Tilde expansion is performed.
#' @param batchsize
#' A positive integer indicating a maximum number of concurrent downloads.
#' @return
#' An integer vector of length \code{ceiling(length(url) / batchsize)}
#' containing the status code returned by each call to \code{download.file}.
batch.download.file <- function(url, destfile, batchsize = 6L) {
  stopifnot(capabilities("libcurl"))
  n <- length(url)
  ## Everything fits in one batch: a single vectorized libcurl call.
  if (batchsize >= n) {
    return(download.file(url, destfile, method = "libcurl"))
  }
  ## Partition indices into consecutive groups of at most 'batchsize'
  ## and download each group with one call, collecting status codes.
  idx <- seq_len(n)
  groups <- split(idx, ceiling(idx / batchsize))
  vapply(groups, function(k) {
    download.file(url[k], destfile[k], method = "libcurl")
  }, integer(1L), USE.NAMES = FALSE)
}
#' Uniform sampling in (longitude, latitude) space
#' @description
#' Samples uniformly from a box in (longitude, latitude) space.
#' Longitude and latitude are assumed to be [0, 360)- and [-90, 90]-valued.
#' @param n Sample size.
#' @param x0,x1,y0,y1 Limits of box (recycled against \code{n} as usual
#' for \code{runif}).
#' @param ... Unused; accepted for call compatibility.
#' @details
#' \code{x0 < 0}, \code{x1 > 360}, \code{y0 < -90}, and \code{y1 > 90}
#' are all tolerated: longitudes wrap modulo 360, and latitudes beyond
#' the poles are reflected (with the longitude shifted by 180 degrees).
#' @return
#' \code{cbind(longitude, latitude)}, an \code{n}-by-2 matrix without
#' dimnames.
#' @noRd
runif_ll <- function(n, x0 = 0, x1 = 360, y0 = -90, y1 = 90, ...) {
  ## Draw longitudes first, then latitudes, so the RNG stream is
  ## consumed in a fixed, reproducible order.
  lon <- runif(n, x0, x1) %% 360
  lat <- runif(n, y0, y1)
  ## Points sampled "past" a pole are reflected back onto the sphere:
  ## flip to the antipodal meridian and mirror the latitude.
  over <- which(abs(lat) > 90)
  lon[over] <- (lon[over] + 180) %% 360
  lat[over] <- sign(lat[over]) * 180 - lat[over]
  cbind(lon, lat, deparse.level = 0L)
}
#' Process Met Office weather data file
#' @description
#' For each of \code{N} cities, sample \code{n} points from a box in
#' (longitude, latitude) space and retrieve the value at each point
#' of a weather variable from a Met Office NetCDF file. Compute from
#' the resulting collection of values the city population-weighted
#' mean of the variable within countries.
#' @param path
#' A character string indicating a path to a NetCDF (usually \code{.nc})
#' file.
#' @param url
#' A character string indicating a URL from which the NetCDF file can be
#' downloaded in the event that a file does not exist at \code{path} or
#' the existing file is corrupt.
#' @param varid
#' A character string indicating a variable to read from the NetCDF file.
#' @param data
#' A data frame with variables \code{country},
#' \code{longitude}, \code{latitude}, and \code{population},
#' supplying those details about \code{N = nrow(data)} cities.
#' @param n
#' A positive integer indicating a sample size.
#' @param scale
#' A positive number indicating a ratio of box side length to NetCDF grid
#' spacing.
#' @return
#' A double vector corresponding elementwise to
#' \code{levels(as.factor(data[["country"]]))},
#' giving the city population-weighted mean of \code{varid} in each country.
#' @noRd
nc_process <- function(path, url = NULL, varid, data, n = 1000L, scale = 1) {
  stopifnot(requireNamespace("ncdf4"))
  cat("Processing NetCDF file:", path, "...\n")
  ## Open the file; if that fails (missing or corrupt) and a URL was
  ## supplied, (re)download and retry the original call by re-evaluating
  ## the condition's call. Without a URL, the original error is rethrown.
  nc <- tryCatch(
    expr = {
      ncdf4::nc_open(path)
    },
    error = function(e) {
      if (is.null(url)) {
        stop(e)
      }
      download.file(url, path)
      eval(conditionCall(e))
    }
  )
  on.exit(ncdf4::nc_close(nc))
  z <- ncdf4::ncvar_get(nc, varid)
  x <- c(ncdf4::ncvar_get(nc, "longitude"))
  y <- c(ncdf4::ncvar_get(nc, "latitude"))
  ## Half box side lengths, derived from the grid spacing of the first
  ## two coordinates (assumes a regular grid -- TODO confirm for all
  ## Met Office products).
  dx <- 0.5 * scale * (x[2L] - x[1L])
  dy <- 0.5 * scale * (y[2L] - y[1L])
  ## FIXME: memory limits could be an issue when 'n * N' is large;
  ## see 'help("Memory-limits")'.
  ## Sample n points per city; per-city box limits are recycled by runif.
  R <- runif_ll(
    n = n * nrow(data),
    x0 = data[["longitude"]] - dx,
    x1 = data[["longitude"]] + dx,
    y0 = data[["latitude"]] - dy,
    y1 = data[["latitude"]] + dy
  )
  ## Stretch the outermost break points to the full coordinate range so
  ## every sampled point falls in some bin.
  x[c(1L, length(x))] <- c(0, 360)
  y[c(1L, length(y))] <- c(-90, 90)
  ## Map each sampled point to its grid cell (left-closed intervals;
  ## the top latitude bin additionally includes its upper bound).
  i <- .bincode(R[, 1L], breaks = x, right = FALSE, include.lowest = FALSE)
  j <- .bincode(R[, 2L], breaks = y, right = FALSE, include.lowest = TRUE)
  ## f = country, x = variable value at the sampled cell, w = population
  ## weight (each city's weight repeats across its n sampled points).
  fxw <- data.frame(
    f = data[["country"]],
    x = z[cbind(i, j)],
    w = data[["population"]]
  )
  ## Population-weighted mean of the variable within each country.
  c(by(fxw[c("x", "w")], fxw["f"], function(d) do.call(weighted.mean, d)))
}
#' Process Met Office weather data
#' @description
#' Downloads and processes NetCDF files from a Met Office server
#' and caches output for later reuse.
#' NetCDF files are deleted once output is generated.
#' @param path
#' A character string containing the path to a root directory
#' where downloads are (temporarily) saved and output is cached.
#' @param date
#' A Date vector listing dates for which to download weather data.
#' The Met Office provides data from January 1, 2020.
#' @param varid
#' A character vector listing names of weather variables.
#' @param data,n,scale
#' Arguments to \code{nc_process}.
#' @return
#' \code{NULL}, invisibly.
update_weather <- function(path,
                           date = seq(as.Date("2020-01-01"), Sys.Date() - 1, 1),
                           varid = c("temperature", "specific_humidity", "shortwave_radiation", "precipitation", "wind_speed"),
                           data,
                           n = 1000L,
                           scale = 1) {
  ## Validate all arguments up front; the '%in%' term is a logical
  ## vector, which stopifnot requires to be all TRUE.
  stopifnot(
    is.character(path),
    length(path) == 1L,
    dir.exists(path),
    inherits(date, "Date"),
    length(date) > 0L,
    !anyNA(date),
    is.data.frame(data),
    c("country", "longitude", "latitude", "population") %in% names(data),
    is.numeric(n),
    length(n) == 1L,
    n >= 1,
    is.numeric(scale),
    length(scale) == 1L,
    scale > 0
  )
  ymd <- format(date, "%Y%m%d")
  ## Lookup table mapping our variable names (row names) to the Met
  ## Office file-name token and the NetCDF variable id.
  map <- cbind(
    metoffice = c("t1o5m", "sh", "sw", "precip", "windspeed"),
    netcdf = c("air_temperature", "specific_humidity", "m01s01i202", "precipitation_flux", "wind_speed")
  )
  ## Row names are taken from this function's own 'varid' default so the
  ## two lists cannot drift apart.
  rownames(map) <- eval(formals(sys.function())[["varid"]])
  varid <- unique(match.arg(varid, rownames(map), several.ok = TRUE))
  url <- "https://metdatasa.blob.core.windows.net/covid19-response/metoffice_global_daily"
  for (i in seq_along(varid)) {
    ## One cache subdirectory per variable.
    subdir <- file.path(path, varid[i])
    if (!dir.exists(subdir)) {
      dir.create(subdir)
    }
    file <- data.frame(
      ## Addresses of source files
      url = file.path(url, paste0(map[varid[i], "metoffice"], "_mean"), paste0("global_daily_", map[varid[i], "metoffice"], "_mean_", ymd, ".nc"), fsep = "/"),
      ## Paths to source files
      nc = file.path(subdir, paste0(varid[i], "_", ymd, ".nc")),
      ## Paths to output files
      rds = file.path(subdir, paste0(varid[i], "_", ymd, ".rds")),
      stringsAsFactors = FALSE
    )
    ## Subset source files that have not already been processed
    e <- file.exists(file[["rds"]])
    if (all(e)) {
      next
    }
    file <- file[!e, , drop = FALSE]
    ## Subset source files that are actually available for download
    e <- url.exists(file[["url"]])
    if (!any(e)) {
      next
    }
    file <- file[e, , drop = FALSE]
    ## Identify source files already in file system
    e <- file.exists(file[["nc"]])
    ## Download missing source files in batches
    if (!all(e)) {
      batch.download.file(file[!e, "url"], file[!e, "nc"], batchsize = getOption("con_host", 6L))
    }
    ## FIXME:
    ## Possible disk space issue since all NetCDF files are temporarily
    ## but simultaneously stored on disk, at least while there is no cache.
    ## Fix would involve downloading and processing _in the same loop_.
    ## Process one downloaded file: cache the per-country summary as .rds
    ## and delete the (large) NetCDF source.
    f <- function(k) {
      res <- nc_process(
        path = file[k, "nc"],
        url = file[k, "url"],
        varid = map[varid[i], "netcdf"],
        data = data,
        n = n,
        scale = scale
      )
      saveRDS(res, file = file[k, "rds"])
      file.remove(file[k, "nc"])
    }
    ## Fan out across files; mclapply falls back to serial on Windows.
    parallel::mclapply(seq_len(nrow(file)), f)
  }
  invisible(NULL)
}
.shape_weather <- function(path, date1 = seq(as.Date("2020-01-01"), Sys.Date() - 1, 1)) {
  ## Stack the per-date RDS caches ("<varid>_YYYYMMDD.rds") found in one
  ## directory into a long data frame, padding requested-but-missing dates
  ## with NA values. Returns NULL when the directory holds no caches.
  files <- list.files(path, pattern = "_\\d{8}\\.rds$", full.names = TRUE, recursive = FALSE)
  if (length(files) == 0L) {
    return(NULL)
  }
  per_date <- lapply(files, readRDS) # each element: named double vector (one value per country)
  file_date <- as.Date(sub("^.*_(\\d{8})\\.rds$", "\\1", files), format = "%Y%m%d")
  flat <- unlist(per_date, recursive = FALSE, use.names = TRUE)
  res <- data.frame(
    country = factor(names(flat)),
    date = rep.int(file_date, lengths(per_date, use.names = FALSE)),
    value = flat,
    row.names = NULL
  )
  ## Pad dates in 'date1' that have no cache file: one NA row per
  ## (country, missing date) pair
  missing <- !(date1 %in% file_date)
  if (!any(missing)) {
    return(res)
  }
  countries <- levels(res[["country"]])
  filler <- data.frame(
    country = gl(length(countries), sum(missing), labels = countries),
    date = date1[missing],
    value = NA_real_,
    row.names = NULL
  )
  rbind(res, filler)
}
shape_weather <- function(path, date1 = seq(as.Date("2020-01-01"), Sys.Date() - 1, 1)) {
  ## Combine the per-variable results (one subdirectory of 'path' per
  ## weather variable) into one long data frame keyed by
  ## (country, varid, date).
  subdirs <- list.dirs(path, full.names = TRUE, recursive = FALSE)
  labels <- basename(subdirs)
  pieces <- lapply(subdirs, .shape_weather, date1 = date1)
  res <- do.call(rbind, pieces)
  ## Tag each stacked row with its variable; NROW() is 0 for
  ## subdirectories that produced NULL, so the repeat counts line up
  res[["varid"]] <- rep.int(
    gl(length(subdirs), 1L, labels = labels),
    vapply(pieces, NROW, 0L)
  )
  keys <- c("country", "varid", "date")
  ord <- do.call(order, unname(res[keys]))
  res <- res[ord, c(keys, "value"), drop = FALSE]
  row.names(res) <- NULL
  res
}
| /misc/world/data/utils_world_weather.R | no_license | davidearn/epigrowthfit | R | false | false | 10,600 | r | #' Test availability of web resources concurrently
#' @description
#' An analogue of \code{file.exists} for URLs.
#' @param url
#' A character vector listing URLs.
#' @param timeout
#' A non-negative number giving the time in seconds to wait for results.
#' @param con_total,con_host
#' Positive integers indicating the maximum number of concurrent
#' HTTP requests, in total and for any given host.
#' @return
#' A logical vector indexing the elements of \code{url}
#' with response status code 200 (success).
#' @noRd
url.exists <- function(url,
timeout = getOption("timeout", 60),
con_total = getOption("con_total", 100L),
con_host = getOption("con_host", 6L)) {
stopifnot(requireNamespace("curl"))
res <- logical(length(url))
names(res) <- url
done <- function(req) {
res[[req[["url"]]]] <<- identical(req[["status_code"]], 200L)
}
handle <- function() {
curl::new_handle(nobody = TRUE)
}
pool <- curl::new_pool(total_con = con_total, host_con = con_host)
for (i in seq_along(url)) {
curl::curl_fetch_multi(url[i], done = done, handle = handle(), pool = pool)
}
curl::multi_run(timeout = timeout, poll = FALSE, pool = pool)
res
}
#' Download web resources concurrently
#' @description
#' A wrapper function using \code{download.file(method = "libcurl")}
#' to download files in batches.
#' @param url
#' A character vector listing URLs.
#' @param destfile
#' A character vector listing paths where downloaded files should be saved.
#' Tilde expansion is performed.
#' @param batchsize
#' A positive integer indicating a maximum number of concurrent downloads.
#' @return
#' An integer vector of length \code{ceiling(length(url) / batchsize)}
#' containing the status code returned by each call to \code{download.file}.
batch.download.file <- function(url, destfile, batchsize = 6L) {
  stopifnot(capabilities("libcurl"))
  total <- length(url)
  ## A single call suffices when everything fits in one batch
  if (batchsize >= total) {
    return(download.file(url, destfile, method = "libcurl"))
  }
  ## Partition indices 1..total into consecutive groups of at most
  ## 'batchsize' elements
  groups <- split(seq_len(total), ceiling(seq_len(total) / batchsize))
  ## One download.file() call per batch; collect the status codes
  vapply(
    groups,
    function(idx) download.file(url[idx], destfile[idx], method = "libcurl"),
    integer(1L),
    USE.NAMES = FALSE
  )
}
#' Uniform sampling in (longitude, latitude) space
#' @description
#' Samples uniformly from a box in (longitude, latitude) space.
#' Longitude and latitude are assumed to be [0, 360)- and [-90, 90]-valued.
#' @param n Sample size.
#' @param x0,x1,y0,y1 Limits of box.
#' @param ... Ignored.
#' @details
#' \code{x0 < 0}, \code{x1 > 360}, \code{y0 < -90}, and \code{y1 > 90}
#' are all tolerated, as wrapping is implemented in a sensible way.
#' @return
#' \code{cbind(longitude, latitude)}
#' @noRd
runif_ll <- function(n, x0 = 0, x1 = 360, y0 = -90, y1 = 90, ...) {
  ## Draw longitudes first, then latitudes (order matters for RNG
  ## reproducibility); wrap longitude into [0, 360)
  lon <- runif(n, x0, x1) %% 360
  lat <- runif(n, y0, y1)
  ## Latitudes past a pole are reflected back across it; crossing a pole
  ## also moves the point to the opposite meridian (shift by 180 degrees)
  over <- which(abs(lat) > 90)
  if (length(over) > 0L) {
    lon[over] <- (lon[over] + 180) %% 360
    lat[over] <- sign(lat[over]) * 180 - lat[over]
  }
  cbind(lon, lat, deparse.level = 0L)
}
#' Process Met Office weather data file
#' @description
#' For each of \code{N} cities, sample \code{n} points from a box in
#' (longitude, latitude) space and retrieve the value at each point
#' of a weather variable from a Met Office NetCDF file. Compute from
#' the resulting collection of values the city population-weighted
#' mean of the variable within countries.
#' @param path
#' A character string indicating a path to a NetCDF (usually \code{.nc})
#' file.
#' @param url
#' A character string indicating a URL from which the NetCDF file can be
#' downloaded in the event that a file does not exist at \code{path} or
#' the existing file is corrupt.
#' @param varid
#' A character string indicating a variable to read from the NetCDF file.
#' @param data
#' A data frame with variables \code{country},
#' \code{longitude}, \code{latitude}, and \code{population},
#' supplying those details about \code{N = nrow(data)} cities.
#' @param n
#' A positive integer indicating a sample size.
#' @param scale
#' A positive number indicating a ratio of box side length to NetCDF grid
#' spacing.
#' @return
#' A double vector corresponding elementwise to
#' \code{levels(as.factor(data[["country"]]))},
#' giving the city population-weighted mean of \code{varid} in each country.
#' @noRd
nc_process <- function(path, url = NULL, varid, data, n = 1000L, scale = 1) {
  stopifnot(requireNamespace("ncdf4"))
  cat("Processing NetCDF file:", path, "...\n")
  ## Open the NetCDF file; on failure, and only if a URL was supplied,
  ## (re)download the file and retry
  nc <- tryCatch(
    expr = {
      ncdf4::nc_open(path)
    },
    error = function(e) {
      if (is.null(url)) {
        stop(e)
      }
      download.file(url, path)
      ## Re-evaluate the call that errored, i.e. ncdf4::nc_open(path),
      ## now that a fresh copy of the file is on disk
      eval(conditionCall(e))
    }
  )
  on.exit(ncdf4::nc_close(nc))
  ## Variable values on the (longitude, latitude) grid, plus the grid axes
  z <- ncdf4::ncvar_get(nc, varid)
  x <- c(ncdf4::ncvar_get(nc, "longitude"))
  y <- c(ncdf4::ncvar_get(nc, "latitude"))
  ## Half of the sampling box side length: 'scale' times the spacing
  ## between the first two axis values (assumes a regular grid)
  dx <- 0.5 * scale * (x[2L] - x[1L])
  dy <- 0.5 * scale * (y[2L] - y[1L])
  ## The box limits below are length-N vectors recycled cyclically by
  ## runif() inside runif_ll(), so sample k belongs to city
  ## ((k - 1) %% N) + 1 -- the same cyclic order used when 'data' columns
  ## are recycled into 'fxw' further down.
  ## FIXME: memory limits could be an issue when 'n * N' is large, see 'help("Memory-limits")'
  R <- runif_ll(
    n = n * nrow(data),
    x0 = data[["longitude"]] - dx,
    x1 = data[["longitude"]] + dx,
    y0 = data[["latitude"]] - dy,
    y1 = data[["latitude"]] + dy
  )
  ## Stretch the outermost break points to the full coordinate range so
  ## that every sampled point falls inside some grid cell
  x[c(1L, length(x))] <- c(0, 360)
  y[c(1L, length(y))] <- c(-90, 90)
  ## Grid cell indices for each sampled point
  i <- .bincode(R[, 1L], breaks = x, right = FALSE, include.lowest = FALSE)
  j <- .bincode(R[, 2L], breaks = y, right = FALSE, include.lowest = TRUE)
  ## One row per sampled point: country, value at the point (matrix
  ## indexing via cbind), city population as weight. 'f' and 'w' (length
  ## N) are recycled to length n * N, matching the sampling order above.
  fxw <- data.frame(
    f = data[["country"]],
    x = z[cbind(i, j)],
    w = data[["population"]]
  )
  ## Population-weighted mean of the sampled values within each country
  c(by(fxw[c("x", "w")], fxw["f"], function(d) do.call(weighted.mean, d)))
}
#' Process Met Office weather data
#' @description
#' Downloads and processes NetCDF files from a Met Office server
#' and caches output for later reuse.
#' NetCDF files are deleted once output is generated.
#' @param path
#' A character string containing the path to a root directory
#' where downloads are (temporarily) saved and output is cached.
#' @param date
#' A Date vector listing dates for which to download weather data.
#' The Met Office provides data from January 1, 2020.
#' @param varid
#' A character vector listing names of weather variables.
#' @param data,n,scale
#' Arguments to \code{nc_process}.
#' @return
#' \code{NULL}, invisibly.
update_weather <- function(path,
                           date = seq(as.Date("2020-01-01"), Sys.Date() - 1, 1),
                           varid = c("temperature", "specific_humidity", "shortwave_radiation", "precipitation", "wind_speed"),
                           data,
                           n = 1000L,
                           scale = 1) {
  ## Argument checks; the '%in%' expression yields one logical per
  ## required column of 'data', and stopifnot() requires all to be TRUE.
  stopifnot(
    is.character(path),
    length(path) == 1L,
    dir.exists(path),
    inherits(date, "Date"),
    length(date) > 0L,
    !anyNA(date),
    is.data.frame(data),
    c("country", "longitude", "latitude", "population") %in% names(data),
    is.numeric(n),
    length(n) == 1L,
    n >= 1,
    is.numeric(scale),
    length(scale) == 1L,
    scale > 0
  )
  ## Dates as "YYYYMMDD" strings, used in both URLs and cache file names
  ymd <- format(date, "%Y%m%d")
  ## Lookup table: one row per user-facing variable name, giving the token
  ## used in Met Office URLs and the corresponding NetCDF variable name
  map <- cbind(
    metoffice = c("t1o5m", "sh", "sw", "precip", "windspeed"),
    netcdf = c("air_temperature", "specific_humidity", "m01s01i202", "precipitation_flux", "wind_speed")
  )
  ## Row names are taken from the default of the 'varid' argument, keeping
  ## the table in sync with the function signature
  rownames(map) <- eval(formals(sys.function())[["varid"]])
  varid <- unique(match.arg(varid, rownames(map), several.ok = TRUE))
  url <- "https://metdatasa.blob.core.windows.net/covid19-response/metoffice_global_daily"
  for (i in seq_along(varid)) {
    ## One cache subdirectory per weather variable
    subdir <- file.path(path, varid[i])
    if (!dir.exists(subdir)) {
      dir.create(subdir)
    }
    file <- data.frame(
      ## Addresses of source files
      url = file.path(url, paste0(map[varid[i], "metoffice"], "_mean"), paste0("global_daily_", map[varid[i], "metoffice"], "_mean_", ymd, ".nc"), fsep = "/"),
      ## Paths to source files
      nc = file.path(subdir, paste0(varid[i], "_", ymd, ".nc")),
      ## Paths to output files
      rds = file.path(subdir, paste0(varid[i], "_", ymd, ".rds")),
      stringsAsFactors = FALSE
    )
    ## Subset source files that have not already been processed
    e <- file.exists(file[["rds"]])
    if (all(e)) {
      next
    }
    file <- file[!e, , drop = FALSE]
    ## Subset source files that are actually available for download
    e <- url.exists(file[["url"]])
    if (!any(e)) {
      next
    }
    file <- file[e, , drop = FALSE]
    ## Identify source files already in file system
    e <- file.exists(file[["nc"]])
    ## Download missing source files in batches
    if (!all(e)) {
      batch.download.file(file[!e, "url"], file[!e, "nc"], batchsize = getOption("con_host", 6L))
    }
    ## FIXME:
    ## Possible disk space issue since all NetCDF files are temporarily
    ## but simultaneously stored on disk, at least while there is no cache.
    ## Fix would involve downloading and processing _in the same loop_.
    ## Worker: process one NetCDF file, cache the result as RDS, then
    ## delete the (large) NetCDF source file
    f <- function(k) {
      res <- nc_process(
        path = file[k, "nc"],
        url = file[k, "url"],
        varid = map[varid[i], "netcdf"],
        data = data,
        n = n,
        scale = scale
      )
      saveRDS(res, file = file[k, "rds"])
      file.remove(file[k, "nc"])
    }
    ## NOTE(review): errors raised inside 'f' come back from mclapply() as
    ## 'try-error' objects in the (discarded) result, so failures are
    ## silent here -- consider inspecting the return value.
    parallel::mclapply(seq_len(nrow(file)), f)
  }
  invisible(NULL)
}
.shape_weather <- function(path, date1 = seq(as.Date("2020-01-01"), Sys.Date() - 1, 1)) {
  ## Stack the per-date RDS caches ("<varid>_YYYYMMDD.rds") found in one
  ## directory into a long data frame, padding requested-but-missing dates
  ## with NA values. Returns NULL when the directory holds no caches.
  files <- list.files(path, pattern = "_\\d{8}\\.rds$", full.names = TRUE, recursive = FALSE)
  if (length(files) == 0L) {
    return(NULL)
  }
  per_date <- lapply(files, readRDS) # each element: named double vector (one value per country)
  file_date <- as.Date(sub("^.*_(\\d{8})\\.rds$", "\\1", files), format = "%Y%m%d")
  flat <- unlist(per_date, recursive = FALSE, use.names = TRUE)
  res <- data.frame(
    country = factor(names(flat)),
    date = rep.int(file_date, lengths(per_date, use.names = FALSE)),
    value = flat,
    row.names = NULL
  )
  ## Pad dates in 'date1' that have no cache file: one NA row per
  ## (country, missing date) pair
  missing <- !(date1 %in% file_date)
  if (!any(missing)) {
    return(res)
  }
  countries <- levels(res[["country"]])
  filler <- data.frame(
    country = gl(length(countries), sum(missing), labels = countries),
    date = date1[missing],
    value = NA_real_,
    row.names = NULL
  )
  rbind(res, filler)
}
shape_weather <- function(path, date1 = seq(as.Date("2020-01-01"), Sys.Date() - 1, 1)) {
  ## Combine the per-variable results (one subdirectory of 'path' per
  ## weather variable) into one long data frame keyed by
  ## (country, varid, date).
  subdirs <- list.dirs(path, full.names = TRUE, recursive = FALSE)
  labels <- basename(subdirs)
  pieces <- lapply(subdirs, .shape_weather, date1 = date1)
  res <- do.call(rbind, pieces)
  ## Tag each stacked row with its variable; NROW() is 0 for
  ## subdirectories that produced NULL, so the repeat counts line up
  res[["varid"]] <- rep.int(
    gl(length(subdirs), 1L, labels = labels),
    vapply(pieces, NROW, 0L)
  )
  keys <- c("country", "varid", "date")
  ord <- do.call(order, unname(res[keys]))
  res <- res[ord, c(keys, "value"), drop = FALSE]
  row.names(res) <- NULL
  res
}
|
##' 'cancertypeLandscape' displays the landscape of cancer type component of cell lines with different dependencies of a gene set (signature).
##'
##'
##' @title cancertypeLandscape
##' @param signature.name Names of a signature (format: character)
##' @param signature Gene names of a signature (format: vector)
##' @param cutoff.freq Cutoff for frequency of cancer cell lines for each cancer type, default 10
##' @param cutoff.percentile Cutoff for percentile of cancer cell lines with highest/lowest dependency, default 0.2
##' @return plot
##' @importFrom stats complete.cases
##' @importFrom wesanderson wes_palette
##' @importFrom cowplot plot_grid
##' @importFrom purrr map
##' @importFrom ggrepel geom_label_repel
##' @import data.table ggpubr ggplot2
##' @export
##' @author Xiao Chen
##' @references 1. X Chen, J McGuire, F Zhu, X Xu, Y Li, D Karagiannis, R Dalla-Favera, A Ciccia, J Amengual, C Lu (2020).
##' Harnessing genetic dependency correlation network to reveal chromatin vulnerability in cancer.
##' In preparation.
##' @examples
##' source(system.file("script", "load_libs.R", package = "deplink"))
##' signature.name = "9-1-1"
##' signature = c("RAD9A", "RAD1", "HUS1", "RAD17")
##' cancertypeLandscape(signature.name, signature)
## Main
## NOTE(review): this function reads several objects that are not arguments
## (dep.t, dep.t.meta, meta, TCGA.tumor); presumably they are loaded by
## load_libs.R (see @examples) -- confirm they are in scope before calling.
## The bare head()/dim() calls throughout are leftover interactive checks;
## their values are discarded, so they are harmless but could be removed.
cancertypeLandscape <- function(signature.name,
                                signature,
                                cutoff.freq = 10,
                                cutoff.percentile = 0.2
                                ) {
    # Primary.Disease.freq = table(dep.t.signature.meta.order$disease)
    ## Count cell lines per TCGA cancer type; keep types with at least
    ## 'cutoff.freq' cell lines, then restrict to those cell lines
    Primary.Disease.freq = table(dep.t.meta$tcga_code)
    Primary.Disease.freq.cutoff = Primary.Disease.freq[Primary.Disease.freq >= cutoff.freq]
    TCGA.tumor.target = TCGA.tumor[TCGA.tumor$tcga_code %in% names(Primary.Disease.freq.cutoff),,drop=FALSE]
    head(TCGA.tumor.target)
    dim(TCGA.tumor.target)
    # 1047 1
    dep.t = dep.t[rownames(dep.t) %in% rownames(TCGA.tumor.target),,drop=FALSE]
    head(dep.t)
    dim(dep.t)
    # 458 18333
    ## Signature score = negated mean dependency over the signature genes
    ## (higher score = stronger dependency); sort cell lines by score
    dep.t.signature = dep.t[,colnames(dep.t) %in% signature, drop=FALSE]
    dep.t.signature$signature.score = rowMeans(dep.t.signature)*(-1)
    dep.t.signature = dep.t.signature[order(dep.t.signature$signature.score, decreasing=TRUE),]
    head(dep.t.signature)
    dim(dep.t.signature)
    # 558 5
    ## Top and bottom 'cutoff.percentile' fractions by signature score;
    ## used below only for the dashed cutoff lines in the overlay plot
    dep.t.signature.high = dep.t.signature[1:ceiling(nrow(dep.t.signature)*cutoff.percentile),,drop=FALSE]
    head(dep.t.signature.high)
    dim(dep.t.signature.high)
    # 56 12
    dep.t.signature.low = dep.t.signature[(nrow(dep.t.signature)-ceiling(nrow(dep.t.signature)*cutoff.percentile) + 1):nrow(dep.t.signature),,drop=FALSE]
    head(dep.t.signature.low)
    dim(dep.t.signature.low)
    # 56 12
    # write.csv(dep.t.signature, paste0("dep_", signature.name, "_score.csv"))
    # write.csv(dep.t.signature.high, paste0("dep_", signature.name, "_score.high", cutoff.percentile, ".csv"))
    # write.csv(dep.t.signature.low, paste0("dep_", signature.name, "_score.low", cutoff.percentile, ".csv"))
    #############################################
    # dot plot
    ## Attach cell-line metadata and TCGA annotation (inner joins on row
    ## names), then order cell lines by ascending signature score
    dep.t.signature.meta = merge(dep.t.signature, meta, by="row.names", all=FALSE)
    rownames(dep.t.signature.meta) = dep.t.signature.meta[,1]
    dep.t.signature.meta = dep.t.signature.meta[,-1]
    head(dep.t.signature.meta)
    dim(dep.t.signature.meta)
    # 558 20
    # write.csv(dep.t.signature.meta, paste0("meta/dep_", signature.name, "_score.meta.csv"))
    dep.t.signature.meta = merge(dep.t.signature.meta, TCGA.tumor, by="row.names", all=FALSE)
    rownames(dep.t.signature.meta) = dep.t.signature.meta[,1]
    dep.t.signature.meta = dep.t.signature.meta[,-1]
    head(dep.t.signature.meta)
    dim(dep.t.signature.meta)
    # 517 21
    dep.t.signature.meta.order = dep.t.signature.meta[order(dep.t.signature.meta$signature.score),]
    head(dep.t.signature.meta.order)
    dim(dep.t.signature.meta.order)
    # 517 21
    # write.csv(dep.t.signature.meta.order, paste0("meta/dep_", signature.name, "_score_CancerType.TCGA.csv"))
    # Primary.Disease.freq = table(dep.t.signature.meta.order$disease)
    ## Recount types on the merged data; note the strict '>' here versus
    ## the '>=' used at the top of the function
    Primary.Disease.freq = table(dep.t.signature.meta.order$tcga_code)
    Primary.Disease.freq.cutoff = Primary.Disease.freq[Primary.Disease.freq > cutoff.freq]
    ## One color per retained cancer type
    colors = as.list(wes_palette(length(Primary.Disease.freq.cutoff), name = "Darjeeling1", type = "continuous"))
    plotlist = list()
    ## One ranked dot panel per cancer type. The first panel keeps its y
    ## axis elements (rendered invisible via color=NA) so that all panels
    ## come out the same size; later panels drop them entirely.
    for (i in 1:length(Primary.Disease.freq.cutoff)) {
        cancer.type = names(Primary.Disease.freq.cutoff)[i]
        # dep.t.signature.meta.order.subset = dep.t.signature.meta.order[dep.t.signature.meta.order$disease == cancer.type,,drop=FALSE]
        dep.t.signature.meta.order.subset = dep.t.signature.meta.order[dep.t.signature.meta.order$tcga_code == cancer.type,,drop=FALSE]
        ## Rank within the cancer type (x axis). NOTE(review): seq(1:n) is
        ## equivalent to 1:n here since n >= 1, but seq_len(nrow(...))
        ## would be the safer idiom.
        dep.t.signature.meta.order.subset$order = seq(1:nrow(dep.t.signature.meta.order.subset))
        head(dep.t.signature.meta.order.subset)
        dim(dep.t.signature.meta.order.subset)
        # 558 20
        ## Shorten the label: keep text before the first space, then
        ## before the first slash
        cancer.type.name = gsub(" .+", "", cancer.type)
        cancer.type.name = gsub("\\/.+", "", cancer.type.name)
        # p = paste0("p", i)
        if (i == 1) {
            p = ggplot(data = dep.t.signature.meta.order.subset, mapping = aes(x = order, y = signature.score)) +
                geom_point(size=0.5, color= unlist(colors)[i])+
                # xlim(-1,1) +
                ylim(min(dep.t.signature.meta$signature.score),max(dep.t.signature.meta$signature.score)) +
                # geom_hline(yintercept = min(dep.t.signature.high$signature.score), linetype="dashed", colour="grey30", size=0.2) +
                # geom_hline(yintercept = max(dep.t.signature.low$signature.score), linetype="dashed", colour="grey30", size=0.2) +
                labs(x= cancer.type.name, y="Signature score")+
                theme_classic() + rremove("legend") +
                theme(axis.title.x=element_text(size=9), axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.title.y=element_text(size=9, color=NA), axis.line.x=element_blank(), axis.text.y=element_text(size=9, color=NA), axis.line.y=element_line(color=NA))
        } else {
            p = ggplot(data = dep.t.signature.meta.order.subset, mapping = aes(x = order, y = signature.score)) +
                geom_point(size=0.5, color= unlist(colors)[i])+
                # xlim(-1,1) +
                ylim(min(dep.t.signature.meta$signature.score),max(dep.t.signature.meta$signature.score)) +
                # geom_hline(yintercept = min(dep.t.signature.high$signature.score), linetype="dashed", colour="grey30", size=0.2) +
                # geom_hline(yintercept = max(dep.t.signature.low$signature.score), linetype="dashed", colour="grey30", size=0.2) +
                labs(x= cancer.type.name, y="Signature score")+
                theme_classic() + rremove("legend") +
                theme(axis.title.x=element_text(size=9), axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.line.x=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank(), axis.text.y=element_blank(), axis.line.y=element_blank())
        }
        plotlist[[i]] = p
    }
    # Arranging the plot using cowplot
    # paste(as.list(paste0("p", seq(1:length(Primary.Disease.freq.cutoff)))), collapse = ",")
    # plotlist = map(paste0("p", seq(1:length(Primary.Disease.freq.cutoff))), get)
    ## Arrange panels side by side; the first panel is double width to
    ## leave room for the shared y axis drawn by the overlay below
    p = ggarrange(plotlist = plotlist, ncol = length(Primary.Disease.freq.cutoff), nrow = 1, widths = c(2, rep(1,length(Primary.Disease.freq.cutoff)-1)), heights = c(1,1,1,1,1))
    ## Overlay scaffold: a transparent plot supplying the shared y axis
    ## and the dashed high/low cutoff lines; points are hidden (size=NA).
    ## NOTE(review): it reuses 'dep.t.signature.meta.order.subset' and 'i'
    ## left over from the final loop iteration -- the hidden points make
    ## that harmless, but confirm it is intentional.
    p0 = ggplot(data = dep.t.signature.meta.order.subset, mapping = aes(x = order, y = signature.score)) +
        geom_point(size=NA, color= unlist(colors)[i])+
        # xlim(-1,1) +
        ylim(min(dep.t.signature.meta$signature.score),max(dep.t.signature.meta$signature.score)) +
        geom_hline(yintercept = min(dep.t.signature.high$signature.score), linetype="dashed", colour="grey30", size=0.2) +
        geom_hline(yintercept = max(dep.t.signature.low$signature.score), linetype="dashed", colour="grey30", size=0.2) +
        labs(x= cancer.type.name, y="Signature score")+
        theme_classic() + rremove("legend") +
        theme(axis.title.x=element_text(size=9, color=NA), axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.title.y=element_text(size=9), axis.line.x=element_line(), axis.text.y=element_text(size=9), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_rect(fill = "transparent",colour = NA), plot.background = element_rect(fill = "transparent",colour = NA))
    p = suppressWarnings(p + annotation_custom(grob = ggplotGrob(p0)))
    return(p)
}
| /R/cancertypeLandscape.R | no_license | Raquelqcm/deplink | R | false | false | 8,648 | r | ##' 'cancertypeLandscape' displays the landscape of cancer type component of cell lines with different dependencies of a gene set (signature).
##'
##'
##' @title cancertypeLandscape
##' @param signature.name Names of a signature (format: character)
##' @param signature Gene names of a signature (format: vector)
##' @param cutoff.freq Cutoff for frequency of cancer cell lines for each cancer type, default 10
##' @param cutoff.percentile Cutoff for percentile of cancer cell lines with highest/lowest dependency, default 0.2
##' @return plot
##' @importFrom stats complete.cases
##' @importFrom wesanderson wes_palette
##' @importFrom cowplot plot_grid
##' @importFrom purrr map
##' @importFrom ggrepel geom_label_repel
##' @import data.table ggpubr ggplot2
##' @export
##' @author Xiao Chen
##' @references 1. X Chen, J McGuire, F Zhu, X Xu, Y Li, D Karagiannis, R Dalla-Favera, A Ciccia, J Amengual, C Lu (2020).
##' Harnessing genetic dependency correlation network to reveal chromatin vulnerability in cancer.
##' In preparation.
##' @examples
##' source(system.file("script", "load_libs.R", package = "deplink"))
##' signature.name = "9-1-1"
##' signature = c("RAD9A", "RAD1", "HUS1", "RAD17")
##' cancertypeLandscape(signature.name, signature)
## Main
cancertypeLandscape <- function(signature.name,
signature,
cutoff.freq = 10,
cutoff.percentile = 0.2
) {
# Primary.Disease.freq = table(dep.t.signature.meta.order$disease)
Primary.Disease.freq = table(dep.t.meta$tcga_code)
Primary.Disease.freq.cutoff = Primary.Disease.freq[Primary.Disease.freq >= cutoff.freq]
TCGA.tumor.target = TCGA.tumor[TCGA.tumor$tcga_code %in% names(Primary.Disease.freq.cutoff),,drop=FALSE]
head(TCGA.tumor.target)
dim(TCGA.tumor.target)
# 1047 1
dep.t = dep.t[rownames(dep.t) %in% rownames(TCGA.tumor.target),,drop=FALSE]
head(dep.t)
dim(dep.t)
# 458 18333
dep.t.signature = dep.t[,colnames(dep.t) %in% signature, drop=FALSE]
dep.t.signature$signature.score = rowMeans(dep.t.signature)*(-1)
dep.t.signature = dep.t.signature[order(dep.t.signature$signature.score, decreasing=TRUE),]
head(dep.t.signature)
dim(dep.t.signature)
# 558 5
dep.t.signature.high = dep.t.signature[1:ceiling(nrow(dep.t.signature)*cutoff.percentile),,drop=FALSE]
head(dep.t.signature.high)
dim(dep.t.signature.high)
# 56 12
dep.t.signature.low = dep.t.signature[(nrow(dep.t.signature)-ceiling(nrow(dep.t.signature)*cutoff.percentile) + 1):nrow(dep.t.signature),,drop=FALSE]
head(dep.t.signature.low)
dim(dep.t.signature.low)
# 56 12
# write.csv(dep.t.signature, paste0("dep_", signature.name, "_score.csv"))
# write.csv(dep.t.signature.high, paste0("dep_", signature.name, "_score.high", cutoff.percentile, ".csv"))
# write.csv(dep.t.signature.low, paste0("dep_", signature.name, "_score.low", cutoff.percentile, ".csv"))
#############################################
# dot plot
dep.t.signature.meta = merge(dep.t.signature, meta, by="row.names", all=FALSE)
rownames(dep.t.signature.meta) = dep.t.signature.meta[,1]
dep.t.signature.meta = dep.t.signature.meta[,-1]
head(dep.t.signature.meta)
dim(dep.t.signature.meta)
# 558 20
# write.csv(dep.t.signature.meta, paste0("meta/dep_", signature.name, "_score.meta.csv"))
dep.t.signature.meta = merge(dep.t.signature.meta, TCGA.tumor, by="row.names", all=FALSE)
rownames(dep.t.signature.meta) = dep.t.signature.meta[,1]
dep.t.signature.meta = dep.t.signature.meta[,-1]
head(dep.t.signature.meta)
dim(dep.t.signature.meta)
# 517 21
dep.t.signature.meta.order = dep.t.signature.meta[order(dep.t.signature.meta$signature.score),]
head(dep.t.signature.meta.order)
dim(dep.t.signature.meta.order)
# 517 21
# write.csv(dep.t.signature.meta.order, paste0("meta/dep_", signature.name, "_score_CancerType.TCGA.csv"))
# Primary.Disease.freq = table(dep.t.signature.meta.order$disease)
Primary.Disease.freq = table(dep.t.signature.meta.order$tcga_code)
Primary.Disease.freq.cutoff = Primary.Disease.freq[Primary.Disease.freq > cutoff.freq]
colors = as.list(wes_palette(length(Primary.Disease.freq.cutoff), name = "Darjeeling1", type = "continuous"))
plotlist = list()
for (i in 1:length(Primary.Disease.freq.cutoff)) {
cancer.type = names(Primary.Disease.freq.cutoff)[i]
# dep.t.signature.meta.order.subset = dep.t.signature.meta.order[dep.t.signature.meta.order$disease == cancer.type,,drop=FALSE]
dep.t.signature.meta.order.subset = dep.t.signature.meta.order[dep.t.signature.meta.order$tcga_code == cancer.type,,drop=FALSE]
dep.t.signature.meta.order.subset$order = seq(1:nrow(dep.t.signature.meta.order.subset))
head(dep.t.signature.meta.order.subset)
dim(dep.t.signature.meta.order.subset)
# 558 20
cancer.type.name = gsub(" .+", "", cancer.type)
cancer.type.name = gsub("\\/.+", "", cancer.type.name)
# p = paste0("p", i)
if (i == 1) {
p = ggplot(data = dep.t.signature.meta.order.subset, mapping = aes(x = order, y = signature.score)) +
geom_point(size=0.5, color= unlist(colors)[i])+
# xlim(-1,1) +
ylim(min(dep.t.signature.meta$signature.score),max(dep.t.signature.meta$signature.score)) +
# geom_hline(yintercept = min(dep.t.signature.high$signature.score), linetype="dashed", colour="grey30", size=0.2) +
# geom_hline(yintercept = max(dep.t.signature.low$signature.score), linetype="dashed", colour="grey30", size=0.2) +
labs(x= cancer.type.name, y="Signature score")+
theme_classic() + rremove("legend") +
theme(axis.title.x=element_text(size=9), axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.title.y=element_text(size=9, color=NA), axis.line.x=element_blank(), axis.text.y=element_text(size=9, color=NA), axis.line.y=element_line(color=NA))
} else {
p = ggplot(data = dep.t.signature.meta.order.subset, mapping = aes(x = order, y = signature.score)) +
geom_point(size=0.5, color= unlist(colors)[i])+
# xlim(-1,1) +
ylim(min(dep.t.signature.meta$signature.score),max(dep.t.signature.meta$signature.score)) +
# geom_hline(yintercept = min(dep.t.signature.high$signature.score), linetype="dashed", colour="grey30", size=0.2) +
# geom_hline(yintercept = max(dep.t.signature.low$signature.score), linetype="dashed", colour="grey30", size=0.2) +
labs(x= cancer.type.name, y="Signature score")+
theme_classic() + rremove("legend") +
theme(axis.title.x=element_text(size=9), axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.line.x=element_blank(), axis.title.y=element_blank(), axis.ticks.y=element_blank(), axis.text.y=element_blank(), axis.line.y=element_blank())
}
plotlist[[i]] = p
}
# Arranging the plot using cowplot
# paste(as.list(paste0("p", seq(1:length(Primary.Disease.freq.cutoff)))), collapse = ",")
# plotlist = map(paste0("p", seq(1:length(Primary.Disease.freq.cutoff))), get)
p = ggarrange(plotlist = plotlist, ncol = length(Primary.Disease.freq.cutoff), nrow = 1, widths = c(2, rep(1,length(Primary.Disease.freq.cutoff)-1)), heights = c(1,1,1,1,1))
p0 = ggplot(data = dep.t.signature.meta.order.subset, mapping = aes(x = order, y = signature.score)) +
geom_point(size=NA, color= unlist(colors)[i])+
# xlim(-1,1) +
ylim(min(dep.t.signature.meta$signature.score),max(dep.t.signature.meta$signature.score)) +
geom_hline(yintercept = min(dep.t.signature.high$signature.score), linetype="dashed", colour="grey30", size=0.2) +
geom_hline(yintercept = max(dep.t.signature.low$signature.score), linetype="dashed", colour="grey30", size=0.2) +
labs(x= cancer.type.name, y="Signature score")+
theme_classic() + rremove("legend") +
theme(axis.title.x=element_text(size=9, color=NA), axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.title.y=element_text(size=9), axis.line.x=element_line(), axis.text.y=element_text(size=9), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_rect(fill = "transparent",colour = NA), plot.background = element_rect(fill = "transparent",colour = NA))
p = suppressWarnings(p + annotation_custom(grob = ggplotGrob(p0)))
return(p)
}
|
\name{templateCutoff}
\alias{templateCutoff}
\alias{templateCutoff<-}
\title{
Query or Set Template Cutoffs
}
\description{
Use this function to check or change the values of score cutoff in template lists (\code{\linkS4class{corTemplateList}} or \code{\linkS4class{binTemplateList}} objects), scores (\code{\linkS4class{templateScores}} objects), or detections list (\code{\linkS4class{detectionList}} objects).
}
\usage{
templateCutoff(object)
templateCutoff(object) <- value
}
\arguments{
\item{object}{
A template list (class \code{binTemplateList} or \code{corTemplateList}), a \code{templateScores} object, or a \code{detectionList} object.
}
\item{value}{
A numeric vector with the new score cutoff.
}
}
\details{
\code{templateCutoff} is an accessor function and \code{templateCutoff <- } is a replacement function.\cr
For replacement, the \code{value} object should be as long as the number of templates in \code{object} (or the number selected via indexing) unless it is a named vector (see Examples).
}
\value{
For extraction, a numeric vector of the same length as \code{object} with score cutoffs.
For replacement, the updated object.
}
\author{
Sasha D. Hafner
}
\seealso{
\code{\link{templateNames}}, \code{\link{templateComment}}
}
\examples{
# Load data
data(btnw)
data(oven)
# Write Wave objects to file (temporary directory used here)
btnw.fp <- file.path(tempdir(), "btnw.wav")
oven.fp <- file.path(tempdir(), "oven.wav")
writeWave(btnw, btnw.fp)
writeWave(oven, oven.fp)
# Create four correlation templates
wct1 <- makeCorTemplate(btnw.fp, name = "w1")
wct2 <- makeCorTemplate(btnw.fp, t.lim = c(1.5, 2.1), frq.lim = c(4.2, 5.6), name = "w2")
oct1 <- makeCorTemplate(oven.fp, t.lim = c(1, 4), frq.lim = c(1, 11), name = "o1")
oct2 <- makeCorTemplate(oven.fp, t.lim = c(1, 4), frq.lim = c(1, 11), dens = 0.1, name = "o2")
# Combine all of them
ctemps <- combineCorTemplates(wct1, wct2, oct1, oct2)
ctemps
# Check cutoffs
templateCutoff(ctemps)
# Change all like this
templateCutoff(ctemps) <- c(0.35, 0.35, 0.35, 0.35)
# or this
templateCutoff(ctemps) <- c(default = 0.35)
# Change select ones like this
templateCutoff(ctemps) <- c(o1 = 0.45, o2 = 0.45)
# or this
templateCutoff(ctemps)[c(3, 4)] <- 0.45
# Could combine these two steps
templateCutoff(ctemps) <- c(default = 0.35, o1 = 0.45, o2 = 0.45)
# Clean up (only because these files were created in these examples)
file.remove(btnw.fp)
file.remove(oven.fp)
}
\keyword{manip}
\keyword{attribute}
| /man/templateCutoff.Rd | no_license | jonkatz2/monitoR | R | false | false | 2,467 | rd | \name{templateCutoff}
\alias{templateCutoff}
\alias{templateCutoff<-}
\title{
Query or Set Template Cutoffs
}
\description{
Use this function to check or change the values of score cutoff in template lists (\code{\linkS4class{corTemplateList}} or \code{\linkS4class{binTemplateList}} objects), scores (\code{\linkS4class{templateScores}} objects), or detections list (\code{\linkS4class{detectionList}} objects).
}
\usage{
templateCutoff(object)
templateCutoff(object) <- value
}
\arguments{
\item{object}{
A binary or correlation template list (class \code{binTemplateList} or \code{corTemplateList}), or another object containing templates (class \code{templateScores} or \code{detectionList}).
}
\item{value}{
A numeric vector with the new score cutoff.
}
}
\details{
\code{templateCutoff} is an accessor function and \code{templateCutoff <- } is a replacement function.\cr
For replacement, the \code{value} object should be as long as the number of templates in \code{object} (or the number selecting via indexing) unless it is a named vector (see Examples).
}
\value{
For extraction, a numeric vector of the same length as \code{object} with score cutoffs.
For replacement, the updated object.
}
\author{
Sasha D. Hafner
}
\seealso{
\code{\link{templateNames}}, \code{\link{templateComment}}
}
\examples{
# Load data
data(btnw)
data(oven)
# Write Wave objects to file (temporary directory used here)
btnw.fp <- file.path(tempdir(), "btnw.wav")
oven.fp <- file.path(tempdir(), "oven.wav")
writeWave(btnw, btnw.fp)
writeWave(oven, oven.fp)
# Create four correlation templates
wct1 <- makeCorTemplate(btnw.fp, name = "w1")
wct2 <- makeCorTemplate(btnw.fp, t.lim = c(1.5, 2.1), frq.lim = c(4.2, 5.6), name = "w2")
oct1 <- makeCorTemplate(oven.fp, t.lim = c(1, 4), frq.lim = c(1, 11), name = "o1")
oct2 <- makeCorTemplate(oven.fp, t.lim = c(1, 4), frq.lim = c(1, 11), dens = 0.1, name = "o2")
# Combine all of them
ctemps <- combineCorTemplates(wct1, wct2, oct1, oct2)
ctemps
# Check cutoffs
templateCutoff(ctemps)
# Change all like this
templateCutoff(ctemps) <- c(0.35, 0.35, 0.35, 0.35)
# or this
templateCutoff(ctemps) <- c(default = 0.35)
# Change select ones like this
templateCutoff(ctemps) <- c(o1 = 0.45, o2 = 0.45)
# or this
templateCutoff(ctemps)[c(3, 4)] <- 0.45
# Could combine these two steps
templateCutoff(ctemps) <- c(default = 0.35, o1 = 0.45, o2 = 0.45)
# Clean up (only because these files were created in these examples)
file.remove(btnw.fp)
file.remove(oven.fp)
}
\keyword{manip}
\keyword{attribute}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qdo.R
\name{qdo}
\alias{qdo}
\title{Manipulating the output of \code{mqgam}}
\usage{
qdo(obj, qu = NULL, fun = I, ...)
}
\arguments{
\item{obj}{the output of a \code{mqgam} call.}
\item{qu}{A vector whose elements must be in (0, 1). Each element indicates a quantile of interest,
which should be an element of \code{names(obj$fit)}. If left to \code{NULL} the function
\code{fun} will be applied to each of the quantile fits in \code{obj}.}
\item{fun}{The method or function that we want to use on the \code{gamObject} corresponding to quantile \code{qu}. For instance
\code{predict}, \code{plot} or \code{summary}. By default this is the identity function (\code{I}), which
means that the fitted model for quantile \code{qu} is returned.}
\item{...}{Additional arguments to be passed to \code{fun}.}
}
\value{
A list whose i-th entry is the output of \code{fun} applied to the fitted model for quantile \code{qu[i]}.
}
\description{
Contrary to \code{qgam}, \code{mqgam} does not output a standard \code{gamObject}, hence
methods such as \code{predict.gam} or \code{plot.gam} cannot be used directly. \code{qdo}
provides a simple wrapper for such methods.
}
\examples{
library(qgam); library(MASS)
quSeq <- c(0.4, 0.6)
set.seed(737)
fit <- mqgam(accel~s(times, k=20, bs="ad"), data = mcycle, err = 0.05, qu = quSeq,
control = list("tol" = 0.01)) # <- semi-sloppy tolerance to speed-up calibration
qdo(fit, 0.4, summary)
invisible(qdo(fit, 0.4, plot, pages = 1))
# Return the object for qu = 0.6 and then plot it
tmp <- qdo(fit, 0.6)
plot(tmp)
}
\author{
Matteo Fasiolo <matteo.fasiolo@gmail.com>.
}
| /man/qdo.Rd | no_license | davidruegamer/qgam | R | false | true | 1,735 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qdo.R
\name{qdo}
\alias{qdo}
\title{Manipulating the output of \code{mqgam}}
\usage{
qdo(obj, qu = NULL, fun = I, ...)
}
\arguments{
\item{obj}{the output of a \code{mqgam} call.}
\item{qu}{A vector whose elements must be in (0, 1). Each element indicates a quantile of interest,
which should be an element of \code{names(obj$fit)}. If left to \code{NULL} the function
\code{fun} will be applied to each of the quantile fits in \code{obj}.}
\item{fun}{The method or function that we want to use on the \code{gamObject} corresponding to quantile \code{qu}. For instance
\code{predict}, \code{plot} or \code{summary}. By default this is the identity function (\code{I}), which
means that the fitted model for quantile \code{qu} is returned.}
\item{...}{Additional arguments to be passed to \code{fun}.}
}
\value{
A list whose i-th entry is the output of \code{fun} applied to the fitted model for quantile \code{qu[i]}.
}
\description{
Contrary to \code{qgam}, \code{mqgam} does not output a standard \code{gamObject}, hence
methods such as \code{predict.gam} or \code{plot.gam} cannot be used directly. \code{qdo}
provides a simple wrapper for such methods.
}
\examples{
library(qgam); library(MASS)
quSeq <- c(0.4, 0.6)
set.seed(737)
fit <- mqgam(accel~s(times, k=20, bs="ad"), data = mcycle, err = 0.05, qu = quSeq,
control = list("tol" = 0.01)) # <- semi-sloppy tolerance to speed-up calibration
qdo(fit, 0.4, summary)
invisible(qdo(fit, 0.4, plot, pages = 1))
# Return the object for qu = 0.6 and then plot it
tmp <- qdo(fit, 0.6)
plot(tmp)
}
\author{
Matteo Fasiolo <matteo.fasiolo@gmail.com>.
}
|
#' ---
#' title: "Test Optimal Combination"
#' author: "Kevin Lu"
#' date: '`r format(Sys.Date(), "%B %d, %Y")`'
#' output:
#'   html_document:
#'     theme: default
#'     highlight: tango
#'     toc: true
#'     toc_float: true
#'     number_sections: false
#'     fig_width: 8
#'     fig_height: 5
#' ---
# NOTE(review): the document title says "Test Optimal Combination" but the
# script tests cointegration across time resolutions -- consider renaming.

#' # 1. Source Search Cointegration Combinations
#' Loads the helpers used below: `prepare_data()`, `calculate_statistics()`,
#' `create_coins()`, and `plot_coins()`.
source("./scripts/04-search-cointegration.R", echo = FALSE, print.eval = FALSE)

#' # 2. Prepare Data and Calculate Statistics
#' For each time resolution, prepare the pricing data and test for cointegration for all 98 coin pairs. The date of study is
#' 2017-09-01 to 2017-09-30. This period has exhibited strong mean reversion.
pricing_data_300 <- prepare_data(time_resolution = 300, start_date = "2017-09-01", end_date = "2017-09-30")
pricing_data_900 <- prepare_data(time_resolution = 900, start_date = "2017-09-01", end_date = "2017-09-30")
pricing_data_1800 <- prepare_data(time_resolution = 1800, start_date = "2017-09-01", end_date = "2017-09-30")
pricing_data_7200 <- prepare_data(time_resolution = 7200, start_date = "2017-09-01", end_date = "2017-09-30")
pricing_data_14400 <- prepare_data(time_resolution = 14400, start_date = "2017-09-01", end_date = "2017-09-30")
pricing_data_86400 <- prepare_data(time_resolution = 86400, start_date = "2017-09-01", end_date = "2017-09-30")
coin_pairs_300 <- calculate_statistics(pricing_data = pricing_data_300, coin_pairs = create_coins())
coin_pairs_900 <- calculate_statistics(pricing_data = pricing_data_900, coin_pairs = create_coins())
coin_pairs_1800 <- calculate_statistics(pricing_data = pricing_data_1800, coin_pairs = create_coins())
coin_pairs_7200 <- calculate_statistics(pricing_data = pricing_data_7200, coin_pairs = create_coins())
coin_pairs_14400 <- calculate_statistics(pricing_data = pricing_data_14400, coin_pairs = create_coins())
coin_pairs_86400 <- calculate_statistics(pricing_data = pricing_data_86400, coin_pairs = create_coins())

#' # 3. Plot Time Resolution 300
#' For each time resolution, plot the top 10 coins ranked by the ADF test statistic.
# NOTE(review): the loops below assume calculate_statistics() returns at least
# 10 rows already sorted by the ADF statistic -- confirm in
# 04-search-cointegration.R.
for (i in 1:10) {
  coin_y <- coin_pairs_300[["coin_y"]][i]
  coin_x <- coin_pairs_300[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_300,
             coin_y = pricing_data_300[[coin_y]],
             coin_x = pricing_data_300[[coin_x]])
}

#' # 4. Plot Time Resolution 900
for (i in 1:10) {
  coin_y <- coin_pairs_900[["coin_y"]][i]
  coin_x <- coin_pairs_900[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_900,
             coin_y = pricing_data_900[[coin_y]],
             coin_x = pricing_data_900[[coin_x]])
}

#' # 5. Plot Time Resolution 1800
for (i in 1:10) {
  coin_y <- coin_pairs_1800[["coin_y"]][i]
  coin_x <- coin_pairs_1800[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_1800,
             coin_y = pricing_data_1800[[coin_y]],
             coin_x = pricing_data_1800[[coin_x]])
}

#' # 6. Plot Time Resolution 7200
for (i in 1:10) {
  coin_y <- coin_pairs_7200[["coin_y"]][i]
  coin_x <- coin_pairs_7200[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_7200,
             coin_y = pricing_data_7200[[coin_y]],
             coin_x = pricing_data_7200[[coin_x]])
}

#' # 7. Plot Time Resolution 14400
for (i in 1:10) {
  coin_y <- coin_pairs_14400[["coin_y"]][i]
  coin_x <- coin_pairs_14400[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_14400,
             coin_y = pricing_data_14400[[coin_y]],
             coin_x = pricing_data_14400[[coin_x]])
}

#' # 8. Plot Time Resolution 86400
for (i in 1:10) {
  coin_y <- coin_pairs_86400[["coin_y"]][i]
  coin_x <- coin_pairs_86400[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_86400,
             coin_y = pricing_data_86400[[coin_y]],
             coin_x = pricing_data_86400[[coin_x]])
}

#' # 9. BTC_XEM and BTC_LTC Coin Pair
#' An examination of BTC_XEM and BTC_LTC across time resolutions.
plot_coins(df = pricing_data_300,
           coin_y = pricing_data_300[["BTC_XEM"]],
           coin_x = pricing_data_300[["BTC_LTC"]])
plot_coins(df = pricing_data_900,
           coin_y = pricing_data_900[["BTC_XEM"]],
           coin_x = pricing_data_900[["BTC_LTC"]])
plot_coins(df = pricing_data_1800,
           coin_y = pricing_data_1800[["BTC_XEM"]],
           coin_x = pricing_data_1800[["BTC_LTC"]])
plot_coins(df = pricing_data_7200,
           coin_y = pricing_data_7200[["BTC_XEM"]],
           coin_x = pricing_data_7200[["BTC_LTC"]])
plot_coins(df = pricing_data_14400,
           coin_y = pricing_data_14400[["BTC_XEM"]],
           coin_x = pricing_data_14400[["BTC_LTC"]])
plot_coins(df = pricing_data_86400,
           coin_y = pricing_data_86400[["BTC_XEM"]],
           coin_x = pricing_data_86400[["BTC_LTC"]])

#' # 10. USDT_REP and USDT_BTC Coin Pair
#' An examination of USDT_REP and USDT_BTC across time resolutions.
plot_coins(df = pricing_data_300,
           coin_y = pricing_data_300[["USDT_REP"]],
           coin_x = pricing_data_300[["USDT_BTC"]])
plot_coins(df = pricing_data_900,
           coin_y = pricing_data_900[["USDT_REP"]],
           coin_x = pricing_data_900[["USDT_BTC"]])
plot_coins(df = pricing_data_1800,
           coin_y = pricing_data_1800[["USDT_REP"]],
           coin_x = pricing_data_1800[["USDT_BTC"]])
plot_coins(df = pricing_data_7200,
           coin_y = pricing_data_7200[["USDT_REP"]],
           coin_x = pricing_data_7200[["USDT_BTC"]])
plot_coins(df = pricing_data_14400,
           coin_y = pricing_data_14400[["USDT_REP"]],
           coin_x = pricing_data_14400[["USDT_BTC"]])
plot_coins(df = pricing_data_86400,
           coin_y = pricing_data_86400[["USDT_REP"]],
           coin_x = pricing_data_86400[["USDT_BTC"]])

#' # 11. USDT_XMR and USDT_LTC Coin Pair
#' An examination of USDT_XMR and USDT_LTC across time resolutions.
plot_coins(df = pricing_data_300,
           coin_y = pricing_data_300[["USDT_XMR"]],
           coin_x = pricing_data_300[["USDT_LTC"]])
plot_coins(df = pricing_data_900,
           coin_y = pricing_data_900[["USDT_XMR"]],
           coin_x = pricing_data_900[["USDT_LTC"]])
plot_coins(df = pricing_data_1800,
           coin_y = pricing_data_1800[["USDT_XMR"]],
           coin_x = pricing_data_1800[["USDT_LTC"]])
plot_coins(df = pricing_data_7200,
           coin_y = pricing_data_7200[["USDT_XMR"]],
           coin_x = pricing_data_7200[["USDT_LTC"]])
plot_coins(df = pricing_data_14400,
           coin_y = pricing_data_14400[["USDT_XMR"]],
           coin_x = pricing_data_14400[["USDT_LTC"]])
plot_coins(df = pricing_data_86400,
           coin_y = pricing_data_86400[["USDT_XMR"]],
           coin_x = pricing_data_86400[["USDT_LTC"]])
| /scripts/06-test-cointegration.R | no_license | luyongxu/pairstrading | R | false | false | 7,092 | r | #' ---
#' ---
#' title: "Test Optimal Combination"
#' author: "Kevin Lu"
#' date: '`r format(Sys.Date(), "%B %d, %Y")`'
#' output:
#'   html_document:
#'     theme: default
#'     highlight: tango
#'     toc: true
#'     toc_float: true
#'     number_sections: false
#'     fig_width: 8
#'     fig_height: 5
#' ---
# NOTE(review): the document title says "Test Optimal Combination" but the
# script tests cointegration across time resolutions -- consider renaming.

#' # 1. Source Search Cointegration Combinations
#' Loads the helpers used below: `prepare_data()`, `calculate_statistics()`,
#' `create_coins()`, and `plot_coins()`.
source("./scripts/04-search-cointegration.R", echo = FALSE, print.eval = FALSE)

#' # 2. Prepare Data and Calculate Statistics
#' For each time resolution, prepare the pricing data and test for cointegration for all 98 coin pairs. The date of study is
#' 2017-09-01 to 2017-09-30. This period has exhibited strong mean reversion.
pricing_data_300 <- prepare_data(time_resolution = 300, start_date = "2017-09-01", end_date = "2017-09-30")
pricing_data_900 <- prepare_data(time_resolution = 900, start_date = "2017-09-01", end_date = "2017-09-30")
pricing_data_1800 <- prepare_data(time_resolution = 1800, start_date = "2017-09-01", end_date = "2017-09-30")
pricing_data_7200 <- prepare_data(time_resolution = 7200, start_date = "2017-09-01", end_date = "2017-09-30")
pricing_data_14400 <- prepare_data(time_resolution = 14400, start_date = "2017-09-01", end_date = "2017-09-30")
pricing_data_86400 <- prepare_data(time_resolution = 86400, start_date = "2017-09-01", end_date = "2017-09-30")
coin_pairs_300 <- calculate_statistics(pricing_data = pricing_data_300, coin_pairs = create_coins())
coin_pairs_900 <- calculate_statistics(pricing_data = pricing_data_900, coin_pairs = create_coins())
coin_pairs_1800 <- calculate_statistics(pricing_data = pricing_data_1800, coin_pairs = create_coins())
coin_pairs_7200 <- calculate_statistics(pricing_data = pricing_data_7200, coin_pairs = create_coins())
coin_pairs_14400 <- calculate_statistics(pricing_data = pricing_data_14400, coin_pairs = create_coins())
coin_pairs_86400 <- calculate_statistics(pricing_data = pricing_data_86400, coin_pairs = create_coins())

#' # 3. Plot Time Resolution 300
#' For each time resolution, plot the top 10 coins ranked by the ADF test statistic.
# NOTE(review): the loops below assume calculate_statistics() returns at least
# 10 rows already sorted by the ADF statistic -- confirm in
# 04-search-cointegration.R.
for (i in 1:10) {
  coin_y <- coin_pairs_300[["coin_y"]][i]
  coin_x <- coin_pairs_300[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_300,
             coin_y = pricing_data_300[[coin_y]],
             coin_x = pricing_data_300[[coin_x]])
}

#' # 4. Plot Time Resolution 900
for (i in 1:10) {
  coin_y <- coin_pairs_900[["coin_y"]][i]
  coin_x <- coin_pairs_900[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_900,
             coin_y = pricing_data_900[[coin_y]],
             coin_x = pricing_data_900[[coin_x]])
}

#' # 5. Plot Time Resolution 1800
for (i in 1:10) {
  coin_y <- coin_pairs_1800[["coin_y"]][i]
  coin_x <- coin_pairs_1800[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_1800,
             coin_y = pricing_data_1800[[coin_y]],
             coin_x = pricing_data_1800[[coin_x]])
}

#' # 6. Plot Time Resolution 7200
for (i in 1:10) {
  coin_y <- coin_pairs_7200[["coin_y"]][i]
  coin_x <- coin_pairs_7200[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_7200,
             coin_y = pricing_data_7200[[coin_y]],
             coin_x = pricing_data_7200[[coin_x]])
}

#' # 7. Plot Time Resolution 14400
for (i in 1:10) {
  coin_y <- coin_pairs_14400[["coin_y"]][i]
  coin_x <- coin_pairs_14400[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_14400,
             coin_y = pricing_data_14400[[coin_y]],
             coin_x = pricing_data_14400[[coin_x]])
}

#' # 8. Plot Time Resolution 86400
for (i in 1:10) {
  coin_y <- coin_pairs_86400[["coin_y"]][i]
  coin_x <- coin_pairs_86400[["coin_x"]][i]
  print(str_c("Generating plots for ", coin_y, " and ", coin_x, "."))
  plot_coins(df = pricing_data_86400,
             coin_y = pricing_data_86400[[coin_y]],
             coin_x = pricing_data_86400[[coin_x]])
}

#' # 9. BTC_XEM and BTC_LTC Coin Pair
#' An examination of BTC_XEM and BTC_LTC across time resolutions.
plot_coins(df = pricing_data_300,
           coin_y = pricing_data_300[["BTC_XEM"]],
           coin_x = pricing_data_300[["BTC_LTC"]])
plot_coins(df = pricing_data_900,
           coin_y = pricing_data_900[["BTC_XEM"]],
           coin_x = pricing_data_900[["BTC_LTC"]])
plot_coins(df = pricing_data_1800,
           coin_y = pricing_data_1800[["BTC_XEM"]],
           coin_x = pricing_data_1800[["BTC_LTC"]])
plot_coins(df = pricing_data_7200,
           coin_y = pricing_data_7200[["BTC_XEM"]],
           coin_x = pricing_data_7200[["BTC_LTC"]])
plot_coins(df = pricing_data_14400,
           coin_y = pricing_data_14400[["BTC_XEM"]],
           coin_x = pricing_data_14400[["BTC_LTC"]])
plot_coins(df = pricing_data_86400,
           coin_y = pricing_data_86400[["BTC_XEM"]],
           coin_x = pricing_data_86400[["BTC_LTC"]])

#' # 10. USDT_REP and USDT_BTC Coin Pair
#' An examination of USDT_REP and USDT_BTC across time resolutions.
plot_coins(df = pricing_data_300,
           coin_y = pricing_data_300[["USDT_REP"]],
           coin_x = pricing_data_300[["USDT_BTC"]])
plot_coins(df = pricing_data_900,
           coin_y = pricing_data_900[["USDT_REP"]],
           coin_x = pricing_data_900[["USDT_BTC"]])
plot_coins(df = pricing_data_1800,
           coin_y = pricing_data_1800[["USDT_REP"]],
           coin_x = pricing_data_1800[["USDT_BTC"]])
plot_coins(df = pricing_data_7200,
           coin_y = pricing_data_7200[["USDT_REP"]],
           coin_x = pricing_data_7200[["USDT_BTC"]])
plot_coins(df = pricing_data_14400,
           coin_y = pricing_data_14400[["USDT_REP"]],
           coin_x = pricing_data_14400[["USDT_BTC"]])
plot_coins(df = pricing_data_86400,
           coin_y = pricing_data_86400[["USDT_REP"]],
           coin_x = pricing_data_86400[["USDT_BTC"]])

#' # 11. USDT_XMR and USDT_LTC Coin Pair
#' An examination of USDT_XMR and USDT_LTC across time resolutions.
plot_coins(df = pricing_data_300,
           coin_y = pricing_data_300[["USDT_XMR"]],
           coin_x = pricing_data_300[["USDT_LTC"]])
plot_coins(df = pricing_data_900,
           coin_y = pricing_data_900[["USDT_XMR"]],
           coin_x = pricing_data_900[["USDT_LTC"]])
plot_coins(df = pricing_data_1800,
           coin_y = pricing_data_1800[["USDT_XMR"]],
           coin_x = pricing_data_1800[["USDT_LTC"]])
plot_coins(df = pricing_data_7200,
           coin_y = pricing_data_7200[["USDT_XMR"]],
           coin_x = pricing_data_7200[["USDT_LTC"]])
plot_coins(df = pricing_data_14400,
           coin_y = pricing_data_14400[["USDT_XMR"]],
           coin_x = pricing_data_14400[["USDT_LTC"]])
plot_coins(df = pricing_data_86400,
           coin_y = pricing_data_86400[["USDT_XMR"]],
           coin_x = pricing_data_86400[["USDT_LTC"]])
|
# Plot the 10 most enriched and 10 most depleted Reactome gene sets (by GSEA
# enrichment score, ES) for each edgeR contrast, and combine the panels into
# a single PDF.
library(ggplot2)
library(grid)
library(gridExtra)
library(cowplot)
library(ggpubr)

# NOTE(review): absolute, machine-specific working directory -- consider
# parameterising this (or using relative paths) so the script is portable.
setwd("W:/Evangelyn_Sim/Transcriptome_chromatin_human/Sequencing_ATAC_RNA/20190515_hRNAseq_MF/R/5.gsea/allgenes/")

files <- list.files(path = "./USED/", pattern = ".*reactome.xls", full.names = TRUE)
mx <- lapply(files, read.delim, header = TRUE)

# For each GSEA result table: tidy the gene-set names, keep the 10 highest-ES
# (up-regulated, blue) and 10 lowest-ES (down-regulated, pink) sets, and build
# a dot plot. The plot replaces the data frame in mx[[i]].
for (i in seq_along(mx)) {
  mx[[i]]$GeneSetName <- gsub("REACTOME_", "", mx[[i]]$GeneSetName)
  # Shorten two very long Reactome set names so the axis labels stay readable.
  mx[[i]]$GeneSetName <- gsub("RESPIRATORY_ELECTRON_TRANSPORT_ATP_SYNTHESIS_BY_CHEMIOSMOTIC_COUPLING_AND_HEAT_PRODUCTION_BY_UNCOUPLING_PROTEINS_",
                              "RESPIRATORY_ELECTRON_TRANSPORT_ATP_SYNTHESIS", mx[[i]]$GeneSetName)
  mx[[i]]$GeneSetName <- gsub("NEF_MEDIATES_DOWN_MODULATION_OF_CELL_SURFACE_RECEPTORS_BY_RECRUITING_THEM_TO_CLATHRIN_ADAPTERS",
                              "NEF_MEDIATES_DOWN_MODULATION_OF_CELL_SURFACE_RECEPTORS", mx[[i]]$GeneSetName)

  # Top 10 up-regulated sets (largest ES), re-sorted ascending for plotting.
  # seq_len(min(...)) avoids NA rows when a table has fewer than 10 sets.
  mxRU <- mx[[i]]
  mxRU <- mxRU[order(mxRU$ES, decreasing = TRUE), ]
  mxRU <- mxRU[seq_len(min(10, nrow(mxRU))), ]
  mxRU <- mxRU[order(mxRU$ES), ]
  mxRU$colour <- "#00AFBB"
  mxRU$GeneSetName <- factor(mxRU$GeneSetName, levels = mxRU$GeneSetName)

  # Top 10 down-regulated sets (smallest ES).
  mxRD <- mx[[i]]
  mxRD <- mxRD[order(mxRD$ES), ]
  mxRD <- mxRD[seq_len(min(10, nrow(mxRD))), ]
  mxRD$colour <- "hotpink1"
  mxRD$GeneSetName <- factor(mxRD$GeneSetName, levels = mxRD$GeneSetName)

  ES_all <- rbind(mxRD, mxRU)

  mx[[i]] <- ggplot(ES_all, aes(y = GeneSetName, x = ES)) +
    geom_point(stat = 'identity', alpha = 0.65, shape = 21, color = "black",
               fill = ES_all$colour, aes(size = GeneSetSize)) +
    scale_size_continuous(range = c(1, 3)) +
    theme_classic() +
    # Derive the panel title from the file name by stripping the shared
    # prefix/suffix of the edgeR output files.
    labs(title = gsub("./USED/edgeR_RNA_mina12a13y5y6_|.c2.cp.reactome.xls", "", files[[i]]),
         x = "Enrichment Score", y = "Gene Set Name") +
    theme(plot.title = element_text(size = 3)) +
    theme(axis.text = element_text(size = 3)) +
    theme(axis.title = element_text(size = 5)) +
    theme(legend.text = element_text(size = 2)) +
    theme(legend.title = element_text(size = 2)) +
    theme(legend.position = "none")
}

# Arrange five panels in a hand-picked order on a 3 x 2 grid and write a PDF.
# NOTE(review): the panel indices are hard-coded -- assumes exactly 5 inputs.
multi <- arrangeGrob(mx[[3]], mx[[5]], mx[[2]],
                     mx[[1]], mx[[4]],
                     ncol = 3, nrow = 2)
combined_plot <- as_ggplot(multi)

pdf("ggplot_Reactome_EStop10.pdf", width = 9, height = 5.25)
print(combined_plot)  # explicit print so the page renders under Rscript/source()
dev.off()
| /code/ggplot reac batch_top10.R | no_license | bphipson/Human_Development_snRNAseq | R | false | false | 2,248 | r | library(ggplot2)
# Plot the 10 most enriched and 10 most depleted Reactome gene sets (by GSEA
# enrichment score, ES) for each edgeR contrast, and combine the panels into
# a single PDF.
library(ggplot2)  # restored: this dependency is used throughout below
library(grid)
library(gridExtra)
library(cowplot)
library(ggpubr)

# NOTE(review): absolute, machine-specific working directory -- consider
# parameterising this (or using relative paths) so the script is portable.
setwd("W:/Evangelyn_Sim/Transcriptome_chromatin_human/Sequencing_ATAC_RNA/20190515_hRNAseq_MF/R/5.gsea/allgenes/")

files <- list.files(path = "./USED/", pattern = ".*reactome.xls", full.names = TRUE)
mx <- lapply(files, read.delim, header = TRUE)

# For each GSEA result table: tidy the gene-set names, keep the 10 highest-ES
# (up-regulated, blue) and 10 lowest-ES (down-regulated, pink) sets, and build
# a dot plot. The plot replaces the data frame in mx[[i]].
for (i in seq_along(mx)) {
  mx[[i]]$GeneSetName <- gsub("REACTOME_", "", mx[[i]]$GeneSetName)
  # Shorten two very long Reactome set names so the axis labels stay readable.
  mx[[i]]$GeneSetName <- gsub("RESPIRATORY_ELECTRON_TRANSPORT_ATP_SYNTHESIS_BY_CHEMIOSMOTIC_COUPLING_AND_HEAT_PRODUCTION_BY_UNCOUPLING_PROTEINS_",
                              "RESPIRATORY_ELECTRON_TRANSPORT_ATP_SYNTHESIS", mx[[i]]$GeneSetName)
  mx[[i]]$GeneSetName <- gsub("NEF_MEDIATES_DOWN_MODULATION_OF_CELL_SURFACE_RECEPTORS_BY_RECRUITING_THEM_TO_CLATHRIN_ADAPTERS",
                              "NEF_MEDIATES_DOWN_MODULATION_OF_CELL_SURFACE_RECEPTORS", mx[[i]]$GeneSetName)

  # Top 10 up-regulated sets (largest ES), re-sorted ascending for plotting.
  # seq_len(min(...)) avoids NA rows when a table has fewer than 10 sets.
  mxRU <- mx[[i]]
  mxRU <- mxRU[order(mxRU$ES, decreasing = TRUE), ]
  mxRU <- mxRU[seq_len(min(10, nrow(mxRU))), ]
  mxRU <- mxRU[order(mxRU$ES), ]
  mxRU$colour <- "#00AFBB"
  mxRU$GeneSetName <- factor(mxRU$GeneSetName, levels = mxRU$GeneSetName)

  # Top 10 down-regulated sets (smallest ES).
  mxRD <- mx[[i]]
  mxRD <- mxRD[order(mxRD$ES), ]
  mxRD <- mxRD[seq_len(min(10, nrow(mxRD))), ]
  mxRD$colour <- "hotpink1"
  mxRD$GeneSetName <- factor(mxRD$GeneSetName, levels = mxRD$GeneSetName)

  ES_all <- rbind(mxRD, mxRU)

  mx[[i]] <- ggplot(ES_all, aes(y = GeneSetName, x = ES)) +
    geom_point(stat = 'identity', alpha = 0.65, shape = 21, color = "black",
               fill = ES_all$colour, aes(size = GeneSetSize)) +
    scale_size_continuous(range = c(1, 3)) +
    theme_classic() +
    # Derive the panel title from the file name by stripping the shared
    # prefix/suffix of the edgeR output files.
    labs(title = gsub("./USED/edgeR_RNA_mina12a13y5y6_|.c2.cp.reactome.xls", "", files[[i]]),
         x = "Enrichment Score", y = "Gene Set Name") +
    theme(plot.title = element_text(size = 3)) +
    theme(axis.text = element_text(size = 3)) +
    theme(axis.title = element_text(size = 5)) +
    theme(legend.text = element_text(size = 2)) +
    theme(legend.title = element_text(size = 2)) +
    theme(legend.position = "none")
}

# Arrange five panels in a hand-picked order on a 3 x 2 grid and write a PDF.
# NOTE(review): the panel indices are hard-coded -- assumes exactly 5 inputs.
multi <- arrangeGrob(mx[[3]], mx[[5]], mx[[2]],
                     mx[[1]], mx[[4]],
                     ncol = 3, nrow = 2)
combined_plot <- as_ggplot(multi)

pdf("ggplot_Reactome_EStop10.pdf", width = 9, height = 5.25)
print(combined_plot)  # explicit print so the page renders under Rscript/source()
dev.off()
|
#' Autocorrelation Function
#'
#' @export
#' @param input A tsibble object (class \code{tbl_ts})
#' @param var A character vector, optional, defines the variable names to calculate the ACF for when the input holds multiple time series; defaults to all numeric variables
#' @param max.lag An integer, defines the maximum number of lags to be used; defaults to twice the series frequency, capped at the number of observations
#' @param ci A numeric between 0 and 1, defines the coverage probability for the confidence interval (by default set to 0.95)
#' @param na.rm A boolean, if set to TRUE will ignore missing values
#' @param width A numeric, defines the width of the plot's autocorrelation bars
#' @param plot A boolean, if set to TRUE will plot the ACF results
#' @return A named list with one element per variable, each holding \code{acf}
#'   (a data frame of lag, acf, and CI bounds) and \code{plot} (a plotly
#'   object). Returned invisibly when \code{plot = TRUE}.
#' @description The tsACF function calculates the estimated autocorrelation between a series and its past lags
#' @examples
#' data(ny_gas)
#'
#' tsACF(ny_gas)
tsACF <- function(input,
                  var = NULL,
                  max.lag = NULL,
                  ci = 0.95,
                  na.rm = FALSE,
                  width = 0.01,
                  plot = TRUE){
  # Bind the magrittr pipe locally so the function works without attaching magrittr
  `%>%` <- magrittr::`%>%`

  # Error handling
  if(!base::is.logical(na.rm)){
    stop("The 'na.rm' argument must be boolean")
  }

  if(!base::is.logical(plot)){
    stop("The 'plot' argument must be boolean")
  }

  # NOTE(review): tsibble::is.tsibble() is soft-deprecated in newer tsibble
  # releases in favour of is_tsibble() -- confirm the supported version range.
  if(!tsibble::is.tsibble(input)){
    stop("The input object is not a 'tbl_ts' class")
  }

  # An out-of-range ci is replaced (with a warning) rather than raising an error
  if (ci > 1 | ci <= 0) {
    warning("The 'ci' value is out of bound (0-1], the default option of 0.95 will be used")
    ci <- 0.95
  }

  # Default lag depth: two seasonal cycles, capped at the number of observations.
  # NOTE(review): stats::frequency() may not reflect a tsibble's interval -- confirm.
  if(base::is.null(max.lag)){
    max.lag <- base::round(stats::frequency(input) * 2)
    if(max.lag > base::nrow(input)){
      max.lag <- base::nrow(input)
    }
  } else if(!base::is.numeric(max.lag) || max.lag %% 1 != 0){
    stop("The value of the 'max.lag' argument must be integer")
  } else if(max.lag < 1){
    stop("The value of the 'max.lag' argument must be greater than 1")
  }

  # Resolve the variables to analyse: validate the user's selection,
  # otherwise default to every numeric column of the input
  if(!base::is.null(var)){
    if(!base::all(var %in% base::names(input))){
      stop("The variables names on the 'var' argument don't match the variables names of the input object")
    } else if(!sapply(input[,var], base::is.numeric) %>% base::all){
      stop("At least one of the selected variables are not numeric")
    }
  } else {
    if(!base::any(sapply(input, base::is.numeric))){
      stop("The input object doesn't have any numeric variables")
    } else {
      var <- base::names(input)[base::which(sapply(input, base::is.numeric))]
    }
  }

  output <- base::list()

  for(i in base::seq_along(var)){
    y_mean <- s <- ci_value <- acf <- p <- NULL
    y_mean <- base::mean(input[[var[i]]], na.rm = na.rm)

    # Sample autocorrelation at lag k:
    #   sum((y_t - ybar) * (y_{t+k} - ybar)) / sum((y_t - ybar)^2)
    # NOTE(review): s grows via c() each iteration -- preallocating would be
    # faster for large max.lag. Also, when k == nrow(input) the index
    # 1:(nrow(input) - k) evaluates to c(1, 0); confirm max.lag < nrow(input).
    for(k in 0:max.lag){
      s <- c(s,
             base::sum((input[[var[i]]][1:(base::nrow(input) - k)] - y_mean) *
                         (input[[var[i]]][(1 + k):(base::nrow(input))] - y_mean), na.rm = na.rm) /
               base::sum((input[[var[i]]] - y_mean)^2, na.rm = na.rm))
    }

    # Large-sample confidence bound for white noise: z_{(1 + ci) / 2} / sqrt(n)
    ci_value <- stats::qnorm((1 + ci)/2)/sqrt(base::nrow(input))
    acf <- base::data.frame(lag = 0:max.lag ,
                            acf = s,
                            ci_lower = - ci_value,
                            ci_upper = ci_value)

    if(base::length(var) == 1){
      # Single series: a stand-alone plot with a full title
      p <- plotly::plot_ly(data = acf) %>%
        plotly::add_trace(x = ~ lag, y = ~ acf, type = "bar", width = width, showlegend = FALSE,
                          marker = list(color = "#00526d", line = list(color = "#00526d")), name = "Autocorrelation") %>%
        plotly::add_lines(x = ~ lag, y = ~ ci_upper, line = list(dash = "dash", color = "red", width = 1), showlegend = FALSE, name = "CI Upper") %>%
        plotly::add_lines(x = ~ lag, y = ~ ci_lower, line = list(dash = "dash", color = "red", width = 1), showlegend = FALSE, name = "CI Lower") %>%
        plotly::layout(yaxis = list(title = "ACF"),
                       xaxis = list(title = "Lag"),
                       title = base::paste("Autocorrelation - ", var[i], sep = ""),
                       hovermode = "compare")
    } else if(base::length(var) > 1){
      # Multiple series: label each panel with an annotation; the combined
      # title is added later by plotly::subplot below
      p <- plotly::plot_ly(data = acf) %>%
        plotly::add_trace(x = ~ lag, y = ~ acf, type = "bar", width = width, showlegend = FALSE,
                          marker = list(color = "#00526d", line = list(color = "#00526d")), name = "Autocorrelation") %>%
        plotly::add_lines(x = ~ lag, y = ~ ci_upper, line = list(dash = "dash", color = "red", width = 1), showlegend = FALSE, name = "CI Upper") %>%
        plotly::add_lines(x = ~ lag, y = ~ ci_lower, line = list(dash = "dash", color = "red", width = 1), showlegend = FALSE, name = "CI Lower") %>%
        plotly::layout(yaxis = list(title = "ACF"),
                       xaxis = list(title = "Lag"),
                       annotations = list(text = var[i],
                                          showarrow = FALSE,
                                          yref = "paper",
                                          yanchor = "bottom",
                                          xanchor = "center",
                                          align = "center",
                                          x = max.lag / 2,
                                          y = 0.9),
                       hovermode = "compare")
    }
    output[[var[i]]]$acf <- acf

    # NOTE(review): both branches below store p; only the per-variable
    # print(p) differs, and it is superseded by the combined print further down.
    if(plot){
      print(p)
      output[[var[i]]]$plot <- p
      # base::invisible(output)
    } else{
      output[[var[i]]]$plot <- p
      #base::invisible(output)
    }
  }

  if(plot){
    if(base::length(var) > 1){
      # NOTE(review): nrows = ceiling(length(var)) is a no-op ceiling on an
      # integer -- was ceiling(length(var) / 2) intended? As written, each
      # panel is placed on its own row.
      base::print(plotly::subplot(output[names(output)] %>% purrr::map("plot"), nrows = base::ceiling(base::length(var)),
                                  titleY = TRUE, titleX = TRUE, shareY = TRUE, shareX = TRUE) %>%
                    plotly::layout(title = "Autocorrelation Plot"))
    } else if(base::length(var) == 1){
      base::print(output[[var]]$plot)
    }
    return(base::invisible(output))
  } else {
    return(output)
  }
}
| /R/utility_functions.R | permissive | naftalic/forecastLM | R | false | false | 5,865 | r | #' Autocorrelation Function
#' Autocorrelation Function
#'
#' @export
#' @param input A tsibble object (class \code{tbl_ts})
#' @param var A character vector, optional, defines the variable names to calculate the ACF for when the input holds multiple time series; defaults to all numeric variables
#' @param max.lag An integer, defines the maximum number of lags to be used; defaults to twice the series frequency, capped at the number of observations
#' @param ci A numeric between 0 and 1, defines the coverage probability for the confidence interval (by default set to 0.95)
#' @param na.rm A boolean, if set to TRUE will ignore missing values
#' @param width A numeric, defines the width of the plot's autocorrelation bars
#' @param plot A boolean, if set to TRUE will plot the ACF results
#' @return A named list with one element per variable, each holding \code{acf}
#'   (a data frame of lag, acf, and CI bounds) and \code{plot} (a plotly
#'   object). Returned invisibly when \code{plot = TRUE}.
#' @description The tsACF function calculates the estimated autocorrelation between a series and its past lags
#' @examples
#' data(ny_gas)
#'
#' tsACF(ny_gas)
tsACF <- function(input,
                  var = NULL,
                  max.lag = NULL,
                  ci = 0.95,
                  na.rm = FALSE,
                  width = 0.01,
                  plot = TRUE){
  # Bind the magrittr pipe locally so the function works without attaching magrittr
  `%>%` <- magrittr::`%>%`

  # Error handling
  if(!base::is.logical(na.rm)){
    stop("The 'na.rm' argument must be boolean")
  }

  if(!base::is.logical(plot)){
    stop("The 'plot' argument must be boolean")
  }

  # NOTE(review): tsibble::is.tsibble() is soft-deprecated in newer tsibble
  # releases in favour of is_tsibble() -- confirm the supported version range.
  if(!tsibble::is.tsibble(input)){
    stop("The input object is not a 'tbl_ts' class")
  }

  # An out-of-range ci is replaced (with a warning) rather than raising an error
  if (ci > 1 | ci <= 0) {
    warning("The 'ci' value is out of bound (0-1], the default option of 0.95 will be used")
    ci <- 0.95
  }

  # Default lag depth: two seasonal cycles, capped at the number of observations.
  # NOTE(review): stats::frequency() may not reflect a tsibble's interval -- confirm.
  if(base::is.null(max.lag)){
    max.lag <- base::round(stats::frequency(input) * 2)
    if(max.lag > base::nrow(input)){
      max.lag <- base::nrow(input)
    }
  } else if(!base::is.numeric(max.lag) || max.lag %% 1 != 0){
    stop("The value of the 'max.lag' argument must be integer")
  } else if(max.lag < 1){
    stop("The value of the 'max.lag' argument must be greater than 1")
  }

  # Resolve the variables to analyse: validate the user's selection,
  # otherwise default to every numeric column of the input
  if(!base::is.null(var)){
    if(!base::all(var %in% base::names(input))){
      stop("The variables names on the 'var' argument don't match the variables names of the input object")
    } else if(!sapply(input[,var], base::is.numeric) %>% base::all){
      stop("At least one of the selected variables are not numeric")
    }
  } else {
    if(!base::any(sapply(input, base::is.numeric))){
      stop("The input object doesn't have any numeric variables")
    } else {
      var <- base::names(input)[base::which(sapply(input, base::is.numeric))]
    }
  }

  output <- base::list()

  for(i in base::seq_along(var)){
    y_mean <- s <- ci_value <- acf <- p <- NULL
    y_mean <- base::mean(input[[var[i]]], na.rm = na.rm)

    # Sample autocorrelation at lag k:
    #   sum((y_t - ybar) * (y_{t+k} - ybar)) / sum((y_t - ybar)^2)
    # NOTE(review): s grows via c() each iteration -- preallocating would be
    # faster for large max.lag. Also, when k == nrow(input) the index
    # 1:(nrow(input) - k) evaluates to c(1, 0); confirm max.lag < nrow(input).
    for(k in 0:max.lag){
      s <- c(s,
             base::sum((input[[var[i]]][1:(base::nrow(input) - k)] - y_mean) *
                         (input[[var[i]]][(1 + k):(base::nrow(input))] - y_mean), na.rm = na.rm) /
               base::sum((input[[var[i]]] - y_mean)^2, na.rm = na.rm))
    }

    # Large-sample confidence bound for white noise: z_{(1 + ci) / 2} / sqrt(n)
    ci_value <- stats::qnorm((1 + ci)/2)/sqrt(base::nrow(input))
    acf <- base::data.frame(lag = 0:max.lag ,
                            acf = s,
                            ci_lower = - ci_value,
                            ci_upper = ci_value)

    if(base::length(var) == 1){
      # Single series: a stand-alone plot with a full title
      p <- plotly::plot_ly(data = acf) %>%
        plotly::add_trace(x = ~ lag, y = ~ acf, type = "bar", width = width, showlegend = FALSE,
                          marker = list(color = "#00526d", line = list(color = "#00526d")), name = "Autocorrelation") %>%
        plotly::add_lines(x = ~ lag, y = ~ ci_upper, line = list(dash = "dash", color = "red", width = 1), showlegend = FALSE, name = "CI Upper") %>%
        plotly::add_lines(x = ~ lag, y = ~ ci_lower, line = list(dash = "dash", color = "red", width = 1), showlegend = FALSE, name = "CI Lower") %>%
        plotly::layout(yaxis = list(title = "ACF"),
                       xaxis = list(title = "Lag"),
                       title = base::paste("Autocorrelation - ", var[i], sep = ""),
                       hovermode = "compare")
    } else if(base::length(var) > 1){
      # Multiple series: label each panel with an annotation; the combined
      # title is added later by plotly::subplot below
      p <- plotly::plot_ly(data = acf) %>%
        plotly::add_trace(x = ~ lag, y = ~ acf, type = "bar", width = width, showlegend = FALSE,
                          marker = list(color = "#00526d", line = list(color = "#00526d")), name = "Autocorrelation") %>%
        plotly::add_lines(x = ~ lag, y = ~ ci_upper, line = list(dash = "dash", color = "red", width = 1), showlegend = FALSE, name = "CI Upper") %>%
        plotly::add_lines(x = ~ lag, y = ~ ci_lower, line = list(dash = "dash", color = "red", width = 1), showlegend = FALSE, name = "CI Lower") %>%
        plotly::layout(yaxis = list(title = "ACF"),
                       xaxis = list(title = "Lag"),
                       annotations = list(text = var[i],
                                          showarrow = FALSE,
                                          yref = "paper",
                                          yanchor = "bottom",
                                          xanchor = "center",
                                          align = "center",
                                          x = max.lag / 2,
                                          y = 0.9),
                       hovermode = "compare")
    }
    output[[var[i]]]$acf <- acf

    # NOTE(review): both branches below store p; only the per-variable
    # print(p) differs, and it is superseded by the combined print further down.
    if(plot){
      print(p)
      output[[var[i]]]$plot <- p
      # base::invisible(output)
    } else{
      output[[var[i]]]$plot <- p
      #base::invisible(output)
    }
  }

  if(plot){
    if(base::length(var) > 1){
      # NOTE(review): nrows = ceiling(length(var)) is a no-op ceiling on an
      # integer -- was ceiling(length(var) / 2) intended? As written, each
      # panel is placed on its own row.
      base::print(plotly::subplot(output[names(output)] %>% purrr::map("plot"), nrows = base::ceiling(base::length(var)),
                                  titleY = TRUE, titleX = TRUE, shareY = TRUE, shareX = TRUE) %>%
                    plotly::layout(title = "Autocorrelation Plot"))
    } else if(base::length(var) == 1){
      base::print(output[[var]]$plot)
    }
    return(base::invisible(output))
  } else {
    return(output)
  }
}
|
svc <- paws::iotsitewise()
test_that("describe_default_encryption_configuration", {
expect_error(svc$describe_default_encryption_configuration(), NA)
})
test_that("describe_logging_options", {
expect_error(svc$describe_logging_options(), NA)
})
test_that("list_access_policies", {
expect_error(svc$list_access_policies(), NA)
})
test_that("list_asset_models", {
expect_error(svc$list_asset_models(), NA)
})
test_that("list_assets", {
expect_error(svc$list_assets(), NA)
})
test_that("list_gateways", {
expect_error(svc$list_gateways(), NA)
})
test_that("list_portals", {
expect_error(svc$list_portals(), NA)
})
| /paws/tests/testthat/test_iotsitewise.R | permissive | TWarczak/paws | R | false | false | 632 | r | svc <- paws::iotsitewise()
test_that("describe_default_encryption_configuration", {
expect_error(svc$describe_default_encryption_configuration(), NA)
})
test_that("describe_logging_options", {
expect_error(svc$describe_logging_options(), NA)
})
test_that("list_access_policies", {
expect_error(svc$list_access_policies(), NA)
})
test_that("list_asset_models", {
expect_error(svc$list_asset_models(), NA)
})
test_that("list_assets", {
expect_error(svc$list_assets(), NA)
})
test_that("list_gateways", {
expect_error(svc$list_gateways(), NA)
})
test_that("list_portals", {
expect_error(svc$list_portals(), NA)
})
|
\name{StatComp20008-package}
\alias{StatComp20008-package}
\alias{StatComp20008}
\docType{package}
\title{
Estimate Bart Simpson density by histogram and compare different bins.
}
\description{
Estimate Bart Simpson density f(x)=1/2 phi(x;0,1)+1/10 sum_{j=0}^4 phi(x;j/2-1,1/10) by histogram and naive density estimator using different bandwidth.
}
\details{
Function BS(x) draws the true density of the Bart Simpson density function, and function HBS(n,h) estimates it by histogram from n random points generated from the BS density with bandwidth h. NB(x,y,h) uses the naive estimator, with x being the points at which the density is estimated, y being samples generated from the BS density, and h being the bandwidth.
}
\author{
Yangshuhua.
Maintainer: Yangshuhua <shuashua0608@mail.ustc.edu.cn>
}
\references{
https://www.bb.ustc.edu.cn/bbcswebdav/pid-66546-dt-content-rid-7067954_1/courses/STAT6125P.01.2020FA/Lec7.pdf
----Nonparametric Statistics
}
\keyword{ package }
| /man/StatComp20008-package.Rd | permissive | shuashua0608/StatComp20008 | R | false | false | 928 | rd | \name{StatComp20008-package}
\alias{StatComp20008-package}
\alias{StatComp20008}
\docType{package}
\title{
Estimate Bart Simpson density by histogram and compare different bins.
}
\description{
Estimate Bart Simpson density f(x)=1/2 phi(x;0,1)+1/10 sum_{j=0}^4 phi(x;j/2-1,1/10) by histogram and naive density estimator using different bandwidth.
}
\details{
Function BS(x) draws the true density of the Bart Simpson density function, and function HBS(n,h) estimates it by histogram from n random points generated from the BS density with bandwidth h. NB(x,y,h) uses the naive estimator, with x being the points at which the density is estimated, y being samples generated from the BS density, and h being the bandwidth.
}
\author{
Yangshuhua.
Maintainer: Yangshuhua <shuashua0608@mail.ustc.edu.cn>
}
\references{
https://www.bb.ustc.edu.cn/bbcswebdav/pid-66546-dt-content-rid-7067954_1/courses/STAT6125P.01.2020FA/Lec7.pdf
----Nonparametric Statistics
}
\keyword{ package }
|
#-----------------------Code Description---------------------------------------#
# Notes:
# ver1.0, data: 20171217, by MaoY
#
# Description: Analyse the Mawan ZDB (sic -- this script handles the ZDA
# scenario) driving-simulator runs: trajectory, speed, acceleration,
# braking and throttle behaviour.
#------------------------------------------------------------------------------#
# Source the data-import script DataInput.R (expected to create df.dsdata --
# TODO confirm against that script).
source(file = "E:/R/MaWan/MawanDrivingSimulatorDataAnalysis/DataInput.R", encoding = "utf-8")
# Source the shared helper functions in Functions.R.
source(file = "E:/R/MaWan/MawanDrivingSimulatorDataAnalysis/Functions.R", encoding = "utf-8")
library(ggplot2) # plotting package
# Split the data: keep only the ZDA scenario, then split by vehicle type.
df.zda <- subset(x = df.dsdata, Scen == "ZDA") # ZDA scenario data
df.zdasedan <- subset(x = df.zda, dsVehicleType == "Sedan") # ZDA sedan runs
df.zdatruck <- subset(x = df.zda, dsVehicleType == "Truck") # ZDA truck runs
# ZDA sedan metric plots (sections 1-6) ----
# The four sedan plots (speed, acceleration, brake pedal, gas pedal) were
# copy-pasted variants of one ggplot template: same x axis, same tunnel /
# transition / deceleration annotations, same theme.  Build them through a
# single helper so the shared layout is defined once.
#
# yvar       : column of df.zdasedan plotted on the y axis (string)
# ylab       : y-axis title
# ylim       : y-axis limits c(lo, hi); also bounds the shaded tunnel rect
# arrow_y    : y position of the two double-headed section arrows
# label_y    : y position of the three text annotations
# hline_y    : optional red dashed reference line(s); NULL for none
# use_points : TRUE draws points (acceleration), FALSE draws lines
plot_zda_sedan_metric <- function(yvar, ylab, ylim, arrow_y, label_y,
                                  hline_y = NULL, use_points = FALSE) {
  p <- ggplot(df.zdasedan, aes(x = disTravelled, y = .data[[yvar]]))
  # One coloured trace per driver; acceleration uses points, the rest lines.
  if (use_points) {
    p <- p + geom_point(aes(colour = factor(driverID)), size = 1)
  } else {
    p <- p + geom_line(aes(colour = factor(driverID)), size = 1)
  }
  # Optional red dashed reference line(s) (e.g. +/-3.5 m/s2, pedal mid-travel).
  if (!is.null(hline_y)) {
    p <- p + geom_hline(yintercept = hline_y, colour = "red",
                        linetype = "dashed", size = 1)
  }
  p +
    scale_x_continuous(name = NULL, limits = c(1000, 2410),
                       breaks = c(1800, 1880, 2000, 2410),
                       labels = c("", "", "AK0+000", "AK0+591.045")) +
    scale_y_continuous(name = ylab, limits = ylim) +
    # Shade the tunnel section, ending at the open-section start (x = 2272).
    annotate(geom = "rect", xmin = 1000, xmax = 2272,
             ymin = ylim[1], ymax = ylim[2], alpha = 0.1) +
    annotate(geom = "text", x = 2272, y = label_y, label = "敞开段起点") +
    # Deceleration section marker (1880 - 2000).
    annotate("segment", x = 1880, xend = 2000, y = arrow_y, yend = arrow_y,
             arrow = arrow(ends = "both", angle = 45,
                           length = unit(0.2, "cm"))) +
    annotate(geom = "text", x = 1940, y = label_y, label = "减速段") +
    # Transition section marker (1800 - 1880).
    annotate("segment", x = 1800, xend = 1880, y = arrow_y, yend = arrow_y,
             arrow = arrow(ends = "both", angle = 45,
                           length = unit(0.2, "cm"))) +
    annotate(geom = "text", x = 1840, y = label_y, label = "渐变段") +
    theme(legend.position = "none",
          axis.text.x = element_text(face = "bold", size = 10),
          axis.text.y = element_text(face = "bold", size = 10),
          axis.title.x = element_text(face = "bold", size = 12),
          axis.title.y = element_text(face = "bold", size = 12),
          panel.grid.major.x = element_line(linetype = "dashed",
                                            colour = "black", size = 0.5))
}
# 1. ZDA driving speed ----
# 1.1 Sedan ----
plot.zdasedanspeed <- plot_zda_sedan_metric(
  yvar = "speedKMH", ylab = "速度(km/h)", ylim = c(0, 100),
  arrow_y = 95, label_y = 100)
plot.zdasedanspeed
# 1.2 Truck ----
# 2. ZDA driving trajectory ----
# 2.1 Sedan ----
# 2.2 Truck ----
# 3. ZDA lane-crossing positions ----
# 3.1 Sedan ----
# 3.2 Truck ----
# 4. ZDA acceleration ----
# 4.1 Sedan ----
plot.zdasedanacc <- plot_zda_sedan_metric(
  yvar = "accZMS2", ylab = "加速度(m/s2)", ylim = c(-5, 5),
  arrow_y = 5, label_y = 4.5, hline_y = c(-3.5, 3.5), use_points = TRUE)
plot.zdasedanacc
# 4.2 Truck ----
# 5. ZDA brake pedal position ----
# 5.1 Sedan ----
plot.zdasedanbrakepedal <- plot_zda_sedan_metric(
  yvar = "brakePedal", ylab = "制动踏板位移", ylim = c(0, 1),
  arrow_y = 1, label_y = 0.95, hline_y = 0.5)
plot.zdasedanbrakepedal
# 5.2 Truck ----
# 6. ZDA gas pedal position ----
# 6.1 Sedan ----
plot.zdasedangaspedal <- plot_zda_sedan_metric(
  yvar = "gasPedal", ylab = "油门踏板位移", ylim = c(0, 1),
  arrow_y = 1, label_y = 0.95, hline_y = 0.5)
plot.zdasedangaspedal
# 6.2 货车---- | /ZDAAnalysis.R | no_license | githubmao/MawanDrivingSimulatorDataAnalysis | R | false | false | 6,823 | r | #-----------------------Code Description---------------------------------------#
# Notes:
# ver1.0, data: 20171217, by MaoY
#
# Description: 分析妈湾ZDB的行驶轨迹,速度,加速度,制动,加速等。
#------------------------------------------------------------------------------#
# 调用数据导入程序DataInput.R
source(file = "E:/R/MaWan/MawanDrivingSimulatorDataAnalysis/DataInput.R", encoding = "utf-8")
# 调用数据导入程序Functions.R
source(file = "E:/R/MaWan/MawanDrivingSimulatorDataAnalysis/Functions.R", encoding = "utf-8")
library(ggplot2) # 导入绘图用数据包
# 数据切分
df.zda <- subset(x = df.dsdata, Scen == "ZDA") # ZDA数据
df.zdasedan <- subset(x = df.zda, dsVehicleType == "Sedan") # ZDA轿车数据
df.zdatruck <- subset(x = df.zda, dsVehicleType == "Truck") # ZDA货车数据
# 1. ZDA行驶速度----
# 1.1 轿车----
plot.zdasedanspeed <- ggplot(df.zdasedan, aes(x = disTravelled, y = speedKMH)) +
geom_line(aes(colour = factor(driverID)), size = 1) +
scale_x_continuous(name = NULL, limits = c(1000, 2410),
breaks = c(1800, 1880, 2000, 2410),
labels = c("", "", "AK0+000", "AK0+591.045")) +
scale_y_continuous(name = "速度(km/h)", limits = c(0, 100)) +
annotate(geom = "rect", xmin = 1000, xmax = 2272, ymin = 0, ymax = 100, alpha = 0.1) +
annotate(geom = "text", x = 2272, y = 100, label = "敞开段起点") +
annotate("segment", x= 1880, xend= 2000, y= 95, yend= 95, arrow=arrow(ends="both", angle=45, length=unit(0.2, "cm"))) +
annotate(geom = "text", x = 1940, y = 100, label = "减速段") +
annotate("segment", x= 1800, xend= 1880, y= 95, yend= 95, arrow=arrow(ends="both", angle=45, length=unit(0.2, "cm"))) +
annotate(geom = "text", x = 1840, y = 100, label = "渐变段") +
theme(legend.position = "none",
axis.text.x = element_text(face = "bold", size = 10),
axis.text.y = element_text(face = "bold", size = 10),
axis.title.x = element_text(face = "bold", size = 12),
axis.title.y = element_text(face = "bold", size = 12),
panel.grid.major.x = element_line(linetype = "dashed", colour = "black", size = 0.5))
plot.zdasedanspeed
# 1.2 货车----
# 2. ZDA行驶轨迹----
# 2.1 轿车----
# 2.2 货车----
# 3. ZDA车道跨越点位置----
# 3.1 轿车----
# 3.2 货车----
# 4. ZDA加速度----
# 4.1 轿车----
plot.zdasedanacc <- ggplot(df.zdasedan, aes(x = disTravelled, y = accZMS2)) +
geom_point(aes(colour = factor(driverID)), size = 1) +
geom_hline(yintercept = c(-3.5, 3.5), colour = "red", linetype = "dashed", size = 1) +
scale_x_continuous(name = NULL, limits = c(1000, 2410),
breaks = c(1800, 1880, 2000, 2410),
labels = c("", "", "AK0+000", "AK0+591.045")) +
scale_y_continuous(name = "加速度(m/s2)", limits = c(-5, 5)) +
annotate(geom = "rect", xmin = 1000, xmax = 2272, ymin = -5, ymax = 5, alpha = 0.1) +
annotate(geom = "text", x = 2272, y = 4.5, label = "敞开段起点") +
annotate("segment", x= 1880, xend= 2000, y= 5, yend= 5, arrow=arrow(ends="both", angle=45, length=unit(0.2, "cm"))) +
annotate(geom = "text", x = 1940, y = 4.5, label = "减速段") +
annotate("segment", x= 1800, xend= 1880, y= 5, yend= 5, arrow=arrow(ends="both", angle=45, length=unit(0.2, "cm"))) +
annotate(geom = "text", x = 1840, y = 4.5, label = "渐变段") +
theme(legend.position = "none",
axis.text.x = element_text(face = "bold", size = 10),
axis.text.y = element_text(face = "bold", size = 10),
axis.title.x = element_text(face = "bold", size = 12),
axis.title.y = element_text(face = "bold", size = 12),
panel.grid.major.x = element_line(linetype = "dashed", colour = "black", size = 0.5))
plot.zdasedanacc
# 4.2 货车----
# 5. ZDA制动踏板位移----
# 5.1 轿车----
plot.zdasedanbrakepedal <- ggplot(df.zdasedan, aes(x = disTravelled, y = brakePedal)) +
geom_line(aes(colour = factor(driverID)), size = 1) +
geom_hline(yintercept = 0.5, colour = "red", linetype = "dashed", size = 1) +
scale_x_continuous(name = NULL, limits = c(1000, 2410),
breaks = c(1800, 1880, 2000, 2410),
labels = c("", "", "AK0+000", "AK0+591.045")) +
scale_y_continuous(name = "制动踏板位移", limits = c(0, 1)) +
annotate(geom = "rect", xmin = 1000, xmax = 2272, ymin = 0, ymax = 1, alpha = 0.1) +
annotate(geom = "text", x = 2272, y = 0.95, label = "敞开段起点") +
annotate("segment", x= 1880, xend= 2000, y= 1, yend= 1, arrow=arrow(ends="both", angle=45, length=unit(0.2, "cm"))) +
annotate(geom = "text", x = 1940, y = 0.95, label = "减速段") +
annotate("segment", x= 1800, xend= 1880, y= 1, yend= 1, arrow=arrow(ends="both", angle=45, length=unit(0.2, "cm"))) +
annotate(geom = "text", x = 1840, y = 0.95, label = "渐变段") +
theme(legend.position = "none",
axis.text.x = element_text(face = "bold", size = 10),
axis.text.y = element_text(face = "bold", size = 10),
axis.title.x = element_text(face = "bold", size = 12),
axis.title.y = element_text(face = "bold", size = 12),
panel.grid.major.x = element_line(linetype = "dashed", colour = "black", size = 0.5))
plot.zdasedanbrakepedal
# 5.2 货车----
# 6. ZDA油门踏板位移----
# 6.1 轿车----
plot.zdasedangaspedal <- ggplot(df.zdasedan, aes(x = disTravelled, y = gasPedal)) +
geom_line(aes(colour = factor(driverID)), size = 1) +
geom_hline(yintercept = 0.5, colour = "red", linetype = "dashed", size = 1) +
scale_x_continuous(name = NULL, limits = c(1000, 2410),
breaks = c(1800, 1880, 2000, 2410),
labels = c("", "", "AK0+000", "AK0+591.045")) +
scale_y_continuous(name = "油门踏板位移", limits = c(0, 1)) +
annotate(geom = "rect", xmin = 1000, xmax = 2272, ymin = 0, ymax = 1, alpha = 0.1) +
annotate(geom = "text", x = 2272, y = 0.95, label = "敞开段起点") +
annotate("segment", x= 1880, xend= 2000, y= 1, yend= 1, arrow=arrow(ends="both", angle=45, length=unit(0.2, "cm"))) +
annotate(geom = "text", x = 1940, y = 0.95, label = "减速段") +
annotate("segment", x= 1800, xend= 1880, y= 1, yend= 1, arrow=arrow(ends="both", angle=45, length=unit(0.2, "cm"))) +
annotate(geom = "text", x = 1840, y = 0.95, label = "渐变段") +
theme(legend.position = "none",
axis.text.x = element_text(face = "bold", size = 10),
axis.text.y = element_text(face = "bold", size = 10),
axis.title.x = element_text(face = "bold", size = 12),
axis.title.y = element_text(face = "bold", size = 12),
panel.grid.major.x = element_line(linetype = "dashed", colour = "black", size = 0.5))
plot.zdasedangaspedal
# 6.2 货车---- |
#' @export
#' @title append_team_data_from_teamcode
#'
#' @description
#' \code{append_team_data_from_teamcode} joins team metadata onto a data
#' table, using the numeric \code{TeamCode} column as the key.
#'
#' @details
#' The lookup is read from the bundled \code{extdata/teamdetails.csv} file;
#' its \code{TeamCode} column is converted to a number with
#' \code{ssnapinterface::teamcode_to_number()} so that it matches the numeric
#' codes in \code{data_table}. If \code{data_table} has no \code{TeamCode}
#' column the input is returned unchanged.
#'
#' Note that \code{dplyr::inner_join} is used, so rows whose code has no
#' entry in teamdetails.csv are dropped -- TODO confirm this is intended
#' (the legacy \code{old_teamcodetoname} used a left join).
#'
#' @param data_table A tibble (optimised data frame), optionally containing
#'   a numeric \code{TeamCode} column.
#' @return \code{data_table} with the lookup columns \code{TeamName},
#'   \code{TeamType}, \code{SCN_Names} and \code{TRUST_Names} appended
#'   (when \code{TeamCode} is present).
#' @author Andrew Hill, \email{andrew.hill@@doctors.org.uk}
append_team_data_from_teamcode <- function(data_table) {
  if ("TeamCode" %in% names(data_table)) {
    teamcsvpath <- system.file("extdata", "teamdetails.csv",
                               package = "ssnapstats")
    # Read only the columns we append; Country is deliberately excluded.
    team_data <- readr::read_csv(teamcsvpath,
      col_names = TRUE,
      readr::cols_only(TeamCode = readr::col_character(),
        TeamName = readr::col_character(),
        TeamType = readr::col_character(),
        SCN_Names = readr::col_character(),
        TRUST_Names = readr::col_character()#,
      #  Country = readr::col_character()
      ))
    # Convert the character codes in the CSV to numbers to match data_table.
    team_data <- dplyr::mutate(team_data, "TeamCode" =
      ssnapinterface::teamcode_to_number(.data[["TeamCode"]]))
    # Use the data as-is to change the team code into a team name
    data_table <- dplyr::inner_join(data_table,
                                    team_data,
                                    by = "TeamCode")
  }
  return(data_table)
}
# Legacy team-code-to-name translation (superseded by
# append_team_data_from_teamcode).
#
# Reads the bundled extdata/teamdetails.csv lookup and left-joins team names
# onto four code columns in turn: TeamCode, TransferToTeamCode,
# TransferFromTeamCode and S7TransferTeamCode.  Left joins keep rows with
# unknown codes (their name columns become NA).
#
# Args:
#   data_table: a data frame / tibble containing the code columns above.
# Returns:
#   data_table with TeamName (plus Country, from the first join),
#   TransferToTeamName, TransferFromTeamName and S7TransferTeamName appended.
old_teamcodetoname <- function(data_table) {
  teamcsvpath <- system.file("extdata", "teamdetails.csv",
                             package = "ssnapstats")
  team_data <- readr::read_csv(teamcsvpath,
                               col_names = TRUE,
                               readr::cols(TeamCode = readr::col_character(),
                                           TeamName = readr::col_character(),
                                           Country = readr::col_character()
                               ))
  # Translate the primary team code into a team name.
  data_table <- dplyr::left_join(data_table, team_data, by = "TeamCode")
  # We don't need country codes for transfers, so drop the country column
  # before the remaining joins.
  team_data$Country <- NULL
  # Reuse the two-column lookup for each transfer code column by renaming
  # its key/value columns to match before each join.
  team_data <- dplyr::rename(team_data,
                             TransferToTeamCode = .data[["TeamCode"]],
                             TransferToTeamName = .data[["TeamName"]])
  data_table <- dplyr::left_join(data_table, team_data,
                                 by = "TransferToTeamCode")
  team_data <- dplyr::rename(team_data,
                             TransferFromTeamCode = .data[["TransferToTeamCode"]],
                             TransferFromTeamName = .data[["TransferToTeamName"]])
  data_table <- dplyr::left_join(data_table, team_data,
                                 by = "TransferFromTeamCode")
  team_data <- dplyr::rename(team_data,
                             S7TransferTeamCode = .data[["TransferFromTeamCode"]],
                             S7TransferTeamName = .data[["TransferFromTeamName"]])
  data_table <- dplyr::left_join(data_table, team_data,
                                 by = "S7TransferTeamCode")
  # Fix: the original body ended on an assignment, so the result was
  # returned invisibly.  Return it explicitly (and visibly).
  data_table
}
| /R/teamcodetoname.R | no_license | md0u80c9/SSNAPStats | R | false | false | 3,406 | r | #' @export
#' @title append_team_data_from_teamcode
#'
#' @description
#' \code{teamcodetoname} is a macro (rather than a function) which converts team codes in the raw SSNAP
#' data to team names.
#'
#' @details
#' This macro takes as input a tibble (an optimised data frame; see the tibble package).
#'
#' The function API needs nailing down more firmly - at present it knows of three hard-coded column
#' names (TeamName, TransferFrom and TransferTo) but it should be able to work more generically in
#' future if we provide it with a column name to translate and an output name.
#'
#' It also requires Unit tests which currently we don't have. Take this function as a 'work in progress'
#' It will not win an R beauty contest any time soon.
#'
#' @param data_table A tibble (optimised data frame) containing S1PostcodeOut.
#' @return Data table updated with Team names in TeamName, TransferToTeamName, TransferFromTeamName,
#' and S7TransferHospitalName
#' @author Andrew Hill, \email{andrew.hill@@doctors.org.uk}
append_team_data_from_teamcode <- function(data_table) {
if ("TeamCode" %in% names(data_table)) {
teamcsvpath <- system.file("extdata", "teamdetails.csv",
package = "ssnapstats")
team_data <- readr::read_csv(teamcsvpath,
col_names = TRUE,
readr::cols_only(TeamCode = readr::col_character(),
TeamName = readr::col_character(),
TeamType = readr::col_character(),
SCN_Names = readr::col_character(),
TRUST_Names = readr::col_character()#,
# Country = readr::col_character()
))
team_data <- dplyr::mutate(team_data, "TeamCode" =
ssnapinterface::teamcode_to_number(.data[["TeamCode"]]))
# Use the data as-is to change the team code into a team name
data_table <- dplyr::inner_join(data_table,
team_data,
by = "TeamCode")
}
return(data_table)
}
old_teamcodetoname <- function(data_table) {
teamcsvpath <- system.file("extdata", "teamdetails.csv",
package = "ssnapstats")
team_data <- readr::read_csv(teamcsvpath,
col_names = TRUE,
readr::cols(TeamCode = readr::col_character(),
TeamName = readr::col_character(),
Country = readr::col_character()
))
# Use the data as-is to change the team code into a team name
data_table <- dplyr::left_join(data_table, team_data, by = "TeamCode")
# We don't need country codes for transfers so drop the country column
team_data$Country <- NULL
# Now rename the team code columns to do the same translation to do
# TransferTo and TransferFrom
team_data <- dplyr::rename (team_data,
TransferToTeamCode = .data[["TeamCode"]],
TransferToTeamName = .data[["TeamName"]])
data_table <- dplyr::left_join(data_table, team_data,
by = "TransferToTeamCode")
team_data <- dplyr::rename (team_data,
TransferFromTeamCode = .data[["TransferToTeamCode"]],
TransferFromTeamName = .data[["TransferToTeamName"]])
data_table <- dplyr::left_join(data_table, team_data,
by = "TransferFromTeamCode")
team_data <- dplyr::rename (team_data,
S7TransferTeamCode = .data[["TransferFromTeamCode"]],
S7TransferTeamName = .data[["TransferFromTeamName"]])
data_table <- dplyr::left_join(data_table, team_data,
by = "S7TransferTeamCode")
}
|
# Auto-generated (AFL-style fuzz) regression input for the internal routine
# CNull:::communities_individual_based_sampling_beta: a stored argument list
# with a NULL matrix `m`, zero repetitions, and a 5x7 input matrix whose
# first four entries are extreme double values (the rest are zero).
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392782e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# Replay the stored call against the current build of the package.
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615827104-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 361 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392782e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) |
# Per-metabolite linear mixed models for the E. lenta mono-colonization mouse
# experiment, with cage included as a random effect.  For every feature the
# script fits (1) a "Full" model with strain-level Group effects and
# (2) a "NoStrains" model with a single colonized/germ-free (Colonized)
# factor, then stores tidied coefficients and within-site pairwise
# least-squares-mean contrasts (SI / Cecal / LI).
library(data.table)
library(readxl)
#library(ggplot2)
#library(cowplot)
#library(ggrepel)
#library(sva)
#library(UpSetR)
#library(ggpubr)
library(tidyverse)
library(lmerTest)
library(broom.mixed)
#theme_set(theme_pubr())
# Load preprocessed metabolite data; assumed to provide met_data_sub (a
# long-format data.table with IDUniq / SampleType / Group / Colonized /
# log10value / Duplicate / Animal columns) -- confirm against the upstream
# processing script.
load("mouse_data_models.rda")
# Unique, non-duplicated feature IDs to model.
feature_list <- met_data_sub[Duplicate == 0,sort(unique(IDUniq))]
# Sample metadata (mouse ID -> cage assignment); keep only mice with
# metabolomics data.
metadata2 <- data.table(read_xlsx("origData/ForCN_elen_mono.xlsx", skip = 2))
metadata2 <- metadata2[!is.na(Metabolomics)]
#metadata2[,table(Cage, Group)]
# Numeric mouse ID parsed from the trailing part of the Animal label,
# used to join on cage.
met_data_sub[,Animal2:=as.numeric(gsub(".*_", "", Animal))]
met_data_sub <- merge(met_data_sub, metadata2[,list(`Mouse ID`, Cage)], by.x = "Animal2", by.y = "Mouse ID", all.x = T)
# Preallocate per-feature result containers.
# NOTE(review): "constrasts_all_strains" is misspelled ("contrasts"), but the
# name is used consistently below and ends up in the saved .rda, so renaming
# it would change the stored object name.
mods_all = vector("list", length(feature_list))
effects_all_all =vector("list", length(feature_list))
contrasts_all <- vector("list", length(feature_list))
constrasts_all_strains <- vector("list", length(feature_list))
for(i in 1:length(feature_list)){
  x = feature_list[i]
  mod_data = met_data_sub[IDUniq==x]
  # Full model: site x strain-group interaction, animals nested in cages.
  mod_results = lmer(log10value~SampleType+Group+SampleType:Group + (1|Cage/Animal), data = mod_data)
  mod_coefs = data.table(tidy(mod_results), Model = "Full")
  # All pairwise LS-mean differences, keeping only the within-site
  # group-vs-group comparisons for each of the three sites.
  site_contrasts_full <- difflsmeans(mod_results) %>% as.data.frame() %>% rownames_to_column() %>% data.table()
  site_contrasts_full <- site_contrasts_full[rowname %in% c("SampleTypeSI:GroupGF - SampleTypeSI:Group2243",
                                                            "SampleTypeSI:GroupGF - SampleTypeSI:Group15644",
                                                            "SampleTypeSI:GroupGF - SampleTypeSI:GroupAB12n2",
                                                            "SampleTypeSI:Group2243 - SampleTypeSI:Group15644",
                                                            "SampleTypeSI:Group2243 - SampleTypeSI:GroupAB12n2",
                                                            "SampleTypeSI:Group15644 - SampleTypeSI:GroupAB12n2",
                                                            "SampleTypeCecal:GroupGF - SampleTypeCecal:Group2243",
                                                            "SampleTypeCecal:GroupGF - SampleTypeCecal:Group15644",
                                                            "SampleTypeCecal:GroupGF - SampleTypeCecal:GroupAB12n2",
                                                            "SampleTypeCecal:Group2243 - SampleTypeCecal:Group15644",
                                                            "SampleTypeCecal:Group2243 - SampleTypeCecal:GroupAB12n2",
                                                            "SampleTypeCecal:Group15644 - SampleTypeCecal:GroupAB12n2",
                                                            "SampleTypeLI:GroupGF - SampleTypeLI:Group2243",
                                                            "SampleTypeLI:GroupGF - SampleTypeLI:Group15644",
                                                            "SampleTypeLI:GroupGF - SampleTypeLI:GroupAB12n2",
                                                            "SampleTypeLI:Group2243 - SampleTypeLI:Group15644",
                                                            "SampleTypeLI:Group2243 - SampleTypeLI:GroupAB12n2",
                                                            "SampleTypeLI:Group15644 - SampleTypeLI:GroupAB12n2")]
  #type_mod = lmer(value~SampleType + (1|Animal), data = mod_data)
  site_contrasts_full[,IDUniq:=x]
  # Reduced model: collapse strains into a single Colonized factor.
  # NOTE(review): this model uses (1|Animal) only, without the cage nesting
  # used in the full model -- confirm this asymmetry is intentional.
  nostrain_mod = lmer(log10value~SampleType+Colonized+SampleType:Colonized + (1|Animal), data = mod_data)
  ns_coefs = data.table(tidy(nostrain_mod), Model = "NoStrains")
  # e_full = effects::allEffects(mod_results)
  # e_tab = as.data.table(e_full[[1]])
  # e_tab[,Model:="Full"]
  # e_nostrain = effects::allEffects(nostrain_mod)
  # e_ns_tab = as.data.table(e_nostrain[[1]])
  # e_ns_tab[,Model:="NoStrains"]
  # Keep only the within-site GF-vs-colonized contrasts from the reduced fit.
  site_contrasts <- difflsmeans(nostrain_mod) %>% as.data.frame() %>% rownames_to_column() %>% data.table()
  site_contrasts <- site_contrasts[rowname %in% c("SampleTypeSI:ColonizedGF - SampleTypeSI:ColonizedEl", "SampleTypeLI:ColonizedGF - SampleTypeLI:ColonizedEl",
                                                  "SampleTypeCecal:ColonizedGF - SampleTypeCecal:ColonizedEl")]
  site_contrasts[,IDUniq:=x]
  # Compare the two fits; the p-value decides which model's coefficients are
  # flagged as "Best" for this feature.
  chisq_pval = anova(mod_results, nostrain_mod)["Pr(>Chisq)"][2,1]
  mod_all = rbind(mod_coefs, ns_coefs, fill = T)
  # effects_all = rbind(e_tab, e_ns_tab, fill = T)
  if(chisq_pval < 0.05){
    mod_all[,Best:=ifelse(Model == "Full", 1, 0)]
    # effects_all[,Best:=ifelse(Model == "Full", 1, 0)]
  } else {
    mod_all[,Best:=ifelse(Model == "NoStrains", 1, 0)]
    # effects_all[,Best:=ifelse(Model == "NoStrains", 1, 0)]
  }
  mod_all[,IDUniq:=x]
  # effects_all[,ID2:=x]
  mods_all[[i]] = mod_all
  # effects_all_all[[i]] = effects_all
  contrasts_all[[i]] <- site_contrasts
  constrasts_all_strains[[i]] <- site_contrasts_full
}
# Collapse the per-feature lists into single tables and save the results.
mods_all = rbindlist(mods_all)
#effects_all = rbindlist(effects_all_all)
#effects_all[,Group2:=ifelse(Model == "Full", as.character(Group), as.character(Colonized))]
contrasts_all <- rbindlist(contrasts_all)
constrasts_all_strains <- rbindlist(constrasts_all_strains)
#save(mods_all, effects_all, feature_list, contrasts_all, contrasts_all_strains, file = "mouse_lmer_results_log10.rda")
save(mods_all, feature_list, contrasts_all, constrasts_all_strains, file = "mouse_lmer_results_log10_wCage.rda")
| /scripts/run_mouse_lmer_cages.R | no_license | turnbaughlab/2022_Noecker_ElentaMetabolism | R | false | false | 5,340 | r | library(data.table)
library(readxl)
#library(ggplot2)
#library(cowplot)
#library(ggrepel)
#library(sva)
#library(UpSetR)
#library(ggpubr)
library(tidyverse)
library(lmerTest)
library(broom.mixed)
#theme_set(theme_pubr())
load("mouse_data_models.rda")
feature_list <- met_data_sub[Duplicate == 0,sort(unique(IDUniq))]
metadata2 <- data.table(read_xlsx("origData/ForCN_elen_mono.xlsx", skip = 2))
metadata2 <- metadata2[!is.na(Metabolomics)]
#metadata2[,table(Cage, Group)]
met_data_sub[,Animal2:=as.numeric(gsub(".*_", "", Animal))]
met_data_sub <- merge(met_data_sub, metadata2[,list(`Mouse ID`, Cage)], by.x = "Animal2", by.y = "Mouse ID", all.x = T)
mods_all = vector("list", length(feature_list))
effects_all_all =vector("list", length(feature_list))
contrasts_all <- vector("list", length(feature_list))
constrasts_all_strains <- vector("list", length(feature_list))
for(i in 1:length(feature_list)){
x = feature_list[i]
mod_data = met_data_sub[IDUniq==x]
mod_results = lmer(log10value~SampleType+Group+SampleType:Group + (1|Cage/Animal), data = mod_data)
mod_coefs = data.table(tidy(mod_results), Model = "Full")
site_contrasts_full <- difflsmeans(mod_results) %>% as.data.frame() %>% rownames_to_column() %>% data.table()
site_contrasts_full <- site_contrasts_full[rowname %in% c("SampleTypeSI:GroupGF - SampleTypeSI:Group2243",
"SampleTypeSI:GroupGF - SampleTypeSI:Group15644",
"SampleTypeSI:GroupGF - SampleTypeSI:GroupAB12n2",
"SampleTypeSI:Group2243 - SampleTypeSI:Group15644",
"SampleTypeSI:Group2243 - SampleTypeSI:GroupAB12n2",
"SampleTypeSI:Group15644 - SampleTypeSI:GroupAB12n2",
"SampleTypeCecal:GroupGF - SampleTypeCecal:Group2243",
"SampleTypeCecal:GroupGF - SampleTypeCecal:Group15644",
"SampleTypeCecal:GroupGF - SampleTypeCecal:GroupAB12n2",
"SampleTypeCecal:Group2243 - SampleTypeCecal:Group15644",
"SampleTypeCecal:Group2243 - SampleTypeCecal:GroupAB12n2",
"SampleTypeCecal:Group15644 - SampleTypeCecal:GroupAB12n2",
"SampleTypeLI:GroupGF - SampleTypeLI:Group2243",
"SampleTypeLI:GroupGF - SampleTypeLI:Group15644",
"SampleTypeLI:GroupGF - SampleTypeLI:GroupAB12n2",
"SampleTypeLI:Group2243 - SampleTypeLI:Group15644",
"SampleTypeLI:Group2243 - SampleTypeLI:GroupAB12n2",
"SampleTypeLI:Group15644 - SampleTypeLI:GroupAB12n2")]
#type_mod = lmer(value~SampleType + (1|Animal), data = mod_data)
site_contrasts_full[,IDUniq:=x]
nostrain_mod = lmer(log10value~SampleType+Colonized+SampleType:Colonized + (1|Animal), data = mod_data)
ns_coefs = data.table(tidy(nostrain_mod), Model = "NoStrains")
# e_full = effects::allEffects(mod_results)
# e_tab = as.data.table(e_full[[1]])
# e_tab[,Model:="Full"]
# e_nostrain = effects::allEffects(nostrain_mod)
# e_ns_tab = as.data.table(e_nostrain[[1]])
# e_ns_tab[,Model:="NoStrains"]
site_contrasts <- difflsmeans(nostrain_mod) %>% as.data.frame() %>% rownames_to_column() %>% data.table()
site_contrasts <- site_contrasts[rowname %in% c("SampleTypeSI:ColonizedGF - SampleTypeSI:ColonizedEl", "SampleTypeLI:ColonizedGF - SampleTypeLI:ColonizedEl",
"SampleTypeCecal:ColonizedGF - SampleTypeCecal:ColonizedEl")]
site_contrasts[,IDUniq:=x]
chisq_pval = anova(mod_results, nostrain_mod)["Pr(>Chisq)"][2,1]
mod_all = rbind(mod_coefs, ns_coefs, fill = T)
# effects_all = rbind(e_tab, e_ns_tab, fill = T)
if(chisq_pval < 0.05){
mod_all[,Best:=ifelse(Model == "Full", 1, 0)]
# effects_all[,Best:=ifelse(Model == "Full", 1, 0)]
} else {
mod_all[,Best:=ifelse(Model == "NoStrains", 1, 0)]
# effects_all[,Best:=ifelse(Model == "NoStrains", 1, 0)]
}
mod_all[,IDUniq:=x]
# effects_all[,ID2:=x]
mods_all[[i]] = mod_all
# effects_all_all[[i]] = effects_all
contrasts_all[[i]] <- site_contrasts
constrasts_all_strains[[i]] <- site_contrasts_full
}
mods_all = rbindlist(mods_all)
#effects_all = rbindlist(effects_all_all)
#effects_all[,Group2:=ifelse(Model == "Full", as.character(Group), as.character(Colonized))]
contrasts_all <- rbindlist(contrasts_all)
constrasts_all_strains <- rbindlist(constrasts_all_strains)
#save(mods_all, effects_all, feature_list, contrasts_all, contrasts_all_strains, file = "mouse_lmer_results_log10.rda")
save(mods_all, feature_list, contrasts_all, constrasts_all_strains, file = "mouse_lmer_results_log10_wCage.rda")
|
## makeCacheMatrix creates a list of functions that can be called from outside.
## It takes an existing matrix and keeps a cache of that matrix's inverse.
## cacheSolve checks whether the inverse of the matrix was calculated before:
## if it exists, it is re-used; otherwise the inverse matrix is calculated,
## passed to makeCacheMatrix, and saved for reuse later.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse of `x`; NULL until cacheSolve() stores a value.
  cached_inverse <- NULL

  # Replace the stored matrix and invalidate any previously cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }

  # Retrieve the currently stored matrix.
  get <- function() x

  # Store the inverse computed by cacheSolve().
  setSolve <- function(inverse) cached_inverse <<- inverse

  # Retrieve the cached inverse (NULL if not yet computed).
  getSolve <- function() cached_inverse

  # Expose the four accessors as a named list so they can be called
  # from outside this closure.
  list(
    set = set,
    get = get,
    setSolve = setSolve,
    getSolve = getSolve
  )
}
## cacheSolve(): return the inverse of the special "matrix" object produced
## by makeCacheMatrix(), computing it only on the first call. Subsequent
## calls reuse the cached inverse (announced via message()). Any extra
## arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getSolve()
  # Fast path: a previously computed inverse is available in the cache.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Slow path: fetch the matrix, invert it, remember the result, return it.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setSolve(inv)
  inv
}
| /cachematrix.R | no_license | daxuyin/ProgrammingAssignment2 | R | false | false | 1,729 | r | ## makeCacheMatrix creates a vector of functions that can be called from outside.
## It takes the existing variable matrix, and keeps the inverse of the matrix.
## cacheSolve will check if a inverse of the matrix was calculated before
## if it does exists, it will re-use it, otherwise, calculate the inverse matrix,
## pass it to makeCacheMatrix and save the inverse matrix for reuse later.
makeCacheMatrix <- function(x = matrix()) {
#set variable s (Inverse of a Matrix in this case) to NULL
s <- NULL
#set function sets x to the argument y and set s to null,
#basically, clear anything exists before when x$set(y) is called
set <- function(y) {
x <<- y
s <<- NULL
}
#get returns the value of x (argument of makeCacheMatrix)
get <- function() x
#sets s in makeCacheMatrix to Inverse Matrix, called in cacheSolve function.
setSolve <- function(v_solve) s <<- v_solve
# get returns the value of S
getSolve <- function() s
#Returns of a labeled vector, with values of the functions in makeCacheMatrix
#so these functions can be called from outside.
list(set = set, get = get,
setSolve = setSolve,
getSolve = getSolve)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
#Check if precalculated inverse matrix, s, exists.
#If TRUE, return the precalculated s
s <- x$getSolve()
if(!is.null(s)) {
message("getting cached data")
return(s)
}
#the matrix variable
data <- x$get()
#calculate the inverse matrix of data
s <- solve(data, ...)
#set the inverse matrix value
x$setSolve(s)
#return the inverse matrix
s
}
|
# p_load() installs (if needed) and attaches the listed packages in one call.
pacman::p_load(tidyverse, data.table)
######################################
# In this code, we are going to cover:
# 0. how to load a dataset
# 1. select columns
# 2. create columns
# 3. rename columns
# 4. reorder columns
# 5. sort rows
# 6. subset rows
# 7. remove duplicates
# 8. summarize data
# 9. merge two datasets
######################################
# 0. creating a data.table
# data.table is an enhanced version of data.frame
DT <- data.table(
  ID = c("b","b","b","a","a","c"),
  a = 1:6,
  b = 7:12,
  c = 13:18
)
# reading a csv file from the internet
url <- "https://raw.githubusercontent.com/Rdatatable/data.table/master/vignettes/flights14.csv"
flights <- fread(url)
flights
# look into the data
class(flights)
str(flights)
# `data.table` is structured thus:
# `DATAFRAME[ROWS,COLUMNS]`
# 1. selecting a/more than one column:
flights[,year]
ans <- flights[,year]
# we are going to select four columns:
# year, month, day, carrier
ans <- flights[, .(year,month,day,carrier)]
head(ans)
# selecting columns (an alternative method)
# the ".." prefix tells data.table to look up select_cols in the calling
# scope rather than treating it as a column name
select_cols <- c("arr_delay", "dep_delay")
flights[ , ..select_cols]
# drop a set of columns
ans <- flights[, !c("month","day","hour")]
# 2. create a new column
df <- data.table(x = 1:6)
df2 <- df[, .(x,x2 = x^2)]
# creating many columns
# NOTE: `:=` adds the columns to df *by reference* -- df itself is
# modified in place, no copy is returned
# we are going to:
# square up x
# take cube of x
# take average of x
df[, `:=`(x2 = x^2,
          x3 = x^3,
          x_m = mean(x))]
# 3. rename columns
# we are going to rename the column carrier as airlines
ans <- flights[, .(year, month, day, airlines = carrier)]
# 4. change the order of columns
# setcolorder() also works by reference; listing only "airlines" moves
# that column to the front and leaves the rest in their current order
setcolorder(ans, c("airlines"))
# 5. sort data
db <- data.table(
  name = c("D", "Q", "M", "E"),
  score = c(20,16,18,15)
)
# we want to sort this data by name
db[order(name)]
# alternatively, you can use setorder(), which sorts db in place;
# the minus sign requests descending order of the column `name`
setorder(db, -name)
# 6. subset rows
# applying condition on data frame
ans <- flights[origin == "JFK" & month == 6L]
# extracting rows by index
ans <- flights[1:5]
# doing simple operations
# let's see how many trips had delay < 0
ans <- flights[, sum( (arr_delay + dep_delay) < 0 )]
# 7. remove duplicates
#let's create a fake data
movieDB <- data.table(
  director = c("Ozu", "Ozu", "Ritwik"),
  name = c("Tokyo Story", "Tokyo Story", "Meghe Dhaka Tara"),
  year = c(1953, 1953, 1960))
movieDB
unique(movieDB, by = "director")
# 8. summarize data
# select all flights with origin in JFK &
# select data for the month of June
# compute average arrival delay and departure delay
ans <- flights[origin == "JFK" & month == 6L,
               .(m_arr = mean(arr_delay), m_dep = mean(dep_delay))]
# number of flights that started from JFK
ans <- flights[origin == "JFK" & month == 6L, length(dest)]
# you can get the number of observations by
# using .N
ans <- flights[origin == "JFK" & month == 6L, .N]
# generating aggregates
ans <- flights[, .(.N), by = .(origin)]
ans
# or
ans <- flights[, .N, by = origin]
# example 2
ans <- flights[carrier == "AA", .N, by = origin]
ans
# example 3
ans <- flights[carrier == "AA", .N, by = .(origin, dest)]
head(ans)
# example 4
ans <- flights[carrier == "AA",
               .(mean(arr_delay), mean(dep_delay)),
               by = .(origin, dest, month)]
ans
# how many flights started late but
# arrived early (or on time), started and arrived late
ans <- flights[, .N, .(dep_delay>0, arr_delay>0)]
ans
# 9. merge two datasets
# we create two variables x and y
x <- c("x1", "x2", "x3", "x4", "x5")
y <- c("y1","y2","y3","y4")
# we create two data frames DT1 and DT2
# both these datasets contain a common column (key) called
# ID
DT1 <- data.table(ID = 1:5, x)
DT1
DT2 <- data.table(ID = c(2,4,6,8), y)
DT2
# merge two datasets (inner join: only matched IDs survive)
merge(DT1,DT2, by = "ID")
# note that this will output only matched rows
# what if you want all the rows from DT1 to be
# preserved in your output (left join)
merge(DT1,DT2, by = "ID", all.x = TRUE)
# what if you want all the rows from DT2 to be stored in the merged file?
merge(DT1,DT2, by = "ID", all.y = TRUE)
# okay, what about preserving all the rows from the two datasets?
merge(DT1, DT2, by = "ID", all = TRUE)
# Further readings--
# check out
# https://atrebas.github.io/post/2019-03-03-datatable-dplyr/
| /codes/L02(2020).R | permissive | sumitrmishra/data504 | R | false | false | 4,344 | r | pacman::p_load(tidyverse, data.table)
######################################
# In this code, we are going to cover:
# 0. how to load a dataset
# 1. select columns
# 2. create columns
# 3. rename columns
# 4. reorder columns
# 5. sort rows
# 6. subset rows
# 7. remove duplicates
# 8. summarize data
# 9. merge two datasets
######################################
# creating a data.table
# data.table is an enhanced version of data.frame
DT = data.table(
ID = c("b","b","b","a","a","c"),
a = 1:6,
b = 7:12,
c = 13:18
)
# reading a csv file from the internet
url <- "https://raw.githubusercontent.com/Rdatatable/data.table/master/vignettes/flights14.csv"
flights <- fread(url)
flights
# look into the data
class(flights)
str(flights)
# `data.table` is structured thus:
# `DATAFRAME[ROWS,COLUMNS]`
# 1. selecting a/more than one column:
flights[,year]
ans <- flights[,year]
# we are going to select four columns:
# year, month, day, carrier
ans <- flights[, .(year,month,day,carrier)]
head(ans)
# selecting columns (an alternative method)
select_cols = c("arr_delay", "dep_delay")
flights[ , ..select_cols]
# drop a set of columns
ans <- flights[, !c("month","day","hour")]
# create a new column
df <- data.table(x = 1:6)
df2 <- df[, .(x,x2 = x^2)]
# creating many columns
# we are going to:
# square up x
# take cube of x
# take average of x
df[, `:=`(x2 = x^2,
x3 = x^3,
x_m = mean(x))]
# 3. rename columns
# we are going to rename the column carrier as airlines
ans <- flights[, .(year, month, day, airlines = carrier)]
# 4. change the order of columns
# we want to put airlines as the first column
setcolorder(ans, c("airlines"))
# 5. sort data
db <- data.table(
name = c("D", "Q", "M", "E"),
score = c(20,16,18,15)
)
# we want to sort this data by name
db[order(name)]
# alternatively, you can use setorder()
# in this example, i have sorted the data
# in descending order of the column `name`
setorder(db, -name)
# 6. subset rows
# applying condition on data frame
ans <- flights[origin == "JFK" & month == 6L]
# extracting rows by index
ans <- flights[1:5]
# doing simple operations
# let's see how many trips had delay < 0
ans <- flights[, sum( (arr_delay + dep_delay) < 0 )]
# 7. remove duplicates
#let's create a fake data
movieDB <- data.table(
director = c("Ozu", "Ozu", "Ritwik"),
name =c("Tokyo Story", "Tokyo Story", "Meghe Dhaka Tara"),
year = c(1953, 1953, 1960))
movieDB
unique(movieDB, by = "director")
# 8. summarize data
# select all flights with origin in JFK &
# select data for the month of June
# compute average arrival delay and departure delay
ans <- flights[origin == "JFK" & month == 6L,
.(m_arr = mean(arr_delay), m_dep = mean(dep_delay))]
# number of flights that started from JFK
ans <- flights[origin == "JFK" & month == 6L, length(dest)]
# you can get the number of observations by
# using .N
ans <- flights[origin == "JFK" & month == 6L, .N]
# generating aggregates
ans <- flights[, .(.N), by = .(origin)]
ans
# or
ans <- flights[, .N, by = origin]
# example 2
ans <- flights[carrier == "AA", .N, by = origin]
ans
# example 3
ans <- flights[carrier == "AA", .N, by = .(origin, dest)]
head(ans)
# example 4
ans <- flights[carrier == "AA",
.(mean(arr_delay), mean(dep_delay)),
by = .(origin, dest, month)]
ans
# how many flights started late but
# arrived early (or on time), started and arrived late
ans <- flights[, .N, .(dep_delay>0, arr_delay>0)]
ans
# 9. merge two datasets
# we create two variables x and y
x <- c("x1", "x2", "x3", "x4", "x5")
y <- c("y1","y2","y3","y4")
# we create two data frames DT1 and DT2
# both these datasets contain a common column (key) called
# ID
DT1 <- data.table(ID = 1:5, x)
DT1
DT2 <- data.table(ID = c(2,4,6,8), y)
DT2
# merge two datasets
merge(DT1,DT2, by = "ID")
# note that this will output only matched rows
# what if you want all the rows from DT1 to be
# preserved in your output
merge(DT1,DT2, by = "ID", all.x = T)
# what if you want all the rows from DT2 to be stored in the merged file?
merge(DT1,DT2, by = "ID", all.y = T)
# okay, what about preserving all the rows from the two datasets?
merge(DT1, DT2, by = "ID", all = T)
# Further readings--
# check out
# https://atrebas.github.io/post/2019-03-03-datatable-dplyr/
|
# ---------------------------------------------------------------------
# Simulate a single Damped Linear Oscillator
# Author: Original script by Steve Boker, modified by Kevin McKee
# ---------------------------------------------------------------------
# ----------------------------------
# Draw correlated DLO parameters for twin 1 and twin 2 of each pair.
# Free variables rEta, rZeta, nPairs are expected from the driver script
# that sources this file; mvrnorm() is from MASS, which must be loaded
# there as well (it is not loaded here) -- TODO confirm.
#Generate correlated parameters between twin 1 and twin 2.
#Let's say rMZ will be .7
#Going to need to constrain etas and zetas to be below 0
#Gammas can be anything.
# Build the 6x6 parameter covariance: cross-twin correlations rEta/rZeta on
# eta and zeta, overall scale 0.01, and variance 0.05 for the two gammas.
sig<-diag(6)
colnames(sig)<-rownames(sig)<-c("T1.eta","T1.zeta","T2.eta","T2.zeta","T1.gamma","T2.gamma")
sig[3,1]<-sig[1,3]<-rEta; sig[4,2]<-sig[2,4]<-rZeta
sig<-sig*0.01; sig[5,5]<-sig[6,6]<-0.05
# Mean vector in the same column order as sig (etas -0.4, zetas -0.2, gammas 0).
mu<-c(-0.4, -0.2, -0.4, -0.2, 0, 0)
T.pars<-mvrnorm(nPairs, mu, Sigma = sig, empirical=T)
T.pars[,c(1,3)][which(T.pars[,c(1,3)]>0.1)]<-0.1 #cap etas at 0.1 (NOTE(review): comment said "0 correction" but code caps at 0.1 -- confirm intended)
T.pars[,c(2,4)][which(T.pars[,c(2,4)]>0)]<-0 #cap zetas at 0 (no positive damping terms)
# hist(T.pars[,6])
# plot(T.pars[,2],T.pars[,4])
# Container for the embedded series of each simulated twin pair (filled below).
twinData<-list()
# The coupled damped linear oscillator (DLO) system of ODEs, hoisted out of
# the loop since it does not depend on the pair index i.
# State: x, z are the two twins' series; y = dx/dt, w = dz/dt.
# parms = c(eta1, zeta1, gamma1, eta2, zeta2, gamma2), matching the T.pars
# column layout above: each twin's own eta/zeta dynamics plus a coupling
# coefficient (gamma) multiplying the other twin's acceleration.
DLOmodel <- function(t, prevState, parms) {
  x <- prevState[1] # x[t]
  y <- prevState[2] # dx[t]
  z <- prevState[3] # z[t]
  w <- prevState[4] # dz[t]
  with(as.list(parms),
       {
         dx <- y
         dy <- parms[1]*x + parms[2]*y + parms[3] * (parms[4]*z + parms[5]*w)
         dz <- w
         dw <- parms[4]*z + parms[5]*w + parms[6] * (parms[1]*x + parms[2]*y)
         res <- c(dx, dy, dz, dw)
         list(res)
       }
  )
}
# Simulate, noise, and time-delay embed one series per twin pair.
for(i in seq_len(nPairs)){  # seq_len() is safe if nPairs == 0 (1:0 would iterate twice)
  theTimes <- 1:N
  # Generate shock series: twin-specific shocks plus a shared series that
  # overwrites both twins where non-zero, inducing cross-twin correlation.
  shocksX <- rbinom(N, 1, pEvent/2)*sample(c(-1, 1), N, replace=TRUE)#*rnorm(N, 0, eventScaleX1)
  shocksY <- rbinom(N, 1, pEvent/2)*sample(c(-1, 1), N, replace=TRUE)#*rnorm(N, 0, eventScaleX2)
  shocksR <- rbinom(N, 1, pEventShared)*sample(c(-1, 1), N, replace=TRUE)#*rnorm(N, 0, eventScaleShared)
  shocksR.ind <- which(shocksR != 0)
  shocksX[shocksR.ind] <- shocksR[shocksR.ind]
  shocksY[shocksR.ind] <- shocksR[shocksR.ind]
  # Times (shifted by one) at which either twin is shocked.
  # NOTE(review): unused below; kept because it is assigned at top level and
  # downstream scripts may read it after sourcing -- confirm before removing.
  locShocks <- unique(sort(c(which(shocksX != 0), which(shocksY != 0)) + 1))
  # Route each twin's shocks to the level state, the slope state, or both,
  # according to eventTypeX1 / eventTypeX2.
  if(eventTypeX1=="slope"){
    shocks.m <- c(rep(0,N), shocksX)
  }else if(eventTypeX1=="both"){
    shocks.m <- c(rep(shocksX,2))
  }else{
    shocks.m <- c(shocksX, rep(0,N))
  }
  if(eventTypeX2=="slope"){
    shocks.m <- c(shocks.m, rep(0,N), shocksY)
  }else if(eventTypeX2=="both"){ # BUG FIX: previously tested eventTypeX1 here
    shocks.m <- c(shocks.m, rep(shocksY,2))
  }else{
    shocks.m <- c(shocks.m, shocksY, rep(0,N))
  }
  # deSolve events table: at each time point, add the shock value to the
  # named state variable.
  eventdat <- data.frame(var = c(rep("x", N),rep("y", N),rep("z", N),rep("w", N)),
                         time = rep(theTimes, 4),
                         value = shocks.m,
                         method = rep("add", N*4))
  # ----------------------------------
  # Integrate the DLO under this pair's parameter draw (row i of T.pars).
  parms <- c(T.pars[i,1], T.pars[i,2], T.pars[i,5], T.pars[i,3], T.pars[i,4], T.pars[i,6])
  tOffsets <- c(1:N)
  xstart <- c(x = initCond.X, y = initCond.dX, z = initCond.Y, w = initCond.dY)
  out1 <- as.data.frame(lsoda(xstart, theTimes, DLOmodel, parms, events = list(data=eventdat)))[tOffsets,]
  # ----------------------------------
  # Standardize by the pooled SD of both series and add measurement noise
  # scaled so that the signal-to-noise ratio equals tSNR.
  tSD <- sqrt(var(c(out1$x, out1$z)))
  tESD <- 1 / tSNR
  tOscDataX <- out1$x/tSD + rnorm(N, mean=0, sd=tESD)
  tOscDataY <- out1$z/tSD + rnorm(N, mean=0, sd=tESD)
  tOscData <- cbind(tOscDataX, tOscDataY)
  dimnames(tOscData) <- list(NULL, c("X1", "X2"))
  tData <- tOscData
  # Time-delay embed each twin's series (embedD lags, step theTau).
  tEmbeddedX <- gllaEmbed(tData[,1], embed=embedD, tau=theTau, idColumn=FALSE)
  tEmbeddedY <- gllaEmbed(tData[,2], embed=embedD, tau=theTau, idColumn=FALSE)
  tEmbedded.cur <- data.frame(cbind(tEmbeddedX, tEmbeddedY))
  colnames(tEmbedded.cur) <- manifestVars
  twinData[[i]] <- tEmbedded.cur
}
| /lib/NL/old/T_genData.R | no_license | kmckee90/LDE-Simulations | R | false | false | 3,671 | r | # ---------------------------------------------------------------------
# Simulate a single Damped Linear Oscillator
# Author: Original script by Steve Boker, modified by Kevin McKee
# ---------------------------------------------------------------------
# ----------------------------------
#Generate correlated parameters between twin 1 and twin 2.
#Let's say rMZ will be .7
#Going to need to constrain etas and zetas to be below 0
#Gammas can be anything.
sig<-diag(6)
colnames(sig)<-rownames(sig)<-c("T1.eta","T1.zeta","T2.eta","T2.zeta","T1.gamma","T2.gamma")
sig[3,1]<-sig[1,3]<-rEta; sig[4,2]<-sig[2,4]<-rZeta
sig<-sig*0.01; sig[5,5]<-sig[6,6]<-0.05
mu<-c(-0.4, -0.2, -0.4, -0.2, 0, 0)
T.pars<-mvrnorm(nPairs, mu, Sigma = sig, empirical=T)
T.pars[,c(1,3)][which(T.pars[,c(1,3)]>0.1)]<-0.1 #Eta 0 correction
T.pars[,c(2,4)][which(T.pars[,c(2,4)]>0)]<-0 #Zeta 0 correction
# hist(T.pars[,6])
# plot(T.pars[,2],T.pars[,4])
twinData<-list()
for(i in 1:nPairs){
# Define the damped linear oscillator function.
DLOmodel <- function(t, prevState, parms) {
x <- prevState[1] # x[t]
y <- prevState[2] # dx[t]
z <- prevState[3] # z[t]
w <- prevState[4] # dz[t]
with(as.list(parms),
{
dx <- y
dy <- parms[1]*x + parms[2]*y + parms[3] * (parms[4]*z + parms[5]*w)
dz <- w
dw <- parms[4]*z + parms[5]*w + parms[6] * (parms[1]*x + parms[2]*y)
res<-c(dx,dy,dz,dw)
list(res)
}
)
}
# ---------------------------------------------
theTimes <- 1:N
#Generate correlated shocks
shocksX<-rbinom(N,1,pEvent/2)*sample(c(-1, 1), N, replace=T)#*rnorm(N, 0, eventScaleX1)
shocksY<-rbinom(N,1,pEvent/2)*sample(c(-1, 1), N, replace=T)#*rnorm(N, 0, eventScaleX2)
shocksR<-rbinom(N,1,pEventShared)*sample(c(-1, 1), N, replace=T)#*rnorm(N, 0, eventScaleShared)
shocksR.ind<-which(shocksR!=0)
shocksX[shocksR.ind]<-shocksR[shocksR.ind]
shocksY[shocksR.ind]<-shocksR[shocksR.ind]
locShocks<-unique(sort(c(which(shocksX!=0), which(shocksY!=0))+1))
if(eventTypeX1=="slope"){
shocks.m<-c(rep(0,N), shocksX)
}else if(eventTypeX1=="both"){
shocks.m<-c(rep(shocksX,2))
}else{
shocks.m<-c(shocksX, rep(0,N))
}
if(eventTypeX2=="slope"){
shocks.m<-c(shocks.m, rep(0,N), shocksY)
}else if(eventTypeX1=="both"){
shocks.m<-c(shocks.m, rep(shocksY,2))
}else{
shocks.m<-c(shocks.m, shocksY, rep(0,N))
}
eventdat <- data.frame(var = c(rep("x", N),rep("y", N),rep("z", N),rep("w", N)),
time = rep(theTimes, 4),
value = shocks.m,
method = rep("add", N*4))
# ----------------------------------
# Simulate a damped linear oscillator.
parms <- c(T.pars[i,1], T.pars[i,2], T.pars[i,5], T.pars[i,3], T.pars[i,4], T.pars[i,6])
# tOffsets <- c(1:N)
tOffsets <- c(1:N)
xstart <- c(x = initCond.X, y = initCond.dX, z = initCond.Y, w = initCond.dY)
out1 <- as.data.frame(lsoda(xstart, theTimes, DLOmodel, parms, events = list(data=eventdat)))[tOffsets,]
# ----------------------------------
# Scale error for a chosen signal to noise ratio.
tSD <- sqrt(var(c(out1$x,out1$z)))
tESD <- 1 / tSNR
tOscDataX <- out1$x/tSD + rnorm(N, mean=0, sd=tESD)
tOscDataY <- out1$z/tSD + rnorm(N, mean=0, sd=tESD)
tOscData <- cbind(tOscDataX, tOscDataY)
dimnames(tOscData) <- list(NULL, c("X1", "X2"))
tData<-tOscData
tEmbeddedX <- gllaEmbed(tData[,1], embed=embedD, tau=theTau, idColumn=FALSE)
tEmbeddedY <- gllaEmbed(tData[,2], embed=embedD, tau=theTau, idColumn=FALSE)
tEmbedded.cur<-data.frame(cbind(tEmbeddedX, tEmbeddedY))
colnames(tEmbedded.cur)<-manifestVars
twinData[[i]]<-tEmbedded.cur
}
|
# NOTE(review): no geom layer is added, so this renders an empty flipped
# coordinate system -- presumably a geom such as geom_col() was intended;
# confirm against the exercise this practice file belongs to.
ggplot(data = job_income, aes(x = job, y = mean_income, fill = sex)) +
  coord_flip()
| /R/R_projects/chap09/chap09_6_practice(2).R | no_license | db3124/bigdata_maestro | R | false | false | 86 | r | ggplot(data = job_income, aes(x = job, y = mean_income, fill = sex)) +
coord_flip()
|
# Read a Newick-format phylogenetic tree and remove its root; the unrooted
# tree is written back out on the following line for downstream analysis.
library(ape)
testtree <- read.tree("1528_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1528_0_unrooted.txt") | /codeml_files/newick_trees_processed/1528_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("1528_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1528_0_unrooted.txt") |
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 5.15808994908456e-100, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
# Auto-generated valgrind/fuzzing harness: replay the captured argument list
# above against the package-internal function under test and dump the result.
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/plot_summary.R
\docType{methods}
\name{plot_summary}
\alias{plot_summary}
\alias{plot_summary,MixtureSummary,GraphicalParameters-method}
\alias{plot_summary,MixtureSummary,missing-method}
\title{Plotting function for `MixtureSummary` objects}
\usage{
plot_summary(summ, params)
\S4method{plot_summary}{MixtureSummary,GraphicalParameters}(summ, params)
\S4method{plot_summary}{MixtureSummary,missing}(summ, params)
}
\arguments{
\item{summ}{a `MixtureSummary` object.}
\item{params}{A GraphicalParameters object. If missing, a new object will be
instantiated.
GraphicalParameters includes a color palette for fill and color
aesthetics, which will be included via `scale_*_manual`. Setting these
palettes to NULL will cause the `scale_*_manual` function call to be
removed from the ggplot object.
GraphicalParameters also includes default alpha, linetype, and size
aesthetics for the geom_histogram and geom_line layers. These values
may be set by vectors of length 1 or of a length equal to number of
data points represented in these layers. Setting a value to NULL will
cause the aesthetic to be removed from the geom_* function call.}
}
\value{
An object of class `ggplot`
}
\description{
Plots normal densities from theoretical mixture models over a histogram of observed data.
}
\examples{
data(CNPBayes_SBP, package="MixModelViz")
sbp.summ <- summarize(CNPBayes_SBP)
sbp.summ <- addMarginalModel(sbp.summ)
plot_summary(sbp.summ)
data(CNPBayes_MBP, package="MixModelViz")
mbcnp.summ <- summarize(CNPBayes::CopyNumberModel(CNPBayes_MBP))
mbcnp.summ <- addMarginalModel(mbcnp.summ)
mbcnp.summ <- addMarginalBatch(mbcnp.summ)
plot_summary(mbcnp.summ, new("GraphicalParameters", line.size=2))
}
| /man/plot_summary-method.Rd | no_license | rscharpf/MixModelViz | R | false | true | 1,793 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/plot_summary.R
\docType{methods}
\name{plot_summary}
\alias{plot_summary}
\alias{plot_summary,MixtureSummary,GraphicalParameters-method}
\alias{plot_summary,MixtureSummary,missing-method}
\title{Plotting function for `MixtureSummary` objects}
\usage{
plot_summary(summ, params)
\S4method{plot_summary}{MixtureSummary,GraphicalParameters}(summ, params)
\S4method{plot_summary}{MixtureSummary,missing}(summ, params)
}
\arguments{
\item{summ}{a `MixtureSummary` object.}
\item{params}{A GraphicalParameters object. If missing, a new object will be
instantiated.
GraphicalParameters includes a color palette for fill and color
aesthetics, which will be included via `scale_*_manual`. Setting these
palettes to NULL will cause the `scale_*_manual` function call to be
removed from the ggplot object.
GraphicalParameters also includes default alpha, linetype, and size
aesthetics for the geom_histogram and geom_line layers. These values
may be set by vectors of length 1 or of a length equal to number of
data points represented in these layers. Setting a value to NULL will
cause the aesthetic to be removed from the geom_* function call.}
}
\value{
An object of class `ggplot`
}
\description{
Plots normal densities from theoretical mixture models over a histogram of observed data.
}
\examples{
data(CNPBayes_SBP, package="MixModelViz")
sbp.summ <- summarize(CNPBayes_SBP)
sbp.summ <- addMarginalModel(sbp.summ)
plot_summary(sbp.summ)
data(CNPBayes_MBP, package="MixModelViz")
mbcnp.summ <- summarize(CNPBayes::CopyNumberModel(CNPBayes_MBP))
mbcnp.summ <- addMarginalModel(mbcnp.summ)
mbcnp.summ <- addMarginalBatch(mbcnp.summ)
plot_summary(mbcnp.summ, new("GraphicalParameters", line.size=2))
}
|
\name{catdyn}
\alias{catdyn}
\title{
Class Attribute of Numerically Fit CatDyn Model Objects
}
\description{
To be used by CatDynPred() to create model results.
}
\usage{
catdyn(x, ...)
}
\arguments{
\item{x}{
A list object coming from the optimization wrapper CatDynFit().
}
\item{\dots}{
Not used.
}
}
\value{
A class attribute.
}
\author{
Ruben H. Roa-Ureta (ORCID ID 0000-0002-9620-5224)
}
\examples{
#See examples for CatDynFit().
}
\keyword{ ~classes }
| /man/catdyn.Rd | no_license | santucofs/CatDyn | R | false | false | 492 | rd | \name{catdyn}
\alias{catdyn}
\title{
Class Attribute of Numerically Fit CatDyn Model Objects
}
\description{
To be used by CatDynPred() to create model results.
}
\usage{
catdyn(x, ...)
}
\arguments{
\item{x}{
A list object coming from the optimization wrapper CatDynFit().
}
\item{\dots}{
Not used.
}
}
\value{
A class attribute.
}
\author{
Ruben H. Roa-Ureta (ORCID ID 0000-0002-9620-5224)
}
\examples{
#See examples for CatDynFit().
}
\keyword{classes}
|
# Load required packages; pacman::p_load() installs any that are missing before attaching.
library(pacman)
p_load(raster, ncdf4, rgdal, stringr, gsubfn, lubridate, ggplot2)
# Collect all NetCDF files in the working directory, sorted alphabetically.
# NOTE: list.files(pattern=) takes a regular expression, not a glob -- the original
# "*.nc" would match any filename containing "nc"; anchor on the ".nc" extension.
late_files <- sort(list.files(".", pattern = "\\.nc$", full.names = TRUE))
# Open the first NetCDF file for inspection.
nc <- nc_open(late_files[1])
| /Validation/test.R | no_license | sitio-couto/embrapa | R | false | false | 178 | r | library(pacman)
p_load(raster, ncdf4, rgdal, stringr, gsubfn, lubridate, ggplot2)
late_files <- sort(list.files('.',pattern='*.nc',full.names=TRUE))
nc = nc_open(late_files[1])
|
\name{densityMap}
\alias{densityMap}
\alias{plot.densityMap}
\title{Plot posterior density of stochastic mapping on a tree}
\usage{
densityMap(trees, res=100, fsize=NULL, ftype=NULL, lwd=3, check=FALSE,
legend=NULL, outline=FALSE, type="phylogram", direction="rightwards",
plot=TRUE, ...)
\method{plot}{densityMap}(x, ...)
}
\arguments{
\item{trees}{set of phylogenetic trees in a modified \code{"multiPhylo"} object. Values for a two-state discrete character are mapped on the tree. See \code{\link{make.simmap}} and \code{\link{read.simmap}} for details.}
\item{res}{resolution for gradient plotting. Larger numbers indicate a finer (smoother) gradient.}
\item{fsize}{relative font size - can be a vector with the second element giving the font size for the legend.}
\item{ftype}{font type - see options in \code{\link{plotSimmap}}. As with \code{fsize}, can be a vector with the second element giving font type for the legend.}
\item{lwd}{line width for branches. If a vector of two elements is supplied, the second element will be taken to be the desired width of the legend bar.}
\item{check}{check to make sure that the topology and branch lengths of all phylogenies in \code{trees} are equal.}
\item{legend}{if \code{FALSE} no legend is plotted; if a numeric value, it gives the length of the legend in units of branch length. Default is 0.5 times the total tree length.}
\item{outline}{logical value indicating whether or not to outline the branches of the tree in black.}
\item{type}{type of plot desired. Options are \code{"phylogram"} for a rightward square phylogram; and \code{"fan"} for a circular phylogram.}
\item{plot}{logical value indicating whether or not to plot the tree. If \code{plot=FALSE} then an object of class \code{"densityMap"} will be returned without plotting.}
\item{direction}{plotting direction for \code{type="phylogram"}.}
\item{x}{for \code{plot.densityMap}, an object of class \code{"densityMap"}.}
\item{...}{optional arguments for \code{plot.densityMap}. These include all the arguments of \code{densityMap} except \code{trees} and \code{res}. Additional optional arguments include \code{mar} (margins), \code{offset} (tip label offset), and \code{hold} (whether or not to use \code{dev.hold} to hold output to graphical device before plotting; defaults to \code{hold=TRUE}). Also, the argument \code{states} can be used to 'order' the states on the probability axis (that is, which state should correspond to a posterior probability of 0 or 1).}
}
\description{
Function plots a tree with the posterior density for a mapped character from stochastic character mapping on the tree. Since the mapped value is the probability of being in state "1", only binary [0,1] characters are allowed.
}
\value{
Plots a tree and returns an object of class \code{"densityMap"} invisibly.
}
\references{
Bollback, J. P. 2006. Stochastic character mapping of discrete traits on phylogenies. \emph{BMC Bioinformatics}, \bold{7}, 88.
Huelsenbeck, J. P., R. Nielsen, and J. P. Bollback. 2003. Stochastic mapping of morphological characters. \emph{Systematic Biology}, \bold{52}, 131-138.
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
Revell, L. J. 2013. Two new graphical methods for mapping trait evolution on phylogenies. \emph{Methods in Ecology and Evolution}, \bold{4}, 754-759.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{make.simmap}}, \code{\link{plotSimmap}}, \code{\link{read.simmap}}
}
\keyword{phylogenetics}
\keyword{plotting}
\keyword{comparative method}
| /man/densityMap.Rd | no_license | PeteCowman/phytools | R | false | false | 3,650 | rd | \name{densityMap}
\alias{densityMap}
\alias{plot.densityMap}
\title{Plot posterior density of stochastic mapping on a tree}
\usage{
densityMap(trees, res=100, fsize=NULL, ftype=NULL, lwd=3, check=FALSE,
legend=NULL, outline=FALSE, type="phylogram", direction="rightwards",
plot=TRUE, ...)
\method{plot}{densityMap}(x, ...)
}
\arguments{
\item{trees}{set of phylogenetic trees in a modified \code{"multiPhylo"} object. Values for a two-state discrete character are mapped on the tree. See \code{\link{make.simmap}} and \code{\link{read.simmap}} for details.}
\item{res}{resolution for gradient plotting. Larger numbers indicate a finer (smoother) gradient.}
\item{fsize}{relative font size - can be a vector with the second element giving the font size for the legend.}
\item{ftype}{font type - see options in \code{\link{plotSimmap}}. As with \code{fsize}, can be a vector with the second element giving font type for the legend.}
\item{lwd}{line width for branches. If a vector of two elements is supplied, the second element will be taken to be the desired width of the legend bar.}
\item{check}{check to make sure that the topology and branch lengths of all phylogenies in \code{trees} are equal.}
\item{legend}{if \code{FALSE} no legend is plotted; if a numeric value, it gives the length of the legend in units of branch length. Default is 0.5 times the total tree length.}
\item{outline}{logical value indicating whether or not to outline the branches of the tree in black.}
\item{type}{type of plot desired. Options are \code{"phylogram"} for a rightward square phylogram; and \code{"fan"} for a circular phylogram.}
\item{plot}{logical value indicating whether or not to plot the tree. If \code{plot=FALSE} then an object of class \code{"densityMap"} will be returned without plotting.}
\item{direction}{plotting direction for \code{type="phylogram"}.}
\item{x}{for \code{plot.densityMap}, an object of class \code{"densityMap"}.}
\item{...}{optional arguments for \code{plot.densityMap}. These include all the arguments of \code{densityMap} except \code{trees} and \code{res}. Additional optional arguments include \code{mar} (margins), \code{offset} (tip label offset), and \code{hold} (whether or not to use \code{dev.hold} to hold output to graphical device before plotting; defaults to \code{hold=TRUE}). Also, the argument \code{states} can be used to 'order' the states on the probability axis (that is, which state should correspond to a posterior probability of 0 or 1).}
}
\description{
Function plots a tree with the posterior density for a mapped character from stochastic character mapping on the tree. Since the mapped value is the probability of being in state "1", only binary [0,1] characters are allowed.
}
\value{
Plots a tree and returns an object of class \code{"densityMap"} invisibly.
}
\references{
Bollback, J. P. 2006. Stochastic character mapping of discrete traits on phylogenies. \emph{BMC Bioinformatics}, \bold{7}, 88.
Huelsenbeck, J. P., R. Nielsen, and J. P. Bollback. 2003. Stochastic mapping of morphological characters. \emph{Systematic Biology}, \bold{52}, 131-138.
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
Revell, L. J. 2013. Two new graphical methods for mapping trait evolution on phylogenies. \emph{Methods in Ecology and Evolution}, \bold{4}, 754-759.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{make.simmap}}, \code{\link{plotSimmap}}, \code{\link{read.simmap}}
}
\keyword{phylogenetics}
\keyword{plotting}
\keyword{comparative method}
|
# Code to create global, super-regional, and regional aggregates with uncertainty -------------------------
# Plotting example at bottom ------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
########################################################################################################################
# SECTION 1: Prep
########################################################################################################################
# Shared environment setup: init.r presumably defines location_set_id, gbd_round, gbd_cycle,
# root/data_root, ref_data_repo and loads data.table/magrittr -- TODO confirm against init.r.
username <- Sys.info()[["user"]]
source(paste0("FILEPATH/init.r"))
library(ggrepel)
### 0. Custom settings
model_version <- "2021-02-11"     # dated model run whose draws are aggregated below
decomp_step <- "iterative"        # GBD decomposition step passed to the shared functions
superregion_exception <- FALSE    # forwarded to make_vaccine_aggregate (not used in its body -- confirm)
prep_to_share <- FALSE            # not referenced below; presumably a leftover toggle -- confirm before removing
save_draws <- TRUE                # if TRUE, make_vaccine_aggregate also writes draw-level CSVs
world_bank_agg <- FALSE           # forwarded to make_vaccine_aggregate (not used in its body -- confirm)
introduced_locs_only <- FALSE     # forwarded to make_vaccine_aggregate (not used in its body -- confirm)
under_1s <- 28                    # age_group_id for the under-1 cohort (GBD convention -- TODO confirm)
### 1. Pull locs
source("FILEPATH/get_location_metadata.R")
locs <- get_location_metadata(location_set_id=location_set_id, gbd_round_id=gbd_round, decomp_step=decomp_step)
# Keep only terminal (most-detailed) locations so aggregation never double-counts parents
most_detailed_locs <- locs[most_detailed == 1]
### 2. Pull pops
source('FILEPATH/get_population.R')
populations <- get_population(location_id = unique(locs$location_id), sex_id="3", year_id = "-1", age_group_id=c(under_1s, 238), single_year_age = TRUE, gbd_round_id=gbd_round, decomp_step = decomp_step)
# add a populations col denoting if 0-11mo (cohort 1) or 12-23mo (cohort 2) for location-specific target population aggregations
populations[age_group_id==under_1s, age_cohort := 1]
populations[age_group_id==238, age_cohort := 2]
### 3. Pull schedule (MCV1, MCV2, RCV1)
# Country-specific vaccination schedules: which age cohort is the target population for each dose
schedule <- readRDS(file.path(ref_data_repo, "vaccine_target.rds"))
schedule <- merge(schedule, locs[,.(ihme_loc_id, location_id)], by="ihme_loc_id", all.x=T)[, ihme_loc_id := NULL]
# Collapse any cohorts beyond the second into cohort 2 (12-23mo)
schedule[age_cohort > 2, age_cohort := 2]
########################################################################################################################
# SECTION 2: Make function
########################################################################################################################
# Function to create global, super-regional, and regional aggregates for a given vaccine
#
# Arguments:
#   vaccine   - modelable entity name (e.g. "vacc_dpt3"); selects the draw directory and labels output.
#   gbd_round, run_date - forwarded to load_draws as gbd_rd/rd. NOTE(review): load_draws never
#     references these arguments (the draw path is built from `me` only) -- confirm intent.
#   locs      - location metadata restricted to most-detailed locations.
#   pops      - population table carrying an age_cohort column (1 = 0-11mo, 2 = 12-23mo).
#   verbose   - emit progress messages.
#   superregion_exception, world_bank_agg, introduced_locs_only - accepted but not referenced in
#     this body; presumably hooks for alternative aggregation modes -- confirm before removing.
#   sched     - schedule table mapping location_id to the target age_cohort per me_name.
#
# Side effects: relies on globals `save_draws` and `agg_dir` (defined in Section 3); writes
# draw-level count/coverage CSVs when save_draws is TRUE and always writes <agg_dir>/<vaccine>.csv.
#
# Returns: data.table of mean/lower/upper coverage by year for global/super_region/region levels.
make_vaccine_aggregate <- function(vaccine, gbd_round, run_date, locs = most_detailed_locs, pops = populations, verbose = F,
                                   superregion_exception = F, world_bank_agg = F, introduced_locs_only = F, sched = schedule) {
if (verbose) message(paste0("\nVaccine: ", vaccine))
# 1. Pull draws
# Helper function to load draws and combine for all locations
# Reads one CSV per location_id from the hard-coded draw directory and row-binds them.
load_draws <- function(me, loc_ids, gbd_rd, rd) {
draw_dir <- paste0("FILEPATH/", me)
draws <- rbindlist(lapply(loc_ids, function(l) fread(paste0(draw_dir, "/", l, ".csv"))), fill = TRUE)
# If there are missing measure_id or covariate_id, fill in
# (rbindlist(fill=TRUE) leaves NAs where a location file lacked the column)
if (("measure_id" %in% names(draws)) & ("covariate_id" %in% names(draws))) {
if (nrow(draws[is.na(measure_id)]) + nrow(draws[is.na(covariate_id)]) > 0) {
m_id <- unique(draws[!is.na(measure_id)]$measure_id)
c_id <- unique(draws[!is.na(covariate_id)]$covariate_id)
draws[is.na(measure_id), measure_id := m_id]
draws[is.na(covariate_id), covariate_id := c_id]
}
}
return(draws)
}
if (verbose) message(" -- Loading draws")
all_draws <- load_draws(me = vaccine, loc_ids = unique(locs$location_id), gbd_rd = gbd_round, rd = run_date)
### 2. Merge on population info
if (verbose) message(" -- Merging populations")
if (vaccine %in% c("vacc_mcv1", "vacc_mcv2", "vacc_rcv1")) {
# merge in pops by mcv-sched-specific age groups
vax_sched <- sched[me_name==vaccine][, me_name := NULL]
all_draws <- merge(all_draws, vax_sched, by="location_id", all.x=T)
# Locations missing from the schedule default to cohort 2 (12-23mo)
all_draws[is.na(age_cohort), age_cohort := 2]
all_draws <- merge(all_draws, subset(pops, select = c("location_id", "year_id", "sex_id", "population", "age_cohort")), all.x = T, all.y = F, by = c("location_id", "year_id", "sex_id", "age_cohort"))
all_draws <- all_draws[, age_cohort := NULL]
} else {
# All other antigens use the under-1 (cohort 1) target population
pops <- pops[age_cohort==1][, age_cohort := NULL]
all_draws <- merge(all_draws, subset(pops, select = c("location_id", "year_id", "sex_id", "population")), all.x = T, all.y = F, by = c("location_id", "year_id", "sex_id"))
}
### 3. Merge on location info
if (verbose) message(" -- Merging locations")
all_draws <- merge(all_draws, locs, by = "location_id", all.x = T, all.y = F)
### 4. Calculate draws of aggregates - number of children vaccinated (by draw) and pop
if (verbose) message(" -- Converting to counts")
draw_cols <- names(all_draws)[grepl("draw_[0-9]*", names(all_draws))]
non_draw_cols <- setdiff(names(all_draws), draw_cols)
### 5. Convert to counts (both coverage & population)
# In-place (by reference): each draw column becomes coverage * population = vaccinated count
all_draws[, (draw_cols) := .SD * population, .SDcols = draw_cols]
### 6. Aggregate counts of children vaccinated & population by geography and year
if (verbose) message(" -- Collapsing by geography")
global_draws <- all_draws[, lapply(.SD, sum), by = c("year_id", "sex_id", "age_group_id", "covariate_id"), .SDcols = c(draw_cols, "population")]
super_region_draws <- all_draws[, lapply(.SD, sum), by = c("year_id", "sex_id", "age_group_id", "covariate_id", "super_region_id", "super_region_name"), .SDcols = c(draw_cols, "population")]
region_draws <- all_draws[, lapply(.SD, sum), by = c("year_id", "sex_id", "age_group_id", "covariate_id", "super_region_id", "super_region_name", "region_id", "region_name"), .SDcols = c(draw_cols, "population")]
if (save_draws) {
message(" -- Saving geographic counts draw objects")
counts_dir <- file.path(agg_dir, "count_draws")
if (!dir.exists(counts_dir)) dir.create(counts_dir)
global_draws[, me_name := vaccine]
fwrite(global_draws, file=paste0(counts_dir, "/", vaccine, "_global_draws.csv"))
super_region_draws[, me_name := vaccine]
fwrite(super_region_draws, file=paste0(counts_dir, "/", vaccine, "_super_region_draws.csv"))
region_draws[, me_name := vaccine]
fwrite(region_draws, file=paste0(counts_dir, "/", vaccine, "_region_draws.csv"))
}
### 7. Create draws of percentages (coverage) by year & aggregate location, then calculate mean/upper/lower across draws
# NOTE(review): `df` is a data.table, so the `:=` below modifies the caller's *_draws table
# by reference (counts become proportions after this call). The count CSVs above are written
# first, so outputs are unaffected -- but the in-memory tables change; confirm this is intended.
convert_and_collapse <- function(df, global=FALSE, super_region=FALSE) {
# Get draw cols and non draw cols
dcols <- names(df)[grepl("draw_[0-9]*", names(df))]
ndcols <- setdiff(names(df), dcols)
# Convert from counts to proportions
# NOTE(review): divides using `draw_cols` from the enclosing scope rather than the local
# `dcols`; the two are identical here, but the inconsistency is worth tidying.
df[, (draw_cols) := .SD / population, .SDcols = draw_cols]
if (save_draws & global) {
cov_dir <- file.path(agg_dir, "coverage_draws")
if (!dir.exists(cov_dir)) dir.create(cov_dir)
global_save <- copy(df) %>% .[, me_name := vaccine]
fwrite(global_save, file=paste0(cov_dir, "/", vaccine, "_global_cov_draws.csv"))
}
if (save_draws & super_region) {
cov_dir <- file.path(agg_dir, "coverage_draws")
if (!dir.exists(cov_dir)) dir.create(cov_dir)
# NOTE(review): variable is still named global_save though it holds super-region draws
global_save <- copy(df) %>% .[, me_name := vaccine]
fwrite(global_save, file=paste0(cov_dir, "/", vaccine, "_super_region_cov_draws.csv"))
}
# Summarise across draws: mean plus 95% uncertainty interval (2.5th/97.5th percentiles)
df[, mean := rowMeans(.SD), .SDcols = dcols]
df[, lower := matrixStats::rowQuantiles(as.matrix(.SD), probs = 0.025), .SDcols = dcols]
df[, upper := matrixStats::rowQuantiles(as.matrix(.SD), probs = 0.975), .SDcols = dcols]
# Drop the individual draw columns, keeping only identifiers and summary stats
keep_cols <- names(df)[!(names(df) %in% dcols)]
df <- subset(df, select = keep_cols)
return(df)
}
if (verbose) message(" -- Collapsing to mean and upper/lower bounds")
global_df <- convert_and_collapse(global_draws, global=TRUE)
super_region_df <- convert_and_collapse(super_region_draws, super_region=TRUE)
region_df <- convert_and_collapse(region_draws)
### 8. Add some additional information and return the multi-level aggregates
global_df[, level := "global"]
super_region_df[, level := "super_region"]
region_df[, level := "region"]
output_df <- rbindlist(list(global_df, super_region_df, region_df), fill = TRUE)
output_df[, me_name := vaccine]
### 9. save csv for future analyses
message(" -- Saving aggregated output file to agg_dir")
fwrite(output_df, file=paste0(agg_dir, "/", vaccine, ".csv"))
message("All done")
return(output_df)
}
########################################################################################################################
# SECTION 3: Call function
########################################################################################################################
# Make directory to save output in same folder as mean results
results.root <- paste0(root, "/data/exp/modeled/", gbd_cycle, "/", model_version)
agg_dir <- file.path(results.root, "aggregate_summaries")
# `ifelse()` is the vectorized conditional; a plain scalar `if` is the idiomatic way to
# conditionally create a directory. recursive = TRUE also covers a missing parent path.
if (!dir.exists(agg_dir)) dir.create(agg_dir, recursive = TRUE)
# Load all vaccines of interest
vaccines <- c("vacc_dpt1", "vacc_dpt3", "vacc_mcv1", "vacc_pcv3", "vacc_hib3", "vacc_hepb3",
              "vacc_mcv2", "vacc_polio3", "vacc_rotac", "vacc_bcg", "vacc_rcv1")
# Aggregate each vaccine in turn and stack the per-vaccine summaries into one data.table
df_vax <- lapply(vaccines,
                 function(v) {
                   make_vaccine_aggregate(vaccine = v,
                                          run_date = model_version,
                                          gbd_round = gbd_cycle,
                                          locs = most_detailed_locs,
                                          pops = populations,
                                          verbose = TRUE,
                                          superregion_exception = superregion_exception,
                                          world_bank_agg = world_bank_agg,
                                          introduced_locs_only = introduced_locs_only)
                 }) %>% rbindlist
########################################################################################################################
# SECTION 5: PLOT function output (based just on regular geographic aggregation)
########################################################################################################################
# Re-read the per-vaccine summary CSVs written above (lets plotting run without re-aggregating)
vaccines <- c("vacc_dpt1", "vacc_dpt3", "vacc_mcv1", "vacc_pcv3", "vacc_hib3", "vacc_hepb3",
"vacc_mcv2", "vacc_polio3", "vacc_rotac", "vacc_bcg", "vacc_rcv1")
df_vax <- lapply(paste0(data_root, "/exp/modeled/", gbd_cycle, "/", model_version, "/aggregate_summaries/", vaccines, ".csv"),
fread) %>% rbindlist(., fill=TRUE) %>% unique
# NOTE(review): year_end is only assigned inside this branch but is used unconditionally
# below (pdf filename, x-axis breaks) -- any other gbd_cycle would error; confirm intended.
if (gbd_cycle=="gbd2020") {
year_end <- 2019
df_vax <- df_vax[year_id <= year_end]
}
df_plot <- subset(df_vax, level == "global")
# Human-readable antigen labels; rows without one (vacc_bcg has no label here) are dropped below
df_plot[me_name == "vacc_dpt1", label := "DTP1"]
df_plot[me_name == "vacc_dpt3", label := "DTP3"]
df_plot[me_name == "vacc_hepb3", label := "HepB3"]
df_plot[me_name == "vacc_hib3", label := "Hib3"]
df_plot[me_name == "vacc_mcv1", label := "MCV1"]
df_plot[me_name == "vacc_mcv2", label := "MCV2"]
df_plot[me_name == "vacc_pcv3", label := "PCV3"]
df_plot[me_name == "vacc_polio3", label := "Pol3"]
df_plot[me_name == "vacc_rotac", label := "RotaC"]
df_plot[me_name == "vacc_rcv1", label := "RCV1"]
df_plot <- df_plot[!is.na(label)]
# Candidate palettes; only cols4 (10 colors, matching the 10 labeled vaccines) is used below
cols <- c("#8C021C", "#72451C", "#CCA606", "#334403", "#187933", "#064742", "#006E8B", "#331D49", "#5C2266")
cols2 <- c("#E58606","#5D69B1","#52BCA3","#99C945","#CC61B0","#24796C","#DAA51B","#2F8AC4","#764E9F","#ED645A","#CC3A8E","#A5AA99")
cols3 <- c("#7F3C8D","#11A579","#3969AC","#F2B701","#E73F74","#80BA5A","#E68310","#008695","#CF1C90","#f97b72","#4b4b8f","#A5AA99")
cols_select <- c("#671f90", "#25919a")
### PUBLICATION GLOBAL TIME SERIES
plot.root <- file.path("FILEPATH/paper", model_version)
ifelse (!dir.exists(plot.root), dir.create(plot.root, recursive = TRUE), FALSE)
# NOTE(review): cols3 redefined identically to above -- harmless duplication
cols3 <- c("#7F3C8D","#11A579","#3969AC","#F2B701","#E73F74","#80BA5A","#E68310","#008695","#CF1C90","#f97b72","#4b4b8f","#A5AA99")
cols4 <- c("#82B1E5","#0571EC",
           "#c3710c","#F2B701",
           "#E7073F","#891238",
           "#80BA5A","#66169F",
           "#5C5751", "#307404")
ifelse(!dir.exists(file.path(plot.root, "global_time_series")), dir.create(file.path(plot.root, "global_time_series")), FALSE)
# Open PDF device; the ggplot below auto-prints at top level, then the device is closed
pdf(file = paste0(plot.root, "/FILEPATH/vaccines_global_to_", year_end, ".pdf"),
    width = 8,
    height = 5)
ggplot(data = df_plot,
       aes(x = year_id,
           y = mean,
           ymin = lower,
           ymax = upper,
           group = label)) +
  # Dotted reference line at the 90% GVAP coverage target
  geom_hline(aes(yintercept=0.9), linetype="dotted") +
  annotate("text", x = 1980, y = 0.9, label = "GVAP Target", vjust = 1.5, hjust = 0, size=5) +
  # Shaded ribbon = 95% uncertainty interval; line = posterior mean
  geom_ribbon(alpha = 0.15, aes(fill = label)) +
  geom_line(alpha = 0.95, aes(color = label)) +
  coord_cartesian(ylim=c(-0.05, 1.05), xlim=c(1979, 2020), expand=TRUE) +
  theme_minimal() +
  theme(axis.title=element_text(),
        strip.text=element_text(size=12, face ="bold"),
        strip.background=element_blank(),
        axis.text.x = element_text(size = 12),
        axis.text.y = element_text(size = 12),
        axis.title.x = element_text(size=12),
        axis.title.y = element_text(size=12),
        legend.title = element_text(size = 11)
        ) +
  labs(x = "Year", y = "Coverage", color = "Vaccine", fill = "Vaccine",
       title = "Figure 1. Global vaccine coverage by vaccine, 1980 to 2019.") +
  scale_y_continuous(breaks=c(0, 0.2, 0.4, 0.6, 0.8, 1) %>% round(., 2),
                     labels = scales::percent,
                     limits = c(0,1),
                     expand = c(0,0)) +
  scale_x_continuous(expand = c(0,0),
                     breaks = c(seq(1980, 2015, by = 5), as.numeric(year_end))) +
  scale_color_manual(values = cols4) +
  scale_fill_manual(values = cols4) #+
dev.off()
### SR FACETS PLOT
# Same labeling/plotting as the global series, but faceted by GBD super-region.
# NOTE(review): the png and pdf blocks below are copy-pasted duplicates of one ggplot --
# a candidate for factoring into a shared plot object printed to each device.
df_plot <- subset(df_vax, level == "super_region")
df_plot[me_name == "vacc_dpt1", label := "DTP1"]
df_plot[me_name == "vacc_dpt3", label := "DTP3"]
df_plot[me_name == "vacc_hepb3", label := "HepB3"]
df_plot[me_name == "vacc_hib3", label := "Hib3"]
df_plot[me_name == "vacc_mcv1", label := "MCV1"]
df_plot[me_name == "vacc_mcv2", label := "MCV2"]
df_plot[me_name == "vacc_pcv3", label := "PCV3"]
df_plot[me_name == "vacc_polio3", label := "Pol3"]
df_plot[me_name == "vacc_rcv1", label := "RCV1"]
df_plot[me_name == "vacc_rotac", label := "RotaC"]
df_plot <- df_plot[!is.na(label)]
# add in lancet_label
# (publication-ready super-region names; requires locs to carry a lancet_label column -- confirm)
df_plot[, location_name := super_region_name]
df_plot <- merge(df_plot, locs[,.(location_name, lancet_label)], by="location_name", all.x=T)
# plotting directory
ifelse(!dir.exists(file.path(plot.root, "FILEPATH")), dir.create(file.path(plot.root, "FILEPATH"), recursive=TRUE), FALSE)
# plot
## png:
png(file = paste0(plot.root, "/FILEPATH/vaccines_SR_to_", year_end, "_lancet_label.png"),
    width = 13,
    height = 8,
    units = "in",
    res = 600)
ggplot(data = df_plot,
       aes(x = year_id,
           y = mean,
           ymin = lower,
           ymax = upper,
           group = label)) +
  geom_hline(aes(yintercept=0.9), linetype="dotted") +
  annotate("text", x = 1980, y = 0.9, label = "GVAP Target", vjust = 1.5, hjust = 0, size=4) +
  geom_ribbon(alpha = 0.1, aes(fill = label)) +
  geom_line(alpha = 0.9, aes(color = label)) +
  coord_cartesian(ylim=c(-0.05, 1.05), xlim=c(1979, 2020), expand=TRUE) +
  theme_minimal() +
  theme(axis.title=element_text(),
        plot.title=element_text(face="bold",size=18, hjust = 0.5),
        strip.text=element_text(size=10, face ="bold"),
        strip.background=element_blank(),
        axis.text.x = element_text(size = 10.5, angle = 45),
        axis.text.y = element_text(size = 10.5),
        axis.title.x = element_text(size=10.5),
        axis.title.y = element_text(size=10.5),
        legend.title = element_text(size = 10.5)
        ) +
  labs(x = "Year", y = "Coverage", color = "Vaccine", fill = "Vaccine") +
  scale_y_continuous(breaks=c(0, 0.2, 0.4, 0.6, 0.8, 1) %>% round(., 2),
                     labels = scales::percent,
                     limits = c(0,1),
                     expand = c(0,0)) +
  scale_x_continuous(expand = c(0,0),
                     breaks = c(seq(1980, 2015, by = 5), as.numeric(year_end))) +
  scale_color_manual(values = cols4) +
  scale_fill_manual(values = cols4) +
  facet_wrap(~lancet_label)
dev.off()
## pdf:
# Identical figure written as a vector PDF for publication
pdf(file = paste0(plot.root, "/FILEPATH/vaccines_SR_to_", year_end, "_lancet_label.pdf"),
    width = 13,
    height = 8)
ggplot(data = df_plot,
       aes(x = year_id,
           y = mean,
           ymin = lower,
           ymax = upper,
           group = label)) +
  geom_hline(aes(yintercept=0.9), linetype="dotted") +
  annotate("text", x = 1980, y = 0.9, label = "GVAP Target", vjust = 1.5, hjust = 0, size=4) +
  geom_ribbon(alpha = 0.1, aes(fill = label)) +
  geom_line(alpha = 0.9, aes(color = label)) +
  coord_cartesian(ylim=c(-0.05, 1.05), xlim=c(1979, 2020), expand=TRUE) +
  theme_minimal() +
  theme(axis.title=element_text(),
        plot.title=element_text(face="bold",size=18, hjust = 0.5),
        strip.text=element_text(size=10, face ="bold"),
        strip.background=element_blank(),
        axis.text.x = element_text(size = 10.5, angle = 45),
        axis.text.y = element_text(size = 10.5),
        axis.title.x = element_text(size=10.5),
        axis.title.y = element_text(size=10.5),
        legend.title = element_text(size = 10.5)
        ) +
  labs(x = "Year", y = "Coverage", color = "Vaccine", fill = "Vaccine") +
  scale_y_continuous(breaks=c(0, 0.2, 0.4, 0.6, 0.8, 1) %>% round(., 2),
                     labels = scales::percent,
                     limits = c(0,1),
                     expand = c(0,0)) +
  scale_x_continuous(expand = c(0,0),
                     breaks = c(seq(1980, 2015, by = 5), as.numeric(year_end))) +
  scale_color_manual(values = cols4) +
  scale_fill_manual(values = cols4) +
  facet_wrap(~lancet_label)
dev.off()
| /display/global_coverage_aggregations_plots.R | no_license | ihmeuw/gbd_coverage | R | false | false | 18,141 | r | # Code to create global, super-regional, and regional aggregates with uncertainty -------------------------
# Plotting example at bottom ------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
########################################################################################################################
# SECTION 1: Prep
########################################################################################################################
username <- Sys.info()[["user"]]
source(paste0("FILEPATH/init.r"))
library(ggrepel)
### 0. Custom settings
model_version <- "2021-02-11"
decomp_step <- "iterative"
superregion_exception <- FALSE
prep_to_share <- FALSE
save_draws <- TRUE
world_bank_agg <- FALSE
introduced_locs_only <- FALSE
under_1s <- 28
### 1. Pull locs
source("FILEPATH/get_location_metadata.R")
locs <- get_location_metadata(location_set_id=location_set_id, gbd_round_id=gbd_round, decomp_step=decomp_step)
most_detailed_locs <- locs[most_detailed == 1]
### 2. Pull pops
source('FILEPATH/get_population.R')
populations <- get_population(location_id = unique(locs$location_id), sex_id="3", year_id = "-1", age_group_id=c(under_1s, 238), single_year_age = TRUE, gbd_round_id=gbd_round, decomp_step = decomp_step)
# add a populations col denoting if 0-11mo (cohort 1) or 12-23mo (cohort 2) for location-specific target population aggregations
populations[age_group_id==under_1s, age_cohort := 1]
populations[age_group_id==238, age_cohort := 2]
### 3. Pull schedule (MCV1, MCV2, RCV1)
schedule <- readRDS(file.path(ref_data_repo, "vaccine_target.rds"))
schedule <- merge(schedule, locs[,.(ihme_loc_id, location_id)], by="ihme_loc_id", all.x=T)[, ihme_loc_id := NULL]
schedule[age_cohort > 2, age_cohort := 2]
########################################################################################################################
# SECTION 2: Make function
########################################################################################################################
# Function to create global, super-regional, and regional aggregates for a given vaccine
make_vaccine_aggregate <- function(vaccine, gbd_round, run_date, locs = most_detailed_locs, pops = populations, verbose = F,
superregion_exception = F, world_bank_agg = F, introduced_locs_only = F, sched = schedule) {
if (verbose) message(paste0("\nVaccine: ", vaccine))
# 1. Pull draws
# Helper function to load draws and combine for all locations
load_draws <- function(me, loc_ids, gbd_rd, rd) {
draw_dir <- paste0("FILEPATH/", me)
draws <- rbindlist(lapply(loc_ids, function(l) fread(paste0(draw_dir, "/", l, ".csv"))), fill = TRUE)
# If there are missing measure_id or covariate_id, fill in
if (("measure_id" %in% names(draws)) & ("covariate_id" %in% names(draws))) {
if (nrow(draws[is.na(measure_id)]) + nrow(draws[is.na(covariate_id)]) > 0) {
m_id <- unique(draws[!is.na(measure_id)]$measure_id)
c_id <- unique(draws[!is.na(covariate_id)]$covariate_id)
draws[is.na(measure_id), measure_id := m_id]
draws[is.na(covariate_id), covariate_id := c_id]
}
}
return(draws)
}
if (verbose) message(" -- Loading draws")
all_draws <- load_draws(me = vaccine, loc_ids = unique(locs$location_id), gbd_rd = gbd_round, rd = run_date)
### 2. Merge on population info
if (verbose) message(" -- Merging populations")
if (vaccine %in% c("vacc_mcv1", "vacc_mcv2", "vacc_rcv1")) {
# merge in pops by mcv-sched-specific age groups
vax_sched <- sched[me_name==vaccine][, me_name := NULL]
all_draws <- merge(all_draws, vax_sched, by="location_id", all.x=T)
all_draws[is.na(age_cohort), age_cohort := 2]
all_draws <- merge(all_draws, subset(pops, select = c("location_id", "year_id", "sex_id", "population", "age_cohort")), all.x = T, all.y = F, by = c("location_id", "year_id", "sex_id", "age_cohort"))
all_draws <- all_draws[, age_cohort := NULL]
} else {
pops <- pops[age_cohort==1][, age_cohort := NULL]
all_draws <- merge(all_draws, subset(pops, select = c("location_id", "year_id", "sex_id", "population")), all.x = T, all.y = F, by = c("location_id", "year_id", "sex_id"))
}
### 3. Merge on location info
if (verbose) message(" -- Merging locations")
all_draws <- merge(all_draws, locs, by = "location_id", all.x = T, all.y = F)
### 4. Calculate draws of aggregates - number of children vaccinated (by draw) and pop
if (verbose) message(" -- Converting to counts")
draw_cols <- names(all_draws)[grepl("draw_[0-9]*", names(all_draws))]
non_draw_cols <- setdiff(names(all_draws), draw_cols)
### 5. Convert to counts (both coverage & population)
all_draws[, (draw_cols) := .SD * population, .SDcols = draw_cols]
### 6. Aggregate counts of children vaccinated & population by geography and year
if (verbose) message(" -- Collapsing by geography")
global_draws <- all_draws[, lapply(.SD, sum), by = c("year_id", "sex_id", "age_group_id", "covariate_id"), .SDcols = c(draw_cols, "population")]
super_region_draws <- all_draws[, lapply(.SD, sum), by = c("year_id", "sex_id", "age_group_id", "covariate_id", "super_region_id", "super_region_name"), .SDcols = c(draw_cols, "population")]
region_draws <- all_draws[, lapply(.SD, sum), by = c("year_id", "sex_id", "age_group_id", "covariate_id", "super_region_id", "super_region_name", "region_id", "region_name"), .SDcols = c(draw_cols, "population")]
if (save_draws) {
message(" -- Saving geographic counts draw objects")
counts_dir <- file.path(agg_dir, "count_draws")
if (!dir.exists(counts_dir)) dir.create(counts_dir)
global_draws[, me_name := vaccine]
fwrite(global_draws, file=paste0(counts_dir, "/", vaccine, "_global_draws.csv"))
super_region_draws[, me_name := vaccine]
fwrite(super_region_draws, file=paste0(counts_dir, "/", vaccine, "_super_region_draws.csv"))
region_draws[, me_name := vaccine]
fwrite(region_draws, file=paste0(counts_dir, "/", vaccine, "_region_draws.csv"))
}
### 7. Create draws of percentages (coverage) by year & aggregate location, then calculate mean/upper/lower across draws
convert_and_collapse <- function(df, global=FALSE, super_region=FALSE) {
# Get draw cols and non draw cols
dcols <- names(df)[grepl("draw_[0-9]*", names(df))]
ndcols <- setdiff(names(df), dcols)
# Convert from counts to proportions
df[, (draw_cols) := .SD / population, .SDcols = draw_cols]
if (save_draws & global) {
cov_dir <- file.path(agg_dir, "coverage_draws")
if (!dir.exists(cov_dir)) dir.create(cov_dir)
global_save <- copy(df) %>% .[, me_name := vaccine]
fwrite(global_save, file=paste0(cov_dir, "/", vaccine, "_global_cov_draws.csv"))
}
if (save_draws & super_region) {
cov_dir <- file.path(agg_dir, "coverage_draws")
if (!dir.exists(cov_dir)) dir.create(cov_dir)
global_save <- copy(df) %>% .[, me_name := vaccine]
fwrite(global_save, file=paste0(cov_dir, "/", vaccine, "_super_region_cov_draws.csv"))
}
df[, mean := rowMeans(.SD), .SDcols = dcols]
df[, lower := matrixStats::rowQuantiles(as.matrix(.SD), probs = 0.025), .SDcols = dcols]
df[, upper := matrixStats::rowQuantiles(as.matrix(.SD), probs = 0.975), .SDcols = dcols]
keep_cols <- names(df)[!(names(df) %in% dcols)]
df <- subset(df, select = keep_cols)
return(df)
}
if (verbose) message(" -- Collapsing to mean and upper/lower bounds")
global_df <- convert_and_collapse(global_draws, global=TRUE)
super_region_df <- convert_and_collapse(super_region_draws, super_region=TRUE)
region_df <- convert_and_collapse(region_draws)
### 8. Add some additional information and return the multi-level aggregates
global_df[, level := "global"]
super_region_df[, level := "super_region"]
region_df[, level := "region"]
output_df <- rbindlist(list(global_df, super_region_df, region_df), fill = TRUE)
output_df[, me_name := vaccine]
### 9. save csv for future analyses
message(" -- Saving aggregated output file to agg_dir")
fwrite(output_df, file=paste0(agg_dir, "/", vaccine, ".csv"))
message("All done")
return(output_df)
}
########################################################################################################################
# SECTION 3: Call function
########################################################################################################################
# Make directory to save output in same folder as mean results
results.root <- paste0(root, "/data/exp/modeled/", gbd_cycle, "/", model_version)
agg_dir <- file.path(results.root, "aggregate_summaries")
# ifelse() used as a one-line conditional purely for its dir.create side effect
ifelse(!dir.exists(agg_dir), dir.create(agg_dir), FALSE)
# Load all vaccines of interest
vaccines <- c("vacc_dpt1", "vacc_dpt3", "vacc_mcv1", "vacc_pcv3", "vacc_hib3", "vacc_hepb3",
"vacc_mcv2", "vacc_polio3", "vacc_rotac", "vacc_bcg", "vacc_rcv1")
# Run the aggregation for each vaccine and stack the per-vaccine results into
# one data.table; make_vaccine_aggregate also writes per-vaccine csvs to agg_dir.
df_vax <- lapply(vaccines,
function(v){
make_vaccine_aggregate(vaccine = v,
run_date = model_version,
gbd_round = gbd_cycle,
locs = most_detailed_locs,
pops = populations,
verbose = TRUE,
superregion_exception = superregion_exception,
world_bank_agg = world_bank_agg,
introduced_locs_only = introduced_locs_only)
}) %>% rbindlist
########################################################################################################################
# SECTION 5: PLOT function output (based just on regular geographic aggregation)
########################################################################################################################
vaccines <- c("vacc_dpt1", "vacc_dpt3", "vacc_mcv1", "vacc_pcv3", "vacc_hib3", "vacc_hepb3",
"vacc_mcv2", "vacc_polio3", "vacc_rotac", "vacc_bcg", "vacc_rcv1")
# Re-read the per-vaccine aggregate csvs from disk (presumably so this plotting
# section can be run standalone without re-running SECTION 3 -- confirm)
df_vax <- lapply(paste0(data_root, "/exp/modeled/", gbd_cycle, "/", model_version, "/aggregate_summaries/", vaccines, ".csv"),
fread) %>% rbindlist(., fill=TRUE) %>% unique
# NOTE(review): year_end is only defined inside this branch but is used
# unconditionally in file names and axis breaks below -- the script will error
# if gbd_cycle != "gbd2020"; consider defining a default year_end first.
if (gbd_cycle=="gbd2020") {
year_end <- 2019
df_vax <- df_vax[year_id <= year_end]
}
# Global-level series only; map me_names to display labels
df_plot <- subset(df_vax, level == "global")
df_plot[me_name == "vacc_dpt1", label := "DTP1"]
df_plot[me_name == "vacc_dpt3", label := "DTP3"]
df_plot[me_name == "vacc_hepb3", label := "HepB3"]
df_plot[me_name == "vacc_hib3", label := "Hib3"]
df_plot[me_name == "vacc_mcv1", label := "MCV1"]
df_plot[me_name == "vacc_mcv2", label := "MCV2"]
df_plot[me_name == "vacc_pcv3", label := "PCV3"]
df_plot[me_name == "vacc_polio3", label := "Pol3"]
df_plot[me_name == "vacc_rotac", label := "RotaC"]
df_plot[me_name == "vacc_rcv1", label := "RCV1"]
# vacc_bcg gets no label above, so this drop removes BCG from the figure
df_plot <- df_plot[!is.na(label)]
# NOTE(review): only cols4 is used by the plots below; cols/cols2/cols3/cols_select
# appear to be leftover palette experiments
cols <- c("#8C021C", "#72451C", "#CCA606", "#334403", "#187933", "#064742", "#006E8B", "#331D49", "#5C2266")
cols2 <- c("#E58606","#5D69B1","#52BCA3","#99C945","#CC61B0","#24796C","#DAA51B","#2F8AC4","#764E9F","#ED645A","#CC3A8E","#A5AA99")
cols3 <- c("#7F3C8D","#11A579","#3969AC","#F2B701","#E73F74","#80BA5A","#E68310","#008695","#CF1C90","#f97b72","#4b4b8f","#A5AA99")
cols_select <- c("#671f90", "#25919a")
### PUBLICATION GLOBAL TIME SERIES
# "FILEPATH" is a scrubbed/placeholder path -- must be replaced before running
plot.root <- file.path("FILEPATH/paper", model_version)
ifelse (!dir.exists(plot.root), dir.create(plot.root, recursive = TRUE), FALSE)
cols3 <- c("#7F3C8D","#11A579","#3969AC","#F2B701","#E73F74","#80BA5A","#E68310","#008695","#CF1C90","#f97b72","#4b4b8f","#A5AA99")
# Paired palette actually used for the vaccine lines/ribbons
cols4 <- c("#82B1E5","#0571EC",
"#c3710c","#F2B701",
"#E7073F","#891238",
"#80BA5A","#66169F",
"#5C5751", "#307404")
ifelse(!dir.exists(file.path(plot.root, "global_time_series")), dir.create(file.path(plot.root, "global_time_series")), FALSE)
pdf(file = paste0(plot.root, "/FILEPATH/vaccines_global_to_", year_end, ".pdf"),
width = 8,
height = 5)
# One global time series per vaccine: line = mean coverage, ribbon = 95% UI,
# dotted horizontal line = 90% GVAP coverage target
ggplot(data = df_plot,
aes(x = year_id,
y = mean,
ymin = lower,
ymax = upper,
group = label)) +
geom_hline(aes(yintercept=0.9), linetype="dotted") +
annotate("text", x = 1980, y = 0.9, label = "GVAP Target", vjust = 1.5, hjust = 0, size=5) +
geom_ribbon(alpha = 0.15, aes(fill = label)) +
geom_line(alpha = 0.95, aes(color = label)) +
coord_cartesian(ylim=c(-0.05, 1.05), xlim=c(1979, 2020), expand=TRUE) +
theme_minimal() +
theme(axis.title=element_text(),
strip.text=element_text(size=12, face ="bold"),
strip.background=element_blank(),
axis.text.x = element_text(size = 12),
axis.text.y = element_text(size = 12),
axis.title.x = element_text(size=12),
axis.title.y = element_text(size=12),
legend.title = element_text(size = 11)
) +
labs(x = "Year", y = "Coverage", color = "Vaccine", fill = "Vaccine",
title = "Figure 1. Global vaccine coverage by vaccine, 1980 to 2019.") +
scale_y_continuous(breaks=c(0, 0.2, 0.4, 0.6, 0.8, 1) %>% round(., 2),
labels = scales::percent,
limits = c(0,1),
expand = c(0,0)) +
scale_x_continuous(expand = c(0,0),
breaks = c(seq(1980, 2015, by = 5), as.numeric(year_end))) +
scale_color_manual(values = cols4) +
scale_fill_manual(values = cols4) #+
dev.off()
### SR FACETS PLOT
# Same figure as above but faceted by GBD super-region; written out twice
# (png then pdf). NOTE(review): the two ggplot calls below are byte-identical
# duplicates -- consider factoring the plot into a function.
df_plot <- subset(df_vax, level == "super_region")
df_plot[me_name == "vacc_dpt1", label := "DTP1"]
df_plot[me_name == "vacc_dpt3", label := "DTP3"]
df_plot[me_name == "vacc_hepb3", label := "HepB3"]
df_plot[me_name == "vacc_hib3", label := "Hib3"]
df_plot[me_name == "vacc_mcv1", label := "MCV1"]
df_plot[me_name == "vacc_mcv2", label := "MCV2"]
df_plot[me_name == "vacc_pcv3", label := "PCV3"]
df_plot[me_name == "vacc_polio3", label := "Pol3"]
df_plot[me_name == "vacc_rcv1", label := "RCV1"]
df_plot[me_name == "vacc_rotac", label := "RotaC"]
# vacc_bcg has no label mapping, so BCG is dropped here as in the global plot
df_plot <- df_plot[!is.na(label)]
# add in lancet_label
df_plot[, location_name := super_region_name]
# locs is assumed to hold the location hierarchy with a lancet_label column
df_plot <- merge(df_plot, locs[,.(location_name, lancet_label)], by="location_name", all.x=T)
# plotting directory
ifelse(!dir.exists(file.path(plot.root, "FILEPATH")), dir.create(file.path(plot.root, "FILEPATH"), recursive=TRUE), FALSE)
# plot
## png:
png(file = paste0(plot.root, "/FILEPATH/vaccines_SR_to_", year_end, "_lancet_label.png"),
width = 13,
height = 8,
units = "in",
res = 600)
ggplot(data = df_plot,
aes(x = year_id,
y = mean,
ymin = lower,
ymax = upper,
group = label)) +
geom_hline(aes(yintercept=0.9), linetype="dotted") +
annotate("text", x = 1980, y = 0.9, label = "GVAP Target", vjust = 1.5, hjust = 0, size=4) +
geom_ribbon(alpha = 0.1, aes(fill = label)) +
geom_line(alpha = 0.9, aes(color = label)) +
coord_cartesian(ylim=c(-0.05, 1.05), xlim=c(1979, 2020), expand=TRUE) +
theme_minimal() +
theme(axis.title=element_text(),
plot.title=element_text(face="bold",size=18, hjust = 0.5),
strip.text=element_text(size=10, face ="bold"),
strip.background=element_blank(),
axis.text.x = element_text(size = 10.5, angle = 45),
axis.text.y = element_text(size = 10.5),
axis.title.x = element_text(size=10.5),
axis.title.y = element_text(size=10.5),
legend.title = element_text(size = 10.5)
) +
labs(x = "Year", y = "Coverage", color = "Vaccine", fill = "Vaccine") +
scale_y_continuous(breaks=c(0, 0.2, 0.4, 0.6, 0.8, 1) %>% round(., 2),
labels = scales::percent,
limits = c(0,1),
expand = c(0,0)) +
scale_x_continuous(expand = c(0,0),
breaks = c(seq(1980, 2015, by = 5), as.numeric(year_end))) +
scale_color_manual(values = cols4) +
scale_fill_manual(values = cols4) +
facet_wrap(~lancet_label)
dev.off()
## pdf: identical figure, vector format
pdf(file = paste0(plot.root, "/FILEPATH/vaccines_SR_to_", year_end, "_lancet_label.pdf"),
width = 13,
height = 8)
ggplot(data = df_plot,
aes(x = year_id,
y = mean,
ymin = lower,
ymax = upper,
group = label)) +
geom_hline(aes(yintercept=0.9), linetype="dotted") +
annotate("text", x = 1980, y = 0.9, label = "GVAP Target", vjust = 1.5, hjust = 0, size=4) +
geom_ribbon(alpha = 0.1, aes(fill = label)) +
geom_line(alpha = 0.9, aes(color = label)) +
coord_cartesian(ylim=c(-0.05, 1.05), xlim=c(1979, 2020), expand=TRUE) +
theme_minimal() +
theme(axis.title=element_text(),
plot.title=element_text(face="bold",size=18, hjust = 0.5),
strip.text=element_text(size=10, face ="bold"),
strip.background=element_blank(),
axis.text.x = element_text(size = 10.5, angle = 45),
axis.text.y = element_text(size = 10.5),
axis.title.x = element_text(size=10.5),
axis.title.y = element_text(size=10.5),
legend.title = element_text(size = 10.5)
) +
labs(x = "Year", y = "Coverage", color = "Vaccine", fill = "Vaccine") +
scale_y_continuous(breaks=c(0, 0.2, 0.4, 0.6, 0.8, 1) %>% round(., 2),
labels = scales::percent,
limits = c(0,1),
expand = c(0,0)) +
scale_x_continuous(expand = c(0,0),
breaks = c(seq(1980, 2015, by = 5), as.numeric(year_end))) +
scale_color_manual(values = cols4) +
scale_fill_manual(values = cols4) +
facet_wrap(~lancet_label)
dev.off()
|
# litmesg --- display literal message
# Converts the message text in 'lit' to a character array via ptoc, then
# hands it to mesg for display with the supplied message type.
subroutine litmesg (lit, type)
integer lit (ARB), type   # lit: message text; type: forwarded unchanged to mesg
character msg (MAXCOLS)   # local buffer for the converted text, bounded by MAXCOLS
call ptoc (lit, '.'c, msg, MAXCOLS)   # NOTE(review): '.'c presumably the terminator passed to ptoc -- confirm
call mesg (msg, type)
return
end
| /swt/src/spc/se.u/source/litmesg.r | no_license | arnoldrobbins/gt-swt | R | false | false | 211 | r | # litmesg --- display literal message
subroutine litmesg (lit, type)
integer lit (ARB), type
character msg (MAXCOLS)
call ptoc (lit, '.'c, msg, MAXCOLS)
call mesg (msg, type)
return
end
|
#pull it all together
source("getLatLng.R")
source("getInfoForUser.R")
source("clusterLatLng.R")
source("plotLatLngs.R")
load("cred.Rdata")
registerTwitterOAuth(cred)
# sample username...
username <- "michaelmcgee"
# takes time to run... lots of api calls to resolve t.co links
latlngs <- getLatLngs(username, n=100)
# cluster those latlngs and get a center
clusterCenter <- getClusterCenter(latlngs)
# see the latlngs on a map...
# make sure latlngs isn't empty for the username you've chosen
makeGraph(latlngs, zoom=1) + addPoint(clusterCenter, size=2, colour="blue")
ggsave("michaelmcgee_cluster_center.png")
| /go.R | no_license | pavani9/4sq_lat_lng | R | false | false | 617 | r |
#pull it all together
source("getLatLng.R")
source("getInfoForUser.R")
source("clusterLatLng.R")
source("plotLatLngs.R")
load("cred.Rdata")
registerTwitterOAuth(cred)
# sample username...
username <- "michaelmcgee"
# takes time to run... lots of api calls to resolve t.co links
latlngs <- getLatLngs(username, n=100)
# cluster those latlngs and get a center
clusterCenter <- getClusterCenter(latlngs)
# see the latlngs on a map...
# make sure latlngs isn't empty for the username you've chosen
makeGraph(latlngs, zoom=1) + addPoint(clusterCenter, size=2, colour="blue")
ggsave("michaelmcgee_cluster_center.png")
|
testlist <- list(Beta = 0, CVLinf = 7.66098570059422e+87, FM = 9.72722870578481e-311, L50 = 0, L95 = 0, LenBins = c(-3.08183910645402e-133, 6.48633034209941e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 2.12186634922202e-308, SL95 = 1.01234050832871e-320, nage = 2663471L, nlen = -48232119L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615830618-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 606 | r | testlist <- list(Beta = 0, CVLinf = 7.66098570059422e+87, FM = 9.72722870578481e-311, L50 = 0, L95 = 0, LenBins = c(-3.08183910645402e-133, 6.48633034209941e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 2.12186634922202e-308, SL95 = 1.01234050832871e-320, nage = 2663471L, nlen = -48232119L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
############################################################################################
#
# Author: Luigi Augugliaro
# e-mail: luigi.augugliaro@unipa.it
# home: http://dssm.unipa.it/augugliaro/
# date: 27-03-2018
#
# Description: R code used for the simulation study (model 1 with pi = 0.50)
# reported in "l1-Penalized Censored Gaussian Graphical Model"
# Section 3.3 (see also Section 1 in Supplementary Materials)
#
# Journal: Biostatistics
# libraries and R code
library("tmvtnorm")
library("MASS")
library("glasso")
source("rcglasso_v_3.0.3.R")
source("rcglasso.fitmu.R")
source("rcglasso.fitTheta.R")
source("rcglasso_mle.R")
source("Q.R")
source("loglik.R")
# general setting
zero <- 1e-6
nsim <- 100 # number of simulations
n <- c(100, 200, 300, 400) # sample size
tht_ij <- 0.3 # value of the non-zero partial correlation coefficients
nrho <- 100 # number of tuning parameters
rhoratio <- 0.01 # parameter used to define minrho, i.e., minrho = rhoratio * maxrho
g <- c(0, 0.25, 0.5, 0.75, 1) # parameter of the ebic criterion
K <- 0.5 # p approx n^K
d <- 0.05 # d% of the p variables are censured
up <- 40 # right censoreing value
prob_cens <- 0.50 # probability of censoring
# objects used to save the results
# results: selection-quality summaries per (sample size, replicate, eBIC gamma,
# criterion type {loglik, Q}, confusion-matrix entry)
results <- array(0, dim = c(length(n), nsim, length(g), 2, 5),
dimnames = list(n = n, nsim = NULL, g = g, type = c("ll", "Qval"),
summary = c("npar", "TP", "FP", "TN", "FN")))
# per-rho model size, log-likelihood and Q value for each replicate/sample size
Qval <- ll <- npar <- array(0, dim = c(nrho, nsim, length(n)), dimnames = list(nrho = NULL, nsim = NULL, n = n))
# conv[i, k] == 1 flags a convergence failure for replicate i at sample size k
conv <- matrix(0, nrow = nsim, ncol = length(n))
set.seed(123)
# Main simulation: for each sample size (largest first) build the true
# tridiagonal precision matrix, censor a subset of variables, then for each
# replicate fit the rcglasso path, refit each model by MLE, and score eBIC
# model selection using both the log-likelihood and the Q function.
for(k in length(n):1){
nk <- n[k]
p <- round(nk^K)
# True precision matrix: chain graph (nonzeros on the first off-diagonals)
Tht <- diag(p)
diag(Tht[1:(p-1), 2:p]) <- tht_ij
diag(Tht[2:p, 1:(p-1)]) <- tht_ij
Sgm = solve(Tht)
# Upper-triangle mask and true edge set (A) / non-edge set (notA)
U <- upper.tri(Tht, diag = FALSE)
A <- abs(Tht[U]) > 0
notA <- !A
# Randomly choose the censored variables and shift their means so that
# P(X > up) equals prob_cens for each of them
S <- sort(sample(1:p, ceiling(p * d)))
mu <- rep(34, p)
tau <- qnorm(1 - prob_cens)
mu[S] <- up - sqrt(diag(Sgm)[S]) * tau
for(i in 1:nsim){
# Draw the sample and apply right-censoring at 'up'
X <- mvrnorm(nk, mu = mu, Sigma = Sgm)
X[X > up] <- up
# fitting rcglasso model
out <- rcglasso(X, up, fitmean = TRUE, scale = FALSE, nrho = nrho, rhoratio = rhoratio, verbose = FALSE)
if(out$conv == 0){
# MLE refit of each model along the rho path to get unpenalized
# log-likelihood / Q values for the eBIC
for(m in 1:nrho){
S_ini <- out$S[, , m]
muh_ini <- out$muh[, m]
thetah_ini <- out$thetah[, , m]
model <- abs(thetah_ini) > zero
out_mle <- rcglasso_mle(X = X, k = up, model = model, S_ini = S_ini,
muh_ini = muh_ini, thetah_ini = thetah_ini,
fitmean = TRUE, scale = FALSE, verbose = FALSE)
if(out_mle$conv == 0){
tht_h <- out_mle$thetah
npar[m, i, k] <- sum(abs(tht_h[upper.tri(tht_h, diag = FALSE)]) > zero)
ll[m, i, k] <- loglik(out_mle)$loglik
Qval[m, i, k] <- Q(out_mle)$Qval
} else {
# MLE refit failed: flag the replicate and abandon the path
conv[i, k] <- 1
break
}
}
#######################################
# eBIC section: computed with ll
# NOTE(review): loop bound 1:5 hard-codes length(g); use seq_along(g)
for(j in 1:5){
ebic <- -2 * ll[, i, k] + npar[, i, k] * (log(nk) + 4 * g[j] * log(p))
best <- which.min(ebic)
tht_h_fit <- out$thetah[, , best]
results[k, i, j, "ll", "npar"] <- npar[best, i, k]
results[k, i, j, "ll", "TP"] <- sum(abs(tht_h_fit[U][A]) > zero)
results[k, i, j, "ll", "FN"] <- sum(abs(tht_h_fit[U][A]) <= zero)
results[k, i, j, "ll", "TN"] <- sum(abs(tht_h_fit[U][notA]) <= zero)
results[k, i, j, "ll", "FP"] <- sum(abs(tht_h_fit[U][notA]) > zero)
}
#######################################
# eBIC section: computed with Qval
for(j in 1:5){
ebic <- -2 * Qval[, i, k] + npar[, i, k] * (log(nk) + 4 * g[j] * log(p))
best <- which.min(ebic)
tht_h_fit <- out$thetah[, , best]
results[k, i, j, "Qval", "npar"] <- npar[best, i, k]
results[k, i, j, "Qval", "TP"] <- sum(abs(tht_h_fit[U][A]) > zero)
results[k, i, j, "Qval", "FN"] <- sum(abs(tht_h_fit[U][A]) <= zero)
results[k, i, j, "Qval", "TN"] <- sum(abs(tht_h_fit[U][notA]) <= zero)
results[k, i, j, "Qval", "FP"] <- sum(abs(tht_h_fit[U][notA]) > zero)
}
} else conv[i, k] <- 1
cat("simulation ", i, "with n =", nk, "completed\n")
# Checkpoint the workspace every 10 replicates
if(is.element(i, 10 * 1:10)) save.image("Sim_ebic_tmp.RData")
}
}
save.image("Sim_ebic.RData")
| /Sec_3.3/model1/K0.5_pi50/ebic-simul.R | no_license | LuigiAugugliaro/cglasso | R | false | false | 4,974 | r | ############################################################################################
#
# Author: Luigi Augugliaro
# e-mail: luigi.augugliaro@unipa.it
# home: http://dssm.unipa.it/augugliaro/
# data: 27-03-2018
#
# Description: R code used for the simulation study (model 1 with pi = 0.50)
# reported in "l1-Penalized Censored Gaussian Graphical Model"
# Section 3.3 (see also Section 1 in Supplementary Materials)
#
# Journal: Biostatistics
# libraries and R code
library("tmvtnorm")
library("MASS")
library("glasso")
source("rcglasso_v_3.0.3.R")
source("rcglasso.fitmu.R")
source("rcglasso.fitTheta.R")
source("rcglasso_mle.R")
source("Q.R")
source("loglik.R")
# general setting
zero <- 1e-6
nsim <- 100 # number of simulations
n <- c(100, 200, 300, 400) # sample size
tht_ij <- 0.3 # value of the non-zero partial correlation coefficients
nrho <- 100 # number of tuning parameters
rhoratio <- 0.01 # parameter used to define minrho, i.e., minrho = rhoratio * maxrho
g <- c(0, 0.25, 0.5, 0.75, 1) # parameter of the ebic criterion
K <- 0.5 # p approx n^K
d <- 0.05 # d% of the p variables are censured
up <- 40 # right censoreing value
prob_cens <- 0.50 # probability of censoring
# objects used to save the results
results <- array(0, dim = c(length(n), nsim, length(g), 2, 5),
dimnames = list(n = n, nsim = NULL, g = g, type = c("ll", "Qval"),
summary = c("npar", "TP", "FP", "TN", "FN")))
Qval <- ll <- npar <- array(0, dim = c(nrho, nsim, length(n)), dimnames = list(nrho = NULL, nsim = NULL, n = n))
conv <- matrix(0, nrow = nsim, ncol = length(n))
set.seed(123)
for(k in length(n):1){
nk <- n[k]
p <- round(nk^K)
Tht <- diag(p)
diag(Tht[1:(p-1), 2:p]) <- tht_ij
diag(Tht[2:p, 1:(p-1)]) <- tht_ij
Sgm = solve(Tht)
U <- upper.tri(Tht, diag = FALSE)
A <- abs(Tht[U]) > 0
notA <- !A
S <- sort(sample(1:p, ceiling(p * d)))
mu <- rep(34, p)
tau <- qnorm(1 - prob_cens)
mu[S] <- up - sqrt(diag(Sgm)[S]) * tau
for(i in 1:nsim){
X <- mvrnorm(nk, mu = mu, Sigma = Sgm)
X[X > up] <- up
# fitting rcglasso model
out <- rcglasso(X, up, fitmean = TRUE, scale = FALSE, nrho = nrho, rhoratio = rhoratio, verbose = FALSE)
if(out$conv == 0){
for(m in 1:nrho){
S_ini <- out$S[, , m]
muh_ini <- out$muh[, m]
thetah_ini <- out$thetah[, , m]
model <- abs(thetah_ini) > zero
out_mle <- rcglasso_mle(X = X, k = up, model = model, S_ini = S_ini,
muh_ini = muh_ini, thetah_ini = thetah_ini,
fitmean = TRUE, scale = FALSE, verbose = FALSE)
if(out_mle$conv == 0){
tht_h <- out_mle$thetah
npar[m, i, k] <- sum(abs(tht_h[upper.tri(tht_h, diag = FALSE)]) > zero)
ll[m, i, k] <- loglik(out_mle)$loglik
Qval[m, i, k] <- Q(out_mle)$Qval
} else {
conv[i, k] <- 1
break
}
}
#######################################
# eBIC section: computed with ll
for(j in 1:5){
ebic <- -2 * ll[, i, k] + npar[, i, k] * (log(nk) + 4 * g[j] * log(p))
best <- which.min(ebic)
tht_h_fit <- out$thetah[, , best]
results[k, i, j, "ll", "npar"] <- npar[best, i, k]
results[k, i, j, "ll", "TP"] <- sum(abs(tht_h_fit[U][A]) > zero)
results[k, i, j, "ll", "FN"] <- sum(abs(tht_h_fit[U][A]) <= zero)
results[k, i, j, "ll", "TN"] <- sum(abs(tht_h_fit[U][notA]) <= zero)
results[k, i, j, "ll", "FP"] <- sum(abs(tht_h_fit[U][notA]) > zero)
}
#######################################
# eBIC section: computed with Qval
for(j in 1:5){
ebic <- -2 * Qval[, i, k] + npar[, i, k] * (log(nk) + 4 * g[j] * log(p))
best <- which.min(ebic)
tht_h_fit <- out$thetah[, , best]
results[k, i, j, "Qval", "npar"] <- npar[best, i, k]
results[k, i, j, "Qval", "TP"] <- sum(abs(tht_h_fit[U][A]) > zero)
results[k, i, j, "Qval", "FN"] <- sum(abs(tht_h_fit[U][A]) <= zero)
results[k, i, j, "Qval", "TN"] <- sum(abs(tht_h_fit[U][notA]) <= zero)
results[k, i, j, "Qval", "FP"] <- sum(abs(tht_h_fit[U][notA]) > zero)
}
} else conv[i, k] <- 1
cat("simulation ", i, "with n =", nk, "completed\n")
if(is.element(i, 10 * 1:10)) save.image("Sim_ebic_tmp.RData")
}
}
save.image("Sim_ebic.RData")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reinstallers.R
\name{reinstallers}
\alias{reinstallers}
\alias{remove_greta_env}
\alias{reinstall_greta_env}
\alias{remove_miniconda}
\alias{reinstall_miniconda}
\title{Helpers to remove, and reinstall python environments and miniconda}
\usage{
remove_greta_env()
reinstall_greta_env(timeout = 5)
remove_miniconda()
reinstall_miniconda(timeout = 5)
}
\arguments{
\item{timeout}{time in minutes to wait until timeout (default is 5 minutes)}
}
\value{
invisible
}
\description{
This can be useful when debugging greta installation to get to a "clean slate".
There are four functions:
}
\details{
\itemize{
\item \code{remove_greta_env()} removes the 'greta-env' conda environment
\item \code{remove_miniconda()} removes miniconda installation
\item \code{reinstall_greta_env()} remove 'greta-env' and reinstall it using \code{greta_create_conda_env()} (which is used internally).
\item \code{reinstall_miniconda()} removes miniconda and reinstalls it using \code{greta_install_miniconda()} (which is used internally)
}
}
\examples{
\dontrun{
remove_greta_env()
remove_miniconda()
reinstall_greta_env()
reinstall_miniconda()
}
}
| /man/reinstallers.Rd | permissive | njtierney/greta | R | false | true | 1,205 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reinstallers.R
\name{reinstallers}
\alias{reinstallers}
\alias{remove_greta_env}
\alias{reinstall_greta_env}
\alias{remove_miniconda}
\alias{reinstall_miniconda}
\title{Helpers to remove, and reinstall python environments and miniconda}
\usage{
remove_greta_env()
reinstall_greta_env(timeout = 5)
remove_miniconda()
reinstall_miniconda(timeout = 5)
}
\arguments{
\item{timeout}{time in minutes to wait until timeout (default is 5 minutes)}
}
\value{
invisible
}
\description{
This can be useful when debugging greta installation to get to "clean slate".
There are four functions:
}
\details{
\itemize{
\item \code{remove_greta_env()} removes the 'greta-env' conda environment
\item \code{remove_miniconda()} removes miniconda installation
\item \code{reinstall_greta_env()} remove 'greta-env' and reinstall it using \code{greta_create_conda_env()} (which is used internally).
\item \code{reinstall_miniconda()} removes miniconda and reinstalls it using \code{greta_install_miniconda()} (which is used internally)
}
}
\examples{
\dontrun{
remove_greta_env()
remove_miniconda()
reinstall_greta_env()
reinstall_miniconda()
}
}
|
# run_analysis: build a tidy summary of the UCI HAR smartphone dataset --
# merge train/test, keep mean()/std() features, attach activity names, then
# average each feature by subject and activity.
# NOTE(review): plyr is loaded but never used below; consider removing.
library(plyr)
# NOTE(review): hard-coded absolute setwd() makes the script non-portable
setwd("C:/Users/Patrick/Documents/Coursera/DataScience/Assignments/Course3/FinalAssignment")
# Load all relevant data
activity_labels <- read.table("activity_labels.txt")
features <- read.table("features.txt")
X_train <- read.table("train/X_train.txt")
y_train <- read.table("train/y_train.txt")
subject_train <- read.table("train/subject_train.txt")
X_test <- read.table("test/X_test.txt")
y_test <- read.table("test/y_test.txt")
subject_test<- read.table("test/subject_test.txt")
# Check dimensions and other characteristics of the data
# (interactive sanity checks; output only visible when run at the console)
dim(X_train)
dim(y_train)
dim(subject_train)
dim(X_test)
dim(y_test)
dim(subject_test)
table(subject_train)
table(subject_test)
# (1) Merge train and test for all applicable data, and give column names
subject <- rbind(subject_train, subject_test)
names(subject) <- "subject"
y <- rbind(y_train, y_test)
names(y) <- "activity_id"
X <- rbind(X_train, X_test)
names(X) <- as.character(features$V2)
# (2) Extract only the measurements on the mean and standard deviation for each measurement
# matches literal "mean()"/"std()" in feature names, excluding e.g. meanFreq()
X_red <- X[,sort(c(grep("mean\\()", names(X)), grep("std\\()", names(X))))]
# Merge all data to one data set
data <- cbind(subject, y, X_red)
names(data) # check if all columns are named meaningful
# (3) Give descriptive activity names to name the activities in the data set
names(activity_labels) <- c("id","activity")
# all = FALSE: inner join -- rows with unmatched activity_id would be dropped
data_labled <- merge(data, activity_labels, by.x = "activity_id", by.y = "id", all = FALSE)
data_labled <- data_labled[,-1] # get rid off the activity ID "activity_id"
# (4) Appropriate labels were given in each step
# (5) Creating a second, independent tidy data set averaging each variable for each activity AND subject
data_tidy <- aggregate(. ~ subject + activity, data_labled, mean)
write.table(data_tidy, file = "data_tidy.txt", row.names = FALSE)
| /run_analysis.R | no_license | PatGruetter/Course3_FinalAssignment | R | false | false | 1,907 | r | library(plyr)
setwd("C:/Users/Patrick/Documents/Coursera/DataScience/Assignments/Course3/FinalAssignment")
# Load all relevant data
activity_labels <- read.table("activity_labels.txt")
features <- read.table("features.txt")
X_train <- read.table("train/X_train.txt")
y_train <- read.table("train/y_train.txt")
subject_train <- read.table("train/subject_train.txt")
X_test <- read.table("test/X_test.txt")
y_test <- read.table("test/y_test.txt")
subject_test<- read.table("test/subject_test.txt")
# Check dimensions and other characteristics of the data
dim(X_train)
dim(y_train)
dim(subject_train)
dim(X_test)
dim(y_test)
dim(subject_test)
table(subject_train)
table(subject_test)
# (1) Merge train and test for all applicable data, and give column names
subject <- rbind(subject_train, subject_test)
names(subject) <- "subject"
y <- rbind(y_train, y_test)
names(y) <- "activity_id"
X <- rbind(X_train, X_test)
names(X) <- as.character(features$V2)
# (2) Extract only the measurements on the mean and standard deviation for each measurement
X_red <- X[,sort(c(grep("mean\\()", names(X)), grep("std\\()", names(X))))]
# Merge all data to one data set
data <- cbind(subject, y, X_red)
names(data) # check if all columns are named meaningful
# (3) Give descriptive activity names to name the activities in the data set
names(activity_labels) <- c("id","activity")
data_labled <- merge(data, activity_labels, by.x = "activity_id", by.y = "id", all = FALSE)
data_labled <- data_labled[,-1] # get rid off the activity ID "activity_id"
# (4) Appropriate labels were given in each step
# (5) Creating a second, independent tidy data set averaging each variable for each activity AND subject
data_tidy <- aggregate(. ~ subject + activity, data_labled, mean)
write.table(data_tidy, file = "data_tidy.txt", row.names = FALSE)
|
#' Process coverageBed reports
#'
#' @description
#' Process the coverage reports generated by bedtools coverage tool.
#' For each sample, reads its depth histogram, records mean/median coverage,
#' and computes the proportion of bases covered at 0, >=1, >=10, >=20, >=30
#' and 40+ reads.
#'
#' @inheritParams get.coverage.by.sample.statistics
#'
#' @return final.statistics data frame of coverage statistics generated by parsing through coverage reports
process.coverage.reports <- function(project.directory) {
# TO DO:
# - add tests for reports having expected format
# - ask Ros what the "cumulative" coverage numbers are supposed to mean
coverage.report.paths <- system.ls(pattern = "*/*all.coverage.report", directory = project.directory, error = TRUE);
sample.ids <- extract.sample.ids(coverage.report.paths, from.filename = TRUE);
# several branches below differ for the one-sample case because apply/tapply
# shapes change when there is a single column
single.sample <- !(length(sample.ids) > 1)
# store mean and median coverage per patient
mean.median.by.sample <- list();
# initialize data frame to store coverage data for all samples
merged.coverage.data <- data.frame();
### PROCESS EACH SAMPLE
# NOTE(review): merging inside this loop re-merges the accumulated table on
# every iteration (quadratic in sample count); a Reduce() over a list of
# per-sample tables would scale better.
for(i in seq_along(coverage.report.paths)) {
path <- coverage.report.paths[i];
sample.id <- sample.ids[i];
# generated from coverageBed, based on all target regions
# 1)
# 2) depth
# 3) no. of bases at depth
# 4) size of A
# 5) % of A at depth
coverage.data <- utils::read.delim(
path,
header = FALSE,
stringsAsFactors = FALSE
);
depth.values <- coverage.data[, 2];
depth.frequencies <- coverage.data[, 3];
# data frame for merging with all other patients
patient.coverage <- data.frame(depth.values, depth.frequencies);
names(patient.coverage) <- c('depth', sample.id);
# get the median coverage for the sample
median.coverage <- tabular.median(
values = depth.values,
frequencies = depth.frequencies
);
# get the mean coverage for each sample
mean.coverage <- tabular.mean(
values = depth.values,
frequencies = depth.frequencies
);
mean.median.by.sample[[ sample.id ]] <- data.frame(
"sample.id" = sample.id,
"mean.coverage" = mean.coverage,
"median.coverage" = median.coverage
);
# merge with full data frame
# (full outer join on depth so samples with different depth ranges align;
# depths missing for a sample become NA)
if( 0 == nrow(merged.coverage.data) ) {
merged.coverage.data <- patient.coverage;
} else {
merged.coverage.data <- merge(
merged.coverage.data,
patient.coverage,
by.x = "depth",
by.y = "depth",
all = TRUE
);
}
}
### POST-PROCESSING
mean.median.by.sample <- do.call(rbind, mean.median.by.sample);
# bin depths: 0, 1-10, 10-20, 20-30, 30-40, 40+
merged.coverage.data$bin <- cut(
merged.coverage.data$depth,
breaks = c(-Inf, 1, 10, 20, 30, 40, Inf),
labels = c("0", "1-10", "10-20", "20-30", "30-40", "40+")
# labels = c(0, 1, 10, 20, 30, 40)
);
# for each patient, get proportion of frequencies falling within each depth category
if (!single.sample){
coverage.statistics <- apply(
merged.coverage.data[, 2:(ncol(merged.coverage.data) - 1)],
2,
FUN = function(x, coverage.bin) {
tapply(x, coverage.bin, sum, na.rm = TRUE )/ sum(x, na.rm = TRUE);
},
coverage.bin = merged.coverage.data$bin
);
} else {
coverage.statistics <- data.frame(sample.ids=tapply(merged.coverage.data[, 2], merged.coverage.data$bin, sum, na.rm = TRUE) / sum(merged.coverage.data[ ,2], na.rm = TRUE))
}
# for all categories except the first one, get the proportion of frequencies falling into that category or higher
# - first category (coverage zero) is still just the proportion falling in that category
# - last category (40+) is already its own tail, so the loop stops at nrow - 1
for( i in 2:(nrow(coverage.statistics)-1) ) {
if (!single.sample) {
coverage.statistics[i,] <- apply(coverage.statistics[i:nrow(coverage.statistics),], 2, sum, na.rm = TRUE)
} else {
coverage.statistics[i,] <- sum(coverage.statistics[i:nrow(coverage.statistics),], na.rm = TRUE)
}
}
# transpose so rows become samples and columns become depth bins
#if (!single.sample) {
names(coverage.statistics) <- sample.ids;
coverage.statistics <- t(coverage.statistics);
#} else {
# rownames(coverage.statistics) <- sample.ids
#}
# add mean/ median per sample
# make sure they're ordered the same way – anything else will lead to disappointment
if (!single.sample) {
mean.median.by.sample <- mean.median.by.sample[rownames(coverage.statistics), ];
}
# NOTE(review): debug output -- consider removing or switching to message()
print(mean.median.by.sample);
print(coverage.statistics);
# sanity check
if( !identical(rownames(coverage.statistics), rownames(mean.median.by.sample) ) ) {
stop("coverage.statistics and mean.median.by.sample do not appear to be in the same order. Please investigate.");
}
# assemble final data frame
final.statistics <- cbind(
coverage.statistics,
mean.median.by.sample
);
return(final.statistics);
}
| /R/process.coverage.reports.R | no_license | cran/varitas | R | false | false | 5,273 | r | #' Process coverageBed reports
#'
#' @description
#' Process the coverage reports generated by bedtools coverage tool.
#'
#' @inheritParams get.coverage.by.sample.statistics
#'
#' @return final.statistics data frame of coverage statistics generated by parsing through coverage reports
process.coverage.reports <- function(project.directory) {
# TO DO:
# - add tests for reports having expected format
# - ask Ros what the "cumulative" coverage numbers are supposed to mean
coverage.report.paths <- system.ls(pattern = "*/*all.coverage.report", directory = project.directory, error = TRUE);
sample.ids <- extract.sample.ids(coverage.report.paths, from.filename = TRUE);
single.sample <- !(length(sample.ids) > 1)
# store mean and median coverage per patient
mean.median.by.sample <- list();
# initialize data frame to store coverage data for all samples
merged.coverage.data <- data.frame();
### PROCESS EACH SAMPLE
for(i in seq_along(coverage.report.paths)) {
path <- coverage.report.paths[i];
sample.id <- sample.ids[i];
# generated from coverageBed, based on all target regions
# 1)
# 2) depth
# 3) no. of bases at depth
# 4) size of A
# 5) % of A at depth
coverage.data <- utils::read.delim(
path,
header = FALSE,
stringsAsFactors = FALSE
);
depth.values <- coverage.data[, 2];
depth.frequencies <- coverage.data[, 3];
# data frame for merging with all other patients
patient.coverage <- data.frame(depth.values, depth.frequencies);
names(patient.coverage) <- c('depth', sample.id);
# get the median coverage for the sample
median.coverage <- tabular.median(
values = depth.values,
frequencies = depth.frequencies
);
# get the mean coverage for each sample
mean.coverage <- tabular.mean(
values = depth.values,
frequencies = depth.frequencies
);
mean.median.by.sample[[ sample.id ]] <- data.frame(
"sample.id" = sample.id,
"mean.coverage" = mean.coverage,
"median.coverage" = median.coverage
);
# merge with full data frame
if( 0 == nrow(merged.coverage.data) ) {
merged.coverage.data <- patient.coverage;
} else {
merged.coverage.data <- merge(
merged.coverage.data,
patient.coverage,
by.x = "depth",
by.y = "depth",
all = TRUE
);
}
}
### POST-PROCESSING
mean.median.by.sample <- do.call(rbind, mean.median.by.sample);
merged.coverage.data$bin <- cut(
merged.coverage.data$depth,
breaks = c(-Inf, 1, 10, 20, 30, 40, Inf),
labels = c("0", "1-10", "10-20", "20-30", "30-40", "40+")
# labels = c(0, 1, 10, 20, 30, 40)
);
# for each patient, get proportion of frequencies falling within each depth category
if (!single.sample){
coverage.statistics <- apply(
merged.coverage.data[, 2:(ncol(merged.coverage.data) - 1)],
2,
FUN = function(x, coverage.bin) {
tapply(x, coverage.bin, sum, na.rm = TRUE )/ sum(x, na.rm = TRUE);
},
coverage.bin = merged.coverage.data$bin
);
} else {
coverage.statistics <- data.frame(sample.ids=tapply(merged.coverage.data[, 2], merged.coverage.data$bin, sum, na.rm = TRUE) / sum(merged.coverage.data[ ,2], na.rm = TRUE))
}
# for all categories except the first one, get the proportion of frequencies falling into that category or higher
# - first category (coverage zero) is still just the proportion falling in that category
for( i in 2:(nrow(coverage.statistics)-1) ) {
if (!single.sample) {
coverage.statistics[i,] <- apply(coverage.statistics[i:nrow(coverage.statistics),], 2, sum, na.rm = TRUE)
} else {
coverage.statistics[i,] <- sum(coverage.statistics[i:nrow(coverage.statistics),], na.rm = TRUE)
}
}
# transpose
#if (!single.sample) {
names(coverage.statistics) <- sample.ids;
coverage.statistics <- t(coverage.statistics);
#} else {
# rownames(coverage.statistics) <- sample.ids
#}
# add mean/ median per sample
# make sure they're ordered the same way – anything else will lead to disappointment
if (!single.sample) {
mean.median.by.sample <- mean.median.by.sample[rownames(coverage.statistics), ];
}
print(mean.median.by.sample);
print(coverage.statistics);
# sanity check
if( !identical(rownames(coverage.statistics), rownames(mean.median.by.sample) ) ) {
stop("coverage.statistics and mean.median.by.sample do not appear to be in the same order. Please investigate.");
}
# assemble final data frame
final.statistics <- cbind(
coverage.statistics,
mean.median.by.sample
);
return(final.statistics);
}
|
# plot3(): build "plot3.png" — the three energy sub-metering series for
# 2007-02-01 and 2007-02-02 from the UCI household power consumption data.
#
# con: path to the semicolon-separated raw data file.
#
# Side effects: the parsed data is cached in the global variable `hpc`
# (shared with the other plotX() scripts in this project), and plot3.png
# is written to the current working directory.
plot3 <- function(con= "./household_power_consumption.txt"){
  ## Parse the raw file only once; later calls reuse the cached global `hpc`.
  if (!("hpc" %in% ls(globalenv())))
  {
    ## Process rawdata and tell the user what is going on.
    print("The loading process will takes sometime, thank you for your patience!")
    print("A dataframe named < hpc > will be generated in global environment for plotting and checking,")
    print("in which a POSIXct varialbe is added in 1st col.")
    print("Once the data is loaded other plot functions will call it from cache.")
    dat1 <- read.table(con, colClasses = "character", sep=";", header=TRUE)
    dat2 <- subset(dat1, dat1[[1]]=="1/2/2007"|dat1[[1]]=="2/2/2007") ### Keep only the two target dates
    fulltime0 <- paste(dat2[[1]], dat2[[2]], sep=" ")                 ### Combine date and time into one string
    fulltime <- strptime(fulltime0, "%d/%m/%Y %H:%M:%S")              ### Parse into a date-time object
    dat3 <- cbind(fulltime, dat2)                                     ### Prepend the parsed date-time column
    for (i in 4:10) {dat3[[i]] <- as.numeric(dat3[[i]]) }             ### Measurement columns -> numeric
    hpc <<- dat3  # cache in the global environment for the other plot scripts
  }
  ## Open the png device exactly once.
  ## (BUG FIX: the original called a bare png() first, which opened an extra
  ## graphics device that the single dev.off() below never closed.)
  png(file="plot3.png", width=480, height=480)
  par("mfrow"=c(1,1))
  with(hpc, plot(fulltime,Sub_metering_1, ### Draw an empty frame first (type = "n")
                 ylab="Energy sub metering",
                 xlab = NA,
                 type="n"))
  ### Overlay the three sub-metering series
  with(hpc, lines(fulltime,Sub_metering_1,lwd=1.5, col="black"))
  with(hpc, lines(fulltime,Sub_metering_2,lwd=1.5, col="red"))
  with(hpc, lines(fulltime,Sub_metering_3,lwd=1.5, col="blue"))
  ### Add legend
  legend("topright", lty = c(1,1,1),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         col = c("black", "red", "blue"))
  dev.off() ## Close the plot3.png device
  print("File < plot3.png > has been saved in your working directory ")
} | /plot3.R | no_license | guipeng1019/ExData_Plotting1 | R | false | false | 2,007 | r | plot3 <- function(con= "./household_power_consumption.txt"){
## Parse the raw data only once: reuse the cached global `hpc` if it already
## exists in the global environment (pos.to.env(1) is the global environment).
lg <- any("hpc" %in% ls(pos.to.env(1)))
if(lg==FALSE)
{
## Process rawdata and tell the user what is going on.
print("The loading process will takes sometime, thank you for your patience!")
print("A dataframe named < hpc > will be generated in global environment for plotting and checking,")
print("in which a POSIXct varialbe is added in 1st col.")
print("Once the data is loaded other plot functions will call it from cache.")
dat1 <- read.table(con, colClasses = "character", sep=";", header=TRUE)
dat2 <- subset(dat1, dat1[[1]]=="1/2/2007"|dat1[[1]]=="2/2/2007") ### Keep only the two target dates (2007-02-01/02)
fulltime0 <- paste(dat2[[1]], dat2[[2]], sep=" ") ### Combine date and time into one string
fulltime <- strptime(fulltime0, "%d/%m/%Y %H:%M:%S") ### Parse into a date-time object
dat3 <- cbind(fulltime, dat2) ### Prepend the parsed date-time column
for (i in 4:10) {dat3[[i]] <- as.numeric(dat3[[i]]) } ### Measurement columns -> numeric
hpc <<- dat3 # cache in the global environment for the other plot scripts
}
## Initialize device.
## NOTE(review): this bare png() opens an extra graphics device that the single
## dev.off() below never closes (only the plot3.png device is closed) — this
## looks like a device leak; the bare call should probably be removed.
png()
png(file="plot3.png", width=480, height=480)
par("mfrow"=c(1,1))
with(hpc, plot(fulltime,Sub_metering_1, ### Draw an empty frame first (type = "n")
ylab="Energy sub metering",
xlab = NA,
type="n"))
### Overlay the three sub-metering series
with(hpc, lines(fulltime,Sub_metering_1,lwd=1.5, col="black"))
with(hpc, lines(fulltime,Sub_metering_2,lwd=1.5, col="red"))
with(hpc, lines(fulltime,Sub_metering_3,lwd=1.5, col="blue"))
### Add legend
legend("topright", lty = c(1,1,1),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"))
dev.off() ## Close the plot3.png device
print("File < plot3.png > has been saved in your working directory ")
} |
###############################################################################
#-------------------------------DEVOLUTION------------------------------------#
###############################################################################
# NOTE(review): hard-coded, machine-specific working directory — this will fail
# on any other machine; consider removing it or making the path a parameter.
setwd("~/Läkarprogrammet/PhD/Projekt 1/Final_DEVOLUTION")
#Dependencies----
library("readxl") #Needed to load the data from the xlsx file.
library("xlsx") #Needed to save matrices into xlsx-files.
library("stringr") #Needed for using the function "word".
library("ape") #Phylogenetic tree data structures and utilities.
library("phangorn") #Needed to transform the EM into phyDat and make trees.
library("ggplot2") #Needed to visualize the trees.
library("ggtree") #ggplot2 extension for drawing phylogenetic trees.
library("ggimage") #Needed to insert the pies in the tree.
library("dplyr") #Needed for the distinct function in pie.it.
library("RColorBrewer") #Needed to add the colored pie charts.
library("ggridges") #Used to plot the distribution.
library("cowplot") #Plot composition/grid helpers for ggplot2.
library("dbscan") #Clustering
#Start with pressing the little triangle to the left here to collapse all functions!
#Functions----
#Function extracting the data
#Read one sheet of an Excel workbook and drop rows whose first column is NA.
#
#filename:  path to the .xlsx file.
#sheetname: sheet name (or index), passed through to readxl::read_xlsx().
#
#Returns a data.frame containing every column of the sheet and only the rows
#whose first column is non-missing (trailing blank/NA padding rows removed).
load_matrix <- function(filename, sheetname) {
  data <- as.data.frame(read_xlsx(filename, sheetname)) #Reading the xlsx file as a plain data.frame.
  #Keep only rows with a non-NA first column.
  #(BUG FIX: the original indexed with c(1:nrow(data)), c(1:ncol(data)) — a
  #no-op copy that additionally misbehaves on empty sheets, since 1:0 yields
  #c(1, 0). Direct logical subsetting handles the zero-row case correctly.)
  subdata <- data[!is.na(data[[1]]), ]
  return(subdata)
}
#Function creating the eventmatrix
DEVOLUTION <- function(file,eventcutoff,datatypes, rule, eps,truncate,names){
start.time <- Sys.time()
if(missing(eventcutoff)==TRUE){
print("You have not chosen an event cutoff. Default 1 Mbp chosen.")
eventcutoff <- 1000000
}
all_cols <- ncol(file)
if(all_cols < 11){
print("There are missing columns!")
types <- c("Tumor ID","Samples","Chr","Start","End","Med LogR","VAF (TRS)","Type","Method","Cytoband/ Gene","Clone size (%)")
thematch <- match(colnames(file),types)
missing <- types[types%in%colnames(file)==FALSE]
print("This is missing. The algorithm will add it.")
print(missing)
file_new <- matrix(0,nrow(file),11)
colnames(file_new) <- types
cols_file <- colnames(file)
i <- 1
for(i in 1:11){
if(types[i]%in%missing){
file_new[,i] <- "NA"
}else{
col <- match(types[i],cols_file)
file_new[,i] <- file[,col]
}
i <- i+1
}
file <- file_new
}
################################
#Treating NA for TC in the file#
################################
i <- 1
for(i in 1:as.numeric(nrow(file))){
if(file[i,11] == "NA"){
if(file[i,2] == "ALL"){
file[i,11] <- "100"
}
if(file[i,2] != "ALL"){
file[i,] <- "0"
}
}
i <- i+1
}
###################################################################
#Removing events obtained with a method not specified by datatypes#
###################################################################
#If you choose yes we will not remove anything.
if(length(datatypes) > 1){
print("Only the following datatypes are included in the analysis.")
print(datatypes)
i <- 1
for(i in 1:as.numeric(nrow(file))){
if(file[i,9] %in% datatypes == FALSE){
file[i,] <- "0"
}
i <- i+1
}
}else{
if(datatypes == "All"){
print("All datatypes supplied in data are included in the analysis.")
}else{
print("The following datatype is included in the analysis")
print(datatypes)
i <- 1
for(i in 1:as.numeric(nrow(file))){
if(file[i,9] %in% datatypes == FALSE){
file[i,] <- "0"
}
i <- i+1
}
}
}
file <- as.matrix(file[file[,2] != "0",])
###########################
#Finding all unique events#
###########################
#This loop defines all events that we have in the dataset for a particular tumor.
#The events have to have the same name, be on the same chromosome and have breakpoints
#within a certain cutoff that is set on beforehand.
file_new <- file
i <- 1
versionnames <- c(paste( c("v"), 1:30, sep=""))
k <- 1
for(i in 1:nrow(file_new)){ #Choosing a row.
j <- 1
for(j in 1:nrow(file_new)){ #Choosing another row to compare it with.
if(i != j){
if(file_new[i,10] == file_new[j,10]){ #Comparing event names.
if(file_new[i,8] == file_new[j,8]){ #Comparing the type of event.
if(file_new[i,3]==file_new[j,3]){ #Are they on the same chromosome?
if(file_new[i,2] == "ALL"){ #If the event is a part of the stem they shall always be separate.
if(file_new[j,2] == "ALL"){
if(is.na(word(file[i,10],2)) == TRUE){ #If the one you compare with does not already have a version name.
file_new[j,10] <- paste(file_new[j,10],versionnames[k], sep = "") #Changing the name for the second version of the mutation.
k <- k+1
}else if(is.na(word(file[i,10],2)) == FALSE){
versionpos <- match(word(file[i,10],2),versionnames)
newversion <- versionpos+1
file_new[j,10] <- paste(file_new[j,10],versionnames[newversion], sep = "") #Changing the name for the second version of the mutation.
}
}}
if(file_new[i,2] != "ALL"){ #This part is only valid for non stem events.
if(abs(as.numeric(file_new[i,4]) - as.numeric(file_new[j,4])) > eventcutoff){ #If the events differ too much in genetic distance they are seen as two separate events.
file_new[j,10] <- paste(file_new[j,10],"v1", sep = "")} #Changing the name for the second version of the mutation.
else if(abs(as.numeric(file_new[i,5])-as.numeric(file_new[j,5])) > eventcutoff){ #The same but in the other direction.
file_new[j,10] <- paste(file_new[j,10],"v1", sep = "")}
}
}
}
}
}
j <- j+1
}
i <- i+1
}
for(i in 1:nrow(file_new)){ #Adding information to the events about which kind of alteration it is.
file_new[i,10] <- paste(file_new[i,10],file_new[i,8],file_new[i,3], sep = " ")
i <- i+1
}
un_s <- unique(file_new[,2])
samples <- un_s[un_s!="ALL"]
###########################
#Making an overview matrix#
###########################
samples <- as.matrix(unique(file_new[,2])) #Extracting all unique samples.
aberrations <- as.matrix(unique(file_new[,10])) #Extracting all unique events.
#Constructing a matrix with all samples and their TC for each of the unique events.
overview <- matrix(0,(length(aberrations)+1),(length(samples)+1))
overview[1,2:as.numeric(ncol(overview))] <- samples
overview[2:as.numeric(nrow(overview)),1] <- aberrations
i <- 1
for(i in 1:nrow(file_new)){ #Extracting all of the TC:s.
samplepos <- match(file_new[i,2],overview[1,])
aberrationpos <- match(file_new[i,10],overview[,1])
overview[aberrationpos,samplepos] <- file_new[i,11]
if(file_new[i,2] == "ALL"){
overview[aberrationpos,2:ncol(overview)] <- 100 #All samples should have 100 on the "ALL" events.
}
i <- i+1
}
#Do we have any stem at all?
if(overview[1,2] != "ALL"){
print("You do not have any declared stem event denoted ALL.")
allcolumn <- matrix(0,nrow(overview),1)
allcolumn[1,1] <- "ALL"
overview <- cbind(overview[,1],allcolumn,overview[,2:ncol(overview)])
}
#Treating cases where not all stem events have been declared.
i <- 2
firststem <- 1
for(i in 2:nrow(overview)){
stemornot <- (length(which(as.numeric(overview[i,2:ncol(overview)])>=90))-(as.numeric(ncol(overview))-2))
if(stemornot == 0){
#This is a stem event.
overview[i,2:ncol(overview)] <- 100 #Declaring it as a stem event.
#Now we have to declare it a stem in the file as well and remove it from the other ones.
if(firststem == 1){
row <- match(overview[i,1],file_new[,10])
stemmatrix <- t(as.matrix(file_new[row,]))
stemmatrix[1,2] <- "ALL"
stemmatrix[1,11] <- "100"
#stemmatrix[1,1] <- "Remove" #Temporary.
firststem <- 2
pos_stem <- as.numeric(which(overview[i,1]==file_new[,10])) #The positions in which the stem exists.
file_new[pos_stem,1] <- "Remove"
}else{
row <- match(overview[i,1],file_new[,10])
event <- t(as.matrix(file_new[row,]))
event[1,2] <- "ALL"
event[1,11] <- "100"
#event[1,1] <- "Remove" #Temporary.
stemmatrix <- rbind(stemmatrix,event)
pos_stem <- as.numeric(which(overview[i,1]==file_new[,10])) #The positions in which the stem exists.
file_new[pos_stem,1] <- "Remove"
}
}
if(i == nrow(overview)){
if(firststem == 1 && as.numeric(overview[2,2]) == 0){
print("There is no stem events in the data. Adding a fabricated stem.")
f_stem <- matrix(0,1,11)
f_stem[1,] <- c(unique(file_new[file_new[,1]!="Remove",1]),"ALL",1,1,1,"NA","NA","Stem","WES","Stem","100")
file_new <- rbind(f_stem,file_new)
overview <- rbind(overview[1,],as.vector(c("Stem",rep("100",(ncol(overview)-1)))),overview[2:nrow(overview),])
}else{
if(firststem!=1){
file_new <- rbind(stemmatrix,file_new)}
}
}
i <- i+1
}
# #We want to order the overview a bit.
overview_new <- matrix(0,nrow(overview),ncol(overview))
overview_new[1,] <- overview[1,]
sub <- overview[2:nrow(overview),]
ov_stem <- sub[as.numeric(sub[,2])==100,]
ov_notstem <- sub[as.numeric(sub[,2])!=100,]
overview_new[2:nrow(overview_new),] <- rbind(ov_stem,ov_notstem)
overview <- overview_new
assign("file_new_stem", file_new, envir=globalenv())
file_new <- file_new[file_new[,1]!="Remove",]
assign("overview_stem", overview, envir=globalenv())
assign("file_new_removed", file_new, envir=globalenv())
if(firststem == 2){
assign("stemmatrix", stemmatrix, envir=globalenv())}
#########################################################
#Including events present in a certain number of samples#
#########################################################
#View(file_new)
if(missing(truncate)==FALSE){
print("You have chosen to truncate your data. We will now remove events that are present in less than this many samples:")
print(truncate)
remove <- matrix(0,nrow(overview),2)
i <- 2
j <- 1
for(i in 2:nrow(overview)){
nr_samples <- length(which(overview[i,3:ncol(overview)]!= "0"))
if(nr_samples<as.numeric(truncate)){
remove[j,1] <- overview[i,1]
j <- j+1
}
i <- i+1
}
#print("Här är vektorn")
#print(remove)
remove <- remove[remove[,1]!="0",]
i <- 1
for(i in 1:nrow(remove)){
pos <- which(remove[i,1]==file_new[,10])
file_new <- file_new[-pos,] #Removing the event.
overview <- overview[(overview[,1]%in%remove[,1])==FALSE,]
i <- i+1
}
}
file_new_hej <- file_new
overview_new <- overview
#View(file_new_hej)
#View(overview_new)
###################################################################
#Using Density-Based Spatial Clustering of Applications with Noise#
###################################################################
#overview_truncated <- overview[2:nrow(overview),2:ncol(overview)]
sub <- overview[2:nrow(overview),]
overview_truncated <- sub[as.numeric(sub[,2])!=100,] #Removing the ALL events so that events are not clustered into the stem.
oneevent <- 0
assign("overview", overview, envir=globalenv())
if(is.null(dim(overview_truncated))==FALSE){
print("more")
overview_truncated <- overview_truncated[,3:ncol(overview_truncated)]
overview_truncated <- as.data.frame(overview_truncated)
}else{
print("only one")
overview_truncated <- overview[2:nrow(overview),3:ncol(overview)]
overview_truncated <- as.data.frame(overview_truncated)
oneevent <- 1
}
overview_dfm <- data.matrix(overview_truncated, rownames.force = NA)
if(missing(eps)==TRUE){
eps <- 0.5
}
library(dbscan)
x <- kNNdist(overview_dfm, k = 1)
kNNdistplot(overview_dfm,k=1)
abline(h=eps, col = "red", lty=2)
myclusters <- dbscan(overview_dfm, eps = eps ,minPts = 1)
#myclusters <- dbscan(overview_dfm, eps = 15 ,minPts = 1) #TRACERx
print(myclusters)
assign("myclusters", myclusters, envir=globalenv())
# View(overview_dfm)
# fviz_cluster(myclusters,data = overview_dfm, minPts = 1) #Plotting the clusters.
#
# if(length(unique(overview_dfm)) >= 2){
# fviz_cluster(myclusters,data = overview_dfm, minPts = 1) #Plotting the clusters.
# }else{
# x = "Nope"
# print("Warning message: The input matrix does only contain one single subclone.")
# stopifnot(Datadimensions == "ok")
# }
#assign("overview", overview, envir=globalenv())
#######################################################################
#Constructing a matrix indicating which events belong to which cluster#
#######################################################################
#If we use DBSCAN
clusters <- as.matrix(myclusters$cluster)
overview_new <- cbind(overview,matrix(0,nrow(overview),1)) #Adding the cluster belonging to the overview.
if(oneevent == 0){
if(is.null(nrow(ov_stem))==TRUE){
t <- 1
}else{
t <- nrow(ov_stem)
}
overview_new[2:nrow(overview_new),ncol(overview_new)] <- c(c(rep("ALL",t)),clusters[,1])
unique_clusters <- c(c("ALL"),c(unique(clusters[,1]))) #Constructing the matrix in which the events will be saved.
}else{
overview_new[2:nrow(overview_new),ncol(overview_new)] <- c(clusters[,1])
unique_clusters <- c(unique(clusters[,1])) #Constructing the matrix in which the events will be saved.
}
assign("overview_cluster",overview_new,envir=globalenv())
cluster_matrix <- matrix(0,as.numeric(length(unique_clusters)),400)
print(unique_clusters)
i <- 1
for(i in 1:length(unique_clusters)){
if(unique_clusters[i]== "ALL"){
cluster_matrix[i,1] <- "ALL"
}else{
cluster_matrix[i,1] <- paste("Subclone",unique_clusters[i])}
i <- i+1
}
i <- 1
for(i in 2:nrow(overview_new)){ #Looping through the subclonal belonging.
j <- 2
Subclonerow <- match(overview_new[i,ncol(overview_new)],unique_clusters)
for(j in 2:ncol(cluster_matrix)){ #Looping through the available spots for saving the event.
if(cluster_matrix[Subclonerow,j] == "0"){
cluster_matrix[Subclonerow,j] <- overview_new[i,1]
break
}
j <- j+1
}
i <- i+1
}
addmatrix <- matrix(0,as.numeric(nrow(cluster_matrix)),3)
addmatrix[,1] <- cluster_matrix[,1]
clone_matrix_names <- cbind(addmatrix,cluster_matrix[,2:as.numeric(ncol(cluster_matrix))]) #Adding three columns in the beginning.
#View(clone_matrix_names)
#########################
#Extracting the clusters#
#########################
#Extracting clusters, calculating the median TC for each one and assigning the
#cluster names to the events in the segment file.
clusterTC <- matrix(0,400,3) #Changed from 200 to 400 210727.
calculateTC <- matrix(0,400,1)
i <- 1
s <- 1
t <- 1
#View(file_new)
for(i in 1:as.numeric(nrow(clone_matrix_names))){ #Looping through the rows in the clustermatrix.
j <- 1
for(j in 1:as.numeric(ncol(clone_matrix_names))){ #Looping through the mutations in a particular cluster i.
if(clone_matrix_names[i,j] != 0 && clone_matrix_names[i,j] != "0"){
k <- 1
for(k in 1:nrow(file_new)){ #Looping through the mutations in our datafile.
if(clone_matrix_names[i,j] == file_new[k,10]){
calculateTC[s,1] <- file_new[k,11] #Saving the TC
s <- s+1
}
k <- k+1
}
}
j <- j+1
}
#We now have a vector with all of the TC:s for that cluster.
clusterTC[t,1] <- paste("Cluster",t)
clusterTC[t,2] <- median(as.numeric(calculateTC[calculateTC[,1] != 0,]))
clone_matrix_names[i,2] <- median(as.numeric(calculateTC[calculateTC[,1] != 0,]))
calculateTC <- matrix(0,400,1) #Resetting the matrix.
s <- 1
t <- t + 1
i <- i+1
}
clusterTC_order <- clusterTC[order(as.numeric(clusterTC[,2]), decreasing = TRUE),]
clone_matrix_names <- clone_matrix_names[order(as.numeric(clone_matrix_names[,2]), decreasing = TRUE),]
if(missing(names)==FALSE){
if(names=="numbers"){
if("ALL" %in% file_new[,2] == FALSE){
namevector <- c(seq(1,1000))
}else{
namevector <- c("ALL",seq(1,1000))
}
}else if(names=="letters"||names=="subclone"){
# #If we do not have any ALL-events we want to use another name vector.
if("ALL" %in% file_new[,2] == FALSE){
namevector <- c("A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","X","Y","Z","ZA","ZB","ZC","ZD","ZE","ZF","ZG","ZH","ZI","ZJ","ZK","ZL","ZM","ZN","ZO","ZP","ZQ","ZR","ZS","ZT","ZU","ZV","ZX","ZY","ZZ",
"AAA","BBB","CCC","DDD","EEE","FFF","GGG","HHH")
}else{
namevector <- c("ALL","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","X","Y","Z","ZA","ZB","ZC","ZD","ZE","ZF","ZG","ZH","ZI","ZJ","ZK","ZL","ZM","ZN","ZO","ZP","ZQ","ZR","ZS","ZT","ZU","ZV","ZX","ZY","ZZ")
}
}
}else{
# #If we do not have any ALL-events we want to use another name vector.
if(length(unique(overview_cluster[,ncol(overview_cluster)]))<=40){
if("ALL" %in% file_new[,2] == FALSE){
namevector <- c("A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","X","Y","Z","ZA","ZB","ZC","ZD","ZE","ZF","ZG","ZH","ZI","ZJ","ZK","ZL","ZM","ZN","ZO","ZP","ZQ","ZR","ZS","ZT","ZU","ZV","ZX","ZY","ZZ",
"AAA","BBB","CCC","DDD","EEE","FFF","GGG","HHH")
}else{
namevector <- c("ALL","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","X","Y","Z","ZA","ZB","ZC","ZD","ZE","ZF","ZG","ZH","ZI","ZJ","ZK","ZL","ZM","ZN","ZO","ZP","ZQ","ZR","ZS","ZT","ZU","ZV","ZX","ZY","ZZ",
"AAA","BBB","CCC","DDD","EEE","FFF","GGG","HHH")
}
}else{
#There are a lot of subclones. Giving numbers instead.
if("ALL" %in% file_new[,2] == FALSE){
namevector <- c(seq(1,1000))
}else{
namevector <- c("ALL",seq(1,1000))
}
}
}
#print(namevector)
#View(clusterTC_order)
i <- 1
for(i in 1:as.numeric(nrow(clusterTC_order))){
# print(i)
# print(clusterTC_order[i,2])
if(as.numeric(clusterTC_order[i,2]) != 0){
if(i == 1){
if("ALL" %in% file_new[,2] == TRUE){ #If we have an ALL event we do not want to add the name "Subclone" to it.
clusterTC_order[i,1] <- paste("Cluster",i)
clusterTC_order[i,3] <- paste(namevector[i])
clone_matrix_names[i,1] <- paste("Cluster",i)
clone_matrix_names[i,3] <- paste(namevector[i])
}else{
clusterTC_order[i,1] <- paste("Cluster",i) #In the case where we do not have any ALL-events we want to add "Subclone" to the first subclone.
clusterTC_order[i,3] <- paste("Subclone_",namevector[i])
clone_matrix_names[i,1] <- paste("Cluster",i)
clone_matrix_names[i,3] <- paste("Subclone_",namevector[i])
}
}else{
clusterTC_order[i,1] <- paste("Cluster",i)
clusterTC_order[i,3] <- paste("Subclone_",namevector[i])
clone_matrix_names[i,1] <- paste("Cluster",i)
clone_matrix_names[i,3] <- paste("Subclone_",namevector[i])
}
}
i <- i+1
}
overview_subclones <- cbind(overview_new,matrix(0,nrow(overview_new),1))
i <- 2
for(i in 2:nrow(overview_subclones)){
#print(overview_subclones[i,1])
pos <- which(clone_matrix_names==overview_subclones[i,1], arr.ind = T)
#print(pos)
#print(clone_matrix_names[pos[1],3])
overview_subclones[i,ncol(overview_subclones)] <- clone_matrix_names[pos[1],3]
i <- i+1
}
assign("overview_subclones", overview_subclones, envir=globalenv())
assign("clone_matrix_names_hej", t(clone_matrix_names), envir=globalenv()) #The subclone names and which mutations are included in each subclone are exported to the global environment.
##################################################################
#Making a new file indicating which subclone each event belong to#
##################################################################
file_original <- file_new
file_subclones <- file_new
i <- 1
j <- 1
k <- 1
for(i in 1:as.numeric(nrow(clone_matrix_names))){ #Looping through a certain subclone.
for(j in 4:as.numeric(ncol(clone_matrix_names))){ #Looping through the clone mutations.
for(k in 1:as.numeric(nrow(file_new))){ #Looping through the file.
if(clone_matrix_names[i,j] == file_new[k,10]){ #If we match a subclone event with an event in our data set.
file_subclones[k,2] <- clone_matrix_names[i,3]
}
k <- k+1
}
j <- j+1
}
i <- i+1
}
##################################################
#Finding out which subclones exist in each sample#
##################################################
samples <- as.matrix(unique(c(file_original[,2]))) #Extracting all unique samples.
sample_clone_matrix <- matrix(0,40,(as.numeric(length(samples))*3)) #A matrix which will contain all of the samples and their clones. One column is used in order to asses the number of mutations that exist within the subclone.
file_samples_subclones <- cbind(file_new,file_subclones[,2]) #Adding a column with all of the subclonal denotations of the alterations.
file_samples_subclones <- cbind(file_samples_subclones,matrix(0,as.numeric(nrow(file_samples_subclones)),1))
i <- 1
m <- 1
for(i in 1:as.numeric(nrow(file_samples_subclones))){ #Adding the subclonal names to the sample names.
if(file_samples_subclones[i,2] != "ALL"){
file_samples_subclones[i,13] <- paste(file_samples_subclones[i,2],file_samples_subclones[i,12])
}else{
file_samples_subclones[i,13] <- "ALL"
}
i <- i+1
}
subclones <- as.matrix(unique(c(file_samples_subclones[,13]))) #Extracting all unique subclones within samples.
medianmatrix <- matrix(0,100,as.numeric(length(subclones))) #This matrix is to be used in order to calculate the median TC for each subclone within each sample.
medianmatrix[1,] <- subclones #The first row consists of the subclone names.
samples_unique <- as.matrix(unique(c(file_samples_subclones[,2]))) #Extracting all unique samples.
i <- 1
if("ALL" %in% file_new[,2] == TRUE){
for(i in 1:(ncol(sample_clone_matrix)/3)){ #All samples have the subclone named "ALL".
sample_clone_matrix[2,(3*i-2)] <- "ALL"
sample_clone_matrix[2,(3*i-1)] <- "100"
sample_clone_matrix[1,(3*i-2)] <- samples_unique[i,1]
i <- i+1
s <- 3
}
}else{
s <- 2
}
i <- 1
t <- 1
for(i in 1:as.numeric(nrow(file_samples_subclones))){ #Looping through the dataset.
if(i == 1){ #The first position will of course be our first sample in the matrix.
if("ALL" %in% file_new[,2] == TRUE){
sample_clone_matrix[1,1] <- file_samples_subclones[1,2] #Name.
}else{
sample_clone_matrix[1,1] <- file_samples_subclones[1,2] #When we do not have ALL-events in the tumor we need to extract the subclone name.
sample_clone_matrix[s,1] <- file_samples_subclones[1,12] #Name.
}
if(file_samples_subclones[1,2] != "ALL"){
sample_clone_matrix[s,2] <- file_samples_subclones[1,11] #TC.
sample_clone_matrix[s,3] <- 1
s <- s+1
medianmatrix[1,1] <- file_samples_subclones[1,13] #Name.
medianmatrix[2,1] <- file_samples_subclones[1,11] #TC.
}
if(file_samples_subclones[i,2] != file_samples_subclones[i+1,2]){
t <-t+1
}
}
if(i != 1){
if(i < as.numeric(nrow(file_samples_subclones))){
if(file_samples_subclones[i,2] == file_samples_subclones[i+1,2]){ #We are still within the same sample.
if((file_samples_subclones[i,12] %in% sample_clone_matrix[,((3*t)-2)]) == FALSE){ #The event should not already be in that column. We just want the unique labels.
sample_clone_matrix[1,(3*t-2)] <- file_samples_subclones[i,2] #Sample name.
sample_clone_matrix[s,(3*t-2)] <- file_samples_subclones[i,12] #Saving the clone name.
sample_clone_matrix[s,(3*t-1)] <- file_samples_subclones[i,11] #clone_matrix_names[as.numeric(match(file_samples_subclones[i,12], clone_matrix_names[,3])),2] #Saving the clone TC in that sample.
sample_clone_matrix[s,(3*t)] <- (as.numeric(sample_clone_matrix[s,(3*t)])+1)
#sample_clone_matrix[s,2*t] <- file_samples_subclones[i,11] #Saving the clone TC in that sample.
s <- s+1
}else{ #If the event already is in that column we add the TC.
if(file_samples_subclones[i,2] != "ALL"){
matchrow <- match(file_samples_subclones[i,12],sample_clone_matrix[,((3*t)-2)])
}else{matchrow <- 2}
sample_clone_matrix[matchrow,(3*t-1)] <- (as.numeric(sample_clone_matrix[matchrow,(3*t-1)]) + as.numeric(file_samples_subclones[i,11])) #Saving the clone TC in that sample.
sample_clone_matrix[matchrow,(3*t)] <- (as.numeric(sample_clone_matrix[matchrow,(3*t)])+1)
}
}
if(file_samples_subclones[i,2] != file_samples_subclones[i+1,2]){ #New sample.
if(file_samples_subclones[i,12] %in% sample_clone_matrix[,((3*t)-2)] == FALSE){ #The event should not already be in that column. We just want the unique labels.
sample_clone_matrix[1,(3*t-2)] <- file_samples_subclones[i,2] #Sample name.
sample_clone_matrix[s,(3*t-2)] <- file_samples_subclones[i,12] #Saving the clone name.
sample_clone_matrix[s,(3*t-1)] <- file_samples_subclones[i,11] #clone_matrix_names[as.numeric(match(file_samples_subclones[i,12], clone_matrix_names[,3])),2] #Saving the clone TC in that sample.
sample_clone_matrix[s,(3*t)] <- (as.numeric(sample_clone_matrix[s,(3*t)]) + 1) #Counting events.
#sample_clone_matrix[s,2*t] <- file_samples_subclones[i,11] #Saving the clone TC in that sample.
}else{
if(file_samples_subclones[i,2] != "ALL"){
matchrow <- match(file_samples_subclones[i,12],sample_clone_matrix[,((3*t)-2)])
}else{matchrow <- 2}
sample_clone_matrix[matchrow,(3*t-1)] <- (as.numeric(sample_clone_matrix[matchrow,((3*t)-1)]) + as.numeric(file_samples_subclones[i,11])) #Saving the clone TC in that sample.
sample_clone_matrix[matchrow,(3*t)] <- (as.numeric(sample_clone_matrix[as.numeric(matchrow),3*t])+1)
}
s <- 3 #Resetting the s.
if(t != length(samples)){
t <- t+1 #Going to the next triad of columns.
}
}
}
}
if(i == as.numeric(nrow(file_samples_subclones))){ #We're at the end of the file.
if(file_samples_subclones[i,2] != file_samples_subclones[i-1,2]){ #If the last row actually is a new sample.
s <- 3 #Resetting the s.
if(file_samples_subclones[i,12] %in% sample_clone_matrix[,((3*t)-2)] == FALSE){ #The event should not already be in that column. We just want the unique labels.
sample_clone_matrix[1,(3*t-2)] <- file_samples_subclones[i,2] #Sample name
sample_clone_matrix[s,(3*t-2)] <- file_samples_subclones[i,12] #Saving the clone name.
sample_clone_matrix[s,(3*t-1)] <- file_samples_subclones[i,11] #clone_matrix_names[as.numeric(match(file_samples_subclones[i,12], clone_matrix_names[,3])),2] #Saving the clone TC in that sample.
#sample_clone_matrix[s,2*t] <- file_samples_subclones[i,11] #Saving the clone TC in that sample.
}
}
if(file_samples_subclones[i,2] == file_samples_subclones[i-1,2]){ #If the last row is the same sample.
if(file_samples_subclones[i,12] %in% sample_clone_matrix[,((3*t)-2)] == FALSE){
sample_clone_matrix[s,(3*t-2)] <- file_samples_subclones[i,12] #Saving the clone name.
sample_clone_matrix[s,(3*t-1)] <- file_samples_subclones[i,11] #clone_matrix_names[as.numeric(match(file_samples_subclones[i,12], clone_matrix_names[,3])),2] #Saving the clone TC in that sample.
sample_clone_matrix[s,(3*t)] <- (as.numeric(sample_clone_matrix[s,(3*t)]) + 1) #Counting events.
}else{
matchrow <- match(file_samples_subclones[i,12],sample_clone_matrix[,(3*t-2)])
sample_clone_matrix[matchrow,(3*t-1)] <- (as.numeric(sample_clone_matrix[matchrow,(3*t-1)]) + as.numeric(file_samples_subclones[i,11])) #Saving the clone TC in that sample.
sample_clone_matrix[matchrow,(3*t)] <- (as.numeric(sample_clone_matrix[as.numeric(matchrow),3*t])+1)
}
}
}
samplematch <- match(file_samples_subclones[i,13],medianmatrix[1,])
k <- 2
m <- 0
for(k in 2:as.numeric(nrow(medianmatrix))){ #Making a matrix for calculating the median.
if(medianmatrix[k,samplematch] == "0"){
if(m == 0){
medianmatrix[k,samplematch] <- file_samples_subclones[i,11]
m <- 1
}
}
k <- k+1
}
i <- i+1
}
i <- 1
for(i in 1:as.numeric(ncol(medianmatrix))){
column <- as.matrix(medianmatrix[2:nrow(medianmatrix),i])
column <- column[column[,1] != "0",1]
medianmatrix[as.numeric(nrow(medianmatrix)),i] <- median(as.numeric(column))
i <- i+1
}
#Adding the TC to the matrix illustrating the subclonal architecture within a sample.
i <- 1
for(i in 1:as.numeric(ncol(medianmatrix))){ #Looping through the subclones.
columnsample <- match(word(medianmatrix[1,i],1),sample_clone_matrix[1,]) #Locating the sample. We get the column for the sample in sample_clone_matrix.
if(medianmatrix[1,i] != "ALL"){
rowsample <- match(word(medianmatrix[1,i],2,3),sample_clone_matrix[,columnsample]) #Locating the subclone in the row for the sample.
}else{rowsample <- match(word(medianmatrix[1,i],1),sample_clone_matrix[,columnsample])}
if(is.na(rowsample) == FALSE){
if(columnsample != 1){
sample_clone_matrix[rowsample,(columnsample+1)] <- medianmatrix[nrow(medianmatrix),i] #Adding the median TC.
}else{sample_clone_matrix[2,2] <- "100"}
}
i <- i+1
}
assign("sample_clone_matrix", sample_clone_matrix, envir=globalenv()) #The matrix which tells us which mutations belong to which subclone is transferred to the global environment.
####################################
#Building the event matrix skeleton#
####################################
subclones <- as.matrix(unique(c(file_samples_subclones[,13]))) #Extracting all unique subclones.
samples <- as.matrix(unique(c(file_new[,2]))) #Extracting all unique samples.
events <- as.matrix(unique(c(file_new[,10]))) #Extracting all unique events.
EMc <- nrow(subclones)+1
EMr <- nrow(events)+1
eventmatrix <- matrix(0,EMr,EMc) #Creating an empty event matrix.
eventmatrix[1,2:EMc] <- subclones #The subclone names are placed on the firs row of the event matrix.
eventmatrix[2:EMr,1] <- events #The event names are placed in the first column of the event matrix.
eventnumber <- nrow(file_new) #The upper bound of events we think the tumor will have.
events <- matrix(0,eventnumber,as.numeric(nrow(subclones))) #Creating an empty matrix for the events belonging to each subclone.
events[1,] <- subclones
#########################################################################################################
#Allocating the events to the samples/subclones. All subclones should have the events belonging to "ALL"#
#########################################################################################################
#Fills each column of 'events' (one column per subclone) with the event names (column 10
#of file_samples_subclones) whose subclone label (column 13) matches that column's
#subclone, plus every "ALL" event, which is shared by all subclones.
i <- 1
for(i in 1:ncol(events)){ #Looping through every subclone separately.
j = 1
s = 2 #Next free row in the current subclone column (row 1 holds the subclone name).
for(j in 1:nrow(file_samples_subclones)){ #Going through all of the events for the data set.
if(file_samples_subclones[j,13] == "ALL"){ #If we find an "ALL"-event the sample should always have this one.
events[s,i] <- file_samples_subclones[j,10]
s <- s+1
}
else if(events[1,i] == file_samples_subclones[j,13]){ #If we find an event belonging to the subclone we add it to the EM.
if((file_samples_subclones[j,10] %in% events[,i]) == FALSE){ #Skip duplicates.
events[s,i] <- file_samples_subclones[j,10]
s <- s+1
}
}
j <- j+1 #No-op: the for loop reassigns j.
}
i <- i+1 #No-op: the for loop reassigns i.
}
#############################
#Adding the events to the EM#
#############################
#For every event row of eventmatrix, mark each subclone column with 1 when that
#subclone's event list (the matching column of 'events') contains the event, and
#with 0 otherwise. eventmatrix is a character matrix at this point, so the numeric
#1/0 written here are coerced to "1"/"0", exactly as in the original fill.
for(evrow in 2:nrow(eventmatrix)){ #Each event (row 1 holds the subclone names).
for(subcol in 1:ncol(events)){ #Each subclone column of 'events'.
if(eventmatrix[evrow,1] %in% events[,subcol]){ #Does this subclone carry the event?
eventmatrix[evrow,subcol+1] <- 1
}else{
eventmatrix[evrow,subcol+1] <- 0
}
}
}
#View(events)
#View(eventmatrix)
###############################################################
#The subclones should have the events that its motherclone has#
###############################################################
#Bookkeeping structures for the sample-by-sample mother/daughter allocation below.
#The first group of assignments (i/j/s/space) is immediately overwritten by the
#second group and appears to be leftover initialization.
i <- 2
j <- 1
s <- 3
space <- matrix(0,50,1)
#The events of the mother clones are allocated within a single sample.
i <- 1
j <- 1
s <- 2
t <- 1
space <- matrix(0,50,2) #Spaces within a sample. Dynamic. Column 1 = clone name, column 2 = remaining space.
totalspace <- matrix(0,(as.numeric(nrow(space)+1)),((2*as.numeric(ncol(sample_clone_matrix))/3)+1)) #A matrix used for calculating the spaces available.
possible_mothers <- matrix(0,(as.numeric(nrow(space)+1)),((as.numeric(nrow(subclones))-1)*2)) #A matrix used for saving the possible motherclones.
rowsofhundred <- 40
hundredpercentclones <- matrix(0,rowsofhundred,length(samples_unique)) #Matrix that is to be used in the cases where we have > 2 clones in a sample that have 100 %.
hundredpercentclones[1,] <- samples_unique
hpc <- 2
cl <- 1
nr_eq <- 6
equalclones <- matrix(0,rowsofhundred,(length(samples_unique)*nr_eq)) #Matrix for clones of equal size within a sample; laid out as nr_eq segments of length(samples_unique) columns each.
equalclones[1,] <- rep(samples_unique,nr_eq)
ec <- 2 #Count.
ecl <- 1 #Column number.
k <- 1
for(k in 1:(ncol(sample_clone_matrix)/3)){ #Constructing a matrix where every two columns represent a sample. The first one tells us which subclone harbors the space and the second the remaining space on top of this sample.
totalspace[1,(2*k-1)] <- sample_clone_matrix[1,(3*k-2)]
k <- k+1
}
k <- 1
for(k in 2:as.numeric(nrow(subclones))){ #Constructing a matrix where every two columns represent a subclone within a sample. The first one tells us which the chosen motherclone is and the other which other possible solutions there are.
possible_mothers[1,(2*k-3)] <- subclones[k,1]
k <- k+1
}
#Mother-daughter clone matrix.
allocation_samples <- matrix(0,(as.numeric(nrow(clone_matrix_names))+1),(as.numeric(nrow(samples))+1)) #Matrix which is to be used for comparing the subclonal designation within each sample.
allocation_samples[2:(as.numeric(nrow(clone_matrix_names))+1),1] <- clone_matrix_names[,3] #The subclone names are in the first column.
allocation_samples[1,2:(as.numeric(nrow(samples))+1)] <- samples #The sample names are in the first row.
subcloneswithinsample <- matrix(0,(as.numeric(nrow(sample_clone_matrix))-1),2)
i <- 1
#Main allocation loop: for each sample, order its subclones by tumor-cell content,
#then place each subclone inside the largest remaining "space" (nested-clone model),
#recording the chosen mother in possible_mothers / allocation_samples and collecting
#100 %-clones and equal-sized clones into hundredpercentclones / equalclones.
for(i in 1:(as.numeric(ncol(sample_clone_matrix))/3)){ #Looping through all of the samples.
#for(i in 1:3){
subcloneswithinsample <- sample_clone_matrix[2:as.numeric(nrow(sample_clone_matrix)),(3*i-2):(3*i-1)] #Extracting the subclonal architecture and TC for a certain sample.
subcloneswithinsample_order <- subcloneswithinsample[order(as.numeric(subcloneswithinsample[,2]),decreasing = TRUE),] #Ordering the subclones from highest to lowest TC.
#Ordering the subclones.
ord <- 2
subcloneswithinsample_order_old <- matrix(0,(as.numeric(nrow(sample_clone_matrix))-1),2)
subcloneswithinsample_order_new <- subcloneswithinsample_order
#print(subcloneswithinsample_order_new)
#Bubble-style pass repeated until stable: adjacent subclones with identical TC are
#swapped when the second one dominates the first across the overview_cluster profile.
while(all(subcloneswithinsample_order_new == subcloneswithinsample_order_old) == FALSE){
subcloneswithinsample_order_old <- subcloneswithinsample_order_new
for(ord in 2:(as.numeric(nrow(subcloneswithinsample_order_old))-1)){ #Writing a function/loop that orders the subclones of the same size according to their median size.
if(subcloneswithinsample_order_old[ord,2] != "0"){
if(subcloneswithinsample_order_old[ord,2] == subcloneswithinsample_order_old[ord+1,2]){
# orderpos1 <- match(word(subcloneswithinsample_order_old[ord,1],2),namevector)
# orderpos2 <- match(word(subcloneswithinsample_order_old[ord+1,1],2),namevector)
# if(as.numeric(orderpos2) < as.numeric(orderpos1)){
# subcloneswithinsample_order_new <- subcloneswithinsample_order_old[c(1:(ord-1),ord+1,ord,(ord+2):nrow(subcloneswithinsample_order_old)), ]
# }
orderpos1 <- match(subcloneswithinsample_order_old[ord,1],overview_subclones[,ncol(overview_subclones)])
orderpos2 <- match(subcloneswithinsample_order_old[ord+1,1],overview_subclones[,ncol(overview_subclones)])
diff <- as.numeric(overview_cluster[orderpos1,2:(ncol(overview_cluster)-1)])-as.numeric(overview_cluster[orderpos2,2:(ncol(overview_cluster)-1)])
larger <- length(which(diff>0)) #In how many positions is the first one larger than the second one?
smaller <- length(which(diff<0)) #In how many positions is the second one larger than the first one?
if(smaller > larger){
subcloneswithinsample_order_new <- subcloneswithinsample_order_old[c(1:(ord-1),ord+1,ord,(ord+2):nrow(subcloneswithinsample_order_old)), ]
}
}
}
ord <- ord+1 #No-op: the for loop reassigns ord.
}
}
subcloneswithinsample_order <- subcloneswithinsample_order_new
#print(subcloneswithinsample_order)
j <- 1
ecl_original <- ecl
equal <- 1
for(j in 2:as.numeric(nrow(sample_clone_matrix))){ #Looping through the subclones within the sample.
#for(j in 2:4){
if(j == 2){ #We're in the first position. This is the ALL-event.
space[1,1] <- subcloneswithinsample_order[j-1,1] #The name.
space[1,2] <- subcloneswithinsample_order[j-1,2] #The TC.
}
if(j != 2){
if(subcloneswithinsample_order[j-1,1] != "0"){
if(subcloneswithinsample_order[j-1,1] != "ALL"){ #We should not add it again.
maxspace <- which.max(space[,2]) #Finding the largest available space. NOTE(review): space is a character matrix here; behavior of which.max on a character column should be confirmed.
newname <- subcloneswithinsample_order[j-1,1] #The name of the new subclone.
newspace <- subcloneswithinsample_order[j-1,2] #The space of the new subclone.
maxname <- space[maxspace,1]
#Adding a loop that checks whether or not this is the only possible solution for the subclone to be placed as a daughter to.
c <- 1
so <- 0
for(c in 1:nrow(space)){
if(as.numeric(space[c,2]) != 0){ #The new test space should not be zero. You cannot put anything there.
if((as.numeric(space[c,2])-as.numeric(newspace)) >= -0.1){ #Added this due to the simulation. Too many decimals. Rounding makes events not being placed in parallel.
daughter_pos <- match(paste(sample_clone_matrix[1,(3*i-2)],newname), possible_mothers[1,])
#print("Here is c")
#print(c)
if(c == maxspace && possible_mothers[2,daughter_pos] =="0"){
possible_mothers[2,daughter_pos] <- space[c,1] #Adding the original solution.
}
if(c != maxspace || (c == maxspace && possible_mothers[2,daughter_pos] !="0")){ #Added this. It can happen if there are equalclones. It will otherwise overwrite the mother in the second row.
#print("There are other solutions")
if(space[c,1] %in% possible_mothers[2,(as.numeric(daughter_pos))] == FALSE){
#print("Now we will add it")
daughter_pos <- match(paste(sample_clone_matrix[1,(3*i-2)],newname), possible_mothers[1,])
possible_mothers[(2+so),(as.numeric(daughter_pos)+1)] <- space[c,1]
#Silenced this 200720 and 200820. Made some rules disappear.
# if(space[c,2] == newspace){ #Does not necessarily mean that they actually are equalclones and can be placed inside each other.
# if(space[c,1] != "ALL"){ #Why not ALL? You can get errors.
# #print(space)
# #print(c)
#
# mothername <- paste(sample_clone_matrix[1,(3*i-2)],space[c,1])
# mothercolumn <- match(mothername,possible_mothers[1,])
# possible_mothers[1,mothercolumn]
# #print("here are mothername and newname")
# #print(mothername)
# #print(newname)
#
# n <- 1
# for(n in 2:nrow(possible_mothers)){
# #print("Here is an n")
# #print(n)
# #print(mothercolumn)
# #print(possible_mothers[n,(mothercolumn+1)])
# if(possible_mothers[n,(mothercolumn+1)] == "0"){
# possible_mothers[n,(mothercolumn+1)] <- newname
# break
# }
# n <- n+1
# }
# }
# }
so <- so+1
}}
}
}
c <- c+1 #No-op: the for loop reassigns c.
}
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix.
space[s,2] <- newspace
#Treating the case when space[maxspace,2] = 100 % and the newspace as well. Then the motherclone and the daughterclone are both part of the base.
if(subcloneswithinsample_order[j-1,2] == "100"){
if(subcloneswithinsample_order[j-1,1] != "ALL"){
if(subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),2] == "100"){
if(subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1] != "ALL"){
newspace_name <- subcloneswithinsample_order[j-1,1]
maxspace_name <- subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1]
if(hpc == 2){ #First pair of 100 % clones for this sample column.
hundredpercentclones[2,cl] <- subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1]
hundredpercentclones[3,cl] <- subcloneswithinsample_order[j-1,1]
hpc <- 4
}else{
if(subcloneswithinsample_order[j-1,1] %in% hundredpercentclones[,cl] == FALSE){
hundredpercentclones[hpc,cl] <- subcloneswithinsample_order[j-1,1]
hpc <- hpc + 1
}
}
allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- maxspace_name #Annotating the mother clone of each of the subclones within each sample.
allocation_samples[match(maxspace_name,allocation_samples[,1]),(i+1)] <- newspace_name #Annotating the mother clone of each of the subclones within each sample.
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annotating the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annotating the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annotating the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annotating the mother clone of each of the subclones within each sample.
}
#Treating the case when we have multiple clones of equal size that have to be placed inside each other.
if(subcloneswithinsample_order[j-1,1] != "ALL"){
if(subcloneswithinsample_order[j-1,2] != "100"){
if(as.numeric(subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),2]) == as.numeric(subcloneswithinsample_order[j-1,2])){ #It should be equal in size to the other cluster.
newspace_name <- subcloneswithinsample_order[j-1,1]
maxspace_name <- subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1]
if(ec == 2){ #We have not yet added any events to the equalcolumn for this sample.
equalclones[2,ecl] <- subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1]
equalclones[3,ecl] <- subcloneswithinsample_order[j-1,1]
ec <- 4
}else{ #We have added events earlier. We now want to add even one more to this column.
if(subcloneswithinsample_order[j-1,1] %in% equalclones[,ecl] == FALSE){
equalclones[ec,ecl] <- subcloneswithinsample_order[j-1,1]
ec <- ec + 1
}
}
#This part adds the names such that they get each others names in the allocation_samples matrix.
#Silenced this one 200308 since it sometimes made events be allocated in a weird way. 100 % events got each others and 50 % each others but the 50 did not get the 100.
#allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- maxspace_name #Annotating the mother clone of each of the subclones within each sample.
#allocation_samples[match(maxspace_name,allocation_samples[,1]),(i+1)] <- newspace_name #Annotating the mother clone of each of the subclones within each sample.
equal <- 2
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annotating the mother clone of each of the subclones within each sample.
if(equal == 2){ #A run of equal-sized clones just ended: advance to the next equalclones segment.
ecl <- ecl+length(samples_unique)
ec <- 2
equal <- 1}
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annotating the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annotating the mother clone of each of the subclones within each sample.
}
} #!= "ALL".
} #!= "0".
if(j == as.numeric(nrow(sample_clone_matrix))){ #We're at the end of a sample.
#print("Endspace")
#print(i)
#print(space)
totalspace[2:as.numeric(nrow(totalspace)),((t*2)-1):((t*2))] <- space
t <- t+1
s <- 2 #Resetting s and space.
space <- matrix(0,50,2)
}else{s <- s+1}
} #j != 2.
j <- j+1 #No-op: the for loop reassigns j.
} #Subclones within a sample.
hpc <- 2
cl <- cl + 1
ec <- 2
ecl <- ecl_original + 1
i <- i+1 #No-op: the for loop reassigns i.
} #Samples.
i <- 1
#Builds the 'Clustering' matrix: a copy of file_samples_subclones whose subclone
#label is reformatted as "<sample> Cluster_ <n>". Column 12 is first filled with
#"No" and then every column whose FIRST ROW equals "No" is dropped — this removes
#old column 12 so that column 13 (the subclone label) shifts into position 12.
#NOTE(review): this assumes no other column has "No" in its first row — confirm.
Clustering <- file_samples_subclones
Clustering[,12] <- "No"
Clustering <- Clustering[,Clustering[1,]!="No"]
colnames(Clustering)[12] <- "Cluster"
for(i in 1:nrow(Clustering)){
w1 <- word(Clustering[i,12],1) #Sample name.
w3 <- word(Clustering[i,12],3) #Subclone number.
w2 <- "Cluster_"
if(w1 != "ALL"){
Clustering[i,12] <- paste(w1,w2,w3,sep=" ")}
i <- i+1 #No-op: the for loop reassigns i.
}
assign("Clustering", Clustering, envir=globalenv()) #This is a matrix illustrating the events and their subclonal belonging.
assign("file_samples_subclones", file_samples_subclones, envir=globalenv()) #This is a matrix illustrating the events and their subclonal belonging.
assign("possible_mothers", possible_mothers, envir=globalenv()) #This is a matrix illustrating the chosen mother clone as well as other possible mothers.
assign("allocation_samples", allocation_samples, envir=globalenv()) #The mother-daughter division is exported to the global environment.
assign("equalclones", equalclones, envir=globalenv()) #The equal clones.
assign("hundredpercentclones", hundredpercentclones, envir=globalenv()) #The hundred-percent clones.
#Fusing the equalclones and the hundredpercentclones.
#equalclones is laid out as nr_eq side-by-side "segments", each
#ncol(hundredpercentclones) columns wide (one column per sample). For every
#sample that has hundred-percent clones, copy them into the first segment whose
#column for that sample is still free (row 2 == "0").
for(i in seq_len(ncol(hundredpercentclones))){
if(hundredpercentclones[2,i] != "0"){ #We have some hundredpercentclones in this sample.
seg_width <- as.numeric(ncol(hundredpercentclones)) #Width of one equalclones segment.
if(equalclones[2,i] == "0"){ #We do not have any other equalclones in this sample.
equalclones[2:nrow(equalclones),i] <- hundredpercentclones[2:nrow(hundredpercentclones),i] #We paste it here.
}else if(equalclones[2,(i+seg_width)] == "0"){ #We have something but not in the next one.
equalclones[2:nrow(equalclones),(i+seg_width)] <- hundredpercentclones[2:nrow(hundredpercentclones),i] #We paste it here.
}else if(equalclones[2,(i+2*seg_width)] == "0"){ #We have something but not in the next-next one.
#Bug fix: the original wrote to column (i + seg_width), overwriting the occupied
#second segment; the free slot that was just tested is (i + 2*seg_width).
equalclones[2:nrow(equalclones),(i+2*seg_width)] <- hundredpercentclones[2:nrow(hundredpercentclones),i] #We paste it here.
}
}
}
#View(events)
#The equalclones should have each other's mothers. Created 200804.
#For every equalclones column (clones of identical size within one sample): collect
#the possible mothers of each member, verify via overview that the members may really
#be nested inside each other (no contradictory sign pattern across biopsies); if the
#check fails, shift later equalclones segments down over this one; otherwise order the
#members by total overview size and rewrite possible_mothers so the members chain into
#each other (each member's mother is the previous member in the chain).
#NOTE(review): local variables 'order' and 'all' shadow base::order / base::all as
#data; function calls like order(...) below still resolve to the base functions.
i <- 1
save_order <- 1
unique_mothers <- list()
for(i in 1:ncol(equalclones)){ #Looping through the clones of equal size.
removed <- 0
if(equalclones[2,i] != "0"){
equal_mothers <- as.vector(equalclones[equalclones[,i] != "0",i])
order <- matrix(0,length(equal_mothers),2)
overview_sub <- matrix(0,length(equal_mothers),ncol(overview))
order[,1] <- as.matrix(equal_mothers)
j <- 2
for(j in 2:length(equal_mothers)){
daughter <- paste(equal_mothers[1],equal_mothers[j])
daughter_column <- match(daughter,possible_mothers[1,])
all <- rbind(as.matrix(possible_mothers[,daughter_column]),as.matrix(possible_mothers[,daughter_column+1]))
all_nozero <- as.matrix(all[all!="0",])
if(length(all_nozero)>1){ #Added this for the cases where there is none.
unique_mothers[[(j-1)]] <- all_nozero[2:nrow(all_nozero),] #Saving all unique mothers. 2 because we do not want the daughter.
}
file_samples_subclones_row <- match(daughter,file_samples_subclones[,13])
overview_row <- match(file_samples_subclones[file_samples_subclones_row,10],overview[,1])
overview_sub[j,] <- overview[overview_row,]
j <- j+1 #No-op: the for loop reassigns j.
}
unique_mother_tot <- as.matrix(unique(unlist(unique_mothers)))
j <- 2
for(j in 3:nrow(overview_sub)){
overview_sub <- overview_sub[,overview_sub[j,]!="0"] #Removing columns where not all of them are present simultaneously.
j <- j+1 #No-op: the for loop reassigns j.
}
overview_sub <- as.matrix(overview_sub)
overview_sub_n <- overview_sub[2:nrow(overview_sub),2:as.numeric(ncol(overview_sub))]
class(overview_sub_n) <- "numeric"
#Checking if the equalclones really are allowed to be placed inside each other in all samples.
m <- 1
again <- 0
if(is.null(nrow(overview_sub_n))==FALSE){
for(m in 1:nrow(overview_sub_n)){ #Looping through the clones in this equalclones.
n <- 1
for(n in 1:nrow(overview_sub_n)){
thesigns <- unique(sign(overview_sub_n[m,]-overview_sub_n[n,])) #Subtracting the rows.
if("-1" %in% thesigns && "1" %in% thesigns && again == 0 && length(equalclones[equalclones[,i]%in% hundredpercentclones,i])!=nrow(equalclones)){ #the last part was added due to handle cases where we have contradictions in the data so that we do not remove it.
# print("These should not be in equalclones.")
again <- 1 #We should not remove it multiple times.
removed <- 1
eq_segment <- ceiling(as.numeric(i)/as.numeric(length(samples_unique))) #Telling us which segment of equalclones we are in.
segments_left <- as.numeric(nr_eq)-as.numeric(eq_segment)
therest <- as.numeric(i)%%length(samples_unique) #Calculating in which biopsy we are.
if(as.numeric(therest) == 0){
biopsy <- nr_eq
}else{
biopsy <- therest
}
if(as.numeric(segments_left) != 0){ #There is at least one that should be moved.
p <- as.numeric(eq_segment)+1
for(p in (as.numeric(eq_segment)+1):as.numeric(nr_eq)){ #Shift each later segment's column for this biopsy one segment down.
if(as.numeric(biopsy)+(as.numeric(length(samples_unique))*(as.numeric(p)-1)) <= as.numeric(ncol(equalclones))){
equalclones[,as.numeric(biopsy)+(as.numeric(length(samples_unique))*(as.numeric(p)-2))] <- equalclones[,as.numeric(biopsy)+(as.numeric(length(samples_unique))*(as.numeric(p)-1))]
}
p <- p+1 #No-op: the for loop reassigns p.
}
}else{
equalclones[2:nrow(equalclones),i] <- "0"
}
}
n <- n+1 #No-op: the for loop reassigns n.
}
m <- m+1 #No-op: the for loop reassigns m.
}
}
if(is.null(nrow(overview_sub_n))==FALSE){
if(removed == 0){
order[2:nrow(order),2] <- as.matrix(rowSums(overview_sub_n))
assign("order",order,envir=globalenv())
order[2:nrow(order),2] <- as.numeric(order[2:nrow(order),2])
order_new <- order[order(as.numeric(as.matrix(order[,2])),decreasing=TRUE),] #Ordering the matrix by size.
order[1,] <- order_new[nrow(order_new),] #Smallest member becomes the chain head.
order[2:nrow(order),] <- order_new[1:(nrow(order_new)-1),]
# if(save_order == 1){ #Silenced it 210316 since equalclones_order and order had differing numbers of rows.
# equalclones_order <- order
# save_order <- 2
# }else{
# print(equalclones_order)
# print(order)
# print(nrow(equalclones_order))
# print(nrow(order))
# equalclones_order <- cbind(equalclones_order,order)
# }
j <- 2
for(j in 2:nrow(order)){ #Rewrite possible_mothers so that the ordered members chain into each other.
daughter_column <- match(paste(order[1,1],order[j,1]),possible_mothers[1,])
if(j == 2){
events <- as.matrix(unique_mother_tot[unique_mother_tot %in% order == FALSE,]) #Finding which possible mothers are not one of the equal clones. Why?
#events <- as.matrix(unique_mother_tot[unique_mother_tot != order[j,1],]) #Finding which possible mothers are left.
if(nrow(events)==1){
possible_mothers[2,daughter_column] <- events
}else{
#print(possible_mothers[1,daughter_column])
possible_mothers[2,daughter_column] <- events[1,]
possible_mothers[2:nrow(events),(daughter_column+1)] <- events[2:nrow(events),]
}
}else{
mother_column <- match(paste(order[1],order[j-1,1]),possible_mothers[1,])
#print(paste(order[1],order[j-1,1]))
#print(mother_column)
possible_mothers[2,daughter_column] <- word(possible_mothers[1,mother_column],2,3)
events <- as.matrix(unique_mother_tot[unique_mother_tot != order[j,1],]) #Finding which possible mothers are not one of the equal clones.
as.matrix(events[word(possible_mothers[1,mother_column],2,3) != order[j,1],]) #NOTE(review): result is discarded — possibly an assignment to 'events' was intended; confirm.
possible_mothers[2:nrow(events),(daughter_column+1)] <- "0" #events[2:nrow(events),1] #"0" was added 200821.
}
j <- j+1 #No-op: the for loop reassigns j.
}
}#Removed.
}
}
i <- i+1 #No-op: the for loop reassigns i.
}
assign("possible_mothers_2",possible_mothers,envir=globalenv())
assign("equalclones_new", equalclones, envir=globalenv()) #The equal clones.
#assign("equalclones_order",equalclones_order,envir=globalenv())
#########################################################################################
#We want to add all equalclones to possible mothers if one of them are a possible mother#
#########################################################################################
#This segment was "#" until 210626.
# i <- 1
# h <- 0
# for(i in 1:(ncol(possible_mothers)/2)){
#
# daughter <- possible_mothers[1,(2*i-1)]
# mother <- possible_mothers[2,(2*i-1)] #Right now I only check the "primary mother".
# eq_col <- which(word(daughter,1)==equalclones[1,])
# eq_sample <- equalclones[,eq_col] #All equalclones in this sample.
#
# if(mother %in% eq_sample){ #Checking if the mother is in equalclones.
# # print("eq_sample")
# # print(eq_sample)
# # print(eq_col)
# j <- 1
# for(j in 1:length(eq_col)){ #All equalclones columns for this sample.
#
# if(mother %in% eq_sample[,j]){
#
# eq_sub <- eq_sample[eq_sample[,j]!=mother,j]
# eq_sub <- eq_sub[eq_sub!="0"]
#
# othermothers <- possible_mothers[possible_mothers[,(2*i)]!="0",(2*i)]
# pos_m_rows <- length(othermothers)
#
# if(length(othermothers)!=0){ #We cannot add any if it is empty.
# print("Here")
# print(othermothers)
# print(length(othermothers))
# print(eq_sub)
#
# if(h == 0){
# assign("eq_sub",eq_sub,envir=globalenv())
# assign("othermothers",othermothers,envir=globalenv())
# h <- 1
# }
#
# eq_sub <- eq_sub[eq_sub!=othermothers] #Extracting the equalclones not yet given to the cluster in possible_mothers.
#
# if(length(eq_sub)!=0 && is.na(length(pos_m_rows))==FALSE){
# possible_mothers[(pos_m_rows+2):(pos_m_rows+length(eq_sub)),(2*i)] <- eq_sub[2:length(eq_sub)]} #Adding the new mothers.
# }
# }
#
# j <- j+1
# }
# }
# i <- i+1
# }
#
# assign("possible_mothers_3",possible_mothers,envir=globalenv())
###################################################################################
#Looking for discrepancies between the subclonal architecture of different samples#
###################################################################################
#Taking rules into consideration.
#'rule' is an optional argument (missing() implies this code runs inside a function):
#each row gives (mother event, daughter event, "Yes"/"No"). The events are translated
#to subclone labels via file_samples_subclones, then possible_mothers is edited so the
#ruled mother is forced ("Yes") or removed ("No") for every matching daughter column.
if(missing(rule)==FALSE){
i <- 1
for(i in 1:nrow(rule)){
if(rule[i,1]!="ALL"){
row_m <- match(rule[i,1],file_samples_subclones[,10])
rule[i,1] <- file_samples_subclones[row_m,12]
}else{ #If it is ALL we cannot match it to column 10.
rule[i,1] <- "ALL"
}
row_d <- match(rule[i,2],file_samples_subclones[,10])
rule[i,2] <- file_samples_subclones[row_d,12]
rule <- as.matrix(rule)
# #Adding or removing mothers
print(rule)
if(rule[i,3]=="Yes"){ #Force the ruled mother to be the chosen one everywhere the daughter occurs.
pos <- which(rule[i,2]==word(possible_mothers[1,],2,3)) #NOTE(review): if 'pos' is empty, 1:length(pos) below becomes 1:0 — confirm rules always match.
k <- 1
for(k in 1:length(pos)){
if(possible_mothers[2,as.numeric(pos[k])]!=rule[i,1]){ #The chosen mother is not the rule one.
if((rule[i,1]%in%possible_mothers[,as.numeric(pos[k])+1])==FALSE){ #The ruled mother is not even present here.
print("The ruled mother was not present in possible_mothers. Adding it.")
print(rule[i,])
#Maybe it is not even present in the sample?
# nr <- length(possible_mothers[possible_mothers[,pos[k]+1]!="0",pos[k]+1])
# print(nr)
# print(pos[k])
# possible_mothers[as.numeric(nr)+2,as.numeric(pos[k])+1] <- as.vector(rule[i,1])
#I add it as the chosen one and move the previously chosen one to the other column.
nr <- length(possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!="0",as.numeric(pos[k])+1])
possible_mothers[as.numeric(nr)+2,as.numeric(pos[k])+1] <- possible_mothers[2,as.numeric(pos[k])]
possible_mothers[2,as.numeric(pos[k])] <- rule[i,1]
}else{
#It is present here but we want it to be the chosen one.
nr <- length(possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!="0",as.numeric(pos[k])+1])
possible_mothers[as.numeric(nr)+2,as.numeric(pos[k])+1] <- possible_mothers[2,as.numeric(pos[k])]
possible_mothers[2,as.numeric(pos[k])] <- rule[i,1]
possible_mothers[1:(nrow(possible_mothers)-1),as.numeric(pos[k])+1] <- possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!=rule[i,1],as.numeric(pos[k])+1] #Removing it from the second column.
}
}
possible_mothers[,as.numeric(pos[k])+1] <- "0" #I try to just remove all other mothers.
k <- k+1 #No-op: the for loop reassigns k.
}
}else{
#We will remove mothers.
print("Removing a mother")
pos <- which(rule[i,2]==word(possible_mothers[1,],2,3))
print(rule[i,])
print(pos)
k <- 1
print(possible_mothers[2,as.numeric(pos[k])]) #NOTE(review): errors if 'pos' is empty — confirm rules always match a daughter.
for(k in 1:length(pos)){
if(possible_mothers[2,as.numeric(pos[k])]==rule[i,1]){ #The chosen mother is the rule one.
print("Rule one")
if(length(possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!="0",as.numeric(pos[k])+1])!=0){ #There are other possible mothers.
possible_mothers[2,as.numeric(pos[k])] <- possible_mothers[2,as.numeric(pos[k])+1] #Adding this instead.
possible_mothers[1:(nrow(possible_mothers)-1),as.numeric(pos[k])+1] <- possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!=possible_mothers[2,as.numeric(pos[k])+1],as.numeric(pos[k])+1] #Removing it from the second column.
}
}else if(rule[i,1]%in%possible_mothers[,as.numeric(pos[k])+1]){ #It is among other mothers.
print("Extra")
possible_mothers[1:(nrow(possible_mothers)-1),as.numeric(pos[k])+1] <- possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!=rule[i,1],as.numeric(pos[k])+1] #Removing it from the second column.
}
k <- k+1 #No-op: the for loop reassigns k.
}
}
i <- i+1 #No-op: the for loop reassigns i.
}
}else{
rule <- matrix(0,1,3) #No rules supplied: use an all-zero placeholder.
}
assign("rule_new",rule,envir=globalenv())
assign("rule_pos_moth",possible_mothers,envir=globalenv())
#If there are rules saying a certain subclone should be allocated at a specific place, we add it in the possible_mothers.
theonlymothers <- matrix(0,as.numeric(nrow(possible_mothers)),as.numeric(ncol(possible_mothers)))
if(as.numeric(length(unique(file_samples_subclones[,2]))) > 2){ #If we only have one biopsy we do not have to compare stuff.
i <- 2
x <- matrix(0,2,as.numeric(ncol(allocation_samples)))
x[1,] <- allocation_samples[1,]
tom <- 1 #We will start to save down the data in column 1.
Event_rule_removed <- 0
not_again <- 0
removed <- 0
stop_while <- 0
again <- 1
while(i <= as.numeric(nrow(allocation_samples))){ #Looping through the subclones.
#while(i <= 6){
print("Here is i - Subclones")
print(i)
print(allocation_samples[i,1])
only <- 0
x[2,] <- allocation_samples[i,1:as.numeric(ncol(allocation_samples))] #Extracting information about the motherclones in all samples.
print(x)
y <- as.data.frame(allocation_samples[i,2:as.numeric(ncol(allocation_samples))])
if(length(allocation_samples[i,allocation_samples[i,2:as.numeric(ncol(allocation_samples))]!="0"]) > 1 || x[2,1] %in% equalclones){ #If we only have this event in one sample we want to solely go on largest space.
y <- y[y[]!=0] #Removing the data points containing zeros. We now have all the motherclones.
mother_all_biopsies <- matrix(0,as.numeric(nrow(possible_mothers))+1,ncol(allocation_samples))
mother_all_biopsies[1,] <- allocation_samples[1,]
k <- 2
for(k in 2:ncol(x)){ #Looping through the daughter clones.
daughtersubclone <- paste(x[1,k],x[2,1]) #Finding the name of the daughter subclone.
daughterposition <- match(daughtersubclone,possible_mothers[1,]) #Finding the position for the daughter subclone in the possible_mothers matrix.
mother_all_biopsies[2,k] <- possible_mothers[2,as.numeric(daughterposition)]
mother_all_biopsies[3:nrow(mother_all_biopsies),k] <- possible_mothers[2:nrow(possible_mothers),(as.numeric(daughterposition)+1)] #This matrix will contain all mothers in all biopsies.
k <- k+1
}
distribution <- table(mother_all_biopsies[2:nrow(mother_all_biopsies),3:ncol(mother_all_biopsies)])
mother_not_all <- distribution[distribution!=length(y)]
mother_all <- distribution[distribution==length(y)] #This table illustrates the mothers that can be given in all biopsies.
# print("mother all!")
# print(mother_all)
# print(length(mother_all))
# print(distribution)
#print(daughtersubclone)
if(length(mother_all) > 1){
mother_all <- mother_all[mother_all>=length(y)]
}
#Testar att hitta den mest prevalenta modern.
#hej <- theonlymothers[1:3,word(theonlymothers[1,],2,3)=="Subclone_ 18"]
#hejsan <- table(hej[2,is.na(hej[1,])==FALSE])
#t <- table(hejsan)
#t
#which.max(t)
# mother_almost_all <- which.max(distribution)
# print("mother almost")
# print(mother_almost_all)
mother_almost_all <- distribution[distribution==(length(y)-1)]
mother_not_all <- mother_not_all[rownames(mother_not_all)!="0"] #This table illustrates all mothers that cannot be given in each biopsy.
j <- 2
mp <- 0
count_replace <- 1 #Used when we get a rule where we only have one mother in one sample and have to change earlier clones.
for(j in 2:ncol(x)){ #Looping through the motherclones.
if(x[2,j] != "0"){ #We do not want to analyze situations where we do not even have the daughter subclone in question.
daughtersubclone <- paste(x[1,j],x[2,1]) #Finding the name of the daughter subclone.
daughterposition <- match(daughtersubclone,possible_mothers[1,]) #Finding the position for the daughter subclones in the possible_mothers matrix.
justzeros <- table(possible_mothers[,(as.numeric(daughterposition)+1)]) #Extracting the column for the other possible mothers.
# print("Before")
# print(daughtersubclone)
# print(justzeros)
######################################################################
#Single-solution branch: every alternative position in possible_mothers
#is zero, so exactly one mother-daughter allocation remains for this
#subclone in this sample.
######################################################################
if((as.numeric(justzeros[1])/(as.numeric(nrow(possible_mothers)))) == 1){ #If it is one, then every position is a zero. This means that this is the only solution for this mother-daughter allocation in this sample.
daughterrowdata <- match(daughtersubclone,file_samples_subclones[,13]) #Finding the row of the subclone in the file. This will be used in order to obtain the clone size.
only <- 1 #Flag consumed further down: marks that a unique mother was found for this daughter.
onlymother <- possible_mothers[2,as.numeric(daughterposition)] #The single remaining candidate mother.
print("Onlymother")
print(onlymother)
# print("Now we are changing things backwards!")
column_equal <- which(x[1,j]==equalclones[1,]) #Column(s) of equalclones belonging to the current biopsy.
equalclones_biopsy <- equalclones[,column_equal]
#if(x[1,j] != "B1" && onlymother %in% equalclones == FALSE){ #Changed 210910. This does not matter if we only have one biopsy. why is it not allowed to be in equalclones?
#NOTE(review): the condition below chains %in% twice; because %in% binds
#tighter than ==, it parses as ((onlymother %in% mother_all) %in% equalclones) == FALSE,
#i.e. it tests whether the logical result (coerced to "TRUE"/"FALSE") occurs in
#equalclones. That is almost certainly not the intended test (compare the
#commented-out line above). Confirm the intended condition before changing it.
if(x[1,j] != "B1" && onlymother %in% mother_all %in% equalclones == FALSE){ #This does not matter if we only have one biopsy. why is it not allowed to be in equalclones?
if(count_replace!=1 && tom > 1){
#Rewrite the rules previously recorded for the same daughter (word 2-3 of
#the name, i.e. the clone name without the biopsy prefix) so they all point
#at this unique mother; the old mother is preserved in row 3 when free.
sub <- which(word(theonlymothers[1,1:(tom-1)],2,3)==word(theonlymothers[1,tom],2,3))
howmany <- length(sub)
if(length(sub)==0){howmany <- 0}
if(howmany!=0){
if(theonlymothers[3,tom-1] == "0"){
theonlymothers[3,(tom-as.numeric(howmany)):(tom-1)] <- theonlymothers[2,(tom-as.numeric(howmany)):(tom-1)] #Changed from j to count_replace. Otherwise we get problems when certain events are not present in all biopsies.
}
theonlymothers[2,(tom-as.numeric(howmany)):(tom-1)] <- onlymother
}
}
}
if(possible_mothers[2,as.numeric(daughterposition)] != "ALL"){ #Extracting the name of the mother it has to have.
mothername <- paste(word(possible_mothers[1,as.numeric(daughterposition)],1),possible_mothers[2,as.numeric(daughterposition)]) #The name of the only possible mother, prefixed with the biopsy.
motherrowdata <- match(mothername,file_samples_subclones[,13]) #Finding its row in the file in order to obtain the clone size.
}else{mothername <- "ALL"
motherrowdata <- 1}
#print(file_samples_subclones[daughterrowdata,11])
#print(file_samples_subclones[motherrowdata,11])
if(mothername != "ALL" && as.numeric(file_samples_subclones[daughterrowdata,11]) + as.numeric(file_samples_subclones[motherrowdata,11]) != 200){ #If the two clone sizes summed to exactly 200 the pair would each have to be the other's mother clone; skip that degenerate case.
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- possible_mothers[2,as.numeric(daughterposition)] #The mother clone which it has to have.
tom <- tom + 1
norules <- 0
}else if(mothername == "ALL"){ #The only possible mother for this clone are the "ALL" events.
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- possible_mothers[2,as.numeric(daughterposition)] #The mother clone which it has to have.
tom <- tom + 1
norules <- 0
}
}else{ #There are multiple solutions.
print("Multiple solutions")
if(is.na(match(x[2,1],hundredpercentclones)) == FALSE){ #The subclone is present in a hundredpercentclones.
columnhundred <- round(match(x[2,1],hundredpercentclones)/nrow(hundredpercentclones))+1 #The column. This column contains all of the alterations in the hundredpercentclone.
rowhundred <- match(x[2,1],hundredpercentclones)- columnhundred*nrow(hundredpercentclones)
if(is.na(match(x[2,j],hundredpercentclones[,columnhundred])) == FALSE){ #The mother exist in the same hundredpercentclone.
#Both the daughter and the mother exist in the same hundredpercentclones.
mother_most_common <- which.max(distribution[2:length(distribution)])
#print(distribution[2:length(distribution)])
#print(which.max(distribution[2:length(distribution)]))
mother_most_common <- names(distribution)[as.numeric(mother_most_common)+1]
#print("Most common")
#print(mother_most_common)
if(mother_most_common%in%hundredpercentclones[,columnhundred]==TRUE && mother_most_common %in% possible_mothers[,as.numeric(daughterposition)+1]==TRUE){
#The most common mother also exist in this 100% clone and it is also a possible mother to the clone.
#print("Här")
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- mother_most_common
}else{
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- possible_mothers[2,as.numeric(daughterposition)] #The mother clone which it has to have.
}
tom <- tom + 1
}else{
k <- 2
for(k in 2:nrow(possible_mothers)){ #Looping through the other solutions.
if(possible_mothers[k,(as.numeric(daughterposition)+1)]!= "0"){
if(is.na(match(possible_mothers[k,(as.numeric(daughterposition)+1)],hundredpercentclones[,columnhundred])) == FALSE){
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- possible_mothers[k,as.numeric(daughterposition)+1] #The mother clone which it has to have.
tom <- tom + 1
}
}
k <- k+1
}
}
}else if(length(names(mother_all)) < 1){
print("There is no mother that is possible in all samples.")
if(only == 1){
#print("Onlymother")
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- onlymother
tom <- tom+1
}else if(length(mother_almost_all) != 0){ #There is a possibility that this almost event is the true one.
p <- 1
for(p in 1:length(mother_almost_all)){ #Looping through the mothers that are possible in almost all samples.
columns <- which(theonlymothers[2,]==names(mother_almost_all)[p]) #Finding all places where this mother is present.
biopsy_nr <- x[1,j] #The biopsy we are looking at.
sample_columns <- which(word(theonlymothers[1,],1)== biopsy_nr) #Finding all positions belonging to this biopsy.
match_columns <- intersect(columns,sample_columns) #All the rules for events being placed in this mother.
#Information about the mother in this sample.
if(names(mother_almost_all)[p]=="ALL"){
mother_rule <- "ALL"
}else{
mother_rule <- paste(biopsy_nr,names(mother_almost_all)[p]) #Name
}
row_TC_mother_rule <- match(mother_rule,file_samples_subclones[,13]) #Position.
mother_rule_size <- file_samples_subclones[row_TC_mother_rule,11] #Size in the sample.
if(length(match_columns) >= 2){ #Changed to 2.
#print("There is a rule for this mother.")
our_new_daughter <- daughtersubclone
row_TC_our_new_daughter_rule <- match(our_new_daughter,file_samples_subclones[,13])
our_new_daughter_rule_size <- file_samples_subclones[row_TC_our_new_daughter_rule,11]
#Calculating if there is any room left.
#print("Calculating if there is any room left")
r <- 1
for(r in 1:length(match_columns)){
daughter_rule <- theonlymothers[1,as.numeric(match_columns[r])]
row_TC_daughter_rule <- match(daughter_rule,file_samples_subclones[,13])
daughter_rule_size <- file_samples_subclones[row_TC_daughter_rule,11]
if((as.numeric(daughter_rule_size)-as.numeric(our_new_daughter_rule_size)) > 0){ #If our new daughter is larger than the ones we're comparing with now, it is not interesting to subtract them since this new alteration will have f?retr?de.
mother_rule_size <- as.numeric(mother_rule_size)-as.numeric(daughter_rule_size)}
r <- r+1
}
if(is.na(as.numeric(mother_rule_size))==FALSE){
if(is.na(as.numeric(our_new_daughter_rule_size))==FALSE){
if((as.numeric(mother_rule_size)+0.1) < as.numeric(our_new_daughter_rule_size)){ #Added 0.1 because otherwise you might get rounding errors.
#print("There is no longer room.")
pos_rem1 <- match(names(mother_almost_all)[p],possible_mothers[,daughterposition])
if(is.na(pos_rem1) == FALSE){
possible_mothers[pos_rem1,daughterposition] <- "0"
}
if(daughterposition < ncol(possible_mothers)){
pos_rem2 <- match(names(mother_almost_all)[p],possible_mothers[,(daughterposition+1)])
if(is.na(pos_rem2) == FALSE){
possible_mothers[pos_rem2,(daughterposition+1)] <- "0"
}
}
removed <- 1
Event_rule_removed <- 1 #Indicating that an event has been removed.
}}}
}
p <- p+1 #Fixed typo: was "p < p+1", a comparison whose result was discarded. Note the for loop reassigns p on every iteration anyway, so this line is redundant either way; kept for consistency with the rest of the file.
}
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- names(mother_almost_all)[1] #The mother clone which is possible in almost all samples.
tom <- tom + 1
}else{
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)]
mother_possibly <- which.max(distribution[2:length(distribution)])
#print("possibly the mother")
#print(names(distribution)[mother_possibly])
theonlymothers[2,tom] <- names(distribution)[as.numeric(mother_possibly)+1] #Changed from just taking 2, to taking the most prevalent one.
tom <- tom+1 #Added this since we do not get the tom-count during the second round.
}
}else if(length(mother_all) == 1){
#Multiple solutions
#Adding a rule in order to make all the daughters originate after the same mother.
print("The only solution now.")
daughtersubclone <- paste(x[1,j],x[2,1]) #Finding the name of the daughter subclone.
daughterposition <- match(daughtersubclone,possible_mothers[1,]) #Finding the position for the daughter subclones in the possible_mothers matrix.
theonlymothers[2:nrow(theonlymothers),tom] <- "0"
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- names(mother_all)[1] #The mother clone which it has to have.
if(length(mother_almost_all) != 0){
theonlymothers[3,tom] <- names(mother_almost_all)[1]
}
tom <- tom + 1
norules <- 0
}else if(length(mother_all) > 1){ #There are multiple solutions that are possible in all samples.
print("We have multiple possible allocations that are equally probable")
print(mother_all)
print(x)
daughtersubclone <- paste(x[1,j],x[2,1]) #Finding the name of the daughter subclone.
daughterposition <- match(daughtersubclone,possible_mothers[1,]) #Finding the position for the daughter subclones in the possible_mothers matrix.
theonlymothers[2:nrow(theonlymothers),tom] <- "0"
#Adding a rule algorithm.
# rule_d <- which(rule[,2]==x[2,1])
# r <- 1
# rule_applied <- 0
# for(r in 1:length(mother_all)){
#
# rule_m <- which(rule[,1]==names(mother_all)[r])
# rule_both <- intersect(rule_d,rule_m)
# if(length(rule_both)>0){
# if(rule[rule_d,3]=="No"){
# print("They are not allowed to be placed after one another.") #Tystade då jag har en kod högre upp som gör det.
# #mother_all <- mother_all[names(mother_all)!=names(mother_all)[r]] #Removing this mother entirely.
# }else{
# print("They should be placed after one another.")
# mother_all <- rule[rule_d,1]
# }
# rule_applied <- 1
# }
#
# r <- r+1
# }
#Algorithm for choosing between many mothers that are possible in all samples.
if(ncol(overview) > 3){ #If it is 3 we do only have one sample. No reason to look for patterns.
mother_all_mtrx <- as.matrix(names(mother_all)) #The mother names.
if(length(names(mother_all))==1){
mother_all_type <- matrix(0,2,ncol(overview))
}else{
mother_all_type <- matrix(0,(nrow(mother_all)+1),ncol(overview))}
n <- 1
for(n in 1:(nrow(mother_all_mtrx)+1)){
if(n == 1){
mother_all_mtrx_row<- match(possible_mothers[1,as.numeric(daughterposition)],file_samples_subclones[,13]) #Position.
mother_all_type[n,1] <- file_samples_subclones[mother_all_mtrx_row,12]
type <- word(file_samples_subclones[mother_all_mtrx_row,10],1:3)
mother_all_type[n,2] <- paste(type[1],type[2],type[3],sep=" ")
overview_row <- match(file_samples_subclones[mother_all_mtrx_row,10], overview[,1])
mother_all_type[n,3:ncol(mother_all_type)] <- overview[overview_row,3:ncol(overview)]
}else{
if(mother_all_mtrx[n-1,1] != "ALL"){
mother_all_type[n,1] <- mother_all_mtrx[n-1,1]
mother_all_mtrx_row <- match(mother_all_mtrx[n-1,1],word(file_samples_subclones[,13],2,3)) #Position.
type <- word(file_samples_subclones[mother_all_mtrx_row,10],1:3)
mother_all_type[n,2] <- paste(type[1],type[2],type[3],sep=" ")
overview_row <- match(file_samples_subclones[mother_all_mtrx_row,10], overview[,1])
mother_all_type[n,3:ncol(mother_all_type)] <- overview[overview_row,3:ncol(overview)]
}else{
mother_all_type[n,1] <- "ALL"
mother_all_type[n,2] <- "ALL"
mother_all_type[n,3:ncol(mother_all_type)] <- "100"
}
}
n <- n+1
}
m <- 4
for(m in 4:ncol(mother_all_type)){
if(is.null(nrow(mother_all_type))==FALSE){
sign_vector <- (as.numeric(mother_all_type[,(as.numeric(m)-1)]) - as.numeric(mother_all_type[,m]))
same_sign <- as.matrix(sign(sign_vector[1]) == sign(sign_vector))
mother_all_type <- mother_all_type[same_sign,]
}
m <- m+1
}
if(is.null(nrow(mother_all_type))==FALSE){
mother_all_type <- mother_all_type[!is.na(mother_all_type[,1]),]
if(is.null(nrow(mother_all_type))==FALSE){
if(nrow(mother_all_type) > 1){
#print("There are mothers that follow the same pattern.")
mother_preferred <- mother_all_type[2,1] #We prefer to choose a mother that shows a similar pattern.
mp <- 1
}}
}
#print("Final")
}
#Adding the events.
if(not_again != 1){
if(mp != 1){
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
if(possible_mothers[2,as.numeric(daughterposition)]%in% names(mother_all)){ #If the largest space mother is in this, we will choose it.
theonlymothers[2,tom] <- possible_mothers[2,as.numeric(daughterposition)]
}else{
theonlymothers[2,tom] <- names(mother_all)[1] #The mother clone.
theonlymothers[3:(length(mother_all)+1),tom] <- names(mother_all)[2:length(mother_all)]}
}else{
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- mother_preferred
pos_mp <- match(mother_preferred,names(mother_all))
mother_all[pos_mp] <- mother_all[1] #Replacing the first event with the preferred one.
theonlymothers[3:(length(mother_all)+1),tom] <- names(mother_all)[2:length(mother_all)]
}
}else{
if(mp != 1){
if(possible_mothers[2,as.numeric(daughterposition)]%in% names(mother_all)){ #If the largest space mother is in this, we will choose it.
theonlymothers[2,tom] <- possible_mothers[2,as.numeric(daughterposition)]
}else{
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- names(mother_all)[1] #The mother clone which it has to have.
theonlymothers[3,tom] <- names(mother_all)[2]
if(length(mother_all) >= 3){
theonlymothers[4:(length(mother_all)+1),tom] <- names(mother_all)[3:length(mother_all)]}
}
}else{
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
pos_mp <- match(mother_preferred,names(mother_all))
theonlymothers[2,tom] <- mother_preferred
mother_all[pos_mp] <- mother_all[1] #Replacing the first event with the preferred one.
theonlymothers[3:(length(mother_all)+1),tom] <- names(mother_all)[2:length(mother_all)]
}
}
#We have to see if any of these mothers are not possible any more since we've gotten some rules for the allocations.
if(not_again != 1){
p <- 1
for(p in 2:(length(mother_all)+1)){ #Looping through the mothers that are possible in all samples.
columns <- which(theonlymothers[2,]==theonlymothers[p,tom]) #Finding all places where this mother is present.
biopsy_nr <- x[1,j] #The biopsy we are looking at.
sample_columns <- which(word(theonlymothers[1,],1)== biopsy_nr) #Finding all positions belonging to this biopsy.
match_columns <- intersect(columns,sample_columns) #All the rules for events being placed in this mother.
#Information about the mother in this sample.
if(theonlymothers[p,tom]=="ALL"){
mother_rule <- "ALL"
}else{
mother_rule <- paste(biopsy_nr,theonlymothers[p,tom]) #Name
}
row_TC_mother_rule <- match(mother_rule,file_samples_subclones[,13]) #Position.
mother_rule_size <- file_samples_subclones[row_TC_mother_rule,11] #Size in the sample.
if(length(match_columns) > 1){
#print("There is a rule for this mother.")
our_new_daughter <- daughtersubclone
row_TC_our_new_daughter_rule <- match(our_new_daughter,file_samples_subclones[,13])
our_new_daughter_rule_size <- file_samples_subclones[row_TC_our_new_daughter_rule,11]
#print(our_new_daughter)
#print(row_TC_our_new_daughter_rule)
#print(our_new_daughter_rule_size)
#Calculating if there is any room left.
r <- 1
#print("Calculating if there is any room left")
for(r in 1:length(match_columns)){
daughter_rule <- theonlymothers[1,as.numeric(match_columns[r])]
row_TC_daughter_rule <- match(daughter_rule,file_samples_subclones[,13])
daughter_rule_size <- file_samples_subclones[row_TC_daughter_rule,11]
#print("Daughter rule")
#print(daughter_rule)
if((as.numeric(daughter_rule_size)-as.numeric(our_new_daughter_rule_size)) > 0){ #If our new daughter is larger than the ones we're comparing with now, it is not interesting to subtract them since this new alteration will have f?retr?de.
mother_rule_size <- as.numeric(mother_rule_size)-as.numeric(daughter_rule_size)}
r <- r+1
}
#print(mother_rule_size)
if(is.na(as.numeric(mother_rule_size))==FALSE){
if(is.na(as.numeric(our_new_daughter_rule_size))==FALSE){
if((as.numeric(mother_rule_size)+0.1) < as.numeric(our_new_daughter_rule_size)){ #Added 0.1 because otherwise you might get rounding errors.
#print("There is no longer room.")
#changed from theonlymothers[p,tom].
pos_rem1 <- match(theonlymothers[p,tom],possible_mothers[,daughterposition])
pos_rem2 <- match(theonlymothers[p,tom],possible_mothers[,(daughterposition+1)])
if(is.na(pos_rem1) == FALSE){
possible_mothers[pos_rem1,daughterposition] <- "0"
}
if(is.na(pos_rem2) == FALSE){
possible_mothers[pos_rem2,(daughterposition+1)] <- "0"
}
removed <- 1
Event_rule_removed <- 1 #Indicating that an event has been removed.
#mother_all <- mother_all[names(mother_all) != theonlymothers[p,tom]] #Removing the mother from the possible ones.
#} While loop for error searching.
}}}
}
p <- p+1 #Fixed typo: was "p < p+1", a comparison whose result was discarded. Note the for loop reassigns p on every iteration anyway, so this line is redundant either way; kept for consistency with the rest of the file.
}}
norules <- 0
tom <- tom + 1
}
}
count_replace <- count_replace+1 #Increasing this one which calculates how many columns we are into theonlymothers for this subclone. Needed when we will replace earlier chosen mothers since we later got a definitive rule.
}
j <- j+1
} #Looping through motherclones.
#Loop-control section: decide whether to repeat the current iteration,
#advance to the next daughter, or restart the whole pass.
if(Event_rule_removed == 1 && not_again!=1){ #A candidate mother was pruned above: redo this daughter once (not_again guards against an infinite redo).
# print("We will now redo the loop.")
Event_rule_removed <- 0
not_again <- 1
removed <- 0
tom <- (tom-(count_replace-1)) #We have to reset this. changed from allocation samples to count_replace since some events are not present in all samples.
}else{
not_again <- 0
i <- i+1 #Advance to the next daughter subclone.
}
stop_while <- stop_while+1 #Watchdog counter against non-terminating redo cycles.
if(stop_while > 2*ncol(theonlymothers)){ #Give up: force the outer loop past its end.
i <- (as.numeric(nrow(allocation_samples))+1)
break
}
if(i==as.numeric(nrow(allocation_samples))&&again==1){ #We will redo it all in order to minimize discrepancies.
#print("We will now redo it all.")
tom <- 1 #Restart the rule matrix fill position.
again <- 0 #Only one full restart is allowed.
i <- 2
}
}else{ #If the event is only present in one sample.
#print("One sample")
biopsy <- which(allocation_samples[i,2:as.numeric(ncol(allocation_samples))]!="0") #The single biopsy column in which this event occurs.
name <- paste(allocation_samples[1,biopsy+1],allocation_samples[i,1]) #Biopsy-prefixed clone name.
pos <- match(name,possible_mothers[1,]) #Its column in possible_mothers.
#print(biopsy)
#print(name)
#print(pos)
if(possible_mothers[2,pos+1]=="0"){ #No alternative candidates: the one listed mother is mandatory, so record it as a rule.
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(pos)] #The clone which is to be allocated.
theonlymothers[2,tom] <- possible_mothers[2,as.numeric(pos)] #The mother clone which it has to have.
}
tom <- tom + 1
i <- i+1}
}
}
#Export the working matrices to the global environment for downstream use.
#NOTE(review): assign() into globalenv() makes this script order-dependent
#and hard to test; returning these values would be preferable.
assign("theonlymothers", theonlymothers, envir=globalenv()) #The mother-daughter rules derived above. (Comment fixed: previously mislabelled "The equal clones".)
assign("equalclones_before", equalclones, envir=globalenv()) #The equal clones as they stand before the re-allocation below.
assign("possible_mothers_new", possible_mothers, envir=globalenv()) #The (possibly pruned) candidate-mother matrix. (Comment fixed: previously mislabelled "The equal clones".)
###############################################
#Updating the mother-daughter-clone allocation#
###############################################
i <- 1
j <- 1
s <- 2
t <- 1
space <- matrix(0,50,2) #Spaces within a sample. Dynamic. Column 1 = subclone name holding the space, column 2 = remaining space (TC).
totalspace <- matrix(0,(as.numeric(nrow(space)+1)),((2*as.numeric(ncol(sample_clone_matrix))/3)+1)) #A matrix used for calculating the spaces available.
possible_mothers <- matrix(0,(as.numeric(nrow(space)+1)),((as.numeric(nrow(subclones))-1)*2)) #A matrix used for saving the possible motherclones. NOTE: this rebinds/overwrites the matrix exported two lines above.
Not_allocated_correctly <- matrix(0,ncol(theonlymothers),3) #Log of (sample, clone, size) rows that could not be placed under their conditioned mother.
nac <- 1 #Next free row in Not_allocated_correctly.
k <- 1
for(k in 1:(ncol(sample_clone_matrix)/3)){ #Constructing a matrix where every two columns represent a sample. The first one tells us which subclone harbors the space and the second the remaining space on top of this sample.
totalspace[1,(2*k-1)] <- sample_clone_matrix[1,(3*k-2)]
k <- k+1 #Redundant: the for loop reassigns k each iteration.
}
k <- 1
for(k in 2:as.numeric(nrow(subclones))){ #Constructing a matrix where every two columns represent a subclone within a sample. The first one tells us which the chosen motherclone is and the other which other possible solutions there are.
possible_mothers[1,(2*k-3)] <- subclones[k,1]
k <- k+1 #Redundant: the for loop reassigns k each iteration.
}
subcloneswithinsample <- matrix(0,(as.numeric(nrow(sample_clone_matrix))-1),2)
#SAMPLE LOOP
i <- 1
for(i in 1:(as.numeric(ncol(sample_clone_matrix))/3)){ #Looping through all of the samples.
#Change the loop number
#for(i in 1:45){
#for(i in 3:3){
#print("Here is i")
#print(i)
subcloneswithinsample <- sample_clone_matrix[2:as.numeric(nrow(sample_clone_matrix)),(3*i-2):(3*i-1)] #Extraxting the subclonal architecture and TC for a certain sample.
subcloneswithinsample_order <- subcloneswithinsample[order(as.numeric(subcloneswithinsample[,2]),decreasing = TRUE),] #Ordering the subclones from highest to lowest TC.
sameclones <- 0
current_sample <- sample_clone_matrix[1,(3*i-2)]
#print("Sample")
#print(current_sample)
#Tystade 210716. Vet inte riktigt vad den sista kolumnen är till för.
# or <- 1
# subcloneswithinsample_order <- cbind(subcloneswithinsample_order,matrix(0,nrow(subcloneswithinsample_order),1))
# for(or in 1:nrow(subcloneswithinsample_order)){
# col <- match(subcloneswithinsample_order[or,1],clone_matrix_names[,3]) #The third row contains the subclones.
# subcloneswithinsample_order[or,3] <- clone_matrix_names[3,as.numeric(col)] #The third row contains the subclones.
# or <- or+1
# }
#Arranging the subclones.
#Fixpoint ordering pass: repeatedly sweep over adjacent subclone pairs that
#have identical TC in this sample, and swap them when the second clone is
#larger than the first in more cluster positions across all samples. The
#while loop repeats the sweep until an entire pass produces no change
#(i.e. old and new orderings agree in column 1).
subcloneswithinsample_order_old <- matrix(0,(as.numeric(nrow(sample_clone_matrix))-1),2)
subcloneswithinsample_order_new <- subcloneswithinsample_order
ord <- 2
while(all(subcloneswithinsample_order_new[,1] == subcloneswithinsample_order_old[,1]) == FALSE){
subcloneswithinsample_order_old <- subcloneswithinsample_order_new
ord <- 2
for(ord in 2:(as.numeric(nrow(subcloneswithinsample_order_old))-1)){ #Writing a function/loop that orders the subclones of the same size according to their median size.
if(subcloneswithinsample_order_old[ord,2] != "0"){
if(subcloneswithinsample_order_old[ord,2] == subcloneswithinsample_order_old[ord+1,2]){ #Adjacent pair tie on TC in this sample: break the tie using the cross-sample cluster profile.
# orderpos1 <- match(word(subcloneswithinsample_order_old[ord,1],2),namevector)
# orderpos2 <- match(word(subcloneswithinsample_order_old[ord+1,1],2),namevector)
#
# if(as.numeric(orderpos2) < as.numeric(orderpos1)){
# subcloneswithinsample_order_new <- subcloneswithinsample_order_old[c(1:(ord-1),ord+1,ord,(ord+2):nrow(subcloneswithinsample_order_old)), ]
# }
orderpos1 <- match(subcloneswithinsample_order_old[ord,1],overview_subclones[,ncol(overview_subclones)])
orderpos2 <- match(subcloneswithinsample_order_old[ord+1,1],overview_subclones[,ncol(overview_subclones)])
diff <- as.numeric(overview_cluster[orderpos1,2:(ncol(overview_cluster)-1)])-as.numeric(overview_cluster[orderpos2,2:(ncol(overview_cluster)-1)]) #NOTE: local "diff" shadows base::diff within this scope.
larger <- length(which(diff>0)) #In how many positions is the first one larger than the second one?
smaller <- length(which(diff<0)) #In how many positions is the second one larger than the first one?
if(smaller > larger){
#NOTE(review): when ord == nrow-1 the tail index (ord+2):nrow(...) becomes
#(nrow+1):nrow, which counts DOWN and injects an NA row plus a duplicate.
#This only triggers if the tie is in the last two (non-"0") rows — confirm
#whether padding rows make that unreachable before relying on it.
subcloneswithinsample_order_new <- subcloneswithinsample_order_old[c(1:(ord-1),ord+1,ord,(ord+2):nrow(subcloneswithinsample_order_old)), ]
}
}
}
ord <- ord+1 #Redundant: the for loop reassigns ord each iteration.
}
}
subcloneswithinsample_order <- subcloneswithinsample_order_new #The stabilized ordering used below.
#print("Sub")
#print(i)
#SUBCLONE LOOP
j <- 1
for(j in 2:as.numeric(nrow(sample_clone_matrix))){ #Looping through the subclones within the sample.
#print("Here is j")
#print(j)
tick <- 0
if(j == 2){ #We're in the first position. This is the ALL-event.
space[1,1] <- subcloneswithinsample_order[j-1,1] #The name.
space[1,2] <- subcloneswithinsample_order[j-1,2] #The TC.
}
if(j != 2){
if(subcloneswithinsample_order[j-1,1] != "0"){
if(subcloneswithinsample_order[j-1,1] != "ALL"){ #We should not add it again.
maxspace <- which.max(space[,2]) #Finding the largest available space.
newname <- subcloneswithinsample_order[j-1,1] #The name of the new subclone.
newspace <- subcloneswithinsample_order[j-1,2] #The space of the new subclone.
full_newname <- paste(sample_clone_matrix[1,(3*i-2)],newname)
#print("Precisely before conditioned.")
if(newspace != "100"){
if(newname %in% word(theonlymothers[1,],2,3) == TRUE){ #The clone in question has a condition on it.
######################
#CONDITIONED SUBCLONE#
######################
#print("Conditioned")
newnamecolumn <- match(paste(current_sample,newname),theonlymothers[1,]) #Finding the column in theonlymothers that the daughter has.
maxname <- theonlymothers[2,newnamecolumn]
#subpart <- theonlymothers[2,word(theonlymothers[1,],2,3)==newname]
#subpart <- subpart[is.na(subpart) == FALSE] #All the other mothers in other samples.
if(is.na(newnamecolumn)==TRUE){
newnamecolumn <- match(newname,word(theonlymothers[1,],2,3))
}
maxname <- theonlymothers[2,newnamecolumn] #This is the name of the mother that it has to have.
#print("The maxname and the newname")
#print(maxname)
#print(newname)
if(maxname %in% space[,1] == TRUE){ #Does the mother exist in the sample in question?
#print("The mother exist in the sample")
maxspace <- match(maxname,space[,1]) #This is the new maxspace's row position in the space matrix.
maxspaceTC <- space[maxspace,2] #Added this since maxspace in the row above only gives the row position and not the actual TC.
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix.
space[s,2] <- newspace
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
#Added TC to the row below since we should compare TC:s. maxspace is just a row position.
#Added 0.1 here 200721 since rounding problems can occur when handling simulated data.
#print("Spaces")
#print(maxspaceTC)
#print(newspace)
if(as.numeric(maxspaceTC)+0.1 >= as.numeric(newspace)){ #There must be enough room left.
#print("There is room")
if(is.na(x) == FALSE){ #The maxname is in equalclones.
#print("Maxname is in equalclones")
if(is.na(y) == TRUE){ #The newname is not in equalclones.
#print("Newname is not")
true_maxsize <- file_samples_subclones[match(paste(current_sample,maxname),file_samples_subclones[,13]),11]
true_newsize <- file_samples_subclones[match(paste(current_sample,newname),file_samples_subclones[,13]),11]
#print(maxname)
#print(true_maxsize)
#print(newname)
#print(true_newsize)
if(true_maxsize!=true_newsize){ #Added 200920 since I got weird equalclones.
#if(as.numeric(newspace) != space[maxspace,2]){ #They are not of the same size.
#print("They are not of the same size.")
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_x] != "0"){ #I changed the i to i+e_x
eq_row <- match(equalclones[eq,e_x],space[,1])
if(is.na(eq_row) == FALSE){
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
}
eq <- eq+1
}
}else{ #This event should belong to the equalclones-subclone since they are of the same size.
#print("The new event should belong to equalclones for maxclone")
# if(newname %in% equalclones[2:nrow(equalclones),i] == FALSE){ #If it does not already belong to the equalclones of this sample.
eq <- 2
for(eq in 2:nrow(equalclones)){ #Scan for the first free ("0") slot in this equalclones column.
if(equalclones[eq,e_x] == "0"){
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){
equalclones[eq,e_x] <- newname} #Add newname unless it is already listed in this column.
break #Fixed: previously "eq <- nrow(equalclones)" — in R, assigning to the loop variable does NOT terminate a for loop (it is reassigned on the next iteration), so the loop silently kept running. An explicit break stops at the first free slot; the duplicate guard above made the extra iterations harmless, so behavior is unchanged.
}
eq <- eq+1 #Redundant: the for loop reassigns eq each iteration.
}
}
}else if(as.numeric(newspace) == as.numeric(space[maxspace,2])){ #Newname does belong to equalclones as well as maxname.
#print("Newname is in equalclones as well")
#Is it in the same equalclones?
if(e_x == e_y){
# if(newname %in% equalclones[2:nrow(equalclones),i] == FALSE){ #If it does not already belong to the equalclones of this sample.
eq <- 2
for(eq in 2:nrow(equalclones)){ #Scan for the first free ("0") slot in this equalclones column.
if(equalclones[eq,e_x] == "0"){
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){
equalclones[eq,e_x] <- newname} #Add newname unless it is already listed in this column.
break #Fixed: previously "eq <- nrow(equalclones)" — in R, assigning to the loop variable does NOT terminate a for loop (it is reassigned on the next iteration), so the loop silently kept running. An explicit break stops at the first free slot; the duplicate guard above made the extra iterations harmless, so behavior is unchanged.
}
eq <- eq+1 #Redundant: the for loop reassigns eq each iteration.
}
}
}else{ #Maxclone and new name is in equaclones. They are not of the same size.
#space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
#Added 200202.
#print("Newname is in equalclones as well")
#print("They are not of the same size")
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_x] != "0"){ #I changed the i to i+e_x
eq_row <- match(equalclones[eq,e_x],space[,1])
if(is.na(eq_row) == FALSE){
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
}
eq <- eq+1
}
}
}else{ #Maxname is not in equalclones.
#print("Maxname is not in equalclones")
thename <- space[maxspace,1]
therow <- match(thename,subcloneswithinsample_order_new[,1])
maxTC <- subcloneswithinsample_order_new[therow,2]
if(as.numeric(maxTC) == as.numeric(newspace)){ #The maxspace and the newclone are of equal size.
#print("Maxname and newname is of equal size.")
if(is.na(y) == FALSE){
#print("Newname is in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_y] == "0"){
if(maxname %in% equalclones[2:nrow(equalclones),e_y] == FALSE){ #Maxname is not in this equalclone column.
equalclones[eq,e_y] <- maxname} #Adding the maxname to the equalclones.
break
}
eq <- eq+1
}
}else{ #The newname does not exist in the equalclones for this sample.
#print("Newname is not in equalclones")
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matrix.
equalclones[2,i] <- newname
equalclones[3,i] <- thename
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- thename
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- thename
}
}
}
#print(space)
if(as.numeric(space[maxspace,2]) >= as.numeric(newspace)){ #Added = 210412.
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
}
#print(space)
}
}else{ #This is the case where the conditioned subclone does not have room for the new event.
# print("We could not allocate it to the conditioned place.")
# print(current_sample)
# print(space[space[,1]!="0",])
# print(maxname)
# print(maxspaceTC)
# print(newname)
# print(newspace)
Not_allocated_correctly[nac,1] <- current_sample
Not_allocated_correctly[nac,2] <- newname
Not_allocated_correctly[nac,3] <- newspace
nac <- nac+1
if(theonlymothers[3,newnamecolumn]!= "0" && theonlymothers[3,newnamecolumn] %in% space[,1]){ #New 200720.
#This a second conditioned clone.
#print("Second conditioned")
maxspace <- match(theonlymothers[3,newnamecolumn],space[,1])
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]
#Actually we do not want the other subclones to be allocated to the event since it is not possible in all samples any more.
}else{
maxspace <- which.max(space[,2]) #Finding the largest available space.
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]}
if(maxspaceTC == newspace && maxname!="ALL"){ # They are of equal size.
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
if(is.na(x) ==TRUE){
#print("The maxname is not in equalclones.")
if(is.na(y) == TRUE){
#print("The newname is not in equalclones either.")
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matrix.
equalclones[2,i] <- newname
equalclones[3,i] <- maxname #Changed "thename" to "maxname" 200720.
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- maxname
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- maxname
}
}else{
#print("Newname is in equalclones.")
#print(newname)
#print(maxname)
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_y] == "0"){
equalclones[eq,e_y] <- maxname #Adding the maxname to the equalclones.
break
}
eq <- eq+1
}
}
}else{
#print("Maxname is in equalclones.")
if(is.na(y) == TRUE){
#print("Newname is not in equalclones.")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
equalclones[eq,e_x] <- newname} #Adding the newname to the equalclones.
break
eq <- eq+1
}
}else{
#print("Newname is in equalclones as well.")
if(e_x != e_y){
#print("They are not in the same equalclones.")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){ #Maxname is not in this equalclone column.
equalclones[eq,e_x] <- newname} #Adding the maxname to the equalclones.
break
}
eq <- eq+1
}
equalclones[y,e_y] <- "0" #Removing the newname from its old position.
}
}
}
}else{
#Not of equal size.
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Uncommented 210912.
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,i] != "0"){
eq_row <- match(equalclones[eq,i],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
}
}
}else{
#print("It is not allocated yet.")
if(maxname %in% sample_clone_matrix[,(3*i-2)] == TRUE){ #The clone exist in the sample but has not been allocated yet. This only happens if they are equal in size.
#print("The mother has not been allocated yet but it exist in the biopsy.")
###################################################################
#The mother has not been allocated yet but it exist in the biopsy.#
###################################################################
rowofthemother <- match(maxname,subcloneswithinsample_order[,1]) #The row in which the mother exist in the subcloneswithinsample matrix.
themothername <- maxname
thedaughtername <- newname
if(themothername %in% word(theonlymothers[1,],2,3) == TRUE){ #The mother is conditioned.
#print("The mother is conditioned")
#print(subcloneswithinsample_order)
mothernamecolumn <- match(paste(current_sample,themothername),theonlymothers[1,]) #Finding the column in theonlymothers that the daughter has.
themothermothername <- theonlymothers[2,mothernamecolumn]
rowofthemothermother <- match(themothermothername,subcloneswithinsample_order[,1])
if(is.na(rowofthemothermother)==TRUE){
#print("The mothermother has not been allocated yet.")
#Added this since we do not get a tree otherwise.
maxspace <- which.max(space[,2])
rowofthemothermother <- match(space[maxspace,1],subcloneswithinsample_order[,1])
themothermothername <- space[maxspace,1]
}
}else{
#print("The mother is not conditioned.")
rowofthemothermother <- match(space[maxspace,1],subcloneswithinsample_order[,1])
themothermothername <- space[maxspace,1]
}
tick <- 1 #Just so that we know that we've been in this loop and that space[maxspace,1] outside the loop will be the mother to the mother.
#It may happen that the mother it should have here is smaller than the daughter. Switch positions.
if(as.numeric(subcloneswithinsample_order[rowofthemother,2]) < as.numeric(newspace)){
#print("The mother is smaller than the daughter.")
newspace <- subcloneswithinsample_order[as.numeric(rowofthemother),2] #Finding the new newspace.
rowofthemother <- match(newname,subcloneswithinsample_order[,1]) #The new row of the mother.
temp1 <- thedaughtername
temp2 <- themothername
themothername <- temp1 #Changing the name.
newname <- temp2
}
#Mother
#space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(subcloneswithinsample_order[rowofthemother,2])) #The mother is allocated to its mother.
space[s,1] <- themothername #Adding the new spacename and space size to the spacematrix belonging to the mother.
space[s,2] <- subcloneswithinsample_order[rowofthemother,2]
maxspaceTC <- subcloneswithinsample_order[rowofthemother,2]
s <- s+1
if(as.numeric(subcloneswithinsample_order[as.numeric(rowofthemothermother),2]) > as.numeric(subcloneswithinsample_order[as.numeric(rowofthemother),2])){
#print("The mothermother is larger than the mother.")
space[as.numeric(maxspace),2] <- (as.numeric(subcloneswithinsample_order[as.numeric(rowofthemothermother),2])-as.numeric(subcloneswithinsample_order[as.numeric(rowofthemother),2])) #The mother is allocated to its mother.
allocation_samples[match(maxname,allocation_samples[,1]),(i+1)] <- subcloneswithinsample_order[as.numeric(rowofthemothermother),1] #We have to add information about the mother's mother to the allocation matrix.
}else{
#print("The mother's mother and the mother are of equal size and should be equalclones together")
allocation_samples[match(maxname,allocation_samples[,1]),(i+1)] <- subcloneswithinsample_order[rowofthemothermother,1] #We have to add information about the mother's mother to the allocation matrix.
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(themothermothername,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The mothermother exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The mother exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
if(is.na(x) == FALSE){
#print("The mothermother is in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
if(themothername %in% equalclones[2:nrow(equalclones),e_x] == FALSE){ #Maxname is not in this equalclone column.
equalclones[eq,e_x] <- themothername} #Adding the mothername to the equalclones.
break
#eq <- nrow(equalclones)
}
eq <- eq+1
}
}else{ #The mothermother does not exist in the equalclones for this sample.
#print("The mothermother is not in equalclones yet. We now add it.")
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matris.
equalclones[2,i] <- themothermothername
equalclones[3,i] <- themothername
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- themothermothername
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- themothername
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- themothermothername
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- themothername
}
}
}
#Daughter
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(themothermothername,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The mothermother exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The mother exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
if(as.numeric(subcloneswithinsample_order[rowofthemother,2]) != as.numeric(newspace)){
#print("They are not of equal size.")
if(is.na(x) == TRUE){
#print("The mother is not in equalclones")
space[s-1,2] <- (as.numeric(space[s-1,2])-as.numeric(newspace)) #Replacing the old maxspace.
}else{
#print("The mother is in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_x] != "0"){
eq_row <- match(equalclones[eq,e_x],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
}
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix belonging to the mother.
space[s,2] <- newspace
}else{
#print("The mother and the newclone are of equal size")
#The newclone and maxname should be in equalclones.
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
if(is.na(y) == FALSE){
#print("Newname is in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_y] == "0"){
if(maxname %in% equalclones[2:nrow(equalclones),e_y] == FALSE){ #Maxname is not in this equalclone column.
equalclones[eq,e_y] <- maxname} #Adding the maxname to the equalclones.
eq <- nrow(equalclones)
}
eq <- eq+1
}
}else{ #The newname does not exist in the equalclones for this sample.
#print("The newname does not exist in the equalclones for this sample.")
thename <- maxname #200516 ???? F?rs?kte fixa till en sak.
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matris.
#print("Adding them")
equalclones[2,i] <- newname
equalclones[3,i] <- thename
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- thename
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- thename
}
}
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix belonging to the mother.
space[s,2] <- newspace
}
#Removing the mother from the matrix so that it is not added again.
rowtoremove <- match(maxname,subcloneswithinsample_order[,1])
#print("Here a row is removed")
#subcloneswithinsample_order[rowtoremove,] <- "0" #210203. Testar att tysta 210514.Avtystar 210716. Tystar igen 210729.
sameclones <- 1 #This is just a way to illustrate the fact that we have done this.
}else{ #Conditioned clone but the mother it has to have in another sample does not exist in this sample at all.
maxspace <- which.max(space[,2]) #Finding the largest available space.
maxspaceTC <- as.numeric(space[maxspace,2])
maxname <- space[maxspace,1]
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix.
space[s,2] <- as.numeric(newspace)
# print("The conditioned mother is not present in the sample.")
#Looking if there is other possible places for this event to be placed
#which better corresponds to earlier samples.
other <- 1
nej <- 0
for(other in 1:j){ #j is the latest event to be placed.
if(as.numeric(space[other,2]) >= as.numeric(newspace)){
if(space[other,1] %in% allocation_samples[match(space[s,1],allocation_samples[,1]),2:i]){
maxspace <- other
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]
nej <- 1
break
}
}
if(nej == 0){
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]
}
other <- other+1
}
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
# x <- match(space[maxspace,1],equalclones[2:nrow(equalclones),i])
# y <- match(newname,equalclones[2:nrow(equalclones),i])
if(maxname != "ALL"){
if(as.numeric(maxspaceTC) == as.numeric(newspace)){
if(is.na(x) == FALSE){ #The maxname is in equalclones.
#print("The maxname is in equalclones")
if(is.na(y) == TRUE){ #The newname is not.
#print("The newname is not in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
equalclones[eq,e_x] <- newname #Adding the newname to the equalclones for maxname.
break
}
eq <- eq+1
}
}else{
#print("The newname is in equalclones as well")
if(as.numeric(newspace) != space[maxspace,2]){
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_y] != "0"){ #Changed i to e_y.
eq_row <- match(equalclones[eq,e_y],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
}
}
}
}else{
if(is.na(x) == FALSE){
#print("The maxname is in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_x] != "0"){ #Changed i to e_x
eq_row <- match(equalclones[eq,e_x],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
}else{
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
}
}
}else{
#print("The maxname is ALL")
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace))
}
}
}
############################
#NOT A CONDITIONED SUBCLONE#
############################
}else{
#print("Not conditioned")
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix.
space[s,2] <- newspace
#Seeing if there is other possible places for this event to be placed
#which better corresponds to earlier samples.
other <- 1
nej <- 0
for(other in 1:j){ #j is the latest event to be placed.
if(as.numeric(space[other,2]) >= as.numeric(newspace)){
if(space[other,1] %in% allocation_samples[match(space[s,1],allocation_samples[,1]),2:i]){
maxspace <- other
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]
nej <- 1
break
}
}
if(nej == 0){
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]
}
other <- other+1
}
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)])
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)])
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1) #Row
e_x <- (i+length(samples_unique)*e) #Column
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e)
}
e <- e+1
}
#x <- match(maxname,equalclones[2:nrow(equalclones),i])
#y <- match(newname,equalclones[2:nrow(equalclones),i])
#print(maxname)
#print(newname)
if(is.na(x) == FALSE){ #The maxname is in equalclones.
#print("Maxname is in equalclones")
if(is.na(y) == TRUE){ #The newname is not.
#print("Newname is not in equalclones")
true_maxsize <- file_samples_subclones[match(paste(current_sample,maxname),file_samples_subclones[,13]),11]
true_newsize <- file_samples_subclones[match(paste(current_sample,newname),file_samples_subclones[,13]),11]
#print(true_maxsize)
#print(true_newsize)
if(true_maxsize==true_newsize){ #Added 210707.
#print("They are of equal size")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){
equalclones[eq,e_x] <- newname}
eq <- nrow(equalclones)
}
eq <- eq+1
}
}else{
#print("They are not equal in size.")
#Removing the TC from all of the events in the equalclones.
eq <- 2
breaking <- 1
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] != "0"){
eq_row <- match(equalclones[eq,e_x],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}}
}else{
#print("Newname is in equalclones as well")
#if(as.numeric(newspace) == as.numeric(space[maxspace,2])){ #Here we just compare the newspace with the space the maxspace has right now. They might not be equal.
#We cannot go around adding events to equalclones just based on this above. Changed it to the line below instead 200308.
#if(as.numeric(newspace) == as.numeric(subcloneswithinsample_order[maxspace,2])){
if(as.numeric(newspace) == as.numeric(subcloneswithinsample_order[maxspace,2])){
#print("They are are of equal size")
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){ #If it does not already belong to the equalclones of this sample.
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){
equalclones[eq,e_x] <- newname}
eq <- nrow(equalclones)
}
eq <- eq+1
}
}
}else{
#space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
equalclones[y,e_y] <- "0" #Removing the newname from equalclones.
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_x] != "0"){
eq_row <- match(equalclones[eq,e_x],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
}
}
}else{
#print("Maxname is not in equalclones")
if(maxname != "ALL"){
maxnamerow <- match(maxname,subcloneswithinsample_order[,1])
maxspaceTC <- subcloneswithinsample_order[maxnamerow,2]
# print(subcloneswithinsample_order)
# print(maxname)
# print(maxspaceTC)
# print(newname)
# print(newspace)
if(as.numeric(newspace) == as.numeric(maxspaceTC)){
#print("Newclone and maxclone are of the same size.")
if(is.na(y) == FALSE){
#print("Newclone is in equalclones.")
equalclones[y,e_y] <- "0" #Removing it from its old place and placing it on a new one.
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matris.
equalclones[2,i] <- newname
equalclones[3,i] <- maxname
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- maxname
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- maxname
}
}else{
#print("Neither the maxclone nor the newclone is in equalclones.")
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matris.
equalclones[2,i] <- newname
equalclones[3,i] <- maxname
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- maxname
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- maxname
}
}
}else{
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
if(is.na(y) == FALSE){
#print("Newname is in equalclones")
equalclones[y,e_y] <- "0" #Removing it.
}
}
}else{
#print("Maxname is ALL.")
if(is.na(y) == FALSE){
#print("The daughter exist in an equalclones.")
equalclones[y,e_y] <- "0" #Removing it
}
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
}
}
}
}else{ #The newspace is 100.
if(space[1,1] == "ALL"){ #The ALL-space is now occupied by the new subclone.
space[1,2] <- "0"
}
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix. We do not have to alter anything when dealing with hundredpercentclones.
space[s,2] <- newspace
} #100 %
}else{ #ALL
}
}else{ #"0"
}
#The point of these loops are to take into account cases where the newname is in an equalclone situation with
#an event in another sample and this event is also present in this sample but they are not equal here.
#Cross-sample equal-clone reconciliation: the new clone may be registered as an
#"equalclone" (identical tumour-cell fraction) together with an event from ANOTHER
#sample; if that partner also exists in the current sample but was not marked equal
#here, merge the bookkeeping so both samples agree.
#NOTE(review): relies on mutable loop state defined far above this chunk
#(i, s, maxname, newname, newspace, space, equalclones, samples_unique,
#subcloneswithinsample_order_new) - confirm against the enclosing loops.
if(newname %in% equalclones == TRUE){ #The clone exists in equalcones.
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
#The equalclones matrix stores several column "multiples" per sample; scan them all
#and record the row (x/y) and column (e_x/e_y) where maxname/newname occur, if anywhere.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)])
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)])
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1) #Row
e_x <- (i+length(samples_unique)*e) #Column
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e)
}
e <- e+1
}
if(is.na(y) == TRUE && maxname != "ALL"){ #But not in this sample. Added ALL 201108.
if(equalclones[2,i] != "0"){ #There exists equalclones in this sample.
equalclonename <- equalclones[2,i]
equalclonepos <- match(equalclonename,subcloneswithinsample_order_new) #The position for the equalclone.
theTCforequalpos <- subcloneswithinsample_order_new[equalclonepos,2] #Changed to new.
#Only merge when the existing equalclone, the new clone and the max clone all share
#the same tumour-cell fraction in this sample.
if(as.numeric(theTCforequalpos) == as.numeric(newspace) && subcloneswithinsample_order_new[match(maxname,subcloneswithinsample_order_new[,1]),2]==subcloneswithinsample_order_new[match(newname,subcloneswithinsample_order_new[,1]),2]){ #The new clone and the clone in equalclones are equal in size.
#print("Den kom in hit") #Debug trace (Swedish: "it got in here").
h <- 1
#Scan every equalclones column; u/t remember the columns where the existing
#equalclone and the new clone were seen. When t == u they co-occur in the same
#column, i.e. they already appear together as equalclones in another sample.
for(h in 1:ncol(equalclones)){ #Looping through the columns.
n <- 1
u <- 1
t <- 2
for(n in 1:nrow(equalclones)){ #Looping through the rows.
if(equalclones[n,h] == equalclonename){ #We have found the equalclone in a particular sample.
u <- h
}
if(equalclones[n,h] == newname){ #We have found the newclone.
t <- h
}
if(t == u){ #The equalclone and the newclone actually exists together as equalclones in another sample.
o <- 1
for(o in 1:length(equalclones[,i])){ #We add the newclone to the equalclone.
if(o == 1){
#print("They exist together in another sample!")
}
if(equalclones[o,i] == "0"){ #First free slot in this sample's column.
if(newname %in% equalclones[,i] == FALSE){
equalclones[o,i] <- newname
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,i] != "0"){ #Changed it to i instead of e_x since we did not have e_x for RMS6 B2. 200329.
eq_row <- match(equalclones[eq,i],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
#space[maxspace,2] <- (as.numeric(space[maxspace,2])+as.numeric(newspace)) #Resetting the space.
maxspace <- match(equalclonename, space[,1]) #The merged equalclone now acts as the mother slot.
o <- nrow(equalclones)
}
}
o <- o+1
}
}
n <- n+1
}
h <- h+1
}
}
}
}
}
#print(space[maxspace,1])
#print(newname)
#print(allocation_samples[match(newname,allocation_samples[,1]),(i+1)])
#Record the mother of the freshly placed subclone in allocation_samples
#(column i+1 = current sample). tick != 0 means the "conditioned mother" path above
#already fixed themothername; sameclones == 1 means the equal-size merge path ran.
if(tick == 0){
if(space[s,1] != "0"){
if(sameclones != 1){
#Treating the case when space[maxspace,2] = 100 % and the newspace as well. Then the motherclone and the daughterclone are both part of the base.
#When both the previous clone (row j-1) and the chosen mother occupy 100 % and
#neither is "ALL", link the two base clones to each other instead of to the
#generic mother slot; all other branches fall through to space[maxspace,1].
if(subcloneswithinsample_order[j-1,2] == "100"){
if(subcloneswithinsample_order[j-1,1] != "ALL"){
if(subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),2] == "100"){
if(subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1] != "ALL"){
newspace_name <- subcloneswithinsample_order[j-1,1]
maxspace_name <- subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1]
allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- maxspace_name #Annotating the mother clone of each of the subclones within each sample.
allocation_samples[match(maxspace_name,allocation_samples[,1]),(i+1)] <- newspace_name #Annotating the mother clone of each of the subclones within each sample.
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annotating the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annotating the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annotating the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annotating the mother clone of each of the subclones within each sample.
}
}else{
#Equal-size merge already happened; just point newname at the mother slot.
#if(themothername != "ALL"){
#allocation_samples[match(thedaughtername,allocation_samples[,1]),(i+1)] <- themothername}else{
allocation_samples[match(newname,allocation_samples[,1]),(i+1)] <- space[maxspace,1] #200330
#}
}
} #"0"
}else{
#Conditioned-mother path: themothername was determined earlier in the j-loop.
allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- themothername
}
} #j != 1.#}
#print(space)
if(j == as.numeric(nrow(sample_clone_matrix))){ #We're at the end of a sample.
#print("Totalspace is added.")
#print(space)
totalspace[2:as.numeric(nrow(totalspace)),((i*2)-1):((i*2))] <- space
#t <- t+1
s <- 2 #Resetting s and space.
space <- matrix(0,50,2)
}else{s <- s+1}
j <- j+1
}
i <- i+1
}
#Export the placement results to the global environment so downstream sections of
#the script can read them.
assign("totalspace", totalspace, envir=globalenv()) #The equal clones.
assign("equalclones_after", equalclones, envir=globalenv()) #The equal clones.
assign("allocation_samples_updated", allocation_samples, envir=globalenv()) #The mother-daughter division are exported to the global environment.
assign("Not_allocated_correctly",Not_allocated_correctly,envir=globalenv())
#Treating cases where the alteration did not get placed in its correct mother.
#For every mis-placed alteration (column-3 flag below 10) overwrite its recorded
#mother in allocation_samples with the conditioned mother from theonlymothers.
#NOTE(review): the subset has no drop = FALSE, so if exactly one row matches, nac
#degrades to a plain vector and nrow(nac) is NULL, making 1:nrow(nac) error; with
#zero matches 1:nrow(nac) is 1:0 and the loop runs on empty data. Confirm upstream
#guarantees at least two matching rows, or add drop = FALSE + seq_len().
nac <- Not_allocated_correctly[as.numeric(Not_allocated_correctly[,3])<10,]
i <- 1
for(i in 1:nrow(nac)){
col <- match(nac[i,1],allocation_samples[1,]) #Sample column.
row <- match(nac[i,2],allocation_samples[,1]) #Subclone row.
mothercol <- match(paste(nac[i,1],nac[i,2]),theonlymothers[1,])
mothercond <- theonlymothers[2,mothercol] #The conditioned (required) mother.
allocation_samples[row,col] <- mothercond
i <- i+1
}
assign("allocation_samples_revised", allocation_samples, envir=globalenv())
#Cleaning the equalclones matrix.
#Drop equal-clone entries that ended up neither placed inside one of their equal
#partners nor given one of them as a daughter in allocation_samples_updated.
#NOTE(review): the guard inspects rows 1:10 but the extraction takes rows 1:20 -
#confirm the differing row limits are intentional.
if(length(equalclones[1:10,equalclones[2,]!="0"]) > 1){
equalclones_eq <- equalclones[1:20,equalclones[2,]!="0"] #Only columns with at least one equal clone.
if(is.null(ncol(equalclones_eq))==FALSE){ #Still a matrix, i.e. more than one such column.
i <- 1
for(i in 1:ncol(equalclones_eq)){
sample <- equalclones_eq[1,i] #Row 1 holds the sample name. (Shadows base::sample.)
all_col <- match(sample,allocation_samples_updated[1,])
j <- 2
for(j in 2:nrow(equalclones_eq)){
yes <- 0 #Set to 1 when the subclone is consistently linked to its equal partners.
subclone <- equalclones_eq[j,i]
if(subclone!="0"){
subclone_row <- match(subclone,allocation_samples_updated[,1])
mother <- allocation_samples_updated[subclone_row,all_col]
if(mother %in% equalclones_eq[,i] == FALSE){
#print("This subclone did not get placed in any of the equalclones.")
#Second chance: accept the subclone if one of its daughters is an equal partner.
k <- 3
for(k in 3:nrow(allocation_samples_updated)){
if(allocation_samples_updated[k,all_col] == subclone){
subclone_mother <- allocation_samples_updated[k,1]
if(subclone_mother != "0"){
if(subclone_mother %in% equalclones_eq[,i] == TRUE){
#print("This subclone got an equalclone as daughter.")
yes <- 1
}
}
}
k <- k+1
}
}else{
#print("The mother is in equalclones.")
yes <- 1
}
}
if(yes == 0 && equalclones_eq[j,i] != "0"){
#print("This subclone did not get placed in any of these equalclones nor got any of them as daughter.")
#Look up the clone's size; small clones (<= 50 %) must nest, so an unlinked one is
#removed and the remaining entries shifted up one row. (Local `rm` shadows base::rm.)
rm <- match(paste(equalclones_eq[1,i],equalclones_eq[j,i]),file_samples_subclones[,13])
rs <- file_samples_subclones[rm,11]
if(as.numeric(rs) <= 50){
equalclones_eq[j,i] <- "0"
if(j != nrow(equalclones_eq) && as.numeric(rs) <= 50){ #If under 50 they have to be inside each other.
equalclones_eq[j:(nrow(equalclones_eq)-1),i] <- equalclones_eq[(j+1):nrow(equalclones_eq),i]
}
yes <- 0}
}
j <- j+1
}
i <- i+1
}
assign("equalclones_cleaned", equalclones_eq, envir=globalenv()) #The equal clones.
equalclones <- equalclones_eq
}
}
###################################################################
#Treating the case when we have many clones which have the same TC#
###################################################################
#When three or more non-zero entries sit in one equalclones column (sample name plus
#at least two equal clones), union their event columns in eventmatrix pairwise so
#every member carries the combined event set.
i <- 2
j <- 1
therows <- nrow(equalclones) #NOTE(review): assigned but not used below - confirm.
for(j in 1:as.numeric(ncol(equalclones))){ #Looping through the samples.
v <- equalclones[,j] #Extracting the column.
v <- cbind(v,matrix(0,nrow(equalclones),1)) #Adding a new column to it. Changed from "rowsofhundred" to nrow(equalclones).
clonenumber <- length(v[v!="0"]) #Count of non-zero entries (sample name + clones).
v <- v[v[,1] !="0",] #Keep only the non-empty rows.
if(clonenumber > 2){ #If we have more than 1 subclone within the sample with equal TC % who are placed inside each other.
k <- 2
for(k in 2:clonenumber){ #Giving alterations in all combinations.
mother <- paste(v[1,1],v[k,1]) #"<sample> <clone>" key into the eventmatrix header.
l <- 2
for(l in 2:clonenumber){ #Looping through the clones.
if(k != l){
daughter <- paste(v[1,1], v[l,1])
mothercolumn <- match(mother,eventmatrix[1,])
daughtercolumn <- match(daughter,eventmatrix[1,])
m <- 2
for(m in 2:as.numeric(nrow(eventmatrix))){ #Looping through the events.
#Union: an event present in either column is set in both.
if(eventmatrix[m,mothercolumn] == "1"){
eventmatrix[m,daughtercolumn] <- "1"
}else if(eventmatrix[m,daughtercolumn] == "1"){ #Added 200821.
eventmatrix[m,mothercolumn] <- "1"
}
m <- m+1
}
}
l <- l+1
}
k <- k+1
}
}
j <- j+1
}
assign("eventmatrix_part1", eventmatrix, envir=globalenv())
##########################################################
#Giving the daughter subclones their motherclone's events#
##########################################################
#Set-up for the inheritance pass below: per-sample mother/daughter tables plus a
#pristine copy of eventmatrix taken before daughters inherit their mothers' events.
motherdaughterevent <- matrix(0,as.numeric(nrow(allocation_samples)),3) #Cols: subclone, mother, median TC.
motherdaughterevent[,1] <- allocation_samples[,1]
motherdaughterevent_new <- matrix(0,as.numeric(nrow(allocation_samples)),3) #The one used for the order.
eventmatrix_original <- eventmatrix #Untouched copy kept for reference.
motherdaughter <- matrix(0,50,2)
themedian <- as.numeric(nrow(medianmatrix)) #Last row of medianmatrix, read as the median-TC row below.
j <- 2
s <- 1
for(j in 2:as.numeric(ncol(allocation_samples))){ #Looping through the columns.
motherdaughterevent[,2] <- allocation_samples[,j] #Extracting that particular column.
motherdaughterevent[1,3] <- "100" #Setting the subclone name to "100" just so that it will stay where it is.
#print(motherdaughterevent)
i <- 2
for(i in 2:as.numeric(nrow(allocation_samples))){ #Looping through the rows.
if(motherdaughterevent[i,2] != "ALL"){
if(motherdaughterevent[1,2] != "ALL"){
mothername <- paste(motherdaughterevent[1,2],motherdaughterevent[i,2])
daughtername <- paste(motherdaughterevent[1,2],motherdaughterevent[i,1])
}else{
mothername <- "0"
daughtername <- "0"}
}else{mothername <- "ALL"
daughtername <- "ALL"}
if(motherdaughterevent[i,2] != "0"){ #Adding the median TC of the mother clone to the matrix.
samplecolumn <- match(mothername,medianmatrix[1,])
theTC <- medianmatrix[themedian,samplecolumn]
samplecolumn_daughter <- match(daughtername,medianmatrix[1,])
theTC_daughter <- medianmatrix[themedian,samplecolumn_daughter]
}else{theTC <- "0"
theTC_daughter <- "0"}
motherdaughterevent[i,3] <- theTC_daughter #The median TC for that subclone in that particular sample.
i <- i+1
}
motherdaughter_order_before <- motherdaughterevent[order(as.numeric(motherdaughterevent[1:nrow(motherdaughterevent),3]), decreasing = TRUE),]
motherdaughter_totalspace <- as.matrix(totalspace[,(j-1)*2-1]) #The order obtained from totalspace.
motherdaughter_totalspace <- as.matrix(motherdaughter_totalspace[motherdaughter_totalspace != "0",]) #Removing the zero rows.
a <- 2
for(a in 2:as.numeric(nrow(motherdaughter_totalspace))){ #Looping through the order we should have.
clone <- motherdaughter_totalspace[a,1]
clonerow <- match(clone,motherdaughterevent)
# print("Här")
# print(motherdaughter_totalspace)
# print(clone)
# print(motherdaughterevent)
# print(clonerow)
# print(a)
motherdaughterevent_new[a,] <- motherdaughterevent[clonerow,] #New matrix in which the correct order is saved.
a <- a+1
}
motherdaughter_order <- as.matrix(motherdaughterevent_new)
motherdaughter_order[1,2] <- motherdaughterevent[1,2] #In this position we want to to have the sample name.
print(motherdaughter_order)
i <- 2
for(i in 2:as.numeric(nrow(allocation_samples))){ #Looping through the subclones.
eq <- 0
if(motherdaughter_order[i,2] != "0"){ #The subclone does not exist in that particular sample.
if(motherdaughter_order[i,2] != "ALL"){ #The subclone has already gotten these alterations.
daughter_name <- paste(motherdaughter_order[1,2],motherdaughter_order[i,1]) #The name of the subclone in a particular sample.
daughter_column <- match(daughter_name,eventmatrix[1,]) #The corresponding column in the eventmatrix.
mother_name <- paste(motherdaughter_order[1,2],motherdaughter_order[i,2]) #The motherclone.
mother_column <- match(mother_name,eventmatrix[1,]) #The column in the eventmatrix corresponding to the motherclone.
col <- match(word(daughter_name,1),equalclones[1,]) #Changed the equalclones_new here to just equalclones.
if(is.na(col) == FALSE){
if(word(daughter_name,2,3) %in% equalclones[,col]){
#It is part of equalclones in this sample. Then the other ones here should also have this mother.
print("It is in equalclones.")
eq <- 1
}
}
if(motherdaughter_order[1,2]=="16569_01B"){
print("Motherdaughtername")
print(mother_name)
print(daughter_name)}
k <- 2
for(k in 2:as.numeric(nrow(eventmatrix))){
if(eventmatrix[k,mother_column] == "1"){
eventmatrix[k,daughter_column] <- "1"
if(eq == 1){#Equalclones.
l <- 2
sub <- equalclones[equalclones[,col]!="0",col]
for(l in 2:length(sub)){
if(sub[1]=="16569_01B"){
print("EQ")
print(paste(sub[1],sub[l]))}
eq_column <- match(paste(sub[1],sub[l]),eventmatrix[1,])
eventmatrix[k,eq_column] <- "1"
l <- l+1
}
}
}
k <- k+1
}
}
}
i <- i+1
}
motherdaughterevent <- matrix(0,as.numeric(nrow(allocation_samples)),3)
motherdaughterevent[,1] <- allocation_samples[,1]
motherdaughterevent_new <- matrix(0,as.numeric(nrow(allocation_samples)),3) #The one used for the order.
j <- j+1
}
# eq_test <- matrix(0,nrow(equalclones),1)
# eq_test[1,1] <- "4240_15"
# eq_test[2,1] <- "Subclone_ A"
# eq_test[3,1] <- "Subclone_ D"
# equalclones <- cbind(equalclones,eq_test)
# print(equalclones)
assign("eventmatrix_part2", eventmatrix, envir=globalenv())
###################################################################
#Treating the case when we have many clones which have the same TC#
###################################################################
i <- 2
j <- 1
therows <- nrow(equalclones)
for(j in 1:as.numeric(ncol(equalclones))){ #Looping through the samples.
v <- equalclones[,j] #Extracting the column.
v <- cbind(v,matrix(0,nrow(equalclones),1)) #Adding a new column to it. Changed from "rowsofhundred" to nrow(equalclones).
clonenumber <- length(v[v!="0"])
v <- v[v[,1] !="0",]
if(clonenumber > 2){ #If we have more than 1 subclone within the sample with equal TC % who are placed inside each other.
k <- 2
for(k in 2:clonenumber){ #Giving alterations in all combinations.
mother <- paste(v[1,1],v[k,1])
l <- 2
for(l in 2:clonenumber){ #Looping through the clones.
if(k != l){
daughter <- paste(v[1,1], v[l,1])
mothercolumn <- match(mother,eventmatrix[1,])
daughtercolumn <- match(daughter,eventmatrix[1,])
m <- 2
for(m in 2:as.numeric(nrow(eventmatrix))){ #Looping through the events.
if(eventmatrix[m,mothercolumn] == "1"){
eventmatrix[m,daughtercolumn] <- "1"
}else if(eventmatrix[m,daughtercolumn] == "1"){ #Added 200821.
eventmatrix[m,mothercolumn] <- "1"
}
m <- m+1
}
}
l <- l+1
}
k <- k+1
}
}
j <- j+1
}
eventmatrix_new <- matrix(0,(as.numeric(nrow(eventmatrix))-1), (as.numeric(ncol(eventmatrix))-1)) #Skapar en ny h?ndelsematris d?r vi bara har med 1:orna och 0:orna.
eventmatrix_new <- eventmatrix[2:as.numeric(nrow(eventmatrix)),2:as.numeric(ncol(eventmatrix))]
eventmatrix_new <- as.matrix(eventmatrix_new)
rownames(eventmatrix_new) <- eventmatrix[2:as.numeric(nrow(eventmatrix)),1] #L?gger till radnamnen och kolumnnamnen till den nya matrisen.
colnames(eventmatrix_new) <- eventmatrix[1,2:as.numeric(ncol(eventmatrix))]
eventmatrix_new <- t(eventmatrix_new)
stop.time <- Sys.time()
print("Execution time")
print(stop.time-start.time)
return(eventmatrix_new)
}
#Splitting the input file.
splitdata <- function(file, name, ord) {
  # Extract the contiguous block of rows in `file` belonging to one tumor.
  #
  # file: matrix/data frame whose column 1 is the tumor id; when `ord` is TRUE,
  #       columns 2 (sample) and 3 (chromosome) are also used for sorting.
  # name: tumor id whose rows should be returned.
  # ord:  optional; when TRUE the file is first sorted by tumor/sample/chromosome
  #       and the sample names in column 2 are whitespace-trimmed.
  # Returns the rows for `name` as a matrix/data frame, even when the tumor has
  # a single row (fix: the original dropped to a bare vector in that case, and
  # crashed outright on a one-row input file).
  if (missing(ord) == FALSE) {
    if (ord == TRUE) {
      # Ordering the matrix by tumor, then by sample and chromosome.
      # drop = FALSE keeps the matrix shape even if only one row remains.
      file <- file[order(file[, 1], file[, 2], file[, 3]), , drop = FALSE]
      file[, 2] <- trimws(file[, 2], which = "both", whitespace = "[ \t\r\n]")
    }
  }
  # Remove rows with a missing tumor id.
  file <- file[is.na(file[, 1]) == FALSE, , drop = FALSE]
  n <- nrow(file)
  # `samples` records, per tumor, the first (col 1) and last (col 2) row index;
  # the tumor id is stored as the row name so it can be looked up with match().
  # NOTE(review): capped at 100 tumors, as in the original code.
  samples <- matrix(0, 100, 2)
  rownames(samples) <- c(1:100)
  s <- 1
  for (k in seq_len(n)) {
    if (k == 1) {
      samples[s, 1] <- 1                 # first row always opens a range
      rownames(samples)[s] <- file[1, 1]
    } else if (file[k - 1, 1] != file[k, 1]) {
      # A new tumor starts here: close the previous range and open a new one.
      samples[s, 2] <- k - 1
      s <- s + 1
      samples[s, 1] <- k
      rownames(samples)[s] <- file[k, 1]
    }
    if (k == n) {
      samples[s, 2] <- k                 # last row always closes the open range
    }
  }
  tumorname <- match(name, rownames(samples))
  # Extracting the data for that particular tumor from the large segment file.
  datasegment <- file[samples[tumorname, 1]:samples[tumorname, 2], , drop = FALSE]
  return(datasegment)
}
#Adding a stem to the data.
stem <- function(eventmatrix, co, root) {
  # Append a root row to the event matrix.
  #   root == "Stem":   the root carries every event shared by ALL subclones.
  #   root == "Normal": the root is an all-zero (event-free) normal cell.
  #   root == "None":   no root row is added.
  # `co` is accepted for interface compatibility; it is not used in this body.
  class(eventmatrix) <- "numeric"  # coerce entries to numbers, keeping dims
  augmented <- eventmatrix
  if (root == "Stem") {
    # 1 in every column where each subclone carries the event, else 0.
    shared <- as.numeric(colSums(augmented) == nrow(augmented))
    augmented <- rbind(augmented, matrix(shared, nrow = 1))
    rownames(augmented)[nrow(augmented)] <- "Stem"
  }
  if (root == "Normal") {
    augmented <- rbind(augmented, matrix(0, 1, ncol(augmented)))
    rownames(augmented)[nrow(augmented)] <- "Normal"
  }
  return(augmented)
}
#Transform the file into phyDat format.
phydatevent <- function(excelfil) {
  # Convert an event table into phangorn's phyDat format with binary (0/1)
  # user-defined character states; '0' is treated as the ambiguous state.
  event_matrix <- as.matrix(excelfil)
  phyDat(event_matrix, type = "USER", levels = c(0, 1), ambiguity = '0')
}
#Maximum Likelihood
ml_tree <- function(Eventmatrix, root) {
  # Maximum-likelihood tree: neighbor-joining starting tree from Hamming
  # distances, rooted on the given outgroup, then branch lengths optimized
  # under the Jukes-Cantor model. Returns the fitted pml object.
  nj_start <- NJ(dist.hamming(Eventmatrix))
  nj_start <- root(nj_start, outgroup = root, resolve.root = TRUE)
  fit <- pml(nj_start, Eventmatrix)  # object of class pml
  optim.pml(fit, model = "JC", optEdge = TRUE)
}
#Maximum parsimony
mp_tree <- function(Eventmatrix, root) {
  # Maximum-parsimony tree via the parsimony ratchet (Nixon 1999) using
  # Fitch's algorithm; pratchet returns the best tree found, i.e. the one
  # minimizing the parsimony score.
  best_tree <- pratchet(Eventmatrix, start = NULL, method = "fitch",
                        maxit = 2000, k = 10, trace = 1, all = FALSE,
                        rearrangements = "TBR", perturbation = "ratchet")
  best_tree <- root(best_tree, outgroup = root, resolve.root = TRUE)
  # Assign branch lengths fulfilling the ACCTRAN criterion.
  acctran(best_tree, Eventmatrix)
}
#Visualising the MP-tree.
MP_treeplot <- function(MP_tree, limitmp, col) {
  # Plot a maximum-parsimony tree with ggtree.
  # col == "col": tip labels are colored by placement certainty, read from the
  # global `certainty` matrix ("1 solution" = orange, "> 1 solution" = dark
  # green); any other value gives plain black labels. The plot is printed and
  # returned.
  if (col == "col") {
    branches <- list(
      certain_branches = c(certainty[certainty[, 2] == "1 solution", 1], "Stem", "Normal"),
      uncertain_branches = certainty[certainty[, 2] == "> 1 solution", 1]
    )
    tree_plot <- ggplot(groupOTU(MP_tree, branches)) +
      geom_tree(size = 1) +
      geom_tiplab(size = 4, aes(color = factor(group))) +
      theme_tree() +
      limitmp +
      scale_color_manual(values = c(certain_branches = "#FC4E07", uncertain_branches = "darkgreen")) +
      theme(plot.title = element_text(hjust = 0.5, size = (14), color = "black"), legend.position = "none")
  } else {
    tree_plot <- ggplot(MP_tree) +
      geom_tree() +
      geom_tiplab(size = 4, color = "black") +
      theme_tree() +
      limitmp +
      theme(plot.title = element_text(hjust = 0.5, size = (14), color = "black"))
  }
  print(tree_plot)
  return(tree_plot)
}
#Visualising the ML-tree.
ML_treeplot <- function(ML_tree, limitml, col) {
  # Plot a maximum-likelihood tree with ggtree. The phylo object is taken from
  # ML_tree$tree (an optim.pml fit). col == "col" colors tip labels by
  # placement certainty using the global `certainty` matrix; any other value
  # gives plain black labels. The plot is printed and returned.
  if (col == "col") {
    branches <- list(
      certain_branches = c(certainty[certainty[, 2] == "1 solution", 1], "Stem", "Normal"),
      uncertain_branches = certainty[certainty[, 2] == "> 1 solution", 1]
    )
    tree_plot <- ggplot(groupOTU(ML_tree$tree, branches)) +
      geom_tree(size = 1) +
      geom_tiplab(size = 4, aes(color = factor(group))) +
      theme_tree() +
      limitml +
      scale_color_manual(values = c(certain_branches = "#FC4E07", uncertain_branches = "darkgreen")) +
      theme(plot.title = element_text(hjust = 0.5, size = (14), color = "black"), legend.position = "none")
  } else {
    tree_plot <- ggplot(ML_tree$tree) +
      geom_tree() +
      geom_tiplab(size = 4, color = "black") +
      theme_tree() +
      limitml +
      theme(plot.title = element_text(hjust = 0.5, size = (14), color = "black"))
  }
  print(tree_plot)
  return(tree_plot)
}
#Making new subclones.
subclones <- function(EM_test,file_samples_subclones,root,possible_mothers,cutoff,names){
  # Collapse clusters that share an identical event pattern into new named
  # subclones, attach the chosen root, summarise how certain each subclone's
  # placement is, and (interactively) offer a reshuffled suboptimal tree when
  # several solutions exist. Returns list(event matrix, subclone-name table,
  # overview).
  # NOTE(review): reads several globals (datasegment, sample_clone_matrix,
  # possible_mothers_new, theonlymothers, stem_co, overview, equalclones,
  # certainty) and writes clonenames_new_order / certainty / certainty_all to
  # the global environment via assign() -- confirm against the callers.
  # NOTE(review): when multiple solutions exist this function calls readline()
  # and blocks waiting for console input, so it is interactive-only.
  if(missing(root)==TRUE){root <- "Normal"} #The default is to root the tree in a normal cell.
  EM_newnames <- unique(EM_test) #Finding all unique rows in the EM i.e. all subclones that have different sets of mutations.
  clonenames_new <- matrix(0,(as.numeric(nrow(EM_newnames))*2),500) #Creating a new matrix that will contain the new subclone names and which former subclones it includes.
  samples_all <- t(as.matrix(unique(datasegment[,2]))) #A matrix containing all unique samples.
  #samples_all <- t(as.matrix(unique(file_samples_subclones[,2]))) #A matrix containing all unique samples.
  samples <- t(as.matrix(samples_all[samples_all != "ALL"]))
  sampleTC <- matrix(0,1,ncol(samples))
  sampleTC[1,1:ncol(samples)] <- "100"
  i <- 1
  l <- 2
  k <- 1
  # Pair every unique event pattern (odd rows of clonenames_new) with the old
  # subclone names that carry it, and store each subclone's TC on the row below.
  for(i in 1:as.numeric(nrow(EM_newnames))){ #Looping through each of the unique subclones.
    uniquenamerow <- match(EM_newnames[i,1],file_samples_subclones[,13]) #Finding the position of the subclone.
    uniquenameTC <- file_samples_subclones[uniquenamerow,11] #Finding the TC of the subclone.
    j <- 1
    for(j in 1:as.numeric(nrow(EM_test))){ #Every unique subclone is to be compared with the others.
      if(all(EM_newnames[i,] == EM_test[j,]) == TRUE){ #They have to include the same events.
        clonenames_new[k,l] <- rownames(EM_test)[j] #Saving the subclone name to the matrix.
        if(rownames(EM_test)[j] != "ALL"){
          column <- match(word(rownames(EM_test)[j],1),sample_clone_matrix[1,])
          row <- match(word(rownames(EM_test)[j],2,3),sample_clone_matrix[,column])
          theTC <- sample_clone_matrix[row,(column+1)] #Finding the TC for the subclone.
        }else{theTC <- "100"}
        clonenames_new[(k+1),l] <- theTC #Saving the TC below its subclone name in the matrix.
        l <- l+1
      }
      j <- j+1
    }
    l <- 2
    k <- k + 2
    i <- i+1
  }
  m <- 1
  for(m in 1:(as.numeric(nrow(clonenames_new))/2)){ #Calculating the mean of all of the subclones within the new subclones.
    #print(sum(as.numeric(clonenames_new[2*m,2:ncol(clonenames_new)])))
    clonenames_new[2*m,1] <- mean(as.numeric(clonenames_new[2*m,clonenames_new[2*m,] != 0]))
    clonenames_new[(2*m-1),1] <- mean(as.numeric(clonenames_new[2*m,clonenames_new[2*m,] != 0]))
    m <- m+1
  }
  #Giving the new subclone names. The order is determined based on the subclones' mean TC:s.
  clonenames_new_order <- clonenames_new[order(as.numeric(clonenames_new[,1]), decreasing = TRUE),] #Ordering the subclones based on their TC.
  if(nrow(clonenames_new)/2 < 20){
    newnames <- c("Subclone A", "Subclone B","Subclone C","Subclone D","Subclone E","Subclone F","Subclone G","Subclone H","Subclone I","Subclone J","Subclone K","Subclone L","Subclone M","Subclone N","Subclone O","Subclone P", "Subclone Q","Subclone R","Subclone S","Subclone T","Subclone U","Subclone V","Subclone X","Subclone Y","Subclone Z",
                  "Subclone ZA","Subclone ZB","Subclone ZC", "Subclone ZD","Subclone ZE", "Subclone ZF", "Subclone ZG", "Subclone ZH", "Subclone ZI", "Subclone ZJ", "Subclone ZK", "Subclone ZL", "Subclone ZM", "Subclone ZN", "Subclone ZO", "Subclone ZP", "Subclone ZQ", "Subclone ZR", "Subclone ZS", "Subclone ZT", "Subclone ZU", "Subclone ZV", "Subclone ZX","Subclone ZY", "Subclone ZZ",
                  "Subclone ZZA","Subclone ZZB","Subclone ZZC", "Subclone ZZD","Subclone ZZE", "Subclone ZZF", "Subclone ZZG", "Subclone ZZH", "Subclone ZZI", "Subclone ZZJ", "Subclone ZZK", "Subclone ZZL", "Subclone ZZM", "Subclone ZZN", "Subclone ZZO", "Subclone ZZP", "Subclone ZZQ", "Subclone ZZR", "Subclone ZZS", "Subclone ZZT", "Subclone ZZU", "Subclone ZZV", "Subclone ZZX","Subclone ZZY", "Subclone ZZZ",
                  "Subclone ZZZA","Subclone ZZZB","Subclone ZZZC", "Subclone ZZZD","Subclone ZZZE", "Subclone ZZZF", "Subclone ZZZG", "Subclone ZZZH", "Subclone ZZZI", "Subclone ZZZJ", "Subclone ZZZK", "Subclone ZZZL", "Subclone ZZZM", "Subclone ZZZN", "Subclone ZZZO", "Subclone ZZZP", "Subclone ZZZQ", "Subclone ZZZR", "Subclone ZZZS", "Subclone ZZZT", "Subclone ZZZU", "Subclone ZZZV", "Subclone ZZZX","Subclone ZZZY", "Subclone ZZZZ")
  }else{
    newnames <- c(seq(1:1000))
  }
  # Optional override of the naming scheme via the `names` argument.
  if(missing(names)==FALSE){
    if(names=="numbers"){
      newnames <- c(seq(1:1000))
    }else if(names=="letters"){
      newnames <- c("A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","X","Y","Z","ZA","ZB","ZC","ZD","ZE","ZF","ZG","ZH","ZI","ZJ","ZK","ZL","ZM","ZN","ZO","ZP","ZQ","ZR","ZS","ZT","ZU","ZV","ZX","ZY","ZZ",
                    "AAA","BBB","CCC","DDD","EEE","FFF","GGG","HHH")
    }else if(names=="subclone"){
      newnames <- c("Subclone A", "Subclone B","Subclone C","Subclone D","Subclone E","Subclone F","Subclone G","Subclone H","Subclone I","Subclone J","Subclone K","Subclone L","Subclone M","Subclone N","Subclone O","Subclone P", "Subclone Q","Subclone R","Subclone S","Subclone T","Subclone U","Subclone V","Subclone X","Subclone Y","Subclone Z",
                    "Subclone ZA","Subclone ZB","Subclone ZC", "Subclone ZD","Subclone ZE", "Subclone ZF", "Subclone ZG", "Subclone ZH", "Subclone ZI", "Subclone ZJ", "Subclone ZK", "Subclone ZL", "Subclone ZM", "Subclone ZN", "Subclone ZO", "Subclone ZP", "Subclone ZQ", "Subclone ZR", "Subclone ZS", "Subclone ZT", "Subclone ZU", "Subclone ZV", "Subclone ZX","Subclone ZY", "Subclone ZZ",
                    "Subclone ZZA","Subclone ZZB","Subclone ZZC", "Subclone ZZD","Subclone ZZE", "Subclone ZZF", "Subclone ZZG", "Subclone ZZH", "Subclone ZZI", "Subclone ZZJ", "Subclone ZZK", "Subclone ZZL", "Subclone ZZM", "Subclone ZZN", "Subclone ZZO", "Subclone ZZP", "Subclone ZZQ", "Subclone ZZR", "Subclone ZZS", "Subclone ZZT", "Subclone ZZU", "Subclone ZZV", "Subclone ZZX","Subclone ZZY", "Subclone ZZZ",
                    "Subclone ZZZA","Subclone ZZZB","Subclone ZZZC", "Subclone ZZZD","Subclone ZZZE", "Subclone ZZZF", "Subclone ZZZG", "Subclone ZZZH", "Subclone ZZZI", "Subclone ZZZJ", "Subclone ZZZK", "Subclone ZZZL", "Subclone ZZZM", "Subclone ZZZN", "Subclone ZZZO", "Subclone ZZZP", "Subclone ZZZQ", "Subclone ZZZR", "Subclone ZZZS", "Subclone ZZZT", "Subclone ZZZU", "Subclone ZZZV", "Subclone ZZZX","Subclone ZZZY", "Subclone ZZZZ")
    }
  }
  i <- 1
  s <- 1
  for(i in 1:(nrow(clonenames_new_order)/2)){ #Looping through all of the new subclones and giving them their new names.
    #print(i)
    if(clonenames_new_order[2*i-1,2] != "ALL"){
      clonenames_new_order[2*i-1,1] <- newnames[s]
      s <- s+1
    }else{
      # The "ALL" pseudo-subclone becomes the Stem and is present in every
      # sample with TC 100.
      clonenames_new_order[2*i-1,1] <- "Stem"
      clonenames_new_order[2*i-1,2:(ncol(samples)+1)] <- samples
      clonenames_new_order[2*i,1] <- "100"
      clonenames_new_order[2*i,2:(ncol(samples)+1)] <- sampleTC
      rowofall <- match("ALL",rownames(EM_newnames))
      rownames(EM_newnames)[rowofall] <- "Stem"
    }
    if(clonenames_new_order[2*i-1,2] == "Normal"){
      clonenames_new_order[2*i-1,1] <- "Normal"
    }
    i <- i+1
  }
  #Adding an "ALL" cell to the clonenames_new_order matrix in the cases where we only have 2 subclones. Otherwise we will not be able to construct any phylogenetic trees.
  if(as.numeric(nrow(clonenames_new_order))/2 < 2){ #Changed it to 2 rather than 3.
    ALL <- matrix(0,2,ncol(clonenames_new_order))
    ALL[1,1] <- "ALL"
    ALL[1,2:(ncol(samples)+1)] <- samples
    ALL[2,1] <- "100"
    ALL[2,2:(ncol(samples)+1)] <- sampleTC
    clonenames_new_order <- rbind(ALL,clonenames_new_order)
    print("Warning message: Your dataset only contain two subclones. An ALL subclone has been added in order to be able to reconstruct a phylogenetic tree. This has the same events as Stem.")
  }
  assign("clonenames_new_order",clonenames_new_order,envir = globalenv())
  #Creating the new event matrix with the new subclones.
  i <- 1
  EM_saved <- EM_newnames
  for(i in 1:nrow(EM_newnames)){
    therow <- which(clonenames_new_order == rownames(EM_newnames)[i], arr.ind = T)
    therow <- therow[1]
    rownames(EM_newnames)[i] <- clonenames_new_order[therow,1]
    i <- i+1
  }
  EM_test_newnames <- EM_newnames
  #root <- "Stem"
  #root <- "Normal"
  if(root != "Stem"){
    EM_test_newnames <- stem(EM_test_newnames,stem_co,root) #Adding the root to the event matrix.
    allrow <- match("Stem",rownames(EM_test_newnames))
    if(is.na(allrow) == TRUE){
      allrow <- match("ALL",rownames(EM_test_newnames))
    }
    thesumofstem <- sum(EM_test_newnames[allrow,])
    kvoten <- thesumofstem/(ncol(EM_test_newnames))
    #print(thesumofstem)
    #print(ncol(EM_test_newnames))
    # Warn when the stem holds most of the events relative to the whole matrix.
    if(kvoten < 0.5){
      print("Warning message: The stem is very long compared to the entire data set. Maybe you should root the tree in the stem instead of a Normal cell?")
      print("Warning message: If the normal cell is too different from the subclones, the plot viewer might show a graph where the tip labels drift off from the tips")
    }
  }
  #else{EM_test_newnames <- stem(EM_test_newnames,stem_co,root) } #Adding the root to the event matrix.
  #Multiple solutions?
  #Placing all possible mothers for each subclone in a single row for each biopsy.
  # NOTE(review): the `possible_mothers` argument is immediately overwritten by
  # the global possible_mothers_new -- confirm this is intended.
  possible_mothers <- possible_mothers_new
  compr <- possible_mothers[2:nrow(possible_mothers),]
  i <- 1
  for(i in 1:ncol(possible_mothers)){
    nonzero <- compr[possible_mothers[2:nrow(possible_mothers),i]!="0",i]
    if(length(nonzero)!=0){
      possible_mothers[2:(length(nonzero)+1),i] <- nonzero
    }
    i <- i+1
  }
  i <- 1
  for(i in 1:ncol(possible_mothers)){
    if(possible_mothers[1,i]=="0"){ #This is an extra column with possible mothers.
      if(possible_mothers[2,i-1]=="0"){
        possible_mothers[2:nrow(possible_mothers),i-1] <- possible_mothers[2:nrow(possible_mothers),i]
      }else{
        possible_mothers[3:nrow(possible_mothers),i-1] <- possible_mothers[2:(nrow(possible_mothers)-1),i]
      }
    }
    i <- i+1
  }
  possible_mothers_compressed <- possible_mothers[,possible_mothers[1,]!= "0"]
  #View(possible_mothers)
  #Removing mothers that are not possible in all samples.
  i <-1
  for(i in 1:ncol(possible_mothers_compressed)){
    pos <- which(word(possible_mothers_compressed[1,i],2,3) == word(possible_mothers_compressed[1,],2,3))
    same_clone <- as.matrix(possible_mothers_compressed[,pos])
    #print("Same_clone")
    #print(as.matrix(same_clone))
    # A candidate mother must appear for this clone in every sample; drop those
    # with fewer occurrences than the number of samples.
    tbl_clone <- table(same_clone[2:nrow(same_clone),])
    tbl_clone <- tbl_clone[tbl_clone<ncol(same_clone)]
    if(length(tbl_clone)>0){
      j <- 1
      for(j in 1:length(pos)){
        if(length(possible_mothers_compressed[possible_mothers_compressed[,pos[j]]%in%names(tbl_clone),pos[j]])!=0){ #At least one should be removed.
          k <- 2
          for(k in 2:nrow(possible_mothers_compressed)){
            if(possible_mothers_compressed[k,pos[j]]%in%names(tbl_clone)){
              possible_mothers_compressed[k:(nrow(possible_mothers_compressed)-1),pos[j]] <- possible_mothers_compressed[(k+1):nrow(possible_mothers_compressed),pos[j]]
            }
            k <- k+1
          }
          #possible_mothers_compressed[possible_mothers_compressed[,pos[j]]%in%names(tbl_clone),pos[j]] <- "0"
        }
        j <- j+1
      }
    }
    i <- i+1
  }
  #View(possible_mothers_compressed)
  #Sometimes we do not find a mother possible in all samples just because in some samples we have equalclones.
  i <- 1
  for(i in 1:ncol(possible_mothers_compressed)){
    if(possible_mothers_compressed[2,i]=="0"){
      pos <- match(possible_mothers_compressed[1,i],theonlymothers[1,])
      if(is.na(pos)==FALSE){
        possible_mothers_compressed[2,i] <- theonlymothers[2,pos]
      }
    }
    i <- i+1
  }
  #View(possible_mothers_compressed)
  #Creating a matrix in which I tell which subclones only have one position and which have multiple.
  i <- 1
  s <- 1
  certainty <- matrix(0,ncol(possible_mothers_compressed),4)
  for(i in 1:ncol(possible_mothers_compressed)){
    name <- possible_mothers_compressed[1,i]#word(possible_mothers_compressed[1,i],2,3)
    pos <- which(name == clonenames_new_order, arr.ind = T)
    certainty[s,4] <- clonenames_new_order[pos[1],1] #The new subclone name in the phylogeny.
    if(name%in%certainty[,1] == FALSE){
      certainty[s,1] <- name #The cluster name with biopsy name.
      nr <- length(which(possible_mothers_compressed[,i]!="0"))-1
      certainty[s,2] <- nr #The number of solutions.
      if(nr == 1){
        certainty[s,3] <- "1 solution"
      }else{
        certainty[s,3] <- "> 1 solution"
      }
      s <- s+1
    }
    i <- i+1
  }
  certainty_all <- certainty[certainty[,1]!=0,]
  # certainty <- unique(certainty[,4:3])
  # View(certainty)
  assign("certainty_all", certainty_all, envir=globalenv())
  # Second pass: per new subclone name, record whether ANY sample has more
  # than one possible placement.
  i <- 1
  s <- 1
  certainty <- matrix(0,ncol(possible_mothers_compressed),2)
  for(i in 1:ncol(possible_mothers_compressed)){
    name <- possible_mothers_compressed[1,i]
    pos <- which(name == clonenames_new_order, arr.ind = T)
    name <- clonenames_new_order[pos[1],1]
    #print(name)
    if(name%in%certainty[,1] == FALSE){
      certainty[s,1] <- name
      nr <- length(which(possible_mothers_compressed[,i]!="0"))-1
      certainty[s,2] <- nr
      if(nr == 1){
        certainty[s,2] <- "1 solution"
      }else{
        certainty[s,2] <- "> 1 solution"
      }
      s <- s+1
    }else{
      nr <- length(which(possible_mothers_compressed[,i]!="0"))-1
      certainty[s,2] <- nr
      if(nr != 1){
        row <- match(name,certainty[,1])
        certainty[row,2] <- "> 1 solution"
      }
    }
    i <- i+1
  }
  certainty <- certainty[certainty[,1]!="0",]
  assign("certainty", certainty, envir=globalenv())
  EM <- EM_test_newnames
  if(as.numeric(ncol(possible_mothers_compressed))==length(possible_mothers_compressed[2,possible_mothers_compressed[3,]=="0"])){
    print("This is the only solution")
  }else{
    x <- readline("There are multiple solutions. Do you want to see the suggested tree or another? Print suggested or another.")
    if(x == "another"){
      print("Outputting a suboptimal solution!")
      #Extracting the spaces we have left.
      space <- matrix(0,10,(nrow(EM)-1))
      space[1,] <- rownames(EM)[1:(nrow(EM)-1)]
      #Constructing a matrix with the subclones placed in each subclone.
      i <- 1
      for(i in 1:ncol(space)){
        row <- 2
        pos <- match(space[1,i],rownames(EM))
        j <- 1
        for(j in 1:(nrow(EM)-1)){
          if(space[1,i]!=rownames(EM)[j]){
            diff <- (EM[i,]-EM[j,])
            # Same sign everywhere means one event set contains the other;
            # a negative sum means clone j is a superset, i.e. j contains i.
            if(sign(sum(diff>0)) != sign(sum(diff<0))){
              #print(space[1,i])
              #print(rownames(EM)[j])
              if(rownames(EM)[j] %in% space[,i] == FALSE && sum(diff) < 0){
                space[row,i] <- rownames(EM)[j]
                row <- row+1
              }
            }
          }
          j <- j+1
        }
        i <- i+1
      }
      #Removing clones within clones in the same column.
      i <- 1
      for(i in 1:ncol(space)){
        #print("Column")
        #print(i)
        j <- 2
        for(j in 2:nrow(space)){
          #print("Row")
          #print(j)
          clone <- space[j,i]
          #print(clone)
          if(clone != "0"){
            pos <- match(clone,space[1,])
            #print(pos)
            if(i != pos){
              present <- as.matrix(space[2:nrow(space),i]%in%space[2:nrow(space),pos])
              k <- 1
              #print(space[2:nrow(space),i])
              #print(space[2:nrow(space),pos])
              #print(present)
              for(k in 1:nrow(present)){
                if(present[k,1]==TRUE){
                  #print(space[k+1,i])
                  space[k+1,i] <- "0"
                }
                k <- k+1
              }
            }
          }
          j <- j+1
        }
        i <- i+1
      }
      #Finding out the size of of each level without other clones in them.
      biopsies <- unique(file_samples_subclones[,2])
      biopsies <- biopsies[2:length(biopsies)]
      biopsy_space <- matrix(0,(length(biopsies)+1),(ncol(space)+1))
      biopsy_space[1,2:ncol(biopsy_space)] <- space[1,]
      biopsy_space[2:nrow(biopsy_space),1] <- biopsies
      i <- 1
      for(i in 1:ncol(space)){
        pos <- match(space[1,i],clonenames_new_order[,1]) #Row. Mother.
        j <- 2
        for(j in 2:nrow(biopsy_space)){
          col <- match(biopsy_space[j,1],word(clonenames_new_order[pos,],1))
          if(is.na(col)==FALSE){ #It might not be present in some samples.
            biopsy_space[j,i+1] <- clonenames_new_order[pos+1,col]
          }
          j <- j+1
        }
        i <- i+1
      }
      biopsy_space_base <- biopsy_space #Otherwise we get wrong results when subtracting columns further on in the loop.
      #View(biopsy_space_base)
      if(missing(cutoff)==TRUE){
        cutoff <- 30
      }
      #Finding out the remaining space at each level.
      i <- 2
      #print("I")
      for(i in 2:ncol(biopsy_space)){
        #print(i)
        j <- 2
        for(j in 2:nrow(space)){
          #print("J")
          #print(j)
          col <- match(space[j,i-1],biopsy_space[1,])
          if(space[j,i-1] != "0" && space[j,i-1] != 0){
            #print(biopsy_space[1,i]) #Mother.
            #print(biopsy_space[1,col]) #Daughter
            #print(as.numeric(biopsy_space[2:nrow(biopsy_space),i])) #Motherspace.
            #print(as.numeric(biopsy_space_base[2:nrow(biopsy_space_base),col])) #Daughterspace.
            biopsy_space[2:nrow(biopsy_space),i] <- (as.numeric(biopsy_space[2:nrow(biopsy_space),i])-as.numeric(biopsy_space_base[2:nrow(biopsy_space_base),col]))
            if(length(biopsy_space[as.numeric(biopsy_space[2:nrow(biopsy_space),i]) < 0,i]) != 0){ #There is negative numbers.
              #print("Negative")
              row <- which(as.numeric(biopsy_space[2:nrow(biopsy_space),i])<2)
              biopsy_space[row+1,i] <- "0"
              pos <- which(biopsy_space[1,i]==space,arr.ind=TRUE)
              pos <- pos[pos[,1]!=1,]
              mothermother <- space[1,as.numeric(pos[2])]
              biopsy_space[row+1,as.numeric(pos[2])+1] <- as.numeric(biopsy_space[row+1,as.numeric(pos[2])+1])-as.numeric(biopsy_space_base[row+1,col])#Removing this space from the mothermother.
            }
          }
          j <- j+1
        }
        i <- i+1
      }
      #Finding out which clusters can be reshuffled and where they can be placed.
      #print("Shuffle")
      #print(biopsy_space)
      shuffle <- matrix(0,10,3)
      s <- 1
      i <- 1
      for(i in 2:ncol(biopsy_space_base)){
        #print("Här")
        b <- length(biopsy_space_base[biopsy_space_base[,i]!="0",i])-1 #Biopsies in which it exist.
        #print(b)
        reduced <- biopsy_space_base[biopsy_space_base[,i]!="0",i] #Biopsies in which it exist.
        #print("reduced")
        #print(reduced)
        #Finding out in how many biopsies the event is < 30 %. Diff is the difference between this number and the total number of biopsies.
        diff <- (length(which(as.numeric(reduced[2:length(reduced)])<cutoff))-b) #Will be 0 if this subclone is of size < 30 in all samples.
        #print("old")
        #print(biopsy_space_base[1,i])
        #print(as.numeric(biopsy_space_base[2:nrow(biopsy_space_base),i]))
        #print(as.numeric(biopsy_space_base[2:nrow(biopsy_space_base),i])<cutoff)
        #print(which(as.numeric(biopsy_space_base[2:nrow(biopsy_space_base),i])<cutoff))
        #print(diff)
        if(diff == 0){ #The event is below the cutoff in all samples.
          #print("Will be shuffled")
          #print(biopsy_space_base[1,i])
          shuffle[s,1] <- biopsy_space_base[1,i] #Saving the ones that will be shuffled in a matrix.
          pos <- which(space==biopsy_space_base[1,i],arr.ind=TRUE)
          pos <- pos[pos[,1]!=1,] #Removing the cases where it is in the first row.
          biopsy_space[2:nrow(biopsy_space),as.numeric(pos[2])+1] <- as.numeric(biopsy_space[2:nrow(biopsy_space),as.numeric(pos[2])+1])+as.numeric(biopsy_space[2:nrow(biopsy_space),i]) #Adding the space again.
          p <- which(biopsy_space_base[1,i]==space,arr.ind=TRUE)
          p <- p[p[,1]!=1,]
          shuffle[s,2] <- space[1,as.numeric(pos[2])] #Saving the old mother.
          s <- s+1
        }
        i <- i+1
      }
      #We have now chosen the ones to be shuffled.
      #Looking for where it could be placed.
      #print("Shuffle")
      #print(shuffle)
      shuffle <- t(as.matrix(shuffle[shuffle[,1]!= "0",]))
      shuffle <- t(as.matrix(shuffle[sample(nrow(shuffle)),])) #Randomly shuffling the ones that should be relocated.
      space_new <- space
      i <- 1
      #print(nrow(shuffle))
      for(i in 1:nrow(shuffle)){
        newspace <- biopsy_space
        col <- match(shuffle[i,1],biopsy_space_base[1,]) #The position.
        spaces <- biopsy_space[2:nrow(biopsy_space),2:ncol(biopsy_space)] #Extracting the spaces.
        class(spaces) <- "numeric"
        shuffled_clone_space <- as.matrix(biopsy_space_base[2:nrow(biopsy_space_base),col])
        class(shuffled_clone_space) <- "numeric"
        room <- sweep(spaces,1,shuffled_clone_space, FUN="-") #The spaces left if we place this clone in that level.
        newspace[2:nrow(newspace),2:ncol(newspace)] <- room #The spaces left if we place this clone in that level.
        neg <- which(room<0,arr.ind=TRUE) #Gives us the columns not possible.
        pos <- c(1:ncol(room))[c(1:ncol(room))%in%neg[,2] == FALSE]
        possible <- biopsy_space[,pos+c(rep(1,length(pos)))]
        #print(possible)
        possible <- as.matrix(possible[,possible[1,]!=shuffle[i,1]]) #It should not be placed in itself.
        #print(possible)
        #Randomly choosing a new position.
        chosen <- t(as.matrix(possible[,sample(ncol(possible),1)]))
        shuffle[i,3] <- chosen[1,1] #Saving the new mother.
        #print(chosen)
        col_m <- match(chosen[1,1],biopsy_space[1,])
        biopsy_space[,col_m] <- newspace[,col_m]
        u <- which(space == shuffle[i,1],arr.ind=TRUE)
        u <- u[u[,1]!=1,]
        #print(u)
        space_new[u[1],u[2]] <- "0"
        #print(space_new[match(chosen[1,1],space_new[1,]),])
        lgh <- length(space_new[space_new[match(chosen[1,1],space_new[1,])]!="0",])
        #print(lgh)
        #print("test")
        #print(space_new[,match(chosen[1,1],space_new[1,])])
        #print(space_new[,match(chosen[1,1],space_new[1,])]!="0")
        #print(space_new[space_new[,match(chosen[1,1],space_new[1,])]!="0",match(chosen[1,1],space_new[1,])])
        space_new[length(space_new[space_new[,match(chosen[1,1],space_new[1,])]!="0",match(chosen[1,1],space_new[1,])])+1,match(chosen[1,1],space_new[1,])] <- shuffle[i,1]
        #print("The chosen one.")
        #print(chosen)
        #print(biopsy_space)
        i <- i+1
      }
      #print("The final shuffled")
      #print(shuffle)
      #Computing a new EM.
      EM <- EM_test_newnames
      shuffle <- t(as.matrix(shuffle[shuffle[,1]!="0",])) #Contains the ones that have been reshuffled.
      shuffle_original <- shuffle
      #print("Shuffle")
      # Re-home each shuffled clone's events: replace the old mother's events
      # with the new mother's. Mothers are processed before their daughters.
      while(all(shuffle[,1]=="0")==FALSE){
        #We will continue to randomly allocating the changed clones until all have been allocated.
        i <- sample(nrow(shuffle),1)
        #print(i)
        daughter <- shuffle[i,1]
        row_d <- match(shuffle[i,1],rownames(EM))
        row_m_old <- match(shuffle[i,2],rownames(EM))
        row_m_new <- match(shuffle[i,3],rownames(EM))
        # print(row_d)
        # print(row_m_old)
        # print(row_m_new)
        # print(shuffle[i,3]%in%shuffle[,1])
        if(shuffle[i,3]%in%shuffle[,1]==FALSE){
          # print("Inne")
          # print(as.numeric(EM_test_newnames[row_d,2:ncol(EM_test_newnames)]))
          # print(as.numeric(EM_test_newnames[row_m_old,2:ncol(EM_test_newnames)]))
          # print(as.numeric(EM_test_newnames[row_m_new,2:ncol(EM_test_newnames)]))
          EM[row_d,2:ncol(EM)] <- (as.numeric(EM_test_newnames[row_d,2:ncol(EM_test_newnames)])-as.numeric(EM_test_newnames[row_m_old,2:ncol(EM_test_newnames)])+as.numeric(EM_test_newnames[row_m_new,2:ncol(EM_test_newnames)]))
          shuffle[i,1] <- "0" #Removing it
        }else{
          #The mother has not been allocated to its new place yet.
        }
      }
      # print(shuffle)
      # print(EM_test_newnames)
      # print(EM)
      # print(space)
      # print(space_new)
      # print(biopsy_space)
    }else{
      EM <- EM_test_newnames
    }
  }
  # Reformat column names of EM and the first column of the global `overview`
  # so that the last word comes first (e.g. "biopsy Subclone A" layout).
  i <- 1
  for(i in 1:ncol(EM)){
    colnames(EM)[[i]] <- paste(word(colnames(EM)[[i]],-1),word(colnames(EM)[[i]],1,-2))
    i <- i+1
  }
  i <- 2
  for(i in 2:nrow(overview)){
    overview[i,1]<- paste(word(overview[i,1],-1),word(overview[i,1],1,-2))
    i <- i+1
  }
  output <- list()
  output[[1]] <- EM
  output[[2]] <- clonenames_new_order
  output[[3]] <- overview
  return(output)
}
#Creating a distribution-plot.
#distribution: draw a ridgeline histogram of subclone sizes per sample.
#  overview: matrix whose first row holds sample names (columns 2..n) and whose
#            first column holds event/subclone names (rows 2..n); each cell is
#            the (numeric, as character) size of that subclone in that sample.
#  w, h:     width/height in inches for the saved "Distribution.png".
#            Previously these were read from free globals `w`/`h`, which made
#            the function error when they were undefined; they are now
#            parameters whose defaults (15, 15) match the external ggsave call.
#Returns the ggplot object; also writes "Distribution.png" as a side effect.
distribution <- function(overview, w = 15, h = 15){
  #Expand every non-zero cell into `value` replicated (sample, subclone) rows.
  #Collect the pieces in a list and bind once at the end instead of growing a
  #matrix with rbind() inside the loop (which is quadratic).
  parts <- list()
  k <- 1
  for(i in 2:nrow(overview)){
    name <- overview[i,1]
    for(j in 2:ncol(overview)){
      biopsy <- overview[1,j]
      value <- as.numeric(overview[i,j])
      if(value != 0){
        parts[[k]] <- t(replicate(value,c(biopsy,name)))
        k <- k+1
      }
    }
  }
  df <- as.data.frame(do.call(rbind,parts)) #V1 = sample, V2 = subclone; one row per size unit.
  # Plot: one binned ridge per subclone, across the samples on the x axis.
  p <- ggplot(df, aes(y=V2, x=V1, fill=V2,height = stat(count))) +
    geom_density_ridges(alpha=0.8, stat="binline",bins=(ncol(overview)-1),scale=0.8)+theme_ridges()+
    theme(
      legend.position="none",
      panel.spacing = unit(0.1, "lines"),
      strip.text.x = element_text(size = 8),
      axis.text.x = element_text(angle = 45))+
    scale_fill_viridis_d(direction = -1, guide = "none")+
    xlab("") +
    ylab("")
  p
  ggsave(p,filename= "Distribution.png",width = w,height = h)
  return(p)
}
#Creating pies.
#make_pie: build one pie-chart figure per subclone showing its size in every
#sample it occupies, save each figure as "<subclone>.png", and return both the
#file names and the ggplot objects.
#  clonenames_new_order: bookkeeping matrix from subclones(); two rows per
#    subclone - odd row holds names ("<subclone>", then "<sample> ..." entries),
#    even row holds the corresponding sizes.
#  root: name of the root clone; "Normal" prepends a normal-cell entry.
#  samples: vector of sample names (used for the legend and empty pies).
#  type: "col" (Spectral-colored pies), "nocol" (red pies with sample labels)
#    or "custom" (colors taken from custom_col).
#  custom_col: row vector of colors; only read when type == "custom".
#Side effects: writes PNG files (and "legend.pdf" for the stem when colored),
#and assigns `pies` into the global environment. Also reads the global
#`datasegment` for the biopsy names - confirm it is in scope before calling.
#Returns a list: [[1]] = image file names, [[2]] = named list of ggplot pies.
make_pie <- function(clonenames_new_order, root, samples, type, custom_col){
if(root == "Normal"){# && "Normal" %in% clonenames_new_order[1,1] == FALSE){
Normal <- matrix(0,2,ncol(clonenames_new_order)) #Adding the normal cell to the clonenames_new_order matrix.
Normal[1,1] <- "Normal"
Normal[1,2] <- "100"
Normal[2,1] <- "Normal cells"
Normal[2,2] <- "100"
clonenames_new_order <- rbind(Normal,clonenames_new_order)}
Subclones <- matrix(0,2,100) #Scratch matrix: row 1 = sample names, row 2 = sizes.
pies <- list() #Creating a list for all pie data.
pie_images <- list()
pie_empty <- matrix(0,length(samples),2) #Creating empty pies.
pie_empty[,1] <- samples
pie_empty[,2] <- "0"
i <- 1 #The following loop will extract the size of the subclone in each sample.
for(i in 1:(nrow(clonenames_new_order)/2)){ #Looping through the new subclones.
j <- 1
s <- 1
for(j in 1:ncol(clonenames_new_order)){ #Looping through the samples in which the subclone exists.
if(clonenames_new_order[(2*i-1),j] != "0"){ #We should not add all of the columns with zeros.
if(j != 1){ #We're not in the first column. The data includes the subclones within the new subclone.
Subclones[1,s] <- word(clonenames_new_order[(2*i-1),j],1) #The sample.
Subclones[2,s] <- clonenames_new_order[(2*i),j] #The size of the subclone within that sample.
s <- s+1
}else{Subclones[1,s] <- clonenames_new_order[(2*i-1),j] #We're in the first position. This is the new subclone name.
Subclones[2,s] <- clonenames_new_order[(2*i),j] #The mean size of the subclone.
s <- s+1
}
}
j <- j+1
}
Subclones <- Subclones[,Subclones[1,] != "0"] #Removing the rows with zeros.
if(Subclones[1,1]!="Normal"){
Subclones <- distinct(data.frame(t(Subclones))) #Adding the vector to a list after removing rows that are equal. This contains all of the pie data needed.
}
pies[[i]] <- Subclones #Adding the vector to a list after removing rows that are equal. This contains all of the pie data needed.
Subclones <- matrix(0,2,100) #Resetting the matrix.
i <- i+1
}
assign("pies", pies, envir=globalenv()) #Side effect: expose the pie data globally.
image_names <- matrix(0,1,(nrow(clonenames_new_order)/2)) #Creating a vector that is to be used in order to save all of the file names.
unique_biopsies <- unique(datasegment[,2]) #Unique biopsies.
if(unique_biopsies[1]=="ALL"){ #Removing the ALL.
unique_biopsies <- unique_biopsies[2:length(unique_biopsies)]}
unique_biopsies <- c(c(unique_biopsies),c("Normal","Stem"))
#This part should be looped for each matrix in "pies".
j <- 1
#Custom_colors
blue <- c("#6bb5d8","#6fb9e7","#4d8dc6","#2c6f9a","#205575")
red <- c("#ed6d70","#ea5456","#e52421","#a71916")
yellow <- c("#f6c400","#ed8606","#e55514")
#grey <- c("#b9b8b8","#9d9d9c","#706f6f","#3c3c3b") In article.
green <- c("#add3a2","#6abfa4","#497f7a","#2c574a")
brown <- c("#ca9e67","#936037")
purple <- c("#d4bae0","#c9a0dc","#ae87d0","#7851a9","#522d80","#500691","#330066")
grey <- c("#b9b8b8","#9d9d9c","#8a8a8a","#706f6f","#595858","#3c3c3b","#212121")
#Create your own color matrix.
#custom_col <- t(as.matrix(c(blue[2],red[2],yellow[2],green[2],grey[6],grey[6])))
for(j in 1:length(pies)){ #Looping though all of the matrices in pies.
Subclone <- pies[j] #Extracting the matrix j representing the data for a particular subclone that is to be presented in the shape of pie charts.
Subclone <- as.matrix(as.data.frame(Subclone[1])) #Transforming it to a data frame.
#The subclone vector.
i <- 2
for(i in 2:nrow(Subclone)){ #Looping through the samples that the subclone are present in.
if(i == 2){
y <- as.vector(rep(Subclone[i,1],2)) #Extracting the first sample name and creating a vector where it appears two times.
a <- as.vector(rep(Subclone[i,2],2)) #Extracting the TC.
a[2] <- (100 - as.numeric(a[1])) #The first position will be the TC and the other one 100-TC and hence the other slice of the pie chart.
}else{
x <- as.vector(rep(Subclone[i,1],2))
b <- as.vector(rep(Subclone[i,2],2))
b[2] <- (100 -as.numeric(b[1]))
y <- c(y,x) #Combining the sample vectors.
a <- c(a,b)} #Combining the TC values. This gives a vector with all the TC values that is to be used when dividing the pie charts.
i <- i+1
}
#print(y)
#print(a)
#Creating the pie colors for this particular biopsy.
sp <- brewer.pal(11,"Spectral")
if(type=="col"){
colors_biopsies <- as.matrix(cbind(c("B1",sp[2],"white"),c("B2",sp[10],"white"),c("B3",sp[9],"white"),c("B4",sp[5],"white"),c("B5",sp[1],"white"),c("B6",sp[11],"white"),c("B7",sp[8],"white"),c("B8",sp[3],"white"),c("B9",sp[6],"white"),c("B10",sp[4],"white"),c("B11",sp[7],"white"),c("B12","#008080","white"),c("B13","#800000","white"),c("B14","#808080","white")))
#colors_biopsies <- as.matrix(cbind(c("B1","indianred1","white"),c("B2","#619CFF","white"),c("B3","#00BA38","white"),c("B4","#00BFC4","white"),c("B5","indianred1","white"),c("B6","#5fc400","white"),c("B7","#F564E3","white"),c("B8","#000485","white")))
if(length(unique_biopsies)>ncol(colors_biopsies)){
print("There are more samples than we have colors (11). Add your own colors or visualize without colors.")
}
colors_biopsies[1,1:length(unique_biopsies)]<- unique_biopsies
}else if(type=="nocol"){
colors_biopsies <- as.matrix(cbind(c("B1","indianred1","white"),c("B2","indianred1","white"),c("B3","indianred1","white"),c("B4","indianred1","white"),c("B5","indianred1","white"),c("B6","indianred1","white"),c("B7","indianred1","white"),c("B8","indianred1","white"),c("B9","indianred1","white"),c("B10","indianred1","white"),c("B11","indianred1","white"),c("B12","indianred1","white"),c("B13","indianred1","white"),c("B14","indianred1","white"),c("B15","indianred1","white")))
colors_biopsies[1,1:length(unique_biopsies)]<- unique_biopsies
}else if(type=="custom"){
names <- t(as.matrix(c(paste( c("B"), 1:as.numeric(length(unique_biopsies)), sep=""))))
white <- t(as.matrix(c(rep("white",length(unique_biopsies)))))
colors_biopsies <- rbind(names,custom_col,white)
colors_biopsies[1,1:length(unique_biopsies)]<- unique_biopsies
}else{
print("You have not chosen a correct color mode.")
}
print(colors_biopsies)
c <- 1
for(c in 1:(length(y)/2)){ #Build the fill vector: one (color, "white") pair per sample slice.
column <- match(y[c+(c-1)],colors_biopsies[1,]) #y[2c-1] is the c:th sample name.
if(c != 1){
color_matrix <- c(c(color_matrix),c(colors_biopsies[2:3,column]))
}else{
if(Subclone[1,1] != "Normal"){
color_matrix <- colors_biopsies[2:3,column]
}else{
color_matrix <- colors_biopsies[2:3,1]
}
}
c <- c+1
}
print(color_matrix)
test <- data.frame(Subclone = y,
Names = c(rep(c("Sample 1","Sample 2"),length(y)/2)),
TC = as.numeric(a),colour = color_matrix) #Creating a data frame with the samples in which the subclone exist, sample names, the TC for each and the colors.
test$Subclone <- factor(test$Subclone, levels = unique(Subclone[2:nrow(Subclone),1])) #Fix facet order to the order the samples appear in.
x <- ggplot(test, aes(x="", y = TC, group = Names, fill = colour)) +
geom_bar(width = 10, stat = "identity")+
geom_col(position = "fill")+scale_fill_identity()+facet_grid(.~Subclone)+
coord_polar("y", start=0) +theme_void()+#theme(strip.text.x = element_text(size = 200))+
theme(strip.background = element_blank(),
strip.text.x = element_blank(),
title = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
panel.grid = element_blank(), legend.position="none")+
theme(plot.margin=unit(c(0,0,0,0),units = "lines"))
if(type=="nocol" && Subclone[1,1] != "Normal"){
x <- x+theme(strip.text.x = element_text(size = 200))
}
pie_images[[j]] <- x
names(pie_images)[j] <- Subclone[1,1]
pie_images[[j]]$Subclone <- Subclone[1,1]
#Changes the element_text. Standard is 200 for all images. When you have more samples you might need to change it.
w <- 49
#w <- 10*(as.numeric(nrow(Subclone))-1)
s <- 10
ggsave(x,filename=paste(Subclone[1,1],".png",sep=""),width = w,height = s) #Tried skipping the widening of the image here.
image_names[j] <- paste(Subclone[1,1],".png",sep="")
if(Subclone[1,1]=="Stem"){ #For the stem we also render a version with a legend.
x <- ggplot(test, aes(x="", y = TC, group = Names, fill = colour)) +
geom_col(position = "fill")+facet_grid(.~Subclone)+#scale_fill_manual(values=color_matrix)+
coord_polar("y", start=0) +theme_void()+labs("Samples")+
scale_fill_identity(guide="legend",labels=c(samples),breaks = colors_biopsies[2,1:length(samples)],name="Samples")+
guides(labels = guide_legend(override.aes = list(shape = 15)))+
theme(plot.margin=unit(c(0,0,0,0),units = "lines"))
plot(x)
if(type != "nocol"){
legend <- cowplot::get_legend(x)
ggsave(legend,filename="legend.pdf",width=8,height=10,units = "cm")}
}
j <- j+1
}
pieData <- list()
pieData[[1]] <- image_names
pieData[[2]] <- pie_images
return(pieData)
}
#Adding the pies.
#pie_it: attach the per-subclone pie-chart images (from make_pie) to the tips
#of a ggtree plot.
#  Tree:    a ggtree plot object whose tip labels match the subclone names.
#  pieData: list from make_pie(); [[1]] = image file names, [[2]] = ggplot pies.
#  offset:  horizontal offset of the images from the tips (default 1; the
#           previous missing()-based fallback is now an argument default).
#  size:    image size passed to geom_tiplab (default 0.21, as before).
#  col:     kept for backward compatibility with existing callers; currently
#           unused (the legend-annotation branch that read it is removed).
#NOTE(review): reads the global `EM_dev` (set by subclones()) to size its
#bookkeeping matrices - confirm EM_dev exists before calling.
#Returns the tree with the pie images appended at the tips.
pie_it <- function(Tree, pieData, offset = 1, size = 0.21, col){
  image_names <- pieData[[1]]
  pie_images <- pieData[[2]]
  p <- Tree
  labels <- as.matrix(p$data$label)
  labels <- as.matrix(labels[!is.na(labels)]) #Tip labels only (internal nodes are NA).
  positions <- matrix(0,length(rownames(EM_dev[[1]])),1) #Tree node of each subclone.
  pie <- matrix(0,length(rownames(EM_dev[[1]])),1) #Index of each subclone's pie image.
  #Match every image file to its subclone's node in the tree.
  s <- 1
  for(i in 1:length(image_names)){
    thesubclone <- word(image_names[i],1,sep = ".png") #Strip the ".png" suffix to recover the subclone name.
    positions[s,1] <- match(thesubclone,labels)
    pie[s,1] <- match(thesubclone,names(pie_images))
    s <- s+1
  }
  d <- data.frame(node = positions,images = c(image_names),pie_add = c(pie))
  #Join the image bookkeeping onto the tree data and draw the images.
  #(The debugging View(d) call that was here has been removed - it fails in
  #non-interactive sessions.)
  new <- p %<+% d + geom_tiplab(aes(image=images), geom="image",offset = offset, size = size)
  return(new)
}
#Making a heatmap.
#tree_heatmap: collapse the clonenames_new_order bookkeeping matrix (two rows
#per subclone: names on the odd row, sizes on the even row) into a numeric
#subclone-by-sample matrix of clone sizes, suitable for gheatmap().
tree_heatmap <- function(clonenames_new_order){
  #Pass 1: flatten into long format - one row per (subclone, sample, size).
  long <- matrix(0,length(clonenames_new_order),3) #Generously preallocated; trimmed below.
  row_out <- 1
  n_subclones <- nrow(clonenames_new_order)/2
  for(sub in 1:n_subclones){
    pair <- clonenames_new_order[(2*sub-1):(2*sub),] #Name row + size row for this subclone.
    first_words <- word(pair[1,],1)
    found <- unique(first_words)
    found <- found[2:(length(found)-1)] #Drop the subclone's own name and the trailing entry.
    for(smp in found){
      hit <- match(smp,first_words)
      long[row_out,1] <- pair[1,1]
      long[row_out,2] <- word(pair[1,hit],1)
      long[row_out,3] <- pair[2,hit]
      row_out <- row_out+1
    }
  }
  long <- long[long[,1]!="0",] #Trim the unused preallocated rows.
  long_df <- data.frame(subclone = long[,1], sample = long[,2], size = long[,3])
  #Pass 2: pivot the long table into a wide numeric matrix.
  heat <- matrix(0,length(unique(long_df$subclone)),length(unique(long_df$sample)))
  rownames(heat) <- unique(long_df$subclone)
  colnames(heat) <- unique(long_df$sample)
  for(r in 1:nrow(long_df)){
    heat[match(long_df[r,1],rownames(heat)),match(long_df[r,2],colnames(heat))] <- as.numeric(long_df[r,3])
  }
  return(heat)
}
#Overview --> Segment file
#ov_input <- function(data,name,method,ploidy){}
#Transforming a heat map to an event matrix.
#Top-level scratch script: rebuilds a segment-file-shaped table `df` from a
#heat-map-like `data` matrix (columns 1-4 = chr/start/end/alteration, one
#sample per column from column 6 onward; a cell of "0" means absent).
#NOTE(review): `name <- x` below reads `x` before the later `x <- "Tumor"`
#assignment in this file - confirm the intended run order before executing.
df <- matrix(0,length(as.matrix(data)),11) #Generously preallocated (one row per cell of data); trimmed below.
types <- c("Tumor ID","Samples","Chr","Start","End","Med LogR","VAF (TRS)","Type","Method","Cytoband/ Gene","Clone size (%)")
colnames(df) <- types
method <- "SNP-array"
name <- x
x <- x
ploidy <- 2
df[,1] <- name
i <- 6
s <- 1
for(i in 6:ncol(data)){ #Sample
j <- 1
for(j in 1:nrow(data)){ #Alteration
if(data[j,i]!="0"){ #Only keep cells where the clone is present in this sample.
df[s,2] <- colnames(data)[i] #Sample
df[s,3] <- data[j,1] #Chr
df[s,4] <- data[j,2] #Start
df[s,5] <- data[j,3] #End
df[s,6] <- "NA" #Med LogR
df[s,7] <- "NA" #VAF (TRS)
B_allele <- str_split(data[j,4],"")[[1]][1] #First character of the allele string, e.g. "2" in "2+1".
A_allele <- str_split(data[j,4],"")[[1]][3] #Third character of the allele string, e.g. "1" in "2+1".
if(as.numeric(B_allele) == ploidy && as.numeric(A_allele) == 0){
type <- "LOH" #Total copy number equals ploidy with one allele lost.
}else if(as.numeric(B_allele)+as.numeric(A_allele)>ploidy){
type <- "Gain"
}else{type <- "Loss"}
df[s,8] <- type #Type
df[s,9] <- method #Method
df[s,10] <- data[j,4] #Cytoband/gene
df[s,11] <- data[j,i] #Clone size.
s <- s+1
}
}
i <- i+1
}
df <- df[df[,11]!="0",] #Drop the unused preallocated rows.
data <- df #Replace `data` with the rebuilt segment table.
######################
#Files to be analyzed#
######################
#Pipeline entry: load the segment file, build the event matrix with
#DEVOLUTION, derive the subclone event matrix, and plot the size distribution.
setwd("~/") #Set your working directory.
data <- load_matrix(filename="Segment_example.xlsx",sheetname ="Example_tumors") #Extracting the whole segment file including all of the tumors.
x <- "Tumor" #Tumor ID to analyze (matched against column 1 of the segment file).
#Rule matrix. The first object is the mother that the second one the daughter it cannot have according to
#information we have from some source.
# rule <- matrix(0,3,3)
# rule[1,1] <- "ALL"#"17p13q12 LOSS (1+0)"
# rule[1,2] <- "17p13q12 GAIN (2+1)"
# rule[1,3] <- "Yes"
# colnames(rule) <- c("Mother","Daughter","Allowed or not")
#####################################
#Generating event matrices and trees#
#####################################
datatypes <- c("All")#c(unique(test[,9])) #These are your data types such as SNP-array, TDS, WGS, WES etc. Change this vector if you do not want all of them to be included.
event_co <- 10000 #Breakpoint tolerance passed as eventcutoff to DEVOLUTION (presumably in bp - see DEVOLUTION's default of 1 Mbp).
root <- "Normal"
datasegment <- splitdata(data,name=x,ord=TRUE) #Extracting the beginning and end position of each sample in the segment file. #Declare which tumor you want to analyze. Specified by the first column in your data set.
#Creating the event matrix.
#DEVOLUTION(file,eventcutoff,datatypes, rule, eps,truncate,names)
EM <- DEVOLUTION(datasegment,event_co,datatypes=c("All"), eps = 0.5,names="letters") #Creating an event matrix based on the segment file chosen.
#The final event matrix
EM_dev <- subclones(EM,file_samples_subclones,root = "Normal",possible_mothers,cutoff=30,names="letters") #The first element in this list is the new event matrix. The second one is used for making pie charts. NOTE(review): file_samples_subclones and possible_mothers must already exist in the workspace.
DB <- distribution(overview_stem) #overview_stem is assigned globally inside DEVOLUTION().
plot(DB)
ggsave(DB,filename= "Distribution.png",width = 15,height = 15)
#Visualizing the trees without pies and saving them
EM_phy <- phydatevent(EM_dev[[1]]) #Transforming the EM to phyDat format.
EM_mptree <- mp_tree(EM_phy,root) #Constructing the maximum parsimony tree.
EM_mltree <- ml_tree(EM_phy,root) #Constructing the maximum likelihood tree.
limitmp <- xlim(c(0, 30)) #Here you can determine the limits for the graph for mp. 20
limitml <- xlim(c(0, 20)) #Here you can determine the limits for the graph for ml. 1.5
type <- "nocol"
Treemp <- MP_treeplot(EM_mptree,limitmp,col = type) #Illustrating the maximum parsimony tree.
Treeml <- ML_treeplot(EM_mltree,limitml,col = type) #Illustrating the maximum likelihood tree.
ggsave(Treemp,filename="PDX3_211102_mp.pdf",width=10,height=10)
######################
#Other ways to visualize the trees.
df <- EM_mltree$tree #NOTE(review): `df` is immediately reassigned on the next line, so this ml tree is discarded - keep whichever line matches the tree you want to draw.
df <- EM_mptree
library(viridis) #Needed for scale_color_viridis_d below.
p <- ggtree(df)+
geom_tiplab(align=TRUE, linetype='dashed', linesize=.3)+ #Lines between the subclone name and end node.
#geom_tiplab()+ #Unmuting this and muting the row above instead places the subclonenames close to the end node.
geom_tippoint(aes(colour=label),size=4)+ geom_tree()+
scale_color_viridis_d("label")+
theme(legend.position='none')
p
ggsave(p,filename="Tree_one_211101_mp_nolines.png",width=12,height=10)
#Add a heat map of the EM next to the tree.
q <- gheatmap(p,EM_dev[[1]], offset=0.05, width=8,
colnames_angle=45, hjust=1,low="white",high="steelblue")+theme(legend.position="none")+
scale_y_continuous(expand = expansion(mult = c(0.2,0)))
q
ggsave(q,filename="Tree_one_EM_ml.png",width=27,height=14)
#If you want a specific order for the samples.
order_samples <- c("sample1","sample2") #Replace with your own sample names in the desired order.
class(order_samples) <- "character"
pie_EM_order <- df_pie[,match(order_samples,colnames(df_pie))] #NOTE(review): `df_pie` is only created further down (tree_heatmap call) - run that line first.
q <- gheatmap(p,pie_EM_order, offset=0.1, width=5,
colnames_angle=45, hjust=1,low="white",high="steelblue")+theme(legend.position="none")+
scale_y_continuous(expand = expansion(mult = c(0.1,0)))
q
q <- gheatmap(p,pie_EM_order, offset=4, width=4,
colnames_angle=45, hjust=1,low="white",high="steelblue")+theme(legend.position="none")+
scale_y_continuous(expand = expansion(mult = c(0.1,0)))
q
#Add a heat map of the pies next to the tree.
df_pie<- tree_heatmap(clonenames_new_order)
q <- gheatmap(p,df_pie, offset=0.1, width=5,
colnames_angle=45, hjust=1,low="white",high="steelblue")+theme(legend.position="none")+
scale_y_continuous(expand = expansion(mult = c(0.1,0)))
q
ggsave(q,filename="Tree_one_mp_neworder_211123.png",width=12,height=10)
#Saving the event matrix and the clustering annotation of each event.
#It is basically an updated version of the input segment file with the clustering of events.
write.xlsx(as.data.frame(t(EM_dev[[1]])),"DEVOLUTION.xlsx",sheetName="Event matrix")
write.xlsx(clonenames_new_order[clonenames_new_order[,1]!="0",],append = TRUE,"DEVOLUTION.xlsx",sheetName="Pies")
write.xlsx(Clustering,append = TRUE, "DEVOLUTION.xlsx",sheetName = "Clustering") #Saving the data set that has been used in order to make the EM. It includes information about the subclonal belonging.
write.xlsx(as.data.frame(t(EM)),append = TRUE,"DEVOLUTION.xlsx",sheetName="Event matrix samples")
write.xlsx(as.data.frame(EM_dev[[3]]),append = TRUE,"DEVOLUTION.xlsx",sheetName="Overview")
s <- 10
#Creating pie charts and saving the final tree.
coltype <- "col" #Choose how you want your pies. nocol = Just red pie charts with a biopsy name above. col = colored pies. custom = create your own color scheme.
samples <- as.vector(unique(datasegment[datasegment[,2]!="ALL",2])) #Or just write it.
#EM_dev[[2]][8,6] <- "30"
pieData <- make_pie(EM_dev[[2]],root,samples,type=coltype) #Creates the pie charts.
pietree <- pie_it(Treemp,pieData,offset=1,size=0.21,col=coltype) #Adds pie charts to the tree. 0.21. Used 0.17 lately.
ggsave(pietree,filename=paste(x,"_211128",".pdf",sep=""),width = s,height = s) #RMS8_SNP_tree_ml
#Manual variant of pie_it (kept for tweaking offsets/sizes by hand).
image_names <- pieData[[1]]
pie_images <- pieData[[2]]
p <- Tree #NOTE(review): `Tree` is not defined anywhere in this script - presumably Treemp or Treeml was intended; `p` is not used below, Treemp is used directly.
labels <- as.matrix(Treemp$data$label)
labels <- as.matrix(labels[!is.na(labels)]) #Tip labels only (internal nodes are NA).
positions <- matrix(0,nrow(labels),1) #Empty matrix in which the positions are to be saved.
pie <- matrix(0,nrow(labels),1)
#Extracting the subclone that each image belongs to.
i <- 1
s <- 1
for(i in 1:length(image_names)){ #Looping through the image names.
thesubclone <- word(image_names[i],1,sep = ".png") #Extracting the subclone that each image belongs to.
thesubclone_pos <- match(thesubclone,labels)
pie_image <- match(thesubclone,names(pie_images))
positions[s,1] <- thesubclone_pos
pie[s,1] <- pie_image
s <- s+1
i <- i+1
}
d <- data.frame(node = positions[1:19],images = c(image_names),pie_add = c(pie)[1:19]) #NOTE(review): hard-coded 19 rows - adjust to the number of tips/images in your own data.
new <- Treemp %<+% d + geom_tiplab(aes(image=images), geom="image",offset = 2, size = 0.17)
s <- 10
ggsave(new,filename=paste(x,"_col_mp",".pdf",sep=""),width = s,height = s) #RMS8_SNP_tree_ml
| /DEVOLUTION_1.1.R | no_license | NatalieKAndersson/DEVOLUTION | R | false | false | 233,140 | r | ###############################################################################
#-------------------------------DEVOLUTION------------------------------------#
###############################################################################
setwd("~/Läkarprogrammet/PhD/Projekt 1/Final_DEVOLUTION")
#Dependencies----
library("readxl") #Needed to load the data from the xlsx file.
library("xlsx") #Needed to save matrices into xlsx-files.
library("stringr") #Needed for using the function "word".
library("ape")
library("phangorn") #Needed to transform the EM into phyDat and make trees.
library("ggplot2") #Needed to visualize the trees.
library("ggtree")
library("ggimage") #Needed to insert the pies in the tree.
library("dplyr") #Needed for the distinct function in pie.it.
library("RColorBrewer") #Needed to add the colored pie charts.
library("ggridges") #Used to plot the distribution.
library("cowplot")
library("dbscan") #Clustering
#Start with pressing the little triangle to the left here to collapse all functions!
#Functions----
#Function extracting the data
#load_matrix: read one sheet of an Excel workbook into a data frame, dropping
#every row whose first column is NA (readxl pads ragged sheets with NA rows).
#  filename:  path to the .xlsx workbook.
#  sheetname: name of the sheet to read.
#Returns the cleaned data frame.
load_matrix <- function(filename, sheetname) {
  sheet <- as.data.frame(read_xlsx(filename, sheetname)) #Whole sheet as a plain data frame.
  keep <- !is.na(sheet[,1]) #Rows that actually carry data in the first column.
  sheet[keep,]
}
#Function creating the eventmatrix
DEVOLUTION <- function(file,eventcutoff,datatypes, rule, eps,truncate,names){
start.time <- Sys.time()
if(missing(eventcutoff)==TRUE){
print("You have not chosen an event cutoff. Default 1 Mbp chosen.")
eventcutoff <- 1000000
}
all_cols <- ncol(file)
if(all_cols < 11){
print("There are missing columns!")
types <- c("Tumor ID","Samples","Chr","Start","End","Med LogR","VAF (TRS)","Type","Method","Cytoband/ Gene","Clone size (%)")
thematch <- match(colnames(file),types)
missing <- types[types%in%colnames(file)==FALSE]
print("This is missing. The algorithm will add it.")
print(missing)
file_new <- matrix(0,nrow(file),11)
colnames(file_new) <- types
cols_file <- colnames(file)
i <- 1
for(i in 1:11){
if(types[i]%in%missing){
file_new[,i] <- "NA"
}else{
col <- match(types[i],cols_file)
file_new[,i] <- file[,col]
}
i <- i+1
}
file <- file_new
}
################################
#Treating NA for TC in the file#
################################
i <- 1
for(i in 1:as.numeric(nrow(file))){
if(file[i,11] == "NA"){
if(file[i,2] == "ALL"){
file[i,11] <- "100"
}
if(file[i,2] != "ALL"){
file[i,] <- "0"
}
}
i <- i+1
}
###################################################################
#Removing events obtained with a method not specified by datatypes#
###################################################################
#If you choose yes we will not remove anything.
if(length(datatypes) > 1){
print("Only the following datatypes are included in the analysis.")
print(datatypes)
i <- 1
for(i in 1:as.numeric(nrow(file))){
if(file[i,9] %in% datatypes == FALSE){
file[i,] <- "0"
}
i <- i+1
}
}else{
if(datatypes == "All"){
print("All datatypes supplied in data are included in the analysis.")
}else{
print("The following datatype is included in the analysis")
print(datatypes)
i <- 1
for(i in 1:as.numeric(nrow(file))){
if(file[i,9] %in% datatypes == FALSE){
file[i,] <- "0"
}
i <- i+1
}
}
}
file <- as.matrix(file[file[,2] != "0",])
###########################
#Finding all unique events#
###########################
#This loop defines all events that we have in the dataset for a particular tumor.
#The events have to have the same name, be on the same chromosome and have breakpoints
#within a certain cutoff that is set on beforehand.
file_new <- file
i <- 1
versionnames <- c(paste( c("v"), 1:30, sep=""))
k <- 1
for(i in 1:nrow(file_new)){ #Choosing a row.
j <- 1
for(j in 1:nrow(file_new)){ #Choosing another row to compare it with.
if(i != j){
if(file_new[i,10] == file_new[j,10]){ #Comparing event names.
if(file_new[i,8] == file_new[j,8]){ #Comparing the type of event.
if(file_new[i,3]==file_new[j,3]){ #Are they on the same chromosome?
if(file_new[i,2] == "ALL"){ #If the event is a part of the stem they shall always be separate.
if(file_new[j,2] == "ALL"){
if(is.na(word(file[i,10],2)) == TRUE){ #If the one you compare with does not already have a version name.
file_new[j,10] <- paste(file_new[j,10],versionnames[k], sep = "") #Changing the name for the second version of the mutation.
k <- k+1
}else if(is.na(word(file[i,10],2)) == FALSE){
versionpos <- match(word(file[i,10],2),versionnames)
newversion <- versionpos+1
file_new[j,10] <- paste(file_new[j,10],versionnames[newversion], sep = "") #Changing the name for the second version of the mutation.
}
}}
if(file_new[i,2] != "ALL"){ #This part is only valid for non stem events.
if(abs(as.numeric(file_new[i,4]) - as.numeric(file_new[j,4])) > eventcutoff){ #If the events differ too much in genetic distance they are seen as two separate events.
file_new[j,10] <- paste(file_new[j,10],"v1", sep = "")} #Changing the name for the second version of the mutation.
else if(abs(as.numeric(file_new[i,5])-as.numeric(file_new[j,5])) > eventcutoff){ #The same but in the other direction.
file_new[j,10] <- paste(file_new[j,10],"v1", sep = "")}
}
}
}
}
}
j <- j+1
}
i <- i+1
}
for(i in 1:nrow(file_new)){ #Adding information to the events about which kind of alteration it is.
file_new[i,10] <- paste(file_new[i,10],file_new[i,8],file_new[i,3], sep = " ")
i <- i+1
}
un_s <- unique(file_new[,2])
samples <- un_s[un_s!="ALL"]
###########################
#Making an overview matrix#
###########################
samples <- as.matrix(unique(file_new[,2])) #Extracting all unique samples.
aberrations <- as.matrix(unique(file_new[,10])) #Extracting all unique events.
#Constructing a matrix with all samples and their TC for each of the unique events.
overview <- matrix(0,(length(aberrations)+1),(length(samples)+1))
overview[1,2:as.numeric(ncol(overview))] <- samples
overview[2:as.numeric(nrow(overview)),1] <- aberrations
i <- 1
for(i in 1:nrow(file_new)){ #Extracting all of the TC:s.
samplepos <- match(file_new[i,2],overview[1,])
aberrationpos <- match(file_new[i,10],overview[,1])
overview[aberrationpos,samplepos] <- file_new[i,11]
if(file_new[i,2] == "ALL"){
overview[aberrationpos,2:ncol(overview)] <- 100 #All samples should have 100 on the "ALL" events.
}
i <- i+1
}
#Do we have any stem at all?
if(overview[1,2] != "ALL"){
print("You do not have any declared stem event denoted ALL.")
allcolumn <- matrix(0,nrow(overview),1)
allcolumn[1,1] <- "ALL"
overview <- cbind(overview[,1],allcolumn,overview[,2:ncol(overview)])
}
#Treating cases where not all stem events have been declared.
i <- 2
firststem <- 1
for(i in 2:nrow(overview)){
stemornot <- (length(which(as.numeric(overview[i,2:ncol(overview)])>=90))-(as.numeric(ncol(overview))-2))
if(stemornot == 0){
#This is a stem event.
overview[i,2:ncol(overview)] <- 100 #Declaring it as a stem event.
#Now we have to declare it a stem in the file as well and remove it from the other ones.
if(firststem == 1){
row <- match(overview[i,1],file_new[,10])
stemmatrix <- t(as.matrix(file_new[row,]))
stemmatrix[1,2] <- "ALL"
stemmatrix[1,11] <- "100"
#stemmatrix[1,1] <- "Remove" #Temporary.
firststem <- 2
pos_stem <- as.numeric(which(overview[i,1]==file_new[,10])) #The positions in which the stem exists.
file_new[pos_stem,1] <- "Remove"
}else{
row <- match(overview[i,1],file_new[,10])
event <- t(as.matrix(file_new[row,]))
event[1,2] <- "ALL"
event[1,11] <- "100"
#event[1,1] <- "Remove" #Temporary.
stemmatrix <- rbind(stemmatrix,event)
pos_stem <- as.numeric(which(overview[i,1]==file_new[,10])) #The positions in which the stem exists.
file_new[pos_stem,1] <- "Remove"
}
}
if(i == nrow(overview)){
if(firststem == 1 && as.numeric(overview[2,2]) == 0){
print("There is no stem events in the data. Adding a fabricated stem.")
f_stem <- matrix(0,1,11)
f_stem[1,] <- c(unique(file_new[file_new[,1]!="Remove",1]),"ALL",1,1,1,"NA","NA","Stem","WES","Stem","100")
file_new <- rbind(f_stem,file_new)
overview <- rbind(overview[1,],as.vector(c("Stem",rep("100",(ncol(overview)-1)))),overview[2:nrow(overview),])
}else{
if(firststem!=1){
file_new <- rbind(stemmatrix,file_new)}
}
}
i <- i+1
}
# #We want to order the overview a bit.
overview_new <- matrix(0,nrow(overview),ncol(overview))
overview_new[1,] <- overview[1,]
sub <- overview[2:nrow(overview),]
ov_stem <- sub[as.numeric(sub[,2])==100,]
ov_notstem <- sub[as.numeric(sub[,2])!=100,]
overview_new[2:nrow(overview_new),] <- rbind(ov_stem,ov_notstem)
overview <- overview_new
assign("file_new_stem", file_new, envir=globalenv())
file_new <- file_new[file_new[,1]!="Remove",]
assign("overview_stem", overview, envir=globalenv())
assign("file_new_removed", file_new, envir=globalenv())
if(firststem == 2){
assign("stemmatrix", stemmatrix, envir=globalenv())}
#########################################################
#Including events present in a certain number of samples#
#########################################################
#View(file_new)
if(missing(truncate)==FALSE){
print("You have chosen to truncate your data. We will now remove events that are present in less than this many samples:")
print(truncate)
remove <- matrix(0,nrow(overview),2)
i <- 2
j <- 1
for(i in 2:nrow(overview)){
nr_samples <- length(which(overview[i,3:ncol(overview)]!= "0"))
if(nr_samples<as.numeric(truncate)){
remove[j,1] <- overview[i,1]
j <- j+1
}
i <- i+1
}
#print("Här är vektorn")
#print(remove)
remove <- remove[remove[,1]!="0",]
i <- 1
for(i in 1:nrow(remove)){
pos <- which(remove[i,1]==file_new[,10])
file_new <- file_new[-pos,] #Removing the event.
overview <- overview[(overview[,1]%in%remove[,1])==FALSE,]
i <- i+1
}
}
# Cluster events by their per-sample profile using DBSCAN, so that events
# with similar presence/TC patterns across samples end up in one subclone.
file_new_hej <- file_new
overview_new <- overview
#View(file_new_hej)
#View(overview_new)
###################################################################
#Using Density-Based Spatial Clustering of Applications with Noise#
###################################################################
#overview_truncated <- overview[2:nrow(overview),2:ncol(overview)]
sub <- overview[2:nrow(overview),]
overview_truncated <- sub[as.numeric(sub[,2])!=100,] #Removing the ALL events so that events are not clustered into the stem.
# oneevent == 1 flags the degenerate case where only a single non-stem
# event remains (subsetting collapsed the matrix to a vector).
oneevent <- 0
assign("overview", overview, envir=globalenv())
if(is.null(dim(overview_truncated))==FALSE){
print("more")
overview_truncated <- overview_truncated[,3:ncol(overview_truncated)]
overview_truncated <- as.data.frame(overview_truncated)
}else{
print("only one")
overview_truncated <- overview[2:nrow(overview),3:ncol(overview)]
overview_truncated <- as.data.frame(overview_truncated)
oneevent <- 1
}
overview_dfm <- data.matrix(overview_truncated, rownames.force = NA)
# Default DBSCAN reachability radius when the caller did not supply one.
if(missing(eps)==TRUE){
eps <- 0.5
}
# NOTE(review): library() call mid-function; conventionally this belongs at
# the top of the script/package DESCRIPTION.
library(dbscan)
# kNN distance plot supports a visual choice of eps; `x` itself is unused here.
x <- kNNdist(overview_dfm, k = 1)
kNNdistplot(overview_dfm,k=1)
abline(h=eps, col = "red", lty=2)
# minPts = 1 means no point is labelled noise: every event joins a cluster.
myclusters <- dbscan(overview_dfm, eps = eps ,minPts = 1)
#myclusters <- dbscan(overview_dfm, eps = 15 ,minPts = 1) #TRACERx
print(myclusters)
assign("myclusters", myclusters, envir=globalenv())
# View(overview_dfm)
# fviz_cluster(myclusters,data = overview_dfm, minPts = 1) #Plotting the clusters.
#
# if(length(unique(overview_dfm)) >= 2){
# fviz_cluster(myclusters,data = overview_dfm, minPts = 1) #Plotting the clusters.
# }else{
# x = "Nope"
# print("Warning message: The input matrix does only contain one single subclone.")
# stopifnot(Datadimensions == "ok")
# }
#assign("overview", overview, envir=globalenv())
#######################################################################
#Constructing a matrix indicating which events belong to which cluster#
#######################################################################
#If we use DBSCAN
clusters <- as.matrix(myclusters$cluster)
overview_new <- cbind(overview,matrix(0,nrow(overview),1)) #Adding the cluster belonging to the overview.
if(oneevent == 0){
# t = number of stem ("ALL") rows that precede the clustered events.
if(is.null(nrow(ov_stem))==TRUE){
t <- 1
}else{
t <- nrow(ov_stem)
}
overview_new[2:nrow(overview_new),ncol(overview_new)] <- c(c(rep("ALL",t)),clusters[,1])
unique_clusters <- c(c("ALL"),c(unique(clusters[,1]))) #Constructing the matrix in which the events will be saved.
}else{
overview_new[2:nrow(overview_new),ncol(overview_new)] <- c(clusters[,1])
unique_clusters <- c(unique(clusters[,1])) #Constructing the matrix in which the events will be saved.
}
assign("overview_cluster",overview_new,envir=globalenv())
# One row per cluster; 400 columns is a hard cap on events per cluster
# (column 1 holds the cluster name).
cluster_matrix <- matrix(0,as.numeric(length(unique_clusters)),400)
print(unique_clusters)
i <- 1
for(i in 1:length(unique_clusters)){
if(unique_clusters[i]== "ALL"){
cluster_matrix[i,1] <- "ALL"
}else{
cluster_matrix[i,1] <- paste("Subclone",unique_clusters[i])}
i <- i+1
}
i <- 1
# Drop each event ID into the first free ("0") slot of its cluster's row.
for(i in 2:nrow(overview_new)){ #Looping through the subclonal belonging.
j <- 2
Subclonerow <- match(overview_new[i,ncol(overview_new)],unique_clusters)
for(j in 2:ncol(cluster_matrix)){ #Looping through the available spots for saving the event.
if(cluster_matrix[Subclonerow,j] == "0"){
cluster_matrix[Subclonerow,j] <- overview_new[i,1]
break
}
j <- j+1
}
i <- i+1
}
# Columns become: 1 = cluster name, 2 = median TC (filled later),
# 3 = display name (filled later), 4.. = event IDs.
addmatrix <- matrix(0,as.numeric(nrow(cluster_matrix)),3)
addmatrix[,1] <- cluster_matrix[,1]
clone_matrix_names <- cbind(addmatrix,cluster_matrix[,2:as.numeric(ncol(cluster_matrix))]) #Adding three columns in the beginning.
#View(clone_matrix_names)
#########################
#Extracting the clusters#
#########################
#Extracting clusters, calculating the median TC for each one and assigning the
#cluster names to the events in the segment file.
# clusterTC: column 1 = cluster label, column 2 = median TC, column 3 = name.
clusterTC <- matrix(0,400,3) #Changed from 200 to 400 210727.
calculateTC <- matrix(0,400,1)
i <- 1
s <- 1
t <- 1
#View(file_new)
for(i in 1:as.numeric(nrow(clone_matrix_names))){ #Looping through the rows in the clustermatrix.
j <- 1
for(j in 1:as.numeric(ncol(clone_matrix_names))){ #Looping through the mutations in a particular cluster i.
if(clone_matrix_names[i,j] != 0 && clone_matrix_names[i,j] != "0"){
k <- 1
for(k in 1:nrow(file_new)){ #Looping through the mutations in our datafile.
if(clone_matrix_names[i,j] == file_new[k,10]){
calculateTC[s,1] <- file_new[k,11] #Saving the TC
s <- s+1
}
k <- k+1
}
}
j <- j+1
}
#We now have a vector with all of the TC:s for that cluster.
# The median over all per-sample TC values of the cluster's events.
clusterTC[t,1] <- paste("Cluster",t)
clusterTC[t,2] <- median(as.numeric(calculateTC[calculateTC[,1] != 0,]))
clone_matrix_names[i,2] <- median(as.numeric(calculateTC[calculateTC[,1] != 0,]))
calculateTC <- matrix(0,400,1) #Resetting the matrix.
s <- 1
t <- t + 1
i <- i+1
}
# Sort both tables by median TC so the largest (earliest) clone comes first.
clusterTC_order <- clusterTC[order(as.numeric(clusterTC[,2]), decreasing = TRUE),]
clone_matrix_names <- clone_matrix_names[order(as.numeric(clone_matrix_names[,2]), decreasing = TRUE),]
# Choose the label alphabet for subclones: explicit "numbers"/"letters"
# via the `names` argument, otherwise letters when there are <= 40 clusters
# and numbers when there are more. "ALL" is prepended when stem events exist.
if(missing(names)==FALSE){
if(names=="numbers"){
if("ALL" %in% file_new[,2] == FALSE){
namevector <- c(seq(1,1000))
}else{
namevector <- c("ALL",seq(1,1000))
}
}else if(names=="letters"||names=="subclone"){
# #If we do not have any ALL-events we want to use another name vector.
if("ALL" %in% file_new[,2] == FALSE){
namevector <- c("A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","X","Y","Z","ZA","ZB","ZC","ZD","ZE","ZF","ZG","ZH","ZI","ZJ","ZK","ZL","ZM","ZN","ZO","ZP","ZQ","ZR","ZS","ZT","ZU","ZV","ZX","ZY","ZZ",
"AAA","BBB","CCC","DDD","EEE","FFF","GGG","HHH")
}else{
# NOTE(review): unlike the other branches, this vector has no
# "AAA".."HHH" extension, so it holds 8 fewer usable labels — confirm
# whether that is intentional.
namevector <- c("ALL","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","X","Y","Z","ZA","ZB","ZC","ZD","ZE","ZF","ZG","ZH","ZI","ZJ","ZK","ZL","ZM","ZN","ZO","ZP","ZQ","ZR","ZS","ZT","ZU","ZV","ZX","ZY","ZZ")
}
}
}else{
# #If we do not have any ALL-events we want to use another name vector.
if(length(unique(overview_cluster[,ncol(overview_cluster)]))<=40){
if("ALL" %in% file_new[,2] == FALSE){
namevector <- c("A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","X","Y","Z","ZA","ZB","ZC","ZD","ZE","ZF","ZG","ZH","ZI","ZJ","ZK","ZL","ZM","ZN","ZO","ZP","ZQ","ZR","ZS","ZT","ZU","ZV","ZX","ZY","ZZ",
"AAA","BBB","CCC","DDD","EEE","FFF","GGG","HHH")
}else{
namevector <- c("ALL","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","X","Y","Z","ZA","ZB","ZC","ZD","ZE","ZF","ZG","ZH","ZI","ZJ","ZK","ZL","ZM","ZN","ZO","ZP","ZQ","ZR","ZS","ZT","ZU","ZV","ZX","ZY","ZZ",
"AAA","BBB","CCC","DDD","EEE","FFF","GGG","HHH")
}
}else{
#There are a lot of subclones. Giving numbers instead.
if("ALL" %in% file_new[,2] == FALSE){
namevector <- c(seq(1,1000))
}else{
namevector <- c("ALL",seq(1,1000))
}
}
}
#print(namevector)
#View(clusterTC_order)
# Assign display names: the top cluster keeps "ALL" (when stem events exist),
# everything else becomes "Subclone_ <label>".
# Note: paste("Subclone_",x) uses the default sep=" ", producing
# "Subclone_ A" — downstream word() calls rely on that two-word form.
i <- 1
for(i in 1:as.numeric(nrow(clusterTC_order))){
# print(i)
# print(clusterTC_order[i,2])
if(as.numeric(clusterTC_order[i,2]) != 0){
if(i == 1){
if("ALL" %in% file_new[,2] == TRUE){ #If we have an ALL event we do not want to add the name "Subclone" to it.
clusterTC_order[i,1] <- paste("Cluster",i)
clusterTC_order[i,3] <- paste(namevector[i])
clone_matrix_names[i,1] <- paste("Cluster",i)
clone_matrix_names[i,3] <- paste(namevector[i])
}else{
clusterTC_order[i,1] <- paste("Cluster",i) #In the case where we do not have any ALL-events we want to add "Subclone" to the first subclone.
clusterTC_order[i,3] <- paste("Subclone_",namevector[i])
clone_matrix_names[i,1] <- paste("Cluster",i)
clone_matrix_names[i,3] <- paste("Subclone_",namevector[i])
}
}else{
clusterTC_order[i,1] <- paste("Cluster",i)
clusterTC_order[i,3] <- paste("Subclone_",namevector[i])
clone_matrix_names[i,1] <- paste("Cluster",i)
clone_matrix_names[i,3] <- paste("Subclone_",namevector[i])
}
}
i <- i+1
}
# Map each event in the overview to its subclone display name.
overview_subclones <- cbind(overview_new,matrix(0,nrow(overview_new),1))
i <- 2
for(i in 2:nrow(overview_subclones)){
#print(overview_subclones[i,1])
pos <- which(clone_matrix_names==overview_subclones[i,1], arr.ind = T)
#print(pos)
#print(clone_matrix_names[pos[1],3])
overview_subclones[i,ncol(overview_subclones)] <- clone_matrix_names[pos[1],3]
i <- i+1
}
assign("overview_subclones", overview_subclones, envir=globalenv())
# t() here is base::t (transpose); the numeric loop counter `t` does not
# shadow it for function calls.
assign("clone_matrix_names_hej", t(clone_matrix_names), envir=globalenv()) #The subclone names and which mutations are included in each subclone are exported to the global environment.
##################################################################
#Making a new file indicating which subclone each event belong to#
##################################################################
# Replace column 2 of a copy of file_new with each event's subclone display
# name (clone_matrix_names column 3); columns 4.. hold the event IDs.
file_original <- file_new
file_subclones <- file_new
i <- 1
j <- 1
k <- 1
for(i in 1:as.numeric(nrow(clone_matrix_names))){ #Looping through a certain subclone.
for(j in 4:as.numeric(ncol(clone_matrix_names))){ #Looping through the clone mutations.
for(k in 1:as.numeric(nrow(file_new))){ #Looping through the file.
if(clone_matrix_names[i,j] == file_new[k,10]){ #If we match a subclone event with an event in our data set.
file_subclones[k,2] <- clone_matrix_names[i,3]
}
k <- k+1
}
j <- j+1
}
i <- i+1
}
##################################################
#Finding out which subclones exist in each sample#
##################################################
samples <- as.matrix(unique(c(file_original[,2]))) #Extracting all unique samples.
# Each sample owns a triad of columns: (clone name, summed TC, event count).
# 40 rows is a hard cap on subclones per sample.
sample_clone_matrix <- matrix(0,40,(as.numeric(length(samples))*3)) #A matrix which will contain all of the samples and their clones. One column is used in order to asses the number of mutations that exist within the subclone.
file_samples_subclones <- cbind(file_new,file_subclones[,2]) #Adding a column with all of the subclonal denotations of the alterations.
# Column 13 will hold "<sample> <subclone>" composite keys (or "ALL").
file_samples_subclones <- cbind(file_samples_subclones,matrix(0,as.numeric(nrow(file_samples_subclones)),1))
i <- 1
m <- 1
for(i in 1:as.numeric(nrow(file_samples_subclones))){ #Adding the subclonal names to the sample names.
if(file_samples_subclones[i,2] != "ALL"){
file_samples_subclones[i,13] <- paste(file_samples_subclones[i,2],file_samples_subclones[i,12])
}else{
file_samples_subclones[i,13] <- "ALL"
}
i <- i+1
}
subclones <- as.matrix(unique(c(file_samples_subclones[,13]))) #Extracting all unique subclones within samples.
# 100 rows caps the number of TC values collected per subclone-in-sample.
medianmatrix <- matrix(0,100,as.numeric(length(subclones))) #This matrix is to be used in order to calculate the median TC for each subclone within each sample.
medianmatrix[1,] <- subclones #The first row consists of the subclone names.
samples_unique <- as.matrix(unique(c(file_samples_subclones[,2]))) #Extracting all unique samples.
i <- 1
# `s` is the next free row within the current sample's triad; it starts at 3
# when row 2 is pre-filled with the mandatory "ALL" clone, else at 2.
if("ALL" %in% file_new[,2] == TRUE){
for(i in 1:(ncol(sample_clone_matrix)/3)){ #All samples have the subclone named "ALL".
sample_clone_matrix[2,(3*i-2)] <- "ALL"
sample_clone_matrix[2,(3*i-1)] <- "100"
sample_clone_matrix[1,(3*i-2)] <- samples_unique[i,1]
i <- i+1
s <- 3
}
}else{
s <- 2
}
# Single pass over file_samples_subclones (assumed grouped by sample in
# column 2 — TODO confirm the file is sample-sorted before this point).
# For every sample triad: record each subclone once, accumulate its TC
# (col 3t-1) and event count (col 3t); `t` tracks the current sample triad,
# `s` the next free row. In parallel, medianmatrix collects one TC value
# per row per composite key for the later median computation.
i <- 1
t <- 1
for(i in 1:as.numeric(nrow(file_samples_subclones))){ #Looping through the dataset.
if(i == 1){ #The first position will of course be our first sample in the matrix.
if("ALL" %in% file_new[,2] == TRUE){
sample_clone_matrix[1,1] <- file_samples_subclones[1,2] #Name.
}else{
sample_clone_matrix[1,1] <- file_samples_subclones[1,2] #When we do not have ALL-events in the tumor we need to extract the subclone name.
sample_clone_matrix[s,1] <- file_samples_subclones[1,12] #Name.
}
if(file_samples_subclones[1,2] != "ALL"){
sample_clone_matrix[s,2] <- file_samples_subclones[1,11] #TC.
sample_clone_matrix[s,3] <- 1
s <- s+1
medianmatrix[1,1] <- file_samples_subclones[1,13] #Name.
medianmatrix[2,1] <- file_samples_subclones[1,11] #TC.
}
if(file_samples_subclones[i,2] != file_samples_subclones[i+1,2]){
t <-t+1
}
}
if(i != 1){
if(i < as.numeric(nrow(file_samples_subclones))){
if(file_samples_subclones[i,2] == file_samples_subclones[i+1,2]){ #We are still within the same sample.
if((file_samples_subclones[i,12] %in% sample_clone_matrix[,((3*t)-2)]) == FALSE){ #The event should not already be in that column. We just want the unique labels.
sample_clone_matrix[1,(3*t-2)] <- file_samples_subclones[i,2] #Sample name.
sample_clone_matrix[s,(3*t-2)] <- file_samples_subclones[i,12] #Saving the clone name.
sample_clone_matrix[s,(3*t-1)] <- file_samples_subclones[i,11] #clone_matrix_names[as.numeric(match(file_samples_subclones[i,12], clone_matrix_names[,3])),2] #Saving the clone TC in that sample.
sample_clone_matrix[s,(3*t)] <- (as.numeric(sample_clone_matrix[s,(3*t)])+1)
#sample_clone_matrix[s,2*t] <- file_samples_subclones[i,11] #Saving the clone TC in that sample.
s <- s+1
}else{ #If the event already is in that column we add the TC.
if(file_samples_subclones[i,2] != "ALL"){
matchrow <- match(file_samples_subclones[i,12],sample_clone_matrix[,((3*t)-2)])
}else{matchrow <- 2}
sample_clone_matrix[matchrow,(3*t-1)] <- (as.numeric(sample_clone_matrix[matchrow,(3*t-1)]) + as.numeric(file_samples_subclones[i,11])) #Saving the clone TC in that sample.
sample_clone_matrix[matchrow,(3*t)] <- (as.numeric(sample_clone_matrix[matchrow,(3*t)])+1)
}
}
if(file_samples_subclones[i,2] != file_samples_subclones[i+1,2]){ #New sample.
if(file_samples_subclones[i,12] %in% sample_clone_matrix[,((3*t)-2)] == FALSE){ #The event should not already be in that column. We just want the unique labels.
sample_clone_matrix[1,(3*t-2)] <- file_samples_subclones[i,2] #Sample name.
sample_clone_matrix[s,(3*t-2)] <- file_samples_subclones[i,12] #Saving the clone name.
sample_clone_matrix[s,(3*t-1)] <- file_samples_subclones[i,11] #clone_matrix_names[as.numeric(match(file_samples_subclones[i,12], clone_matrix_names[,3])),2] #Saving the clone TC in that sample.
sample_clone_matrix[s,(3*t)] <- (as.numeric(sample_clone_matrix[s,(3*t)]) + 1) #Counting events.
#sample_clone_matrix[s,2*t] <- file_samples_subclones[i,11] #Saving the clone TC in that sample.
}else{
if(file_samples_subclones[i,2] != "ALL"){
matchrow <- match(file_samples_subclones[i,12],sample_clone_matrix[,((3*t)-2)])
}else{matchrow <- 2}
sample_clone_matrix[matchrow,(3*t-1)] <- (as.numeric(sample_clone_matrix[matchrow,((3*t)-1)]) + as.numeric(file_samples_subclones[i,11])) #Saving the clone TC in that sample.
sample_clone_matrix[matchrow,(3*t)] <- (as.numeric(sample_clone_matrix[as.numeric(matchrow),3*t])+1)
}
s <- 3 #Resetting the s.
if(t != length(samples)){
t <- t+1 #Going to the next triad of columns.
}
}
}
}
if(i == as.numeric(nrow(file_samples_subclones))){ #We're at the end of the file.
if(file_samples_subclones[i,2] != file_samples_subclones[i-1,2]){ #If the last row actually is a new sample.
s <- 3 #Resetting the s.
if(file_samples_subclones[i,12] %in% sample_clone_matrix[,((3*t)-2)] == FALSE){ #The event should not already be in that column. We just want the unique labels.
sample_clone_matrix[1,(3*t-2)] <- file_samples_subclones[i,2] #Sample name
sample_clone_matrix[s,(3*t-2)] <- file_samples_subclones[i,12] #Saving the clone name.
sample_clone_matrix[s,(3*t-1)] <- file_samples_subclones[i,11] #clone_matrix_names[as.numeric(match(file_samples_subclones[i,12], clone_matrix_names[,3])),2] #Saving the clone TC in that sample.
#sample_clone_matrix[s,2*t] <- file_samples_subclones[i,11] #Saving the clone TC in that sample.
}
}
if(file_samples_subclones[i,2] == file_samples_subclones[i-1,2]){ #If the last row is the same sample.
if(file_samples_subclones[i,12] %in% sample_clone_matrix[,((3*t)-2)] == FALSE){
sample_clone_matrix[s,(3*t-2)] <- file_samples_subclones[i,12] #Saving the clone name.
sample_clone_matrix[s,(3*t-1)] <- file_samples_subclones[i,11] #clone_matrix_names[as.numeric(match(file_samples_subclones[i,12], clone_matrix_names[,3])),2] #Saving the clone TC in that sample.
sample_clone_matrix[s,(3*t)] <- (as.numeric(sample_clone_matrix[s,(3*t)]) + 1) #Counting events.
}else{
matchrow <- match(file_samples_subclones[i,12],sample_clone_matrix[,(3*t-2)])
sample_clone_matrix[matchrow,(3*t-1)] <- (as.numeric(sample_clone_matrix[matchrow,(3*t-1)]) + as.numeric(file_samples_subclones[i,11])) #Saving the clone TC in that sample.
sample_clone_matrix[matchrow,(3*t)] <- (as.numeric(sample_clone_matrix[as.numeric(matchrow),3*t])+1)
}
}
}
# Record this row's TC in the first free slot of its composite-key column.
samplematch <- match(file_samples_subclones[i,13],medianmatrix[1,])
k <- 2
m <- 0
for(k in 2:as.numeric(nrow(medianmatrix))){ #Making a matrix for calculating the median.
if(medianmatrix[k,samplematch] == "0"){
if(m == 0){
medianmatrix[k,samplematch] <- file_samples_subclones[i,11]
m <- 1
}
}
k <- k+1
}
i <- i+1
}
# Compute the median TC per subclone-within-sample and write it into the
# sample_clone_matrix triads.
# NOTE(review): the median is stored in the LAST row (row 100) of
# medianmatrix; if a column ever held 99 collected values this would
# overwrite data — confirm the 100-row cap is generous enough.
i <- 1
for(i in 1:as.numeric(ncol(medianmatrix))){
column <- as.matrix(medianmatrix[2:nrow(medianmatrix),i])
column <- column[column[,1] != "0",1]
medianmatrix[as.numeric(nrow(medianmatrix)),i] <- median(as.numeric(column))
i <- i+1
}
#Adding the TC to the matrix illustrating the subclonal architecture within a sample.
i <- 1
for(i in 1:as.numeric(ncol(medianmatrix))){ #Looping through the subclones.
# word() (stringr) splits the composite "<sample> Subclone_ X" key:
# word 1 is the sample, words 2-3 the subclone display name.
columnsample <- match(word(medianmatrix[1,i],1),sample_clone_matrix[1,]) #Locating the sample. We get the column for the sample in sample_clone_matrix.
if(medianmatrix[1,i] != "ALL"){
rowsample <- match(word(medianmatrix[1,i],2,3),sample_clone_matrix[,columnsample]) #Locating the subclone in the row for the sample.
}else{rowsample <- match(word(medianmatrix[1,i],1),sample_clone_matrix[,columnsample])}
if(is.na(rowsample) == FALSE){
if(columnsample != 1){
sample_clone_matrix[rowsample,(columnsample+1)] <- medianmatrix[nrow(medianmatrix),i] #Adding the median TC.
}else{sample_clone_matrix[2,2] <- "100"}
}
i <- i+1
}
assign("sample_clone_matrix", sample_clone_matrix, envir=globalenv()) #The matrix which tells us which mutations belong to which subclone is transferred to the global environment.
####################################
#Building the event matrix skeleton#
####################################
subclones <- as.matrix(unique(c(file_samples_subclones[,13]))) #Extracting all unique subclones.
samples <- as.matrix(unique(c(file_new[,2]))) #Extracting all unique samples.
events <- as.matrix(unique(c(file_new[,10]))) #Extracting all unique events.
EMc <- nrow(subclones)+1
EMr <- nrow(events)+1
eventmatrix <- matrix(0,EMr,EMc) #Creating an empty event matrix.
eventmatrix[1,2:EMc] <- subclones #The subclone names are placed on the first row of the event matrix.
eventmatrix[2:EMr,1] <- events #The event names are placed in the first column of the event matrix.
eventnumber <- nrow(file_new) #The upper bound of events we think the tumor will have.
# `events` is reused here: it now holds, per subclone column, the list of
# event IDs belonging to that subclone (row 1 = subclone name).
events <- matrix(0,eventnumber,as.numeric(nrow(subclones))) #Creating an empty matrix for the events belonging to each subclone.
events[1,] <- subclones
#########################################################################################################
#Allocating the events to the samples/subclones. All subclones should have the events belonging to "ALL"#
#########################################################################################################
i <- 1
for(i in 1:ncol(events)){ #Looping through every subclone separately.
j = 1
s = 2
for(j in 1:nrow(file_samples_subclones)){ #Going through all of the events for the data set.
if(file_samples_subclones[j,13] == "ALL"){ #If we find an "ALL"-event the sample should always have this one.
events[s,i] <- file_samples_subclones[j,10]
s <- s+1
}
else if(events[1,i] == file_samples_subclones[j,13]){ #If we find an event belonging to the subclone we add it to the EM.
if((file_samples_subclones[j,10] %in% events[,i]) == FALSE){
events[s,i] <- file_samples_subclones[j,10]
s <- s+1
}
}
j <- j+1
}
i <- i+1
}
#############################
#Adding the events to the EM#
#############################
# Fill the indicator part of the event matrix: cell (row_idx, col_idx+1)
# becomes 1 when the event named in column 1 of that row occurs in the
# corresponding subclone's event list, and 0 otherwise.
for(row_idx in 2:nrow(eventmatrix)){ #Events in the EM.
for(col_idx in 1:ncol(events)){ #Subclone columns.
present <- eventmatrix[row_idx,1] %in% events[,col_idx] #Does the event exist in this subclone?
eventmatrix[row_idx,col_idx+1] <- if(present) 1 else 0
}
}
#View(events)
#View(eventmatrix)
###############################################################
#The subclones should have the events that its motherclone has#
###############################################################
i <- 2
j <- 1
s <- 3
space <- matrix(0,50,1)
#The events of the mother clones are allocated within a single sample.
i <- 1
j <- 1
s <- 2
t <- 1
space <- matrix(0,50,2) #Spaces within a sample. Dynamic.
totalspace <- matrix(0,(as.numeric(nrow(space)+1)),((2*as.numeric(ncol(sample_clone_matrix))/3)+1)) #A matrix used for calculating the spaces available.
possible_mothers <- matrix(0,(as.numeric(nrow(space)+1)),((as.numeric(nrow(subclones))-1)*2)) #A matrix used for saving the possible motherclones.
rowsofhundred <- 40
hundredpercentclones <- matrix(0,rowsofhundred,length(samples_unique)) #Matrix that is to be used in the cases where we have > 2 clones in a sample that have 100 %.
hundredpercentclones[1,] <- samples_unique
hpc <- 2
cl <- 1
# Up to nr_eq groups of equally-sized clones can be recorded per sample.
nr_eq <- 6
equalclones <- matrix(0,rowsofhundred,(length(samples_unique)*nr_eq)) #Matrix that is to be used in the cases where we have clones of equal size in a sample.
equalclones[1,] <- rep(samples_unique,nr_eq)
ec <- 2 #Count.
ecl <- 1 #Column number.
k <- 1
for(k in 1:(ncol(sample_clone_matrix)/3)){ #Constructing a matrix were every two columns represent a sample. The first one tells us which subclone harbors the space and the second the remaining space on top of this sample.
totalspace[1,(2*k-1)] <- sample_clone_matrix[1,(3*k-2)]
k <- k+1
}
k <- 1
for(k in 2:as.numeric(nrow(subclones))){ #Constructing a matrix were every two columns represent a subclone within a sample. The first one tells us which the chosen motherclone is and the other which other possible solutions there are.
possible_mothers[1,(2*k-3)] <- subclones[k,1]
k <- k+1
}
#Mother-daughter clone matrix.
allocation_samples <- matrix(0,(as.numeric(nrow(clone_matrix_names))+1),(as.numeric(nrow(samples))+1)) #Matrix which is to be used for comparing the subclonal designation within each sample.
allocation_samples[2:(as.numeric(nrow(clone_matrix_names))+1),1] <- clone_matrix_names[,3] #The subclone names are in the first column.
allocation_samples[1,2:(as.numeric(nrow(samples))+1)] <- samples #The sample names are in the first row.
subcloneswithinsample <- matrix(0,(as.numeric(nrow(sample_clone_matrix))-1),2)
# Per sample: order its subclones by TC (ties broken by comparing the
# clones' overview_cluster profiles in a repeated bubble-style pass), then
# greedily nest each subclone inside the largest remaining "space",
# recording the chosen mother in allocation_samples, alternative mothers in
# possible_mothers, and special cases (several 100 % clones, equally sized
# clones) in hundredpercentclones/equalclones.
i <- 1
for(i in 1:(as.numeric(ncol(sample_clone_matrix))/3)){ #Looping through all of the samples.
#for(i in 1:3){
subcloneswithinsample <- sample_clone_matrix[2:as.numeric(nrow(sample_clone_matrix)),(3*i-2):(3*i-1)] #Extracting the subclonal architecture and TC for a certain sample.
subcloneswithinsample_order <- subcloneswithinsample[order(as.numeric(subcloneswithinsample[,2]),decreasing = TRUE),] #Ordering the subclones from highest to lowest TC.
#Ordering the subclones.
ord <- 2
subcloneswithinsample_order_old <- matrix(0,(as.numeric(nrow(sample_clone_matrix))-1),2)
subcloneswithinsample_order_new <- subcloneswithinsample_order
#print(subcloneswithinsample_order_new)
# Repeat swap passes until the ordering is stable (fixed point).
while(all(subcloneswithinsample_order_new == subcloneswithinsample_order_old) == FALSE){
subcloneswithinsample_order_old <- subcloneswithinsample_order_new
for(ord in 2:(as.numeric(nrow(subcloneswithinsample_order_old))-1)){ #Writing a function/loop that orders the subclones of the same size according to their median size.
if(subcloneswithinsample_order_old[ord,2] != "0"){
if(subcloneswithinsample_order_old[ord,2] == subcloneswithinsample_order_old[ord+1,2]){
# orderpos1 <- match(word(subcloneswithinsample_order_old[ord,1],2),namevector)
# orderpos2 <- match(word(subcloneswithinsample_order_old[ord+1,1],2),namevector)
# if(as.numeric(orderpos2) < as.numeric(orderpos1)){
# subcloneswithinsample_order_new <- subcloneswithinsample_order_old[c(1:(ord-1),ord+1,ord,(ord+2):nrow(subcloneswithinsample_order_old)), ]
# }
orderpos1 <- match(subcloneswithinsample_order_old[ord,1],overview_subclones[,ncol(overview_subclones)])
orderpos2 <- match(subcloneswithinsample_order_old[ord+1,1],overview_subclones[,ncol(overview_subclones)])
diff <- as.numeric(overview_cluster[orderpos1,2:(ncol(overview_cluster)-1)])-as.numeric(overview_cluster[orderpos2,2:(ncol(overview_cluster)-1)])
larger <- length(which(diff>0)) #In how many positions is the first one larger than the second one?
smaller <- length(which(diff<0)) #In how many positions is the second one larger than the first one?
if(smaller > larger){
subcloneswithinsample_order_new <- subcloneswithinsample_order_old[c(1:(ord-1),ord+1,ord,(ord+2):nrow(subcloneswithinsample_order_old)), ]
}
}
}
ord <- ord+1
}
}
subcloneswithinsample_order <- subcloneswithinsample_order_new
#print(subcloneswithinsample_order)
j <- 1
ecl_original <- ecl
equal <- 1
for(j in 2:as.numeric(nrow(sample_clone_matrix))){ #Looping through the subclones within the sample.
#for(j in 2:4){
if(j == 2){ #We're in the first position. This is the ALL-event.
space[1,1] <- subcloneswithinsample_order[j-1,1] #The name.
space[1,2] <- subcloneswithinsample_order[j-1,2] #The TC.
}
if(j != 2){
if(subcloneswithinsample_order[j-1,1] != "0"){
if(subcloneswithinsample_order[j-1,1] != "ALL"){ #We should not add it again.
# NOTE(review): `space` is a character matrix at this point, so
# which.max over space[,2] relies on coercion — confirm this
# behaves as intended on the target R version.
maxspace <- which.max(space[,2]) #Finding the largest available space.
newname <- subcloneswithinsample_order[j-1,1] #The name of the new subclone.
newspace <- subcloneswithinsample_order[j-1,2] #The space of the new subclone.
maxname <- space[maxspace,1]
#Adding a loop that checks whether or not this is the only possible solution for the subclone to be placed as a daughter to.
c <- 1
so <- 0
for(c in 1:nrow(space)){
if(as.numeric(space[c,2]) != 0){ #The new test space should not be zero. You cannot put anything there.
if((as.numeric(space[c,2])-as.numeric(newspace)) >= -0.1){ #Added this due to the simulation. Too many decimals. Rounding makes events not being placed in parallel.
daughter_pos <- match(paste(sample_clone_matrix[1,(3*i-2)],newname), possible_mothers[1,])
#print("Here is c")
#print(c)
if(c == maxspace && possible_mothers[2,daughter_pos] =="0"){
possible_mothers[2,daughter_pos] <- space[c,1] #Adding the original solution.
}
if(c != maxspace || (c == maxspace && possible_mothers[2,daughter_pos] !="0")){ #Added this. It can happen if there are equalclones. It will otherwise overwrite the mother in the second row.
#print("There are other solutions")
if(space[c,1] %in% possible_mothers[2,(as.numeric(daughter_pos))] == FALSE){
#print("Now we will add it")
daughter_pos <- match(paste(sample_clone_matrix[1,(3*i-2)],newname), possible_mothers[1,])
possible_mothers[(2+so),(as.numeric(daughter_pos)+1)] <- space[c,1]
#Silenced this 200720 and 200820. Made some rules disappear.
# if(space[c,2] == newspace){ #Does not necessarily mean that they actually are equalclones and can be placed inside each other.
# if(space[c,1] != "ALL"){ #Why not ALL? You can get errors.
# #print(space)
# #print(c)
#
# mothername <- paste(sample_clone_matrix[1,(3*i-2)],space[c,1])
# mothercolumn <- match(mothername,possible_mothers[1,])
# possible_mothers[1,mothercolumn]
# #print("here are mothername and newname")
# #print(mothername)
# #print(newname)
#
# n <- 1
# for(n in 2:nrow(possible_mothers)){
# #print("Here is an n")
# #print(n)
# #print(mothercolumn)
# #print(possible_mothers[n,(mothercolumn+1)])
# if(possible_mothers[n,(mothercolumn+1)] == "0"){
# possible_mothers[n,(mothercolumn+1)] <- newname
# break
# }
# n <- n+1
# }
# }
# }
so <- so+1
}}
}
}
c <- c+1
}
# Nest the new clone inside the largest space and shrink that space.
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix.
space[s,2] <- newspace
#Treating the case when space[maxspace,2] = 100 % and the newspace as well. Then the motherclone and the daughterclone are both part of the base.
if(subcloneswithinsample_order[j-1,2] == "100"){
if(subcloneswithinsample_order[j-1,1] != "ALL"){
if(subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),2] == "100"){
if(subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1] != "ALL"){
newspace_name <- subcloneswithinsample_order[j-1,1]
maxspace_name <- subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1]
if(hpc == 2){
hundredpercentclones[2,cl] <- subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1]
hundredpercentclones[3,cl] <- subcloneswithinsample_order[j-1,1]
hpc <- 4
}else{
if(subcloneswithinsample_order[j-1,1] %in% hundredpercentclones[,cl] == FALSE){
hundredpercentclones[hpc,cl] <- subcloneswithinsample_order[j-1,1]
hpc <- hpc + 1
}
}
allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- maxspace_name #Annoting the mother clone of each of the subclones within each sample.
allocation_samples[match(maxspace_name,allocation_samples[,1]),(i+1)] <- newspace_name #Annoting the mother clone of each of the subclones within each sample.
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annoting the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annoting the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annoting the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annoting the mother clone of each of the subclones within each sample.
}
#Treating the case when we have multiple clones of equal size that have to be placed inside each other.
if(subcloneswithinsample_order[j-1,1] != "ALL"){
if(subcloneswithinsample_order[j-1,2] != "100"){
if(as.numeric(subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),2]) == as.numeric(subcloneswithinsample_order[j-1,2])){ #It should be equal in size to the other cluster.
newspace_name <- subcloneswithinsample_order[j-1,1]
maxspace_name <- subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1]
if(ec == 2){ #We have not yet added any events to the equalcolumn for this sample.
equalclones[2,ecl] <- subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1]
equalclones[3,ecl] <- subcloneswithinsample_order[j-1,1]
ec <- 4
}else{ #We have added events earlier. We now want to add even one more to this column.
if(subcloneswithinsample_order[j-1,1] %in% equalclones[,ecl] == FALSE){
equalclones[ec,ecl] <- subcloneswithinsample_order[j-1,1]
ec <- ec + 1
}
}
#This part adds the names such that they get each others names in the allocation_samples matrix.
#Silenced this one 200308 since it sometimes made events be allocated in a weird way. 100 % events got each others and 50 % each others but the 50 did not get the 100.
#allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- maxspace_name #Annoting the mother clone of each of the subclones within each sample.
#allocation_samples[match(maxspace_name,allocation_samples[,1]),(i+1)] <- newspace_name #Annoting the mother clone of each of the subclones within each sample.
equal <- 2
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annoting the mother clone of each of the subclones within each sample.
if(equal == 2){
ecl <- ecl+length(samples_unique)
ec <- 2
equal <- 1}
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annoting the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annoting the mother clone of each of the subclones within each sample.
}
} #!= "ALL".
} #!= "0".
if(j == as.numeric(nrow(sample_clone_matrix))){ #We're at the end of a sample.
#print("Endspace")
#print(i)
#print(space)
totalspace[2:as.numeric(nrow(totalspace)),((t*2)-1):((t*2))] <- space
t <- t+1
s <- 2 #Resetting s and space.
space <- matrix(0,50,2)
}else{s <- s+1}
} #j != 2.
j <- j+1
} #Subclones within a sample.
hpc <- 2
cl <- cl + 1
ec <- 2
ecl <- ecl_original + 1
i <- i+1
} #Samples.
# Build the user-facing `Clustering` table: drop the raw column 12 (marked
# "No" and removed by the column filter, shifting the composite-key column
# into position 12), rename it "Cluster", and rewrite
# "<sample> Subclone_ X" as "<sample> Cluster_ X".
i <- 1
Clustering <- file_samples_subclones
Clustering[,12] <- "No"
Clustering <- Clustering[,Clustering[1,]!="No"]
colnames(Clustering)[12] <- "Cluster"
for(i in 1:nrow(Clustering)){
w1 <- word(Clustering[i,12],1)
w3 <- word(Clustering[i,12],3)
w2 <- "Cluster_"
if(w1 != "ALL"){
Clustering[i,12] <- paste(w1,w2,w3,sep=" ")}
i <- i+1
}
# Export the result tables for downstream steps.
assign("Clustering", Clustering, envir=globalenv()) #This is a matrix illustrating the events and their subclonal belonging.
assign("file_samples_subclones", file_samples_subclones, envir=globalenv()) #This is a matrix illustrating the events and their subclonal belonging.
assign("possible_mothers", possible_mothers, envir=globalenv()) #This is a matrix illustrating the chosen mother clone as well as other possible mothers.
assign("allocation_samples", allocation_samples, envir=globalenv()) #The mother-daughter division are exported to the global environment.
assign("equalclones", equalclones, envir=globalenv()) #The equal clones.
assign("hundredpercentclones", hundredpercentclones, envir=globalenv()) #The 100 % clones.
#Fusing the equalclones and the hundredpercentclones.
#equalclones holds three column-groups per sample (offsets 0, +ncol, +2*ncol,
#where ncol = ncol(hundredpercentclones)). For each sample column of
#hundredpercentclones that carries data, copy it into the first of those three
#slots that is still empty (row 2 == "0").
i <- 1
for(i in 1:ncol(hundredpercentclones)){
if(hundredpercentclones[2,i] != "0"){ #We have some hundredpercentlones in this sample.
if(equalclones[2,i] == "0"){ #We do not have any other equalclones in this sample.
equalclones[2:nrow(equalclones),i] <- hundredpercentclones[2:nrow(hundredpercentclones),i] #We paste it here.
}else if(equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){ #We have something but not in the next one.
equalclones[2:nrow(equalclones),(i+as.numeric(ncol(hundredpercentclones)))] <- hundredpercentclones[2:nrow(hundredpercentclones),i] #We paste it here.
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){ #We have something but not in the nextnext one.
#BUG FIX: previously wrote to column (i + ncol) — the slot the preceding
#else-if had just found to be occupied — silently overwriting it. Write to
#(i + 2*ncol), the slot whose emptiness was actually tested above.
equalclones[2:nrow(equalclones),(i+2*as.numeric(ncol(hundredpercentclones)))] <- hundredpercentclones[2:nrow(hundredpercentclones),i] #We paste it here.
}
}
#Redundant: the for-loop reassigns i each iteration anyway.
i <- i+1
}
#View(events)
#The equalclones should have each other's mothers. Created 200804.
#For every group of equally-sized clones ("equalclones"), decide a linear
#mother->daughter order among them (largest row-sum first) and rewrite
#possible_mothers so the chain is consistent across samples. Mutates
#possible_mothers and equalclones in place; order matters throughout.
i <- 1
save_order <- 1
unique_mothers <- list()
for(i in 1:ncol(equalclones)){ #Looping through the clones of equal size.
removed <- 0
if(equalclones[2,i] != "0"){
#All non-"0" entries of this column: the names of the equally-sized clones.
equal_mothers <- as.vector(equalclones[equalclones[,i] != "0",i])
#NOTE: this local "order" matrix shadows base::order; the call to order()
#further down still resolves to the base function (R looks up functions
#separately from data objects), but the shadowing is fragile — rename if
#this code is ever refactored.
order <- matrix(0,length(equal_mothers),2)
overview_sub <- matrix(0,length(equal_mothers),ncol(overview))
order[,1] <- as.matrix(equal_mothers)
j <- 2
#Collect, for each pairing "first-clone + other-clone", all candidate
#mothers recorded in possible_mothers, and the overview row of the clone.
for(j in 2:length(equal_mothers)){
daughter <- paste(equal_mothers[1],equal_mothers[j])
daughter_column <- match(daughter,possible_mothers[1,])
all <- rbind(as.matrix(possible_mothers[,daughter_column]),as.matrix(possible_mothers[,daughter_column+1]))
all_nozero <- as.matrix(all[all!="0",])
if(length(all_nozero)>1){ #Added this for the cases where there is none.
unique_mothers[[(j-1)]] <- all_nozero[2:nrow(all_nozero),] #Saving all unique mothers. 2 because we do not want the daughter.
}
file_samples_subclones_row <- match(daughter,file_samples_subclones[,13])
overview_row <- match(file_samples_subclones[file_samples_subclones_row,10],overview[,1])
overview_sub[j,] <- overview[overview_row,]
j <- j+1
}
unique_mother_tot <- as.matrix(unique(unlist(unique_mothers)))
j <- 2
for(j in 3:nrow(overview_sub)){
overview_sub <- overview_sub[,overview_sub[j,]!="0"] #Removing columns where not all of them are present simultaneously.
j <- j+1
}
overview_sub <- as.matrix(overview_sub)
#Numeric sub-matrix: per-sample sizes of the equal clones (rows) across the
#samples where all of them co-occur (columns).
overview_sub_n <- overview_sub[2:nrow(overview_sub),2:as.numeric(ncol(overview_sub))]
class(overview_sub_n) <- "numeric"
#Checking if the equalclones really are allowed to be placed inside each other in all samples.
m <- 1
again <- 0
if(is.null(nrow(overview_sub_n))==FALSE){
for(m in 1:nrow(overview_sub_n)){ #Looping through the clones in this equalclones.
n <- 1
for(n in 1:nrow(overview_sub_n)){
#If one clone is larger in some samples and smaller in others (both signs
#present), a strict nesting is impossible — this group must be broken up.
thesigns <- unique(sign(overview_sub_n[m,]-overview_sub_n[n,])) #Subtracting the rows.
if("-1" %in% thesigns && "1" %in% thesigns && again == 0 && length(equalclones[equalclones[,i]%in% hundredpercentclones,i])!=nrow(equalclones)){ #the last part was added due to handle cases where we have contradictions in the data so that we do not remove it.
# print("These should not be in equalclones.")
again <- 1 #We should not remove it multiple times.
removed <- 1
eq_segment <- ceiling(as.numeric(i)/as.numeric(length(samples_unique))) #Telling us which segment of equalclones we are in.
segments_left <- as.numeric(nr_eq)-as.numeric(eq_segment)
therest <- as.numeric(i)%%length(samples_unique) #Calculating in which biopsy we are.
if(as.numeric(therest) == 0){
#NOTE(review): when i is an exact multiple of length(samples_unique) the
#biopsy index is set to nr_eq (the number of segments); one would expect
#length(samples_unique) (the last biopsy) here — TODO confirm intent.
biopsy <- nr_eq
}else{
biopsy <- therest
}
if(as.numeric(segments_left) != 0){ #There is at least one that should be moved.
#Shift the later equalclones segments for this biopsy one segment down,
#overwriting the contradictory group.
p <- as.numeric(eq_segment)+1
for(p in (as.numeric(eq_segment)+1):as.numeric(nr_eq)){
if(as.numeric(biopsy)+(as.numeric(length(samples_unique))*(as.numeric(p)-1)) <= as.numeric(ncol(equalclones))){
equalclones[,as.numeric(biopsy)+(as.numeric(length(samples_unique))*(as.numeric(p)-2))] <- equalclones[,as.numeric(biopsy)+(as.numeric(length(samples_unique))*(as.numeric(p)-1))]
}
p <- p+1
}
}else{
#No later segment to shift in: simply blank this group out.
equalclones[2:nrow(equalclones),i] <- "0"
}
}
n <- n+1
}
m <- m+1
}
}
if(is.null(nrow(overview_sub_n))==FALSE){
if(removed == 0){
#Order the equal clones by their summed size across samples; the smallest
#ends up in row 1 (it becomes the reference), the rest follow descending.
order[2:nrow(order),2] <- as.matrix(rowSums(overview_sub_n))
assign("order",order,envir=globalenv())
order[2:nrow(order),2] <- as.numeric(order[2:nrow(order),2])
order_new <- order[order(as.numeric(as.matrix(order[,2])),decreasing=TRUE),] #Ordering the matrix after size.
order[1,] <- order_new[nrow(order_new),]
order[2:nrow(order),] <- order_new[1:(nrow(order_new)-1),]
# if(save_order == 1){ #Silenced it 210316 since equalclones_order and order had differing numbers of rows.
# equalclones_order <- order
# save_order <- 2
# }else{
# print(equalclones_order)
# print(order)
# print(nrow(equalclones_order))
# print(nrow(order))
# equalclones_order <- cbind(equalclones_order,order)
# }
j <- 2
#Rewrite possible_mothers so the equal clones form a chain in this order.
for(j in 2:nrow(order)){
daughter_column <- match(paste(order[1,1],order[j,1]),possible_mothers[1,])
if(j == 2){
events <- as.matrix(unique_mother_tot[unique_mother_tot %in% order == FALSE,]) #Finding which possible mothers are not one of the equal clones. Why?
#events <- as.matrix(unique_mother_tot[unique_mother_tot != order[j,1],]) #Finding which possible mothers are left.
if(nrow(events)==1){
possible_mothers[2,daughter_column] <- events
}else{
#print(possible_mothers[1,daughter_column])
possible_mothers[2,daughter_column] <- events[1,]
possible_mothers[2:nrow(events),(daughter_column+1)] <- events[2:nrow(events),]
}
}else{
#Each subsequent clone gets the previous clone in the chain as its mother.
mother_column <- match(paste(order[1],order[j-1,1]),possible_mothers[1,])
#print(paste(order[1],order[j-1,1]))
#print(mother_column)
possible_mothers[2,daughter_column] <- word(possible_mothers[1,mother_column],2,3)
events <- as.matrix(unique_mother_tot[unique_mother_tot != order[j,1],]) #Finding which possible mothers are not one of the equal clones.
#NOTE(review): the next expression is computed and its result discarded —
#it was presumably meant to be assigned (to events?); verify before reuse.
as.matrix(events[word(possible_mothers[1,mother_column],2,3) != order[j,1],])
possible_mothers[2:nrow(events),(daughter_column+1)] <- "0" #events[2:nrow(events),1] #"0" was added 200821.
}
j <- j+1
}
}#Removed.
}
}
i <- i+1
}
assign("possible_mothers_2",possible_mothers,envir=globalenv())
assign("equalclones_new", equalclones, envir=globalenv()) #The equal clones.
#assign("equalclones_order",equalclones_order,envir=globalenv())
#########################################################################################
#We want to add all equalclones to possible mothers if one of them are a possible mother#
#########################################################################################
#This segment was "#" until 210626.
# i <- 1
# h <- 0
# for(i in 1:(ncol(possible_mothers)/2)){
#
# daughter <- possible_mothers[1,(2*i-1)]
# mother <- possible_mothers[2,(2*i-1)] #Right now I only check the "primary mother".
# eq_col <- which(word(daughter,1)==equalclones[1,])
# eq_sample <- equalclones[,eq_col] #All equalclones in this sample.
#
# if(mother %in% eq_sample){ #Checking if the mother is in equalclones.
# # print("eq_sample")
# # print(eq_sample)
# # print(eq_col)
# j <- 1
# for(j in 1:length(eq_col)){ #All equalclones columns for this sample.
#
# if(mother %in% eq_sample[,j]){
#
# eq_sub <- eq_sample[eq_sample[,j]!=mother,j]
# eq_sub <- eq_sub[eq_sub!="0"]
#
# othermothers <- possible_mothers[possible_mothers[,(2*i)]!="0",(2*i)]
# pos_m_rows <- length(othermothers)
#
# if(length(othermothers)!=0){ #We cannot add any if it is empty.
# print("Här")
# print(othermothers)
# print(length(othermothers))
# print(eq_sub)
#
# if(h == 0){
# assign("eq_sub",eq_sub,envir=globalenv())
# assign("othermothers",othermothers,envir=globalenv())
# h <- 1
# }
#
# eq_sub <- eq_sub[eq_sub!=othermothers] #Extracting the equalclones not yet given to the cluster in possible_mothers.
#
# if(length(eq_sub)!=0 && is.na(length(pos_m_rows))==FALSE){
# possible_mothers[(pos_m_rows+2):(pos_m_rows+length(eq_sub)),(2*i)] <- eq_sub[2:length(eq_sub)]} #Adding the new mothers.
# }
# }
#
# j <- j+1
# }
# }
# i <- i+1
# }
#
# assign("possible_mothers_3",possible_mothers,envir=globalenv())
###################################################################################
#Looking for discrepancies between the subclonal architecture of different samples#
###################################################################################
#Taking rules into consideration.
#Apply user-supplied rules of the form (mother, daughter, "Yes"/"No"):
#"Yes" forces the daughter to take the ruled mother (and wipes all
#alternatives); "No" removes the ruled mother from the daughter's candidate
#set. The rule identifiers are first translated from file column 10 (raw
#names) to column 12 (cluster labels). Mutates rule and possible_mothers.
if(missing(rule)==FALSE){
i <- 1
for(i in 1:nrow(rule)){
if(rule[i,1]!="ALL"){
row_m <- match(rule[i,1],file_samples_subclones[,10])
rule[i,1] <- file_samples_subclones[row_m,12]
}else{ #If it is ALL we cannot match it to column 10.
rule[i,1] <- "ALL"
}
row_d <- match(rule[i,2],file_samples_subclones[,10])
rule[i,2] <- file_samples_subclones[row_d,12]
rule <- as.matrix(rule)
# #Adding or removing mothers
print(rule)
if(rule[i,3]=="Yes"){
#All possible_mothers columns whose header names this daughter (in any biopsy).
pos <- which(rule[i,2]==word(possible_mothers[1,],2,3))
k <- 1
for(k in 1:length(pos)){
if(possible_mothers[2,as.numeric(pos[k])]!=rule[i,1]){ #The chosen mother is not the rule one.
if((rule[i,1]%in%possible_mothers[,as.numeric(pos[k])+1])==FALSE){ #The ruled mother is not even present here.
print("The ruled mother was not present in possible_mothers. Adding it.")
print(rule[i,])
#Maybe it is not even present in the sample?
# nr <- length(possible_mothers[possible_mothers[,pos[k]+1]!="0",pos[k]+1])
# print(nr)
# print(pos[k])
# possible_mothers[as.numeric(nr)+2,as.numeric(pos[k])+1] <- as.vector(rule[i,1])
#I add it as the chosen one and move the previously chosen one to the other column.
nr <- length(possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!="0",as.numeric(pos[k])+1])
possible_mothers[as.numeric(nr)+2,as.numeric(pos[k])+1] <- possible_mothers[2,as.numeric(pos[k])]
possible_mothers[2,as.numeric(pos[k])] <- rule[i,1]
}else{
#It is present here but we want it to be the chosen one.
nr <- length(possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!="0",as.numeric(pos[k])+1])
possible_mothers[as.numeric(nr)+2,as.numeric(pos[k])+1] <- possible_mothers[2,as.numeric(pos[k])]
possible_mothers[2,as.numeric(pos[k])] <- rule[i,1]
possible_mothers[1:(nrow(possible_mothers)-1),as.numeric(pos[k])+1] <- possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!=rule[i,1],as.numeric(pos[k])+1] #Removing it from the second column.
}
}
#Since the rule is absolute, every alternative mother is discarded.
possible_mothers[,as.numeric(pos[k])+1] <- "0" #I try to just remove all other mothers.
k <- k+1
}
}else{
#We will remove mothers.
print("Removing a mother")
pos <- which(rule[i,2]==word(possible_mothers[1,],2,3))
print(rule[i,])
print(pos)
k <- 1
print(possible_mothers[2,as.numeric(pos[k])])
for(k in 1:length(pos)){
if(possible_mothers[2,as.numeric(pos[k])]==rule[i,1]){ #The chosen mother is the rule one.
print("Rule one")
if(length(possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!="0",as.numeric(pos[k])+1])!=0){ #There are other possible mothers.
#Promote the first alternative to "chosen", then delete it from the
#alternatives column.
possible_mothers[2,as.numeric(pos[k])] <- possible_mothers[2,as.numeric(pos[k])+1] #Adding this instead.
possible_mothers[1:(nrow(possible_mothers)-1),as.numeric(pos[k])+1] <- possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!=possible_mothers[2,as.numeric(pos[k])+1],as.numeric(pos[k])+1] #Removing it from the second column.
}
}else if(rule[i,1]%in%possible_mothers[,as.numeric(pos[k])+1]){ #It is among other mothers.
print("Extra")
possible_mothers[1:(nrow(possible_mothers)-1),as.numeric(pos[k])+1] <- possible_mothers[possible_mothers[,as.numeric(pos[k])+1]!=rule[i,1],as.numeric(pos[k])+1] #Removing it from the second column.
}
k <- k+1
}
}
i <- i+1
}
}else{
#No rules given: use an all-zero placeholder so downstream code can index it.
rule <- matrix(0,1,3)
}
assign("rule_new",rule,envir=globalenv())
assign("rule_pos_moth",possible_mothers,envir=globalenv())
#If there are rules saying a certain subclone should be allocated at a specific place, we add it in the possible_mothers.
theonlymothers <- matrix(0,as.numeric(nrow(possible_mothers)),as.numeric(ncol(possible_mothers)))
if(as.numeric(length(unique(file_samples_subclones[,2]))) > 2){ #If we only have one biopsy we do not have to compare stuff.
i <- 2
x <- matrix(0,2,as.numeric(ncol(allocation_samples)))
x[1,] <- allocation_samples[1,]
tom <- 1 #We will start to save down the data in column 1.
Event_rule_removed <- 0
not_again <- 0
removed <- 0
stop_while <- 0
again <- 1
while(i <= as.numeric(nrow(allocation_samples))){ #Looping through the subclones.
#while(i <= 6){
print("Here is i - Subclones")
print(i)
print(allocation_samples[i,1])
only <- 0
x[2,] <- allocation_samples[i,1:as.numeric(ncol(allocation_samples))] #Extracting information about the motherclones in all samples.
print(x)
y <- as.data.frame(allocation_samples[i,2:as.numeric(ncol(allocation_samples))])
if(length(allocation_samples[i,allocation_samples[i,2:as.numeric(ncol(allocation_samples))]!="0"]) > 1 || x[2,1] %in% equalclones){ #If we only have this event in one sample we want to solely go on largest space.
y <- y[y[]!=0] #Removing the data points containing zeros. We now have all the motherclones.
mother_all_biopsies <- matrix(0,as.numeric(nrow(possible_mothers))+1,ncol(allocation_samples))
mother_all_biopsies[1,] <- allocation_samples[1,]
k <- 2
for(k in 2:ncol(x)){ #Looping through the daughter clones.
daughtersubclone <- paste(x[1,k],x[2,1]) #Finding the name of the daughter subclone.
daughterposition <- match(daughtersubclone,possible_mothers[1,]) #Finding the position for the daughter subclone in the possible_mothers matrix.
mother_all_biopsies[2,k] <- possible_mothers[2,as.numeric(daughterposition)]
mother_all_biopsies[3:nrow(mother_all_biopsies),k] <- possible_mothers[2:nrow(possible_mothers),(as.numeric(daughterposition)+1)] #This matrix will contain all mothers in all biopsies.
k <- k+1
}
distribution <- table(mother_all_biopsies[2:nrow(mother_all_biopsies),3:ncol(mother_all_biopsies)])
mother_not_all <- distribution[distribution!=length(y)]
mother_all <- distribution[distribution==length(y)] #This table illustrates the mothers that can be given in all biopsies.
# print("mother all!")
# print(mother_all)
# print(length(mother_all))
# print(distribution)
#print(daughtersubclone)
if(length(mother_all) > 1){
mother_all <- mother_all[mother_all>=length(y)]
}
#Testar att hitta den mest prevalenta modern.
#hej <- theonlymothers[1:3,word(theonlymothers[1,],2,3)=="Subclone_ 18"]
#hejsan <- table(hej[2,is.na(hej[1,])==FALSE])
#t <- table(hejsan)
#t
#which.max(t)
# mother_almost_all <- which.max(distribution)
# print("mother almost")
# print(mother_almost_all)
mother_almost_all <- distribution[distribution==(length(y)-1)]
mother_not_all <- mother_not_all[rownames(mother_not_all)!="0"] #This table illustrates all mothers that cannot be given in each biopsy.
j <- 2
mp <- 0
count_replace <- 1 #Used when we get a rule where we only have one mother in one sample and have to change earlier clones.
for(j in 2:ncol(x)){ #Looping through the motherclones.
if(x[2,j] != "0"){ #We do not want to analyze situations where we do not even have the daughter subclone in question.
daughtersubclone <- paste(x[1,j],x[2,1]) #Finding the name of the daughter subclone.
daughterposition <- match(daughtersubclone,possible_mothers[1,]) #Finding the position for the daughter subclones in the possible_mothers matrix.
justzeros <- table(possible_mothers[,(as.numeric(daughterposition)+1)]) #Extracting the column for the other possible mothers.
# print("Before")
# print(daughtersubclone)
# print(justzeros)
if((as.numeric(justzeros[1])/(as.numeric(nrow(possible_mothers)))) == 1){ #If it is one, then every position is a zero. This means that this is the only solution for this mother-daughter allocation in this sample.
daughterrowdata <- match(daughtersubclone,file_samples_subclones[,13]) #Finding the row of the subclone in the file. This will be used in order to obatin the clone size.
only <- 1
onlymother <- possible_mothers[2,as.numeric(daughterposition)]
print("Onlymother")
print(onlymother)
# print("Now we are changing things backwards!")
column_equal <- which(x[1,j]==equalclones[1,])
equalclones_biopsy <- equalclones[,column_equal]
#When a forced ("only") mother is found in a later biopsy, retroactively
#overwrite the mothers chosen for the same subclone in earlier biopsies,
#stashing the previously chosen mother in row 3 if that slot is free.
#if(x[1,j] != "B1" && onlymother %in% equalclones == FALSE){ #Changed 210910. This does not matter if we only have one biopsy. why is it not allowed to be in equalclones?
#NOTE(review): the chained test below parses as
#((onlymother %in% mother_all) %in% equalclones) == FALSE, i.e. it checks
#whether the string "TRUE"/"FALSE" occurs in equalclones — almost certainly
#not the intended condition (compare the silenced 210910 variant above).
#TODO confirm intended logic before changing.
if(x[1,j] != "B1" && onlymother %in% mother_all %in% equalclones == FALSE){ #This does not matter if we only have one biopsy. why is it not allowed to be in equalclones?
if(count_replace!=1 && tom > 1){
#Columns of earlier biopsies that refer to the same subclone name.
sub <- which(word(theonlymothers[1,1:(tom-1)],2,3)==word(theonlymothers[1,tom],2,3))
howmany <- length(sub)
if(length(sub)==0){howmany <- 0}
if(howmany!=0){
if(theonlymothers[3,tom-1] == "0"){
theonlymothers[3,(tom-as.numeric(howmany)):(tom-1)] <- theonlymothers[2,(tom-as.numeric(howmany)):(tom-1)] #Changed from j to count_replace. Otherwise we get problems when certain events are not present in all biopsies.
}
theonlymothers[2,(tom-as.numeric(howmany)):(tom-1)] <- onlymother
}
}
}
if(possible_mothers[2,as.numeric(daughterposition)] != "ALL"){ #Extracting the name of the mother it has to have.
mothername <- paste(word(possible_mothers[1,as.numeric(daughterposition)],1),possible_mothers[2,as.numeric(daughterposition)]) #The name of the only possible mother.
motherrowdata <- match(mothername,file_samples_subclones[,13]) #Finding its row in the file in order to obtain the clone size.
}else{mothername <- "ALL"
motherrowdata <- 1}
#print(file_samples_subclones[daughterrowdata,11])
#print(file_samples_subclones[motherrowdata,11])
if(mothername != "ALL" && as.numeric(file_samples_subclones[daughterrowdata,11]) + as.numeric(file_samples_subclones[motherrowdata,11]) != 200){ #If they were they they should be each other's motherclones.
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- possible_mothers[2,as.numeric(daughterposition)] #The mother clone which it has to have.
tom <- tom + 1
norules <- 0
}else if(mothername == "ALL"){ #The only possible mother for this clone are the "ALL" events.
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- possible_mothers[2,as.numeric(daughterposition)] #The mother clone which it has to have.
tom <- tom + 1
norules <- 0
}
}else{ #There are multiple solutions.
print("Multiple solutions")
if(is.na(match(x[2,1],hundredpercentclones)) == FALSE){ #The subclone is present in a hundredpercentclones.
columnhundred <- round(match(x[2,1],hundredpercentclones)/nrow(hundredpercentclones))+1 #The column. This column contains all of the alterations in the hundredpercentclone.
rowhundred <- match(x[2,1],hundredpercentclones)- columnhundred*nrow(hundredpercentclones)
if(is.na(match(x[2,j],hundredpercentclones[,columnhundred])) == FALSE){ #The mother exist in the same hundredpercentclone.
#Both the daughter and the mother exist in the same hundredpercentclones.
mother_most_common <- which.max(distribution[2:length(distribution)])
#print(distribution[2:length(distribution)])
#print(which.max(distribution[2:length(distribution)]))
mother_most_common <- names(distribution)[as.numeric(mother_most_common)+1]
#print("Most common")
#print(mother_most_common)
if(mother_most_common%in%hundredpercentclones[,columnhundred]==TRUE && mother_most_common %in% possible_mothers[,as.numeric(daughterposition)+1]==TRUE){
#The most common mother also exist in this 100% clone and it is also a possible mother to the clone.
#print("Här")
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- mother_most_common
}else{
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- possible_mothers[2,as.numeric(daughterposition)] #The mother clone which it has to have.
}
tom <- tom + 1
}else{
k <- 2
for(k in 2:nrow(possible_mothers)){ #Looping through the other solutions.
if(possible_mothers[k,(as.numeric(daughterposition)+1)]!= "0"){
if(is.na(match(possible_mothers[k,(as.numeric(daughterposition)+1)],hundredpercentclones[,columnhundred])) == FALSE){
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- possible_mothers[k,as.numeric(daughterposition)+1] #The mother clone which it has to have.
tom <- tom + 1
}
}
k <- k+1
}
}
}else if(length(names(mother_all)) < 1){
print("There is no mother that is possible in all samples.")
if(only == 1){
#print("Onlymother")
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- onlymother
tom <- tom+1
}else if(length(mother_almost_all) != 0){ #There is a possibility that this almost event is the true one.
p <- 1
#For each mother possible in almost all samples, check whether — given the
#daughters already ruled into that mother in this biopsy — there is still
#enough clone-size "room" for the new daughter; if not, strike the mother
#from possible_mothers and flag the removal so the outer loop reruns.
for(p in 1:length(mother_almost_all)){ #Looping through the mothers that are possible in almost all samples.
columns <- which(theonlymothers[2,]==names(mother_almost_all)[p]) #Finding all places where this mother is present.
biopsy_nr <- x[1,j] #The biopsy we are looking at.
sample_columns <- which(word(theonlymothers[1,],1)== biopsy_nr) #Finding all positions belonging to this biopsy.
match_columns <- intersect(columns,sample_columns) #All the rules for events being placed in this mother.
#Information about the mother in this sample.
if(names(mother_almost_all)[p]=="ALL"){
mother_rule <- "ALL"
}else{
mother_rule <- paste(biopsy_nr,names(mother_almost_all)[p]) #Name
}
row_TC_mother_rule <- match(mother_rule,file_samples_subclones[,13]) #Position.
mother_rule_size <- file_samples_subclones[row_TC_mother_rule,11] #Size in the sample.
if(length(match_columns) >= 2){ #Changed to 2.
#print("There is a rule for this mother.")
our_new_daughter <- daughtersubclone
row_TC_our_new_daughter_rule <- match(our_new_daughter,file_samples_subclones[,13])
our_new_daughter_rule_size <- file_samples_subclones[row_TC_our_new_daughter_rule,11]
#Calculating if there is any room left.
#print("Calculating if there is any room left")
r <- 1
for(r in 1:length(match_columns)){
daughter_rule <- theonlymothers[1,as.numeric(match_columns[r])]
row_TC_daughter_rule <- match(daughter_rule,file_samples_subclones[,13])
daughter_rule_size <- file_samples_subclones[row_TC_daughter_rule,11]
#Only daughters at least as large as the new one eat into the mother's room;
#smaller ones could nest inside the new daughter instead.
if((as.numeric(daughter_rule_size)-as.numeric(our_new_daughter_rule_size)) > 0){ #If our new daughter is larger than the ones we're comparing with now, it is not interesting to subtract them since this new alteration will have f?retr?de.
mother_rule_size <- as.numeric(mother_rule_size)-as.numeric(daughter_rule_size)}
r <- r+1
}
if(is.na(as.numeric(mother_rule_size))==FALSE){
if(is.na(as.numeric(our_new_daughter_rule_size))==FALSE){
if((as.numeric(mother_rule_size)+0.1) < as.numeric(our_new_daughter_rule_size)){ #Added 0.1 because otherwise you might get rounding errors.
#print("There is no longer room.")
#Strike this mother from both candidate columns of the daughter.
pos_rem1 <- match(names(mother_almost_all)[p],possible_mothers[,daughterposition])
if(is.na(pos_rem1) == FALSE){
possible_mothers[pos_rem1,daughterposition] <- "0"
}
if(daughterposition < ncol(possible_mothers)){
pos_rem2 <- match(names(mother_almost_all)[p],possible_mothers[,(daughterposition+1)])
if(is.na(pos_rem2) == FALSE){
possible_mothers[pos_rem2,(daughterposition+1)] <- "0"
}
}
removed <- 1
Event_rule_removed <- 1 #Indicating that an event has been removed.
}}}
}
#BUG FIX: was "p < p+1" (a comparison whose result was discarded); the
#assignment was clearly intended. Redundant either way inside a for-loop.
p <- p+1
}
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- names(mother_almost_all)[1] #The mother clone which is possible in almost all samples.
tom <- tom + 1
}else{
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)]
mother_possibly <- which.max(distribution[2:length(distribution)])
#print("possibly the mother")
#print(names(distribution)[mother_possibly])
theonlymothers[2,tom] <- names(distribution)[as.numeric(mother_possibly)+1] #Changed from just taking 2, to taking the most prevalent one.
tom <- tom+1 #Added this since we do not get the tom-count during the second round.
}
}else if(length(mother_all) == 1){
#Multiple solutions
#Adding a rule in order to make all the daughters originate after the same mother.
print("The only solution now.")
daughtersubclone <- paste(x[1,j],x[2,1]) #Finding the name of the daughter subclone.
daughterposition <- match(daughtersubclone,possible_mothers[1,]) #Finding the position for the daughter subclones in the possible_mothers matrix.
theonlymothers[2:nrow(theonlymothers),tom] <- "0"
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- names(mother_all)[1] #The mother clone which it has to have.
if(length(mother_almost_all) != 0){
theonlymothers[3,tom] <- names(mother_almost_all)[1]
}
tom <- tom + 1
norules <- 0
}else if(length(mother_all) > 1){ #There are multiple solutions that are possible in all samples.
print("We have multiple possible allocations that are equally probable")
print(mother_all)
print(x)
daughtersubclone <- paste(x[1,j],x[2,1]) #Finding the name of the daughter subclone.
daughterposition <- match(daughtersubclone,possible_mothers[1,]) #Finding the position for the daughter subclones in the possible_mothers matrix.
theonlymothers[2:nrow(theonlymothers),tom] <- "0"
#Adding a rule algorithm.
# rule_d <- which(rule[,2]==x[2,1])
# r <- 1
# rule_applied <- 0
# for(r in 1:length(mother_all)){
#
# rule_m <- which(rule[,1]==names(mother_all)[r])
# rule_both <- intersect(rule_d,rule_m)
# if(length(rule_both)>0){
# if(rule[rule_d,3]=="No"){
# print("They are not allowed to be placed after one another.") #Tystade då jag har en kod högre upp som gör det.
# #mother_all <- mother_all[names(mother_all)!=names(mother_all)[r]] #Removing this mother entirely.
# }else{
# print("They should be placed after one another.")
# mother_all <- rule[rule_d,1]
# }
# rule_applied <- 1
# }
#
# r <- r+1
# }
#Algorithm for choosing between many mothers that are possible in all samples.
if(ncol(overview) > 3){ #If it is 3 we do only have one sample. No reason to look for patterns.
mother_all_mtrx <- as.matrix(names(mother_all)) #The mother names.
if(length(names(mother_all))==1){
mother_all_type <- matrix(0,2,ncol(overview))
}else{
mother_all_type <- matrix(0,(nrow(mother_all)+1),ncol(overview))}
n <- 1
for(n in 1:(nrow(mother_all_mtrx)+1)){
if(n == 1){
mother_all_mtrx_row<- match(possible_mothers[1,as.numeric(daughterposition)],file_samples_subclones[,13]) #Position.
mother_all_type[n,1] <- file_samples_subclones[mother_all_mtrx_row,12]
type <- word(file_samples_subclones[mother_all_mtrx_row,10],1:3)
mother_all_type[n,2] <- paste(type[1],type[2],type[3],sep=" ")
overview_row <- match(file_samples_subclones[mother_all_mtrx_row,10], overview[,1])
mother_all_type[n,3:ncol(mother_all_type)] <- overview[overview_row,3:ncol(overview)]
}else{
if(mother_all_mtrx[n-1,1] != "ALL"){
mother_all_type[n,1] <- mother_all_mtrx[n-1,1]
mother_all_mtrx_row <- match(mother_all_mtrx[n-1,1],word(file_samples_subclones[,13],2,3)) #Position.
type <- word(file_samples_subclones[mother_all_mtrx_row,10],1:3)
mother_all_type[n,2] <- paste(type[1],type[2],type[3],sep=" ")
overview_row <- match(file_samples_subclones[mother_all_mtrx_row,10], overview[,1])
mother_all_type[n,3:ncol(mother_all_type)] <- overview[overview_row,3:ncol(overview)]
}else{
mother_all_type[n,1] <- "ALL"
mother_all_type[n,2] <- "ALL"
mother_all_type[n,3:ncol(mother_all_type)] <- "100"
}
}
n <- n+1
}
m <- 4
for(m in 4:ncol(mother_all_type)){
if(is.null(nrow(mother_all_type))==FALSE){
sign_vector <- (as.numeric(mother_all_type[,(as.numeric(m)-1)]) - as.numeric(mother_all_type[,m]))
same_sign <- as.matrix(sign(sign_vector[1]) == sign(sign_vector))
mother_all_type <- mother_all_type[same_sign,]
}
m <- m+1
}
if(is.null(nrow(mother_all_type))==FALSE){
mother_all_type <- mother_all_type[!is.na(mother_all_type[,1]),]
if(is.null(nrow(mother_all_type))==FALSE){
if(nrow(mother_all_type) > 1){
#print("There are mothers that follow the same pattern.")
mother_preferred <- mother_all_type[2,1] #We prefer to choose a mother that shows a similar pattern.
mp <- 1
}}
}
#print("Final")
}
#Adding the events.
if(not_again != 1){
if(mp != 1){
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
if(possible_mothers[2,as.numeric(daughterposition)]%in% names(mother_all)){ #If the largest space mother is in this, we will choose it.
theonlymothers[2,tom] <- possible_mothers[2,as.numeric(daughterposition)]
}else{
theonlymothers[2,tom] <- names(mother_all)[1] #The mother clone.
theonlymothers[3:(length(mother_all)+1),tom] <- names(mother_all)[2:length(mother_all)]}
}else{
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- mother_preferred
pos_mp <- match(mother_preferred,names(mother_all))
mother_all[pos_mp] <- mother_all[1] #Replacing the first event with the preferred one.
theonlymothers[3:(length(mother_all)+1),tom] <- names(mother_all)[2:length(mother_all)]
}
}else{
if(mp != 1){
if(possible_mothers[2,as.numeric(daughterposition)]%in% names(mother_all)){ #If the largest space mother is in this, we will choose it.
theonlymothers[2,tom] <- possible_mothers[2,as.numeric(daughterposition)]
}else{
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
theonlymothers[2,tom] <- names(mother_all)[1] #The mother clone which it has to have.
theonlymothers[3,tom] <- names(mother_all)[2]
if(length(mother_all) >= 3){
theonlymothers[4:(length(mother_all)+1),tom] <- names(mother_all)[3:length(mother_all)]}
}
}else{
theonlymothers[1,tom] <- possible_mothers[1,as.numeric(daughterposition)] #The clone which is to be allocated.
pos_mp <- match(mother_preferred,names(mother_all))
theonlymothers[2,tom] <- mother_preferred
mother_all[pos_mp] <- mother_all[1] #Replacing the first event with the preferred one.
theonlymothers[3:(length(mother_all)+1),tom] <- names(mother_all)[2:length(mother_all)]
}
}
#We have to see if any of these mothers are not possible any more since we've gotten some rules for the allocations.
#Same room-check as the mother_almost_all loop above, but over the candidate
#mothers stored for this subclone in theonlymothers[,tom]: if prior rules
#leave too little clone-size room for the new daughter, strike the mother
#from possible_mothers and flag Event_rule_removed so the outer loop reruns.
if(not_again != 1){
p <- 1
for(p in 2:(length(mother_all)+1)){ #Looping through the mothers that are possible in all samples.
columns <- which(theonlymothers[2,]==theonlymothers[p,tom]) #Finding all places where this mother is present.
biopsy_nr <- x[1,j] #The biopsy we are looking at.
sample_columns <- which(word(theonlymothers[1,],1)== biopsy_nr) #Finding all positions belonging to this biopsy.
match_columns <- intersect(columns,sample_columns) #All the rules for events being placed in this mother.
#Information about the mother in this sample.
if(theonlymothers[p,tom]=="ALL"){
mother_rule <- "ALL"
}else{
mother_rule <- paste(biopsy_nr,theonlymothers[p,tom]) #Name
}
row_TC_mother_rule <- match(mother_rule,file_samples_subclones[,13]) #Position.
mother_rule_size <- file_samples_subclones[row_TC_mother_rule,11] #Size in the sample.
if(length(match_columns) > 1){
#print("There is a rule for this mother.")
our_new_daughter <- daughtersubclone
row_TC_our_new_daughter_rule <- match(our_new_daughter,file_samples_subclones[,13])
our_new_daughter_rule_size <- file_samples_subclones[row_TC_our_new_daughter_rule,11]
#print(our_new_daughter)
#print(row_TC_our_new_daughter_rule)
#print(our_new_daughter_rule_size)
#Calculating if there is any room left.
r <- 1
#print("Calculating if there is any room left")
for(r in 1:length(match_columns)){
daughter_rule <- theonlymothers[1,as.numeric(match_columns[r])]
row_TC_daughter_rule <- match(daughter_rule,file_samples_subclones[,13])
daughter_rule_size <- file_samples_subclones[row_TC_daughter_rule,11]
#print("Daughter rule")
#print(daughter_rule)
if((as.numeric(daughter_rule_size)-as.numeric(our_new_daughter_rule_size)) > 0){ #If our new daughter is larger than the ones we're comparing with now, it is not interesting to subtract them since this new alteration will have f?retr?de.
mother_rule_size <- as.numeric(mother_rule_size)-as.numeric(daughter_rule_size)}
r <- r+1
}
#print(mother_rule_size)
if(is.na(as.numeric(mother_rule_size))==FALSE){
if(is.na(as.numeric(our_new_daughter_rule_size))==FALSE){
if((as.numeric(mother_rule_size)+0.1) < as.numeric(our_new_daughter_rule_size)){ #Added 0.1 because otherwise you might get rounding errors.
#print("There is no longer room.")
#changed from theonlymothers[p,tom].
pos_rem1 <- match(theonlymothers[p,tom],possible_mothers[,daughterposition])
if(is.na(pos_rem1) == FALSE){
possible_mothers[pos_rem1,daughterposition] <- "0"
}
#ROBUSTNESS FIX: guard daughterposition+1 against running past the last
#column (the sibling loop above already had this bounds check).
if(daughterposition < ncol(possible_mothers)){
pos_rem2 <- match(theonlymothers[p,tom],possible_mothers[,(daughterposition+1)])
if(is.na(pos_rem2) == FALSE){
possible_mothers[pos_rem2,(daughterposition+1)] <- "0"
}
}
removed <- 1
Event_rule_removed <- 1 #Indicating that an event has been removed.
#mother_all <- mother_all[names(mother_all) != theonlymothers[p,tom]] #Removing the mother from the possible ones.
#} While loop for error searching.
}}}
}
#BUG FIX: was "p < p+1" (a comparison whose result was discarded); the
#assignment was clearly intended. Redundant either way inside a for-loop.
p <- p+1
}}
        # Bookkeeping after processing the rules for this mother column:
        # clear the no-rules flag and advance to the next column of
        # theonlymothers. (This span closes loops opened before this window;
        # brace structure left untouched.)
        norules <- 0
        tom <- tom + 1
        }
        }
        count_replace <- count_replace+1 #Increasing this one which calculates how many columns we are into theonlymothers for this subclone. Needed when we will replace earlier chosen mothers since we later got a definitive rule.
        }
        j <- j+1
        } #Looping through motherclones.
        # If a rule removal happened on this pass, redo the loop once
        # (not_again guards against an infinite redo); otherwise advance i.
        if(Event_rule_removed == 1 && not_again!=1){
        # print("We will now redo the loop.")
        Event_rule_removed <- 0
        not_again <- 1
        removed <- 0
        tom <- (tom-(count_replace-1)) #We have to reset this. changed from allocation samples to count_replace since some events are not present in all samples.
        }else{
        not_again <- 0
        i <- i+1
        }
        # Safety valve: hard-stop the outer while after 2*ncol(theonlymothers)
        # passes so a non-converging rule set cannot loop forever.
        stop_while <- stop_while+1
        if(stop_while > 2*ncol(theonlymothers)){
        i <- (as.numeric(nrow(allocation_samples))+1)
        break
        }
        if(i==as.numeric(nrow(allocation_samples))&&again==1){ #We will redo it all in order to minimize discrepancies.
        #print("We will now redo it all.")
        tom <- 1
        again <- 0
        i <- 2
        }
        }else{ #If the event is only present in one sample.
        #print("One sample")
        # Single-sample case: record the clone and its required mother
        # directly in theonlymothers, if a mother requirement exists.
        biopsy <- which(allocation_samples[i,2:as.numeric(ncol(allocation_samples))]!="0")
        name <- paste(allocation_samples[1,biopsy+1],allocation_samples[i,1])
        pos <- match(name,possible_mothers[1,])
        #print(biopsy)
        #print(name)
        #print(pos)
        # NOTE(review): presumably possible_mothers[2,pos+1]=="0" means "no
        # alternative mother", i.e. the requirement is definitive — confirm.
        if(possible_mothers[2,pos+1]=="0"){
        theonlymothers[1,tom] <- possible_mothers[1,as.numeric(pos)] #The clone which is to be allocated.
        theonlymothers[2,tom] <- possible_mothers[2,as.numeric(pos)] #The mother clone which it has to have.
        }
        tom <- tom + 1
        i <- i+1}
        }
        }
# Publish the intermediate results to the global environment so later
# stages (and interactive debugging) can inspect them.
assign("theonlymothers", theonlymothers, envir=globalenv()) # The conditioned mother rules.
assign("equalclones_before", equalclones, envir=globalenv()) # The equal clones before the update below.
assign("possible_mothers_new", possible_mothers, envir=globalenv()) # The remaining possible mothers.
###############################################
#Updating the mother-daughter-clone allocation#
###############################################
i <- 1
j <- 1
s <- 2
t <- 1
space <- matrix(0,50,2) # Spaces within a sample. Dynamic.
totalspace <- matrix(0,(as.numeric(nrow(space)+1)),((2*as.numeric(ncol(sample_clone_matrix))/3)+1)) # A matrix used for calculating the spaces available.
possible_mothers <- matrix(0,(as.numeric(nrow(space)+1)),((as.numeric(nrow(subclones))-1)*2)) # A matrix used for saving the possible motherclones.
Not_allocated_correctly <- matrix(0,ncol(theonlymothers),3) # Clones that could not go under their conditioned mother.
nac <- 1
# Every two columns of totalspace represent one sample: the first column
# holds the subclone that harbors the space, the second the remaining
# space on top of it. seq_len() guards the zero-column edge case that
# `1:(n)` would mishandle; the manual `k <- k+1` increments were removed
# because a `for` loop reassigns its index on every iteration (no-ops).
for(k in seq_len(ncol(sample_clone_matrix)/3)){
  totalspace[1,(2*k-1)] <- sample_clone_matrix[1,(3*k-2)]
}
# Every two columns of possible_mothers represent a subclone within a
# sample: the first holds the chosen motherclone, the second the other
# possible solutions.
for(k in 2:as.numeric(nrow(subclones))){
  possible_mothers[1,(2*k-3)] <- subclones[k,1]
}
subcloneswithinsample <- matrix(0,(as.numeric(nrow(sample_clone_matrix))-1),2)
#SAMPLE LOOP
i <- 1
# Loop over all samples; sample_clone_matrix stores three columns per
# sample (name / TC / ?), hence ncol/3 iterations. (The loop body runs
# far past this span; only the per-sample extraction is annotated here.)
for(i in 1:(as.numeric(ncol(sample_clone_matrix))/3)){ #Looping through all of the samples.
#Change the loop number
#for(i in 1:45){
#for(i in 3:3){
#print("Here is i")
#print(i)
subcloneswithinsample <- sample_clone_matrix[2:as.numeric(nrow(sample_clone_matrix)),(3*i-2):(3*i-1)] #Extracting the subclonal architecture and TC for a certain sample.
subcloneswithinsample_order <- subcloneswithinsample[order(as.numeric(subcloneswithinsample[,2]),decreasing = TRUE),] #Ordering the subclones from highest to lowest TC.
sameclones <- 0
current_sample <- sample_clone_matrix[1,(3*i-2)]
#print("Sample")
#print(current_sample)
#Silenced 210716. Not really sure what the last column is for.
# or <- 1
# subcloneswithinsample_order <- cbind(subcloneswithinsample_order,matrix(0,nrow(subcloneswithinsample_order),1))
# for(or in 1:nrow(subcloneswithinsample_order)){
# col <- match(subcloneswithinsample_order[or,1],clone_matrix_names[,3]) #The third row contains the subclones.
# subcloneswithinsample_order[or,3] <- clone_matrix_names[3,as.numeric(col)] #The third row contains the subclones.
# or <- or+1
# }
#Arranging the subclones.
# Fixed-point tie-breaking pass is initialized here (see the while loop
# below): _old starts all-zero so the loop always runs at least once.
subcloneswithinsample_order_old <- matrix(0,(as.numeric(nrow(sample_clone_matrix))-1),2)
subcloneswithinsample_order_new <- subcloneswithinsample_order
ord <- 2
# Bubble equal-TC subclones into a stable order: repeat until a full pass
# makes no change (the while condition compares the first columns of the
# old and new orderings).
while(all(subcloneswithinsample_order_new[,1] == subcloneswithinsample_order_old[,1]) == FALSE){
subcloneswithinsample_order_old <- subcloneswithinsample_order_new
ord <- 2
for(ord in 2:(as.numeric(nrow(subcloneswithinsample_order_old))-1)){ #Writing a function/loop that orders the subclones of the same size according to their median size.
if(subcloneswithinsample_order_old[ord,2] != "0"){
if(subcloneswithinsample_order_old[ord,2] == subcloneswithinsample_order_old[ord+1,2]){
# orderpos1 <- match(word(subcloneswithinsample_order_old[ord,1],2),namevector)
# orderpos2 <- match(word(subcloneswithinsample_order_old[ord+1,1],2),namevector)
#
# if(as.numeric(orderpos2) < as.numeric(orderpos1)){
# subcloneswithinsample_order_new <- subcloneswithinsample_order_old[c(1:(ord-1),ord+1,ord,(ord+2):nrow(subcloneswithinsample_order_old)), ]
# }
# Tie: compare the two clusters across all samples and put the one
# that is larger in more samples first.
orderpos1 <- match(subcloneswithinsample_order_old[ord,1],overview_subclones[,ncol(overview_subclones)])
orderpos2 <- match(subcloneswithinsample_order_old[ord+1,1],overview_subclones[,ncol(overview_subclones)])
# NOTE(review): `diff` shadows base::diff inside this loop — harmless
# here, but worth renaming if this block is ever refactored.
diff <- as.numeric(overview_cluster[orderpos1,2:(ncol(overview_cluster)-1)])-as.numeric(overview_cluster[orderpos2,2:(ncol(overview_cluster)-1)])
larger <- length(which(diff>0)) #In how many positions is the first one larger than the second one?
smaller <- length(which(diff<0)) #In how many positions is the second one larger than the first one?
if(smaller > larger){
# NOTE(review): when ord+1 is the last row, (ord+2):nrow(...) is a
# descending sequence and indexes one past the matrix (NA row) —
# the loop bound (nrow-1) makes ord+1 == nrow reachable. Confirm
# whether trailing all-zero rows always pad the matrix; if not,
# this swap can corrupt the ordering. TODO confirm.
subcloneswithinsample_order_new <- subcloneswithinsample_order_old[c(1:(ord-1),ord+1,ord,(ord+2):nrow(subcloneswithinsample_order_old)), ]
}
}
}
ord <- ord+1
}
}
subcloneswithinsample_order <- subcloneswithinsample_order_new
#print("Sub")
#print(i)
#SUBCLONE LOOP
j <- 1
for(j in 2:as.numeric(nrow(sample_clone_matrix))){ #Looping through the subclones within the sample.
#print("Here is j")
#print(j)
tick <- 0
if(j == 2){ #We're in the first position. This is the ALL-event.
space[1,1] <- subcloneswithinsample_order[j-1,1] #The name.
space[1,2] <- subcloneswithinsample_order[j-1,2] #The TC.
}
if(j != 2){
if(subcloneswithinsample_order[j-1,1] != "0"){
if(subcloneswithinsample_order[j-1,1] != "ALL"){ #We should not add it again.
maxspace <- which.max(space[,2]) #Finding the largest available space.
newname <- subcloneswithinsample_order[j-1,1] #The name of the new subclone.
newspace <- subcloneswithinsample_order[j-1,2] #The space of the new subclone.
full_newname <- paste(sample_clone_matrix[1,(3*i-2)],newname)
#print("Precisely before conditioned.")
if(newspace != "100"){
if(newname %in% word(theonlymothers[1,],2,3) == TRUE){ #The clone in question has a condition on it.
######################
#CONDITIONED SUBCLONE#
######################
#print("Conditioned")
newnamecolumn <- match(paste(current_sample,newname),theonlymothers[1,]) #Finding the column in theonlymothers that the daughter has.
maxname <- theonlymothers[2,newnamecolumn]
#subpart <- theonlymothers[2,word(theonlymothers[1,],2,3)==newname]
#subpart <- subpart[is.na(subpart) == FALSE] #All the other mothers in other samples.
if(is.na(newnamecolumn)==TRUE){
newnamecolumn <- match(newname,word(theonlymothers[1,],2,3))
}
maxname <- theonlymothers[2,newnamecolumn] #This is the name of the mother that it has to have.
#print("The maxname and the newname")
#print(maxname)
#print(newname)
if(maxname %in% space[,1] == TRUE){ #Does the mother exist in the sample in question?
#print("The mother exist in the sample")
maxspace <- match(maxname,space[,1]) #This is the new maxspace's row position in the space matrix.
maxspaceTC <- space[maxspace,2] #Added this since maxspace in the row above only gives the row position and not the actual TC.
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix.
space[s,2] <- newspace
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
#Added TC to the row below since we should compare TC:s. maxspace is just a row position.
#Added 0.1 here 200721 since rounding problems can occur when handling simulated data.
#print("Spaces")
#print(maxspaceTC)
#print(newspace)
if(as.numeric(maxspaceTC)+0.1 >= as.numeric(newspace)){ #There must be enough room left.
#print("There is room")
if(is.na(x) == FALSE){ #The maxname is in equalclones.
#print("Maxname is in equalclones")
if(is.na(y) == TRUE){ #The newname is not in equalclones.
#print("Newname is not")
true_maxsize <- file_samples_subclones[match(paste(current_sample,maxname),file_samples_subclones[,13]),11]
true_newsize <- file_samples_subclones[match(paste(current_sample,newname),file_samples_subclones[,13]),11]
#print(maxname)
#print(true_maxsize)
#print(newname)
#print(true_newsize)
if(true_maxsize!=true_newsize){ #Added 200920 since I got weird equalclones.
#if(as.numeric(newspace) != space[maxspace,2]){ #They are not of the same size.
#print("They are not of the same size.")
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_x] != "0"){ #I changed the i to i+e_x
eq_row <- match(equalclones[eq,e_x],space[,1])
if(is.na(eq_row) == FALSE){
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
}
eq <- eq+1
}
}else{ #This event should belong to the equalclones-subclone since they are of the same size.
#print("The new event should belong to equalclones for maxclone")
# if(newname %in% equalclones[2:nrow(equalclones),i] == FALSE){ #If it does not already belong to the equalclones of this sample.
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){
equalclones[eq,e_x] <- newname}
eq <- nrow(equalclones)
}
eq <- eq+1
}
}
}else if(as.numeric(newspace) == as.numeric(space[maxspace,2])){ #Newname does belong to equalclones as well as maxname.
#print("Newname is in equalclones as well")
#Is it in the same equalclones?
if(e_x == e_y){
# if(newname %in% equalclones[2:nrow(equalclones),i] == FALSE){ #If it does not already belong to the equalclones of this sample.
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){
equalclones[eq,e_x] <- newname}
eq <- nrow(equalclones)
}
eq <- eq+1
}
}
}else{ #Maxclone and new name is in equaclones. They are not of the same size.
#space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
#Added 200202.
#print("Newname is in equalclones as well")
#print("They are not of the same size")
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_x] != "0"){ #I changed the i to i+e_x
eq_row <- match(equalclones[eq,e_x],space[,1])
if(is.na(eq_row) == FALSE){
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
}
eq <- eq+1
}
}
}else{ #Maxname is not in equalclones.
#print("Maxname is not in equalclones")
thename <- space[maxspace,1]
therow <- match(thename,subcloneswithinsample_order_new[,1])
maxTC <- subcloneswithinsample_order_new[therow,2]
if(as.numeric(maxTC) == as.numeric(newspace)){ #The maxspace and the newclone are of equal size.
#print("Maxname and newname is of equal size.")
if(is.na(y) == FALSE){
#print("Newname is in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_y] == "0"){
if(maxname %in% equalclones[2:nrow(equalclones),e_y] == FALSE){ #Maxname is not in this equalclone column.
equalclones[eq,e_y] <- maxname} #Adding the maxname to the equalclones.
break
}
eq <- eq+1
}
}else{ #The newname does not exist in the equalclones for this sample.
#print("Newname is not in equalclones")
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matrix.
equalclones[2,i] <- newname
equalclones[3,i] <- thename
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- thename
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- thename
}
}
}
#print(space)
if(as.numeric(space[maxspace,2]) >= as.numeric(newspace)){ #Added = 210412.
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
}
#print(space)
}
}else{ #This is the case where the conditioned subclone does not have room for the new event.
# print("We could not allocate it to the conditioned place.")
# print(current_sample)
# print(space[space[,1]!="0",])
# print(maxname)
# print(maxspaceTC)
# print(newname)
# print(newspace)
Not_allocated_correctly[nac,1] <- current_sample
Not_allocated_correctly[nac,2] <- newname
Not_allocated_correctly[nac,3] <- newspace
nac <- nac+1
if(theonlymothers[3,newnamecolumn]!= "0" && theonlymothers[3,newnamecolumn] %in% space[,1]){ #New 200720.
#This a second conditioned clone.
#print("Second conditioned")
maxspace <- match(theonlymothers[3,newnamecolumn],space[,1])
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]
#Actually we do not want the other subclones to be allocated to the event since it is not possible in all samples any more.
}else{
maxspace <- which.max(space[,2]) #Finding the largest available space.
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]}
if(maxspaceTC == newspace && maxname!="ALL"){ # They are of equal size.
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
if(is.na(x) ==TRUE){
#print("The maxname is not in equalclones.")
if(is.na(y) == TRUE){
#print("The newname is not in equalclones either.")
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matrix.
equalclones[2,i] <- newname
equalclones[3,i] <- maxname #Changed "thename" to "maxname" 200720.
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- maxname
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- maxname
}
}else{
#print("Newname is in equalclones.")
#print(newname)
#print(maxname)
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_y] == "0"){
equalclones[eq,e_y] <- maxname #Adding the maxname to the equalclones.
break
}
eq <- eq+1
}
}
}else{
#print("Maxname is in equalclones.")
if(is.na(y) == TRUE){
#print("Newname is not in equalclones.")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
equalclones[eq,e_x] <- newname} #Adding the newname to the equalclones.
break
eq <- eq+1
}
}else{
#print("Newname is in equalclones as well.")
if(e_x != e_y){
#print("They are not in the same equalclones.")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){ #Maxname is not in this equalclone column.
equalclones[eq,e_x] <- newname} #Adding the maxname to the equalclones.
break
}
eq <- eq+1
}
equalclones[y,e_y] <- "0" #Removing the newname from its old position.
}
}
}
}else{
#Not of equal size.
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Uncommented 210912.
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,i] != "0"){
eq_row <- match(equalclones[eq,i],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
}
}
}else{
#print("It is not allocated yet.")
if(maxname %in% sample_clone_matrix[,(3*i-2)] == TRUE){ #The clone exist in the sample but has not been allocated yet. This only happens if they are equal in size.
#print("The mother has not been allocated yet but it exist in the biopsy.")
###################################################################
#The mother has not been allocated yet but it exist in the biopsy.#
###################################################################
rowofthemother <- match(maxname,subcloneswithinsample_order[,1]) #The row in which the mother exist in the subcloneswithinsample matrix.
themothername <- maxname
thedaughtername <- newname
if(themothername %in% word(theonlymothers[1,],2,3) == TRUE){ #The mother is conditioned.
#print("The mother is conditioned")
#print(subcloneswithinsample_order)
mothernamecolumn <- match(paste(current_sample,themothername),theonlymothers[1,]) #Finding the column in theonlymothers that the daughter has.
themothermothername <- theonlymothers[2,mothernamecolumn]
rowofthemothermother <- match(themothermothername,subcloneswithinsample_order[,1])
if(is.na(rowofthemothermother)==TRUE){
#print("The mothermother has not been allocated yet.")
#Added this since we do not get a tree otherwise.
maxspace <- which.max(space[,2])
rowofthemothermother <- match(space[maxspace,1],subcloneswithinsample_order[,1])
themothermothername <- space[maxspace,1]
}
}else{
#print("The mother is not conditioned.")
rowofthemothermother <- match(space[maxspace,1],subcloneswithinsample_order[,1])
themothermothername <- space[maxspace,1]
}
tick <- 1 #Just so that we know that we've been in this loop and that space[maxspace,1] outside the loop will be the mother to the mother.
#It may happen that the mother it should have here is smaller than the daughter. Switch positions.
if(as.numeric(subcloneswithinsample_order[rowofthemother,2]) < as.numeric(newspace)){
#print("The mother is smaller than the daughter.")
newspace <- subcloneswithinsample_order[as.numeric(rowofthemother),2] #Finding the new newspace.
rowofthemother <- match(newname,subcloneswithinsample_order[,1]) #The new row of the mother.
temp1 <- thedaughtername
temp2 <- themothername
themothername <- temp1 #Changing the name.
newname <- temp2
}
#Mother
#space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(subcloneswithinsample_order[rowofthemother,2])) #The mother is allocated to its mother.
space[s,1] <- themothername #Adding the new spacename and space size to the spacematrix belonging to the mother.
space[s,2] <- subcloneswithinsample_order[rowofthemother,2]
maxspaceTC <- subcloneswithinsample_order[rowofthemother,2]
s <- s+1
if(as.numeric(subcloneswithinsample_order[as.numeric(rowofthemothermother),2]) > as.numeric(subcloneswithinsample_order[as.numeric(rowofthemother),2])){
#print("The mothermother is larger than the mother.")
space[as.numeric(maxspace),2] <- (as.numeric(subcloneswithinsample_order[as.numeric(rowofthemothermother),2])-as.numeric(subcloneswithinsample_order[as.numeric(rowofthemother),2])) #The mother is allocated to its mother.
allocation_samples[match(maxname,allocation_samples[,1]),(i+1)] <- subcloneswithinsample_order[as.numeric(rowofthemothermother),1] #We have to add information about the mother's mother to the allocation matrix.
}else{
#print("The mother's mother and the mother are of equal size and should be equalclones together")
allocation_samples[match(maxname,allocation_samples[,1]),(i+1)] <- subcloneswithinsample_order[rowofthemothermother,1] #We have to add information about the mother's mother to the allocation matrix.
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(themothermothername,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The mothermother exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The mother exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
if(is.na(x) == FALSE){
#print("The mothermother is in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
if(themothername %in% equalclones[2:nrow(equalclones),e_x] == FALSE){ #Maxname is not in this equalclone column.
equalclones[eq,e_x] <- themothername} #Adding the mothername to the equalclones.
break
#eq <- nrow(equalclones)
}
eq <- eq+1
}
}else{ #The mothermother does not exist in the equalclones for this sample.
#print("The mothermother is not in equalclones yet. We now add it.")
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matris.
equalclones[2,i] <- themothermothername
equalclones[3,i] <- themothername
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- themothermothername
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- themothername
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- themothermothername
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- themothername
}
}
}
#Daughter
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(themothermothername,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The mothermother exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The mother exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
if(as.numeric(subcloneswithinsample_order[rowofthemother,2]) != as.numeric(newspace)){
#print("They are not of equal size.")
if(is.na(x) == TRUE){
#print("The mother is not in equalclones")
space[s-1,2] <- (as.numeric(space[s-1,2])-as.numeric(newspace)) #Replacing the old maxspace.
}else{
#print("The mother is in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_x] != "0"){
eq_row <- match(equalclones[eq,e_x],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
}
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix belonging to the mother.
space[s,2] <- newspace
}else{
#print("The mother and the newclone are of equal size")
#The newclone and maxname should be in equalclones.
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
if(is.na(y) == FALSE){
#print("Newname is in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_y] == "0"){
if(maxname %in% equalclones[2:nrow(equalclones),e_y] == FALSE){ #Maxname is not in this equalclone column.
equalclones[eq,e_y] <- maxname} #Adding the maxname to the equalclones.
eq <- nrow(equalclones)
}
eq <- eq+1
}
}else{ #The newname does not exist in the equalclones for this sample.
#print("The newname does not exist in the equalclones for this sample.")
thename <- maxname #200516 ???? F?rs?kte fixa till en sak.
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matris.
#print("Adding them")
equalclones[2,i] <- newname
equalclones[3,i] <- thename
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- thename
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- thename
}
}
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix belonging to the mother.
space[s,2] <- newspace
}
#Removing the mother from the matrix so that it is not added again.
rowtoremove <- match(maxname,subcloneswithinsample_order[,1])
#print("Here a row is removed")
#subcloneswithinsample_order[rowtoremove,] <- "0" #210203. Testar att tysta 210514.Avtystar 210716. Tystar igen 210729.
sameclones <- 1 #This is just a way to illustrate the fact that we have done this.
}else{ #Conditioned clone but the mother it has to have in another sample does not exist in this sample at all.
maxspace <- which.max(space[,2]) #Finding the largest available space.
maxspaceTC <- as.numeric(space[maxspace,2])
maxname <- space[maxspace,1]
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix.
space[s,2] <- as.numeric(newspace)
# print("The conditioned mother is not present in the sample.")
#Looking if there is other possible places for this event to be placed
#which better corresponds to earlier samples.
other <- 1
nej <- 0
for(other in 1:j){ #j is the latest event to be placed.
if(as.numeric(space[other,2]) >= as.numeric(newspace)){
if(space[other,1] %in% allocation_samples[match(space[s,1],allocation_samples[,1]),2:i]){
maxspace <- other
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]
nej <- 1
break
}
}
if(nej == 0){
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]
}
other <- other+1
}
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the maxname exist.
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)]) #The row (minus 1) where the newname exist.
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1)
e_x <- (i+length(samples_unique)*e) #The column where the maxname exist.
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e) #The column where the newname exist.
}
e <- e+1
}
# x <- match(space[maxspace,1],equalclones[2:nrow(equalclones),i])
# y <- match(newname,equalclones[2:nrow(equalclones),i])
if(maxname != "ALL"){
if(as.numeric(maxspaceTC) == as.numeric(newspace)){
if(is.na(x) == FALSE){ #The maxname is in equalclones.
#print("The maxname is in equalclones")
if(is.na(y) == TRUE){ #The newname is not.
#print("The newname is not in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
equalclones[eq,e_x] <- newname #Adding the newname to the equalclones for maxname.
break
}
eq <- eq+1
}
}else{
#print("The newname is in equalclones as well")
if(as.numeric(newspace) != space[maxspace,2]){
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_y] != "0"){ #Changed i to e_y.
eq_row <- match(equalclones[eq,e_y],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
}
}
}
}else{
if(is.na(x) == FALSE){
#print("The maxname is in equalclones")
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_x] != "0"){ #Changed i to e_x
eq_row <- match(equalclones[eq,e_x],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
}else{
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
}
}
}else{
#print("The maxname is ALL")
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace))
}
}
}
############################
#NOT A CONDITIONED SUBCLONE#
############################
}else{
#print("Not conditioned")
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix.
space[s,2] <- newspace
#Seeing if there is other possible places for this event to be placed
#which better corresponds to earlier samples.
other <- 1
nej <- 0
for(other in 1:j){ #j is the latest event to be placed.
if(as.numeric(space[other,2]) >= as.numeric(newspace)){
if(space[other,1] %in% allocation_samples[match(space[s,1],allocation_samples[,1]),2:i]){
maxspace <- other
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]
nej <- 1
break
}
}
if(nej == 0){
maxname <- space[maxspace,1]
maxspaceTC <- space[maxspace,2]
}
other <- other+1
}
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)])
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)])
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1) #Row
e_x <- (i+length(samples_unique)*e) #Column
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e)
}
e <- e+1
}
#x <- match(maxname,equalclones[2:nrow(equalclones),i])
#y <- match(newname,equalclones[2:nrow(equalclones),i])
#print(maxname)
#print(newname)
if(is.na(x) == FALSE){ #The maxname is in equalclones.
#print("Maxname is in equalclones")
if(is.na(y) == TRUE){ #The newname is not.
#print("Newname is not in equalclones")
true_maxsize <- file_samples_subclones[match(paste(current_sample,maxname),file_samples_subclones[,13]),11]
true_newsize <- file_samples_subclones[match(paste(current_sample,newname),file_samples_subclones[,13]),11]
#print(true_maxsize)
#print(true_newsize)
if(true_maxsize==true_newsize){ #Added 210707.
#print("They are of equal size")
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){
equalclones[eq,e_x] <- newname}
eq <- nrow(equalclones)
}
eq <- eq+1
}
}else{
#print("They are not equal in size.")
#Removing the TC from all of the events in the equalclones.
eq <- 2
breaking <- 1
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] != "0"){
eq_row <- match(equalclones[eq,e_x],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}}
}else{
#print("Newname is in equalclones as well")
#if(as.numeric(newspace) == as.numeric(space[maxspace,2])){ #Here we just compare the newspace with the space the maxspace has right now. They might not be equal.
#We cannot go around adding events to equalclones just based on this above. Changed it to the line below instead 200308.
#if(as.numeric(newspace) == as.numeric(subcloneswithinsample_order[maxspace,2])){
if(as.numeric(newspace) == as.numeric(subcloneswithinsample_order[maxspace,2])){
#print("They are are of equal size")
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){ #If it does not already belong to the equalclones of this sample.
eq <- 2
for(eq in 2:nrow(equalclones)){
if(equalclones[eq,e_x] == "0"){
if(newname %in% equalclones[2:nrow(equalclones),e_x] == FALSE){
equalclones[eq,e_x] <- newname}
eq <- nrow(equalclones)
}
eq <- eq+1
}
}
}else{
#space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
equalclones[y,e_y] <- "0" #Removing the newname from equalclones.
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,e_x] != "0"){
eq_row <- match(equalclones[eq,e_x],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
}
}
}else{
#print("Maxname is not in equalclones")
if(maxname != "ALL"){
maxnamerow <- match(maxname,subcloneswithinsample_order[,1])
maxspaceTC <- subcloneswithinsample_order[maxnamerow,2]
# print(subcloneswithinsample_order)
# print(maxname)
# print(maxspaceTC)
# print(newname)
# print(newspace)
if(as.numeric(newspace) == as.numeric(maxspaceTC)){
#print("Newclone and maxclone are of the same size.")
if(is.na(y) == FALSE){
#print("Newclone is in equalclones.")
equalclones[y,e_y] <- "0" #Removing it from its old place and placing it on a new one.
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matris.
equalclones[2,i] <- newname
equalclones[3,i] <- maxname
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- maxname
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- maxname
}
}else{
#print("Neither the maxclone nor the newclone is in equalclones.")
if(equalclones[2,i] == "0"){ #Adding them to the equalclones matris.
equalclones[2,i] <- newname
equalclones[3,i] <- maxname
}else if (equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+as.numeric(ncol(hundredpercentclones)))] <- maxname
}else if(equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] == "0"){
#print("Adding them")
equalclones[2,(i+2*as.numeric(ncol(hundredpercentclones)))] <- newname
equalclones[3,(i+2*as.numeric(ncol(hundredpercentclones)))] <- maxname
}
}
}else{
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
if(is.na(y) == FALSE){
#print("Newname is in equalclones")
equalclones[y,e_y] <- "0" #Removing it.
}
}
}else{
#print("Maxname is ALL.")
if(is.na(y) == FALSE){
#print("The daughter exist in an equalclones.")
equalclones[y,e_y] <- "0" #Removing it
}
space[maxspace,2] <- (as.numeric(space[maxspace,2])-as.numeric(newspace)) #Replacing the old maxspace.
}
}
}
}else{ #The newspace is 100.
if(space[1,1] == "ALL"){ #The ALL-space is now occupied by the new subclone.
space[1,2] <- "0"
}
space[s,1] <- newname #Adding the new spacename and space size to the spacematrix. We do not have to alter anything when dealing with hundredpercentclones.
space[s,2] <- newspace
} #100 %
}else{ #ALL
}
}else{ #"0"
}
#The point of these loops are to take into account cases where the newname is in an equalclone situation with
#an event in another sample and this event is also present in this sample but they are not equal here.
if(newname %in% equalclones == TRUE){ #The clone exists in equalcones.
#I try to take into account that we actually have multiple columns of equalclones for one single sample.
equalclones_multiples <- as.numeric(ncol(equalclones))/length(samples_unique)
e <- 0
for(e in 0:(equalclones_multiples-1)){
if(e == 0){
e_x <- NA #The column multiple in which it exist.
e_y <- NA
x <- NA
y <- NA
}
x_test <- match(maxname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)])
y_test <- match(newname,equalclones[2:nrow(equalclones),i+(length(samples_unique)*e)])
if(is.na(x_test) == FALSE){ #The maxname exist in an equalclones column for this sample.
x <- (x_test+1) #Row
e_x <- (i+length(samples_unique)*e) #Column
}
if(is.na(y_test) == FALSE){ #The newname exist in an equalclones column for this sample.
y <- (y_test+1)
e_y <- (i+length(samples_unique)*e)
}
e <- e+1
}
if(is.na(y) == TRUE && maxname != "ALL"){ #But not in this sample. Added ALL 201108.
if(equalclones[2,i] != "0"){ #There exists equalclones in this sample.
equalclonename <- equalclones[2,i]
equalclonepos <- match(equalclonename,subcloneswithinsample_order_new) #The position for the equalclone.
theTCforequalpos <- subcloneswithinsample_order_new[equalclonepos,2] #Changed to new.
if(as.numeric(theTCforequalpos) == as.numeric(newspace) && subcloneswithinsample_order_new[match(maxname,subcloneswithinsample_order_new[,1]),2]==subcloneswithinsample_order_new[match(newname,subcloneswithinsample_order_new[,1]),2]){ #The new clone and the clone in equalclones are equal in size.
#print("Den kom in hit")
h <- 1
for(h in 1:ncol(equalclones)){ #Looping through the columns.
n <- 1
u <- 1
t <- 2
for(n in 1:nrow(equalclones)){ #Looping through the rows.
if(equalclones[n,h] == equalclonename){ #We have found the equalclone in a particular sample.
u <- h
}
if(equalclones[n,h] == newname){ #We have found the newclone.
t <- h
}
if(t == u){ #The equalclone and the newclone actually exists together as equalclones in another sample.
o <- 1
for(o in 1:length(equalclones[,i])){ #We add the newclone to the equalclone.
if(o == 1){
#print("They exist together in another sample!")
}
if(equalclones[o,i] == "0"){
if(newname %in% equalclones[,i] == FALSE){
equalclones[o,i] <- newname
eq <- 2
for(eq in 2:nrow(equalclones)){ #I have to reduce the space for all of the events belonging to this subclone.
if(equalclones[eq,i] != "0"){ #Changed it to i instead of e_x since we did not have e_x for RMS6 B2. 200329.
eq_row <- match(equalclones[eq,i],space[,1])
space[eq_row,2] <- (as.numeric(space[eq_row,2])-as.numeric(newspace))
}
eq <- eq+1
}
#space[maxspace,2] <- (as.numeric(space[maxspace,2])+as.numeric(newspace)) #Resetting the space.
maxspace <- match(equalclonename, space[,1])
o <- nrow(equalclones)
}
}
o <- o+1
}
}
n <- n+1
}
h <- h+1
}
}
}
}
}
#print(space[maxspace,1])
#print(newname)
#print(allocation_samples[match(newname,allocation_samples[,1]),(i+1)])
if(tick == 0){
if(space[s,1] != "0"){
if(sameclones != 1){
#Treating the case when space[maxspace,2] = 100 % and the newspace as well. Then the motherclone and the daughterclone are both part of the base.
if(subcloneswithinsample_order[j-1,2] == "100"){
if(subcloneswithinsample_order[j-1,1] != "ALL"){
if(subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),2] == "100"){
if(subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1] != "ALL"){
newspace_name <- subcloneswithinsample_order[j-1,1]
maxspace_name <- subcloneswithinsample_order[match(space[maxspace,1],subcloneswithinsample_order[,1]),1]
allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- maxspace_name #Annoting the mother clone of each of the subclones within each sample.
allocation_samples[match(maxspace_name,allocation_samples[,1]),(i+1)] <- newspace_name #Annoting the mother clone of each of the subclones within each sample.
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annoting the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annoting the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annoting the mother clone of each of the subclones within each sample.
}
}else{allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- space[maxspace,1] #Annoting the mother clone of each of the subclones within each sample.
}
}else{
#if(themothername != "ALL"){
#allocation_samples[match(thedaughtername,allocation_samples[,1]),(i+1)] <- themothername}else{
allocation_samples[match(newname,allocation_samples[,1]),(i+1)] <- space[maxspace,1] #200330
#}
}
} #"0"
}else{
allocation_samples[match(space[s,1],allocation_samples[,1]),(i+1)] <- themothername
}
} #j != 1.#}
#print(space)
if(j == as.numeric(nrow(sample_clone_matrix))){ #We're at the end of a sample.
#print("Totalspace is added.")
#print(space)
totalspace[2:as.numeric(nrow(totalspace)),((i*2)-1):((i*2))] <- space
#t <- t+1
s <- 2 #Resetting s and space.
space <- matrix(0,50,2)
}else{s <- s+1}
j <- j+1
}
i <- i+1
}
assign("totalspace", totalspace, envir=globalenv()) #The equal clones.
assign("equalclones_after", equalclones, envir=globalenv()) #The equal clones.
assign("allocation_samples_updated", allocation_samples, envir=globalenv()) #The mother-daughter division are exported to the global environment.
assign("Not_allocated_correctly",Not_allocated_correctly,envir=globalenv())
#Treating cases where the alteration did not get placed in its correct mother.
nac <- Not_allocated_correctly[as.numeric(Not_allocated_correctly[,3])<10,]
i <- 1
for(i in 1:nrow(nac)){
col <- match(nac[i,1],allocation_samples[1,])
row <- match(nac[i,2],allocation_samples[,1])
mothercol <- match(paste(nac[i,1],nac[i,2]),theonlymothers[1,])
mothercond <- theonlymothers[2,mothercol]
allocation_samples[row,col] <- mothercond
i <- i+1
}
assign("allocation_samples_revised", allocation_samples, envir=globalenv())
#Cleaning the equalclones matrix.
if(length(equalclones[1:10,equalclones[2,]!="0"]) > 1){
equalclones_eq <- equalclones[1:20,equalclones[2,]!="0"]
if(is.null(ncol(equalclones_eq))==FALSE){
i <- 1
for(i in 1:ncol(equalclones_eq)){
sample <- equalclones_eq[1,i]
all_col <- match(sample,allocation_samples_updated[1,])
j <- 2
for(j in 2:nrow(equalclones_eq)){
yes <- 0
subclone <- equalclones_eq[j,i]
if(subclone!="0"){
subclone_row <- match(subclone,allocation_samples_updated[,1])
mother <- allocation_samples_updated[subclone_row,all_col]
if(mother %in% equalclones_eq[,i] == FALSE){
#print("This subclone did not get placed in any of the equalclones.")
k <- 3
for(k in 3:nrow(allocation_samples_updated)){
if(allocation_samples_updated[k,all_col] == subclone){
subclone_mother <- allocation_samples_updated[k,1]
if(subclone_mother != "0"){
if(subclone_mother %in% equalclones_eq[,i] == TRUE){
#print("This subclone got an equalclone as daughter.")
yes <- 1
}
}
}
k <- k+1
}
}else{
#print("The mother is in equalclones.")
yes <- 1
}
}
if(yes == 0 && equalclones_eq[j,i] != "0"){
#print("This subclone did not get placed in any of these equalclones nor got any of them as daughter.")
rm <- match(paste(equalclones_eq[1,i],equalclones_eq[j,i]),file_samples_subclones[,13])
rs <- file_samples_subclones[rm,11]
if(as.numeric(rs) <= 50){
equalclones_eq[j,i] <- "0"
if(j != nrow(equalclones_eq) && as.numeric(rs) <= 50){ #If under 50 they have to be inside each other.
equalclones_eq[j:(nrow(equalclones_eq)-1),i] <- equalclones_eq[(j+1):nrow(equalclones_eq),i]
}
yes <- 0}
}
j <- j+1
}
i <- i+1
}
assign("equalclones_cleaned", equalclones_eq, envir=globalenv()) #The equal clones.
equalclones <- equalclones_eq
}
}
###################################################################
#Treating the case when we have many clones which have the same TC#
###################################################################
i <- 2
j <- 1
therows <- nrow(equalclones)
for(j in 1:as.numeric(ncol(equalclones))){ #Looping through the samples.
v <- equalclones[,j] #Extracting the column.
v <- cbind(v,matrix(0,nrow(equalclones),1)) #Adding a new column to it. Changed from "rowsofhundred" to nrow(equalclones).
clonenumber <- length(v[v!="0"])
v <- v[v[,1] !="0",]
if(clonenumber > 2){ #If we have more than 1 subclone within the sample with equal TC % who are placed inside each other.
k <- 2
for(k in 2:clonenumber){ #Giving alterations in all combinations.
mother <- paste(v[1,1],v[k,1])
l <- 2
for(l in 2:clonenumber){ #Looping through the clones.
if(k != l){
daughter <- paste(v[1,1], v[l,1])
mothercolumn <- match(mother,eventmatrix[1,])
daughtercolumn <- match(daughter,eventmatrix[1,])
m <- 2
for(m in 2:as.numeric(nrow(eventmatrix))){ #Looping through the events.
if(eventmatrix[m,mothercolumn] == "1"){
eventmatrix[m,daughtercolumn] <- "1"
}else if(eventmatrix[m,daughtercolumn] == "1"){ #Added 200821.
eventmatrix[m,mothercolumn] <- "1"
}
m <- m+1
}
}
l <- l+1
}
k <- k+1
}
}
j <- j+1
}
assign("eventmatrix_part1", eventmatrix, envir=globalenv())
##########################################################
#Giving the daughter subclones their motherclone's events#
##########################################################
motherdaughterevent <- matrix(0,as.numeric(nrow(allocation_samples)),3)
motherdaughterevent[,1] <- allocation_samples[,1]
motherdaughterevent_new <- matrix(0,as.numeric(nrow(allocation_samples)),3) #The one used for the order.
eventmatrix_original <- eventmatrix
motherdaughter <- matrix(0,50,2)
themedian <- as.numeric(nrow(medianmatrix))
j <- 2
s <- 1
for(j in 2:as.numeric(ncol(allocation_samples))){ #Looping through the columns.
motherdaughterevent[,2] <- allocation_samples[,j] #Extracting that particular column.
motherdaughterevent[1,3] <- "100" #Setting the subclone name to "100" just so that it will stay where it is.
#print(motherdaughterevent)
i <- 2
for(i in 2:as.numeric(nrow(allocation_samples))){ #Looping through the rows.
if(motherdaughterevent[i,2] != "ALL"){
if(motherdaughterevent[1,2] != "ALL"){
mothername <- paste(motherdaughterevent[1,2],motherdaughterevent[i,2])
daughtername <- paste(motherdaughterevent[1,2],motherdaughterevent[i,1])
}else{
mothername <- "0"
daughtername <- "0"}
}else{mothername <- "ALL"
daughtername <- "ALL"}
if(motherdaughterevent[i,2] != "0"){ #Adding the median TC of the mother clone to the matrix.
samplecolumn <- match(mothername,medianmatrix[1,])
theTC <- medianmatrix[themedian,samplecolumn]
samplecolumn_daughter <- match(daughtername,medianmatrix[1,])
theTC_daughter <- medianmatrix[themedian,samplecolumn_daughter]
}else{theTC <- "0"
theTC_daughter <- "0"}
motherdaughterevent[i,3] <- theTC_daughter #The median TC for that subclone in that particular sample.
i <- i+1
}
motherdaughter_order_before <- motherdaughterevent[order(as.numeric(motherdaughterevent[1:nrow(motherdaughterevent),3]), decreasing = TRUE),]
motherdaughter_totalspace <- as.matrix(totalspace[,(j-1)*2-1]) #The order obtained from totalspace.
motherdaughter_totalspace <- as.matrix(motherdaughter_totalspace[motherdaughter_totalspace != "0",]) #Removing the zero rows.
a <- 2
for(a in 2:as.numeric(nrow(motherdaughter_totalspace))){ #Looping through the order we should have.
clone <- motherdaughter_totalspace[a,1]
clonerow <- match(clone,motherdaughterevent)
# print("Här")
# print(motherdaughter_totalspace)
# print(clone)
# print(motherdaughterevent)
# print(clonerow)
# print(a)
motherdaughterevent_new[a,] <- motherdaughterevent[clonerow,] #New matrix in which the correct order is saved.
a <- a+1
}
motherdaughter_order <- as.matrix(motherdaughterevent_new)
motherdaughter_order[1,2] <- motherdaughterevent[1,2] #In this position we want to to have the sample name.
print(motherdaughter_order)
i <- 2
for(i in 2:as.numeric(nrow(allocation_samples))){ #Looping through the subclones.
eq <- 0
if(motherdaughter_order[i,2] != "0"){ #The subclone does not exist in that particular sample.
if(motherdaughter_order[i,2] != "ALL"){ #The subclone has already gotten these alterations.
daughter_name <- paste(motherdaughter_order[1,2],motherdaughter_order[i,1]) #The name of the subclone in a particular sample.
daughter_column <- match(daughter_name,eventmatrix[1,]) #The corresponding column in the eventmatrix.
mother_name <- paste(motherdaughter_order[1,2],motherdaughter_order[i,2]) #The motherclone.
mother_column <- match(mother_name,eventmatrix[1,]) #The column in the eventmatrix corresponding to the motherclone.
col <- match(word(daughter_name,1),equalclones[1,]) #Changed the equalclones_new here to just equalclones.
if(is.na(col) == FALSE){
if(word(daughter_name,2,3) %in% equalclones[,col]){
#It is part of equalclones in this sample. Then the other ones here should also have this mother.
print("It is in equalclones.")
eq <- 1
}
}
if(motherdaughter_order[1,2]=="16569_01B"){
print("Motherdaughtername")
print(mother_name)
print(daughter_name)}
k <- 2
for(k in 2:as.numeric(nrow(eventmatrix))){
if(eventmatrix[k,mother_column] == "1"){
eventmatrix[k,daughter_column] <- "1"
if(eq == 1){#Equalclones.
l <- 2
sub <- equalclones[equalclones[,col]!="0",col]
for(l in 2:length(sub)){
if(sub[1]=="16569_01B"){
print("EQ")
print(paste(sub[1],sub[l]))}
eq_column <- match(paste(sub[1],sub[l]),eventmatrix[1,])
eventmatrix[k,eq_column] <- "1"
l <- l+1
}
}
}
k <- k+1
}
}
}
i <- i+1
}
motherdaughterevent <- matrix(0,as.numeric(nrow(allocation_samples)),3)
motherdaughterevent[,1] <- allocation_samples[,1]
motherdaughterevent_new <- matrix(0,as.numeric(nrow(allocation_samples)),3) #The one used for the order.
j <- j+1
}
# eq_test <- matrix(0,nrow(equalclones),1)
# eq_test[1,1] <- "4240_15"
# eq_test[2,1] <- "Subclone_ A"
# eq_test[3,1] <- "Subclone_ D"
# equalclones <- cbind(equalclones,eq_test)
# print(equalclones)
assign("eventmatrix_part2", eventmatrix, envir=globalenv())
###################################################################
#Treating the case when we have many clones which have the same TC#
###################################################################
i <- 2
j <- 1
therows <- nrow(equalclones)
for(j in 1:as.numeric(ncol(equalclones))){ #Looping through the samples.
v <- equalclones[,j] #Extracting the column.
v <- cbind(v,matrix(0,nrow(equalclones),1)) #Adding a new column to it. Changed from "rowsofhundred" to nrow(equalclones).
clonenumber <- length(v[v!="0"])
v <- v[v[,1] !="0",]
if(clonenumber > 2){ #If we have more than 1 subclone within the sample with equal TC % who are placed inside each other.
k <- 2
for(k in 2:clonenumber){ #Giving alterations in all combinations.
mother <- paste(v[1,1],v[k,1])
l <- 2
for(l in 2:clonenumber){ #Looping through the clones.
if(k != l){
daughter <- paste(v[1,1], v[l,1])
mothercolumn <- match(mother,eventmatrix[1,])
daughtercolumn <- match(daughter,eventmatrix[1,])
m <- 2
for(m in 2:as.numeric(nrow(eventmatrix))){ #Looping through the events.
if(eventmatrix[m,mothercolumn] == "1"){
eventmatrix[m,daughtercolumn] <- "1"
}else if(eventmatrix[m,daughtercolumn] == "1"){ #Added 200821.
eventmatrix[m,mothercolumn] <- "1"
}
m <- m+1
}
}
l <- l+1
}
k <- k+1
}
}
j <- j+1
}
eventmatrix_new <- matrix(0,(as.numeric(nrow(eventmatrix))-1), (as.numeric(ncol(eventmatrix))-1)) #Skapar en ny h?ndelsematris d?r vi bara har med 1:orna och 0:orna.
eventmatrix_new <- eventmatrix[2:as.numeric(nrow(eventmatrix)),2:as.numeric(ncol(eventmatrix))]
eventmatrix_new <- as.matrix(eventmatrix_new)
rownames(eventmatrix_new) <- eventmatrix[2:as.numeric(nrow(eventmatrix)),1] #L?gger till radnamnen och kolumnnamnen till den nya matrisen.
colnames(eventmatrix_new) <- eventmatrix[1,2:as.numeric(ncol(eventmatrix))]
eventmatrix_new <- t(eventmatrix_new)
stop.time <- Sys.time()
print("Execution time")
print(stop.time-start.time)
return(eventmatrix_new)
}
#Splitting the input file.
splitdata <- function(file,name,ord){
  #Extract the rows of `file` belonging to the tumor called `name`.
  #
  #Args:
  #  file: matrix/data.frame whose column 1 is the tumor id, column 2 the
  #        sample name and column 3 the chromosome.
  #  name: tumor id to extract.
  #  ord:  optional; if TRUE the file is first sorted by tumor, sample and
  #        chromosome, and sample names are whitespace-trimmed.
  #
  #Returns: the contiguous block of rows for that tumor (a single row
  #  collapses to a vector, matching base matrix subsetting).
  if(missing(ord)==FALSE){
    if(ord== TRUE){
      file <- file[order(file[,1],file[,2],file[,3]),] #Ordering the matrix by tumor and then by sample and chromosome.
      file[,2] <- trimws(file[,2], which = "both", whitespace = "[ \t\r\n]")
    }
  }
  samples <- matrix(0,100,2) #Start (col 1) and end (col 2) row of each tumor block.
  rownames(samples) <- c(1:100)
  file <- file[is.na(file[,1])==FALSE,] #Dropping rows with a missing tumor id.
  s <- 1
  for(k in seq_len(nrow(file))){ #Looping over all rows, recording block boundaries.
    if(k == 1){ #The first position opens the first block.
      samples[s,1] <- k
      rownames(samples)[s] <- file[k,1]
    }
    if(k != 1){ #A change of tumor id closes one block and opens the next.
      if(file[k-1,1] != file[k,1]){
        if(k != nrow(file)){
          samples[s,2] <- k-1 #End position.
          s <- s+1
          samples[s,1] <- k
          rownames(samples)[s] <- file[k,1]}}
    }
    if(k == nrow(file)){ #Last row: close the open block (guard k > 1 so a
      #one-row file does not index file[0,1], which would error).
      if(k > 1 && file[k-1,1] != file[k,1]){
        samples[s,2] <- k-1 #End position of the previous block.
        s <- s+1
        samples[s,1] <- k #The last row is a block of its own.
        samples[s,2] <- k
        rownames(samples)[s] <- file[k,1]
      }else{
        samples[s,2] <- k}
    }
  }
  #Locate the requested tumor once (the original looped over all 100 rows
  #recomputing the same match).
  tumorname <- match(name,rownames(samples))
  datasegment <- file[samples[tumorname,1]:samples[tumorname,2],] #Extracting the data for that particular tumor from the large segment file.
  return(datasegment)
}
#Adding a stem to the data.
stem <- function(eventmatrix,co,root){
  #Append a root row to the binary event matrix.
  #
  #Args:
  #  eventmatrix: subclone x event matrix of 0/1 values (coerced to numeric).
  #  co: unused; kept for backward compatibility with existing callers.
  #  root: "Stem"   -> add a row carrying every event shared by ALL subclones,
  #        "Normal" -> add an all-zero row (a normal cell),
  #        "None"   -> return the matrix unchanged.
  #
  #Returns: the (possibly extended) numeric event matrix.
  class(eventmatrix) <- "numeric"
  eventmatrix_new <- eventmatrix
  if(root == "Stem"){
    stemroot <- matrix(0, 1, as.numeric(ncol(eventmatrix_new)))
    #An event belongs to the stem when every subclone carries it, i.e. the
    #column sum equals the number of rows (equivalent to mean == 1 for 0/1
    #data, but computed without an explicit per-column loop).
    stemroot[1, colSums(eventmatrix_new) == nrow(eventmatrix_new)] <- 1
    eventmatrix_new <- rbind(eventmatrix_new,stemroot)
    rownames(eventmatrix_new)[nrow(eventmatrix_new)] <- "Stem"}
  if(root == "Normal"){
    M <- matrix(0, 1, as.numeric(ncol(eventmatrix_new))) #A normal cell carries no events.
    eventmatrix_new <- rbind(eventmatrix_new,M)
    rownames(eventmatrix_new)[as.numeric(nrow(eventmatrix_new))] <- "Normal"}
  if(root == "None"){
    eventmatrix_new <- eventmatrix_new
  }
  return(eventmatrix_new)
}
#Transform the file into phyDat format.
phydatevent <- function(excelfil){
  #Convert an event table into phangorn's phyDat format, using the binary
  #character states 0/1 and treating "0" as the ambiguous state.
  event_mat <- as.matrix(excelfil)
  phyDat(event_mat, type = "USER", levels = c(0, 1), ambiguity = "0")
}
#Maximum Likelihood
ml_tree <- function(Eventmatrix,root) {
  #Maximum-likelihood tree: build a neighbour-joining starting tree from
  #Hamming distances, root it at the given outgroup, then optimise branch
  #lengths under the Jukes-Cantor model.
  nj_start <- NJ(dist.hamming(Eventmatrix))
  nj_start <- root(nj_start, outgroup = root, resolve.root = TRUE)
  fit <- pml(nj_start, Eventmatrix) #An object of class pml.
  optim.pml(fit, model = "JC", optEdge = TRUE)
}
#Maximum parsimony
mp_tree <- function(Eventmatrix,root){
  #Maximum-parsimony tree via the parsimony ratchet (Nixon 1999), scoring
  #with Fitch's algorithm; returns the best tree found (minimum parsimony
  #score), rooted at the outgroup, with branch lengths assigned by ACCTRAN.
  best <- pratchet(Eventmatrix, start = NULL, method = "fitch", maxit = 2000,
                   k = 10, trace = 1, all = FALSE, rearrangements = "TBR",
                   perturbation = "ratchet")
  best <- root(best, outgroup = root, resolve.root = TRUE)
  acctran(best, Eventmatrix) #Edge lengths fulfilling the ACCTRAN criterion.
}
#Visualising the MP-tree.
MP_treeplot <- function(MP_tree,limitmp,col){
  #Plot the maximum-parsimony tree with ggtree. When col == "col" the tips
  #are coloured by placement certainty, read from the global `certainty`
  #matrix ("1 solution" vs "> 1 solution"); otherwise all tips are black.
  #The plot is printed and returned.
  if(col=="col"){
    branches <- list(
      certain_branches = c(certainty[certainty[,2]=="1 solution",1],
                           "Stem","Normal"),
      uncertain_branches = certainty[certainty[,2]=="> 1 solution",1])
    grouped_tree <- groupOTU(MP_tree,branches)
    plt <- ggplot(grouped_tree) +
      geom_tree(size=1) +
      geom_tiplab(size=4,aes(color = factor(group)))
    plt <- plt + theme_tree() + limitmp +
      scale_color_manual(values=c(certain_branches = "#FC4E07",
                                  uncertain_branches="darkgreen")) +
      theme(plot.title = element_text(hjust = 0.5, size = (14),
                                      color = "black"),
            legend.position = "none")
  }else{
    plt <- ggplot(MP_tree) + geom_tree() + geom_tiplab(size=4, color = "black")
    plt <- plt + theme_tree() + limitmp +
      theme(plot.title = element_text(hjust = 0.5, size = (14),
                                      color = "black"))
  }
  print(plt)
  return(plt)
}
#Visualising the ML-tree.
ML_treeplot <- function(ML_tree,limitml,col){
  #Plot the maximum-likelihood tree (the `tree` element of a pml fit) with
  #ggtree. When col == "col" the tips are coloured by placement certainty,
  #read from the global `certainty` matrix; otherwise all tips are black.
  #The plot is printed and returned.
  ml_phylo <- ML_tree$tree
  if(col=="col"){
    branches <- list(
      certain_branches = c(certainty[certainty[,2]=="1 solution",1],
                           "Stem","Normal"),
      uncertain_branches = certainty[certainty[,2]=="> 1 solution",1])
    grouped_tree <- groupOTU(ml_phylo,branches)
    plt <- ggplot(grouped_tree) +
      geom_tree(size=1) +
      geom_tiplab(size=4,aes(color = factor(group)))
    plt <- plt + theme_tree() + limitml +
      scale_color_manual(values=c(certain_branches = "#FC4E07",
                                  uncertain_branches="darkgreen")) +
      theme(plot.title = element_text(hjust = 0.5, size = (14),
                                      color = "black"),
            legend.position = "none")
  }else{
    plt <- ggplot(ml_phylo) + geom_tree() + geom_tiplab(size=4, color = "black")
    plt <- plt + theme_tree() + limitml +
      theme(plot.title = element_text(hjust = 0.5, size = (14),
                                      color = "black"))
  }
  print(plt)
  return(plt)}
#Making new subclones.
subclones <- function(EM_test,file_samples_subclones,root,possible_mothers,cutoff,names){
if(missing(root)==TRUE){root <- "Normal"} #The default is to root the tree in a normal cell.
EM_newnames <- unique(EM_test) #Finding all unique rows in the EM i.e. all subclones that have different sets of mutations.
clonenames_new <- matrix(0,(as.numeric(nrow(EM_newnames))*2),500) #Creating a new matrix that will contain the new subclone names and which former subclones it includes.
samples_all <- t(as.matrix(unique(datasegment[,2]))) #A matrix containing all unique samples.
#samples_all <- t(as.matrix(unique(file_samples_subclones[,2]))) #A matrix containing all unique samples.
samples <- t(as.matrix(samples_all[samples_all != "ALL"]))
sampleTC <- matrix(0,1,ncol(samples))
sampleTC[1,1:ncol(samples)] <- "100"
i <- 1
l <- 2
k <- 1
for(i in 1:as.numeric(nrow(EM_newnames))){ #Looping through each of the unique subclones.
uniquenamerow <- match(EM_newnames[i,1],file_samples_subclones[,13]) #Finding the position of the subclone.
uniquenameTC <- file_samples_subclones[uniquenamerow,11] #Finding the TC of the subclone.
j <- 1
for(j in 1:as.numeric(nrow(EM_test))){ #Every unique subclone is to be compared with the others.
if(all(EM_newnames[i,] == EM_test[j,]) == TRUE){ #They have to include the same events.
clonenames_new[k,l] <- rownames(EM_test)[j] #Saving the subclone name to the matrix.
if(rownames(EM_test)[j] != "ALL"){
column <- match(word(rownames(EM_test)[j],1),sample_clone_matrix[1,])
row <- match(word(rownames(EM_test)[j],2,3),sample_clone_matrix[,column])
theTC <- sample_clone_matrix[row,(column+1)] #Finding the TC for the subclone.
}else{theTC <- "100"}
clonenames_new[(k+1),l] <- theTC #Saving the TC below its subclone name in the matrix.
l <- l+1
}
j <- j+1
}
l <- 2
k <- k + 2
i <- i+1
}
m <- 1
for(m in 1:(as.numeric(nrow(clonenames_new))/2)){ #Calculating the mean of all of the subclones within the new subclones.
#print(sum(as.numeric(clonenames_new[2*m,2:ncol(clonenames_new)])))
clonenames_new[2*m,1] <- mean(as.numeric(clonenames_new[2*m,clonenames_new[2*m,] != 0]))
clonenames_new[(2*m-1),1] <- mean(as.numeric(clonenames_new[2*m,clonenames_new[2*m,] != 0]))
m <- m+1
}
#Giving the new subclone names. The order is determined based on the subclones' mean TC:s.
clonenames_new_order <- clonenames_new[order(as.numeric(clonenames_new[,1]), decreasing = TRUE),] #Ordering the subclones based on their TC.
if(nrow(clonenames_new)/2 < 20){
newnames <- c("Subclone A", "Subclone B","Subclone C","Subclone D","Subclone E","Subclone F","Subclone G","Subclone H","Subclone I","Subclone J","Subclone K","Subclone L","Subclone M","Subclone N","Subclone O","Subclone P", "Subclone Q","Subclone R","Subclone S","Subclone T","Subclone U","Subclone V","Subclone X","Subclone Y","Subclone Z",
"Subclone ZA","Subclone ZB","Subclone ZC", "Subclone ZD","Subclone ZE", "Subclone ZF", "Subclone ZG", "Subclone ZH", "Subclone ZI", "Subclone ZJ", "Subclone ZK", "Subclone ZL", "Subclone ZM", "Subclone ZN", "Subclone ZO", "Subclone ZP", "Subclone ZQ", "Subclone ZR", "Subclone ZS", "Subclone ZT", "Subclone ZU", "Subclone ZV", "Subclone ZX","Subclone ZY", "Subclone ZZ",
"Subclone ZZA","Subclone ZZB","Subclone ZZC", "Subclone ZZD","Subclone ZZE", "Subclone ZZF", "Subclone ZZG", "Subclone ZZH", "Subclone ZZI", "Subclone ZZJ", "Subclone ZZK", "Subclone ZZL", "Subclone ZZM", "Subclone ZZN", "Subclone ZZO", "Subclone ZZP", "Subclone ZZQ", "Subclone ZZR", "Subclone ZZS", "Subclone ZZT", "Subclone ZZU", "Subclone ZZV", "Subclone ZZX","Subclone ZZY", "Subclone ZZZ",
"Subclone ZZZA","Subclone ZZZB","Subclone ZZZC", "Subclone ZZZD","Subclone ZZZE", "Subclone ZZZF", "Subclone ZZZG", "Subclone ZZZH", "Subclone ZZZI", "Subclone ZZZJ", "Subclone ZZZK", "Subclone ZZZL", "Subclone ZZZM", "Subclone ZZZN", "Subclone ZZZO", "Subclone ZZZP", "Subclone ZZZQ", "Subclone ZZZR", "Subclone ZZZS", "Subclone ZZZT", "Subclone ZZZU", "Subclone ZZZV", "Subclone ZZZX","Subclone ZZZY", "Subclone ZZZZ")
}else{
newnames <- c(seq(1:1000))
}
if(missing(names)==FALSE){
if(names=="numbers"){
newnames <- c(seq(1:1000))
}else if(names=="letters"){
newnames <- c("A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","X","Y","Z","ZA","ZB","ZC","ZD","ZE","ZF","ZG","ZH","ZI","ZJ","ZK","ZL","ZM","ZN","ZO","ZP","ZQ","ZR","ZS","ZT","ZU","ZV","ZX","ZY","ZZ",
"AAA","BBB","CCC","DDD","EEE","FFF","GGG","HHH")
}else if(names=="subclone"){
newnames <- c("Subclone A", "Subclone B","Subclone C","Subclone D","Subclone E","Subclone F","Subclone G","Subclone H","Subclone I","Subclone J","Subclone K","Subclone L","Subclone M","Subclone N","Subclone O","Subclone P", "Subclone Q","Subclone R","Subclone S","Subclone T","Subclone U","Subclone V","Subclone X","Subclone Y","Subclone Z",
"Subclone ZA","Subclone ZB","Subclone ZC", "Subclone ZD","Subclone ZE", "Subclone ZF", "Subclone ZG", "Subclone ZH", "Subclone ZI", "Subclone ZJ", "Subclone ZK", "Subclone ZL", "Subclone ZM", "Subclone ZN", "Subclone ZO", "Subclone ZP", "Subclone ZQ", "Subclone ZR", "Subclone ZS", "Subclone ZT", "Subclone ZU", "Subclone ZV", "Subclone ZX","Subclone ZY", "Subclone ZZ",
"Subclone ZZA","Subclone ZZB","Subclone ZZC", "Subclone ZZD","Subclone ZZE", "Subclone ZZF", "Subclone ZZG", "Subclone ZZH", "Subclone ZZI", "Subclone ZZJ", "Subclone ZZK", "Subclone ZZL", "Subclone ZZM", "Subclone ZZN", "Subclone ZZO", "Subclone ZZP", "Subclone ZZQ", "Subclone ZZR", "Subclone ZZS", "Subclone ZZT", "Subclone ZZU", "Subclone ZZV", "Subclone ZZX","Subclone ZZY", "Subclone ZZZ",
"Subclone ZZZA","Subclone ZZZB","Subclone ZZZC", "Subclone ZZZD","Subclone ZZZE", "Subclone ZZZF", "Subclone ZZZG", "Subclone ZZZH", "Subclone ZZZI", "Subclone ZZZJ", "Subclone ZZZK", "Subclone ZZZL", "Subclone ZZZM", "Subclone ZZZN", "Subclone ZZZO", "Subclone ZZZP", "Subclone ZZZQ", "Subclone ZZZR", "Subclone ZZZS", "Subclone ZZZT", "Subclone ZZZU", "Subclone ZZZV", "Subclone ZZZX","Subclone ZZZY", "Subclone ZZZZ")
}
}
i <- 1
s <- 1
for(i in 1:(nrow(clonenames_new_order)/2)){ #Looping through all of the new subclones and giving them their new names.
#print(i)
if(clonenames_new_order[2*i-1,2] != "ALL"){
clonenames_new_order[2*i-1,1] <- newnames[s]
s <- s+1
}else{
clonenames_new_order[2*i-1,1] <- "Stem"
clonenames_new_order[2*i-1,2:(ncol(samples)+1)] <- samples
clonenames_new_order[2*i,1] <- "100"
clonenames_new_order[2*i,2:(ncol(samples)+1)] <- sampleTC
rowofall <- match("ALL",rownames(EM_newnames))
rownames(EM_newnames)[rowofall] <- "Stem"
}
if(clonenames_new_order[2*i-1,2] == "Normal"){
clonenames_new_order[2*i-1,1] <- "Normal"
}
i <- i+1
}
#Adding an "ALL" cell to the clonenames_new_order matrix in the cases where we only have 2 subclones. Otherwise we will not be able to construct any phylogenetic trees.
if(as.numeric(nrow(clonenames_new_order))/2 < 2){ #Changed it to 2 rather than 3.
ALL <- matrix(0,2,ncol(clonenames_new_order))
ALL[1,1] <- "ALL"
ALL[1,2:(ncol(samples)+1)] <- samples
ALL[2,1] <- "100"
ALL[2,2:(ncol(samples)+1)] <- sampleTC
clonenames_new_order <- rbind(ALL,clonenames_new_order)
print("Warning message: Your dataset only contain two subclones. An ALL subclone has been added in order to be able to reconstruct a phylogenetic tree. This has the same events as Stem.")
}
assign("clonenames_new_order",clonenames_new_order,envir = globalenv())
#Creating the new event matrix with the new subclones.
i <- 1
EM_saved <- EM_newnames
for(i in 1:nrow(EM_newnames)){
therow <- which(clonenames_new_order == rownames(EM_newnames)[i], arr.ind = T)
therow <- therow[1]
rownames(EM_newnames)[i] <- clonenames_new_order[therow,1]
i <- i+1
}
EM_test_newnames <- EM_newnames
#root <- "Stem"
#root <- "Normal"
if(root != "Stem"){
EM_test_newnames <- stem(EM_test_newnames,stem_co,root) #Adding the root to the event matrix.
allrow <- match("Stem",rownames(EM_test_newnames))
if(is.na(allrow) == TRUE){
allrow <- match("ALL",rownames(EM_test_newnames))
}
thesumofstem <- sum(EM_test_newnames[allrow,])
kvoten <- thesumofstem/(ncol(EM_test_newnames))
#print(thesumofstem)
#print(ncol(EM_test_newnames))
if(kvoten < 0.5){
print("Warning message: The stem is very long compared to the entire data set. Maybe you should root the tree in the stem instead of a Normal cell?")
print("Warning message: If the normal cell is too different from the subclones, the plot viewer might show a graph where the tip labels drift off from the tips")
}
}
#else{EM_test_newnames <- stem(EM_test_newnames,stem_co,root) } #Adding the root to the event matrix.
#Multiple solutions?
#Placing all possible mothers for each subclone in a single row for each biopsy.
possible_mothers <- possible_mothers_new
compr <- possible_mothers[2:nrow(possible_mothers),]
i <- 1
for(i in 1:ncol(possible_mothers)){
nonzero <- compr[possible_mothers[2:nrow(possible_mothers),i]!="0",i]
if(length(nonzero)!=0){
possible_mothers[2:(length(nonzero)+1),i] <- nonzero
}
i <- i+1
}
i <- 1
for(i in 1:ncol(possible_mothers)){
if(possible_mothers[1,i]=="0"){ #This is an extra column with possible mothers.
if(possible_mothers[2,i-1]=="0"){
possible_mothers[2:nrow(possible_mothers),i-1] <- possible_mothers[2:nrow(possible_mothers),i]
}else{
possible_mothers[3:nrow(possible_mothers),i-1] <- possible_mothers[2:(nrow(possible_mothers)-1),i]
}
}
i <- i+1
}
possible_mothers_compressed <- possible_mothers[,possible_mothers[1,]!= "0"]
#View(possible_mothers)
#Removing mothers that are not possible in all samples.
i <-1
for(i in 1:ncol(possible_mothers_compressed)){
pos <- which(word(possible_mothers_compressed[1,i],2,3) == word(possible_mothers_compressed[1,],2,3))
same_clone <- as.matrix(possible_mothers_compressed[,pos])
#print("Same_clone")
#print(as.matrix(same_clone))
tbl_clone <- table(same_clone[2:nrow(same_clone),])
tbl_clone <- tbl_clone[tbl_clone<ncol(same_clone)]
if(length(tbl_clone)>0){
j <- 1
for(j in 1:length(pos)){
if(length(possible_mothers_compressed[possible_mothers_compressed[,pos[j]]%in%names(tbl_clone),pos[j]])!=0){ #At least one should be removed.
k <- 2
for(k in 2:nrow(possible_mothers_compressed)){
if(possible_mothers_compressed[k,pos[j]]%in%names(tbl_clone)){
possible_mothers_compressed[k:(nrow(possible_mothers_compressed)-1),pos[j]] <- possible_mothers_compressed[(k+1):nrow(possible_mothers_compressed),pos[j]]
}
k <- k+1
}
#possible_mothers_compressed[possible_mothers_compressed[,pos[j]]%in%names(tbl_clone),pos[j]] <- "0"
}
j <- j+1
}
}
i <- i+1
}
#View(possible_mothers_compressed)
#Sometimes we do not find a mother possible in all samples just because in some samples we have equalclones.
i <- 1
for(i in 1:ncol(possible_mothers_compressed)){
if(possible_mothers_compressed[2,i]=="0"){
pos <- match(possible_mothers_compressed[1,i],theonlymothers[1,])
if(is.na(pos)==FALSE){
possible_mothers_compressed[2,i] <- theonlymothers[2,pos]
}
}
i <- i+1
}
#View(possible_mothers_compressed)
#Creating a matrix in which I tell which subclones only have one position and which have multiple.
i <- 1
s <- 1
certainty <- matrix(0,ncol(possible_mothers_compressed),4)
for(i in 1:ncol(possible_mothers_compressed)){
name <- possible_mothers_compressed[1,i]#word(possible_mothers_compressed[1,i],2,3)
pos <- which(name == clonenames_new_order, arr.ind = T)
certainty[s,4] <- clonenames_new_order[pos[1],1] #The new subclone name in the phylogeny.
if(name%in%certainty[,1] == FALSE){
certainty[s,1] <- name #The cluster name with biopsy name.
nr <- length(which(possible_mothers_compressed[,i]!="0"))-1
certainty[s,2] <- nr #The number of solutions.
if(nr == 1){
certainty[s,3] <- "1 solution"
}else{
certainty[s,3] <- "> 1 solution"
}
s <- s+1
}
i <- i+1
}
certainty_all <- certainty[certainty[,1]!=0,]
# certainty <- unique(certainty[,4:3])
# View(certainty)
assign("certainty_all", certainty_all, envir=globalenv())
i <- 1
s <- 1
certainty <- matrix(0,ncol(possible_mothers_compressed),2)
for(i in 1:ncol(possible_mothers_compressed)){
name <- possible_mothers_compressed[1,i]
pos <- which(name == clonenames_new_order, arr.ind = T)
name <- clonenames_new_order[pos[1],1]
#print(name)
if(name%in%certainty[,1] == FALSE){
certainty[s,1] <- name
nr <- length(which(possible_mothers_compressed[,i]!="0"))-1
certainty[s,2] <- nr
if(nr == 1){
certainty[s,2] <- "1 solution"
}else{
certainty[s,2] <- "> 1 solution"
}
s <- s+1
}else{
nr <- length(which(possible_mothers_compressed[,i]!="0"))-1
certainty[s,2] <- nr
if(nr != 1){
row <- match(name,certainty[,1])
certainty[row,2] <- "> 1 solution"
}
}
i <- i+1
}
certainty <- certainty[certainty[,1]!="0",]
assign("certainty", certainty, envir=globalenv())
EM <- EM_test_newnames
if(as.numeric(ncol(possible_mothers_compressed))==length(possible_mothers_compressed[2,possible_mothers_compressed[3,]=="0"])){
print("This is the only solution")
}else{
x <- readline("There are multiple solutions. Do you want to see the suggested tree or another? Print suggested or another.")
if(x == "another"){
print("Outputting a suboptimal solution!")
#Extracting the spaces we have left.
space <- matrix(0,10,(nrow(EM)-1))
space[1,] <- rownames(EM)[1:(nrow(EM)-1)]
#Constructing a matrix with the subclones placed in each subclone.
i <- 1
for(i in 1:ncol(space)){
row <- 2
pos <- match(space[1,i],rownames(EM))
j <- 1
for(j in 1:(nrow(EM)-1)){
if(space[1,i]!=rownames(EM)[j]){
diff <- (EM[i,]-EM[j,])
if(sign(sum(diff>0)) != sign(sum(diff<0))){
#print(space[1,i])
#print(rownames(EM)[j])
if(rownames(EM)[j] %in% space[,i] == FALSE && sum(diff) < 0){
space[row,i] <- rownames(EM)[j]
row <- row+1
}
}
}
j <- j+1
}
i <- i+1
}
#Removing clones within clones in the same column.
i <- 1
for(i in 1:ncol(space)){
#print("Column")
#print(i)
j <- 2
for(j in 2:nrow(space)){
#print("Row")
#print(j)
clone <- space[j,i]
#print(clone)
if(clone != "0"){
pos <- match(clone,space[1,])
#print(pos)
if(i != pos){
present <- as.matrix(space[2:nrow(space),i]%in%space[2:nrow(space),pos])
k <- 1
#print(space[2:nrow(space),i])
#print(space[2:nrow(space),pos])
#print(present)
for(k in 1:nrow(present)){
if(present[k,1]==TRUE){
#print(space[k+1,i])
space[k+1,i] <- "0"
}
k <- k+1
}
}
}
j <- j+1
}
i <- i+1
}
#Finding out the size of of each level without other clones in them.
biopsies <- unique(file_samples_subclones[,2])
biopsies <- biopsies[2:length(biopsies)]
biopsy_space <- matrix(0,(length(biopsies)+1),(ncol(space)+1))
biopsy_space[1,2:ncol(biopsy_space)] <- space[1,]
biopsy_space[2:nrow(biopsy_space),1] <- biopsies
i <- 1
for(i in 1:ncol(space)){
pos <- match(space[1,i],clonenames_new_order[,1]) #Row. Mother.
j <- 2
for(j in 2:nrow(biopsy_space)){
col <- match(biopsy_space[j,1],word(clonenames_new_order[pos,],1))
if(is.na(col)==FALSE){ #It might not be present in some samples.
biopsy_space[j,i+1] <- clonenames_new_order[pos+1,col]
}
j <- j+1
}
i <- i+1
}
biopsy_space_base <- biopsy_space #Otherwise we get wrong results when subtracting columns further on in the loop.
#View(biopsy_space_base)
if(missing(cutoff)==TRUE){
cutoff <- 30
}
#Finding out the remaining space at each level.
i <- 2
#print("I")
for(i in 2:ncol(biopsy_space)){
#print(i)
j <- 2
for(j in 2:nrow(space)){
#print("J")
#print(j)
col <- match(space[j,i-1],biopsy_space[1,])
if(space[j,i-1] != "0" && space[j,i-1] != 0){
#print(biopsy_space[1,i]) #Mother.
#print(biopsy_space[1,col]) #Daughter
#print(as.numeric(biopsy_space[2:nrow(biopsy_space),i])) #Motherspace.
#print(as.numeric(biopsy_space_base[2:nrow(biopsy_space_base),col])) #Daughterspace.
biopsy_space[2:nrow(biopsy_space),i] <- (as.numeric(biopsy_space[2:nrow(biopsy_space),i])-as.numeric(biopsy_space_base[2:nrow(biopsy_space_base),col]))
if(length(biopsy_space[as.numeric(biopsy_space[2:nrow(biopsy_space),i]) < 0,i]) != 0){ #There is negative numbers.
#print("Negative")
row <- which(as.numeric(biopsy_space[2:nrow(biopsy_space),i])<2)
biopsy_space[row+1,i] <- "0"
pos <- which(biopsy_space[1,i]==space,arr.ind=TRUE)
pos <- pos[pos[,1]!=1,]
mothermother <- space[1,as.numeric(pos[2])]
biopsy_space[row+1,as.numeric(pos[2])+1] <- as.numeric(biopsy_space[row+1,as.numeric(pos[2])+1])-as.numeric(biopsy_space_base[row+1,col])#Removing this space from the mothermother.
}
}
j <- j+1
}
i <- i+1
}
#Finding out which clusters can be reshuffled and where they can be placed.
#print("Shuffle")
#print(biopsy_space)
shuffle <- matrix(0,10,3)
s <- 1
i <- 1
for(i in 2:ncol(biopsy_space_base)){
#print("Här")
b <- length(biopsy_space_base[biopsy_space_base[,i]!="0",i])-1 #Biopsies in which it exist.
#print(b)
reduced <- biopsy_space_base[biopsy_space_base[,i]!="0",i] #Biopsies in which it exist.
#print("reduced")
#print(reduced)
#Finding out in how many biopsies the event is < 30 %. Diff is the difference between this number and the total number of biopsies.
diff <- (length(which(as.numeric(reduced[2:length(reduced)])<cutoff))-b) #Will be 0 if this subclone is of size < 30 in all samples.
#print("old")
#print(biopsy_space_base[1,i])
#print(as.numeric(biopsy_space_base[2:nrow(biopsy_space_base),i]))
#print(as.numeric(biopsy_space_base[2:nrow(biopsy_space_base),i])<cutoff)
#print(which(as.numeric(biopsy_space_base[2:nrow(biopsy_space_base),i])<cutoff))
#print(diff)
if(diff == 0){ #The event is below the cutoff in all samples.
#print("Will be shuffled")
#print(biopsy_space_base[1,i])
shuffle[s,1] <- biopsy_space_base[1,i] #Saving the ones that will be shuffled in a matrix.
pos <- which(space==biopsy_space_base[1,i],arr.ind=TRUE)
pos <- pos[pos[,1]!=1,] #Removing the cases where it is in the first row.
biopsy_space[2:nrow(biopsy_space),as.numeric(pos[2])+1] <- as.numeric(biopsy_space[2:nrow(biopsy_space),as.numeric(pos[2])+1])+as.numeric(biopsy_space[2:nrow(biopsy_space),i]) #Adding the space again.
p <- which(biopsy_space_base[1,i]==space,arr.ind=TRUE)
p <- p[p[,1]!=1,]
shuffle[s,2] <- space[1,as.numeric(pos[2])] #Saving the old mother.
s <- s+1
}
i <- i+1
}
#We have now chosen the ones to be shuffled.
#Looking for where it could be placed.
#print("Shuffle")
#print(shuffle)
shuffle <- t(as.matrix(shuffle[shuffle[,1]!= "0",]))
shuffle <- t(as.matrix(shuffle[sample(nrow(shuffle)),])) #Randomly shuffling the ones that should be relocated.
space_new <- space
i <- 1
#print(nrow(shuffle))
for(i in 1:nrow(shuffle)){
newspace <- biopsy_space
col <- match(shuffle[i,1],biopsy_space_base[1,]) #The position.
spaces <- biopsy_space[2:nrow(biopsy_space),2:ncol(biopsy_space)] #Extracting the spaces.
class(spaces) <- "numeric"
shuffled_clone_space <- as.matrix(biopsy_space_base[2:nrow(biopsy_space_base),col])
class(shuffled_clone_space) <- "numeric"
room <- sweep(spaces,1,shuffled_clone_space, FUN="-") #The spaces left if we place this clone in that level.
newspace[2:nrow(newspace),2:ncol(newspace)] <- room #The spaces left if we place this clone in that level.
neg <- which(room<0,arr.ind=TRUE) #Gives us the columns not possible.
pos <- c(1:ncol(room))[c(1:ncol(room))%in%neg[,2] == FALSE]
possible <- biopsy_space[,pos+c(rep(1,length(pos)))]
#print(possible)
possible <- as.matrix(possible[,possible[1,]!=shuffle[i,1]]) #It should not be placed in itself.
#print(possible)
#Randomly choosing a new position.
chosen <- t(as.matrix(possible[,sample(ncol(possible),1)]))
shuffle[i,3] <- chosen[1,1] #Saving the new mother.
#print(chosen)
col_m <- match(chosen[1,1],biopsy_space[1,])
biopsy_space[,col_m] <- newspace[,col_m]
u <- which(space == shuffle[i,1],arr.ind=TRUE)
u <- u[u[,1]!=1,]
#print(u)
space_new[u[1],u[2]] <- "0"
#print(space_new[match(chosen[1,1],space_new[1,]),])
lgh <- length(space_new[space_new[match(chosen[1,1],space_new[1,])]!="0",])
#print(lgh)
#print("test")
#print(space_new[,match(chosen[1,1],space_new[1,])])
#print(space_new[,match(chosen[1,1],space_new[1,])]!="0")
#print(space_new[space_new[,match(chosen[1,1],space_new[1,])]!="0",match(chosen[1,1],space_new[1,])])
space_new[length(space_new[space_new[,match(chosen[1,1],space_new[1,])]!="0",match(chosen[1,1],space_new[1,])])+1,match(chosen[1,1],space_new[1,])] <- shuffle[i,1]
#print("The chosen one.")
#print(chosen)
#print(biopsy_space)
i <- i+1
}
#print("The final shuffled")
#print(shuffle)
#Computing a new EM.
EM <- EM_test_newnames
shuffle <- t(as.matrix(shuffle[shuffle[,1]!="0",])) #Contains the ones that have been reshuffled.
shuffle_original <- shuffle
#print("Shuffle")
while(all(shuffle[,1]=="0")==FALSE){
#We will continue to randomly allocating the changed clones until all have been allocated.
i <- sample(nrow(shuffle),1)
#print(i)
daughter <- shuffle[i,1]
row_d <- match(shuffle[i,1],rownames(EM))
row_m_old <- match(shuffle[i,2],rownames(EM))
row_m_new <- match(shuffle[i,3],rownames(EM))
# print(row_d)
# print(row_m_old)
# print(row_m_new)
# print(shuffle[i,3]%in%shuffle[,1])
if(shuffle[i,3]%in%shuffle[,1]==FALSE){
# print("Inne")
# print(as.numeric(EM_test_newnames[row_d,2:ncol(EM_test_newnames)]))
# print(as.numeric(EM_test_newnames[row_m_old,2:ncol(EM_test_newnames)]))
# print(as.numeric(EM_test_newnames[row_m_new,2:ncol(EM_test_newnames)]))
EM[row_d,2:ncol(EM)] <- (as.numeric(EM_test_newnames[row_d,2:ncol(EM_test_newnames)])-as.numeric(EM_test_newnames[row_m_old,2:ncol(EM_test_newnames)])+as.numeric(EM_test_newnames[row_m_new,2:ncol(EM_test_newnames)]))
shuffle[i,1] <- "0" #Removing it
}else{
#The mother has not been allocated to its new place yet.
}
}
# print(shuffle)
# print(EM_test_newnames)
# print(EM)
# print(space)
# print(space_new)
# print(biopsy_space)
}else{
EM <- EM_test_newnames
}
}
i <- 1
for(i in 1:ncol(EM)){
colnames(EM)[[i]] <- paste(word(colnames(EM)[[i]],-1),word(colnames(EM)[[i]],1,-2))
i <- i+1
}
i <- 2
for(i in 2:nrow(overview)){
overview[i,1]<- paste(word(overview[i,1],-1),word(overview[i,1],1,-2))
i <- i+1
}
output <- list()
output[[1]] <- EM
output[[2]] <- clonenames_new_order
output[[3]] <- overview
return(output)
}
# Creating a distribution-plot.
#
# Build a ridgeline plot showing, for each subclone, how its occurrences are
# distributed across the biopsies recorded in `overview`.
#
# overview: matrix whose first row holds biopsy names (columns >= 2), whose
#   first column holds subclone names (rows >= 2), and whose remaining cells
#   hold non-negative counts.
# Returns the ggplot object; also saves it to "Distribution.png".
#
# NOTE(review): relies on the globals `w` and `h` for the saved image size and
# on ggplot2/ggridges being attached -- confirm before packaging.
distribution <- function(overview){
  # Collect one (biopsy, subclone) block per non-zero cell in a list and bind
  # once at the end, instead of growing a matrix with rbind() in the loop.
  blocks <- list()
  for (i in 2:nrow(overview)) {
    name <- overview[i, 1]
    for (j in 2:ncol(overview)) {
      biopsy <- overview[1, j]
      value <- as.numeric(overview[i, j])
      if (value != 0) {
        # Replicate the pair `value` times so the binline stat can count them.
        blocks[[length(blocks) + 1]] <- t(replicate(value, c(biopsy, name)))
      }
    }
  }
  df <- as.data.frame(do.call(rbind, blocks))
  # Plot
  p <- ggplot(df, aes(y=V2, x=V1, fill=V2,height = stat(count))) +
    geom_density_ridges(alpha=0.8, stat="binline",bins=(ncol(overview)-1),scale=0.8)+theme_ridges()+
    theme(
      legend.position="none",
      panel.spacing = unit(0.1, "lines"),
      strip.text.x = element_text(size = 8),
      axis.text.x = element_text(angle = 45))+
    scale_fill_viridis_d(direction = -1, guide = "none")+
    xlab("") +
    ylab("")
  p
  ggsave(p,filename= "Distribution.png",width = w,height = h)
  return(p)
}
#Creating pies.
#
# Build per-subclone pie charts showing which biopsies each (new) subclone is
# present in and its clone size (tumor-cell %) in each biopsy.
#
# clonenames_new_order: matrix with, per subclone, a name row ("<new name>"
#   in column 1 followed by "<sample> <cluster>" entries) and the matching TC
#   row directly below it.
# root:       if "Normal", a normal-cell entry is prepended to the matrix.
# samples:    vector of sample/biopsy names.
# type:       color mode -- "col" (Spectral palette), "nocol" (single color)
#             or "custom" (use custom_col).
# custom_col: row vector of colors; used only when type == "custom".
#
# Returns list(image file names, list of pie ggplot objects). Side effects:
# writes one PNG per subclone (plus "legend.pdf" when applicable) and assigns
# `pies` into the global environment.
#
# NOTE(review): reads the global `datasegment` for the biopsy list and needs
# ggplot2 / RColorBrewer / dplyr::distinct / stringr::word attached -- confirm
# before packaging.
make_pie <- function(clonenames_new_order, root, samples, type, custom_col){
if(root == "Normal"){# && "Normal" %in% clonenames_new_order[1,1] == FALSE){
Normal <- matrix(0,2,ncol(clonenames_new_order)) #Adding the normal cell to the clonenames_new_order matrix.
Normal[1,1] <- "Normal"
Normal[1,2] <- "100"
Normal[2,1] <- "Normal cells"
Normal[2,2] <- "100"
clonenames_new_order <- rbind(Normal,clonenames_new_order)}
Subclones <- matrix(0,2,100) #Scratch matrix: row 1 = sample names, row 2 = sizes (assumes <= 100 entries per subclone).
pies <- list() #Creating a list for all pie data.
pie_images <- list()
pie_empty <- matrix(0,length(samples),2) #Creating empty pies.
pie_empty[,1] <- samples
pie_empty[,2] <- "0"
i <- 1 #The following loop will extract the size of the subclone in each sample.
for(i in 1:(nrow(clonenames_new_order)/2)){ #Looping through the new subclones.
j <- 1
s <- 1
for(j in 1:ncol(clonenames_new_order)){ #Looping through the samples in which the subclone exists.
if(clonenames_new_order[(2*i-1),j] != "0"){ #We should not add all of the columns with zeros.
if(j != 1){ #We're not in the first column. The data includes the subclones within the new subclone.
Subclones[1,s] <- word(clonenames_new_order[(2*i-1),j],1) #The sample.
Subclones[2,s] <- clonenames_new_order[(2*i),j] #The size of the subclone within that sample.
s <- s+1
}else{Subclones[1,s] <- clonenames_new_order[(2*i-1),j] #We're in the first position. This is the new subclone name.
Subclones[2,s] <- clonenames_new_order[(2*i),j] #The mean size of the subclone.
s <- s+1
}
}
j <- j+1
}
Subclones <- Subclones[,Subclones[1,] != "0"] #Dropping the unused all-zero columns of the scratch matrix.
if(Subclones[1,1]!="Normal"){
Subclones <- distinct(data.frame(t(Subclones))) #Removing duplicate (sample, size) rows. This contains all of the pie data needed.
}
pies[[i]] <- Subclones #Adding the per-subclone pie data to the list.
Subclones <- matrix(0,2,100) #Resetting the matrix.
i <- i+1
}
assign("pies", pies, envir=globalenv())
image_names <- matrix(0,1,(nrow(clonenames_new_order)/2)) #Creating a vector that is to be used in order to save all of the file names.
unique_biopsies <- unique(datasegment[,2]) #Unique biopsies.
if(unique_biopsies[1]=="ALL"){ #Removing the ALL.
unique_biopsies <- unique_biopsies[2:length(unique_biopsies)]}
unique_biopsies <- c(c(unique_biopsies),c("Normal","Stem"))
#This part should be looped for each matrix in "pies".
j <- 1
#Custom_colors: predefined palettes kept for reference / custom use.
blue <- c("#6bb5d8","#6fb9e7","#4d8dc6","#2c6f9a","#205575")
red <- c("#ed6d70","#ea5456","#e52421","#a71916")
yellow <- c("#f6c400","#ed8606","#e55514")
#grey <- c("#b9b8b8","#9d9d9c","#706f6f","#3c3c3b") In article.
green <- c("#add3a2","#6abfa4","#497f7a","#2c574a")
brown <- c("#ca9e67","#936037")
purple <- c("#d4bae0","#c9a0dc","#ae87d0","#7851a9","#522d80","#500691","#330066")
grey <- c("#b9b8b8","#9d9d9c","#8a8a8a","#706f6f","#595858","#3c3c3b","#212121")
#Create your own color matrix, e.g.:
#custom_col <- t(as.matrix(c(blue[2],red[2],yellow[2],green[2],grey[6],grey[6])))
for(j in 1:length(pies)){ #Looping though all of the matrices in pies.
Subclone <- pies[j] #Extracting the matrix j representing the data for a particular subclone that is to be presented in the shape of pie charts.
Subclone <- as.matrix(as.data.frame(Subclone[1])) #Transforming it to a data frame.
#Build the sample (y) and TC (a) vectors; each sample contributes two slices:
#TC and 100-TC.
i <- 2
for(i in 2:nrow(Subclone)){ #Looping through the samples that the subclone are present in.
if(i == 2){
y <- as.vector(rep(Subclone[i,1],2)) #Extracting the first sample name and creating a vector where it appears two times.
a <- as.vector(rep(Subclone[i,2],2)) #Extracting the TC.
a[2] <- (100 - as.numeric(a[1])) #The first position will be the TC and the other one 100-TC and hence the other slice of the pie chart.
}else{
x <- as.vector(rep(Subclone[i,1],2))
b <- as.vector(rep(Subclone[i,2],2))
b[2] <- (100 -as.numeric(b[1]))
y <- c(y,x) #Combining the sample vectors.
a <- c(a,b)} #Combining the TC values. This gives a vector with all the TC values that is to be used when dividing the pie charts.
i <- i+1
}
#print(y)
#print(a)
#Creating the pie colors for this particular biopsy.
sp <- brewer.pal(11,"Spectral")
if(type=="col"){
colors_biopsies <- as.matrix(cbind(c("B1",sp[2],"white"),c("B2",sp[10],"white"),c("B3",sp[9],"white"),c("B4",sp[5],"white"),c("B5",sp[1],"white"),c("B6",sp[11],"white"),c("B7",sp[8],"white"),c("B8",sp[3],"white"),c("B9",sp[6],"white"),c("B10",sp[4],"white"),c("B11",sp[7],"white"),c("B12","#008080","white"),c("B13","#800000","white"),c("B14","#808080","white")))
#colors_biopsies <- as.matrix(cbind(c("B1","indianred1","white"),c("B2","#619CFF","white"),c("B3","#00BA38","white"),c("B4","#00BFC4","white"),c("B5","indianred1","white"),c("B6","#5fc400","white"),c("B7","#F564E3","white"),c("B8","#000485","white")))
if(length(unique_biopsies)>ncol(colors_biopsies)){
print("There are more samples than we have colors (11). Add your own colors or visualize without colors.")
}
colors_biopsies[1,1:length(unique_biopsies)]<- unique_biopsies
}else if(type=="nocol"){
colors_biopsies <- as.matrix(cbind(c("B1","indianred1","white"),c("B2","indianred1","white"),c("B3","indianred1","white"),c("B4","indianred1","white"),c("B5","indianred1","white"),c("B6","indianred1","white"),c("B7","indianred1","white"),c("B8","indianred1","white"),c("B9","indianred1","white"),c("B10","indianred1","white"),c("B11","indianred1","white"),c("B12","indianred1","white"),c("B13","indianred1","white"),c("B14","indianred1","white"),c("B15","indianred1","white")))
colors_biopsies[1,1:length(unique_biopsies)]<- unique_biopsies
}else if(type=="custom"){
names <- t(as.matrix(c(paste( c("B"), 1:as.numeric(length(unique_biopsies)), sep=""))))
white <- t(as.matrix(c(rep("white",length(unique_biopsies)))))
colors_biopsies <- rbind(names,custom_col,white)
colors_biopsies[1,1:length(unique_biopsies)]<- unique_biopsies
}else{
print("You have not chosen a correct color mode.")
}
print(colors_biopsies)
#Interleave each sample's color with "white" (the 100-TC slice).
c <- 1
for(c in 1:(length(y)/2)){
column <- match(y[c+(c-1)],colors_biopsies[1,])
if(c != 1){
color_matrix <- c(c(color_matrix),c(colors_biopsies[2:3,column]))
}else{
if(Subclone[1,1] != "Normal"){
color_matrix <- colors_biopsies[2:3,column]
}else{
color_matrix <- colors_biopsies[2:3,1]
}
}
c <- c+1
}
print(color_matrix)
test <- data.frame(Subclone = y,
Names = c(rep(c("Sample 1","Sample 2"),length(y)/2)),
TC = as.numeric(a),colour = color_matrix) #Creating a data frame with the samples in which the subclone exist, sample names, the TC for each and the colors.
test$Subclone <- factor(test$Subclone, levels = unique(Subclone[2:nrow(Subclone),1]))
x <- ggplot(test, aes(x="", y = TC, group = Names, fill = colour)) +
geom_bar(width = 10, stat = "identity")+
geom_col(position = "fill")+scale_fill_identity()+facet_grid(.~Subclone)+
coord_polar("y", start=0) +theme_void()+#theme(strip.text.x = element_text(size = 200))+
theme(strip.background = element_blank(),
strip.text.x = element_blank(),
title = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
panel.grid = element_blank(), legend.position="none")+
theme(plot.margin=unit(c(0,0,0,0),units = "lines"))
if(type=="nocol" && Subclone[1,1] != "Normal"){
x <- x+theme(strip.text.x = element_text(size = 200))
}
pie_images[[j]] <- x
names(pie_images)[j] <- Subclone[1,1]
pie_images[[j]]$Subclone <- Subclone[1,1]
#Changes the element_text. Standard is 200 for all images. When you have more samples you might need to change it.
w <- 49
#w <- 10*(as.numeric(nrow(Subclone))-1)
s <- 10
ggsave(x,filename=paste(Subclone[1,1],".png",sep=""),width = w,height = s) #Tried leaving out the widening of the image.
image_names[j] <- paste(Subclone[1,1],".png",sep="")
if(Subclone[1,1]=="Stem"){
#The Stem pie is redrawn with a legend so it can be exported separately.
x <- ggplot(test, aes(x="", y = TC, group = Names, fill = colour)) +
geom_col(position = "fill")+facet_grid(.~Subclone)+#scale_fill_manual(values=color_matrix)+
coord_polar("y", start=0) +theme_void()+labs("Samples")+
scale_fill_identity(guide="legend",labels=c(samples),breaks = colors_biopsies[2,1:length(samples)],name="Samples")+
guides(labels = guide_legend(override.aes = list(shape = 15)))+
theme(plot.margin=unit(c(0,0,0,0),units = "lines"))
plot(x)
if(type != "nocol"){
legend <- cowplot::get_legend(x)
ggsave(legend,filename="legend.pdf",width=8,height=10,units = "cm")}
}
j <- j+1
}
pieData <- list()
pieData[[1]] <- image_names
pieData[[2]] <- pie_images
return(pieData)
}
#Adding the pies.
#
# Attach the per-subclone pie-chart images (created by make_pie) to the tips
# of a ggtree plot.
#
# Tree:    a ggtree plot object (`%<+%` and geom_tiplab(geom = "image") come
#          from ggtree/ggimage).
# pieData: list(image file names, list of pie ggplot objects) as returned by
#          make_pie().
# offset:  horizontal offset of the images from the tips (default 1).
# size:    image size passed to geom_tiplab (default 0.21).
# col:     kept for backward compatibility; currently unused (the legend code
#          that consumed it is disabled).
#
# Returns the tree plot with the images attached.
#
# NOTE(review): sizes the lookup tables from the global `EM_dev`; if
# length(pieData[[1]]) differs from nrow(EM_dev[[1]]) the data.frame() call
# below will recycle or fail -- confirm the caller guarantees they match.
pie_it <- function(Tree,pieData, offset, size,col){
  image_names <- pieData[[1]]
  pie_images <- pieData[[2]]
  p <- Tree
  # Tip labels of the tree; internal nodes carry NA labels and are dropped.
  labels <- as.matrix(p$data$label)
  labels <- as.matrix(labels[!is.na(labels)])
  positions <- matrix(0,length(rownames(EM_dev[[1]])),1) # node index per image
  pie <- matrix(0,length(rownames(EM_dev[[1]])),1)       # index into pie_images
  # Map each image file back to its subclone (the name is encoded in the
  # file name, "<subclone>.png") and look up its tree node and pie object.
  s <- 1
  for(i in 1:length(image_names)){
    thesubclone <- word(image_names[i],1,sep = ".png")
    positions[s,1] <- match(thesubclone,labels)
    pie[s,1] <- match(thesubclone,names(pie_images))
    s <- s+1
  }
  d <- data.frame(node = positions,images = c(image_names),pie_add = c(pie))
  if(missing(offset)==TRUE){
    offset <- 1
  }
  if(missing(size)==TRUE){
    size <- 0.21
  }
  # Join the image table onto the tree data and draw the images at the tips.
  new <- p %<+% d + geom_tiplab(aes(image=images), geom="image",offset = offset, size = size)
  return(new)
}
#Making a heatmap.
#
# Convert the clonenames_new_order bookkeeping matrix (subclone-name row and
# TC row on alternating lines) into a numeric subclone-by-sample matrix of
# clone sizes, suitable for plotting as a heatmap next to the tree.
#
# clonenames_new_order: matrix where row 2k-1 holds the new subclone name
#   (column 1) followed by "<sample> <cluster>" entries, and row 2k holds the
#   corresponding tumor-cell percentages.
# Returns a numeric matrix with one row per subclone, one column per sample.
#
# NOTE(review): uses stringr::word() and assumes the first word of each entry
# is the sample name -- confirm sample names never contain spaces.
tree_heatmap <- function(clonenames_new_order){
  # Long format first: one row per (subclone, sample, size) triple.
  # Over-allocated on purpose; the unused all-zero rows are dropped below.
  pie_EM <- matrix(0,length(clonenames_new_order),3)
  s <- 1
  for(i in 1:(nrow(clonenames_new_order)/2)){
    part <- clonenames_new_order[(2*i-1):(2*i),] # name row + TC row for subclone i
    samples <- unique(word(part[1,],1))
    # Drop the first entry (the subclone-name column) and the last one
    # (presumably the "0" filler from unused columns -- TODO confirm).
    samples <- samples[2:(length(samples)-1)]
    for(j in 1:length(samples)){
      pie_EM[s,1] <- part[1,1]
      pos <- match(samples[j],word(part[1,],1))
      pie_EM[s,2] <- word(part[1,pos],1)
      pie_EM[s,3] <- part[2,pos]
      s <- s+1
    }
  }
  pie_EM <- pie_EM[pie_EM[,1]!="0",]
  pie_df <- data.frame(subclone = pie_EM[,1], sample = pie_EM[,2], size = pie_EM[,3])
  # Now pivot the long format into the wide subclone x sample matrix.
  pie_EM <- matrix(0,length(unique(pie_df$subclone)),length(unique(pie_df$sample)))
  rownames(pie_EM) <- unique(pie_df$subclone)
  colnames(pie_EM) <- unique(pie_df$sample)
  for(i in 1:nrow(pie_df)){
    row <- match(pie_df[i,1],rownames(pie_EM))
    col <- match(pie_df[i,2],colnames(pie_EM))
    pie_EM[row,col] <- as.numeric(pie_df[i,3])
  }
  return(pie_EM)
}
#Overview --> Segment file
#ov_input <- function(data,name,method,ploidy){}
#Transforming a heat map to an event matrix.
# Builds a segment-file-style table `df` with one row per non-zero cell of
# `data` (sample columns start at 6), then replaces `data` with that table.
# Relies on `data` and `x` being defined earlier in the script and on
# stringr::str_split(); assumes column 4 holds genotype strings like "2+1"
# (first char = B allele, third char = A allele) -- TODO confirm.
df <- matrix(0, length(as.matrix(data)), 11)  # generous row upper bound; unused rows dropped below
types <- c("Tumor ID","Samples","Chr","Start","End","Med LogR","VAF (TRS)","Type","Method","Cytoband/ Gene","Clone size (%)")
colnames(df) <- types
method <- "SNP-array"
name <- x
ploidy <- 2
df[,1] <- name
s <- 1  # next free row of df
for (i in 6:ncol(data)) { #Sample
  for (j in seq_len(nrow(data))) { #Alteration; seq_len() is safe for 0 rows
    if (data[j,i] != "0") {
      df[s,2] <- colnames(data)[i] #Sample
      df[s,3] <- data[j,1] #Chr
      df[s,4] <- data[j,2] #Start
      df[s,5] <- data[j,3] #End
      df[s,6] <- "NA" #Med LogR
      df[s,7] <- "NA" #VAF (TRS)
      alleles <- str_split(data[j,4], "")[[1]]  # split the genotype string once
      B_allele <- alleles[1]
      A_allele <- alleles[3]
      # Classify the event relative to the assumed ploidy.
      if (as.numeric(B_allele) == ploidy && as.numeric(A_allele) == 0) {
        type <- "LOH"
      } else if (as.numeric(B_allele) + as.numeric(A_allele) > ploidy) {
        type <- "Gain"
      } else {
        type <- "Loss"
      }
      df[s,8] <- type #Type
      df[s,9] <- method #Method
      df[s,10] <- data[j,4] #Cytoband/gene
      df[s,11] <- data[j,i] #Clone size.
      s <- s + 1
    }
  }
}
# drop = FALSE keeps a matrix even when a single populated row remains.
df <- df[df[,11] != "0", , drop = FALSE]
data <- df
######################
#Files to be analyzed#
######################
# Main analysis pipeline. load_matrix(), splitdata(), DEVOLUTION(),
# subclones(), distribution(), phydatevent(), mp_tree(), ml_tree(),
# MP_treeplot() and ML_treeplot() are project functions defined elsewhere.
# NOTE(review): setwd() in a script makes it machine-specific; consider a
# project-relative path.
setwd("~/") #Set your working directory.
data <- load_matrix(filename="Segment_example.xlsx",sheetname ="Example_tumors") #Extracting the whole segment file including all of the tumors.
x <- "Tumor"
#Rule matrix. The first object is the mother that the second one the daughter it cannot have according to
#information we have from some source.
# rule <- matrix(0,3,3)
# rule[1,1] <- "ALL"#"17p13q12 LOSS (1+0)"
# rule[1,2] <- "17p13q12 GAIN (2+1)"
# rule[1,3] <- "Yes"
# colnames(rule) <- c("Mother","Daughter","Allowed or not")
#####################################
#Generating event matrices and trees#
#####################################
datatypes <- c("All")#c(unique(test[,9])) #These are your data types such as SNP-array, TDS, WGS, WES etc. Change this vector if you do not want all of them to be included.
event_co <- 10000
root <- "Normal"
datasegment <- splitdata(data,name=x,ord=TRUE) #Extracting the beginning and end position of each sample in the segment file. #Declare which tumor you want to analyze. Specified by the first column in your data set.
#Creating the event matrix.
#DEVOLUTION(file,eventcutoff,datatypes, rule, eps,truncate,names)
EM <- DEVOLUTION(datasegment,event_co,datatypes=c("All"), eps = 0.5,names="letters") #Creating an event matrix based on the segment file chosen.
#The final event matrix
# NOTE(review): file_samples_subclones, possible_mothers and overview_stem
# are not defined in this script -- they must already exist in the
# workspace; confirm where they come from.
EM_dev <- subclones(EM,file_samples_subclones,root = "Normal",possible_mothers,cutoff=30,names="letters") #The first element in this list is the new event matrix. The second one is used for making pie charts.
DB <- distribution(overview_stem)
plot(DB)
ggsave(DB,filename= "Distribution.png",width = 15,height = 15)
#Visualizing the trees without pies and saving them
EM_phy <- phydatevent(EM_dev[[1]]) #Transforming the EM to phyDat format.
EM_mptree <- mp_tree(EM_phy,root) #Constructing the maximum parsimony tree.
EM_mltree <- ml_tree(EM_phy,root) #Constructing the maximum likelihood tree.
limitmp <- xlim(c(0, 30)) #Here you can determine the limits for the graph for mp. 20
limitml <- xlim(c(0, 20)) #Here you can determine the limits for the graph for ml. 1.5
type <- "nocol"
Treemp <- MP_treeplot(EM_mptree,limitmp,col = type) #Illustrating the maximum parsimony tree.
Treeml <- ML_treeplot(EM_mltree,limitml,col = type) #Illustrating the maximum likelihood tree.
ggsave(Treemp,filename="PDX3_211102_mp.pdf",width=10,height=10)
######################
#Other ways to visualize the trees.
# Alternative visualizations: ggtree plot, heat maps of the event matrix and
# of the pie distribution, and Excel export of the results.
# NOTE(review): df is assigned twice in a row -- the ml assignment on the
# next line is immediately overwritten by the mp one; confirm which tree is
# intended.
df <- EM_mltree$tree
df <- EM_mptree
library(viridis)
p <- ggtree(df)+
  geom_tiplab(align=TRUE, linetype='dashed', linesize=.3)+ #Lines between the subclone name and end node.
  #geom_tiplab()+ #Unmuting this and muting the row above instead places the subclonenames close to the end node.
  geom_tippoint(aes(colour=label),size=4)+ geom_tree()+
  scale_color_viridis_d("label")+
  theme(legend.position='none')
p
ggsave(p,filename="Tree_one_211101_mp_nolines.png",width=12,height=10)
#Add a heat map of the EM next to the tree.
q <- gheatmap(p,EM_dev[[1]], offset=0.05, width=8,
              colnames_angle=45, hjust=1,low="white",high="steelblue")+theme(legend.position="none")+
  scale_y_continuous(expand = expansion(mult = c(0.2,0)))
q
ggsave(q,filename="Tree_one_EM_ml.png",width=27,height=14)
#If you want a specific order for the samples.
# NOTE(review): pie_EM_order uses df_pie, which is only created further
# down (df_pie <- tree_heatmap(...)); run that line first.
order_samples <- c("sample1","sample2")
class(order_samples) <- "character"
pie_EM_order <- df_pie[,match(order_samples,colnames(df_pie))]
q <- gheatmap(p,pie_EM_order, offset=0.1, width=5,
              colnames_angle=45, hjust=1,low="white",high="steelblue")+theme(legend.position="none")+
  scale_y_continuous(expand = expansion(mult = c(0.1,0)))
q
q <- gheatmap(p,pie_EM_order, offset=4, width=4,
              colnames_angle=45, hjust=1,low="white",high="steelblue")+theme(legend.position="none")+
  scale_y_continuous(expand = expansion(mult = c(0.1,0)))
q
#Add a heat map of the pies next to the tree.
df_pie<- tree_heatmap(clonenames_new_order)
q <- gheatmap(p,df_pie, offset=0.1, width=5,
              colnames_angle=45, hjust=1,low="white",high="steelblue")+theme(legend.position="none")+
  scale_y_continuous(expand = expansion(mult = c(0.1,0)))
q
ggsave(q,filename="Tree_one_mp_neworder_211123.png",width=12,height=10)
#Saving the event matrix and the clustering annotation of each event.
#It is basically an updated version of the input segment file with the clustering of events.
write.xlsx(as.data.frame(t(EM_dev[[1]])),"DEVOLUTION.xlsx",sheetName="Event matrix")
write.xlsx(clonenames_new_order[clonenames_new_order[,1]!="0",],append = TRUE,"DEVOLUTION.xlsx",sheetName="Pies")
write.xlsx(Clustering,append = TRUE, "DEVOLUTION.xlsx",sheetName = "Clustering") #Saving the data set that has been used in order to make the EM. It includes information about the subclonal belonging.
write.xlsx(as.data.frame(t(EM)),append = TRUE,"DEVOLUTION.xlsx",sheetName="Event matrix samples")
write.xlsx(as.data.frame(EM_dev[[3]]),append = TRUE,"DEVOLUTION.xlsx",sheetName="Overview")
s <- 10
#Creating pie charts and saving the final tree.
# Builds pie-chart images per subclone with make_pie() (project function)
# and attaches them to the tree tips via ggtree's %<+% operator.
coltype <- "col" #Choose how you want your pies. nocol = Just red pie charts with a biopsy name above. col = colored pies. custom = create your own color scheme.
samples <- as.vector(unique(datasegment[datasegment[,2]!="ALL",2])) #Or just write it.
#EM_dev[[2]][8,6] <- "30"
pieData <- make_pie(EM_dev[[2]],root,samples,type=coltype) #Creates the pie charts.
pietree <- pie_it(Treemp,pieData,offset=1,size=0.21,col=coltype) #Adds pie charts to the tree. 0.21. Used 0.17 lately.
ggsave(pietree,filename=paste(x,"_211128",".pdf",sep=""),width = s,height = s) #RMS8_SNP_tree_ml
image_names <- pieData[[1]]
pie_images <- pieData[[2]]
# NOTE(review): `Tree` is not defined in this script (only Treemp/Treeml);
# p is also unused below -- confirm this line is intentional.
p <- Tree
labels <- as.matrix(Treemp$data$label)
labels <- as.matrix(labels[!is.na(labels)])
positions <- matrix(0,nrow(labels),1) #Empty matrix in which the positions are to be saved.
pie <- matrix(0,nrow(labels),1)
#Extracting the subclone that each image belongs to.
# The manual i/s counters below are redundant with the for index; kept as-is.
i <- 1
s <- 1
for(i in 1:length(image_names)){ #Looping through the image names.
thesubclone <- word(image_names[i],1,sep = ".png") #Extracting the subclone that each image belongs to.
thesubclone_pos <- match(thesubclone,labels)
pie_image <- match(thesubclone,names(pie_images))
positions[s,1] <- thesubclone_pos
pie[s,1] <- pie_image
s <- s+1
i <- i+1
}
# NOTE(review): the hard-coded 1:19 assumes exactly 19 tips -- presumably
# specific to this tumor; generalize before reuse.
d <- data.frame(node = positions[1:19],images = c(image_names),pie_add = c(pie)[1:19])
new <- Treemp %<+% d + geom_tiplab(aes(image=images), geom="image",offset = 2, size = 0.17)
s <- 10
ggsave(new,filename=paste(x,"_col_mp",".pdf",sep=""),width = s,height = s) #RMS8_SNP_tree_ml
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 61600
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 61600
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-101.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 23359
c no.of clauses 61600
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 61600
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-101.qdimacs 23359 61600 E1 [] 0 102 22953 61600 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-101/tlc02-uniform-depth-101.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 691 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 61600
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 61600
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-101.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 23359
c no.of clauses 61600
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 61600
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-101.qdimacs 23359 61600 E1 [] 0 102 22953 61600 NONE
|
# Unit tests for bin_probability(): argument validation and computed values.
context("check function bin_probability()")

test_that("check success, trials and prob", {
  # Invalid inputs must raise the documented error messages.
  expect_error(bin_probability(-1, 5, 0.5), "success cannot be less than zero")
  expect_error(bin_probability(6, 5, 0.5), "success cannot be greater than trials")
  expect_error(bin_probability(3, 4, -0.1), "probability values must be between 0 and 1")
})

test_that("bin_probability calculates the corresponding probability of binomial variable", {
  # Scalar and vectorized success counts; args are (success, trials, prob).
  expect_equal(bin_probability(2, 5, 0.5), 0.3125)
  expect_length(bin_probability(0:2, 5, 0.5), 3)
  expect_equal(bin_probability(0:2, 5, 0.5), c(0.03125, 0.15625, 0.31250))
  expect_equal(bin_probability(55, 100, 0.45), 0.01075277)
})
| /tests/testthat/test-bin-probability.R | no_license | Agatemei/binomial | R | false | false | 783 | r | context("check function bin_probability()")
test_that("check success, trials and prob", {
expect_error(bin_probability(-1, 5, 0.5), "success cannot be less than zero")
expect_error(bin_probability(6, 5, 0.5), "success cannot be greater than trials")
expect_error(bin_probability(3, 4, -0.1), "probability values must be between 0 and 1")
})
test_that("bin_probability calculates the corresponding probability of binomial variable", {
expect_equal(bin_probability(success = 2, trials = 5, prob = 0.5), 0.3125)
expect_length(bin_probability(success = 0:2, trials = 5, prob = 0.5), 3)
expect_equal(bin_probability(success = 0:2, trials = 5, prob = 0.5), c(0.03125, 0.15625, 0.31250))
expect_equal(bin_probability(success = 55, trials = 100, prob = 0.45), 0.01075277)
})
|
library(data.table)
# Joins pump-station static attributes onto monthly work orders and saves
# the merged Poisson-regression dataset.
# NOTE(review): left_join() below is from dplyr, but library(dplyr) is only
# present as a commented line -- the script fails unless dplyr is attached
# elsewhere; confirm.
#Read in Final Attributes:
dat <- read.csv("attributes_edited.final.csv", header= TRUE)
dat$Start.Up.Date <- as.Date(dat$Start.Up.Date, "%d/%m/%Y")
dat$Start_Date <- dat$Start.Up.Date
dat$Start.Up.Date = NULL
#str(dat)
##CHANGING DATES TO READ from 1900's rather 2000s before 1970, as currently as.Date assumes 1955 is actually 2055.
#library(dplyr)
#dat$Start_Date<-(dat$Start.Up.Date %>% as.Date(format="%Y-%m-%d") %>%
# format("%y%m%d") %>%
# (function(d){
# paste0(ifelse(d>171231,"19","20"),d)
# }) %>%
# # as.Date("%Y%m%d"))
#str(dat)
#Removing original date column from dataset
#dat$Start.Up.Date = NULL
#all static attributes to attach to pump stations in work order.
dat_attributes <- data.frame(dat)
str(dat_attributes)
#JOINING WORK ORDER
#Work Order Dateset:
#Compiling Logistic regression Dataset
work_order <- read.csv("final_POISSION_workorders.csv", header=TRUE)
str(work_order)
work_order$Month <- as.Date(work_order$Month, format="%d/%m/%Y")
str(work_order)
##JOINING WORK ORDERS TO STATIC ATTRIBUTES:
test1 <- left_join(work_order, dat_attributes, by = c('FL'))
dat <- as.data.frame(test1)
str(dat)
saveRDS(dat, file = "poisson_fault_dataset.rds")
write.csv(dat, file= "poisson_fault_dataset.csv")
#######creating grid of dates to join work orders to
#DATAFRAME OF ALL MONTHS between 2006-2017
#1 is used as placeholder for day as it date functioned needed a value for day
# Expands each pump station (FL) to one row per month from its start date,
# carrying the station's static attributes forward and filling missing
# fault counts with 0.
tmp2 <- expand.grid(day = "1", month=c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"), year=c("2006", "2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017"))
tmp2 <- paste(tmp2[,1], tmp2[,2],tmp2[,3], sep="/")
tmp2 <- as.Date(tmp2,format="%d/%m/%Y")
str(dat)
res <- NULL
# NOTE(review): growing res via rbind() inside the loop is O(n^2); for
# large FL counts collect the pieces in a list and bind once at the end.
for(name in unique(dat$FL)){
  tmp <- subset(dat, subset= FL==name)
  # Months from this station's start date onward.
  tmp4 <- data.frame(Month = tmp2[ tmp2 >= tmp$Start_Date[1]])
  tmp3 <- merge(tmp, tmp4, all=TRUE, sort=FALSE)
  # Back-fill the static attribute columns on the newly created month rows.
  tmp3 <- within(tmp3, {
    FL <- unique(FL[!is.na(FL)])
    Name <- unique(Name[!is.na(Name)])
    Superior.FL <- unique(Superior.FL[!is.na(Superior.FL)])
    Pump.station.Group <- unique(Pump.station.Group[!is.na(Pump.station.Group)])
    Suburb.Name <- unique(Suburb.Name[!is.na(Suburb.Name)])
    Installation.type.design <- unique(Installation.type.design[!is.na(Installation.type.design)])
    Pressure.Main.Diameter..mm. <- unique(Pressure.Main.Diameter..mm.[!is.na(Pressure.Main.Diameter..mm.)])
    Pressure.Main.Material <- unique(Pressure.Main.Material[!is.na(Pressure.Main.Material)])
    Pump.Brand <- unique(Pump.Brand[!is.na(Pump.Brand)])
    Pump_Type<- unique(Pump_Type[!is.na(Pump_Type)])
    Motor_Make.y<- unique(Motor_Make.y[!is.na(Motor_Make.y)])
    Motor_Rating_kw<- unique(Motor_Rating_kw[!is.na(Motor_Rating_kw)])
    No..of.Motor.Poles<- unique(No..of.Motor.Poles[!is.na(No..of.Motor.Poles)])
    Pump.Impeller.TYPE<- unique(Pump.Impeller.TYPE[!is.na(Pump.Impeller.TYPE)])
    Pump_Impeller.Model<- unique(Pump_Impeller.Model[!is.na(Pump_Impeller.Model)])
    Impeller_Throughlet<- unique(Impeller_Throughlet[!is.na(Impeller_Throughlet)])
    Impeller_Diameter<- unique(Impeller_Diameter[!is.na(Impeller_Diameter)])
    Design.Duty.FLOWRATE<- unique(Design.Duty.FLOWRATE[!is.na(Design.Duty.FLOWRATE)])
    Design_Duty_MHW_HEAD<- unique(Design_Duty_MHW_HEAD[!is.na(Design_Duty_MHW_HEAD)])
    Design.Duty.RPM.pumpspeed<- unique(Design.Duty.RPM.pumpspeed[!is.na(Design.Duty.RPM.pumpspeed)])
    Start_Date<- unique(Start_Date[!is.na(Start_Date)])
    start.up.YEAR<- unique(start.up.YEAR[!is.na(start.up.YEAR)])
    Motor_Rating_kw_LEVELS<- unique(Motor_Rating_kw_LEVELS[!is.na(Motor_Rating_kw_LEVELS)])
    start_up_decade<- unique(start_up_decade[!is.na(start_up_decade)])
    Failure_total_cohort<- unique(Failure_total_cohort[!is.na(Failure_total_cohort)])
  })
  res <- rbind(res, tmp3)
}
### #Replace zeros with NA's
# (Actually the reverse: months with no work order get a fault count of 0.)
res[c("Count.of.Obstruction.Event.Occurred")][is.na(res[c("Count.of.Obstruction.Event.Occurred")])] <- 0
### tmp3[c("Cost..")][is.na(tmp3[c("Cost..")])] <- 0
work_order_expanded <- data.frame(res)
str(work_order_expanded)
work_order_expanded$X12_year_total = NULL
saveRDS(work_order_expanded, file = "complete_poisson_dataset.rds")
write.csv(work_order_expanded, file= "complete_poisson_dataset.csv")
dat <-read.csv("complete_poisson_dataset.csv")
str(dat)
ggplot(dat, aes(as.numeric(dat$Count.of.Obstruction.Event.Occurred))) + geom_histogram(bins=50) +theme_minimal()+xlab("Count of total Monthly Faults")+scale_x_continuous(breaks=c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24))
| /Data_Cleaning.R | no_license | ellenmarinko/hon_thesis | R | false | false | 4,707 | r | library(data.table)
#Read in Final Attributes:
dat <- read.csv("attributes_edited.final.csv", header= TRUE)
dat$Start.Up.Date <- as.Date(dat$Start.Up.Date, "%d/%m/%Y")
dat$Start_Date <- dat$Start.Up.Date
dat$Start.Up.Date = NULL
#str(dat)
##CHANGING DATES TO READ from 1900's rather 2000s before 1970, as currently as.Date assumes 1955 is actually 2055.
#library(dplyr)
#dat$Start_Date<-(dat$Start.Up.Date %>% as.Date(format="%Y-%m-%d") %>%
# format("%y%m%d") %>%
# (function(d){
# paste0(ifelse(d>171231,"19","20"),d)
# }) %>%
# # as.Date("%Y%m%d"))
#str(dat)
#Removing original date column from dataset
#dat$Start.Up.Date = NULL
#all static attributes to attach to pump stations in work order.
dat_attributes <- data.frame(dat)
str(dat_attributes)
#JOINING WORK ORDER
#Work Order Dateset:
#Compiling Logistic regression Dataset
work_order <- read.csv("final_POISSION_workorders.csv", header=TRUE)
str(work_order)
work_order$Month <- as.Date(work_order$Month, format="%d/%m/%Y")
str(work_order)
##JOINING WORK ORDERS TO STATIC ATTRIBUTES:
test1 <- left_join(work_order, dat_attributes, by = c('FL'))
dat <- as.data.frame(test1)
str(dat)
saveRDS(dat, file = "poisson_fault_dataset.rds")
write.csv(dat, file= "poisson_fault_dataset.csv")
#######creating grid of dates to join work orders to
#DATAFRAME OF ALL MONTHS between 2006-2017
#1 is used as placeholder for day as it date functioned needed a value for day
tmp2 <- expand.grid(day = "1", month=c("01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"), year=c("2006", "2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017"))
tmp2 <- paste(tmp2[,1], tmp2[,2],tmp2[,3], sep="/")
tmp2 <- as.Date(tmp2,format="%d/%m/%Y")
str(dat)
res <- NULL
for(name in unique(dat$FL)){
tmp <- subset(dat, subset= FL==name)
tmp4 <- data.frame(Month = tmp2[ tmp2 >= tmp$Start_Date[1]])
tmp3 <- merge(tmp, tmp4, all=TRUE, sort=FALSE)
tmp3 <- within(tmp3, {
FL <- unique(FL[!is.na(FL)])
Name <- unique(Name[!is.na(Name)])
Superior.FL <- unique(Superior.FL[!is.na(Superior.FL)])
Pump.station.Group <- unique(Pump.station.Group[!is.na(Pump.station.Group)])
Suburb.Name <- unique(Suburb.Name[!is.na(Suburb.Name)])
Installation.type.design <- unique(Installation.type.design[!is.na(Installation.type.design)])
Pressure.Main.Diameter..mm. <- unique(Pressure.Main.Diameter..mm.[!is.na(Pressure.Main.Diameter..mm.)])
Pressure.Main.Material <- unique(Pressure.Main.Material[!is.na(Pressure.Main.Material)])
Pump.Brand <- unique(Pump.Brand[!is.na(Pump.Brand)])
Pump_Type<- unique(Pump_Type[!is.na(Pump_Type)])
Motor_Make.y<- unique(Motor_Make.y[!is.na(Motor_Make.y)])
Motor_Rating_kw<- unique(Motor_Rating_kw[!is.na(Motor_Rating_kw)])
No..of.Motor.Poles<- unique(No..of.Motor.Poles[!is.na(No..of.Motor.Poles)])
Pump.Impeller.TYPE<- unique(Pump.Impeller.TYPE[!is.na(Pump.Impeller.TYPE)])
Pump_Impeller.Model<- unique(Pump_Impeller.Model[!is.na(Pump_Impeller.Model)])
Impeller_Throughlet<- unique(Impeller_Throughlet[!is.na(Impeller_Throughlet)])
Impeller_Diameter<- unique(Impeller_Diameter[!is.na(Impeller_Diameter)])
Design.Duty.FLOWRATE<- unique(Design.Duty.FLOWRATE[!is.na(Design.Duty.FLOWRATE)])
Design_Duty_MHW_HEAD<- unique(Design_Duty_MHW_HEAD[!is.na(Design_Duty_MHW_HEAD)])
Design.Duty.RPM.pumpspeed<- unique(Design.Duty.RPM.pumpspeed[!is.na(Design.Duty.RPM.pumpspeed)])
Start_Date<- unique(Start_Date[!is.na(Start_Date)])
start.up.YEAR<- unique(start.up.YEAR[!is.na(start.up.YEAR)])
Motor_Rating_kw_LEVELS<- unique(Motor_Rating_kw_LEVELS[!is.na(Motor_Rating_kw_LEVELS)])
start_up_decade<- unique(start_up_decade[!is.na(start_up_decade)])
Failure_total_cohort<- unique(Failure_total_cohort[!is.na(Failure_total_cohort)])
})
res <- rbind(res, tmp3)
}
### #Replace zeros with NA's
res[c("Count.of.Obstruction.Event.Occurred")][is.na(res[c("Count.of.Obstruction.Event.Occurred")])] <- 0
### tmp3[c("Cost..")][is.na(tmp3[c("Cost..")])] <- 0
work_order_expanded <- data.frame(res)
str(work_order_expanded)
work_order_expanded$X12_year_total = NULL
saveRDS(work_order_expanded, file = "complete_poisson_dataset.rds")
write.csv(work_order_expanded, file= "complete_poisson_dataset.csv")
dat <-read.csv("complete_poisson_dataset.csv")
str(dat)
ggplot(dat, aes(as.numeric(dat$Count.of.Obstruction.Event.Occurred))) + geom_histogram(bins=50) +theme_minimal()+xlab("Count of total Monthly Faults")+scale_x_continuous(breaks=c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imTools.R
\name{summary.impinf}
\alias{summary.impinf}
\title{Summaries of \code{impinf} object}
\usage{
\method{summary}{impinf}(object, ...)
}
\arguments{
\item{object}{an object for which a summary is needed.}
\item{...}{additional arguments affecting the summary produced.}
}
\description{
Summarizing outputs produced from \code{update.impinf}.
}
| /man/summary.Rd | no_license | cran/imPois | R | false | true | 432 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imTools.R
\name{summary.impinf}
\alias{summary.impinf}
\title{Summaries of \code{impinf} object}
\usage{
\method{summary}{impinf}(object, ...)
}
\arguments{
\item{object}{an object for which a summary is needed.}
\item{...}{additional arguments affecting the summary produced.}
}
\description{
Summarizing outputs produced from \code{update.impinf}.
}
|
writeAnswer <- function(v, outputFileUNC) {
  # Write a Kaggle submission file: an "ArticleId,Labels" header followed by
  # one "<id>,<labels>" row per element of `v`. Ids count upward from 64858
  # (the first test-set ArticleId, per the original comment).
  #
  # v             character vector of label strings; leading whitespace is stripped
  # outputFileUNC path of the output file
  outputFile <- file(outputFileUNC)
  on.exit(close(outputFile), add = TRUE)  # close the connection even if writeLines() errors
  v.trim <- sub("^\\s+", "", v)           # strip leading whitespace only
  ids <- seq_along(v.trim) + 64857L       # seq_along() is safe for empty input (unlike 1:length())
  writeLines(c("ArticleId,Labels", sprintf("%d,%s", ids, v.trim)), outputFile)
}
| /writeAnswer.R | no_license | TBKelley/KAGGLE-WISE2014 | R | false | false | 266 | r | writeAnswer <- function(v, outputFileUNC) {
outputFile <- file(outputFileUNC)
v.trim <- sub("^\\s+", "", v)
writeLines(c("ArticleId,Labels", sprintf("%d,%s", (1:length(v.trim))+64857, v.trim)), outputFile) # 64858= First test id
close(outputFile)
}
|
library(phyclust)
### Name: paml.baseml
### Title: Phylogenetic Analysis by Maximum Likelihood for Nucleotide
### Sequences
### Aliases: paml.baseml paml.baseml.control paml.baseml.show.default
### 'Class baseml'
### Keywords: PAML
### ** Examples
## Not run:
##D library(phyclust, quiet = TRUE)
##D
##D paml.baseml.show.default()
##D
##D ### Generate data.
##D set.seed(123)
##D ret.ms <- ms(nsam = 5, nreps = 1, opts = "-T")
##D ret.seqgen <- seqgen(opts = "-mHKY -l40 -s0.2", newick.tree = ret.ms[3])
##D (ret.nucleotide <- read.seqgen(ret.seqgen))
##D X <- ret.nucleotide$org
##D seqname <- ret.nucleotide$seqname
##D
##D ### Run baseml.
##D opts <- paml.baseml.control(model = 4, clock = 1)
##D (ret.baseml <- paml.baseml(X, seqname = seqname, opts = opts))
##D (ret.baseml.init <- paml.baseml(X, seqname = seqname, opts = opts,
##D newick.trees = ret.ms[3]))
##D ret.ms[3]
##D
##D ### Unrooted tree.
##D opts <- paml.baseml.control(model = 4)
##D (ret.baseml.unrooted <- paml.baseml(X, seqname = seqname, opts = opts))
##D
##D ### More information.
##D opts <- paml.baseml.control(noisy = 3, verbose = 1, model = 4, clock = 1)
##D ret.more <- paml.baseml(X, seqname = seqname, opts = opts)
##D # ret.more$stdout
##D
##D ### Plot trees
##D par(mfrow = c(2, 2))
##D plot(read.tree(text = ret.ms[3]), main = "true")
##D plot(read.tree(text = ret.baseml$best.tree), main = "baseml")
##D plot(read.tree(text = ret.baseml.init$best.tree), main = "baseml with initial")
##D plot(unroot(read.tree(text = ret.baseml.unrooted$best.tree)),
##D main = "baseml unrooted")
## End(Not run)
| /data/genthat_extracted_code/phyclust/examples/paml.baseml.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,607 | r | library(phyclust)
### Name: paml.baseml
### Title: Phylogenetic Analysis by Maximum Likelihood for Nucleotide
### Sequences
### Aliases: paml.baseml paml.baseml.control paml.baseml.show.default
### 'Class baseml'
### Keywords: PAML
### ** Examples
## Not run:
##D library(phyclust, quiet = TRUE)
##D
##D paml.baseml.show.default()
##D
##D ### Generate data.
##D set.seed(123)
##D ret.ms <- ms(nsam = 5, nreps = 1, opts = "-T")
##D ret.seqgen <- seqgen(opts = "-mHKY -l40 -s0.2", newick.tree = ret.ms[3])
##D (ret.nucleotide <- read.seqgen(ret.seqgen))
##D X <- ret.nucleotide$org
##D seqname <- ret.nucleotide$seqname
##D
##D ### Run baseml.
##D opts <- paml.baseml.control(model = 4, clock = 1)
##D (ret.baseml <- paml.baseml(X, seqname = seqname, opts = opts))
##D (ret.baseml.init <- paml.baseml(X, seqname = seqname, opts = opts,
##D newick.trees = ret.ms[3]))
##D ret.ms[3]
##D
##D ### Unrooted tree.
##D opts <- paml.baseml.control(model = 4)
##D (ret.baseml.unrooted <- paml.baseml(X, seqname = seqname, opts = opts))
##D
##D ### More information.
##D opts <- paml.baseml.control(noisy = 3, verbose = 1, model = 4, clock = 1)
##D ret.more <- paml.baseml(X, seqname = seqname, opts = opts)
##D # ret.more$stdout
##D
##D ### Plot trees
##D par(mfrow = c(2, 2))
##D plot(read.tree(text = ret.ms[3]), main = "true")
##D plot(read.tree(text = ret.baseml$best.tree), main = "baseml")
##D plot(read.tree(text = ret.baseml.init$best.tree), main = "baseml with initial")
##D plot(unroot(read.tree(text = ret.baseml.unrooted$best.tree)),
##D main = "baseml unrooted")
## End(Not run)
|
####### gPLS
# Setup for a manual group-PLS (gPLS) implementation: load the reference
# package and the simulated X/Y data, then standardise both matrices.
# library(mixOmics)
library(sgPLS)
#---------------------------- DATA ---------------------------------#
# Load data
# NOTE(review): hard-coded absolute setwd() makes this script
# machine-specific; prefer a project-relative path.
setwd('/Users/raphaelsinclair/Desktop/MSc Health Data Analytics - IC/HDA/SPH030 - Translational Data Sciences/Project/Data')
X <- read.csv('Xdata_simulated.csv')
Y <- read.csv('Ydata_simulated.csv')
# X <- scale(X)
# Y <- scale(Y)
X.s <- scale(as.matrix(X))  # column-standardised copies used by the manual gPLS below
Y.s <- scale(as.matrix(Y))
gPLSreg <- function(X, Y, n_components, keepX_group, keepY_group = NULL, ind.block.x, ind.block.y = NULL, tol = 1e-06, max_iter = 100) {
# X = Input data with predictors as columns and observations/samples as rows. This is coerced into a matrix
# Y = Output data with outcomes as columns and observations/samples as rows. This is coerced into a matrix
# n_components = The number of components considered for the PLS regression algorithm
# keepX_group = A vector of length n_components which enforeces sparsity on X. The hth entry corresponds to how many groups to keep for the hth component.
# keepY_group = A vector of length n_components which enforeces sparsity on Y. The hth entry corresponds to how many groups to keep for the hth component. Default is NULL (i.e. no sparsity on Y).
# ind.block.x = A vector of column indices denoting the split points (inclusive) of variables for each group in X.
# ind.block.y = A vector of column indices denoting the split points (inclusive) of variables for each group in Y.
# tol = The tolerance set for the condition of convergence in the iterative step. Default is 10^-6.
# max_iter = The maximum number of iterations for the iterative process to run. Default is 100 iterations.
# OPTIONAL
# trunc_adjust = Boolean to enable adjustment for truncation error. TRUE gives a harder threshold within the soft-thresholding function. FALSE gives the original soft-thresholding function. Default is TRUE.
#==================== Initial checks ====================#
# Coerce data into matrices to store original data
X <- as.matrix(X)
Y <- as.matrix(Y)
# Check data
if (length(dim(X)) != 2) {
stop('Check dimensions of X')
}
if (length(dim(Y)) > 2) {
stop('Check dimensions of Y')
}
if (nrow(X) != nrow(Y)) {
stop('Number of observations in X and Y do not match')
}
if (n_components > min(nrow(X), ncol(X))) {
stop('Exceeded maximum number of components')
}
if (length(keepX_group) != n_components) {
stop('Length of keepX does not match number of components')
}
if (!is.null(keepY_group) && length(keepY_group) != n_components) {
stop('Length of keepY does not match number of components')
}
if (sum(keepX_group > (length(ind.block.x) + 1)) != 0) {
stop('keepX_group exceeds the number of groups in X')
}
if (sum(keepY_group > (length(ind.block.y) + 1)) != 0) {
stop('keepY_group exceeds the number of groups in Y')
}
#==================== Initiate items ====================#
# Carry out algorithm on X_h, Y_h matices
X_h <- as.matrix(X)
Y_h <- as.matrix(Y)
# Dimensions of data
n <- nrow(X)
p <- ncol(X)
q <- ncol(Y)
k = length(ind.block.x) + 1
l = length(ind.block.y) + 1
# Column/row names
if (is.null(rownames(X))) {
if (is.null(rownames(Y))) {
row_names <- c(1:n)
} else {
row_names <- rownames(Y)
}
} else {
row_names <- rownames(X)
}
if (is.null(colnames(X))) {
x.col_names <- c(paste0('X', seq(p)))
} else {
x.col_names <- colnames(X)
}
if (is.null(colnames(Y))) {
y.col_names <- c(paste0('Y', seq(q)))
} else {
y.col_names <- colnames(Y)
}
# Create items to store results and assign row/column names
Eta <- matrix(NA, nrow = n, ncol = n_components, dimnames = list(row_names, paste0('comp ', seq(n_components)))) # X scores
Omega <- matrix(NA, nrow = n, ncol = n_components, dimnames = list(row_names, paste0('comp ', seq(n_components)))) # Y scores
U <- matrix(NA, nrow = p, ncol = n_components, dimnames = list(x.col_names, paste0('comp ', seq(n_components)))) # X loadings
V <- matrix(NA, nrow = q, ncol = n_components, dimnames = list(y.col_names, paste0('comp ', seq(n_components)))) # Y loadings
C <- matrix(NA, nrow = p, ncol = n_components, dimnames = list(x.col_names, paste0('comp ', seq(n_components)))) # Regression coefficient on latent variables (for X_h)
D <- matrix(NA, nrow = q, ncol = n_components, dimnames = list(y.col_names, paste0('comp ', seq(n_components)))) # Regression coefficient on latent variables in 'Regression mode' (for Y_h)
# E <- matrix(NA, nrow = q, ncol = n_components, dimnames = list(y.col_names, paste0('comp ', seq(n_components)))) # Regression coefficient on latent variables in 'PLS-mode A' (for Y_h)
iter.comp <- matrix(NA, nrow = 1, ncol = n_components, dimnames = list(NULL, paste0('comp ', seq(n_components)))) # Stores iterations for each component
# Number of groups to penalise
x_sparsity <- rep(k, n_components) - keepX_group
if (is.null(ind.block.y)) {
y_sparsity <- rep(0, n_components)
} else {
y_sparsity <- rep(l, n_components) - keepY_group
}
#==================== Create blocks of X and Y data ====================#
# Calculate group indices from ind.block.x/ind.block.y (can more intuitively get group information from this vector
# instead of ind.block.x/ind.block.y)
ind.x <- c(0,ind.block.x,ncol(X_h))
ind.y <- c(0,ind.block.y,ncol(Y_h))
# p_k/q_l holds the number of variables in each group for X and Y respectively
# x.blocks/y.blocks holds indices for the groups of variables in X and Y respectively
x.blocks <- list()
p_k <- NULL
for (index in 1:k) {
p_k[index] = ind.x[index + 1] - ind.x[index]
x.blocks[[index]] = c((ind.x[index] + 1):ind.x[index + 1])
}
y.blocks <- list()
q_l <- NULL
for (index in 1:l) {
q_l[index] = ind.y[index + 1] - ind.y[index]
y.blocks[[index]] = ((ind.y[index] + 1):ind.y[index + 1])
}
# # Set penalty tolerance (i.e. heavyside function). 'trunc_adjust' = TRUE gives tolerance
# if (trunc_adjust == TRUE) {
# pen_tol <- .Machine$double.eps ^ 0.5
# } else {
# pen_tol <- 0
# }
pen_tol <- 0
#==================== Loop over components ====================#
# Initiate first component and loop over defined number of components
for (h in (1:n_components)) {
# Compute matrix M (p x q matrix)
M <- t(X_h) %*% Y_h
#==================== Tune component ====================#
# WARNING: Truncation errors may occur from long format numbers in calculations. These can be carried over during iterative steps.
# NOTE: Unlike sPLS, we penalise the number of groups involved in producing the component. A similar procedure occurs
# when calculating the penalties although the group penalties are dependent on the size of the group it belongs to
# so this is calculated for each block structure.
# Find SVD of M and find loadings from first pair of singular vectors
M_decomp <- svd(M, nu = 1, nv = 1)
u_old <- M_decomp$u
v_old <- M_decomp$v
# Initiate iterables
counter <- 0
u_diff <- dim(X_h)[2]
v_diff <- dim(X_h)[2]
# Loop until convergence of u and v or max iteration
while ((sum(abs(u_diff) > tol) != 0 || sum(abs(v_diff) > tol) != 0) && counter < max_iter + 1) {
# Calculate the projection of v on M to produce the X loadings candidate
M_v <- M %*% v_old
# Calculate group lasso penalties using the entries correspeonding to each group from the projection vector
# and add to x.penalty vector
x.penalties <- NULL
for (group in 1:k) {
vec <- M_v[(x.blocks[[group]])]
x.penalties <- c(x.penalties, 2*sqrt(sum(vec^2))/sqrt(p_k[group]))
}
# Convert number of penalised groups in X into sparsity parameter based on group lasso penalties
if (x_sparsity[h] == 0) {
lambda_x <- 0
} else {
lambda_x <- sort(x.penalties)[x_sparsity[h]]
}
# Optimise u iteratively for each group (and normalise)
tmp <- NULL
for (group in 1:k) {
vec <- M_v[(x.blocks[[group]])]
pen <- 1 - (lambda_x/x.penalties[group])
if (pen < pen_tol) {pen <- 0}
tmp <- c(tmp, pen*vec)
}
u_new = tmp / sqrt(sum(tmp^2))
# Calculate the projection of u on M to produce the Y loadings candidate
M_u <- t(M) %*% u_new
# Calculate group lasso penalties using the entries correspeonding to each group from the projection vector
# and add to y.penalty vector
y.penalties <- NULL
for (group in 1:l) {
vec <- M_u[(y.blocks[[group]])]
y.penalties <- c(y.penalties, 2*sqrt(sum(vec^2))/sqrt(q_l[group]))
}
# Convert number of penalised groups in Y into sparsity parameter based on group lasso penalties
if (y_sparsity[h] == 0) {
lambda_y <- 0
} else {
lambda_y <- sort(y.penalties)[y_sparsity[h]]
}
# Optimise v iteratively for each group (and normalise)
tmp <- NULL
for (group in 1:l) {
vec <- M_u[(y.blocks[[group]])]
pen <- 1 - (lambda_y/y.penalties[group])
if (pen < pen_tol) {pen <- 0}
tmp <- c(tmp, pen*vec)
}
v_new = tmp / sqrt(sum(tmp^2))
# Update iterables
u_diff = u_new - u_old
v_diff = v_new - v_old
u_old = u_new
v_old = v_new
counter = counter + 1
}
# Check convergence
if (counter == max_iter + 1) {
warning(paste0('Warning! Max iteration reached. No convergence for component ', h))
}
# Add number of iterations to vector
iter.comp[, h] <- counter
#==================== Deflation step ====================#
# Calculate scores/latent variables for X and Y
eta = as.vector(X_h %*% u_new) / sum(u_new^2)
omega = as.vector(Y_h %*% v_new) / sum(v_new^2)
# Calculate regression coefficients
c = as.vector(t(X_h) %*% eta) / sum(eta^2)
d = as.vector(t(Y_h) %*% eta) / sum(eta^2)
# e = as.vector(t(Y_h) %*% omega) / sum(omega^2)
# Deflate X and Y matrices using latent variables and regression coefficients
X_h <- X_h - (eta %*% t(c))
Y_h <- Y_h - (eta %*% t(d))
# Store variables
Eta[, h] <- eta
Omega[, h] <- omega
U[, h] <- u_new
V[, h] <- v_new
C[, h] <- c
D[, h] <- d
# E[, h] <- e
}
#==================== Form predictions using results ====================#
# Create function for prediction
# ??????
#========================================================================#
# Return final outputs
cl = match.call()
x.block = list(ind = x.blocks, size = p_k)
y.block = list(ind = y.blocks, size = q_l)
result <- list(call = cl, n_components = n_components, keepX_group = keepX_group, keepY_group = keepY_group,
ind.block.x = ind.block.x, ind.block.y = ind.block.y,
data = list(X = X, Y = Y), blocks = list(x.block = x.block, y.block = y.block),
scores = list(X.scores = Eta, Y.scores = Omega),
loadings = list(X.loadings = U, Y.loadings = V), defl.coefs = list(C = C, D = D), iterations = iter.comp,
names = list(sample = row_names, X.columns = x.col_names, Y.columns = y.col_names), tol = tol)
return(invisible(result))
}
#=================================================#
# Script: fit the manual gPLS implementation and compare against sgPLS::gPLS
n_components <- 2
# Define ind.block.x/ind.block.y (i.e. vector of indices denoting the end of each group inclusive
# e.g. ind.block.x = c(6, 16) <==> 3 groups s.t. group 1 = 1-6, group 2 = 7-16, group 3 = 17-ncol(X))
ind.block.x <- seq(20, 380, 20)
ind.block.y <- seq(20, 480, 20)
# Select keepX_group/keepY_group variables (i.e. number of groups to keep in each component)
# keepY_group = rep(l, n_components)
keepX_group = c(4, 4)
keepY_group = c(4, 4)
#### gPLS model
# NOTE(review): gPLSreg() as defined in this file does not declare a 'trunc_adjust'
# formal argument, so the two calls below fail with "unused argument" unless that
# parameter is (re)introduced in the function signature.
t0 <- Sys.time()
test <- gPLSreg(X.s,Y.s,n_components = n_components, keepX_group = keepX_group, keepY_group = keepY_group, ind.block.x = ind.block.x, ind.block.y = ind.block.y, trunc_adjust = TRUE)
t1 <- Sys.time()
print(t1 - t0)
# Time the reference sgPLS implementation for comparison (scale = TRUE scales internally)
t2 <- Sys.time()
model.gPLS <- gPLS(X, Y, ncomp = n_components, mode = "regression", keepX = keepX_group,
keepY = keepY_group, ind.block.x = ind.block.x , ind.block.y = ind.block.y, scale = TRUE)
t3 <- Sys.time()
print(t3 - t2)
# Same manual fit without the truncation adjustment, for comparison
test2 <- gPLSreg(X.s,Y.s,n_components = n_components, keepX_group = keepX_group, keepY_group = keepY_group, ind.block.x = ind.block.x, ind.block.y = ind.block.y, trunc_adjust = FALSE)
| /Code/R/gPLS.R | no_license | puczilka/PLS | R | false | false | 12,849 | r | ####### gPLS
# mixOmics also provides (s)PLS implementations; only sgPLS is needed here
# library(mixOmics)
library(sgPLS)
#---------------------------- DATA ---------------------------------#
# Load data
# NOTE(review): setwd() with an absolute user-specific path breaks portability;
# consider relative paths or the here package — TODO confirm intended workflow
setwd('/Users/raphaelsinclair/Desktop/MSc Health Data Analytics - IC/HDA/SPH030 - Translational Data Sciences/Project/Data')
X <- read.csv('Xdata_simulated.csv')
Y <- read.csv('Ydata_simulated.csv')
# X <- scale(X)
# Y <- scale(Y)
# Centred and scaled copies used by the manual implementation below
X.s <- scale(as.matrix(X))
Y.s <- scale(as.matrix(Y))
#-------------------- MANUAL gPLS (penalising groups of variables) --------------------#
gPLSreg <- function(X, Y, n_components, keepX_group, keepY_group = NULL, ind.block.x, ind.block.y = NULL,
                    tol = 1e-06, max_iter = 100, trunc_adjust = TRUE) {
  # Manual group-PLS (gPLS) regression: sparse PLS that penalises whole groups of
  # variables (group lasso penalty) when building each component.
  #
  # X = Input data with predictors as columns and observations/samples as rows. This is coerced into a matrix
  # Y = Output data with outcomes as columns and observations/samples as rows. This is coerced into a matrix
  # n_components = The number of components considered for the PLS regression algorithm
  # keepX_group = A vector of length n_components which enforces sparsity on X. The hth entry corresponds to how many groups to keep for the hth component.
  # keepY_group = A vector of length n_components which enforces sparsity on Y. The hth entry corresponds to how many groups to keep for the hth component. Default is NULL (i.e. no sparsity on Y).
  # ind.block.x = A vector of column indices denoting the split points (inclusive) of variables for each group in X.
  # ind.block.y = A vector of column indices denoting the split points (inclusive) of variables for each group in Y.
  # tol = The tolerance set for the condition of convergence in the iterative step. Default is 10^-6.
  # max_iter = The maximum number of iterations for the iterative process to run. Default is 100 iterations.
  # trunc_adjust = Boolean to enable adjustment for truncation error. TRUE gives a harder threshold within the
  #   soft-thresholding step (tolerance of sqrt machine epsilon); FALSE compares exactly with zero.
  #   (Fix: this parameter was documented and passed by callers but missing from the signature,
  #   which made every call supplying it fail with "unused argument". Added with default TRUE,
  #   keeping backward compatibility for callers that omit it.)

  #==================== Initial checks ====================#
  # Coerce data into matrices to store original data
  X <- as.matrix(X)
  Y <- as.matrix(Y)
  # Check data
  if (length(dim(X)) != 2) {
    stop('Check dimensions of X')
  }
  if (length(dim(Y)) > 2) {
    stop('Check dimensions of Y')
  }
  if (nrow(X) != nrow(Y)) {
    stop('Number of observations in X and Y do not match')
  }
  if (n_components > min(nrow(X), ncol(X))) {
    stop('Exceeded maximum number of components')
  }
  if (length(keepX_group) != n_components) {
    stop('Length of keepX does not match number of components')
  }
  if (!is.null(keepY_group) && length(keepY_group) != n_components) {
    stop('Length of keepY does not match number of components')
  }
  if (sum(keepX_group > (length(ind.block.x) + 1)) != 0) {
    stop('keepX_group exceeds the number of groups in X')
  }
  if (sum(keepY_group > (length(ind.block.y) + 1)) != 0) {
    stop('keepY_group exceeds the number of groups in Y')
  }
  #==================== Initiate items ====================#
  # Carry out algorithm on X_h, Y_h matrices (deflated in place component by component)
  X_h <- as.matrix(X)
  Y_h <- as.matrix(Y)
  # Dimensions of data
  n <- nrow(X)
  p <- ncol(X)
  q <- ncol(Y)
  k <- length(ind.block.x) + 1
  l <- length(ind.block.y) + 1
  # Column/row names (fall back to Y's row names, then plain indices)
  if (is.null(rownames(X))) {
    if (is.null(rownames(Y))) {
      row_names <- c(1:n)
    } else {
      row_names <- rownames(Y)
    }
  } else {
    row_names <- rownames(X)
  }
  if (is.null(colnames(X))) {
    x.col_names <- c(paste0('X', seq(p)))
  } else {
    x.col_names <- colnames(X)
  }
  if (is.null(colnames(Y))) {
    y.col_names <- c(paste0('Y', seq(q)))
  } else {
    y.col_names <- colnames(Y)
  }
  # Create items to store results and assign row/column names
  Eta <- matrix(NA, nrow = n, ncol = n_components, dimnames = list(row_names, paste0('comp ', seq(n_components)))) # X scores
  Omega <- matrix(NA, nrow = n, ncol = n_components, dimnames = list(row_names, paste0('comp ', seq(n_components)))) # Y scores
  U <- matrix(NA, nrow = p, ncol = n_components, dimnames = list(x.col_names, paste0('comp ', seq(n_components)))) # X loadings
  V <- matrix(NA, nrow = q, ncol = n_components, dimnames = list(y.col_names, paste0('comp ', seq(n_components)))) # Y loadings
  C <- matrix(NA, nrow = p, ncol = n_components, dimnames = list(x.col_names, paste0('comp ', seq(n_components)))) # Regression coefficient on latent variables (for X_h)
  D <- matrix(NA, nrow = q, ncol = n_components, dimnames = list(y.col_names, paste0('comp ', seq(n_components)))) # Regression coefficient on latent variables in 'Regression mode' (for Y_h)
  # E <- matrix(NA, nrow = q, ncol = n_components, dimnames = list(y.col_names, paste0('comp ', seq(n_components)))) # Regression coefficient on latent variables in 'PLS-mode A' (for Y_h)
  iter.comp <- matrix(NA, nrow = 1, ncol = n_components, dimnames = list(NULL, paste0('comp ', seq(n_components)))) # Stores iterations for each component
  # Number of groups to penalise per component
  x_sparsity <- rep(k, n_components) - keepX_group
  if (is.null(ind.block.y)) {
    y_sparsity <- rep(0, n_components)
  } else {
    y_sparsity <- rep(l, n_components) - keepY_group
  }
  #==================== Create blocks of X and Y data ====================#
  # Calculate group indices from ind.block.x/ind.block.y (can more intuitively get group information
  # from this vector instead of ind.block.x/ind.block.y)
  ind.x <- c(0, ind.block.x, ncol(X_h))
  ind.y <- c(0, ind.block.y, ncol(Y_h))
  # p_k/q_l holds the number of variables in each group for X and Y respectively
  # x.blocks/y.blocks holds indices for the groups of variables in X and Y respectively
  x.blocks <- list()
  p_k <- NULL
  for (index in seq_len(k)) {
    p_k[index] <- ind.x[index + 1] - ind.x[index]
    x.blocks[[index]] <- c((ind.x[index] + 1):ind.x[index + 1])
  }
  y.blocks <- list()
  q_l <- NULL
  for (index in seq_len(l)) {
    q_l[index] <- ind.y[index + 1] - ind.y[index]
    y.blocks[[index]] <- ((ind.y[index] + 1):ind.y[index + 1])
  }
  # Set penalty tolerance (i.e. heavyside function). 'trunc_adjust' = TRUE gives a small positive
  # tolerance so that penalty multipliers that are zero only up to floating-point truncation error
  # are still shrunk exactly to zero.
  if (isTRUE(trunc_adjust)) {
    pen_tol <- .Machine$double.eps ^ 0.5
  } else {
    pen_tol <- 0
  }
  #==================== Loop over components ====================#
  # Initiate first component and loop over defined number of components
  for (h in (1:n_components)) {
    # Compute matrix M (p x q matrix)
    M <- t(X_h) %*% Y_h
    #==================== Tune component ====================#
    # WARNING: Truncation errors may occur from long format numbers in calculations. These can be carried over during iterative steps.
    # NOTE: Unlike sPLS, we penalise the number of groups involved in producing the component. A similar procedure occurs
    #       when calculating the penalties although the group penalties are dependent on the size of the group it belongs to
    #       so this is calculated for each block structure.
    # Find SVD of M and find loadings from first pair of singular vectors
    M_decomp <- svd(M, nu = 1, nv = 1)
    u_old <- M_decomp$u
    v_old <- M_decomp$v
    # Initiate iterables
    counter <- 0
    u_diff <- dim(X_h)[2]
    v_diff <- dim(X_h)[2]
    # Loop until convergence of u and v or max iteration
    while ((sum(abs(u_diff) > tol) != 0 || sum(abs(v_diff) > tol) != 0) && counter < max_iter + 1) {
      # Calculate the projection of v on M to produce the X loadings candidate
      M_v <- M %*% v_old
      # Calculate group lasso penalties using the entries corresponding to each group from the projection vector
      # and add to x.penalty vector
      x.penalties <- NULL
      for (group in seq_len(k)) {
        vec <- M_v[(x.blocks[[group]])]
        x.penalties <- c(x.penalties, 2*sqrt(sum(vec^2))/sqrt(p_k[group]))
      }
      # Convert number of penalised groups in X into sparsity parameter based on group lasso penalties
      if (x_sparsity[h] == 0) {
        lambda_x <- 0
      } else {
        lambda_x <- sort(x.penalties)[x_sparsity[h]]
      }
      # Optimise u iteratively for each group (and normalise)
      tmp <- NULL
      for (group in seq_len(k)) {
        vec <- M_v[(x.blocks[[group]])]
        pen <- 1 - (lambda_x/x.penalties[group])
        if (pen < pen_tol) {pen <- 0}
        tmp <- c(tmp, pen*vec)
      }
      u_new <- tmp / sqrt(sum(tmp^2))
      # Calculate the projection of u on M to produce the Y loadings candidate
      M_u <- t(M) %*% u_new
      # Calculate group lasso penalties using the entries corresponding to each group from the projection vector
      # and add to y.penalty vector
      y.penalties <- NULL
      for (group in seq_len(l)) {
        vec <- M_u[(y.blocks[[group]])]
        y.penalties <- c(y.penalties, 2*sqrt(sum(vec^2))/sqrt(q_l[group]))
      }
      # Convert number of penalised groups in Y into sparsity parameter based on group lasso penalties
      if (y_sparsity[h] == 0) {
        lambda_y <- 0
      } else {
        lambda_y <- sort(y.penalties)[y_sparsity[h]]
      }
      # Optimise v iteratively for each group (and normalise)
      tmp <- NULL
      for (group in seq_len(l)) {
        vec <- M_u[(y.blocks[[group]])]
        pen <- 1 - (lambda_y/y.penalties[group])
        if (pen < pen_tol) {pen <- 0}
        tmp <- c(tmp, pen*vec)
      }
      v_new <- tmp / sqrt(sum(tmp^2))
      # Update iterables
      u_diff <- u_new - u_old
      v_diff <- v_new - v_old
      u_old <- u_new
      v_old <- v_new
      counter <- counter + 1
    }
    # Check convergence
    if (counter == max_iter + 1) {
      warning(paste0('Warning! Max iteration reached. No convergence for component ', h))
    }
    # Add number of iterations to vector
    iter.comp[, h] <- counter
    #==================== Deflation step ====================#
    # Calculate scores/latent variables for X and Y
    eta <- as.vector(X_h %*% u_new) / sum(u_new^2)
    omega <- as.vector(Y_h %*% v_new) / sum(v_new^2)
    # Calculate regression coefficients
    c <- as.vector(t(X_h) %*% eta) / sum(eta^2)
    d <- as.vector(t(Y_h) %*% eta) / sum(eta^2)
    # e <- as.vector(t(Y_h) %*% omega) / sum(omega^2)
    # Deflate X and Y matrices using latent variables and regression coefficients
    X_h <- X_h - (eta %*% t(c))
    Y_h <- Y_h - (eta %*% t(d))
    # Store variables
    Eta[, h] <- eta
    Omega[, h] <- omega
    U[, h] <- u_new
    V[, h] <- v_new
    C[, h] <- c
    D[, h] <- d
    # E[, h] <- e
  }
  #==================== Form predictions using results ====================#
  # Create function for prediction
  # ??????
  #========================================================================#
  # Return final outputs (invisible so printing the call does not dump the whole list)
  cl <- match.call()
  x.block <- list(ind = x.blocks, size = p_k)
  y.block <- list(ind = y.blocks, size = q_l)
  result <- list(call = cl, n_components = n_components, keepX_group = keepX_group, keepY_group = keepY_group,
                 ind.block.x = ind.block.x, ind.block.y = ind.block.y,
                 data = list(X = X, Y = Y), blocks = list(x.block = x.block, y.block = y.block),
                 scores = list(X.scores = Eta, Y.scores = Omega),
                 loadings = list(X.loadings = U, Y.loadings = V), defl.coefs = list(C = C, D = D), iterations = iter.comp,
                 names = list(sample = row_names, X.columns = x.col_names, Y.columns = y.col_names), tol = tol)
  return(invisible(result))
}
#=================================================#
# Script: fit the manual gPLS implementation and compare against sgPLS::gPLS
n_components <- 2
# Define ind.block.x/ind.block.y (i.e. vector of indices denoting the end of each group inclusive
# e.g. ind.block.x = c(6, 16) <==> 3 groups s.t. group 1 = 1-6, group 2 = 7-16, group 3 = 17-ncol(X))
ind.block.x <- seq(20, 380, 20)
ind.block.y <- seq(20, 480, 20)
# Select keepX_group/keepY_group variables (i.e. number of groups to keep in each component)
# keepY_group = rep(l, n_components)
keepX_group = c(4, 4)
keepY_group = c(4, 4)
#### gPLS model
# NOTE(review): gPLSreg() as defined above does not declare a 'trunc_adjust'
# formal argument, so the two calls below fail with "unused argument" unless
# that parameter is (re)introduced in the function signature.
t0 <- Sys.time()
test <- gPLSreg(X.s,Y.s,n_components = n_components, keepX_group = keepX_group, keepY_group = keepY_group, ind.block.x = ind.block.x, ind.block.y = ind.block.y, trunc_adjust = TRUE)
t1 <- Sys.time()
print(t1 - t0)
# Time the reference sgPLS implementation for comparison (scale = TRUE scales internally)
t2 <- Sys.time()
model.gPLS <- gPLS(X, Y, ncomp = n_components, mode = "regression", keepX = keepX_group,
keepY = keepY_group, ind.block.x = ind.block.x , ind.block.y = ind.block.y, scale = TRUE)
t3 <- Sys.time()
print(t3 - t2)
# Same manual fit without the truncation adjustment, for comparison
test2 <- gPLSreg(X.s,Y.s,n_components = n_components, keepX_group = keepX_group, keepY_group = keepY_group, ind.block.x = ind.block.x, ind.block.y = ind.block.y, trunc_adjust = FALSE)
|
# Exercise 1: creating data frames
# Create a vector of the number of points the Seahawks scored in the first 4 games
# of the season (google "Seahawks" for the scores!)
points <- c(26, 21, 42, 30)
# Create a vector of the number of points the Seahawks have allowed to be scored
# against them in each of the first 4 games of the season
allowed_points <- c(24, 12, 7, 24)
# Combine your two vectors into a dataframe called `games`
games <- data.frame(points, allowed_points)
# Create a new column "diff" that is the difference in points between the teams.
# Fix: was `games$point`, which only worked through R's partial matching of `$` names.
games$diff <- games$points - games$allowed_points
# Create a new column "won" which is TRUE if the Seahawks won the game.
# Fix: `games%point > games%allowed_points` used `%` instead of `$`; R parsed it as an
# undefined `%...%` infix operator and raised an error.
games$won <- games$points > games$allowed_points
# Create a vector of the opponent names corresponding to the games played
opponent_names <- c("Cardinals", "Cowboys", "Rams", "Jaguars")
# Assign your dataframe rownames of their opponents
rownames(games) <- opponent_names
# View your data frame to see how it has changed!
View(games)
| /exercise-1/exercise.R | permissive | ericngg/ch9-data-frames | R | false | false | 1,120 | r | # Exercise 1: creating data frames
# Create a vector of the number of points the Seahawks scored in the first 4 games
# of the season (google "Seahawks" for the scores!)
points <- c(26, 21, 42, 30)
# Create a vector of the number of points the Seahawks have allowed to be scored
# against them in each of the first 4 games of the season
allowed_points <- c(24, 12, 7, 24)
# Combine your two vectors into a dataframe called `games`
games <- data.frame(points, allowed_points)
# Create a new column "diff" that is the difference in points between the teams.
# Fix: was `games$point`, which only worked through R's partial matching of `$` names.
games$diff <- games$points - games$allowed_points
# Create a new column "won" which is TRUE if the Seahawks won the game.
# Fix: `games%point > games%allowed_points` used `%` instead of `$`; R parsed it as an
# undefined `%...%` infix operator and raised an error.
games$won <- games$points > games$allowed_points
# Create a vector of the opponent names corresponding to the games played
opponent_names <- c("Cardinals", "Cowboys", "Rams", "Jaguars")
# Assign your dataframe rownames of their opponents
rownames(games) <- opponent_names
# View your data frame to see how it has changed!
View(games)
|
#:# libraries
# digest: model hashing; mlr: modelling framework; OpenML/farff: dataset download and ARFF parsing
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the cross-validation folds (and hence the audit measures) are reproducible
set.seed(1)
#:# data
# Download the heart-statlog dataset from OpenML (network access required)
dataset <- getOMLDataSet(data.name = "heart-statlog")
head(dataset$data)
#:# preprocessing
# No preprocessing applied; the raw data is inspected again unchanged
head(dataset$data)
#:# model
# Binary classification task with a glmnet logistic-regression learner, probability predictions
task = makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn = makeLearner("classif.glmnet", par.vals = list(type.logistic = "modified.Newton", type.multinomial = "ungrouped"), predict.type = "prob")
#:# hash
#:# e7651eb232f0a1fa777e21896bb08fd4
# Hash of (task, learner) identifies this exact model configuration
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation, aggregating several classification measures
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Record the session info to a file for reproducibility
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
| /models/openml_heart-statlog/classification_class/e7651eb232f0a1fa777e21896bb08fd4/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 752 | r | #:# libraries
# digest: model hashing; mlr: modelling framework; OpenML/farff: dataset download and ARFF parsing
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the cross-validation folds (and hence the audit measures) are reproducible
set.seed(1)
#:# data
# Download the heart-statlog dataset from OpenML (network access required)
dataset <- getOMLDataSet(data.name = "heart-statlog")
head(dataset$data)
#:# preprocessing
# No preprocessing applied; the raw data is inspected again unchanged
head(dataset$data)
#:# model
# Binary classification task with a glmnet logistic-regression learner, probability predictions
task = makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn = makeLearner("classif.glmnet", par.vals = list(type.logistic = "modified.Newton", type.multinomial = "ungrouped"), predict.type = "prob")
#:# hash
#:# e7651eb232f0a1fa777e21896bb08fd4
# Hash of (task, learner) identifies this exact model configuration
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation, aggregating several classification measures
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Record the session info to a file for reproducibility
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
# Shiny server: scatter plot of two user-selected mtcars columns, coloured by a third
library(shiny)
library(ggplot2)
data("mtcars")
# Server function; expects inputs VarX, VarY and Color (column names of mtcars)
function(input,output) {
output$Scatter <- renderPlot({
# Subset mtcars to the three selected columns (x, y, colour in that order)
ss <- mtcars[,c(input$VarX,input$VarY,input$Color) ]
# Treat the colouring variable as categorical
ss[,3] <- as.factor(ss[,3])
# NOTE(review): mapping aes() to ss[,1]/ss[,2] works but loses informative axis
# labels; aes(.data[[input$VarX]], ...) would keep them — TODO confirm intent
ggplot(data = ss, aes(x=ss[,1],y=ss[,2],color=ss[,3]))+
geom_point()+labs(color=input$Color)
})
} | /R Programming/Statistics with R/1. R Basics/Interactive Graphics with Shiny/Scatter Plot (ggplot)/server.R | no_license | SaurabhRuikar/CdacRepo | R | false | false | 322 | r | library(shiny)
library(ggplot2)
data("mtcars")
# Shiny server function: scatter plot of two user-selected mtcars columns,
# coloured by a third; expects inputs VarX, VarY and Color (column names of mtcars)
function(input,output) {
output$Scatter <- renderPlot({
# Subset mtcars to the three selected columns (x, y, colour in that order)
ss <- mtcars[,c(input$VarX,input$VarY,input$Color) ]
# Treat the colouring variable as categorical
ss[,3] <- as.factor(ss[,3])
# NOTE(review): mapping aes() to ss[,1]/ss[,2] works but loses informative axis
# labels; aes(.data[[input$VarX]], ...) would keep them — TODO confirm intent
ggplot(data = ss, aes(x=ss[,1],y=ss[,2],color=ss[,3]))+
geom_point()+labs(color=input$Color)
})
}
# Fix: the script uses fread() and data.table's [Date == ...] / := syntax
# but never loaded the package, so it failed with "could not find function 'fread'".
library(data.table)
## Import data and subsetting: read everything as character, treat "?" as NA,
## and keep only the two target dates
myData <- fread("household_power_consumption.txt",header = TRUE, na.strings="?", colClasses = "character")[Date == "1/2/2007" | Date =="2/2/2007"]
## add a column "date + time"
myData[,DateTime:=as.POSIXct(paste(myData$Date, myData$Time), format="%d/%m/%Y %H:%M:%S")]
## create the png file (2x2 grid of plots)
png(filename="Plot4.PNG",width = 480, height = 480)
par(mfrow = c(2,2))
## create the plot
## plot 1 top left: global active power over time
plot(myData$DateTime,as.numeric(myData$Global_active_power),type = "n",xlab = "", ylab ="Global Active Power")
lines(myData$DateTime,as.numeric(myData$Global_active_power),type = "l")
## plot 2 top right: voltage over time
plot(myData$DateTime,as.numeric(myData$Voltage),type = "n",xlab = "datetime", ylab ="Voltage")
lines(myData$DateTime,as.numeric(myData$Voltage),type = "l")
## plot 3 bottom left: the three sub-metering series overlaid
plot(myData$DateTime, as.numeric(myData$Sub_metering_1) ,xlab = "",ylab ="Energy sub metering",type = "l")
lines(myData$DateTime,as.numeric(myData$Sub_metering_2),type = "l", col = "red")
lines(myData$DateTime,as.numeric(myData$Sub_metering_3),type = "l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"),pch = "-",cex = 0.9, lwd = 2)
## plot 4 bottom right: global reactive power over time
## (fix: the y-axis label said "Voltage", copy-pasted from plot 2)
plot(myData$DateTime,as.numeric(myData$Global_reactive_power),type = "n",xlab = "datetime", ylab ="Global_reactive_power")
lines(myData$DateTime,as.numeric(myData$Global_reactive_power),type = "l")
dev.off()
| /Plot4.R | no_license | mgranconato/ExData_Plotting1 | R | false | false | 1,454 | r | ## Import data and subsetting
# Fix: the script uses fread() and data.table's [Date == ...] / := syntax
# but never loaded the package, so it failed with "could not find function 'fread'".
library(data.table)
# Read everything as character, treat "?" as NA, and keep only the two target dates
myData <- fread("household_power_consumption.txt",header = TRUE, na.strings="?", colClasses = "character")[Date == "1/2/2007" | Date =="2/2/2007"]
## add a column "date + time"
myData[,DateTime:=as.POSIXct(paste(myData$Date, myData$Time), format="%d/%m/%Y %H:%M:%S")]
## create the png file (2x2 grid of plots)
png(filename="Plot4.PNG",width = 480, height = 480)
par(mfrow = c(2,2))
## create the plot
## plot 1 top left: global active power over time
plot(myData$DateTime,as.numeric(myData$Global_active_power),type = "n",xlab = "", ylab ="Global Active Power")
lines(myData$DateTime,as.numeric(myData$Global_active_power),type = "l")
## plot 2 top right: voltage over time
plot(myData$DateTime,as.numeric(myData$Voltage),type = "n",xlab = "datetime", ylab ="Voltage")
lines(myData$DateTime,as.numeric(myData$Voltage),type = "l")
## plot 3 bottom left: the three sub-metering series overlaid
plot(myData$DateTime, as.numeric(myData$Sub_metering_1) ,xlab = "",ylab ="Energy sub metering",type = "l")
lines(myData$DateTime,as.numeric(myData$Sub_metering_2),type = "l", col = "red")
lines(myData$DateTime,as.numeric(myData$Sub_metering_3),type = "l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"),pch = "-",cex = 0.9, lwd = 2)
## plot 4 bottom right: global reactive power over time
## (fix: the y-axis label said "Voltage", copy-pasted from plot 2)
plot(myData$DateTime,as.numeric(myData$Global_reactive_power),type = "n",xlab = "datetime", ylab ="Global_reactive_power")
lines(myData$DateTime,as.numeric(myData$Global_reactive_power),type = "l")
dev.off()
|
# Console-style scratchpad exploring R vectors and matrices (bare names print values)
vec1 =c(1,2,3,4,5)
vec1
vec2=1:100
vec2
# Concatenate vectors with a 0 spliced in between
vec3=c(vec1,0,vec2)
vec3
# Logical subsetting: elements of vec1 greater than 2
vec1[vec1>2]
vec1
# vector(mode, length) creates a zero-initialised vector
vector("numeric",5)
vector
s=c("aa","bb","cc","dd","ee")
# NOTE(review): T/F are reassignable shortcuts; prefer TRUE/FALSE
l=c(F,T,T,T,T)
s[l]
# Named vectors and subsetting by name
V=c("marry","sue")
names(V)=c("first","second")
V[c("second","first")]
V
x=c("first"=3,"second"=0)
x
x=c("lalit")
x
# Characters 3 to 5 of the string
substr(x,3,5)
vec1
vec1=NULL
# NOTE(review): R is case-sensitive — 'VEC1' is undefined and this line errors
# (presumably 'vec1' was intended)
VEC1
x =c(1,5,3,4,2)
x
sort(x)
# order() returns the index permutation that sorts x, so x[order(x)] == sort(x)
order(x)
x[order(x)]
v=1:100
v=seq(1,100,10)
v
y=rep(x,3)
y
# NOTE(review): trailing comma leaves rep()'s second argument empty — TODO confirm intended call
rep(x,)
# 4x3 matrix filled row-by-row
x=matrix(1:12,nrow=4,byrow=T)
x
x[2,2]
x
x[,c(1,2)]
x
dim(x)
x
# Negative indices drop row 2 and column 3
x[-2,-3]
colnames(x)=c('sub1','sub2','sub3')
x
rownames(x)=paste('R',1:4,sep='')
y=c(8,6,4,1)
cbind(x,y)
z=c(8,9,1)
rbind(x,z)
# Flatten the matrix column-wise into a vector
z=c(x)
z
# Transpose
zt=t(x)
zt
vec1
| /vectors.R | no_license | lsahni82/lalitr | R | false | false | 629 | r | vec1 =c(1,2,3,4,5)
# Console-style scratchpad exploring R vectors and matrices (bare names print values)
vec1
vec2=1:100
vec2
# Concatenate vectors with a 0 spliced in between
vec3=c(vec1,0,vec2)
vec3
# Logical subsetting: elements of vec1 greater than 2
vec1[vec1>2]
vec1
# vector(mode, length) creates a zero-initialised vector
vector("numeric",5)
vector
s=c("aa","bb","cc","dd","ee")
# NOTE(review): T/F are reassignable shortcuts; prefer TRUE/FALSE
l=c(F,T,T,T,T)
s[l]
# Named vectors and subsetting by name
V=c("marry","sue")
names(V)=c("first","second")
V[c("second","first")]
V
x=c("first"=3,"second"=0)
x
x=c("lalit")
x
# Characters 3 to 5 of the string
substr(x,3,5)
vec1
vec1=NULL
# NOTE(review): R is case-sensitive — 'VEC1' is undefined and this line errors
# (presumably 'vec1' was intended)
VEC1
x =c(1,5,3,4,2)
x
sort(x)
# order() returns the index permutation that sorts x, so x[order(x)] == sort(x)
order(x)
x[order(x)]
v=1:100
v=seq(1,100,10)
v
y=rep(x,3)
y
# NOTE(review): trailing comma leaves rep()'s second argument empty — TODO confirm intended call
rep(x,)
# 4x3 matrix filled row-by-row
x=matrix(1:12,nrow=4,byrow=T)
x
x[2,2]
x
x[,c(1,2)]
x
dim(x)
x
# Negative indices drop row 2 and column 3
x[-2,-3]
colnames(x)=c('sub1','sub2','sub3')
x
rownames(x)=paste('R',1:4,sep='')
y=c(8,6,4,1)
cbind(x,y)
z=c(8,9,1)
rbind(x,z)
# Flatten the matrix column-wise into a vector
z=c(x)
z
# Transpose
zt=t(x)
zt
vec1
|
#' Linear combinations of submatrices of an array
#'
#' Computes a matrix of expected values based on an array X of predictors and a
#' vector beta of regression coefficients: the weighted sum of the p slices
#' X[, , k], each scaled by beta[k].
#'
#' @usage Xbeta(X, beta)
#' @param X an n by n by p array
#' @param beta a p by 1 vector
#' @return An n by n matrix
#' @author Peter Hoff
#' @export Xbeta
Xbeta <-
function(X,beta)
{
  # Accumulate beta[k] * X[, , k] over all slices; a zero-length beta
  # yields the all-zero matrix.
  acc <- matrix(0, nrow = dim(X)[1], ncol = dim(X)[2])
  for (slice in seq_along(beta)) {
    acc <- acc + beta[slice] * X[, , slice]
  }
  acc
}
| /R/Xbeta.R | no_license | cran/amen | R | false | false | 500 | r | #' Linear combinations of submatrices of an array
#'
#' Forms the n x n matrix obtained by summing the p predictor slices of the
#' array X, each weighted by the corresponding entry of the coefficient
#' vector beta.
#'
#' @usage Xbeta(X, beta)
#' @param X an n by n by p array
#' @param beta a p by 1 vector
#' @return An n by n matrix
#' @author Peter Hoff
#' @export Xbeta
Xbeta <-
function(X,beta)
{
  # Weighted sum of slices; beta of length zero leaves the zero matrix untouched.
  out <- matrix(0, nrow = dim(X)[1], ncol = dim(X)[2])
  idx <- 0L
  while (idx < length(beta)) {
    idx <- idx + 1L
    out <- out + beta[idx] * X[, , idx]
  }
  out
}
\name{explain.covariance}
\alias{explain.covariance}
\title{Covariance Function Explained}
\description{Step by step demonstration of the covariance calculus.}
\usage{
explain.covariance(x,y)
}
\arguments{
\item{x}{Should be a vector}
\item{y}{Should be a vector}
}
\details{To calculate the covariance, the user should provide two numeric vectors. The result is a step-by-step explanation of the covariance calculation, using the data supplied as arguments. The covariance formula can be found in the covariance_ help document.}
\value{Numeric result and the process of this calculus explained.}
\author{Jose Manuel Gomez Caceres, \email{josemanuel.gomezc@edu.uah.es}
\cr{Juan Jose Cuadrado, \email{jjcg@uah.es}}
\cr{Universidad de Alcala de Henares}
}
\note{A vector is created by c(), like c(1,2,3,4,5) creates a vector with the numbers: 1,2,3,4,5 }
%\seealso{}
\examples{
#data creation
data <- c(10,4,5,7,3,4,1)
data2 <- c(1,8,3,4,4,5,7)
explain.covariance(data, data2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~covariance }% use one of RShowDoc("KEYWORDS")
\keyword{ ~covarianza }% __ONLY ONE__ keyword per line
\keyword{ ~explain }
\keyword{ ~explicada}
| /man/explain.covariance.Rd | no_license | cran/LearningRlab | R | false | false | 1,314 | rd | \name{explain.covariance}
\alias{explain.covariance}
\title{Covariance Function Explained}
\description{Step by step demonstration of the covariance calculus.}
\usage{
explain.covariance(x,y)
}
\arguments{
\item{x}{Should be a vector}
\item{y}{Should be a vector}
}
\details{To calculate the covariance, the user should provide two numeric vectors. The result is a step-by-step explanation of the covariance calculation, using the data supplied as arguments. The covariance formula can be found in the covariance_ help document.}
\value{Numeric result and the process of this calculus explained.}
\author{Jose Manuel Gomez Caceres, \email{josemanuel.gomezc@edu.uah.es}
\cr{Juan Jose Cuadrado, \email{jjcg@uah.es}}
\cr{Universidad de Alcala de Henares}
}
\note{A vector is created by c(), like c(1,2,3,4,5) creates a vector with the numbers: 1,2,3,4,5 }
%\seealso{}
\examples{
#data creation
data <- c(10,4,5,7,3,4,1)
data2 <- c(1,8,3,4,4,5,7)
explain.covariance(data, data2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~covariance }% use one of RShowDoc("KEYWORDS")
\keyword{ ~covarianza }% __ONLY ONE__ keyword per line
\keyword{ ~explain }
\keyword{ ~explicada}
|
# Exploratory histograms of selected Spotify audio features
library(ggplot2)
spotify_df <- read.csv("./spotify_files/data.csv")
# Drop columns by position, keeping only the feature columns plotted below
spotify_df <- spotify_df[,c(-2,-4,-8,-9,-11,-14,-15,-17)]
names(spotify_df)
ggplot(data = spotify_df) +
geom_histogram(aes(x = valence)) +
labs(x = "Valence (positivity)")
ggplot(data = spotify_df) +
geom_histogram(aes(x = tempo)) +
labs(x = "tempo")
ggplot(data = spotify_df) +
geom_histogram(aes(x = speechiness)) +
labs(x = "speechiness")
ggplot(data = spotify_df) +
geom_histogram(aes(x = acousticness), bins=35) +
labs(x = "acousticness")
ggplot(data = spotify_df) +
geom_histogram(aes(x = danceability)) +
labs(x = "danceability")
ggplot(data = spotify_df) +
geom_histogram(aes(x = energy), bins=35) +
labs(x = "energy")
ggplot(data = spotify_df) +
geom_histogram(aes(x = instrumentalness)) +
labs(x = "instrumentalness")
ggplot(data = spotify_df) +
geom_histogram(aes(x = liveness)) +
labs(x = "liveness")
ggplot(data = spotify_df) +
geom_histogram(aes(x = loudness)) +
labs(x = "loudness")
# NOTE(review): this tempo histogram duplicates the one drawn earlier — possibly unintended
ggplot(data = spotify_df) +
geom_histogram(aes(x = tempo)) +
labs(x = "tempo")
| /explore_spotify.R | no_license | yjuw/spotify_purple8 | R | false | false | 1,139 | r | library(ggplot2)
# Exploratory histograms of selected Spotify audio features
spotify_df <- read.csv("./spotify_files/data.csv")
# Drop columns by position, keeping only the feature columns plotted below
spotify_df <- spotify_df[,c(-2,-4,-8,-9,-11,-14,-15,-17)]
names(spotify_df)
ggplot(data = spotify_df) +
geom_histogram(aes(x = valence)) +
labs(x = "Valence (positivity)")
ggplot(data = spotify_df) +
geom_histogram(aes(x = tempo)) +
labs(x = "tempo")
ggplot(data = spotify_df) +
geom_histogram(aes(x = speechiness)) +
labs(x = "speechiness")
ggplot(data = spotify_df) +
geom_histogram(aes(x = acousticness), bins=35) +
labs(x = "acousticness")
ggplot(data = spotify_df) +
geom_histogram(aes(x = danceability)) +
labs(x = "danceability")
ggplot(data = spotify_df) +
geom_histogram(aes(x = energy), bins=35) +
labs(x = "energy")
ggplot(data = spotify_df) +
geom_histogram(aes(x = instrumentalness)) +
labs(x = "instrumentalness")
ggplot(data = spotify_df) +
geom_histogram(aes(x = liveness)) +
labs(x = "liveness")
ggplot(data = spotify_df) +
geom_histogram(aes(x = loudness)) +
labs(x = "loudness")
# NOTE(review): this tempo histogram duplicates the one drawn earlier — possibly unintended
ggplot(data = spotify_df) +
geom_histogram(aes(x = tempo)) +
labs(x = "tempo")
#' Get CDN path semantic dependencies
#'
#' Internal helper returning the base URL (or path) from which Semantic UI
#' assets are served, controlled by the `shiny.custom.semantic.cdn` option.
#'
#' @examples
#' ## Load shiny.semantic dependencies from local domain.
#' options("shiny.custom.semantic.cdn" = "shiny.semantic")
#'
#' @return CDN path of semantic dependencies
get_cdn_path <- function() {
  # Fall back to the public CloudFront CDN when no custom CDN is configured
  custom_cdn <- getOption("shiny.custom.semantic.cdn")
  if (is.null(custom_cdn)) {
    "https://d335w9rbwpvuxm.cloudfront.net/2.8.3"
  } else {
    custom_cdn
  }
}
#' Add dashboard dependencies to html
#'
#' Internal function that attaches the Semantic UI JavaScript and CSS
#' dependencies, resolving their source from (in order of precedence) the
#' `shiny.custom.semantic` option, a locally bundled copy
#' (`shiny.semantic.local`), or the CDN returned by `get_cdn_path()`.
#'
#' @param theme define theme
#'
#' @return Content with appended dependencies.
get_dependencies <- function(theme = NULL) {
  # Use the minified bundle unless minification was explicitly disabled
  suffix <- if (getOption("shiny.minified", TRUE)) "min" else NULL
  js_file <- paste(c("semantic", suffix, "js"), collapse = ".")
  css_files <- c(check_semantic_theme(theme, full_url = FALSE))
  source_spec <- NULL
  custom_path <- getOption("shiny.custom.semantic", NULL)
  if (!is.null(custom_path)) {
    # User-supplied directory containing the semantic assets
    source_spec <- c(file = custom_path)
  } else if (isTRUE(getOption("shiny.semantic.local", FALSE))) {
    if (!is.null(theme)) {
      # Themed stylesheets are only available from the CDN
      warning("It's not posible use local semantic version with themes. Using CDN")
    } else {
      source_spec <- c(
        file = system.file("www", "shared", "semantic", package = "shiny.semantic")
      )
    }
  }
  if (is.null(source_spec)) {
    source_spec <- c(href = get_cdn_path())
  }
  shiny::tagList(
    htmltools::htmlDependency(
      "semantic-ui",
      "2.8.3",
      source_spec,
      script = js_file,
      stylesheet = css_files
    )
  )
}
#' Get default semantic css
#'
#' @param full_url define return output filename or full path. Default TRUE
#'
#' @return path to default css semantic file or default filename
get_default_semantic_theme <- function(full_url = TRUE) {
  # Mirror the naming scheme of the JS asset: semantic[.min].css
  minfield <- if (getOption("shiny.minified", TRUE)) "min" else NULL
  css_file <- paste(c("semantic", minfield, "css"), collapse = ".")
  path <- file.path(get_cdn_path(), css_file, fsep = "/")
  # Scalar branch: plain if/else instead of the original ifelse() (which is
  # meant for vectors) wrapped in a pointless c().
  if (full_url) path else css_file
}
#' Semantic theme path validator
#'
#' @param theme_css it can be either NULL, character with css path, or theme name
#' @param full_url boolean flag that defines what is returned, either filename, or full path. Default TRUE
#'
#' @return path to theme or filename
#' @export
#'
#' @examples
#' check_semantic_theme(NULL)
#' check_semantic_theme("darkly")
#' check_semantic_theme("darkly", full_url = FALSE)
check_semantic_theme <- function(theme_css, full_url = TRUE) {
  minfield <- if (getOption("shiny.minified", TRUE)) "min" else NULL
  # NULL -> fall back to the stock semantic stylesheet.
  if (is.null(theme_css)) return(get_default_semantic_theme(full_url))
  # A concrete .css path is passed through untouched.
  if (tools::file_ext(theme_css) == "css") return(theme_css)
  if (theme_css %in% SUPPORTED_THEMES) {
    # Hoisted: the file name was built twice in the original branches.
    css_file <- paste(c("semantic", theme_css, minfield, "css"), collapse = ".")
    if (full_url) {
      return(file.path(get_cdn_path(), css_file, fsep = "/"))
    } else {
      return(css_file)
    }
  } else {
    # Unknown theme names degrade gracefully to the default stylesheet.
    # (Fixed double space: the original paste'd "Theme " with the default
    # separator, producing "Theme  <name>".)
    warning(paste("Theme", theme_css, "not recognized. Default used instead!"))
    return(get_default_semantic_theme(full_url))
  }
}
#' Semantic UI page
#'
#' This creates a Semantic page for use in a Shiny app.
#'
#' Inside, it uses two crucial options:
#'
#' (1) \code{shiny.minified} with a logical value, tells whether it should attach min or full
#' semantic css or js (TRUE by default).
#' (2) \code{shiny.custom.semantic} if this option has not NULL character \code{semanticPage}
#' takes dependencies from custom css and js files specified in this path
#' (NULL by default). Depending on \code{shiny.minified} value the folder should contain
#' either "min" or standard version. The folder should contain: \code{semantic.css} and
#' \code{semantic.js} files, or \code{semantic.min.css} and \code{semantic.min.js}
#' in \code{shiny.minified = TRUE} mode.
#'
#' @param ... Other arguments to be added as attributes of the main div tag
#' wrapper (e.g. style, class etc.)
#' @param title A title to display in the browser's title bar.
#' @param theme Theme name or path. Full list of supported themes you will find in
#' \code{SUPPORTED_THEMES} or at http://semantic-ui-forest.com/themes.
#' @param suppress_bootstrap boolean flag that suppresses bootstrap when turned on
#' @param margin character with body margin size
#' @examples
#' ## Only run examples in interactive R sessions
#' if (interactive()) {
#' library(shiny)
#' library(shiny.semantic)
#'
#' ui <- semanticPage(
#' title = "Hello Shiny Semantic!",
#' tags$label("Number of observations:"),
#' slider_input("obs", value = 500, min = 0, max = 1000),
#' segment(
#' plotOutput("dist_plot")
#' )
#' )
#'
#' server <- function(input, output) {
#' output$dist_plot <- renderPlot({
#' hist(rnorm(input$obs))
#' })
#' }
#'
#' shinyApp(ui, server)
#' }
#'
#' @export
semanticPage <- function(..., title = "", theme = NULL, suppress_bootstrap = TRUE,
                         margin = "10px") {
  # Bootstrap (attached by shiny by default) clashes with Semantic UI styles,
  # so it is suppressed unless explicitly requested.
  suppress_bootstrap <- if (suppress_bootstrap) {
    suppressDependencies("bootstrap")
  } else {
    NULL
  }
  # Shim scripts wiring Semantic UI widgets into shiny's input system.
  # Generated from a name vector instead of eleven copy-pasted tag lines;
  # order is preserved from the original.
  shim_names <- c(
    "modal", "dropdown", "button", "slider", "calendar", "fileinput",
    "numericinput", "rating", "tabset", "progress", "toast"
  )
  shim_scripts <- lapply(
    paste0("shiny.semantic/shiny-semantic-", shim_names, ".js"),
    function(src) shiny::tags$script(src = src)
  )
  shiny::tagList(
    shiny::tags$head(
      get_dependencies(theme),
      shiny::tags$title(title),
      shiny::tags$meta(name = "viewport", content = "width=device-width, initial-scale=1.0"),
      shiny::tags$link(rel = "stylesheet", type = "text/css",
                       href = "shiny.semantic/shiny-semantic-DT.css"),
      shim_scripts
    ),
    shiny::tags$body(style = glue::glue("margin:{margin};"),
                     suppress_bootstrap,
                     ...)
  )
}
| /R/semanticPage.R | permissive | pepecebrian/shiny.semantic | R | false | false | 6,619 | r | #' Get CDN path semantic dependencies
#'
#' Internal function that returns path string from `shiny.custom.semantic.cdn` options.
#'
#' @examples
#' ## Load shiny.semantic dependencies from local domain.
#' options("shiny.custom.semantic.cdn" = "shiny.semantic")
#'
#' @return CDN path of semantic dependencies
get_cdn_path <- function() {
  # CDN host serving the Semantic UI 2.8.3 assets; users may point elsewhere
  # via options("shiny.custom.semantic.cdn" = ...).
  default_cdn <- "https://d335w9rbwpvuxm.cloudfront.net/2.8.3"
  getOption("shiny.custom.semantic.cdn", default = default_cdn)
}
#' Add dashboard dependencies to html
#'
#' Internal function that adds dashboard dependencies to html.
#'
#' @param theme define theme
#'
#' @return Content with appended dependencies.
get_dependencies <- function(theme = NULL) {
  # Use minified assets unless the user opted out with
  # options(shiny.minified = FALSE).
  minfield <- if (getOption("shiny.minified", TRUE)) "min" else NULL
  javascript_file <- paste(c("semantic", minfield, "js"), collapse = ".")
  css_files <- c(check_semantic_theme(theme, full_url = FALSE))
  # Resolve the dependency source in priority order:
  # (1) explicit custom path, (2) files bundled with the package, (3) CDN.
  dep_src <- NULL
  if (!is.null(getOption("shiny.custom.semantic", NULL))) {
    dep_src <- c(file = getOption("shiny.custom.semantic"))
  } else if (isTRUE(getOption("shiny.semantic.local", FALSE))) {
    if (!is.null(theme)) {
      # Themed stylesheets are only available from the CDN, so fall through.
      # (Message fixed: original read "It's not posible use local semantic
      # version with themes".)
      warning("It's not possible to use the local semantic version with themes. Using CDN")
    } else {
      dep_src <- c(
        file = system.file(
          "www",
          "shared",
          "semantic",
          package = "shiny.semantic"
        )
      )
    }
  }
  if (is.null(dep_src)) {
    dep_src <- c(href = get_cdn_path())
  }
  shiny::tagList(
    htmltools::htmlDependency("semantic-ui",
      "2.8.3",
      dep_src,
      script = javascript_file,
      stylesheet = css_files
    )
  )
}
#' Get default semantic css
#'
#' @param full_url define return output filename or full path. Default TRUE
#'
#' @return path to default css semantic file or default filename
get_default_semantic_theme <- function(full_url = TRUE) {
  # Mirror the naming scheme of the JS asset: semantic[.min].css
  minfield <- if (getOption("shiny.minified", TRUE)) "min" else NULL
  css_file <- paste(c("semantic", minfield, "css"), collapse = ".")
  path <- file.path(get_cdn_path(), css_file, fsep = "/")
  # Scalar branch: plain if/else instead of the original ifelse() (which is
  # meant for vectors) wrapped in a pointless c().
  if (full_url) path else css_file
}
#' Semantic theme path validator
#'
#' @param theme_css it can be either NULL, character with css path, or theme name
#' @param full_url boolean flag that defines what is returned, either filename, or full path. Default TRUE
#'
#' @return path to theme or filename
#' @export
#'
#' @examples
#' check_semantic_theme(NULL)
#' check_semantic_theme("darkly")
#' check_semantic_theme("darkly", full_url = FALSE)
check_semantic_theme <- function(theme_css, full_url = TRUE) {
  minfield <- if (getOption("shiny.minified", TRUE)) "min" else NULL
  # NULL -> fall back to the stock semantic stylesheet.
  if (is.null(theme_css)) return(get_default_semantic_theme(full_url))
  # A concrete .css path is passed through untouched.
  if (tools::file_ext(theme_css) == "css") return(theme_css)
  if (theme_css %in% SUPPORTED_THEMES) {
    # Hoisted: the file name was built twice in the original branches.
    css_file <- paste(c("semantic", theme_css, minfield, "css"), collapse = ".")
    if (full_url) {
      return(file.path(get_cdn_path(), css_file, fsep = "/"))
    } else {
      return(css_file)
    }
  } else {
    # Unknown theme names degrade gracefully to the default stylesheet.
    # (Fixed double space: the original paste'd "Theme " with the default
    # separator, producing "Theme  <name>".)
    warning(paste("Theme", theme_css, "not recognized. Default used instead!"))
    return(get_default_semantic_theme(full_url))
  }
}
#' Semantic UI page
#'
#' This creates a Semantic page for use in a Shiny app.
#'
#' Inside, it uses two crucial options:
#'
#' (1) \code{shiny.minified} with a logical value, tells whether it should attach min or full
#' semantic css or js (TRUE by default).
#' (2) \code{shiny.custom.semantic} if this option has not NULL character \code{semanticPage}
#' takes dependencies from custom css and js files specified in this path
#' (NULL by default). Depending on \code{shiny.minified} value the folder should contain
#' either "min" or standard version. The folder should contain: \code{semantic.css} and
#' \code{semantic.js} files, or \code{semantic.min.css} and \code{semantic.min.js}
#' in \code{shiny.minified = TRUE} mode.
#'
#' @param ... Other arguments to be added as attributes of the main div tag
#' wrapper (e.g. style, class etc.)
#' @param title A title to display in the browser's title bar.
#' @param theme Theme name or path. Full list of supported themes you will find in
#' \code{SUPPORTED_THEMES} or at http://semantic-ui-forest.com/themes.
#' @param suppress_bootstrap boolean flag that suppresses bootstrap when turned on
#' @param margin character with body margin size
#' @examples
#' ## Only run examples in interactive R sessions
#' if (interactive()) {
#' library(shiny)
#' library(shiny.semantic)
#'
#' ui <- semanticPage(
#' title = "Hello Shiny Semantic!",
#' tags$label("Number of observations:"),
#' slider_input("obs", value = 500, min = 0, max = 1000),
#' segment(
#' plotOutput("dist_plot")
#' )
#' )
#'
#' server <- function(input, output) {
#' output$dist_plot <- renderPlot({
#' hist(rnorm(input$obs))
#' })
#' }
#'
#' shinyApp(ui, server)
#' }
#'
#' @export
semanticPage <- function(..., title = "", theme = NULL, suppress_bootstrap = TRUE,
                         margin = "10px") {
  # Bootstrap (attached by shiny by default) clashes with Semantic UI styles,
  # so it is suppressed unless explicitly requested.
  suppress_bootstrap <- if (suppress_bootstrap) {
    suppressDependencies("bootstrap")
  } else {
    NULL
  }
  # Shim scripts wiring Semantic UI widgets into shiny's input system.
  # Generated from a name vector instead of eleven copy-pasted tag lines;
  # order is preserved from the original.
  shim_names <- c(
    "modal", "dropdown", "button", "slider", "calendar", "fileinput",
    "numericinput", "rating", "tabset", "progress", "toast"
  )
  shim_scripts <- lapply(
    paste0("shiny.semantic/shiny-semantic-", shim_names, ".js"),
    function(src) shiny::tags$script(src = src)
  )
  shiny::tagList(
    shiny::tags$head(
      get_dependencies(theme),
      shiny::tags$title(title),
      shiny::tags$meta(name = "viewport", content = "width=device-width, initial-scale=1.0"),
      shiny::tags$link(rel = "stylesheet", type = "text/css",
                       href = "shiny.semantic/shiny-semantic-DT.css"),
      shim_scripts
    ),
    shiny::tags$body(style = glue::glue("margin:{margin};"),
                     suppress_bootstrap,
                     ...)
  )
}
|
# sprintf() templates used to scaffold DataCamp course files. The "%1$s"
# slots take the language engine of the code chunks (e.g. "r") and the plain
# "%s" slots in the yaml templates take chapter metadata.
# NOTE(review): the consumers of these constants are not visible in this
# file; confirm substitution order against the calling code.

# Skeleton for a standard coding exercise: instructions, hint,
# pre-exercise code, sample code, solution and SCT blocks.
normal_body <-
"
*** =instructions
- instruction 1
- instruction 2
*** =hint
hint comes here
*** =pre_exercise_code
```{%1$s}
# pec
```
*** =sample_code
```{%1$s}
# sample code
```
*** =solution
```{%1$s}
# solution code
```
*** =sct
```{%1$s}
# sct code
```
"
# Skeleton for a multiple-choice exercise; the SCT checks the chosen option.
mce_body <-
"
*** =instructions
- option 1
- option 2
- option 3
*** =hint
hint
*** =pre_exercise_code
```{%1$s}
# pec
```
*** =sct
```{%1$s}
test_mc(2) # if 2 is the correct option.
```
"
# Skeleton for a video exercise: only the video link is needed.
video_body <-
"*** =video_link
//player.vimeo.com/video/108225030
"
# Template for the course-level yaml file.
course_yaml_template <-
"title: insert course title here
author_field: insert author name here
description: insert course description here
"
# Yaml front matter for a chapter; the three %s slots are chapter number,
# title and description (in that order).
chapter_yaml_template <-
"---
title_meta : Chapter %s
title : %s
description : %s
"
| /R/author_templates.R | no_license | dheerulearns/datacamp | R | false | false | 770 | r | normal_body <-
"
*** =instructions
- instruction 1
- instruction 2
*** =hint
hint comes here
*** =pre_exercise_code
```{%1$s}
# pec
```
*** =sample_code
```{%1$s}
# sample code
```
*** =solution
```{%1$s}
# solution code
```
*** =sct
```{%1$s}
# sct code
```
"
# sprintf() templates used to scaffold DataCamp course files; "%1$s" slots
# take the language engine of the code chunks, "%s" slots take metadata.

# Skeleton for a multiple-choice exercise; the SCT checks the chosen option.
mce_body <-
"
*** =instructions
- option 1
- option 2
- option 3
*** =hint
hint
*** =pre_exercise_code
```{%1$s}
# pec
```
*** =sct
```{%1$s}
test_mc(2) # if 2 is the correct option.
```
"
# Skeleton for a video exercise: only the video link is needed.
video_body <-
"*** =video_link
//player.vimeo.com/video/108225030
"
# Template for the course-level yaml file.
course_yaml_template <-
"title: insert course title here
author_field: insert author name here
description: insert course description here
"
# Yaml front matter for a chapter; the three %s slots are chapter number,
# title and description (in that order).
chapter_yaml_template <-
"---
title_meta : Chapter %s
title : %s
description : %s
"
|
# Demo: a small multilayer perceptron on the mlbench Sonar data via mxnet.
require(mlbench)
require(mxnet)
data(Sonar, package = "mlbench")
# Recode the two-level class factor (column 61) to 0/1 integer labels.
Sonar[,61] <- as.numeric(Sonar[,61])-1
# Fixed (position-based, non-random) train/test split.
train.ind <- c(1:50, 100:150)
train.x <- data.matrix(Sonar[train.ind, 1:60])
train.y <- Sonar[train.ind, 61]
test.x <- data.matrix(Sonar[-train.ind, 1:60])
test.y <- Sonar[-train.ind, 61]
# Inspect class balance in each split.
table(train.y)
table(test.y)
# Seed mxnet's RNG so weight initialisation is reproducible.
mx.set.seed(0)
model <- mx.mlp(train.x, train.y, hidden_node=10, out_node=2, out_activation="softmax",
                num.round=20, array.batch.size=15, learning.rate=0.07, momentum=0.9,
                eval.metric=mx.metric.accuracy)
graph.viz(model$symbol)
# Class probabilities come back with one column per observation, so pick the
# arg-max per column of t(preds), shift back to 0/1, and cross-tabulate.
preds <- predict(model, test.x)
pred.label <- max.col(t(preds)) - 1
table(pred.label, test.y)
preds
| /mxnet-R/mxnet-class.R | no_license | mjmg/R-demo-files | R | false | false | 690 | r | require(mlbench)
# Demo: a small multilayer perceptron on the mlbench Sonar data via mxnet
# (mlbench is loaded just above).
require(mxnet)
data(Sonar, package = "mlbench")
# Recode the two-level class factor (column 61) to 0/1 integer labels.
Sonar[,61] <- as.numeric(Sonar[,61])-1
# Fixed (position-based, non-random) train/test split.
train.ind <- c(1:50, 100:150)
train.x <- data.matrix(Sonar[train.ind, 1:60])
train.y <- Sonar[train.ind, 61]
test.x <- data.matrix(Sonar[-train.ind, 1:60])
test.y <- Sonar[-train.ind, 61]
# Inspect class balance in each split.
table(train.y)
table(test.y)
# Seed mxnet's RNG so weight initialisation is reproducible.
mx.set.seed(0)
model <- mx.mlp(train.x, train.y, hidden_node=10, out_node=2, out_activation="softmax",
                num.round=20, array.batch.size=15, learning.rate=0.07, momentum=0.9,
                eval.metric=mx.metric.accuracy)
graph.viz(model$symbol)
# Class probabilities come back with one column per observation, so pick the
# arg-max per column of t(preds), shift back to 0/1, and cross-tabulate.
preds <- predict(model, test.x)
pred.label <- max.col(t(preds)) - 1
table(pred.label, test.y)
preds
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NobBS.posterior2.R
\name{NobBS.posterior2}
\alias{NobBS.posterior2}
\title{NobBS.posterior2}
\usage{
NobBS.posterior2(
data,
now,
units,
onset_date,
report_date,
moving_window = NULL,
max_D = NULL,
cutoff_D = NULL,
proportion_reported = 1,
quiet = TRUE,
specs = list(dist = c("Poisson", "NB"), alpha1.mean.prior = 0, alpha1.prec.prior =
0.001, alphat.shape.prior = 0.001, alphat.rate.prior = 0.001, beta.priors = NULL,
param_names = NULL, conf = 0.95, dispersion.prior = NULL, nAdapt = 1000, nChains = 1,
nBurnin = 1000, nThin = 1, nSamp = 10000)
)
}
\arguments{
\item{data}{data}
\item{now}{now}
\item{units}{units}
\item{onset_date}{onset_date}
\item{report_date}{report_date}
\item{moving_window}{moving_window}
\item{max_D}{max_D}
\item{cutoff_D}{cutoff_D}
\item{proportion_reported}{proportion_reported}
\item{quiet}{quiet}
\item{specs}{specs}
}
\description{
NobBS.posterior2
}
| /man/NobBS.posterior2.Rd | permissive | covid19br/now_fcts | R | false | true | 1,006 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NobBS.posterior2.R
\name{NobBS.posterior2}
\alias{NobBS.posterior2}
\title{NobBS.posterior2}
\usage{
NobBS.posterior2(
data,
now,
units,
onset_date,
report_date,
moving_window = NULL,
max_D = NULL,
cutoff_D = NULL,
proportion_reported = 1,
quiet = TRUE,
specs = list(dist = c("Poisson", "NB"), alpha1.mean.prior = 0, alpha1.prec.prior =
0.001, alphat.shape.prior = 0.001, alphat.rate.prior = 0.001, beta.priors = NULL,
param_names = NULL, conf = 0.95, dispersion.prior = NULL, nAdapt = 1000, nChains = 1,
nBurnin = 1000, nThin = 1, nSamp = 10000)
)
}
\arguments{
\item{data}{data}
\item{now}{now}
\item{units}{units}
\item{onset_date}{onset_date}
\item{report_date}{report_date}
\item{moving_window}{moving_window}
\item{max_D}{max_D}
\item{cutoff_D}{cutoff_D}
\item{proportion_reported}{proportion_reported}
\item{quiet}{quiet}
\item{specs}{specs}
}
\description{
NobBS.posterior2
}
|
#load data - wdbc
# NOTE(review): the wdbc data used later in this script is never loaded
# here -- it must exist in the workspace before the gradient-descent section.
require(Matrix)
###Solve WLS problem
#simulate data
# create sparse matrix with simulated data (n observations, p predictors)
n = 1000
p = 500
X = matrix(rnorm(n*p), nrow=n)
# Bernoulli(0.04) mask: keep roughly 4% of the entries, zero out the rest.
mask = matrix(rbinom(n*p,1,0.04), nrow=n, ncol=p)
X = mask*X
beta = runif(p)
# Response: linear signal plus standard-normal noise.
y = X %*% beta + rnorm( n, mean = 0, sd = 1)
# Identity weight matrix: WLS reduces to OLS for this W.
W <- diag(rep(1, n))
#inversion method
# Weighted least squares via the normal equations:
#   beta-hat = (X' W X)^{-1} X' W y
inversion <- function(y, X, W) {
  xtw <- t(X) %*% W
  solve(xtw %*% X) %*% (xtw %*% y)
}
#QR decomposition method
# Weighted least squares via QR decomposition of W^{1/2} X: solves
# R %*% betahat = Q' W^{1/2} y by back-substitution.
# Assumes W is diagonal (only diag(W) is used).
# BUG FIX: the original referenced an undefined object `W.sqrt` (it was
# assigned as `Wsqrt`), so the function could not run at all.
QRdec <- function(y, X, W) {
  p <- ncol(X)
  betahat <- numeric(p)
  # decomposition of the weighted design matrix
  Wsqrt <- diag(sqrt(diag(W)))
  QR <- qr(Wsqrt %*% X)
  # right-hand side Q' W^{1/2} y
  QW <- t(qr.Q(QR)) %*% Wsqrt %*% y
  R <- qr.R(QR)
  # back-substitution, from the last coefficient up; the index trick
  # c(2:p, 0)[j:p] selects the already-solved coefficients j+1..p
  for (j in p:1) {
    index <- c(2:p, 0)[j:p]
    betahat[j] <- (QW[j] - sum(R[j, index] * betahat[index])) / R[j, j]
  }
  return(betahat)
}
#sparse matrix
###Gradient descent problem
#Predictor variables X ~ first 10 features
# NOTE(review): the comment above says "first 10 features" but only columns
# 3 and 12 (two predictors) are selected -- confirm intent.
X <- as.matrix(wdbc[,c(3,12)])
#Add ones to X
X <- cbind(rep(1, nrow(X)),X)
# NOTE(review): scale() standardises the intercept column too; a constant
# column has sd 0, which yields NaN. Scaling should happen before binding
# the column of ones -- confirm.
X = scale(X)
#Response variable ~ classification as M (1) or B (0) - I modified the data file
y <- as.matrix(wdbc[,2])
# NOTE(review): this m (number of observations) is overwritten with m = 1
# before newton() is called below.
m <- nrow(y)
#
#Gradient descent
#
#define gradient as per logistic regression
# Gradient of the negative logistic log-likelihood: -X'(y - m*w), where w
# holds the fitted probabilities and m the binomial sizes.
grad.get <- function(y, X, w, m) {
  residual <- y - m * w
  -t(X) %*% residual
}
#GD algorithm
# Gradient-descent fit of a logistic regression on design matrix X, using
# the script-level response `y` (as the original implicitly did) and the
# gradient helper grad.get().
# Fixes over the original: the reference to an undefined lowercase `x`, the
# wrong grad.get() call signature (3 args instead of 4), the hard-coded
# two-element theta (X here has three columns), and an unreachable recursive
# print() placed after return().
grad.descent <- function(X, iter) {
  # one coefficient per column of X
  theta <- matrix(0, nrow = ncol(X), ncol = 1)
  alpha <- .01 # learning rate
  for (i in seq_len(iter)) {
    # fitted probabilities under the current coefficients
    w <- as.numeric(1 / (1 + exp(-X %*% theta)))
    theta <- theta - alpha * grad.get(y, X, w, 1)
  }
  theta
}
#
#Newton's method
#
# Script-level defaults fed to newton() below:
#   m = binomial size (binary outcomes; note this clobbers the earlier
#       m <- nrow(y)), iter = maximum iterations, tol = convergence
#   tolerance on the step size, alpha = step multiplier (currently unused
#   inside newton()).
m = 1
iter = 100
tol = .001
alpha = 1
# Newton-Raphson solver for logistic regression.
# Relies on helpers hessian() and loglike() defined elsewhere in the project.
# X: design matrix; y: responses; Binit: starting coefficients (length p);
# tol: stop when the step's Euclidean norm drops below it; m: binomial size;
# iter: maximum iterations; alpha: step multiplier (kept for interface
# compatibility; the original never used it either).
# Returns the matrix of iterates, one row per iteration.
# Fixes over the original: iterate matrix sized iter x p (was p x p, which
# overflows when iter > p), the `Beta[i,]` typo, dist() misused as a vector
# norm, a discarded solve(H, G) call, and unreachable code after return().
newton = function(X, y, Binit, tol, m, iter, alpha)
{
  N = dim(X)[1]
  p = dim(X)[2]
  Betas = matrix(0, iter, p)
  Betas[1,] = Binit
  loglik = rep(0, iter)
  distance = rep(0, iter)
  mvect = rep(m, N)
  for (i in 2:iter)
  {
    # fitted probabilities under the previous iterate
    w = as.numeric(1 / (1 + exp(-X %*% Betas[i-1,]))) #get weights
    H = hessian(X, mvect, w) #Hessian
    G = grad.get(y, X, w, mvect) #gradient
    # Newton step: solve H v = G directly (equivalent to the original's
    # two-stage Cholesky solve, without the extra discarded solve() call)
    v = solve(H, G)
    Betas[i,] = Betas[i-1,] + v
    # Euclidean norm of the step; stop once it falls below tolerance
    distance[i] = sqrt(sum((Betas[i,] - Betas[i-1,])^2))
    if (distance[i] < tol) {
      return(Betas)
    }
    #update loglikelihood
    loglik[i] = loglike(y, w, m)
  }
  return(Betas)
}
| /ex1.R | no_license | hariskr/BigData1 | R | false | false | 2,482 | r | #load data - wdbc
# WLS exercise setup.
# NOTE(review): the wdbc data used later in this script is never loaded
# here -- it must exist in the workspace before the gradient-descent section.
require(Matrix)
###Solve WLS problem
#simulate data
# create sparse matrix with simulated data (n observations, p predictors)
n = 1000
p = 500
X = matrix(rnorm(n*p), nrow=n)
# Bernoulli(0.04) mask: keep roughly 4% of the entries, zero out the rest.
mask = matrix(rbinom(n*p,1,0.04), nrow=n, ncol=p)
X = mask*X
beta = runif(p)
# Response: linear signal plus standard-normal noise.
y = X %*% beta + rnorm( n, mean = 0, sd = 1)
# Identity weight matrix: WLS reduces to OLS for this W.
W <- diag(rep(1, n))
#inversion method
# Weighted least squares via the normal equations:
#   beta-hat = (X' W X)^{-1} X' W y
inversion <- function(y, X, W) {
  xtw <- t(X) %*% W
  solve(xtw %*% X) %*% (xtw %*% y)
}
#QR decomposition method
# Weighted least squares via QR decomposition of W^{1/2} X: solves
# R %*% betahat = Q' W^{1/2} y by back-substitution.
# Assumes W is diagonal (only diag(W) is used).
# BUG FIX: the original referenced an undefined object `W.sqrt` (it was
# assigned as `Wsqrt`), so the function could not run at all.
QRdec <- function(y, X, W) {
  p <- ncol(X)
  betahat <- numeric(p)
  # decomposition of the weighted design matrix
  Wsqrt <- diag(sqrt(diag(W)))
  QR <- qr(Wsqrt %*% X)
  # right-hand side Q' W^{1/2} y
  QW <- t(qr.Q(QR)) %*% Wsqrt %*% y
  R <- qr.R(QR)
  # back-substitution, from the last coefficient up; the index trick
  # c(2:p, 0)[j:p] selects the already-solved coefficients j+1..p
  for (j in p:1) {
    index <- c(2:p, 0)[j:p]
    betahat[j] <- (QW[j] - sum(R[j, index] * betahat[index])) / R[j, j]
  }
  return(betahat)
}
#sparse matrix
###Gradient descent problem
#Predictor variables X ~ first 10 features
# NOTE(review): the comment above says "first 10 features" but only columns
# 3 and 12 (two predictors) are selected -- confirm intent.
X <- as.matrix(wdbc[,c(3,12)])
#Add ones to X
X <- cbind(rep(1, nrow(X)),X)
# NOTE(review): scale() standardises the intercept column too; a constant
# column has sd 0, which yields NaN. Scaling should happen before binding
# the column of ones -- confirm.
X = scale(X)
#Response variable ~ classification as M (1) or B (0) - I modified the data file
y <- as.matrix(wdbc[,2])
# NOTE(review): this m (number of observations) is overwritten with m = 1
# before newton() is called below.
m <- nrow(y)
#
#Gradient descent
#
#define gradient as per logistic regression
# Gradient of the negative logistic log-likelihood: -X'(y - m*w), where w
# holds the fitted probabilities and m the binomial sizes.
grad.get <- function(y, X, w, m) {
  residual <- y - m * w
  -t(X) %*% residual
}
#GD algorithm
# Gradient-descent fit of a logistic regression on design matrix X, using
# the script-level response `y` (as the original implicitly did) and the
# gradient helper grad.get().
# Fixes over the original: the reference to an undefined lowercase `x`, the
# wrong grad.get() call signature (3 args instead of 4), the hard-coded
# two-element theta (X here has three columns), and an unreachable recursive
# print() placed after return().
grad.descent <- function(X, iter) {
  # one coefficient per column of X
  theta <- matrix(0, nrow = ncol(X), ncol = 1)
  alpha <- .01 # learning rate
  for (i in seq_len(iter)) {
    # fitted probabilities under the current coefficients
    w <- as.numeric(1 / (1 + exp(-X %*% theta)))
    theta <- theta - alpha * grad.get(y, X, w, 1)
  }
  theta
}
#
#Newton's method
#
# Script-level defaults fed to newton() below:
#   m = binomial size (binary outcomes; note this clobbers the earlier
#       m <- nrow(y)), iter = maximum iterations, tol = convergence
#   tolerance on the step size, alpha = step multiplier (currently unused
#   inside newton()).
m = 1
iter = 100
tol = .001
alpha = 1
# Newton-Raphson solver for logistic regression.
# Relies on helpers hessian() and loglike() defined elsewhere in the project.
# X: design matrix; y: responses; Binit: starting coefficients (length p);
# tol: stop when the step's Euclidean norm drops below it; m: binomial size;
# iter: maximum iterations; alpha: step multiplier (kept for interface
# compatibility; the original never used it either).
# Returns the matrix of iterates, one row per iteration.
# Fixes over the original: iterate matrix sized iter x p (was p x p, which
# overflows when iter > p), the `Beta[i,]` typo, dist() misused as a vector
# norm, a discarded solve(H, G) call, and unreachable code after return().
newton = function(X, y, Binit, tol, m, iter, alpha)
{
  N = dim(X)[1]
  p = dim(X)[2]
  Betas = matrix(0, iter, p)
  Betas[1,] = Binit
  loglik = rep(0, iter)
  distance = rep(0, iter)
  mvect = rep(m, N)
  for (i in 2:iter)
  {
    # fitted probabilities under the previous iterate
    w = as.numeric(1 / (1 + exp(-X %*% Betas[i-1,]))) #get weights
    H = hessian(X, mvect, w) #Hessian
    G = grad.get(y, X, w, mvect) #gradient
    # Newton step: solve H v = G directly (equivalent to the original's
    # two-stage Cholesky solve, without the extra discarded solve() call)
    v = solve(H, G)
    Betas[i,] = Betas[i-1,] + v
    # Euclidean norm of the step; stop once it falls below tolerance
    distance[i] = sqrt(sum((Betas[i,] - Betas[i-1,])^2))
    if (distance[i] < tol) {
      return(Betas)
    }
    #update loglikelihood
    loglik[i] = loglike(y, w, m)
  }
  return(Betas)
}
|
#' Geographic Cleaning of Coordinates from Biologic Collections
#'
#' Cleaning geographic coordinates by multiple empirical tests to flag
#' potentially erroneous coordinates, addressing issues common in biological
#' collection databases.
#'
#' The function needs all coordinates to be formally valid according to WGS84.
#' If the data contains invalid coordinates, the function will stop and return
#' a vector flagging the invalid records. TRUE = non-problematic coordinate,
#' FALSE = potentially problematic coordinates.
#' * capitals tests a radius around adm-0 capitals. The
#' radius is \code{capitals_rad}.
#' * centroids tests a radius around country centroids.
#' The radius is \code{centroids_rad}.
#' * countries tests if coordinates are from the
#' country indicated in the country column. *Switched off by default.*
#' * duplicates tests for duplicate records. This
#' checks for identical coordinates or if a species vector is provided for
#' identical coordinates within a species. All but the first records are
#' flagged as duplicates. *Switched off by default.*
#' * equal tests for equal absolute longitude and latitude.
#' * gbif tests a one-degree radius around the GBIF
#' headquarters in Copenhagen, Denmark.
#' * institutions tests a radius around known
#' biodiversity institutions from \code{instiutions}. The radius is
#' \code{inst_rad}.
#' * outliers tests each species for outlier records.
#' Depending on the \code{outliers_mtp} and \code{outliers.td} arguments either
#' flags records that are a minimum distance away from all other records of
#' this species (\code{outliers_td}) or records that are outside a multiple of
#' the interquartile range of minimum distances to the next neighbour of this
#' species (\code{outliers_mtp}). Three different methods are available
#' for the outlier test: If
#' \dQuote{outlier} a boxplot method is used and records are flagged as
#' outliers if their \emph{mean} distance to all other records of the same
#' species is larger than mltpl * the interquartile range of the mean distance
#' of all records of this species. If \dQuote{mad} the median absolute
#' deviation is used. In this case a record is flagged as outlier, if the
#' \emph{mean} distance to all other records of the same species is larger than
#' the median of the mean distance of all points plus/minus the mad of the mean
#' distances of all records of the species * mltpl. If \dQuote{distance}
#' records are flagged as outliers, if the \emph{minimum} distance to the next
#' record of the species is > \code{tdi}.
#' * ranges tests if records fall within provided natural range polygons on
#' a per species basis. See \code{\link{cc_iucn}} for details.
#' * seas tests if coordinates fall into the ocean.
#' * urban tests if coordinates are from urban areas.
#' *Switched off by default*
#' * validity checks if coordinates correspond to a lat/lon coordinate reference system.
#' This test is always on, since all records need to pass for any other test to run.
#' * zeros tests for plain zeros, equal latitude and
#' longitude and a radius around the point 0/0. The radius is \code{zeros.rad}.
#'
#' @aliases CleanCoordinates summary.spatialvalid is.spatialvalid
#'
#' @param species a character string. A vector of the same length as rows in x,
#' with the species identity for each record. If missing, the outliers test is
#' skipped.
#' @param countries a character string. The column with the country assignment of
#' each record in three letter ISO code. Default = \dQuote{countrycode}. If missing, the
#' countries test is skipped.
#' @param tests a vector of character strings, indicating which tests to run.
#' See details for all tests available. Default = c("capitals", "centroids",
#' "equal", "gbif", "institutions", "outliers",
#' "seas", "zeros")
#' @param capitals_rad numeric. The radius around capital coordinates in
#' meters. Default = 10000.
#' @param centroids_rad numeric. The radius around capital coordinates in
#' meters. Default = 1000.
#' @param centroids_detail a \code{character string}. If set to
#' \sQuote{country} only country (adm-0) centroids are tested, if set to
#' \sQuote{provinces} only province (adm-1) centroids are tested. Default =
#' \sQuote{both}.
#' @param inst_rad numeric. The radius around biodiversity institutions
#' coordinates in metres. Default = 100.
#' @param outliers_method The method used for outlier testing. See details.
#' @param outliers_mtp numeric. The multiplier for the interquartile range of
#' the outlier test. If NULL \code{outliers.td} is used. Default = 5.
#' @param outliers_td numeric. The minimum distance of a record to all other
#' records of a species to be identified as outlier, in km. Default = 1000.
#' @param outliers_size numerical. The minimum number of records in a dataset
#' to run the taxon-specific outlier test. Default = 7.
#' @param range_rad buffer around natural ranges. Default = 0.
#' @param zeros_rad numeric. The radius around 0/0 in degrees. Default = 0.5.
#' @param capitals_ref a \code{data.frame} with alternative reference data for
#' the country capitals test. If missing, the \code{countryref} dataset is used.
#' Alternatives must be identical in structure.
#' @param centroids_ref a \code{data.frame} with alternative reference data for
#' the centroid test. If NULL, the \code{countryref} dataset is used.
#' Alternatives must be identical in structure.
#' @param country_ref a \code{SpatialPolygonsDataFrame} as alternative
#' reference for the countries test. If NULL, the
#' \code{rnaturalearth:ne_countries('medium')} dataset is used.
#' @param country_refcol the column name in the reference dataset, containing the relevant
#' ISO codes for matching. Default is to "iso_a3_eh" which referes to the ISO-3
#' codes in the reference dataset. See notes.
#' @param inst_ref a \code{data.frame} with alternative reference data for the
#' biodiversity institution test. If NULL, the \code{institutions} dataset
#' is used. Alternatives must be identical in structure.
#' @param range_ref a \code{SpatialPolygonsDataFrame} of species natural ranges.
#' Required to include the 'ranges' test. See \code{\link{cc_iucn}} for details.
#' @param seas_ref a \code{SpatialPolygonsDataFrame} as alternative reference
#' for the seas test. If NULL, the
#' rnaturalearth::ne_download(=scale = 110, type = 'land', category = 'physical')
#' dataset is used.
#' @param seas_scale The scale of the default landmass reference. Must be one of 10, 50, 110.
#' Higher numbers equal higher detail. Default = 50.
#' @param urban_ref a \code{SpatialPolygonsDataFrame} as alternative reference
#' for the urban test. If NULL, the test is skipped. See details for a
#' reference gazetteers.
#' @param value a character string defining the output value. See the value
#' section for details. one of \sQuote{spatialvalid}, \sQuote{summary},
#' \sQuote{clean}. Default = \sQuote{\code{spatialvalid}}.
#' @param report logical or character. If TRUE a report file is written to the
#' working directory, summarizing the cleaning results. If a character, the
#' path to which the file should be written. Default = FALSE.
#' @inheritParams cc_cap
#'
#' @return Depending on the output argument:
#' \describe{
#' \item{\dQuote{spatialvalid}}{an object of class \code{spatialvalid} similar to x
#' with one column added for each test. TRUE = clean coordinate entry, FALSE = potentially
#' problematic coordinate entries. The .summary column is FALSE if any test flagged
#' the respective coordinate.}
#' \item{\dQuote{flagged}}{a logical vector with the
#' same order as the input data summarizing the results of all test. TRUE =
#' clean coordinate, FALSE = potentially problematic (= at least one test
#' failed).}
#' \item{\dQuote{clean}}{a \code{data.frame} similar to x
#' with potentially problematic records removed}
#' }
#'
#' @note Always tests for coordinate validity: non-numeric or missing
#' coordinates and coordinates exceeding the global extent (lon/lat, WGS84).
#' See \url{https://ropensci.github.io/CoordinateCleaner/} for more details
#' and tutorials.
#'
#' @note The country_refcol argument allows to adapt the function to the structure of
#' alternative reference datasets. For instance, for
#' \code{rnaturalearth::ne_countries(scale = "small")}, the default will fail,
#' but country_refcol = "iso_a3" will work.
#'
#' @keywords Coordinate cleaning wrapper
#' @family Wrapper functions
#'
#' @examples
#'
#'
#' exmpl <- data.frame(species = sample(letters, size = 250, replace = TRUE),
#' decimallongitude = runif(250, min = 42, max = 51),
#' decimallatitude = runif(250, min = -26, max = -11))
#'
#' test <- clean_coordinates(x = exmpl,
#' tests = c("equal"))
#'
#'\dontrun{
#' #run more tests
#' test <- clean_coordinates(x = exmpl,
#' tests = c("capitals",
#' "centroids","equal",
#' "gbif", "institutions",
#' "outliers", "seas",
#' "zeros"))
#'}
#'
#'
#' summary(test)
#'
#' @export
#' @importFrom methods as is
#' @importFrom utils write.table
#' @md
clean_coordinates <- function(x,
lon = "decimallongitude",
lat = "decimallatitude",
species = "species",
countries = NULL,
tests = c("capitals", "centroids",
"equal", "gbif",
"institutions",
"outliers",
"seas", "zeros"),
capitals_rad = 10000,
centroids_rad = 1000,
centroids_detail = "both",
inst_rad = 100,
outliers_method = "quantile",
outliers_mtp = 5,
outliers_td = 1000,
outliers_size = 7,
range_rad = 0,
zeros_rad = 0.5,
capitals_ref = NULL,
centroids_ref = NULL,
country_ref = NULL,
country_refcol = "iso_a3_eh",
inst_ref = NULL,
range_ref = NULL,
seas_ref = NULL,
seas_scale = 50,
urban_ref = NULL,
value = "spatialvalid",
verbose = TRUE,
report = FALSE) {
# check function arguments
match.arg(value, choices = c("spatialvalid", "flagged", "clean"))
match.arg(centroids_detail, choices = c("both", "country", "provinces"))
match.arg(outliers_method, choices = c("distance", "quantile", "mad"))
# check column names
nams <- c(lon, lat, species, countries)
if (!all(nams %in% names(x))) {
stop(sprintf("%s column not found\n", nams[which(!nams %in% names(x))]))
}
if (is.null(countries) & "countries" %in% tests) {
stop("provide countries column or remove countries test")
}
if (is.null(species)) {
if ("outliers" %in% tests) {
stop("provide species column or remove outliers test")
}
if ("duplicates" %in% tests) {
stop("provide species column or remove duplicates test")
}
}
# Initiate output
out <- data.frame(matrix(NA, nrow = nrow(x), ncol = 13))
colnames(out) <- c("val", "equ", "zer", "cap", "cen", "sea", "urb", "con",
"otl", "gbf", "inst", "rang", "dpl")
# Run tests Validity, check if coordinates fit to lat/long system, this has
# to be run all the time, as otherwise the other tests don't work
out$val <- cc_val(x, lon = lon, lat = lat,
verbose = verbose, value = "flagged")
if (!all(out$val)) {
stop(
"invalid coordinates found in rows, clean dataset before proceeding:\n",
paste(which(!out$val), "\n")
)
}
## Equal coordinates
if ("equal" %in% tests) {
out$equ <- cc_equ(x,
lon = lon, lat = lat, verbose = verbose, value = "flagged",
test = "absolute"
)
}
## Zero coordinates
if ("zeros" %in% tests) {
out$zer <- cc_zero(x,
lon = lon, lat = lat, buffer = zeros_rad, verbose = verbose,
value = "flagged"
)
}
## Capitals
if ("capitals" %in% tests) {
out$cap <- cc_cap(x,
lon = lon, lat = lat, buffer = capitals_rad, ref = capitals_ref,
value = "flagged", verbose = verbose
)
}
## Centroids
if ("centroids" %in% tests) {
out$cen <- cc_cen(x,
lon = lon, lat = lat, buffer = centroids_rad, test = centroids_detail,
ref = centroids_ref, value = "flagged", verbose = verbose
)
}
## Seas
if ("seas" %in% tests) {
out$sea <- cc_sea(x,
lon = lon, lat = lat, ref = seas_ref,
scale = seas_scale,
verbose = verbose,
value = "flagged"
)
}
## Urban Coordinates
if ("urban" %in% tests) {
out$urb <- cc_urb(x,
lon = lon, lat = lat, ref = urban_ref, verbose = verbose,
value = "flagged"
)
}
## Country check
if ("countries" %in% tests) {
out$con <- cc_coun(x,
lon = lon,
lat = lat,
iso3 = countries,
ref = country_ref,
ref_col = country_refcol,
verbose = verbose, value = "flagged"
)
}
## Outliers
if ("outliers" %in% tests) {
# select species with more than threshold species
otl_test <- table(x[species])
otl_test <- otl_test[otl_test > outliers_size]
otl_test <- x[x[[species]] %in% names(otl_test), ]
otl_test <- otl_test[, c(species, lon, lat)]
otl_flag <- cc_outl(otl_test,
lon = lon, lat = lat, species = species,
method = outliers_method, mltpl = outliers_mtp, tdi = outliers_td,
value = "ids", verbose = verbose
)
otl <- rep(TRUE, nrow(x))
otl[otl_flag] <- FALSE
out$otl <- otl
}
## GBIF headquarters
if ("gbif" %in% tests) {
out$gbf <- cc_gbif(x, lon = lon, lat = lat,
verbose = verbose, value = "flagged")
}
## Biodiversity institution
if ("institutions" %in% tests) {
out$inst <- cc_inst(x,
lon = lon, lat = lat, ref = inst_ref, buffer = inst_rad,
verbose = verbose, value = "flagged"
)
}
## Natural ranges
if ("range" %in% tests) {
if(!is.null(range_rad)){
stop("'range_rad' not found")
}else{
out$rang <- cc_iucn(x,
lon = lon, lat = lat, species = species,
buffer = range_rad,
verbose = verbose, value = "flagged"
)
}
}
## exclude duplicates
if ("duplicates" %in% tests) {
out$dpl <- cc_dupl(x, lon = lon, lat = lat, species = species,
value = "flagged")
}
# prepare output data
out <- Filter(function(x) !all(is.na(x)), out)
suma <- as.vector(Reduce("&", out))
if (verbose) {
if (!is.null(suma)) {
message(sprintf("Flagged %s of %s records, EQ = %s.", sum(!suma,
na.rm = TRUE
), length(suma), round(
sum(!suma, na.rm = TRUE) / length(suma), 2
)))
} else {
message("flagged 0 records, EQ = 0")
}
}
if (value == "spatialvalid") {
ret <- data.frame(x, out, summary = suma)
names(ret) <- c(names(x),
paste(".", names(out), sep = ""),
".summary")
class(ret) <- c("spatialvalid", "data.frame", class(out))
out <- ret
if (report) {
report <- "clean_coordinates_report.txt"
}
if (is.character(report)) {
repo <- data.frame(
Test = as.character(names(out[-(1:3)])),
Flagged.records = colSums(!out[-(1:3)]),
stringsAsFactors = FALSE
)
repo <- rbind(repo, c("Total number of records", length(out$summary)))
repo <- rbind(repo, c("Error Quotient", round(sum(!out$summary,
na.rm = TRUE
) / length(out$summary), 2)))
write.table(repo, report, sep = "\t", row.names = FALSE, quote = FALSE)
}
}
if (value == "clean") {
out <- x[suma, ]
if (report | is.character(report)) {
warning("report only valid with value = 'spatialvalid'")
}
}
if (value == "flagged") {
out <- suma
if (report | is.character(report)) {
warning("report only valid with value = 'spatialvalid'")
}
}
return(out)
}
| /R/clean_coordinates.R | no_license | mdsumner/CoordinateCleaner | R | false | false | 16,827 | r | #' Geographic Cleaning of Coordinates from Biologic Collections
#'
#' Cleaning geographic coordinates by multiple empirical tests to flag
#' potentially erroneous coordinates, addressing issues common in biological
#' collection databases.
#'
#' The function needs all coordinates to be formally valid according to WGS84.
#' If the data contains invalid coordinates, the function will stop and return
#' a vector flagging the invalid records. TRUE = non-problematic coordinate,
#' FALSE = potentially problematic coordinates.
#' * capitals tests a radius around adm-0 capitals. The
#' radius is \code{capitals_rad}.
#' * centroids tests a radius around country centroids.
#' The radius is \code{centroids_rad}.
#' * countries tests if coordinates are from the
#' country indicated in the country column. *Switched off by default.*
#' * duplicates tests for duplicate records. This
#' checks for identical coordinates or if a species vector is provided for
#' identical coordinates within a species. All but the first records are
#' flagged as duplicates. *Switched off by default.*
#' * equal tests for equal absolute longitude and latitude.
#' * gbif tests a one-degree radius around the GBIF
#' headquarters in Copenhagen, Denmark.
#' * institutions tests a radius around known
#' biodiversity institutions from \code{instiutions}. The radius is
#' \code{inst_rad}.
#' * outliers tests each species for outlier records.
#' Depending on the \code{outliers_mtp} and \code{outliers.td} arguments either
#' flags records that are a minimum distance away from all other records of
#' this species (\code{outliers_td}) or records that are outside a multiple of
#' the interquartile range of minimum distances to the next neighbour of this
#' species (\code{outliers_mtp}). Three different methods are available
#' for the outlier test: "If
#' \dQuote{outlier} a boxplot method is used and records are flagged as
#' outliers if their \emph{mean} distance to all other records of the same
#' species is larger than mltpl * the interquartile range of the mean distance
#' of all records of this species. If \dQuote{mad} the median absolute
#' deviation is used. In this case a record is flagged as outlier, if the
#' \emph{mean} distance to all other records of the same species is larger than
#' the median of the mean distance of all points plus/minus the mad of the mean
#' distances of all records of the species * mltpl. If \dQuote{distance}
#' records are flagged as outliers, if the \emph{minimum} distance to the next
#' record of the species is > \code{tdi}.
#' * ranges tests if records fall within provided natural range polygons on
#' a per species basis. See \code{\link{cc_iucn}} for details.
#' * seas tests if coordinates fall into the ocean.
#' * urban tests if coordinates are from urban areas.
#' *Switched off by default*
#' * validity checks if coordinates correspond to a lat/lon coordinate reference system.
#' This test is always on, since all records need to pass for any other test to run.
#' * zeros tests for plain zeros, equal latitude and
#' longitude and a radius around the point 0/0. The radius is \code{zeros.rad}.
#'
#' @aliases CleanCoordinates summary.spatialvalid is.spatialvalid
#'
#' @param species a character string. A vector of the same length as rows in x,
#' with the species identity for each record. If missing, the outliers test is
#' skipped.
#' @param countries a character string. The column with the country assignment of
#' each record in three letter ISO code. Default = \dQuote{countrycode}. If missing, the
#' countries test is skipped.
#' @param tests a vector of character strings, indicating which tests to run.
#' See details for all tests available. Default = c("capitals", "centroids",
#' "equal", "gbif", "institutions", "outliers",
#' "seas", "zeros")
#' @param capitals_rad numeric. The radius around capital coordinates in
#' meters. Default = 10000.
#' @param centroids_rad numeric. The radius around capital coordinates in
#' meters. Default = 1000.
#' @param centroids_detail a \code{character string}. If set to
#' \sQuote{country} only country (adm-0) centroids are tested, if set to
#' \sQuote{provinces} only province (adm-1) centroids are tested. Default =
#' \sQuote{both}.
#' @param inst_rad numeric. The radius around biodiversity institutions
#' coordinates in metres. Default = 100.
#' @param outliers_method The method used for outlier testing. See details.
#' @param outliers_mtp numeric. The multiplier for the interquartile range of
#' the outlier test. If NULL \code{outliers.td} is used. Default = 5.
#' @param outliers_td numeric. The minimum distance of a record to all other
#' records of a species to be identified as outlier, in km. Default = 1000.
#' @param outliers_size numerical. The minimum number of records in a dataset
#' to run the taxon-specific outlier test. Default = 7.
#' @param range_rad buffer around natural ranges. Default = 0.
#' @param zeros_rad numeric. The radius around 0/0 in degrees. Default = 0.5.
#' @param capitals_ref a \code{data.frame} with alternative reference data for
#' the country capitals test. If missing, the \code{countryref} dataset is used.
#' Alternatives must be identical in structure.
#' @param centroids_ref a \code{data.frame} with alternative reference data for
#' the centroid test. If NULL, the \code{countryref} dataset is used.
#' Alternatives must be identical in structure.
#' @param country_ref a \code{SpatialPolygonsDataFrame} as alternative
#' reference for the countries test. If NULL, the
#' \code{rnaturalearth:ne_countries('medium')} dataset is used.
#' @param country_refcol the column name in the reference dataset, containing the relevant
#' ISO codes for matching. Default is "iso_a3_eh", which refers to the ISO-3
#' codes in the reference dataset. See notes.
#' @param inst_ref a \code{data.frame} with alternative reference data for the
#' biodiversity institution test. If NULL, the \code{institutions} dataset
#' is used. Alternatives must be identical in structure.
#' @param range_ref a \code{SpatialPolygonsDataFrame} of species natural ranges.
#' Required to include the 'ranges' test. See \code{\link{cc_iucn}} for details.
#' @param seas_ref a \code{SpatialPolygonsDataFrame} as alternative reference
#' for the seas test. If NULL, the
#' rnaturalearth::ne_download(=scale = 110, type = 'land', category = 'physical')
#' dataset is used.
#' @param seas_scale The scale of the default landmass reference. Must be one of 10, 50, 110.
#' Higher numbers equal higher detail. Default = 50.
#' @param urban_ref a \code{SpatialPolygonsDataFrame} as alternative reference
#' for the urban test. If NULL, the test is skipped. See details for a
#' reference gazetteers.
#' @param value a character string defining the output value. See the value
#' section for details. one of \sQuote{spatialvalid}, \sQuote{summary},
#' \sQuote{clean}. Default = \sQuote{\code{spatialvalid}}.
#' @param report logical or character. If TRUE a report file is written to the
#' working directory, summarizing the cleaning results. If a character, the
#' path to which the file should be written. Default = FALSE.
#' @inheritParams cc_cap
#'
#' @return Depending on the output argument:
#' \describe{
#' \item{\dQuote{spatialvalid}}{an object of class \code{spatialvalid} similar to x
#' with one column added for each test. TRUE = clean coordinate entry, FALSE = potentially
#' problematic coordinate entries. The .summary column is FALSE if any test flagged
#' the respective coordinate.}
#' \item{\dQuote{flagged}}{a logical vector with the
#' same order as the input data summarizing the results of all test. TRUE =
#' clean coordinate, FALSE = potentially problematic (= at least one test
#' failed).}
#' \item{\dQuote{clean}}{a \code{data.frame} similar to x
#' with potentially problematic records removed}
#' }
#'
#' @note Always tests for coordinate validity: non-numeric or missing
#' coordinates and coordinates exceeding the global extent (lon/lat, WGS84).
#' See \url{https://ropensci.github.io/CoordinateCleaner/} for more details
#' and tutorials.
#'
#' @note The country_refcol argument allows to adapt the function to the structure of
#' alternative reference datasets. For instance, for
#' \code{rnaturalearth::ne_countries(scale = "small")}, the default will fail,
#' but country_refcol = "iso_a3" will work.
#'
#' @keywords Coordinate cleaning wrapper
#' @family Wrapper functions
#'
#' @examples
#'
#'
#' exmpl <- data.frame(species = sample(letters, size = 250, replace = TRUE),
#' decimallongitude = runif(250, min = 42, max = 51),
#' decimallatitude = runif(250, min = -26, max = -11))
#'
#' test <- clean_coordinates(x = exmpl,
#' tests = c("equal"))
#'
#'\dontrun{
#' #run more tests
#' test <- clean_coordinates(x = exmpl,
#' tests = c("capitals",
#' "centroids","equal",
#' "gbif", "institutions",
#' "outliers", "seas",
#' "zeros"))
#'}
#'
#'
#' summary(test)
#'
#' @export
#' @importFrom methods as is
#' @importFrom utils write.table
#' @md
clean_coordinates <- function(x,
                              lon = "decimallongitude",
                              lat = "decimallatitude",
                              species = "species",
                              countries = NULL,
                              tests = c("capitals", "centroids",
                                        "equal", "gbif",
                                        "institutions",
                                        "outliers",
                                        "seas", "zeros"),
                              capitals_rad = 10000,
                              centroids_rad = 1000,
                              centroids_detail = "both",
                              inst_rad = 100,
                              outliers_method = "quantile",
                              outliers_mtp = 5,
                              outliers_td = 1000,
                              outliers_size = 7,
                              range_rad = 0,
                              zeros_rad = 0.5,
                              capitals_ref = NULL,
                              centroids_ref = NULL,
                              country_ref = NULL,
                              country_refcol = "iso_a3_eh",
                              inst_ref = NULL,
                              range_ref = NULL,
                              seas_ref = NULL,
                              seas_scale = 50,
                              urban_ref = NULL,
                              value = "spatialvalid",
                              verbose = TRUE,
                              report = FALSE) {
  # Check function arguments; match.arg() errors on values outside the
  # allowed choices (the return values are deliberately discarded).
  match.arg(value, choices = c("spatialvalid", "flagged", "clean"))
  match.arg(centroids_detail, choices = c("both", "country", "provinces"))
  match.arg(outliers_method, choices = c("distance", "quantile", "mad"))
  # Check that all requested column names exist in x
  nams <- c(lon, lat, species, countries)
  if (!all(nams %in% names(x))) {
    stop(sprintf("%s column not found\n", nams[which(!nams %in% names(x))]))
  }
  if (is.null(countries) && "countries" %in% tests) {
    stop("provide countries column or remove countries test")
  }
  if (is.null(species)) {
    if ("outliers" %in% tests) {
      stop("provide species column or remove outliers test")
    }
    if ("duplicates" %in% tests) {
      stop("provide species column or remove duplicates test")
    }
  }
  # Initiate output: one logical column per available test. Columns of tests
  # that were not run stay all-NA and are dropped before summarising.
  out <- data.frame(matrix(NA, nrow = nrow(x), ncol = 13))
  colnames(out) <- c("val", "equ", "zer", "cap", "cen", "sea", "urb", "con",
                     "otl", "gbf", "inst", "rang", "dpl")
  # Coordinate validity is always tested, because all remaining tests assume
  # formally valid lat/lon coordinates.
  out$val <- cc_val(x, lon = lon, lat = lat,
                    verbose = verbose, value = "flagged")
  if (!all(out$val)) {
    stop(
      "invalid coordinates found in rows, clean dataset before proceeding:\n",
      paste(which(!out$val), "\n")
    )
  }
  ## Equal coordinates
  if ("equal" %in% tests) {
    out$equ <- cc_equ(x,
      lon = lon, lat = lat, verbose = verbose, value = "flagged",
      test = "absolute"
    )
  }
  ## Zero coordinates
  if ("zeros" %in% tests) {
    out$zer <- cc_zero(x,
      lon = lon, lat = lat, buffer = zeros_rad, verbose = verbose,
      value = "flagged"
    )
  }
  ## Capitals
  if ("capitals" %in% tests) {
    out$cap <- cc_cap(x,
      lon = lon, lat = lat, buffer = capitals_rad, ref = capitals_ref,
      value = "flagged", verbose = verbose
    )
  }
  ## Centroids
  if ("centroids" %in% tests) {
    out$cen <- cc_cen(x,
      lon = lon, lat = lat, buffer = centroids_rad, test = centroids_detail,
      ref = centroids_ref, value = "flagged", verbose = verbose
    )
  }
  ## Seas
  if ("seas" %in% tests) {
    out$sea <- cc_sea(x,
      lon = lon, lat = lat, ref = seas_ref,
      scale = seas_scale,
      verbose = verbose,
      value = "flagged"
    )
  }
  ## Urban Coordinates
  if ("urban" %in% tests) {
    out$urb <- cc_urb(x,
      lon = lon, lat = lat, ref = urban_ref, verbose = verbose,
      value = "flagged"
    )
  }
  ## Country check
  if ("countries" %in% tests) {
    out$con <- cc_coun(x,
      lon = lon,
      lat = lat,
      iso3 = countries,
      ref = country_ref,
      ref_col = country_refcol,
      verbose = verbose, value = "flagged"
    )
  }
  ## Outliers
  if ("outliers" %in% tests) {
    # Only species with more than outliers_size records are tested; records
    # of rarer species are never flagged by this test.
    otl_test <- table(x[species])
    otl_test <- otl_test[otl_test > outliers_size]
    otl_test <- x[x[[species]] %in% names(otl_test), ]
    otl_test <- otl_test[, c(species, lon, lat)]
    otl_flag <- cc_outl(otl_test,
      lon = lon, lat = lat, species = species,
      method = outliers_method, mltpl = outliers_mtp, tdi = outliers_td,
      value = "ids", verbose = verbose
    )
    # Map the flagged row ids back onto the full dataset
    otl <- rep(TRUE, nrow(x))
    otl[otl_flag] <- FALSE
    out$otl <- otl
  }
  ## GBIF headquarters
  if ("gbif" %in% tests) {
    out$gbf <- cc_gbif(x, lon = lon, lat = lat,
                       verbose = verbose, value = "flagged")
  }
  ## Biodiversity institution
  if ("institutions" %in% tests) {
    out$inst <- cc_inst(x,
      lon = lon, lat = lat, ref = inst_ref, buffer = inst_rad,
      verbose = verbose, value = "flagged"
    )
  }
  ## Natural ranges
  if ("range" %in% tests) {
    # The test needs range polygons (range_ref); range_rad is only the
    # buffer width. The previous check tested range_rad (default 0, never
    # NULL), so the test could never run.
    if (is.null(range_ref)) {
      stop("'range_ref' not found")
    } else {
      out$rang <- cc_iucn(x,
        range = range_ref,
        lon = lon, lat = lat, species = species,
        buffer = range_rad,
        verbose = verbose, value = "flagged"
      )
    }
  }
  ## exclude duplicates
  if ("duplicates" %in% tests) {
    out$dpl <- cc_dupl(x, lon = lon, lat = lat, species = species,
                       value = "flagged")
  }
  # Drop the columns of tests that were not run, then AND the remaining
  # per-test flags into one summary vector (TRUE = record passed all tests).
  out <- Filter(function(x) !all(is.na(x)), out)
  suma <- as.vector(Reduce("&", out))
  if (verbose) {
    if (!is.null(suma)) {
      message(sprintf("Flagged %s of %s records, EQ = %s.", sum(!suma,
        na.rm = TRUE
      ), length(suma), round(
        sum(!suma, na.rm = TRUE) / length(suma), 2
      )))
    } else {
      message("flagged 0 records, EQ = 0")
    }
  }
  if (value == "spatialvalid") {
    ret <- data.frame(x, out, summary = suma)
    names(ret) <- c(names(x),
                    paste(".", names(out), sep = ""),
                    ".summary")
    class(ret) <- c("spatialvalid", "data.frame", class(out))
    out <- ret
    # report = TRUE writes to a default file name; a character report is
    # interpreted as the target path. isTRUE() is required because
    # if (report) errors for character input.
    if (isTRUE(report)) {
      report <- "clean_coordinates_report.txt"
    }
    if (is.character(report)) {
      # NOTE(review): out[-(1:3)] assumes x contributed exactly three
      # columns (e.g. species/lon/lat); confirm for wider inputs.
      repo <- data.frame(
        Test = as.character(names(out[-(1:3)])),
        Flagged.records = colSums(!out[-(1:3)]),
        stringsAsFactors = FALSE
      )
      # The summary column was renamed to ".summary" above, so it must be
      # addressed as such; out$summary silently returned NULL before.
      repo <- rbind(repo, c("Total number of records", length(out$.summary)))
      repo <- rbind(repo, c("Error Quotient", round(sum(!out$.summary,
        na.rm = TRUE
      ) / length(out$.summary), 2)))
      write.table(repo, report, sep = "\t", row.names = FALSE, quote = FALSE)
    }
  }
  if (value == "clean") {
    out <- x[suma, ]
    if (isTRUE(report) || is.character(report)) {
      warning("report only valid with value = 'spatialvalid'")
    }
  }
  if (value == "flagged") {
    out <- suma
    if (isTRUE(report) || is.character(report)) {
      warning("report only valid with value = 'spatialvalid'")
    }
  }
  return(out)
}
|
## Getting Data
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL, destfile = "household_power_consumption.zip")
unzip("household_power_consumption.zip")
## Cleaning Data
power_data_file <- "household_power_consumption.txt"
power_data <- read.table(power_data_file, sep = ";",
colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric",
"numeric", "numeric"), header = TRUE, na.strings = "?")
## Checking Header
head(power_data)
## Reading 2 days in Feb
power_data_feb <- subset(power_data, power_data$Date == "1/2/2007" | power_data$Date == "2/2/2007")
## Converting Date/Time
date_time <- strptime(paste(power_data_feb$Date, power_data_feb$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
## Converting global active power to numeric
global_active_power <- as.numeric(power_data_feb$Global_active_power)
## Plot2
png("plot2.png", width=480, height=480)
plot(date_time, global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
| /plot2.R | no_license | jmason1980/ExData_Plotting1 | R | false | false | 1,200 | r | ## Getting Data
# Fetch and extract the UCI household power consumption archive.
data_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zip_file <- "household_power_consumption.zip"
download.file(data_url, destfile = zip_file)
unzip(zip_file)
## Cleaning Data ----
# Semicolon-separated file; "?" marks missing values. The first two columns
# (Date, Time) stay character, the remaining seven are numeric.
txt_file <- "household_power_consumption.txt"
col_types <- c("character", "character", "numeric", "numeric", "numeric",
               "numeric", "numeric", "numeric", "numeric")
power_data <- read.table(txt_file, sep = ";", colClasses = col_types,
                         header = TRUE, na.strings = "?")
## Checking Header ----
head(power_data)
## Reading 2 days in Feb ----
power_data_feb <- subset(power_data, Date %in% c("1/2/2007", "2/2/2007"))
## Converting Date/Time ----
# Combine the Date and Time columns into POSIXlt timestamps.
date_time <- strptime(paste(power_data_feb$Date, power_data_feb$Time),
                      "%d/%m/%Y %H:%M:%S")
## Converting global active power to numeric ----
global_active_power <- as.numeric(power_data_feb$Global_active_power)
## Plot2 ----
# Line chart of global active power over the two selected days.
png("plot2.png", width = 480, height = 480)
plot(date_time, global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
#' Quasi-Newton acceleration of MM algorithm
#'
#' \code{qnamm} performs Quasi-Newton acceleration of an MM algorithm.
#'
#' @param x initial iterate
#' @param fx_mm MM algorithm map
#' @param qn number of secants
#' @param fx_obj handle to objective function
#' @param max_iter maximum number of iterations
#' @param tol convergence tolerance
#' @param ... Additional arguments to pass to \code{fx_mm}
#' @import compiler corpcor
#' @export
#' @references H Zhou, D Alexander, and K Lange. (2011) A quasi-Newton acceleration method for high-dimensional optimization algorithms, Statistics and Computing, 21(2):261-273.
qnamm <- cmpfun(function(x, fx_mm, qn, fx_obj, max_iter=100, tol=1e-6, ...) {
  # Quasi-Newton acceleration of an MM algorithm (Zhou, Alexander & Lange 2011).
  # x: initial iterate; fx_mm: one MM update step; qn: number of secant pairs;
  # fx_obj: objective function; arguments in ... are forwarded to both
  # fx_mm and fx_obj. Returns list(x, objective, iter, Xhist): final iterate,
  # objective trace, total iteration count and the history of iterates.
  n <- length(x)
  # U holds differences of consecutive MM iterates; V holds the differences
  # one MM step later. Column pairs (U[, j], V[, j]) are the qn secants.
  U <- matrix(0,n,qn)
  V <- matrix(0,n,qn)
  objval <- Inf
  objective <- double(max_iter)
  Xhist <- matrix(NA,n,qn+max_iter)
  #
  # accumulate the first QN differences for Quasi-Newton acceleration
  #
  for (i in 1:qn) {
    Xhist[,i] <- x
    x_old <- x
    x <- fx_mm(x, ...)
    U[,i] <- x - x_old
  }
  # V[, j] equals U[, j + 1]; the last column needs one extra MM step.
  V[,1:(qn-1)] <- U[,2:qn]
  x_old <- x
  x <- fx_mm(x, ...)
  V[,qn] <- x - x_old
  # old_secant indexes the oldest secant pair; it cycles 1..qn so U/V act as
  # a circular buffer of the most recent qn secants.
  old_secant <- 1
  C <- t(U)%*%(U-V)
  nacc <- 0   # accepted quasi-Newton jumps
  nrej <- 0   # rejected quasi-Newton jumps
  for (i in 1:max_iter) {
    Xhist[,qn+i] <- x
    # NOTE(review): objval_old is assigned but never read afterwards.
    objval_old <- objval
    x_old <- x
    x <- fx_mm(x, ...)
    #
    # do one more MM step to accumulate secant pairs
    #
    U[,old_secant] <- x - x_old
    x_old <- x
    x <- fx_mm(x, ...)
    V[,old_secant] <- x - x_old
    # Update only the row and column of C = t(U) %*% (U - V) affected by
    # the replaced secant pair, instead of recomputing the full product.
    C[old_secant,] <- t(U[,old_secant,drop=FALSE]) %*% (U-V)
    C[,old_secant] <- t(U) %*% (U[,old_secant,drop=FALSE] - V[,old_secant,drop=FALSE])
    new_secant <- old_secant
    old_secant <- (old_secant %% qn) + 1
    objval_MM <- fx_obj(x, ...)
    #
    # quasi-Newton jump; pseudoinverse() (corpcor) replaces the commented-out
    # solve() so a (near-)singular C does not abort the iteration
    #
    # x_qn <- x_old + V %*% solve(C, t(U)%*%U[,new_secant,drop=FALSE])
    x_qn <- x_old + V %*% pseudoinverse(C) %*% (t(U)%*%U[,new_secant,drop=FALSE])
    x_qn <- fx_mm(x_qn, ...)
    objval_QN <- fx_obj(x_qn, ...)
    #
    # choose MM vs QN jump: keep the quasi-Newton iterate only if it yields
    # a lower objective than the plain MM iterate
    #
    if (objval_QN < objval_MM) {
      x <- x_qn;
      objval <- objval_QN;
      nacc <- nacc + 1
    } else {
      objval <- objval_MM;
      nrej <- nrej + 1
    }
    objective[i] <- objval
    #
    # stopping rule: relative change of the iterate (printed every iteration)
    #
    print(norm(as.matrix(x-x_old),'f')/(norm(as.matrix(x_old),'f')+1))
    if (norm(as.matrix(x-x_old),'f') < tol*(norm(as.matrix(x_old),'f')+1)) break
  }
  print(paste("Accepted:", nacc))
  print(paste("Rejected:", nrej))
  return(list(x=x, objective=objective[1:i], iter=i+qn, Xhist=Xhist[,1:(i+qn),drop=FALSE]))
})
| /R/qnamm.r | no_license | cran/splitFeas | R | false | false | 2,596 | r | #' Quasi-Newton acceleration of MM algorithm
#'
#' \code{qnamm} performs Quasi-Newton acceleration of an MM algorithm.
#'
#' @param x initial iterate
#' @param fx_mm MM algorithm map
#' @param qn number of secants
#' @param fx_obj handle to objective function
#' @param max_iter maximum number of iterations
#' @param tol convergence tolerance
#' @param ... Additional arguments to pass to \code{fx_mm}
#' @import compiler corpcor
#' @export
#' @references H Zhou, D Alexander, and K Lange. (2011) A quasi-Newton acceleration method for high-dimensional optimization algorithms, Statistics and Computing, 21(2):261-273.
qnamm <- cmpfun(function(x, fx_mm, qn, fx_obj, max_iter=100, tol=1e-6, ...) {
  # Quasi-Newton acceleration of an MM algorithm (Zhou, Alexander & Lange 2011).
  # x: initial iterate; fx_mm: one MM update step; qn: number of secant pairs;
  # fx_obj: objective function; arguments in ... are forwarded to both
  # fx_mm and fx_obj. Returns list(x, objective, iter, Xhist): final iterate,
  # objective trace, total iteration count and the history of iterates.
  n <- length(x)
  # U holds differences of consecutive MM iterates; V holds the differences
  # one MM step later. Column pairs (U[, j], V[, j]) are the qn secants.
  U <- matrix(0,n,qn)
  V <- matrix(0,n,qn)
  objval <- Inf
  objective <- double(max_iter)
  Xhist <- matrix(NA,n,qn+max_iter)
  #
  # accumulate the first QN differences for Quasi-Newton acceleration
  #
  for (i in 1:qn) {
    Xhist[,i] <- x
    x_old <- x
    x <- fx_mm(x, ...)
    U[,i] <- x - x_old
  }
  # V[, j] equals U[, j + 1]; the last column needs one extra MM step.
  V[,1:(qn-1)] <- U[,2:qn]
  x_old <- x
  x <- fx_mm(x, ...)
  V[,qn] <- x - x_old
  # old_secant indexes the oldest secant pair; it cycles 1..qn so U/V act as
  # a circular buffer of the most recent qn secants.
  old_secant <- 1
  C <- t(U)%*%(U-V)
  nacc <- 0   # accepted quasi-Newton jumps
  nrej <- 0   # rejected quasi-Newton jumps
  for (i in 1:max_iter) {
    Xhist[,qn+i] <- x
    # NOTE(review): objval_old is assigned but never read afterwards.
    objval_old <- objval
    x_old <- x
    x <- fx_mm(x, ...)
    #
    # do one more MM step to accumulate secant pairs
    #
    U[,old_secant] <- x - x_old
    x_old <- x
    x <- fx_mm(x, ...)
    V[,old_secant] <- x - x_old
    # Update only the row and column of C = t(U) %*% (U - V) affected by
    # the replaced secant pair, instead of recomputing the full product.
    C[old_secant,] <- t(U[,old_secant,drop=FALSE]) %*% (U-V)
    C[,old_secant] <- t(U) %*% (U[,old_secant,drop=FALSE] - V[,old_secant,drop=FALSE])
    new_secant <- old_secant
    old_secant <- (old_secant %% qn) + 1
    objval_MM <- fx_obj(x, ...)
    #
    # quasi-Newton jump; pseudoinverse() (corpcor) replaces the commented-out
    # solve() so a (near-)singular C does not abort the iteration
    #
    # x_qn <- x_old + V %*% solve(C, t(U)%*%U[,new_secant,drop=FALSE])
    x_qn <- x_old + V %*% pseudoinverse(C) %*% (t(U)%*%U[,new_secant,drop=FALSE])
    x_qn <- fx_mm(x_qn, ...)
    objval_QN <- fx_obj(x_qn, ...)
    #
    # choose MM vs QN jump: keep the quasi-Newton iterate only if it yields
    # a lower objective than the plain MM iterate
    #
    if (objval_QN < objval_MM) {
      x <- x_qn;
      objval <- objval_QN;
      nacc <- nacc + 1
    } else {
      objval <- objval_MM;
      nrej <- nrej + 1
    }
    objective[i] <- objval
    #
    # stopping rule: relative change of the iterate (printed every iteration)
    #
    print(norm(as.matrix(x-x_old),'f')/(norm(as.matrix(x_old),'f')+1))
    if (norm(as.matrix(x-x_old),'f') < tol*(norm(as.matrix(x_old),'f')+1)) break
  }
  print(paste("Accepted:", nacc))
  print(paste("Rejected:", nrej))
  return(list(x=x, objective=objective[1:i], iter=i+qn, Xhist=Xhist[,1:(i+qn),drop=FALSE]))
})
|
# Shiny dashboard of Zillow housing metrics, filtered by state and metro.
# Loads (and installs if missing) the packages used by the app.
pacman::p_load("shiny","shinymaterial", "plotly")
# read_data.R is expected to define data_1..data_4 (ZHVI, price-to-rent,
# buyer-seller index and price-cut tables) used below -- TODO confirm.
source("read_data.R")
# UI: material-design page with one filter card (state + metro selectors)
# and four plotly cards, laid out in two rows of two plots each.
ui=material_page(
  title = "Zillow Dashboard",
  tags$br(),
  material_row(
    material_column(
      width=2,
      material_card(
        title = "",
        depth = 4,
        uiOutput("state_select"),
        uiOutput("metro_select")
      )
    ),
    material_column(
      width = 5,
      material_card(
        title = "Average Zillow Housing Value Index",
        depth = 4,
        plotlyOutput("zhvi_plot")
      )
    ),
    material_column(
      width=5,
      material_card(
        title = "Price to Rent Ratio",
        depth = 4,
        plotlyOutput("p2rr_plot")
      )
    ),
    # NOTE(review): this second row is nested inside the first material_row;
    # the empty width-2 column keeps it aligned with the filter card above.
    material_row(
      material_column(
        width=2
      ),
      material_column(
        width=5,
        material_card(
          title = "Price Cut (%)",
          depth = 4,
          plotlyOutput("pcut_plot")
        )
      ),
      material_column(
        width=5,
        material_card(
          title = "Buyer-Seller Index",
          depth = 4,
          plotlyOutput("bsi_plot")
        )
      )
    )
  )
)
server<-function(input, output) {
  # State selector is built server-side from the State factor levels of data_1.
  output$state_select <- renderUI({
    selectInput(inputId = "state_input", label = "Select State:", choices = levels(data_1$State))
  })
  # Metros available in the selected state, collected across all four tables;
  # req() suspends evaluation until a state has been chosen.
  metros <- reactive({
    c(as.character(data_1[data_1$State==req(input$state_input),"Metro"]),
      as.character(data_2[data_2$State==req(input$state_input),"Metro"]),
      as.character(data_3[data_3$State==req(input$state_input),"Metro"]),
      as.character(data_4[data_4$State==req(input$state_input),"Metro"])) %>% unique()
  })
  output$metro_select <- renderUI({
    selectInput(inputId = "metro_input", label = "Select Metro:", choices = metros())
  })
  # One reactive per table: filter to the selected state/metro and average
  # the metric per date.
  # NOTE(review): na.rm = T should be spelled na.rm = TRUE.
  data_1_input <- reactive({
    mydata_1 <- (data_1 %>% filter(State == req(input$state_input), Metro==req(input$metro_input)) %>% group_by(date) %>% summarise(ZHVI=mean(zhvi,na.rm = T)))
  })
  data_2_input <- reactive({
    mydata_2 <- (data_2 %>% filter(State == req(input$state_input), Metro==req(input$metro_input)) %>% group_by(date) %>% summarise(P2RR=mean(p2rr,na.rm = T)))
  })
  data_3_input <- reactive({
    mydata_3 <- (data_3 %>% filter(State == req(input$state_input), Metro==req(input$metro_input)) %>% group_by(date) %>% summarise(BSI=mean(bsi,na.rm = T)))
  })
  data_4_input <- reactive({
    mydata_4 <- (data_4 %>% filter(State == req(input$state_input), Metro==req(input$metro_input)) %>% group_by(date) %>% summarise(PCUT_PCT=mean(pcut,na.rm = T)))
  })
  # Each output: ggplot line chart converted to plotly, mode bar hidden.
  output$zhvi_plot <- renderPlotly({
    plot_output <- ggplot(data_1_input(), aes(date,ZHVI)) + geom_line()
    plot_output %>% ggplotly() %>% config(displayModeBar=F)
  })
  output$p2rr_plot <- renderPlotly({
    plot_output <- ggplot(data_2_input(), aes(date,P2RR)) + geom_line()
    plot_output %>% ggplotly() %>% config(displayModeBar=F)
  })
  output$pcut_plot <- renderPlotly({
    plot_output <- ggplot(data_4_input(), aes(date,PCUT_PCT)) + geom_line()
    plot_output %>% ggplotly() %>% config(displayModeBar=F)
  })
  output$bsi_plot <- renderPlotly({
    plot_output <- ggplot(data_3_input(), aes(date,BSI)) + geom_line()
    plot_output %>% ggplotly() %>% config(displayModeBar=F)
  })
}
shinyApp(ui, server) | /zillow app.R | no_license | ahernnelson/Zillow-Application | R | false | false | 3,405 | r | pacman::p_load("shiny","shinymaterial", "plotly")
# read_data.R is expected to define data_1..data_4 (ZHVI, price-to-rent,
# buyer-seller index and price-cut tables) used below -- TODO confirm.
source("read_data.R")
# UI: material-design page with one filter card (state + metro selectors)
# and four plotly cards, laid out in two rows of two plots each.
ui=material_page(
  title = "Zillow Dashboard",
  tags$br(),
  material_row(
    material_column(
      width=2,
      material_card(
        title = "",
        depth = 4,
        uiOutput("state_select"),
        uiOutput("metro_select")
      )
    ),
    material_column(
      width = 5,
      material_card(
        title = "Average Zillow Housing Value Index",
        depth = 4,
        plotlyOutput("zhvi_plot")
      )
    ),
    material_column(
      width=5,
      material_card(
        title = "Price to Rent Ratio",
        depth = 4,
        plotlyOutput("p2rr_plot")
      )
    ),
    # NOTE(review): this second row is nested inside the first material_row;
    # the empty width-2 column keeps it aligned with the filter card above.
    material_row(
      material_column(
        width=2
      ),
      material_column(
        width=5,
        material_card(
          title = "Price Cut (%)",
          depth = 4,
          plotlyOutput("pcut_plot")
        )
      ),
      material_column(
        width=5,
        material_card(
          title = "Buyer-Seller Index",
          depth = 4,
          plotlyOutput("bsi_plot")
        )
      )
    )
  )
)
server<-function(input, output) {
  # State selector is built server-side from the State factor levels of data_1.
  output$state_select <- renderUI({
    selectInput(inputId = "state_input", label = "Select State:", choices = levels(data_1$State))
  })
  # Metros available in the selected state, collected across all four tables;
  # req() suspends evaluation until a state has been chosen.
  metros <- reactive({
    c(as.character(data_1[data_1$State==req(input$state_input),"Metro"]),
      as.character(data_2[data_2$State==req(input$state_input),"Metro"]),
      as.character(data_3[data_3$State==req(input$state_input),"Metro"]),
      as.character(data_4[data_4$State==req(input$state_input),"Metro"])) %>% unique()
  })
  output$metro_select <- renderUI({
    selectInput(inputId = "metro_input", label = "Select Metro:", choices = metros())
  })
  # One reactive per table: filter to the selected state/metro and average
  # the metric per date.
  # NOTE(review): na.rm = T should be spelled na.rm = TRUE.
  data_1_input <- reactive({
    mydata_1 <- (data_1 %>% filter(State == req(input$state_input), Metro==req(input$metro_input)) %>% group_by(date) %>% summarise(ZHVI=mean(zhvi,na.rm = T)))
  })
  data_2_input <- reactive({
    mydata_2 <- (data_2 %>% filter(State == req(input$state_input), Metro==req(input$metro_input)) %>% group_by(date) %>% summarise(P2RR=mean(p2rr,na.rm = T)))
  })
  data_3_input <- reactive({
    mydata_3 <- (data_3 %>% filter(State == req(input$state_input), Metro==req(input$metro_input)) %>% group_by(date) %>% summarise(BSI=mean(bsi,na.rm = T)))
  })
  data_4_input <- reactive({
    mydata_4 <- (data_4 %>% filter(State == req(input$state_input), Metro==req(input$metro_input)) %>% group_by(date) %>% summarise(PCUT_PCT=mean(pcut,na.rm = T)))
  })
  # Each output: ggplot line chart converted to plotly, mode bar hidden.
  output$zhvi_plot <- renderPlotly({
    plot_output <- ggplot(data_1_input(), aes(date,ZHVI)) + geom_line()
    plot_output %>% ggplotly() %>% config(displayModeBar=F)
  })
  output$p2rr_plot <- renderPlotly({
    plot_output <- ggplot(data_2_input(), aes(date,P2RR)) + geom_line()
    plot_output %>% ggplotly() %>% config(displayModeBar=F)
  })
  output$pcut_plot <- renderPlotly({
    plot_output <- ggplot(data_4_input(), aes(date,PCUT_PCT)) + geom_line()
    plot_output %>% ggplotly() %>% config(displayModeBar=F)
  })
  output$bsi_plot <- renderPlotly({
    plot_output <- ggplot(data_3_input(), aes(date,BSI)) + geom_line()
    plot_output %>% ggplotly() %>% config(displayModeBar=F)
  })
}
shinyApp(ui, server) |
bootstrapping <- function(pos, neg)
{
  # Nonparametric resampling of dPCR partition counts: for each pair
  # (pos[i], neg[i]) redraw pos[i] + neg[i] partitions with replacement and
  # count how many positives/negatives the resample contains.
  #
  # pos, neg: integer vectors of equal length (positive / negative counts).
  # Returns a list of two vectors: resampled positive and negative counts.
  stopifnot(length(pos) == length(neg))
  # Preallocate instead of growing with c() in the loop (O(n) vs O(n^2));
  # seq_along() also handles empty input, where 1:length(pos) would fail.
  rval.pos <- integer(length(pos))
  rval.neg <- integer(length(pos))
  for (i in seq_along(pos)) {
    pp <- pos[i]
    nn <- neg[i]
    # Same sampling call as before (rep(TRUE, .) == rep(T, .)), so the
    # random stream for a given seed is unchanged.
    n.pos <- sum(sample(c(rep(TRUE, pp), rep(FALSE, nn)), pp + nn, replace = TRUE))
    rval.pos[i] <- n.pos
    rval.neg[i] <- pp + nn - n.pos
  }
  return(list(rval.pos, rval.neg))
}
| /digitalPCR/R/bootstrapping.R | no_license | ingted/R-Examples | R | false | false | 302 | r | bootstrapping <-
function(pos, neg)
{
  # Nonparametric resampling of dPCR partition counts: for each pair
  # (pos[i], neg[i]) redraw pos[i] + neg[i] partitions with replacement and
  # count how many positives/negatives the resample contains.
  #
  # pos, neg: integer vectors of equal length (positive / negative counts).
  # Returns a list of two vectors: resampled positive and negative counts.
  stopifnot(length(pos) == length(neg))
  # Preallocate instead of growing with c() in the loop (O(n) vs O(n^2));
  # seq_along() also handles empty input, where 1:length(pos) would fail.
  rval.pos <- integer(length(pos))
  rval.neg <- integer(length(pos))
  for (i in seq_along(pos)) {
    pp <- pos[i]
    nn <- neg[i]
    # Same sampling call as before (rep(TRUE, .) == rep(T, .)), so the
    # random stream for a given seed is unchanged.
    n.pos <- sum(sample(c(rep(TRUE, pp), rep(FALSE, nn)), pp + nn, replace = TRUE))
    rval.pos[i] <- n.pos
    rval.neg[i] <- pp + nn - n.pos
  }
  return(list(rval.pos, rval.neg))
}
|
A=read.csv(file="~/Documents/RStudio(домахи)/logregr_data.csv")
# TASK 1
# The file logregr_data.csv contains a sample of 5000 Russians.
# a) Use logistic regression to study the association between age and AH
#    (arterial hypertension). Interpret the result through the odds ratio,
#    e.g. "an increase of age by 1 multiplies the odds of developing AH by XX".
# H0: AGE does not explain the dependent variable code_AH (its coefficient = 0).
alpha=0.05
mod1=glm(formula=code_AH~AGE, data=A, family = binomial(link = "logit"))
summary(mod1)
# p-value < 2e-16, so H0 is rejected.
coef(mod1) # model coefficients for the predictors
exp(coef(mod1))
# The odds of disease are multiplied by ~1.1 for each additional year of age.
# b) Do the same for sex and LDL (see the variable descriptions).
# H0: SEX and LDL do not explain the dependent variable code_AH (coefficients = 0).
mod2=glm(formula=code_AH~SEX+LDL, data=A, family = binomial(link = "logit"))
summary(mod2)
# LDL is a significant factor (p-value < 2e-16), SEX is not (p-value = 0.398).
exp(coef(mod2))
# The odds of disease are multiplied by ~1.5 per unit increase in LDL
# (and decrease by ... for women?).
# c) Study the association of all variables in the data set with AH at once.
#    Did anything change compared with the single-factor analysis? Compare the
#    single-factor ORs with the multi-factor ones for sex, age and LDL and
#    explain the result.
# H0: the independent variables (SEX, AGE, WAI, ...) do not explain the
#     dependent variable code_AH (their coefficients = 0).
mod3=glm(formula= code_AH~.-X, data=A, family = binomial(link = "logit"))
summary(mod3)
# All factors are significant except LDL.
exp(coef(mod3))
# The odds of disease are multiplied by ~1.08 per additional year of age with
# the remaining factors fixed, by ~1.07 per unit increase of BMI, and so on.
# The odds ratios for age, LDL and sex decreased in the multi-factor model.
# The factors added to the third model most likely themselves depend on sex
# and age, so the OR of each individual factor (with the others fixed) was
# redistributed and, as a consequence, decreased.
# (The same holds for the coefficients in the multi- vs single-factor models.)
# TASK 2
# For item (c) of the previous task use other link functions: probit and
# Cauchy. How can the changes in the statistical conclusions be explained?
mod4=glm(formula= code_AH~AGE, data=A, family = binomial(link = "probit"))
summary(mod4)
mod5=glm(formula= code_AH~AGE, data=A, family = binomial(link = "cauchit"))
summary(mod5)
mod6=glm(formula=code_AH~SEX+LDL, data=A, family = binomial(link = "probit"))
summary(mod6)
mod7=glm(formula=code_AH~SEX+LDL, data=A, family = binomial(link = "cauchit"))
summary(mod7)
mod8=glm(formula= code_AH~.-X, data=A, family = binomial(link = "probit"))
summary(mod8)
mod9=glm(formula= code_AH~.-X, data=A, family = binomial(link = "cauchit"))
summary(mod9)
coef(mod1)
coef(mod4)
coef(mod5)
coef(mod2)
coef(mod6)
coef(mod7)
coef(mod3)
coef(mod8)
coef(mod9)
# In all three models the coefficients and intercepts changed slightly;
# the p-values changed insignificantly.
exp(coef(mod1))
exp(coef(mod4))
exp(coef(mod5))
exp(coef(mod2))
exp(coef(mod6))
exp(coef(mod7))
exp(coef(mod3))
exp(coef(mod8))
exp(coef(mod9))
# Almost nothing changed. So why take uninterpretable models when logistic
# regression is available -- there is no real need for probit or Cauchy here.
| /task8/task8 (logistic regression).r | no_license | ktrndy/home_task_applied_statistics | R | false | false | 4,819 | r | A=read.csv(file="~/Documents/RStudio(домахи)/logregr_data.csv")
#ЗАДАЧА 1
#Файл logregr_data.csv содержит выборку из 5000 россиян.
#a) С помощью логрегрессии исследуйте связь между возрастом и АГ. Интерпретируйте результат через отношение шансов. К примеру: «повышение возраста на 1 увеличивает шанс развития АГ в ХХ раз».
#H0: AGE не объясняет зависимую переменную code_AH (коэффиц. при нём = 0).
alpha=0.05
mod1=glm(formula=code_AH~AGE, data=A, family = binomial(link = "logit"))
summary(mod1)
#p-value < 2e-16, отклоняем H0.
coef(mod1) #коэфициенты при факторах
exp(coef(mod1))
#отношение шансов заболеть увеличивается ~ в 1.1 раз при увеличении возраста на 1 год.
#b) Сделайте тоже самое для пола и ЛНП (см. описание переменных)
#H0: SEX и LDL не объясняет зависимую переменную code_AH (коэффиц. при нём = 0).
mod2=glm(formula=code_AH~SEX+LDL, data=A, family = binomial(link = "logit"))
summary(mod2)
#LDL - значимый фактор (p-value < 2e-16), SEX - нет (p-value = 0.398).
exp(coef(mod2))
#отношение шансов заболеть увеличивается ~ в 1.5 раз при увеличении LDL на 1 (и уменьшается в .. для женщин?).
#c) Изучите связь сразу всех переменных из базы с АГ. Что-нибудь поменялось по сравнению с однофакторным анализом? Сравните однофакторные ОШ с многофакторными для пола, возраста и ЛНП и объясните результат
#H0: независимые переменные(SEX,AGE,WAI..) не объясняют зависимую(code_AH) (коэффиц. при них = 0)
mod3=glm(formula= code_AH~.-X, data=A, family = binomial(link = "logit"))
summary(mod3)
#значимые факторы - все, кроме LDL.
exp(coef(mod3))
#Jтношение шансов заболеть увеличивается ~ в 1.08 раз при увеличении возраста на 1 год и фиксированных
# остальных факторах, ~ в 1.07 раз при увеличении BMI на 1 и т.д.
#Отношение шансов для возраста и LDL и пола уменьшилось в многофакторном.
#Добавленные в 3ю модель факторы сами по себе скорее всего зависят от пола и возраста из-за чего
# ОШ для отдельных факторов при фиксированных остальных перераспределилось и, как следствие, уменьшилось.
#(то же самое с коэффициентами в многофакторном и однофакторном).
#ЗАДАЧА 2
#Для пункта с предыдущего задания используйте другие линки: пробит и Коши. Как можно объяснить изменения в статистических выводах?
mod4=glm(formula= code_AH~AGE, data=A, family = binomial(link = "probit"))
summary(mod4)
mod5=glm(formula= code_AH~AGE, data=A, family = binomial(link = "cauchit"))
summary(mod5)
mod6=glm(formula=code_AH~SEX+LDL, data=A, family = binomial(link = "probit"))
summary(mod6)
mod7=glm(formula=code_AH~SEX+LDL, data=A, family = binomial(link = "cauchit"))
summary(mod7)
mod8=glm(formula= code_AH~.-X, data=A, family = binomial(link = "probit"))
summary(mod8)
mod9=glm(formula= code_AH~.-X, data=A, family = binomial(link = "cauchit"))
summary(mod9)
coef(mod1)
coef(mod4)
coef(mod5)
coef(mod2)
coef(mod6)
coef(mod7)
coef(mod3)
coef(mod8)
coef(mod9)
# во всех 3х моделях немного изменились коэфициенты при факторах и свободные члены, p-value изменились незначительно.
exp(coef(mod1))
exp(coef(mod4))
exp(coef(mod5))
exp(coef(mod2))
exp(coef(mod6))
exp(coef(mod7))
exp(coef(mod3))
exp(coef(mod8))
exp(coef(mod9))
# почти ничего не поменялось. зачем тогда брать неинтерпретируемые модели, если можно взять логрегрессию. так что надобность в пробите и коши просто отпадает
|
## K-means clustering of the iris data -----------------------------------
data("iris")
library(ggplot2)

## Scatter plot of the raw data, coloured by the true species labels.
ggplot(iris, aes(Petal.Length, Petal.Width, color = Species)) + geom_point()

## Fit k-means with 3 clusters on petal length and width only (columns 3:4);
## nstart = 20 random restarts reduce the risk of a poor local optimum.
irisCluster <- kmeans(iris[, 3:4], 3, nstart = 20)
irisCluster

## Cross-tabulate clusters against species to judge how good the estimates were.
table(irisCluster$cluster, iris$Species)

## Plot the cluster assignments.
irisCluster$cluster <- as.factor(irisCluster$cluster)
ggplot(iris, aes(Petal.Length, Petal.Width, color = irisCluster$cluster)) + geom_point()

## Association rules ------------------------------------------------------
## Fix: install 'arules' only when it is missing; the original unconditional
## install.packages() call re-installed the package on every run.
if (!requireNamespace("arules", quietly = TRUE)) {
  install.packages("arules")
}
library(arules)
data(Groceries)
## Take a peek at the data.
inspect(head(Groceries))
## Find the most frequently purchased item sets.
frequentItems <- eclat(Groceries, parameter = list(supp = 0.07, maxlen = 15))
## Take a peek at said item sets and plot the top 10.
inspect(frequentItems)
itemFrequencyPlot(Groceries, topN = 10, type = "absolute", main = "Item Frequency")
## Mine association rules (support >= 0.001, confidence >= 0.5).
rules <- apriori(Groceries, parameter = list(supp = 0.001, conf = 0.5))
## Rules sorted by confidence.
rules_conf <- sort(rules, by = "confidence", decreasing = TRUE)
inspect(head(rules_conf))
## Rules sorted by lift.
rules_lift <- sort(rules, by = "lift", decreasing = TRUE)
inspect(head(rules_lift))
## Control the length of rules and the confidence: higher "conf" means
## stronger rules, higher "maxlen" means longer rules.
rules <- apriori(Groceries, parameter = list(supp = 0.001, conf = 0.5, maxlen = 3))
## You can search for items on the RHS by changing what's in quotes.
rules <- apriori(data = Groceries, parameter = list(supp = 0.001, conf = 0.08),
                 appearance = list(default = "lhs", rhs = "bottled beer"),
                 control = list(verbose = FALSE))
rules_conf <- sort(rules, by = "confidence", decreasing = TRUE)
inspect(head(rules_conf))
## You can search for items on the LHS by changing what's in quotes.
rules <- apriori(data = Groceries, parameter = list(supp = 0.001, conf = 0.15, minlen = 2),
                 appearance = list(default = "rhs", lhs = "bottled beer"),
                 control = list(verbose = FALSE))
rules_conf <- sort(rules, by = "confidence", decreasing = TRUE)
inspect(head(rules_conf))
| /advanced_analytics__COSC515/Cluster_Association.R | no_license | tliggett/coursework | R | false | false | 2,151 | r | data("iris")
library(ggplot2)
## Scatter Plot of the data
ggplot(iris, aes(Petal.Length, Petal.Width, color = Species)) + geom_point()
## Create the kmeans clusters. This will create 3 clusters using the petal length and width
irisCluster <-kmeans(iris[,3:4],3,nstart=20)
irisCluster
## This table tells us how good our estimates were
table(irisCluster$cluster,iris$Species)
## Here is a graph of our classifications
irisCluster$cluster <- as.factor(irisCluster$cluster)
ggplot(iris, aes(Petal.Length, Petal.Width, color = irisCluster$cluster)) + geom_point()
## Association Rules
install.packages("arules")
library(arules)
data(Groceries)
## Take a peek at the Data
inspect(head(Groceries))
##Creates a list of the most frequently purchased items
frequentItems <- eclat (Groceries, parameter = list(supp = 0.07, maxlen = 15))
##Take a peek at said items and graph the top 10
inspect(frequentItems)
itemFrequencyPlot(Groceries, topN=10, type="absolute", main="Item Frequency")
## Create our Association Rules
rules <- apriori (Groceries, parameter = list(supp = 0.001, conf = 0.5))
## Rules Sorted by Confidence
rules_conf <- sort (rules, by="confidence", decreasing=TRUE)
inspect(head(rules_conf))
## Rules Sorted by Lift
rules_lift <- sort (rules, by="lift", decreasing=TRUE)
inspect(head(rules_lift))
## Control Length of Rules and Confidence. Higher "conf" Means stronger rules
## higher "maxlen" means longer rules
rules <- apriori(Groceries, parameter = list (supp = 0.001, conf = 0.5, maxlen=3))
## You can search for items on the RHS by changing what's in quotes
rules <- apriori (data=Groceries, parameter=list (supp=0.001,conf = 0.08), appearance = list (default="lhs",rhs="bottled beer"), control = list (verbose=F))
rules_conf <- sort (rules, by="confidence", decreasing=TRUE)
inspect(head(rules_conf))
## You can search for items on the LHS by changing what's in quotes
rules <- apriori (data=Groceries, parameter=list (supp=0.001,conf = 0.15,minlen=2), appearance = list(default="rhs",lhs="bottled beer"), control = list (verbose=F))
rules_conf <- sort (rules, by="confidence", decreasing=TRUE)
inspect(head(rules_conf))
|
#' Select bands of a data cube
#'
#' Create a proxy data cube, which selects specific bands of a data cube. The resulting cube
#' will drop any other bands.
#'
#' @param cube source data cube
#' @param bands character vector with band names
#' @return proxy data cube object
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#'   L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#'                          ".TIF", recursive = TRUE, full.names = TRUE)
#'   create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#'               bottom=4345299, top=4744931, t0="2018-04", t1="2018-07"),
#'               srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' L8.cube = raster_cube(L8.col, v)
#' L8.rgb = select_bands(L8.cube, c("B02", "B03", "B04"))
#' L8.rgb
#' \donttest{
#' plot(L8.rgb, rgb=3:1)
#' }
#'
#' @note This function returns a proxy object, i.e., it will not start any computations besides deriving the shape of the result.
#' @note For performance reasons, \code{select_bands} should always be called directly on a cube created with \code{\link{raster_cube}} and
#' drop all unneeded bands. This allows to reduce RasterIO and warp operations in GDAL.
#' @export
select_bands <- function(cube, bands) {
  stopifnot(is.cube(cube))
  # Use `<-` (not `=`) for assignment, per R style; behaviour unchanged.
  x <- gc_create_select_bands_cube(cube, bands)
  class(x) <- c("select_bands_cube", "cube", "xptr")
  return(x)
}
# Internal: test whether `obj` is a usable select_bands_cube proxy object.
# Returns FALSE immediately for objects of any other class; for objects of
# the right class it additionally checks the underlying pointer via
# gc_is_null() (presumably a stale/NULL external-pointer check -- confirm)
# and returns FALSE with a warning when that check fails.
is.select_bands_cube <- function(obj) {
  # inherits() is the idiomatic S3 class test (replaces `%in% class(obj)`).
  if (!inherits(obj, "select_bands_cube")) {
    return(FALSE)
  }
  if (gc_is_null(obj)) {
    warning("GDAL data cube proxy object is invalid")
    return(FALSE)
  }
  return(TRUE)
}
| /R/select_bands.R | permissive | rsbivand/gdalcubes_R | R | false | false | 1,887 | r | #' Select bands of a data cube
#'
#' Create a proxy data cube, which selects specific bands of a data cube. The resulting cube
#' will drop any other bands.
#'
#' @param cube source data cube
#' @param bands character vector with band names
#' @return proxy data cube object
#' @examples
#' # create image collection from example Landsat data only
#' # if not already done in other examples
#' if (!file.exists(file.path(tempdir(), "L8.db"))) {
#' L8_files <- list.files(system.file("L8NY18", package = "gdalcubes"),
#' ".TIF", recursive = TRUE, full.names = TRUE)
#' create_image_collection(L8_files, "L8_L1TP", file.path(tempdir(), "L8.db"))
#' }
#'
#' L8.col = image_collection(file.path(tempdir(), "L8.db"))
#' v = cube_view(extent=list(left=388941.2, right=766552.4,
#' bottom=4345299, top=4744931, t0="2018-04", t1="2018-07"),
#' srs="EPSG:32618", nx = 497, ny=526, dt="P1M")
#' L8.cube = raster_cube(L8.col, v)
#' L8.rgb = select_bands(L8.cube, c("B02", "B03", "B04"))
#' L8.rgb
#' \donttest{
#' plot(L8.rgb, rgb=3:1)
#' }
#'
#' @note This function returns a proxy object, i.e., it will not start any computations besides deriving the shape of the result.
#' @note For performance reasons, \code{select_bands} should always be called directly on a cube created with \code{\link{raster_cube}} and
#' drop all unneded bands. This allows to reduce RasterIO and warp operations in GDAL.
#' @export
select_bands <- function(cube, bands) {
stopifnot(is.cube(cube))
x = gc_create_select_bands_cube(cube, bands)
class(x) <- c("select_bands_cube", "cube", "xptr")
return(x)
}
is.select_bands_cube <- function(obj) {
if(!("select_bands_cube" %in% class(obj))) {
return(FALSE)
}
if (gc_is_null(obj)) {
warning("GDAL data cube proxy object is invalid")
return(FALSE)
}
return(TRUE)
}
|
# Auto-generated example script for upwaver::list_cards.
library(upwaver)
### Name: list_cards
### Title: Information about the cards on a board
### Aliases: list_cards
### ** Examples
# NOTE(review): this example performs a live API call against a real board and
# embeds what looks like a real access token in plain text. The token should
# be revoked and replaced with a placeholder or an environment-variable lookup
# before this script is published or re-run.
list_cards("ims-fhs", 14351, "a44fa67c5df2acc9836058ffca870d7b78b017cb")
| /data/genthat_extracted_code/upwaver/examples/list_cards.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 208 | r | library(upwaver)
### Name: list_cards
### Title: Information about the cards on a board
### Aliases: list_cards
### ** Examples
list_cards("ims-fhs", 14351, "a44fa67c5df2acc9836058ffca870d7b78b017cb")
|
library('RNeo4j')
source('src/functionsOfDBRequestByExecutingCypher.R')
#### CONSTANTS, PLEASE DO NOT CHANGE #####
# Fixed margin-call id / client id scenarios used by the example functions
# below; the ids presumably refer to records in the Neo4j database queried by
# the sourced helper script -- confirm against the database contents.
callId1 <- c('mc45')
clientId1 <- '999'
callId2 <- c('mc46','mc43')
clientId2 <- '999'
callId3 <- c('mc45','mc46','mc48','mc49','mc50','mc51')
clientId3 <- '999'
callId4 <- c('mc51','mc52','mc54','mc55','mc57','mc59','mc61') ; clientId4 <- '999'
callId5 <- c('mc53','mc54','mc56','mc58','mc59','mc60')
clientId5 <- '999'
# Scenario 6: a larger batch of margin calls for the same client id.
callId6 <- c('mc10','mc11','mc12','mc13','mc14','mc15',
             'mc41','mc42','mc46','mc47','mc48','mc49',
             'mc5','mc50','mc51','mc55','mc56','mc57','mc58','mc59',
             'mc6','mc60','mc61','mc64')
clientId6 <- '999'
#### EXAMPLE FUNCTIONS ##################
# Thin zero-argument wrappers: each one runs availAssetByCallIdAndClientId()
# on one of the fixed call-id / client-id scenarios defined above.
availAssetByCallIdAndClientIdEx1 <- function() availAssetByCallIdAndClientId(callId1, clientId1)
availAssetByCallIdAndClientIdEx2 <- function() availAssetByCallIdAndClientId(callId2, clientId2)
availAssetByCallIdAndClientIdEx3 <- function() availAssetByCallIdAndClientId(callId3, clientId3)
availAssetByCallIdAndClientIdEx4 <- function() availAssetByCallIdAndClientId(callId4, clientId4)
availAssetByCallIdAndClientIdEx5 <- function() availAssetByCallIdAndClientId(callId5, clientId5)
availAssetByCallIdAndClientIdEx6 <- function() availAssetByCallIdAndClientId(callId6, clientId6)
#### EXAMPLES RESULTS ####################
# Execute each example scenario. NOTE(review): these calls hit the database
# through the sourced helper script, so they presumably require a reachable
# Neo4j instance with the expected data loaded -- confirm before running.
availAssetByCallIdAndClientIdEx1()
availAssetByCallIdAndClientIdEx2()
availAssetByCallIdAndClientIdEx3()
availAssetByCallIdAndClientIdEx4()
availAssetByCallIdAndClientIdEx5()
availAssetByCallIdAndClientIdEx6()
| /examples/availAssetByCallIdAndClientIdExamples.R | no_license | AcuoFS/acuo-allocation | R | false | false | 1,584 | r | library('RNeo4j')
source('src/functionsOfDBRequestByExecutingCypher.R')
#### CONSTANTS, PLASE DO NOT CHANGE #####
callId1 <- c('mc45')
clientId1 <- '999'
callId2 <- c('mc46','mc43')
clientId2 <- '999'
callId3 <- c('mc45','mc46','mc48','mc49','mc50','mc51')
clientId3 <- '999'
callId4 <- c('mc51','mc52','mc54','mc55','mc57','mc59','mc61') ; clientId4 <- '999'
callId5 <- c('mc53','mc54','mc56','mc58','mc59','mc60')
clientId5 <- '999'
callId6 <- c('mc10','mc11','mc12','mc13','mc14','mc15',
'mc41','mc42','mc46','mc47','mc48','mc49',
'mc5','mc50','mc51','mc55','mc56','mc57','mc58','mc59',
'mc6','mc60','mc61','mc64')
clientId6 <- '999'
#### EXAMPLE FUNCTIONS ##################
availAssetByCallIdAndClientIdEx1 <- function(){
availAssetByCallIdAndClientId(callId1,clientId1)
}
availAssetByCallIdAndClientIdEx2 <- function(){
availAssetByCallIdAndClientId(callId2,clientId2)
}
availAssetByCallIdAndClientIdEx3 <- function(){
availAssetByCallIdAndClientId(callId3,clientId3)
}
availAssetByCallIdAndClientIdEx4 <- function(){
availAssetByCallIdAndClientId(callId4,clientId4)
}
availAssetByCallIdAndClientIdEx5 <- function(){
availAssetByCallIdAndClientId(callId5,clientId5)
}
availAssetByCallIdAndClientIdEx6 <- function(){
availAssetByCallIdAndClientId(callId6,clientId6)
}
#### EXAMPLES RESULTS ####################
availAssetByCallIdAndClientIdEx1()
availAssetByCallIdAndClientIdEx2()
availAssetByCallIdAndClientIdEx3()
availAssetByCallIdAndClientIdEx4()
availAssetByCallIdAndClientIdEx5()
availAssetByCallIdAndClientIdEx6()
|
# Function containing the framework setup in the initial study design
# -----------------------------------------------------------------------------
# Set up directories
# NOTE(review): this block wipes the global environment (rm(list = ls())) and
# hard-codes machine-specific paths; it only runs while `git.dir` is unset, so
# re-sourcing the file keeps the first configuration.
if (!exists("git.dir")) {
  rm(list = ls(all = T))
  wd <- c("C:/Users/Jim Hughes/Documents", "C:/Users/hugjh001/Documents",
        "C:/Users/hugjh001/Desktop", "C:/windows/system32")
  graphics.off()
  # Choose repository location and repository name from the working directory.
  if (getwd() == wd[1]) {
    git.dir <- paste0(getwd(), "/GitRepos")
    reponame <- "optinterval"
  } else if (getwd() == wd[2]) {
    git.dir <- getwd()
    reponame <- "optinterval"
  } else if (getwd() == wd[3] | getwd() == wd[4]) {
    git.dir <- "E:/Hughes/Git"
    reponame <- "splines"
  }
  rm("wd")
}
# Load packages
library(GA)
#library(ggplot2)
#theme_bw2 <- theme_set(theme_bw(base_size = 14))
#theme_update(plot.title = element_text(hjust = 0.5))
# Source scripts to set up environment
set.seed(256256)  # fixed seed so the simulation study is reproducible
niter <- 1000  # number of simulated data sets per scenario
sdev <- 4  # presumably the residual error level used by study_data.R -- confirm
source(paste(git.dir, reponame, "fn_diag/fix_functions.R", sep = "/"))
source(paste(git.dir, reponame, "fn_diag/study_data.R", sep = "/"))
# Set basic parameters
# Name stems "d2b","d3b","d1a","d2a","d3a" (outer() builds d1b..d3a column-wise
# and [-1] drops "d1b"); the derived vectors name the matching parameter,
# prediction-function and time objects created by the sourced scripts.
data.names <- paste0("d", as.vector(outer(1:3, c("b", "a"), paste0)))[-1]
par.names <- paste(data.names, "p", sep = ".")
fn.names <- paste("pred", data.names, sep = ".")
t0.names <- paste(data.names, "t", sep = ".")
# Reference sampling schedule used as the "basic" design later in the file.
t1 <- c(0, 0.5, 1, 1.5, 2, 3, 4, 6, 8, 12, 16, 24)
# -----------------------------------------------------------------------------
study.fn <- function(data, par, fn, nobs, t0, tlast = 24, logauc = F) { # sdev = 1:4
absorp <- ifelse((length(par) %% 2) != 0, T, F)
if (absorp) data[1] <- 0
all.sumexp <- apply(data, 2, function(x) {
optim.sumexp.new(
# optim.sumexp.sig(
data.frame(time = t0, conc = x), oral = absorp
# , nexp = 2
)
})
print("sumexp done")
res.sumexp <- lapply(all.sumexp, best.sumexp.aic) # ".lrt)", ".aic)", ".bic, nobs = length(t1))"
fit.par <- lapply(res.sumexp, function(x) x$sumexp)
true.tlast <- rep(list(
seq(0, pred.tlast.lam(par), length.out = nobs)
), niter)
auc.tlast <- lapply(fit.par, function(x) {
c(0, exp(seq(log(t0[2]), log(pred.tlast(x, 12)[1]), length.out = nobs-1)))
})
lam.tlast <- lapply(fit.par, function(x) {
c(0, exp(seq(log(t0[2]), log(pred.tlast.lam(x)), length.out = nobs-1)))
})
obs.tlast.mat <- apply(data, 2, function(x) {
out <- try(
c(0, exp(seq(log(t0[2]), log(obs.tlast.lam(data.frame(t0, x))), length.out = nobs-1)))
)
if (class(out) == "try-error") browser()
out
})
obs.tlast <- split(t(obs.tlast.mat), seq(NROW(t(obs.tlast.mat))))
print("tlast done")
# Explanation of Option Naming
# a b c
# t 0 0 0
# a - fixed tmax (0 - off, 1 - on)
# b - variable tlast (0 - off, 1 - 80% auc, 2 - three half-lives, 3 - three half-lives using observed data)
# c - optimal lambdaz (0 - off, 1 - geomean, 2 - optimise)
t2 <- c(0, exp(seq(log(t0[2]), log(tail(t0, 1)), length.out = nobs-1)))
t000.res <- lapply(fit.par, FUN = function(x) {
optim.interv.dtmax(x, t2)
})
t001.res <- lapply(fit.par, FUN = function(x) {
optim.interv.dtmax(x, t2[-(nobs-1)])$times
})
t010.res <- mapply(fit.par, auc.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t)
})
t011.res <- mapply(fit.par, auc.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t[-(nobs-1)])$times
})
t020.res <- mapply(fit.par, lam.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t)
})
t021.res <- mapply(fit.par, lam.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t[-(nobs-1)])$times
})
t030.res <- mapply(fit.par, obs.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t)
})
t031.res <- mapply(fit.par, obs.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t[-(nobs-1)])$times
})
t100.res <- lapply(fit.par, FUN = function(x) {
optim.interv.dtmax(x, t2, tmax = T)
})
t101.res <- lapply(fit.par, FUN = function(x) {
optim.interv.dtmax(x, t2[-(nobs-1)], tmax = T)$times
})
t110.res <- mapply(fit.par, auc.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t, tmax = T)
})
t111.res <- mapply(fit.par, auc.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t[-(nobs-1)], tmax = T)$times
})
t120.res <- mapply(fit.par, lam.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t, tmax = T)
})
t121.res <- mapply(fit.par, lam.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t[-(nobs-1)], tmax = T)$times
})
t130.res <- mapply(fit.par, obs.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t, tmax = T)
})
t131.res <- mapply(fit.par, obs.tlast, SIMPLIFY = F, FUN = function(x, t) {
optim.interv.dtmax(x, t[-(nobs-1)], tmax = T)$times
})
print("intervals done")
t000 <- sapply(t000.res, FUN = function(x) {
x$times
})
t001 <- sapply(t001.res, FUN = function(x) {
geomean <- exp(mean(log(tail(x, 2))))
c(head(x, nobs-2), geomean, tail(x, 1))
})
t002 <- mapply(t001.res, fit.par, FUN = function(x, fit) {
tail.par <- tail(unique(x), 2)
optres <- try(optim(
mean(tail.par),
function(p, tfirst, tlast, fit) {
m <- fit[1:ceiling(length(fit)/2)]
times <- c(tfirst, p, tlast)
pred <- data.frame(
time = times,
dv = pred.sumexp(fit, times)
)
lmres <- lm(log(dv) ~ times, pred)$coefficients
err <- sqrt(diff(c(max(m), lmres[2]))^2)
return(err)
},
method = "L-BFGS-B", hessian = T,
lower = tail.par[1], upper = tail.par[2],
tfirst = tail.par[1], tlast = tail.par[2], fit = fit
))
if (class(optres) == "try-error") {
sort(c(head(x, nobs-2), exp(mean(log(tail.par))), tail(x, 1)))
} else {
sort(c(head(x, nobs-2), optres$par, tail(x, 1)))
}
})
t010 <- sapply(t010.res, FUN = function(x) {
x$times
})
t011 <- sapply(t011.res, FUN = function(x) {
geomean <- exp(mean(log(tail(x, 2))))
c(head(x, nobs-2), geomean, tail(x, 1))
})
t012 <- mapply(t011.res, fit.par, FUN = function(x, fit) {
tail.par <- tail(unique(x), 2)
optres <- try(optim(
mean(tail.par),
function(p, tfirst, tlast, fit) {
m <- fit[1:ceiling(length(fit)/2)]
times <- c(tfirst, p, tlast)
pred <- data.frame(
time = times,
dv = pred.sumexp(fit, times)
)
lmres <- lm(log(dv) ~ times, pred)$coefficients
err <- sqrt(diff(c(max(m), lmres[2]))^2)
return(err)
},
method = "L-BFGS-B", hessian = T,
lower = tail.par[1], upper = tail.par[2],
tfirst = tail.par[1], tlast = tail.par[2], fit = fit
))
if (class(optres) == "try-error") {
sort(c(head(x, nobs-2), exp(mean(log(tail.par))), tail(x, 1)))
} else {
sort(c(head(x, nobs-2), optres$par, tail(x, 1)))
}
})
t020 <- sapply(t020.res, FUN = function(x) {
x$times
})
t021 <- sapply(t021.res, FUN = function(x) {
geomean <- exp(mean(log(tail(x, 2))))
c(head(x, nobs-2), geomean, tail(x, 1))
})
t022 <- mapply(t021.res, fit.par, FUN = function(x, fit) {
tail.par <- tail(unique(x), 2)
optres <- try(optim(
mean(tail.par),
function(p, tfirst, tlast, fit) {
m <- fit[1:ceiling(length(fit)/2)]
times <- c(tfirst, p, tlast)
pred <- data.frame(
time = times,
dv = pred.sumexp(fit, times)
)
lmres <- lm(log(dv) ~ times, pred)$coefficients
err <- sqrt(diff(c(max(m), lmres[2]))^2)
return(err)
},
method = "L-BFGS-B", hessian = T,
lower = tail.par[1], upper = tail.par[2],
tfirst = tail.par[1], tlast = tail.par[2], fit = fit
))
if (class(optres) == "try-error") {
sort(c(head(x, nobs-2), exp(mean(log(tail.par))), tail(x, 1)))
} else {
sort(c(head(x, nobs-2), optres$par, tail(x, 1)))
}
})
t030 <- sapply(t030.res, FUN = function(x) {
x$times
})
t031 <- sapply(t031.res, FUN = function(x) {
geomean <- exp(mean(log(tail(x, 2))))
c(head(x, nobs-2), geomean, tail(x, 1))
})
t032 <- mapply(t031.res, fit.par, FUN = function(x, fit) {
tail.par <- tail(unique(x), 2)
optres <- try(optim(
mean(tail.par),
function(p, tfirst, tlast, fit) {
m <- fit[1:ceiling(length(fit)/2)]
times <- c(tfirst, p, tlast)
pred <- data.frame(
time = times,
dv = pred.sumexp(fit, times)
)
lmres <- lm(log(dv) ~ times, pred)$coefficients
err <- sqrt(diff(c(max(m), lmres[2]))^2)
return(err)
},
method = "L-BFGS-B", hessian = T,
lower = tail.par[1], upper = tail.par[2],
tfirst = tail.par[1], tlast = tail.par[2], fit = fit
))
if (class(optres) == "try-error") {
sort(c(head(x, nobs-2), exp(mean(log(tail.par))), tail(x, 1)))
} else {
sort(c(head(x, nobs-2), optres$par, tail(x, 1)))
}
})
t100 <- sapply(t100.res, FUN = function(x) {
x$times
})
t101 <- sapply(t101.res, FUN = function(x) {
geomean <- exp(mean(log(tail(x, 2))))
c(head(x, nobs-2), geomean, tail(x, 1))
})
t102 <- mapply(t101.res, fit.par, FUN = function(x, fit) {
tail.par <- tail(unique(x), 2)
optres <- try(optim(
mean(tail.par),
function(p, tfirst, tlast, fit) {
m <- fit[1:ceiling(length(fit)/2)]
times <- c(tfirst, p, tlast)
pred <- data.frame(
time = times,
dv = pred.sumexp(fit, times)
)
lmres <- lm(log(dv) ~ times, pred)$coefficients
err <- sqrt(diff(c(max(m), lmres[2]))^2)
return(err)
},
method = "L-BFGS-B", hessian = T,
lower = tail.par[1], upper = tail.par[2],
tfirst = tail.par[1], tlast = tail.par[2], fit = fit
))
if (class(optres) == "try-error") {
sort(c(head(x, nobs-2), exp(mean(log(tail.par))), tail(x, 1)))
} else {
sort(c(head(x, nobs-2), optres$par, tail(x, 1)))
}
})
t110 <- sapply(t110.res, FUN = function(x) {
x$times
})
t111 <- sapply(t111.res, FUN = function(x) {
geomean <- exp(mean(log(tail(x, 2))))
c(head(x, nobs-2), geomean, tail(x, 1))
})
t112 <- mapply(t111.res, fit.par, FUN = function(x, fit) {
tail.par <- tail(unique(x), 2)
optres <- try(optim(
mean(tail.par),
function(p, tfirst, tlast, fit) {
m <- fit[1:ceiling(length(fit)/2)]
times <- c(tfirst, p, tlast)
pred <- data.frame(
time = times,
dv = pred.sumexp(fit, times)
)
lmres <- lm(log(dv) ~ times, pred)$coefficients
err <- sqrt(diff(c(max(m), lmres[2]))^2)
return(err)
},
method = "L-BFGS-B", hessian = T,
lower = tail.par[1], upper = tail.par[2],
tfirst = tail.par[1], tlast = tail.par[2], fit = fit
))
if (class(optres) == "try-error") {
sort(c(head(x, nobs-2), exp(mean(log(tail.par))), tail(x, 1)))
} else {
sort(c(head(x, nobs-2), optres$par, tail(x, 1)))
}
})
t120 <- sapply(t120.res, FUN = function(x) {
x$times
})
t121 <- sapply(t121.res, FUN = function(x) {
geomean <- exp(mean(log(tail(x, 2))))
c(head(x, nobs-2), geomean, tail(x, 1))
})
t122 <- mapply(t121.res, fit.par, FUN = function(x, fit) {
tail.par <- tail(unique(x), 2)
optres <- try(optim(
mean(tail.par),
function(p, tfirst, tlast, fit) {
m <- fit[1:ceiling(length(fit)/2)]
times <- c(tfirst, p, tlast)
pred <- data.frame(
time = times,
dv = pred.sumexp(fit, times)
)
lmres <- lm(log(dv) ~ times, pred)$coefficients
err <- sqrt(diff(c(max(m), lmres[2]))^2)
return(err)
},
method = "L-BFGS-B", hessian = T,
lower = tail.par[1], upper = tail.par[2],
tfirst = tail.par[1], tlast = tail.par[2], fit = fit
))
if (class(optres) == "try-error") {
sort(c(head(x, nobs-2), exp(mean(log(tail.par))), tail(x, 1)))
} else {
sort(c(head(x, nobs-2), optres$par, tail(x, 1)))
}
})
t130 <- sapply(t130.res, FUN = function(x) {
x$times
})
t131 <- sapply(t131.res, FUN = function(x) {
geomean <- exp(mean(log(tail(x, 2))))
c(head(x, nobs-2), geomean, tail(x, 1))
})
t132 <- mapply(t131.res, fit.par, FUN = function(x, fit) {
tail.par <- tail(unique(x), 2)
optres <- try(optim(
mean(tail.par),
function(p, tfirst, tlast, fit) {
m <- fit[1:ceiling(length(fit)/2)]
times <- c(tfirst, p, tlast)
pred <- data.frame(
time = times,
dv = pred.sumexp(fit, times)
)
lmres <- lm(log(dv) ~ times, pred)$coefficients
err <- sqrt(diff(c(max(m), lmres[2]))^2)
return(err)
},
method = "L-BFGS-B", hessian = T,
lower = tail.par[1], upper = tail.par[2],
tfirst = tail.par[1], tlast = tail.par[2], fit = fit
))
if (class(optres) == "try-error") {
sort(c(head(x, nobs-2), exp(mean(log(tail.par))), tail(x, 1)))
} else {
sort(c(head(x, nobs-2), optres$par, tail(x, 1)))
}
})
print("times done")
auc24 <- data.frame(
true = rep(integrate(fn, 0, 24, p = par)$value, niter),
basic = rep(auc.interv(t1, par, fn), niter),
t000 = apply(t000, 2, function(x) auc.interv(x, par, fn)),
t001 = apply(t001, 2, function(x) auc.interv(x, par, fn)),
t002 = apply(t002, 2, function(x) auc.interv(x, par, fn)),
t010 = apply(t010, 2, function(x) auc.interv(x, par, fn)),
t011 = apply(t011, 2, function(x) auc.interv(x, par, fn)),
t012 = apply(t012, 2, function(x) auc.interv(x, par, fn)),
t020 = apply(t020, 2, function(x) auc.interv(x, par, fn)),
t021 = apply(t021, 2, function(x) auc.interv(x, par, fn)),
t022 = apply(t022, 2, function(x) auc.interv(x, par, fn)),
t030 = apply(t030, 2, function(x) auc.interv(x, par, fn)),
t031 = apply(t031, 2, function(x) auc.interv(x, par, fn)),
t032 = apply(t032, 2, function(x) auc.interv(x, par, fn)),
t100 = apply(t100, 2, function(x) auc.interv(x, par, fn)),
t101 = apply(t101, 2, function(x) auc.interv(x, par, fn)),
t102 = apply(t102, 2, function(x) auc.interv(x, par, fn)),
t110 = apply(t110, 2, function(x) auc.interv(x, par, fn)),
t111 = apply(t111, 2, function(x) auc.interv(x, par, fn)),
t112 = apply(t112, 2, function(x) auc.interv(x, par, fn)),
t120 = apply(t120, 2, function(x) auc.interv(x, par, fn)),
t121 = apply(t121, 2, function(x) auc.interv(x, par, fn)),
t122 = apply(t122, 2, function(x) auc.interv(x, par, fn)),
t130 = apply(t130, 2, function(x) auc.interv(x, par, fn)),
t131 = apply(t131, 2, function(x) auc.interv(x, par, fn)),
t132 = apply(t132, 2, function(x) auc.interv(x, par, fn))
)
auctlast <- data.frame(
true = rep(integrate(fn, 0, tail(true.tlast[[1]], 1), p = par)$value, niter),
basic = rep(auc.interv(t1, par, fn), niter),
t000 = apply(t000, 2, function(x) auc.interv(x, par, fn)),
t001 = apply(t001, 2, function(x) auc.interv(x, par, fn)),
t002 = apply(t002, 2, function(x) auc.interv(x, par, fn)),
t010 = apply(t010, 2, function(x) auc.interv(x, par, fn)),
t011 = apply(t011, 2, function(x) auc.interv(x, par, fn)),
t012 = apply(t012, 2, function(x) auc.interv(x, par, fn)),
t020 = apply(t020, 2, function(x) auc.interv(x, par, fn)),
t021 = apply(t021, 2, function(x) auc.interv(x, par, fn)),
t022 = apply(t022, 2, function(x) auc.interv(x, par, fn)),
t030 = apply(t030, 2, function(x) auc.interv(x, par, fn)),
t031 = apply(t031, 2, function(x) auc.interv(x, par, fn)),
t032 = apply(t032, 2, function(x) auc.interv(x, par, fn)),
t100 = apply(t100, 2, function(x) auc.interv(x, par, fn)),
t101 = apply(t101, 2, function(x) auc.interv(x, par, fn)),
t102 = apply(t102, 2, function(x) auc.interv(x, par, fn)),
t110 = apply(t110, 2, function(x) auc.interv(x, par, fn)),
t111 = apply(t111, 2, function(x) auc.interv(x, par, fn)),
t112 = apply(t112, 2, function(x) auc.interv(x, par, fn)),
t120 = apply(t120, 2, function(x) auc.interv(x, par, fn)),
t121 = apply(t121, 2, function(x) auc.interv(x, par, fn)),
t122 = apply(t122, 2, function(x) auc.interv(x, par, fn)),
t130 = apply(t130, 2, function(x) auc.interv(x, par, fn)),
t131 = apply(t131, 2, function(x) auc.interv(x, par, fn)),
t132 = apply(t132, 2, function(x) auc.interv(x, par, fn))
)
aucinf <- try(data.frame(
true = {
auc <- integrate(fn, 0, 168, p = par)$value
inf <- fn(168, par)/abs(max(par[ceiling(length(par)/2)]))
rep(auc + inf, niter)
},
basic = {
auc <- auc.interv(t1, par, fn)
inf <- auc.interv.lam(par, t1)
rep(auc + inf, niter)
},
t000 = apply(t000, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t001 = apply(t001, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t002 = apply(t002, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t010 = apply(t010, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t011 = apply(t011, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t012 = apply(t012, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t020 = apply(t020, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t021 = apply(t021, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t022 = apply(t022, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t030 = apply(t030, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t031 = apply(t031, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t032 = apply(t032, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t100 = apply(t100, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t101 = apply(t101, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t102 = apply(t102, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t110 = apply(t110, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t111 = apply(t111, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t112 = apply(t112, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t120 = apply(t120, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t121 = apply(t121, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t122 = apply(t122, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t130 = apply(t130, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t131 = apply(t131, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
}),
t132 = apply(t132, 2, FUN = function(t) {
auc <- auc.interv(t, par, fn)
inf <- auc.interv.lam(par, t)
auc + inf
})
))
if (class(aucinf) == "try-error") browser()
test <- pred.sumexp(par, tmax.sumexp(par))
if (class(test) == "try-error") browser()
cmax <- data.frame(
true = rep(pred.sumexp(par, tmax.sumexp(par)), niter),
basic = rep(max(pred.sumexp(par, t1)), niter),
t000 = apply(t000, 2, function(t) max(pred.sumexp(par, t))),
t001 = apply(t001, 2, function(t) max(pred.sumexp(par, t))),
t002 = apply(t002, 2, function(t) max(pred.sumexp(par, t))),
t010 = apply(t010, 2, function(t) max(pred.sumexp(par, t))),
t011 = apply(t011, 2, function(t) max(pred.sumexp(par, t))),
t012 = apply(t012, 2, function(t) max(pred.sumexp(par, t))),
t020 = apply(t020, 2, function(t) max(pred.sumexp(par, t))),
t021 = apply(t021, 2, function(t) max(pred.sumexp(par, t))),
t022 = apply(t022, 2, function(t) max(pred.sumexp(par, t))),
t030 = apply(t030, 2, function(t) max(pred.sumexp(par, t))),
t031 = apply(t031, 2, function(t) max(pred.sumexp(par, t))),
t032 = apply(t032, 2, function(t) max(pred.sumexp(par, t))),
t100 = apply(t100, 2, function(t) max(pred.sumexp(par, t))),
t101 = apply(t101, 2, function(t) max(pred.sumexp(par, t))),
t102 = apply(t102, 2, function(t) max(pred.sumexp(par, t))),
t110 = apply(t110, 2, function(t) max(pred.sumexp(par, t))),
t111 = apply(t111, 2, function(t) max(pred.sumexp(par, t))),
t112 = apply(t112, 2, function(t) max(pred.sumexp(par, t))),
t120 = apply(t120, 2, function(t) max(pred.sumexp(par, t))),
t121 = apply(t121, 2, function(t) max(pred.sumexp(par, t))),
t122 = apply(t122, 2, function(t) max(pred.sumexp(par, t))),
t130 = apply(t130, 2, function(t) max(pred.sumexp(par, t))),
t131 = apply(t131, 2, function(t) max(pred.sumexp(par, t))),
t132 = apply(t132, 2, function(t) max(pred.sumexp(par, t)))
)
tmax <- data.frame(
true = rep(tmax.sumexp(par), niter),
basic = apply(matrix(cmax$basic), 1,
FUN = function(x) t1[which(pred.sumexp(par, t1) == x)][1]
),
t000 = mapply(cmax$t000, data.frame(t000),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t001 = mapply(cmax$t001, data.frame(t001),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t002 = mapply(cmax$t002, data.frame(t002),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t010 = mapply(cmax$t010, data.frame(t010),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t011 = mapply(cmax$t011, data.frame(t011),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t012 = mapply(cmax$t012, data.frame(t012),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t020 = mapply(cmax$t020, data.frame(t020),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t021 = mapply(cmax$t021, data.frame(t021),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t022 = mapply(cmax$t022, data.frame(t022),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t030 = mapply(cmax$t030, data.frame(t030),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t031 = mapply(cmax$t031, data.frame(t031),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t032 = mapply(cmax$t032, data.frame(t032),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t100 = mapply(cmax$t100, data.frame(t100),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t101 = mapply(cmax$t101, data.frame(t101),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t102 = mapply(cmax$t102, data.frame(t102),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t110 = mapply(cmax$t110, data.frame(t110),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t111 = mapply(cmax$t111, data.frame(t111),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t112 = mapply(cmax$t112, data.frame(t112),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t120 = mapply(cmax$t120, data.frame(t120),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t121 = mapply(cmax$t121, data.frame(t121),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t122 = mapply(cmax$t122, data.frame(t122),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t130 = mapply(cmax$t130, data.frame(t130),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t131 = mapply(cmax$t131, data.frame(t131),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
),
t132 = mapply(cmax$t132, data.frame(t132),
FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
)
)
tlast <- data.frame(
true = sapply(true.tlast, function(x) tail(unlist(x), 1)),
auc = sapply(auc.tlast, function(x) tail(unlist(x), 1)),
lam = sapply(lam.tlast, function(x) tail(unlist(x), 1)),
obs = sapply(obs.tlast, function(x) tail(unlist(x), 1))
)
return(list(par = par, fit.par = fit.par, tlast = tlast, sumexp = res.sumexp, tbas = t1,
t000 = t000, t001 = t001, t002 = t002, t010 = t010, t011 = t011, t012 = t012,
t020 = t020, t021 = t021, t022 = t022, t030 = t030, t031 = t031, t032 = t032,
t100 = t100, t101 = t101, t102 = t102, t110 = t110, t111 = t111, t112 = t112,
t120 = t120, t121 = t121, t122 = t122, t130 = t130, t131 = t131, t132 = t132,
interv.t000 = t000.res, interv.t001 = t001.res, interv.t010 = t010.res,
interv.t011 = t011.res, interv.t020 = t020.res, interv.t021 = t021.res,
interv.t030 = t030.res, interv.t031 = t031.res, interv.t100 = t100.res,
interv.t101 = t101.res, interv.t110 = t110.res, interv.t111 = t111.res,
interv.t120 = t120.res, interv.t121 = t121.res, interv.t130 = t130.res,
interv.t131 = t131.res,
auc24 = auc24, auctlast = auctlast, aucinf = aucinf, cmax = cmax, tmax = tmax
))
}
# -----------------------------------------------------------------------------
# Run the simulation study for every scenario and save each result to disk.
# `data.names`, `par.names`, `fn.names` and `t0.names` are parallel vectors
# defined above; get() resolves the objects created by study_data.R.
fin.res <- vector("list", length(data.names))  # preallocate (was list(NULL))
for (i in seq_along(data.names)) {  # seq_along generalises the hard-coded 1:5
  fin.res[[i]] <- list(
    data = data.names[i],
    result = study.fn(get(data.names[i]),
      par = get(par.names[i]), fn = get(fn.names[i]),
      t0 = get(t0.names[i]), nobs = 12
    )  # study.fn
  )  # list
  print(paste0(i, "done"))  # progress marker per scenario
}  # for loop
setwd("E:/Hughes/Git/splines/fn_diag")
# The hard-coded output names (d2b, d3b, d1a, d2a, d3a) follow data.names
# order exactly, so the saves can be driven by the same vector.
for (i in seq_along(data.names)) {
  saveRDS(fin.res[[i]]$result, paste0(data.names[i], "-newnobs12-AR3024.rds"))
}
| /fn_diag/fix25_function_narrow_new.R | no_license | jhhughes256/optinterval | R | false | false | 28,354 | r | # Function containing the framework setup in the intial study design
# -----------------------------------------------------------------------------
# Set up directories
# Resolve the git directory and repository name from the machine's working
# directory; skipped when `git.dir` already exists (e.g. on re-source).
if (!exists("git.dir")) {
  rm(list = ls(all = T))  # NOTE(review): wipes the whole workspace, including hidden objects
  # Known working directories for the machines this script runs on
  wd <- c("C:/Users/Jim Hughes/Documents", "C:/Users/hugjh001/Documents",
    "C:/Users/hugjh001/Desktop", "C:/windows/system32")
  graphics.off()  # close any open graphics devices before starting
  if (getwd() == wd[1]) {
    git.dir <- paste0(getwd(), "/GitRepos")
    reponame <- "optinterval"
  } else if (getwd() == wd[2]) {
    git.dir <- getwd()
    reponame <- "optinterval"
  } else if (getwd() == wd[3] | getwd() == wd[4]) {
    git.dir <- "E:/Hughes/Git"
    reponame <- "splines"
  }
  rm("wd")  # tidy up the temporary lookup vector
}
# Load packages
library(GA)  # genetic algorithm optimiser used by the sourced helper functions
#library(ggplot2)
#theme_bw2 <- theme_set(theme_bw(base_size = 14))
#theme_update(plot.title = element_text(hjust = 0.5))
# Source scripts to set up environment
set.seed(256256)  # fix the RNG so the simulated datasets are reproducible
niter <- 1000  # number of simulated profiles per scenario
sdev <- 4  # residual error level consumed by study_data.R
source(paste(git.dir, reponame, "fn_diag/fix_functions.R", sep = "/"))
source(paste(git.dir, reponame, "fn_diag/study_data.R", sep = "/"))
# Set basic parameters
# Scenario identifiers: d2b, d3b, d1a, d2a, d3a (d1b excluded by [-1])
data.names <- paste0("d", as.vector(outer(1:3, c("b", "a"), paste0)))[-1]
par.names <- paste(data.names, "p", sep = ".")  # true parameter objects
fn.names <- paste("pred", data.names, sep = ".")  # prediction functions
t0.names <- paste(data.names, "t", sep = ".")  # simulation time vectors
t1 <- c(0, 0.5, 1, 1.5, 2, 3, 4, 6, 8, 12, 16, 24)  # basic reference schedule
# -----------------------------------------------------------------------------
study.fn <- function(data, par, fn, nobs, t0, tlast = 24, logauc = F) { # sdev = 1:4
  # ---------------------------------------------------------------------------
  # Simulation framework for one scenario: fit sum-of-exponential models to
  # each simulated concentration profile (one column of `data` per iteration),
  # build an optimised sampling schedule for every option combination, then
  # summarise the NCA metrics (AUC0-24, AUCtlast, AUCinf, Cmax, tmax) each
  # schedule yields against the true model.
  #
  # Args:
  #   data   - matrix of simulated concentrations (rows = t0, cols = iterations)
  #   par    - true sum-of-exponential parameters used to simulate `data`
  #   fn     - prediction function matching `par` (used for integration)
  #   nobs   - number of samples in every designed schedule
  #   t0     - sampling times at which `data` was simulated
  #   tlast  - nominal last time (currently unused; kept for call compatibility)
  #   logauc - unused; kept for call compatibility
  #
  # Relies on globals `niter` (iterations) and `t1` (basic reference schedule)
  # plus helpers sourced from fix_functions.R (optim.sumexp.new,
  # best.sumexp.aic, pred.tlast, pred.tlast.lam, obs.tlast.lam,
  # optim.interv.dtmax, pred.sumexp, tmax.sumexp, auc.interv, auc.interv.lam).
  #
  # Explanation of Option Naming "tabc":
  #   a - fixed tmax     (0 - off, 1 - on)
  #   b - variable tlast (0 - off, 1 - 80% auc, 2 - three half-lives,
  #                       3 - three half-lives using observed data)
  #   c - lambda-z point (0 - full optimised grid, 1 - geometric mean of the
  #                       final interval, 2 - optimised terminal point)
  #
  # The original implementation spelt out all 24 options by hand (~500 lines
  # of copy-paste); the loops below generate the identical objects, with the
  # same names and ordering, without the duplication.
  # ---------------------------------------------------------------------------
  absorp <- ifelse((length(par) %% 2) != 0, T, F)  # odd par length => oral model
  if (absorp) data[1] <- 0  # oral dosing: first observation is pre-dose zero
  # Fit candidate sum-of-exponential models to every profile; select by AIC
  all.sumexp <- apply(data, 2, function(x) {
    optim.sumexp.new(
      data.frame(time = t0, conc = x), oral = absorp
    )
  })
  print("sumexp done")
  res.sumexp <- lapply(all.sumexp, best.sumexp.aic)
  fit.par <- lapply(res.sumexp, function(x) x$sumexp)
  # Candidate last-sample times under each tlast rule (one vector/iteration)
  true.tlast <- rep(list(
    seq(0, pred.tlast.lam(par), length.out = nobs)
  ), niter)
  auc.tlast <- lapply(fit.par, function(x) {
    c(0, exp(seq(log(t0[2]), log(pred.tlast(x, 12)[1]), length.out = nobs - 1)))
  })
  lam.tlast <- lapply(fit.par, function(x) {
    c(0, exp(seq(log(t0[2]), log(pred.tlast.lam(x)), length.out = nobs - 1)))
  })
  obs.tlast.mat <- apply(data, 2, function(x) {
    out <- try(
      c(0, exp(seq(log(t0[2]), log(obs.tlast.lam(data.frame(t0, x))), length.out = nobs - 1)))
    )
    if (inherits(out, "try-error")) browser()  # debugging hook (as designed)
    out
  })
  obs.tlast <- split(t(obs.tlast.mat), seq(NROW(t(obs.tlast.mat))))
  print("tlast done")
  # Helper (c = 2): optimise the penultimate sample time so the terminal
  # log-linear regression slope matches the model's slowest exponent; fall
  # back to the geometric mean of the final interval if the optimiser errors.
  opt.term.time <- function(x, fit) {
    tail.par <- tail(unique(x), 2)
    optres <- try(optim(
      mean(tail.par),
      function(p, tfirst, tlast, fit) {
        m <- fit[1:ceiling(length(fit) / 2)]  # exponents (first half of par)
        times <- c(tfirst, p, tlast)
        pred <- data.frame(
          time = times,
          dv = pred.sumexp(fit, times)
        )
        lmres <- lm(log(dv) ~ times, pred)$coefficients
        sqrt(diff(c(max(m), lmres[2]))^2)  # absolute slope error
      },
      method = "L-BFGS-B", hessian = T,
      lower = tail.par[1], upper = tail.par[2],
      tfirst = tail.par[1], tlast = tail.par[2], fit = fit
    ))
    if (inherits(optres, "try-error")) {
      sort(c(head(x, nobs - 2), exp(mean(log(tail.par))), tail(x, 1)))
    } else {
      sort(c(head(x, nobs - 2), optres$par, tail(x, 1)))
    }
  }
  # Default sampling horizon used when variable tlast is off (b = 0)
  t2 <- c(0, exp(seq(log(t0[2]), log(tail(t0, 1)), length.out = nobs - 1)))
  cand.tlast <- list(
    lapply(fit.par, function(x) t2),  # b = 0: fixed horizon
    auc.tlast,                        # b = 1: 80% of AUC reached
    lam.tlast,                        # b = 2: three half-lives (fitted model)
    obs.tlast                         # b = 3: three half-lives (observed data)
  )
  interv.res <- list()  # optimisation output retained for the return value
  opt.times <- list()   # final nobs-length schedules, one matrix per option
  for (a in 0:1) {
    for (b in 0:3) {
      tcand <- cand.tlast[[b + 1]]
      if (a == 0) {
        res0 <- mapply(fit.par, tcand, SIMPLIFY = F, FUN = function(x, t) {
          optim.interv.dtmax(x, t)
        })
        res1 <- mapply(fit.par, tcand, SIMPLIFY = F, FUN = function(x, t) {
          optim.interv.dtmax(x, t[-(nobs - 1)])$times
        })
      } else {
        res0 <- mapply(fit.par, tcand, SIMPLIFY = F, FUN = function(x, t) {
          optim.interv.dtmax(x, t, tmax = T)
        })
        res1 <- mapply(fit.par, tcand, SIMPLIFY = F, FUN = function(x, t) {
          optim.interv.dtmax(x, t[-(nobs - 1)], tmax = T)$times
        })
      }
      interv.res[[paste0("t", a, b, 0)]] <- res0
      interv.res[[paste0("t", a, b, 1)]] <- res1
      # c = 0: optimised schedule as-is
      opt.times[[paste0("t", a, b, 0)]] <- sapply(res0, function(x) x$times)
      # c = 1: insert a geometric-mean point into the final interval
      opt.times[[paste0("t", a, b, 1)]] <- sapply(res1, function(x) {
        geomean <- exp(mean(log(tail(x, 2))))
        c(head(x, nobs - 2), geomean, tail(x, 1))
      })
      # c = 2: optimise the terminal point instead
      opt.times[[paste0("t", a, b, 2)]] <- mapply(res1, fit.par, FUN = opt.term.time)
    }
  }
  print("intervals done")
  print("times done")
  # Metric summaries: one row per iteration, one column per schedule option,
  # plus the true value and the basic reference design.
  auc24 <- do.call(data.frame, c(
    list(
      true = rep(integrate(fn, 0, 24, p = par)$value, niter),
      basic = rep(auc.interv(t1, par, fn), niter)
    ),
    lapply(opt.times, function(tt) apply(tt, 2, function(x) auc.interv(x, par, fn)))
  ))
  auctlast <- do.call(data.frame, c(
    list(
      true = rep(integrate(fn, 0, tail(true.tlast[[1]], 1), p = par)$value, niter),
      basic = rep(auc.interv(t1, par, fn), niter)
    ),
    lapply(opt.times, function(tt) apply(tt, 2, function(x) auc.interv(x, par, fn)))
  ))
  # AUC extrapolated to infinity = interval AUC + terminal extrapolation
  aucinf <- try(do.call(data.frame, c(
    list(
      true = {
        auc <- integrate(fn, 0, 168, p = par)$value
        inf <- fn(168, par) / abs(max(par[ceiling(length(par) / 2)]))
        rep(auc + inf, niter)
      },
      basic = {
        auc <- auc.interv(t1, par, fn)
        inf <- auc.interv.lam(par, t1)
        rep(auc + inf, niter)
      }
    ),
    lapply(opt.times, function(tt) apply(tt, 2, function(t) {
      auc.interv(t, par, fn) + auc.interv.lam(par, t)
    }))
  )))
  if (inherits(aucinf, "try-error")) browser()
  # Bug fix: `test` was computed without try(), so the try-error branch below
  # could never fire; wrap it so the debugging hook behaves as intended.
  test <- try(pred.sumexp(par, tmax.sumexp(par)))
  if (inherits(test, "try-error")) browser()
  cmax <- do.call(data.frame, c(
    list(
      true = rep(pred.sumexp(par, tmax.sumexp(par)), niter),
      basic = rep(max(pred.sumexp(par, t1)), niter)
    ),
    lapply(opt.times, function(tt) apply(tt, 2, function(t) max(pred.sumexp(par, t))))
  ))
  # tmax: locate the schedule time at which each design observed its Cmax
  opt.tmax <- lapply(names(opt.times), function(nm) {
    mapply(cmax[[nm]], data.frame(opt.times[[nm]]),
      FUN = function(x, t) t[which(pred.sumexp(par, t) == x)][1]
    )
  })
  names(opt.tmax) <- names(opt.times)
  tmax <- do.call(data.frame, c(
    list(
      true = rep(tmax.sumexp(par), niter),
      basic = apply(matrix(cmax$basic), 1,
        FUN = function(x) t1[which(pred.sumexp(par, t1) == x)][1]
      )
    ),
    opt.tmax
  ))
  # Realised last sampling time under each tlast rule
  tlast <- data.frame(
    true = sapply(true.tlast, function(x) tail(unlist(x), 1)),
    auc = sapply(auc.tlast, function(x) tail(unlist(x), 1)),
    lam = sapply(lam.tlast, function(x) tail(unlist(x), 1)),
    obs = sapply(obs.tlast, function(x) tail(unlist(x), 1))
  )
  # Assemble the return list in the original element order:
  # scalars, t000..t132 schedules, interv.* results (c in {0, 1} only),
  # then the metric data frames.
  return(c(
    list(par = par, fit.par = fit.par, tlast = tlast, sumexp = res.sumexp, tbas = t1),
    opt.times,
    setNames(interv.res, paste0("interv.", names(interv.res))),
    list(auc24 = auc24, auctlast = auctlast, aucinf = aucinf, cmax = cmax, tmax = tmax)
  ))
}
# -----------------------------------------------------------------------------
# Run the simulation study for every scenario and save each result to disk.
# `data.names`, `par.names`, `fn.names` and `t0.names` are parallel vectors
# defined above; get() resolves the objects created by study_data.R.
fin.res <- vector("list", length(data.names))  # preallocate (was list(NULL))
for (i in seq_along(data.names)) {  # seq_along generalises the hard-coded 1:5
  fin.res[[i]] <- list(
    data = data.names[i],
    result = study.fn(get(data.names[i]),
      par = get(par.names[i]), fn = get(fn.names[i]),
      t0 = get(t0.names[i]), nobs = 12
    )  # study.fn
  )  # list
  print(paste0(i, "done"))  # progress marker per scenario
}  # for loop
setwd("E:/Hughes/Git/splines/fn_diag")
# The hard-coded output names (d2b, d3b, d1a, d2a, d3a) follow data.names
# order exactly, so the saves can be driven by the same vector.
for (i in seq_along(data.names)) {
  saveRDS(fin.res[[i]]$result, paste0(data.names[i], "-newnobs12-AR3024.rds"))
}
|
# Setup -------------------------------------------------------------------
library(tidyverse)
library(tidymodels)
library(stacks)

# Read the training data; column 33 arrived unnamed (vroom calls it `...33`),
# so rename it to `target` and treat it as the factor outcome.
lpa_path <- here::here("march_21_tabular/data/trn_lpa.csv")
trn_raw <- vroom::vroom(lpa_path)
trn <- trn_raw %>%
  rename(target = ...33) %>%
  mutate(target = as_factor(target))

# Hold out 20% of the rows as a validation set, stratified on the outcome.
# NOTE: keep the set.seed() calls immediately before the RNG-consuming steps
# (initial_split, vfold_cv, tune_grid) so results stay reproducible.
set.seed(0520)
split <- initial_split(trn, prop = 0.8, strata = target)
trn <- training(split)
val <- testing(split)

# Five-fold cross-validation resamples for tuning
folds <- vfold_cv(trn, v = 5)

# Tune against ROC AUC only
metric <- metric_set(roc_auc)

# Control object that retains predictions/workflows for later stacking
ctrl_grid <- control_stack_grid()

# SVM Specification -------------------------------------------------------
# Preprocessing: exclude the id column from modelling, pool rare categorical
# levels, scale numerics, one-hot encode, then drop highly correlated and
# near-zero-variance predictors.
svm_rec <- recipe(target ~ ., data = trn) %>%
  update_role(id, new_role = "id_var") %>%
  step_other(starts_with("cat"), threshold = 0.1) %>%
  step_scale(all_numeric()) %>%
  step_dummy(all_nominal(), -all_outcomes()) %>%
  step_corr(all_numeric()) %>%
  step_nzv(all_predictors())

# Radial-basis SVM with both the cost and kernel width tuned
svm_spec <- svm_rbf(cost = tune("cost"), rbf_sigma = tune("sigma")) %>%
  set_engine("kernlab") %>%
  set_mode("classification")

svm_wf <- workflow() %>%
  add_recipe(svm_rec) %>%
  add_model(svm_spec)

# Fit Model ---------------------------------------------------------------
doParallel::registerDoParallel()
set.seed(0410)
svm_res <- tune_grid(
  svm_wf,
  resamples = folds,
  grid = 4,
  metrics = metric,
  control = ctrl_grid
)
| /march_21_tabular/scripts/stacks_svm.R | no_license | ekholme/kaggle | R | false | false | 1,354 | r |
# Setup -------------------------------------------------------------------
library(tidyverse)
library(tidymodels)
library(stacks)
# Read the training data; the auto-named 33rd column (`...33`) is presumably
# the outcome -- it is renamed to `target` and made a factor for classification.
trn <- vroom::vroom(here::here("march_21_tabular/data/trn_lpa.csv")) %>%
  rename(target = ...33) %>%
  mutate(target = as_factor(target))
# 80/20 train/validation split, stratified on the outcome.
set.seed(0520)
split <- initial_split(trn, prop = 4/5, strata = target)
trn <- training(split)
val <- testing(split)
# Set up 5-fold cross-validation on the training portion.
folds <- vfold_cv(trn, v = 5)
# Evaluate candidates on ROC AUC only.
metric <- metric_set(roc_auc)
# Control object that retains predictions/workflows for later stacking.
ctrl_grid <- control_stack_grid()
# SVM Specification -------------------------------------------------------
# Preprocessing: keep `id` out of the model, collapse rare levels of the
# cat* columns, scale numerics, dummy-encode nominals, then drop highly
# correlated and near-zero-variance predictors.
svm_rec <- recipe(target ~ ., data = trn) %>%
  update_role(id, new_role = "id_var") %>%
  step_other(starts_with("cat"), threshold = .1) %>%
  step_scale(all_numeric()) %>%
  step_dummy(all_nominal(), -all_outcomes()) %>%
  step_corr(all_numeric()) %>%
  step_nzv(all_predictors())
# RBF-kernel SVM with cost and sigma left as tuning parameters.
svm_spec <- svm_rbf(cost = tune("cost"), rbf_sigma = tune("sigma")) %>%
  set_engine("kernlab") %>%
  set_mode("classification")
svm_wf <- workflow() %>%
  add_recipe(svm_rec) %>%
  add_model(svm_spec)
# Fit Model ---------------------------------------------------------------
# Tune over a 4-point grid in parallel; results can feed a stacks ensemble.
doParallel::registerDoParallel()
set.seed(0410)
svm_res <- tune_grid(
  svm_wf,
  resamples = folds,
  grid = 4,
  metrics = metric,
  control = ctrl_grid
)
|
# Plots avg speed and ratio evolution, using hop years period
# List lcriteria allows to choose GDP, GNI or both
#
# Author: Javier Garcia-Algarra
# August 2021
#
# Inputs: output_data/all_speeds_criteria.csv"
# Results: figs/AVG_EVOLUTION.png .tiff
library(ggplot2)
library(ggrepel)
criteria <- read.table("config_data/criteria.txt")
lcriteria <- criteria$V1
tdir <- "figs"
if (!dir.exists(tdir))
  dir.create(tdir)
# Width of each averaging window, in years.
hop <- 20
lstart_year <- seq(1960,2020-hop,by=hop)
ppi <- 300
# Countries highlighted in the plot.
scountry <- c("China","Brazil","Argentina","Chile","Portugal","Korea, Rep.")
# (Dead locals from the original -- ratio_breaks, datadist, datosx, datosy --
# were never used downstream and have been removed.)
# NOTE(review): the loop variable reuses the name `criteria`, shadowing the
# data frame read above. Safe here because lcriteria was already extracted,
# but worth renaming eventually.
for (criteria in lcriteria)
{
  datosdec <- data.frame("Country"=c(),"CountryCode"=c(),"distX"=c(),"distY"=c(),"PeriodStart" =c())
  datos_raw <- read.csv(paste0("output_data/all_speeds_",criteria,".csv"))
  lp <- unique(datos_raw$Country)
  for(start_year in lstart_year)
  {
    end_year <- start_year + hop
    # Restrict to the [start_year, end_year] window.
    datos_all <- datos_raw[(datos_raw$Year>=start_year) & (datos_raw$Year<=end_year),]
    for (k in lp){
      if (is.element(k,scountry))
      {
        # Rows for country k with both ratio and smoothed speed available.
        clean_data <- datos_all[(datos_all$Country == k) & !is.na(datos_all$ratio) &
                                  !is.na(datos_all$dratio_dt_mmov),]
        # One point per (country, window): mean ratio vs mean convergence speed.
        datosdec <- rbind(datosdec,
                          data.frame("Country"=k,"CountryCode"= clean_data$CountryCode[1],
                                     "distX"=mean(clean_data$ratio),
                                     "distY"=mean(clean_data$dratio_dt_mmov),
                                     "PeriodStart" =start_year)
        )
      }
    }
  }
  # Drop windows where the country had no usable rows at all.
  datosdec <- datosdec[!is.na(datosdec$CountryCode),]
  # Trajectory plot: arrows connect successive windows per country.
  allp <- ggplot(data=datosdec)+
          geom_point(aes(y = distY, x = distX, color=CountryCode), alpha=0.8,size=2.5)+
          geom_path(aes(y = distY, x = distX,color=CountryCode),alpha=0.4,
                    size = 0.5, arrow = arrow(length = unit(0.3, "cm"),type="closed"))+
          geom_text_repel(aes(label=paste0(CountryCode," ",PeriodStart,"/",PeriodStart+hop-1," "),
                          y = distY, x = distX,color=CountryCode),alpha=0.8,size=4)+
          xlab(paste("Avg.",criteria,"ratio")) + ylab ("Avg. convergence speed")+
          theme_bw()+
          theme(panel.grid.minor = element_blank(),
                panel.grid.major = element_line(size = 0.1),
                legend.position = "none",
                axis.text.y = element_text(face="bold", size=14),
                axis.text.x = element_text(face="bold", size=14),
                axis.title.x = element_text(face="bold", size=16),
                axis.title.y = element_text(face="bold", size=16))
  wplot <- 7
  hplot <- 6
  nfile <- paste0(tdir,"/AVG_EVOLUTION_",criteria)
  # Write both TIFF and PNG at the same size/resolution.
  tiff(paste0(nfile,".tiff"), width=wplot*ppi, height=hplot*ppi,res=ppi)
  print(allp)
  dev.off()
  png(paste0(nfile,".png"), width=wplot*ppi, height=hplot*ppi, res=ppi)
  print(allp)
  dev.off()
} | /midtrap_all_plot_evolution.R | permissive | jgalgarra/midinctrap | R | false | false | 3,396 | r | # Plots avg speed and ratio evolution, using hop years period
# List lcriteria allows to choose GDP, GNI or both
#
# Author: Javier Garcia-Algarra
# August 2021
#
# Inputs: output_data/all_speeds_criteria.csv"
# Results: figs/AVG_EVOLUTION.png .tiff
library(ggplot2)
library(ggrepel)
criteria <- read.table("config_data/criteria.txt")
lcriteria <- criteria$V1
tdir <- "figs"
if (!dir.exists(tdir))
dir.create(tdir)
hop <- 20
lstart_year <- seq(1960,2020-hop,by=hop)
ratio_breaks <- c(0.01,0.05,0.1,0.25,0.8,1,1.1)
ppi <- 300
scountry <- c("China","Brazil","Argentina","Chile","Portugal","Korea, Rep.")
for (criteria in lcriteria)
{
datosdec <- data.frame("Country"=c(),"CountryCode"=c(),"distX"=c(),"distY"=c(),"PeriodStart" =c())
datos_raw <- read.csv(paste0("output_data/all_speeds_",criteria,".csv"))
lp <- unique(datos_raw$Country)
for(start_year in lstart_year)
{
end_year <- start_year + hop
datos_all <- datos_raw[(datos_raw$Year>=start_year) & (datos_raw$Year<=end_year),]
datadist <- data.frame("Country"=c(),"CountryCode"=c(),"distX"=c(),"distY"=c())
for (k in lp){
if (is.element(k,scountry))
{
clean_data <- datos_all[(datos_all$Country == k) & !is.na(datos_all$ratio) &
!is.na(datos_all$dratio_dt_mmov),]
datosx <- datos_all[(datos_all$Country == k) & !is.na(datos_all$ratio),]$ratio
datosy <- datos_all[(datos_all$Country == k) & !is.na(datos_all$dratio_dt_mmov),]$dratio_dt_mmov
datadist <- rbind(datadist,data.frame("Country"=k,"CountryCode"= clean_data$CountryCode[1],
"distX"=mean(clean_data$ratio),
"distY"=mean(clean_data$dratio_dt_mmov)))
datosdec <- rbind(datosdec,
data.frame("Country"=k,"CountryCode"= clean_data$CountryCode[1],
"distX"=mean(clean_data$ratio),
"distY"=mean(clean_data$dratio_dt_mmov),
"PeriodStart" =start_year)
)
}
}
}
datosdec <- datosdec[!is.na(datosdec$CountryCode),]
allp <- ggplot(data=datosdec)+
geom_point(aes(y = distY, x = distX, color=CountryCode), alpha=0.8,size=2.5)+
geom_path(aes(y = distY, x = distX,color=CountryCode),alpha=0.4,
size = 0.5, arrow = arrow(length = unit(0.3, "cm"),type="closed"))+
geom_text_repel(aes(label=paste0(CountryCode," ",PeriodStart,"/",PeriodStart+hop-1," "),
y = distY, x = distX,color=CountryCode),alpha=0.8,size=4)+
xlab(paste("Avg.",criteria,"ratio")) + ylab ("Avg. convergence speed")+
theme_bw()+
theme(panel.grid.minor = element_blank(),
panel.grid.major = element_line(size = 0.1),
legend.position = "none",
axis.text.y = element_text(face="bold", size=14),
axis.text.x = element_text(face="bold", size=14),
axis.title.x = element_text(face="bold", size=16),
axis.title.y = element_text(face="bold", size=16))
wplot <- 7
hplot <- 6
nfile <- paste0(tdir,"/AVG_EVOLUTION_",criteria)
tiff(paste0(nfile,".tiff"), width=wplot*ppi, height=hplot*ppi,res=ppi)
print(allp)
dev.off()
png(paste0(nfile,".png"), width=wplot*ppi, height=hplot*ppi, res=ppi)
print(allp)
dev.off()
} |
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## this function creates a matrix by using a list that will get the inverse
## Cache-aware matrix container.
##
## Wraps a matrix `x` together with a slot for its (lazily computed)
## inverse; the accessors below share state through the closure
## environment via `<<-`.
##
## Returns a list of four functions:
##   set(y)        - replace the stored matrix and drop any cached inverse
##   get()         - return the stored matrix
##   setinverse(i) - store a computed inverse in the cache
##   getinverse()  - return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    list(
        set = function(y) {
            x <<- y
            cached_inverse <<- NULL  # a new matrix invalidates the old cache
        },
        get = function() x,
        setinverse = function(inverse) cached_inverse <<- inverse,
        getinverse = function() cached_inverse
    )
}
## Write a short comment describing this function
## This function will compute the inverse of the matrix obtained by the past function.
## When the inverse has already been calculated by the past function, it will retrieve it
## from the cache
## Return the inverse of the special "matrix" object `x`
## (as produced by makeCacheMatrix).
##
## The inverse is computed at most once: later calls reuse the cached
## value and announce the reuse via message().
##
## @param x   list produced by makeCacheMatrix()
## @param ... extra arguments forwarded to solve()
## @return the inverse of x$get()
cacheSolve <- function(x, ...) {
    hit <- x$getinverse()
    if (is.null(hit)) {
        ## Cache miss: invert the stored matrix and memoize the result.
        inv <- solve(x$get(), ...)
        x$setinverse(inv)
        inv
    } else {
        message("getting cached data")
        hit
    }
}
| /cachematrix.R | no_license | JLaniado/ProgrammingAssignment2 | R | false | false | 1,026 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## this function creates a matrix by using a list that will get the inverse
## Construct a "cache matrix": a closure-backed list that stores a matrix
## together with a memo slot `i` for its inverse (NULL until computed).
## Returned functions:
##   set(y)        - replace the matrix and clear the cached inverse
##   get()         - return the stored matrix
##   setinverse(i) - store a computed inverse
##   getinverse()  - return the cached inverse, or NULL
makeCacheMatrix <- function(x = matrix()) {
    i <- NULL
    set <- function(y) {
        x <<- y
        i <<- NULL  # invalidate the cache whenever the matrix changes
    }
    get <- function() x
    setinverse <- function(inverse) i <<- inverse
    getinverse <- function() i
    list(set = set,
         get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
## Write a short comment describing this function
## This function will compute the inverse of the matrix obtained by the past function.
## When the inverse has already been calculated by the past function, it will retrieve it
## from the cache
## Return the inverse of the special "matrix" x (built by makeCacheMatrix).
## The inverse is computed at most once; subsequent calls reuse the cached
## value and announce the cache hit via message().
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    i <- x$getinverse()
    if (!is.null(i)) {
        message("getting cached data")
        return(i)
    }
    data <- x$get()
    i <- solve(data, ...)
    x$setinverse(i)
    i
}
|
## Put comments here that give an overall description of what your
## functions do
##calling exemple:
## > source("cachematrix.R") load R program
## > m <- makeCacheMatrix() create functions
## > m$set(matrix(c(1,2,3,4), 2, 2)) set any square invertible matrix
## > cacheSolve(m) get inverse (new matrix)
## > cacheSolve(m) get in cash (inverse exists)
## set and get cache matrix
## Build a caching wrapper around matrix `x`.
## The returned list exposes set/get for the matrix itself and
## setmatrix/getmatrix for the cached inverse (NULL until computed);
## all four closures share state through the enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
    stored_inverse <- NULL
    list(
        set = function(y) {
            x <<- y
            stored_inverse <<- NULL  # invalidate cache on reassignment
        },
        get = function() x,
        setmatrix = function(m) stored_inverse <<- m,
        getmatrix = function() stored_inverse
    )
}
## Return Inverse matrix
## Return the inverse of the matrix held in `x` (a makeCacheMatrix object).
##
## On the first call the inverse is computed with solve() and stored in the
## cache via setmatrix(); later calls return the cached value without
## recomputing.
##
## @param x   list produced by makeCacheMatrix() (this file's mtxCache variant)
## @param ... further arguments passed on to solve()
##            (bug fix: these were previously accepted but silently ignored)
## @return the inverse of x$get()
cacheSolve <- function(x, ...) {
    mt <- x$getmatrix()
    if (!is.null(mt)) {
        ## Cache hit: reuse the previously computed inverse.
        return(mt)
    }
    ## Cache miss: compute, memoize, then return.
    mt <- solve(x$get(), ...)
    x$setmatrix(mt)
    mt
}
| /cachematrix.R | no_license | Cnaza/ProgrammingAssignment2 | R | false | false | 1,110 | r | ## Put comments here that give an overall description of what your
## functions do
##calling exemple:
## > source("cachematrix.R") load R program
## > m <- makeCacheMatrix() create functions
## > m$set(matrix(c(1,2,3,4), 2, 2)) set any square invertible matrix
## > cacheSolve(m) get inverse (new matrix)
## > cacheSolve(m) get in cash (inverse exists)
## set and get cache matrix
## Build a cache-aware matrix holder: stores matrix `x` plus a slot
## (mtxCache) for its inverse; the four accessors share state through
## the closure environment via `<<-`.
makeCacheMatrix <- function(x = matrix()) {
	mtxCache <- NULL
	set <- function(y) {
		x <<- y
		mtxCache <<- NULL
	}
	get <- function() x
	setmatrix <- function(m) mtxCache <<- m
	getmatrix <- function() mtxCache
	list(set = set, get = get,
	     setmatrix = setmatrix,
	     getmatrix = getmatrix)
}
## Return Inverse matrix
## Return the inverse of the cache-matrix object `x`.
## Cache hit: return the stored inverse. Cache miss: compute with solve(),
## store it via setmatrix(), then return it.
## NOTE(review): `...` is accepted but never passed to solve() here, so any
## extra arguments are silently ignored -- confirm whether that is intended.
cacheSolve <- function(x, ...) {
	mt <- x$getmatrix()
	## get from cache if available
	if(!is.null(mt)) {
	return(mt)
	}else{
	## not cached yet: compute the inverse
	data <- x$get()
	mt <- solve(data)
	## store the inverse in the cache
	x$setmatrix(mt)
	return(mt)
	}
}
|
# Binomial likelihood demo: y = 25 successes out of n = 100 trials.
library(rjags)
n <- 100
y <- 25
# Binomial likelihood of the data as a function of the success probability p.
BL <- function(p) choose(n,y)*p^(y)*(1-p)^(n-y)
plot(0:100/100, BL(0:100/100), type="l")
# The previous two lines can also be drawn with the built-in density:
#plot(0:100, dbinom(0:100, n, y/n), type="l")
# Mark the maximum-likelihood estimate y/n = 0.25.
abline(v=y/n, lwd=2)
text(y/n+0.05,0.09,labels="0.25")
####################
# Execution code for biostatistics
# Author: Hayato Iijima (Yamanashi Forest Research Institute)
# Contact: hayato.iijima@gmail.com
# Created: 2017/1/16
####################
####################
# Chapter 2: Why is statistics necessary?
####################
## Normal distribution
# Define graphics parameters
par(mar=c(5,5,1,1), ps=15)
# Draw densities for several mean/sd combinations
values <- -50:50
plot(values, dnorm(values, 0, 1), type="l", xlab="Value", ylab="Probability density")
points(values, dnorm(values, 0, 5), col="red", type="l")
points(values, dnorm(values, 0, 10), col="blue", type="l")
points(values, dnorm(values, 0, sqrt(10^3)), col="green", type="l")
points(values, dnorm(values, -20, 3), type="l", lty=2)
points(values, dnorm(values, 40, 6), type="l", lty=2)
legend("topleft", lty=c(rep(1, 4), 2, 2), col=c("black", "red", "blue", "green", "black", "black"),
       legend=c(expression(paste(mu, " = 0, ", sigma, " = 1")),
                expression(paste(mu, " = 0, ", sigma, " = 5")),
                expression(paste(mu, " = 0, ", sigma, " = 10")),
                expression(paste(mu, " = 0, ", sigma, " = 31.6")),
                expression(paste(mu, " = -20, ", sigma, " = 3")),
                expression(paste(mu, " = 40, ", sigma, " = 6")))
)
### Binomial distribution
## Define graphics parameters
par(mfrow=c(1,2), mar=c(5,5,1,1), ps=15)
## Effect of the success probability
# Success probabilities
prob <- c(0.05, 0.3, 0.5, 0.8)
# Number of trials
N <- 100
# Colour palette
iro <- c("black", "red", "blue", "green")
# Draw
plot(0:N, dbinom(0:N, N, prob[1]), type="h", xlab="Value", ylab="Probability")
points(0:N, dbinom(0:N, N, prob[2]), type="h", col=iro[2])
points(0:N, dbinom(0:N, N, prob[3]), type="h", col=iro[3])
points(0:N, dbinom(0:N, N, prob[4]), type="h", col=iro[4])
legend("topright", lty=1, col=iro,
       legend=c("p = 0.05",
                "p = 0.3",
                "p = 0.5",
                "p = 0.8"
       ), cex=0.7
)
## Effect of the number of trials
# Success probability
prob <- 0.5
# Numbers of trials (deliberately displayed in an imprecise way)
N <- c(100, 30, 20, 10)
# Draw; the x axis is relabelled as relative position so the four N overlap
plot(0:N[1], dbinom(0:N[1], N[1], prob), type="h", xaxt="n", xlab="Relative position", ylab="Probability")
axis(1, 0:5*20, at=0:5*20, labels=0:5/5, tick=TRUE)
for (i in 2:4) {
  par(new=TRUE)
  plot(0:N[i], dbinom(0:N[i], N[i], prob), type="h", axes=F, ann=F, col=iro[i])
}
legend("topright", lty=1, col=iro,
       legend=c("N = 100",
                "N = 30",
                "N = 20",
                "N = 10"
       ), cex=0.7
)
## Poisson distribution
# Mean number of events
N <- c(1, 3, 7, 15)
values <- 0:30
# Colour palette
iro <- c("black", "red", "blue", "green")
# Draw; jitter() offsets the bars slightly so they do not overplot
plot(values, dpois(values, N[1]), type="h", xlab="Values", ylab="Probability")
for (i in 2:4) {
  points(jitter(values), dpois(values, N[i]), type="h", col=iro[i])
}
legend("topright", lty=1, col=iro,
       legend=c(expression(paste(lambda, " = 1")),
                expression(paste(lambda, " = 3")),
                expression(paste(lambda, " = 7")),
                expression(paste(lambda, " = 15")))
)
##############
# Chapter 4: GLM (generalized linear model)
##############
# Generate the data (simulated here with random numbers)
set.seed(1)
N <- 100
x1 <- rnorm(N, 0, 2)
x2 <- rnorm(N, 0, 2)
intercept <- -2
y <- rpois(N, exp(intercept + x1)) # x2 has no effect on y
d <- data.frame(y, x1, x2)
head(d)
# NOTE(review): the stray "," on the next line is residue from the data
# dump, not valid R -- remove it before sourcing this file.
,
# Relationships among the variables
# Define graphics parameters
par(mfrow=c(1,2), mar=c(5,5,1,1), ps=15)
# Draw
plot(y ~ x1, d)
plot(y ~ x2, d)
# Analysis with glm()
res <- glm(y ~ x1 + x2, family=poisson(link="log"), data = d)
summary(res)
library(MASS)
stepAIC(res)
# Analysis with JAGS
# Prepare the data
list.data <- list(N=N, x1=x1, x2=x2, y=y)
# Write the model out to a text file while defining it
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#考えたモデル
for (i in 1:N) {
y[i] ~ dpois(lambda[i])
log(lambda[i]) <- intercept + bx1*x1[i] + bx2*x2[i]
# lambda[i] <- exp(intercept + bx1*x1[i] + bx2*x2[i])としても同じ
}
#パラメータの事前分布
intercept ~ dnorm(0.0, 1.0E-3)
bx1 ~ dnorm(0.0, 1.0E-3)
bx2 ~ dnorm(0.0, 1.0E-3)
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
# Provide initial values
inits <- list(intercept = 0,
              bx1 = 0,
              bx2 = 0
)
# One (identical) set of initial values per chain
inits <- list(inits, inits, inits)
# Give each chain a different RNG seed (the exact numbers do not matter)
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
# Parameters to monitor
para <- c("intercept", "bx1", "bx2")
# Run the computation with JAGS
# MCMC settings
n.chains <- 3
n.iter <- 2000
n.update <- 2000
thin <- 2
# Load the package needed for the computation
library(rjags)
# Record the start time
start.time <- Sys.time()
# Initialise the model (errors in the model definition surface here)
m <- jags.model(
  file = modelFilename,
  data = list.data,
  inits = inits,
  n.chain = n.chains
)
# Run the burn-in
update(m, n.update)
# Main sampling run
x <- coda.samples(
  m,
  para,
  thin = thin, n.iter = n.iter
)
# Record the end time and report the elapsed computation time
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
# Summarise the posterior samples
res <- data.frame(summary(x)$statistics)
ci <- data.frame(summary(x)$quantiles)
# Flag parameters whose 95% credible interval excludes 0
res$sig <- abs(sign(ci[, 1]) + sign(ci[, 5])) == 2
# Compute Rhat (Gelman-Rubin convergence diagnostic)
rhat <- gelman.diag(x)[["psrf"]][, 1]
res$Rhat <- rhat
# Show the results
res
plot(x)
##############
#5章:GLMM(一般化線形混合モデル)
##############
#データの生成
set.seed(11)
N <- 100
x1 <- rnorm(N, 0, 2)
x2 <- rnorm(N, 0, 2)
intercept <- -2
sigma <- 2
Nplot <- 10
plot <- rnorm(Nplot, 0, sigma)
plot <- rep(plot, c(10, 10, 4, 11, 7, 14, 8, 18, 11, 7))
plotid <- rep(1:10, c(10, 10, 4, 11, 7, 14, 8, 18, 11, 7))
y <- rpois(N, exp(intercept + x1 + plot)) #x2は影響しない
d2 <- data.frame(y, x1, x2, plot, plotid)
#glmmML()による解析
library(glmmML)
res2 <- glmmML(y ~ x1 + x2, cluster=plotid, family=poisson, d2)
#JAGSによる解析
#データを用意する
list.data <- list(N=N, x1=x1, x2=x2, y=y, plotid=plotid, Nplot=Nplot)
#モデルを読み込ませつつ、テキストファイルとして出力する
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#考えたモデル
for (i in 1:N) {
y[i] ~ dpois(lambda[i])
log(lambda[i]) <- intercept + bx1*x1[i] + bx2*x2[i] + ranef[plotid[i]]
}
#パラメータの事前分布
intercept ~ dnorm(0.0, 1.0E-3)
bx1 ~ dnorm(0.0, 1.0E-3)
bx2 ~ dnorm(0.0, 1.0E-3)
for (i in 1:Nplot) {
ranef[i] ~ dnorm(0.0, tau)
}
tau <- pow(sigma, -2)
sigma ~ dunif(0, 100)
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
#
#初期値を与える
inits <- list(intercept = 0,
bx1 = 0,
bx2 = 0,
ranef = rnorm(Nplot, 0, 1),
sigma = 5
)
inits <- list(inits, inits, inits)
#初期値の乱数の種に異なる値を与える
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
#監視対象パラメータを設定する
para <- c("intercept", "bx1", "bx2", "ranef", "sigma")
#JAGSによる計算の実行
#MCMCの計算に関するパラメータ
n.chains <- 3
n.iter <- 2000
n.update <- 2000
thin <- 2
#計算に必要なパッケージの読み込み
library(rjags)
#計算開始時間を記録
start.time <- Sys.time()
#初期化
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
#Burn-inの実行
update(m, n.update)
#本計算の実行
x <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
#終了時間の記録と、計算時間の出力
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
#計算結果の出力
res <- data.frame(summary(x)$statistics)
ci <- data.frame(summary(x)$quantiles)
#95%信用区間が0をまたぐかどうかを計算
res$sig <- abs(sign(ci[, 1]) + sign(ci[, 5])) == 2
#Rhat値の計算
rhat <- gelman.diag(x)[["psrf"]][, 1]
res$Rhat <- rhat
#結果の表示
res
#ランダム効果の事後分布
par(ask=TRUE)
plot(x[, grep("ranef", rownames(res))])
##############
#6章:状態空間モデル
##############
#データの生成
set.seed(111)
Nyear <- 25
Nint <- 30.5
mean.lambda <- 1.02
sigma.lambda <- 0.1
sigma.obs <- 5
N <- as.numeric()
y <- as.numeric()
N[1] <- Nint
lambda <- rnorm(Nyear-1, mean.lambda, sigma.lambda)
for (i in 1:(Nyear-1)) {
N[i+1] <- lambda[i]*N[i]
}
for (i in 1:Nyear) {
y[i] <- rnorm(1, N[i], sigma.obs)
}
Ntau <- 2
d3 <- data.frame(year = 1:Nyear, N, y)
#
#データを用意する
list.data <- list(y=y, Nyear=Nyear, Ntau=Ntau)
#モデルを読み込ませつつ、テキストファイルとして出力する
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#状態プロセス
for (i in 1:(Nyear-1)) {
N[i+1] <- lambda[i]*N[i]
lambda[i] ~ dnorm(mulambda, tau[1])
}
N[1] <- Ninit
Ninit ~ dnorm(0.0, 1.0E-3)
mulambda ~ dnorm(0.0, 1.0E-3)
#観測プロセス
for (i in 1:Nyear) {
y[i] ~ dnorm(N[i], tau[2])
}
#パラメータの事前分布
for (i in 1:Ntau) {
tau[i] <- pow(sigma[i], -2)
sigma[i] ~ dunif(0, 100)
}
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
#
#初期値を与える
inits <- list(lambda = rnorm(Nyear-1, 1.1, 0.1),
mulambda = 0,
Ninit = 30,
sigma = rep(5, Ntau)
)
inits <- list(inits, inits, inits)
#初期値の乱数の種に異なる値を与える
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
#監視対象パラメータを設定する
para <- c("N", "lambda", "mulambda", "sigma")
#JAGSによる計算の実行
#MCMCの計算に関するパラメータ
n.chains <- 3
n.iter <- 5000
n.update <- 5000
thin <- 5
#計算に必要なパッケージの読み込み
library(rjags)
#計算開始時間を記録
start.time <- Sys.time()
#初期化
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
#Burn-inの実行
update(m, n.update)
#本計算の実行
x <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
#終了時間の記録と、計算時間の出力
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
#計算結果の出力
res <- data.frame(summary(x)$statistics)
ci <- data.frame(summary(x)$quantiles)
#95%信用区間が0をまたぐかどうかを計算
res$sig <- abs(sign(ci[, 1]) + sign(ci[, 5])) == 2
#Rhat値の計算
rhat <- gelman.diag(x)[["psrf"]][, 1]
res$Rhat <- rhat
#結果の表示
res
#結果の図示
par(mfrow=c(2,2), mar=c(5,5,3,1),ps=15)
#密度
plot(1:Nyear, N, type="l", lwd=2, ylim=c(0, 100))
lines(1:Nyear, y, col="red")
lines(1:Nyear, res[grep("N", rownames(res)), 1], col="blue")
legend("topright", col=c("black", "red", "blue"), lwd=c(2,1,1),
legend=c("Setting", "Observed", "Estimated"), cex=0.6
)
#個体群増加率の平均値
plot(density(unlist(x[, grep("mulambda", rownames(res))])), main="mean.lambda")
abline(v=mean.lambda, lwd=3, lty=2, col="red")
#個体群増加率の標準誤差
plot(density(unlist(x[, grep("sigma", rownames(res))[1]])), main="sigma.lambda")
abline(v=sigma.lambda, lwd=3, lty=2, col="red")
#観測誤差
plot(density(unlist(x[, grep("sigma", rownames(res))[2]])), main="sigma.obs")
abline(v=sigma.obs, lwd=3, lty=2, col="red")
legend("topright", lty=c(1,2), lwd=c(1,3), col=c("black", "red"),
legend=c("Estimated", "Setting"), cex=0.6
)
##############
#7章:状態空間モデル(二項混合モデル)
##############
#データの生成
set.seed(11111)
Nsite <- 20
Nrep <- 2
meanlogitp <- log(0.7/(1-0.7))
light <- rnorm(Nsite, 0, 0.5)
coefL <- 2
p <- 1/(1+exp(-(meanlogitp + coefL*light)))
lambda <- 5
N <- rpois(Nsite, lambda)
y <- matrix(NA, nrow=Nrep, ncol=Nsite)
for (i in 1:Nsite) {
y[, i] <- rbinom(Nrep, N[i], p[i])
}
#生成したデータを見る
par(mar=c(5,5,1,1), ps=15)
plot(1:length(N), N, pch=15, xlab="Site", ylab="Abundance", ylim=c(0, max(N)), cex=2)
points(1:length(N), y[1, ], pch=1, cex=2)
points(1:length(N), y[2, ], pch=2, cex=2)
legend("topright", pch=c(15, 1, 2),
legend=c("True", "Obs. 1", "Obs. 2"))
#発見率一定
#データを用意する
list.data <- list(y=y, Nsite=Nsite, Nrep=Nrep)
#モデルを読み込ませつつ、テキストファイルとして出力する
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#状態プロセス
for (i in 1:Nsite) {
EN[i] ~ dpois(meanN)
}
meanN ~ dunif(0, 1000)
#観測プロセス
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y[j, i] ~ dbin(estp, EN[i])
}
}
estp ~ dunif(0, 1)
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
#初期値を与える
inits <- list(EN = N,
meanN = 5,
estp = 0.5
)
inits <- list(inits, inits, inits)
#初期値の乱数の種に異なる値を与える
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
#監視対象パラメータを設定する
para <- c("EN", "meanN", "estp")
#JAGSによる計算の実行
#MCMCの計算に関するパラメータ
n.chains <- 3
n.iter <- 10000
n.update <- 10000
thin <- 5
#計算に必要なパッケージの読み込み
library(rjags)
#計算開始時間を記録
start.time <- Sys.time()
#初期化
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
#Burn-inの実行
update(m, n.update)
#本計算の実行
x <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
#終了時間の記録と、計算時間の出力
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
#結果の出力
res <- data.frame(summary(x)$statistics)
ci <- data.frame(summary(x)$quantiles)
#95%信用区間が0をまたぐかどうかを計算
res$sig <- abs(sign(ci[, 1]) + sign(ci[, 5])) == 2
#Rhat値の計算
rhat <- gelman.diag(x)[["psrf"]][, 1]
res$Rhat <- rhat
#結果の表示
res
#設定値との関係
par(mfrow=c(1,2), mar=c(5,5,1,1), ps=15)
#真の個体数
plot(N, res[grep("EN", rownames(res)), 1], xlab="Set", ylab="Estimated (mean)",
main="Abundance", xlim=c(0, max(N)+2), ylim=c(0, max(N)+2))
abline(a=0, b=1, lwd=2)
#平均の個体数
plot(density(unlist(x[, grep("meanN", rownames(res))])), main=expression(lambda))
abline(v=lambda)
#検出率変動
#データを用意する
list.data <- list(y=y, Nsite=Nsite, Nrep=Nrep, light=light)
#モデルを読み込ませつつ、テキストファイルとして出力する
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#状態プロセス
for (i in 1:Nsite) {
EN[i] ~ dpois(meanN)
}
meanN ~ dunif(0, 1000)
#観測プロセス
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y[j, i] ~ dbin(estp[i], EN[i])
}
estp[i] <- 1/(1+exp(-(alpha + bLI*light[i])))
}
alpha ~ dnorm(0.0, 1.0E-3)
bLI ~ dnorm(0.0, 1.0E-3)
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
#
#初期値を与える
inits <- list(EN = N,
meanN = 5,
bLI = 0,
alpha = 0
)
inits <- list(inits, inits, inits)
#初期値の乱数の種に異なる値を与える
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
#監視対象パラメータを設定する
para <- c("EN", "meanN", "alpha", "bLI")
#JAGSによる計算の実行
#MCMCの計算に関するパラメータ
n.chains <- 3
n.iter <- 10000
n.update <- 10000
thin <- 5
#計算に必要なパッケージの読み込み
library(rjags)
#計算開始時間を記録
start.time <- Sys.time()
#初期化
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
#Burn-inの実行
update(m, n.update)
#本計算の実行
x2 <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
#終了時間の記録と、計算時間の出力
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
#結果の出力
res2 <- data.frame(summary(x2)$statistics)
ci2 <- data.frame(summary(x2)$quantiles)
#95%信用区間が0をまたぐかどうかを計算
res2$sig <- abs(sign(ci2[, 1]) + sign(ci2[, 5])) == 2
#Rhat値の計算
rhat2 <- gelman.diag(x2)[["psrf"]][, 1]
res2$Rhat <- rhat2
#結果の表示
res2
#設定値との関係(検出率一定モデルとの比較)
par(mfrow=c(2,2), mar=c(5,5,1,1), ps=15)
#検出率一定
#真の個体数
plot(N, res[grep("EN", rownames(res)), 1], xlab="Set", ylab="Estimated (mean)",
main="Abundance", xlim=c(0, max(N)+2), ylim=c(0, max(N)+2))
abline(a=0, b=1, lwd=2)
#平均の個体数
plot(density(unlist(x[, grep("meanN", rownames(res))])), main=expression(lambda))
abline(v=lambda)
#検出率変動(今回の解析)
plot(N, res2[grep("EN", rownames(res2)), 1], xlab="Set", ylab="Estimated (mean)",
main="Abundance", xlim=c(0, max(N)+2), ylim=c(0, max(N)+2))
abline(a=0, b=1, lwd=2)
#平均の個体数
plot(density(unlist(x2[, grep("meanN", rownames(res2))])), main=expression(lambda))
abline(v=lambda)
#Bayesian p valueの計算
#検出率一定
#データを用意する
list.data <- list(y=y, Nsite=Nsite, Nrep=Nrep)
#モデルを読み込ませつつ、テキストファイルとして出力する
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#状態プロセス
for (i in 1:Nsite) {
EN[i] ~ dpois(meanN)
}
meanN ~ dunif(0, 1000)
#観測プロセス
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y[j, i] ~ dbin(estp, EN[i])
#期待値
esty[j, i] <- estp*EN[i]
E[j, i] <- pow((y[j, i] - esty[j, i]), 2)/(esty[j, i] + 0.5)
}
}
estp ~ dunif(0, 1)
#Bayesian p value計算
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y_new[j, i] ~ dbin(estp, EN[i])
esty_new[j, i] <- estp*EN[i]
E_new[j, i] <- pow((y_new[j, i] - esty_new[j, i]), 2)/(esty_new[j, i] + 0.5)
}
}
fit_data <- sum(E[, ])
fit_new <- sum(E_new[, ])
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
#初期値を与える
inits <- list(EN = N,
meanN = 5,
y_new = y,
estp = 0.5
)
inits <- list(inits, inits, inits)
#初期値の乱数の種に異なる値を与える
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
#監視対象パラメータを設定する
para <- c("EN", "meanN", "estp", "fit_data", "fit_new")
#JAGSによる計算の実行
#MCMCの計算に関するパラメータ
n.chains <- 3
n.iter <- 10000
n.update <- 10000
thin <- 5
#計算に必要なパッケージの読み込み
library(rjags)
#計算開始時間を記録
start.time <- Sys.time()
#初期化
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
#Burn-inの実行
update(m, n.update)
#本計算の実行
x <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
#終了時間の記録と、計算時間の出力
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
#結果の出力
res <- data.frame(summary(x)$statistics)
ci <- data.frame(summary(x)$quantiles)
#95%信用区間が0をまたぐかどうかを計算
res$sig <- abs(sign(ci[, 1]) + sign(ci[, 5])) == 2
#Rhat値の計算
rhat <- gelman.diag(x)[["psrf"]][, 1]
res$Rhat <- rhat
#検出率変動
#データを用意する
list.data <- list(y=y, Nsite=Nsite, Nrep=Nrep, light=light)
#モデルを読み込ませつつ、テキストファイルとして出力する
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#状態プロセス
for (i in 1:Nsite) {
EN[i] ~ dpois(meanN)
}
meanN ~ dunif(0, 1000)
#観測プロセス
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y[j, i] ~ dbin(estp[i], EN[i])
#期待値
esty[j, i] <- estp[i]*EN[i]
E[j, i] <- pow((y[j, i] - esty[j, i]), 2)/(esty[j, i] + 0.5)
}
estp[i] <- 1/(1+exp(-(alpha + bLI*light[i])))
}
alpha ~ dnorm(0.0, 1.0E-3)
bLI ~ dnorm(0.0, 1.0E-3)
#Bayesian p value計算
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y_new[j, i] ~ dbin(estp[i], EN[i])
esty_new[j, i] <- estp[i]*EN[i]
E_new[j, i] <- pow((y_new[j, i] - esty_new[j, i]), 2)/(esty_new[j, i] + 0.5)
}
}
fit_data <- sum(E[, ])
fit_new <- sum(E_new[, ])
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
#初期値を与える
inits <- list(EN = N,
meanN = 5,
y_new = y,
bLI = 0,
alpha = 0
)
inits <- list(inits, inits, inits)
#初期値の乱数の種に異なる値を与える
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
#監視対象パラメータを設定する
para <- c("EN", "meanN", "alpha", "bLI", "fit_data", "fit_new")
#JAGSによる計算の実行
#MCMCの計算に関するパラメータ
n.chains <- 3
n.iter <- 10000
n.update <- 10000
thin <- 5
#計算に必要なパッケージの読み込み
library(rjags)
#計算開始時間を記録
start.time <- Sys.time()
#初期化
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
#Burn-inの実行
update(m, n.update)
#本計算の実行
x2 <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
#終了時間の記録と、計算時間の出力
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
#結果の出力
res2 <- data.frame(summary(x2)$statistics)
ci2 <- data.frame(summary(x2)$quantiles)
#95%信用区間が0をまたぐかどうかを計算
res2$sig <- abs(sign(ci2[, 1]) + sign(ci2[, 5])) == 2
#Rhat値の計算
rhat2 <- gelman.diag(x2)[["psrf"]][, 1]
res2$Rhat <- rhat2
#Bayesian p valueの計算
#検出率一定
mean(unlist(x[grep("fit_data", rownames(res)), ]) > unlist(x[grep("fit_new", rownames(res)), ]))
#検出率変動
mean(unlist(x2[grep("fit_data", rownames(res2)), ]) > unlist(x2[grep("fit_new", rownames(res2)), ]))
| /elements/script (7).r | no_license | KazuhaM/Resources | R | false | false | 24,219 | r | library(rjags)
n <- 100
y <- 25
BL <- function(p) choose(n,y)*p^(y)*(1-p)^(n-y)
plot(0:100/100, BL(0:100/100), type="l")
#直前2 行は、以下の関数を使っても描ける
#plot(0:100, dbinom(0:100, n, y/n), type="l")
abline(v=y/n, lwd=2)
text(y/n+0.05,0.09,labels="0.25")
####################
#生物統計学の実行コード
#作成者:飯島勇人(山梨県森林研)
#連絡先:hayato.iijima@gmail.com
#作成日:2017/1/16
####################
####################
#2章:なぜ統計学が必要なのか?
####################
##正規分布
#グラフィックスパラメータの定義
par(mar=c(5,5,1,1), ps=15)
#描画
values <- -50:50
plot(values, dnorm(values, 0, 1), type="l", xlab="Value", ylab="Probability density")
points(values, dnorm(values, 0, 5), col="red", type="l")
points(values, dnorm(values, 0, 10), col="blue", type="l")
points(values, dnorm(values, 0, sqrt(10^3)), col="green", type="l")
points(values, dnorm(values, -20, 3), type="l", lty=2)
points(values, dnorm(values, 40, 6), type="l", lty=2)
legend("topleft", lty=c(rep(1, 4), 2, 2), col=c("black", "red", "blue", "green", "black", "black"),
legend=c(expression(paste(mu, " = 0, ", sigma, " = 1")),
expression(paste(mu, " = 0, ", sigma, " = 5")),
expression(paste(mu, " = 0, ", sigma, " = 10")),
expression(paste(mu, " = 0, ", sigma, " = 31.6")),
expression(paste(mu, " = -20, ", sigma, " = 3")),
expression(paste(mu, " = 40, ", sigma, " = 6")))
)
###二項分布
##グラフィックスパラメータの定義
par(mfrow=c(1,2), mar=c(5,5,1,1), ps=15)
##生起確率の違い
#生起確率
prob <- c(0.05, 0.3, 0.5, 0.8)
#試行回数
N <- 100
#色の設定
iro <- c("black", "red", "blue", "green")
#描画
plot(0:N, dbinom(0:N, N, prob[1]), type="h", xlab="Value", ylab="Probability")
points(0:N, dbinom(0:N, N, prob[2]), type="h", col=iro[2])
points(0:N, dbinom(0:N, N, prob[3]), type="h", col=iro[3])
points(0:N, dbinom(0:N, N, prob[4]), type="h", col=iro[4])
legend("topright", lty=1, col=iro,
legend=c("p = 0.05",
"p = 0.3",
"p = 0.5",
"p = 0.8"
), cex=0.7
)
##試行回数の違い
#生起確率
prob <- 0.5
#試行回数(あえて不正確な示し方をします)
N <- c(100, 30, 20, 10)
#描画
plot(0:N[1], dbinom(0:N[1], N[1], prob), type="h", xaxt="n", xlab="Relative position", ylab="Probability")
axis(1, 0:5*20, at=0:5*20, labels=0:5/5, tick=TRUE)
for (i in 2:4) {
par(new=TRUE)
plot(0:N[i], dbinom(0:N[i], N[i], prob), type="h", axes=F, ann=F, col=iro[i])
}
legend("topright", lty=1, col=iro,
legend=c("N = 100",
"N = 30",
"N = 20",
"N = 10"
), cex=0.7
)
##ポアソン分布
#平均の発生回数
N <- c(1, 3, 7, 15)
values <- 0:30
#色の設定
iro <- c("black", "red", "blue", "green")
#描画
plot(values, dpois(values, N[1]), type="h", xlab="Values", ylab="Probability")
for (i in 2:4) {
points(jitter(values), dpois(values, N[i]), type="h", col=iro[i])
}
legend("topright", lty=1, col=iro,
legend=c(expression(paste(lambda, " = 1")),
expression(paste(lambda, " = 3")),
expression(paste(lambda, " = 7")),
expression(paste(lambda, " = 15")))
)
##############
# Chapter 4: GLM (generalized linear models)
##############
# Generate the data (simulated here with random numbers)
set.seed(1)
N <- 100
x1 <- rnorm(N, 0, 2)
x2 <- rnorm(N, 0, 2)
intercept <- -2
y <- rpois(N, exp(intercept + x1)) # x2 has no effect on y
d <- data.frame(y, x1, x2)
head(d)
# (FIX: removed a stray lone "," line here, which is a parse error when the
# script is sourced.)
# Relationships among the variables
# Define graphics parameters
par(mfrow=c(1,2), mar=c(5,5,1,1), ps=15)
# Draw
plot(y ~ x1, d)
plot(y ~ x2, d)
# Analysis with glm(): Poisson GLM with log link
res <- glm(y ~ x1 + x2, family=poisson(link="log"), data = d)
summary(res)
library(MASS)
stepAIC(res)
# Analysis with JAGS (Bayesian fit of the same Poisson GLM)
# Prepare the data
list.data <- list(N=N, x1=x1, x2=x2, y=y)
# Write the model to a text file that JAGS will read
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#考えたモデル
for (i in 1:N) {
y[i] ~ dpois(lambda[i])
log(lambda[i]) <- intercept + bx1*x1[i] + bx2*x2[i]
# lambda[i] <- exp(intercept + bx1*x1[i] + bx2*x2[i])としても同じ
}
#パラメータの事前分布
intercept ~ dnorm(0.0, 1.0E-3)
bx1 ~ dnorm(0.0, 1.0E-3)
bx2 ~ dnorm(0.0, 1.0E-3)
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
# Supply initial values (the same list is reused for all three chains)
inits <- list(intercept = 0,
bx1 = 0,
bx2 = 0
)
inits <- list(inits, inits, inits)
# Give each chain's RNG a different seed (the actual numbers are arbitrary)
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
# Parameters to monitor
para <- c("intercept", "bx1", "bx2")
# Run the computation with JAGS
# MCMC settings
n.chains <- 3
n.iter <- 2000
n.update <- 2000
thin <- 2
# Load the package required for the computation
library(rjags)
# Record the start time
start.time <- Sys.time()
# Initialization (model errors surface here)
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
# Burn-in
update(m, n.update)
# Main sampling run
x <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
# Record the end time and report the elapsed time
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
# Summarize the posterior draws
res <- data.frame(summary(x)$statistics)
ci <- data.frame(summary(x)$quantiles)
# TRUE when the 95% credible interval excludes zero (both bounds same sign)
res$sig <- abs(sign(ci[, 1]) + sign(ci[, 5])) == 2
# Gelman-Rubin convergence diagnostic (Rhat)
rhat <- gelman.diag(x)[["psrf"]][, 1]
res$Rhat <- rhat
# Display the results
res
plot(x)
##############
# Chapter 5: GLMM (generalized linear mixed models)
##############
# Generate the data
set.seed(11)
N <- 100
x1 <- rnorm(N, 0, 2)
x2 <- rnorm(N, 0, 2)
intercept <- -2
sigma <- 2
Nplot <- 10
# Plot-level random effects.
# NOTE(review): `plot` shadows the base plotting function with a numeric
# vector; later plot(...) calls still resolve to the function, but consider
# renaming this variable.
plot <- rnorm(Nplot, 0, sigma)
plot <- rep(plot, c(10, 10, 4, 11, 7, 14, 8, 18, 11, 7))
plotid <- rep(1:10, c(10, 10, 4, 11, 7, 14, 8, 18, 11, 7))
y <- rpois(N, exp(intercept + x1 + plot)) # x2 has no effect
d2 <- data.frame(y, x1, x2, plot, plotid)
# Analysis with glmmML()
library(glmmML)
res2 <- glmmML(y ~ x1 + x2, cluster=plotid, family=poisson, d2)
# Analysis with JAGS
# Prepare the data
list.data <- list(N=N, x1=x1, x2=x2, y=y, plotid=plotid, Nplot=Nplot)
# Write the model to a text file that JAGS will read
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#考えたモデル
for (i in 1:N) {
y[i] ~ dpois(lambda[i])
log(lambda[i]) <- intercept + bx1*x1[i] + bx2*x2[i] + ranef[plotid[i]]
}
#パラメータの事前分布
intercept ~ dnorm(0.0, 1.0E-3)
bx1 ~ dnorm(0.0, 1.0E-3)
bx2 ~ dnorm(0.0, 1.0E-3)
for (i in 1:Nplot) {
ranef[i] ~ dnorm(0.0, tau)
}
tau <- pow(sigma, -2)
sigma ~ dunif(0, 100)
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
#
# Supply initial values (reused for all three chains)
inits <- list(intercept = 0,
bx1 = 0,
bx2 = 0,
ranef = rnorm(Nplot, 0, 1),
sigma = 5
)
inits <- list(inits, inits, inits)
# Give each chain's RNG a different seed
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
# Parameters to monitor
para <- c("intercept", "bx1", "bx2", "ranef", "sigma")
# Run the computation with JAGS
# MCMC settings
n.chains <- 3
n.iter <- 2000
n.update <- 2000
thin <- 2
# Load the package required for the computation
library(rjags)
# Record the start time
start.time <- Sys.time()
# Initialization
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
# Burn-in
update(m, n.update)
# Main sampling run
x <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
# Record the end time and report the elapsed time
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
# Summarize the posterior draws
res <- data.frame(summary(x)$statistics)
ci <- data.frame(summary(x)$quantiles)
# TRUE when the 95% credible interval excludes zero (both bounds same sign)
res$sig <- abs(sign(ci[, 1]) + sign(ci[, 5])) == 2
# Gelman-Rubin convergence diagnostic (Rhat)
rhat <- gelman.diag(x)[["psrf"]][, 1]
res$Rhat <- rhat
# Display the results
res
# Posterior distributions of the random effects
par(ask=TRUE)
plot(x[, grep("ranef", rownames(res))])
##############
# Chapter 6: state-space models
##############
# Generate the data: exponential population growth with process noise
# (lambda) plus normal observation error.
set.seed(111)
Nyear <- 25
Nint <- 30.5
mean.lambda <- 1.02
sigma.lambda <- 0.1
sigma.obs <- 5
N <- as.numeric()
y <- as.numeric()
N[1] <- Nint
lambda <- rnorm(Nyear-1, mean.lambda, sigma.lambda)
for (i in 1:(Nyear-1)) {
N[i+1] <- lambda[i]*N[i]
}
for (i in 1:Nyear) {
y[i] <- rnorm(1, N[i], sigma.obs)
}
Ntau <- 2
d3 <- data.frame(year = 1:Nyear, N, y)
#
# Prepare the data
list.data <- list(y=y, Nyear=Nyear, Ntau=Ntau)
# Write the model to a text file that JAGS will read
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#状態プロセス
for (i in 1:(Nyear-1)) {
N[i+1] <- lambda[i]*N[i]
lambda[i] ~ dnorm(mulambda, tau[1])
}
N[1] <- Ninit
Ninit ~ dnorm(0.0, 1.0E-3)
mulambda ~ dnorm(0.0, 1.0E-3)
#観測プロセス
for (i in 1:Nyear) {
y[i] ~ dnorm(N[i], tau[2])
}
#パラメータの事前分布
for (i in 1:Ntau) {
tau[i] <- pow(sigma[i], -2)
sigma[i] ~ dunif(0, 100)
}
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
#
# Supply initial values (reused for all three chains)
inits <- list(lambda = rnorm(Nyear-1, 1.1, 0.1),
mulambda = 0,
Ninit = 30,
sigma = rep(5, Ntau)
)
inits <- list(inits, inits, inits)
# Give each chain's RNG a different seed
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
# Parameters to monitor
para <- c("N", "lambda", "mulambda", "sigma")
# Run the computation with JAGS
# MCMC settings
n.chains <- 3
n.iter <- 5000
n.update <- 5000
thin <- 5
# Load the package required for the computation
library(rjags)
# Record the start time
start.time <- Sys.time()
# Initialization
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
# Burn-in
update(m, n.update)
# Main sampling run
x <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
# Record the end time and report the elapsed time
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
# Summarize the posterior draws
res <- data.frame(summary(x)$statistics)
ci <- data.frame(summary(x)$quantiles)
# TRUE when the 95% credible interval excludes zero (both bounds same sign)
res$sig <- abs(sign(ci[, 1]) + sign(ci[, 5])) == 2
# Gelman-Rubin convergence diagnostic (Rhat)
rhat <- gelman.diag(x)[["psrf"]][, 1]
res$Rhat <- rhat
# Display the results
res
# Plot the results
par(mfrow=c(2,2), mar=c(5,5,3,1),ps=15)
# Abundance: true trajectory, observations, and posterior means.
# grep("N", ...) matches only the N[...] rows here since the other monitored
# names (lambda, mulambda, sigma) contain no capital N.
plot(1:Nyear, N, type="l", lwd=2, ylim=c(0, 100))
lines(1:Nyear, y, col="red")
lines(1:Nyear, res[grep("N", rownames(res)), 1], col="blue")
legend("topright", col=c("black", "red", "blue"), lwd=c(2,1,1),
legend=c("Setting", "Observed", "Estimated"), cex=0.6
)
# Mean population growth rate
plot(density(unlist(x[, grep("mulambda", rownames(res))])), main="mean.lambda")
abline(v=mean.lambda, lwd=3, lty=2, col="red")
# SD of the population growth rate (process error)
plot(density(unlist(x[, grep("sigma", rownames(res))[1]])), main="sigma.lambda")
abline(v=sigma.lambda, lwd=3, lty=2, col="red")
# Observation error
plot(density(unlist(x[, grep("sigma", rownames(res))[2]])), main="sigma.obs")
abline(v=sigma.obs, lwd=3, lty=2, col="red")
legend("topright", lty=c(1,2), lwd=c(1,3), col=c("black", "red"),
legend=c("Estimated", "Setting"), cex=0.6
)
##############
# Chapter 7: state-space models (binomial mixture model)
##############
# Generate the data: latent abundance N[i] ~ Pois(lambda) at each site,
# observed Nrep times with detection probability p[i] driven by light.
set.seed(11111)
Nsite <- 20
Nrep <- 2
meanlogitp <- log(0.7/(1-0.7))
light <- rnorm(Nsite, 0, 0.5)
coefL <- 2
p <- 1/(1+exp(-(meanlogitp + coefL*light)))
lambda <- 5
N <- rpois(Nsite, lambda)
y <- matrix(NA, nrow=Nrep, ncol=Nsite)
for (i in 1:Nsite) {
y[, i] <- rbinom(Nrep, N[i], p[i])
}
# Inspect the generated data
par(mar=c(5,5,1,1), ps=15)
plot(1:length(N), N, pch=15, xlab="Site", ylab="Abundance", ylim=c(0, max(N)), cex=2)
points(1:length(N), y[1, ], pch=1, cex=2)
points(1:length(N), y[2, ], pch=2, cex=2)
legend("topright", pch=c(15, 1, 2),
legend=c("True", "Obs. 1", "Obs. 2"))
# --- Model 1: constant detection probability ---
# Prepare the data
list.data <- list(y=y, Nsite=Nsite, Nrep=Nrep)
# Write the model to a text file that JAGS will read
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#状態プロセス
for (i in 1:Nsite) {
EN[i] ~ dpois(meanN)
}
meanN ~ dunif(0, 1000)
#観測プロセス
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y[j, i] ~ dbin(estp, EN[i])
}
}
estp ~ dunif(0, 1)
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
# Supply initial values (reused for all three chains)
inits <- list(EN = N,
meanN = 5,
estp = 0.5
)
inits <- list(inits, inits, inits)
# Give each chain's RNG a different seed
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
# Parameters to monitor
para <- c("EN", "meanN", "estp")
# Run the computation with JAGS
# MCMC settings
n.chains <- 3
n.iter <- 10000
n.update <- 10000
thin <- 5
# Load the package required for the computation
library(rjags)
# Record the start time
start.time <- Sys.time()
# Initialization
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
# Burn-in
update(m, n.update)
# Main sampling run
x <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
# Record the end time and report the elapsed time
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
# Summarize the posterior draws
res <- data.frame(summary(x)$statistics)
ci <- data.frame(summary(x)$quantiles)
# TRUE when the 95% credible interval excludes zero (both bounds same sign)
res$sig <- abs(sign(ci[, 1]) + sign(ci[, 5])) == 2
# Gelman-Rubin convergence diagnostic (Rhat)
rhat <- gelman.diag(x)[["psrf"]][, 1]
res$Rhat <- rhat
# Display the results
res
# Comparison with the true (set) values
par(mfrow=c(1,2), mar=c(5,5,1,1), ps=15)
# True vs estimated site abundance
plot(N, res[grep("EN", rownames(res)), 1], xlab="Set", ylab="Estimated (mean)",
main="Abundance", xlim=c(0, max(N)+2), ylim=c(0, max(N)+2))
abline(a=0, b=1, lwd=2)
# Posterior of mean abundance
plot(density(unlist(x[, grep("meanN", rownames(res))])), main=expression(lambda))
abline(v=lambda)
# --- Model 2: detection probability varies with light ---
# Prepare the data
list.data <- list(y=y, Nsite=Nsite, Nrep=Nrep, light=light)
# Write the model to a text file that JAGS will read
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#状態プロセス
for (i in 1:Nsite) {
EN[i] ~ dpois(meanN)
}
meanN ~ dunif(0, 1000)
#観測プロセス
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y[j, i] ~ dbin(estp[i], EN[i])
}
estp[i] <- 1/(1+exp(-(alpha + bLI*light[i])))
}
alpha ~ dnorm(0.0, 1.0E-3)
bLI ~ dnorm(0.0, 1.0E-3)
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
#
# Supply initial values (reused for all three chains)
inits <- list(EN = N,
meanN = 5,
bLI = 0,
alpha = 0
)
inits <- list(inits, inits, inits)
# Give each chain's RNG a different seed
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
# Parameters to monitor
para <- c("EN", "meanN", "alpha", "bLI")
# Run the computation with JAGS
# MCMC settings
n.chains <- 3
n.iter <- 10000
n.update <- 10000
thin <- 5
# Load the package required for the computation
library(rjags)
# Record the start time
start.time <- Sys.time()
# Initialization
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
# Burn-in
update(m, n.update)
# Main sampling run
x2 <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
# Record the end time and report the elapsed time
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
# Summarize the posterior draws
res2 <- data.frame(summary(x2)$statistics)
ci2 <- data.frame(summary(x2)$quantiles)
# TRUE when the 95% credible interval excludes zero (both bounds same sign)
res2$sig <- abs(sign(ci2[, 1]) + sign(ci2[, 5])) == 2
# Gelman-Rubin convergence diagnostic (Rhat)
rhat2 <- gelman.diag(x2)[["psrf"]][, 1]
res2$Rhat <- rhat2
# Display the results
res2
# Comparison with the set values (constant- vs varying-detection models)
par(mfrow=c(2,2), mar=c(5,5,1,1), ps=15)
# Constant detection model
# True vs estimated site abundance
plot(N, res[grep("EN", rownames(res)), 1], xlab="Set", ylab="Estimated (mean)",
main="Abundance", xlim=c(0, max(N)+2), ylim=c(0, max(N)+2))
abline(a=0, b=1, lwd=2)
# Posterior of mean abundance
plot(density(unlist(x[, grep("meanN", rownames(res))])), main=expression(lambda))
abline(v=lambda)
# Varying detection model (this analysis)
plot(N, res2[grep("EN", rownames(res2)), 1], xlab="Set", ylab="Estimated (mean)",
main="Abundance", xlim=c(0, max(N)+2), ylim=c(0, max(N)+2))
abline(a=0, b=1, lwd=2)
# Posterior of mean abundance
plot(density(unlist(x2[, grep("meanN", rownames(res2))])), main=expression(lambda))
abline(v=lambda)
# Computing the Bayesian p-value (posterior predictive checks): refit both
# models while also monitoring discrepancy statistics for the observed data
# (fit_data) and replicated data (fit_new).
# --- Constant detection probability ---
# Prepare the data
list.data <- list(y=y, Nsite=Nsite, Nrep=Nrep)
# Write the model to a text file that JAGS will read
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#状態プロセス
for (i in 1:Nsite) {
EN[i] ~ dpois(meanN)
}
meanN ~ dunif(0, 1000)
#観測プロセス
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y[j, i] ~ dbin(estp, EN[i])
#期待値
esty[j, i] <- estp*EN[i]
E[j, i] <- pow((y[j, i] - esty[j, i]), 2)/(esty[j, i] + 0.5)
}
}
estp ~ dunif(0, 1)
#Bayesian p value計算
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y_new[j, i] ~ dbin(estp, EN[i])
esty_new[j, i] <- estp*EN[i]
E_new[j, i] <- pow((y_new[j, i] - esty_new[j, i]), 2)/(esty_new[j, i] + 0.5)
}
}
fit_data <- sum(E[, ])
fit_new <- sum(E_new[, ])
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
# Supply initial values (reused for all three chains)
inits <- list(EN = N,
meanN = 5,
y_new = y,
estp = 0.5
)
inits <- list(inits, inits, inits)
# Give each chain's RNG a different seed
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
# Parameters to monitor (including the two fit statistics)
para <- c("EN", "meanN", "estp", "fit_data", "fit_new")
# Run the computation with JAGS
# MCMC settings
n.chains <- 3
n.iter <- 10000
n.update <- 10000
thin <- 5
# Load the package required for the computation
library(rjags)
# Record the start time
start.time <- Sys.time()
# Initialization
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
# Burn-in
update(m, n.update)
# Main sampling run
x <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
# Record the end time and report the elapsed time
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
# Summarize the posterior draws
res <- data.frame(summary(x)$statistics)
ci <- data.frame(summary(x)$quantiles)
# TRUE when the 95% credible interval excludes zero (both bounds same sign)
res$sig <- abs(sign(ci[, 1]) + sign(ci[, 5])) == 2
# Gelman-Rubin convergence diagnostic (Rhat)
rhat <- gelman.diag(x)[["psrf"]][, 1]
res$Rhat <- rhat
# --- Detection probability varying with light ---
# Prepare the data
list.data <- list(y=y, Nsite=Nsite, Nrep=Nrep, light=light)
# Write the model to a text file that JAGS will read
modelFilename = "testmod.txt"
cat("
#BUGS言語でモデルを記述する
model {
#状態プロセス
for (i in 1:Nsite) {
EN[i] ~ dpois(meanN)
}
meanN ~ dunif(0, 1000)
#観測プロセス
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y[j, i] ~ dbin(estp[i], EN[i])
#期待値
esty[j, i] <- estp[i]*EN[i]
E[j, i] <- pow((y[j, i] - esty[j, i]), 2)/(esty[j, i] + 0.5)
}
estp[i] <- 1/(1+exp(-(alpha + bLI*light[i])))
}
alpha ~ dnorm(0.0, 1.0E-3)
bLI ~ dnorm(0.0, 1.0E-3)
#Bayesian p value計算
for (i in 1:Nsite) {
for (j in 1:Nrep) {
y_new[j, i] ~ dbin(estp[i], EN[i])
esty_new[j, i] <- estp[i]*EN[i]
E_new[j, i] <- pow((y_new[j, i] - esty_new[j, i]), 2)/(esty_new[j, i] + 0.5)
}
}
fit_data <- sum(E[, ])
fit_new <- sum(E_new[, ])
} #モデルの記述はここまで
", fill=TRUE, file=modelFilename)
# Supply initial values (reused for all three chains)
inits <- list(EN = N,
meanN = 5,
y_new = y,
bLI = 0,
alpha = 0
)
inits <- list(inits, inits, inits)
# Give each chain's RNG a different seed
inits[[1]]$.RNG.name <- "base::Mersenne-Twister"
inits[[1]]$.RNG.seed <- 1
inits[[2]]$.RNG.name <- "base::Mersenne-Twister"
inits[[2]]$.RNG.seed <- 12
inits[[3]]$.RNG.name <- "base::Mersenne-Twister"
inits[[3]]$.RNG.seed <- 123
# Parameters to monitor (including the two fit statistics)
para <- c("EN", "meanN", "alpha", "bLI", "fit_data", "fit_new")
# Run the computation with JAGS
# MCMC settings
n.chains <- 3
n.iter <- 10000
n.update <- 10000
thin <- 5
# Load the package required for the computation
library(rjags)
# Record the start time
start.time <- Sys.time()
# Initialization
m <- jags.model(
file = modelFilename,
data = list.data,
inits = inits,
n.chain = n.chains
)
# Burn-in
update(m, n.update)
# Main sampling run
x2 <- coda.samples(
m,
para,
thin = thin, n.iter = n.iter
)
# Record the end time and report the elapsed time
end.time <- Sys.time()
elapsed.time <- difftime(end.time, start.time, units='hours')
cat(paste(paste('Posterior computed in ', elapsed.time, sep=''), ' hours\n', sep=''))
# Summarize the posterior draws
res2 <- data.frame(summary(x2)$statistics)
ci2 <- data.frame(summary(x2)$quantiles)
# TRUE when the 95% credible interval excludes zero (both bounds same sign)
res2$sig <- abs(sign(ci2[, 1]) + sign(ci2[, 5])) == 2
# Gelman-Rubin convergence diagnostic (Rhat)
rhat2 <- gelman.diag(x2)[["psrf"]][, 1]
res2$Rhat <- rhat2
# Bayesian p-value (posterior predictive check): proportion of MCMC draws in
# which the discrepancy of the observed data exceeds that of replicated data;
# values near 0.5 indicate adequate fit.
# FIX: variables must be selected as *columns* of the mcmc.list (x[, j]), as
# done elsewhere in this script; the original row indexing (x[i, ]) subsets
# iterations instead of the fit_data/fit_new parameters.
# Constant detection probability model
mean(unlist(x[, grep("fit_data", rownames(res))]) > unlist(x[, grep("fit_new", rownames(res))]))
# Varying detection probability model
mean(unlist(x2[, grep("fit_data", rownames(res2))]) > unlist(x2[, grep("fit_new", rownames(res2))]))
|
#' Scatterplot matrix tour path animation.
#'
#' Animate a nD tour path with a scatterplot matrix.
#'
#' The lines show the observations, and the points, the values of the
#' projection matrix.
#'
#' @param ... other arguments passed on to \code{\link{pairs}} when each
#'   frame is drawn
#' @return a list of rendering callbacks (used as the \code{display}
#'   argument of \code{\link{animate}})
#' @seealso \code{\link{animate}} for options that apply to all animations
#' @keywords hplot
#' @export
#' @examples
#' animate_scatmat(flea[, 1:6], grand_tour(2))
#' animate_scatmat(flea[, 1:6], grand_tour(6))
display_scatmat <- function(...) {
  # Draw one frame: project the data and show all pairwise scatterplots.
  render_data <- function(data, proj, geodesic) {
    pairs(data %*% proj, pch = 20, ...)
  }
  # Callback list consumed by the animation engine; `nul` is a no-op
  # placeholder defined elsewhere in the package (TODO confirm).
  list(
    init = nul,
    render_frame = nul,
    render_transition = nul,
    render_data = render_data,
    render_target = nul
  )
}
#' @rdname display_scatmat
#' @inheritParams animate
#' @export
# Convenience wrapper: run an nD tour (default: 3-d grand tour) rendered as a
# scatterplot matrix. Note that `...` is forwarded to both display_scatmat()
# and animate().
animate_scatmat <- function(data, tour_path = grand_tour(3), ...) {
  animate(data = data, tour_path = tour_path,
    display = display_scatmat(...), ...)
}
| /tourr/R/display-scatmat.r | no_license | ingted/R-Examples | R | false | false | 973 | r | #' Scatterplot matrix tour path animation.
#'
#' Animate a nD tour path with a scatterplot matrix.
#'
#' The lines show the observations, and the points, the values of the
#' projection matrix.
#'
#' @param ... other arguments passed on to \code{\link{pairs}} when each
#'   frame is drawn
#' @seealso \code{\link{animate}} for options that apply to all animations
#' @keywords hplot
#' @export
#' @examples
#' animate_scatmat(flea[, 1:6], grand_tour(2))
#' animate_scatmat(flea[, 1:6], grand_tour(6))
display_scatmat <- function(...) {
  # Draw one frame: project the data and show all pairwise scatterplots.
  render_data <- function(data, proj, geodesic) {
    pairs(data %*% proj, pch = 20, ...)
  }
  # Callback list consumed by the animation engine; `nul` is a no-op
  # placeholder defined elsewhere in the package (TODO confirm).
  list(
    init = nul,
    render_frame = nul,
    render_transition = nul,
    render_data = render_data,
    render_target = nul
  )
}
#' @rdname display_scatmat
#' @inheritParams animate
#' @export
# Convenience wrapper: run an nD tour (default: 3-d grand tour) rendered as a
# scatterplot matrix. Note that `...` is forwarded to both display_scatmat()
# and animate().
animate_scatmat <- function(data, tour_path = grand_tour(3), ...) {
  animate(data = data, tour_path = tour_path,
    display = display_scatmat(...), ...)
}
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{smart.points}
\alias{smart.points}
\title{(private) An alternative to points.default() for plotting a large number of
densely distributed points.}
\usage{
smart.points(x, y = NULL, resolution = 50, col = NULL, clip = Inf,
color.clipped = TRUE, ...)
}
\arguments{
\item{x}{x}
\item{y}{y}
\item{resolution}{a number, determines the distance below which
points will be considered as overlapping.}
\item{col}{color}
\item{clip}{clip}
\item{color.clipped}{color of clipped points}
\item{...}{other arguments are the same as in plot.default().}
}
\description{
See description of \code{\link{smart.plot}} for more details.
}
| /man/smart.points.Rd | no_license | diystat/NBPSeq | R | false | false | 686 | rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{smart.points}
\alias{smart.points}
\title{(private) An alternative to points.default() for plotting a large number of
densely distributed points.}
\usage{
smart.points(x, y = NULL, resolution = 50, col = NULL, clip = Inf,
color.clipped = TRUE, ...)
}
\arguments{
\item{x}{x}
\item{y}{y}
\item{resolution}{a number, determines the distance below which
points will be considered as overlapping.}
\item{col}{color}
\item{clip}{clip}
\item{color.clipped}{color of clipped points}
\item{...}{other arguments are the same as in plot.default().}
}
\description{
See description of \code{\link{smart.plot}} for more details.
}
|
# Kron radius (first-moment radius) of a Sersic profile with index n,
# integrated out to radius r, for effective radius re. Computed from the
# ratio of lower incomplete gamma functions Gamma(3n, x)/Gamma(2n, x);
# `igamma` is provided elsewhere in this package.
kronrad = function(n, r = 1e10, re = 1){
    # Sersic b_n: the gamma(2n) median, i.e. qgamma(0.5, 2n).
    b <- qgamma(0.5, 2 * n)
    # Dimensionless outer radius x = b_n * (r/re)^(1/n).
    xmax <- b * (r / re)^(1 / n)
    # Warnings from the incomplete gamma evaluation are suppressed,
    # as in the original implementation.
    suppressWarnings((re / b^n) * (igamma(xmax, 3 * n) / igamma(xmax, 2 * n)))
}
| /R/kronrad.R | no_license | cran/astro | R | false | false | 200 | r | kronrad = function(n, r = 1e10, re = 1){
# kron radii
bn = qgamma(0.5,2*n)
x = bn*((r/re)^(1/n))
krad = suppressWarnings((re/(bn^n))*(igamma(x,3*n)/igamma(x,2*n)))
return(krad)
}
|
# Read a phylogenetic tree from a Newick file and remove its root with ape;
# the unrooted tree is written back out on the following line.
library(ape)
testtree <- read.tree("9943_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9943_0_unrooted.txt") | /codeml_files/newick_trees_processed/9943_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
# Read the Newick tree and remove its root (ape); the result is written
# out below.
testtree <- read.tree("9943_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9943_0_unrooted.txt") |
# Sarcasm detection on tweets with a naive Bayes classifier (tm + e1071).
library(readxl)
library(tm)
library(wordcloud)
library(e1071)
library(gmodels)
# Interactive file picker: the CSV must contain `text` and `sarcasm` columns.
tweet <- read.csv(file.choose(), header = T,fill = TRUE, stringsAsFactors = F)
tweet$sarcasm <- factor(tweet$sarcasm)
table(tweet$sarcasm)
tweet_corpus <- VCorpus(VectorSource(tweet$text))
# Document-term matrix with basic text normalization.
tweet_dtm <- DocumentTermMatrix(tweet_corpus, control = list(
tolower = TRUE,
removeNumbers = TRUE,
stopwords = TRUE,
removePunctuation = TRUE,
stemming = TRUE
))
# creating training and test datasets (sequential split; discarded below)
tweet_dtm_train <- tweet_dtm[1:1593, ]
tweet_dtm_test <- tweet_dtm[1594:1992, ]
# also save the labels
tweet_train_labels <- tweet[1:1593, ]$sarcasm
tweet_test_labels <- tweet[1594:1992, ]$sarcasm
# check that the proportion of sarcastic tweets is similar
prop.table(table(tweet_train_labels))
prop.table(table(tweet_test_labels))
## proportion is not the same on train and test data, so this split is dropped
rm(tweet_dtm_train)
rm(tweet_dtm_test)
rm(tweet_train_labels)
rm(tweet_test_labels)
# Create random samples instead.
# NOTE(review): this samples 800 of the *first 1000* rows only, although the
# data indexed above runs to row 1992; tweet_test below therefore keeps all
# remaining 1192 rows — confirm this split is intended.
set.seed(123)
train_index <- sample(1000, 800)
tweet_train <- tweet[train_index, ]
tweet_test <- tweet[-train_index, ]
# check the proportion of the class variable
prop.table(table(tweet_train$sarcasm))
prop.table(table(tweet_test$sarcasm))
train_corpus <- VCorpus(VectorSource(tweet_train$text))
test_corpus <- VCorpus(VectorSource(tweet_test$text))
# subset the training data into sarcastic (1) and non-sarcastic (0) groups
positive <- subset(tweet_train, sarcasm == 1)
negative <- subset(tweet_train, sarcasm == 0)
wordcloud(positive$text, max.words = 40, scale = c(5, 0.5))
wordcloud(negative$text, max.words = 40, scale = c(5, 0.5))
# create a document-term sparse matrix directly for train and test
train_dtm <- DocumentTermMatrix(train_corpus, control = list(
tolower = TRUE,
removeNumbers = TRUE,
stopwords = TRUE,
removePunctuation = TRUE,
stemming = TRUE
))
test_dtm <- DocumentTermMatrix(test_corpus, control = list(
tolower = TRUE,
removeNumbers = TRUE,
stopwords = TRUE,
removePunctuation = TRUE,
stemming = TRUE
))
train_dtm
test_dtm
# convert term counts to presence/absence factors ("Yes"/"No");
# the assignment's value is returned implicitly
convert_counts <- function(x) {
x <- ifelse(x > 0, "Yes", "No")
}
# apply() convert_counts() to columns of train/test data
train_dtm_binary <- apply(train_dtm, MARGIN = 2, convert_counts)
test_dtm_binary <- apply(test_dtm, MARGIN = 2, convert_counts)
# Baseline naive Bayes model (no Laplace smoothing)
tweet_classifier <- naiveBayes(as.matrix(train_dtm_binary), tweet_train$sarcasm)
tweet_test_pred <- predict(tweet_classifier, as.matrix(test_dtm_binary))
head(tweet_test_pred)
CrossTable(tweet_test_pred, tweet_test$sarcasm,
prop.chisq = FALSE, prop.t = FALSE, prop.r = FALSE,
dnn = c('predicted', 'actual'))
# accuracy is 0.91
# improving accuracy with Laplace smoothing = 1
tweet_classifier2 <- naiveBayes(as.matrix(train_dtm_binary), tweet_train$sarcasm, laplace = 1)
tweet_test_pred2 <- predict(tweet_classifier2, as.matrix(test_dtm_binary))
CrossTable(tweet_test_pred2, tweet_test$sarcasm,
prop.chisq = FALSE, prop.t = FALSE, prop.r = FALSE,
dnn = c('predicted', 'actual'))
# accuracy is 0.92
# improving accuracy with Laplace smoothing = 0.5
tweet_classifier3 <- naiveBayes(as.matrix(train_dtm_binary), tweet_train$sarcasm, laplace = .5)
tweet_test_pred3 <- predict(tweet_classifier3, as.matrix(test_dtm_binary))
CrossTable(tweet_test_pred3, tweet_test$sarcasm,
prop.chisq = FALSE, prop.t = FALSE, prop.r = FALSE,
dnn = c('predicted', 'actual'))
# accuracy 0.91
# Dump the two training subsets for later inspection
write.table(positive$text, file = "E:/Project 3th sem/positive.txt")
write.table(negative$text, file = "E:/Project 3th sem/negative.txt")
| /navie-beys.R | no_license | smitchaute/Sarcasm_detection-in-R | R | false | false | 3,548 | r | library(readxl)
# Sarcasm detection on tweets with a naive Bayes classifier (tm + e1071).
library(tm)
library(wordcloud)
library(e1071)
library(gmodels)
# Interactive file picker: the CSV must contain `text` and `sarcasm` columns.
tweet <- read.csv(file.choose(), header = T,fill = TRUE, stringsAsFactors = F)
tweet$sarcasm <- factor(tweet$sarcasm)
table(tweet$sarcasm)
tweet_corpus <- VCorpus(VectorSource(tweet$text))
# Document-term matrix with basic text normalization.
tweet_dtm <- DocumentTermMatrix(tweet_corpus, control = list(
tolower = TRUE,
removeNumbers = TRUE,
stopwords = TRUE,
removePunctuation = TRUE,
stemming = TRUE
))
# creating training and test datasets (sequential split; discarded below)
tweet_dtm_train <- tweet_dtm[1:1593, ]
tweet_dtm_test <- tweet_dtm[1594:1992, ]
# also save the labels
tweet_train_labels <- tweet[1:1593, ]$sarcasm
tweet_test_labels <- tweet[1594:1992, ]$sarcasm
# check that the proportion of sarcastic tweets is similar
prop.table(table(tweet_train_labels))
prop.table(table(tweet_test_labels))
## proportion is not the same on train and test data, so this split is dropped
rm(tweet_dtm_train)
rm(tweet_dtm_test)
rm(tweet_train_labels)
rm(tweet_test_labels)
# Create random samples instead.
# NOTE(review): this samples 800 of the *first 1000* rows only, although the
# data indexed above runs to row 1992; tweet_test below therefore keeps all
# remaining 1192 rows — confirm this split is intended.
set.seed(123)
train_index <- sample(1000, 800)
tweet_train <- tweet[train_index, ]
tweet_test <- tweet[-train_index, ]
# check the proportion of the class variable
prop.table(table(tweet_train$sarcasm))
prop.table(table(tweet_test$sarcasm))
train_corpus <- VCorpus(VectorSource(tweet_train$text))
test_corpus <- VCorpus(VectorSource(tweet_test$text))
# subset the training data into sarcastic (1) and non-sarcastic (0) groups
positive <- subset(tweet_train, sarcasm == 1)
negative <- subset(tweet_train, sarcasm == 0)
wordcloud(positive$text, max.words = 40, scale = c(5, 0.5))
wordcloud(negative$text, max.words = 40, scale = c(5, 0.5))
# create a document-term sparse matrix directly for train and test
train_dtm <- DocumentTermMatrix(train_corpus, control = list(
tolower = TRUE,
removeNumbers = TRUE,
stopwords = TRUE,
removePunctuation = TRUE,
stemming = TRUE
))
test_dtm <- DocumentTermMatrix(test_corpus, control = list(
tolower = TRUE,
removeNumbers = TRUE,
stopwords = TRUE,
removePunctuation = TRUE,
stemming = TRUE
))
train_dtm
test_dtm
# convert term counts to presence/absence factors ("Yes"/"No");
# the assignment's value is returned implicitly
convert_counts <- function(x) {
x <- ifelse(x > 0, "Yes", "No")
}
# apply() convert_counts() to columns of train/test data
train_dtm_binary <- apply(train_dtm, MARGIN = 2, convert_counts)
test_dtm_binary <- apply(test_dtm, MARGIN = 2, convert_counts)
# Baseline naive Bayes model (no Laplace smoothing)
tweet_classifier <- naiveBayes(as.matrix(train_dtm_binary), tweet_train$sarcasm)
tweet_test_pred <- predict(tweet_classifier, as.matrix(test_dtm_binary))
head(tweet_test_pred)
CrossTable(tweet_test_pred, tweet_test$sarcasm,
prop.chisq = FALSE, prop.t = FALSE, prop.r = FALSE,
dnn = c('predicted', 'actual'))
# accuracy is 0.91
# improving accuracy with Laplace smoothing = 1
tweet_classifier2 <- naiveBayes(as.matrix(train_dtm_binary), tweet_train$sarcasm, laplace = 1)
tweet_test_pred2 <- predict(tweet_classifier2, as.matrix(test_dtm_binary))
CrossTable(tweet_test_pred2, tweet_test$sarcasm,
prop.chisq = FALSE, prop.t = FALSE, prop.r = FALSE,
dnn = c('predicted', 'actual'))
# accuracy is 0.92
# improving accuracy with Laplace smoothing = 0.5
tweet_classifier3 <- naiveBayes(as.matrix(train_dtm_binary), tweet_train$sarcasm, laplace = .5)
tweet_test_pred3 <- predict(tweet_classifier3, as.matrix(test_dtm_binary))
CrossTable(tweet_test_pred3, tweet_test$sarcasm,
prop.chisq = FALSE, prop.t = FALSE, prop.r = FALSE,
dnn = c('predicted', 'actual'))
# accuracy 0.91
# Dump the two training subsets for later inspection
write.table(positive$text, file = "E:/Project 3th sem/positive.txt")
write.table(negative$text, file = "E:/Project 3th sem/negative.txt")
|
# Build the `macleish_layers` list of sf spatial layers for the package.
#
# Reads every raw GIS layer from data-raw/macleish/, adds two hand-made
# layers (camp sites, elevation contours), normalises everything to
# WGS84 (EPSG:4326), draws a sanity-check map, and saves the result
# with usethis::use_data().
library(sf)
library(ggplot2)
library(usethis)

# Directory holding the raw GIS layers; list what is available there.
dsn <- path.expand("data-raw/macleish/")
sf::st_layers(dsn)
layers <- c("landmarks", "forests", "streams",
"challenge_courses",
"buildings", "wetlands",
# "slopes",
"boundary",
"research", "soil", "trails")
# Read each named layer and keep the names on the resulting list.
macleish_layers <- lapply(layers, st_read, dsn = dsn)
names(macleish_layers) <- layers
# Add the two camp sites as a hand-built point layer in WGS84 lon/lat.
macleish_layers[["camp_sites"]] <- tibble::tribble(
~name, ~lat, ~lon,
"Group Campsite", 42.450976, -72.678154,
"Remote Campsite", 42.458549, -72.679581
) %>%
st_as_sf(coords = c("lon", "lat")) %>%
st_set_crs(4326)
# Rename the awkward shapefile field in the forests layer.
macleish_layers[["forests"]] <- macleish_layers %>%
purrr::pluck("forests") %>%
dplyr::rename(type = Sheet1__Na)
# Add 30 foot contours clipped to the property.
macleish_layers[["elevation"]] <- macleish::mass_gis() %>%
macleish::macleish_intersect() %>%
st_transform(4326)
# Reproject every layer in the list to EPSG:4326.
macleish_layers <- lapply(macleish_layers, st_transform, crs = 4326)
# Force-set the CRS metadata as well; see
# https://stackoverflow.com/questions/61286108/error-in-cpl-transformx-crs-aoi-pipeline-reverse-ogrcreatecoordinatetrans
# Fix: seq_along() replaces the unsafe 1:length() pattern, which would
# iterate over c(1, 0) if the list were ever empty.
for (i in seq_along(macleish_layers)) {
  st_crs(macleish_layers[[i]]) <- 4326
}
# Check to make sure they are all projected the same.
lapply(macleish_layers, st_crs)
# Plot every layer once as a visual sanity check.
names(macleish_layers)
ggplot() +
geom_sf(data = macleish_layers[["landmarks"]]) +
geom_sf(data = macleish_layers[["forests"]]) +
geom_sf(data = macleish_layers[["streams"]]) +
geom_sf(data = macleish_layers[["challenge_courses"]]) +
geom_sf(data = macleish_layers[["buildings"]]) +
geom_sf(data = macleish_layers[["wetlands"]]) +
geom_sf(data = macleish_layers[["boundary"]]) +
geom_sf(data = macleish_layers[["research"]]) +
geom_sf(data = macleish_layers[["soil"]]) +
geom_sf(data = macleish_layers[["trails"]]) +
geom_sf(data = macleish_layers[["camp_sites"]]) +
geom_sf(data = macleish_layers[["elevation"]])
# Save as xz-compressed package data.
usethis::use_data(macleish_layers, overwrite = TRUE, compress = "xz")
| /data-raw/spatial.R | no_license | wndlovu/macleish | R | false | false | 2,079 | r | library(sf)
# NOTE(review): this is the dataset's duplicate "text" column copy of
# data-raw/spatial.R; its first line, library(sf), sits on the metadata
# row just above and is therefore outside this span.
library(ggplot2)
library(usethis)
# Creates the macleish_layers list of sf spatial layers for the package.
dsn <- path.expand("data-raw/macleish/")
sf::st_layers(dsn)
layers <- c("landmarks", "forests", "streams",
"challenge_courses",
"buildings", "wetlands",
# "slopes",
"boundary",
"research", "soil", "trails")
# Read each named raw GIS layer and keep the names on the list.
macleish_layers <- lapply(layers, st_read, dsn = dsn)
names(macleish_layers) <- layers
# Add the two camp sites as a hand-built point layer (WGS84 lon/lat).
macleish_layers[["camp_sites"]] <- tibble::tribble(
~name, ~lat, ~lon,
"Group Campsite", 42.450976, -72.678154,
"Remote Campsite", 42.458549, -72.679581
) %>%
st_as_sf(coords = c("lon", "lat")) %>%
st_set_crs(4326)
# Rename the awkward shapefile field in the forests layer.
macleish_layers[["forests"]] <- macleish_layers %>%
purrr::pluck("forests") %>%
dplyr::rename(type = Sheet1__Na)
# add 30 foot contours clipped to the property
macleish_layers[["elevation"]] <- macleish::mass_gis() %>%
macleish::macleish_intersect() %>%
st_transform(4326)
# Set coordinate reference system (EPSG:4326) for the entire list.
macleish_layers <- lapply(macleish_layers, st_transform, crs = 4326)
# Force-set CRS metadata too; see
# https://stackoverflow.com/questions/61286108/error-in-cpl-transformx-crs-aoi-pipeline-reverse-ogrcreatecoordinatetrans
# NOTE(review): 1:length(x) iterates over c(1, 0) when x is empty;
# seq_along(macleish_layers) would be the safe idiom here.
for(i in 1:length(macleish_layers)){
st_crs(macleish_layers[[i]]) <- 4326
}
# check to make sure they are all projected the same
lapply(macleish_layers, st_crs)
# Plot all layers once as a visual sanity check.
names(macleish_layers)
ggplot() +
geom_sf(data = macleish_layers[["landmarks"]]) +
geom_sf(data = macleish_layers[["forests"]]) +
geom_sf(data = macleish_layers[["streams"]]) +
geom_sf(data = macleish_layers[["challenge_courses"]]) +
geom_sf(data = macleish_layers[["buildings"]]) +
geom_sf(data = macleish_layers[["wetlands"]]) +
geom_sf(data = macleish_layers[["boundary"]]) +
geom_sf(data = macleish_layers[["research"]]) +
geom_sf(data = macleish_layers[["soil"]]) +
geom_sf(data = macleish_layers[["trails"]]) +
geom_sf(data = macleish_layers[["camp_sites"]]) +
geom_sf(data = macleish_layers[["elevation"]])
# Save as xz-compressed package data.
usethis::use_data(macleish_layers, overwrite = TRUE, compress = "xz")
|
maxtemp <- ts(c(
38.1,
32.4,
34.5,
20.7,
21.5,
23.1,
29.7,
36.6,
36.1,
20.6,
20.4,
30.1,
38.7,
41.4,
37.0,
36.0,
37.0,
38.0,
23.0,
26.7,
27.5,
21.7,
22.9,
26.2,
36.5,
41.8,
21.5,
19.2,
25.0,
28.9,
23.2,
31.5,
36.2,
38.2,
26.4,
20.9,
21.5,
30.2,
33.4,
32.6,
22.2,
21.7,
30.0,
35.7,
32.8,
39.3,
25.5,
23.0,
19.9,
21.3,
20.8,
21.7,
23.8,
29.0,
23.7,
21.3,
28.5,
33.6,
34.6,
34.2,
27.0,
24.2,
19.9,
19.7,
21.5,
30.6,
30.0,
19.0,
19.6,
20.6,
23.6,
17.9,
17.3,
21.4,
24.1,
20.9,
30.1,
32.6,
21.3,
19.5,
19.9,
21.0,
25.4,
17.5,
20.4,
26.8,
25.8,
20.9,
19.4,
25.8,
26.3,
29.6,
30.3,
23.6,
28.4,
20.7,
24.1,
27.3,
23.2,
18.3,
24.6,
27.4,
20.4,
18.1,
25.2,
19.8,
21.0,
23.7,
19.6,
18.1,
20.8,
26.0,
18.4,
22.0,
14.4,
19.9,
22.6,
13.7,
15.9,
21.2,
23.7,
24.0,
17.2,
23.2,
25.2,
17.2,
16.0,
15.6,
13.4,
16.0,
16.8,
14.6,
19.4,
21.0,
19.5,
18.5,
13.3,
13.7,
14.3,
14.1,
11.4,
13.6,
16.6,
17.6,
14.6,
17.2,
14.4,
16.4,
17.3,
17.6,
17.2,
17.7,
14.2,
16.6,
15.7,
13.7,
14.7,
13.1,
12.9,
15.4,
11.9,
15.2,
15.3,
16.5,
16.1,
11.7,
11.2,
11.5,
10.8,
16.1,
14.8,
13.6,
13.8,
9.7,
10.7,
11.0,
15.3,
15.3,
17.0,
16.0,
16.3,
15.7,
14.5,
10.8,
10.5,
13.4,
12.2,
13.2,
13.0,
12.4,
13.1,
9.8,
10.5,
13.4,
11.0,
13.1,
15.0,
16.7,
16.1,
18.2,
15.7,
17.7,
15.9,
15.1,
15.2,
14.7,
13.3,
14.5,
11.1,
13.1,
13.7,
14.6,
12.9,
12.8,
15.2,
14.5,
17.2,
14.5,
14.4,
11.0,
13.1,
13.6,
14.6,
12.7,
13.6,
12.7,
15.5,
17.4,
15.2,
14.2,
17.7,
19.2,
12.5,
14.2,
15.3,
15.7,
17.0,
19.0,
13.1,
13.2,
13.2,
15.7,
14.1,
15.6,
15.5,
15.9,
15.1,
16.0,
19.4,
21.5,
23.7,
18.7,
23.8,
18.0,
16.2,
18.5,
20.6,
18.3,
22.5,
26.9,
19.4,
15.9,
20.5,
21.2,
19.5,
14.7,
17.6,
15.8,
17.7,
14.3,
16.8,
18.6,
21.9,
21.4,
20.8,
14.0,
17.0,
23.0,
26.4,
19.6,
22.7,
26.9,
14.7,
15.2,
19.8,
26.9,
20.2,
14.3,
14.8,
18.5,
21.7,
21.4,
21.8,
18.2,
15.8,
15.3,
18.5,
19.2,
28.5,
32.2,
21.8,
22.1,
20.7,
17.0,
24.7,
26.2,
29.0,
21.6,
17.1,
16.9,
19.1,
24.7,
25.4,
19.8,
18.2,
16.3,
17.0,
17.7,
15.5,
14.7,
15.8,
19.9,
20.4,
23.3,
20.2,
28.8,
31.2,
17.4,
18.5,
26.8,
34.3,
30.1,
20.5,
20.5,
19.8,
27.0,
21.0,
33.0,
22.6,
28.3,
21.1,
19.0,
17.3,
27.0,
30.2,
24.8,
17.9,
17.9,
20.7,
30.9,
36.2,
21.0,
20.2,
21.3,
24.2,
21.0,
20.7,
17.8,
19.6,
22.6,
20.5,
24.1,
22.2,
27.0,
33.6,
26.6,
20.6,
24.5,
19.8,
22.6,
29.2,
20.3,
23.0,
24.4,
38.0,
40.5,
24.2,
20.2,
21.8,
27.0,
35.2,
25.2,
32.7,
35.9,
38.9,
26.5,
21.8,
37.9,
43.3,
19.0,
19.7,
21.4,
32.0,
33.3,
22.2,
21.3,
20.8,
22.3,
22.5,
21.4,
23.0,
35.1,
40.3,
39.0,
21.1,
25.4,
23.6,
28.1,
37.0,
39.3,
39.4,
25.8,
27.7,
23.0,
24.0,
26.1,
21.8,
24.2,
22.3,
19.7,
20.8,
17.9,
20.1,
20.9,
21.2,
20.4,
29.0,
34.7,
34.0,
30.9,
29.6,
26.1,
18.5,
21.2,
22.4,
21.4,
30.5,
32.0,
32.7,
27.4,
18.9,
19.4,
22.2,
30.2,
31.8,
31.4,
18.5,
22.2,
27.5,
25.8,
25.2,
19.1,
19.6,
21.4,
20.7,
28.3,
23.0,
16.5,
18.8,
20.0,
20.6,
19.8,
20.3,
26.6,
21.5,
22.3,
26.5,
27.1,
30.0,
28.2,
17.3,
16.8,
17.5,
19.2,
20.3,
20.6,
23.7,
24.3,
26.4,
16.0,
14.5,
14.2,
16.5,
15.7,
16.0,
16.7,
19.8,
22.5,
22.9,
18.8,
15.7,
14.7,
15.3,
15.3,
18.0,
15.1,
15.1,
13.6,
15.3,
18.0,
18.6,
22.0,
22.1,
17.8,
15.2,
11.3,
13.5,
16.1,
16.0,
16.5,
16.7,
19.6,
19.5,
18.0,
18.4,
12.9,
14.9,
12.4,
11.9,
11.7,
12.9,
12.9,
12.4,
12.5,
15.7,
15.9,
14.3,
16.9,
14.0,
13.6,
13.7,
16.0,
13.1,
13.8,
13.4,
11.3,
10.6,
13.9,
12.7,
12.5,
11.3,
11.6,
14.0,
13.7,
15.2,
14.0,
11.4,
14.2,
15.2,
12.6,
10.2,
13.9,
13.7,
13.6,
13.2,
15.4,
12.9,
11.9,
13.3,
13.3,
10.5,
10.0,
11.0,
8.3,
10.7,
11.2,
11.7,
14.2,
14.5,
16.0,
16.7,
14.4,
17.0,
14.6,
11.6,
12.8,
14.0,
17.1,
15.4,
15.5,
15.1,
16.9,
14.2,
15.9,
18.2,
19.5,
19.5,
19.8,
15.6,
14.1,
17.0,
14.3,
13.1,
13.7,
18.2,
19.8,
22.3,
17.8,
15.0,
22.6,
23.7,
15.8,
15.0,
23.5,
25.7,
26.5,
15.1,
13.9,
20.5,
21.4,
14.9,
15.0,
18.9,
11.7,
15.5,
17.7,
11.0,
14.9,
13.1,
13.7,
15.4,
15.6,
23.9,
23.7,
24.3,
15.9,
13.5,
15.3,
19.3,
20.5,
22.5,
23.2,
16.3,
14.8,
13.7,
18.4,
12.3,
13.3,
16.2,
25.6,
24.1,
13.8,
15.9,
21.2,
21.7,
15.6,
13.5,
15.0,
21.2,
25.4,
19.0,
15.0,
16.4,
15.6,
13.7,
22.0,
17.4,
15.1,
14.6,
16.9,
15.9,
21.3,
29.7,
33.0,
22.7,
21.6,
30.3,
19.0,
14.6,
15.9,
20.4,
29.3,
33.7,
35.2,
38.1,
24.5,
26.3,
25.0,
17.9,
18.1,
23.3,
31.2,
32.8,
28.0,
19.7,
22.1,
16.3,
20.1,
21.7,
19.6,
30.8,
36.8,
38.1,
17.6,
21.2,
17.2,
20.7,
26.1,
34.5,
18.6,
20.1,
22.4,
19.1,
19.2,
28.0,
17.9,
18.1,
16.0,
18.5,
20.6,
33.0,
35.2,
24.0,
26.0,
28.0,
23.0,
19.0,
19.1,
18.0,
19.4,
32.5,
32.6,
18.1,
19.7,
28.0,
28.0,
23.8,
31.4,
22.0,
35.8,
23.0,
18.9,
25.4,
28.0,
19.2,
21.5,
28.8,
37.3,
24.6,
21.3,
17.9,
17.5,
18.7,
18.1,
19.7,
22.1,
19.6,
18.0,
19.9,
28.5,
23.0,
37.0,
19.0,
19.4,
35.0,
27.7,
23.9,
25.6,
27.7,
24.3,
38.6,
41.2,
22.0,
22.5,
22.3,
25.5,
36.6,
35.7,
43.2,
28.0,
25.5,
21.6,
21.8,
23.3,
34.3,
31.2,
43.0,
20.2,
20.3,
22.2,
24.0,
34.4,
32.9,
21.6,
26.5,
33.2,
38.6,
24.4,
28.3,
35.2,
28.7,
35.6,
37.2,
25.5,
31.5,
30.2,
29.0,
21.4,
28.5,
32.1,
23.5,
20.0,
21.0,
20.0,
21.8,
24.3,
30.9,
21.6,
22.2,
20.2,
20.5,
17.6,
18.6,
17.7,
18.0,
18.6,
18.6,
25.9,
19.8,
18.7,
17.9,
17.3,
18.1,
14.6,
14.1,
15.7,
16.0,
21.6,
21.4,
23.8,
21.7,
16.6,
16.7,
18.1,
14.5,
14.7,
16.4,
16.5,
22.7,
23.8,
24.6,
23.6,
19.8,
19.8,
18.0,
17.6,
19.0,
17.5,
17.0,
19.9,
18.5,
18.7,
17.8,
17.0,
17.6,
18.7,
17.0,
18.8,
17.5,
19.5,
16.6,
19.1,
21.2,
20.2,
17.9,
14.3,
16.0,
17.7,
18.6,
19.1,
13.7,
17.5,
16.1,
15.2,
17.3,
14.9,
18.4,
17.2,
12.2,
13.9,
14.0,
15.3,
15.0,
15.1,
16.6,
16.5,
16.1,
13.5,
13.0,
13.6,
11.8,
14.5,
14.0,
14.1,
13.5,
12.1,
11.9,
11.9,
14.2,
14.0,
17.9,
19.1,
15.1,
13.8,
13.7,
10.0,
10.3,
10.4,
12.0,
8.5,
14.4,
13.1,
14.0,
11.8,
13.9,
12.9,
11.0,
11.8,
10.7,
11.2,
10.3,
12.2,
12.4,
14.7,
15.1,
15.0,
14.0,
15.2,
14.2,
8.7,
11.0,
14.2,
15.2,
12.2,
13.0,
14.7,
17.2,
13.7,
14.8,
13.8,
13.7,
13.1,
12.7,
12.3,
12.4,
13.0,
13.5,
13.5,
14.7,
13.4,
15.9,
16.0,
15.3,
15.5,
18.2,
18.6,
18.5,
19.2,
14.2,
12.8,
13.1,
15.4,
17.3,
14.5,
19.5,
20.2,
16.3,
13.0,
13.3,
13.3,
19.0,
16.8,
19.2,
18.5,
19.0,
17.4,
15.3,
15.8,
18.7,
17.8,
16.5,
15.3,
13.2,
12.0,
14.0,
9.5,
13.0,
17.6,
19.6,
17.2,
17.3,
17.0,
16.3,
17.3,
20.3,
16.9,
13.5,
13.3,
20.7,
19.8,
20.8,
19.8,
19.5,
23.4,
14.2,
14.2,
16.6,
17.0,
15.6,
14.8,
17.2,
20.2,
16.7,
18.2,
18.1,
22.5,
23.4,
15.3,
13.7,
15.6,
18.9,
15.1,
16.6,
17.7,
23.0,
16.8,
21.3,
17.9,
23.6,
25.3,
14.5,
16.9,
16.3,
17.7,
18.0,
21.7,
22.3,
15.5,
16.7,
24.3,
23.5,
27.7,
18.0,
15.6,
19.0,
19.5,
20.1,
31.3,
15.6,
12.8,
13.8,
15.8,
16.4,
17.6,
28.2,
25.5,
22.7,
28.0,
17.0,
22.6,
24.5,
25.6,
27.5,
18.5,
18.2,
18.8,
17.8,
18.9,
24.0,
20.9,
16.8,
18.5,
20.6,
24.9,
28.1,
25.7,
19.2,
32.7,
21.3,
20.4,
20.5,
22.8,
35.3,
38.7,
21.4,
23.3,
22.0,
31.7,
23.4,
19.1,
19.5,
30.0,
32.4,
33.6,
30.0,
27.7,
28.1,
23.7,
20.6,
32.3,
31.2,
21.5,
23.0,
21.9,
23.0,
21.6,
27.2,
29.2,
32.0,
22.5,
23.9,
19.7,
19.5,
22.0,
22.1,
19.7,
19.4,
30.5,
28.5,
20.2,
19.0,
20.2,
22.6,
28.4,
18.6,
20.3,
19.0,
26.2,
30.0,
24.2,
23.4,
34.7,
20.4,
20.7,
20.0,
19.5,
31.6,
33.7,
23.5,
24.6,
33.9,
22.4,
24.3,
29.5,
31.7,
32.9,
20.5,
28.5,
27.3,
19.5,
21.4,
33.2,
35.0,
18.1,
17.2,
18.1,
19.5,
21.5,
29.2,
33.8,
20.8,
17.5,
21.0,
27.4,
21.2,
27.1,
22.8,
18.7,
19.0,
19.7,
28.7,
25.5,
18.4,
18.5,
23.6,
26.9,
28.0,
24.7,
21.5,
17.5,
16.1,
18.7,
24.5,
27.6,
17.4,
18.2,
19.7,
18.5,
17.7,
20.7,
21.2,
17.2,
21.5,
22.0,
23.2,
25.4,
24.6,
24.8,
25.0,
25.0,
18.2,
21.5,
16.2,
17.3,
15.5,
16.4,
19.6,
18.5,
16.5,
16.0,
17.7,
19.2,
16.8,
16.5,
22.5,
22.7,
23.5,
19.0,
15.5,
13.5,
14.0,
14.7,
15.6,
17.0,
18.0,
19.6,
17.1,
16.7,
16.9,
18.2,
19.4,
22.0,
21.7,
22.0,
22.2,
20.0,
16.5,
15.6,
15.0,
16.6,
18.7,
18.7,
18.1,
15.6,
20.1,
20.3,
15.2,
17.1,
16.3,
18.3,
18.3,
19.0,
20.0,
15.7,
14.9,
13.1,
13.6,
13.8,
15.0,
16.9,
18.3,
16.4,
14.0,
12.5,
12.8,
12.2,
14.2,
14.2,
15.5,
15.1,
14.4,
12.9,
10.8,
11.7,
13.3,
12.4,
13.4,
10.6,
7.0,
12.6,
13.2,
12.1,
10.3,
15.7,
11.7,
15.1,
15.8,
15.0,
11.6,
11.4,
10.0,
14.7,
13.6,
13.2,
11.0,
13.0,
12.0,
10.6,
13.3,
14.5,
13.5,
15.9,
12.8,
11.7,
13.0,
14.0,
16.0,
17.0,
16.3,
17.6,
19.5,
14.0,
17.5,
19.2,
18.5,
15.1,
15.4,
13.3,
13.1,
12.8,
12.0,
13.3,
12.9,
15.4,
13.4,
15.1,
14.7,
14.9,
16.8,
15.8,
17.0,
14.1,
16.5,
12.4,
12.7,
14.6,
17.6,
19.6,
17.3,
17.9,
17.8,
13.0,
13.2,
17.1,
19.6,
16.3,
16.9,
22.2,
12.0,
14.2,
16.8,
16.9,
22.0,
16.4,
14.2,
11.8,
15.2,
12.6,
13.2,
14.4,
15.0,
16.5,
13.3,
14.5,
16.7,
13.8,
12.4,
13.7,
19.9,
24.0,
18.7,
18.9,
20.2,
19.9,
24.0,
27.3,
24.2,
18.0,
21.3,
26.6,
18.5,
18.2,
21.0,
12.0,
12.2,
14.1,
16.5,
15.5,
15.0,
20.8,
27.3,
28.4,
18.9,
18.2,
25.5,
15.9,
15.5,
21.4,
23.2,
26.8,
23.9,
19.1,
15.2,
14.9,
21.3,
25.4,
28.7,
32.3,
25.7,
19.0,
19.4,
15.0,
16.5,
16.3,
28.9,
27.4,
32.9,
27.0,
23.7,
22.8,
16.5,
14.0,
18.0,
28.2,
32.8,
28.0,
22.2,
21.8,
18.7,
25.5,
27.6,
16.5,
15.9,
16.8,
17.9,
23.1,
22.8,
24.2,
27.7,
21.3,
23.5,
17.9,
24.2,
20.0,
23.7,
27.0,
24.6,
25.0,
20.0,
22.7,
22.6,
29.2,
27.8,
26.5,
18.9,
23.2,
24.2,
28.1,
30.3,
25.6,
20.7,
19.6,
21.7,
28.0,
21.3,
21.4,
21.4,
22.6,
21.7,
21.8,
19.8,
30.4,
36.0,
42.2,
21.1,
20.0,
18.4,
19.1,
19.3,
18.8,
22.8,
20.0,
21.9,
20.1,
23.2,
34.5,
22.1,
21.6,
30.0,
34.0,
22.2,
19.6,
25.4,
37.1,
30.2,
29.1,
21.1,
19.1,
22.2,
24.4,
20.3,
28.0,
32.2,
23.4,
21.4,
33.5,
18.6,
19.2,
21.1,
29.5,
25.7,
32.4,
28.5,
20.6,
25.2,
20.8,
17.5,
20.1,
22.4,
24.0,
21.0,
26.8,
17.7,
19.2,
19.1,
26.4,
29.7,
30.2,
33.0,
33.3,
33.0,
36.1,
34.0,
34.7,
20.4,
19.4,
21.3,
26.8,
28.5,
25.6,
28.6,
32.5,
18.7,
23.2,
23.7,
27.2,
23.5,
19.0,
22.6,
30.1,
23.3,
19.7,
22.3,
21.6,
23.5,
22.6,
24.0,
22.9,
22.4,
28.8,
30.0,
32.4,
33.8,
26.8,
18.6,
17.0,
15.3,
15.3,
16.8,
16.0,
16.6,
17.2,
21.5,
19.7,
19.8,
21.2,
23.7,
23.9,
25.6,
24.7,
20.2,
23.5,
22.2,
24.5,
25.5,
18.8,
18.0,
14.9,
18.3,
19.2,
18.9,
19.2,
21.5,
18.0,
13.1,
14.9,
15.2,
13.5,
15.2,
14.2,
14.6,
16.0,
15.1,
15.6,
17.5,
18.8,
20.4,
20.3,
19.1,
14.9,
14.6,
13.7,
13.0,
13.4,
12.7,
14.2,
13.7,
12.7,
14.1,
12.8,
17.1,
14.0,
14.5,
15.7,
14.1,
13.7,
13.3,
13.4,
11.9,
13.6,
13.3,
12.2,
10.8,
13.0,
14.1,
14.4,
13.3,
15.7,
15.6,
19.3,
16.3,
19.4,
17.7,
16.9,
14.4,
14.3,
13.0,
12.3,
12.8,
12.7,
10.8,
14.6,
13.3,
12.7,
12.9,
15.7,
11.4,
12.0,
12.2,
11.8,
13.1,
15.5,
11.3,
12.0,
12.8,
11.6,
12.4,
12.6,
17.4,
13.4,
11.3,
12.1,
12.7,
12.5,
13.2,
17.1,
15.9,
15.0,
13.2,
12.6,
11.8,
12.0,
14.0,
13.7,
16.1,
19.8,
15.7,
13.2,
13.6,
13.5,
11.7,
11.4,
15.4,
17.4,
15.3,
16.7,
16.5,
15.3,
18.8,
15.3,
15.6,
18.7,
17.4,
12.6,
12.9,
13.5,
13.6,
13.7,
15.1,
14.3,
21.3,
21.5,
23.2,
15.3,
19.1,
15.3,
14.8,
15.6,
14.6,
14.7,
13.7,
13.6,
13.5,
14.6,
15.6,
20.7,
25.1,
19.7,
12.5,
14.1,
13.3,
14.7,
20.5,
21.8,
16.2,
25.8,
26.6,
19.4,
21.1,
20.2,
20.9,
18.1,
26.3,
19.2,
16.1,
17.9,
21.1,
25.7,
23.7,
19.7,
16.8,
14.3,
14.2,
20.5,
22.2,
15.4,
17.2,
18.0,
20.5,
23.8,
24.1,
25.8,
19.4,
18.3,
22.3,
17.4,
20.2,
27.5,
23.9,
22.8,
22.7,
21.5,
21.9,
30.3,
17.7,
26.8,
28.3,
24.2,
18.2,
21.8,
17.8,
16.1,
18.7,
17.1,
15.7,
20.8,
26.2,
26.2,
21.3,
20.5,
20.7,
19.9,
19.1,
21.7,
25.6,
23.4,
26.6,
23.5,
25.0,
25.8,
28.1,
27.8,
23.2,
21.0,
21.1,
18.3,
19.6,
22.2,
30.6,
23.1,
18.4,
22.4,
30.9,
25.2,
21.4,
20.4,
18.7,
22.0,
26.6,
20.4,
18.5,
22.5,
32.7,
27.8,
19.0,
21.0,
18.5,
22.2,
23.3,
19.8,
24.7,
32.4,
20.0,
18.2,
20.4,
25.5,
24.4,
21.5,
32.4,
27.2,
19.1,
19.7,
21.3,
20.3,
21.0,
22.6,
21.5,
24.2,
25.0,
20.6,
19.6,
20.0,
29.5,
35.5,
21.1,
23.7,
33.7,
21.8,
21.2,
23.7,
26.4,
21.8,
31.1,
38.3,
22.7,
21.5,
20.8,
30.0,
34.5,
36.5,
28.9,
27.2,
25.5,
22.3,
21.8,
22.4,
19.7,
19.4,
18.5,
24.0,
25.8,
25.5,
26.6,
34.5,
21.1,
20.4,
24.9,
21.9,
25.5,
38.0,
36.4,
19.0,
18.4,
17.7,
21.3,
25.7,
30.5,
32.6,
23.0,
20.5,
21.8,
26.4,
25.8,
36.7,
27.2,
27.7,
20.4,
18.8,
22.6,
32.7,
22.8,
24.4,
32.6,
29.1,
18.3,
19.6,
28.6,
26.8,
30.7,
16.8,
16.1,
18.2,
19.3,
19.8,
26.8,
17.5,
18.2,
24.9,
27.3,
24.8,
18.0,
16.8,
14.6,
18.7,
18.8,
19.5,
21.1,
20.0,
18.5,
17.0,
16.2,
16.7,
14.9,
15.5,
15.3,
15.7,
17.3,
17.1,
16.2,
15.4,
15.0,
19.0,
19.7,
19.7,
15.1,
16.4,
21.7,
19.5,
17.2,
16.4,
18.0,
19.5,
16.2,
16.0,
14.4,
15.4,
18.2,
13.8,
16.1,
13.7,
16.8,
15.9,
16.7,
14.6,
15.2,
14.0,
14.1,
16.0,
16.7,
15.2,
13.3,
14.0,
15.9,
14.5,
14.2,
13.0,
13.7,
14.7,
14.6,
13.9,
14.2,
16.8,
14.7,
12.1,
10.2,
10.8,
11.9,
11.8,
12.0,
12.7,
13.6,
13.0,
13.1,
11.1,
12.8,
13.3,
11.7,
14.3,
15.4,
14.2,
13.0,
13.1,
12.4,
11.2,
9.2,
12.4,
12.7,
13.7,
12.7,
15.0,
16.4,
14.0,
13.2,
13.6,
15.4,
13.8,
15.0,
14.5,
14.8,
14.0,
8.2,
10.0,
11.2,
13.3,
13.7,
15.0,
12.5,
15.3,
16.4,
17.6,
15.0,
14.8,
13.8,
14.8,
16.6,
17.3,
12.2,
12.1,
13.9,
16.8,
16.8,
14.5,
15.1,
13.8,
13.0,
14.5,
18.0,
20.2,
21.0,
16.5,
12.2,
15.0,
15.7,
17.7,
18.9,
14.9,
11.4,
15.4,
15.2,
17.6,
17.2,
21.6,
18.7,
13.9,
13.3,
14.6,
16.5,
17.7,
13.6,
15.2,
15.5,
16.5,
18.8,
13.6,
14.0,
13.8,
16.0,
19.3,
16.9,
14.3,
16.7,
21.3,
19.4,
18.7,
14.7,
16.2,
15.5,
23.4,
22.0,
16.7,
14.4,
16.4,
19.6,
18.3,
14.4,
14.8,
17.1,
15.2,
19.5,
21.1,
21.7,
14.6,
18.2,
17.8,
23.5,
20.8,
15.7,
16.1,
21.0,
15.0,
16.0,
16.0,
14.8,
14.6,
16.1,
21.6,
23.8,
27.0,
20.7,
19.7,
20.0,
28.8,
30.0,
33.1,
18.9,
16.3,
19.0,
18.3,
21.2,
16.5,
16.7,
19.5,
29.1,
19.7,
22.6,
20.7,
14.0,
15.5,
14.8,
19.2,
27.4,
18.5,
18.5,
31.5,
16.7,
15.7,
16.7,
22.0,
25.7,
29.2,
27.7,
29.4,
19.9,
27.8,
31.2,
22.8,
17.6,
19.4,
27.5,
18.4,
17.3,
17.5,
17.3,
21.9,
21.1,
17.9,
19.6,
23.6,
18.2,
20.0,
17.5,
18.9,
18.7,
28.9,
21.6,
20.7,
21.8,
21.9,
35.0,
26.0,
20.0,
33.6,
20.1,
23.5,
23.6,
24.5,
27.0,
17.0,
18.9,
21.8,
27.5,
22.8,
26.5,
22.7,
22.5,
37.8,
22.0,
27.6,
21.2,
22.6,
18.7,
17.5,
17.7,
30.3,
25.6,
32.7,
22.9,
21.0,
22.5,
25.7,
37.8,
21.3,
38.7,
21.5,
18.8,
21.7,
30.6,
40.0,
20.8,
23.7,
22.0,
20.5,
23.3,
34.2,
36.7,
21.3,
28.7,
27.6,
28.8,
37.0,
21.7,
22.7,
18.5,
25.4,
28.7,
18.4,
24.3,
33.0,
16.5,
18.0,
22.1,
24.3,
24.7,
30.1,
22.7,
22.9,
16.4,
16.2,
19.0,
27.0,
21.7,
16.7,
17.7,
21.6,
33.8,
31.8,
32.0,
17.6,
20.2,
17.3,
17.0,
15.6,
17.1,
21.9,
20.1,
29.7,
20.7,
16.5,
15.8,
17.4,
19.4,
21.1,
19.9,
28.6,
29.7,
29.7,
19.9,
17.4,
18.6,
23.0,
25.1,
26.6,
19.0,
21.2,
16.5,
17.5,
18.2,
24.0,
22.8,
15.1,
16.1,
15.8,
17.0,
19.7,
24.9,
26.9,
27.9,
18.5,
17.0,
16.4,
20.8,
16.7,
15.8,
18.5,
16.5,
16.6,
17.4,
21.7,
22.7,
23.3,
22.5,
21.9,
17.9,
20.1,
16.0,
16.6,
15.6,
15.2,
14.3,
15.0,
14.5,
16.9,
18.3,
18.0,
13.5,
13.7,
10.9,
13.6,
15.2,
14.1,
13.2,
13.8,
14.6,
16.2,
14.5,
14.3,
15.6,
15.9,
19.3,
16.0,
20.3,
15.8,
16.2,
12.8,
15.0,
13.4,
13.2,
13.8,
15.7,
16.2,
14.9,
18.3,
15.3,
15.2,
14.2,
12.0,
13.7,
13.8,
12.3,
12.8,
13.1,
14.5,
14.0,
13.3,
15.7,
16.0,
16.2,
18.0,
16.3,
15.8,
13.3,
13.7,
11.5,
13.7,
12.3,
12.6,
12.7,
14.8,
14.9,
12.0,
12.8,
12.4,
13.0,
13.1,
12.9,
13.8,
15.2,
13.1,
11.1,
10.8,
13.1,
15.0,
15.7,
16.4,
14.1,
15.5,
14.8,
13.9,
15.4,
14.8,
10.9,
12.9,
12.8,
18.6,
14.6,
18.0,
18.5,
11.8,
13.3,
14.5,
14.0,
13.3,
15.0,
18.4,
13.6,
15.3,
16.1,
19.0,
15.5,
16.2,
14.9,
17.9,
10.1,
12.1,
13.1,
12.9,
15.4,
15.6,
14.0,
14.2,
17.4,
14.0,
13.6,
15.7,
14.0,
15.0,
16.4,
15.1,
17.1,
18.9,
23.8,
25.9,
15.0,
17.0,
19.0,
26.5,
26.9,
23.7,
30.7,
29.6,
25.9,
20.2,
14.8,
13.9,
15.7,
22.8,
18.7,
17.0,
19.0,
14.1,
17.6,
19.0,
15.8,
12.9,
14.5,
15.9,
21.9,
24.1,
19.4,
15.4,
17.6,
20.5,
19.4,
18.0,
16.8,
15.8,
16.0,
22.1,
26.8,
26.3,
17.5,
19.0,
30.3,
32.7,
35.0,
36.1,
18.1,
18.1,
16.8,
18.6,
20.5,
27.8,
18.8,
16.8,
17.2,
19.6,
18.8,
18.5,
19.8,
30.5,
28.3,
32.6,
30.1,
19.8,
19.9,
23.8,
24.3,
17.4,
18.2,
18.4,
16.9,
24.0,
32.8,
18.9,
15.5,
16.0,
16.6,
20.8,
19.6,
18.5,
17.6,
20.8,
21.5,
25.0,
21.2,
22.6,
25.6,
25.1,
32.5,
27.8,
21.5,
20.7,
19.9,
18.4,
22.3,
24.6,
25.6,
19.0,
18.7,
29.2,
31.8,
18.4,
19.2,
30.8,
32.2,
22.0,
21.4,
20.0,
20.6,
22.2,
30.0,
40.4,
26.3,
39.0,
30.6,
39.2,
37.8,
28.8,
30.8,
38.7,
25.2,
20.6,
21.0,
25.6,
36.2,
24.9,
26.2,
20.0,
19.7,
20.3,
25.1,
29.1,
37.5,
39.2,
33.7,
19.5,
19.9,
21.0,
20.3,
22.6,
32.9,
21.3,
19.5,
19.2,
22.0,
27.2,
32.2,
25.4,
25.0,
25.2,
26.7,
25.3,
29.9,
27.6,
20.6,
21.0,
21.6,
24.0,
23.8,
21.9,
29.4,
37.4,
21.5,
21.1,
21.0,
36.3,
28.7,
19.4,
20.3,
20.8,
30.5,
30.3,
23.4,
25.7,
15.7,
19.3,
18.8,
17.0,
17.6,
18.5,
21.0,
26.9,
30.6,
32.7,
29.2,
25.8,
28.2,
24.7,
30.6,
30.6,
29.0,
26.5,
25.5,
24.9,
20.9,
23.4,
18.1,
20.7,
20.3,
21.1,
19.5,
20.4,
17.3,
18.5,
21.0,
23.5,
25.7,
23.1,
24.8,
22.4,
17.4,
18.3,
26.0,
25.5,
21.8,
21.2,
25.2,
21.7,
22.6,
24.3,
17.0,
20.7,
22.7,
25.4,
24.9,
25.0,
20.5,
20.2,
19.4,
21.4,
25.5,
19.4,
18.8,
17.0,
15.5,
16.2,
16.7,
20.1,
21.4,
16.4,
17.3,
17.0,
18.2,
17.5,
14.9,
18.0,
16.7,
17.1,
16.8,
17.7,
18.4,
18.5,
15.5,
15.4,
17.5,
16.8,
18.9,
19.4,
20.3,
19.3,
17.0,
16.4,
13.3,
13.6,
14.1,
14.8,
14.1,
17.0,
16.0,
16.5,
14.9,
14.4,
13.5,
11.3,
15.0,
13.0,
14.9,
14.6,
14.1,
15.5,
14.4,
12.0,
17.5,
13.3,
14.3,
14.0,
13.5,
13.3,
12.3,
12.6,
14.1,
16.6,
17.3,
17.1,
16.1,
15.0,
15.4,
14.6,
14.6,
16.8,
16.5,
15.6,
14.8,
15.2,
15.2,
15.0,
14.1,
15.5,
12.6,
12.9,
15.1,
13.0,
15.6,
13.4,
14.4,
14.5,
15.4,
16.7,
16.1,
16.6,
16.5,
13.8,
13.4,
11.9,
10.7,
13.2,
13.1,
12.8,
16.2,
18.0,
18.9,
18.3,
19.8,
19.3,
16.2,
17.6,
17.5,
13.2,
13.2,
13.4,
14.2,
16.1,
14.2,
14.3,
14.5,
17.1,
18.2,
22.2,
22.4,
15.6,
16.6,
14.5,
17.7,
21.0,
14.3,
20.0,
20.2,
19.2,
19.8,
14.8,
14.2,
17.2,
17.2,
22.1,
15.4,
16.1,
14.8,
15.1,
20.2,
22.3,
24.6,
18.4,
18.2,
23.4,
23.8,
20.1,
20.5,
19.5,
18.8,
19.8,
26.2,
21.2,
21.2,
17.3,
20.5,
19.2,
18.0,
17.7,
23.7,
16.9,
23.2,
22.3,
18.7,
24.0,
16.4,
18.8,
19.0,
26.7,
19.3,
19.5,
21.2,
17.0,
24.4,
29.0,
19.2,
24.6,
23.4,
22.6,
21.1,
29.1,
36.1,
23.1,
16.5,
16.2,
21.3,
25.4,
26.4,
21.1,
21.1,
19.7,
31.2,
34.5,
22.9,
13.5,
16.2,
15.3,
19.2,
29.4,
27.7,
19.6,
16.8,
19.9,
20.4,
18.3,
16.7,
16.9,
18.9,
17.6,
18.6,
21.4,
28.7,
28.0,
22.8,
17.2,
18.7,
19.9,
23.0,
30.8,
31.7,
20.8,
20.8,
25.2,
31.3,
32.1,
34.3,
33.7,
28.6,
19.2,
20.9,
21.1,
26.1,
26.4,
36.8,
26.3,
17.9,
21.3,
19.5,
20.1,
21.7,
19.5,
24.8,
30.2,
31.6,
19.0,
21.3,
20.6,
22.8,
33.2,
24.7,
21.3,
17.9,
18.7,
25.3,
31.0,
33.2,
31.0,
18.2,
19.1,
22.2,
20.3,
19.8,
18.3,
27.5,
29.8,
31.0,
32.9,
35.5,
36.0,
36.6,
21.7,
19.8,
26.7,
22.9,
19.8,
24.9,
24.8,
28.8,
32.7,
33.7,
24.7,
21.6,
26.2,
33.4,
22.9,
31.2,
23.1,
24.4,
30.7,
38.8,
21.5,
20.7,
20.3,
21.3,
30.0,
37.1,
21.1,
22.0,
34.4,
33.8,
37.8,
38.4,
32.8,
34.4,
26.4,
27.3,
28.0,
30.7,
30.7,
23.4,
26.1,
26.1,
21.8,
18.9,
18.5,
17.5,
21.5,
25.1,
21.9,
24.2,
24.2,
27.4,
21.3,
18.0,
18.7,
20.0,
20.9,
19.3,
19.3,
22.2,
17.7,
21.9,
25.2,
25.1,
28.5,
27.6,
19.6,
18.5,
19.2,
20.4,
22.8,
23.0,
20.3,
18.0,
18.0,
20.2,
18.1,
19.2,
17.8,
19.8,
20.3,
18.3,
18.3,
20.2,
22.0,
24.3,
24.7,
25.4,
19.9,
23.0,
21.8,
19.3,
18.5,
20.6,
17.2,
17.5,
19.1,
18.0,
18.9,
23.4,
19.3,
19.2,
21.2,
21.5,
17.2,
16.8,
16.0,
18.9,
20.6,
15.9,
16.7,
18.4,
16.2,
16.2,
15.8,
16.2,
18.3,
18.5,
19.4,
17.0,
14.6,
11.0,
12.4,
14.0,
10.8,
13.1,
15.0,
13.0,
17.5,
16.0,
10.2,
13.7,
15.4,
13.6,
12.9,
14.2,
15.0,
15.2,
18.2,
12.3,
13.8,
13.9,
14.6,
14.9,
12.6,
11.2,
12.9,
14.1,
12.8,
15.8,
16.0,
16.0,
16.6,
16.6,
16.0,
12.7,
12.3,
11.8,
14.1,
12.6,
12.9,
11.5,
12.9,
11.8,
11.9,
14.2,
12.0,
11.7,
11.8,
13.0,
11.4,
11.0,
12.3,
13.1,
11.4,
16.0,
15.3,
13.2,
15.9,
16.0,
19.6,
15.2,
10.6,
14.8,
15.8,
14.1,
14.4,
10.7,
13.2,
13.0,
13.2,
16.0,
13.2,
11.8,
11.6,
13.4,
15.2,
15.5,
13.5,
14.0,
18.5,
14.9,
13.9,
15.9,
11.3,
12.2,
13.8,
13.8,
11.3,
13.0,
13.6,
13.2,
16.0,
17.2,
14.7,
15.5,
15.2,
16.0,
16.6,
17.8,
19.8,
20.6,
19.0,
19.5,
12.6,
14.3,
14.9,
13.7,
14.2,
18.4,
15.4,
18.7,
16.9,
16.5,
20.0,
17.2,
15.8,
20.0,
14.3,
14.2,
14.7,
22.4,
25.3,
21.5,
15.0,
13.8,
15.6,
22.8,
18.0,
16.6,
20.0,
17.5,
18.1,
17.1,
18.4,
18.8,
21.8,
19.7,
26.1,
18.7,
20.8,
29.7,
21.0,
17.3,
16.8,
19.4,
16.8,
21.1,
20.4,
26.1,
15.0,
17.0,
24.0,
22.5,
16.3,
17.9,
17.7,
18.4,
25.6,
28.3,
15.1,
16.8,
19.6,
15.8,
16.9,
17.8,
18.2,
19.2,
19.0,
18.4,
22.0,
16.4,
16.7,
17.4,
20.6,
30.4,
30.3,
35.5,
18.8,
24.7,
27.3,
29.4,
32.5,
33.9,
25.0,
33.6,
25.7,
19.0,
18.4,
22.1,
25.2,
23.9,
28.7,
31.1,
29.1,
25.6,
23.2,
19.0,
17.1,
18.0,
20.0,
32.9,
32.1,
25.2,
18.5,
18.9,
20.8,
23.7,
28.0,
35.2,
23.2,
18.0,
19.8,
31.9,
21.5,
33.0,
21.6,
31.2,
36.6,
19.6,
23.1,
21.9,
21.1,
27.6,
27.3,
30.1,
33.5,
24.2,
21.4,
20.4,
20.3,
18.5,
29.5,
25.4,
25.2,
21.6,
21.8,
25.4,
36.6,
36.6,
28.7,
27.2,
25.5,
23.6,
25.6,
35.9,
19.1,
15.0,
20.7,
21.9,
18.2,
21.9,
25.3,
29.6,
29.9,
24.4,
20.2,
23.5,
26.0,
25.9,
27.4,
31.5,
35.8,
23.9,
27.8,
26.0,
31.1,
35.2,
27.8,
21.3,
21.5,
23.0,
24.4,
27.4,
23.8,
25.2,
28.5,
25.3,
25.6,
24.1,
19.3,
22.7,
31.2,
25.8,
21.5,
20.3,
18.9,
25.2,
26.8,
19.2,
21.5,
30.8,
18.6,
19.8,
21.3,
21.3,
29.9,
32.0,
32.4,
32.5,
32.6,
31.5,
21.0,
20.6,
19.7,
19.7,
20.1,
17.8,
22.1,
28.8,
26.8,
27.1,
19.4,
19.1,
22.5,
26.0,
22.2,
28.4,
25.2,
20.9,
17.6,
17.2,
17.2,
18.7,
18.6,
21.8,
16.7,
17.9,
20.8,
21.2,
17.5,
16.6,
17.5,
18.1,
18.5,
19.8,
23.0,
21.2,
21.2,
23.1,
20.2,
18.1,
16.2,
18.7,
21.2,
19.3,
21.8,
19.9,
13.6,
14.7,
18.0,
19.3,
17.7,
14.6,
14.5,
14.4,
15.4,
14.5,
16.2,
14.2,
13.2,
15.2,
17.4,
13.7,
16.8,
17.7,
16.2,
13.2,
16.3,
16.3,
14.6,
13.9,
16.0,
18.4,
19.0,
17.4,
16.2,
16.3,
16.8,
14.6,
14.2,
13.3,
14.0,
11.7,
12.9,
12.7,
14.6,
15.1,
16.1,
16.2,
13.0,
12.4,
12.6,
9.7,
11.8,
15.8,
13.6,
8.9,
12.2,
13.2,
15.2,
12.6,
12.9,
15.9,
15.4,
15.0,
14.2,
15.7,
14.4,
14.7,
14.5,
15.5,
19.2,
18.2,
13.9,
15.0,
15.8,
15.7,
15.6,
17.7,
18.8,
15.2,
14.5,
13.5,
13.4,
12.4,
15.5,
13.2,
14.0,
14.1,
15.1,
17.2,
15.1,
14.5,
13.6,
13.1,
13.1,
13.3,
14.8,
15.0,
16.4,
15.3,
13.8,
13.5,
12.7,
13.1,
15.8,
16.0,
14.4,
11.9,
13.2,
12.0,
11.6,
12.1,
17.3,
17.0,
14.7,
17.8,
13.7,
16.6,
14.5,
13.6,
15.8,
16.4,
16.8,
14.6,
15.7,
19.7,
20.7,
18.6,
13.7,
11.9,
14.0,
14.6,
13.4,
16.6,
21.0,
23.2,
23.9,
17.8,
16.2,
22.1,
24.3,
19.0,
16.5,
17.1,
17.3,
16.8,
21.7,
20.1,
15.0,
14.8,
19.8,
24.0,
14.0,
15.3,
14.8,
16.8,
20.8,
12.9,
14.3,
18.9,
22.6,
24.7,
19.6,
20.2,
23.9,
15.1,
17.1,
15.3,
16.0,
16.8,
19.8,
27.2,
29.1,
31.4,
33.2,
33.7,
19.6,
31.8,
22.4,
21.4,
22.0,
21.6,
30.7,
30.2,
32.6,
26.6,
22.0,
16.3,
17.2,
25.5,
32.1,
18.4,
21.0,
16.8,
15.6,
18.5,
21.5,
28.1,
31.4,
20.9,
18.7,
19.9,
21.9,
19.1,
28.5,
26.2,
20.1,
19.9,
22.2,
31.4,
24.1,
35.2,
36.9,
33.2,
35.7,
21.1,
21.6,
18.6,
18.8,
18.7,
21.6,
22.4,
20.9,
23.3,
34.4,
20.7,
24.2,
22.2,
23.5,
30.3,
19.8,
23.3,
29.3,
20.9,
37.6,
23.2,
25.1,
24.4,
24.6),f=365,s=1981)
| /data/melbmax.R | no_license | freuerde/hdrcde | R | false | false | 25,582 | r | maxtemp <- ts(c(
38.1,
32.4,
34.5,
20.7,
21.5,
23.1,
29.7,
36.6,
36.1,
20.6,
20.4,
30.1,
38.7,
41.4,
37.0,
36.0,
37.0,
38.0,
23.0,
26.7,
27.5,
21.7,
22.9,
26.2,
36.5,
41.8,
21.5,
19.2,
25.0,
28.9,
23.2,
31.5,
36.2,
38.2,
26.4,
20.9,
21.5,
30.2,
33.4,
32.6,
22.2,
21.7,
30.0,
35.7,
32.8,
39.3,
25.5,
23.0,
19.9,
21.3,
20.8,
21.7,
23.8,
29.0,
23.7,
21.3,
28.5,
33.6,
34.6,
34.2,
27.0,
24.2,
19.9,
19.7,
21.5,
30.6,
30.0,
19.0,
19.6,
20.6,
23.6,
17.9,
17.3,
21.4,
24.1,
20.9,
30.1,
32.6,
21.3,
19.5,
19.9,
21.0,
25.4,
17.5,
20.4,
26.8,
25.8,
20.9,
19.4,
25.8,
26.3,
29.6,
30.3,
23.6,
28.4,
20.7,
24.1,
27.3,
23.2,
18.3,
24.6,
27.4,
20.4,
18.1,
25.2,
19.8,
21.0,
23.7,
19.6,
18.1,
20.8,
26.0,
18.4,
22.0,
14.4,
19.9,
22.6,
13.7,
15.9,
21.2,
23.7,
24.0,
17.2,
23.2,
25.2,
17.2,
16.0,
15.6,
13.4,
16.0,
16.8,
14.6,
19.4,
21.0,
19.5,
18.5,
13.3,
13.7,
14.3,
14.1,
11.4,
13.6,
16.6,
17.6,
14.6,
17.2,
14.4,
16.4,
17.3,
17.6,
17.2,
17.7,
14.2,
16.6,
15.7,
13.7,
14.7,
13.1,
12.9,
15.4,
11.9,
15.2,
15.3,
16.5,
16.1,
11.7,
11.2,
11.5,
10.8,
16.1,
14.8,
13.6,
13.8,
9.7,
10.7,
11.0,
15.3,
15.3,
17.0,
16.0,
16.3,
15.7,
14.5,
10.8,
10.5,
13.4,
12.2,
13.2,
13.0,
12.4,
13.1,
9.8,
10.5,
13.4,
11.0,
13.1,
15.0,
16.7,
16.1,
18.2,
15.7,
17.7,
15.9,
15.1,
15.2,
14.7,
13.3,
14.5,
11.1,
13.1,
13.7,
14.6,
12.9,
12.8,
15.2,
14.5,
17.2,
14.5,
14.4,
11.0,
13.1,
13.6,
14.6,
12.7,
13.6,
12.7,
15.5,
17.4,
15.2,
14.2,
17.7,
19.2,
12.5,
14.2,
15.3,
15.7,
17.0,
19.0,
13.1,
13.2,
13.2,
15.7,
14.1,
15.6,
15.5,
15.9,
15.1,
16.0,
19.4,
21.5,
23.7,
18.7,
23.8,
18.0,
16.2,
18.5,
20.6,
18.3,
22.5,
26.9,
19.4,
15.9,
20.5,
21.2,
19.5,
14.7,
17.6,
15.8,
17.7,
14.3,
16.8,
18.6,
21.9,
21.4,
20.8,
14.0,
17.0,
23.0,
26.4,
19.6,
22.7,
26.9,
14.7,
15.2,
19.8,
26.9,
20.2,
14.3,
14.8,
18.5,
21.7,
21.4,
21.8,
18.2,
15.8,
15.3,
18.5,
19.2,
28.5,
32.2,
21.8,
22.1,
20.7,
17.0,
24.7,
26.2,
29.0,
21.6,
17.1,
16.9,
19.1,
24.7,
25.4,
19.8,
18.2,
16.3,
17.0,
17.7,
15.5,
14.7,
15.8,
19.9,
20.4,
23.3,
20.2,
28.8,
31.2,
17.4,
18.5,
26.8,
34.3,
30.1,
20.5,
20.5,
19.8,
27.0,
21.0,
33.0,
22.6,
28.3,
21.1,
19.0,
17.3,
27.0,
30.2,
24.8,
17.9,
17.9,
20.7,
30.9,
36.2,
21.0,
20.2,
21.3,
24.2,
21.0,
20.7,
17.8,
19.6,
22.6,
20.5,
24.1,
22.2,
27.0,
33.6,
26.6,
20.6,
24.5,
19.8,
22.6,
29.2,
20.3,
23.0,
24.4,
38.0,
40.5,
24.2,
20.2,
21.8,
27.0,
35.2,
25.2,
32.7,
35.9,
38.9,
26.5,
21.8,
37.9,
43.3,
19.0,
19.7,
21.4,
32.0,
33.3,
22.2,
21.3,
20.8,
22.3,
22.5,
21.4,
23.0,
35.1,
40.3,
39.0,
21.1,
25.4,
23.6,
28.1,
37.0,
39.3,
39.4,
25.8,
27.7,
23.0,
24.0,
26.1,
21.8,
24.2,
22.3,
19.7,
20.8,
17.9,
20.1,
20.9,
21.2,
20.4,
29.0,
34.7,
34.0,
30.9,
29.6,
26.1,
18.5,
21.2,
22.4,
21.4,
30.5,
32.0,
32.7,
27.4,
18.9,
19.4,
22.2,
30.2,
31.8,
31.4,
18.5,
22.2,
27.5,
25.8,
25.2,
19.1,
19.6,
21.4,
20.7,
28.3,
23.0,
16.5,
18.8,
20.0,
20.6,
19.8,
20.3,
26.6,
21.5,
22.3,
26.5,
27.1,
30.0,
28.2,
17.3,
16.8,
17.5,
19.2,
20.3,
20.6,
23.7,
24.3,
26.4,
16.0,
14.5,
14.2,
16.5,
15.7,
16.0,
16.7,
19.8,
22.5,
22.9,
18.8,
15.7,
14.7,
15.3,
15.3,
18.0,
15.1,
15.1,
13.6,
15.3,
18.0,
18.6,
22.0,
22.1,
17.8,
15.2,
11.3,
13.5,
16.1,
16.0,
16.5,
16.7,
19.6,
19.5,
18.0,
18.4,
12.9,
14.9,
12.4,
11.9,
11.7,
12.9,
12.9,
12.4,
12.5,
15.7,
15.9,
14.3,
16.9,
14.0,
13.6,
13.7,
16.0,
13.1,
13.8,
13.4,
11.3,
10.6,
13.9,
12.7,
12.5,
11.3,
11.6,
14.0,
13.7,
15.2,
14.0,
11.4,
14.2,
15.2,
12.6,
10.2,
13.9,
13.7,
13.6,
13.2,
15.4,
12.9,
11.9,
13.3,
13.3,
10.5,
10.0,
11.0,
8.3,
10.7,
11.2,
11.7,
14.2,
14.5,
16.0,
16.7,
14.4,
17.0,
14.6,
11.6,
12.8,
14.0,
17.1,
15.4,
15.5,
15.1,
16.9,
14.2,
15.9,
18.2,
19.5,
19.5,
19.8,
15.6,
14.1,
17.0,
14.3,
13.1,
13.7,
18.2,
19.8,
22.3,
17.8,
15.0,
22.6,
23.7,
15.8,
15.0,
23.5,
25.7,
26.5,
15.1,
13.9,
20.5,
21.4,
14.9,
15.0,
18.9,
11.7,
15.5,
17.7,
11.0,
14.9,
13.1,
13.7,
15.4,
15.6,
23.9,
23.7,
24.3,
15.9,
13.5,
15.3,
19.3,
20.5,
22.5,
23.2,
16.3,
14.8,
13.7,
18.4,
12.3,
13.3,
16.2,
25.6,
24.1,
13.8,
15.9,
21.2,
21.7,
15.6,
13.5,
15.0,
21.2,
25.4,
19.0,
15.0,
16.4,
15.6,
13.7,
22.0,
17.4,
15.1,
14.6,
16.9,
15.9,
21.3,
29.7,
33.0,
22.7,
21.6,
30.3,
19.0,
14.6,
15.9,
20.4,
29.3,
33.7,
35.2,
38.1,
24.5,
26.3,
25.0,
17.9,
18.1,
23.3,
31.2,
32.8,
28.0,
19.7,
22.1,
16.3,
20.1,
21.7,
19.6,
30.8,
36.8,
38.1,
17.6,
21.2,
17.2,
20.7,
26.1,
34.5,
18.6,
20.1,
22.4,
19.1,
19.2,
28.0,
17.9,
18.1,
16.0,
18.5,
20.6,
33.0,
35.2,
24.0,
26.0,
28.0,
23.0,
19.0,
19.1,
18.0,
19.4,
32.5,
32.6,
18.1,
19.7,
28.0,
28.0,
23.8,
31.4,
22.0,
35.8,
23.0,
18.9,
25.4,
28.0,
19.2,
21.5,
28.8,
37.3,
24.6,
21.3,
17.9,
17.5,
18.7,
18.1,
19.7,
22.1,
19.6,
18.0,
19.9,
28.5,
23.0,
37.0,
19.0,
19.4,
35.0,
27.7,
23.9,
25.6,
27.7,
24.3,
38.6,
41.2,
22.0,
22.5,
22.3,
25.5,
36.6,
35.7,
43.2,
28.0,
25.5,
21.6,
21.8,
23.3,
34.3,
31.2,
43.0,
20.2,
20.3,
22.2,
24.0,
34.4,
32.9,
21.6,
26.5,
33.2,
38.6,
24.4,
28.3,
35.2,
28.7,
35.6,
37.2,
25.5,
31.5,
30.2,
29.0,
21.4,
28.5,
32.1,
23.5,
20.0,
21.0,
20.0,
21.8,
24.3,
30.9,
21.6,
22.2,
20.2,
20.5,
17.6,
18.6,
17.7,
18.0,
18.6,
18.6,
25.9,
19.8,
18.7,
17.9,
17.3,
18.1,
14.6,
14.1,
15.7,
16.0,
21.6,
21.4,
23.8,
21.7,
16.6,
16.7,
18.1,
14.5,
14.7,
16.4,
16.5,
22.7,
23.8,
24.6,
23.6,
19.8,
19.8,
18.0,
17.6,
19.0,
17.5,
17.0,
19.9,
18.5,
18.7,
17.8,
17.0,
17.6,
18.7,
17.0,
18.8,
17.5,
19.5,
16.6,
19.1,
21.2,
20.2,
17.9,
14.3,
16.0,
17.7,
18.6,
19.1,
13.7,
17.5,
16.1,
15.2,
17.3,
14.9,
18.4,
17.2,
12.2,
13.9,
14.0,
15.3,
15.0,
15.1,
16.6,
16.5,
16.1,
13.5,
13.0,
13.6,
11.8,
14.5,
14.0,
14.1,
13.5,
12.1,
11.9,
11.9,
14.2,
14.0,
17.9,
19.1,
15.1,
13.8,
13.7,
10.0,
10.3,
10.4,
12.0,
8.5,
14.4,
13.1,
14.0,
11.8,
13.9,
12.9,
11.0,
11.8,
10.7,
11.2,
10.3,
12.2,
12.4,
14.7,
15.1,
15.0,
14.0,
15.2,
14.2,
8.7,
11.0,
14.2,
15.2,
12.2,
13.0,
14.7,
17.2,
13.7,
14.8,
13.8,
13.7,
13.1,
12.7,
12.3,
12.4,
13.0,
13.5,
13.5,
14.7,
13.4,
15.9,
16.0,
15.3,
15.5,
18.2,
18.6,
18.5,
19.2,
14.2,
12.8,
13.1,
15.4,
17.3,
14.5,
19.5,
20.2,
16.3,
13.0,
13.3,
13.3,
19.0,
16.8,
19.2,
18.5,
19.0,
17.4,
15.3,
15.8,
18.7,
17.8,
16.5,
15.3,
13.2,
12.0,
14.0,
9.5,
13.0,
17.6,
19.6,
17.2,
17.3,
17.0,
16.3,
17.3,
20.3,
16.9,
13.5,
13.3,
20.7,
19.8,
20.8,
19.8,
19.5,
23.4,
14.2,
14.2,
16.6,
17.0,
15.6,
14.8,
17.2,
20.2,
16.7,
18.2,
18.1,
22.5,
23.4,
15.3,
13.7,
15.6,
18.9,
15.1,
16.6,
17.7,
23.0,
16.8,
21.3,
17.9,
23.6,
25.3,
14.5,
16.9,
16.3,
17.7,
18.0,
21.7,
22.3,
15.5,
16.7,
24.3,
23.5,
27.7,
18.0,
15.6,
19.0,
19.5,
20.1,
31.3,
15.6,
12.8,
13.8,
15.8,
16.4,
17.6,
28.2,
25.5,
22.7,
28.0,
17.0,
22.6,
24.5,
25.6,
27.5,
18.5,
18.2,
18.8,
17.8,
18.9,
24.0,
20.9,
16.8,
18.5,
20.6,
24.9,
28.1,
25.7,
19.2,
32.7,
21.3,
20.4,
20.5,
22.8,
35.3,
38.7,
21.4,
23.3,
22.0,
31.7,
23.4,
19.1,
19.5,
30.0,
32.4,
33.6,
30.0,
27.7,
28.1,
23.7,
20.6,
32.3,
31.2,
21.5,
23.0,
21.9,
23.0,
21.6,
27.2,
29.2,
32.0,
22.5,
23.9,
19.7,
19.5,
22.0,
22.1,
19.7,
19.4,
30.5,
28.5,
20.2,
19.0,
20.2,
22.6,
28.4,
18.6,
20.3,
19.0,
26.2,
30.0,
24.2,
23.4,
34.7,
20.4,
20.7,
20.0,
19.5,
31.6,
33.7,
23.5,
24.6,
33.9,
22.4,
24.3,
29.5,
31.7,
32.9,
20.5,
28.5,
27.3,
19.5,
21.4,
33.2,
35.0,
18.1,
17.2,
18.1,
19.5,
21.5,
29.2,
33.8,
20.8,
17.5,
21.0,
27.4,
21.2,
27.1,
22.8,
18.7,
19.0,
19.7,
28.7,
25.5,
18.4,
18.5,
23.6,
26.9,
28.0,
24.7,
21.5,
17.5,
16.1,
18.7,
24.5,
27.6,
17.4,
18.2,
19.7,
18.5,
17.7,
20.7,
21.2,
17.2,
21.5,
22.0,
23.2,
25.4,
24.6,
24.8,
25.0,
25.0,
18.2,
21.5,
16.2,
17.3,
15.5,
16.4,
19.6,
18.5,
16.5,
16.0,
17.7,
19.2,
16.8,
16.5,
22.5,
22.7,
23.5,
19.0,
15.5,
13.5,
14.0,
14.7,
15.6,
17.0,
18.0,
19.6,
17.1,
16.7,
16.9,
18.2,
19.4,
22.0,
21.7,
22.0,
22.2,
20.0,
16.5,
15.6,
15.0,
16.6,
18.7,
18.7,
18.1,
15.6,
20.1,
20.3,
15.2,
17.1,
16.3,
18.3,
18.3,
19.0,
20.0,
15.7,
14.9,
13.1,
13.6,
13.8,
15.0,
16.9,
18.3,
16.4,
14.0,
12.5,
12.8,
12.2,
14.2,
14.2,
15.5,
15.1,
14.4,
12.9,
10.8,
11.7,
13.3,
12.4,
13.4,
10.6,
7.0,
12.6,
13.2,
12.1,
10.3,
15.7,
11.7,
15.1,
15.8,
15.0,
11.6,
11.4,
10.0,
14.7,
13.6,
13.2,
11.0,
13.0,
12.0,
10.6,
13.3,
14.5,
13.5,
15.9,
12.8,
11.7,
13.0,
14.0,
16.0,
17.0,
16.3,
17.6,
19.5,
14.0,
17.5,
19.2,
18.5,
15.1,
15.4,
13.3,
13.1,
12.8,
12.0,
13.3,
12.9,
15.4,
13.4,
15.1,
14.7,
14.9,
16.8,
15.8,
17.0,
14.1,
16.5,
12.4,
12.7,
14.6,
17.6,
19.6,
17.3,
17.9,
17.8,
13.0,
13.2,
17.1,
19.6,
16.3,
16.9,
22.2,
12.0,
14.2,
16.8,
16.9,
22.0,
16.4,
14.2,
11.8,
15.2,
12.6,
13.2,
14.4,
15.0,
16.5,
13.3,
14.5,
16.7,
13.8,
12.4,
13.7,
19.9,
24.0,
18.7,
18.9,
20.2,
19.9,
24.0,
27.3,
24.2,
18.0,
21.3,
26.6,
18.5,
18.2,
21.0,
12.0,
12.2,
14.1,
16.5,
15.5,
15.0,
20.8,
27.3,
28.4,
18.9,
18.2,
25.5,
15.9,
15.5,
21.4,
23.2,
26.8,
23.9,
19.1,
15.2,
14.9,
21.3,
25.4,
28.7,
32.3,
25.7,
19.0,
19.4,
15.0,
16.5,
16.3,
28.9,
27.4,
32.9,
27.0,
23.7,
22.8,
16.5,
14.0,
18.0,
28.2,
32.8,
28.0,
22.2,
21.8,
18.7,
25.5,
27.6,
16.5,
15.9,
16.8,
17.9,
23.1,
22.8,
24.2,
27.7,
21.3,
23.5,
17.9,
24.2,
20.0,
23.7,
27.0,
24.6,
25.0,
20.0,
22.7,
22.6,
29.2,
27.8,
26.5,
18.9,
23.2,
24.2,
28.1,
30.3,
25.6,
20.7,
19.6,
21.7,
28.0,
21.3,
21.4,
21.4,
22.6,
21.7,
21.8,
19.8,
30.4,
36.0,
42.2,
21.1,
20.0,
18.4,
19.1,
19.3,
18.8,
22.8,
20.0,
21.9,
20.1,
23.2,
34.5,
22.1,
21.6,
30.0,
34.0,
22.2,
19.6,
25.4,
37.1,
30.2,
29.1,
21.1,
19.1,
22.2,
24.4,
20.3,
28.0,
32.2,
23.4,
21.4,
33.5,
18.6,
19.2,
21.1,
29.5,
25.7,
32.4,
28.5,
20.6,
25.2,
20.8,
17.5,
20.1,
22.4,
24.0,
21.0,
26.8,
17.7,
19.2,
19.1,
26.4,
29.7,
30.2,
33.0,
33.3,
33.0,
36.1,
34.0,
34.7,
20.4,
19.4,
21.3,
26.8,
28.5,
25.6,
28.6,
32.5,
18.7,
23.2,
23.7,
27.2,
23.5,
19.0,
22.6,
30.1,
23.3,
19.7,
22.3,
21.6,
23.5,
22.6,
24.0,
22.9,
22.4,
28.8,
30.0,
32.4,
33.8,
26.8,
18.6,
17.0,
15.3,
15.3,
16.8,
16.0,
16.6,
17.2,
21.5,
19.7,
19.8,
21.2,
23.7,
23.9,
25.6,
24.7,
20.2,
23.5,
22.2,
24.5,
25.5,
18.8,
18.0,
14.9,
18.3,
19.2,
18.9,
19.2,
21.5,
18.0,
13.1,
14.9,
15.2,
13.5,
15.2,
14.2,
14.6,
16.0,
15.1,
15.6,
17.5,
18.8,
20.4,
20.3,
19.1,
14.9,
14.6,
13.7,
13.0,
13.4,
12.7,
14.2,
13.7,
12.7,
14.1,
12.8,
17.1,
14.0,
14.5,
15.7,
14.1,
13.7,
13.3,
13.4,
11.9,
13.6,
13.3,
12.2,
10.8,
13.0,
14.1,
14.4,
13.3,
15.7,
15.6,
19.3,
16.3,
19.4,
17.7,
16.9,
14.4,
14.3,
13.0,
12.3,
12.8,
12.7,
10.8,
14.6,
13.3,
12.7,
12.9,
15.7,
11.4,
12.0,
12.2,
11.8,
13.1,
15.5,
11.3,
12.0,
12.8,
11.6,
12.4,
12.6,
17.4,
13.4,
11.3,
12.1,
12.7,
12.5,
13.2,
17.1,
15.9,
15.0,
13.2,
12.6,
11.8,
12.0,
14.0,
13.7,
16.1,
19.8,
15.7,
13.2,
13.6,
13.5,
11.7,
11.4,
15.4,
17.4,
15.3,
16.7,
16.5,
15.3,
18.8,
15.3,
15.6,
18.7,
17.4,
12.6,
12.9,
13.5,
13.6,
13.7,
15.1,
14.3,
21.3,
21.5,
23.2,
15.3,
19.1,
15.3,
14.8,
15.6,
14.6,
14.7,
13.7,
13.6,
13.5,
14.6,
15.6,
20.7,
25.1,
19.7,
12.5,
14.1,
13.3,
14.7,
20.5,
21.8,
16.2,
25.8,
26.6,
19.4,
21.1,
20.2,
20.9,
18.1,
26.3,
19.2,
16.1,
17.9,
21.1,
25.7,
23.7,
19.7,
16.8,
14.3,
14.2,
20.5,
22.2,
15.4,
17.2,
18.0,
20.5,
23.8,
24.1,
25.8,
19.4,
18.3,
22.3,
17.4,
20.2,
27.5,
23.9,
22.8,
22.7,
21.5,
21.9,
30.3,
17.7,
26.8,
28.3,
24.2,
18.2,
21.8,
17.8,
16.1,
18.7,
17.1,
15.7,
20.8,
26.2,
26.2,
21.3,
20.5,
20.7,
19.9,
19.1,
21.7,
25.6,
23.4,
26.6,
23.5,
25.0,
25.8,
28.1,
27.8,
23.2,
21.0,
21.1,
18.3,
19.6,
22.2,
30.6,
23.1,
18.4,
22.4,
30.9,
25.2,
21.4,
20.4,
18.7,
22.0,
26.6,
20.4,
18.5,
22.5,
32.7,
27.8,
19.0,
21.0,
18.5,
22.2,
23.3,
19.8,
24.7,
32.4,
20.0,
18.2,
20.4,
25.5,
24.4,
21.5,
32.4,
27.2,
19.1,
19.7,
21.3,
20.3,
21.0,
22.6,
21.5,
24.2,
25.0,
20.6,
19.6,
20.0,
29.5,
35.5,
21.1,
23.7,
33.7,
21.8,
21.2,
23.7,
26.4,
21.8,
31.1,
38.3,
22.7,
21.5,
20.8,
30.0,
34.5,
36.5,
28.9,
27.2,
25.5,
22.3,
21.8,
22.4,
19.7,
19.4,
18.5,
24.0,
25.8,
25.5,
26.6,
34.5,
21.1,
20.4,
24.9,
21.9,
25.5,
38.0,
36.4,
19.0,
18.4,
17.7,
21.3,
25.7,
30.5,
32.6,
23.0,
20.5,
21.8,
26.4,
25.8,
36.7,
27.2,
27.7,
20.4,
18.8,
22.6,
32.7,
22.8,
24.4,
32.6,
29.1,
18.3,
19.6,
28.6,
26.8,
30.7,
16.8,
16.1,
18.2,
19.3,
19.8,
26.8,
17.5,
18.2,
24.9,
27.3,
24.8,
18.0,
16.8,
14.6,
18.7,
18.8,
19.5,
21.1,
20.0,
18.5,
17.0,
16.2,
16.7,
14.9,
15.5,
15.3,
15.7,
17.3,
17.1,
16.2,
15.4,
15.0,
19.0,
19.7,
19.7,
15.1,
16.4,
21.7,
19.5,
17.2,
16.4,
18.0,
19.5,
16.2,
16.0,
14.4,
15.4,
18.2,
13.8,
16.1,
13.7,
16.8,
15.9,
16.7,
14.6,
15.2,
14.0,
14.1,
16.0,
16.7,
15.2,
13.3,
14.0,
15.9,
14.5,
14.2,
13.0,
13.7,
14.7,
14.6,
13.9,
14.2,
16.8,
14.7,
12.1,
10.2,
10.8,
11.9,
11.8,
12.0,
12.7,
13.6,
13.0,
13.1,
11.1,
12.8,
13.3,
11.7,
14.3,
15.4,
14.2,
13.0,
13.1,
12.4,
11.2,
9.2,
12.4,
12.7,
13.7,
12.7,
15.0,
16.4,
14.0,
13.2,
13.6,
15.4,
13.8,
15.0,
14.5,
14.8,
14.0,
8.2,
10.0,
11.2,
13.3,
13.7,
15.0,
12.5,
15.3,
16.4,
17.6,
15.0,
14.8,
13.8,
14.8,
16.6,
17.3,
12.2,
12.1,
13.9,
16.8,
16.8,
14.5,
15.1,
13.8,
13.0,
14.5,
18.0,
20.2,
21.0,
16.5,
12.2,
15.0,
15.7,
17.7,
18.9,
14.9,
11.4,
15.4,
15.2,
17.6,
17.2,
21.6,
18.7,
13.9,
13.3,
14.6,
16.5,
17.7,
13.6,
15.2,
15.5,
16.5,
18.8,
13.6,
14.0,
13.8,
16.0,
19.3,
16.9,
14.3,
16.7,
21.3,
19.4,
18.7,
14.7,
16.2,
15.5,
23.4,
22.0,
16.7,
14.4,
16.4,
19.6,
18.3,
14.4,
14.8,
17.1,
15.2,
19.5,
21.1,
21.7,
14.6,
18.2,
17.8,
23.5,
20.8,
15.7,
16.1,
21.0,
15.0,
16.0,
16.0,
14.8,
14.6,
16.1,
21.6,
23.8,
27.0,
20.7,
19.7,
20.0,
28.8,
30.0,
33.1,
18.9,
16.3,
19.0,
18.3,
21.2,
16.5,
16.7,
19.5,
29.1,
19.7,
22.6,
20.7,
14.0,
15.5,
14.8,
19.2,
27.4,
18.5,
18.5,
31.5,
16.7,
15.7,
16.7,
22.0,
25.7,
29.2,
27.7,
29.4,
19.9,
27.8,
31.2,
22.8,
17.6,
19.4,
27.5,
18.4,
17.3,
17.5,
17.3,
21.9,
21.1,
17.9,
19.6,
23.6,
18.2,
20.0,
17.5,
18.9,
18.7,
28.9,
21.6,
20.7,
21.8,
21.9,
35.0,
26.0,
20.0,
33.6,
20.1,
23.5,
23.6,
24.5,
27.0,
17.0,
18.9,
21.8,
27.5,
22.8,
26.5,
22.7,
22.5,
37.8,
22.0,
27.6,
21.2,
22.6,
18.7,
17.5,
17.7,
30.3,
25.6,
32.7,
22.9,
21.0,
22.5,
25.7,
37.8,
21.3,
38.7,
21.5,
18.8,
21.7,
30.6,
40.0,
20.8,
23.7,
22.0,
20.5,
23.3,
34.2,
36.7,
21.3,
28.7,
27.6,
28.8,
37.0,
21.7,
22.7,
18.5,
25.4,
28.7,
18.4,
24.3,
33.0,
16.5,
18.0,
22.1,
24.3,
24.7,
30.1,
22.7,
22.9,
16.4,
16.2,
19.0,
27.0,
21.7,
16.7,
17.7,
21.6,
33.8,
31.8,
32.0,
17.6,
20.2,
17.3,
17.0,
15.6,
17.1,
21.9,
20.1,
29.7,
20.7,
16.5,
15.8,
17.4,
19.4,
21.1,
19.9,
28.6,
29.7,
29.7,
19.9,
17.4,
18.6,
23.0,
25.1,
26.6,
19.0,
21.2,
16.5,
17.5,
18.2,
24.0,
22.8,
15.1,
16.1,
15.8,
17.0,
19.7,
24.9,
26.9,
27.9,
18.5,
17.0,
16.4,
20.8,
16.7,
15.8,
18.5,
16.5,
16.6,
17.4,
21.7,
22.7,
23.3,
22.5,
21.9,
17.9,
20.1,
16.0,
16.6,
15.6,
15.2,
14.3,
15.0,
14.5,
16.9,
18.3,
18.0,
13.5,
13.7,
10.9,
13.6,
15.2,
14.1,
13.2,
13.8,
14.6,
16.2,
14.5,
14.3,
15.6,
15.9,
19.3,
16.0,
20.3,
15.8,
16.2,
12.8,
15.0,
13.4,
13.2,
13.8,
15.7,
16.2,
14.9,
18.3,
15.3,
15.2,
14.2,
12.0,
13.7,
13.8,
12.3,
12.8,
13.1,
14.5,
14.0,
13.3,
15.7,
16.0,
16.2,
18.0,
16.3,
15.8,
13.3,
13.7,
11.5,
13.7,
12.3,
12.6,
12.7,
14.8,
14.9,
12.0,
12.8,
12.4,
13.0,
13.1,
12.9,
13.8,
15.2,
13.1,
11.1,
10.8,
13.1,
15.0,
15.7,
16.4,
14.1,
15.5,
14.8,
13.9,
15.4,
14.8,
10.9,
12.9,
12.8,
18.6,
14.6,
18.0,
18.5,
11.8,
13.3,
14.5,
14.0,
13.3,
15.0,
18.4,
13.6,
15.3,
16.1,
19.0,
15.5,
16.2,
14.9,
17.9,
10.1,
12.1,
13.1,
12.9,
15.4,
15.6,
14.0,
14.2,
17.4,
14.0,
13.6,
15.7,
14.0,
15.0,
16.4,
15.1,
17.1,
18.9,
23.8,
25.9,
15.0,
17.0,
19.0,
26.5,
26.9,
23.7,
30.7,
29.6,
25.9,
20.2,
14.8,
13.9,
15.7,
22.8,
18.7,
17.0,
19.0,
14.1,
17.6,
19.0,
15.8,
12.9,
14.5,
15.9,
21.9,
24.1,
19.4,
15.4,
17.6,
20.5,
19.4,
18.0,
16.8,
15.8,
16.0,
22.1,
26.8,
26.3,
17.5,
19.0,
30.3,
32.7,
35.0,
36.1,
18.1,
18.1,
16.8,
18.6,
20.5,
27.8,
18.8,
16.8,
17.2,
19.6,
18.8,
18.5,
19.8,
30.5,
28.3,
32.6,
30.1,
19.8,
19.9,
23.8,
24.3,
17.4,
18.2,
18.4,
16.9,
24.0,
32.8,
18.9,
15.5,
16.0,
16.6,
20.8,
19.6,
18.5,
17.6,
20.8,
21.5,
25.0,
21.2,
22.6,
25.6,
25.1,
32.5,
27.8,
21.5,
20.7,
19.9,
18.4,
22.3,
24.6,
25.6,
19.0,
18.7,
29.2,
31.8,
18.4,
19.2,
30.8,
32.2,
22.0,
21.4,
20.0,
20.6,
22.2,
30.0,
40.4,
26.3,
39.0,
30.6,
39.2,
37.8,
28.8,
30.8,
38.7,
25.2,
20.6,
21.0,
25.6,
36.2,
24.9,
26.2,
20.0,
19.7,
20.3,
25.1,
29.1,
37.5,
39.2,
33.7,
19.5,
19.9,
21.0,
20.3,
22.6,
32.9,
21.3,
19.5,
19.2,
22.0,
27.2,
32.2,
25.4,
25.0,
25.2,
26.7,
25.3,
29.9,
27.6,
20.6,
21.0,
21.6,
24.0,
23.8,
21.9,
29.4,
37.4,
21.5,
21.1,
21.0,
36.3,
28.7,
19.4,
20.3,
20.8,
30.5,
30.3,
23.4,
25.7,
15.7,
19.3,
18.8,
17.0,
17.6,
18.5,
21.0,
26.9,
30.6,
32.7,
29.2,
25.8,
28.2,
24.7,
30.6,
30.6,
29.0,
26.5,
25.5,
24.9,
20.9,
23.4,
18.1,
20.7,
20.3,
21.1,
19.5,
20.4,
17.3,
18.5,
21.0,
23.5,
25.7,
23.1,
24.8,
22.4,
17.4,
18.3,
26.0,
25.5,
21.8,
21.2,
25.2,
21.7,
22.6,
24.3,
17.0,
20.7,
22.7,
25.4,
24.9,
25.0,
20.5,
20.2,
19.4,
21.4,
25.5,
19.4,
18.8,
17.0,
15.5,
16.2,
16.7,
20.1,
21.4,
16.4,
17.3,
17.0,
18.2,
17.5,
14.9,
18.0,
16.7,
17.1,
16.8,
17.7,
18.4,
18.5,
15.5,
15.4,
17.5,
16.8,
18.9,
19.4,
20.3,
19.3,
17.0,
16.4,
13.3,
13.6,
14.1,
14.8,
14.1,
17.0,
16.0,
16.5,
14.9,
14.4,
13.5,
11.3,
15.0,
13.0,
14.9,
14.6,
14.1,
15.5,
14.4,
12.0,
17.5,
13.3,
14.3,
14.0,
13.5,
13.3,
12.3,
12.6,
14.1,
16.6,
17.3,
17.1,
16.1,
15.0,
15.4,
14.6,
14.6,
16.8,
16.5,
15.6,
14.8,
15.2,
15.2,
15.0,
14.1,
15.5,
12.6,
12.9,
15.1,
13.0,
15.6,
13.4,
14.4,
14.5,
15.4,
16.7,
16.1,
16.6,
16.5,
13.8,
13.4,
11.9,
10.7,
13.2,
13.1,
12.8,
16.2,
18.0,
18.9,
18.3,
19.8,
19.3,
16.2,
17.6,
17.5,
13.2,
13.2,
13.4,
14.2,
16.1,
14.2,
14.3,
14.5,
17.1,
18.2,
22.2,
22.4,
15.6,
16.6,
14.5,
17.7,
21.0,
14.3,
20.0,
20.2,
19.2,
19.8,
14.8,
14.2,
17.2,
17.2,
22.1,
15.4,
16.1,
14.8,
15.1,
20.2,
22.3,
24.6,
18.4,
18.2,
23.4,
23.8,
20.1,
20.5,
19.5,
18.8,
19.8,
26.2,
21.2,
21.2,
17.3,
20.5,
19.2,
18.0,
17.7,
23.7,
16.9,
23.2,
22.3,
18.7,
24.0,
16.4,
18.8,
19.0,
26.7,
19.3,
19.5,
21.2,
17.0,
24.4,
29.0,
19.2,
24.6,
23.4,
22.6,
21.1,
29.1,
36.1,
23.1,
16.5,
16.2,
21.3,
25.4,
26.4,
21.1,
21.1,
19.7,
31.2,
34.5,
22.9,
13.5,
16.2,
15.3,
19.2,
29.4,
27.7,
19.6,
16.8,
19.9,
20.4,
18.3,
16.7,
16.9,
18.9,
17.6,
18.6,
21.4,
28.7,
28.0,
22.8,
17.2,
18.7,
19.9,
23.0,
30.8,
31.7,
20.8,
20.8,
25.2,
31.3,
32.1,
34.3,
33.7,
28.6,
19.2,
20.9,
21.1,
26.1,
26.4,
36.8,
26.3,
17.9,
21.3,
19.5,
20.1,
21.7,
19.5,
24.8,
30.2,
31.6,
19.0,
21.3,
20.6,
22.8,
33.2,
24.7,
21.3,
17.9,
18.7,
25.3,
31.0,
33.2,
31.0,
18.2,
19.1,
22.2,
20.3,
19.8,
18.3,
27.5,
29.8,
31.0,
32.9,
35.5,
36.0,
36.6,
21.7,
19.8,
26.7,
22.9,
19.8,
24.9,
24.8,
28.8,
32.7,
33.7,
24.7,
21.6,
26.2,
33.4,
22.9,
31.2,
23.1,
24.4,
30.7,
38.8,
21.5,
20.7,
20.3,
21.3,
30.0,
37.1,
21.1,
22.0,
34.4,
33.8,
37.8,
38.4,
32.8,
34.4,
26.4,
27.3,
28.0,
30.7,
30.7,
23.4,
26.1,
26.1,
21.8,
18.9,
18.5,
17.5,
21.5,
25.1,
21.9,
24.2,
24.2,
27.4,
21.3,
18.0,
18.7,
20.0,
20.9,
19.3,
19.3,
22.2,
17.7,
21.9,
25.2,
25.1,
28.5,
27.6,
19.6,
18.5,
19.2,
20.4,
22.8,
23.0,
20.3,
18.0,
18.0,
20.2,
18.1,
19.2,
17.8,
19.8,
20.3,
18.3,
18.3,
20.2,
22.0,
24.3,
24.7,
25.4,
19.9,
23.0,
21.8,
19.3,
18.5,
20.6,
17.2,
17.5,
19.1,
18.0,
18.9,
23.4,
19.3,
19.2,
21.2,
21.5,
17.2,
16.8,
16.0,
18.9,
20.6,
15.9,
16.7,
18.4,
16.2,
16.2,
15.8,
16.2,
18.3,
18.5,
19.4,
17.0,
14.6,
11.0,
12.4,
14.0,
10.8,
13.1,
15.0,
13.0,
17.5,
16.0,
10.2,
13.7,
15.4,
13.6,
12.9,
14.2,
15.0,
15.2,
18.2,
12.3,
13.8,
13.9,
14.6,
14.9,
12.6,
11.2,
12.9,
14.1,
12.8,
15.8,
16.0,
16.0,
16.6,
16.6,
16.0,
12.7,
12.3,
11.8,
14.1,
12.6,
12.9,
11.5,
12.9,
11.8,
11.9,
14.2,
12.0,
11.7,
11.8,
13.0,
11.4,
11.0,
12.3,
13.1,
11.4,
16.0,
15.3,
13.2,
15.9,
16.0,
19.6,
15.2,
10.6,
14.8,
15.8,
14.1,
14.4,
10.7,
13.2,
13.0,
13.2,
16.0,
13.2,
11.8,
11.6,
13.4,
15.2,
15.5,
13.5,
14.0,
18.5,
14.9,
13.9,
15.9,
11.3,
12.2,
13.8,
13.8,
11.3,
13.0,
13.6,
13.2,
16.0,
17.2,
14.7,
15.5,
15.2,
16.0,
16.6,
17.8,
19.8,
20.6,
19.0,
19.5,
12.6,
14.3,
14.9,
13.7,
14.2,
18.4,
15.4,
18.7,
16.9,
16.5,
20.0,
17.2,
15.8,
20.0,
14.3,
14.2,
14.7,
22.4,
25.3,
21.5,
15.0,
13.8,
15.6,
22.8,
18.0,
16.6,
20.0,
17.5,
18.1,
17.1,
18.4,
18.8,
21.8,
19.7,
26.1,
18.7,
20.8,
29.7,
21.0,
17.3,
16.8,
19.4,
16.8,
21.1,
20.4,
26.1,
15.0,
17.0,
24.0,
22.5,
16.3,
17.9,
17.7,
18.4,
25.6,
28.3,
15.1,
16.8,
19.6,
15.8,
16.9,
17.8,
18.2,
19.2,
19.0,
18.4,
22.0,
16.4,
16.7,
17.4,
20.6,
30.4,
30.3,
35.5,
18.8,
24.7,
27.3,
29.4,
32.5,
33.9,
25.0,
33.6,
25.7,
19.0,
18.4,
22.1,
25.2,
23.9,
28.7,
31.1,
29.1,
25.6,
23.2,
19.0,
17.1,
18.0,
20.0,
32.9,
32.1,
25.2,
18.5,
18.9,
20.8,
23.7,
28.0,
35.2,
23.2,
18.0,
19.8,
31.9,
21.5,
33.0,
21.6,
31.2,
36.6,
19.6,
23.1,
21.9,
21.1,
27.6,
27.3,
30.1,
33.5,
24.2,
21.4,
20.4,
20.3,
18.5,
29.5,
25.4,
25.2,
21.6,
21.8,
25.4,
36.6,
36.6,
28.7,
27.2,
25.5,
23.6,
25.6,
35.9,
19.1,
15.0,
20.7,
21.9,
18.2,
21.9,
25.3,
29.6,
29.9,
24.4,
20.2,
23.5,
26.0,
25.9,
27.4,
31.5,
35.8,
23.9,
27.8,
26.0,
31.1,
35.2,
27.8,
21.3,
21.5,
23.0,
24.4,
27.4,
23.8,
25.2,
28.5,
25.3,
25.6,
24.1,
19.3,
22.7,
31.2,
25.8,
21.5,
20.3,
18.9,
25.2,
26.8,
19.2,
21.5,
30.8,
18.6,
19.8,
21.3,
21.3,
29.9,
32.0,
32.4,
32.5,
32.6,
31.5,
21.0,
20.6,
19.7,
19.7,
20.1,
17.8,
22.1,
28.8,
26.8,
27.1,
19.4,
19.1,
22.5,
26.0,
22.2,
28.4,
25.2,
20.9,
17.6,
17.2,
17.2,
18.7,
18.6,
21.8,
16.7,
17.9,
20.8,
21.2,
17.5,
16.6,
17.5,
18.1,
18.5,
19.8,
23.0,
21.2,
21.2,
23.1,
20.2,
18.1,
16.2,
18.7,
21.2,
19.3,
21.8,
19.9,
13.6,
14.7,
18.0,
19.3,
17.7,
14.6,
14.5,
14.4,
15.4,
14.5,
16.2,
14.2,
13.2,
15.2,
17.4,
13.7,
16.8,
17.7,
16.2,
13.2,
16.3,
16.3,
14.6,
13.9,
16.0,
18.4,
19.0,
17.4,
16.2,
16.3,
16.8,
14.6,
14.2,
13.3,
14.0,
11.7,
12.9,
12.7,
14.6,
15.1,
16.1,
16.2,
13.0,
12.4,
12.6,
9.7,
11.8,
15.8,
13.6,
8.9,
12.2,
13.2,
15.2,
12.6,
12.9,
15.9,
15.4,
15.0,
14.2,
15.7,
14.4,
14.7,
14.5,
15.5,
19.2,
18.2,
13.9,
15.0,
15.8,
15.7,
15.6,
17.7,
18.8,
15.2,
14.5,
13.5,
13.4,
12.4,
15.5,
13.2,
14.0,
14.1,
15.1,
17.2,
15.1,
14.5,
13.6,
13.1,
13.1,
13.3,
14.8,
15.0,
16.4,
15.3,
13.8,
13.5,
12.7,
13.1,
15.8,
16.0,
14.4,
11.9,
13.2,
12.0,
11.6,
12.1,
17.3,
17.0,
14.7,
17.8,
13.7,
16.6,
14.5,
13.6,
15.8,
16.4,
16.8,
14.6,
15.7,
19.7,
20.7,
18.6,
13.7,
11.9,
14.0,
14.6,
13.4,
16.6,
21.0,
23.2,
23.9,
17.8,
16.2,
22.1,
24.3,
19.0,
16.5,
17.1,
17.3,
16.8,
21.7,
20.1,
15.0,
14.8,
19.8,
24.0,
14.0,
15.3,
14.8,
16.8,
20.8,
12.9,
14.3,
18.9,
22.6,
24.7,
19.6,
20.2,
23.9,
15.1,
17.1,
15.3,
16.0,
16.8,
19.8,
27.2,
29.1,
31.4,
33.2,
33.7,
19.6,
31.8,
22.4,
21.4,
22.0,
21.6,
30.7,
30.2,
32.6,
26.6,
22.0,
16.3,
17.2,
25.5,
32.1,
18.4,
21.0,
16.8,
15.6,
18.5,
21.5,
28.1,
31.4,
20.9,
18.7,
19.9,
21.9,
19.1,
28.5,
26.2,
20.1,
19.9,
22.2,
31.4,
24.1,
35.2,
36.9,
33.2,
35.7,
21.1,
21.6,
18.6,
18.8,
18.7,
21.6,
22.4,
20.9,
23.3,
34.4,
20.7,
24.2,
22.2,
23.5,
30.3,
19.8,
23.3,
29.3,
20.9,
37.6,
23.2,
25.1,
24.4,
24.6),f=365,s=1981)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrixpls.sim.R
\name{matrixpls.sim}
\alias{matrixpls.sim}
\title{Monte Carlo simulations with matrixpls}
\usage{
matrixpls.sim(
nRep = NULL,
model = NULL,
n = NULL,
...,
cilevel = 0.95,
citype = c("norm", "basic", "stud", "perc", "bca"),
boot.R = 500,
fitIndices = fitSummary,
outfundata = NULL,
outfun = NULL,
prefun = NULL
)
}
\arguments{
\item{nRep}{Number of replications. If any of the \code{n}, \code{pmMCAR}, or \code{pmMAR} arguments are specified as lists, the number of replications will default to the length of the list(s), and \code{nRep} need not be specified.}
\item{model}{There are two options for this argument: 1. lavaan script or lavaan parameter
table, or 2. a list containing three matrices
\code{inner}, \code{reflective}, and \code{formative} defining the free regression paths
in the model.}
\item{n}{
Sample size(s). In single-group models, either a single \code{integer}, or a vector of integers to vary sample size across replications. In multigroup models, either a \code{list} of single integers (for constant group sizes across replications) or a \code{list} of vectors (to vary group sizes across replications).
%The \code{n} argument can also be specified as a random distribution object.
Any non-integers will be rounded.
}
\item{...}{All other arguments are passed through to \code{\link[simsem]{sim}},
\code{\link{matrixpls.boot}}, or \code{\link{matrixpls}}.}
\item{cilevel}{Confidence level. This argument will be forwarded to the \code{\link[boot]{boot.ci}} when calculating the confidence intervals.}
\item{citype}{Type of confidence interval. This argument will be forwarded to the \code{\link[boot]{boot.ci}} when calculating the confidence intervals.}
\item{boot.R}{Number of bootstrap replications to use to estimate standard errors or \code{FALSE} to disable bootstrapping.}
\item{fitIndices}{A function that returns a list of fit indices for the model. Setting this argument to \code{NULL} disables fit indices.}
\item{outfundata}{A function to be applied to the \code{matrixpls} output and the
generated data after each replication. Users can get the characteristics of the
generated data and also compare the characteristics with the generated output.
The output from this function in each replication will be saved in the
simulation output (SimResult), and can be obtained using the getExtraOutput function.}
\item{outfun}{A function to be applied to the \code{matrixpls} output at each replication.
Output from this function in each replication will be saved in the simulation
output (SimResult), and can be obtained using the getExtraOutput function.}
\item{prefun}{A function to be applied to the dataset before each replication. The output of this
function is passed as arguments to \code{\link{matrixpls}}}
}
\value{
An object of class \code{\link[simsem]{SimResult-class}}.
}
\description{
Performs Monte Carlo simulations of \code{\link{matrixpls}} with the \code{\link[simsem]{sim}} function of the \code{simsem} package.
The standard errors and confidence intervals are estimated with the \code{\link[boot]{boot}} and \code{\link[boot]{boot.ci}} functions
of the \code{boot} package.
}
\details{
This function calls the \code{\link[simsem]{sim}} function from the \code{simsem} package to perform Monte
Carlo simulations with matrixpls. The function parses the model parameters and replaces it with
a function call that estimates the model and bootstrapped standard errors and confidence
intervals with \link{matrixpls.boot}.
If the \code{generate} or \code{rawdata} arguments are not specified in the \code{\link[simsem]{sim}} arguments
then the \code{model} argument will be used for data generation and must be specified in lavaan format.
}
\examples{
if(!require(simsem)){
print("This example requires the simsem package")
} else{
#
# Runs the second model from
#
# Aguirre-Urreta, M., & Marakas, G. (2013). Partial Least Squares and Models with Formatively
# Specified Endogenous Constructs: A Cautionary Note. Information Systems Research.
# doi:10.1287/isre.2013.0493
library(MASS)
X <- diag(15)
X[upper.tri(X, diag=TRUE)] <- c(
1.000,
0.640, 1.000,
0.640, 0.640, 1.000,
0.640, 0.640, 0.640, 1.000,
0.096, 0.096, 0.096, 0.096, 1.000,
0.096, 0.096, 0.096, 0.096, 0.640, 1.000,
0.096, 0.096, 0.096, 0.096, 0.640, 0.640, 1.000,
0.096, 0.096, 0.096, 0.096, 0.640, 0.640, 0.640, 1.000,
0.115, 0.115, 0.115, 0.115, 0.192, 0.192, 0.192, 0.192, 1.000,
0.115, 0.115, 0.115, 0.115, 0.192, 0.192, 0.192, 0.192, 0.640, 1.000,
0.115, 0.115, 0.115, 0.115, 0.192, 0.192, 0.192, 0.192, 0.640, 0.640,
1.000,
0.115, 0.115, 0.115, 0.115, 0.192, 0.192, 0.192, 0.192, 0.640, 0.640,
0.640, 1.000,
0.000, 0.000, 0.000, 0.000, 0.271, 0.271, 0.271, 0.271, 0.325, 0.325,
0.325, 0.325, 1.000,
0.000, 0.000, 0.000, 0.000, 0.271, 0.271, 0.271, 0.271, 0.325, 0.325,
0.325, 0.325, 0.300, 1.000,
0.000, 0.000, 0.000, 0.000, 0.271, 0.271, 0.271, 0.271, 0.325, 0.325,
0.325, 0.325, 0.300, 0.300, 1.000
)
X <- X + t(X) - diag(diag(X))
colnames(X) <- rownames(X) <- c(paste("Y",1:12,sep=""),paste("X",1:3,sep=""))
# Print the population covariance matrix X to see that it is correct
X
# The estimated model in Lavaan syntax
analyzeModel <- "
ksi =~ Y1 + Y2 + Y3 + Y4
eta1 <~ X1 + X2 + X3
eta2 =~ Y5 + Y6 + Y7 + Y8
eta3 =~ Y9 + Y10 + Y11 + Y12
eta1 ~ ksi
eta2 ~ eta1
eta3 ~ eta1
"
# Only run 100 replications without bootstrap replications each so that the
# example runs faster. Generate the data outside simsem because simsem
# does not support drawing samples from population matrix
dataSets <- lapply(1:100, function(x){
mvrnorm(300, # Sample size
rep(0,15), # Means
X) # Population covarancematrix
})
Output <- matrixpls.sim(model = analyzeModel, rawData = dataSets, boot.R=FALSE,
multicore = FALSE, stopOnError = TRUE)
summary(Output)
}
}
\seealso{
\code{\link{matrixpls}}, \code{\link{matrixpls.boot}}, \code{\link[simsem]{sim}}, \code{\link[simsem]{SimResult-class}}
}
| /man/matrixpls.sim.Rd | no_license | mronkko/matrixpls | R | false | true | 6,182 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrixpls.sim.R
\name{matrixpls.sim}
\alias{matrixpls.sim}
\title{Monte Carlo simulations with matrixpls}
\usage{
matrixpls.sim(
nRep = NULL,
model = NULL,
n = NULL,
...,
cilevel = 0.95,
citype = c("norm", "basic", "stud", "perc", "bca"),
boot.R = 500,
fitIndices = fitSummary,
outfundata = NULL,
outfun = NULL,
prefun = NULL
)
}
\arguments{
\item{nRep}{Number of replications. If any of the \code{n}, \code{pmMCAR}, or \code{pmMAR} arguments are specified as lists, the number of replications will default to the length of the list(s), and \code{nRep} need not be specified.}
\item{model}{There are two options for this argument: 1. lavaan script or lavaan parameter
table, or 2. a list containing three matrices
\code{inner}, \code{reflective}, and \code{formative} defining the free regression paths
in the model.}
\item{n}{
Sample size(s). In single-group models, either a single \code{integer}, or a vector of integers to vary sample size across replications. In multigroup models, either a \code{list} of single integers (for constant group sizes across replications) or a \code{list} of vectors (to vary group sizes across replications).
%The \code{n} argument can also be specified as a random distribution object.
Any non-integers will be rounded.
}
\item{...}{All other arguments are passed through to \code{\link[simsem]{sim}},
\code{\link{matrixpls.boot}}, or \code{\link{matrixpls}}.}
\item{cilevel}{Confidence level. This argument will be forwarded to the \code{\link[boot]{boot.ci}} when calculating the confidence intervals.}
\item{citype}{Type of confidence interval. This argument will be forwarded to the \code{\link[boot]{boot.ci}} when calculating the confidence intervals.}
\item{boot.R}{Number of bootstrap replications to use to estimate standard errors or \code{FALSE} to disable bootstrapping.}
\item{fitIndices}{A function that returns a list of fit indices for the model. Setting this argument to \code{NULL} disables fit indices.}
\item{outfundata}{A function to be applied to the \code{matrixpls} output and the
generated data after each replication. Users can get the characteristics of the
generated data and also compare the characteristics with the generated output.
The output from this function in each replication will be saved in the
simulation output (SimResult), and can be obtained using the getExtraOutput function.}
\item{outfun}{A function to be applied to the \code{matrixpls} output at each replication.
Output from this function in each replication will be saved in the simulation
output (SimResult), and can be obtained using the getExtraOutput function.}
\item{prefun}{A function to be applied to the dataset before each replication. The output of this
function is passed as arguments to \code{\link{matrixpls}}}
}
\value{
An object of class \code{\link[simsem]{SimResult-class}}.
}
\description{
Performs Monte Carlo simulations of \code{\link{matrixpls}} with the \code{\link[simsem]{sim}} function of the \code{simsem} package.
The standard errors and confidence intervals are estimated with the \code{\link[boot]{boot}} and \code{\link[boot]{boot.ci}} functions
of the \code{boot} package.
}
\details{
This function calls the \code{\link[simsem]{sim}} function from the \code{simsem} package to perform Monte
Carlo simulations with matrixpls. The function parses the model parameters and replaces it with
a function call that estimates the model and bootstrapped standard errors and confidence
intervals with \link{matrixpls.boot}.
If the \code{generate} or \code{rawdata} arguments are not specified in the \code{\link[simsem]{sim}} arguments
then the \code{model} argument will be used for data generation and must be specified in lavaan format.
}
\examples{
if(!require(simsem)){
print("This example requires the simsem package")
} else{
#
# Runs the second model from
#
# Aguirre-Urreta, M., & Marakas, G. (2013). Partial Least Squares and Models with Formatively
# Specified Endogenous Constructs: A Cautionary Note. Information Systems Research.
# doi:10.1287/isre.2013.0493
library(MASS)
X <- diag(15)
X[upper.tri(X, diag=TRUE)] <- c(
1.000,
0.640, 1.000,
0.640, 0.640, 1.000,
0.640, 0.640, 0.640, 1.000,
0.096, 0.096, 0.096, 0.096, 1.000,
0.096, 0.096, 0.096, 0.096, 0.640, 1.000,
0.096, 0.096, 0.096, 0.096, 0.640, 0.640, 1.000,
0.096, 0.096, 0.096, 0.096, 0.640, 0.640, 0.640, 1.000,
0.115, 0.115, 0.115, 0.115, 0.192, 0.192, 0.192, 0.192, 1.000,
0.115, 0.115, 0.115, 0.115, 0.192, 0.192, 0.192, 0.192, 0.640, 1.000,
0.115, 0.115, 0.115, 0.115, 0.192, 0.192, 0.192, 0.192, 0.640, 0.640,
1.000,
0.115, 0.115, 0.115, 0.115, 0.192, 0.192, 0.192, 0.192, 0.640, 0.640,
0.640, 1.000,
0.000, 0.000, 0.000, 0.000, 0.271, 0.271, 0.271, 0.271, 0.325, 0.325,
0.325, 0.325, 1.000,
0.000, 0.000, 0.000, 0.000, 0.271, 0.271, 0.271, 0.271, 0.325, 0.325,
0.325, 0.325, 0.300, 1.000,
0.000, 0.000, 0.000, 0.000, 0.271, 0.271, 0.271, 0.271, 0.325, 0.325,
0.325, 0.325, 0.300, 0.300, 1.000
)
X <- X + t(X) - diag(diag(X))
colnames(X) <- rownames(X) <- c(paste("Y",1:12,sep=""),paste("X",1:3,sep=""))
# Print the population covariance matrix X to see that it is correct
X
# The estimated model in Lavaan syntax
analyzeModel <- "
ksi =~ Y1 + Y2 + Y3 + Y4
eta1 <~ X1 + X2 + X3
eta2 =~ Y5 + Y6 + Y7 + Y8
eta3 =~ Y9 + Y10 + Y11 + Y12
eta1 ~ ksi
eta2 ~ eta1
eta3 ~ eta1
"
# Only run 100 replications without bootstrap replications each so that the
# example runs faster. Generate the data outside simsem because simsem
# does not support drawing samples from population matrix
dataSets <- lapply(1:100, function(x){
mvrnorm(300, # Sample size
rep(0,15), # Means
X) # Population covarancematrix
})
Output <- matrixpls.sim(model = analyzeModel, rawData = dataSets, boot.R=FALSE,
multicore = FALSE, stopOnError = TRUE)
summary(Output)
}
}
\seealso{
\code{\link{matrixpls}}, \code{\link{matrixpls.boot}}, \code{\link[simsem]{sim}}, \code{\link[simsem]{SimResult-class}}
}
|
#KNN zoo prob
#KNN algos p2
# predicting glass
# Read the dataset
zoo1 <- read.csv(file.choose())
View(zoo1)
#First colum in dataset is id which is not required so we will be taking out
zoo<-zoo1[-1]
table(zoo$type)
#factorizing the type
zoo$Type <- factor(zoo$type)
# table or proportation of enteries in the datasets. What % of entry is Bengin and % of entry is Malignant
round(prop.table(table(zoo$type))*100,1)
#Create a function to normalize the data
# norm <- function(x){
# return((x-min(x))/(max(x)-min(x)))
# }
train_index <- sample(1:nrow(zoo), 0.7 * nrow(zoo))
test_index <- setdiff(1:nrow(zoo), train_index)
train<- zoo[train_index,-10]
train_label<-zoo[train_index,"Type"]
test<-zoo[test_index,-10]
test_label<-zoo[test_index,"Type"]
train_n<-train[1:17]
test_n<-test[1:17]
# Build a KNN model on taining dataset
library("class")
# Building the KNN model on training dataset and also need labels which we are including c1. Once we build the preduction model
# we have to test on test dataset
test_acc <- NULL
train_acc <- NULL
for (i in seq(3,200,2))
{
train_zoo_pred <- knn(train=train_n,test=train_n,cl=train_label,k=i)
train_acc <- c(train_acc,mean(train_zoo_pred==train_label))
t_a<-c()
test_zoo_pred <- knn(train = train_n, test = test_n, cl = train_label, k=i)
test_acc <- c(test_acc,mean(test_zoo_pred==test_label))
}
# Testing Accuracy
# Plotting 2 different graphs on same window
par(mfrow=c(1,2)) # c(1,2) => indicates 1 row and 2 columns
plot(seq(3,200,2),train_acc,type="l",main="Train_accuracy",col="blue")
plot(seq(3,200,2),test_acc,type="l",main="Test_accuracy",col="red")
acc_neigh_df <- data.frame(list(train_acc=train_acc,test_acc=test_acc,neigh=seq(3,200,2)))
# Plotting 2 different graphs on same co-ordinate axis
#install.packages("ggplot2")
library(ggplot2)
ggplot(acc_neigh_df,aes(x=neigh))+
geom_line(aes(y=train_acc,colour="train_acc"),lwd=1.5)+
geom_line(aes(y=test_acc,colour="test_acc"),lwd=1.5)+
scale_fill_manual(" ",breaks=c("train_acc","test_acc"),values = c("train_acc"="green","test_acc"="red"))
#from the graph max accuracy can be acheved at k = 4
zoo_pred_te <- knn(train = train_n, test = test_n, cl = train_label, k=4)
accuracy_test<- sum(zoo_pred_te==test_label)/31
zoo_pred_tr <- knn(train = train_n, test = train_n, cl = train_label, k=4)
accuracy_train<- sum(zoo_pred_tr==train_label)/70
accuracy_test
accuracy_train
| /KNN R/My code_ZOO.R | no_license | Smit131/Data-Science-Assignmnets-in-R-studio- | R | false | false | 2,496 | r | #KNN zoo prob
#KNN algos p2
# predicting glass
# Read the dataset
zoo1 <- read.csv(file.choose())
View(zoo1)
#First colum in dataset is id which is not required so we will be taking out
zoo<-zoo1[-1]
table(zoo$type)
#factorizing the type
zoo$Type <- factor(zoo$type)
# table or proportation of enteries in the datasets. What % of entry is Bengin and % of entry is Malignant
round(prop.table(table(zoo$type))*100,1)
#Create a function to normalize the data
# norm <- function(x){
# return((x-min(x))/(max(x)-min(x)))
# }
train_index <- sample(1:nrow(zoo), 0.7 * nrow(zoo))
test_index <- setdiff(1:nrow(zoo), train_index)
train<- zoo[train_index,-10]
train_label<-zoo[train_index,"Type"]
test<-zoo[test_index,-10]
test_label<-zoo[test_index,"Type"]
train_n<-train[1:17]
test_n<-test[1:17]
# Build a KNN model on taining dataset
library("class")
# Building the KNN model on training dataset and also need labels which we are including c1. Once we build the preduction model
# we have to test on test dataset
test_acc <- NULL
train_acc <- NULL
for (i in seq(3,200,2))
{
train_zoo_pred <- knn(train=train_n,test=train_n,cl=train_label,k=i)
train_acc <- c(train_acc,mean(train_zoo_pred==train_label))
t_a<-c()
test_zoo_pred <- knn(train = train_n, test = test_n, cl = train_label, k=i)
test_acc <- c(test_acc,mean(test_zoo_pred==test_label))
}
# Testing Accuracy
# Plotting 2 different graphs on same window
par(mfrow=c(1,2)) # c(1,2) => indicates 1 row and 2 columns
plot(seq(3,200,2),train_acc,type="l",main="Train_accuracy",col="blue")
plot(seq(3,200,2),test_acc,type="l",main="Test_accuracy",col="red")
acc_neigh_df <- data.frame(list(train_acc=train_acc,test_acc=test_acc,neigh=seq(3,200,2)))
# Plotting 2 different graphs on same co-ordinate axis
#install.packages("ggplot2")
library(ggplot2)
ggplot(acc_neigh_df,aes(x=neigh))+
geom_line(aes(y=train_acc,colour="train_acc"),lwd=1.5)+
geom_line(aes(y=test_acc,colour="test_acc"),lwd=1.5)+
scale_fill_manual(" ",breaks=c("train_acc","test_acc"),values = c("train_acc"="green","test_acc"="red"))
#from the graph max accuracy can be acheved at k = 4
zoo_pred_te <- knn(train = train_n, test = test_n, cl = train_label, k=4)
accuracy_test<- sum(zoo_pred_te==test_label)/31
zoo_pred_tr <- knn(train = train_n, test = train_n, cl = train_label, k=4)
accuracy_train<- sum(zoo_pred_tr==train_label)/70
accuracy_test
accuracy_train
|
library(dplyr)
library(readr)
results <-
read_csv("C:/Users/marcu/TRG/t2f/Analysis/qgis/revised/results.csv")
gt1 <- results %>% filter(groundtype == 1)
gt2 <- results %>% filter(groundtype == 2)
gt3 <- results %>% filter(groundtype == 3)
interventions <- c('softpads120', 'usp100', 'usp250', 'mat27')
item <-
c(
'within 200m',
'within 80m',
'<30dB(A)',
'>=30dB(A)',
'>=30 <35dB(A)',
'>=35 <40dB(A)',
'>=40 <45dB(A)',
'>=45 <50dB(A)',
'>=50 <55dB(A)',
'>=55 <60dB(A)',
'net dB(A) change (30dB and above)',
'value(£)2002'
)
### gt1
summary_gt1 <- as.data.frame(item, stringsAsFactors = FALSE)
summary_gt1[1, "reference"] <- sum(gt1$pop)
summary_gt1[2, "reference"] <- gt1 %>% filter(distance <= 80) %>%
summarise(sum(pop))
summary_gt1[3, "reference"] <- gt1 %>% filter(reference < 30) %>%
summarise(sum(pop))
summary_gt1[4, "reference"] <-
gt1 %>% filter(reference >= 30) %>%
summarise(sum(pop))
summary_gt1[5, "reference"] <-
gt1 %>% filter(reference >= 30 & reference < 35) %>%
summarise(sum(pop))
summary_gt1[6, "reference"] <-
gt1 %>% filter(reference >= 35 & reference < 40) %>%
summarise(sum(pop))
summary_gt1[7, "reference"] <-
gt1 %>% filter(reference >= 40 & reference < 45) %>%
summarise(sum(pop))
summary_gt1[8, "reference"] <-
gt1 %>% filter(reference >= 45 & reference < 50) %>%
summarise(sum(pop))
summary_gt1[9, "reference"] <-
gt1 %>% filter(reference >= 50 & reference < 55) %>%
summarise(sum(pop))
summary_gt1[10, "reference"] <-
gt1 %>% filter(reference >= 55 & reference < 60) %>%
summarise(sum(pop))
# This code is to determine the change in total noise level (increase or decrease) that occurs
# in each of the noise band. In this case the highest band predicted was 55-60, so don't need to
# consider beyond that.
# There really must be a better way of calculating this - perhaps ask a computer scientist!
for (name in interventions) {
temp <-
gt1 %>% select(pop, distance, reference, intervention = !!name)
# process decrease or no change
for (i in 1:nrow(temp)) {
ref <- temp$reference[i]
int <- temp$intervention[i]
# Only where a decrease or no change - intervention below reference
# Only interested where starting point is >=30
if (ref >= 30 & int <= ref) {
# if level is below 30 after intervention
# set level to 30 - not interested in improvements below 30
int <- ifelse(int >= 30, int, 30)
dif <- int - ref
if (ref >= 55 & int < 35) {
a <- 55 - ref
b <- -5
c <- -5
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 55 & int >= 35 & int < 40) {
a <- 55 - ref
b <- -5
c <- -5
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 55 & int >= 40 & int < 45) {
a <- 55 - ref
b <- -5
c <- -5
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 55 & int >= 45 & int < 50) {
a <- 55 - ref
b <- -5
c <- int - 50
d <- 0
e <- 0
f <- 0
} else if (ref >= 55 & int >= 50 & int < 55) {
a <- 55 - ref
b <- int - 55
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 55 & int >= 55 & int < 60) {
a <- int - ref
b <- 0
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 50 & int < 35) {
a <- 0
b <- 50 - ref
c <- -5
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 50 & int >= 35 & int < 40) {
a <- 0
b <- 50 - ref
c <- -5
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 50 & int >= 40 & int < 45) {
a <- 0
b <- 50 - ref
c <- -5
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 50 & int >= 45 & int < 50) {
a <- 0
b <- 50 - ref
c <- int - 50
d <- 0
e <- 0
f <- 0
} else if (ref >= 50 & int >= 50 & int < 55) {
a <- 0
b <- int - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 45 & int < 35) {
a <- 0
b <- 0
c <- 45 - ref
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 45 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 45 - ref
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 45 & int >= 40 & int < 45) {
a <- 0
b <- 0
c <- 45 - ref
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 45 & int >= 45 & int < 50) {
a <- 0
b <- 0
c <- int - ref
d <- 0
e <- 0
f <- 0
} else if (ref >= 40 & int < 35) {
a <- 0
b <- 0
c <- 0
d <- 40 - ref
e <- -5
f <- int - 35
} else if (ref >= 40 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 0
d <- 40 - ref
e <- int - 40
f <- 0
} else if (ref >= 40 & int >= 40 & int < 45) {
a <- 0
b <- 0
c <- 0
d <- int - ref
e <- 0
f <- 0
} else if (ref >= 35 & int < 35) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 35 - ref
f <- int - 35
} else if (ref >= 35 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - ref
f <- 0
} else {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 0
f <- int - ref
}
temp[i, "dif"] <- dif
temp[i, "t_60"] <- a
temp[i, "t_55"] <- b
temp[i, "t_50"] <- c
temp[i, "t_45"] <- d
temp[i, "t_40"] <- e
temp[i, "t_35"] <- f
temp[i, "check"] <- isTRUE(dif == a + b + c + d + e + f)
}
# process increases
# only interested where level is higher after intervention
# and only interested where level after intervention is >=30
if (int >= 30 & int > ref) {
# if the reference level is below 30 then raise it up to 30
# we are not interested in increases happening below 30
ref <- ifelse(ref >= 30, ref, 30)
dif <- int - ref
if (int >= 55 & ref < 35) {
a <- int - 55
b <- 5
c <- 5
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 55 & ref >= 35 & ref < 40) {
a <- int - 55
b <- 5
c <- 5
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 55 & ref >= 40 & ref < 45) {
a <- int - 55
b <- 5
c <- 5
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 55 & ref >= 45 & ref < 50) {
a <- int - 55
b <- 5
c <- 50 - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 55 & ref >= 50 & ref < 55) {
a <- int - 55
b <- 55 - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 55 & ref >= 55 & ref < 60) {
a <- int - ref
b <- 0
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 50 & ref < 35) {
a <- 0
b <- int - 50
c <- 5
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 50 & ref >= 35 & ref < 40) {
a <- 0
b <- int - 50
c <- 5
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 50 & ref >= 40 & ref < 45) {
a <- 0
b <- int - 50
c <- 5
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 50 & ref >= 45 & ref < 50) {
a <- 0
b <- int - 50
c <- 50 - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 50 & ref >= 50 & ref < 55) {
a <- 0
b <- int - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 45 & ref < 35) {
a <- 0
b <- 0
c <- int - 45
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 45 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- int - 45
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 45 & ref >= 40 & ref < 45) {
a <- 0
b <- 0
c <- int - 45
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 45 & ref >= 45 & ref < 50) {
a <- 0
b <- 0
c <- int - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 40 & ref < 35) {
a <- 0
b <- 0
c <- 0
d <- int - 40
e <- 5
f <- 35 - ref
} else if (int >= 40 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- 0
d <- int - 40
e <- 40 - ref
f <- 0
} else if (int >= 40 & ref >= 40 & ref < 45) {
a <- 0
b <- 0
c <- 0
d <- int - ref
e <- 0
f <- 0
} else if (int >= 35 & ref < 35) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - 35
f <- 35 - ref
} else if (int >= 35 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - ref
f <- 0
} else {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 0
f <- int - ref
}
temp[i, "dif"] <- dif
temp[i, "t_60"] <- a
temp[i, "t_55"] <- b
temp[i, "t_50"] <- c
temp[i, "t_45"] <- d
temp[i, "t_40"] <- e
temp[i, "t_35"] <- f
temp[i, "check"] <- isTRUE(dif == a + b + c + d + e + f)
if (nrow(filter(temp, check == FALSE)) > 0) {
warning(paste("error in ", name))
}
}
}
temp <-
temp %>% mutate(value = (t_35 * 5.8 * pop) + (t_40 * 11.4 * pop) + (t_45 * 17.0 * pop) + (t_50 * 22.6 * pop) + (t_55 * 28.1 * pop) + (t_60 * 33.7 * pop))
summary_gt1[1, paste(name)] <- sum(temp$pop)
summary_gt1[2, paste(name)] <- temp %>% filter(distance <= 80) %>%
summarise(sum(pop))
summary_gt1[3, paste(name)] <-
temp %>% filter(intervention < 30) %>%
summarise(sum(pop))
summary_gt1[4, paste(name)] <-
temp %>% filter(intervention >= 30) %>%
summarise(sum(pop))
summary_gt1[5, paste(name)] <-
temp %>% filter(intervention >= 30 & intervention < 35) %>%
summarise(sum(pop))
summary_gt1[6, paste(name)] <-
temp %>% filter(intervention >= 35 & intervention < 40) %>%
summarise(sum(pop))
summary_gt1[7, paste(name)] <-
temp %>% filter(intervention >= 40 & intervention < 45) %>%
summarise(sum(pop))
summary_gt1[8, paste(name)] <-
temp %>% filter(intervention >= 45 & intervention < 50) %>%
summarise(sum(pop))
summary_gt1[9, paste(name)] <-
temp %>% filter(intervention >= 50 & intervention < 55) %>%
summarise(sum(pop))
summary_gt1[10, paste(name)] <-
temp %>% filter(intervention >= 55 & intervention < 60) %>%
summarise(sum(pop))
summary_gt1[11, paste(name)] <-
temp %>% summarise(sum(dif * pop, na.rm = TRUE))
summary_gt1[12, paste(name)] <-
temp %>% summarise(sum(value, na.rm = TRUE))
}
summary_gt1 <- summary_gt1 %>% mutate_if(is.numeric, round, 2)
write.csv(file = "../qgis/revised/summary_gt1_new.csv", summary_gt1)
### gt2
summary_gt2 <- as.data.frame(item, stringsAsFactors = FALSE)
summary_gt2[1, "reference"] <- sum(gt2$pop)
summary_gt2[2, "reference"] <- gt2 %>% filter(distance <= 80) %>%
summarise(sum(pop))
summary_gt2[3, "reference"] <- gt2 %>% filter(reference < 30) %>%
summarise(sum(pop))
summary_gt2[4, "reference"] <-
gt2 %>% filter(reference >= 30) %>%
summarise(sum(pop))
summary_gt2[5, "reference"] <-
gt2 %>% filter(reference >= 30 & reference < 35) %>%
summarise(sum(pop))
summary_gt2[6, "reference"] <-
gt2 %>% filter(reference >= 35 & reference < 40) %>%
summarise(sum(pop))
summary_gt2[7, "reference"] <-
gt2 %>% filter(reference >= 40 & reference < 45) %>%
summarise(sum(pop))
summary_gt2[8, "reference"] <-
gt2 %>% filter(reference >= 45 & reference < 50) %>%
summarise(sum(pop))
summary_gt2[9, "reference"] <-
gt2 %>% filter(reference >= 50 & reference < 55) %>%
summarise(sum(pop))
summary_gt2[10, "reference"] <-
gt2 %>% filter(reference >= 55 & reference < 60) %>%
summarise(sum(pop))
for (name in interventions) {
temp <-
gt2 %>% select(pop, distance, reference, intervention = !!name)
# process decrease or no change
for (i in 1:nrow(temp)) {
ref <- temp$reference[i]
int <- temp$intervention[i]
# Only where a decrease or no change - intervention below reference
# Only interested where starting point is >=30
if (ref >= 30 & int <= ref) {
# if level is below 30 after intervention
# set level to 30 - not interested in improvements below 30
int <- ifelse(int >= 30, int, 30)
dif <- int - ref
if (ref >= 55 & int < 35) {
a <- 55 - ref
b <- -5
c <- -5
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 55 & int >= 35 & int < 40) {
a <- 55 - ref
b <- -5
c <- -5
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 55 & int >= 40 & int < 45) {
a <- 55 - ref
b <- -5
c <- -5
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 55 & int >= 45 & int < 50) {
a <- 55 - ref
b <- -5
c <- int - 50
d <- 0
e <- 0
f <- 0
} else if (ref >= 55 & int >= 50 & int < 55) {
a <- 55 - ref
b <- int - 55
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 55 & int >= 55 & int < 60) {
a <- int - ref
b <- 0
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 50 & int < 35) {
a <- 0
b <- 50 - ref
c <- -5
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 50 & int >= 35 & int < 40) {
a <- 0
b <- 50 - ref
c <- -5
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 50 & int >= 40 & int < 45) {
a <- 0
b <- 50 - ref
c <- -5
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 50 & int >= 45 & int < 50) {
a <- 0
b <- 50 - ref
c <- int - 50
d <- 0
e <- 0
f <- 0
} else if (ref >= 50 & int >= 50 & int < 55) {
a <- 0
b <- int - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 45 & int < 35) {
a <- 0
b <- 0
c <- 45 - ref
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 45 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 45 - ref
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 45 & int >= 40 & int < 45) {
a <- 0
b <- 0
c <- 45 - ref
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 45 & int >= 45 & int < 50) {
a <- 0
b <- 0
c <- int - ref
d <- 0
e <- 0
f <- 0
} else if (ref >= 40 & int < 35) {
a <- 0
b <- 0
c <- 0
d <- 40 - ref
e <- -5
f <- int - 35
} else if (ref >= 40 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 0
d <- 40 - ref
e <- int - 40
f <- 0
} else if (ref >= 40 & int >= 40 & int < 45) {
a <- 0
b <- 0
c <- 0
d <- int - ref
e <- 0
f <- 0
} else if (ref >= 35 & int < 35) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 35 - ref
f <- int - 35
} else if (ref >= 35 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - ref
f <- 0
} else {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 0
f <- int - ref
}
temp[i, "dif"] <- dif
temp[i, "t_60"] <- a
temp[i, "t_55"] <- b
temp[i, "t_50"] <- c
temp[i, "t_45"] <- d
temp[i, "t_40"] <- e
temp[i, "t_35"] <- f
temp[i, "check"] <- isTRUE(dif == a + b + c + d + e + f)
}
# process increases
# only interested where level is higher after intervention
# and only interested where level after intervention is >=30
if (int >= 30 & int > ref) {
# if the reference level is below 30 then raise it up to 30
# we are not interested in increases happening below 30
ref <- ifelse(ref >= 30, ref, 30)
dif <- int - ref
if (int >= 55 & ref < 35) {
a <- int - 55
b <- 5
c <- 5
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 55 & ref >= 35 & ref < 40) {
a <- int - 55
b <- 5
c <- 5
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 55 & ref >= 40 & ref < 45) {
a <- int - 55
b <- 5
c <- 5
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 55 & ref >= 45 & ref < 50) {
a <- int - 55
b <- 5
c <- 50 - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 55 & ref >= 50 & ref < 55) {
a <- int - 55
b <- 55 - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 55 & ref >= 55 & ref < 60) {
a <- int - ref
b <- 0
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 50 & ref < 35) {
a <- 0
b <- int - 50
c <- 5
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 50 & ref >= 35 & ref < 40) {
a <- 0
b <- int - 50
c <- 5
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 50 & ref >= 40 & ref < 45) {
a <- 0
b <- int - 50
c <- 5
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 50 & ref >= 45 & ref < 50) {
a <- 0
b <- int - 50
c <- 50 - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 50 & ref >= 50 & ref < 55) {
a <- 0
b <- int - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 45 & ref < 35) {
a <- 0
b <- 0
c <- int - 45
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 45 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- int - 45
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 45 & ref >= 40 & ref < 45) {
a <- 0
b <- 0
c <- int - 45
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 45 & ref >= 45 & ref < 50) {
a <- 0
b <- 0
c <- int - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 40 & ref < 35) {
a <- 0
b <- 0
c <- 0
d <- int - 40
e <- 5
f <- 35 - ref
} else if (int >= 40 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- 0
d <- int - 40
e <- 40 - ref
f <- 0
} else if (int >= 40 & ref >= 40 & ref < 45) {
a <- 0
b <- 0
c <- 0
d <- int - ref
e <- 0
f <- 0
} else if (int >= 35 & ref < 35) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - 35
f <- 35 - ref
} else if (int >= 35 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - ref
f <- 0
} else {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 0
f <- int - ref
}
temp[i, "dif"] <- dif
temp[i, "t_60"] <- a
temp[i, "t_55"] <- b
temp[i, "t_50"] <- c
temp[i, "t_45"] <- d
temp[i, "t_40"] <- e
temp[i, "t_35"] <- f
temp[i, "check"] <- isTRUE(dif == a + b + c + d + e + f)
if (nrow(filter(temp, check == FALSE)) > 0) {
warning(paste("error in ", name))
}
}
}
temp <-
temp %>% mutate(value = (t_35 * 5.8 * pop) + (t_40 * 11.4 * pop) + (t_45 * 17.0 * pop) + (t_50 * 22.6 * pop) + (t_55 * 28.1 * pop) + (t_60 * 33.7 * pop))
summary_gt2[1, paste(name)] <- sum(temp$pop)
summary_gt2[2, paste(name)] <- temp %>% filter(distance <= 80) %>%
summarise(sum(pop))
summary_gt2[3, paste(name)] <-
temp %>% filter(intervention < 30) %>%
summarise(sum(pop))
summary_gt2[4, paste(name)] <-
temp %>% filter(intervention >= 30) %>%
summarise(sum(pop))
summary_gt2[5, paste(name)] <-
temp %>% filter(intervention >= 30 & intervention < 35) %>%
summarise(sum(pop))
summary_gt2[6, paste(name)] <-
temp %>% filter(intervention >= 35 & intervention < 40) %>%
summarise(sum(pop))
summary_gt2[7, paste(name)] <-
temp %>% filter(intervention >= 40 & intervention < 45) %>%
summarise(sum(pop))
summary_gt2[8, paste(name)] <-
temp %>% filter(intervention >= 45 & intervention < 50) %>%
summarise(sum(pop))
summary_gt2[9, paste(name)] <-
temp %>% filter(intervention >= 50 & intervention < 55) %>%
summarise(sum(pop))
summary_gt2[10, paste(name)] <-
temp %>% filter(intervention >= 55 & intervention < 60) %>%
summarise(sum(pop))
summary_gt2[11, paste(name)] <-
temp %>% summarise(sum(dif * pop, na.rm = TRUE))
summary_gt2[12, paste(name)] <-
temp %>% summarise(sum(value, na.rm = TRUE))
}
summary_gt2 <- summary_gt2 %>% mutate_if(is.numeric, round, 2)
write.csv(file = "../qgis/revised/summary_gt2_new.csv", summary_gt2)
## gt3
summary_gt3 <- as.data.frame(item, stringsAsFactors = FALSE)
summary_gt3[1, "reference"] <- sum(gt3$pop)
summary_gt3[2, "reference"] <- gt3 %>% filter(distance <= 80) %>%
summarise(sum(pop))
summary_gt3[3, "reference"] <- gt3 %>% filter(reference < 30) %>%
summarise(sum(pop))
summary_gt3[4, "reference"] <-
gt3 %>% filter(reference >= 30) %>%
summarise(sum(pop))
summary_gt3[5, "reference"] <-
gt3 %>% filter(reference >= 30 & reference < 35) %>%
summarise(sum(pop))
summary_gt3[6, "reference"] <-
gt3 %>% filter(reference >= 35 & reference < 40) %>%
summarise(sum(pop))
summary_gt3[7, "reference"] <-
gt3 %>% filter(reference >= 40 & reference < 45) %>%
summarise(sum(pop))
summary_gt3[8, "reference"] <-
gt3 %>% filter(reference >= 45 & reference < 50) %>%
summarise(sum(pop))
summary_gt3[9, "reference"] <-
gt3 %>% filter(reference >= 50 & reference < 55) %>%
summarise(sum(pop))
summary_gt3[10, "reference"] <-
gt3 %>% filter(reference >= 55 & reference < 60) %>%
summarise(sum(pop))
for (name in interventions) {
temp <-
gt3 %>% select(pop, distance, reference, intervention = !!name)
# process decrease or no change
for (i in 1:nrow(temp)) {
ref <- temp$reference[i]
int <- temp$intervention[i]
# Only where a decrease or no change - intervention below reference
# Only interested where starting point is >=30
if (ref >= 30 & int <= ref) {
# if level is below 30 after intervention
# set level to 30 - not interested in improvements below 30
int <- ifelse(int >= 30, int, 30)
dif <- int - ref
if (ref >= 55 & int < 35) {
a <- 55 - ref
b <- -5
c <- -5
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 55 & int >= 35 & int < 40) {
a <- 55 - ref
b <- -5
c <- -5
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 55 & int >= 40 & int < 45) {
a <- 55 - ref
b <- -5
c <- -5
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 55 & int >= 45 & int < 50) {
a <- 55 - ref
b <- -5
c <- int - 50
d <- 0
e <- 0
f <- 0
} else if (ref >= 55 & int >= 50 & int < 55) {
a <- 55 - ref
b <- int - 55
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 55 & int >= 55 & int < 60) {
a <- int - ref
b <- 0
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 50 & int < 35) {
a <- 0
b <- 50 - ref
c <- -5
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 50 & int >= 35 & int < 40) {
a <- 0
b <- 50 - ref
c <- -5
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 50 & int >= 40 & int < 45) {
a <- 0
b <- 50 - ref
c <- -5
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 50 & int >= 45 & int < 50) {
a <- 0
b <- 50 - ref
c <- int - 50
d <- 0
e <- 0
f <- 0
} else if (ref >= 50 & int >= 50 & int < 55) {
a <- 0
b <- int - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 45 & int < 35) {
a <- 0
b <- 0
c <- 45 - ref
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 45 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 45 - ref
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 45 & int >= 40 & int < 45) {
a <- 0
b <- 0
c <- 45 - ref
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 45 & int >= 45 & int < 50) {
a <- 0
b <- 0
c <- int - ref
d <- 0
e <- 0
f <- 0
} else if (ref >= 40 & int < 35) {
a <- 0
b <- 0
c <- 0
d <- 40 - ref
e <- -5
f <- int - 35
} else if (ref >= 40 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 0
d <- 40 - ref
e <- int - 40
f <- 0
} else if (ref >= 40 & int >= 40 & int < 45) {
a <- 0
b <- 0
c <- 0
d <- int - ref
e <- 0
f <- 0
} else if (ref >= 35 & int < 35) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 35 - ref
f <- int - 35
} else if (ref >= 35 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - ref
f <- 0
} else {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 0
f <- int - ref
}
temp[i, "dif"] <- dif
temp[i, "t_60"] <- a
temp[i, "t_55"] <- b
temp[i, "t_50"] <- c
temp[i, "t_45"] <- d
temp[i, "t_40"] <- e
temp[i, "t_35"] <- f
temp[i, "check"] <- isTRUE(dif == a + b + c + d + e + f)
}
# process increases
# only interested where level is higher after intervention
# and only interested where level after intervention is >=30
if (int >= 30 & int > ref) {
# if the reference level is below 30 then raise it up to 30
# we are not interested in increases happening below 30
ref <- ifelse(ref >= 30, ref, 30)
dif <- int - ref
if (int >= 55 & ref < 35) {
a <- int - 55
b <- 5
c <- 5
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 55 & ref >= 35 & ref < 40) {
a <- int - 55
b <- 5
c <- 5
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 55 & ref >= 40 & ref < 45) {
a <- int - 55
b <- 5
c <- 5
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 55 & ref >= 45 & ref < 50) {
a <- int - 55
b <- 5
c <- 50 - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 55 & ref >= 50 & ref < 55) {
a <- int - 55
b <- 55 - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 55 & ref >= 55 & ref < 60) {
a <- int - ref
b <- 0
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 50 & ref < 35) {
a <- 0
b <- int - 50
c <- 5
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 50 & ref >= 35 & ref < 40) {
a <- 0
b <- int - 50
c <- 5
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 50 & ref >= 40 & ref < 45) {
a <- 0
b <- int - 50
c <- 5
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 50 & ref >= 45 & ref < 50) {
a <- 0
b <- int - 50
c <- 50 - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 50 & ref >= 50 & ref < 55) {
a <- 0
b <- int - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 45 & ref < 35) {
a <- 0
b <- 0
c <- int - 45
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 45 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- int - 45
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 45 & ref >= 40 & ref < 45) {
a <- 0
b <- 0
c <- int - 45
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 45 & ref >= 45 & ref < 50) {
a <- 0
b <- 0
c <- int - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 40 & ref < 35) {
a <- 0
b <- 0
c <- 0
d <- int - 40
e <- 5
f <- 35 - ref
} else if (int >= 40 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- 0
d <- int - 40
e <- 40 - ref
f <- 0
} else if (int >= 40 & ref >= 40 & ref < 45) {
a <- 0
b <- 0
c <- 0
d <- int - ref
e <- 0
f <- 0
} else if (int >= 35 & ref < 35) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - 35
f <- 35 - ref
} else if (int >= 35 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - ref
f <- 0
} else {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 0
f <- int - ref
}
temp[i, "dif"] <- dif
temp[i, "t_60"] <- a
temp[i, "t_55"] <- b
temp[i, "t_50"] <- c
temp[i, "t_45"] <- d
temp[i, "t_40"] <- e
temp[i, "t_35"] <- f
temp[i, "check"] <- isTRUE(dif == a + b + c + d + e + f)
if (nrow(filter(temp, check == FALSE)) > 0) {
warning(paste("error in ", name))
}
}
}
temp <-
temp %>% mutate(value = (t_35 * 5.8 * pop) + (t_40 * 11.4 * pop) + (t_45 * 17.0 * pop) + (t_50 * 22.6 * pop) + (t_55 * 28.1 * pop) + (t_60 * 33.7 * pop))
summary_gt3[1, paste(name)] <- sum(temp$pop)
summary_gt3[2, paste(name)] <- temp %>% filter(distance <= 80) %>%
summarise(sum(pop))
summary_gt3[3, paste(name)] <-
temp %>% filter(intervention < 30) %>%
summarise(sum(pop))
summary_gt3[4, paste(name)] <-
temp %>% filter(intervention >= 30) %>%
summarise(sum(pop))
summary_gt3[5, paste(name)] <-
temp %>% filter(intervention >= 30 & intervention < 35) %>%
summarise(sum(pop))
summary_gt3[6, paste(name)] <-
temp %>% filter(intervention >= 35 & intervention < 40) %>%
summarise(sum(pop))
summary_gt3[7, paste(name)] <-
temp %>% filter(intervention >= 40 & intervention < 45) %>%
summarise(sum(pop))
summary_gt3[8, paste(name)] <-
temp %>% filter(intervention >= 45 & intervention < 50) %>%
summarise(sum(pop))
summary_gt3[9, paste(name)] <-
temp %>% filter(intervention >= 50 & intervention < 55) %>%
summarise(sum(pop))
summary_gt3[10, paste(name)] <-
temp %>% filter(intervention >= 55 & intervention < 60) %>%
summarise(sum(pop))
summary_gt3[11, paste(name)] <-
temp %>% summarise(sum(dif * pop, na.rm = TRUE))
summary_gt3[12, paste(name)] <-
temp %>% summarise(sum(value, na.rm = TRUE))
}
summary_gt3 <- summary_gt3 %>% mutate_if(is.numeric, round, 2)
write.csv(file = "../qgis/revised/summary_gt3_new.csv", summary_gt3) | /r-scripts/results_analysis.R | no_license | marcusyoung/qgis-noise-models | R | false | false | 32,696 | r | library(dplyr)
library(readr)
results <-
read_csv("C:/Users/marcu/TRG/t2f/Analysis/qgis/revised/results.csv")
gt1 <- results %>% filter(groundtype == 1)
gt2 <- results %>% filter(groundtype == 2)
gt3 <- results %>% filter(groundtype == 3)
interventions <- c('softpads120', 'usp100', 'usp250', 'mat27')
item <-
c(
'within 200m',
'within 80m',
'<30dB(A)',
'>=30dB(A)',
'>=30 <35dB(A)',
'>=35 <40dB(A)',
'>=40 <45dB(A)',
'>=45 <50dB(A)',
'>=50 <55dB(A)',
'>=55 <60dB(A)',
'net dB(A) change (30dB and above)',
'value(£)2002'
)
### gt1
summary_gt1 <- as.data.frame(item, stringsAsFactors = FALSE)
summary_gt1[1, "reference"] <- sum(gt1$pop)
summary_gt1[2, "reference"] <- gt1 %>% filter(distance <= 80) %>%
summarise(sum(pop))
summary_gt1[3, "reference"] <- gt1 %>% filter(reference < 30) %>%
summarise(sum(pop))
summary_gt1[4, "reference"] <-
gt1 %>% filter(reference >= 30) %>%
summarise(sum(pop))
summary_gt1[5, "reference"] <-
gt1 %>% filter(reference >= 30 & reference < 35) %>%
summarise(sum(pop))
summary_gt1[6, "reference"] <-
gt1 %>% filter(reference >= 35 & reference < 40) %>%
summarise(sum(pop))
summary_gt1[7, "reference"] <-
gt1 %>% filter(reference >= 40 & reference < 45) %>%
summarise(sum(pop))
summary_gt1[8, "reference"] <-
gt1 %>% filter(reference >= 45 & reference < 50) %>%
summarise(sum(pop))
summary_gt1[9, "reference"] <-
gt1 %>% filter(reference >= 50 & reference < 55) %>%
summarise(sum(pop))
summary_gt1[10, "reference"] <-
gt1 %>% filter(reference >= 55 & reference < 60) %>%
summarise(sum(pop))
# This code is to determine the change in total noise level (increase or decrease) that occurs
# in each of the noise band. In this case the highest band predicted was 55-60, so don't need to
# consider beyond that.
# There really must be a better way of calculating this - perhaps ask a computer scientist!
for (name in interventions) {
temp <-
gt1 %>% select(pop, distance, reference, intervention = !!name)
# process decrease or no change
for (i in 1:nrow(temp)) {
ref <- temp$reference[i]
int <- temp$intervention[i]
# Only where a decrease or no change - intervention below reference
# Only interested where starting point is >=30
if (ref >= 30 & int <= ref) {
# if level is below 30 after intervention
# set level to 30 - not interested in improvements below 30
int <- ifelse(int >= 30, int, 30)
dif <- int - ref
if (ref >= 55 & int < 35) {
a <- 55 - ref
b <- -5
c <- -5
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 55 & int >= 35 & int < 40) {
a <- 55 - ref
b <- -5
c <- -5
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 55 & int >= 40 & int < 45) {
a <- 55 - ref
b <- -5
c <- -5
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 55 & int >= 45 & int < 50) {
a <- 55 - ref
b <- -5
c <- int - 50
d <- 0
e <- 0
f <- 0
} else if (ref >= 55 & int >= 50 & int < 55) {
a <- 55 - ref
b <- int - 55
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 55 & int >= 55 & int < 60) {
a <- int - ref
b <- 0
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 50 & int < 35) {
a <- 0
b <- 50 - ref
c <- -5
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 50 & int >= 35 & int < 40) {
a <- 0
b <- 50 - ref
c <- -5
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 50 & int >= 40 & int < 45) {
a <- 0
b <- 50 - ref
c <- -5
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 50 & int >= 45 & int < 50) {
a <- 0
b <- 50 - ref
c <- int - 50
d <- 0
e <- 0
f <- 0
} else if (ref >= 50 & int >= 50 & int < 55) {
a <- 0
b <- int - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (ref >= 45 & int < 35) {
a <- 0
b <- 0
c <- 45 - ref
d <- -5
e <- -5
f <- int - 35
} else if (ref >= 45 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 45 - ref
d <- -5
e <- int - 40
f <- 0
} else if (ref >= 45 & int >= 40 & int < 45) {
a <- 0
b <- 0
c <- 45 - ref
d <- int - 45
e <- 0
f <- 0
} else if (ref >= 45 & int >= 45 & int < 50) {
a <- 0
b <- 0
c <- int - ref
d <- 0
e <- 0
f <- 0
} else if (ref >= 40 & int < 35) {
a <- 0
b <- 0
c <- 0
d <- 40 - ref
e <- -5
f <- int - 35
} else if (ref >= 40 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 0
d <- 40 - ref
e <- int - 40
f <- 0
} else if (ref >= 40 & int >= 40 & int < 45) {
a <- 0
b <- 0
c <- 0
d <- int - ref
e <- 0
f <- 0
} else if (ref >= 35 & int < 35) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 35 - ref
f <- int - 35
} else if (ref >= 35 & int >= 35 & int < 40) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - ref
f <- 0
} else {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 0
f <- int - ref
}
temp[i, "dif"] <- dif
temp[i, "t_60"] <- a
temp[i, "t_55"] <- b
temp[i, "t_50"] <- c
temp[i, "t_45"] <- d
temp[i, "t_40"] <- e
temp[i, "t_35"] <- f
temp[i, "check"] <- isTRUE(dif == a + b + c + d + e + f)
}
# process increases
# only interested where level is higher after intervention
# and only interested where level after intervention is >=30
if (int >= 30 & int > ref) {
# if the reference level is below 30 then raise it up to 30
# we are not interested in increases happening below 30
ref <- ifelse(ref >= 30, ref, 30)
dif <- int - ref
if (int >= 55 & ref < 35) {
a <- int - 55
b <- 5
c <- 5
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 55 & ref >= 35 & ref < 40) {
a <- int - 55
b <- 5
c <- 5
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 55 & ref >= 40 & ref < 45) {
a <- int - 55
b <- 5
c <- 5
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 55 & ref >= 45 & ref < 50) {
a <- int - 55
b <- 5
c <- 50 - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 55 & ref >= 50 & ref < 55) {
a <- int - 55
b <- 55 - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 55 & ref >= 55 & ref < 60) {
a <- int - ref
b <- 0
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 50 & ref < 35) {
a <- 0
b <- int - 50
c <- 5
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 50 & ref >= 35 & ref < 40) {
a <- 0
b <- int - 50
c <- 5
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 50 & ref >= 40 & ref < 45) {
a <- 0
b <- int - 50
c <- 5
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 50 & ref >= 45 & ref < 50) {
a <- 0
b <- int - 50
c <- 50 - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 50 & ref >= 50 & ref < 55) {
a <- 0
b <- int - ref
c <- 0
d <- 0
e <- 0
f <- 0
} else if (int >= 45 & ref < 35) {
a <- 0
b <- 0
c <- int - 45
d <- 5
e <- 5
f <- 35 - ref
} else if (int >= 45 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- int - 45
d <- 5
e <- 40 - ref
f <- 0
} else if (int >= 45 & ref >= 40 & ref < 45) {
a <- 0
b <- 0
c <- int - 45
d <- 45 - ref
e <- 0
f <- 0
} else if (int >= 45 & ref >= 45 & ref < 50) {
a <- 0
b <- 0
c <- int - ref
d <- 0
e <- 0
f <- 0
} else if (int >= 40 & ref < 35) {
a <- 0
b <- 0
c <- 0
d <- int - 40
e <- 5
f <- 35 - ref
} else if (int >= 40 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- 0
d <- int - 40
e <- 40 - ref
f <- 0
} else if (int >= 40 & ref >= 40 & ref < 45) {
a <- 0
b <- 0
c <- 0
d <- int - ref
e <- 0
f <- 0
} else if (int >= 35 & ref < 35) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - 35
f <- 35 - ref
} else if (int >= 35 & ref >= 35 & ref < 40) {
a <- 0
b <- 0
c <- 0
d <- 0
e <- int - ref
f <- 0
} else {
a <- 0
b <- 0
c <- 0
d <- 0
e <- 0
f <- int - ref
}
temp[i, "dif"] <- dif
temp[i, "t_60"] <- a
temp[i, "t_55"] <- b
temp[i, "t_50"] <- c
temp[i, "t_45"] <- d
temp[i, "t_40"] <- e
temp[i, "t_35"] <- f
temp[i, "check"] <- isTRUE(dif == a + b + c + d + e + f)
if (nrow(filter(temp, check == FALSE)) > 0) {
warning(paste("error in ", name))
}
}
}
temp <-
temp %>% mutate(value = (t_35 * 5.8 * pop) + (t_40 * 11.4 * pop) + (t_45 * 17.0 * pop) + (t_50 * 22.6 * pop) + (t_55 * 28.1 * pop) + (t_60 * 33.7 * pop))
summary_gt1[1, paste(name)] <- sum(temp$pop)
summary_gt1[2, paste(name)] <- temp %>% filter(distance <= 80) %>%
summarise(sum(pop))
summary_gt1[3, paste(name)] <-
temp %>% filter(intervention < 30) %>%
summarise(sum(pop))
summary_gt1[4, paste(name)] <-
temp %>% filter(intervention >= 30) %>%
summarise(sum(pop))
summary_gt1[5, paste(name)] <-
temp %>% filter(intervention >= 30 & intervention < 35) %>%
summarise(sum(pop))
summary_gt1[6, paste(name)] <-
temp %>% filter(intervention >= 35 & intervention < 40) %>%
summarise(sum(pop))
summary_gt1[7, paste(name)] <-
temp %>% filter(intervention >= 40 & intervention < 45) %>%
summarise(sum(pop))
summary_gt1[8, paste(name)] <-
temp %>% filter(intervention >= 45 & intervention < 50) %>%
summarise(sum(pop))
summary_gt1[9, paste(name)] <-
temp %>% filter(intervention >= 50 & intervention < 55) %>%
summarise(sum(pop))
summary_gt1[10, paste(name)] <-
temp %>% filter(intervention >= 55 & intervention < 60) %>%
summarise(sum(pop))
summary_gt1[11, paste(name)] <-
temp %>% summarise(sum(dif * pop, na.rm = TRUE))
summary_gt1[12, paste(name)] <-
temp %>% summarise(sum(value, na.rm = TRUE))
}
summary_gt1 <- summary_gt1 %>% mutate_if(is.numeric, round, 2)
write.csv(file = "../qgis/revised/summary_gt1_new.csv", summary_gt1)
### gt2
# Population-exposure summary for the gt2 table, one column per scenario.
# Rows 1-10 are population counts (total, within 80 km, below 30, >= 30 and
# the five 5-unit bins from [30,35) to [55,60)); row 11 is the
# population-weighted level change and row 12 its monetised value.
summary_gt2 <- as.data.frame(item, stringsAsFactors = FALSE)
summary_gt2[1, "reference"] <- sum(gt2$pop)
summary_gt2[2, "reference"] <- gt2 %>% filter(distance <= 80) %>%
  summarise(sum(pop))
summary_gt2[3, "reference"] <- gt2 %>% filter(reference < 30) %>%
  summarise(sum(pop))
summary_gt2[4, "reference"] <- gt2 %>% filter(reference >= 30) %>%
  summarise(sum(pop))
# Rows 5-10: population in the 5-unit bins [30,35), [35,40), ..., [55,60).
for (bin.row in 5:10) {
  bin.lo <- 30 + (bin.row - 5) * 5
  summary_gt2[bin.row, "reference"] <-
    gt2 %>% filter(reference >= bin.lo & reference < bin.lo + 5) %>%
    summarise(sum(pop))
}
# Band edges used to decompose a level change into per-band contributions:
# t_35 covers [30,35), t_40 covers [35,40), ..., t_60 covers [55,Inf).
band.lo <- c(30, 35, 40, 45, 50, 55)
band.hi <- c(35, 40, 45, 50, 55, Inf)
for (name in interventions) {
  temp <-
    gt2 %>% select(pop, distance, reference, intervention = !!name)
  for (i in seq_len(nrow(temp))) {
    ref <- temp$reference[i]
    int <- temp$intervention[i]
    # Only changes that start (decrease) or end (increase) at >= 30 are of
    # interest; rows changing entirely below 30 keep NA in dif/t_*.
    process <- FALSE
    if (ref >= 30 & int <= ref) {
      # Decrease or no change: improvements below 30 are not counted, so
      # clamp the end point up to 30.
      int <- max(int, 30)
      process <- TRUE
    } else if (int >= 30 & int > ref) {
      # Increase: increases happening below 30 are not counted, so clamp
      # the starting point up to 30.
      ref <- max(ref, 30)
      process <- TRUE
    }
    if (process) {
      dif <- int - ref
      # Each band receives the portion of the change that falls inside it:
      # clamp both end points into the band and take the difference. This
      # replaces the original ~300-line if/else ladder and, unlike it,
      # also attributes levels >= 60 to the top band instead of letting
      # them fall through to the catch-all branch.
      parts <- pmin(pmax(int, band.lo), band.hi) -
        pmin(pmax(ref, band.lo), band.hi)
      temp[i, "dif"] <- dif
      temp[i, "t_60"] <- parts[6]
      temp[i, "t_55"] <- parts[5]
      temp[i, "t_50"] <- parts[4]
      temp[i, "t_45"] <- parts[3]
      temp[i, "t_40"] <- parts[2]
      temp[i, "t_35"] <- parts[1]
      temp[i, "check"] <- isTRUE(dif == sum(parts))
    }
  }
  # Sanity check: the per-band contributions must sum to the total change.
  if (isTRUE(any(temp$check == FALSE, na.rm = TRUE))) {
    warning(paste("error in ", name))
  }
  # Monetise the change: each band has its own cost rate per person.
  temp <-
    temp %>% mutate(value = (t_35 * 5.8 * pop) + (t_40 * 11.4 * pop) + (t_45 * 17.0 * pop) + (t_50 * 22.6 * pop) + (t_55 * 28.1 * pop) + (t_60 * 33.7 * pop))
  summary_gt2[1, paste(name)] <- sum(temp$pop)
  summary_gt2[2, paste(name)] <- temp %>% filter(distance <= 80) %>%
    summarise(sum(pop))
  summary_gt2[3, paste(name)] <-
    temp %>% filter(intervention < 30) %>%
    summarise(sum(pop))
  summary_gt2[4, paste(name)] <-
    temp %>% filter(intervention >= 30) %>%
    summarise(sum(pop))
  for (bin.row in 5:10) {
    bin.lo <- 30 + (bin.row - 5) * 5
    summary_gt2[bin.row, paste(name)] <-
      temp %>% filter(intervention >= bin.lo & intervention < bin.lo + 5) %>%
      summarise(sum(pop))
  }
  summary_gt2[11, paste(name)] <-
    temp %>% summarise(sum(dif * pop, na.rm = TRUE))
  summary_gt2[12, paste(name)] <-
    temp %>% summarise(sum(value, na.rm = TRUE))
}
summary_gt2 <- summary_gt2 %>% mutate_if(is.numeric, round, 2)
write.csv(file = "../qgis/revised/summary_gt2_new.csv", summary_gt2)
## gt3
# Population-exposure summary for the gt3 table, one column per scenario.
# Rows 1-10 are population counts (total, within 80 km, below 30, >= 30 and
# the five 5-unit bins from [30,35) to [55,60)); row 11 is the
# population-weighted level change and row 12 its monetised value.
summary_gt3 <- as.data.frame(item, stringsAsFactors = FALSE)
summary_gt3[1, "reference"] <- sum(gt3$pop)
summary_gt3[2, "reference"] <- gt3 %>% filter(distance <= 80) %>%
  summarise(sum(pop))
summary_gt3[3, "reference"] <- gt3 %>% filter(reference < 30) %>%
  summarise(sum(pop))
summary_gt3[4, "reference"] <- gt3 %>% filter(reference >= 30) %>%
  summarise(sum(pop))
# Rows 5-10: population in the 5-unit bins [30,35), [35,40), ..., [55,60).
for (bin.row in 5:10) {
  bin.lo <- 30 + (bin.row - 5) * 5
  summary_gt3[bin.row, "reference"] <-
    gt3 %>% filter(reference >= bin.lo & reference < bin.lo + 5) %>%
    summarise(sum(pop))
}
# Band edges used to decompose a level change into per-band contributions:
# t_35 covers [30,35), t_40 covers [35,40), ..., t_60 covers [55,Inf).
band.lo <- c(30, 35, 40, 45, 50, 55)
band.hi <- c(35, 40, 45, 50, 55, Inf)
for (name in interventions) {
  temp <-
    gt3 %>% select(pop, distance, reference, intervention = !!name)
  for (i in seq_len(nrow(temp))) {
    ref <- temp$reference[i]
    int <- temp$intervention[i]
    # Only changes that start (decrease) or end (increase) at >= 30 are of
    # interest; rows changing entirely below 30 keep NA in dif/t_*.
    process <- FALSE
    if (ref >= 30 & int <= ref) {
      # Decrease or no change: improvements below 30 are not counted, so
      # clamp the end point up to 30.
      int <- max(int, 30)
      process <- TRUE
    } else if (int >= 30 & int > ref) {
      # Increase: increases happening below 30 are not counted, so clamp
      # the starting point up to 30.
      ref <- max(ref, 30)
      process <- TRUE
    }
    if (process) {
      dif <- int - ref
      # Each band receives the portion of the change that falls inside it:
      # clamp both end points into the band and take the difference. This
      # replaces the original ~300-line if/else ladder and, unlike it,
      # also attributes levels >= 60 to the top band instead of letting
      # them fall through to the catch-all branch.
      parts <- pmin(pmax(int, band.lo), band.hi) -
        pmin(pmax(ref, band.lo), band.hi)
      temp[i, "dif"] <- dif
      temp[i, "t_60"] <- parts[6]
      temp[i, "t_55"] <- parts[5]
      temp[i, "t_50"] <- parts[4]
      temp[i, "t_45"] <- parts[3]
      temp[i, "t_40"] <- parts[2]
      temp[i, "t_35"] <- parts[1]
      temp[i, "check"] <- isTRUE(dif == sum(parts))
    }
  }
  # Sanity check: the per-band contributions must sum to the total change.
  if (isTRUE(any(temp$check == FALSE, na.rm = TRUE))) {
    warning(paste("error in ", name))
  }
  # Monetise the change: each band has its own cost rate per person.
  temp <-
    temp %>% mutate(value = (t_35 * 5.8 * pop) + (t_40 * 11.4 * pop) + (t_45 * 17.0 * pop) + (t_50 * 22.6 * pop) + (t_55 * 28.1 * pop) + (t_60 * 33.7 * pop))
  summary_gt3[1, paste(name)] <- sum(temp$pop)
  summary_gt3[2, paste(name)] <- temp %>% filter(distance <= 80) %>%
    summarise(sum(pop))
  summary_gt3[3, paste(name)] <-
    temp %>% filter(intervention < 30) %>%
    summarise(sum(pop))
  summary_gt3[4, paste(name)] <-
    temp %>% filter(intervention >= 30) %>%
    summarise(sum(pop))
  for (bin.row in 5:10) {
    bin.lo <- 30 + (bin.row - 5) * 5
    summary_gt3[bin.row, paste(name)] <-
      temp %>% filter(intervention >= bin.lo & intervention < bin.lo + 5) %>%
      summarise(sum(pop))
  }
  summary_gt3[11, paste(name)] <-
    temp %>% summarise(sum(dif * pop, na.rm = TRUE))
  summary_gt3[12, paste(name)] <-
    temp %>% summarise(sum(value, na.rm = TRUE))
}
summary_gt3 <- summary_gt3 %>% mutate_if(is.numeric, round, 2)
write.csv(file = "../qgis/revised/summary_gt3_new.csv", summary_gt3) |
# Simple linear regression of distance (jarak) on time (waktu), read from a
# user-chosen CSV file, plus a degree-2 polynomial fit on mtcars (mpg ~ hp).
perkiraan <- read.csv(file.choose(),header=TRUE)
perkiraan
# Fit and summarise the linear model jarak ~ waktu.
model <- lm(jarak ~ waktu, data = perkiraan)
summary(model)
# Scatter plot with the fitted regression line overlaid.
plot(jarak ~ waktu, data=perkiraan)
abline(model, col = "red", lwd = 1)
# Quadratic polynomial regression of mpg on hp (built-in mtcars data).
poly_model <- lm(mpg ~ poly(hp, degree=2), data = mtcars)
poly_model
# Evaluate the fitted curve on a fine grid so the plotted line is smooth.
x <- with(mtcars, seq(min(hp), max(hp), length.out=2000))
y <- predict(poly_model, newdata = data.frame(hp = x))
plot(mpg ~ hp, data = mtcars)
lines(x, y, col="red")
Data perkiraan ini saya dapatkan dari hasil pengamatan perkiraan mengendarai motor dengan rentang jarak setiap 10 km dan waktu yang diperlukan berkisar 30 menit tiap 10 km-nya.
Penjelasan
A.
perkiraan <- read.csv(file.choose(),header=TRUE)
//*untuk memanggil data tabel dari perkiraan
perkiraan
//*untuk menampilkan tabel model perkiraan (jarak dan waktu)
B.
model <- lm(jarak ~ waktu, data = perkiraan)
summary(model)
//*untuk menampilkan data model perkiraan berupa jarak dan waktu dengan formula
Hasilnya berupa:
Call:
lm(formula = jarak ~ waktu, data = perkiraan)
Nah! Disini kita dapat melihat formula yang dimasukkan berupa jarak ~ waktu dari model perkiraan.
Nantinya akan menghasilkan berupa residual(sisa) berupa Min(Nilai minimum), 1Q(Quartil awal), Median(Nilai tengah), 3Q(Quartil terakhir).
Residuals:
Min 1Q Median 3Q Max
-2.049e-14 -7.620e-15 8.760e-16 4.008e-15 5.609e-14
Untuk koefisien yang didapat berupa:
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) -8.039e-15 3.580e-15 -2.245e+00 0.0294 *
(waktu) 2.000e+01 2.444e-16 8.184e+16 <2e-16 ***
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Untuk pendekatan dari titik erornya:
Residual standard error: 1.247e-14 on 48 degrees of freedom
Multiple R-squared: 1, Adjusted R-squared: 1
F-statistic: 6.697e+33 on 1 and 48 DF, p-value: < 2.2e-16
C.
plot(jarak ~ waktu, data=perkiraan)
abline(model, col = "red", lwd = 1)
*//untuk menampilkan grafik dari model perkiraan
poly_model <- lm(mpg ~ poly(hp, degree=2), data = mtcars)
poly_model
x <- with(mtcars, seq(min(hp), max(hp), length.out=2000))
y <- predict(poly_model, newdata = data.frame(hp = x))
plot(mpg ~ hp, data = mtcars)
lines(x, y, col="red")
*//untuk memanggil dan menampilkan polinomial data dari plot yang telah ada
Hasilnya berupa :
Call:
lm(formula = mpg ~ poly(hp, degree = 2), data = mtcars)
Untuk koefisien yang didapat berupa:
Coefficients:
(Intercept) poly(hp, degree = 2)1 poly(hp, degree = 2)2
20.09 -26.05 13.15
| /17523224 - Regresi.r | no_license | bagusanugrahprasetyo/bagusap | R | false | false | 2,598 | r |
# Simple linear regression of distance (jarak) on time (waktu), read from a
# user-chosen CSV file, plus a degree-2 polynomial fit on mtcars (mpg ~ hp).
perkiraan <- read.csv(file.choose(),header=TRUE)
perkiraan
# Fit and summarise the linear model jarak ~ waktu.
model <- lm(jarak ~ waktu, data = perkiraan)
summary(model)
# Scatter plot with the fitted regression line overlaid.
plot(jarak ~ waktu, data=perkiraan)
abline(model, col = "red", lwd = 1)
# Quadratic polynomial regression of mpg on hp (built-in mtcars data).
poly_model <- lm(mpg ~ poly(hp, degree=2), data = mtcars)
poly_model
# Evaluate the fitted curve on a fine grid so the plotted line is smooth.
x <- with(mtcars, seq(min(hp), max(hp), length.out=2000))
y <- predict(poly_model, newdata = data.frame(hp = x))
plot(mpg ~ hp, data = mtcars)
lines(x, y, col="red")
Data perkiraan ini saya dapatkan dari hasil pengamatan perkiraan mengendarai motor dengan rentang jarak setiap 10 km dan waktu yang diperlukan berkisar 30 menit tiap 10 km-nya.
Penjelasan
A.
perkiraan <- read.csv(file.choose(),header=TRUE)
//*untuk memanggil data tabel dari perkiraan
perkiraan
//*untuk menampilkan tabel model perkiraan (jarak dan waktu)
B.
model <- lm(jarak ~ waktu, data = perkiraan)
summary(model)
//*untuk menampilkan data model perkiraan berupa jarak dan waktu dengan formula
Hasilnya berupa:
Call:
lm(formula = jarak ~ waktu, data = perkiraan)
Nah! Disini kita dapat melihat formula yang dimasukkan berupa jarak ~ waktu dari model perkiraan.
Nantinya akan menghasilkan berupa residual(sisa) berupa Min(Nilai minimum), 1Q(Quartil awal), Median(Nilai tengah), 3Q(Quartil terakhir).
Residuals:
Min 1Q Median 3Q Max
-2.049e-14 -7.620e-15 8.760e-16 4.008e-15 5.609e-14
Untuk koefisien yang didapat berupa:
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) -8.039e-15 3.580e-15 -2.245e+00 0.0294 *
(waktu) 2.000e+01 2.444e-16 8.184e+16 <2e-16 ***
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Untuk pendekatan dari titik erornya:
Residual standard error: 1.247e-14 on 48 degrees of freedom
Multiple R-squared: 1, Adjusted R-squared: 1
F-statistic: 6.697e+33 on 1 and 48 DF, p-value: < 2.2e-16
C.
plot(jarak ~ waktu, data=perkiraan)
abline(model, col = "red", lwd = 1)
*//untuk menampilkan grafik dari model perkiraan
poly_model <- lm(mpg ~ poly(hp, degree=2), data = mtcars)
poly_model
x <- with(mtcars, seq(min(hp), max(hp), length.out=2000))
y <- predict(poly_model, newdata = data.frame(hp = x))
plot(mpg ~ hp, data = mtcars)
lines(x, y, col="red")
*//untuk memanggil dan menampilkan polinomial data dari plot yang telah ada
Hasilnya berupa :
Call:
lm(formula = mpg ~ poly(hp, degree = 2), data = mtcars)
Untuk koefisien yang didapat berupa:
Coefficients:
(Intercept) poly(hp, degree = 2)1 poly(hp, degree = 2)2
20.09 -26.05 13.15
|
######################################################
######################################################
# Here the inferred mean coalescent and migration
# rate ratios are plotted
######################################################
######################################################
library(ggplot2)
library(colorblindr)

# start from an empty workspace
rm(list = ls())

# run relative to the directory containing this script
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)

segments <- c("HA", "MP", "NA", "NP", "NS1", "PA", "PB1", "PB2", "prior")
threshold <- 0.5

# For every unordered pair of segments (the trailing "prior" entry is
# excluded), read the per-pair event log and count the reassortment events
# whose posterior support exceeds the threshold. Every pair is recorded in
# both orientations so the faceted plot below forms a full grid.
first <- TRUE
for (i in seq_len(length(segments) - 2)) {
  for (j in seq(i + 1, length(segments) - 1)) {
    # per-pair event log produced by the combined analysis
    pair.file <- paste(path = "./combined/infB.event.", segments[[i]], "_", segments[[j]], ".txt", sep = "")
    events <- read.table(pair.file, header = TRUE, sep = "\t")
    for (k in seq_along(threshold)) {
      supported <- length(events[which(events$posterior > threshold[[k]]), ]$posterior)
      entry <- data.frame(seg1 = segments[[i]],
                          seg2 = segments[[j]],
                          nrEvents = supported,
                          sumEvents = sum(events$posterior),
                          threshold = threshold[[k]])
      entry <- rbind(entry,
                     data.frame(seg2 = segments[[i]],
                                seg1 = segments[[j]],
                                nrEvents = supported,
                                sumEvents = sum(events$posterior),
                                threshold = threshold[[k]]))
      if (first) {
        rate <- entry
        first <- FALSE
      } else {
        rate <- rbind(rate, entry)
      }
    }
  }
}

# fix the panel/colour ordering of the segment labels
seg.levels <- c("HA", "MP", "NA", "NP", "NS1", "PA", "PB1", "PB2")
rate$seg1 <- factor(rate$seg1, levels = seg.levels)
rate$seg2 <- factor(rate$seg2, levels = seg.levels)

# one panel per partner segment, points coloured by the first segment
p.rea.count <- ggplot(rate) +
  geom_point(aes(x = seg1, y = nrEvents, color = seg1), size = 3) +
  facet_grid(. ~ seg2) +
  xlab("") +
  ylab("reassortment rate") +
  theme(legend.position = "top") +
  scale_color_OkabeIto() +
  theme_light()

plot(p.rea.count)

ggsave(plot = p.rea.count, paste("../../../Reassortment-Text/Figures/pairwise/infB_count", ".pdf", sep = ""), width = 10, height = 3)
| /Applications/InfB/plotPairwiseInfBReassortmentEvents.R | no_license | nicfel/Reassortment-Material | R | false | false | 1,994 | r | ######################################################
######################################################
# Here the inferred mean coalescent and migration
# rate ratios are plotted
######################################################
######################################################
# Plot, for every pair of influenza B segments, the number of inferred
# reassortment events whose posterior support exceeds the threshold.
library(ggplot2)
library(colorblindr)
# clear workspace
rm(list = ls())
# Set the directory to the directory of the file
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
# Segment names; the trailing "prior" entry is excluded from the pair loops.
segments = c("HA", "MP", "NA", "NP", "NS1", "PA", "PB1", "PB2", "prior")
# Minimum posterior support for an event to be counted.
threshold = 0.5
first = T;
# Loop over unordered segment pairs; each pair is recorded in both
# orientations so the faceted plot below forms a full grid.
for (a in seq(1,length(segments)-2)){
for (b in seq(a+1,length(segments)-1)){
# get the names run log files
filename = paste(path="./combined/infB.event.",segments[[a]], "_", segments[[b]], ".txt",sep="")
t <- read.table(filename, header=TRUE, sep="\t")
for (i in seq(1,length(threshold))){
# nrEvents: events above the support threshold; sumEvents: total posterior mass.
new.rate = data.frame(seg1=segments[[a]], seg2=segments[[b]], nrEvents = length(t[which(t$posterior>threshold[[i]]),]$posterior), sumEvents=sum(t$posterior), threshold=threshold[[i]] )
new.rate = rbind(new.rate,data.frame(seg2=segments[[a]], seg1=segments[[b]], nrEvents = length(t[which(t$posterior>threshold[[i]]),]$posterior), sumEvents=sum(t$posterior), threshold=threshold[[i]] ))
if (first){
rate=new.rate
first=F
}else{
rate=rbind(rate,new.rate)
}
}
}
}
# Fix the ordering of segment labels for panels and colours.
rate$seg1 <- factor(rate$seg1, levels = c("HA", "MP", "NA", "NP", "NS1", "PA", "PB1", "PB2"))
rate$seg2 <- factor(rate$seg2, levels = c("HA", "MP", "NA", "NP", "NS1", "PA", "PB1", "PB2"))
# One panel per partner segment; points coloured by the first segment.
p.rea.count <- ggplot(rate) +
geom_point(aes(x=seg1, y=nrEvents, color=seg1), size=3) +
facet_grid(.~seg2)+
xlab("") +
ylab("reassortment rate") +
theme(legend.position="top") +
scale_color_OkabeIto()+
theme_light()
plot(p.rea.count)
ggsave(plot=p.rea.count,paste("../../../Reassortment-Text/Figures/pairwise/infB_count", ".pdf" , sep=""),width=10, height=3)
library(gdata)
get_statistics <- function (results) {
  # Summarise an overlapping community-detection result.
  # `results` must carry a `communities` element: a list with one vector of
  # node ids per community (a node may belong to several communities).
  # Returns a list of summary statistics: counts, community sizes, per-node
  # memberships and pairwise Jaccard overlaps between communities.
  jaccard <- function (X, Y) {length(intersect(X, Y)) / length(union(X, Y))}
  comms <- results$communities
  K <- length(comms)
  # Calculate pair-wise overlap
  # (Jaccard similarity between every ordered pair of communities.)
  cat("computing overlap...\n")
  overlap_mat <- matrix(0, K, K)
  for (i in 1:K) {
    for (j in 1:K) {
      overlap_mat[i, j] <- jaccard(comms[[i]], comms[[j]])
    }
  }
  # Creating memberships and comm_nodes
  cat("getting memberships...\n")
  # Flatten all communities, then group the community indices by node id.
  full_node_vec <- unlist(comms)
  full_comm_vec <- unlist(lapply(1:K, function (i) rep(i, length(comms[[i]]))))
  mships <- split(full_comm_vec, full_node_vec)
  comm_nodes <- split(full_node_vec, full_node_vec)
  comm_nodes <- unlist(lapply(comm_nodes, function (L) L[1]))
  # Align the membership list with the sorted node-id vector.
  mships <- mships[order(comm_nodes)]
  comm_nodes <- sort(comm_nodes)
  comm_nodes <- unname(comm_nodes)
  # Assessing overlap
  # Overlap nodes sit in more than one community; om holds their membership
  # counts.
  overlap_nodes <- which(unlist(lapply(mships, length)) > 1)
  on <- length(overlap_nodes)
  om <- unlist(lapply(mships[overlap_nodes], length))
  return(list("K" = K,
              "node_count" = length(comm_nodes),
              "comm_sizes" = unlist(lapply(comms, length)),
              "comm_nodes" = comm_nodes,
              "mships" = mships,
              "overlap_nodes" = overlap_nodes,
              # upper triangle of the overlap matrix (column-major, no diag)
              "jaccards" = upperTriangle(overlap_mat),
              "on" = on,
              "om" = om))
} | /applications-code/get_statistics.R | no_license | jpalowitch/CCME_analyses | R | false | false | 1,473 | r | library(gdata)
get_statistics <- function (results) {
  # Summarise an overlapping community-detection result.
  #
  # Args:
  #   results: list with a `communities` element -- a list of vectors of
  #            node ids, one vector per community (nodes may repeat across
  #            communities).
  #
  # Returns a list with community counts/sizes, per-node memberships and
  # the pairwise Jaccard overlaps between communities.
  jaccard <- function (X, Y) {length(intersect(X, Y)) / length(union(X, Y))}
  comms <- results$communities
  K <- length(comms)
  # Calculate pair-wise overlap (Jaccard similarity per community pair).
  # seq_len() is safe for K == 0, unlike 1:K which would iterate c(1, 0).
  cat("computing overlap...\n")
  overlap_mat <- matrix(0, K, K)
  for (i in seq_len(K)) {
    for (j in seq_len(K)) {
      overlap_mat[i, j] <- jaccard(comms[[i]], comms[[j]])
    }
  }
  # Creating memberships and comm_nodes: flatten all communities, then
  # group the community indices by node id.
  cat("getting memberships...\n")
  full_node_vec <- unlist(comms)
  full_comm_vec <- unlist(lapply(seq_len(K), function (i) rep(i, length(comms[[i]]))))
  mships <- split(full_comm_vec, full_node_vec)
  comm_nodes <- split(full_node_vec, full_node_vec)
  comm_nodes <- unlist(lapply(comm_nodes, function (L) L[1]))
  # Align the membership list with the sorted node-id vector.
  mships <- mships[order(comm_nodes)]
  comm_nodes <- sort(comm_nodes)
  comm_nodes <- unname(comm_nodes)
  # Assessing overlap: nodes in more than one community; om = their counts.
  overlap_nodes <- which(unlist(lapply(mships, length)) > 1)
  on <- length(overlap_nodes)
  om <- unlist(lapply(mships[overlap_nodes], length))
  return(list("K" = K,
              "node_count" = length(comm_nodes),
              "comm_sizes" = unlist(lapply(comms, length)),
              "comm_nodes" = comm_nodes,
              "mships" = mships,
              "overlap_nodes" = overlap_nodes,
              # Upper triangle (column-major, excluding the diagonal);
              # equivalent to gdata::upperTriangle() without the extra
              # package dependency.
              "jaccards" = overlap_mat[upper.tri(overlap_mat)],
              "on" = on,
              "om" = om))
}
#' Pearson's Chi-squared Test. (Category vs Category)
#'
#' This function performs chi square test between the categorical target variable
#' and user defined columns of the same dataset.
#' Note: Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#'
#' @usage test.chisq(dataf, y, x, skipMessage = FALSE)
#'
#' @param dataf a (non-empty) dataframe.
#' @param y a (non-empty) integer which points to the column which will be considered
#' as the target variable.
#' @param x a (non-empty) vector of integers which points to the columns which should
#' be used for the test.
#' @param skipMessage an optional (non-empty) boolean which lets the user choose whether
#' he should get the warning messages.
#'
#' @return dataframe containing the result of the tests.
#' @export
#'
#' @examples test.chisq(mtcars, 1, 2:ncol(mtcars))
test.chisq <- function(dataf, y, x, skipMessage = FALSE) {
  # Run a chi-squared test of independence between the categorical target
  # column `y` and every categorical column listed in `x` (both given as
  # column indices). Non-categorical columns are skipped, with a printed
  # notice unless `skipMessage` is TRUE. Returns a data frame with one row
  # per tested column: the X-squared statistic, the p-value and the usual
  # significance code (0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1).
  if (!is.data.frame(dataf))
    stop("The given object is not a data frame")
  if (!is.factor(dataf[, y]) && !is.character(dataf[, y]))
    stop('Target variable is not a categorical variable')
  if (y %in% x)
    stop('Target is present in list of variables as well')
  # Translate a p-value into the usual significance stars.
  sig.code <- function(p) {
    ifelse(p >= 0.1, ' ',
           ifelse(p >= 0.05, '.',
                  ifelse(p >= 0.01, '*',
                         ifelse(p >= 0.001, '**', '***'))))
  }
  var.name <- c()
  stat <- c()
  p.value <- c()
  significance <- c()
  for (col in x) {
    if (!(is.factor(dataf[, col]) || is.character(dataf[, col]))) {
      if (!skipMessage)
        print(paste(names(dataf)[col], 'will be skipped as it is not a categorical variable'))
      next
    }
    # Warnings from chisq.test (e.g. small expected counts) are silenced.
    chi.result <- suppressWarnings(chisq.test(dataf[, col], dataf[, y]))
    var.name <- c(var.name, names(dataf)[col])
    stat <- c(stat, unname(chi.result$statistic))
    p.value <- c(p.value, chi.result$p.value)
    significance <- c(significance, sig.code(chi.result$p.value))
  }
  row_data <- list()
  row_data$var_name <- var.name
  row_data$x_square <- stat
  row_data$p_value <- p.value
  row_data$significance <- significance
  as.data.frame(row_data, row.names = row_data$var_name)
}
#' Student's t-Test. (Category for 2 values vs Numeric)
#'
#' This function performs t test between the categorical target (upto 2 categories)
#' and user defined columns of the same dataset.
#' Note: Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#'
#' @usage test.t_test_cat(dataf, y, x, skipMessage = FALSE)
#'
#' @param dataf a (non-empty) dataframe.
#' @param y a (non-empty) integer which points to the column which will be considered
#' as the target variable.
#' @param x a (non-empty) vector of integers which points to the columns which should
#' be used for the test.
#' @param skipMessage an optional (non-empty) boolean which lets the user choose whether
#' he should get the warning messages.
#'
#' @return dataframe containing the result of the tests.
#' @export
#'
#' @examples test.t_test_cat(mtcars, 2, 3:ncol(mtcars))
test.t_test_cat <- function(dataf, y, x, skipMessage = FALSE) {
  # Run a two-sample t-test of every numeric column listed in `x` against
  # the categorical target column `y`, which must have exactly two levels.
  # Non-numeric columns are skipped, with a printed notice unless
  # `skipMessage` is TRUE. Returns a data frame with one row per tested
  # column: the t statistic, the p-value and the usual significance code
  # (0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1).
  if (!is.data.frame(dataf))
    stop("The given object is not a data frame")
  if (!is.factor(dataf[, y]) && !is.character(dataf[, y]))
    stop('Target variable is not a categorical variable')
  if (length(unique(dataf[, y])) != 2)
    stop('grouping factor must have exactly 2 levels')
  if (y %in% x)
    stop('Target is present in list of variables as well')
  # Translate a p-value into the usual significance stars.
  sig.code <- function(p) {
    ifelse(p >= 0.1, ' ',
           ifelse(p >= 0.05, '.',
                  ifelse(p >= 0.01, '*',
                         ifelse(p >= 0.001, '**', '***'))))
  }
  var.name <- c()
  stat <- c()
  p.value <- c()
  significance <- c()
  for (col in x) {
    if (!(is.numeric(dataf[, col]) || is.integer(dataf[, col]))) {
      if (!skipMessage)
        print(paste(names(dataf)[col], 'will be skipped as it is not a numerical variable'))
      next
    }
    t.result <- t.test(dataf[, col] ~ dataf[, y])
    var.name <- c(var.name, names(dataf)[col])
    stat <- c(stat, unname(t.result$statistic))
    p.value <- c(p.value, t.result$p.value)
    significance <- c(significance, sig.code(t.result$p.value))
  }
  row_data <- list()
  row_data$var_name <- var.name
  row_data$t <- stat
  row_data$p_value <- p.value
  row_data$significance <- significance
  as.data.frame(row_data, row.names = row_data$var_name)
}
#' Perform every test for Categorical Target.
#'
#' This function lets us perform the following tests when the target is categorical:
#' - Chi-Square Test
#' - T-Test (Only when number of categories is 2 in target variable)
#'
#' @usage test.all_cat(mtcars, 'cyl', 2:ncol(mtcars), tTest = FALSE)
#'
#'
#' @param dataf a (non-empty) dataframe.
#' @param y a (non-empty) integer/string which points to the column which will be considered
#' as the target variable.
#' @param x a (non-empty) vector of integers/string which points to the columns which should
#' be used for the test.
#' @param chiSquare an optional (non-empty) variable which lets the user decide whether
#' Chi-Square test should be performed.
#' @param tTest an optional (non-empty) variable which lets the user decide whether
#' T-test should be performed.
#'
#' @return list of dataframes returned from other tests.
#' @export
#'
#' @examples ## To perform every test.
#' test.all_cat(mtcars, 'vs', 3:ncol(mtcars))
#'
#' ## To omit a specefic test.
#' test.all_cat(mtcars, 'cyl', 2:ncol(mtcars), tTest = FALSE)
test.all_cat <- function(dataf, y, x, chiSquare = TRUE, tTest = TRUE) {
  # Run the selected categorical-target tests (chi-squared and/or t-test)
  # over the columns in `x`. `y` and `x` may be given either as column
  # indices or as exact column names.
  #
  # Returns a named list with one result data frame per performed test
  # (elements `chiSquare` and/or `tTest`).
  if (!is.data.frame(dataf))
    stop("The given object is not a data frame")
  # Resolve column names to indices with exact matching. The previous
  # grep()-based lookup treated the name as a regular expression, so a
  # name that was a substring of another column (e.g. "wt" vs "wt2")
  # could match several columns and break the index assignment.
  if (is.character(y))
    y <- match(y, colnames(dataf))
  if (is.character(x)) {
    pred <- match(x, colnames(dataf))
  } else {
    pred <- x
  }
  resultList <- list()
  if (chiSquare)
    resultList$chiSquare <- test.chisq(dataf, y, pred, TRUE)
  if (tTest)
    resultList$tTest <- test.t_test_cat(dataf, y, pred, TRUE)
  return(resultList)
}
| /R/Tests.R | permissive | akshayamrit/dataExploration | R | false | false | 7,445 | r | #' Pearson's Chi-squared Test. (Category vs Category)
#'
#' This function performs chi square test between the categorical target variable
#' and user defined columns of the same dataset.
#' Note: Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#'
#' @usage test.chisq(dataf, y, x, skipMessage = FALSE)
#'
#' @param dataf a (non-empty) dataframe.
#' @param y a (non-empty) integer which points to the column which will be considered
#' as the target variable.
#' @param x a (non-empty) vector of integers which points to the columns which should
#' be used for the test.
#' @param skipMessage an optional (non-empty) boolean which lets the user choose whether
#' he should get the warning messages.
#'
#' @return dataframe containing the result of the tests.
#' @export
#'
#' @examples test.chisq(mtcars, 1, 2:ncol(mtcars))
test.chisq <- function(dataf, y, x, skipMessage = FALSE) {
  # Run Pearson's chi-squared test of independence between the categorical
  # target column `y` and every categorical column listed in `x`.
  # Columns that are not factors or character vectors are skipped
  # (optionally with a printed notice).
  #
  # Returns a data frame with one row per tested column:
  # var_name, x_square (test statistic), p_value, significance code.
  if (!is.data.frame(dataf))
    stop("The given object is not a data frame")
  if (!is.factor(dataf[, y]) && !is.character(dataf[, y]))
    stop('Target variable is not a categorical variable')
  if (y %in% x)
    stop('Target is present in list of variables as well')
  # Significance codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
  sig_symbol <- function(p) {
    as.character(cut(p,
                     breaks = c(-Inf, 0.001, 0.01, 0.05, 0.1, Inf),
                     labels = c('***', '**', '*', '.', ' '),
                     right = FALSE))
  }
  var.name <- c()
  stat <- c()
  p.value <- c()
  significance <- c()
  for (col in x) {
    if (!(is.factor(dataf[, col]) || is.character(dataf[, col]))) {
      if (!skipMessage)
        print(paste(names(dataf)[col], 'will be skipped as it is not a categorical variable'))
      next
    }
    # Warnings (e.g. low expected cell counts) are silenced so one noisy
    # column does not interrupt the scan over all predictors.
    res <- suppressWarnings(chisq.test(dataf[, col], dataf[, y]))
    var.name <- c(var.name, names(dataf)[col])
    stat <- c(stat, unname(res$statistic))
    p.value <- c(p.value, res$p.value)
    significance <- c(significance, sig_symbol(res$p.value))
  }
  row_data <- list()
  row_data$var_name <- var.name
  row_data$x_square <- stat
  row_data$p_value <- p.value
  row_data$significance <- significance
  finalDF <- as.data.frame(row_data, row.names = row_data$var_name)
  return(finalDF)
}
#' Student's t-Test. (Category for 2 values vs Numeric)
#'
#' This function performs t test between the categorical target (upto 2 categories)
#' and user defined columns of the same dataset.
#' Note: Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#'
#' @usage test.t_test_cat(dataf, y, x, skipMessage = FALSE)
#'
#' @param dataf a (non-empty) dataframe.
#' @param y a (non-empty) integer which points to the column which will be considered
#' as the target variable.
#' @param x a (non-empty) vector of integers which points to the columns which should
#' be used for the test.
#' @param skipMessage an optional (non-empty) boolean which lets the user choose whether
#' he should get the warning messages.
#'
#' @return dataframe containing the result of the tests.
#' @export
#'
#' @examples test.t_test_cat(mtcars, 2, 3:ncol(mtcars))
test.t_test_cat <- function(dataf, y, x, skipMessage = FALSE) {
  # Run Student's t-test comparing each numeric column listed in `x`
  # across the two levels of the categorical target column `y`.
  # Non-numeric columns are skipped (optionally with a printed notice).
  #
  # Returns a data frame with one row per tested column:
  # var_name, t (test statistic), p_value, significance code.
  if (!is.data.frame(dataf))
    stop("The given object is not a data frame")
  if (!is.factor(dataf[, y]) && !is.character(dataf[, y]))
    stop('Target variable is not a categorical variable')
  if (length(unique(dataf[, y])) != 2)
    stop('grouping factor must have exactly 2 levels')
  if (y %in% x)
    stop('Target is present in list of variables as well')
  # Significance codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
  sig_symbol <- function(p) {
    as.character(cut(p,
                     breaks = c(-Inf, 0.001, 0.01, 0.05, 0.1, Inf),
                     labels = c('***', '**', '*', '.', ' '),
                     right = FALSE))
  }
  var.name <- c()
  stat <- c()
  p.value <- c()
  significance <- c()
  for (col in x) {
    if (!(is.numeric(dataf[, col]) || is.integer(dataf[, col]))) {
      if (!skipMessage)
        print(paste(names(dataf)[col], 'will be skipped as it is not a numerical variable'))
      next
    }
    # Two-sample t-test of the column split by the two target levels.
    res <- t.test(dataf[, col] ~ dataf[, y])
    var.name <- c(var.name, names(dataf)[col])
    stat <- c(stat, unname(res$statistic))
    p.value <- c(p.value, res$p.value)
    significance <- c(significance, sig_symbol(res$p.value))
  }
  row_data <- list()
  row_data$var_name <- var.name
  row_data$t <- stat
  row_data$p_value <- p.value
  row_data$significance <- significance
  finalDF <- as.data.frame(row_data, row.names = row_data$var_name)
  return(finalDF)
}
#' Perform every test for Categorical Target.
#'
#' This function lets us perform the following tests when the target is categorical:
#' - Chi-Square Test
#' - T-Test (Only when number of categories is 2 in target variable)
#'
#' @usage test.all_cat(mtcars, 'cyl', 2:ncol(mtcars), tTest = FALSE)
#'
#'
#' @param dataf a (non-empty) dataframe.
#' @param y a (non-empty) integer/string which points to the column which will be considered
#' as the target variable.
#' @param x a (non-empty) vector of integers/string which points to the columns which should
#' be used for the test.
#' @param chiSquare an optional (non-empty) variable which lets the user decide whether
#' Chi-Square test should be performed.
#' @param tTest an optional (non-empty) variable which lets the user decide whether
#' T-test should be performed.
#'
#' @return list of dataframes returned from other tests.
#' @export
#'
#' @examples ## To perform every test.
#' test.all_cat(mtcars, 'vs', 3:ncol(mtcars))
#'
#' ## To omit a specific test.
#' test.all_cat(mtcars, 'cyl', 2:ncol(mtcars), tTest = FALSE)
test.all_cat <- function(dataf, y, x, chiSquare = TRUE, tTest = TRUE) {
  # Run every supported test for a categorical target variable and return
  # the per-test result data frames in a named list.
  #
  # dataf:     a data frame holding the target and predictor columns.
  # y:         target column, given as an index or an exact column name.
  # x:         predictor columns, given as indices or exact column names.
  # chiSquare: run the chi-square test? (default TRUE)
  # tTest:     run the t-test? (default TRUE; requires a 2-level target)
  if (!is.data.frame(dataf))
    stop("The given object is not a data frame")
  # Resolve column names to positions with exact matching. The previous
  # grep()-based lookup treated names as regular expressions, so a name
  # that is a substring of another column (e.g. "cyl" vs "cylinders")
  # matched several columns at once, either selecting the wrong column or
  # failing the element-wise assignment outright.
  if (is.character(y))
    y <- match(y, colnames(dataf))
  if (is.character(x)) {
    pred <- match(x, colnames(dataf))
  } else {
    pred <- x
  }
  resultList <- list()
  if (chiSquare)
    resultList$chiSquare <- test.chisq(dataf, y, pred, TRUE)
  if (tTest)
    resultList$tTest <- test.t_test_cat(dataf, y, pred, TRUE)
  return(resultList)
}
|
library(miniCRAN)
create_miniCRAN <- function(pkgs,
                            path = "docs",
                            repos = c("https://cran.rstudio.com",
                                      "https://bioconductor.org/packages/release/bioc",
                                      "https://guangchuangyu.github.io/drat"),
                            type = c("source", "win.binary")) {
  # Build a local miniCRAN repository containing `pkgs` plus all of their
  # hard dependencies (Suggests excluded).
  #
  # pkgs:  character vector of package names to mirror.
  # path:  destination directory for the repository (default "docs",
  #        matching the original hard-coded value).
  # repos: repositories used to resolve dependencies and fetch packages.
  # type:  package types to download; add "mac.binary" for macOS binaries.
  pkgList <- pkgDep(pkgs, repos = repos, type = "source", suggests = FALSE)
  makeRepo(pkgList, path = path, repos = repos, type = type)
}
## pkgs <- c("seqcombo")
## create_miniCRAN(pkgs)
| /B_analysts_sources_github/GuangchuangYu/drat/create_miniCRAN.R | no_license | Irbis3/crantasticScrapper | R | false | false | 450 | r | library(miniCRAN)
create_miniCRAN <- function(pkgs) {
repo <- c("https://cran.rstudio.com",
"https://bioconductor.org/packages/release/bioc",
"https://guangchuangyu.github.io/drat")
pkgList <- pkgDep(pkgs, repos=repo, type="source", suggests = FALSE)
makeRepo(pkgList, path="docs", repos=repo,
type=c("source", "win.binary")) # "mac.binary"
}
## pkgs <- c("seqcombo")
## create_miniCRAN(pkgs)
|
# Shiny dashboard presenting results from the ISSP 2008 "Religion III"
# survey. Data loading and the UI/server modules are shared with other
# ISSP apps and sourced from the isspshiny repository.
library(shiny)
library(shinydashboard)
source("getdata2008.R")
source("https://raw.githubusercontent.com/kcha193/isspshiny/master/Rcode/plotsOutput.R")
source("https://raw.githubusercontent.com/kcha193/isspshiny/master/Rcode/sidebarInput.R")
source("https://raw.githubusercontent.com/kcha193/isspshiny/master/Rcode/titleOutput.R")
# UI: dashboard page with a shared sidebar input module and a single
# title + bar-plot output panel.
ui <- dashboardPage(
  skin = "black",
  # Application title
  dashboardHeader(title = "ISSP 2008 - Religion III", titleWidth = 300),
  # Sidebar holding the shared input module (question/grouping selectors)
  dashboardSidebar(sidebarInput("side",
                                date = "26-07-2018")), # <- change this for every update
  # Main panel: dynamic title above the rendered plot
  dashboardBody(box(
    #tags$head(includeScript("google-analytics.js")),
    h2(textOutput("title")),
    plotOutput("barPlot", height = "800px"),
    width = 12,
    height = 850
  ))
)
# Server: wire the sidebar module to the weighted-plot and title modules.
# `dat` / `datRaw` are created by getdata2008.R, sourced above.
server <- function(input, output) {
  plotOut <- callModule(plotOutWeighted, "side",
                        dat = dat, datRaw = datRaw)
  titleOut <- callModule(titleOut, "side")
  output$title <- renderText({ titleOut()})
  output$barPlot <- renderPlot({plotOut()})
}
# Run the application
shinyApp(ui = ui, server = server)
| /old/ISSP2008/app.R | no_license | kcha193/isspshiny | R | false | false | 1,343 | r |
library(shiny)
library(shinydashboard)
source("getdata2008.R")
source("https://raw.githubusercontent.com/kcha193/isspshiny/master/Rcode/plotsOutput.R")
source("https://raw.githubusercontent.com/kcha193/isspshiny/master/Rcode/sidebarInput.R")
source("https://raw.githubusercontent.com/kcha193/isspshiny/master/Rcode/titleOutput.R")
# Define UI for application that draws a histogram
ui <- dashboardPage(
skin = "black",
# Application title
dashboardHeader(title = "ISSP 2008 - Religion III", titleWidth = 300),
# Sidebar with a slider input for number of bins
dashboardSidebar(sidebarInput("side",
date = "26-07-2018")), # <- change this for every update
# Show a plot of the generated distribution
dashboardBody(box(
#tags$head(includeScript("google-analytics.js")),
h2(textOutput("title")),
plotOutput("barPlot", height = "800px"),
width = 12,
height = 850
))
)
# Define server logic required to draw a plot
server <- function(input, output) {
plotOut <- callModule(plotOutWeighted, "side",
dat = dat, datRaw = datRaw)
titleOut <- callModule(titleOut, "side")
output$title <- renderText({ titleOut()})
output$barPlot <- renderPlot({plotOut()})
}
# Run the application
shinyApp(ui = ui, server = server)
|
# Extracted example for svars::fevd (forecast error variance
# decomposition of a structural VAR).
library(svars)
### Name: fevd
### Title: Forecast error variance decomposition for SVAR Models
### Aliases: fevd fevd.svars
### ** Examples
## No test:
# Fit a reduced-form VAR to the bundled USA data, lag order chosen by AIC.
v1 <- vars::VAR(USA, lag.max = 10, ic = "AIC" )
# Structural identification via id.dc (see svars documentation).
x1 <- id.dc(v1)
# 30-step-ahead forecast error variance decomposition, then plot it.
x2 <- fevd(x1, n.ahead = 30)
plot(x2)
## End(No test)
| /data/genthat_extracted_code/svars/examples/fevd.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 279 | r | library(svars)
### Name: fevd
### Title: Forecast error variance decomposition for SVAR Models
### Aliases: fevd fevd.svars
### ** Examples
## No test:
v1 <- vars::VAR(USA, lag.max = 10, ic = "AIC" )
x1 <- id.dc(v1)
x2 <- fevd(x1, n.ahead = 30)
plot(x2)
## End(No test)
|
#' Download a survey and import it into R
#'
#' Download a Qualtrics survey you own via API and import the survey directly into R.
#'
#' @param surveyID String. Unique ID for the survey you want to download.
#' Returned as \code{id} by the \link[qualtRics]{all_surveys} function.
#' @param last_response Deprecated.
#' @param start_date String. Filter to only exports responses recorded after the
#' specified date. Accepts dates as character strings in format "YYYY-MM-DD".
#' Defaults to \code{NULL}.
#' @param end_date String. Filter to only exports responses recorded before the
#' specified date. Accepts dates as character strings in format "YYYY-MM-DD".
#' Defaults to \code{NULL}.
#' @param unanswer_recode Integer. Recode seen but unanswered questions with an
#' integer-like value, such as 999. Defaults to \code{NULL}.
#' @param unanswer_recode_multi Integer. Recode seen but unanswered multi-select
#' questions with an integer-like value, such as 999. Defaults to value for
#' \code{unanswer_recode}.
#' @param include_display_order Display order information (such as for
#' surveys with randomization).
#' @param limit Integer. Maximum number of responses exported. Defaults to
#' \code{NULL} (all responses).
#' @param include_questions Vector of strings (e.g. c('QID1', 'QID2', 'QID3').
#' Export only specified questions. Defaults to \code{NULL}.
#' @param save_dir String. Directory where survey results will be stored.
#' Defaults to a temporary directory which is cleaned when your R session is
#' terminated. This argument is useful if you'd like to store survey results.
#' The downloaded survey will be stored as an RDS file (see
#' \code{\link[base]{readRDS}}).
#' @param force_request Logical. fetch_survey() saves each survey in a temporary
#' directory so that it can quickly be retrieved later. If force_request is
#' \code{TRUE}, fetch_survey() always downloads the survey from the API instead
#' of loading it from the temporary directory. Defaults to \code{FALSE}.
#' @param verbose Logical. If \code{TRUE}, verbose messages will be printed to
#' the R console. Defaults to \code{TRUE}.
#' @param label Logical. \code{TRUE} to export survey responses as Choice Text
#' or \code{FALSE} to export survey responses as values.
#' @param convert Logical. If \code{TRUE}, then the
#' \code{\link[qualtRics]{fetch_survey}} function will convert certain question
#' types (e.g. multiple choice) to proper data type in R. Defaults to \code{TRUE}.
#' @param import_id Logical. If \code{TRUE}, use Qualtrics import IDs instead of
#' question IDs as column names. Defaults to \code{FALSE}.
#' @param time_zone String. A local timezone to determine response date
#' values. Defaults to \code{NULL} which corresponds to UTC time. See
#' \url{https://api.qualtrics.com/docs/time-zones} for more information on
#' format.
#' @param breakout_sets Logical. If \code{TRUE}, then the
#' \code{\link[qualtRics]{fetch_survey}} function will split multiple
#' choice question answers into columns. If \code{FALSE}, each multiple choice
#' question is one column. Defaults to \code{TRUE}.
#' @param ... Optional arguments, such as a `fileEncoding` (see `fileEncoding`
#' argument in \code{\link[qualtRics]{read_survey}}) to import your survey using
#' a specific encoding.
#'
#' @seealso See \url{https://api.qualtrics.com/reference} for documentation on
#' the Qualtrics API.
#'
#' @importFrom lifecycle deprecated
#' @export
#' @examples
#' \dontrun{
#' # Register your Qualtrics credentials if you haven't already
#' qualtrics_api_credentials(
#' api_key = "<YOUR-API-KEY>",
#' base_url = "<YOUR-BASE-URL>"
#' )
#'
#' # Retrieve a list of surveys
#' surveys <- all_surveys()
#'
#' # Retrieve a single survey
#' mysurvey <- fetch_survey(surveyID = surveys$id[6])
#'
#' mysurvey <- fetch_survey(
#' surveyID = surveys$id[6],
#' save_dir = tempdir(),
#' start_date = "2018-01-01",
#' end_date = "2018-01-31",
#' limit = 100,
#' label = TRUE,
#' unanswer_recode = 999,
#' verbose = TRUE
#' )
#' }
#'
fetch_survey <- function(surveyID,
                         last_response = deprecated(),
                         start_date = NULL,
                         end_date = NULL,
                         unanswer_recode = NULL,
                         unanswer_recode_multi = unanswer_recode,
                         include_display_order = TRUE,
                         limit = NULL,
                         include_questions = NULL,
                         save_dir = NULL,
                         force_request = FALSE,
                         verbose = TRUE,
                         label = TRUE,
                         convert = TRUE,
                         import_id = FALSE,
                         time_zone = NULL,
                         breakout_sets = TRUE,
                         ...) {
  # Warn callers still passing the retired `last_response` argument.
  if (lifecycle::is_present(last_response)) {
    lifecycle::deprecate_warn("3.1.2", "fetch_survey(last_response = )")
  }
  ## Are the API credentials stored?
  assert_base_url()
  assert_api_key()
  # Validate the full parameter set up front, so the API is only hit
  # with a coherent request.
  check_params(
    verbose = verbose,
    convert = convert,
    import_id = import_id,
    time_zone = time_zone,
    label = label,
    start_date = start_date,
    end_date = end_date,
    include_questions = include_questions,
    save_dir = save_dir,
    unanswer_recode = unanswer_recode,
    unanswer_recode_multi = unanswer_recode_multi,
    include_display_order = include_display_order,
    limit = limit,
    breakout_sets = breakout_sets
  )
  # See if survey already in tempdir (session-level cache written below).
  if (!force_request) {
    if (paste0(surveyID, ".rds") %in% list.files(tempdir())) {
      data <- readRDS(paste0(tempdir(), "/", surveyID, ".rds"))
      if (verbose) {
        message(paste0(
          "Found an earlier download for survey with id ", surveyID, # nolint
          ". Loading this file.\nSet 'force_request' to TRUE if you want to override this."
        ))
      } # nolint
      return(data)
    }
  }
  # CONSTRUCT API CALL ----
  # fetch URL:
  fetch_url <- create_fetch_url(Sys.getenv("QUALTRICS_BASE_URL"), surveyID)
  # Create raw JSON payload describing the requested export options.
  raw_payload <- create_raw_payload(
    label = label,
    start_date = start_date,
    end_date = end_date,
    unanswer_recode = unanswer_recode,
    unanswer_recode_multi = unanswer_recode_multi,
    include_display_order = include_display_order,
    limit = limit,
    time_zone = time_zone,
    include_questions = include_questions,
    breakout_sets = breakout_sets
  )
  # SEND POST REQUEST TO API ----
  # POST request for download; the response carries a progress id used to
  # poll for / fetch the finished export.
  res <- qualtrics_api_request("POST", url = fetch_url, body = raw_payload)
  # Get id
  if (is.null(res$result$progressId)) {
    stop("Something went wrong. Please re-run your query.")
  } else {
    requestID <- res$result$progressId
  } # NOTE This is not fail safe because ID can still be NULL
  # Download, unzip and return file path
  survey.fpath <- download_qualtrics_export(fetch_url, requestID, verbose = verbose)
  # READ DATA AND SET VARIABLES ----
  # Read data
  data <- read_survey(survey.fpath, import_id = import_id, time_zone = time_zone)
  # Add types. Type inference only applies to labelled (Choice Text)
  # exports. `&&` (not `&`) is the correct scalar conjunction here.
  if (convert && label) {
    data <- infer_data_types(data, surveyID)
  }
  # Save survey as RDS file in temp folder so that it can be easily
  # retrieved this session.
  saveRDS(data, paste0(tempdir(), "/", surveyID, ".rds"))
  # RETURN ----
  # Remove tmpfiles
  if (!is.null(save_dir)) {
    # Save file to directory
    saveRDS(data, file = paste0(save_dir, "/", surveyID, ".rds"))
    # Return
    return(data)
  } else {
    # Drop the raw export file; only the cached RDS copy is kept.
    file.remove(survey.fpath)
    # Return
    return(data)
  }
}
| /R/fetch_survey.R | permissive | jntrcs/qualtRics | R | false | false | 7,631 | r | #' Download a survey and import it into R
#'
#' Download a Qualtrics survey you own via API and import the survey directly into R.
#'
#' @param surveyID String. Unique ID for the survey you want to download.
#' Returned as \code{id} by the \link[qualtRics]{all_surveys} function.
#' @param last_response Deprecated.
#' @param start_date String. Filter to only exports responses recorded after the
#' specified date. Accepts dates as character strings in format "YYYY-MM-DD".
#' Defaults to \code{NULL}.
#' @param end_date String. Filter to only exports responses recorded before the
#' specified date. Accepts dates as character strings in format "YYYY-MM-DD".
#' Defaults to \code{NULL}.
#' @param unanswer_recode Integer. Recode seen but unanswered questions with an
#' integer-like value, such as 999. Defaults to \code{NULL}.
#' @param unanswer_recode_multi Integer. Recode seen but unanswered multi-select
#' questions with an integer-like value, such as 999. Defaults to value for
#' \code{unanswer_recode}.
#' @param include_display_order Display order information (such as for
#' surveys with randomization).
#' @param limit Integer. Maximum number of responses exported. Defaults to
#' \code{NULL} (all responses).
#' @param include_questions Vector of strings (e.g. c('QID1', 'QID2', 'QID3').
#' Export only specified questions. Defaults to \code{NULL}.
#' @param save_dir String. Directory where survey results will be stored.
#' Defaults to a temporary directory which is cleaned when your R session is
#' terminated. This argument is useful if you'd like to store survey results.
#' The downloaded survey will be stored as an RDS file (see
#' \code{\link[base]{readRDS}}).
#' @param force_request Logical. fetch_survey() saves each survey in a temporary
#' directory so that it can quickly be retrieved later. If force_request is
#' \code{TRUE}, fetch_survey() always downloads the survey from the API instead
#' of loading it from the temporary directory. Defaults to \code{FALSE}.
#' @param verbose Logical. If \code{TRUE}, verbose messages will be printed to
#' the R console. Defaults to \code{TRUE}.
#' @param label Logical. \code{TRUE} to export survey responses as Choice Text
#' or \code{FALSE} to export survey responses as values.
#' @param convert Logical. If \code{TRUE}, then the
#' \code{\link[qualtRics]{fetch_survey}} function will convert certain question
#' types (e.g. multiple choice) to proper data type in R. Defaults to \code{TRUE}.
#' @param import_id Logical. If \code{TRUE}, use Qualtrics import IDs instead of
#' question IDs as column names. Defaults to \code{FALSE}.
#' @param time_zone String. A local timezone to determine response date
#' values. Defaults to \code{NULL} which corresponds to UTC time. See
#' \url{https://api.qualtrics.com/docs/time-zones} for more information on
#' format.
#' @param breakout_sets Logical. If \code{TRUE}, then the
#' \code{\link[qualtRics]{fetch_survey}} function will split multiple
#' choice question answers into columns. If \code{FALSE}, each multiple choice
#' question is one column. Defaults to \code{TRUE}.
#' @param ... Optional arguments, such as a `fileEncoding` (see `fileEncoding`
#' argument in \code{\link[qualtRics]{read_survey}}) to import your survey using
#' a specific encoding.
#'
#' @seealso See \url{https://api.qualtrics.com/reference} for documentation on
#' the Qualtrics API.
#'
#' @importFrom lifecycle deprecated
#' @export
#' @examples
#' \dontrun{
#' # Register your Qualtrics credentials if you haven't already
#' qualtrics_api_credentials(
#' api_key = "<YOUR-API-KEY>",
#' base_url = "<YOUR-BASE-URL>"
#' )
#'
#' # Retrieve a list of surveys
#' surveys <- all_surveys()
#'
#' # Retrieve a single survey
#' mysurvey <- fetch_survey(surveyID = surveys$id[6])
#'
#' mysurvey <- fetch_survey(
#' surveyID = surveys$id[6],
#' save_dir = tempdir(),
#' start_date = "2018-01-01",
#' end_date = "2018-01-31",
#' limit = 100,
#' label = TRUE,
#' unanswer_recode = 999,
#' verbose = TRUE
#' )
#' }
#'
fetch_survey <- function(surveyID,
last_response = deprecated(),
start_date = NULL,
end_date = NULL,
unanswer_recode = NULL,
unanswer_recode_multi = unanswer_recode,
include_display_order = TRUE,
limit = NULL,
include_questions = NULL,
save_dir = NULL,
force_request = FALSE,
verbose = TRUE,
label = TRUE,
convert = TRUE,
import_id = FALSE,
time_zone = NULL,
breakout_sets = TRUE,
...) {
if (lifecycle::is_present(last_response)) {
lifecycle::deprecate_warn("3.1.2", "fetch_survey(last_response = )")
}
## Are the API credentials stored?
assert_base_url()
assert_api_key()
check_params(
verbose = verbose,
convert = convert,
import_id = import_id,
time_zone = time_zone,
label = label,
start_date = start_date,
end_date = end_date,
include_questions = include_questions,
save_dir = save_dir,
unanswer_recode = unanswer_recode,
unanswer_recode_multi = unanswer_recode_multi,
include_display_order = include_display_order,
limit = limit,
breakout_sets = breakout_sets
)
# See if survey already in tempdir
if (!force_request) {
if (paste0(surveyID, ".rds") %in% list.files(tempdir())) {
data <- readRDS(paste0(tempdir(), "/", surveyID, ".rds"))
if (verbose) {
message(paste0(
"Found an earlier download for survey with id ", surveyID, # nolint
". Loading this file.\nSet 'force_request' to TRUE if you want to override this."
))
} # nolint
return(data)
}
}
# CONSTRUCT API CALL ----
# fetch URL:
fetch_url <- create_fetch_url(Sys.getenv("QUALTRICS_BASE_URL"), surveyID)
# Create raw JSON payload
raw_payload <- create_raw_payload(
label = label,
start_date = start_date,
end_date = end_date,
unanswer_recode = unanswer_recode,
unanswer_recode_multi = unanswer_recode_multi,
include_display_order = include_display_order,
limit = limit,
time_zone = time_zone,
include_questions = include_questions,
breakout_sets = breakout_sets
)
# SEND POST REQUEST TO API ----
# POST request for download
res <- qualtrics_api_request("POST", url = fetch_url, body = raw_payload)
# Get id
if (is.null(res$result$progressId)) {
stop("Something went wrong. Please re-run your query.")
} else {
requestID <- res$result$progressId
} # NOTE This is not fail safe because ID can still be NULL
# Download, unzip and return file path
survey.fpath <- download_qualtrics_export(fetch_url, requestID, verbose = verbose)
# READ DATA AND SET VARIABLES ----
# Read data
data <- read_survey(survey.fpath, import_id = import_id, time_zone = time_zone)
# Add types
if (convert & label) {
data <- infer_data_types(data, surveyID)
}
# Save survey as RDS file in temp folder so that it can be easily
# retrieved this session.
saveRDS(data, paste0(tempdir(), "/", surveyID, ".rds"))
# RETURN ----
# Remove tmpfiles
if (!is.null(save_dir)) {
# Save file to directory
saveRDS(data, file = paste0(save_dir, "/", surveyID, ".rds"))
# Return
return(data)
} else {
p <- file.remove(survey.fpath)
# Return
return(data)
}
}
|
#-------------------
# Conditionally autoregressive regression
#-------------------
# Seedling densities
#-------------------
setwd('C:/temp/GIS')
library(rgdal)
library(spdep)
library(car)
library(rpart)
library(rpart.plot)
options("scipen"=100, "digits"=4)
file=('C:/temp/GIS')
#----------------------------------
# Seedling Densities
#----------------------------------
# Load seedling densities and derive the variables used in the models.
seeds<-read.csv('Seedlings_m2_with_pred_df.csv')[,-c(1)]   # drop row-index column
seeds$maple<-seeds$ACERUB+seeds$ACESAC       # combined red + sugar maple
seeds$sqseeds<-sqrt(seeds$seds_m2)           # square-root transform of seedlings/m2
seeds$oakhick<-seeds$QUERCUS+seeds$CARYA     # combined oak + hickory
seeds$inv.dc<-seeds$inv_sm_/100              # invasive cover as a proportion
seeds$cancov<-seeds$CANOPY_/100              # canopy cover as a proportion
seeds2<-subset(seeds,type=="train" ) # select points that are only in the training dataset
# BUG FIX: the original `subset(seeds, type='test')` passed `type` as an
# unused named argument rather than a filter condition, so seeds.test
# silently contained every row. A logical comparison is required.
seeds.test<-subset(seeds, type=="test")
# Run OLS model first
seed.lm<-lm(sqseeds~scale(prec_ex)+scale(tmax_ex)+scale(deer_den)+scale(inv.dc)+scale(hmod_ex) + scale(QUERCUS) + scale(FAGGRA) + scale(PINUS) + scale(STDSZCD) + scale(cancov), data = seeds2)
summary(seed.lm) #r2=0.0642
AIC(seed.lm) #8392
vif(seed.lm) # all are below 1.3
coordinates(seeds2)<-~X+Y # set X/Y coordinates as the coordinates for the dataset
coords<-coordinates(seeds2)
IDs<-row.names(as(seeds2,"data.frame"))
seeds_kd1<-dnearneigh(coords,d1=0, d2=10000, row.names=IDs)
seeds_kd1_w<-nb2listw(seeds_kd1,style='B', zero.policy=TRUE)
plot(seeds_kd1_w,coords)
car.mod<-spautolm(sqseeds~prec_ex+tmax_ex+inv.dc+hmod_ex + QUERCUS + FAGGRA + PINUS + STDSZCD + cancov +deer_den,
data = seeds2, listw=seeds_kd1_w, family = "CAR", method="eigen", verbose = TRUE,zero.policy = TRUE)
summary(car.mod, Nagelkerke=T, correlation=F)
seeds2$fit<-car.mod$fit$fitted.values
seeds2$resid<-car.mod$fit$residuals
plot(seeds2$resid~seeds2$fit)
abline(lm(seeds2$resid~seeds2$fit), col='red')
# neighborhood 20km
IDs<-row.names(as(seeds2,"data.frame"))
seeds_kd20<-dnearneigh(coords,d1=0, d2=20000, row.names=IDs)
seeds_kd20_w<-nb2listw(seeds_kd20,style='B', zero.policy=TRUE)
plot(seeds_kd20_w,coords)
car.mod20<-spautolm(sqseeds~prec_ex+tmax_ex+inv.dc+hmod_ex + QUERCUS + FAGGRA + PINUS + STDSZCD + cancov+deer_den,
data = seeds2,listw=seeds_kd20_w, family = "CAR", method="eigen", verbose = TRUE,zero.policy = TRUE)
summary(car.mod20, Nagelkerke=T, correlation=F)
seeds2$fit20<-car.mod20$fit$fitted.values
seeds2$resid20<-car.mod20$fit$residuals
plot(seeds2$resid20~seeds2$fit20)
abline(lm(seeds2$resid20~seeds2$fit20), col='red')
# neighborhood 20km with row standardized weights instead of Binary
seeds_kd20w<-dnearneigh(coords,d1=0, d2=20000, row.names=IDs)
seeds_kd20w_w<-nb2listw(seeds_kd20w,style='W', zero.policy=TRUE)
car.mod20w<-spautolm(sqseeds~scale(prec_ex)+scale(tmax_ex)+scale(inv.dc)+scale(hmod_ex) + scale(QUERCUS) + scale(FAGGRA) + scale(PINUS) + scale(STDSZCD) + scale(cancov)+ scale(deer_den),
data = seeds2,listw=seeds_kd20w_w, family = "CAR", method="eigen", verbose = TRUE,zero.policy = TRUE)
summary(car.mod20w, Nagelkerke=T) #0.1325
(car.mod20w$fit$imat) #10316
seeds2$fit20w<-car.mod20w$fit$fitted.values
seeds2$resid20w<-car.mod20w$fit$residuals
plot(seeds2$resid20~seeds2$fit20)
abline(lm(seeds2$resid20~seeds2$fit20), col='red')
#------------------------------
# Sapling Densities
#------------------------------
saps<-read.csv('Saplings_m2_with_pred_df.csv')[,-c(1)]
saps$sqsaps<-sqrt(saps$saps_m2)
saps$maple<-saps$ACERUB+saps$ACESAC
saps$oakhick<-saps$QUERCUS+saps$CARYA
saps$inv.dc<-saps$inv_sm_/100
saps$cancov<-saps$CANOPY_/100
saps2<-subset(saps,type=="train" ) # select points that are only in the training dataset
saps.test<-subset(saps,type=='test')
# Run global model first
sap.lm<-lm(sqsaps~prec_ex+inv.dc+hmod_ex+maple+QUERCUS+FAGGRA+PINUS+STDSZCD+cancov+deer_den, data=saps2)
summary(sap.lm) #r2=0.128
AIC(sap.lm) #-8839
par(mfrow = c(2, 2), oma = c(0, 0, 2, 0))
plot(sap.lm)
par(mfrow=c(1,1))
vif(sap.lm) # all are below 2
coordinates(saps2)<-~X+Y # set X/Y coordinates as the coordinates for the dataset
coords<-coordinates(saps2)
# CAR model with 20km weights
IDs<-row.names(as(saps2,"data.frame"))
saps_kd20<-dnearneigh(coords,d1=0, d2=20000, row.names=IDs)
saps_kd20_w<-nb2listw(saps_kd20,style='W', zero.policy=TRUE)
car.mod.sap<-spautolm(sqsaps~scale(prec_ex)+scale(inv.dc)+scale(hmod_ex)+scale(QUERCUS)+scale(maple)+scale(FAGGRA)+
scale(PINUS)+scale(STDSZCD)+scale(cancov)+scale(deer_den), data = saps2,
listw=saps_kd20_w, family = "CAR", method="eigen", verbose = TRUE,zero.policy = TRUE)
summary(car.mod.sap, Nagelkerke=T) #0.1622, AIC=-9297
# took about 15 minutes to run
saps2$fit<-car.mod.sap$fit$fitted.values
saps2$resid<-car.mod.sap$fit$residuals
plot(saps2$resid~saps2$fit)
abline(lm(saps2$resid~saps2$fit), col='red')
| /6_CAR_regen_densities.R | no_license | KateMMiller/RegenDebtCode | R | false | false | 4,732 | r | #-------------------
# Conditionally autoregressive regression
#-------------------
# Seedling densities
#-------------------
setwd('C:/temp/GIS')
library(rgdal)
library(spdep)
library(car)
library(rpart)
library(rpart.plot)
options("scipen"=100, "digits"=4)
file=('C:/temp/GIS')
#----------------------------------
# Seedling Densities
#----------------------------------
# Load seedling densities and derive the variables used in the models.
seeds<-read.csv('Seedlings_m2_with_pred_df.csv')[,-c(1)]   # drop row-index column
seeds$maple<-seeds$ACERUB+seeds$ACESAC       # combined red + sugar maple
seeds$sqseeds<-sqrt(seeds$seds_m2)           # square-root transform of seedlings/m2
seeds$oakhick<-seeds$QUERCUS+seeds$CARYA     # combined oak + hickory
seeds$inv.dc<-seeds$inv_sm_/100              # invasive cover as a proportion
seeds$cancov<-seeds$CANOPY_/100              # canopy cover as a proportion
seeds2<-subset(seeds,type=="train" ) # select points that are only in the training dataset
# BUG FIX: the original `subset(seeds, type='test')` passed `type` as an
# unused named argument rather than a filter condition, so seeds.test
# silently contained every row. A logical comparison is required.
seeds.test<-subset(seeds, type=="test")
# Run OLS model first
seed.lm<-lm(sqseeds~scale(prec_ex)+scale(tmax_ex)+scale(deer_den)+scale(inv.dc)+scale(hmod_ex) + scale(QUERCUS) + scale(FAGGRA) + scale(PINUS) + scale(STDSZCD) + scale(cancov), data = seeds2)
summary(seed.lm) #r2=0.0642
AIC(seed.lm) #8392
vif(seed.lm) # all are below 1.3
coordinates(seeds2)<-~X+Y # set X/Y coordinates as the coordinates for the dataset
coords<-coordinates(seeds2)
IDs<-row.names(as(seeds2,"data.frame"))
seeds_kd1<-dnearneigh(coords,d1=0, d2=10000, row.names=IDs)
seeds_kd1_w<-nb2listw(seeds_kd1,style='B', zero.policy=TRUE)
plot(seeds_kd1_w,coords)
car.mod<-spautolm(sqseeds~prec_ex+tmax_ex+inv.dc+hmod_ex + QUERCUS + FAGGRA + PINUS + STDSZCD + cancov +deer_den,
data = seeds2, listw=seeds_kd1_w, family = "CAR", method="eigen", verbose = TRUE,zero.policy = TRUE)
summary(car.mod, Nagelkerke=T, correlation=F)
seeds2$fit<-car.mod$fit$fitted.values
seeds2$resid<-car.mod$fit$residuals
plot(seeds2$resid~seeds2$fit)
abline(lm(seeds2$resid~seeds2$fit), col='red')
# neighborhood 20km
IDs<-row.names(as(seeds2,"data.frame"))
seeds_kd20<-dnearneigh(coords,d1=0, d2=20000, row.names=IDs)
seeds_kd20_w<-nb2listw(seeds_kd20,style='B', zero.policy=TRUE)
plot(seeds_kd20_w,coords)
car.mod20<-spautolm(sqseeds~prec_ex+tmax_ex+inv.dc+hmod_ex + QUERCUS + FAGGRA + PINUS + STDSZCD + cancov+deer_den,
data = seeds2,listw=seeds_kd20_w, family = "CAR", method="eigen", verbose = TRUE,zero.policy = TRUE)
summary(car.mod20, Nagelkerke=T, correlation=F)
seeds2$fit20<-car.mod20$fit$fitted.values
seeds2$resid20<-car.mod20$fit$residuals
plot(seeds2$resid20~seeds2$fit20)
abline(lm(seeds2$resid20~seeds2$fit20), col='red')
# neighborhood 20km with row standardized weights instead of Binary
seeds_kd20w<-dnearneigh(coords,d1=0, d2=20000, row.names=IDs)
seeds_kd20w_w<-nb2listw(seeds_kd20w,style='W', zero.policy=TRUE)
car.mod20w<-spautolm(sqseeds~scale(prec_ex)+scale(tmax_ex)+scale(inv.dc)+scale(hmod_ex) + scale(QUERCUS) + scale(FAGGRA) + scale(PINUS) + scale(STDSZCD) + scale(cancov)+ scale(deer_den),
data = seeds2,listw=seeds_kd20w_w, family = "CAR", method="eigen", verbose = TRUE,zero.policy = TRUE)
summary(car.mod20w, Nagelkerke=T) #0.1325
(car.mod20w$fit$imat) #10316
seeds2$fit20w<-car.mod20w$fit$fitted.values
seeds2$resid20w<-car.mod20w$fit$residuals
plot(seeds2$resid20~seeds2$fit20)
abline(lm(seeds2$resid20~seeds2$fit20), col='red')
#------------------------------
# Sapling Densities
#------------------------------
saps<-read.csv('Saplings_m2_with_pred_df.csv')[,-c(1)]
saps$sqsaps<-sqrt(saps$saps_m2)
saps$maple<-saps$ACERUB+saps$ACESAC
saps$oakhick<-saps$QUERCUS+saps$CARYA
saps$inv.dc<-saps$inv_sm_/100
saps$cancov<-saps$CANOPY_/100
saps2<-subset(saps,type=="train" ) # select points that are only in the training dataset
saps.test<-subset(saps,type=='test')
# Run global model first
sap.lm<-lm(sqsaps~prec_ex+inv.dc+hmod_ex+maple+QUERCUS+FAGGRA+PINUS+STDSZCD+cancov+deer_den, data=saps2)
summary(sap.lm) #r2=0.128
AIC(sap.lm) #-8839
par(mfrow = c(2, 2), oma = c(0, 0, 2, 0))
plot(sap.lm)
par(mfrow=c(1,1))
vif(sap.lm) # all are below 2
coordinates(saps2)<-~X+Y # set X/Y coordinates as the coordinates for the dataset
coords<-coordinates(saps2)
# CAR model with 20km weights
IDs<-row.names(as(saps2,"data.frame"))
saps_kd20<-dnearneigh(coords,d1=0, d2=20000, row.names=IDs)
saps_kd20_w<-nb2listw(saps_kd20,style='W', zero.policy=TRUE)
car.mod.sap<-spautolm(sqsaps~scale(prec_ex)+scale(inv.dc)+scale(hmod_ex)+scale(QUERCUS)+scale(maple)+scale(FAGGRA)+
scale(PINUS)+scale(STDSZCD)+scale(cancov)+scale(deer_den), data = saps2,
listw=saps_kd20_w, family = "CAR", method="eigen", verbose = TRUE,zero.policy = TRUE)
summary(car.mod.sap, Nagelkerke=T) #0.1622, AIC=-9297
# took about 15 minutes to run
saps2$fit<-car.mod.sap$fit$fitted.values
saps2$resid<-car.mod.sap$fit$residuals
plot(saps2$resid~saps2$fit)
abline(lm(saps2$resid~saps2$fit), col='red')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polish.R
\docType{package}
\name{polish-package}
\alias{polish-package}
\title{Polish up your R content.}
\description{
Polish up your R content (RMarkdown and flexdashboards)
using attractive templates for publishing to RStudio Connect.
}
\author{
Will Bishop \email{wcmbishop@gmail.com}
}
| /man/polish-package.Rd | permissive | wcmbishop/polish | R | false | true | 370 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polish.R
\docType{package}
\name{polish-package}
\alias{polish-package}
\title{Polish up your R content.}
\description{
Polish up your R content (RMarkdown and flexdashboards)
using attractive templates for publishing to RStudio Connect.
}
\author{
Will Bishop \email{wcmbishop@gmail.com}
}
|
#! /usr/bin/env Rscript
# blat.R — thin command-line wrapper around the `blat` sequence aligner.
# Usage: ./blat.R databaseFile.rfa queryFile.fa [outputFile.psl]
args <- commandArgs(trailingOnly=TRUE)

# Print usage help to stderr (message() writes to stderr).
usage <- function() {
message("usage:")
message("./blat.R databaseFile.rfa queryFile.fa outputFile.psl")
message("
databaseFile.rfa: reference database in fasta format
queryFile.fa: query sequences in fasta format
outputFile.psl: blat output filename (default: 'queryFile.psl')
")
}

# Require 2 or 3 positional arguments; otherwise show usage and exit non-zero.
cargs <- length(args)
if(cargs < 2 || cargs > 3) {
message("wrong number of arguments (", cargs, ")\n")
usage()
quit(save="no", status=1)
}

rfaFile <- args[1]
faFile <- args[2]
if(cargs < 3) {
# Default output: replace the trailing ".fa" with ".psl".
# BUG FIX: the original gsub left pslFile identical to faFile when the
# query name did not end in ".fa", which would make blat overwrite the
# input query file. Fall back to appending ".psl" instead.
if (grepl("\\.fa$", faFile)) {
pslFile <- sub("\\.fa$", ".psl", faFile)
} else {
pslFile <- paste0(faFile, ".psl")
}
} else {
pslFile <- args[3]
}

# shQuote() each path so filenames with spaces survive the shell.
cmd <- sprintf("blat -noHead -minIdentity=100 -stepSize=7 %s %s %s",
shQuote(rfaFile), shQuote(faFile), shQuote(pslFile))
message('executing shell command:')
message(shQuote(cmd))
# BUG FIX: propagate blat's exit status so callers/pipelines can detect failure
# (the original discarded system()'s return value and always exited 0).
status <- system(cmd)
if (status != 0) {
quit(save="no", status=1)
}
| /blat.R | no_license | antoniofabio/annotationExperiments | R | false | false | 851 | r | #! /usr/bin/env Rscript
# blat.R — thin command-line wrapper around the `blat` sequence aligner.
args <- commandArgs(trailingOnly=TRUE)
# Print usage help to stderr (message() writes to stderr).
usage <- function() {
message("usage:")
message("./blat.R databaseFile.rfa queryFile.fa outputFile.psl")
message("
databaseFile.rfa: reference database in fasta format
queryFile.fa: query sequences in fasta format
outputFile.psl: blat output filename (default: 'queryFile.psl')
")
}
# Require 2 or 3 positional arguments; otherwise show usage and exit non-zero.
cargs <- length(args)
if(cargs < 2 || cargs > 3) {
message("wrong number of arguments (", cargs, ")\n")
usage()
quit(save="no", status=1)
}
rfaFile <- args[1]
faFile <- args[2]
if(cargs < 3) {
# Default output: swap a trailing ".fa" for ".psl".
# NOTE(review): if faFile does not end in ".fa" this gsub makes no
# substitution, so pslFile == faFile and blat would overwrite the input —
# worth guarding against.
pslFile <- gsub("^(.*)\\.fa$", "\\1.psl", faFile)
} else {
pslFile <- args[3]
}
# shQuote() each path so filenames with spaces survive the shell.
cmd <- sprintf("blat -noHead -minIdentity=100 -stepSize=7 %s %s %s",
shQuote(rfaFile), shQuote(faFile), shQuote(pslFile))
message('executing shell command:')
message(shQuote(cmd))
# NOTE(review): system()'s return value (blat's exit status) is discarded,
# so the script always exits 0 even when blat fails.
system(cmd)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bmab.R
\name{bmab_args}
\alias{bmab_args}
\title{Function arguments}
\arguments{
\item{Sigma}{value of Sigma for the arm}
\item{n}{value of n for the arm}
\item{gamma}{numeric in (0, 1); discount factor}
\item{tol}{absolute accuracy required}
\item{N}{integer>0; time horizon used}
}
\description{
Function arguments
}
\keyword{internal}
| /man/bmab_args.Rd | no_license | jedwards24/gittins | R | false | true | 420 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bmab.R
\name{bmab_args}
\alias{bmab_args}
\title{Function arguments}
\arguments{
\item{Sigma}{value of Sigma for the arm}
\item{n}{value of n for the arm}
\item{gamma}{numeric in (0, 1); discount factor}
\item{tol}{absolute accuracy required}
\item{N}{integer>0; time horizon used}
}
\description{
Function arguments
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.