content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Download the data (skipped if already present on disk)
dataset_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if (!file.exists("power.zip")) {
  download.file(dataset_url, "power.zip")
}
if (!file.exists("power")) {
  unzip("power.zip", exdir = "power")
}
# Use data.table package to read the data; "?" marks missing values in the raw file
library(data.table)
data <- fread("~/power/household_power_consumption.txt",
  na.strings = "?",
  colClasses = c(
    "character", "character", "numeric", "numeric", "numeric",
    "numeric", "numeric", "numeric", "numeric"
  )
)
summary(data)
head(data)
# Keep only the two days of interest, then convert Date/DateTime.
# Filtering on the raw "d/m/Y" strings happens *before* date conversion on
# purpose: it avoids parsing dates for the whole (large) table.
data.subset <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
data.subset$DateTime <- paste(data.subset$Date, data.subset$Time)
data.subset[, `:=`(
  DateTime = as.POSIXct(strptime(DateTime, format = "%d/%m/%Y %H:%M:%S")),
  Date = as.Date(Date, "%d/%m/%Y")
)]
class(data.subset$Date)
class(data.subset$DateTime)
table(data.subset$Date)
# Create plot3: the three sub-metering series over time
png(filename = "plot3.png", width = 480, height = 480, units = "px")
par(mar = c(4, 4, 2, 1))
# plot() opens the frame itself, so the previous explicit plot.new() call was
# redundant (it produced an extra blank page on the device) and was removed.
plot(data.subset$DateTime, data.subset$Sub_metering_1,
  type = "l", ylab = "Energy sub metering", xlab = ""
)
# lines() is the idiomatic way to overlay further series (was points(type = "l"))
lines(data.subset$DateTime, data.subset$Sub_metering_2, col = "red")
lines(data.subset$DateTime, data.subset$Sub_metering_3, col = "blue")
legend("topright",
  lty = 1,
  col = c("black", "red", "blue"),
  legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
)
dev.off()
|
/plot3.R
|
no_license
|
ngkimfong/ExData_Plotting1
|
R
| false
| false
| 1,604
|
r
|
# Download the data (skipped if already present on disk)
dataset_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if (!file.exists("power.zip")) {
  download.file(dataset_url, "power.zip")
}
if (!file.exists("power")) {
  unzip("power.zip", exdir = "power")
}
# Use data.table package to read the data; "?" marks missing values in the raw file
library(data.table)
data <- fread("~/power/household_power_consumption.txt",
  na.strings = "?",
  colClasses = c(
    "character", "character", "numeric", "numeric", "numeric",
    "numeric", "numeric", "numeric", "numeric"
  )
)
summary(data)
head(data)
# Keep only the two days of interest, then convert Date/DateTime.
# Filtering on the raw "d/m/Y" strings happens *before* date conversion on
# purpose: it avoids parsing dates for the whole (large) table.
data.subset <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
data.subset$DateTime <- paste(data.subset$Date, data.subset$Time)
data.subset[, `:=`(
  DateTime = as.POSIXct(strptime(DateTime, format = "%d/%m/%Y %H:%M:%S")),
  Date = as.Date(Date, "%d/%m/%Y")
)]
class(data.subset$Date)
class(data.subset$DateTime)
table(data.subset$Date)
# Create plot3: the three sub-metering series over time
png(filename = "plot3.png", width = 480, height = 480, units = "px")
par(mar = c(4, 4, 2, 1))
# plot() opens the frame itself, so the previous explicit plot.new() call was
# redundant (it produced an extra blank page on the device) and was removed.
plot(data.subset$DateTime, data.subset$Sub_metering_1,
  type = "l", ylab = "Energy sub metering", xlab = ""
)
# lines() is the idiomatic way to overlay further series (was points(type = "l"))
lines(data.subset$DateTime, data.subset$Sub_metering_2, col = "red")
lines(data.subset$DateTime, data.subset$Sub_metering_3, col = "blue")
legend("topright",
  lty = 1,
  col = c("black", "red", "blue"),
  legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
)
dev.off()
|
# Add slide-level tumor annotations from a recount RangedSummarizedExperiment
# to the breast-cancer metadata table and write the augmented table back out.
library('recount') # to process gene expression
colDataRse = colData(rse_gene) # expects rse_gene (rse_gene_breast.Rdata) to be loaded
setwd('/Users/mab8354/granddb/static/data')
databreast = read.csv('metadata_breast_01232021.csv')
# Align rows of the metadata table with samples in the RSE via the aliquot ids
a = databreast['gdc_cases.samples.portions.analytes.aliquots.submitter_id']
b = colDataRse@listData$gdc_cases.samples.portions.analytes.aliquots.submitter_id
indInter = match(a[, 1], b)
# Create and fill the new columns in one step; unmatched samples become real NA.
# (The previous two-step version pre-filled with the *string* 'NA', which was
# redundant -- the values were fully overwritten -- and would have left literal
# "NA" text in the CSV had the overwrite ever been skipped.)
databreast$cgc_slide_percent_tumor_nuclei = colDataRse@listData$cgc_slide_percent_tumor_nuclei[indInter]
databreast$cgc_slide_percent_necrosis = colDataRse@listData$cgc_slide_percent_necrosis[indInter]
# Write the augmented metadata file
write.csv(databreast, '/Users/mab8354/granddb/static/data/metadata_breast_01292021.csv', row.names = FALSE)
|
/src/builddbDf/cancer/addPhenoVar.r
|
no_license
|
QuackenbushLab/grand
|
R
| false
| false
| 831
|
r
|
# Add slide-level tumor annotations from a recount RangedSummarizedExperiment
# to the breast-cancer metadata table and write the augmented table back out.
library('recount') # to process gene expression
colDataRse = colData(rse_gene) # expects rse_gene (rse_gene_breast.Rdata) to be loaded
setwd('/Users/mab8354/granddb/static/data')
databreast = read.csv('metadata_breast_01232021.csv')
# Align rows of the metadata table with samples in the RSE via the aliquot ids
a = databreast['gdc_cases.samples.portions.analytes.aliquots.submitter_id']
b = colDataRse@listData$gdc_cases.samples.portions.analytes.aliquots.submitter_id
indInter = match(a[, 1], b)
# Create and fill the new columns in one step; unmatched samples become real NA.
# (The previous two-step version pre-filled with the *string* 'NA', which was
# redundant -- the values were fully overwritten -- and would have left literal
# "NA" text in the CSV had the overwrite ever been skipped.)
databreast$cgc_slide_percent_tumor_nuclei = colDataRse@listData$cgc_slide_percent_tumor_nuclei[indInter]
databreast$cgc_slide_percent_necrosis = colDataRse@listData$cgc_slide_percent_necrosis[indInter]
# Write the augmented metadata file
write.csv(databreast, '/Users/mab8354/granddb/static/data/metadata_breast_01292021.csv', row.names = FALSE)
|
# Reproduction script: read APAC episode files for 2010-2013 in parallel,
# filter to women of childbearing age, and export the result to Stata format.
# All console output is captured in reproIfeoma.txt via sink().
sink("reproIfeoma.txt")
.timeStart <- Sys.time()
path <- "/home/exacloud/lustre1/users/chanb"
# Locate the yearly episode files (episodes*2010.txt ... episodes*2013.txt)
f <- grep("episodes.*201[0-3]\\.txt", list.files(path), ignore.case=TRUE, value=TRUE)
f <- file.path(path, f)
# Log file sizes and modification times for provenance
file.info(f)[c("size", "mtime")]
library(data.table)
# Read only the header row of the first file to get the column names
varnames <- names(fread(f[1], nrows=0))
names(varnames) <- 1:length(varnames)
# Column positions to keep when reading each file
select <- c(1, 2, 7, 13, 14, 15, 16, 17, 18, 20, 21, 23, 24, 26:29, 35, 36:48, 62:74, 75:79, 98)
# Default every column to character; only the id/year-like columns are integer
colClasses <- rep("character", length(varnames))
colClasses[varnames %in% c("patid", "personkey", "yob", "megnum")] <- "integer"
names(colClasses) <- varnames
# Strings the source files use to denote missing values
na.strings <- c("NA", "*N", "*NU", "*NUL", "*NULL", "*NULL*", "")
library("doParallel")
# One parallel worker per yearly file
years <- length(f)
cl <- makeCluster(years)
registerDoParallel(cl, cores=years)
L <- foreach (i = 1:years) %dopar% {
  require(data.table)
  Di <- fread(f[i], select=select, colClasses=colClasses, na.strings=na.strings)
  # Keep non-male patients born within a 45-year window that shifts by one
  # year per file (1955-2000 for the first file, 1956-2001 for the second, ...)
  Di <- Di[gender != "M" & between(yob, 1955 + (i - 1), 2000 + (i - 1))]
  Di <- Di[,
    `:=` (fromdate = as.Date(fromdate),
          todate = as.Date(todate))]
  # Derive the calendar year of the episode start for downstream grouping
  Di <- Di[, year := year(fromdate)]
  Di
}
stopCluster(cl)
# Stack the per-year tables into a single data.table
D <- rbindlist(L)
# Sanity check: row counts and birth-year range by episode year and gender
D[, .(.N, minYOB = min(yob), maxYOB = max(yob)), .(year, gender)]
library(haven)
f <- file.path(path, "APAC_episodes_childbearingwomen.dta")
write_dta(D, f)
file.info(f)[c("size", "mtime")]
# Summarize the exported object for the log
list(objectName = deparse(substitute(D)),
     timeStamp = sprintf("%s", Sys.time()),
     objectSize = format(object.size(D), units="auto"),
     rowCount = nrow(D),
     colCount = ncol(D),
     colNames = names(D),
     colClasses = sapply(D, class))
# Timing and session information for reproducibility
list(timeStart = ifelse(exists(".timeStart"), format(.timeStart), NA),
     timeEnd = Sys.time(),
     timeElapsed = ifelse(exists(".timeStart"),
                          format(Sys.time() - .timeStart, format=difftime),
                          NA),
     Sys.info = Sys.info(),
     sessionInfo = sessionInfo())
sink()
|
/ParallelizedFread/reproIfeoma.R
|
permissive
|
benjamin-chan/ExacloudPractice
|
R
| false
| false
| 1,942
|
r
|
# Reproduction script: read APAC episode files for 2010-2013 in parallel,
# filter to women of childbearing age, and export the result to Stata format.
# All console output is captured in reproIfeoma.txt via sink().
sink("reproIfeoma.txt")
.timeStart <- Sys.time()
path <- "/home/exacloud/lustre1/users/chanb"
# Locate the yearly episode files (episodes*2010.txt ... episodes*2013.txt)
f <- grep("episodes.*201[0-3]\\.txt", list.files(path), ignore.case=TRUE, value=TRUE)
f <- file.path(path, f)
# Log file sizes and modification times for provenance
file.info(f)[c("size", "mtime")]
library(data.table)
# Read only the header row of the first file to get the column names
varnames <- names(fread(f[1], nrows=0))
names(varnames) <- 1:length(varnames)
# Column positions to keep when reading each file
select <- c(1, 2, 7, 13, 14, 15, 16, 17, 18, 20, 21, 23, 24, 26:29, 35, 36:48, 62:74, 75:79, 98)
# Default every column to character; only the id/year-like columns are integer
colClasses <- rep("character", length(varnames))
colClasses[varnames %in% c("patid", "personkey", "yob", "megnum")] <- "integer"
names(colClasses) <- varnames
# Strings the source files use to denote missing values
na.strings <- c("NA", "*N", "*NU", "*NUL", "*NULL", "*NULL*", "")
library("doParallel")
# One parallel worker per yearly file
years <- length(f)
cl <- makeCluster(years)
registerDoParallel(cl, cores=years)
L <- foreach (i = 1:years) %dopar% {
  require(data.table)
  Di <- fread(f[i], select=select, colClasses=colClasses, na.strings=na.strings)
  # Keep non-male patients born within a 45-year window that shifts by one
  # year per file (1955-2000 for the first file, 1956-2001 for the second, ...)
  Di <- Di[gender != "M" & between(yob, 1955 + (i - 1), 2000 + (i - 1))]
  Di <- Di[,
    `:=` (fromdate = as.Date(fromdate),
          todate = as.Date(todate))]
  # Derive the calendar year of the episode start for downstream grouping
  Di <- Di[, year := year(fromdate)]
  Di
}
stopCluster(cl)
# Stack the per-year tables into a single data.table
D <- rbindlist(L)
# Sanity check: row counts and birth-year range by episode year and gender
D[, .(.N, minYOB = min(yob), maxYOB = max(yob)), .(year, gender)]
library(haven)
f <- file.path(path, "APAC_episodes_childbearingwomen.dta")
write_dta(D, f)
file.info(f)[c("size", "mtime")]
# Summarize the exported object for the log
list(objectName = deparse(substitute(D)),
     timeStamp = sprintf("%s", Sys.time()),
     objectSize = format(object.size(D), units="auto"),
     rowCount = nrow(D),
     colCount = ncol(D),
     colNames = names(D),
     colClasses = sapply(D, class))
# Timing and session information for reproducibility
list(timeStart = ifelse(exists(".timeStart"), format(.timeStart), NA),
     timeEnd = Sys.time(),
     timeElapsed = ifelse(exists(".timeStart"),
                          format(Sys.time() - .timeStart, format=difftime),
                          NA),
     Sys.info = Sys.info(),
     sessionInfo = sessionInfo())
sink()
|
# Extracted example code for hutils' longest common prefix/suffix helpers
library(hutils)
### Name: longest_affix
### Title: Longest common prefix/suffix
### Aliases: longest_affix trim_common_affixes longest_suffix
### longest_prefix
### ** Examples
longest_prefix(c("totalx", "totaly", "totalz")) # common prefix: "total"
longest_suffix(c("ztotal", "ytotal", "xtotal")) # common suffix: "total"
|
/data/genthat_extracted_code/hutils/examples/longest_affix.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 282
|
r
|
# Extracted example code for hutils' longest common prefix/suffix helpers
library(hutils)
### Name: longest_affix
### Title: Longest common prefix/suffix
### Aliases: longest_affix trim_common_affixes longest_suffix
### longest_prefix
### ** Examples
longest_prefix(c("totalx", "totaly", "totalz")) # common prefix: "total"
longest_suffix(c("ztotal", "ytotal", "xtotal")) # common suffix: "total"
|
#' Only emergency admissions
#'
#' Keeps only emergency admissions in the sample
#' @param study.sample Data frame. The study sample. No default.
#' @param admission.type.variable.name Character vector of length 1. The name of
#'     the admission type variable. Defaults to "ad_type".
#' @param emergency.value Character or numeric vector of length 1. The value of
#'     the admission type variable that indicates that an admission is an
#'     emergency admission. Defaults to "Emergency".
#' @param remove.missing Logical vector of length 1. If TRUE all observations
#'     with missing admission type, as detected by is.na, are removed from the
#'     sample. Defaults to TRUE.
#' @return A list with two elements: exclusion.text, a character description
#'     of how many observations were excluded and why, and subsample, the
#'     filtered data frame.
#' @export
OnlyEmergencyAdmissions <- function(study.sample, admission.type.variable.name = "ad_type", emergency.value = "Emergency", remove.missing = TRUE) {
    ## Error handling; these are scalar checks, so the short-circuiting && and
    ## || operators are used instead of the vectorized & and |
    if (!is.data.frame(study.sample))
        stop("study.sample has to be a data frame")
    if (!is.character(admission.type.variable.name) || !IsLength1(admission.type.variable.name))
        stop("admission.type.variable.name has to be a character vector of length 1")
    if ((!is.numeric(emergency.value) && !is.character(emergency.value)) || !IsLength1(emergency.value))
        stop("emergency.value has to be a character or numeric vector of length 1")
    if (!is.logical(remove.missing) || !IsLength1(remove.missing))
        stop("remove.missing has to be a logical vector of length 1")
    ## Create subsample
    subsample <- study.sample
    ## Remove observations with missing admission type (counted separately so
    ## the exclusion text can report them)
    n.missing <- 0
    if (remove.missing) {
        subsample <- subsample[!is.na(subsample[, admission.type.variable.name]), ]
        n.missing <- nrow(study.sample) - nrow(subsample)
    }
    ## Keep only emergency admissions, i.e. drop elective admissions
    ## NOTE(review): if remove.missing = FALSE, rows with NA admission type
    ## survive this == comparison as all-NA rows -- confirm that is intended
    subsample <- subsample[subsample[, admission.type.variable.name] == emergency.value, ]
    n.excluded <- nrow(study.sample) - nrow(subsample) - n.missing
    ## Collate return list
    total.n.excluded <- n.excluded
    if (remove.missing)
        total.n.excluded <- total.n.excluded + n.missing
    exclusion.text <- paste0(total.n.excluded, " were elective admissions.")
    if (remove.missing) {
        exclusion.text <- paste0(total.n.excluded, " excluded: \n\n",
                                 "- ", n.missing, " had missing admission type \n\n",
                                 "- ", n.excluded, " were elective admissions \n\n")
    }
    return.list <- list(exclusion.text = exclusion.text,
                        subsample = subsample)
    return(return.list)
}
|
/R/OnlyEmergencyAdmissions.R
|
permissive
|
martingerdin/bengaltiger
|
R
| false
| false
| 2,530
|
r
|
#' Only emergency admissions
#'
#' Keeps only emergency admissions in the sample
#' @param study.sample Data frame. The study sample. No default.
#' @param admission.type.variable.name Character vector of length 1. The name of
#'     the admission type variable. Defaults to "ad_type".
#' @param emergency.value Character or numeric vector of length 1. The value of
#'     the admission type variable that indicates that an admission is an
#'     emergency admission. Defaults to "Emergency".
#' @param remove.missing Logical vector of length 1. If TRUE all observations
#'     with missing admission type, as detected by is.na, are removed from the
#'     sample. Defaults to TRUE.
#' @return A list with two elements: exclusion.text, a character description
#'     of how many observations were excluded and why, and subsample, the
#'     filtered data frame.
#' @export
OnlyEmergencyAdmissions <- function(study.sample, admission.type.variable.name = "ad_type", emergency.value = "Emergency", remove.missing = TRUE) {
    ## Error handling
    if (!is.data.frame(study.sample))
        stop("study.sample has to be a data frame")
    if (!is.character(admission.type.variable.name) | !IsLength1(admission.type.variable.name))
        stop("admission.type.variable.name has to be a character vector of length 1")
    if ((!is.numeric(emergency.value) & !is.character(emergency.value)) | !IsLength1(emergency.value))
        stop("emergency.value has to be a character or numeric vector of length 1")
    if (!is.logical(remove.missing) | !IsLength1(remove.missing))
        stop("remove.missing has to be a logical vector of length 1")
    ## Create subsample
    subsample <- study.sample
    ## Remove observations with missing admission type (counted separately so
    ## the exclusion text can report them)
    n.missing <- 0
    if (remove.missing) {
        subsample <- subsample[!is.na(subsample[, admission.type.variable.name]), ]
        n.missing <- nrow(study.sample) - nrow(subsample)
    }
    ## Keep only emergency admissions, i.e. drop elective admissions
    ## (the original comment "Remove adults" was copied from another filter
    ## function and did not describe this step)
    subsample <- subsample[subsample[, admission.type.variable.name] == emergency.value, ]
    n.excluded <- nrow(study.sample) - nrow(subsample) - n.missing
    ## Collate return list
    total.n.excluded <- n.excluded
    if (remove.missing)
        total.n.excluded <- total.n.excluded + n.missing
    exclusion.text <- paste0(total.n.excluded, " were elective admissions.")
    if (remove.missing) {
        exclusion.text <- paste0(total.n.excluded, " excluded: \n\n",
                                 "- ", n.missing, " had missing admission type \n\n",
                                 "- ", n.excluded, " were elective admissions \n\n")
    }
    return.list <- list(exclusion.text = exclusion.text,
                        subsample = subsample)
    return(return.list)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geer.R
\docType{data}
\name{aids}
\alias{aids}
\title{Aids Data}
\format{A data frame with 2376 rows and 8 variables}
\usage{
data(aids)
}
\description{
The aids dataset comprises a total of 2376 CD4+ cell counts for 369 HIV
infected men with a follow-up period of approximately eight and a half years.
The number of measurements for each individual varies from 1 to 12 and the
times are not equally spaced. The CD4+ cell data are highly unbalanced.
}
\details{
\itemize{
\item id: subject id
\item time: measurement time
\item cd4: CD4+ cell count
}
}
\keyword{datasets}
|
/fuzzedpackages/gee4/man/aids.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 677
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geer.R
\docType{data}
\name{aids}
\alias{aids}
\title{Aids Data}
\format{A data frame with 2376 rows and 8 variables}
\usage{
data(aids)
}
\description{
The aids dataset comprises a total of 2376 CD4+ cell counts for 369 HIV
infected men with a follow-up period of approximately eight and a half years.
The number of measurements for each individual varies from 1 to 12 and the
times are not equally spaced. The CD4+ cell data are highly unbalanced.
}
\details{
\itemize{
\item id: subject id
\item time: measurement time
\item cd4: CD4+ cell count
}
}
\keyword{datasets}
|
# Auto-generated fuzzing reproduction case (libFuzzer + valgrind) for the
# unexported biwavelet row-quantile routine. `data` is a fixed 3x5 matrix of
# extreme/denormal doubles and `q` is a nonsensical negative quantile -- the
# point is to exercise edge-case handling, not to compute a meaningful result.
testlist <- list(data = structure(c(0, 0, 8.28879260278178e-317, 2.41737052621236e+35, 1.17570999667719e+26, 2.12743751473313e+223, 6.75413975356041e+38, 3.94604863549254e-114, 1.16674439868909e+224, 1.49181102216358e-154, 2.02613064886767e+179, 6.44920162434206e-314, 2.51332283756558e-307, 3.52953696963763e+30, 3.52981610868354e+30), .Dim = c(3L, 5L)), q = -3.17665590958218e-277)
# Invoke the internal C++ entry point exactly as the fuzzer harness does
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
# Print the structure of whatever comes back for the regression log
str(result)
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610554643-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 462
|
r
|
# Auto-generated fuzzing reproduction case (libFuzzer + valgrind) for the
# unexported biwavelet row-quantile routine. `data` is a fixed 3x5 matrix of
# extreme/denormal doubles and `q` is a nonsensical negative quantile -- the
# point is to exercise edge-case handling, not to compute a meaningful result.
testlist <- list(data = structure(c(0, 0, 8.28879260278178e-317, 2.41737052621236e+35, 1.17570999667719e+26, 2.12743751473313e+223, 6.75413975356041e+38, 3.94604863549254e-114, 1.16674439868909e+224, 1.49181102216358e-154, 2.02613064886767e+179, 6.44920162434206e-314, 2.51332283756558e-307, 3.52953696963763e+30, 3.52981610868354e+30), .Dim = c(3L, 5L)), q = -3.17665590958218e-277)
# Invoke the internal C++ entry point exactly as the fuzzer harness does
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
# Print the structure of whatever comes back for the regression log
str(result)
|
#' Transform files with transformer functions
#'
#' `transform_files` applies transformations to file contents and writes back
#' the result.
#' @param files A character vector with paths to the file that should be
#'   transformed.
#' @inheritParams make_transformer
#' @section Value:
#' Invisibly returns a data frame that indicates for each file considered for
#' styling whether or not it was actually changed.
#' @keywords internal
transform_files <- function(files, transformers, include_roxygen_examples) {
  transformer <- make_transformer(transformers, include_roxygen_examples)
  # Width of the longest path (capped at 80), used to align progress output
  max_char <- min(max(nchar(files), 0), 80)
  if (length(files) > 0L) {
    cat("Styling ", length(files), " files:\n")
  }
  # transform_file styles one file; logical result indicates whether it changed
  changed <- map_lgl(files, transform_file,
    fun = transformer, max_char_path = max_char
  )
  communicate_summary(changed, max_char)
  communicate_warning(changed, transformers)
  data_frame(file = files, changed = changed)
}
#' Transform a file and output a customized message
#'
#' Wraps `enc::transform_lines_enc()` and outputs customized messages.
#' @param max_char_path The number of characters of the longest path. Determines
#'   the indention level of `message_after`.
#' @param message_before The message to print before the path.
#' @param message_after The message to print after the path.
#' @param message_after_if_changed The message to print after `message_after` if
#'   any file was transformed.
#' @inheritParams enc::transform_lines_enc
#' @param ... Further arguments passed to `enc::transform_lines_enc()`.
#' @return Invisibly returns whatever `transform_code()` returned for the file.
#' @keywords internal
transform_file <- function(path,
                           fun,
                           verbose = FALSE,
                           max_char_path,
                           message_before = "",
                           message_after = " [DONE]",
                           message_after_if_changed = " *",
                           ...) {
  # Pad with spaces after the path so the status bullets line up in a column
  char_after_path <- nchar(message_before) + nchar(path) + 1
  max_char_after_message_path <- nchar(message_before) + max_char_path + 1
  n_spaces_before_message_after <-
    max_char_after_message_path - char_after_path
  cat(
    message_before, path,
    rep_char(" ", max(0L, n_spaces_before_message_after)),
    append = FALSE
  )
  changed <- transform_code(path, fun = fun, verbose = verbose, ...)
  # NA -> "warning" bullet, TRUE -> "info" bullet, FALSE -> "tick" bullet
  bullet <- ifelse(is.na(changed), "warning", ifelse(changed, "info", "tick"))
  cli::cat_bullet(bullet = bullet)
  invisible(changed)
}
#' Closure to return a transformer function
#'
#' This function takes a list of transformer functions as input and
#' returns a function that can be applied to character strings
#' that should be transformed.
#' @param transformers A list of transformer functions that operate on flat
#'   parse tables.
#' @param include_roxygen_examples Whether or not to style code in roxygen
#'   examples.
#' @keywords internal
#' @importFrom purrr when
make_transformer <- function(transformers, include_roxygen_examples) {
  # Force evaluation now so the returned closure does not capture a lazy
  # promise that could change before first use
  force(transformers)
  function(text) {
    transformed_code <- text %>%
      parse_transform_serialize_r(transformers) %>%
      # Style roxygen examples only when requested; otherwise pass through
      when(
        include_roxygen_examples ~
          parse_transform_serialize_roxygen(., transformers),
        ~.
      )
    transformed_code
  }
}
#' Parse, transform and serialize roxygen comments
#'
#' Splits `text` into roxygen code examples and non-roxygen code examples and
#' then maps over these examples by applying
#' [style_roxygen_code_example()].
#' @section Hierarchy:
#' Styling involves splitting roxygen example code into segments, and segments
#' into snippets. This describes the process for input of
#' [parse_transform_serialize_roxygen()]:
#'
#' - Splitting code into roxygen example code and other code. Downstream,
#'   we are only concerned about roxygen code. See
#'   [parse_transform_serialize_roxygen()].
#' - Every roxygen example code can have zero or more
#'   dontrun / dontshow / donttest sequences. We next create segments of roxygen
#'   code examples that contain at most one of these. See
#'   [style_roxygen_code_example()].
#' - We further split the segment that contains at most one dont* sequence into
#'   snippets that are either don* or not. See
#'   [style_roxygen_code_example_segment()].
#'
#' Finally, that we have roxygen code snippets that are either dont* or not,
#' we style them in [style_roxygen_example_snippet()] using
#' [parse_transform_serialize_r()].
#' @importFrom purrr map_at flatten_chr
#' @keywords internal
parse_transform_serialize_roxygen <- function(text, transformers) {
  # Line ranges (start to stop) of all roxygen example blocks in `text`
  roxygen_seqs <- identify_start_to_stop_of_roxygen_examples_from_text(text)
  # Nothing to do if there are no roxygen examples
  if (length(roxygen_seqs) < 1L) return(text)
  split_segments <- split_roxygen_segments(text, unlist(roxygen_seqs))
  # Style only the segments that are roxygen examples; keep the rest untouched
  map_at(split_segments$separated, split_segments$selectors,
    style_roxygen_code_example,
    transformers = transformers
  ) %>%
    flatten_chr()
}
#' Split text into roxygen and non-roxygen example segments
#'
#' @param text Roxygen comments
#' @param roxygen_examples Integer sequence that indicates which lines in `text`
#'   are roxygen examples. Most conveniently obtained with
#'   [identify_start_to_stop_of_roxygen_examples_from_text].
#' @return
#' A list with two elements:
#'
#' * A list that contains elements grouped into roxygen and non-roxygen
#'   sections. This list is named `separated`.
#' * An integer vector with the indices that correspond to roxygen code
#'   examples in `separated`.
#' @importFrom rlang seq2
#' @keywords internal
split_roxygen_segments <- function(text, roxygen_examples) {
  if (is.null(roxygen_examples)) return(lst(separated = list(text), selectors = NULL))
  all_lines <- seq2(1L, length(text))
  # 1 for lines inside a roxygen example, 0 otherwise
  active_segment <- as.integer(all_lines %in% roxygen_examples)
  # A new segment starts wherever the roxygen/non-roxygen state flips
  segment_id <- cumsum(abs(c(0L, diff(active_segment)))) + 1L
  separated <- split(text, factor(segment_id))
  # If the text starts with a roxygen example, the roxygen segments sit at the
  # odd positions of `separated`, otherwise at the even positions.
  # `if ()`/`else` must be used here, not `ifelse()`: `ifelse()` replicates its
  # branches with `rep()`, which errors on closures, so the previous
  # `ifelse(..., odd_index, even_index)` failed at runtime.
  restyle_selector <- if (roxygen_examples[1] == 1L) odd_index else even_index
  lst(separated, selectors = restyle_selector(separated))
}
#' Parse, transform and serialize text
#'
#' Wrapper function for the common three operations.
#' @inheritParams compute_parse_data_nested
#' @inheritParams apply_transformers
#' @seealso [parse_transform_serialize_roxygen()]
#' @keywords internal
parse_transform_serialize_r <- function(text, transformers) {
  text <- assert_text(text)
  pd_nested <- compute_parse_data_nested(text)
  # Guard against token-free input *before* touching the parse table: the
  # previous version called find_start_line() first, which is wasted work (and
  # potentially unsafe) on an empty parse table.
  if (nrow(pd_nested) == 0) {
    warning(
      "Text to style did not contain any tokens. Returning empty string.",
      call. = FALSE
    )
    return("")
  }
  start_line <- find_start_line(pd_nested)
  transformed_pd <- apply_transformers(pd_nested, transformers)
  # Flatten the nested parse table to terminal tokens and resolve indention
  flattened_pd <- post_visit(transformed_pd, list(extract_terminals)) %>%
    enrich_terminals(transformers$use_raw_indention) %>%
    apply_ref_indention() %>%
    set_regex_indention(
      pattern = transformers$reindention$regex_pattern,
      target_indention = transformers$reindention$indention,
      comments_only = transformers$reindention$comments_only
    )
  serialized_transformed_text <-
    serialize_parse_data_flattened(flattened_pd, start_line = start_line)
  # Verify the AST round trip only when styling could not have altered tokens
  if (can_verify_roundtrip(transformers)) {
    verify_roundtrip(text, serialized_transformed_text)
  }
  serialized_transformed_text
}
#' Apply transformers to a parse table
#'
#' The column `multi_line` is updated (after the line break information is
#' modified) and the rest of the transformers are applied afterwards,
#' The former requires two pre visits and one post visit.
#' @details
#' The order of the transformations is:
#'
#' * Initialization (must be first).
#' * Line breaks (must be before spacing due to indention).
#' * Update of newline and multi-line attributes (must not change afterwards,
#'   hence line breaks must be modified first).
#' * spacing rules (must be after line-breaks and updating newlines and
#'   multi-line).
#' * indention.
#' * token manipulation / replacement (is last since adding and removing tokens
#'   will invalidate columns token_after and token_before).
#' * Update indention reference (must be after line breaks).
#'
#' @param pd_nested A nested parse table.
#' @param transformers A list of *named* transformer functions
#' @importFrom purrr flatten
#' @keywords internal
apply_transformers <- function(pd_nested, transformers) {
  # Steps 1-2: initialize, then modify line breaks (top-down visit)
  transformed_line_breaks <- pre_visit(
    pd_nested,
    c(transformers$initialize, transformers$line_break)
  )
  # Step 3: recompute newline / multi-line attributes bottom-up, now that the
  # line breaks are final
  transformed_updated_multi_line <- post_visit(
    transformed_line_breaks,
    c(set_multi_line, update_newlines)
  )
  # Steps 4-6: spacing, indention and token transformers (top-down visit)
  transformed_all <- pre_visit(
    transformed_updated_multi_line,
    c(transformers$space, transformers$indention, transformers$token)
  )
  # Step 7: propagate context (newlines, indents, spaces, indention refs)
  # from the nests down to the terminal tokens
  transformed_absolute_indent <- context_to_terminals(
    transformed_all,
    outer_lag_newlines = 0L,
    outer_indent = 0L,
    outer_spaces = 0L,
    outer_indention_refs = NA
  )
  transformed_absolute_indent
}
#' Check whether a roundtrip verification can be carried out
#'
#' If scope was set to "line_breaks" or lower (compare [tidyverse_style()]),
#' we can compare the expression before and after styling and return an error if
#' it is not the same.
#' @param transformers The list of transformer functions used for styling.
#'   Needed for reverse engineering the scope.
#' @keywords internal
can_verify_roundtrip <- function(transformers) {
  # Token transformers may legitimately change the AST, so the round trip can
  # only be verified when none are configured.
  token_rules <- transformers$token
  is.null(token_rules)
}
#' Verify the styling
#'
#' If scope was set to "line_breaks" or lower (compare [tidyverse_style()]),
#' we can compare the expression before and after styling and return an error if
#' it is not the same. Note that this method ignores comments and no
#' verification can be conducted if scope > "line_breaks".
#' @inheritParams expressions_are_identical
#' @examples
#' styler:::verify_roundtrip("a+1", "a + 1")
#' styler:::verify_roundtrip("a+1", "a + 1 # comments are dropped")
#' \dontrun{
#' styler:::verify_roundtrip("a+1", "b - 3")
#' }
#' @keywords internal
verify_roundtrip <- function(old_text, new_text) {
  # Guard clause: nothing to report when the parsed expressions agree
  if (expressions_are_identical(old_text, new_text)) {
    return(invisible(NULL))
  }
  stop(paste(
    "The expression evaluated before the styling is not the same as the",
    "expression after styling. This should not happen. Please file a",
    "bug report on GitHub (https://github.com/r-lib/styler/issues)",
    "using a reprex."
  ), call. = FALSE)
}
#' Check whether two expressions are identical
#'
#' @param old_text The initial expression in its character representation.
#' @param new_text The styled expression in its character representation.
#' @keywords internal
expressions_are_identical <- function(old_text, new_text) {
  # Parse both snippets without source references so purely cosmetic
  # differences (spacing, line breaks) do not affect the comparison.
  parsed_old <- parse_safely(old_text, keep.source = FALSE)
  parsed_new <- parse_safely(new_text, keep.source = FALSE)
  identical(parsed_old, parsed_new)
}
|
/R/transform-files.R
|
no_license
|
davidklaing/styler
|
R
| false
| false
| 10,770
|
r
|
#' Transform files with transformer functions
#'
#' `transform_files` applies transformations to file contents and writes back
#' the result.
#' @param files A character vector with paths to the file that should be
#'   transformed.
#' @inheritParams make_transformer
#' @section Value:
#' Invisibly returns a data frame that indicates for each file considered for
#' styling whether or not it was actually changed.
#' @keywords internal
transform_files <- function(files, transformers, include_roxygen_examples) {
  transformer <- make_transformer(transformers, include_roxygen_examples)
  # Width of the longest path (capped at 80), used to align progress output
  max_char <- min(max(nchar(files), 0), 80)
  if (length(files) > 0L) {
    cat("Styling ", length(files), " files:\n")
  }
  # transform_file styles one file; logical result indicates whether it changed
  changed <- map_lgl(files, transform_file,
    fun = transformer, max_char_path = max_char
  )
  communicate_summary(changed, max_char)
  communicate_warning(changed, transformers)
  data_frame(file = files, changed = changed)
}
#' Transform a file and output a customized message
#'
#' Wraps `enc::transform_lines_enc()` and outputs customized messages.
#' @param max_char_path The number of characters of the longest path. Determines
#'   the indention level of `message_after`.
#' @param message_before The message to print before the path.
#' @param message_after The message to print after the path.
#' @param message_after_if_changed The message to print after `message_after` if
#'   any file was transformed.
#' @inheritParams enc::transform_lines_enc
#' @param ... Further arguments passed to `enc::transform_lines_enc()`.
#' @return Invisibly returns whatever `transform_code()` returned for the file.
#' @keywords internal
transform_file <- function(path,
                           fun,
                           verbose = FALSE,
                           max_char_path,
                           message_before = "",
                           message_after = " [DONE]",
                           message_after_if_changed = " *",
                           ...) {
  # Pad with spaces after the path so the status bullets line up in a column
  char_after_path <- nchar(message_before) + nchar(path) + 1
  max_char_after_message_path <- nchar(message_before) + max_char_path + 1
  n_spaces_before_message_after <-
    max_char_after_message_path - char_after_path
  cat(
    message_before, path,
    rep_char(" ", max(0L, n_spaces_before_message_after)),
    append = FALSE
  )
  changed <- transform_code(path, fun = fun, verbose = verbose, ...)
  # NA -> "warning" bullet, TRUE -> "info" bullet, FALSE -> "tick" bullet
  bullet <- ifelse(is.na(changed), "warning", ifelse(changed, "info", "tick"))
  cli::cat_bullet(bullet = bullet)
  invisible(changed)
}
#' Closure to return a transformer function
#'
#' This function takes a list of transformer functions as input and
#' returns a function that can be applied to character strings
#' that should be transformed.
#' @param transformers A list of transformer functions that operate on flat
#'   parse tables.
#' @param include_roxygen_examples Whether or not to style code in roxygen
#'   examples.
#' @keywords internal
#' @importFrom purrr when
make_transformer <- function(transformers, include_roxygen_examples) {
  # Force evaluation now so the returned closure does not capture a lazy
  # promise that could change before first use
  force(transformers)
  function(text) {
    transformed_code <- text %>%
      parse_transform_serialize_r(transformers) %>%
      # Style roxygen examples only when requested; otherwise pass through
      when(
        include_roxygen_examples ~
          parse_transform_serialize_roxygen(., transformers),
        ~.
      )
    transformed_code
  }
}
#' Parse, transform and serialize roxygen comments
#'
#' Splits `text` into roxygen code examples and non-roxygen code examples and
#' then maps over these examples by applying
#' [style_roxygen_code_example()].
#' @section Hierarchy:
#' Styling involves splitting roxygen example code into segments, and segments
#' into snippets. This describes the process for input of
#' [parse_transform_serialize_roxygen()]:
#'
#' - Splitting code into roxygen example code and other code. Downstream,
#'   we are only concerned about roxygen code. See
#'   [parse_transform_serialize_roxygen()].
#' - Every roxygen example code can have zero or more
#'   dontrun / dontshow / donttest sequences. We next create segments of roxygen
#'   code examples that contain at most one of these. See
#'   [style_roxygen_code_example()].
#' - We further split the segment that contains at most one dont* sequence into
#'   snippets that are either don* or not. See
#'   [style_roxygen_code_example_segment()].
#'
#' Finally, that we have roxygen code snippets that are either dont* or not,
#' we style them in [style_roxygen_example_snippet()] using
#' [parse_transform_serialize_r()].
#' @importFrom purrr map_at flatten_chr
#' @keywords internal
parse_transform_serialize_roxygen <- function(text, transformers) {
  # Line ranges (start to stop) of all roxygen example blocks in `text`
  roxygen_seqs <- identify_start_to_stop_of_roxygen_examples_from_text(text)
  # Nothing to do if there are no roxygen examples
  if (length(roxygen_seqs) < 1L) return(text)
  split_segments <- split_roxygen_segments(text, unlist(roxygen_seqs))
  # Style only the segments that are roxygen examples; keep the rest untouched
  map_at(split_segments$separated, split_segments$selectors,
    style_roxygen_code_example,
    transformers = transformers
  ) %>%
    flatten_chr()
}
#' Split text into roxygen and non-roxygen example segments
#'
#' @param text Roxygen comments
#' @param roxygen_examples Integer sequence that indicates which lines in `text`
#'   are roxygen examples. Most conveniently obtained with
#'   [identify_start_to_stop_of_roxygen_examples_from_text].
#' @return
#' A list with two elements:
#'
#' * A list that contains elements grouped into roxygen and non-roxygen
#'   sections. This list is named `separated`.
#' * An integer vector with the indices that correspond to roxygen code
#'   examples in `separated`.
#' @importFrom rlang seq2
#' @keywords internal
split_roxygen_segments <- function(text, roxygen_examples) {
  if (is.null(roxygen_examples)) return(lst(separated = list(text), selectors = NULL))
  all_lines <- seq2(1L, length(text))
  # 1 for lines inside a roxygen example, 0 otherwise
  active_segment <- as.integer(all_lines %in% roxygen_examples)
  # A new segment starts wherever the roxygen/non-roxygen state flips
  segment_id <- cumsum(abs(c(0L, diff(active_segment)))) + 1L
  separated <- split(text, factor(segment_id))
  # If the text starts with a roxygen example, the roxygen segments sit at the
  # odd positions of `separated`, otherwise at the even positions.
  # `if ()`/`else` must be used here, not `ifelse()`: `ifelse()` replicates its
  # branches with `rep()`, which errors on closures, so the previous
  # `ifelse(..., odd_index, even_index)` failed at runtime.
  restyle_selector <- if (roxygen_examples[1] == 1L) odd_index else even_index
  lst(separated, selectors = restyle_selector(separated))
}
#' Parse, transform and serialize text
#'
#' Wrapper function for the common three operations.
#' @inheritParams compute_parse_data_nested
#' @inheritParams apply_transformers
#' @seealso [parse_transform_serialize_roxygen()]
#' @keywords internal
parse_transform_serialize_r <- function(text, transformers) {
  text <- assert_text(text)
  pd_nested <- compute_parse_data_nested(text)
  start_line <- find_start_line(pd_nested)
  # Empty input (e.g. only comments stripped away) yields no tokens.
  if (nrow(pd_nested) == 0) {
    warning(
      "Text to style did not contain any tokens. Returning empty string.",
      call. = FALSE
    )
    return("")
  }
  transformed_pd <- apply_transformers(pd_nested, transformers)
  # Flatten the nested parse table and resolve indention information.
  flattened_pd <- post_visit(transformed_pd, list(extract_terminals))
  flattened_pd <- enrich_terminals(flattened_pd, transformers$use_raw_indention)
  flattened_pd <- apply_ref_indention(flattened_pd)
  flattened_pd <- set_regex_indention(
    flattened_pd,
    pattern = transformers$reindention$regex_pattern,
    target_indention = transformers$reindention$indention,
    comments_only = transformers$reindention$comments_only
  )
  styled_text <- serialize_parse_data_flattened(flattened_pd, start_line = start_line)
  # When the scope permits, check that styling did not change the AST.
  if (can_verify_roundtrip(transformers)) {
    verify_roundtrip(text, styled_text)
  }
  styled_text
}
#' Apply transformers to a parse table
#'
#' The column `multi_line` is updated (after the line break information is
#' modified) and the rest of the transformers are applied afterwards.
#' The former requires two pre visits and one post visit.
#' @details
#' The order of the transformations is:
#'
#' * Initialization (must be first).
#' * Line breaks (must be before spacing due to indention).
#' * Update of newline and multi-line attributes (must not change afterwards,
#'   hence line breaks must be modified first).
#' * Spacing rules (must be after line-breaks and updating newlines and
#'   multi-line).
#' * Indention.
#' * Token manipulation / replacement (is last since adding and removing tokens
#'   will invalidate columns token_after and token_before).
#' * Update indention reference (must be after line breaks).
#'
#' @param pd_nested A nested parse table.
#' @param transformers A list of *named* transformer functions
#' @importFrom purrr flatten
#' @keywords internal
apply_transformers <- function(pd_nested, transformers) {
  # Step 1: initialization and line-break rules (pre-order traversal).
  with_line_breaks <- pre_visit(
    pd_nested,
    c(transformers$initialize, transformers$line_break)
  )
  # Step 2: freeze newline / multi-line attributes (post-order traversal).
  with_multi_line <- post_visit(
    with_line_breaks,
    c(set_multi_line, update_newlines)
  )
  # Step 3: spacing, indention and token rules.
  fully_transformed <- pre_visit(
    with_multi_line,
    c(transformers$space, transformers$indention, transformers$token)
  )
  # Step 4: propagate context down to the terminal tokens.
  context_to_terminals(
    fully_transformed,
    outer_lag_newlines = 0L,
    outer_indent = 0L,
    outer_spaces = 0L,
    outer_indention_refs = NA
  )
}
#' Check whether a roundtrip verification can be carried out
#'
#' If scope was set to "line_breaks" or lower (compare [tidyverse_style()]),
#' we can compare the expression before and after styling and return an error
#' if it is not the same. Token rules may legitimately change the AST, so the
#' check is only possible when no token transformers are set.
#' @param transformers The list of transformer functions used for styling.
#'   Needed for reverse engineering the scope.
#' @keywords internal
can_verify_roundtrip <- function(transformers) {
  token_rules <- transformers$token
  is.null(token_rules)
}
#' Verify the styling
#'
#' If scope was set to "line_breaks" or lower (compare [tidyverse_style()]),
#' we can compare the expression before and after styling and return an error
#' if it is not the same. Note that this method ignores comments and no
#' verification can be conducted if scope > "line_breaks".
#' @inheritParams expressions_are_identical
#' @examples
#' styler:::verify_roundtrip("a+1", "a + 1")
#' styler:::verify_roundtrip("a+1", "a + 1 # comments are dropped")
#' \dontrun{
#' styler:::verify_roundtrip("a+1", "b - 3")
#' }
#' @keywords internal
verify_roundtrip <- function(old_text, new_text) {
  # Guard clause: identical ASTs mean the styling was behavior-preserving.
  if (expressions_are_identical(old_text, new_text)) {
    return(invisible(NULL))
  }
  stop(
    paste(
      "The expression evaluated before the styling is not the same as the",
      "expression after styling. This should not happen. Please file a",
      "bug report on GitHub (https://github.com/r-lib/styler/issues)",
      "using a reprex."
    ),
    call. = FALSE
  )
}
#' Check whether two expressions are identical
#'
#' Both inputs are parsed (without source references, so formatting does not
#' matter) and the resulting expression objects are compared exactly.
#' @param old_text The initial expression in its character representation.
#' @param new_text The styled expression in its character representation.
#' @keywords internal
expressions_are_identical <- function(old_text, new_text) {
  parsed_old <- parse_safely(old_text, keep.source = FALSE)
  parsed_new <- parse_safely(new_text, keep.source = FALSE)
  identical(parsed_old, parsed_new)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/module_generation.R
\name{get_monte_carlo_simulation_p_values}
\alias{get_monte_carlo_simulation_p_values}
\title{Do Monte Carlo simulation}
\usage{
get_monte_carlo_simulation_p_values(module_graphs_, n_monte_carlo_ = 1000,
n_threads_ = 1, verbose_level_ = 1, seed_ = 12345)
}
\arguments{
\item{gene_weights_}{A numeric vector. Should be the same one the user supplied to \code{construct_weighted_graph}.}
}
\description{
Do Monte Carlo simulation
}
|
/man/get_monte_carlo_simulation_p_values.Rd
|
no_license
|
hrk2109/NMFEM
|
R
| false
| true
| 531
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/module_generation.R
\name{get_monte_carlo_simulation_p_values}
\alias{get_monte_carlo_simulation_p_values}
\title{Do Monte Carlo simulation}
\usage{
get_monte_carlo_simulation_p_values(module_graphs_, n_monte_carlo_ = 1000,
n_threads_ = 1, verbose_level_ = 1, seed_ = 12345)
}
\arguments{
\item{gene_weights_}{A numeric vector. Should be the same one the user supplied to \code{construct_weighted_graph}.}
}
\description{
Do Monte Carlo simulation
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process-optimizer.R
\name{optimize_effect}
\alias{optimize_effect}
\title{Run the Balke optimizer}
\usage{
optimize_effect(obj)
}
\arguments{
\item{obj}{Object as returned by \link{analyze_graph}}
}
\value{
An object of class "balkebound" that contains the bounds and logs as character strings
}
\description{
Given an object with the linear programming problem set up, compute the bounds
using the c++ code developed by Alex Balke. Bounds are returned as text but can
be converted to R functions using \link{interpret_bounds}, or latex code using
\link{latex_bounds}.
}
\examples{
b <- graph_from_literal(X -+ Y, Ur -+ X, Ur -+ Y)
V(b)$leftside <- c(0,0,0)
V(b)$latent <- c(0,0,1)
V(b)$nvals <- c(2,2,2)
E(b)$rlconnect <- E(b)$edge.monotone <- c(0, 0, 0)
obj <- analyze_graph(b, constraints = NULL, effectt = "p{Y(X = 1) = 1} - p{Y(X = 0) = 1}")
optimize_effect(obj)
}
|
/man/optimize_effect.Rd
|
no_license
|
cran/causaloptim
|
R
| false
| true
| 976
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process-optimizer.R
\name{optimize_effect}
\alias{optimize_effect}
\title{Run the Balke optimizer}
\usage{
optimize_effect(obj)
}
\arguments{
\item{obj}{Object as returned by \link{analyze_graph}}
}
\value{
An object of class "balkebound" that contains the bounds and logs as character strings
}
\description{
Given an object with the linear programming problem set up, compute the bounds
using the c++ code developed by Alex Balke. Bounds are returned as text but can
be converted to R functions using \link{interpret_bounds}, or latex code using
\link{latex_bounds}.
}
\examples{
b <- graph_from_literal(X -+ Y, Ur -+ X, Ur -+ Y)
V(b)$leftside <- c(0,0,0)
V(b)$latent <- c(0,0,1)
V(b)$nvals <- c(2,2,2)
E(b)$rlconnect <- E(b)$edge.monotone <- c(0, 0, 0)
obj <- analyze_graph(b, constraints = NULL, effectt = "p{Y(X = 1) = 1} - p{Y(X = 0) = 1}")
optimize_effect(obj)
}
|
#' Calculate euclidean norm of a numerical vector.
#'
#' @param x vector of numeric values.
#' @return the square root of the sum of squared values in \code{x}.
#' @examples
#' euclidNorm(c(1, 3, 0, 4))
#' euclidNorm(c(-1, 0, 5))
#' @export
euclidNorm <- function(x) {
  sqrt(sum(x^2))
}
#' Determine sign of distance from linear separator
#'
#' @param data matrix or data frame of numeric values.
#' @param weight vector of numeric values.
#' @return A vector with -1 for every row of \code{data} whose signed
#'   distance from the separator is negative, and 1 otherwise.
#' @examples
#' vals <- data.frame(4, 3, -2, 9)
#' weights <- c(1, 1, 1, 1)
#' signum(vals, weights)
#' @export
signum <- function(data, weight) {
  # Signed distance of each observation (row) from the separating hyperplane.
  # (The original preallocated `distances` with vector() and immediately
  # overwrote it with the apply() result; the dead allocation is removed.)
  distances <- apply(data, 1, distanceFromSeparator, weight)
  ifelse(distances < 0, -1, 1)
}
#' Calculate signed distance of point from linear separator.
#'
#' Thin wrapper around the matrix product \code{data \%*\% weight}.
#' @param data matrix or vector of numeric values
#' @param weight vector of numeric values
#' @return If both arguments are vectors of same length, it will return the
#'   inner product. If one argument is a vector, it will be promoted to either
#'   a row or column matrix.
#' @examples
#' vec1 <- c(1, 2, 3)
#' vec2 <- c(4, 5, 6)
#' distanceFromSeparator(vec1, vec2)
#' @export
distanceFromSeparator <- function(data, weight) {
  data %*% weight
}
#' Generate perceptron model.
#'
#' @description Fit a single-layer perceptron model
#' @param formula formula expression as for regression models, of the form
#' response ~ predictors.
#' @param data data frame in which to interpret the variables occurring in
#' formula
#' @param learningRate integer value determining the magnitude of the weight
#' updates (default 1)
#' @param activation function to control neuron activation (default signum)
#' @details This function implements a model for linear classification based on
#' the perceptron model. The level of the response variable must be binary.
#' @note The learning algorithm for the perceptron model is only guaranteed to
#' converge for linearly separable input data!
#' @return
#' \describe{
#' \item{w}{vector of best weight values found}
#' \item{coefficients}{vector of weight values normalized by euclidean distance}
#' \item{updates}{count of weight updates}
#' \item{formula}{character representation of formula}
#' \item{call}{character representation of the call command}
#' \item{x}{model matrix}
#' \item{y}{vector of response values}
#' \item{options}{list of character representation of modelling options}
#' }
#' @references Cristianini, Nello und John Shawe-Taylor (2000): \emph{An
#' Introduction to Support Vector Machines: And Other Kernel-Based Learning
#' Methods}, Cambridge University Press: Cambridge, England.
#' @examples
#' data(iris)
#' head(iris, n=20)
#' irisSub <- iris[1:100, c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' head(irisSub)
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' @import stats
#' @export
newPerceptronModel <- function(formula, data, learningRate = 1,
                               activation = signum) {
  if (!is.data.frame(data)) stop("Input data must be of type data frame.")
  # Model matrix (first column is the intercept / bias input).
  mf <- model.frame(formula, data)
  x <- model.matrix(formula, mf)
  respondName <- as.character(attr(terms(mf), "variables"))[2]
  # The response must take exactly two distinct values. The original check
  # `nlevels(data[respondName] != 2)` compared a data frame against 2 and
  # took nlevels() of the result, which is always 0, so invalid input was
  # never rejected. Counting distinct observed values fixes this (and, unlike
  # nlevels(), does not reject factors that merely declare unused levels).
  if (length(unique(data[[respondName]])) != 2)
    stop("Invalid number of levels in response variable detected.
         Response variable must be binary!")
  # Response vector: map the first factor level to -1, every other value to
  # +1, and keep the original labels alongside for prediction mapping.
  y <- get(respondName, mf)
  yLab <- as.character(y)
  y <- factor(y)
  y <- ifelse(y == levels(y)[1], -1, 1)
  y <- cbind(y, yLab)
  colnames(y) <- c("class", respondName)
  y <- data.frame(y, stringsAsFactors = FALSE)
  y$class <- as.numeric(y$class)
  w <- numeric(ncol(x))  # weight vector, starts at zero
  update_count <- 0      # number of weight updates (was `c`, which shadowed base::c)
  weightUpdate <- TRUE
  R <- max(apply(x, 1, euclidNorm))  # data radius, scales the bias update
  # Rosenblatt perceptron learning rule: repeat full passes over the data
  # until a pass produces no misclassification. Only terminates for linearly
  # separable input (see @note).
  while (weightUpdate) {
    weightUpdate <- FALSE
    yClassified <- activation(x, w)
    for (i in seq_len(nrow(x))) {
      if (y[i, 1] != yClassified[i]) {
        w[-1] <- w[-1] + learningRate * y[i, 1] * x[i, -1]
        w[1] <- w[1] + learningRate * y[i, 1] * R^2
        update_count <- update_count + 1
        weightUpdate <- TRUE
      }
    }
  }
  # Normalize the weights to a unit vector for reporting.
  s <- euclidNorm(w)
  coefficients <- w / s
  names(coefficients) <- c("bias", attr(terms.formula(formula), "term.labels"))
  # Assemble output object.
  perceptronOut <- list()
  class(perceptronOut) <- "perceptron"
  perceptronOut$weights <- w
  perceptronOut$respondName <- respondName
  perceptronOut$coefficients <- coefficients
  perceptronOut$updates <- update_count
  perceptronOut$formula <- formula
  perceptronOut$call <- match.call()
  perceptronOut$x <- x
  perceptronOut$y <- y
  perceptronOut$yMapping <- unique(y)
  perceptronOut$options <- list(learningRate,
                                as.character(substitute(activation)))
  names(perceptronOut$options) <- c("Learning rate", "Activation function")
  perceptronOut
}
#' Print perceptron model.
#'
#' @param x an object of class \code{perceptron} as returned by perceptron.
#' @param ... arguments passed to or from other methods.
#' @return \code{x}, invisibly, following the convention for print methods.
#' @note the weight values are normalized by the euclidean distance and
#' represented as a unit vector
#' @examples
#' data(iris)
#' head(iris, n=20)
#' irisSub <- iris[1:100, c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' head(irisSub)
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' print(p1)
#' @export
print.perceptron <- function(x, ...) {
  cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
  if (length(coef(x))) {
    cat("Weights:\n")
    print(coef(x))
    cat("\n")
    cat("Epochs:\n")
    cat(x$updates, "\n")
  } else {
    cat("No coefficients\n")
    cat("\n")
  }
  # Return the object invisibly on every path (the original only did so in
  # the no-coefficients branch, so `p <- print(p1)` silently yielded NULL).
  invisible(x)
}
#' Assemble summary output of fitted perceptron model.
#'
#' @param object an object of class \code{perceptron} as returned by perceptron.
#' @param ... arguments passed to or from other methods
#' @return
#' \describe{
#' \item{coefficients}{vector of weight values normalized by euclidean distance}
#' \item{options}{list of character representation of modelling options}
#' \item{input}{number of input layer nodes}
#' \item{hidden}{number of hidden layer nodes (fixed to 0)}
#' \item{output}{number of output layer nodes (fixed to 1)}
#' }
#' @examples
#' data(iris)
#' head(iris, n=20)
#' irisSub <- iris[1:100, c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' head(irisSub)
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' summary(p1)
#' @export
summary.perceptron <- function(object, ...) {
  perceptronSumOut <- list()
  class(perceptronSumOut) <- "summary.perceptron"
  # Label each weight with its network edge, e.g. "bias->o" (input -> output).
  # Vectorized paste0() replaces the original element-wise loop.
  neuronLabels <- paste0(names(object$coefficients), "->o")
  perceptronSumOut$coefficients <- object$coefficients
  names(perceptronSumOut$coefficients) <- neuronLabels
  perceptronSumOut$options <- object$options
  perceptronSumOut$input <- ncol(object$x)
  perceptronSumOut$hidden <- 0
  perceptronSumOut$output <- 1
  perceptronSumOut
}
#' Print summary output of a fitted perceptron model.
#'
#' @param x an object of class \code{perceptron} as returned by perceptron.
#' @param ... arguments passed to or from other methods
#' @return \code{x}, invisibly, following the convention for print methods.
#' @examples
#' data(iris)
#' head(iris, n=20)
#' irisSub <- iris[1:100, c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' head(irisSub)
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' summary(p1)
#' @export
print.summary.perceptron <- function(x, ...) {
  # Architecture summary in "inputs-hidden-outputs" form; the bias column is
  # excluded from the input count (hence x$input - 1).
  networkDescription <- paste(x$input - 1, x$hidden, x$output, sep = "-")
  cat("\nResult:\n")
  cat("A", networkDescription, "network with", x$input, "weights\n",
      sep = " ")
  cat("\n")
  print(coef(x))
  cat("\n\n")
  # "Name: value" pairs for each fitting option; vectorized instead of the
  # original element-wise loop.
  optList <- paste0(names(x$options), ": ",
                    vapply(x$options, as.character, character(1)))
  cat(optList, sep = ", ")
  cat("\n")
  invisible(x)
}
#' Plot of a fitted perceptron model.
#'
#' Draws a scatter plot of the two predictors, colored and shaped by the
#' response class, with the learned separating line overlaid in green.
#' @param x an object of class \code{perceptron} as returned by perceptron.
#' @param ... arguments passed to or from other methods
#' @return A \code{ggplot} object.
#' @examples
#' data(iris)
#' head(iris, n=20)
#' irisSub <- iris[1:100, c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' head(irisSub)
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' plot(p1, title = "Perceptron Classifier")
#' @import ggplot2
#' @export
plot.perceptron <- function(x, ...) {
  # Only two predictors (plus the bias column) can be drawn in a 2-d plot.
  if(ncol(x$x) != 3) stop("Plot functionality is only available for 2d input
                          data")
  w <- x$weights
  # Recover the response labels as a one-column data frame named after the
  # original response variable.
  respond <- data.frame(x$y[,2], stringsAsFactors = FALSE)
  respond <- respond[,1,drop = FALSE]
  names(respond) <- x$respondName
  respondName <- x$respondName
  # Separator w1 + w2*x + w3*y = 0 rewritten as y = intercept + slope * x.
  intercept <- -w[1] / w[3]
  slope <- -w[2] / w[3]
  df <- data.frame(x = x$x[,2], y = x$x[,3], respond = respond,
                   stringsAsFactors = FALSE)
  # NOTE(review): referencing df$x / df$y inside aes() bypasses ggplot2's
  # data masking; the conventional form is aes(x = x, y = y). Confirm before
  # changing, as grouping/facetting behavior can differ.
  ggplot(df, aes(x = df$x, y = df$y)) +
    geom_point(aes_(color = as.name(respondName),
                    shape = as.name(respondName)), size = 3) +
    geom_abline(aes(intercept = intercept, slope = slope), col = "green")
}
#' Predict function for a fitted perceptron model.
#'
#' @param object fitted \code{perceptron} model.
#' @param newdata data frame from values for which to predict the class
#' @param ... arguments passed to or from other methods
#' @return A data frame with the columns of \code{newdata} plus a
#'   \code{prediction} factor column holding the predicted class labels.
#' @examples
#' data(iris)
#' irisSub <- iris[(1:100), c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' irisShuffled <- irisSub[sample(nrow(irisSub)),]
#' irisTraining <- irisShuffled[1:70,]
#' irisHoldout <- irisShuffled[70:100,]
#' holdOutX <- irisHoldout[, 1:2]
#' holdOutY <- irisHoldout[, 3]
#' holdOutY <- factor(holdOutY)
#' prediction <- predict(p1, holdOutX)
#' @export
predict.perceptron <- function(object, newdata, ...) {
  # (Removed the unused `perceptronPredOut <- list()` from the original.)
  yMapping <- object$yMapping
  w <- object$weights
  # Prepend the bias column so `newdata` matches the training design matrix.
  input <- cbind(1, newdata)
  rownames(input) <- c()
  colnames(input)[1] <- "bias"
  # Classify, then map the -1/+1 outputs back to the original class labels
  # recorded during fitting.
  predictions <- signum(input, w)
  predictions <- as.factor(ifelse(predictions == yMapping[1, 1], yMapping[1, 2],
                                  yMapping[2, 2]))
  data.frame(input[, -1], prediction = predictions)
}
|
/R/nnetr.R
|
no_license
|
MPEP/nnetr-project
|
R
| false
| false
| 11,144
|
r
|
#' Calculate euclidean norm of a numerical vector.
#'
#' @param x vector of numeric values.
#' @return the square root of the sum of squared values in \code{x}.
#' @examples
#' euclidNorm(c(1, 3, 0, 4))
#' euclidNorm(c(-1, 0, 5))
#' @export
euclidNorm <- function(x) {sqrt(sum(x ^ 2))}
#' Determine sign of distance from linear separator
#'
#' @param data matrix or data frame of numeric values.
#' @param weight vector of numeric values.
#' @return -1 if distance is negative else 1
#' @examples
#' vals <- data.frame(4, 3, -2, 9)
#' weights <- c(1, 1, 1, 1)
#' signum(vals, weights)
#' @export
signum <- function(data, weight) {
distances <- vector(length = nrow(data))
distances <- apply(data, 1, distanceFromSeparator, weight)
ifelse(distances < 0, -1, 1)
}
#' Calculate signed distance of point from linear separator.
#'
#' @param data matrix or vector of numeric values
#' @param weight vector of numeric values
#' @return If both arguments are vectors of same length, it will return the
#' inner product. If one argument is a vector, it will be promoted to either a
#' row or column matrix.
#' @examples
#' vec1 <- c(1, 2, 3)
#' vec2 <- c(4, 5, 6)
#' distanceFromSeparator(vec1, vec2)
#' @export
distanceFromSeparator <- function(data, weight) {
distance <- data %*% weight
distance
}
#' Generate perceptron model.
#'
#' @description Fit a single-layer perceptron model
#' @param formula formula expression as for regression models, of the form
#' response ~ predictors.
#' @param data data frame in which to interpret the variables occurring in
#' formula
#' @param learningRate integer value determining the magnitude of the weight
#' updates (default 1)
#' @param activation function to control neuron activation (default signum)
#' @details This function implements a model for linear classification based on
#' the perceptron model. The level of the response variable must be binary.
#' @note The learning algorithm for the perceptron model is only guaranteed to
#' converge for linearly separable input data!
#' @return
#' \describe{
#' \item{w}{vector of best weight values found}
#' \item{coefficients}{vector of weight values normalized by euclidean distance}
#' \item{updates}{count of weight updates}
#' \item{formula}{character representation of formula}
#' \item{call}{character representation of the call command}
#' \item{x}{model matrix}
#' \item{y}{vector of response values}
#' \item{options}{list of character representation of modelling options}
#' }
#' @references Cristianini, Nello und John Shawe-Taylor (2000): \emph{An
#' Introduction to Support Vector Machines: And Other Kernel-Based Learning
#' Methods}, Cambridge University Press: Cambridge, England.
#' @examples
#' data(iris)
#' head(iris, n=20)
#' irisSub <- iris[1:100, c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' head(irisSub)
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' @import stats
#' @export
newPerceptronModel <- function(formula, data, learningRate = 1,
activation = signum) {
if(!is.data.frame(data)) stop("Input data must be of type data frame.")
# model matrix
mf <- model.frame(formula, data)
x <- model.matrix(formula, mf)
respondName <- as.character(attr(terms(mf), "variables"))[2]
if(nlevels(data[respondName] != 2))
stop("Invalid number of levels in response variable detected.
Response variable must be binary!")
# response vector
y <- get(respondName, mf)
yLab <- as.character(y)
y <- factor(y)
y <- ifelse(y == levels(y)[1], -1, 1)
y <- cbind(y, yLab)
colnames(y) <- c("class", respondName)
y <- data.frame(y, stringsAsFactors = FALSE)
y$class <- as.numeric(y$class)
w <- vector(length = ncol(x)) # coefficient vector
c <- 0 # weight update counter
weightUpdate <- TRUE
R <- max(apply(x, 1, euclidNorm))
while (weightUpdate) {
weightUpdate <- FALSE
yClassified <- activation(x, w)
for (i in 1:nrow(x)) {
if (y[i,1] != yClassified[i]) {
w[-1] <- w[-1] + learningRate * y[i,1] * x[i,-1]
w[1] <- w[1] + learningRate * y[i,1] * R^2
c <- c + 1
weightUpdate <- TRUE
}
}
}
s <- euclidNorm(w)
coefficients <- w / s
names(coefficients) <- c("bias", attr(terms.formula(formula),"term.labels"))
# assemble output object
perceptronOut <- list()
class(perceptronOut) <- "perceptron"
perceptronOut$weights <- w
perceptronOut$respondName <- respondName
perceptronOut$coefficients <- coefficients
perceptronOut$updates <- c
perceptronOut$formula <- formula
perceptronOut$call <- match.call()
perceptronOut$x <- x
perceptronOut$y <- y
perceptronOut$yMapping <- unique(y)
perceptronOut$options <- list(learningRate,
as.character(substitute(activation)))
names(perceptronOut$options) <- c("Learning rate", "Activation function")
return(perceptronOut)
}
#' Print perceptron model.
#'
#' @param x an object of class \code{perceptron} as returned by perceptron.
#' @param ... arguments passed to or from other methods.
#' @note the weight values are normalized by the euclidean distance and
#' represented as a unit vector
#' @examples
#' data(iris)
#' head(iris, n=20)
#' irisSub <- iris[1:100, c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' head(irisSub)
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' print(p1)
#' @export
print.perceptron <- function(x, ...) {
cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
if (length(coef(x))) {
cat("Weights:\n")
print(coef(x))
cat("\n")
cat("Epochs:\n")
cat(x$updates,"\n")
} else {
cat("No coefficients\n")
cat("\n")
invisible(x)
}
}
#' Assemble summary output of fitted perceptron model.
#'
#' @param object an object of class \code{perceptron} as returned by perceptron.
#' @param ... arguments passed to or from other methods
#' @return
#' \describe{
#' \item{coefficients}{vector of weight values normalized by euclidean distance}
#' \item{options}{list of character representation of modelling options}
#' \item{input}{number of input layer nodes}
#' \item{hidden}{number of hidden layer nodes (fixed to 0)}
#' \item{output}{number of output layer nodes (fixed to 1)}
#' }
#' @examples
#' data(iris)
#' head(iris, n=20)
#' irisSub <- iris[1:100, c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' head(irisSub)
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' summary(p1)
#' @export
summary.perceptron <- function(object, ...) {
perceptronSumOut <- list()
class(perceptronSumOut) <- "summary.perceptron"
neuronLabels <- vector(length = length(object$coefficients))
for (i in 1:length(object$coefficients)) {
neuronLabel <- paste(attr(object$coefficients[i], "names"),
"->",
"o",
sep = "")
neuronLabels[i] <- neuronLabel
}
perceptronSumOut$coefficients <- object$coefficients
names(perceptronSumOut$coefficients) <- neuronLabels
perceptronSumOut$options <- object$options
perceptronSumOut$input <- ncol(object$x)
perceptronSumOut$hidden <- 0
perceptronSumOut$output <- 1
return(perceptronSumOut)
}
#' Print summary output of a fitted perceptron model.
#'
#' @param x an object of class \code{perceptron} as returned by perceptron.
#' @param ... arguments passed to or from other methods
#' @examples
#' data(iris)
#' head(iris, n=20)
#' irisSub <- iris[1:100, c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' head(irisSub)
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' summary(p1)
#' @export
print.summary.perceptron <- function(x, ...) {
networkDescription <- paste(x$input-1, x$hidden, x$output, sep = "-")
cat("\nResult:\n")
cat("A", networkDescription, "network with", x$input, "weights\n",
sep = " ")
cat("\n")
print(coef(x))
cat("\n\n")
optList <- vector(length = length(x$options))
for (i in 1:length(x$options)) {
optString <- paste(attr(x$options[i], "names"),
": ",
as.character(x$options[i]),
sep = "")
optList[i] <- optString
}
cat(optList, sep = ", ")
cat("\n")
}
#' Plot of a fitted perceptron model.
#'
#' @param x an object of class \code{perceptron} as returned by perceptron.
#' @param ... arguments passed to or from other methods
#' @examples
#' data(iris)
#' head(iris, n=20)
#' irisSub <- iris[1:100, c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' head(irisSub)
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' plot(p1, title = "Perceptron Classifier")
#' @import ggplot2
#' @export
plot.perceptron <- function(x, ...) {
if(ncol(x$x) != 3) stop("Plot functionality is only available for 2d input
data")
w <- x$weights
respond <- data.frame(x$y[,2], stringsAsFactors = FALSE)
respond <- respond[,1,drop = FALSE]
names(respond) <- x$respondName
respondName <- x$respondName
intercept <- -w[1] / w[3]
slope <- -w[2] / w[3]
df <- data.frame(x = x$x[,2], y = x$x[,3], respond = respond,
stringsAsFactors = FALSE)
ggplot(df, aes(x = df$x, y = df$y)) +
geom_point(aes_(color = as.name(respondName),
shape = as.name(respondName)), size = 3) +
geom_abline(aes(intercept = intercept, slope = slope), col = "green")
}
#' Predict function for a fitted perceptron model.
#'
#' @param object fitted \code{perceptron} model.
#' @param newdata data frame from values for which to predict the class
#' @param ... arguments passed to or from other methods
#' @examples
#' data(iris)
#' irisSub <- iris[(1:100), c(1, 3, 5)]
#' names(irisSub) <- c("sepal", "petal", "species")
#' formula <- formula(species ~ sepal + petal)
#' p1 <- newPerceptronModel(formula, irisSub)
#' irisShuffled <- irisSub[sample(nrow(irisSub)),]
#' irisTraining <- irisShuffled[1:70,]
#' irisHoldout <- irisShuffled[70:100,]
#' holdOutX <- irisHoldout[, 1:2]
#' holdOutY <- irisHoldout[, 3]
#' holdOutY <- factor(holdOutY)
#' prediction <- predict(p1, holdOutX)
#' @export
predict.perceptron <- function(object, newdata, ...) {
perceptronPredOut <- list()
yMapping <- object$yMapping
w <- object$weights
input <- cbind(1, newdata)
rownames(input) <- c()
colnames(input)[1] <- "bias"
predictions <- signum(input, w)
predictions <- as.factor(ifelse(predictions == yMapping[1,1], yMapping[1,2],
yMapping[2,2]))
outputFrame <- data.frame(input[,-1], prediction = predictions)
outputFrame
}
|
# Multiple linear regression example (50 Startups dataset).
# One-time setup: install.packages('caTools')  # provides sample.split()
sampledata = read.csv('50_Startups.csv')
# Handling categorical data: encode State as a factor with numeric labels 1-3.
sampledata$State = factor(x = sampledata$State ,levels = c('New York','California','Florida'), labels = c(1,2,3))
# Splitting into training and test data (80/20); set.seed makes it reproducible.
library(caTools)
set.seed(123)
split = sample.split(sampledata$Profit,SplitRatio = 4/5)
training_set = subset(sampledata , split==TRUE)
test_set = subset(sampledata, split == FALSE)
# Fit the regressor: lm() with `Profit ~ .` regresses Profit on all remaining
# columns, i.e. multiple linear regression.
regressor = lm(formula = Profit ~ ., data = training_set)
|
/Multiple Linear Regression/Practice_multiple_linear_regression.R
|
no_license
|
rbgautam/MachineLearning
|
R
| false
| false
| 570
|
r
|
#Multiple Linear regression
#install.packages('catools')
sampledata = read.csv('50_Startups.csv')
#Handling Catagorical Data
sampledata$State = factor(x = sampledata$State ,levels = c('New York','California','Florida'), labels = c(1,2,3))
#Splitting into Training and test data
library(caTools)
set.seed(123)
split = sample.split(sampledata$Profit,SplitRatio = 4/5)
training_set = subset(sampledata , split==TRUE)
test_set = subset(sampledata, split == FALSE)
#regressor lm for multiple linear regression
regressor = lm(formula = Profit ~ ., data = training_set)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/fitTimeSeries.R
\name{plotTimeSeries}
\alias{plotTimeSeries}
\title{Plot difference function for particular bacteria}
\usage{
plotTimeSeries(res, C = 0, xlab = "Time",
ylab = "Difference in abundance",
main = "SS difference function prediction", ...)
}
\arguments{
\item{res}{Output of fitTimeSeries function}
\item{C}{Value for which difference function has to be larger or smaller than (default 0).}
\item{xlab}{X-label.}
\item{ylab}{Y-label.}
\item{main}{Main label.}
\item{...}{Extra plotting arguments.}
}
\value{
Plot of difference in abundance for significant features.
}
\description{
Plot difference function for particular bacteria
}
\details{
Plot the difference in abundance for significant features.
}
\examples{
data(mouseData)
res = fitTimeSeries(obj=mouseData,feature="Actinobacteria",
class="status",id="mouseID",time="relativeTime",lvl='class',B=10)
plotTimeSeries(res)
}
\seealso{
\code{\link{fitTimeSeries}}
}
|
/man/plotTimeSeries.Rd
|
no_license
|
emcgi/metagenomeSeq-1
|
R
| false
| false
| 1,029
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/fitTimeSeries.R
\name{plotTimeSeries}
\alias{plotTimeSeries}
\title{Plot difference function for particular bacteria}
\usage{
plotTimeSeries(res, C = 0, xlab = "Time",
ylab = "Difference in abundance",
main = "SS difference function prediction", ...)
}
\arguments{
\item{res}{Output of fitTimeSeries function}
\item{C}{Value for which difference function has to be larger or smaller than (default 0).}
\item{xlab}{X-label.}
\item{ylab}{Y-label.}
\item{main}{Main label.}
\item{...}{Extra plotting arguments.}
}
\value{
Plot of difference in abundance for significant features.
}
\description{
Plot difference function for particular bacteria
}
\details{
Plot the difference in abundance for significant features.
}
\examples{
data(mouseData)
res = fitTimeSeries(obj=mouseData,feature="Actinobacteria",
class="status",id="mouseID",time="relativeTime",lvl='class',B=10)
plotTimeSeries(res)
}
\seealso{
\code{\link{fitTimeSeries}}
}
|
library(testthat)

# Example raster shipped with the package, used as valid input below.
precipDeviation <- terra::rast(system.file("extdata/precipDeviation.asc",
                                           package = "climateStability"))

# Description now names the function under test (was "rescale0to1 works").
test_that("stabilityCalc works", {
  expect_error(climateStability::stabilityCalc())
  expect_warning(climateStability::stabilityCalc("a"))
  testResult <- climateStability::stabilityCalc(precipDeviation)
  expect_s4_class(testResult, "SpatRaster")
  # terra is never attached in this file, so minmax() must be qualified;
  # the bare call would fail with "could not find function".
  expect_true(terra::minmax(testResult)[[2]] >= 1)
})
|
/tests/testthat/test-stabilityCalc.R
|
no_license
|
hannahlowens/climateStability
|
R
| false
| false
| 472
|
r
|
library(testthat)
precipDeviation <- terra::rast(system.file("extdata/precipDeviation.asc",
package = "climateStability"))
test_that("rescale0to1 works", {
expect_error(climateStability::stabilityCalc())
expect_warning(climateStability::stabilityCalc("a"))
testResult <- climateStability::stabilityCalc(precipDeviation)
expect_equal(class(testResult)[[1]], "SpatRaster")
expect_true(minmax(testResult)[[2]] >= 1)
})
|
########################################################
# NEW PHASE #
########### Vector Error correction Model ##############
library(bvartools)
library(urca)
begin_date= as.Date("2020-08-05")
ending_date = as.Date("2020-09-08")
## Mobility datae from Feb 15 to Aug 31
data1 <- read.csv("data/mobility_merged.csv")
data1 <- data1 %>% dplyr::select(date, Google_park_7d_Avg,
Google_grocery_7d_Avg,
Apple_driving_7d_Avg,
Apple_transit_7d_Avg,
Apple_walking_7d_Avg) %>% na.omit()
names(data1) <- c("date","park","grocery","driving","transit","walking")
## Facebook data
fb_cli<-readRDS("../fb_data_dfw.rds") %>%
filter(geo_value == name_to_fips("Tarrant County")) %>%
dplyr::select(date=time_value, cli_value=value)
pos_rate <- tarrant_data_ma %>% dplyr::select(date,positivity_rate_7d) %>% na.omit()
## Composing timeseries
pos_ts1 <- pos_rate %>% filter(between(date, begin_date, ending_date)) %>% dplyr::select(positivity_rate_7d) %>% ts()
fb_ts1 <- fb_cli %>% filter(between(date, begin_date, ending_date)) %>% dplyr::select(cli_value) %>% ts()
mob_ts1 <- data1 %>% filter(between(mdy(date), begin_date, ending_date)) %>% dplyr::select(-date) %>% ts()
df_ts1 <- cbind(pos_ts1, fb_ts1, mob_ts1)
dates <- data1 %>% filter(between(mdy(date), begin_date, ending_date)) %>% dplyr::select(date)
### building the model
## Lag Order selection
var1_lag <- VARselect(df_ts1[1:28,], lag.max = 14, type = "both")
## building the VAr modle to get the order for the VECM model
var1.aic <- VAR(df_ts1[1:28,], type = "both", lag.max = 15, ic="AIC")
summary(var1.aic)
### Prediction based on the VAR model
pred_var1 <- predict(var1.aic, new_data=df_ts1[29:35,], n.ahead=7)
pred_7d <- tibble(Actual=df_ts1[29:35,c("pos_ts1")],
Fitted=pred_var1$fcst$pos_ts1[,"fcst"],
date=dates$date[29:35],
lower=pred_var1$fcst$pos_ts1[,"lower"],
upper=pred_var1$fcst$pos_ts1[,"upper"])
plot_ly(data = pred_7d, x=~date) %>%
add_trace(y=~Actual, name="Actual", type="scatter" ,mode="lines+markers") %>%
add_trace(y=~Fitted, name="Fitted", type="scatter",mode="lines+markers") %>%
add_trace(y=~lower, name="Lower", type="scatter",mode="lines+markers") %>%
add_trace(y=~upper, name="Upper", type="scatter",mode="lines+markers")
### forecasting 14 Days ahead blind
pred_var14 <- predict(var1.aic, n.ahead=10)
pred_14d <- tibble(Actual=tail(tarrant_data_ma$positivity_rate_7d,23)[1:10],
Fitted=pred_var14$fcst$pos_ts1[,"fcst"],
date=tail(tarrant_data_ma$date,23)[1:10],
lower=pred_var14$fcst$pos_ts1[,"lower"],
upper=pred_var14$fcst$pos_ts1[,"upper"])
plot_ly(data = pred_14d, x=~date) %>%
add_trace(y=~Actual, name="Actual", type="scatter" ,mode="lines+markers") %>%
add_trace(y=~Fitted, name="Fitted", type="scatter",mode="lines+markers") %>%
add_trace(y=~lower, name="Lower", type="scatter",mode="lines+markers")
#add_trace(y=~upper, name="Upper", type="scatter",mode="lines+markers")
#### Correcting the model using the VECM
## Extracting the order to be used in the VECM modem from the fitted VAR model
k1_order = var1.aic$p
## building the VECM
vec1_model <- ca.jo(df_ts1[1:28,], ecdet = "none", type = "trace", K=k1_order, spec = "transitory")
summary(vec1_model)
### Converting back VECM to new VAR model
var1_model <- vec2var(vec1_model, r=1)
### Prediction based on New data
forecast_var1 <- predict(var1_model,new_data=df_ts1[29:35,], n.ahead=7)
pred_7d_vecm <- tibble(Actual=df_ts1[29:35,c("pos_ts1")], Fitted=forecast_var1$fcst$pos_ts1[,"fcst"], date=dates$date[29:35])
p7d <- plot_ly(data = pred_7d_vecm, x=~date) %>%
add_trace(y=~Actual, name="Actual", type="scatter" ,mode="lines+markers") %>%
add_trace(y=~Fitted, name="VECM_Fitted", type="scatter",mode="lines+markers", text=~Fitted) %>%
add_trace(data=pred_7d, y=~Fitted, name="VAR_Fitted", type="scatter",mode="lines+markers", text=~Fitted)
p7d
####### Policy Effect Simulation##############
## Based on the converted VAR model ##
## impulse response function with VAR model before Error Correction
ir_test <- irf(var1.aic, n.ahead = 7,
impulse = c("fb_ts1",
"mob_ts1.park",
"mob_ts1.grocery",
"mob_ts1.driving",
"mob_ts1.transit",
"mob_ts1.walking"),
response = "pos_ts1",
ortho = FALSE,
runs = 1000)
plot(ir_test)
## impulse response function with VAR model after Error Correction
ir1_test <- irf(var1_model, n.ahead = 7,
impulse = c("fb_ts1",
"mob_ts1.park",
"mob_ts1.grocery",
"mob_ts1.driving",
"mob_ts1.transit",
"mob_ts1.walking"),
response = "pos_ts1",
ortho = FALSE,
runs = 1000)
plot(ir1_test)
#### plotting interactive impulse response
irf_data <- tibble(days=c(1:8),
fb_cli_mean=ir1_test$irf$fb_ts1,
fb_cli_low=ir1_test$Lower$fb_ts1,
fb_cli_up=ir1_test$Upper$fb_ts1)
plot_ly(data = irf_data, x=~days) %>%
add_trace(y=~fb_cli_mean, name="Mean effect", mode="lines+markers") %>%
add_trace(y=~fb_cli_low, name="Lower", mode="lines+markers") %>%
add_trace(y=~fb_cli_up, name="Upper", mode="lines+markers") %>%
layout(title="Facebook CLI 1 point predicted effect on Positivity Rate over 7 days (CI 95%)", color="blue")
## Prediction
new_data <- read.csv("../google_data.csv")
test_data <- new_data %>% filter(county=="Tarrant County", between(ymd(date), ymd("2020-09-01"), ymd("2020-09-08")))
var_pred <- predict(var1.aic, new_data=test_data, n.ahead=7,ci=0.95,dumvar=NULL)
pred_new_var <- tibble(date=mdy(df_1$date),
Actual=c(var_pred$endog[,"pos_ts1"], df_ts1[,"pos_ts1"][29:35]),
Fitted=c(var_pred$endog[,"pos_ts1"],var_pred$fcst$pos_ts1[,"fcst"]),
Lower=c(var_pred$endog[,"pos_ts1"],var_pred$fcst$pos_ts1[,"lower"]),
Upper=c(var_pred$endog[,"pos_ts1"],var_pred$fcst$pos_ts1[,"upper"]))
plot_ly(data = pred_new_var, x=~date) %>%
add_trace(y=~Fitted, name="Prediction", type="scatter",mode="lines") %>%
#add_trace(y=~Lower, name="Lower", type="scatter",mode="lines+markers") %>%
#add_trace(y=~Upper, name="Upper", type="scatter",mode="lines+markers") %>%
add_trace(y=~Actual, name="Actual", type="scatter" ,mode="lines") %>%
layout(title="Positivity Rates Actual versus Predicton")
|
/test-only/vecm.R
|
no_license
|
VOICE-TONE/covid19-risk-modelling
|
R
| false
| false
| 6,943
|
r
|
########################################################
# NEW PHASE #
########### Vector Error correction Model ##############
library(bvartools)
library(urca)
begin_date= as.Date("2020-08-05")
ending_date = as.Date("2020-09-08")
## Mobility datae from Feb 15 to Aug 31
data1 <- read.csv("data/mobility_merged.csv")
data1 <- data1 %>% dplyr::select(date, Google_park_7d_Avg,
Google_grocery_7d_Avg,
Apple_driving_7d_Avg,
Apple_transit_7d_Avg,
Apple_walking_7d_Avg) %>% na.omit()
names(data1) <- c("date","park","grocery","driving","transit","walking")
## Facebook data
fb_cli<-readRDS("../fb_data_dfw.rds") %>%
filter(geo_value == name_to_fips("Tarrant County")) %>%
dplyr::select(date=time_value, cli_value=value)
pos_rate <- tarrant_data_ma %>% dplyr::select(date,positivity_rate_7d) %>% na.omit()
## Composing timeseries
pos_ts1 <- pos_rate %>% filter(between(date, begin_date, ending_date)) %>% dplyr::select(positivity_rate_7d) %>% ts()
fb_ts1 <- fb_cli %>% filter(between(date, begin_date, ending_date)) %>% dplyr::select(cli_value) %>% ts()
mob_ts1 <- data1 %>% filter(between(mdy(date), begin_date, ending_date)) %>% dplyr::select(-date) %>% ts()
df_ts1 <- cbind(pos_ts1, fb_ts1, mob_ts1)
dates <- data1 %>% filter(between(mdy(date), begin_date, ending_date)) %>% dplyr::select(date)
### building the model
## Lag Order selection
var1_lag <- VARselect(df_ts1[1:28,], lag.max = 14, type = "both")
## building the VAr modle to get the order for the VECM model
var1.aic <- VAR(df_ts1[1:28,], type = "both", lag.max = 15, ic="AIC")
summary(var1.aic)
### Prediction based on the VAR model
pred_var1 <- predict(var1.aic, new_data=df_ts1[29:35,], n.ahead=7)
pred_7d <- tibble(Actual=df_ts1[29:35,c("pos_ts1")],
Fitted=pred_var1$fcst$pos_ts1[,"fcst"],
date=dates$date[29:35],
lower=pred_var1$fcst$pos_ts1[,"lower"],
upper=pred_var1$fcst$pos_ts1[,"upper"])
plot_ly(data = pred_7d, x=~date) %>%
add_trace(y=~Actual, name="Actual", type="scatter" ,mode="lines+markers") %>%
add_trace(y=~Fitted, name="Fitted", type="scatter",mode="lines+markers") %>%
add_trace(y=~lower, name="Lower", type="scatter",mode="lines+markers") %>%
add_trace(y=~upper, name="Upper", type="scatter",mode="lines+markers")
### forecasting 14 Days ahead blind
pred_var14 <- predict(var1.aic, n.ahead=10)
pred_14d <- tibble(Actual=tail(tarrant_data_ma$positivity_rate_7d,23)[1:10],
Fitted=pred_var14$fcst$pos_ts1[,"fcst"],
date=tail(tarrant_data_ma$date,23)[1:10],
lower=pred_var14$fcst$pos_ts1[,"lower"],
upper=pred_var14$fcst$pos_ts1[,"upper"])
plot_ly(data = pred_14d, x=~date) %>%
add_trace(y=~Actual, name="Actual", type="scatter" ,mode="lines+markers") %>%
add_trace(y=~Fitted, name="Fitted", type="scatter",mode="lines+markers") %>%
add_trace(y=~lower, name="Lower", type="scatter",mode="lines+markers")
#add_trace(y=~upper, name="Upper", type="scatter",mode="lines+markers")
#### Correcting the model using the VECM
## Extracting the order to be used in the VECM modem from the fitted VAR model
k1_order = var1.aic$p
## building the VECM
vec1_model <- ca.jo(df_ts1[1:28,], ecdet = "none", type = "trace", K=k1_order, spec = "transitory")
summary(vec1_model)
### Converting back VECM to new VAR model
var1_model <- vec2var(vec1_model, r=1)
### Prediction based on New data
forecast_var1 <- predict(var1_model,new_data=df_ts1[29:35,], n.ahead=7)
pred_7d_vecm <- tibble(Actual=df_ts1[29:35,c("pos_ts1")], Fitted=forecast_var1$fcst$pos_ts1[,"fcst"], date=dates$date[29:35])
p7d <- plot_ly(data = pred_7d_vecm, x=~date) %>%
add_trace(y=~Actual, name="Actual", type="scatter" ,mode="lines+markers") %>%
add_trace(y=~Fitted, name="VECM_Fitted", type="scatter",mode="lines+markers", text=~Fitted) %>%
add_trace(data=pred_7d, y=~Fitted, name="VAR_Fitted", type="scatter",mode="lines+markers", text=~Fitted)
p7d
####### Policy Effect Simulation##############
## Based on the converted VAR model ##
## impulse response function with VAR model before Error Correction
ir_test <- irf(var1.aic, n.ahead = 7,
impulse = c("fb_ts1",
"mob_ts1.park",
"mob_ts1.grocery",
"mob_ts1.driving",
"mob_ts1.transit",
"mob_ts1.walking"),
response = "pos_ts1",
ortho = FALSE,
runs = 1000)
plot(ir_test)
## impulse response function with VAR model after Error Correction
ir1_test <- irf(var1_model, n.ahead = 7,
impulse = c("fb_ts1",
"mob_ts1.park",
"mob_ts1.grocery",
"mob_ts1.driving",
"mob_ts1.transit",
"mob_ts1.walking"),
response = "pos_ts1",
ortho = FALSE,
runs = 1000)
plot(ir1_test)
#### plotting interactive impulse response
irf_data <- tibble(days=c(1:8),
fb_cli_mean=ir1_test$irf$fb_ts1,
fb_cli_low=ir1_test$Lower$fb_ts1,
fb_cli_up=ir1_test$Upper$fb_ts1)
plot_ly(data = irf_data, x=~days) %>%
add_trace(y=~fb_cli_mean, name="Mean effect", mode="lines+markers") %>%
add_trace(y=~fb_cli_low, name="Lower", mode="lines+markers") %>%
add_trace(y=~fb_cli_up, name="Upper", mode="lines+markers") %>%
layout(title="Facebook CLI 1 point predicted effect on Positivity Rate over 7 days (CI 95%)", color="blue")
## Prediction
new_data <- read.csv("../google_data.csv")
test_data <- new_data %>% filter(county=="Tarrant County", between(ymd(date), ymd("2020-09-01"), ymd("2020-09-08")))
var_pred <- predict(var1.aic, new_data=test_data, n.ahead=7,ci=0.95,dumvar=NULL)
pred_new_var <- tibble(date=mdy(df_1$date),
Actual=c(var_pred$endog[,"pos_ts1"], df_ts1[,"pos_ts1"][29:35]),
Fitted=c(var_pred$endog[,"pos_ts1"],var_pred$fcst$pos_ts1[,"fcst"]),
Lower=c(var_pred$endog[,"pos_ts1"],var_pred$fcst$pos_ts1[,"lower"]),
Upper=c(var_pred$endog[,"pos_ts1"],var_pred$fcst$pos_ts1[,"upper"]))
plot_ly(data = pred_new_var, x=~date) %>%
add_trace(y=~Fitted, name="Prediction", type="scatter",mode="lines") %>%
#add_trace(y=~Lower, name="Lower", type="scatter",mode="lines+markers") %>%
#add_trace(y=~Upper, name="Upper", type="scatter",mode="lines+markers") %>%
add_trace(y=~Actual, name="Actual", type="scatter" ,mode="lines") %>%
layout(title="Positivity Rates Actual versus Predicton")
|
library(NISTunits)
### Name: NISTnewtonPerMeterTOpoundForcePerFt
### Title: Convert newton per meter to pound-force per foot
### Aliases: NISTnewtonPerMeterTOpoundForcePerFt
### Keywords: programming
### ** Examples
NISTnewtonPerMeterTOpoundForcePerFt(10)
|
/data/genthat_extracted_code/NISTunits/examples/NISTnewtonPerMeterTOpoundForcePerFt.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 263
|
r
|
library(NISTunits)
### Name: NISTnewtonPerMeterTOpoundForcePerFt
### Title: Convert newton per meter to pound-force per foot
### Aliases: NISTnewtonPerMeterTOpoundForcePerFt
### Keywords: programming
### ** Examples
NISTnewtonPerMeterTOpoundForcePerFt(10)
|
###
#get the PCBC samples geneExp normalized counts
###
flog.info('Reading the PCBC normalized mRNA Exp data from Synapse', name='synapse')
mRNA_NormCounts <- synGet('syn2701943')
#read in the file
mRNA_NormCounts <- read.delim(mRNA_NormCounts@filePath, header=T, sep='\t',
as.is=T, stringsAsFactors = F, check.names=F)
## remove version from ENSEMBL ID
rownames(mRNA_NormCounts) <- gsub('\\..*', '',mRNA_NormCounts$gene_id)
mRNA_NormCounts$symbol <- NULL
mRNA_NormCounts$gene_id <- NULL
mRNA_NormCounts$locus <- NULL
#apply(mRNA_NormCounts,2,class)
#mRNA_NormCounts <- as.data.frame(apply(mRNA_NormCounts,2,as.numeric))
#rownames(mRNA_NormCounts)
###
#get the metadata from synapse for PCBC geneExp samples
###
flog.info('Reading the PCBC mRNA metadata from Synapse', name='synapse')
mRNA_metadata <- synGet('syn2731147')
mRNA_metadata <- read.delim(mRNA_metadata@filePath, header=T, sep='\t',
as.is=T, stringsAsFactors = F, check.names=F)
rownames(mRNA_metadata) <- mRNA_metadata[,'Decorated Name']
#keep only that metadata for samples which we have expression data
mRNA_metadata <- mRNA_metadata[rownames(mRNA_metadata) %in% colnames(mRNA_NormCounts),]
|
/mRNA_data_prep.R
|
no_license
|
rlesca01/PCBC_DataExplorer_ShinyApp
|
R
| false
| false
| 1,223
|
r
|
###
#get the PCBC samples geneExp normalized counts
###
flog.info('Reading the PCBC normalized mRNA Exp data from Synapse', name='synapse')
mRNA_NormCounts <- synGet('syn2701943')
#read in the file
mRNA_NormCounts <- read.delim(mRNA_NormCounts@filePath, header=T, sep='\t',
as.is=T, stringsAsFactors = F, check.names=F)
## remove version from ENSEMBL ID
rownames(mRNA_NormCounts) <- gsub('\\..*', '',mRNA_NormCounts$gene_id)
mRNA_NormCounts$symbol <- NULL
mRNA_NormCounts$gene_id <- NULL
mRNA_NormCounts$locus <- NULL
#apply(mRNA_NormCounts,2,class)
#mRNA_NormCounts <- as.data.frame(apply(mRNA_NormCounts,2,as.numeric))
#rownames(mRNA_NormCounts)
###
#get the metadata from synapse for PCBC geneExp samples
###
flog.info('Reading the PCBC mRNA metadata from Synapse', name='synapse')
mRNA_metadata <- synGet('syn2731147')
mRNA_metadata <- read.delim(mRNA_metadata@filePath, header=T, sep='\t',
as.is=T, stringsAsFactors = F, check.names=F)
rownames(mRNA_metadata) <- mRNA_metadata[,'Decorated Name']
#keep only that metadata for samples which we have expression data
mRNA_metadata <- mRNA_metadata[rownames(mRNA_metadata) %in% colnames(mRNA_NormCounts),]
|
################################################################################
##' @title Plot snail sizes by tide
##' @author Robin Elahi
##' @contact elahi.robin@gmail.com
##' @date 2017-01-16
##' @log
################################################################################
source("3_analyse_data/01_sbs_bayes_data.R")
theme_set(theme_bw(base_size = 12) +
theme(strip.background = element_blank(),
strip.text = element_text(face = "italic"),
panel.grid = element_blank()))
dat_dens
names(dat_dens)
dat_dens <- dat_dens %>%
mutate(sample_area_tidal_ht_offset =
ifelse(era == "present", sample_area_tidal_ht + 0.1, sample_area_tidal_ht))
dat_dens %>%
ggplot(aes(sample_area_tidal_ht_offset, size1mm, color = era)) +
geom_jitter(alpha = 0.25, size = 0.5, pch = 1) +
facet_wrap(~ species) +
labs(x = "Tidal height (m)", y = "Size (mm)") +
theme(legend.position = c(0.05, 0.05), legend.justification = c(0.05, 0.025)) +
theme(legend.title = element_blank()) +
scale_color_manual(values = c("#e79f00", "black"))
ggsave("figs_ms/plot_size_era_tide.pdf", height = 3.5, width = 7)
datMeans <- dat_dens %>% filter(!is.na(size1mm)) %>%
group_by(species, sp, site, era, year, sampleArea,
sample_area_tidal_ht, sample_area_tidal_ht_offset) %>%
summarise(size_mean = mean(size1mm),
size_med = median(size1mm),
size_sd = sd(size1mm),
size_n = n(),
size_se = size_sd/sqrt(size_n),
size_CI = qt(0.975, df = size_n - 1) * size_se,
tide_mean = mean(tideHTm)) %>%
ungroup()
# set desired dodge width
pd <- position_dodge(width = 0.2)
datMeans %>%
ggplot(aes(sample_area_tidal_ht_offset, size_mean, color = era, shape = species)) +
geom_point(alpha = 0.8, size = 2) +
geom_errorbar(aes(ymin = size_mean - size_sd, ymax = size_mean + size_sd),
width = 0.2, alpha = 0.8) +
facet_wrap(~ species) +
guides(shape = FALSE) +
labs(x = "Tidal height (m)", y = "Size (mm)") +
theme(legend.position = c(0.05, 0.05), legend.justification = c(0.05, 0.025)) +
theme(legend.title = element_blank()) +
scale_color_manual(values = c("black", "#e79f00"))
ggsave("figs_ms/plot_size_era_tide_means.pdf", height = 3.5, width = 7)
|
/4_plot_data/plot_size_era_tide.R
|
permissive
|
elahi/sbs_analysis
|
R
| false
| false
| 2,344
|
r
|
################################################################################
##' @title Plot snail sizes by tide
##' @author Robin Elahi
##' @contact elahi.robin@gmail.com
##' @date 2017-01-16
##' @log
################################################################################
source("3_analyse_data/01_sbs_bayes_data.R")
theme_set(theme_bw(base_size = 12) +
theme(strip.background = element_blank(),
strip.text = element_text(face = "italic"),
panel.grid = element_blank()))
dat_dens
names(dat_dens)
dat_dens <- dat_dens %>%
mutate(sample_area_tidal_ht_offset =
ifelse(era == "present", sample_area_tidal_ht + 0.1, sample_area_tidal_ht))
dat_dens %>%
ggplot(aes(sample_area_tidal_ht_offset, size1mm, color = era)) +
geom_jitter(alpha = 0.25, size = 0.5, pch = 1) +
facet_wrap(~ species) +
labs(x = "Tidal height (m)", y = "Size (mm)") +
theme(legend.position = c(0.05, 0.05), legend.justification = c(0.05, 0.025)) +
theme(legend.title = element_blank()) +
scale_color_manual(values = c("#e79f00", "black"))
ggsave("figs_ms/plot_size_era_tide.pdf", height = 3.5, width = 7)
datMeans <- dat_dens %>% filter(!is.na(size1mm)) %>%
group_by(species, sp, site, era, year, sampleArea,
sample_area_tidal_ht, sample_area_tidal_ht_offset) %>%
summarise(size_mean = mean(size1mm),
size_med = median(size1mm),
size_sd = sd(size1mm),
size_n = n(),
size_se = size_sd/sqrt(size_n),
size_CI = qt(0.975, df = size_n - 1) * size_se,
tide_mean = mean(tideHTm)) %>%
ungroup()
# set desired dodge width
pd <- position_dodge(width = 0.2)
datMeans %>%
ggplot(aes(sample_area_tidal_ht_offset, size_mean, color = era, shape = species)) +
geom_point(alpha = 0.8, size = 2) +
geom_errorbar(aes(ymin = size_mean - size_sd, ymax = size_mean + size_sd),
width = 0.2, alpha = 0.8) +
facet_wrap(~ species) +
guides(shape = FALSE) +
labs(x = "Tidal height (m)", y = "Size (mm)") +
theme(legend.position = c(0.05, 0.05), legend.justification = c(0.05, 0.025)) +
theme(legend.title = element_blank()) +
scale_color_manual(values = c("black", "#e79f00"))
ggsave("figs_ms/plot_size_era_tide_means.pdf", height = 3.5, width = 7)
|
library("XML")
library("stringr")
library("ggplot2")
library("data.table")
#Functions
source(paste(getwd(),"/R Scripts/Functions/Functions.R", sep=""))
source(paste(getwd(),"/R Scripts/Functions/League Settings.R", sep=""))
#Suffix
suffix <- "yahoo"
#Download fantasy football projections from Yahoo.com
baseurl <-"http://football.fantasysports.yahoo.com/f1/39345/players?status=A&pos=O&cut_type=9&myteam=0&sort=PTS&sdir=1"
playerCount <- paste("&count=",seq(0,350,by=25),sep="")
PWeekCount<-paste("&stat1=S_PW_",1:17,sep="")
YahooURLs <- unlist(lapply(PWeekCount,function(x)paste(baseurl,playerCount,x,sep="")))
#Pull from yahoo, but do it slowly, with random sys.sleep intervals ~7 secs long.
load("YahooRawWeekProj.RData")
yahoo<-yahoo_backup
#Add Weeks.
for(i in 1:length(yahoo)) { yahoo[[i]]$week <- trunc((i-1)/15+1)}
#rbind, makes a data.table.
yahoo_proj<-rbindlist(yahoo)
#Variable Names
setnames(yahoo_proj,c("star","player","add","owner","pts_yahoo","ownedPct","proj","actual",
"passYds_yahoo","passTds_yahoo","passInt_yahoo","rushYds_yahoo",
"rushTds_yahoo","recYds_yahoo","recTds_yahoo","returnTds_yahoo",
"twoPts_yahoo","fumbles_yahoo","missing","week"))
yahoo_proj
#Add missing variables
yahoo_proj[,c("passAtt_yahoo","passComp_yahoo","rushAtt_yahoo","rec_yahoo"):=NA]
#Remove special characters(commas)
yahoo_proj<-yahoo_proj[,lapply(.SD,function(x) gsub("\\,", "", x))]
#Convert variables from character strings to numeric
yahoo_proj<-yahoo_proj[,lapply(.SD,as.numeric),by=eval(names(yahoo_proj)[c(1:4,6,8,19)]),.SDcols=c(5,7,9:18,20:24)]
yahoo_proj<-yahoo_proj[,ownedPct := as.numeric(sub("%", "", ownedPct))]
#Player name, position, and team
yahoo_proj[,player := str_trim(sapply(str_split(player, "\n"), "[[", 2))]
yahoo_proj[,pos := str_trim(str_sub(player, start= -2))]
yahoo_proj[,name_yahoo := str_trim(str_sub(player, start=0, end=nchar(player)-8))]
yahoo_proj[,name := nameMerge(name_yahoo)]
yahoo_proj[,team_yahoo := toupper(str_trim(str_sub(player, start=str_locate(player, "-")[,1]-4, end=str_locate(player, "-")[,1]-2)))]
#Check for duplicates (duplicate means same name, position, week, and team).
setkey(yahoo_proj,name,pos,week,team_yahoo)
yahoo_proj[duplicated(yahoo_proj)]
#Rename players
yahoo_proj[name=="STEVIEJOHNSON", name:= "STEVEJOHNSON"]
#Calculate Week Rank
yahoo_proj[,weekRank_yahoo:= rank(-pts_yahoo, ties.method="min")]
#Calculate Season Rank
yahoo_proj[,SeasonPts_yahoo:=sum(pts_yahoo,rm.na=TRUE),by=list(name,pos,team_yahoo)]
#Overall Rank
yahoo_proj[,overallRank_yahoo:=as.numeric(as.factor(rank(-SeasonPts_yahoo,ties.method = "min")))]
#Calculate Position Rank
yahoo_proj[,positionRank_yahoo:=as.numeric(as.factor(rank(-SeasonPts_yahoo,ties.method = "min"))),by=list(pos)]
#Delete Nuisiance Columns
yahoo_proj[,c("star","player","add","owner","actual","missing","proj"):=NULL]
#Order variables in data set
setcolorder(yahoo_proj,c("week",prefix,"SeasonPts_yahoo","weekRank_yahoo", paste(varNames, suffix, sep="_"),"ownedPct"))
#Order players by overall rank
yahoo_proj[order(overallRank_yahoo,week)][1:100]
#Density Plot
ggplot(yahoo_proj, aes(x=pts_yahoo)) + geom_density(fill="blue", alpha=.3) + xlab("Players' Weekly Projected Points") + ggtitle("Density Plot of Yahoo Projected Points")
ggsave(paste(getwd(),"/Figures/Yahoo projections.jpg", sep=""), width=10, height=10)
dev.off()
#Save file
save(yahoo_proj, file = paste(getwd(),"/Data/Yahoo-Weekly-Projections.RData", sep=""))
write.csv(yahoo_proj, file=paste(getwd(),"/Data/Yahoo-Weekly-Projections.csv", sep=""), row.names=FALSE)
save(yahoo_proj, file = paste(getwd(),"/Data/Historical Projections/Yahoo-Weekly-Projections-2014.RData", sep=""))
write.csv(yahoo_proj, file=paste(getwd(),"/Data/Historical Projections/Yahoo-Weekly-Projections-2014.csv", sep=""), row.names=FALSE)
|
/Weekly.R
|
no_license
|
ethielg/Projections
|
R
| false
| false
| 3,885
|
r
|
library("XML")
library("stringr")
library("ggplot2")
library("data.table")
#Functions
source(paste(getwd(),"/R Scripts/Functions/Functions.R", sep=""))
source(paste(getwd(),"/R Scripts/Functions/League Settings.R", sep=""))
#Suffix
suffix <- "yahoo"
#Download fantasy football projections from Yahoo.com
baseurl <-"http://football.fantasysports.yahoo.com/f1/39345/players?status=A&pos=O&cut_type=9&myteam=0&sort=PTS&sdir=1"
playerCount <- paste("&count=",seq(0,350,by=25),sep="")
PWeekCount<-paste("&stat1=S_PW_",1:17,sep="")
YahooURLs <- unlist(lapply(PWeekCount,function(x)paste(baseurl,playerCount,x,sep="")))
#Pull from yahoo, but do it slowly, with random sys.sleep intervals ~7 secs long.
load("YahooRawWeekProj.RData")
yahoo<-yahoo_backup
#Add Weeks.
for(i in 1:length(yahoo)) { yahoo[[i]]$week <- trunc((i-1)/15+1)}
#rbind, makes a data.table.
yahoo_proj<-rbindlist(yahoo)
#Variable Names
setnames(yahoo_proj,c("star","player","add","owner","pts_yahoo","ownedPct","proj","actual",
"passYds_yahoo","passTds_yahoo","passInt_yahoo","rushYds_yahoo",
"rushTds_yahoo","recYds_yahoo","recTds_yahoo","returnTds_yahoo",
"twoPts_yahoo","fumbles_yahoo","missing","week"))
yahoo_proj
#Add missing variables
yahoo_proj[,c("passAtt_yahoo","passComp_yahoo","rushAtt_yahoo","rec_yahoo"):=NA]
#Remove special characters(commas)
yahoo_proj<-yahoo_proj[,lapply(.SD,function(x) gsub("\\,", "", x))]
#Convert variables from character strings to numeric
yahoo_proj<-yahoo_proj[,lapply(.SD,as.numeric),by=eval(names(yahoo_proj)[c(1:4,6,8,19)]),.SDcols=c(5,7,9:18,20:24)]
yahoo_proj<-yahoo_proj[,ownedPct := as.numeric(sub("%", "", ownedPct))]
#Player name, position, and team
yahoo_proj[,player := str_trim(sapply(str_split(player, "\n"), "[[", 2))]
yahoo_proj[,pos := str_trim(str_sub(player, start= -2))]
yahoo_proj[,name_yahoo := str_trim(str_sub(player, start=0, end=nchar(player)-8))]
yahoo_proj[,name := nameMerge(name_yahoo)]
yahoo_proj[,team_yahoo := toupper(str_trim(str_sub(player, start=str_locate(player, "-")[,1]-4, end=str_locate(player, "-")[,1]-2)))]
#Check for duplicates (duplicate means same name, position, week, and team).
setkey(yahoo_proj,name,pos,week,team_yahoo)
yahoo_proj[duplicated(yahoo_proj)]
#Rename players
yahoo_proj[name=="STEVIEJOHNSON", name:= "STEVEJOHNSON"]
#Calculate Week Rank
yahoo_proj[,weekRank_yahoo:= rank(-pts_yahoo, ties.method="min")]
#Calculate Season Rank
yahoo_proj[,SeasonPts_yahoo:=sum(pts_yahoo,rm.na=TRUE),by=list(name,pos,team_yahoo)]
#Overall Rank
yahoo_proj[,overallRank_yahoo:=as.numeric(as.factor(rank(-SeasonPts_yahoo,ties.method = "min")))]
#Calculate Position Rank
yahoo_proj[,positionRank_yahoo:=as.numeric(as.factor(rank(-SeasonPts_yahoo,ties.method = "min"))),by=list(pos)]
#Delete Nuisiance Columns
yahoo_proj[,c("star","player","add","owner","actual","missing","proj"):=NULL]
#Order variables in data set
setcolorder(yahoo_proj,c("week",prefix,"SeasonPts_yahoo","weekRank_yahoo", paste(varNames, suffix, sep="_"),"ownedPct"))
#Order players by overall rank
yahoo_proj[order(overallRank_yahoo,week)][1:100]
#Density Plot
ggplot(yahoo_proj, aes(x=pts_yahoo)) + geom_density(fill="blue", alpha=.3) + xlab("Players' Weekly Projected Points") + ggtitle("Density Plot of Yahoo Projected Points")
ggsave(paste(getwd(),"/Figures/Yahoo projections.jpg", sep=""), width=10, height=10)
dev.off()
#Save file
save(yahoo_proj, file = paste(getwd(),"/Data/Yahoo-Weekly-Projections.RData", sep=""))
write.csv(yahoo_proj, file=paste(getwd(),"/Data/Yahoo-Weekly-Projections.csv", sep=""), row.names=FALSE)
save(yahoo_proj, file = paste(getwd(),"/Data/Historical Projections/Yahoo-Weekly-Projections-2014.RData", sep=""))
write.csv(yahoo_proj, file=paste(getwd(),"/Data/Historical Projections/Yahoo-Weekly-Projections-2014.csv", sep=""), row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/mergeClusters.R
\docType{methods}
\name{mergeClusters}
\alias{mergeClusters}
\alias{mergeClusters,daFrame,character,data.frame,character-method}
\title{Manual cluster merging}
\usage{
mergeClusters(x, k, table, id)
\S4method{mergeClusters}{daFrame,character,data.frame,character}(x, k,
table, id)
}
\arguments{
\item{x}{a \code{\link{daFrame}}.}
\item{k}{a character string specifying the clustering to merge.}
\item{table}{a merging table with 2 columns containing the cluster IDs to merge
in the 1st, and the cluster IDs to newly assign in the 2nd column.}
\item{id}{character string. Used as a label for the merging.}
\item{...}{optional arguments.}
}
\value{
Writes the newly assigend cluster codes into the metadata slot
\code{cluster_codes} of the input \code{daFrame} and returns the latter.
}
\description{
\code{mergeClusters} provides a simple wrapper
to store a manual merging inside the input \code{daFrame}.
}
\details{
in the following code snippets, \code{x} is a \code{daFrame} object.
\itemize{
\item{merging codes are accesible through \code{cluster_codes(x)$id}}
\item{all functions that ask for specification of a clustering
(e.g. \code{\link{plotAbundances}}, \code{\link{plotClusterHeatmap}})
take the merging ID as a valid input argument.}}
}
\examples{
data(PBMC_fs, PBMC_panel, PBMC_md, merging_table)
re <- daFrame(PBMC_fs, PBMC_panel, PBMC_md)
# run clustering
re <- cluster(re)
# merge clusters
re <- mergeClusters(re, k="meta20", table=merging_table, id="merging")
plotClusterHeatmap(re, k="merging", hm2="pS6")
}
\references{
Nowicka M, Krieg C, Weber LM et al.
CyTOF workflow: Differential discovery in
high-throughput high-dimensional cytometry datasets.
\emph{F1000Research} 2017, 6:748 (doi: 10.12688/f1000research.11622.1)
}
\author{
Helena Lucia Crowell \email{helena.crowell@uzh.ch}
}
|
/man/mergeClusters.Rd
|
no_license
|
sukath/CATALYST
|
R
| false
| true
| 1,936
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/mergeClusters.R
\docType{methods}
\name{mergeClusters}
\alias{mergeClusters}
\alias{mergeClusters,daFrame,character,data.frame,character-method}
\title{Manual cluster merging}
\usage{
mergeClusters(x, k, table, id)
\S4method{mergeClusters}{daFrame,character,data.frame,character}(x, k,
table, id)
}
\arguments{
\item{x}{a \code{\link{daFrame}}.}
\item{k}{a character string specifying the clustering to merge.}
\item{table}{a merging table with 2 columns containing the cluster IDs to merge
in the 1st, and the cluster IDs to newly assign in the 2nd column.}
\item{id}{character string. Used as a label for the merging.}
\item{...}{optional arguments.}
}
\value{
Writes the newly assigend cluster codes into the metadata slot
\code{cluster_codes} of the input \code{daFrame} and returns the latter.
}
\description{
\code{mergeClusters} provides a simple wrapper
to store a manual merging inside the input \code{daFrame}.
}
\details{
in the following code snippets, \code{x} is a \code{daFrame} object.
\itemize{
\item{merging codes are accesible through \code{cluster_codes(x)$id}}
\item{all functions that ask for specification of a clustering
(e.g. \code{\link{plotAbundances}}, \code{\link{plotClusterHeatmap}})
take the merging ID as a valid input argument.}}
}
\examples{
data(PBMC_fs, PBMC_panel, PBMC_md, merging_table)
re <- daFrame(PBMC_fs, PBMC_panel, PBMC_md)
# run clustering
re <- cluster(re)
# merge clusters
re <- mergeClusters(re, k="meta20", table=merging_table, id="merging")
plotClusterHeatmap(re, k="merging", hm2="pS6")
}
\references{
Nowicka M, Krieg C, Weber LM et al.
CyTOF workflow: Differential discovery in
high-throughput high-dimensional cytometry datasets.
\emph{F1000Research} 2017, 6:748 (doi: 10.12688/f1000research.11622.1)
}
\author{
Helena Lucia Crowell \email{helena.crowell@uzh.ch}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_negative_cases.R
\name{get_negative_cases}
\alias{get_negative_cases}
\title{Find all observations with negative incidence in a
given data frame}
\usage{
get_negative_cases(data)
}
\arguments{
\item{data}{a data frame with location, date, cum and inc}
}
\value{
a data frame of cases with locations and dates
}
\description{
Find all observations with negative incidence in a
given data frame
}
|
/man/get_negative_cases.Rd
|
no_license
|
reichlab/covidData
|
R
| false
| true
| 477
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_negative_cases.R
\name{get_negative_cases}
\alias{get_negative_cases}
\title{Find all observations with negative incidence in a
given data frame}
\usage{
get_negative_cases(data)
}
\arguments{
\item{data}{a data frame with location, date, cum and inc}
}
\value{
a data frame of cases with locations and dates
}
\description{
Find all observations with negative incidence in a
given data frame
}
|
# BASIC DESCRIPTIVES for the Seattle bike-rental dataset
# Read INPUT DATA
library(data.table)  # fread() comes from data.table; it must be loaded before use
# BUG FIX: the original path string literal was broken across two lines, so the
# embedded newline made the file path invalid; the path is now one string.
seattle_merged_reduced <- fread(
  "C:/Users/isabe/Documents/isabel/R_HU_Statistik/Course_StatisticalProgramming/Projects_SPL/bikeRental/Rawdata_bikeRental/Seattle.csv"
)
seattle_data <- seattle_merged_reduced  # work on a copy
#########################################
# Format conversions (done once -- the original repeated this block twice):
# Niederschlagmm (precipitation, mm): chr -> num, NA -> 0
seattle_data$Niederschlagmm <- as.numeric(seattle_data$Niederschlagmm)
seattle_data$Niederschlagmm[is.na(seattle_data$Niederschlagmm)] <- 0
# mittlereTemperaturC (mean temperature, deg C): chr -> num
seattle_data$mittlereTemperaturC <- as.numeric(seattle_data$mittlereTemperaturC)
str(seattle_data)
## Recode Ereignisse (weather events) to integer codes
seattle_data$Ereignisse[which(seattle_data$Ereignisse == "Regen")] <- 2
seattle_data$Ereignisse[which(seattle_data$Ereignisse == "Schneefall")] <- 3
seattle_data$Ereignisse[which(seattle_data$Ereignisse == "Regen-Gewitter")] <- 5
seattle_data$Ereignisse[which(seattle_data$Ereignisse == "Nebel-Regen")] <- 4
seattle_data$Ereignisse[which(seattle_data$Ereignisse == "Nebel")] <- 1
# chr -> int; rows with no recorded event become NA and are recoded to 0
seattle_data$Ereignisse <- as.integer(seattle_data$Ereignisse)
seattle_data$Ereignisse[is.na(seattle_data$Ereignisse)] <- 0
str(seattle_data)  # Ereignisse is now integer
## gender: integer codes, NA -> 2 ("nodata"), then a labelled factor
seattle_data$gender <- as.integer(seattle_data$gender)
seattle_data$gender[is.na(seattle_data$gender)] <- 2
seattle_data$gender <- factor(seattle_data$gender)
levels(seattle_data$gender) <- list(male = "0", female = "1", nodata = "2")
#######################################################
# BASIC DESCRIPTIVES
library("MASS")
# Summary (median, mean, quantiles) and spread of trip duration
summary(seattle_data$tripduration)
sd(seattle_data$tripduration)  # standard deviation of tripduration
# Histograms
hist(seattle_data$birthyear)
hist(seattle_data$mittlereTemperaturC)
# BUG FIX: density() alone only computes the estimate; plot it so it is shown
plot(density(seattle_data$tripduration))
# Scatter / box plots
plot(seattle_data$birthyear, seattle_data$tripduration)
plot(seattle_data$gender, seattle_data$tripduration)
|
/Q2_Descriptives_BikeRentalSeattle/Q2_Descriptives_BikeRentalSeattle.r
|
no_license
|
isabelchaquire/SPL_HU
|
R
| false
| false
| 2,411
|
r
|
# BASIC DESCRIPTIVES for the Seattle bike-rental dataset
# Read INPUT DATA
library(data.table)  # fread() comes from data.table; it must be loaded before use
# BUG FIX: the original path string literal was broken across two lines, so the
# embedded newline made the file path invalid; the path is now one string.
seattle_merged_reduced <- fread(
  "C:/Users/isabe/Documents/isabel/R_HU_Statistik/Course_StatisticalProgramming/Projects_SPL/bikeRental/Rawdata_bikeRental/Seattle.csv"
)
seattle_data <- seattle_merged_reduced  # work on a copy
#########################################
# Format conversions (done once -- the original repeated this block twice):
# Niederschlagmm (precipitation, mm): chr -> num, NA -> 0
seattle_data$Niederschlagmm <- as.numeric(seattle_data$Niederschlagmm)
seattle_data$Niederschlagmm[is.na(seattle_data$Niederschlagmm)] <- 0
# mittlereTemperaturC (mean temperature, deg C): chr -> num
seattle_data$mittlereTemperaturC <- as.numeric(seattle_data$mittlereTemperaturC)
str(seattle_data)
## Recode Ereignisse (weather events) to integer codes
seattle_data$Ereignisse[which(seattle_data$Ereignisse == "Regen")] <- 2
seattle_data$Ereignisse[which(seattle_data$Ereignisse == "Schneefall")] <- 3
seattle_data$Ereignisse[which(seattle_data$Ereignisse == "Regen-Gewitter")] <- 5
seattle_data$Ereignisse[which(seattle_data$Ereignisse == "Nebel-Regen")] <- 4
seattle_data$Ereignisse[which(seattle_data$Ereignisse == "Nebel")] <- 1
# chr -> int; rows with no recorded event become NA and are recoded to 0
seattle_data$Ereignisse <- as.integer(seattle_data$Ereignisse)
seattle_data$Ereignisse[is.na(seattle_data$Ereignisse)] <- 0
str(seattle_data)  # Ereignisse is now integer
## gender: integer codes, NA -> 2 ("nodata"), then a labelled factor
seattle_data$gender <- as.integer(seattle_data$gender)
seattle_data$gender[is.na(seattle_data$gender)] <- 2
seattle_data$gender <- factor(seattle_data$gender)
levels(seattle_data$gender) <- list(male = "0", female = "1", nodata = "2")
#######################################################
# BASIC DESCRIPTIVES
library("MASS")
# Summary (median, mean, quantiles) and spread of trip duration
summary(seattle_data$tripduration)
sd(seattle_data$tripduration)  # standard deviation of tripduration
# Histograms
hist(seattle_data$birthyear)
hist(seattle_data$mittlereTemperaturC)
# BUG FIX: density() alone only computes the estimate; plot it so it is shown
plot(density(seattle_data$tripduration))
# Scatter / box plots
plot(seattle_data$birthyear, seattle_data$tripduration)
plot(seattle_data$gender, seattle_data$tripduration)
|
# ---- siSTAB1 knockdown RNA-seq: load per-sample counts and run classic edgeR ----
# NOTE(review): DGEList/cpm/calcNormFactors/exactTest etc. belong to edgeR, which
# is never library()'d in this script; presumably it is attached as a dependency
# of CancerSubtypes -- confirm before running standalone.
data_path <- "/project/huff/huff/TCGA_AML/siSTAB1_data/RNAseq-analysis/result/2.Analysis_result/Single_Sample"
fileNames <- dir(data_path,pattern = "count$")
# load expression data ----------------------------------------------------
# Each *.count file becomes a data frame bound to a variable named after the file.
for (x in fileNames) {
assign(x,readr::read_tsv(file.path(data_path,x),col_names = F))
}
# data manage -------------------------------------------------------------
# Rename the two unnamed columns (gene id, raw count) for each sample.
NC1.count %>% dplyr::rename("ENSG"="X1","NC1"="X2") -> NC1_count.comfirm
NC2.count %>% dplyr::rename("ENSG"="X1","NC2"="X2")-> NC2_count.comfirm
siSTAB1_1.count %>% dplyr::rename("ENSG"="X1","siSTAB1_1"="X2") -> siSTAB1_1_count.comfirm
siSTAB1_2.count %>% dplyr::rename("ENSG"="X1","siSTAB1_2"="X2") -> siSTAB1_2_count.comfirm
# Join the four samples into one genes-by-samples count table.
NC1_count.comfirm %>%
dplyr::inner_join(NC2_count.comfirm,by=c("ENSG")) %>%
dplyr::inner_join(siSTAB1_1_count.comfirm,by=c("ENSG")) %>%
dplyr::inner_join(siSTAB1_2_count.comfirm,by=c("ENSG")) -> all_count
# check data distribution -------------------------------------------------------
library(CancerSubtypes)
mRNA <- as.matrix(all_count[,-c(1)])
rownames(mRNA)=all_count$ENSG
###To observe the mean, variance and Median Absolute Deviation distribution of the dataset, it helps users to get the distribution characteristics of the data, e.g. To evaluate whether the dataset fits a normal distribution or not.
data.checkDistribution(mRNA) # data don't fit a nomal distribution
# do edgeR to get DEGs ----------------------------------------------------
# Userguide: http://www.bioconductor.org/packages/3.6/bioc/vignettes/edgeR/inst/doc/edgeRUsersGuide.pdf
setwd("/project/huff/huff/TCGA_AML/siSTAB1_data/DEG")
## get DEGlist
d <- DGEList(counts=mRNA,group=c(1,1,2,2)) # construct a DEGList for edgeR analysis
d_backup <- d
keep <- rowSums(cpm(d)>100) >= 2 # filtering genes with very low counts across all libraries provide little evidence for differential expression
d <- d[keep,]
d$samples$lib.size <- colSums(d$counts) # recalculate the library size
d <- calcNormFactors(d) # The calcNormFactors function normalizes for RNA composition by finding a set of scaling factors for the library sizes that minimize the log-fold changes between the samples for most genes.
d$samples #The normalization factors of all the libraries multiply to unity. A normalization factor below 1 indicates that a small number of high count genes are monopolizing the sequencing, causing the counts for other genes to be lower than would be usual given the library size. As a result, the library size will be scaled down, analogous to scaling the counts upwards in that library. Conversely, a factor above 1 scales up the library size, analogous to downscaling the counts.
plotMDS(d, method="bcv", col=as.numeric(d$samples$group)) # Multidimensional scaling plot of distances between gene expression profiles
d1 <- estimateDisp(d) # To estimate common dispersion and tagwise dispersions in one run
plotBCV(d1)
et <- exactTest(d1) # Testing for DE genes
topTags(et)
de1 <- decideTestsDGE(et, adjust.method="BH", p.value=0.05)
de1tags12 <- rownames(d1)[as.logical(de1)]
plotSmear(et, de.tags=de1tags12)
# NOTE(review): no png() device is opened in this section, so dev.off() closes
# whatever device the preceding plots drew to -- verify this is intended.
dev.off()
nrDEG=topTags(et, n=nrow(mRNA))
nrDEG=as.data.frame(nrDEG)
# Map ENSEMBL gene ids to symbols and write the DE table to disk.
library(org.Hs.eg.db)
library(clusterProfiler)
bitr(rownames(nrDEG), fromType = "ENSEMBL",
toType = c("SYMBOL"),
OrgDb = org.Hs.eg.db) -> ensembl.symbol
setwd("/project/huff/huff/TCGA_AML/siSTAB1_data/DEG")
nrDEG %>%
tibble::as.tibble() %>%
dplyr::mutate(ENSEMBL=rownames(nrDEG)) %>%
dplyr::left_join(ensembl.symbol,by="ENSEMBL") %>%
readr::write_tsv(file.path("edger_classic.results.tsv"))
# NOTE(review): DEG_edger_classic is defined *below* this call; a top-to-bottom
# run of this script fails here -- move the call after the definition.
DEG_edger_classic(mRNA,c(1,1,2,2))
## Wrap the classic edgeR exact-test workflow into a reusable function.
##
## exprSet:    raw count matrix (genes x samples)
## group_list: sample group labels, one per column of exprSet
##
## Side effects: writes MDS.png, BCV.png, MA.png and edger_classic.results.csv
## into the current working directory.
## Returns (invisibly) the full topTags table as a data.frame.
DEG_edger_classic <- function(exprSet, group_list) {
  d <- DGEList(counts = exprSet, group = factor(group_list))
  # filter genes with very low counts across all libraries
  keep <- rowSums(cpm(d) > 100) >= 2
  d <- d[keep, ]
  d$samples$lib.size <- colSums(d$counts)  # recalculate library sizes
  d <- calcNormFactors(d)  # normalize for RNA composition
  png("MDS.png")
  plotMDS(d, method = "bcv", col = as.numeric(d$samples$group))  # MDS plot of sample distances
  legend("bottomleft", as.character(unique(d$samples$group)), col = 1:3, pch = 20)
  dev.off()
  # BUG FIX: the original called estimateCommonDisp(d1) before d1 existed;
  # the dispersion estimate must start from the normalized DGEList `d`.
  d1 <- estimateCommonDisp(d, verbose = TRUE)
  d1 <- estimateTagwiseDisp(d1)
  png("BCV.png")
  plotBCV(d1)
  dev.off()
  et12 <- exactTest(d1)  # Testing for DE genes
  png("MA.png")
  de1 <- decideTestsDGE(et12, adjust.method = "BH", p.value = 0.05)
  de1tags12 <- rownames(d1)[as.logical(de1)]
  plotSmear(et12, de.tags = de1tags12)
  dev.off()
  nrDEG <- topTags(et12, n = nrow(exprSet))
  nrDEG <- as.data.frame(nrDEG)
  write.table(nrDEG, "edger_classic.results.csv", sep = "\t")
  invisible(nrDEG)
}
# confirm with expression data --------------------------------------------
# Cross-check the count-based DE results against RPKM expression values.
fileNames <- dir("/project/huff/huff/TCGA_AML/siSTAB1_data/expression",pattern = "RPKM$")
# load expression data ----------------------------------------------------
# Collapse transcript-level FPKM to gene symbols by summing per symbol.
fn_exp_manage <- function(x){
x %>%
dplyr::rename("ENSG_id"="X1","symbol" = "X2", "FPKM" = "X3") %>%
dplyr::group_by(symbol) %>%
dplyr::mutate(FPKM_sum = sum(FPKM)) %>%
dplyr::ungroup() %>%
dplyr::select(ENSG_id,symbol,FPKM_sum) %>%
unique()
}
# Each *.RPKM file becomes a data frame bound to a variable named after the file.
for (x in fileNames) {
assign(x,readr::read_tsv(file.path("/project/huff/huff/TCGA_AML/siSTAB1_data/expression",x),col_names = F))
}
fn_exp_manage(NC1.RPKM) %>% dplyr::rename("NC1" ="FPKM_sum") -> NC1_exp.comfirm
fn_exp_manage(NC2.RPKM) %>% dplyr::rename("NC2" ="FPKM_sum")-> NC2_exp.comfirm
fn_exp_manage(siSTAB1_1.RPKM) %>% dplyr::rename("siSTAB1_1" ="FPKM_sum") -> siSTAB1_1_exp.comfirm
fn_exp_manage(siSTAB1_2.RPKM) %>% dplyr::rename("siSTAB1_2" ="FPKM_sum") -> siSTAB1_2_exp.comfirm
# Join the four per-sample expression tables into one genes-by-samples table.
NC1_exp.comfirm %>%
dplyr::inner_join(NC2_exp.comfirm,by=c("symbol","ENSG_id")) %>%
dplyr::inner_join(siSTAB1_1_exp.comfirm,by=c("symbol","ENSG_id")) %>%
dplyr::inner_join(siSTAB1_2_exp.comfirm,by=c("symbol","ENSG_id")) -> all_exp
all_exp %>%
readr::write_tsv("RPKM_expression.tsv")
# Two-sample t-test of NC (columns 1-2) vs siSTAB1 (columns 3-4) on a one-row
# nested data frame; returns a tidy one-row result via broom::tidy().
fn_ttest <- function(x){
x %>% as.matrix() %>% .[1,c(1,2)] %>% as.vector() -> nc
x %>% as.matrix() %>% .[1,c(3,4)] %>% as.vector() -> si
t.test(nc,si) %>%
broom::tidy()
}
# Run the t-test per gene (each nested `data` holds one row of 4 sample values).
all_exp %>%
tidyr::nest(-ENSG_id,-symbol) %>%
dplyr::group_by(ENSG_id,symbol) %>%
dplyr::mutate(ttest = purrr::map(data,fn_ttest)) -> exp_DE
# Unnest results, compute log2 fold change (si / nc), keep nominal p <= 0.05.
exp_DE %>%
dplyr::select(-data) %>%
tidyr::unnest() %>%
dplyr::rename("nc" = "estimate1","si"="estimate2") %>%
dplyr::mutate(log2FC = log2(si/nc)) %>%
dplyr::filter(p.value <= 0.05) -> exp_DE.p0.05
exp_DE.p0.05 %>%
readr::write_tsv("ttest_rpkm_exp_DE.p0.05")
/R-pac-learning/edgeR.R
|
no_license
|
Huffyphenix/hello-world
|
R
| false
| false
| 6,758
|
r
|
data_path <- "/project/huff/huff/TCGA_AML/siSTAB1_data/RNAseq-analysis/result/2.Analysis_result/Single_Sample"
fileNames <- dir(data_path,pattern = "count$")
# load expression data ----------------------------------------------------
for (x in fileNames) {
assign(x,readr::read_tsv(file.path(data_path,x),col_names = F))
}
# data manage -------------------------------------------------------------
NC1.count %>% dplyr::rename("ENSG"="X1","NC1"="X2") -> NC1_count.comfirm
NC2.count %>% dplyr::rename("ENSG"="X1","NC2"="X2")-> NC2_count.comfirm
siSTAB1_1.count %>% dplyr::rename("ENSG"="X1","siSTAB1_1"="X2") -> siSTAB1_1_count.comfirm
siSTAB1_2.count %>% dplyr::rename("ENSG"="X1","siSTAB1_2"="X2") -> siSTAB1_2_count.comfirm
NC1_count.comfirm %>%
dplyr::inner_join(NC2_count.comfirm,by=c("ENSG")) %>%
dplyr::inner_join(siSTAB1_1_count.comfirm,by=c("ENSG")) %>%
dplyr::inner_join(siSTAB1_2_count.comfirm,by=c("ENSG")) -> all_count
# check data distribution -------------------------------------------------------
library(CancerSubtypes)
mRNA <- as.matrix(all_count[,-c(1)])
rownames(mRNA)=all_count$ENSG
###To observe the mean, variance and Median Absolute Deviation distribution of the dataset, it helps users to get the distribution characteristics of the data, e.g. To evaluate whether the dataset fits a normal distribution or not.
data.checkDistribution(mRNA) # data don't fit a nomal distribution
# do edgeR to get DEGs ----------------------------------------------------
# Userguide: http://www.bioconductor.org/packages/3.6/bioc/vignettes/edgeR/inst/doc/edgeRUsersGuide.pdf
setwd("/project/huff/huff/TCGA_AML/siSTAB1_data/DEG")
## get DEGlist
d <- DGEList(counts=mRNA,group=c(1,1,2,2)) # construct a DEGList for edgeR analysis
d_backup <- d
keep <- rowSums(cpm(d)>100) >= 2 # filtering genes with very low counts across all libraries provide little evidence for differential expression
d <- d[keep,]
d$samples$lib.size <- colSums(d$counts) # recalculate the library size
d <- calcNormFactors(d) # The calcNormFactors function normalizes for RNA composition by finding a set of scaling factors for the library sizes that minimize the log-fold changes between the samples for most genes.
d$samples #The normalization factors of all the libraries multiply to unity. A normalization factor below 1 indicates that a small number of high count genes are monopolizing the sequencing, causing the counts for other genes to be lower than would be usual given the library size. As a result, the library size will be scaled down, analogous to scaling the counts upwards in that library. Conversely, a factor above 1 scales up the library size, analogous to downscaling the counts.
plotMDS(d, method="bcv", col=as.numeric(d$samples$group)) # Multidimensional scaling plot of distances between gene expression profiles
d1 <- estimateDisp(d) # To estimate common dispersion and tagwise dispersions in one run
plotBCV(d1)
et <- exactTest(d1) # Testing for DE genes
topTags(et)
de1 <- decideTestsDGE(et, adjust.method="BH", p.value=0.05)
de1tags12 <- rownames(d1)[as.logical(de1)]
plotSmear(et, de.tags=de1tags12)
dev.off()
nrDEG=topTags(et, n=nrow(mRNA))
nrDEG=as.data.frame(nrDEG)
library(org.Hs.eg.db)
library(clusterProfiler)
bitr(rownames(nrDEG), fromType = "ENSEMBL",
toType = c("SYMBOL"),
OrgDb = org.Hs.eg.db) -> ensembl.symbol
setwd("/project/huff/huff/TCGA_AML/siSTAB1_data/DEG")
nrDEG %>%
tibble::as.tibble() %>%
dplyr::mutate(ENSEMBL=rownames(nrDEG)) %>%
dplyr::left_join(ensembl.symbol,by="ENSEMBL") %>%
readr::write_tsv(file.path("edger_classic.results.tsv"))
DEG_edger_classic(mRNA,c(1,1,2,2))
## Wrap the classic edgeR exact-test workflow into a reusable function.
##
## exprSet:    raw count matrix (genes x samples)
## group_list: sample group labels, one per column of exprSet
##
## Side effects: writes MDS.png, BCV.png, MA.png and edger_classic.results.csv
## into the current working directory.
## Returns (invisibly) the full topTags table as a data.frame.
DEG_edger_classic <- function(exprSet, group_list) {
  d <- DGEList(counts = exprSet, group = factor(group_list))
  # filter genes with very low counts across all libraries
  keep <- rowSums(cpm(d) > 100) >= 2
  d <- d[keep, ]
  d$samples$lib.size <- colSums(d$counts)  # recalculate library sizes
  d <- calcNormFactors(d)  # normalize for RNA composition
  png("MDS.png")
  plotMDS(d, method = "bcv", col = as.numeric(d$samples$group))  # MDS plot of sample distances
  legend("bottomleft", as.character(unique(d$samples$group)), col = 1:3, pch = 20)
  dev.off()
  # BUG FIX: the original called estimateCommonDisp(d1) before d1 existed;
  # the dispersion estimate must start from the normalized DGEList `d`.
  d1 <- estimateCommonDisp(d, verbose = TRUE)
  d1 <- estimateTagwiseDisp(d1)
  png("BCV.png")
  plotBCV(d1)
  dev.off()
  et12 <- exactTest(d1)  # Testing for DE genes
  png("MA.png")
  de1 <- decideTestsDGE(et12, adjust.method = "BH", p.value = 0.05)
  de1tags12 <- rownames(d1)[as.logical(de1)]
  plotSmear(et12, de.tags = de1tags12)
  dev.off()
  nrDEG <- topTags(et12, n = nrow(exprSet))
  nrDEG <- as.data.frame(nrDEG)
  write.table(nrDEG, "edger_classic.results.csv", sep = "\t")
  invisible(nrDEG)
}
# confirm with expression data --------------------------------------------
fileNames <- dir("/project/huff/huff/TCGA_AML/siSTAB1_data/expression",pattern = "RPKM$")
# load expression data ----------------------------------------------------
fn_exp_manage <- function(x){
x %>%
dplyr::rename("ENSG_id"="X1","symbol" = "X2", "FPKM" = "X3") %>%
dplyr::group_by(symbol) %>%
dplyr::mutate(FPKM_sum = sum(FPKM)) %>%
dplyr::ungroup() %>%
dplyr::select(ENSG_id,symbol,FPKM_sum) %>%
unique()
}
for (x in fileNames) {
assign(x,readr::read_tsv(file.path("/project/huff/huff/TCGA_AML/siSTAB1_data/expression",x),col_names = F))
}
fn_exp_manage(NC1.RPKM) %>% dplyr::rename("NC1" ="FPKM_sum") -> NC1_exp.comfirm
fn_exp_manage(NC2.RPKM) %>% dplyr::rename("NC2" ="FPKM_sum")-> NC2_exp.comfirm
fn_exp_manage(siSTAB1_1.RPKM) %>% dplyr::rename("siSTAB1_1" ="FPKM_sum") -> siSTAB1_1_exp.comfirm
fn_exp_manage(siSTAB1_2.RPKM) %>% dplyr::rename("siSTAB1_2" ="FPKM_sum") -> siSTAB1_2_exp.comfirm
NC1_exp.comfirm %>%
dplyr::inner_join(NC2_exp.comfirm,by=c("symbol","ENSG_id")) %>%
dplyr::inner_join(siSTAB1_1_exp.comfirm,by=c("symbol","ENSG_id")) %>%
dplyr::inner_join(siSTAB1_2_exp.comfirm,by=c("symbol","ENSG_id")) -> all_exp
all_exp %>%
readr::write_tsv("RPKM_expression.tsv")
fn_ttest <- function(x){
x %>% as.matrix() %>% .[1,c(1,2)] %>% as.vector() -> nc
x %>% as.matrix() %>% .[1,c(3,4)] %>% as.vector() -> si
t.test(nc,si) %>%
broom::tidy()
}
all_exp %>%
tidyr::nest(-ENSG_id,-symbol) %>%
dplyr::group_by(ENSG_id,symbol) %>%
dplyr::mutate(ttest = purrr::map(data,fn_ttest)) -> exp_DE
exp_DE %>%
dplyr::select(-data) %>%
tidyr::unnest() %>%
dplyr::rename("nc" = "estimate1","si"="estimate2") %>%
dplyr::mutate(log2FC = log2(si/nc)) %>%
dplyr::filter(p.value <= 0.05) -> exp_DE.p0.05
exp_DE.p0.05 %>%
readr::write_tsv("ttest_rpkm_exp_DE.p0.05")
|
context("sync")

# Build a minimal repo config restricted to one NSIDC dataset, with wget
# accept filters so only a handful of 2015/2016 files are fetched.
# (An unused leftover `tok` accept-token assignment was removed.)
cf <- read_repo_config(local_config_file = NULL)
cf <- subset(cf, name == "NSIDC SMMR-SSM/I Nasateam sea ice concentration")
cf$do_sync <- TRUE
cf$method_flags <- paste(cf$method_flags, "--accept=\"*nt_2016*\"", "--accept=\"*nt_2015*\"")
# sync into a throwaway directory so the test leaves no residue
lfr <- tempdir()
cf$local_file_root <- lfr

test_that("sync works", {
  expect_true(sync_repo(cf))
})

test_that("sync got some files", {
  files <- data.frame(fullname = file.path(lfr, list.files(lfr, recursive = TRUE)), stringsAsFactors = FALSE)
  expect_gt(nrow(files), 0L)
})
|
/tests/testthat/test-sync.R
|
no_license
|
AustralianAntarcticDataCentre/raadsync
|
R
| false
| false
| 572
|
r
|
context("sync")

# Build a minimal repo config restricted to one NSIDC dataset, with wget
# accept filters so only a handful of 2015/2016 files are fetched.
# (An unused leftover `tok` accept-token assignment was removed.)
cf <- read_repo_config(local_config_file = NULL)
cf <- subset(cf, name == "NSIDC SMMR-SSM/I Nasateam sea ice concentration")
cf$do_sync <- TRUE
cf$method_flags <- paste(cf$method_flags, "--accept=\"*nt_2016*\"", "--accept=\"*nt_2015*\"")
# sync into a throwaway directory so the test leaves no residue
lfr <- tempdir()
cf$local_file_root <- lfr

test_that("sync works", {
  expect_true(sync_repo(cf))
})

test_that("sync got some files", {
  files <- data.frame(fullname = file.path(lfr, list.files(lfr, recursive = TRUE)), stringsAsFactors = FALSE)
  expect_gt(nrow(files), 0L)
})
|
# First set up the working directory
# NOTE(review): a relative setwd() assumes the script is launched from the
# project root; running it from elsewhere will fail here.
setwd('./RealData/Data')
######################################################################################################################
# Bottomly
# Bottomly, Daniel, et al. "Evaluating gene expression in C57BL/6J and DBA/2J mouse striatum using RNA-Seq and
# microarrays." PloS one 6.3 (2011): e17820
######################################################################################################################
# Pattern repeated for every dataset below: drop NA p-values, take the log of
# the covariate, sort both by decreasing covariate, and save (pvalue, covariate).
library("IHWpaper")
bottomly <- analyze_dataset("bottomly")
pvals <- bottomly$pvalue
ind <- which(!is.na(pvals))
pvals <- pvals[ind]
x1 <- log(bottomly$baseMean)
x1 <- x1[ind]
reorder <- rev(order(x1))
x1 <- x1[reorder]
pvals <- pvals[reorder]
hist(pvals)
length(pvals)
bottomly <- cbind(pvalue = pvals, covariate = x1)
saveRDS(bottomly, file='bottomly.p.value.rds')
######################################################################################################################
# Pasilla
# Brooks, Angela N., et al. "Conservation of an RNA regulatory map between Drosophila and mammals." Genome research
# 21.2 (2011): 193-202.
######################################################################################################################
# NOTE(review): this section uses the legacy DESeq (not DESeq2) API
# (fitNbinomGLMs/nbinomGLMTest); confirm the package is still installable.
library("DESeq")
library("pasilla")
data("pasillaGenes")
cds <- estimateSizeFactors(pasillaGenes)
cds <- estimateDispersions(cds)
fit1 <- fitNbinomGLMs(cds, count ~ type + condition)
fit0 <- fitNbinomGLMs(cds, count ~ type)
res <- data.frame(
filterstat = rowMeans(counts(cds)),
pvalue = nbinomGLMTest(fit1, fit0),
row.names = featureNames(cds))
ind <- which(!is.na(res$pvalue))
res <- res[ind, ]
pvals <- res$pvalue
x1 <- log(res[, 1])
reorder <- rev(order(x1))
x1 <- x1[reorder]
pvals <- pvals[reorder]
hist(pvals)
length(pvals)
pasilla <- cbind(pvalue = pvals, covariate = x1)
saveRDS(pasilla, 'pasilla.p.value.rds')
######################################################################################################################
# Airway
# Himes, Blanca E., et al. "RNA-Seq transcriptome profiling identifies CRISPLD2 as a glucocorticoid responsive gene
# that modulates cytokine function in airway smooth muscle cells." PloS one 9.6 (2014): e99625.
######################################################################################################################
library("DESeq2")
library("dplyr")
data("airway", package = "airway")
dds <- DESeqDataSet(se = airway, design = ~ cell + dex) %>% DESeq
deRes <- as.data.frame(results(dds))
pvals <- deRes$pvalue
ind <- which(!is.na(pvals))
pvals <- pvals[ind]
x <- log(deRes$baseMean)
x <- x[ind]
reorder <- rev(order(x))
x1 <- x[reorder]
pvals <- pvals[reorder]
hist(pvals)
length(pvals)
airway <- cbind(pvalue = pvals, covariate = x1)
saveRDS(airway, 'airway.p.value.rds')
######################################################################################################################
# Yeast
# Dephoure, Noah, and Steven P. Gygi. "Hyperplexing: a method for higher-order multiplexed quantitative proteomics
# provides a map of the dynamic response to rapamycin in yeast." Sci. Signal. 5.217 (2012): rs2-rs2.
######################################################################################################################
library("DESeq2")
library("dplyr")
library("IHWpaper")
proteomics_file <- system.file(
"extdata/real_data",
"science_signaling.csv",
package = "IHWpaper"
)
proteomics_df <- read.csv(proteomics_file, stringsAsFactors = F)
# NOTE(review): this multiplies each p-value by rank/n (covariate = peptide
# count below); verify the formula against the IHWpaper proteomics example.
proteomics_df$pvalue <- rank(
proteomics_df$p1,
ties.method="first"
) * proteomics_df$p1 / nrow(proteomics_df)
pvals <- proteomics_df$pvalue
x <- log(proteomics_df$X..peptides)
reorder <- rev(order(x))
x1 <- x[reorder]
pvals <- pvals[reorder]
hist(pvals)
length(pvals)
yeast <- cbind(pvalue = pvals, covariate = x1)
saveRDS(yeast, 'yeast.p.value.rds')
######################################################################################################################
# EWAS - data directly from the paper, included in the package
# Wijnands, Kim PJ, et al. "Genome-wide methylation analysis identifies novel CpG loci for perimembranous ventricular
# septal defects in human." Epigenomics 9.3 (2017): 241-251.
######################################################################################################################
# (Section intentionally left commented out: Haven.df is not created here.)
#pvals <- Haven.df[, 'pvalue']
#x1 <- Haven.df[, 'mean']
#
#reorder <- rev(order(x1))
#x1 <- x1[reorder]
#pvals <- pvals[reorder]
#
#ewas <- cbind(pvalue = pvals, covariate = x1)
#rownames(ewas) <- rownames(Haven.df)[reorder]
#
#saveRDS(ewas, 'ewas.p.value.rds')
######################################################################################################################
# MWAS - data retrieved from figshare biom file "deblur_125nt_no_blooms_normed.biom" - doi:10.6084/m9.figshare.6137198
# McDonald, Daniel, et al. "American gut: an open platform for citizen science microbiome research." mSystems (2018) :
# e00031-18.
######################################################################################################################
# The downloaded data were already normalized
# Rare OTUs occurring in fewer than or equal to 20 subjects were excluded (>0.2% prevalence) in the data
library(qvalue)  # qvalue() is used below but was never loaded
meta.dat <- readRDS(file='MWAS/amgut.meta.dat.rds')
otu.tab <- readRDS(file='MWAS/amgut.otu.dat.rds')
otu.name <- readRDS(file='MWAS/amgut.otu.name.rds')
# Select normal-BMI adults from the United States with recorded sex
ind <- meta.dat$bmi_cat %in% c('Normal') & meta.dat$country_residence %in% c('United States') &
meta.dat$age_cat %in% c('teen', '20s', '30s', '40s', '50s', '60s') & meta.dat$sex %in% c('female', 'male')
otu.tab <- otu.tab[, ind]
meta.dat <- meta.dat[ind, ]
# BUG FIX: the original did `meta.dat <- droplevels(sex)`, clobbering the whole
# metadata table with the sex factor; drop the unused levels on sex itself.
sex <- droplevels(factor(meta.dat$sex))
# Further discard OTUs occurring in fewer than 5 subjects
ind <- rowSums(otu.tab != 0) >= 5
otu.tab <- otu.tab[ind, ]
otu.name <- otu.name[ind, ]
# Wilcoxon rank-sum test of each OTU against sex;
# covariate = prevalence (number of non-zero subjects), sorted decreasing
pvals <- apply(otu.tab, 1, function (x) wilcox.test(x ~ sex)$p.value)
x <- rowSums(otu.tab != 0)
reorder <- rev(order(x))
x1 <- x[reorder]
pvals <- pvals[reorder]
hist(pvals)
length(pvals)
mwas <- data.frame(pvalue = pvals, covariate = x1, otu.name[reorder, ])
rownames(mwas) <- rownames(otu.tab)[reorder]
saveRDS(mwas, 'mwas.p.value.rds')
# Compare to traditional prevalence-filtering based methods
otu.tab2 <- otu.tab[rowMeans(otu.tab != 0) >= 0.1, ]
pvals2 <- apply(otu.tab2, 1, function (x) wilcox.test(x ~ sex)$p.value)
sum(qvalue(pvals2)$qvalue <= 0.1) # 116
sum(p.adjust(pvals2, 'fdr') <= 0.1) # 85
otu.tab2 <- otu.tab[rowMeans(otu.tab != 0) >= 0.2, ]
pvals2 <- apply(otu.tab2, 1, function (x) wilcox.test(x ~ sex)$p.value)
sum(qvalue(pvals2)$qvalue <= 0.1) # 71
sum(p.adjust(pvals2, 'fdr') <= 0.1) # 50
######################################################################################################################
|
/RealData/Code/1.RealDataProcess.R
|
no_license
|
jchen1981/CAMT
|
R
| false
| false
| 6,850
|
r
|
# First set up the working directory
setwd('./RealData/Data')
######################################################################################################################
# Bottomly
# Bottomly, Daniel, et al. "Evaluating gene expression in C57BL/6J and DBA/2J mouse striatum using RNA-Seq and
# microarrays." PloS one 6.3 (2011): e17820
######################################################################################################################
library("IHWpaper")
bottomly <- analyze_dataset("bottomly")
pvals <- bottomly$pvalue
ind <- which(!is.na(pvals))
pvals <- pvals[ind]
x1 <- log(bottomly$baseMean)
x1 <- x1[ind]
reorder <- rev(order(x1))
x1 <- x1[reorder]
pvals <- pvals[reorder]
hist(pvals)
length(pvals)
bottomly <- cbind(pvalue = pvals, covariate = x1)
saveRDS(bottomly, file='bottomly.p.value.rds')
######################################################################################################################
# Pasilla
# Brooks, Angela N., et al. "Conservation of an RNA regulatory map between Drosophila and mammals." Genome research
# 21.2 (2011): 193-202.
######################################################################################################################
library("DESeq")
library("pasilla")
data("pasillaGenes")
cds <- estimateSizeFactors(pasillaGenes)
cds <- estimateDispersions(cds)
fit1 <- fitNbinomGLMs(cds, count ~ type + condition)
fit0 <- fitNbinomGLMs(cds, count ~ type)
res <- data.frame(
filterstat = rowMeans(counts(cds)),
pvalue = nbinomGLMTest(fit1, fit0),
row.names = featureNames(cds))
ind <- which(!is.na(res$pvalue))
res <- res[ind, ]
pvals <- res$pvalue
x1 <- log(res[, 1])
reorder <- rev(order(x1))
x1 <- x1[reorder]
pvals <- pvals[reorder]
hist(pvals)
length(pvals)
pasilla <- cbind(pvalue = pvals, covariate = x1)
saveRDS(pasilla, 'pasilla.p.value.rds')
######################################################################################################################
# Airway
# Himes, Blanca E., et al. "RNA-Seq transcriptome profiling identifies CRISPLD2 as a glucocorticoid responsive gene
# that modulates cytokine function in airway smooth muscle cells." PloS one 9.6 (2014): e99625.
######################################################################################################################
library("DESeq2")
library("dplyr")
data("airway", package = "airway")
dds <- DESeqDataSet(se = airway, design = ~ cell + dex) %>% DESeq
deRes <- as.data.frame(results(dds))
pvals <- deRes$pvalue
ind <- which(!is.na(pvals))
pvals <- pvals[ind]
x <- log(deRes$baseMean)
x <- x[ind]
reorder <- rev(order(x))
x1 <- x[reorder]
pvals <- pvals[reorder]
hist(pvals)
length(pvals)
airway <- cbind(pvalue = pvals, covariate = x1)
saveRDS(airway, 'airway.p.value.rds')
######################################################################################################################
# Yeast
# Dephoure, Noah, and Steven P. Gygi. "Hyperplexing: a method for higher-order multiplexed quantitative proteomics
# provides a map of the dynamic response to rapamycin in yeast." Sci. Signal. 5.217 (2012): rs2-rs2.
######################################################################################################################
library("DESeq2")
library("dplyr")
library("IHWpaper")
proteomics_file <- system.file(
"extdata/real_data",
"science_signaling.csv",
package = "IHWpaper"
)
proteomics_df <- read.csv(proteomics_file, stringsAsFactors = F)
proteomics_df$pvalue <- rank(
proteomics_df$p1,
ties.method="first"
) * proteomics_df$p1 / nrow(proteomics_df)
pvals <- proteomics_df$pvalue
x <- log(proteomics_df$X..peptides)
reorder <- rev(order(x))
x1 <- x[reorder]
pvals <- pvals[reorder]
hist(pvals)
length(pvals)
yeast <- cbind(pvalue = pvals, covariate = x1)
saveRDS(yeast, 'yeast.p.value.rds')
######################################################################################################################
# EWAS - data directly from the paper, included in the package
# Wijnands, Kim PJ, et al. "Genome-wide methylation analysis identifies novel CpG loci for perimembranous ventricular
# septal defects in human." Epigenomics 9.3 (2017): 241-251.
######################################################################################################################
#pvals <- Haven.df[, 'pvalue']
#x1 <- Haven.df[, 'mean']
#
#reorder <- rev(order(x1))
#x1 <- x1[reorder]
#pvals <- pvals[reorder]
#
#ewas <- cbind(pvalue = pvals, covariate = x1)
#rownames(ewas) <- rownames(Haven.df)[reorder]
#
#saveRDS(ewas, 'ewas.p.value.rds')
######################################################################################################################
# MWAS - data retrieved from figshare biom file "deblur_125nt_no_blooms_normed.biom" - doi:10.6084/m9.figshare.6137198
# McDonald, Daniel, et al. "American gut: an open platform for citizen science microbiome research." mSystems (2018) :
# e00031-18.
######################################################################################################################
# The downloaded data were already normalized
# Rare OTUs occured in less than or equal to 20 subjects were excluded (>0.2% prevalence) in the data
# Load the American Gut metadata, OTU abundance table, and OTU taxonomy
meta.dat <- readRDS(file='MWAS/amgut.meta.dat.rds')
otu.tab <- readRDS(file='MWAS/amgut.otu.dat.rds')
otu.name <- readRDS(file='MWAS/amgut.otu.name.rds')
# We select subjects from United States and adults
ind <- meta.dat$bmi_cat %in% c('Normal') & meta.dat$country_residence %in% c('United States') &
  meta.dat$age_cat %in% c('teen', '20s', '30s', '40s', '50s', '60s') & meta.dat$sex %in% c('female', 'male')
otu.tab <- otu.tab[, ind]
meta.dat <- meta.dat[ind, ]
sex <- meta.dat$sex
sex <- factor(sex)
# NOTE(review): this overwrites meta.dat with the sex factor -- likely meant
# `sex <- droplevels(sex)`. meta.dat is not referenced again below, so the
# downstream results are unaffected, but the assignment should be fixed.
meta.dat <- droplevels(sex)
# Further discard OTU occuring in less than 5 subjects
ind <- rowSums(otu.tab != 0) >= 5
otu.tab <- otu.tab[ind, ]
otu.name <- otu.name[ind, ]
# Wilcox rank sum test
# Per-OTU two-sample test of abundance between sexes
pvals <- apply(otu.tab, 1, function (x) wilcox.test(x ~ sex)$p.value)
# Covariate: OTU prevalence (number of subjects with nonzero abundance)
x <- rowSums(otu.tab != 0)
reorder <- rev(order(x))
x1 <- x[reorder]
pvals <- pvals[reorder]
hist(pvals)
length(pvals)
# Save p-values, covariate, and taxonomy together, keyed by OTU id
mwas <- data.frame(pvalue = pvals, covariate = x1, otu.name[reorder, ])
rownames(mwas) <- rownames(otu.tab)[reorder]
saveRDS(mwas, 'mwas.p.value.rds')
# Compare to traditional filtering based method
# NOTE(review): qvalue() comes from the Bioconductor `qvalue` package, which
# is never attached in this script -- confirm it is loaded before running.
otu.tab2 <- otu.tab[rowMeans(otu.tab != 0) >= 0.1, ]
pvals2 <- apply(otu.tab2, 1, function (x) wilcox.test(x ~ sex)$p.value)
sum(qvalue(pvals2)$qvalue <= 0.1) # 116
sum(p.adjust(pvals2, 'fdr') <= 0.1) # 85
otu.tab2 <- otu.tab[rowMeans(otu.tab != 0) >= 0.2, ]
pvals2 <- apply(otu.tab2, 1, function (x) wilcox.test(x ~ sex)$p.value)
sum(qvalue(pvals2)$qvalue <= 0.1) # 71
sum(p.adjust(pvals2, 'fdr') <= 0.1) # 50
######################################################################################################################
|
# ACF plot in ggplot2 ---------------------------------------------------------
#Note that this code is heavily based on the following code:
#https://github.com/dewittpe/qwraps2/blob/master/R/qacf.R#L79
#
# Compute the (partial) autocorrelation function of a series and draw it as a
# bar chart with two-sided white-noise significance bounds.
#
# Args:
#   x:          numeric vector or time series.
#   conf_level: confidence level for the significance bounds (default 0.95).
#   type:       "acf" (default) or "pacf".
# Returns: a ggplot object.
ggacf = function(x, conf_level = 0.95, type = "acf"){
  library(stats); library(ggplot2)
  if (type == "pacf"){
    acf_data = stats::pacf(x, plot = FALSE)
  }else{
    acf_data = stats::acf(x, plot = FALSE)
  }
  # Normal-approximation significance bound for the ACF of white noise
  signif = stats::qnorm((1 - conf_level)/2)/sqrt(acf_data$n.used)
  # Flatten the lag/acf arrays into a plain data frame. This replaces the
  # original reshaping via the deprecated dplyr::as_data_frame() and
  # tidyr::gather() (both superseded) with base R.
  acf_df = data.frame(lag = as.vector(acf_data$lag),
                      value = as.vector(acf_data$acf))
  acf_df$Significant = factor(abs(acf_df$value) > abs(signif))
  # aes() replaces the deprecated ggplot2::aes_string(); the data frame is
  # passed directly to ggplot() instead of being attached afterwards via `%+%`
  ggplot2::ggplot(acf_df, ggplot2::aes(x = lag, y = value, fill = Significant)) +
    ggplot2::geom_bar(stat = "identity", position = "identity") +
    ggplot2::ylab("Correlation") +
    ggplot2::xlab("Lag") +
    ggplot2::geom_hline(yintercept = signif) +
    ggplot2::geom_hline(yintercept = -signif) +
    ggplot2::coord_cartesian(ylim = c(-1, 1))
}
# Descriptive Plots Function -----------------------------------------
# Draws a 2x2 panel of descriptive plots for a price series: the series over
# time, a histogram of the values, and its ACF and PACF (via ggacf()).
#
# Args:
#   timeseries: numeric vector of prices.
#   dates:      optional x-axis values; defaults to the observation index.
#   draw:       if TRUE (default) render the panel on the current device;
#               if FALSE the panel is only built and returned.
# Returns: (invisibly) the arranged grob, so it can be saved or re-drawn.
Descriptive_Plots = function(timeseries, dates, draw = TRUE){
  library(stats);library(tseries);library(ggplot2);library(grid);library(gridExtra)
  if (missing(dates)){
    dates = 1:length(timeseries)
  }
  Data = data.frame(dates, timeseries)
  Plot_Data = ggplot(Data, aes(dates, timeseries)) + geom_line(colour="#211a52") +
    xlab("") + ylab("Price") + ggtitle("Price vs. Time")
  Plot_PACF = ggacf(timeseries, type ="pacf") + ggtitle("Partial Autocorrelation Plot")
  Plot_ACF = ggacf(timeseries) + ggtitle("Autocorrelation Plot")
  Plot_Hist = ggplot(Data, aes(timeseries)) + geom_histogram(fill ="white" ,col = "#211a52",bins = 50) +
    xlab ("Price") + ylab("Observations") + ggtitle("Histogram of Prices")
  # Bug fix: the `draw` argument was previously ignored and the panel was
  # always rendered; now it is only drawn on request.
  panel = gridExtra::arrangeGrob(Plot_Data, Plot_Hist, Plot_ACF, Plot_PACF, nrow = 2, ncol = 2)
  if (isTRUE(draw)) {
    grid::grid.newpage()
    grid::grid.draw(panel)
  }
  invisible(panel)
}
# Descriptive Statistics Function ------------------------------------
# Runs a battery of diagnostic tests on a time series and tabulates them:
#   * Augmented Dickey-Fuller (null: unit root, i.e. non-stationarity)
#   * Jarque-Bera (null: normally distributed)
#   * Ljung-Box (null: no autocorrelation) at every lag in `lags`
#
# Args:
#   timeseries: numeric vector / ts object to test.
#   lags:       integer vector of lags for the Ljung-Box test.
#   alpha:      significance level used for the "Reject Null" column.
#   tex:        if TRUE, also print the table as LaTeX via xtable.
# Returns: a data.frame with one row per test (statistic, p-value, verdict).
Descriptive_Statistics = function(timeseries, lags, alpha = 0.05, tex = F){
  library(tseries); library(stats)
  #Augmented Dickey Fueller Test:
  ADF = adf.test(timeseries)
  Names = "Augmented Dickey Fueller"
  Statistics = round(ADF$statistic[[1]], 3)
  Pvalue = round(ADF$p.value[[1]], 3)
  Reject = if (ADF$p.value[[1]] > alpha) "No" else "Yes"
  #Jarque-Bera Test
  JB = jarque.bera.test(timeseries)
  Names = c(Names, "Jarque-Bera Test")
  Statistics = c(Statistics, round(JB$statistic[[1]], 3))
  Pvalue = c(Pvalue, round(JB$p.value[[1]], 3))
  Reject = c(Reject, if (JB$p.value[[1]] > alpha) "No" else "Yes")
  #Ljung-Box test: one row per requested lag (single loop replaces the
  #original pair of loops; the row order is unchanged)
  for (i in lags){
    LB = Box.test(timeseries, lag = i, type = "Ljung-Box")
    Names = c(Names, paste("Ljung-Box lag:", i, sep = " "))
    Statistics = c(Statistics, round(LB$statistic[[1]], 3))
    Pvalue = c(Pvalue, round(LB$p.value[[1]], 3))
    Reject = c(Reject, if (LB$p.value[[1]] > alpha) "No" else "Yes")
  }
  Data_Table = data.frame('Test-Type' = Names, 'Statistic' = Statistics,
                          'p-value' = Pvalue, 'Reject Null' = Reject)
  if (isTRUE(tex)){
    # Bug fix: xtable was referenced without its package ever being loaded
    # (only tseries/stats are attached above), so tex = TRUE always failed
    # unless the caller had attached xtable; use an explicit namespace call.
    print(xtable::xtable(Data_Table))
  }
  return(Data_Table)
}
|
/Descriptive Function.R
|
no_license
|
laranea/Bachelor
|
R
| false
| false
| 3,908
|
r
|
# ACF plot in ggplot2 ---------------------------------------------------------
#Note that this code is heavily based on the following code:
#https://github.com/dewittpe/qwraps2/blob/master/R/qacf.R#L79
ggacf = function(x, conf_level = 0.95, type = "acf"){
library(dplyr); library(stats); library(ggplot2);library(tidyr)
if (type == "pacf"){
acf_data = stats::pacf(x, plot = F)
}else{
acf_data = stats::acf(x, plot = F)
}
signif = stats::qnorm((1 - conf_level)/2)/sqrt(acf_data$n.used)
lags = dplyr::as_data_frame(acf_data$lag)
acfs = dplyr::as_data_frame(acf_data$acf)
acf_df = dplyr::bind_cols(tidyr::gather(lags, key = 'key', value = 'lag'),
tidyr::gather(acfs, key = 'key', value = 'value')["value"])
acf_df = dplyr::mutate(acf_df, Significant = factor(abs(.data$value) > abs(signif)))
g = ggplot2::ggplot() +
ggplot2::aes_string(x = "lag", y = "value") +
ggplot2::geom_bar(stat = "identity", position = "identity") +
ggplot2::ylab("Correlation") +
ggplot2::xlab("Lag")+
ggplot2::geom_hline(yintercept = signif) +
ggplot2::geom_hline(yintercept = -signif) +
ggplot2::aes_string(fill = "Significant") +
ggplot2::coord_cartesian(ylim = c(-1,1))
g = ggplot2::`%+%`(g, acf_df)
g
}
# Descriptive Plots Function -----------------------------------------
Descriptive_Plots = function(timeseries, dates, draw = TRUE){
library(stats);library(tseries);library(ggplot2);library(grid);library(gridExtra)
if (missing(dates)){
dates = 1:length(timeseries)
}
#Theme = theme(plot.title = element_text(family = "Trebuchet MS", color="#666666", face="bold", hjust=0)) +
# theme(axis.title = element_text(family = "Trebuchet MS", color="#666666", face="bold"))
Data = data.frame(dates,timeseries)
Plot_Data = ggplot(Data, aes(dates, timeseries)) + geom_line(colour="#211a52") + xlab("") + ylab("Price") + ggtitle("Price vs. Time")
Plot_PACF = ggacf(timeseries, type ="pacf") + ggtitle("Partial Autocorrelation Plot")
Plot_ACF = ggacf(timeseries) + ggtitle("Autocorrelation Plot")
Plot_Hist = ggplot(Data, aes(timeseries)) + geom_histogram(fill ="white" ,col = "#211a52",bins = 50) +
xlab ("Price") + ylab("Observations") + ggtitle("Histogram of Prices")
grid.arrange(Plot_Data, Plot_Hist, Plot_ACF, Plot_PACF , nrow = 2, ncol=2)
}
# Descriptive Statistics Function ------------------------------------
Descriptive_Statistics = function(timeseries, lags, alpha = 0.05, tex = F){
library(tseries); library(stats)
#Augmented Dickey Fueller Test:
ADF = adf.test(timeseries)
Statistics = c(round(ADF$statistic[[1]],3))
Pvalue = c(round(ADF$p.value[[1]],3))
Names = c("Augmented Dickey Fueller")
if (ADF$p.value[[1]]>alpha){
Reject = c("No")
}else{
Reject = c("Yes")
}
#Jarque-Bera Test
JB = jarque.bera.test(timeseries)
Statistics = c(Statistics, round(JB$statistic[[1]],3))
Pvalue = c(Pvalue, round(JB$p.value[[1]],3))
Names = c(Names, "Jarque-Bera Test")
if (JB$p.value[[1]]>alpha){
Reject = c(Reject, "No")
}else{
Reject = c(Reject, "Yes")
}
#Ljung-Box test
LB_result = list()
for (i in lags){
LB_result[[paste("Lag",i,sep = " ")]] = Box.test(timeseries, lag = i, type = "Ljung-Box")
Names = c(Names, paste("Ljung-Box lag:",i,sep = " "))
}
for (j in LB_result){
Statistics = c(Statistics, round(j$statistic[[1]],3))
Pvalue = c(Pvalue, round(j$p.value[[1]],3))
if (j$p.value[[1]]>alpha){
Reject = c(Reject, "No")
}else{
Reject = c(Reject, "Yes")
}
}
Data_Table = data.frame('Test-Type' = Names, 'Statistic' = Statistics, 'p-value' = Pvalue, 'Reject Null' = Reject)
if (tex == T){
print(xtable(Data_Table))
}
return(Data_Table)
}
|
#' Convert a tile coordinate to a lon/lat coordinate
#'
#' Convert slippy-map tile coordinates, optionally offset by a pixel position
#' inside the tile, to a longitude/latitude pair in degrees at a given zoom
#' level. Decimal tile coordinates are accepted.
#'
#' @param X horizontal map-tile coordinate (0 is map-left)
#' @param Y vertical map-tile coordinate (0 is map-top)
#' @param zoom zoom
#' @param x within tile x (0 is tile-left)
#' @param y within tile y (0 it tile-top)
#' @param xpix width of tile in pixels
#' @param ypix length of tile in pixels
#' @return a data frame with columns lon and lat (in degrees)
#' @author David Kahle \email{david@@kahle.io}, based on
#'   [RgoogleMaps::XY2LatLon()] by Markus Loecher of Sense Networks
#'   \email{markus@@sensenetworks.com}
#' @seealso \url{http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames}
#' @export
#' @examples
#'
#'
#' \dontrun{
#' XY2LonLat(480, 845, zoom = 11)
#' XY2LonLat(0, 0, zoom = 1)
#' XY2LonLat(0, 0, 255, 255, zoom = 1)
#' XY2LonLat(0, 0, 255, 255, zoom = 1)
#'
#' }
#'
XY2LonLat <- function(X, Y, zoom, x = 0, y = 0, xpix = 255, ypix = 255){
  n_tiles <- 2^zoom
  # fractional tile coordinates, including the within-tile pixel offset
  tile_x <- X + x/xpix
  tile_y <- Y + y/ypix
  # longitude is linear in the horizontal tile coordinate
  lon_deg <- 360 * tile_x/n_tiles - 180
  # latitude: invert the Web-Mercator projection, then pick the branch of
  # arcsin that falls in (-pi/2, pi/2]
  merc_y <- tanh(pi * (1 - 2 * tile_y/n_tiles))
  candidates <- 2 * pi * (-1:1) + asin(merc_y)
  keep <- -pi/2 < candidates & candidates <= pi/2
  lat_deg <- candidates[keep] * 180/pi
  data.frame(lon = lon_deg, lat = lat_deg)
}
|
/R/XY2LonLat.R
|
permissive
|
erhard1/ggmap
|
R
| false
| false
| 1,336
|
r
|
#' Convert a tile coordinate to a lon/lat coordinate
#'
#' Convert a tile coordinate to a lon/lat coordinate for a given zoom. Decimal
#' tile coordinates are accepted.
#'
#' @param X horizontal map-tile coordinate (0 is map-left)
#' @param Y vertical map-tile coordinate (0 is map-top)
#' @param zoom zoom
#' @param x within tile x (0 is tile-left)
#' @param y within tile y (0 it tile-top)
#' @param xpix width of tile in pixels
#' @param ypix length of tile in pixels
#' @return a data frame with columns lon and lat (in degrees)
#' @author David Kahle \email{david@@kahle.io}, based on
#' [RgoogleMaps::XY2LatLon()] by Markus Loecher of Sense Networks
#' \email{markus@@sensenetworks.com}
#' @seealso \url{http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames}
#' @export
#' @examples
#'
#'
#' \dontrun{
#' XY2LonLat(480, 845, zoom = 11)
#' XY2LonLat(0, 0, zoom = 1)
#' XY2LonLat(0, 0, 255, 255, zoom = 1)
#' XY2LonLat(0, 0, 255, 255, zoom = 1)
#'
#' }
#'
XY2LonLat <- function(X, Y, zoom, x = 0, y = 0, xpix=255, ypix=255){
n <- 2^zoom
lon_deg <- (X+x/xpix) / n * 360.0 - 180.0
tmp <- tanh( pi * (1 - 2 * (Y+y/ypix) / n))
ShiftLat <- function(tmp) {
lat <- 2 * pi * (-1:1) + asin(tmp)
lat[which(-pi/2 < lat & lat <= pi/2)] * 180/pi
}
lat_deg <- ShiftLat(tmp)
data.frame(lon = lon_deg, lat = lat_deg)
}
|
######################################
#Calculate MSY--------------------------------------------------
#This code runs CatchMSY on fisheries
######################################
# RunCatchMSY: run the Catch-MSY method over every eligible stock in `Data`,
# in parallel where possible, and merge the results back into the data.
#
# Args:
#   Data            data frame of stock/catch records; must carry the columns
#                   used below (MSY, FvFmsy, BvBmsy, BvBmsySD, Dbase, phi,
#                   Year, IdOrig, and the column named by the global `IdVar`).
#   ErrorSize       fallback SD for the B/Bmsy prior when no BvBmsySD exists.
#   sigR            recruitment process-error SD forwarded to the CMSY runs.
#   Smooth, Display, BestValues, ManualFinalYear, n
#                   options forwarded unchanged to MatrixSnowCatchMSY.
#   NumCPUs         cores to use; 1 runs serially and writes a diagnostics PDF.
#   CatchMSYTrumps  if TRUE, CMSY estimates take precedence over existing ones.
#
# Returns: list(MsyData = Data with CMSY outputs merged in by Id-Year,
#               PossibleParams = accepted parameter draws per stock).
#
# NOTE(review): depends on the globals `IdVar` and `FigureFolder` and on the
# helper `MatrixSnowCatchMSY` (and snowfall's sf* functions on Windows), all
# defined elsewhere -- confirm they are in scope before calling.
RunCatchMSY <-
  function(Data,
           ErrorSize = 0.85,
           sigR = 0,
           Smooth = F,
           Display = F,
           BestValues = 1,
           ManualFinalYear = 0,
           n,
           NumCPUs,
           CatchMSYTrumps = T)
  {
    Data$RanCatchMSY <- FALSE
    # Flag RAM stocks that already carry reference-point estimates
    Data$HasRamMSY <- is.na(Data$MSY) == F & Data$Dbase == 'RAM'
    Data$HasRamFvFmsy <- is.na(Data$FvFmsy) == F & Data$Dbase == 'RAM'
    Data$HasRamBvBmsy <- is.na(Data$BvBmsy) == F & Data$Dbase == 'RAM'
    # B/K at BMSY for a Pella-Tomlinson production model with shape phi
    Data$BtoKRatio <- 1 / ((Data$phi + 1) ^ (1 / Data$phi))
    MsyData <- Data
    # Placeholder columns to be filled with CMSY outputs
    MsyData$g <- NA
    MsyData$k <- NA
    MsyData$MSYLogSd <- NA
    MsyData$gLogSd <- NA
    MsyData$KLogSd <- NA
    MsyData$CatchMSYBvBmsy <- NA
    MsyData$CatchMSYBvBmsy_LogSd <- NA
    # Common SD used when a stock has no B/Bmsy SD of its own
    CommonError <- mean(MsyData$BvBmsySD, na.rm = T)
    if (is.na(CommonError))
    {
      CommonError <- ErrorSize
    }
    # find mean range between final bio priors to pass to SnowCatchMSY_Matrix for stocks with finalbio>1
    MeanRange <-
      MsyData[is.na(MsyData$BvBmsySD) == F &
                MsyData$Year == 2012, c('IdOrig', 'BvBmsy', 'BvBmsySD', 'BtoKRatio')]
    MeanRange$BoverK <- pmin(1, MeanRange$BvBmsy * MeanRange$BtoKRatio)
    MeanRange <- MeanRange[MeanRange$BoverK < 0.95, ]
    MeanRange$Bioerror <- MeanRange$BvBmsySD * MeanRange$BtoKRatio
    MeanRange$Bioerror[is.na(MeanRange$Bioerror)] <- CommonError
    # Interquartile band of the final-biomass prior, clamped to [0, 1]
    MeanRange$FbLow <-
      pmax(0, qnorm(0.25, MeanRange$BoverK, MeanRange$Bioerror))
    MeanRange$FbHigh <-
      pmin(1, qnorm(0.75, MeanRange$BoverK, MeanRange$Bioerror))
    MeanRange$BioRange <- MeanRange$FbHigh - MeanRange$FbLow
    CommonRange <-
      mean(MeanRange$BioRange, na.rm = T) # Common range to apply to all stocks with B/K >=0.95
    # Stocks to run: those without a RAM MSY and with a usable B/Bmsy
    stock_id <-
      unique((Data[, IdVar][Data$HasRamMSY == F &
                              Data$BvBmsy != 999 & is.infinite(Data$BvBmsy) == F]))
    # Parallel execution: fork-based mclapply on unix, snowfall sockets on Windows
    if (NumCPUs > 1)
    {
      if (Sys.info()[1] != 'Windows')
      {
        CMSYResults <-
          (
            mclapply(
              1:length(stock_id),
              MatrixSnowCatchMSY,
              mc.cores = NumCPUs,
              Data = Data,
              CommonError = CommonError,
              CommonRange = CommonRange,
              sigR = sigR,
              Smooth = Smooth,
              Display = Display,
              BestValues = BestValues,
              ManualFinalYear = ManualFinalYear,
              n = n,
              NumCPUs = NumCPUs,
              CatchMSYTrumps = CatchMSYTrumps,
              stock_id = stock_id,
              IdVar = IdVar,
              mc.cleanup = T
            )
          )
      }
      if (Sys.info()[1] == 'Windows')
      {
        sfInit(parallel = TRUE, cpus = NumCPUs)
        sfExportAll()
        sfLibrary(dplyr)
        CMSYResults <-
          (
            sfClusterApplyLB(
              1:length(stock_id),
              MatrixSnowCatchMSY,
              Data = Data,
              CommonError = CommonError,
              CommonRange = CommonRange,
              sigR = sigR,
              Smooth = Smooth,
              Display = Display,
              BestValues = BestValues,
              ManualFinalYear = ManualFinalYear,
              n = n,
              NumCPUs = NumCPUs,
              CatchMSYTrumps = CatchMSYTrumps,
              stock_id = stock_id,
              IdVar = IdVar
            )
          )
        sfStop()
      }
    }
    # Serial execution: also writes per-stock diagnostic plots to a PDF
    if (NumCPUs == 1)
    {
      pdf(file = paste(FigureFolder, 'Catch-MSY Diagnostics.pdf', sep = ''))
      CMSYResults <-
        (
          mclapply(
            1:length(stock_id),
            MatrixSnowCatchMSY,
            mc.cores = NumCPUs,
            Data = Data,
            CommonError = CommonError,
            CommonRange = CommonRange,
            sigR = sigR,
            Smooth = Smooth,
            Display = Display,
            BestValues = BestValues,
            ManualFinalYear = ManualFinalYear,
            n = n,
            NumCPUs = NumCPUs,
            CatchMSYTrumps = CatchMSYTrumps,
            stock_id = stock_id,
            IdVar = IdVar,
            mc.cleanup = T
          )
        )
      # CMSYResults <- (mclapply(1:length(stock_id), SnowCatchMSY,mc.cores=NumCPUs,Data=Data,CommonError=CommonError,sigR=sigR,Smooth=Smooth,Display=Display,BestValues=BestValues,ManualFinalYear=ManualFinalYear,n=n,NumCPUs=NumCPUs,
      #                                 CatchMSYTrumps=CatchMSYTrumps,stock_id=stock_id,IdVar=IdVar))
      dev.off()
      #     pdf(file=paste(FigureFolder,'Catch-MSY Diagnostics Normal.pdf',sep=''))
      #
      #
      #     CMSYResults <- (mclapply(1, SnowCatchMSY,mc.cores=NumCPUs,Data=Data,CommonError=CommonError,sigR=sigR,Smooth=Smooth,Display=Display,BestValues=BestValues,ManualFinalYear=ManualFinalYear,n=n,NumCPUs=NumCPUs,
      #                              CatchMSYTrumps=CatchMSYTrumps,stock_id=stock_id,IdVar=IdVar))
      #
      #     dev.off()
    }
    # Collect the per-stock outputs: PossibleParams (accepted draws) and
    # CatchMSY (per-year results), dropping stocks that returned only NAs
    CmsyStore <-
      as.data.frame(matrix(
        NA,
        nrow = 0,
        ncol = dim(CMSYResults[[1]]$CatchMSY)[2]
      ))
    PossibleParams <-
      lapply(seq(along = CMSYResults), function(i)
        CMSYResults[[i]]$PossibleParams)
    EmptyParams <-
      lapply(seq(along = PossibleParams), function(i)
        sum(is.na(PossibleParams[[i]])) == 0)
    HasData <- unlist(EmptyParams)
    PossibleParams <- PossibleParams[which(HasData == T)]
    CmsyStore <-
      lapply(seq(along = CMSYResults), function(i)
        CMSYResults[[i]]$CatchMSY)
    PossibleParams <- bind_rows(PossibleParams)
    # Keep only the parameter columns of interest when any run succeeded
    if (dim(PossibleParams)[1] > 0 &
        sum(PossibleParams$Fail == 0, na.rm = T) >= 1)
    {
      PossibleParams <-
        PossibleParams[, c('IdOrig',
                           'g',
                           'phi',
                           'K',
                           'MSY',
                           'FinalFvFmsy',
                           'FinalBvBmsy')]
    }
    CmsyStore <- bind_rows(CmsyStore)
    # Overwrite the matching Id-Year rows of MsyData with the CMSY results
    ConCatDat <- paste(MsyData$IdOrig, MsyData$Year, sep = '-')
    ConCatCmsy <- paste(CmsyStore$IdOrig, CmsyStore$Year, sep = '-')
    Where <- ConCatDat %in% ConCatCmsy
    MsyData[Where, ] <- CmsyStore
    return(list(MsyData = MsyData, PossibleParams = PossibleParams))
  } #Close function
|
/Functions/RunCatchMSY.R
|
no_license
|
kmillage/Global-Fishery-Potential
|
R
| false
| false
| 6,722
|
r
|
######################################
#Calculate MSY--------------------------------------------------
#This code runs CatchMSY on fisheries
######################################
RunCatchMSY <-
function(Data,
ErrorSize = 0.85,
sigR = 0,
Smooth = F,
Display = F,
BestValues = 1,
ManualFinalYear = 0,
n,
NumCPUs,
CatchMSYTrumps = T)
{
Data$RanCatchMSY <- FALSE
Data$HasRamMSY <- is.na(Data$MSY) == F & Data$Dbase == 'RAM'
Data$HasRamFvFmsy <- is.na(Data$FvFmsy) == F & Data$Dbase == 'RAM'
Data$HasRamBvBmsy <- is.na(Data$BvBmsy) == F & Data$Dbase == 'RAM'
Data$BtoKRatio <- 1 / ((Data$phi + 1) ^ (1 / Data$phi))
MsyData <- Data
MsyData$g <- NA
MsyData$k <- NA
MsyData$MSYLogSd <- NA
MsyData$gLogSd <- NA
MsyData$KLogSd <- NA
MsyData$CatchMSYBvBmsy <- NA
MsyData$CatchMSYBvBmsy_LogSd <- NA
CommonError <- mean(MsyData$BvBmsySD, na.rm = T)
if (is.na(CommonError))
{
CommonError <- ErrorSize
}
# find mean range between final bio priors to pass to SnowCatchMSY_Matrix for stocks with finalbio>1
MeanRange <-
MsyData[is.na(MsyData$BvBmsySD) == F &
MsyData$Year == 2012, c('IdOrig', 'BvBmsy', 'BvBmsySD', 'BtoKRatio')]
MeanRange$BoverK <- pmin(1, MeanRange$BvBmsy * MeanRange$BtoKRatio)
MeanRange <- MeanRange[MeanRange$BoverK < 0.95, ]
MeanRange$Bioerror <- MeanRange$BvBmsySD * MeanRange$BtoKRatio
MeanRange$Bioerror[is.na(MeanRange$Bioerror)] <- CommonError
MeanRange$FbLow <-
pmax(0, qnorm(0.25, MeanRange$BoverK, MeanRange$Bioerror))
MeanRange$FbHigh <-
pmin(1, qnorm(0.75, MeanRange$BoverK, MeanRange$Bioerror))
MeanRange$BioRange <- MeanRange$FbHigh - MeanRange$FbLow
CommonRange <-
mean(MeanRange$BioRange, na.rm = T) # Common range to apply to all stocks with B/K >=0.95
stock_id <-
unique((Data[, IdVar][Data$HasRamMSY == F &
Data$BvBmsy != 999 & is.infinite(Data$BvBmsy) == F]))
if (NumCPUs > 1)
{
if (Sys.info()[1] != 'Windows')
{
CMSYResults <-
(
mclapply(
1:length(stock_id),
MatrixSnowCatchMSY,
mc.cores = NumCPUs,
Data = Data,
CommonError = CommonError,
CommonRange = CommonRange,
sigR = sigR,
Smooth = Smooth,
Display = Display,
BestValues = BestValues,
ManualFinalYear = ManualFinalYear,
n = n,
NumCPUs = NumCPUs,
CatchMSYTrumps = CatchMSYTrumps,
stock_id = stock_id,
IdVar = IdVar,
mc.cleanup = T
)
)
}
if (Sys.info()[1] == 'Windows')
{
sfInit(parallel = TRUE, cpus = NumCPUs)
sfExportAll()
sfLibrary(dplyr)
CMSYResults <-
(
sfClusterApplyLB(
1:length(stock_id),
MatrixSnowCatchMSY,
Data = Data,
CommonError = CommonError,
CommonRange = CommonRange,
sigR = sigR,
Smooth = Smooth,
Display = Display,
BestValues = BestValues,
ManualFinalYear = ManualFinalYear,
n = n,
NumCPUs = NumCPUs,
CatchMSYTrumps = CatchMSYTrumps,
stock_id = stock_id,
IdVar = IdVar
)
)
sfStop()
}
}
if (NumCPUs == 1)
{
pdf(file = paste(FigureFolder, 'Catch-MSY Diagnostics.pdf', sep = ''))
CMSYResults <-
(
mclapply(
1:length(stock_id),
MatrixSnowCatchMSY,
mc.cores = NumCPUs,
Data = Data,
CommonError = CommonError,
CommonRange = CommonRange,
sigR = sigR,
Smooth = Smooth,
Display = Display,
BestValues = BestValues,
ManualFinalYear = ManualFinalYear,
n = n,
NumCPUs = NumCPUs,
CatchMSYTrumps = CatchMSYTrumps,
stock_id = stock_id,
IdVar = IdVar,
mc.cleanup = T
)
)
# CMSYResults <- (mclapply(1:length(stock_id), SnowCatchMSY,mc.cores=NumCPUs,Data=Data,CommonError=CommonError,sigR=sigR,Smooth=Smooth,Display=Display,BestValues=BestValues,ManualFinalYear=ManualFinalYear,n=n,NumCPUs=NumCPUs,
# CatchMSYTrumps=CatchMSYTrumps,stock_id=stock_id,IdVar=IdVar))
dev.off()
# pdf(file=paste(FigureFolder,'Catch-MSY Diagnostics Normal.pdf',sep=''))
#
#
# CMSYResults <- (mclapply(1, SnowCatchMSY,mc.cores=NumCPUs,Data=Data,CommonError=CommonError,sigR=sigR,Smooth=Smooth,Display=Display,BestValues=BestValues,ManualFinalYear=ManualFinalYear,n=n,NumCPUs=NumCPUs,
# CatchMSYTrumps=CatchMSYTrumps,stock_id=stock_id,IdVar=IdVar))
#
# dev.off()
}
CmsyStore <-
as.data.frame(matrix(
NA,
nrow = 0,
ncol = dim(CMSYResults[[1]]$CatchMSY)[2]
))
PossibleParams <-
lapply(seq(along = CMSYResults), function(i)
CMSYResults[[i]]$PossibleParams)
EmptyParams <-
lapply(seq(along = PossibleParams), function(i)
sum(is.na(PossibleParams[[i]])) == 0)
HasData <- unlist(EmptyParams)
PossibleParams <- PossibleParams[which(HasData == T)]
CmsyStore <-
lapply(seq(along = CMSYResults), function(i)
CMSYResults[[i]]$CatchMSY)
PossibleParams <- bind_rows(PossibleParams)
if (dim(PossibleParams)[1] > 0 &
sum(PossibleParams$Fail == 0, na.rm = T) >= 1)
{
PossibleParams <-
PossibleParams[, c('IdOrig',
'g',
'phi',
'K',
'MSY',
'FinalFvFmsy',
'FinalBvBmsy')]
}
CmsyStore <- bind_rows(CmsyStore)
ConCatDat <- paste(MsyData$IdOrig, MsyData$Year, sep = '-')
ConCatCmsy <- paste(CmsyStore$IdOrig, CmsyStore$Year, sep = '-')
Where <- ConCatDat %in% ConCatCmsy
MsyData[Where, ] <- CmsyStore
return(list(MsyData = MsyData, PossibleParams = PossibleParams))
} #Close function
|
censIndCR = function(target, dataset, xIndex, csIndex, wei = NULL, dataInfo=NULL, univariateModels=NULL, hash = FALSE, stat_hash=NULL, pvalue_hash=NULL,robust=FALSE){
  # Conditional independence test of column xIndex and a censored (survival)
  # target, given the conditioning set csIndex, via a log-likelihood ratio
  # test between nested Cox proportional-hazards models.
  #
  # Args:
  #   target:      a survival::Surv object (time, status).
  #   dataset:     matrix/data.frame of candidate predictors (columns).
  #   xIndex:      column index of the variable under test.
  #   csIndex:     column indices of the conditioning set (0/NA = empty set).
  #   wei:         optional case weights passed to coxph().
  #   hash:        if TRUE, look up / store results in stat_hash/pvalue_hash.
  # Returns: list(pvalue (log scale), stat, flag, stat_hash, pvalue_hash);
  #   flag is 0 when the test could not be carried out.
  if ( !survival::is.Surv(target) )  stop('The survival test can not be performed without a Surv object target');
  # treat NA conditioning indices as "no conditioning variable"
  csIndex[ which( is.na(csIndex) ) ] = 0;
  if ( hash ) {
    csIndex2 = csIndex[which(csIndex != 0)]
    # BUG FIX: the sorted indices were assigned to `csindex2` (lower case)
    # and never used, so the memoisation key depended on the order of
    # csIndex; sorting canonicalises the key so {2,3} and {3,2} collide.
    csIndex2 = sort(csIndex2)
    xcs = c(xIndex, csIndex2)
    key = paste(as.character(xcs) , collapse=" ");
    if ( is.null(stat_hash[[key]]) == FALSE ) {
      stat = stat_hash[[key]];
      pvalue = pvalue_hash[[key]];
      flag = 1;
      results <- list(pvalue = pvalue, stat = stat, flag = flag, stat_hash=stat_hash, pvalue_hash=pvalue_hash);
      return(results);
    }
  }
  #initialization: these values will be returned whether the test cannot be carried out
  pvalue = log(1);
  stat = 0;
  flag = 0;
  results <- list(pvalue = pvalue, stat = stat, flag = flag, stat_hash=stat_hash, pvalue_hash=pvalue_hash);
  cox_results = NULL;
  cox_results_full = NULL;
  event = target[,2]
  numCases = dim(dataset)[1];
  if ( length(event) == 0 )  event = vector('numeric',numCases) + 1;
  # BUG FIX: the original `is.na(csIndex) || ... || csIndex == 0` errors for
  # conditioning sets of length > 1 in modern R (`||` requires scalar
  # operands); all() selects the marginal branch only when the whole set is
  # the empty-set sentinel.
  if ( length(csIndex) == 0 || all(csIndex == 0) ) {
    # marginal test: null model vs. target ~ x
    options(warn = -1)
    cox_results <- survival::coxph(target ~ dataset[, xIndex], weights = wei )
    res <- anova(cox_results)
    dof <- res[2, 3]
    stat <- res[2, 2]
    pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE);
  } else {
    # conditional test: target ~ cs  vs.  target ~ cs + x
    options(warn = -1)
    cox_results <- survival::coxph(target ~ ., data = as.data.frame( dataset[ , csIndex] ), weights = wei)
    cox_results_full <- survival::coxph(target ~ ., data = as.data.frame( dataset[ , c(csIndex, xIndex)] ), weights = wei)
    res = anova(cox_results_full, cox_results)
    stat = res[2, 2]
    dF = res[2, 3]
    pvalue = pchisq(stat, dF, lower.tail = FALSE, log.p = TRUE)
  }
  flag = 1;
  if ( is.na(pvalue) || is.na(stat) ) {
    pvalue = log(1);
    stat = 0;
    flag = 0;
  } else if ( hash ) {
    # cache the freshly computed result under the canonical key
    stat_hash[[key]] <- stat;
    pvalue_hash[[key]] <- pvalue;
  }
  results <- list(pvalue = pvalue, stat = stat, flag = flag , stat_hash=stat_hash, pvalue_hash=pvalue_hash);
  return(results);
}
|
/R/censIndCR.R
|
no_license
|
JokerWhy233/MXM
|
R
| false
| false
| 2,661
|
r
|
censIndCR = function(target, dataset, xIndex, csIndex, wei = NULL, dataInfo=NULL, univariateModels=NULL, hash = FALSE, stat_hash=NULL, pvalue_hash=NULL,robust=FALSE){
# Conditional independence test based on the Log Likelihood ratio test
if ( !survival::is.Surv(target) ) stop('The survival test can not be performed without a Surv object target');
csIndex[ which( is.na(csIndex) ) ] = 0;
if ( hash ) {
csIndex2 = csIndex[which(csIndex!=0)]
csindex2 = sort(csIndex2)
xcs = c(xIndex,csIndex2)
key = paste(as.character(xcs) , collapse=" ");
if ( is.null(stat_hash[[key]]) == FALSE ) {
stat = stat_hash[[key]];
pvalue = pvalue_hash[[key]];
flag = 1;
results <- list(pvalue = pvalue, stat = stat, flag = flag, stat_hash=stat_hash, pvalue_hash=pvalue_hash);
return(results);
}
}
#initialization: these values will be returned whether the test cannot be carried out
pvalue = log(1);
stat = 0;
flag = 0;
results <- list(pvalue = pvalue, stat = stat, flag = flag, stat_hash=stat_hash, pvalue_hash=pvalue_hash);
cox_results = NULL;
cox_results_full = NULL;
#timeIndex = dim(dataset)[2];
event = target[,2]
numCases = dim(dataset)[1];
if ( length(event) == 0 ) event = vector('numeric',numCases) + 1;
if (is.na(csIndex) || length(csIndex) == 0 || csIndex == 0) {
options(warn = -1)
cox_results <- survival::coxph(target ~ dataset[, xIndex], weights = wei )
res <- anova(cox_results)
dof <- res[2, 3]
stat <- res[2, 2]
pvalue <- pchisq(stat, dof, lower.tail = FALSE, log.p = TRUE);
} else {
options(warn = -1)
cox_results <- survival::coxph(target ~ ., data = as.data.frame( dataset[ , csIndex] ), weights = wei)
cox_results_full <- survival::coxph(target ~ ., data = as.data.frame( dataset[ , c(csIndex, xIndex)] ), weights = wei)
res = anova(cox_results_full, cox_results)
stat = res[2, 2]
dF = res[2, 3]
pvalue = pchisq(stat, dF, lower.tail = FALSE, log.p = TRUE)
}
flag = 1;
if ( is.na(pvalue) || is.na(stat) ) {
pvalue = log(1);
stat = 0;
flag = 0;
} else {
#update hash objects
if( hash ) {
stat_hash[[key]] <- stat; #.set(stat_hash , key , stat)
pvalue_hash[[key]] <- pvalue; #.set(pvalue_hash , key , pvalue)
}
}
#testerrorcaseintrycatch(4);
results <- list(pvalue = pvalue, stat = stat, flag = flag , stat_hash=stat_hash, pvalue_hash=pvalue_hash);
return(results);
}
|
# Extracted example script for PracTools::pclass (from the package Rd file).
# NOTE(review): uses require() for loading; unlike library(), a missing
# package fails silently here -- confirm PracTools and survey are installed.
library(PracTools)
### Name: pclass
### Title: Form nonresponse adjustment classes based on propensity scores
### Aliases: pclass
### Keywords: methods survey
### ** Examples
# classes based on unweighted logistic regression
require(PracTools)
data(nhis)
# Fit an unweighted logistic response-propensity model and cut the fitted
# propensities into 5 nonresponse adjustment classes
out <- pclass(formula = resp ~ age + as.factor(sex) + as.factor(hisp) + as.factor(race),
              data = nhis, type = "unwtd", link="logit", numcl=5)
# Class sizes (useNA shows cases that could not be classified)
table(out$p.class, useNA="always")
summary(out$propensities)
# classes based on survey-weighted logistic regression
require(survey)
# Complex survey design: PSUs nested within strata, with survey weights
nhis.dsgn <- svydesign(ids = ~psu, strata = ~stratum, data = nhis, nest = TRUE, weights = ~svywt)
out <- pclass(formula = resp ~ age + as.factor(sex) + as.factor(hisp) + as.factor(race),
              type = "wtd", design = nhis.dsgn, link="logit", numcl=5)
table(out$p.class, useNA="always")
summary(out$propensities)
|
/data/genthat_extracted_code/PracTools/examples/pclass.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 870
|
r
|
library(PracTools)
### Name: pclass
### Title: Form nonresponse adjustment classes based on propensity scores
### Aliases: pclass
### Keywords: methods survey
### ** Examples
# classes based on unweighted logistic regression
require(PracTools)
data(nhis)
out <- pclass(formula = resp ~ age + as.factor(sex) + as.factor(hisp) + as.factor(race),
data = nhis, type = "unwtd", link="logit", numcl=5)
table(out$p.class, useNA="always")
summary(out$propensities)
# classes based on survey-weighted logistic regression
require(survey)
nhis.dsgn <- svydesign(ids = ~psu, strata = ~stratum, data = nhis, nest = TRUE, weights = ~svywt)
out <- pclass(formula = resp ~ age + as.factor(sex) + as.factor(hisp) + as.factor(race),
type = "wtd", design = nhis.dsgn, link="logit", numcl=5)
table(out$p.class, useNA="always")
summary(out$propensities)
|
# Example datasets offered in the upload panel. The scRNAseq-derived
# datasets are appended only when the scRNAseq package is installed.
exampleDatasets <- c("mouseBrainSubset", "maits", "campBrainSubset")
if (is.element("scRNAseq", rownames(installed.packages()))) {
  exampleDatasets <- c(exampleDatasets,
                       "fluidigm_pollen_et_al",
                       "th2_mahata_et_al",
                       "allen_tasic_et_al")
}
# Shiny UI for the toolkit's Upload tab: lets the user load data from
# delimited text files, an RDS file holding a SummarizedExperiment /
# SingleCellExperiment, or one of the bundled example datasets.
# NOTE(review): `appCSS` and `alertText` are defined elsewhere in the app.
shinyPanelUpload <- fluidPage(
  useShinyjs(),
  tags$style(appCSS),
  # Landing banner with a link to the documentation
  tags$div(
    class = "jumbotron", style = "background-color:#ededed",
    tags$div(
      class = "container",
      h1("Single Cell Toolkit"),
      p("Filter, cluster, and analyze single cell RNA-Seq data"),
      p(
        "Need help?",
        tags$a(href = "https://compbiomed.github.io/sctk_docs/",
               "Read the docs.", target = "_blank")
      )
    )
  ),
  tags$br(),
  tags$div(
    class = "container",
    h1("Upload"),
    h5(tags$a(href = "https://compbiomed.github.io/sctk_docs/articles/v03-tab01_Upload.html",
              "(help)", target = "_blank")),
    tags$hr(),
    tags$div(id = "uploadAlert", alertText),
    # Data-source selector; each choice toggles one conditionalPanel below
    h3("Choose data source:"),
    radioButtons("uploadChoice", label = NULL, c("Upload files" = "files",
                                                 "Upload SummarizedExperiment/SCExperiment RDS File" = "rds",
                                                 "Use example data" = "example")
    ),
    tags$hr(),
    # --- Panel 1: upload tab-separated count / annotation / feature files ---
    conditionalPanel(condition = sprintf("input['%s'] == 'files'", "uploadChoice"),
      h3("Upload data in tab separated text format:"),
      fluidRow(
        # Column 1: assay (count matrix) file, plus assay-type options
        column(width = 4,
          wellPanel(
            h4("Example count file:"),
            HTML('<table class="table"><thead><tr class="header"><th>Gene</th>
                 <th>Cell1</th><th>Cell2</th><th>…</th><th>CellN</th>
                 </tr></thead><tbody><tr class="odd"><td>Gene1</td><td>0</td>
                 <td>0</td><td>…</td><td>0</td></tr><tr class="even">
                 <td>Gene2</td><td>5</td><td>6</td><td>…</td><td>0</td>
                 </tr><tr class="odd"><td>Gene3</td><td>4</td><td>3</td>
                 <td>…</td><td>8</td></tr><tr class="even">
                 <td>…</td><td>…</td><td>…</td>
                 <td>…</td><td>…</td></tr><tr class="odd">
                 <td>GeneM</td><td>10</td><td>10</td><td>…</td><td>10</td>
                 </tr></tbody></table>'),
            tags$a(href = "https://drive.google.com/open?id=1n0CtM6phfkWX0O6xRtgPPg6QuPFP6pY8",
                   "Download an example count file here.", target = "_blank"),
            tags$br(),
            tags$br(),
            fileInput(
              "countsfile",
              HTML(
                paste("Input assay (eg. counts, required):",
                      tags$span(style = "color:red", "*", sep = ""))
              ),
              accept = c(
                "text/csv", "text/comma-separated-values",
                "text/tab-separated-values", "text/plain", ".csv", ".tsv"
              )
            )
          ),
          h4("Input Assay Type:"),
          selectInput("inputAssayType", label = NULL,
                      c("counts", "normcounts", "logcounts", "cpm",
                        "logcpm", "tpm", "logtpm")
          ),
          checkboxInput("createLogcounts",
                        "Also create log2 input assay on upload", value = TRUE)
        ),
        # Column 2: optional per-cell (sample) annotation file
        column(width = 4,
          wellPanel(
            h4("Example sample annotation file:"),
            HTML('<table class="table"><thead><tr class="header"><th>Cell</th>
                 <th>Annot1</th><th>…</th></tr></thead><tbody><tr class="odd">
                 <td>Cell1</td><td>a</td><td>…</td></tr><tr class="even">
                 <td>Cell2</td><td>a</td><td>…</td></tr><tr class="odd">
                 <td>Cell3</td><td>b</td><td>…</td></tr><tr class="even">
                 <td>…</td><td>…</td><td>…</td></tr><tr class="odd"><td>CellN</td>
                 <td>b</td><td>…</td></tr></tbody></table>'),
            tags$a(href = "https://drive.google.com/open?id=10IDmZQUiASN4wnzO4-WRJQopKvxCNu6J",
                   "Download an example annotation file here.", target = "_blank"),
            tags$br(),
            tags$br(),
            fileInput(
              "annotFile", "Sample annotations (optional):",
              accept = c(
                "text/csv", "text/comma-separated-values",
                "text/tab-separated-values", "text/plain", ".csv", ".tsv"
              )
            )
          )
        ),
        # Column 3: optional per-gene (feature) annotation file
        column(width = 4,
          wellPanel(
            h4("Example feature file:"),
            HTML('<table class="table"><thead><tr class="header"><th>Gene</th>
                 <th>Annot2</th><th>…</th></tr></thead><tbody><tr class="odd">
                 <td>Gene1</td><td>a</td><td>…</td></tr><tr class="even">
                 <td>Gene2</td><td>a</td><td>…</td></tr><tr class="odd">
                 <td>Gene3</td><td>b</td><td>…</td></tr><tr class="even">
                 <td>…</td><td>…</td><td>…</td></tr><tr class="odd"><td>GeneM</td>
                 <td>b</td><td>…</td></tr></tbody></table>'),
            tags$a(href = "https://drive.google.com/open?id=1gxXaZPq5Wrn2lNHacEVaCN2a_FHNvs4O",
                   "Download an example feature file here.", target = "_blank"),
            tags$br(),
            tags$br(),
            fileInput(
              "featureFile", "Feature annotations (optional):",
              accept = c(
                "text/csv", "text/comma-separated-values",
                "text/tab-separated-values", "text/plain", ".csv", ".tsv"
              )
            )
          )
        )
      )
    ),
    # --- Panel 2: pick one of the bundled example datasets; each dataset has
    # a nested conditionalPanel showing its citation and description ---
    conditionalPanel(
      condition = sprintf("input['%s'] == 'example'", "uploadChoice"),
      h3("Choose Example Dataset:"),
      selectInput("selectExampleData", label = NULL, exampleDatasets),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'mouseBrainSubset'", "selectExampleData"),
        h3(tags$a(href = "https://doi.org/10.1126/science.aaa1934", "Mouse Brain Subset: GSE60361", target = "_blank")),
        "A subset of 30 samples from a single cell RNA-Seq experiment from Zeisel, et al. Science 2015. The data was produced from cells from the mouse somatosensory cortex (S1) and hippocampus (CA1). 15 of the cells were identified as oligodendrocytes and 15 of the cell were identified as microglia.",
        tags$br(),
        tags$br()
      ),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'campBrainSubset'", "selectExampleData"),
        h3(tags$a(href = "https://doi.org/10.1038/nn.4495", "500 cells from Campbell et. al, 2017, Mouse Brain Subset", target = "_blank")),
        "A subset of 500 cells from a single cell RNA-Seq experiment from Campbell, et al. Nature Neuroscience 2017 using droplet-based sequencing technology. This study was perfomed to identify various hypothalamic arcuate–median eminence complex (Arc-ME) cell types. This contains information such as the diet of the mice, sex and proposed cell type for each cell. ",
        tags$br(),
        tags$br()
      ),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'maits'", "selectExampleData"),
        h3(tags$a(href = "https://doi.org/10.1186/s13059-015-0844-5", "MAITs data from MAST package", target = "_blank")),
        "96 Single-cell transcriptome profiling from Mucosal Associated Invariant T cells (MAITs), measured on the Fluidigm C1.",
        tags$br(),
        tags$br()
      ),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'fluidigm_pollen_et_al'", "selectExampleData"),
        h3(tags$a(href = "http://dx.doi.org/10.1038/nbt.2967", "130 cells from (Pollen et al. 2014), 65 at high coverage and 65 at low coverage", target = "_blank")),
        "Transcriptomes of cell populations in both of low-coverage (~0.27 million reads per cell) and high-coverage (~5 million reads per cell) to identify cell-type-specific biomarkers, and to compare gene expression across samples specifically for cells of a given type as well as to reconstruct developmental lineages of related cell types. (data loaded from scRNASeq package)",
        tags$br(),
        tags$br()
      ),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'th2_mahata_et_al'", "selectExampleData"),
        h3(tags$a(href = "http://dx.doi.org/10.1016/j.celrep.2014.04.011", "96 T helper cells from (Mahata et al. 2014)", target = "_blank")),
        "96 T helper cells from 6-week-old mouse, day 4.5 in vitro Th2 differentiation. (data loaded from scRNASeq package)",
        tags$br(),
        tags$br()
      ),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'allen_tasic_et_al'", "selectExampleData"),
        h3(tags$a(href = "http://dx.doi.org/10.1038/nn.4216", "Mouse visual cortex cells from (Tasic et al. 2016)", target = "_blank")),
        "Subset of 379 cells from the mouse visual cortex. (data loaded from scRNASeq package)",
        tags$br(),
        tags$br()
      )
    ),
    # --- Panel 3: upload a serialized experiment object ---
    conditionalPanel(
      condition = sprintf("input['%s'] == 'rds'", "uploadChoice"),
      h3("Choose an RDS file that contains a SummarizedExperiment/SCExperiment Object:"),
      fileInput(
        "rdsFile", "SCExperiment RDS file:", accept = c(".rds", ".RDS")
      )
    ),
    # Upload button with a busy spinner while the data loads
    withBusyIndicatorUI(
      actionButton("uploadData", "Upload")
    ),
    tags$div(
      class = "container",
      p("")
    )
  )
  #includeHTML("www/footer.html")
)
|
/inst/shiny/ui_01_upload.R
|
permissive
|
aleshchyk/singleCellTK
|
R
| false
| false
| 9,553
|
r
|
# Build the list of bundled example datasets offered in the Upload tab.
# Three datasets ship with the package; three more are offered only when
# the optional scRNAseq package is installed. local() keeps the helper
# names out of the global environment.
exampleDatasets <- local({
  base_sets <- c("mouseBrainSubset", "maits", "campBrainSubset")
  scrnaseq_sets <- c("fluidigm_pollen_et_al", "th2_mahata_et_al",
                     "allen_tasic_et_al")
  if ("scRNAseq" %in% rownames(installed.packages())) {
    c(base_sets, scrnaseq_sets)
  } else {
    base_sets
  }
})
# UI definition for the "Upload" tab of the Single Cell Toolkit Shiny app.
# Offers three data sources, selected via the "uploadChoice" radio buttons:
#   "files"   - tab-separated count matrix plus optional sample/feature annotations,
#   "rds"     - an RDS file holding a SummarizedExperiment/SCExperiment object,
#   "example" - one of the bundled example datasets (see `exampleDatasets`).
# NOTE(review): `appCSS` and `alertText` are defined elsewhere in the app.
shinyPanelUpload <- fluidPage(
  useShinyjs(),
  tags$style(appCSS),
  # Jumbotron header: app title, tagline and a documentation link.
  tags$div(
    class = "jumbotron", style = "background-color:#ededed",
    tags$div(
      class = "container",
      h1("Single Cell Toolkit"),
      p("Filter, cluster, and analyze single cell RNA-Seq data"),
      p(
        "Need help?",
        tags$a(href = "https://compbiomed.github.io/sctk_docs/",
          "Read the docs.", target = "_blank")
      )
    )
  ),
  tags$br(),
  tags$div(
    class = "container",
    h1("Upload"),
    h5(tags$a(href = "https://compbiomed.github.io/sctk_docs/articles/v03-tab01_Upload.html",
      "(help)", target = "_blank")),
    tags$hr(),
    # Placeholder for upload status/error alerts filled in by the server.
    tags$div(id = "uploadAlert", alertText),
    # Data-source selector; the conditionalPanel blocks below react to it.
    h3("Choose data source:"),
    radioButtons("uploadChoice", label = NULL, c("Upload files" = "files",
      "Upload SummarizedExperiment/SCExperiment RDS File" = "rds",
      "Use example data" = "example")
    ),
    tags$hr(),
    # --- Source 1: tab-separated text files --------------------------------
    # Three side-by-side panels: required counts matrix, optional sample
    # annotations, optional feature annotations. Each shows an example table
    # and a Google Drive link to a downloadable example file.
    conditionalPanel(condition = sprintf("input['%s'] == 'files'", "uploadChoice"),
      h3("Upload data in tab separated text format:"),
      fluidRow(
        column(width = 4,
          wellPanel(
            h4("Example count file:"),
            HTML('<table class="table"><thead><tr class="header"><th>Gene</th>
              <th>Cell1</th><th>Cell2</th><th>…</th><th>CellN</th>
              </tr></thead><tbody><tr class="odd"><td>Gene1</td><td>0</td>
              <td>0</td><td>…</td><td>0</td></tr><tr class="even">
              <td>Gene2</td><td>5</td><td>6</td><td>…</td><td>0</td>
              </tr><tr class="odd"><td>Gene3</td><td>4</td><td>3</td>
              <td>…</td><td>8</td></tr><tr class="even">
              <td>…</td><td>…</td><td>…</td>
              <td>…</td><td>…</td></tr><tr class="odd">
              <td>GeneM</td><td>10</td><td>10</td><td>…</td><td>10</td>
              </tr></tbody></table>'),
            tags$a(href = "https://drive.google.com/open?id=1n0CtM6phfkWX0O6xRtgPPg6QuPFP6pY8",
              "Download an example count file here.", target = "_blank"),
            tags$br(),
            tags$br(),
            fileInput(
              "countsfile",
              HTML(
                paste("Input assay (eg. counts, required):",
                  tags$span(style = "color:red", "*", sep = ""))
              ),
              accept = c(
                "text/csv", "text/comma-separated-values",
                "text/tab-separated-values", "text/plain", ".csv", ".tsv"
              )
            )
          ),
          # What kind of values the uploaded matrix contains.
          h4("Input Assay Type:"),
          selectInput("inputAssayType", label = NULL,
            c("counts", "normcounts", "logcounts", "cpm",
              "logcpm", "tpm", "logtpm")
          ),
          checkboxInput("createLogcounts",
            "Also create log2 input assay on upload", value = TRUE)
        ),
        column(width = 4,
          wellPanel(
            h4("Example sample annotation file:"),
            HTML('<table class="table"><thead><tr class="header"><th>Cell</th>
              <th>Annot1</th><th>…</th></tr></thead><tbody><tr class="odd">
              <td>Cell1</td><td>a</td><td>…</td></tr><tr class="even">
              <td>Cell2</td><td>a</td><td>…</td></tr><tr class="odd">
              <td>Cell3</td><td>b</td><td>…</td></tr><tr class="even">
              <td>…</td><td>…</td><td>…</td></tr><tr class="odd"><td>CellN</td>
              <td>b</td><td>…</td></tr></tbody></table>'),
            tags$a(href = "https://drive.google.com/open?id=10IDmZQUiASN4wnzO4-WRJQopKvxCNu6J",
              "Download an example annotation file here.", target = "_blank"),
            tags$br(),
            tags$br(),
            fileInput(
              "annotFile", "Sample annotations (optional):",
              accept = c(
                "text/csv", "text/comma-separated-values",
                "text/tab-separated-values", "text/plain", ".csv", ".tsv"
              )
            )
          )
        ),
        column(width = 4,
          wellPanel(
            h4("Example feature file:"),
            HTML('<table class="table"><thead><tr class="header"><th>Gene</th>
              <th>Annot2</th><th>…</th></tr></thead><tbody><tr class="odd">
              <td>Gene1</td><td>a</td><td>…</td></tr><tr class="even">
              <td>Gene2</td><td>a</td><td>…</td></tr><tr class="odd">
              <td>Gene3</td><td>b</td><td>…</td></tr><tr class="even">
              <td>…</td><td>…</td><td>…</td></tr><tr class="odd"><td>GeneM</td>
              <td>b</td><td>…</td></tr></tbody></table>'),
            tags$a(href = "https://drive.google.com/open?id=1gxXaZPq5Wrn2lNHacEVaCN2a_FHNvs4O",
              "Download an example feature file here.", target = "_blank"),
            tags$br(),
            tags$br(),
            fileInput(
              "featureFile", "Feature annotations (optional):",
              accept = c(
                "text/csv", "text/comma-separated-values",
                "text/tab-separated-values", "text/plain", ".csv", ".tsv"
              )
            )
          )
        )
      )
    ),
    # --- Source 2: bundled example datasets --------------------------------
    # One description panel per dataset, shown according to the selection.
    conditionalPanel(
      condition = sprintf("input['%s'] == 'example'", "uploadChoice"),
      h3("Choose Example Dataset:"),
      selectInput("selectExampleData", label = NULL, exampleDatasets),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'mouseBrainSubset'", "selectExampleData"),
        h3(tags$a(href = "https://doi.org/10.1126/science.aaa1934", "Mouse Brain Subset: GSE60361", target = "_blank")),
        "A subset of 30 samples from a single cell RNA-Seq experiment from Zeisel, et al. Science 2015. The data was produced from cells from the mouse somatosensory cortex (S1) and hippocampus (CA1). 15 of the cells were identified as oligodendrocytes and 15 of the cell were identified as microglia.",
        tags$br(),
        tags$br()
      ),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'campBrainSubset'", "selectExampleData"),
        h3(tags$a(href = "https://doi.org/10.1038/nn.4495", "500 cells from Campbell et. al, 2017, Mouse Brain Subset", target = "_blank")),
        "A subset of 500 cells from a single cell RNA-Seq experiment from Campbell, et al. Nature Neuroscience 2017 using droplet-based sequencing technology. This study was perfomed to identify various hypothalamic arcuate–median eminence complex (Arc-ME) cell types. This contains information such as the diet of the mice, sex and proposed cell type for each cell. ",
        tags$br(),
        tags$br()
      ),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'maits'", "selectExampleData"),
        h3(tags$a(href = "https://doi.org/10.1186/s13059-015-0844-5", "MAITs data from MAST package", target = "_blank")),
        "96 Single-cell transcriptome profiling from Mucosal Associated Invariant T cells (MAITs), measured on the Fluidigm C1.",
        tags$br(),
        tags$br()
      ),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'fluidigm_pollen_et_al'", "selectExampleData"),
        h3(tags$a(href = "http://dx.doi.org/10.1038/nbt.2967", "130 cells from (Pollen et al. 2014), 65 at high coverage and 65 at low coverage", target = "_blank")),
        "Transcriptomes of cell populations in both of low-coverage (~0.27 million reads per cell) and high-coverage (~5 million reads per cell) to identify cell-type-specific biomarkers, and to compare gene expression across samples specifically for cells of a given type as well as to reconstruct developmental lineages of related cell types. (data loaded from scRNASeq package)",
        tags$br(),
        tags$br()
      ),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'th2_mahata_et_al'", "selectExampleData"),
        h3(tags$a(href = "http://dx.doi.org/10.1016/j.celrep.2014.04.011", "96 T helper cells from (Mahata et al. 2014)", target = "_blank")),
        "96 T helper cells from 6-week-old mouse, day 4.5 in vitro Th2 differentiation. (data loaded from scRNASeq package)",
        tags$br(),
        tags$br()
      ),
      conditionalPanel(
        condition = sprintf("input['%s'] == 'allen_tasic_et_al'", "selectExampleData"),
        h3(tags$a(href = "http://dx.doi.org/10.1038/nn.4216", "Mouse visual cortex cells from (Tasic et al. 2016)", target = "_blank")),
        "Subset of 379 cells from the mouse visual cortex. (data loaded from scRNASeq package)",
        tags$br(),
        tags$br()
      )
    ),
    # --- Source 3: RDS file holding a serialized experiment object ---------
    conditionalPanel(
      condition = sprintf("input['%s'] == 'rds'", "uploadChoice"),
      h3("Choose an RDS file that contains a SummarizedExperiment/SCExperiment Object:"),
      fileInput(
        "rdsFile", "SCExperiment RDS file:", accept = c(".rds", ".RDS")
      )
    ),
    # Upload button with a busy indicator while the server processes the data.
    withBusyIndicatorUI(
      actionButton("uploadData", "Upload")
    ),
    tags$div(
      class = "container",
      p("")
    )
  )
  #includeHTML("www/footer.html")
)
|
# UCM
# Prof. Lorenzo Escot
# Student: Caio Fernandes Moreno (caiofern@ucm.es)
# Brazil Stock Market Analysis
# NOTE(review): setwd() ties this script to one machine; consider removing.
setwd("~/git/Bitbucket/ucm/SCORE/tareas/Lorenzo-Escot")
library(tseries) # adf.test, kpss.test, bds.test, get.hist.quote, portfolio.optim, surrogate, arma, garch
#install.packages("forecast")
library(forecast)
# The forecast package provides automatic ARIMA model selection (auto.arima).
#install.packages("fArma")
library(fArma) # armaFit, rsFit
#install.packages("fGarch")
library(fGarch) # garchFit, formula like ~ arma(2,1) + garch(1,1)
#install.packages("outliers")
library(outliers) # outlier, rm.outlier, scores, chisq.out.test - to detect outliers/anomalous data (apply on a stationary series)
#install.packages("zoo")
library(zoo)
#setinternet2() # opens the internet port (legacy Windows-only helper)
#stock.name <- "^BVSP"
#stock.description <- "IBOVESPA"
# Download a stock's adjusted-close history from Yahoo Finance and run a
# standard time-series analysis on its log returns: stationarity (ADF),
# autocorrelation (ACF/PACF), nonlinearity (BDS), long memory (Hurst via
# R/S analysis), automatic ARIMA, a fixed-order ARMA(1,3) fit, and an
# ARMA(1,1)+GARCH(2,1) fit with forecasts.
#
# x: Yahoo Finance ticker symbol (e.g. "^BVSP"). Requires network access.
# y: human-readable description used in plot titles.
# Returns the forecast table from the final predict() on the ARMA+GARCH fit.
generateAnalysis <- function(x, y) {
  ticker <- x
  stock.description <- y
  ## Fetch the historical adjusted-close series for the ticker.
  series.name <- get.hist.quote(instrument = ticker, quote = "AdjClose")
  # BVSP time series starts 1993-04-27
  # http://finance.yahoo.com/q?s=%5EBVSP
  str(series.name)
  summary(series.name)
  start(series.name)
  end(series.name)
  plot(series.name)
  head(series.name, 10)
  tail(series.name, 10)
  # After inspecting the data we decided NOT to drop the 1993-1998 period.
  ## Are there missing values?
  length(series.name)
  length(series.name[!is.na(series.name)])
  # BUG FIX: length(complete.cases(x)) is always the total row count;
  # sum() actually counts the complete rows.
  sum(complete.cases(series.name))
  # Drop any rows with NA (a no-op when the series is already complete).
  series.name <- series.name[complete.cases(series.name)]
  plot(series.name)
  ## Restrict to a fixed sample window.
  series.name.short <- window(series.name,
                              start = as.Date("1993-04-27"),
                              end = as.Date("2015-09-30"))
  str(series.name.short)
  summary(series.name.short)
  plot(series.name.short)
  ## Log returns.
  d.series.name <- diff(log(series.name.short))
  plot.description <- paste(stock.description, "WITH ALL DATA")
  plot(d.series.name, main = plot.description)
  ## Outliers: keep observations whose |z-score| is at most 5.
  remove.outlier.d.series.name <-
    d.series.name[abs(scores(d.series.name, type = "z")) <= 5]
  plot.description <- paste(stock.description, "WITHOUT OUTLIERS")
  # BUG FIX: the original plotted the unfiltered series under this title.
  plot(remove.outlier.d.series.name, main = plot.description)
  ## Stationarity: ADF test, H0 = unit root (non-stationary).
  adf.test(d.series.name)
  # A small p-value (e.g. 0.01) rejects H0, i.e. the return series IS
  # stationary (the original comment had this backwards).
  sd(d.series.name) # standard deviation
  # Statistical stationarity: http://people.duke.edu/~rnau/411diff.htm
  ## Autocorrelation of the returns.
  df.d.series.name <- as.data.frame(d.series.name)
  par(mfrow = c(2, 1))
  acf(df.d.series.name, ylim = c(-1, 1))
  pacf(df.d.series.name, ylim = c(-1, 1))
  tsdisplay(df.d.series.name)
  ## BDS test, H0: i.i.d.
  bds.test(d.series.name, m = 10)
  ## R/S analysis; a Hurst exponent of 0.5 indicates white noise.
  HURST <- rsFit(d.series.name, doplot = TRUE)
  HURST
  ## t statistic for H0: Hurst = 0.5.
  t <- (HURST@hurst$diag[2, 1] - 0.5) / HURST@hurst$diag[2, 2]
  t
  ## Automatic ARIMA model selection and forecasts.
  modelo.auto.arima <- auto.arima(d.series.name)
  plot(forecast(modelo.auto.arima, h = 20))
  modelo.auto.arima1 <- auto.arima(d.series.name)
  plot(forecast(modelo.auto.arima1, h = 1))
  ## Alternative: fixed-order ARMA(1,3) fit with residual diagnostics.
  d.series.name.ARMA <- armaFit(~ arma(1, 3), data = d.series.name)
  # TYPO FIX: the argument is 'which', not 'wich' (the misspelling was
  # silently swallowed by ...).
  summary(d.series.name.ARMA, which = "all")
  residuo <- residuals(d.series.name.ARMA)
  plot(residuo)
  lines(residuo)
  df.residuo <- as.data.frame(residuo)
  par(mfrow = c(2, 1))
  acf(df.residuo, ylim = c(-1, 1))
  pacf(df.residuo, ylim = c(-1, 1))
  tsdisplay(df.residuo)
  bds.test(d.series.name, m = 10) # H0: i.i.d
  HURST <- rsFit(d.series.name, doplot = TRUE)
  HURST
  ## t statistic for H0: Hurst = 0.5.
  t <- (HURST@hurst$diag[2, 1] - 0.5) / HURST@hurst$diag[2, 2]
  t
  #### PREDICTIONS
  # BUG FIX: 'True' is not a valid R constant (runtime error); must be TRUE.
  predict(d.series.name.ARMA, n.ahead = 10, conf = c(90, 95), dplot = TRUE)
  ## Alternative: ARMA(1,1) + GARCH(2,1).
  ## Note: in garchFit's formula the garch() order is (GARCH, ARCH).
  d.series.name.ARMAGARCH <- garchFit(~ arma(1, 1) + garch(2, 1),
                                      data = d.series.name,
                                      include.mean = TRUE)
  summary(d.series.name.ARMAGARCH)
  plot(d.series.name.ARMAGARCH@residuals)
  residuogarch <- residuals(d.series.name.ARMAGARCH)
  volatilitygarch <- volatility(d.series.name.ARMAGARCH)
  plot(volatilitygarch)
  lines(volatilitygarch)
  plot(d.series.name^2)
  predict(d.series.name.ARMAGARCH, n.ahead = 10, conf = c(90, 95), dplot = TRUE)
}
# Run the full analysis for each ticker of interest. generateAnalysis()
# downloads the price history from Yahoo Finance, so every call needs a
# working network connection; `run` holds the last call's forecast table.
# IBOVESPA
stock.name <- "^BVSP"
stock.description <- "IBOVESPA"
# Call your function and pass x and y to the function
run <- generateAnalysis(stock.name,stock.description)
# Itau
stock.name <- "ITSA4.SA"
stock.description <- "Itausa - Investimentos Itau S.A"
# Call your function and pass x and y to the function
run <- generateAnalysis(stock.name,stock.description)
# BBAS3.SA
stock.name <- "BBAS3.SA"
stock.description <- "Banco do Brasil S.A."
# Call your function and pass x and y to the function
run <- generateAnalysis(stock.name,stock.description)
# KROT3.SA
stock.name <- "KROT3.SA"
stock.description <- "Kroton Educacional S.A."
# Call your function and pass x and y to the function
run <- generateAnalysis(stock.name,stock.description)
# VALE5.SA
stock.name <- "VALE5.SA"
stock.description <- "Vale S.A."
# Call your function and pass x and y to the function
run <- generateAnalysis(stock.name,stock.description)
|
/score/ucm-lorenzo-task01-caio-brazil-stock-market-with-stock-param.R
|
no_license
|
joaopauloramos/ucm
|
R
| false
| false
| 6,316
|
r
|
# UCM
# Prof. Lorenzo Escot
# Student: Caio Fernandes Moreno (caiofern@ucm.es)
# Brazil Stock Market Analysis
# NOTE(review): setwd() ties this script to one machine; consider removing.
setwd("~/git/Bitbucket/ucm/SCORE/tareas/Lorenzo-Escot")
library(tseries) # adf.test, kpss.test, bds.test, get.hist.quote, portfolio.optim, surrogate, arma, garch
#install.packages("forecast")
library(forecast)
# The forecast package provides automatic ARIMA model selection (auto.arima).
#install.packages("fArma")
library(fArma) # armaFit, rsFit
#install.packages("fGarch")
library(fGarch) # garchFit, formula like ~ arma(2,1) + garch(1,1)
#install.packages("outliers")
library(outliers) # outlier, rm.outlier, scores, chisq.out.test - to detect outliers/anomalous data (apply on a stationary series)
#install.packages("zoo")
library(zoo)
#setinternet2() # opens the internet port (legacy Windows-only helper)
#stock.name <- "^BVSP"
#stock.description <- "IBOVESPA"
# Download a stock's adjusted-close history from Yahoo Finance and run a
# standard time-series analysis on its log returns: stationarity (ADF),
# autocorrelation (ACF/PACF), nonlinearity (BDS), long memory (Hurst via
# R/S analysis), automatic ARIMA, a fixed-order ARMA(1,3) fit, and an
# ARMA(1,1)+GARCH(2,1) fit with forecasts.
#
# x: Yahoo Finance ticker symbol (e.g. "^BVSP"). Requires network access.
# y: human-readable description used in plot titles.
# Returns the forecast table from the final predict() on the ARMA+GARCH fit.
generateAnalysis <- function(x, y) {
  ticker <- x
  stock.description <- y
  ## Fetch the historical adjusted-close series for the ticker.
  series.name <- get.hist.quote(instrument = ticker, quote = "AdjClose")
  # BVSP time series starts 1993-04-27
  # http://finance.yahoo.com/q?s=%5EBVSP
  str(series.name)
  summary(series.name)
  start(series.name)
  end(series.name)
  plot(series.name)
  head(series.name, 10)
  tail(series.name, 10)
  # After inspecting the data we decided NOT to drop the 1993-1998 period.
  ## Are there missing values?
  length(series.name)
  length(series.name[!is.na(series.name)])
  # BUG FIX: length(complete.cases(x)) is always the total row count;
  # sum() actually counts the complete rows.
  sum(complete.cases(series.name))
  # Drop any rows with NA (a no-op when the series is already complete).
  series.name <- series.name[complete.cases(series.name)]
  plot(series.name)
  ## Restrict to a fixed sample window.
  series.name.short <- window(series.name,
                              start = as.Date("1993-04-27"),
                              end = as.Date("2015-09-30"))
  str(series.name.short)
  summary(series.name.short)
  plot(series.name.short)
  ## Log returns.
  d.series.name <- diff(log(series.name.short))
  plot.description <- paste(stock.description, "WITH ALL DATA")
  plot(d.series.name, main = plot.description)
  ## Outliers: keep observations whose |z-score| is at most 5.
  remove.outlier.d.series.name <-
    d.series.name[abs(scores(d.series.name, type = "z")) <= 5]
  plot.description <- paste(stock.description, "WITHOUT OUTLIERS")
  # BUG FIX: the original plotted the unfiltered series under this title.
  plot(remove.outlier.d.series.name, main = plot.description)
  ## Stationarity: ADF test, H0 = unit root (non-stationary).
  adf.test(d.series.name)
  # A small p-value (e.g. 0.01) rejects H0, i.e. the return series IS
  # stationary (the original comment had this backwards).
  sd(d.series.name) # standard deviation
  # Statistical stationarity: http://people.duke.edu/~rnau/411diff.htm
  ## Autocorrelation of the returns.
  df.d.series.name <- as.data.frame(d.series.name)
  par(mfrow = c(2, 1))
  acf(df.d.series.name, ylim = c(-1, 1))
  pacf(df.d.series.name, ylim = c(-1, 1))
  tsdisplay(df.d.series.name)
  ## BDS test, H0: i.i.d.
  bds.test(d.series.name, m = 10)
  ## R/S analysis; a Hurst exponent of 0.5 indicates white noise.
  HURST <- rsFit(d.series.name, doplot = TRUE)
  HURST
  ## t statistic for H0: Hurst = 0.5.
  t <- (HURST@hurst$diag[2, 1] - 0.5) / HURST@hurst$diag[2, 2]
  t
  ## Automatic ARIMA model selection and forecasts.
  modelo.auto.arima <- auto.arima(d.series.name)
  plot(forecast(modelo.auto.arima, h = 20))
  modelo.auto.arima1 <- auto.arima(d.series.name)
  plot(forecast(modelo.auto.arima1, h = 1))
  ## Alternative: fixed-order ARMA(1,3) fit with residual diagnostics.
  d.series.name.ARMA <- armaFit(~ arma(1, 3), data = d.series.name)
  # TYPO FIX: the argument is 'which', not 'wich' (the misspelling was
  # silently swallowed by ...).
  summary(d.series.name.ARMA, which = "all")
  residuo <- residuals(d.series.name.ARMA)
  plot(residuo)
  lines(residuo)
  df.residuo <- as.data.frame(residuo)
  par(mfrow = c(2, 1))
  acf(df.residuo, ylim = c(-1, 1))
  pacf(df.residuo, ylim = c(-1, 1))
  tsdisplay(df.residuo)
  bds.test(d.series.name, m = 10) # H0: i.i.d
  HURST <- rsFit(d.series.name, doplot = TRUE)
  HURST
  ## t statistic for H0: Hurst = 0.5.
  t <- (HURST@hurst$diag[2, 1] - 0.5) / HURST@hurst$diag[2, 2]
  t
  #### PREDICTIONS
  # BUG FIX: 'True' is not a valid R constant (runtime error); must be TRUE.
  predict(d.series.name.ARMA, n.ahead = 10, conf = c(90, 95), dplot = TRUE)
  ## Alternative: ARMA(1,1) + GARCH(2,1).
  ## Note: in garchFit's formula the garch() order is (GARCH, ARCH).
  d.series.name.ARMAGARCH <- garchFit(~ arma(1, 1) + garch(2, 1),
                                      data = d.series.name,
                                      include.mean = TRUE)
  summary(d.series.name.ARMAGARCH)
  plot(d.series.name.ARMAGARCH@residuals)
  residuogarch <- residuals(d.series.name.ARMAGARCH)
  volatilitygarch <- volatility(d.series.name.ARMAGARCH)
  plot(volatilitygarch)
  lines(volatilitygarch)
  plot(d.series.name^2)
  predict(d.series.name.ARMAGARCH, n.ahead = 10, conf = c(90, 95), dplot = TRUE)
}
# Run the full analysis for each ticker of interest. generateAnalysis()
# downloads the price history from Yahoo Finance, so every call needs a
# working network connection; `run` holds the last call's forecast table.
# IBOVESPA
stock.name <- "^BVSP"
stock.description <- "IBOVESPA"
# Call your function and pass x and y to the function
run <- generateAnalysis(stock.name,stock.description)
# Itau
stock.name <- "ITSA4.SA"
stock.description <- "Itausa - Investimentos Itau S.A"
# Call your function and pass x and y to the function
run <- generateAnalysis(stock.name,stock.description)
# BBAS3.SA
stock.name <- "BBAS3.SA"
stock.description <- "Banco do Brasil S.A."
# Call your function and pass x and y to the function
run <- generateAnalysis(stock.name,stock.description)
# KROT3.SA
stock.name <- "KROT3.SA"
stock.description <- "Kroton Educacional S.A."
# Call your function and pass x and y to the function
run <- generateAnalysis(stock.name,stock.description)
# VALE5.SA
stock.name <- "VALE5.SA"
stock.description <- "Vale S.A."
# Call your function and pass x and y to the function
run <- generateAnalysis(stock.name,stock.description)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/movement.R
\name{move_iTags}
\alias{move_iTags}
\title{Movement function for individual tags}
\usage{
move_iTags(dat, om)
}
\arguments{
\item{dat}{Object with individual tags}
\item{om}{operating model object}
}
\description{
Movement of individual tags, based on move_rules
}
\details{
This implementation of individual based tag movement is designed for use when
a large number of regions are specified and array based movement becomes slow
}
|
/man/move_iTags.Rd
|
no_license
|
AustralianAntarcticDivision/planetfish
|
R
| false
| true
| 524
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/movement.R
\name{move_iTags}
\alias{move_iTags}
\title{Movement function for individual tags}
\usage{
move_iTags(dat, om)
}
\arguments{
\item{dat}{Object with individual tags}
\item{om}{operating model object}
}
\description{
Movement of individual tags, based on move_rules
}
\details{
This implementation of individual based tag movement is designed for use when
a large number of regions are specified and array based movement becomes slow
}
|
#### Here is the R script you will use: (remember that # indicates a comment) ####
# Lab5: Sequential (hierarchical) regression of Happiness on demographic
# predictors (Model 1) plus personality predictors (Model 2), using helper
# functions from SDSRegressionR.
devtools::install_github("MichaelJMahometa/SDSRegressionR")
library(SDSRegressionR)
# Import the data
int <- read.csv("data/introverts.csv", stringsAsFactors=FALSE)
names(int)
library(psych)
describe(int$Happiness)
# Determine and run the final (full) model with all five predictors
full <- lm(Happiness ~ Age + ERA + QSR + Neuroticism + Extraversion, data=int)
View(int)
# Look for any issues: multicollinearity, residual pattern, influential points
library(car)
vif(full)
residFitted(full)
cooksPlot(full, print.obs = TRUE , save.cutoff = TRUE)
# NOTE(review): assumes cooksPlot(save.cutoff = TRUE) creates `cooksCutOff`
# in the workspace — confirm against the SDSRegressionR documentation.
cooksCutOff*3
threeOuts(full)
# Clean up: drop the two influential observations identified above
good_int <- int[!row.names(int) %in% c(13, 140),]
# Re-run the final model on the cleaned data
fullg <- lm(Happiness ~ Age + ERA + QSR + Neuroticism + Extraversion, data=good_int)
# Tag observations actually used in the final model
good_int$in_fullg <- tagObs(fullg)
View(good_int)
# Keep just those in the model
good_int_fullg <- good_int[which(good_int$in_fullg == 1), ]
sum(good_int_fullg$in_fullg) #Double check
# Now for the Sequential Regression:
# Model 1: demographic block only
m1_seq <- lm(Happiness ~ Age + ERA + QSR, data=good_int_fullg)
summary(m1_seq)
summary(m1_seq)$r.squared
lmBeta(m1_seq)
pCorr(m1_seq)
# Model 2: add the personality block
m2_seq <- lm(Happiness ~ Age + ERA + QSR + Neuroticism + Extraversion, data=good_int_fullg)
summary(m2_seq)
summary(m2_seq)$r.squared
lmBeta(m2_seq)
pCorr(m2_seq)
# Now the Sequential Results: F-test for the added block and the R-squared change
anova(m1_seq, m2_seq)
summary(m2_seq)$r.squared - summary(m1_seq)$r.squared
|
/Lab5/Lab 5 R syntax.R
|
no_license
|
mehranaman/Applied-Regression
|
R
| false
| false
| 1,452
|
r
|
#### Here is the R script you will use: (remember that # indicates a comment) ####
# Lab5: Sequential (hierarchical) regression of Happiness on demographic
# predictors (Model 1) plus personality predictors (Model 2), using helper
# functions from SDSRegressionR.
devtools::install_github("MichaelJMahometa/SDSRegressionR")
library(SDSRegressionR)
# Import the data
int <- read.csv("data/introverts.csv", stringsAsFactors=FALSE)
names(int)
library(psych)
describe(int$Happiness)
# Determine and run the final (full) model with all five predictors
full <- lm(Happiness ~ Age + ERA + QSR + Neuroticism + Extraversion, data=int)
View(int)
# Look for any issues: multicollinearity, residual pattern, influential points
library(car)
vif(full)
residFitted(full)
cooksPlot(full, print.obs = TRUE , save.cutoff = TRUE)
# NOTE(review): assumes cooksPlot(save.cutoff = TRUE) creates `cooksCutOff`
# in the workspace — confirm against the SDSRegressionR documentation.
cooksCutOff*3
threeOuts(full)
# Clean up: drop the two influential observations identified above
good_int <- int[!row.names(int) %in% c(13, 140),]
# Re-run the final model on the cleaned data
fullg <- lm(Happiness ~ Age + ERA + QSR + Neuroticism + Extraversion, data=good_int)
# Tag observations actually used in the final model
good_int$in_fullg <- tagObs(fullg)
View(good_int)
# Keep just those in the model
good_int_fullg <- good_int[which(good_int$in_fullg == 1), ]
sum(good_int_fullg$in_fullg) #Double check
# Now for the Sequential Regression:
# Model 1: demographic block only
m1_seq <- lm(Happiness ~ Age + ERA + QSR, data=good_int_fullg)
summary(m1_seq)
summary(m1_seq)$r.squared
lmBeta(m1_seq)
pCorr(m1_seq)
# Model 2: add the personality block
m2_seq <- lm(Happiness ~ Age + ERA + QSR + Neuroticism + Extraversion, data=good_int_fullg)
summary(m2_seq)
summary(m2_seq)$r.squared
lmBeta(m2_seq)
pCorr(m2_seq)
# Now the Sequential Results: F-test for the added block and the R-squared change
anova(m1_seq, m2_seq)
summary(m2_seq)$r.squared - summary(m1_seq)$r.squared
|
#### Data loading and setup ####
# NOTE(review): clearing the workspace and hard-coding setwd() are fragile in shared
# scripts; kept here to preserve the original interactive workflow.
rm(list=ls(all=TRUE)) #Clears console
options(digits=7)
setwd("/Users/hrobbins827/Documents/PhD/NCI overflow/NLST/Analysis")
# Read in my personal defined functions and the Kovalchik prediction function
source("/Users/hrobbins827/Documents/PhD/Textbooks Resources/R functions/hilary_functions.R")
source("kovalchik.R")
# Read in library functions including Stephanie's coxph.risk which I installed from the local tar.gz file
# install coxph.risk tar file
# install lcmodels package
packages <- c("dplyr","ggplot2","survival","gmodels","coxph.risk","geepack","MESS","psych","Hmisc","glmnet","boot")
# Load every dependency, failing loudly if one is missing. The original used
# lapply(packages, require, c = T), which relies on partial argument matching
# ("c" -> character.only) and on require(), which silently returns FALSE for a
# missing package instead of stopping the script.
for (pkg in packages) {
  library(pkg, character.only = TRUE)
}
# Load PLCO and NLST data sets. New versions provided by Li Cheung on 11 July 2016, 14 July 2016, 20 July 2016, 8 Aug 2016.
load("hilary.RData") # Load NLST and PLCO data (creates `plco` and `nlst` in the workspace)
# In the PLCO dataset, impute missing family history values to 0
plco$fam.lung.trend <- ifelse(is.na(plco$fam.lung.trend), 0, plco$fam.lung.trend)
plco.control <- subset(plco, control.group==1) # control arm of PLCO who had no chest xray
# Remove people with <30 pack-years and age<55 or age>74 from NLST
nlst <- filter(nlst, pkyears.cat!="[0,30)" & age>=55 & age<=74)
# Make a new pack-years variable to get rid of the [0,30) level
nlst <- mutate(nlst, pkyears.cat.clone=ifelse(pkyears.cat=="[30,40)", "[30,40)", ifelse(pkyears.cat=="[40,50)", "[40,50)",
                     ifelse(pkyears.cat=="[50,Inf]", "[50,Inf]", NA)))) # I checked this with a table
nlst$pkyears.cat <- as.factor(nlst$pkyears.cat.clone)
# Make a variable for days to diagnosis
nlst$days_to_dx <- ifelse(nlst$case==1, 365*nlst$incidence.years, NA)
# Make a subset of NLST data with the LCRAT variables that we will need later to merge back with at-risk datasets
varlist <- c("female","race","edu6","fam.lung.trend","emp","bmi","cpd","pkyears.cat","age","qtyears","smkyears")
nlst.sub <- as.data.frame(cbind(nlst[,varlist], pid=nlst$pid, lss=as.numeric(nlst$lss)))
# To later calculate pre-screening risk, we must first fit the incidence model and other-cause death models in PLCO.
# LCRAT = lung cancer incidence model; cox.death = competing other-cause mortality model.
LCRAT <- coxph(Surv(incidence.years, case) ~
                 female+race+edu6+fam.lung.trend+emp+I(bmi<=18.5)+I(cpd>20)+as.factor(pkyears.cat)+
                 I(log(age))+I(log(bmi))+I(log(qtyears+1))+smkyears,data = plco.control)
cox.death <- coxph(Surv(years.followed, other.cause.death) ~
                     female+race+edu6+emp+I(bmi <= 18.5)+I(cpd>20)+as.factor(pkyears.cat)+I((age)^2)+I((bmi-25)^2)+
                     I(log(qtyears+1))+smkyears, data = plco.control)
# Subset to CT arm in NLST and make a pos/neg variable for the first, second, and third screens
# (truefalse_scrnres codes 1-3 are positive results, 4-6 negative; see legend below)
nlst$T0posneg <- ifelse(nlst$truefalse_scrnres_ly0 %in% c(4,5,6), 0, NA)
nlst$T0posneg <- ifelse(nlst$truefalse_scrnres_ly0 %in% c(1,2,3), 1, nlst$T0posneg)
nlst$T1posneg <- ifelse(nlst$truefalse_scrnres_ly1 %in% c(4,5,6), 0, NA)
nlst$T1posneg <- ifelse(nlst$truefalse_scrnres_ly1 %in% c(1,2,3), 1, nlst$T1posneg)
nlst$T2posneg <- ifelse(nlst$truefalse_scrnres_ly2 %in% c(4,5,6), 0, NA)
nlst$T2posneg <- ifelse(nlst$truefalse_scrnres_ly2 %in% c(1,2,3), 1, nlst$T2posneg)
nlst$prescr.1yrisk.T0 <- risk.kovalchik(0, 1, nlst, LCRAT, cox.death) # add 1y risk to NLST dataset for descriptive stats
# Subset to CT arm and create screening history variables
nlst.CT <- subset(nlst, screen_group=="CT")
nlst.CT <- mutate(nlst.CT, hist.T0.T1 = 1*(T0posneg==0 & T1posneg==0) + 2*(T0posneg==0 & T1posneg==1) + 3*(T0posneg==1 & T1posneg==0) + 4*(T0posneg==1 & T1posneg==1))
nlst.CT$hist.T0.T1 <- factor(nlst.CT$hist.T0.T1, levels=c(1,2,3,4), labels=c("Neg-Neg","Neg-Pos","Pos-Neg","Pos-Pos"))
nlst.CT <- mutate(nlst.CT, hist.T1.T2 = 1*(T1posneg==0 & T2posneg==0) + 2*(T1posneg==0 & T2posneg==1) + 3*(T1posneg==1 & T2posneg==0) + 4*(T1posneg==1 & T2posneg==1))
nlst.CT$hist.T1.T2 <- factor(nlst.CT$hist.T1.T2, levels=c(1,2,3,4), labels=c("Neg-Neg","Neg-Pos","Pos-Neg","Pos-Pos"))
### Create datasets for risk from T0 to T1. One for interval, one for screen-detected.
## .neg intended for analysis of interval cancer risk; .scrisk intended for analysis of screen-detected cancers
# 0 inadeq, 1 true-pos, 2 poss true-pos, 3 false-pos, 4 true-neg, 5 poss false-neg, 6 false-neg
# At risk in interval: T0 negatives only. Case status: false-negative at T0.
nlst.CT.T1.neg <- filter(nlst.CT, truefalse_scrnres_ly0 %in% c(4,5,6))
nlst.CT.T1.neg$case_T1_interval <- ifelse(nlst.CT.T1.neg$truefalse_scrnres_ly0==6, 1, 0)
# At risk for screen-detected at T1: either false-positive or true-negative at T0, and did not have any of the following at T1:
# inadequate image, left study, refused, wrong screen, erroneous report of LC, form not submitted (no missing values of scr_res0).
# Case status: case=1 AND either of (true-pos at T1 or T1 is coded as "not expected: cancer/death in screening window")
# %!in% is a negated %in%, presumably defined in hilary_functions.R -- verify
nlst.CT.T1.scrisk <- filter(nlst.CT, truefalse_scrnres_ly0 %in% c(2,3,4,5) & scr_res1 %!in% c(10,11,15,17,95,97))
nlst.CT.T1.scrisk$case_T1_screen <- ifelse(nlst.CT.T1.scrisk$case==1 &
                                             (nlst.CT.T1.scrisk$truefalse_scrnres_ly1==1 | nlst.CT.T1.scrisk$scr_res1 %in% c(23,24)), 1, 0)
### Create datasets for risk from T1 to T2. One for interval, one for screen-detected.
# At risk in interval: T1 negatives only. Case status: false-negative at T1.
nlst.CT.T2.neg <- filter(nlst.CT, truefalse_scrnres_ly1 %in% c(4,5,6))
nlst.CT.T2.neg$case_T2_interval <- ifelse(nlst.CT.T2.neg$truefalse_scrnres_ly1==6, 1, 0)
# At risk for screen-detected at T2: either false-positive or true-negative at T1, and did not have any of the following at T2:
# inadequate image, left study, refused, wrong screen, erroneous report of LC, form not submitted (no missing values of scr_res0).
# Case status: case=1 AND either of (true-pos at T2 or T2 is coded as "not expected: cancer/death in screening window")
nlst.CT.T2.scrisk <- filter(nlst.CT, truefalse_scrnres_ly1 %in% c(2,3,4,5) & scr_res2 %!in% c(10,11,15,17,95,97))
nlst.CT.T2.scrisk$case_T2_screen <- ifelse(nlst.CT.T2.scrisk$case==1 &
                                             (nlst.CT.T2.scrisk$truefalse_scrnres_ly2==1 | nlst.CT.T2.scrisk$scr_res2 %in% c(23,24)), 1, 0)
### Create a dataset for risk during "interval" after T2 (within 1 year)
nlst.CT.T3.neg <- filter(nlst.CT, truefalse_scrnres_ly2 %in% c(4,5,6))
nlst.CT.T3.neg$case_T3_interval <- ifelse(nlst.CT.T3.neg$truefalse_scrnres_ly2==6, 1, 0)
### Construct dataset to model risk of ALL interval cancers across all 3 screens
# Interval variable in data.interval datasets is 1 for T0-T1, 2 for T1-T2, and 3 for post-T2 intervals
# A person contributes one row per interval in which they were at risk (long format).
data.interval <- data.frame(pid=c(nlst.CT.T1.neg$pid, nlst.CT.T2.neg$pid, nlst.CT.T3.neg$pid),
                            case=c(nlst.CT.T1.neg$case_T1_interval, nlst.CT.T2.neg$case_T2_interval, nlst.CT.T3.neg$case_T3_interval),
                            interval=c(rep(1,times=nrow(nlst.CT.T1.neg)), rep(2, times=nrow(nlst.CT.T2.neg)), rep(3, times=nrow(nlst.CT.T3.neg))))
# Merge this back with covariates from NLST. Add screening history variable.
data.interval <- merge(data.interval, nlst.sub, by="pid", all.x=T)
data.interval <- merge(data.interval, select(nlst.CT, pid, hist.T0.T1, hist.T1.T2), by="pid", all.x=T, all.y=F) ## delete??
data.interval$screen.hist <- ifelse(data.interval$interval==2, data.interval$hist.T0.T1, ifelse(data.interval$interval==3, data.interval$hist.T1.T2, NA))
data.interval$screen.hist <- factor(data.interval$screen.hist, levels=c(1,2,3,4), labels=c("Neg-Neg","Neg-Pos","Pos-Neg","Pos-Pos"))
data.interval <- select(data.interval, -c(hist.T0.T1,hist.T1.T2)) ## delete??
data.interval <- arrange(data.interval, pid, interval)
# Update age, quit-years, and smoke-years by adding a year for T1 and T2
# (current smokers, qtyears==0, accrue smoke-years; former smokers accrue quit-years)
data.interval <- mutate(data.interval, age=ifelse(interval==2, age+1, ifelse(interval==3, age+2, age)),
                        smkyears=ifelse(interval==2 & qtyears==0, smkyears+1, ifelse(interval==3 & qtyears==0, smkyears+2, smkyears)),
                        qtyears=ifelse(interval==2 & qtyears>0, qtyears+1, ifelse(interval==3 & qtyears>0, qtyears+2, qtyears)))
data.interval <- mutate(data.interval, pkyears.cont=cpd*smkyears/20) # using new smoke-years, update pack-years, then re-categorize
data.interval <- mutate(data.interval, pkyears.cat=as.factor(ifelse(pkyears.cont>=30 & pkyears.cont<40, "[30,40)",
                        ifelse(pkyears.cont>=40 & pkyears.cont<50, "[40,50)", ifelse(pkyears.cont>=50 & pkyears.cont<999,"[50,Inf]",NA)))))
# Construct dataset to model risk of ALL screen-detected cancers (at T1 and T2)
# Interval variable in data.screen datasets is 1 for risk at T1 and 2 for risk at T2
data.screen <- data.frame(pid=c(nlst.CT.T1.scrisk$pid, nlst.CT.T2.scrisk$pid),
                          case=c(nlst.CT.T1.scrisk$case_T1_screen, nlst.CT.T2.scrisk$case_T2_screen),
                          screen.result=c(nlst.CT.T1.scrisk$T0posneg, nlst.CT.T2.scrisk$T1posneg),
                          interval=c(rep(1,times=nrow(nlst.CT.T1.scrisk)), rep(2, times=nrow(nlst.CT.T2.scrisk))))
# Merge this back with covariates from NLST
data.screen <- merge(data.screen, nlst.sub, by="pid", all.x=T)
# Add a variable for lagged screen result & a 6-level variable for all combinations
data.screen <- data.screen %>% group_by(pid) %>% mutate(lag.screen = lag(screen.result, order_by=interval))
data.screen <- mutate(data.screen, screen.comb = 1*(interval==1 & screen.result==0) +
                        2*(interval==1 & screen.result==1) + 3*(interval==2 & lag.screen==0 & screen.result==0) +
                        4*(interval==2 & lag.screen==0 & screen.result==1) + 5*(interval==2 & lag.screen==1 & screen.result==0) +
                        6*(interval==2 & lag.screen==1 & screen.result==1))
data.screen$screen.comb <- factor(data.screen$screen.comb, levels = c(1,2,3,4,5,6),
                                  labels = c("Neg","Pos","Neg-Neg","Neg-Pos","Pos-Neg","Pos-Pos"))
# Update age, quit-years, and smoke-years by adding a year for T1
data.screen <- mutate(data.screen, age=as.numeric(age), smkyears=as.numeric(smkyears), qtyears=as.numeric(qtyears))
data.screen <- mutate(data.screen, age=ifelse(interval==2, age+1, age),
                      smkyears=ifelse(interval==2 & qtyears==0, smkyears+1, smkyears),
                      qtyears=ifelse(interval==2 & qtyears>0, qtyears+1, qtyears))
data.screen <- mutate(data.screen, pkyears.cont=cpd*smkyears/20) # using new smoke-years, update pack-years, then re-categorize
data.screen <- mutate(data.screen, pkyears.cat=as.factor(ifelse(pkyears.cont>=30 & pkyears.cont<40, "[30,40)",
                      ifelse(pkyears.cont>=40 & pkyears.cont<50, "[40,50)", ifelse(pkyears.cont>=50 & pkyears.cont<999,"[50,Inf]",NA)))))
# Load abnormalities data (person-screen level) and merge with data.screen
# This dataset was generated by the program prepare_abn_data_vX.R
load("/Users/hrobbins827/Documents/PhD/NCI overflow/NLST/Analysis/Nodule data/abn.spl.20160810.rdata")
data.screen.abn <- merge(data.screen, abn.pl.all, by=c("pid","interval"), all.x=T, all.y=F)
# Replace NAs with 0 (not present) for appropriate variables
replacevars <- names(abn.pl.all)[!names(abn.pl.all) %in% c("pid","interval","LRcat","LRcatcol.neg","LRcatcol.pos")]
data.screen.abn[replacevars][is.na(data.screen.abn[replacevars])] <- 0
# Make a variable for including observations in Lung-RADS analysis
data.screen.abn$LR.include <- (data.screen.abn$LRcat %in% c("1","2","3","4A","4B","4X"))
# Merge abnormalities data (person-screen level) with data.interval
data.interval.abn <- merge(data.interval, abn.pl.all, by=c("pid","interval"), all.x=T, all.y=F)
data.interval.abn[replacevars][is.na(data.interval.abn[replacevars])] <- 0
# Will need this vector for exploratory analysis of abnormalities (24 Jan 2017 - also create interaction vectors)
# abnlist.neg contains a list of abnormalities variables that are relevant to negative CTs. abnlist.pos is the list for positive CTs.
abnlist <- abnlist.pos <- names(abn.pl.all)[3:32]
abnlist.neg <- abnlist[c(4,5,6,7,8,9,10,11,12,13,14)]
abnlist.neg.int <- lapply(abnlist.neg, function(x) {substitute(logit1yrisk:i, list(i=as.name(x)))}) # these lists create interaction terms
abnlist.pos.int <- lapply(abnlist.pos, function(x) {substitute(logit1yrisk:i, list(i=as.name(x)))})
# Create variable for log(diameter)
data.screen.abn$log.diam <- log(data.screen.abn$longest.diam+1)
# Calculate pre-screening risk inside this dataset
# NOTE(review): covariates are taken from data.interval but assigned into data.interval.abn;
# this relies on merge() having preserved row order/count (both sorted by pid, interval) -- TODO confirm,
# or pass data.interval.abn itself as at line below for data.screen.abn.
data.interval.abn$prescr.1yrisk <- risk.kovalchik(0, 1, data.interval, LCRAT, cox.death)
data.interval.abn <- mutate(data.interval.abn, log1yrisk=log(prescr.1yrisk), logit1yrisk=log(prescr.1yrisk/(1-prescr.1yrisk)))
data.screen.abn$prescr.1yrisk <- risk.kovalchik(0, 1, data.screen.abn, LCRAT, cox.death)
data.screen.abn <- mutate(data.screen.abn, log1yrisk=log(prescr.1yrisk), logit1yrisk=log(prescr.1yrisk/(1-prescr.1yrisk)))
# These datasets are needed to separately model screen-detected cancers incorporating abnormalities for negatives and false-positives
data.screen.abn.neg <- filter(data.screen.abn, screen.result==0)
data.screen.abn.pos <- filter(data.screen.abn, screen.result==1)
# Make a categorical variable for diameter
data.screen.abn.pos <- mutate(data.screen.abn.pos, diam.cat = 1*(longest.diam==0)+2*(longest.diam>0 & longest.diam<=5)+
                                3*(longest.diam>5 & longest.diam<=7) + 4*(longest.diam>7 & longest.diam<=10) +
                                5*(longest.diam>10 & longest.diam<=13) + 6*(longest.diam>13 & longest.diam<100))
data.screen.abn.pos$diam.cat <- factor(data.screen.abn.pos$diam.cat, levels=c(1:6),labels=c("0","4-5","6-7","8-10","11-13","14+"))
# Create a variable for any.growth that reflects a group in which growth can't be assessed (i.e. screen=T0)
data.screen.abn.pos <- mutate(data.screen.abn.pos, growth.3l =
                                1*(interval==1) + 2*(interval==2 & any.growth==0) + 3*(interval==2 & any.growth==1))
data.screen.abn.pos$growth.3l <- factor(data.screen.abn.pos$growth.3l, levels = c(1,2,3), labels=c("NA","No","Yes"))
# Make dataset of unique individuals for descriptive table of screen-negatives
all.subj.neg <- filter(nlst.CT, pid %in% data.interval.abn$pid | pid %in% data.screen.abn.neg$pid)
all.subj.neg <- mutate(all.subj.neg, age.cat=as.factor(ifelse(age>=55 & age<60, "55-59", ifelse(age>=60 & age<65, "60-64",
                       ifelse(age>=65 & age<70, "65-69", ifelse(age>=70 & age<75, "70-74", NA))))),
                       qtyears.cat=as.factor(ifelse(qtyears==0, "Current smoker", ifelse(qtyears>0 & qtyears<=5, "1-5",
                       ifelse(qtyears>5 & qtyears<=10, "6-10", ifelse(qtyears>10 & qtyears<99, "11 or more", NA))))),
                       bmi.cat=as.factor(ifelse(bmi>0 & bmi<18.5, "Underweight", ifelse(bmi>=18.5 & bmi<25, "Normal",
                       ifelse(bmi>=25 & bmi<30, "Overweight", ifelse(bmi>=30, "Obese", NA))))),
                       cpd.cat=as.factor(ifelse(cpd>0 & cpd<20, "<20", ifelse(cpd>=20 & cpd<30, "20-29",
                       ifelse(cpd>=30 & cpd<40, "30-39", ifelse(cpd>=40 & cpd<99, "40+", NA))))),
                       smkyears.cat=as.factor(ifelse(smkyears>0 & smkyears<30, "<30", ifelse(smkyears>=30 & smkyears<40, "30-39",
                       ifelse(smkyears>=40 & smkyears<50, "40-49", ifelse(smkyears>=50 & smkyears<99, "50+", NA))))))
# Run the main models
# Each model regresses case status on (log) pre-screening 1-year risk with no intercept,
# using a log link so that exp(fitted) multiplies the pre-screening risk.
# Overall effects (without specific CT features)
glm.interval <- glm(case ~ log1yrisk -1, data=data.interval.abn, family=binomial(link='log'))
data.interval.abn$post.risk.interv <- fitted.values(glm.interval)
glm.screen.neg <- glm(case ~ log1yrisk -1, data=data.screen.abn.neg, family=binomial(link='log'))
data.screen.abn.neg$post.risk.neg.overall <- fitted.values(glm.screen.neg)
# With specific CT findings
glm.int.abn <- glm(case ~ log1yrisk + log1yrisk:adenop.consol -1, data=data.interval.abn, family=binomial(link='log'))
data.interval.abn$post.risk.abn <- fitted.values(glm.int.abn)
glm.screen.neg.abn <- glm(case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:emphysema -1, data=data.screen.abn.neg, family=binomial(link='log'), na.action=na.exclude)
data.screen.abn.neg$post.risk.abn <- fitted.values(glm.screen.neg.abn)
# --------------------------- run code to this line for data setup ----------------------------- #
#### Descriptive stats ####
# Descriptive table 1 for analysis after NEGATIVE screen
nrow(all.subj.neg) # number of unique individuals
length(unique(filter(nlst.CT, T0posneg==0 | T1posneg==0 | T2posneg==0)$pid)) # confirm - this is the same # with at least one negative screen
length(unique(data.interval$pid)) # number of unique individuals in interval cancers analysis
CrossTable((data.interval %>% group_by(pid) %>% summarise(times.in.interv.analysis=n()))$times.in.interv.analysis) # times included in interval ca analysis
length(unique(data.screen.abn.neg$pid)) # number of unique individuals in next-screen analysis
CrossTable((data.screen.abn.neg %>% group_by(pid) %>% summarise(times.in.screen.analysis=n()))$times.in.screen.analysis) # times included in next-screen analysis
CrossTable(all.subj.neg$female, missing.include=T)
CrossTable(all.subj.neg$age.cat, missing.include=T)
CrossTable(all.subj.neg$race, missing.include=T) # 0 white, 1 black, 2 hispanic, 3 other
CrossTable(all.subj.neg$edu6, missing.include=T) # see codebook
CrossTable(all.subj.neg$bmi.cat, missing.include=T)
CrossTable(all.subj.neg$fam.lung.trend, missing.include=T) # none, 1, 2+
CrossTable(all.subj.neg$qtyears.cat, missing.include=T)
CrossTable(all.subj.neg$pkyears.cat, missing.include=T)
CrossTable(all.subj.neg$smkyears.cat, missing.include=T)
CrossTable(all.subj.neg$cpd.cat, missing.include=T)
CrossTable(all.subj.neg$emp, missing.include=T)
quantile(all.subj.neg$prescr.1yrisk.T0, probs=c(0.25, 0.5, 0.75)) # median IQR of pre-screening risk
# Other numbers
with(filter(data.screen.abn.neg, interval==2), CrossTable(screen.comb, case)) # numbers considered for Markov assumption test
c(sum(data.interval.abn$case), nrow(data.interval.abn), sum(data.interval.abn$case)/nrow(data.interval.abn)) # interval cancers: cases, # at risk, overall risk
c(sum(data.screen.abn.neg$case), nrow(data.screen.abn.neg), sum(data.screen.abn.neg$case)/nrow(data.screen.abn.neg)) # next-screen cancers after negative: cases, # at risk, overall risk
# NOTE(review): only prescr.1yrisk.T0 is shown being added to nlst/nlst.CT above; confirm
# that all.subj.neg$prescr.1yrisk exists (it may need to be prescr.1yrisk.T0 as at quantile() above).
range_without_outliers(all.subj.neg$prescr.1yrisk)
#### Model development: Interval cancer among NEGATIVES ####
# Interval cancers: overall model (no abnormalities) - properties of screening
# Confirm that pre-screening risk improves the model
int.nopsr <- glm(case ~ 1, data=data.interval.abn, family=binomial(link='log'))
summary(int.nopsr)
int.psr <- glm(case ~ log1yrisk+1, data=data.interval.abn, family=binomial(link='log'))
summary(int.psr)
1-pchisq(int.nopsr$deviance - int.psr$deviance, length(int.psr$coefficients)-length(int.nopsr$coefficients)) # LRT
# Overall model results
# glm.interval <- glm(case ~ log1yrisk -1, data=data.interval.abn, family=binomial(link='log')) # run above in setup
# data.interval.abn$post.risk.interv <- fitted.values(glm.interval) # run above in setup
summary(glm.interval)
confint(glm.interval)
# Does the risk coefficient differ by interval? No (LRT p=0.23). Steps below: fit model, estimate 3 exponents, get p-value, get counts
glm.interval.intervals <- glm(case ~ log1yrisk + log1yrisk:I(as.numeric(interval==2)) + log1yrisk:I(as.numeric(interval==3)) -1, data=data.interval.abn, family=binomial(link='log'))
c(coefficients(glm.interval.intervals)[1], coefficients(glm.interval.intervals)[1]+coefficients(glm.interval.intervals)[2], coefficients(glm.interval.intervals)[1]+coefficients(glm.interval.intervals)[3])
1-pchisq(glm.interval$deviance - glm.interval.intervals$deviance, length(glm.interval.intervals$coefficients)-length(glm.interval$coefficients))
with(data.interval.abn, table(interval))
# Do previous screens matter? No (LRT p=0.99)
glm.int.2levels <- glm(case ~ log1yrisk:as.factor(screen.hist) -1, data=filter(data.interval.abn, interval %in% c(2,3)), family=binomial(link='log'))
summary(glm.int.2levels)
confint(glm.int.2levels)
glm.int.1level <- glm(case ~ log1yrisk -1, data=filter(data.interval.abn, interval %in% c(2,3) & !is.na(screen.hist)), family=binomial(link='log'))
summary(glm.int.1level)
# BUG FIX: the original wrote df=length(glm.int.2levels$coefficients - length(glm.int.1level$coefficients)),
# which is the length of (coefficient vector minus a scalar), i.e. the number of parameters in the LARGER
# model, not the difference in parameter counts. The LRT df must be the difference, matching every other
# LRT in this script.
1-pchisq(glm.int.1level$deviance-glm.int.2levels$deviance, df=length(glm.int.2levels$coefficients)-length(glm.int.1level$coefficients))
# Interval cancers: effects of abnormalities
# Following a negative screen, the relevant CT features are in abnlist.neg. The relevant p-value is for the interaction (i.e. risk differs between the 0 and 1 levels)
# Backwards stepwise selection: selects other.above, benign.nodule, consolidation, adenopathy
int.full <- glm(paste("case ~ logit1yrisk -1 +",paste(abnlist.neg.int, collapse="+"),sep=""), data=data.interval.abn, family=binomial(link='logit'))
bsw.int <- step(int.full, direction="backward", scope = list(lower = case ~ logit1yrisk -1, upper = int.full))
# Look at a model including these 4 effects
summary(glm(case ~ log1yrisk + log1yrisk:other.above + log1yrisk:benign.nodule + log1yrisk:consolidation + log1yrisk:adenopathy -1, data=data.interval.abn, family=binomial(link='log')))
# Lasso using intermediate lambda: selects adenopathy and consolidation
set.seed(61116)
x <- model.matrix(case ~ logit1yrisk -1 + logit1yrisk:., data = data.interval.abn[,c("case","logit1yrisk",abnlist.neg)])
cv.lasso <- cv.glmnet(x, data.interval.abn$case, alpha=1, family="binomial")
out <- glmnet(x, data.interval.abn$case, alpha=1, family="binomial")
predict(out, type="coefficients", s=(cv.lasso$lambda.min+cv.lasso$lambda.1se)/2)
# Look at a model including these two effects
summary(glm(case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:adenopathy -1, data=data.interval.abn, family=binomial(link='log')))
# Based on discussion with Chris: include adenopathy and consolidation. Model as 1 variable (effect size is the same)
# Switch back to log scale for final model for interpretability (these models are run above in data setup section, as:)
# glm.int.abn <- glm(case ~ log1yrisk + log1yrisk:adenop.consol -1, data=data.interval.abn, family=binomial(link='log'))
# data.interval.abn$post.risk.abn <- fitted.values(glm.int.abn)
summary(glm.int.abn)
# Get estimate and CIs for the exponents (run ONE of the two `mat` lines, then the rest;
# note the second assignment overwrites the first if both are run)
mat <- c(1,0) # Use this matrix for "Neither noted"
mat <- c(1,1) # Use this matrix for adenopathy or consolidation
stder <- sqrt(c(t(mat) %*% vcov(glm.int.abn) %*% mat))
c(coefficients(glm.int.abn) %*% mat, (coefficients(glm.int.abn) %*% mat)-1.96*stder, (coefficients(glm.int.abn) %*% mat)+1.96*stder)
# Check for residual effects of LCRAT variables using likelihood ratio tests - the LRT for emp is 0.02, but the Wald is 0.06. We will say p>0.05.
titles <- c("var","null model # param", "extended model # param", "LRT p-value", "check same # obs")
mat.out.interv <- matrix(rep(NA),nrow=length(varlist),ncol=length(titles))
for (x in seq_along(varlist)) {
  mod.without <- glm(case ~ logit1yrisk + logit1yrisk:adenop.consol -1, data=data.interval.abn, family=binomial(link='logit'))
  mod.with <- glm(substitute(case ~ logit1yrisk + logit1yrisk:adenop.consol + logit1yrisk:i -1, list(i=as.name(varlist[x]))), data=data.interval.abn, family=binomial(link='logit'))
  print(summary(mod.with))
  mat.out.interv[x,] <- c(varlist[x], length(mod.without$coefficients), sum(!is.na(mod.with$coefficients)), 1-pchisq(mod.without$deviance-mod.with$deviance, df=sum(!is.na(mod.with$coefficients))-length(mod.without$coefficients)), I(length(mod.without$residuals)==length(mod.with$residuals)))
}
rbind(titles, mat.out.interv)
#### Model development: Next-screen cancer among NEGATIVES ####
# Overall model for next-screen cancer among negatives (no abnormalities) - properties of screening
# Confirm that pre-screening risk improves the model
ns.nopsr <- glm(case ~ 1, data=data.screen.abn.neg, family=binomial(link='log'))
summary(ns.nopsr)
ns.psr <- glm(case ~ log1yrisk+1, data=data.screen.abn.neg, family=binomial(link='log'))
summary(ns.psr)
1-pchisq(ns.nopsr$deviance - ns.psr$deviance, length(ns.psr$coefficients)-length(ns.nopsr$coefficients))
# Overall model results
# glm.screen.neg <- glm(case ~ log1yrisk -1, data=data.screen.abn.neg, family=binomial(link='log')) # this is run above the line
# data.screen.abn.neg$post.risk.neg.overall <- fitted.values(glm.screen.neg)
summary(glm.screen.neg)
confint(glm.screen.neg)
# Does the interval matter? no (p=0.38)
glm.screen.neg.by.int <- glm(case ~ log1yrisk:as.factor(interval) -1, data=data.screen.abn.neg, family=binomial(link='log'))
summary(glm.screen.neg.by.int)
1-pchisq(glm.screen.neg$deviance - glm.screen.neg.by.int$deviance, length(glm.screen.neg.by.int$coefficients) - length(glm.screen.neg$coefficients))
with(data.screen.abn.neg, table(interval))
# Do previous screens matter? no (p=0.26)
glm.screen.neg.2levels <- glm(case ~ log1yrisk:screen.comb -1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(glm.screen.neg.2levels)
confint(glm.screen.neg.2levels)
glm.screen.neg.1level <- glm(case ~ log1yrisk -1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(glm.screen.neg.1level)
1-pchisq(glm.screen.neg.1level$deviance - glm.screen.neg.2levels$deviance, length(glm.screen.neg.2levels$coefficients) - length(glm.screen.neg.1level$coefficients))
# Do previous screens matter if we ignore pre-screening risk? p=0.14
glm.screen.neg.2levels.nopsr <- glm(case ~ screen.comb -1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(glm.screen.neg.2levels.nopsr)
glm.screen.neg.1level.nopsr <- glm(case ~ 1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(glm.screen.neg.1level.nopsr)
1-pchisq(glm.screen.neg.1level.nopsr$deviance - glm.screen.neg.2levels.nopsr$deviance, length(glm.screen.neg.2levels.nopsr$coefficients) - length(glm.screen.neg.1level.nopsr$coefficients))
# Analysis to compare with Patz result
# Start with everyone with T0 screen being negative, then compare those who are T1-false-positive or
# negative for the risk of screen-detected cancer at T2.
patz.m1 <- glm(case ~ log1yrisk -1, data=filter(data.screen.abn, interval==2 & screen.comb %in% c("Neg-Pos","Neg-Neg")), family=binomial(link='log'))
summary(patz.m1)
patz.m2 <- glm(case ~ log1yrisk:screen.comb -1, data=filter(data.screen.abn, interval==2 & screen.comb %in% c("Neg-Pos","Neg-Neg")), family=binomial(link='log'))
summary(patz.m2)
1-pchisq(patz.m1$deviance - patz.m2$deviance, length(patz.m2$coefficients) - length(patz.m1$coefficients))
# Effects of abnormalities for next-screen among negatives
# Backwards stepwise: selects nod6.not.susp, opac.fibr, consolidation, emphysema
scr.neg.full <- glm(paste("case ~ logit1yrisk -1 +",paste(abnlist.neg.int, collapse="+"),sep=""), data=data.screen.abn.neg, family=binomial(link='logit'))
bsw.scr.neg <- step(scr.neg.full, direction="backward", scope = list(lower = case ~ logit1yrisk -1, upper = scr.neg.full))
# Look at a model including these 4 effects
summary(glm(case ~ log1yrisk + log1yrisk:opac.fibr + log1yrisk:nod6.not.susp + log1yrisk:consolidation + log1yrisk:emphysema -1, data=data.screen.abn.neg, family=binomial(link='log')))
# Lasso using intermediate lambda: selects ONLY logit1yrisk
set.seed(61116)
x <- model.matrix(case ~ logit1yrisk -1 + logit1yrisk:. , data = data.screen.abn.neg[,c("case","logit1yrisk",abnlist.neg)])
cv.lasso <- cv.glmnet(x, data.screen.abn.neg$case, alpha=1, family="binomial")
out <- glmnet(x, data.screen.abn.neg$case, alpha=1, family="binomial")
predict(out, type="coefficients", s=(cv.lasso$lambda.min+cv.lasso$lambda.1se)/2)
# From discussion with Chris: keep consolidation and emphysema.
# Switch back to log scale for final model for interpretability (this model is run above in data setup)
# glm.screen.neg.abn <- glm(case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:emphysema -1, data=data.screen.abn.neg, family=binomial(link='log'))
# data.screen.abn.neg$post.risk.abn <- fitted.values(glm.screen.neg.abn)
summary(glm.screen.neg.abn)
# Get estimates and CIs for the exponents (run ONE of the three `mat` lines, then the rest;
# a later assignment overwrites the earlier ones if all are run)
mat <- c(1,0,0) # Use this matrix for "neither noted"
mat <- c(1,1,0) # Use this matrix for consolidation
mat <- c(1,0,1) # Use this matrix for emphysema
stder <- sqrt(c(t(mat) %*% vcov(glm.screen.neg.abn) %*% mat))
c(coefficients(glm.screen.neg.abn) %*% mat, (coefficients(glm.screen.neg.abn) %*% mat)-1.96*stder, (coefficients(glm.screen.neg.abn) %*% mat)+1.96*stder)
# Check for residual effects of LCRAT variables. All p>0.05
titles <- c("var","null model # param", "extended model # param", "LRT p-value", "check same # obs")
mat.out.ns <- matrix(rep(NA),nrow=length(varlist),ncol=length(titles))
for (x in seq_along(varlist)) {
  mod.without <- glm(case ~ logit1yrisk + logit1yrisk:consolidation + logit1yrisk:emphysema -1, data=data.screen.abn.neg, family=binomial(link='logit'))
  mod.with <- glm(substitute(case ~ logit1yrisk + logit1yrisk:consolidation + logit1yrisk:emphysema + logit1yrisk:i -1, list(i=as.name(varlist[x]))), data=data.screen.abn.neg, family=binomial(link='logit'))
  mat.out.ns[x,] <- c(varlist[x], length(mod.without$coefficients), sum(!is.na(mod.with$coefficients)), 1-pchisq(mod.without$deviance-mod.with$deviance, df=sum(!is.na(mod.with$coefficients))-length(mod.without$coefficients)), I(length(mod.without$residuals)==length(mod.with$residuals)))
}
rbind(titles, mat.out.ns)
# What if we account for screening history along with pre-screening risk, emphysema, and consolidation? p=0.34
m1 <- glm(case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:emphysema -1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(m1)
m2 <- glm(case ~ log1yrisk:screen.comb + log1yrisk:consolidation + log1yrisk:emphysema -1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(m2)
1-pchisq(m1$deviance - m2$deviance, length(m2$coefficients) - length(m1$coefficients))
#### Additional analyses (GEE, AUCs, cross-validation, etc) ####
### Comparison with GEE - this impacts the SEs negligibly ###
# Interval cancer models
# BUG FIX: the original fit this GEE on `data.interval`, which never receives the
# log1yrisk column (pre-screening risk is only added to data.interval.abn above) and
# so would error; data.interval.abn is also the dataset glm.interval was fit on, so
# the GEE/GLM comparison is now like-for-like.
summary(geeglm(case ~ log1yrisk -1, id=pid, data=data.interval.abn, family=binomial(link='log'), corstr="exchangeable", waves=interval))
summary(glm.interval)
summary(geeglm(case ~ log1yrisk + log1yrisk:adenop.consol -1, id=pid, data=data.interval.abn, family=binomial(link='log'), corstr="exchangeable", waves=interval))
# BUG FIX: the original called summary(glm.int.abn.log), an object that is never
# defined (it appears only in comments); the fitted model object is glm.int.abn.
summary(glm.int.abn)
# Next-screen among negatives model
summary(geeglm(case ~ log1yrisk -1, id=pid, data=data.screen.abn.neg, family=binomial(link='log'), corstr="exchangeable", waves=interval))
summary(glm.screen.neg)
summary(geeglm(case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:emphysema -1, id=pid, data=data.screen.abn.neg, family=binomial(link='log'), corstr="exchangeable", waves=interval))
summary(glm.screen.neg.abn)
### Calibration and validation analyses ###
# 10-fold cross-validated calibration
# Interval cancers
set.seed(61116)
data.interval.abn$randgrp <- base::sample(1:10, nrow(data.interval.abn), replace=T)
data.interval.abn$cvpred <- NA
for (i in 1:10) {
fit <- glm(formula = case ~ log1yrisk + log1yrisk:adenop.consol - 1,
family = binomial(link = "log"), data = filter(data.interval.abn, randgrp!=i))
data.interval.abn[data.interval.abn$randgrp==i,]$cvpred <- predict(fit, newdata=data.interval.abn[data.interval.abn$randgrp==i,], type="response")
}
data.interval.abn <- mutate(data.interval.abn, cvpred.ntile = ntile(cvpred, 5))
data.interval.abn %>% group_by(cvpred.ntile) %>% summarise(pred.cases= sum(cvpred), obs.cases = sum(case))
c(sum(data.interval.abn$cvpred), sum(data.interval.abn$case)) # number obs and expected cases
poisson.test(round(sum(data.interval.abn$cvpred),0), sum(data.interval.abn$case), alternative="two.sided") # p-value, requires rounding
# Next-screen among screen-negatives: same 10-fold cross-validated calibration
# as above, for the next-screen model (consolidation and emphysema interactions).
set.seed(61116)
data.screen.abn.neg$randgrp <- base::sample(1:10, nrow(data.screen.abn.neg), replace = TRUE)
data.screen.abn.neg$cvpred <- NA
for (i in seq_len(10)) {
  fit <- glm(formula = case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:emphysema - 1,
             family = binomial(link = "log"), data = filter(data.screen.abn.neg, randgrp != i))
  holdout <- data.screen.abn.neg$randgrp == i
  data.screen.abn.neg$cvpred[holdout] <- predict(fit, newdata = data.screen.abn.neg[holdout, ], type = "response")
}
# Calibration by quintile of cross-validated risk: predicted vs. observed cases.
data.screen.abn.neg <- mutate(data.screen.abn.neg, cvpred.ntile = ntile(cvpred, 5))
data.screen.abn.neg %>% group_by(cvpred.ntile) %>% summarise(pred.cases = sum(cvpred), obs.cases = sum(case))
c(sum(data.screen.abn.neg$cvpred), sum(data.screen.abn.neg$case)) # number obs and expected cases
# Overall expected/observed test (rounded to an integer count for poisson.test).
poisson.test(round(sum(data.screen.abn.neg$cvpred)), sum(data.screen.abn.neg$case), alternative = "two.sided")
# 10-fold cross validation to get CV error. The first delta is standard version; second is bias-corrected.
# NOTE(review): cv.glm() comes from the boot package; glm.int.abn and
# glm.screen.neg.abn are the final log-link models fit in the setup section.
set.seed(61116)
cv.err.int <- cv.glm(data.interval.abn, glm.int.abn, K=10)
cv.err.int$delta
cv.err.screen.neg <- cv.glm(data.screen.abn.neg, glm.screen.neg.abn, K=10)
cv.err.screen.neg$delta
### Calculate AUCs ###
## Regular AUCs. By default, the 95% CI are computed with 2000 stratified bootstrap replicates.
library(pROC)
# Interval cancer model
# post.risk.abn holds the fitted risks from the CT-findings models (assigned
# in the setup section); roc() computes the AUC of risk vs. case status.
with(filter(data.interval.abn, interval==1), roc(case, post.risk.abn, ci=T, plot=T)) # T0-T1 - change interval for T1-T2, post-T2
# Next-screen model among negatives
with(filter(data.screen.abn.neg, interval==1), roc(case, post.risk.abn, ci=T, plot=T)) # T1 - change interval for T2
## Optimism-corrected AUCs - have to use logistic models for this, and have to actually add the interaction terms to the dataset.
library(rms)
# Precompute the interaction products as ordinary columns so lrm() can use
# them directly (see comment above).
data.screen.abn.neg <- mutate(data.screen.abn.neg, logit1yriskconsolidation = logit1yrisk*consolidation, logit1yriskemphysema = logit1yrisk*emphysema)
data.interval.abn <- mutate(data.interval.abn, logit1yriskadenopconsol = logit1yrisk*adenop.consol)
# Interval cancer model
mod.int <- lrm(case ~ logit1yrisk + logit1yriskadenopconsol -1, x=T, y=T, data=data.interval.abn)
set.seed(61116)
validate(mod.int, B=1000)
# CAUTION: the Dxy values below are hard-coded from a previous run of
# validate() and must be re-copied by hand if the data or models change.
c(0.5*(0.5072+1), 0.5*(0.5082+1)) # AUC = 0.5(Dxy+1). Naive, optimism-corrected AUCs - 0.75 is OC-AUC
# Next-screen model among negatives
mod.ns <- lrm(case ~ logit1yrisk + logit1yriskconsolidation + logit1yriskemphysema -1, x=T, y=T, data=data.screen.abn.neg)
set.seed(61116)
validate(mod.ns, B=1000) # AUC = 0.5(Dxy+1)
# CAUTION: hard-coded Dxy values, same caveat as above.
c(0.5*(0.4760+1), 0.5*(0.4689+1)) # AUC = 0.5(Dxy+1). Naive, optimism-corrected AUCs - 0.73 is OC-AUC
#### Figures ####
# Effect of screen findings on risk of INTERVAL ca among screen-negatives
# Figure: boxplots of pre-screening 1-year risk vs. post-screen risk during
# the T0-T1 interval, split by presence of adenopathy/consolidation. Boxplot
# widths are scaled by group prevalence (98% vs 2%); annotation positions,
# angles and axis limits are hand-tuned to this dataset and output size.
med.risk.interv.prescr <- median(filter(data.interval.abn, interval==1)$prescr.1yrisk, na.rm=T)
med.risk.interv.post.noac <- median(filter(data.interval.abn, interval==1 & adenop.consol==0)$post.risk.abn, na.rm=T)
med.risk.interv.post.ac <- median(filter(data.interval.abn, interval==1 & adenop.consol==1)$post.risk.abn, na.rm=T)
png(file="/Users/hrobbins827/Documents/PhD/NCI overflow/NLST/Figures/screen_neg_interval_ad_con.png",width=1200,height=850)
ggplot() +
theme(panel.background = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
axis.title = element_text(size=28),
axis.text = element_text(colour="black", size=28)) +
scale_y_continuous(labels=scales::percent, limits=c(0, 0.045)) +
scale_x_continuous(breaks=NULL) +
# scale_x_continuous(breaks=c(0,1), labels = c("Negative screen", "1-year interval")) +
ylab("1-year lung cancer risk, %\n") + xlab("") +
geom_boxplot(data=subset(data.interval.abn, interval==1), aes(x=0, y=prescr.1yrisk), lwd=1, width=0.4, outlier.shape=NA) +
geom_boxplot(data=subset(data.interval.abn, interval==1 & adenop.consol==0), aes(x=0.9, y=post.risk.abn), lwd=1, width=0.98*.8, outlier.shape=NA) +
geom_boxplot(data=subset(data.interval.abn, interval==1 & adenop.consol==1), aes(x=1.1, y=post.risk.abn), lwd=1, width=0.02*.8, outlier.shape=NA) +
# Dashed segments connect the pre-screening median to each post-screen median.
geom_segment(aes(x=0, y=med.risk.interv.prescr, xend=0.9, yend=med.risk.interv.post.noac), linetype="dashed", size=0.6) +
geom_segment(aes(x=0, y=med.risk.interv.prescr, xend=1.1, yend=med.risk.interv.post.ac), linetype="dashed", size=0.6) +
annotate(geom="text", x=0.6, y=0.0062, label = "Adenopathy or consolidation (2%)", angle=4, size=9) +
annotate(geom="text", x=0.45, y=0.0028, label = "Neither noted (98%)", angle=-4, size=9) +
annotate(geom="text", x=0, y=0.045, label="Pre-screening risk", size=9) +
annotate(geom="text", x=1, y=0.045, label="Risk during 1-year interval", size=9)
dev.off()
# Numbers for the text
with(data.interval.abn, CrossTable(interval, adenop.consol)) # 1.8% have adenop or consol at T0
# Some percentiles for below the figure
neg.i.psr.q <- quantile(filter(data.interval.abn, interval==1)$prescr.1yrisk, probs=c(0.25, 0.5, 0.75))
neg.i.no.q <- quantile(filter(data.interval.abn, interval==1 & adenop.consol==0)$post.risk.abn, probs=c(0.25, 0.5, 0.75))
neg.i.adcon.q <- quantile(filter(data.interval.abn, interval==1 & adenop.consol==1)$post.risk.abn, probs=c(0.25, 0.5, 0.75))
rbind(neg.i.psr.q, neg.i.no.q, neg.i.adcon.q) # print the quantiles for each group
c(neg.i.no.q[2]/neg.i.psr.q[2], neg.i.adcon.q[2]/neg.i.psr.q[2]) # median RRs for no, adenop.consol
c(neg.i.no.q[2]-neg.i.psr.q[2], neg.i.adcon.q[2]-neg.i.psr.q[2]) # median RDs for no, adenop.consol
range_without_outliers(filter(data.interval.abn, interval==1 & adenop.consol==0)$post.risk.abn) # this uses my function defined in hilary_functions.R
# Effect of screen findings on risk of SCREEN-DETECTED ca among screen negatives
# Note: I am not making a boxplot for the N=36 with emphysema and consolidation.
# Figure: boxplots of pre-screening 1-year risk vs. risk at the next screen,
# split by CT emphysema / consolidation. Widths are scaled by group prevalence;
# annotation positions and angles are hand-tuned to this dataset.
med.risk.screen.neg.prescr <- median(filter(data.screen.abn.neg, interval==1)$prescr.1yrisk)
med.risk.screen.neg.neither <- median(filter(data.screen.abn.neg, interval==1 & emphysema==0 & consolidation==0)$post.risk.abn)
med.risk.screen.neg.emp <- median(filter(data.screen.abn.neg, interval==1 & emphysema==1)$post.risk.abn)
med.risk.screen.neg.consol <- median(filter(data.screen.abn.neg, interval==1 & consolidation==1)$post.risk.abn)
png(file="/Users/hrobbins827/Documents/PhD/NCI overflow/NLST/Figures/screen_neg_emp_consol.png",width=1200,height=850)
ggplot() +
theme(panel.background = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
axis.title = element_text(size=28),
axis.text = element_text(colour="black", size=28)) +
scale_y_continuous(labels=scales::percent, limits=c(0, 0.045)) +
scale_x_continuous(breaks=NULL) +
# scale_x_continuous(breaks=c(0,1), labels = c("Negative screen", "Screen")) + # remove labels for manuscript
ylab("1-year lung cancer risk, %\n") + xlab("") +
geom_boxplot(data=subset(data.screen.abn.neg, interval==1), aes(x=0, y=prescr.1yrisk), lwd=1, width=0.4, outlier.shape=NA) +
geom_boxplot(data=subset(data.screen.abn.neg, interval==1 & emphysema==0 & consolidation==0), aes(x=0.8, y=post.risk.abn), lwd=1, width=0.69*1.2, outlier.shape=NA) +
geom_boxplot(data=subset(data.screen.abn.neg, interval==1 & emphysema==1), aes(x=1, y=post.risk.abn), lwd=1, width=.3*1.2, outlier.shape=NA) +
geom_boxplot(data=subset(data.screen.abn.neg, interval==1 & consolidation==1), aes(x=1.2, y=post.risk.abn), lwd=1, width=.01*1.2, outlier.shape=NA) +
geom_segment(aes(x=0, y=med.risk.screen.neg.prescr, xend=0.8, yend=med.risk.screen.neg.neither), linetype="dashed", size=0.6) +
geom_segment(aes(x=0, y=med.risk.screen.neg.prescr, xend=1.0, yend=med.risk.screen.neg.emp), linetype="dashed", size=0.6) +
# NOTE(review): the -0.0019 offset below presumably nudges the segment
# endpoint for visual clarity only — confirm before reusing on new data.
geom_segment(aes(x=0, y=med.risk.screen.neg.prescr, xend=1.2, yend=med.risk.screen.neg.consol-0.0019), linetype="dashed", size=0.6) +
annotate(geom="text", x=0.3, y=0.0003, label = "Neither (70%)", angle=-4, size=9) +
annotate(geom="text", x=0.5, y=0.0052, label = "Emphysema (30%)", angle=2, size=9) +
annotate(geom="text", x=0.65, y=0.0105, label = "Consolidation (0.6%)", angle=12, size=9) +
annotate(geom="text", x=0, y=0.045, label="Pre-screening risk", size=9) +
annotate(geom="text", x=1, y=0.045, label="Risk at next screen", size=9)
dev.off()
# Numbers for the text: Among T0-negatives: prevalence of self-reported emphysema (7%), CT emphysema (30%), and consolidation (0.6%)
with(filter(data.screen.abn.neg, interval==1), CrossTable(emp))
with(filter(data.screen.abn.neg, interval==1), CrossTable(emphysema))
with(filter(data.screen.abn.neg, interval==1), CrossTable(consolidation))
with(filter(data.screen.abn.neg, interval==1), CrossTable(emphysema, consolidation))
with(data.screen.abn.neg, CrossTable(interval, I(emphysema==0 & consolidation==0))) # no emphysema NOR consolidation (70% at T0)
# Thus .738 *.696 = 51% of participants would be screen-negative without emp or consol.
# Some percentiles for below the figure
quantile(filter(data.screen.abn.neg, interval==1)$post.risk.abn, probs=c(0.25, 0.5, 0.75, 0.8, 0.85, 0.9)) # overall quantiles among all negatives
neg.s.psr.q <- quantile(filter(data.screen.abn.neg, interval==1)$prescr.1yrisk, probs=c(0.25, 0.5, 0.75))
neg.s.no.no.q <- quantile(filter(data.screen.abn.neg, interval==1 & emphysema==0 & consolidation==0)$post.risk.abn, probs=c(0.25, 0.5, 0.75))
neg.s.emp.q <- quantile(filter(data.screen.abn.neg, interval==1 & emphysema==1)$post.risk.abn, probs=c(0.25, 0.5, 0.75))
neg.s.consol.q <- quantile(filter(data.screen.abn.neg, interval==1 & consolidation==1)$post.risk.abn, probs=c(0.25, 0.5, 0.75))
rbind(neg.s.psr.q, neg.s.no.no.q, neg.s.emp.q, neg.s.consol.q) # print the quantiles for each group
rbind(neg.s.no.no.q[2]/neg.s.psr.q[2], neg.s.emp.q[2]/neg.s.psr.q[2], neg.s.consol.q[2]/neg.s.psr.q[2]) # median RRs for no-no, emp, consol
rbind(neg.s.no.no.q[2]-neg.s.psr.q[2], neg.s.emp.q[2]-neg.s.psr.q[2], neg.s.consol.q[2]-neg.s.psr.q[2]) # median RDs for no-no, emp, consol
# Potential risk THRESHOLDS for longer interval after negative screen. Use T0-negatives and cases at T1
# Note: 64 cancers at T1; only 30 among the 70% with no emp or consol.
# Make a dataset for impact of different risk thresholds among all screen-negatives.
num_T0_neg <- nrow(filter(data.screen.abn.neg, interval==1)) # change to interval==2 to look at T2
num_T0_all <- nrow(filter(data.screen.abn, interval==1)) # change to interval==2 to look at T2
cases_T1 <- sum(filter(data.screen.abn.neg, interval==1)$case) # change to interval==2 to look at T2
thresholds <- seq(0, 0.1, 0.0001)
# Preallocate one slot per candidate threshold instead of growing the result
# vectors inside the loop (identical results, avoids repeated copying).
n <- perc_negs <- perc_all <- ca_N <- perc_of_ca <- perc_w_ca <-
  perc_w_emp <- perc_w_consol <- perc_w_emp_or_consol <- numeric(length(thresholds))
for (i in seq_along(thresholds)) {
  dat <- filter(data.screen.abn.neg, interval==1 & post.risk.abn<=thresholds[i]) # change to interval==2 to look at T2
  n[i] <- nrow(dat)
  perc_negs[i] <- 100*nrow(dat)/num_T0_neg # percent of all negatives below threshold
  perc_all[i] <- 100*nrow(dat)/num_T0_all # percent of all individuals below threshold
  ca_N[i] <- sum(dat$case) # number with cancer below threshold
  perc_of_ca[i] <- 100*sum(dat$case)/cases_T1 # percent of cancers falling below threshold
  perc_w_ca[i] <- 100*sum(dat$case)/nrow(dat) # percent of individuals below threshold who have cancer (NaN when nobody falls below)
  perc_w_emp[i] <- 100*sum(dat$emphysema)/nrow(dat) # percent of individuals below threshold who have CT-emphysema
  perc_w_consol[i] <- 100*sum(dat$consolidation)/nrow(dat) # percent of individuals below threshold who have CT-consolidation
  perc_w_emp_or_consol[i] <- 100*sum(dat$emphysema==1 | dat$consolidation==1)/nrow(dat) # percent with CT-emphysema or consolidation
}
thres.plot.all <- as.data.frame(cbind(threshold=100*thresholds, n, perc_negs, perc_all, ca_N, perc_of_ca, perc_w_ca, perc_w_emp, perc_w_consol, perc_w_emp_or_consol))
# Plot the trade-off between the % of screen-negatives given a longer interval
# and the % of next-screen cancers whose diagnosis would be delayed, with a
# handful of thresholds highlighted and labelled.
thres.of.interest <- c(0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.8)
png(file="/Users/hrobbins827/Documents/PhD/NCI overflow/NLST/Figures/negatives_threshold.png",width=1200,height=850)
ggplot() + geom_line(data=thres.plot.all, aes(x=perc_negs, y=perc_of_ca), size=0.8) +
theme(panel.background = element_rect(fill=NA), panel.grid.major=element_line(colour="grey88"), panel.grid.minor=element_line(colour="grey88"),
axis.line = element_line(colour="black"), axis.title = element_text(size=28), axis.text = element_text(colour="black", size=28)) +
ylab("% of detectable next-screen cancers with delayed diagnosis") + xlab("\n% of screen-negatives with longer-than-annual interval") +
geom_point(data=subset(thres.plot.all, threshold %in% thres.of.interest), aes(x=perc_negs, y=perc_of_ca), size=4.5) +
geom_text(data=subset(thres.plot.all, threshold %in% thres.of.interest[1:5]), aes(x=perc_negs, y=perc_of_ca, label=paste("r \u2264", as.character(threshold), "%", sep="")), size=9, vjust=-1, hjust=0.8) +
geom_text(data=subset(thres.plot.all, threshold %in% thres.of.interest[6:8]), aes(x=perc_negs, y=perc_of_ca, label=paste("r \u2264", as.character(threshold), "%", sep="")), size=9, hjust=-0.25)
dev.off()
# Print some numbers to highlight in the text
filter(thres.plot.all, threshold %in% thres.of.interest)
|
/R/old/analysis_nlst_split_v24_negatives_only_Hilaryannotation.R
|
no_license
|
marskar/nlst
|
R
| false
| false
| 45,591
|
r
|
#### Data loading and setup ####
# NOTE(review): rm(list=ls()) and setwd() in a script are fragile (they clear
# the caller's workspace and hard-code one machine's directory layout); kept
# because the script is designed to be run top-to-bottom as-is.
rm(list=ls(all=TRUE)) #Clears console
options(digits=7)
setwd("/Users/hrobbins827/Documents/PhD/NCI overflow/NLST/Analysis")
# Read in my personal defined functions and the Kovalchik prediction function
source("/Users/hrobbins827/Documents/PhD/Textbooks Resources/R functions/hilary_functions.R")
source("kovalchik.R")
# Read in library functions including Stephanie's coxph.risk which I installed from the local tar.gz file
# install coxph.risk tar file
# install lcmodels package
packages <- c("dplyr","ggplot2","survival","gmodels","coxph.risk","geepack","MESS","psych","Hmisc","glmnet","boot")
# Use library() with the full argument name. The original
# `lapply(packages, require, c = T)` relied on partial matching of `c` to
# `character.only` and on require(), which silently returns FALSE for a
# missing package; library() fails fast instead.
invisible(lapply(packages, library, character.only = TRUE))
# Load PLCO and NLST data sets. New versions provided by Li Cheung on 11 July 2016, 14 July 2016, 20 July 2016, 8 Aug 2016.
load("hilary.RData") # Load NLST and PLCO data
# In the PLCO dataset, impute missing family history values to 0
plco$fam.lung.trend <- ifelse(is.na(plco$fam.lung.trend), 0, plco$fam.lung.trend)
plco.control <- subset(plco, control.group==1) # control arm of PLCO who had no chest xray
# Remove people with <30 pack-years and age<55 or age>74 from NLST
nlst <- filter(nlst, pkyears.cat!="[0,30)" & age>=55 & age<=74)
# Make a new pack-years variable to get rid of the [0,30) level
nlst <- mutate(nlst, pkyears.cat.clone=ifelse(pkyears.cat=="[30,40)", "[30,40)", ifelse(pkyears.cat=="[40,50)", "[40,50)",
                                              ifelse(pkyears.cat=="[50,Inf]", "[50,Inf]", NA)))) # I checked this with a table
nlst$pkyears.cat <- as.factor(nlst$pkyears.cat.clone)
# Make a variable for days to diagnosis (NA for non-cases)
nlst$days_to_dx <- ifelse(nlst$case==1, 365*nlst$incidence.years, NA)
# Make a subset of NLST data with the LCRAT variables that we will need later to merge back with at-risk datasets
varlist <- c("female","race","edu6","fam.lung.trend","emp","bmi","cpd","pkyears.cat","age","qtyears","smkyears")
nlst.sub <- as.data.frame(cbind(nlst[,varlist], pid=nlst$pid, lss=as.numeric(nlst$lss)))
# To later calculate pre-screening risk, we must first fit the incidence model and other-cause death models in PLCO.
# LCRAT: Cox model for lung-cancer incidence; cox.death: Cox model for
# other-cause (competing-risk) death. Both are fit in the PLCO control arm and
# later fed to risk.kovalchik() to compute absolute 1-year risks.
LCRAT <- coxph(Surv(incidence.years, case) ~
female+race+edu6+fam.lung.trend+emp+I(bmi<=18.5)+I(cpd>20)+as.factor(pkyears.cat)+
I(log(age))+I(log(bmi))+I(log(qtyears+1))+smkyears,data = plco.control)
cox.death <- coxph(Surv(years.followed, other.cause.death) ~
female+race+edu6+emp+I(bmi <= 18.5)+I(cpd>20)+as.factor(pkyears.cat)+I((age)^2)+I((bmi-25)^2)+
I(log(qtyears+1))+smkyears, data = plco.control)
# Subset to CT arm in NLST and make a pos/neg variable for the first, second, and third screens
# Screen-result codes 1-3 are coded positive (1), codes 4-6 negative (0);
# any other code stays NA (see the legend further down in this file).
nlst$T0posneg <- ifelse(nlst$truefalse_scrnres_ly0 %in% c(4,5,6), 0, NA)
nlst$T0posneg <- ifelse(nlst$truefalse_scrnres_ly0 %in% c(1,2,3), 1, nlst$T0posneg)
nlst$T1posneg <- ifelse(nlst$truefalse_scrnres_ly1 %in% c(4,5,6), 0, NA)
nlst$T1posneg <- ifelse(nlst$truefalse_scrnres_ly1 %in% c(1,2,3), 1, nlst$T1posneg)
nlst$T2posneg <- ifelse(nlst$truefalse_scrnres_ly2 %in% c(4,5,6), 0, NA)
nlst$T2posneg <- ifelse(nlst$truefalse_scrnres_ly2 %in% c(1,2,3), 1, nlst$T2posneg)
nlst$prescr.1yrisk.T0 <- risk.kovalchik(0, 1, nlst, LCRAT, cox.death) # add 1y risk to NLST dataset for descriptive stats
# Subset to CT arm and create screening history variables
# hist.T0.T1 / hist.T1.T2 encode the pair of consecutive screen results as a
# 4-level factor (Neg-Neg, Neg-Pos, Pos-Neg, Pos-Pos).
nlst.CT <- subset(nlst, screen_group=="CT")
nlst.CT <- mutate(nlst.CT, hist.T0.T1 = 1*(T0posneg==0 & T1posneg==0) + 2*(T0posneg==0 & T1posneg==1) + 3*(T0posneg==1 & T1posneg==0) + 4*(T0posneg==1 & T1posneg==1))
nlst.CT$hist.T0.T1 <- factor(nlst.CT$hist.T0.T1, levels=c(1,2,3,4), labels=c("Neg-Neg","Neg-Pos","Pos-Neg","Pos-Pos"))
nlst.CT <- mutate(nlst.CT, hist.T1.T2 = 1*(T1posneg==0 & T2posneg==0) + 2*(T1posneg==0 & T2posneg==1) + 3*(T1posneg==1 & T2posneg==0) + 4*(T1posneg==1 & T2posneg==1))
nlst.CT$hist.T1.T2 <- factor(nlst.CT$hist.T1.T2, levels=c(1,2,3,4), labels=c("Neg-Neg","Neg-Pos","Pos-Neg","Pos-Pos"))
### Create datasets for risk from T0 to T1. One for interval, one for screen-detected.
## .neg intended for analysis of interval cancer risk; .scrisk intended for analysis of screen-detected cancers
# 0 inadeq, 1 true-pos, 2 poss true-pos, 3 false-pos, 4 true-neg, 5 poss false-neg, 6 false-neg
# NOTE(review): `%!in%` is a negated %in% operator, presumably defined in
# hilary_functions.R (sourced above) — confirm.
# At risk in interval: T0 negatives only. Case status: false-negative at T0.
nlst.CT.T1.neg <- filter(nlst.CT, truefalse_scrnres_ly0 %in% c(4,5,6))
nlst.CT.T1.neg$case_T1_interval <- ifelse(nlst.CT.T1.neg$truefalse_scrnres_ly0==6, 1, 0)
# At risk for screen-detected at T1: either false-positive or true-negative at T0, and did not have any of the following at T1:
# inadequate image, left study, refused, wrong screen, erroneous report of LC, form not submitted (no missing values of scr_res0).
# Case status: case=1 AND either of (true-pos at T1 or T1 is coded as "not expected: cancer/death in screening window")
nlst.CT.T1.scrisk <- filter(nlst.CT, truefalse_scrnres_ly0 %in% c(2,3,4,5) & scr_res1 %!in% c(10,11,15,17,95,97))
nlst.CT.T1.scrisk$case_T1_screen <- ifelse(nlst.CT.T1.scrisk$case==1 &
(nlst.CT.T1.scrisk$truefalse_scrnres_ly1==1 | nlst.CT.T1.scrisk$scr_res1 %in% c(23,24)), 1, 0)
### Create datasets for risk from T1 to T2. One for interval, one for screen-detected.
# At risk in interval: T1 negatives only. Case status: false-negative at T1.
nlst.CT.T2.neg <- filter(nlst.CT, truefalse_scrnres_ly1 %in% c(4,5,6))
nlst.CT.T2.neg$case_T2_interval <- ifelse(nlst.CT.T2.neg$truefalse_scrnres_ly1==6, 1, 0)
# At risk for screen-detected at T2: either false-positive or true-negative at T1, and did not have any of the following at T2:
# inadequate image, left study, refused, wrong screen, erroneous report of LC, form not submitted (no missing values of scr_res0).
# Case status: case=1 AND either of (true-pos at T2 or T2 is coded as "not expected: cancer/death in screening window")
nlst.CT.T2.scrisk <- filter(nlst.CT, truefalse_scrnres_ly1 %in% c(2,3,4,5) & scr_res2 %!in% c(10,11,15,17,95,97))
nlst.CT.T2.scrisk$case_T2_screen <- ifelse(nlst.CT.T2.scrisk$case==1 &
(nlst.CT.T2.scrisk$truefalse_scrnres_ly2==1 | nlst.CT.T2.scrisk$scr_res2 %in% c(23,24)), 1, 0)
### Create a dataset for risk during "interval" after T2 (within 1 year)
nlst.CT.T3.neg <- filter(nlst.CT, truefalse_scrnres_ly2 %in% c(4,5,6))
nlst.CT.T3.neg$case_T3_interval <- ifelse(nlst.CT.T3.neg$truefalse_scrnres_ly2==6, 1, 0)
### Construct dataset to model risk of ALL interval cancers across all 3 screens
# Interval variable in data.interval datasets is 1 for T0-T1, 2 for T1-T2, and 3 for post-T2 intervals
# Stacks the three at-risk cohorts into one long (person-interval) dataset.
data.interval <- data.frame(pid=c(nlst.CT.T1.neg$pid, nlst.CT.T2.neg$pid, nlst.CT.T3.neg$pid),
case=c(nlst.CT.T1.neg$case_T1_interval, nlst.CT.T2.neg$case_T2_interval, nlst.CT.T3.neg$case_T3_interval),
interval=c(rep(1,times=nrow(nlst.CT.T1.neg)), rep(2, times=nrow(nlst.CT.T2.neg)), rep(3, times=nrow(nlst.CT.T3.neg))))
# Merge this back with covariates from NLST. Add screening history variable.
data.interval <- merge(data.interval, nlst.sub, by="pid", all.x=T)
data.interval <- merge(data.interval, select(nlst.CT, pid, hist.T0.T1, hist.T1.T2), by="pid", all.x=T, all.y=F) ## delete??
# screen.hist holds the relevant prior-screen history for each interval:
# hist.T0.T1 for interval 2, hist.T1.T2 for interval 3, NA for interval 1.
data.interval$screen.hist <- ifelse(data.interval$interval==2, data.interval$hist.T0.T1, ifelse(data.interval$interval==3, data.interval$hist.T1.T2, NA))
data.interval$screen.hist <- factor(data.interval$screen.hist, levels=c(1,2,3,4), labels=c("Neg-Neg","Neg-Pos","Pos-Neg","Pos-Pos"))
data.interval <- select(data.interval, -c(hist.T0.T1,hist.T1.T2)) ## delete??
data.interval <- arrange(data.interval, pid, interval)
# Update age, quit-years, and smoke-years by adding a year for T1 and T2
# (smoke-years advance only for current smokers, quit-years only for former).
data.interval <- mutate(data.interval, age=ifelse(interval==2, age+1, ifelse(interval==3, age+2, age)),
smkyears=ifelse(interval==2 & qtyears==0, smkyears+1, ifelse(interval==3 & qtyears==0, smkyears+2, smkyears)),
qtyears=ifelse(interval==2 & qtyears>0, qtyears+1, ifelse(interval==3 & qtyears>0, qtyears+2, qtyears)))
data.interval <- mutate(data.interval, pkyears.cont=cpd*smkyears/20) # using new smoke-years, update pack-years, then re-categorize
data.interval <- mutate(data.interval, pkyears.cat=as.factor(ifelse(pkyears.cont>=30 & pkyears.cont<40, "[30,40)",
ifelse(pkyears.cont>=40 & pkyears.cont<50, "[40,50)", ifelse(pkyears.cont>=50 & pkyears.cont<999,"[50,Inf]",NA)))))
# Construct dataset to model risk of ALL screen-detected cancers (at T1 and T2)
# Interval variable in data.screen datasets is 1 for risk at T1 and 2 for risk at T2
# Stacks the two screen-detected at-risk cohorts into one long dataset.
data.screen <- data.frame(pid=c(nlst.CT.T1.scrisk$pid, nlst.CT.T2.scrisk$pid),
case=c(nlst.CT.T1.scrisk$case_T1_screen, nlst.CT.T2.scrisk$case_T2_screen),
screen.result=c(nlst.CT.T1.scrisk$T0posneg, nlst.CT.T2.scrisk$T1posneg),
interval=c(rep(1,times=nrow(nlst.CT.T1.scrisk)), rep(2, times=nrow(nlst.CT.T2.scrisk))))
# Merge this back with covariates from NLST
data.screen <- merge(data.screen, nlst.sub, by="pid", all.x=T)
# Add a variable for lagged screen result & a 6-level variable for all combinations
# (lag is within person, ordered by interval, so lag.screen is NA at interval 1).
data.screen <- data.screen %>% group_by(pid) %>% mutate(lag.screen = lag(screen.result, order_by=interval))
data.screen <- mutate(data.screen, screen.comb = 1*(interval==1 & screen.result==0) +
2*(interval==1 & screen.result==1) + 3*(interval==2 & lag.screen==0 & screen.result==0) +
4*(interval==2 & lag.screen==0 & screen.result==1) + 5*(interval==2 & lag.screen==1 & screen.result==0) +
6*(interval==2 & lag.screen==1 & screen.result==1))
data.screen$screen.comb <- factor(data.screen$screen.comb, levels = c(1,2,3,4,5,6),
labels = c("Neg","Pos","Neg-Neg","Neg-Pos","Pos-Neg","Pos-Pos"))
# Update age, quit-years, and smoke-years by adding a year for T1
# (smoke-years advance only for current smokers, quit-years only for former).
data.screen <- mutate(data.screen, age=as.numeric(age), smkyears=as.numeric(smkyears), qtyears=as.numeric(qtyears))
data.screen <- mutate(data.screen, age=ifelse(interval==2, age+1, age),
smkyears=ifelse(interval==2 & qtyears==0, smkyears+1, smkyears),
qtyears=ifelse(interval==2 & qtyears>0, qtyears+1, qtyears))
data.screen <- mutate(data.screen, pkyears.cont=cpd*smkyears/20) # using new smoke-years, update pack-years, then re-categorize
data.screen <- mutate(data.screen, pkyears.cat=as.factor(ifelse(pkyears.cont>=30 & pkyears.cont<40, "[30,40)",
ifelse(pkyears.cont>=40 & pkyears.cont<50, "[40,50)", ifelse(pkyears.cont>=50 & pkyears.cont<999,"[50,Inf]",NA)))))
# Load abnormalities data (person-screen level) and merge with data.screen
# This dataset was generated by the program prepare_abn_data_vX.R
load("/Users/hrobbins827/Documents/PhD/NCI overflow/NLST/Analysis/Nodule data/abn.spl.20160810.rdata")
data.screen.abn <- merge(data.screen, abn.pl.all, by=c("pid","interval"), all.x=T, all.y=F)
# Replace NAs with 0 (not present) for appropriate variables
# (abnormality indicator columns only; Lung-RADS category columns keep NA).
replacevars <- names(abn.pl.all)[!names(abn.pl.all) %in% c("pid","interval","LRcat","LRcatcol.neg","LRcatcol.pos")]
data.screen.abn[replacevars][is.na(data.screen.abn[replacevars])] <- 0
# Make a variable for including observations in Lung-RADS analysis
data.screen.abn$LR.include <- (data.screen.abn$LRcat %in% c("1","2","3","4A","4B","4X"))
# Merge abnormalities data (person-screen level) with data.interval
data.interval.abn <- merge(data.interval, abn.pl.all, by=c("pid","interval"), all.x=T, all.y=F)
data.interval.abn[replacevars][is.na(data.interval.abn[replacevars])] <- 0
# Will need this vector for exploratory analysis of abnormalities (24 Jan 2017 - also create interaction vectors)
# abnlist.neg contains a list of abnormalities variables that are relevant to negative CTs. abnlist.pos is the list for positive CTs.
abnlist <- abnlist.pos <- names(abn.pl.all)[3:32]
abnlist.neg <- abnlist[c(4,5,6,7,8,9,10,11,12,13,14)]
# The .int lists hold unevaluated `logit1yrisk:variable` interaction terms,
# later pasted into model formulas for variable selection.
abnlist.neg.int <- lapply(abnlist.neg, function(x) {substitute(logit1yrisk:i, list(i=as.name(x)))}) # these lists create interaction terms
abnlist.pos.int <- lapply(abnlist.pos, function(x) {substitute(logit1yrisk:i, list(i=as.name(x)))})
# Create variable for log(diameter)
data.screen.abn$log.diam <- log(data.screen.abn$longest.diam+1)
# Calculate pre-screening risk inside this dataset
# (risk.kovalchik comes from kovalchik.R; LCRAT/cox.death are the PLCO fits).
data.interval.abn$prescr.1yrisk <- risk.kovalchik(0, 1, data.interval, LCRAT, cox.death)
data.interval.abn <- mutate(data.interval.abn, log1yrisk=log(prescr.1yrisk), logit1yrisk=log(prescr.1yrisk/(1-prescr.1yrisk)))
data.screen.abn$prescr.1yrisk <- risk.kovalchik(0, 1, data.screen.abn, LCRAT, cox.death)
data.screen.abn <- mutate(data.screen.abn, log1yrisk=log(prescr.1yrisk), logit1yrisk=log(prescr.1yrisk/(1-prescr.1yrisk)))
# These datasets are needed to separately model screen-detected cancers incorporating abnormalities for negatives and false-positives
data.screen.abn.neg <- filter(data.screen.abn, screen.result==0)
data.screen.abn.pos <- filter(data.screen.abn, screen.result==1)
# Make a categorical variable for diameter
# (0 = no measurable nodule; remaining bins follow the labelled mm ranges).
data.screen.abn.pos <- mutate(data.screen.abn.pos, diam.cat = 1*(longest.diam==0)+2*(longest.diam>0 & longest.diam<=5)+
3*(longest.diam>5 & longest.diam<=7) + 4*(longest.diam>7 & longest.diam<=10) +
5*(longest.diam>10 & longest.diam<=13) + 6*(longest.diam>13 & longest.diam<100))
data.screen.abn.pos$diam.cat <- factor(data.screen.abn.pos$diam.cat, levels=c(1:6),labels=c("0","4-5","6-7","8-10","11-13","14+"))
# Create a variable for any.growth that reflects a group in which growth can't be assessed (i.e. screen=T0)
data.screen.abn.pos <- mutate(data.screen.abn.pos, growth.3l =
1*(interval==1) + 2*(interval==2 & any.growth==0) + 3*(interval==2 & any.growth==1))
data.screen.abn.pos$growth.3l <- factor(data.screen.abn.pos$growth.3l, levels = c(1,2,3), labels=c("NA","No","Yes"))
# Make dataset of unique individuals for descriptive table of screen-negatives
# (anyone contributing to either the interval or the next-screen analysis).
# NOTE(review): each nested ifelse chain below bins a continuous variable and
# leaves out-of-range values NA; cut() would be equivalent but the explicit
# form is kept to avoid changing boundary behavior.
all.subj.neg <- filter(nlst.CT, pid %in% data.interval.abn$pid | pid %in% data.screen.abn.neg$pid)
all.subj.neg <- mutate(all.subj.neg, age.cat=as.factor(ifelse(age>=55 & age<60, "55-59", ifelse(age>=60 & age<65, "60-64",
ifelse(age>=65 & age<70, "65-69", ifelse(age>=70 & age<75, "70-74", NA))))),
qtyears.cat=as.factor(ifelse(qtyears==0, "Current smoker", ifelse(qtyears>0 & qtyears<=5, "1-5",
ifelse(qtyears>5 & qtyears<=10, "6-10", ifelse(qtyears>10 & qtyears<99, "11 or more", NA))))),
bmi.cat=as.factor(ifelse(bmi>0 & bmi<18.5, "Underweight", ifelse(bmi>=18.5 & bmi<25, "Normal",
ifelse(bmi>=25 & bmi<30, "Overweight", ifelse(bmi>=30, "Obese", NA))))),
cpd.cat=as.factor(ifelse(cpd>0 & cpd<20, "<20", ifelse(cpd>=20 & cpd<30, "20-29",
ifelse(cpd>=30 & cpd<40, "30-39", ifelse(cpd>=40 & cpd<99, "40+", NA))))),
smkyears.cat=as.factor(ifelse(smkyears>0 & smkyears<30, "<30", ifelse(smkyears>=30 & smkyears<40, "30-39",
ifelse(smkyears>=40 & smkyears<50, "40-49", ifelse(smkyears>=50 & smkyears<99, "50+", NA))))))
# Run the main models
# All are log-link binomial GLMs with no intercept, so exp(fitted) scales the
# pre-screening risk; fitted values are stored back as post-screen risks.
# Overall effects (without specific CT features)
glm.interval <- glm(case ~ log1yrisk -1, data=data.interval.abn, family=binomial(link='log'))
data.interval.abn$post.risk.interv <- fitted.values(glm.interval)
glm.screen.neg <- glm(case ~ log1yrisk -1, data=data.screen.abn.neg, family=binomial(link='log'))
data.screen.abn.neg$post.risk.neg.overall <- fitted.values(glm.screen.neg)
# With specific CT findings
glm.int.abn <- glm(case ~ log1yrisk + log1yrisk:adenop.consol -1, data=data.interval.abn, family=binomial(link='log'))
data.interval.abn$post.risk.abn <- fitted.values(glm.int.abn)
# na.exclude pads fitted.values with NA for rows dropped due to missingness,
# keeping the vector aligned with the data rows for the assignment below.
glm.screen.neg.abn <- glm(case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:emphysema -1, data=data.screen.abn.neg, family=binomial(link='log'), na.action=na.exclude)
data.screen.abn.neg$post.risk.abn <- fitted.values(glm.screen.neg.abn)
# --------------------------- run code to this line for data setup ----------------------------- #
# --------------------------- run code to this line for data setup ----------------------------- #
#### Descriptive stats ####
# Descriptive table 1 for analysis after NEGATIVE screen
nrow(all.subj.neg) # number of unique individuals
length(unique(filter(nlst.CT, T0posneg==0 | T1posneg==0 | T2posneg==0)$pid)) # confirm - this is the same # with at least one negative screen
length(unique(data.interval$pid)) # number of unique individuals in interval cancers analysis
CrossTable((data.interval %>% group_by(pid) %>% summarise(times.in.interv.analysis=n()))$times.in.interv.analysis) # times included in interval ca analysis
length(unique(data.screen.abn.neg$pid)) # number of unique individuals in next-screen analysis
CrossTable((data.screen.abn.neg %>% group_by(pid) %>% summarise(times.in.screen.analysis=n()))$times.in.screen.analysis) # times included in next-screen analysis
CrossTable(all.subj.neg$female, missing.include=T)
CrossTable(all.subj.neg$age.cat, missing.include=T)
CrossTable(all.subj.neg$race, missing.include=T) # 0 white, 1 black, 2 hispanic, 3 other
CrossTable(all.subj.neg$edu6, missing.include=T) # see codebook
CrossTable(all.subj.neg$bmi.cat, missing.include=T)
CrossTable(all.subj.neg$fam.lung.trend, missing.include=T) # none, 1, 2+
CrossTable(all.subj.neg$qtyears.cat, missing.include=T)
CrossTable(all.subj.neg$pkyears.cat, missing.include=T)
CrossTable(all.subj.neg$smkyears.cat, missing.include=T)
CrossTable(all.subj.neg$cpd.cat, missing.include=T)
CrossTable(all.subj.neg$emp, missing.include=T)
quantile(all.subj.neg$prescr.1yrisk.T0, probs=c(0.25, 0.5, 0.75)) # median IQR of pre-screening risk
# Other numbers
with(filter(data.screen.abn.neg, interval==2), CrossTable(screen.comb, case)) # numbers considered for Markov assumption test
c(sum(data.interval.abn$case), nrow(data.interval.abn), sum(data.interval.abn$case)/nrow(data.interval.abn)) # interval cancers: cases, # at risk, overall risk
c(sum(data.screen.abn.neg$case), nrow(data.screen.abn.neg), sum(data.screen.abn.neg$case)/nrow(data.screen.abn.neg)) # next-screen cancers after negative: cases, # at risk, overall risk
range_without_outliers(all.subj.neg$prescr.1yrisk)
#### Model development: Interval cancer among NEGATIVES ####
# Interval cancers: overall model (no abnormalities) - properties of screening
# Confirm that pre-screening risk improves the model
# (intercept-only vs. intercept + log pre-screening risk, compared via LRT).
int.nopsr <- glm(case ~ 1, data=data.interval.abn, family=binomial(link='log'))
summary(int.nopsr)
int.psr <- glm(case ~ log1yrisk+1, data=data.interval.abn, family=binomial(link='log'))
summary(int.psr)
1-pchisq(int.nopsr$deviance - int.psr$deviance, length(int.psr$coefficients)-length(int.nopsr$coefficients)) # LRT
# Overall model results
# glm.interval.abn <- glm(case ~ log1yrisk -1, data=data.interval.abn, family=binomial(link='log')) # run above in setup
# data.interval.abn$post.risk.interv <- fitted.values(glm.interval) # run above in setup
summary(glm.interval)
confint(glm.interval)
# Does the risk coefficient differ by interval? No (LRT p=0.23). Steps below: fit model, estimate 3 exponents, get p-value, get counts
glm.interval.intervals <- glm(case ~ log1yrisk + log1yrisk:I(as.numeric(interval==2)) + log1yrisk:I(as.numeric(interval==3)) -1, data=data.interval.abn, family=binomial(link='log'))
# Interval-specific exponents: baseline, baseline+interval-2 offset, baseline+interval-3 offset.
c(coefficients(glm.interval.intervals)[1], coefficients(glm.interval.intervals)[1]+coefficients(glm.interval.intervals)[2], coefficients(glm.interval.intervals)[1]+coefficients(glm.interval.intervals)[3])
1-pchisq(glm.interval$deviance - glm.interval.intervals$deviance, length(glm.interval.intervals$coefficients)-length(glm.interval$coefficients))
with(data.interval.abn, table(interval))
# Do previous screens matter? No (LRT p=0.99)
# Compare a model with a separate risk exponent per screening history (4
# levels) against a single-exponent model on the same observations.
glm.int.2levels <- glm(case ~ log1yrisk:as.factor(screen.hist) -1, data=filter(data.interval.abn, interval %in% c(2,3)), family=binomial(link='log'))
summary(glm.int.2levels)
confint(glm.int.2levels)
glm.int.1level <- glm(case ~ log1yrisk -1, data=filter(data.interval.abn, interval %in% c(2,3) & !is.na(screen.hist)), family=binomial(link='log'))
summary(glm.int.1level)
# Likelihood-ratio test.
# BUG FIX: the original df was length(coefs2 - length(coefs1)) — a misplaced
# closing parenthesis that made df equal the number of coefficients in the
# 4-level model instead of the difference in number of coefficients.
1-pchisq(glm.int.1level$deviance - glm.int.2levels$deviance,
         df = length(glm.int.2levels$coefficients) - length(glm.int.1level$coefficients))
# Interval cancers: effects of abnormalities
# Following a negative screen, the relevant CT features are in abnlist.neg. The relevant p-value is for the interaction (i.e. risk differs between the 0 and 1 levels)
# Variable selection is done two ways (backward stepwise and lasso); the
# final variable set was then chosen by discussion (see note below).
# Backwards stepwise selection: selects other.above, benign.nodule, consolidation, adenopathy
int.full <- glm(paste("case ~ logit1yrisk -1 +",paste(abnlist.neg.int, collapse="+"),sep=""), data=data.interval.abn, family=binomial(link='logit'))
bsw.int <- step(int.full, direction="backward", scope = list(lower = case ~ logit1yrisk -1, upper = int.full))
# Look at a model including these 4 effects
summary(glm(case ~ log1yrisk + log1yrisk:other.above + log1yrisk:benign.nodule + log1yrisk:consolidation + log1yrisk:adenopathy -1, data=data.interval.abn, family=binomial(link='log')))
# Lasso using intermediate lambda: selects adenopathy and consolidation
set.seed(61116)
x <- model.matrix(case ~ logit1yrisk -1 + logit1yrisk:., data = data.interval.abn[,c("case","logit1yrisk",abnlist.neg)])
cv.lasso <- cv.glmnet(x, data.interval.abn$case, alpha=1, family="binomial")
out <- glmnet(x, data.interval.abn$case, alpha=1, family="binomial")
# Evaluate the lasso path at a lambda midway between lambda.min and lambda.1se
predict(out, type="coefficients", s=(cv.lasso$lambda.min+cv.lasso$lambda.1se)/2)
# Look at a model including these two effects
summary(glm(case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:adenopathy -1, data=data.interval.abn, family=binomial(link='log')))
# Based on discussion with Chris: include adenopathy and consolidation. Model as 1 variable (effect size is the same)
# Switch back to log scale for final model for interpretability (these models are run above in data setup section)
# glm.int.abn.log <- glm(case ~ log1yrisk + log1yrisk:adenop.consol -1, data=data.interval.abn, family=binomial(link='log'))
# data.interval.abn$post.risk.abn <- fitted.values(glm.int.abn.log)
summary(glm.int.abn)
# Get estimate and CIs for the exponents
# Run one `mat` line at a time, then the two lines below it; the second `mat`
# assignment overwrites the first, so only the last one executed is used.
mat <- c(1,0) # Use this matrix for "Neither noted"
mat <- c(1,1) # Use this matrix for adenopathy or consolidation
stder <- sqrt(c(t(mat) %*% vcov(glm.int.abn) %*% mat))
c(coefficients(glm.int.abn) %*% mat, (coefficients(glm.int.abn) %*% mat)-1.96*stder, (coefficients(glm.int.abn) %*% mat)+1.96*stder)
# Check for residual effects of LCRAT variables using likelihood ratio tests - the LRT for emp is 0.02, but the Wald is 0.06. We will say p>0.05.
# Loop over each LCRAT variable in varlist, adding it to the final model and
# recording the LRT p-value in a results matrix.
titles <- c("var","null model # param", "extended model # param", "LRT p-value", "check same # obs")
mat.out.interv <- matrix(rep(NA),nrow=length(varlist),ncol=length(titles))
for (x in seq_along(varlist)) {
mod.without <- glm(case ~ logit1yrisk + logit1yrisk:adenop.consol -1, data=data.interval.abn, family=binomial(link='logit'))
mod.with <- glm(substitute(case ~ logit1yrisk + logit1yrisk:adenop.consol + logit1yrisk:i -1, list(i=as.name(varlist[x]))), data=data.interval.abn, family=binomial(link='logit'))
print(summary(mod.with))
mat.out.interv[x,] <- c(varlist[x], length(mod.without$coefficients), sum(!is.na(mod.with$coefficients)), 1-pchisq(mod.without$deviance-mod.with$deviance, df=sum(!is.na(mod.with$coefficients))-length(mod.without$coefficients)), I(length(mod.without$residuals)==length(mod.with$residuals)))
}
rbind(titles, mat.out.interv)
#### Model development: Next-screen cancer among NEGATIVES ####
# Overall model for next-screen cancer among negatives (no abnormalities) - properties of screening
# Confirm that pre-screening risk improves the model (LRT against intercept-only)
ns.nopsr <- glm(case ~ 1, data=data.screen.abn.neg, family=binomial(link='log'))
summary(ns.nopsr)
ns.psr <- glm(case ~ log1yrisk+1, data=data.screen.abn.neg, family=binomial(link='log'))
summary(ns.psr)
1-pchisq(ns.nopsr$deviance - ns.psr$deviance, length(ns.psr$coefficients)-length(ns.nopsr$coefficients))
# Overall model results
# glm.screen.neg <- glm(case ~ log1yrisk -1, data=data.screen.abn.neg, family=binomial(link='log')) # this is run above the line
# data.screen.abn.neg$post.risk.neg.overall <- fitted.values(glm.screen.neg)
summary(glm.screen.neg)
confint(glm.screen.neg)
# Does the interval matter? no (p=0.38)
glm.screen.neg.by.int <- glm(case ~ log1yrisk:as.factor(interval) -1, data=data.screen.abn.neg, family=binomial(link='log'))
summary(glm.screen.neg.by.int)
1-pchisq(glm.screen.neg$deviance - glm.screen.neg.by.int$deviance, length(glm.screen.neg.by.int$coefficients) - length(glm.screen.neg$coefficients))
with(data.screen.abn.neg, table(interval))
# Do previous screens matter? no (p=0.26)
glm.screen.neg.2levels <- glm(case ~ log1yrisk:screen.comb -1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(glm.screen.neg.2levels)
confint(glm.screen.neg.2levels)
glm.screen.neg.1level <- glm(case ~ log1yrisk -1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(glm.screen.neg.1level)
1-pchisq(glm.screen.neg.1level$deviance - glm.screen.neg.2levels$deviance, length(glm.screen.neg.2levels$coefficients) - length(glm.screen.neg.1level$coefficients))
# Do previous screens matter if we ignore pre-screening risk? p=0.14
glm.screen.neg.2levels.nopsr <- glm(case ~ screen.comb -1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(glm.screen.neg.2levels.nopsr)
glm.screen.neg.1level.nopsr <- glm(case ~ 1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(glm.screen.neg.1level.nopsr)
1-pchisq(glm.screen.neg.1level.nopsr$deviance - glm.screen.neg.2levels.nopsr$deviance, length(glm.screen.neg.2levels.nopsr$coefficients) - length(glm.screen.neg.1level.nopsr$coefficients))
# Analysis to compare with Patz result
# Start with everyone with T0 screen being negative, then compare those who are T1-false-positive or
# negative for the risk of screen-detected cancer at T2.
patz.m1 <- glm(case ~ log1yrisk -1, data=filter(data.screen.abn, interval==2 & screen.comb %in% c("Neg-Pos","Neg-Neg")), family=binomial(link='log'))
summary(patz.m1)
patz.m2 <- glm(case ~ log1yrisk:screen.comb -1, data=filter(data.screen.abn, interval==2 & screen.comb %in% c("Neg-Pos","Neg-Neg")), family=binomial(link='log'))
summary(patz.m2)
1-pchisq(patz.m1$deviance - patz.m2$deviance, length(patz.m2$coefficients) - length(patz.m1$coefficients))
# Effects of abnormalities for next-screen among negatives
# Same two-pronged variable selection as for interval cancers above.
# Backwards stepwise: selects nod6.not.susp, opac.fibr, consolidation, emphysema
scr.neg.full <- glm(paste("case ~ logit1yrisk -1 +",paste(abnlist.neg.int, collapse="+"),sep=""), data=data.screen.abn.neg, family=binomial(link='logit'))
bsw.scr.neg <- step(scr.neg.full, direction="backward", scope = list(lower = case ~ logit1yrisk -1, upper = scr.neg.full))
# Look at a model including these 4 effects
summary(glm(case ~ log1yrisk + log1yrisk:opac.fibr + log1yrisk:nod6.not.susp + log1yrisk:consolidation + log1yrisk:emphysema -1, data=data.screen.abn.neg, family=binomial(link='log')))
# Lasso using intermediate lambda: selects ONLY logit1yrisk
set.seed(61116)
x <- model.matrix(case ~ logit1yrisk -1 + logit1yrisk:. , data = data.screen.abn.neg[,c("case","logit1yrisk",abnlist.neg)])
cv.lasso <- cv.glmnet(x, data.screen.abn.neg$case, alpha=1, family="binomial")
out <- glmnet(x, data.screen.abn.neg$case, alpha=1, family="binomial")
predict(out, type="coefficients", s=(cv.lasso$lambda.min+cv.lasso$lambda.1se)/2)
# From discussion with Chris: keep consolidation and emphysema.
# Switch back to log scale for final model for interpretability (this model is run above in data setup)
# glm.screen.neg.abn <- glm(case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:emphysema -1, data=data.screen.abn.neg, family=binomial(link='log'))
# data.screen.abn.neg$post.risk.abn <- fitted.values(glm.screen.neg.abn)
summary(glm.screen.neg.abn)
# Get estimates and CIs for the exponents
# Run one `mat` line at a time before the two lines below; each assignment
# overwrites the previous one.
mat <- c(1,0,0) # Use this matrix for "neither noted"
mat <- c(1,1,0) # Use this matrix for consolidation
mat <- c(1,0,1) # Use this matrix for emphysema
stder <- sqrt(c(t(mat) %*% vcov(glm.screen.neg.abn) %*% mat))
c(coefficients(glm.screen.neg.abn) %*% mat, (coefficients(glm.screen.neg.abn) %*% mat)-1.96*stder, (coefficients(glm.screen.neg.abn) %*% mat)+1.96*stder)
# Check for residual effects of LCRAT variables. All p>0.05
titles <- c("var","null model # param", "extended model # param", "LRT p-value", "check same # obs")
mat.out.ns <- matrix(rep(NA),nrow=length(varlist),ncol=length(titles))
for (x in seq_along(varlist)) {
mod.without <- glm(case ~ logit1yrisk + logit1yrisk:consolidation + logit1yrisk:emphysema -1, data=data.screen.abn.neg, family=binomial(link='logit'))
mod.with <- glm(substitute(case ~ logit1yrisk + logit1yrisk:consolidation + logit1yrisk:emphysema + logit1yrisk:i -1, list(i=as.name(varlist[x]))), data=data.screen.abn.neg, family=binomial(link='logit'))
mat.out.ns[x,] <- c(varlist[x], length(mod.without$coefficients), sum(!is.na(mod.with$coefficients)), 1-pchisq(mod.without$deviance-mod.with$deviance, df=sum(!is.na(mod.with$coefficients))-length(mod.without$coefficients)), I(length(mod.without$residuals)==length(mod.with$residuals)))
}
rbind(titles, mat.out.ns)
# What if we account for screening history along with pre-screening risk, emphysema, and consolidation? p=0.34
m1 <- glm(case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:emphysema -1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(m1)
m2 <- glm(case ~ log1yrisk:screen.comb + log1yrisk:consolidation + log1yrisk:emphysema -1, data=filter(data.screen.abn.neg, interval==2 & !is.na(screen.comb)), family=binomial(link='log'))
summary(m2)
1-pchisq(m1$deviance - m2$deviance, length(m2$coefficients) - length(m1$coefficients))
#### Additional analyses (GEE, AUCs, cross-validation, etc) ####
### Comparison with GEE - this impacts the SEs negligibly ###
# Refit each final GLM as a GEE with exchangeable working correlation
# (clustered on participant id) and compare to the GLM summaries.
# Interval cancer models
summary(geeglm(case ~ log1yrisk -1, id=pid, data=data.interval, family=binomial(link='log'), corstr="exchangeable", waves=interval))
summary(glm.interval)
summary(geeglm(case ~ log1yrisk + log1yrisk:adenop.consol -1, id=pid, data=data.interval.abn, family=binomial(link='log'), corstr="exchangeable", waves=interval))
summary(glm.int.abn.log)
# Next-screen among negatives model
summary(geeglm(case ~ log1yrisk -1, id=pid, data=data.screen.abn.neg, family=binomial(link='log'), corstr="exchangeable", waves=interval))
summary(glm.screen.neg)
summary(geeglm(case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:emphysema -1, id=pid, data=data.screen.abn.neg, family=binomial(link='log'), corstr="exchangeable", waves=interval))
summary(glm.screen.neg.abn)
### Calibration and validation analyses ###
# 10-fold cross-validated calibration
# Randomly assign folds, refit the final model leaving each fold out in turn,
# and store the out-of-fold predicted risks in cvpred.
# Interval cancers
set.seed(61116)
data.interval.abn$randgrp <- base::sample(1:10, nrow(data.interval.abn), replace=T)
data.interval.abn$cvpred <- NA
for (i in 1:10) {
fit <- glm(formula = case ~ log1yrisk + log1yrisk:adenop.consol - 1,
family = binomial(link = "log"), data = filter(data.interval.abn, randgrp!=i))
data.interval.abn[data.interval.abn$randgrp==i,]$cvpred <- predict(fit, newdata=data.interval.abn[data.interval.abn$randgrp==i,], type="response")
}
# Compare predicted vs observed cases within quintiles of cross-validated risk
data.interval.abn <- mutate(data.interval.abn, cvpred.ntile = ntile(cvpred, 5))
data.interval.abn %>% group_by(cvpred.ntile) %>% summarise(pred.cases= sum(cvpred), obs.cases = sum(case))
c(sum(data.interval.abn$cvpred), sum(data.interval.abn$case)) # number obs and expected cases
poisson.test(round(sum(data.interval.abn$cvpred),0), sum(data.interval.abn$case), alternative="two.sided") # p-value, requires rounding
# Next-screen among screen-negatives
set.seed(61116)
data.screen.abn.neg$randgrp <- base::sample(1:10, nrow(data.screen.abn.neg), replace=T)
data.screen.abn.neg$cvpred <- NA
for (i in 1:10) {
fit <- glm(formula = case ~ log1yrisk + log1yrisk:consolidation + log1yrisk:emphysema - 1,
family = binomial(link = "log"), data = filter(data.screen.abn.neg, randgrp!=i))
data.screen.abn.neg[data.screen.abn.neg$randgrp==i,]$cvpred <- predict(fit, newdata=data.screen.abn.neg[data.screen.abn.neg$randgrp==i,], type="response")
}
data.screen.abn.neg <- mutate(data.screen.abn.neg, cvpred.ntile = ntile(cvpred, 5))
data.screen.abn.neg %>% group_by(cvpred.ntile) %>% summarise(pred.cases= sum(cvpred), obs.cases = sum(case))
c(sum(data.screen.abn.neg$cvpred), sum(data.screen.abn.neg$case)) # number obs and expected cases
poisson.test(round(sum(data.screen.abn.neg$cvpred)), sum(data.screen.abn.neg$case), alternative="two.sided")
# 10-fold cross validation to get CV error. The first delta is standard version; second is bias-corrected.
set.seed(61116)
cv.err.int <- cv.glm(data.interval.abn, glm.int.abn, K=10)
cv.err.int$delta
cv.err.screen.neg <- cv.glm(data.screen.abn.neg, glm.screen.neg.abn, K=10)
cv.err.screen.neg$delta
### Calculate AUCs ###
## Regular AUCs. By default, the 95% CI are computed with 2000 stratified bootstrap replicates.
library(pROC)
# Interval cancer model
with(filter(data.interval.abn, interval==1), roc(case, post.risk.abn, ci=T, plot=T)) # T0-T1 - change interval for T1-T2, post-T2
# Next-screen model among negatives
with(filter(data.screen.abn.neg, interval==1), roc(case, post.risk.abn, ci=T, plot=T)) # T1 - change interval for T2
## Optimism-corrected AUCs - have to use logistic models for this, and have to actually add the interaction terms to the dataset.
# rms::validate() bootstraps Dxy; AUC is recovered as 0.5*(Dxy+1).
library(rms)
data.screen.abn.neg <- mutate(data.screen.abn.neg, logit1yriskconsolidation = logit1yrisk*consolidation, logit1yriskemphysema = logit1yrisk*emphysema)
data.interval.abn <- mutate(data.interval.abn, logit1yriskadenopconsol = logit1yrisk*adenop.consol)
# Interval cancer model
mod.int <- lrm(case ~ logit1yrisk + logit1yriskadenopconsol -1, x=T, y=T, data=data.interval.abn)
set.seed(61116)
validate(mod.int, B=1000)
# NOTE: the Dxy values below are hard-coded from a previous run of validate();
# re-running with different data will require updating them.
c(0.5*(0.5072+1), 0.5*(0.5082+1)) # AUC = 0.5(Dxy+1). Naive, optimism-corrected AUCs - 0.75 is OC-AUC
# Next-screen model among negatives
mod.ns <- lrm(case ~ logit1yrisk + logit1yriskconsolidation + logit1yriskemphysema -1, x=T, y=T, data=data.screen.abn.neg)
set.seed(61116)
validate(mod.ns, B=1000) # AUC = 0.5(Dxy+1)
c(0.5*(0.4760+1), 0.5*(0.4689+1)) # AUC = 0.5(Dxy+1). Naive, optimism-corrected AUCs - 0.73 is OC-AUC
#### Figures ####
# Effect of screen findings on risk of INTERVAL ca among screen-negatives
# Boxplots of pre-screening risk vs post-screening risk, split by whether
# adenopathy or consolidation was noted; dashed lines connect group medians.
med.risk.interv.prescr <- median(filter(data.interval.abn, interval==1)$prescr.1yrisk, na.rm=T)
med.risk.interv.post.noac <- median(filter(data.interval.abn, interval==1 & adenop.consol==0)$post.risk.abn, na.rm=T)
med.risk.interv.post.ac <- median(filter(data.interval.abn, interval==1 & adenop.consol==1)$post.risk.abn, na.rm=T)
png(file="/Users/hrobbins827/Documents/PhD/NCI overflow/NLST/Figures/screen_neg_interval_ad_con.png",width=1200,height=850)
ggplot() +
theme(panel.background = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
axis.title = element_text(size=28),
axis.text = element_text(colour="black", size=28)) +
scale_y_continuous(labels=scales::percent, limits=c(0, 0.045)) +
scale_x_continuous(breaks=NULL) +
# scale_x_continuous(breaks=c(0,1), labels = c("Negative screen", "1-year interval")) +
ylab("1-year lung cancer risk, %\n") + xlab("") +
geom_boxplot(data=subset(data.interval.abn, interval==1), aes(x=0, y=prescr.1yrisk), lwd=1, width=0.4, outlier.shape=NA) +
geom_boxplot(data=subset(data.interval.abn, interval==1 & adenop.consol==0), aes(x=0.9, y=post.risk.abn), lwd=1, width=0.98*.8, outlier.shape=NA) +
geom_boxplot(data=subset(data.interval.abn, interval==1 & adenop.consol==1), aes(x=1.1, y=post.risk.abn), lwd=1, width=0.02*.8, outlier.shape=NA) +
geom_segment(aes(x=0, y=med.risk.interv.prescr, xend=0.9, yend=med.risk.interv.post.noac), linetype="dashed", size=0.6) +
geom_segment(aes(x=0, y=med.risk.interv.prescr, xend=1.1, yend=med.risk.interv.post.ac), linetype="dashed", size=0.6) +
annotate(geom="text", x=0.6, y=0.0062, label = "Adenopathy or consolidation (2%)", angle=4, size=9) +
annotate(geom="text", x=0.45, y=0.0028, label = "Neither noted (98%)", angle=-4, size=9) +
annotate(geom="text", x=0, y=0.045, label="Pre-screening risk", size=9) +
annotate(geom="text", x=1, y=0.045, label="Risk during 1-year interval", size=9)
dev.off()
# Numbers for the text
with(data.interval.abn, CrossTable(interval, adenop.consol)) # 1.8% have adenop or consol at T0
# Some percentiles for below the figure
# Quartiles of risk in each group, then median risk ratios and risk
# differences relative to pre-screening risk.
neg.i.psr.q <- quantile(filter(data.interval.abn, interval==1)$prescr.1yrisk, probs=c(0.25, 0.5, 0.75))
neg.i.no.q <- quantile(filter(data.interval.abn, interval==1 & adenop.consol==0)$post.risk.abn, probs=c(0.25, 0.5, 0.75))
neg.i.adcon.q <- quantile(filter(data.interval.abn, interval==1 & adenop.consol==1)$post.risk.abn, probs=c(0.25, 0.5, 0.75))
rbind(neg.i.psr.q, neg.i.no.q, neg.i.adcon.q) # print the quantiles for each group
c(neg.i.no.q[2]/neg.i.psr.q[2], neg.i.adcon.q[2]/neg.i.psr.q[2]) # median RRs for no, adenop.consol
c(neg.i.no.q[2]-neg.i.psr.q[2], neg.i.adcon.q[2]-neg.i.psr.q[2]) # median RDs for no, adenop.consol
range_without_outliers(filter(data.interval.abn, interval==1 & adenop.consol==0)$post.risk.abn) # this uses my function defined in hilary_functions.R
# Effect of screen findings on risk of SCREEN-DETECTED ca among screen negatives
# Same layout as the interval-cancer figure above, but split by emphysema and
# consolidation status on the negative screen.
# Note: I am not making a boxplot for the N=36 with emphysema and consolidation.
med.risk.screen.neg.prescr <- median(filter(data.screen.abn.neg, interval==1)$prescr.1yrisk)
med.risk.screen.neg.neither <- median(filter(data.screen.abn.neg, interval==1 & emphysema==0 & consolidation==0)$post.risk.abn)
med.risk.screen.neg.emp <- median(filter(data.screen.abn.neg, interval==1 & emphysema==1)$post.risk.abn)
med.risk.screen.neg.consol <- median(filter(data.screen.abn.neg, interval==1 & consolidation==1)$post.risk.abn)
png(file="/Users/hrobbins827/Documents/PhD/NCI overflow/NLST/Figures/screen_neg_emp_consol.png",width=1200,height=850)
ggplot() +
theme(panel.background = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"),
axis.title = element_text(size=28),
axis.text = element_text(colour="black", size=28)) +
scale_y_continuous(labels=scales::percent, limits=c(0, 0.045)) +
scale_x_continuous(breaks=NULL) +
# scale_x_continuous(breaks=c(0,1), labels = c("Negative screen", "Screen")) + # remove labels for manuscript
ylab("1-year lung cancer risk, %\n") + xlab("") +
geom_boxplot(data=subset(data.screen.abn.neg, interval==1), aes(x=0, y=prescr.1yrisk), lwd=1, width=0.4, outlier.shape=NA) +
geom_boxplot(data=subset(data.screen.abn.neg, interval==1 & emphysema==0 & consolidation==0), aes(x=0.8, y=post.risk.abn), lwd=1, width=0.69*1.2, outlier.shape=NA) +
geom_boxplot(data=subset(data.screen.abn.neg, interval==1 & emphysema==1), aes(x=1, y=post.risk.abn), lwd=1, width=.3*1.2, outlier.shape=NA) +
geom_boxplot(data=subset(data.screen.abn.neg, interval==1 & consolidation==1), aes(x=1.2, y=post.risk.abn), lwd=1, width=.01*1.2, outlier.shape=NA) +
geom_segment(aes(x=0, y=med.risk.screen.neg.prescr, xend=0.8, yend=med.risk.screen.neg.neither), linetype="dashed", size=0.6) +
geom_segment(aes(x=0, y=med.risk.screen.neg.prescr, xend=1.0, yend=med.risk.screen.neg.emp), linetype="dashed", size=0.6) +
# The -0.0019 offset below nudges the consolidation segment endpoint for
# visual clarity -- presumably to avoid overlapping the boxplot; confirm if regenerating.
geom_segment(aes(x=0, y=med.risk.screen.neg.prescr, xend=1.2, yend=med.risk.screen.neg.consol-0.0019), linetype="dashed", size=0.6) +
annotate(geom="text", x=0.3, y=0.0003, label = "Neither (70%)", angle=-4, size=9) +
annotate(geom="text", x=0.5, y=0.0052, label = "Emphysema (30%)", angle=2, size=9) +
annotate(geom="text", x=0.65, y=0.0105, label = "Consolidation (0.6%)", angle=12, size=9) +
annotate(geom="text", x=0, y=0.045, label="Pre-screening risk", size=9) +
annotate(geom="text", x=1, y=0.045, label="Risk at next screen", size=9)
dev.off()
# Numbers for the text: Among T0-negatives: prevalence of self-reported emphysema (7%), CT emphysema (30%), and consolidation (0.6%)
with(filter(data.screen.abn.neg, interval==1), CrossTable(emp))
with(filter(data.screen.abn.neg, interval==1), CrossTable(emphysema))
with(filter(data.screen.abn.neg, interval==1), CrossTable(consolidation))
with(filter(data.screen.abn.neg, interval==1), CrossTable(emphysema, consolidation))
with(data.screen.abn.neg, CrossTable(interval, I(emphysema==0 & consolidation==0))) # no emphysema NOR consolidation (70% at T0)
# Thus .738 *.696 = 51% of participants would be screen-negative without emp or consol.
# Some percentiles for below the figure
quantile(filter(data.screen.abn.neg, interval==1)$post.risk.abn, probs=c(0.25, 0.5, 0.75, 0.8, 0.85, 0.9)) # overall quantiles among all negatives
neg.s.psr.q <- quantile(filter(data.screen.abn.neg, interval==1)$prescr.1yrisk, probs=c(0.25, 0.5, 0.75))
neg.s.no.no.q <- quantile(filter(data.screen.abn.neg, interval==1 & emphysema==0 & consolidation==0)$post.risk.abn, probs=c(0.25, 0.5, 0.75))
neg.s.emp.q <- quantile(filter(data.screen.abn.neg, interval==1 & emphysema==1)$post.risk.abn, probs=c(0.25, 0.5, 0.75))
neg.s.consol.q <- quantile(filter(data.screen.abn.neg, interval==1 & consolidation==1)$post.risk.abn, probs=c(0.25, 0.5, 0.75))
rbind(neg.s.psr.q, neg.s.no.no.q, neg.s.emp.q, neg.s.consol.q) # print the quantiles for each group
rbind(neg.s.no.no.q[2]/neg.s.psr.q[2], neg.s.emp.q[2]/neg.s.psr.q[2], neg.s.consol.q[2]/neg.s.psr.q[2]) # median RRs for no-no, emp, consol
rbind(neg.s.no.no.q[2]-neg.s.psr.q[2], neg.s.emp.q[2]-neg.s.psr.q[2], neg.s.consol.q[2]-neg.s.psr.q[2]) # median RDs for no-no, emp, consol
# Potential risk THRESHOLDS for longer interval after negative screen. Use T0-negatives and cases at T1
# Note: 64 cancers at T1; only 30 among the 70% with no emp or consol.
# Make a dataset for impact of different risk thresholds among all screen-negatives
# For each candidate threshold, compute how many screen-negatives fall below
# it and what fraction of next-screen cancers would have a delayed diagnosis.
num_T0_neg <- nrow(filter(data.screen.abn.neg, interval==1)) # change to interval==2 to look at T2
num_T0_all <- nrow(filter(data.screen.abn, interval==1)) # change to interval==2 to look at T2
cases_T1 <- sum(filter(data.screen.abn.neg, interval==1)$case) # change to interval==2 to look at T2
n <- perc_negs <- perc_all <- ca_N <- perc_of_ca <- perc_w_ca <- perc_w_emp <- perc_w_consol <- perc_w_emp_or_consol <- vector()
thresholds <- seq(0,0.1,0.0001)
for (i in seq_along(thresholds)) {
dat <- filter(data.screen.abn.neg, interval==1 & post.risk.abn<=thresholds[i]) # change to interval==2 to look at T2
n[i] <- nrow(dat)
perc_negs[i] <- 100*nrow(dat)/num_T0_neg # percent of all negatives below threshold
perc_all[i] <- 100*nrow(dat)/num_T0_all # percent of all individuals below threshold
ca_N[i] <- sum(dat$case) # number with cancer below threshold
perc_of_ca[i] <- 100*sum(dat$case)/cases_T1 # percent of cancers falling below threshold
perc_w_ca[i] <- 100*sum(dat$case)/nrow(dat) # percent of individuals below threshold who have cancer
perc_w_emp[i] <- 100*sum(dat$emphysema)/nrow(dat) # percent of individuals below threshold who have CT-emphysema
perc_w_consol[i] <- 100*sum(dat$consolidation)/nrow(dat) # percent of individuals below threshold who have CT-consolidation
perc_w_emp_or_consol[i] <- 100*sum(I(dat$emphysema==1 | dat$consolidation==1))/nrow(dat) # percent of individuals below threshold who have CT-emphysema or consolidation
}
thres.plot.all <- as.data.frame(cbind(threshold=100*thresholds, n, perc_negs, perc_all, ca_N, perc_of_ca, perc_w_ca, perc_w_emp, perc_w_consol, perc_w_emp_or_consol))
# Plot this
# Tradeoff curve: x = % of screen-negatives given a longer interval,
# y = % of detectable next-screen cancers whose diagnosis would be delayed,
# with selected thresholds labeled as points.
thres.of.interest <- c(0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.8)
png(file="/Users/hrobbins827/Documents/PhD/NCI overflow/NLST/Figures/negatives_threshold.png",width=1200,height=850)
ggplot() + geom_line(data=thres.plot.all, aes(x=perc_negs, y=perc_of_ca), size=0.8) +
theme(panel.background = element_rect(fill=NA), panel.grid.major=element_line(colour="grey88"), panel.grid.minor=element_line(colour="grey88"),
axis.line = element_line(colour="black"), axis.title = element_text(size=28), axis.text = element_text(colour="black", size=28)) +
ylab("% of detectable next-screen cancers with delayed diagnosis") + xlab("\n% of screen-negatives with longer-than-annual interval") +
geom_point(data=subset(thres.plot.all, threshold %in% thres.of.interest), aes(x=perc_negs, y=perc_of_ca), size=4.5) +
geom_text(data=subset(thres.plot.all, threshold %in% thres.of.interest[1:5]), aes(x=perc_negs, y=perc_of_ca, label=paste("r \u2264", as.character(threshold), "%", sep="")), size=9, vjust=-1, hjust=0.8) +
geom_text(data=subset(thres.plot.all, threshold %in% thres.of.interest[6:8]), aes(x=perc_negs, y=perc_of_ca, label=paste("r \u2264", as.character(threshold), "%", sep="")), size=9, hjust=-0.25)
dev.off()
# Print some numbers to highlight in the text
filter(thres.plot.all, threshold %in% thres.of.interest)
|
#' Create a subgraph based on a selection of nodes
#' or edges
#' @description Create a subgraph based on a
#' selection of nodes or edges extant in the graph
#' object.
#' @param graph a graph object of class
#' \code{dgr_graph} that is created using
#' \code{create_graph}.
#' @examples
#' # Create a simple graph
#' nodes <-
#' create_nodes(
#' nodes = c("a", "b", "c", "d",
#' "e", "f", "g", "h"),
#' value = c(3.5, 2.6, 9.4, 2.7,
#' 5.2, 2.1, 4.8, 8.5))
#'
#' edges <-
#' create_edges(
#' from = c("a", "b", "c", "g", "e",
#' "e", "h", "f", "a", "c"),
#' to = c("d", "c", "a", "c", "h",
#' "b", "d", "e", "f", "d"))
#'
#' graph <-
#' create_graph(nodes_df = nodes,
#' edges_df = edges)
#'
#' get_nodes(graph)
#' #> [1] "a" "b" "c" "d" "e" "f" "g" "h"
#'
#' get_edges(graph, return_type = "vector")
#' #> [1] "a -> d" "b -> c" "c -> a" "g -> c" "e -> h"
#' #> [6] "e -> b" "h -> d" "f -> e" "a -> f" "c -> d"
#'
#' # Create a selection of nodes
#' graph <-
#' select_nodes(
#' graph = graph,
#' node_attr = "value",
#' search = "> 3")
#'
#' # Create a subgraph based on the selection
#' subgraph <-
#' create_subgraph_ws(graph)
#'
#' # Check the nodes available in the subgraph
#' get_nodes(subgraph)
#' #> [1] "a" "c" "e" "g" "h"
#'
#' # Check the edges available in the subgraph
#' get_edges(subgraph, return_type = "vector")
#' #> [1] "c -> a" "g -> c" "e -> h"
#' @return a graph object of class \code{dgr_graph}.
#' @export create_subgraph_ws
create_subgraph_ws <- function(graph) {

  # A subgraph can only be built from an active node or edge selection
  if (is.null(graph$selection)) {
    stop("The graph does not contain an active selection")
  }

  nodes_df <- graph$nodes_df
  edges_df <- graph$edges_df

  # Case 1: node selection -- retain the selected nodes, plus every edge
  # whose endpoints are both among the selected nodes
  if (!is.null(graph$selection$nodes)) {
    kept_nodes <- graph$selection$nodes
    selection_nodes_df <- nodes_df[nodes_df$nodes %in% kept_nodes, ]
    keep_edge <- edges_df$from %in% kept_nodes &
      edges_df$to %in% kept_nodes
    selection_edges_df <- edges_df[keep_edge, ]
  }

  # Case 2: edge selection -- retain the selected edges, plus every node
  # that appears as an endpoint of a retained edge
  if (!is.null(graph$selection$edges)) {
    keep_edge <- edges_df$from %in% graph$selection$edges$from &
      edges_df$to %in% graph$selection$edges$to
    selection_edges_df <- edges_df[keep_edge, ]
    endpoint_nodes <- unique(c(selection_edges_df$from,
                               selection_edges_df$to))
    selection_nodes_df <- nodes_df[nodes_df$nodes %in% endpoint_nodes, ]
  }

  # Assemble the subgraph, carrying over the parent graph's attributes
  create_graph(
    nodes_df = selection_nodes_df,
    edges_df = selection_edges_df,
    graph_attrs = graph$graph_attrs,
    node_attrs = graph$node_attrs,
    edge_attrs = graph$edge_attrs,
    directed = graph$directed,
    graph_name = graph$graph_name,
    graph_time = graph$graph_time,
    graph_tz = graph$graph_tz)
}
|
/R/create_subgraph_ws.R
|
no_license
|
julianflowers/DiagrammeR
|
R
| false
| false
| 3,124
|
r
|
#' Create a subgraph based on a selection of nodes
#' or edges
#' @description Create a subgraph based on a
#' selection of nodes or edges extant in the graph
#' object.
#' @param graph a graph object of class
#' \code{dgr_graph} that is created using
#' \code{create_graph}.
#' @examples
#' # Create a simple graph
#' nodes <-
#' create_nodes(
#' nodes = c("a", "b", "c", "d",
#' "e", "f", "g", "h"),
#' value = c(3.5, 2.6, 9.4, 2.7,
#' 5.2, 2.1, 4.8, 8.5))
#'
#' edges <-
#' create_edges(
#' from = c("a", "b", "c", "g", "e",
#' "e", "h", "f", "a", "c"),
#' to = c("d", "c", "a", "c", "h",
#' "b", "d", "e", "f", "d"))
#'
#' graph <-
#' create_graph(nodes_df = nodes,
#' edges_df = edges)
#'
#' get_nodes(graph)
#' #> [1] "a" "b" "c" "d" "e" "f" "g" "h"
#'
#' get_edges(graph, return_type = "vector")
#' #> [1] "a -> d" "b -> c" "c -> a" "g -> c" "e -> h"
#' #> [6] "e -> b" "h -> d" "f -> e" "a -> f" "c -> d"
#'
#' # Create a selection of nodes
#' graph <-
#' select_nodes(
#' graph = graph,
#' node_attr = "value",
#' search = "> 3")
#'
#' # Create a subgraph based on the selection
#' subgraph <-
#' create_subgraph_ws(graph)
#'
#' # Check the nodes available in the subgraph
#' get_nodes(subgraph)
#' #> [1] "a" "c" "e" "g" "h"
#'
#' # Check the edges available in the subgraph
#' get_edges(subgraph, return_type = "vector")
#' #> [1] "c -> a" "g -> c" "e -> h"
#' @return a graph object of class \code{dgr_graph}.
#' @export create_subgraph_ws
create_subgraph_ws <- function(graph) {
# Stop function if the graph does not contain a selection
if (is.null(graph$selection)) {
stop("The graph does not contain an active selection")
}
# Filter the nodes in the graph
# Node selection: keep the selected nodes and only those edges whose
# endpoints are both in the selection
if (!is.null(graph$selection$nodes)) {
selection_nodes <- graph$selection$nodes
selection_nodes_df <-
graph$nodes_df[which(graph$nodes_df$nodes %in% selection_nodes),]
selection_edges_df <-
graph$edges_df[which(graph$edges_df$from %in% selection_nodes &
graph$edges_df$to %in% selection_nodes),]
}
# Filter the edges in the graph
# Edge selection: keep the selected edges and the nodes appearing as an
# endpoint of any kept edge. Note: if both a node and an edge selection
# are present, this branch overwrites the results of the node branch.
if (!is.null(graph$selection$edges)) {
selection_from <- graph$selection$edges$from
selection_to <- graph$selection$edges$to
selection_edges_df <-
graph$edges_df[which(graph$edges_df$from %in% selection_from &
graph$edges_df$to %in% selection_to),]
selection_nodes_df <-
graph$nodes_df[which(graph$nodes_df$nodes %in%
unique(c(selection_edges_df$from,
selection_edges_df$to))),]
}
# Create a subgraph, carrying over the parent graph's attributes
subgraph <-
create_graph(
nodes_df = selection_nodes_df,
edges_df = selection_edges_df,
graph_attrs = graph$graph_attrs,
node_attrs = graph$node_attrs,
edge_attrs = graph$edge_attrs,
directed = graph$directed,
graph_name = graph$graph_name,
graph_time = graph$graph_time,
graph_tz = graph$graph_tz)
# Return the subgraph
return(subgraph)
}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% File.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{mkdirs.File}
\alias{mkdirs.File}
\alias{File.mkdirs}
\alias{mkdirs.File}
\alias{mkdirs,File-method}
\title{Creates a directory including any necessary but nonexistent parent directories}
\usage{\method{mkdirs}{File}(this, ...)}
\description{
Creates a directory including any necessary but nonexistent parent directories.
}
\value{
Returns \code{\link[base:logical]{TRUE}} if the directory was successfully created,
otherwise \code{\link[base:logical]{FALSE}}.
Note that if the directory already exists, \code{\link[base:logical]{FALSE}} is returned.
}
\author{Henrik Bengtsson (\url{http://www.braju.com/R/})}
\seealso{
Internally \code{\link[base]{dir.create}}() is used.
For more information see \code{\link{File}}.
}
\keyword{internal}
\keyword{methods}
|
/man/mkdirs.File.Rd
|
no_license
|
HenrikBengtsson/R.io
|
R
| false
| false
| 1,120
|
rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% File.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{mkdirs.File}
\alias{mkdirs.File}
\alias{File.mkdirs}
\alias{mkdirs.File}
\alias{mkdirs,File-method}
\title{Creates a directory including any necessary but nonexistent parent directories}
\usage{\method{mkdirs}{File}(this, ...)}
\description{
Creates a directory including any necessary but nonexistent parent directories.
}
\value{
Returns \code{\link[base:logical]{TRUE}} if the directory was successfully created,
otherwise \code{\link[base:logical]{FALSE}}.
Note that if the directory already exists, \code{\link[base:logical]{FALSE}} is returned.
}
\author{Henrik Bengtsson (\url{http://www.braju.com/R/})}
\seealso{
Internally \code{\link[base]{dir.create}}() is used.
For more information see \code{\link{File}}.
}
\keyword{internal}
\keyword{methods}
|
# Parallel backend setup -------------------------------------------------
# Load foreach/doParallel, report the worker count before and after
# registration, and register a cluster that leaves two cores free.
# NOTE: `cl` must keep this name — it is stopped at the end of the script.
library(doParallel)
library(foreach)
getDoParWorkers()                      # workers before a backend is registered
detectCores()                          # total logical cores on this machine
cl <- makeCluster(detectCores() - 2)   # leave two cores for the OS / session
registerDoParallel(cl)
getDoParWorkers()                      # confirm the registered worker count
# myfunc: run one Monte-Carlo replicate of an augmented-IPW (AIPW) analysis on
# simulated longitudinal data with monotone dropout, stratified by the
# follow-up horizon `s1` (the "D" / stratified variant).
#
# Argument:
#   i - replicate index (1..1000); selects the i-th pre-generated seed so that
#       replicates are reproducible and independent of scheduling order.
# Returns:
#   t(mybetad) - the 1 x 10 row of regression coefficients from the final
#       Newton-Raphson iteration (last expression of the function body).
# Side effects / dependencies:
#   Sources "simdatDAG1LMD.R" on each worker; `gendata()` is assumed to come
#   from that file (TODO confirm). Loads geepack on each worker.
myfunc = function(i)
{
  source("simdatDAG1LMD.R")
  library(geepack)
  # Fixed master seed -> identical seed table on every worker; seeds[i] then
  # makes replicate i reproducible.
  set.seed(60)
  seeds = floor(runif(1000)*10^8);
  # Inverse-logit helper (defined here but not used below in this function).
  EXPIT <- function(term) {
    return( exp(term)/(1+exp(term)) )
  }
  set.seed(seeds[i])
  # Generate one simulated cohort of N=500 subjects in long format.
  tmpdata = gendata(N=500)
  tmpdata$ager = ifelse(tmpdata$shiftage<=4,1,0);
  ###IPWIEE
  ###IPWIEE data
  # Restrict to rows at or before each subject's horizon s1, then build lagged
  # copies of the outcome `pek`: yl (lag 1 visit), lyl (lag 2), llyl (lag 3),
  # lllyl (lag 4).  The shift-by-one trick assumes rows are ordered by
  # subject/time and lags are blanked at times where they cannot exist.
  dat = tmpdata
  dat = dat[dat$s1>=dat$time,]
  dat$yl = c(NA, dat$pek[1:nrow(dat)-1])
  dat$yl = ifelse(dat$time==0, NA, dat$yl)
  dat$lyl = c(NA, dat$yl[1:nrow(dat)-1])
  dat$lyl = ifelse(dat$time<=2, NA, dat$lyl)
  dat$llyl = c(NA, dat$lyl[1:nrow(dat)-1])
  dat$llyl = ifelse(dat$time<=4, NA, dat$llyl)
  dat$lllyl = c(NA, dat$llyl[1:nrow(dat)-1])
  dat$lllyl = ifelse(dat$time<=6, NA, dat$lllyl)
  # Ri = observed-outcome indicator at this visit; Rim1 = indicator at the
  # previous visit (undefined at baseline time 0).
  dat$Ri = ifelse(!is.na(dat$pek),1,0)
  dat$Rim1 = c(NA, dat$Ri[1:nrow(dat)-1]) ##Status of the last observation
  dat$Rim1 = ifelse(dat$time==0, NA, dat$Rim1)
  ###IPW-IEE-D
  # Observation (dropout) models, fitted separately within each stratum
  # s1 in {2,4,6,8} and at each visit time, among rows observed at the
  # previous visit (Rim1 == 1).  Each logistic model regresses the
  # current-visit observation indicator Ri on the lagged outcome and sex.
  ipwdat = dat[,c("Case", "time", "shiftage", "sex", "edu", "Ri","pek","s1","Rim1","yl", "lyl","llyl","lllyl","ager")]
  ipwdat$pred_obs=NA;ipwdat$Rim1=ifelse(ipwdat$time==0, 0, ipwdat$Rim1);
  ipwdat22 = ipwdat[ipwdat$s1==2 & ipwdat$Rim1==1,];
  ipwdat2 = ipwdat22[,c("yl", "shiftage", "sex","edu","time","s1")]
  logistfit = glm(Ri ~ yl +sex, family = binomial(link=logit),data = ipwdat22)
  ipwdat22$pred_obs = predict(logistfit, newdata = ipwdat2, type="response")
  ##########
  ipwdat44 = ipwdat[ipwdat$s1==4 & ipwdat$Rim1==1,];
  ipwdat442 = ipwdat44[ipwdat44$time==2,]; ipwdat444 = ipwdat44[ipwdat44$time==4,];
  ipwdat42 = ipwdat442[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
  ipwdat44 = ipwdat444[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
  logistfit2 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat442)
  logistfit4 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat444)
  ipwdat442$pred_obs = predict(logistfit2, newdata = ipwdat42, type="response")
  ipwdat444$pred_obs = predict(logistfit4, newdata = ipwdat44, type="response")
  ###############
  ipwdat66 = ipwdat[ipwdat$s1==6 & ipwdat$Rim1==1,];
  ipwdat662 = ipwdat66[ipwdat66$time==2,]; ipwdat664 = ipwdat66[ipwdat66$time==4,]; ipwdat666 = ipwdat66[ipwdat66$time==6,];
  ipwdat62 = ipwdat662[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
  ipwdat64 = ipwdat664[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
  ipwdat66 = ipwdat666[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
  logistfit2 = glm(Ri ~ yl+sex, family = binomial(link=logit),data = ipwdat662)
  logistfit4 = glm(Ri ~ yl+sex, family = binomial(link=logit),data = ipwdat664)
  logistfit6 = glm(Ri ~ yl+sex, family = binomial(link=logit),data = ipwdat666)
  ipwdat662$pred_obs = predict(logistfit2, newdata = ipwdat62, type="response")
  ipwdat664$pred_obs = predict(logistfit4, newdata = ipwdat64, type="response")
  ipwdat666$pred_obs = predict(logistfit6, newdata = ipwdat66, type="response")
  #################
  ipwdat88 = ipwdat[ipwdat$s1==8 & ipwdat$Rim1==1,]
  ipwdat882 = ipwdat88[ipwdat88$time==2,]; ipwdat884 = ipwdat88[ipwdat88$time==4,]; ipwdat886 = ipwdat88[ipwdat88$time==6,]; ipwdat888 = ipwdat88[ipwdat88$time==8,];
  ipwdat82 = ipwdat882[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
  ipwdat84 = ipwdat884[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
  ipwdat86 = ipwdat886[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
  ipwdat88 = ipwdat888[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
  logistfit2 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat882)
  logistfit4 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat884)
  logistfit6 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat886)
  logistfit8 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat888)
  ipwdat882$pred_obs = predict(logistfit2, newdata = ipwdat82, type="response")
  ipwdat884$pred_obs = predict(logistfit4, newdata = ipwdat84, type="response")
  ipwdat886$pred_obs = predict(logistfit6, newdata = ipwdat86, type="response")
  ipwdat888$pred_obs = predict(logistfit8, newdata = ipwdat88, type="response")
  ##PREDICT for waves
  # Assemble cumulative inverse-probability weights: pi.t is the reciprocal of
  # the product of fitted observation probabilities up to visit t, reshaped
  # wide then back to long; baseline rows (time 0) get weight pi = 1.
  combine = rbind(ipwdat22,ipwdat442,ipwdat444,ipwdat662,ipwdat664,ipwdat666,ipwdat882,ipwdat884,ipwdat886,ipwdat888)
  combine = combine[order(combine$Case),];
  combine$Rim1=combine$Ri=combine$yl=combine$lyl=combine$llyl=combine$lllyl=NULL;
  combw = reshape(combine, idvar = "Case", v.names=c("pek","pred_obs"),timevar="time", direction="wide")
  combw$pi.2=1/combw$pred_obs.2; combw$pi.4=1/(combw$pred_obs.2*combw$pred_obs.4);
  combw$pi.6=1/(combw$pred_obs.2*combw$pred_obs.4*combw$pred_obs.6); combw$pi.8=1/(combw$pred_obs.2*combw$pred_obs.4*combw$pred_obs.6*combw$pred_obs.8);
  combw$pred_obs.2 = combw$pred_obs.4 = combw$pred_obs.6 = combw$pred_obs.8 = NULL;
  # NOTE(review): varying= uses positional column indices (7:10, 11:14) —
  # fragile if the column layout of combw ever changes; verify against combw.
  long = reshape(combw, varying=list(c(7:10),c(11:14)), v.names = c("pek","pi"), new.row.names=1:1000000, times=c(2,4,6,8), direction="long")
  long$id=NULL;
  ipwdat0 = ipwdat[ipwdat$time==0,]; ipwdat0$pi=1;
  ipwdat0$yl=ipwdat0$lyl=ipwdat0$llyl=ipwdat0$lllyl=ipwdat0$pred_obs=ipwdat0$Ri=ipwdat0$Rim1=NULL;
  ripwdatd = rbind(ipwdat0,long)
  ripwdatd = ripwdatd[order(ripwdatd$Case),];
  ripwdatd = ripwdatd[ripwdatd$time<= ripwdatd$s1,]
  ripwdatd$Ri = ifelse(!is.na(ripwdatd$pek),1,0)
  ## Augmented IPW-D ###
  #### Imputation models (10)
  # First-stage imputation: linear models of pek on the 1-visit lag (yl) and
  # sex, fitted per (time, s1) cell among rows observed at the previous visit.
  # Missing pek values are replaced by fitted values; the mifit* objects are
  # also reused later inside the Newton-Raphson loop to build W1..W4.
  midat12 = dat[dat$time==2 & dat$Rim1==1 & dat$s1==2,]
  mifit1 = lm(pek ~ yl + sex, data = midat12)
  mi12 = midat12[,c("yl", "shiftage", "sex","edu","time","s1")]
  midat12$pred_obs = predict(mifit1, newdata = mi12, type="response")
  midat12$pred_obs = ifelse(!is.na(midat12$pek), midat12$pek, midat12$pred_obs)
  midat12$pek = midat12$pred_obs;
  midat122 = dat[dat$time==2 & dat$Rim1==1 & dat$s1==4,]
  mifit12 = lm(pek ~ yl + sex, data = midat122)
  mi12 = midat122[,c("yl", "shiftage", "sex","edu","time","s1")]
  midat122$pred_obs = predict(mifit12, newdata = mi12, type="response")
  midat122$pred_obs = ifelse(!is.na(midat122$pek), midat122$pek, midat122$pred_obs)
  midat122$pek = midat122$pred_obs;
  midat123 = dat[dat$time==2 & dat$Rim1==1 & dat$s1==6,]
  mifit13 = lm(pek ~ yl + sex, data = midat123)
  mi12 = midat123[,c("yl", "shiftage", "sex","edu","time","s1")]
  midat123$pred_obs = predict(mifit13, newdata = mi12, type="response")
  midat123$pred_obs = ifelse(!is.na(midat123$pek), midat123$pek, midat123$pred_obs)
  midat123$pek = midat123$pred_obs;
  midat124 = dat[dat$time==2 & dat$Rim1==1 & dat$s1==8,]
  mifit14 = lm(pek ~ yl + sex, data = midat124)
  mi12 = midat124[,c("yl", "shiftage", "sex","edu","time","s1")]
  midat124$pred_obs = predict(mifit14, newdata = mi12, type="response")
  midat124$pred_obs = ifelse(!is.na(midat124$pek), midat124$pek, midat124$pred_obs)
  midat124$pek = midat124$pred_obs;
  midat23 = dat[dat$time==4 & dat$Rim1==1 & dat$s1==4,]
  mifit2 = lm(pek ~ yl + sex, data = midat23)
  mi23 = midat23[,c("yl", "lyl","shiftage", "sex","edu","time","s1")]
  midat23$pred_obs = predict(mifit2, newdata = mi23, type="response")
  midat23$pred_obs = ifelse(!is.na(midat23$pek), midat23$pek, midat23$pred_obs)
  midat232 = dat[dat$time==4 & dat$Rim1==1 & dat$s1==6,]
  mifit22 = lm(pek ~ yl + sex, data = midat232)
  mi23 = midat232[,c("yl", "lyl","shiftage", "sex","edu","time","s1")]
  midat232$pred_obs = predict(mifit22, newdata = mi23, type="response")
  midat232$pred_obs = ifelse(!is.na(midat232$pek), midat232$pek, midat232$pred_obs)
  midat233 = dat[dat$time==4 & dat$Rim1==1 & dat$s1==8,]
  mifit23 = lm(pek ~ yl + sex, data = midat233)
  mi23 = midat233[,c("yl", "lyl","shiftage", "sex","edu","time","s1")]
  midat233$pred_obs = predict(mifit23, newdata = mi23, type="response")
  midat233$pred_obs = ifelse(!is.na(midat233$pek), midat233$pek, midat233$pred_obs)
  midat34 = dat[dat$time==6 & dat$Rim1==1 & dat$s1==6,]
  mifit3 = lm(pek ~ yl + sex, data = midat34)
  mi34 = midat34[,c("yl", "lyl","llyl", "shiftage", "sex","edu","time","s1")]
  midat34$pred_obs = predict(mifit3, newdata = mi34, type="response")
  midat34$pred_obs = ifelse(!is.na(midat34$pek), midat34$pek, midat34$pred_obs)
  midat342 = dat[dat$time==6 & dat$Rim1==1 & dat$s1==8,]
  mifit32 = lm(pek ~ yl + sex, data = midat342)
  mi34 = midat342[,c("yl", "lyl","llyl", "shiftage", "sex","edu","time","s1")]
  midat342$pred_obs = predict(mifit32, newdata = mi34, type="response")
  midat342$pred_obs = ifelse(!is.na(midat342$pek), midat342$pek, midat342$pred_obs)
  midat45 = dat[dat$time==8 & dat$Rim1==1,]
  mifit4 = lm(pek ~ yl + sex, data = midat45)
  mi45 = midat45[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
  midat45$pred_obs = predict(mifit4, newdata = mi45, type="response")
  midat45$pred_obs = ifelse(!is.na(midat45$pek), midat45$pek, midat45$pred_obs)
  ###########################Fitted values are not covariates but outcomes
  # Second-stage imputation: for rows whose 1-visit lag is itself missing,
  # refit on deeper lags (lyl, then llyl, then lllyl), each time appending the
  # previously skipped rows and propagating fitted values into pek.  Statement
  # order here is essential: each model is trained on the partially imputed
  # data produced by the step before it.
  tmpmidat23 = dat[dat$time==4 & is.na(dat$yl) & dat$s1==4,]
  midat23$pek = midat23$pred_obs; midat23$pred_obs=NULL; midat23 = rbind(tmpmidat23,midat23);
  midat23$Ri = ifelse(!is.na(midat23$pek),1,0)
  mifit5 = lm(pek ~ lyl + sex, data = midat23)
  mi23 = midat23[,c("lyl", "shiftage", "sex","edu","time","s1")]
  midat23$pred_obs = predict(mifit5, newdata = mi23, type="response")
  midat23$pred_obs = ifelse(!is.na(midat23$pek), midat23$pek, midat23$pred_obs)
  midat23$pek = midat23$pred_obs;
  tmpmidat23 = dat[dat$time==4 & is.na(dat$yl) & dat$s1==6,]
  midat232$pek = midat232$pred_obs; midat232$pred_obs=NULL; midat232 = rbind(tmpmidat23,midat232);
  midat232$Ri = ifelse(!is.na(midat232$pek),1,0)
  mifit52 = lm(pek ~ lyl + sex, data = midat232)
  mi23 = midat232[,c("lyl", "shiftage", "sex","edu","time","s1")]
  midat232$pred_obs = predict(mifit52, newdata = mi23, type="response")
  midat232$pred_obs = ifelse(!is.na(midat232$pek), midat232$pek, midat232$pred_obs)
  midat232$pek = midat232$pred_obs;
  tmpmidat23 = dat[dat$time==4 & is.na(dat$yl) & dat$s1==8,]
  midat233$pek = midat233$pred_obs; midat233$pred_obs=NULL; midat233 = rbind(tmpmidat23,midat233);
  midat233$Ri = ifelse(!is.na(midat233$pek),1,0)
  mifit53 = lm(pek ~ lyl + sex, data = midat233)
  mi23 = midat233[,c("lyl", "shiftage", "sex","edu","time","s1")]
  midat233$pred_obs = predict(mifit53, newdata = mi23, type="response")
  midat233$pred_obs = ifelse(!is.na(midat233$pek), midat233$pek, midat233$pred_obs)
  midat233$pek = midat233$pred_obs;
  tmpmidat34 = dat[dat$time==6 & is.na(dat$yl) & !is.na(dat$lyl) & dat$s1==6,]
  midat34$pek = midat34$pred_obs; midat34$pred_obs=NULL; midat34 = rbind(tmpmidat34,midat34);
  midat34$Ri = ifelse(!is.na(midat34$pek),1,0)
  mifit6 = lm(pek ~ lyl + sex, data = midat34)
  mi34 = midat34[,c("lyl","llyl", "shiftage", "sex","edu","time","s1")]
  midat34$pred_obs = predict(mifit6, newdata = mi34, type="response")
  midat34$pred_obs = ifelse(!is.na(midat34$pek), midat34$pek, midat34$pred_obs)
  tmpmidat342 = dat[dat$time==6 & is.na(dat$yl) & !is.na(dat$lyl) & dat$s1==8,]
  midat342$pek = midat342$pred_obs; midat342$pred_obs=NULL; midat342 = rbind(tmpmidat342,midat342);
  midat342$Ri = ifelse(!is.na(midat342$pek),1,0)
  mifit62 = lm(pek ~ lyl + sex, data = midat342)
  mi34 = midat342[,c("lyl","llyl", "shiftage", "sex","edu","time","s1")]
  midat342$pred_obs = predict(mifit62, newdata = mi34, type="response")
  midat342$pred_obs = ifelse(!is.na(midat342$pek), midat342$pek, midat342$pred_obs)
  tmpmidat45 = dat[dat$time==8 & is.na(dat$yl) & !is.na(dat$lyl),]
  midat45$pek = midat45$pred_obs; midat45$pred_obs=NULL; midat45 = rbind(tmpmidat45,midat45);
  midat45$Ri = ifelse(!is.na(midat45$pek),1,0)
  mifit7 = lm(pek ~ lyl + sex, data = midat45)
  mi45 = midat45[,c("lyl","llyl","lllyl" , "shiftage", "sex","edu","time","s1")]
  midat45$pred_obs = predict(mifit7, newdata = mi45, type="response")
  midat45$pred_obs = ifelse(!is.na(midat45$pek), midat45$pek, midat45$pred_obs)
  ######
  tmpmidat34 = dat[dat$time==6 & is.na(dat$yl) & is.na(dat$lyl) & !is.na(dat$llyl) & dat$s1==6,]
  midat34$pek = midat34$pred_obs; midat34$pred_obs=NULL; midat34 = rbind(tmpmidat34,midat34);
  midat34$Ri = ifelse(!is.na(midat34$pek),1,0)
  mifit8 = lm(pek ~ llyl + sex, data = midat34)
  mi34 = midat34[,c("llyl", "shiftage", "sex","edu","time","s1")]
  midat34$pred_obs = predict(mifit8, newdata = mi34, type="response")
  midat34$pred_obs = ifelse(!is.na(midat34$pek), midat34$pek, midat34$pred_obs)
  midat34$pek = midat34$pred_obs;
  tmpmidat342 = dat[dat$time==6 & is.na(dat$yl) & is.na(dat$lyl) & !is.na(dat$llyl) & dat$s1==8,]
  midat342$pek = midat342$pred_obs; midat342$pred_obs=NULL; midat342 = rbind(tmpmidat342,midat342);
  midat342$Ri = ifelse(!is.na(midat342$pek),1,0)
  mifit82 = lm(pek ~ llyl + sex, data = midat342)
  mi34 = midat342[,c("llyl", "shiftage", "sex","edu","time","s1")]
  midat342$pred_obs = predict(mifit82, newdata = mi34, type="response")
  midat342$pred_obs = ifelse(!is.na(midat342$pek), midat342$pek, midat342$pred_obs)
  midat342$pek = midat342$pred_obs;
  tmpmidat45 = dat[dat$time==8 & is.na(dat$yl) & is.na(dat$lyl) & !is.na(dat$llyl),]
  midat45$pek = midat45$pred_obs; midat45$pred_obs=NULL; midat45 = rbind(tmpmidat45,midat45);
  midat45$Ri = ifelse(!is.na(midat45$pek),1,0)
  mifit9 = lm(pek ~ llyl + sex, data = midat45)
  mi45 = midat45[,c("llyl","lllyl", "shiftage", "sex","edu","time","s1")]
  midat45$pred_obs = predict(mifit9, newdata = mi45, type="response")
  midat45$pred_obs = ifelse(!is.na(midat45$pek), midat45$pek, midat45$pred_obs)
  ######
  tmpmidat45 = dat[dat$time==8 & is.na(dat$yl) & is.na(dat$llyl),]
  midat45$pek = midat45$pred_obs; midat45$pred_obs=NULL; midat45 = rbind(tmpmidat45,midat45);
  midat45$Ri = ifelse(!is.na(midat45$pek),1,0)
  mifit10 = lm(pek ~ lllyl + sex, data = midat45)
  mi45 = midat45[,c("lllyl", "shiftage", "sex","edu","time","s1")]
  midat45$pred_obs = predict(mifit10, newdata = mi45, type="response")
  midat45$pred_obs = ifelse(!is.na(midat45$pek), midat45$pek, midat45$pred_obs)
  midat45$pek = midat45$pred_obs;
  #####
  # Merge all imputed cells with the baseline rows, sort by subject, and align
  # weights (pi), observation indicators (Ri) and the real (unimputed) outcome
  # (digreal) from ripwdatd row-for-row — this assumes midatsortd and
  # ripwdatd have identical row order (TODO confirm).
  midat = rbind(midat12, midat122, midat123, midat124, midat23, midat232, midat233, midat34, midat342, midat45);
  midat$pred_obs = NULL;
  midat = rbind(dat[dat$time==0,],midat)
  midatsortd = midat[order(midat$Case),]
  midatsortd = midatsortd[midatsortd$time<=midatsortd$s1,]
  ########## complete data set
  midatsortd$pi=1/ripwdatd$pi
  midatsortd$Rim1 = midatsortd$yl = midatsortd$lyl = midatsortd$llyl = midatsortd$lllyl=NULL;
  midatsortd$Ri = ripwdatd$Ri; midatsortd$digreal = ripwdatd$pek;
  ##NR##
  # Newton-Raphson for the 10-dim coefficient vector of the AIPW estimating
  # equation: U accumulates per-subject score contributions, dU the derivative;
  # iterate until the max update step falls below crit.
  beta = c(18,0,0,0,0,0,0,0,0,0);
  diff=10;r=1;crit=0.00001
  while(diff>=crit)
  {
    phi = phic = 1;
    U = 0; dU=0;
    auniqueid = unique(midatsortd$Case);
    # Loop index `i` shadows the function argument here; the argument is no
    # longer needed past the seeding step above.
    for(i in auniqueid)
    {
      tmp5 = midatsortd[midatsortd$Case==i,];
      n = nrow(tmp5);
      # Design matrix: intercept, 4 time dummies, sex, and sex-by-time terms.
      tmpdiag = diag(5); tmpdiag = tmpdiag[1:n,!tmpdiag[,1]]; tmpdiag2 = tmp5$sex[1]*tmpdiag;
      x = matrix(c(rep(1, n),as.vector(tmpdiag), tmp5$sex,as.vector(tmpdiag2)),nrow=n) #nix4
      # Pad tmp5 to 5 rows by replicating the last row (with time set to 10)
      # so positional indexing [1..5] below is always valid.
      last = tmp5[n,]; last$time=10; last2 = do.call("rbind",replicate(5-n, last, simplify = FALSE));tmp5 = rbind(tmp5,last2);
      ##j=1
      # W1..W4: augmentation outcome vectors built from the imputation models,
      # conditioning on the data observed through visit j.
      xtmp = as.data.frame(matrix(c(rep(tmp5$digreal[1],4), tmp5$sex[1], tmp5$edu[1]),nrow=1))
      colnames(xtmp) = c("yl","lyl","llyl","lllyl","sex","edu")
      W1 = NULL;
      if(!is.na(tmp5$digreal[1]) & tmp5$s1[1]==0) {W1=c(tmp5$digreal[1],predict(mifit1, newdata = xtmp, type="response"), predict(mifit5, newdata = xtmp, type="response"),predict(mifit8, newdata = xtmp, type="response"),predict(mifit10, newdata = xtmp, type="response"))[1:n]}else{W1=W1}
      if(!is.na(tmp5$digreal[1]) & tmp5$s1[1]==2) {W1=c(tmp5$digreal[1],predict(mifit1, newdata = xtmp, type="response"), predict(mifit5, newdata = xtmp, type="response"),predict(mifit8, newdata = xtmp, type="response"),predict(mifit10, newdata = xtmp, type="response"))[1:n]}else{W1=W1}
      if(!is.na(tmp5$digreal[1]) & tmp5$s1[1]==4) {W1=c(tmp5$digreal[1],predict(mifit12, newdata = xtmp, type="response"), predict(mifit5, newdata = xtmp, type="response"),predict(mifit8, newdata = xtmp, type="response"),predict(mifit10, newdata = xtmp, type="response"))[1:n]}else{W1=W1}
      if(!is.na(tmp5$digreal[1]) & tmp5$s1[1]==6) {W1=c(tmp5$digreal[1],predict(mifit13, newdata = xtmp, type="response"), predict(mifit52, newdata = xtmp, type="response"),predict(mifit8, newdata = xtmp, type="response"),predict(mifit10, newdata = xtmp, type="response"))[1:n]}else{W1=W1}
      if(!is.na(tmp5$digreal[1]) & tmp5$s1[1]==8) {W1=c(tmp5$digreal[1],predict(mifit14, newdata = xtmp, type="response"), predict(mifit53, newdata = xtmp, type="response"),predict(mifit82, newdata = xtmp, type="response"),predict(mifit10, newdata = xtmp, type="response"))[1:n]}else{W1=W1}
      #j=2
      xtmp2l = data.frame(cbind(tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1],tmp5$s1[1]));
      colnames(xtmp2l) = c("yl","lyl","sex","edu","s1")
      xtmp2 = data.frame(cbind(tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1],tmp5$s1[1]));
      colnames(xtmp2) = c("lyl","llyl","sex","edu","s1")
      xtmp2ll = data.frame(cbind(tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1]));
      colnames(xtmp2ll) = c("llyl","lllyl","sex","edu")
      W2=NULL
      if(!is.na(tmp5$digreal[2]) & tmp5$s1[1]==2){W2=c(tmp5$digreal[1], tmp5$digreal[2],predict(mifit2, newdata=xtmp2l,type="response"),predict(mifit6, newdata=xtmp2,type="response"),predict(mifit9, newdata=xtmp2ll,type="response"))[1:n]}else{W2=W2}
      if(!is.na(tmp5$digreal[2]) & tmp5$s1[1]==4){W2=c(tmp5$digreal[1], tmp5$digreal[2],predict(mifit2, newdata=xtmp2l,type="response"),predict(mifit6, newdata=xtmp2,type="response"),predict(mifit9, newdata=xtmp2ll,type="response"))[1:n]}else{W2=W2}
      if(!is.na(tmp5$digreal[2]) & tmp5$s1[1]==6){W2=c(tmp5$digreal[1], tmp5$digreal[2],predict(mifit22, newdata=xtmp2l,type="response"),predict(mifit6, newdata=xtmp2,type="response"),predict(mifit9, newdata=xtmp2ll,type="response"))[1:n]}else{W2=W2}
      if(!is.na(tmp5$digreal[2]) & tmp5$s1[1]==8){W2=c(tmp5$digreal[1], tmp5$digreal[2],predict(mifit23, newdata=xtmp2l,type="response"),predict(mifit62, newdata=xtmp2,type="response"),predict(mifit9, newdata=xtmp2ll,type="response"))[1:n]}else{W2=W2}
      #j=3
      xtmp3 = data.frame(cbind(tmp5$digreal[3],tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1],tmp5$s1[1]));
      colnames(xtmp3) = c("yl","lyl","llyl","sex","edu","s1")
      xtmp3l = data.frame(cbind(tmp5$digreal[3],tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1]));
      colnames(xtmp3l) = c("lyl","llyl","lllyl","sex","edu")
      W3=NULL
      if(!is.na(tmp5$digreal[3]) & tmp5$s1[1]==6){W3=c(tmp5$digreal[1], tmp5$digreal[2],tmp5$digreal[3],predict(mifit3, newdata=xtmp3,type="response"),predict(mifit7, newdata=xtmp3l,type="response"))[1:n]}else{W3=W3}
      if(!is.na(tmp5$digreal[3]) & tmp5$s1[1]==8){W3=c(tmp5$digreal[1], tmp5$digreal[2],tmp5$digreal[3],predict(mifit32, newdata=xtmp3,type="response"),predict(mifit7, newdata=xtmp3l,type="response"))[1:n]}else{W3=W3}
      #j=4
      xtmp4 = data.frame(cbind(tmp5$digreal[4],tmp5$digreal[3],tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1]));
      colnames(xtmp4) = c("yl","lyl","llyl","lllyl","sex","edu")
      W4=NULL
      if(!is.na(tmp5$digreal[3]) & tmp5$s1[1]==8){W4=c(tmp5$digreal[1], tmp5$digreal[2],tmp5$digreal[3],tmp5$digreal[4],predict(mifit4, newdata=xtmp4,type="response"))[1:n]}else{W4=W4}
      fitted = x %*% t(t(beta)); tmp5 = tmp5[tmp5$s1>=tmp5$time,];
      ### For the left hand side of AIPW (complete side)
      var = diag(phi,n); varc = diag(phic,n)
      if(!is.na(tmp5$pi[n]) & !is.na(tmp5$digreal[n])) {uleft=(1/tmp5$pi[n])*(t(x) %*% var %*% (tmp5$digreal - x%*%t(t(beta)) ) )} else{uleft=matrix(c(rep(0,ncol(x))),ncol=1)}
      if(!is.na(tmp5$pi[n]) & !is.na(tmp5$digreal[n])) {duleft = (1/tmp5$pi[n])*(t(x) %*% varc %*% x)} else{duleft=diag(0,ncol(x))}
      ### For the right hand side
      if(!is.null(W1) & !is.na(tmp5$pi[2])) {c1=as.matrix((tmp5$Ri[1]/(tmp5$pi[1])-(tmp5$Ri[2])/(tmp5$pi[2]))*(W1-fitted)); p1=(tmp5$Ri[1]/(tmp5$pi[1])-(tmp5$Ri[2])/(tmp5$pi[2]))} else{c1=matrix(c(rep(0,n)),ncol=1);p1=0}
      if(!is.null(W2)& !is.na(tmp5$pi[3])) {c2=as.matrix((tmp5$Ri[2]/(tmp5$pi[2])-(tmp5$Ri[3])/(tmp5$pi[3]))*(W2-fitted));p2=(tmp5$Ri[2]/(tmp5$pi[2])-(tmp5$Ri[3])/(tmp5$pi[3]))} else{c2=matrix(c(rep(0,n)),ncol=1); p2=0;}
      if(!is.null(W3)& !is.na(tmp5$pi[4])) {c3=as.matrix((tmp5$Ri[3]/(tmp5$pi[3])-(tmp5$Ri[4])/(tmp5$pi[4]))*(W3-fitted)); p3=(tmp5$Ri[3]/(tmp5$pi[3])-(tmp5$Ri[4])/(tmp5$pi[4]))} else{c3=matrix(c(rep(0,n)),ncol=1); p3=0;}
      if(!is.null(W4)& !is.na(tmp5$pi[5])) {c4=as.matrix((tmp5$Ri[4]/(tmp5$pi[4])-(tmp5$Ri[5])/(tmp5$pi[5]))*(W4-fitted));p4=(tmp5$Ri[4]/(tmp5$pi[4])-(tmp5$Ri[5])/(tmp5$pi[5]))} else{c4=matrix(c(rep(0,n)),ncol=1);p4=0;}
      uright = t(x) %*% var %*% (c1+c2+c3+c4)
      duright = t(x) %*% var %*% x *(p1+p2+p3+p4)
      if(tmp5$Ri[n]==1) {Ubeta = uleft+uright; dUbeta = duleft+duright} else{Ubeta = uright; dUbeta = duright;}
      U = U + Ubeta
      dU = dU + dUbeta
    }
    # Newton step: beta <- beta + (dU)^{-1} U (written via solve(-dU)).
    diff = max(solve(-dU) %*% U)
    beta = beta - solve(-dU) %*% U
    #r=r+1
    #cat(r, "\n")
  }
  mybetad = beta
  # Returned value: transposed coefficient vector (1 x 10 matrix).
  t(mybetad)
  #outtmp = c(mybetad)
  #out = rbind(out, outtmp)
  #cat(m, "\n")
}
# Run 1000 replicates in parallel, stack the per-replicate coefficient rows
# into one matrix, write them to disk, and release the cluster.
sim_out <- foreach(i = 1:1000) %dopar% myfunc(i)
sim_mat <- do.call(rbind, sim_out)
write.csv(sim_mat, "aipw_stratD.csv")
stopCluster(cl)
|
/DAG1/sample_500/aipw_stratD.R
|
no_license
|
lw499/mortalcohort_github
|
R
| false
| false
| 22,107
|
r
|
# Parallel backend setup: load foreach/doParallel, report the worker count
# before and after registration, and register a cluster on all but two cores.
# (`cl` is the handle later passed to stopCluster.)
library(doParallel)
library(foreach)
getDoParWorkers()
detectCores()
cl=makeCluster(detectCores()-2)
registerDoParallel(cl)
getDoParWorkers()
myfunc = function(i)
{
source("simdatDAG1LMD.R")
library(geepack)
set.seed(60)
seeds = floor(runif(1000)*10^8);
EXPIT <- function(term) {
return( exp(term)/(1+exp(term)) )
}
set.seed(seeds[i])
tmpdata = gendata(N=500)
tmpdata$ager = ifelse(tmpdata$shiftage<=4,1,0);
###IPWIEE
###IPWIEE data
dat = tmpdata
dat = dat[dat$s1>=dat$time,]
dat$yl = c(NA, dat$pek[1:nrow(dat)-1])
dat$yl = ifelse(dat$time==0, NA, dat$yl)
dat$lyl = c(NA, dat$yl[1:nrow(dat)-1])
dat$lyl = ifelse(dat$time<=2, NA, dat$lyl)
dat$llyl = c(NA, dat$lyl[1:nrow(dat)-1])
dat$llyl = ifelse(dat$time<=4, NA, dat$llyl)
dat$lllyl = c(NA, dat$llyl[1:nrow(dat)-1])
dat$lllyl = ifelse(dat$time<=6, NA, dat$lllyl)
dat$Ri = ifelse(!is.na(dat$pek),1,0)
dat$Rim1 = c(NA, dat$Ri[1:nrow(dat)-1]) ##Status of the last observation
dat$Rim1 = ifelse(dat$time==0, NA, dat$Rim1)
###IPW-IEE-D
ipwdat = dat[,c("Case", "time", "shiftage", "sex", "edu", "Ri","pek","s1","Rim1","yl", "lyl","llyl","lllyl","ager")]
ipwdat$pred_obs=NA;ipwdat$Rim1=ifelse(ipwdat$time==0, 0, ipwdat$Rim1);
ipwdat22 = ipwdat[ipwdat$s1==2 & ipwdat$Rim1==1,];
ipwdat2 = ipwdat22[,c("yl", "shiftage", "sex","edu","time","s1")]
logistfit = glm(Ri ~ yl +sex, family = binomial(link=logit),data = ipwdat22)
ipwdat22$pred_obs = predict(logistfit, newdata = ipwdat2, type="response")
##########
ipwdat44 = ipwdat[ipwdat$s1==4 & ipwdat$Rim1==1,];
ipwdat442 = ipwdat44[ipwdat44$time==2,]; ipwdat444 = ipwdat44[ipwdat44$time==4,];
ipwdat42 = ipwdat442[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
ipwdat44 = ipwdat444[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
logistfit2 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat442)
logistfit4 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat444)
ipwdat442$pred_obs = predict(logistfit2, newdata = ipwdat42, type="response")
ipwdat444$pred_obs = predict(logistfit4, newdata = ipwdat44, type="response")
###############
ipwdat66 = ipwdat[ipwdat$s1==6 & ipwdat$Rim1==1,];
ipwdat662 = ipwdat66[ipwdat66$time==2,]; ipwdat664 = ipwdat66[ipwdat66$time==4,]; ipwdat666 = ipwdat66[ipwdat66$time==6,];
ipwdat62 = ipwdat662[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
ipwdat64 = ipwdat664[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
ipwdat66 = ipwdat666[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
logistfit2 = glm(Ri ~ yl+sex, family = binomial(link=logit),data = ipwdat662)
logistfit4 = glm(Ri ~ yl+sex, family = binomial(link=logit),data = ipwdat664)
logistfit6 = glm(Ri ~ yl+sex, family = binomial(link=logit),data = ipwdat666)
ipwdat662$pred_obs = predict(logistfit2, newdata = ipwdat62, type="response")
ipwdat664$pred_obs = predict(logistfit4, newdata = ipwdat64, type="response")
ipwdat666$pred_obs = predict(logistfit6, newdata = ipwdat66, type="response")
#################
ipwdat88 = ipwdat[ipwdat$s1==8 & ipwdat$Rim1==1,]
ipwdat882 = ipwdat88[ipwdat88$time==2,]; ipwdat884 = ipwdat88[ipwdat88$time==4,]; ipwdat886 = ipwdat88[ipwdat88$time==6,]; ipwdat888 = ipwdat88[ipwdat88$time==8,];
ipwdat82 = ipwdat882[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
ipwdat84 = ipwdat884[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
ipwdat86 = ipwdat886[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
ipwdat88 = ipwdat888[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
logistfit2 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat882)
logistfit4 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat884)
logistfit6 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat886)
logistfit8 = glm(Ri ~ yl + sex, family = binomial(link=logit),data = ipwdat888)
ipwdat882$pred_obs = predict(logistfit2, newdata = ipwdat82, type="response")
ipwdat884$pred_obs = predict(logistfit4, newdata = ipwdat84, type="response")
ipwdat886$pred_obs = predict(logistfit6, newdata = ipwdat86, type="response")
ipwdat888$pred_obs = predict(logistfit8, newdata = ipwdat88, type="response")
##PREDICT for waves
combine = rbind(ipwdat22,ipwdat442,ipwdat444,ipwdat662,ipwdat664,ipwdat666,ipwdat882,ipwdat884,ipwdat886,ipwdat888)
combine = combine[order(combine$Case),];
combine$Rim1=combine$Ri=combine$yl=combine$lyl=combine$llyl=combine$lllyl=NULL;
combw = reshape(combine, idvar = "Case", v.names=c("pek","pred_obs"),timevar="time", direction="wide")
combw$pi.2=1/combw$pred_obs.2; combw$pi.4=1/(combw$pred_obs.2*combw$pred_obs.4);
combw$pi.6=1/(combw$pred_obs.2*combw$pred_obs.4*combw$pred_obs.6); combw$pi.8=1/(combw$pred_obs.2*combw$pred_obs.4*combw$pred_obs.6*combw$pred_obs.8);
combw$pred_obs.2 = combw$pred_obs.4 = combw$pred_obs.6 = combw$pred_obs.8 = NULL;
long = reshape(combw, varying=list(c(7:10),c(11:14)), v.names = c("pek","pi"), new.row.names=1:1000000, times=c(2,4,6,8), direction="long")
long$id=NULL;
ipwdat0 = ipwdat[ipwdat$time==0,]; ipwdat0$pi=1;
ipwdat0$yl=ipwdat0$lyl=ipwdat0$llyl=ipwdat0$lllyl=ipwdat0$pred_obs=ipwdat0$Ri=ipwdat0$Rim1=NULL;
ripwdatd = rbind(ipwdat0,long)
ripwdatd = ripwdatd[order(ripwdatd$Case),];
ripwdatd = ripwdatd[ripwdatd$time<= ripwdatd$s1,]
ripwdatd$Ri = ifelse(!is.na(ripwdatd$pek),1,0)
## Augmented IPW-D ###
#### Imputation models (10)
midat12 = dat[dat$time==2 & dat$Rim1==1 & dat$s1==2,]
mifit1 = lm(pek ~ yl + sex, data = midat12)
mi12 = midat12[,c("yl", "shiftage", "sex","edu","time","s1")]
midat12$pred_obs = predict(mifit1, newdata = mi12, type="response")
midat12$pred_obs = ifelse(!is.na(midat12$pek), midat12$pek, midat12$pred_obs)
midat12$pek = midat12$pred_obs;
midat122 = dat[dat$time==2 & dat$Rim1==1 & dat$s1==4,]
mifit12 = lm(pek ~ yl + sex, data = midat122)
mi12 = midat122[,c("yl", "shiftage", "sex","edu","time","s1")]
midat122$pred_obs = predict(mifit12, newdata = mi12, type="response")
midat122$pred_obs = ifelse(!is.na(midat122$pek), midat122$pek, midat122$pred_obs)
midat122$pek = midat122$pred_obs;
midat123 = dat[dat$time==2 & dat$Rim1==1 & dat$s1==6,]
mifit13 = lm(pek ~ yl + sex, data = midat123)
mi12 = midat123[,c("yl", "shiftage", "sex","edu","time","s1")]
midat123$pred_obs = predict(mifit13, newdata = mi12, type="response")
midat123$pred_obs = ifelse(!is.na(midat123$pek), midat123$pek, midat123$pred_obs)
midat123$pek = midat123$pred_obs;
midat124 = dat[dat$time==2 & dat$Rim1==1 & dat$s1==8,]
mifit14 = lm(pek ~ yl + sex, data = midat124)
mi12 = midat124[,c("yl", "shiftage", "sex","edu","time","s1")]
midat124$pred_obs = predict(mifit14, newdata = mi12, type="response")
midat124$pred_obs = ifelse(!is.na(midat124$pek), midat124$pek, midat124$pred_obs)
midat124$pek = midat124$pred_obs;
midat23 = dat[dat$time==4 & dat$Rim1==1 & dat$s1==4,]
mifit2 = lm(pek ~ yl + sex, data = midat23)
mi23 = midat23[,c("yl", "lyl","shiftage", "sex","edu","time","s1")]
midat23$pred_obs = predict(mifit2, newdata = mi23, type="response")
midat23$pred_obs = ifelse(!is.na(midat23$pek), midat23$pek, midat23$pred_obs)
midat232 = dat[dat$time==4 & dat$Rim1==1 & dat$s1==6,]
mifit22 = lm(pek ~ yl + sex, data = midat232)
mi23 = midat232[,c("yl", "lyl","shiftage", "sex","edu","time","s1")]
midat232$pred_obs = predict(mifit22, newdata = mi23, type="response")
midat232$pred_obs = ifelse(!is.na(midat232$pek), midat232$pek, midat232$pred_obs)
midat233 = dat[dat$time==4 & dat$Rim1==1 & dat$s1==8,]
mifit23 = lm(pek ~ yl + sex, data = midat233)
mi23 = midat233[,c("yl", "lyl","shiftage", "sex","edu","time","s1")]
midat233$pred_obs = predict(mifit23, newdata = mi23, type="response")
midat233$pred_obs = ifelse(!is.na(midat233$pek), midat233$pek, midat233$pred_obs)
midat34 = dat[dat$time==6 & dat$Rim1==1 & dat$s1==6,]
mifit3 = lm(pek ~ yl + sex, data = midat34)
mi34 = midat34[,c("yl", "lyl","llyl", "shiftage", "sex","edu","time","s1")]
midat34$pred_obs = predict(mifit3, newdata = mi34, type="response")
midat34$pred_obs = ifelse(!is.na(midat34$pek), midat34$pek, midat34$pred_obs)
midat342 = dat[dat$time==6 & dat$Rim1==1 & dat$s1==8,]
mifit32 = lm(pek ~ yl + sex, data = midat342)
mi34 = midat342[,c("yl", "lyl","llyl", "shiftage", "sex","edu","time","s1")]
midat342$pred_obs = predict(mifit32, newdata = mi34, type="response")
midat342$pred_obs = ifelse(!is.na(midat342$pek), midat342$pek, midat342$pred_obs)
midat45 = dat[dat$time==8 & dat$Rim1==1,]
mifit4 = lm(pek ~ yl + sex, data = midat45)
mi45 = midat45[,c("yl", "lyl","llyl","lllyl", "shiftage", "sex","edu","time","s1")]
midat45$pred_obs = predict(mifit4, newdata = mi45, type="response")
midat45$pred_obs = ifelse(!is.na(midat45$pek), midat45$pek, midat45$pred_obs)
###########################Fitted values are not covariates but outcomes
tmpmidat23 = dat[dat$time==4 & is.na(dat$yl) & dat$s1==4,]
midat23$pek = midat23$pred_obs; midat23$pred_obs=NULL; midat23 = rbind(tmpmidat23,midat23);
midat23$Ri = ifelse(!is.na(midat23$pek),1,0)
mifit5 = lm(pek ~ lyl + sex, data = midat23)
mi23 = midat23[,c("lyl", "shiftage", "sex","edu","time","s1")]
midat23$pred_obs = predict(mifit5, newdata = mi23, type="response")
midat23$pred_obs = ifelse(!is.na(midat23$pek), midat23$pek, midat23$pred_obs)
midat23$pek = midat23$pred_obs;
tmpmidat23 = dat[dat$time==4 & is.na(dat$yl) & dat$s1==6,]
midat232$pek = midat232$pred_obs; midat232$pred_obs=NULL; midat232 = rbind(tmpmidat23,midat232);
midat232$Ri = ifelse(!is.na(midat232$pek),1,0)
mifit52 = lm(pek ~ lyl + sex, data = midat232)
mi23 = midat232[,c("lyl", "shiftage", "sex","edu","time","s1")]
midat232$pred_obs = predict(mifit52, newdata = mi23, type="response")
midat232$pred_obs = ifelse(!is.na(midat232$pek), midat232$pek, midat232$pred_obs)
midat232$pek = midat232$pred_obs;
tmpmidat23 = dat[dat$time==4 & is.na(dat$yl) & dat$s1==8,]
midat233$pek = midat233$pred_obs; midat233$pred_obs=NULL; midat233 = rbind(tmpmidat23,midat233);
midat233$Ri = ifelse(!is.na(midat233$pek),1,0)
mifit53 = lm(pek ~ lyl + sex, data = midat233)
mi23 = midat233[,c("lyl", "shiftage", "sex","edu","time","s1")]
midat233$pred_obs = predict(mifit53, newdata = mi23, type="response")
midat233$pred_obs = ifelse(!is.na(midat233$pek), midat233$pek, midat233$pred_obs)
midat233$pek = midat233$pred_obs;
tmpmidat34 = dat[dat$time==6 & is.na(dat$yl) & !is.na(dat$lyl) & dat$s1==6,]
midat34$pek = midat34$pred_obs; midat34$pred_obs=NULL; midat34 = rbind(tmpmidat34,midat34);
midat34$Ri = ifelse(!is.na(midat34$pek),1,0)
mifit6 = lm(pek ~ lyl + sex, data = midat34)
mi34 = midat34[,c("lyl","llyl", "shiftage", "sex","edu","time","s1")]
midat34$pred_obs = predict(mifit6, newdata = mi34, type="response")
midat34$pred_obs = ifelse(!is.na(midat34$pek), midat34$pek, midat34$pred_obs)
tmpmidat342 = dat[dat$time==6 & is.na(dat$yl) & !is.na(dat$lyl) & dat$s1==8,]
midat342$pek = midat342$pred_obs; midat342$pred_obs=NULL; midat342 = rbind(tmpmidat342,midat342);
midat342$Ri = ifelse(!is.na(midat342$pek),1,0)
mifit62 = lm(pek ~ lyl + sex, data = midat342)
mi34 = midat342[,c("lyl","llyl", "shiftage", "sex","edu","time","s1")]
midat342$pred_obs = predict(mifit62, newdata = mi34, type="response")
midat342$pred_obs = ifelse(!is.na(midat342$pek), midat342$pek, midat342$pred_obs)
tmpmidat45 = dat[dat$time==8 & is.na(dat$yl) & !is.na(dat$lyl),]
midat45$pek = midat45$pred_obs; midat45$pred_obs=NULL; midat45 = rbind(tmpmidat45,midat45);
midat45$Ri = ifelse(!is.na(midat45$pek),1,0)
mifit7 = lm(pek ~ lyl + sex, data = midat45)
mi45 = midat45[,c("lyl","llyl","lllyl" , "shiftage", "sex","edu","time","s1")]
midat45$pred_obs = predict(mifit7, newdata = mi45, type="response")
midat45$pred_obs = ifelse(!is.na(midat45$pek), midat45$pek, midat45$pred_obs)
######
tmpmidat34 = dat[dat$time==6 & is.na(dat$yl) & is.na(dat$lyl) & !is.na(dat$llyl) & dat$s1==6,]
midat34$pek = midat34$pred_obs; midat34$pred_obs=NULL; midat34 = rbind(tmpmidat34,midat34);
midat34$Ri = ifelse(!is.na(midat34$pek),1,0)
mifit8 = lm(pek ~ llyl + sex, data = midat34)
mi34 = midat34[,c("llyl", "shiftage", "sex","edu","time","s1")]
midat34$pred_obs = predict(mifit8, newdata = mi34, type="response")
midat34$pred_obs = ifelse(!is.na(midat34$pek), midat34$pek, midat34$pred_obs)
midat34$pek = midat34$pred_obs;
tmpmidat342 = dat[dat$time==6 & is.na(dat$yl) & is.na(dat$lyl) & !is.na(dat$llyl) & dat$s1==8,]
midat342$pek = midat342$pred_obs; midat342$pred_obs=NULL; midat342 = rbind(tmpmidat342,midat342);
midat342$Ri = ifelse(!is.na(midat342$pek),1,0)
mifit82 = lm(pek ~ llyl + sex, data = midat342)
mi34 = midat342[,c("llyl", "shiftage", "sex","edu","time","s1")]
midat342$pred_obs = predict(mifit82, newdata = mi34, type="response")
midat342$pred_obs = ifelse(!is.na(midat342$pek), midat342$pek, midat342$pred_obs)
midat342$pek = midat342$pred_obs;
tmpmidat45 = dat[dat$time==8 & is.na(dat$yl) & is.na(dat$lyl) & !is.na(dat$llyl),]
midat45$pek = midat45$pred_obs; midat45$pred_obs=NULL; midat45 = rbind(tmpmidat45,midat45);
midat45$Ri = ifelse(!is.na(midat45$pek),1,0)
mifit9 = lm(pek ~ llyl + sex, data = midat45)
mi45 = midat45[,c("llyl","lllyl", "shiftage", "sex","edu","time","s1")]
midat45$pred_obs = predict(mifit9, newdata = mi45, type="response")
midat45$pred_obs = ifelse(!is.na(midat45$pek), midat45$pek, midat45$pred_obs)
######
tmpmidat45 = dat[dat$time==8 & is.na(dat$yl) & is.na(dat$llyl),]
midat45$pek = midat45$pred_obs; midat45$pred_obs=NULL; midat45 = rbind(tmpmidat45,midat45);
midat45$Ri = ifelse(!is.na(midat45$pek),1,0)
mifit10 = lm(pek ~ lllyl + sex, data = midat45)
mi45 = midat45[,c("lllyl", "shiftage", "sex","edu","time","s1")]
midat45$pred_obs = predict(mifit10, newdata = mi45, type="response")
midat45$pred_obs = ifelse(!is.na(midat45$pek), midat45$pek, midat45$pred_obs)
midat45$pek = midat45$pred_obs;
#####
midat = rbind(midat12, midat122, midat123, midat124, midat23, midat232, midat233, midat34, midat342, midat45);
midat$pred_obs = NULL;
midat = rbind(dat[dat$time==0,],midat)
midatsortd = midat[order(midat$Case),]
midatsortd = midatsortd[midatsortd$time<=midatsortd$s1,]
########## complete data set
midatsortd$pi=1/ripwdatd$pi
midatsortd$Rim1 = midatsortd$yl = midatsortd$lyl = midatsortd$llyl = midatsortd$lllyl=NULL;
midatsortd$Ri = ripwdatd$Ri; midatsortd$digreal = ripwdatd$pek;
##NR##
beta = c(18,0,0,0,0,0,0,0,0,0);
diff=10;r=1;crit=0.00001
while(diff>=crit)
{
phi = phic = 1;
U = 0; dU=0;
auniqueid = unique(midatsortd$Case);
for(i in auniqueid)
{
tmp5 = midatsortd[midatsortd$Case==i,];
n = nrow(tmp5);
tmpdiag = diag(5); tmpdiag = tmpdiag[1:n,!tmpdiag[,1]]; tmpdiag2 = tmp5$sex[1]*tmpdiag;
x = matrix(c(rep(1, n),as.vector(tmpdiag), tmp5$sex,as.vector(tmpdiag2)),nrow=n) #nix4
last = tmp5[n,]; last$time=10; last2 = do.call("rbind",replicate(5-n, last, simplify = FALSE));tmp5 = rbind(tmp5,last2);
##j=1
xtmp = as.data.frame(matrix(c(rep(tmp5$digreal[1],4), tmp5$sex[1], tmp5$edu[1]),nrow=1))
colnames(xtmp) = c("yl","lyl","llyl","lllyl","sex","edu")
W1 = NULL;
if(!is.na(tmp5$digreal[1]) & tmp5$s1[1]==0) {W1=c(tmp5$digreal[1],predict(mifit1, newdata = xtmp, type="response"), predict(mifit5, newdata = xtmp, type="response"),predict(mifit8, newdata = xtmp, type="response"),predict(mifit10, newdata = xtmp, type="response"))[1:n]}else{W1=W1}
if(!is.na(tmp5$digreal[1]) & tmp5$s1[1]==2) {W1=c(tmp5$digreal[1],predict(mifit1, newdata = xtmp, type="response"), predict(mifit5, newdata = xtmp, type="response"),predict(mifit8, newdata = xtmp, type="response"),predict(mifit10, newdata = xtmp, type="response"))[1:n]}else{W1=W1}
if(!is.na(tmp5$digreal[1]) & tmp5$s1[1]==4) {W1=c(tmp5$digreal[1],predict(mifit12, newdata = xtmp, type="response"), predict(mifit5, newdata = xtmp, type="response"),predict(mifit8, newdata = xtmp, type="response"),predict(mifit10, newdata = xtmp, type="response"))[1:n]}else{W1=W1}
if(!is.na(tmp5$digreal[1]) & tmp5$s1[1]==6) {W1=c(tmp5$digreal[1],predict(mifit13, newdata = xtmp, type="response"), predict(mifit52, newdata = xtmp, type="response"),predict(mifit8, newdata = xtmp, type="response"),predict(mifit10, newdata = xtmp, type="response"))[1:n]}else{W1=W1}
if(!is.na(tmp5$digreal[1]) & tmp5$s1[1]==8) {W1=c(tmp5$digreal[1],predict(mifit14, newdata = xtmp, type="response"), predict(mifit53, newdata = xtmp, type="response"),predict(mifit82, newdata = xtmp, type="response"),predict(mifit10, newdata = xtmp, type="response"))[1:n]}else{W1=W1}
#j=2
xtmp2l = data.frame(cbind(tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1],tmp5$s1[1]));
colnames(xtmp2l) = c("yl","lyl","sex","edu","s1")
xtmp2 = data.frame(cbind(tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1],tmp5$s1[1]));
colnames(xtmp2) = c("lyl","llyl","sex","edu","s1")
xtmp2ll = data.frame(cbind(tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1]));
colnames(xtmp2ll) = c("llyl","lllyl","sex","edu")
W2=NULL
if(!is.na(tmp5$digreal[2]) & tmp5$s1[1]==2){W2=c(tmp5$digreal[1], tmp5$digreal[2],predict(mifit2, newdata=xtmp2l,type="response"),predict(mifit6, newdata=xtmp2,type="response"),predict(mifit9, newdata=xtmp2ll,type="response"))[1:n]}else{W2=W2}
if(!is.na(tmp5$digreal[2]) & tmp5$s1[1]==4){W2=c(tmp5$digreal[1], tmp5$digreal[2],predict(mifit2, newdata=xtmp2l,type="response"),predict(mifit6, newdata=xtmp2,type="response"),predict(mifit9, newdata=xtmp2ll,type="response"))[1:n]}else{W2=W2}
if(!is.na(tmp5$digreal[2]) & tmp5$s1[1]==6){W2=c(tmp5$digreal[1], tmp5$digreal[2],predict(mifit22, newdata=xtmp2l,type="response"),predict(mifit6, newdata=xtmp2,type="response"),predict(mifit9, newdata=xtmp2ll,type="response"))[1:n]}else{W2=W2}
if(!is.na(tmp5$digreal[2]) & tmp5$s1[1]==8){W2=c(tmp5$digreal[1], tmp5$digreal[2],predict(mifit23, newdata=xtmp2l,type="response"),predict(mifit62, newdata=xtmp2,type="response"),predict(mifit9, newdata=xtmp2ll,type="response"))[1:n]}else{W2=W2}
#j=3
xtmp3 = data.frame(cbind(tmp5$digreal[3],tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1],tmp5$s1[1]));
colnames(xtmp3) = c("yl","lyl","llyl","sex","edu","s1")
xtmp3l = data.frame(cbind(tmp5$digreal[3],tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1]));
colnames(xtmp3l) = c("lyl","llyl","lllyl","sex","edu")
W3=NULL
if(!is.na(tmp5$digreal[3]) & tmp5$s1[1]==6){W3=c(tmp5$digreal[1], tmp5$digreal[2],tmp5$digreal[3],predict(mifit3, newdata=xtmp3,type="response"),predict(mifit7, newdata=xtmp3l,type="response"))[1:n]}else{W3=W3}
if(!is.na(tmp5$digreal[3]) & tmp5$s1[1]==8){W3=c(tmp5$digreal[1], tmp5$digreal[2],tmp5$digreal[3],predict(mifit32, newdata=xtmp3,type="response"),predict(mifit7, newdata=xtmp3l,type="response"))[1:n]}else{W3=W3}
#j=4
xtmp4 = data.frame(cbind(tmp5$digreal[4],tmp5$digreal[3],tmp5$digreal[2],tmp5$digreal[1],tmp5$sex[1],tmp5$edu[1]));
colnames(xtmp4) = c("yl","lyl","llyl","lllyl","sex","edu")
W4=NULL
if(!is.na(tmp5$digreal[3]) & tmp5$s1[1]==8){W4=c(tmp5$digreal[1], tmp5$digreal[2],tmp5$digreal[3],tmp5$digreal[4],predict(mifit4, newdata=xtmp4,type="response"))[1:n]}else{W4=W4}
fitted = x %*% t(t(beta)); tmp5 = tmp5[tmp5$s1>=tmp5$time,];
### For the left hand side of AIPW (complete side)
var = diag(phi,n); varc = diag(phic,n)
if(!is.na(tmp5$pi[n]) & !is.na(tmp5$digreal[n])) {uleft=(1/tmp5$pi[n])*(t(x) %*% var %*% (tmp5$digreal - x%*%t(t(beta)) ) )} else{uleft=matrix(c(rep(0,ncol(x))),ncol=1)}
if(!is.na(tmp5$pi[n]) & !is.na(tmp5$digreal[n])) {duleft = (1/tmp5$pi[n])*(t(x) %*% varc %*% x)} else{duleft=diag(0,ncol(x))}
### For the right hand side
if(!is.null(W1) & !is.na(tmp5$pi[2])) {c1=as.matrix((tmp5$Ri[1]/(tmp5$pi[1])-(tmp5$Ri[2])/(tmp5$pi[2]))*(W1-fitted)); p1=(tmp5$Ri[1]/(tmp5$pi[1])-(tmp5$Ri[2])/(tmp5$pi[2]))} else{c1=matrix(c(rep(0,n)),ncol=1);p1=0}
if(!is.null(W2)& !is.na(tmp5$pi[3])) {c2=as.matrix((tmp5$Ri[2]/(tmp5$pi[2])-(tmp5$Ri[3])/(tmp5$pi[3]))*(W2-fitted));p2=(tmp5$Ri[2]/(tmp5$pi[2])-(tmp5$Ri[3])/(tmp5$pi[3]))} else{c2=matrix(c(rep(0,n)),ncol=1); p2=0;}
if(!is.null(W3)& !is.na(tmp5$pi[4])) {c3=as.matrix((tmp5$Ri[3]/(tmp5$pi[3])-(tmp5$Ri[4])/(tmp5$pi[4]))*(W3-fitted)); p3=(tmp5$Ri[3]/(tmp5$pi[3])-(tmp5$Ri[4])/(tmp5$pi[4]))} else{c3=matrix(c(rep(0,n)),ncol=1); p3=0;}
if(!is.null(W4)& !is.na(tmp5$pi[5])) {c4=as.matrix((tmp5$Ri[4]/(tmp5$pi[4])-(tmp5$Ri[5])/(tmp5$pi[5]))*(W4-fitted));p4=(tmp5$Ri[4]/(tmp5$pi[4])-(tmp5$Ri[5])/(tmp5$pi[5]))} else{c4=matrix(c(rep(0,n)),ncol=1);p4=0;}
uright = t(x) %*% var %*% (c1+c2+c3+c4)
duright = t(x) %*% var %*% x *(p1+p2+p3+p4)
if(tmp5$Ri[n]==1) {Ubeta = uleft+uright; dUbeta = duleft+duright} else{Ubeta = uright; dUbeta = duright;}
U = U + Ubeta
dU = dU + dUbeta
}
diff = max(solve(-dU) %*% U)
beta = beta - solve(-dU) %*% U
#r=r+1
#cat(r, "\n")
}
mybetad = beta
t(mybetad)
#outtmp = c(mybetad)
#out = rbind(out, outtmp)
#cat(m, "\n")
}
# Run 1000 replicates of myfunc in parallel; each replicate returns one row
# of estimates. NOTE(review): assumes a parallel backend was registered on
# cluster `cl` earlier in the file (not visible in this chunk) -- confirm.
test = foreach(i=1:1000) %dopar% myfunc(i)
# Stack the per-replicate result rows and write them to disk.
test2 = do.call("rbind", test)
write.csv(test2,"aipw_stratD.csv")
# Release the worker processes.
stopCluster(cl)
|
# Load dependencies: GO.db supplies the ontology relationship maps;
# shiny, ggplot2 and DT supply the web UI, plotting and table widgets.
library(GO.db)
library(ggplot2)
library(shiny)
library(DT)

# Ancestor maps for the three GO namespaces (BP, CC, MF).
bpAncestors <- as.list(GOBPANCESTOR)
ccAncestors <- as.list(GOCCANCESTOR)
mfAncestors <- as.list(GOMFANCESTOR)
# One combined term -> ancestors lookup covering all namespaces.
ancestors <- c(bpAncestors, ccAncestors, mfAncestors)

# Child maps for the same three namespaces.
bpChildren <- as.list(GOBPCHILDREN)
ccChildren <- as.list(GOCCCHILDREN)
mfChildren <- as.list(GOMFCHILDREN)
# Combined term -> children lookup; drop leaf terms (NA = no children).
children <- c(bpChildren, ccChildren, mfChildren)
children <- children[!is.na(children)]
# UI layout: the sidebar collects the GO term query (one ID per line) and the
# active-term selection; the main panel shows the plot and its backing table.
ui <- fluidPage(
  titlePanel("GO Term Visualizer"),
  sidebarLayout(
    sidebarPanel(
      textAreaInput(inputId = "query", label = "GO Terms"),
      actionButton(inputId = "getData", label = "GO"),
      textInput(inputId = "active", label = "Select a term from the graph")
    ),
    mainPanel(
      plotOutput(outputId = "coolplot"),
      tableOutput(outputId = "table")
    )
  )
)
# Server: turns the pasted GO term list into an ancestor -> query-descendant
# summary and renders a bar plot plus table for the currently selected term.
server <- function(input, output) {
# Split the textarea input into one GO ID per line, de-duplicated.
lstOfTerms <- reactive({
req(input$query)
unique(strsplit(input$query, "\n")[[1]])
})
# Recomputed only when the "GO" button is pressed.
data <- eventReactive(input$getData , {
# This block goes through the user-given list of terms and creates a named list
# where each element of the list is an ancestor to at least one of the terms in
# lstOfTerms. Each item contains a list of the query terms that are descendents
# of the list item.
terms <- list()
# NOTE(review): the assignment inside sapply() only modifies a local copy of
# `terms`; the result relies on unlist() capturing sapply's return values
# (named by ancestor ID), not on `terms` itself being updated -- confirm
# this is the intended mechanism.
terms <- unlist(lapply(unique(lstOfTerms()), function(x) {
theAncestors <- ancestors[[x]]
sapply(theAncestors, function(y) {
terms[[y]][1] <- c(terms[[y]][1], x)
})
}))
# Group query terms by ancestor ID; drop the catch-all "all" pseudo-ancestor.
queryDescendents <- split(unname(terms), names(terms))
queryDescendents$all <- NULL
# For each term in queryDescendents, it returns the list of children of the term.
childs <- lapply(names(queryDescendents), function(x) {
children[[x]]
})
names(childs) <- names(queryDescendents)
# The two list of lists (queryDescendents and childs) are consolidated into one list.
data <- lapply(c(1:length(queryDescendents)), function(x) {
return(list(queryDescendents = queryDescendents[[x]], children = childs[[x]]))
})
names(data) <- names(queryDescendents)
return(data)
})
# Debug output: print the first consolidated entries to the console.
observe ({ print(head(data())) })
# Currently selected term, or NULL while the text box is empty.
activeTerm <- reactive({
if (input$active == "") { NULL }
else { input$active }
})
#observe ({ print(activeTerm())})
# Terms to display: with no selection, the three GO namespace roots
# (CC, MF, BP); otherwise the children of the active term.
termsToPlot <- reactive({
if (is.null(activeTerm())) {
c("GO:0005575", "GO:0003674", "GO:0008150")
} else {
#c("GO:0005575", "GO:0003674", "GO:0008150")
data()[[activeTerm()]][[2]]
}
})
# Per displayed term: count of query descendants plus its name and
# definition; sorted by count descending, zero-count rows removed.
currDF <- reactive({
curr_df <- data.frame(goID = termsToPlot(),
num = sapply(termsToPlot(), function(x) {return(length(data()[[x]][[1]]))}),
term = sapply(termsToPlot(), function(x) {return(Term(x))}),
def = sapply(termsToPlot(), function(x) {return(Definition(x))}))
curr_df <- curr_df[order(curr_df$num, decreasing = T),]
curr_df <- curr_df[which(curr_df$num > 0),]
curr_df
})
# Bar plot of query-descendant counts, largest bar first.
output$coolplot <- renderPlot({
ggplot(currDF(), aes(x = factor(goID, levels = goID[order(num, decreasing = T)]), y = num)) +
geom_col() +
ggtitle(paste0(activeTerm(), ": ", "biological process"), subtitle = "Plo")
})
# Table view of the same summary data frame.
output$table <- renderTable({
currDF()
#currDF()[order(currDF()$num, decreasing = T),]
})
}
shinyApp(ui = ui, server = server)
|
/goApp.R
|
no_license
|
zuhaibGit/goTermVisualizer
|
R
| false
| false
| 3,466
|
r
|
# Load dependencies: GO.db supplies the ontology relationship maps;
# shiny, ggplot2 and DT supply the web UI, plotting and table widgets.
library(GO.db)
library(ggplot2)
library(shiny)
library(DT)

# Ancestor maps for the three GO namespaces (BP, CC, MF).
bpAncestors <- as.list(GOBPANCESTOR)
ccAncestors <- as.list(GOCCANCESTOR)
mfAncestors <- as.list(GOMFANCESTOR)
# One combined term -> ancestors lookup covering all namespaces.
ancestors <- c(bpAncestors, ccAncestors, mfAncestors)

# Child maps for the same three namespaces.
bpChildren <- as.list(GOBPCHILDREN)
ccChildren <- as.list(GOCCCHILDREN)
mfChildren <- as.list(GOMFCHILDREN)
# Combined term -> children lookup; drop leaf terms (NA = no children).
children <- c(bpChildren, ccChildren, mfChildren)
children <- children[!is.na(children)]
# UI layout: the sidebar collects the GO term query (one ID per line) and the
# active-term selection; the main panel shows the plot and its backing table.
ui <- fluidPage(
  titlePanel("GO Term Visualizer"),
  sidebarLayout(
    sidebarPanel(
      textAreaInput(inputId = "query", label = "GO Terms"),
      actionButton(inputId = "getData", label = "GO"),
      textInput(inputId = "active", label = "Select a term from the graph")
    ),
    mainPanel(
      plotOutput(outputId = "coolplot"),
      tableOutput(outputId = "table")
    )
  )
)
# Server: turns the pasted GO term list into an ancestor -> query-descendant
# summary and renders a bar plot plus table for the currently selected term.
server <- function(input, output) {
# Split the textarea input into one GO ID per line, de-duplicated.
lstOfTerms <- reactive({
req(input$query)
unique(strsplit(input$query, "\n")[[1]])
})
# Recomputed only when the "GO" button is pressed.
data <- eventReactive(input$getData , {
# This block goes through the user-given list of terms and creates a named list
# where each element of the list is an ancestor to at least one of the terms in
# lstOfTerms. Each item contains a list of the query terms that are descendents
# of the list item.
terms <- list()
# NOTE(review): the assignment inside sapply() only modifies a local copy of
# `terms`; the result relies on unlist() capturing sapply's return values
# (named by ancestor ID), not on `terms` itself being updated -- confirm
# this is the intended mechanism.
terms <- unlist(lapply(unique(lstOfTerms()), function(x) {
theAncestors <- ancestors[[x]]
sapply(theAncestors, function(y) {
terms[[y]][1] <- c(terms[[y]][1], x)
})
}))
# Group query terms by ancestor ID; drop the catch-all "all" pseudo-ancestor.
queryDescendents <- split(unname(terms), names(terms))
queryDescendents$all <- NULL
# For each term in queryDescendents, it returns the list of children of the term.
childs <- lapply(names(queryDescendents), function(x) {
children[[x]]
})
names(childs) <- names(queryDescendents)
# The two list of lists (queryDescendents and childs) are consolidated into one list.
data <- lapply(c(1:length(queryDescendents)), function(x) {
return(list(queryDescendents = queryDescendents[[x]], children = childs[[x]]))
})
names(data) <- names(queryDescendents)
return(data)
})
# Debug output: print the first consolidated entries to the console.
observe ({ print(head(data())) })
# Currently selected term, or NULL while the text box is empty.
activeTerm <- reactive({
if (input$active == "") { NULL }
else { input$active }
})
#observe ({ print(activeTerm())})
# Terms to display: with no selection, the three GO namespace roots
# (CC, MF, BP); otherwise the children of the active term.
termsToPlot <- reactive({
if (is.null(activeTerm())) {
c("GO:0005575", "GO:0003674", "GO:0008150")
} else {
#c("GO:0005575", "GO:0003674", "GO:0008150")
data()[[activeTerm()]][[2]]
}
})
# Per displayed term: count of query descendants plus its name and
# definition; sorted by count descending, zero-count rows removed.
currDF <- reactive({
curr_df <- data.frame(goID = termsToPlot(),
num = sapply(termsToPlot(), function(x) {return(length(data()[[x]][[1]]))}),
term = sapply(termsToPlot(), function(x) {return(Term(x))}),
def = sapply(termsToPlot(), function(x) {return(Definition(x))}))
curr_df <- curr_df[order(curr_df$num, decreasing = T),]
curr_df <- curr_df[which(curr_df$num > 0),]
curr_df
})
# Bar plot of query-descendant counts, largest bar first.
output$coolplot <- renderPlot({
ggplot(currDF(), aes(x = factor(goID, levels = goID[order(num, decreasing = T)]), y = num)) +
geom_col() +
ggtitle(paste0(activeTerm(), ": ", "biological process"), subtitle = "Plo")
})
# Table view of the same summary data frame.
output$table <- renderTable({
currDF()
#currDF()[order(currDF()$num, decreasing = T),]
})
}
shinyApp(ui = ui, server = server)
|
# Association Rules for Market Basket Analysis (R)
# Pipeline: load the Groceries transactions, aggregate the 169 raw items
# into 55 level2 categories, mine association rules with apriori, then
# visualize and inspect the vegetable-consequent rules.
library(arules) # association rules
library(arulesViz) # data visualization of association rules
library(RColorBrewer) # color palettes for plots
data(Groceries) # grocery transactions object from arules package
# show the dimensions of the transactions object
print(dim(Groceries))
print(dim(Groceries)[1]) # 9835 market baskets for shopping trips
print(dim(Groceries)[2]) # 169 initial store items
# examine frequency for each item with support greater than 0.025
pdf(file="fig_market_basket_initial_item_support.pdf",
width = 8.5, height = 11)
itemFrequencyPlot(Groceries, support = 0.025, cex.names=0.8, xlim = c(0,0.3),
type = "relative", horiz = TRUE, col = "dark red", las = 1,
xlab = paste("Proportion of Market Baskets Containing Item",
"\n(Item Relative Frequency or Support)"))
dev.off()
# explore possibilities for combining similar items
print(head(itemInfo(Groceries)))
print(levels(itemInfo(Groceries)[["level1"]])) # 10 levels... too few
print(levels(itemInfo(Groceries)[["level2"]])) # 55 distinct levels
# aggregate items using the 55 level2 levels for food categories
# to create a more meaningful set of items
groceries <- aggregate(Groceries, itemInfo(Groceries)[["level2"]])
print(dim(groceries)[1]) # 9835 market baskets for shopping trips
print(dim(groceries)[2]) # 55 final store items (categories)
pdf(file="fig_market_basket_final_item_support.pdf", width = 8.5, height = 11)
itemFrequencyPlot(groceries, support = 0.025, cex.names=1.0, xlim = c(0,0.5),
type = "relative", horiz = TRUE, col = "blue", las = 1,
xlab = paste("Proportion of Market Baskets Containing Item",
"\n(Item Relative Frequency or Support)"))
dev.off()
# obtain large set of association rules for items by category and all shoppers
# this is done by setting very low criteria for support and confidence
first.rules <- apriori(groceries,
parameter = list(support = 0.001, confidence = 0.05))
print(summary(first.rules)) # yields 69,921 rules... too many
# select association rules using thresholds for support and confidence
second.rules <- apriori(groceries,
parameter = list(support = 0.025, confidence = 0.05))
print(summary(second.rules)) # yields 344 rules
# data visualization of association rules in scatter plot
# (shading by lift: lift > 1 indicates items co-occur more than expected
# under independence)
pdf(file="fig_market_basket_rules.pdf", width = 8.5, height = 8.5)
plot(second.rules,
control=list(jitter=2, col = rev(brewer.pal(9, "Greens")[4:9])),
shading = "lift")
dev.off()
# grouped matrix of rules
pdf(file="fig_market_basket_rules_matrix.pdf", width = 8.5, height = 8.5)
plot(second.rules, method="grouped",
control=list(col = rev(brewer.pal(9, "Greens")[4:9])))
dev.off()
# select rules with vegetables in consequent (right-hand-side) item subsets
# (%pin% does partial string matching on item labels)
vegie.rules <- subset(second.rules, subset = rhs %pin% "vegetables")
inspect(vegie.rules) # 41 rules
# sort by lift and identify the top 10 rules
top.vegie.rules <- head(sort(vegie.rules, decreasing = TRUE, by = "lift"), 10)
inspect(top.vegie.rules)
# graph visualization of the top vegetable rules
pdf(file="fig_market_basket_farmer_rules.pdf", width = 11, height = 8.5)
plot(top.vegie.rules, method="graph",
control=list(type="items"),
shading = "lift")
dev.off()
|
/Association_Rules_for_Market_Basket_Analysis.R
|
no_license
|
YangLei2586/Association-Rules-for-Market-Basket-Analysis
|
R
| false
| false
| 3,498
|
r
|
# Association Rules for Market Basket Analysis (R)
# Pipeline: load the Groceries transactions, aggregate the 169 raw items
# into 55 level2 categories, mine association rules with apriori, then
# visualize and inspect the vegetable-consequent rules.
library(arules) # association rules
library(arulesViz) # data visualization of association rules
library(RColorBrewer) # color palettes for plots
data(Groceries) # grocery transactions object from arules package
# show the dimensions of the transactions object
print(dim(Groceries))
print(dim(Groceries)[1]) # 9835 market baskets for shopping trips
print(dim(Groceries)[2]) # 169 initial store items
# examine frequency for each item with support greater than 0.025
pdf(file="fig_market_basket_initial_item_support.pdf",
width = 8.5, height = 11)
itemFrequencyPlot(Groceries, support = 0.025, cex.names=0.8, xlim = c(0,0.3),
type = "relative", horiz = TRUE, col = "dark red", las = 1,
xlab = paste("Proportion of Market Baskets Containing Item",
"\n(Item Relative Frequency or Support)"))
dev.off()
# explore possibilities for combining similar items
print(head(itemInfo(Groceries)))
print(levels(itemInfo(Groceries)[["level1"]])) # 10 levels... too few
print(levels(itemInfo(Groceries)[["level2"]])) # 55 distinct levels
# aggregate items using the 55 level2 levels for food categories
# to create a more meaningful set of items
groceries <- aggregate(Groceries, itemInfo(Groceries)[["level2"]])
print(dim(groceries)[1]) # 9835 market baskets for shopping trips
print(dim(groceries)[2]) # 55 final store items (categories)
pdf(file="fig_market_basket_final_item_support.pdf", width = 8.5, height = 11)
itemFrequencyPlot(groceries, support = 0.025, cex.names=1.0, xlim = c(0,0.5),
type = "relative", horiz = TRUE, col = "blue", las = 1,
xlab = paste("Proportion of Market Baskets Containing Item",
"\n(Item Relative Frequency or Support)"))
dev.off()
# obtain large set of association rules for items by category and all shoppers
# this is done by setting very low criteria for support and confidence
first.rules <- apriori(groceries,
parameter = list(support = 0.001, confidence = 0.05))
print(summary(first.rules)) # yields 69,921 rules... too many
# select association rules using thresholds for support and confidence
second.rules <- apriori(groceries,
parameter = list(support = 0.025, confidence = 0.05))
print(summary(second.rules)) # yields 344 rules
# data visualization of association rules in scatter plot
# (shading by lift: lift > 1 indicates items co-occur more than expected
# under independence)
pdf(file="fig_market_basket_rules.pdf", width = 8.5, height = 8.5)
plot(second.rules,
control=list(jitter=2, col = rev(brewer.pal(9, "Greens")[4:9])),
shading = "lift")
dev.off()
# grouped matrix of rules
pdf(file="fig_market_basket_rules_matrix.pdf", width = 8.5, height = 8.5)
plot(second.rules, method="grouped",
control=list(col = rev(brewer.pal(9, "Greens")[4:9])))
dev.off()
# select rules with vegetables in consequent (right-hand-side) item subsets
# (%pin% does partial string matching on item labels)
vegie.rules <- subset(second.rules, subset = rhs %pin% "vegetables")
inspect(vegie.rules) # 41 rules
# sort by lift and identify the top 10 rules
top.vegie.rules <- head(sort(vegie.rules, decreasing = TRUE, by = "lift"), 10)
inspect(top.vegie.rules)
# graph visualization of the top vegetable rules
pdf(file="fig_market_basket_farmer_rules.pdf", width = 11, height = 8.5)
plot(top.vegie.rules, method="graph",
control=list(type="items"),
shading = "lift")
dev.off()
|
# Fit an OLS baseline and a glmnet (elastic-net path) model of Manhattan
# lot TotalValue from zoning, usage and size predictors.
library(glmnet)
library(useful)
library(coefplot)
library(magrittr)
# Training data ships as CSV, test data as a serialized R object.
land_train <- readr::read_csv('data/manhattan_Train.csv')
land_test <- readRDS('data/manhattan_Test.rds')
View(land_train)
# Model formula; the trailing -1 drops the intercept so every factor level
# gets its own coefficient in the design matrix.
valueFormula <- TotalValue ~ FireService +
    ZoneDist1 + ZoneDist2 + Class + LandUse +
    OwnerType + LotArea + BldgArea + ComArea +
    ResArea + OfficeArea + RetailArea +
    GarageArea + FactryArea + NumBldgs +
    NumFloors + UnitsRes + UnitsTotal +
    LotFront + LotDepth + BldgFront +
    BldgDepth + LotType + Landmark + BuiltFAR +
    Built + HistoricDistrict - 1
valueFormula
class(valueFormula)
# Baseline linear model with coefficients plotted by magnitude.
value1 <- lm(valueFormula, data=land_train)
coefplot(value1, sort='magnitude')
# Sparse design matrix and response vector for glmnet.
landX_train <- build.x(valueFormula, data=land_train, contrasts=FALSE, sparse=TRUE)
landX_train
# BUG FIX: was build.y(alueFormula, ...) -- 'alueFormula' is undefined.
landY_train <- build.y(valueFormula, data=land_train)
# BUG FIX: removed stray trailing 'v' that made this line a syntax error.
head(landY_train, n=20)
# Gaussian elastic-net path over the default lambda sequence.
value2 <- glmnet(x=landX_train, y=landY_train, family='gaussian')
coefpath(value2)
|
/code/glmnet.r
|
no_license
|
brooklynbagel/LiveMLAugust18
|
R
| false
| false
| 950
|
r
|
# Fit an OLS baseline and a glmnet (elastic-net path) model of Manhattan
# lot TotalValue from zoning, usage and size predictors.
library(glmnet)
library(useful)
library(coefplot)
library(magrittr)
# Training data ships as CSV, test data as a serialized R object.
land_train <- readr::read_csv('data/manhattan_Train.csv')
land_test <- readRDS('data/manhattan_Test.rds')
View(land_train)
# Model formula; the trailing -1 drops the intercept so every factor level
# gets its own coefficient in the design matrix.
valueFormula <- TotalValue ~ FireService +
    ZoneDist1 + ZoneDist2 + Class + LandUse +
    OwnerType + LotArea + BldgArea + ComArea +
    ResArea + OfficeArea + RetailArea +
    GarageArea + FactryArea + NumBldgs +
    NumFloors + UnitsRes + UnitsTotal +
    LotFront + LotDepth + BldgFront +
    BldgDepth + LotType + Landmark + BuiltFAR +
    Built + HistoricDistrict - 1
valueFormula
class(valueFormula)
# Baseline linear model with coefficients plotted by magnitude.
value1 <- lm(valueFormula, data=land_train)
coefplot(value1, sort='magnitude')
# Sparse design matrix and response vector for glmnet.
landX_train <- build.x(valueFormula, data=land_train, contrasts=FALSE, sparse=TRUE)
landX_train
# BUG FIX: was build.y(alueFormula, ...) -- 'alueFormula' is undefined.
landY_train <- build.y(valueFormula, data=land_train)
# BUG FIX: removed stray trailing 'v' that made this line a syntax error.
head(landY_train, n=20)
# Gaussian elastic-net path over the default lambda sequence.
value2 <- glmnet(x=landX_train, y=landY_train, family='gaussian')
coefpath(value2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{get_sequences}
\alias{get_sequences}
\title{Title Get fasta sequences from PANGEA dataframe}
\usage{
get_sequences(all_data, country_name, outfile_name, add_unknown)
}
\arguments{
\item{all_data}{Dataframe containing all data}
\item{country_name}{String for country name of interest}
\item{outfile_name}{String with the output filename to save sequences}
\item{add_unknown}{Either "yes" or "no". If "yes", include all gender categories
(female, male and unknown); if "no", exclude samples whose gender is unknown.}
}
\value{
Fasta file for selected DNA sequences.
}
\description{
Function to select sequences from a specific country and save these DNA sequences
as FASTA.
}
\examples{
#To Do.
}
|
/man/get_sequences.Rd
|
no_license
|
thednainus/pangeaZA
|
R
| false
| true
| 781
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{get_sequences}
\alias{get_sequences}
\title{Title Get fasta sequences from PANGEA dataframe}
\usage{
get_sequences(all_data, country_name, outfile_name, add_unknown)
}
\arguments{
\item{all_data}{Dataframe containing all data}
\item{country_name}{String for country name of interest}
\item{outfile_name}{String with the output filename to save sequences}
\item{add_unknown}{Either "yes" or "no". If "yes", include all gender categories
(female, male and unknown); if "no", exclude samples whose gender is unknown.}
}
\value{
Fasta file for selected DNA sequences.
}
\description{
Function to select sequences from a specific country and save these DNA sequences
as FASTA.
}
\examples{
#To Do.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layers-convolutional.R
\name{layer_conv_lstm_2d}
\alias{layer_conv_lstm_2d}
\title{Convolutional LSTM.}
\usage{
layer_conv_lstm_2d(object, filters, kernel_size, strides = c(1L, 1L),
padding = "valid", data_format = NULL, dilation_rate = c(1L, 1L),
activation = "tanh", recurrent_activation = "hard_sigmoid",
use_bias = TRUE, kernel_initializer = "glorot_uniform",
recurrent_initializer = "orthogonal", bias_initializer = "zeros",
unit_forget_bias = TRUE, kernel_regularizer = NULL,
recurrent_regularizer = NULL, bias_regularizer = NULL,
activity_regularizer = NULL, kernel_constraint = NULL,
recurrent_constraint = NULL, bias_constraint = NULL,
return_sequences = FALSE, go_backwards = FALSE, stateful = FALSE,
dropout = 0, recurrent_dropout = 0, batch_size = NULL, name = NULL,
trainable = NULL, weights = NULL, input_shape = NULL)
}
\arguments{
\item{object}{Model or layer object}
\item{filters}{Integer, the dimensionality of the output space (i.e. the
number output of filters in the convolution).}
\item{kernel_size}{An integer or list of n integers, specifying the
dimensions of the convolution window.}
\item{strides}{An integer or list of n integers, specifying the strides of
the convolution. Specifying any stride value != 1 is incompatible with
specifying any \code{dilation_rate} value != 1.}
\item{padding}{One of \code{"valid"} or \code{"same"} (case-insensitive).}
\item{data_format}{A string, one of \code{channels_last} (default) or
\code{channels_first}. The ordering of the dimensions in the inputs.
\code{channels_last} corresponds to inputs with shape \code{(batch, time, ..., channels)} while \code{channels_first} corresponds to inputs with shape \code{(batch, time, channels, ...)}. It defaults to the \code{image_data_format} value found
in your Keras config file at \code{~/.keras/keras.json}. If you never set it,
then it will be "channels_last".}
\item{dilation_rate}{An integer or list of n integers, specifying the
dilation rate to use for dilated convolution. Currently, specifying any
\code{dilation_rate} value != 1 is incompatible with specifying any \code{strides}
value != 1.}
\item{activation}{Activation function to use. If you don't specify anything,
no activation is applied (ie. "linear" activation: \code{a(x) = x}).}
\item{recurrent_activation}{Activation function to use for the recurrent
step.}
\item{use_bias}{Boolean, whether the layer uses a bias vector.}
\item{kernel_initializer}{Initializer for the \code{kernel} weights matrix, used
for the linear transformation of the inputs..}
\item{recurrent_initializer}{Initializer for the \code{recurrent_kernel} weights
matrix, used for the linear transformation of the recurrent state..}
\item{bias_initializer}{Initializer for the bias vector.}
\item{unit_forget_bias}{Boolean. If TRUE, add 1 to the bias of the forget
gate at initialization. Use in combination with \code{bias_initializer="zeros"}.
This is recommended in \href{http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf}{Jozefowicz et al.}}
\item{kernel_regularizer}{Regularizer function applied to the \code{kernel}
weights matrix.}
\item{recurrent_regularizer}{Regularizer function applied to the
\code{recurrent_kernel} weights matrix.}
\item{bias_regularizer}{Regularizer function applied to the bias vector.}
\item{activity_regularizer}{Regularizer function applied to the output of the
layer (its "activation")..}
\item{kernel_constraint}{Constraint function applied to the \code{kernel} weights
matrix.}
\item{recurrent_constraint}{Constraint function applied to the
\code{recurrent_kernel} weights matrix.}
\item{bias_constraint}{Constraint function applied to the bias vector.}
\item{return_sequences}{Boolean. Whether to return the last output in the
output sequence, or the full sequence.}
\item{go_backwards}{Boolean (default FALSE). If TRUE, process the input
sequence backwards.}
\item{stateful}{Boolean (default FALSE). If TRUE, the last state for each
sample at index i in a batch will be used as initial state for the sample
of index i in the following batch.}
\item{dropout}{Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs.}
\item{recurrent_dropout}{Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state.}
\item{batch_size}{Fixed batch size for layer}
\item{name}{An optional name string for the layer. Should be unique in a
model (do not reuse the same name twice). It will be autogenerated if it
isn't provided.}
\item{trainable}{Whether the layer weights will be updated during training.}
\item{weights}{Initial weights for layer.}
\item{input_shape}{Dimensionality of the input (integer) not including the
samples axis. This argument is required when using this layer as the first
layer in a model.}
}
\description{
It is similar to an LSTM layer, but the input transformations and recurrent
transformations are both convolutional.
}
\section{Input shape}{
\itemize{
\item if data_format='channels_first' 5D tensor with shape:
\code{(samples,time, channels, rows, cols)}
\itemize{
\item if data_format='channels_last' 5D
tensor with shape: \code{(samples,time, rows, cols, channels)}
}
}
}
\section{References}{
\itemize{
\item \href{http://arxiv.org/abs/1506.04214v1}{Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting}
The current implementation does not include the feedback loop on the cells
output
}
}
\seealso{
Other convolutional layers: \code{\link{layer_conv_1d}},
\code{\link{layer_conv_2d_transpose}},
\code{\link{layer_conv_2d}},
\code{\link{layer_conv_3d_transpose}},
\code{\link{layer_conv_3d}},
\code{\link{layer_cropping_1d}},
\code{\link{layer_cropping_2d}},
\code{\link{layer_cropping_3d}},
\code{\link{layer_separable_conv_1d}},
\code{\link{layer_separable_conv_2d}},
\code{\link{layer_upsampling_1d}},
\code{\link{layer_upsampling_2d}},
\code{\link{layer_upsampling_3d}},
\code{\link{layer_zero_padding_1d}},
\code{\link{layer_zero_padding_2d}},
\code{\link{layer_zero_padding_3d}}
}
|
/man/layer_conv_lstm_2d.Rd
|
no_license
|
MrfksIv/keras
|
R
| false
| true
| 6,188
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layers-convolutional.R
\name{layer_conv_lstm_2d}
\alias{layer_conv_lstm_2d}
\title{Convolutional LSTM.}
\usage{
layer_conv_lstm_2d(object, filters, kernel_size, strides = c(1L, 1L),
padding = "valid", data_format = NULL, dilation_rate = c(1L, 1L),
activation = "tanh", recurrent_activation = "hard_sigmoid",
use_bias = TRUE, kernel_initializer = "glorot_uniform",
recurrent_initializer = "orthogonal", bias_initializer = "zeros",
unit_forget_bias = TRUE, kernel_regularizer = NULL,
recurrent_regularizer = NULL, bias_regularizer = NULL,
activity_regularizer = NULL, kernel_constraint = NULL,
recurrent_constraint = NULL, bias_constraint = NULL,
return_sequences = FALSE, go_backwards = FALSE, stateful = FALSE,
dropout = 0, recurrent_dropout = 0, batch_size = NULL, name = NULL,
trainable = NULL, weights = NULL, input_shape = NULL)
}
\arguments{
\item{object}{Model or layer object}
\item{filters}{Integer, the dimensionality of the output space (i.e. the
number output of filters in the convolution).}
\item{kernel_size}{An integer or list of n integers, specifying the
dimensions of the convolution window.}
\item{strides}{An integer or list of n integers, specifying the strides of
the convolution. Specifying any stride value != 1 is incompatible with
specifying any \code{dilation_rate} value != 1.}
\item{padding}{One of \code{"valid"} or \code{"same"} (case-insensitive).}
\item{data_format}{A string, one of \code{channels_last} (default) or
\code{channels_first}. The ordering of the dimensions in the inputs.
\code{channels_last} corresponds to inputs with shape \code{(batch, time, ..., channels)} while \code{channels_first} corresponds to inputs with shape \code{(batch, time, channels, ...)}. It defaults to the \code{image_data_format} value found
in your Keras config file at \code{~/.keras/keras.json}. If you never set it,
then it will be "channels_last".}
\item{dilation_rate}{An integer or list of n integers, specifying the
dilation rate to use for dilated convolution. Currently, specifying any
\code{dilation_rate} value != 1 is incompatible with specifying any \code{strides}
value != 1.}
\item{activation}{Activation function to use. If you don't specify anything,
no activation is applied (ie. "linear" activation: \code{a(x) = x}).}
\item{recurrent_activation}{Activation function to use for the recurrent
step.}
\item{use_bias}{Boolean, whether the layer uses a bias vector.}
\item{kernel_initializer}{Initializer for the \code{kernel} weights matrix, used
for the linear transformation of the inputs..}
\item{recurrent_initializer}{Initializer for the \code{recurrent_kernel} weights
matrix, used for the linear transformation of the recurrent state..}
\item{bias_initializer}{Initializer for the bias vector.}
\item{unit_forget_bias}{Boolean. If TRUE, add 1 to the bias of the forget
gate at initialization. Use in combination with \code{bias_initializer="zeros"}.
This is recommended in \href{http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf}{Jozefowicz et al.}}
\item{kernel_regularizer}{Regularizer function applied to the \code{kernel}
weights matrix.}
\item{recurrent_regularizer}{Regularizer function applied to the
\code{recurrent_kernel} weights matrix.}
\item{bias_regularizer}{Regularizer function applied to the bias vector.}
\item{activity_regularizer}{Regularizer function applied to the output of the
layer (its "activation")..}
\item{kernel_constraint}{Constraint function applied to the \code{kernel} weights
matrix.}
\item{recurrent_constraint}{Constraint function applied to the
\code{recurrent_kernel} weights matrix.}
\item{bias_constraint}{Constraint function applied to the bias vector.}
\item{return_sequences}{Boolean. Whether to return the last output in the
output sequence, or the full sequence.}
\item{go_backwards}{Boolean (default FALSE). If TRUE, process the input
sequence backwards.}
\item{stateful}{Boolean (default FALSE). If TRUE, the last state for each
sample at index i in a batch will be used as initial state for the sample
of index i in the following batch.}
\item{dropout}{Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs.}
\item{recurrent_dropout}{Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state.}
\item{batch_size}{Fixed batch size for layer}
\item{name}{An optional name string for the layer. Should be unique in a
model (do not reuse the same name twice). It will be autogenerated if it
isn't provided.}
\item{trainable}{Whether the layer weights will be updated during training.}
\item{weights}{Initial weights for layer.}
\item{input_shape}{Dimensionality of the input (integer) not including the
samples axis. This argument is required when using this layer as the first
layer in a model.}
}
\description{
It is similar to an LSTM layer, but the input transformations and recurrent
transformations are both convolutional.
}
\section{Input shape}{
\itemize{
\item if data_format='channels_first' 5D tensor with shape:
\code{(samples,time, channels, rows, cols)}
\itemize{
\item if data_format='channels_last' 5D
tensor with shape: \code{(samples,time, rows, cols, channels)}
}
}
}
\section{References}{
\itemize{
\item \href{http://arxiv.org/abs/1506.04214v1}{Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting}
The current implementation does not include the feedback loop on the cells
output
}
}
\seealso{
Other convolutional layers: \code{\link{layer_conv_1d}},
\code{\link{layer_conv_2d_transpose}},
\code{\link{layer_conv_2d}},
\code{\link{layer_conv_3d_transpose}},
\code{\link{layer_conv_3d}},
\code{\link{layer_cropping_1d}},
\code{\link{layer_cropping_2d}},
\code{\link{layer_cropping_3d}},
\code{\link{layer_separable_conv_1d}},
\code{\link{layer_separable_conv_2d}},
\code{\link{layer_upsampling_1d}},
\code{\link{layer_upsampling_2d}},
\code{\link{layer_upsampling_3d}},
\code{\link{layer_zero_padding_1d}},
\code{\link{layer_zero_padding_2d}},
\code{\link{layer_zero_padding_3d}}
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{changepoints}
\alias{changepoints}
\alias{changepoints.sbs}
\alias{changepoints.wbs}
\title{Change-points detected by WBS or BS}
\usage{
changepoints(object, ...)
\method{changepoints}{sbs}(object, th = NULL, th.const = 1.3, Kmax = NULL,
...)
\method{changepoints}{wbs}(object, th = NULL, th.const = 1.3, Kmax = 50,
penalty = c("ssic.penalty", "bic.penalty", "mbic.penalty"), ...)
}
\arguments{
\item{object}{an object of 'wbs' or 'sbs' class returned by, respectively, \code{\link{wbs}} and \code{\link{sbs}} functions}
\item{...}{further arguments that may be passed to the penalty functions}
\item{th}{a vector of positive scalars}
\item{th.const}{a vector of positive scalars}
\item{Kmax}{a maximum number of change-points to be detected}
\item{penalty}{a character vector with names of penalty functions used}
}
\value{
\item{sigma}{Median Absolute Deviation estimate of the noise level}
\item{th}{a vector of thresholds}
\item{no.cpt.th}{the number of change-points detected for each value of \code{th}}
\item{cpt.th}{a list with the change-points detected for each value of \code{th}}
\item{Kmax}{a maximum number of change-points detected}
\item{ic.curve}{a list with values of the chosen information criteria}
\item{no.cpt.ic}{the number of change-points detected for each information criterion considered}
\item{cpt.ic}{a list with the change-points detected for each information criterion considered}
}
\description{
The function applies user-specified stopping criteria to extract change-points from \code{object}
generated by \code{\link{wbs}} or \code{\link{sbs}}. For \code{object} of class 'sbs', the function returns
change-points whose corresponding test statistic exceeds threshold given in \code{th}. For \code{object} of class 'wbs',
the change-points can be also detected using information criteria with penalties specified in \code{penalty}.
}
\details{
For the change-point detection based on thresholding (\code{object} of class 'sbs' or 'wbs'), the user can either specify the thresholds in \code{th} directly,
determine the maximum number \code{Kmax} of change-points to be detected, or let \code{th} depend on \code{th.const}.
When \code{Kmax} is given, the function automatically sets \code{th} to the lowest threshold such that the number of detected change-points is lower or equal than \code{Kmax}.
Note that for the BS algorithm it might be not possible to find the threshold such that exactly \code{Kmax} change-points are found.
When \code{th} and \code{Kmax} are omitted, the threshold value is set to
\deqn{th = sigma \times th.const \sqrt{2\log(n)},}{th=sigma * th.const* sqrt(2 log(n)),}
where sigma is the Median Absolute Deviation estimate of the noise level and \eqn{n}{n} is the number of elements in \code{x}.
For the change-point detection based on information criteria (\code{object} of class 'wbs' only),
the user can specify both the maximum number of change-points (\code{Kmax}) and a type of the penalty used.
Parameter \code{penalty} should contain a list of characters with names of the functions of at least two arguments (\code{n} and \code{cpt}).
For each penalty given, the following information criterion is minimized over candidate sets of change-points \code{cpt}:
\deqn{\frac{n}{2}\log\hat{\sigma}_{k}^{2}+penalty(n,cpt),}{n/2 log(sigma_k)+ penalty(n,cpt),}
where \eqn{k}{k} denotes the number of elements in \eqn{cpt}{cpt}, \eqn{\hat{\sigma}_{k}}{sigma_k} is the corresponding maximum
likelihood estimator of the residual variance.
}
\examples{
# we generate Gaussian noise + a Poisson process signal with 10 jumps on average
set.seed(10)
N <- rpois(1,10)
true.cpt <- sample(1000,N)
m1 <- matrix(rep(1:1000,N),1000,N,byrow=FALSE)
m2 <- matrix(rep(true.cpt,1000),1000,N,byrow=TRUE)
x <- rnorm(1000) + apply(m1>=m2,1,sum)
# we apply the BS and WBS algorithms with default values for their parameters
s <- sbs(x)
w <- wbs(x)
s.cpt <- changepoints(s)
s.cpt
w.cpt <- changepoints(w)
w.cpt
#we can use different stopping criteria, invoking sbs/wbs functions is not necessary
s.cpt <- changepoints(s,th.const=c(1,1.3))
s.cpt
w.cpt <- changepoints(w,th.const=c(1,1.3))
w.cpt
}
|
/man/changepoints.Rd
|
no_license
|
pra1981/wbs
|
R
| false
| false
| 4,211
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{changepoints}
\alias{changepoints}
\alias{changepoints.sbs}
\alias{changepoints.wbs}
\title{Change-points detected by WBS or BS}
\usage{
changepoints(object, ...)
\method{changepoints}{sbs}(object, th = NULL, th.const = 1.3, Kmax = NULL,
...)
\method{changepoints}{wbs}(object, th = NULL, th.const = 1.3, Kmax = 50,
penalty = c("ssic.penalty", "bic.penalty", "mbic.penalty"), ...)
}
\arguments{
\item{object}{an object of 'wbs' or 'sbs' class returned by, respectively, \code{\link{wbs}} and \code{\link{sbs}} functions}
\item{...}{further arguments that may be passed to the penalty functions}
\item{th}{a vector of positive scalars}
\item{th.const}{a vector of positive scalars}
\item{Kmax}{a maximum number of change-points to be detected}
\item{penalty}{a character vector with names of penalty functions used}
}
\value{
\item{sigma}{Median Absolute Deviation estimate of the noise level}
\item{th}{a vector of thresholds}
\item{no.cpt.th}{the number of change-points detected for each value of \code{th}}
\item{cpt.th}{a list with the change-points detected for each value of \code{th}}
\item{Kmax}{a maximum number of change-points detected}
\item{ic.curve}{a list with values of the chosen information criteria}
\item{no.cpt.ic}{the number of change-points detected for each information criterion considered}
\item{cpt.ic}{a list with the change-points detected for each information criterion considered}
}
\description{
The function applies user-specified stopping criteria to extract change-points from \code{object}
generated by \code{\link{wbs}} or \code{\link{sbs}}. For \code{object} of class 'sbs', the function returns
change-points whose corresponding test statistic exceeds threshold given in \code{th}. For \code{object} of class 'wbs',
the change-points can be also detected using information criteria with penalties specified in \code{penalty}.
}
\details{
For the change-point detection based on thresholding (\code{object} of class 'sbs' or 'wbs'), the user can either specify the thresholds in \code{th} directly,
determine the maximum number \code{Kmax} of change-points to be detected, or let \code{th} depend on \code{th.const}.
When \code{Kmax} is given, the function automatically sets \code{th} to the lowest threshold such that the number of detected change-points is lower or equal than \code{Kmax}.
Note that for the BS algorithm it might be not possible to find the threshold such that exactly \code{Kmax} change-points are found.
When \code{th} and \code{Kmax} are omitted, the threshold value is set to
\deqn{th = sigma \times th.const \sqrt{2\log(n)},}{th=sigma * th.const* sqrt(2 log(n)),}
where sigma is the Median Absolute Deviation estimate of the noise level and \eqn{n}{n} is the number of elements in \code{x}.
For the change-point detection based on information criteria (\code{object} of class 'wbs' only),
the user can specify both the maximum number of change-points (\code{Kmax}) and a type of the penalty used.
Parameter \code{penalty} should contain a list of characters with names of the functions of at least two arguments (\code{n} and \code{cpt}).
For each penalty given, the following information criterion is minimized over candidate sets of change-points \code{cpt}:
\deqn{\frac{n}{2}\log\hat{\sigma}_{k}^{2}+penalty(n,cpt),}{n/2 log(sigma_k)+ penalty(n,cpt),}
where \eqn{k}{k} denotes the number of elements in \eqn{cpt}{cpt}, \eqn{\hat{\sigma}_{k}}{sigma_k} is the corresponding maximum
likelihood estimator of the residual variance.
}
\examples{
# we generate Gaussian noise + a Poisson process signal with 10 jumps on average
set.seed(10)
N <- rpois(1,10)
true.cpt <- sample(1000,N)
m1 <- matrix(rep(1:1000,N),1000,N,byrow=FALSE)
m2 <- matrix(rep(true.cpt,1000),1000,N,byrow=TRUE)
x <- rnorm(1000) + apply(m1>=m2,1,sum)
# we apply the BS and WBS algorithms with default values for their parameters
s <- sbs(x)
w <- wbs(x)
s.cpt <- changepoints(s)
s.cpt
w.cpt <- changepoints(w)
w.cpt
#we can use different stopping criteria, invoking sbs/wbs functions is not necessary
s.cpt <- changepoints(s,th.const=c(1,1.3))
s.cpt
w.cpt <- changepoints(w,th.const=c(1,1.3))
w.cpt
}
|
# Driver script: fit the spatial extreme-value model for precipitation using
# empirical basis functions with L = 15 knots on cross-validation set 2,
# once for the "current" period and once for the "future" period.
# NOTE(review): rm(list = ls()) wipes the workspace — this script is meant to
# be run standalone (batch), not sourced into an interactive session.
rm(list=ls())
source(file = "./package_load.R", chdir = T)
# Number of bases: 5, 10, 15, 20
process <- "ebf" # ebf: empirical basis functions, gsk: gaussian kernels
margin <- "gsk" # ebf: empirical basis functions, gsk: gaussian kernels
time <- "current" # current or future
L <- 15 # number of knots to use for the basis functions
cv <- 2 # which cross-validation set to use
# Marginal GEV location/scale regressions on time and elevation
loc.fun <- scale.fun <- ~ time + elev # + B1 + B2 + B3 + B4 + B5 + 0
# fit the model and get predictions (fitmodel.R reads the globals set above)
source(file = "./fitmodel.R")
# Second run: identical configuration but for the "future" time period.
rm(list=ls())
source(file = "./package_load.R", chdir = T)
# Number of bases: 5, 10, 15, 20
process <- "ebf" # ebf: empirical basis functions, gsk: gaussian kernels
margin <- "gsk" # ebf: empirical basis functions, gsk: gaussian kernels
time <- "future" # current or future
L <- 15 # number of knots to use for the basis functions
cv <- 2 # which cross-validation set to use
loc.fun <- scale.fun <- ~ time + elev # + B1 + B2 + B3 + B4 + B5 + 0
# fit the model and get predictions
source(file = "./fitmodel.R")
|
/markdown/precipitation/fit-ebf-15-2.R
|
permissive
|
sammorris81/extreme-decomp
|
R
| false
| false
| 1,114
|
r
|
# Driver script: fit the spatial extreme-value model for precipitation using
# empirical basis functions with L = 15 knots on cross-validation set 2,
# once for the "current" period and once for the "future" period.
# NOTE(review): rm(list = ls()) wipes the workspace — this script is meant to
# be run standalone (batch), not sourced into an interactive session.
rm(list=ls())
source(file = "./package_load.R", chdir = T)
# Number of bases: 5, 10, 15, 20
process <- "ebf" # ebf: empirical basis functions, gsk: gaussian kernels
margin <- "gsk" # ebf: empirical basis functions, gsk: gaussian kernels
time <- "current" # current or future
L <- 15 # number of knots to use for the basis functions
cv <- 2 # which cross-validation set to use
# Marginal GEV location/scale regressions on time and elevation
loc.fun <- scale.fun <- ~ time + elev # + B1 + B2 + B3 + B4 + B5 + 0
# fit the model and get predictions (fitmodel.R reads the globals set above)
source(file = "./fitmodel.R")
# Second run: identical configuration but for the "future" time period.
rm(list=ls())
source(file = "./package_load.R", chdir = T)
# Number of bases: 5, 10, 15, 20
process <- "ebf" # ebf: empirical basis functions, gsk: gaussian kernels
margin <- "gsk" # ebf: empirical basis functions, gsk: gaussian kernels
time <- "future" # current or future
L <- 15 # number of knots to use for the basis functions
cv <- 2 # which cross-validation set to use
loc.fun <- scale.fun <- ~ time + elev # + B1 + B2 + B3 + B4 + B5 + 0
# fit the model and get predictions
source(file = "./fitmodel.R")
|
source("startup.R")
function(input, output, session) {
##### For Tab Panel E-value #####
# Reactive: compute the E-value(s) for the user-selected effect-measure type.
# For each outcome type it first validates the required inputs (returning a
# human-readable prompt string when something is missing), then delegates to
# the corresponding evalues.* function. Row 2 of the evalues.* result holds
# the E-values (point estimate, CI limits); values are rounded to 2 decimals.
# Returns either a character prompt or a numeric vector of E-values.
evals <- reactive({
# Risk ratio
if ( input$outcomeType == "RR" ) {
if ( is.na( input$est.RR )) return("Enter your point estimate")
if ( is.na( input$trueRR )) return("Enter a true value")
evals = round( evalues.RR( est = input$est.RR, lo = input$lo.RR, hi = input$hi.RR, true = input$trueRR )[2,], 2 )
}
# Odds ratio, rare outcome (OR approximates RR)
if ( input$outcomeType == "OR.rare" ) {
if ( is.na( input$est.OR.rare )) return("Enter your point estimate")
if ( is.na( input$trueORrare )) return("Enter a true value")
evals = round( evalues.OR( est = input$est.OR.rare, lo = input$lo.OR.rare, hi = input$hi.OR.rare, rare = TRUE, true = input$trueORrare )[2,], 2 )
}
# Odds ratio, common outcome
if ( input$outcomeType == "OR.com" ) {
if ( is.na( input$est.OR.com )) return("Enter your point estimate")
if ( is.na( input$trueORcom )) return("Enter a true value")
evals = round( evalues.OR( est = input$est.OR.com, lo = input$lo.OR.com, hi = input$hi.OR.com, rare = FALSE, true = input$trueORcom )[2,], 2 )
}
# Hazard ratio, rare outcome
if ( input$outcomeType == "HR.rare" ) {
if ( is.na( input$est.HR.rare )) return("Enter your point estimate")
if ( is.na( input$trueHRrare )) return("Enter a true value")
evals = round( evalues.HR( est = input$est.HR.rare, lo = input$lo.HR.rare, hi = input$hi.HR.rare, rare = TRUE, true = input$trueHRrare )[2,], 2 )
}
# Hazard ratio, common outcome
if ( input$outcomeType == "HR.com" ) {
if ( is.na( input$est.HR.com )) return("Enter your point estimate")
if ( is.na( input$trueHRcom )) return("Enter a true value")
evals = round( evalues.HR( est = input$est.HR.com, lo = input$lo.HR.com, hi = input$hi.HR.com, rare = FALSE, true = input$trueHRcom )[2,], 2 )
}
# Standardized mean difference (point estimate + standard error)
if ( input$outcomeType == "MD" ) {
if ( is.na( input$est.MD )) return("Enter your point estimate")
if ( is.na( input$trueMD )) return("Enter a true value")
evals = round( evalues.MD( est = input$est.MD, se = input$se.MD, true = input$trueMD )[2,], 2 )
}
# OLS regression coefficient (needs outcome SD and contrast delta as well)
if ( input$outcomeType == "OLS" ) {
if ( is.na( input$estOLS ) ) return("Enter your point estimate")
if ( is.na( input$sdOLS ) ) return("Enter your standard deviation")
if ( is.na( input$trueOLS )) return("Enter a true value")
evals = round( evalues.OLS( est = input$estOLS,
se = input$seOLS,
sd = input$sdOLS,
delta = input$deltaOLS,
true = input$trueOLS )[2,], 2 )
}
# Risk difference from a 2x2 table of counts (n11, n10, n01, n00)
if ( input$outcomeType == "RD" ) {
if ( any( is.na( c( input$n11, input$n10, input$n01, input$n00, input$trueRD ) ) ) ) {
return("Enter all of the above information")
}
evals = round( as.numeric( evalues.RD( n11 = input$n11, n10 = input$n10, n01 = input$n01, n00 = input$n00,
true = input$trueRD, alpha = input$alpha, grid = input$grid ) ), 2 )
}
return( evals )
})
# Render the summary string shown in the UI. evals()[1] is the E-value for
# the point estimate; evals()[2]/[3] correspond to the CI limits (NA when
# the user supplied no confidence interval).
output$result.text = renderText({
##### Create String for UI #####
#if there is input for the CI (either lower or upper)
if ( !is.na(evals()[2]) | !is.na(evals()[3]) ) {
# the CI E-value is the smaller of the available limit E-values
eval.CI = min(evals(), na.rm=TRUE)
result.string = paste( "E-value for point estimate: ", evals()[1],
" and for confidence interval: ", eval.CI,
sep="" )
#if user only gave point estimate
} else {
result.string = paste( "E-value for point estimate: ", evals()[1],
sep="" )
}
return( result.string )
})
#### Make the plot ####
# Bias factor implied by the observed estimate relative to the
# hypothesized true value, expressed (approximately) on the risk-ratio
# scale for each outcome type. Drives the "explain-away" curve below.
bias.factor <- reactive({
  # Default: risk ratio -- bias factor is the simple ratio of estimates.
  bf <- input$est.RR/input$trueRR
  if( input$outcomeType == "OR.rare" ){
    # Rare-outcome OR is treated as approximating the RR directly.
    bf <- input$est.OR.rare/input$trueORrare
  }else if( input$outcomeType == "OR.com" ){
    # Common-outcome OR: square-root transform approximates the RR.
    bf <- sqrt(input$est.OR.com)/sqrt(input$trueORcom)
  }else if( input$outcomeType == "HR.rare" ){
    # Rare-outcome HR treated as approximating the RR directly.
    bf <- input$est.HR.rare/input$trueHRrare
  }else if ( input$outcomeType == "HR.com" ){
    # Common-outcome HR: each HR is first mapped to an approximate RR
    # via (1 - 0.5^sqrt(HR)) / (1 - 0.5^sqrt(1/HR)), then the ratio taken.
    bf <- ( (( 1 - 0.5^sqrt(input$est.HR.com) )/( 1 - 0.5^sqrt(1/input$est.HR.com) )) )/( (( 1 - 0.5^sqrt(input$trueHRcom) )/( 1 - 0.5^sqrt(1/input$trueHRcom) )) )
  } else if ( input$outcomeType == "OLS" ){
    # OLS coefficient: standardize (delta * est / sd), then map to an
    # approximate RR via exp(0.91 * standardized effect).
    bf <- exp( 0.91 * (input$deltaOLS * input$estOLS / input$sdOLS) ) / exp( 0.91*input$trueOLS )
  } else if ( input$outcomeType == "MD" ){
    # Standardized mean difference: same exp(0.91 * d) mapping.
    bf <- exp(0.91*input$est.MD)/exp(0.91*input$trueMD)
  }else if ( input$outcomeType == "RD" ){
    # Risk difference from the 2x2 table cell counts.
    N = input$n10 + input$n11 + input$n01 + input$n00    # total sample size
    N1 = input$n10 + input$n11                           # exposed total
    N0 = input$n00 + input$n01                           # unexposed total
    f = N1/N                                             # exposure prevalence
    p0 = input$n01/N0                                    # risk among unexposed
    p1 = input$n11/N1                                    # risk among exposed
    # Ding 2016, page 376 expression
    bf <- (1/(2*p0*f) )*( sqrt( (input$trueRD + p0*(1-f) - p1*f)^2 +
                                  4*p1*p0*f*(1-f) ) -
                            (input$trueRD + p0*(1-f) - p1*f) )
  }
  return( bf )
})
# Interactive curve of confounder-strength pairs (RR_EU, RR_UD) that
# would suffice to explain away the observed association; the E-value
# is the labeled point where the two strengths are equal.
output$curveOfExplainAway <- renderPlotly({
  # MM: do not attempt to make plot unless we have the point estimate
  if( !is.na( bias.factor() ) ) {
    # Minimum confounder-outcome RR needed as a function of the
    # exposure-confounder RR. For a protective bias factor (< 1) the
    # reciprocal is used so the curve stays on the >1 scale.
    rr.ud <- function(rr.eu) {
      if(bias.factor() > 1){
        ( bias.factor()*(1 - rr.eu) )/( bias.factor() - rr.eu )
      }else{
        ( (1/bias.factor())*(1 - rr.eu) )/( (1/bias.factor()) - rr.eu )
      }
    }
    g <- ggplotly(
      ggplot(data.frame(rr.eu = c(0, 20)), aes(rr.eu)) +
        stat_function(fun = rr.ud) +
        # Axis limits scale with the E-value so the labeled point is
        # always comfortably inside the plotting window.
        scale_y_continuous(limits = c(1, evals()[1]*3)) +
        scale_x_continuous(limits = c(1, evals()[1]*3)) +
        xlab("Risk ratio for exposure-confounder relationship") + ylab("Risk ratio for confounder-outcome relationship") +
        geom_point(dat = data.frame(rr.eu = evals()[1], rr.ud = evals()[1]), aes(rr.eu, rr.ud)) +
        geom_text(dat = data.frame(rr.eu = evals()[1], rr.ud = evals()[1]),
                  aes(rr.eu, rr.ud),
                  label = paste0("E-value:\n (", round(evals()[1], 2), ",", round(evals()[1], 2),")"),
                  nudge_x = evals()[1]*(3/5), size = 3) +
        theme_minimal()
    )
    # Rewrite plotly's hover-tooltip text: name the E-value marker
    # (trace 2) and relabel the curve's variables (trace 1).
    # NOTE(review): gsub("y", ...) replaces EVERY "y" character in the
    # tooltip string, not just the axis variable name -- confirm intended.
    g$x$data[[2]]$text <- "E-value"
    g$x$data[[1]]$text <- gsub("y", "RR_UD", g$x$data[[1]]$text)
    g$x$data[[1]]$text <- gsub("rr.eu", "RR_EU", g$x$data[[1]]$text)
    return(g)
  } else {
    # if we don't have point estimate,
    # then show blank placeholder graph
    df = data.frame()
    g = ggplotly( ggplot(df) +
                    geom_point() +
                    xlim(0, 10) +
                    ylim(0, 10) +
                    theme_minimal() +
                    xlab("Risk ratio for exposure-confounder relationship") + ylab("Risk ratio for confounder-outcome relationship") +
                    annotate("text", x = 5, y = 5, label = "(Enter your point estimate)") )
    return(g)
  }
})
##### For Tab Panel Calibrated Fixed sensitivity parameters #####
# Reactive holding the user-uploaded CSV for the calibrated analysis;
# NULL until a file has been chosen.
mydata <- reactive({
  upload <- input$calibrated_uploaddat
  if (is.null(upload)) {
    return(NULL)
  }
  read.csv(upload$datapath, stringsAsFactors = FALSE)
})
### jl testing if data is being read okay:
# output$calibrated_tab1 = renderTable(mydata())
# On each click of "Calculate": collect the calibrated-method inputs,
# run the (expensive, bootstrap-based, .R resamples) confounded_meta
# analysis ONCE, and define the result strings and warnings.
# Fixes vs. original: confounded_meta was recomputed inside each of the
# four outputs (4x the bootstrap cost, and inconsistent bootstrap CIs
# across outputs); the "More Resouces" typo in the user-facing warning
# is corrected; the duplicated per-scale input collection is deduplicated.
calibrated_output <- observeEvent(input$calibrated_calculate, {

  # Threshold q arrives on the HR scale; convert to log-RR. On the "RR"
  # scale it must additionally be log-transformed first.
  if (input$calibrated_scale == "RR") {
    q = logHR_to_logRR(log(input$calibrated_q))
  } else {
    if (input$calibrated_scale == "Log-RR") {
      q = logHR_to_logRR(input$calibrated_q)
    }
  }
  # Remaining inputs are identical for both scales.
  r = input$calibrated_r
  tail = input$calibrated_tail
  method = input$calibrated_method
  Bmin = input$calibrated_Bmin
  Bmax = input$calibrated_Bmax
  calib = mydata()[[input$calibrated_calib.name]]   # calibrated estimates column
  R = input$calibrated_R                            # bootstrap resamples
  dat = mydata()
  calib.name = input$calibrated_calib.name

  # Single shared analysis per click (values are plain locals here, so
  # each renderText below would otherwise recompute the same result).
  cm = suppressWarnings(confounded_meta(method=method, q=q, r=r, Bmin=Bmin, Bmax=Bmax,
                                        .calib=calib, tail=tail, .give.CI=TRUE,
                                        .R=R, .dat=dat, .calib.name=calib.name))

  # Phat.t: estimated proportion of studies with causal effects beyond q.
  p    = round( as.numeric(cm$Est[which(cm$Value=="Phat.t")]), 3 )
  p_lo = round( as.numeric(cm$CI.lo[which(cm$Value=="Phat.t")]), 3 )
  p_hi = round( as.numeric(cm$CI.hi[which(cm$Value=="Phat.t")]), 3 )
  # That / Ghat: minimum bias factor and minimum confounding strength
  # needed to bring the proportion below r.
  Tmin    = round( as.numeric(cm$Est[which(cm$Value=="That")]), 3 )
  Tmin_lo = round( as.numeric(cm$CI.lo[which(cm$Value=="That")]), 3 )
  Tmin_hi = round( as.numeric(cm$CI.hi[which(cm$Value=="That")]), 3 )
  Gmin    = round( as.numeric(cm$Est[which(cm$Value=="Ghat")]), 3 )
  Gmin_lo = round( as.numeric(cm$CI.lo[which(cm$Value=="Ghat")]), 3 )
  Gmin_hi = round( as.numeric(cm$CI.hi[which(cm$Value=="Ghat")]), 3 )

  # Shown for That/Ghat when the proportion is already below r.
  na.msg = "Not applicable. This is already the case, even with no bias, given your pooled effect size, threshold, and choice of tail."

  # Proportion of meaningfully strong effects, with bootstrap CI.
  output$calibrated_text1 = renderText({
    paste( p, " (95% CI: ", p_lo, ", ", p_hi, ")", sep="" )
  })
  # Minimum bias factor (That).
  output$calibrated_text2 = renderText({
    ifelse(p < r, na.msg,
           paste( Tmin, " (95% CI: ", Tmin_lo, ", ", Tmin_hi, ")", sep="" ))
  })
  # Minimum confounding strength (Ghat).
  output$calibrated_text3 = renderText({
    ifelse(p < r, na.msg,
           paste( Gmin, " (95% CI: ", Gmin_lo, ", ", Gmin_hi, ")", sep="" ))
  })

  ### warnings:
  # Small-meta-analysis warning; stays reactive to the k input.
  output$calibrated_kwarn <- reactive({
    numStudies <- input$calibrated_k
    ifelse(numStudies <=10,
           "WARNING: These methods may not work well for meta-analyses with fewer than 10 studies.",
           "")
  })
  # Extreme-proportion warning based on the shared estimate above.
  output$calibrated_phatwarn <- reactive({
    ifelse(p<0.15 | p>0.85,
           HTML(paste('WARNING: Extreme estimated proportion', 'The estimated proportion of meaningfully strong effects is <0.15 or >0.85. The methods implemented in this website do not always work well in these situations. We would recommend instead applying alternative methods that have the same interpretation (see the "More Resources" tab).', sep = "<br/>")), "")
  })
}) ## closes calibrated_output
### results text for calibrated Fixed sensitivity parameters tab
# Dynamic captions for the calibrated-analysis results, echoing the
# chosen tail, effect threshold q, and (where relevant) the proportion
# limit r. paste() joins pieces with single spaces, preserving the
# original wording exactly.
output$calibrated_results_prop = renderText({
  suffix <- paste(input$calibrated_tail, input$calibrated_q, ":")
  paste("Proportion of studies with population causal effects", suffix)
})
output$calibrated_results_minbias = renderText({
  suffix <- paste(input$calibrated_tail, input$calibrated_q, ":")
  paste("Minimum bias factor (RR scale) to reduce to less than",
        input$calibrated_r,
        "the proportion of studies with population causal effects", suffix)
})
output$calibrated_results_minconf = renderText({
  suffix <- paste(input$calibrated_tail, input$calibrated_q, ":")
  paste("Minimum confounding strength (RR scale) to reduce to less than",
        input$calibrated_r,
        "the proportion of studies with population causal effects", suffix)
})
##### For Tab Panel Parametric Fixed sensitivity parameters #####
# On each click of "Calculate": collect the parametric-method inputs
# (log-transforming RR-scale values), run confounded_meta ONCE, and
# define the result strings, warnings, and the distribution plot.
# Fixes vs. original: confounded_meta was recomputed inside each of the
# four outputs; the "More Resouces" typo in the user-facing warning is
# corrected; the duplicated per-scale input collection is deduplicated.
# (Kept consistent with the calibrated_output handler above.)
parametric_output <- observeEvent(input$parametric_calculate, {

  # Inputs identical across both scales.
  t2_2   = input$parametric_t2
  vyr_2  = input$parametric_se_yr^2   # variance of pooled estimate
  vt2_2  = input$parametric_se_t2^2   # variance of heterogeneity estimate
  sigB_2 = input$parametric_sigB
  r_2    = input$parametric_r
  tail_2 = input$parametric_tail
  method_2 = input$parametric_method

  # Scale-dependent inputs: RR-scale values are log-transformed.
  if (input$parametric_scale == "RR") {
    yr_2  = log(input$parametric_yr)
    q_2   = log(input$parametric_q)
    muB_2 = log(input$parametric_muB)
  } else {
    if (input$parametric_scale == "Log-RR") {
      yr_2  = input$parametric_yr
      q_2   = input$parametric_q
      muB_2 = input$parametric_muB
    }
  }

  # Single shared analysis per click.
  cm = suppressWarnings(confounded_meta(method=method_2, q=q_2, r = r_2, muB = muB_2,
                                        sigB = sigB_2, yr = yr_2, vyr = vyr_2,
                                        t2 = t2_2, vt2 = vt2_2, CI.level = 0.95,
                                        tail = tail_2))

  # Prop: estimated proportion of studies with causal effects beyond q.
  p    = round( as.numeric(cm$Est[which(cm$Value=="Prop")]), 3 )
  p_lo = round( as.numeric(cm$CI.lo[which(cm$Value=="Prop")]), 3 )
  p_hi = round( as.numeric(cm$CI.hi[which(cm$Value=="Prop")]), 3 )
  # Tmin / Gmin: minimum bias factor and minimum confounding strength
  # needed to bring the proportion below r.
  Tmin    = round( as.numeric(cm$Est[which(cm$Value=="Tmin")]), 3 )
  Tmin_lo = round( as.numeric(cm$CI.lo[which(cm$Value=="Tmin")]), 3 )
  Tmin_hi = round( as.numeric(cm$CI.hi[which(cm$Value=="Tmin")]), 3 )
  Gmin    = round( as.numeric(cm$Est[which(cm$Value=="Gmin")]), 3 )
  Gmin_lo = round( as.numeric(cm$CI.lo[which(cm$Value=="Gmin")]), 3 )
  Gmin_hi = round( as.numeric(cm$CI.hi[which(cm$Value=="Gmin")]), 3 )

  # Shown for Tmin/Gmin when the proportion is already below r.
  na.msg = "Not applicable. This is already the case, even with no bias, given your pooled effect size, threshold, and choice of tail."

  output$parametric_text1 = renderText({
    paste( p, " (95% CI: ", p_lo, ", ", p_hi, ")", sep="" )
  })
  output$parametric_text2 = renderText({
    ifelse(p < r_2, na.msg,
           paste( Tmin, " (95% CI: ", Tmin_lo, ", ", Tmin_hi, ")", sep="" ))
  })
  output$parametric_text3 = renderText({
    ifelse(p < r_2, na.msg,
           paste( Gmin, " (95% CI: ", Gmin_lo, ", ", Gmin_hi, ")", sep="" ))
  })

  ### warnings:
  # Small-meta-analysis warning; stays reactive to the k input.
  output$parametric_kwarn <- reactive({
    numStudies <- input$parametric_k
    ifelse(numStudies <=10,
           "WARNING: These methods may not work well for meta-analyses with fewer than 10 studies.",
           "")
  })
  # Extreme-proportion warning based on the shared estimate above.
  output$parametric_phatwarn <- reactive({
    ifelse(p<0.15 | p>0.85,
           HTML(paste('WARNING: Extreme estimated proportion', 'The estimated proportion of meaningfully strong effects is <0.15 or >0.85. The methods implemented in this website do not always work well in these situations. We would recommend instead applying alternative methods that have the same interpretation (see the "More Resources" tab).', sep = "<br/>")), "")
  })

  # Distribution plot of the (bias-shifted) effect-size distribution.
  output$parametric_plot <- renderPlot({
    suppressWarnings(sens_plot_addtail( type="dist", q=q_2, yr=yr_2, vyr=vyr_2,
                                        t2=t2_2, vt2=vt2_2,
                                        muB=muB_2, sigB=sigB_2, tail=tail_2 ))
  })
}) ## closes parametric_output
### results text for parametric Fixed sensitivity parameters tab
# Dynamic captions for the parametric-analysis results, echoing the
# chosen tail, effect threshold q, and (where relevant) the proportion
# limit r. paste() joins pieces with single spaces, preserving the
# original wording exactly.
output$parametric_results_prop = renderText({
  suffix <- paste(input$parametric_tail, input$parametric_q, ":")
  paste("Proportion of studies with population causal effects", suffix)
})
output$parametric_results_minbias = renderText({
  suffix <- paste(input$parametric_tail, input$parametric_q, ":")
  paste("Minimum bias factor (RR scale) to reduce to less than",
        input$parametric_r,
        "the proportion of studies with population causal effects", suffix)
})
output$parametric_results_minconf = renderText({
  suffix <- paste(input$parametric_tail, input$parametric_q, ":")
  paste("Minimum confounding strength (RR scale) to reduce to less than",
        input$parametric_r,
        "the proportion of studies with population causal effects", suffix)
})
##### For Tab Panel Range of sensitivity parameters #####
# Line plot of the sensitivity results across a range of bias factors
# (Bmin to Bmax), always using the parametric method.
output$plot_3 <- renderPlot({
  # RR-scale inputs (pooled estimate, threshold, bias-factor range) are
  # log-transformed; log-RR inputs are used as-is. NOTE(review): unlike
  # the calibrated handler above, Bmin/Bmax ARE log-transformed here on
  # the RR scale -- confirm this asymmetry is intended.
  if(input$scale_3=="RR"){
    yr_3 = log(input$yr_3)
    t2_3 = input$t2_3
    q_3= log(input$q_3)
    vyr_3 = input$se_yr_3^2
    vt2_3 = input$se_t2_3^2
    sigB_3 = input$sigB_3
    Bmin_3 = log(input$Bmin_3)
    Bmax_3 = log(input$Bmax_3)
    tail_3 = input$tail_3
    method_3 = "parametric"
  } else {
    if(input$scale_3=="Log-RR"){
      yr_3 = input$yr_3
      t2_3 = input$t2_3
      q_3 = input$q_3
      vyr_3 = input$se_yr_3^2
      vt2_3 = input$se_t2_3^2
      sigB_3 = input$sigB_3
      Bmin_3 = input$Bmin_3
      Bmax_3 = input$Bmax_3
      tail_3 = input$tail_3
      method_3 = "parametric"
    }
  }
  suppressWarnings(sens_plot_addtail(method = method_3, type="line", q=q_3, yr=yr_3, vyr=vyr_3, t2=t2_3, vt2=vt2_3,
                                     Bmin=Bmin_3, Bmax=Bmax_3, sigB=sigB_3, tail=tail_3 ))
})
} ## closes function
### EXTRA CODE BELOW, CAN PROBABLY DELETE BUT KEEP FOR NOW ###
# ##### For Tab Panel Fixed sensitivity parameters #####
# output$plot2 <- renderPlot({
#
# # observeEvent( input$make.plot, {
# yr_2 = log(input$yr_2)
# t2_2 = input$t2_2
# q_2 = log(input$q_2)
# vyr_2 = input$se_yr_2^2
# vt2_2 = input$se_t2_2^2
# muB_2 = log(input$muB_2)
# sigB_2 = input$sigB_2
# r_2 = input$r_2
#
#
# suppressWarnings(sens_plot_addtail( type="dist", q=q_2, yr=yr_2, vyr=vyr_2, t2=t2_2, vt2=vt2_2,
# muB=muB_2, sigB=sigB_2 ))
#
#
# # } )
#
# })
##### For Tab Panel Range of sensitivity parameters #####
# output$plot1 <- renderPlot({
#
# if(input$scale=="RR"){
# yr = log(input$yr)
# t2 = input$t2
# q = log(input$q)
# vyr = input$se_yr^2
# vt2 = input$se_t2^2
# sigB = input$sigB
# Bmin = log(input$Bmin)
# Bmax = log(input$Bmax)
# tail = input$tail
# } else {
# if(input$scale=="Log-RR"){
# yr = input$yr
# t2 = input$t2
# q = input$q
# vyr = input$se_yr^2
# vt2 = input$se_t2^2
# sigB = input$sigB
# Bmin = input$Bmin
# Bmax = input$Bmax
# tail = input$tail
# }
# }
#
# suppressWarnings(sens_plot_addtail( type="line", q=q, yr=yr, vyr=vyr, t2=t2, vt2=vt2,
# Bmin=Bmin, Bmax=Bmax, sigB=sigB, tail=tail ))
# })
##### WARNINGS #####
##### For Tab Panel Range of sensitivity parameters #####
# output$kwarn <- reactive({
# numStudies <- input$k
# ifelse(numStudies <=10,
# "WARNING: These methods may not work well for meta-analyses with fewer than 10 studies.",
# "")
# })
##### For Tab Panel Fixed sensitivity parameters #####
# output$kwarn_2 <- reactive({
# numStudies <- input$k_2
# ifelse(numStudies <=10,
# "WARNING: These methods may not work well for meta-analyses with fewer than 10 studies.",
# "")
# })
# output$phatwarn_2 <- reactive({
# yr_2 = log(input$yr_2)
# t2_2 = input$t2_2
# q_2 = log(input$q_2)
# vyr_2 = input$se_yr_2^2
# vt2_2 = input$se_t2_2^2
# muB_2 = log(input$muB_2)
# sigB_2 = input$sigB_2
# r_2 = input$r_2
# tail_2 = input$tail_2
#
# cm = suppressWarnings(confounded_meta(q=q_2, r = r_2, muB = muB_2, sigB = sigB_2, yr = yr_2, vyr = vyr_2,
# t2 = t2_2, vt2 = vt2_2, CI.level = 0.95, tail = tail_2))
#
# p = round( cm$Est[ cm$Value=="Prop" ], 3 )
# ifelse(p<0.15 | p>0.85,
# HTML(paste('WARNING: Extreme estimated proportion', 'The estimated proportion of meaningfully strong effects is <0.15 or >0.85. The methods implemented in this website do not always work well in these situations. We would recommend instead applying alternative methods that have the same interpretation (see the "More Resouces" tab).', sep = "<br/>")), "")
# })
|
/Shiny app/evalue/server.R
|
no_license
|
belalanik/evalue
|
R
| false
| false
| 25,845
|
r
|
source("startup.R")
function(input, output, session) {
##### For Tab Panel E-value #####
# E-value calculation for the currently selected outcome type. Each
# branch validates its required inputs, returning a user-facing prompt
# string early when something is missing; otherwise it calls the
# matching evalues.* helper. Row [2,] of the returned matrix is taken
# as the E-values (point, lower, upper) -- presumably the evalues.*
# return convention (confirm against the EValue package) -- and the
# result is rounded to 2 decimals.
evals <- reactive({
  # Risk ratio
  if ( input$outcomeType == "RR" ) {
    if ( is.na( input$est.RR )) return("Enter your point estimate")
    if ( is.na( input$trueRR )) return("Enter a true value")
    evals = round( evalues.RR( est = input$est.RR, lo = input$lo.RR, hi = input$hi.RR, true = input$trueRR )[2,], 2 )
  }
  # Odds ratio, rare outcome
  if ( input$outcomeType == "OR.rare" ) {
    if ( is.na( input$est.OR.rare )) return("Enter your point estimate")
    if ( is.na( input$trueORrare )) return("Enter a true value")
    evals = round( evalues.OR( est = input$est.OR.rare, lo = input$lo.OR.rare, hi = input$hi.OR.rare, rare = TRUE, true = input$trueORrare )[2,], 2 )
  }
  # Odds ratio, common outcome
  if ( input$outcomeType == "OR.com" ) {
    if ( is.na( input$est.OR.com )) return("Enter your point estimate")
    if ( is.na( input$trueORcom )) return("Enter a true value")
    evals = round( evalues.OR( est = input$est.OR.com, lo = input$lo.OR.com, hi = input$hi.OR.com, rare = FALSE, true = input$trueORcom )[2,], 2 )
  }
  # Hazard ratio, rare outcome
  if ( input$outcomeType == "HR.rare" ) {
    if ( is.na( input$est.HR.rare )) return("Enter your point estimate")
    if ( is.na( input$trueHRrare )) return("Enter a true value")
    evals = round( evalues.HR( est = input$est.HR.rare, lo = input$lo.HR.rare, hi = input$hi.HR.rare, rare = TRUE, true = input$trueHRrare )[2,], 2 )
  }
  # Hazard ratio, common outcome
  if ( input$outcomeType == "HR.com" ) {
    if ( is.na( input$est.HR.com )) return("Enter your point estimate")
    if ( is.na( input$trueHRcom )) return("Enter a true value")
    evals = round( evalues.HR( est = input$est.HR.com, lo = input$lo.HR.com, hi = input$hi.HR.com, rare = FALSE, true = input$trueHRcom )[2,], 2 )
  }
  # Mean difference (uses the standard error, no CI limits)
  if ( input$outcomeType == "MD" ) {
    if ( is.na( input$est.MD )) return("Enter your point estimate")
    if ( is.na( input$trueMD )) return("Enter a true value")
    evals = round( evalues.MD( est = input$est.MD, se = input$se.MD, true = input$trueMD )[2,], 2 )
  }
  # OLS regression coefficient (additionally needs outcome SD and the
  # exposure contrast delta)
  if ( input$outcomeType == "OLS" ) {
    if ( is.na( input$estOLS ) ) return("Enter your point estimate")
    if ( is.na( input$sdOLS ) ) return("Enter your standard deviation")
    if ( is.na( input$trueOLS )) return("Enter a true value")
    evals = round( evalues.OLS( est = input$estOLS,
                                se = input$seOLS,
                                sd = input$sdOLS,
                                delta = input$deltaOLS,
                                true = input$trueOLS )[2,], 2 )
  }
  # Risk difference, computed from the four 2x2 cell counts; evalues.RD
  # returns a vector rather than a matrix, hence as.numeric().
  if ( input$outcomeType == "RD" ) {
    if ( any( is.na( c( input$n11, input$n10, input$n01, input$n00, input$trueRD ) ) ) ) {
      return("Enter all of the above information")
    }
    evals = round( as.numeric( evalues.RD( n11 = input$n11, n10 = input$n10, n01 = input$n01, n00 = input$n00,
                                           true = input$trueRD, alpha = input$alpha, grid = input$grid ) ), 2 )
  }
  return( evals )
})
# Headline E-value string for the UI: always reports the point-estimate
# E-value, and appends the CI E-value when either CI limit was supplied.
# (When inputs are missing, evals() yields a prompt string, which is
# passed through unchanged.)
output$result.text = renderText({
  ev <- evals()
  msg <- paste0("E-value for point estimate: ", ev[1])
  # A CI E-value exists if either the lower or upper limit produced one.
  if (!is.na(ev[2]) || !is.na(ev[3])) {
    msg <- paste0(msg, " and for confidence interval: ", min(ev, na.rm = TRUE))
  }
  msg
})
#### Make the plot ####
# Bias factor implied by the observed estimate relative to the
# hypothesized true value, expressed (approximately) on the risk-ratio
# scale for each outcome type. Drives the "explain-away" curve below.
bias.factor <- reactive({
  # Default: risk ratio -- bias factor is the simple ratio of estimates.
  bf <- input$est.RR/input$trueRR
  if( input$outcomeType == "OR.rare" ){
    # Rare-outcome OR is treated as approximating the RR directly.
    bf <- input$est.OR.rare/input$trueORrare
  }else if( input$outcomeType == "OR.com" ){
    # Common-outcome OR: square-root transform approximates the RR.
    bf <- sqrt(input$est.OR.com)/sqrt(input$trueORcom)
  }else if( input$outcomeType == "HR.rare" ){
    # Rare-outcome HR treated as approximating the RR directly.
    bf <- input$est.HR.rare/input$trueHRrare
  }else if ( input$outcomeType == "HR.com" ){
    # Common-outcome HR: each HR is first mapped to an approximate RR
    # via (1 - 0.5^sqrt(HR)) / (1 - 0.5^sqrt(1/HR)), then the ratio taken.
    bf <- ( (( 1 - 0.5^sqrt(input$est.HR.com) )/( 1 - 0.5^sqrt(1/input$est.HR.com) )) )/( (( 1 - 0.5^sqrt(input$trueHRcom) )/( 1 - 0.5^sqrt(1/input$trueHRcom) )) )
  } else if ( input$outcomeType == "OLS" ){
    # OLS coefficient: standardize (delta * est / sd), then map to an
    # approximate RR via exp(0.91 * standardized effect).
    bf <- exp( 0.91 * (input$deltaOLS * input$estOLS / input$sdOLS) ) / exp( 0.91*input$trueOLS )
  } else if ( input$outcomeType == "MD" ){
    # Standardized mean difference: same exp(0.91 * d) mapping.
    bf <- exp(0.91*input$est.MD)/exp(0.91*input$trueMD)
  }else if ( input$outcomeType == "RD" ){
    # Risk difference from the 2x2 table cell counts.
    N = input$n10 + input$n11 + input$n01 + input$n00    # total sample size
    N1 = input$n10 + input$n11                           # exposed total
    N0 = input$n00 + input$n01                           # unexposed total
    f = N1/N                                             # exposure prevalence
    p0 = input$n01/N0                                    # risk among unexposed
    p1 = input$n11/N1                                    # risk among exposed
    # Ding 2016, page 376 expression
    bf <- (1/(2*p0*f) )*( sqrt( (input$trueRD + p0*(1-f) - p1*f)^2 +
                                  4*p1*p0*f*(1-f) ) -
                            (input$trueRD + p0*(1-f) - p1*f) )
  }
  return( bf )
})
# Interactive curve of confounder-strength pairs (RR_EU, RR_UD) that
# would suffice to explain away the observed association; the E-value
# is the labeled point where the two strengths are equal.
output$curveOfExplainAway <- renderPlotly({
  # MM: do not attempt to make plot unless we have the point estimate
  if( !is.na( bias.factor() ) ) {
    # Minimum confounder-outcome RR needed as a function of the
    # exposure-confounder RR. For a protective bias factor (< 1) the
    # reciprocal is used so the curve stays on the >1 scale.
    rr.ud <- function(rr.eu) {
      if(bias.factor() > 1){
        ( bias.factor()*(1 - rr.eu) )/( bias.factor() - rr.eu )
      }else{
        ( (1/bias.factor())*(1 - rr.eu) )/( (1/bias.factor()) - rr.eu )
      }
    }
    g <- ggplotly(
      ggplot(data.frame(rr.eu = c(0, 20)), aes(rr.eu)) +
        stat_function(fun = rr.ud) +
        # Axis limits scale with the E-value so the labeled point is
        # always comfortably inside the plotting window.
        scale_y_continuous(limits = c(1, evals()[1]*3)) +
        scale_x_continuous(limits = c(1, evals()[1]*3)) +
        xlab("Risk ratio for exposure-confounder relationship") + ylab("Risk ratio for confounder-outcome relationship") +
        geom_point(dat = data.frame(rr.eu = evals()[1], rr.ud = evals()[1]), aes(rr.eu, rr.ud)) +
        geom_text(dat = data.frame(rr.eu = evals()[1], rr.ud = evals()[1]),
                  aes(rr.eu, rr.ud),
                  label = paste0("E-value:\n (", round(evals()[1], 2), ",", round(evals()[1], 2),")"),
                  nudge_x = evals()[1]*(3/5), size = 3) +
        theme_minimal()
    )
    # Rewrite plotly's hover-tooltip text: name the E-value marker
    # (trace 2) and relabel the curve's variables (trace 1).
    # NOTE(review): gsub("y", ...) replaces EVERY "y" character in the
    # tooltip string, not just the axis variable name -- confirm intended.
    g$x$data[[2]]$text <- "E-value"
    g$x$data[[1]]$text <- gsub("y", "RR_UD", g$x$data[[1]]$text)
    g$x$data[[1]]$text <- gsub("rr.eu", "RR_EU", g$x$data[[1]]$text)
    return(g)
  } else {
    # if we don't have point estimate,
    # then show blank placeholder graph
    df = data.frame()
    g = ggplotly( ggplot(df) +
                    geom_point() +
                    xlim(0, 10) +
                    ylim(0, 10) +
                    theme_minimal() +
                    xlab("Risk ratio for exposure-confounder relationship") + ylab("Risk ratio for confounder-outcome relationship") +
                    annotate("text", x = 5, y = 5, label = "(Enter your point estimate)") )
    return(g)
  }
})
##### For Tab Panel Calibrated Fixed sensitivity parameters #####
# Reactive holding the user-uploaded CSV for the calibrated analysis;
# NULL until a file has been chosen.
mydata <- reactive({
  upload <- input$calibrated_uploaddat
  if (is.null(upload)) {
    return(NULL)
  }
  read.csv(upload$datapath, stringsAsFactors = FALSE)
})
### jl testing if data is being read okay:
# output$calibrated_tab1 = renderTable(mydata())
# On each click of "Calculate": collect the calibrated-method inputs,
# run the (expensive, bootstrap-based, .R resamples) confounded_meta
# analysis ONCE, and define the result strings and warnings.
# Fixes vs. original: confounded_meta was recomputed inside each of the
# four outputs (4x the bootstrap cost, and inconsistent bootstrap CIs
# across outputs); the "More Resouces" typo in the user-facing warning
# is corrected; the duplicated per-scale input collection is deduplicated.
calibrated_output <- observeEvent(input$calibrated_calculate, {

  # Threshold q arrives on the HR scale; convert to log-RR. On the "RR"
  # scale it must additionally be log-transformed first.
  if (input$calibrated_scale == "RR") {
    q = logHR_to_logRR(log(input$calibrated_q))
  } else {
    if (input$calibrated_scale == "Log-RR") {
      q = logHR_to_logRR(input$calibrated_q)
    }
  }
  # Remaining inputs are identical for both scales.
  r = input$calibrated_r
  tail = input$calibrated_tail
  method = input$calibrated_method
  Bmin = input$calibrated_Bmin
  Bmax = input$calibrated_Bmax
  calib = mydata()[[input$calibrated_calib.name]]   # calibrated estimates column
  R = input$calibrated_R                            # bootstrap resamples
  dat = mydata()
  calib.name = input$calibrated_calib.name

  # Single shared analysis per click (values are plain locals here, so
  # each renderText below would otherwise recompute the same result).
  cm = suppressWarnings(confounded_meta(method=method, q=q, r=r, Bmin=Bmin, Bmax=Bmax,
                                        .calib=calib, tail=tail, .give.CI=TRUE,
                                        .R=R, .dat=dat, .calib.name=calib.name))

  # Phat.t: estimated proportion of studies with causal effects beyond q.
  p    = round( as.numeric(cm$Est[which(cm$Value=="Phat.t")]), 3 )
  p_lo = round( as.numeric(cm$CI.lo[which(cm$Value=="Phat.t")]), 3 )
  p_hi = round( as.numeric(cm$CI.hi[which(cm$Value=="Phat.t")]), 3 )
  # That / Ghat: minimum bias factor and minimum confounding strength
  # needed to bring the proportion below r.
  Tmin    = round( as.numeric(cm$Est[which(cm$Value=="That")]), 3 )
  Tmin_lo = round( as.numeric(cm$CI.lo[which(cm$Value=="That")]), 3 )
  Tmin_hi = round( as.numeric(cm$CI.hi[which(cm$Value=="That")]), 3 )
  Gmin    = round( as.numeric(cm$Est[which(cm$Value=="Ghat")]), 3 )
  Gmin_lo = round( as.numeric(cm$CI.lo[which(cm$Value=="Ghat")]), 3 )
  Gmin_hi = round( as.numeric(cm$CI.hi[which(cm$Value=="Ghat")]), 3 )

  # Shown for That/Ghat when the proportion is already below r.
  na.msg = "Not applicable. This is already the case, even with no bias, given your pooled effect size, threshold, and choice of tail."

  # Proportion of meaningfully strong effects, with bootstrap CI.
  output$calibrated_text1 = renderText({
    paste( p, " (95% CI: ", p_lo, ", ", p_hi, ")", sep="" )
  })
  # Minimum bias factor (That).
  output$calibrated_text2 = renderText({
    ifelse(p < r, na.msg,
           paste( Tmin, " (95% CI: ", Tmin_lo, ", ", Tmin_hi, ")", sep="" ))
  })
  # Minimum confounding strength (Ghat).
  output$calibrated_text3 = renderText({
    ifelse(p < r, na.msg,
           paste( Gmin, " (95% CI: ", Gmin_lo, ", ", Gmin_hi, ")", sep="" ))
  })

  ### warnings:
  # Small-meta-analysis warning; stays reactive to the k input.
  output$calibrated_kwarn <- reactive({
    numStudies <- input$calibrated_k
    ifelse(numStudies <=10,
           "WARNING: These methods may not work well for meta-analyses with fewer than 10 studies.",
           "")
  })
  # Extreme-proportion warning based on the shared estimate above.
  output$calibrated_phatwarn <- reactive({
    ifelse(p<0.15 | p>0.85,
           HTML(paste('WARNING: Extreme estimated proportion', 'The estimated proportion of meaningfully strong effects is <0.15 or >0.85. The methods implemented in this website do not always work well in these situations. We would recommend instead applying alternative methods that have the same interpretation (see the "More Resources" tab).', sep = "<br/>")), "")
  })
}) ## closes calibrated_output
### results text for calibrated Fixed sensitivity parameters tab
# Dynamic captions for the calibrated-analysis results, echoing the
# chosen tail, effect threshold q, and (where relevant) the proportion
# limit r. paste() joins pieces with single spaces, preserving the
# original wording exactly.
output$calibrated_results_prop = renderText({
  suffix <- paste(input$calibrated_tail, input$calibrated_q, ":")
  paste("Proportion of studies with population causal effects", suffix)
})
output$calibrated_results_minbias = renderText({
  suffix <- paste(input$calibrated_tail, input$calibrated_q, ":")
  paste("Minimum bias factor (RR scale) to reduce to less than",
        input$calibrated_r,
        "the proportion of studies with population causal effects", suffix)
})
output$calibrated_results_minconf = renderText({
  suffix <- paste(input$calibrated_tail, input$calibrated_q, ":")
  paste("Minimum confounding strength (RR scale) to reduce to less than",
        input$calibrated_r,
        "the proportion of studies with population causal effects", suffix)
})
##### For Tab Panel Parametric Fixed sensitivity parameters #####
parametric_output <- observeEvent(input$parametric_calculate, {
if(input$parametric_scale=="RR"){
yr_2 = log(input$parametric_yr)
t2_2 = input$parametric_t2
q_2 = log(input$parametric_q)
vyr_2 = input$parametric_se_yr^2
vt2_2 = input$parametric_se_t2^2
muB_2 = log(input$parametric_muB)
sigB_2 = input$parametric_sigB
r_2 = input$parametric_r
tail_2 = input$parametric_tail
method_2 = input$parametric_method
} else {
if(input$parametric_scale=="Log-RR"){
yr_2 = input$parametric_yr
t2_2 = input$parametric_t2
q_2 = input$parametric_q
vyr_2 = input$parametric_se_yr^2
vt2_2 = input$parametric_se_t2^2
muB_2 = input$parametric_muB
sigB_2 = input$parametric_sigB
r_2 = input$parametric_r
tail_2 = input$parametric_tail
method_2 = input$parametric_method
}
}
### for testing, can delete:
# yr_2 = log(1.2)
# t2_2 = 0.1
# q_2 = 1.1
# vyr_2 = 0.01
# vt2_2 = 0.1
# muB_2 = 1.5
# sigB_2 = 0
# r_2 = 0.2
# tail_2 = "below"
# method_2 = "parametric"
output$parametric_text1 = renderText({
cm = suppressWarnings(confounded_meta(method=method_2,q=q_2, r = r_2, muB = muB_2, sigB = sigB_2, yr = yr_2, vyr = vyr_2,
t2 = t2_2, vt2 = vt2_2, CI.level = 0.95, tail = tail_2))
p = round( as.numeric(cm$Est[which(cm$Value=="Prop")]), 3 )
p_lo = round( as.numeric(cm$CI.lo[which(cm$Value=="Prop")]), 3 )
p_hi = round( as.numeric(cm$CI.hi[which(cm$Value=="Prop")]), 3 )
##### Create String for UI #####
string_p = paste( p, " (95% CI: ", p_lo, ", ", p_hi, ")", sep="" )
return( string_p )
}) ## closes parametric_text1
output$parametric_text2 = renderText({
cm = suppressWarnings(confounded_meta(method=method_2,q=q_2, r = r_2, muB = muB_2, sigB = sigB_2, yr = yr_2, vyr = vyr_2,
t2 = t2_2, vt2 = vt2_2, CI.level = 0.95, tail = tail_2))
p = round( as.numeric(cm$Est[which(cm$Value=="Prop" )]), 3 )
Tmin = round( as.numeric(cm$Est[which(cm$Value=="Tmin" )]), 3 )
Tmin_lo = round( as.numeric(cm$CI.lo[which(cm$Value=="Tmin" )]), 3 )
Tmin_hi = round( as.numeric(cm$CI.hi[which(cm$Value=="Tmin" )]), 3 )
##### Create String for UI #####
string_Tmin = ifelse(p < r_2, "Not applicable. This is already the case, even with no bias, given your pooled effect size, threshold, and choice of tail.", paste( Tmin, " (95% CI: ", Tmin_lo, ", ", Tmin_hi, ")", sep="" ))
return( string_Tmin )
}) ## closes parametric_text2
output$parametric_text3 = renderText({
cm = suppressWarnings(confounded_meta(method=method_2,q=q_2, r = r_2, muB = muB_2, sigB = sigB_2, yr = yr_2, vyr = vyr_2,
t2 = t2_2, vt2 = vt2_2, CI.level = 0.95, tail = tail_2))
p = round( as.numeric(cm$Est[ which(cm$Value=="Prop") ]), 3 )
Gmin = round( as.numeric(cm$Est[ which(cm$Value=="Gmin") ]), 3 )
Gmin_lo = round( as.numeric(cm$CI.lo[ which(cm$Value=="Gmin") ]), 3 )
Gmin_hi = round( as.numeric(cm$CI.hi[ which(cm$Value=="Gmin") ]), 3 )
##### Create String for UI #####
string_Gmin = ifelse(p < r_2, "Not applicable. This is already the case, even with no bias, given your pooled effect size, threshold, and choice of tail.", paste( Gmin, " (95% CI: ", Gmin_lo, ", ", Gmin_hi, ")", sep="" ))
return( string_Gmin )
}) ## closes parametric_text3
### warnings:
output$parametric_kwarn <- reactive({
numStudies <- input$parametric_k
ifelse(numStudies <=10,
"WARNING: These methods may not work well for meta-analyses with fewer than 10 studies.",
"")
}) ## closes parametric_kwarn_2
output$parametric_phatwarn <- reactive({
cm = suppressWarnings(confounded_meta(method=method_2,q=q_2, r = r_2, muB = muB_2, sigB = sigB_2, yr = yr_2, vyr = vyr_2,
t2 = t2_2, vt2 = vt2_2, CI.level = 0.95, tail = tail_2))
p = round( cm$Est[ cm$Value=="Prop" ], 3 )
ifelse(p<0.15 | p>0.85,
HTML(paste('WARNING: Extreme estimated proportion', 'The estimated proportion of meaningfully strong effects is <0.15 or >0.85. The methods implemented in this website do not always work well in these situations. We would recommend instead applying alternative methods that have the same interpretation (see the "More Resouces" tab).', sep = "<br/>")), "")
}) ## closes parametric_phatwarn_2
### 10/21/20 TBD, get the rest working then ask Maya about these plots:
output$parametric_plot <- renderPlot({
suppressWarnings(sens_plot_addtail( type="dist", q=q_2, yr=yr_2, vyr=vyr_2, t2=t2_2, vt2=vt2_2,
muB=muB_2, sigB=sigB_2, tail=tail_2 ))
}) ## closes parametric_plot1
}) ## closes parametric_output
### results text for parametric Fixed sensitivity parameters tab
output$parametric_results_prop = renderText({
paste("Proportion of studies with population causal effects", input$parametric_tail, input$parametric_q, ":")
})
output$parametric_results_minbias = renderText({
paste("Minimum bias factor (RR scale) to reduce to less than", input$parametric_r, "the proportion of studies with population causal effects", input$parametric_tail, input$parametric_q, ":")
})
output$parametric_results_minconf = renderText({
paste("Minimum confounding strength (RR scale) to reduce to less than", input$parametric_r, "the proportion of studies with population causal effects", input$parametric_tail, input$parametric_q, ":")
})
##### For Tab Panel Range of sensitivity parameters #####
output$plot_3 <- renderPlot({
if(input$scale_3=="RR"){
yr_3 = log(input$yr_3)
t2_3 = input$t2_3
q_3= log(input$q_3)
vyr_3 = input$se_yr_3^2
vt2_3 = input$se_t2_3^2
sigB_3 = input$sigB_3
Bmin_3 = log(input$Bmin_3)
Bmax_3 = log(input$Bmax_3)
tail_3 = input$tail_3
method_3 = "parametric"
} else {
if(input$scale_3=="Log-RR"){
yr_3 = input$yr_3
t2_3 = input$t2_3
q_3 = input$q_3
vyr_3 = input$se_yr_3^2
vt2_3 = input$se_t2_3^2
sigB_3 = input$sigB_3
Bmin_3 = input$Bmin_3
Bmax_3 = input$Bmax_3
tail_3 = input$tail_3
method_3 = "parametric"
}
}
suppressWarnings(sens_plot_addtail(method = method_3, type="line", q=q_3, yr=yr_3, vyr=vyr_3, t2=t2_3, vt2=vt2_3,
Bmin=Bmin_3, Bmax=Bmax_3, sigB=sigB_3, tail=tail_3 ))
})
} ## closes function
### EXTRA CODE BELOW, CAN PROBABLY DELETE BUT KEEP FOR NOW ###
# ##### For Tab Panel Fixed sensitivity parameters #####
# output$plot2 <- renderPlot({
#
# # observeEvent( input$make.plot, {
# yr_2 = log(input$yr_2)
# t2_2 = input$t2_2
# q_2 = log(input$q_2)
# vyr_2 = input$se_yr_2^2
# vt2_2 = input$se_t2_2^2
# muB_2 = log(input$muB_2)
# sigB_2 = input$sigB_2
# r_2 = input$r_2
#
#
# suppressWarnings(sens_plot_addtail( type="dist", q=q_2, yr=yr_2, vyr=vyr_2, t2=t2_2, vt2=vt2_2,
# muB=muB_2, sigB=sigB_2 ))
#
#
# # } )
#
# })
##### For Tab Panel Range of sensitivity parameters #####
# output$plot1 <- renderPlot({
#
# if(input$scale=="RR"){
# yr = log(input$yr)
# t2 = input$t2
# q = log(input$q)
# vyr = input$se_yr^2
# vt2 = input$se_t2^2
# sigB = input$sigB
# Bmin = log(input$Bmin)
# Bmax = log(input$Bmax)
# tail = input$tail
# } else {
# if(input$scale=="Log-RR"){
# yr = input$yr
# t2 = input$t2
# q = input$q
# vyr = input$se_yr^2
# vt2 = input$se_t2^2
# sigB = input$sigB
# Bmin = input$Bmin
# Bmax = input$Bmax
# tail = input$tail
# }
# }
#
# suppressWarnings(sens_plot_addtail( type="line", q=q, yr=yr, vyr=vyr, t2=t2, vt2=vt2,
# Bmin=Bmin, Bmax=Bmax, sigB=sigB, tail=tail ))
# })
##### WARNINGS #####
##### For Tab Panel Range of sensitivity parameters #####
# output$kwarn <- reactive({
# numStudies <- input$k
# ifelse(numStudies <=10,
# "WARNING: These methods may not work well for meta-analyses with fewer than 10 studies.",
# "")
# })
##### For Tab Panel Fixed sensitivity parameters #####
# output$kwarn_2 <- reactive({
# numStudies <- input$k_2
# ifelse(numStudies <=10,
# "WARNING: These methods may not work well for meta-analyses with fewer than 10 studies.",
# "")
# })
# output$phatwarn_2 <- reactive({
# yr_2 = log(input$yr_2)
# t2_2 = input$t2_2
# q_2 = log(input$q_2)
# vyr_2 = input$se_yr_2^2
# vt2_2 = input$se_t2_2^2
# muB_2 = log(input$muB_2)
# sigB_2 = input$sigB_2
# r_2 = input$r_2
# tail_2 = input$tail_2
#
# cm = suppressWarnings(confounded_meta(q=q_2, r = r_2, muB = muB_2, sigB = sigB_2, yr = yr_2, vyr = vyr_2,
# t2 = t2_2, vt2 = vt2_2, CI.level = 0.95, tail = tail_2))
#
# p = round( cm$Est[ cm$Value=="Prop" ], 3 )
# ifelse(p<0.15 | p>0.85,
# HTML(paste('WARNING: Extreme estimated proportion', 'The estimated proportion of meaningfully strong effects is <0.15 or >0.85. The methods implemented in this website do not always work well in these situations. We would recommend instead applying alternative methods that have the same interpretation (see the "More Resouces" tab).', sep = "<br/>")), "")
# })
|
# Count Stats: significant cell population stats
# aya43@sfu.ca 20161220
#Directory
root = "~/projects/IMPC"
result_dir = "result"; suppressWarnings(dir.create (result_dir))
setwd(root)
panelL = c("P1")
centreL = c("Sanger_SPLEEN")#,"Sanger_MLN","CIPHE","TCP","H")
#Options
options(stringsAsFactors=FALSE)
# options(device="cairo")
options(na.rm=T)
#Input
phenoMeta_dir = paste(result_dir, "/", panelL, "/", centreL, "/phenoMeta.Rdata", sep="")
sampleMeta_dir = paste(result_dir, "/", panelL, "/", centreL, "/sampleMeta.Rdata", sep="")
matrixPvalTRIM_dir = paste(result_dir, "/", panelL, "/", centreL, "/matrixPvalTRIM_CountAdj.Rdata",sep="")
rchy_dir = paste(result_dir, "/", panelL, "/", centreL, "/rchy", sep="")
#Output
stats_dir = paste(result_dir, "/", panelL, "/", centreL, "/stats", sep=""); for (ri in 1:length(stats_dir)) { suppressWarnings(dir.create(stats_dir[ri])) }
sigGene_dir = paste(stats_dir, "/sigGene", sep="") #sig nodes in common per gene
perFile_dir = paste(stats_dir, "/perFile", sep="") #sig nodes & layers per file
sigFile_dir = paste(stats_dir, "/sigFile", sep="") #sig nodes in common per file
rchyGene_dir = paste(stats_dir, "/rchyGene", sep="") #should be max of all included
rchyFile_dir = paste(stats_dir, "/rchyFile", sep="") #should be max of all included
rchyEdgesFile_dir = paste(stats_dir, "/rchyEdgesFile", sep="") #should be max of all included
rchyNodesFile_dir = paste(stats_dir, "/rchyNodesFile", sep="") #should be max of all included
rchyEdgesFileDiff_dir = paste(stats_dir, "/rchyEdgesFileDiff", sep="") #should be max of all included
rchyNodesFileDiff_dir = paste(stats_dir, "/rchyNodesFileDiff", sep="") #should be max of all included
source("~/projects/IMPC/code/_funcAlice.R")
libr("foreach")
libr("doMC")
libr("RchyOptimyx")
#Setup Cores
no_cores = 1#detectCores()-1
registerDoMC(no_cores)
#Options for script
interestedCols = c("fileName","gene")
start = Sys.time()
for (ci in 1:length(paste0(panelL,centreL))) {
start1 = Sys.time()
cat("\n",paste0(panelL," ",centreL)[ci],"; loading matrix ", sep="")
m0 = get(load(matrixPvalTRIM_dir[ci]))
sampleMeta0 = get(load(sampleMeta_dir[ci]))
sampleMeta = sampleMeta0[match(rownames(m0),sampleMeta0$fileName),]
phenoMeta0 = get(load(phenoMeta_dir[ci]))
phenoMeta = phenoMeta0[match(colnames(m0),phenoMeta0$phenotype),]
uniqueVals = sort(unique(sampleMeta[,interestedCols[2]]))
#rchy stats
rchyfolders = list.dirs(rchy_dir[ci],full.names=T,recursive=F)
rchyfolders = rchyfolders[sapply(rchyfolders, function(x) length(list.files(x))>0)]
fileNames0 = lapply(rchyfolders, function(rf) list.files(rf,full.names=T,recursive=F,pattern="_merged"))
fileNames0 = lapply(fileNames0, function(x) gsub("_merged.Rdata",".fcs",fileNames(x)))
fn = unique(Reduce("union",fileNames0))
rchyNodes0 = rchyEdges0 = matrix(0,nrow=length(fn),ncol=length(rchyfolders),dimnames=list(fn,NULL))
#break on task 1
result = foreach (rfi = 1:length(rchyfolders)) %dopar% {
nodesAll = edgesAll = rchyEdges0 = rchyEdges0 = rchyNodes = rchyEdges = rchyNodesDiff = rchyEdgesDiff = NULL #all nodes/edges, gene #of , file #of
for (gi in 1:length(uniqueVals)) {
fnInG = fileNames0[[rfi]][fileNames0[[rfi]] %in% sampleMeta$fileName[sampleMeta$gene==uniqueVals[gi]]]
if (length(fnInG)>0) {
nodes = edges = NA
for (fnii in 1:length(fnInG)) {
fni = fnInG[fnii]
rchy = get(load(paste0(rchyfolders[rfi],"/",fni,"_merged.Rdata")))
nodesAll[[fni]] = nodes0 = Reduce("union",sapply(rchy,function(x) x@nodes[1,]))
edgesAll[[fni]] = edges0 = Reduce("union",sapply(rchy,function(x) x@edges[1,]))
if(!length(nodes)==0) {
if (is.na(nodes)) {
nodes = nodes0
} else {
nodes = intersect(nodes,nodes0)
}
}
rchyNodes0[fni] = length(nodes0)
if(!length(edges)==0) {
if (is.na(edges)) { edges = edges0
} else {
edges = intersect(edges,edges0)
}
}
rchyEdges0[fni] = length(edges0)
}
rchyEdges[gi] = length(edges)
rchyNodes[gi] = length(nodes)
rchyNodesDiff[gi] = length(setdiff(Reduce('union',nodesAll[fnInG]), nodes))
rchyEdgesDiff[gi] = length(setdiff(Reduce('union',edgesAll[fnInG]), edges))
} else {
rchyEdges[gi] = rchyNodes[gi] = rchyNodesDiff[gi] = rchyEdgesDiff[gi] = 0
}
}
rchyNodes0 = rchyNodes0[match(fn,names(rchyNodes0))]
rchyEdges0 = rchyEdges0[match(fn,names(rchyEdges0))]
rchyFile = cbind(rchyNodes0,rchyEdges0)
colnames(rchyFile) = c(paste0("nodes_Rchy_",fileNames(rchyfolders[rfi])),paste0("edges_Rchy)",fileNames(rchyfolders[rfi])))
rchyGene = cbind(rchyEdges,rchyNodes,rchyNodesDiff,rchyEdgesDiff)
colnames(rchyGene) = c(paste0("nodesInCommon_Rchy_",fileNames(rchyfolders[rfi])),paste0("edgesInCommon_Rchy",fileNames(rchyfolders[rfi])),
paste0("edgesNotInCommon_Rchy",fileNames(rchyfolders[rfi])),paste0("edgesNotInCommon_Rchy",fileNames(rchyfolders[rfi])))
#inCommon between files
rchyNodesCommon = rchyEdgesCommon = rchyNodesDiff = rchyEdgesDiff = matrix(0,nrow=length(fileNames0[[rfi]]),ncol=length(fileNames0[[rfi]]))
for (fni in 2:length(fileNames0[[rfi]])) {
for (fnj in 1:(fni-1)) {
rchyNodesCommon[fni,fnj] = rchyNodesCommon[fnj,fni] = length(intersect(nodesAll[[fni]],nodesAll[[fnj]]))
rchyEdgesCommon[fni,fnj] = rchyEdgesCommon[fnj,fni] = length(intersect(edgesAll[[fni]],edgesAll[[fnj]]))
rchyNodesDiff[fni,fnj] = rchyNodesDiff[fnj,fni] = length(union(setdiff(nodesAll[[fni]],nodesAll[[fnj]]),setdiff(nodesAll[[fnj]],nodesAll[[fni]])))
rchyEdgesDiff[fni,fnj] = rchyEdgesDiff[fnj,fni] = length(union(setdiff(nodesAll[[fni]],nodesAll[[fnj]]),setdiff(edgesAll[[fnj]],edgesAll[[fni]])))
}
}
rchyNodesCommon = cbind(sampleMeta$gene[match(fileNames0[[rfi]],sampleMeta$fileName)],fileNames0[[rfi]],rchyNodesCommon)
rchyEdgesCommon = cbind(sampleMeta$gene[match(fileNames0[[rfi]],sampleMeta$fileName)],fileNames0[[rfi]],rchyEdgesCommon)
rchyNodesDiff = cbind(sampleMeta$gene[match(fileNames0[[rfi]],sampleMeta$fileName)],fileNames0[[rfi]],rchyNodesDiff)
rchyEdgesDiff = cbind(sampleMeta$gene[match(fileNames0[[rfi]],sampleMeta$fileName)],fileNames0[[rfi]],rchyEdgesDiff)
colnames(rchyNodesCommon) = colnames(rchyEdgesCommon) = colnames(rchyNodesDiff) = colnames(rchyEdgesDiff) = c("gene","fileName",fileNames0[[rfi]])
save(rchyNodesCommon,file=paste0(rchyNodesFile_dir,"_",fileNames(rchyfolders[rfi]),".Rdata"))
write.csv(rchyNodesCommon,file=paste0(rchyNodesFile_dir,"_",fileNames(rchyfolders[rfi]),".csv"))
save(rchyEdgesCommon,file=paste0(rchyEdgesFile_dir,"_",fileNames(rchyfolders[rfi]),".Rdata"))
write.csv(rchyEdgesCommon,file=paste0(rchyEdgesFile_dir,"_",fileNames(rchyfolders[rfi]),".csv"))
save(rchyNodesDiff,file=paste0(rchyNodesFileDiff_dir,"_",fileNames(rchyfolders[rfi]),".Rdata"))
write.csv(rchyNodesDiff,file=paste0(rchyNodesFileDiff_dir,"_",fileNames(rchyfolders[rfi]),".csv"))
save(rchyEdgesDiff,file=paste0(rchyEdgesFileDiff_dir,"_",fileNames(rchyfolders[rfi]),".Rdata"))
write.csv(rchyEdgesDiff,file=paste0(rchyEdgesFileDiff_dir,"_",fileNames(rchyfolders[rfi]),".csv"))
return(list(rchyGene=rchyGene,rchyFile=rchyFile))
}
rchyGene0 = cbind(uniqueVals,Reduce('cbind',lapply(result,function(x) x$rchyGene)))
rchyFile0 = cbind(fn,Reduce('cbind',lapply(result,function(x) x$rchyFile)))
colnames(rchyGene0)[1] = "gene"
save(rchyGene0,file=paste0(rchyGene_dir,".Rdata"))
write.csv(rchyGene0,file=paste0(rchyGene_dir,".csv"))
colnames(rchyFile0)[1] = "file"
save(rchyFile0,file=paste0(rchyFile_dir,".Rdata"))
write.csv(rchyFile0,file=paste0(rchyFile_dir,".csv"))
TimeOutput(start1)
#file stats
nLayers = nSig = NULL
for (fi in 1:nrow(m0)) {
sigs = m0[fi,]!=0
nLayers[fi] = max(phenoMeta$phenolevel[sigs])
nSig[fi] = sum(sigs)
}
inFile = cbind(sampleMeta[,interestedCols],nLayers,nSig)
colnames(inFile) = c(interestedCols,"nlayers","nSigNodes")
rownames(inFile) = NULL
save(inFile,file=paste0(perFile_dir,".Rdata"))
write.csv(inFile,file=paste0(perFile_dir,".csv"))
#sig nodes in common between files
# sigFile0 = matrix(0,nrow=nrow(m0),ncol=nrow(m0))
# sigFileNot0 = matrix(0,nrow=nrow(m0),ncol=nrow(m0))
#
# break on task 2
loop.ind = triLoopInd(nrow(m0),no_cores)
sigF = foreach (i = 1:length(loop.ind)) %dopar% {
sigFile0 = sigFileNot0 = list()
indexx = 1
for (fi in loop.ind[[i]][1]:loop.ind[[i]][2]) {
sigFile = rep(0,nrow(m0))
sigFileNot = rep(0,nrow(m0))
for (fj in (fi+1):nrow(m0)) {
sigFile[fj] = sum(apply(m0[c(fi,fj),], 2, function(x) all(x!=0)))
sigFileNot[fj] = sum(apply(m0[c(fi,fj),], 2, function(x) !all(x!=0) & any(x!=0) ))
}
sigFile0[[indexx]] = sigFile
sigFileNot0[[indexx]] = sigFileNot
indexx = indexx+1
}
return(list(sigFile0=Reduce('cbind',sigFile0),sigFileNot0=Reduce('cbind',sigFileNot0)))
}
sigFileNot = cbind(rep(0,nrow(m0)), Reduce('cbind',lapply(sigF,function(x)x$sigFileNot0)) )
for (fi in 2:nrow(m0)) { for (fj in 1:(fi-1)) { sigFileNot[fj,fi] = sigFileNot[fi,fj] } }
sigFileNot = cbind(sampleMeta[,interestedCols],sigFileNot)
sigFile = cbind(rep(0,nrow(m0)), Reduce('cbind',lapply(sigF,function(x)x$sigFile0)))
for (fi in 2:nrow(m0)) { for (fj in 1:(fi-1)) { sigFile[fj,fi] = sigFile[fi,fj] } }
sigFile = cbind(sampleMeta[,interestedCols],sigFile)
colnames(sigFile) = colnames(sigFileNot) = c(interestedCols,rownames(m0))
rownames(sigFile) = rownames(sigFileNot) = NULL
save(sigFile,file=paste0(sigFile_dir,".Rdata"))
write.csv(sigFile,file=paste0(sigFile_dir,".csv"))
save(sigFileNot,file=paste0(sigFile_dir,"Not.Rdata"))
write.csv(sigFileNot,file=paste0(sigFile_dir,"Not.csv"))
TimeOutput(start1)
#sig nodes in common per gene
inCommonDir = inCommon = NULL
# for (uvi in 1:length(uniqueVals)) {
result = foreach (uvi = 1:length(uniqueVals)) %dopar% {
inCommonDir = inCommon = NULL
uv = uniqueVals[uvi]
muv = m0[sampleMeta$gene==uv,]
if (is.null(dim(muv))) { muv = matrix(muv,nrow=1); colnames(muv) = colnames(m0) }
incommonnNOT = apply(muv, 2, function(x) !all(x!=0) & any(x!=0))
incommonn = apply(muv, 2, function(x) all(x!=0))
incommonDirr = apply(muv, 2, function(x) all(x>0)) | apply(muv, 2, function(x) all(x<0))==T
return(list(inCommon=sum(incommonn),inCommonDir=sum(incommonDirr),inCommonNOT=sum(incommonnNOT)))
# inCommon[uvi] = sum(incommonn)
# inCommonDir[uvi] = sum(incommonDirr)
}
inCommon = sapply(result, function(x) x$inCommon)
inCommonDir = sapply(result, function(x) x$inCommonDir)
incommonnNOT = sapply(result, function(x) x$incommonNOT)
inComm = cbind(uniqueVals,table(sampleMeta$gene),inCommonDir,inCommon,incommonnNOT)
rownames(inComm) = NULL
colnames(inComm) = c("gene","filesPerGene","nodesInCommon_UpDown","nodesInCommon_noUpDown","nodesNotInCommon")
save(inComm,file=paste0(sigGene_dir,".Rdata"))
write.csv(inComm,file=paste0(sigGene_dir,".csv"))
TimeOutput(start1)
}
TimeOutput(start)
|
/flowtype-IMPC_pipeline-master/04_stat_pvalue.R
|
no_license
|
aya49/flowGraph_experiments
|
R
| false
| false
| 11,419
|
r
|
# Count Stats: significant cell population stats
# aya43@sfu.ca 20161220
#Directory
root = "~/projects/IMPC"
result_dir = "result"; suppressWarnings(dir.create (result_dir))
setwd(root)
panelL = c("P1")
centreL = c("Sanger_SPLEEN")#,"Sanger_MLN","CIPHE","TCP","H")
#Options
options(stringsAsFactors=FALSE)
# options(device="cairo")
options(na.rm=T)
#Input
phenoMeta_dir = paste(result_dir, "/", panelL, "/", centreL, "/phenoMeta.Rdata", sep="")
sampleMeta_dir = paste(result_dir, "/", panelL, "/", centreL, "/sampleMeta.Rdata", sep="")
matrixPvalTRIM_dir = paste(result_dir, "/", panelL, "/", centreL, "/matrixPvalTRIM_CountAdj.Rdata",sep="")
rchy_dir = paste(result_dir, "/", panelL, "/", centreL, "/rchy", sep="")
#Output
stats_dir = paste(result_dir, "/", panelL, "/", centreL, "/stats", sep=""); for (ri in 1:length(stats_dir)) { suppressWarnings(dir.create(stats_dir[ri])) }
sigGene_dir = paste(stats_dir, "/sigGene", sep="") #sig nodes in common per gene
perFile_dir = paste(stats_dir, "/perFile", sep="") #sig nodes & layers per file
sigFile_dir = paste(stats_dir, "/sigFile", sep="") #sig nodes in common per file
rchyGene_dir = paste(stats_dir, "/rchyGene", sep="") #should be max of all included
rchyFile_dir = paste(stats_dir, "/rchyFile", sep="") #should be max of all included
rchyEdgesFile_dir = paste(stats_dir, "/rchyEdgesFile", sep="") #should be max of all included
rchyNodesFile_dir = paste(stats_dir, "/rchyNodesFile", sep="") #should be max of all included
rchyEdgesFileDiff_dir = paste(stats_dir, "/rchyEdgesFileDiff", sep="") #should be max of all included
rchyNodesFileDiff_dir = paste(stats_dir, "/rchyNodesFileDiff", sep="") #should be max of all included
source("~/projects/IMPC/code/_funcAlice.R")
libr("foreach")
libr("doMC")
libr("RchyOptimyx")
#Setup Cores
no_cores = 1#detectCores()-1
registerDoMC(no_cores)
#Options for script
interestedCols = c("fileName","gene")
start = Sys.time()
for (ci in 1:length(paste0(panelL,centreL))) {
start1 = Sys.time()
cat("\n",paste0(panelL," ",centreL)[ci],"; loading matrix ", sep="")
m0 = get(load(matrixPvalTRIM_dir[ci]))
sampleMeta0 = get(load(sampleMeta_dir[ci]))
sampleMeta = sampleMeta0[match(rownames(m0),sampleMeta0$fileName),]
phenoMeta0 = get(load(phenoMeta_dir[ci]))
phenoMeta = phenoMeta0[match(colnames(m0),phenoMeta0$phenotype),]
uniqueVals = sort(unique(sampleMeta[,interestedCols[2]]))
#rchy stats
rchyfolders = list.dirs(rchy_dir[ci],full.names=T,recursive=F)
rchyfolders = rchyfolders[sapply(rchyfolders, function(x) length(list.files(x))>0)]
fileNames0 = lapply(rchyfolders, function(rf) list.files(rf,full.names=T,recursive=F,pattern="_merged"))
fileNames0 = lapply(fileNames0, function(x) gsub("_merged.Rdata",".fcs",fileNames(x)))
fn = unique(Reduce("union",fileNames0))
rchyNodes0 = rchyEdges0 = matrix(0,nrow=length(fn),ncol=length(rchyfolders),dimnames=list(fn,NULL))
#break on task 1
result = foreach (rfi = 1:length(rchyfolders)) %dopar% {
nodesAll = edgesAll = rchyEdges0 = rchyEdges0 = rchyNodes = rchyEdges = rchyNodesDiff = rchyEdgesDiff = NULL #all nodes/edges, gene #of , file #of
for (gi in 1:length(uniqueVals)) {
fnInG = fileNames0[[rfi]][fileNames0[[rfi]] %in% sampleMeta$fileName[sampleMeta$gene==uniqueVals[gi]]]
if (length(fnInG)>0) {
nodes = edges = NA
for (fnii in 1:length(fnInG)) {
fni = fnInG[fnii]
rchy = get(load(paste0(rchyfolders[rfi],"/",fni,"_merged.Rdata")))
nodesAll[[fni]] = nodes0 = Reduce("union",sapply(rchy,function(x) x@nodes[1,]))
edgesAll[[fni]] = edges0 = Reduce("union",sapply(rchy,function(x) x@edges[1,]))
if(!length(nodes)==0) {
if (is.na(nodes)) {
nodes = nodes0
} else {
nodes = intersect(nodes,nodes0)
}
}
rchyNodes0[fni] = length(nodes0)
if(!length(edges)==0) {
if (is.na(edges)) { edges = edges0
} else {
edges = intersect(edges,edges0)
}
}
rchyEdges0[fni] = length(edges0)
}
rchyEdges[gi] = length(edges)
rchyNodes[gi] = length(nodes)
rchyNodesDiff[gi] = length(setdiff(Reduce('union',nodesAll[fnInG]), nodes))
rchyEdgesDiff[gi] = length(setdiff(Reduce('union',edgesAll[fnInG]), edges))
} else {
rchyEdges[gi] = rchyNodes[gi] = rchyNodesDiff[gi] = rchyEdgesDiff[gi] = 0
}
}
rchyNodes0 = rchyNodes0[match(fn,names(rchyNodes0))]
rchyEdges0 = rchyEdges0[match(fn,names(rchyEdges0))]
rchyFile = cbind(rchyNodes0,rchyEdges0)
colnames(rchyFile) = c(paste0("nodes_Rchy_",fileNames(rchyfolders[rfi])),paste0("edges_Rchy)",fileNames(rchyfolders[rfi])))
rchyGene = cbind(rchyEdges,rchyNodes,rchyNodesDiff,rchyEdgesDiff)
colnames(rchyGene) = c(paste0("nodesInCommon_Rchy_",fileNames(rchyfolders[rfi])),paste0("edgesInCommon_Rchy",fileNames(rchyfolders[rfi])),
paste0("edgesNotInCommon_Rchy",fileNames(rchyfolders[rfi])),paste0("edgesNotInCommon_Rchy",fileNames(rchyfolders[rfi])))
#inCommon between files
rchyNodesCommon = rchyEdgesCommon = rchyNodesDiff = rchyEdgesDiff = matrix(0,nrow=length(fileNames0[[rfi]]),ncol=length(fileNames0[[rfi]]))
for (fni in 2:length(fileNames0[[rfi]])) {
for (fnj in 1:(fni-1)) {
rchyNodesCommon[fni,fnj] = rchyNodesCommon[fnj,fni] = length(intersect(nodesAll[[fni]],nodesAll[[fnj]]))
rchyEdgesCommon[fni,fnj] = rchyEdgesCommon[fnj,fni] = length(intersect(edgesAll[[fni]],edgesAll[[fnj]]))
rchyNodesDiff[fni,fnj] = rchyNodesDiff[fnj,fni] = length(union(setdiff(nodesAll[[fni]],nodesAll[[fnj]]),setdiff(nodesAll[[fnj]],nodesAll[[fni]])))
rchyEdgesDiff[fni,fnj] = rchyEdgesDiff[fnj,fni] = length(union(setdiff(nodesAll[[fni]],nodesAll[[fnj]]),setdiff(edgesAll[[fnj]],edgesAll[[fni]])))
}
}
rchyNodesCommon = cbind(sampleMeta$gene[match(fileNames0[[rfi]],sampleMeta$fileName)],fileNames0[[rfi]],rchyNodesCommon)
rchyEdgesCommon = cbind(sampleMeta$gene[match(fileNames0[[rfi]],sampleMeta$fileName)],fileNames0[[rfi]],rchyEdgesCommon)
rchyNodesDiff = cbind(sampleMeta$gene[match(fileNames0[[rfi]],sampleMeta$fileName)],fileNames0[[rfi]],rchyNodesDiff)
rchyEdgesDiff = cbind(sampleMeta$gene[match(fileNames0[[rfi]],sampleMeta$fileName)],fileNames0[[rfi]],rchyEdgesDiff)
colnames(rchyNodesCommon) = colnames(rchyEdgesCommon) = colnames(rchyNodesDiff) = colnames(rchyEdgesDiff) = c("gene","fileName",fileNames0[[rfi]])
save(rchyNodesCommon,file=paste0(rchyNodesFile_dir,"_",fileNames(rchyfolders[rfi]),".Rdata"))
write.csv(rchyNodesCommon,file=paste0(rchyNodesFile_dir,"_",fileNames(rchyfolders[rfi]),".csv"))
save(rchyEdgesCommon,file=paste0(rchyEdgesFile_dir,"_",fileNames(rchyfolders[rfi]),".Rdata"))
write.csv(rchyEdgesCommon,file=paste0(rchyEdgesFile_dir,"_",fileNames(rchyfolders[rfi]),".csv"))
save(rchyNodesDiff,file=paste0(rchyNodesFileDiff_dir,"_",fileNames(rchyfolders[rfi]),".Rdata"))
write.csv(rchyNodesDiff,file=paste0(rchyNodesFileDiff_dir,"_",fileNames(rchyfolders[rfi]),".csv"))
save(rchyEdgesDiff,file=paste0(rchyEdgesFileDiff_dir,"_",fileNames(rchyfolders[rfi]),".Rdata"))
write.csv(rchyEdgesDiff,file=paste0(rchyEdgesFileDiff_dir,"_",fileNames(rchyfolders[rfi]),".csv"))
return(list(rchyGene=rchyGene,rchyFile=rchyFile))
}
rchyGene0 = cbind(uniqueVals,Reduce('cbind',lapply(result,function(x) x$rchyGene)))
rchyFile0 = cbind(fn,Reduce('cbind',lapply(result,function(x) x$rchyFile)))
colnames(rchyGene0)[1] = "gene"
save(rchyGene0,file=paste0(rchyGene_dir,".Rdata"))
write.csv(rchyGene0,file=paste0(rchyGene_dir,".csv"))
colnames(rchyFile0)[1] = "file"
save(rchyFile0,file=paste0(rchyFile_dir,".Rdata"))
write.csv(rchyFile0,file=paste0(rchyFile_dir,".csv"))
TimeOutput(start1)
#file stats
nLayers = nSig = NULL
for (fi in 1:nrow(m0)) {
sigs = m0[fi,]!=0
nLayers[fi] = max(phenoMeta$phenolevel[sigs])
nSig[fi] = sum(sigs)
}
inFile = cbind(sampleMeta[,interestedCols],nLayers,nSig)
colnames(inFile) = c(interestedCols,"nlayers","nSigNodes")
rownames(inFile) = NULL
save(inFile,file=paste0(perFile_dir,".Rdata"))
write.csv(inFile,file=paste0(perFile_dir,".csv"))
#sig nodes in common between files
# sigFile0 = matrix(0,nrow=nrow(m0),ncol=nrow(m0))
# sigFileNot0 = matrix(0,nrow=nrow(m0),ncol=nrow(m0))
#
# break on task 2
loop.ind = triLoopInd(nrow(m0),no_cores)
sigF = foreach (i = 1:length(loop.ind)) %dopar% {
sigFile0 = sigFileNot0 = list()
indexx = 1
for (fi in loop.ind[[i]][1]:loop.ind[[i]][2]) {
sigFile = rep(0,nrow(m0))
sigFileNot = rep(0,nrow(m0))
for (fj in (fi+1):nrow(m0)) {
sigFile[fj] = sum(apply(m0[c(fi,fj),], 2, function(x) all(x!=0)))
sigFileNot[fj] = sum(apply(m0[c(fi,fj),], 2, function(x) !all(x!=0) & any(x!=0) ))
}
sigFile0[[indexx]] = sigFile
sigFileNot0[[indexx]] = sigFileNot
indexx = indexx+1
}
return(list(sigFile0=Reduce('cbind',sigFile0),sigFileNot0=Reduce('cbind',sigFileNot0)))
}
sigFileNot = cbind(rep(0,nrow(m0)), Reduce('cbind',lapply(sigF,function(x)x$sigFileNot0)) )
for (fi in 2:nrow(m0)) { for (fj in 1:(fi-1)) { sigFileNot[fj,fi] = sigFileNot[fi,fj] } }
sigFileNot = cbind(sampleMeta[,interestedCols],sigFileNot)
sigFile = cbind(rep(0,nrow(m0)), Reduce('cbind',lapply(sigF,function(x)x$sigFile0)))
for (fi in 2:nrow(m0)) { for (fj in 1:(fi-1)) { sigFile[fj,fi] = sigFile[fi,fj] } }
sigFile = cbind(sampleMeta[,interestedCols],sigFile)
colnames(sigFile) = colnames(sigFileNot) = c(interestedCols,rownames(m0))
rownames(sigFile) = rownames(sigFileNot) = NULL
save(sigFile,file=paste0(sigFile_dir,".Rdata"))
write.csv(sigFile,file=paste0(sigFile_dir,".csv"))
save(sigFileNot,file=paste0(sigFile_dir,"Not.Rdata"))
write.csv(sigFileNot,file=paste0(sigFile_dir,"Not.csv"))
TimeOutput(start1)
#sig nodes in common per gene
inCommonDir = inCommon = NULL
# for (uvi in 1:length(uniqueVals)) {
result = foreach (uvi = 1:length(uniqueVals)) %dopar% {
inCommonDir = inCommon = NULL
uv = uniqueVals[uvi]
muv = m0[sampleMeta$gene==uv,]
if (is.null(dim(muv))) { muv = matrix(muv,nrow=1); colnames(muv) = colnames(m0) }
incommonnNOT = apply(muv, 2, function(x) !all(x!=0) & any(x!=0))
incommonn = apply(muv, 2, function(x) all(x!=0))
incommonDirr = apply(muv, 2, function(x) all(x>0)) | apply(muv, 2, function(x) all(x<0))==T
return(list(inCommon=sum(incommonn),inCommonDir=sum(incommonDirr),inCommonNOT=sum(incommonnNOT)))
# inCommon[uvi] = sum(incommonn)
# inCommonDir[uvi] = sum(incommonDirr)
}
inCommon = sapply(result, function(x) x$inCommon)
inCommonDir = sapply(result, function(x) x$inCommonDir)
incommonnNOT = sapply(result, function(x) x$incommonNOT)
inComm = cbind(uniqueVals,table(sampleMeta$gene),inCommonDir,inCommon,incommonnNOT)
rownames(inComm) = NULL
colnames(inComm) = c("gene","filesPerGene","nodesInCommon_UpDown","nodesInCommon_noUpDown","nodesNotInCommon")
save(inComm,file=paste0(sigGene_dir,".Rdata"))
write.csv(inComm,file=paste0(sigGene_dir,".csv"))
TimeOutput(start1)
}
TimeOutput(start)
|
# Outer estimation of the factor scores
step4 <-
function(data, outerW, model, pairwise){
blocks <- model$blocks
if(pairwise){
Latent <- matrix(NA, nrow=nrow(data), ncol=length(model$latent)) # factor scores
colnames(Latent) <- model$latent
for(i in model$latent){
mf <- as.matrix(data[ , blocks[[i]] ])
#Latent[,i] <- mf %*% as.matrix(outerW[blocks[[i]], i])
Latent[,i] <- mf %*% outerW[blocks[[i]], i, drop=FALSE]
}
Latent <- scale(Latent)
}
else {Latent <- scale(as.matrix(data) %*% outerW)} # old
# the attributes for the scale are meaningless
# No, they are meaningfull: w'Sw=1
#attributes(Latent)[c(3,4)] <- NULL
## Alternatively: without scaling in each iteration
# else {Latent <- scale(as.matrix(data) %*% outerW, center = TRUE, scale = FALSE)
# attr(Latent, "scaled:scale") <- 1}
return(Latent)
}
|
/semPLS/R/step4.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 878
|
r
|
# Outer estimation of the factor scores
step4 <-
function(data, outerW, model, pairwise){
  blocks <- model$blocks
  if (pairwise) {
    # Pairwise deletion: each latent variable's scores are built only
    # from its own block of manifest variables, then standardised.
    score_cols <- lapply(model$latent, function(lv) {
      mv <- as.matrix(data[, blocks[[lv]]])
      mv %*% outerW[blocks[[lv]], lv, drop = FALSE]
    })
    scores <- do.call(cbind, score_cols)
    dimnames(scores) <- list(NULL, model$latent)
    Latent <- scale(scores)
  } else {
    # Complete data: a single matrix product yields all factor scores.
    # The attributes attached by scale() are meaningful here: w'Sw = 1.
    Latent <- scale(as.matrix(data) %*% outerW)
  }
  return(Latent)
}
|
# Extracted example code for plot.mbc from the 'comparer' package docs.
library(comparer)
### Name: plot.mbc
### Title: Plot mbc class
### Aliases: plot.mbc
### ** Examples
# Benchmark two summary functions (mean vs. median) on the same random
# input; each wrapped call sleeps a random time so run times differ.
m1 <- mbc(function(x) {Sys.sleep(rexp(1, 30));mean(x)},
 function(x) {Sys.sleep(rexp(1, 5));median(x)}, input=runif(100))
# Plot the resulting mbc comparison object (dispatches to plot.mbc).
plot(m1)
|
/data/genthat_extracted_code/comparer/examples/plot.mbc.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 240
|
r
|
# Extracted example code for plot.mbc from the 'comparer' package docs.
library(comparer)
### Name: plot.mbc
### Title: Plot mbc class
### Aliases: plot.mbc
### ** Examples
# Benchmark two summary functions (mean vs. median) on the same random
# input; each wrapped call sleeps a random time so run times differ.
m1 <- mbc(function(x) {Sys.sleep(rexp(1, 30));mean(x)},
 function(x) {Sys.sleep(rexp(1, 5));median(x)}, input=runif(100))
# Plot the resulting mbc comparison object (dispatches to plot.mbc).
plot(m1)
|
#' Thai major rice production of the 2562/63 (2019/20) crop year by province
#'
#' Production data from the Office of Agricultural Economics (OAE), Thailand.
#'
#' @docType data
#'
#' @usage data(rice_2562)
#'
#' @format An object of class \code{"tibble"}.
#'
#' @keywords datasets
#'
#' @source \href{http://www.oae.go.th}{Office of Agricultural Economics (OAE)}
#'
#' @examples
#' data(rice_2562)
#' head(rice_2562)
"rice_2562"
|
/R/rice_2562.R
|
permissive
|
pasin008/mapthai
|
R
| false
| false
| 361
|
r
|
#' Thai major rice production of the 2562/63 (2019/20) crop year by province
#'
#' Production data from the Office of Agricultural Economics (OAE), Thailand.
#'
#' @docType data
#'
#' @usage data(rice_2562)
#'
#' @format An object of class \code{"tibble"}.
#'
#' @keywords datasets
#'
#' @source \href{http://www.oae.go.th}{Office of Agricultural Economics (OAE)}
#'
#' @examples
#' data(rice_2562)
#' head(rice_2562)
"rice_2562"
|
# UI definition for the RiskStratifiedEstimation results explorer
# (shinydashboard layout).  The choice vectors referenced here
# (mapExposures, mapOutcomes, stratOptions, databaseOptions,
# analysisTypeOptions) and the helper addInfo() are assumed to be
# defined in the app's global environment -- TODO confirm in global.R.
shiny::shinyUI(
  shinydashboard::dashboardPage(
    skin = "blue",
    shinydashboard::dashboardHeader(
      title = "RiskStratifiedEstimation"
    ),
    shinydashboard::dashboardSidebar(
      # Top-level menu holding the (initially selected) analysis tab.
      shinydashboard::sidebarMenu(
        id = "menu1",
        shinydashboard::menuItem(
          tabName = "analysis",
          text = "Analysis",
          icon = icon("cogs"),
          selected = TRUE
        )
      ),
      shinydashboard::sidebarMenu(
        # CSS tweak: make these two selectize dropdowns open upwards so
        # they are not clipped at the bottom of the sidebar.
        tags$head(
          tags$style(
            HTML(
              '#estOutcomeEstimation+ div>.selectize-dropdown{bottom: 100% !important; top:auto!important;}'
            )
          )
        ),
        tags$head(
          tags$style(
            HTML(
              '#predictionPopulation+ div>.selectize-dropdown{bottom: 100% !important; top:auto!important;}'
            )
          )
        ),
        id = "menu2",
        shinydashboard::menuItem(
          tabName = "estimation",
          text = "Estimation",
          icon = icon("chart-bar")
        ),
        shinydashboard::menuItem(
          tabName = "prediction",
          text = "Prediction",
          icon = icon("dice-six")
        ),
        # Inputs shared by all tabs.
        shiny::selectInput(
          "treatment",
          "Treatment",
          unique(mapExposures$exposure_name),
          selected = unique(mapExposures$exposure_name)[1]
        ),
        shiny::selectInput(
          "comparator",
          "Comparator",
          unique(mapExposures$exposure_name),
          selected = unique(mapExposures$exposure_name)[2]
        ),
        shiny::selectInput(
          "stratOutcome",
          "Stratification Outcome",
          stratOptions,
          stratOptions[1]
        ),
        # NOTE(review): the "max: 5" limit in the label is presumably
        # enforced server-side -- confirm in server.R.
        shiny::selectInput(
          "estOutcome",
          "Estimation outcome (max: 5)",
          unique(mapOutcomes$outcome_name),
          selected = "",
          multiple = TRUE,
          selectize = TRUE
        ),
        addInfo(
          item = shiny::selectizeInput(
            "database",
            "Database",
            unique(databaseOptions),
            unique(databaseOptions)[1]
          ),
          infoId = "testInfo"
        ),
        shiny::checkboxGroupInput(
          "analysis",
          "Analysis",
          unique(analysisTypeOptions),
          unique(analysisTypeOptions)[1]
        ),
        # Extra input shown only while the estimation tab is active.
        shiny::conditionalPanel(
          condition = "input.menu2 == 'estimation'",
          shiny::selectInput(
            "estOutcomeEstimation",
            "Evaluate outcome",
            choices = unique(mapOutcomes$outcome_name),
            selected = ""
          )
        ),
        # Extra input shown only while the prediction tab is active.
        shiny::conditionalPanel(
          condition = "input.menu2 == 'prediction'",
          shiny::selectInput(
            "predictionPopulation",
            "Cohort",
            c("Comparator",
              "EntirePopulation",
              "Matched",
              "Treatment"),
            selected = "EntirePopulation"
          )
        )
      )
    ),
    shinydashboard::dashboardBody(
      shinydashboard::tabItems(
        # Main analysis tab: incidence / relative / absolute result
        # tables plus the combined risk-stratified plot.
        shinydashboard::tabItem(
          tabName = "analysis",
          shiny::tabsetPanel(
            id = "relativePanel",
            shiny::tabPanel(
              "Incidence",
              DT::dataTableOutput("mainTableIncidence")
            ),
            shiny::tabPanel(
              "Relative",
              DT::dataTableOutput("mainTableRelative")
            ),
            shiny::tabPanel(
              "Absolute",
              DT::dataTableOutput("mainTableAbsolute")
            ),
            shiny::tabPanel(
              "Risk stratified analysis",
              plotly::plotlyOutput("combinedPlot",
                                height = "600px")
            )
          )
        ),
        # Estimation diagnostics: propensity scores and covariate balance.
        shinydashboard::tabItem(
          tabName = "estimation",
          shiny::tabsetPanel(
            id = "estimationTabset",
            shiny::tabPanel(
              "Propensity scores",
              shiny::plotOutput(
                "evaluationPlotPs",
                height = "600px"
              )
            ),
            shiny::tabPanel(
              "Covariate balance",
              shiny::plotOutput(
                "evaluationPlotBalance",
                height = "600px"
              )
            )
          )
        ),
        # Prediction diagnostics: calibration and discrimination plots.
        shinydashboard::tabItem(
          tabName = "prediction",
          shiny::fluidRow(
            shinydashboard::box(
              status = "info",
              title = "Calibration",
              shiny::plotOutput(
                "calibrationPlot",
                height = "600px"
              )
            ),
            shinydashboard::box(
              status = "info",
              title = "Discrimination",
              shiny::plotOutput(
                "discriminationPlot",
                height = "600px"
              )
            )
          )
        )
      )
    )
  )
)
|
/inst/shiny/ui.R
|
permissive
|
mi-erasmusmc/RiskStratifiedEstimation
|
R
| false
| false
| 4,944
|
r
|
# UI definition for the RiskStratifiedEstimation results explorer
# (shinydashboard layout).  The choice vectors referenced here
# (mapExposures, mapOutcomes, stratOptions, databaseOptions,
# analysisTypeOptions) and the helper addInfo() are assumed to be
# defined in the app's global environment -- TODO confirm in global.R.
shiny::shinyUI(
  shinydashboard::dashboardPage(
    skin = "blue",
    shinydashboard::dashboardHeader(
      title = "RiskStratifiedEstimation"
    ),
    shinydashboard::dashboardSidebar(
      # Top-level menu holding the (initially selected) analysis tab.
      shinydashboard::sidebarMenu(
        id = "menu1",
        shinydashboard::menuItem(
          tabName = "analysis",
          text = "Analysis",
          icon = icon("cogs"),
          selected = TRUE
        )
      ),
      shinydashboard::sidebarMenu(
        # CSS tweak: make these two selectize dropdowns open upwards so
        # they are not clipped at the bottom of the sidebar.
        tags$head(
          tags$style(
            HTML(
              '#estOutcomeEstimation+ div>.selectize-dropdown{bottom: 100% !important; top:auto!important;}'
            )
          )
        ),
        tags$head(
          tags$style(
            HTML(
              '#predictionPopulation+ div>.selectize-dropdown{bottom: 100% !important; top:auto!important;}'
            )
          )
        ),
        id = "menu2",
        shinydashboard::menuItem(
          tabName = "estimation",
          text = "Estimation",
          icon = icon("chart-bar")
        ),
        shinydashboard::menuItem(
          tabName = "prediction",
          text = "Prediction",
          icon = icon("dice-six")
        ),
        # Inputs shared by all tabs.
        shiny::selectInput(
          "treatment",
          "Treatment",
          unique(mapExposures$exposure_name),
          selected = unique(mapExposures$exposure_name)[1]
        ),
        shiny::selectInput(
          "comparator",
          "Comparator",
          unique(mapExposures$exposure_name),
          selected = unique(mapExposures$exposure_name)[2]
        ),
        shiny::selectInput(
          "stratOutcome",
          "Stratification Outcome",
          stratOptions,
          stratOptions[1]
        ),
        # NOTE(review): the "max: 5" limit in the label is presumably
        # enforced server-side -- confirm in server.R.
        shiny::selectInput(
          "estOutcome",
          "Estimation outcome (max: 5)",
          unique(mapOutcomes$outcome_name),
          selected = "",
          multiple = TRUE,
          selectize = TRUE
        ),
        addInfo(
          item = shiny::selectizeInput(
            "database",
            "Database",
            unique(databaseOptions),
            unique(databaseOptions)[1]
          ),
          infoId = "testInfo"
        ),
        shiny::checkboxGroupInput(
          "analysis",
          "Analysis",
          unique(analysisTypeOptions),
          unique(analysisTypeOptions)[1]
        ),
        # Extra input shown only while the estimation tab is active.
        shiny::conditionalPanel(
          condition = "input.menu2 == 'estimation'",
          shiny::selectInput(
            "estOutcomeEstimation",
            "Evaluate outcome",
            choices = unique(mapOutcomes$outcome_name),
            selected = ""
          )
        ),
        # Extra input shown only while the prediction tab is active.
        shiny::conditionalPanel(
          condition = "input.menu2 == 'prediction'",
          shiny::selectInput(
            "predictionPopulation",
            "Cohort",
            c("Comparator",
              "EntirePopulation",
              "Matched",
              "Treatment"),
            selected = "EntirePopulation"
          )
        )
      )
    ),
    shinydashboard::dashboardBody(
      shinydashboard::tabItems(
        # Main analysis tab: incidence / relative / absolute result
        # tables plus the combined risk-stratified plot.
        shinydashboard::tabItem(
          tabName = "analysis",
          shiny::tabsetPanel(
            id = "relativePanel",
            shiny::tabPanel(
              "Incidence",
              DT::dataTableOutput("mainTableIncidence")
            ),
            shiny::tabPanel(
              "Relative",
              DT::dataTableOutput("mainTableRelative")
            ),
            shiny::tabPanel(
              "Absolute",
              DT::dataTableOutput("mainTableAbsolute")
            ),
            shiny::tabPanel(
              "Risk stratified analysis",
              plotly::plotlyOutput("combinedPlot",
                                height = "600px")
            )
          )
        ),
        # Estimation diagnostics: propensity scores and covariate balance.
        shinydashboard::tabItem(
          tabName = "estimation",
          shiny::tabsetPanel(
            id = "estimationTabset",
            shiny::tabPanel(
              "Propensity scores",
              shiny::plotOutput(
                "evaluationPlotPs",
                height = "600px"
              )
            ),
            shiny::tabPanel(
              "Covariate balance",
              shiny::plotOutput(
                "evaluationPlotBalance",
                height = "600px"
              )
            )
          )
        ),
        # Prediction diagnostics: calibration and discrimination plots.
        shinydashboard::tabItem(
          tabName = "prediction",
          shiny::fluidRow(
            shinydashboard::box(
              status = "info",
              title = "Calibration",
              shiny::plotOutput(
                "calibrationPlot",
                height = "600px"
              )
            ),
            shinydashboard::box(
              status = "info",
              title = "Discrimination",
              shiny::plotOutput(
                "discriminationPlot",
                height = "600px"
              )
            )
          )
        )
      )
    )
  )
)
|
## Table 4 -- stem survival probabilities by habitat, sex, and year.
## The same binomial JAGS model is fit twice: once for stems with low
## posterior z weights (w <= .1) and once for stems with high weights
## (w >= .9); posterior medians and 95% credible intervals are printed
## as LaTeX tables via xtable.
## NOTE(review): the semantics of the z weights (presumably a posterior
## class probability from the global model) should be confirmed against
## ../ModelBuild/GlobalModel.
library(coda)
library(gdata)
library(rjags)
library(broman)
library(plyr)
library(reshape)
library(xtable)
version <- "i-009f9c75a9206a0ac"
#######################################################################################################################################
# load data and attach the posterior mean z weights (one column per year)
load(file = "../Library/SalixBorerWide.rda")
load(file = paste0("../ModelBuild/GlobalModel/", version, "/MCMCsamplesZ.rda"))
chains <- apply(rbind(as.matrix(MCMCsamplesZ[[1]]), as.matrix(MCMCsamplesZ[[2]]), as.matrix(MCMCsamplesZ[[3]])), 2, mean)
zWeights <- as.data.frame(cbind(chains[1:2087], chains[2088:4174], chains[4175:6261]))
SalixBorerWide <- cbind(SalixBorerWide, zWeights)
#######################################################################################################################################
# build stem survival records for the two annual transitions;
# "denom" = alive at start of year, "num" = alive at end, "w" = z weight
work1 <- subset(SalixBorerWide, site09 == 1, select = c(habitat, sex, site09, site10, V1))
work2 <- subset(SalixBorerWide, site10 == 1, select = c(habitat, sex, site10, site11, V2))
names(work1) <- c("habitat", "sex", "denom", "num", "w")
names(work2) <- c("habitat", "sex", "denom", "num", "w")
work1$year <- 1
work2$year <- 2
work3 <- rbind(work1, work2)
# fraction of stems whose weight is decisive (near 0 or near 1)
work3$check <- 0
i <- which(work3$w <= .1 | work3$w >= .9)
work3$check[i] <- 1
table(work3$check) / dim(work3)[1]
# Aggregate alive/total counts per habitat x sex x year cell and package
# them as the data list expected by the JAGS model.
make_data_query <- function(stems) {
  counts <- ddply(stems, c("habitat", "sex", "year"), function(x) c(
    alive = sum(x$num, na.rm = TRUE),
    total = sum(x$denom, na.rm = TRUE)))
  counts$proportion <- counts$alive / counts$total
  list(
    alive = counts$alive,
    n = counts$total,
    habitat = counts$habitat + 1,  # recode 0/1 -> 1/2 for JAGS indexing
    sex = counts$sex + 1,
    year = counts$year)
}
# Write the JAGS model file, run three chains (adaptation + burn-in),
# and return the thinned coda samples.  The temporary model file is
# removed afterwards (the original script called unlist(), a no-op typo
# for unlink()).
fit_survival_model <- function(DataQuery) {
  sink("SurvivalStems.R")
  cat("
model {
# priors
for (i in 1:2) {
for (j in 1:2) {
for (k in 1:2) {
p[i, j, k] ~ dunif (0, 1)
}
}
}
# likelihood
for (i in 1:8) {
alive[i] ~ dbinom(p[habitat[i], sex[i], year[i]], n[i])
}
# mean comparisons
sex.riparian.2010 <- p[1, 1, 1] - p[1, 2, 1]
sex.riparian.2011 <- p[1, 1, 2] - p[1, 2, 2]
sex.upland.2010 <- p[2, 1, 1] - p[2, 2, 1]
sex.upland.2011 <- p[2, 1, 2] - p[2, 2, 2]
year.riparian.male <- p[1, 1, 1] - p[1, 1, 2]
year.riparian.female <- p[1, 2, 1] - p[1, 2, 2]
year.upland.male <- p[2, 1, 1] - p[2, 1, 2]
year.upland.female <- p[2, 2, 1] - p[2, 2, 2]
habitat.male.2010 <- p[1, 1, 1] - p[2, 1, 1]
habitat.female.2010 <- p[1, 2, 1] - p[2, 2, 1]
habitat.male.2011 <- p[1, 1, 2] - p[2, 1, 2]
habitat.female.2011 <- p[1, 2, 2] - p[2, 2, 2]
}
",fill = TRUE)
  sink()
  # initial values: uniform draws for every cell probability
  inits <- function() {list(
    p = array(runif(8, 0, 1), dim = c(2, 2, 2)))}
  # parameters monitored
  params <- c(
    "p",
    "sex.riparian.2010",
    "sex.upland.2010",
    "sex.riparian.2011",
    "sex.upland.2011",
    "year.riparian.male",
    "year.riparian.female",
    "year.upland.male",
    "year.upland.female",
    "habitat.male.2010",
    "habitat.female.2010",
    "habitat.male.2011",
    "habitat.female.2011")
  # MCMC settings
  n.adapt <- 5000
  n.update <- 10000
  n.iter <- 15000
  jm <- jags.model("SurvivalStems.R", data = DataQuery, inits = inits(), n.chains = 3, n.adapt = n.adapt)
  update(jm, n.iter = n.update)
  zm <- coda.samples(jm, variable.names = params, n.iter = n.iter, thin = 10)
  unlink("SurvivalStems.R")  # was unlist(): the temp file was never deleted
  zm
}
# Posterior median and 95% credible interval of each cell probability;
# rows are the four habitat x sex cells, columns 1:3 (4:6) hold the
# 2010 (2011) median, lower, and upper bounds.
summarize_p <- function(chains2) {
  cells <- list(c(1, 1), c(1, 2), c(2, 1), c(2, 2))
  Dtable <- array(NA, dim = c(4, 6))
  for (r in seq_along(cells)) {
    for (yr in 1:2) {
      draws <- chains2[, matchcols(chains2, with = sprintf("p\\[%d,%d,%d", cells[[r]][1], cells[[r]][2], yr))]
      Dtable[r, (yr - 1) * 3 + 1] <- median(draws)
      Dtable[r, (yr - 1) * 3 + 2] <- quantile(draws, prob = .025)
      Dtable[r, (yr - 1) * 3 + 3] <- quantile(draws, prob = .975)
    }
  }
  Dtable
}
# Format the summary array as a character data frame suitable for xtable:
# point estimates plus "(lower -- upper)" interval strings per year.
format_dtable <- function(Dtable) {
  habitat <- c("Riparian", "Riparian", "Upland", "Upland")
  sex <- c("Male", "Female", "Male", "Female")
  S2010 <- paste0("(", round(Dtable[, 2], 2), " -- ", round(Dtable[, 3], 2), ")")
  S2011 <- paste0("(", round(Dtable[, 5], 2), " -- ", round(Dtable[, 6], 2), ")")
  as.data.frame(cbind(habitat, sex, round(Dtable[, 1], 2), S2010, round(Dtable[, 4], 2), S2011))
}
#######################################################################################################################################
# fit for stems with low z weights (w <= .1)
MCMCsamplesSurvivalA <- fit_survival_model(make_data_query(subset(work3, w <= .1)))
summary(MCMCsamplesSurvivalA)
gelman.diag(MCMCsamplesSurvivalA, multivariate = FALSE)
#######################################################################################################################################
# fit for stems with high z weights (w >= .9)
MCMCsamplesSurvivalNA <- fit_survival_model(make_data_query(subset(work3, w >= .9)))
summary(MCMCsamplesSurvivalNA)
gelman.diag(MCMCsamplesSurvivalNA, multivariate = FALSE)
#######################################################################################################################################
# LaTeX table for the low-weight fit
chains2 <- rbind(as.matrix(MCMCsamplesSurvivalA[[1]]), as.matrix(MCMCsamplesSurvivalA[[2]]), as.matrix(MCMCsamplesSurvivalA[[3]]))
Dtable2 <- format_dtable(summarize_p(chains2))
xtable(Dtable2, digits = 2)
#######################################################################################################################################
# LaTeX table for the high-weight fit
chains2 <- rbind(as.matrix(MCMCsamplesSurvivalNA[[1]]), as.matrix(MCMCsamplesSurvivalNA[[2]]), as.matrix(MCMCsamplesSurvivalNA[[3]]))
Dtable2 <- format_dtable(summarize_p(chains2))
xtable(Dtable2, digits = 2)
|
/FiguresTablesEtc/table4.R
|
permissive
|
CCheCastaldo/CheCastaldo_etal_2019_EcolMongr
|
R
| false
| false
| 11,912
|
r
|
## Table 4 -- stem survival probabilities by habitat, sex, and year.
## The same binomial JAGS model is fit twice: once for stems with low
## posterior z weights (w <= .1) and once for stems with high weights
## (w >= .9); posterior medians and 95% credible intervals are printed
## as LaTeX tables via xtable.
## NOTE(review): the semantics of the z weights (presumably a posterior
## class probability from the global model) should be confirmed against
## ../ModelBuild/GlobalModel.
library(coda)
library(gdata)
library(rjags)
library(broman)
library(plyr)
library(reshape)
library(xtable)
version <- "i-009f9c75a9206a0ac"
#######################################################################################################################################
# load data and attach the posterior mean z weights (one column per year)
load(file = "../Library/SalixBorerWide.rda")
load(file = paste0("../ModelBuild/GlobalModel/", version, "/MCMCsamplesZ.rda"))
chains <- apply(rbind(as.matrix(MCMCsamplesZ[[1]]), as.matrix(MCMCsamplesZ[[2]]), as.matrix(MCMCsamplesZ[[3]])), 2, mean)
zWeights <- as.data.frame(cbind(chains[1:2087], chains[2088:4174], chains[4175:6261]))
SalixBorerWide <- cbind(SalixBorerWide, zWeights)
#######################################################################################################################################
# build stem survival records for the two annual transitions;
# "denom" = alive at start of year, "num" = alive at end, "w" = z weight
work1 <- subset(SalixBorerWide, site09 == 1, select = c(habitat, sex, site09, site10, V1))
work2 <- subset(SalixBorerWide, site10 == 1, select = c(habitat, sex, site10, site11, V2))
names(work1) <- c("habitat", "sex", "denom", "num", "w")
names(work2) <- c("habitat", "sex", "denom", "num", "w")
work1$year <- 1
work2$year <- 2
work3 <- rbind(work1, work2)
# fraction of stems whose weight is decisive (near 0 or near 1)
work3$check <- 0
i <- which(work3$w <= .1 | work3$w >= .9)
work3$check[i] <- 1
table(work3$check) / dim(work3)[1]
# Aggregate alive/total counts per habitat x sex x year cell and package
# them as the data list expected by the JAGS model.
make_data_query <- function(stems) {
  counts <- ddply(stems, c("habitat", "sex", "year"), function(x) c(
    alive = sum(x$num, na.rm = TRUE),
    total = sum(x$denom, na.rm = TRUE)))
  counts$proportion <- counts$alive / counts$total
  list(
    alive = counts$alive,
    n = counts$total,
    habitat = counts$habitat + 1,  # recode 0/1 -> 1/2 for JAGS indexing
    sex = counts$sex + 1,
    year = counts$year)
}
# Write the JAGS model file, run three chains (adaptation + burn-in),
# and return the thinned coda samples.  The temporary model file is
# removed afterwards (the original script called unlist(), a no-op typo
# for unlink()).
fit_survival_model <- function(DataQuery) {
  sink("SurvivalStems.R")
  cat("
model {
# priors
for (i in 1:2) {
for (j in 1:2) {
for (k in 1:2) {
p[i, j, k] ~ dunif (0, 1)
}
}
}
# likelihood
for (i in 1:8) {
alive[i] ~ dbinom(p[habitat[i], sex[i], year[i]], n[i])
}
# mean comparisons
sex.riparian.2010 <- p[1, 1, 1] - p[1, 2, 1]
sex.riparian.2011 <- p[1, 1, 2] - p[1, 2, 2]
sex.upland.2010 <- p[2, 1, 1] - p[2, 2, 1]
sex.upland.2011 <- p[2, 1, 2] - p[2, 2, 2]
year.riparian.male <- p[1, 1, 1] - p[1, 1, 2]
year.riparian.female <- p[1, 2, 1] - p[1, 2, 2]
year.upland.male <- p[2, 1, 1] - p[2, 1, 2]
year.upland.female <- p[2, 2, 1] - p[2, 2, 2]
habitat.male.2010 <- p[1, 1, 1] - p[2, 1, 1]
habitat.female.2010 <- p[1, 2, 1] - p[2, 2, 1]
habitat.male.2011 <- p[1, 1, 2] - p[2, 1, 2]
habitat.female.2011 <- p[1, 2, 2] - p[2, 2, 2]
}
",fill = TRUE)
  sink()
  # initial values: uniform draws for every cell probability
  inits <- function() {list(
    p = array(runif(8, 0, 1), dim = c(2, 2, 2)))}
  # parameters monitored
  params <- c(
    "p",
    "sex.riparian.2010",
    "sex.upland.2010",
    "sex.riparian.2011",
    "sex.upland.2011",
    "year.riparian.male",
    "year.riparian.female",
    "year.upland.male",
    "year.upland.female",
    "habitat.male.2010",
    "habitat.female.2010",
    "habitat.male.2011",
    "habitat.female.2011")
  # MCMC settings
  n.adapt <- 5000
  n.update <- 10000
  n.iter <- 15000
  jm <- jags.model("SurvivalStems.R", data = DataQuery, inits = inits(), n.chains = 3, n.adapt = n.adapt)
  update(jm, n.iter = n.update)
  zm <- coda.samples(jm, variable.names = params, n.iter = n.iter, thin = 10)
  unlink("SurvivalStems.R")  # was unlist(): the temp file was never deleted
  zm
}
# Posterior median and 95% credible interval of each cell probability;
# rows are the four habitat x sex cells, columns 1:3 (4:6) hold the
# 2010 (2011) median, lower, and upper bounds.
summarize_p <- function(chains2) {
  cells <- list(c(1, 1), c(1, 2), c(2, 1), c(2, 2))
  Dtable <- array(NA, dim = c(4, 6))
  for (r in seq_along(cells)) {
    for (yr in 1:2) {
      draws <- chains2[, matchcols(chains2, with = sprintf("p\\[%d,%d,%d", cells[[r]][1], cells[[r]][2], yr))]
      Dtable[r, (yr - 1) * 3 + 1] <- median(draws)
      Dtable[r, (yr - 1) * 3 + 2] <- quantile(draws, prob = .025)
      Dtable[r, (yr - 1) * 3 + 3] <- quantile(draws, prob = .975)
    }
  }
  Dtable
}
# Format the summary array as a character data frame suitable for xtable:
# point estimates plus "(lower -- upper)" interval strings per year.
format_dtable <- function(Dtable) {
  habitat <- c("Riparian", "Riparian", "Upland", "Upland")
  sex <- c("Male", "Female", "Male", "Female")
  S2010 <- paste0("(", round(Dtable[, 2], 2), " -- ", round(Dtable[, 3], 2), ")")
  S2011 <- paste0("(", round(Dtable[, 5], 2), " -- ", round(Dtable[, 6], 2), ")")
  as.data.frame(cbind(habitat, sex, round(Dtable[, 1], 2), S2010, round(Dtable[, 4], 2), S2011))
}
#######################################################################################################################################
# fit for stems with low z weights (w <= .1)
MCMCsamplesSurvivalA <- fit_survival_model(make_data_query(subset(work3, w <= .1)))
summary(MCMCsamplesSurvivalA)
gelman.diag(MCMCsamplesSurvivalA, multivariate = FALSE)
#######################################################################################################################################
# fit for stems with high z weights (w >= .9)
MCMCsamplesSurvivalNA <- fit_survival_model(make_data_query(subset(work3, w >= .9)))
summary(MCMCsamplesSurvivalNA)
gelman.diag(MCMCsamplesSurvivalNA, multivariate = FALSE)
#######################################################################################################################################
# LaTeX table for the low-weight fit
chains2 <- rbind(as.matrix(MCMCsamplesSurvivalA[[1]]), as.matrix(MCMCsamplesSurvivalA[[2]]), as.matrix(MCMCsamplesSurvivalA[[3]]))
Dtable2 <- format_dtable(summarize_p(chains2))
xtable(Dtable2, digits = 2)
#######################################################################################################################################
# LaTeX table for the high-weight fit
chains2 <- rbind(as.matrix(MCMCsamplesSurvivalNA[[1]]), as.matrix(MCMCsamplesSurvivalNA[[2]]), as.matrix(MCMCsamplesSurvivalNA[[3]]))
Dtable2 <- format_dtable(summarize_p(chains2))
xtable(Dtable2, digits = 2)
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{freqpolygon}
\alias{freqpolygon}
\alias{panel.freqpolygon}
\title{Frequency Polygons}
\usage{
freqpolygon(x, ..., panel = "panel.freqpolygon")
panel.freqpolygon(x, plot.points = "jitter", ref = FALSE, groups = NULL,
weights = NULL, jitter.amount = 0.01 * diff(current.panel.limits()$ylim),
type = "density", breaks = NULL, nint = NULL, center = NULL,
wdth = NULL, width = wdth, gcol = trellis.par.get("reference.line")$col,
glwd = trellis.par.get("reference.line")$lwd, h, v, ...,
identifier = "density")
}
\arguments{
\item{x}{a formula or a numeric vector}
\item{\dots}{additional arguments passed on to \code{\link{histogram}}
and \code{panel}.}
\item{panel}{a panel function}
\item{plot.points}{one of \code{TRUE}, \code{FALSE}, \code{"jitter"}, or \code{"rug"} indicating
how points are to be displayed}
\item{gcol}{color of guidelines}
\item{glwd}{width of guidelines}
\item{groups,weights,jitter.amount,identifier}{as in \code{\link{densityplot}}
or \code{\link{histogram}}}
\item{type}{one of \code{'density'}, \code{'percent'}, or \code{'count'}}
\item{breaks}{a vector of breaks for the frequency polygon bins}
\item{nint}{an approximate number of bins for the frequency polygon}
\item{center}{center of one of the bins}
\item{width}{width of the bins}
\item{wdth}{alternative to \code{width} to avoid conflict with \code{densityplot} argument
names}
\item{h,v}{a vector of values for additional horizontal and vertical lines}
\item{ref}{a logical indicating whether a horizontal reference line should be
added (roughly equivalent to \code{h=0})}
}
\value{
a trellis object
}
\description{
Frequency polygons are an alternative to histograms that make it simpler to overlay multiple
distributions.
}
\details{
These functions are still under development. Future improvements may be forthcoming.
}
\note{
This function makes use of \code{histogram} to determine the overall layout. Often
this works reasonably well but sometimes it does not. In particular, when \code{groups} is
used to overlay multiple frequency polygons, there is often too little head room.
In the latter cases, it may be
necessary to use \code{ylim} to determine an appropriate viewing rectangle for the
plot.
}
\examples{
freqpolygon(~age | substance, data=HELPrct, v=35)
freqpolygon(~age, data=HELPrct, labels=TRUE, type='count')
freqpolygon(~age | substance, data=HELPrct, groups=sex)
freqpolygon(~age | substance, data=HELPrct, groups=sex, ylim=c(0,0.11))
## comparison of histogram and frequency polygon
histogram(~eruptions, faithful, type='density', width=.5)
ladd( panel.freqpolygon(faithful$eruptions, width=.5 ))
}
|
/man/freqpolygon.Rd
|
no_license
|
TotallyBullshit/mosaic
|
R
| false
| false
| 2,686
|
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{freqpolygon}
\alias{freqpolygon}
\alias{panel.freqpolygon}
\title{Frequency Polygons}
\usage{
freqpolygon(x, ..., panel = "panel.freqpolygon")
panel.freqpolygon(x, plot.points = "jitter", ref = FALSE, groups = NULL,
weights = NULL, jitter.amount = 0.01 * diff(current.panel.limits()$ylim),
type = "density", breaks = NULL, nint = NULL, center = NULL,
wdth = NULL, width = wdth, gcol = trellis.par.get("reference.line")$col,
glwd = trellis.par.get("reference.line")$lwd, h, v, ...,
identifier = "density")
}
\arguments{
\item{x}{a formula or a numeric vector}
\item{\dots}{additional arguments passed on to \code{\link{histogram}}
and \code{panel}.}
\item{panel}{a panel function}
\item{plot.points}{one of \code{TRUE}, \code{FALSE}, \code{"jitter"}, or \code{"rug"} indicating
how points are to be displayed}
\item{gcol}{color of guidelines}
\item{glwd}{width of guidelines}
\item{groups,weights,jitter.amount,identifier}{as in \code{\link{densityplot}}
or \code{\link{histogram}}}
\item{type}{one of \code{'density'}, \code{'percent'}, or \code{'count'}}
\item{breaks}{a vector of breaks for the frequency polygon bins}
\item{nint}{an approximate number of bins for the frequency polygon}
\item{center}{center of one of the bins}
\item{width}{width of the bins}
\item{wdth}{alternative to \code{width} to avoid conflict with \code{densityplot} argument
names}
\item{h,v}{a vector of values for additional horizontal and vertical lines}
\item{ref}{a logical indicating whether a horizontal reference line should be
added (roughly equivalent to \code{h=0})}
}
\value{
a trellis object
}
\description{
Frequency polygons are an alternative to histograms that make it simpler to overlay multiple
distributions.
}
\details{
These functions are still under development. Future improvements may be forthcoming.
}
\note{
This function makes use of \code{histogram} to determine the overall layout. Often
this works reasonably well but sometimes it does not. In particular, when \code{groups} is
used to overlay multiple frequency polygons, there is often too little head room.
In the latter cases, it may be
necessary to use \code{ylim} to determine an appropriate viewing rectangle for the
plot.
}
\examples{
freqpolygon(~age | substance, data=HELPrct, v=35)
freqpolygon(~age, data=HELPrct, labels=TRUE, type='count')
freqpolygon(~age | substance, data=HELPrct, groups=sex)
freqpolygon(~age | substance, data=HELPrct, groups=sex, ylim=c(0,0.11))
## comparison of histogram and frequency polygon
histogram(~eruptions, faithful, type='density', width=.5)
ladd( panel.freqpolygon(faithful$eruptions, width=.5 ))
}
|
\name{p.page4}
\alias{p.page4}
\title{A posteriori probability of hyperparameters}
\description{
Function to determine the a posteriori probability of hyperparameters
\eqn{\rho}{rho}, \eqn{\lambda}{lambda} and \eqn{\psi_2}{psi2},
given observations and \eqn{\psi_1}{psi1}.
}
\usage{
p.page4(D1, D2, H1, H2, V, y, z, E.theta, Edash.theta, extractor, include.prior=FALSE,
lognormally.distributed=FALSE, return.log=FALSE, phi)
}
\arguments{
\item{D1}{Matrix of code run points}
\item{D2}{Matrix of observation points}
\item{H1}{Basis function (vectorized)}
\item{H2}{Regression function for D2}
\item{V}{Covariance matrix; default value of \code{NULL} results in
the function evaluating it (but this takes a long time, so supply
\code{V} if known)}
\item{y}{Vector of code outputs}
\item{z}{Vector of observation values}
\item{E.theta}{Expectation over theta}
\item{Edash.theta}{Expectation over theta WRT \eqn{E'}}
\item{extractor}{Function to extract independent variables and
parameters from D1}
\item{include.prior}{Boolean, with \code{TRUE}
meaning to include the prior PDF for \eqn{\theta}{theta} and default
value of \code{FALSE} meaning to return the likelihood multiplied by an
undetermined constant}
\item{lognormally.distributed}{Boolean with \code{TRUE} meaning to assume
lognormality. See \code{prob.psi1} for details}
\item{return.log}{Boolean, with default \code{FALSE} meaning to return
the probability, and \code{TRUE} meaning to return the (natural)
logarithm of the probability (which is useful when considering very
small probabilities)}
\item{phi}{Hyperparameters}
}
\references{
\itemize{
\item
M. C. Kennedy and A. O'Hagan 2001. \emph{Bayesian
calibration of computer models}. Journal of the Royal Statistical
Society B, 63(3) pp425-464
\item
M. C. Kennedy and A. O'Hagan 2001. \emph{Supplementary details on
Bayesian calibration of computer models}, Internal report, University
of Sheffield. Available at
\url{http://www.tonyohagan.co.uk/academic/ps/calsup.ps}
\item
R. K. S. Hankin 2005. \emph{Introducing BACCO, an R bundle for
Bayesian analysis of computer code output}, Journal of Statistical
Software, 14(16)
}
}
\author{Robin K. S. Hankin}
\seealso{\code{\link{W2}}}
\examples{
data(toys)
p.page4(D1=D1.toy, D2=D2.toy, H1=H1.toy, H2=H2.toy, V=NULL, y=y.toy,
z=z.toy,E.theta=E.theta.toy, Edash.theta=Edash.theta.toy, extractor=extractor.toy, phi=phi.toy)
## Now compare the above value with p.page4() calculated with phi
## differing only in psi2:
phi.toy.new <- phi.change(phi.fun=phi.fun.toy, old.phi = phi.toy, psi2=c(8,8,8))
p.page4(D1=D1.toy, D2=D2.toy, H1=H1.toy, H2=H2.toy, V=V.toy, y=y.toy, z=z.toy,
E.theta=E.theta.toy, Edash.theta=Edash.theta.toy,
extractor=extractor.toy, phi=phi.toy.new)
## different!
}
\keyword{array}
|
/man/p.page4.Rd
|
no_license
|
RobinHankin/calibrator
|
R
| false
| false
| 2,884
|
rd
|
\name{p.page4}
\alias{p.page4}
\title{A posteriori probability of hyperparameters}
\description{
Function to determine the a posteriori probability of hyperparameters
\eqn{\rho}{rho}, \eqn{\lambda}{lambda} and \eqn{\psi_2}{psi2},
given observations and \eqn{\psi_1}{psi1}.
}
\usage{
p.page4(D1, D2, H1, H2, V, y, z, E.theta, Edash.theta, extractor, include.prior=FALSE,
lognormally.distributed=FALSE, return.log=FALSE, phi)
}
\arguments{
\item{D1}{Matrix of code run points}
\item{D2}{Matrix of observation points}
\item{H1}{Basis function (vectorized)}
\item{H2}{Regression function for D2}
\item{V}{Covariance matrix; default value of \code{NULL} results in
the function evaluating it (but this takes a long time, so supply
\code{V} if known)}
\item{y}{Vector of code outputs}
\item{z}{Vector of observation values}
\item{E.theta}{Expectation over theta}
\item{Edash.theta}{Expectation over theta WRT \eqn{E'}}
\item{extractor}{Function to extract independent variables and
parameters from D1}
\item{include.prior}{Boolean, with \code{TRUE}
meaning to include the prior PDF for \eqn{\theta}{theta} and default
value of \code{FALSE} meaning to return the likelihood multiplied by an
undetermined constant}
\item{lognormally.distributed}{Boolean with \code{TRUE} meaning to assume
lognormality. See \code{prob.psi1} for details}
\item{return.log}{Boolean, with default \code{FALSE} meaning to return
the probability, and \code{TRUE} meaning to return the (natural)
logarithm of the probability (which is useful when considering very
small probabilities)}
\item{phi}{Hyperparameters}
}
\references{
\itemize{
\item
M. C. Kennedy and A. O'Hagan 2001. \emph{Bayesian
calibration of computer models}. Journal of the Royal Statistical
Society B, 63(3) pp425-464
\item
M. C. Kennedy and A. O'Hagan 2001. \emph{Supplementary details on
Bayesian calibration of computer models}, Internal report, University
of Sheffield. Available at
\url{http://www.tonyohagan.co.uk/academic/ps/calsup.ps}
\item
R. K. S. Hankin 2005. \emph{Introducing BACCO, an R bundle for
Bayesian analysis of computer code output}, Journal of Statistical
Software, 14(16)
}
}
\author{Robin K. S. Hankin}
\seealso{\code{\link{W2}}}
\examples{
data(toys)
p.page4(D1=D1.toy, D2=D2.toy, H1=H1.toy, H2=H2.toy, V=NULL, y=y.toy,
z=z.toy,E.theta=E.theta.toy, Edash.theta=Edash.theta.toy, extractor=extractor.toy, phi=phi.toy)
## Now compare the above value with p.page4() calculated with phi
## differing only in psi2:
phi.toy.new <- phi.change(phi.fun=phi.fun.toy, old.phi = phi.toy, psi2=c(8,8,8))
p.page4(D1=D1.toy, D2=D2.toy, H1=H1.toy, H2=H2.toy, V=V.toy, y=y.toy, z=z.toy,
E.theta=E.theta.toy, Edash.theta=Edash.theta.toy,
extractor=extractor.toy, phi=phi.toy.new)
## different!
}
\keyword{array}
|
# Compare two generations of per-player stats files ("new" vs "old") for each
# game directory and report any discrepancies in points and defensive blocks.
#library(stringr)
library(plyr)
# Game directories are named like "<date>_<away>-at-<home>".
games <- list.files(file.path("..", "games"), pattern = "-at-")
ps_files <- list.files(file.path("..", "games", games), recursive = TRUE,
                       full.names = TRUE, pattern = "player-stats.tsv")
# Each game contributes exactly two stats files; pair them up as columns
# "new" and "old" alongside the game id.
ps_files <- cbind(games,
                  matrix(ps_files, ncol = 2, byrow = TRUE,
                         dimnames = list(NULL, c("new", "old"))))
# Sanity checks (printed, not asserted): row i of ps_files should match games[i]
# in both the "old" and "new" columns.
identical(seq_along(games),
          laply(games, function(gg) grep(gg, ps_files[ , "old"])))
identical(seq_along(games),
          laply(games, function(gg) grep(gg, ps_files[ , "new"])))
for(i in seq_len(nrow(ps_files))) {
  x <- ps_files[i, ]
  message("game: ", i, " ", x["games"])
  new_stats <- read.delim(x["new"], stringsAsFactors = FALSE)
  # New files identify players as "<player>-<last>"; rebuild that key for the join.
  new_stats <- mutate(new_stats, player = paste(player, last, sep = "-"))
  #str(new_stats)
  old_stats <- read.delim(x["old"], stringsAsFactors = FALSE)
  #str(old_stats)
  # Suffix the four stat columns of the old file with "_old"; the first two
  # columns (the game/player keys) keep their names so join() can match them.
  names(old_stats) <- paste0(names(old_stats), rep(c('', '_old'), c(2, 4)))
  tmp <- join(old_stats, new_stats, by = c('game', 'player'))
  print(rbind(old = nrow(old_stats), new = nrow(new_stats), joined = nrow(tmp)))
  # Flag rows where points or defensive blocks (Ds vs def) disagree or are
  # missing after the join.
  unequal_points <- with(tmp, points_old != points | is.na(points))
  unequal_def <- with(tmp, Ds_old != def | is.na(def))
  n_discrepancies <- sum(unequal_points | unequal_def)
  if(n_discrepancies > 0) {
    message(n_discrepancies, " discrepancies")
    print(tmp[unequal_points | unequal_def, ])
  } else {
    message("AGREE!")
  }
  message("\n")
}
## this is a callahan
## 11 2014-05-04_phlSP-at-wdcCT phlSP-1-peters assists_old 3 assists 2
## 11 2014-05-04_phlSP-at-wdcCT phlSP-21-panna goals_old 1 goals 0
# Ad-hoc investigation of one known discrepancy in a single game.
game <- "2014-05-04_phlSP-at-wdcCT"
game_play <- read.delim(file.path("..", "games", game, "06_possess-game",
                                  paste0(game, "_gameplay-resolved.tsv")))
str(game_play)
subset(game_play, pl_code %in% c("G") & pl_pnum == '21')
subset(game_play, poss_abs %in% 94:96)
# NOTE(review): `pass_files` is never defined in this script — this section
# presumably depends on an earlier script in the pipeline; verify before running.
names(pass_files) <- games
pass_dat <-
  ldply(pass_files, function(gg) read.delim(gg,
                                            colClasses = list(beg_plyr = "character",
                                                              innards = "character",
                                                              end_plyr = "character")),
        .id = "game")
str(pass_dat) # 16032 obs. of 15 variables
|
/scripts/39_develop-player-stats.r
|
no_license
|
jennybc/vanNH
|
R
| false
| false
| 2,292
|
r
|
#library(stringr)
library(plyr)
games <- list.files(file.path("..", "games"), pattern = "-at-")
ps_files <- list.files(file.path("..", "games", games), recursive = TRUE,
full.names = TRUE, pattern = "player-stats.tsv")
ps_files <- cbind(games,
matrix(ps_files, ncol = 2, byrow = TRUE,
dimnames = list(NULL, c("new", "old"))))
identical(seq_along(games),
laply(games, function(gg) grep(gg, ps_files[ , "old"])))
identical(seq_along(games),
laply(games, function(gg) grep(gg, ps_files[ , "new"])))
for(i in seq_len(nrow(ps_files))) {
x <- ps_files[i, ]
message("game: ", i, " ", x["games"])
new_stats <- read.delim(x["new"], stringsAsFactors = FALSE)
new_stats <- mutate(new_stats, player = paste(player, last, sep = "-"))
#str(new_stats)
old_stats <- read.delim(x["old"], stringsAsFactors = FALSE)
#str(old_stats)
names(old_stats) <- paste0(names(old_stats), rep(c('', '_old'), c(2, 4)))
tmp <- join(old_stats, new_stats, by = c('game', 'player'))
print(rbind(old = nrow(old_stats), new = nrow(new_stats), joined = nrow(tmp)))
unequal_points <- with(tmp, points_old != points | is.na(points))
unequal_def <- with(tmp, Ds_old != def | is.na(def))
n_discrepancies <- sum(unequal_points | unequal_def)
if(n_discrepancies > 0) {
message(n_discrepancies, " discrepancies")
print(tmp[unequal_points | unequal_def, ])
} else {
message("AGREE!")
}
message("\n")
}
## this is a callahan
## 11 2014-05-04_phlSP-at-wdcCT phlSP-1-peters assists_old 3 assists 2
## 11 2014-05-04_phlSP-at-wdcCT phlSP-21-panna goals_old 1 goals 0
game <- "2014-05-04_phlSP-at-wdcCT"
game_play <- read.delim(file.path("..", "games", game, "06_possess-game",
paste0(game, "_gameplay-resolved.tsv")))
str(game_play)
subset(game_play, pl_code %in% c("G") & pl_pnum == '21')
subset(game_play, poss_abs %in% 94:96)
names(pass_files) <- games
pass_dat <-
ldply(pass_files, function(gg) read.delim(gg,
colClasses = list(beg_plyr = "character",
innards = "character",
end_plyr = "character")),
.id = "game")
str(pass_dat) # 16032 obs. of 15 variables
|
# p.18, Program 1-1
RollDie = function(n) sample(1:6, n, replace = T)
# Draw samples of increasing size to illustrate convergence of the empirical
# distribution toward the uniform 1/6 probability per face.
r20 = RollDie(20)
r200 = RollDie(200)
r2000 = RollDie(2000)
r20000 = RollDie(20000)
# 2x2 grid of relative-frequency histograms, one bin per face value.
par(mfrow=c(2,2))
hist(r20, br = c(0,1,2,3,4,5,6), main= '', freq=F, ylab='Relative Frequency', ylim=c(0,0.5))
hist(r200, br = c(0,1,2,3,4,5,6), main= '', freq=F, ylab='Relative Frequency', ylim=c(0,0.5))
hist(r2000, br = c(0,1,2,3,4,5,6), main= '', freq=F, ylab='Relative Frequency', ylim=c(0,0.5))
hist(r20000, br = c(0,1,2,3,4,5,6), main= '', freq=F, ylab='Relative Frequency', ylim=c(0,0.5))
# p.107 Ch4.8
# 4.1
# Build the theoretical discrete uniform distribution on 1..6 (distrEx package).
library(distrEx)
x = DiscreteDistribution(supp=c(1:6), prob=rep(1/6,6))
plot(x)
# 4.2
# Expectation, variance and standard deviation of the die distribution
# (E/var/sd here are the distrEx methods for distribution objects).
E(x)
var(x)
sd(x)
|
/[Probability].R
|
no_license
|
esoterikosQ/KNOU
|
R
| false
| false
| 676
|
r
|
# p.18, Program 1-1
RollDie = function(n) sample(1:6, n, replace = T)
r20 = RollDie(20)
r200 = RollDie(200)
r2000 = RollDie(2000)
r20000 = RollDie(20000)
par(mfrow=c(2,2))
hist(r20, br = c(0,1,2,3,4,5,6), main= '', freq=F, ylab='Relative Frequency', ylim=c(0,0.5))
hist(r200, br = c(0,1,2,3,4,5,6), main= '', freq=F, ylab='Relative Frequency', ylim=c(0,0.5))
hist(r2000, br = c(0,1,2,3,4,5,6), main= '', freq=F, ylab='Relative Frequency', ylim=c(0,0.5))
hist(r20000, br = c(0,1,2,3,4,5,6), main= '', freq=F, ylab='Relative Frequency', ylim=c(0,0.5))
# p.107 Ch4.8
# 4.1
library(distrEx)
x = DiscreteDistribution(supp=c(1:6), prob=rep(1/6,6))
plot(x)
# 4.2
E(x)
var(x)
sd(x)
|
#' Adjust a Dataset
#'
#' Adjust the dimensions of a dataset to build the blocks: trailing rows and
#' columns are dropped so that the number of rows is a multiple of \code{tb}
#' and the number of columns is a multiple of \code{sb}.
#' @param D Dataset containing numeric values
#' @param tb Temporal block size (rows per block)
#' @param sb Spatial block size (columns per block)
#' @return Dataset adjusted to build the blocks.
#' @examples
#' #Adjust a block
#' D <- STSADatasetAdjust(STMotif::example_dataset, 20, 12)
#' @export
STSADatasetAdjust <- function(D, tb, sb) {
  n_row <- nrow(D)
  n_col <- ncol(D)
  # Number of rows/columns to keep: the largest multiples of tb/sb that fit.
  keep_rows <- n_row - (n_row %% tb)
  keep_cols <- n_col - (n_col %% sb)
  # seq_len() handles the degenerate case keep == 0 correctly; the original
  # `1:(r - er)` expanded to c(1, 0) when a block did not fit at all and
  # erroneously kept one row/column.
  D[seq_len(keep_rows), seq_len(keep_cols)]
}
#' CSAMiningProcess
#'
#' CSA Datamining Process: encodes the dataset with SAX, searches it for
#' spatial-time motifs, and returns the motifs ranked by quality.
#' @param D Dataset containing numeric values
#' @param DS Dataset containing SAX encoded values
#' @param w Word Size
#' @param a Number of letters to do the encode
#' @param sb Spatial block size
#' @param tb Temporal block size
#' @param si Minimum number of occurrences inside each block
#' @param ka Minimum number of spatial-time series with occurrences inside each block
#' @return Return a list of ranked motifs. Each motif contains the information [isaxcode, recmatrix, vectst, rank], as described:
#' @return isaxcode: Motif sequences in character format
#' @return recmatrix: Matrix giving as information the blocks containing this motif
#' @return vectst: Coordinate of the start positions of the motif in the original dataset
#' @return rank: List of information used for motif ranking, as [dist, word, qtd, proj]
#' @examples
#' #CSA Datamining process
#' D <- STMotif::example_dataset
#' DS <- NormSAX(STMotif::example_dataset,5)
#' rmotif <- CSAMiningProcess(D,DS,4,5,4,10,2,2)
#' @export
CSAMiningProcess <- function(D, DS, w, a, sb, tb, si, ka) {
  # NOTE(review): the SAX encoding is recomputed from D here, so the DS
  # argument is effectively ignored — this mirrors the original behavior;
  # confirm whether DS should be used directly instead.
  sax_encoded <- NormSAX(D, a)
  motifs_found <- SearchSTMotifs(D, sax_encoded, w, a, sb, tb, si, ka)
  RankSTMotifs(motifs_found)
}
#' Normalize the data and SAX indexing
#'
#' Z-score normalizes all values of the dataset (global mean and standard
#' deviation, NAs removed) and encodes the result with SAX.
#' @param D Dataset containing numeric values
#' @param a Number of letters used to encode
#' @return A normalized and encoded dataset for a given alphabet a
#' @examples
#' #Normalization and Sax Dataset
#' DS <- NormSAX(STMotif::example_dataset, 5)
#' @export
NormSAX <- function (D,a){
  # Flatten the whole dataset into one vector so normalization is global.
  values <- as.vector(as.matrix(D))
  # z-score normalization; TRUE spelled out (the original used the
  # reassignable shorthand T).
  normalized <- (values - mean(values, na.rm = TRUE)) / stats::sd(values, na.rm = TRUE)
  STSSaxEncode(D, normalized, a)
}
#' SearchSTMotifs
#'
#' Search for Spatial-time Motifs
#' @param D Dataset containing numeric values
#' @param DS Dataset containing SAX encoded values
#' @param w Word Size
#' @param a Number of letters to do the encode
#' @param sb "Space slice" Number of columns in each block
#' @param tb "Time slice" Number of rows in each block
#' @param si Support of Global Occurrence (GO)
#' @param ka Support for Spatial Occurrence (SO)
#' @return Return a list of identified motifs. Each motif contains the information [isaxcode, recmatrix, vectst], as described:
#' @return isaxcode: Motif sequences in character format
#' @return recmatrix: Matrix giving as information the blocks containing this motif
#' @return vectst: Coordinate of the start positions of the motif in the original dataset
#' @examples
#' #Search for Spatial-time Motifs
#' D <- STMotif::example_dataset
#' DS <- NormSAX(STMotif::example_dataset,5)
#' stmotifs <- SearchSTMotifs(D,DS,4,5,4,10,2,2)
#' @export
SearchSTMotifs <- function (D,DS,w,a,sb,tb,si=3,ka=3){
  # Partition both the SAX-encoded and the raw dataset into tb x sb blocks.
  saxblocks <- STSComputeBlocks(DS, tb, sb)
  saxblocks$rectangles <- NULL
  blocks <- STSComputeBlocks(D, tb, sb)
  nrows = blocks$nrows
  ncols = blocks$ncols
  # Keep the block coordinate rectangles; they are needed again below.
  rectangles = blocks$rectangles
  blocks$rectangles <- NULL
  # Mine candidate motifs independently inside each block.
  motifs<-list()
  size=length(blocks$datasets)
  for (i in 1:size) {
    block = blocks$datasets[[i]]
    saxblock = saxblocks$datasets[[i]]
    # Flatten each block (column-major) into a plain vector before mining.
    block = as.vector(as.matrix(block))
    saxblock = as.vector(as.matrix(saxblock))
    motifs[[i]] <- identifyMotifsInBlock(ts = block, tss = saxblock, tb = tb ,w = w, a = a)
  }
  # Accumulate motifs that satisfy the GO (si) and SO (ka) supports.
  stmotifs <- list()
  for (i in 1:length(motifs)) {
    stmotifs <- STSIdentifySTMotif(stmotifs, motifs[[i]], nrows, ncols, rectangles[[i]], ka = ka, si = si)
  }
  sttightmotifs <- list()
  if (length(stmotifs)>0){
    # Merge each motif's separate space/time coordinate vectors into a
    # single data frame `vecst` with columns (s, t).
    for (i in 1:length(stmotifs)) {
      stmotif = stmotifs[[i]]
      s = stmotif$vecs
      t = stmotif$vect
      stmotif$vecst = data.frame(s, t)
      stmotif$vecs <- NULL
      stmotif$vect <- NULL
      stmotifs[[i]] = stmotif
    }
    # Split each motif into its "tight" sub-motifs and collect them into a
    # flat list named by their SAX word.
    for(stmotif in (stmotifs)) {
      sttightmotifsSplit <- STSIdentifyTightSTMotif(stmotif, rectangles)
      for (item in (sttightmotifsSplit)) {
        pos = length(sttightmotifs)+1
        sttightmotifs[[pos]] <- item
        names(sttightmotifs)[pos] = item$isaxcod
      }
    }
  }
  return (sttightmotifs)
}
#' Rank the STmotifs
#'
#' Rank motifs by their quality, combining the spatial-temporal spread of
#' their occurrences, a score of the SAX word, and the (log-scaled) number
#' of occurrences.
#' @param stmotifs List of identified motifs
#' @return The ranked version of the identified list of motifs
#' @examples
#' #Search for Spatial-time Motifs
#' D <- STMotif::example_dataset
#' DS <- NormSAX(STMotif::example_dataset,5)
#' stmotifs <- SearchSTMotifs(D,DS,4,5,4,10,2,2)
#' rstmotifs <- RankSTMotifs(stmotifs)
#' @export
RankSTMotifs <- function(stmotifs) {
  rstmotifs <- list()
  if (length(stmotifs) > 0) {
    # Build one ranking row per motif. Using lapply + a single rbind avoids
    # the original O(n^2) rbind-in-loop growth; the unused local `word` from
    # the original loop body is dropped.
    rank_rows <- lapply(stmotifs, function(stmotif) {
      occurrences <- data.frame(space = stmotif$vecst[["s"]], time = stmotif$vecst[["t"]])
      data.frame(dist = comp_distance(occurrences),
                 word = comp_word(stmotif$isaxcod),
                 qtd = log(nrow(occurrences), base = 2))
    })
    dataRank <- do.call(rbind, rank_rows)
    rownames(dataRank) <- seq_along(stmotifs)
    # `rank` here is the package-internal ranking helper, not base::rank.
    rstmotifs <- rank(dataRank, stmotifs)
  }
  rstmotifs
}
|
/R/mainFunction.R
|
no_license
|
cran/STMotif
|
R
| false
| false
| 5,691
|
r
|
#' Adjust a Dataset
#' Adjust the dimensions of a dataset to build the blocks
#' @param D Dataset containing numeric values
#' @param tb Temporal block size
#' @param sb Spatial block size
#' @return Dataset adjusted to build the blocks.
#' @examples
#' #Adjust a block
#' D <- STSADatasetAdjust(STMotif::example_dataset, 20, 12)
#' @export
STSADatasetAdjust <- function(D, tb, sb) {
c = ncol(D)
r = nrow(D)
ec = c %% sb
er = r %% tb
D = D[1:(r-er), 1:(c-ec)]
return (D)
}
#' CSAMiningProcess
#'
#' CSA Datamining Process
#' @param D Dataset containing numeric values
#' @param DS Dataset containing SAX encoded values
#' @param w Word Size
#' @param a Number of letters to do the encode
#' @param sb Spatial block size
#' @param tb Temporal block size
#' @param si Minimum number of occurrences inside each block
#' @param ka Minimum number of spatial-time series with occurrences inside each block
#' @return Return a list of ranked motifs. Each motif contains the information [isaxcode, recmatrix, vectst, rank], as described:
#' @return isaxcode: Motif sequences in character format
#' @return recmatrix: Matrix giving as information the blocks containing this motif
#' @return vectst: Coordinate of the start positions of the motif in the original dataset
#' @return rank: L of information used for motif ranking, as [dist, word, qtd, proj]
#' @examples
#' #CSA Datamining process
#' D <- STMotif::example_dataset
#' DS <- NormSAX(STMotif::example_dataset,5)
#' rmotif <- CSAMiningProcess(D,DS,4,5,4,10,2,2)
#' @export
CSAMiningProcess <- function (D,DS,w,a,sb,tb,si,ka){
DS <- NormSAX(D,a)
stmotifs <- SearchSTMotifs(D,DS,w,a,sb,tb,si,ka)
rstmotifs <- RankSTMotifs(stmotifs)
return(rstmotifs)
}
#' Normalize the data and SAX indexing
#' @param D Dataset containing numeric values
#' @param a Number of letters use to encode
#' @return A normalized and encoded dataset for a given alphabet a
#' @examples
#' #Normalization and Sax Dataset
#' DS <- NormSAX(STMotif::example_dataset, 5)
#' @export
NormSAX <- function (D,a){
vector <- as.matrix(D)
vector <- as.vector(vector)
vectorNorm <- (vector-mean(vector, na.rm = T))/stats::sd(vector, na.rm = T)
DS <- STSSaxEncode(D, vectorNorm, a)
return (DS)
}
#' SearchSTMotifs
#'
#' Search for Spatial-time Motifs
#' @param D Dataset containing numeric values
#' @param DS Dataset containing SAX encoded values
#' @param w Word Size
#' @param a Number of letters to do the encode
#' @param sb "Space slice" Number of columns in each block
#' @param tb "Time slice" Number of rows in each block
#' @param si Support of Global Occurrence (GO)
#' @param ka Support for Spatial Occurrence (SO)
#' @return Return a list of identified motifs. Each motif contains the information [isaxcode, recmatrix, vectst], as described:
#' @return isaxcode: Motif sequences in character format
#' @return recmatrix: Matrix giving as information the blocks containing this motif
#' @return vectst: Coordinate of the start positions of the motif in the original dataset
#' @examples
#' #Search for Spatial-time Motifs
#' D <- STMotif::example_dataset
#' DS <- NormSAX(STMotif::example_dataset,5)
#' stmotifs <- SearchSTMotifs(D,DS,4,5,4,10,2,2)
#' @export
SearchSTMotifs <- function (D,DS,w,a,sb,tb,si=3,ka=3){
saxblocks <- STSComputeBlocks(DS, tb, sb)
saxblocks$rectangles <- NULL
blocks <- STSComputeBlocks(D, tb, sb)
nrows = blocks$nrows
ncols = blocks$ncols
rectangles = blocks$rectangles
blocks$rectangles <- NULL
motifs<-list()
size=length(blocks$datasets)
for (i in 1:size) {
block = blocks$datasets[[i]]
saxblock = saxblocks$datasets[[i]]
block = as.vector(as.matrix(block))
saxblock = as.vector(as.matrix(saxblock))
motifs[[i]] <- identifyMotifsInBlock(ts = block, tss = saxblock, tb = tb ,w = w, a = a)
}
stmotifs <- list()
for (i in 1:length(motifs)) {
stmotifs <- STSIdentifySTMotif(stmotifs, motifs[[i]], nrows, ncols, rectangles[[i]], ka = ka, si = si)
}
sttightmotifs <- list()
if (length(stmotifs)>0){
for (i in 1:length(stmotifs)) {
stmotif = stmotifs[[i]]
s = stmotif$vecs
t = stmotif$vect
stmotif$vecst = data.frame(s, t)
stmotif$vecs <- NULL
stmotif$vect <- NULL
stmotifs[[i]] = stmotif
}
for(stmotif in (stmotifs)) {
sttightmotifsSplit <- STSIdentifyTightSTMotif(stmotif, rectangles)
for (item in (sttightmotifsSplit)) {
pos = length(sttightmotifs)+1
sttightmotifs[[pos]] <- item
names(sttightmotifs)[pos] = item$isaxcod
}
}
}
return (sttightmotifs)
}
#' Rank the STmotifs
#' Rank motifs by their quality
#' @param stmotifs List of identified motifs
#' @return The ranked version of the identified list of motifs
#' @examples
#' #Search for Spatial-time Motifs
#' D <- STMotif::example_dataset
#' DS <- NormSAX(STMotif::example_dataset,5)
#' stmotifs <- SearchSTMotifs(D,DS,4,5,4,10,2,2)
#' rstmotifs <- RankSTMotifs(stmotifs)
#' @export
RankSTMotifs <- function(stmotifs) {
rstmotifs<-list()
if(length(stmotifs)>0){
dataRank <- NULL
for (i in 1:length(stmotifs)) {
s <- stmotifs[[i]][["vecst"]][["s"]]
t <- stmotifs[[i]][["vecst"]][["t"]]
word <- stmotifs[[i]]$isaxcod
occurrences<- data.frame(space = s, time = t)
distance_rank <- comp_distance(occurrences)
word_rank <- comp_word(stmotifs[[i]]$isaxcod)
qtd_rank <- log(nrow(occurrences), base=2)
dataRank <- rbind(dataRank, data.frame(dist = distance_rank, word = word_rank, qtd=qtd_rank))
}
rownames(dataRank) <- c(1:length(stmotifs))
rstmotifs <- rank(dataRank,stmotifs)
}
return(rstmotifs)
}
|
install.packages("httr")
install.packages("plumber")
install.packages("data.table")
install.packages("RCurl")
|
/init.R
|
no_license
|
aliarsalankazmi/fb-r-messenger-bot
|
R
| false
| false
| 110
|
r
|
install.packages("httr")
install.packages("plumber")
install.packages("data.table")
install.packages("RCurl")
|
server <- function(input, output) {
# SPARK VALUE BOX THEME ------------------------------------------------------
  # Build a shinydashboard-style valueBox that can additionally embed a
  # sparkline htmlwidget (`sparkobj`) and an info tooltip inside the box.
  #
  #   value/title/subtitle : main texts of the box
  #   sparkobj             : optional htmlwidget shown under the value
  #   info                 : tooltip text for the info icon (icon only shown
  #                          when sparkobj is supplied)
  #   icon/color/width/href: as in shinydashboard::valueBox
  valueBoxSpark <-
    function(value,
             title,
             sparkobj = NULL,
             subtitle,
             info = NULL,
             icon = NULL,
             color = "aqua",
             width = 12,
             href = NULL){
      # NOTE(review): uses shinydashboard internals (:::), which may break
      # across package versions.
      shinydashboard:::validateColor(color)
      if (!is.null(icon))
        shinydashboard:::tagAssert(icon, type = "i")
      # Small info icon with a native tooltip, floated to the right of the box.
      info_icon <- tags$small(
        tags$i(
          class = "fa fa-info-circle fa-lg",
          title = info,
          `data-toggle` = "tooltip",
          style = "color: rgba(255, 255, 255, 0.75);"
        ),
        # bs3 pull-right
        # bs4 float-right
        class = "pull-right float-right"
      )
      # Assemble the box: title, optional info icon, value, optional sparkline,
      # subtitle, and an optional large background icon.
      boxContent <- div(
        class = paste0("small-box bg-", color),
        div(
          class = "inner",
          tags$small(title),
          if (!is.null(sparkobj)) info_icon,
          h3(value),
          if (!is.null(sparkobj)) sparkobj,
          p(subtitle)
        ),
        # bs3 icon-large
        # bs4 icon
        if (!is.null(icon)) div(class = "icon-large icon", icon, style = "z-index; 0")
      )
      # Optionally wrap the whole box in a link.
      if (!is.null(href))
        boxContent <- a(href = href, boxContent)
      # Column wrapper mirroring shinydashboard::valueBox's width handling.
      div(
        class = if (!is.null(width)) paste0("col-sm-", width),
        boxContent
      )
    }
# OVERVIEW TAB - START -------------------------------------------------------
output$overviewSalesGrowth <- renderValueBox({
salesGrowth <-
df %>%
group_by(Year) %>%
summarise(`Total Sales` = sum(Sales)) %>%
mutate_each(funs(factor(.)), c("Year")) %>%
mutate(GrowthValue = `Total Sales` - lag(`Total Sales`),
GrowthPerc = (`Total Sales` - lag(`Total Sales`)) / `Total Sales`) %>%
mutate(GrowthValue = replace_na(GrowthValue, 0),
GrowthPerc = replace_na(GrowthPerc, 0))
hcSalesGrowth <-
salesGrowth %>%
hchart("area", hcaes(x = Year, y = GrowthValue), name = "Sales Growth") %>%
hc_size(height = 50) %>%
hc_credits(enabled = F) %>%
hc_tooltip(enabled = F) %>%
hc_add_theme(hc_theme_sparkline_vb())
vbSalesGrowth <- valueBoxSpark(
value = dollar(mean(salesGrowth$GrowthValue), prefix = "$", big.mark = ",", decimal.mark = ".", accuracy = .01),
title = toupper("AVERAGE SALES GROWTH"),
sparkobj = hcSalesGrowth,
info = "This is the sales growth from the first day until today",
subtitle = tagList("Growth per year ",
HTML("↑"),
percent(mean(salesGrowth$GrowthPerc),
decimal.mark = ".",
accuracy = .01)),
icon = icon("money-bill-wave"),
color = "teal",
href = NULL
)
vbSalesGrowth
})
output$overviewProfitGrowth <- renderValueBox({
profitGrowth <-
df %>%
group_by(Year) %>%
summarise(`Total Profit` = sum(Profit)) %>%
mutate_each(funs(factor(.)), c("Year")) %>%
mutate(GrowthValue = `Total Profit` - lag(`Total Profit`),
GrowthPerc = (`Total Profit` - lag(`Total Profit`)) / `Total Profit`) %>%
mutate(GrowthValue = replace_na(GrowthValue, 0),
GrowthPerc = replace_na(GrowthPerc, 0))
hcProfitGrowth <-
profitGrowth %>%
hchart("area", hcaes(x = Year, y = GrowthValue), name = "Profit Growth") %>%
hc_size(height = 50) %>%
hc_credits(enabled = F) %>%
hc_tooltip(enabled = F) %>%
hc_add_theme(hc_theme_sparkline_vb())
vbProfitGrowth <-
valueBoxSpark(
value = dollar(mean(profitGrowth$GrowthValue), prefix = "$", big.mark = ",", decimal.mark = ".", accuracy = .01),
title = toupper("AVERAGE PROFIT GROWTH"),
sparkobj = hcProfitGrowth,
info = "This is the profit growth from the first day until today",
subtitle = tagList("Growth per year ",
HTML("↑"),
percent(mean(profitGrowth$GrowthPerc),
decimal.mark = ".",
accuracy = .01),
),
icon = icon("hand-holding-usd"),
color = "teal",
href = NULL
)
vbProfitGrowth
})
  # Value box: month with the highest average number of transactions,
  # plus a sparkline of that month's transaction count per year.
  output$overviewSalesSeason <- renderValueBox({
    # Average transaction count per calendar month across years.
    salesSeason <-
      df %>%
      mutate(Month = month(`Order Date`, label = T, abbr = F)) %>%
      group_by(Year, Month) %>%
      summarise(Sales.Number = length(Sales)) %>%
      group_by(Month) %>%
      mutate(Sales.Number = mean(Sales.Number))
    # Row(s) belonging to the peak month (highest average count).
    peakSeason <- salesSeason[salesSeason$Sales.Number == max(salesSeason$Sales.Number), ]
    # Per-year transaction counts for the peak month only (sparkline data).
    peakSeason2 <-
      df %>%
      mutate(Month = month(`Order Date`, label = T, abbr = F)) %>%
      filter(Month == peakSeason$Month[1]) %>%
      group_by(Year, Month) %>%
      summarise(Sales.Number = length(Sales))
    hcSalesSeason <-
      peakSeason2 %>%
      hchart(
        "area",
        hcaes(x = Year, y = Sales.Number)) %>%
      hc_size(height = 50) %>%
      hc_tooltip(enabled = F) %>%
      hc_add_theme(hc_theme_sparkline_vb())
    vbSalesSeason <-
      valueBoxSpark(
        value = peakSeason$Month[1],
        title = toupper("PEAK SALES MONTH"),
        sparkobj = hcSalesSeason,
        info = "Graph showing number of time on peak month each year",
        subtitle = paste("Avg. sales on peak month :",
                         number(peakSeason$Sales.Number[1],
                                big.mark = ","),
                         "transaction"),
        icon = icon("calendar-alt"),
        color = "teal",
        href = NULL
      )
    vbSalesSeason
  })
  # Donut chart: total profit aggregated per market, with a currency
  # tooltip formatted client-side in JS.
  output$overviewProfitByMarket <- renderEcharts4r({
    df %>%
      group_by(Market) %>%
      summarise(Profit = sum(Profit)) %>%
      arrange(Profit) %>%
      e_chart(Market) %>%
      e_pie(Profit, radius = c ("50%", "75%")) %>%
      e_theme_custom("www/Chart_Theme.json") %>%
      e_title(text = "Profit by Market",
              left = "center",
              top = "0") %>%
      e_legend(F) %>%
      e_tooltip(trigger = "item",
                formatter = JS("
                  function(params){return(
                  '<b>' + params.name + '</b>'
                  + ' : $'
                  + (params.value).toLocaleString('en-US',
                  {maximumFractionDigits : 2, minimumFractionDigits: 2})
                  )}
                  "))
  })
  # Donut chart: total profit aggregated per customer segment, with a
  # currency tooltip formatted client-side in JS.
  output$overviewProfitBySegment <- renderEcharts4r({
    df %>%
      group_by(Segment) %>%
      summarise(Profit = sum(Profit)) %>%
      arrange(Profit) %>%
      e_chart(Segment) %>%
      e_pie(Profit, radius = c ("50%", "75%")) %>%
      e_theme_custom("www/Chart_Theme.json") %>%
      e_title(text = "Profit by Segment",
              left = "center",
              top = "0") %>%
      e_legend(F) %>%
      e_tooltip(trigger = "item",
                formatter = JS("
                  function(params){return(
                  '<b>' + params.name + '</b>'
                  + ': $'
                  + (params.value).toLocaleString('en-US',
                  {maximumFractionDigits: 2, minimumFractionDigits: 2})
                  )}
                  "))
  })
  # Horizontal bars: top 5 most profitable sub-categories, excluding
  # returned orders. `Returned` is NA for non-returned rows, so recode
  # NA -> "No" before filtering.
  output$overviewProfitByCategory <- renderEcharts4r({
    df %>%
      mutate(Returned = replace_na(as.character(Returned), "No")) %>%
      filter(Returned == "No") %>%
      group_by(`Sub-Category`) %>%
      summarise(Profit = sum(Profit)) %>%
      arrange(-Profit) %>%
      head(5) %>%
      e_charts(`Sub-Category`) %>%
      e_bar(Profit) %>%
      e_flip_coords() %>%
      e_y_axis(inverse = TRUE) %>%
      e_theme_custom("www/Chart_Theme.json") %>%
      e_title(text = "Most Profitable Sub-category",
              left = "center",
              top = "0") %>%
      e_legend(show = FALSE) %>%
      e_axis_labels(x = "Profit") %>%
      e_x_axis(name = "Profit",
               nameLocation = "center",
               nameGap = "25",
               formatter = e_axis_formatter(style = c("currency"), currency = "USD")) %>%
      e_tooltip(trigger = "item",
                formatter = JS("
                  function(params){return(
                  '<b>' + params.name + '</b>'
                  + ' : $'
                  + params.value[0]
                  )}
                  "))
  })
  # Horizontal bars: the 5 sub-categories with the largest profit lost
  # to returned orders.
  output$overviewProfitMissed <- renderEcharts4r({
    df %>%
      filter(Returned == "Yes") %>%
      group_by(`Sub-Category`) %>%
      summarise(`Missed Profit` = sum(Profit)) %>%
      arrange(-`Missed Profit`) %>%
      head(5) %>%
      e_charts(`Sub-Category`) %>%
      e_bar(`Missed Profit`) %>%
      e_theme_custom("www/Chart_Theme.json") %>%
      e_legend(show = FALSE) %>%
      e_flip_coords() %>%
      e_y_axis(inverse = TRUE) %>%
      e_x_axis(name = "Missed Profit",
               nameLocation = "center",
               nameGap = "25",
               formatter = e_axis_formatter(style = "currency", currency = "USD")) %>%
      e_title(text = "Most Missed Profit by Sub-category (Returned)",
              left = "center",
              top = "0") %>%
      e_tooltip(trigger = "item",
                formatter = JS("
                  function(params){
                  return('<b>' + params.name + ':' + '</b>' + ' $' +
                  params.value[0])
                  }"))
  })
# OVERVIEW TAB - END ---------------------------------------------------------
# MAP TAB - START ------------------------------------------------------------
output$salesMap <- renderEcharts4r({
df_map <-
df %>%
mutate(Year = year(`Order Date`)) %>%
filter(Category == input$categorySelector,
Year == input$yearSelector)
if (input$valueSelector == "Profit") {
plot_map <-
df_map %>%
group_by(Country) %>%
summarise(Total = sum(Profit)) %>%
e_charts(Country) %>%
e_map(Total) %>%
e_visual_map(Total) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
'<b>' + params.name
+ ':' + '</b>'
+ ' $' +
(params.value).toLocaleString('en-US',
{maximumFractionDigits: 2, minimumFractionDigits: 2 })
)}
"))
}
else if (input$valueSelector == "Sales") {
plot_map <-
df_map %>%
group_by(Country) %>%
summarise(Total = sum(Sales)) %>%
e_charts(Country) %>%
e_map(Total) %>%
e_visual_map(Total) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
'<b>' + params.name
+ ':' + '</b>'
+ ' $'
+ (params.value).toLocaleString('en-US',
{maximumFractionDigits: 2, minimumFractionDigits: 2 })
)}
"))
}
else {
plot_map <-
df_map %>%
group_by(Country) %>%
summarise(Total = length(Sales)) %>%
e_charts(Country) %>%
e_map(Total) %>%
e_visual_map(Total) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
'<b>' + params.name
+ ': ' + '</b>'
+ params.value
+ ' transactions'
)}
"))
}
plot_map
})
# MAP TAB - END --------------------------------------------------------------
# COUNTRY TAB - START --------------------------------------------------------
  # COUNTRY TAB: monthly sales value, one line per segment, filtered by
  # country, date range, and category checkboxes. Min/max points marked.
  output$countrySales <- renderEcharts4r({
    df %>%
      filter(Country == input$countrySelector) %>%
      filter(Mon.Year >= input$dateSelector[1] & Mon.Year <= input$dateSelector[2]) %>%
      filter(Category %in% input$categoryCheckSelector) %>%
      group_by(Segment, Mon.Year) %>%
      summarise(Sales = sum(Sales)) %>%
      group_by(Segment) %>%
      e_charts(Mon.Year) %>%
      e_line(Sales) %>%
      e_theme_custom("www/Chart_Theme.json") %>%
      e_title(text = "Value of Sales for Each Segment",
              top = "0",
              left = "center") %>%
      e_legend(top = "30") %>%
      e_y_axis(formatter = e_axis_formatter(style = "currency", currency = "USD")) %>%
      e_axis_labels(y = "Value of Sales") %>%
      e_mark_point(data = list(type = "max"),
                   title = "Max") %>%
      e_mark_point(data = list(type = "min"),
                   title = "Min") %>%
      e_tooltip(
      trigger = "item",
      formatter = JS("
                  function(params) {return(
                  '<b>' + params.value[0] + '</b>'
                  + ': $'
                  + params.value[1].toLocaleString('en-US',
                  {maximumFractionDigits: 2, minimumFractionDigits: 2 })
                  )}
                  ")
      )
  })
  # COUNTRY TAB: monthly profit, one line per segment, filtered by
  # country, date range, and category checkboxes. Min/max points marked.
  output$countryProfit <- renderEcharts4r({
    df %>%
      filter(Country == input$countrySelector) %>%
      filter(Mon.Year >= input$dateSelector[1] & Mon.Year <= input$dateSelector[2]) %>%
      filter(Category %in% input$categoryCheckSelector) %>%
      group_by(Segment, Mon.Year) %>%
      summarise(Profit = sum(Profit)) %>%
      group_by(Segment) %>%
      e_charts(Mon.Year) %>%
      e_line(Profit) %>%
      e_theme_custom("www/Chart_Theme.json") %>%
      e_title(text = "Profit for Each Segment",
              top = "0",
              left = "center") %>%
      e_legend(top = "30") %>%
      e_y_axis(formatter = e_axis_formatter(style = "currency", currency = "USD")) %>%
      e_axis_labels(y = "Profit") %>%
      e_mark_point(data = list(type = "max"),
                   title = "Max") %>%
      e_mark_point(data = list(type = "min"),
                   title = "Min") %>%
      e_tooltip(
      trigger = "item",
      formatter = JS("
                  function(params) {return(
                  '<b>' + params.value[0] + '</b>'
                  + ': $'
                  + params.value[1].toLocaleString('en-US',
                  {maximumFractionDigits: 2, minimumFractionDigits: 2 })
                  )}
                  ")
      )
  })
output$topSubcategory <- renderEcharts4r({
df %>%
filter(Country == input$countrySelector) %>%
filter(Mon.Year >= input$dateSelector[1] & Mon.Year <= input$dateSelector[2]) %>%
filter(Category %in% input$categoryCheckSelector) %>%
group_by(`Sub-Category`) %>%
summarise(Total = sum(`Sales`)) %>%
arrange(Total) %>%
head(5) %>%
e_charts(`Sub-Category`) %>%
e_bar(Total) %>%
e_flip_coords() %>%
# e_y_axis(inverse = TRUE) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_legend(show = FALSE) %>%
e_title("Most Profitable Sub-category",
top = "0",
left = "center") %>%
e_x_axis(name = "Profit",
nameLocation = "center",
nameGap = "25",
formatter = e_axis_formatter(style = "currency", currency = "USD")) %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
params.value[1] + ' : '
+params.value[0]
)}
"))
})
  # COUNTRY TAB: horizontal stacked bars counting shipments per ship
  # mode, stacked by customer segment, using the same country/date/
  # category filters as the other country-tab charts.
  output$shippingStats <- renderEcharts4r({
    df %>%
      filter(Country == input$countrySelector) %>%
      filter(Mon.Year >= input$dateSelector[1] & Mon.Year <= input$dateSelector[2]) %>%
      filter(Category %in% input$categoryCheckSelector) %>%
      group_by(Segment, `Ship Mode`) %>%
      summarise(Total = length(`Ship Mode`)) %>%
      arrange(Total) %>%
      group_by(Segment) %>%
      e_charts(`Ship Mode`) %>%
      e_bar(Total, stack = "stack") %>%
      e_flip_coords() %>%
      e_theme_custom("www/Chart_Theme.json") %>%
      e_title(text = "Ship Mode by Segment",
              top = "0",
              left = "center") %>%
      e_legend(top = "30") %>%
      e_axis(name = "Number of Shipping (shipments)",
             nameLocation = "center",
             nameGap = "25") %>%
      e_tooltip(trigger = "item",
                formatter = JS("
                  function(params){return(
                  '<b>' + params.value[1] + '</b>'
                  + ' : '
                  + params.value[0]
                  + ' shipments'
                  )}
                  "))
  })
# COUNTRY TAB - END ----------------------------------------------------------
}
|
/Data_Visualization/server.R
|
no_license
|
kularudi/Capstone_Algoritma_Academy
|
R
| false
| false
| 17,760
|
r
|
server <- function(input, output) {
# SPARK VALUE BOX THEME ------------------------------------------------------
valueBoxSpark <-
function(value,
title,
sparkobj = NULL,
subtitle,
info = NULL,
icon = NULL,
color = "aqua",
width = 12,
href = NULL){
shinydashboard:::validateColor(color)
if (!is.null(icon))
shinydashboard:::tagAssert(icon, type = "i")
info_icon <- tags$small(
tags$i(
class = "fa fa-info-circle fa-lg",
title = info,
`data-toggle` = "tooltip",
style = "color: rgba(255, 255, 255, 0.75);"
),
# bs3 pull-right
# bs4 float-right
class = "pull-right float-right"
)
boxContent <- div(
class = paste0("small-box bg-", color),
div(
class = "inner",
tags$small(title),
if (!is.null(sparkobj)) info_icon,
h3(value),
if (!is.null(sparkobj)) sparkobj,
p(subtitle)
),
# bs3 icon-large
# bs4 icon
if (!is.null(icon)) div(class = "icon-large icon", icon, style = "z-index; 0")
)
if (!is.null(href))
boxContent <- a(href = href, boxContent)
div(
class = if (!is.null(width)) paste0("col-sm-", width),
boxContent
)
}
# OVERVIEW TAB - START -------------------------------------------------------
output$overviewSalesGrowth <- renderValueBox({
salesGrowth <-
df %>%
group_by(Year) %>%
summarise(`Total Sales` = sum(Sales)) %>%
mutate_each(funs(factor(.)), c("Year")) %>%
mutate(GrowthValue = `Total Sales` - lag(`Total Sales`),
GrowthPerc = (`Total Sales` - lag(`Total Sales`)) / `Total Sales`) %>%
mutate(GrowthValue = replace_na(GrowthValue, 0),
GrowthPerc = replace_na(GrowthPerc, 0))
hcSalesGrowth <-
salesGrowth %>%
hchart("area", hcaes(x = Year, y = GrowthValue), name = "Sales Growth") %>%
hc_size(height = 50) %>%
hc_credits(enabled = F) %>%
hc_tooltip(enabled = F) %>%
hc_add_theme(hc_theme_sparkline_vb())
vbSalesGrowth <- valueBoxSpark(
value = dollar(mean(salesGrowth$GrowthValue), prefix = "$", big.mark = ",", decimal.mark = ".", accuracy = .01),
title = toupper("AVERAGE SALES GROWTH"),
sparkobj = hcSalesGrowth,
info = "This is the sales growth from the first day until today",
subtitle = tagList("Growth per year ",
HTML("↑"),
percent(mean(salesGrowth$GrowthPerc),
decimal.mark = ".",
accuracy = .01)),
icon = icon("money-bill-wave"),
color = "teal",
href = NULL
)
vbSalesGrowth
})
output$overviewProfitGrowth <- renderValueBox({
profitGrowth <-
df %>%
group_by(Year) %>%
summarise(`Total Profit` = sum(Profit)) %>%
mutate_each(funs(factor(.)), c("Year")) %>%
mutate(GrowthValue = `Total Profit` - lag(`Total Profit`),
GrowthPerc = (`Total Profit` - lag(`Total Profit`)) / `Total Profit`) %>%
mutate(GrowthValue = replace_na(GrowthValue, 0),
GrowthPerc = replace_na(GrowthPerc, 0))
hcProfitGrowth <-
profitGrowth %>%
hchart("area", hcaes(x = Year, y = GrowthValue), name = "Profit Growth") %>%
hc_size(height = 50) %>%
hc_credits(enabled = F) %>%
hc_tooltip(enabled = F) %>%
hc_add_theme(hc_theme_sparkline_vb())
vbProfitGrowth <-
valueBoxSpark(
value = dollar(mean(profitGrowth$GrowthValue), prefix = "$", big.mark = ",", decimal.mark = ".", accuracy = .01),
title = toupper("AVERAGE PROFIT GROWTH"),
sparkobj = hcProfitGrowth,
info = "This is the profit growth from the first day until today",
subtitle = tagList("Growth per year ",
HTML("↑"),
percent(mean(profitGrowth$GrowthPerc),
decimal.mark = ".",
accuracy = .01),
),
icon = icon("hand-holding-usd"),
color = "teal",
href = NULL
)
vbProfitGrowth
})
output$overviewSalesSeason <- renderValueBox({
salesSeason <-
df %>%
mutate(Month = month(`Order Date`, label = T, abbr = F)) %>%
group_by(Year, Month) %>%
summarise(Sales.Number = length(Sales)) %>%
group_by(Month) %>%
mutate(Sales.Number = mean(Sales.Number))
peakSeason <- salesSeason[salesSeason$Sales.Number == max(salesSeason$Sales.Number), ]
peakSeason2 <-
df %>%
mutate(Month = month(`Order Date`, label = T, abbr = F)) %>%
filter(Month == peakSeason$Month[1]) %>%
group_by(Year, Month) %>%
summarise(Sales.Number = length(Sales))
hcSalesSeason <-
peakSeason2 %>%
hchart(
"area",
hcaes(x = Year, y = Sales.Number)) %>%
hc_size(height = 50) %>%
hc_tooltip(enabled = F) %>%
hc_add_theme(hc_theme_sparkline_vb())
vbSalesSeason <-
valueBoxSpark(
value = peakSeason$Month[1],
title = toupper("PEAK SALES MONTH"),
sparkobj = hcSalesSeason,
info = "Graph showing number of time on peak month each year",
subtitle = paste("Avg. sales on peak month :",
number(peakSeason$Sales.Number[1],
big.mark = ","),
"transaction"),
icon = icon("calendar-alt"),
color = "teal",
href = NULL
)
vbSalesSeason
})
output$overviewProfitByMarket <- renderEcharts4r({
df %>%
group_by(Market) %>%
summarise(Profit = sum(Profit)) %>%
arrange(Profit) %>%
e_chart(Market) %>%
e_pie(Profit, radius = c ("50%", "75%")) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_title(text = "Profit by Market",
left = "center",
top = "0") %>%
e_legend(F) %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
'<b>' + params.name + '</b>'
+ ' : $'
+ (params.value).toLocaleString('en-US',
{maximumFractionDigits : 2, minimumFractionDigits: 2})
)}
"))
})
output$overviewProfitBySegment <- renderEcharts4r({
df %>%
group_by(Segment) %>%
summarise(Profit = sum(Profit)) %>%
arrange(Profit) %>%
e_chart(Segment) %>%
e_pie(Profit, radius = c ("50%", "75%")) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_title(text = "Profit by Segment",
left = "center",
top = "0") %>%
e_legend(F) %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
'<b>' + params.name + '</b>'
+ ': $'
+ (params.value).toLocaleString('en-US',
{maximumFractionDigits: 2, minimumFractionDigits: 2})
)}
"))
})
output$overviewProfitByCategory <- renderEcharts4r({
df %>%
mutate(Returned = replace_na(as.character(Returned), "No")) %>%
filter(Returned == "No") %>%
group_by(`Sub-Category`) %>%
summarise(Profit = sum(Profit)) %>%
arrange(-Profit) %>%
head(5) %>%
e_charts(`Sub-Category`) %>%
e_bar(Profit) %>%
e_flip_coords() %>%
e_y_axis(inverse = TRUE) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_title(text = "Most Profitable Sub-category",
left = "center",
top = "0") %>%
e_legend(show = FALSE) %>%
e_axis_labels(x = "Profit") %>%
e_x_axis(name = "Profit",
nameLocation = "center",
nameGap = "25",
formatter = e_axis_formatter(style = c("currency"), currency = "USD")) %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
'<b>' + params.name + '</b>'
+ ' : $'
+ params.value[0]
)}
"))
})
output$overviewProfitMissed <- renderEcharts4r({
df %>%
filter(Returned == "Yes") %>%
group_by(`Sub-Category`) %>%
summarise(`Missed Profit` = sum(Profit)) %>%
arrange(-`Missed Profit`) %>%
head(5) %>%
e_charts(`Sub-Category`) %>%
e_bar(`Missed Profit`) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_legend(show = FALSE) %>%
e_flip_coords() %>%
e_y_axis(inverse = TRUE) %>%
e_x_axis(name = "Missed Profit",
nameLocation = "center",
nameGap = "25",
formatter = e_axis_formatter(style = "currency", currency = "USD")) %>%
e_title(text = "Most Missed Profit by Sub-category (Returned)",
left = "center",
top = "0") %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){
return('<b>' + params.name + ':' + '</b>' + ' $' +
params.value[0])
}"))
})
# OVERVIEW TAB - END ---------------------------------------------------------
# MAP TAB - START ------------------------------------------------------------
output$salesMap <- renderEcharts4r({
df_map <-
df %>%
mutate(Year = year(`Order Date`)) %>%
filter(Category == input$categorySelector,
Year == input$yearSelector)
if (input$valueSelector == "Profit") {
plot_map <-
df_map %>%
group_by(Country) %>%
summarise(Total = sum(Profit)) %>%
e_charts(Country) %>%
e_map(Total) %>%
e_visual_map(Total) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
'<b>' + params.name
+ ':' + '</b>'
+ ' $' +
(params.value).toLocaleString('en-US',
{maximumFractionDigits: 2, minimumFractionDigits: 2 })
)}
"))
}
else if (input$valueSelector == "Sales") {
plot_map <-
df_map %>%
group_by(Country) %>%
summarise(Total = sum(Sales)) %>%
e_charts(Country) %>%
e_map(Total) %>%
e_visual_map(Total) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
'<b>' + params.name
+ ':' + '</b>'
+ ' $'
+ (params.value).toLocaleString('en-US',
{maximumFractionDigits: 2, minimumFractionDigits: 2 })
)}
"))
}
else {
plot_map <-
df_map %>%
group_by(Country) %>%
summarise(Total = length(Sales)) %>%
e_charts(Country) %>%
e_map(Total) %>%
e_visual_map(Total) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
'<b>' + params.name
+ ': ' + '</b>'
+ params.value
+ ' transactions'
)}
"))
}
plot_map
})
# MAP TAB - END --------------------------------------------------------------
# COUNTRY TAB - START --------------------------------------------------------
output$countrySales <- renderEcharts4r({
df %>%
filter(Country == input$countrySelector) %>%
filter(Mon.Year >= input$dateSelector[1] & Mon.Year <= input$dateSelector[2]) %>%
filter(Category %in% input$categoryCheckSelector) %>%
group_by(Segment, Mon.Year) %>%
summarise(Sales = sum(Sales)) %>%
group_by(Segment) %>%
e_charts(Mon.Year) %>%
e_line(Sales) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_title(text = "Value of Sales for Each Segment",
top = "0",
left = "center") %>%
e_legend(top = "30") %>%
e_y_axis(formatter = e_axis_formatter(style = "currency", currency = "USD")) %>%
e_axis_labels(y = "Value of Sales") %>%
e_mark_point(data = list(type = "max"),
title = "Max") %>%
e_mark_point(data = list(type = "min"),
title = "Min") %>%
e_tooltip(
trigger = "item",
formatter = JS("
function(params) {return(
'<b>' + params.value[0] + '</b>'
+ ': $'
+ params.value[1].toLocaleString('en-US',
{maximumFractionDigits: 2, minimumFractionDigits: 2 })
)}
")
)
})
output$countryProfit <- renderEcharts4r({
df %>%
filter(Country == input$countrySelector) %>%
filter(Mon.Year >= input$dateSelector[1] & Mon.Year <= input$dateSelector[2]) %>%
filter(Category %in% input$categoryCheckSelector) %>%
group_by(Segment, Mon.Year) %>%
summarise(Profit = sum(Profit)) %>%
group_by(Segment) %>%
e_charts(Mon.Year) %>%
e_line(Profit) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_title(text = "Profit for Each Segment",
top = "0",
left = "center") %>%
e_legend(top = "30") %>%
e_y_axis(formatter = e_axis_formatter(style = "currency", currency = "USD")) %>%
e_axis_labels(y = "Profit") %>%
e_mark_point(data = list(type = "max"),
title = "Max") %>%
e_mark_point(data = list(type = "min"),
title = "Min") %>%
e_tooltip(
trigger = "item",
formatter = JS("
function(params) {return(
'<b>' + params.value[0] + '</b>'
+ ': $'
+ params.value[1].toLocaleString('en-US',
{maximumFractionDigits: 2, minimumFractionDigits: 2 })
)}
")
)
})
output$topSubcategory <- renderEcharts4r({
df %>%
filter(Country == input$countrySelector) %>%
filter(Mon.Year >= input$dateSelector[1] & Mon.Year <= input$dateSelector[2]) %>%
filter(Category %in% input$categoryCheckSelector) %>%
group_by(`Sub-Category`) %>%
summarise(Total = sum(`Sales`)) %>%
arrange(Total) %>%
head(5) %>%
e_charts(`Sub-Category`) %>%
e_bar(Total) %>%
e_flip_coords() %>%
# e_y_axis(inverse = TRUE) %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_legend(show = FALSE) %>%
e_title("Most Profitable Sub-category",
top = "0",
left = "center") %>%
e_x_axis(name = "Profit",
nameLocation = "center",
nameGap = "25",
formatter = e_axis_formatter(style = "currency", currency = "USD")) %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
params.value[1] + ' : '
+params.value[0]
)}
"))
})
output$shippingStats <- renderEcharts4r({
df %>%
filter(Country == input$countrySelector) %>%
filter(Mon.Year >= input$dateSelector[1] & Mon.Year <= input$dateSelector[2]) %>%
filter(Category %in% input$categoryCheckSelector) %>%
group_by(Segment, `Ship Mode`) %>%
summarise(Total = length(`Ship Mode`)) %>%
arrange(Total) %>%
group_by(Segment) %>%
e_charts(`Ship Mode`) %>%
e_bar(Total, stack = "stack") %>%
e_flip_coords() %>%
e_theme_custom("www/Chart_Theme.json") %>%
e_title(text = "Ship Mode by Segment",
top = "0",
left = "center") %>%
e_legend(top = "30") %>%
e_axis(name = "Number of Shipping (shipments)",
nameLocation = "center",
nameGap = "25") %>%
e_tooltip(trigger = "item",
formatter = JS("
function(params){return(
'<b>' + params.value[1] + '</b>'
+ ' : '
+ params.value[0]
+ ' shipments'
)}
"))
})
# COUNTRY TAB - END ----------------------------------------------------------
}
|
# K-Means Clustering
# Importing the dataset
dataset <- read.csv('parkinsons.csv')
# Drop the first column (subject identifier) and the 17th remaining column
# (presumably the class label -- confirm against parkinsons.csv) so the
# clustering stays unsupervised.
dataset <- dataset[,-1]
dataset <- dataset[,-17]
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
# library(caTools)
# set.seed(123)
# split = sample.split(dataset$DependentVariable, SplitRatio = 0.8)
# training_set = subset(dataset, split == TRUE)
# test_set = subset(dataset, split == FALSE)
# Feature Scaling
# training_set = scale(training_set)
# test_set = scale(test_set)
# Using the elbow method to find the optimal number of clusters
# Fitting K-Means to the dataset
# Fixed seed keeps the random centroid initialisation reproducible.
set.seed(29)
# Renamed the result from `kmeans` to `km`: the old name shadowed
# stats::kmeans() itself. Also use `<-` for assignment per R convention.
km <- kmeans(x = dataset, centers = 2)
y_kmeans <- km$cluster
# Print cluster assignments to file
sink("output_parkinsons.txt")
print(y_kmeans)
sink()
# Visualising the clusters
|
/R-kodovi/parkinsons.R
|
no_license
|
pimaja/Ant-Colony-Optimization-Clustering
|
R
| false
| false
| 808
|
r
|
# K-Means Clustering
# Importing the dataset
dataset = read.csv('parkinsons.csv')
dataset = dataset[,-1]
dataset = dataset[,-17]
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
# library(caTools)
# set.seed(123)
# split = sample.split(dataset$DependentVariable, SplitRatio = 0.8)
# training_set = subset(dataset, split == TRUE)
# test_set = subset(dataset, split == FALSE)
# Feature Scaling
# training_set = scale(training_set)
# test_set = scale(test_set)
# Using the elbow method to find the optimal number of clusters
# Fitting K-Means to the dataset
set.seed(29)
kmeans = kmeans(x = dataset, centers = 2)
y_kmeans = kmeans$cluster
#Print to file
sink("output_parkinsons.txt")
print(y_kmeans)
sink()
# Visualising the clusters
|
/Step1/2_40_cells_01_Lecture_des_donnees_+_Normalisation.R
|
no_license
|
HKeyHKey/Rodriguez-Martinez2017
|
R
| false
| false
| 9,783
|
r
| ||
#Zero-integral basis
# Build a basis of zero-integral B-splines (ZB-splines) of order k on the
# given knot sequence.
#
# knots : increasing knot vector (breaks of the spline space).
# k     : spline order (degree + 1).
#
# Returns a list with:
#   C0 : collocation matrix of the zero-integral basis on a 1000-point grid,
#   M0 : Gram matrix of the zero-integral basis (numerical inner products),
#   K  : the difference matrix L mapping coefficients,
#   D  : the scaling matrix from the B-spline derivative relation.
ZsplineBasis = function(knots,k)
{
  library(fda)   # create.bspline.basis / eval.basis come from fda
  r = length(knots)
  lambda_index = c(0:(r-1))
  g = lambda_index[length(lambda_index) - 1]
  N = g+(k-1)+1          # number of B-splines in the full basis
  # Extended knot vector: k-1 repeated boundary knots on each side.
  lambda = c(rep(min(knots),k-1),knots,rep(max(knots),k-1))
  div = seq(min(lambda), max(lambda), length = 1000)
  # standard B-spline basis; collocation matrix := C
  splajn.basis = create.bspline.basis(range(knots),nbasis = N , norder = k, breaks = knots)
  C = eval.basis(div, splajn.basis)
  # Matrix D
  differ = lambda[(1+k):(r+2*(k-1))] - lambda[(1:(r+k-2))]
  D = (k)*diag(1/differ)
  # Matrix L (first-difference mapping between coefficient spaces)
  L = array(0, c(N,N-1))
  L[1,1]=1
  L[N,N-1]=-1
  for (j in (2:(N-1))){
    L[j,j-1] = (-1)
    L[j,j] = 1
  }
  # Spline0 basis: collocation matrix C0
  C0 = C%*%D%*%L
  # Trapezoidal rule for samples c taken at uniform spacing `step`.
  SLP=function(step, c){
    integral = step*(0.5*c[1]+sum(c[2:(length(c)-1)]) +0.5*c[length(c)])
    return (integral)
  }
  # Finer grid for the Gram-matrix quadrature.
  division = seq(min(lambda), max(lambda), length = 10000)
  # BUG FIX: the quadrature step must match the grid the basis is sampled
  # on. Previously step = diff(div[1:2]) (the coarse 1000-point grid) was
  # used to integrate samples taken on `division` (10000 points), inflating
  # every entry of M by roughly a factor of 10.
  step = diff(division[1:2])
  CC = eval.basis(division, splajn.basis)
  CC0 = CC%*%D%*%L
  M=array(0, c(N-1,N-1))
  for (i in 1:(N-1)){
    for (j in 1:(N-1)){
      # (Removed dead code that collected non-zero products into an unused
      # `non_null` vector while indexing past the sampling grid.)
      M[i,j]=SLP(step, CC0[,i]*CC0[,j])
    }
  }
  return(list(C0 = C0, M0 = M, K = L, D = D))
}
#ZsplineBasis(knots,k)
|
/SFPCA/NulBaze.R
|
no_license
|
AMenafoglio/BayesSpaces-codes
|
R
| false
| false
| 1,496
|
r
|
# Zero-integral B-spline ("Z-spline") basis.
#
# Builds a basis of N-1 spline functions whose construction is
# C0 = C %*% D %*% L, where C is the collocation matrix of an ordinary
# B-spline basis of order k on the given knots, D is a diagonal scaling
# matrix, and L is a first-difference map.
#
# Args:
#   knots: numeric vector of knots (including the boundary knots).
#   k:     spline order (degree + 1).
#
# Returns a list with:
#   C0: collocation matrix of the zero-integral basis on a 1000-point grid.
#   M0: (N-1) x (N-1) Gram matrix of the basis, approximated with the
#       composite trapezoidal rule on a 10000-point grid.
#   K:  the N x (N-1) difference matrix L.
#   D:  the diagonal scaling matrix.
ZsplineBasis <- function(knots, k) {
  library(fda)
  r <- length(knots)
  lambda_index <- c(0:(r - 1))
  g <- lambda_index[length(lambda_index) - 1]
  N <- g + (k - 1) + 1
  # Extended knot sequence: k-1 replicated boundary knots on each side.
  lambda <- c(rep(min(knots), k - 1), knots, rep(max(knots), k - 1))
  div <- seq(min(lambda), max(lambda), length = 1000)
  # Standard B-spline basis; collocation matrix C on the coarse grid.
  splajn.basis <- create.bspline.basis(range(knots), nbasis = N, norder = k, breaks = knots)
  C <- eval.basis(div, splajn.basis)
  # Matrix D: diagonal entries k / (lambda_{i+k} - lambda_i).
  differ <- lambda[(1 + k):(r + 2 * (k - 1))] - lambda[(1:(r + k - 2))]
  D <- k * diag(1 / differ)
  # Matrix L: maps N-1 coefficients to N via first differences.
  L <- array(0, c(N, N - 1))
  L[1, 1] <- 1
  L[N, N - 1] <- -1
  for (j in 2:(N - 1)) {
    L[j, j - 1] <- -1
    L[j, j] <- 1
  }
  # Zero-integral basis: collocation matrix C0.
  C0 <- C %*% D %*% L
  # Gram matrix M via the composite trapezoidal rule on a finer grid.
  division <- seq(min(lambda), max(lambda), length = 10000)
  # BUG FIX: the trapezoid step must be the spacing of `division` (the grid
  # the products are actually sampled on), not of the coarser `div` grid.
  # The original code used diff(div[1:2]) and so scaled M by roughly 10x.
  step <- diff(division[1:2])
  CC <- eval.basis(division, splajn.basis)
  CC0 <- CC %*% D %*% L
  # Trapezoidal weights: 1/2 at the two end points, 1 elsewhere.
  w <- rep(1, nrow(CC0))
  w[1] <- 0.5
  w[length(w)] <- 0.5
  # M[i, j] = step * sum(w * CC0[, i] * CC0[, j]), computed for all pairs at
  # once; this also replaces a dead `non_null` bookkeeping loop that iterated
  # over the wrong grid length and whose result was never used.
  M <- step * crossprod(CC0, w * CC0)
  return(list(C0 = C0, M0 = M, K = L, D = D))
}
#ZsplineBasis(knots,k)
|
### ----------------------------------------------------------------------------
### This is the set of simulations related to the Athens-Clarke county primary
### service area.
### Counties are limited to Clarke, Oconee, Barrow, Madison, Jackson, Oglethorpe
### Georgia-specific simulations have been updated with starting values based
### on what has been seen in the ACC area. This also includes the start date
### of the epidemic for this area and the date of intervention, selected to be
### the shelter in home order from the ACC government.
### ----------------------------------------------------------------------------
# NOTE(review): rm(list = ls()) wipes the caller's workspace when this script
# is sourced interactively; consider dropping it and running in a fresh session.
rm(list = ls())
# model_fncs.R supplies evaluate.model() and plot.model.acc() used below.
source("Code/model_fncs.R")
library(ggplot2)
### Read and Format Athens Cases Data ------------------------------------------
acc_df <- read.csv("Data/ACC Healthcare Region Simulation - Case Counts by County GA.csv")
# Primary service area
# Clarke, Oconee, Jackson, Madison, Oglethorpe, Barrow
# Not sure if Barrow actually counts
# Jackson and Oglethorpe not in dataset
# Treat missing county counts as zero reported cases for that day.
acc_df[is.na(acc_df)] <- 0
# Daily primary-service-area count and its running cumulative total.
acc_df$primary <- acc_df$Clarke + acc_df$Oconee + acc_df$Barrow + acc_df$Madison
acc_df$primary_cum <- cumsum(acc_df$primary)
names(acc_df)[1] <- "Date"
acc_df$Date <- as.Date(acc_df$Date)
# Currently cut off for 3/24
# NOTE(review): despite the comment above, the cutoff is actually today's date;
# which() returns integer(0) and this subset breaks if the CSV has no row dated
# Sys.Date() -- confirm the data file is updated daily.
acc_df <- acc_df[1:which(acc_df$Date == as.character(Sys.Date())), ]
# Plot of daily Athens cases
ggplot(data = acc_df, mapping = aes(x = Date, y = primary)) +
geom_bar(stat = "identity") +
scale_x_date(breaks = function(x) seq.Date(from = min(x)+2,
to = max(x),
by = "3 days"), date_labels = "%b %d")+
# minor_breaks = function(x) seq.Date(from = min(x),
# to = max(x),
# by = "2 years")) +
labs(x = "Day",
y = "New Cases (Primary Service Area)") +
theme_classic() +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Plot of cumulative Athens cases
ggplot(data = acc_df, mapping = aes(x = Date, y = primary_cum)) +
geom_bar(stat = "identity") +
scale_x_date(breaks = function(x) seq.Date(from = min(x)+2,
to = max(x),
by = "3 days"), date_labels = "%b %d")+
labs(x = "Day",
y = "Cumulative Cases (Primary Service Area)") +
theme_classic() +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
### Read and Format Athens Scenarios -------------------------------------------
# Most difficult part is setting the initial numbers of E and I in the model
# Based on the Georgia work of Handel and Drake
# * Intermediate: 136 cases (15 early cases / 0.11); see Georgia model assumptions
# * Upper Bound: hard, not date of death in ACC area
# * will use GA scaling, 2*136 = 272
# * not using GA scaling, step up one day and go with intermediate method
# * then 24 early cases / 0.11 = 218
# * Lower Bound: hard, not really to two-ish week mark since first case
# * will use GA scaling, 1/8 * 136 = 17
# * not using GA scaling, step back one day and go with intermediate method
# * then 9 early cases / 0.11 = 82
scenarios <- read.csv("Data/athens_scenarios.csv")
# Only the first 8 are currently setup
# scenarios <- scenarios[c(1:8, 15), ]
# move columns 11 and 12 to the end
scenarios <- scenarios[, c(1:10, 13:31, 11, 12)]
### Natural Epidemic (Scenario 6) ----------------------------------------------
# ACC-area cases began on 3/14/20
# Major intervention 3/20/20 with shelter in place, etc. from government
# Intervention days then are 6 (w)
scen_row <- 6
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
# The four rate functions below pull their defaults from row `scen_row` of
# `scenarios` at call time; they are defined as globals and are presumably
# read by evaluate.model() from model_fncs.R -- confirm there. They are
# redefined before each scenario run.
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# NOTE(review): the defaults actually come from the scenarios table; the
# numbers on the line above look like a stale copy of older hard-coded values.
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
# eta returns 1/3 in both branches, i.e. the intervention leaves it unchanged.
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
# Transmission rate halves (beta.factor = 2) after intervention day w.
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
# First ACC-area case on 3/14/20
start = as.Date("2020-03-14")
# s drops the first two columns, so s[i, 1] corresponds to scenarios column 3
# (beta0) after the column reordering above.
s <- scenarios[,3:31]
i <- scen_row
out6 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out6, acc_df$Date, acc_df$primary,
log='y', title='Benchmark: Natural epidemic')
### Baseline Model (Scenario 7) ------------------------------------------------
# out.base <- evaluate.model(params=list(beta0=0.6584, sigma=1/6.4, z=0, b=0.143, a0=1/1.5, w=100, c=1, presymptomatic=1, dt=0.05),
# init = list(S=447451, E1=ei, E2=ei, E3=ei, E4=ei, E5=ei, E6=ei,
# I1 = ii, I2= ii, I3=ii, I4=ii, Iu1=0, Iu2=0, Iu3=0, Iu4=0,
# H=0, Ru=0, C=0),
# nsims=15, nstep=NULL, start=as.Date("2020-03-01"))
scen_row <- 7
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
# Redefine the scenario-specific rate functions so their defaults read row 7
# of `scenarios`; they are globals presumably consumed by evaluate.model().
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out7 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out7,acc_df$Date, acc_df$primary,
log='y', title='Benchmark: Baseline')
### Social Distancing Intervention (Scenario 8) --------------------------------
scen_row <- 8
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
# Same four rate functions, now defaulting to row 8 of `scenarios`.
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out8 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out8, acc_df$Date, acc_df$primary,
log='y', title='Athens Primary Service Area with Social Distancing Intervention')
### Smaller and Larger Starting Sizes (Scenarios 3 and 5) ----------------------
# Smaller first
scen_row <- 3
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
# Redefine the scenario-specific rate functions so their defaults read row 3
# of `scenarios`; they are globals presumably consumed by evaluate.model().
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out3 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out3, acc_df$Date, acc_df$primary,
log='y', title='Athens Primary Service Area with Social Distancing Intervention (lower bound)')
# Bigger next
scen_row <- 5
# Too stringent, earlier assumptions likely make more sense
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
# Same four rate functions, now defaulting to row 5 of `scenarios`.
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out5 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out5, acc_df$Date, acc_df$primary,
log='y', title='Athens Primary Service Area with Social Distancing Intervention (upper bound)')
# No presymptomatic here, that seems to make a difference, doesn't behave well
# Adding presymptomatic == 1, seems to make sense, creates a definite upper bound
# in cumulative reported cases
### Both Immediate Interventions (Scenario 15) ---------------------------------
scen_row <- 15
# Too stringent, earlier assumptions likely make more sense
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
# Redefine the scenario-specific rate functions so their defaults read row 15
# of `scenarios`; they are globals presumably consumed by evaluate.model().
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out15 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out15, acc_df$Date, acc_df$primary,
log='y', title='Both early interventions')
# Not sure this makes sense.
### Current "Most Likely" Georgia Scenario 9 -----------------------------------
# NOTE(review): the header above says Scenario 9 but scen_row below is 10 --
# confirm which row of the scenarios table was intended.
scen_row <- 10
# Too stringent, earlier assumptions likely make more sense
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
# Same four rate functions, now defaulting to row 10 of `scenarios`.
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out10 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out10, acc_df$Date, acc_df$primary,
log='y', title='Most likely scenario, more early cases but improved case ascertainment')
|
/Colquitt_and_surrounding/Code/athens_simulations_primary.R
|
no_license
|
ingels11/COVID19_ACC_Model
|
R
| false
| false
| 16,054
|
r
|
### ----------------------------------------------------------------------------
### This is the set of simulations related to the Athens-Clarke county primary
### service area.
### Counties are limited to Clarke, Oconee, Barrow, Madison, Jackson, Oglethorpe
### Georgia-specific simulations have been updated with starting values based
### on what has been seen in the ACC area. This also includes the start date
### of the epidemic for this area and the date of intervention, selected to be
### the shelter in home order from the ACC government.
### ----------------------------------------------------------------------------
rm(list = ls())
source("Code/model_fncs.R")
library(ggplot2)
### Read and Format Athens Cases Data ------------------------------------------
acc_df <- read.csv("Data/ACC Healthcare Region Simulation - Case Counts by County GA.csv")
# Primary service area
# Clarke, Oconee, Jackson, Madison, Oglethorpe, Barrow
# Not sure if Barrow actually counts
# Jackson and Oglethorpe not in dataset
acc_df[is.na(acc_df)] <- 0
acc_df$primary <- acc_df$Clarke + acc_df$Oconee + acc_df$Barrow + acc_df$Madison
acc_df$primary_cum <- cumsum(acc_df$primary)
names(acc_df)[1] <- "Date"
acc_df$Date <- as.Date(acc_df$Date)
# Currently cut off for 3/24
acc_df <- acc_df[1:which(acc_df$Date == as.character(Sys.Date())), ]
# Plot of daily Athens cases
ggplot(data = acc_df, mapping = aes(x = Date, y = primary)) +
geom_bar(stat = "identity") +
scale_x_date(breaks = function(x) seq.Date(from = min(x)+2,
to = max(x),
by = "3 days"), date_labels = "%b %d")+
# minor_breaks = function(x) seq.Date(from = min(x),
# to = max(x),
# by = "2 years")) +
labs(x = "Day",
y = "New Cases (Primary Service Area)") +
theme_classic() +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Plot of cumulative Athens cases
ggplot(data = acc_df, mapping = aes(x = Date, y = primary_cum)) +
geom_bar(stat = "identity") +
scale_x_date(breaks = function(x) seq.Date(from = min(x)+2,
to = max(x),
by = "3 days"), date_labels = "%b %d")+
labs(x = "Day",
y = "Cumulative Cases (Primary Service Area)") +
theme_classic() +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
### Read and Format Athens Scenarios -------------------------------------------
# Most difficult part is setting the initial numbers of E and I in the model
# Based on the Georgia work of Handel and Drake
# * Intermediate: 136 cases (15 early cases / 0.11); see Georgia model assumptions
# * Upper Bound: hard, not date of death in ACC area
# * will use GA scaling, 2*136 = 272
# * not using GA scaling, step up one day and go with intermediate method
# * then 24 early cases / 0.11 = 218
# * Lower Bound: hard, not really to two-ish week mark since first case
# * will use GA scaling, 1/8 * 136 = 17
# * not using GA scaling, step back one day and go with intermediate method
# * then 9 early cases / 0.11 = 82
scenarios <- read.csv("Data/athens_scenarios.csv")
# Only the first 8 are currently setup
# scenarios <- scenarios[c(1:8, 15), ]
# move columns 11 and 12 to the end
scenarios <- scenarios[, c(1:10, 13:31, 11, 12)]
### Natural Epidemic (Scenario 6) ----------------------------------------------
# ACC-area cases began on 3/14/20
# Major intervention 3/20/20 with shelter in place, etc. from government
# Intervention days then are 6 (w)
scen_row <- 6
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
# First ACC-area case on 3/14/20
start = as.Date("2020-03-14")
s <- scenarios[,3:31]
i <- scen_row
out6 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out6, acc_df$Date, acc_df$primary,
log='y', title='Benchmark: Natural epidemic')
### Baseline Model (Scenario 7) ------------------------------------------------
# out.base <- evaluate.model(params=list(beta0=0.6584, sigma=1/6.4, z=0, b=0.143, a0=1/1.5, w=100, c=1, presymptomatic=1, dt=0.05),
# init = list(S=447451, E1=ei, E2=ei, E3=ei, E4=ei, E5=ei, E6=ei,
# I1 = ii, I2= ii, I3=ii, I4=ii, Iu1=0, Iu2=0, Iu3=0, Iu4=0,
# H=0, Ru=0, C=0),
# nsims=15, nstep=NULL, start=as.Date("2020-03-01"))
scen_row <- 7
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out7 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out7,acc_df$Date, acc_df$primary,
log='y', title='Benchmark: Baseline')
### Social Distancing Intervention (Scenario 8) --------------------------------
scen_row <- 8
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out8 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out8, acc_df$Date, acc_df$primary,
log='y', title='Athens Primary Service Area with Social Distancing Intervention')
### Smaller and Larger Starting Sizes (Scenarios 3 and 5) ----------------------
# Smaller first
scen_row <- 3
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out3 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out3, acc_df$Date, acc_df$primary,
log='y', title='Athens Primary Service Area with Social Distancing Intervention (lower bound)')
# Bigger next
scen_row <- 5
# Too stringent, earlier assumptions likely make more sense
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out5 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out5, acc_df$Date, acc_df$primary,
log='y', title='Athens Primary Service Area with Social Distancing Intervention (upper bound)')
# No presymptomatic here, that seems to make a difference, doesn't behave well
# Adding presymptomatic == 1, seems to make sense, creates a definite upper bound
# in cumulative reported cases
### Both Immedidate Interventions (Scenario 15) --------------------------------
scen_row <- 15
# Too stringent, earlier assumptions likely make more sense
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out15 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out15, acc_df$Date, acc_df$primary,
log='y', title='Both early interventions')
# Not sure this makes sense.
### Current "Most Likely" Georgia Scenario 9 -----------------------------------
scen_row <- 10
# Too stringent, earlier assumptions likely make more sense
# If nationally was 3/12/20 then this is prior to ACC outbreak so z = 0
gamma <- function(z = scenarios[scen_row, "z"], b=scenarios[scen_row, "b"], a0=scenarios[scen_row, "a0"], t){
# piecewise function
# default parameters z = 12, b=1/7, a0=1/1.5
# z: time at start of intervention (notionally March 12)
# b: intercept (positive)
# a0: post intervention isolation ratae
# t: time in the model
gamma <- ifelse(t<=z, gamma <- b, gamma <- a0)
return(gamma)
}
eta <- function(t, w = scenarios[scen_row, "w"]) ifelse(t<=w, 1/3, 1/3)
q <- function(t, w = scenarios[scen_row, "w"], q0=scenarios[scen_row, "q0"], q1=scenarios[scen_row, "q1"]) ifelse(t<=w, q0, q1)
beta <- function(t, w = scenarios[scen_row, "w"], beta0=scenarios[scen_row, "beta0"], beta.factor=2) {
ifelse(t<=w, beta0, beta0 / beta.factor)
}
s <- scenarios[,3:31]
i <- scen_row
out10 <- evaluate.model(params=list(beta0=s[i,1], sigma=s[i,2], z=s[i,3], b=s[i,4], a0=s[i,5], w=s[i,6], presymptomatic=s[i,8], c=s[i,7], dt=s[i,9]),
init = list(S=s[i,10], E1=s[i,11], E2=s[i,12], E3=s[i,13], E4=s[i,14], E5=s[i,15], E6=s[i,16],
I1 = s[i,17], I2 = s[i,18], I3 = s[i,19], I4 = s[i,20], Iu1=s[i,21], Iu2=s[i,22], Iu3=s[i,23], Iu4=s[i,24],
H=s[i,25], Ru=s[i,26], C=s[i,27]),
nsims=15, nstep=NULL, start=start)
plot.model.acc(out10, acc_df$Date, acc_df$primary,
log='y', title='Most likely scenario, more early cases but improved case ascertainment')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/experiments.R
\name{ga_experiment}
\alias{ga_experiment}
\title{Experiments Meta data}
\usage{
ga_experiment(accountId, webPropertyId, profileId, experimentId)
}
\arguments{
\item{accountId}{Account Id}
\item{webPropertyId}{Web Property Id}
\item{profileId}{Profile Id}
\item{experimentId}{Experiment Id}
}
\value{
Experiment Meta Data
}
\description{
Experiments Meta data
}
\seealso{
Other managementAPI functions: \code{\link{ga_accounts}},
\code{\link{ga_adwords_list}}, \code{\link{ga_adwords}},
\code{\link{ga_custom_vars_list}},
\code{\link{ga_custom_vars}},
\code{\link{ga_experiment_list}},
\code{\link{ga_filter_list}},
\code{\link{ga_filter_view_list}},
\code{\link{ga_filter_view}}, \code{\link{ga_filter}},
\code{\link{ga_goal_list}}, \code{\link{ga_goal}},
\code{\link{ga_segment_list}},
\code{\link{ga_unsampled_list}},
\code{\link{ga_unsampled}}, \code{\link{ga_view_list}},
\code{\link{ga_view}}, \code{\link{ga_webproperty_list}},
\code{\link{ga_webproperty}},
\code{\link{google_analytics_account_list}}
}
|
/man/ga_experiment.Rd
|
no_license
|
Cecile31/googleAnalyticsR
|
R
| false
| true
| 1,135
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/experiments.R
\name{ga_experiment}
\alias{ga_experiment}
\title{Experiments Meta data}
\usage{
ga_experiment(accountId, webPropertyId, profileId, experimentId)
}
\arguments{
\item{accountId}{Account Id}
\item{webPropertyId}{Web Property Id}
\item{profileId}{Profile Id}
\item{experimentId}{Experiment Id}
}
\value{
Experiment Meta Data
}
\description{
Experiments Meta data
}
\seealso{
Other managementAPI functions: \code{\link{ga_accounts}},
\code{\link{ga_adwords_list}}, \code{\link{ga_adwords}},
\code{\link{ga_custom_vars_list}},
\code{\link{ga_custom_vars}},
\code{\link{ga_experiment_list}},
\code{\link{ga_filter_list}},
\code{\link{ga_filter_view_list}},
\code{\link{ga_filter_view}}, \code{\link{ga_filter}},
\code{\link{ga_goal_list}}, \code{\link{ga_goal}},
\code{\link{ga_segment_list}},
\code{\link{ga_unsampled_list}},
\code{\link{ga_unsampled}}, \code{\link{ga_view_list}},
\code{\link{ga_view}}, \code{\link{ga_webproperty_list}},
\code{\link{ga_webproperty}},
\code{\link{google_analytics_account_list}}
}
|
# Henry Text Analytics with R
# https://github.com/datasciencedojo/IntroToTextAnalyticsWithR
# Reference: https://www.youtube.com/watch?v=4vuw0AsHeGw
# Scratch script exploring distribution shape (kurtosis and Q-Q plots).
a <- 1
b <- 2
# NOTE(review): this shadows base::c() for the rest of the session.
c <- 1
# 10 draws from Uniform(1, 3).
x <- runif(10, min = 1, max = 3)
hist(x)
# Install e1071 (for kurtosis()) only when it is not already available,
# instead of reinstalling on every run.
if (!requireNamespace("e1071", quietly = TRUE)) install.packages("e1071")
library(e1071)
kurtosis(x)
# Large normal sample; excess kurtosis should be near 0.
xn <- rnorm(10000000, 3, 2)
hist(xn)
kurtosis(xn)
# BUG FIX: qqplot() requires two samples; the original qqplot(xn) stopped
# with "argument 'y' is missing". A single-sample normal Q-Q plot is qqnorm().
qqnorm(xn)
xnsample <- rnorm(200, 3, 2)
# Compare the small sample against the large one.
qqplot(xn, xnsample)
qqnorm(xnsample)
# BUG FIX: the original installed ExtDist but never attached it, so the
# rLaplace() call below could not be found. Install only if missing, then load.
if (!requireNamespace("ExtDist", quietly = TRUE)) install.packages("ExtDist")
library(ExtDist)
xl <- rLaplace(1000, 3, 2)
|
/first_script.R
|
no_license
|
hmendozar17/TextAnalyticsWithR
|
R
| false
| false
| 495
|
r
|
#Henry Text Analytics with R
# https://github.com/datasciencedojo/IntroToTextAnalyticsWithR
#Reference: https://www.youtube.com/watch?v=4vuw0AsHeGw
# rehttps://github.com/datasciencedojo/IntroToTextAnalyticsWithR
a=1
b=2
c=1
x<-runif(10, min=1, max=3)
hist(x)
install.packages("e1071")
library(e1071)
kurtosis(x)
xn<-rnorm(10000000,3,2)
hist(xn)
kurtosis(xn)
qqplot(xn)
xnsample<-rnorm(200,3,2)
qqplot(xn,xnsample)
qqplot
qqnorm(xnsample)
install.packages("ExtDist")
xl<-rLaplace(1000,3,2)
|
#Quantitative Biology
#Measures of Biodiversity
#Author: Benjamin Gazeau
#Date: 02 July 2021
# Load Packages
library(vegan)
library(ggplot2)
library(BiodiversityR)
library(betapart)
library(dplyr)
# Read Data
spp <- read.csv("C:/Users/benji/Desktop/Stats (2021)/Quantitative_Ecology-main/exercises/diversity/SeaweedsSpp.csv")
spp <- select(spp, -1) #remove X column
# Whittaker's Beta-diversity
# 1) True Beta-diversity
# Gamma-diversity / Alpha-diversity
true_beta <- data.frame(
beta = ncol(spp) / specnumber(spp, MARGIN = 1),
section_no = c(1:58)
)
# Plot true beta
ggplot(data = true_beta, (aes(x = section_no, y = beta))) +
geom_line() + xlab("Coastal section, west to east") + ylab("True beta-diversity")
# 2) Absolute Species Turnover
# Gamma-diversity - Alpha-diversity
abs_beta <- data.frame(
beta = ncol(spp) - specnumber(spp, MARGIN = 1),
section_no = c(1:58)
)
# Plot absolute species turnover
ggplot(data = abs_beta, (aes(x = section_no, y = beta))) +
geom_line() + xlab("Coastal section, west to east") + ylab("Absolute beta-diversity")
# Contemporary definitions of Beta-diversity ------------------------------
# 1. Species Turnover (beta-sim)
# Same alpha-diversity between sections, but different species makeup
# Hence, this beta-diversity refers to processes that cause communities to differ
# due to species being lost/gained from section to section without corresponding
# changes in alpha-diversity
# 2. Nestedness-Resultant Beta-Diversity (beta-sne)
# Two places share same species, number of species can differ amongst quadrants
# Nestedness-resultant beta-diversity refers to processes that cause species to be
# gained or lost, and the community with the lowest alpha-diversity is a subset of
# the richer community
# Calculating Turnover and Nestedness-Resultant --------------------------
# Decompose total Sørensen dissimilarity into turnover and nestedness-resultant components:
Y.core <- betapart.core(spp)
Y.pair <- beta.pair(Y.core, index.family = "sor")
# Let Y1 be the turnover component (beta-sim):
Y1 <- as.matrix(Y.pair$beta.sim)
# Let Y2 be the nestedness-resultant component (beta-sne):
Y2 <- as.matrix(Y.pair$beta.sne)
round(Y1[1:10, 1:10], 4) # Matrix of beta-sim
round(Y2[1:10, 1:10], 4) # Matrix of beta-sne
# QUESTIONS ---------------------------------------------------------------
# 1. Plot species turnover as a function of Section number, and provide a mechanistic exaplanation for the pattern observed.
y1_plot <- as.data.frame(Y1)
ggplot(y1_plot, aes(x = 1:58, y = y1_plot[,1])) +
geom_line() +
labs(x = "Coastal Section, West to East", y = "Species Turnover (beta-sim)")
# The graph shows an increasing trend as we move from 1 to 58, meaning there are
# different species compositions as we move from site 1 on the west coast to site
# 58 on the east coast. The species composition along the shore is constantly changing,
# and the species composition of site 1 is completely different from that of site 58.
# As we calculated beta-sim, while the makeup of species changes, the species richness does not.
# 2. Based on an assessment of literature on the topic, provide a discussion of nestedness-resultant
# β-diversity. Use either a marine or terrestrial example to explain this mode of structuring biodiversity.
# Biotas from sites with lower species richness are subsets of biotas from sites with higher species richness,
# resulting in nestedness of species assemblages. This represents the non-random process of species extinction
# at different locations (Baselga 2010). In the simplest terms, this is demonstrated by the presence of 12
# species at Site A, 4 species (of the original 12) at Site B, and just 2 species at Site C. There is no
# turnover in species, but a difference in richness, where the composition of Site C is a subset of Site A.
|
/Species Dissimilarities .R
|
no_license
|
BenjaminGazeau/Quantitative-Ecology
|
R
| false
| false
| 3,868
|
r
|
#Quantitative Biology
#Measures of Biodiversity
#Author: Benjamin Gazeau
#Date: 02 July 2021
# Load Packages
library(vegan)
library(ggplot2)
library(BiodiversityR)
library(betapart)
library(dplyr)
# Read Data
spp <- read.csv("C:/Users/benji/Desktop/Stats (2021)/Quantitative_Ecology-main/exercises/diversity/SeaweedsSpp.csv")
spp <- select(spp, -1) #remove X column
# Whittaker's Beta-diversity
# 1) True Beta-diversity
# Gamma-diversity / Alpha-diversity
true_beta <- data.frame(
beta = ncol(spp) / specnumber(spp, MARGIN = 1),
section_no = c(1:58)
)
# Plot true beta
ggplot(data = true_beta, (aes(x = section_no, y = beta))) +
geom_line() + xlab("Coastal section, west to east") + ylab("True beta-diversity")
# 2) Absolute Species Turnover
# Gamma-diversity - Alpha-diversity
abs_beta <- data.frame(
beta = ncol(spp) - specnumber(spp, MARGIN = 1),
section_no = c(1:58)
)
# Plot absolute species turnover
ggplot(data = abs_beta, (aes(x = section_no, y = beta))) +
geom_line() + xlab("Coastal section, west to east") + ylab("Absolute beta-diversity")
# Contemporary definitions of Beta-diversity ------------------------------
# 1. Species Turnover (beta-sim)
# Same alpha-diversity between sections, but different species makeup
# Hence, this beta-diversity refers to processes that cause communities to differ
# due to species being lost/gained from section to section without corresponding
# changes in alpha-diversity
# 2. Nestedness-Resultant Beta-Diversity (beta-sne)
# Two places share same species, number of species can differ amongst quadrants
# Nestedness-resultant beta-diversity refers to processes that cause species to be
# gained or lost, and the community with the lowest alpha-diversity is a subset of
# the richer community
# Calculating Turnover and Nestedness-Resultant --------------------------
# Decompose total Sørensen dissimilarity into turnover and nestedness-resultant components:
Y.core <- betapart.core(spp)
Y.pair <- beta.pair(Y.core, index.family = "sor")
# Let Y1 be the turnover component (beta-sim):
Y1 <- as.matrix(Y.pair$beta.sim)
# Let Y2 be the nestedness-resultant component (beta-sne):
Y2 <- as.matrix(Y.pair$beta.sne)
round(Y1[1:10, 1:10], 4) # Matrix of beta-sim
round(Y2[1:10, 1:10], 4) # Matrix of beta-sne
# QUESTIONS ---------------------------------------------------------------
# 1. Plot species turnover as a function of Section number, and provide a mechanistic exaplanation for the pattern observed.
y1_plot <- as.data.frame(Y1)
ggplot(y1_plot, aes(x = 1:58, y = y1_plot[,1])) +
geom_line() +
labs(x = "Coastal Section, West to East", y = "Species Turnover (beta-sim)")
# The graph shows an increasing trend as we move from 1 to 58, meaning there are
# different species compositions as we move from site 1 on the west coast to site
# 58 on the east coast. The species composition along the shore is constantly changing,
# and the species composition of site 1 is completely different from that of site 58.
# As we calculated beta-sim, while the makeup of species changes, the species richness does not.
# 2. Based on an assessment of literature on the topic, provide a discussion of nestedness-resultant
# β-diversity. Use either a marine or terrestrial example to explain this mode of structuring biodiversity.
# Biotas from sites with lower species richness are subsets of biotas from sites with higher species richness,
# resulting in nestedness of species assemblages. This represents the non-random process of species extinction
# at different locations (Baselga 2010). In the simplest terms, this is demonstrated by the presence of 12
# species at Site A, 4 species (of the original 12) at Site B, and just 2 species at Site C. There is no
# turnover in species, but a difference in richness, where the composition of Site C is a subset of Site A.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{extract_area}
\alias{extract_area}
\title{Helper function to extract the area slots from
\code{\link[sp]{SpatialPolygons}} objects}
\usage{
extract_area(sp_poly)
}
\arguments{
\item{sp_poly}{An object of class \code{\link[sp]{SpatialPolygons}}}
}
\value{
A numeric vector with the areas of all polygons in the object. The
areas for polygons that represent holes are stored as negative values.
}
\description{
Helper function to extract the area slots from
\code{\link[sp]{SpatialPolygons}} objects
}
\examples{
library(sp);
library(maptools);
data("state.vbm");
a <- extract_area(state.vbm);
}
|
/man/extract_area.Rd
|
no_license
|
rnuske/fisim
|
R
| false
| true
| 692
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{extract_area}
\alias{extract_area}
\title{Helper function to extract the area slots from
\code{\link[sp]{SpatialPolygons}} objects}
\usage{
extract_area(sp_poly)
}
\arguments{
\item{sp_poly}{An object of class \code{\link[sp]{SpatialPolygons}}}
}
\value{
A numeric vector with the areas of all polygons in the object. The
areas for polygons that represent holes are stored as negative values.
}
\description{
Helper function to extract the area slots from
\code{\link[sp]{SpatialPolygons}} objects
}
\examples{
library(sp);
library(maptools);
data("state.vbm");
a <- extract_area(state.vbm);
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oss.R
\name{oss.save}
\alias{oss.save}
\title{oss.save}
\usage{
oss.save(x, ...)
}
\arguments{
\item{...}{}
}
\description{
oss.save
}
\examples{
a <- 1:10
oss.save('oss://ross-test/test.RData', a)
}
|
/man/oss.save.Rd
|
permissive
|
gahoo/ross
|
R
| false
| true
| 278
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oss.R
\name{oss.save}
\alias{oss.save}
\title{oss.save}
\usage{
oss.save(x, ...)
}
\arguments{
\item{...}{}
}
\description{
oss.save
}
\examples{
a <- 1:10
oss.save('oss://ross-test/test.RData', a)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/equ7.R
\name{equ7}
\alias{equ7}
\title{Equation 7}
\usage{
equ7(temp, rate, augment = F, return_fit = F)
}
\arguments{
\item{temp}{temperature (in Celsius)}
\item{rate}{rate measurement}
\item{augment}{logical wether the dataset with fits should be returned instead of the parameter values}
\item{return_fit}{logical wether the model fit object should be returned}
}
\value{
a data frame of, depending on augment argument, if FALSE, parameters, if TRUE, data with predicted values
}
\description{
Equation 7 from Montagnes et al (2008) citing Schoolfield et al. (1981)
}
\examples{
output <- with(Emiliania_huxleyi, equ7(temp=temp, rate=rate))
}
|
/man/equ7.Rd
|
no_license
|
low-decarie/temperatureresponse
|
R
| false
| true
| 727
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/equ7.R
\name{equ7}
\alias{equ7}
\title{Equation 7}
\usage{
equ7(temp, rate, augment = F, return_fit = F)
}
\arguments{
\item{temp}{temperature (in Celsius)}
\item{rate}{rate measurement}
\item{augment}{logical wether the dataset with fits should be returned instead of the parameter values}
\item{return_fit}{logical wether the model fit object should be returned}
}
\value{
a data frame of, depending on augment argument, if FALSE, parameters, if TRUE, data with predicted values
}
\description{
Equation 7 from Montagnes et al (2008) citing Schoolfield et al. (1981)
}
\examples{
output <- with(Emiliania_huxleyi, equ7(temp=temp, rate=rate))
}
|
#### CalcAuxAtPoint function
## The function takes the following
## w - a matrix. Each row is observation and each column is questionnaire time in the interval. w equal to Inf once
# an observation is censore/had the event
## w.res - a matrix of the same dimensions as w. Equal to the x(t) at time w. For example second column is
# second questionnaire result for all participents.
## point - scalar. The time of the risk set in the main analysis. In terms of the paper, t.
###
# The function returns a list with theree objects:
# df.lr: a data frame. for each subject it gives the interval in which the exposure/treatment has occured according to the data.
# a.point: time of last questionnire, i.e., a(t) in terms of the paper
# x.one : one/zero. Equals to 1 if X(a.point)=1.
CalcAuxAtPoint <- function(w, w.res, point) {
n.sample <- nrow(w)
interval.w <- FindIntervalCPP(point = point, w =w)
right.for.surv <- left.for.surv <- x.one <- a.point <- vector(length = n.sample)
for (j in 1:n.sample)
{
if (interval.w[j]==1)
{
right.for.surv[j] <- Inf
left.for.surv[j] <- 0
a.point[j] <- 0
} else if (any(w.res[j,1:(interval.w[j]-1)]==1))
{
right.for.surv[j] <- w[j, Position(f = function(x) x==1,x=w.res[j,])]
left.for.surv[j] <- ifelse(right.for.surv[j]==w[j,1], 0,
w[j, Position(f = function(x) x==1,x=w.res[j,])-1])
x.one[j] <- 1
a.point[j] <- Inf
} else
{
right.for.surv[j] <- Inf
left.for.surv[j] <- ifelse(w[j,1]==Inf, 0,
w[j, interval.w[j]-1])
a.point[j] <- left.for.surv[j]
}}
ret.list <- list(df.lr = data.frame(left = left.for.surv, right = right.for.surv), x.one = x.one, a.point= a.point)
return(ret.list)
}
|
/ICcalib/R/CalcAuxAtPoint.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| false
| 1,815
|
r
|
#### CalcAuxAtPoint function
## The function takes the following
## w - a matrix. Each row is observation and each column is questionnaire time in the interval. w equal to Inf once
# an observation is censore/had the event
## w.res - a matrix of the same dimensions as w. Equal to the x(t) at time w. For example second column is
# second questionnaire result for all participents.
## point - scalar. The time of the risk set in the main analysis. In terms of the paper, t.
###
# The function returns a list with theree objects:
# df.lr: a data frame. for each subject it gives the interval in which the exposure/treatment has occured according to the data.
# a.point: time of last questionnire, i.e., a(t) in terms of the paper
# x.one : one/zero. Equals to 1 if X(a.point)=1.
CalcAuxAtPoint <- function(w, w.res, point) {
n.sample <- nrow(w)
interval.w <- FindIntervalCPP(point = point, w =w)
right.for.surv <- left.for.surv <- x.one <- a.point <- vector(length = n.sample)
for (j in 1:n.sample)
{
if (interval.w[j]==1)
{
right.for.surv[j] <- Inf
left.for.surv[j] <- 0
a.point[j] <- 0
} else if (any(w.res[j,1:(interval.w[j]-1)]==1))
{
right.for.surv[j] <- w[j, Position(f = function(x) x==1,x=w.res[j,])]
left.for.surv[j] <- ifelse(right.for.surv[j]==w[j,1], 0,
w[j, Position(f = function(x) x==1,x=w.res[j,])-1])
x.one[j] <- 1
a.point[j] <- Inf
} else
{
right.for.surv[j] <- Inf
left.for.surv[j] <- ifelse(w[j,1]==Inf, 0,
w[j, interval.w[j]-1])
a.point[j] <- left.for.surv[j]
}}
ret.list <- list(df.lr = data.frame(left = left.for.surv, right = right.for.surv), x.one = x.one, a.point= a.point)
return(ret.list)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign_parameters.R
\name{assign_parameters.default}
\alias{assign_parameters.default}
\title{conversion helper}
\usage{
\method{assign_parameters}{default}(
x,
infiltration = NULL,
subcatchment = NULL,
subcatchment_typologies = NULL,
conduit_material = NULL,
junction_parameters = NULL
)
}
\description{
conversion helper
}
\keyword{internal}
|
/man/assign_parameters.default.Rd
|
no_license
|
dleutnant/swmmr
|
R
| false
| true
| 434
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign_parameters.R
\name{assign_parameters.default}
\alias{assign_parameters.default}
\title{conversion helper}
\usage{
\method{assign_parameters}{default}(
x,
infiltration = NULL,
subcatchment = NULL,
subcatchment_typologies = NULL,
conduit_material = NULL,
junction_parameters = NULL
)
}
\description{
conversion helper
}
\keyword{internal}
|
\name{draw.pv.ctt}
\alias{draw.pv.ctt}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plausible Value Imputation Using a Known Measurement Error Variance
(Based on Classical Test Theory)
}
\description{
This function provides unidimensional plausible value imputation with a
known measurement error variance or classical test theory (Mislevy, 1991).
The reliability of the scale is estimated by Cronbach's Alpha or can be
provided by the user.
}
\usage{
draw.pv.ctt(y, dat.scale = NULL, x=NULL, samp.pars = TRUE,
alpha = NULL, sig.e = NULL, var.e=NULL , true.var = NULL)
}
% The default of \code{NULL} assumes that there are
% no scale scores available.
\arguments{
\item{y}{
Vector of scale scores if \code{y} should not be used.
}
\item{dat.scale}{
Matrix of item responses
}
\item{x}{
Matrix of covariates
}
\item{samp.pars}{
An optional logical indicating whether scale parameters
(reliability or measurement error standard deviation) should be sampled
}
\item{alpha}{
Reliability estimate of the scale. The default of
\code{NULL} means that Cronbach's alpha will be used
as a reliability estimate.
}
\item{sig.e}{
Optional vector of the standard deviation of the error.
Note that it is \emph{not} the error variance.
}
\item{var.e}{
Optional vector of the variance of the error.
}
\item{true.var}{
True score variance
}
}
\details{
The linear model is assumed for drawing plausible values of a variable
\eqn{Y} contaminated by measurement error. Assuming \eqn{Y= \theta + e}
and a linear regression model for \eqn{\theta}
\deqn{ \theta = \bold{X} \beta + \epsilon}
(plausible value) imputations from the posterior distribution
\eqn{P( \theta | Y , \bold{X} )} are drawn. See Mislevy (1991) for details.
}
\value{
A vector with plausible values
}
\references{
Blackwell, M., Honaker, J., & King, G. (2011).
\emph{Multiple overimputation: A unified approach to measurement error and
missing data}. Technical Report.
Mislevy, R. J. (1991). Randomization-based inference about latent variables
from complex samples. \emph{Psychometrika}, \bold{56}, 177-196.
}
\author{
Alexander Robitzsch
}
\note{
Plausible value imputation is also labeled as multiple overimputation
(Blackwell, Honaker & King, 2011).
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See also \code{\link[sirt:plausible.value.imputation.raschtype]{plausible.value.imputation.raschtype}}
(\pkg{sirt}) for plausible value imputation.
}
%% plausible.value.imputation.raschtype
\examples{
#############################################################################
# SIMULATED EXAMPLE 1: Scale scores
#############################################################################
set.seed(899)
n <- 5000 # number of students
x <- round( runif( n , 0 ,1 ) )
y <- rnorm(n)
# simulate true score theta
theta <- .6 + .4*x + .5 * y + rnorm(n)
# simulate observed score by adding measurement error
sig.e <- rep( sqrt(.40) , n )
theta_obs <- theta + rnorm( n , sd=sig.e)
# calculate alpha
( alpha <- var( theta ) / var( theta_obs ) )
# [1] 0.7424108
# => Ordinarily, sig.e or alpha will be known, assumed or estimated by using items,
# replications or an appropriate measurement model.
# create matrix of predictors
X <- as.matrix( cbind(x , y ) )
# plausible value imputation with scale score
imp1 <- draw.pv.ctt( y=theta_obs , x = X , sig.e =sig.e )
# check results
lm( imp1 ~ x + y )
# imputation with alpha as an input
imp2 <- draw.pv.ctt( y=theta_obs , x = X , alpha = .74 )
lm( imp2 ~ x + y )
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Plausible value imputation}
\keyword{Latent variables}
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/draw.pv.ctt.Rd
|
no_license
|
cksun/miceadds
|
R
| false
| false
| 3,928
|
rd
|
\name{draw.pv.ctt}
\alias{draw.pv.ctt}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plausible Value Imputation Using a Known Measurement Error Variance
(Based on Classical Test Theory)
}
\description{
This function provides unidimensional plausible value imputation with a
known measurement error variance or classical test theory (Mislevy, 1991).
The reliability of the scale is estimated by Cronbach's Alpha or can be
provided by the user.
}
\usage{
draw.pv.ctt(y, dat.scale = NULL, x=NULL, samp.pars = TRUE,
alpha = NULL, sig.e = NULL, var.e=NULL , true.var = NULL)
}
% The default of \code{NULL} assumes that there are
% no scale scores available.
\arguments{
\item{y}{
Vector of scale scores if \code{y} should not be used.
}
\item{dat.scale}{
Matrix of item responses
}
\item{x}{
Matrix of covariates
}
\item{samp.pars}{
An optional logical indicating whether scale parameters
(reliability or measurement error standard deviation) should be sampled
}
\item{alpha}{
Reliability estimate of the scale. The default of
\code{NULL} means that Cronbach's alpha will be used
as a reliability estimate.
}
\item{sig.e}{
Optional vector of the standard deviation of the error.
Note that it is \emph{not} the error variance.
}
\item{var.e}{
Optional vector of the variance of the error.
}
\item{true.var}{
True score variance
}
}
\details{
The linear model is assumed for drawing plausible values of a variable
\eqn{Y} contaminated by measurement error. Assuming \eqn{Y= \theta + e}
and a linear regression model for \eqn{\theta}
\deqn{ \theta = \bold{X} \beta + \epsilon}
(plausible value) imputations from the posterior distribution
\eqn{P( \theta | Y , \bold{X} )} are drawn. See Mislevy (1991) for details.
}
\value{
A vector with plausible values
}
\references{
Blackwell, M., Honaker, J., & King, G. (2011).
\emph{Multiple overimputation: A unified approach to measurement error and
missing data}. Technical Report.
Mislevy, R. J. (1991). Randomization-based inference about latent variables
from complex samples. \emph{Psychometrika}, \bold{56}, 177-196.
}
\author{
Alexander Robitzsch
}
\note{
Plausible value imputation is also labeled as multiple overimputation
(Blackwell, Honaker & King, 2011).
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See also \code{\link[sirt:plausible.value.imputation.raschtype]{plausible.value.imputation.raschtype}}
(\pkg{sirt}) for plausible value imputation.
}
%% plausible.value.imputation.raschtype
\examples{
#############################################################################
# SIMULATED EXAMPLE 1: Scale scores
#############################################################################
set.seed(899)
n <- 5000 # number of students
x <- round( runif( n , 0 ,1 ) )
y <- rnorm(n)
# simulate true score theta
theta <- .6 + .4*x + .5 * y + rnorm(n)
# simulate observed score by adding measurement error
sig.e <- rep( sqrt(.40) , n )
theta_obs <- theta + rnorm( n , sd=sig.e)
# calculate alpha
( alpha <- var( theta ) / var( theta_obs ) )
# [1] 0.7424108
# => Ordinarily, sig.e or alpha will be known, assumed or estimated by using items,
# replications or an appropriate measurement model.
# create matrix of predictors
X <- as.matrix( cbind(x , y ) )
# plausible value imputation with scale score
imp1 <- draw.pv.ctt( y=theta_obs , x = X , sig.e =sig.e )
# check results
lm( imp1 ~ x + y )
# imputation with alpha as an input
imp2 <- draw.pv.ctt( y=theta_obs , x = X , alpha = .74 )
lm( imp2 ~ x + y )
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Plausible value imputation}
\keyword{Latent variables}
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
nCut <- 20
myYearG3 <- "2016-2018"
# RACE --------------------------------------------------------------------------------------------------------------------------
raceTest <- datCounty_RE %>%
filter(raceCode != "Multi-NH") %>%
filter(Ndeaths > nCut ) %>%
select(-YLL,-mean.age,-YLLper,-cDeathRate,-rateLCI,-rateUCI,-YLL.adj.rate,LCI=aLCI,UCI=aUCI)
#LOWEST ----------------------------------------
raceTest2 <- raceTest %>% group_by(county,yearG3,sex,CAUSE) %>%
mutate(bestRate = min(aRate), #MINIMUM RATE
bestSE = aSE) %>%
filter(bestRate == aRate) %>%
mutate(lowRace = raceCode) %>%
select(-(Ndeaths:aSE),-raceCode)
raceTest_LOW <- left_join(raceTest,raceTest2,by=c("county","yearG3","sex","CAUSE")) %>%
mutate(rateRatio = round(aRate/bestRate,1),
Ztest = (aRate - bestRate) / sqrt(aSE^2 + bestSE^2),
pValue = 1-pnorm(Ztest),
pMark = as.factor(ifelse(aRate==bestRate,"Lowest",ifelse(pValue < .01,"Sig. Higher (p<.01)","No Difference")))
)
# Execute these lines to update RACE data for LGHC MEASURES Shiny and State Health Report
if (1==2) {
raceViewWork <- raceTest_LOW %>%
filter(Level == "lev2" ) %>%
# filter(!(CAUSE %in% c("A02","D04","E03") ) & Level %in% c("lev2","lev3") )
filter(yearG3 == "2016-2018",sex=="Total")
raceDisparityUnique <- raceViewWork %>%
group_by(yearG3,county,CAUSE) %>%
mutate(rankX=rank(-rateRatio)) %>% # ranks higher RR for each CONDITION in each County
filter(rankX==1) %>% select(-rankX) %>%
ungroup()
tNames <- gbdMap0 %>% select(CAUSE=LABEL,causeName=nameOnly)
ccbRaceDisparity <- raceDisparityUnique %>%
left_join(tNames,by="CAUSE") %>%
mutate(causeName = ifelse(CAUSE=="Z01","Ill-Defined",causeName))
saveRDS(ccbRaceDisparity , file= path(myPlace,"/myData/",whichData,"ccbRaceDisparity.RDS"))
}
#HIGHEST -------------------------------------------
raceTest2 <- raceTest %>% group_by(county,yearG3,sex,CAUSE) %>%
mutate(bestRate = max(aRate), # MAXIMUM RATE
bestSE = aSE) %>%
filter(bestRate == aRate) %>%
mutate(lowRace = raceCode) %>%
select(-(Ndeaths:aSE),-raceCode)
raceTest_HIGH <- left_join(raceTest,raceTest2,by=c("county","yearG3","sex","CAUSE")) %>%
mutate(rateRatio = round(aRate/bestRate,1),
Ztest = (aRate - bestRate) / sqrt(aSE^2 + bestSE^2),
# pValue = 1-pnorm(Ztest),
pValue = pnorm(Ztest),
pMark = as.factor(ifelse(aRate==bestRate,"Highest",ifelse(pValue < .01,"Sig. Lower (p<.01)","No Difference")))
)
# Age ----------------------------------------------------------------------------------------------------------------------------
ageTest <- datCounty_AGE_3year %>%
filter(Ndeaths > nCut,!is.na(cDeathRate) ) %>% # need !is.na becuase of tiny number missing age --> NA fix
select(-YLL,-mean.age,-YLLper,cDeathRate,LCI=rateLCI,UCI=rateUCI)
# LOWEST
ageTest2 <- ageTest %>% group_by(county,yearG3,sex,CAUSE) %>%
mutate(bestRate = min(cDeathRate),
bestSE = rateSE) %>%
filter(bestRate == cDeathRate) %>%
mutate(lowAge = ageG) %>%
select(-(Ndeaths:UCI),-ageG)
ageTest_LOW <- left_join(ageTest,ageTest2,by=c("county","yearG3","sex","CAUSE")) %>%
mutate(rateRatio = round(cDeathRate/bestRate,1),
Ztest = (cDeathRate - bestRate) / sqrt(rateSE^2 + bestSE^2),
pValue = 1-pnorm(Ztest),
pMark = as.factor(ifelse(cDeathRate==bestRate,"Lowest",ifelse(pValue < .01,"Sig. Higher (p<.01)","No Difference")))
)
# HIGHEST
ageTest2 <- ageTest %>% group_by(county,yearG3,sex,CAUSE) %>%
mutate(bestRate = max(cDeathRate),
bestSE = rateSE) %>%
filter(bestRate == cDeathRate) %>%
mutate(lowAge = ageG) %>%
select(-(Ndeaths:UCI),-ageG)
ageTest_HIGH <- left_join(ageTest,ageTest2,by=c("county","yearG3","sex","CAUSE")) %>%
mutate(rateRatio = round(cDeathRate/bestRate,1),
Ztest = (cDeathRate - bestRate) / sqrt(rateSE^2 + bestSE^2),
pValue = pnorm(Ztest),
pMark = as.factor(ifelse(cDeathRate==bestRate,"Highest",ifelse(pValue < .01,"Sig. Lower (p<.01)","No Difference")))
)
# Sex -------------------------------------------------------------------------------------------------------------------------------
sexTest <- datCounty_3year %>%
filter(Ndeaths > nCut, !is.na(aRate),sex != "Total" ) %>%
select(-YLL,-mean.age,-YLLper,-cDeathRate,-rateLCI,-rateUCI,-YLL.adj.rate,LCI=aLCI,UCI=aUCI)
# LOWEST ---
sexTest2 <- sexTest %>% group_by(county,yearG3,CAUSE) %>%
mutate(bestRate = min(aRate),
bestSE = aSE) %>%
filter(bestRate == aRate) %>%
mutate(lowRace = sex) %>%
select(-(Ndeaths:aSE),-sex)
sexTest_LOW <- left_join(sexTest,sexTest2,by=c("county","yearG3","CAUSE")) %>%
mutate(rateRatio = round(aRate/bestRate,1),
Ztest = (aRate - bestRate) / sqrt(aSE^2 + bestSE^2),
pValue = 1-pnorm(Ztest),
pMark = as.factor(ifelse(aRate == bestRate,"Lowest", ifelse(pValue < .01,"Sig. Higher (p<.01)","No Difference")))
)
# HIGHEST ---
sexTest2 <- sexTest %>% group_by(county,yearG3,CAUSE) %>%
mutate(bestRate = max(aRate),
bestSE = aSE) %>%
filter(bestRate == aRate) %>%
mutate(lowRace = sex) %>%
select(-(Ndeaths:aSE),-sex)
sexTest_HIGH <- left_join(sexTest,sexTest2,by=c("county","yearG3","CAUSE")) %>%
mutate(rateRatio = round(aRate/bestRate,1),
Ztest = (aRate - bestRate) / sqrt(aSE^2 + bestSE^2),
pValue = pnorm(Ztest),
pMark = as.factor(ifelse(aRate==bestRate,"Highest",ifelse(pValue < .01,"Sig. Lower (p<.01)","No Difference")))
)
|
/myCBD/myFunctions/make_DISPARITY_DATA.R
|
no_license
|
CDPHrusers/CACommunityBurden
|
R
| false
| false
| 5,927
|
r
|
# Significance testing of death-rate disparities across demographic strata.
# nCut: minimum death count for a stratum to be retained (suppresses
# unstable rates computed from small numbers of deaths).
nCut <- 20
myYearG3 <- "2016-2018"
# RACE --------------------------------------------------------------------------------------------------------------------------
# Drop the multiracial group and small-count strata; rename the
# age-adjusted-rate CI columns to generic LCI/UCI.
raceTest <- datCounty_RE %>%
filter(raceCode != "Multi-NH") %>%
filter(Ndeaths > nCut ) %>%
select(-YLL,-mean.age,-YLLper,-cDeathRate,-rateLCI,-rateUCI,-YLL.adj.rate,LCI=aLCI,UCI=aUCI)
#LOWEST ----------------------------------------
# For each county/period/sex/cause, identify the race group with the
# LOWEST age-adjusted rate and carry its rate/SE as the reference.
raceTest2 <- raceTest %>% group_by(county,yearG3,sex,CAUSE) %>%
mutate(bestRate = min(aRate), #MINIMUM RATE
bestSE = aSE) %>%
filter(bestRate == aRate) %>%
mutate(lowRace = raceCode) %>%
select(-(Ndeaths:aSE),-raceCode)
# Join the reference (lowest) group back onto every race group; compute the
# rate ratio and a two-sample Z statistic vs. the reference.  One-sided
# p-value: small p => this group's rate is significantly HIGHER.
raceTest_LOW <- left_join(raceTest,raceTest2,by=c("county","yearG3","sex","CAUSE")) %>%
mutate(rateRatio = round(aRate/bestRate,1),
Ztest = (aRate - bestRate) / sqrt(aSE^2 + bestSE^2),
pValue = 1-pnorm(Ztest),
pMark = as.factor(ifelse(aRate==bestRate,"Lowest",ifelse(pValue < .01,"Sig. Higher (p<.01)","No Difference")))
)
# Execute these lines to update RACE data for LGHC MEASURES Shiny and State Health Report
# (guarded by if (1==2) so it never runs when the file is source()d;
# run the body manually when an update is needed.)
if (1==2) {
raceViewWork <- raceTest_LOW %>%
filter(Level == "lev2" ) %>%
# filter(!(CAUSE %in% c("A02","D04","E03") ) & Level %in% c("lev2","lev3") )
filter(yearG3 == "2016-2018",sex=="Total")
raceDisparityUnique <- raceViewWork %>%
group_by(yearG3,county,CAUSE) %>%
mutate(rankX=rank(-rateRatio)) %>% # ranks higher RR for each CONDITION in each County
filter(rankX==1) %>% select(-rankX) %>%
ungroup()
tNames <- gbdMap0 %>% select(CAUSE=LABEL,causeName=nameOnly)
ccbRaceDisparity <- raceDisparityUnique %>%
left_join(tNames,by="CAUSE") %>%
mutate(causeName = ifelse(CAUSE=="Z01","Ill-Defined",causeName))
saveRDS(ccbRaceDisparity , file= path(myPlace,"/myData/",whichData,"ccbRaceDisparity.RDS"))
}
#HIGHEST -------------------------------------------
# Same construction with the HIGHEST rate as the reference; the one-sided
# p-value direction flips (small p => significantly LOWER).
# NOTE(review): the reference-group column is still named lowRace even
# though it holds the highest-rate group here -- confirm downstream
# consumers expect that name.
raceTest2 <- raceTest %>% group_by(county,yearG3,sex,CAUSE) %>%
mutate(bestRate = max(aRate), # MAXIMUM RATE
bestSE = aSE) %>%
filter(bestRate == aRate) %>%
mutate(lowRace = raceCode) %>%
select(-(Ndeaths:aSE),-raceCode)
raceTest_HIGH <- left_join(raceTest,raceTest2,by=c("county","yearG3","sex","CAUSE")) %>%
mutate(rateRatio = round(aRate/bestRate,1),
Ztest = (aRate - bestRate) / sqrt(aSE^2 + bestSE^2),
# pValue = 1-pnorm(Ztest),
pValue = pnorm(Ztest),
pMark = as.factor(ifelse(aRate==bestRate,"Highest",ifelse(pValue < .01,"Sig. Lower (p<.01)","No Difference")))
)
# Age ----------------------------------------------------------------------------------------------------------------------------
# Same LOWEST/HIGHEST comparison across age groups, but on CRUDE death
# rates (cDeathRate), since rates are already age-group specific.
# NOTE(review): this select() mixes negative selections with a bare
# positive one (cDeathRate) -- confirm it returns the intended columns
# under the project's dplyr version.
ageTest <- datCounty_AGE_3year %>%
filter(Ndeaths > nCut,!is.na(cDeathRate) ) %>% # need !is.na because of tiny number missing age --> NA fix
select(-YLL,-mean.age,-YLLper,cDeathRate,LCI=rateLCI,UCI=rateUCI)
# LOWEST
# Reference group = age group with the minimum crude rate per stratum.
ageTest2 <- ageTest %>% group_by(county,yearG3,sex,CAUSE) %>%
mutate(bestRate = min(cDeathRate),
bestSE = rateSE) %>%
filter(bestRate == cDeathRate) %>%
mutate(lowAge = ageG) %>%
select(-(Ndeaths:UCI),-ageG)
# Z test of each age group's rate vs. the lowest; small p => sig. higher.
ageTest_LOW <- left_join(ageTest,ageTest2,by=c("county","yearG3","sex","CAUSE")) %>%
mutate(rateRatio = round(cDeathRate/bestRate,1),
Ztest = (cDeathRate - bestRate) / sqrt(rateSE^2 + bestSE^2),
pValue = 1-pnorm(Ztest),
pMark = as.factor(ifelse(cDeathRate==bestRate,"Lowest",ifelse(pValue < .01,"Sig. Higher (p<.01)","No Difference")))
)
# HIGHEST
# Reference group = age group with the maximum crude rate; the one-sided
# p-value direction flips (small p => sig. lower).
ageTest2 <- ageTest %>% group_by(county,yearG3,sex,CAUSE) %>%
mutate(bestRate = max(cDeathRate),
bestSE = rateSE) %>%
filter(bestRate == cDeathRate) %>%
mutate(lowAge = ageG) %>%
select(-(Ndeaths:UCI),-ageG)
ageTest_HIGH <- left_join(ageTest,ageTest2,by=c("county","yearG3","sex","CAUSE")) %>%
mutate(rateRatio = round(cDeathRate/bestRate,1),
Ztest = (cDeathRate - bestRate) / sqrt(rateSE^2 + bestSE^2),
pValue = pnorm(Ztest),
pMark = as.factor(ifelse(cDeathRate==bestRate,"Highest",ifelse(pValue < .01,"Sig. Lower (p<.01)","No Difference")))
)
# Sex -------------------------------------------------------------------------------------------------------------------------------
# Same LOWEST/HIGHEST comparison between sexes, on age-adjusted rates;
# the "Total" rows are excluded so only Male vs Female are compared.
sexTest <- datCounty_3year %>%
filter(Ndeaths > nCut, !is.na(aRate),sex != "Total" ) %>%
select(-YLL,-mean.age,-YLLper,-cDeathRate,-rateLCI,-rateUCI,-YLL.adj.rate,LCI=aLCI,UCI=aUCI)
# LOWEST ---
# Reference = sex with the minimum age-adjusted rate per county/period/cause.
# NOTE(review): the reference column is named lowRace although it holds a
# sex value here -- presumably for column-name compatibility downstream;
# confirm.
sexTest2 <- sexTest %>% group_by(county,yearG3,CAUSE) %>%
mutate(bestRate = min(aRate),
bestSE = aSE) %>%
filter(bestRate == aRate) %>%
mutate(lowRace = sex) %>%
select(-(Ndeaths:aSE),-sex)
# Z test vs. the lower-rate sex; small p => significantly higher.
sexTest_LOW <- left_join(sexTest,sexTest2,by=c("county","yearG3","CAUSE")) %>%
mutate(rateRatio = round(aRate/bestRate,1),
Ztest = (aRate - bestRate) / sqrt(aSE^2 + bestSE^2),
pValue = 1-pnorm(Ztest),
pMark = as.factor(ifelse(aRate == bestRate,"Lowest", ifelse(pValue < .01,"Sig. Higher (p<.01)","No Difference")))
)
# HIGHEST ---
# Reference = sex with the maximum rate; p-value direction flips.
sexTest2 <- sexTest %>% group_by(county,yearG3,CAUSE) %>%
mutate(bestRate = max(aRate),
bestSE = aSE) %>%
filter(bestRate == aRate) %>%
mutate(lowRace = sex) %>%
select(-(Ndeaths:aSE),-sex)
sexTest_HIGH <- left_join(sexTest,sexTest2,by=c("county","yearG3","CAUSE")) %>%
mutate(rateRatio = round(aRate/bestRate,1),
Ztest = (aRate - bestRate) / sqrt(aSE^2 + bestSE^2),
pValue = pnorm(Ztest),
pMark = as.factor(ifelse(aRate==bestRate,"Highest",ifelse(pValue < .01,"Sig. Lower (p<.01)","No Difference")))
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_get_gandk.R
\name{get_gandk}
\alias{get_gandk}
\title{G and k model}
\usage{
get_gandk()
}
\value{
The list contains rprior, dprior (generate and evaluate the density of prior distribution),
generate_randomness (generate data-generating variables), robservation (create synthetic
data sets), parameter_names (useful for plotting), thetadim (dimension of parameter),
ydim (dimension of observations), parameters (list of hyperparameters,
to be passed to rprior,dprior,robservation)
}
\description{
This function returns a list representing the g-and-k
quantile distribution.
}
|
/man/get_gandk.Rd
|
no_license
|
ramcqueary/winference
|
R
| false
| true
| 660
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_get_gandk.R
\name{get_gandk}
\alias{get_gandk}
\title{G and k model}
\usage{
get_gandk()
}
\value{
The list contains rprior, dprior (generate and evaluate the density of prior distribution),
generate_randomness (generate data-generating variables), robservation (create synthetic
data sets), parameter_names (useful for plotting), thetadim (dimension of parameter),
ydim (dimension of observations), parameters (list of hyperparameters,
to be passed to rprior,dprior,robservation)
}
\description{
This function returns a list representing the g-and-k
quantile distribution.
}
|
# install.packages("quanteda")
# install.packages("readr")
# install.packages("RWeka")
library("quanteda")
library("readr")
library("RWeka")
# Dictionary of POS-tagged elements.
# Reads tab-separated tagged review files (V1 = token, V2 = POS tag,
# V3 = word), keeps only content words, rebuilds each review as a single
# text string with a sentiment class, then writes 2-gram, 3-gram and
# 2-3-gram word dictionaries for downstream feature extraction.
# Train ngram Mot--------------------------------------------------
# Negative tagged training reviews (class "N")
cheminN <-"Datasets/reviews-tagged/train-tagged/neg/"
val1 <- "N"
datasetNegative <- data.frame()
test<-data.frame()
# Extraction: one (text, class) row per review file
dataset1 <- data.frame()
for(fich in dir(path=cheminN, pattern="*.txt$", recursive=TRUE)){
print(fich)
res = read.delim(paste0(cheminN,fich), header = FALSE, sep ="\t")
# keep only adjectives (JJ), adverbs (RB), nouns (NN) and verbs (VB)
res<-subset(res, grepl("JJ",res$V2)|grepl("RB",res$V2)|
grepl("NN",res$V2)|grepl("VB",res$V2))
res$V3<-as.character(res$V3)
tmp<-paste(res$V3,collapse = " ")
test<-rbind(test,tmp)
test<- cbind(test,val1)
names(test) <- c("text","class")
dataset1 <- rbind.data.frame(dataset1,test)
test<-data.frame()
}
datasetNegative <- rbind(datasetNegative,dataset1)
dataset1 <- data.frame()
datasetNegative$text<-as.character(datasetNegative$text)
rm(res,dataset1,test,cheminN,fich,tmp,val1)
# Positive tagged training reviews (class "P"); same processing as above
cheminP <-"Datasets/reviews-tagged/train-tagged/pos/"
val1 <- "P"
datasetPositive <- data.frame()
test<-data.frame()
# Extraction: one (text, class) row per review file
dataset1 <- data.frame()
for(fich in dir(path=cheminP, pattern="*.txt$", recursive=TRUE)){
print(fich)
res = read.delim(paste0(cheminP,fich), header = FALSE, sep ="\t")
res<-subset(res, grepl("JJ",res$V2)|grepl("RB",res$V2)|
grepl("NN",res$V2)|grepl("VB",res$V2))
res$V3<-as.character(res$V3)
tmp<-paste(res$V3,collapse = " ")
test<-rbind(test,tmp)
test<- cbind(test,val1)
names(test) <- c("text","class")
dataset1 <- rbind.data.frame(dataset1,test)
test<-data.frame()
}
datasetPositive <- rbind(datasetPositive,dataset1)
dataset1 <- data.frame()
datasetPositive$text<-as.character(datasetPositive$text)
rm(res,dataset1,test,cheminP,fich,tmp,val1)
# End of preprocessing: combined training set, positives then negatives
datasetTrain<-data.frame()
datasetTrain <- rbind(datasetPositive,datasetNegative)
# 2-gram word dictionary -------------------------
# skip = 0 makes tokens_skipgrams equivalent to contiguous bigrams;
# dfm_trim keeps only n-grams occurring in >= 10 documents.
train.tokens <- tokens_skipgrams(tokens(datasetTrain$text, what = "word",
remove_numbers = TRUE, remove_punct = TRUE,
remove_symbols = TRUE, remove_hyphens = TRUE),n= 2, skip = 0, concatenator = " ")
train.tokens.dfm <- dfm(train.tokens, tolower = FALSE)
train.tokens.dfm <- dfm_trim(train.tokens.dfm, min_docfreq = 10)
train.tokens.matrix <- as.matrix(train.tokens.dfm)
dictTmp <- colnames(train.tokens.matrix)
write.table(dictTmp, file = "newDict/2grams_tagged_words.txt", sep = "\n" , col.names = FALSE, row.names = FALSE)
rm(train.tokens,train.tokens.dfm,train.tokens.matrix,dictTmp)
# 3-gram word dictionary -------------------------
train.tokens <- tokens_skipgrams(tokens(datasetTrain$text, what = "word",
remove_numbers = TRUE, remove_punct = TRUE,
remove_symbols = TRUE, remove_hyphens = TRUE),n= 3, skip = 0, concatenator = " ")
train.tokens.dfm <- dfm(train.tokens, tolower = FALSE)
train.tokens.dfm <- dfm_trim(train.tokens.dfm, min_docfreq = 10)
train.tokens.matrix <- as.matrix(train.tokens.dfm)
dictTmp <- colnames(train.tokens.matrix)
write.table(dictTmp, file = "newDict/3grams_tagged_words.txt", sep = "\n" , col.names = FALSE, row.names = FALSE )
rm(train.tokens,train.tokens.dfm,train.tokens.matrix,dictTmp)
# Combined 2- and 3-gram word dictionary -------------------------
# Note the stricter min_docfreq (15) for the combined dictionary.
train.tokens <- tokens_skipgrams(tokens(datasetTrain$text, what = "word",
remove_numbers = TRUE, remove_punct = TRUE,
remove_symbols = TRUE, remove_hyphens = TRUE),n= 2:3, skip = 0, concatenator = " ")
train.tokens.dfm <- dfm(train.tokens, tolower = FALSE)
train.tokens.dfm <- dfm_trim(train.tokens.dfm, min_docfreq = 15)
train.tokens.matrix <- as.matrix(train.tokens.dfm)
dictTmp <- colnames(train.tokens.matrix)
write.table(dictTmp, file = "newDict/2_3_grams_tagged_words.txt", sep = "\n" , col.names = FALSE, row.names = FALSE )
rm(train.tokens,train.tokens.dfm,train.tokens.matrix,dictTmp)
rm(list=ls())
|
/Scripts_dictionnaire/ngramsDictTagged.R
|
no_license
|
limsamh/Project-Text-Mining
|
R
| false
| false
| 4,285
|
r
|
# install.packages("quanteda")
# install.packages("readr")
# install.packages("RWeka")
library("quanteda")
library("readr")
library("RWeka")
#Dictionnaire des éléments taggés
# Train ngram Mot--------------------------------------------------
#Reviews non tagged negative
cheminN <-"Datasets/reviews-tagged/train-tagged/neg/"
val1 <- "N"
datasetNegative <- data.frame()
test<-data.frame()
#Extraction
dataset1 <- data.frame()
for(fich in dir(path=cheminN, pattern="*.txt$", recursive=TRUE)){
print(fich)
res = read.delim(paste0(cheminN,fich), header = FALSE, sep ="\t")
res<-subset(res, grepl("JJ",res$V2)|grepl("RB",res$V2)|
grepl("NN",res$V2)|grepl("VB",res$V2))
res$V3<-as.character(res$V3)
tmp<-paste(res$V3,collapse = " ")
test<-rbind(test,tmp)
test<- cbind(test,val1)
names(test) <- c("text","class")
dataset1 <- rbind.data.frame(dataset1,test)
test<-data.frame()
}
datasetNegative <- rbind(datasetNegative,dataset1)
dataset1 <- data.frame()
datasetNegative$text<-as.character(datasetNegative$text)
rm(res,dataset1,test,cheminN,fich,tmp,val1)
#Reviews non tagged positive
cheminP <-"Datasets/reviews-tagged/train-tagged/pos/"
val1 <- "P"
datasetPositive <- data.frame()
test<-data.frame()
#Extraction
dataset1 <- data.frame()
for(fich in dir(path=cheminP, pattern="*.txt$", recursive=TRUE)){
print(fich)
res = read.delim(paste0(cheminP,fich), header = FALSE, sep ="\t")
res<-subset(res, grepl("JJ",res$V2)|grepl("RB",res$V2)|
grepl("NN",res$V2)|grepl("VB",res$V2))
res$V3<-as.character(res$V3)
tmp<-paste(res$V3,collapse = " ")
test<-rbind(test,tmp)
test<- cbind(test,val1)
names(test) <- c("text","class")
dataset1 <- rbind.data.frame(dataset1,test)
test<-data.frame()
}
datasetPositive <- rbind(datasetPositive,dataset1)
dataset1 <- data.frame()
datasetPositive$text<-as.character(datasetPositive$text)
rm(res,dataset1,test,cheminP,fich,tmp,val1)
#End processing
datasetTrain<-data.frame()
datasetTrain <- rbind(datasetPositive,datasetNegative)
# 2 gram Mot dictionnaire-------------------------
train.tokens <- tokens_skipgrams(tokens(datasetTrain$text, what = "word",
remove_numbers = TRUE, remove_punct = TRUE,
remove_symbols = TRUE, remove_hyphens = TRUE),n= 2, skip = 0, concatenator = " ")
train.tokens.dfm <- dfm(train.tokens, tolower = FALSE)
train.tokens.dfm <- dfm_trim(train.tokens.dfm, min_docfreq = 10)
train.tokens.matrix <- as.matrix(train.tokens.dfm)
dictTmp <- colnames(train.tokens.matrix)
write.table(dictTmp, file = "newDict/2grams_tagged_words.txt", sep = "\n" , col.names = FALSE, row.names = FALSE)
rm(train.tokens,train.tokens.dfm,train.tokens.matrix,dictTmp)
# 3 gram Mot dictionnaire-------------------------
train.tokens <- tokens_skipgrams(tokens(datasetTrain$text, what = "word",
remove_numbers = TRUE, remove_punct = TRUE,
remove_symbols = TRUE, remove_hyphens = TRUE),n= 3, skip = 0, concatenator = " ")
train.tokens.dfm <- dfm(train.tokens, tolower = FALSE)
train.tokens.dfm <- dfm_trim(train.tokens.dfm, min_docfreq = 10)
train.tokens.matrix <- as.matrix(train.tokens.dfm)
dictTmp <- colnames(train.tokens.matrix)
write.table(dictTmp, file = "newDict/3grams_tagged_words.txt", sep = "\n" , col.names = FALSE, row.names = FALSE )
rm(train.tokens,train.tokens.dfm,train.tokens.matrix,dictTmp)
# 2-3 gram Mot dictionnaire-------------------------
train.tokens <- tokens_skipgrams(tokens(datasetTrain$text, what = "word",
remove_numbers = TRUE, remove_punct = TRUE,
remove_symbols = TRUE, remove_hyphens = TRUE),n= 2:3, skip = 0, concatenator = " ")
train.tokens.dfm <- dfm(train.tokens, tolower = FALSE)
train.tokens.dfm <- dfm_trim(train.tokens.dfm, min_docfreq = 15)
train.tokens.matrix <- as.matrix(train.tokens.dfm)
dictTmp <- colnames(train.tokens.matrix)
write.table(dictTmp, file = "newDict/2_3_grams_tagged_words.txt", sep = "\n" , col.names = FALSE, row.names = FALSE )
rm(train.tokens,train.tokens.dfm,train.tokens.matrix,dictTmp)
rm(list=ls())
|
# Split genes into different gene sets, then plot -- 18.12.23
# annotLookup2 maps mouse Ensembl gene ids to the original ids used as
# row names of the expression tables; human_gene maps human gene symbols
# (as they appear in the GO/MSigDB gene lists) to mouse Ensembl ids.
annotLookup2 <- read.table("annotation.table",header = T)
human_gene <- read.table("mouse2human_ensembl_gene_id.table",header = T)
# Expression tables: two fibroblast and two ESC replicates, one gene per
# row; column meaning is defined by the input files (assumed to be three
# ordered conditions per table -- see slope_func usage below; confirm).
fib1 <- read.delim("RNASeqFIB_Replicate1.tab.tsv",header=T,row.names=1,check.names=FALSE,sep=" ")
fib2 <- read.delim("RNASeqFIB_Replicate2.tab.tsv",header=T,row.names=1,check.names=FALSE,sep=" ")
esc1 <- read.delim("RNASeqESC_Replicate1.tab.tsv",header=T,row.names=1,check.names=FALSE,sep=" ")
esc2 <- read.delim("RNASeqESC_Replicate2.tab.tsv",header=T,row.names=1,check.names=FALSE,sep=" ")
#------------------DNA_repair-----------------------------------------
DNA_repair <- read.table("../GO_data/HALLMARK_DNA_REPAIR.txt",header = T, sep="\t")
# Some NA values are expected here: a few human DNA-repair genes have no
# corresponding mouse gene.
mouse_DNA_repair_gid <- human_gene$mouse_ensembl_gene_id[match(DNA_repair[2:nrow(DNA_repair),],human_gene$human_external_gene_name)]
mouse_DNA_repair_oid <- as.character(annotLookup2$original_id[match(mouse_DNA_repair_gid,annotLookup2$ensembl_gene_id)])
# load fib1: restrict the expression table to the DNA-repair gene set
temp=match(mouse_DNA_repair_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_DNA_repair_exp1 <- fib1[temp,]
# load fib2
temp=match(mouse_DNA_repair_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_DNA_repair_exp2 <- fib2[temp,]
#import esc1
temp=match(mouse_DNA_repair_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_DNA_repair_exp3 <- esc1[temp,]
# load esc2
temp=match(mouse_DNA_repair_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_DNA_repair_exp4 <- esc2[temp,]
# Compute the per-row least-squares slope of the values regressed on
# column position (1, 2, ..., ncol).  Used in this script to summarize
# whether each gene's expression trends up or down across the ordered
# sample columns.
#
# exp.table: data.frame (or matrix-like) of numeric values, one gene per
#            row; columns must already be in the desired x order.
# Returns:   numeric vector of slopes, one per row (length 0 for an
#            empty table).
slope_func <- function(exp.table) {
  # Generalized from the original hard-coded x <- c(1, 2, 3): the
  # regressor now follows the actual column count, so tables with any
  # number of columns work; results are identical for 3-column input.
  x <- seq_len(ncol(exp.table))
  # seq_len(nrow()) instead of 1:nrow(): the original 1:nrow() iterated
  # over c(1, 0) for a zero-row table and errored inside lm().
  slope <- numeric(nrow(exp.table))
  for (i in seq_len(nrow(exp.table))) {
    y <- as.numeric(exp.table[i, ])
    fit <- lm(y ~ x)
    slope[i] <- fit$coefficients[[2]]
  }
  slope
}
#---------------slope------------------------------
#log2(fast/slow)------------------------------------
#fib1
# (disabled alternative: drop zeros by adding 0.1, then log2 fast/slow)
#mouse_DNA_repair_exp1_slope <- mouse_DNA_repair_exp1+0.1
#mouse_DNA_repair_exp1_f2s <- log(mouse_DNA_repair_exp1$FIB_fast_TPM/mouse_DNA_repair_exp1$FIB_slow_TPM,2)
# Per-gene expression slope for the DNA-repair set in FIB replicate 1,
# plotted as violin + boxplot with the mean marked in red.
mouse_DNA_repair_exp1_slope <- slope_func(mouse_DNA_repair_exp1)
mouse_DNA_repair_exp1_slope <- as.data.frame(mouse_DNA_repair_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_DNA_repair_exp1_slope,aes(
x="",
y=mouse_DNA_repair_exp1_slope
#color=as.factor(mouse_DNA_repair_exp1.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
# NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (use fun=);
# left unchanged to match the ggplot2 version this script was run with.
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB1") +
labs(title = "DNA_repair", x="FIB1", y="slope") +
#coord_fixed(0.2) +
# dashed reference line at slope 0 (no trend across conditions)
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/DNA_repair_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_DNA_repair_exp2_slope <- slope_func(mouse_DNA_repair_exp2)
mouse_DNA_repair_exp2_slope <- as.data.frame(mouse_DNA_repair_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_DNA_repair_exp2_slope,aes(
x="",
y=mouse_DNA_repair_exp2_slope
#color=as.factor(mouse_DNA_repair_exp2.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB2") +
labs(title = "DNA_repair", x="FIB2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/DNA_repair_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_DNA_repair_exp3_slope <- slope_func(mouse_DNA_repair_exp3)
mouse_DNA_repair_exp3_slope <- as.data.frame(mouse_DNA_repair_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_DNA_repair_exp3_slope,aes(
x="",
y=mouse_DNA_repair_exp3_slope
#color=as.factor(mouse_DNA_repair_exp3.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC1") +
labs(title = "DNA_repair", x="ESC1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/DNA_repair_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_DNA_repair_exp4_slope <- slope_func(mouse_DNA_repair_exp4)
mouse_DNA_repair_exp4_slope <- as.data.frame(mouse_DNA_repair_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_DNA_repair_exp4_slope,aes(
x="",
y=mouse_DNA_repair_exp4_slope
#color=as.factor(mouse_DNA_repair_exp4.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC2") +
labs(title = "DNA_repair", x="ESC2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/DNA_repair_ESC2_slope.png", width = 2, height = 4)
#----------go-ribosome----------------------------
GO_RIBOSOME <- read.table("../GO_data/GO0005840GO_RIBOSOME.txt",header = T, sep="\t")
#有一些NA值,证明一些human_GO_RIBOSOME_gene 没有对应的老鼠基因
mouse_GO_RIBOSOME_gid <- human_gene$mouse_ensembl_gene_id[match(GO_RIBOSOME[2:nrow(GO_RIBOSOME),],human_gene$human_external_gene_name)]
mouse_GO_RIBOSOME_oid <- as.character(annotLookup2$original_id[match(mouse_GO_RIBOSOME_gid,annotLookup2$ensembl_gene_id)])
#载入fib1
temp=match(mouse_GO_RIBOSOME_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_GO_RIBOSOME_exp1 <- fib1[temp,]
#载入fib2
temp=match(mouse_GO_RIBOSOME_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_GO_RIBOSOME_exp2 <- fib2[temp,]
#import esc1
temp=match(mouse_GO_RIBOSOME_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_GO_RIBOSOME_exp3 <- esc1[temp,]
#载入esc2
temp=match(mouse_GO_RIBOSOME_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_GO_RIBOSOME_exp4 <- esc2[temp,]
#fib1
#除去0,都加0.1
#mouse_GO_RIBOSOME_exp1_slope <- mouse_GO_RIBOSOME_exp1+0.1
#mouse_GO_RIBOSOME_exp1_f2s <- log(mouse_GO_RIBOSOME_exp1$FIB_fast_TPM/mouse_GO_RIBOSOME_exp1$FIB_slow_TPM,2)
mouse_GO_RIBOSOME_exp1_slope <- slope_func(mouse_GO_RIBOSOME_exp1)
mouse_GO_RIBOSOME_exp1_slope <- as.data.frame(mouse_GO_RIBOSOME_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_GO_RIBOSOME_exp1_slope,aes(
x="",
y=mouse_GO_RIBOSOME_exp1_slope
#color=as.factor(mouse_GO_RIBOSOME_exp1.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB1") +
labs(title = "GO_RIBOSOME", x="FIB1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/GO_RIBOSOME_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_GO_RIBOSOME_exp2_slope <- slope_func(mouse_GO_RIBOSOME_exp2)
mouse_GO_RIBOSOME_exp2_slope <- as.data.frame(mouse_GO_RIBOSOME_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_GO_RIBOSOME_exp2_slope,aes(
x="",
y=mouse_GO_RIBOSOME_exp2_slope
#color=as.factor(mouse_GO_RIBOSOME_exp2.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB2") +
labs(title = "GO_RIBOSOME", x="FIB2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/GO_RIBOSOME_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_GO_RIBOSOME_exp3_slope <- slope_func(mouse_GO_RIBOSOME_exp3)
mouse_GO_RIBOSOME_exp3_slope <- as.data.frame(mouse_GO_RIBOSOME_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_GO_RIBOSOME_exp3_slope,aes(
x="",
y=mouse_GO_RIBOSOME_exp3_slope
#color=as.factor(mouse_GO_RIBOSOME_exp3.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC1") +
labs(title = "GO_RIBOSOME", x="ESC1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/GO_RIBOSOME_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_GO_RIBOSOME_exp4_slope <- slope_func(mouse_GO_RIBOSOME_exp4)
mouse_GO_RIBOSOME_exp4_slope <- as.data.frame(mouse_GO_RIBOSOME_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_GO_RIBOSOME_exp4_slope,aes(
x="",
y=mouse_GO_RIBOSOME_exp4_slope
#color=as.factor(mouse_GO_RIBOSOME_exp4.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC2") +
labs(title = "GO_RIBOSOME", x="ESC2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/GO_RIBOSOME_ESC2_slope.png", width = 2, height = 4)
#response to dna damage---------------------------
RESPONSE_TO_DNA_DAMAGE_STIMULUS <- read.table("../GO_data/GO0006974RESPONSE_TO_DNA_DAMAGE_STIMULUS.txt",header = T, sep="\t")
#有一些NA值,证明一些human_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gene 没有对应的老鼠基因
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid <- human_gene$mouse_ensembl_gene_id[match(RESPONSE_TO_DNA_DAMAGE_STIMULUS[2:nrow(RESPONSE_TO_DNA_DAMAGE_STIMULUS),],human_gene$human_external_gene_name)]
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid <- as.character(annotLookup2$original_id[match(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid,annotLookup2$ensembl_gene_id)])
#载入fib1
temp=match(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1 <- fib1[temp,]
#载入fib2
temp=match(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2 <- fib2[temp,]
#import esc1
temp=match(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3 <- esc1[temp,]
#载入esc2
temp=match(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4 <- esc2[temp,]
#log2(fast/slow)------------------------------------
#fib1
#除去0,都加0.1
#mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1+0.1
#mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_f2s <- log(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1$FIB_fast_TPM/mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1$FIB_slow_TPM,2)
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- slope_func(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1)
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- as.data.frame(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope,aes(
x="",
y=mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope
#color=as.factor(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB1") +
labs(title = "RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="FIB1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RESPONSE_TO_DNA_DAMAGE_STIMULUS_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- slope_func(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2)
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- as.data.frame(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope,aes(
x="",
y=mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope
#color=as.factor(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB2") +
labs(title = "RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="FIB2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RESPONSE_TO_DNA_DAMAGE_STIMULUS_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- slope_func(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3)
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- as.data.frame(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope,aes(
x="",
y=mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope
#color=as.factor(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC1") +
labs(title = "RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="ESC1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- slope_func(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4)
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- as.data.frame(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope,aes(
x="",
y=mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope
#color=as.factor(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC2") +
labs(title = "RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="ESC2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC2_slope.png", width = 2, height = 4)
#-----------REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS--------------
# Read the human gene symbols for this GO set; row 1 is skipped below
# (presumably a description header — confirm against the file format).
REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS <- read.table("../GO_data/GO_2001020GO_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS.txt",header = T, sep="\t")
# Some NA values here: some human genes in this set have no mouse ortholog.
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid <- human_gene$mouse_ensembl_gene_id[match(REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS[2:nrow(REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS),],human_gene$human_external_gene_name)]
# Map mouse Ensembl IDs to the original row IDs used by the expression tables.
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid <- as.character(annotLookup2$original_id[match(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid,annotLookup2$ensembl_gene_id)])
# Load fib1: keep only rows whose IDs belong to the gene set (drop unmatched).
temp=match(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1 <- fib1[temp,]
# Load fib2 (same filtering as above).
temp=match(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2 <- fib2[temp,]
# Load esc1.
temp=match(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3 <- esc1[temp,]
# Load esc2.
temp=match(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4 <- esc2[temp,]
# ---- FIB1 ----
# Violin + boxplot of per-gene slopes (an earlier log2(fast/slow)
# approach was abandoned in favour of slope_func) for the
# REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS set in FIB replicate 1.
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- slope_func(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1)
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- as.data.frame(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope, aes(
  x = "",
  # resolves to the single column of the data frame (same name as the object)
  y = mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "FIB1") +
  labs(title = "REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x = "FIB1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_fib1_slope.png", width = 2, height = 4)
# ---- FIB2 ----
# Same violin/box slope plot for FIB replicate 2.
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- slope_func(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2)
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- as.data.frame(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope, aes(
  x = "",
  y = mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "FIB2") +
  labs(title = "REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x = "FIB2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_FIB2_slope.png", width = 2, height = 4)
# ---- ESC1 ----
# Same violin/box slope plot for ESC replicate 1.
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- slope_func(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3)
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- as.data.frame(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope, aes(
  x = "",
  y = mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "ESC1") +
  labs(title = "REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x = "ESC1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC1_slope.png", width = 2, height = 4)
# ---- ESC2 ----
# Same violin/box slope plot for ESC replicate 2.
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- slope_func(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4)
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- as.data.frame(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope, aes(
  x = "",
  y = mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "ESC2") +
  labs(title = "REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x = "ESC2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC2_slope.png", width = 2, height = 4)
#PLOT FIB1+FIB2
#grid.arrange(FIB1, FIB2, ncol=2)
#------new go gene sets----------------------
#------------------REGULATION_OF_DNA_DAMAGE_CHECKPOINT--------------------
# Read the human gene symbols for this GO set; row 1 is skipped below
# (presumably a description header — confirm against the file format).
REGULATION_OF_DNA_DAMAGE_CHECKPOINT <- read.table("../GO_data/GO_REGULATION_OF_DNA_DAMAGE_CHECKPOINT.txt",header = T, sep="\t")
# Some NA values here: some human genes in this set have no mouse ortholog.
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_gid <- human_gene$mouse_ensembl_gene_id[match(REGULATION_OF_DNA_DAMAGE_CHECKPOINT[2:nrow(REGULATION_OF_DNA_DAMAGE_CHECKPOINT),],human_gene$human_external_gene_name)]
# Map mouse Ensembl IDs to the original row IDs used by the expression tables.
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_oid <- as.character(annotLookup2$original_id[match(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_gid,annotLookup2$ensembl_gene_id)])
# Load fib1: keep only rows whose IDs belong to the gene set (drop unmatched).
temp=match(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1 <- fib1[temp,]
# Load fib2 (same filtering as above).
temp=match(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2 <- fib2[temp,]
# Load esc1.
temp=match(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3 <- esc1[temp,]
# Load esc2.
temp=match(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4 <- esc2[temp,]
# ---- FIB1 ----
# Violin + boxplot of per-gene slopes (an earlier log2(fast/slow)
# approach was abandoned in favour of slope_func) for the
# REGULATION_OF_DNA_DAMAGE_CHECKPOINT set in FIB replicate 1.
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_slope <- slope_func(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1)
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_slope <- as.data.frame(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_slope, aes(
  x = "",
  # resolves to the single column of the data frame (same name as the object)
  y = mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "FIB1") +
  labs(title = "REGULATION_OF_DNA_DAMAGE_CHECKPOINT", x = "FIB1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_DNA_DAMAGE_CHECKPOINT_fib1_slope.png", width = 2, height = 4)
# ---- FIB2 ----
# Same violin/box slope plot for FIB replicate 2.
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2_slope <- slope_func(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2)
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2_slope <- as.data.frame(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2_slope, aes(
  x = "",
  y = mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "FIB2") +
  labs(title = "REGULATION_OF_DNA_DAMAGE_CHECKPOINT", x = "FIB2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_DNA_DAMAGE_CHECKPOINT_FIB2_slope.png", width = 2, height = 4)
# ---- ESC1 ----
# Same violin/box slope plot for ESC replicate 1.
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3_slope <- slope_func(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3)
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3_slope <- as.data.frame(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3_slope, aes(
  x = "",
  y = mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "ESC1") +
  labs(title = "REGULATION_OF_DNA_DAMAGE_CHECKPOINT", x = "ESC1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_DNA_DAMAGE_CHECKPOINT_ESC1_slope.png", width = 2, height = 4)
# ---- ESC2 ----
# Same violin/box slope plot for ESC replicate 2.
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4_slope <- slope_func(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4)
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4_slope <- as.data.frame(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4_slope, aes(
  x = "",
  y = mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "ESC2") +
  labs(title = "REGULATION_OF_DNA_DAMAGE_CHECKPOINT", x = "ESC2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_DNA_DAMAGE_CHECKPOINT_ESC2_slope.png", width = 2, height = 4)
#----------HALLMARK_PEROXISOME----------------------------
# Read the human gene symbols for this hallmark set; row 1 is skipped below
# (presumably a description header — confirm against the file format).
HALLMARK_PEROXISOME <- read.table("../GO_data/HALLMARK_PEROXISOME.txt",header = T, sep="\t")
# Some NA values here: some human genes in this set have no mouse ortholog.
mouse_HALLMARK_PEROXISOME_gid <- human_gene$mouse_ensembl_gene_id[match(HALLMARK_PEROXISOME[2:nrow(HALLMARK_PEROXISOME),],human_gene$human_external_gene_name)]
# Map mouse Ensembl IDs to the original row IDs used by the expression tables.
mouse_HALLMARK_PEROXISOME_oid <- as.character(annotLookup2$original_id[match(mouse_HALLMARK_PEROXISOME_gid,annotLookup2$ensembl_gene_id)])
# Load fib1: keep only rows whose IDs belong to the gene set (drop unmatched).
temp=match(mouse_HALLMARK_PEROXISOME_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_HALLMARK_PEROXISOME_exp1 <- fib1[temp,]
# Load fib2 (same filtering as above).
temp=match(mouse_HALLMARK_PEROXISOME_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_HALLMARK_PEROXISOME_exp2 <- fib2[temp,]
# Load esc1.
temp=match(mouse_HALLMARK_PEROXISOME_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_HALLMARK_PEROXISOME_exp3 <- esc1[temp,]
# Load esc2.
temp=match(mouse_HALLMARK_PEROXISOME_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_HALLMARK_PEROXISOME_exp4 <- esc2[temp,]
# ---- FIB1 ----
# Violin + boxplot of per-gene slopes (an earlier log2(fast/slow)
# approach was abandoned in favour of slope_func) for the
# HALLMARK_PEROXISOME set in FIB replicate 1.
mouse_HALLMARK_PEROXISOME_exp1_slope <- slope_func(mouse_HALLMARK_PEROXISOME_exp1)
mouse_HALLMARK_PEROXISOME_exp1_slope <- as.data.frame(mouse_HALLMARK_PEROXISOME_exp1_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_HALLMARK_PEROXISOME_exp1_slope, aes(
  x = "",
  # resolves to the single column of the data frame (same name as the object)
  y = mouse_HALLMARK_PEROXISOME_exp1_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "FIB1") +
  labs(title = "HALLMARK_PEROXISOME", x = "FIB1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/HALLMARK_PEROXISOME_fib1_slope.png", width = 2, height = 4)
# ---- FIB2 ----
# Same violin/box slope plot for FIB replicate 2.
mouse_HALLMARK_PEROXISOME_exp2_slope <- slope_func(mouse_HALLMARK_PEROXISOME_exp2)
mouse_HALLMARK_PEROXISOME_exp2_slope <- as.data.frame(mouse_HALLMARK_PEROXISOME_exp2_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_HALLMARK_PEROXISOME_exp2_slope, aes(
  x = "",
  y = mouse_HALLMARK_PEROXISOME_exp2_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "FIB2") +
  labs(title = "HALLMARK_PEROXISOME", x = "FIB2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/HALLMARK_PEROXISOME_FIB2_slope.png", width = 2, height = 4)
# ---- ESC1 ----
# Same violin/box slope plot for ESC replicate 1.
mouse_HALLMARK_PEROXISOME_exp3_slope <- slope_func(mouse_HALLMARK_PEROXISOME_exp3)
mouse_HALLMARK_PEROXISOME_exp3_slope <- as.data.frame(mouse_HALLMARK_PEROXISOME_exp3_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_HALLMARK_PEROXISOME_exp3_slope, aes(
  x = "",
  y = mouse_HALLMARK_PEROXISOME_exp3_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "ESC1") +
  labs(title = "HALLMARK_PEROXISOME", x = "ESC1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/HALLMARK_PEROXISOME_ESC1_slope.png", width = 2, height = 4)
# ---- ESC2 ----
# Same violin/box slope plot for ESC replicate 2.
mouse_HALLMARK_PEROXISOME_exp4_slope <- slope_func(mouse_HALLMARK_PEROXISOME_exp4)
mouse_HALLMARK_PEROXISOME_exp4_slope <- as.data.frame(mouse_HALLMARK_PEROXISOME_exp4_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_HALLMARK_PEROXISOME_exp4_slope, aes(
  x = "",
  y = mouse_HALLMARK_PEROXISOME_exp4_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "ESC2") +
  labs(title = "HALLMARK_PEROXISOME", x = "ESC2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/HALLMARK_PEROXISOME_ESC2_slope.png", width = 2, height = 4)
#----------HALLMARK_P53_PATHWAY----------------------------
# Read the human gene symbols for this hallmark set; row 1 is skipped below
# (presumably a description header — confirm against the file format).
HALLMARK_P53_PATHWAY <- read.table("../GO_data/HALLMARK_P53_PATHWAY.txt",header = T, sep="\t")
# Some NA values here: some human genes in this set have no mouse ortholog.
mouse_HALLMARK_P53_PATHWAY_gid <- human_gene$mouse_ensembl_gene_id[match(HALLMARK_P53_PATHWAY[2:nrow(HALLMARK_P53_PATHWAY),],human_gene$human_external_gene_name)]
# Map mouse Ensembl IDs to the original row IDs used by the expression tables.
mouse_HALLMARK_P53_PATHWAY_oid <- as.character(annotLookup2$original_id[match(mouse_HALLMARK_P53_PATHWAY_gid,annotLookup2$ensembl_gene_id)])
# Load fib1: keep only rows whose IDs belong to the gene set (drop unmatched).
temp=match(mouse_HALLMARK_P53_PATHWAY_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_HALLMARK_P53_PATHWAY_exp1 <- fib1[temp,]
# Load fib2 (same filtering as above).
temp=match(mouse_HALLMARK_P53_PATHWAY_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_HALLMARK_P53_PATHWAY_exp2 <- fib2[temp,]
# Load esc1.
temp=match(mouse_HALLMARK_P53_PATHWAY_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_HALLMARK_P53_PATHWAY_exp3 <- esc1[temp,]
# Load esc2.
temp=match(mouse_HALLMARK_P53_PATHWAY_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_HALLMARK_P53_PATHWAY_exp4 <- esc2[temp,]
# ---- FIB1 ----
# Violin + boxplot of per-gene slopes (an earlier log2(fast/slow)
# approach was abandoned in favour of slope_func) for the
# HALLMARK_P53_PATHWAY set in FIB replicate 1.
mouse_HALLMARK_P53_PATHWAY_exp1_slope <- slope_func(mouse_HALLMARK_P53_PATHWAY_exp1)
mouse_HALLMARK_P53_PATHWAY_exp1_slope <- as.data.frame(mouse_HALLMARK_P53_PATHWAY_exp1_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_HALLMARK_P53_PATHWAY_exp1_slope, aes(
  x = "",
  # resolves to the single column of the data frame (same name as the object)
  y = mouse_HALLMARK_P53_PATHWAY_exp1_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "FIB1") +
  labs(title = "HALLMARK_P53_PATHWAY", x = "FIB1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/HALLMARK_P53_PATHWAY_fib1_slope.png", width = 2, height = 4)
# ---- FIB2 ----
# Same violin/box slope plot for FIB replicate 2.
mouse_HALLMARK_P53_PATHWAY_exp2_slope <- slope_func(mouse_HALLMARK_P53_PATHWAY_exp2)
mouse_HALLMARK_P53_PATHWAY_exp2_slope <- as.data.frame(mouse_HALLMARK_P53_PATHWAY_exp2_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_HALLMARK_P53_PATHWAY_exp2_slope, aes(
  x = "",
  y = mouse_HALLMARK_P53_PATHWAY_exp2_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "FIB2") +
  labs(title = "HALLMARK_P53_PATHWAY", x = "FIB2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/HALLMARK_P53_PATHWAY_FIB2_slope.png", width = 2, height = 4)
# ---- ESC1 ----
# Same violin/box slope plot for ESC replicate 1.
mouse_HALLMARK_P53_PATHWAY_exp3_slope <- slope_func(mouse_HALLMARK_P53_PATHWAY_exp3)
mouse_HALLMARK_P53_PATHWAY_exp3_slope <- as.data.frame(mouse_HALLMARK_P53_PATHWAY_exp3_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_HALLMARK_P53_PATHWAY_exp3_slope, aes(
  x = "",
  y = mouse_HALLMARK_P53_PATHWAY_exp3_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "ESC1") +
  labs(title = "HALLMARK_P53_PATHWAY", x = "ESC1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/HALLMARK_P53_PATHWAY_ESC1_slope.png", width = 2, height = 4)
# ---- ESC2 ----
# Same violin/box slope plot for ESC replicate 2.
mouse_HALLMARK_P53_PATHWAY_exp4_slope <- slope_func(mouse_HALLMARK_P53_PATHWAY_exp4)
mouse_HALLMARK_P53_PATHWAY_exp4_slope <- as.data.frame(mouse_HALLMARK_P53_PATHWAY_exp4_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_HALLMARK_P53_PATHWAY_exp4_slope, aes(
  x = "",
  y = mouse_HALLMARK_P53_PATHWAY_exp4_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "ESC2") +
  labs(title = "HALLMARK_P53_PATHWAY", x = "ESC2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/HALLMARK_P53_PATHWAY_ESC2_slope.png", width = 2, height = 4)
#----------RIBOSOME_BIOGENESIS_AND_ASSEMBLY----------------------------
# Read the human gene symbols for this GO set; row 1 is skipped below
# (presumably a description header — confirm against the file format).
RIBOSOME_BIOGENESIS_AND_ASSEMBLY <- read.table("../GO_data/GO_0042254RIBOSOME_BIOGENESIS_AND_ASSEMBLY.txt",header = T, sep="\t")
# Some NA values here: some human genes in this set have no mouse ortholog.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_gid <- human_gene$mouse_ensembl_gene_id[match(RIBOSOME_BIOGENESIS_AND_ASSEMBLY[2:nrow(RIBOSOME_BIOGENESIS_AND_ASSEMBLY),],human_gene$human_external_gene_name)]
# Map mouse Ensembl IDs to the original row IDs used by the expression tables.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid <- as.character(annotLookup2$original_id[match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_gid,annotLookup2$ensembl_gene_id)])
# Load fib1: keep only rows whose IDs belong to the gene set (drop unmatched).
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1 <- fib1[temp,]
# Load fib2 (same filtering as above).
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2 <- fib2[temp,]
# Load esc1.
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3 <- esc1[temp,]
# Load esc2.
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4 <- esc2[temp,]
# ---- FIB1 ----
# Violin + boxplot of per-gene slopes (an earlier log2(fast/slow)
# approach was abandoned in favour of slope_func) for the
# RIBOSOME_BIOGENESIS_AND_ASSEMBLY set in FIB replicate 1.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope, aes(
  x = "",
  # resolves to the single column of the data frame (same name as the object)
  y = mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "FIB1") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x = "FIB1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_fib1_slope.png", width = 2, height = 4)
# ---- FIB2 ----
# Same violin/box slope plot for FIB replicate 2.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope, aes(
  x = "",
  y = mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "FIB2") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x = "FIB2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_FIB2_slope.png", width = 2, height = 4)
# ---- ESC1 ----
# Same violin/box slope plot for ESC replicate 1.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope)
library(ggplot2)
library(reshape2)  # kept for consistency with the other plot sections
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope, aes(
  x = "",
  y = mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope
)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = "mean", geom = "text", label = "------", size = 6, color = "red") +
  scale_colour_discrete(name = "ESC1") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x = "ESC1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope,aes(
x="",
y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope
#color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC2") +
labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="ESC2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_ESC2_slope.png", width = 2, height = 4)
#----------POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS----------------------------
# Load the human gene-set member list (first row is a set description, hence
# the [2:nrow(...), ] below), map human symbols -> mouse Ensembl ids ->
# original expression-table row ids, then pull the matching rows out of each
# of the four expression tables.
POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS <- read.table("../GO_data/GO_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS.txt",header = T, sep="\t")
# Some NA values here: some human genes in this set have no mouse ortholog.
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid <- human_gene$mouse_ensembl_gene_id[match(POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS[2:nrow(POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS),],human_gene$human_external_gene_name)]
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid <- as.character(annotLookup2$original_id[match(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid,annotLookup2$ensembl_gene_id)])
# Load FIB replicate 1
temp=match(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1 <- fib1[temp,]
# Load FIB replicate 2
temp=match(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2 <- fib2[temp,]
#import esc1
temp=match(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3 <- esc1[temp,]
# Load ESC replicate 2
temp=match(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4 <- esc2[temp,]
#fib1
# (disabled) alternative metric: add a 0.1 pseudocount to avoid zeros, then
# use log2(fast/slow) instead of the fitted slope.
#mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1+0.1
#mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_f2s <- log(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1$FIB_fast_TPM/mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1$FIB_slow_TPM,2)
# Per-gene slope across the sorted fractions; see slope_func below.
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- slope_func(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1)
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- as.data.frame(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
# Violin + boxplot of per-gene slopes; red dashes = mean, blue dashed = 0.
ggplot(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope,aes(
  x="",
  y=mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope
  #color=as.factor(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB1") +
  labs(title = "POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="FIB1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- slope_func(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2)
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- as.data.frame(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope,aes(
  x="",
  y=mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope
  #color=as.factor(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB2") +
  labs(title = "POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="FIB2", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- slope_func(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3)
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- as.data.frame(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope,aes(
  x="",
  y=mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope
  #color=as.factor(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="ESC1") +
  labs(title = "POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="ESC1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- slope_func(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4)
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- as.data.frame(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope,aes(
  x="",
  y=mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope
  #color=as.factor(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="ESC2") +
  labs(title = "POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="ESC2", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC2_slope.png", width = 2, height = 4)
#----------RIBOSOME_BIOGENESIS_AND_ASSEMBLY----------------------------
# NOTE(review): this section reuses the same variable names as the
# RIBOSOME_BIOGENESIS_AND_ASSEMBLY section above but reads the set from the
# GO:0042254 file instead, so it silently overwrites the earlier results —
# confirm both input files are intended to coexist.
RIBOSOME_BIOGENESIS_AND_ASSEMBLY <- read.table("../GO_data/GO_0042254RIBOSOME_BIOGENESIS_AND_ASSEMBLY.txt",header = T, sep="\t")
# Some NA values here: some human genes in this set have no mouse ortholog.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_gid <- human_gene$mouse_ensembl_gene_id[match(RIBOSOME_BIOGENESIS_AND_ASSEMBLY[2:nrow(RIBOSOME_BIOGENESIS_AND_ASSEMBLY),],human_gene$human_external_gene_name)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid <- as.character(annotLookup2$original_id[match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_gid,annotLookup2$ensembl_gene_id)])
# Load FIB replicate 1
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1 <- fib1[temp,]
# Load FIB replicate 2
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2 <- fib2[temp,]
#import esc1
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3 <- esc1[temp,]
# Load ESC replicate 2
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4 <- esc2[temp,]
#fib1
# (disabled) alternative metric: add a 0.1 pseudocount to avoid zeros, then
# use log2(fast/slow) instead of the fitted slope.
#mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope <- mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1+0.1
#mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_f2s <- log(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1$FIB_fast_TPM/mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1$FIB_slow_TPM,2)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
# Violin + boxplot of per-gene slopes; red dashes = mean, blue dashed = 0.
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope,aes(
  x="",
  y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope
  #color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB1") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="FIB1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope,aes(
  x="",
  y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope
  #color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB2") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="FIB2", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope,aes(
  x="",
  y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope
  #color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="ESC1") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="ESC1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope,aes(
  x="",
  y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope
  #color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="ESC2") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="ESC2", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_ESC2_slope.png", width = 2, height = 4)
#----------REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION----------------------------
# Map this gene set's human symbols to mouse expression-table rows, compute
# per-gene slopes, and plot one violin per replicate (FIB1/FIB2/ESC1/ESC2).
REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION <- read.table("../GO_data/GO_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION.txt",header = T, sep="\t")
# Some NA values here: some human genes in this set have no mouse ortholog.
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_gid <- human_gene$mouse_ensembl_gene_id[match(REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION[2:nrow(REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION),],human_gene$human_external_gene_name)]
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_oid <- as.character(annotLookup2$original_id[match(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_gid,annotLookup2$ensembl_gene_id)])
# Load FIB replicate 1
temp=match(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1 <- fib1[temp,]
# Load FIB replicate 2
temp=match(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2 <- fib2[temp,]
#import esc1
temp=match(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3 <- esc1[temp,]
# Load ESC replicate 2
temp=match(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4 <- esc2[temp,]
#fib1
# (disabled) alternative metric: add a 0.1 pseudocount to avoid zeros, then
# use log2(fast/slow) instead of the fitted slope.
#mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_slope <- mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1+0.1
#mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_f2s <- log(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1$FIB_fast_TPM/mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1$FIB_slow_TPM,2)
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_slope <- slope_func(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1)
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_slope <- as.data.frame(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
# Violin + boxplot of per-gene slopes; red dashes = mean, blue dashed = 0.
ggplot(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_slope,aes(
  x="",
  y=mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_slope
  #color=as.factor(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB1") +
  labs(title = "REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION", x="FIB1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2_slope <- slope_func(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2)
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2_slope <- as.data.frame(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2_slope,aes(
  x="",
  y=mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2_slope
  #color=as.factor(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB2") +
  labs(title = "REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION", x="FIB2", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3_slope <- slope_func(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3)
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3_slope <- as.data.frame(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3_slope,aes(
  x="",
  y=mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3_slope
  #color=as.factor(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="ESC1") +
  labs(title = "REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION", x="ESC1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4_slope <- slope_func(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4)
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4_slope <- as.data.frame(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4_slope,aes(
  x="",
  y=mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4_slope
  #color=as.factor(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="ESC2") +
  labs(title = "REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION", x="ESC2", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_ESC2_slope.png", width = 2, height = 4)
#----------WONG_MITOCHONDRIA_GENE_MODULE----------------------------
#----------WONG_MITOCHONDRIA_GENE_MODULE----------------------------
# Map this gene set's human symbols to mouse expression-table rows, compute
# per-gene slopes, and plot one violin per replicate (FIB1/FIB2/ESC1/ESC2).
WONG_MITOCHONDRIA_GENE_MODULE <- read.table("../GO_data/WONG_MITOCHONDRIA_GENE_MODULE.txt",header = T, sep="\t")
# Some NA values here: some human genes in this set have no mouse ortholog.
mouse_WONG_MITOCHONDRIA_GENE_MODULE_gid <- human_gene$mouse_ensembl_gene_id[match(WONG_MITOCHONDRIA_GENE_MODULE[2:nrow(WONG_MITOCHONDRIA_GENE_MODULE),],human_gene$human_external_gene_name)]
mouse_WONG_MITOCHONDRIA_GENE_MODULE_oid <- as.character(annotLookup2$original_id[match(mouse_WONG_MITOCHONDRIA_GENE_MODULE_gid,annotLookup2$ensembl_gene_id)])
# Load FIB replicate 1
temp=match(mouse_WONG_MITOCHONDRIA_GENE_MODULE_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1 <- fib1[temp,]
# Load FIB replicate 2
temp=match(mouse_WONG_MITOCHONDRIA_GENE_MODULE_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2 <- fib2[temp,]
#import esc1
temp=match(mouse_WONG_MITOCHONDRIA_GENE_MODULE_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3 <- esc1[temp,]
# Load ESC replicate 2
temp=match(mouse_WONG_MITOCHONDRIA_GENE_MODULE_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4 <- esc2[temp,]
#fib1
# (disabled) alternative metric: add a 0.1 pseudocount to avoid zeros, then
# use log2(fast/slow) instead of the fitted slope.
#mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_slope <- mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1+0.1
#mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_f2s <- log(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1$FIB_fast_TPM/mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1$FIB_slow_TPM,2)
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_slope <- slope_func(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1)
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_slope <- as.data.frame(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
# Violin + boxplot of per-gene slopes; red dashes = mean, blue dashed = 0.
ggplot(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_slope,aes(
  x="",
  y=mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_slope
  #color=as.factor(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB1") +
  labs(title = "WONG_MITOCHONDRIA_GENE_MODULE", x="FIB1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/WONG_MITOCHONDRIA_GENE_MODULE_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2_slope <- slope_func(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2)
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2_slope <- as.data.frame(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2_slope,aes(
  x="",
  y=mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2_slope
  #color=as.factor(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB2") +
  labs(title = "WONG_MITOCHONDRIA_GENE_MODULE", x="FIB2", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/WONG_MITOCHONDRIA_GENE_MODULE_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3_slope <- slope_func(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3)
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3_slope <- as.data.frame(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3_slope,aes(
  x="",
  y=mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3_slope
  #color=as.factor(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="ESC1") +
  labs(title = "WONG_MITOCHONDRIA_GENE_MODULE", x="ESC1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/WONG_MITOCHONDRIA_GENE_MODULE_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4_slope <- slope_func(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4)
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4_slope <- as.data.frame(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4_slope,aes(
  x="",
  y=mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4_slope
  #color=as.factor(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="ESC2") +
  labs(title = "WONG_MITOCHONDRIA_GENE_MODULE", x="ESC2", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/WONG_MITOCHONDRIA_GENE_MODULE_ESC2_slope.png", width = 2, height = 4)
|
/Analysis/Zhisheng/violin plots for selected gene sets/violin1226_linear_slope.R
|
no_license
|
carey-lab/ProliferationMitochondriaHeterogeneity
|
R
| false
| false
| 90,374
|
r
|
# Split genes into different gene sets and plot them -- 2018-12-23
# Setup: annotation lookup (original row id <-> mouse Ensembl id), the
# mouse<->human ortholog table, and the four expression tables
# (FIB/ESC, two replicates each; rows are genes, columns are TPM values).
annotLookup2 <- read.table("annotation.table",header = T)
human_gene <- read.table("mouse2human_ensembl_gene_id.table",header = T)
fib1 <- read.delim("RNASeqFIB_Replicate1.tab.tsv",header=T,row.names=1,check.names=FALSE,sep=" ")
fib2 <- read.delim("RNASeqFIB_Replicate2.tab.tsv",header=T,row.names=1,check.names=FALSE,sep=" ")
esc1 <- read.delim("RNASeqESC_Replicate1.tab.tsv",header=T,row.names=1,check.names=FALSE,sep=" ")
esc2 <- read.delim("RNASeqESC_Replicate2.tab.tsv",header=T,row.names=1,check.names=FALSE,sep=" ")
#------------------DNA_repair-----------------------------------------
# First row of the gene-set file is a description, hence [2:nrow(...), ].
DNA_repair <- read.table("../GO_data/HALLMARK_DNA_REPAIR.txt",header = T, sep="\t")
# Some NA values here: some human DNA_repair genes have no mouse ortholog.
mouse_DNA_repair_gid <- human_gene$mouse_ensembl_gene_id[match(DNA_repair[2:nrow(DNA_repair),],human_gene$human_external_gene_name)]
mouse_DNA_repair_oid <- as.character(annotLookup2$original_id[match(mouse_DNA_repair_gid,annotLookup2$ensembl_gene_id)])
# Load FIB replicate 1
temp=match(mouse_DNA_repair_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_DNA_repair_exp1 <- fib1[temp,]
# Load FIB replicate 2
temp=match(mouse_DNA_repair_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_DNA_repair_exp2 <- fib2[temp,]
#import esc1
temp=match(mouse_DNA_repair_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_DNA_repair_exp3 <- esc1[temp,]
# Load ESC replicate 2
temp=match(mouse_DNA_repair_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_DNA_repair_exp4 <- esc2[temp,]
#' Per-gene linear slope of expression across ordered conditions.
#'
#' Fits lm(y ~ x) for every row of `exp.table`, where x is the column
#' index 1..ncol(exp.table). For the 3-column tables used in this script
#' this is identical to the original hard-coded x = c(1, 2, 3), but it now
#' also generalizes to tables with a different number of ordered columns.
#'
#' @param exp.table A data.frame (or matrix) with one row per gene and one
#'   numeric expression column per ordered condition.
#' @return A numeric vector of fitted slopes, one per row of `exp.table`
#'   (length 0 for an empty table).
slope_func <- function(exp.table) {
  x <- seq_len(ncol(exp.table))
  # seq_len() (not 1:nrow) is safe for zero-row input; vapply guarantees a
  # numeric result of the right length.
  vapply(
    seq_len(nrow(exp.table)),
    function(i) {
      y <- as.numeric(exp.table[i, ])
      # coefficients[[2]] is the slope term of the least-squares fit
      lm(y ~ x)$coefficients[[2]]
    },
    numeric(1)
  )
}
#---------------slope------------------------------
#log2(fast/slow)------------------------------------
# For each DNA-repair gene, compute the linear slope across the three
# ordered expression columns, then draw a violin + box plot of the slope
# distribution for each of the four samples and save it as a PNG.
#fib1
# (commented out) add 0.1 everywhere to avoid zeros before the log ratio
#mouse_DNA_repair_exp1_slope <- mouse_DNA_repair_exp1+0.1
#mouse_DNA_repair_exp1_f2s <- log(mouse_DNA_repair_exp1$FIB_fast_TPM/mouse_DNA_repair_exp1$FIB_slow_TPM,2)
mouse_DNA_repair_exp1_slope <- slope_func(mouse_DNA_repair_exp1)
mouse_DNA_repair_exp1_slope <- as.data.frame(mouse_DNA_repair_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_DNA_repair_exp1_slope,aes(
x="",
y=mouse_DNA_repair_exp1_slope
#color=as.factor(mouse_DNA_repair_exp1.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
# NOTE(review): fun.y is deprecated since ggplot2 3.3.0 (renamed to fun=);
# the red dashes mark the mean. Kept as-is to preserve behavior.
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB1") +
labs(title = "DNA_repair", x="FIB1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/DNA_repair_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_DNA_repair_exp2_slope <- slope_func(mouse_DNA_repair_exp2)
mouse_DNA_repair_exp2_slope <- as.data.frame(mouse_DNA_repair_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_DNA_repair_exp2_slope,aes(
x="",
y=mouse_DNA_repair_exp2_slope
#color=as.factor(mouse_DNA_repair_exp2.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB2") +
labs(title = "DNA_repair", x="FIB2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/DNA_repair_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_DNA_repair_exp3_slope <- slope_func(mouse_DNA_repair_exp3)
mouse_DNA_repair_exp3_slope <- as.data.frame(mouse_DNA_repair_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_DNA_repair_exp3_slope,aes(
x="",
y=mouse_DNA_repair_exp3_slope
#color=as.factor(mouse_DNA_repair_exp3.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC1") +
labs(title = "DNA_repair", x="ESC1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/DNA_repair_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_DNA_repair_exp4_slope <- slope_func(mouse_DNA_repair_exp4)
mouse_DNA_repair_exp4_slope <- as.data.frame(mouse_DNA_repair_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_DNA_repair_exp4_slope,aes(
x="",
y=mouse_DNA_repair_exp4_slope
#color=as.factor(mouse_DNA_repair_exp4.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC2") +
labs(title = "DNA_repair", x="ESC2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/DNA_repair_ESC2_slope.png", width = 2, height = 4)
#----------go-ribosome----------------------------
# Same pipeline for the GO_RIBOSOME (GO:0005840) gene set: map human genes
# to mouse row ids, subset the four expression tables, compute per-gene
# slopes, and save one violin+box plot per sample.
GO_RIBOSOME <- read.table("../GO_data/GO0005840GO_RIBOSOME.txt",header = T, sep="\t")
# Some NA values appear here: a few human GO_RIBOSOME genes have no
# corresponding mouse gene in the mapping table.
mouse_GO_RIBOSOME_gid <- human_gene$mouse_ensembl_gene_id[match(GO_RIBOSOME[2:nrow(GO_RIBOSOME),],human_gene$human_external_gene_name)]
mouse_GO_RIBOSOME_oid <- as.character(annotLookup2$original_id[match(mouse_GO_RIBOSOME_gid,annotLookup2$ensembl_gene_id)])
# Subset fib1
temp=match(mouse_GO_RIBOSOME_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_GO_RIBOSOME_exp1 <- fib1[temp,]
# Subset fib2
temp=match(mouse_GO_RIBOSOME_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_GO_RIBOSOME_exp2 <- fib2[temp,]
# Subset esc1
temp=match(mouse_GO_RIBOSOME_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_GO_RIBOSOME_exp3 <- esc1[temp,]
# Subset esc2
temp=match(mouse_GO_RIBOSOME_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_GO_RIBOSOME_exp4 <- esc2[temp,]
#fib1
# (commented out) add 0.1 everywhere to avoid zeros before the log ratio
#mouse_GO_RIBOSOME_exp1_slope <- mouse_GO_RIBOSOME_exp1+0.1
#mouse_GO_RIBOSOME_exp1_f2s <- log(mouse_GO_RIBOSOME_exp1$FIB_fast_TPM/mouse_GO_RIBOSOME_exp1$FIB_slow_TPM,2)
mouse_GO_RIBOSOME_exp1_slope <- slope_func(mouse_GO_RIBOSOME_exp1)
mouse_GO_RIBOSOME_exp1_slope <- as.data.frame(mouse_GO_RIBOSOME_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_GO_RIBOSOME_exp1_slope,aes(
x="",
y=mouse_GO_RIBOSOME_exp1_slope
#color=as.factor(mouse_GO_RIBOSOME_exp1.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB1") +
labs(title = "GO_RIBOSOME", x="FIB1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/GO_RIBOSOME_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_GO_RIBOSOME_exp2_slope <- slope_func(mouse_GO_RIBOSOME_exp2)
mouse_GO_RIBOSOME_exp2_slope <- as.data.frame(mouse_GO_RIBOSOME_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_GO_RIBOSOME_exp2_slope,aes(
x="",
y=mouse_GO_RIBOSOME_exp2_slope
#color=as.factor(mouse_GO_RIBOSOME_exp2.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB2") +
labs(title = "GO_RIBOSOME", x="FIB2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/GO_RIBOSOME_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_GO_RIBOSOME_exp3_slope <- slope_func(mouse_GO_RIBOSOME_exp3)
mouse_GO_RIBOSOME_exp3_slope <- as.data.frame(mouse_GO_RIBOSOME_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_GO_RIBOSOME_exp3_slope,aes(
x="",
y=mouse_GO_RIBOSOME_exp3_slope
#color=as.factor(mouse_GO_RIBOSOME_exp3.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC1") +
labs(title = "GO_RIBOSOME", x="ESC1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/GO_RIBOSOME_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_GO_RIBOSOME_exp4_slope <- slope_func(mouse_GO_RIBOSOME_exp4)
mouse_GO_RIBOSOME_exp4_slope <- as.data.frame(mouse_GO_RIBOSOME_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_GO_RIBOSOME_exp4_slope,aes(
x="",
y=mouse_GO_RIBOSOME_exp4_slope
#color=as.factor(mouse_GO_RIBOSOME_exp4.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC2") +
labs(title = "GO_RIBOSOME", x="ESC2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/GO_RIBOSOME_ESC2_slope.png", width = 2, height = 4)
#response to dna damage---------------------------
# Same pipeline for the GO:0006974 RESPONSE_TO_DNA_DAMAGE_STIMULUS gene
# set: map human genes to mouse row ids, subset the four expression tables,
# compute per-gene slopes, and save one violin+box plot per sample.
RESPONSE_TO_DNA_DAMAGE_STIMULUS <- read.table("../GO_data/GO0006974RESPONSE_TO_DNA_DAMAGE_STIMULUS.txt",header = T, sep="\t")
# Some NA values appear here: a few human genes in this set have no
# corresponding mouse gene in the mapping table.
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid <- human_gene$mouse_ensembl_gene_id[match(RESPONSE_TO_DNA_DAMAGE_STIMULUS[2:nrow(RESPONSE_TO_DNA_DAMAGE_STIMULUS),],human_gene$human_external_gene_name)]
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid <- as.character(annotLookup2$original_id[match(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid,annotLookup2$ensembl_gene_id)])
# Subset fib1
temp=match(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1 <- fib1[temp,]
# Subset fib2
temp=match(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2 <- fib2[temp,]
# Subset esc1
temp=match(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3 <- esc1[temp,]
# Subset esc2
temp=match(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4 <- esc2[temp,]
#log2(fast/slow)------------------------------------
#fib1
# (commented out) add 0.1 everywhere to avoid zeros before the log ratio
#mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1+0.1
#mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_f2s <- log(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1$FIB_fast_TPM/mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1$FIB_slow_TPM,2)
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- slope_func(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1)
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- as.data.frame(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope,aes(
x="",
y=mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope
#color=as.factor(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB1") +
labs(title = "RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="FIB1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RESPONSE_TO_DNA_DAMAGE_STIMULUS_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- slope_func(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2)
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- as.data.frame(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope,aes(
x="",
y=mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope
#color=as.factor(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB2") +
labs(title = "RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="FIB2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RESPONSE_TO_DNA_DAMAGE_STIMULUS_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- slope_func(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3)
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- as.data.frame(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope,aes(
x="",
y=mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope
#color=as.factor(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC1") +
labs(title = "RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="ESC1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- slope_func(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4)
mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- as.data.frame(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope,aes(
x="",
y=mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope
#color=as.factor(mouse_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC2") +
labs(title = "RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="ESC2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC2_slope.png", width = 2, height = 4)
#-----------REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS--------------
# Same pipeline for the GO:2001020 REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_
# STIMULUS gene set: map, subset, compute slopes, plot each sample.
REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS <- read.table("../GO_data/GO_2001020GO_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS.txt",header = T, sep="\t")
# Some NA values appear here: a few human genes in this set have no
# corresponding mouse gene in the mapping table.
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid <- human_gene$mouse_ensembl_gene_id[match(REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS[2:nrow(REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS),],human_gene$human_external_gene_name)]
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid <- as.character(annotLookup2$original_id[match(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid,annotLookup2$ensembl_gene_id)])
# Subset fib1
temp=match(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1 <- fib1[temp,]
# Subset fib2
temp=match(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2 <- fib2[temp,]
# Subset esc1
temp=match(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3 <- esc1[temp,]
# Subset esc2
temp=match(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4 <- esc2[temp,]
#log2(fast/slow)------------------------------------
#fib1
# (commented out) add 0.1 everywhere to avoid zeros before the log ratio
#mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1+0.1
#mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_f2s <- log(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1$FIB_fast_TPM/mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1$FIB_slow_TPM,2)
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- slope_func(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1)
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- as.data.frame(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope,aes(
x="",
y=mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope
#color=as.factor(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB1") +
labs(title = "REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="FIB1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- slope_func(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2)
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- as.data.frame(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope,aes(
x="",
y=mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope
#color=as.factor(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB2") +
labs(title = "REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="FIB2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- slope_func(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3)
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- as.data.frame(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope,aes(
x="",
y=mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope
#color=as.factor(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC1") +
labs(title = "REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="ESC1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- slope_func(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4)
mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- as.data.frame(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope,aes(
x="",
y=mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope
#color=as.factor(mouse_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC2") +
labs(title = "REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", x="ESC2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC2_slope.png", width = 2, height = 4)
#PLOT FIB1+FIB2
#grid.arrange(FIB1, FIB2, ncol=2)
#------new go gene sets----------------------
#------------------REGULATION_OF_DNA_DAMAGE_CHECKPOINT--------------------
# Same pipeline for the GO REGULATION_OF_DNA_DAMAGE_CHECKPOINT gene set:
# map, subset, compute slopes, plot each sample.
REGULATION_OF_DNA_DAMAGE_CHECKPOINT <- read.table("../GO_data/GO_REGULATION_OF_DNA_DAMAGE_CHECKPOINT.txt",header = T, sep="\t")
# Some NA values appear here: a few human genes in this set have no
# corresponding mouse gene in the mapping table.
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_gid <- human_gene$mouse_ensembl_gene_id[match(REGULATION_OF_DNA_DAMAGE_CHECKPOINT[2:nrow(REGULATION_OF_DNA_DAMAGE_CHECKPOINT),],human_gene$human_external_gene_name)]
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_oid <- as.character(annotLookup2$original_id[match(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_gid,annotLookup2$ensembl_gene_id)])
# Subset fib1
temp=match(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1 <- fib1[temp,]
# Subset fib2
temp=match(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2 <- fib2[temp,]
# Subset esc1
temp=match(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3 <- esc1[temp,]
# Subset esc2
temp=match(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4 <- esc2[temp,]
#fib1
# (commented out) add 0.1 everywhere to avoid zeros before the log ratio
#mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_slope <- mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1+0.1
#mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_f2s <- log(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1$FIB_fast_TPM/mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1$FIB_slow_TPM,2)
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_slope <- slope_func(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1)
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_slope <- as.data.frame(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_slope,aes(
x="",
y=mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1_slope
#color=as.factor(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp1.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB1") +
labs(title = "REGULATION_OF_DNA_DAMAGE_CHECKPOINT", x="FIB1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_DNA_DAMAGE_CHECKPOINT_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2_slope <- slope_func(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2)
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2_slope <- as.data.frame(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2_slope,aes(
x="",
y=mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2_slope
#color=as.factor(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp2.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="FIB2") +
labs(title = "REGULATION_OF_DNA_DAMAGE_CHECKPOINT", x="FIB2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_DNA_DAMAGE_CHECKPOINT_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3_slope <- slope_func(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3)
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3_slope <- as.data.frame(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3_slope,aes(
x="",
y=mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3_slope
#color=as.factor(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp3.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC1") +
labs(title = "REGULATION_OF_DNA_DAMAGE_CHECKPOINT", x="ESC1", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_DNA_DAMAGE_CHECKPOINT_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4_slope <- slope_func(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4)
mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4_slope <- as.data.frame(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4_slope,aes(
x="",
y=mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4_slope
#color=as.factor(mouse_REGULATION_OF_DNA_DAMAGE_CHECKPOINT_exp4.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC2") +
labs(title = "REGULATION_OF_DNA_DAMAGE_CHECKPOINT", x="ESC2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_DNA_DAMAGE_CHECKPOINT_ESC2_slope.png", width = 2, height = 4)
#----------HALLMARK_PEROXISOME----------------------------
# HALLMARK_PEROXISOME gene set: map the human symbols to mouse expression
# rows, compute per-gene slopes with slope_func(), and save one violin +
# boxplot of the slope distribution per cell type (FIB1/FIB2/ESC1/ESC2).
library(ggplot2)
library(reshape2)
require(gridExtra)
HALLMARK_PEROXISOME <- read.table("../GO_data/HALLMARK_PEROXISOME.txt",header = T, sep="\t")
# Human symbol -> mouse Ensembl gene ID -> original expression-table row ID.
# NAs appear where a human gene has no mouse ortholog in human_gene.
mouse_HALLMARK_PEROXISOME_gid <- human_gene$mouse_ensembl_gene_id[match(HALLMARK_PEROXISOME[2:nrow(HALLMARK_PEROXISOME),],human_gene$human_external_gene_name)]
mouse_HALLMARK_PEROXISOME_oid <- as.character(annotLookup2$original_id[match(mouse_HALLMARK_PEROXISOME_gid,annotLookup2$ensembl_gene_id)])

# Subset one expression table to the mapped gene-set rows, dropping IDs that
# do not occur in the table (replaces the repeated match()/`temp` pattern;
# the throwaway global `temp` is no longer created).
subset_expr <- function(expr_tab, oid) {
  idx <- match(oid, row.names(expr_tab))
  expr_tab[idx[!is.na(idx)], ]
}
mouse_HALLMARK_PEROXISOME_exp1 <- subset_expr(fib1, mouse_HALLMARK_PEROXISOME_oid)  # FIB1
mouse_HALLMARK_PEROXISOME_exp2 <- subset_expr(fib2, mouse_HALLMARK_PEROXISOME_oid)  # FIB2
mouse_HALLMARK_PEROXISOME_exp3 <- subset_expr(esc1, mouse_HALLMARK_PEROXISOME_oid)  # ESC1
mouse_HALLMARK_PEROXISOME_exp4 <- subset_expr(esc2, mouse_HALLMARK_PEROXISOME_oid)  # ESC2

# Violin + boxplot of the slope distribution for one cell type.
# Red "------" marks the mean; the blue dashed line marks slope == 0.
# The plot is printed and passed to ggsave() explicitly: the original code
# relied on last_plot(), which is only set when a plot is printed, so under
# source() it could silently save a stale plot.
plot_slope_violin <- function(slope_df, plot_title, cell_label, out_file) {
  p <- ggplot(slope_df, aes(x = "", y = slope_df[[1]])) +
    geom_violin(fill = "grey80", colour = "#3366FF") +
    geom_boxplot(width = 0.3) +
    # fun.y is deprecated in ggplot2 >= 3.3 (renamed `fun`); kept so the
    # script still runs on the older ggplot2 used elsewhere in this file.
    stat_summary(fun.y = "mean", geom = "text", label = "------",
                 size = 6, color = "red") +
    scale_colour_discrete(name = cell_label) +
    labs(title = plot_title, x = cell_label, y = "slope") +
    geom_hline(yintercept = 0, linetype = "dashed",
               color = "blue", size = 0.5) +
    theme(
      plot.title = element_text(lineheight = .8, size = 15,
                                face = "bold", hjust = 0.5))
  print(p)
  ggsave(out_file, plot = p, width = 2, height = 4)
}

# FIB1
mouse_HALLMARK_PEROXISOME_exp1_slope <- slope_func(mouse_HALLMARK_PEROXISOME_exp1)
# as.data.frame() names the single column after the variable (kept two-step
# so downstream code that inspects the column name still works).
mouse_HALLMARK_PEROXISOME_exp1_slope <- as.data.frame(mouse_HALLMARK_PEROXISOME_exp1_slope)
plot_slope_violin(mouse_HALLMARK_PEROXISOME_exp1_slope, "HALLMARK_PEROXISOME",
                  "FIB1", "./figures/1225/HALLMARK_PEROXISOME_fib1_slope.png")
# FIB2
mouse_HALLMARK_PEROXISOME_exp2_slope <- slope_func(mouse_HALLMARK_PEROXISOME_exp2)
mouse_HALLMARK_PEROXISOME_exp2_slope <- as.data.frame(mouse_HALLMARK_PEROXISOME_exp2_slope)
plot_slope_violin(mouse_HALLMARK_PEROXISOME_exp2_slope, "HALLMARK_PEROXISOME",
                  "FIB2", "./figures/1225/HALLMARK_PEROXISOME_FIB2_slope.png")
# ESC1
mouse_HALLMARK_PEROXISOME_exp3_slope <- slope_func(mouse_HALLMARK_PEROXISOME_exp3)
mouse_HALLMARK_PEROXISOME_exp3_slope <- as.data.frame(mouse_HALLMARK_PEROXISOME_exp3_slope)
plot_slope_violin(mouse_HALLMARK_PEROXISOME_exp3_slope, "HALLMARK_PEROXISOME",
                  "ESC1", "./figures/1225/HALLMARK_PEROXISOME_ESC1_slope.png")
# ESC2
mouse_HALLMARK_PEROXISOME_exp4_slope <- slope_func(mouse_HALLMARK_PEROXISOME_exp4)
mouse_HALLMARK_PEROXISOME_exp4_slope <- as.data.frame(mouse_HALLMARK_PEROXISOME_exp4_slope)
plot_slope_violin(mouse_HALLMARK_PEROXISOME_exp4_slope, "HALLMARK_PEROXISOME",
                  "ESC2", "./figures/1225/HALLMARK_PEROXISOME_ESC2_slope.png")
#----------HALLMARK_P53_PATHWAY----------------------------
# HALLMARK_P53_PATHWAY gene set: map the human symbols to mouse expression
# rows, compute per-gene slopes with slope_func(), and save one violin +
# boxplot of the slope distribution per cell type (FIB1/FIB2/ESC1/ESC2).
library(ggplot2)
library(reshape2)
require(gridExtra)
HALLMARK_P53_PATHWAY <- read.table("../GO_data/HALLMARK_P53_PATHWAY.txt",header = T, sep="\t")
# Human symbol -> mouse Ensembl gene ID -> original expression-table row ID.
# NAs appear where a human gene has no mouse ortholog in human_gene.
mouse_HALLMARK_P53_PATHWAY_gid <- human_gene$mouse_ensembl_gene_id[match(HALLMARK_P53_PATHWAY[2:nrow(HALLMARK_P53_PATHWAY),],human_gene$human_external_gene_name)]
mouse_HALLMARK_P53_PATHWAY_oid <- as.character(annotLookup2$original_id[match(mouse_HALLMARK_P53_PATHWAY_gid,annotLookup2$ensembl_gene_id)])

# Subset one expression table to the mapped gene-set rows, dropping IDs that
# do not occur in the table (replaces the repeated match()/`temp` pattern;
# the throwaway global `temp` is no longer created).
subset_expr <- function(expr_tab, oid) {
  idx <- match(oid, row.names(expr_tab))
  expr_tab[idx[!is.na(idx)], ]
}
mouse_HALLMARK_P53_PATHWAY_exp1 <- subset_expr(fib1, mouse_HALLMARK_P53_PATHWAY_oid)  # FIB1
mouse_HALLMARK_P53_PATHWAY_exp2 <- subset_expr(fib2, mouse_HALLMARK_P53_PATHWAY_oid)  # FIB2
mouse_HALLMARK_P53_PATHWAY_exp3 <- subset_expr(esc1, mouse_HALLMARK_P53_PATHWAY_oid)  # ESC1
mouse_HALLMARK_P53_PATHWAY_exp4 <- subset_expr(esc2, mouse_HALLMARK_P53_PATHWAY_oid)  # ESC2

# Violin + boxplot of the slope distribution for one cell type.
# Red "------" marks the mean; the blue dashed line marks slope == 0.
# The plot is printed and passed to ggsave() explicitly: the original code
# relied on last_plot(), which is only set when a plot is printed, so under
# source() it could silently save a stale plot.
plot_slope_violin <- function(slope_df, plot_title, cell_label, out_file) {
  p <- ggplot(slope_df, aes(x = "", y = slope_df[[1]])) +
    geom_violin(fill = "grey80", colour = "#3366FF") +
    geom_boxplot(width = 0.3) +
    # fun.y is deprecated in ggplot2 >= 3.3 (renamed `fun`); kept so the
    # script still runs on the older ggplot2 used elsewhere in this file.
    stat_summary(fun.y = "mean", geom = "text", label = "------",
                 size = 6, color = "red") +
    scale_colour_discrete(name = cell_label) +
    labs(title = plot_title, x = cell_label, y = "slope") +
    geom_hline(yintercept = 0, linetype = "dashed",
               color = "blue", size = 0.5) +
    theme(
      plot.title = element_text(lineheight = .8, size = 15,
                                face = "bold", hjust = 0.5))
  print(p)
  ggsave(out_file, plot = p, width = 2, height = 4)
}

# FIB1
mouse_HALLMARK_P53_PATHWAY_exp1_slope <- slope_func(mouse_HALLMARK_P53_PATHWAY_exp1)
# as.data.frame() names the single column after the variable (kept two-step
# so downstream code that inspects the column name still works).
mouse_HALLMARK_P53_PATHWAY_exp1_slope <- as.data.frame(mouse_HALLMARK_P53_PATHWAY_exp1_slope)
plot_slope_violin(mouse_HALLMARK_P53_PATHWAY_exp1_slope, "HALLMARK_P53_PATHWAY",
                  "FIB1", "./figures/1225/HALLMARK_P53_PATHWAY_fib1_slope.png")
# FIB2
mouse_HALLMARK_P53_PATHWAY_exp2_slope <- slope_func(mouse_HALLMARK_P53_PATHWAY_exp2)
mouse_HALLMARK_P53_PATHWAY_exp2_slope <- as.data.frame(mouse_HALLMARK_P53_PATHWAY_exp2_slope)
plot_slope_violin(mouse_HALLMARK_P53_PATHWAY_exp2_slope, "HALLMARK_P53_PATHWAY",
                  "FIB2", "./figures/1225/HALLMARK_P53_PATHWAY_FIB2_slope.png")
# ESC1
mouse_HALLMARK_P53_PATHWAY_exp3_slope <- slope_func(mouse_HALLMARK_P53_PATHWAY_exp3)
mouse_HALLMARK_P53_PATHWAY_exp3_slope <- as.data.frame(mouse_HALLMARK_P53_PATHWAY_exp3_slope)
plot_slope_violin(mouse_HALLMARK_P53_PATHWAY_exp3_slope, "HALLMARK_P53_PATHWAY",
                  "ESC1", "./figures/1225/HALLMARK_P53_PATHWAY_ESC1_slope.png")
# ESC2
mouse_HALLMARK_P53_PATHWAY_exp4_slope <- slope_func(mouse_HALLMARK_P53_PATHWAY_exp4)
mouse_HALLMARK_P53_PATHWAY_exp4_slope <- as.data.frame(mouse_HALLMARK_P53_PATHWAY_exp4_slope)
plot_slope_violin(mouse_HALLMARK_P53_PATHWAY_exp4_slope, "HALLMARK_P53_PATHWAY",
                  "ESC2", "./figures/1225/HALLMARK_P53_PATHWAY_ESC2_slope.png")
#----------RIBOSOME_BIOGENESIS_AND_ASSEMBLY----------------------------
# GO:0042254 gene set: map human symbols to mouse expression rows, compute
# per-gene slopes with slope_func(), and plot/save the slope distribution
# for each cell type (FIB1/FIB2/ESC1/ESC2).
RIBOSOME_BIOGENESIS_AND_ASSEMBLY <- read.table("../GO_data/GO_0042254RIBOSOME_BIOGENESIS_AND_ASSEMBLY.txt",header = T, sep="\t")
# Some NA values here show that some human RIBOSOME_BIOGENESIS_AND_ASSEMBLY
# genes have no corresponding mouse gene.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_gid <- human_gene$mouse_ensembl_gene_id[match(RIBOSOME_BIOGENESIS_AND_ASSEMBLY[2:nrow(RIBOSOME_BIOGENESIS_AND_ASSEMBLY),],human_gene$human_external_gene_name)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid <- as.character(annotLookup2$original_id[match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_gid,annotLookup2$ensembl_gene_id)])
# Load fib1 rows for this gene set (unmatched IDs dropped).
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1 <- fib1[temp,]
# Load fib2 rows.
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2 <- fib2[temp,]
# Load esc1 rows.
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3 <- esc1[temp,]
# Load esc2 rows.
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4 <- esc2[temp,]
# FIB1
# (Disabled alternative: avoid zeros by adding 0.1 everywhere, then take
# a log fast/slow ratio instead of the fitted slope.)
#mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope <- mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1+0.1
#mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_f2s <- log(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1$FIB_fast_TPM/mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1$FIB_slow_TPM,2)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1)
# as.data.frame() names the single column after the variable; that name is
# what aes(y = ...) below refers to.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
# Violin + boxplot of FIB1 slopes; red dashes = mean, blue dashed line = 0.
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope,aes(
  x="",
  y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope
  #color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  # NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (renamed `fun`).
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB1") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="FIB1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
# Same slope computation and plot for FIB2.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope,aes(
  x="",
  y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope
  #color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB2") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="FIB2", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
# Same slope computation and plot for ESC1.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope,aes(
  x="",
  y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope
  #color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="ESC1") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="ESC1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
# Same slope computation and plot for ESC2.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope,aes(
  x="",
  y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope
  #color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="ESC2") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="ESC2", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_ESC2_slope.png", width = 2, height = 4)
#----------POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS----------------------------
# POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS gene set: map the
# human symbols to mouse expression rows, compute per-gene slopes with
# slope_func(), and save one violin + boxplot per cell type.
library(ggplot2)
library(reshape2)
require(gridExtra)
POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS <- read.table("../GO_data/GO_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS.txt",header = T, sep="\t")
# Human symbol -> mouse Ensembl gene ID -> original expression-table row ID.
# NAs appear where a human gene has no mouse ortholog in human_gene.
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid <- human_gene$mouse_ensembl_gene_id[match(POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS[2:nrow(POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS),],human_gene$human_external_gene_name)]
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid <- as.character(annotLookup2$original_id[match(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_gid,annotLookup2$ensembl_gene_id)])

# Subset one expression table to the mapped gene-set rows, dropping IDs that
# do not occur in the table (replaces the repeated match()/`temp` pattern;
# the throwaway global `temp` is no longer created).
subset_expr <- function(expr_tab, oid) {
  idx <- match(oid, row.names(expr_tab))
  expr_tab[idx[!is.na(idx)], ]
}
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1 <- subset_expr(fib1, mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid)  # FIB1
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2 <- subset_expr(fib2, mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid)  # FIB2
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3 <- subset_expr(esc1, mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid)  # ESC1
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4 <- subset_expr(esc2, mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_oid)  # ESC2

# Violin + boxplot of the slope distribution for one cell type.
# Red "------" marks the mean; the blue dashed line marks slope == 0.
# The plot is printed and passed to ggsave() explicitly: the original code
# relied on last_plot(), which is only set when a plot is printed, so under
# source() it could silently save a stale plot.
plot_slope_violin <- function(slope_df, plot_title, cell_label, out_file) {
  p <- ggplot(slope_df, aes(x = "", y = slope_df[[1]])) +
    geom_violin(fill = "grey80", colour = "#3366FF") +
    geom_boxplot(width = 0.3) +
    # fun.y is deprecated in ggplot2 >= 3.3 (renamed `fun`); kept so the
    # script still runs on the older ggplot2 used elsewhere in this file.
    stat_summary(fun.y = "mean", geom = "text", label = "------",
                 size = 6, color = "red") +
    scale_colour_discrete(name = cell_label) +
    labs(title = plot_title, x = cell_label, y = "slope") +
    geom_hline(yintercept = 0, linetype = "dashed",
               color = "blue", size = 0.5) +
    theme(
      plot.title = element_text(lineheight = .8, size = 15,
                                face = "bold", hjust = 0.5))
  print(p)
  ggsave(out_file, plot = p, width = 2, height = 4)
}

# FIB1
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- slope_func(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1)
# as.data.frame() names the single column after the variable (kept two-step
# so downstream code that inspects the column name still works).
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope <- as.data.frame(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope)
plot_slope_violin(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp1_slope,
                  "POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", "FIB1",
                  "./figures/1225/POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_fib1_slope.png")
# FIB2
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- slope_func(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2)
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope <- as.data.frame(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope)
plot_slope_violin(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp2_slope,
                  "POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", "FIB2",
                  "./figures/1225/POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_FIB2_slope.png")
# ESC1
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- slope_func(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3)
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope <- as.data.frame(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope)
plot_slope_violin(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp3_slope,
                  "POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", "ESC1",
                  "./figures/1225/POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC1_slope.png")
# ESC2
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- slope_func(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4)
mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope <- as.data.frame(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope)
plot_slope_violin(mouse_POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_exp4_slope,
                  "POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS", "ESC2",
                  "./figures/1225/POSITIVE_REGULATION_OF_RESPONSE_TO_DNA_DAMAGE_STIMULUS_ESC2_slope.png")
#----------RIBOSOME_BIOGENESIS_AND_ASSEMBLY----------------------------
# NOTE(review): this section duplicates the earlier
# RIBOSOME_BIOGENESIS_AND_ASSEMBLY section in this file; it recomputes the
# same objects and re-saves the same PNGs. Consider deleting one copy.
RIBOSOME_BIOGENESIS_AND_ASSEMBLY <- read.table("../GO_data/GO_0042254RIBOSOME_BIOGENESIS_AND_ASSEMBLY.txt",header = T, sep="\t")
# Some NA values here show that some human RIBOSOME_BIOGENESIS_AND_ASSEMBLY
# genes have no corresponding mouse gene.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_gid <- human_gene$mouse_ensembl_gene_id[match(RIBOSOME_BIOGENESIS_AND_ASSEMBLY[2:nrow(RIBOSOME_BIOGENESIS_AND_ASSEMBLY),],human_gene$human_external_gene_name)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid <- as.character(annotLookup2$original_id[match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_gid,annotLookup2$ensembl_gene_id)])
# Load fib1 rows for this gene set (unmatched IDs dropped).
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1 <- fib1[temp,]
# Load fib2 rows.
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2 <- fib2[temp,]
# Load esc1 rows.
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3 <- esc1[temp,]
# Load esc2 rows.
temp=match(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4 <- esc2[temp,]
# FIB1
# (Disabled alternative: avoid zeros by adding 0.1 everywhere, then take
# a log fast/slow ratio instead of the fitted slope.)
#mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope <- mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1+0.1
#mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_f2s <- log(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1$FIB_fast_TPM/mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1$FIB_slow_TPM,2)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1)
# as.data.frame() names the single column after the variable; that name is
# what aes(y = ...) below refers to.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
# Violin + boxplot of FIB1 slopes; red dashes = mean, blue dashed line = 0.
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope,aes(
  x="",
  y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1_slope
  #color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp1.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  # NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (renamed `fun`).
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB1") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="FIB1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
# Same slope computation and plot for FIB2.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope,aes(
  x="",
  y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2_slope
  #color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp2.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="FIB2") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="FIB2", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
# Same slope computation and plot for ESC1.
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope,aes(
  x="",
  y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3_slope
  #color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp3.m$variable)
)) +
  geom_violin(fill = "grey80",colour = "#3366FF") +
  #scale_fill_brewer(palette="Dark2")
  #scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
  #geom_jitter(height = 0, width = 0.1) +
  geom_boxplot(width=0.3) +
  stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
  #geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
  #stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
  scale_colour_discrete(name ="ESC1") +
  labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="ESC1", y="slope") +
  #coord_fixed(0.2) +
  geom_hline(yintercept=0, linetype="dashed",
             color = "blue", size=0.5) +
  theme(
    plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
# Same slope computation for ESC2 (its ggplot chain follows below).
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope <- slope_func(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4)
mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope <- as.data.frame(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope)
library(ggplot2)
library(reshape2)
require(gridExtra)
ggplot(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope,aes(
x="",
y=mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4_slope
#color=as.factor(mouse_RIBOSOME_BIOGENESIS_AND_ASSEMBLY_exp4.m$variable)
)) +
geom_violin(fill = "grey80",colour = "#3366FF") +
#scale_fill_brewer(palette="Dark2")
#scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9")) +
#geom_jitter(height = 0, width = 0.1) +
geom_boxplot(width=0.3) +
stat_summary(fun.y = "mean", geom = "text", label="------", size= 6, color= "red") +
#geom_dotplot(binaxis='y', stackdir='center', dotsize=0.4)+
#stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge")
scale_colour_discrete(name ="ESC2") +
labs(title = "RIBOSOME_BIOGENESIS_AND_ASSEMBLY", x="ESC2", y="slope") +
#coord_fixed(0.2) +
geom_hline(yintercept=0, linetype="dashed",
color = "blue", size=0.5) +
theme(
plot.title = element_text(lineheight=.8, size=15,face="bold",hjust = 0.5))
ggsave("./figures/1225/RIBOSOME_BIOGENESIS_AND_ASSEMBLY_ESC2_slope.png", width = 2, height = 4)
#----------REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION----------------------------
# Build the four mouse expression subsets for the
# REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION GO gene set:
# human gene symbol -> mouse Ensembl id (human_gene) -> original row id
# (annotLookup2), then extract the matching rows from fib1/fib2/esc1/esc2.
REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION <- read.table("../GO_data/GO_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION.txt",header = T, sep="\t")
# Some mapped ids are NA: those human REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION
# genes have no corresponding mouse gene.  Row 1 of the gene-set table is
# skipped (presumably a category/description row -- TODO confirm).
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_gid <- human_gene$mouse_ensembl_gene_id[match(REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION[2:nrow(REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION),],human_gene$human_external_gene_name)]
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_oid <- as.character(annotLookup2$original_id[match(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_gid,annotLookup2$ensembl_gene_id)])
# Load FIB1 subset: match ids against the row names and drop NA (unmatched) ids.
temp=match(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1 <- fib1[temp,]
# Load FIB2 subset
temp=match(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2 <- fib2[temp,]
# Load ESC1 subset
temp=match(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3 <- esc1[temp,]
# Load ESC2 subset
temp=match(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4 <- esc2[temp,]
#fib1
# Violin + boxplot of per-gene expression slopes for the
# REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION gene set, one figure per
# experiment (FIB1/FIB2/ESC1/ESC2).  Mean marked with red dashes, blue
# dashed reference line at slope == 0.
# Cleanup vs. original: dropped the dead commented-out code (the "+0.1 /
# log-ratio" variant), the unused reshape2/gridExtra loads, and the no-op
# scale_colour_discrete() (nothing is mapped to the colour aesthetic).
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_slope <- slope_func(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1)
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_slope <- as.data.frame(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_slope)
library(ggplot2)
# NOTE: inside aes() the y variable resolves to the data-frame COLUMN of the
# same name (as.data.frame() names its single column after the input object).
ggplot(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_slope,
       aes(x = "", y = mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp1_slope)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # 'fun' replaces 'fun.y', which was deprecated in ggplot2 3.3.0.
  stat_summary(fun = "mean", geom = "text", label = "------",
               size = 6, color = "red") +
  labs(title = "REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION", x = "FIB1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2_slope <- slope_func(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2)
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2_slope <- as.data.frame(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2_slope)
ggplot(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2_slope,
       aes(x = "", y = mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp2_slope)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  stat_summary(fun = "mean", geom = "text", label = "------",
               size = 6, color = "red") +
  labs(title = "REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION", x = "FIB2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3_slope <- slope_func(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3)
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3_slope <- as.data.frame(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3_slope)
ggplot(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3_slope,
       aes(x = "", y = mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp3_slope)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  stat_summary(fun = "mean", geom = "text", label = "------",
               size = 6, color = "red") +
  labs(title = "REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION", x = "ESC1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4_slope <- slope_func(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4)
mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4_slope <- as.data.frame(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4_slope)
ggplot(mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4_slope,
       aes(x = "", y = mouse_REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_exp4_slope)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  stat_summary(fun = "mean", geom = "text", label = "------",
               size = 6, color = "red") +
  labs(title = "REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION", x = "ESC2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/REGULATION_OF_MITOCHONDRIAL_DEPOLARIZATION_ESC2_slope.png", width = 2, height = 4)
#----------WONG_MITOCHONDRIA_GENE_MODULE----------------------------
# Build the four mouse expression subsets for the WONG_MITOCHONDRIA_GENE_MODULE
# gene set: human gene symbol -> mouse Ensembl id (human_gene) -> original
# row id (annotLookup2), then extract the matching rows from fib1/fib2/esc1/esc2.
WONG_MITOCHONDRIA_GENE_MODULE <- read.table("../GO_data/WONG_MITOCHONDRIA_GENE_MODULE.txt",header = T, sep="\t")
# Some mapped ids are NA: those human WONG_MITOCHONDRIA_GENE_MODULE genes have
# no corresponding mouse gene.  Row 1 of the gene-set table is skipped
# (presumably a category/description row -- TODO confirm).
mouse_WONG_MITOCHONDRIA_GENE_MODULE_gid <- human_gene$mouse_ensembl_gene_id[match(WONG_MITOCHONDRIA_GENE_MODULE[2:nrow(WONG_MITOCHONDRIA_GENE_MODULE),],human_gene$human_external_gene_name)]
mouse_WONG_MITOCHONDRIA_GENE_MODULE_oid <- as.character(annotLookup2$original_id[match(mouse_WONG_MITOCHONDRIA_GENE_MODULE_gid,annotLookup2$ensembl_gene_id)])
# Load FIB1 subset: match ids against the row names and drop NA (unmatched) ids.
temp=match(mouse_WONG_MITOCHONDRIA_GENE_MODULE_oid,row.names(fib1))
temp=temp[!is.na(temp)]
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1 <- fib1[temp,]
# Load FIB2 subset
temp=match(mouse_WONG_MITOCHONDRIA_GENE_MODULE_oid,row.names(fib2))
temp=temp[!is.na(temp)]
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2 <- fib2[temp,]
# Load ESC1 subset
temp=match(mouse_WONG_MITOCHONDRIA_GENE_MODULE_oid,row.names(esc1))
temp=temp[!is.na(temp)]
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3 <- esc1[temp,]
# Load ESC2 subset
temp=match(mouse_WONG_MITOCHONDRIA_GENE_MODULE_oid,row.names(esc2))
temp=temp[!is.na(temp)]
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4 <- esc2[temp,]
#fib1
# Violin + boxplot of per-gene expression slopes for the
# WONG_MITOCHONDRIA_GENE_MODULE gene set, one figure per experiment
# (FIB1/FIB2/ESC1/ESC2).  Mean marked with red dashes, blue dashed
# reference line at slope == 0.
# Cleanup vs. original: dropped the dead commented-out code (the "+0.1 /
# log-ratio" variant), the unused reshape2/gridExtra loads, and the no-op
# scale_colour_discrete() (nothing is mapped to the colour aesthetic).
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_slope <- slope_func(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1)
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_slope <- as.data.frame(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_slope)
library(ggplot2)
# NOTE: inside aes() the y variable resolves to the data-frame COLUMN of the
# same name (as.data.frame() names its single column after the input object).
ggplot(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_slope,
       aes(x = "", y = mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp1_slope)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  # 'fun' replaces 'fun.y', which was deprecated in ggplot2 3.3.0.
  stat_summary(fun = "mean", geom = "text", label = "------",
               size = 6, color = "red") +
  labs(title = "WONG_MITOCHONDRIA_GENE_MODULE", x = "FIB1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/WONG_MITOCHONDRIA_GENE_MODULE_fib1_slope.png", width = 2, height = 4)
#FIB2------------------------------------
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2_slope <- slope_func(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2)
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2_slope <- as.data.frame(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2_slope)
ggplot(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2_slope,
       aes(x = "", y = mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp2_slope)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  stat_summary(fun = "mean", geom = "text", label = "------",
               size = 6, color = "red") +
  labs(title = "WONG_MITOCHONDRIA_GENE_MODULE", x = "FIB2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/WONG_MITOCHONDRIA_GENE_MODULE_FIB2_slope.png", width = 2, height = 4)
#----------esc1----------------------
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3_slope <- slope_func(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3)
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3_slope <- as.data.frame(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3_slope)
ggplot(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3_slope,
       aes(x = "", y = mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp3_slope)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  stat_summary(fun = "mean", geom = "text", label = "------",
               size = 6, color = "red") +
  labs(title = "WONG_MITOCHONDRIA_GENE_MODULE", x = "ESC1", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/WONG_MITOCHONDRIA_GENE_MODULE_ESC1_slope.png", width = 2, height = 4)
#----------------esc2--------------------
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4_slope <- slope_func(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4)
mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4_slope <- as.data.frame(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4_slope)
ggplot(mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4_slope,
       aes(x = "", y = mouse_WONG_MITOCHONDRIA_GENE_MODULE_exp4_slope)) +
  geom_violin(fill = "grey80", colour = "#3366FF") +
  geom_boxplot(width = 0.3) +
  stat_summary(fun = "mean", geom = "text", label = "------",
               size = 6, color = "red") +
  labs(title = "WONG_MITOCHONDRIA_GENE_MODULE", x = "ESC2", y = "slope") +
  geom_hline(yintercept = 0, linetype = "dashed",
             color = "blue", size = 0.5) +
  theme(
    plot.title = element_text(lineheight = .8, size = 15, face = "bold", hjust = 0.5))
ggsave("./figures/1225/WONG_MITOCHONDRIA_GENE_MODULE_ESC2_slope.png", width = 2, height = 4)
|
#Machine Learning Use Case: German Credit DATA
# Worked mlr3 example: classify credit risk (good/bad) on the german credit
# data set.  Follows the mlr3 gallery post cited at the end of the script.
library("data.table")
library("mlr3")
library("mlr3learners")
library("mlr3viz")
library("ggplot2")
#The goal is to classify people by their credit risk (good or bad)
#using 20 personal, demographic and financial features:
#Importing data
data("german", package = "rchallenge")
#Exploring the data
dim(german)
str(german)
#Using skimr and DataExplorer as they create very well readable and
#understandable overviews:
skimr::skim(german)
#Prior to calling DataExplorer, we shorten the (very lengthy) factor
#levels of the german credit data set to get plots which nicely fit
#on the screen.
german_short = german
is_factor = sapply(german_short, is.factor)
# Abbreviate each factor level to at most 12 characters (after truncating to
# 16 with "...") so the bar-chart axis labels stay readable.
german_short[is_factor] = lapply(german[is_factor], function(x){
  levels(x) = abbreviate(mlr3misc::str_trunc(levels(x), 16, "..."), 12)
  x
})
#Normal View
DataExplorer::plot_bar(german, nrow = 6, ncol = 3)
#Improved text format (abbreviated factor levels)
DataExplorer::plot_bar(german_short, nrow = 6, ncol = 3)
DataExplorer::plot_histogram(german_short, nrow = 1, ncol = 3)
DataExplorer::plot_boxplot(german_short, by = "credit_risk", nrow = 1, ncol = 3)
#The typical questions that arise when building a machine learning workflow are:
# What is the problem we are trying to solve?
# What are appropriate learning algorithms?
# How do we evaluate "good" performance?
#
# More systematically in mlr3 they can be expressed via five components:
#
# The Task definition.
# The Learner definition.
# The training.
# The prediction.
# The evaluation via one or multiple Measures.
#
# Define the classification task: predict credit_risk from all other columns.
task = TaskClassif$new("GermanCredit", german, target = "credit_risk")
#Using Logistic Regression
library("mlr3learners")
learner_logreg = lrn("classif.log_reg")
print(learner_logreg)
#Training
# First fit on the full task (illustration only); the model is refit on the
# 80% training split below, so this first call is effectively redundant.
learner_logreg$train(task)
# NOTE(review): sample() is not seeded, so the train/test split (and all
# downstream numbers) differ between runs -- consider set.seed() upfront.
train_set = sample(task$row_ids, 0.8 * task$nrow)
test_set = setdiff(task$row_ids, train_set)
head(train_set)
learner_logreg$train(task, row_ids = train_set)
#The fitted model can be accessed via:
learner_logreg$model
class(learner_logreg$model)
summary(learner_logreg$model)
#Using random forest (with permutation feature importance enabled)
learner_rf = lrn("classif.ranger", importance = "permutation")
learner_rf$train(task, row_ids = train_set)
learner_rf$importance()
#Feature importance plotted with ggplot2
importance = as.data.table(learner_rf$importance(), keep.rownames = TRUE)
colnames(importance) = c("Feature", "Importance")
ggplot(importance, aes(x = reorder(Feature, Importance), y = Importance)) +
  geom_col() + coord_flip() + xlab("")
#Prediction process: predict on the held-out 20%
pred_logreg = learner_logreg$predict(task, row_ids = test_set)
pred_rf = learner_rf$predict(task, row_ids = test_set)
pred_logreg
pred_rf
#confusion matrix
pred_logreg$confusion
pred_rf$confusion
#Switch to probability predictions instead of hard class labels
learner_logreg$predict_type = "prob"
learner_logreg$predict(task, row_ids = test_set)
# ---- Resampling strategies -------------------------------------------------
# Estimate generalization performance of the logistic regression with
# holdout, repeated subsampling, and 10-fold cross-validation.
resampling = rsmp("holdout", ratio = 2/3)
print(resampling)
res = resample(task, learner = learner_logreg, resampling = resampling)
res
res$aggregate()
# Repeated subsampling (10 repeats) gives a more stable estimate than holdout.
resampling = rsmp("subsampling", repeats = 10)
rr = resample(task, learner = learner_logreg, resampling = resampling)
rr$aggregate()
# 10-fold cross-validation.
# Fixed: the original read "resampling = resampling = rsmp(...)", an
# accidental (harmless but confusing) double assignment.
resampling = rsmp("cv", folds = 10)
rr = resample(task, learner = learner_logreg, resampling = resampling)
rr$aggregate()
# false positive rate
rr$aggregate(msr("classif.fpr"))
# false positive rate and false negative rate together
measures = msrs(c("classif.fpr", "classif.fnr"))
rr$aggregate(measures)
# Dictionaries of all available resampling strategies and measures
mlr_resamplings
mlr_measures
# Benchmark logistic regression against a random forest under 10-fold CV.
learners = lrns(c("classif.log_reg", "classif.ranger"), predict_type = "prob")
bm_design = benchmark_grid(
  tasks = task,
  learners = learners,
  resamplings = rsmp("cv", folds = 10)
)
bmr = benchmark(bm_design)
# Compare classification error and AUC per learner.
measures = msrs(c("classif.ce", "classif.auc"))
performances = bmr$aggregate(measures)
performances[, c("learner_id", "classif.ce", "classif.auc")]
# Inspect and set ranger hyperparameters.
learner_rf$param_set
learner_rf$param_set$values = list(verbose = FALSE)
## ?ranger::ranger
as.data.table(learner_rf$param_set)[, .(id, class, lower, upper)]
#Performance
# Compare three ranger configurations: the default ("med"), a deliberately
# weak one ("low": few trees, small mtry) and a heavy one ("high": many
# trees, large mtry).
rf_med = lrn("classif.ranger", id = "med", predict_type = "prob")
rf_low = lrn("classif.ranger", id = "low", predict_type = "prob",
             num.trees = 5, mtry = 2)
rf_high = lrn("classif.ranger", id = "high", predict_type = "prob",
              num.trees = 1000, mtry = 11)
learners = list(rf_low, rf_med, rf_high)
bm_design = benchmark_grid(
  tasks = task,
  learners = learners,
  resamplings = rsmp("cv", folds = 10)
)
bmr = benchmark(bm_design)
print(bmr)
measures = msrs(c("classif.ce", "classif.auc"))
performances = bmr$aggregate(measures)
performances[, .(learner_id, classif.ce, classif.auc)]
autoplot(bmr)
#
# The "low" settings seem to underfit a bit, the "high" setting
# is comparable to the default setting "med".
#
#Reference:
# Binder, et al. (2020, March 11). mlr3gallery: mlr3 Basics - German Credit.
# Retrieved from https://mlr3gallery.mlr-org.com/posts/2020-03-11-basics-german-credit/
|
/GermanCredit.R
|
no_license
|
crisortiz92/exampleML_GermanCredit
|
R
| false
| false
| 5,573
|
r
|
#Machine Learning Use Case: German Credit DATA
# Worked mlr3 example: classify credit risk (good/bad) on the german credit
# data set.  Follows the mlr3 gallery post cited at the end of the script.
library("data.table")
library("mlr3")
library("mlr3learners")
library("mlr3viz")
library("ggplot2")
#The goal is to classify people by their credit risk (good or bad)
#using 20 personal, demographic and financial features:
#Importing data
data("german", package = "rchallenge")
#Exploring the data
dim(german)
str(german)
#Using skimr and DataExplorer as they create very well readable and
#understandable overviews:
skimr::skim(german)
#Prior to calling DataExplorer, we shorten the (very lengthy) factor
#levels of the german credit data set to get plots which nicely fit
#on the screen.
german_short = german
is_factor = sapply(german_short, is.factor)
# Abbreviate each factor level to at most 12 characters (after truncating to
# 16 with "...") so the bar-chart axis labels stay readable.
german_short[is_factor] = lapply(german[is_factor], function(x){
  levels(x) = abbreviate(mlr3misc::str_trunc(levels(x), 16, "..."), 12)
  x
})
#Normal View
DataExplorer::plot_bar(german, nrow = 6, ncol = 3)
#Improved text format (abbreviated factor levels)
DataExplorer::plot_bar(german_short, nrow = 6, ncol = 3)
DataExplorer::plot_histogram(german_short, nrow = 1, ncol = 3)
DataExplorer::plot_boxplot(german_short, by = "credit_risk", nrow = 1, ncol = 3)
#The typical questions that arise when building a machine learning workflow are:
# What is the problem we are trying to solve?
# What are appropriate learning algorithms?
# How do we evaluate "good" performance?
#
# More systematically in mlr3 they can be expressed via five components:
#
# The Task definition.
# The Learner definition.
# The training.
# The prediction.
# The evaluation via one or multiple Measures.
#
# Define the classification task: predict credit_risk from all other columns.
task = TaskClassif$new("GermanCredit", german, target = "credit_risk")
#Using Logistic Regression
library("mlr3learners")
learner_logreg = lrn("classif.log_reg")
print(learner_logreg)
#Training
# First fit on the full task (illustration only); the model is refit on the
# 80% training split below, so this first call is effectively redundant.
learner_logreg$train(task)
# NOTE(review): sample() is not seeded, so the train/test split (and all
# downstream numbers) differ between runs -- consider set.seed() upfront.
train_set = sample(task$row_ids, 0.8 * task$nrow)
test_set = setdiff(task$row_ids, train_set)
head(train_set)
learner_logreg$train(task, row_ids = train_set)
#The fitted model can be accessed via:
learner_logreg$model
class(learner_logreg$model)
summary(learner_logreg$model)
#Using random forest (with permutation feature importance enabled)
learner_rf = lrn("classif.ranger", importance = "permutation")
learner_rf$train(task, row_ids = train_set)
learner_rf$importance()
#Feature importance plotted with ggplot2
importance = as.data.table(learner_rf$importance(), keep.rownames = TRUE)
colnames(importance) = c("Feature", "Importance")
ggplot(importance, aes(x = reorder(Feature, Importance), y = Importance)) +
  geom_col() + coord_flip() + xlab("")
#Prediction process: predict on the held-out 20%
pred_logreg = learner_logreg$predict(task, row_ids = test_set)
pred_rf = learner_rf$predict(task, row_ids = test_set)
pred_logreg
pred_rf
#confusion matrix
pred_logreg$confusion
pred_rf$confusion
#Switch to probability predictions instead of hard class labels
learner_logreg$predict_type = "prob"
learner_logreg$predict(task, row_ids = test_set)
# ---- Resampling strategies -------------------------------------------------
# Estimate generalization performance of the logistic regression with
# holdout, repeated subsampling, and 10-fold cross-validation.
resampling = rsmp("holdout", ratio = 2/3)
print(resampling)
res = resample(task, learner = learner_logreg, resampling = resampling)
res
res$aggregate()
# Repeated subsampling (10 repeats) gives a more stable estimate than holdout.
resampling = rsmp("subsampling", repeats = 10)
rr = resample(task, learner = learner_logreg, resampling = resampling)
rr$aggregate()
# 10-fold cross-validation.
# Fixed: the original read "resampling = resampling = rsmp(...)", an
# accidental (harmless but confusing) double assignment.
resampling = rsmp("cv", folds = 10)
rr = resample(task, learner = learner_logreg, resampling = resampling)
rr$aggregate()
# false positive rate
rr$aggregate(msr("classif.fpr"))
# false positive rate and false negative rate together
measures = msrs(c("classif.fpr", "classif.fnr"))
rr$aggregate(measures)
# Dictionaries of all available resampling strategies and measures
mlr_resamplings
mlr_measures
# Benchmark logistic regression against a random forest under 10-fold CV.
learners = lrns(c("classif.log_reg", "classif.ranger"), predict_type = "prob")
bm_design = benchmark_grid(
  tasks = task,
  learners = learners,
  resamplings = rsmp("cv", folds = 10)
)
bmr = benchmark(bm_design)
# Compare classification error and AUC per learner.
measures = msrs(c("classif.ce", "classif.auc"))
performances = bmr$aggregate(measures)
performances[, c("learner_id", "classif.ce", "classif.auc")]
# Inspect and set ranger hyperparameters.
learner_rf$param_set
learner_rf$param_set$values = list(verbose = FALSE)
## ?ranger::ranger
as.data.table(learner_rf$param_set)[, .(id, class, lower, upper)]
#Performance
# Compare three ranger configurations: the default ("med"), a deliberately
# weak one ("low": few trees, small mtry) and a heavy one ("high": many
# trees, large mtry).
rf_med = lrn("classif.ranger", id = "med", predict_type = "prob")
rf_low = lrn("classif.ranger", id = "low", predict_type = "prob",
             num.trees = 5, mtry = 2)
rf_high = lrn("classif.ranger", id = "high", predict_type = "prob",
              num.trees = 1000, mtry = 11)
learners = list(rf_low, rf_med, rf_high)
bm_design = benchmark_grid(
  tasks = task,
  learners = learners,
  resamplings = rsmp("cv", folds = 10)
)
bmr = benchmark(bm_design)
print(bmr)
measures = msrs(c("classif.ce", "classif.auc"))
performances = bmr$aggregate(measures)
performances[, .(learner_id, classif.ce, classif.auc)]
autoplot(bmr)
#
# The "low" settings seem to underfit a bit, the "high" setting
# is comparable to the default setting "med".
#
#Reference:
# Binder, et al. (2020, March 11). mlr3gallery: mlr3 Basics - German Credit.
# Retrieved from https://mlr3gallery.mlr-org.com/posts/2020-03-11-basics-german-credit/
|
# checkEnds.R
#' checkEnds
#'
#' \code{checkEnds} Utility to check source files for the presence of
#' an \code{# [END]} comment.
#'
#' @section Details: whenever \code{# [END]} comments are used in a project, ALL
#' source files MUST include this tag. The function checks all files with an
#' \code{.R} extension recursively from \code{path} and reports any files that
#' do not have an \code{# [END]} tag as the last line of the script. If no such
#' files are present, a message is printed.
#'
#' @param path (char) path to directory that is to be checked recursively.
#' Defaults to \code{getwd()}.
#' @param excl (char) Vector of regular expressions for files and directories
#' that are excluded from checking. Defaults to exclude
#' \code{./doc} and \code{./R/RcppExports.R} since these
#' are/contain autogenerated scripts.
#' @return NULL (invisible) Invoked for the side-effect of printing a
#' report to console.
#'
#' @author (c) 2019 \href{https://orcid.org/0000-0002-1134-6758}{Boris Steipe},
#' licensed under MIT (see file \code{LICENSE} in this package).
#'
#' @examples
#' # Check all files in the project
#' checkEnds()
#'
#' # Check file in and below the ./dev directory only
#' checkEnds(path = "./dev")
#'
checkEnds <- function(path = getwd(), excl = c("^doc/",
                                               "^R/RcppExports\\.R$")) {
  # Collect every .R file below `path`, including hidden files/directories.
  candidates <- list.files(path = path,
                           pattern = ".*\\.R$",
                           recursive = TRUE,
                           all.files = TRUE,
                           include.dirs = TRUE)

  # Drop any file whose relative path matches one of the exclusion patterns.
  drop_rx <- paste("(", excl, ")", sep = "", collapse = "|")
  candidates <- candidates[!grepl(drop_rx, candidates)]

  # A file passes when its very last line is exactly an "# [END]" tag
  # (case-insensitive in the tag letters).
  ends_ok <- vapply(candidates, function(f) {
    contents <- readLines(paste0(path, "/", f))
    grepl("^# \\[[Ee][Nn][Dd]\\]$", contents[length(contents)])
  }, logical(1))

  # Report each offending file; print the all-clear summary only when
  # every candidate passed.
  for (f in candidates[!ends_ok]) {
    message(sprintf("Malformed or missing [END] tag in \"./%s\"\n", f))
  }
  if (all(ends_ok)) {
    message(sprintf("%d files checked, no [END] tags missing.\n",
                    length(candidates)))
  }
  return(invisible(NULL))
}
# [END]
|
/dev/checkEnds.R
|
permissive
|
skoestlmeier/qqid
|
R
| false
| false
| 2,245
|
r
|
# checkEnds.R
#' checkEnds
#'
#' \code{checkEnds} Utility to check source files for the presence of
#' an \code{# [END]} comment.
#'
#' @section Details: whenever \code{# [END]} comments are used in a project, ALL
#' source files MUST include this tag. The function checks all files with an
#' \code{.R} extension recursively from \code{path} and reports any files that
#' do not have an \code{# [END]} tag as the last line of the script. If no such
#' files are present, a message is printed.
#'
#' @param path (char) path to directory that is to be checked recursively.
#' Defaults to \code{getwd()}.
#' @param excl (char) Vector of regular expressions for files and directories
#' that are excluded from checking. Defaults to exclude
#' \code{./doc} and \code{./R/RcppExports.R} since these
#' are/contain autogenerated scripts.
#' @return NULL (invisible) Invoked for the side-effect of printing a
#' report to console.
#'
#' @author (c) 2019 \href{https://orcid.org/0000-0002-1134-6758}{Boris Steipe},
#' licensed under MIT (see file \code{LICENSE} in this package).
#'
#' @examples
#' # Check all files in the project
#' checkEnds()
#'
#' # Check file in and below the ./dev directory only
#' checkEnds(path = "./dev")
#'
checkEnds <- function(path = getwd(), excl = c("^doc/",
                                               "^R/RcppExports\\.R$")) {
  # Collect every .R file below `path`, including hidden files/directories.
  candidates <- list.files(path = path,
                           pattern = ".*\\.R$",
                           recursive = TRUE,
                           all.files = TRUE,
                           include.dirs = TRUE)

  # Drop any file whose relative path matches one of the exclusion patterns.
  drop_rx <- paste("(", excl, ")", sep = "", collapse = "|")
  candidates <- candidates[!grepl(drop_rx, candidates)]

  # A file passes when its very last line is exactly an "# [END]" tag
  # (case-insensitive in the tag letters).
  ends_ok <- vapply(candidates, function(f) {
    contents <- readLines(paste0(path, "/", f))
    grepl("^# \\[[Ee][Nn][Dd]\\]$", contents[length(contents)])
  }, logical(1))

  # Report each offending file; print the all-clear summary only when
  # every candidate passed.
  for (f in candidates[!ends_ok]) {
    message(sprintf("Malformed or missing [END] tag in \"./%s\"\n", f))
  }
  if (all(ends_ok)) {
    message(sprintf("%d files checked, no [END] tags missing.\n",
                    length(candidates)))
  }
  return(invisible(NULL))
}
# [END]
|
#' @title Cumulative frequency analysis
#' @description Function used to calculate the frequency, percentage, cumulative
#' frequency and cumulative percentage for each health profile in an KHQ5D dataset
#' @param scores data.frame with colnames RL, PL, SL, E, and S representing
#' Role limitation, Physical limitation, Social Limitation, Emotions
#' and Sleep. Alternatively a data.frame with the KHQ5D health profiles can be
#' provided in a five digit format e.g., data.frame(state = c(11111, 22432,
#' 34241, 43332)).
#' @param save.xlsx logical to indicate whether or not save the results; Default: FALSE.
#' @param filename string specifying the file name if save.xlsx = TRUE;
#' Default: "Res_KHQ5D_Frequency.xlsx".
#' @param sheetName string specifying the sheet name if save.xlsx = TRUE;
#' Default: "Frequency".
#' @param ignore.invalid logical to indicate whether to ignore items data
#' with invalid, incomplete or missing data; Default: FALSE.
#' @return A data frame with the Health states, Frequency, Percentage, Cumulative
#' frequency and Cumulative percentage for each five digit profile in an KHQ5D
#' dataset.
#' @details Named vector RL, PL, SL, E and S represent Role limitation,
#' Physical limitation, Social Limitation, Emotions and Sleep, respectively.
#'
#'
#' @examples
#' scores.df <- data.frame(
#' RL = c(1,2,3,4,2),
#' PL = c(4,3,4,3,2),
#' SL = c(1,2,2,4,1),
#' E = c(1,3,4,3,4),
#' S = c(1,2,1,2,1))
#'
#' KHQ5DFreq(scores = scores.df, ignore.invalid = TRUE)
#'
#' scores.df2 <- data.frame(state = c(11111, 22432, 34241, 43332, 22141))
#'
#' KHQ5DFreq(scores = scores.df2, ignore.invalid = TRUE)
#'
#' KHQ5DFreq(scores = scores.df2$state, ignore.invalid = TRUE)
#'
#' KHQ5DFreq(scores = c(11111,11111,22432, 34241, 43332, 22141),
#' ignore.invalid = TRUE)
#'
#' KHQ5DFreq(scores = KHQ5D_data, ignore.invalid = TRUE)
#'
#' KHQ5DFreq(scores = scores.df, save.xlsx = FALSE,
#' filename = "Res_KHQ5D_Frequency.xlsx",
#' sheetName = "Frequency",
#' ignore.invalid = TRUE)
#'
#' @seealso
#' \code{\link[KHQ]{KHQConvKHQ5D}} and \code{\link[KHQ]{KHQ5D}}
#' @rdname KHQ5DFreq
#' @export
#' @importFrom magrittr %>%
#' @importFrom openxlsx write.xlsx
#' @importFrom stats na.omit
KHQ5DFreq <- function(
  scores,
  save.xlsx = FALSE,
  filename = NULL,
  sheetName = NULL,
  ignore.invalid = FALSE
){
  # Frequency analysis of KHQ5D health profiles: for every distinct
  # five-digit state returns its frequency, percentage, cumulative frequency
  # and cumulative percentage, sorted by decreasing frequency.
  #
  # Fixes over the previous revision:
  #  * dropped redundant `== TRUE` / `logical == "TRUE"` comparisons;
  #  * replaced magrittr pipes with base calls so the core path has no
  #    package dependency (openxlsx is needed only when save.xlsx = TRUE);
  #  * `filename` / `sheetName` defaults now apply independently (before,
  #    supplying only one of them passed NULL for the other to write.xlsx).

  # Expected dimension columns for wide-format input.
  DimensionNames <- c("RL", "PL", "SL", "E", "S")

  # --- Validate dimension names when the input already has five columns ----
  if ((is.numeric(scores) || is.data.frame(scores)) &&
      length(names(scores)) == 5) {
    if (!all(DimensionNames %in% names(scores))) {
      print(names(scores))
      stop("Unable to identify KHQ5D dimensions (RL, PL, SL, E, and S) in data.")
    }
  }

  # --- Normalise input into a five-column data.frame -----------------------
  if (length(as.data.frame(scores)) == 1 && length(names(scores)) != 5) {
    # Single column of five-digit states (e.g. 22432): split into digits.
    scores <- data.frame(score = scores)
    digits <- strsplit(as.character(scores$score), "")
    if (!all(lengths(digits) == 5)) {
      # Report every entry that does not have exactly five digits.
      digitMissing <- data.frame(digitMissing = lengths(digits) != 5,
                                 numDigit = lengths(digits))
      digitMissing$rowDigitMissing <- rownames(digitMissing)
      print(digitMissing[digitMissing$digitMissing, 2:3])
      stop("Unable to identify the five digit format in the data.")
    }
    scores <- data.frame(do.call(rbind, digits))
    colnames(scores) <- DimensionNames
  } else if (is.numeric(scores)) {
    # Named numeric vector of the five dimensions: one observation per row.
    scores <- as.data.frame(matrix(scores, ncol = 5))
    colnames(scores) <- DimensionNames
  }

  # --- Missing data --------------------------------------------------------
  if (anyNA(scores)) {
    if (ignore.invalid) {
      # Silently drop incomplete observations.
      scores <- stats::na.omit(scores)
    } else {
      # Show the offending rows before aborting.
      rowNAs <- which(is.na(scores), arr.ind = TRUE)
      print(scores[unique(rowNAs[, "row"]), ])
      stop("Missing/non-numeric dimension found. In case the response was randomly lost, consider use ignore.invalid == TRUE to avoid further problems.")
    }
  }

  # --- Scores must be coded 1..4 -------------------------------------------
  miscoded <- apply(scores, 1, function(x) !all(x %in% 1:4))
  if (any(miscoded)) {
    print(scores[miscoded, ])
    stop("Scores must be coded as 1, 2, 3 or 4 for KHQ5D.")
  }

  # --- Frequency table -----------------------------------------------------
  # Collapse each observation back to its five-digit state label.
  states <- paste0(scores$RL, scores$PL, scores$SL, scores$E, scores$S)
  frequencies <- sort(table(states), decreasing = TRUE)
  percentage <- round(prop.table(as.numeric(frequencies)) * 100, 1)
  cum.freq <- cumsum(as.numeric(frequencies))
  cum.perc <- round(cum.freq / sum(frequencies) * 100, 1)
  df_res_freq <- data.frame(HealthState = names(frequencies),
                            Frequency = as.numeric(frequencies),
                            Percentage = percentage,
                            CumulativeFreq = cum.freq,
                            CumulativePerc = cum.perc,
                            stringsAsFactors = FALSE)

  # --- Optional export to Excel (requires the openxlsx package) ------------
  if (save.xlsx) {
    if (is.null(filename)) filename <- "Res_KHQ5D_Frequency.xlsx"
    if (is.null(sheetName)) sheetName <- "Frequency"
    openxlsx::write.xlsx(df_res_freq, file = filename, sheetName = sheetName,
                         keepNA = FALSE, na.string = "NA", overwrite = TRUE)
  }

  return(df_res_freq)
}
|
/R/KHQ5DFreq.R
|
permissive
|
cran/KHQ
|
R
| false
| false
| 6,114
|
r
|
#' @title Cumulative frequency analysis
#' @description Function used to calculate the frequency, percentage, cumulative
#' frequency and cumulative percentage for each health profile in an KHQ5D dataset
#' @param scores data.frame with colnames RL, PL, SL, E, and S representing
#' Role limitation, Physical limitation, Social Limitation, Emotions
#' and Sleep. Alternatively a data.frame with the KHQ5D health profiles can be
#' provided in a five digit format e.g., data.frame(state = c(11111, 22432,
#' 34241, 43332)).
#' @param save.xlsx logical to indicate whether or not save the results; Default: FALSE.
#' @param filename string specifying the file name if save.xlsx = TRUE;
#' Default: "Res_KHQ5D_Frequency.xlsx".
#' @param sheetName string specifying the sheet name if save.xlsx = TRUE;
#' Default: "Frequency".
#' @param ignore.invalid logical to indicate whether to ignore items data
#' with invalid, incomplete or missing data; Default: FALSE.
#' @return A data frame with the Health states, Frequency, Percentage, Cumulative
#' frequency and Cumulative percentage for each five digit profile in an KHQ5D
#' dataset.
#' @details Named vector RL, PL, SL, E and S represent Role limitation,
#' Physical limitation, Social Limitation, Emotions and Sleep, respectively.
#'
#'
#' @examples
#' scores.df <- data.frame(
#' RL = c(1,2,3,4,2),
#' PL = c(4,3,4,3,2),
#' SL = c(1,2,2,4,1),
#' E = c(1,3,4,3,4),
#' S = c(1,2,1,2,1))
#'
#' KHQ5DFreq(scores = scores.df, ignore.invalid = TRUE)
#'
#' scores.df2 <- data.frame(state = c(11111, 22432, 34241, 43332, 22141))
#'
#' KHQ5DFreq(scores = scores.df2, ignore.invalid = TRUE)
#'
#' KHQ5DFreq(scores = scores.df2$state, ignore.invalid = TRUE)
#'
#' KHQ5DFreq(scores = c(11111,11111,22432, 34241, 43332, 22141),
#' ignore.invalid = TRUE)
#'
#' KHQ5DFreq(scores = KHQ5D_data, ignore.invalid = TRUE)
#'
#' KHQ5DFreq(scores = scores.df, save.xlsx = FALSE,
#' filename = "Res_KHQ5D_Frequency.xlsx",
#' sheetName = "Frequency",
#' ignore.invalid = TRUE)
#'
#' @seealso
#' \code{\link[KHQ]{KHQConvKHQ5D}} and \code{\link[KHQ]{KHQ5D}}
#' @rdname KHQ5DFreq
#' @export
#' @importFrom magrittr %>%
#' @importFrom openxlsx write.xlsx
#' @importFrom stats na.omit
KHQ5DFreq <- function(
  scores,
  save.xlsx = FALSE,
  filename = NULL,
  sheetName = NULL,
  ignore.invalid = FALSE
){
  # Frequency analysis of KHQ5D health profiles: for every distinct
  # five-digit state returns its frequency, percentage, cumulative frequency
  # and cumulative percentage, sorted by decreasing frequency.
  #
  # Fixes over the previous revision:
  #  * dropped redundant `== TRUE` / `logical == "TRUE"` comparisons;
  #  * replaced magrittr pipes with base calls so the core path has no
  #    package dependency (openxlsx is needed only when save.xlsx = TRUE);
  #  * `filename` / `sheetName` defaults now apply independently (before,
  #    supplying only one of them passed NULL for the other to write.xlsx).

  # Expected dimension columns for wide-format input.
  DimensionNames <- c("RL", "PL", "SL", "E", "S")

  # --- Validate dimension names when the input already has five columns ----
  if ((is.numeric(scores) || is.data.frame(scores)) &&
      length(names(scores)) == 5) {
    if (!all(DimensionNames %in% names(scores))) {
      print(names(scores))
      stop("Unable to identify KHQ5D dimensions (RL, PL, SL, E, and S) in data.")
    }
  }

  # --- Normalise input into a five-column data.frame -----------------------
  if (length(as.data.frame(scores)) == 1 && length(names(scores)) != 5) {
    # Single column of five-digit states (e.g. 22432): split into digits.
    scores <- data.frame(score = scores)
    digits <- strsplit(as.character(scores$score), "")
    if (!all(lengths(digits) == 5)) {
      # Report every entry that does not have exactly five digits.
      digitMissing <- data.frame(digitMissing = lengths(digits) != 5,
                                 numDigit = lengths(digits))
      digitMissing$rowDigitMissing <- rownames(digitMissing)
      print(digitMissing[digitMissing$digitMissing, 2:3])
      stop("Unable to identify the five digit format in the data.")
    }
    scores <- data.frame(do.call(rbind, digits))
    colnames(scores) <- DimensionNames
  } else if (is.numeric(scores)) {
    # Named numeric vector of the five dimensions: one observation per row.
    scores <- as.data.frame(matrix(scores, ncol = 5))
    colnames(scores) <- DimensionNames
  }

  # --- Missing data --------------------------------------------------------
  if (anyNA(scores)) {
    if (ignore.invalid) {
      # Silently drop incomplete observations.
      scores <- stats::na.omit(scores)
    } else {
      # Show the offending rows before aborting.
      rowNAs <- which(is.na(scores), arr.ind = TRUE)
      print(scores[unique(rowNAs[, "row"]), ])
      stop("Missing/non-numeric dimension found. In case the response was randomly lost, consider use ignore.invalid == TRUE to avoid further problems.")
    }
  }

  # --- Scores must be coded 1..4 -------------------------------------------
  miscoded <- apply(scores, 1, function(x) !all(x %in% 1:4))
  if (any(miscoded)) {
    print(scores[miscoded, ])
    stop("Scores must be coded as 1, 2, 3 or 4 for KHQ5D.")
  }

  # --- Frequency table -----------------------------------------------------
  # Collapse each observation back to its five-digit state label.
  states <- paste0(scores$RL, scores$PL, scores$SL, scores$E, scores$S)
  frequencies <- sort(table(states), decreasing = TRUE)
  percentage <- round(prop.table(as.numeric(frequencies)) * 100, 1)
  cum.freq <- cumsum(as.numeric(frequencies))
  cum.perc <- round(cum.freq / sum(frequencies) * 100, 1)
  df_res_freq <- data.frame(HealthState = names(frequencies),
                            Frequency = as.numeric(frequencies),
                            Percentage = percentage,
                            CumulativeFreq = cum.freq,
                            CumulativePerc = cum.perc,
                            stringsAsFactors = FALSE)

  # --- Optional export to Excel (requires the openxlsx package) ------------
  if (save.xlsx) {
    if (is.null(filename)) filename <- "Res_KHQ5D_Frequency.xlsx"
    if (is.null(sheetName)) sheetName <- "Frequency"
    openxlsx::write.xlsx(df_res_freq, file = filename, sheetName = sheetName,
                         keepNA = FALSE, na.string = "NA", overwrite = TRUE)
  }

  return(df_res_freq)
}
|
#######################################################################
#
# Loading Packages
#
#######################################################################
library(readxl)    # read the raw .xlsx price files
library(tidyverse) # dplyr / tidyr / stringr / readr pipeline verbs
library(lubridate) # dmy_hms() for parsing character timestamps
library(zoo)       # na.locf() forward-fill
library(ggmap)     # register_google() / geocode() Google Maps wrappers
library(here)      # project-root-relative file paths
library(geosphere) # distm() / distHaversine station distances
#######################################################################
#
# Reading data
#
#######################################################################
# Vector with the names of all raw Excel files to be read.
file.ls <- list.files(path='01 Raw Petrol Data/',pattern='\\.xlsx$')
# List used to accumulate the contents of each file.
petrolprices<-list()
# Each file is read twice: once to locate the true header row (not all
# files start on the same row), and once more skipping everything above it.
for(i in 1:length(file.ls)){
  # All files keep their data in the first sheet.
  desired_sheet <- 1
  # First pass: read the whole sheet without skipping any rows.
  temp_read <- readxl::read_xlsx(paste("01 Raw Petrol Data/",file.ls[i],sep=""),sheet = desired_sheet)
  # Number of rows to skip before the header; NULL until it is found.
  skip_rows <- NULL
  # Column label that marks the real header row.
  search_string <- "ServiceStationName"
  # Only the first rows of the first column are scanned for the label.
  max_rows_to_search <- 10
  # Scan for the header: if the sheet already starts with it, skip nothing;
  # otherwise find the row of the first column that contains the label.
  # NOTE(review): if `search_string` never occurs, which() returns
  # integer(0) and this loop never terminates — this assumes every raw
  # file contains the header; confirm before adding new files.
  while (length(skip_rows) == 0){
    if(names(temp_read[1])==search_string){
      skip_rows<-0
      break
    }
    # The trailing `- 0` is a no-op offset.
    skip_rows <- which(stringr::str_detect(temp_read[1:max_rows_to_search,1][[1]],search_string)) - 0
  }
  # Second pass: re-read the sheet skipping the rows above the header.
  temp_read <- as_tibble(readxl::read_excel(
    paste("01 Raw Petrol Data/",file.ls[i],sep=""),
    sheet = desired_sheet,
    skip = skip_rows
  ))
  # Store this file's tibble in the accumulator list.
  petrolprices[[i]]<-temp_read
}
# Some files store PriceUpdatedDate as text; convert those to date-times so
# all list elements share one column type before row-binding.
# NOTE(review): `class(x) == "character"` works here, but inherits() would
# be the more robust test.
petrolprices<-lapply(petrolprices, function(x){
  if(class(x$PriceUpdatedDate)=="character"){
    x$PriceUpdatedDate<-dmy_hms(x$PriceUpdatedDate)
  }
  else{
    x
  };return(x)
})
# Stack all per-file data frames into one and drop rows without a price.
petrolprices<-do.call(rbind, lapply(petrolprices, as.data.frame))
petrolprices<-petrolprices %>% filter(!is.na(Price))
#######################################################################
#
# Data Tidying
#
#######################################################################
# Forward-fill NAs in the first five (station identity) columns: the raw
# sheets leave these blank on repeated rows, so each NA takes the last
# observed value above it.
petrolprices_t<-petrolprices
petrolprices_t[,c(1,2,3,4,5)]<-na.locf(petrolprices[,c(1,2,3,4,5)])
#####################
# Geocoding address
#####################
# One row per distinct service station.
stations<-petrolprices_t %>% select(ServiceStationName,Address,Suburb,Postcode,Brand) %>%
  distinct()
# Placeholder: a real Google Maps API key must be supplied before running.
register_google(key = "GoogleAPIKeyHere")
# Geocode twice: by street address and, as a fallback, by station name.
coordinates<-ggmap::geocode(stations$Address, output = "more", source = "google")
coordinates2<-ggmap::geocode(stations$ServiceStationName, output = "more", source = "google")
# Rows whose address-based result failed or resolved outside NSW.
id<-is.na(coordinates$address) | !str_detect(coordinates$address,"nsw")
# Final coordinates: address-based results, patched with the name-based
# fallback where the address lookup was unusable.
coordinates_final<-coordinates
coordinates_final[id,]<-coordinates2[id,]
# Attach coordinates to the station table, dropping geocoder metadata.
stations_final<-cbind(stations,coordinates_final) %>%
  select(-type,-north,-south,-east,-west,-loctype)
#####################
# Manual Changes
#####################
# Hand-curated address/coordinate corrections maintained in a spreadsheet.
corrections<-read_xlsx(here("03 Data Tidying","Address_corrections.xlsx"))
# Apply the corrections (New_Address / lat / lon override geocoded values
# when present), keep the columns of interest and lower-case text fields so
# later joins are case-insensitive.
stations_final<-stations_final %>%left_join(corrections, by=c("Address"))%>%
  mutate(address=ifelse(is.na(New_Address),address,New_Address)) %>%
  mutate(lat=ifelse(is.na(lat.y),lat.x,lat.y)) %>%
  mutate(lon=ifelse(is.na(lon.y),lon.x,lon.y)) %>%
  select(station_name=ServiceStationName, address=Address, suburb=Suburb,
         postcode=Postcode,brand=Brand,lon,lat) %>%
  mutate(station_name=tolower(station_name),
         address=tolower(address),
         suburb=tolower(suburb),
         brand=tolower(brand))
# Sequential surrogate key for each station row.
stations_final$ID<-seq.int(nrow(stations_final))
#######################################
# Too close stations (are the same?)
#######################################
# Pairwise Haversine distance matrix between all stations.
distance_matrix<-distm(as.matrix(stations_final[,c("lon","lat")]), fun = distHaversine)
# Reshape the square matrix into one row per unordered station pair.
distance_matrix_2<-distance_matrix
distance_matrix_2<-data.frame(distance_matrix_2)
distance_matrix_2$ID_1<-seq.int(nrow(distance_matrix))
distance_matrix_2<-distance_matrix_2 %>%
  gather(key="ID_2",value="distance",-ID_1) %>%
  mutate(ID_2=parse_number(ID_2)) %>%
  filter(!ID_1==ID_2) %>%
  rowwise() %>%
  mutate(ID=paste(min(ID_1,ID_2),max(ID_1,ID_2),sep="_")) %>%
  distinct(ID, .keep_all=TRUE)
# Pairs of same-brand stations closer than the threshold are treated as
# duplicates of one physical station.
aux_matrix <- distance_matrix_2 %>% ungroup() %>%
  left_join(stations_final[,c("ID","brand")],by=c("ID_1"="ID")) %>%
  left_join(stations_final[,c("ID","brand")],by=c("ID_2"="ID")) %>%
  filter(brand.x==brand.y,distance<20)
# Collapse each duplicate onto its partner's ID.
stations_nodup<-stations_final %>%
  left_join(aux_matrix[,c("ID_1","ID_2")],by=c("ID"="ID_1")) %>%
  mutate(ID=ifelse(is.na(ID_2),ID,ID_2)) %>%
  select(-ID_2) %>%
  select(ID,everything())
# Manually identified duplicate IDs not caught by the distance rule.
stations_nodup<-stations_nodup %>%
  mutate(ID=case_when(
    ID==2082~20,
    ID==2163~852,
    ID==2198~964,
    ID==1656~1654,
    ID==2402~1654,
    ID==2195~1810,
    TRUE ~ ID
  ))
# Nearby pairs with *different* brands (possible rebrands) — computed for
# inspection only; no IDs are changed for these.
aux_matrix2 <- distance_matrix_2 %>% ungroup() %>%
  left_join(stations_final[,c("ID","brand")],by=c("ID_1"="ID")) %>%
  left_join(stations_final[,c("ID","brand")],by=c("ID_2"="ID")) %>%
  filter(!brand.x==brand.y,distance<20)
# Lookup tables for the final join: brand/address keyed by ID, and one
# coordinate row per deduplicated ID (first occurrence wins).
station_list<-stations_nodup %>%
  select(ID,brand,address)
coordinate_list<-stations_nodup %>%
  select(-brand,-address) %>% group_by(ID) %>%
  summarise_all(first)
#######################################################################
#
# Joining Data
#
#######################################################################
# Attach station IDs and coordinates to every price observation.
# NOTE(review): left_join() without `by` joins on all shared columns
# (here brand + address) — verify that is the intended key.
mergeddata<-petrolprices_t %>%
  select(address=Address, brand=Brand, fuel=FuelCode,
         date=PriceUpdatedDate,price=Price) %>%
  mutate(address=tolower(address), brand=tolower(brand)) %>%
  left_join(station_list) %>% left_join(coordinate_list)
# Fuel-type counts; E10 has the most observations and is used downstream.
table(mergeddata$fuel)
# Persist the tidy outputs for the analysis stage.
saveRDS(mergeddata,here("04 Tidy Data","Geocoded_petrol_data.rds"))
saveRDS(stations_nodup,here("04 Tidy Data","Stations_list_all.rds"))
|
/03 Data Tidying/01 Geocoding Petrol Data.R
|
no_license
|
felipemonroy/Spatial-Analysis--Petrol-Price
|
R
| false
| false
| 7,128
|
r
|
#######################################################################
#
# Loading Packages
#
#######################################################################
library(readxl)    # read the raw .xlsx price files
library(tidyverse) # dplyr / tidyr / stringr / readr pipeline verbs
library(lubridate) # dmy_hms() for parsing character timestamps
library(zoo)       # na.locf() forward-fill
library(ggmap)     # register_google() / geocode() Google Maps wrappers
library(here)      # project-root-relative file paths
library(geosphere) # distm() / distHaversine station distances
#######################################################################
#
# Reading data
#
#######################################################################
# Vector with the names of all raw Excel files to be read.
file.ls <- list.files(path='01 Raw Petrol Data/',pattern='\\.xlsx$')
# List used to accumulate the contents of each file.
petrolprices<-list()
# Each file is read twice: once to locate the true header row (not all
# files start on the same row), and once more skipping everything above it.
for(i in 1:length(file.ls)){
  # All files keep their data in the first sheet.
  desired_sheet <- 1
  # First pass: read the whole sheet without skipping any rows.
  temp_read <- readxl::read_xlsx(paste("01 Raw Petrol Data/",file.ls[i],sep=""),sheet = desired_sheet)
  # Number of rows to skip before the header; NULL until it is found.
  skip_rows <- NULL
  # Column label that marks the real header row.
  search_string <- "ServiceStationName"
  # Only the first rows of the first column are scanned for the label.
  max_rows_to_search <- 10
  # Scan for the header: if the sheet already starts with it, skip nothing;
  # otherwise find the row of the first column that contains the label.
  # NOTE(review): if `search_string` never occurs, which() returns
  # integer(0) and this loop never terminates — this assumes every raw
  # file contains the header; confirm before adding new files.
  while (length(skip_rows) == 0){
    if(names(temp_read[1])==search_string){
      skip_rows<-0
      break
    }
    # The trailing `- 0` is a no-op offset.
    skip_rows <- which(stringr::str_detect(temp_read[1:max_rows_to_search,1][[1]],search_string)) - 0
  }
  # Second pass: re-read the sheet skipping the rows above the header.
  temp_read <- as_tibble(readxl::read_excel(
    paste("01 Raw Petrol Data/",file.ls[i],sep=""),
    sheet = desired_sheet,
    skip = skip_rows
  ))
  # Store this file's tibble in the accumulator list.
  petrolprices[[i]]<-temp_read
}
# Some files store PriceUpdatedDate as text; convert those to date-times so
# all list elements share one column type before row-binding.
# NOTE(review): `class(x) == "character"` works here, but inherits() would
# be the more robust test.
petrolprices<-lapply(petrolprices, function(x){
  if(class(x$PriceUpdatedDate)=="character"){
    x$PriceUpdatedDate<-dmy_hms(x$PriceUpdatedDate)
  }
  else{
    x
  };return(x)
})
# Stack all per-file data frames into one and drop rows without a price.
petrolprices<-do.call(rbind, lapply(petrolprices, as.data.frame))
petrolprices<-petrolprices %>% filter(!is.na(Price))
#######################################################################
#
# Data Tidying
#
#######################################################################
# Forward-fill NAs in the first five (station identity) columns: the raw
# sheets leave these blank on repeated rows, so each NA takes the last
# observed value above it.
petrolprices_t<-petrolprices
petrolprices_t[,c(1,2,3,4,5)]<-na.locf(petrolprices[,c(1,2,3,4,5)])
#####################
# Geocoding address
#####################
# One row per distinct service station.
stations<-petrolprices_t %>% select(ServiceStationName,Address,Suburb,Postcode,Brand) %>%
  distinct()
# Placeholder: a real Google Maps API key must be supplied before running.
register_google(key = "GoogleAPIKeyHere")
# Geocode twice: by street address and, as a fallback, by station name.
coordinates<-ggmap::geocode(stations$Address, output = "more", source = "google")
coordinates2<-ggmap::geocode(stations$ServiceStationName, output = "more", source = "google")
# Rows whose address-based result failed or resolved outside NSW.
id<-is.na(coordinates$address) | !str_detect(coordinates$address,"nsw")
# Final coordinates: address-based results, patched with the name-based
# fallback where the address lookup was unusable.
coordinates_final<-coordinates
coordinates_final[id,]<-coordinates2[id,]
# Attach coordinates to the station table, dropping geocoder metadata.
stations_final<-cbind(stations,coordinates_final) %>%
  select(-type,-north,-south,-east,-west,-loctype)
#####################
# Manual Changes
#####################
# Hand-curated address/coordinate corrections maintained in a spreadsheet.
corrections<-read_xlsx(here("03 Data Tidying","Address_corrections.xlsx"))
# Apply the corrections (New_Address / lat / lon override geocoded values
# when present), keep the columns of interest and lower-case text fields so
# later joins are case-insensitive.
stations_final<-stations_final %>%left_join(corrections, by=c("Address"))%>%
  mutate(address=ifelse(is.na(New_Address),address,New_Address)) %>%
  mutate(lat=ifelse(is.na(lat.y),lat.x,lat.y)) %>%
  mutate(lon=ifelse(is.na(lon.y),lon.x,lon.y)) %>%
  select(station_name=ServiceStationName, address=Address, suburb=Suburb,
         postcode=Postcode,brand=Brand,lon,lat) %>%
  mutate(station_name=tolower(station_name),
         address=tolower(address),
         suburb=tolower(suburb),
         brand=tolower(brand))
# Sequential surrogate key for each station row.
stations_final$ID<-seq.int(nrow(stations_final))
#######################################
# Too close stations (are the same?)
#######################################
# Pairwise Haversine distance matrix between all stations.
distance_matrix<-distm(as.matrix(stations_final[,c("lon","lat")]), fun = distHaversine)
# Reshape the square matrix into one row per unordered station pair.
distance_matrix_2<-distance_matrix
distance_matrix_2<-data.frame(distance_matrix_2)
distance_matrix_2$ID_1<-seq.int(nrow(distance_matrix))
distance_matrix_2<-distance_matrix_2 %>%
  gather(key="ID_2",value="distance",-ID_1) %>%
  mutate(ID_2=parse_number(ID_2)) %>%
  filter(!ID_1==ID_2) %>%
  rowwise() %>%
  mutate(ID=paste(min(ID_1,ID_2),max(ID_1,ID_2),sep="_")) %>%
  distinct(ID, .keep_all=TRUE)
# Pairs of same-brand stations closer than the threshold are treated as
# duplicates of one physical station.
aux_matrix <- distance_matrix_2 %>% ungroup() %>%
  left_join(stations_final[,c("ID","brand")],by=c("ID_1"="ID")) %>%
  left_join(stations_final[,c("ID","brand")],by=c("ID_2"="ID")) %>%
  filter(brand.x==brand.y,distance<20)
# Collapse each duplicate onto its partner's ID.
stations_nodup<-stations_final %>%
  left_join(aux_matrix[,c("ID_1","ID_2")],by=c("ID"="ID_1")) %>%
  mutate(ID=ifelse(is.na(ID_2),ID,ID_2)) %>%
  select(-ID_2) %>%
  select(ID,everything())
# Manually identified duplicate IDs not caught by the distance rule.
stations_nodup<-stations_nodup %>%
  mutate(ID=case_when(
    ID==2082~20,
    ID==2163~852,
    ID==2198~964,
    ID==1656~1654,
    ID==2402~1654,
    ID==2195~1810,
    TRUE ~ ID
  ))
# Nearby pairs with *different* brands (possible rebrands) — computed for
# inspection only; no IDs are changed for these.
aux_matrix2 <- distance_matrix_2 %>% ungroup() %>%
  left_join(stations_final[,c("ID","brand")],by=c("ID_1"="ID")) %>%
  left_join(stations_final[,c("ID","brand")],by=c("ID_2"="ID")) %>%
  filter(!brand.x==brand.y,distance<20)
# Lookup tables for the final join: brand/address keyed by ID, and one
# coordinate row per deduplicated ID (first occurrence wins).
station_list<-stations_nodup %>%
  select(ID,brand,address)
coordinate_list<-stations_nodup %>%
  select(-brand,-address) %>% group_by(ID) %>%
  summarise_all(first)
#######################################################################
#
# Joining Data
#
#######################################################################
# Attach station IDs and coordinates to every price observation.
# NOTE(review): left_join() without `by` joins on all shared columns
# (here brand + address) — verify that is the intended key.
mergeddata<-petrolprices_t %>%
  select(address=Address, brand=Brand, fuel=FuelCode,
         date=PriceUpdatedDate,price=Price) %>%
  mutate(address=tolower(address), brand=tolower(brand)) %>%
  left_join(station_list) %>% left_join(coordinate_list)
# Fuel-type counts; E10 has the most observations and is used downstream.
table(mergeddata$fuel)
# Persist the tidy outputs for the analysis stage.
saveRDS(mergeddata,here("04 Tidy Data","Geocoded_petrol_data.rds"))
saveRDS(stations_nodup,here("04 Tidy Data","Stations_list_all.rds"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add.R, R/add_.R
\name{e_sankey}
\alias{e_sankey}
\alias{e_sankey_}
\title{Sankey}
\usage{
e_sankey(
e,
source,
target,
value,
layout = "none",
rm_x = TRUE,
rm_y = TRUE,
...
)
e_sankey_(
e,
source,
target,
value,
layout = "none",
rm_x = TRUE,
rm_y = TRUE,
...
)
}
\arguments{
\item{e}{An \code{echarts4r} object as returned by \code{\link{e_charts}} or
a proxy as returned by \code{\link{echarts4rProxy}}.}
\item{source, target}{Source and target columns.}
\item{value}{Value change from \code{source} to \code{target}.}
\item{layout}{Layout of sankey.}
\item{rm_x, rm_y}{Whether to remove the x and y axis, defaults to \code{TRUE}.}
\item{...}{Any other option to pass, check See Also section.}
}
\description{
Draw a sankey diagram.
}
\examples{
sankey <- data.frame(
source = c("a", "b", "c", "d", "c"),
target = c("b", "c", "d", "e", "e"),
value = ceiling(rnorm(5, 10, 1)),
stringsAsFactors = FALSE
)
sankey |>
e_charts() |>
e_sankey(source, target, value)
}
\seealso{
\href{https://echarts.apache.org/en/option.html#series-sankey}{Additional arguments}
}
|
/man/e_sankey.Rd
|
permissive
|
JohnCoene/echarts4r
|
R
| false
| true
| 1,188
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add.R, R/add_.R
\name{e_sankey}
\alias{e_sankey}
\alias{e_sankey_}
\title{Sankey}
\usage{
e_sankey(
e,
source,
target,
value,
layout = "none",
rm_x = TRUE,
rm_y = TRUE,
...
)
e_sankey_(
e,
source,
target,
value,
layout = "none",
rm_x = TRUE,
rm_y = TRUE,
...
)
}
\arguments{
\item{e}{An \code{echarts4r} object as returned by \code{\link{e_charts}} or
a proxy as returned by \code{\link{echarts4rProxy}}.}
\item{source, target}{Source and target columns.}
\item{value}{Value change from \code{source} to \code{target}.}
\item{layout}{Layout of sankey.}
\item{rm_x, rm_y}{Whether to remove the x and y axis, defaults to \code{TRUE}.}
\item{...}{Any other option to pass, check See Also section.}
}
\description{
Draw a sankey diagram.
}
\examples{
sankey <- data.frame(
source = c("a", "b", "c", "d", "c"),
target = c("b", "c", "d", "e", "e"),
value = ceiling(rnorm(5, 10, 1)),
stringsAsFactors = FALSE
)
sankey |>
e_charts() |>
e_sankey(source, target, value)
}
\seealso{
\href{https://echarts.apache.org/en/option.html#series-sankey}{Additional arguments}
}
|
# Advanced scatterplots: layering several point series on one plot.
set.seed(67)
x <- rnorm(10, 5, 7)
y <- rpois(10, 7)
z <- rnorm(10, 6, 7)
t <- rpois(10, 9)

# Base layer: a simple scatterplot with a formatted title and axis labels.
plot(x, y, col = 123, pch = 10, main = "Multi scatterplot",
     col.main = "red", cex.main = 1.5, xlab = "indep", ylab = "depend")
# Second layer of points on the same axes.
points(z, t, col = "blue", pch = 4)
# Third (last) layer — three layers in total.
points(y, t, col = 777, pch = 9)
#################
# Legends
# The first two numbers position the legend on the x/y scale; `legend`
# supplies the labels, `col`/`pch` the matching colours and symbols,
# `cex` scales the text and bty = "n" removes the surrounding box.
# BUG FIX: the function is legend(), not legends(), and the labels must
# be passed as `legend = c(...)`, not as a nested call.
?legend
legend(-6, 5.9, legend = c("Level 1", "level 2", "level 3"),
       col = c(123, "blue", 777), pch = c(10, 4, 9), cex = 0.65, bty = "n")

# Exercise ------------------------------------------------------------
# Create the following vectors.
x <- 1:5
y <- rep(4, 5)
x1 <- 1.1:5.1
# BUG FIX: original read `y1=5;1` (a stray `1` statement); kept the
# assigned value 5, which recycles across x1 when plotted.
y1 <- 5
# Plot the three levels (x-y, x1-y1, x1-x) in a suitable format,
# then add a legend without a box.
plot(x, y, main = "Exercise Solution", xlab = "", ylab = "",
     cex.main = 1.3, col.main = 777, pch = 3, col = "red", bty = "n")
points(x1, y1, col = "green", pch = 8)
points(x1, x, col = "blue", pch = 9)
legend(1, 3.5, legend = c("level 1", "level 2", "level 3"),
       col = c("red", "green", "blue"), pch = c(3, 8, 9), cex = 0.75, bty = "n")
/AdvanceScatterplot_Legends.R
|
no_license
|
tanvisenjaliya/Introduction_to_R
|
R
| false
| false
| 1,438
|
r
|
# Advanced scatterplots: layering several point series on one plot.
set.seed(67)
x <- rnorm(10, 5, 7)
y <- rpois(10, 7)
z <- rnorm(10, 6, 7)
t <- rpois(10, 9)

# Base layer: a simple scatterplot with a formatted title and axis labels.
plot(x, y, col = 123, pch = 10, main = "Multi scatterplot",
     col.main = "red", cex.main = 1.5, xlab = "indep", ylab = "depend")
# Second layer of points on the same axes.
points(z, t, col = "blue", pch = 4)
# Third (last) layer — three layers in total.
points(y, t, col = 777, pch = 9)
#################
# Legends
# The first two numbers position the legend on the x/y scale; `legend`
# supplies the labels, `col`/`pch` the matching colours and symbols,
# `cex` scales the text and bty = "n" removes the surrounding box.
# BUG FIX: the function is legend(), not legends(), and the labels must
# be passed as `legend = c(...)`, not as a nested call.
?legend
legend(-6, 5.9, legend = c("Level 1", "level 2", "level 3"),
       col = c(123, "blue", 777), pch = c(10, 4, 9), cex = 0.65, bty = "n")

# Exercise ------------------------------------------------------------
# Create the following vectors.
x <- 1:5
y <- rep(4, 5)
x1 <- 1.1:5.1
# BUG FIX: original read `y1=5;1` (a stray `1` statement); kept the
# assigned value 5, which recycles across x1 when plotted.
y1 <- 5
# Plot the three levels (x-y, x1-y1, x1-x) in a suitable format,
# then add a legend without a box.
plot(x, y, main = "Exercise Solution", xlab = "", ylab = "",
     cex.main = 1.3, col.main = 777, pch = 3, col = "red", bty = "n")
points(x1, y1, col = "green", pch = 8)
points(x1, x, col = "blue", pch = 9)
legend(1, 3.5, legend = c("level 1", "level 2", "level 3"),
       col = c("red", "green", "blue"), pch = c(3, 8, 9), cex = 0.75, bty = "n")
#' Imputation by random forests
#'
#' Imputes univariate missing data using random forests.
#'
#' @aliases mice.impute.rf
#' @inheritParams mice.impute.pmm
#' @param ntree The number of trees to grow. The default is 10.
#' @param rfPackage A single string specifying the backend for estimating the
#' random forest. The default backend is the \code{ranger} package. The only
#' alternative currently implemented is the \code{randomForest} package, which
#' used to be the default in mice 3.13.10 and earlier.
#' @param \dots Other named arguments passed down to
#' \code{mice:::install.on.demand()}, \code{randomForest::randomForest()} and
#' \code{randomForest:::randomForest.default()}.
#' @return Vector with imputed data, same type as \code{y}, and of length
#' \code{sum(wy)}
#' @details
#' Imputation of \code{y} by random forests. The method
#' calls \code{randomForest()} which implements Breiman's random forest
#' algorithm (based on Breiman and Cutler's original Fortran code)
#' for classification and regression. See Appendix A.1 of Doove et al.
#' (2014) for the definition of the algorithm used.
#' @note An alternative implementation was independently
#' developed by Shah et al (2014). This were available as
#' functions \code{CALIBERrfimpute::mice.impute.rfcat} and
#' \code{CALIBERrfimpute::mice.impute.rfcont} (now archived).
#' Simulations by Shah (Feb 13, 2014) suggested that
#' the quality of the imputation for 10 and 100 trees was identical,
#' so mice 2.22 changed the default number of trees from \code{ntree = 100} to
#' \code{ntree = 10}.
#' @author Lisa Doove, Stef van Buuren, Elise Dusseldorp, 2012; Patrick Rockenschaub, 2021
#' @references
#'
#' Doove, L.L., van Buuren, S., Dusseldorp, E. (2014), Recursive partitioning
#' for missing data imputation in the presence of interaction Effects.
#' Computational Statistics \& Data Analysis, 72, 92-104.
#'
#' Shah, A.D., Bartlett, J.W., Carpenter, J., Nicholas, O., Hemingway, H. (2014),
#' Comparison of random forest and parametric imputation models for
#' imputing missing data using MICE: A CALIBER study. American Journal
#' of Epidemiology, doi: 10.1093/aje/kwt312.
#'
#' Van Buuren, S. (2018).
#' \href{https://stefvanbuuren.name/fimd/sec-cart.html}{\emph{Flexible Imputation of Missing Data. Second Edition.}}
#' Chapman & Hall/CRC. Boca Raton, FL.
#' @seealso \code{\link{mice}}, \code{\link{mice.impute.cart}},
#' \code{\link[randomForest]{randomForest}}
#' \code{\link[ranger]{ranger}}
#' @family univariate imputation functions
#' @keywords datagen
#' @examples
#' library("lattice")
#'
#' imp <- mice(nhanes2, meth = "rf", ntree = 3)
#' plot(imp)
#' @export
mice.impute.rf <- function(y, ry, x, wy = NULL, ntree = 10,
                           rfPackage = c("ranger", "randomForest"), ...) {
  # Impute y by collecting, for each missing entry and each of ntree trees,
  # the observed y values sharing its terminal node, then drawing one donor
  # at random from the pooled candidate sets.
  rfPackage <- match.arg(rfPackage)
  if (is.null(wy)) {
    wy <- !ry
  }
  ntree <- max(1, ntree)  # safety: never grow fewer than one tree
  n_missing <- sum(wy)
  x_observed <- x[ry, , drop = FALSE]
  x_missing <- x[wy, , drop = FALSE]
  y_observed <- y[ry]
  # Pick the backend that builds the (n_missing x ntree) matrix of donor sets.
  donor_fun <- if (rfPackage == "ranger") .ranger.donors else .randomForest.donors
  donor_sets <- donor_fun(x_observed, x_missing, y_observed, ntree, ...)
  # With a single missing value sapply collapses the result to a plain list;
  # restore the 1 x ntree layout expected by apply() below.
  if (n_missing == 1) {
    donor_sets <- array(donor_sets, dim = c(1, ntree))
  }
  # Per missing value: pool the donors from all trees and sample one.
  apply(donor_sets, MARGIN = 1, FUN = function(d) sample(unlist(d), 1))
}
# Find eligible donors using the randomForest package (default).
#
# Fits `ntree` independent single-tree forests on the observed data and, for
# every missing row under every tree, collects the observed y values sharing
# that row's terminal node. Returns an (nmis x ntree) matrix of donor lists
# (via sapply's simplification) consumed by mice.impute.rf.
.randomForest.donors <- function(xobs, xmis, yobs, ntree, ...) {
  install.on.demand("randomForest", ...)
  onetree <- function(xobs, xmis, yobs, ...) {
    # Function to fit a single tree and return, per missing row, the
    # observed y values landing in the same terminal node.
    fit <- randomForest::randomForest(
      x = xobs,
      y = yobs,
      ntree = 1, ...
    )
    # Terminal-node id of every observed row under this tree.
    leafnr <- predict(object = fit, newdata = xobs, nodes = TRUE)
    leafnr <- as.vector(attr(leafnr, "nodes"))
    # Terminal-node id of every missing row under the same tree.
    nodes <- predict(object = fit, newdata = xmis, nodes = TRUE)
    nodes <- as.vector(attr(nodes, "nodes"))
    # Donor pool per missing row: observed y's in the matching node.
    donor <- lapply(nodes, function(s) yobs[leafnr == s])
    return(donor)
  }
  # One column of donor lists per tree; sapply keeps the list elements,
  # producing the matrix-of-lists layout the caller's apply() relies on.
  sapply(seq_len(ntree), FUN = function(s) onetree(xobs, xmis, yobs, ...))
}
# Find eligible donors using the ranger package.
#
# Fits one ranger forest with `ntree` trees, predicts the terminal node of
# every observed and missing row in each tree, and returns an (nmis x ntree)
# matrix of donor lists: for missing row i and tree t, the observed y values
# that fall in the same terminal node.
.ranger.donors <- function(xobs, xmis, yobs, ntree, ...) {
  install.on.demand("ranger", ...)
  # Fit all trees at once
  fit <- ranger::ranger(x = xobs, y = yobs, num.trees = ntree)
  # Terminal-node ids for observed rows followed by missing rows;
  # with predict.all = TRUE, one column per tree.
  nodes <- predict(
    object = fit, data = rbind(xobs, xmis),
    type = "terminalNodes", predict.all = TRUE
  )
  nodes <- ranger::predictions(nodes)
  # NOTE(review): these subsets assume >= 2 observed and >= 2 missing rows;
  # a single row would drop matrix dimensions — confirm callers guarantee it.
  nodes_obs <- nodes[1:nrow(xobs), ]
  nodes_mis <- nodes[(nrow(xobs) + 1):nrow(nodes), ]
  select_donors <- function(i) {
    # Function to extract all eligible donors for each missing value: group
    # observed y by terminal node in tree i, then look up each missing row's
    # node id.
    donors <- split(yobs, nodes_obs[, i])
    donors[as.character(nodes_mis[, i])]
  }
  # One column of donor lists per tree (sapply keeps the list elements).
  sapply(seq_len(ntree), FUN = select_donors)
}
|
/R/mice.impute.rf.R
|
no_license
|
carpenitoThomas/mice
|
R
| false
| false
| 4,762
|
r
|
#' Imputation by random forests
#'
#' Imputes univariate missing data using random forests.
#'
#' @aliases mice.impute.rf
#' @inheritParams mice.impute.pmm
#' @param ntree The number of trees to grow. The default is 10.
#' @param rfPackage A single string specifying the backend for estimating the
#' random forest. The default backend is the \code{ranger} package. The only
#' alternative currently implemented is the \code{randomForest} package, which
#' used to be the default in mice 3.13.10 and earlier.
#' @param \dots Other named arguments passed down to
#' \code{mice:::install.on.demand()}, \code{randomForest::randomForest()} and
#' \code{randomForest:::randomForest.default()}.
#' @return Vector with imputed data, same type as \code{y}, and of length
#' \code{sum(wy)}
#' @details
#' Imputation of \code{y} by random forests. The method
#' calls \code{randomForest()} which implements Breiman's random forest
#' algorithm (based on Breiman and Cutler's original Fortran code)
#' for classification and regression. See Appendix A.1 of Doove et al.
#' (2014) for the definition of the algorithm used.
#' @note An alternative implementation was independently
#' developed by Shah et al (2014). This were available as
#' functions \code{CALIBERrfimpute::mice.impute.rfcat} and
#' \code{CALIBERrfimpute::mice.impute.rfcont} (now archived).
#' Simulations by Shah (Feb 13, 2014) suggested that
#' the quality of the imputation for 10 and 100 trees was identical,
#' so mice 2.22 changed the default number of trees from \code{ntree = 100} to
#' \code{ntree = 10}.
#' @author Lisa Doove, Stef van Buuren, Elise Dusseldorp, 2012; Patrick Rockenschaub, 2021
#' @references
#'
#' Doove, L.L., van Buuren, S., Dusseldorp, E. (2014), Recursive partitioning
#' for missing data imputation in the presence of interaction Effects.
#' Computational Statistics \& Data Analysis, 72, 92-104.
#'
#' Shah, A.D., Bartlett, J.W., Carpenter, J., Nicholas, O., Hemingway, H. (2014),
#' Comparison of random forest and parametric imputation models for
#' imputing missing data using MICE: A CALIBER study. American Journal
#' of Epidemiology, doi: 10.1093/aje/kwt312.
#'
#' Van Buuren, S. (2018).
#' \href{https://stefvanbuuren.name/fimd/sec-cart.html}{\emph{Flexible Imputation of Missing Data. Second Edition.}}
#' Chapman & Hall/CRC. Boca Raton, FL.
#' @seealso \code{\link{mice}}, \code{\link{mice.impute.cart}},
#' \code{\link[randomForest]{randomForest}}
#' \code{\link[ranger]{ranger}}
#' @family univariate imputation functions
#' @keywords datagen
#' @examples
#' library("lattice")
#'
#' imp <- mice(nhanes2, meth = "rf", ntree = 3)
#' plot(imp)
#' @export
mice.impute.rf <- function(y, ry, x, wy = NULL, ntree = 10,
                           rfPackage = c("ranger", "randomForest"), ...) {
  # Impute y by collecting, for each missing entry and each of ntree trees,
  # the observed y values sharing its terminal node, then drawing one donor
  # at random from the pooled candidate sets.
  rfPackage <- match.arg(rfPackage)
  if (is.null(wy)) {
    wy <- !ry
  }
  ntree <- max(1, ntree)  # safety: never grow fewer than one tree
  n_missing <- sum(wy)
  x_observed <- x[ry, , drop = FALSE]
  x_missing <- x[wy, , drop = FALSE]
  y_observed <- y[ry]
  # Pick the backend that builds the (n_missing x ntree) matrix of donor sets.
  donor_fun <- if (rfPackage == "ranger") .ranger.donors else .randomForest.donors
  donor_sets <- donor_fun(x_observed, x_missing, y_observed, ntree, ...)
  # With a single missing value sapply collapses the result to a plain list;
  # restore the 1 x ntree layout expected by apply() below.
  if (n_missing == 1) {
    donor_sets <- array(donor_sets, dim = c(1, ntree))
  }
  # Per missing value: pool the donors from all trees and sample one.
  apply(donor_sets, MARGIN = 1, FUN = function(d) sample(unlist(d), 1))
}
# Find eligible donors using the randomForest package (default).
#
# Fits `ntree` independent single-tree forests on the observed data and, for
# every missing row under every tree, collects the observed y values sharing
# that row's terminal node. Returns an (nmis x ntree) matrix of donor lists
# (via sapply's simplification) consumed by mice.impute.rf.
.randomForest.donors <- function(xobs, xmis, yobs, ntree, ...) {
  install.on.demand("randomForest", ...)
  onetree <- function(xobs, xmis, yobs, ...) {
    # Function to fit a single tree and return, per missing row, the
    # observed y values landing in the same terminal node.
    fit <- randomForest::randomForest(
      x = xobs,
      y = yobs,
      ntree = 1, ...
    )
    # Terminal-node id of every observed row under this tree.
    leafnr <- predict(object = fit, newdata = xobs, nodes = TRUE)
    leafnr <- as.vector(attr(leafnr, "nodes"))
    # Terminal-node id of every missing row under the same tree.
    nodes <- predict(object = fit, newdata = xmis, nodes = TRUE)
    nodes <- as.vector(attr(nodes, "nodes"))
    # Donor pool per missing row: observed y's in the matching node.
    donor <- lapply(nodes, function(s) yobs[leafnr == s])
    return(donor)
  }
  # One column of donor lists per tree; sapply keeps the list elements,
  # producing the matrix-of-lists layout the caller's apply() relies on.
  sapply(seq_len(ntree), FUN = function(s) onetree(xobs, xmis, yobs, ...))
}
# Find eligible donors using the ranger package.
#
# Fits one ranger forest with `ntree` trees, predicts the terminal node of
# every observed and missing row in each tree, and returns an (nmis x ntree)
# matrix of donor lists: for missing row i and tree t, the observed y values
# that fall in the same terminal node.
.ranger.donors <- function(xobs, xmis, yobs, ntree, ...) {
  install.on.demand("ranger", ...)
  # Fit all trees at once
  fit <- ranger::ranger(x = xobs, y = yobs, num.trees = ntree)
  # Terminal-node ids for observed rows followed by missing rows;
  # with predict.all = TRUE, one column per tree.
  nodes <- predict(
    object = fit, data = rbind(xobs, xmis),
    type = "terminalNodes", predict.all = TRUE
  )
  nodes <- ranger::predictions(nodes)
  # NOTE(review): these subsets assume >= 2 observed and >= 2 missing rows;
  # a single row would drop matrix dimensions — confirm callers guarantee it.
  nodes_obs <- nodes[1:nrow(xobs), ]
  nodes_mis <- nodes[(nrow(xobs) + 1):nrow(nodes), ]
  select_donors <- function(i) {
    # Function to extract all eligible donors for each missing value: group
    # observed y by terminal node in tree i, then look up each missing row's
    # node id.
    donors <- split(yobs, nodes_obs[, i])
    donors[as.character(nodes_mis[, i])]
  }
  # One column of donor lists per tree (sapply keeps the list elements).
  sapply(seq_len(ntree), FUN = select_donors)
}
|
# LLmortPlotCalc: expand FIA longleaf-pine remeasurement plots into per-tree
# diameter and age vectors (each tree repeated by its rounded TPA expansion
# factor), reconcile ingrowth between the start and end inventories, save the
# results, and fit a quadratic diameter-dependent mortality curve.
#
# NOTE(review): absolute Windows paths make this script machine-specific;
# consider relative paths anchored at a project root, and avoid
# rm(list=ls())/setwd() in scripts.
rm(list=ls())
library(dplyr)  # FIX: %>%, filter() and mutate() below require dplyr
setwd("C:/Users/Alicia/Documents/GitHub/FL_Carbon")
envdata<-read.csv("LongleafRemeasEnvData.csv", header=T, sep=",")
setwd("C:/Users/Alicia/Desktop/FL")
load("longleaf_remeas_start.Rdata")  # provides plots.start (assumed - confirm)
load("longleaf_remeas_end.Rdata")    # provides plots.end (assumed - confirm)
# Depth-weighted average of SoilGrids carbon and nitrogen over 0-15 cm
# (1/3 weight on 0-5 cm, 2/3 on 5-15 cm), then C:N and a rescaled C:N.
envdata$SOILGRIDS_C_AVG<-((envdata$SOC0_5_NAD/10)*(1/3))+((envdata$SOC5_15NAD/10)*(2/3))
envdata$SOILGRIDS_N_AVG<-((envdata$N0_5_NAD/100)*(1/3))+((envdata$N5_15_NAD/100)*(2/3))
envdata$SOILGRIDS_CN<-envdata$SOILGRIDS_C_AVG/envdata$SOILGRIDS_N_AVG
envdata$SOILGRIDS_CN_SCALE<-(envdata$SOILGRIDS_CN*6.212)+24.634
diameter.totals<-list()
age.totals<-list()
diameter.totals.end<-list()
age.totals.end<-list()
plot_data_end<-list()
plot_data_start<- list()
names(envdata)[16]<-"aridity"
names(envdata)[17]<-"TPA_start"
names(envdata)[18]<-"TPA_end"
a<-c(1, 3:23)  # plot indices to process (plot 2 skipped)
# growth<-(10^(1.774252 -0.007376*CN + 0.008805*CN*aridity -1.127642*aridity -0.034125*temp))*(0.5718692*age[i,j]^(-0.4281308))
for (s in a){
  aridity<-envdata[s, 6]*0.0001
  temp<-envdata[s, 11]
  CN_scale<-envdata[s,15]
  envdata[s,16]<-envdata[s, 6]*0.0001
  # Live trees only (STATUSCD == "1"); age is back-calculated from diameter
  # by inverting the growth model in the comment above.
  plot_data_start[[s]]<-plots.start[[s]] %>%
    filter(STATUSCD=="1") %>%
    # mutate(TPA=ifelse(is.na(TPA_UNADJ), TPAGROW_UNADJ, TPA_UNADJ)) %>%
    mutate(TPA_total=sum(round(TPA_UNADJ))) %>%
    mutate(age=round((10^((1.774252/-0.5718692) + (-0.007376/-0.5718692)*CN_scale + (0.008805/-0.5718692)*CN_scale*aridity +
                            (-1.127642/-0.5718692)*aridity + (-0.034125/-0.5718692)*temp))*(DIA^(-1/-0.5718692))))
  # mutate(age=round((10^(-7.143884 + 0.02970085*CN_scale - 0.03545391*CN_scale*aridity + 4.540362*aridity + 0.137402*temp))*(DIA^1.748652)))
  # mutate(age=round((10^(-3.018915 + 1.2741*aridity))*(DIA^2.307065)))
  envdata[s,17]<-unique(plot_data_start[[s]]$TPA_total)
  # FIX: the original wrapped these assignments in
  # `for (h in 1:length(plot_data_start$DIA))`, which indexed the wrong object
  # (plot_data_start is a list, so $DIA is NULL and 1:0 runs the loop twice)
  # and never used h; a single direct assignment is equivalent. Each tree is
  # repeated round(TPA_UNADJ) times.
  diameter.totals[[s]]<-rep((plot_data_start[[s]]$DIA), round(plot_data_start[[s]]$TPA_UNADJ))
  age.totals[[s]]<-rep((plot_data_start[[s]]$age), round(plot_data_start[[s]]$TPA_UNADJ))
  # hist(diameter.totals[[s]], main = paste("Start plot", s), xlab = "Diameter (in)")
  # End-of-period inventory; TASB appears to be a per-record biomass estimate
  # (component equations on DIA in cm - confirm units) scaled by TPA.
  plot_data_end[[s]]<-plots.end[[s]] %>%
    filter(STATUSCD=="1") %>%
    # mutate(TPA=ifelse(is.na(TPA_UNADJ), TPAGROW_UNADJ, TPA_UNADJ)) %>%
    mutate(TPA_total=sum(round(TPA_UNADJ))) %>%
    mutate(age=round((10^((1.774252/-0.5718692) + (-0.007376/-0.5718692)*CN_scale + (0.008805/-0.5718692)*CN_scale*aridity +
                            (-1.127642/-0.5718692)*aridity + (-0.034125/-0.5718692)*temp))*(DIA^(-1/-0.5718692)))) %>%
    # mutate(age=round((10^(-7.143884 + 0.02970085*CN_scale - 0.03545391*CN_scale*aridity + 4.540362*aridity + 0.137402*temp))*(DIA^1.748652))) %>%
    mutate(TASB=((0.0725*((DIA*2.54)^2.5074))+(0.0016*((DIA*2.54)^3.0786))+(0.0214*((DIA*2.54)^2.0051)))*(round(TPA_UNADJ)))
  # mutate(TASB=(0.041281*((DIA*2.54)^2.722214))*(round(TPA_UNADJ)))
  envdata[s,18]<-unique(plot_data_end[[s]]$TPA_total)
  # FIX: same pointless loop removed here (see above).
  diameter.totals.end[[s]]<-rep((plot_data_end[[s]]$DIA), round(plot_data_end[[s]]$TPA_UNADJ))
  age.totals.end[[s]]<-rep((plot_data_end[[s]]$age), round(plot_data_end[[s]]$TPA_UNADJ))
}
# Ingrowth reconciliation: when the end inventory has more stems than the
# start, pad the start diameters with random small trees (0-5) and
# back-calculate their ages with the same inverted growth model.
diameters.new<-vector()
extra<-vector()
n.ingrowth<-vector()  # FIX: renamed from `diff`, which masked base::diff()
age.new<-vector()
age.start<-vector()
for (g in a){
  if ((length(diameter.totals.end[[g]])-length(diameter.totals[[g]]))>0) {
    diameters.new<-diameter.totals[[g]]
    n.ingrowth<-length(diameter.totals.end[[g]])-length(diameter.totals[[g]])
    extra<-runif(n.ingrowth, 0, 5)
    diameter.totals[[g]]<-c(diameters.new, extra)
    age.start<-age.totals[[g]]
    # age.new<-round((10^(-9.35027 + 0.03729744*envdata[g,15] - 0.04455875*envdata[g,16]*envdata[s,15] + 5.373766*envdata[g,16] + 0.1510293*envdata[g,11]))*(extra^2.257168))
    age.new<-round((10^((1.774252/-0.5718692) + (-0.007376/-0.5718692)*envdata[g,15] + (0.008805/-0.5718692)*envdata[g,15]*envdata[g,16]
                        +(-1.127642/-0.5718692)*envdata[g,16] + (-0.034125/-0.5718692)*envdata[g,11]))*(extra^(-1/-0.5718692)))
    # age.new<-round((10^(-7.143884 + 0.02970085*envdata[g,15] - 0.03545391*envdata[g,16]*envdata[g,15] + 4.540362*envdata[g,16] + 0.137402*envdata[g,11]))*(extra^1.748652))
    age.totals[[g]]<-c(age.start, age.new)
  }
}
# Persist the results. NOTE(review): diameter.totals is saved twice in the
# original; both writes are kept for output compatibility.
save(age.totals, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafAgeTotals2.rdata")
save(age.totals.end, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafAgeTotalsEnd2.rdata")
save(diameter.totals.end, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafDIATotalsEnd2.rdata")
save(diameter.totals, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafDIATotals2.rdata")
save(plot_data_end, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafPlotEnd2.rdata")
save(plot_data_start, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafPlotStart2.rdata")
save(diameter.totals, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafDIATotals2.rdata")
write.csv(envdata, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafEnvData2.csv")
# Size-class mortality probabilities (annualised over 8 years) taken from the
# lookup sketched below; fit a quadratic curve p(mort) ~ DIA + DIA^2.
# if (Diameter[i,j]<=3.94){ M<- rbinom(1,1,(.162/8))}
# else if (Diameter[i,j]>3.94 & Diameter[i,j]<=7.97){M<-rbinom(1,1,(.026/8))}
# else if (Diameter[i,j]>7.97 & Diameter[i,j]<=11.81){M<-rbinom(1,1,(.006/8))}
# else if (Diameter[i,j]>11.81 & Diameter[i,j]<=15.75){M<-rbinom(1,1,(.013/8))}
# else if (Diameter[i,j]>15.75 & Diameter[i,j]<=19.69){M<-rbinom(1,1,(.024/8))}
# else if (Diameter[i,j]>19.69 & Diameter[i,j]<=23.62){M<-rbinom(1,1,(.047/8))}
# else if (Diameter[i,j]>23.62 & Diameter[i,j]<=27.56){M<-rbinom(1,1,(.060/8))}
# else if (Diameter[i,j]>27.56){M<-rbinom(1,1,(0.129/8))}
x<-c(5, 15, 25, 35, 45, 55, 65, 75)  # size-class midpoints (units - confirm)
x2<-x^2
y<-c((.162/8), (.026/8), (.006/8), (.013/8), (.024/8), (.047/8), (.060/8), (0.129/8))
newx<-seq(0,80,0.5)
newx2<-newx^2
mod<-glm(y ~ x+x2)  # FIX: GAM() does not exist in R; a quadratic glm fit is intended
summary(mod)
plot(mod)
new.y <- predict(mod, list(x = newx, x2=newx2),type="response")
plot(x, y, pch = 16, xlab = "DIA", ylab = "p(mort)")
lines(newx, new.y)
|
/Longleaf Remeasurment/LLmortPlotCalc.R
|
no_license
|
alformanack/FL_Carbon
|
R
| false
| false
| 6,940
|
r
|
# LLmortPlotCalc: expand FIA longleaf-pine remeasurement plots into per-tree
# diameter and age vectors (each tree repeated by its rounded TPA expansion
# factor), reconcile ingrowth between the start and end inventories, save the
# results, and fit a quadratic diameter-dependent mortality curve.
#
# NOTE(review): absolute Windows paths make this script machine-specific;
# consider relative paths anchored at a project root, and avoid
# rm(list=ls())/setwd() in scripts.
rm(list=ls())
library(dplyr)  # FIX: %>%, filter() and mutate() below require dplyr
setwd("C:/Users/Alicia/Documents/GitHub/FL_Carbon")
envdata<-read.csv("LongleafRemeasEnvData.csv", header=T, sep=",")
setwd("C:/Users/Alicia/Desktop/FL")
load("longleaf_remeas_start.Rdata")  # provides plots.start (assumed - confirm)
load("longleaf_remeas_end.Rdata")    # provides plots.end (assumed - confirm)
# Depth-weighted average of SoilGrids carbon and nitrogen over 0-15 cm
# (1/3 weight on 0-5 cm, 2/3 on 5-15 cm), then C:N and a rescaled C:N.
envdata$SOILGRIDS_C_AVG<-((envdata$SOC0_5_NAD/10)*(1/3))+((envdata$SOC5_15NAD/10)*(2/3))
envdata$SOILGRIDS_N_AVG<-((envdata$N0_5_NAD/100)*(1/3))+((envdata$N5_15_NAD/100)*(2/3))
envdata$SOILGRIDS_CN<-envdata$SOILGRIDS_C_AVG/envdata$SOILGRIDS_N_AVG
envdata$SOILGRIDS_CN_SCALE<-(envdata$SOILGRIDS_CN*6.212)+24.634
diameter.totals<-list()
age.totals<-list()
diameter.totals.end<-list()
age.totals.end<-list()
plot_data_end<-list()
plot_data_start<- list()
names(envdata)[16]<-"aridity"
names(envdata)[17]<-"TPA_start"
names(envdata)[18]<-"TPA_end"
a<-c(1, 3:23)  # plot indices to process (plot 2 skipped)
# growth<-(10^(1.774252 -0.007376*CN + 0.008805*CN*aridity -1.127642*aridity -0.034125*temp))*(0.5718692*age[i,j]^(-0.4281308))
for (s in a){
  aridity<-envdata[s, 6]*0.0001
  temp<-envdata[s, 11]
  CN_scale<-envdata[s,15]
  envdata[s,16]<-envdata[s, 6]*0.0001
  # Live trees only (STATUSCD == "1"); age is back-calculated from diameter
  # by inverting the growth model in the comment above.
  plot_data_start[[s]]<-plots.start[[s]] %>%
    filter(STATUSCD=="1") %>%
    # mutate(TPA=ifelse(is.na(TPA_UNADJ), TPAGROW_UNADJ, TPA_UNADJ)) %>%
    mutate(TPA_total=sum(round(TPA_UNADJ))) %>%
    mutate(age=round((10^((1.774252/-0.5718692) + (-0.007376/-0.5718692)*CN_scale + (0.008805/-0.5718692)*CN_scale*aridity +
                            (-1.127642/-0.5718692)*aridity + (-0.034125/-0.5718692)*temp))*(DIA^(-1/-0.5718692))))
  # mutate(age=round((10^(-7.143884 + 0.02970085*CN_scale - 0.03545391*CN_scale*aridity + 4.540362*aridity + 0.137402*temp))*(DIA^1.748652)))
  # mutate(age=round((10^(-3.018915 + 1.2741*aridity))*(DIA^2.307065)))
  envdata[s,17]<-unique(plot_data_start[[s]]$TPA_total)
  # FIX: the original wrapped these assignments in
  # `for (h in 1:length(plot_data_start$DIA))`, which indexed the wrong object
  # (plot_data_start is a list, so $DIA is NULL and 1:0 runs the loop twice)
  # and never used h; a single direct assignment is equivalent. Each tree is
  # repeated round(TPA_UNADJ) times.
  diameter.totals[[s]]<-rep((plot_data_start[[s]]$DIA), round(plot_data_start[[s]]$TPA_UNADJ))
  age.totals[[s]]<-rep((plot_data_start[[s]]$age), round(plot_data_start[[s]]$TPA_UNADJ))
  # hist(diameter.totals[[s]], main = paste("Start plot", s), xlab = "Diameter (in)")
  # End-of-period inventory; TASB appears to be a per-record biomass estimate
  # (component equations on DIA in cm - confirm units) scaled by TPA.
  plot_data_end[[s]]<-plots.end[[s]] %>%
    filter(STATUSCD=="1") %>%
    # mutate(TPA=ifelse(is.na(TPA_UNADJ), TPAGROW_UNADJ, TPA_UNADJ)) %>%
    mutate(TPA_total=sum(round(TPA_UNADJ))) %>%
    mutate(age=round((10^((1.774252/-0.5718692) + (-0.007376/-0.5718692)*CN_scale + (0.008805/-0.5718692)*CN_scale*aridity +
                            (-1.127642/-0.5718692)*aridity + (-0.034125/-0.5718692)*temp))*(DIA^(-1/-0.5718692)))) %>%
    # mutate(age=round((10^(-7.143884 + 0.02970085*CN_scale - 0.03545391*CN_scale*aridity + 4.540362*aridity + 0.137402*temp))*(DIA^1.748652))) %>%
    mutate(TASB=((0.0725*((DIA*2.54)^2.5074))+(0.0016*((DIA*2.54)^3.0786))+(0.0214*((DIA*2.54)^2.0051)))*(round(TPA_UNADJ)))
  # mutate(TASB=(0.041281*((DIA*2.54)^2.722214))*(round(TPA_UNADJ)))
  envdata[s,18]<-unique(plot_data_end[[s]]$TPA_total)
  # FIX: same pointless loop removed here (see above).
  diameter.totals.end[[s]]<-rep((plot_data_end[[s]]$DIA), round(plot_data_end[[s]]$TPA_UNADJ))
  age.totals.end[[s]]<-rep((plot_data_end[[s]]$age), round(plot_data_end[[s]]$TPA_UNADJ))
}
# Ingrowth reconciliation: when the end inventory has more stems than the
# start, pad the start diameters with random small trees (0-5) and
# back-calculate their ages with the same inverted growth model.
diameters.new<-vector()
extra<-vector()
n.ingrowth<-vector()  # FIX: renamed from `diff`, which masked base::diff()
age.new<-vector()
age.start<-vector()
for (g in a){
  if ((length(diameter.totals.end[[g]])-length(diameter.totals[[g]]))>0) {
    diameters.new<-diameter.totals[[g]]
    n.ingrowth<-length(diameter.totals.end[[g]])-length(diameter.totals[[g]])
    extra<-runif(n.ingrowth, 0, 5)
    diameter.totals[[g]]<-c(diameters.new, extra)
    age.start<-age.totals[[g]]
    # age.new<-round((10^(-9.35027 + 0.03729744*envdata[g,15] - 0.04455875*envdata[g,16]*envdata[s,15] + 5.373766*envdata[g,16] + 0.1510293*envdata[g,11]))*(extra^2.257168))
    age.new<-round((10^((1.774252/-0.5718692) + (-0.007376/-0.5718692)*envdata[g,15] + (0.008805/-0.5718692)*envdata[g,15]*envdata[g,16]
                        +(-1.127642/-0.5718692)*envdata[g,16] + (-0.034125/-0.5718692)*envdata[g,11]))*(extra^(-1/-0.5718692)))
    # age.new<-round((10^(-7.143884 + 0.02970085*envdata[g,15] - 0.03545391*envdata[g,16]*envdata[g,15] + 4.540362*envdata[g,16] + 0.137402*envdata[g,11]))*(extra^1.748652))
    age.totals[[g]]<-c(age.start, age.new)
  }
}
# Persist the results. NOTE(review): diameter.totals is saved twice in the
# original; both writes are kept for output compatibility.
save(age.totals, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafAgeTotals2.rdata")
save(age.totals.end, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafAgeTotalsEnd2.rdata")
save(diameter.totals.end, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafDIATotalsEnd2.rdata")
save(diameter.totals, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafDIATotals2.rdata")
save(plot_data_end, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafPlotEnd2.rdata")
save(plot_data_start, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafPlotStart2.rdata")
save(diameter.totals, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafDIATotals2.rdata")
write.csv(envdata, file="C:/Users/Alicia/Documents/GitHub/FL_Carbon/Longleaf Remeasurment/longleafEnvData2.csv")
# Size-class mortality probabilities (annualised over 8 years) taken from the
# lookup sketched below; fit a quadratic curve p(mort) ~ DIA + DIA^2.
# if (Diameter[i,j]<=3.94){ M<- rbinom(1,1,(.162/8))}
# else if (Diameter[i,j]>3.94 & Diameter[i,j]<=7.97){M<-rbinom(1,1,(.026/8))}
# else if (Diameter[i,j]>7.97 & Diameter[i,j]<=11.81){M<-rbinom(1,1,(.006/8))}
# else if (Diameter[i,j]>11.81 & Diameter[i,j]<=15.75){M<-rbinom(1,1,(.013/8))}
# else if (Diameter[i,j]>15.75 & Diameter[i,j]<=19.69){M<-rbinom(1,1,(.024/8))}
# else if (Diameter[i,j]>19.69 & Diameter[i,j]<=23.62){M<-rbinom(1,1,(.047/8))}
# else if (Diameter[i,j]>23.62 & Diameter[i,j]<=27.56){M<-rbinom(1,1,(.060/8))}
# else if (Diameter[i,j]>27.56){M<-rbinom(1,1,(0.129/8))}
x<-c(5, 15, 25, 35, 45, 55, 65, 75)  # size-class midpoints (units - confirm)
x2<-x^2
y<-c((.162/8), (.026/8), (.006/8), (.013/8), (.024/8), (.047/8), (.060/8), (0.129/8))
newx<-seq(0,80,0.5)
newx2<-newx^2
mod<-glm(y ~ x+x2)  # FIX: GAM() does not exist in R; a quadratic glm fit is intended
summary(mod)
plot(mod)
new.y <- predict(mod, list(x = newx, x2=newx2),type="response")
plot(x, y, pch = 16, xlab = "DIA", ylab = "p(mort)")
lines(newx, new.y)
|
#' Numeric scatter chart
#'
#' Use this function to create scatter plot with both x-axis and y-axis containing numeric variables
#' and with an optional color variable which is a factor variable
#'
#' @author Vedha Viyash
#'
#' @param plot_data This is the data.frame which will be used for the plot
#' @param x_name The column name of the variable in the x-axis, It has to be a numeric variable
#' @param y_name The column name of the variable in the y-axis, It has to be a numeric variable
#' @param color_name The column name of the variable in the color axis, It has to be a factor variable
#' @param static_color string. If the colour_name is not specified, this color will be filled for the plot
#' @param hover_name The column name of the variable that will be present in the hovers, can also be a normal vector.
#' @param plot_height num. The height of the plot, If not provided the plot will take the whole area available in the UI
#' @param plot_width num. The width of the plot; if not provided the plot will take the whole area available in the UI
#' @param show_legend boolean. This will let you display or hide the grids in the x-axis, default shown
#' @param show_x_axis_grid boolean. This will let you display or hide the grids in the x-axis, default hidden
#' @param show_y_axis_grid boolean. This will let you display or hide the grids in the y-axis, default hidden
#' @param marker_size num. This is the size of the markers in the scatter plot, defaults to marker_default_size from constants.R
#' @param plot_title string. This is the title of the plot, defaults to NULL
#' @param x_axis_title string. This is the x-axis title of the plot, defaults to NULL
#' @param y_axis_title string. This is the y-axis title of the plot, defaults to NULL
#' @return Returns a plotly plot object which can be rendered as HTML
#'
#' @examples
#'
#' ## You can plot by just using a data.frame, x-axis and y-axis.
#' numeric_scatter_chart(iris, Sepal.Length, Petal.Length)
#'
#' ## You can also change the color of the plot by specifying `static_color`
#' numeric_scatter_chart(iris, Sepal.Length, Petal.Length, static_color = "#f55b96")
#'
#' ## You can pass a third variable to get a plot with the color axis
#' numeric_scatter_chart(iris, Sepal.Length, Petal.Length, color_name = Species)
#'
#' @import plotly
#' @importFrom magrittr %>%
#' @export
numeric_scatter_chart <- function(plot_data, x_name, y_name, color_name = NULL, static_color = "#4dbd5b",
                                  hover_name = NULL, plot_height = NULL, plot_width = NULL,
                                  show_legend = TRUE, show_x_axis_grid = FALSE, show_y_axis_grid = FALSE,
                                  marker_size = marker_default_size, plot_title = NULL,
                                  x_axis_title = NULL, y_axis_title = NULL) {
  # Capture the column expressions (tidy eval) and resolve the x / y vectors.
  x_name <- rlang::enquo(x_name)
  x_value <- plot_data[[rlang::quo_name(x_name)]]
  y_name <- rlang::enquo(y_name)
  y_value <- plot_data[[rlang::quo_name(y_name)]]
  color_name <- rlang::enquo(color_name)
  hover_name <- rlang::enquo(hover_name)
  # (The original computed x_min/x_max from range() +/- sd/2 but never used
  # them; those unused locals have been removed.)

  # Resolve the colour axis. Without a colour variable a single static colour
  # is used and the legend is forced off; otherwise the factor column drives
  # the colour axis with a palette sized to its number of distinct levels.
  if (rlang::quo_is_null(color_name)) {
    show_legend <- FALSE
    color_value <- static_color
    colors_value <- static_color
    default_hover <- paste0(x_value, ", ", y_value)
  } else {
    color_value <- plot_data[[rlang::quo_name(color_name)]]
    colors_value <- discrete_color_palette[1:length(unique(color_value))]
    default_hover <- paste0(
      "X: ", x_value, "<br>Y: ", y_value,
      "<br>Color: ", color_value
    )
  }

  # Resolve hover text: a column of plot_data when hover_name names one, the
  # supplied vector otherwise, falling back to the generated label when no
  # hover variable is given.
  if (rlang::quo_is_null(hover_name)) {
    hover_value <- default_hover
  } else {
    hover_name_in_data <- plot_data[[rlang::quo_name(hover_name)]]
    if (is.null(hover_name_in_data)) {
      hover_value <- hover_name
    } else {
      hover_value <- hover_name_in_data
    }
  }

  # Build the scatter trace; the two colour cases previously duplicated this
  # entire call, which is now shared.
  plot <- plot_ly(
    plot_data,
    x = x_value,
    y = y_value,
    color = color_value,
    colors = colors_value,
    type = 'scatter',
    height = plot_height,
    width = plot_width,
    mode = 'markers',
    marker = list(size = marker_size),
    hoverinfo = 'text',
    hovertext = hover_value,
    textposition = 'top center',
    cliponaxis = FALSE
  ) %>% layout(
    title = plot_title, showlegend = show_legend,
    xaxis = list(title = x_axis_title, showgrid = show_x_axis_grid),
    yaxis = list(title = y_axis_title, showgrid = show_y_axis_grid),
    legend = list(y = 0.5, yanchor = "center")
  )
  return(plot)
}
|
/R/numeric_scatter_chart.R
|
no_license
|
vedhav/tidycharts
|
R
| false
| false
| 5,485
|
r
|
#' Numeric scatter chart
#'
#' Use this function to create scatter plot with both x-axis and y-axis containing numeric variables
#' and with an optional color variable which is a factor variable
#'
#' @author Vedha Viyash
#'
#' @param plot_data This is the data.frame which will be used for the plot
#' @param x_name The column name of the variable in the x-axis, It has to be a numeric variable
#' @param y_name The column name of the variable in the y-axis, It has to be a numeric variable
#' @param color_name The column name of the variable in the color axis, It has to be a factor variable
#' @param static_color string. If the colour_name is not specified, this color will be filled for the plot
#' @param hover_name The column name of the variable that will be present in the hovers, can also be a normal vector.
#' @param plot_height num. The height of the plot, If not provided the plot will take the whole area available in the UI
#' @param plot_width num. The width of the plot; if not provided the plot will take the whole area available in the UI
#' @param show_legend boolean. This will let you display or hide the grids in the x-axis, default shown
#' @param show_x_axis_grid boolean. This will let you display or hide the grids in the x-axis, default hidden
#' @param show_y_axis_grid boolean. This will let you display or hide the grids in the y-axis, default hidden
#' @param marker_size num. This is the size of the markers in the scatter plot, defaults to marker_default_size from constants.R
#' @param plot_title string. This is the title of the plot, defaults to NULL
#' @param x_axis_title string. This is the x-axis title of the plot, defaults to NULL
#' @param y_axis_title string. This is the y-axis title of the plot, defaults to NULL
#' @return Returns a plotly plot object which can be rendered as HTML
#'
#' @examples
#'
#' ## You can plot by just using a data.frame, x-axis and y-axis.
#' numeric_scatter_chart(iris, Sepal.Length, Petal.Length)
#'
#' ## You can also change the color of the plot by specifying `static_color`
#' numeric_scatter_chart(iris, Sepal.Length, Petal.Length, static_color = "#f55b96")
#'
#' ## You can pass a third variable to get a plot with the color axis
#' numeric_scatter_chart(iris, Sepal.Length, Petal.Length, color_name = Species)
#'
#' @import plotly
#' @importFrom magrittr %>%
#' @export
numeric_scatter_chart <- function(plot_data, x_name, y_name, color_name = NULL, static_color = "#4dbd5b",
                                  hover_name = NULL, plot_height = NULL, plot_width = NULL,
                                  show_legend = TRUE, show_x_axis_grid = FALSE, show_y_axis_grid = FALSE,
                                  marker_size = marker_default_size, plot_title = NULL,
                                  x_axis_title = NULL, y_axis_title = NULL) {
  # Capture the column expressions (tidy eval) and resolve the x / y vectors.
  x_name <- rlang::enquo(x_name)
  x_value <- plot_data[[rlang::quo_name(x_name)]]
  y_name <- rlang::enquo(y_name)
  y_value <- plot_data[[rlang::quo_name(y_name)]]
  color_name <- rlang::enquo(color_name)
  hover_name <- rlang::enquo(hover_name)
  # (The original computed x_min/x_max from range() +/- sd/2 but never used
  # them; those unused locals have been removed.)

  # Resolve the colour axis. Without a colour variable a single static colour
  # is used and the legend is forced off; otherwise the factor column drives
  # the colour axis with a palette sized to its number of distinct levels.
  if (rlang::quo_is_null(color_name)) {
    show_legend <- FALSE
    color_value <- static_color
    colors_value <- static_color
    default_hover <- paste0(x_value, ", ", y_value)
  } else {
    color_value <- plot_data[[rlang::quo_name(color_name)]]
    colors_value <- discrete_color_palette[1:length(unique(color_value))]
    default_hover <- paste0(
      "X: ", x_value, "<br>Y: ", y_value,
      "<br>Color: ", color_value
    )
  }

  # Resolve hover text: a column of plot_data when hover_name names one, the
  # supplied vector otherwise, falling back to the generated label when no
  # hover variable is given.
  if (rlang::quo_is_null(hover_name)) {
    hover_value <- default_hover
  } else {
    hover_name_in_data <- plot_data[[rlang::quo_name(hover_name)]]
    if (is.null(hover_name_in_data)) {
      hover_value <- hover_name
    } else {
      hover_value <- hover_name_in_data
    }
  }

  # Build the scatter trace; the two colour cases previously duplicated this
  # entire call, which is now shared.
  plot <- plot_ly(
    plot_data,
    x = x_value,
    y = y_value,
    color = color_value,
    colors = colors_value,
    type = 'scatter',
    height = plot_height,
    width = plot_width,
    mode = 'markers',
    marker = list(size = marker_size),
    hoverinfo = 'text',
    hovertext = hover_value,
    textposition = 'top center',
    cliponaxis = FALSE
  ) %>% layout(
    title = plot_title, showlegend = show_legend,
    xaxis = list(title = x_axis_title, showgrid = show_x_axis_grid),
    yaxis = list(title = y_axis_title, showgrid = show_y_axis_grid),
    legend = list(y = 0.5, yanchor = "center")
  )
  return(plot)
}
|
# Render 100 QQ plots (files v1001.png .. v1100.png), each comparing 1000
# uniform(0,1) draws against the uniform reference quantiles, with an
# identity line and three nested grids of "+" reference marks.
library(matlab)  # provides meshgrid() used for the reference-cross grids
par(family="HiraMaruProN-W4")  # Japanese-capable font (Hiragino Maru)
for ( i in 1001:1100 ) {
png( paste ("v",i,".png",sep=""),400,400)
par(mai=rep(0,4))  # zero margins: the QQ square fills the whole device
qqplot(0:1500/1500,runif(1000),xlim=0:1,ylim=0:1,type="s",lwd=3,xaxs="i",yaxs="i")
abline(0,1)  # identity line = perfect agreement with the uniform distribution
points(meshgrid(0:4/4,0:4/4) , pch=3,cex=3,col="gray70")
points(meshgrid(0:20/20,0:20/20) , pch=3,cex=.7, col="gray70")
points(meshgrid(0:100/100,0:100/100) , pch=3,cex=.1 , col="gray70")
dev.off()
}
par(mai=c(1,1,1,1))
# Distribution of the maximum deviation of 1000 sorted uniforms from the
# uniform grid (a KS-type statistic, scaled by 10), over 1e6 replications.
qqplot(0:300/300,a<-replicate( 1e6, max(abs(sort(runif(1000))-0:999/999)) )*10)
sum(a>0.51)  # how many replications exceed the 0.51 threshold
binom.test(1204,1e5)
## ここから実行結果
> sum(a>0.5)
[1] 11798
> sum(a>0.51)
[1] 9609
> binom.test(9609,1e6)
Exact binomial test
data: 9609 and 1e+06
number of successes = 9609, number of trials = 1e+06, p-value < 2.2e-16
alternative hypothesis: true probability of success is not equal to 0.5
95 percent confidence interval:
0.009418720 0.009802128
sample estimates:
probability of success
0.009609
>
> sum(a>0.6)
[1] 1289
> sum(a>0.7)
[1] 76
> max(a)
[1] 0.8689204
>
|
/unif1000qqplots.R
|
no_license
|
tulamili/r-snippets
|
R
| false
| false
| 1,056
|
r
|
library(matlab)
par(family="HiraMaruProN-W4")
for ( i in 1001:1100 ) {
png( paste ("v",i,".png",sep=""),400,400)
par(mai=rep(0,4))
qqplot(0:1500/1500,runif(1000),xlim=0:1,ylim=0:1,type="s",lwd=3,xaxs="i",yaxs="i")
abline(0,1)
points(meshgrid(0:4/4,0:4/4) , pch=3,cex=3,col="gray70")
points(meshgrid(0:20/20,0:20/20) , pch=3,cex=.7, col="gray70")
points(meshgrid(0:100/100,0:100/100) , pch=3,cex=.1 , col="gray70")
dev.off()
}
par(mai=c(1,1,1,1))
qqplot(0:300/300,a<-replicate( 1e6, max(abs(sort(runif(1000))-0:999/999)) )*10)
sum(a>0.51)
binom.test(1204,1e5)
## ここから実行結果
> sum(a>0.5)
[1] 11798
> sum(a>0.51)
[1] 9609
> binom.test(9609,1e6)
Exact binomial test
data: 9609 and 1e+06
number of successes = 9609, number of trials = 1e+06, p-value < 2.2e-16
alternative hypothesis: true probability of success is not equal to 0.5
95 percent confidence interval:
0.009418720 0.009802128
sample estimates:
probability of success
0.009609
>
> sum(a>0.6)
[1] 1289
> sum(a>0.7)
[1] 76
> max(a)
[1] 0.8689204
>
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/manipulateIPA.R
\name{manipulateIPA}
\alias{manipulateIPA}
\title{Perform interactive pathway analysis on rna and metabolite datasets}
\usage{
manipulateIPA(geneResult = NULL, metaboliteResult = NULL, method,
countThreshold = 2, p.adjustMethod = "BH", pathwayType = c("All",
"KEGG", "SMPDB", "Reactome"))
}
\arguments{
\item{geneResult}{Dataframe with transcript IDs as rownames, transcript significance values (between 0 and 1) as the first column, and optionally transcript fold change values as the second column. Defaults to NULL}
\item{metaboliteResult}{Dataframe with metabolite IDs as rownames, metabolite significance values (between 0 and 1) as the first column, and optionally metabolite fold change values as the second column. Defaults to NULL}
\item{method}{Method of pathway analysis to perform. Options include "fisher.exact", "EASE", "mean.significance", and "hypergeometric.test". Defaults to "fisher.exact".}
\item{countThreshold}{Integer indicating the minimum number of pathway members to require for a valid result. Defaults to 2.}
\item{p.adjustMethod}{Character indicating the type of multiple hypothesis correction to perform. See ?p.adjust() for details}
\item{pathwayType}{Pathway database to use for pathway analysis. Options include "KEGG", "SMPDB", "Reactome", or "All". Defaults to "All"}
}
\value{
Returns an interactive dataplot of top 5 significant pathways with ticker to adjust FDR thresholds
}
\description{
This takes dataframes with rna and metabolite info and performs interactive pathway analysis
}
\note{
ipa is powered by the following open source databases. Commercial use and/or redistribution may be restricted. Please see the respective terms-of-use pages and citations for more details.
KEGG
Terms of Use: http://www.kegg.jp/kegg/legal.html
Citations:
Kanehisa, M., Goto, S., Sato, Y., Kawashima, M., Furumichi, M., and Tanabe, M. Data, information, knowledge and principle: back to metabolism in KEGG. Nucleic Acids Res. 42, D199-D205 (2014).
Kanehisa, M. and Goto, S.; KEGG: Kyoto Encyclopedia of Genes and Genomes. Nucleic Acids Res. 28, 27-30 (2000).
SMPDB
Terms of Use: http://smpdb.ca/about
Citations:
Wishart DS, Frolkis A, Knox C, et al. SMPDB: The Small Molecule Pathway Database. Nucleic Acids Res. 2010 Jan;38(Database issue):D480-7.
Jewison T, Su Y, Disfany FM, et al. SMPDB 2.0: Big Improvements to the Small Molecule Pathway Database Nucleic Acids Res. 2014 Jan;42(Database issue):D478-84.
Reactome
Terms of Use: http://www.reactome.org/pages/about/license-agreement/
Citations:
Croft et al. 2014 PMID: 24243840
Milacic et al. 2012 PMID:24213504
}
\examples{
data(kData)
data(rData)
## Not run: manipulateIPA(kData, rData, method="fisher.exact") ## End(Not run)
}
\keyword{IPA}
|
/man/manipulateIPA.Rd
|
no_license
|
rramaker/IntegratedPathTools
|
R
| false
| false
| 2,861
|
rd
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/manipulateIPA.R
\name{manipulateIPA}
\alias{manipulateIPA}
\title{Perform interactive pathway analysis on rna and metabolite datasets}
\usage{
manipulateIPA(geneResult = NULL, metaboliteResult = NULL, method,
countThreshold = 2, p.adjustMethod = "BH", pathwayType = c("All",
"KEGG", "SMPDB", "Reactome"))
}
\arguments{
\item{geneResult}{Dataframe with transcript IDs as rownames, transcript signifigance values (between 0 and 1) as the first column, and optionally transcript fold change values as the second column. Defaults to NULL}
\item{metaboliteResult}{Dataframe with metabolite IDs as rownames, metabolite signifigance values (between 0 and 1) as the first column, and optionally metabolite fold change values as the second column. Defaults to NULL}
\item{method}{method Method of pathway analysis to perform. Options include "fisher.exact", "EASE", "mean.significance", and "hypergeometric.test". Defaults to "fisher.exact.}
\item{countThreshold}{Integer indicating the minimum number of pathway members to require for a valid result. Defaults to 2.}
\item{p.adjustMethod}{Character indicating the type of multiple hypothesis correction to perform. See ?p.adjust() for details}
\item{pathwayType}{Pathway database to use for pathway analysis. Options include "KEGG", "SMPDB", "Reactome", or "All". Defaults to "All"}
}
\value{
Returns an interactive dataplot of top 5 significant pathways with ticker to adjust FDR thresholds
}
\description{
This takes dataframes with rna and metabolite info and performs interactive pathway analysis
}
\note{
ipa is powered by the following open source databases. Commercial use and/or redistribution may restricted. Please see respective terms of use pages and citations for more details.
KEGG
Terms of Use: http://www.kegg.jp/kegg/legal.html
Citations:
Kanehisa, M., Goto, S., Sato, Y., Kawashima, M., Furumichi, M., and Tanabe, M. Data, information, knowledge and principle: back to metabolism in KEGG. Nucleic Acids Res. 42, D199-D205 (2014).
Kanehisa, M. and Goto, S.; KEGG: Kyoto Encyclopedia of Genes and Genomes. Nucleic Acids Res. 28, 27-30 (2000).
SMPDB
Terms of Use: http://smpdb.ca/about
Citations:
Wishart DS, Frolkis A, Knox C, et al. SMPDB: The Small Molecule Pathway Database. Nucleic Acids Res. 2010 Jan;38(Database issue):D480-7.
Jewison T, Su Y, Disfany FM, et al. SMPDB 2.0: Big Improvements to the Small Molecule Pathway Database Nucleic Acids Res. 2014 Jan;42(Database issue):D478-84.
Reactome
Terms of Use: http://www.reactome.org/pages/about/license-agreement/
Citations:
Croft et al. 2014 PMID: 24243840
Milacic et al. 2012 PMID:24213504
}
\examples{
data(kData)
data(rData)
## Not run: manipulateIPA(kData, rData, method="fisher.exact") ## End(Not run)
}
\keyword{IPA}
|
# Build plot2.png: Global Active Power (kilowatts) over 1-2 Feb 2007, drawn
# as a line with weekday tick labels on the x axis.
pwr <- read.csv("household_power_consumption.txt", sep = ";")

# Parse observation dates and keep only the two target days.
obs_dates <- as.Date(pwr$Date, "%d/%m/%Y")
window_start <- as.Date("01/02/2007", "%d/%m/%Y")
window_end <- as.Date("02/02/2007", "%d/%m/%Y")
spwr <- subset(pwr, obs_dates >= window_start & obs_dates <= window_end)

# Coerce the power column (3) to numeric; it may arrive as text, e.g. due to
# "?" missing-value markers in the raw file.
spwr[, 3] <- as.numeric(as.character(spwr[, 3]))

# One tick per day boundary (1440 one-minute observations per day).
tick_at <- c(1, 1441, 2880)
tick_labels <- c("Thu", "Fri", "Sat")

png(filename = "plot2.png", width = 480, height = 480)
with(spwr, plot(Global_active_power, type = "l",
                ylab = "Global Active Power (kilowatts)", xlab = "", xaxt = "n"))
axis(1, at = tick_at, labels = tick_labels)
dev.off()
|
/plot2.R
|
no_license
|
burubaxair/ExData_Plotting1
|
R
| false
| false
| 515
|
r
|
pwr<-read.csv("household_power_consumption.txt", sep = ";")
date1 <- as.Date(pwr$Date, "%d/%m/%Y")
DATE1 <- as.Date("01/02/2007", "%d/%m/%Y")
DATE2 <- as.Date("02/02/2007", "%d/%m/%Y")
spwr <- subset(pwr, date1>=DATE1 & date1<=DATE2)
spwr[,3]<-as.numeric(as.character(spwr[,3]))
z = c(1,1441,2880)
zl = c("Thu","Fri","Sat")
png(filename = "plot2.png", width = 480, height = 480)
with(spwr, plot(Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab="",xaxt="n"))
axis(1, at=z,labels=zl)
dev.off()
|
#' A model comparison toolkit.
#' \code{LCTMcompare}
#'
#' The function LCTMcompare gives a summary of comparison between fitted LCTM models.
#'
#' @param modelA is the output from hlme() R model or model is the output of SASmodelbuilder(oe, os, op, of) passed through it
#' @param modelB the model to be compared which is the output from hlme() R model or model is the output of SASmodelbuilder(oe, os, op, of) passed through it
#' @return A selection of model adequacy tests, including the APPA (average posterior probability of assignment), the OCC (odds of correct classification), entropy $E$, Relative entropy ($E_k$),
#' @references \url{https://bmjopen.bmj.com/content/8/7/e020683}
#' @examples
#' data(bmi_long, package='LCTMtools')
#' require(lcmm)
#' set.seed(999)
#' data(bmi_long, package = 'LCTMtools' )
#' # Use the hlme function from the 'lcmm' R package to fit a 2 class latent class trajectory model
#' model2classes <- lcmm::hlme(fixed = bmi ~ age + I(age^2),
#' mixture= ~ age,
#' random = ~ age,
#' ng = 2,
#' nwg = TRUE,
#' subject = "id",
#' data = bmi_long[1:500, ] )
#' # Compute model adequacy measures
#' LCTMtoolkit(model2classes)
#' # Compare with a 3 class model
#' model3classes <- lcmm::hlme(fixed = bmi ~ age + I(age^2),
#' mixture= ~ age,
#' random = ~ age,
#' ng = 3,
#' nwg = TRUE,
#' subject = "id",
#' data = bmi_long[1:500, ] )
#' LCTMtoolkit(model3classes)
#' LCTMcompare(model2classes, model3classes)
#' @export
LCTMcompare <- function(modelA, modelB) {
    # Build the rounded fit-statistic column (Entropy, Relative_entropy, BIC,
    # AIC) for one fitted model. The posterior class probabilities live in
    # columns "prob1".."probK" of model$pprob, where K is the class count
    # (pprob also carries the subject id and modal class, hence the "- 2").
    summarise_model <- function(model) {
        K <- ncol(model$pprob) - 2
        p <- model$pprob[, paste0("prob", 1:K)]
        round(t(data.frame(Entropy = entropy(p),
                           Relative_entropy = relative_entropy(p),
                           BIC = model$BIC, AIC = model$AIC)), 3)
    }
    # Note: the previous version duplicated this whole computation for each
    # model, and also computed a class-proportion vector PI that was never
    # used (its SAS branch read an undefined global `os`); both removed.
    Recommendation <- c("Close to zero", "Close to 1", "-", "-")
    output <- data.frame(summarise_model(modelA), summarise_model(modelB),
                         Recommendation)
    return(output)
}
|
/R/LCTMcompare.R
|
no_license
|
chandryou/LCTMtools
|
R
| false
| false
| 2,882
|
r
|
#' A model comparison toolkit.
#' \code{LCTMcompare}
#'
#' The function LCTMcompare gives a summary of comparison between fitted LCTM models.
#'
#' @param modelA is the output from hlme() R model or model is the output of SASmodelbuilder(oe, os, op, of) passed through it
#' @param modelB the model to be compared which is the output from hlme() R model or model is the output of SASmodelbuilder(oe, os, op, of) passed through it
#' @return A selection of model adequacy tests, including the APPA (average posterior probability of assignment), the OCC (odds of correct classification), entropy $E$, Relative entropy ($E_k$),
#' @references \url{https://bmjopen.bmj.com/content/8/7/e020683}
#' @examples
#' data(bmi_long, package='LCTMtools')
#' require(lcmm)
#' set.seed(999)
#' data(bmi_long, package = 'LCTMtools' )
#' # Use the hlme function from the 'lcmm' R package to fit a 2 class latent class trajectory model
#' model2classes <- lcmm::hlme(fixed = bmi ~ age + I(age^2),
#' mixture= ~ age,
#' random = ~ age,
#' ng = 2,
#' nwg = TRUE,
#' subject = "id",
#' data = bmi_long[1:500, ] )
#' # Compute model adequacy measures
#' LCTMtoolkit(model2classes)
#' # Compare with a 3 class model
#' model3classes <- lcmm::hlme(fixed = bmi ~ age + I(age^2),
#' mixture= ~ age,
#' random = ~ age,
#' ng = 3,
#' nwg = TRUE,
#' subject = "id",
#' data = bmi_long[1:500, ] )
#' LCTMtoolkit(model3classes)
#' LCTMcompare(model2classes, model3classes)
#' @export
LCTMcompare <- function(modelA, modelB) {
model <- modelA
n <- nrow(model$pprob)
K <- ncol(model$pprob) - 2
p <- model$pprob[, c(paste0("prob", 1:K, sep = ""))]
if (class(model$call) == "SAS") {
PI <- os$PI/100
} else {
PI <- exp(c(model$best[1:(K - 1)], 0))/(sum(exp(c(model$best[1:(K - 1)],
0))))
}
output1 <- t(data.frame(Entropy = entropy(p), Relative_entropy = relative_entropy(p),
BIC = model$BIC, AIC = model$AIC))
model <- modelB
n <- nrow(model$pprob)
K <- ncol(model$pprob) - 2
p <- model$pprob[, c(paste0("prob", 1:K, sep = ""))]
if (class(model$call) == "SAS") {
PI <- os$PI/100
} else {
PI <- exp(c(model$best[1:(K - 1)], 0))/(sum(exp(c(model$best[1:(K - 1)],
0))))
}
output2 <- t(data.frame(Entropy = entropy(p), Relative_entropy = relative_entropy(p),
BIC = model$BIC, AIC = model$AIC))
output1 <- round(output1, 3)
output2 <- round(output2, 3)
Recommendation <- c("Close to zero", "Close to 1", "-", "-")
output <- data.frame(output1, output2, Recommendation)
return(output)
}
|
# Auto-generated fuzz/regression harness (RcppDeepState-style): feeds
# edge-case numeric vectors -- including zero-length inputs and extreme
# magnitudes -- to the internal meteor ET0_Makkink routine, then prints the
# structure of whatever comes back. The exact constants are the recorded
# crash/valgrind reproduction inputs and must not be altered.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(-1.72131968218895e+83, -7.88781071482505e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615846100-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 782
|
r
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(-1.72131968218895e+83, -7.88781071482505e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
# Age-band labels used when tabulating results; the first entry is the
# all-ages total and the last band shown is 80-84.
Age.labels <- c('Total', '0', '1-4', '5-9', '10-14','15-19','20-24','25-29',
                '30-34','35-39','40-44','45-49','50-54','55-59','60-64','65-69',
                '70-74','75-79','80-84')
# Cause-of-death groupings used throughout the sensitivity analysis.
Category.labels <- c(
  'Smoking related cancer',
  'Non-Smoking related cancer',
  'Cardiovascular',
  'Respiratory-Infectious',
  'Respiratory-Non-infectious',
  'External',
  'Other')
sd.frommx <- function(mx,sex='f'){
i.openage <- length(mx)
OPENAGE <- i.openage - 1
RADIX <- 1
ax <- mx * 0 + .5
ax[1] <- AKm02a0(m0 = mx[1], sex = sex)
qx <- mx / (1 + (1 - ax) * mx)
qx[i.openage] <- ifelse(is.na(qx[i.openage]), NA, 1)
ax[i.openage] <- 1 / mx[i.openage]
px <- 1 - qx
px[is.nan(px)] <- 0
lx <- c(RADIX, RADIX * cumprod(px[1:OPENAGE]))
dx <- lx * qx
Lx <- lx - (1 - ax) * dx
Lx[i.openage ] <- lx[i.openage ] * ax[i.openage ]
Tx <- c(rev(cumsum(rev(Lx[1:OPENAGE]))),0) + Lx[i.openage]
ex <- Tx / lx
age <- 0:(i.openage-1)
vx <- sum(dx*(age+ax-ex[1L])^2)
sd <- sqrt(vx)
return(sd)
}
# Lifespan standard deviation from a stacked cause-specific rate vector:
# mxcvec is reshaped into a 111 x (n causes) matrix (ages 0-110), causes are
# summed to all-cause rates, and sd.frommx() is applied.
# NOTE(review): the default `mxcvec = c(m1)` references a global `m1` not
# defined in this file -- confirm callers always pass mxcvec explicitly.
sdfrommxc <- function(mxcvec = c(m1),sex){
  dim(mxcvec) <- c(111,length(mxcvec)/111)  # rows = ages 0-110, cols = causes
  mx <- rowSums(mxcvec)  # collapse causes to all-cause rates by age
  sd.frommx(mx,sex)
}
# NOTE(review): apparently an unfinished stub -- it only extracts the first
# Sex value from the data subset and returns it (invisibly, since the last
# expression is an assignment); no decomposition is performed here.
COD.decomp <- function(.SD){
  sex <- .SD$Sex[1]
}
# Andreev-Kingkade a0: average years lived during the first year of life by
# those dying in that year, as a piecewise-linear function of the infant
# mortality rate m0, with separate coefficients for each sex.
#
# m0:  infant mortality rate(s); vectorized.
# sex: "m" selects the male coefficients; any other value uses the female
#      ones. Recycled to the length of m0.
AKm02a0 <- function(m0, sex = "m"){
  sex <- rep(sex, length(m0))
  # Male piecewise-linear segments.
  a0_male <- ifelse(m0 < 0.0230, 0.14929 - 1.99545 * m0,
             ifelse(m0 < 0.08307, 0.02832 + 3.26201 * m0, 0.29915))
  # Female piecewise-linear segments.
  a0_female <- ifelse(m0 < 0.01724, 0.14903 - 2.05527 * m0,
               ifelse(m0 < 0.06891, 0.04667 + 3.88089 * m0, 0.31411))
  ifelse(sex == "m", a0_male, a0_female)
}
# Stepwise-replacement decomposition (Horiuchi, Wilmoth & Pletcher 2008).
#
# Splits the difference func(rates2, ...) - func(rates1, ...) into additive
# contributions of each element of the rate vector, by summing central-
# difference effects at N midpoints along the straight path from rates1 to
# rates2.
#
# func:   scalar-valued function of a rate vector; extra args passed via ...
# rates1: baseline rate vector.
# rates2: comparison rate vector, same length as rates1.
# N:      number of integration steps (larger N = more accurate split).
# Returns a numeric vector of per-element contributions whose sum
# approximates func(rates2, ...) - func(rates1, ...).
#
# (The previous version evaluated func(rates1) and func(rates2) up front and
# discarded the results; those two wasted calls have been removed.)
Decomp <- function (func, rates1, rates2, N, ...) {
    d <- rates2 - rates1
    n <- length(rates1)
    delta <- d/N
    # Midpoints of the N sub-intervals along the path; one column per step.
    x <- rates1 + d * matrix(rep(0.5:(N - 0.5)/N, length(rates1)),
        byrow = TRUE, ncol = N)
    cc <- matrix(0, nrow = n, ncol = N)
    for (j in seq_len(N)) {
        for (i in seq_len(n)) {
            # Central-difference effect of perturbing element i at step j.
            z <- rep(0, n)
            z[i] <- delta[i]/2
            cc[i, j] <- func((x[, j] + z), ...) - func((x[, j] -
                z), ...)
        }
    }
    return(rowSums(cc))
}
# Reshape one country's decomposition results (a list of per-year matrices,
# keyed by year) into a single long data.table with Country, year,
# age (0-110) and the decomposition columns.
# NOTE(review): both defaults reference a global object `x`; confirm callers
# always supply their own arguments.
my_reshape.function <- function(i=names(x)[1],DecompIn=x){
  Z <- DecompIn[[i]]
  Z.names <- names(Z)  # one element per year
  ZZ <- lapply(Z.names, function(ii,Z,i){
    Z2 <- Z[[as.character(ii)]]
    # Prefix each year's matrix with country / year / age identifier columns.
    XX <- cbind(Country=as.integer(i),year=as.integer(ii),age=0:110,Z2)
    XX
  }, Z = Z,i=i)
  # now stick it together
  D <- as.data.frame(do.call(rbind, ZZ))
  D <- data.table(D)
  #DT <- as.data.table(melt(D,
  #                  id.vars = list("state","year","age"),
  #                  variable.name = "Cause"))
  D
}
|
/R/Sensitivity Analysis/Functions_Sensitivity.R
|
permissive
|
jmaburto/Lifespan-inequality-Denmark
|
R
| false
| false
| 2,886
|
r
|
Age.labels <- c('Total', '0', '1-4', '5-9', '10-14','15-19','20-24','25-29',
'30-34','35-39','40-44','45-49','50-54','55-59','60-64','65-69',
'70-74','75-79','80-84')
Category.labels <- c(
'Smoking related cancer',
'Non-Smoking related cancer',
'Cardiovascular',
'Respiratory-Infectious',
'Respiratory-Non-infectious',
'External',
'Other')
sd.frommx <- function(mx,sex='f'){
i.openage <- length(mx)
OPENAGE <- i.openage - 1
RADIX <- 1
ax <- mx * 0 + .5
ax[1] <- AKm02a0(m0 = mx[1], sex = sex)
qx <- mx / (1 + (1 - ax) * mx)
qx[i.openage] <- ifelse(is.na(qx[i.openage]), NA, 1)
ax[i.openage] <- 1 / mx[i.openage]
px <- 1 - qx
px[is.nan(px)] <- 0
lx <- c(RADIX, RADIX * cumprod(px[1:OPENAGE]))
dx <- lx * qx
Lx <- lx - (1 - ax) * dx
Lx[i.openage ] <- lx[i.openage ] * ax[i.openage ]
Tx <- c(rev(cumsum(rev(Lx[1:OPENAGE]))),0) + Lx[i.openage]
ex <- Tx / lx
age <- 0:(i.openage-1)
vx <- sum(dx*(age+ax-ex[1L])^2)
sd <- sqrt(vx)
return(sd)
}
sdfrommxc <- function(mxcvec = c(m1),sex){
dim(mxcvec) <- c(111,length(mxcvec)/111)
mx <- rowSums(mxcvec)
sd.frommx(mx,sex)
}
COD.decomp <- function(.SD){
sex <- .SD$Sex[1]
}
AKm02a0 <- function(m0, sex = "m"){
sex <- rep(sex, length(m0))
ifelse(sex == "m",
ifelse(m0 < .0230, {0.14929 - 1.99545 * m0},
ifelse(m0 < 0.08307, {0.02832 + 3.26201 * m0},.29915)),
# f
ifelse(m0 < 0.01724, {0.14903 - 2.05527 * m0},
ifelse(m0 < 0.06891, {0.04667 + 3.88089 * m0}, 0.31411))
)
}
# Decomposition function based on Horiuchi etal 2008,
Decomp <-function (func, rates1, rates2, N, ...) {
y1 <- func(rates1, ...)
y2 <- func(rates2, ...)
d <- rates2 - rates1
n <- length(rates1)
delta <- d/N
x <- rates1 + d * matrix(rep(0.5:(N - 0.5)/N, length(rates1)),
byrow = TRUE, ncol = N)
cc <- matrix(0, nrow = n, ncol = N)
for (j in 1:N) {
for (i in 1:n) {
z <- rep(0, n)
z[i] <- delta[i]/2
cc[i, j] <- func((x[, j] + z), ...) - func((x[, j] -
z), ...)
}
}
return(rowSums(cc))
}
my_reshape.function <- function(i=names(x)[1],DecompIn=x){
Z <- DecompIn[[i]]
Z.names <- names(Z)
ZZ <- lapply(Z.names, function(ii,Z,i){
Z2 <- Z[[as.character(ii)]]
XX <- cbind(Country=as.integer(i),year=as.integer(ii),age=0:110,Z2)
XX
}, Z = Z,i=i)
# now stick it together
D <- as.data.frame(do.call(rbind, ZZ))
D <- data.table(D)
#DT <- as.data.table(melt(D,
# id.vars = list("state","year","age"),
# variable.name = "Cause"))
D
}
|
# plots correaltion scatter-plot with the abline: colonic butyrate ~ liver
# CONTROLS
# Coefficients:
# Estimate SE t-value Pr(>|t|)
# (Intercept) 2.130286 0.089043 23.92 9.36e-13 ***
# butyrate_CD$butyrate 0.045983 0.005872 7.83 1.76e-06 ***
# Adjusted R-squared: 0.80 at 14 DF
library(readxl)
library(ggplot2)
#fetches the data from an excel file
butyrate_CD<-
read_xlsx("C:/Users/stia/OneDrive - Norwegian University of Life Sciences/FOODSofNORWAY/FON_011/FON_011a/1_Manuscript/Caroline_fon11a/butyrate-CD.xlsx")
# Fit regression line
# Build a plotmath-parsable correlation label such as "italic(r) ==  0.8",
# suitable for annotating a ggplot via geom_text(parse = TRUE).
#
# x, y:   numeric vectors to correlate.
# digits: rounding applied to the Pearson correlation coefficient.
corr_eqn <- function(x, y, digits = 2) {
  r_rounded <- round(cor(x, y), digits = digits)
  paste("italic(r) == ", r_rounded)
}
# Annotation data for the correlation label, placed at (x = 14, y = 3.5).
# NOTE(review): the label is computed on the FULL data set here, while the
# points and fit below use only the yeast-diet subset -- confirm that this
# ordering is intentional.
labels = data.frame(x = 14, y = 3.5, label = corr_eqn(butyrate_CD$butyrate,
                                   butyrate_CD$liver))
# keep only the yeast-diet animals
butyrate_CD<-butyrate_CD[butyrate_CD$diet=="y",]
# Scatter of colonic butyrate vs liver index with a linear fit and the
# correlation annotation.
# NOTE(review): theme_minimal() below replaces the long theme() block that
# precedes it; most of those settings therefore have no effect.
pl5<-ggplot(butyrate_CD, aes(x = butyrate, y = liver)) +
  geom_point(shape = 19, size = 4, aes(colour = diet)) +
  scale_color_manual(values=c( "#009E73"))+
 # scale_fill_manual(values=c("#CC79A7","#009E73"))
  geom_smooth(colour = "#009E73", fill = "lightgreen", method = 'lm') +
  # ggtitle("Example") +
  ylab("LIVER INDEX") +
  xlab("COLONIC BUTYRATE, µM/g") +
  theme(legend.key = element_blank(),
        legend.background = element_rect(colour = 'black'),
        legend.position = "bottom",
        legend.title = element_blank(),
        plot.title = element_text(lineheight = .8, face = "bold", vjust = 1),
        axis.text.x = element_text(size = 11, vjust = 0.5,
                                   hjust = 1, colour = 'black'),
        axis.text.y = element_text(size = 11, colour = 'black'),
        axis.title = element_text(size = 10, face = 'bold'),
        axis.line = element_line(colour = "black"),
        plot.background = element_rect(colour = 'black', size = 1),
        panel.background = element_blank()) +
  theme_minimal()+
  geom_text(data = labels, aes(x = x, y = y,
                               label = label), parse = TRUE)+
  theme(legend.position = 'none')+
  theme(panel.grid = element_blank())
pl5
|
/Fig-S2C.R
|
no_license
|
stan-iakhno/FON11A
|
R
| false
| false
| 2,193
|
r
|
# plots correaltion scatter-plot with the abline: colonic butyrate ~ liver
# CONTROLS
# Coefficients:
# Estimate SE t-value Pr(>|t|)
# (Intercept) 2.130286 0.089043 23.92 9.36e-13 ***
# butyrate_CD$butyrate 0.045983 0.005872 7.83 1.76e-06 ***
# Adjusted R-squared: 0.80 at 14 DF
library(readxl)
library(ggplot2)
#fetches the data from an excel file
butyrate_CD<-
read_xlsx("C:/Users/stia/OneDrive - Norwegian University of Life Sciences/FOODSofNORWAY/FON_011/FON_011a/1_Manuscript/Caroline_fon11a/butyrate-CD.xlsx")
# Fit regression line
corr_eqn <- function(x,y, digits = 2) {
corr_coef <- round(cor(x, y), digits = digits)
paste("italic(r) == ", corr_coef)
}
labels = data.frame(x = 14, y = 3.5, label = corr_eqn(butyrate_CD$butyrate,
butyrate_CD$liver))
#subset the yeast
butyrate_CD<-butyrate_CD[butyrate_CD$diet=="y",]
pl5<-ggplot(butyrate_CD, aes(x = butyrate, y = liver)) +
geom_point(shape = 19, size = 4, aes(colour = diet)) +
scale_color_manual(values=c( "#009E73"))+
# scale_fill_manual(values=c("#CC79A7","#009E73"))
geom_smooth(colour = "#009E73", fill = "lightgreen", method = 'lm') +
# ggtitle("Example") +
ylab("LIVER INDEX") +
xlab("COLONIC BUTYRATE, µM/g") +
theme(legend.key = element_blank(),
legend.background = element_rect(colour = 'black'),
legend.position = "bottom",
legend.title = element_blank(),
plot.title = element_text(lineheight = .8, face = "bold", vjust = 1),
axis.text.x = element_text(size = 11, vjust = 0.5,
hjust = 1, colour = 'black'),
axis.text.y = element_text(size = 11, colour = 'black'),
axis.title = element_text(size = 10, face = 'bold'),
axis.line = element_line(colour = "black"),
plot.background = element_rect(colour = 'black', size = 1),
panel.background = element_blank()) +
theme_minimal()+
geom_text(data = labels, aes(x = x, y = y,
label = label), parse = TRUE)+
theme(legend.position = 'none')+
theme(panel.grid = element_blank())
pl5
|
#!/usr/bin/env RScript
# Snakemake script: flag outlier tips of a time-scaled phylogeny.
# Inputs:  [1] Newick tree, [2] FASTA alignment.
# Params:  [1] q-value cutoff alpha, [2] CPU count.
# Output:  one tip label per line whose outlier q-value is >= alpha.
pacman::p_load(treedater)
args <- c()  # NOTE(review): unused -- all inputs come from the snakemake object
path1 <- snakemake@input[[1]]  # tree file
path2 <- snakemake@input[[2]]  # alignment file
alpha <- as.double(snakemake@params[[1]])
ncpu <- as.integer(snakemake@params[[2]])
out <- snakemake@output[[1]]
tree <- read.tree(path1)
# Sampling dates are parsed from ISO dates embedded in the tip labels.
date <- sampleYearsFromLabels(tree$tip.label, regex = "\\d{4}-\\d{2}-\\d{2}")
slen <- ncol(read.dna(path2, format = "fasta", as.character = T))  # alignment length
# Rough clock rate from a one-category discrete chronos fit, used to seed dater().
chr <- ape::chronos(tree, model = "discrete", control = chronos.control(nb.rate.cat = 1))
rate <- attr(chr, "rates")
x <- capture.output(  # capture.output() silences treedater's console chatter
  tsim <- dater(
    tree, date, slen, clock = "strict",
    omega0 = rate, numStartConditions = 0, ncpu = ncpu
  )
)
x <- capture.output(tips <- outlierTips(tsim))
# Write tips whose q-value meets the cutoff, one label per line.
writeLines(as.character(dplyr::pull(dplyr::filter(tips, q >= alpha), taxon)), out)
|
/workflow/scripts/outliers.R
|
permissive
|
dnanto/proposal
|
R
| false
| false
| 799
|
r
|
#!/usr/bin/env RScript
pacman::p_load(treedater)
args <- c()
path1 <- snakemake@input[[1]]
path2 <- snakemake@input[[2]]
alpha <- as.double(snakemake@params[[1]])
ncpu <- as.integer(snakemake@params[[2]])
out <- snakemake@output[[1]]
tree <- read.tree(path1)
date <- sampleYearsFromLabels(tree$tip.label, regex = "\\d{4}-\\d{2}-\\d{2}")
slen <- ncol(read.dna(path2, format = "fasta", as.character = T))
chr <- ape::chronos(tree, model = "discrete", control = chronos.control(nb.rate.cat = 1))
rate <- attr(chr, "rates")
x <- capture.output(
tsim <- dater(
tree, date, slen, clock = "strict",
omega0 = rate, numStartConditions = 0, ncpu = ncpu
)
)
x <- capture.output(tips <- outlierTips(tsim))
writeLines(as.character(dplyr::pull(dplyr::filter(tips, q >= alpha), taxon)), out)
|
context('conversion functions')
# tas_conversion shifts temperatures by -273.15, i.e. Kelvin -> Celsius.
test_that('tas_conversion', {
  # Check to see that it works as expected with a single input
  input <- 273
  out <- tas_conversion(input)
  expect_equal(input, out + 273.15)
  # Check to see that it works with a matrix input
  input <- matrix(data = rep(273), nrow = 4, ncol = 5)
  out <- tas_conversion(input)
  expect_equal(unique(as.vector(out)), -0.15)
})
# pr_conversion scales each month's rate by the number of seconds in that
# month, using the YYYYMM row names to determine month lengths.
test_that('pr_conversion', {
  # Create a matrix of values with the YYYYMM as row names.
  pr <- matrix(10, nrow = 5, ncol = 5)
  row.names(pr) <- c(200001:200005)
  # Convert pr, then check the output.
  out <- pr_conversion(pr)
  # Since we constructed a pr matrix with constant values we expect the rows to have a
  # single unique value, so the unique rows should be a vector equal in length to the
  # time element of pr (the columns).
  unique_rows <- apply(out, 1, unique)
  expect_null(dim(unique_rows))
  expect_equal(length(unique_rows), ncol(pr))
  # Now check the entries in the first column, it should equal 10 * the number of seconds in the
  # the time step.
  #
  # Calculate the expect number of seconds in each month.
  # (Feb 2000 is a leap year, hence 29 days.)
  dpm <- c(31, 29, 31, 30, 31) # days per month.
  spm <- dpm * 86400 # 86400 seconds per day
  expected_col <- 10*spm # original input was 10
  expect_equal(as.vector(out[,1]), expected_col)
  # Expect Errors
  # If the time of the input is not the YYYYMM format
  pr2 <- pr
  row.names(pr2) <- paste0(row.names(pr), '01')
  expect_error(pr_conversion(pr2), 'row.names of input must be YYYYMM format')
})
|
/tests/testthat/testthat_conversion_functions.R
|
no_license
|
JGCRI/an2month
|
R
| false
| false
| 1,603
|
r
|
context('conversion functions')
test_that('tas_conversion', {
# Check to see that it works as expected with a single input
input <- 273
out <- tas_conversion(input)
expect_equal(input, out + 273.15)
# Check to see that it works with a matrix input
input <- matrix(data = rep(273), nrow = 4, ncol = 5)
out <- tas_conversion(input)
expect_equal(unique(as.vector(out)), -0.15)
})
test_that('pr_conversion', {
# Create a matrix of values with the YYYYMM as row names.
pr <- matrix(10, nrow = 5, ncol = 5)
row.names(pr) <- c(200001:200005)
# Convert pr, then check the output.
out <- pr_conversion(pr)
# Since we constructed a pr matrix with constant values we expect the rows to have a
# single unique value, so the unique rows should be a vector equal in length to the
# time element of pr (the columns).
unique_rows <- apply(out, 1, unique)
expect_null(dim(unique_rows))
expect_equal(length(unique_rows), ncol(pr))
# Now check the entries in the first column, it should equal 10 * the number of seconds in the
# the time step.
#
# Calculate the expect number of seconds in each month.
dpm <- c(31, 29, 31, 30, 31) # days per month.
spm <- dpm * 86400 # 86400 seconds per day
expected_col <- 10*spm # original input was 10
expect_equal(as.vector(out[,1]), expected_col)
# Expect Errors
# If the time of the input is not the YYYYMM format
pr2 <- pr
row.names(pr2) <- paste0(row.names(pr), '01')
expect_error(pr_conversion(pr2), 'row.names of input must be YYYYMM format')
})
|
# Exercise 5.4: expected value of a discrete random variable
x <- c(-5, -4, 1, 3, 6)
p <- c(0.3, 0.1,0.1, 0.2, 0.3)
p_mal_x <- x*p  # elementwise products x_i * p_i
p_mal_x
erwartungswert <- sum(p_mal_x)  # expected value E[X] = sum(x_i * p_i)
erwartungswert
# Exercise 5.5: sum of two dice -- expected value, variance, std. deviation
x <- c(2:12)
x
p <- 1/36*c(1,2,3,4,5,6,5,4,3,2,1)  # triangular probabilities of the dice sum
p
erwartungswert <- sum(x*p)  # expected value
erwartungswert
varianzvektor <- (x-erwartungswert)^2*p  # squared deviations weighted by p
varianzvektor
varianz <- sum(varianzvektor)  # variance Var[X]
varianz
standardabweichung <- sqrt(varianz)  # standard deviation
|
/SW05/serie_05_exercise.R
|
no_license
|
GutManuel85/ASTAT
|
R
| false
| false
| 376
|
r
|
#Aufgabe 5.4
x <- c(-5, -4, 1, 3, 6)
p <- c(0.3, 0.1,0.1, 0.2, 0.3)
p_mal_x <- x*p
p_mal_x
erwartungswert <- sum(p_mal_x)
erwartungswert
#Aufgabe 5.5
x <- c(2:12)
x
p <- 1/36*c(1,2,3,4,5,6,5,4,3,2,1)
p
erwartungswert <- sum(x*p)
erwartungswert
varianzvektor <- (x-erwartungswert)^2*p
varianzvektor
varianz <- sum(varianzvektor)
varianz
standardabweichung <- sqrt(varianz)
|
# Exercise 3.1: read the lab roster; select name ('Имя') and height ('Рост')
# of the men (rows where sex 'Пол' == 'муж')
impLab <- read.csv("FGLab.csv",stringsAsFactors = F,row.names = 1)
Manlab <- impLab[impLab$Пол=='муж',c('Имя','Рост')]
Manlab
# Exercise 3.2: names of everyone except rows 2 and 6 (drop=F keeps a data.frame)
NameLab <- impLab[c(-2,-6), "Имя",drop=F]
NameLab
# Exercise 3.3: mean height of the women ('жен')
sr <- impLab[impLab$Пол=='жен','Рост']
mean(sr)
# Exercise 3.4: name of the man with the minimum age ('Возраст')
namemax <- impLab[impLab$Возраст == min(impLab[impLab$Пол=='муж',"Возраст"]) & impLab$Пол =='муж', 'Имя']
namemax
|
/Uprajnenine3.R
|
no_license
|
Code-sto/EVM
|
R
| false
| false
| 526
|
r
|
#Упражнение 3.1
impLab <- read.csv("FGLab.csv",stringsAsFactors = F,row.names = 1)
Manlab <- impLab[impLab$Пол=='муж',c('Имя','Рост')]
Manlab
#Упражнение 3.2
NameLab <- impLab[c(-2,-6), "Имя",drop=F]
NameLab
#Упражнение 3.3
sr <- impLab[impLab$Пол=='жен','Рост']
mean(sr)
#Упражнение 3.4
namemax <- impLab[impLab$Возраст == min(impLab[impLab$Пол=='муж',"Возраст"]) & impLab$Пол =='муж', 'Имя']
namemax
|
# Take bed files for each plate and turn into counts file.
# This iis an example using DCM1R1
files<-list.files()
DCM1R1files<-files[grep("_counts.bed.gz", files)]
DCM1R1 <- read.table(DCM1R1files[1], as.is=TRUE, header=FALSE, sep="\t")
DCM1R1counts <- as.data.frame(DCM1R1[,13], row.names = DCM1R1[,4])
for (i in 2:length(DCM1R1files)){
print(i)
DCM1R1 <- read.table(DCM1R1files[i], as.is=TRUE, header=FALSE, sep="\t")
DCM1R1counts<-cbind(DCM1R1counts, DCM1R1[,13])
}
DCM1R1names <- strsplit(DCM1R1files, "_")
DCM1R1names<-as.data.frame(matrix(unlist(DCM1R1names), ncol =2, byrow=TRUE))
colnames(DCM1R1counts)<-DCM1R1names[,1]
write.table(DCM1R1counts, gzfile("DCM1R1.data.gz"),sep="\t",quote=F)
save(DCM1R1counts, file="DCM1R1.data.Rd")
|
/Deep/bed2GeneCounts.R
|
permissive
|
piquelab/GxExC
|
R
| false
| false
| 772
|
r
|
# Take bed files for each plate and turn into counts file.
# This iis an example using DCM1R1
files<-list.files()
DCM1R1files<-files[grep("_counts.bed.gz", files)]
DCM1R1 <- read.table(DCM1R1files[1], as.is=TRUE, header=FALSE, sep="\t")
DCM1R1counts <- as.data.frame(DCM1R1[,13], row.names = DCM1R1[,4])
for (i in 2:length(DCM1R1files)){
print(i)
DCM1R1 <- read.table(DCM1R1files[i], as.is=TRUE, header=FALSE, sep="\t")
DCM1R1counts<-cbind(DCM1R1counts, DCM1R1[,13])
}
DCM1R1names <- strsplit(DCM1R1files, "_")
DCM1R1names<-as.data.frame(matrix(unlist(DCM1R1names), ncol =2, byrow=TRUE))
colnames(DCM1R1counts)<-DCM1R1names[,1]
write.table(DCM1R1counts, gzfile("DCM1R1.data.gz"),sep="\t",quote=F)
save(DCM1R1counts, file="DCM1R1.data.Rd")
|
robu.custom <- function(formula, data, studynum,var.eff.size, userweights,
modelweights = c("CORR", "HIER"), rho = 0.8,
small = TRUE, userdfs = NULL, ...) {
# Evaluate model weighting scheme.
modelweights <- match.arg(modelweights)
if (modelweights == "CORR" && rho > 1 | rho < 0)
stop ("Rho must be a value between 0 and 1.")
if (missing(userweights)){
user_weighting = FALSE
} else {
user_weighting = TRUE
}
cl <- match.call() # Full model call
mf <- match.call(expand.dots = FALSE)
ml <- mf[[2]] # Model formula
m <- match(c("formula", "data", "studynum",
"var.eff.size", "userweights"), names(mf))
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- as.name("model.frame")
mf <- eval(mf, parent.frame())
if(!user_weighting){
dframe <- data.frame(effect.size = mf[,1],
stats::model.matrix(formula, mf),
studynum = mf[["(studynum)"]],
var.eff.size = mf[["(var.eff.size)"]])
X.full.names <- names(dframe)[-match(c("effect.size",
"studynum",
"var.eff.size"),
names(dframe))]
} else { # Begin userweights
dframe <- data.frame(effect.size = mf[,1],
stats::model.matrix(formula, mf),
studynum = mf[["(studynum)"]],
var.eff.size = mf[["(var.eff.size)"]],
userweights = mf[["(userweights)"]])
X.full.names <- names(dframe)[-match(c("effect.size",
"studynum",
"userweights",
"var.eff.size"),
names(dframe))]
} # End userweights
study_orig_id <- dframe$studynum
dframe$study <- as.factor(dframe$studynum)
dframe$study <- as.numeric(dframe$study)
dframe <- dframe[order(dframe$study),]
k_temp <- as.data.frame(unclass(rle(sort(dframe$study))))
dframe$k <- k_temp[[1]][ match(dframe$study, k_temp[[2]])]
dframe$avg.var.eff.size <- stats::ave(dframe$var.eff.size, dframe$study)
dframe$sd.eff.size <- sqrt(dframe$var.eff.size)
switch(modelweights,
HIER = { # Begin HIER
dframe$weights <- 1 / dframe$var.eff.size
}, # End HIER
CORR = { # Begin CORR
dframe$weights <- 1 / (dframe$k * dframe$avg.var.eff.size)
} # End CORR
)
X.full <- dframe[c("study", X.full.names)]
data.full.names <- names(dframe)[-match(c("studynum",X.full.names),
names(dframe))]
data.full <- dframe[c(data.full.names)]
k <- data.full[ !duplicated(data.full$study), ]$k
k_list <- as.list(k)
M <- nrow(data.full) # Number of units in analysis
p <- ncol(X.full) - 2 # Number of (non-intercept) covariates
N <- max(data.full$study) # Number of studies
W <- as.matrix(by(data.full$weights, data.full$study,
function(x) diag(x, nrow = length(x)),
simplify = FALSE))
X <- data.matrix(X.full)
X <- lapply(split(X[,2:(p + 2)], X[,1]), matrix, ncol = p + 1)
y <- by(data.full$effect.size, data.full$study,
function(x) matrix(x))
J <- by(rep(1, nrow(X.full)), X.full$study,
function(x) matrix(x, nrow = length(x),
ncol = length(x)))
sigma <- by(data.full$sd.eff.size, data.full$study,
function(x) tcrossprod(x))
vee <- by(data.full$var.eff.size, data.full$study,
function(x) diag(x, nrow = length(x)))
SigmV <- Map(function(sigma, V)
sigma - V, sigma, vee)
sumXWX <- Reduce("+", Map(function(X, W)
t(X) %*% W %*% X,
X, W))
sumXWy <- Reduce("+", Map(function(X, W, y)
t(X) %*% W %*% y,
X, W, y))
sumXWJWX <- Reduce("+", Map(function(X, W, J)
t(X) %*% W %*% J %*% W %*% X,
X, W, J))
Matrx_WKXX <- Reduce("+",
Map(function(X, W, k) { t(X) %*% (W / k) %*% X},
X, W, k_list))
Matrx_wk_XJX_XX <- Reduce("+",
Map(function(X, W, J, k) {(W / k)[1,1] * ( t(X) %*% J %*% X - t(X) %*% X) },
X, W, J, k_list))
switch(modelweights,
HIER = { # Begin HIER
tr.sumJJ <- Reduce("+", Map(function(J)
sum(diag(J %*% J)),
J))
sumXJX <- Reduce("+", Map(function(X, J)
t(X) %*% J %*% X,
X, J))
sumXWJJX <- Reduce("+", Map(function(X, W, J)
t(X) %*% W %*% J %*% J %*% X,
X, W, J))
sumXJJWX <- Reduce("+", Map(function(X, W, J)
t(X) %*% J %*% J %*% W %*% X,
X, W, J))
sumXWWX <- Reduce("+", Map(function(X, W)
t(X) %*% W %*% W %*% X,
X, W))
sumXJWX <- Reduce("+", Map(function(X, W, J)
t(X) %*% J %*% W %*% X,
X , W, J))
sumXWJX <- Reduce("+", Map(function(X, W, J)
t(X) %*% W %*% J %*% X,
X, W, J))
} # End HIER
)
b <- solve(sumXWX) %*% sumXWy
Xreg <- as.matrix(X.full[-c(1)], dimnames = NULL)
data.full$pred <- Xreg %*% b
data.full$e <- data.full$effect.size - data.full$pred
if (!user_weighting) {
switch(modelweights,
HIER = { # Begin HIER
# Sigma_aj = tau.sq * J_j + omega.sq * I_j + V_j
# Qe is sum of squares 1
# Qe = Sigma(T'WT)-(Sigma(T'WX)(Sigma(X'WX))^-1(Sigma(X'WT)
# where W = V^(-1) and V = data.full$var.eff.size
# Also, Qe = (y-xb)' W (y-xb)
sumV <- sum(data.full$var.eff.size)
W <- diag(1 / data.full$var.eff.size)
sumW <- sum(W)
Qe <- t(data.full$e) %*% W %*% data.full$e
# Qa is sum of squares 2
# Qa = sum(T-XB.hat)'J(T-XB.hat)
# where B.hat = (X'WX)^-1(X'WT)
# Also, Qa = (y-xb)'A (y-xb), A=diag(J)
e <- by(data.full$e, data.full$study, function(x) matrix(x))
sumEJE <- Reduce("+", Map(function(e, J) t(e) %*% J %*% e, e, J))
Qa <- sumEJE
# MoM estimators for tau.sq and omega.sq can be written as
# omega.sq.h = A2(Qa-C1)-A1(Qe-C2) / B1A2-B2A1
# tau.sq.h = Qe-C2/A2 - omega.sq.h(B2/A2) where
# Vi = (t(X)WX)^-1
V.i <- solve(sumXWX)
# A1 = Sigma(kj^2) - tr(V*Sigma(kj*t(Xj)*Jj*Wj*Xj)) -
# tr(V*Sigma(kj*t(Xj)*Jj*Wj*Xj)) +
# tr(V*[Sigma(t(Xj)*Jj*Xj)]*V*Sigma(t(Xj)*Wj*Jj*Wj*Xj))
# B1 = Sigma(kj) - tr(V Sigma(t(Xj)*Jj*Wj*Xj)) -
# tr(V Sigma(t(Xj)*Wj*Jj*Xj)) +
# tr(V*[Sigma(t(Xj)*Jj*Xj)]*V*Sigma(t(Xj)*Wj^2*Xj))
# C1 = tr(W^-1) - tr(V*Sigma(t(X)*Jj*Xj))
A1 <- tr.sumJJ - sum(diag(V.i %*% sumXJJWX)) -
sum(diag(V.i %*% sumXWJJX)) +
sum(diag(V.i %*% sumXJX %*% V.i %*% sumXWJWX))
B1 <- length(data.full$study) -
sum(diag(V.i %*% sumXWJX)) -
sum(diag(V.i %*% sumXJWX)) +
sum(diag(V.i %*% sumXJX%*%V.i %*% sumXWWX))
C1 <- sumV - sum(diag(V.i %*% sumXJX))
# A2 = tr(W) - tr(V*Sigma(t(X)*Wj*Jj*Wj*Xj))
# B2 = tr(W) - tr(V*Sigma(t(X)*Wj^2*Xj))
# C2 = Sigma(kj-p)
A2 <- sumW - sum(diag(V.i %*% sumXWJWX))
B2 <- sumW - sum(diag(V.i %*% sumXWWX))
C2 <- length(data.full$study) - (p + 1)
# MoM estimator for omega.sq.h = A2(Qa-C1)-A1(Qe-C2) / B1A2-B2A1
# Estimate of between-studies-wthin-cluster variance component
omega.sq1 <- ((Qa - C1) * A2 - (Qe - C2) * A1) / (B1 * A2 - B2 * A1)
omega.sq <- ifelse(omega.sq1 < 0, 0, omega.sq1)
# MoM estimators for tau.sq: Qe-C2/A2 - omega.sq.h(B2/A2)
# Estimate of between-clusters variance component
tau.sq1 <- ((Qe - C2) / A2) - omega.sq * (B2 / A2)
tau.sq <- ifelse(tau.sq1 < 0, 0, tau.sq1)
# Approximate inverse variance weights
data.full$r.weights <- (1 / (as.vector(data.full$var.eff.size) +
as.vector(tau.sq) +
as.vector(omega.sq)))
# Model info list for hierarchical effects
mod_info <- list(omega.sq = omega.sq, tau.sq = tau.sq)
}, # End HIER
CORR = { # Begin CORR
W <- diag (data.full$weights)
sumW <- sum(data.full$weights) # Sum (k.j*w.j)
Qe <- t(data.full$e) %*% W %*% data.full$e
# The following components (denom, termA, termB, term1, term2)
# are used in the calculation of the estimate of the residual
# variance component tau.sq.hat.
# Note: The effect of correlation on the estimates occurs entirely
# through the rho*term2 component.
denom <- sumW - sum(diag(solve(sumXWX) %*% sumXWJWX))
termA <- sum(diag(solve(sumXWX) %*% Matrx_WKXX)) #ZH_edit
termB <- sum(diag(solve(sumXWX) %*% Matrx_wk_XJX_XX ))#ZH_edit
term1 <- (Qe - N + termA) / denom
term2 <- termB / denom
tau.sq1 <- term1 + rho * term2
tau.sq <- ifelse(tau.sq1 < 0, 0, tau.sq1)
df <- N - termA - rho * (termB)
I.2.1 <- ((Qe - df) / Qe) * 100
I.2 <- ifelse(I.2.1 < 0, 0, I.2.1)
# Approximate inverse variance weights
data.full$r.weights <- 1 / (as.vector(data.full$k) *
(as.vector(data.full$avg.var.eff.size) +
as.vector(tau.sq)))
# Model info list for correlated effects
mod_info <- list(rho = rho, I.2 = I.2, tau.sq = tau.sq,
term1 = term1, term2 = term2)
} # End CORR
)
} else { # Begin userweights
data.full$r.weights <- data.full$userweights
# Model info list for userweights
mod_info <- list(k = k, N = N, p = p, M = M)
} # End userweights
W.r.big <- diag(data.full$r.weights) # W
W.r <- by(data.full$r.weights, data.full$study, # Wj
function(x) diag(x, nrow = length(x)))
sumXWX.r <- Reduce("+", Map(function(X, W)
t(X) %*% W %*% X,
X, W.r))
sumXWy.r <- Reduce("+", Map(function(X, W, y)
t(X) %*% W %*% y,
X, W.r, y))
b.r <- solve(sumXWX.r) %*% sumXWy.r
data.full$pred.r <- Xreg %*% b.r
data.full$e.r <- cbind(data.full$effect.size) - data.full$pred.r
data.full$e.r <- as.numeric(data.full$e.r)
sigma.hat.r <- by(data.full$e.r, data.full$study,
function(x) tcrossprod(x))
if (!small) { # Begin small = FALSE
sumXWeeWX.r <- Reduce("+", Map(function(X, W, V)
t(X) %*% W %*% V %*% W %*% X,
X, W.r, sigma.hat.r))
VR.r <- solve(sumXWX.r) %*% sumXWeeWX.r %*% solve(sumXWX.r)
SE <- sqrt(diag(VR.r)) * sqrt(N / (N - (p + 1)))
t <- b.r / SE
if (is.null(userdfs))
dfs <- N - (p + 1)
else
dfs <- userdfs
prob <- 2 * (1 - stats::pt(abs(t), dfs))
CI.L <- b.r - stats::qt(.975, dfs) * SE
CI.U <- b.r + stats::qt(.975, dfs) * SE
} else { # Begin small = TRUE
Q <- solve(sumXWX.r) # Q = (X'WX)^(-1)
Q.list <- rep(list(Q), N)
H <- Xreg %*% Q %*% t(Xreg) %*% W.r.big # H = X * Q * X' * W
ImH <- diag(c(1), dim(Xreg)[1], dim(Xreg)[1]) - H
data.full$ImH <- cbind(ImH)
ImHj <- lapply(split(x = ImH,f = as.factor(data.full$study)),
function(x){matrix(x, ncol =M)})
#ImHj <- by(data.full$ImH, data.full$study,
# function(x) as.matrix(x))
diag_one <- by(rep(1, M), X.full$study,
function(x) diag(x, nrow = length(x)))
ImHii <- Map(function(X, Q, W, D)
D - X %*% Q %*% t(X) %*% W,
X, Q.list, W.r, diag_one)
if (!user_weighting){
Working_Matrx_E <- diag(1/data.full$r.weights) #1/W
Working_Matrx_E_j <- by(data.full$r.weights, data.full$study, # Wj
function(x) diag(1/x, nrow = length(x))) #1/W_j
switch(modelweights,
HIER = {
# Inside Matrix = E_j^0.5 * ImH_j *E * t(ImH_j) * E_j^0.5
# In this case, the formula can be simplified to
# Inside Matrix = E_j^0.5 * ImH_jj * E_j^1.5
InsideMatrx_list <- Map(
function (W_E_j, ImH_jj) {
sqrt(W_E_j) %*% ImH_jj %*% (W_E_j^1.5)
},
Working_Matrx_E_j, ImHii)
eigenres_list <- lapply(InsideMatrx_list, function(x) eigen(x))
eigenval_list <- lapply(eigenres_list, function(x) x$values)
eigenvec_list <- lapply(eigenres_list, function(x) x$vectors)
A.MBB <- Map(function (eigenvec, eigenval, k, W_E_j) {
eigenval_InvSqrt <- ifelse(eigenval< 10^-10, 0, 1/sqrt(eigenval)) # Pseudo_Inverse
sqrt(W_E_j) %*% eigenvec %*% diag(eigenval_InvSqrt, k, k) %*% t(eigenvec) %*%sqrt(W_E_j) # Pseudo_Inverse
},
eigenvec_list,
eigenval_list,
k_list,
Working_Matrx_E_j)
},
CORR = {
# In this case, the formula can be simplified to
# A_MBB = ImH_jj ^ (-0.5)
eigenres_list <- lapply(ImHii, function(x) eigen(x))
eigenval_list <- lapply(eigenres_list, function(x) x$values)
eigenvec_list <- lapply(eigenres_list, function(x) x$vectors)
A.MBB <- Map(function (eigenvec, eigenval, k, W_E_j) {
eigenval_InvSqrt <- ifelse(eigenval< 10^-10, 0, 1/sqrt(eigenval)) # Pseudo_Inverse
eigenvec %*% diag(eigenval_InvSqrt, k, k) %*% t(eigenvec) # Pseudo_Inverse
},
eigenvec_list,
eigenval_list,
k_list,
Working_Matrx_E_j)
})
} else { # Begin userweights
V.big <- diag(c(1), dim(Xreg)[1], dim(Xreg)[1]) %*%
diag(data.full$avg.var.eff.size)
v.j <- by(data.full$avg.var.eff.size, data.full$study,
function(x) diag(x, nrow = length(x)))
v.j.sqrt_list <- lapply(v.j, function (x) sqrt(x))
Working_Matrx_E_j <- v.j
Working_Matrx_E <- V.big
InsideMatrx_list <- Map(
function (ImH_j) {
ImH_j %*% Working_Matrx_E %*% t(ImH_j)
},
ImHj)
eigenres_list <- lapply(InsideMatrx_list, function(x) eigen(x))
eigenval_list <- lapply(eigenres_list, function(x) x$values)
eigenvec_list <- lapply(eigenres_list, function(x) x$vectors)
A.MBB <- Map(function (eigenvec, eigenval, k, v.j.sqrt) {
eigenval_InvSqrt <- ifelse(eigenval< 10^-10, 0, 1/sqrt(eigenval)) # Pseudo_Inverse
v.j.sqrt %*% eigenvec %*% diag(eigenval_InvSqrt, k, k) %*% t(eigenvec) # Pseudo_Inverse
},
eigenvec_list,
eigenval_list,
k_list,
v.j.sqrt_list)
} # End userweights
sumXWA.MBBeeA.MBBWX.r <- Map(function(X,W,A,S)
t(X) %*% W %*% A %*% S %*% A %*% W %*%X,
X, W.r, A.MBB, sigma.hat.r)
sumXWA.MBBeeA.MBBWX.r <- Reduce("+", sumXWA.MBBeeA.MBBWX.r)
giTemp <- Map(function(I, A, W, X, Q)
t(I) %*% A %*% W %*% X %*% Q,
ImHj, A.MBB, W.r, X, Q.list)
giTemp <- do.call(rbind,giTemp)
gi_matrix <- lapply(X = 1:(p+1), FUN = function(i){ matrix(giTemp[,i], nrow = M) })
if (!user_weighting) {
W.mat <- matrix(rep(1/sqrt(data.full$r.weights),times = N),nrow = M)
B_matrix_half <- lapply(X = gi_matrix, FUN = function(gi_mat){ W.mat * gi_mat})
}else{
B_matrix_half <- gi_matrix
# B_matrix_half <- lapply(X = gi_matrix, FUN = function(gi_mat){ solve(sqrt(V.big)) %*% gi_mat})
}
B_mat <- lapply(X = B_matrix_half, FUN = tcrossprod)
B_trace_square <- sapply(X = B_mat, FUN = function(B){ (sum(diag(B)))^2})
B_square_trace <- sapply(X = B_mat, FUN = function(B){sum(B * B)})
if (is.null(userdfs))
dfs <- B_trace_square/B_square_trace
else
dfs <- userdfs
VR.MBB1 <- solve(sumXWX.r) %*% sumXWA.MBBeeA.MBBWX.r %*% solve(sumXWX.r)
VR.r <- VR.MBB1
SE <- sqrt(diag(VR.r))
t <- b.r / SE
prob <- 2 * (1 - stats::pt(abs(t), df = dfs))
CI.L <- b.r - stats::qt(.975, dfs) * SE
CI.U <- b.r + stats::qt(.975, dfs) * SE
} # End small = TRUE
reg_table <- data.frame(cbind(b.r, SE, t, dfs, prob, CI.L, CI.U))
#names(X.full)[2] <- "intercept"
labels <- c(colnames(X.full[2:length(X.full)]))
sig <- ifelse(prob < .01, "***",
ifelse(prob > .01 & prob < .05, "**",
ifelse(prob > .05 & prob < .10, "*", "")))
reg_table <- cbind(labels, reg_table, sig)
colnames(reg_table) <- c("labels", "b.r", "SE", "t", "dfs", "prob", "CI.L",
"CI.U", "sig")
if (!small) { # Begin small = FALSE
mod_label_sm <- ""
mod_notice <- ""
} else { # Begin small = TRUE
mod_label_sm <- "with Small-Sample Corrections"
mod_notice <- "Note: If df < 4, do not trust the results"
} # End small = TRUE
if (!user_weighting) {
switch(modelweights,
HIER = { # Begin HIER
mod_label <- c("RVE: Hierarchical Effects Model", mod_label_sm)
}, # End HIER
CORR = { # Begin CORR
mod_label <- c("RVE: Correlated Effects Model", mod_label_sm)
} # End CORR
)
} else { # Begin userweights
mod_label <- c("RVE: User Specified Weights", mod_label_sm)
} # End userweights
res <- list(data.full = data.full, X.full = X.full, reg_table = reg_table,
mod_label = mod_label, mod_notice = mod_notice, modelweights =
modelweights, mod_info = mod_info, user_weighting =
user_weighting, ml = ml, cl = cl, N = N, M = M, k = k,
k_list = k_list, p = p, X = X, y = y, Xreg = Xreg, b.r = b.r,
VR.r = VR.r, dfs = dfs, small = small, data = data, labels =
labels, study_orig_id = study_orig_id)
class(res) <- "robu"
res
}
|
/Code/Utilities/robu.custom.r
|
no_license
|
timesler/AttachmentStabilityMetaAnalysis_Opie2019
|
R
| false
| false
| 21,031
|
r
|
robu.custom <- function(formula, data, studynum,var.eff.size, userweights,
modelweights = c("CORR", "HIER"), rho = 0.8,
small = TRUE, userdfs = NULL, ...) {
# Evaluate model weighting scheme.
modelweights <- match.arg(modelweights)
if (modelweights == "CORR" && rho > 1 | rho < 0)
stop ("Rho must be a value between 0 and 1.")
if (missing(userweights)){
user_weighting = FALSE
} else {
user_weighting = TRUE
}
cl <- match.call() # Full model call
mf <- match.call(expand.dots = FALSE)
ml <- mf[[2]] # Model formula
m <- match(c("formula", "data", "studynum",
"var.eff.size", "userweights"), names(mf))
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- as.name("model.frame")
mf <- eval(mf, parent.frame())
if(!user_weighting){
dframe <- data.frame(effect.size = mf[,1],
stats::model.matrix(formula, mf),
studynum = mf[["(studynum)"]],
var.eff.size = mf[["(var.eff.size)"]])
X.full.names <- names(dframe)[-match(c("effect.size",
"studynum",
"var.eff.size"),
names(dframe))]
} else { # Begin userweights
dframe <- data.frame(effect.size = mf[,1],
stats::model.matrix(formula, mf),
studynum = mf[["(studynum)"]],
var.eff.size = mf[["(var.eff.size)"]],
userweights = mf[["(userweights)"]])
X.full.names <- names(dframe)[-match(c("effect.size",
"studynum",
"userweights",
"var.eff.size"),
names(dframe))]
} # End userweights
study_orig_id <- dframe$studynum
dframe$study <- as.factor(dframe$studynum)
dframe$study <- as.numeric(dframe$study)
dframe <- dframe[order(dframe$study),]
k_temp <- as.data.frame(unclass(rle(sort(dframe$study))))
dframe$k <- k_temp[[1]][ match(dframe$study, k_temp[[2]])]
dframe$avg.var.eff.size <- stats::ave(dframe$var.eff.size, dframe$study)
dframe$sd.eff.size <- sqrt(dframe$var.eff.size)
switch(modelweights,
HIER = { # Begin HIER
dframe$weights <- 1 / dframe$var.eff.size
}, # End HIER
CORR = { # Begin CORR
dframe$weights <- 1 / (dframe$k * dframe$avg.var.eff.size)
} # End CORR
)
X.full <- dframe[c("study", X.full.names)]
data.full.names <- names(dframe)[-match(c("studynum",X.full.names),
names(dframe))]
data.full <- dframe[c(data.full.names)]
k <- data.full[ !duplicated(data.full$study), ]$k
k_list <- as.list(k)
M <- nrow(data.full) # Number of units in analysis
p <- ncol(X.full) - 2 # Number of (non-intercept) covariates
N <- max(data.full$study) # Number of studies
W <- as.matrix(by(data.full$weights, data.full$study,
function(x) diag(x, nrow = length(x)),
simplify = FALSE))
X <- data.matrix(X.full)
X <- lapply(split(X[,2:(p + 2)], X[,1]), matrix, ncol = p + 1)
y <- by(data.full$effect.size, data.full$study,
function(x) matrix(x))
J <- by(rep(1, nrow(X.full)), X.full$study,
function(x) matrix(x, nrow = length(x),
ncol = length(x)))
sigma <- by(data.full$sd.eff.size, data.full$study,
function(x) tcrossprod(x))
vee <- by(data.full$var.eff.size, data.full$study,
function(x) diag(x, nrow = length(x)))
SigmV <- Map(function(sigma, V)
sigma - V, sigma, vee)
sumXWX <- Reduce("+", Map(function(X, W)
t(X) %*% W %*% X,
X, W))
sumXWy <- Reduce("+", Map(function(X, W, y)
t(X) %*% W %*% y,
X, W, y))
sumXWJWX <- Reduce("+", Map(function(X, W, J)
t(X) %*% W %*% J %*% W %*% X,
X, W, J))
Matrx_WKXX <- Reduce("+",
Map(function(X, W, k) { t(X) %*% (W / k) %*% X},
X, W, k_list))
Matrx_wk_XJX_XX <- Reduce("+",
Map(function(X, W, J, k) {(W / k)[1,1] * ( t(X) %*% J %*% X - t(X) %*% X) },
X, W, J, k_list))
switch(modelweights,
HIER = { # Begin HIER
tr.sumJJ <- Reduce("+", Map(function(J)
sum(diag(J %*% J)),
J))
sumXJX <- Reduce("+", Map(function(X, J)
t(X) %*% J %*% X,
X, J))
sumXWJJX <- Reduce("+", Map(function(X, W, J)
t(X) %*% W %*% J %*% J %*% X,
X, W, J))
sumXJJWX <- Reduce("+", Map(function(X, W, J)
t(X) %*% J %*% J %*% W %*% X,
X, W, J))
sumXWWX <- Reduce("+", Map(function(X, W)
t(X) %*% W %*% W %*% X,
X, W))
sumXJWX <- Reduce("+", Map(function(X, W, J)
t(X) %*% J %*% W %*% X,
X , W, J))
sumXWJX <- Reduce("+", Map(function(X, W, J)
t(X) %*% W %*% J %*% X,
X, W, J))
} # End HIER
)
b <- solve(sumXWX) %*% sumXWy
Xreg <- as.matrix(X.full[-c(1)], dimnames = NULL)
data.full$pred <- Xreg %*% b
data.full$e <- data.full$effect.size - data.full$pred
if (!user_weighting) {
switch(modelweights,
HIER = { # Begin HIER
# Sigma_aj = tau.sq * J_j + omega.sq * I_j + V_j
# Qe is sum of squares 1
# Qe = Sigma(T'WT)-(Sigma(T'WX)(Sigma(X'WX))^-1(Sigma(X'WT)
# where W = V^(-1) and V = data.full$var.eff.size
# Also, Qe = (y-xb)' W (y-xb)
sumV <- sum(data.full$var.eff.size)
W <- diag(1 / data.full$var.eff.size)
sumW <- sum(W)
Qe <- t(data.full$e) %*% W %*% data.full$e
# Qa is sum of squares 2
# Qa = sum(T-XB.hat)'J(T-XB.hat)
# where B.hat = (X'WX)^-1(X'WT)
# Also, Qa = (y-xb)'A (y-xb), A=diag(J)
e <- by(data.full$e, data.full$study, function(x) matrix(x))
sumEJE <- Reduce("+", Map(function(e, J) t(e) %*% J %*% e, e, J))
Qa <- sumEJE
# MoM estimators for tau.sq and omega.sq can be written as
# omega.sq.h = A2(Qa-C1)-A1(Qe-C2) / B1A2-B2A1
# tau.sq.h = Qe-C2/A2 - omega.sq.h(B2/A2) where
# Vi = (t(X)WX)^-1
V.i <- solve(sumXWX)
# A1 = Sigma(kj^2) - tr(V*Sigma(kj*t(Xj)*Jj*Wj*Xj)) -
# tr(V*Sigma(kj*t(Xj)*Jj*Wj*Xj)) +
# tr(V*[Sigma(t(Xj)*Jj*Xj)]*V*Sigma(t(Xj)*Wj*Jj*Wj*Xj))
# B1 = Sigma(kj) - tr(V Sigma(t(Xj)*Jj*Wj*Xj)) -
# tr(V Sigma(t(Xj)*Wj*Jj*Xj)) +
# tr(V*[Sigma(t(Xj)*Jj*Xj)]*V*Sigma(t(Xj)*Wj^2*Xj))
# C1 = tr(W^-1) - tr(V*Sigma(t(X)*Jj*Xj))
A1 <- tr.sumJJ - sum(diag(V.i %*% sumXJJWX)) -
sum(diag(V.i %*% sumXWJJX)) +
sum(diag(V.i %*% sumXJX %*% V.i %*% sumXWJWX))
B1 <- length(data.full$study) -
sum(diag(V.i %*% sumXWJX)) -
sum(diag(V.i %*% sumXJWX)) +
sum(diag(V.i %*% sumXJX%*%V.i %*% sumXWWX))
C1 <- sumV - sum(diag(V.i %*% sumXJX))
# A2 = tr(W) - tr(V*Sigma(t(X)*Wj*Jj*Wj*Xj))
# B2 = tr(W) - tr(V*Sigma(t(X)*Wj^2*Xj))
# C2 = Sigma(kj-p)
A2 <- sumW - sum(diag(V.i %*% sumXWJWX))
B2 <- sumW - sum(diag(V.i %*% sumXWWX))
C2 <- length(data.full$study) - (p + 1)
# MoM estimator for omega.sq.h = A2(Qa-C1)-A1(Qe-C2) / B1A2-B2A1
# Estimate of between-studies-wthin-cluster variance component
omega.sq1 <- ((Qa - C1) * A2 - (Qe - C2) * A1) / (B1 * A2 - B2 * A1)
omega.sq <- ifelse(omega.sq1 < 0, 0, omega.sq1)
# MoM estimators for tau.sq: Qe-C2/A2 - omega.sq.h(B2/A2)
# Estimate of between-clusters variance component
tau.sq1 <- ((Qe - C2) / A2) - omega.sq * (B2 / A2)
tau.sq <- ifelse(tau.sq1 < 0, 0, tau.sq1)
# Approximate inverse variance weights
data.full$r.weights <- (1 / (as.vector(data.full$var.eff.size) +
as.vector(tau.sq) +
as.vector(omega.sq)))
# Model info list for hierarchical effects
mod_info <- list(omega.sq = omega.sq, tau.sq = tau.sq)
}, # End HIER
CORR = { # Begin CORR
W <- diag (data.full$weights)
sumW <- sum(data.full$weights) # Sum (k.j*w.j)
Qe <- t(data.full$e) %*% W %*% data.full$e
# The following components (denom, termA, termB, term1, term2)
# are used in the calculation of the estimate of the residual
# variance component tau.sq.hat.
# Note: The effect of correlation on the estimates occurs entirely
# through the rho*term2 component.
denom <- sumW - sum(diag(solve(sumXWX) %*% sumXWJWX))
termA <- sum(diag(solve(sumXWX) %*% Matrx_WKXX)) #ZH_edit
termB <- sum(diag(solve(sumXWX) %*% Matrx_wk_XJX_XX ))#ZH_edit
term1 <- (Qe - N + termA) / denom
term2 <- termB / denom
tau.sq1 <- term1 + rho * term2
tau.sq <- ifelse(tau.sq1 < 0, 0, tau.sq1)
df <- N - termA - rho * (termB)
I.2.1 <- ((Qe - df) / Qe) * 100
I.2 <- ifelse(I.2.1 < 0, 0, I.2.1)
# Approximate inverse variance weights
data.full$r.weights <- 1 / (as.vector(data.full$k) *
(as.vector(data.full$avg.var.eff.size) +
as.vector(tau.sq)))
# Model info list for correlated effects
mod_info <- list(rho = rho, I.2 = I.2, tau.sq = tau.sq,
term1 = term1, term2 = term2)
} # End CORR
)
} else { # Begin userweights
data.full$r.weights <- data.full$userweights
# Model info list for userweights
mod_info <- list(k = k, N = N, p = p, M = M)
} # End userweights
W.r.big <- diag(data.full$r.weights) # W
W.r <- by(data.full$r.weights, data.full$study, # Wj
function(x) diag(x, nrow = length(x)))
sumXWX.r <- Reduce("+", Map(function(X, W)
t(X) %*% W %*% X,
X, W.r))
sumXWy.r <- Reduce("+", Map(function(X, W, y)
t(X) %*% W %*% y,
X, W.r, y))
b.r <- solve(sumXWX.r) %*% sumXWy.r
data.full$pred.r <- Xreg %*% b.r
data.full$e.r <- cbind(data.full$effect.size) - data.full$pred.r
data.full$e.r <- as.numeric(data.full$e.r)
sigma.hat.r <- by(data.full$e.r, data.full$study,
function(x) tcrossprod(x))
if (!small) { # Begin small = FALSE
sumXWeeWX.r <- Reduce("+", Map(function(X, W, V)
t(X) %*% W %*% V %*% W %*% X,
X, W.r, sigma.hat.r))
VR.r <- solve(sumXWX.r) %*% sumXWeeWX.r %*% solve(sumXWX.r)
SE <- sqrt(diag(VR.r)) * sqrt(N / (N - (p + 1)))
t <- b.r / SE
if (is.null(userdfs))
dfs <- N - (p + 1)
else
dfs <- userdfs
prob <- 2 * (1 - stats::pt(abs(t), dfs))
CI.L <- b.r - stats::qt(.975, dfs) * SE
CI.U <- b.r + stats::qt(.975, dfs) * SE
} else { # Begin small = TRUE
Q <- solve(sumXWX.r) # Q = (X'WX)^(-1)
Q.list <- rep(list(Q), N)
H <- Xreg %*% Q %*% t(Xreg) %*% W.r.big # H = X * Q * X' * W
ImH <- diag(c(1), dim(Xreg)[1], dim(Xreg)[1]) - H
data.full$ImH <- cbind(ImH)
ImHj <- lapply(split(x = ImH,f = as.factor(data.full$study)),
function(x){matrix(x, ncol =M)})
#ImHj <- by(data.full$ImH, data.full$study,
# function(x) as.matrix(x))
diag_one <- by(rep(1, M), X.full$study,
function(x) diag(x, nrow = length(x)))
ImHii <- Map(function(X, Q, W, D)
D - X %*% Q %*% t(X) %*% W,
X, Q.list, W.r, diag_one)
if (!user_weighting){
Working_Matrx_E <- diag(1/data.full$r.weights) #1/W
Working_Matrx_E_j <- by(data.full$r.weights, data.full$study, # Wj
function(x) diag(1/x, nrow = length(x))) #1/W_j
switch(modelweights,
HIER = {
# Inside Matrix = E_j^0.5 * ImH_j *E * t(ImH_j) * E_j^0.5
# In this case, the formula can be simplified to
# Inside Matrix = E_j^0.5 * ImH_jj * E_j^1.5
InsideMatrx_list <- Map(
function (W_E_j, ImH_jj) {
sqrt(W_E_j) %*% ImH_jj %*% (W_E_j^1.5)
},
Working_Matrx_E_j, ImHii)
eigenres_list <- lapply(InsideMatrx_list, function(x) eigen(x))
eigenval_list <- lapply(eigenres_list, function(x) x$values)
eigenvec_list <- lapply(eigenres_list, function(x) x$vectors)
A.MBB <- Map(function (eigenvec, eigenval, k, W_E_j) {
eigenval_InvSqrt <- ifelse(eigenval< 10^-10, 0, 1/sqrt(eigenval)) # Pseudo_Inverse
sqrt(W_E_j) %*% eigenvec %*% diag(eigenval_InvSqrt, k, k) %*% t(eigenvec) %*%sqrt(W_E_j) # Pseudo_Inverse
},
eigenvec_list,
eigenval_list,
k_list,
Working_Matrx_E_j)
},
CORR = {
# In this case, the formula can be simplified to
# A_MBB = ImH_jj ^ (-0.5)
eigenres_list <- lapply(ImHii, function(x) eigen(x))
eigenval_list <- lapply(eigenres_list, function(x) x$values)
eigenvec_list <- lapply(eigenres_list, function(x) x$vectors)
A.MBB <- Map(function (eigenvec, eigenval, k, W_E_j) {
eigenval_InvSqrt <- ifelse(eigenval< 10^-10, 0, 1/sqrt(eigenval)) # Pseudo_Inverse
eigenvec %*% diag(eigenval_InvSqrt, k, k) %*% t(eigenvec) # Pseudo_Inverse
},
eigenvec_list,
eigenval_list,
k_list,
Working_Matrx_E_j)
})
} else { # Begin userweights
V.big <- diag(c(1), dim(Xreg)[1], dim(Xreg)[1]) %*%
diag(data.full$avg.var.eff.size)
v.j <- by(data.full$avg.var.eff.size, data.full$study,
function(x) diag(x, nrow = length(x)))
v.j.sqrt_list <- lapply(v.j, function (x) sqrt(x))
Working_Matrx_E_j <- v.j
Working_Matrx_E <- V.big
InsideMatrx_list <- Map(
function (ImH_j) {
ImH_j %*% Working_Matrx_E %*% t(ImH_j)
},
ImHj)
eigenres_list <- lapply(InsideMatrx_list, function(x) eigen(x))
eigenval_list <- lapply(eigenres_list, function(x) x$values)
eigenvec_list <- lapply(eigenres_list, function(x) x$vectors)
A.MBB <- Map(function (eigenvec, eigenval, k, v.j.sqrt) {
eigenval_InvSqrt <- ifelse(eigenval< 10^-10, 0, 1/sqrt(eigenval)) # Pseudo_Inverse
v.j.sqrt %*% eigenvec %*% diag(eigenval_InvSqrt, k, k) %*% t(eigenvec) # Pseudo_Inverse
},
eigenvec_list,
eigenval_list,
k_list,
v.j.sqrt_list)
} # End userweights
sumXWA.MBBeeA.MBBWX.r <- Map(function(X,W,A,S)
t(X) %*% W %*% A %*% S %*% A %*% W %*%X,
X, W.r, A.MBB, sigma.hat.r)
sumXWA.MBBeeA.MBBWX.r <- Reduce("+", sumXWA.MBBeeA.MBBWX.r)
giTemp <- Map(function(I, A, W, X, Q)
t(I) %*% A %*% W %*% X %*% Q,
ImHj, A.MBB, W.r, X, Q.list)
giTemp <- do.call(rbind,giTemp)
gi_matrix <- lapply(X = 1:(p+1), FUN = function(i){ matrix(giTemp[,i], nrow = M) })
if (!user_weighting) {
W.mat <- matrix(rep(1/sqrt(data.full$r.weights),times = N),nrow = M)
B_matrix_half <- lapply(X = gi_matrix, FUN = function(gi_mat){ W.mat * gi_mat})
}else{
B_matrix_half <- gi_matrix
# B_matrix_half <- lapply(X = gi_matrix, FUN = function(gi_mat){ solve(sqrt(V.big)) %*% gi_mat})
}
B_mat <- lapply(X = B_matrix_half, FUN = tcrossprod)
B_trace_square <- sapply(X = B_mat, FUN = function(B){ (sum(diag(B)))^2})
B_square_trace <- sapply(X = B_mat, FUN = function(B){sum(B * B)})
if (is.null(userdfs))
dfs <- B_trace_square/B_square_trace
else
dfs <- userdfs
VR.MBB1 <- solve(sumXWX.r) %*% sumXWA.MBBeeA.MBBWX.r %*% solve(sumXWX.r)
VR.r <- VR.MBB1
SE <- sqrt(diag(VR.r))
t <- b.r / SE
prob <- 2 * (1 - stats::pt(abs(t), df = dfs))
CI.L <- b.r - stats::qt(.975, dfs) * SE
CI.U <- b.r + stats::qt(.975, dfs) * SE
} # End small = TRUE
reg_table <- data.frame(cbind(b.r, SE, t, dfs, prob, CI.L, CI.U))
#names(X.full)[2] <- "intercept"
labels <- c(colnames(X.full[2:length(X.full)]))
sig <- ifelse(prob < .01, "***",
ifelse(prob > .01 & prob < .05, "**",
ifelse(prob > .05 & prob < .10, "*", "")))
reg_table <- cbind(labels, reg_table, sig)
colnames(reg_table) <- c("labels", "b.r", "SE", "t", "dfs", "prob", "CI.L",
"CI.U", "sig")
if (!small) { # Begin small = FALSE
mod_label_sm <- ""
mod_notice <- ""
} else { # Begin small = TRUE
mod_label_sm <- "with Small-Sample Corrections"
mod_notice <- "Note: If df < 4, do not trust the results"
} # End small = TRUE
if (!user_weighting) {
switch(modelweights,
HIER = { # Begin HIER
mod_label <- c("RVE: Hierarchical Effects Model", mod_label_sm)
}, # End HIER
CORR = { # Begin CORR
mod_label <- c("RVE: Correlated Effects Model", mod_label_sm)
} # End CORR
)
} else { # Begin userweights
mod_label <- c("RVE: User Specified Weights", mod_label_sm)
} # End userweights
res <- list(data.full = data.full, X.full = X.full, reg_table = reg_table,
mod_label = mod_label, mod_notice = mod_notice, modelweights =
modelweights, mod_info = mod_info, user_weighting =
user_weighting, ml = ml, cl = cl, N = N, M = M, k = k,
k_list = k_list, p = p, X = X, y = y, Xreg = Xreg, b.r = b.r,
VR.r = VR.r, dfs = dfs, small = small, data = data, labels =
labels, study_orig_id = study_orig_id)
class(res) <- "robu"
res
}
|
# --- Dependencies -----------------------------------------------------------
# Install-if-missing pattern: require() returns FALSE (instead of erroring)
# when a package is absent, which triggers installation before loading.
if (!require("shiny")) {
  install.packages("shiny")
  library(shiny)
}
if (!require("shinyWidgets")) {
  install.packages("shinyWidgets")
  library(shinyWidgets)
}
if (!require("shinythemes")) {
  install.packages("shinythemes")
  library(shinythemes)
}
if (!require("leaflet")) {
  install.packages("leaflet")
  library(leaflet)
}
if (!require("leaflet.extras")) {
  install.packages("leaflet.extras")
  library(leaflet.extras)
}

# --- UI ----------------------------------------------------------------------
# A navbar app with a single "Maps" tab: two leaflet maps side by side
# (outputs "left_map" / "right_map", rendered by the server) plus a draggable
# control panel exposing inputs "adjust_score" and "adjust_time".
shinyUI(
  navbarPage(strong("Citi Bike Study", style = "color: white;"),
             theme = shinytheme("cerulean"), # themes: https://rstudio.github.io/shinythemes/
             #------------------------------- tab panel - Maps ---------------------------------
             tabPanel("Maps",
                      icon = icon("map-marker-alt"),
                      div(class = "outer",
                          # Two half-width maps rendered side by side.
                          fluidRow(
                            splitLayout(cellWidths = c("50%", "50%"),
                                        leafletOutput("left_map", width = "100%", height = 1200),
                                        leafletOutput("right_map", width = "100%", height = 1200))),
                          # Draggable, semi-transparent control panel anchored top-left.
                          absolutePanel(id = "control", class = "panel panel-default",
                                        fixed = TRUE, draggable = TRUE,
                                        top = 200, left = 50, right = "auto", bottom = "auto",
                                        width = 250, height = "auto",
                                        tags$h4("Citi Bike Activity Comparison"),
                                        tags$br(),
                                        tags$h5("Pre-covid(Left) Right(Right)"),
                                        # Which metric to color/size the maps by.
                                        prettyRadioButtons(
                                          inputId = "adjust_score",
                                          label = "Score List:",
                                          choices = c("start_cnt",
                                                      "end_cnt",
                                                      "day_diff_absolute",
                                                      "day_diff_percentage"),
                                          inline = TRUE,
                                          status = "danger",
                                          fill = TRUE
                                        ),
                                        # Restrict the comparison to a time window.
                                        awesomeRadio("adjust_time",
                                                     label = "Time",
                                                     choices = c("Overall",
                                                                 "Weekday",
                                                                 "Weekend"),
                                                     selected = "Overall",
                                                     status = "warning"),
                                        style = "opacity: 0.80"
                          ) # absolutePanel
                          # NOTE: the original had a trailing comma here, which passed an
                          # empty argument to div(); removed for correctness/portability.
                      ) # div "outer"
             ) # tabPanel "Maps"
  ) # navbarPage
) # shinyUI
|
/15-Spring2023/Projects_StarterCodes/Project2-ShinyApp/app/ui.R
|
no_license
|
TZstatsADS/ADS_Teaching
|
R
| false
| false
| 3,751
|
r
|
# Load every package the UI depends on, installing any that are missing.
# (require() returns FALSE rather than erroring, which drives installation.)
for (pkg in c("shiny", "shinyWidgets", "shinythemes",
              "leaflet", "leaflet.extras")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
}

# --- "Maps" tab components, built as named pieces then assembled below. ------

# Two half-width leaflet maps shown side by side; the server renders
# "left_map" (pre-COVID) and "right_map" (COVID) into these outputs.
side_by_side_maps <- fluidRow(
  splitLayout(
    cellWidths = c("50%", "50%"),
    leafletOutput("left_map", width = "100%", height = 1200),
    leafletOutput("right_map", width = "100%", height = 1200)
  )
)

# Radio group selecting which activity metric the maps display.
score_picker <- prettyRadioButtons(
  inputId = "adjust_score",
  label = "Score List:",
  choices = c("start_cnt",
              "end_cnt",
              "day_diff_absolute",
              "day_diff_percentage"),
  inline = TRUE,
  status = "danger",
  fill = TRUE
)

# Radio group restricting the comparison to a time window.
time_picker <- awesomeRadio(
  "adjust_time",
  label = "Time",
  choices = c("Overall", "Weekday", "Weekend"),
  selected = "Overall",
  status = "warning"
)

# Draggable, semi-transparent control panel anchored near the top-left.
control_panel <- absolutePanel(
  id = "control", class = "panel panel-default",
  fixed = TRUE, draggable = TRUE,
  top = 200, left = 50, right = "auto", bottom = "auto",
  width = 250, height = "auto",
  tags$h4("Citi Bike Activity Comparison"),
  tags$br(),
  tags$h5("Pre-covid(Left) Right(Right)"),
  score_picker,
  time_picker,
  style = "opacity: 0.80"
)

# --- Assemble the application UI. --------------------------------------------
shinyUI(
  navbarPage(
    strong("Citi Bike Study", style = "color: white;"),
    theme = shinytheme("cerulean"),  # themes: https://rstudio.github.io/shinythemes/
    tabPanel(
      "Maps",
      icon = icon("map-marker-alt"),
      div(class = "outer", side_by_side_maps, control_panel)
    )
  )
)
|
# The following R script can be submitted on the ND CRC cluster using the job
# script (after customization) in the "jobs" folder.

# Make the user library visible BEFORE any package is loaded; in the original
# ordering, reshape2 was loaded before ~/Rlibs was on the search path and
# could therefore fail to be found on the cluster.
.libPaths('~/Rlibs')

# library() stops immediately with a clear error if a package is missing
# (require() would only return FALSE and let the script fail later).
library(reshape2)
library(ggplot2)
library(abind)
library(dplyr)

# All source() paths below are relative to the repository root.
setwd('~/RW-2-BIO')

# Load model run details (defines census_site, mvers, dvers, site, nest,
# finalyr used in the call below).
source('sites/HARVARD/inst/config.R')

# Load the processing function.
source('R/process_rw_model.R')

# Run the ring-width-to-biomass post-processing step for the HARVARD site.
process_rw_model(census_site = census_site,
                 mvers = mvers,
                 dvers = dvers,
                 site = site,
                 nest = nest,
                 finalyr = finalyr,
                 pool = 500)
|
/sites/HARVARD/inst/process_model_HARVARD.R
|
no_license
|
PalEON-Project/RW-2-BIO
|
R
| false
| false
| 626
|
r
|
# The following R script can be submitted on the ND CRC cluster using the job script (after customization)
# in the "jobs" folder
# All source() paths below are relative to the repository root.
setwd('~/RW-2-BIO')
# load model run details (defines census_site, mvers, dvers, site, nest,
# finalyr used in the call below)
source('sites/HARVARD/inst/config.R')
# load needed function
source('R/process_rw_model.R')
require(reshape2)
# NOTE(review): .libPaths is set AFTER require(reshape2); if reshape2 lives
# only under ~/Rlibs this load will fail — consider moving .libPaths first.
.libPaths('~/Rlibs')
# prepare workspace
require(ggplot2)
require(abind)
require(dplyr)
# run step: ring-width-to-biomass post-processing for the HARVARD site
process_rw_model(census_site = census_site,
                 mvers = mvers,
                 dvers = dvers,
                 site = site,
                 nest = nest,
                 finalyr = finalyr,
                 pool = 500)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.