blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
67e4ada786a0172ed0804494ddfa6187a36c5b36
|
ea10d0c311ee84dc4ecd44c49dafe873180023e4
|
/SWSurfaceStations.R
|
01c8fd9ab02044d48a28f3277b05401b863faae6
|
[] |
no_license
|
saadtarik/SurfaceStationSummary
|
f7e720182df57f08b59579110136005d1b3c3a95
|
47febbfe57dc4f1ea1f5ccb57c1f7190cc2afd0e
|
refs/heads/master
| 2021-01-20T06:53:44.722362
| 2013-07-10T23:37:51
| 2013-07-10T23:37:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
r
|
SWSurfaceStations.R
|
## DATA TO FIND
## Configuration for the surface-station summary: all values below are read
## by the downstream analysis scripts, so only the values (not the names)
## should be edited.

# country code
country.code <- "SW"
# Bounding box. Latitudes and longitudes outside this range will be set to NA.
# This assumes that the country code is sufficient to identify the location of a
# station correctly.
long.range <- c(6, 11)
lat.range <- c(45.5, 48)
# years we want data for
year.range <- c(as.Date("2010-01-01"),
                as.Date("2012-12-31"))
# Working directory for this analysis (stored here; note that nothing in this
# block calls setwd()) ----
working.dir <- "~/Documents/projects/Rcode/Test/SW/ClimateMaps"
|
d55a28dfb5109ece9c2b5839ba07beea9eae7893
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/h2o/examples/h2o.head.Rd.R
|
02f68638813f2884fa9a6e50572d71af0e3dbc16
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 435
|
r
|
h2o.head.Rd.R
|
library(h2o)
### Name: h2o.head
### Title: Return the Head or Tail of an H2O Dataset.
### Aliases: h2o.head head.H2OFrame h2o.tail tail.H2OFrame

### ** Examples

## No test:
library(h2o)
# Use `=` for the named argument: `ip <- "localhost"` performs assignment in
# the calling environment (creating a stray global) and only passed the value
# correctly by accident of argument position.
h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
australia_path <- system.file("extdata", "australia.csv", package = "h2o")
australia <- h2o.uploadFile(path = australia_path)
head(australia, 10)
tail(australia, 10)
## End(No test)
|
7319ff205d6218ee4f19e81876a6f4d0d84c02bd
|
5da43dc717c58284b2c89a3aba668c1eb6ba35f2
|
/R/parseCubistModel.R
|
1d657b3036b70217a93cd66b87fed97cd30feac9
|
[] |
no_license
|
topepo/Cubist
|
513dddbd1ad8e314fe3a62984a76eaa41f6444b5
|
cb293c67673772d754cc846c8711a31b40b26175
|
refs/heads/master
| 2023-03-21T17:42:31.927207
| 2023-02-09T15:16:14
| 2023-02-09T15:16:14
| 23,597,893
| 37
| 16
| null | 2023-03-09T19:45:52
| 2014-09-02T22:28:15
|
C
|
UTF-8
|
R
| false
| false
| 11,434
|
r
|
parseCubistModel.R
|
## TODO:
## 3) R function to write R prediction function

## Count the number of rules in each committee of a Cubist model.
##
## `x` is the raw text representation of the model (one string; lines are
## name=value records parsed with parser()). A line starting with "rules="
## opens a new committee and one starting with "conds=" opens a new rule
## within the current committee. Returns a vector with one element per
## committee giving that committee's rule count.
countRules <- function(x)
{
  x <- strsplit(x, "\n")[[1]]
  comNum <- ruleNum <- rep(NA, length(x))
  comIdx <- rIdx <- 0
  for(i in seq_along(x))
  {
    tt <- parser(x[i])
    ## "rules=" header: start of a new committee
    if(names(tt)[1] == "rules")
    {
      comIdx <- comIdx + 1
      rIdx <- 0
    }
    comNum[i] <- comIdx
    ## "conds=" header: start of a new rule within the committee
    if(names(tt)[1] == "conds")
      rIdx <- rIdx + 1
    ruleNum[i] <- rIdx
  }
  ## the largest rule index seen in each committee is its rule count
  rulesPerCom <- unlist(lapply(split(ruleNum, as.factor(comNum)), max))
  rulesPerCom <- rulesPerCom[rulesPerCom > 0]
  rulesPerCom
}
## Extract every split (condition) from the raw text of a Cubist model.
##
## `x` is the model text; lines are name=value records parsed with parser().
## Returns a data frame with one row per condition (columns: committee, rule,
## variable, dir, value, category, type), or NULL when the model contains no
## type 2 (continuous) or type 3 (categorical) conditions.
getSplits <- function(x)
{
  x <- strsplit(x, "\n")[[1]]
  comNum <- ruleNum <- condNum <- rep(NA, length(x))
  comIdx <- rIdx <- 0
  for(i in seq_along(x))
  {
    tt <- parser(x[i])
    ## "rules=" header: start of a new committee
    if(names(tt)[1] == "rules")
    {
      comIdx <- comIdx + 1
      rIdx <- 0
    }
    comNum[i] <- comIdx
    ## "conds=" header: start of a new rule
    if(names(tt)[1] == "conds")
    {
      rIdx <- rIdx + 1
      cIdx <- 0
    }
    ruleNum[i] <- rIdx
    ## Within a rule, "type" designates the kind of conditional statement
    if(names(tt)[1] == "type")
    {
      cIdx <- cIdx + 1
      condNum[i] <- cIdx
    }
  }
  numCom <- sum(grepl("^rules=", x))
  rulesPerCom <- unlist(lapply(split(ruleNum, as.factor(comNum)), max))
  rulesPerCom <- rulesPerCom[rulesPerCom > 0]
  if (! is.null(rulesPerCom) && numCom > 0)
    names(rulesPerCom) <- paste("Com", 1:numCom)
  splitVar <- rep("", length(x))
  splitVal <- rep(NA, length(x))
  splitCats <- rep("", length(x))
  splitDir <- rep("", length(x))
  ## Type 2 is a simple continuous split, such as
  ##
  ##    nox > 0.668
  ##
  ## encoded as: type="2" att="nox" cut="0.66799998" result=">"
  isType2 <- grepl("^type=\"2\"", x)
  if(any(isType2))
  {
    ## parse once and reuse, instead of re-running type2() per field
    info2 <- type2(x[isType2])
    splitVar[isType2] <- gsub("\"", "", info2$var)
    splitDir[isType2] <- info2$rslt
    splitVal[isType2] <- info2$val
  }
  ## Type 3 is a split of categorical data, such as
  ##
  ##    X4 in {c, d}
  ##
  ## encoded as: type="3" att="X4" elts="c","d"
  isType3 <- grepl("^type=\"3\"", x)
  if(any(isType3))
  {
    info3 <- type3(x[isType3])
    splitVar[isType3] <- info3$var
    ## strip braces, quotes, and spaces from the category list
    cats <- gsub("[{}]", "", info3$val)
    cats <- gsub("\"", "", cats)
    splitCats[isType3] <- gsub(" ", "", cats)
  }
  if(!any(isType2) && !any(isType3)) return(NULL)
  splitData <- data.frame(committee = comNum,
                          rule = ruleNum,
                          variable = splitVar,
                          dir = splitDir,
                          value = as.numeric(splitVal),
                          category = splitCats)
  splitData$type <- ""
  if(any(isType2)) splitData$type[isType2] <- "type2"
  if(any(isType3)) splitData$type[isType3] <- "type3"
  ## keep only rows that actually correspond to a condition
  splitData <- splitData[splitData$variable != "" ,]
  splitData
}
## This function is no longer used
## Pretty-print the committees and rules of a Cubist model given its raw
## text representation `x` (a character vector, one name=value record per
## element). `dig` controls the significant digits used for split values.
## Called for its side effect (cat output); returns nothing useful.
printCubistRules <- function(x, dig = max(3, getOption("digits") - 5))
{
  ## first pass: assign each line a committee / rule / condition index
  comNum <- ruleNum <- condNum <- rep(NA, length(x))
  comIdx <- rIdx <- 0
  for(i in seq(along = x))
  {
    tt <- parser(x[i])
    if(names(tt)[1] == "rules")
    {
      comIdx <- comIdx + 1
      rIdx <- 0
    }
    comNum[i] <-comIdx
    if(names(tt)[1] == "conds")
    {
      rIdx <- rIdx + 1
      cIdx <- 0
    }
    ruleNum[i] <-rIdx
    if(names(tt)[1] == "type")
    {
      cIdx <- cIdx + 1
      condNum[i] <- cIdx
    }
  }
  numCom <- sum(grepl("^rules=", x))
  rulesPerCom <- unlist(lapply(split(ruleNum, as.factor(comNum)), max))
  rulesPerCom <- rulesPerCom[rulesPerCom > 0]
  names(rulesPerCom) <- paste("Com", 1:numCom)
  cat("Number of committees:", numCom, "\n")
  cat("Number of rules per committees:",
      paste(rulesPerCom, collapse = ", "), "\n\n")
  isNewRule <- ifelse(grepl("^conds=", x), TRUE, FALSE)
  isEqn <- ifelse(grepl("^coeff=", x), TRUE, FALSE)
  ## render each condition line as readable text via type2()/type3()
  cond <- rep("", length(x))
  isType2 <- grepl("^type=\"2\"", x)
  if(any(isType2)) cond[isType2] <- type2(x[isType2], dig = dig)$text
  isType3 <- grepl("^type=\"3\"", x)
  if(any(isType3)) cond[isType3] <- type3(x[isType3])$text
  ## (recomputed; identical to the ifelse() version above)
  isEqn <- grepl("^coeff=", x)
  eqtn <- rep("", length(x))
  eqtn[isEqn] <- eqn(x[isEqn], dig = dig)
  ## per-rule case coverage comes from the "cover" field of conds= lines
  tmp <- x[isNewRule]
  tmp <- parser(tmp)
  ruleN <- rep(NA, length(x))
  ruleN[isNewRule] <- as.numeric(unlist(lapply(tmp, function(x) x["cover"])))
  for(i in seq(along = x))
  {
    if(isNewRule[i])
    {
      cat("Rule ", comNum[i], "/", ruleNum[i], ": (n=",
          ruleN[i], ")\n", sep = "")
      cat(" If\n")
    } else {
      if(cond[i] != "")
      {
        cat(" |", cond[i], "\n")
        ## NOTE(review): cond[i+1] reads one past the end when i is the last
        ## element (yielding NA and an error in if()); presumably model text
        ## never ends on a condition line — verify before resurrecting.
        if(cond[i+1] == "")
        {
          cat(" Then\n")
          cat(" prediction =", eqtn[i+1], "\n\n")
        }
      }
    }
  }
}
## Parse type 3 (categorical split) model lines of the form
##   type="3" att="X4" elts="c","d"
## Returns a list with the attribute name (`var`), the element set as
## written (`val`, braced when there is more than one element), and a
## readable rendering (`text`, using "in" for sets and "=" otherwise).
## Vectorized over `x`.
type3 <- function(x)
{
  ## field offsets within each raw line
  attPos <- regexpr("att=", x)
  eltPos <- regexpr("elts=", x)
  var <- substring(x, attPos + 4, eltPos - 2)
  val <- substring(x, eltPos + 5)
  ## more than one element <=> at least one comma in the elts= field
  hasMany <- grepl(",", val)
  val <- gsub(",", ", ", val)
  val <- ifelse(hasMany, paste("{", val, "}", sep = ""), val)
  op <- ifelse(hasMany, "in", "=")
  list(var = var, val = val, text = paste(var, op, val))
}
## Parse type 2 (continuous split) model lines of the form
##   type="2" att="nox" cut="0.66799998" result=">"
## or, for missing-value conditions, lines carrying val= instead of cut=.
## Returns a list with the attribute (`var`), the cut point (`val`, numeric,
## formatted to `dig` significant digits; NA for missing-value rules), the
## comparison operator (`rslt`), and a readable rendering (`text`).
## Vectorized over `x`.
type2 <- function(x, dig = 3)
{
  x <- gsub("\"", "", x)
  ## field offsets within each (unquoted) raw line
  attPos <- regexpr("att=", x)
  cutPos <- regexpr("cut=", x)
  resPos <- regexpr("result=", x)
  valPos <- regexpr("val=", x)
  var <- val <- rslt <- rep("", length(x))
  ## a val= field with no cut= encodes a missing-value condition
  noCut <- cutPos < 1 & valPos > 0
  if(any(noCut))
  {
    var[noCut] <- substring(x[noCut], attPos[noCut] + 4, valPos[noCut] - 2)
    val[noCut] <- "NA"
    rslt[noCut] <- "="
  }
  hasCut <- !noCut
  if(any(hasCut))
  {
    var[hasCut] <- substring(x[hasCut], attPos[hasCut] + 4, cutPos[hasCut] - 2)
    val[hasCut] <- substring(x[hasCut], cutPos[hasCut] + 4, resPos[hasCut] - 1)
    val[hasCut] <- format(as.numeric(val[hasCut]), digits = dig)
    rslt[hasCut] <- substring(x[hasCut], resPos[hasCut] + 7)
  }
  ## `text` uses the formatted character value; `val` is returned numeric
  list(var = var, val = as.numeric(val), rslt = rslt,
       text = paste(var, rslt, val))
}
## Parse one or more "coeff=... att=... coeff=... att=..." model lines into
## linear-model equations.
##
## Each line encodes an intercept (the first coeff=) followed by
## att=/coeff= pairs for the predictors used by that rule. With
## text = TRUE, returns a list of equation strings ("b0 + b1 x1 - ...")
## formatted to `dig` significant digits. With text = FALSE, returns a list
## of named numeric coefficient vectors ("(Intercept)" first); `varNames`,
## used only in that mode, pads each vector with NA entries for predictors
## absent from the rule and fixes a common column order.
eqn <- function(x, dig = 10, text = TRUE, varNames = NULL)
{
  x <- gsub("\"", "", x)
  out <- vector(mode = "list", length = length(x))
  for(j in seq(along = x))
  {
    ## start positions of the alternating coeff= (value) / att= (name) tokens
    starts <- gregexpr("(coeff=)|(att=)", x[j])[[1]]
    p <- (length(starts) - 1)/2
    ## (allocated but never used)
    vars <- vector(mode = "numeric", length = p + 1)
    tmp <- vector(mode = "character", length = length(starts))
    for(i in seq(along = starts))
    {
      ## each token runs to just before the next token (or to end of line)
      if(i < length(starts))
      {
        txt <- substring(x[j], starts[i], starts[i + 1] - 2)
      } else txt <- substring(x[j], starts[i])
      tmp[i] <- gsub("(coeff=)|(att=)", "", txt)
    }
    ## odd positions hold coefficient values, even positions variable names
    valSeq <- seq(1, length(tmp), by = 2)
    vals <- as.double(tmp[valSeq])
    nms <- tmp[-valSeq]
    if(text)
    {
      ## build the equation string with explicit "+"/"-" between terms
      signs <- sign(vals)
      vals <- abs(vals)
      for(i in seq(along = vals))
      {
        if(i == 1)
        {
          txt <- ifelse(signs[1] == -1,
                        format(-vals[1], digits = dig),
                        format(vals[1], digits = dig))
        } else {
          tmp2 <- ifelse(signs[i] == -1,
                         paste("-", format(vals[i], digits = dig)),
                         paste("+", format(vals[i], digits = dig)))
          txt <- paste(txt, tmp2, nms[i-1])
        }
      }
      out[j] <- txt
    } else {
      nms <- c("(Intercept)", nms)
      names(vals) <- nms
      if(!is.null(varNames))
      {
        ## pad with NA for predictors this rule does not use, then reorder
        vars2 <- varNames[!(varNames %in% nms)]
        #cat("j", j, "\tcoefs:", length(vals), "\tother:", length(vars2))
        vals2 <- rep(NA, length(vars2))
        names(vals2) <- vars2
        vals <- c(vals, vals2)
        newNames <- c("(Intercept)", varNames)
        vals <- vals[newNames]
      }
      #cat("\tfinal:", length(vals), "\n")
      out[[j]] <- vals
    }
  }
  out
}
## Split each model line into a named character vector: lines are
## space-separated "name=value" tokens; token names become element names and
## the (still-quoted) values become the elements. A single input line is
## returned unwrapped; multiple lines come back as a list of such vectors.
parser <- function(x)
{
  tokens <- strsplit(x, " ")
  parsed <- lapply(tokens,
                   function(line)
                   {
                     kv <- strsplit(line, "=")
                     vals <- unlist(lapply(kv, function(pair) pair[2]))
                     names(vals) <- unlist(lapply(kv, function(pair) pair[1]))
                     vals
                   })
  ## unwrap the one-line case
  if(length(parsed) == 1) parsed <- parsed[[1]]
  parsed
}
#' @importFrom stats reshape
## coef() method for cubist objects. Walks the raw model text, pairing each
## coefficient line with its committee and rule, and returns a wide data
## frame: one row per (committee, rule), one column per model term
## ("(Intercept)" plus predictors), plus committee and rule id columns.
## `varNames` optionally pads the result with columns for predictors that
## appear in no rule (see eqn()).
coef.cubist <- function(object, varNames = NULL, ...) {
  x <- object$model
  x <- strsplit(x, "\n")[[1]]
  comNum <- ruleNum <- condNum <- rep(NA, length(x))
  comIdx <- rIdx <- 0
  ## assign each line to its committee / rule / condition index
  for (i in seq_along(x)) {
    tt <- parser(x[i])
    if (names(tt)[1] == "rules") {
      comIdx <- comIdx + 1
      rIdx <- 0
    }
    comNum[i] <- comIdx
    if (names(tt)[1] == "conds") {
      rIdx <- rIdx + 1
      cIdx <- 0
    }
    ruleNum[i] <- rIdx
    if (names(tt)[1] == "type") {
      cIdx <- cIdx + 1
      condNum[i] <- cIdx
    }
  }
  ## lines holding the linear-model coefficients
  isEqn <- grepl("^coeff=", x)
  coefs <-
    eqn(x[isEqn],
        dig = 0,
        text = FALSE,
        varNames = varNames)
  dims <- unlist(lapply(coefs, length))
  coefs <- do.call("c", coefs)
  ## replicate committee/rule ids so each coefficient carries its origin
  coms <- rep(comNum[isEqn], dims)
  rls <- rep(ruleNum[isEqn], dims)
  out <-
    data.frame(
      tmp = paste(coms, rls, sep = "."),
      value = coefs,
      var = names(coefs)
    )
  ## long -> wide: one row per committee.rule, one column per variable
  out <-
    reshape(
      out,
      direction = "wide",
      v.names = "value",
      timevar = "var",
      idvar = "tmp"
    )
  colnames(out) <- gsub("value.", "", colnames(out), fixed = TRUE)
  tmp <- strsplit(as.character(out$tmp), ".", fixed = TRUE)
  out$committee <- unlist(lapply(tmp, function(x) x[1]))
  out$rule <- unlist(lapply(tmp, function(x) x[2]))
  out$tmp <- NULL
  out
}
|
708f1d5e2c3d1706de319d15153368153475a58e
|
498da3ea40beb28640eba6418869b020b9d9ea52
|
/tests/testthat/test_pathways.R
|
2b8ab8ae04b7a6f3a9beda7d864f4737edad4e28
|
[] |
no_license
|
GuangchuangYu/fgsea
|
14bd5cac15a4f96a44b591917a9cab7fdb536958
|
ea22e290dcf7ee55ebd086ead321ad0b6d0f49f9
|
refs/heads/master
| 2021-01-13T08:19:09.140795
| 2016-10-24T11:41:21
| 2016-10-24T11:41:21
| 71,782,575
| 2
| 0
| null | 2016-10-24T11:35:20
| 2016-10-24T11:35:19
| null |
UTF-8
|
R
| false
| false
| 486
|
r
|
test_pathways.R
|
context("Pathways")

test_that("reactomePathways works", {
  ## reactome.db is an optional dependency; skip cleanly when absent
  ## instead of testing requireNamespace() by hand
  skip_if_not_installed("reactome.db")
  data(exampleRanks)
  pathways <- reactomePathways(names(exampleRanks))
  expect_true("11461" %in% pathways$`Chromatin organization`)
})

test_that("gmtPathways works", {
  ## parse the GMT file shipped with the package and check one known entry
  pathways <- gmtPathways(system.file("extdata", "mouse.reactome.gmt", package="fgsea"))
  expect_true("11461" %in% pathways$`5992314_Chromatin_organization`)
})
|
f4ca15728052206b6d3f80fe5e3877fd2058c8f4
|
efa46ff6a91d57fcb8e1c14a36b46f8ac0ec76dc
|
/getData.R
|
d6579335b378a2fb569c2c7d093bc93ad76392d4
|
[] |
no_license
|
chtiprog/ExData_Plotting1
|
a8cf407c83c8353b33ca3d679f4712d53c298648
|
2896d0ed2c51e489de3e9422cc6d067936915254
|
refs/heads/master
| 2021-01-18T03:59:50.217896
| 2015-03-08T07:07:19
| 2015-03-08T07:07:19
| 30,431,200
| 0
| 0
| null | 2015-02-06T20:18:29
| 2015-02-06T20:18:29
| null |
UTF-8
|
R
| false
| false
| 875
|
r
|
getData.R
|
## getData.R — download the UCI household power consumption archive (if not
## already present), read it, and subset it to the two assignment dates.
## Leaves a data frame named `data` in the workspace for the plot scripts.

fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"

# Download and unzip the data file if it does not exist already
if (!file.exists("household_power_consumption.zip")) {
  download.file(fileUrl, destfile="household_power_consumption.zip", method="curl")
  unzip("household_power_consumption.zip")
}

# Read the data
## Date and Time come in as character (parsed together below); the remaining
## seven measurement columns are numeric. "?" marks missing values.
columnClasses <- c("character","character","numeric","numeric", "numeric",
                   "numeric","numeric","numeric","numeric")
data <- read.table("household_power_consumption.txt", header=TRUE, sep=';', na.strings="?",
                   colClasses=columnClasses)

# Subset the data with the only 2 dates concerned by the assignment
data <- data[data$Date %in% c("1/2/2007", "2/2/2007"),]

# Create a new column with datetime
## NOTE(review): "DataTime" looks like a typo for "DateTime", but it is kept
## as-is because downstream plot scripts may reference this column name.
data$DataTime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
|
cdae5722b075ad6c8e4669b584f058890fa2f1b7
|
285d35f03c59b3eca8a8639e30009cc020ee7fbf
|
/03-txome/20200912_tpm_analysis.R
|
593d990dc7b89cdd90952585733c06722b5043d3
|
[] |
no_license
|
octopode/cteno-lipids-2021
|
5a039c0506111cc3bbd6e689bd7bf02ac585dc1c
|
b2c66cf59149e5c79918bdcc317303a23e59e338
|
refs/heads/master
| 2023-03-26T07:59:36.219753
| 2021-03-12T18:19:25
| 2021-03-12T18:19:25
| 289,368,885
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,594
|
r
|
20200912_tpm_analysis.R
|
library(tidyverse)
library(ggpubr)

## Species-level environment table (depth/temperature, per the file name);
## joined to the TPM tables below via the "species" column.
env_data <- read_tsv("/Users/jwinnikoff/Documents/MBARI/Lipids/cteno-lipids-2020/metadata/20200912_Cteno_depth_temp_EST.tsv")
## Directory holding one kallisto abundance table (.tsv) per gene family
dir_tpms <- "/Users/jwinnikoff/Documents/MBARI/Lipids/cteno-lipids-2020/kallisto"

## kallisto file name -> gene annotation label (used for plot titles)
key_annot = c(
  "OG0001664.tsv" = "ELOV6",
  "OG0004874.tsv" = "ELOV2",
  "OG0006673.tsv" = "SCD1",
  "OG0009524.tsv" = "ELOV3/6"
)
## Per-species depth class (1/2/3), used below as the bar-fill variable.
## NOTE(review): the exact meaning of the three classes is assumed from the
## name ("3-state depth") — confirm against the project metadata.
depth_3state = c(
  "CTE_Aulacoctena_acuminata" = 2,
  "CTE_Bathyctena_chuni" = 3,
  "CTE_Bathocyroe_fosteri" = 2,
  "CTE_Benthic_cteno" = 3,
  "CTE_Beroe_abyssicola" = 2,
  "CTE_Beroe_cucumis" = 2,
  "CTE_Beroe_forskalii" = 1,
  "CTE_Beroe_ovata" = 1,
  "CTE_Bolinopsis_infundibulum" = 2,
  "CTE_Bolinopsis_vitrea" = 1,
  "CTE_Cestum_veneris" = 1,
  "CTE_Charistephane_fugiens" = 2,
  "CTE_Ctenoceros_spclear" = 2,
  "CTE_Cydippid_spblack" = 3,
  "CTE_Cydippid_sppeach" = 3,
  "CTE_Cydippid_spredx" = 2,
  "CTE_Deiopea_kaloktenota" = 1,
  "CTE_Dryodora_glandiformis" = 1,
  "CTE_Euplokamis_dunlapae" = 1,
  "CTE_Haeckelia_beehleri" = 1,
  "CTE_Haeckelia_rubra" = 1,
  "CTE_Hormiphora_californensis" = 1,
  "CTE_Kiyohimea_sp" = 2,
  "CTE_Lampocteis_cruentiventer" = 2,
  "CTE_Lampea_deep" = 3,
  "CTE_Lampea_lactea" = 1,
  "CTE_Leucothea_pulchra" = 1,
  "CTE_Llyria_spbenthic" = 3,
  "CTE_Llyria_spcopper" = 3,
  "CTE_Llyria_spdeep" = 3,
  "CTE_Mertensia_ovum" = 1,
  ## NOTE(review): this key has a trailing space — probably a typo, but it
  ## is kept in case the species strings derived from target_id match it;
  ## verify against the kallisto output.
  "CTE_Nepheloctena_red " = 2,
  "CTE_Nepheloctena_whit" = 2,
  "CTE_Ocyropsis_crystallina" = 1,
  "CTE_Ocyropsis_maculata" = 1,
  "CTE_Tetraphalia_sp" = 1,
  "CTE_Thalassocalyce_inconstans" = 2,
  "CTE_Velamen_parallelum" = 1,
  "CTE_Vermillion_lobate" = 2,
  "CTE_Weird_cteno" = 2
)
# read TPMs
## Load every kallisto table in dir_tpms into one long data frame; each row
## is tagged with its source file name (final column "file_data").
readdata <- list.files(path = dir_tpms, full.names = T) %>%
  lapply(
    function(file_data){
      data_this_file <- file_data %>%
        read_tsv(col_names=FALSE) %>%
        mutate(file = file_data %>% basename())
      return(data_this_file)
    }
  ) %>%
  do.call(rbind, .) %>%
  magrittr::set_colnames(c("target_id", "length", "eff_length", "est_counts", "tpm", "file_data"))

## Per species x file: keep the row(s) with the highest TPM, then attach the
## environment data. The species string is the first "|"-delimited field of
## target_id with digits stripped.
reads_top <- readdata %>%
  rowwise() %>%
  mutate(species = strsplit(target_id, "\\|") %>% unlist() %>% .[1] %>% gsub("[0-9]", "", x=.)) %>%
  group_by(species, file_data) %>%
  filter(tpm == max(tpm)) %>%
  left_join(env_data, by="species")
# max by species
## Bar plot of the top-isoform TPM per species, restricted to cool-water
## species (temp_med <= 7.5), one panel per kallisto file.
reads_top %>%
  filter(temp_med <= 7.5) %>%
  ungroup() %>%
  ggplot(aes(x=species, y=tpm)) +
  facet_wrap(~file_data, ncol=4) +
  geom_col() +
  #geom_text(aes(label=species)) +
  theme_pubr() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))

## Per sample x file: total and mean TPM across all isoforms. complete()
## inserts tpm=0 rows for missing sample x file combinations so every sample
## appears in every panel.
## NOTE(review): rows added by complete() carry NA species, so their
## depth_3state lookup below yields NA — confirm this is intended.
reads_sum <- readdata %>%
  rowwise() %>%
  mutate(
    sample = strsplit(target_id, "\\|") %>% unlist() %>% .[1],
    species = sample %>% gsub("[0-9]", "", x=.)
  ) %>%
  ungroup() %>%
  complete(sample, file_data, fill=list("tpm"=0)) %>%
  group_by(sample, species, file_data) %>%
  summarize(
    tpm_tot = sum(tpm),
    tpm_avg = mean(tpm)
  ) %>%
  rowwise() %>%
  mutate(depthclass = depth_3state[species])
#left_join(env_data, by="species") %>%
#left_join(read_tsv("/Users/jwinnikoff/Documents/MBARI/lab-work/converge/datasets/ctenoPK/trait/Cteno_depth_temp_desc_20180529.txt"), by="sp")
# plot total by species
## Bar plot of total TPM per sample for the SCD1 file only.
reads_sum %>%
  filter(file_data == "OG0006673.tsv") %>%
  #filter(temp_med <= 7.5) %>%
  ungroup() %>%
  ggplot(aes(x=sample, y=tpm_tot)) +
  #facet_wrap(~file_data, ncol=4) +
  geom_col() +
  #geom_text(aes(label=species)) +
  theme_pubr() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
  labs(x="sample", y="transcripts per million, sum of all isoforms")

## One bar plot per annotated gene family (two species and one file are
## excluded), bars colored by depth class; yields a list of ggplot objects.
barplots <- reads_sum %>%
  filter(species != "CTE_Euplokamis_dunlapae") %>%
  filter(species != "CTE_Vermillion_lobate") %>%
  filter(file_data != "OG0009524.tsv") %>%
  mutate(annot = key_annot[file_data]) %>%
  group_by(annot) %>%
  group_split() %>%
  lapply(
    .,
    function(data){
      gg <- data %>%
        ggplot(aes(x=sample, y=tpm_tot, fill=desc(depthclass))) +
        geom_col() +
        theme_pubr() +
        theme(
          axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5),
          legend.position = "none"
        ) +
        labs(
          title = data %>% pull(annot) %>% first(),
          x = "sample",
          y = "transcripts per million, sum of all isoforms"
        )
      return(gg)
    }
  )
## Strip x-axis labels from the first two panels, then stack all three
## plots into a single column with the last (labeled) panel double height.
barplots_pretty <- barplots %>%
  .[1:2] %>%
  lapply(
    .,
    function(gg){gg + theme(axis.text.x = element_blank()) + xlab(NULL)}
  ) %>%
  c(., barplots %>% .[3])

## grid.arrange() lives in gridExtra, which is never attached with
## library() in this script; qualify the call so it does not fail with
## "could not find function". (`.` as a named argument suppresses
## magrittr's first-argument insertion, so behavior is unchanged.)
barplots_pretty %>%
  gridExtra::grid.arrange(grobs=., ncol=1, heights=c(1,1,2))
|
a6a5d6f3b0d63ff9414b0ce64635fc3d38c45036
|
cb6f2a406e75c379a647e0913ac407a2e067c693
|
/man/compute.config.matrices.Rd
|
143052d6723c0745e04692fc9922b22affbb5e8a
|
[] |
no_license
|
NKI-CCB/iTOP
|
9f797340aa9bf90a1bb7b1bb273c7b7f2b59a37a
|
e93ad3a8bbd7754153c57c44afc85970c9b682c2
|
refs/heads/master
| 2021-04-09T16:02:13.978456
| 2018-06-13T08:14:16
| 2018-06-13T08:14:16
| 125,842,771
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,709
|
rd
|
compute.config.matrices.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{compute.config.matrices}
\alias{compute.config.matrices}
\title{Compute configuration matrices}
\usage{
compute.config.matrices(data, similarity_fun = inner.product, center = TRUE,
mod.rv = TRUE)
}
\arguments{
\item{data}{List of datasets.}
\item{similarity_fun}{Either a function pointer to the similarity function to be used for all datasets; or a list of function pointers,
if different similarity functions need to be used for different datasets (default=inner.product).}
\item{center}{Either a boolean indicating whether centering should be used for all datasets; or a list of booleans,
if centering should be used for some datasets but not all of them (default=TRUE).}
\item{mod.rv}{Either a boolean indicating whether the modified RV coefficient should be used for all datasets; or a list of booleans,
if the modified RV should be used for some datasets but not all of them (default=TRUE).}
}
\value{
A list of n configuration matrices, where n is the number of datasets.
}
\description{
Given a list of n data matrices (corresponding to n datasets), this function computes the configuration matrix for each of these
datasets. By default inner product similarity is used, but other similarity measures (such as Jaccard similarity for binary data)
can also be used (see the vignette 'A quick introduction to iTOP' for more information). In addition, the configuration matrices can be centered and prepared for use with
the modified RV coefficient, both of which we will briefly explain here.
}
\details{
The RV coefficient often results in values very close to one when both datasets are not centered around zero, even for orthogonal data.
For inner product similarity and Jaccard similarity, we recommend using centering. However, for some other similarity measures, centering
may not be beneficial (for example, because the measure itself is already centered, such as in the case of Pearson correlation). For more information on
centering of binary (and other non-continuous) data, for which we used kernel centering of the configuration matrix, we refer to our manuscript: Aben et al., 2018, doi.org/10.1101/293993.
The modified RV coefficient was proposed for high-dimensional data, as the regular RV coefficient would result in values close to one even for
orthogonal data. We recommend always using the modified RV coefficient.
}
\examples{
set.seed(2)
n = 100
p = 100
x1 = matrix(rnorm(n*p), n, p)
x2 = x1 + matrix(rnorm(n*p), n, p)
x3 = x2 + matrix(rnorm(n*p), n, p)
data = list(x1=x1, x2=x2, x3=x3)
config_matrices = compute.config.matrices(data)
cors = rv.cor.matrix(config_matrices)
}
|
953823b177812455e308e704a41663d0701cba51
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws/man/clouddirectory.Rd
|
88fdfd944871cf9b0688d95fd1c2d63cbaa8a150
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 13,693
|
rd
|
clouddirectory.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{clouddirectory}
\alias{clouddirectory}
\title{Amazon CloudDirectory}
\usage{
clouddirectory(
config = list(),
credentials = list(),
endpoint = NULL,
region = NULL
)
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{credentials}:} {\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
}}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e. \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
\item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
}}
\item{credentials}{Optional credentials shorthand for the config parameter
\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
}}
\item{endpoint}{Optional shorthand for complete URL to use for the constructed client.}
\item{region}{Optional shorthand for AWS Region used in instantiating the client.}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
Amazon Cloud Directory
Amazon Cloud Directory is a component of the AWS Directory Service that
simplifies the development and management of cloud-scale web, mobile,
and IoT applications. This guide describes the Cloud Directory
operations that you can call programmatically and includes detailed
information on data types and errors. For information about Cloud
Directory features, see \href{https://aws.amazon.com/directoryservice/}{AWS Directory Service} and the \href{https://docs.aws.amazon.com/clouddirectory/latest/developerguide/what_is_cloud_directory.html}{Amazon Cloud Directory Developer Guide}.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- clouddirectory(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical",
sts_regional_endpoint = "string"
),
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string"
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[paws.security.identity:clouddirectory_add_facet_to_object]{add_facet_to_object} \tab Adds a new Facet to an object\cr
\link[paws.security.identity:clouddirectory_apply_schema]{apply_schema} \tab Copies the input published schema, at the specified version, into the Directory with the same name and version as that of the published schema\cr
\link[paws.security.identity:clouddirectory_attach_object]{attach_object} \tab Attaches an existing object to another object\cr
\link[paws.security.identity:clouddirectory_attach_policy]{attach_policy} \tab Attaches a policy object to a regular object\cr
\link[paws.security.identity:clouddirectory_attach_to_index]{attach_to_index} \tab Attaches the specified object to the specified index\cr
\link[paws.security.identity:clouddirectory_attach_typed_link]{attach_typed_link} \tab Attaches a typed link to a specified source and target object\cr
\link[paws.security.identity:clouddirectory_batch_read]{batch_read} \tab Performs all the read operations in a batch\cr
\link[paws.security.identity:clouddirectory_batch_write]{batch_write} \tab Performs all the write operations in a batch\cr
\link[paws.security.identity:clouddirectory_create_directory]{create_directory} \tab Creates a Directory by copying the published schema into the directory\cr
\link[paws.security.identity:clouddirectory_create_facet]{create_facet} \tab Creates a new Facet in a schema\cr
\link[paws.security.identity:clouddirectory_create_index]{create_index} \tab Creates an index object\cr
\link[paws.security.identity:clouddirectory_create_object]{create_object} \tab Creates an object in a Directory\cr
\link[paws.security.identity:clouddirectory_create_schema]{create_schema} \tab Creates a new schema in a development state\cr
\link[paws.security.identity:clouddirectory_create_typed_link_facet]{create_typed_link_facet} \tab Creates a TypedLinkFacet\cr
\link[paws.security.identity:clouddirectory_delete_directory]{delete_directory} \tab Deletes a directory\cr
\link[paws.security.identity:clouddirectory_delete_facet]{delete_facet} \tab Deletes a given Facet\cr
\link[paws.security.identity:clouddirectory_delete_object]{delete_object} \tab Deletes an object and its associated attributes\cr
\link[paws.security.identity:clouddirectory_delete_schema]{delete_schema} \tab Deletes a given schema\cr
\link[paws.security.identity:clouddirectory_delete_typed_link_facet]{delete_typed_link_facet} \tab Deletes a TypedLinkFacet\cr
\link[paws.security.identity:clouddirectory_detach_from_index]{detach_from_index} \tab Detaches the specified object from the specified index\cr
\link[paws.security.identity:clouddirectory_detach_object]{detach_object} \tab Detaches a given object from the parent object\cr
\link[paws.security.identity:clouddirectory_detach_policy]{detach_policy} \tab Detaches a policy from an object\cr
\link[paws.security.identity:clouddirectory_detach_typed_link]{detach_typed_link} \tab Detaches a typed link from a specified source and target object\cr
\link[paws.security.identity:clouddirectory_disable_directory]{disable_directory} \tab Disables the specified directory\cr
\link[paws.security.identity:clouddirectory_enable_directory]{enable_directory} \tab Enables the specified directory\cr
\link[paws.security.identity:clouddirectory_get_applied_schema_version]{get_applied_schema_version} \tab Returns current applied schema version ARN, including the minor version in use\cr
\link[paws.security.identity:clouddirectory_get_directory]{get_directory} \tab Retrieves metadata about a directory\cr
\link[paws.security.identity:clouddirectory_get_facet]{get_facet} \tab Gets details of the Facet, such as facet name, attributes, Rules, or ObjectType\cr
\link[paws.security.identity:clouddirectory_get_link_attributes]{get_link_attributes} \tab Retrieves attributes that are associated with a typed link\cr
\link[paws.security.identity:clouddirectory_get_object_attributes]{get_object_attributes} \tab Retrieves attributes within a facet that are associated with an object\cr
\link[paws.security.identity:clouddirectory_get_object_information]{get_object_information} \tab Retrieves metadata about an object\cr
\link[paws.security.identity:clouddirectory_get_schema_as_json]{get_schema_as_json} \tab Retrieves a JSON representation of the schema\cr
\link[paws.security.identity:clouddirectory_get_typed_link_facet_information]{get_typed_link_facet_information} \tab Returns the identity attribute order for a specific TypedLinkFacet\cr
\link[paws.security.identity:clouddirectory_list_applied_schema_arns]{list_applied_schema_arns} \tab Lists schema major versions applied to a directory\cr
\link[paws.security.identity:clouddirectory_list_attached_indices]{list_attached_indices} \tab Lists indices attached to the specified object\cr
\link[paws.security.identity:clouddirectory_list_development_schema_arns]{list_development_schema_arns} \tab Retrieves each Amazon Resource Name (ARN) of schemas in the development state\cr
\link[paws.security.identity:clouddirectory_list_directories]{list_directories} \tab Lists directories created within an account\cr
\link[paws.security.identity:clouddirectory_list_facet_attributes]{list_facet_attributes} \tab Retrieves attributes attached to the facet\cr
\link[paws.security.identity:clouddirectory_list_facet_names]{list_facet_names} \tab Retrieves the names of facets that exist in a schema\cr
\link[paws.security.identity:clouddirectory_list_incoming_typed_links]{list_incoming_typed_links} \tab Returns a paginated list of all the incoming TypedLinkSpecifier information for an object\cr
\link[paws.security.identity:clouddirectory_list_index]{list_index} \tab Lists objects attached to the specified index\cr
\link[paws.security.identity:clouddirectory_list_managed_schema_arns]{list_managed_schema_arns} \tab Lists the major version families of each managed schema\cr
\link[paws.security.identity:clouddirectory_list_object_attributes]{list_object_attributes} \tab Lists all attributes that are associated with an object\cr
\link[paws.security.identity:clouddirectory_list_object_children]{list_object_children} \tab Returns a paginated list of child objects that are associated with a given object\cr
\link[paws.security.identity:clouddirectory_list_object_parent_paths]{list_object_parent_paths} \tab Retrieves all available parent paths for any object type such as node, leaf node, policy node, and index node objects\cr
\link[paws.security.identity:clouddirectory_list_object_parents]{list_object_parents} \tab Lists parent objects that are associated with a given object in pagination fashion\cr
\link[paws.security.identity:clouddirectory_list_object_policies]{list_object_policies} \tab Returns policies attached to an object in pagination fashion\cr
\link[paws.security.identity:clouddirectory_list_outgoing_typed_links]{list_outgoing_typed_links} \tab Returns a paginated list of all the outgoing TypedLinkSpecifier information for an object\cr
\link[paws.security.identity:clouddirectory_list_policy_attachments]{list_policy_attachments} \tab Returns all of the ObjectIdentifiers to which a given policy is attached\cr
\link[paws.security.identity:clouddirectory_list_published_schema_arns]{list_published_schema_arns} \tab Lists the major version families of each published schema\cr
\link[paws.security.identity:clouddirectory_list_tags_for_resource]{list_tags_for_resource} \tab Returns tags for a resource\cr
\link[paws.security.identity:clouddirectory_list_typed_link_facet_attributes]{list_typed_link_facet_attributes} \tab Returns a paginated list of all attribute definitions for a particular TypedLinkFacet\cr
\link[paws.security.identity:clouddirectory_list_typed_link_facet_names]{list_typed_link_facet_names} \tab Returns a paginated list of TypedLink facet names for a particular schema\cr
\link[paws.security.identity:clouddirectory_lookup_policy]{lookup_policy} \tab Lists all policies from the root of the Directory to the object specified\cr
\link[paws.security.identity:clouddirectory_publish_schema]{publish_schema} \tab Publishes a development schema with a major version and a recommended minor version\cr
\link[paws.security.identity:clouddirectory_put_schema_from_json]{put_schema_from_json} \tab Allows a schema to be updated using JSON upload\cr
\link[paws.security.identity:clouddirectory_remove_facet_from_object]{remove_facet_from_object} \tab Removes the specified facet from the specified object\cr
\link[paws.security.identity:clouddirectory_tag_resource]{tag_resource} \tab An API operation for adding tags to a resource\cr
\link[paws.security.identity:clouddirectory_untag_resource]{untag_resource} \tab An API operation for removing tags from a resource\cr
\link[paws.security.identity:clouddirectory_update_facet]{update_facet} \tab Does the following:\cr
\link[paws.security.identity:clouddirectory_update_link_attributes]{update_link_attributes} \tab Updates a given typed link’s attributes\cr
\link[paws.security.identity:clouddirectory_update_object_attributes]{update_object_attributes} \tab Updates a given object's attributes\cr
\link[paws.security.identity:clouddirectory_update_schema]{update_schema} \tab Updates the schema name with a new name\cr
\link[paws.security.identity:clouddirectory_update_typed_link_facet]{update_typed_link_facet} \tab Updates a TypedLinkFacet\cr
\link[paws.security.identity:clouddirectory_upgrade_applied_schema]{upgrade_applied_schema} \tab Upgrades a single directory in-place using the PublishedSchemaArn with schema updates found in MinorVersion\cr
\link[paws.security.identity:clouddirectory_upgrade_published_schema]{upgrade_published_schema} \tab Upgrades a published schema under a new minor version revision using the current contents of DevelopmentSchemaArn
}
}
\examples{
\dontrun{
svc <- clouddirectory()
svc$add_facet_to_object(
Foo = 123
)
}
}
|
a0767909041acdca1dabfd2b30f84f37283bec7a
|
1b5c6f504c76c0cb0559ea54b1835d1dbe98a27e
|
/plot1.R
|
fc7856e25e8930b4c6e24b0b66a61a62d760b393
|
[] |
no_license
|
Fpschwartz1/ExploratoryDataAnalysis_Project1
|
f4df3ce37f3e28313ea299bbdd70891493fb9026
|
49736f6e32079b38c4adaddd7d8e457739d37420
|
refs/heads/master
| 2016-09-06T16:56:31.513950
| 2014-07-10T23:08:23
| 2014-07-10T23:08:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 799
|
r
|
plot1.R
|
# Exploratory Data Analysis, course project 1 -- plot 1.
# Load the full household power consumption data set. The file fits in RAM,
# so the whole table is read at once; "?" marks missing values.
power <- read.table("household_power_consumption.txt", sep = ";",
                    na.strings = "?", header = TRUE,
                    colClasses = c("character", "character",
                                   rep("numeric", 7)))
# Keep only the two target dates, dropping rows where the global active
# power reading is missing.
keep <- (power$Date == "1/2/2007" | power$Date == "2/2/2007") &
  !is.na(power$Global_active_power)
power <- power[keep, 1:9]
# Use a transparent plot background.
par(bg = "transparent")
# Histogram of the Global_active_power variable.
hist(power$Global_active_power,
     col = "orangered", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
# Copy the on-screen plot to a PNG file, then close the PNG device.
dev.copy(png, file = "plot1.png")
dev.off()
|
8e20679c4b9df0f694f85fa2c0dc9ddeb0af154c
|
ae4f4f0ed037b6fa643cf51c74627f63a5c207a9
|
/Demographics_T1Bifactors.R
|
7e31c83a8a93584bf007ace0a9619024475fe235
|
[] |
no_license
|
PennLINC/KaczkurkinPark_BifactorStructure
|
bd49a0c70497e066e852ffa018cabd6806c0fa00
|
2bb62f8c398f93e22bf7c8f384649a01de4eb211
|
refs/heads/master
| 2021-10-23T09:24:48.906784
| 2019-03-16T20:44:21
| 2019-03-16T20:44:21
| 157,735,704
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,690
|
r
|
Demographics_T1Bifactors.R
|
###############################
#### Table 1: Demographics ####
###############################

###############################
### Load data and libraries ###
###############################

subjData <- readRDS("/data/jux/BBL/projects/pncT1AcrossDisorder/subjectData/n1394_T1_subjData.rds")

#Load libraries
library(plyr)
library(varhandle)

#Total sample size (1394 per the input file name). Previously the value 1394
#was hard-coded in every percentage below, which would silently produce wrong
#percentages if the input file ever changed.
nSubj <- nrow(subjData)

#################################
### Total sample demographics ###
#################################

#Mean, sd, and range of age for the total sample
meanAge_total <- mean(subjData$age)
sdAge_total <- sd(subjData$age)
rangeAge_total <- range(subjData$age)

#Total number of males and females (0=male, 1=female)
sexTable_total <- table(subjData$sex)

#Percentage of females (sex is coded 0/1, so the mean is the proportion female)
subjData$sex <- unfactor(subjData$sex)
percentFemale <- mean(subjData$sex)

#Total number of whites (0=non-white, 1=white)
whiteTable_total <- table(subjData$white)

#Percentage of White (same 0/1 coding, so the mean is the proportion white)
subjData$white <- unfactor(subjData$white)
percentWhite <- mean(subjData$white)

#Maternal Education Summary table
medu1_total <- table(subjData$medu1)

#Maternal education: 12 years or less / greater than 12 years
medu1_12orLess <- length(which(subjData$medu1 <= 12))
medu1_13andUp <- length(which(subjData$medu1 > 12))

#Maternal education: missing
#FIX: the original counted is.na(subjData$medu), which only works through
#partial matching of `$` against the medu1 column; spell the name out.
medu1_missing <- length(which(is.na(subjData$medu1)))

#Percentages for maternal education
percent12orLess <- medu1_12orLess/nSubj
percent13andUp <- medu1_13andUp/nSubj
percentMissing <- medu1_missing/nSubj

#########################
#### Psychopathology ####
#########################

#For each diagnosis: N with the diagnosis (columns assumed 0/1 indicators)
#and the proportion of the sample.
#Typically Developing
Td_total <- sum(subjData$Td,na.rm=TRUE)
Td_percent <- Td_total/nSubj
#ADHD Diagnosis
Add_total <- sum(subjData$Add,na.rm=TRUE)
Add_percent <- Add_total/nSubj
#Agoraphobia Diagnosis
Agr_total <- sum(subjData$Agr,na.rm=TRUE)
Agr_percent <- Agr_total/nSubj
#Anorexia Diagnosis
Ano_total <- sum(subjData$Ano,na.rm=TRUE)
Ano_percent <- Ano_total/nSubj
#Bulimia Diagnosis
Bul_total <- sum(subjData$Bul,na.rm=TRUE)
Bul_percent <- Bul_total/nSubj
#Conduct Disorder Diagnosis
Con_total <- sum(subjData$Con,na.rm=TRUE)
Con_percent <- Con_total/nSubj
#Generalized Anxiety Disorder Diagnosis
Gad_total <- sum(subjData$Gad,na.rm=TRUE)
Gad_percent <- Gad_total/nSubj
#Major Depression Diagnosis
Mdd_total <- sum(subjData$Mdd,na.rm=TRUE)
Mdd_percent <- Mdd_total/nSubj
#Mania Diagnosis
Man_total <- sum(subjData$Man,na.rm=TRUE)
Man_percent <- Man_total/nSubj
#OCD Diagnosis
Ocd_total <- sum(subjData$Ocd,na.rm=TRUE)
Ocd_percent <- Ocd_total/nSubj
#ODD Diagnosis
Odd_total <- sum(subjData$Odd,na.rm=TRUE)
Odd_percent <- Odd_total/nSubj
#Panic Diagnosis
Pan_total <- sum(subjData$Pan,na.rm=TRUE)
Pan_percent <- Pan_total/nSubj
#Psychosis spectrum Diagnosis
Ps_total <- sum(subjData$Ps,na.rm=TRUE)
Ps_percent <- Ps_total/nSubj
#PTSD Diagnosis
Ptd_total <- sum(subjData$Ptd,na.rm=TRUE)
Ptd_percent <- Ptd_total/nSubj
#Separation Anxiety Diagnosis
Sep_total <- sum(subjData$Sep,na.rm=TRUE)
Sep_percent <- Sep_total/nSubj
#Social Phobia Diagnosis
Soc_total <- sum(subjData$Soc,na.rm=TRUE)
Soc_percent <- Soc_total/nSubj
#Specific Phobia Diagnosis
Sph_total <- sum(subjData$Sph,na.rm=TRUE)
Sph_percent <- Sph_total/nSubj

#########################################
#### Percentages on psychiatric meds ####
#########################################

#Each medclass_* column is assumed to be a 0/1 indicator, so its mean is the
#proportion of the sample on that medication class -- TODO confirm coding.
antiPsy <- mean(subjData$medclass_Antipsychotic)
antiCon <- mean(subjData$medclass_Anticonvulsant)
antiDep <- mean(subjData$medclass_Antidepressant)
benzo <- mean(subjData$medclass_Benzodiazepine)
stim <- mean(subjData$medclass_Stimulant)
nonStimADHD <- mean(subjData$medclass_NonstimulantADHDmed)
Lithium <- mean(subjData$medclass_Lithium)
Other <- mean(subjData$medclass_Other)
|
b1b7ea49074ca6db01e41403fdbb906bc530d922
|
ff61b2eececdbd441514e6e693e1a7295301ff66
|
/plot1.R
|
15f66d132d057192a9392757a8ec0c1c1b6c788a
|
[] |
no_license
|
boukevanderpol/ExData_Plotting1
|
a1f477d5210204a392c667c16d3fc57343b49ac7
|
eeeb46e185a249c12cfc1f45bff0ef2660c8b110
|
refs/heads/master
| 2021-01-22T11:48:09.125919
| 2016-01-05T13:44:22
| 2016-01-05T13:44:22
| 48,806,642
| 0
| 0
| null | 2015-12-30T15:22:54
| 2015-12-30T15:22:54
| null |
UTF-8
|
R
| false
| false
| 1,222
|
r
|
plot1.R
|
# Setting the working directory where data is located.
# NOTE(review): hard-coding setwd() makes the script non-portable; prefer
# running from the project directory instead.
setwd("~/R/EDA/project1")

# loading packages
library(data.table)
library(dplyr)
library(tidyr)
library(readr)
#library(lattice)
#library(ggplot2)
library(lubridate)

# Loading the data into R. Date and Time are read as character and converted
# below with lubridate.
# NOTE(review): the distributed household_power_consumption.txt is
# semicolon-delimited with "?" for NA, which plain read_csv() does not
# handle -- confirm this file was pre-converted, otherwise use
# read_delim(delim = ";", na = "?").
x <- read_csv(file = "household_power_consumption.txt",
              col_types = cols(
                Date = "c",
                Time = "c" ,
                Global_active_power = "d",
                Global_reactive_power = "d",
                Voltage = "d",
                Global_intensity = "d",
                Sub_metering_1 = "d",
                Sub_metering_2 = "d",
                Sub_metering_3 = "d"
              )
)

# This assignment concerns two dates => Filter rows with these two dates
x <- filter(x, Date %in% c("1/2/2007", "2/2/2007"))

# Convert classes of variables (with lubridate package)
x$Date <- dmy(x$Date)
x$Time <- hms(x$Time)

# Create png
png("plot1.png", width = 480, height = 480)

# Create plot1 - histogram
# FIX: the main title was misspelled "Golbal Active Power".
hist(x$Global_active_power,
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")

# Close connection png
dev.off()
|
192985ee87f5e25fedcb8b3152d2d4b8e9a5a5d8
|
47d717b2d089c1a02c518c6121a8c50e9ea79c93
|
/man/exampleProteomicsData.Rd
|
a4e34ae4fa1e8b93b23ab1eeaf1326138986cdf2
|
[] |
no_license
|
elolab/PowerExplorer
|
9bd0fbee338de3137910ee42a28c99ef3b77f7c6
|
f5cbe6e70bb2800ccfbaf3be8c72cadc0b3c9129
|
refs/heads/master
| 2023-03-31T12:40:23.303622
| 2018-11-07T11:50:19
| 2018-11-07T11:50:19
| 357,198,197
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 710
|
rd
|
exampleProteomicsData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exampleProteomicsData.R
\docType{data}
\name{exampleProteomicsData}
\alias{exampleProteomicsData}
\title{Randomly Generated Proteomics Dataset}
\format{A list containing \code{"dataMatrix"} and \code{"groupVec"}}
\usage{
data(exampleProteomicsData)
}
\description{
This is a randomly generated proteomics dataset with
130 protein entries (rows) and 15 samples (columns) in
3 sample groups A, B and C, the log2 fold change (LFC)
between group B and A is specified as 1,
between C and B is also 1, thus the LFC is 2 between C and A.
}
\examples{
data(exampleProteomicsData)
head(exampleProteomicsData$dataMatrix)
}
\keyword{datasets}
|
40874601d95cf871b01ee76003560f42f77badd3
|
5a9beb9f519afb900b0329ace2d0f132c2848cc8
|
/Text Mining with R/Sentiment Analysis with Tidy Data.R
|
fdf76e80e71e7ed9618c1abfada673824d8a1f92
|
[] |
no_license
|
ZehongZ/R-Studio
|
d6d8525d29c4fc005f07a6db252f427f844ad3b1
|
1c06ea907552e8958f476e1ad3e9a9efe31e8549
|
refs/heads/master
| 2021-07-09T10:58:00.965761
| 2020-08-28T07:54:16
| 2020-08-28T07:54:16
| 173,672,330
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,207
|
r
|
Sentiment Analysis with Tidy Data.R
|
# Sentiment analysis of Jane Austen's novels with tidy data (follows
# "Text Mining with R", ch. 2). Requires tidytext, janeaustenr, dplyr,
# stringr, tidyr, and ggplot2.
#The sentiments Dataset
library(tidytext)
data("sentiments")
#get_sentiments() to get specific sentiment lexicons without the columns that are not used in that lexicon
get_sentiments("afinn")
get_sentiments("bing")
get_sentiments("nrc")
#Sentiment Analysis with Inner Join
library(janeaustenr)
library(dplyr)
library(stringr)
# One row per word, keeping the line number and the chapter (detected by
# matching "chapter" + roman/arabic numerals at line start) so sentiment can
# be tracked through each book.
tidy_books<-austen_books()%>%
group_by(book)%>%
mutate(linenumber=row_number(),
chapter=cumsum(str_detect(text, regex("^chapter [\\divxlc]",ignore_case=TRUE))))%>%
ungroup()%>%
unnest_tokens(word, text)
#Filter() for the joy words
nrcjoy<-get_sentiments("nrc")%>%
filter(sentiment=="joy")
# Most common joy words in "Emma"
tidy_books%>%
filter(book=="Emma")%>%
inner_join(nrcjoy)%>%
count(word, sort=TRUE)
#Examine how sentiment changes throughout each novel
library(tidyr)
# Net sentiment (positive minus negative word counts) per 80-line block
janeaustensentiment<-tidy_books%>%
inner_join(get_sentiments("bing"))%>%
count(book, index=linenumber%/% 80, sentiment)%>% #%/% does integer division
spread(sentiment, n, fill=0)%>%
mutate(sentiment=positive-negative)
#Plot these sentiment scores
library(ggplot2)
ggplot(janeaustensentiment, aes(index, sentiment, fill=book))+
geom_col(show.legend = FALSE)+
facet_wrap(~book, ncol=2, scales = "free_x")
#Comparing the Three Sentiment Dictionaries
pride_prejudice<-tidy_books%>%
filter(book=="Pride & Prejudice")
pride_prejudice
# AFINN assigns each word a numeric score, so the sentiment of each 80-line
# chunk is the sum of the scores.
# NOTE(review): newer tidytext releases name the AFINN column "value"
# instead of "score" -- confirm against the installed lexicon version.
afinn<-pride_prejudice%>%
inner_join(get_sentiments("afinn"))%>%
group_by(index=linenumber %/% 80)%>%
summarise(sentiment=sum(score))%>%
mutate(method="AFINN")
# FIX: the two pipelines passed to bind_rows() must be separated by a comma.
# The original ended the first pipeline with %>%, which piped the
# "Bing et al." data frame into pride_prejudice and fails at run time.
bing_and_nrc<-bind_rows(
pride_prejudice%>%
inner_join(get_sentiments("bing"))%>%
mutate(method="Bing et al."),
pride_prejudice%>%
inner_join(get_sentiments("nrc")%>%
filter(sentiment %in% c("positive","negative")))%>%
mutate(method="NRC"))%>%
count(method, index=linenumber %/% 80, sentiment)%>%
spread(sentiment,n,fill=0)%>%
mutate(sentiment=positive-negative)
#Bind together and visualize
bind_rows(afinn, bing_and_nrc)%>%
ggplot(aes(index, sentiment, fill=method))+
geom_col(show.legend = FALSE)+
facet_wrap(~method, ncol=1, scales="free_y") # "scales" spelled out (was "scale", which relied on partial matching)
#See the numbers of positive and negative words in these lexicons
get_sentiments("nrc")%>%
filter(sentiment %in% c("positive","negative"))%>%
count(sentiment)
get_sentiments("bing")%>%
count(sentiment)
#Most Common Positive and Negative Words
bing_word_counts<-tidy_books%>%
inner_join(get_sentiments("bing"))%>%
count(word, sentiment, sort=TRUE)%>%
ungroup()
bing_word_counts
# Top-10 contributors to each sentiment, shown as horizontal bars
bing_word_counts%>%
group_by(sentiment)%>%
top_n(10)%>%
ungroup()%>%
mutate(word=reorder(word,n))%>%
ggplot(aes(word, n, fill=sentiment))+
geom_col(show.legend = FALSE)+
facet_wrap(~sentiment, scales="free_y")+
labs(y="Contribution to sentiment",x=NULL)+
coord_flip()
#Spot an anomaly: "miss" is listed as negative by the Bing lexicon but is
#presumably used as a title ("Miss ...") in these novels, so add it to a
#custom stop-word list.
custom_stop_words<-bind_rows(data_frame(word=c("miss"),lexicon=c("custom")),stop_words)
custom_stop_words
#Wordclouds for most common words
library(wordcloud)
tidy_books%>%
anti_join(stop_words)%>%
count(word)%>%
with(wordcloud(word, n, max.words = 100))
#Wordclouds for most common negative and positive words
library(reshape2)
# acast() reshapes the counts into a words-by-sentiment matrix as required
# by comparison.cloud()
tidy_books%>%
inner_join(get_sentiments("bing"))%>%
count(word, sentiment, sort=TRUE)%>%
acast(word~sentiment, value.var="n",fill=0)%>%
comparison.cloud(colors=c("gray20","gray80"),max.words = 100)
#Looking at Units beyond just words
# NOTE(review): data_frame() is deprecated in favor of tibble() -- works
# only while the installed tibble version still exports it.
PandP_sentences<-data_frame(text=prideprejudice)%>%
unnest_tokens(sentence, text, token="sentences")
PandP_sentences$sentence[2]
# Split each book into chapters using a regex tokenizer
austen_chapters<-austen_books()%>%
group_by(book)%>%
unnest_tokens(chapter, text, token="regex",pattern="Chapter | CHAPTER [\\dIVXLC]")%>%
ungroup()
austen_chapters%>%
group_by(book)%>%
summarise(chapters=n())
#Find the chapter with the highest proportion of negative words
bingnegative<-get_sentiments("bing")%>%
filter(sentiment=="negative")
# Words per chapter: the denominator of the ratio computed below
wordcounts<-tidy_books%>%
group_by(book,chapter)%>%
summarize(words=n())
tidy_books%>%
semi_join(bingnegative)%>%
group_by(book,chapter)%>%
summarize(negativewords=n())%>%
left_join(wordcounts, by=c("book","chapter"))%>%
mutate(ratio=negativewords/words)%>%
filter(chapter !=0)%>%
top_n(1)%>%
ungroup()
|
3eff5efcdf9b2810851f7234020da1327cd060b3
|
2db3a064b96b1427bddadc747805d31356f908a7
|
/R/Met.Save.Data.R
|
62499bd01ceaa7060dd8cd1af74f2e8c224ef770
|
[] |
no_license
|
cran/Metabonomic
|
d6981fc2ac985d5675a0d2b5d3d2697fd90a9385
|
89950548805f047697b68dae60163f145937c159
|
refs/heads/master
| 2021-01-20T02:29:05.117518
| 2010-09-13T00:00:00
| 2010-09-13T00:00:00
| 17,717,831
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 281
|
r
|
Met.Save.Data.R
|
Met.Save.Data <-
function()
{
    # Ask the user where to save the current data via a Tk "Save As" dialog.
    fileName<-tclvalue(tkgetSaveFile())
    # FIX: tkgetSaveFile() returns "" when the user cancels the dialog; the
    # original then tried to write to a file named "", which errors. Bail
    # out silently instead.
    if (nchar(fileName) == 0) return(invisible(NULL))
    # `datos$datos` is a global holding the loaded data set (set elsewhere
    # in the package). Write it tab-separated, without quoting and without
    # row or column names.
    write.table(datos$datos, file = fileName, append = FALSE, quote = FALSE, sep = "\t",
            eol = "\n", na = "NA", dec = ".", row.names = FALSE,
            col.names = FALSE, qmethod = c("escape", "double"))
}
|
cb4e973abaeb913c90a35914c0ca4aaa33795561
|
66a4f6d2f5293a94b97e88325d6c0c6048771f7c
|
/liuq_srp.R
|
6110317076b929322f0ee09830da1017623581f9
|
[] |
no_license
|
QMmmmLiu/Multiplicative-PHQ-9
|
140967a514f9ec0e7705fc3205f81c4ff900dcf0
|
51d4b038ce8ca2df036f28ba0363dcbda29ee03f
|
refs/heads/master
| 2020-07-19T05:09:28.877341
| 2019-09-19T18:56:41
| 2019-09-19T18:56:41
| 206,379,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,405
|
r
|
liuq_srp.R
|
# PHQ-9 longitudinal analysis comparing additive (sum) and multiplicative
# (geometric-mean) scoring. Depends on a data frame `x` loaded elsewhere;
# columns 2 and 4 are assumed to be login id and assessment wave, and
# columns 354:362 the nine PHQ-9 items -- TODO confirm against source data.
phq=x[,c(2,4,354:362)]
# Recode each item's factor levels to 1..4, then convert to numeric.
# (<<- inside the sapply() callbacks mutates phq in the enclosing scope.)
sapply(3:11,function(i) levels(phq[,i])<<-c(1,2,3,4))
sapply(3:11,function(i) phq[,i]<<-as.numeric(phq[,i]))
# score = additive sum of the nine items; mscore = geometric mean
phq.r=data.frame(cbind(phq[,3:11],phq$login,phq$Asmnt,
rowSums(phq[,3:11]),
exp(rowMeans(log(phq[,3:11])))))
colnames(phq.r)[-c(1:9)]=c("id","time","score","mscore")
levels(phq.r$time)=c(1,2,4)
# ids observed at all three assessment waves
nid=phq.r$id[table(phq.r$id)==3]
par(mfrow=c(1,3))
#dist plot
#baseline
# NOTE(review): columns score.0/score.1/score.3 referenced below are not
# created in this chunk -- presumably a wide reshape happens elsewhere.
# Also the Baseline panel overlays density(phq.r$score.1) (the one-month
# scores), which looks like a copy-paste slip -- confirm it should be score.0.
# In each panel: blue = kernel density, red = fitted normal, green = fitted
# log-normal.
plot0=hist(phq.r$score.0,main=paste("Baseline"),xlab="PHQ-9 sum",prob=T,ylim=c(0,.1));
lines(density(phq.r$score.1), # density plot
lwd = 2, # thickness of line
col = "blue")
curve(dnorm(x, mean=mean(phq.r$score.0), sd=sd(phq.r$score.0)),
col="red", lwd=2, add=TRUE)
curve(dlnorm(x, mean=mean(log(phq.r$score.0)), sd=sd(log(phq.r$score.0))),
col="green", lwd=2, add=TRUE)
# Standard error of the scores falling in each histogram bin
se.0=sapply(1:length(plot0$counts), function(i) sd(phq.r$score.0[phq.r$score.0>plot0$breaks[i] & phq.r$score.0<=plot0$breaks[i+1]]))/sqrt(plot0$counts)
plot1=hist(phq.r$score.1,main=paste("One Month"),xlab="PHQ-9 sum",prob=T,ylim=c(0,.1));
lines(density(phq.r$score.1), # density plot
lwd = 2, # thickness of line
col = "blue")
curve(dnorm(x, mean=mean(phq.r$score.1), sd=sd(phq.r$score.1)),
col="red", lwd=2, add=TRUE)
curve(dlnorm(x, mean=mean(log(phq.r$score.1)), sd=sd(log(phq.r$score.1))),
col="green", lwd=2, add=TRUE)
se.1=sapply(1:length(plot1$counts), function(i) sd(phq.r$score.1[phq.r$score.1>plot1$breaks[i] & phq.r$score.1<=plot1$breaks[i+1]]))/sqrt(plot1$counts)
plot3=hist(phq.r$score.3,main=paste("Three Month"),xlab="PHQ-9 sum",prob=T,ylim=c(0,.1));
lines(density(phq.r$score.3), # density plot
lwd = 2, # thickness of line
col = "blue")
curve(dnorm(x, mean=mean(phq.r$score.3), sd=sd(phq.r$score.3)),
col="red", lwd=2, add=TRUE)
curve(dlnorm(x, mean=mean(log(phq.r$score.3)), sd=sd(log(phq.r$score.3))),
col="green", lwd=2, add=TRUE)
se.3=sapply(1:length(plot3$counts), function(i) sd(phq.r$score.3[phq.r$score.3>plot3$breaks[i] & phq.r$score.3<=plot3$breaks[i+1]]))/sqrt(plot3$counts)
par(mfrow=c(1,3))
# Plot per-bin SEs only for bins with at least 10 observations
plot(se.0[plot0$counts>=10],main="Baseline",ylab="SE",xlab="",xaxt='n')
plot(se.1[plot1$counts>=10],main="One Month",ylab="SE",xlab="",xaxt='n')
plot(se.3[plot3$counts>=10],main="Three Month",ylab="SE",xlab="",xaxt='n')
# inter-panel relations
phq.r2=data.frame(cbind(phq[,3:11],phq$login,phq$Asmnt,
rowSums(phq[,3:11]),
exp(rowMeans(log(phq[,3:11])))))
phq.r2=na.omit(phq.r2)
colnames(phq.r2)[-c(1:9)]=c("id","time","score","mscore")
levels(phq.r2$time)=c(1,2,4)
# NOTE(review): phq.r2 was filtered with na.omit() above, but the logical
# mask here is built from phq.r (unfiltered) -- row lengths may no longer
# line up; confirm this keeps the intended complete cases.
phq.r2=phq.r2[table(phq.r$id)==3,]
# Additive (lm on score) vs multiplicative (lm on log score) fixed-effects
# models of time, compared by AIC.
lmaf=(lm(score~(time==2)+(time==4),phq.r2))
AIC(lmaf)
summary(lmaf);confint(lmaf)
lmmfe=(lm(log(score)*exp(mean(log(score)))~(time==2)+(time==4),phq.r2))
AIC(lmmfe)
lmmf=(lm(log(score)~(time==2)+(time==4),phq.r2))
summary(lmmf);exp(confint(lmmf))
lmat=lm(score~poly(as.numeric(as.character(time)),2),phq.r2)
AIC(lmat);summary(lmat);confint(lmat)
lmmte=lm(log(score)*exp(mean(log(score)))~log(as.numeric(as.character(time))),phq.r2)
AIC(lmmte)
lmmt=lm(log(score)~as.numeric(as.character(time)),phq.r2)
summary(lmmt);exp(confint(lmmt))
# Mixed-effects versions with random intercepts and (optionally) random
# slopes per subject id.
library(nlme)
lari=lme(score~as.numeric(as.character(time)),random=~1|id,phq.r2)
laris=lme(score~as.numeric(as.character(time)),random=~1+as.numeric(as.character(time))|id,phq.r2)
lmrie=lme(log(score)*exp(mean(log(score)))~log(as.numeric(as.character(time))),random=~1|id,phq.r2)
lmri=lme(log(score)~log(as.numeric(as.character(time))),random=~1|id,phq.r2)
lmrise=lme(log(score)*exp(mean(log(score)))~log(as.numeric(as.character(time))),random=~1+log(as.numeric(as.character(time)))|id,phq.r2)
lmris=lme(log(score)~log(as.numeric(as.character(time))),random=~1+log(as.numeric(as.character(time)))|id,phq.r2)
# item-rest relations
# Accumulators for model fit and estimates; irf() appends to them via <<-.
fit.m=matrix(,ncol=5)
est.m=matrix(,ncol=8)
fit.a=matrix(,ncol=5)
est.a=matrix(,ncol=8)
# For item column i: model the rest score (total minus item i) on time and
# the item response, in additive (am) and multiplicative (mm/mme) form,
# and append AICs/coefficients to the global accumulators.
irf<-function(i){
phq.r2$rest=phq.r2$score-phq.r2[,i]
phq.r2$item=phq.r2[,i]
phq.r2$t=as.numeric(as.character(phq.r2$time))
am=lme(rest~t+as.factor(item),random=~1+as.numeric(as.character(time))|id,phq.r2,control="optim")
mm=lme(log(rest)~log(as.numeric(as.character(time)))+as.factor(item),random=~1+log(as.numeric(as.character(time)))|id,phq.r2,control=lmeControl(maxIter=1e8,msMaxIter=1e8))
mme=lme(log(rest)*exp(mean(log(rest)))~log(as.numeric(as.character(time)))+as.factor(item),random=~1+log(as.numeric(as.character(time)))|id,phq.r2)
fit.a<<-rbind(fit.a,c(AIC(am),anova(am)[3,]))
fit.m<<-rbind(fit.m,c(AIC(mme),anova(mm)[3,]))
est.a<<-rbind(est.a,cbind(summary(am)$tTable[3:5,],intervals(am,which="fixed")$fixed[3:5,]))
est.m<<-rbind(est.m,cbind(summary(mm)$tTable[3:5,],exp(intervals(mm,which="fixed")$fixed[3:5,])))
cat(i)
}
# Run for item columns 1:2 and 4:9 (column 3 is handled separately below)
res.ir=sapply(c(1:2,4:9),irf)
write.csv(cbind(est.a,est.m),"item_rest_est.csv")
write.csv(cbind(fit.a,fit.m),"item_rest_fit.csv")
# item 3
# Re-fit the item-rest models for this item separately with a random
# intercept only (no random slope).
# NOTE(review): the section is labelled "item 3" but subtracts column 2 of
# phq.r2 -- confirm which column holds item 3.
phq.r2$rest=phq.r2$score-phq.r2[,2]
phq.r2$item=phq.r2[,2]
phq.r2$t=as.numeric(as.character(phq.r2$time))
# FIX: there is no package called "lmer" (lmer() is a function in lme4), so
# library(lmer) always errors. The models below use nlme::lme, so load nlme.
library(nlme)
am=lme(rest~t+as.factor(item),random=~1|id,phq.r2,control="optim")
mm=lme(log(rest)~log(as.numeric(as.character(time)))+as.factor(item),random=~1|id,phq.r2,control=lmeControl(maxIter=1e8,msMaxIter=1e8))
mme=lme(log(rest)*exp(mean(log(rest)))~log(as.numeric(as.character(time)))+as.factor(item),random=~1+log(as.numeric(as.character(time)))|id,phq.r2)
|
a664851e77e1c2f2a52fd15796d683362e0662b2
|
02e865334769049a7a92ffe4b3d37cb66c97ae04
|
/Unit 5 /twitter.R
|
82377bafcc09d42d8abc88d40012afe7fc099810
|
[] |
no_license
|
egorgrachev/Analytics_Edge
|
5d17c046273fe1461842d4b2e0d81eaec4c3f1be
|
9d4a5e611f0ac5dc39ce566b7f68739fecac66dd
|
refs/heads/master
| 2021-01-16T01:02:05.038312
| 2015-04-15T09:31:37
| 2015-04-15T09:31:37
| 33,805,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,066
|
r
|
twitter.R
|
# Sentiment classification of tweets about Apple (Analytics Edge, unit 5):
# build a sparse document-term matrix from tweet text and split it into
# training and test sets.
install.packages("tm")
tweets = read.csv("AnalyticsEdge/Unit 5 /tweets.csv", stringsAsFactors=FALSE)
str(tweets)
# A tweet is labelled "Negative" when its average sentiment score is <= -1
tweets$Negative = as.factor(tweets$Avg <= -1)
str(tweets)
table(tweets$Negative)
library(tm)
install.packages("SnowballC")
library(SnowballC)
# Standard pre-processing: lower-case, strip punctuation, drop English stop
# words plus "apple" itself (present in almost every tweet), then stem.
corpus = Corpus(VectorSource(tweets$Tweet))
corpus = tm_map(corpus, tolower)
corpus = tm_map(corpus, PlainTextDocument)
corpus = tm_map(corpus, removePunctuation)
corpus = tm_map(corpus, removeWords, c("apple", stopwords("english")))
corpus = tm_map(corpus, stemDocument)
corpus[[1]]
frequencies = DocumentTermMatrix(corpus)
frequencies
inspect(frequencies[1000:1005, 505:515])
findFreqTerms(frequencies, lowfreq=100)
# Keep only terms that appear in at least 0.5% of the tweets
sparse = removeSparseTerms(frequencies, 0.995)
sparse
tweetsSparse = as.data.frame(as.matrix(sparse))
# make.names() so all term columns are valid R variable names
colnames(tweetsSparse) = make.names(colnames(tweetsSparse))
tweetsSparse$Negative = tweets$Negative
library(caTools)
set.seed(123)
# 70/30 train/test split, stratified on the outcome
split = sample.split(tweetsSparse$Negative, SplitRatio = 0.7)
trainSparse = subset(tweetsSparse, split==TRUE)
# FIX: the test set must be drawn from tweetsSparse. The original subset
# trainSparse (whose rows no longer line up with `split`) and misspelled
# the variable as teatSparse.
testSparse = subset(tweetsSparse, split==FALSE)
|
b446f472549525ce5f48f9b7f3693edb3b84c735
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/GERGM/R/gergm.R
|
c8c69104f6dce16f7c7a2b8e88c9b0877612c91b
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,379
|
r
|
gergm.R
|
#' @title A Function to estimate a GERGM.
#' @description The main function provided by the package.
#'
#' @param formula A formula object that specifies the relationship between
#' statistics and the observed network. Currently, the user may specify a model
#' using any combination of the following statistics: `out2stars(alpha = 1)`,
#' `in2stars(alpha = 1)`, `ctriads(alpha = 1)`, `mutual(alpha = 1)`,
#' `ttriads(alpha = 1)`, `absdiff(covariate = "MyCov")`,
#' `edgecov(covariate = "MyCov")`, `sender(covariate = "MyCov")`,
#' `reciever(covariate = "MyCov")`, `nodematch(covariate)`,
#' `nodemix(covariate, base = "MyBase")`, `netcov(network)`. To use
#' exponential downweighting for any of the network level terms, simply
#' specify a value for alpha less than 1. The `(alpha = 1)` term may be omitted
#' from the structural terms if no exponential downweighting is required. In
#' this case, the terms may be provided as: `out2star`, `in2star`, `ctriads`,
#' `recip`, `ttriads`. If the network is undirected the user may only specify
#' the following terms: `twostars(alpha = 1)`, `ttriads(alpha = 1)`,
#' `absdiff(covariate = "MyCov")`, `edgecov(covariate = "MyCov")`,
#' `sender(covariate = "MyCov")`, `nodematch(covariate)`,
#' `nodemix(covariate, base = "MyBase")`, `netcov(network)`. An intercept
#' term is included by default, but can be omitted by setting
#' omit_intercept_term = TRUE. If the user specifies
#' `nodemix(covariate, base = NULL)`, then all levels of the covariate
#' will be matched on.
#' @param covariate_data A data frame containing node level covariates the user
#' wished to transform into sender or reciever effects. It must have row names
#' that match every entry in colnames(raw_network), should have descriptive
#' column names. If left NULL, then no sender or reciever effects will be
#' added.
#' @param normalization_type If only a raw_network is provided and
#' omit_intercept_term = TRUE then, the function
#' will automatically check to determine if all edges fall in the [0,1] interval.
#' If edges are determined to fall outside of this interval, then a trasformation
#' onto the interval may be specified. If "division" is selected, then the data
#' will have a value added to them such that the minimum value is atleast zero
#' (if necessary) and then all edge values will be divided by the maximum to
#' ensure that the maximum value is in [0,1]. If "log" is selected, then the data
#' will have a value added to them such that the minimum value is atleast zero
#' (if necessary), then 1 will be added to all edge values before they are logged
#' and then divided by the largest value, again ensuring that the resulting
#' network is on [0,1]. Defaults to "log" and need not be set to NULL if
#' providing covariates as it will be ignored.
#' @param network_is_directed Logical specifying whether or not the observed
#' network is directed. Default is TRUE.
#' @param use_MPLE_only Logical specifying whether or not only the maximum pseudo
#' likelihood estimates should be obtained. In this case, no simulations will be
#' performed. Default is FALSE.
#' @param transformation_type Specifies how covariates are transformed onto the
#' raw network. When working with heavly tailed data that are not strictly
#' positive, select "Cauchy" to transform the data using a Cauchy distribution.
#' If data are strictly positive and heavy tailed (such as financial data) it is
#' suggested the user select "LogCauchy" to perform a Log-Cauchy transformation
#' of the data. For a tranformation of the data using a Gaussian distribution,
#' select "Gaussian" and for strictly positive raw networks, select "LogNormal".
#' The Default value is "Cauchy".
#' @param estimation_method Simulation method for MCMC estimation. Default is
#' "Gibbs" which will generally be faster with well behaved networks but will not
#' allow for exponential downweighting.
#' @param maximum_number_of_lambda_updates Maximum number of iterations of outer
#' MCMC loop which alternately estimates transform parameters and ERGM
#' parameters. In the case that data_transformation = NULL, this argument is
#' ignored. Default is 10.
#' @param maximum_number_of_theta_updates Maximum number of iterations within the
#' MCMC inner loop which estimates the ERGM parameters. Default is 100.
#' @param number_of_networks_to_simulate Number of simulations generated for
#' estimation via MCMC. Default is 500.
#' @param thin The proportion of samples that are kept from each simulation. For
#' example, thin = 1/200 will keep every 200th network in the overall simulated
#' sample. Default is 1.
#' @param proposal_variance The variance specified for the Metropolis Hastings
#' simulation method. This parameter is inversely proportional to the average
#' acceptance rate of the M-H sampler and should be adjusted so that the average
#' acceptance rate is approximately 0.25. Default is 0.1.
#' @param downweight_statistics_together Logical specifying whether or not the
#' weights should be applied inside or outside the sum. Default is TRUE and user
#' should not select FALSE under normal circumstances.
#' @param MCMC_burnin Number of samples from the MCMC simulation procedure that
#' will be discarded before drawing the samples used for estimation.
#' Default is 100.
#' @param seed Seed used for reproducibility. Default is 123.
#' @param convergence_tolerance Threshold designated for stopping criterion. If
#' the difference of parameter estimates from one iteration to the next all have
#' a p -value (under a paired t-test) greater than this value, the parameter
#' estimates are declared to have converged. Default is 0.01.
#' @param MPLE_gain_factor Multiplicative constant between 0 and 1 that controls
#' how far away the initial theta estimates will be from the standard MPLEs via
#' a one step Fisher update. In the case of strongly dependent data, it is
#' suggested to use a value of 0.10. Default is 0.
#' @param acceptable_fit_p_value_threshold A p-value threshold for how closely
#' statistics of observed network conform to statistics of networks simulated
#' from GERGM parameterized by converged final parameter estimates. Default value
#' is 0.05.
#' @param force_x_theta_updates Defaults to 1 where theta estimation is not
#' allowed to converge until thetas have updated for x iterations . Useful when
#' model is not degenerate but simulated statistics do not match observed network
#' well when algorithm stops after first y updates.
#' @param force_x_lambda_updates Defaults to 1 where lambda estimation is not
#' allowed to converge until lambdas have updated for x iterations . Useful when
#' model is not degenerate but simulated statistics do not match observed network
#' well when algorithm stops after first y updates.
#' @param output_directory The directory where you would like output generated
#' by the GERGM estimation procedure to be saved (if output_name is specified).
#' This includes, GOF, trace, and parameter estimate plots, as well as a summary
#' of the estimation procedure and an .Rdata file containing the GERGM object
#' returned by this function. May be left as NULL if the user would prefer all
#' plots be printed to the graphics device.
#' @param output_name The common name stem you would like to assign to all
#' objects output by the gergm function. Default value of NULL will not save any
#' output directly to .pdf files, it will be printed to the console instead. Must
#' be a character string or NULL. For example, if "Test" is supplied as the
#' output_name, then 5 files will be output: "Test_GOF.pdf",
#' "Test_Parameter_Estimates.pdf", "Test_GERGM_Object.Rdata",
#' "Test_Estimation_Log.txt", and "Test_Trace_Plot.pdf"
#' @param generate_plots Defaults to TRUE, if FALSE, then no diagnostic or
#' parameter plots are generated.
#' @param verbose Defaults to TRUE (providing lots of output while model is
#' running). Can be set to FALSE if the user wishes to see less output.
#' @param omit_intercept_term Defaults to FALSE, can be set to TRUE if the
#' user wishes to omit the model intercept term.
#' @param hyperparameter_optimization Logical indicating whether automatic
#' hyperparameter optimization should be used. Defaults to FALSE. If TRUE, then
#' the algorithm will automatically seek to find an optimal burnin and number of
#' networks to simulate, and if using Metropolis Hastings, will attempt to select
#' a proposal variance that leads to a acceptance rate within +-0.05 of
#' target_accept_rate. Furthermore, if degeneracy is detected, the algorithm
#' will attempt to address the issue automatically. WARNING: This feature is
#' experimental, and may greatly increase runtime. Please monitor console
#' output!
#' @param target_accept_rate The target Metropolis Hastings acceptance rate.
#' Defaults to 0.25
#' @param ... Optional arguments, currently unsupported.
#' @return A gergm object containing parameter estimates.
#' @examples
#' \dontrun{
#' set.seed(12345)
#' net <- matrix(rnorm(100,0,20),10,10)
#' colnames(net) <- rownames(net) <- letters[1:10]
#' formula <- net ~ mutual + ttriads
#'
#' test <- gergm(formula,
#' normalization_type = "division",
#' network_is_directed = TRUE,
#' use_MPLE_only = FALSE,
#' estimation_method = "Metropolis",
#' number_of_networks_to_simulate = 40000,
#' thin = 1/10,
#' proposal_variance = 0.5,
#' downweight_statistics_together = TRUE,
#' MCMC_burnin = 10000,
#' seed = 456,
#' convergence_tolerance = 0.01,
#' MPLE_gain_factor = 0,
#' force_x_theta_updates = 4)
#' }
#' @export
gergm <- function(formula,
                  covariate_data = NULL,
                  normalization_type = c("log","division"),
                  network_is_directed = c(TRUE, FALSE),
                  use_MPLE_only = c(FALSE, TRUE),
                  transformation_type = c("Cauchy","LogCauchy","Gaussian","LogNormal"),
                  estimation_method = c("Gibbs", "Metropolis"),
                  maximum_number_of_lambda_updates = 10,
                  maximum_number_of_theta_updates = 10,
                  number_of_networks_to_simulate = 500,
                  thin = 1,
                  proposal_variance = 0.1,
                  downweight_statistics_together = TRUE,
                  MCMC_burnin = 100,
                  seed = 123,
                  convergence_tolerance = 0.01,
                  MPLE_gain_factor = 0,
                  acceptable_fit_p_value_threshold = 0.05,
                  force_x_theta_updates = 1,
                  force_x_lambda_updates = 1,
                  output_directory = NULL,
                  output_name = NULL,
                  generate_plots = TRUE,
                  verbose = TRUE,
                  omit_intercept_term = FALSE,
                  hyperparameter_optimization = FALSE,
                  target_accept_rate = 0.25,
                  ...
                  ){
  # pass in experimental features through ellipsis; currently only the
  # undocumented `using_correlation_network` flag is recognized
  using_correlation_network = FALSE
  object <- as.list(substitute(list(...)))[-1L]
  if (length(object) > 0) {
    if (!is.null(object$using_correlation_network)) {
      if (object$using_correlation_network) {
        using_correlation_network <- TRUE
        cat("Using experimental correlation network feature...\n")
      }
    }
  }
  # hard coded possible stats; these names must stay in this order because
  # downstream code indexes statistics positionally
  possible_structural_terms <- c("out2stars",
                                 "in2stars",
                                 "ctriads",
                                 "mutual",
                                 "ttriads",
                                 "edges")
  possible_structural_terms_undirected <- c("edges",
                                            "twostars",
                                            "ttriads")
  possible_covariate_terms <- c("absdiff",
                                "nodecov",
                                "nodematch",
                                "sender",
                                "receiver",
                                "intercept",
                                "nodemix")
  possible_network_terms <- "netcov"
  possible_transformations <- c("cauchy",
                                "logcauchy",
                                "gaussian",
                                "lognormal")
  # check terms for undirected network: translate undirected statistic names
  # (e.g. "twostars") into their internal directed equivalents
  if (!network_is_directed) {
    formula <- parse_undirected_structural_terms(
      formula,
      possible_structural_terms,
      possible_structural_terms_undirected)
  }
  # automatically add an intercept term unless omit_intercept_term is TRUE
  if (!omit_intercept_term) {
    formula <- add_intercept_term(formula)
    #check for an edges statistic
    form <- as.formula(formula)
    parsed <- deparse(form)
    # deparse may wrap long formulas onto multiple lines; re-join them so
    # grepl sees the whole formula
    if (length(parsed) > 1) {
      parsed <- paste0(parsed, collapse = " ")
    }
    if (grepl("edges",parsed)) {
      stop("You may not specify an edges statistic if omit_intercept_term == FALSE as this will introduce two identical intercept terms and instability in the model. An intercept term is automatically added in the lambda transformation step unless omit_intercept_term == TRUE, and we have found this method of adding an intercept to be less prone to degeneracy.")
    }
  }
  # set logical values for whether we are using MPLE only, whether the network
  # is directed, and which estimation method we are using as well as the
  # transformation type (take the first element of each choice vector)
  use_MPLE_only <- use_MPLE_only[1] #default is FALSE
  network_is_directed <- network_is_directed[1] #default is TRUE
  estimation_method <- estimation_method[1] #default is Gibbs
  transformation_type <- transformation_type[1] #default is "Cauchy"
  transformation_type <- tolower(transformation_type)
  normalization_type <- normalization_type[1]
  # if we are using a correlation network, then the network must be undirected.
  if (using_correlation_network) {
    network_is_directed <- FALSE
  }
  if (is.null(output_directory) & !is.null(output_name)) {
    stop("You have specified an output file name but no output directory. Please
         specify both or neither.")
  }
  if (length(which(possible_transformations %in% transformation_type == T)) != 1) {
    stop("You have specified a transformation that is not recognized. Please
         specify one of: Cauchy, LogCauchy, Gaussian, or LogNormal")
  }
  #make sure proposal variance is greater than zero
  if (proposal_variance <= 0.001) {
    proposal_variance <- 0.001
    cat("You supplied a proposal variance that was less than or equal to zero. It has been reset to 0.001, considder respecifying...\n")
  }
  formula <- as.formula(formula)
  #0. Prepare the data: normalize the network and transform any covariates
  Transformed_Data <- Prepare_Network_and_Covariates(
    formula,
    possible_structural_terms,
    possible_covariate_terms,
    possible_network_terms,
    covariate_data = covariate_data,
    normalization_type = normalization_type,
    is_correlation_network = using_correlation_network,
    is_directed = network_is_directed)
  data_transformation <- NULL
  if (!is.null(Transformed_Data$transformed_covariates)) {
    data_transformation <- Transformed_Data$transformed_covariates
  }
  # lambda parameter names: one per covariate effect plus a dispersion term
  gpar.names <- c(Transformed_Data$gpar.names, "dispersion")
  #1. Create GERGM object from network
  GERGM_Object <- Create_GERGM_Object_From_Formula(
    formula,
    theta.coef = NULL,
    possible_structural_terms,
    possible_covariate_terms,
    possible_network_terms,
    raw_network = Transformed_Data$network,
    together = 1,
    transform.data = data_transformation,
    lambda.coef = NULL,
    transformation_type = transformation_type,
    is_correlation_network = using_correlation_network,
    is_directed = network_is_directed
  )
  # initialize bookkeeping slots on the GERGM object before estimation
  GERGM_Object@theta_estimation_converged <- FALSE
  GERGM_Object@lambda_estimation_converged <- FALSE
  GERGM_Object@observed_network <- GERGM_Object@network
  GERGM_Object@observed_bounded_network <- GERGM_Object@bounded.network
  GERGM_Object@simulation_only <- FALSE
  GERGM_Object@transformation_type <- transformation_type
  GERGM_Object@downweight_statistics_together <- downweight_statistics_together
  GERGM_Object@directed_network <- network_is_directed
  if (!is.null(data_transformation)) {
    GERGM_Object@data_transformation <- data_transformation
  }
  # print_output controls whether console output is also logged for later
  # writing to the *_Estimation_Log.txt file
  if (is.null(output_name)) {
    GERGM_Object@print_output <- FALSE
  }else{
    GERGM_Object@print_output <- TRUE
  }
  # if we are using a correlation network then set field to TRUE.
  GERGM_Object@is_correlation_network <- using_correlation_network
  # set adaptive metropolis parameters
  GERGM_Object@hyperparameter_optimization <- hyperparameter_optimization
  GERGM_Object@target_accept_rate <- target_accept_rate
  GERGM_Object@proposal_variance <- proposal_variance
  GERGM_Object@estimation_method <- estimation_method
  GERGM_Object@number_of_simulations <- number_of_networks_to_simulate
  GERGM_Object@thin <- thin
  GERGM_Object@burnin <- MCMC_burnin
  GERGM_Object@MPLE_gain_factor <- MPLE_gain_factor
  #2. Estimate GERGM (alternating theta / lambda updates until convergence)
  GERGM_Object <- Estimate_GERGM(formula,
                                 MPLE.only = use_MPLE_only,
                                 max.num.iterations = maximum_number_of_lambda_updates,
                                 mc.num.iterations = maximum_number_of_theta_updates,
                                 seed = seed,
                                 tolerance = convergence_tolerance,
                                 possible.stats = possible_structural_terms,
                                 GERGM_Object = GERGM_Object,
                                 force_x_theta_updates = force_x_theta_updates,
                                 verbose = verbose,
                                 force_x_lambda_updates = force_x_lambda_updates)
  #3. Perform degeneracy diagnostics and create GOF plots
  if (!GERGM_Object@theta_estimation_converged) {
    warning("Estimation procedure did not detect convergence in Theta estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
    GERGM_Object <- store_console_output(GERGM_Object,"Estimation procedure did not detect convergence in Theta estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
  }
  if (!GERGM_Object@lambda_estimation_converged) {
    warning("Estimation procedure did not detect convergence in Lambda estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
    GERGM_Object <- store_console_output(GERGM_Object,"Estimation procedure did not detect convergence in Lambda estimates. Estimation halted when maximum number of updates was reached. Be careful to assure good model fit or select a more relaxed convergence criterion.")
  }
  #now simulate from last update of theta parameters
  GERGM_Object <- Simulate_GERGM(GERGM_Object,
                                 seed1 = seed,
                                 possible.stats = possible_structural_terms)
  # label lambda estimates (covariate effects plus the dispersion parameter)
  colnames(GERGM_Object@lambda.coef) = gpar.names
  num.nodes <- GERGM_Object@num_nodes
  # all node triples, needed by h2() for triad-based statistics
  triples <- t(combn(1:num.nodes, 3))
  # change back column names if we are dealing with an undirected network
  if (!network_is_directed) {
    change <- which(colnames(GERGM_Object@theta.coef) == "in2stars")
    if (length(change) > 0) {
      colnames(GERGM_Object@theta.coef)[change] <- "twostars"
    }
  }
  # statistics of the observed network, computed on the raw network for
  # correlation networks and on the bounded network otherwise
  init.statistics <- NULL
  if (GERGM_Object@is_correlation_network) {
    init.statistics <- h2(GERGM_Object@network,
                          triples = triples,
                          statistics = rep(1, length(possible_structural_terms)),
                          alphas = GERGM_Object@weights,
                          together = downweight_statistics_together)
  }else{
    init.statistics <- h2(GERGM_Object@bounded.network,
                          triples = triples,
                          statistics = rep(1, length(possible_structural_terms)),
                          alphas = GERGM_Object@weights,
                          together = downweight_statistics_together)
  }
  # fix issue with the wrong stats being saved
  GERGM_Object@stats[2,] <- init.statistics
  hsn.tot <- GERGM_Object@MCMC_output$Statistics
  # save these statistics so we can make GOF plots in the future, otherwise
  # they would be the transformed statistics which would produce poor GOF plots.
  GERGM_Object@simulated_statistics_for_GOF <- hsn.tot
  # thin statistics to reduce autocorrelation before running the t-tests below
  hsn.tot <- Thin_Statistic_Samples(hsn.tot)
  #calculate t.test p-values for calculating the difference in the means of
  # the newly simulated data with the original network
  statistic_test_p_values <- rep(NA,length(possible_structural_terms))
  for (i in 1:length(possible_structural_terms)) {
    statistic_test_p_values[i] <- round(t.test(hsn.tot[, i],
                                               mu = init.statistics[i])$p.value,3)
  }
  stats.data <- data.frame(Observed = init.statistics,
                           Simulated = colMeans(hsn.tot))
  rownames(stats.data) <- possible_structural_terms
  cat("Statistics of observed network and networks simulated from final theta parameter estimates:\n")
  GERGM_Object <- store_console_output(GERGM_Object,"Statistics of observed network and networks simulated from final theta parameter estimates:\n")
  GERGM_Object <- store_console_output(GERGM_Object, toString(stats.data))
  statistic_test_p_values <- data.frame(p_values = statistic_test_p_values)
  rownames(statistic_test_p_values) <- possible_structural_terms
  cat("\nt-test p-values for statistics of observed network and networks simulated from final theta parameter estimates:\n \n")
  GERGM_Object <- store_console_output(GERGM_Object,"\nt-test p-values for statistics of observed network and networks simulated from final theta parameter estimates:\n \n")
  print(statistic_test_p_values)
  GERGM_Object <- store_console_output(GERGM_Object, toString(statistic_test_p_values))
  colnames(statistic_test_p_values) <- "p_values"
  GERGM_Object@observed_simulated_t_test <- statistic_test_p_values
  #test to see if we have an acceptable fit: only statistics actually in the
  # model (stats_to_use == 1) are checked against the p-value threshold
  acceptable_fit <- statistic_test_p_values[which(GERGM_Object@stats_to_use == 1), 1]
  if (min(acceptable_fit) > acceptable_fit_p_value_threshold) {
    GERGM_Object@acceptable_fit <- TRUE
    message("Parameter estimates simulate networks that are statistically indistinguishable from observed network on the statistics specified by the user. ")
    GERGM_Object <- store_console_output(GERGM_Object,"Parameter estimates simulate networks that are statistically indistinguishable from observed network on the statistics specified by the user. ")
  }else{
    GERGM_Object@acceptable_fit <- FALSE
    message("Parameter estimates simulate networks that are statistically distinguishable from observed network. Check GOF plots to determine if the model provides a reasonable fit . This is a very stringent test for goodness of fit, so results may still be acceptable even if this criterion is not met.")
    GERGM_Object <- store_console_output(GERGM_Object, "Parameter estimates simulate networks that are statistically distinguishable from observed network. Check GOF plots to determine if the model provides a reasonable fit . This is a very stringent test for goodness of fit, so results may still be acceptable even if this criterion is not met.")
  }
  #4. output everything to the appropriate files and return GERGM object.
  if (generate_plots) {
    # only generate output if output_name is not NULL
    if (!is.null(output_name)) {
      if (is.null(output_directory)) {
        output_directory <- getwd()
      }
      # temporarily switch to the output directory, then restore afterwards
      current_directory <- getwd()
      setwd(output_directory)
      pdf(file = paste(output_name,"_GOF.pdf",sep = ""), height = 4, width = 8)
      GOF(GERGM_Object)
      dev.off()
      pdf(file = paste(output_name,"_Parameter_Estimates.pdf",sep = ""), height = 4, width = 5)
      Estimate_Plot(GERGM_Object)
      dev.off()
      pdf(file = paste(output_name,"_Trace_Plot.pdf",sep = ""), height = 4, width = 6)
      Trace_Plot(GERGM_Object)
      dev.off()
      save(GERGM_Object, file = paste(output_name,"_GERGM_Object.Rdata",sep = ""))
      write.table(GERGM_Object@console_output,file = paste(output_name,"_Estimation_Log.txt",sep = ""),row.names = F,col.names = F,fileEncoding = "utf8", quote = F)
      setwd(current_directory)
    } else{
      # if we are not saving everything to a directory then just print stuff to
      # the graphics device
      GOF(GERGM_Object)
      Sys.sleep(2)
      Estimate_Plot(GERGM_Object)
      Sys.sleep(2)
      Trace_Plot(GERGM_Object)
    }
  }
  # transform networks back to observed scale
  cat("Transforming networks simulated via MCMC as part of the fit diagnostics back on to the scale of observed network. You can access these networks through the '@MCMC_output$Networks' field returned by this function...\n")
  GERGM_Object <- Convert_Simulated_Networks_To_Observed_Scale(GERGM_Object)
  return(GERGM_Object)
}
|
dc3f82ffcadc78767b72d147eddfc112ba87f6f4
|
cd901f78760d0856a58e2791d94751b3e3e5c3e8
|
/R/callECNV.R
|
9b420101cf8ad31054837fe666163e70f36940cc
|
[] |
no_license
|
sanadamakomi/exonCNV
|
4d6056596d2a17df5e56075400441207bf6eb77f
|
92aaeb8ea242aa6965e3910ae5825c68ec30c65b
|
refs/heads/master
| 2022-08-10T09:24:41.165518
| 2022-08-04T07:59:10
| 2022-08-04T07:59:10
| 175,590,331
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,045
|
r
|
callECNV.R
|
#' @title Fit data
#' @param mergedCovFile Path of merged coverage file.
#' @param parameterFile Path of metrics file.
#' @param path Path to write to.
#' @param lowdepth A numeric value; regions whose average depth is less than
#' this value will be replaced by NA.
#' @export
#' @author Zhan-Ni Chen
performFitPoisson <- function(mergedCovFile, parameterFile, path = NULL, lowdepth = 10) {
    # Normalize per-sample coverage to a common library size, then fit a
    # Poisson model per region and write each sample's probability of its
    # observed depth. chrX/chrY regions are fitted separately for male and
    # female samples; autosomes pool all samples.
    #
    # Args:
    #   mergedCovFile: tab-delimited table with chr/start/end/id columns plus
    #                  one depth column per sample.
    #   parameterFile: tab-delimited metrics table; must contain id, gender
    #                  and total.read for every sample column in mergedCovFile.
    #   path:          output file path; defaults to "probability.txt" in the
    #                  current working directory.
    #   lowdepth:      a region is only fitted when more than half of the
    #                  relevant samples exceed this depth; otherwise its
    #                  probabilities remain 1.
    if (! file.exists(mergedCovFile)) stop(paste0(mergedCovFile, ' does not exist.'))
    if (! file.exists(parameterFile)) stop(paste0(parameterFile, ' does not exist.'))
    # Bug fix: the previous paste0(normalizePath('.'), 'probability.txt')
    # concatenated directory and file name without a separator.
    if (is.null(path)) path <- file.path(normalizePath('.'), 'probability.txt')
    write(paste0("Start to fit data...\nCutoff: lowdepth=", lowdepth), stdout())
    covDat <- read.table(mergedCovFile, header = TRUE, sep = "\t", quote = "",
                         comment.char = "#", na.strings = "NA",
                         fill = TRUE, stringsAsFactors = FALSE)
    paraDat <- read.table(parameterFile, header = TRUE, sep = "\t", quote = "",
                          comment.char = "#", na.strings = "NA",
                          fill = TRUE, stringsAsFactors = FALSE)
    # 1. every sample column must have a matching row in the parameter file
    x.ids <- setdiff(colnames(covDat), c('chr', 'start', 'end', 'id'))
    x.nopara <- x.ids[which(! x.ids %in% paraDat$id)]
    if (length(x.nopara) > 0) stop(paste0('Error: ', paste(x.nopara, collapse = ', '), ' does not exist in ', parameterFile))
    paraDat <- paraDat[which(paraDat$id %in% x.ids), ]
    x.ids.m <- paraDat[which(paraDat[, 'gender'] == 'Male'), 'id']
    x.ids.f <- paraDat[which(paraDat[, 'gender'] == 'Female'), 'id']
    # 2. scale every sample's depths down to the smallest library size so the
    #    counts are comparable across samples
    ratio <- min(paraDat$total.read) / paraDat$total.read
    names(ratio) <- paraDat$id
    for (i in paraDat$id) {
        covDat[, i] <- as.integer(covDat[, i] * ratio[i])
    }
    # 3. fit a Poisson model per region; probabilities default to 1 for
    #    regions that fail the lowdepth coverage requirement
    ppmatrix <- matrix(data = 1, nrow = nrow(covDat), ncol = length(x.ids))
    colnames(ppmatrix) <- x.ids
    for (i in seq_len(nrow(covDat))) {
        if (covDat[i, 'chr'] %in% c('X', 'Y')) {
            if (length(which(covDat[i, x.ids.m] > lowdepth)) > length(x.ids.m) * 1/2) {
                ppmatrix[i, x.ids.m] <- doPois(covDat[i, x.ids.m])
            }
            if (length(which(covDat[i, x.ids.f] > lowdepth)) > length(x.ids.f) * 1/2) {
                ppmatrix[i, x.ids.f] <- doPois(covDat[i, x.ids.f])
            }
        } else {
            if (length(which(covDat[i, x.ids] > lowdepth)) > length(x.ids) * 1/2) {
                ppmatrix[i, x.ids] <- doPois(covDat[i, x.ids])
            }
        }
    }
    ppmatrix <- cbind(covDat[, c('chr', 'start', 'end', 'id')], ppmatrix)
    write.table(ppmatrix, file = path, row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
    write(paste0("Write to path: \n", normalizePath(path)), stdout())
}
#' @title Call exon CNVs.
#' @param probFile Path of probability file.
#' @param mergedCovFile Path of merged coverage file.
#' @param path Directory path to write to.
#' @param sample.id Sample ids to call CNV. By default it will run in all
#' samples.
#' @param gene.name A vector of gene symbols to call CNV.
#' @param cutoff A list of cutoff parameters to filter exon CNV, 'prob' is
#' probability, 'pool.count' is the least counts of sample of which gene has
#' CNV. 'baseline' is the least baseline depth to call CNV. 'lowdepth' is
#' least depth in all samples to call CNV.
#' @export
#' @importFrom utils read.table
#' @importFrom utils write.table
#' @importFrom stringr str_match
#' @importFrom stats dpois
#' @importFrom stats nlm
#' @author Zhan-Ni Chen
callExonCNV <- function(probFile, mergedCovFile, path = NULL, sample.id = NULL, gene.name=NULL, cutoff = list(prob = 1E-4, pool.count = 1, baseline = 50, lowdepth = 10)) {
    # Call per-exon CNVs for each requested sample by combining the Poisson
    # probabilities (probFile) with log2 ratios of sample depth vs. a
    # pool-median baseline (mergedCovFile). Writes three files per sample:
    # <id>.raw.cnv.txt (all exons), <id>.positive.cnv.txt (DEL/DUP exons) and
    # <id>.gene.cnv.txt (gene-level merged and filtered calls).
    # The region id column is expected to look like "GENE(TRANSCRIPT)_EXON_N".
    if (! file.exists(probFile)) stop(paste0(probFile, 'no exists.'))
    if (! file.exists(mergedCovFile)) stop(paste0(mergedCovFile, 'no exists.'))
    if (is.null(path)) path <- '.'
    if (! dir.exists(path)) dir.create(path)
    write("Start to call exon CNV...", stdout())
    write(paste0("Cutoff: ", paste(paste0(names(cutoff), "=", cutoff[names(cutoff)]), collapse = ', ')), stdout())
    path <- normalizePath(path)
    dat <- read.table(probFile, header = TRUE, sep = "\t", quote = "",
                      comment.char = "#", na.strings = "NA",
                      fill = TRUE, stringsAsFactors = FALSE)
    # depth table is assumed to be row-aligned with the probability table --
    # TODO(review): confirm both files are always produced in the same order
    depth.dat <- read.table(mergedCovFile, header = TRUE, sep = "\t", quote = "",
                            comment.char = "#", na.strings = "NA",
                            fill = TRUE, stringsAsFactors = FALSE)
    # capture groups: [,2] gene symbol, [,3] transcript, [,4] exon number
    infos <- str_match(dat[,'id'], "^(.+)\\((.+?)\\)_([0-9]+)_[0-9]+$")
    # optionally restrict both tables to the requested genes
    if (! is.null(gene.name)) {
        dat <- dat[which(infos[,2] %in% gene.name),,drop=FALSE]
        depth.dat <- depth.dat[which(infos[,2] %in% gene.name),,drop=FALSE]
    }
    all.id <- setdiff(colnames(dat), c('chr', 'start', 'end', 'id'))
    if (is.null(sample.id)) {
        sample.id <- all.id
    } else {
        sample.id <- intersect(sample.id, all.id)
        if (length(sample.id) == 0) stop(paste0(paste(sample.id, collapse = ' '), 'no exists.'))
    }
    # per region: how many samples in the whole pool look abnormal; used to
    # discard recurrent (likely artifactual) calls later
    counts <- apply(dat[, all.id], 1,
                    function(x) {length(which(x < cutoff$prob))})
    # recompute id fields after the optional gene.name subsetting above
    infos <- str_match(dat[,'id'], "^(.+)\\((.+?)\\)_([0-9]+)_[0-9]+$")
    gene.table <- table(infos[,2])
    # number of exons per gene, aligned row-by-row with dat
    exon.num <- as.data.frame(gene.table[infos[,2]])
    dosample <- sapply(sample.id, function(i){
        write(paste0('Perform ', i), stdout())
        raw.path <- paste0(path, '/', i, '.raw.cnv.txt')
        positive.path <- paste0(path, '/', i, '.positive.cnv.txt')
        filter.path <- paste0(path, '/', i, '.gene.cnv.txt')
        # baseline = per-region median depth of all OTHER samples
        baseline <- apply(depth.dat[, setdiff(colnames(depth.dat),
                                    c('chr', 'start', 'end', 'id', i)), drop = FALSE],
                          1, median, na.rm = TRUE)
        log2 <- calculateLog2ratio(x = depth.dat[,i], baseline = baseline, badDepth = cutoff$lowdepth)
        # copy number estimate assuming diploid baseline (cn = 2 * 2^log2)
        cn <- round(2 * 2 ^ log2, 1)
        svType <- rep('.', nrow(dat))
        # an exon is DUP/DEL when both the Poisson probability is extreme and
        # the log2 ratio points in the matching direction
        dup.idx <- which(dat[,i] < cutoff$prob & log2 > 0)
        del.idx <- which(dat[,i] < cutoff$prob & log2 < 0)
        svType[dup.idx] <- 'DUP'
        svType[del.idx] <- 'DEL'
        df <- data.frame(
            chr=dat[,'chr'],
            start=dat[,'start'],
            end=dat[,'end'],
            gene=infos[,2],
            transcript=infos[,3],
            exon.num=exon.num$Freq,
            exon=infos[,4],
            exon.len=dat[,'end']-dat[,'start'],
            svtype=svType,
            cn=cn,
            prob=format(dat[,i], digits = 3, scientific = TRUE),
            depth=round(depth.dat[,i], 0),
            baseline=round(baseline, 0),
            pool.count=counts,
            past.count=rep(NA, nrow(dat))
        )
        write.table(df, file = raw.path, row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
        write.table(df[which(df$svtype %in% c('DEL', 'DUP')),, drop = FALSE],
                    file = positive.path, row.names = FALSE,
                    col.names = TRUE, quote = FALSE, sep = "\t")
        # filter: merge exon calls into gene-level events and apply cutoffs
        gene.df <- filterExonCNV(df, cutoff = cutoff)
        if (! is.null(gene.df)) {
            # genes where >= 20% of exons were unevaluable (prob == 1) are
            # moved to the bottom of the report as low-confidence calls
            count1 <- as.numeric(as.vector(gene.df[, ncol(gene.df)]))
            bad.index <- which(count1 / gene.df$exon.num >= 0.2)
            gene.df.good <- gene.df[setdiff(1:nrow(gene.df), bad.index),,drop = FALSE]
            gene.df.bad <- gene.df[bad.index,,drop = FALSE]
            # rank calls by significance plus the number of affected exons
            rank.coefficient <- -log10(gene.df.good$prob) + gene.df.good$exon.sv.num * 2
            gene.df.good <- gene.df.good[order(rank.coefficient, decreasing = TRUE),,drop = FALSE]
            write.table(rbind(gene.df.good, gene.df.bad),
                        file = filter.path, row.names = FALSE,
                        col.names = TRUE, quote = FALSE, sep = "\t")
            write(paste0('Write to path:\n', normalizePath(filter.path)), stdout())
        } else {
            # no calls survived filtering: still emit a header-plus-NA row so
            # downstream consumers always find the file with a fixed schema
            write.table(data.frame(
                chr=NA,
                start=NA,
                end=NA,
                gene=NA,
                trans=NA,
                exon.num=NA,
                exon.sv.id=NA,
                exon.sv.num=NA,
                svtype=NA,
                cn=NA,
                prob=NA,
                depth=NA,
                baseline=NA,
                pool.count=NA,
                past.count=NA,
                exon.low.num=NA
            ),
            file = filter.path, row.names = FALSE,
            col.names = TRUE, quote = FALSE, sep = "\t")
        }
    })
}
# Merge per-exon CNV rows (as produced inside callExonCNV) into gene-level
# calls and apply the probability / baseline / recurrence cutoffs.
#
# Args:
#   x:      data.frame of per-exon rows with columns gene, exon, svtype, cn,
#           prob, depth, baseline, pool.count, past.count, ...
#   cutoff: list with elements prob, pool.count, baseline, lowdepth (lowdepth
#           is not used here).
#
# Returns: a data.frame with one row per (gene, svtype) that passes the
#   filters, or NULL when no exon passes the prob/baseline cutoffs.
filterExonCNV <- function(x, cutoff = list(prob = 1E-4, pool.count = 3, baseline = 50, lowdepth = 10)) {
    # filter cutoff: keep exons that are both significant and have a
    # trustworthy baseline depth
    filter.x <- x[which(as.numeric(as.vector(x[,'prob'])) < cutoff$prob &
                        as.numeric(as.vector(x[,'baseline'])) >= cutoff$baseline ),, drop = FALSE]
    if (nrow(filter.x) > 0) {
        # merge exons sharing the same gene symbol
        filter.gene <- unique(as.character(as.vector(filter.x[,'gene'])))
        result <- lapply(filter.gene, function(gene) {
            idx <- which(as.character(as.vector(x[,'gene'])) %in% gene)
            gene.whole.dat <- x[idx,,drop = FALSE]
            # number of this gene's exons that could not be evaluated
            # (probability left at 1), reported as exon.low.num
            count1 <- length(which(as.numeric(as.vector(gene.whole.dat[,'prob'])) == 1))
            gene.dat <- gene.whole.dat[which(gene.whole.dat[,'svtype'] %in% c('DEL', 'DUP')),,drop = FALSE]
            # call by sv type: a gene can carry both a DEL and a DUP call
            svtype.class <- unique(as.character(as.vector(gene.dat[,'svtype'])))
            out.dat <- list()
            for (sv in svtype.class) {
                sub.dat <- gene.dat[which(as.character(as.vector(gene.dat[,'svtype'])) %in% sv),,drop = FALSE]
                # collapse affected exon numbers into compact run labels
                exon.sv.id <- continuousInteger(sort(as.numeric(as.vector(sub.dat[, 'exon']))))
                past.count <- na.omit(as.numeric(as.vector(sub.dat[, 'past.count'])))
                if (length(past.count) > 0) {
                    past.count <- min(past.count)
                } else {
                    past.count <- NA
                }
                # summarize the merged event: extreme positions, median depth
                # statistics and the most significant exon probability
                out.dat[[sv]] <- data.frame(chr=sub.dat[1, 'chr'],
                                        start=min(as.numeric(as.vector(sub.dat[, 'start']))),
                                        end=max(as.numeric(as.vector(sub.dat[, 'end']))),
                                        gene=gene,
                                        trans=sub.dat[1, 'transcript'],
                                        exon.num=sub.dat[1, 'exon.num'],
                                        exon.sv.id=paste(exon.sv.id, collapse = ','),
                                        exon.sv.num=nrow(sub.dat),
                                        svtype=sv,
                                        cn=median(as.numeric(as.vector(sub.dat[, 'cn'])), na.rm = TRUE),
                                        prob=min(as.numeric(as.vector(sub.dat[, 'prob'])), na.rm = TRUE),
                                        depth=median(as.numeric(as.vector(sub.dat[, 'depth'])), na.rm = TRUE),
                                        baseline=median(as.numeric(as.vector(sub.dat[, 'baseline'])), na.rm = TRUE),
                                        pool.count=min(as.numeric(as.vector(sub.dat[, 'pool.count'])), na.rm = TRUE),
                                        past.count=past.count,
                                        exon.low.num=count1)
            }
            out.dat <- do.call('rbind', out.dat)
            rownames(out.dat) <- NULL
            out.dat
        })
        result <- do.call('rbind', result)
        # keep events that are rare in the pool, or homozygous deletions
        # (cn == 0) regardless of recurrence
        return(result[which(as.numeric(as.vector(result[,'pool.count'])) <= cutoff$pool.count | as.numeric(as.vector(result[,'cn'])) == 0),,drop = FALSE])
    } else {
        return(NULL)
    }
}
doPois <- function(depth, ...){
    # Fit a one-parameter Poisson model to the depths by maximum likelihood
    # and return the density of each observation under the fitted model;
    # values near zero flag depths that are unlikely under the pooled model.
    #
    # Args:
    #   depth: numeric vector of read depths (a one-row data.frame, as passed
    #          by performFitPoisson, is flattened); coerced to integer counts.
    #   ...:   unused, kept for backward compatibility.
    #
    # Returns: numeric vector of Poisson densities, one per input depth.
    # unlist() makes the coercion robust to data.frame / list input
    depth <- as.integer(unlist(depth))
    # negative log-likelihood of Poisson(mu) for the observed depths
    negpois.LL <- function(par, ...){
        mu <- par[1]
        LL <- sum(dpois(depth, lambda = mu, log = TRUE))
        -LL
    }
    # the Poisson MLE is the sample mean, so nlm converges quickly even from
    # the fixed starting value of 2
    out.pois <- nlm(negpois.LL, 2)
    dpois(depth, lambda = out.pois$estimate[1])
}
continuousInteger <- function(x) {
    # Collapse a set of integers into run labels: consecutive values become
    # "start~end" and isolated values remain a single number, e.g.
    # c(1, 2, 3, 5) -> c("1~3", "5").
    #
    # Implemented in base R instead of IRanges::reduce() so this helper does
    # not require Bioconductor; sorting/de-duplicating first mirrors the
    # normalization reduce() performed. Empty input yields character(0).
    x <- sort(unique(as.integer(x)))
    if (length(x) == 0) return(character(0))
    # a new run starts wherever the gap to the previous value exceeds 1
    run.id <- cumsum(c(1L, diff(x) > 1L))
    runs <- split(x, run.id)
    unname(vapply(runs, function(run) {
        paste(unique(range(run)), collapse = "~")
    }, character(1)))
}
revContinuousInteger <- function(x) {
    # Inverse of continuousInteger(): expand a comma-separated run string such
    # as "1~3,5" back into the sorted numeric vector c(1, 2, 3, 5).
    #
    # Replaces the previous grow-with-c() loop with a functional expansion.
    tokens <- unlist(strsplit(as.character(x), ','))
    expanded <- lapply(tokens, function(token) {
        if (grepl('~', token)) {
            bounds <- as.numeric(unlist(strsplit(token, '~')))
            seq(bounds[1], bounds[2], 1)
        } else {
            as.numeric(token)
        }
    })
    sort(unlist(expanded))
}
#' @title Compute the log2 ratio between sample depth and baseline.
#' @description Input vectors of sample depth and baseline to compute the log2
#' ratio.
#' @param x A numeric vector of sample depth.
#' @param baseline A numeric vector contains baseline depth of region with the
#' same order in sample depth vector.
#' @param badDepth A numeric value; a baseline depth low than badDepth will be
#' set to \code{NA}.
#' @return A numeric vector of log2 ratio.
#' @author Zhan-Ni Chen
calculateLog2ratio <- function(x, baseline, badDepth) {
    # Compute log2(x / baseline) element-wise.
    #
    # Args:
    #   x:        numeric vector of sample depths (factor/character columns
    #             are coerced defensively via as.vector/as.numeric).
    #   baseline: numeric vector of baseline depths, same length as x.
    #   badDepth: baselines below this value are unreliable and yield NA.
    #
    # Returns: numeric vector of log2 ratios (NA where baseline is bad).
    x <- as.numeric(as.vector(x))
    baseline <- as.numeric(as.vector(baseline))
    # avoid log2(0) = -Inf: treat zero-depth positions as a small pseudo-depth
    x[which(x == 0)] <- 0.01
    baseline[which(baseline < badDepth)] <- NA
    # (removed a dead line that reassigned NA to already-NA positions)
    log2(x / baseline)
}
|
8f9bb8e889384c1789bfaf60469f8c5e1236c95b
|
b8a0090cea7e4b950d067e7cb051d7996b981988
|
/manuscript-figure-plots/make-cell-type-count-table.R
|
4eb183175deadc3832d718da3ea9a72f615a8a8e
|
[] |
no_license
|
dnarna909/2020-sn-muscle
|
7f828c168a663528fdf7025113d1265ba039868f
|
74c0879afbe8fdf149fa60c8552d0eea0ed98f18
|
refs/heads/master
| 2022-11-13T03:16:22.519454
| 2020-07-01T21:38:45
| 2020-07-01T21:38:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,096
|
r
|
make-cell-type-count-table.R
|
#!/usr/bin/env Rscript
# Build a cell-type count table: rows = cell types, columns = each
# species/modality combination, values = number of cells. Writes
# cell_type_counts.tsv in the working directory.
#
# Usage: make-cell-type-count-table.R CLUSTER_NAMES CLUSTER_ASSIGNMENTS LIBRARY_LABELS
#   CLUSTER_NAMES:       TSV mapping old cluster ids (old_name) to cell-type
#                        labels (new_name), with a header.
#   CLUSTER_ASSIGNMENTS: headerless TSV of library / barcode / cluster.
#   LIBRARY_LABELS:      TSV with at least library, species, modality columns.
library(dplyr)
library(tidyr)
args <- commandArgs(T)
CLUSTER_NAMES <- args[1]
CLUSTER_ASSIGNMENTS <- args[2]
LIBRARY_LABELS <- args[3]
library_to_modality_and_species <- read.table(LIBRARY_LABELS, head=T, as.is=T, sep='\t') %>%
  dplyr::select(library, species, modality)
# everything read as character so cluster ids never become numeric
clusters <- read.table(CLUSTER_ASSIGNMENTS, head=F, sep='\t', col.names = c('library', 'barcode', 'cluster'), colClasses = c('character'))
cluster_names <- read.table(CLUSTER_NAMES, head=T, sep='\t', colClasses = c('character')) %>% dplyr::rename(cluster=old_name)
# attach human-readable cell-type names and library metadata to each cell
clusters <- left_join(clusters, cluster_names)
clusters <- left_join(clusters, library_to_modality_and_species)
clusters$label <- paste(clusters$species, clusters$modality, sep='_')
# count cells per (species_modality, cell type) and pivot to wide format
tbl <- clusters %>%
  dplyr::group_by(label, new_name) %>%
  dplyr::summarize(count=n()) %>%
  dplyr::ungroup() %>%
  tidyr::spread(key=label, value=count) %>%
  dplyr::rename(cell_type=new_name)
# order rows by descending Human RNA cell count (assumes a Human_RNA column
# exists in the input -- TODO confirm for other datasets)
tbl <- tbl[rev(order(tbl$Human_RNA)),]
write.table(tbl, file = 'cell_type_counts.tsv', append = F, quote = F, sep = '\t', row.names = F, col.names = T)
|
594dae575e19344bd0e65e4fe1decdccddf68969
|
cf15fbeea99db004b475b65fdce55219a94c8182
|
/x_y_arcmaps.r
|
7c4935eb7f96867274ac96d0497193d6069ea488
|
[] |
no_license
|
tessam30/RProgramming
|
d09383a630e05c74452ae07c9856f0d634f790c4
|
87476adeb7c4a532de9f34febcc98d722e8d9c7f
|
refs/heads/master
| 2021-01-02T22:31:02.584799
| 2017-10-24T02:09:12
| 2017-10-24T02:09:12
| 34,971,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,572
|
r
|
x_y_arcmaps.r
|
# Creating x/y great-arc maps
# https://flowingdata.com/2011/05/11/how-to-map-connections-with-great-circles/
# Tutorial script: draws US flight routes as great-circle arcs on a world map,
# first for a single carrier, then color-coded by flight count, then one PDF
# per carrier. Downloads the example data from flowingdata.com at run time.
# required libs
library(maps)
library(geosphere)
library(mapproj)
map("state")
map("world", proj='bonne', param = 10)
# bounding box covering North America
xlim <- c(-171.738281, -56.601563)
ylim <- c(12.039321, 71.856229)
map("world", col="#f2f2f2", fill=TRUE, bg="white", lwd=0.05, xlim=xlim, ylim=ylim)
# single example arc: northern California to Maine
lat_ca <- 39.164141
lon_ca <- -121.640625
lat_me <- 45.213004
lon_me <- -68.906250
# gcIntermediate() interpolates points along the great circle between the two
# endpoints; lines() then draws them as one arc
inter <- gcIntermediate(c(lon_ca, lat_ca), c(lon_me, lat_me), n=50, addStartEnd=TRUE)
lines(inter, col = "red")
# airports: iata code + lat/long; flights: airline, airport pair, flight count
airports <- read.csv("http://datasets.flowingdata.com/tuts/maparcs/airports.csv", header=TRUE)
flights <- read.csv("http://datasets.flowingdata.com/tuts/maparcs/flights.csv", header=TRUE, as.is=TRUE)
map("world", col="#f2f2f2", fill=TRUE, bg="white", lwd=0.05, xlim=xlim, ylim=ylim)
# draw every American Airlines route in a uniform gray
fsub <- flights[flights$airline == "AA",]
for (j in 1:length(fsub$airline)) {
    air1 <- airports[airports$iata == fsub[j,]$airport1,]
    air2 <- airports[airports$iata == fsub[j,]$airport2,]
    inter <- gcIntermediate(c(air1[1,]$long, air1[1,]$lat), c(air2[1,]$long, air2[1,]$lat), n=100, addStartEnd=TRUE)
    lines(inter, col="gray", lwd=0.8)
}
# Use colorRampPalette to create a gradient fill based on given colors
pal <- colorRampPalette(c("#ffffd9", "#081d58"))
colors <- pal(100)
map("world", col="#f7f4f9", fill=TRUE, bg="gray", lwd=0.05, xlim=xlim, ylim=ylim)
fsub <- flights[flights$airline == "AA",]
maxcnt <- max(fsub$cnt)
# color each arc by its flight count relative to the busiest route
for (j in 1:length(fsub$airline)) {
    air1 <- airports[airports$iata == fsub[j,]$airport1,]
    air2 <- airports[airports$iata == fsub[j,]$airport2,]
    inter <- gcIntermediate(c(air1[1,]$long, air1[1,]$lat), c(air2[1,]$long, air2[1,]$lat), n=100, addStartEnd=TRUE)
    colindex <- round( (fsub[j,]$cnt / maxcnt) * length(colors) )
    lines(inter, col=colors[colindex], lwd=1.5)
}
# Map it all!!!
# NOTE: the second palette assignment immediately overrides the first
pal <- colorRampPalette(c("#f2f2f2", "black"))
pal <- colorRampPalette(c("#f2f2f2", "red"))
colors <- pal(100)
map("world", col="#f2f2f2", fill=TRUE, bg="white", lwd=0.05, xlim=xlim, ylim=ylim)
fsub <- flights[flights$airline == "AA",]
# draw the busiest routes last so they sit on top of quieter ones
fsub <- fsub[order(fsub$cnt),]
maxcnt <- max(fsub$cnt)
for (j in 1:length(fsub$airline)) {
    air1 <- airports[airports$iata == fsub[j,]$airport1,]
    air2 <- airports[airports$iata == fsub[j,]$airport2,]
    inter <- gcIntermediate(c(air1[1,]$long, air1[1,]$lat), c(air2[1,]$long, air2[1,]$lat), n=100, addStartEnd=TRUE)
    colindex <- round( (fsub[j,]$cnt / maxcnt) * length(colors) )
    lines(inter, col=colors[colindex], lwd=0.8)
}
# To map everything
# Unique carriers
carriers <- unique(flights$airline)
# Color
pal <- colorRampPalette(c("#333333", "white", "#1292db"))
colors <- pal(100)
# one dark-themed PDF per carrier, e.g. "carrierAA.pdf"
for (i in 1:length(carriers)) {
    pdf(paste("carrier", carriers[i], ".pdf", sep=""), width=11, height=7)
    map("world", col="#191919", fill=TRUE, bg="#000000", lwd=0.05, xlim=xlim, ylim=ylim)
    fsub <- flights[flights$airline == carriers[i],]
    fsub <- fsub[order(fsub$cnt),]
    maxcnt <- max(fsub$cnt)
    for (j in 1:length(fsub$airline)) {
        air1 <- airports[airports$iata == fsub[j,]$airport1,]
        air2 <- airports[airports$iata == fsub[j,]$airport2,]
        inter <- gcIntermediate(c(air1[1,]$long, air1[1,]$lat), c(air2[1,]$long, air2[1,]$lat), n=100, addStartEnd=TRUE)
        colindex <- round( (fsub[j,]$cnt / maxcnt) * length(colors) )
        lines(inter, col=colors[colindex], lwd=0.6)
    }
    dev.off()
}
|
7be58e46a0edeb86b0c2066e38c2be7dbd50b907
|
86066ea78219cab7f897c1e5e02850da2f66e82f
|
/StatisticalInferenceTrial1.R
|
a658660f4992213df6167e4551479c72a56d0a77
|
[] |
no_license
|
vishmaram/StatisticalInferenceTrials
|
471f00d97ff33277271abd13fe3e928d0bb1a2e7
|
3e68e0b08f0c0d3aac1ff9f5bcb24f2622cb7281
|
refs/heads/master
| 2021-01-01T03:55:58.468196
| 2016-05-26T23:35:10
| 2016-05-26T23:35:10
| 59,789,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30
|
r
|
StatisticalInferenceTrial1.R
|
# This is used for learning -1
|
b9af6d44bb6fab62853bcff389e261f962d9dea4
|
33193bde7e91aecd9f72392957640ce8790717a3
|
/app.R
|
a1a5491d2961f050932524263dcdeb23245ae902
|
[] |
no_license
|
keshavbans/CryptoCurrency
|
4c69759cbb04383cc82ede6a3190ec5626c1baeb
|
37c541a7bbcfaf4fc919080fa4dd4b9cde1f0314
|
refs/heads/master
| 2021-08-29T08:00:07.135688
| 2017-12-13T13:39:57
| 2017-12-13T13:39:57
| 114,125,255
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,909
|
r
|
app.R
|
# Shiny app: cryptocurrency price explorer. Lets the user pick one of 14
# coin datasets, shows summary() of the chosen data and a line plot of the
# daily opening price over time. Requires shiny + ggplot2 and the
# *_price.csv files in the working directory.
library(shiny)
library(ggplot2)
# Upload Datasets (one CSV per currency; each has Date and Open columns)
bitcoin <- read.csv("bitcoin_price.csv")
dash <- read.csv("dash_price.csv")
ethereum <- read.csv("ethereum_price.csv")
iota <- read.csv("iota_price.csv")
litecoin <- read.csv("litecoin_price.csv")
monero <- read.csv("monero_price.csv")
nem <- read.csv("nem_price.csv")
neo <- read.csv("neo_price.csv")
numeraire <- read.csv("numeraire_price.csv")
omisego <- read.csv("omisego_price.csv")
qtum <- read.csv("qtum_price.csv")
ripple <- read.csv("ripple_price.csv")
stratis<- read.csv("stratis_price.csv")
waves<- read.csv("waves_price.csv")
#User Interface of the app
ui <- fluidPage(
  # App title ----
  titlePanel("Digital Currency!"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: dataset chooser; the choice strings must match the switch()
      # labels in the server function below
      selectInput("dataset", "Choose a dataset:",
                  choices = c("Bitcoin","Dash","Ethereum","Iota", "Litecoin", "Monero", "Nem", "Neo", "Numeraire", "Omisego", "Qtum", "ripple", "Stratis", "Waves")),
      helpText("Note: while the data view will show only the specified",
               "number of observations, the summary will still be based",
               "on the full dataset."),
      actionButton("update", "Update View")
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: data summary and price-over-time line plot ----
      h4("Summary"),
      verbatimTextOutput("summary"),
      h4("Plots"),
      plotOutput(outputId = "distPlot")
    )
  )
)
server <- function(input, output) {
  # Map the selected dataset name to the corresponding data frame. The
  # expression only re-runs when the "Update View" button is pressed
  # (eventReactive on input$update); ignoreNULL = FALSE makes it run once at
  # startup before the button is ever clicked.
  datasetInput <- eventReactive(input$update, {
    switch(input$dataset,
           "Bitcoin" = bitcoin,
           "Dash" = dash,
           "Ethereum" = ethereum,
           "Iota" = iota,
           "Litecoin" = litecoin,
           "Monero" = monero,
           "Nem" = nem,
           "Neo" = neo,
           "Numeraire" = numeraire,
           "Omisego" = omisego,
           "Qtum" = qtum,
           "ripple" = ripple,
           "Stratis" = stratis,
           "Waves" = waves)
  }, ignoreNULL = FALSE)
  # text summary of every column of the chosen dataset
  output$summary <- renderPrint({
    dataset <- datasetInput()
    summary(dataset)
  })
  # line plot of daily opening price over time
  output$distPlot <- renderPlot({
    dataset <- datasetInput()
    summary(dataset)
    # dates are stored like "January 1, 2017"; parse with the matching format
    x <- as.Date(dataset$Date,format='%B %d, %Y')
    y <- dataset$Open
    ggplot(dataset, aes(x = x, y = y)) + geom_line()
  })
}
shinyApp(ui = ui, server = server)
|
79255a33157adbd79f87428a8c6cbf8c413ee6c5
|
468075902da967e77578f8445a542faf2ee51227
|
/R/LEstep.R
|
11f7927fa655bfc4f4fffabc550e0875c6e58b98
|
[] |
no_license
|
fchamroukhi/HDME
|
202dd27585ff2a50fe0c59b62bb8e5836cf74d3f
|
09d44e933cc4cd60e85cf920621708da44d12016
|
refs/heads/master
| 2020-06-19T18:22:12.092085
| 2019-10-23T13:24:54
| 2019-10-23T13:24:54
| 196,819,715
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 530
|
r
|
LEstep.R
|
# E-step: compute posterior component-membership probabilities (tau).
#
# For each observation i and mixture component k, tau[i, k] is proportional
# to the gating probability pik[i, k] (from Pik()) times the component's
# probability of the observed response Y[i] (from Pi()); each row is then
# normalized to sum to one. Pik() and Pi() are defined elsewhere in the
# package.
Le.step = function(eta, wk, Y, X, K, R)
{
  n_obs <- nrow(X)
  tau <- matrix(0, nrow = n_obs, ncol = K)
  gating <- Pik(n_obs, K, X, wk)
  for (obs in seq_len(n_obs)) {
    for (comp in seq_len(K)) {
      # eta[comp, , ] collapses to an unexpected orientation when R == 2,
      # so transpose in that case (be careful for case R > 2).
      eta_k <- as.matrix(eta[comp, , ])
      if (R == 2) {
        eta_k <- t(eta_k)
      }
      resp_prob <- Pi(R, X[obs, ], eta_k)
      tau[obs, comp] <- gating[obs, comp] * resp_prob[Y[obs]]
    }
    # Normalize the row so the posterior probabilities sum to one.
    tau[obs, ] <- tau[obs, ] / sum(tau[obs, ])
  }
  tau
}
|
8070eab984b0b67dc982f4c22b1a2a4027def742
|
dd62c2f20d16320a51860352282be509867bb72b
|
/munge/homicide.R
|
95b6a68bf384910317bca606d9b2fde6407d28cf
|
[] |
no_license
|
WaverlyWei/Violence-Project
|
73b94a957fee2ff273223c49e145b18e63f0d098
|
d1dd9ab2345d2e71c5f8d7ebb039ce23f33d94c4
|
refs/heads/master
| 2020-03-26T04:45:55.756501
| 2019-08-21T17:43:47
| 2019-08-21T17:43:47
| 144,521,679
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,463
|
r
|
homicide.R
|
# Violence project: variable-importance (vimp) and C-TMLE analyses of the
# homicide years-of-life-lost ("_ayll") outcomes by neighborhood.
library(readstata13)
library(vimp)
library(SuperLearner)
library(ctmle)
library(ggplot2)
library(Amelia)
library(superheat)
setwd("/Users/waverlywei/Desktop/New Trauma Project ")  # NOTE(review): machine-specific path
dat.1 <- readxl::read_excel("Master by Neighborhood for Alan.xlsx",sheet = 1)
dat.2 <- read.dta13("chronicdz3.dta")
# variable selection with vim package
sl_lib = c("SL.glm","SL.mean","SL.glmnet","SL.rpart")
# set covariates (columns 2-4, 25 and 49-83 of the Stata data)
cov = dat.2[,c(2:4,25,49:83)]
# ===== homicide as outcome ======== #
df_imp = cbind(out=dat.2$homicide_ayll,cov)
n = ncol(df_imp) - 1
# Variable-importance estimate and CI for each covariate in turn.
imp = low = high = NULL
for (i in seq_len(n)){
  single_vim = vim(f1 = y~x, f2 = fit~x, data = df_imp, y = df_imp[,1], indx = i, SL.library = sl_lib)
  # accumulate importance estimates and CI bounds
  imp = c(imp,single_vim$est)
  low = c(low, single_vim$ci[1])
  high = c(high, single_vim$ci[2])
}
nam = names(df_imp)[2:ncol(df_imp)]
df = data.frame(imp, low, high, nam)
imp_descend = df[with(df,order(-imp)),]
save("imp_descend", file = "imp_descend.Rda")
# ==========all "_ayll" variables as outcomes + covariates ===========#
df_all = cbind(dat.2[,5:24],cov)
imp = low = high = out_name = cov_name = NULL
# remove rows with any NA outcome
NA_index = apply(df_all[,1:20],1, function(x) any(is.na(x)))
df_all = df_all[!NA_index,]
# For each of the 20 outcomes, estimate the importance of each covariate.
# (j = 1 pairs the outcome with itself; those rows are dropped below.)
for (i in 1:20){
  for (j in 1:39){
    print(i)
    df_now = df_all[,c(i,21:ncol(df_all))]
    single_vim = vim(f1 = y~x, f2 = fit~x, data = df_now, y = df_now[,1], indx = j, SL.library = sl_lib)
    out_name = c(out_name, names(df_all)[i])
    cov_name = c(cov_name,names(df_now)[j])
    imp = c(imp,single_vim$est)
    low = c(low, single_vim$ci[1])
    high = c(high, single_vim$ci[2])
  }
}
whole = data.frame(out_name, cov_name, imp, low, high)
save("whole", file = "whole.Rda")
# Drop the self-pair (j = 1) row of each 39-row outcome block.
idx = seq(1,nrow(whole),39)
whole_new = whole[-idx, ]
## Heatmap using superheat
# re-organize importance estimates into a 20-outcome x 38-covariate matrix
try = matrix(whole_new$imp, nrow = 20, ncol = 38,byrow = TRUE)
rownames(try) = names(dat.2)[5:24]
colnames(try) = whole_new$cov_name[1:38]
superheat(try,bottom.label.text.angle = 80,bottom.label.text.size = 3,row.dendrogram = TRUE,
          title = "Variable Importance Measures")
## ===========SKIP=======
# TRY heatmap using ggplot
h = ggplot(whole_new, aes(out_name,cov_name)) +ggtitle('Variable Importance Measures')+
  geom_tile(aes(fill = imp), color = "white") +
  scale_fill_gradient(low = "white", high = "red") +
  ylab("Covariates") +
  xlab("Outcome") +
  theme(legend.title = element_text(size = 10),
        legend.text = element_text(size = 12),
        plot.title = element_text(size=16),
        axis.title=element_text(size=14,face="bold"),
        axis.text.x = element_text(angle = 90, hjust = 1)) +
  labs(fill = "variable importance")
h
## bar plot of the 10 most important covariates for homicide_ayll
imp_top = imp_descend[1:10,]
p <- ggplot(imp_top,aes(imp_top$nam,imp_top$imp))
p = p + geom_crossbar(aes(ymin = imp_top$low,ymax = imp_top$high,colour = "red"), width = 0.1) +
  ylab("importance")+xlab("Covariates")+ggtitle("ayll_homicide Top10 Important Variables")+
  theme(axis.text.x = element_text(face="bold", size=8, angle=45))+
  theme(plot.title = element_text(hjust = 0.5))+ theme(legend.position="none")+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
## ==============variable selection with C-TMLE==============
# set Y
Y = df_imp$out
N = nrow(df_imp)
# BUG FIX: the loop below appends to `vec`, which was never initialized
# (the original initialized unused str_vec/est_vec/p_vec/low_vec/high_vec
# instead), so the script stopped with "object 'vec' not found".
vec = c()
# Treat each covariate in turn as a binary "treatment" A,
# dichotomized at its mean (> mean == 1, <= mean == 0).
for ( i in 3:40){
  A = ifelse(df_imp[,i]<=mean(df_imp[,i]),0,1)
  # set W (all remaining covariates)
  W = df_imp[,-c(1,i)]
  # Q: marginal outcome means by treatment arm
  Q = cbind(rep(mean(Y[A == 0]), N), rep(mean(Y[A == 1]), N))
  # variable selection
  #ctmle_discrete_fit1 <- ctmleDiscrete(Y = Y, A = A, W = data.frame(W), Q = Q,
  #                   preOrder = FALSE, detailed = TRUE)
  # try fit2, w/o preset Q
  ctmle_discrete_fit2 <- ctmleDiscrete(Y = Y, A = A, W = data.frame(W),
                                       preOrder = FALSE, detailed = TRUE)
  res = summary(ctmle_discrete_fit2)
  # selected candidate: comma-join the terms retained by C-TMLE
  selected = res$selected
  str = paste(res$terms[1:selected],collapse = ",")
  vec = c(vec,str)
}
# create results table: treatment variable + its selected terms
df = cbind(names(df_imp)[3:40],vec)
# BUG FIX: `names()` on a matrix does not set column names; use colnames().
colnames(df) = c("A","important vars")
# out
save(df,file = "ctmle_results.Rda")
#-----------------------
# PCA
#------------------------
|
f498bcb0a98b668a1cae0ee619810e79cdec80c8
|
6b7eac94cab95036dfcb8f49f992524947aa40ca
|
/man/sum_betas.Rd
|
81039b32f2842b015ddbbfcc6e11d075a0919461
|
[
"MIT"
] |
permissive
|
Urban-Analytics/rampuaR
|
d9e4a7b4acfbf06cccc0b25a68dfafebc1836256
|
4a73131228b872a517916e964ac732ff3b25d519
|
refs/heads/master
| 2023-01-14T11:27:10.922266
| 2020-11-24T15:20:49
| 2020-11-24T15:20:49
| 280,127,722
| 2
| 0
|
MIT
| 2020-11-05T10:31:09
| 2020-07-16T10:44:09
|
R
|
UTF-8
|
R
| false
| true
| 657
|
rd
|
sum_betas.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/covid_status_functions.R
\name{sum_betas}
\alias{sum_betas}
\title{Summing betas for use in COVID probability calculation}
\usage{
sum_betas(df, betas, risk_cap_val = NA)
}
\arguments{
\item{df}{The input list - the output from the create_input function}
\item{betas}{List of betas associated with variables to be used in
calculating probability of becoming a COVID case}
\item{risk_cap_val}{The value at which current_risk will be capped}
}
\value{
the sum of betas
}
\description{
Calculating probabilities of becoming a COVID case based on each individuals
'current_risk'
}
|
11809b36e4adc6a86274b78363d9dbbf53bf48b5
|
7daf72d1abe4b13d1e26dc46abddfebcfc42d9e8
|
/man/min_n.Rd
|
e8608dc45ee126f08a8ecbf6cc02541d81a41a50
|
[
"MIT"
] |
permissive
|
farcego/rbl
|
6c39a7f2e63564c75860aa6a7887b2b49ffb73fb
|
b1cfa946b978dae09bf4d4b79267c4269e067627
|
refs/heads/master
| 2020-03-21T15:25:49.368438
| 2017-06-15T09:22:11
| 2017-06-15T09:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 711
|
rd
|
min_n.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_functionals.r
\name{min_n}
\alias{min_n}
\title{Create a function that return a result if number of valid obs >= n}
\usage{
min_n(f, n)
}
\arguments{
\item{f}{a function having a vector of data as first argument}
\item{n}{the minimum number of observation required to return a non-NA
result. A number in \code{(0;1[} will be interpreted as a proportion.}
}
\description{
Create a function that return a result if number of valid obs >= n
}
\examples{
mean5 <- min_n(mean, 5)
mean5(c(1:4, NA))
mean5(c(1:5, NA))
mean90percent <- min_n(mean, 0.90)
mean90percent(c(1:8, NA, NA))
mean90percent(c(1:9, NA))
}
\keyword{internal}
|
4dc621bb49649cf025fcf66e8da1ee6d18153fe8
|
b66a11af854338b50f57b714c34336b030d00c97
|
/code/theme.R
|
40476591f0e5f5d479228167a3095765b38007ca
|
[] |
no_license
|
aritrakn/code_pitfalls_iml
|
60f9ae0e2011e6b923db54f64800e595b320f143
|
40296afedb6461469c7a32ae563e8d22e27ed625
|
refs/heads/main
| 2023-07-04T22:20:33.076448
| 2021-08-17T07:06:56
| 2021-08-17T07:06:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 93
|
r
|
theme.R
|
# Project-wide ggplot2 theme: black-and-white base with 16 pt text.
library("ggplot2")
th = theme_bw() +
  theme(text = element_text(size = 16))
# Register `th` as the default theme for all subsequent ggplot2 figures.
theme_set(th)
|
486d6b2a5962e05d022dc3d77acef00222888882
|
238f01972914d67ddbc3a8dfb43ad01a49f7de90
|
/man/RFortLangComp-package.Rd
|
4b191eeba588e1d0b2ea53f2b810b0b3ee707cc8
|
[] |
no_license
|
aadler/RFortLangComp
|
115680aef39996c30bcab8212fa8f938f5b66756
|
42a0ecbc926dd6a26f9447cb91205769c3729ad4
|
refs/heads/master
| 2020-04-13T03:43:52.322220
| 2019-05-26T20:15:33
| 2019-05-26T20:15:33
| 162,940,340
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,063
|
rd
|
RFortLangComp-package.Rd
|
\name{RFortLangComp-package}
\alias{RFortLangComp-package}
\alias{RFortLangComp}
\title{
R, Fortran, and C versions of layer loss functions for speed comparison
}
\description{
This package contains various flavors of R, Fortran, and C versions of a simple layer loss cost function. These flavors represent levels of OpenMP parallelism. These functions' performance will be compared with Rcpp versions of the same function in the CppLangComp package. These packages are \bold{not} intended for uploading to CRAN.
}
\details{
The functions are called by name into the same environment as the functions from CppLangComp and timings are compared.
}
\author{
Maintainer: Avraham Adler \email{Avraham.Adler@gmail.com}
}
\references{
There will be a blog post on \url{https://www.avrahamadler.com} that will discuss these packages.
}
\keyword{ package }
\seealso{
Optional links to other man pages
}
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
|
98ee26c3eae0c71f3dd39cf4e9f4566a3ed572b9
|
7441a5909020383eb5b328439c2025367c9375ae
|
/man/trendfilter.Rd
|
444853d6ad1e4d6cf7e074f4a870fe985b0ddb52
|
[] |
no_license
|
cran/genlasso
|
1c306ff866222fd38561173aa936de8fe64e4d47
|
c2367f08977cfcc615f3e0e33ad885ab3d72a94e
|
refs/heads/master
| 2022-08-29T09:01:57.881638
| 2022-08-22T07:10:10
| 2022-08-22T07:10:10
| 17,696,330
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,854
|
rd
|
trendfilter.Rd
|
\name{trendfilter}
\alias{trendfilter}
\title{
Compute the trend filtering solution path for any polynomial order
}
\description{
This function computes the solution path for the trend filtering
problem of an arbitrary polynomial order. When the order is set to
zero, trend filtering is equivalent to the 1d fused lasso, see
\code{\link{fusedlasso1d}}.
}
\usage{
trendfilter(y, pos, X, ord = 1, approx = FALSE, maxsteps = 2000,
minlam = 0, rtol = 1e-07, btol = 1e-07, eps = 1e-04,
verbose = FALSE)
}
\arguments{
\item{y}{
a numeric response vector.
}
\item{pos}{
an optional numeric vector specifying the positions of the
observations, and missing \code{pos} is assumed to mean unit spacing.
}
\item{X}{
an optional matrix of predictor variables, with observations along
the rows, and variables along the columns. If the passed \code{X}
has more columns than rows, then a warning is given, and a small ridge
penalty is added to the generalized lasso criterion before the path
is computed. If \code{X} has less columns than rows, then its rank is
not checked for efficiency, and (unlike the \code{genasso} function) a
ridge penalty is not automatically added if it is rank deficient.
Therefore, a tall, rank deficient \code{X} may cause errors.
}
\item{ord}{
an integer specifying the desired order of the piecewise polynomial
produced by the solution of the trend filtering problem. Must be
non-negative, and defaults to 1 (linear trend filtering).
}
\item{approx}{
a logical variable indicating if the approximate solution path
should be used (with no dual coordinates leaving the boundary).
Default is \code{FALSE}. Note
that for the 1d fused lasso (zeroth order trend filtering), with
identity predictor matrix, this approximate path is the same as
the exact solution path.
}
\item{maxsteps}{
an integer specifying the maximum number of steps for the algorithm
to take before termination. Default is 2000.
}
\item{minlam}{
a numeric variable indicating the value of lambda at which the path
should terminate. Default is 0.
}
\item{rtol}{
a numeric variable giving the tolerance for determining the rank of
a matrix: if a diagonal value in the R factor of a QR decomposition
is less than R, in absolute value, then it is considered zero. Hence
making rtol larger means being less stringent with determination of
matrix rank. In general, do not change this unless you know what you
are getting into! Default is 1e-7.
}
\item{btol}{
a numeric variable giving the tolerance for accepting "late" hitting
and leaving times: future hitting times and leaving times should always
be less than the current knot in the path, but sometimes for numerical
reasons they are larger; any computed hitting or leaving time larger
than the current knot + btol is thrown away. Hence making btol larger
means being less stringent withthe determination of hitting and leaving
times. Again, in general, do not change this unless you know what you
are getting into! Default is 1e-7.
}
\item{eps}{
a numeric variable indicating the multiplier for the ridge penalty,
in the case that \code{X} is wide (more columns than rows). If numeric
problems occur, make \code{eps} larger. Default is 1e-4.
}
\item{verbose}{
a logical variable indicating if progress should be reported after
each knot in the path.
}
}
\details{
When the predictor matrix is the identity, trend filtering fits a
piecewise polynomial to linearly ordered observations. The result is
similar to that of a polynomial regression spline or a smoothing
spline, except the knots in the piecewise polynomial (changes in the
(k+1)st derivative, if the polynomial order is k) are chosen
adaptively based on the observations. This is in contrast to
regression splines, where the knots are prespecified, and smoothing
splines, which place a knot at every data point.
With a nonidentity predictor matrix, the trend filtering problem
enforces piecewise polynomial smoothness along successive components
of the coefficient vector. This can be used to fit a kind of varying
coefficient model.
We note that, in the signal approximator (identity predictor matrix)
case, fitting trend filtering estimate with arbitrary positions \code{pos}
is theoretically no harder than doing so on an evenly spaced grid. However
in practice, with differing gaps between points, the algorithm can
become numerically unstable even for large (or moderately large) problems.
This is especially true as the polynomial order increases. Hence, use the
positions argument \code{pos} with caution.
}
\value{
Returns an object of class "trendfilter", a subclass of
"genlasso". This is a list with at least following components:
\item{lambda}{
values of lambda at which the solution path changes slope,
i.e., kinks or knots.
}
\item{beta}{
a matrix of primal coefficients, each column corresponding to a knot
in the solution path.
}
\item{fit}{
a matrix of fitted values, each column corresponding to a knot in
the solution path.
}
\item{u}{
a matrix of dual coefficients, each column corresponding to a knot
in the solution path.
}
\item{hit}{
a vector of logical values indicating if a new variable in the dual
solution hit the box contraint boundary. A value of \code{FALSE}
indicates a variable leaving the boundary.
}
\item{df}{
a vector giving an unbiased estimate of the degrees of freedom of
the fit at each knot in the solution path.
}
\item{y}{
the observed response vector. Useful for plotting and other
methods.
}
\item{completepath}{
a logical variable indicating whether the complete path was
computed (terminating the path early with the \code{maxsteps} or
\code{minlam} options results in a value of \code{FALSE}).
}
\item{bls}{
the least squares solution, i.e., the solution at lambda = 0. This
can be \code{NULL} when \code{completepath} is \code{FALSE}.
}
\item{ord}{
the order of the piecewise polynomial that has been fit.
}
\item{call}{
the matched call.
}
}
\author{
Taylor B. Arnold and Ryan J. Tibshirani
}
\references{
Tibshirani, R. J. and Taylor, J. (2011), "The solution path of the
generalized lasso", Annals of Statistics 39 (3) 1335--1371.
Tibshirani, R. J. (2014), "Adaptive piecewise polynomial estimation
via trend filtering", Annals of Statistics 42 (1): 285--323.
Arnold, T. B. and Tibshirani, R. J. (2014), "Efficient implementations
of the generalized lasso dual path algorithm", arXiv: 1405.3222.
Kim, S.-J., Koh, K., Boyd, S. and Gorinevsky, D. (2009), "l1 trend
filtering", SIAM Review 51 (2), 339--360.
}
\seealso{
\code{\link{fusedlasso1d}}, \code{\link{genlasso}},
\code{\link{cv.trendfilter}}, \code{\link{plot.trendfilter}}
}
\examples{
# Constant trend filtering (the 1d fused lasso)
set.seed(0)
n = 100
beta0 = rep(sample(1:10,5),each=n/5)
y = beta0 + rnorm(n,sd=0.8)
a = fusedlasso1d(y)
plot(a)
# Linear trend filtering
set.seed(0)
n = 100
beta0 = numeric(n)
beta0[1:20] = (0:19)*4/19+2
beta0[20:45] = (25:0)*3/25+3
beta0[45:80] = (0:35)*9/35+3
beta0[80:100] = (20:0)*4/20+8
y = beta0 + rnorm(n)
a = trendfilter(y,ord=1)
plot(a,df=c(2,3,4,10))
# Cubic trend filtering
set.seed(0)
n = 100
beta0 = numeric(100)
beta0[1:40] = (1:40-20)^3
beta0[40:50] = -60*(40:50-50)^2 + 60*100+20^3
beta0[50:70] = -20*(50:70-50)^2 + 60*100+20^3
beta0[70:100] = -1/6*(70:100-110)^3 + -1/6*40^3 + 6000
beta0 = -beta0
beta0 = (beta0-min(beta0))*10/diff(range(beta0))
y = beta0 + rnorm(n)
a = trendfilter(y,ord=3)
plot(a,nlam=5)
}
\keyword{models}
|
f9df8fff27614445aaecc05c4a5b0f90402efda8
|
69b49ce61413bc8190227621b0aa8dfaf951a048
|
/src/Concerto/TestBundle/Resources/R/concerto5/R/concerto.file.getUrl.R
|
c5b2a779bebfff148567e90a6aed46138de449ba
|
[
"Apache-2.0"
] |
permissive
|
campsych/concerto-platform
|
de926ae820f2a3cf6985598f3824dee8f4615232
|
988b67e8d52acbf25fdc9078e7592cc07d2dd9a3
|
refs/heads/master
| 2023-08-31T08:09:05.570628
| 2023-08-23T16:43:03
| 2023-08-23T16:43:03
| 55,242,761
| 164
| 109
|
Apache-2.0
| 2023-07-26T15:10:48
| 2016-04-01T15:34:25
|
PHP
|
UTF-8
|
R
| false
| false
| 203
|
r
|
concerto.file.getUrl.R
|
# Build the public URL for a media file served by the Concerto platform.
#
# filename: path of the file relative to the platform's media directory.
# noCache:  if TRUE, append a timestamp query parameter so browsers bypass
#           their cache and re-fetch the file.
# Returns a character scalar with the full URL.
#
# Relies on the global `concerto` list (provided by the Concerto R
# environment) for the media base URL.
concerto.file.getUrl = function(filename, noCache=FALSE){
  # Fixed: default was the reassignable shorthand `F`; use FALSE.
  url = paste0(concerto$mediaUrl, "/", filename)
  if(noCache) {
    # Cache-buster: current time in seconds since epoch.
    url = paste0(url, "?ts=",as.numeric(Sys.time()))
  }
  return(url)
}
|
d7a4c09dfd2d2982401f3b9f0f23545ea5e2dbda
|
2161e2c9b1463f3f0b8d27a9447c136e5e08d2b9
|
/man/SumRelAbund.Rd
|
d2735c66a33cca47fd6ffb92ccb85a90f4ca0c47
|
[] |
no_license
|
NCRN/NCRNbirds
|
14a258e8182849bb0434eb4368fa291105d56a7c
|
5a512b736d674d9308c27667e7a99b142aebfcef
|
refs/heads/master
| 2023-08-16T13:00:26.367713
| 2023-07-11T15:54:50
| 2023-07-11T15:54:50
| 32,335,489
| 5
| 12
| null | 2023-08-17T15:09:47
| 2015-03-16T15:44:44
|
R
|
UTF-8
|
R
| false
| true
| 3,144
|
rd
|
SumRelAbund.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SumRelAbund.R
\name{SumRelAbund}
\alias{SumRelAbund}
\title{SumRelAbund}
\usage{
SumRelAbund(
object,
parks = NA,
points = NA,
AOU = NA,
years = NA,
times = NA,
band = 1,
visits = NA,
CalcByYear = FALSE,
max = TRUE,
sort = FALSE,
abund = 10,
output = "dataframe",
...
)
}
\arguments{
\item{object}{An \code{NCRNbirds} object or a \code{list} of such objects.}
\item{parks}{A character vector of park codes. Only visits within these parks will be returned.}
\item{points}{A character vector. The names of one or more points where the data was collected.}
\item{AOU}{A character vector. One or more AOU (American Ornithological Union) codes of bird species. Detections will be summed by each individual species.}
\item{years}{A vector of numbers. Will return only data from the indicated years.}
\item{times}{A numeric vector of length 1 passed on to \code{\link{getVisits}} and \code{\link{getBirds}}. Returns only data from points where the number of years that a point has been visited is greater or equal to the value of \code{times}. This is determined based on the data found in the \code{Visits} slot.}
\item{band}{A numeric vector. Defaults to 1. Only observations whose \code{Distance_id} field matches a value in \code{band} will be returned.}
\item{visits}{The visits that will be used for the matrix. Defaults to \code{NA}. See Details below.}
\item{CalcByYear}{Logical, if \code{TRUE}, will calculate mean detections across all visits per year. Defaults to \code{FALSE}, calculating per visit.}
\item{max}{Logical, if \code{TRUE} (default), relative abundance will be calculated from the maximum count among visits in a given year.}
\item{sort}{Logical, if \code{TRUE}, when multiple species are selected it will calculate and sort relative abundance per species. See \code{abund}.}
\item{abund}{Numeric, When \code{sort} = \code{TRUE}, used to provide a numeric value to select the most abundant species.
E.g., abund = 10 will return mean detections of the top 10 species. You can use the returned \code{data.frame} to provide species AOU.}
\item{output}{Either "dataframe" (the default) or "list". Note that this must be in quotes. Determines the type of output from the function.}
\item{...}{Additional arguments passed to \code{getChecklist}}
}
\description{
Produces a summary of raw detections by species for plotting and analysis.
}
\details{
Summarizes relative abundance by species (mean detections per point) for a \code{NCRNbirds} object or a \code{list} of such objects.
If \code{visits} is left as \code{NA} then the visits used will be 1 through the number of visits indicated in the \code{visits} slot.
Otherwise a numeric vector, e.g. c(1,2), can be used to select which visits are used.
\code{SumRelAbund} requires at least 2 monitoring points or at least 2 years of data to be specified, unless both \code{CalcByYear} and
\code{sort} are \code{TRUE}.
If \code{sort} is \code{TRUE}, then data will be combined across the visits, points and years indicated by the other arguments.
}
|
b55cd5dd7d7e1ee72afd2243c91460d6124c089f
|
c0b85e47a0c19abb799841a0cadbcf9c0fb75052
|
/R/contrastLimma.R
|
221efd1a51551bc4aebe3c684fc44a52e4a90326
|
[] |
no_license
|
jtlovell/physGenomicsPVFinal
|
e4c4c3eb737a67d3aef390a3d89f12d4829d875f
|
9dbf6632a7e783e365e1604602a76436505f79b4
|
refs/heads/master
| 2021-01-10T09:27:05.839396
| 2016-02-02T21:16:17
| 2016-02-02T21:16:17
| 50,861,717
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,281
|
r
|
contrastLimma.R
|
# Fit a limma-voom linear model to RNA-seq counts and return a table of
# per-gene statistics for one or more design coefficients.
#
# Arguments:
#   counts             matrix of raw counts (genes x samples)
#   info               data.frame of sample annotations used by model.matrix()
#   formula            model formula (character or formula) for the design
#   use.qualityWeights use voomWithQualityWeights() instead of plain voom()
#   block              blocking factor (e.g. subject) for duplicateCorrelation
#   tests              design coefficients to extract; "all" = every column
#   geneIDs            gene identifiers; defaults to rownames(counts)
#   useBlock           whether to model within-block correlation
#   getTopTable        include topTable-style columns (logFC, t, B)
#   getEbayes          include eBayes columns (coefficients, lods, p/q-values)
#   ...                reserved for future options (currently unused)
#
# Returns a data.frame with one row per gene; column names are lower-cased.
#
# BUG FIX: the original signature read `getEbayes=T...` (missing comma), so
# the default was the undefined symbol `T...` and `...` was lost entirely.
contrastLimma<-function(counts, info, formula, use.qualityWeights=TRUE, block, tests="all", geneIDs=NA, useBlock=TRUE, getTopTable=TRUE, getEbayes=TRUE, ...){
  # Default geneIDs to the count-matrix rownames. The length guard avoids a
  # vectorized condition when a vector of IDs is supplied.
  if(length(geneIDs) == 1 && is.na(geneIDs)){
    geneIDs<-rownames(counts)
  }
  design<-model.matrix(as.formula(formula), data = info)
  y <- DGEList(counts = counts)
  y <- calcNormFactors(y)
  if(use.qualityWeights){
    v <- voomWithQualityWeights(y, design=design, plot = FALSE)
  }else{
    v <- voom(y, design=design, plot = FALSE)
  }
  if(useBlock){
    # Repeated measures: estimate the within-block correlation and pass it
    # to lmFit along with the blocking factor.
    dupcor <- duplicateCorrelation(counts,design, block=as.factor(block))
    fit <- lmFit(v, design=design, correlation=dupcor$consensus, block=as.factor(block))
  }else{
    fit <- lmFit(v, design=design)
  }
  fit<-eBayes(fit)
  # Per-gene model-level statistics shared by all coefficients.
  out<-data.frame(gene=geneIDs,
                  sigma=fit$sigma,
                  s2.post=fit$s2.post,
                  Amean=fit$Amean)
  if(identical(tests, "all")){
    tests<-attr(design, "dimnames")[[2]]
  }
  # For each requested coefficient, build the requested statistic columns.
  tests.out<-lapply(tests, function(x){
    if(getEbayes && getTopTable){
      out2<-data.frame(fit$stdev.unscaled[,x],
                       fit$coefficients[,x],
                       fit$lods[,x],
                       fit$p.value[,x],
                       qvalue(fit$p.value[,x])$qvalue)
      colnames(out2)<-paste("ebayes",x,c("stdev.unscaled","coefficients","lods","p.value","q.value"),sep="_")
      out3<-data.frame(toptable(fit, p.value=1, coef=x, number=100000))
      out3<-out3[,c("logFC","t","B")]
      colnames(out3)<-paste("tt",x,colnames(out3),sep="_")
      out2<-data.frame(out2, out3)
    }else{
      if(getTopTable){
        # BUG FIX: this branch referenced `out3` before assigning it;
        # all operations now stay on `out2`.
        out2<-data.frame(toptable(fit, p.value=1, coef=x, number=100000))
        out2<-out2[,c("logFC","t","B")]
        colnames(out2)<-paste("tt", x,colnames(out2),sep="_")
      }else{
        out2<-data.frame(fit$stdev.unscaled[,x],
                         fit$coefficients[,x],
                         fit$lods[,x],
                         fit$p.value[,x],
                         qvalue(fit$p.value[,x])$qvalue)
        colnames(out2)<-paste("ebayes",x,c("stdev.unscaled","coefficients","lods","p.value","q.value"),sep="_")
      }
    }
    out2
  })
  tests.out2<-do.call(cbind, tests.out)
  all.out<-cbind(data.frame(out),tests.out2)
  colnames(all.out)<-tolower(colnames(all.out))
  return(all.out)
}
|
ccd9b5fd626574828c6aa5135b9c225076f32e3f
|
f86e886e41d8f3b8de507189566bec977c3a5d52
|
/tools/mapping_quality_stats/mapping_quality_stats.r
|
cdc360f799d996a1ced4a33b1203dc1f7234051c
|
[
"MIT"
] |
permissive
|
bgruening/tools-artbio
|
e8398bbab54af97eafc32d6f69b746b9c9a8bf67
|
e1e871049975dff030bf1e6fe2df8b8fa8997141
|
refs/heads/master
| 2023-08-05T07:19:03.096000
| 2023-07-20T00:14:25
| 2023-07-20T00:14:25
| 53,533,173
| 0
| 0
|
MIT
| 2023-07-20T11:47:22
| 2016-03-09T21:28:48
|
Python
|
UTF-8
|
R
| false
| false
| 809
|
r
|
mapping_quality_stats.r
|
## Galaxy tool script: barplot of read counts per mapping-quality (MAPQ)
## value, read from a two-column tabular file and written to a PDF.
## Setup R error handling to go to stderr
options(show.error.messages = FALSE,
        error = function() {
            cat(geterrmessage(), file = stderr())
            q("no", 1, FALSE)
        }
)
warnings()
library(optparse)
library(ggplot2)
# Command-line interface: -i/--input tabular file, -o/--output PDF path.
option_list <- list(
    make_option(c("-i", "--input"), type = "character", help = "Path to tabular file"),
    make_option(c("-o", "--output"), type = "character", help = "path to the pdf plot")
)
parser <- OptionParser(usage = "%prog [options] file", option_list = option_list)
args <- parse_args(parser)
# Read the tabular input; rename columns explicitly so the aesthetics below
# are stable regardless of the input header. (Renamed the data frame from
# `table` to avoid masking base::table().)
mapq_counts <- read.delim(args$input, header = TRUE)
colnames(mapq_counts) <- c("MAPQ", "Counts")
# Barplot
pdf(file = args$output)
ggplot(mapq_counts, aes(x = MAPQ, y = Counts)) +
    geom_bar(stat = "identity")
devname <- dev.off()
|
93e6dbed243ca9f9ceb50ef5473ab614c2c94c94
|
d73c90a41950a261f89e9559942a682a855a2e52
|
/GWASanalyses_CVLT_nooutlier_nlme.R
|
a8c6b1bd8c7f58944eda53647a9a12f4843bb649
|
[] |
no_license
|
amandazheutlin/RNA_GWAS
|
ab5cb8e68a46941d5bf2f3b78d059d86bdb7e2db
|
3d6d90c88becbf16fb9b242d61ddb82c67baa5c2
|
refs/heads/master
| 2021-01-15T11:29:18.520083
| 2017-08-07T20:43:20
| 2017-08-07T20:43:20
| 99,617,978
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,772
|
r
|
GWASanalyses_CVLT_nooutlier_nlme.R
|
# RNA GWAS, Swedish data.
# Re-fit the top GWAS hits for CVLT with a linear mixed model (nlme),
# excluding one outlier subject (ID = 2008601953).
setwd("/mnt/nfs/swe_gwas/ABZ/RNA_GWAS")  # NOTE(review): site-specific path
load("swedenclean.rdata")
CVLT = read.table("GWAS-CVLT.txt", header = TRUE)
# Attach the CVLT neurocognitive score, matching subjects by study ID.
swedenclean$CVLT = CVLT[match(swedenclean$StudyID, CVLT$IID), 3]
swedenclean <- swedenclean[order(swedenclean$CVLT, na.last = FALSE), ]
swedenclean <- swedenclean[c(1:191, 193), ] # exclude outlier (row 192 after sorting)
swedenclean[180:192, 18564:18566]
# Take the top markers from the original GWAS results, ranked by p.KR.
# n_top replaces the hard-coded 76 that was previously duplicated in the
# summary loop below.
n_top <- 76
results_orig <- read.table("RNA-GWAS-CVLT_nooutlier_results.txt", header = TRUE)
results_orig <- results_orig[order(results_orig$p.KR), ]
hits <- rownames(results_orig)[seq_len(n_top)]
hitsdf <- swedenclean[, colnames(swedenclean) %in% hits]
hitsdf <- cbind(swedenclean[, c(1:4, 18566)], hitsdf)
# GWAS
# run mixed regression for each marker and save summary statistics
library(nlme)
# Linear mixed model per marker: predictor = expression,
# covariates = age, sex; random intercept per family.
models <- lapply(hits, function(x) {
  lme(eval(substitute(CVLT ~ i + Age + Sex, list(i = as.name(x)))), random = ~1|Family, data = swedenclean)
})
# summary statistics (fixed-effect t-table) for each fitted model
model_stats <- lapply(models, function(x) summary(x)$tTable)
# Collect the expression-term row (row 2 of each t-table) for every marker.
# (Replaces an unindexed rbind-in-a-loop over a hard-coded 1:76.)
results <- do.call(rbind, lapply(model_stats, function(s) as.data.frame(s)[2, ]))
# Rename the test-statistic columns and adjust p-values for the full
# transcriptome-wide number of markers tested (n = 18559).
colnames(results)[4:5] <- c("tvalue", "pvalue")
results <- results[order(results$pvalue), ]
results$p.bon <- p.adjust(results$pvalue, method = "bonferroni", n = 18559)
results$p.FDR <- p.adjust(results$pvalue, method = "BH", n = 18559)
results$marker <- rownames(results)
# write results
write.table(results, "RNA-GWAS-CVLT_nooutlier_results_nlme.txt", sep = "\t",
            col.names = TRUE, row.names = TRUE, quote = FALSE)
|
a2bbd00929a3242048affa00e53a1c8e5d31aca7
|
34f05b36f66e0e4a35fcdc711fc111af8948dea5
|
/GS3008/Source/buttonsneedle.R
|
ec13bc77740168f4c025e43fdf221198f8e2816c
|
[] |
no_license
|
mikigom/mikigom_course
|
a81f39cbd54188257978d03bb5899ab72d716c24
|
95cac5a43eab97c275d92e9b82cca1ef1f133a1a
|
refs/heads/master
| 2020-07-05T10:06:11.529685
| 2017-04-21T21:46:17
| 2017-04-21T21:46:17
| 66,783,387
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 597
|
r
|
buttonsneedle.R
|
# Buffon's needle simulation.
#
# For n random needle angles theta in [0, pi), drop m needles of length l
# onto a floor ruled with parallel lines a distance d apart, estimate the
# probability that a needle at that angle crosses a line, and plot the
# estimated crossing probability against theta.
#
# d: distance between the parallel lines
# l: needle length
# n: number of angles to sample
# m: number of needle drops per angle
buffon.needle <- function(d, l, n, m){
  theta_vector <- numeric(n)
  probability_vector <- numeric(n)
  for (i in seq_len(n)) {
    theta <- runif(1, min = 0, max = pi)
    # Distance from the needle's center to the nearest line is uniform on
    # [0, d/2]; the needle crosses a line when center distance plus the
    # half-needle projection exceeds d/2. Drawing all m positions at once
    # replaces the original scalar inner loop (same RNG stream, same result).
    center <- runif(m, min = 0, max = d / 2)
    touches <- sum(center + (l / 2) * sin(theta) > d / 2)
    theta_vector[i] <- theta
    probability_vector[i] <- touches / m
  }
  # Title typo fixed: was "Boffun's needle".
  plot(theta_vector, probability_vector, xlab = "Theta", ylab = "Probability",
       main = "Buffon's needle")
}
|
4d798b49d701539e9b263b927961594003001365
|
aaac559889d1968ee128d67460bcf4a4272e39fb
|
/figure/Plot 1.R
|
13fa24c9be5bf9f9bbb9eaf39af5fba0403f8cfb
|
[] |
no_license
|
Omar-Ma/ExData_Plotting1
|
7a6f9cd928afe2f42ac50f6d0e9edc5e680b99a7
|
4bfad1eb25ea314250548c63f399a7424c03ef17
|
refs/heads/master
| 2021-01-09T07:02:54.416243
| 2014-10-12T23:04:21
| 2014-10-12T23:04:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 264
|
r
|
Plot 1.R
|
# Plot 1: histogram of household global active power for the two days
# 2007-02-01 and 2007-02-02.
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
power$Date <- as.Date(power$Date, "%d/%m/%Y")
two_days <- power[power$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
hist(two_days$Global_active_power,
     xlab = "Global Active Power (kilowatts)",
     col = "red",
     main = "Global Active Power")
|
860a0770b47e2126caa7906f3b4766a873016a3c
|
765a4a79c4ca4fc7c91b97e53118ab804b4044ba
|
/gtex_analysis/pre-process/combineAllTissueTpm.R
|
460c9ac08e17908e207b53186cc6d642f3949de9
|
[] |
no_license
|
pughlab/net-seq
|
bbf2e658ef5602bcae6de24d9e6dc8f5149cea18
|
6fc6717a293d049a660532159be165363010a6cb
|
refs/heads/master
| 2021-05-12T08:32:20.578205
| 2019-07-16T14:29:00
| 2019-07-16T14:29:00
| 117,287,433
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 632
|
r
|
combineAllTissueTpm.R
|
# Combine per-tissue TPM matrices (each saved as an .Rdata file defining
# `tpm.mat`) into a single all-tissue matrix, column-binding tissues whose
# gene rownames match the first tissue exactly.
args <- commandArgs(TRUE)
tpm.dir <- '/mnt/work1/users/pughlab/external_data/GTEx/FPKMmatrix/tpm/rdata'
all.tissue.tpm <- list.files(tpm.dir, pattern="tpm.Rdata")
all.tpm.mat <- data.frame()
for(each.tissue.tpm in all.tissue.tpm){
  load(file.path(tpm.dir, each.tissue.tpm))  # defines `tpm.mat`
  if(each.tissue.tpm %in% all.tissue.tpm[1]){
    # First tissue seeds the combined matrix.
    all.tpm.mat <- tpm.mat
  } else if(identical(rownames(tpm.mat), rownames(all.tpm.mat))) {
    # BUG FIX: the original used a vectorized `==` comparison inside if(),
    # which only tested the first gene (and is an error in R >= 4.2).
    all.tpm.mat <- cbind(all.tpm.mat, tpm.mat)
  } else {
    print(paste("Warning: Could not append ", each.tissue.tpm, " - mismatching rows", sep=""))
  }
}
save(all.tpm.mat, file=file.path(tpm.dir, "allTissue.tpm.Rdata"))
|
2b4446714ec512f99195b6518111c1a20e434e25
|
b1d10d40427e33e895be6ed3c76aeb45447810b5
|
/R/document.R
|
f59a3ff01969f998097f928b5a047a93ce062043
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
brazil-data-cube/rlccs
|
0594484126727cb0cf9ad47f4204abda902f426c
|
218921499978f04f5b33df5be46a4ff95dd07030
|
refs/heads/master
| 2023-03-11T15:01:16.441672
| 2021-02-23T13:28:31
| 2021-02-23T13:28:31
| 337,800,683
| 6
| 3
|
MIT
| 2021-02-23T13:28:32
| 2021-02-10T17:31:33
|
R
|
UTF-8
|
R
| false
| false
| 1,720
|
r
|
document.R
|
#' @title Document development functions
#'
#' @describeIn extensions
#' The \code{RLCCSDocument()} function is a constructor of
#' LCCS documents. Currently, this class is used to represent the return of all
#' LCCS-WS endpoints. The general use of this document is possible since the
#' service return follows the same hierarchical structure.
#'
#' @param content a \code{list} data structure representing the JSON file
#' received in HTTP response (see \code{\link{content_response}()} function)
#'
#' @param q a \code{RLCCSDocument} object expressing the LCCS-WS query used
#' to retrieve the document.
#'
#' @param subclass a \code{character} corresponding to the subclass of the
#' document to be created.
#'
#' @return
#' The \code{RLCCSDocument()} function returns a \code{RLCCSDocument} object
#' with subclass defined by \code{subclass} parameter, or \code{NULL} when
#' the response carried no content.
#'
#' @export
RLCCSDocument <- function(content, q, subclass) {
  # DELETE operations do not return contents in the LCCS-WS (0.6.0)
  if (is.null(content)) {
    return(NULL)
  }
  structure(
    content,
    query = q,
    class = c(subclass, "RLCCSDocument", "list")
  )
}
#' @export
subclass.RLCCSDocument <- function(x) {
  # the first entry of the class vector is the most specific subclass
  cls <- class(x)
  cls[1]
}
#' @export
check_subclass.RLCCSDocument <- function(x, subclasses) {
  # fail with a formatted message when the document's subclass is not
  # among the accepted ones
  expected <- paste0("`", subclasses, "`", collapse = " or ")
  if (!(subclass(x) %in% subclasses)) {
    .error("Expecting %s document(s).", expected)
  }
}
#' @title Document utils functions
#'
#' @param d \code{RLCCSDocument} object
#'
#' @return a \code{RLCCSQuery} object with the predecessor subclass with the
#' fields used in the request.
#'
#' @export
doc_query <- function(d) {
  # validate the input type before extracting the stored query
  .check_obj(d, "RLCCSDocument")
  query_obj <- attr(d, "query")
  query_obj
}
|
4703320613e8814f0ec872d0afbc8a0a542db6e0
|
6a6277580cba9e63e3a1f15b515bbeb7635aa032
|
/Plot4.R
|
ee655d451f7fc36a5f8edbf7a501553dd153d845
|
[] |
no_license
|
NMDC70/Plotting2
|
e6acbce5745b17c3cc70c3f5ebbb1eeeca0e705f
|
2b7c0f9a846fb9dc6befdc4856a9b50e5d65302d
|
refs/heads/master
| 2021-01-10T16:24:56.938308
| 2015-10-18T06:43:22
| 2015-10-18T06:43:22
| 44,427,625
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,000
|
r
|
Plot4.R
|
## Plot4: total US PM2.5 emissions from coal-combustion sources by year.
## Inputs: the two EPA NEI RDS files in the working directory.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
coal <- c("Coal|coal")
SCCSubsetcoal <- SCC[grep(coal, SCC$Short.Name), ]
## Compared with EI sector(total 99 observations) & other columns of SCC searching for coal
## in Short.Name gives the largest subset (230 observations)
coal1 <- c("Comb|Fuel|Vessels|fired|Fired|Burning")
## BUGFIX: grep() returns indices into the vector that was searched
## (SCCSubsetcoal$Short.Name), so those indices must subset SCCSubsetcoal,
## not the full SCC table. The original indexed SCC and therefore selected
## unrelated source-classification rows.
SCCSubsetcoal1 <- SCCSubsetcoal[grep(coal1, SCCSubsetcoal$Short.Name), ]
## All 230 obervations however are not related to combustion of coal.
## We need to take all items where coal is used as Fuel, fired, burned for various different
## end use. Our final subset gives us an index vector with 127 values.
d1 <- NEI[NEI$SCC %in% SCCSubsetcoal1$SCC, ]
pcoalcomb <- aggregate(Emissions~year, data = d1, sum)
png(filename = "plot4.png")
plot(pcoalcomb$year, pcoalcomb$Emissions, type = "l", col = "green", main = "United States Coal Combustion PM25 Emission trend", xlab = "Year", ylab = "Total PM25 Emissions from Coal Combustion")
dev.off()
|
3d29fdde7f6bc19f51d625a6018038a9a70e98a0
|
8194aec24987b5a235f5d83785444f115f55cdc3
|
/man/triangle.design.Rd
|
f9c144e77358e7b0bdf5b5579ee384b54a53bac4
|
[] |
no_license
|
cran/SensoMineR
|
f4bf5739257fb64d8742a8eaaa4d8f87357d41d4
|
1b0d41884381acd0033408afcd8a024754d0810d
|
refs/heads/master
| 2020-08-08T23:39:58.342203
| 2020-05-19T13:50:03
| 2020-05-19T13:50:03
| 18,806,219
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,731
|
rd
|
triangle.design.Rd
|
\name{triangle.design}
\alias{triangle.design}
\title{Construct a design for triangle tests}
\description{
Construct a design to make triangle tests.
}
\usage{
triangle.design (nbprod , nbpanelist, bypanelist = nbprod*(nbprod-1)/2,
labprod=1:nbprod, labpanelist=1:nbpanelist)
}
\arguments{
\item{nbprod}{number of products to compare}
\item{nbpanelist}{number of panelists who make the triangle test}
\item{bypanelist}{number of expermient that each panelist can done (by default each panelist make all the comparisons between the products}
\item{labprod}{name of the products (by default, the product are coded from 1 to the number of products}
\item{labpanelist}{name of the panelists (by default, the panelists are coded from 1 to the number of panelists}
}
\details{
Triangle test: panelists receive three coded samples. They are told that two of the samples are the same and one is different.
Panelists are asked to identify the odd sample.}
\value{
Returns a data.frame of dimension (\emph{t,3}), where \emph{t} is the number of experiments.
Columns 1, 2 and 3 give the products to test.
The product in column 1 is coded by "X", in column 2 by "Y" and in column 3 by "Z".
Panelists should start with product "X", then "Y" and then "Z".
}
\author{Francois Husson}
\seealso{\code{\link{triangle.test}}, \code{\link{triangle.pair.test}}}
\examples{
##Example 1
design1 = triangle.design (nbprod = 4, nbpanelist = 8)
##Example 2
design2 = triangle.design(nbprod = 4, nbpanelist = 6, bypanelist = 3,
labprod=c("prod1","prod2","prod3","prod4"),
labpanelist=c("John","Audrey","Peter","Martina","James","Lisa"))
}
\keyword{models}
|
8c336c8c1b60836712ebdc7be401cbe9102c5af8
|
00e7438f79f95ffab664390a0cbacaf407f4433b
|
/Gender vs Age MH/MH - Gender vs Age Statistics.R
|
e362f807fc3c8ad394bc1f15f807ab11f0ba7b4c
|
[] |
no_license
|
Key2-Success/HeartBD2K
|
95b410f2b7233419650e6972058112532a7223d8
|
21ad025c40a396707e97dede993ac8c8b393bf13
|
refs/heads/master
| 2018-12-14T19:51:21.941506
| 2018-09-13T22:38:29
| 2018-09-13T22:38:29
| 108,905,096
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,744
|
r
|
MH - Gender vs Age Statistics.R
|
library(tm)
library(qdap)
library(qdapTools)
library(stringi)
library(stringr)
library(purrr)
# load in distinct dataframes as well as distinct MH (chosen from random sample of 1000)
load(file = "~/Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/MH - Gender vs Age/adult vs gender/dtm_female_adult_MH.Rdata")
load(file = "~/Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/MH - Gender vs Age/adult vs gender/male_adult_MH.Rdata")
load(file = "~/Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/MH - Gender vs Age/adult vs gender/dtm_male_adult_MH.Rdata")
load(file = "~/Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/MH - Gender vs Age/adult vs gender/female_adult_MH.Rdata")
# creating mydf (create female first)
dtm_tf2 <- weightTfIdf(dtm_female_adult_MH)
m2 <- as.matrix(dtm_tf2)
rownames(m2) <- 1:nrow(m2)
norm_eucl <- function(m2) m2/apply(m2, MARGIN = 1, FUN = function(x) sum(x^2)^0.5) # normalize vectors so Euclidean distance makes sense
m_norm2 <- norm_eucl(m2)
m_norm2 <- m_norm2[, order(colSums(-m_norm2))]
m_norm2 <- t(m_norm2)
m_norm2[is.na(m_norm2)] <- 0
c2 <- kmeans(m_norm2, 10) # cluster into 10 clusters
m_norm2 <- m_norm2[order(-rowSums(m_norm2)), ] # orders them
pca <- prcomp((m_norm2))
dat.loadings <- pca$x[ , 1:2]
c <- kmeans(dat.loadings, centers = 10)
pca1 <- pca$x[ , 1]
pca2 <- pca$x[ , 2]
mydf <- data.frame(ID = names(pca1), PCA1 = pca1, PCA2 = pca2, Cluster = factor(c$cluster))
remove(dat.loadings, m_norm2, m2, c, c2, dtm_tf2, pca, pca1, pca2)
# do on female first
save(mydf, file = "~/Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/MH - Gender vs Age/adult vs gender/mydf_female_adult_MH.Rdata")
mydf_fem <- mydf
# creating mydf (create female first)
dtm_tf2 <- weightTfIdf(dtm_male_adult_MH)
m2 <- as.matrix(dtm_tf2)
rownames(m2) <- 1:nrow(m2)
norm_eucl <- function(m2) m2/apply(m2, MARGIN = 1, FUN = function(x) sum(x^2)^0.5) # normalize vectors so Euclidean distance makes sense
m_norm2 <- norm_eucl(m2)
m_norm2 <- m_norm2[, order(colSums(-m_norm2))]
m_norm2 <- t(m_norm2)
m_norm2[is.na(m_norm2)] <- 0
c2 <- kmeans(m_norm2, 10) # cluster into 10 clusters
m_norm2 <- m_norm2[order(-rowSums(m_norm2)), ] # orders them
pca <- prcomp((m_norm2))
dat.loadings <- pca$x[ , 1:2]
c <- kmeans(dat.loadings, centers = 10)
pca1 <- pca$x[ , 1]
pca2 <- pca$x[ , 2]
mydf <- data.frame(ID = names(pca1), PCA1 = pca1, PCA2 = pca2, Cluster = factor(c$cluster))
remove(dat.loadings, m_norm2, m2, c, c2, dtm_tf2, pca, pca1, pca2)
save(mydf, file = "~/Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/MH - Gender vs Age/adult vs gender/mydf_male_adult_MH.Rdata")
remove(dtm_female_adult_MH, dtm_male_adult_MH)
# combine all unique MH terms found in each
mydf <- rbind(mydf, mydf_fem)
remove(mydf_fem)
mydf <- mydf[ , 1]
mydf <- as.data.frame(mydf)
mydf$mydf <- as.character(mydf$mydf)
mydf <- subset(mydf, !duplicated(mydf))
names(mydf)[1] <- "word"
# clean up so exact match is used
mydf$word <- paste0("\\b", mydf$word)
mydf$test <- "\\b"
mydf$word <- paste(mydf$word, mydf$test, sep = "")
mydf <- mydf[ , -c(2)]
mydf <- as.data.frame(mydf)
names(mydf)[1] <- "word"
mydf$word <- as.character(mydf$word)
# find occurrences of each MH term
male_adult_MH$MH2 <- tolower(male_adult_MH$MH2)
mydf$male_adult <- map_int(mydf$word, function(x){sum(str_detect(male_adult_MH$MH2, pattern = x))})
female_adult_MH$MH2 <- tolower(female_adult_MH$MH2)
mydf$female_adult <- map_int(mydf$word, function(x){sum(str_detect(female_adult_MH$MH2, pattern = x))})
# calculates p value for 2 proportion z-test
# NOTE(review): relies on the script globals `mydf`, `female_adult_MH` and
# `male_adult_MH` defined earlier -- this is not a standalone function.
stats_manual <- function(word)
{
  # calculate raw occurrences
  # columns 3 / 2 of mydf hold the female / male occurrence counts for `word`
  prop_f <- mydf[mydf$word == word, 3]
  prop_m <- mydf[mydf$word == word, 2]
  # calculate p-value
  # two-sample test of equal proportions: occurrences vs. total records per gender
  p <- prop.test(x = c(prop_f, prop_m), n = c(dim(female_adult_MH)[1], dim(male_adult_MH)[1]))
  p <- p$p.value
  return (p)
}
# create dataframes to store p-values
df_pval <- data.frame()
df_pval2 <- data.frame()
# create for-loop for binding dataframe for p-values
for (i in 1:nrow(mydf))
{
word <- mydf$word[i]
df_pval <- data.frame(word, stats_manual(word))
df_pval2 <- rbind(df_pval2, df_pval)
}
mydf <- cbind(mydf, df_pval2)
mydf <- mydf[ , c(1, 2, 3, 5)]
names(mydf)[4] <- "p-value"
remove(df_pval)
# calculate proportions for each term
options(scipen = 999) # disable scientific notation
mydf$prop_female <- mydf$female_adult/nrow(female_adult_MH)
mydf$prop_male <- mydf$male_adult/nrow(male_adult_MH)
# save raw occurrence counts
save(mydf, file = "~/Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/MH - Gender vs Age/adult vs gender/mydf_adult_male-female_MH.Rdata")
# remove \\b
mydf2 <- mydf
mydf2 <- substr(mydf2$word, 1, nchar(mydf2$word)-2)
mydf2 <- as.data.frame(mydf2)
mydf2$mydf2 <- substring(mydf2$mydf2, 3)
mydf <- cbind(mydf2, mydf)
mydf <- mydf[ , c(1, 3, 4, 5, 6, 7)]
names(mydf)[1] <- "word"
# save p-value dataframes
options(scipen = 999)
mydf <- mydf[order(mydf$`p-value`), ]
mydf_sig <- mydf[mydf$`p-value` < 0.05, ]
mydf_sig <- mydf_sig[complete.cases(mydf_sig), ]
# create differences
for (i in 1:nrow(mydf_sig))
{
mydf_sig$diffs[i] <- mydf_sig$prop_male[i] - mydf_sig$prop_female[i]
}
# order by most different
mydf_sig$most_diff <- abs(mydf_sig$diffs)
mydf_sig <- mydf_sig[order(-mydf_sig$most_diff), ]
save(mydf_sig, file = "~/Kitu/College/Junior Year/Extracurriculars/Data Science Research Internship/MH - Gender vs Age/adult vs gender/sig_MH_adult_btwn_genders.Rdata")
|
aeaa51ff8047fdd6b7311e93d3b7e68d47a1106d
|
b9c36e4ab2b701917065c94c923cf9452384ebe3
|
/brakingsystem-dagstat16.R
|
9148ee434b2d2411513ad719c40bb242a0ae2adc
|
[] |
no_license
|
geeeero/dagstat16
|
eef139c7fbd5fa1db1f4fc95286065704ce4278c
|
4b695d0ace18f055385cea9a4339fc4cc4c650ea
|
refs/heads/master
| 2021-01-10T11:18:58.401725
| 2016-09-07T16:55:25
| 2016-09-07T16:55:25
| 52,964,355
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,931
|
r
|
brakingsystem-dagstat16.R
|
#install.packages("devtools")
#library("devtools")
#install_github("louisaslett/ReliabilityTheory")
library("ReliabilityTheory")
library(ggplot2)
library(reshape2)
bottomlegend <- theme(legend.position = 'bottom', legend.direction = 'horizontal', legend.title = element_blank())
rightlegend <- theme(legend.title = element_blank())
# produces survival signature matrix for one component of type "name",
# for use in nonParBayesSystemInference()
oneCompSurvSign <- function(name){
  # single-component survival signature: the system works with probability 0
  # when 0 components of this type work, and probability 1 when 1 works
  signature <- data.frame(state = c(0, 1), Probability = c(0, 1))
  names(signature)[1] <- name
  signature
}
# produces data frame with prior and posterior lower & upper component survival function
# for component of type "name" based on nonParBayesSystemInferencePriorSets() inputs
# for all components except survival signature; nLower, nUpper, yLower, yUpper must be data frames
# where each column corresponds to the component type, so there must be a match
oneCompPriorPostSet <- function(name, at.times, test.data, nLower, nUpper, yLower, yUpper){
  # survival signature of the single component of this type
  sig <- oneCompSurvSign(name)
  # an empty (NULL) data entry named after the component yields the prior
  nodata <- list(name=NULL)
  names(nodata) <- name
  # pick the prior-parameter columns matching this component type
  nL <- nLower[, match(name, names(nLower))]
  nU <- nUpper[, match(name, names(nUpper))]
  yL <- yLower[, match(name, names(yLower))]
  yU <- yUpper[, match(name, names(yUpper))]
  data <- test.data[match(name, names(test.data))]
  # prior (no data) and posterior (with test data) survival bounds
  prio <- nonParBayesSystemInferencePriorSets(at.times, sig, nodata, nL, nU, yL, yU)
  post <- nonParBayesSystemInferencePriorSets(at.times, sig, data, nL, nU, yL, yU)
  # empirical survival function of the observed failure times
  datavec <- unlist(data)
  erf <- sapply(at.times, FUN = function(t) sum(datavec > t)/length(datavec))
  # stack prior and posterior rows; Item distinguishes the two
  data.frame(Time=rep(at.times,2), Lower=c(prio$lower,post$lower), Upper=c(prio$upper,post$upper),
             Erf=rep(erf,2), Item=rep(c("Prior", "Posterior"), each=length(at.times)))
}
tuered <- rgb(0.839,0.000,0.290)
tueblue <- rgb(0.000,0.400,0.800)
tueyellow <- rgb(1.000,0.867,0.000)
tuegreen <- rgb(0.000,0.675,0.510)
tuewarmred <- rgb(0.969,0.192,0.192)
tueorange <- rgb(1.000,0.604,0.000)
tuedarkblue <- rgb(0.063,0.063,0.451)
haz2rel <- function(hazvec){
  # convert a vector of discrete hazards into survival probabilities:
  # cumulative product of (1 - hazard), with ~1 prepended for time zero
  survival <- cumprod(1 - hazvec)
  c(1 - 1e-6, survival)
}
# ----------------------------------------------
ab <- graph.formula(s -- M -- C1:C2:C3:C4, P1:P2:P3:P4 -- t,
C1 -- P1, C2 -- P2, C3 -- P3, C4 -- P4, s -- H -- P3:P4)
ab <- setCompTypes(ab, list("M"=c("M"), "H"=c("H"), "C"=c("C1", "C2", "C3", "C4"), "P"=c("P1", "P2", "P3", "P4")))
# data
set.seed(233)
Mdata <- rexp(5, rate=0.25)
Hdata <- rlnorm(10, 1.5, 0.3)
Cdata <- rexp(15, rate=0.3)
Pdata <- rgamma(20, scale=0.9, shape=3.2)
abnulldata <- list("M"=NULL, "H"=NULL, "C"=NULL, "P"=NULL)
abtestdata <- list("M"=Mdata, "H"=Hdata, "C"=Cdata, "P"=Pdata)
abdat <- melt(abtestdata); names(abdat) <- c("x", "Part")
abdat$Part <- ordered(abdat$Part, levels=c("M", "H", "C", "P", "System"))
absig <- computeSystemSurvivalSignature(ab)
abt <- seq(0, 10, length.out=301)
#MpriorU <- 1-pexp(abt, rate=0.15)
MpriorU <- 1-pweibull(abt, shape=2.5, scale=8)
MpriorU[MpriorU==1] <- 1-1e-6
#MpriorL <- 1-pexp(abt, rate=0.5)
MpriorL <- 1-pweibull(abt, shape=2.5, scale=6)
MpriorL[MpriorL==1] <- 1-1e-6
# priors
abnL <- data.frame(M=rep(1,301), H=rep(1,301), C=rep(1,301), P=rep(1,301))
abnU <- data.frame(M=rep(8,301), H=rep(2,301), C=rep(2,301), P=rep(2,301))
abyL <- data.frame(M=MpriorL,
#M=c(rep(0.8, 150), rep(0.6, 60), rep(0.2, 30), rep(0.1, 61)),
H=rep(0.0001, 301),
#H=c(rep(0.5, 150), rep(0.25, 60), rep(0.01, 91)),
C=c(rep(c(0.75, 0.73, 0.71, 0.70, 0.60, 0.45, 0.30, 0.23, 0.21, 0.20), each=30), 0.20),
P=c(rep(c(0.5, 0.01), each=150), 0.01))
abyU <- data.frame(M=MpriorU,
#M=c(rep(0.99, 180), rep(0.9, 60), rep(0.6, 30), rep(0.4, 31)),
H=rep(0.9999, 301),
#H=c(rep(0.99, 90), rep(0.9, 90), rep(0.7, 30), rep(0.5, 30), rep(0.3,61)),
C=c(rep(c(0.99, 0.98, 0.96, 0.95, 0.90, 0.65, 0.50, 0.45, 0.43, 0.42), each=30), 0.42),
P=c(rep(c(0.99, 0.75), each=150), 0.75))
#posteriors
abM <- oneCompPriorPostSet("M", abt, abtestdata, abnL, abnU, abyL, abyU)
abH <- oneCompPriorPostSet("H", abt, abtestdata, abnL, abnU, abyL, abyU)
abC <- oneCompPriorPostSet("C", abt, abtestdata, abnL, abnU, abyL, abyU)
abP <- oneCompPriorPostSet("P", abt, abtestdata, abnL, abnU, abyL, abyU)
abprio <- nonParBayesSystemInferencePriorSets(abt, absig, abnulldata, abnL, abnU, abyL, abyU)
abpost <- nonParBayesSystemInferencePriorSets(abt, absig, abtestdata, abnL, abnU, abyL, abyU)
#data frame for plot
abdf <- rbind(data.frame(abM, Part="M"), data.frame(abH, Part="H"), data.frame(abC, Part="C"), data.frame(abP, Part="P"),
data.frame(Time=rep(abt,2), Lower=c(abprio$lower,abpost$lower), Upper=c(abprio$upper,abpost$upper),
Erf=NA, Item=rep(c("Prior", "Posterior"), each=length(abt)), Part="System"))
abdf$Item <- ordered(abdf$Item, levels=c("Prior", "Posterior"))
abdf$Part <- ordered(abdf$Part, levels=c("M", "H", "C", "P", "System"))
priopostcolours1 <- scale_fill_manual(values = c(tuegreen, tuedarkblue))
priopostcolours2 <- scale_colour_manual(values = c(tuegreen, tuedarkblue))
#the plot
ab1 <- ggplot(abdf, aes(x=Time))
#ab1 <- ab1 + scale_fill_manual(values = c(tuered, tueblue)) + scale_colour_manual(values = c(tuered, tueblue))
ab1 <- ab1 + priopostcolours1 + priopostcolours2
ab1 <- ab1 + geom_line(aes(y=Upper, group=Item, colour=Item)) + geom_line(aes(y=Lower, group=Item, colour=Item))
ab1 <- ab1 + geom_ribbon(aes(ymin=Lower, ymax=Upper, group=Item, colour=Item, fill=Item), alpha=0.5)
ab1 <- ab1 + geom_line(aes(y=Erf, group=Item), colour=tueorange, lty=2)
ab1 <- ab1 + facet_wrap(~Part, nrow=2) + geom_rug(aes(x=x), data=abdat) + xlab("Time") + ylab("Survival Probability")
ab1 <- ab1 + bottomlegend
pdf("figs/brakingsystem-dagstat16.pdf", width=8, height=5)
ab1
dev.off()
|
d59b1300f2b53f3aaf3c173905c36476df1d4d9f
|
e06965698053952f7f97c60349a590e42d08b633
|
/man/make_processor.Rd
|
d405803f18c3371b3c83ce96f1a023df97d8c242
|
[
"Apache-2.0"
] |
permissive
|
kcf-jackson/sketch
|
a9940c89ed8183627914861a11893856b1c47429
|
b597f01e540f35aab1f5ee2d3744f6f64c70c94d
|
refs/heads/master
| 2022-11-01T03:28:32.088340
| 2022-10-23T14:22:05
| 2022-10-23T14:22:05
| 222,058,097
| 106
| 5
|
NOASSERTION
| 2022-10-23T14:22:07
| 2019-11-16T06:36:59
|
HTML
|
UTF-8
|
R
| false
| true
| 466
|
rd
|
make_processor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assets-to_shiny_tag.R
\name{make_processor}
\alias{make_processor}
\title{Make a handle to process header}
\usage{
make_processor(pred, fun)
}
\arguments{
\item{pred}{A function, taking a string and returning a logical.}
\item{fun}{A function, taking a string and returning a 'shiny.tag' object.}
}
\value{
A header processor / handler.
}
\description{
Make a handle to process header
}
|
9c9a16850edf7f28efe78803ec4b0ed253558b37
|
b7421dc801628ffc279cc6a89c4f915bc1a21e79
|
/man/clear_job_processing.Rd
|
6ffcb43ec08ec53992a3e69ad5504052cd5cadef
|
[] |
no_license
|
wush978/RzmqJobQueue
|
58033aeec908e1ff2264e011d0658be379c13987
|
9204035457cecbaa6c45ebaa5a4682b88abfe359
|
refs/heads/master
| 2021-01-22T14:39:48.978653
| 2013-06-25T08:10:51
| 2013-06-25T08:10:51
| 8,583,104
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 275
|
rd
|
clear_job_processing.Rd
|
\name{clear_job_processing}
\alias{clear_job_processing}
\title{clear_job_processing
Clear the hash values in redis of jobs under execution}
\usage{
clear_job_processing()
}
\description{
clear_job_processing
Clear the hash values in redis of jobs under execution
}
|
1fb9baca8e0bbe62d3fe250a3cba492abcfc5ead
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/stm/examples/plot.estimateEffect.Rd.R
|
acf3164070489218e131405268f45bd901180a82
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,550
|
r
|
plot.estimateEffect.Rd.R
|
library(stm)
### Name: plot.estimateEffect
### Title: Plot effect of covariates on topics
### Aliases: plot.estimateEffect
### ** Examples
## Not run:
##D
##D prep <- estimateEffect(1:3 ~ treatment, gadarianFit, gadarian)
##D plot(prep, "treatment", model=gadarianFit,
##D method="pointestimate")
##D plot(prep, "treatment", model=gadarianFit,
##D method="difference",cov.value1=1,cov.value2=0)
##D
##D #If the covariate were a binary factor,
##D #the factor labels can be used to
##D #specify the values of cov.value1 (e.g., cov.value1="treat").
##D
##D # String variables must be turned to factors prior to plotting.
##D #If you see this error, Error in rep.int(c(1, numeric(n)), n - 1L) :
##D # invalid 'times' value, then you likely have not done this.
##D
##D #Example of binary times binary interaction
##D gadarian$binaryvar <- sample(c(0,1), nrow(gadarian), replace=T)
##D temp <- textProcessor(gadarian$open.ended.response,metadata=gadarian)
##D out <- prepDocuments(temp$documents, temp$vocab, temp$meta)
##D stm1 <- stm(out$documents, out$vocab, 3, prevalence=~treatment*binaryvar,
##D data=gadarian)
##D prep <- estimateEffect(c(2) ~ treatment*binaryvar, stmobj=stm1,
##D metadata=gadarian)
##D
##D par(mfrow=c(1,2))
##D plot(prep, "treatment", method="pointestimate",
##D cov.value1=1, cov.value2=0, xlim=c(-1,1), moderator="binaryvar", moderator.value=1)
##D plot(prep, "treatment", method="pointestimate",
##D cov.value1=1, cov.value2=0, xlim=c(-1,1), moderator="binaryvar",
##D moderator.value=0)
## End(Not run)
|
07142c4e7231ea3fdf0c201b536ae1a4aa5b2cf3
|
a9fb5a228b2316e5b43f58e4b8d6c858cb7784f7
|
/R/DsATACsc-class.R
|
82c7e6abd261deb4c42fc2ef417b9c0d0333e559
|
[] |
no_license
|
GreenleafLab/ChrAccR
|
f94232d5ac15caff2c5b2c364090bfb30b63e61a
|
43d010896dc95cedac3a8ea69aae3f67b2ced910
|
refs/heads/master
| 2023-06-24T05:29:29.804920
| 2023-03-17T13:01:49
| 2023-03-17T13:01:49
| 239,655,070
| 17
| 7
| null | 2023-05-05T09:51:23
| 2020-02-11T02:01:37
|
R
|
UTF-8
|
R
| false
| false
| 45,139
|
r
|
DsATACsc-class.R
|
#' DsATACsc
#'
#' A class for storing single-cell ATAC-seq accessibility data
#' inherits from \code{\linkS4class{DsATAC}}. Provides a few additional methods
#' but is otherwise identical to \code{\linkS4class{DsATAC}}.
#'
#' @name DsATACsc-class
#' @rdname DsATACsc-class
#' @author Fabian Mueller
#' @exportClass DsATACsc
setClass("DsATACsc",
	contains = "DsATAC",
	package = "ChrAccR"
)
# Slot-by-slot initializer. Besides copying the constructor arguments it
# initializes the count-transformation log (one empty character vector per
# count matrix) and stamps the object with the current package version.
setMethod("initialize","DsATACsc",
	function(
		.Object,
		fragments,
		coord,
		counts,
		sampleAnnot,
		genome,
		diskDump,
		diskDump.fragments,
		diskDump.fragments.nSamplesPerFile,
		sparseCounts
	) {
		.Object@fragments <- fragments
		.Object@coord <- coord
		.Object@counts <- counts
		# one (initially empty) transformation record per count matrix,
		# named after the corresponding region type
		.Object@countTransform <- rep(list(character(0)), length(.Object@counts))
		names(.Object@countTransform) <- names(.Object@counts)
		.Object@sampleAnnot <- sampleAnnot
		.Object@genome <- genome
		.Object@diskDump <- diskDump
		.Object@diskDump.fragments <- diskDump.fragments
		.Object@diskDump.fragments.nSamplesPerFile <- diskDump.fragments.nSamplesPerFile
		.Object@sparseCounts <- sparseCounts
		# record the package version for serialization compatibility
		.Object@pkgVersion <- packageVersion("ChrAccR")
		.Object
	}
)
#' @noRd
DsATACsc <- function(sampleAnnot, genome, diskDump=FALSE, diskDump.fragments=TRUE, sparseCounts=TRUE){
	# Construct an empty single-cell dataset: the fragment, coordinate and
	# count slots start out as empty lists; 500 samples per fragment file is
	# the fixed chunking default.
	new("DsATACsc",
		fragments = list(),
		coord = list(),
		counts = list(),
		sampleAnnot = sampleAnnot,
		genome = genome,
		diskDump = diskDump,
		diskDump.fragments = diskDump.fragments,
		diskDump.fragments.nSamplesPerFile = 500L,
		sparseCounts = sparseCounts
	)
}
################################################################################
# Single-cell methods
################################################################################
if (!isGeneric("simulateDoublets")) {
	setGeneric(
		"simulateDoublets",
		function(.object, ...) standardGeneric("simulateDoublets"),
		signature=c(".object")
	)
}
#' simulateDoublets-methods
#'
#' EXPERIMENTAL: Simulate doublets by adding counts in matrices for each region set
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @param byGroup sample by group. Must be a column name in the sample annotation table
#' @param n number of doublets to simulate (per group)
#' @param sampleRatio fraction of non-zero events to subsample. If \code{0 < sampleRatio < 1}, the individual cell counts will be subsampled
#'                 for each region.
#' @return an \code{\linkS4class{DsATACsc}} object containing counts for simulated doublets. Fragment data will be discarded.
#'
#' @rdname simulateDoublets-DsATACsc-method
#' @docType methods
#' @aliases simulateDoublets
#' @aliases simulateDoublets,DsATACsc-method
#' @author Fabian Mueller
#' @export
#' @noRd
setMethod("simulateDoublets",
	signature(
		.object="DsATAC"
	),
	function(
		.object,
		byGroup=NULL,
		n=10000,
		sampleRatio=1.0
	) {
		# randomly zero out a fraction (1 - sampleRatio) of the non-zero
		# entries of a sparse matrix, then drop the explicit zeros
		.sampleSparseMat <- function(mat, sampleRatio=0.5){
			total <- length(mat@x)
			sampleTo <- floor(total * (1-sampleRatio))
			mat@x[sample(seq_len(total), sampleTo)] <- 0
			mat <- drop0(mat)
			mat
		}
		# dense-matrix analog of .sampleSparseMat
		.sampleDensMat <- function(mat, sampleRatio=0.5){
			nonZero <- which(mat > 0)
			sampleTo <- floor(length(nonZero) * (1-sampleRatio))
			mat[sample(nonZero, sampleTo)] <- 0
			# BUGFIX: return the modified matrix. Previously the subassignment
			# above was the function's last expression, so the function returned
			# that expression's value (the scalar 0) instead of the matrix,
			# collapsing the subsampled count matrices downstream.
			mat
		}
		if (class(.object)!="DsATACsc"){
			logger.warning("Doublet detection is intended for single-cell datasets [DsATACsc] only. Applying it to general DsATAC objects is intended for backwards compatibility only.")
		}
		# doublets carry no fragment data; start from a fragment-free copy
		res <- removeFragmentData(.object)
		nCells <- length(getSamples(.object))
		if (is.null(byGroup)){
			# pair random cells across the whole dataset
			idx <- cbind(sample(seq_len(nCells), n, replace=TRUE), sample(seq_len(nCells), n, replace=TRUE))
		} else {
			ggs <- getSampleAnnot(.object)
			if (!is.element(byGroup, colnames(ggs))){
				logger.error("byGroup must be a valid column name in the sample annotation")
			}
			logger.info(paste0("Simulating doublets by group: '", byGroup, "'..."))
			# pair random cells within each annotation group (n pairs per group)
			gIdx <- getGroupsFromTable(ggs[,byGroup, drop=FALSE], minGrpSize=1)[[1]]
			idx <- do.call("rbind", lapply(gIdx, FUN=function(x){
				cbind(sample(x, n, replace=TRUE), sample(x, n, replace=TRUE))
			}))
		}
		colnames(idx) <- c("idx1", "idx2")
		n <- nrow(idx)
		ph <- data.frame(doubletId=paste0("d", 1:n), idx)
		res@sampleAnnot <- ph
		rownames(res@sampleAnnot) <- ph[,"doubletId"]
		res@diskDump <- FALSE
		regTypes <- getRegionTypes(.object)
		for (rt in regTypes){
			logger.status(paste0("Simulating doublets for region set: '", rt, "'..."))
			cm <- ChrAccR::getCounts(.object, rt, allowSparseMatrix=TRUE)
			pkg <- attr(class(cm), "package")
			isSparse <- is.character(pkg) && pkg=="Matrix"
			# count matrices of the two cells forming each doublet
			cm1 <- cm[, idx[, 1], drop=FALSE]
			cm2 <- cm[, idx[, 2], drop=FALSE]
			if (sampleRatio > 0 && sampleRatio < 1){
				logger.info(c("Subsampling to ratio:", sampleRatio))
				if (isSparse){
					cm1 <- .sampleSparseMat(cm1, sampleRatio)
					cm2 <- .sampleSparseMat(cm2, sampleRatio)
				} else {
					cm1 <- .sampleDensMat(cm1, sampleRatio)
					cm2 <- .sampleDensMat(cm2, sampleRatio)
				}
			}
			# a doublet's counts are the sum of its two constituent cells
			cmm <- cm1 + cm2
			colnames(cmm) <- ph[,"doubletId"]
			if (res@sparseCounts & !isSparse) {
				res@counts[[rt]] <- as(cmm, "sparseMatrix")
				res@counts[[rt]] <- drop0(res@counts[[rt]])
			} else {
				res@counts[[rt]] <- cmm
			}
			if (res@diskDump) res@counts[[rt]] <- as(res@counts[[rt]], "HDF5Array")
		}
		return(res)
	}
)
#-------------------------------------------------------------------------------
if (!isGeneric("getScQcStatsTab")) {
	setGeneric(
		"getScQcStatsTab",
		function(.object, ...) standardGeneric("getScQcStatsTab"),
		signature=c(".object")
	)
}
#' getScQcStatsTab-methods
#'
#' Retrieve a table of QC statistics for single cells
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @return an \code{data.frame} contain QC statistics for each cell
#'
#' @rdname getScQcStatsTab-DsATACsc-method
#' @docType methods
#' @aliases getScQcStatsTab
#' @aliases getScQcStatsTab,DsATACsc-method
#' @author Fabian Mueller
#' @export
setMethod("getScQcStatsTab",
	signature(
		.object="DsATACsc"
	),
	function(
		.object
	) {
		cellAnnot <- getSampleAnnot(.object)
		# locate the sample/cell id column; the candidate names appear to cover
		# different CellRanger annotation versions (first match wins)
		sampleIdCn <- findOrderedNames(colnames(cellAnnot), c(".sampleid", "sampleid", "cellId", ".CR.cellQC.barcode"), ignore.case=TRUE)
		# candidate column names for the fragment-count statistics; NA when a
		# statistic is not annotated
		nFragCns <- c(
			total=findOrderedNames(colnames(cellAnnot), c(".CR.cellQC.total", ".CR.cellQC.atac_raw_reads", "nFrags")),
			pass=findOrderedNames(colnames(cellAnnot), c(".CR.cellQC.passed_filters", ".CR.cellQC.atac_fragments", "nFrags")),
			tss=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.(atac_)?TSS_fragments"),
			peak=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.(atac_)?peak_region_fragments"),
			duplicate=findOrderedNames(colnames(cellAnnot), c(".CR.cellQC.duplicate", ".CR.cellQC.atac_dup_reads")),
			mito=findOrderedNames(colnames(cellAnnot), c(".CR.cellQC.mitochondrial", ".CR.cellQC.atac_mitochondrial_reads"))
		)
		summaryDf <- data.frame(cell=getSamples(.object), sample=rep("sample", nrow(cellAnnot)))
		if (!is.na(sampleIdCn)) summaryDf[,"sample"] <- cellAnnot[,sampleIdCn]
		if (is.na(nFragCns["total"]) && is.na(nFragCns["pass"])){
			# neither total nor passing fragment counts annotated -->
			# fall back to counting fragments directly from the fragment data
			logger.info("Number of fragments not annotated. --> trying to count from fragment data")
			hasFragments <- length(.object@fragments) > 0
			if (!hasFragments) logger.error("No fragment data found")
			cellAnnot[,".countedFragments"] <- getFragmentNum(.object)
			nFragCns["total"] <- ".countedFragments"
			nFragCns["pass"] <- ".countedFragments"
		}
		# raw counts: nTotal / nPass columns
		for (cn in c("total", "pass")){
			summaryDf[,muRtools::normalize.str(paste("n", cn, sep="_"), return.camel=TRUE)] <- cellAnnot[,nFragCns[cn]]
		}
		# to be divided by total reads
		for (cn in c("mito", "duplicate")){
			if (!is.na(nFragCns[cn])) summaryDf[,muRtools::normalize.str(paste("frac", cn, sep="_"), return.camel=TRUE)] <- cellAnnot[,nFragCns[cn]]/summaryDf[,"nTotal"]
		}
		# to be divided by passing reads
		for (cn in c("tss", "peak")){
			if (!is.na(nFragCns[cn])) summaryDf[,muRtools::normalize.str(paste("frac", cn, sep="_"), return.camel=TRUE)] <- cellAnnot[,nFragCns[cn]]/summaryDf[,"nPass"]
		}
		# tssEnrichment
		# add TSS enrichment if any of the known score columns is present
		cn <- findOrderedNames(colnames(cellAnnot), c(".tssEnrichment", ".tssEnrichment_smoothed", "tssEnrichment", ".tssEnrichment_unsmoothed"))
		if (!is.na(cn)){
			summaryDf[,"tssEnrichment"] <- cellAnnot[,cn]
		}
		return(summaryDf)
	}
)
#-------------------------------------------------------------------------------
if (!isGeneric("filterCellsTssEnrichment")) {
	setGeneric(
		"filterCellsTssEnrichment",
		function(.object, ...) standardGeneric("filterCellsTssEnrichment"),
		signature=c(".object")
	)
}
#' filterCellsTssEnrichment-methods
#'
#' Filter out cells with low TSS enrichment
#'
#' @param .object \code{\linkS4class{DsATAC}} object
#' @param cutoff TSS enrichment cutoff to filter cells
#' @return modified \code{\linkS4class{DsATAC}} object without filtered cells
#'
#' @rdname filterCellsTssEnrichment-DsATAC-method
#' @docType methods
#' @aliases filterCellsTssEnrichment
#' @aliases filterCellsTssEnrichment,DsATAC-method
#' @author Fabian Mueller
#' @export
setMethod("filterCellsTssEnrichment",
	signature(
		.object="DsATACsc"
	),
	function(
		.object,
		cutoff=6
	) {
		cellAnnot <- getSampleAnnot(.object)
		# look for a pre-computed TSS enrichment column (several historic names)
		cn <- findOrderedNames(colnames(cellAnnot), c(".tssEnrichment", ".tssEnrichment_smoothed", "tssEnrichment", ".tssEnrichment_unsmoothed"))
		if (is.na(cn)){
			# not annotated yet --> compute unsmoothed and smoothed scores and
			# add them to the sample annotation; filtering uses the unsmoothed one
			logger.info("TSS enrichment not annotated. Computing TSS enrichment ...")
			tsseRes <- getTssEnrichmentBatch(.object, tssGr=NULL)
			.object <- addSampleAnnotCol(.object, ".tssEnrichment_unsmoothed", tsseRes$tssEnrichment)
			.object <- addSampleAnnotCol(.object, ".tssEnrichment", tsseRes$tssEnrichment.smoothed)
			cn <- ".tssEnrichment_unsmoothed"
			cellAnnot <- getSampleAnnot(.object)
		}
		tsse <- cellAnnot[,cn]
		# keep only cells at or above the cutoff (logical subsetting of the dataset)
		.object <- .object[tsse >= cutoff]
		# workaround: currently saving single-cell DsATAC datasets does not support re-chunking of disk-dumped fragment data
		chunkedFragmentFiles <- .object@diskDump.fragments && .hasSlot(.object, "diskDump.fragments.nSamplesPerFile") && .object@diskDump.fragments.nSamplesPerFile > 1
		if (chunkedFragmentFiles){
			logger.start("Undisking ...")
			.object <- undiskFragmentData(.object)
			logger.completed()
		}
		return(.object)
	}
)
#-------------------------------------------------------------------------------
if (!isGeneric("unsupervisedAnalysisSc")) {
setGeneric(
"unsupervisedAnalysisSc",
function(.object, ...) standardGeneric("unsupervisedAnalysisSc"),
signature=c(".object")
)
}
#' unsupervisedAnalysisSc-methods
#'
#' Perform unsupervised analysis on single-cell data. Performs dimensionality reduction
#' and clustering.
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @param regionType character string specifying the region type
#' @param regionIdx indices of regions to be used (logical or integer vector). If \code{NULL} (default) all regions of the specified regionType will be used.
#' @param dimRedMethod character string specifying the dimensionality reduction method. Currently on \code{"tf-idf_irlba"} is supported
#' @param usePcs integer vector specifying the principal components to use for UMAP and clustering
#' @param clusteringMethod character string specifying the clustering method. Currently on \code{"seurat_louvain"} is supported
#' @return an \code{S3} object containing dimensionality reduction results and clustering
#'
#' @rdname unsupervisedAnalysisSc-DsATACsc-method
#' @docType methods
#' @aliases unsupervisedAnalysisSc
#' @aliases unsupervisedAnalysisSc,DsATACsc-method
#' @author Fabian Mueller
#' @export
setMethod("unsupervisedAnalysisSc",
	signature(
		.object="DsATACsc"
	),
	function(
		.object,
		regionType,
		regionIdx=NULL,
		dimRedMethod="tf-idf_irlba",
		usePcs=1:50,
		clusteringMethod="seurat_louvain"
	) {
		# --- validate arguments ------------------------------------------------
		if (!is.element(regionType, getRegionTypes(.object))) logger.error(c("Unsupported region type:", regionType))
		if (!is.element(dimRedMethod, c("tf-idf_irlba"))) logger.error(c("Unsupported dimRedMethod:", dimRedMethod))
		if (!is.integer(usePcs)) logger.error(c("usePcs must be an integer vector"))
		if (!is.element(clusteringMethod, c("seurat_louvain"))) logger.error(c("Unsupported clusteringMethod:", clusteringMethod))
		if (!is.null(regionIdx)){
			# normalize a logical mask to integer positions, then bounds-check
			if (is.logical(regionIdx)) regionIdx <- which(regionIdx)
			if (!is.integer(regionIdx) || any(regionIdx < 1) || any(regionIdx > getNRegions(.object, regionType))) logger.error("Invalid regionIdx")
		}
		# --- optional region subsetting ---------------------------------------
		dsn <- .object
		if (!is.null(regionIdx)){
			nRegs <- getNRegions(.object, regionType)
			logger.info(c("Retaining", length(regionIdx), "regions for dimensionality reduction"))
			# removeRegions() takes an indicator of regions to DROP, so invert the selection
			idx <- rep(TRUE, nRegs)
			idx[regionIdx] <- FALSE
			dsn <- removeRegions(.object, idx, regionType)
		}
		# --- dimensionality reduction: TF-IDF normalization + truncated SVD ---
		if (dimRedMethod=="tf-idf_irlba"){
			logger.start(c("Performing dimensionality reduction using", dimRedMethod))
			if (length(dsn@countTransform[[regionType]]) > 0) logger.warning("Counts have been pre-normalized. dimRedMethod 'tf-idf_irlba' might not be applicable.")
			# only apply TF-IDF if it has not been applied to this region type yet
			if (!is.element("tf-idf", dsn@countTransform[[regionType]])){
				dsn <- transformCounts(dsn, method="tf-idf", regionTypes=regionType)
			}
			cm <- ChrAccR::getCounts(dsn, regionType, asMatrix=TRUE)
			# cells must be rows for the PCA call, hence the transpose
			pcaCoord <- muRtools::getDimRedCoords.pca(t(cm), components=1:max(usePcs), method="irlba_svd")
			logger.completed()
		}
		cellIds <- colnames(cm) # NOTE(review): 'cm' is only assigned in the branch above; safe while "tf-idf_irlba" is the only accepted method
		# --- UMAP embedding on the selected principal components --------------
		logger.start(c("Getting UMAP coordinates"))
		umapCoord <- muRtools::getDimRedCoords.umap(pcaCoord[,usePcs])
		# detach the fitted UMAP model from the coordinate matrix; it is returned separately
		umapRes <- attr(umapCoord, "umapRes")
		attr(umapCoord, "umapRes") <- NULL
		logger.completed()
		# --- graph-based (Louvain) clustering via Seurat ----------------------
		if (clusteringMethod=="seurat_louvain"){
			logger.start(c("Performing clustering using", clusteringMethod))
			if (!requireNamespace("Seurat")) logger.error(c("Could not load dependency: Seurat"))
			# Louvain clustering using Seurat
			# Seurat requires a feature matrix to construct an object, but only the
			# injected PCA embedding is used downstream --> supply a constant dummy matrix
			dummyMat <- matrix(11.0, ncol=length(cellIds), nrow=11)
			colnames(dummyMat) <- cellIds
			rownames(dummyMat) <- paste0("df", 1:nrow(dummyMat))
			sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
			sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcaCoord, key="PC_", assay="ATAC")
			sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=usePcs, k.param=30)
			clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10)
			# prefix numeric cluster ids with "c" for readable factor levels (c0, c1, ...)
			clustAss <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
			names(clustAss) <- names(clustRes@active.ident)
			logger.completed()
		}
		# --- assemble result ---------------------------------------------------
		res <- list(
			pcaCoord=pcaCoord,
			umapCoord=umapCoord,
			umapRes=umapRes,
			clustAss=clustAss,
			regionType=regionType,
			regionIdx=regionIdx
		)
		class(res) <- "unsupervisedAnalysisResultSc"
		return(res)
	}
)
#-------------------------------------------------------------------------------
# Register the S4 generic for dimRed_UMAP unless it has already been defined
if (!isGeneric("dimRed_UMAP")) {
	setGeneric("dimRed_UMAP",
		def=function(.object, ...) standardGeneric("dimRed_UMAP"),
		signature=c(".object")
	)
}
#' dimRed_UMAP-methods
#'
#' Retrieve dimension reduction embedding and object using UMAP
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @param regions character string specifying the region type to retrieve the UMAP coordinates from.
#' Alternatively, a \code{GRanges} object specifying coordinates that fragment counts
#' will be aggregated over
#' @param tfidf normalize the counts using TF-IDF transformation
#' @param pcs components to use to compute the SVD
#' @param normPcs flag indicating whether to apply z-score normalization to PCs for each cell
#' @param umapParams parameters to compute UMAP coordinates (passed on to
#' \code{muRtools::getDimRedCoords.umap} and further to \code{uwot::umap})
#' @param rmDepthCor correlation cutoff to be used to discard principal components
#'                   associated with fragment depth (all iterations). By default (value >=1)
#'                   no filtering will be applied.
#' @return an \code{S3} object containing dimensionality reduction results
#'
#' @details
#' The output object includes the final singular values/principal components (\code{result$pcaCoord}), the low-dimensional coordinates (\code{result$umapCoord}) as well as region set that provided the basis for the dimension reduction (\code{result$regionGr}).
#'
#' @rdname dimRed_UMAP-DsATACsc-method
#' @docType methods
#' @aliases dimRed_UMAP
#' @aliases dimRed_UMAP,DsATACsc-method
#' @author Fabian Mueller
#' @export
setMethod("dimRed_UMAP",
	signature(
		.object="DsATACsc"
	),
	function(
		.object,
		regions,
		tfidf=TRUE,
		pcs=1:50,
		normPcs=FALSE,
		umapParams=list(
			distMethod="euclidean",
			min_dist=0.5,
			n_neighbors=25
		),
		rmDepthCor=1
	) {
		# 'regions' is either the name of a region type already present in the
		# object, or a GRanges over which fragment counts are aggregated first
		rt <- regions
		gr <- NULL
		if (is.character(rt)){
			if (!is.element(rt, getRegionTypes(.object))){
				logger.error(c("Invalid region type:", rt))
			}
		}
		if (is.element(class(rt), c("GRanges"))){
			logger.start("Aggregating fragment counts")
			gr <- rt
			# register the supplied coordinates under a reserved internal region type name
			rt <- ".regionsForDimRed"
			.object <- regionAggregation(.object, gr, rt, signal="insertions", dropEmpty=FALSE, bySample=FALSE)
			logger.completed()
		}
		dsn <- .object
		idfBase <- NULL
		if (tfidf){
			logger.start("Transforming counts using TF-IDF")
			bcm_unnorm <- ChrAccR::getCounts(dsn, rt, allowSparseMatrix=TRUE) > 0 # unnormalized binary count matrix
			# per-region IDF weights; returned in the result so new data can be projected consistently
			idfBase <- log(1 + ncol(bcm_unnorm) / safeMatrixStats(bcm_unnorm, "rowSums", na.rm=TRUE))
			dsn <- transformCounts(dsn, method="tf-idf", regionTypes=rt) #TODO: renormalize based on sequencing depth rather than aggregated counts across peaks only?
			logger.completed()
		}
		gr <- getCoord(dsn, rt)
		cm <- ChrAccR::getCounts(dsn, rt, allowSparseMatrix=TRUE)
		ph <- getSampleAnnot(dsn)
		# look for a cell annotation column quantifying sequencing depth
		# (used to discard depth-correlated principal components)
		depthCol <- colnames(ph) %in% c("numIns", ".CR.cellQC.passed_filters", ".CR.cellQC.total", ".CR.cellQC.atac_fragments", ".CR.cellQC.atac_raw_reads")
		depthV <- NULL
		pcCorFragmentCount <- NULL
		doRmDepthPcs <- FALSE
		if (any(depthCol)){
			# use the first matching depth column
			depthV <- ph[,colnames(ph)[depthCol][1]]
		}
		# only filter depth-correlated PCs if a depth column exists and the cutoff is in (0, 1)
		if (!is.null(depthV) && rmDepthCor > 0 && rmDepthCor < 1){
			doRmDepthPcs <- TRUE
		}
		mat <- cm
		pcaCoord <- NULL
		if (length(pcs) > 1){
			logger.start("SVD")
			pcaCoord <- muRtools::getDimRedCoords.pca(safeMatrixStats(cm, "t"), components=1:max(pcs), method="irlba_svd")
			mat <- pcaCoord
			if (normPcs) {
				logger.info("Scaling SVDs")
				# z-score normalize the components for each cell
				mat <- rowZscores(mat, na.rm=TRUE)
			}
			if (doRmDepthPcs){
				# drop components whose cell loadings correlate with depth above the cutoff
				rr <- rmDepthPcs(mat, depthV, cutoff=rmDepthCor, pcIdx=pcs)
				pcs <- rr$pcIdx_filtered
				pcCorFragmentCount <- rr$fragCountCor
			}
			mat <- mat[, pcs, drop=FALSE]
			logger.completed()
		}
		logger.start(c("UMAP dimension reduction"))
		paramL <- c(list(X=mat), umapParams)
		umapCoord <- do.call(muRtools::getDimRedCoords.umap, paramL)
		# detach the fitted UMAP model from the coordinate matrix; it is returned separately
		umapRes <- attr(umapCoord, "umapRes")
		attr(umapCoord, "umapRes") <- NULL
		logger.completed()
		res <- list(
			pcaCoord=pcaCoord,
			pcs = pcs,
			pcCorFragmentCount = pcCorFragmentCount,
			idfBase=idfBase,
			umapCoord=umapCoord,
			umapRes=umapRes,
			regionGr=gr,
			.params=list(normPcs=normPcs)
		)
		class(res) <- "DimRed_UMAP_sc"
		return(res)
	}
)
#-------------------------------------------------------------------------------
# Register the S4 generic for iterativeLSI unless it has already been defined
if (!isGeneric("iterativeLSI")) {
	setGeneric("iterativeLSI",
		def=function(.object, ...) standardGeneric("iterativeLSI"),
		signature=c(".object")
	)
}
#' iterativeLSI-methods
#'
#' Perform iterative LSI clustering and dimension reduction as described in doi:10.1038/s41587-019-0332-7
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @param it0regionType character string specifying the region type to start with
#' @param it0nMostAcc the number of the most accessible regions to consider in iteration 0
#' @param it0pcs the principal components to consider in iteration 0
#' @param it0clusterResolution resolution parameter for Seurat's clustering (\code{Seurat::FindClusters}) in iteration 0
#' @param it0clusterMinCells the minimum number of cells in a cluster in order for it to be considered in peak calling (iteration 0)
#' @param it0nTopPeaksPerCluster the number of best peaks to be considered for each cluster in the merged peak set (iteration 0)
#' @param it1pcs the principal components to consider in iteration 1
#' @param it1clusterResolution resolution parameter for Seurat's clustering (\code{Seurat::FindClusters}) in iteration 1
#' @param it1mostVarPeaks the number of the most variable peaks to consider after iteration 1
#' @param it2pcs the principal components to consider in the final iteration (2)
#' @param it2clusterResolution resolution parameter for Seurat's clustering (\code{Seurat::FindClusters}) in the final iteration (2)
#' @param rmDepthCor correlation cutoff to be used to discard principal components associated with fragment depth (all iterations)
#' @param normPcs flag indicating whether to apply z-score normalization to PCs for each cell (all iterations)
#' @param umapParams parameters to compute UMAP coordinates (passed on to \code{muRtools::getDimRedCoords.umap} and further to \code{uwot::umap})
#' @return an \code{S3} object containing dimensionality reduction results, peak sets and clustering
#'
#' @details
#' In order to obtain a low dimensional representation of single-cell ATAC datasets in terms of principal components and UMAP coordinates, we recommend an iterative application of the Latent Semantic Indexing approach [10.1016/j.cell.2018.06.052] described in [doi:10.1038/s41587-019-0332-7]. This approach also identifies cell clusters and a peak set that represents a consensus peak set of cluster peaks in a given dataset. In brief, in an initial iteration clusters are identified based on the most accessible regions (e.g. genomic tiling regions). Here, the counts are first normalized using the term frequency–inverse document frequency (TF-IDF) transformation and singular values are computed based on these normalized counts in selected regions (i.e. the most accessible regions in the initial iteration). Clusters are identified based on the singular values using Louvain clustering (as implemented in the \code{Seurat} package). Peak calling is then performed on the aggregated insertion sites from all cells of each cluster (using MACS2) and a union/consensus set of peaks uniform-length non-overlapping peaks is selected. In a second iteration, the peak regions whose TF-IDF-normalized counts which exhibit the most variability across the initial clusters provide the basis for a refined clustering using derived singular values. In the final iteration, the most variable peaks across the refined clusters are identified as the final peak set and singular values are computed again. Based on these final singular values UMAP coordinates are computed for low-dimensional projection.
#'
#' The output object includes the final singular values/principal components (\code{result$pcaCoord}), the low-dimensional coordinates (\code{result$umapCoord}), the final cluster assignment of all cells (\code{result$clustAss}), the complete, unfiltered initial cluster peak set (\code{result$clusterPeaks_unfiltered}) as well as the final cluster-variable peak set (\code{result$regionGr}).
#'
#' @rdname iterativeLSI-DsATACsc-method
#' @docType methods
#' @aliases iterativeLSI
#' @aliases iterativeLSI,DsATACsc-method
#' @author Fabian Mueller
#' @export
setMethod("iterativeLSI",
	signature(
		.object="DsATACsc"
	),
	function(
		.object,
		it0regionType="t5k",
		it0nMostAcc=20000L,
		it0pcs=1:25,
		it0clusterResolution=0.8,
		it0clusterMinCells=200L,
		it0nTopPeaksPerCluster=2e5,
		it1pcs=1:50,
		it1clusterResolution=0.8,
		it1mostVarPeaks=50000L,
		it2pcs=1:50,
		it2clusterResolution=0.8,
		rmDepthCor=0.5,
		normPcs=FALSE,
		umapParams=list(
			distMethod="euclidean",
			min_dist=0.5,
			n_neighbors=25
		)
	) {
		# record the call parameters (without the dataset itself) for provenance in the result object
		callParams <- as.list(match.call())
		callParams <- callParams[setdiff(names(callParams), ".object")]
		cellIds <- getSamples(.object)
		# per-cell fragment data is required for the pseudo-bulk peak calling below
		if (length(.object@fragments) != length(cellIds)) logger.error("Object does not contain fragment information for all samples")
		ph <- getSampleAnnot(.object)
		# look for a cell annotation column quantifying sequencing depth;
		# it is used to discard depth-correlated principal components
		depthCol <- colnames(ph) %in% c("numIns", ".CR.cellQC.passed_filters", ".CR.cellQC.total", ".CR.cellQC.atac_fragments", ".CR.cellQC.atac_raw_reads")
		depthV <- NULL
		doRmDepthPcs <- FALSE
		if (any(depthCol)){
			# use the first matching depth column
			depthV <- ph[,colnames(ph)[depthCol][1]]
		}
		# only filter depth-correlated PCs if a depth column exists and the cutoff is in (0, 1)
		if (!is.null(depthV) && rmDepthCor > 0 && rmDepthCor < 1){
			doRmDepthPcs <- TRUE
		}
		logger.start("Iteration 0")
		# iteration 0 operates exclusively on the specified starting region type
		dsr <- .object
		for (rt in setdiff(getRegionTypes(dsr), it0regionType)){
			dsr <- removeRegionType(dsr, rt)
		}
		if (!is.null(it0nMostAcc)){
			# keep only the it0nMostAcc regions with the highest mean accessibility
			regAcc <- safeMatrixStats(ChrAccR::getCounts(dsr, it0regionType, allowSparseMatrix=TRUE), statFun="rowMeans", na.rm=TRUE)
			if (it0nMostAcc < length(regAcc)){
				idx2rem <- rank(-regAcc, na.last="keep", ties.method="min") > it0nMostAcc
				logger.info(c("Retaining the", sum(!idx2rem), "most accessible regions for dimensionality reduction"))
				dsr <- removeRegions(dsr, idx2rem, it0regionType)
			}
		}
		logger.start(c("Performing TF-IDF-based dimension reduction"))
		if (length(dsr@countTransform[[it0regionType]]) > 0) logger.warning("Counts have been pre-normalized. 'tf-idf' might not be applicable.")
		dsn <- transformCounts(dsr, method="tf-idf", regionTypes=it0regionType)
		cm <- ChrAccR::getCounts(dsn, it0regionType, allowSparseMatrix=TRUE)
		pcaCoord_it0 <- muRtools::getDimRedCoords.pca(safeMatrixStats(cm, "t"), components=1:max(it0pcs), method="irlba_svd")
		pcs <- pcaCoord_it0
		if (normPcs) {
			logger.info("Scaling SVDs")
			pcs <- rowZscores(pcs, na.rm=TRUE) # z-score normalize PCs for each cell
		}
		it0fragCountCor <- NULL
		if (doRmDepthPcs){
			# drop components whose cell loadings correlate with depth above the cutoff
			rr <- rmDepthPcs(pcs, depthV, cutoff=rmDepthCor, pcIdx=it0pcs)
			it0pcs <- rr$pcIdx_filtered
			it0fragCountCor <- rr$fragCountCor
		}
		pcs <- pcs[, it0pcs, drop=FALSE]
		logger.completed()
		logger.start(c("Clustering"))
		if (!requireNamespace("Seurat")) logger.error(c("Could not load dependency: Seurat"))
		# Louvain clustering using Seurat
		# Seurat requires a feature matrix to construct an object, but only the
		# injected PCA embedding is used downstream --> supply a constant dummy matrix
		dummyMat <- matrix(11.0, ncol=length(cellIds), nrow=11)
		colnames(dummyMat) <- cellIds
		rownames(dummyMat) <- paste0("df", 1:nrow(dummyMat))
		sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
		sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcs, key="PC_", assay="ATAC")
		sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=1:ncol(pcs), k.param=30)
		clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10, resolution=it0clusterResolution)
		# prefix numeric cluster ids with "c" for readable factor levels (c0, c1, ...)
		clustAss_it0 <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
		names(clustAss_it0) <- names(clustRes@active.ident)
		logger.info(c("Number of clusters found:", nlevels(clustAss_it0)))
		# only clusters with enough cells are used for peak calling
		ct <- table(clustAss_it0)
		peakCallClusters <- names(ct)[ct >= it0clusterMinCells]
		doExcludeClusters <- !all(levels(clustAss_it0) %in% peakCallClusters)
		if (doExcludeClusters){
			if (length(peakCallClusters) < 1) logger.error("No clusters with enough cells found on which to call peaks")
			logger.info(c("Considering the following clusters for peak calling:", paste(peakCallClusters, collapse=",")))
		}
		logger.completed()
		logger.start(c("Peak calling"))
		logger.start("Creating cluster pseudo-bulk samples")
		ca <- as.character(clustAss_it0[cellIds])
		dsr <- addSampleAnnotCol(dsr, "clustAss_it0", ca)
		dsm <- dsr
		if (doExcludeClusters){
			dsm <- dsm[ca %in% peakCallClusters]
		}
		dsrClust <- mergeSamples(dsm, "clustAss_it0", countAggrFun="sum")
		logger.completed()
		logger.start("Calling peaks")
		clustPeakGrl <- callPeaks(dsrClust)
		if (!is.null(it0nTopPeaksPerCluster)){
			# per cluster, keep only the top-scoring peaks
			logger.info(paste0("Selecting the ", it0nTopPeaksPerCluster, " peaks with highest score for each cluster"))
			clustPeakGrl <- GRangesList(lapply(clustPeakGrl, FUN=function(x){
				idx <- rank(-elementMetadata(x)[,"score_norm"], na.last="keep", ties.method="min") <= it0nTopPeaksPerCluster
				x[idx]
			}))
		}
		# merge cluster peak sets into a non-overlapping consensus set, preferring higher-scoring peaks
		peakUnionGr <- getNonOverlappingByScore(unlist(clustPeakGrl), scoreCol="score_norm")
		peakUnionGr <- sortGr(peakUnionGr)
		names(peakUnionGr) <- NULL
		logger.completed()
		logger.start("Aggregating counts for union peak set")
		# dsrClust <- regionAggregation(dsrClust, peakUnionGr, "clusterPeaks", signal="insertions", dropEmpty=FALSE)
		dsr <- regionAggregation(dsr, peakUnionGr, "clusterPeaks", signal="insertions", dropEmpty=FALSE, bySample=FALSE)
		logger.completed()
		logger.completed()
		logger.completed()
		logger.start("Iteration 1")
		it1regionType <- "clusterPeaks"
		logger.start(c("Performing TF-IDF-based dimension reduction"))
		dsr <- removeRegionType(dsr, it0regionType)
		dsn <- transformCounts(dsr, method="tf-idf", regionTypes=it1regionType) #TODO: renormalize based on sequencing depth rather than aggregated counts across peaks only?
		cm <- ChrAccR::getCounts(dsn, it1regionType, allowSparseMatrix=TRUE)
		pcaCoord_it1 <- muRtools::getDimRedCoords.pca(safeMatrixStats(cm, "t"), components=1:max(it1pcs), method="irlba_svd")
		pcs <- pcaCoord_it1
		if (normPcs) {
			logger.info("Scaling SVDs")
			pcs <- rowZscores(pcs, na.rm=TRUE)
		}
		it1fragCountCor <- NULL
		if (doRmDepthPcs){
			rr <- rmDepthPcs(pcs, depthV, cutoff=rmDepthCor, pcIdx=it1pcs)
			it1pcs <- rr$pcIdx_filtered
			it1fragCountCor <- rr$fragCountCor
		}
		pcs <- pcs[, it1pcs, drop=FALSE]
		logger.completed()
		logger.start(c("Clustering"))
		sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
		sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcs, key="PC_", assay="ATAC")
		sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=1:ncol(pcs), k.param=30)
		clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10, resolution=it1clusterResolution)
		clustAss_it1 <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
		names(clustAss_it1) <- names(clustRes@active.ident)
		logger.info(c("Number of clusters found:", nlevels(clustAss_it1)))
		logger.completed()
		if (!is.null(it1mostVarPeaks) && it1mostVarPeaks < nrow(cm)){
			logger.start(c("Identifying cluster-variable peaks"))
			logger.start("Creating cluster pseudo-bulk samples")
			dsr <- addSampleAnnotCol(dsr, "clustAss_it1", as.character(clustAss_it1[cellIds]))
			dsrClust <- mergeSamples(dsr, "clustAss_it1", countAggrFun="sum")
			logger.completed()
			logger.start("Identifying target peaks")
			dsnClust <- transformCounts(dsrClust, method="RPKM", regionTypes=it1regionType)
			l2cpm <- log2(ChrAccR::getCounts(dsnClust, it1regionType) / 1e3 + 1) # compute log2(CPM) from RPKM
			# keep the peaks that are most variable across cluster pseudo-bulks
			peakVar <- matrixStats::rowVars(l2cpm, na.rm=TRUE)
			if (it1mostVarPeaks < length(peakVar)){
				idx2rem <- rank(-peakVar, na.last="keep", ties.method="min") > it1mostVarPeaks
				logger.info(c("Retaining the", sum(!idx2rem), "most variable peaks"))
				dsr <- removeRegions(dsr, idx2rem, it1regionType)
			}
			logger.completed()
			logger.completed()
		}
		# FIX: compute the final peak coordinates unconditionally. Previously this was
		# only assigned inside the branch above, so 'peakCoords' was undefined (and the
		# result construction below failed) when it1mostVarPeaks was NULL or >= the
		# number of peaks. When the branch did run, 'dsr' has already been filtered,
		# so the value here is identical to the former in-branch assignment.
		peakCoords <- ChrAccR::getCoord(dsr, it1regionType)
		logger.completed()
		logger.start("Iteration 2")
		it2regionType <- it1regionType
		logger.start(c("Performing TF-IDF-based dimension reduction"))
		# NOTE(review): rmDepthCor is not forwarded here, so depth-correlated PCs are
		# not filtered in iteration 2 (dimRed_UMAP's default disables the filter) -- confirm intended
		umapRes <- dimRed_UMAP(dsr, it2regionType, tfidf=TRUE, pcs=it2pcs, normPcs=normPcs, umapParams=umapParams)
		pcaCoord_sel <- umapRes$pcaCoord[, umapRes$pcs, drop=FALSE]
		if (normPcs) pcaCoord_sel <- rowZscores(pcaCoord_sel, na.rm=TRUE)
		logger.completed()
		logger.start(c("Clustering"))
		sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
		sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcaCoord_sel, key="PC_", assay="ATAC")
		sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=1:ncol(pcaCoord_sel), k.param=30)
		clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10, resolution=it2clusterResolution)
		clustAss <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
		names(clustAss) <- names(clustRes@active.ident)
		logger.info(c("Number of clusters found:", nlevels(clustAss)))
		dsr <- addSampleAnnotCol(dsr, "clustAss_it2", as.character(clustAss[cellIds]))
		logger.completed()
		logger.completed()
		res <- list(
			pcaCoord=umapRes$pcaCoord,
			pcs = umapRes$pcs,
			pcCorFragmentCount=umapRes$pcCorFragmentCount,
			idfBase=umapRes$idfBase,
			umapCoord=umapRes$umapCoord,
			umapRes=umapRes$umapRes,
			clustAss=clustAss,
			regionGr=peakCoords,
			clusterPeaks_unfiltered=peakUnionGr,
			iterationData = list(
				iteration0 = list(
					pcaCoord=pcaCoord_it0,
					clustAss=clustAss_it0,
					pcs=it0pcs,
					pcCorFragmentCount=it0fragCountCor,
					nMostAcc=it0nMostAcc,
					clusterResolution=it0clusterResolution,
					clusterMinCells=it0clusterMinCells,
					nTopPeaksPerCluster=it0nTopPeaksPerCluster
				),
				iteration1 = list(
					pcaCoord=pcaCoord_it1,
					clustAss=clustAss_it1,
					pcs=it1pcs,
					pcCorFragmentCount=it1fragCountCor,
					clusterResolution=it1clusterResolution,
					mostVarPeaks=it1mostVarPeaks
				)
			),
			.params=c(list(normPcs=normPcs), callParams)
		)
		class(res) <- "iterativeLSIResultSc"
		return(res)
	}
)
#-------------------------------------------------------------------------------
# Register the S4 generic for mergePseudoBulk unless it has already been defined
if (!isGeneric("mergePseudoBulk")) {
	setGeneric("mergePseudoBulk",
		def=function(.object, ...) standardGeneric("mergePseudoBulk"),
		signature=c(".object")
	)
}
#' mergePseudoBulk-methods
#'
#' Merge cells into pseudobulk samples based on annotation
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @param mergeGroups factor or character vector or column name in sample annotation table.
#' Can alternatively be a (named) list containing sample indices or names
#' for each group to merge.
#' @param cleanSampleAnnot clean up sample annotation table in the new object
#' @return a new \code{\linkS4class{DsATAC}} object with cells merged into pseudobulk samples
#'
#' @rdname mergePseudoBulk-DsATACsc-method
#' @docType methods
#' @aliases mergePseudoBulk
#' @aliases mergePseudoBulk,DsATACsc-method
#' @author Fabian Mueller
#' @export
setMethod("mergePseudoBulk",
	signature(
		.object="DsATACsc"
	),
	function(
		.object,
		mergeGroups,
		cleanSampleAnnot=TRUE
	) {
		# keep the per-cell annotation: needed below to count the cells
		# contributing to each pseudobulk sample
		cellAnnot <- getSampleAnnot(.object)
		# aggregate the counts of all cells within each group by summation
		mergedDs <- mergeSamples(.object, mergeGroups, countAggrFun="sum")
		pbIds <- getSamples(mergedDs)
		newAnnot <- getSampleAnnot(mergedDs)
		if (cleanSampleAnnot){
			# replace the (potentially huge, concatenated) merged annotation with a
			# minimal table containing only the pseudobulk identifiers
			newAnnot <- data.frame(
				pseudoBulkId = pbIds,
				stringsAsFactors = FALSE
			)
			rownames(newAnnot) <- newAnnot[,"pseudoBulkId"]
		}
		# if merging was specified via an annotation column, record the cell count per group
		groupsIsAnnotCol <- is.character(mergeGroups) && length(mergeGroups) == 1 && is.element(mergeGroups, colnames(cellAnnot))
		if (groupsIsAnnotCol){
			newAnnot[,".nCells"] <- as.integer(table(cellAnnot[,mergeGroups])[pbIds])
		}
		mergedDs@sampleAnnot <- newAnnot
		class(mergedDs) <- "DsATAC" # now a bulk dataset
		return(mergedDs)
	}
)
#-------------------------------------------------------------------------------
# Register the S4 generic for samplePseudoBulk unless it has already been defined
if (!isGeneric("samplePseudoBulk")) {
	setGeneric("samplePseudoBulk",
		def=function(.object, ...) standardGeneric("samplePseudoBulk"),
		signature=c(".object")
	)
}
#' samplePseudoBulk-methods
#'
#' Samples pseudo-bulk samples from single-cells
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @param nnData Data to use for nearest neighbor matching. Can either be the
#' name of a region type in \code{.object} or a data matrix with
#' the same number of rows as \code{.object} has cells.
#' @param nSamples number of pseudobulk samples to be returned
#' @param nCellsPerSample number of cells to be aggregated per sample
#' @return \code{S3} data structure containing a list of sampling results as well as
#' a \code{\linkS4class{DsATAC}} object containing pseudo-bulk aggregates
#'
#' @details
#' Samples pseudo-bulk samples from single-cells by sampling \code{nSamples} individual cells
#' and then merging it with its \code{nCellsPerSample - 1} nearest neighbors (according to \code{nnData}).
#'
#' @rdname samplePseudoBulk-DsATACsc-method
#' @docType methods
#' @aliases samplePseudoBulk
#' @aliases samplePseudoBulk,DsATACsc-method
#' @author Fabian Mueller
#' @export
setMethod("samplePseudoBulk",
	signature(
		.object="DsATACsc"
	),
	function(
		.object,
		nnData,
		nSamples,
		nCellsPerSample=100
	) {
		allCells <- getSamples(.object)
		# draw one seed cell per requested pseudo-bulk sample
		seedCells <- sample(allCells, nSamples)
		# a region type name is resolved to the corresponding (cells x regions) count matrix
		if (is.character(nnData)) nnData <- t(getCounts(.object, nnData))
		if (nrow(nnData) != length(allCells)) logger.error("Invalid value for nnData")
		if (is.null(rownames(nnData))) rownames(nnData) <- allCells
		# retrieve the nearest neighbors for each seed cell
		knnRes <- FNN::get.knnx(nnData, nnData[seedCells,], k=nCellsPerSample)
		nbIdxMat <- knnRes$nn.index
		# translate neighbor indices to cell identifiers (one row per seed cell)
		nbCellMat <- matrix(allCells[nbIdxMat], ncol=ncol(nbIdxMat))
		rownames(nbCellMat) <- seedCells
		# one record per pseudo-bulk sample: the seed cell and its neighborhood
		samplingRecs <- list()
		for (i in seq_along(seedCells)){
			samplingRecs[[i]] <- list(
				seedCellId = seedCells[i],
				cellIds = nbCellMat[i,]
			)
		}
		names(samplingRecs) <- paste0("pb", 1:length(seedCells))
		# aggregate the counts of each neighborhood into one pseudo-bulk sample
		cellIdxPerSample <- lapply(samplingRecs, FUN=function(x){
			x$cellIds
		})
		pbDs <- mergeSamples(.object, cellIdxPerSample, countAggrFun="sum")
		# clean up sample annotation (avoid huge concatenations)
		pbIds <- getSamples(pbDs)
		annot <- data.frame(
			pseudoBulkId = pbIds,
			stringsAsFactors = FALSE
		)
		annot[,"seedCell"] <- sapply(pbIds, FUN=function(x){samplingRecs[[x]]$seedCellId})
		annot[,"nCells"] <- sapply(pbIds, FUN=function(x){length(samplingRecs[[x]]$cellIds)})
		rownames(annot) <- pbIds
		pbDs@sampleAnnot <- annot
		class(pbDs) <- "DsATAC" # now a bulk dataset
		res <- list(
			samplingResult = samplingRecs,
			dataset = pbDs
		)
		class(res) <- "PseudoBulkSamplingResult"
		return(res)
	}
)
#-------------------------------------------------------------------------------
#' getDiffAcc-methods
#'
#' Compute differential accessibility for single-cell datasets by randomly drawing cells from each group and aggregating them into pseudo-bulk samples
#' which are then compared using bulk differential methods
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @param regionType character string specifying the region type
#' @param comparisonCol column name in the cell annotation table to base the comparison on. Alternatively, a vector with one element for each
#' cell in the dataset that can be coerced to a factor
#' @param grp1Name name of the first group in the comparison. if not specified, it will be taken as the first factor level specified in the
#' cell annotation (see \code{'comparisonCol'}).
#' @param grp2Name name of the second group (reference) in the comparison. if not specified, it will be taken as the second factor level specified in the
#'                 cell annotation (see \code{'comparisonCol'}).
#' @param nCellsPerBulk number of cells to sample to create each pseudo-bulk sample
#' @param nBulkPerGroup number of pseudo-bulk samples to create for each group
#' @param method Method for determining differential accessibility. Currently only \code{'DESeq2'} is supported
#' @return a \code{data.frame} containing differential accessibility information
#'
#' @rdname getDiffAcc-DsATACsc-method
#' @docType methods
#' @aliases getDiffAcc
#' @aliases getDiffAcc,DsATAC-method
#' @author Fabian Mueller
#' @export
#' @noRd
setMethod("getDiffAcc",
	signature(
		.object="DsATACsc"
	),
	function(
		.object,
		regionType,
		comparisonCol,
		grp1Name=NULL,
		grp2Name=NULL,
		nCellsPerBulk=100,
		nBulkPerGroup=20,
		method='DESeq2'
	) {
		ph <- getSampleAnnot(.object)
		if (!is.element(method, c("DESeq2"))) logger.error(c("Invalid method for calling differential accessibility:", method))
		# comparisonCol may be a column name in the cell annotation table or a per-cell vector
		if (is.character(comparisonCol) && length(comparisonCol)==1){
			if (!is.element(comparisonCol, colnames(ph))) logger.error(c("Comparison column not found in sample annotation:", comparisonCol))
			contrastF <- factor(ph[,comparisonCol])
		} else if (length(comparisonCol)==nrow(ph)){
			contrastF <- factor(comparisonCol)
		} else {
			logger.error("Invalid value for comparisonCol")
		}
		if (length(levels(contrastF)) < 2) logger.error(c("Invalid comparison column. There should be at least 2 groups."))
		# default to the first two factor levels when group names are not given
		if (is.null(grp1Name)) grp1Name <- levels(contrastF)[1]
		if (is.null(grp2Name)) grp2Name <- levels(contrastF)[2]
		# ".ALL" is a special group name meaning "all cells not in the other group"
		if (!is.element(grp1Name, c(levels(contrastF), ".ALL"))) logger.error(c("Invalid group name (1). No cells annotated with that group:", grp1Name))
		if (!is.element(grp2Name, c(levels(contrastF), ".ALL"))) logger.error(c("Invalid group name (2). No cells annotated with that group:", grp2Name))
		# per-group cell indices
		cidx.grp1 <- which(contrastF==grp1Name)
		if (grp1Name==".ALL") cidx.grp1 <- which(contrastF!=grp2Name)
		cidx.grp2 <- which(contrastF==grp2Name)
		if (grp2Name==".ALL") cidx.grp2 <- which(contrastF!=grp1Name)
		if (method=="DESeq2"){
			logger.info(c("Using method:", method))
			cm <- ChrAccR::getCounts(.object, regionType, allowSparseMatrix=TRUE)
			logger.start("Creating pseudo-bulk samples")
			# the smaller group caps the number of cells drawn per pseudo-bulk sample
			nCells <- min(c(length(cidx.grp1), length(cidx.grp2)))
			doBoostrap <- FALSE
			if (nCells < nCellsPerBulk){
				# too few cells available --> sample with replacement (bootstrap)
				logger.warning(c("Few cells detected per group", "--> selecting only", nCells, "cells for sampling"))
				doBoostrap <- TRUE
			} else {
				nCells <- nCellsPerBulk
			}
			logger.info(c("Using", nCells, "cells per sample"))
			logger.info(c("Using", nBulkPerGroup, "samples per group"))
			# draw the cell index set for each pseudo-bulk replicate
			cidxL.grp1 <- lapply(1:nBulkPerGroup, FUN=function(i){
				sample(cidx.grp1, nCells, replace=doBoostrap)
			})
			cidxL.grp2 <- lapply(1:nBulkPerGroup, FUN=function(i){
				sample(cidx.grp2, nCells, replace=doBoostrap)
			})
			# sum per-cell counts within each replicate --> pseudo-bulk count matrices
			cm.grp1 <- do.call("cbind", lapply(cidxL.grp1, FUN=function(cids){
				safeMatrixStats(cm[,cids,drop=FALSE], statFun="rowSums", na.rm=TRUE)
			}))
			colnames(cm.grp1) <- paste(grp1Name, "sample", 1:nBulkPerGroup, sep="_")
			cm.grp2 <- do.call("cbind", lapply(cidxL.grp2, FUN=function(cids){
				safeMatrixStats(cm[,cids,drop=FALSE], statFun="rowSums", na.rm=TRUE)
			}))
			colnames(cm.grp2) <- paste(grp2Name, "sample", 1:nBulkPerGroup, sep="_")
			logger.completed()
			logger.start("Creating DESeq2 dataset")
			# single-factor design: accessibility explained by group membership
			designF <- as.formula(paste0("~", paste("group", collapse="+")))
			sannot <- data.frame(sampleId=c(colnames(cm.grp1), colnames(cm.grp2)), group=rep(c(grp1Name, grp2Name), times=rep(nBulkPerGroup, 2)))
			dds <- DESeq2::DESeqDataSetFromMatrix(
				countData=cbind(cm.grp1, cm.grp2),
				colData=sannot,
				design=designF
			)
			# attach region coordinates so that fpkm() can derive region lengths
			rowRanges(dds) <- getCoord(.object, regionType)
			dds <- DESeq2::DESeq(dds)
			logger.completed()
			logger.start("Differential table")
			diffRes <- DESeq2::results(dds, contrast=c("group", grp1Name, grp2Name))
			dm <- data.frame(diffRes)
			# combined rank: the worse (larger) of the fold-change rank and the p-value rank
			rankMat <- cbind(
				# rank(-dm[,"baseMean"]), na.last="keep", ties.method="min"),
				rank(-abs(dm[,"log2FoldChange"]), na.last="keep", ties.method="min"),
				rank(dm[,"pvalue"], na.last="keep", ties.method="min")
			)
			dm[,"cRank"] <- matrixStats::rowMaxs(rankMat, na.rm=FALSE)
			# dm[,"cRank"] <- rowMaxs(rankMat, na.rm=TRUE)
			dm[!is.finite(dm[,"cRank"]),"cRank"] <- NA
			dm[,"cRank_rerank"] <- rank(dm[,"cRank"], na.last="keep", ties.method="min")
			sidx.grp1 <- which(sannot[,"group"]==grp1Name)
			sidx.grp2 <- which(sannot[,"group"]==grp2Name)
			# per-group mean accessibility on two normalized scales: log10(FPKM+1) and VST counts
			l10fpkm <- log10(DESeq2::fpkm(dds, robust=TRUE)+1)
			grp1.m.l10fpkm <- rowMeans(l10fpkm[, sidx.grp1, drop=FALSE], na.rm=TRUE)
			grp2.m.l10fpkm <- rowMeans(l10fpkm[, sidx.grp2, drop=FALSE], na.rm=TRUE)
			vstCounts <- assay(DESeq2::vst(dds, blind=FALSE))
			grp1.m.vst <- rowMeans(vstCounts[, sidx.grp1, drop=FALSE], na.rm=TRUE)
			grp2.m.vst <- rowMeans(vstCounts[, sidx.grp2, drop=FALSE], na.rm=TRUE)
			res <- data.frame(
				log2BaseMean=log2(dm[,"baseMean"]),
				meanLog10FpkmGrp1=grp1.m.l10fpkm,
				meanLog10FpkmGrp2=grp2.m.l10fpkm,
				meanVstCountGrp1=grp1.m.vst,
				meanVstCountGrp2=grp2.m.vst,
				dm
			)
			# add group names to column names
			for (cn in c("meanLog10FpkmGrp", "meanVstCountGrp")){
				colnames(res)[colnames(res)==paste0(cn,"1")] <- paste0(cn, "1_", grp1Name)
				colnames(res)[colnames(res)==paste0(cn,"2")] <- paste0(cn, "2_", grp2Name)
			}
			logger.completed()
		}
		return(res)
	}
)
|
40f089fb76f9a7717c03b4ad6f6faf39b1f258d9
|
3c0609b158edf4b860c0da58a0b864825c29c41f
|
/example.R
|
e838ee20c97ed0b2827a06e9aa87cdd7b241104b
|
[] |
no_license
|
clsong/JAE-Song_et_al-2017
|
aed2003b663c344b93c2d71637a03db159354b7c
|
48b20693e0e778a89ade759bf8221a5ce65bb243
|
refs/heads/master
| 2021-05-08T16:13:13.311513
| 2019-03-15T15:03:27
| 2019-03-15T15:03:27
| 120,145,206
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
r
|
example.R
|
#R-code of "Why are some plant-pollinator networks more nested than others?" by:
#Chuliang Song, Rudolf P. Rohr, and Serguei Saavedra
#Journal of Animal Ecology
# NOTE(review): clearing the workspace is a side effect on the caller's session;
# consider removing this line when sourcing from other scripts
rm(list=ls())
source('toolbox.R') #load the toolbox
web <- load_data() #load network.csv
print(NODF <- nestedness_NODF(web)) # this calculates the raw value of NODF
print(max_NODF <- max_nest(web)) # this calculates the maximum value of NODF for that network
print(combined_NODF <- comb_nest(web,NODF,max_NODF)) # this calculates the combined NODF statistic as described in the manuscript
d203a8a24acacf8ab5450d72c97a396e33ea3d8a
|
147e55d3e91a7bd1865de3f6ea62077ce6591241
|
/GenGraph/ui.R
|
b5c8695180b1a4cd897da137e059e57c0e9e616c
|
[] |
no_license
|
jolivero2001/shiny-dashboard
|
a995736252fa3df38004be94e57f634f70e8e0eb
|
524f6c50cd6259dea5a103bcf7f7c39d5958256d
|
refs/heads/master
| 2021-06-03T19:57:24.808410
| 2021-04-11T14:43:24
| 2021-04-11T14:43:24
| 102,426,793
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,919
|
r
|
ui.R
|
library(shinydashboard)
library(ggvis)
library(shiny)
library(dplyr)
library(ggplot2)
library(DBI)
library(shinyjs)
library(lazyeval)
library(shinyAce)
library(knitr)
library(tidyr)
library(corrplot)
library(ggraph)
# Header dropdown menus: notifications and tasks.
# (The "messages" dropdown is currently disabled.)
#dm <- dropdownMenu(type="messages")
mm <- dropdownMenu(type="notifications")
tm <- dropdownMenu(type="tasks")
# Sidebar: a single expandable "Set Up" item whose sub-items select the
# matching tabItem panels defined inside dashboardSidebar() below.
# `id="tabs"` lets the server observe which tab is active.
sm <- sidebarMenu(id="tabs",
  menuItem(
    text="Set Up",startExpanded = TRUE,icon=icon("dashboard"),
    menuSubItem(text="Plots",tabName="Plots",icon=icon("bar-chart-o")),
    menuSubItem(text="Analytics",tabName="Analytics",icon=icon("bar-chart-o")),
    menuSubItem(text="Other Variables",icon=icon("th"),tabName="Variables"),
    menuSubItem(text="NPlots",tabName="NPlots",icon=icon("line-chart")),
    menuSubItem(text="Operator",tabName="Operator",icon=icon("users")),
    menuSubItem(text="Selection",tabName="Selection",icon=icon("align-justify")),
    menuSubItem(text="Transformation",tabName="Transformation"),
    menuSubItem(text="View",tabName="View"),
    menuSubItem(text="Guidelines Tool",icon=icon("user"),tabName="Semantic")
  )
)
# Top-level shinydashboard UI: header (date + dropdowns), sidebar with one
# configuration panel per menuSubItem, and a body with the plot/analytics
# output area plus an embedded Ace editor.
ui <- dashboardPage(
  dashboardHeader(title = Sys.Date(),mm,tm),
  # The sidebar hosts both the menu (sm) and the per-tab input controls.
  dashboardSidebar(sm,
    tabItems(
      # Plots: choose which data columns to plot.
      # NOTE(review): `dat.moodle1` is not defined in this file -- presumably
      # loaded by global.R or the calling environment; confirm.
      tabItem(
        tabName="Plots",
        fluidPage(
          useShinyjs(),
          fluidRow(
            selectInput("gVariable", "Plots:",
                        choices = names(dat.moodle1),
                        multiple = TRUE,
                        selected = "Points")
          )
        )
      ),
      # Analytics: choose the statistical analysis to run ("n" = none).
      tabItem(
        tabName="Analytics",
        fluidPage(
          useShinyjs(),
          fluidRow(
            selectInput("aVariable", "Analytics:",
                        c("n","Regresion","1way Anova","2way Anova","Correlation","Segmentation"),
                        multiple= FALSE,
                        selected = "1")
          )
        )
      ),
      # Other Variables: server-rendered controls (see output$oSource).
      tabItem(
        tabName="Variables",
        fluidPage(
          useShinyjs(),
          fluidRow(
            uiOutput("oSource")
          )
        )
      ),
      # NPlots: number of plots and which one is currently active.
      tabItem(
        tabName="NPlots",
        fluidPage(
          useShinyjs(),
          fluidRow(
            sliderInput("number","Number of plots", value=1,min=1,max=20)
            ,
            sliderInput("plotActive","Plot Active", value=1,min=1,max=20)
          )
        )
      ),
      # Operator: data-combination operators.
      tabItem(
        tabName="Operator",
        fluidPage(
          useShinyjs(),
          fluidRow(
            selectInput("filter1Variable", "Operators:",
                        c("n","Join","Union"),
                        selected = "1")
          )
        )
      ),
      # Transformation: server-rendered controls (see output$tSource).
      tabItem(
        tabName="Transformation",
        fluidPage(
          useShinyjs(),
          fluidRow(
            uiOutput("tSource")
          )
        )
      ),
      # View: how to display the data.
      tabItem(
        tabName="View",
        fluidPage(
          useShinyjs(),
          fluidRow(
            selectInput("filter3Variable", "View:",
                        c("Table","Summarize"),
                        selected = "1")
          )
        )
      ),
      # Selection: row-selection / grouping tools.
      tabItem(
        tabName="Selection",
        fluidPage(
          useShinyjs(),
          fluidRow(
            selectInput("filterVariable", "Selection:",
                        c("n","Filter","Group+","Editor"),
                        multiple = TRUE,
                        selected = "n")
          )
        )
      ),
      # Guidelines Tool: visualization guideline options.
      tabItem(
        tabName="Semantic",
        fluidPage(
          useShinyjs(),
          fluidRow(
            selectInput("filter4Variable", "Guidelines:",
                        c("n","Logaritmic X","Logaritmic Y","Banking 45"),
                        multiple = TRUE,
                        selected = "n")
          )
        )
      )
    )
  ),
  dashboardBody(
    # Row 1: aesthetic-mapping controls, all rendered server-side.
    fluidRow(
      useShinyjs(),
      box(width = 20,collapsible=TRUE,
        splitLayout(cellArgs = list(style = "padding: 10px"),
          #box(width = 3,
          uiOutput("dNumber"),
          #box(width = 3,
          uiOutput("dSource"),
          #box(width = 3,
          uiOutput("xSource"),
          #box(width = 20 ,collapsible=TRUE,
          #splitLayout(cellArgs = list(style = "padding: 10px"),
          uiOutput("Size"),
          uiOutput("Color"),
          uiOutput("Stroke"),
          uiOutput("Shape"),
          uiOutput("Text"),
          uiOutput("fSource"),
          uiOutput("gSource")
        )
      )),
    # Row 2: extra controls, the main tabbed output area, axis/facet
    # controls, and an Ace markdown editor with an "Update" button.
    fluidRow(
      useShinyjs(),
      box(width = 2,collapsible=TRUE,
        splitLayout(cellArgs = list(style = "padding: 10px"),
          uiOutput("p_ui"),
          uiOutput("p_uif"))),
      box(width = 7,collapsible=TRUE,
        splitLayout(cellWidths = c("100%"),
          tabsetPanel(id="tabs2",
            type = "pills",
            tabPanel("Plots", uiOutput("plots")),
            tabPanel("Analytics", uiOutput("plots2")),
            tabPanel("Data", uiOutput("summary"))
            #tabPanel("Editor", htmlOutput("knitDoc"))
            #tabPanel("Controls",uiOutput("p_ui"))
          )
        )),
      box(width = 3,
        splitLayout(cellArgs = list(style = "padding: 10px"),
          uiOutput("xAxes"),
          uiOutput("yAxes"),
          uiOutput("Facetx"),
          uiOutput("Factor2")
        )),
      box(width = 3,collapsible=TRUE,
        bootstrapPage(
          div(
            class="container-fluid",
            div(class="row-fluid",
              div(class="span6",
                aceEditor("ace",mode="markdown",value='')
                ,actionButton("eval", "Update")
              )
              #,
              #div(class="span6",
              # htmlOutput("knitDoc")
              #)
            ))
        )
      ))
  )
)
|
e15ff56d6b149eeacaaa9eadcf076da9e51935af
|
175178c455fbe90cfc4a6bde924a350f218a673c
|
/phenotype.R
|
6845b18b75efa2262b96234eb909b7186800c1f9
|
[] |
no_license
|
yanweicai/Replica_CC_TOVIDLTAK
|
210a8cdb149d85c71a22601d61d053fa72039924
|
69a26a2595c0bbd8270d3cec70bf9b178732041b
|
refs/heads/master
| 2023-02-15T15:18:11.302746
| 2021-01-09T03:45:10
| 2021-01-09T03:45:10
| 324,623,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,920
|
r
|
phenotype.R
|
library(lme4)
library(lmerTest)
library(qvalue)
library(varComp)
library(miqtl)
library(ggplot2)
library(tidyverse)
# NOTE(review): hard-coded setwd() to a personal Dropbox path makes the
# script non-portable; consider relative paths or here::here().
setwd("~/Dropbox/ValdarLab/IDSScross_git/src/")
# read in data table
info <- read.table(file="../data/info_3study.txt",header=TRUE,sep = "\t")
# CC strain x study interaction label, used as a random-effect grouping factor.
info$CCbyStudy <- paste0(info$CC,'_',info$Study)
options(contrasts = rep("contr.sum", 2)) # Set contrasts -- see below
### Section 1. Variance components of phenotype models.
# For each phenotype: remove batch (dosing date) effects, then estimate
# heritability within each study and variance components across studies.
phone_list=c("D.BW","BWratio","LWratio","AST","ALT","TBIL")
outdf <- data.frame() # output data frame
for (ph in phone_list){
  message(ph)
  info$y <- as.numeric(info[,ph])
  # Adjust for Batch effects: fit dosing date as a random effect and
  # subtract its BLUPs from the phenotype.
  fit0 <- lmer( y ~ 1 + Study + (1|CCline) + (1|CCbyStudy) + (1|Dosing.Date), data=info,REML=FALSE)
  batef <- ranef(fit0)$Dosing.Date;batch2num <- batef[[1]]; names(batch2num) <- rownames(batef)
  info$y <- info$y - batch2num[info$Dosing.Date]
  ### single study linear mix model by lmer
  # Per-study heritability = strain variance / total variance; the p-value
  # comes from lmerTest::ranova's likelihood-ratio test on the random term.
  fit1 <- lmer( y ~ 1 +(1|CCline), data=(info[which(info$Study=='TOV'),]),REML=FALSE)
  varout1 <- as.data.frame(VarCorr(fit1))
  h2.tov <- round(varout1$vcov[[1]]/sum(varout1$vcov),3)
  h2.tov.p <- ranova(fit1)$"Pr(>Chisq)"[2]
  fit2 <- lmer( y ~ 1 + (1|CCline), data=info[which(info$Study=='TAK'),],REML=FALSE)
  varout2 <- as.data.frame(VarCorr(fit2))
  h2.tak <- round(varout2$vcov[[1]]/sum(varout2$vcov),3)
  h2.tak.p <- ranova(fit2)$"Pr(>Chisq)"[2]
  fit3 <- lmer( y ~ 1 + (1|CCline), data=info[which(info$Study=='GLD'),],REML=FALSE)
  varout3<- as.data.frame(VarCorr(fit3))
  h2.gld <- round(varout3$vcov[[1]]/sum(varout3$vcov),3)
  h2.gld.p <- ranova(fit3)$"Pr(>Chisq)"[2]
  # phenotype analysis for three studies ("mega" model across studies):
  # fit5 includes Study as a fixed effect; fit7 drops it so that
  # anova(fit5, fit7) tests the Study effect.
  fit5 <- lmer( y ~ 1 + Study + (1|CCline) + (1|CCbyStudy), data=info,REML=FALSE)
  fit7 <- lmer( y ~ 1 + (1|CCline) + (1|CCbyStudy), data=info,REML=FALSE)
  h2.SxS.p <- ranova(fit5)$"Pr(>Chisq)"[3]
  h2.C.p <- ranova(fit5)$"Pr(>Chisq)"[2]
  h2.S.p <- anova(fit5,fit7)$"Pr(>Chisq)"[2]
  # Decompose total variance into CC strain, strain-by-study, residual,
  # and (by subtraction) study components.
  varout5 <- as.data.frame(VarCorr(fit5))
  v.CC <- varout5$vcov[which(varout5$grp=="CCline")]
  v.R <- varout5$vcov[which(varout5$grp=="Residual")]
  v.CCS <- varout5$vcov[which(varout5$grp=='CCbyStudy')] # var(X)+var(Y)+2Cov(X+Y)
  v.All <- var(info$y,use='na.or.complete')
  v.Study <- v.All-v.CC-v.R-v.CCS
  h2.mega.CS <- round(v.CCS/v.All,3)
  h2.mega.CC <- round(v.CC/v.All,3)
  h2.mega.Study <- round(v.Study/v.All,3)
  # p2p: map p-values to significance stars ('.' <=0.1, '*' <=0.05,
  # '**' <=0.01, '***' <=0.001).
  p2p <- function(plist){
    plisto <- rep('',length(plist))
    plisto[which(plist>=0.1)]<-''
    plisto[which(plist<=0.1)]<-'.'
    plisto[which(plist<=0.05)]<-'*'
    plisto[which(plist<=0.01)]<-'**'
    plisto[which(plist<=0.001)]<-'***'
    plisto
  }
  # One summary row per phenotype: estimates annotated with stars, plus
  # the raw p-values.
  thisdf <- data.frame(ph=ph,h2.tov=paste0(h2.tov,p2p(h2.tov.p)),h2.tak=paste0(h2.tak,p2p(h2.tak.p)),h2.gld=paste0(h2.gld,p2p(h2.gld.p)),
                       CC.123=paste0(h2.mega.CC,p2p(h2.C.p)),
                       Study.123=paste0(h2.mega.Study,p2p(h2.S.p)),
                       SxS.123=paste0(h2.mega.CS,p2p(h2.SxS.p)),
                       h2.tov.p=h2.tov.p,h2.tak.p=h2.tak.p,h2.gld.p=h2.gld.p,
                       h2.C.p=h2.C.p,h2.S.p=h2.S.p,h2.SxS.p=h2.SxS.p)
  #thisdf2 <- data.frame(ph=ph,h2.tov=h2.tov,h2.tov.p=h2.tov.p,h2.tak=h2.tak,h2.tak.p=h2.tak.p,h2.gld=h2.gld,h2.gld.p=h2.gld.p,
  # Study.123=mega123[1],Study.123.p=mega123[2],CC.123=mega123[3],CC.123.p=mega123[4],SxS.123=mega123[5],SxS.123.p=mega123[6])
  outdf <- rbind(outdf,thisdf)
}
# outdf --> Variance components of phenotype models
### Section 2. shrinkage estimation of variance components (Body weight)
library(limma)
# Per study x strain: mean, variance and sample size of body weight.
BWsum <- info %>% filter(!is.na(D.BW)) %>% group_by(Study,CCline) %>% dplyr::summarize(mean = mean(D.BW),var = var(D.BW), obs=length(D.BW))
# Numeric x-position for plotting: TOV=0, TAK=1, GLD=2.
BWsum$x <- 0;BWsum$x[which(BWsum$Study=='TAK')]<-1;BWsum$x[which(BWsum$Study=='GLD')]<-2;
BWsum$var.shink <- BWsum$var
# Empirical-Bayes shrinkage of the per-strain variances (limma::squeezeVar).
BWsum$var.shink <- squeezeVar(BWsum$var,(BWsum$obs-1))$var.post
BWsum$sd <- sqrt(BWsum$var.shink)
BWsum$cv <- BWsum$sd/BWsum$mean
# Strains observed in all three studies get highlighted lines (col == 1).
BWcount <- as.data.frame.matrix(table(BWsum[,c('CCline','x')]))
strainfull <- rownames(BWcount)[which(BWcount[[1]]==1 & BWcount[[2]]==1 & BWcount[[3]]==1)]
BWsum$col <- 0
BWsum$col[which(BWsum$CCline %in% strainfull)] <- 1
# Duplicate the TOV points at x=3 so the plot wraps around to the start.
BWsumsub <- BWsum[which(BWsum$x==0),]
BWsumsub$x <- 3
BWsum <- rbind(BWsum,BWsumsub)
# plot for the within-strain differences figure
#pdf(file="Result/BWsd.pdf",width=4.5,height=4.5)
p<- ggplot(BWsum,aes(x=x,y=sd,group=CCline))
for (xi in 0:3){p <- p + geom_vline(xintercept = xi, color = "gray", size=1)}
# Colored lines for fully observed strains, gray segments for the rest.
p <- p + geom_point(aes(color=CCline),show.legend = F) +
  geom_line(aes(color=CCline),show.legend = F,subset(BWsum,col==1)) +
  geom_line(aes(),color='gray',show.legend = F,subset(BWsum,x %in% c(0,1) & col==0)) +
  geom_line(aes(),color='gray',show.legend = F,subset(BWsum,x %in% c(1,2) & col==0)) +
  geom_line(aes(),color='gray',show.legend = F,subset(BWsum,x %in% c(2,3) & col==0))
p+theme(text=element_text(size=15),panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_blank())
#dev.off()
### Section 3. QTL mapping for ALT/AST phenotype
# For each liver-enzyme phenotype: remove batch effects per study, collapse
# to strain means (with observation counts as weights), then run miqtl
# genome scans per study and across all three studies ("mega").
for (ph in c('ALT','AST')){
  message(ph)
  info.ph <- cbind(info[,1:7],data.frame(ph=info[,ph]))
  info.ph$ph.adj <- info.ph$ph
  # debatch and take strain mean within each
  Tov_df <- info.ph[which(info.ph$Study=='TOV'),]
  batcheff <- ranef( ( lmer(ph~ (1|CCline) + (1|Dosing.Date),data=Tov_df) ))$Dosing.Date
  batcheffv <- batcheff[[1]];names(batcheffv) <- rownames(batcheff)
  Tov_df$ph.adj <- Tov_df$ph - batcheffv[as.character(Tov_df$Dosing.Date)]
  # Merge per-strain observation count (length) and mean of the adjusted trait.
  Tov_df2 <- merge( aggregate(ph.adj ~ CCline,Tov_df,length),aggregate(ph.adj ~ CCline,Tov_df,mean),by="CCline")
  colnames(Tov_df2) <- c('CC','weight','ph')
  Tov_df2$Study <- 'TOV'
  Tak_df <- info.ph[which(info.ph$Study=='TAK'),]
  batcheff <- ranef( ( lmer(ph~ (1|CCline) + (1|Dosing.Date),data=Tak_df) ))$Dosing.Date
  batcheffv <- batcheff[[1]];names(batcheffv) <- rownames(batcheff)
  Tak_df$ph.adj <- Tak_df$ph - batcheffv[as.character(Tak_df$Dosing.Date)]
  Tak_df2 <- merge( aggregate(ph.adj ~ CCline,Tak_df,length),aggregate(ph.adj ~ CCline,Tak_df,mean),by="CCline")
  colnames(Tak_df2) <- c('CC','weight','ph')
  Tak_df2$Study <- 'TAK'
  Gld_df <- info.ph[which(info.ph$Study=='GLD'),]
  batcheff <- ranef( ( lmer(ph~ (1|CCline) + (1|Dosing.Date),data=Gld_df) ))$Dosing.Date
  batcheffv <- batcheff[[1]];names(batcheffv) <- rownames(batcheff)
  Gld_df$ph.adj <- Gld_df$ph - batcheffv[as.character(Gld_df$Dosing.Date)]
  Gld_df2 <- merge( aggregate(ph.adj ~ CCline,Gld_df,length),aggregate(ph.adj ~ CCline,Gld_df,mean),by="CCline")
  colnames(Gld_df2) <- c('CC','weight','ph')
  Gld_df2$Study <- 'GLD'
  info.adj <- rbind(Tov_df2,Tak_df2,Gld_df2)
  info.adj$Study <- as.factor(info.adj$Study)
  ph.df <- info.adj
  ph.df$pheno.id <- paste(ph.df$Study,ph.df$CC,sep=".")
  ph.df$SUBJECT.NAME <- as.factor(ph.df$CC)
  # Drop strain CC078.
  # NOTE(review): if 'CC078' were absent, -which(...) would be -integer(0),
  # which selects ZERO rows and silently empties ph.df; safer would be
  # ph.df[ph.df$CC != 'CC078', ]. Works here only because CC078 is present.
  ph.df[-which(ph.df$CC=='CC078'),] -> ph.df
  Tov_df2 <- ph.df[which(ph.df$Study=='TOV'),]
  Tak_df2 <- ph.df[which(ph.df$Study=='TAK'),]
  Gld_df2 <- ph.df[which(ph.df$Study=='GLD'),]
  # mapping
  MI=TRUE  # use multiple imputation of founder haplotypes in miqtl
  myweights <- Tov_df2$weight; names(myweights) <- Tov_df2$CC
  Scan_Tov <- scan.h2lmm(genomecache="~/Dropbox/ValdarLab/Takeda_copy/segments_happy_format_mm10/",
                         data=Tov_df2, formula= ph ~ 1,geno.id="CC",pheno.id = "CC",weights = myweights,use.multi.impute=MI,num.imp=15,print.locus.fit=FALSE)
  myweights <- Tak_df2$weight; names(myweights) <- Tak_df2$CC
  Scan_Tak <- scan.h2lmm(genomecache="~/Dropbox/ValdarLab/Takeda_copy/segments_happy_format_mm10/",
                         data=Tak_df2, formula= ph ~ 1,geno.id="CC",pheno.id = "CC",weights = myweights,use.multi.impute=MI,num.imp=15,print.locus.fit=FALSE)
  myweights <- Gld_df2$weight; names(myweights) <- Gld_df2$CC
  Scan_Gld <- scan.h2lmm(genomecache="~/Dropbox/ValdarLab/Takeda_copy/segments_happy_format_mm10/",
                         data=Gld_df2, formula= ph ~ 1,weights = myweights,geno.id="CC",pheno.id = "CC",use.multi.impute=MI,num.imp=15,print.locus.fit=FALSE)
  # Mega scan across all three studies, adjusting for Study as a covariate.
  myweights <- ph.df$weight;names(myweights) <- ph.df$pheno.id
  Scan_mega <- scan.h2lmm(genomecache="~/Dropbox/ValdarLab/Takeda_copy/segments_happy_format_mm10/",
                          data=ph.df, formula= ph ~ 1+Study,weights = myweights,pheno.id="pheno.id",use.multi.impute=MI,num.imp=20,print.locus.fit=FALSE)
  saveRDS(Scan_mega,file=paste0("~/Dropbox/ValdarLab/IDSScross/Result/PhenoAuto/",ph,"scan.RDS"))
  #HPeak <- ceiling(max( -log10(Scan_Tov$p.value),-log10(Scan_Tak$p.value), -log10(Scan_Gld$p.value),-log10(Scan_mega$p.value)))
  pdf(paste0("~/Dropbox/ValdarLab/IDSScross/Result/Pheno/",ph,".mapping.mm10.pdf"),width=10,height=14)
  par(mfrow=c(4,1))
  genome.plotter.whole(scan.list=list(MI=Scan_Tov),main="TOV")
  genome.plotter.whole(scan.list=list(MI=Scan_Tak),main="TAK")
  genome.plotter.whole(scan.list=list(MI=Scan_Gld),main="GLD")
  genome.plotter.whole(scan.list=list(MI=Scan_mega),main="Mega")
  dev.off()
}
# threshold for ALT: 9.265661 (gev:-0.2303773 1.2742609 6.5247375)
# threshold for AST: 6.516393 (gev:-0.01232742 0.77762467 4.24846875)
# NOTE(review): the code below appears to be interactive leftovers and will
# NOT run top-to-bottom: it reuses `ph`/`Scan_*` from the last loop
# iteration, `HPeak` is never assigned (its computation above is commented
# out), and it overwrites the PDF just written inside the loop.
pdf(paste0("~/Dropbox/ValdarLab/IDSScross/Result/Pheno/",ph,".mapping.mm10.pdf"),width=8.5,height=7)
par(mfrow=c(2,1))
genome.plotter.whole(scan.list=list(TOV=Scan_Tov,TAK=Scan_Tak,IDL=Scan_Gld),main="",y.max.manual=6,
                     main.colors = c("blue", "orange", "gray48"))
genome.plotter.whole(scan.list=list(Mega=Scan_mega),main="",y.max.manual=HPeak)
dev.off()
# NOTE(review): ALT.scan / AST.scan are not defined in this file --
# presumably loaded from the saved "*scan.RDS" files in an interactive
# session; confirm before running as a script.
pdf(paste0("~/Dropbox/ValdarLab/IDSScross/Result/Pheno/ALTAST.mapping.mm10.pdf"),width=8.5,height=7)
par(mfrow=c(2,1))
genome.plotter.whole(scan.list=list(Mega=ALT.scan),main="",hard.thresholds=9.265661)
genome.plotter.whole(scan.list=list(Mega=AST.scan),main="",hard.thresholds=6.516393)
dev.off()
|
82b91cc3d089b5201f58deb93209c7386d56742e
|
7d9cbb939c81cf32bce02b7b4d43dcefad0b65dc
|
/R/allezTable.R
|
36257d3086e54940e0ab3c626be9702dce0ab2bc
|
[] |
no_license
|
atbroman/allez
|
e677a6309b0b2a5465788b155a8bf338eb77d4ea
|
2a7a57b3f5b20b25c971a524745b4cadb8341b49
|
refs/heads/master
| 2020-05-03T21:42:42.393259
| 2017-03-03T16:01:32
| 2017-03-03T16:01:32
| 9,983,732
| 1
| 4
| null | 2015-08-19T19:15:16
| 2013-05-10T14:59:06
|
R
|
UTF-8
|
R
| false
| false
| 2,619
|
r
|
allezTable.R
|
## Outputs top GO/KEGG categories ##
## score >= 0
## z.score is one-sided: z.score < 0 indicate enrichment
## for genes outside of gene set
#' Tabulate the top-scoring gene sets from an allez analysis.
#'
#' Filters `allez.out$setscores` to sets whose size lies in
#' [n.low, n.upp] (and is below the number of genes in the "globe"),
#' whose z-score is at least `zthr`, and which contain at least `n.cell`
#' genes with positive gene scores.
#'
#' @param allez.out List from allez(): `setscores` (one row per set, with
#'   set.size, z.score, sd and set.mean columns) and `aux` holding
#'   `set.data` (set id, gene id, symbol, gscores) and `globe`.
#' @param n.low,n.upp Minimum / maximum set size retained.
#' @param n.cell Minimum count of in-set genes with gscores > 0.
#' @param zthr Minimum z-score retained.
#' @param symbol If TRUE report gene symbols (column 3 of set.data)
#'   rather than gene/probe ids (column 2).
#' @param in.set If TRUE also report counts and ids restricted to genes
#'   with positive gene scores.
#' @return The retained setscores rows with "sd" columns dropped and a
#'   `genes` column of ";"-joined ids, sorted by decreasing set.mean.
allezTable <- function(allez.out,
                       n.low = 5,
                       n.upp = 500,
                       n.cell = 0,
                       zthr = 5,
                       symbol = FALSE,
                       in.set = FALSE) {
  ## Column of set.data carrying the gene label to report.
  ## (Scalar condition, so if/else rather than vectorized ifelse().)
  idcol <- if (symbol) 3 else 2
  ## First z-score column of the set-score table.
  zcol <- grep("z.score", colnames(allez.out$setscores))[1]
  ## Per-set number of genes with positive gene scores.
  nc <- tapply(allez.out$aux$set.data$gscores,
               allez.out$aux$set.data[, 1],
               function(x) sum(x > 0))
  G <- length(allez.out$aux$globe)
  ## Keep sets with acceptable size, z-score and in-set gene count.
  ## If set.size == G then z.score is NA, hence the set.size < G guard.
  ok <- (allez.out$setscores$set.size >= n.low) &
    (allez.out$setscores$set.size <= n.upp) &
    (allez.out$setscores$set.size < G) &
    (allez.out$setscores[, zcol] >= zthr) &
    (nc[rownames(allez.out$setscores)] >= n.cell)
  ## Drop sets whose comparison is NA (e.g. missing z-score) instead of
  ## letting NA subscripts inject all-NA rows into the output.
  ok <- !is.na(ok) & ok
  ## Drop standard-deviation columns from the report.  Guarded because
  ## negating an empty grep() match (-integer(0)) would select ZERO
  ## columns rather than all of them.
  sd.cols <- grep("sd", colnames(allez.out$setscores))
  allez.table <- if (length(sd.cols) > 0) {
    allez.out$setscores[ok, -sd.cols]
  } else {
    allez.out$setscores[ok, ]
  }
  ## Gene-level rows for retained sets, strongest gene scores first.
  set.data <- allez.out$aux$set.data[
    allez.out$aux$set.data[, 1] %in% rownames(allez.table), ]
  set.data <- set.data[order(set.data$gscores, decreasing = TRUE), ]
  ## Per set: gene labels joined by ";".  "pos" lists them by decreasing
  ## gene score, "neg" by increasing (used when the z-score is negative,
  ## i.e. enrichment for genes outside the set).
  genes <- data.frame(
    pos = tapply(set.data[, idcol], set.data[, 1], paste, collapse = ";"),
    neg = tapply(set.data[, idcol], set.data[, 1], function(x)
      paste(rev(x), collapse = ";")))
  allez.table$genes <- if (nrow(allez.table) > 0)
    genes[cbind(rownames(allez.table),
                ifelse(allez.table[, grep("z.score",
                  colnames(allez.table))[1]] > 0,
                  "pos", "neg"))] else character(0)
  if (isTRUE(in.set)) {
    ## Repeat the listing using only genes with positive gene scores.
    set.data <- set.data[set.data$gscores > 0, ]
    genes <- data.frame(
      pos = tapply(set.data[, idcol], set.data[, 1], paste, collapse = ";"),
      neg = tapply(set.data[, idcol], set.data[, 1], function(x)
        paste(rev(x), collapse = ";")))
    allez.table <- cbind(allez.table,
                         in.set = nc[rownames(allez.table)],
                         in.genes = if (nrow(allez.table) > 0)
                           genes[cbind(rownames(allez.table),
                                       ifelse(allez.table[, grep("z.score",
                                         colnames(allez.table))[1]] > 0,
                                         "pos", "neg"))] else character(0))
  }
  ## Strongest sets (largest mean gene score) first.
  allez.table[order(allez.table$set.mean, decreasing = TRUE), ]
}
|
e141c88ba9ba94df2ea23d39909e0594e95f2685
|
40e98bcc4d58a29c44594ee70132af4f90216a65
|
/hw4/min_span_tree.R
|
1a03d64535f731339e9cf549c1e7a9ebfea0b7ce
|
[] |
no_license
|
Sta523-Fa14/hw_examples
|
b0365ec7379a85a7f9d0ad2e333c8ff000f6c011
|
a862a6dfc252c42696a80cea95c620f6a76fdcd0
|
refs/heads/master
| 2016-09-05T23:33:41.418471
| 2014-11-24T18:00:07
| 2014-11-24T18:00:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 86
|
r
|
min_span_tree.R
|
# Stub minimum-spanning-tree routine: regardless of the input graph `g`,
# returns a list containing a single tree, itself a list with integer
# `edges` (edge 1) and numeric `weights` (weight 1).
min_span_tree <- function(g) {
  tree <- list(edges = 1L, weights = 1)
  list(tree)
}
|
9211c6168ba8de699f22dc2d55bcb42fa3e4226f
|
6b9a398030a320ca38a3ff8c11adbb235deddcdf
|
/manuscripts_etc/Manuscript Figures/Mixtures.R
|
237c5fc7cc032580fe4b5f90bc42a57ccaa5c449
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
alexholcombe/nStream
|
ffb9dc89eaec1222b957d44c634aad68282b8f51
|
fddf0ad89a5a2353f0f76fef70923500d6cad628
|
refs/heads/master
| 2020-05-23T08:09:21.402053
| 2019-12-12T04:35:40
| 2019-12-12T04:35:40
| 80,474,968
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,223
|
r
|
Mixtures.R
|
# Builds example mixture-model figures for RSVP serial-position-error (SPE)
# data: a guessing component plus an efficacious (Gamma or Normal) component.
# NOTE(review): `rm(list=ls())` wipes the caller's workspace -- avoid in
# shared scripts.
rm(list=ls())
library(ggplot2)
# createGuessingDistribution / areaOfGaussianBin / theme_apa come from the
# local mixRSVP package loaded here.
devtools::load_all('~/gitCode/mixRSVP/')
###Guessing Distribution bounds###
minSP <- 7    # earliest possible target serial position
maxSP <- 11   # latest possible target serial position
targetSP <- rep(minSP:maxSP, each = 20)
minSPE <- 1 - maxSP    # most negative possible SPE (24-item streams)
maxSPE <- 24 - minSP   # most positive possible SPE
SPE <- seq(minSPE, maxSPE, .1)  # fine grid of SPE values for plotting
###Guessing Probs###
# Normalized probability of each integer SPE under pure guessing.
guessingDist <- createGuessingDistribution(minSPE, maxSPE, targetSP,24)
guessingDist <- guessingDist/sum(guessingDist)
guessingDist <- data.frame(SPE=minSPE:maxSPE, prob = guessingDist)
##Bin the SPEs because guessing is the same within a bin##
guessingSPE <- c((min(SPE)):max(SPE))[cut(SPE,breaks = (min(SPE)):max(SPE) , include.lowest = T, labels = F)]
##assign a probability to an SPE based on its bin##
guessingFreqs <- sapply(guessingSPE,FUN = function(x){
  if(x %in% guessingDist$SPE){
    guessingDist$prob[guessingDist$SPE == x]
  } else {
    0
  }
}
)
# Gamma-shaped efficacious component (shape/scale chosen for illustration).
shape = 3
scale = .6
binAreasEfficacy<- sapply(SPE, areaOfGaussianBin, .1,shape,scale, 'Gamma')
binProb <- binAreasEfficacy/sum(binAreasEfficacy)
# Mixture: 75% efficacious responses, 25% guesses.
mixtureFreq = binProb*.75 + guessingFreqs * .25
data <- data.frame(SPE = SPE,
                   mixture = mixtureFreq,
                   guessing = guessingFreqs*.25,
                   efficacious = binProb*.75,
                   model = 'Gamma',
                   stringsAsFactors = F)
# Same construction with a Normal efficacious component (mean .8, sd .8).
binAreasEfficacy<- sapply(SPE, areaOfGaussianBin, .1,.8,.8,'Normal')
binProb <- binAreasEfficacy/sum(binAreasEfficacy)
mixtureFreq = binProb*.75 + guessingFreqs * .25
dataNorm <- data.frame(SPE = SPE,
                       mixture = mixtureFreq,
                       guessing = guessingFreqs*.25,
                       efficacious = binProb*.75,
                       model = 'Normal',
                       stringsAsFactors = F)
data = rbind(dataNorm, data)
# NOTE(review): SPESamples is created but never used below -- leftover?
SPESamples <- data.frame(SPE = numeric(300), model = rep(c('Normal', 'Gamma'), each = 150))
# Dashed line = full mixture; shaded areas = guessing and efficacious
# components; one facet per model.
mixturePlots <- ggplot(data, aes(x = SPE))+
  geom_line(aes(y = mixture), linetype = 2)+
  geom_area(aes(y = guessing), alpha = .5)+
  geom_area(aes(y = efficacious), alpha = .7)+
  theme_apa(base_size = 15)+
  facet_wrap(~model)+
  labs(y = 'Density')
ggsave(plot = mixturePlots, filename = '~/gitCode/nStream/manuscripts_etc/Manuscript Figures/ExampleMixtures.png', width = 8, height = 4.5, units = "in")
|
e5925556b5583ccbd2ebc1da89581e0f56b053c2
|
eca7e6e4e027cfb1fb4b3de0a05a30dabab285ba
|
/man/modCompare.Rd
|
8c66297709a7b847d921dff9cc6a5cde5dfbf15e
|
[] |
no_license
|
BagchiLab-Uconn/RSPPlme4
|
4076c68a0de603dd429a42c30d10d7a596d01536
|
9b92e37924cfb4f4749efbf4f335a86632dc5466
|
refs/heads/master
| 2023-08-17T17:12:40.959835
| 2023-08-09T12:25:48
| 2023-08-09T12:25:48
| 94,588,272
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 486
|
rd
|
modCompare.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modCompare.R
\name{modCompare}
\alias{modCompare}
\title{Compares the Deviances of Two Models.}
\usage{
modCompare(modsH1, modsH0)
}
\arguments{
\item{modsH1}{A more complex model of class \code{\link{klmer}}}
\item{modsH0}{A simpler (null) model of class \code{\link{klmer}}}
}
\value{
The difference in deviances between the null and complex model.
}
\description{
Compares the Deviances of Two Models.
}
|
2b9f5e523642c4398103f6d2ccf4114cf8b9bbd5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DAMisc/examples/panel.2cat.Rd.R
|
2ca18edfbb235f60d60cc5a7d048c03e00b5f8f9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 392
|
r
|
panel.2cat.Rd.R
|
# Extracted example code for DAMisc::panel.2cat (a lattice panel function
# drawing confidence intervals with capped bars on effects plots).
library(DAMisc)
### Name: panel.2cat
### Title: Lattice panel function for confidence intervals with capped bars
### Aliases: panel.2cat
### ** Examples
library(car)
library(lattice)
library(effects)
# Duncan occupational-prestige data; bin income into three categories so
# the income x type interaction can be plotted as discrete groups.
data(Duncan)
Duncan$inc.cat <- cut(Duncan$income, 3)
mod <- lm(prestige~ inc.cat * type + education,
          data=Duncan)
# Effect of the interaction, re-plotted with the capped-bar panel.
e1 <- effect("inc.cat*type", mod)
update(plot(e1), panel=panel.2cat)
|
376f97d221cd79d9f60b2dd0d1ce0b7da22c49e8
|
87bdf3725cc8bb122b670a53b0cdb366678d5d7c
|
/jsm_2020_app/server.R
|
37af3ff57d6d9b42f6704fc02d4df8e7736cfb05
|
[] |
no_license
|
brandonkopp/ASA-Presidents-Shiny
|
4b6e25c9e6cc07abae44c900251a65921c9e2c56
|
340d952a101ef9284727f7d7b03549bf7a7aa518
|
refs/heads/master
| 2023-03-27T08:22:26.111496
| 2020-07-24T16:53:34
| 2020-07-24T16:53:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,896
|
r
|
server.R
|
require(shiny)
server <- function(input, output, session) {
source("global.R", local=TRUE)
###### WELCOME ####################
observeEvent(input$glossary, {
showModal(modalDialog(
title = "Glossary",
includeHTML("./html/glossary.html"),
easyClose = TRUE,
footer = NULL
))
})
pres_dat <- reactive({
allspeeches[allspeeches$year == input$yearslider, ]
})
location_dat <- reactive({
if(nrow(pres_dat()) == 1){
locations[locations$location %in% unique(pres_dat()$location), ]
}
})
states_dat <- reactive({
map_data("state")
})
countries_dat <- reactive({
map_data("world") %>%
filter(region %in% c("USA","Canada"))
})
output$president_title <- renderUI({
if(!input$yearslider %in% missing_data_years){
HTML(paste0("<h3>", pres_dat()$president_name_forward, "</h3>"))
}
})
output$yeartext <- renderUI({
HTML(paste0("<h1 class='year_text small_margin'>", input$yearslider, "</h1>"))
})
output$no_meeting_warning <- renderUI({
if(input$yearslider %in% missing_data_years){
HTML(paste0("<h3 style='color:red' class='small_margin'>NO MEETING</h3>"))
}
})
output$conference_table <- renderUI({
if(!input$yearslider %in% missing_data_years){
HTML(paste0("<table class='content_table'>",
"<tr class='content_row'><td style='min-width:60px'><b>JSM #: </b></td><td>", pres_dat()$jsm_number," Annual</td></tr>",
"<tr class='content_row'><td><b>Dates: </b></td><td>", pres_dat()$jsm_dates,"</td></tr>",
ifelse(!is.na(pres_dat()$jsm_theme), paste0("<tr class='content_row'><td><b>Theme: </b></td><td>", pres_dat()$jsm_theme,"</td></tr>"),""),
"</table>"))
}
})
output$president_image <- renderUI({
if(!input$yearslider %in% missing_data_years){
imglink <- paste0("http://jsm2020.s3.amazonaws.com/images/",input$yearslider,".jpg")
tags$img(src = imglink, alt=input$presspeech, class="president_image")
}
})
output$president_table <- renderUI({
if(!input$yearslider %in% missing_data_years){
HTML(paste0("<table class='content_table'>",
"<tr class='content_row'><td style='min-width:120px'><b>Name: </b></td><td>", pres_dat()$president_name_forward,"</td></tr>",
"<tr class='content_row'><td><b>Affiliation: </b></td><td>", pres_dat()$affiliation,"</td></tr>",
"<tr class='content_row'><td><b>Pres #: </b></td><td>", pres_dat()$pres_number,"</td></tr>",
"<tr class='content_row'><td><b>Speech Title: </b></td><td>", pres_dat()$title,"</td></tr>",
"<tr class='content_row'><td><b>Speech Date: </b></td><td>", pres_dat()$date_of_address,"</td></tr>",
"</table>"))
}
})
output$location_image <- renderUI({
imglink <- location_dat()$image_url[[1]]
tags$img(src = imglink, style="border-radius:7px; width:100%")
})
output$location_map <- renderPlot(({
current_location <- location_dat()$location[[1]]
plot_location(locations, states_dat(), countries_dat(), current_location)
}))
output$image_attribution <- renderUI({
if(nrow(location_dat()) > 0){
HTML(paste0("<small class='text-muted'>Source: Flickr, Credit: <a href='", location_dat()$photographer_page,"'>",
location_dat()$image_credit,"</a>, License: ", location_dat()$image_license,"</small>"))
}
})
output$location_table <- renderUI({
if(nrow(location_dat()) > 0){
HTML(paste0("<table class='content_table'>",
"<tr class='content_row'><td style='min-width:120px'><b>Location: </b></td><td>", location_dat()$location,"</td></tr>",
"<tr class='content_row'><td><b>Times Hosting: </b></td><td>", location_dat()$num_hosting,"</td></tr>",
"<tr class='content_row'><td><b>Years Hosting: </b></td><td>", location_dat()$years,"</td></tr>",
"</table>"))
}
})
####################################
####### BY SPEECH FUNCTIONS ######
speech_text <- reactive({
allspeeches[allspeeches$yearpresident == input$presspeech, c("speechtext","category")]
})
speech_tdm <- reactive({
withProgress(message = 'Calculating Word Frequency', value = 0, {
make_tdm(speech_text())
})
})
speech_link_id <- reactive({
allspeeches$id[allspeeches$yearpresident == input$presspeech]
})
speech_year <- reactive({
allspeeches$year[allspeeches$yearpresident == input$presspeech]
})
speech_doi <- reactive({
allspeeches$doi[allspeeches$yearpresident == input$presspeech]
})
output$freqplot <- renderPlot({
withProgress(message = 'Building Most Used Terms Plot', value = 0, {
topncount(speech_tdm(), top=input$seltopn)
})
})
output$speechcloud <- renderPlot({
withProgress(message = 'Building Wordcloud', value = 0, {
suppressWarnings(cloud(speech_tdm(), words = input$selwords))
})
})
output$spsentplot <- renderPlot({
withProgress(message = 'Calculating Sentiment Analysis', value = 0, {
spsentgraph(speech_text()$speechtext[[1]])
})
})
output$sentcloud <- renderPlot({
withProgress(message = 'Building Sentiment Wordcloud', value = 0, {
sentiment <- allspeeches[allspeeches$yearpresident == input$presspeech, c("pos.words","neg.words")]
suppressWarnings(spsentcloud(sentiment, maxwords = input$selsentwords))
})
})
output$spcwords <- renderValueBox({
words <- allspeeches[allspeeches$yearpresident == input$presspeech, "tokens"][[1]]
valueBox(
value = formatC(words, digits = 0, format = "f"),
subtitle = "Number of Words",
icon = icon("book"),
color = if (words <= median(allspeeches$tokens, na.rm = T)) "green" else "red"
)
})
output$len_rank <- renderValueBox({
len_rank <- allspeeches[allspeeches$yearpresident == input$presspeech, "length_rank"][[1]]
valueBox(
value = formatC(len_rank, digits = 0, format = "f"),
subtitle = "Length Rank",
icon = icon("ruler"),
color = if (len_rank >= length(len_rank)/2) "green" else "red"
)
})
output$spcttr <- renderValueBox({
ttr <- allspeeches[allspeeches$yearpresident == input$presspeech, "ttr"][[1]]
valueBox(
value = formatC(ttr, digits = 3, format = "f"),
subtitle = "Type-Token Ratio (TTR)",
icon = icon("calculator"),
color = if (ttr <= median(allspeeches$ttr, na.rm=T)) "green" else "red"
)
})
output$spcfkgrade <- renderValueBox({
fkg <- allspeeches[allspeeches$yearpresident == input$presspeech, "fkage"][[1]]
valueBox(
value = formatC(fkg, digits = 1, format = "f"),
subtitle = "Flesch-Kincaid Age Score",
icon = icon("child"),
color = if (fkg <= median(allspeeches$fkage, na.rm = T)) "green" else "red"
)
})
output$presSpeechImage1 <- renderUI({
filename <- unique(allspeeches[allspeeches$yearpresident == input$presspeech, "year"])
imglink <- paste0("http://jsm2020.s3.amazonaws.com/images/",filename,".jpg")
tags$img(src = imglink, alt=input$presspeech, class="president_image")
})
output$pres_narrative <- renderUI({
HTML(speech_narrative(allspeeches, input$presspeech))
})
output$topic_models <- renderPlot({
topic_plot(topics, speech_link_id())
})
output$speechwordsincontext <- renderDataTable({
input$submitspeech
withProgress(message = 'Extracting Words in Context', value = 0, {
wordlist <- isolate(stringsplitter(input$speechword))
context(speech=speech_text()[ ,"speechtext"],wordlist)
})
}, options = list(
lengthMenu = list(c(10, 20, 30, -1), c('10', '20', '30', 'All')),
pageLength = 10))
output$newwords <- renderUI({
new_words <- entities[entities$id == speech_link_id(), "new_word"][[1]]
ents <- paste(sort(new_words), collapse = ", ")
HTML(ents)
})
output$newwordslabel <- renderText({
num_new_words <- length(entities[entities$id == speech_link_id(), "new_word"][[1]])
paste(num_new_words, "words/names never before used in an ASA address")
})
output$ent_people <- renderUI({
new_words <- entities[entities$id == speech_link_id(), "people"][[1]]
ents <- paste(sort(new_words), collapse = ", ")
HTML(ents)
})
output$ent_people_label <- renderText({
num_new_words <- length(entities[entities$id == speech_link_id(), "people"][[1]])
paste0("People (", num_new_words, ")")
})
output$ent_groups <- renderUI({
new_words <- entities[entities$id == speech_link_id(), "groups"][[1]]
ents <- paste(sort(new_words), collapse = ", ")
HTML(ents)
})
output$ent_groups_label <- renderText({
num_new_words <- length(entities[entities$id == speech_link_id(), "groups"][[1]])
paste0("Groups (", num_new_words, ")")
})
observeEvent(input$showspeech, {
showModal(modalDialog(
title = "ASA Presidential Address",
HTML(paste0("<iframe width='775' height='575' src='https://jsm2020.s3.amazonaws.com/pdfs/",
speech_year(), ".pdf'></iframe>")),
easyClose = TRUE,
footer = HTML(paste0("<small class='text-muted'>Source: <a href='",speech_doi(),
"' target='_blank' rel='nofollow'>",speech_doi(),"</a></small>"))
))
})
observeEvent(input$show_word_loadings, {
showModal(modalDialog(width=1000,
title = "Word/Topic Associations",
HTML(paste0("<p>The following plot shows the top 10 words associated with ",
"each topic. These associations were used to choose the generic ",
"topic label.","</p>")),
HTML("<img style='width:100%' src='word_topic_associations.png'>"),
easyClose = TRUE,
footer = NULL
))
})
#######################################
###### COMPARE SPEECHES FUNCTIONS #####
  # Interactive scatterplot comparing speeches on user-selected x/y metrics,
  # colored/shaped by a categorical variable. The three switch() tables map
  # the human-readable UI labels to column names in `allspeeches` / topic data.
  output$speechscatter <- renderPlotly({
    # x-axis metric (includes "Year"; the y-axis list below does not).
    xInSp <- switch(input$xvarspeech, 
                    "Number of Words" = "tokens",
                    "Number of Unique Words"="types",
                    "Average Sentence Length"="avg.sentc.length",
                    "Average Word Length"="avg.word.length",
                    "Length Rank" = "length_rank",
                    "Type Token Ratio (TTR)" = "ttr",
                    "Flesch-Kincaid Grade" = "fkgrade",
                    "Flesch-Kincaid Age" = "fkage",
                    "Sentiment" = "polarity",
                    "Number of Unique Positive Words"="num_pos_words",
                    "Number of Unique Negative Words"="num_neg_words",
                    "Year" = "year",
                    "Topic Loading for Government" = "topic_1",
                    "Topic Loading for Economics" = "topic_2",
                    "Topic Loading for Science" = "topic_3",
                    "Topic Loading for Education" = "topic_4",
                    "Topic Loading for Technology" = "topic_5",
                    "Topic Loading for Health" = "topic_6",
                    "Topic Loading for Surveys" = "topic_7",
                    "Topic Loading for Profession" = "topic_8",
                    "Topic Loading for Business" = "topic_9"
    )
    # y-axis metric.
    yInSp <- switch(input$yvarspeech, 
                    "Number of Words" = "tokens",
                    "Number of Unique Words"="types",
                    "Average Sentence Length"="avg.sentc.length",
                    "Average Word Length"="avg.word.length",
                    "Length Rank" = "length_rank",
                    "Type Token Ratio (TTR)" = "ttr",
                    "Flesch-Kincaid Grade" = "fkgrade",
                    "Flesch-Kincaid Age" = "fkage",
                    "Sentiment" = "polarity",
                    "Number of Unique Positive Words"="num_pos_words",
                    "Number of Unique Negative Words"="num_neg_words",
                    "Topic Loading for Government" = "topic_1",
                    "Topic Loading for Economics" = "topic_2",
                    "Topic Loading for Science" = "topic_3",
                    "Topic Loading for Education" = "topic_4",
                    "Topic Loading for Technology" = "topic_5",
                    "Topic Loading for Health" = "topic_6",
                    "Topic Loading for Surveys" = "topic_7",
                    "Topic Loading for Profession" = "topic_8",
                    "Topic Loading for Business" = "topic_9"
    )
    # Grouping variable driving point color and shape.
    cInSp <- switch(input$cvarspeech, 
                    "Affiliation" = "category",
                    "Gender"="gender",
                    "Era" = "period",
                    "Main Topic"="topic_label"
    )
    # Row 107 is excluded — presumably a known outlier/duplicate speech;
    # TODO confirm against the data source.
    df <- allspeeches[-107 ,c("president","year","yearpresident","id","category","gender",
                              "tokens","types","ttr","fkgrade","fkage","avg.sentc.length","avg.word.length",
                              "polarity","num_neg_words","num_pos_words","period","length_rank")]
    # Attach topic loadings and map each speech's dominant topic number to its
    # human-readable label via the crosswalk table.
    df <- left_join(df, topics) %>%
      mutate(main_topic = paste0("topic_", main_topic)) %>% 
      left_join(topic_crosswalk, by=c("main_topic"="topic"))
    # aes_string() is used so the switch()-selected column names can be passed
    # as strings (NOTE: aes_string is soft-deprecated in newer ggplot2).
    g <- ggplot(df,aes_string(x=xInSp, y=yInSp)) +
      geom_point(aes_string(color=cInSp,pch=cInSp, text="yearpresident"),size=2, alpha=0.5) + 
      labs(
        x = input$xvarspeech,
        y = input$yvarspeech,
        color = input$cvarspeech,
        pch= input$cvarspeech
      ) +
      theme(plot.title = element_blank(),
            plot.background = element_rect(fill = 'white', colour = 'white'),
            panel.border = element_rect(fill = NA, color = 'white', size = 2),
            panel.background = element_rect(fill = 'white', colour = 'white'),
            panel.grid.major = element_line(colour = "grey79", size=.3, linetype = 3),
            panel.grid.minor = element_blank(),
            axis.text = element_text(size = 10, color="black", face="bold"),
            axis.title = element_text(size = 12, face="bold", color = "black"),
            axis.ticks = element_blank(),
            axis.line = element_line(colour = "black", size=1),
            legend.background = element_blank(),
            legend.key = element_blank(),
            legend.text = element_text(size = 10, color= "black"),
            legend.title = element_text(size = 12,face="bold"),
            legend.position = "right") +
      guides(colour = guide_legend(override.aes = list(size=4)),
             pch = guide_legend(override.aes = list(size=4)))
    # Convert to plotly; tooltip shows the "year — president" string only.
    (gg <- ggplotly(g, tooltip = "text", height=550, width=850))
  })
############################################
###### WORDS THROUGH TIME FUNCTIONS ##########
  # Words-through-time plot. Referencing input$submit (value unused) makes the
  # render re-fire whenever the submit button is pressed.
  output$timeplot <- renderPlot({
    input$submit
    plotTimeplot(for_print=FALSE)
  })
plotTimeplot <- function(for_print=FALSE) {
colorIn <- switch(input$time_colorvar,
"Affiliation" = "category",
"Gender"="gender",
"None"=FALSE,
)
withProgress(message = 'Building Word Through Time Plot', value = 0, {
wordlist <- isolate(stringsplitter(input$words))
words_over_time(df=allspeeches,words = wordlist, stop=TRUE, colorvar=colorIn,
leg=input$time_colorvar, scale=input$scale, forprint=for_print)
})
}
  # Download handler for the words-through-time plot. The custom `device`
  # closure deliberately ignores the width/height ggsave passes in and fixes
  # the output at 2500x2200 px @ 200 dpi.
  output$download_timeplot = downloadHandler(
    filename = 'words_through_time.png',
    content = function(file) {
      device <- function(..., width, height) {
        grDevices::png(..., width = 2500, height = 2200,
                       res = 200, units = "px")
      }
      ggsave(file, plot = plotTimeplot(for_print=TRUE), device = device)
    })
  # Topics-through-time plot (screen version).
  output$topicplot <- renderPlot({
    plotTopicplot(for_print=FALSE)
  })
plotTopicplot <- function(for_print=FALSE) {
dataIn <- switch(input$select_topic,
"Government" = "topic_1",
"Economics" = "topic_2",
"Science" = "topic_3",
"Education" = "topic_4",
"Technology" = "topic_5",
"Health" = "topic_6",
"Surveys" = "topic_7",
"Profession" = "topic_8",
"Business" = "topic_9"
)
colorIn <- switch(input$topic_colorvar,
"Affiliation" = "category",
"Gender"="gender",
"None"=FALSE,
)
withProgress(message = 'Building Topics Through Time Plot', value = 0, {
wordlist <- isolate(stringsplitter(input$words))
topics_over_time(topic = dataIn, topic_title=input$select_topic, colorvar=colorIn,
legend_title=input$topic_colorvar, forprint=for_print)
})
}
  # Download handler for the topics-through-time plot; same fixed-size PNG
  # device trick as download_timeplot (ggsave's width/height are ignored).
  output$download_topicplot = downloadHandler(
    filename = 'topics_through_time.png',
    content = function(file) {
      device <- function(..., width, height) {
        grDevices::png(..., width = 2500, height = 2200,
                       res = 200, units = "px")
      }
      ggsave(file, plot = plotTopicplot(for_print=TRUE), device = device)
    })
  # Big decade heading, e.g. "1950s", driven by the decade slider.
  output$decadetext <- renderUI({
    HTML(paste0("<h1 class='decade_text small_margin'>", input$decadeslider, "s</h1>"))
  })
  # Term-document matrix for the selected decade, with one "document" per
  # affiliation category (speeches collapsed by `category`); speeches with
  # Unknown affiliation are dropped. Shared by the two decade plots below.
  decadetdm <- reactive({
    make_tdm(allspeeches[allspeeches$decade == input$decadeslider & allspeeches$category != "Unknown",
                         c("speechtext","category")], collapse_var="category")
  })
  # Top-20 word frequency plot for the selected decade.
  output$decadefreqplot <- renderPlot({
    withProgress(message = 'Building Decade Frequency Plot', value = 0, {
      topncount(decadetdm(), top=20)
    })
  })
  # Comparison wordcloud across affiliation categories; wordcloud emits
  # "could not fit" warnings for crowded layouts, hence suppressWarnings.
  output$decadecompcloud <- renderPlot({
    withProgress(message = 'Building Decade Wordcloud', value = 0, {
      suppressWarnings(compcloud(decadetdm(),max=450))
    })
  })
#######################################
}
|
75d7933fcce04ccc9179a2620ffa69cfdd48f7d0
|
0921a01e8b564edb12d217c0dfdba580f5f58964
|
/run_analysis.R
|
fc35d0dd7f9e21080b129cad222bb48e704285eb
|
[] |
no_license
|
hlopezo/Getting-and-Cleaning-Data-Course-Project
|
125f4c4ab5ad6cb3ed17425980402fe75720f986
|
d0620fb3009d29407b4ca9b581d0f1f359b22c6c
|
refs/heads/master
| 2022-11-26T06:29:40.529562
| 2020-07-13T20:24:53
| 2020-07-13T20:24:53
| 279,200,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,251
|
r
|
run_analysis.R
|
# run_analysis.R -- Getting and Cleaning Data course project.
# Reads the UCI HAR data set, merges train/test, keeps mean/std measurements,
# applies descriptive names, and writes per-subject/activity averages.

# Clear the workspace so reruns start clean (kept from the original script;
# generally discouraged inside reusable scripts).
rm(list=ls())

# Feature names (column 2) and activity code/label lookup.
features <- read.table("C:/Users/Henry/Desktop/Henry/UCI HAR Dataset/Getting-and-Cleaning-Data-Course-Project/features.txt")
activities <- read.table("C:/Users/Henry/Desktop/Henry/UCI HAR Dataset/Getting-and-Cleaning-Data-Course-Project/activity_labels.txt")

# --- Training data: measurements + activity codes + subject ids -------------
x_train <- read.table("C:/Users/Henry/Desktop/Henry/UCI HAR Dataset/Getting-and-Cleaning-Data-Course-Project/train/X_train.txt")
colnames(x_train) <- features$V2  # second column of features.txt holds the names
y_train <- read.table("C:/Users/Henry/Desktop/Henry/UCI HAR Dataset/Getting-and-Cleaning-Data-Course-Project/train/y_train.txt")
x_train$activity <- y_train$V1
subject_train <- read.table("C:/Users/Henry/Desktop/Henry/UCI HAR Dataset/Getting-and-Cleaning-Data-Course-Project/train/subject_train.txt")
x_train$subject <- factor(subject_train$V1)

# --- Test data, prepared the same way ---------------------------------------
x_test <- read.table("C:/Users/Henry/Desktop/Henry/UCI HAR Dataset/Getting-and-Cleaning-Data-Course-Project/test/X_test.txt")
colnames(x_test) <- features$V2
y_test <- read.table("C:/Users/Henry/Desktop/Henry/UCI HAR Dataset/Getting-and-Cleaning-Data-Course-Project/test/y_test.txt")
x_test$activity <- y_test$V1
subject_test <- read.table("C:/Users/Henry/Desktop/Henry/UCI HAR Dataset/Getting-and-Cleaning-Data-Course-Project/test/subject_test.txt")
x_test$subject <- factor(subject_test$V1)

# STEP 1: merge the training and the test sets into one data set.
dataset <- rbind(x_test, x_train)

# STEP 2: extract only the measurements on the mean and standard deviation.
library(dplyr)
colnames(dataset)  # diagnostic: inspect names before selection
TidyData <- dataset %>% select(subject, activity, contains("mean"), contains("std"))
colnames(TidyData)  # diagnostic: confirm the selected columns

# STEP 3: use descriptive activity names (replace numeric code with label).
TidyData$activity <- activities[TidyData$activity, 2]

# STEP 4: label the data set with descriptive variable names. The "()" in the
# mean/std patterns is an empty regex group, so e.g. "-mean()" matches "-mean".
names(TidyData) <- gsub("Acc", "Accelerometer", names(TidyData))
names(TidyData) <- gsub("Gyro", "Gyroscope", names(TidyData))
names(TidyData) <- gsub("BodyBody", "Body", names(TidyData))
names(TidyData) <- gsub("Mag", "Magnitude", names(TidyData))
names(TidyData) <- gsub("^t", "Time", names(TidyData))
names(TidyData) <- gsub("^f", "Frequency", names(TidyData))
names(TidyData) <- gsub("tBody", "TimeBody", names(TidyData))
names(TidyData) <- gsub("-mean()", "Mean", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("-std()", "STD", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("-freq()", "Frequency", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("angle", "Angle", names(TidyData))
names(TidyData) <- gsub("gravity", "Gravity", names(TidyData))
names(TidyData)  # diagnostic: final variable names

# STEP 5: independent tidy data set with the average of each variable for
# each activity and each subject. (funs() is deprecated since dplyr 0.8;
# summarise_all(mean) is the supported equivalent.)
Data <- TidyData %>%
  group_by(subject, activity) %>%
  arrange(activity) %>%
  summarise_all(mean)
names(Data)  # diagnostic
write.table(Data, "FinalData.txt", row.name=FALSE)
|
091f91add47fbaddaef1d783def8bfdbfafcd206
|
2468dbf1813bee70de399dd77cd3d78d27ec5694
|
/Phase enrichment.R
|
cd2c4e206cde874d2ddc1f05da74ca4d2e99ae21
|
[] |
no_license
|
Nagel-lab/Heat_stress_translatome
|
43f7940349902eaea81ce3ac8852ed3e42e6ce65
|
51c3ef1ac0be15c133f6cb0e3c1f14abbc497750
|
refs/heads/main
| 2023-03-08T05:49:16.703830
| 2021-02-24T22:45:52
| 2021-02-24T22:45:52
| 342,018,308
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,316
|
r
|
Phase enrichment.R
|
# This script analyzes phase enrichment for a list of DRGs, as compared to
# circadian total mRNAs.

results_TOT <- read.table("total_cycling.txt", header = TRUE)
phases_TOT <- results_TOT[, c(1, 5)]
names(phases_TOT) <- c("AGI", "LAG")

# Replace phase 25.5 by 1.5 and 24 by 0 to avoid duplicating phases 0 and 1.5.
library(stringr)
phases_TOT$LAG <- str_replace_all(phases_TOT$LAG, '25.5', '1.5')
phases_TOT$LAG <- str_replace_all(phases_TOT$LAG, '24', '0')
ls.str(phases_TOT)  # diagnostic: check column types before coercion
phases_TOT$LAG <- as.numeric(phases_TOT$LAG)

# Count the number of transcripts by phase in the reference.
library(plyr)
library(dplyr)
# NOTE: dplyr is attached after plyr, so a bare count() resolves to
# dplyr::count(), whose `vars = "LAG"` argument would create a constant
# column instead of tallying by the LAG column. Qualify the call so the
# intended plyr frequency table (LAG, freq) is produced regardless of
# package attach order.
phases_TOT_Freq <- plyr::count(phases_TOT, vars = "LAG")
names(phases_TOT_Freq) <- c("Phase", "TOT")

# Count the number of transcripts by phase in the uploaded list.
# Requires the list of genes of interest (`upload`, with an AGI column —
# assumed to be its only column; verify before running) to be imported first.
phases_upload <- merge.data.frame(upload, phases_TOT, by = "AGI")
phases_upload <- phases_upload[, c(1, 2)]
phases_upload_Freq <- plyr::count(phases_upload, vars = "LAG")
names(phases_upload_Freq) <- c("Phase", "Upload")

# Merge the two frequency tables on phase.
phases_summary <- Reduce(function(x, y) merge(x = x, y = y, by = "Phase"),
                         list(phases_TOT_Freq, phases_upload_Freq))

# Number of genes NOT at each phase (row-wise complements of the totals).
phases_summary$TOT.not <- (sum(phases_summary$TOT) - phases_summary$TOT)
phases_summary$Upload.not <- (sum(phases_summary$Upload) - phases_summary$Upload)

# Phase enrichment: proportion in the upload relative to the reference.
phases_summary$TOT.Prop <- (phases_summary$TOT) / (sum(phases_summary$TOT))
phases_summary$Upload.Prop <- (phases_summary$Upload) / (sum(phases_summary$Upload))
phases_summary$Enrich <- (phases_summary$Upload.Prop) / (phases_summary$TOT.Prop)

# Chi-square test per phase for significance of enrichment.
list.y.var <- unique(phases_summary$Phase)
# Preallocate and index by POSITION. The original looped over the phase
# values themselves as list indices, which fails for phase 0
# (`Upload[[0]]` is an error) and truncates fractional phases like 1.5,
# silently overwriting entries. Also run chisq.test() once per phase
# instead of twice.
Upload <- setNames(vector("list", length(list.y.var)), list.y.var)
result <- setNames(vector("list", length(list.y.var)), list.y.var)
for (j in seq_along(list.y.var)) {
  sub <- phases_summary[phases_summary$Phase == list.y.var[j], c(2, 4, 3, 5)]
  names(sub) <- rep(c("N", "non_N"), 2)
  # Stack into a 2x2 contingency table: rows = reference/upload,
  # columns = at-this-phase / not-at-this-phase.
  sub <- as.matrix(rbind(sub[, 1:2], sub[, 3:4]))
  khi2 <- chisq.test(sub)
  result[[j]] <- khi2
  Upload[[j]] <- khi2$p.value
}
# Create a table with the results: one p-value per phase.
# (The original referenced an undefined object `Chi_square_phases_up`.)
Chi_square <- as.matrix(unlist(Upload))
|
49c67b325cb2e42f3694eb1ed2794af366d4056b
|
40c01ced1dd4fefa82825819258d07fc4f21e7f6
|
/man/ci2crit.Rd
|
b81857f1f83a273f942104b8fb3c894c99f7cb59
|
[
"MIT"
] |
permissive
|
jeksterslabds/jeksterslabRboot
|
dcfef61e25da41b124be53e9492d714af3932372
|
06b3dd3c2ac5ddb0c9791c13b1dfc719522dc6b0
|
refs/heads/master
| 2022-11-21T17:41:11.098857
| 2020-07-16T07:24:59
| 2020-07-16T07:24:59
| 276,638,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,205
|
rd
|
ci2crit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alpha2crit.R
\name{ci2crit}
\alias{ci2crit}
\title{Confidence Intervals to Critical Values}
\usage{
ci2crit(
ci = c(0.999, 0.99, 0.95),
dist = "z",
two.tailed = TRUE,
right.tail = TRUE,
...
)
}
\arguments{
\item{ci}{Numeric vector.
Confidence interval.
By default,
\code{ci} is set to conventional
confidence intervals
\code{ci = c(0.999, 0.99, 0.95)}.}
\item{dist}{Character string.
\code{dist = "z"} for the standard normal distribution.
\code{dist = "t"} for the t distribution.
\code{dist = "F"} for the F distribution.
\code{dist = "chisq"} for the chi-square distribution.}
\item{two.tailed}{Logical.
If \code{TRUE}, two-tailed alpha.
If \code{FALSE}, one-tailed alpha.
Ignored if \code{dist = "F"} or \code{dist = "chisq"}
as both tests are one-tailed using the right tail.}
\item{right.tail}{Logical.
If \code{TRUE}, right tail (positive critical value).
If \code{FALSE}, left tail (negative critical value).
Ignored if \code{two.tailed = TRUE}.
Ignored if \code{dist = "F"} or \code{dist = "chisq"}
as both tests are one-tailed using the right tail.}
\item{...}{Degrees of freedom.
\code{df} for \code{dist = "t"} and \code{dist = "chisq"}.
\code{df1} and \code{df2} for \code{dist = "F"}.}
}
\value{
Returns
\eqn{z}, \eqn{t}, \eqn{\chi^2}, or \eqn{F}
critical value/s
associated with
the specified confidence interval/s.
The results are sorted from smallest to largest.
}
\description{
Calculates the \eqn{z}, \eqn{t}, \eqn{\chi^2}, or \eqn{F}
critical value/s
of confidence limits
associated with the specified confidence interval/s.
}
\examples{
# z two-tailed
## vector
ci2crit(
ci = c(
0.999,
0.99,
0.95
),
)
## single numeric value
ci2crit(ci = 0.95)
# t two-tailed
## vector
ci2crit(
ci = c(
0.999,
0.99,
0.95
),
dist = "t",
df = 1000
)
## single numeric value
ci2crit(
ci = 0.95,
dist = "t",
df = 1000
)
# z one-tailed right
## vector
ci2crit(
ci = c(
0.999,
0.99,
0.95
),
dist = "z",
two.tailed = FALSE
)
# t one-tailed right
## vector
ci2crit(
ci = c(
0.999,
0.99,
0.95
),
dist = "t",
two.tailed = FALSE,
df = 5
)
# F one-tailed
## vector
ci2crit(
ci = c(
0.999,
0.99,
0.95
),
dist = "F",
two.tailed = FALSE,
df1 = 2,
df2 = 2
)
# chisq one-tailed
## vector
ci2crit(
ci = c(
0.999,
0.99,
0.95
),
dist = "chisq",
two.tailed = FALSE,
df = 1
)
# z one-tailed left
## vector
ci2crit(
ci = c(
0.999,
0.99,
0.95
),
dist = "z",
two.tailed = FALSE,
right.tail = FALSE
)
# t one-tailed left
## vector
ci2crit(
ci = c(
0.999,
0.99,
0.95
),
dist = "t",
two.tailed = FALSE,
right.tail = FALSE,
df = 5
)
}
\references{
\href{https://en.wikipedia.org/wiki/Statistical_significance}{Wikipedia: Statistical significance}
\href{https://en.wikipedia.org/wiki/Confidence_interval}{Wikipedia: Confidence interval}
}
\seealso{
Other alpha functions:
\code{\link{alpha2crit}()},
\code{\link{alpha2prob}()},
\code{\link{ci2prob}()},
\code{\link{nhstplot}()}
}
\author{
Ivan Jacob Agaloos Pesigan
}
\concept{alpha functions}
\keyword{alpha}
|
da228b948b6546ba6d44a229dd8c7315a52fd8c6
|
4cb5426e8432d4af8f6997c420520ffb29cefd3e
|
/P74.R
|
750d243b3bd914c49549308cb3b8af61f9d9edaf
|
[
"CC0-1.0"
] |
permissive
|
boyland-pf/MorpheusData
|
8e00e43573fc6a05ef37f4bfe82eee03bef8bc6f
|
10dfe4cd91ace1b26e93235bf9644b931233c497
|
refs/heads/master
| 2021-10-23T03:47:35.315995
| 2019-03-14T21:30:03
| 2019-03-14T21:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,608
|
r
|
P74.R
|
# making table data sets for benchmark P74
library(dplyr)
library(tidyr)
library(MorpheusData)

#############benchmark 1
#How to solve this could be our future work.
dat <- read.table(text=
"ID   MGW.one MGW.two HEL.one HEL.two
A     10.00   19      12      13.00
B     -13.29  13      12      -0.12
C     -6.95   10      15      4.00
", header=T)

#dat <- read.table(text=
#"ID   MGW.one MGW.two HEL.mean
#A     10.00   19.00   19
#B     -13.29  13.00   13
#C     -6.95   10.00   10
#", header=T)

write.csv(dat, "data-raw/p74_input1.csv", row.names=FALSE)

# Wide -> long, split "<label>.<num>" keys, average per (ID, label), and
# spread back to one column per label.
df_out = dat %>% gather(key, value, -`ID`) %>%
  separate(key, into = c("label", "num")) %>%
  group_by(ID, label) %>%
  summarise(mean = mean(value)) %>%
  spread(label, mean)

# df_out = dat %>% select(-`HEL.mean`) %>% gather(key, value, -`ID`) %>%
#   group_by(ID) %>%
#   summarise(mean = mean(value))

write.csv(df_out, "data-raw/p74_output1.csv", row.names=FALSE)

# Coerce factor columns to character and integer columns to numeric so the
# saved .rdata objects have stable types. List subsetting (`df[cols]`) plus
# lapply() is used instead of `df[, cols]` + sapply(): with exactly one
# matching column the latter drops to a bare vector and sapply() would then
# iterate over its elements rather than over columns.
normalize_types <- function(df) {
  fctr.cols <- vapply(df, is.factor, logical(1))
  int.cols <- vapply(df, is.integer, logical(1))
  df[fctr.cols] <- lapply(df[fctr.cols], as.character)
  df[int.cols] <- lapply(df[int.cols], as.numeric)
  df
}

p74_output1 <- normalize_types(read.csv("data-raw/p74_output1.csv", check.names = FALSE))
save(p74_output1, file = "data/p74_output1.rdata")

p74_input1 <- normalize_types(read.csv("data-raw/p74_input1.csv", check.names = FALSE))
save(p74_input1, file = "data/p74_input1.rdata")
|
0578cb9b17f5d661d0fe42c3225f311c4a8b27bc
|
8824061cab2431fb2421dd20675ecd02f1900313
|
/AIPS-PCA.R
|
b50b41067b7f2b2cf06e09a682d6e7b2568fdf44
|
[] |
no_license
|
biomedicaldatascience/AIPS
|
20e1f732eeb56941a3188196fec7c5b5c49c81b8
|
080b59e16d66e835688606e786113268a75fd15e
|
refs/heads/master
| 2021-01-03T11:54:21.618166
| 2020-02-12T18:46:48
| 2020-02-12T18:46:48
| 240,072,812
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,898
|
r
|
AIPS-PCA.R
|
# AIPS.pc computes PC scores using either "eigen" or "svd".
#
# infile:  PLINK .raw export (additive coding). When merging data in PLINK,
#          the data with Ancestry Informative Markers (AIMs) should be in the
#          first part of the merged file; columns 1-6 are PLINK metadata.
# K:       number of PCs to keep (NULL = all).
# method:  "eigen" or "svd".
# outplot: path for a PNG scree plot of the top 10 eigenvalues.
#
# Returns a list: snp.weights (eigenvectors), eval (eigenvalues),
# pcs.discovery (FID/IID + scores), adj.discovery (per-marker mean/sd).
AIPS.pc <- function(infile, K=NULL, method="eigen", outplot) {
  # Validate method up front so a typo fails with a clear message instead of
  # an "object 'evec' not found" error further down.
  method <- match.arg(method, c("eigen", "svd"))

  plink.dta <- read.table(infile, header = TRUE)
  X <- plink.dta[, 7:length(plink.dta)]
  p <- length(plink.dta) - 6  # number of markers
  n <- nrow(plink.dta)        # number of samples

  # Per-marker mean and sd; mean-impute missing genotype values.
  x_mean <- rep(NA_real_, p)
  x_sd <- rep(NA_real_, p)
  for (j in seq_len(p)) {
    x_mean[j] <- mean(X[, j], na.rm = TRUE)
    x_sd[j] <- sd(X[, j], na.rm = TRUE)
    X[is.na(X[, j]), j] <- x_mean[j]
  }
  adj.std <- cbind(x_mean, x_sd)

  X <- as.matrix(X)
  # Standardize each marker: subtract its mean, divide by its sd.
  # NOTE(review): a monomorphic marker (sd == 0) yields NaN/Inf here; such
  # markers should be filtered upstream — confirm.
  vec1_sample <- rep(1, n)
  X_sig <- vec1_sample %*% t(x_sd)
  X_std <- (X - vec1_sample %*% t(x_mean)) / X_sig

  # Project the standardized data onto the first K eigenvectors
  # (all of them when K is NULL).
  project <- function(evec) {
    if (is.null(K)) X_std %*% evec else X_std %*% evec[, 1:K]
  }

  # Decompose whichever matrix is smaller: the p x p covariance when
  # n >= p, or the n x n cross-product when n < p.
  if (n >= p) {
    if (method == "eigen") {
      r <- eigen(cov(X_std))
      evec <- r$vectors
      eval <- r$values
    } else {
      r <- svd(X_std)
      evec <- r$v
      # Eigenvalues of the sample covariance are d^2 / (n - 1): the sample
      # covariance divides by (number of observations - 1), i.e. n - 1.
      # (The original divided by sqrt(p - 1), which only agrees when n == p.)
      sdev <- r$d / sqrt(n - 1)
      eval <- sdev * sdev
    }
  } else {
    if (method == "eigen") {
      # NOTE(review): cov(t(X_std)) scales eigenvalues by 1/(p - 1), which is
      # not directly comparable with the other branches; the eigenvector
      # directions (and hence the scores) are unaffected.
      r <- eigen(cov(t(X_std)))
      eval <- r$values
      evec <- t(X_std) %*% r$vectors
    } else {
      r <- svd(t(X_std))
      evec <- t(X_std) %*% r$v
      sdev <- r$d / sqrt(n - 1)  # covariance denominator is n - 1 (see above)
      eval <- sdev * sdev
    }
  }
  scores <- project(evec)

  score0 <- cbind(plink.dta[, 1:2], scores)  # keep FID/IID alongside scores

  # Scree plot of the top 10 eigenvalues, to help choose K.
  png(outplot)
  plot(c(1:10), eval[1:10], xaxt = "n", type = "o",
       xlab = "Order of Eigenvalues", ylab = "Eigenvalues")
  title(main = "Plot of Top Eigenvalues", col.main = "black", font.main = 1)
  dev.off()

  # Return the results as a list
  list(snp.weights = evec, eval = eval, pcs.discovery = score0, adj.discovery = adj.std)
}
# Run PCA on the discovery panel (top 10 PCs via SVD) and save the scores
# and eigenvalues for downstream use.
W <- AIPS.pc(infile="outputA.raw", K=10, method="svd",outplot="output.png")
score0 <- W$pcs.discovery
eigenvalues <- W$eval
save(score0,eigenvalues,file="pca.euro.RData")
|
9daff7661301911300e3c79318f60a8d24fc42db
|
b4007c30747e4213f7540294c59c77b8f72e3ab8
|
/E3_script_two_conf.R
|
6e1ebdf7af1496f1f706b443ef39ab75e6a1c739
|
[] |
no_license
|
noraracht/kraken_scripts
|
412245cee06e434ba3667b22a638a8b1aff2c46d
|
b43ef75eb2d0c7dfae06930ab64a838e5a0764f2
|
refs/heads/master
| 2020-07-06T17:01:57.971381
| 2020-04-13T15:27:48
| 2020-04-13T15:27:48
| 203,085,698
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,033
|
r
|
E3_script_two_conf.R
|
# Use library() (not require()) for mandatory dependencies so a missing
# package fails loudly instead of returning FALSE and erroring later.
library(ggplot2); library(scales); library(reshape2)
getwd()
setwd("/Users/admin/Documents/Skimming/tree_of_life/dros_contam_test")

# Read raw contamination results and melt the Drosophila columns to long form.
d <- read.csv('Drosophila_contam_both_species3.csv')
print(d)
dm <- melt(d[, c(1, 2, grep(pattern = "*Dros*", names(d)))], id.vars = 1:2)

# Label genome-size pairs from the sample name embedded in `variable`.
dm$Pair <- ""
dm[grep("sim_WXD1", dm$variable), "Pair"] <- "Small"
dm[grep("sech_plant", dm$variable), "Pair"] <- "Medium"
dm[grep("yakuba", dm$variable), "Pair"] <- "Large"

# Kraken-filtered runs carry "ucseq" in the column name; the numeric suffix
# encodes the k-mer length (".1"/".2"/".3" — note `.` matches any character,
# assumed safe for these column names).
dm$Kraken <- grepl("*ucseq*", dm$variable)
dm$k <- 35
dm[grep(".1$", dm$variable), "k"] <- 32
dm[grep(".2$", dm$variable), "k"] <- 29
dm[grep(".3$", dm$variable), "k"] <- 28
dm[!dm$Kraken, "k"] <- "None"
names(dm)
print(dm)

# Attach the uncontaminated (cl == 0, bin "00-00", unfiltered) distance as the
# reference D for each pair, then compute relative error of each estimate.
dm <- merge(dm, dm[!dm$Kraken & dm$cl == 0 & dm$bin == "00-00", c(5, 4)], by.x = "Pair", by.y = "Pair")
print(dm)
names(dm)[8] <- "D"
names(dm)[5] <- "est"
dm$error <- with(dm, (est - D) / D)
print(dm)
write.csv(dm, "/Users/admin/Documents/Skimming/tree_of_life/dros_contam_test/Drosophila_contam_both_species_formatted.csv", row.names = FALSE)

# This code should be used on formatted input.
# Install readxl only if it is missing: unconditionally calling
# install.packages() on every run hits the network and can fail
# non-interactively.
if (!requireNamespace("readxl", quietly = TRUE)) install.packages("readxl")
library(readxl)
dm <- read_excel('Drosophila_contam_both_species_formatted_withAlpha.xls')
print(dm)

# Relative error vs contamination level, faceted by D and k.
qplot(cl/100,error,data=dm[dm$k != 29,],linetype=as.factor(confidence),shape=bin,color=Kraken)+geom_line()+
  facet_grid(D~k,scales="free_y")+
  scale_y_continuous(labels=percent,name="Relative error in Skmer distance")+scale_x_continuous(labels=percent,name=expression("Contamination level"~c[l]))+
  scale_color_brewer(name="Filtering", palette = "Set2")+
  scale_shape_manual(name="M",values=c(5,3,4,15))+
  scale_linetype_manual(name="Filtering",values=c(1,2,3))+
  theme_classic() +theme(panel.border = element_rect(fill=NA,size = 1), legend.position = c(.449,.91),legend.direction = "horizontal",panel.grid.major.y = element_line(linetype = 1,size=0.24,color="gray"))

# Same comparison, faceted by D and bin, colored by k (linear error scale).
qplot(cl/100,(error),data=dm[dm$k != 29,],linetype=as.factor(confidence),color=k,shape=k)+geom_line()+
  facet_grid(percent(round(D,3))~bin,scales="free",space = "free_x")+
  scale_y_continuous(name="Relative error in Skmer distance", labels = percent)+ #,breaks=c(-sqrt(0.05),sqrt(c(0,0.05,0.2,0.5,1,2,5))),labels=function(x) percent(sign(x)*x^2))+
  scale_x_continuous(labels=scales::percent_format(accuracy = 1),name=expression("Contamination level"~c[l]),breaks=c(0,0.05,.1,0.2,0.4,0.6))+
  scale_color_manual(values=c("#fecc5c","#fd8d3c","#e31a1c","black"))+
  scale_linetype_manual(name="confidence",values=c(1,2,3),labels=c("0.00","0.05","None")) +
  scale_shape_manual(values=c(0,2,6,19))+
  theme_classic() +theme(panel.border = element_rect(fill=NA,size = 1), legend.position = "bottom",panel.grid.major.y = element_line(linetype = 1,size=0.24,color="gray"))
ggsave("E2_with_conf.pdf",width = 8,height = 7)

# Same plot with a signed square-root transform on the error axis.
qplot(cl/100,sign(error)*sqrt(abs(error)),data=dm[dm$k != 29,],linetype=as.factor(confidence),color=k,shape=k)+geom_line()+
  facet_grid(percent(round(D,3))~bin,scales="free",space = "free_x")+
  scale_y_continuous(name="Relative error in Skmer distance",breaks=c(-sqrt(0.05),sqrt(c(0,0.05,0.2,0.5,1,2,5))),labels=function(x) percent(sign(x)*x^2))+
  scale_x_continuous(labels=scales::percent_format(accuracy = 1),name=expression("Contamination level"~c[l]),breaks=c(0,0.05,.1,0.2,0.4,0.6))+
  scale_color_manual(values=c("#fecc5c","#fd8d3c","#e31a1c","black"))+
  scale_linetype_manual(name="confidence",values=c(1,2,3),labels=c("0.00","0.05","None"))+scale_shape_manual(values=c(0,2,6,19))+
  theme_classic() +theme(panel.border = element_rect(fill=NA,size = 1), legend.position = "bottom",panel.grid.major.y = element_line(linetype = 1,size=0.2,color="gray"))
ggsave("E2-sqrt_with_conf.pdf",width = 8,height = 7)

# Per-skim comparison of error before/after Kraken cleanup.
ds <- (read.csv("drosophilaskims.csv"))
names(ds)
qplot(abs(bk_no_clean_up-fna_dist)/fna_dist-abs(ak_no_clean_up-fna_dist)/fna_dist,data=ds,binwidth=0.02)
qplot(abs(bk_cleaned-fna_dist)/fna_dist-abs(ak_cleaned-fna_dist)/fna_dist,data=ds,binwidth=0.02)
qplot(fna_dist,abs(bk_cleaned-fna_dist)/fna_dist-abs(ak_cleaned-fna_dist)/fna_dist,data=ds)

ggplot(aes(x=fna_dist,y=abs(bk_cleaned-fna_dist)/fna_dist-abs(ak_cleaned-fna_dist)/fna_dist),data=ds)+
  #geom_violin(aes(group=cut(ds$fna_dist,breaks=c(0,0.03,0.08,0.11,0.126,0.145,0.2))),scale="width")+
  geom_point(color="blue")+geom_smooth(se=F,method="lm",color="red")+
  theme_light()+
  scale_y_continuous(labels=percent,name="Decrease in relative error after Kraken")+
  scale_x_continuous(name=("D"),labels=percent)+
  facet_wrap(~s1)

ggplot(aes(x=abs(bk_cleaned-fna_dist)/fna_dist,y=abs(bk_cleaned-fna_dist)/fna_dist-abs(ak_cleaned-fna_dist)/fna_dist),data=ds)+
  #geom_violin(aes(group=cut(ds$fna_dist,breaks=c(0,0.03,0.08,0.11,0.126,0.145,0.2))),scale="width")+
  geom_point(color="blue")+geom_smooth(se=F,method="lm",color="red")+
  theme_light()+
  scale_y_continuous(labels=percent,name="Decrease in relative error after Kraken")+
  scale_x_continuous(name=("D"),labels=percent)#+
# facet_wrap(~s1)  # orphaned by the commented-out `+` above: evaluated on its
#                  # own it only printed a bare facet spec, so keep it disabled
qplot(abs(bk_no_clean_up-fna_dist)/fna_dist-abs(bk_cleaned-fna_dist)/fna_dist,data=ds)
|
c42985b88f3024cc80db1ba97403ad458734eede
|
f853610d699f1a4e815cd3ea52aedd188f695d30
|
/plot3.R
|
49a34f1747995152a38fcd3429b8173041ed3ef9
|
[] |
no_license
|
jysmith/ExData_Plotting1
|
4cda8048e225ec5ec506b76eec346d574d88a9ba
|
c1b0d2c4e5acdbf54cb7385647ab5a078704a1ed
|
refs/heads/master
| 2021-01-18T11:19:38.270733
| 2015-02-08T17:03:35
| 2015-02-08T17:03:35
| 30,498,146
| 0
| 0
| null | 2015-02-08T17:00:32
| 2015-02-08T17:00:32
| null |
UTF-8
|
R
| false
| false
| 895
|
r
|
plot3.R
|
# plot3.R -- energy sub-metering over 1-2 February 2007.

# Load the full household power data set; "?" marks missing values, and the
# first two columns (Date, Time) are read as character.
power_all <- read.table("household_power_consumption.txt", header=TRUE,
                        sep=";", na.strings="?", nrows=2080000,
                        colClasses=c(rep("character",2), rep("numeric",7)) )

# Keep only the two target days, then free the full table.
power <- power_all[power_all$Date == "1/2/2007" | power_all$Date == "2/2/2007",]
rm(power_all)

# Combine the Date and Time strings into POSIXlt timestamps.
timestamp <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %T")

# Draw the three sub-metering series into plot3.png.
png(filename="plot3.png", width=480, height=480)
plot(timestamp, power$Sub_metering_1, type="l",
     xlab="", ylab="Energy sub metering")
lines(timestamp, power$Sub_metering_2, col="red")
lines(timestamp, power$Sub_metering_3, col="blue")
legend("topright", lty=1, col=c("black","red","blue"),
       legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
|
e08a7627b9f47ca7eb40099e4af5a832be0c7ebe
|
62d26b0055a5d5ec25adc12c4f628e97a5fc50c5
|
/R/py_int/covid_run.R
|
33d238734af48b1c2745337c7cf7e2b1fa33d8aa
|
[
"MIT"
] |
permissive
|
Urban-Analytics/RAMP-UA
|
8eae83ed06145ab9d6695045130d20e01100b0ca
|
ae5f26d6c5c9e03bb0902078f8ada316c766290e
|
refs/heads/master
| 2023-07-25T00:06:57.050746
| 2022-11-08T09:55:55
| 2022-11-08T09:55:55
| 259,974,353
| 12
| 11
|
MIT
| 2023-07-06T22:15:36
| 2020-04-29T16:05:13
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 14,285
|
r
|
covid_run.R
|
####################################################################################################################
####################################################################################################################
####################################################################################################################
######################## R counterpart to the microsim/main.py spatial interaction model. ########################
######################## This code converts 'risk' received from microsim/main.py into a ########################
######################## probability of being infected with COVID and then uses a Bernoulli ########################
######################## draw to assign COVID cases. This code also assigns timings to each ########################
######################## disease stage (SEIR) for each individual; and a mortality risk to ########################
######################## each individual based on age and health variables. ########################
####################################################################################################################
####################################################################################################################
####################################################################################################################
#############################################################
################# Loading packages and data #################
#############################################################
# Install (if missing) and attach every R package the model needs. rampuaR is
# installed from GitHub and upgraded whenever a newer version is available.
# NOTE(review): assumes devtools and rvcheck are already installed — confirm
# in the deployment environment.
load_rpackages <- function() {
  list.of.packages <- c("rampuaR")
  new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
  # Explicit length comparison (if(length(x)) relies on 0/1 truthiness).
  if(length(new.packages) > 0) devtools::install_github("Urban-Analytics/rampuaR", dependencies = F)
  library(rvcheck)
  rampr_version <- check_github("Urban-Analytics/rampuaR")
  # If there is a newer version of rampuaR available on GitHub, install it.
  if(!rampr_version$up_to_date) devtools::install_github("Urban-Analytics/rampuaR", dependencies = F)
  library(tidyr)
  library(readr)
  library(mixdist)
  library(dplyr)
  library(rampuaR) # contains all the key model functions used below
  library(withr)
}
# Load the seeding data shipped with rampuaR and initialize the global state
# used across timesteps. `<<-` deliberately writes into the global
# environment: these objects persist between calls made from the Python side.
load_init_data <- function() {
  data(gam_cases) # Estimated number of infections by day, based on smoothed PHE data. In the first outbreak PHE data estimated to only make up about 5% of total cases (NHS pers comms), so we amplify PHE cases by 20 and smooth.
  data(msoas) # List of Devon MSOAs and their risk level; cases will be seeded in High risk MSOAs (well connected by train/plane, high pop density, rich).
  w <<- NULL            # cross-timestep weights, populated on later calls
  model_cases <<- NULL  # cross-timestep case counts, populated on later calls
}
# One-shot entry point called from the Python side before the first timestep:
# attach packages first, then load the seeding data they provide.
initialize_r <- function() {
  load_rpackages()
  load_init_data()
}
# Advance the epidemic model by one day and return updated disease statuses.
#
# Called once per timestep from the Python spatial-interaction model. `pop` is
# the population data.frame (one row per individual) whose `current_risk`
# column carries the activity-based exposure computed on the Python side.
#
# Key arguments:
#   timestep              1-based simulation day; day 1 triggers output-dir
#                         setup and initial case seeding
#   rep                   repetition id, reused as the RNG seed when set_seed
#   current_risk_beta     weight converting current_risk into infection hazard
#   risk_cap              optional cap on the summed risk (NA = uncapped)
#   seed_days             number of initial days on which cases are seeded
#                         from gam_cases
#   *_dist/*_mean/*_sd    stage-duration distributions (exposed /
#                         presymptomatic / symptomatic)
#   output_switch         write a per-day CSV snapshot of `pop` to tmp.dir
#   rank_assign           assign cases to highest-risk individuals instead of
#                         Bernoulli draws (documented below as redundant)
#   local_outbreak*       optional one-off local super-spreader event
#   overweight/obesity_*/cvd/diabetes/bloodpressure
#                         multipliers on age-based mortality risk
#   overweight_sympt_mplier  multiplier on the probability of being
#                         symptomatic (see NOTE at the sympt_risk call)
#   improve_health        swap BMI for the counterfactual healthier population
#
# Returns: data.frame (area, ID, house_id, disease_status, and remaining days
# in each disease stage) for consumption by the Python model.
#
# Side effects: creates the global `tmp.dir` on day 1, optionally writes CSVs,
# and prints progress diagnostics.
run_status <- function(pop,
                       timestep = 1,
                       rep = NULL,
                       current_risk_beta = 0.008,
                       risk_cap = NA,
                       seed_days = 10,
                       exposed_dist = "weibull",
                       exposed_mean = 2.56,
                       exposed_sd = 0.72,
                       presymp_dist = "weibull",
                       presymp_mean = 2.3,
                       presymp_sd = 0.35,
                       infection_dist = "lognormal",
                       infection_mean = 18,
                       infection_sd = 1.1,
                       output_switch = TRUE,
                       rank_assign = FALSE,
                       local_outbreak_timestep = 0,
                       local_outbreak = FALSE,
                       msoa_infect="E02004152",
                       number_people_local=100,
                       local_prob_increase=0.75,
                       overweight_sympt_mplier = 1.46,
                       overweight = 1,
                       obesity_30 = 1,
                       obesity_35 = 1.4,
                       obesity_40 = 1.9,
                       cvd = 1,
                       diabetes = 1,
                       bloodpressure = 1,
                       improve_health = FALSE,
                       set_seed = TRUE) {
  # Optionally seed the RNG with the repetition number. The original authors
  # note that fully deterministic runs were abandoned (individual pathways
  # diverge anyway), so this is best-effort reproducibility only.
  if(set_seed == TRUE) {
    seed <- rep
    set.seed(seed)
  } else {
    seed <- NULL
  }
  print(paste0("the seed number is ",seed))
  # seed_cases: only seed cases while seed_days is positive.
  seed_cases <- ifelse(seed_days > 0, TRUE, FALSE)
  print(paste("R timestep:", timestep))
  # On day 1, create a timestamped output directory (global, reused on later
  # days). Colons are not allowed in Windows folder names, so Sys.time() is
  # sanitised with hyphens.
  if(timestep==1) {
    tmp.dir <<- paste0('R/py_int', "/output/", gsub(":","-", gsub(" ","-",Sys.time())))
    if(!dir.exists(tmp.dir)){
      dir.create(tmp.dir, recursive = TRUE)
    }
  }
  # Counterfactual scenario: shift everyone down one BMI class (Obese III ->
  # Obese II, ..., Obese I -> Overweight). Since obesity raises mortality
  # risk below, fewer deaths are expected in the healthier population.
  if(improve_health == TRUE){
    pop$BMIvg6 <- pop$BMI_healthier
  }
  # Daily snapshot of the full population -- heavy on disk but useful for
  # debugging what the model is doing.
  if(output_switch){write.csv(pop, paste0( tmp.dir,"/daily_", timestep, ".csv"))}
  # Reduce the (much wider) population frame to the columns the rest of this
  # function needs.
  df_cr_in <- rampuaR::create_input(micro_sim_pop = pop,
                          vars = c("area", # must match columns in the population data.frame
                                   "house_id",
                                   "id",
                                   "current_risk",
                                   "BMIvg6",
                                   "cvd",
                                   "diabetes",
                                   "bloodpressure"))
  # Mortality risk per individual. The baseline is age-based (inside
  # rampuaR::mortality_risk); the arguments multiply it, e.g. obesity_40 = 1.9
  # means BMI > 40 raises mortality risk by 90%.
  df_msoa <- rampuaR::mortality_risk(df = df_cr_in,
                                     obesity_40 = obesity_40,
                                     obesity_35 = obesity_35,
                                     obesity_30 = obesity_30,
                                     overweight = overweight,
                                     cvd = cvd,
                                     diabetes = diabetes,
                                     bloodpressure = bloodpressure)
  # Probability of being symptomatic if infected; baseline is age-based
  # (older individuals more likely symptomatic), multiplied by these factors.
  # NOTE(review): the literal 1.46 is passed here, so the
  # overweight_sympt_mplier *parameter* is ignored -- confirm whether
  # `overweight_sympt_mplier = overweight_sympt_mplier` was intended.
  df_msoa <- rampuaR::sympt_risk(df = df_msoa,
                                 overweight_sympt_mplier = 1.46,
                                 cvd = NULL,
                                 diabetes = NULL,
                                 bloodpressure = NULL)
  # Day-1 seeding: infect gam_cases[1] randomly chosen individuals from
  # high-risk MSOAs who spend at least 30% of their time outside the home.
  if(timestep==1){
    msoas <- msoas[msoas$risk == "High",]
    pop_hr <- pop %>% filter(area %in% msoas$area & pnothome > 0.3)
    seeds <- withr::with_seed(seed, sample(1:nrow(pop_hr), size = gam_cases[timestep]))
    seeds_id <- pop_hr$id[seeds]
    df_msoa$new_status[df_msoa$id %in% seeds_id] <- 1 # status 1 = exposed (was 0 = susceptible)
    print("First day seeded")
  }
  # Historically other covariates (age, gender) contributed betas; now the
  # spatial-interaction model's current_risk is the only one.
  other_betas <- list(current_risk = current_risk_beta)
  # Sum the betas into a single risk value (risk_cap_val can clamp extreme
  # values from pathological workplace setups, but is normally NA/unused).
  df_sum_betas <- rampuaR::sum_betas(df = df_msoa,
                                     betas = other_betas,
                                     risk_cap_val = risk_cap)
  print("betas calculated")
  # df_prob <- covid_prob(df = df_sum_betas)
  # Convert summed risk into a per-day infection probability in [0, 1].
  df_prob <- rampuaR::infection_prob(df = df_sum_betas, dt = 1)
  print("probabilities calculated")
  # Optional one-off super-spreader event in a chosen MSOA at a chosen day.
  if(local_outbreak == TRUE & timestep == local_outbreak_timestep){
    print("Local outbreak - super spreader event!")
    df_prob <- rampuaR::local_outbreak(df=df_prob,
                                       msoa_infect=msoa_infect,
                                       number_people=number_people_local,
                                       risk_prob=local_prob_increase)
  }
  # Bernoulli draw per susceptible individual: with probability equal to
  # their infection probability, new_status flips to 1 (exposed).
  if(timestep > 1){
    df_ass <- rampuaR::case_assign(df = df_prob,
                                   tmp.dir=tmp.dir,
                                   save_output = output_switch,
                                   seed = seed)
  } else {
    df_ass <- df_prob
  }
  # Sanity-check printouts comparing model cases to the (adjusted) PHE data.
  print("cases assigned")
  print(paste0("PHE cases ", gam_cases[timestep]))
  # NOTE(review): model_cases is a global initialised to NULL; `[<-` here
  # modifies a local copy only, so the global is never updated across calls.
  # Use <<- if cross-timestep tracking was intended.
  model_cases[timestep] <- (sum(df_prob$new_status == 0) - sum(df_ass$new_status == 0))
  print(paste0("model cases ", model_cases[timestep]))
  print(paste0("Adjusted PHE cases ", gam_cases[timestep]))
  # Seeding after day 1: rank individuals by risk and assign gam_cases[t]
  # cases to the top of the ranking, so seeded cases are not confined to the
  # high-risk MSOAs used on day 1.
  if(timestep > 1 & timestep <= seed_days & seed_cases == TRUE){
    df_ass <- rank_assign(df = df_prob, daily_case = gam_cases[timestep], seed = seed)
    print(paste0((sum(df_prob$new_status == 0) - sum(df_ass$new_status == 0))," cases reassigned"))
  }
  # Rank-assignment for the whole run (instead of Bernoulli draws) -- kept
  # for completeness but described by the authors as redundant.
  # NOTE(review): `w` is reset to NULL in load_init_data() and never assigned
  # afterwards, so w[timestep] is NULL here and this condition would error if
  # rank_assign = TRUE were ever used. Confirm before enabling.
  if((rank_assign == TRUE & seed_cases == FALSE) | (rank_assign == TRUE & seed_cases == TRUE & timestep > seed_days)){
    if(timestep > 1 & (w[timestep] <= 0.9 | w[timestep] >= 1.1)){
      df_ass <- rank_assign(df = df_prob, daily_case = gam_cases[timestep],seed = seed)
      print(paste0((sum(df_prob$new_status == 0) - sum(df_ass$new_status == 0))," cases reassigned"))
    }
  }
  # Draw stage durations (exposed / presymptomatic / symptomatic) for newly
  # infected individuals from the configured distributions.
  df_inf <- rampuaR::infection_length(df = df_ass,
                                      exposed_dist = exposed_dist,
                                      exposed_mean = exposed_mean,
                                      exposed_sd = exposed_sd,
                                      presymp_dist = presymp_dist,
                                      presymp_mean = presymp_mean,
                                      presymp_sd = presymp_sd,
                                      infection_dist = infection_dist,
                                      infection_mean = infection_mean,
                                      infection_sd = infection_sd,
                                      seed = seed)
  print("infection and recovery lengths assigned")
  # Individuals at the end of their symptomatic period either recover or die,
  # via a Bernoulli draw against their mortality risk.
  df_rem <- rampuaR::removed_age(df_inf, seed = seed)
  print("individuals removed")
  # Decrement remaining stage durations / advance individuals to the next
  # disease stage.
  df_rec <- rampuaR::recalc_sympdays(df_rem)
  print("updating infection lengths")
  df_msoa <- df_rec #area_cov(df = df_rec, area = area, hid = hid)
  # Slim output frame handed back to the Python spatial-interaction model.
  df_out <- data.frame(area=df_msoa$area,
                       ID=df_msoa$id,
                       house_id=df_msoa$house_id,
                       disease_status=df_msoa$new_status,
                       exposed_days = df_msoa$exposed_days,
                       presymp_days = df_msoa$presymp_days,
                       symp_days = df_msoa$symp_days)
  #if(output_switch){write.csv(df_out, paste0(tmp.dir, "/daily_out_", timestep, ".csv"))}
  return(df_out)
}
|
ecceafc3217cc259f9e17d8ab9e461f75c708bb5
|
90b33d58d125d9a561411e1256c60daf45563ca0
|
/man/optimalSD.Rd
|
c711f85ac1317d739e73110e49b3ebb76070a2da
|
[] |
no_license
|
edzer/sensors4plumes
|
e78d336ae390c8bcc993561b5e1eb2a4eb7cd40e
|
a834b2194b0c75be0d74bb27c94f3459d5f3dbb1
|
refs/heads/master
| 2020-05-24T02:10:33.850671
| 2017-03-28T14:33:48
| 2017-03-28T14:33:48
| 84,813,077
| 3
| 0
| null | 2017-03-13T10:20:28
| 2017-03-13T10:20:28
| null |
UTF-8
|
R
| false
| false
| 712
|
rd
|
optimalSD.Rd
|
\name{optimalSD}
\alias{SDgenetic}
\alias{SDglobal}
\alias{SDgreedy}
\alias{SDmanual}
\alias{SDssa}
\docType{data}
\title{
Optimised sampling designs
}
\description{
For each of the optimisation algorithms a resulting sampling design is provided. These are taken from the examples of the respective cost functions.
}
\usage{
data(SDgenetic)
data(SDglobal)
data(SDgreedy)
data(SDmanual)
data(SDssa)
}
\format{
\code{SDgenetic}: \code{list} \cr
\code{SDglobal}: \code{list} \cr
\code{SDgreedy}: \code{list} \cr
\code{SDmanual}: \code{list} \cr
\code{SDssa}: \code{list} \cr
}
\author{ Kristina B. Helle, \email{kristina.helle@uni-muenster.de} }
\examples{
data(SDgenetic)
}
\keyword{datasets}
|
c210c66e9cdece4c85251001c813d50d323fce4c
|
00741d47c446fbe1f0163732b59be757d64d2298
|
/Script/evaluation/ARI/ARI_utils/conclude_ARISampled_dat6.R
|
3903226c184c7a460446831fa48f93c540101e1b
|
[] |
no_license
|
JinmiaoChenLab/Batch-effect-removal-benchmarking
|
aebd54fda05eb9e8ba21afcd11c5d10158dfaec5
|
60d52c29e29b7849b1505167da572165cc5d5b82
|
refs/heads/master
| 2022-03-11T17:47:01.250098
| 2022-01-24T02:50:57
| 2022-01-24T02:50:57
| 206,039,306
| 64
| 51
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,060
|
r
|
conclude_ARISampled_dat6.R
|
# Author : Nicole Lee
# Date : 29/08/2019
# Purpose: Third function to be called in ARI pipeline
# Following calculation of ARI scores for all batch-
# correction methods, this function is used to
# produce F1 score based on normalised ARI scores
#          Writes a CSV file containing median and F1 scores
#          for all batch-correction methods and returns the
#          summary data frame (specific script for Dataset 6)
# Combine per-method ARI results into a single summary with F1 scores.
#
# Reads every "ARISampled_*" file in `dir_this` (one per batch-correction
# method, produced by the earlier pipeline steps), extracts the median ARI
# for batch mixing and for cell-type purity, min-max normalises both across
# methods, and computes an F1-style score (as in the scMerge paper) that
# rewards low batch ARI and high cell-type ARI.
#
# Args:
#   dir_this    directory containing the ARISampled_* result files
#   plot_title  label used in the output CSV file name
#
# Returns the summary data.frame; also writes
# "ARI_Sampled<plot_title>_allmethod.csv" into `dir_this`.
#
# Side effects: changes the working directory to `dir_this` (kept from the
# original implementation since downstream steps may rely on it) and
# attaches ggplot2.
conclude_ARISampled_6 <- function(dir_this, plot_title){
  library(ggplot2)  # attached for consistency with the other pipeline scripts
  setwd(dir_this)
  filesneed <- list.files(pattern = "ARISampled_")

  # Preallocate result vectors instead of growing them inside the loop.
  n_files <- length(filesneed)
  method <- character(n_files)
  medianARIbatch <- numeric(n_files)
  medianARIcelltype <- numeric(n_files)

  # seq_along() (unlike 1:length()) is safe when no files match the pattern.
  for (x in seq_along(filesneed)){
    temp <- read.table(filesneed[x], header = TRUE, stringsAsFactors = FALSE)
    method[x] <- as.character(temp$use_case[1])
    # Row 21 is assumed to hold the median over the 20 subsampling
    # iterations (rows 1-20 being the individual runs) -- TODO confirm
    # against the script that writes the ARISampled_* files.
    medianARIbatch[x] <- temp$ari_batchfirst[21]
    medianARIcelltype[x] <- temp$ari_celltype[21]
    rm(temp)
  }

  # Min-max normalise both medians to [0, 1] across methods.
  min_batch <- min(medianARIbatch)
  max_batch <- max(medianARIbatch)
  min_cell <- min(medianARIcelltype)
  max_cell <- max(medianARIcelltype)
  medianARIbatch_norm <- (medianARIbatch - min_batch) / (max_batch - min_batch)
  medianARIcelltype_norm <- (medianARIcelltype - min_cell) / (max_cell - min_cell)

  # F1 score, similar to the scMerge paper: harmonic mean of batch mixing
  # (1 - normalised batch ARI) and cell-type purity (normalised cell-type ARI).
  medianfscoreARI <- (2 * (1 - medianARIbatch_norm) * (medianARIcelltype_norm)) /
    (1 - medianARIbatch_norm + medianARIcelltype_norm)
  sum_xy <- medianARIcelltype_norm + (1 - medianARIbatch_norm)

  finaldf <- data.frame("ARIMethod" = method,
                        "ARIbatchMedian" = medianARIbatch,
                        "ARIbatchMedian_norm" = medianARIbatch_norm,
                        "ARIcelltypeMedian" = medianARIcelltype,
                        "ARIcelltypeMedian_norm" = medianARIcelltype_norm,
                        "ARI_fscore" = medianfscoreARI,
                        "ARI_summedXY" = sum_xy)
  write.csv(finaldf, file = paste0("ARI_Sampled", plot_title, "_allmethod.csv"), row.names = FALSE)
  return(finaldf)
}
|
36c97ce47e42b600246c45e9e6f95f180b00612a
|
eaf6d592d069f12a673f0cb63c97f2e271ad4b03
|
/man/split_replace_raster.Rd
|
736c29f1280b4ad0ca2a76f7df8c1dcbe5e20fb9
|
[] |
no_license
|
inder-tg/geoTS
|
bcaecf12928bd535eb4066e92febf7d7b7140aa7
|
0cd98b78556e876692247722a16133d15e1ab06b
|
refs/heads/master
| 2022-07-26T09:58:59.697717
| 2022-07-20T14:23:10
| 2022-07-20T14:23:10
| 185,881,619
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,390
|
rd
|
split_replace_raster.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/split_replace_raster.R
\name{split_replace_raster}
\alias{split_replace_raster}
\title{Split a Raster* object and replace cell values (optional)}
\usage{
split_replace_raster(raster, partPerSide, save = T, replace = F,
valToReplace, replacedBy, dataType, cellsToProcess = 1:(partPerSide^2),
format = "GTiff", outputPath, name, ...)
}
\arguments{
\item{raster}{Raster* object}
\item{partPerSide}{numeric indicating the number of cells in which \code{raster} will be split}
\item{save}{logical, should the output be saved, default is \code{TRUE}}
\item{replace}{logical, default \code{FALSE}, when \code{TRUE}, \code{valToReplace} and \code{replacedBy} must by specified}
\item{valToReplace}{indicates a value to be replaced across \code{raster} cells}
\item{replacedBy}{indicates the value by which \code{valToReplace} is replaced}
\item{dataType}{character, output data type. See \code{\link[raster]{dataType}}}
\item{cellsToProcess}{numeric vector indicating which of the \code{partPerSide^2} should be processed/saved}
\item{format}{character, output file type, default \code{"GTiff"}. See \code{\link[raster]{writeFormats}}}
\item{outputPath}{character with full path name where the resulting Raster* objects will be saved}
\item{name}{character with the name to assign to final products}
\item{...}{additional arguments used by \code{\link[raster]{writeRaster}}}
}
\value{
At \code{outputPath} the user will find \code{length(cellsToProcess)} files
}
\description{
This function will split a Raster* object into \code{partPerSide^2} parts. Additionally,
it allows to replace cell values (\code{valToReplace}) within Raster* object by another
value of user's choice (\code{replacedBy}). When \code{save = T}, the resulting \code{cellsToProcess}
Raster* objects are saved in directory \code{outputPath}.
}
\details{
Before processing any of the \code{cellsToProcess} the temporary raster
directory is re-directed. Basically, prior to process the i-th cell,
at \code{outputPath} a new subdirectory is created, which, in turn, is erased
automatically once the i-th cell has been processed. As a result of multiple testing
we found that this measure avoids memory overflow.
}
\seealso{
\code{\link[raster]{writeRaster}}, \code{\link[raster]{aggregate}},
\code{\link[raster]{rasterOptions}}
}
|
c30ff88d70e370ec5ad5b15a67ea6669bd9b640a
|
6a2f6ab46c35441db0288fbde4be1a5188f2ec30
|
/man/ti_monocle_ddrtree.Rd
|
eaa65e72a55e2a41469c2156efb5c80022e591b8
|
[] |
no_license
|
herrinca/dynmethods
|
f7595c8ce4f06cb2cb4b809c49ceebd705330940
|
0a5768cf4452b2b745ee675bbd013140d54029da
|
refs/heads/master
| 2020-03-26T22:19:11.513964
| 2018-08-21T18:03:51
| 2018-08-21T18:03:51
| 145,448,352
| 0
| 0
| null | 2018-08-20T17:17:18
| 2018-08-20T17:17:18
| null |
UTF-8
|
R
| false
| true
| 1,891
|
rd
|
ti_monocle_ddrtree.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ti_monocle_ddrtree.R
\name{ti_monocle_ddrtree}
\alias{ti_monocle_ddrtree}
\title{Inferring a trajectory inference using Monocle DDRTree}
\usage{
ti_monocle_ddrtree(reduction_method = "DDRTree", max_components = 2L,
norm_method = "vstExprs", auto_param_selection = TRUE,
run_environment = NULL)
}
\arguments{
\item{reduction_method}{discrete; A character string specifying the algorithm
to use for dimensionality reduction. (default: \code{"DDRTree"}; values:
{\code{"DDRTree"}})}
\item{max_components}{integer; The dimensionality of the reduced space
(default: \code{2L}; range: from \code{2L} to \code{20L})}
\item{norm_method}{discrete; Determines how to transform expression values
prior to reducing dimensionality (default: \code{"vstExprs"}; values: {\code{"vstExprs"},
\code{"log"}, \code{"none"}})}
\item{auto_param_selection}{logical; When this argument is set to TRUE
(default), it will automatically calculate the proper value for the ncenter
(number of centroids) parameters which will be passed into DDRTree call.}
\item{run_environment}{In which environment to run the method, can be \code{"docker"} or \code{"singularity"}.}
}
\value{
A TI method wrapper to be used together with
\code{\link[dynwrap:infer_trajectories]{infer_trajectory}}
}
\description{
Will generate a trajectory using \href{https://doi.org/10.1038/nmeth.4402}{Monocle DDRTree}.
This method was wrapped inside a
\href{https://github.com/dynverse/dynmethods/tree/master/containers/monocle_ddrtree}{container}.
The original code of this method is available
\href{https://github.com/cole-trapnell-lab/monocle-release}{here}.
}
\references{
Qiu, X., Mao, Q., Tang, Y., Wang, L., Chawla, R., Pliner, H.A.,
Trapnell, C., 2017. Reversed graph embedding resolves complex single-cell
trajectories. Nature Methods 14, 979–982.
}
|
32cc3d82c118148987c3188cf521ebf51a56eabf
|
59b832f22a3f29d3eed81e9123178f756ce82555
|
/Recombination_Functions/plot_recombination_functions.Rscript
|
7c9b89e4d9a342f8260ae48ceb838fcd2d2fccd9
|
[] |
no_license
|
cory-weller/HS-reconstruction-gwas
|
a5e727347a07cd475b93520181047e220ca479c7
|
7663d6c68e1d63bdec1de364b5876fd589cebefb
|
refs/heads/master
| 2023-02-20T17:02:45.982270
| 2021-01-18T14:28:56
| 2021-01-18T14:28:56
| 271,129,562
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,085
|
rscript
|
plot_recombination_functions.Rscript
|
#!/usr/bin/env Rscript
# Build per-chromosome recombination sampling functions from a BED file of
# recombination rates (cM/Mb) and plot the resulting inverse-CDF curves.
# Fix: the script used fread/setnames/:=/CJ/ggplot without ever attaching
# data.table or ggplot2, so it failed when run via Rscript.
library(data.table)
library(ggplot2)

bed <- fread('recombination_map.bed')
setnames(bed, c("chr", "start", "stop", "c"))

x.chromosome <- "X"
dmel <- TRUE

bed[chr==x.chromosome, chr := "X"]

# Correction for Drosophila: merge the 2L/2R and 3L/3R arms into single
# chromosomes "2" and "3" by shifting the right-arm coordinates.
if(dmel==TRUE) {
  # store & add maximum value of 2L onto every start, stop for 2R
  # store & add maximum value of 3L onto every start, stop for 3R
  # Reduce these later
  max2L <- max(bed[chr=="2L"]$stop)
  max3L <- max(bed[chr=="3L"]$stop)
  bed[chr=="2R", start := start + max2L]
  bed[chr=="2R", stop := stop + max2L]
  bed[chr=="3R", start := start + max3L]
  bed[chr=="3R", stop := stop + max3L]
  bed[chr %in% c("2L","2R"), chr := "2"]
  bed[chr %in% c("3L","3R"), chr := "3"]
}

# Get list of unique chromosome names within .bed file
chromosomes <- unique(bed$chr)

# Convert c (cM per Mb) to Morgans for each interval.
bed[, M := c * ((stop-start)/1e8)]

# Hash table (environment) mapping chromosome -> expected number of
# recombination events, e.g. recombination_rates[["2L"]] -> 0.5533038
recombination_rates <- new.env()
for(chromosome in chromosomes) {
  recombination_rates[[chromosome]] <- sum(bed[chr==chromosome]$M) # total Morgans per chromosome
}

chromosome_sizes <- new.env()
for(chromosome in chromosomes) {
  chromosome_sizes[[chromosome]] <- max(bed[chr==chromosome]$stop)
}

# Hash table mapping a uniform random value in (0,1) to a recombination
# position, via linear interpolation of the scaled cumulative recombination
# rate along each chromosome.
bed[, cumulative_M := cumsum(M), by=chr]
bed[, scaled := cumulative_M/max(cumulative_M), by=chr]
genomeSize <- sum(bed[, list(size=max(stop)), by=chr]$size)

recombination_function <- new.env()
for(chromosome in chromosomes) {
  recombination_function[[as.character(chromosome)]] <- approxfun(c(0, bed[chr==chromosome]$scaled), c(0,bed[chr==chromosome]$stop))
}

# Evaluate each chromosome's inverse-CDF on a fine grid and plot the curves.
dat <- CJ(chromosome=c("2","3","X"), y.pos=seq(0,1,0.001))
dat[, id := 1:.N]
dat[, x.pos := recombination_function[[chromosome]](y.pos), by=id]

ggplot(dat, aes(x=x.pos, y=y.pos)) + geom_line() + facet_grid(.~chromosome, scales="free")
|
a9ca505fd191c589e9bd619fb6940cbcb87f800f
|
8cea90e27b19a97ce2445f60824b55da001b6e85
|
/plot_sample_means.R
|
c329adb7b8e0b6e03e84c434894ea5e42cda51e2
|
[] |
no_license
|
glaubius/Rscripts
|
090aea7eb54b8e3cf0b0835e5fa69477c19d91d2
|
f950d95add5d052e44a6a71fcfa344a271556d8c
|
refs/heads/master
| 2020-12-31T07:10:11.148474
| 2017-03-07T21:22:42
| 2017-03-07T21:22:42
| 80,557,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,678
|
r
|
plot_sample_means.R
|
y## Plot sample means with confidence intervals
## adapted from https://www.youtube.com/watch?v=x4ekQ1nanQ4
## Inputs: list of rmse_values, list to save sample_means, matrix of cis
num_means <- length(rmse_values) - 1
sample_mean <- matrix(nrow=num_means, ncol=2)
cis <- matrix(nrow=num_means, ncol=2)
for (i in 1:num_means){
rows <- i + 1
observations <- rmse_values[1:rows]
#store number of means and sample mean
sample_mean[i, 1] <- rows
sample_mean[i, 2] <- mean(observations)
#construct ci
stdev <- sd(observations)
n <- length(observations)
se_mean <- stdev / sqrt(n)
#store cis
cis[i, 1] <- sample_mean[i, 2] - 1.96 * se_mean
cis[i, 2] <- sample_mean[i, 2] + 1.96 * se_mean
}
#Plot all data, see subset below
plot(sample_mean[,1], sample_mean[,2], xlab = "Number of Simulations", ylab = "Mean of RMSE values", cex = 0.5, col="blue")
# segments(x0=sample_mean[, 1], x1=sample_mean[, 1], y0=cis[, 1], y1=cis[, 2], col="red", lwd=20)
arrows(sample_mean[,1], cis[,1], sample_mean[,1], cis[,2], code=3, angle=90, length=0.05, col='red')
#Subset data for greater legibility in plots, then plot
Sub_sample_num <- sample_mean[,1][seq(1, nrow(sample_mean), 10)]
Sub_sample_mean <- sample_mean[,2][seq(1, nrow(sample_mean), 10)]
Sub_cis_low <- cis[,1][seq(1, nrow(cis), 10)]
Sub_cis_high <- cis[,2][seq(1, nrow(cis), 10)]
plot(Sub_sample_num, Sub_sample_mean, xlab = "Number of Simulations", ylab = "Mean of RMSE values", cex = 0.5, col="blue")
# segments(x0=sample_mean[, 1], x1=sample_mean[, 1], y0=cis[, 1], y1=cis[, 2], col="red", lwd=20)
arrows(Sub_sample_num, Sub_cis_low, Sub_sample_num, Sub_cis_high, code=3, angle=90, length=0.05, col='red')
|
1709a944e8efdde1fb6493760b02a86a9e9e1c1f
|
1853a82480662e1f24356b14c4774e5078fc0a1c
|
/edge_thread_compare_female.R
|
e7d701f51a5af6d46704c66bb58b0e1e01814468
|
[] |
no_license
|
lots-of-things/edge_forum
|
a6954e0411bbf407eb1496cb7193ed271193d742
|
7a4dbbde7308288b93ff42c8a8b59bb90b986a36
|
refs/heads/master
| 2020-07-23T03:09:19.357241
| 2017-07-10T04:22:23
| 2017-07-10T04:22:23
| 94,350,039
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,178
|
r
|
edge_thread_compare_female.R
|
# group by individual and measure fraction of each gender not counting the individual
# Getting a uniquified view of thread metrics for plotting
thread_info = data_thread %>%
filter(UniqueContributors>5) %>%
select(Year,
Title,
Link,
Type,
ThreadId,
DebateSize,
Female_Contributions,
FemaleParticipation,
Live,
UniqueContributors,
UniqueFemaleContributors,
UniqueFemaleParticipation,
starts_with('Thread_'),
-starts_with('Thread_Text')) %>%
unique()
# test whether unique female participation is segmented to threads in a significant way
prop.test(thread_info$UniqueFemaleContributors,thread_info$UniqueContributors)
# display results of prp test
female_confint0 = binom.confint(sum(thread_info$UniqueFemaleContributors),sum(thread_info$UniqueContributors),methods='wilson')[5:6]
female_confint = cbind(as.character(thread_info$ThreadId),thread_info$DebateSize,thread_info$Title,binom.confint(thread_info$UniqueFemaleContributors,thread_info$UniqueContributors,methods='wilson')[,4:6])
names(female_confint)[1]='Thread'
names(female_confint)[2]='ThreadSize'
names(female_confint)[3]='Title'
ggplot(female_confint,aes(reorder(Thread,ThreadSize),mean))+
geom_point()+
geom_errorbar(aes(ymin=lower, ymax=upper))+
geom_hline(yintercept=female_confint0$lower)+
geom_hline(yintercept=female_confint0$upper)+
ylab('Female Fraction')+
xlab('Thread')+
theme(axis.text.x = element_text(angle = 45, hjust=1))
# smae but for all female participation
prop.test(thread_info$Female_Contributions,thread_info$DebateSize)
# display result
female_confint0 = binom.confint(sum(thread_info$Female_Contributions),sum(thread_info$DebateSize),methods='wilson')[5:6]
female_confint = cbind(as.character(thread_info$ThreadId),binom.confint(thread_info$Female_Contributions,thread_info$DebateSize,methods='wilson')[,4:6])
names(female_confint)[1]='Thread'
ggplot(female_confint,aes(Thread,mean))+
geom_point()+
geom_errorbar(aes(ymin=lower, ymax=upper))+
geom_hline(yintercept=female_confint0$lower)+
geom_hline(yintercept=female_confint0$upper)
#
|
827beb4919ac7d292e9611497c72b352e9acb81a
|
39a61aba62505091e3d7033bb62113976473d912
|
/expression/candidate_genes_allen_expression_byregions_PT.R
|
845c680004fef0d281a4a811e7873faca468add2
|
[] |
no_license
|
amaiacc/GeneticsPlanumTemporaleAsymmetry
|
83dcd1d3abe81ad2aca33dcebe854e69689b7a4a
|
8224b0ffa531226784b1bdb7710bdf118ea3e9f2
|
refs/heads/master
| 2020-08-31T07:36:59.201396
| 2020-01-02T13:41:53
| 2020-01-02T13:41:53
| 218,637,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,460
|
r
|
candidate_genes_allen_expression_byregions_PT.R
|
#----------------------------------------------------------------------
# Load libraries for plotting
#----------------------------------------------------------------------
library(ggplot2)
library(grid)
library(ggbeeswarm)
library(grid.Extra); library(grid)
library(lme4);library(fmsb)
# allen brain packages
# devtools::install_github('oganm/allenBrain')
library(allenBrain)
## if needed
# install.packages("devtools")
## main package
library(devtools)
install.packages('oro.nifti')
# install_github('aaronjfisher/ggBrain',build_vignettes=TRUE)
## to access help pages
library(ggBrain)
help(package=ggBrain)
#
# Capitalise the first letter of every space-separated word in a string.
simpleCap <- function(x) {
  words <- strsplit(x, " ")[[1]]
  first_letters <- toupper(substring(words, 1, 1))
  remainders <- substring(words, 2)
  paste(first_letters, remainders, sep = "", collapse = " ")
}
library(dplyr)
library(tidyr)
#----------------------------------------------------------------------
# ggplot helpers: t1 = minimal white-background theme; mytheme2 = large-text
# black-and-white theme for faceted plots; cols = shared colour palette.
#----------------------------------------------------------------------
t1<-theme(
  plot.background = element_blank(),
  panel.grid.major = element_blank(),
  panel.grid.minor = element_blank(),
  panel.border = element_rect(colour='grey', fill=NA),
  panel.background = element_blank(),
  axis.line = element_line(size=.4)
)
mytheme2<-theme_bw() + theme(panel.spacing = unit(0, "lines"),
                             strip.background = element_rect(fill="white"), strip.text = element_text(size=16),
                             # alternatives kept from earlier iterations:
                             # axis.title.x=element_blank(), axis.ticks.x =element_blank(), # axis.text.x =element_blank(),
                             # axis.text.x = element_text(size=16,colour="black",hjust=1,vjust=.5),
                             # axis.title.y=element_text(size=16),axis.text.y=element_text(size=16,colour="black"),
                             title=element_text(size=16),
                             axis.title=element_text(size=16),axis.text=element_text(size=16,colour="black"),
                             legend.text=element_text(size=16), legend.title =element_text(size=16) )
# shared palette (orange, blue, green, black)
cols=c("#d95f02","#1f78b4","#33a02c","#000000") # '#1b9e77','#d95f02','#7570b3 # "#a6cee3" ,"#7570b3"
args<-commandArgs(TRUE)
# From: P:/lg-dyslexia-exomes/working/analysis/Gene_expression_AllenBrainAtlas
#----------------------------------------------------------------------
# Set working directory, paths and files
#----------------------------------------------------------------------
# Define directories, dependent on system (Windows vs. cluster mount).
# NOTE(review): these paths are site-specific network mounts; the script is
# not portable outside that environment.
if (Sys.info()['sysname']=='Windows') {dir="P://workspaces/"} else {dir="/data/workspaces/lag/workspaces/"}
setwd(paste(dir,"/lg-dyslexia-exomes/working/",sep="")) # this is where the data was initially downloaded
allen_path<-paste(getwd(),'/expression_data/AllenBrainAtlas/genes',sep="")
output_plots_path<-paste(dir,'lg-ukbiobank/working_data/amaia/genetic_data/PT/allen_brain/',sep="")
output_tables_path<-paste(dir,'lg-ukbiobank/working_data/amaia/genetic_data/PT/allen_brain/',sep="")
#----------------------------------------------------------------------
# Probe annotation from one Allen Brain Atlas donor; `a` points to the
# candidate gene list for the Planum Temporale (PT) analysis.
probes<-read.csv(paste(dir,"/lg-dyslexia-exomes/working/expression_data/AllenBrainAtlas/H0351.1009/probes.csv",sep=""))
# a<-paste(dir,"/lg-dyslexia-exomes/working/working_data/expression/genes_ITIH5.txt",sep="")
a<-paste(dir,"/lg-dyslexia-exomes/working/working_data/expression/genes_PT.txt",sep="")
# cand_genes<-read.table(a,stringsAsFactors=FALSE)
# cand_genes<-c("ITIH5","SLC35E2A","NADK","TMEM52","BOK","C19orf12","PPP1R14A","AC011479.2") # SPINT2" coded as AC011479.2
cand_genes<-c("ITIH5","BOK","BOK-AS1","ING5","DTYMK","AC114730.11")
#----------------------------------------------------------------------
# Read generated table (including all genes in cand_genes) from candidate_genes_allen_expression.R
#----------------------------------------------------------------------
# `date` selects which previously-generated expression table to load.
date<-"2019-02-20"# "2018-11-30" #"2018-10-26"# "2018-06-08"
table_name<-paste(getwd(),"/working_data/expression/", tail(unlist(strsplit(a,"/")),n=1),"_microarrayExp_AllenBrainAtlas_",date,".csv", sep="")
# table_name<-"working_data/expression/genes_ITIH5.txt_microarrayExp_AllenBrainAtlas_2018-06-08.csv"
all_genes<-read.csv(table_name,quote = "")
# NOTE(review): result not stored/printed when sourced; interactive check only.
table(all_genes$Gene_name)
# PAC
# Contains a present/absent flag which indicates whether the probe's
# expression is well above background. It is set to 1 when both of the
# following conditions are met.
#
# 1) The 2-sided t-test p-value is lower than 0.01, (indicating the mean
# signal of the probe's expression is significantly different from the
# corresponding background).
# 2) The difference between the background subtracted signal and the
# background is significant (> 2.6 * background standard deviation).
#----------------------------------------------------------------------
# Keep only probes flagged present (PAC == 1), then average expression per
# gene x brain structure across probes and samples.
all_genes_exp<-subset(all_genes,PAC==1)
all_genes_exp2<- all_genes_exp %>% group_by(Gene_name,Structure_acronym,Structure_info) %>% summarise(avgExp = mean(Expression_PAC))
# For each gene: the structures with max/min average expression plus the
# six-number summary of its per-structure averages.
all_genes_avgExp_list<-lapply(as.character(unique(all_genes_exp2$Gene_name)),function(x){
  g<-subset(all_genes_exp2,Gene_name==x)
  maxAcr<-as.character(g$Structure_acronym[which(g$avgExp==max(g$avgExp))])
  maxReg<-as.character(g$Structure_info[which(g$avgExp==max(g$avgExp))])
  minAcr<-as.character(g$Structure_acronym[which(g$avgExp==min(g$avgExp))])
  minReg<-as.character(g$Structure_info[which(g$avgExp==min(g$avgExp))])
  s<-cbind(Gene=x,Region_maxExpr=maxReg,Acr_maxExpr=maxAcr,Region_MinExpr=minReg,Acr_MinExpr=minAcr,t(summary(g$avgExp)))
  return(s)
})
all_genes_avgExp<-do.call("rbind",all_genes_avgExp_list)
write.csv(all_genes_avgExp,file=paste(output_tables_path,"all_genes_avgExp.csv",sep="/"),row.names = FALSE)
for (ngene in c("ITIH5","BOK","DTYMK")){
gene<-subset(all_genes,Gene_name==ngene&PAC==1) # if PAC=0 -> not diff to backaground
# lm_gene<-lm(Expression~ Probe_id + Subject + Structure_acronym + Hemis, data=subset(gene))
# lm_gene2<-lm(Expression~ Probe_id + Subject + Structure_acronym*Hemis, data=subset(gene))
# anova(lm_gene,lm_gene2)
#
# lmm_gene<-lmer(Expression~ + (1|Subject) + Probe_id + Structure_acronym + Hemis, data=subset(gene))
# summary(lmm_gene)
# lmm_gene2<-lmer(Expression~ + (1|Subject) + Probe_id + Structure_acronym*Hemis, data=subset(gene))
#
# anova(lmm_gene,lmm_gene2)
# rm(lmm_gene,lmm_gene2)
#
# # only temporal regions
# temp<-subset(gene,Region=="Temporal lobe")
# lm_gene_tmp0<-lm(Expression ~ Probe_id + Subject + Structure_acronym + Hemis, data=temp)
# lm_gene_tmp<-lm(Expression ~ Probe_id + Subject + Structure_acronym + Hemis, data=temp)
# lm_gene_tmp2<-lm(Expression ~ Probe_id + Subject + Structure_acronym*Hemis, data=temp)
# anova(lm_gene_tmp,lm_gene_tmp2)
#
# lmm_gene_tmp<-lmer(Expression~ + (1|Subject) + Probe_id + Structure_acronym + Hemis, data=temp)
# lmm_gene_tmp2<-lmer(Expression~ + (1|Subject) + Probe_id + Structure_acronym*Hemis, data=temp)
# anova(lmm_gene_tmp,lmm_gene_tmp2)
# # clean all models
# rm(lm_gene_tmp0, lm_gene_tmp,lm_gene_tmp2)
# rm(lmm_gene_tmp,lmm_gene_tmp2)
# rm(temp)
#----------------------------------------------------------------------
# Plot all regions
#----------------------------------------------------------------------
for (r in unique(gene$Region)){
g_r<-ggplot(data=subset(gene,Region==r),aes(x=as.factor(Probe_id),y=Expression_PAC,colour=factor(Hemis))) + #geom_quasirandom(dodge.width=1) +
geom_boxplot(alpha = 0.2,outlier.shape = NA) +
facet_wrap(~ Structure_acronym,scales="free_x") + mytheme2 + #coord_cartesian(ylim=c(3,10)) +
labs(x=NULL,y="Expression * PAC", title= r ) +
theme(legend.position="none") + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
scale_color_manual(values=cols[2:3]) +
theme(legend.position="bottom",legend.title=element_blank())
assign(gsub(" ","_",paste(ngene,"_",r,sep="")),g_r)
rm(g_r)
}
#----------------------------------------------------------------------
# candidate region: Planum Temporale
#----------------------------------------------------------------------
# # all regions
# x0<-lmer(Expression~ (1|Subject) + Probe_id + Structure_acronym , data=gene)
# x1<-lmer(Expression~ (1|Subject) + Probe_id + Structure_acronym + Hemis, data=gene)
# anova(x0,x1)
# # check interaction with region, to see if effect in PT is specific
# x2<-lmer(Expression~ (1|Subject) + Probe_id + Structure_acronym * Hemis, data=gene)
# anova(x1,x2)
# pt only
# Planum temporale (PLT) only: test for a hemisphere effect on expression
# via a likelihood-ratio test between nested mixed models (random intercept
# per subject, fixed probe effect, with/without Hemis).
pt <- subset(gene, Structure_acronym == "PLT")
z0 <- lmer(Expression ~ (1|Subject) + Probe_id, data = pt)          # null model
z <- lmer(Expression ~ (1|Subject) + Probe_id + Hemis, data = pt)   # + hemisphere
lrt <- anova(z0, z)  # compute the LRT once and reuse it below
print(lrt)
summary(z)
# Extract the test statistic and p-value BEFORE discarding the models.
# BUG FIX: the original called rm(z0, z) first and then re-ran anova(z0, z),
# which failed because the objects no longer existed.
chisq_v <- round(lrt$`Chisq`[2], digits = 2)
p_v <- format(lrt$`Pr(>Chisq)`[2], digits = 4)
note <- paste("Chisq(1) =", chisq_v, "; p-value=", p_v, sep = "")
rm(z0, z)
#
# Shorten subject IDs and strip the "probe" prefix for display.
pt$sample <- gsub("sampleH0351.", "", pt$Subject)
pt$Probe_id <- gsub("probe", "", pt$Probe_id)
# combine with probes, to get correct name
# NOTE(review): merge() has no 'stringAsFactors' argument (also misspelled
# in the original); it was silently swallowed by ..., so it is dropped here.
pt <- merge(pt, probes, by.x = "Probe_id", by.y = "probe_id", all.x = TRUE)
# plots for planum temporale: per-probe boxplots of Expression_PAC coloured
# by hemisphere, with individual samples overlaid as quasirandom points.
# Commented-out layers (text labels, faceting) are kept as in the original.
g<-ggplot(data=pt,aes(x=as.factor(probe_name),y=Expression_PAC,colour=factor(Hemis))) +
geom_boxplot(fill="white",outlier.colour = NA, position = position_dodge(width=0.9)) +
geom_quasirandom(dodge.width=0.9, aes(colour=factor(Hemis)),size=3,alpha=0.4) +
# geom_text(label=note,position=2,size=3) +???
# facet_grid(.~ Structure_acronym,scales="free_x") +
mytheme2 +
# Fix the visible y-range without dropping data from the boxplot statistics.
coord_cartesian(ylim=c(0,10)) +
labs(x=NULL,y="Expression * PAC", title= ngene ) +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
scale_color_manual(values=cols[2:3]) +
theme(legend.position="bottom",legend.title=element_blank())
# g_pt<-ggplot(data=pt,aes(x=as.factor(probe_name),y=Expression_PAC,colour=factor(Hemis))) +
# geom_boxplot(fill="white",outlier.colour = NA, position = position_dodge(width=0.9),alpha=0.5) +
# geom_text(aes(label=sample),position=position_jitterdodge(dodge.width=1),size=3,alpha=1,angle=20) +
# # facet_grid(.~ Structure_acronym,scales="free_x") +
# mytheme2 +
# coord_cartesian(ylim=c(0,10)) +
# labs(x=NULL,y="Expression * PAC", title= ngene ) +
# theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
# scale_color_manual(values=cols[2:3]) +
# theme(legend.position="bottom",legend.title=element_blank())
# Store the per-gene objects under names like "<gene>_g" and save the plots.
assign(paste(ngene, "_g", sep = ""), g)
# BUG FIX: the original used paste() with its default sep = " ", producing an
# object name containing a space ("<gene> _pt_data"); use sep = "" like the
# other assign() calls.
assign(paste(ngene, "_pt_data", sep = ""), pt)
ggsave(g, file = paste(output_plots_path, ngene, "_PT_summary.png", sep = ""), height = 6, width = 6)
# BUG FIX: g_pt is only created by a block that is currently commented out
# (see above), so unconditional references to it raised an error; guard them.
if (exists("g_pt")) {
assign(paste(ngene, "_g_pt", sep = ""), g_pt)
ggsave(g_pt, file = paste(output_plots_path, ngene, "_PT_summary_samples.png", sep = ""), height = 6, width = 8)
}
# clean up loop-local objects
rm(g, pt)
if (exists("g_pt")) rm(g_pt)
rm(gene)
}
rm(ngene)
#----------------------------------------------------------------------
# Plot temporal lobe
#----------------------------------------------------------------------
ITIH5_Temporal_lobe
BOK_Temporal_lobe
DTYMK_Temporal_lobe
# PT
library(gridExtra)
ITIH5_BOK_pt<-grid.arrange(ITIH5_g + theme(axis.text.x = element_text(angle = 0, hjust = 0.5)) ,
BOK_g + theme(axis.text.x = element_text(angle = 0, hjust = 0.5)),
ncol=2,widths=c(3,2))
ITIH5_BOK_DTYMK_pt<-grid.arrange(ITIH5_g + theme(axis.text.x = element_text(angle = 0, hjust = 0.5)) ,
BOK_g + theme(axis.text.x = element_text(angle = 0, hjust = 0.5)),
DTYMK_g + theme(axis.text.x = element_text(angle = 0, hjust = 0.5)),
ncol=3,widths=c(3,2,2))
ggsave(ITIH5_BOK_pt,file=paste(output_plots_path,"ITIH5_BOK","_PT_summary.png",sep=""),width=15,height=7)
ggsave(ITIH5_BOK_DTYMK_pt,file=paste(output_plots_path,"ITIH5_BOK_DTYMK","_PT_summary.png",sep=""),width=20,height=7)
|
57167c9a7aeb6a9fd9482f39d2f8b1f13837a886
|
fe4aadb9b9d7f2f2e05aa17d9f438364d52fa7fe
|
/tests/testthat/test-source_web_tool_scripts.R
|
09f1336bc50973a19551a78443567a88395490c8
|
[
"MIT"
] |
permissive
|
fiona511/PACTA_analysis
|
32d2ba6e871f648ee7f5f31b6e9d0a53d0c37c5b
|
6b4684868afc9c90f9625177b5e18d919cefab4e
|
refs/heads/master
| 2023-03-07T18:45:56.103190
| 2021-02-18T12:23:38
| 2021-02-18T12:23:38
| 340,041,597
| 0
| 0
|
NOASSERTION
| 2021-02-18T12:21:48
| 2021-02-18T12:21:47
| null |
UTF-8
|
R
| false
| false
| 255
|
r
|
test-source_web_tool_scripts.R
|
test_that("stop_on_error stops on error", {
  # Exit codes 0 and -1 must be accepted silently (expect_error(..., NA)
  # asserts that no error is raised).
  for (ok_code in c(0, -1)) {
    expect_error(stop_on_error(exit_code = ok_code), NA)
  }
  # Positive exit codes must raise an error whose message matches "error".
  for (bad_code in c(1, 99)) {
    expect_error(stop_on_error(exit_code = bad_code), "error")
  }
})
|
03d74df81d798c96fbb35428bb2a39c0194dd070
|
bf61596f18dc48b2e6bd6ae6ccd5e9aaa1e575b1
|
/RFILES/tab2_png.R
|
79e9bd4e8d87eb0b9a93e9ccc0828b5fb8fe6a85
|
[] |
no_license
|
rfaridi/bd_remittance
|
0dee712fbdb7f3fb7d96f7aee6990a3f51fce52d
|
bffd63becb8679de7cd13e7b2e795b836dc48b98
|
refs/heads/master
| 2021-01-13T14:52:48.242693
| 2016-12-14T16:18:20
| 2016-12-14T16:18:20
| 76,474,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 352
|
r
|
tab2_png.R
|
# Render Figures/tab2.png: a LaTeX-typeset table of the FY2013-FY2015
# remittance figures, compiled to DVI and rasterised to PNG.
load(file = "./RData/remitFin.RData")
source("functions.R", echo = FALSE)
library(dplyr)

# Keep only the three fiscal-year columns of interest.
tab2 <- remit.fin %>%
    select(FY2013:FY2015)

# Typeset the table; row grouping (cc.rg / cc.g) is defined in functions.R.
# All three columns are centred.
tab2_tex <- latex(tab2,
                  col.just = rep("c", 3),
                  rowlabel = "Countries",
                  rowlabel.just = "c",
                  rgroup = cc.rg,
                  n.rgroup = cc.g,
                  booktabs = TRUE)
dvipng.dvi(dvi.latex(tab2_tex), file = "./Figures/tab2.png")
|
e77eec80313ab8c3c2245be02ee45da2cafc673d
|
4575fac146c9e774b29c8f1e34fc8bfa83a0d747
|
/script/Union.manhattan.r
|
d228ef52bf12b0dda1c2322f8d80b93d2b788692
|
[] |
no_license
|
yywan0913/Tibetan_Han
|
db591b41ded4a997a494ca5587657808d19c5f44
|
18bd32db2ccda4c7e34ca32819392998201de3a2
|
refs/heads/master
| 2023-05-04T01:52:06.119935
| 2021-05-19T06:00:19
| 2021-05-19T06:00:19
| 368,444,043
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,521
|
r
|
Union.manhattan.r
|
# Union Manhattan plot: genome-wide Fst for SVs, SNPs and indels (top panel),
# zoomed views of candidate regions (middle) and per-region haplotype panels
# (bottom). The actual plotting routines are loaded from source/.
library(data.table)
library(reshape2)

# args[1]: output file (.pdf or .png); args[2]: number of candidate regions.
args <- commandArgs(TRUE)
outpdf <- args[1]
region.num <- as.numeric(args[2])
# BUG FIX: a missing or non-numeric second argument yields NA, never NULL,
# so the original `if (is.null(region.num))` default could never trigger.
if (is.na(region.num)) region.num <- 3

# Input files (paths relative to the working directory).
chromfile <- "input/chrom.len"
svfile <- "input/sv.fst.fordraw.xls"
snpfile <- "input/snp.fst.fordraw.filter.xls"
indelfile <- "input/indel.fst.fordraw.xls"
#outpdf = "union.manhattan.pdf"
genegfffile <- "input/hg19.bed.gtf.gz"
genegff <- gettextf("gzip -dc %s", genegfffile)  # shell pipeline read by fread
posfile <- "pos.txt"

options(stringsAsFactors = FALSE)

# Load chromosome lengths, the three per-variant Fst tables and annotation.
chrom.data <- read.table(chromfile, header = FALSE, sep = "\t")
data1 <- as.data.frame(fread(svfile, header = TRUE, sep = "\t"))     # SV Fst
data2 <- as.data.frame(fread(snpfile, header = TRUE, sep = "\t"))    # SNP Fst
data3 <- as.data.frame(fread(indelfile, header = TRUE, sep = "\t"))  # indel Fst
gene.data <- as.data.frame(fread(genegff, header = FALSE, sep = "\t"))
gene.data[, 1] <- gsub("chr", "", gene.data[, 1])  # drop the "chr" prefix
pos.data <- read.table(posfile, header = FALSE, sep = "\t", fill = FALSE)

var.color <- c("#E64B35", "#00A087", "#3C5488")  # SV / SNP / indel colours
pch <- 15:17

# Output device is chosen from the file extension.
# NOTE(review): the unescaped dot matches any character before "png"/"pdf";
# kept as-is for backward compatibility.
if (grepl('.png$', outpdf)) png(outpdf, width = 24, height = 30, units = "in", res = 400)
if (grepl('.pdf$', outpdf)) pdf(outpdf, width = 24, height = 30)

# Panel layout depends on how many candidate regions are drawn.
if (region.num == 3) {
  layout(mat = matrix(c(1, 1, 1, 2, 3, 4, 5, 7, 9, 6, 8, 10), ncol = 3, byrow = TRUE),
         height = c(10, 10, 2, 8))
}
if (region.num == 2) {
  layout(mat = matrix(c(1, 1, 2, 3, 4, 6, 5, 7), ncol = 2, byrow = TRUE),
         height = c(11, 11, 2, 8))
}

# Top panel: genome-wide Manhattan plot.
source('source/source.up.manhattan.r')
par(mar = c(3, 5, 3, 4))
print(11)  # progress marker (kept from original)
up.manhattan(chrom.data, sv.data = data1, snp.data = data2, indel.data = data3,
             col = var.color, pos.data = pos.data)

# Middle panels: one zoomed view per candidate region.
source('source/source.median.manhattan.r')
par(mar = c(3, 5, 3, 4))
for (i in seq_len(region.num)) {
  pos.datai <- pos.data[i, ]
  print('22')  # progress marker (kept from original)
  median.manhanttan(pos.datai, gene.data = gene.data, sv.data = data1,
                    snp.data = data2, indel.data = data3,
                    color = var.color, pch = pch)
}

# Bottom panels: haplotype display for 30 random individuals per population.
source('source/source.bottom.r')
AL <- "input/AL"
HT <- "input/HT"
Tibetan <- scan(AL, what = "")
Han <- scan(HT, what = "")
gene.col <- c("#009CFFFF", "#FF5500FF", "#D20000FF", "#9E819BFF",
              "#763400FF", "#FFC600FF", "#54FFAAFF", 'yellow')
set.seed(123)  # reproducible sampling of individuals
for (i in seq_len(region.num)) {
  posi.file <- pos.data[i, 7]  # per-region genotype file path (column 7)
  data <- read.table(posi.file, sep = "\t", check.names = FALSE, header = TRUE)
  sampleAL <- intersect(Tibetan, colnames(data))
  sampleHT <- intersect(Han, colnames(data))
  # NOTE(review): variable names say "20" but 30 individuals are drawn;
  # names kept so downstream expectations are unchanged.
  sample20AL <- sample(sampleAL, 30)
  sample20HT <- sample(sampleHT, 30)
  matchsampleAL <- match(sample20AL, colnames(data))
  matchsampleHT <- match(sample20HT, colnames(data))
  data.AL <- data[, matchsampleAL]
  data.HT <- data[, matchsampleHT]
  data.info <- data[, 1:3]
  PlotRegion(data.info, data.AL, data.HT, gene.data, gene.col)
}
dev.off()
|
a3cc65cea4b64566415b77bb248eb09be1315101
|
89bd53b22672cbe74e727e8e45defc891af1052d
|
/oldRcodes/usingdeltas/results.gopher/allsol.R
|
92c6e2c66ffcec767f2fab86d0e49458e948589e
|
[] |
no_license
|
hbhat4000/sdeinference
|
a62e8f5ddc6bbc913dbc8dc4c210ff30cf16143f
|
14db858c43a1b50001818399ef16e74ae926f51b
|
refs/heads/master
| 2020-04-04T05:30:05.922893
| 2018-07-11T22:59:38
| 2018-07-11T22:59:38
| 54,491,406
| 8
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 680
|
r
|
allsol.R
|
# Candidate solutions: columns 1-3 are the estimated parameters of each run,
# columns 4-6 record the run settings that produced them.
allsol <- rbind(
  c(1.020837, 0, 1.404953, 31, 0.05,  300),
  c(1.041597, 0, 1.430114, 30, 0.02,  300),
  c(1.048930, 0, 1.438882, 34, 0.01,  300),
  c(0.671489, 0, 1.143841, 31, 0.01,  100),
  c(1.052622, 0, 1.443300, 34, 0.005, 300),
  c(0.673062, 0, 1.146046, 28, 0.005, 100),
  c(1.054888, 0, 1.445993, 35, 0.002, 300),
  c(0.674330, 0, 1.147694, 26, 0.002, 100)
)

# Euclidean distance of each run's parameter estimate from the true solution.
truesol <- c(0.5, 0, 1)
rmserror <- apply(allsol[, 1:3], 1, function(est) sqrt(sum((truesol - est)^2)))
|
a5ea234e2421626e2f4fa1f8110e6c01506ea11b
|
6cbc43051fa0df8e06c91391966443bd640b1fcb
|
/tests/testthat.R
|
371733308754e8aab1e48f885da7541b0d32f593
|
[] |
no_license
|
sboysel/Rgitbook
|
30da6a2dc408fc9f49e882a53ca182711830fcb0
|
2192bb6cecbec7a72638b0f8a4062b81fd6e68fe
|
refs/heads/master
| 2020-12-28T09:29:58.319512
| 2016-01-18T06:23:46
| 2016-01-18T06:23:46
| 49,751,718
| 0
| 0
| null | 2016-01-16T00:06:37
| 2016-01-16T00:06:37
| null |
UTF-8
|
R
| false
| false
| 60
|
r
|
testthat.R
|
# Standard testthat entry point: load the package under test and execute
# every test file in tests/testthat/ (run automatically by R CMD check).
library(testthat)
library(Rgitbook)
test_check("Rgitbook")
|
eec61d91b440a83564e263feee94d2f59ea915d4
|
2db9f112c91b32b183f96c52781cb3d72fc56ed5
|
/wordcloud.R
|
dd3e180e7f5cbd81815e80f06c0ed069ee456608
|
[] |
no_license
|
ravikrcs/WordCloud
|
4e63a3ed5a81ae35e9101966b10576a62f6f845a
|
e0ef93e036726ce02d6f48285b825dbe92d55ca5
|
refs/heads/master
| 2020-05-06T13:58:47.625907
| 2019-04-08T14:27:24
| 2019-04-08T14:27:24
| 180,166,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 880
|
r
|
wordcloud.R
|
# Build a word cloud from a plain-text file using the tm text-mining pipeline.
# NOTE(review): installing packages from inside a script modifies the user's
# library on every run; kept for backward compatibility, but normally done
# once, interactively.
install.packages("tm")
install.packages("wordcloud")
install.packages("RColorBrewer")
library(tm)
library(wordcloud)
library(RColorBrewer)

text_file <- "C:\\Users\\raj\\Downloads\\v74i07.txt"
textfile <- readLines(text_file)

# Clean the corpus: whitespace, case, numbers, punctuation and stop words.
file1 <- Corpus(VectorSource(textfile))
file2 <- tm_map(file1, stripWhitespace)
# BUG FIX: base functions such as tolower must be wrapped in
# content_transformer() so that tm_map returns proper corpus documents.
file2 <- tm_map(file2, content_transformer(tolower))
file2 <- tm_map(file2, removeNumbers)
file2 <- tm_map(file2, removePunctuation)
file2 <- tm_map(file2, removeWords, stopwords("english"))
file2 <- tm_map(file2, removeWords, c("and","the","for","are","can","also","with"))

# Term frequencies across the corpus, sorted most to least frequent.
tdm_file2 <- TermDocumentMatrix(file2)
TDM1 <- as.matrix(tdm_file2) # Convert this into a matrix format
v <- sort(rowSums(TDM1), decreasing = TRUE) # Gives you the frequencies for every word
summary(v)
# BUG FIX: the original str(names(v), v) passed the frequency vector as
# str()'s max.level argument; inspect the named frequency vector instead.
str(v)

wordcloud(names(v), v, scale=c(5,0.5), max.words=100, colors=brewer.pal(3,"Dark2"))
|
da8413887eabd196be1bce614830411659c839dd
|
5140c5ba4359cd71640c71db0361630d03c95b82
|
/CRISPR/gQTL_viewer/global.R
|
39eadf6144a874242707d7ec6108805d87507465
|
[] |
no_license
|
scalefreegan/steinmetz-lab
|
ec672b5ae254f4203368a9b9f062c8c111e79edb
|
f88200afff4adf81cc18c65e42d177f87104c1d7
|
refs/heads/master
| 2020-04-04T07:38:52.502810
| 2018-08-07T11:51:44
| 2018-08-07T11:51:44
| 34,375,573
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,968
|
r
|
global.R
|
# Shiny "global.R" for the gQTL viewer: runs once at app start-up, loading
# packages and shared data objects into the global environment.

# Detect whether we are on a known development machine; the data paths at the
# bottom of this file are switched on this flag.
.local = FALSE
if (system("hostname",intern=T) == "mac-steinmetz55.embl.de" || system("hostname",intern=T) == "interzone.local") {
print("yes")
.local = TRUE
} else {
print(system("hostname"))
}
# Import packages ---------------------------------------------------
library(shiny)
library(dplyr)
library(reshape2)
library(DT)
library(GenomicRanges)
library(ggplot2)
library(stringr)
library("BSgenome.Scerevisiae.UCSC.sacCer3")
library("TxDb.Scerevisiae.UCSC.sacCer3.sgdGene")
library("org.Sc.sgd.db")
library(rtracklayer)
library(jsonlite)
library(RCurl)
library(httr)
# NOTE(review): this disables SSL peer verification for all subsequent httr
# requests -- presumably required for an internal server; confirm, as it
# weakens transport security globally.
set_config( config( ssl_verifypeer = 0L ) )
library(VariantAnnotation)
# Global variables ---------------------------------------------------
# Transcript-ID -> gene-name mapping for sacCer3.
# NOTE(review): this assignment reuses the name of the id2name() function,
# shadowing it for the rest of the session.
id2name = id2name(TxDb.Scerevisiae.UCSC.sacCer3.sgdGene)
type = "mlod"
# gene name map (x and keys are reused as scratch variables below)
x = org.Sc.sgdGENENAME
keys <- mappedkeys(x)
# Convert to a list
gname <- as.list(x[keys])
# alias map
x = org.Sc.sgdALIAS
keys <- mappedkeys(x)
# Convert to a list
dname <- as.list(x[keys])
# long description map
x = org.Sc.sgdDESCRIPTION
keys <- mappedkeys(x)
# Convert to a list
dname_long <- as.list(x[keys])
# Web resources ---------------------------------------------------
#addResourcePath('data', "/var/www2/html/mQTL/data")
# Misc material ---------------------------------------------------
# composite rda
# NOTE(review): this first DDIR assignment is dead code -- it is
# unconditionally overwritten by the "tmp resource location" block below.
if (.local) {
DDIR = "/Users/brooks/Documents/git/steinmetz_local/genphen/metabolome"
} else {
DDIR = "/g/steinmetz/brooks/git/steinmetz-lab/CRISPR"
}
# tmp resource location / will be changed
if (.local) {
DDIR = "/Users/brooks/Documents/git/steinmetz-lab/CRISPR"
VDIR = "/Users/brooks/Documents/steinmetz_local/yeast/genomes/S288CxYJM789"
} else {
DDIR = "/g/steinmetz/brooks/git/steinmetz-lab/CRISPR"
VDIR = "/g/steinmetz/brooks/yeast/genomes/S288CxYJM789"
}
# gQTL table from a journal supplement; the first five lines are skipped
# (presumably header text -- confirm against the file).
gQTL = read.delim(file.path(DDIR, "data/journal.pgen.1003803.s016.TXT"),sep="\t", skip=5)
# Loads the YJM789 SNP/indel annotation objects into the global environment.
load(file.path(VDIR,"yjm789snpsANDindels_info.rda"))
|
2869153596665a48e20afd8b8a4fff4bf8731de9
|
8a3f1e19b810de50cba01cf5359d84f09b85a04e
|
/Macroeconometrics/VAR-Oct 10.R
|
091aa1237d7c7e43e86e6c56655947688864590a
|
[] |
no_license
|
naafeysardar/sardar
|
cd7343e25aea1331568df4874d950473a80ad60b
|
8d17b285ee90df3e490e1b8f2461d70f9bd11310
|
refs/heads/master
| 2021-07-21T03:28:07.120860
| 2020-08-21T16:24:08
| 2020-08-21T16:24:08
| 207,188,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,022
|
r
|
VAR-Oct 10.R
|
# Teaching script: VAR model of unemployment (u) and inflation (inf),
# monthly data starting January 1948. Bare expressions auto-print when
# the script is run interactively / with echo.
library(vars)
data.raw <- read.csv("u-inf.csv", header = TRUE)
dataset <- ts(data.raw, start = c(1948,1), frequency = 12)
u <- dataset[,"u"]
inf <- dataset[,"inf"]
# Fit a VAR(1) and print the coefficient estimates.
varfit <- VAR(dataset, p=1)
varfit
# Make Forecasts (12 months ahead)
varpred <- predict(varfit, n.ahead = 12)
varpred
# Inflation Forecast: full forecast table, then just the point forecasts.
varpred$fcst$"inf"
varpred$fcst$"inf"[, "fcst"]
library(tstools)
getVarForecasts(varfit, var="inf", n=1:12, start = c(2018, 8))
plot(getVarForecasts(varfit, var="inf", n=1:12, start = c(2018, 8)))
# Inflation forecast pushed back to the mean.
plot(getVarForecasts(varfit, var="u", n=13:36, start = c(2019, 8)))
# u is non-stationary, which is why it doesn't move back to the mean.
# Refit with two lags and compare the 12-month inflation forecasts.
varfit2 <- VAR(dataset, p=2)
getVarForecasts(varfit2, var="inf", n=1:12, start = c(2018, 8))
# Test for no. of lags via information criteria, then refit using the
# Schwarz (SC/BIC) criterion.
VARselect(dataset, lag.max=12)
varfit <- VAR(dataset, ic="SC", lag.max=12)
# 2-Step Ahead Forecast: single-equation regression on twice-lagged inf and u.
inf2 <- tsreg(inf, lags(ts.combine(inf, u), 2))
inf2
last(inf)
last(u)
# Manual check of the forecast: intercept + coefficients times the last
# observations -- presumably 0.2 = last(inf) and 3.9 = last(u); TODO confirm
# against the values printed just above.
0.1239+0.4460*0.2+0.0052*3.9
# Presumably annualizing (x12) the monthly figure computed above -- confirm.
0.23338*12
|
65dff45e03d6e8f258193e8dadb7bed5ab51acb2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gdpc/examples/gdpc.Rd.R
|
1add01c4834ee1d51ce5b3e5425da35f4d96d6c5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 626
|
r
|
gdpc.Rd.R
|
library(gdpc)
### Name: gdpc
### Title: Generalized Dynamic Principal Components
### Aliases: gdpc
### Keywords: ts
### ** Examples

# Simulate n_obs x n_ser data driven by a single common factor f with
# sinusoidal loadings at lags 0 and 1, plus i.i.d. Gaussian noise u.
# FIX: the original used `T` as a variable name, shadowing the base alias
# for TRUE; renamed to n_obs (and m -> n_ser) to remove the footgun.
# The RNG call order (rnorm(n_obs + 1) first, then rnorm(n_obs * n_ser))
# is preserved so results under set.seed(1234) are unchanged.
n_obs <- 200 # length of series
n_ser <- 500 # number of series
set.seed(1234)
f <- rnorm(n_obs + 1)
x <- matrix(0, n_obs, n_ser)
u <- matrix(rnorm(n_obs * n_ser), n_obs, n_ser)
for (i in seq_len(n_ser)) {
  x[, i] <- 10 * sin(2 * pi * (i / n_ser)) * f[1:n_obs] +
    10 * cos(2 * pi * (i / n_ser)) * f[2:(n_obs + 1)] + u[, i]
}
fit <- gdpc(x, k = 1) # find first DPC with one lag
fit
par(mfrow = c(1, 2)) # plot loadings at lags 0 and 1 side by side
plot(fit, which = 'Loadings', which_load = 0, xlab = '', ylab = '')
plot(fit, which = 'Loadings', which_load = 1, xlab = '', ylab = '')
|
aa8ceb871df9a24595f09f7f4cbe1290a8d8116c
|
1b846992a7d75f424df987d7d77a844fa588d389
|
/run_Analysis.R
|
a05441647e1dd253d21121f191b155686846304f
|
[] |
no_license
|
coryjpiette/CleanData_FinalAssignment
|
850c624b3d052572e196c7b3edf6310024213ada
|
466528093630cacd1ea307670967b05829a587d5
|
refs/heads/master
| 2021-01-08T05:30:04.290824
| 2020-02-23T14:33:02
| 2020-02-23T14:33:02
| 241,926,967
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,132
|
r
|
run_Analysis.R
|
# Getting & Cleaning Data course project:
# 1. read the UCI HAR (Samsung) training and test sets,
# 2. label columns and keep only the mean/std features,
# 3. attach subject IDs and descriptive activity names,
# 4. merge train + test (rows remain identifiable via 'datatype'), and
# 5. write the per-subject, per-activity feature means to a text file.

# All input files live under one directory; build the paths from it once
# (the resulting strings are identical to the original hard-coded ones).
data_dir <- "C:/Users/shubhayush/Documents/coursera/data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset"

# reading all the files into R
training_data <- read.table(file.path(data_dir, "train/X_train.txt"))
test_data <- read.table(file.path(data_dir, "test/X_test.txt"))
variable_names <- read.table(file.path(data_dir, "features.txt"))
training_activity_codes <- read.table(file.path(data_dir, "train/y_train.txt"))
test_activity_codes <- read.table(file.path(data_dir, "test/y_test.txt"))
activity_names <- read.table(file.path(data_dir, "activity_labels.txt"))
subject_train <- read.table(file.path(data_dir, "train/subject_train.txt"))
subject_test <- read.table(file.path(data_dir, "test/subject_test.txt"))

# giving the column names to the training and test data as per variable_names
variable_names_select <- variable_names[, 2]
activity_labels <- activity_names[, 2]
colnames(training_data) <- variable_names_select
colnames(test_data) <- variable_names_select
colnames(training_activity_codes) <- "activitylabel"
colnames(test_activity_codes) <- "activitylabel"

# collecting and sorting the columns with mean and standard deviation
ColstoSelect <- sort(c(grep("mean", variable_names[, 2]), grep("std", variable_names[, 2])))

# map the numeric activity codes (1..6) to descriptive names with a single
# lookup vector (replaces twelve repetitive replacement statements; the
# resulting character columns are identical)
activity_map <- c("walking", "walkingUpstairs", "walkingDownstairs",
                  "sitting", "standing", "laying")
training_activity_codes$activitylabel <- activity_map[training_activity_codes$activitylabel]
test_activity_codes$activitylabel <- activity_map[test_activity_codes$activitylabel]

# adding columns of the person/subject who took part in the activity and type of activity
training_data <- cbind(subject_train, training_activity_codes$activitylabel, training_data)
test_data <- cbind(subject_test, test_activity_codes$activitylabel, test_data)

# selecting columns pertaining to mean and standard deviation in the dataset
training_data_select <- training_data[, c(1, 2, ColstoSelect + 2)]
test_data_select <- test_data[, c(1, 2, ColstoSelect + 2)]
colnames(training_data_select)[1:2] <- c("subjectId", "activity")
colnames(test_data_select)[1:2] <- c("subjectId", "activity")

# merging the train and test data. Herein train and test subjects can be identified
library(dplyr)
training_data_select <- mutate(training_data_select, datatype = "train")
test_data_select <- mutate(test_data_select, datatype = "test")
final_data <- merge(training_data_select, test_data_select, all = TRUE)
final_data <- arrange(final_data, subjectId)

# finding mean per subject and activity
# NOTE(review): summarise_at()/vars() are superseded in current dplyr
# (across() is preferred) but kept for compatibility with older versions.
final_data1 <- select(final_data, -datatype)
final_mean <- group_by(final_data1, subjectId, activity)
final_means <- summarise_at(final_mean, vars(-group_cols()), mean)
write.table(final_means, "Samsung_mean_data_MSP.txt", row.names = FALSE)
|
30552a01c4d2c9d24412f67a23b529c20cacda13
|
b9cfd96d6a96d0b8721b455bc22cf11503f83d2f
|
/man/getSaddlePointsOfGame.Rd
|
7542cc0ff4756707145ec7f26f791ddd43afcbb2
|
[] |
no_license
|
ChristophJW/solveTPZSG
|
e5fb41a1ab5b4fa4296f6cd93b9bc5a0cd2a7714
|
34926b7472bcd53455ec67557ee68d5e5acb9265
|
refs/heads/master
| 2021-04-09T14:02:48.473623
| 2018-04-19T07:14:06
| 2018-04-19T07:14:06
| 125,489,481
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 392
|
rd
|
getSaddlePointsOfGame.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function.R
\name{getSaddlePointsOfGame}
\alias{getSaddlePointsOfGame}
\title{Find the saddlepoints of the game.}
\usage{
getSaddlePointsOfGame(matrix, maxCol)
}
\arguments{
\item{matrix}{A matrix}
\item{maxCol}{A numeric}
}
\value{
The matrix of saddlepoints.
}
\description{
Find the saddlepoints of the game.
}
|
768b802ce6051f874d172bfed96af1930e57f0e3
|
6b3805d48275edd2b4431e5127206720fcc24008
|
/R/corregp.r
|
7e395acb655d6dc47c1230d64981bcbe350659f9
|
[] |
no_license
|
cran/corregp
|
d188c093d43270f0d943ccef2148ed8397f86d29
|
994df0352e125c2b1763e3c116ba24e33f7f5fd3
|
refs/heads/master
| 2021-05-04T11:23:05.651296
| 2018-03-14T09:20:10
| 2018-03-14T09:20:10
| 48,078,492
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 122,031
|
r
|
corregp.r
|
#' Functions and Methods for Correspondence Regression
#'
#' This package provides functions and methods for performing correspondence regression, i.e. the correspondence analysis of the
#' crosstabulation of a categorical variable Y in function of another one X, where X can in turn be made up of the combination of various
#' categorical variables.
#' Consequently, correspondence regression can be used to analyze the effects for a polytomous or multinomial outcome variable.
#' The central function in the package is \code{\link{corregp}}, which enables methods for printing, summarizing and plotting the output.
#' Additionally, there are functions for computing confidence intervals, ellipses or 3D ellipsoids (by means of bootstrapping).
#' @section Contents:
#' This package consists of the following datasets, functions, generics and methods (some internal functions are no longer exported in version 2):
#' \subsection{Datasets}{
#' \itemize{
#' \item{\code{\link{HairEye}} }{Hair and eye color of statistics students (data frame).}
#' \item{\code{\link{COMURE}} }{The use of linguistic variants in translations vs. non-translations and in six different registers.}
#' \item{\code{\link{AVT}} }{The use of linguistic variants in audio-visual translation (subtitles).}
#' \item{\code{\link{TSS}}} {The use of inflected or uninflected determiners in vernacular Belgian Dutch.}
#' }
#' }
#' \subsection{Functions}{
#' \itemize{
#' \item{\code{\link{ci}} }{A helper function to compute confidence intervals on the basis of a numeric vector.}
#' \item{\code{\link{corregp}} }{The basic function to perform correspondence regression. Typically, one starts here, and then one uses \code{print}, \code{summary}, \code{anova}, \code{screeplot} or \code{plot} methods.}
#' \item{\code{\link{corregplicate}} }{A function for repeated correspondence regressions with bootstrapping in order to handle large data sets.}
#' }
#' }
#' \subsection{Generics}{
#' \itemize{
#' \item{\code{\link{cint}} }{Compute confidence intervals.}
#' \item{\code{\link{cell}} }{Compute confidence ellipses.}
#' \item{\code{\link{cell3d}} }{Compute 3D confidence ellipsoids.}
#' \item{\code{\link{ciplot}} }{Plot confidence intervals.}
#' \item{\code{\link{pcplot}} }{Plot parallel coordinates.}
#' \item{\code{\link{agplot}} }{Plot an association graph.}
#' \item{\code{\link{plotag}} }{Plot an association graph.}
#' }
#' }
#' \subsection{Methods}{
#' \itemize{
#' \item{\code{\link{print.corregp}} }{Print the output of a correspondence regression.}
#' \item{\code{\link{summary.corregp}} }{Give a \code{summary} of a correspondence regression.}
#' \item{\code{\link{print.summary.corregp}} }{Print the \code{summary} of a correspondence regression.}
#' \item{\code{\link{screeplot.corregp}} }{Make a scree plot on the basis of the output of a correspondence regression.}
#' \item{\code{\link{anova.corregp}} }{Give an \code{anova} table on the basis of a correspondence regression.}
#' \item{\code{\link{print.anova.corregp}} }{Print an \code{anova} table on the basis of a correspondence regression.}
#' \item{\code{\link{coef.corregp}} }{Give the coefficients on the basis of a correspondence regression.}
#' \item{\code{\link{coefficients.corregp}} }{Give the coefficients on the basis of a correspondence regression.}
#' \item{\code{\link{fitted.corregp}} }{Give the fitted values on the basis of a correspondence regression.}
#' \item{\code{\link{fitted.values.corregp}} }{Give the fitted values on the basis of a correspondence regression.}
#' \item{\code{\link{residuals.corregp}} }{Give the residuals on the basis of a correspondence regression.}
#' \item{\code{\link{resid.corregp}} }{Give the residuals on the basis of a correspondence regression.}
#' \item{\code{\link{cint.corregp}} }{Compute confidence intervals on the basis of the output of a correspondence regression. Typically, this function is not so much used directly as it is called by a \code{ciplot.corregp} command.}
#' \item{\code{\link{ciplot.corregp}} }{Plot confidence intervals on the basis of the output of a correspondence regression.}
#' \item{\code{\link{pcplot.corregp}} }{Make a parallel coordinate plot on the basis of the output of a correspondence regression.}
#' \item{\code{\link{cell.corregp}} }{Compute confidence ellipses on the basis of the output of a correspondence regression. Typically, this function is not so much used directly as it is called by a \code{plot.corregp} command.}
#' \item{\code{\link{plot.corregp}} }{Plot the output (and the optional confidence ellipses) of a correspondence regression.}
#' \item{\code{\link{cell3d.corregp}} }{Compute 3D confidence ellipsoids on the basis of a correspondence regression. Typically, this function is not so much used directly as it is called by a \code{plot3d.corregp} command.}
#' \item{\code{\link{plot3d.corregp}} }{Plot the 3D output (and the optional confidence ellipsoids) of a correspondence regression.}
#' \item{\code{\link{agplot.corregp}} }{Make an association graph on the basis of the output of a correspondence regression.}
#' \item{\code{\link{plotag.corregp}} }{Make an association graph on the basis of the output of a correspondence regression.}
#' }
#' }
#'
#' @section Future prospects:
#' \itemize{
#' \item Specify a \code{predict} method for a.o. supplementary points.
#' \item Specify a \code{plot} method for an \code{anova} table.
#' \item Enable scale transformations for all plots (and corresponding confidence regions).
#' \item Provide the possibility for so-called "calibration lines".
#' }
#'
#' @section Author:
#' Koen Plevoets, \email{koen.plevoets@@ugent.be}
#'
#' @section Acknowledgements:
#' This package has benefited greatly from the helpful comments of Isabelle Delaere and Gert De Sutter. Thanks to Kurt Hornik and Uwe Ligges for proofing this package.
#' @docType package
#' @name corregp-package
NULL
#' @import data.table
NULL
#' @import graphics
NULL
#' @import rgl
NULL
#' @import utils
NULL
#' @import stats
NULL
#' @importFrom stats anova
NULL
#' @importFrom ellipse ellipse
NULL
#' @importFrom gplots barplot2
NULL
#' @importFrom gplots plotCI
NULL
#' @importFrom diagram openplotmat
NULL
#' @importFrom diagram coordinates
NULL
#' @importFrom diagram straightarrow
NULL
#' @importFrom diagram textellipse
NULL
#' @importFrom diagram textrect
NULL
utils::globalVariables(c("B_", "D_chi", "D_num", "E_den", "E_num", "F_", "F_t_s", "F_x_s", "F_x_t", "F_y_s", "F_y_t", "N_"))
#' Hair and Eye Color of Statistics Students (Data Frame)
#'
#' The distribution of hair color, eye color and sex among 592 statistics students (from Snee 1974 and Friendly 1992).
#' @format A data frame with 592 rows and 3 variables.
#' @source This is simply a data frame version of the in-built data set \code{\link[datasets]{HairEyeColor}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' haireye.crg
#' summary(haireye.crg, parm = "b", add_ci = TRUE)
#' screeplot(haireye.crg, add_ci = TRUE)
#' anova(haireye.crg, nf = 2)
#' plot(haireye.crg, x_ell = TRUE, xsub = c("Hair", "Sex"))
#' }
#' @docType data
#' @name HairEye
NULL
#' The Use of Linguistic Variants in Translations vs. Non-translations and in Six Different Registers
#'
#' This data set was a case study in the COMURE project ("\strong{co}rpus-based, \strong{mu}ltivariate research of \strong{re}gister variation in translated and
#' non-translated Belgian Dutch") which was conducted at the Department of Translation, Interpreting and Communication of Ghent University between 2010 and 2014.
#' @format A data frame with 3762 rows and 5 variables.
#' \itemize{
#' \item{\code{Variant} }{The linguistic variant used in a set of alternatives (27 levels).}
#' \item{\code{Variable} }{The linguistic variable specifying a set of alternatives (13 levels).}
#' \item{\code{Variety} }{The dichotomization of \code{Variant} into standard and non-standard.}
#' \item{\code{Register} }{The register or "Text type" of the data (6 levels).}
#' \item{\code{Language} }{The language (and source language) of the data (3 levels).}
#' }
#' @source
#' Delaere, I., G. De Sutter and K. Plevoets (2012) Is translated language more standardized than non-translated language? \emph{Target} \strong{24} (2), 203--224.
#' @examples
#' \donttest{
#' data(COMURE)
#' # The execution of corregp may be slow, due to bootstrapping:
#' comure.crg <- corregp(Variant ~ Register * Language, data = COMURE, part = "Variable", b = 3000)
#' comure.crg
#' summary(comure.crg, parm = "b", add_ci = TRUE)
#' screeplot(comure.crg, add_ci = TRUE)
#' anova(comure.crg, nf = 2)
#' comure.col <- ifelse( xtabs(~ Variant + Variety, data = COMURE)[, "Standard"] > 0, "blue", "red")
#' plot(comure.crg, x_ell = TRUE, xsub = c("Register", "Language"), col_btm = comure.col,
#' col_top = "black")
#' }
#' @docType data
#' @name COMURE
NULL
#' The Use of Linguistic Variants in Audio-Visual Translation (Subtitles)
#'
#' This data set was a follow-up study to the \code{\link{COMURE}} project and was conducted at the Department of Translation, Interpreting and Communication of
#' Ghent University between 2014 and 2018.
#' @format A data frame with 3302 rows and 7 variables.
#' \itemize{
#' \item{\code{Variant} }{The linguistic variant used in a set of alternatives (27 levels).}
#' \item{\code{Variable} }{The linguistic variable specifying a set of alternatives (13 levels).}
#' \item{\code{Variety} }{The dichotomization of \code{Variant} into standard and non-standard.}
#' \item{\code{Speaker} }{The role of the speaker in the data (2 levels).}
#' \item{\code{Language} }{The language (and source language) of the data (3 levels).}
#' \item{\code{Language2} }{The same as \code{Language} but with the observations of level \code{intra.nl} set to \code{NA}.}
#' \item{\code{Genre} }{The genre or register of the data (2 levels).}
#' }
#' @source
#' Prieels, L., I. Delaere, K. Plevoets and G. De Sutter (2015) A corpus-based multivariate analysis of linguistic norm-adherence in audiovisual and written
#' translation. \emph{Across Languages and Cultures} \strong{16} (2), 209--231.
#' @examples
#' \donttest{
#' data(AVT)
#' # The execution of corregp may be slow, due to bootstrapping:
#' avt.crg <- corregp(Variant ~ Speaker * Language * Genre, data = AVT, part = "Variable", b = 3000)
#' avt.crg
#' summary(avt.crg, parm = "b", add_ci = TRUE)
#' screeplot(avt.crg, add_ci = TRUE)
#' anova(avt.crg, nf = 2)
#' avt.col <- ifelse( xtabs(~ Variant + Variety, data = AVT)[, "Standard"] > 0, "blue", "red")
#' plot(avt.crg, x_ell = TRUE, xsub = c("Speaker", "Language", "Genre"), col_btm = avt.col,
#' col_top = "black")
#' }
#' @docType data
#' @name AVT
NULL
#' The Use of Inflected or Uninflected Determiners in the Belgian Dutch Vernacular
#'
#' The distribution of the Belgian Dutch \emph{-e(n)}-suffix with 14 determiners in 14 registers and for several speaker characteristics.
#' @format A data frame with 40778 rows and 13 variables.
#' \itemize{
#' \item{\code{Variant} }{The linguistic variant used in a set of alternatives (35 levels).}
#' \item{\code{Variable} }{The linguistic variable specifying a set of alternatives (14 levels).}
#' \item{\code{Inflected} }{Numeric variable specifying whether the linguistic variant is inflected (\code{1}) or not (\code{0}).}
#' \item{\code{Register} }{The register of the data in the Spoken Dutch Corpus (14 levels: see
#' \href{http://lands.let.ru.nl/cgn/doc_English/topics/version_1.0/overview.htm}{here} for their definition).}
#' \item{\code{Register2} }{The dichotomization of \code{Register} into private and public.}
#' \item{\code{SpeakerID} }{The ID of the speaker in the Spoken Dutch Corpus (1144 levels).}
#' \item{\code{Region} }{The region in which the speaker lived until the age of 18 (4 levels).}
#' \item{\code{Sex} }{The sex of the speaker (2 levels).}
#' \item{\code{BirthYear} }{The year in which the speaker was born (63 levels).}
#' \item{\code{Decade} }{The decade in which the speaker was born (7 levels).}
#' \item{\code{Generation} }{The generation cohort in which the speaker was born (5 levels).}
#' \item{\code{Education} }{The level of education of the speaker (3 levels).}
#' \item{\code{Occupation} }{The level of occupation of the speaker (10 levels: see
#' \href{http://lands.let.ru.nl/cgn/doc_English/topics/version_1.0/metadata/speakers.htm}{here} for their definition).}
#' }
#' @source
#' Plevoets, K. (2008) \emph{Tussen spreek- en standaardtaal}. Leuven, Doctoral dissertation. Available online \href{https://biblio.ugent.be/publication/1168055/file/1168056}{here}.
#' @examples
#' \donttest{
#' data(TSS)
#' # The execution of corregp may be slow, due to bootstrapping:
#' tss.crg <- corregp(Variant ~ Register2 * Region, data = TSS, part = "Variable", b = 3000)
#' tss.crg
#' summary(tss.crg, parm = "b", add_ci = TRUE)
#' screeplot(tss.crg, add_ci = TRUE)
#' anova(tss.crg, nf = 2)
#' tss.col <- ifelse( xtabs(~ Variant + Inflected, data = TSS)[, 1] > 0, "blue", "red")
#' plot(tss.crg, x_ell = TRUE, xsub = c("Register2", "Region"), col_btm = tss.col, col_top = "black")
#' }
#' @docType data
#' @name TSS
NULL
#' Correspondence Regression
#'
#' This is the basic function for \emph{correspondence regression}, i.e. the correspondence analysis of a contingency table formed
#' by the categorical variables Y and X, where X can be in turn made up of the combinations of various categorical variables.
#' @param formula A \code{\link[stats]{formula}} specification of which factors to cross with each other. The left-hand (\code{y}) side must be a single factor.
#' The right-hand side (\code{x}) can involve all the usual specifications of interactions and/or nested analyses.
#' @param data The data frame containing the variables specified in the \code{formula}.
#' @param part Character vector specifying the names of conditional factors (e.g. a factor partitioning the levels of the left-hand side \code{y} into groups).
#' This argument is relevant for analyses in which one wants to remove between-item variation.
#' @param b Number of the bootstrap replications (simulations). If \code{0} (i.e. the default), then the analysis is exploratory.
#' @param xep Logical specifying whether to output the separate terms in the right-hand side (\code{x}) as components in a list.
#' If \code{FALSE}, then all \code{x} output is collected in a matrix.
#' @param std Logical specifying whether to output the standardized coordinates. Defaults to \code{FALSE}.
#' @param rel Logical specifying whether to divide the coordinates by the \code{sqrt} of their totals, so that one obtains coordinates for
#' the relative frequencies (as is customary in correspondence analysis). Defaults to \code{TRUE}.
#' @param phi Logical specifying whether to compute the output on the scale of the \emph{Chi-squared} value of the contingency table or of the \emph{Phi-squared} value
#' (which is \emph{Chi-squared} divided by \emph{N}). Reminiscent of \code{\link[MASS]{corresp}} in package \pkg{MASS}, defaults to \code{FALSE}.
#' @param chr Character specifying the separator string for constructing the interaction terms.
#' @param b_scheme Character specifying the sampling scheme for bootstrapping. Must match either \code{"multinomial"} (the default) or \code{"product-multinomial"}.
#' @details
#' Correspondence regression rests on the idea, described by Gilula and Haberman (1988), of using a correspondence analysis to model a polytomous or multinomial (i.e.
#' 'multi-category') response variable (\code{Y}) in terms of other (possibly interacting) factors (\code{X}) (see also 3.2 in Van der Heijden et al. 1989). These are
#' specified in the argument \code{formula}, which can be constructed in all the usual ways of specifying a model formula: e.g.
#' \itemize{
#' \item \code{Y ~ X1 + X2 + X1 : X2} or \code{Y ~ X1 * X2}
#' \item \code{Y ~ (X1 + X2 + X3) ^ 2}
#' \item \code{Y ~ X1 * X2 * X3 - X1 : X2 : X3}
#' \item \ldots
#' }
#' Correspondence regression then crosstabulates the \code{Y} factor with all the combinations in \code{X}, thus producing a typical contingency table, on which a simple
#' correspondence analysis is performed (see Greenacre 2017: 121-128 for the outline of this approach). The more general effects in \code{X} are obtained by aggregating
#' the combinations.
#'
#' Correspondence regression also allows for inferential validation of the effects, which is done by means of the bootstrap (in fact, Monte Carlo simulation). Setting the argument
#' \code{b} to a number \eqn{> 0}, \code{b} replicates of the contingency table are generated with multinomial sampling. From these, \code{b} new values are derived for the
#' coordinates in both \code{Y} and \code{X} as well as for the eigenvalues (also called the "principal inertias"). On the basis of the replicate/simulated values,
#' confidence intervals, ellipses or ellipsoids can be computed. CAUTION: bootstrapping/simulation is computationally quite intensive, so it can take a while to reach
#' results, especially with a large \code{b}.
#'
#' The argument \code{parm} can be used when one wants to perform a correspondence regression of \code{Y} onto \code{X} conditional on other factors. These conditioning factors are
#' therefore equivalent to \emph{random factors}, and \code{corregp} always conditions on the joint variable of all the specified factors. One such use of conditioning factors is
#' a so-called \emph{lectometric} analysis in linguistics, where the levels of \code{Y} are grouped/partitioned/nested into clusters and one wants to exclude the heterogeneity
#' between the clusters.
#' @return An object of class "corregp", i.e. a list with components:
#' \item{\code{eigen} }{A vector of eigenvalues of the correspondence regression.}
#' \item{\code{y} }{The coordinates (matrix) of the Y levels.}
#' \item{\code{x} }{The coordinates of the X levels. If \code{xep} is \code{TRUE}, then this is a list with a component for each term name.}
#' \item{\code{freq} }{A list of the frequencies of every Y and X level.}
#' \item{\code{conf} }{If \eqn{b>0}. A list of bootstrap replicates for the eigenvalues, the coordinates of Y levels, the coordinates of X levels and the frequencies of both the Y levels and the X levels.}
#' \item{\code{aux} }{A list of auxiliary information (such as the U and V matrices of the SVD, the specified values for all the arguments) to be passed to other functions and methods.}
#' @references
#' Gilula, Z. and S.J. Haberman (1988) The analysis of multivariate contingency tables by restricted canonical and restricted association models.
#' \emph{Journal of the American Statistical Association} \strong{83} (403), 760--771.
#'
#' Greenacre, M. (2017) \emph{Correspondence analysis in practice, Third edition}. Boca Raton: Chapman and Hall/CRC.
#'
#' Van der Heijden, P.G.M., A. de Falguerolles and J. de Leeuw (1989) A combined approach to contingency table analysis using correspondence analysis and log-linear analysis.
#' \emph{Applied Statistics} \strong{38} (2), 249--292.
#' @seealso \code{\link{print.corregp}}, \code{\link{summary.corregp}}, \code{\link{screeplot.corregp}}, \code{\link{anova.corregp}}, \code{\link{plot.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' haireye.crg
#' }
#' @export
corregp <- function(formula,data,part=NULL,b=0,xep=TRUE,std=FALSE,rel=TRUE,phi=FALSE,chr=".",b_scheme="multinomial") {
	# Correspondence regression: a correspondence analysis of the contingency
	# table of the response factor (LHS of 'formula') against all (interaction)
	# terms on the RHS, with optional bootstrap/simulation replicates (b > 0)
	# for inferential validation. Returns an object of class "corregp".
	#
	# Decompose the formula: the first variable is the response (Y), the rest
	# are the predictors (X).
	f.var <- all.vars(formula,functions=FALSE,unique=TRUE)
	x.var <- f.var[-1]
	y.var <- f.var[1]
	# part=NA is treated as "no conditioning factors". all() keeps this NA-safe
	# when 'part' is a multi-element character vector ('&& is.na(part)' would
	# error on R >= 4.3 for length > 1).
	if (!is.null(part) && all(is.na(part))) {
		part <- NULL
		}
	f.dat <- data.table::data.table(data[,c(x.var,y.var,part)])
	# Term labels of the RHS, each split into its constituent factor names.
	f.trm <- strsplit(labels(terms(formula,keep.order=FALSE)),split=":")
	chr <- chr[1]
	names(f.trm) <- sapply(f.trm,paste,collapse=chr)
	# Marginal frequency tables: per Y level, per full X combination, per term.
	y.aux <- f.dat[,list(N_=.N),keyby=c(y.var,part)]
	x.aux <- f.dat[,list(N_=.N),keyby=c(x.var,part)]
	t.aux <- lapply(f.trm,function(t01){f.dat[,list(N_=.N),keyby=t01]})
	# Full contingency table, completed with zero cells for unobserved
	# combinations of Y and X (cartesian join, then NA counts set to 0).
	f.tab <- f.dat[,list(N_=as.double(.N)),by=names(f.dat)]
	if (is.null(part)) {
		f.tab <- f.tab[
			merge(data.table::data.table(x.aux[,x.var,with=FALSE],Tmp_=1),data.table::data.table(y.aux[,y.var,with=FALSE],Tmp_=1),by="Tmp_",allow.cartesian=TRUE)[,-"Tmp_",with=FALSE],
			on=c(x.var,y.var)][is.na(N_), N_:=0]
		}
	else {
		f.tab <- f.tab[merge(x.aux[,c(x.var,part),with=FALSE],y.aux[,c(y.var,part),with=FALSE],by=part,allow.cartesian=TRUE), on=c(x.var,y.var,part)][is.na(N_), N_:=0]
		y.aux <- y.aux[,list(N_=sum(N_)),by=y.var]
		x.aux <- x.aux[,list(N_=sum(N_)),by=x.var]
		}
	n.tot <- f.tab[,sum(N_)]
	b <- ifelse(is.null(b)||is.na(b),0,b)
	if (b>0) {
		# Resolve the sampling scheme by partial matching. pmatch() returns NA
		# when 'b_scheme' matches none (or several) of the alternatives, so the
		# comparisons below must be NA-safe: a bare 'if (b.sch==1)' would raise
		# "missing value where TRUE/FALSE needed" and the fallback warning
		# branch further down would never be reached.
		b.sch <- pmatch(tolower(b_scheme),c("multinomial","product-multinomial","product_multinomial"))
		if (!is.na(b.sch) && b.sch==1) {
			# Multinomial scheme: resample the whole table at once; each column
			# B1..Bb is one bootstrap replicate of the cell counts.
			f.rep <- matrix(rmultinom(b,n.tot,f.tab$N_/n.tot),ncol=b,dimnames=list(NULL,paste("B",1:b,sep="")))
			storage.mode(f.rep) <- "double"
			f.tab <- data.table::melt(cbind(f.tab,f.rep), id.vars=c(x.var,y.var,part), measure.vars=c("N_",paste("B",1:b,sep="")), variable.name="B_", value.name="F_")
			}
		if (!is.na(b.sch) && b.sch>1) {
			# Product-multinomial scheme: resample within each X (and part)
			# stratum, keeping the stratum totals fixed.
			f.tab <- rbind(
				f.tab[,B_:="N_"],
				f.tab[,c( lapply(.SD,function(j){rep(j,times=b)}), list( B_=paste("B",rep(1:b,each=length(N_)),sep=""), N_=as.double(rmultinom(b,sum(N_),N_)) ) ),
					by=c(x.var,part),.SDcols=y.var]
				)[,F_:=N_]
			}
		if (is.na(b.sch)) {
			warning("argument 'b_scheme' has to match either 'multinomial' or 'product-multinomial', so no bootstrapping was done",call.=FALSE)
			f.tab[,c("B_","F_") := list("N_",as.double(N_))]
			}
		}
	else {
		f.tab[,c("B_","F_") := list("N_",as.double(N_))]
		}
	data.table::setkeyv(f.tab,c("B_",x.var,y.var,part))
	# Per-replicate marginal sums: _s = within-part strata, _t = overall totals.
	f.tab[,F_x_s := sum(F_), by=c("B_",x.var,part)]
	f.tab[,F_y_s := sum(F_), by=c("B_",y.var,part)]
	f.tab[,F_t_s := sum(F_), by=c("B_",part)]
	f.tab[,F_x_t := sum(F_), by=c("B_",x.var)]
	f.tab[,F_y_t := sum(F_), by=c("B_",y.var)]
	# Expected counts under independence (within part), deviations, and
	# chi-squared-standardized deviations; empty cells are zeroed out.
	f.tab[,E_num := F_x_s*F_y_s/F_t_s]
	f.tab[,E_den := F_x_t*F_y_t]
	f.tab[,D_num := F_-E_num]
	f.tab[,D_chi := D_num/sqrt(E_den)]
	f.tab[!is.finite(D_chi), D_chi:=0]
	# phi=TRUE works on the Phi-squared (chi-squared / N) scale.
	n.rot <- ifelse(phi[1],1,sqrt(n.tot))
	# SVD of the standardized-residual matrix of the observed (B_ == "N_") data.
	f.chi <- n.rot*as.matrix(data.table::dcast(f.tab[B_=="N_"],formula(paste(paste(x.var,collapse="+"),"~",y.var,sep="")),fun.aggregate=sum,value.var="D_chi")[,-x.var,with=FALSE])
	f.svd <- svd(f.chi)
	s.svd <- f.svd$d[f.svd$d != 0]
	# seq_along is safe even if all singular values were filtered out.
	r.ank <- seq_along(s.svd)
	u.svd <- f.svd$u[,r.ank]
	v.svd <- f.svd$v[,r.ank]
	# Attach the row (U) and column (V) singular vectors back onto the table.
	f.tab <- merge(f.tab,
		data.table::data.table(x.aux[,x.var,with=FALSE], matrix(u.svd,nrow=nrow(u.svd),dimnames=list(NULL,paste("U_",r.ank,sep="")))),
		by.x=x.var, by.y=x.var, all.x=TRUE, sort=FALSE)
	f.tab <- merge(f.tab,
		data.table::data.table(y.aux[,y.var,with=FALSE], matrix(v.svd,nrow=nrow(v.svd),dimnames=list(NULL,paste("V_",r.ank,sep="")))),
		by.x=y.var, by.y=y.var, all.x=TRUE, sort=FALSE)
	# The eval(parse(...)) calls below build one ':=' assignment per axis;
	# S_ terms reconstruct per-cell inertias, Y_/X_ are the axis projections.
	f.tab[,paste("S_",r.ank,sep="") := eval(parse(text=paste("list(",paste("U_",r.ank,"*D_chi*V_",r.ank,sep="",collapse=","),")",sep="")))]
	if (std[1]) {
		# Standardized coordinates: divide the singular vectors by the
		# corresponding singular values.
		f.tab[,paste("U_",r.ank,sep="") := eval(parse(text=paste("list(",paste(paste("U_",r.ank,sep=""),paste("/s.svd[",r.ank,"]",sep=""),sep="",collapse=","),")",sep="")))]
		f.tab[,paste("V_",r.ank,sep="") := eval(parse(text=paste("list(",paste(paste("V_",r.ank,sep=""),paste("/s.svd[",r.ank,"]",sep=""),sep="",collapse=","),")",sep="")))]
		}
	f.tab[,paste("Y_",r.ank,sep="") := eval(parse(text=paste("list(",paste("D_chi*U_",r.ank,sep="",collapse=","),")",sep="")))]
	f.tab[,paste("X_",r.ank,sep="") := eval(parse(text=paste("list(",paste("D_num*V_",r.ank,sep="",collapse=","),")",sep="")))]
	# W_ weights: rel=TRUE yields coordinates for relative frequencies
	# (customary in correspondence analysis), rel=FALSE for raw frequencies.
	if (rel[1]) {
		f.tab[,paste("W_",y.var,sep="") := sqrt(1/sum(F_)),by=c("B_",y.var)]
		for (t02 in f.trm) {
			f.tab[,paste("W_",paste(t02,collapse=chr),sep="") := sqrt(1/F_y_t)*(1/sum(F_)),by=c("B_",t02)]
			}
		}
	else {
		f.tab[,paste("W_",y.var,sep="") := 1]
		for (t02 in f.trm) {
			f.tab[,paste("W_",paste(t02,collapse=chr),sep="") := sqrt(1/(F_y_t*sum(F_))),by=c("B_",t02)]
			}
		}
	# Point coordinates of the observed data: one matrix for Y, one per X term.
	y.lab <- levels(y.aux[,eval(parse(text=y.var))])
	x.lab <- lapply(f.trm,function(t03){apply(t.aux[[paste(t03,collapse=chr)]][,t03,with=FALSE],1,paste,collapse=chr)})
	y.loc <- n.rot*matrix(do.call(cbind,
		f.tab[B_=="N_",eval(parse(text=paste("list(",paste("sum(W_",y.var,"*Y_",r.ank,",na.rm=TRUE)",sep="",collapse=","),")",sep=""))),
			keyby=y.var][,-y.var,with=FALSE]),
		ncol=length(r.ank), dimnames=list(y.lab,r.ank))
	x.loc <- lapply(f.trm,function(t04){
		n.rot*matrix(do.call(cbind,
			f.tab[B_=="N_", eval(parse(text=paste("list(",paste("sum(W_",paste(t04,collapse=chr),"*X_",r.ank,",na.rm=TRUE)",sep="",collapse=","),")",sep=""))),
				keyby=c(t04)][,-t04,with=FALSE]),
			ncol=length(r.ank), dimnames=list(x.lab[[paste(t04,collapse=chr)]],r.ank))
		})
	y.frq <- y.aux[,N_]
	names(y.frq) <- y.lab
	x.frq <- lapply(f.trm,function(t05){t.aux[[paste(t05,collapse=chr)]][,N_]})
	for (t06 in names(f.trm)) {
		names(x.frq[[t06]]) <- x.lab[[t06]]
		}
	# With a single X variable there is nothing to separate, so force xep=FALSE
	# and collapse the per-term lists into one matrix/vector.
	xep <- ifelse(length(x.var)==1,FALSE,xep[1])
	if (!xep) {
		x.loc <- do.call(rbind,x.loc)
		names(x.frq) <- NULL
		x.frq <- do.call("c",x.frq)
		}
	f.out <- list(eigen=s.svd^2,y=y.loc,x=x.loc,freq=list(y=y.frq,x=x.frq))
	if (b>0) {
		# Bootstrap coordinates: recompute the projections for every replicate
		# (B_ != "N_"), per Y level and per X level within each term.
		y.con <- sapply(y.lab,function(yl){
			# The 'yl=' assignment is only used for its value (the replicate matrix).
			yl=n.rot*matrix(do.call(cbind,
				f.tab[B_!="N_" & eval(parse(text=paste(y.var,"==\"",yl,"\"",sep=""))),
					eval(parse(text=paste("list(",paste("sum(W_",y.var,"*Y_",r.ank,",na.rm=TRUE)",sep="",collapse=","),")",sep=""))),by="B_"][,-"B_",with=FALSE]),
				ncol=length(r.ank),dimnames=list(NULL,r.ank))
			},simplify=FALSE,USE.NAMES=TRUE)
		x.con <- lapply(f.trm,function(t07){
			sapply(x.lab[[paste(t07,collapse=chr)]],function(xl){
				n.rot*matrix(do.call(cbind,
					f.tab[B_!="N_" & eval(parse(text=paste(t07,paste("\"",unlist(strsplit(xl,split=chr,fixed=TRUE)),"\"",sep=""),sep="==",collapse="&"))),
						eval(parse(text=paste("list(",paste("sum(W_",paste(t07,collapse=chr),"*X_",r.ank,",na.rm=TRUE)",sep="",collapse=","),")",sep=""))),
						by="B_"][,-"B_",with=FALSE]),
					ncol=length(r.ank),dimnames=list(NULL,r.ank))
				},simplify=FALSE,USE.NAMES=TRUE)
			})
		# Replicated frequencies (one row per replicate) for Y and for each term.
		y.rep <- as.matrix(data.table::dcast(f.tab[B_!="N_",c("B_",y.var,"F_"),with=FALSE],formula(paste("B_~",y.var,sep="")),fun.aggregate=sum,value.var="F_")[,-"B_",with=FALSE])
		x.rep <- lapply(f.trm,function(t08){
			as.matrix(data.table::dcast(f.tab[B_!="N_",c("B_",t08,"F_"),with=FALSE],formula(paste("B_~",paste(t08,collapse="+"),sep="")),
				fun.aggregate=sum,sep=chr,value.var="F_")[,-"B_",with=FALSE])
			})
		# Replicated eigenvalues: squared per-axis sums of the S_ inertia terms.
		s.con <- (n.rot*matrix(do.call(cbind,
			f.tab[B_!="N_",eval(parse(text=paste("list(",paste("sum(S_",r.ank,")",sep="",collapse=","),")",sep=""))),by="B_"][,-"B_",with=FALSE]),
			nrow=b,dimnames=list(NULL,r.ank)))^2
		if (!xep) {
			names(x.con) <- NULL
			x.con <- do.call("c",x.con)
			x.rep <- do.call(cbind,x.rep)
			}
		f.out$conf <- list(eigen=s.con,y=y.con,x=x.con,freq=list(y=y.rep,x=x.rep))
		}
	colnames(u.svd) <- r.ank
	colnames(v.svd) <- r.ank
	# Store part=NA (not NULL) so downstream methods can test !is.na(part).
	if (is.null(part)) {
		part <- NA
		}
	f.out$aux <- list(U=u.svd,V=v.svd,formula=formula,data=as.list(match.call())$data,part=part,b=b,std=std,rel=rel,phi=phi,chr=chr)
	class(f.out) <- "corregp"
	f.out
	}
#' Repeated Correspondence Regression
#'
#' A function for repeated correspondence regressions with bootstrapping in order to handle large data sets. This is essentially a wrapper around
#' \code{replicate(n = r, expr = corregp(...), simplify = FALSE)}, so it may disappear in the future.
#' @param formula A \code{\link[stats]{formula}} specification of which factors to cross with each other. The left-hand (\code{y}) side must be a single factor.
#' The right-hand side (\code{x}) can involve all the usual specifications of interactions and/or nested analyses.
#' @param data The data frame containing the variables specified in the \code{formula}.
#' @param part Character vector specifying the names of conditional factors (e.g. a factor partitioning the levels of the left-hand side \code{y} into groups).
#' This argument is relevant for analyses in which one wants to remove between-item variation.
#' @param b Number of the bootstrap replications (simulations).
#' @param r Number of repeated calls to \code{\link{corregp}}.
#' @param xep Logical specifying whether to output the separate terms in the right-hand side (\code{x}) as components in a list.
#' If \code{FALSE}, then all \code{x} output is collected in a matrix.
#' @param std Logical specifying whether to output the standardized coordinates. Defaults to \code{FALSE}.
#' @param rel Logical specifying whether to divide the coordinates by the \code{sqrt} of their totals, so that one obtains coordinates for
#' the relative frequencies (as is customary in correspondence analysis). Defaults to \code{TRUE}.
#' @param phi Logical specifying whether to compute the output on the scale of the \emph{Chi-squared} value of the contingency table or of the \emph{Phi-squared} value
#' (which is \emph{Chi-squared} divided by \emph{N}). Reminiscent of \code{\link[MASS]{corresp}} in package \pkg{MASS}, defaults to \code{FALSE}.
#' @param chr Character specifying the separator string for constructing the interaction terms.
#' @param b_scheme Character specifying the sampling scheme for bootstrapping. Must match either \code{"multinomial"} (the default) or \code{"product-multinomial"}.
#' @return An object of class "corregp" in which the bootstrap replications of all the repeated calls to \code{corregp} are put together.
#' @seealso \code{\link{corregp}}.
#' @export
corregplicate <- function(formula,data,part=NULL,b=100,r=10,xep=TRUE,std=FALSE,rel=TRUE,phi=FALSE,chr=".",b_scheme="multinomial") {
	# Run corregp() r times with b bootstrap replications each and pool all
	# b * r replicates into a single "corregp" object. The point estimates
	# (eigenvalues, coordinates, frequencies) are taken from the first run.
	if (b == 0 || r == 0) {
		stop("both b and r must be > 0", call. = FALSE)
		}
	runs <- replicate(n = r, corregp(formula = formula, data = data, part = part, b = b, xep = xep, std = std, rel = rel, phi = phi, chr = chr, b_scheme = b_scheme), simplify = FALSE)
	first <- runs[[1]]
	pooled <- first[c("eigen", "y", "x", "freq")]
	# Stack the eigenvalue replicates of all runs row-wise.
	eig.con <- do.call(rbind, lapply(runs, function(run) run$conf$eigen))
	# Pool the replicated Y coordinates per Y level.
	y.lev <- names(first$conf$y)
	y.con <- setNames(lapply(y.lev, function(yl) {
		do.call(rbind, lapply(runs, function(run) run$conf$y[[yl]]))
		}), y.lev)
	y.frq <- do.call(rbind, lapply(runs, function(run) run$conf$freq$y))
	if (xep) {
		# X output is a list per term: pool per term, and per level within each term.
		x.lev <- lapply(first$conf$x, names)
		trm <- names(x.lev)
		x.con <- setNames(lapply(trm, function(tl) {
			setNames(lapply(x.lev[[tl]], function(xl) {
				do.call(rbind, lapply(runs, function(run) run$conf$x[[tl]][[xl]]))
				}), x.lev[[tl]])
			}), trm)
		x.frq <- setNames(lapply(trm, function(tl) {
			do.call(rbind, lapply(runs, function(run) run$conf$freq$x[[tl]]))
			}), trm)
		}
	else {
		# X output is one flat list of levels.
		x.lev <- names(first$conf$x)
		x.con <- setNames(lapply(x.lev, function(xl) {
			do.call(rbind, lapply(runs, function(run) run$conf$x[[xl]]))
			}), x.lev)
		x.frq <- do.call(rbind, lapply(runs, function(run) run$conf$freq$x))
		}
	pooled$conf <- list(eigen = eig.con, y = y.con, x = x.con, freq = list(y = y.frq, x = x.frq))
	pooled$aux <- first$aux
	class(pooled) <- "corregp"
	pooled
	}
#' Printing Correspondence Regression
#'
#' Method to print the output of \code{\link{corregp}}.
#' @param x The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param nf The number of dimensions to print. Defaults to the first two dimensions.
#' @param ... Further arguments passed to or from other methods.
#' @return The output of a call to \code{\link{corregp}}.
#' @seealso \code{\link{corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' haireye.crg
#' print(haireye.crg, nf = 3)
#' }
#' @export
print.corregp <- function(x,nf=2,...) {
	# Print method for "corregp" objects: shows the model call, the
	# conditioning factor(s) (if any), the first nf eigenvalues, and the Y
	# and X coordinates on the first nf axes.
	#
	# Args:
	#   x:   An object of class "corregp".
	#   nf:  Number of dimensions to print; may also be an axis (column) name.
	#        Capped at the rank of the solution (with a warning). Defaults to 2.
	#   ...: Passed on to print() for the coordinate matrices.
	# Invisibly returns NULL (printing is the side effect).
	crg <- x
	# Rank = number of (numerically) non-zero eigenvalues.
	o.rnk <- sum(crg$eigen > 1e-08, na.rm = TRUE)
	# Resolve nf: NULL means "all axes", a character value is matched against
	# the axis names; only the first element is used.
	if (is.null(nf)) {
		nf <- o.rnk
		}
	if (is.character(nf)) {
		nf <- match(nf, table = colnames(crg$y))
		}
	nf <- nf[1]
	if (is.na(nf) || nf > o.rnk) {
		nf <- o.rnk
		if (o.rnk > 1) {
			warning(paste(as.list(match.call())$x,"only has",o.rnk,"axes",sep=" ",collapse=NULL),call.=FALSE)
			}
		}
	f.var <- all.vars(crg$aux$formula,functions=FALSE,unique=TRUE)
	cat("Correspondence regression of ",format(crg$aux$formula)," in ",crg$aux$data,"\n",sep="",fill=FALSE)
	# part is NA when no conditioning factors were specified.
	if (all(!is.na(crg$aux$part))) {
		cat("Conditioning factor(s): ",crg$aux$part,"\n",sep=" ",fill=FALSE)
		}
	cat("\nEigenvalues: ",crg$eigen[1:nf],"\n",sep=" ",fill=FALSE)
	cat("\nY (",f.var[1],"):\n",sep="",fill=FALSE)
	print(crg$y[,1:nf],...)
	# crg$x is a list of matrices when xep=TRUE (one per term), a single
	# matrix otherwise.
	if (is.list(crg$x)) {
		p.var <- names(crg$x)
		cat("\nX:\n",sep="",fill=FALSE)
		for (p in seq_along(crg$x)) {
			cat(p.var[p],":\n",sep="",fill=FALSE)
			print(crg$x[[p]][,1:nf],...)
			cat("\n",sep="",fill=FALSE)
			}
		}
	else {
		cat("\nX",ifelse(length(f.var)==2,paste(" (",f.var[2],")",sep="",collapse=NULL),""),":\n",sep="",fill=FALSE)
		print(crg$x[,1:nf],...)
		cat("\n",sep="",fill=FALSE)
		}
	invisible()
	}
#' Summarizing Correspondence Regression
#'
#' Method to produce a summary of a correspondence regression.
#' @param object The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param parm The parameter for which to compute the contributions \code{contrib}. Can be either \code{"y"} for the Y contributions, \code{"x"} for the X contributions, \code{"both"}
#' which can be abbreviated to \code{"b"}, or a vector of term names in X. Defaults to \code{"b"}.
#' @param contrib The type of contributions to be computed: either \emph{from points to axes} (absolute contributions) or \emph{from axes to points} (squared correlations).
#' The specification can be \code{"pnts_to_axes"} or \code{"axes_to_pnts"}, \code{"pts2axs"} or \code{"axs2pts"}, \code{"p_a"} or \code{"a_p"}, or any other reasonable abbreviation.
#' @param nf The number of dimensions to be retained in the reduced space. Defaults to all dimensions (no reduction).
#' @param add_ci Logical specifying whether to compute confidence intervals for the eigenvalues (and eigenvalues only). Defaults to \code{FALSE}.
#' @param cl The confidence level for the confidence intervals. Defaults to \code{0.95}.
#' @param nq Logical specifying whether to use a normal quantile (i.e. apply \code{\link[stats]{qnorm}}) in the computation of the confidence intervals.
#' Defaults to \code{TRUE}. If \code{FALSE}, then the confidence intervals are computed directly with the \code{\link[stats]{quantile}} function.
#' @param ... Further arguments passed to or from other methods.
#' @return An object of class "summary.corregp", providing a summary of a correspondence regression, i.e. a list with components:
#' \item{\code{formula} }{The \code{\link[stats]{formula}} specified to the \code{formula} argument in the call to \code{corregp}.}
#' \item{\code{data} }{The name of the data frame specified to the \code{data} argument in the call to \code{corregp}.}
#' \item{\code{part} }{The name of the factor specified to the \code{part} argument in the call to \code{corregp}.}
#' \item{\code{chi_squared} }{The chi-squared value of the correspondence regression.}
#' \item{\code{phi_squared} }{The phi-squared value of the correspondence regression, i.e. the chi-squared value divided by \code{N}.}
#' \item{\code{N} }{The total number of observations.}
#' \item{\code{eigen} }{Depending on \code{add_ci}: if \code{FALSE}, a matrix of the actual eigenvalues, their percentages and cumulative percentages; if \code{TRUE}, a list of the actual eigenvalues, their percentages and cumulative percentages together with the lower and upper confidence limits for each.}
#' \item{\code{y} }{If \code{parm} is \code{"y"} or \code{"b"}. A list of components \code{p_a} for the absolute contributions and/or \code{a_p} for the squared correlations, depending on \code{contrib}.}
#' \item{\code{x} }{If \code{parm} is \code{"y"}, \code{"b"} or any of the term names in X. A list of components \code{p_a} for the absolute contributions and/or \code{a_p} for the squared correlations, depending on \code{contrib}.}
#' @seealso \code{\link{corregp}}, \code{\link{print.summary.corregp}}, \code{\link{anova.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' summary(haireye.crg, add_ci = TRUE)
#' summary(haireye.crg, parm = "y", contrib = "pts_axs", nf = 2)
#' }
#' @export
summary.corregp <- function(object,parm=NULL,contrib=NULL,nf=NULL,add_ci=FALSE,cl=0.95,nq=TRUE,...) {
	# Summary method for "corregp" objects: eigenvalue table (optionally with
	# bootstrap confidence intervals) and, if requested via parm/contrib, the
	# contribution tables of the Y and/or X levels.
	crg <- object
	# parm and contrib default to each other: specifying one implies "both"
	# ("b") for the other; if neither is given, no contributions are computed.
	if (is.null(parm) && !is.null(contrib)) {
		parm <- "b"
		}
	if (!is.null(parm) && is.null(contrib)) {
		contrib <- "b"
		}
	parm <- ifelse(is.null(parm),NA,parm)
	# Normalize the many accepted spellings of the contribution types to
	# "p_a" (points-to-axes) and "a_p" (axes-to-points).
	contrib <- tolower(contrib)
	contrib <- ifelse(contrib %in% c("p_a","pts_axs","pts2axs","ptstoaxs","pts_to_axs","pnts_axes","pnts2axes","pntstoaxes","pnts_to_axes"),"p_a",contrib)
	contrib <- ifelse(contrib %in% c("a_p","axs_pts","axs2pts","axstopts","axs_to_pts","axes_pnts","axes2pnts","axestopnts","axes_to_pnts"),"a_p",contrib)
	o.eig <- crg$eigen
	# Rank = number of (numerically) non-zero eigenvalues; nf (possibly given
	# as an axis name) is capped at this rank.
	o.rnk <- sum(o.eig>1e-08,na.rm=TRUE)
	nf <- ifelse(is.null(nf),o.rnk,nf)
	nf <- ifelse(is.character(nf),match(nf,table=colnames(crg$y)),nf)[1]
	if(nf>o.rnk || is.na(nf)) {
		nf <- o.rnk
		warning(paste(as.list(match.call())$object,"only has",o.rnk,"axes",sep=" ",collapse=NULL),call.=FALSE)
		}
	o.tot <- sum(crg$freq$y)
	# If the analysis was on the phi-squared scale, rescale the summed
	# eigenvalues back to the chi-squared value.
	o.phi <- ifelse(crg$aux$phi,o.tot,1)
	s.out <- list(formula=crg$aux$formula,data=crg$aux$data,part=crg$aux$part,chi_squared=sum(o.eig)*o.phi,phi_squared=sum(o.eig)*o.phi/o.tot,N=o.tot)
	o.lab <- colnames(crg$y[,1:nf])
	# Confidence intervals need bootstrap replicates (crg$conf).
	if (add_ci && is.null(crg$conf)) {
		add_ci <- FALSE
		warning(paste("no bootstrapping was done in",as.list(match.call())$object,sep=" ",collapse=NULL),call.=FALSE)
		}
	if (add_ci) {
		# Eigenvalue table as a list of three matrices (value, %, cum_%), each
		# with the point estimate plus lower/upper limits from the ci() helper.
		s.out$eigen$value <- as.matrix(rbind(o.eig[1:nf],apply(matrix(crg$conf$eigen[,1:nf],ncol=nf),2,ci,cl=cl,nq=nq)))
		s.out$eigen$"%" <- as.matrix(rbind(o.eig[1:nf]/sum(o.eig),apply(sweep(matrix(crg$conf$eigen[,1:nf],ncol=nf),1,apply(crg$conf$eigen,1,sum),"/"),2,ci,cl=cl,nq=nq)))
		s.out$eigen$"cum_%" <- as.matrix(rbind(cumsum(o.eig[1:nf])/sum(o.eig),apply(sweep(matrix(apply(matrix(crg$conf$eigen[,1:nf],ncol=nf),1,cumsum),ncol=nf),1,apply(crg$conf$eigen,1,sum),"/"),2,ci,cl=cl,nq=nq)))
		dimnames(s.out$eigen$value) <- list(c("","lower","upper"),o.lab)
		dimnames(s.out$eigen$"%") <- list(c("","lower","upper"),o.lab)
		dimnames(s.out$eigen$"cum_%") <- list(c("","lower","upper"),o.lab)
		}
	else {
		# Without CIs, the eigenvalue table is a single 3 x nf matrix.
		s.out$eigen <- as.matrix(rbind(o.eig[1:nf],o.eig[1:nf]/sum(o.eig),cumsum(o.eig[1:nf])/sum(o.eig)),ncol=nf)
		dimnames(s.out$eigen) <- list(c("value","%","cum_%"),o.lab)
		}
	if (!is.na(parm)) {
		# For standardized coordinates the eigenvalue scaling is already
		# folded into the coordinates, so divide by 1 instead of o.eig.
		if (crg$aux$std) {
			o.std <- rep(1,times=length(o.eig))
			}
		else {
			o.std <- o.eig
			}
		}
	# Y contributions.
	if (parm %in% c("y","b","both")) {
		if (contrib %in% c("p_a","b","both")) {
			# Absolute contributions (points to axes); with rel=TRUE the
			# squared coordinates are weighted by the level frequencies.
			if (crg$aux$rel) {
				y.rel <- crg$freq$y
				}
			else {
				y.rel <- rep(1,times=nrow(crg$y))
				}
			s.out$y$p_a <- sweep(sweep(matrix(crg$y[,1:nf]^2,ncol=nf,dimnames=list(rownames(crg$y),1:nf)),1,y.rel,"*"),2,o.std[1:nf],"/")
			}
		if (contrib %in% c("a_p","b","both")) {
			# Squared correlations (axes to points): per-axis share of each
			# level's total squared distance, over all axes.
			s.out$y$a_p <- sweep(sweep(matrix(crg$y[,1:nf]^2,ncol=nf,dimnames=list(rownames(crg$y),1:nf)),2,(o.eig/o.std)[1:nf],"*"),1,apply(sweep(crg$y^2,2,(o.eig/o.std),"*"),1,sum),"/")
			}
		}
	# X contributions when crg$x is a single matrix (xep = FALSE).
	if (!is.list(crg$x) && parm %in% c("x","b","both")) {
		if (contrib %in% c("p_a","b","both")) {
			if (crg$aux$rel) {
				x.rel <- crg$freq$x
				}
			else {
				x.rel <- rep(1,times=nrow(crg$x))
				}
			s.out$x$p_a <- sweep(sweep(matrix(crg$x[,1:nf]^2,ncol=nf,dimnames=list(rownames(crg$x),1:nf)),1,x.rel,"*"),2,o.std[1:nf],"/")
			}
		if (contrib %in% c("a_p","b","both")) {
			s.out$x$a_p <- sweep(sweep(matrix(crg$x[,1:nf]^2,ncol=nf,dimnames=list(rownames(crg$x),1:nf)),2,(o.eig/o.std)[1:nf],"*"),1,apply(sweep(crg$x^2,2,(o.eig/o.std),"*"),1,sum),"/")
			}
		}
	# X contributions when crg$x is a list of per-term matrices (xep = TRUE);
	# parm may then also be a vector of term names.
	if (is.list(crg$x) && all(parm %in% c("x","b","both",names(crg$x)))) {
		if (parm %in% c("x","b","both")) {
			parm <- names(crg$x)
			}
		if (contrib %in% c("p_a","b","both")) {
			if (crg$aux$rel) {
				x.rel <- crg$freq$x[parm]
				}
			else {
				x.rel <- lapply(crg$x[parm],function(p1){rep(1,times=nrow(p1))})
				}
			s.out$x$p_a <- lapply(parm,function(p2){
				sweep(sweep(matrix(crg$x[[p2]][,1:nf]^2,ncol=nf,dimnames=list(rownames(crg$x[[p2]]),1:nf)),1,x.rel[[p2]],"*"),2,o.std[1:nf],"/")
				})
			names(s.out$x$p_a) <- parm
			}
		if (contrib %in% c("a_p","b","both")) {
			s.out$x$a_p <- lapply(parm,function(p3){
				sweep(sweep(matrix(crg$x[[p3]][,1:nf]^2,ncol=nf,dimnames=list(rownames(crg$x[[p3]]),1:nf)),2,(o.eig/o.std)[1:nf],"*"),1,apply(sweep(crg$x[[p3]]^2,2,(o.eig/o.std),"*"),1,sum),"/")
				})
			names(s.out$x$a_p) <- parm
			}
		}
	class(s.out) <- "summary.corregp"
	s.out
	}
#' Printing the Summary of Correspondence Regression
#'
#' Method to print the output of \code{\link{summary.corregp}}.
#' @param x The output of a call to \code{summary} on a "corregp" object (i.e. an object of class "summary.corregp").
#' @param ... Further arguments passed to or from other methods.
#' @return The output of a call to \code{summary} on a "corregp" object. The eigenvalues and contributions are printed with \code{TOTAL}s.
#' @seealso \code{\link{summary.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' summary(haireye.crg, add_ci = TRUE)
#' summary(haireye.crg, parm = "y", contrib = "pts_axs", nf = 2)
#' }
#' @export
print.summary.corregp <- function(x,...) {
	# Print method for "summary.corregp" objects: prints the model call, the
	# chi-squared/phi-squared statistics and N, the eigenvalue table(s) with
	# TOTALs, and (if present) the Y and X contribution tables.
	#
	# Args:
	#   x:   An object of class "summary.corregp".
	#   ...: Passed on to print() for the individual tables.
	# Invisibly returns NULL (printing is the side effect).
	crs <- x
	f.var <- all.vars(crs$formula,functions=FALSE,unique=TRUE)
	cat("Summary of correspondence regression of ",format(crs$formula)," in ",crs$data,"\n",sep="",fill=FALSE)
	# part is NA when no conditioning factors were specified.
	if (all(!is.na(crs$part))) {
		cat("Conditioning factor(s): ",crs$part,"\n",sep=" ",fill=FALSE)
		}
	cat("\nChi-squared: ",crs$chi_squared,"\nPhi-squared: ",crs$phi_squared,"\nN: ",crs$N,"\n",sep="",fill=FALSE)
	cat("\n\nEigenvalues:\n",sep="",fill=FALSE)
	# crs$eigen is a list of three matrices when confidence intervals were
	# added (add_ci=TRUE in summary.corregp), a single matrix otherwise.
	if (is.list(crs$eigen)) {
		cat(" Value:\n",sep="",fill=FALSE)
		print(cbind(crs$eigen$value,TOTAL=c(sum(crs$eigen$value[1,]),NA,NA)),na.print="",...)
		cat("\n Percentage (%):\n",sep="",fill=FALSE)
		print(cbind(crs$eigen$"%",TOTAL=c(sum(crs$eigen$"%"[1,]),NA,NA)),na.print="",...)
		cat("\n Cumulative percentage (cum_%):\n",sep="",fill=FALSE)
		print(cbind(crs$eigen$"cum_%",TOTAL=c(sum(crs$eigen$"%"[1,]),NA,NA)),na.print="",...)
		cat("\n",sep="",fill=FALSE)
		}
	else {
		print(cbind(crs$eigen,TOTAL=c(apply(as.matrix(crs$eigen[1:2,],ncol=ncol(crs$eigen)),1,sum),sum(crs$eigen[2,]))),...)
		cat("\n",sep="",fill=FALSE)
		}
	if ("y" %in% names(crs) || "x" %in% names(crs)) {
		cat("\nContributions:\n\n",sep="",fill=FALSE)
		}
	if ("y" %in% names(crs)) {
		cat("Y (",f.var[1],"):\n",sep="",fill=FALSE)
		if ("p_a" %in% names(crs$y)) {
			cat(" Points to axes (Absolute contributions):\n",sep="",fill=FALSE)
			print(rbind(crs$y$p_a,TOTAL=apply(crs$y$p_a,2,sum)),...)
			cat("\n",sep="",fill=FALSE)
			}
		if ("a_p" %in% names(crs$y)) {
			cat(" Axes to points (Squared correlations):\n",sep="",fill=FALSE)
			print(cbind(crs$y$a_p,TOTAL=apply(crs$y$a_p,1,sum)),...)
			cat("\n",sep="",fill=FALSE)
			}
		}
	if ("x" %in% names(crs)) {
		cat("X",ifelse(length(f.var)==2,paste(" (",f.var[2],")",sep="",collapse=NULL),""),":\n",sep="",fill=FALSE)
		# The contribution tables are lists (one element per term) when the
		# summary was made from an xep=TRUE analysis, plain matrices otherwise.
		if ("p_a" %in% names(crs$x)) {
			cat(" Points to axes (Absolute contributions):\n",sep="",fill=FALSE)
			if (is.list(crs$x$p_a)) {
				p.var <- names(crs$x$p_a)
				for (p1 in seq_along(p.var)) {
					cat("\n ",p.var[p1],":\n",sep="",fill=FALSE)
					print(rbind(crs$x$p_a[[p1]],TOTAL=apply(as.matrix(crs$x$p_a[[p1]][complete.cases(crs$x$p_a[[p1]]),],ncol=ncol(crs$x$p_a[[p1]])),2,sum)),...)
					}
				cat("\n",sep="",fill=FALSE)
				}
			else {
				print(rbind(crs$x$p_a,TOTAL=apply(as.matrix(crs$x$p_a[complete.cases(crs$x$p_a),],ncol=ncol(crs$x$p_a)),2,sum)),...)
				cat("\n",sep="",fill=FALSE)
				}
			}
		if ("a_p" %in% names(crs$x)) {
			cat(" Axes to points (Squared correlations):\n",sep="",fill=FALSE)
			if (is.list(crs$x$a_p)) {
				p.var <- names(crs$x$a_p)
				for (p2 in seq_along(p.var)) {
					cat("\n ",p.var[p2],":\n",sep="",fill=FALSE)
					print(cbind(crs$x$a_p[[p2]],TOTAL=apply(as.matrix(crs$x$a_p[[p2]],ncol=ncol(crs$x$a_p[[p2]])),1,sum)),...)
					}
				cat("\n",sep="",fill=FALSE)
				}
			else {
				print(cbind(crs$x$a_p,TOTAL=apply(as.matrix(crs$x$a_p,ncol=ncol(crs$x$a_p)),1,sum)),...)
				cat("\n",sep="",fill=FALSE)
				}
			}
		}
	invisible()
	}
#' Confidence Interval
#'
#' This is the basic function for computing a confidence interval on the basis of a sample of data values.
#' @param x A numeric vector.
#' @param cl The confidence level for the confidence interval. Defaults to \code{0.95}.
#' @param nq Logical specifying whether to use a normal quantile (i.e. apply \code{\link[stats]{qnorm}}) in the computation of the confidence interval.
#' Defaults to \code{TRUE}. If \code{FALSE}, then the confidence interval is computed directly with the \code{\link[stats]{quantile}} function.
#' @return A vector with two components \code{Lower} and \code{Upper} giving the lower and upper confidence limits respectively.
#' @seealso \code{\link{ciplot.corregp}}, \code{\link{anova.corregp}}, \code{\link{agplot.corregp}}, \code{\link[stats]{confint}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' ci(haireye.crg$conf$eigen[, 1])
#' ci(haireye.crg$conf$eigen[, 2])
#' }
#' @export
# Compute a confidence interval from a sample of (bootstrap) values.
#
# Args:
#   x:  A numeric vector; NA values are dropped before computing.
#   cl: The confidence level. Defaults to 0.95.
#   nq: If TRUE, a normal-approximation interval mean(x) +/- z * sd(x) with
#       z the two-sided normal quantile for 'cl'; if FALSE, the empirical
#       quantiles of x at the matching tail probabilities.
# Returns a numeric vector c(Lower = ..., Upper = ...).
ci <- function(x,cl=0.95,nq=TRUE) {
	x <- x[complete.cases(x)]
	if (nq) {
		avg <- mean(x)
		# abs(qnorm((1-cl)/2)) == qnorm(1-(1-cl)/2): the two-sided z value.
		s.e <- sd(x)*abs(qnorm((1-cl)/2))
		out <- c(Lower=avg-s.e,Upper=avg+s.e)
	}
	else {
		out <- c(Lower=quantile(x,probs=(1-cl)/2,names=FALSE),Upper=quantile(x,probs=1-(1-cl)/2,names=FALSE))
	}
	# The former return(ifelse(!is.na(out), out, c(Lower=NA, Upper=NA))) was a
	# no-op wrapper (it mapped NA to NA and every other value to itself), so
	# the interval is now returned directly.
	out
}
#' Scree Plotting
#'
#' Method to produce a \emph{scree plot}, i.e. a bar chart of the eigenvalues.
#' @param x The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param type A character specification of which type of values to plot: either \code{"value"} for the \emph{actual eigenvalues}, \code{"\%"} for \emph{percentages} or
#' \code{"cum_\%"} for \emph{cumulative percentages}. Defaults to \code{"value"}.
#' @param add_ci Logical specifying whether to include the confidence intervals. Defaults to \code{FALSE}.
#' @param cl The confidence level for the confidence intervals. Defaults to \code{0.95}.
#' @param nq Logical specifying whether to use a normal quantile (i.e. apply \code{\link[stats]{qnorm}}) in the computation of the confidence intervals.
#' Defaults to \code{TRUE}. If \code{FALSE}, then the confidence intervals are computed directly with the \code{\link[stats]{quantile}} function.
#' @param ... Further arguments passed to or from other methods.
#' @details \code{screeplot} (of a \code{corregp} output) makes use of \code{\link[gplots]{barplot2}} from the package \pkg{gplots}.
#' @return A plot window containing the scree plot.
#' @seealso \code{\link{corregp}}, \code{\link{summary.corregp}}, \code{\link{anova.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' screeplot(haireye.crg, add_ci = TRUE)
#' }
#' @export
# Scree plot (bar chart of the eigenvalues) for a "corregp" object.
#
# Args:
#   x:      An object of class "corregp".
#   type:   "value", "%" or "cum_%" (case-insensitive).
#   add_ci: Whether to draw bootstrap confidence intervals; silently turned
#           off (with a warning) when the object has no bootstrap replicates.
#   cl, nq: Confidence level and quantile method, passed on to summary().
#   ...:    Further arguments passed to gplots::barplot2().
# Returns NULL invisibly; called for its side effect of plotting.
screeplot.corregp <- function(x,type="value",add_ci=FALSE,cl=0.95,nq=TRUE,...) {
	crg <- x
	if (add_ci && is.null(crg$conf)) {
		add_ci <- FALSE
		warning(paste("no bootstrapping was done in",as.list(match.call())$x,sep=" ",collapse=NULL),call.=FALSE)
	}
	type <- tolower(type)
	if (add_ci) {
		# With CIs, summary()$eigen[[type]] is a matrix whose first row holds
		# the estimates and whose rows 2-3 hold the lower and upper limits.
		r.int <- summary(object=crg,parm=NULL,contrib=NULL,nf=NULL,add_ci=TRUE,cl=cl,nq=nq)$eigen[[type]]
		r.val <- r.int[1,]
		gplots::barplot2(r.val,plot.ci=TRUE,ci.l=r.int[2,],ci.u=r.int[3,],...)
	}
	else {
		# FIX: the original passed ci.l=r.int[2,] / ci.u=r.int[3,] here as
		# well, although r.int is undefined in this branch -- it only worked
		# because barplot2() never forces those arguments when plot.ci=FALSE
		# (lazy evaluation). Branching the call removes the reference to an
		# undefined object entirely.
		r.val <- summary(object=crg,parm=NULL,contrib=NULL,nf=NULL,add_ci=FALSE)$eigen[type,]
		gplots::barplot2(r.val,plot.ci=FALSE,...)
	}
	invisible()
}
#' Building an ANOVA Table for Correspondence Regression
#'
#' Method to construct an ANOVA table for correspondence regression, i.e. a table with the Chi-squared deviation for each term in the \code{formula} of the
#' \code{\link{corregp}} call (or of each individual level in X in case \code{xep = FALSE}).
#' @param object The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param nf The number of dimensions to be retained in the reduced space. Defaults to all dimensions (no reduction).
#' @param cl The confidence level for the confidence intervals. Defaults to \code{0.95}.
#' @param nq Logical specifying whether to use a normal quantile (i.e. apply \code{\link[stats]{qnorm}}) in the computation of the confidence interval.
#' Defaults to \code{TRUE}. If \code{FALSE}, then the confidence interval is computed directly with the \code{\link[stats]{quantile}} function.
#' @param ... Further arguments passed to or from other methods.
#' @details
#' If \code{object} was made with bootstrap replications, then \code{anova.corregp} will automatically compute confidence intervals for the Chi-squared
#' deviations by means of the \code{\link{ci}} function.
#' @return A matrix with the Chi-squared deviations for all the terms in the \code{formula} of \code{object}, based on the selected number of dimensions. If
#' \code{object} was made with the argument \code{xep = FALSE}, then the output contains the Chi-squared deviation for every individual level in X.
#' @seealso \code{\link{print.anova.corregp}}, \code{\link{ci}}, \code{\link{summary.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' anova(haireye.crg, nf = 2)
#' }
#' @export
# S3 anova method for "corregp": builds a table of chi-squared deviations for
# each term in the model formula (or for each individual X level when the
# model was fitted with xep = FALSE), based on the first 'nf' latent axes.
# Bootstrap confidence intervals (via ci()) are appended automatically when
# the object contains replicates.
#
# Args:
#   object: An object of class "corregp".
#   nf:     Number of axes to retain (index or axis name); capped at the
#           effective rank with a warning.
#   cl, nq: Confidence level and quantile method, passed on to ci().
#   ...:    Further arguments (unused here).
# Returns a matrix of class "anova.corregp" with column "X^2" (plus CI
# columns when bootstrap replicates are available).
anova.corregp <- function(object,nf=NULL,cl=0.95,nq=TRUE,...) {
	crg <- object
	# Effective rank: number of eigenvalues that are numerically non-zero.
	o.rnk <- sum(crg$eigen>1e-08,na.rm=TRUE)
	nf <- ifelse(is.null(nf),o.rnk,nf)
	# 'nf' may be given as an axis name; resolve it to a single index.
	nf <- ifelse(is.character(nf),match(nf,table=colnames(crg$y)),nf)[1]
	if(nf>o.rnk || is.na(nf)) {
		nf <- o.rnk
		warning(paste(as.list(match.call())$object,"only has",o.rnk,"axes",sep=" ",collapse=NULL),call.=FALSE)
	}
	# Axis weights: eigenvalues when the scores were standardized, else 1.
	if (crg$aux$std) {
		o.std <- crg$eigen[1:nf]
	}
	else {
		o.std <- rep(1,times=nf)
	}
	# Row weights: marginal X frequencies under relative weighting, otherwise
	# unit weights shaped to match crg$x (list of terms or single matrix).
	# c.rel holds the corresponding per-replicate weights for the bootstrap.
	if (crg$aux$rel) {
		o.rel <- crg$freq$x
		if (!is.null(crg$conf)) {
			c.rel <- crg$conf$freq$x
		}
	}
	else {
		if (is.list(crg$x)) {
			o.rel <- lapply(crg$x,function(p1){rep(1,times=nrow(p1))})
			if (!is.null(crg$conf)) {
				c.rel <- lapply(crg$x,function(p2){matrix(1,nrow=crg$aux$b,ncol=nrow(p2))})
			}
		}
		else {
			o.rel <- rep(1,times=nrow(crg$x))
			if (!is.null(crg$conf)) {
				c.rel <- matrix(1,nrow=crg$aux$b,ncol=nrow(crg$x))
			}
		}
	}
	# Scale factor: total N unless phi-squared scaling was requested.
	o.phi <- ifelse(!crg$aux$phi,1,sum(crg$freq$y))
	if (is.list(crg$x)) {
		# Per-term table.  o.cmb encodes, for every term, the signed indices
		# of that term and all its lower-order marginal terms: a signed
		# inclusion-exclusion scheme so that each row isolates the deviation
		# attributable to that term alone (presumably the "Type III" logic
		# announced by print.anova.corregp -- confirm against the package
		# documentation).  NA entries (marginals absent from the formula)
		# are dropped afterwards.
		o.trm <- labels(terms(crg$aux$formula,keep.order=FALSE))
		o.cmb <- lapply(strsplit(o.trm,split=":"),function(p3){
			unlist(lapply(1:length(p3),function(l3){
				match(utils::combn(p3,l3,paste,collapse=":"),table=o.trm)*rep((-1)^l3,times=choose(length(p3),l3))
			}))*(-1)^length(p3)
		})
		o.cmb <- lapply(1:length(o.cmb),function(p4){o.cmb[[p4]][!is.na(o.cmb[[p4]])]})
		# For each term: sum of weighted squared coordinates over the involved
		# terms, combined with the inclusion-exclusion signs.
		a.out <- o.phi*do.call(what=rbind,args=lapply(1:length(o.cmb),function(p5){
			sum(unlist(lapply(names(crg$x[abs(o.cmb[[p5]])]),function(o5){
				sum(sweep(sweep(matrix(crg$x[[o5]][,1:nf]^2,ncol=nf),2,o.std,"*"),1,o.rel[[o5]],"*"),na.rm=TRUE)
			}))*sign(o.cmb[[p5]]),na.rm=TRUE)
		}))
		dimnames(a.out) <- list(names(crg$x),"X^2")
		if (!is.null(crg$conf)) {
			# Same computation per bootstrap replicate, then a CI per term.
			c.ssq <- lapply(crg$conf$x,function(p6){
				do.call(what=cbind,args=lapply(1:length(p6),function(o6){
					apply(sweep(matrix(p6[[o6]][,1:nf]^2,ncol=nf),2,o.std,"*"),1,sum)
				}))
			})
			c.csq <- o.phi*do.call(what=cbind,args=lapply(names(crg$x),function(p7){apply(c.ssq[[p7]]*c.rel[[p7]],1,sum,na.rm=TRUE)}))
			c.int <- do.call(what=rbind,args=lapply(o.cmb,function(p8){
				ci(apply(sweep(matrix(c.csq[,abs(p8)],ncol=length(p8)),2,sign(p8),"*"),1,sum),cl=cl,nq=nq)
			}))
			a.out <- cbind(a.out,c.int)
		}
	}
	else {
		# Per-level table: one weighted squared-coordinate sum per X level.
		a.out <- matrix(o.phi*apply(sweep(sweep(matrix(crg$x[,1:nf]^2,ncol=nf),2,o.std,"*"),1,o.rel,"*"),1,sum),ncol=1)
		dimnames(a.out) <- list(rownames(crg$x),"X^2")
		if (!is.null(crg$conf)) {
			c.int <- t(apply(o.phi*do.call(what=cbind,args=lapply(crg$conf$x,function(p9){
				apply(sweep(matrix(p9[,1:nf]^2,ncol=nf),2,o.std,"*"),1,sum)
			}))*c.rel,2,ci,cl=cl,nq=nq))
			a.out <- cbind(a.out,c.int)
		}
	}
	class(a.out) <- "anova.corregp"
	a.out
}
#' Printing the ANOVA Table of Correspondence Regression
#'
#' Method to print the output of \code{\link{anova.corregp}}.
#' @param x The output of a call to \code{anova} on a "corregp" object (i.e. an object of class "anova.corregp").
#' @param ... Further arguments passed to or from other methods.
#' @return The output of a call to \code{anova} on a "corregp" object.
#' @seealso \code{\link{anova.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' anova(haireye.crg, nf = 2)
#' }
#' @export
# S3 print method for "anova.corregp": a fixed two-line header, the
# unclassed deviation table, and a trailing blank line.
#
# Args:
#   x:   An object of class "anova.corregp".
#   ...: Further arguments passed on to print().
# Returns NULL invisibly; called for its side effect of printing.
print.anova.corregp <- function(x,...) {
	cat("ANOVA Table\n(Type III Tests)\n\n")
	print(unclass(x),...)
	cat("\n")
	invisible()
}
#' Extracting Coefficients from Correspondence Regression
#'
#' Method to extract the coefficients (i.e. scores) of a correspondence regression.
#' @param object The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param parm The parameter for which to extract the coefficients. Can be either \code{"y"}, \code{"x"}, or any vector of term names in X, level names in X or
#' level names in Y. Defaults to \code{"x"}.
#' @param axes The axes for which to extract the coefficients: a vector of indices. Defaults to all the axes.
#' @param ... Further arguments passed to or from other methods.
#' @details
#' The coefficients in correspondence regression are the same as the coordinate scores.
#' @return A matrix or vector with coefficients (i.e. scores) for the parameters and axes of interest.
#' @seealso \code{\link{fitted.corregp}}, \code{\link{residuals.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' coef(haireye.crg, parm = c("Hair", "Sex"), axes = 1:2)
#' coefficients(haireye.crg, parm = c("Hair", "Sex"), axes = 1:2)
#' }
#' @export
# S3 coef method for "corregp": extract coordinate scores.
#
# Args:
#   object: An object of class "corregp".
#   parm:   "y", "x", a vector of X term names, X level names, or Y level
#           names.  When several interpretations apply, the most specific
#           one wins (Y levels over X levels over X terms over "x").
#   axes:   Axis indices (or axis names) to return; capped at the effective
#           rank with a warning.
#   ...:    Further arguments (unused here).
# Returns a matrix or named vector of scores, or NULL if nothing matches.
coef.corregp <- function(object,parm="x",axes=NULL,...) {
	crg <- object
	# Effective rank: count of numerically non-zero eigenvalues.
	rnk <- sum(crg$eigen > 1e-08, na.rm = TRUE)
	if (is.null(axes)) {
		axes <- 1:rnk
	}
	if (is.character(axes)) {
		axes <- match(axes, table = colnames(crg$y))
	}
	if (any(axes > rnk)) {
		axes <- 1:rnk
		warning(paste(as.list(match.call())$object, "only has", rnk, "axes"), call. = FALSE)
	}
	# The original checked the interpretations of 'parm' in ascending order
	# of specificity with plain ifs (last match wins); checking them here in
	# descending order with else-if (first match wins) is equivalent because
	# every condition is side-effect free.
	res <- NULL
	if (length(parm) == 1 && tolower(parm) == "y") {
		res <- crg$y[, axes]
	}
	else if (all(parm %in% rownames(crg$y))) {
		res <- crg$y[parm, axes]
	}
	else if (all(parm %in% rownames(crg$x))) {
		res <- crg$x[parm, axes]
	}
	else if (is.list(crg$x) && all(parm %in% unlist(lapply(crg$x, rownames)))) {
		res <- do.call(what = rbind, args = crg$x)[parm, axes]
	}
	else if (is.list(crg$x) && all(parm %in% names(crg$x))) {
		res <- do.call(what = rbind, args = crg$x[parm])[, axes]
	}
	else if (length(parm) == 1 && tolower(parm) == "x") {
		if (is.list(crg$x)) {
			res <- do.call(what = rbind, args = crg$x)[, axes]
		}
		else {
			res <- crg$x[, axes]
		}
	}
	res
}
#' @rdname coef.corregp
#' @export
# Alias for coef.corregp(): forwards all arguments (by name) to the generic.
coefficients.corregp <- function(object,parm="x",axes=NULL,...) {
	coef(object, parm = parm, axes = axes, ...)
}
#' Extracting Fitted Values from Correspondence Regression
#'
#' Method to extract the fitted values of a correspondence regression.
#' @param object The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param parm The parameter for which to extract the fitted values. Can be \code{"all"}, \code{"both"} (or abbreviations), \code{"y"} or \code{"x"} for
#' the fitted values of every cell in the data, but it can also be any vector of term names in X or level names in X. Defaults to \code{"all"}.
#' @param nf The number of dimensions to be retained in the reduced space. Defaults to all dimensions (no reduction).
#' @param ... Further arguments passed to or from other methods.
#' @details
#' If all dimensions are retained, then the fitted values will only be equal to the observed counts if no conditioning factors were specified with the argument
#' \code{"part"} in the \code{\link{corregp}} call. This is because the associations with the conditioning factors (in \code{"part"}) are not taken into
#' account.
#' @return A matrix or vector with the fitted values for the parameters of interest, based on the selected number of dimensions.
#' @seealso \code{\link{coef.corregp}}, \code{\link{residuals.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' fitted(haireye.crg, parm = c("Hair", "Sex"), nf = 2)
#' fitted.values(haireye.crg, parm = c("Hair", "Sex"), nf = 2)
#' }
#' @export
# S3 fitted method for "corregp": reconstruct fitted cell values from the
# first 'nf' latent axes via  E + scale * X S Y'  where E is the product of
# the marginal frequencies and X/Y are the (weighted) coordinate matrices.
#
# Args:
#   object: An object of class "corregp".
#   parm:   "all"/"both"/"x"/"y" (and the internal aliases "a"/"b") for all
#           X rows, or a vector of X term names or X level names.  The
#           checks below are fall-through ifs: a later, more specific match
#           overrides an earlier one.
#   nf:     Number of axes to retain (index or axis name); capped at the
#           effective rank with a warning.
#   ...:    Further arguments (unused here).
# Returns a matrix of fitted values (X rows by Y columns), or NULL when
# 'parm' matches nothing.
fitted.corregp <- function(object,parm="all",nf=NULL,...) {
	crg <- object
	# Effective rank: count of numerically non-zero eigenvalues.
	o.rnk <- sum(crg$eigen>1e-08,na.rm=TRUE)
	nf <- ifelse(is.null(nf),o.rnk,nf)
	nf <- ifelse(is.character(nf),match(nf,table=colnames(crg$y)),nf)[1]
	if(nf>o.rnk || is.na(nf)) {
		nf <- o.rnk
		warning(paste(as.list(match.call())$object,"only has",o.rnk,"axes",sep=" ",collapse=NULL),call.=FALSE)
	}
	# Singular-value scaling of the retained axes; the direction depends on
	# whether the stored scores are already standardized.
	if (crg$aux$std) {
		o.std <- sqrt(crg$eigen[1:nf])
	}
	else {
		o.std <- 1/sqrt(crg$eigen[1:nf])
	}
	x.mat <- NULL
	if (crg$aux$rel) {
		# Relative weighting: rows are multiplied by their marginal
		# frequencies.  The nf==1 special case keeps the result a matrix.
		y.rel <- crg$freq$y
		if (nf!=1) {
			y.mat <- sweep(crg$y[,1:nf],1,y.rel,"*")
		}
		else {
			y.mat <- matrix(y.rel*crg$y[,1],ncol=1)
		}
		# parm == one of the global selectors: take all X rows.
		if (length(parm)==1 && tolower(parm) %in% c("x","y","a","b","all","both")) {
			if (is.list(crg$x)) {
				x.frq <- crg$freq$x
				names(x.frq) <- NULL
				x.rel <- unlist(x.frq)
				if (nf!=1) {
					x.mat <- sweep(do.call(what=rbind,args=crg$x)[,1:nf],1,x.rel,"*")
				}
				else {
					x.mat <- matrix(x.rel*do.call(what=rbind,args=crg$x)[,1],ncol=1)
				}
			}
			else {
				x.rel <- crg$freq$x
				if (nf!=1) {
					x.mat <- sweep(crg$x[,1:nf],1,x.rel,"*")
				}
				else {
					x.mat <- matrix(x.rel*crg$x[,1],ncol=1)
				}
			}
		}
		# parm == X term names (when X is stored per term).
		if (is.list(crg$x) && all(parm %in% names(crg$x))) {
			x.frq <- crg$freq$x[parm]
			names(x.frq) <- NULL
			x.rel <- unlist(x.frq)
			if (nf!=1) {
				x.mat <- sweep(do.call(what=rbind,args=crg$x[parm])[,1:nf],1,x.rel,"*")
			}
			else {
				x.mat <- matrix(x.rel*do.call(what=rbind,args=crg$x[parm])[,1],ncol=1)
			}
		}
		# parm == X level names inside a per-term list.
		if (is.list(crg$x) && all(parm %in% unlist(lapply(crg$x,rownames)))) {
			x.frq <- crg$freq$x
			names(x.frq) <- NULL
			x.rel <- unlist(x.frq)[parm]
			if (nf!=1) {
				x.mat <- sweep(matrix(do.call(what=rbind,args=crg$x)[parm,1:nf],ncol=nf),1,x.rel,"*")
			}
			else {
				x.mat <- matrix(x.rel*do.call(what=rbind,args=crg$x)[parm,1],ncol=1)
			}
		}
		# parm == X level names in a single matrix.
		if (all(parm %in% rownames(crg$x))) {
			x.rel <- crg$freq$x[parm]
			if (nf!=1) {
				x.mat <- sweep(matrix(crg$x[parm,1:nf],ncol=nf),1,x.rel,"*")
			}
			else {
				x.mat <- matrix(x.rel*crg$x[parm,1],ncol=1)
			}
		}
	}
	else {
		# Non-relative weighting: same structure as above, but rows are
		# multiplied by the square roots of the marginal frequencies.
		y.rel <- crg$freq$y
		if (nf!=1) {
			y.mat <- sweep(crg$y[,1:nf],1,sqrt(y.rel),"*")
		}
		else {
			y.mat <- matrix(sqrt(y.rel)*crg$y[,1],ncol=1)
		}
		if (length(parm)==1 && tolower(parm) %in% c("x","y","a","b","all","both")) {
			if (is.list(crg$x)) {
				x.frq <- crg$freq$x
				names(x.frq) <- NULL
				x.rel <- unlist(x.frq)
				if (nf!=1) {
					x.mat <- sweep(do.call(what=rbind,args=crg$x)[,1:nf],1,sqrt(x.rel),"*")
				}
				else {
					x.mat <- matrix(sqrt(x.rel)*do.call(what=rbind,args=crg$x)[,1],ncol=1)
				}
			}
			else {
				x.rel <- crg$freq$x
				if (nf!=1) {
					x.mat <- sweep(crg$x[,1:nf],1,sqrt(x.rel),"*")
				}
				else {
					x.mat <- matrix(sqrt(x.rel)*crg$x[,1],ncol=1)
				}
			}
		}
		if (is.list(crg$x) && all(parm %in% names(crg$x))) {
			x.frq <- crg$freq$x[parm]
			names(x.frq) <- NULL
			x.rel <- unlist(x.frq)
			if (nf!=1) {
				x.mat <- sweep(do.call(what=rbind,args=crg$x[parm])[,1:nf],1,sqrt(x.rel),"*")
			}
			else {
				x.mat <- matrix(sqrt(x.rel)*do.call(what=rbind,args=crg$x[parm])[,1],ncol=1)
			}
		}
		if (is.list(crg$x) && all(parm %in% unlist(lapply(crg$x,rownames)))) {
			x.frq <- crg$freq$x
			names(x.frq) <- NULL
			x.rel <- unlist(x.frq)[parm]
			if (nf!=1) {
				x.mat <- sweep(matrix(do.call(what=rbind,args=crg$x)[parm,1:nf],ncol=nf),1,sqrt(x.rel),"*")
			}
			else {
				x.mat <- matrix(sqrt(x.rel)*do.call(what=rbind,args=crg$x)[parm,1],ncol=1)
			}
		}
		if (all(parm %in% rownames(crg$x))) {
			x.rel <- crg$freq$x[parm]
			if (nf!=1) {
				x.mat <- sweep(matrix(crg$x[parm,1:nf],ncol=nf),1,sqrt(x.rel),"*")
			}
			else {
				x.mat <- matrix(sqrt(x.rel)*crg$x[parm,1],ncol=1)
			}
		}
	}
	if (!is.null(x.mat)) {
		# Reconstruction: independence part outer(x.rel, y.rel/N) plus the
		# scaled low-rank association term.  The sqrt factor undoes the
		# phi-squared scaling when it was applied.
		o.tot <- sum(crg$freq$y)
		out <- outer(x.rel,y.rel/o.tot)+sqrt(ifelse(crg$aux$phi,o.tot,1)/o.tot)*x.mat%*%diag(o.std,nrow=nf)%*%t(y.mat)
	}
	else {
		out <- NULL
	}
	out
}
#' @rdname fitted.corregp
#' @method fitted.values corregp
#' @export
# Alias for fitted.corregp(): forwards all arguments (by name) to the generic.
fitted.values.corregp <- function(object,parm="all",nf=NULL,...) {
	fitted(object, parm = parm, nf = nf, ...)
}
#' Extracting Residuals from Correspondence Regression
#'
#' Method to extract the residuals of a correspondence regression.
#' @param object The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param parm The parameter for which to extract the residuals. Can be \code{"all"}, \code{"both"} (or abbreviations), \code{"y"} or \code{"x"} for
#' the residuals of every cell in the data, but it can also be any vector of term names in X or level names in X. Defaults to \code{"all"}.
#' @param nf The number of dimensions to be retained in the reduced space. Defaults to all dimensions (no reduction).
#' @param ... Further arguments passed to or from other methods.
#' @details
#' If all dimensions are retained, then the residuals will only be exactly zero if no conditioning factors were specified with the argument
#' \code{"part"} in the \code{\link{corregp}} call. This is because the associations with the conditioning factors (in \code{"part"}) are not taken into
#' account.
#' @return A matrix or vector with the residuals for the parameters of interest, based on the selected number of dimensions.
#' @seealso \code{\link{coef.corregp}}, \code{\link{fitted.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' residuals(haireye.crg, parm = c("Hair", "Sex"), nf = 2)
#' resid(haireye.crg, parm = c("Hair", "Sex"), nf = 2)
#' }
#' @export
# S3 residuals method for "corregp": the residuals are the part of the
# association carried by the DISCARDED axes (those beyond 'nf'), computed as
# scale * X S Y' over axes nf+1..rank.  When all axes are retained, the
# coordinate matrices are replaced by zero columns so the residuals are 0.
#
# Args:
#   object: An object of class "corregp".
#   parm:   "all"/"both"/"x"/"y" (and the internal aliases "a"/"b") for all
#           X rows, or a vector of X term names or X level names.  The
#           checks below are fall-through ifs: a later, more specific match
#           overrides an earlier one.
#   nf:     Number of axes to retain (index or axis name); capped at the
#           effective rank with a warning.
#   ...:    Further arguments (unused here).
# Returns a matrix of residuals (X rows by Y columns), or NULL when 'parm'
# matches nothing.
residuals.corregp <- function(object,parm="all",nf=NULL,...) {
	crg <- object
	# Effective rank: count of numerically non-zero eigenvalues.
	o.rnk <- sum(crg$eigen>1e-08,na.rm=TRUE)
	nf <- ifelse(is.null(nf),o.rnk,nf)
	nf <- ifelse(is.character(nf),match(nf,table=colnames(crg$y)),nf)[1]
	if(nf>o.rnk || is.na(nf)) {
		nf <- o.rnk
		warning(paste(as.list(match.call())$object,"only has",o.rnk,"axes",sep=" ",collapse=NULL),call.=FALSE)
	}
	# Singular-value scaling of the DISCARDED axes; zero when none remain.
	if (nf<o.rnk) {
		if (crg$aux$std) {
			o.std <- sqrt(crg$eigen[-(1:nf)])
		}
		else {
			o.std <- 1/sqrt(crg$eigen[-(1:nf)])
		}
	}
	else {
		o.std <- 0
	}
	x.mat <- NULL
	if (crg$aux$rel) {
		# Relative weighting: rows multiplied by their marginal frequencies.
		# When no axes are discarded, a single zero column stands in.
		if (nf<o.rnk) {
			y.mat <- sweep(crg$y[,-(1:nf)],1,crg$freq$y,"*")
		}
		else {
			y.mat <- matrix(rep(0,times=nrow(crg$y)),ncol=1,dimnames=list(rownames(crg$y),NULL))
		}
		# parm == one of the global selectors: take all X rows.
		if (length(parm)==1 && tolower(parm) %in% c("x","y","a","b","all","both")) {
			if (is.list(crg$x)) {
				x.frq <- crg$freq$x
				names(x.frq) <- NULL
				x.rel <- unlist(x.frq)
				if (nf<o.rnk) {
					x.mat <- sweep(do.call(what=rbind,args=crg$x)[,-(1:nf)],1,x.rel,"*")
				}
				else {
					x.mat <- matrix(rep(0,times=length(x.rel)),ncol=1,dimnames=list(names(x.rel),NULL))
				}
			}
			else {
				x.rel <- crg$freq$x
				if (nf<o.rnk) {
					x.mat <- sweep(crg$x[,-(1:nf)],1,x.rel,"*")
				}
				else {
					x.mat <- matrix(rep(0,times=length(x.rel)),ncol=1,dimnames=list(names(x.rel),NULL))
				}
			}
		}
		# parm == X term names (when X is stored per term).
		if (is.list(crg$x) && all(parm %in% names(crg$x))) {
			x.frq <- crg$freq$x[parm]
			names(x.frq) <- NULL
			x.rel <- unlist(x.frq)
			if (nf<o.rnk) {
				x.mat <- sweep(do.call(what=rbind,args=crg$x[parm])[,-(1:nf)],1,x.rel,"*")
			}
			else {
				x.mat <- matrix(rep(0,times=length(x.rel)),ncol=1,dimnames=list(names(x.rel),NULL))
			}
		}
		# parm == X level names inside a per-term list.
		if (is.list(crg$x) && all(parm %in% unlist(lapply(crg$x,rownames)))) {
			x.frq <- crg$freq$x
			names(x.frq) <- NULL
			x.rel <- unlist(x.frq)[parm]
			if (nf<o.rnk) {
				x.mat <- sweep(matrix(do.call(what=rbind,args=crg$x)[parm,-(1:nf)],nrow=length(parm),dimnames=list(parm,NULL)),1,x.rel,"*")
			}
			else {
				x.mat <- matrix(rep(0,times=length(parm)),ncol=1,dimnames=list(parm,NULL))
			}
		}
		# parm == X level names in a single matrix.
		if (all(parm %in% rownames(crg$x))) {
			x.rel <- crg$freq$x[parm]
			if (nf<o.rnk) {
				x.mat <- sweep(matrix(crg$x[parm,-(1:nf)],nrow=length(parm),dimnames=list(parm,NULL)),1,x.rel,"*")
			}
			else {
				x.mat <- matrix(rep(0,times=length(parm)),ncol=1,dimnames=list(parm,NULL))
			}
		}
	}
	else {
		# Non-relative weighting: same structure, but rows are multiplied by
		# the square roots of the marginal frequencies.
		if (nf<o.rnk) {
			y.mat <- sweep(crg$y[,-(1:nf)],1,sqrt(crg$freq$y),"*")
		}
		else {
			y.mat <- matrix(rep(0,times=nrow(crg$y)),ncol=1,dimnames=list(rownames(crg$y),NULL))
		}
		if (length(parm)==1 && tolower(parm) %in% c("x","y","a","b","all","both")) {
			if (is.list(crg$x)) {
				x.frq <- crg$freq$x
				names(x.frq) <- NULL
				x.rel <- unlist(x.frq)
				if (nf<o.rnk) {
					x.mat <- sweep(do.call(what=rbind,args=crg$x)[,-(1:nf)],1,sqrt(x.rel),"*")
				}
				else {
					x.mat <- matrix(rep(0,times=length(x.rel)),ncol=1,dimnames=list(names(x.rel),NULL))
				}
			}
			else {
				x.rel <- crg$freq$x
				if (nf<o.rnk) {
					x.mat <- sweep(crg$x[,-(1:nf)],1,sqrt(x.rel),"*")
				}
				else {
					x.mat <- matrix(rep(0,times=length(x.rel)),ncol=1,dimnames=list(names(x.rel),NULL))
				}
			}
		}
		if (is.list(crg$x) && all(parm %in% names(crg$x))) {
			x.frq <- crg$freq$x[parm]
			names(x.frq) <- NULL
			x.rel <- unlist(x.frq)
			if (nf<o.rnk) {
				x.mat <- sweep(do.call(what=rbind,args=crg$x[parm])[,-(1:nf)],1,sqrt(x.rel),"*")
			}
			else {
				x.mat <- matrix(rep(0,times=length(x.rel)),ncol=1,dimnames=list(names(x.rel),NULL))
			}
		}
		if (is.list(crg$x) && all(parm %in% unlist(lapply(crg$x,rownames)))) {
			x.frq <- crg$freq$x
			names(x.frq) <- NULL
			x.rel <- unlist(x.frq)[parm]
			if (nf<o.rnk) {
				x.mat <- sweep(matrix(do.call(what=rbind,args=crg$x)[parm,-(1:nf)],nrow=length(parm),dimnames=list(parm,NULL)),1,sqrt(x.rel),"*")
			}
			else {
				x.mat <- matrix(rep(0,times=length(parm)),ncol=1,dimnames=list(parm,NULL))
			}
		}
		if (all(parm %in% rownames(crg$x))) {
			x.rel <- crg$freq$x[parm]
			if (nf<o.rnk) {
				x.mat <- sweep(matrix(crg$x[parm,-(1:nf)],nrow=length(parm),dimnames=list(parm,NULL)),1,sqrt(x.rel),"*")
			}
			else {
				x.mat <- matrix(rep(0,times=length(parm)),ncol=1,dimnames=list(parm,NULL))
			}
		}
	}
	if (!is.null(x.mat)) {
		# Residual association over the discarded axes; the sqrt factor
		# undoes the phi-squared scaling when it was applied.
		o.tot <- sum(crg$freq$y)
		out <- sqrt(ifelse(crg$aux$phi,o.tot,1)/o.tot)*x.mat%*%diag(o.std,nrow=length(o.std))%*%t(y.mat)
	}
	else {
		out <- NULL
	}
	out
}
#' @rdname residuals.corregp
#' @export
# Alias for residuals.corregp(): forwards all arguments (by name) to the generic.
resid.corregp <- function(object,parm="all",nf=NULL,...) {
	residuals(object, parm = parm, nf = nf, ...)
}
#' Getting \code{conf} Components from \code{corregp} Objects
#'
#' Internal function for retrieving the \code{conf} component(s) in a \code{corregp} object.
#' @param crg The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param parm The parameter for which to retrieve the \code{conf} components. Can be either \code{"y"}, \code{"x"}, or any vector of term names in X, level names
#' in X or level names in Y.
#' @details
#' \code{confGet} is an internal function to be called by \code{\link{cint.corregp}}, \code{\link{cell.corregp}} or \code{\link{cell3d.corregp}}, but not by users.
#' @return A list of components selected with \code{parm}.
# Retrieve the bootstrap ("conf") components of a "corregp" object matching
# 'parm' ("y", "x", a vector of X term names, X level names, or Y level
# names).  The checks are fall-through ifs: a later, more specific match
# deliberately overrides an earlier one (e.g. Y levels over X levels).
#
# Args:
#   crg:  An object of class "corregp".
#   parm: The parameter specification (see above).
# Returns a (named) list of bootstrap replicate matrices, or NULL when
# 'parm' matches nothing.
confGet <- function(crg,parm) {
	# FIX: inherits() instead of class(crg) == "corregp" -- the comparison
	# breaks (length > 1 condition, spurious failure) whenever the object
	# carries additional classes in front of "corregp".
	stopifnot(inherits(crg,"corregp"))
	c.out <- NULL
	if (length(parm)==1 && tolower(parm)=="x") {
		if(is.list(crg$x)) {
			c.out <- do.call(what=c,args=crg$conf$x)
			c.out <- stats::setNames(c.out,unlist(lapply(crg$x,rownames)))
		}
		else {
			c.out <- crg$conf$x
		}
	}
	if (is.list(crg$x) && all(parm %in% names(crg$x))) {
		c.out <- do.call(what=c,args=crg$conf$x[parm])
		c.out <- stats::setNames(c.out,unlist(lapply(crg$x[parm],rownames)))
	}
	if (is.list(crg$x) && all(parm %in% unlist(lapply(crg$x,rownames)))) {
		c.out <- do.call(what=c,args=crg$conf$x)
		c.out <- stats::setNames(c.out,unlist(lapply(crg$x,rownames)))
		c.out <- c.out[parm]
	}
	if (all(parm %in% rownames(crg$x))) {
		c.out <- crg$conf$x[parm]
	}
	if (all(parm %in% rownames(crg$y))) {
		c.out <- crg$conf$y[parm]
	}
	if (length(parm)==1 && tolower(parm)=="y") {
		c.out <- crg$conf$y
	}
	c.out
}
#' Confidence Intervals for Correspondence Regression
#'
#' Method to compute confidence intervals for coordinates in correspondence regression.
#' @param object The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param parm The parameter for which to compute the confidence intervals. Can be either \code{"y"}, \code{"x"}, or any vector of term names in X, level names in X or
#' level names in Y. Defaults to \code{"x"}.
#' @param axis The axis for which to compute the confidence intervals.
#' @param cl The confidence level for the confidence interval. Defaults to \code{0.95}.
#' @param nq Logical specifying whether to use a normal quantile (i.e. apply \code{\link[stats]{qnorm}}) in the computation of the confidence intervals.
#' Defaults to \code{TRUE}. If \code{FALSE}, then the confidence intervals are computed directly with the \code{\link[stats]{quantile}} function.
#' @param ... Further arguments passed to or from other methods.
#' @details
#' \code{cint} (of a \code{corregp} output) makes use of \code{\link{ci}}.
#'
#' Typically, \code{cint} is not so much used directly as it is called by a \code{\link{ciplot.corregp}} command.
#' @return A matrix with \code{Lower} and \code{Upper} confidence limits for the coordinates of interest.
#' @seealso \code{\link{ci}}, \code{\link{ciplot.corregp}}, \code{\link{agplot.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' cint(haireye.crg, parm = "y", axis = 1)
#' cint(haireye.crg, parm = c("Hair", "Sex"), axis = 1)
#' }
#' @export
# S3 cint method for "corregp": confidence intervals for the coordinates of
# the parameters in 'parm' on one latent axis, computed from the bootstrap
# replicates via ci().
#
# Args:
#   object: An object of class "corregp" (must contain bootstrap replicates).
#   parm:   "y", "x", X term names, X level names, or Y level names.
#   axis:   The axis (index or axis name) for which to compute intervals.
#   cl, nq: Confidence level and quantile method, passed on to ci().
#   ...:    Further arguments (unused here).
# Returns a matrix with one row per parameter and columns Lower/Upper.
cint.corregp <- function(object,parm="x",axis,cl=0.95,nq=TRUE,...) {
	crg <- object
	if (is.null(crg$conf)) {
		stop(paste("no bootstrapping was done in",as.list(match.call())$object),call.=FALSE)
	}
	# An axis given by name is resolved to its (first matching) index.
	if (is.character(axis)) {
		axis <- match(axis,table=colnames(crg$y))[1]
	}
	boot.list <- confGet(crg=crg,parm=parm)
	ints <- lapply(boot.list,function(rep.mat) ci(rep.mat[,axis],cl=cl,nq=nq))
	do.call(what=rbind,args=ints)
}
#' @rdname cint.corregp
#' @export
# Generic for computing confidence intervals of corregp coordinates;
# dispatches on the class of 'object' (see cint.corregp).
cint <- function(object,...) {
	UseMethod("cint",object)
}
#' Plotting Confidence Intervals for Correspondence Regression
#'
#' Method to plot confidence intervals for coordinates in correspondence regression.
#' @param x The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param parm The parameter for which to plot the confidence intervals. Can be either \code{"y"}, \code{"x"}, or any vector of term names in X, level names in X or
#' level names in Y. Defaults to \code{"x"}.
#' @param axis The axis for which to plot the confidence intervals.
#' @param cl The confidence level for the confidence intervals. Defaults to \code{0.95}.
#' @param nq Logical specifying whether to use a normal quantile (i.e. apply \code{\link[stats]{qnorm}}) in the computation of the confidence intervals.
#' Defaults to \code{TRUE}. If \code{FALSE}, then the confidence intervals are computed directly with the \code{\link[stats]{quantile}} function.
#' @param horiz Logical specifying whether the confidence intervals should be plotted horizontally or not. Defaults to \code{FALSE}.
#' @param na.rm Logical specifying whether to omit \code{NA} coordinates from the plot. Defaults to \code{FALSE}.
#' @param type The type of plot: see \code{\link[graphics]{plot.default}}. For correspondence regression, there is an additional option \code{"labs"} which
#' plots the text labels at the centers of the confidence intervals. Defaults to \code{"p"}.
#' @param col Color of the text labels: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param cex Character expansion factor: a number to specify the size of the text labels.
#' @param font Font of the text labels: \code{1} for plain, \code{2} for bold, \code{3} for italic, and \code{4} for bold italic. Defaults to \code{1}.
#' @param family Font family of the text labels: can be \code{"serif"}, \code{"sans"}, \code{"mono"} or one of the \code{\link[grDevices]{Hershey}} fonts.
#' @param alim Vector of two values specifying the lower and upper limit between which to plot the axis.
#' @param adir Reading direction of the text labels on the (horizontal) axis: either a \code{numeric} value between \code{0} and \code{3} (see the \code{las} argument in
#' the graphical parameters \code{\link[graphics]{par}}) or a \code{character} value matching either \code{"horizontal"} or \code{"vertical"}. Defaults to \code{1} (horizontal).
#' @param ecol Color of the error bars: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param ewid Width of the error bars: a number to specify the line width.
#' @param etyp Line type of the error bars: \code{0} or \code{"blank"}, \code{1} or \code{"solid"}, \code{2} or \code{"dashed"}, \code{3} or \code{"dotted"},
#' \code{4} or \code{"dotdash"}, \code{5} or \code{"longdash"}, \code{6} or \code{"twodash"}. Defaults to \code{1}.
#' @param psym The symbol (or "plotting character") to use for the centers of the confidence intervals.
#' @param pcol Color of the center symbol: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param pcex Character expansion factor of the center symbol.
#' @param pbgc Background color of the center symbol: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param lwd Width of all lines except for the error bars, e.g. the connecting lines: a number to specify the line width.
#' @param lty Line type of all lines except for the error bars, e.g. the connecting lines: \code{0} or \code{"blank"}, \code{1} or \code{"solid"}, \code{2} or \code{"dashed"},
#' \code{3} or \code{"dotted"}, \code{4} or \code{"dotdash"}, \code{5} or \code{"longdash"}, \code{6} or \code{"twodash"}. Defaults to \code{1}.
#' @param sfrac Width of "crossbar" at the end of error bar as a fraction of the x plotting region. Defaults to 0.01.
#' @param gap Space left between the center of the error bar and the lines marking the error bar in units of the height (width) of the letter "O". Defaults to 0.
#' @param main The main title of the plot.
#' @param sub The subtitle of the plot.
#' @param ... Further arguments passed to or from other methods.
#' @details \code{ciplot} (of a \code{corregp} output) makes use of \code{\link[gplots]{plotCI}} from the package \pkg{gplots}.
#' @return A plot window containing the confidence intervals.
#' @seealso \code{\link{ci}}, \code{\link[gplots]{plotCI}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' ciplot(haireye.crg, parm = "y", axis = 1)
#' ciplot(haireye.crg, parm = c("Hair", "Sex"), axis = 1)
#' }
#' @export
ciplot.corregp <- function(x,parm="x",axis,cl=0.95,nq=TRUE,horiz=FALSE,na.rm=FALSE,type="p",col="darkgrey",cex=par("cex"),font=par("font"),family=par("family"),alim=NULL,adir=1,ecol="darkgrey",ewid=par("lwd"),etyp=par("lty"),psym=16,pcol=par("col"),pcex=cex,pbgc=par("bg"),lwd=ewid,lty=etyp,sfrac=0.01,gap=0,main=NULL,sub=NULL,...) {
	# Plot bootstrap confidence intervals for the coordinates of the selected
	# parameters ('parm') on one latent axis of a corregp fit, using
	# gplots::plotCI for the actual drawing.
	crg <- x
	# Confidence intervals require bootstrap replicates stored in $conf.
	if (is.null(crg$conf)) {
		stop(paste("no bootstrapping was done in",as.list(match.call())$x,sep=" ",collapse=NULL),call.=FALSE)
	}
	# Text labels instead of points (type="labs") are only drawn vertically.
	if (type=="labs" && horiz) {
		horiz <- FALSE
		warning("type='labs' will plot the confidence intervals vertically",call.=FALSE)
	}
	# The axis may be given by name: translate it to a column index of $y.
	if (is.character(axis)) {
		axis <- match(axis,table=colnames(crg$y))[1]
	}
	# adir may be "horizontal"/"vertical": translate to the numeric 'las' codes.
	if (is.character(adir)) {
		adir <- pmatch(tolower(adir),table=c("horizontal","vertical"))
	}
	a.lab <- colnames(crg$y)[axis]
	# Resolve 'parm' to the point estimates (p.loc) on the chosen axis.
	# The cascade below handles, in order: all X levels ("x"), X term names
	# (when $x is a list of per-term matrices), X level names inside such a
	# list, X level names in a plain matrix, Y level names, and all Y levels
	# ("y").  When only one level is selected, its name is restored because
	# single-row/element indexing drops it.
	p.loc <- NULL
	if (length(parm)==1 && tolower(parm)=="x") {
		if(is.list(crg$x)) {
			p.loc <- do.call(what=rbind,args=crg$x)[,axis]
		}
		else {
			p.loc <- crg$x[,axis]
		}
	}
	if (is.list(crg$x) && all(parm %in% names(crg$x))) {
		p.loc <- do.call(what=rbind,args=crg$x[parm])[,axis]
	}
	if (is.list(crg$x) && all(parm %in% unlist(lapply(crg$x,rownames)))) {
		p.loc <- do.call(what=rbind,args=crg$x)[parm,axis]
		if (length(p.loc)==1) {
			names(p.loc) <- parm
		}
	}
	if (all(parm %in% rownames(crg$x))) {
		p.loc <- crg$x[parm,axis]
		if (length(p.loc)==1) {
			names(p.loc) <- parm
		}
	}
	if (all(parm %in% rownames(crg$y))) {
		p.loc <- crg$y[parm,axis]
		if (length(p.loc)==1) {
			names(p.loc) <- parm
		}
	}
	if (length(parm)==1 && tolower(parm)=="y") {
		p.loc <- crg$y[,axis]
	}
	# Two-column matrix of lower/upper interval bounds for each parameter.
	p.int <- cint(object=crg,parm=parm,axis=axis,cl=cl,nq=nq)
	# Optionally drop parameters whose point estimate is NA.
	if (na.rm) {
		p.int <- p.int[!is.na(p.loc),]
		p.loc <- p.loc[!is.na(p.loc)]
	}
	# Remember the current margins so they can be restored on exit, and widen
	# the label margin to fit the longest parameter name.
	p.mai <- par("mai")
	n.mai <- p.mai
	if (horiz) {
		# Horizontal intervals: values on the x axis, parameters stacked on y.
		a.err <- "x"
		x.arg <- p.loc
		y.arg <- 1:length(p.loc)
		if (is.null(alim)) {
			x.lim <- signif(range(range(p.int[,1],na.rm=TRUE),range(p.int[,2],na.rm=TRUE)))
		}
		else {
			x.lim <- alim
		}
		y.lim <- c(1,length(p.loc))
		x.axt <- "s"
		y.axt <- "n"
		x.lab <- a.lab
		y.lab <- ""
		a.num <- 2
		a.las <- 1
		n.mai[2] <- max(c(p.mai[2] - 0.3,strwidth(names(p.loc),units="inches",cex=cex,font=font,family=family))) + 0.3
	}
	else {
		# Vertical intervals: parameters along the x axis, values on y.
		a.err <- "y"
		x.arg <- 1:length(p.loc)
		y.arg <- p.loc
		x.lim <- c(1,length(p.loc))
		if (is.null(alim)) {
			y.lim <- signif(range(range(p.int[,1],na.rm=TRUE),range(p.int[,2],na.rm=TRUE)))
		}
		else {
			y.lim <- alim
		}
		x.axt <- "n"
		y.axt <- "s"
		x.lab <- ""
		y.lab <- a.lab
		a.num <- 1
		a.las <- adir
		# Vertical label direction needs extra room below the plot.
		if (adir > 1) {
			n.mai[1] <- max(c(p.mai[1] - 0.3,strwidth(names(p.loc),units="inches",cex=cex,font=font,family=family))) + 0.3
		}
	}
	par(mai=n.mai)
	# Draw the interval bars (points suppressed with pch=NA; added below).
	gplots::plotCI(x=x.arg,y=y.arg,uiw=NULL,ui=p.int[,2],li=p.int[,1],err=a.err,xlim=x.lim,ylim=y.lim,pch=NA,barcol=ecol,lwd=ewid,lty=etyp,sfrac=sfrac,gap=gap,xaxt=x.axt,yaxt=y.axt,labels=FALSE,add=FALSE,xlab=x.lab,ylab=y.lab,main=main,sub=sub,...)
	if (type=="labs") {
		# Parameter names printed at the point estimates instead of symbols.
		text(x=x.arg,y=y.arg,labels=names(p.loc),col=col,cex=cex,font=font,family=family)
	}
	else {
		# Point estimates as symbols, parameter names on the category axis.
		points(x=x.arg,y=y.arg,type=type,pch=psym,col=pcol,cex=pcex,bg=pbgc,lwd=lwd,lty=lty)
		axis(side=a.num,at=1:length(p.loc),labels=names(p.loc),col.axis=col,cex.axis=cex,font.axis=font,las=a.las)
	}
	# Restore the original margins before returning.
	par(mai=p.mai)
	invisible()
}
#' @rdname ciplot.corregp
#' @export
ciplot <- function(x,...) {
	# S3 generic: dispatch to a class-specific method such as ciplot.corregp.
	UseMethod("ciplot")
}
#' Parallel Coordinate Plotting for Correspondence Regression
#'
#' Method to produce a \emph{parallel coordinate plot} of the output of a correspondence regression.
#' @param x The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param parm The parameter for which to plot the coordinates. Can be either \code{"y"}, \code{"x"}, or any vector of term names in X, level names in X or
#' level names in Y. Defaults to \code{"x"}.
#' @param axes The axes to plot.
#' @param add_ci Logical specifying whether to include the confidence intervals. Defaults to \code{FALSE}.
#' @param cl The confidence level for the confidence intervals. Defaults to \code{0.95}.
#' @param nq Logical specifying whether to use a normal quantile (i.e. apply \code{\link[stats]{qnorm}}) in the computation of the confidence intervals.
#' Defaults to \code{TRUE}. If \code{FALSE}, then the confidence intervals are computed directly with the \code{\link[stats]{quantile}} function.
#' @param col Color of the text labels: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param cex Character expansion factor: a number to specify the size of the text labels.
#' @param font Font of the text labels: \code{1} for plain, \code{2} for bold, \code{3} for italic, and \code{4} for bold italic. Defaults to \code{1}.
#' @param family Font family of the text labels: can be \code{"serif"}, \code{"sans"}, \code{"mono"} or one of the \code{\link[grDevices]{Hershey}} fonts.
#' @param lwd Width of the connecting lines: a number to specify the line width.
#' @param lty Line type of the connecting lines: \code{0} or \code{"blank"}, \code{1} or \code{"solid"}, \code{2} or \code{"dashed"}, \code{3} or
#' \code{"dotted"}, \code{4} or \code{"dotdash"}, \code{5} or \code{"longdash"}, \code{6} or \code{"twodash"}. Defaults to \code{1}.
#' @param lcol Color of the connecting lines: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param psym The symbol (or "plotting character") for the values of the coordinates on the axes.
#' @param pcol Color of the symbol for the values on the axes: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param pcex Character expansion factor of the symbol for the values on the axes.
#' @param ecol Color of the error lines (connecting the confidence intervals on each axis): either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param ewid Width of the error lines (connecting the confidence intervals on each axis): a number to specify the line width.
#' @param etyp Line type of the error lines (connecting the confidence intervals on each axis): \code{0} or \code{"blank"}, \code{1} or \code{"solid"},
#' \code{2} or \code{"dashed"}, \code{3} or \code{"dotted"}, \code{4} or \code{"dotdash"}, \code{5} or \code{"longdash"}, \code{6} or
#' \code{"twodash"}. Defaults to \code{2}.
#' @param acol Color of the parallel axes: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param awid Width of the parallel axes: a number to specify the line width.
#' @param atyp Line type of the parallel axes: \code{0} or \code{"blank"}, \code{1} or \code{"solid"}, \code{2} or \code{"dashed"}, \code{3} or
#' \code{"dotted"}, \code{4} or \code{"dotdash"}, \code{5} or \code{"longdash"}, \code{6} or \code{"twodash"}. Defaults to \code{1}.
#' @param acex Character expansion factor for the labels of the parallel axes.
#' @param afnt Font for the labels of the parallel axes: \code{1} for plain, \code{2} for bold, \code{3} for italic, and \code{4} for bold italic.
#' @param adir Reading direction of the labels on the parallel axes: either a \code{numeric} value between \code{0} and \code{3} (see the \code{las}
#' argument in the graphical parameters \code{\link[graphics]{par}}) or a \code{character} value matching either \code{"horizontal"} or
#' \code{"vertical"}. Defaults to \code{1} (horizontal).
#' @param add_scale Logical specifying whether to add a scale for the parallel axes (which are normalised).
#' @param main The main title of the plot.
#' @param sub The subtitle of the plot.
#' @param ... Further arguments passed to or from other methods.
#' @details Although adding lines for confidence intervals is possible, it is not recommended, as it typically leads to an unreadable plot.
#' @return A parallel coordinate plot containing the output of a correspondence regression.
#' @seealso \code{\link{ciplot.corregp}}, \code{\link{plot.corregp}}, \code{\link{plot3d.corregp}}, \code{\link{agplot.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' pcplot(haireye.crg, parm = "y", axes = 1:3)
#' pcplot(haireye.crg, parm = c("Hair", "Sex"), axes = 1:3)
#' }
#' @export
pcplot.corregp <- function(x,parm="x",axes,add_ci=FALSE,cl=0.95,nq=TRUE,col="darkgrey",cex=par("cex"),font=par("font"),family=par("family"),lwd=par("lwd"),lty=par("lty"),lcol=col,psym=NULL,pcol=col,pcex=cex,ecol="red",ewid=1,etyp=2,acol="black",awid=1,atyp=1,acex=cex,afnt=font,adir=1,add_scale=FALSE,main=NULL,sub=NULL,...) {
	# Parallel coordinate plot of the selected parameters across several
	# latent axes of a corregp fit; each axis is normalised to [0, 1].
	crg <- x
	# Confidence-interval lines require bootstrap replicates in $conf;
	# silently disable them (with a warning) otherwise.
	if (add_ci && is.null(crg$conf)) {
		add_ci <- FALSE
		warning(paste("no bootstrapping was done in",as.list(match.call())$x,sep=" ",collapse=NULL),call.=FALSE)
	}
	# A parallel coordinate plot needs at least two axes.
	if (length(axes)==1) {
		stop("pcplot is suited for more than 1 axis; use 'ciplot' instead",call.=FALSE)
	}
	else {
		a.len <- length(axes)
	}
	# Axes given by name are translated to column indices of $y.
	if (is.character(axes)) {
		axes <- match(axes,table=colnames(crg$y))
	}
	# adir may be "horizontal"/"vertical": translate to the numeric 'las' codes.
	if (is.character(adir)) {
		adir <- pmatch(tolower(adir),table=c("horizontal","vertical"))
	}
	# Resolve 'parm' to a coordinate matrix (p.loc) with one row per level and
	# one column per requested axis.  The cascade mirrors ciplot.corregp:
	# all X levels, X term names, X level names (list or matrix), Y level
	# names, all Y levels.
	# NOTE(review): unlike ciplot.corregp, p.loc is not pre-initialised to
	# NULL here, so an unmatched 'parm' raises "object 'p.loc' not found".
	if (length(parm)==1 && tolower(parm)=="x") {
		if(is.list(crg$x)) {
			p.loc <- do.call(what=rbind,args=crg$x)[,axes]
		}
		else {
			p.loc <- crg$x[,axes]
		}
	}
	if (is.list(crg$x) && all(parm %in% names(crg$x))) {
		p.loc <- do.call(what=rbind,args=crg$x[parm])[,axes]
	}
	if (is.list(crg$x) && all(parm %in% unlist(lapply(crg$x,rownames)))) {
		p.loc <- do.call(what=rbind,args=crg$x)[parm,axes]
		if (nrow(p.loc)==1) {
			rownames(p.loc) <- parm
		}
	}
	if (all(parm %in% rownames(crg$x))) {
		p.loc <- rbind(crg$x[parm,axes])
		if (nrow(p.loc)==1) {
			rownames(p.loc) <- parm
		}
	}
	if (all(parm %in% rownames(crg$y))) {
		p.loc <- rbind(crg$y[parm,axes])
		if (nrow(p.loc)==1) {
			rownames(p.loc) <- parm
		}
	}
	if (length(parm)==1 && tolower(parm)=="y") {
		p.loc <- crg$y[,axes]
	}
	# Per-axis ranges used for the [0, 1] normalisation below.
	p.min <- apply(p.loc,2,min,na.rm=TRUE)
	p.max <- apply(p.loc,2,max,na.rm=TRUE)
	if (add_ci) {
		# Widen the ranges so the confidence bounds also fit on each axis.
		a.int <- lapply(axes,function(a){cint(object=crg,parm=parm,axis=a,cl=cl,nq=nq)})
		p.min <- pmin(p.min,sapply(a.int,function(a1){min(a1[,1],na.rm=TRUE)}))
		p.max <- pmax(p.max,sapply(a.int,function(a2){max(a2[,2],na.rm=TRUE)}))
	}
	# Normalise each column (axis) to [0, 1].
	a.loc <- sweep(sweep(p.loc,2,p.min,"-"),2,(p.max-p.min),"/")
	# Widen the left margin for the level labels (and the bottom margin when
	# the axis labels are rotated); restored on exit.
	p.mai <- par("mai")
	n.mai <- p.mai
	n.mai[2] <- max(strwidth(rownames(a.loc),units="inches",cex=cex,font=font,family=family)) + 0.3
	if (adir > 1) {
		n.mai[1] <- max(c(p.mai[1] - 0.3,strwidth(colnames(a.loc),units="inches",cex=acex,font=afnt))) + 0.3
	}
	par(mai=n.mai)
	# Empty canvas; axes, lines and labels are layered on top below.
	matplot(t(a.loc),type="n",xlim=c(1,a.len),ylim=c(0,1),ylab="",main=main,sub=sub,axes=FALSE,add=FALSE,...)
	axis(side=1,at=1:a.len,labels=colnames(a.loc),col.axis=acol,cex.axis=acex,font.axis=afnt,las=adir)
	if (add_scale) {
		# Optional right-hand scale for the normalised coordinates.
		axis(side=4,col.axis=acol,cex.axis=acex,font.axis=afnt,las=1)
	}
	# Vertical parallel axes, then one polyline per level.
	abline(v=1:a.len,col=acol,lty=atyp,lwd=awid)
	matlines(t(a.loc),col=lcol,lty=lty,lwd=lwd)
	if (add_ci) {
		# Lower and upper confidence bounds, normalised with the same ranges.
		matlines(t(sweep(sweep(sapply(a.int,function(a3){a3[,1]}),2,p.min,"-"),2,(p.max-p.min),"/")),col=ecol,lty=etyp,lwd=ewid)
		matlines(t(sweep(sweep(sapply(a.int,function(a4){a4[,2]}),2,p.min,"-"),2,(p.max-p.min),"/")),col=ecol,lty=etyp,lwd=ewid)
	}
	# Optional point symbols at the coordinate values on each axis.
	psym <- ifelse(is.null(psym),NA,psym)
	if (!is.na(psym)) {
		matpoints(t(a.loc),pch=psym,col=pcol,cex=pcex)
	}
	# Level labels in the left margin, at the height of the first axis.
	mtext(text=rownames(a.loc),side=2,at=a.loc[,1],las=1,col=col,cex=cex,font=font,family=family)
	par(mai=p.mai)
	invisible()
}
#' @rdname pcplot.corregp
#' @export
pcplot <- function(x,...) {
	# S3 generic: dispatch to a class-specific method such as pcplot.corregp.
	UseMethod("pcplot")
}
#' Confidence Ellipses for Correspondence Regression
#'
#' Method to compute confidence ellipses for coordinates in correspondence regression.
#' @param object The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param parm The parameter for which to compute the confidence ellipses. Can be either \code{"y"}, \code{"x"}, or any vector of term names in X, level names in X or
#' level names in Y. Defaults to \code{"x"}.
#' @param axes The axes for which to compute the confidence ellipses: a vector of two values. Defaults to the first two axes.
#' @param cl The confidence level for the confidence ellipses. Defaults to \code{0.95}.
#' @param np The number of points to represent the confidence ellipses. Defaults to \code{100}.
#' @param ... Further arguments passed to or from other methods.
#' @details
#' \code{cell} (of a \code{corregp} output) makes use of \code{\link[ellipse]{ellipse}} from the package \pkg{ellipse}.
#'
#' Typically, \code{cell} is not so much used directly as it is called by a \code{\link{plot.corregp}} command.
#' @return A list containing \code{np} points for each confidence ellipse of interest.
#' @seealso \code{\link{plot.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' cell(haireye.crg, parm = "y")
#' cell(haireye.crg, parm = c("Hair", "Sex"))
#' }
#' @export
cell.corregp <- function(object, parm = "x", axes = 1:2, cl = 0.95, np = 100, ...) {
	# Compute 2D confidence ellipses (level 'cl', 'np' points each) for the
	# selected parameters on two latent axes, based on bootstrap replicates.
	crg <- object
	# Ellipses can only be computed when bootstrap replicates are available.
	if (is.null(crg$conf)) {
		stop(paste("no bootstrapping was done in", as.list(match.call())$object, sep = " ", collapse = NULL), call. = FALSE)
	}
	# Axes given by name are translated to column indices of the Y coordinates.
	if (is.character(axes)) {
		axes <- match(axes, table = colnames(crg$y))[1:2]
	}
	# Fetch the bootstrap replicates for 'parm' and discard any parameter
	# without a single complete (non-NA) replicate.
	boot_reps <- confGet(crg = crg, parm = parm)
	boot_reps <- Filter(function(rep_mat) any(complete.cases(rep_mat)), boot_reps)
	# One ellipse per parameter: covariance and centre of the complete
	# replicates on the chosen pair of axes.
	lapply(boot_reps, function(rep_mat) {
		pts <- rep_mat[complete.cases(rep_mat), axes]
		ellipse::ellipse(cov(pts), centre = apply(pts, 2, mean), level = cl, npoints = np)
	})
}
#' @rdname cell.corregp
#' @export
cell <- function(object,...) {
	# S3 generic: dispatch to a class-specific method such as cell.corregp.
	UseMethod("cell")
}
#' Plotting Correspondence Regression
#'
#' Basic method to plot the output of a correspondence regression.
#' @param x The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param axes The axes to plot: a vector of two values. Defaults to the first two axes.
#' @param y_btm Logical specifying whether the Y levels should be plotted first ("at the bottom") and then be overlaid by the X levels. Defaults to \code{TRUE}.
#' @param y_ell Logical specifying whether the confidence ellipses of the Y levels should be plotted. Defaults to \code{FALSE}.
#' @param x_ell Logical specifying whether the confidence ellipses of the X levels should be plotted. Defaults to \code{FALSE}.
#' @param ysub Vector of indices to select a subset of the Y levels.
#' @param xsub Vector of indices to select a subset of the X levels.
#' @param hlim Vector of two values specifying the lower and upper limit between which to plot the horizontal axis.
#' @param vlim Vector of two values specifying the lower and upper limit between which to plot the vertical axis.
#' @param expa_btm Expansion factor for the bottom coordinates: a number to rescale the axes.
#' @param expa_top Expansion factor for the top coordinates: a number to rescale the axes.
#' @param asp The aspect ratio for the whole plot. See \code{\link[graphics]{plot.window}}.
#' @param asp_btm The aspect ratio for the bottom coordinates. See \code{\link[graphics]{plot.window}}.
#' @param asp_top The aspect ratio for the top coordinates. See \code{\link[graphics]{plot.window}}.
#' @param col_btm Color of the bottom levels: either \code{numeric} or see \code{\link[grDevices]{colors}}. Defaults to \code{"darkgrey"}.
#' @param col_top Color of the top levels: either \code{numeric} or see \code{\link[grDevices]{colors}}. Defaults to \code{"red"}.
#' @param cex_btm Character expansion factor of the bottom levels: a number to specify the size of the text labels.
#' @param cex_top Character expansion factor of the top levels: a number to specify the size of the text labels.
#' @param font_btm Font of the bottom levels: \code{1} for plain, \code{2} for bold, \code{3} for italic, and \code{4} for bold italic. Defaults to \code{1}.
#' @param font_top Font of the top levels: \code{1} for plain, \code{2} for bold, \code{3} for italic, and \code{4} for bold italic. Defaults to \code{1}.
#' @param fam_btm Font family of the bottom levels: can be \code{"serif"}, \code{"sans"}, \code{"mono"} or one of the \code{\link[grDevices]{Hershey}} fonts.
#' @param fam_top Font family of the top levels: can be \code{"serif"}, \code{"sans"}, \code{"mono"} or one of the \code{\link[grDevices]{Hershey}} fonts.
#' @param col_ell Color of the confidence ellipses: either a number or see \code{\link[grDevices]{colors}}.
#' @param lwd_ell Width of the confidence ellipses: a number to specify the line width.
#' @param lty_ell Line type of the confidence ellipses: \code{0} or \code{"blank"}, \code{1} or \code{"solid"}, \code{2} or \code{"dashed"}, \code{3} or \code{"dotted"},
#' \code{4} or \code{"dotdash"}, \code{5} or \code{"longdash"}, \code{6} or \code{"twodash"}. Defaults to \code{1}.
#' @param col_ori Color of the lines through the origin: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param lwd_ori Width of the lines through the origin: a number to specify the line width.
#' @param lty_ori Line type of the lines through the origin: \code{0} or \code{"blank"}, \code{1} or \code{"solid"}, \code{2} or \code{"dashed"},
#' \code{3} or \code{"dotted"}, \code{4} or \code{"dotdash"}, \code{5} or \code{"longdash"}, \code{6} or \code{"twodash"}. Defaults to \code{1}.
#' @param main The main title of the plot.
#' @param sub The subtitle of the plot.
#' @param hlab The title of the horizontal axis.
#' @param vlab The title of the vertical axis.
#' @param cl The confidence level for the confidence ellipses. Defaults to \code{0.95}.
#' @param np The number of points to represent the confidence ellipses. Defaults to \code{100}.
#' @param add_ori Logical specifying whether to add lines through the origin. Defaults to \code{TRUE}.
#' @param ... Further arguments passed to or from other methods.
#' @details The plot of a correspondence regression is by definition a \code{\link[stats]{biplot}}.
#' @return A plot window containing the output of a correspondence regression.
#' @references
#' Gower, J., S. Lubbe and N. Le Roux (2011) \emph{Understanding biplots}. Chichester: Wiley.
#'
#' Greenacre, M. (2010) \emph{Biplots in practice}. Bilbao: Fundacion BBVA.
#' @seealso \code{\link{corregp}}, \code{\link{summary.corregp}}, \code{\link{screeplot.corregp}}, \code{\link{anova.corregp}}, \code{\link[stats]{biplot}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' plot(haireye.crg, x_ell = TRUE, xsub = c("Hair", "Sex"))
#' }
#' @export
plot.corregp <- function(x,axes=1:2,y_btm=TRUE,y_ell=FALSE,x_ell=FALSE,ysub=NULL,xsub=NULL,hlim=NULL,vlim=NULL,expa_btm=1,expa_top=1,asp=1,asp_btm=asp,asp_top=asp,col_btm="darkgrey",col_top="red",cex_btm=par("cex"),cex_top=cex_btm,font_btm=par("font"),font_top=font_btm,fam_btm=par("family"),fam_top=fam_btm,col_ell=par("col"),lwd_ell=par("lwd"),lty_ell=par("lty"),col_ori=par("col"),lwd_ori=par("lwd"),lty_ori=1,main=NULL,sub=NULL,hlab=NULL,vlab=NULL,cl=0.95,np=100,add_ori=TRUE,...) {
	# Biplot of a corregp fit: Y levels and X levels on two latent axes, drawn
	# in two layers ("bottom" first, then overlaid by "top"), optionally with
	# confidence ellipses for either set of levels.
	crg <- x
	# Axes given by name are translated to column indices of $y.
	if (is.character(axes)) {
		axes <- match(axes,table=colnames(crg$y))[1:2]
	}
	y.mat <- crg$y[,axes]
	# Flatten per-term X coordinates into one matrix; an 'xsub' given as term
	# names (or indices into the term list) is expanded to the level names.
	if (is.list(crg$x)) {
		x.mat <- do.call(what=rbind,args=crg$x)[,axes]
		if (is.numeric(xsub) || all(xsub %in% names(crg$x))) {
			xsub <- unlist(lapply(crg$x[xsub],rownames))
		}
	}
	else {
		x.mat <- crg$x[,axes]
	}
	# Normalise the subset selectors to row indices; default to all rows.
	if (is.character(ysub)) {
		ysub <- match(ysub,table=rownames(y.mat))
	}
	if (is.null(ysub)) {
		ysub <- 1:nrow(y.mat)
	}
	if (is.character(xsub)) {
		xsub <- match(xsub,table=rownames(x.mat))
	}
	if (is.null(xsub)) {
		xsub <- 1:nrow(x.mat)
	}
	ell_btm <- NULL
	ell_top <- NULL
	# Ellipses need bootstrap replicates; disable them (with a warning) if none.
	if ((y_ell || x_ell) && is.null(crg$conf)) {
		y_ell <- x_ell <- FALSE
		warning(paste("no bootstrapping was done in",as.list(match.call())$x,sep=" ",collapse=NULL),call.=FALSE)
	}
	# Assign Y and X to the bottom/top layers and compute the requested
	# confidence ellipses for whichever layer they belong to.
	if (y_btm) {
		loc_btm <- y.mat
		loc_top <- x.mat
		sub_btm <- ysub
		sub_top <- xsub
		if (y_ell) {
			ell_btm <- cell(object=crg,parm=rownames(y.mat)[ysub],axes=axes,cl=cl,np=np)
		}
		if (x_ell) {
			ell_top <- cell(object=crg,parm=rownames(x.mat)[xsub],axes=axes,cl=cl,np=np)
		}
	}
	else {
		loc_btm <- x.mat
		loc_top <- y.mat
		sub_btm <- xsub
		sub_top <- ysub
		if (y_ell) {
			ell_top <- cell(object=crg,parm=rownames(y.mat)[ysub],axes=axes,cl=cl,np=np)
		}
		if (x_ell) {
			ell_btm <- cell(object=crg,parm=rownames(x.mat)[xsub],axes=axes,cl=cl,np=np)
		}
	}
	# If the user fixes both limits, drop the aspect-ratio constraints.
	if (!is.null(hlim) && !is.null(vlim)) {
		asp <- asp_btm <- asp_top <- NA
	}
	# Default limits cover both the Y and X coordinates on each plotted axis.
	if (is.null(hlim)) {
		hlim <- signif(range(range(y.mat[,1],na.rm=TRUE),range(x.mat[,1],na.rm=TRUE)))
	}
	if (is.null(vlim)) {
		vlim <- signif(range(range(y.mat[,2],na.rm=TRUE),range(x.mat[,2],na.rm=TRUE)))
	}
	if (is.null(hlab)) {
		hlab <- colnames(y.mat)[1]
	}
	if (is.null(vlab)) {
		vlab <- colnames(y.mat)[2]
	}
	# Use a square plotting region for the biplot; restored on exit.
	p.pty <- par("pty")
	par(pty="s")
	# Bottom layer canvas; dividing the limits by expa_btm rescales the axes.
	plot(x=loc_btm[sub_btm,1],y=loc_btm[sub_btm,2],type="n",xlim=(hlim/expa_btm),ylim=(vlim/expa_btm),main=main,sub=sub,xlab=hlab,ylab=vlab,asp=asp_btm,...)
	if (add_ori) {
		abline(h=0,v=0,col=col_ori,lwd=lwd_ori,lty=lty_ori)
	}
	# Recycle single ellipse settings: first value for bottom, second for top.
	if (length(col_ell)==1) {
		col_ell <- rep(col_ell,times=2)
	}
	if (length(lwd_ell)==1) {
		lwd_ell <- rep(lwd_ell,times=2)
	}
	if (length(lty_ell)==1) {
		lty_ell <- rep(lty_ell,times=2)
	}
	if (!is.null(ell_btm)) {
		lapply(ell_btm,function(p1){lines(x=p1[,1],y=p1[,2],col=col_ell[1],lwd=lwd_ell[1],lty=lty_ell[1])})
	}
	text(x=loc_btm[sub_btm,1],y=loc_btm[sub_btm,2],labels=rownames(loc_btm)[sub_btm],col=col_btm,cex=cex_btm,font=font_btm,family=fam_btm)
	# Overlay the top layer in the same region with its own scaling.
	par(new=TRUE)
	plot(x=loc_top[sub_top,1],y=loc_top[sub_top,2],type="n",xlim=(hlim/expa_top),ylim=(vlim/expa_top),main=NA,sub=NA,xlab=NA,ylab=NA,asp=asp_top,axes=FALSE)
	# When the layers are scaled differently, add secondary axes (top/right)
	# for the top layer's coordinate system.
	if (expa_btm != expa_top) {
		axis(side=3,col.ticks=col_top,...)
		axis(side=4,col.ticks=col_top,...)
	}
	if (!is.null(ell_top)) {
		lapply(ell_top,function(p2){lines(x=p2[,1],y=p2[,2],col=col_ell[2],lwd=lwd_ell[2],lty=lty_ell[2])})
	}
	text(x=loc_top[sub_top,1],y=loc_top[sub_top,2],labels=rownames(loc_top)[sub_top],col=col_top,cex=cex_top,font=font_top,family=fam_top)
	# Restore the plotting-region type before returning.
	par(pty=p.pty)
	invisible()
}
#' 3D Confidence Ellipsoids for Correspondence Regression
#'
#' Method to compute 3D confidence ellipsoids for coordinates in correspondence regression.
#' @param object The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param parm The parameter for which to compute the confidence ellipsoids. Can be either \code{"y"}, \code{"x"}, or any vector of term names in X, level names in X or
#' level names in Y. Defaults to \code{"x"}.
#' @param axes The axes for which to compute the confidence ellipsoids: a vector of three values. Defaults to the first three axes.
#' @param cl The confidence level for the confidence ellipsoids. Defaults to \code{0.95}.
#' @param ... Further arguments passed to or from other methods.
#' @details
#' \code{cell3d} (of a \code{corregp} output) makes use of \code{\link[rgl]{ellipse3d}} from the package \pkg{rgl}.
#'
#' Typically, \code{cell3d} is not so much used directly as it is called by a \code{\link{plot3d.corregp}} command.
#' @return A list containing coordinate points for each confidence ellipsoid of interest.
#' @seealso \code{\link{plot3d.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' cell3d(haireye.crg, parm = "y")
#' cell3d(haireye.crg, parm = c("Hair", "Sex"))
#' }
#' @export
cell3d.corregp <- function(object, parm = "x", axes = 1:3, cl = 0.95, ...) {
	# Compute 3D confidence ellipsoids (level 'cl') for the selected
	# parameters on three latent axes, based on bootstrap replicates.
	crg <- object
	# Ellipsoids can only be computed when bootstrap replicates are available.
	if (is.null(crg$conf)) {
		stop(paste("no bootstrapping was done in", as.list(match.call())$object, sep = " ", collapse = NULL), call. = FALSE)
	}
	# Axes given by name are translated to column indices of the Y coordinates.
	if (is.character(axes)) {
		axes <- match(axes, table = colnames(crg$y))[1:3]
	}
	# Fetch the bootstrap replicates for 'parm' and discard any parameter
	# without a single complete (non-NA) replicate.
	boot_reps <- confGet(crg = crg, parm = parm)
	boot_reps <- Filter(function(rep_mat) any(complete.cases(rep_mat)), boot_reps)
	# One ellipsoid per parameter: covariance and centre of the complete
	# replicates on the chosen triple of axes.
	lapply(boot_reps, function(rep_mat) {
		pts <- rep_mat[complete.cases(rep_mat), axes]
		rgl::ellipse3d(cov(pts), centre = apply(pts, 2, mean), level = cl)
	})
}
#' @rdname cell3d.corregp
#' @export
cell3d <- function(object,...) {
	# S3 generic: dispatch to a class-specific method such as cell3d.corregp.
	UseMethod("cell3d")
}
#' 3D Plotting for Correspondence Regression
#'
#' Method to produce a 3D plot for a correspondence regression.
#' @param x The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param axes The axes to plot: a vector of three values. Defaults to the first three axes.
#' @param y_btm Logical specifying whether the Y levels should be plotted first ("at the bottom") and then be overlaid by the X levels. Defaults to \code{TRUE}.
#' @param y_ell Logical specifying whether the confidence ellipsoids of the Y levels should be plotted. Defaults to \code{FALSE}.
#' @param x_ell Logical specifying whether the confidence ellipsoids of the X levels should be plotted. Defaults to \code{FALSE}.
#' @param ysub Vector of indices to select a subset of the Y levels.
#' @param xsub Vector of indices to select a subset of the X levels.
#' @param hlim Vector of two values specifying the lower and upper limit between which to plot the horizontal axis.
#' @param vlim Vector of two values specifying the lower and upper limit between which to plot the vertical axis.
#' @param dlim Vector of two values specifying the lower and upper limit between which to plot the "depth" axis.
#' @param asp The aspect ratio for the whole plot. See \code{\link[rgl]{aspect3d}}.
#' @param col_btm Color of the bottom levels: either \code{numeric} or see \code{\link[grDevices]{colors}}. Defaults to \code{"darkgrey"}.
#' @param col_top Color of the top levels: either \code{numeric} or see \code{\link[grDevices]{colors}}. Defaults to \code{"red"}.
#' @param cex_btm Character expansion factor of the bottom levels: a number to specify the size of the text labels.
#' @param cex_top Character expansion factor of the top levels: a number to specify the size of the text labels.
#' @param font_btm Font of the bottom levels: \code{1} for plain, \code{2} for bold, \code{3} for italic, and \code{4} for bold italic.
#' @param font_top Font of the top levels: \code{1} for plain, \code{2} for bold, \code{3} for italic, and \code{4} for bold italic.
#' @param fam_btm Font family of the bottom levels: can be \code{"serif"}, \code{"sans"}, \code{"mono"} or \code{"symbol"}.
#' @param fam_top Font family of the top levels: can be \code{"serif"}, \code{"sans"}, \code{"mono"} or \code{"symbol"}.
#' @param col_ell Color of the confidence ellipsoids: either a number or see \code{\link[grDevices]{colors}}. Defaults to \code{"black"}.
#' @param lwd_ell Width of the confidence ellipsoids: a number to specify the line width.
#' @param lty_ell Line type of the confidence ellipsoids: either \code{"shade"}, \code{"wire"}, or \code{"dots"}. Defaults to \code{"shade"}.
#' @param opa_ell Opaqueness of the confidence ellipsoids: a number between \code{0} for fully transparent and \code{1} for fully opaque. Defaults to \code{0.2}.
#' @param col_ori Color of the lines through the origin: either a number or see \code{\link[grDevices]{colors}}. Defaults to \code{"grey"}.
#' @param lwd_ori Width of the lines through the origin: a number to specify the line width. Defaults to \code{1}.
#' @param main The main title of the plot.
#' @param sub The subtitle of the plot.
#' @param hlab The title of the horizontal axis.
#' @param vlab The title of the vertical axis.
#' @param dlab The title of the "depth" axis.
#' @param cl The confidence level for the confidence ellipsoids. Defaults to \code{0.95}.
#' @param add_ori Logical specifying whether to add lines through the origin. Defaults to \code{TRUE}.
#' @param ... Further arguments passed to or from other methods.
#' @details \code{plot3d} (of a \code{corregp} output) makes use of \code{\link[rgl]{plot3d}} (and \code{\link[rgl]{text3d}} and \code{\link[rgl]{abclines3d}}) from the package \pkg{rgl}.
#' @return A 3D plot window containing the output of a correspondence regression.
#' @seealso \code{\link{corregp}}, \code{\link{pcplot.corregp}}, \code{\link{agplot.corregp}}, \code{\link[rgl]{plot3d}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' plot3d(haireye.crg, x_ell = TRUE, xsub = c("Hair", "Sex"))
#' }
#' @export
plot3d.corregp <- function(x,axes=1:3,y_btm=TRUE,y_ell=FALSE,x_ell=FALSE,ysub=NULL,xsub=NULL,hlim=NULL,vlim=NULL,dlim=NULL,asp=par3d("scale"),col_btm="darkgrey",col_top="red",cex_btm=par3d("cex"),cex_top=cex_btm,font_btm=par3d("font"),font_top=font_btm,fam_btm=par3d("family"),fam_top=fam_btm,col_ell="black",lwd_ell=1,lty_ell="shade",opa_ell=0.2,col_ori="grey",lwd_ori=1,main=NULL,sub=NULL,hlab=NULL,vlab=NULL,dlab=NULL,cl=0.95,add_ori=TRUE,...) {
	# 3D biplot of a corregp fit via rgl: Y and X levels on three latent axes,
	# drawn as a "bottom" and an overlaid "top" layer, optionally with
	# confidence ellipsoids for either set of levels.
	crg <- x
	# Axes given by name are translated to column indices of $y.
	if (is.character(axes)) {
		axes <- match(axes,table=colnames(crg$y))[1:3]
	}
	# Swap the second and third requested axes — presumably to reconcile the
	# hlab/vlab/dlab labelling with rgl's axis convention (TODO confirm).
	axes <- c(axes[1],axes[3],axes[2])
	y.mat <- crg$y[,axes]
	# Flatten per-term X coordinates into one matrix; an 'xsub' given as term
	# names (or indices into the term list) is expanded to the level names.
	if (is.list(crg$x)) {
		x.mat <- do.call(what=rbind,args=crg$x)[,axes]
		if (is.numeric(xsub) || all(xsub %in% names(crg$x))) {
			xsub <- unlist(lapply(crg$x[xsub],rownames))
		}
	}
	else {
		x.mat <- crg$x[,axes]
	}
	# Normalise the subset selectors to row indices; default to all rows.
	if (is.character(ysub)) {
		ysub <- match(ysub,table=rownames(y.mat))
	}
	if (is.null(ysub)) {
		ysub <- 1:nrow(y.mat)
	}
	if (is.character(xsub)) {
		xsub <- match(xsub,table=rownames(x.mat))
	}
	if (is.null(xsub)) {
		xsub <- 1:nrow(x.mat)
	}
	ell_btm <- NULL
	ell_top <- NULL
	# Ellipsoids need bootstrap replicates; disable them (with a warning) if none.
	if ((y_ell || x_ell) && is.null(crg$conf)) {
		y_ell <- x_ell <- FALSE
		warning(paste("no bootstrapping was done in",as.list(match.call())$x,sep=" ",collapse=NULL),call.=FALSE)
	}
	# Assign Y and X to the bottom/top layers and compute the requested
	# confidence ellipsoids for whichever layer they belong to.
	if (y_btm) {
		loc_btm <- y.mat
		loc_top <- x.mat
		sub_btm <- ysub
		sub_top <- xsub
		if (y_ell) {
			ell_btm <- cell3d(object=crg,parm=rownames(y.mat)[ysub],axes=axes,cl=cl)
		}
		if (x_ell) {
			ell_top <- cell3d(object=crg,parm=rownames(x.mat)[xsub],axes=axes,cl=cl)
		}
	}
	else {
		loc_btm <- x.mat
		loc_top <- y.mat
		sub_btm <- xsub
		sub_top <- ysub
		if (y_ell) {
			ell_top <- cell3d(object=crg,parm=rownames(y.mat)[ysub],axes=axes,cl=cl)
		}
		if (x_ell) {
			ell_btm <- cell3d(object=crg,parm=rownames(x.mat)[xsub],axes=axes,cl=cl)
		}
	}
	# Default limits cover both the Y and X coordinates on each plotted axis.
	if (is.null(hlim)) {
		hlim <- signif(range(range(y.mat[,1],na.rm=TRUE),range(x.mat[,1],na.rm=TRUE)))
	}
	if (is.null(vlim)) {
		vlim <- signif(range(range(y.mat[,2],na.rm=TRUE),range(x.mat[,2],na.rm=TRUE)))
	}
	if (is.null(dlim)) {
		dlim <- signif(range(range(y.mat[,3],na.rm=TRUE),range(x.mat[,3],na.rm=TRUE)))
	}
	if (is.null(hlab)) {
		hlab <- colnames(y.mat)[1]
	}
	if (is.null(vlab)) {
		vlab <- colnames(y.mat)[2]
	}
	if (is.null(dlab)) {
		dlab <- colnames(y.mat)[3]
	}
	# Empty 3D canvas for the bottom layer; everything else is layered on top.
	rgl::plot3d(x=loc_btm[sub_btm,1],y=loc_btm[sub_btm,2],z=loc_btm[sub_btm,3],type="n",xlim=hlim,ylim=vlim,zlim=dlim,main=main,sub=sub,xlab=hlab,ylab=vlab,zlab=dlab,aspect=asp,...)
	# The three coordinate axes through the origin.
	if (add_ori) {
		rgl::abclines3d(x=0,y=0,z=0,a=1,b=0,c=0,col=col_ori,lwd=lwd_ori)
		rgl::abclines3d(x=0,y=0,z=0,a=0,b=1,c=0,col=col_ori,lwd=lwd_ori)
		rgl::abclines3d(x=0,y=0,z=0,a=0,b=0,c=1,col=col_ori,lwd=lwd_ori)
	}
	# Recycle single ellipsoid settings: first value for bottom, second for top.
	if (length(col_ell)==1) {
		col_ell <- rep(col_ell,times=2)
	}
	if (length(lwd_ell)==1) {
		lwd_ell <- rep(lwd_ell,times=2)
	}
	if (length(lty_ell)==1) {
		lty_ell <- rep(lty_ell,times=2)
	}
	if (length(opa_ell)==1) {
		opa_ell <- rep(opa_ell,times=2)
	}
	if (!is.null(ell_btm)) {
		lapply(ell_btm,rgl::plot3d,add=TRUE,xlim=hlim,ylim=vlim,zlim=dlim,lit=FALSE,col=col_ell[1],lwd=lwd_ell[1],type=lty_ell[1],alpha=opa_ell[1])
	}
	rgl::text3d(x=loc_btm[sub_btm,1],y=loc_btm[sub_btm,2],z=loc_btm[sub_btm,3],texts=rownames(loc_btm)[sub_btm],col=col_btm,cex=cex_btm,font=font_btm,family=fam_btm)
	# Overlay an (invisible) top-layer plot so subsequent additions share it.
	rgl::plot3d(x=loc_top[sub_top,1],y=loc_top[sub_top,2],z=loc_top[sub_top,3],type="n",xlim=hlim,ylim=vlim,zlim=dlim,main="",sub="",xlab="",ylab="",zlab="",add=TRUE)
	if (!is.null(ell_top)) {
		lapply(ell_top,rgl::plot3d,add=TRUE,xlim=hlim,ylim=vlim,zlim=dlim,lit=FALSE,col=col_ell[2],lwd=lwd_ell[2],type=lty_ell[2],alpha=opa_ell[2])
	}
	rgl::text3d(x=loc_top[sub_top,1],y=loc_top[sub_top,2],z=loc_top[sub_top,3],texts=rownames(loc_top)[sub_top],col=col_top,cex=cex_top,font=font_top,family=fam_top)
	invisible()
}
#' Plotting an Association Graph for Correspondence Regression
#'
#' Function to make an association graph of the (significant) coordinate scores in correspondence regression.
#' @param x The output of a call to \code{\link{corregp}} (i.e. an object of class "corregp").
#' @param axes The axes for which to plot the association graph: a vector of indices. Defaults to all the axes.
#' @param ysub Vector of indices to select a subset of the Y levels.
#' @param xsub Vector of indices to select a subset of the X levels. Can also be \code{"all"} or \code{"both"} (or abbreviations).
#' @param sort Vector of axes for which to sort the coordinate scores. The default (\code{NULL}) plots all levels in the order in which they appear in the
#' correspondence regression \code{x}.
#' @param na.rm Logical specifying whether to omit \code{NA} coordinates from the plot. Defaults to \code{FALSE}.
#' @param col Color of the association graph: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param cex Character expansion factor: a number to specify the size of the text labels.
#' @param font Font of the text labels (levels): \code{1} for plain, \code{2} for bold, \code{3} for italic, and \code{4} for bold italic. Defaults to \code{1}.
#' @param family Font family of the text labels (levels): can be \code{"serif"}, \code{"sans"}, \code{"mono"} or one of the \code{\link[grDevices]{Hershey}} fonts.
#' @param lwd Line width of the association graph: a number to specify the line width.
#' @param lty Line type of the association graph (i.e. linking edges): \code{0} or \code{"blank"}, \code{1} or \code{"solid"}, \code{2} or \code{"dashed"},
#' \code{3} or \code{"dotted"}, \code{4} or \code{"dotdash"}, \code{5} or \code{"longdash"}, \code{6} or \code{"twodash"}. Defaults to \code{1}.
#' @param ycol Color of the levels in Y: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param xcol Color of the levels in X: either \code{numeric} or see \code{\link[grDevices]{colors}}.
#' @param ncol Fill color of the nodes: either \code{numeric} or see \code{\link[grDevices]{colors}}. Defaults to \code{c("white","lightgray")}: the first value
#' is for the nodes of the axes and the second value is for the nodes of the X and Y levels.
#' @param nwid Line width of the nodes: a number to specify the line width. If a vector of two values is specified, then the first width is for the nodes of
#' the axes and the second width is for the nodes of the X and Y levels.
#' @param lcol Color of the links (edges): either \code{numeric} or see \code{\link[grDevices]{colors}}. If a vector of two values is specified, then
#' the first color is for the scores \code{> 0} and the second color is for the scores \code{< 0}.
#' @param lwid Line width of the links (edges): a number to specify the line width. If a vector of two values is specified, then the first width is for
#' the scores \code{> 0} and the second width is for the scores \code{< 0}.
#' @param pcol Color of the pointer (arrow head): either \code{numeric} or see \code{\link[grDevices]{colors}}. If a vector of two values is specified, then
#' the first color is for the scores \code{> 0} and the second color is for the scores \code{< 0}.
#' @param ppos Relative position of the pointer (arrow head): a vector of values between 0 and 1 for each axis.
#' @param ptyp Type of the pointer (arrow head): can be \code{"simple"}, \code{"curved"}, \code{"triangle"}, \code{"circle"}, \code{"ellipse"} or
#' \code{"T"}. Defaults to \code{"simple"}.
#' @param zoom Zoom factor of the association graph. Defaults to \code{1}.
#' @param hshft Horizontal shift of the association graph. Defaults to \code{0}.
#' @param vshft Vertical shift of the association graph. Defaults to \code{0}.
#' @param main The main title of the association graph.
#' @param cl The confidence level for the confidence intervals. Defaults to \code{0.95}.
#' @param nq Logical specifying whether to use a normal quantile (i.e. apply \code{\link[stats]{qnorm}}) in the computation of the confidence interval.
#' Defaults to \code{TRUE}. If \code{FALSE}, then the confidence interval is computed directly with the \code{\link[stats]{quantile}} function.
#' @param digits Integer specifying the number of decimals for the scores as labels of the links (edges). Defaults to \code{2}.
#' @param ... Further arguments passed to or from other methods.
#' @details
#' Association graphs (of a \code{corregp} output) in the \pkg{corregp} package make use of various functionalities of the package \pkg{diagram}.
#' @return A plot window containing the association graph.
#' @seealso \code{\link{corregp}}, \code{\link{cint.corregp}}, \code{\link{pcplot.corregp}}, \code{\link{plot3d.corregp}}.
#' @examples
#' \donttest{
#' data(HairEye)
#' haireye.crg <- corregp(Eye ~ Hair * Sex, data = HairEye, b = 3000)
#' agplot(haireye.crg, axes = 1:2, xsub = c("Hair", "Sex"))
#' plotag(haireye.crg, axes = 1:2, xsub = c("Hair", "Sex"))
#' }
#' @export
agplot.corregp <- function(x,axes=NULL,ysub=NULL,xsub=NULL,sort=NULL,na.rm=FALSE,col="black",cex=par("cex"),font=par("font"),family=par("family"),lwd=par("lwd"),lty=par("lty"),ycol=col,xcol=col,ncol=c("white","lightgray"),nwid=lwd,lcol=col,lwid=lwd,pcol=lcol,ppos=NULL,ptyp="simple",zoom=1,hshft=0,vshft=0,main=NULL,cl=0.95,nq=TRUE,digits=2,...) {
  crg <- x
  # Significance of the coordinate scores is judged from bootstrap confidence
  # intervals, so the corregp object must contain bootstrap replicates.
  if (is.null(crg$conf)) {
    stop(paste("no bootstrapping was done in",as.list(match.call())$x,sep=" ",collapse=NULL),call.=FALSE)
  }
  # Number of non-trivial axes (eigenvalues numerically greater than zero).
  o.rnk <- sum(crg$eigen>1e-08,na.rm=TRUE)
  if (is.null(axes)) {
    axes <- 1:o.rnk
  }
  if (is.character(axes)) {
    axes <- match(axes,table=colnames(crg$y))
  }
  if (any(axes>o.rnk)) {
    axes <- 1:o.rnk
    # BUG FIX: the warning used to read 'match.call()$object', but this function
    # has no argument called 'object' (the argument is 'x'), so the object name
    # was silently dropped from the message.
    warning(paste(as.list(match.call())$x,"only has",o.rnk,"axes",sep=" ",collapse=NULL),call.=FALSE)
  }
  if (is.null(ysub)) {
    ysub <- rownames(crg$y)
  }
  if (is.null(xsub)) {
    xsub <- "x"
  }
  a.len <- length(axes)
  # Y scores on the selected axes; scores whose confidence interval straddles 0
  # (product of the interval bounds < 0) are zeroed out as non-significant.
  y.mat <- round(matrix(crg$y[ysub,axes],nrow=length(ysub),ncol=a.len,dimnames=list(ysub,axes)),digits=digits)
  y.mat[as.matrix(do.call(what=cbind,args=lapply(axes,function(a1){apply(cint(crg,parm=ysub,axis=a1,cl=cl,nq=nq),1,prod)<0})),ncol=a.len)] <- 0
  # Resolve 'xsub': it may select all X levels, whole terms (when crg$x is a
  # list of terms), numeric term indices, or individual level names.
  if (length(xsub)==1 && tolower(xsub) %in% c("x","a","b","all","both")) {
    if (is.list(crg$x)) {
      x.mat <- round(as.matrix(do.call(what=rbind,args=crg$x)[,axes],ncol=a.len),digits=digits)
    }
    else {
      x.mat <- round(as.matrix(crg$x[,axes],ncol=a.len),digits=digits)
    }
    xsub <- "x"
  }
  if (is.list(crg$x) && all(xsub %in% names(crg$x))) {
    x.mat <- round(as.matrix(do.call(what=rbind,args=crg$x[xsub])[,axes],ncol=a.len),digits=digits)
  }
  if (is.list(crg$x) && is.numeric(xsub)) {
    x.mat <- round(as.matrix(do.call(what=rbind,args=crg$x[xsub])[,axes],ncol=a.len),digits=digits)
    xsub <- names(crg$x)[xsub]
  }
  if (is.list(crg$x) && all(xsub %in% unlist(lapply(crg$x,rownames)))) {
    x.mat <- round(matrix(do.call(what=rbind,args=crg$x)[xsub,axes],nrow=length(xsub),ncol=a.len,dimnames=list(xsub,axes)),digits=digits)
  }
  if (all(xsub %in% rownames(crg$x))) {
    x.mat <- round(matrix(crg$x[xsub,axes],nrow=length(xsub),ncol=a.len,dimnames=list(xsub,axes)),digits=digits)
  }
  # Zero out non-significant X scores, same criterion as for Y above.
  x.mat[as.matrix(do.call(what=cbind,args=lapply(axes,function(a2){apply(cint(crg,parm=xsub,axis=a2,cl=cl,nq=nq),1,prod)<0})),ncol=a.len)] <- 0
  if (na.rm) {
    x.mat <- as.matrix(x.mat[complete.cases(x.mat),],ncol=a.len)
    y.mat <- as.matrix(y.mat[complete.cases(y.mat),],ncol=a.len)
  }
  y.len <- nrow(y.mat)
  x.len <- nrow(x.mat)
  # Optionally sort the levels by their scores on the requested axes.
  if (!is.null(sort) && !any(is.na(sort))) {
    if (is.character(sort)) {
      sort <- match(sort,table=colnames(crg$y))
    }
    if (any(!sort %in% axes)) {
      sort <- axes
      warning("the values in argument 'sort' do not match the values in argument 'axes'",call.=FALSE)
    }
    y.ord <- do.call(order,c(data.frame(y.mat[,sort]),decreasing=TRUE))
    x.ord <- do.call(order,c(data.frame(x.mat[,sort]),decreasing=TRUE))
  }
  else {
    y.ord <- 1:y.len
    x.ord <- 1:x.len
  }
  y.mat <- y.mat[y.ord,]
  x.mat <- x.mat[x.ord,]
  # Node coordinates: axes in a middle column (x = 0.5), Y levels on the left
  # (x = 0.3) and X levels on the right (x = 0.7), each spread out vertically.
  a.pos <- matrix(c(rep(0.5,times=a.len),seq(from=a.len,to=1)/(a.len+1)),ncol=2,dimnames=list(axes,NULL))
  y.pos <- matrix(c(rep(0.3,times=y.len),seq(from=y.len,to=1)/(y.len+1)),ncol=2,dimnames=list(rownames(y.mat),NULL))
  x.pos <- matrix(c(rep(0.7,times=x.len),seq(from=x.len,to=1)/(x.len+1)),ncol=2,dimnames=list(rownames(x.mat),NULL))
  c.pos <- rbind(y.pos,x.pos)
  c.mat <- rbind(y.mat,x.mat)
  # l.mat: one row per (axis, level) edge for every non-zero (significant) score.
  l.mat <- do.call(what=rbind,args=lapply(1:a.len,function(a3){cbind(rep(a3,times=sum(c.mat[,a3]!=0,na.rm=TRUE)),which(c.mat[,a3]!=0))}))
  # p.mat will receive the midpoint coordinates of each edge; its row names are
  # the scores, which are later printed as edge labels.
  p.mat <- matrix(nrow=nrow(l.mat),ncol=2,dimnames=list(as.vector(c.mat[!c.mat%in%c(0,NA)]),NULL))
  # Node radii derived from the rendered label sizes.
  a.rad <- max(c(strwidth(rownames(a.pos),units="inches",cex=cex,font=font,family=family),strheight(rownames(a.pos),units="inches",cex=cex,font=font,family=family))+0.2)/10
  c.rad <- max(strwidth(rownames(c.pos),units="inches",cex=cex,font=font,family=family)+0.01)/10
  v.rad <- max(strheight(c(rownames(a.pos),rownames(c.pos)),units="inches",cex=cex,font=font,family=family)+0.05)/10
  if (is.null(ppos)) {
    p.pos <- seq(from=0.8,to=0.3,length.out=a.len)
  }
  else {
    p.pos <- rep(ppos,length.out=a.len)
  }
  # Two-element styles: first entry for positive scores, second for negative.
  lcol <- rep(lcol,length.out=2)
  pcol <- rep(pcol,length.out=2)
  lwid <- rep(lwid,length.out=2)
  nwid <- rep(nwid,length.out=2)
  glim <- c(0,1)
  if (zoom != 1) {
    glim <- c(-1/zoom+1,1/zoom)
  }
  p.mar <- par("mar")
  par(mar=rep(1,times=4))
  diagram::openplotmat(xlim=glim,ylim=glim,main=main,...)
  diagram::coordinates(pos=rbind(a.pos,c.pos),mx=hshft,my=vshft,hor=FALSE,relsize=zoom)
  # Draw every edge, then (re)draw the axis node on top of its edges.
  for (l4 in 1:nrow(l.mat)) {
    l.col <- ifelse(as.numeric(rownames(p.mat)[l4])>0,lcol[1],lcol[2])
    p.col <- ifelse(as.numeric(rownames(p.mat)[l4])>0,pcol[1],pcol[2])
    l.wid <- ifelse(as.numeric(rownames(p.mat)[l4])>0,lwid[1],lwid[2])
    p.mat[l4,] <- diagram::straightarrow(from=a.pos[l.mat[l4,1],],to=c.pos[l.mat[l4,2],],lcol=l.col,lwd=l.wid,lty=lty,arr.col=p.col,arr.pos=p.pos[l.mat[l4,1]],arr.type=ptyp)
    diagram::textellipse(a.pos[l.mat[l4,1],],radx=a.rad,rady=a.rad,lab=rownames(a.pos)[l.mat[l4,1]],shadow.size=0,box.col=ncol[1],lcol="black",lwd=nwid[1],col=col,cex=cex,font=font,family=family,xpd=TRUE)
  }
  ycol <- rep(ycol,length.out=y.len)[y.ord]
  # BUG FIX: the Y-level nodes used a hard-coded box.col="lightgray", so a
  # user-supplied 'ncol' was honored for X-level nodes (below) but silently
  # ignored for Y-level nodes; both now use ncol[2] as documented.
  lapply(1:nrow(y.pos),function(c5){diagram::textrect(y.pos[c5,]-c(c.rad-0.00001,0),radx=c.rad,rady=v.rad,lab=rownames(y.pos)[c5],shadow.size=0,box.col=ncol[2],lcol="black",lwd=nwid[2],col=ycol[c5],cex=cex,font=font,family=family,xpd=TRUE)})
  xcol <- rep(xcol,length.out=x.len)[x.ord]
  lapply(1:nrow(x.pos),function(c6){diagram::textrect(x.pos[c6,]+c(c.rad-0.00001,0),radx=c.rad,rady=v.rad,lab=rownames(x.pos)[c6],shadow.size=0,box.col=ncol[2],lcol="black",lwd=nwid[2],col=xcol[c6],cex=cex,font=font,family=family,xpd=TRUE)})
  # Print each score next to the midpoint of its edge.
  lapply(1:nrow(p.mat),function(l7){text(p.mat[l7,1],p.mat[l7,2],labels=rownames(p.mat)[l7],pos=3,offset=0.5,col=ifelse(as.numeric(rownames(p.mat)[l7])>0,pcol[1],pcol[2]),cex=cex,font=font,family=family)})
  par(mar=p.mar)
  invisible()
}
#' @rdname agplot.corregp
#' @export
plotag.corregp <- function(x,axes=NULL,ysub=NULL,xsub=NULL,sort=NULL,na.rm=FALSE,col="black",cex=par("cex"),font=par("font"),family=par("family"),lwd=par("lwd"),lty=par("lty"),ycol=col,xcol=col,ncol=c("white","lightgray"),nwid=lwd,lcol=col,lwid=lwd,pcol=lcol,ppos=NULL,ptyp="simple",zoom=1,hshft=0,vshft=0,main=NULL,cl=0.95,nq=TRUE,digits=2,...) {
  # Alias method: forward every argument, by name, to agplot.corregp().
  agplot.corregp(x=x,axes=axes,ysub=ysub,xsub=xsub,sort=sort,na.rm=na.rm,col=col,cex=cex,font=font,family=family,lwd=lwd,lty=lty,ycol=ycol,xcol=xcol,ncol=ncol,nwid=nwid,lcol=lcol,lwid=lwid,pcol=pcol,ppos=ppos,ptyp=ptyp,zoom=zoom,hshft=hshft,vshft=vshft,main=main,cl=cl,nq=nq,digits=digits,...)
}
#' @rdname agplot.corregp
#' @export
# S3 generic for association-graph plots; dispatches on the class of 'x'
# (the "corregp" method is defined above in this file).
agplot <- function(x,...) {
  UseMethod("agplot")
}
#' @rdname agplot.corregp
#' @export
# S3 generic, synonym of agplot(); dispatches on the class of 'x'
# (the "corregp" method is defined above in this file).
plotag <- function(x,...) {
  UseMethod("plotag")
}
|
3a1a1e9f22adb252cb41467727570723ebe276c5
|
36795a7fa830cc052d5dd565322bf4e98968a246
|
/data-raw/sir-scratch.R
|
e773322801a15f6709e3b00e128b1f69fac7f377
|
[] |
no_license
|
mlaviolet/tidyepi
|
1e64fdc1bf21ca054a627d07f3879cacfe5168d2
|
8806bd13c3162dfaeb897de8f73393b62d986333
|
refs/heads/master
| 2022-05-17T09:04:07.263946
| 2022-05-10T02:07:29
| 2022-05-10T02:07:29
| 183,680,215
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,523
|
r
|
sir-scratch.R
|
library(dplyr)
library(tidyr)
# library(purrr)
data(cancer)
data(seer_weight)
# Confidence level (in percent) for the SIR interval; the functions and
# pipelines below read this as a global.
level <- 95
# Worked example: standardized incidence ratio (SIR) for 2015. Rows are
# reshaped so each age group has counts/populations for the study group
# (std_group == 0, non-female) and the reference group (std_group == 1, female);
# expected counts apply the reference rates to the study population.
sir <- cancer %>%
  filter(Year == 2015) %>%
  mutate(std_group = if_else(Sex == "Female", 1, 0)) %>%
  gather(key, value, n, pop) %>%
  # Female is referent
  unite("std_group", std_group, key) %>%
  select(agegroup, std_group, value) %>%
  spread(std_group, value) %>%
  # compute observed and expected for each age group, then sum
  summarize(expected = sum(`1_n` / `1_pop` * `0_pop`),
            observed = sum(`0_n`)) %>%
  # compute SIR and LCI, UCI; gamma quantiles give an exact Poisson-style
  # interval on the observed count.
  mutate(sir = observed / expected,
         sir_lci = qgamma((100 - level) / 200, observed) / expected,
         sir_uci = qgamma((100 + level) / 200, observed + 1) / expected)
# Tidy-eval version of the SIR computation above: column names are passed
# unquoted, captured with enquo() and spliced back in with !!.
# NOTE(review): unite() names the combined column literally "std_group", so
# spread(!!std_group, ...) only works when the caller's grouping column is
# itself called std_group -- confirm before reusing more generally.
f3 <- function(df, std_group, agegroup, n, pop, level = 95) {
  # Capture the column names as quosures for tidy evaluation.
  n <- enquo(n)
  pop <- enquo(pop)
  std_group <- enquo(std_group)
  agegroup <- enquo(agegroup)
  df %>%
    gather(key, value, !!n, !!pop) %>%
    unite("std_group", !!std_group, key) %>%
    select(!!agegroup, !!std_group, value) %>%
    spread(!!std_group, value) %>%
    # compute observed and expected for each age group, then sum
    summarize(expected = sum(`1_n` / `1_pop` * `0_pop`),
              observed = sum(`0_n`)) %>%
    # compute SIR and LCI, UCI
    mutate(sir = observed / expected,
           sir_lci = qgamma((100 - level) / 200, observed) / expected,
           sir_uci = qgamma((100 + level) / 200, observed + 1) / expected) %>%
    select(observed, expected, sir, sir_lci, sir_uci)
}
# Smoke tests for f3(): once on a single year, then per year via do().
df <- cancer %>%
  filter(Year == 2015) %>%
  mutate(std_group = if_else(Sex == "Female", 1, 0))
# this is OK
(f3(df, std_group, agegroup, n, pop))
sirs <- cancer %>%
  group_by(Year) %>%
  mutate(std_group = if_else(Sex == "Female", 1, 0)) %>%
  do(f3(., std_group, agegroup, n, pop))
# Good to here ------------------------------------------------------------
df <- cancer %>%
  filter(Year == 2015) %>%
  mutate(std_group = if_else(Sex == "Female", 1, 0))
# Non-parameterized variant of the SIR computation: hard-codes the column
# names (std_group, agegroup, n, pop) and reads `level` from the global
# environment (defined near the top of this script).
f2 <- function(df) {
  df %>%
    gather(key, value, n, pop) %>%
    unite("std_group", std_group, key) %>%
    select(agegroup, std_group, value) %>%
    spread(std_group, value) %>%
    # compute observed and expected for each age group, then sum
    summarize(expected = sum(`1_n` / `1_pop` * `0_pop`),
              observed = sum(`0_n`)) %>%
    # compute SIR and LCI, UCI
    mutate(sir = observed / expected,
           sir_lci = qgamma((100 - level) / 200, observed) / expected,
           sir_uci = qgamma((100 + level) / 200, observed + 1) / expected)
}
(f2(df))
# Per-year SIRs via do(); each group already carries its std_group column.
sir <- cancer %>%
  group_by(Year) %>%
  mutate(std_group = if_else(Sex == "Female", 1, 0)) %>%
  do(f2(.))
# Debugging redefinition of f3 (overwrites the earlier version): every step
# after gather() is commented out so the long-format intermediate can be
# inspected in the calls below.
f3 <- function(df, std_group, agegroup, n, pop) {
  n <- enquo(n)
  pop <- enquo(pop)
  std_group <- enquo(std_group)
  agegroup <- enquo(agegroup)
  df %>%
    gather(key, value, !!n, !!pop) #%>%
  # unite("std_group", !!std_group, key) %>%
  # select(!!agegroup, !!std_group, value) %>%
  # spread(!!std_group, value) %>%
  # # compute observed and expected for each age group, then sum
  # summarize(expected = sum(`1_n` / `1_pop` * `0_pop`),
  #           observed = sum(`0_n`)) %>%
  # # compute SIR and LCI, UCI
  # mutate(sir = observed / expected,
  #        sir_lci = qgamma((100 - level) / 200, observed) / expected,
  #        sir_uci = qgamma((100 + level) / 200, observed + 1) / expected)
}
(f3(df, std_group, agegroup, n, pop))
sirs <- cancer %>%
  mutate(std_group = if_else(Sex == "Female", 1, 0)) %>%
  group_by(Year) %>%
  do(f3(., std_group, agegroup, n, pop))
# Collapse counts and populations within each age group; used as the
# per-stratum aggregation step in the nest/map experiment below.
f1 <- function(df) {
  df %>%
    group_by(agegroup) %>%
    summarize(n = sum(n),
              pop = sum(pop))
}
# Experiment with list columns: nest per (Year, std_group), aggregate each
# nested frame with f1, and join the SEER standard weights.
sir <- cancer %>%
  # filter(Year == 2015) %>%
  mutate(std_group = if_else(Sex == "Female", 1, 0)) %>%
  group_by(Year, std_group) %>%
  nest() %>%
  mutate(agg_data = map(.$data, f1)) %>%
  # merge with standard weights
  mutate(agg_data = map(.$agg_data, inner_join, seer_weight,
                        by = "agegroup")) %>%
  # OK to here
  mutate(adj_rate = map(.$agg_data, directAdjust(.$data, n, pop, wgt)))
# call to directAdjust is the problem
sir[1,"agg_data"][[1]]
# NOTE(review): do() called with no arguments errors -- leftover scratch line.
chk1 <- do()
# split(.$std_group)
# try list columns
sir[2, 2]$data %>%
  # group_by(.data$agegroup) %>%
  summarize(n = sum(n),
            pop = sum(pop))
library(dplyr)
data(cancer)
# Intended package API: reshape per year, then indirectly adjust.
cancer %>%
  group_by(Year) %>%
  do(reshapeForSIR(., agegroup, Sex, "Female", n, pop)) %>%
  do(indirectAdjust(., study_count, study_pop, ref_count, ref_pop))
# test using example from epitools::ageadjust.indirect
# (1940 deaths/population as the study group, 1960 as the reference).
dth60 <- c(141, 926, 1253, 1080, 1869, 4891, 14956, 30888,
           41725, 26501, 5928)
pop60 <- c(1784033, 7065148, 15658730, 10482916, 9939972,
           10563872, 9114202, 6850263, 4702482, 1874619, 330915)
dth40 <- c(45, 201, 320, 670, 1126, 3160, 9723, 17935,
           22179, 13461, 2238)
pop40 <- c(906897, 3794573, 10003544, 10629526, 9465330,
           8249558, 7294330, 5022499, 2920220, 1019504, 142532)
dth_dat <- data.frame(dth60, pop60, dth40, pop40)
# Cross-check our result against epitools.
dth_dat %>% indirectAdjust(dth40, pop40, dth60, pop60)
epitools::ageadjust.indirect(dth40, pop40, dth60, pop60)$sir
# Manual computation of the same SIR (uses the global `level` defined above).
dth_dat %>%
  # compute observed and expected for each age group, then sum
  summarize(expected = sum(dth60 / pop60 * pop40),
            observed = sum(dth40)) %>%
  # compute SIR and LCI, UCI
  mutate(sir = observed / expected,
         sir_lci = qgamma((100 - level) / 200, observed) / expected,
         sir_uci = qgamma((100 + level) / 200, observed + 1) / expected)
# looks good
ff713f987494d21ec24184a7e7b3df2f1d47d337
|
d30e6c440f48ebdeca74505d946db88b8ccea14c
|
/myapp/ui.R
|
a2663b124db221220c8abe3085f76071cd82b1b8
|
[] |
no_license
|
JessvdK/RLadies_ShinyWorkshop
|
71aa595419e7c5d73128ddc4252ef32861f17f60
|
8c64cd790adcd7f4b9021954eba40df637effdeb
|
refs/heads/master
| 2020-03-24T08:38:17.476343
| 2018-07-27T16:46:19
| 2018-07-27T16:46:19
| 142,602,269
| 3
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 312
|
r
|
ui.R
|
library(shiny)

# UI definition: a page title above the classic sidebar/main layout.
side_panel <- sidebarPanel("Sidebar Panel")
main_panel <- mainPanel("Main Panel")
ui <- fluidPage(
  titlePanel("Hello Shiny!"),
  sidebarLayout(side_panel, main_panel)
)
|
e61dd853e97fb065e736c936f73abec9cb446e82
|
57925a8ca3b068b6689d13f0c0ad5ac084f50d9e
|
/PraceDomowe/PD5/gr2/NowikowskiAndrzej/pilkakopana/app.R
|
401ab9f68aa628b0b7f79a9df3b0f362a65931d1
|
[] |
no_license
|
ramusz1/WizualizacjaDanych2018
|
41b4172761b5aaedd6ed2150af1e847b8862f0a3
|
52153880f70aa028963d40dcfeb1d3cb94272c6b
|
refs/heads/master
| 2020-04-24T15:19:19.028703
| 2019-05-30T11:33:51
| 2019-05-30T11:33:51
| 172,061,362
| 3
| 0
| null | 2019-02-22T12:17:59
| 2019-02-22T12:17:59
| null |
UTF-8
|
R
| false
| false
| 19,391
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(dplyr)
library(ggplot2)
library(ggthemes)
# utils
# League points for one match, vectorized over goal vectors:
# 3 for a win, 1 for a draw, 0 for a loss. NA goals yield NA points.
get_points <- function(own_goals, enemy_goals) {
  3 * (own_goals > enemy_goals) + (own_goals == enemy_goals)
}
# Map a numeric value to a traffic-light color name, vectorized:
# below red_breakpoint -> "red", above green_breakpoint -> "green",
# anything in between (boundaries included) -> "blue". When the two
# ranges overlap, the "red" test takes precedence.
get_color <- function(value,
                      red_breakpoint,
                      green_breakpoint) {
  fallback <- ifelse(value > green_breakpoint, "green", "blue")
  ifelse(value < red_breakpoint, "red", fallback)
}
# Color for an average-points-per-match figure: red below 1, green above 2.
get_points_color <- function(points) {
  get_color(points, red_breakpoint = 1, green_breakpoint = 2)
}
# Color for a win ratio: red below 0.4, green above 0.6.
get_win_ratio_color <- function(win_ratio) {
  get_color(win_ratio, red_breakpoint = 0.4, green_breakpoint = 0.6)
}
# Color for a goals scored/conceded ratio: red below 0.9, green above 1.1.
get_goal_ratio_color <- function(goal_ratio) {
  get_color(goal_ratio, red_breakpoint = 0.9, green_breakpoint = 1.1)
}
# Premier League match data: one row per match with home/away teams and goals.
source_url <-
  "https://raw.githubusercontent.com/michbur/soccer-data/master/PL_dat.csv"
dat <- read.csv2(source_url)
# dat <- dat %>% select(-date)
# Team-centric view: each match appears twice, once from the perspective of
# each team, with own/enemy goals; home/away columns are then dropped.
overview_dat <- rbind(
  dat %>% mutate(
    team = home_team,
    enemy = away_team,
    own_goals = home_team_goal,
    enemy_goals = away_team_goal
  ),
  dat %>% mutate(
    team = away_team,
    enemy = home_team,
    own_goals = away_team_goal,
    enemy_goals = home_team_goal
  )
) %>% select(-c(home_team, away_team, home_team_goal, away_team_goal))
# League points earned by 'team' in each match.
overview_dat <-
  overview_dat %>% mutate(score = get_points(own_goals, enemy_goals))
# str(dat)
# All team names (appearing either home or away).
teams <-
  unique(rbind(
    dat %>% mutate(team = away_team) %>% select(team),
    dat %>% mutate(team = home_team) %>% select(team)
  ))
# NOTE(review): levels() returns NULL when 'season' is character rather than a
# factor (read.csv2 no longer creates factors by default in R >= 4.0) -- verify
# that this still populates the season dropdown.
seasons <- levels(unlist(unique(dat %>% select(season))))
# Dashboard UI: header, a sidebar with two menu sections (overview filters and
# team-detail selectors), and a body with the two matching tab contents.
ui <- dashboardPage(
  skin = "green",
  header = dashboardHeader(title = "Piłka kopana w Anglii"),
  sidebar = dashboardSidebar(sidebarMenu(
    menuItem(
      "Overview",
      tabName = "overview",
      icon = icon("dashboard"),
      # Season filter for the overview tab.
      menuSubItem(
        tabName = "overview",
        icon = NULL,
        selectInput(
          inputId = 'overview_season',
          label = "Select seasons",
          choices = unique(dat$season),
          # multiple = TRUE,
          selected = unique(dat$season)
        )
      ),
      # Stage (matchday) range filter for the overview tab.
      menuSubItem(
        tabName = "overview",
        icon = NULL,
        sliderInput(
          inputId = "overview_stage_range",
          label = "Select stage range:",
          min = min(dat %>% select(stage)),
          max = max(dat %>% select(stage)),
          value = c(min(dat %>% select(stage)), max(dat %>% select(stage))),
          step = 1
        )
      )
    ),
    menuItem(
      "Team details",
      tabName = "teamdetails",
      icon = icon('info-circle'),
      menuSubItem(
        tabName = "teamdetails",
        icon = NULL,
        selectInput(
          inputId = "details_season",
          label = "Select season",
          choices = seasons
        )
      ),
      menuSubItem(
        tabName = "teamdetails",
        icon = NULL,
        selectInput(
          inputId = 'details_team',
          label = 'Select team:',
          choices = teams
        )
      ),
      # Opponent selector is rendered server-side so it can exclude the
      # currently selected team (see output$details_opponent_input).
      menuSubItem(
        tabName = "teamdetails",
        icon = NULL,
        uiOutput('details_opponent_input')
      )
    ),
    menuItem(
      "See also",
      icon = icon("send", lib = 'glyphicon'),
      href = "https://github.com/mini-pw/WizualizacjaDanych2018/"
    )
  )),
  body = dashboardBody(tabItems(
    # First tab content: league-wide total score and total goals bar charts.
    tabItem(tabName = "overview",
            fluidRow(
              box(
                width = 12,
                title = "Total score",
                collapsible = T,
                status = 'primary',
                plotOutput('overview_total_score')
              )
            ),
            fluidRow(
              box(
                width = 12,
                title = "Total goals",
                collapsible = T,
                status = 'primary',
                plotOutput('overview_total_goals')
              )
            )),
    # Second tab content: per-team statistics.
    tabItem(
      tabName = "teamdetails",
      h1("Team details"),
      # Summary value boxes: home vs away form of the selected team.
      fluidRow(
        column(
          width = 6,
          h3("Home matches"),
          valueBoxOutput('details_home_avg_points'),
          valueBoxOutput('details_home_avg_win_ratio'),
          valueBoxOutput('details_home_avg_goals_ratio')
        ),
        column(
          width = 6,
          h3("Away matches"),
          valueBoxOutput('details_away_avg_points'),
          valueBoxOutput('details_away_avg_win_ratio'),
          valueBoxOutput('details_away_avg_goals_ratio')
        )
      ),
      # Head-to-head section, shown only when an opponent is selected.
      conditionalPanel(
        "input.details_team_enemy != 'None'",
        fluidRow(
          column(
            width = 6,
            uiOutput('home_vs'),
            valueBoxOutput('details_home_avg_points_vs'),
            valueBoxOutput('details_home_avg_win_ratio_vs'),
            valueBoxOutput('details_home_avg_goals_ratio_vs')
          ),
          column(
            width = 6,
            uiOutput('away_vs'),
            valueBoxOutput('details_away_avg_points_vs'),
            valueBoxOutput('details_away_avg_win_ratio_vs'),
            valueBoxOutput('details_away_avg_goals_ratio_vs')
          )
        ),
        fluidRow(
          column(
            width = 6,
            valueBoxOutput('details_home_wins_vs'),
            valueBoxOutput('details_home_draws_vs'),
            valueBoxOutput('details_home_loses_vs')
          ),
          column(
            width = 6,
            valueBoxOutput('details_away_wins_vs'),
            valueBoxOutput('details_away_draws_vs'),
            valueBoxOutput('details_away_loses_vs')
          )
        )
      ),
      h3("Team points"),
      fluidRow(column(12, plotOutput(
        'details_team_points'
      ))),
      h3("Last matches"),
      fluidRow(column(
        12, dataTableOutput('details_last_matches')
      ))
    )
  ))
)
# Define server logic required to draw a histogram
# Server logic: a reactive subset of the selected team's matches feeds the
# value boxes, head-to-head statistics, points plot and match table; the
# overview tab has its own reactive subset for the league-wide bar charts.
server <- function(input, output) {
  # team details
  # All matches (home or away) of the selected team in the selected season,
  # newest first; validate() short-circuits downstream outputs when empty.
  team_details_r <-
    reactive({
      df <-
        dat %>% filter(season == input$details_season) %>% filter(home_team == input$details_team |
                                                                    away_team == input$details_team) %>% arrange(desc(date))
      # browser()
      validate(need(nrow(df) != 0, "No data"))
      return(df)
    })
  output$details_last_matches <-
    renderDataTable(team_details_r(),
                    options = list(pageLength = 5))
  # --- Averages over all home matches of the selected team. ---
  avg_home_points_r <- reactive({
    round(
      team_details_r() %>% filter(home_team == input$details_team) %>% mutate(points = get_points(home_team_goal, away_team_goal)) %>% summarize(mean(points)),
      2
    )
  })
  # NOTE(review): if the team played no home matches in the season this is
  # 0 / 0 = NaN and the value box will display NaN -- confirm acceptable.
  avg_home_win_ratio_r <- reactive({
    won_matches <-
      nrow(
        team_details_r() %>% filter(home_team == input$details_team) %>% mutate(points = get_points(home_team_goal, away_team_goal)) %>% filter(points == 3)
      )
    all_matches <-
      nrow(team_details_r() %>% filter(home_team == input$details_team))
    round(won_matches / all_matches,
          2)
  })
  avg_home_goal_ratio_r <- reactive({
    goal_scored <-
      team_details_r() %>% filter(home_team == input$details_team) %>% summarise(sum(home_team_goal))
    goal_against <-
      team_details_r() %>% filter(home_team == input$details_team) %>% summarise(sum(away_team_goal))
    round(goal_scored / goal_against,
          2)
  })
  # --- Same three averages for away matches. ---
  avg_away_points_r <- reactive({
    round(
      team_details_r() %>% filter(away_team == input$details_team) %>% mutate(points = get_points(away_team_goal, home_team_goal)) %>% summarize(mean(points)),
      2
    )
  })
  avg_away_win_ratio_r <- reactive({
    won_matches <-
      nrow(
        team_details_r() %>% filter(away_team == input$details_team) %>% mutate(points = get_points(away_team_goal, home_team_goal)) %>% filter(points == 3)
      )
    all_matches <-
      nrow(team_details_r() %>% filter(away_team == input$details_team))
    round(won_matches / all_matches,
          2)
  })
  avg_away_goal_ratio_r <- reactive({
    goal_scored <-
      team_details_r() %>% filter(away_team == input$details_team) %>% summarise(sum(away_team_goal))
    goal_against <-
      team_details_r() %>% filter(away_team == input$details_team) %>% summarise(sum(home_team_goal))
    round(goal_scored / goal_against,
          2)
  })
  # --- Value boxes for the season-wide home/away averages, colored by the
  # --- traffic-light helpers defined at the top of the file.
  output$details_home_avg_points <-
    renderValueBox(valueBox(
      avg_home_points_r(),
      'Average points per match',
      color = get_points_color(avg_home_points_r())
    ))
  output$details_away_avg_points <-
    renderValueBox(valueBox(
      avg_away_points_r(),
      'Average points per match',
      color = get_points_color(avg_away_points_r())
    ))
  output$details_home_avg_win_ratio <-
    renderValueBox(valueBox(
      avg_home_avg <- avg_home_win_ratio_r(),
      'Average win ratio',
      color = get_win_ratio_color(avg_home_win_ratio_r())
    ))
  output$details_away_avg_win_ratio <-
    renderValueBox(valueBox(
      avg_away_win_ratio_r(),
      'Average win ratio',
      color = get_win_ratio_color(avg_away_win_ratio_r())
    ))
  output$details_home_avg_goals_ratio <-
    renderValueBox(
      valueBox(
        avg_home_goal_ratio_r(),
        'Average goal scored/against ratio',
        color = get_goal_ratio_color(avg_home_goal_ratio_r())
      )
    )
  output$details_away_avg_goals_ratio <-
    renderValueBox(
      valueBox(
        avg_away_goal_ratio_r(),
        'Average goal scored/against ratio',
        color = get_goal_ratio_color(avg_away_goal_ratio_r())
      )
    )
  # --- Head-to-head versions of the same statistics, restricted to matches
  # --- against the selected opponent; validate() blanks them when there are
  # --- no such matches.
  avg_home_points_vs_r <- reactive({
    df <-
      team_details_r() %>% filter(home_team == input$details_team &
                                    away_team == input$details_team_enemy) %>% mutate(points = get_points(home_team_goal, away_team_goal))
    validate(need(nrow(df) != 0, "No matches"))
    round(df %>% summarize(mean(points)),
          2)
  })
  avg_home_win_ratio_vs_r <- reactive({
    df <-
      team_details_r() %>% filter(home_team == input$details_team &
                                    away_team == input$details_team_enemy)
    validate(need(nrow(df) != 0, "No matches"))
    won_matches <-
      nrow(df %>% mutate(points = get_points(home_team_goal, away_team_goal)) %>% filter(points == 3))
    round(won_matches / nrow(df),
          2)
  })
  avg_home_goal_ratio_vs_r <- reactive({
    df <-
      team_details_r() %>% filter(home_team == input$details_team &
                                    away_team == input$details_team_enemy)
    validate(need(nrow(df) != 0, "No matches"))
    goal_scored <- df %>% summarise(sum(home_team_goal))
    goal_against <- df %>% summarise(sum(away_team_goal))
    # Guard against division by zero when the opponent never scored.
    if (goal_against == 0) {
      return(1)
    }
    round(goal_scored / goal_against,
          2)
  })
  avg_away_points_vs_r <- reactive({
    df <-
      team_details_r() %>% filter(away_team == input$details_team &
                                    home_team == input$details_team_enemy)
    validate(need(nrow(df) != 0, "No matches"))
    round(df %>% mutate(points = get_points(away_team_goal, home_team_goal)) %>% summarize(mean(points)),
          2)
  })
  avg_away_win_ratio_vs_r <- reactive({
    df <- team_details_r() %>% filter(away_team == input$details_team &
                                        home_team == input$details_team_enemy)
    validate(need(nrow(df) != 0, "No matches"))
    won_matches <-
      nrow(df %>% mutate(points = get_points(away_team_goal, home_team_goal)) %>% filter(points == 3))
    round(won_matches / nrow(df),
          2)
  })
  avg_away_goal_ratio_vs_r <- reactive({
    df <- team_details_r() %>% filter(away_team == input$details_team &
                                        home_team == input$details_team_enemy)
    validate(need(nrow(df) != 0, "No matches"))
    goal_scored <-
      df %>% summarise(sum(away_team_goal))
    goal_against <-
      df %>% summarise(sum(home_team_goal))
    # Guard against division by zero when the opponent never scored.
    if (goal_against == 0) {
      return(1)
    }
    round(goal_scored / goal_against,
          2)
  })
  # --- Value boxes for the head-to-head averages. ---
  output$details_home_avg_points_vs <-
    renderValueBox(
      valueBox(
        avg_home_points_vs_r(),
        'Average points per match',
        color = get_points_color(avg_home_points_vs_r())
      )
    )
  output$details_away_avg_points_vs <-
    renderValueBox(
      valueBox(
        avg_away_points_vs_r(),
        'Average points per match',
        color = get_points_color(avg_away_points_vs_r())
      )
    )
  output$details_home_avg_win_ratio_vs <-
    renderValueBox(
      valueBox(
        avg_home_win_ratio_vs_r(),
        'Average win ratio',
        color = get_win_ratio_color(avg_home_win_ratio_vs_r())
      )
    )
  output$details_away_avg_win_ratio_vs <-
    renderValueBox(
      valueBox(
        avg_away_win_ratio_vs_r(),
        'Average win ratio',
        color = get_win_ratio_color(avg_away_win_ratio_vs_r())
      )
    )
  output$details_home_avg_goals_ratio_vs <-
    renderValueBox(
      valueBox(
        avg_home_goal_ratio_vs_r(),
        'Average goal scored/against ratio',
        color = get_goal_ratio_color(avg_home_goal_ratio_vs_r())
      )
    )
  output$details_away_avg_goals_ratio_vs <-
    renderValueBox(
      valueBox(
        avg_away_goal_ratio_vs_r(),
        'Average goal scored/against ratio',
        color = get_goal_ratio_color(avg_away_goal_ratio_vs_r())
      )
    )
  # --- Win/draw/loss counts against the selected opponent, home matches. ---
  avg_home_wins_vs_r <- reactive({
    df <- team_details_r() %>% filter(home_team == input$details_team &
                                        away_team == input$details_team_enemy)
    validate(need(nrow(df) != 0, "No matches"))
    nrow(df %>% mutate(points = get_points(home_team_goal, away_team_goal)) %>% filter(points == 3))
  })
  avg_home_draws_vs_r <- reactive({
    df <- team_details_r() %>% filter(home_team == input$details_team &
                                        away_team == input$details_team_enemy)
    validate(need(nrow(df) != 0, "No matches"))
    nrow(df %>% mutate(points = get_points(home_team_goal, away_team_goal)) %>% filter(points == 1))
  })
  avg_home_loses_vs_r <- reactive({
    df <- team_details_r() %>% filter(home_team == input$details_team &
                                        away_team == input$details_team_enemy)
    validate(need(nrow(df) != 0, "No matches"))
    nrow(df %>% mutate(points = get_points(home_team_goal, away_team_goal)) %>% filter(points == 0))
  })
  output$details_home_wins_vs <-
    renderValueBox(valueBox(avg_home_wins_vs_r(),
                            'Wins count'))
  output$details_home_draws_vs <-
    renderValueBox(valueBox(avg_home_draws_vs_r(),
                            'Draws count'))
  output$details_home_loses_vs <-
    renderValueBox(valueBox(avg_home_loses_vs_r(),
                            'Loses count'))
  # --- Win/draw/loss counts against the selected opponent, away matches. ---
  avg_away_wins_vs_r <- reactive({
    df <- team_details_r() %>% filter(away_team == input$details_team &
                                        home_team == input$details_team_enemy)
    validate(need(nrow(df) != 0, "No matches"))
    nrow(df %>% mutate(points = get_points(away_team_goal, home_team_goal)) %>% filter(points == 3))
  })
  avg_away_draws_vs_r <- reactive({
    df <- team_details_r() %>% filter(away_team == input$details_team &
                                        home_team == input$details_team_enemy)
    validate(need(nrow(df) != 0, "No matches"))
    nrow(df %>% mutate(points = get_points(away_team_goal, home_team_goal)) %>% filter(points == 1))
  })
  avg_away_loses_vs_r <- reactive({
    df <- team_details_r() %>% filter(away_team == input$details_team &
                                        home_team == input$details_team_enemy)
    validate(need(nrow(df) != 0, "No matches"))
    nrow(df %>% mutate(points = get_points(away_team_goal, home_team_goal)) %>% filter(points == 0))
  })
  output$details_away_wins_vs <-
    renderValueBox(valueBox(avg_away_wins_vs_r(),
                            'Wins count'))
  output$details_away_draws_vs <-
    renderValueBox(valueBox(avg_away_draws_vs_r(),
                            'Draws count'))
  output$details_away_loses_vs <-
    renderValueBox(valueBox(avg_away_loses_vs_r(),
                            'Loses count'))
  # Cumulative points of the selected team over the stages of the season.
  team_points_r <- reactive({
    team_details_r() %>% mutate(points = ifelse(
      home_team == input$details_team,
      get_points(home_team_goal, away_team_goal),
      get_points(away_team_goal, home_team_goal)
    )) %>%
      arrange(stage) %>%
      mutate(total_points = cumsum(points))
  })
  output$details_team_points <- renderPlot({
    ggplot(data = team_points_r(), aes(x = stage, y = total_points)) +
      geom_line(size = 1) +
      geom_point(size = 3) +
      ylab("Total points") +
      xlab("Stage")
  })
  # Overview tab: team-centric match rows restricted to the selected season
  # and stage range.
  overview_dat_r <- reactive({
    # browser()
    df <- overview_dat %>%
      filter(season == input$overview_season) %>%
      filter(stage >= input$overview_stage_range[1]) %>%
      filter(stage <= input$overview_stage_range[2])
    # browser()
    validate(need(nrow(df) != 0, "No data"))
    return(df)
  })
  # Horizontal bar chart of total points per team, sorted ascending.
  output$overview_total_score <- renderPlot({
    df <-
      overview_dat_r() %>% group_by(team) %>% summarize(total_score = sum(score))
    # df <- overview_dat %>% group_by(team) %>% summarize(total_score=sum(score))
    zig <- df %>% arrange(total_score) %>% pull(team)
    ggplot(data = as.data.frame(df), aes(x = team, y = total_score)) +
      geom_col() +
      geom_text(aes(label = total_score), hjust = -1.0) +
      coord_flip() +
      xlab('') +
      ylab(
        paste0(
          "Total score in season ",
          input$overview_season,
          ' within stages ',
          input$overview_stage_range[[1]],
          ' and ',
          input$overview_stage_range[[2]]
        )
      ) +
      scale_x_discrete(limits = zig) +
      theme_gdocs()
  })
  # Horizontal bar chart of total goals scored per team, sorted ascending.
  output$overview_total_goals <- renderPlot({
    df <-
      overview_dat_r() %>% group_by(team) %>% summarize(total_goals = sum(own_goals))
    # df <- overview_dat %>% group_by(team) %>% summarize(total_score=sum(score))
    zig <- df %>% arrange(total_goals) %>% pull(team)
    ggplot(data = as.data.frame(df), aes(x = team, y = total_goals)) +
      geom_col() +
      geom_text(aes(label = total_goals), hjust = -1.0) +
      coord_flip() +
      xlab('') +
      ylab(
        paste0(
          "Total goals in season ",
          input$overview_season,
          ' within stages ',
          input$overview_stage_range[[1]],
          ' and ',
          input$overview_stage_range[[2]]
        )
      ) +
      scale_x_discrete(limits = zig) +
      theme_gdocs()
  })
  # Server-rendered opponent dropdown: excludes the currently selected team
  # and offers 'None' to hide the head-to-head section.
  output$details_opponent_input <- renderUI({
    selectInput(
      inputId = 'details_team_enemy',
      label = 'Select opponent:',
      choices = c('None', teams %>% filter(team != input$details_team))
    )
  })
  # Dynamic section headings for the head-to-head columns.
  output$home_vs <- renderUI({
    h3(paste0('Home matches vs ', input$details_team_enemy))
  })
  output$away_vs <- renderUI({
    h3(paste0('Away matches vs ', input$details_team_enemy))
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
3cce4bb3dcf40483c521455b181625b3f13e2681
|
fd13440b88de8d110a6b1850732bd09db959b6c0
|
/output/run_all_plots.r
|
c5810fecd814a0f47b4c2ab6df628f8db5a7c32b
|
[] |
no_license
|
gmcewan/SalmonFarmTreatmentStrategy
|
866c379401d0afda29397b68c2b9be786608b1bc
|
9ba1330d49fc8e0a6c054792dbadbef316aadb07
|
refs/heads/master
| 2020-05-25T15:44:28.614631
| 2016-10-11T17:47:46
| 2016-10-11T17:47:46
| 70,181,841
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,501
|
r
|
run_all_plots.r
|
# Load the plotting helpers; these files define plot.resistances(),
# plot.licecounts(), plot.treatments(), write.pressure.file(),
# open.chart.file() and the globals (SINGLE.PLOTS, cycles.*) used below.
source("plot-resistances.r")
source("plot-licecounts.r")
source("plot-treatments.r")
# get all the output directories
output.dirs = list.dirs(path=".", recursive=FALSE)
# Fixed list of scenario directories shown together in the multi-panel figures.
multi.dir.list = c("./responsive",
                   "./mosaic-30",
                   "./rotation",
                   "./periodic-longer",
                   "./periodic-shorter",
                   "./combination")
# Toggles selecting which figure groups to (re)generate on this run.
do.resistance = TRUE
do.treatment = TRUE
do.count = TRUE
do.infection.pressure = TRUE
# Open a multi-panel chart file and draw the shared axes: two y-axis strips
# down the left and three x-axis strips along the bottom.  Panels 6-11 of the
# layout are left empty for the caller to fill with data plots.
#
# filename - chart file to open (passed to open.chart.file)
# max.x    - upper x limit of the data panels (usually cycles.length*cycles.num)
# max.y    - upper y limit of the data panels
# ylabel   - label for the shared y-axes.  Defaults to the resistance-figure
#            label so existing callers are unaffected; count figures (e.g.
#            lice counts) can pass a more accurate label.
# NOTE: relies on the globals cycles.length and cycles.num for x-axis ticks.
start.multi <- function(filename, max.x, max.y,
                        ylabel="Proportion of alleles") {
  open.chart.file(filename)
  # Layout: cells 1-2 = y-axes, 3-5 = x-axes, 6-11 = data panels, 0 = empty.
  layout(matrix(c(1, 6, 7, 8, 2, 9, 10, 11, 0, 3, 4, 5), 3, 4, byrow=TRUE),
         c(1,6,6,6), c(4,4,1), TRUE)
  # An empty panel sharing the data panels' coordinate system, no decoration.
  blank.panel <- function() {
    plot(0, type="n", xaxt="n", yaxt="n",
         xlab="", ylab="", axes=0,
         xlim=c(0, max.x), ylim=c(0, max.y))
  }
  # do the y-axes down the left
  par(mar=c(0.3,5,2,0))
  for (i in seq_len(2)) {
    blank.panel()
    mtext(ylabel, side=2, line=3)
    axis(2, seq(0, max.y, max.y/10), seq(0, max.y, max.y/10))
  }
  # do the x-axes along the bottom
  par(mar=c(5,0.3,0,0))
  for (i in seq_len(3)) {
    blank.panel()
    mtext("Cycles", side=1, line=3)
    # Cycle numbers centred within each cycle; tick marks at cycle boundaries.
    axis(1, seq(cycles.length/2, max.x-cycles.length/2, cycles.length),
         seq(1, cycles.num, 1), tick=FALSE)
    axis(1, seq(0, max.x, cycles.length), labels=FALSE)
  }
}
# --- Resistance-allele figures -----------------------------------------------
if (do.resistance) {
  cat(paste("---","Resistances","\n"))
  if (!SINGLE.PLOTS) {
    # One multi-panel figure covering every scenario in multi.dir.list.
    max.x = cycles.length*cycles.num
    max.y = 1  # resistances are proportions, so the axis tops out at 1
    start.multi("resistances.pdf", max.x, max.y)
    plot.count = 1
    for (outdir in multi.dir.list) {
      cat(paste(outdir,"\n"))
      # Only panels on the bottom row get an x-axis, and only the left
      # column gets a y-axis (positions 1, 3 and 5 of the layout).
      x.axis = (plot.count > 4)
      y.axis = (plot.count %in% c(1,3,5))
      plot.resistances(outdir, x.axis, y.axis)
      plot.count = plot.count + 1
    }
    dev.off()
  } else {
    # One standalone figure per output directory; the nchar/substring tests
    # skip "." itself and hidden directories of the form "./.name".
    for (outdir in output.dirs[nchar(output.dirs)>2]) {
      if (substring(outdir,3,3) != ".") {
        cat(paste(outdir,"\n"))
        open.chart.file(paste(outdir,"/resistance.pdf",sep=""), figure.height=5)
        plot.resistances(outdir)
        dev.off()
      }
    }
  }
}
# --- Lice-count figures ------------------------------------------------------
if (do.count) {
  cat(paste("---","lice counts","\n"))
  if (!SINGLE.PLOTS) {
    # One multi-panel figure covering every scenario in multi.dir.list.
    max.x = cycles.length*cycles.num
    max.y = 10  # upper bound on the lice-count axis
    start.multi("licecounts.pdf", max.x, max.y)
    for (outdir in multi.dir.list) {
      cat(paste(outdir,"\n"))
      plot.licecounts(outdir)
    }
    dev.off()
  } else {
    # One standalone figure per output directory; skip "." and hidden dirs.
    for (outdir in output.dirs[nchar(output.dirs)>2]) {
      if (substring(outdir,3,3) != ".") {
        cat(paste(outdir,"\n"))
        open.chart.file(paste(outdir,"/licecounts.pdf",sep=""), figure.height=5)
        plot.licecounts(outdir)
        dev.off()
      }
    }
  }
}
# --- Infection-pressure data files -------------------------------------------
if (do.infection.pressure) {
  # create all the infection pressure files
  cat(paste("---","infection pressure","\n"))
  # One file per output directory; skip "." and hidden directories.
  for (outdir in output.dirs[nchar(output.dirs)>2]) {
    if (substring(outdir,3,3) != ".") {
      cat(paste(outdir,"\n"))
      write.pressure.file(outdir)
    }
  }
}
# --- Treatment-count figures -------------------------------------------------
if (do.treatment) {
  cat(paste("---","treatment counts","\n"))
  if (!SINGLE.PLOTS) {
    # One multi-panel figure.  The axes are built inline (rather than via
    # start.multi) because this figure's x-axis is the cycle number
    # (1..cycles.num) instead of the within-cycle timeline.
    max.x = cycles.num
    max.y = 20  # upper bound on the treatment-count axis
    open.chart.file("treatments.pdf")
    # Layout: cells 1-2 = y-axes, 3-5 = x-axes, 6-11 = data panels, 0 = empty.
    layout(matrix(c(1, 6, 7, 8, 2, 9, 10, 11, 0, 3, 4, 5), 3, 4, byrow=TRUE),
           c(1,6,6,6), c(4,4,1), TRUE)
    # do the y-axes down the left
    par(mar=c(0.3,5,2,0))
    for (i in seq(1,2,1)) {
      plot(0, type="n", xaxt="n", yaxt="n",
           xlab="", ylab="", axes=0,
           xlim=c(0, max.x), ylim=c(0, max.y))
      # Fixed label: this figure shows treatment counts (axis 0..20), not
      # allele proportions; the old text was copy-pasted from start.multi.
      mtext("Number of treatments", side=2, line=3)
      axis(2, seq(0, max.y, 2), seq(0, max.y, 2))
    }
    # do the x-axes along the bottom
    par(mar=c(5,0.3,0,0))
    for (i in seq(1,3,1)) {
      plot(0, type="n", xaxt="n", yaxt="n",
           xlab="", ylab="", axes=0,
           xlim=c(1, max.x), ylim=c(0, max.y))
      mtext("Cycles", side=1, line=3)
      axis(1, seq(1, cycles.num, 1), seq(1, cycles.num, 1))
    }
    for (outdir in multi.dir.list) {
      cat(paste(outdir,"\n"))
      plot.treatments(outdir)
    }
    dev.off()
  } else {
    # One standalone figure per output directory; skip "." and hidden dirs.
    for (outdir in output.dirs[nchar(output.dirs)>2]) {
      if (substring(outdir,3,3) != ".") {
        cat(paste(outdir,"\n"))
        open.chart.file(paste(outdir,"/treatments.pdf",sep=""), figure.height=5)
        plot.treatments(outdir)
        dev.off()
      }
    }
  }
}
|
2beccf48dffe0edbde18713fd4e9a126e9b334d4
|
f8eec53636689e647d3d828059506d3bcac2406b
|
/public/slides/admitidos-graphs.R
|
752b498f83f0b8760a8e19ac144cb45bb3d357be
|
[] |
no_license
|
mamaciasq/martin
|
e16ab27cd9b1cd10ae98eac8bc12754e6095980f
|
7841a5ad043323df85d70d7682bd3907acecc542
|
refs/heads/master
| 2021-01-24T10:15:18.477631
| 2018-04-27T04:45:03
| 2018-04-27T04:45:03
| 123,045,807
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,238
|
r
|
admitidos-graphs.R
|
# Admissions-dashboard graphs: load the admitted-students data and the chart
# helper functions, then build table/series/bar/pie charts per breakdown.
library(tidyverse) # version 1.2.1
library(readxl) # version 1.0.0
library(DT) # version 0.4
library(highcharter) # version 0.5.0.9999
library(treemap) # version 2.4-2
# Defines the Consolidado data set and the helpers tabla(), series(),
# barra_vertical(), barra_horizontal(), torta(), tablaall().
source("admitidos-pregrado.R", encoding = 'UTF-8')
source("funciones.R", encoding = 'UTF-8')
# Base colour palette, one colour per category in plotting order.
col <- c( "#8cc63f", # green
          "#f15a24", # orange
          "#0071bc", # bright blue
          "#6d6666", # grey
          "#fbb03b", # yellow
          "#93278f", # purple
          "#29abe2", # light blue
          "#c1272d", # red
          "#8b7355", # brown
          "#855b5b", # burgundy
          "#ed1e79") # pink
# Reporting period: year, semester and the title suffix used in chart titles.
ano <- 2018
semestre <- 1 # 1 or 2 as appropriate
periodo_actual_titulo <- " 2018-I"
# Thematic breakdowns:
############### Age: ###############
# Palette keyed to the age groups (in years) shown in these charts.
col <- c( "#8cc63f", # green, 17 or younger
          "#f15a24", # orange, 18 to 20
          "#0071bc", # bright blue, 21 to 25
          "#6d6666", # grey, 26 or older
          "#fbb03b", # yellow, no information
          "#93278f", # purple
          "#29abe2", # light blue
          "#c1272d", # red
          "#8b7355", # brown
          "#855b5b", # burgundy
          "#ed1e79") # pink
################ 1. Table
CAT_EDAD_TABLA <- tabla(
  datos = Consolidado,
  categoria = "CAT_EDAD",
  variable = 'Rango de edad - en años - del admitido',
  mensaje = "Número de admitidos por grupos de edad",
  titulo = "Admitidos por grupos de edad"
);CAT_EDAD_TABLA
# saveWidget(CAT_EDAD_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "CAT_EDAD_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")
################ 2. Time series
CAT_EDAD_SERIE <- series(
  datos = Consolidado,
  categoria = "CAT_EDAD",
  colores = col,
  titulo = "Número de admitidos por grupos de edad (en años)",
  eje = "Número de admitidos (k: miles)"
);CAT_EDAD_SERIE
# saveWidget(CAT_EDAD_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "CAT_EDAD_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")
################ 3. Current period
CAT_EDAD_BARRA <- barra_vertical(
  datos = Consolidado,
  categoria = "CAT_EDAD",
  colores = col,
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo,
  titulo = "Admitidos por grupos de edad",
  eje = "Número de admitidos (k: miles)"
); CAT_EDAD_BARRA
# saveWidget(CAT_EDAD_BARRA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "CAT_EDAD_BARRA.html"),
#            selfcontained = F, libdir = "libraryjs")
############### Sex: ###############
# Palette keyed to the sex categories shown in these charts.
col <- c( "#8cc63f", # green, men
          "#f15a24", # orange, women
          "#0071bc", # bright blue
          "#6d6666", # grey
          "#fbb03b", # yellow
          "#93278f", # purple
          "#29abe2", # light blue
          "#c1272d", # red
          "#8b7355", # brown
          "#855b5b", # burgundy
          "#ed1e79") # pink
################ 1. Table
SEXO_TABLA <- tabla(
  datos = Consolidado,
  categoria = "SEXO",
  variable = 'Sexo del admitido',
  mensaje = "Número de admitidos por sexo",
  titulo = "Admitidos por sexo"
);SEXO_TABLA
# saveWidget(SEXO_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "SEXO_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")
################ 2. Time series
SEXO_SERIE <- series(
  datos = Consolidado,
  categoria = "SEXO",
  colores = col,
  titulo = "Número de admitidos por sexo",
  eje = "Número de admitidos (k: miles)"
);SEXO_SERIE
# saveWidget(SEXO_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "SEXO_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")
################ 3. Current period (pie chart)
SEXO_TORTA <- torta(
  datos = Consolidado,
  variable = "SEXO",
  colores = col,
  titulo = "Admitidos por sexo",
  etiqueta = "Número de admitidos",
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo
);SEXO_TORTA
# saveWidget(SEXO_TORTA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "SEXO_TORTA.html"),
#            selfcontained = F, libdir = "libraryjs")
############### Socioeconomic stratum: ###############
# Palette keyed to the socioeconomic strata shown in these charts.
col <- c( "#8cc63f", # green, stratum 2 or lower
          "#f15a24", # orange, stratum 3
          "#0071bc", # bright blue, stratum 4 or higher
          "#6d6666", # grey, ND/NE (no data / not stated)
          "#fbb03b", # yellow
          "#93278f", # purple
          "#29abe2", # light blue
          "#c1272d", # red
          "#8b7355", # brown
          "#855b5b", # burgundy
          "#ed1e79") # pink
################ 1. Table
ESTRATO_TABLA <- tabla(
  datos = Consolidado,
  categoria = "ESTRATO",
  variable = 'Estrato socioeconómico del admitido',
  mensaje = "Número de admitidos según el estrato socioeconómico",
  titulo = "Admitidos según el estrato socioeconómico"
);ESTRATO_TABLA
# saveWidget(ESTRATO_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "ESTRATO_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")
################ 2. Time series
ESTRATO_SERIE <- series(
  datos = Consolidado,
  categoria = "ESTRATO",
  colores = col,
  titulo = "Número de admitidos por estrato socioeconómico",
  eje = "Número de admitidos (k: miles)"
);ESTRATO_SERIE
# saveWidget(ESTRATO_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "ESTRATO_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")
################ 3. Current period (pie chart and bar chart)
ESTRATO_TORTA <- torta(
  datos = Consolidado,
  variable = "ESTRATO",
  colores = col,
  titulo = "Admitidos por estrato socioeconómico",
  etiqueta = "Número de admitidos",
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo
);ESTRATO_TORTA
# saveWidget(ESTRATO_TORTA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "ESTRATO_TORTA.html"),
#            selfcontained = F, libdir = "libraryjs")
ESTRATO_BARRA <- barra_vertical(
  datos = Consolidado,
  categoria = "ESTRATO",
  colores = col,
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo,
  titulo = "Admitidos por estrato socioeconómico",
  eje = "Número de admitidos (k: miles)"
); ESTRATO_BARRA
# saveWidget(ESTRATO_BARRA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "ESTRATO_BARRA.html"),
#            selfcontained = F, libdir = "libraryjs")
############### Area of knowledge (SNIES): ###############
# Palette keyed to the SNIES knowledge areas (category labels kept as in the
# data; truncated Spanish names preserved from the original notes).
col <- c( "#93278f", # purple, agronomia..
          "#29abe2", # light blue, bellas artes
          "#fbb03b", # yellow, ciencias de...
          "#f15a24", # orange, ciencias sociales...
          "#0071bc", # bright blue, economia...
          "#8cc63f", # green, ingenieria...
          "#6d6666", # grey, matemáticas...
          "#c1272d", # red, no information
          "#8b7355", # brown
          "#855b5b", # burgundy
          "#ed1e79") # pink
################ 1. Table
AREAC_SNIES_TABLA <- tabla(
  datos = Consolidado,
  categoria = "AREAC_SNIES",
  variable = 'Modalidades de los admitidos por área de conocimiento (SNIES)',
  mensaje = "Número de admitidos por área de conocimiento (SNIES)",
  titulo = "Admitidos por área de conocimiento (SNIES)"
);AREAC_SNIES_TABLA
# saveWidget(AREAC_SNIES_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREAC_SNIES_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")
################ 2. Time series
AREAC_SNIES_SERIE <- series(
  datos = Consolidado,
  categoria = "AREAC_SNIES",
  colores = col,
  titulo = "Número de admitidos por área de conocimiento (SNIES)",
  eje = "Número de admitidos"
);AREAC_SNIES_SERIE
#
# saveWidget(AREAC_SNIES_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREAC_SNIES_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")
################ 3. Current period (horizontal bar chart)
AREAC_SNIES_BARRA <- barra_horizontal(
  datos = Consolidado,
  categoria = "AREAC_SNIES",
  colores = col,
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo,
  titulo = "Admitidos por área de conocimiento (SNIES)",
  eje = "Número de admitidos"
); AREAC_SNIES_BARRA
# saveWidget(AREAC_SNIES_BARRA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREAC_SNIES_BARRA.html"),
#            selfcontained = F, libdir = "libraryjs")
############### Area of knowledge (CINE): ###############
# Palette keyed to the CINE knowledge areas (category labels kept as in the
# data; truncated Spanish names preserved from the original notes).
col <- c( "#29abe2", # light blue, Administración...
          "#f15a24", # orange, Agricultura...
          "#fbb03b", # yellow, Artes y humanidades
          "#0071bc", # bright blue, Ciencias naturales...
          "#93278f", # purple, Ciencias sociales...
          "#8cc63f", # green, ingenieria...
          "#6d6666", # grey, Salud y ...
          "#8b7355", # brown, no information
          "#c1272d", # red, TIC (ICT)
          "#855b5b", # burgundy
          "#ed1e79") # pink
################ 1. Table
# Rows with a missing Clase are excluded from the CINE breakdown.
AREA_CINE_TABLA <- tabla(
  datos = Consolidado %>%
    filter(is.na(Clase)==FALSE),
  categoria = "AREA_CINE",
  variable = 'Modalidades de los admitidos por área de conocimiento (CINE)',
  mensaje = "Número de admitidos por área de conocimiento (CINE)",
  titulo = "Admitidos por área de conocimiento (CINE)"
);AREA_CINE_TABLA
# saveWidget(AREA_CINE_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREA_CINE_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")
################ 2. Time series
AREA_CINE_SERIE <- series(
  datos = Consolidado %>%
    filter(is.na(Clase)==FALSE),
  categoria = "AREA_CINE",
  colores = col,
  titulo = "Número de admitidos por área de conocimiento (CINE)",
  eje = "Número de admitidos"
);AREA_CINE_SERIE
# saveWidget(AREA_CINE_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREA_CINE_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")
################ 3. Current period (horizontal bar chart)
AREA_CINE_BARRA <- barra_horizontal(
  datos = Consolidado %>%
    filter(is.na(Clase)==FALSE),
  categoria = "AREA_CINE",
  colores = col,
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo,
  titulo = "Admitidos por área de conocimiento (CINE)",
  eje = "Número de admitidos"
); AREA_CINE_BARRA
# saveWidget(AREA_CINE_BARRA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREA_CINE_BARRA.html"),
#            selfcontained = F, libdir = "libraryjs")
############### Series: historical evolution of total admissions ###############
# Single-series palette (only the first colour, green, is used for the total).
col <- c( "#8cc63f", # green, Total
          "#f15a24", # orange
          "#0071bc", # bright blue
          "#6d6666", # grey
          "#fbb03b", # yellow
          "#93278f", # purple
          "#29abe2", # light blue
          "#c1272d", # red
          "#8b7355", # brown
          "#855b5b", # burgundy
          "#ed1e79") # pink
# Overall table of admitted students (all categories collapsed into TOTAL).
EVOLUCION_TABLA <- tablaall(
  datos = Consolidado,
  categoria = "TOTAL",
  mensaje = "Número de admitidos",
  titulo = "Admitidos"
);EVOLUCION_TABLA
# saveWidget(EVOLUCION_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "EVOLUCION_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")
# Historical time series of the total number of admitted undergraduates.
EVOLUCION_SERIE <- series(
  datos = Consolidado,
  categoria = "TOTAL",
  colores = col,
  titulo = "Evolución histórica del número total de admitidos a pregrado",
  eje = "Número de admitidos (k: miles)"
);EVOLUCION_SERIE
# saveWidget(EVOLUCION_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "EVOLUCION_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")
|
ffd039747124c7533301a1a80753b66bf647e445
|
bbbb9a5e75c7f0e51f153f20d4d990a1f33b60a5
|
/R/frm_fb_mh_refresh_imputed_values.R
|
219741041409939f79dde32fa3126144ce8ee8ca
|
[] |
no_license
|
strategist922/mdmb
|
01417c2fb8de64586d518234f6b4f1269c71b1fa
|
82bc222769d2f170e016a6ba223327bd4eaee723
|
refs/heads/master
| 2021-01-21T12:30:35.438258
| 2017-08-20T12:43:21
| 2017-08-20T12:43:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,004
|
r
|
frm_fb_mh_refresh_imputed_values.R
|
## Refresh the Metropolis-Hastings proposal SDs used when imputing values.
## For every imputed variable that has stored MH bookkeeping and a 'linreg'
## model, the proposal SD (column 3 / sd_proposal) is re-tuned from the
## recorded acceptance statistics (columns 1-2) via
## frm_proposal_refresh_helper(), and the counters are then reset to zero.
## Returns the imputations_mcmc list with updated mh_imputations_values.
frm_fb_mh_refresh_imputed_values <- function( imputations_mcmc , acc_bounds, ind0 )
{
    mh_values <- imputations_mcmc$mh_imputations_values
    for (vv in seq_len(imputations_mcmc$NV)){
        var_name <- imputations_mcmc$impute_vars[vv]
        mh_vv <- mh_values[[ var_name ]]
        # Adapt only when bookkeeping exists and the variable uses 'linreg'.
        if ( ( ! is.null( mh_vv ) ) & ( ind0[[ var_name ]]$model %in% c("linreg") ) ){
            res0 <- frm_proposal_refresh_helper(
                        acceptance_parameters = list( mh_vv[,1] , mh_vv[,2] ) ,
                        proposal_sd = mh_vv[,3] ,
                        acceptance_bounds = acc_bounds )
            mh_vv$sd_proposal <- res0$proposal_sd
            # Zero the acceptance counters for the next adaptation window.
            mh_vv[,1:2] <- 0 * mh_vv[,1:2]
            mh_values[[ var_name ]] <- mh_vv
        }
    }
    #---- arrange output
    imputations_mcmc$mh_imputations_values <- mh_values
    return(imputations_mcmc)
}
|
f57465968d2cba8ea210dd5a1391ff87f32a896e
|
28f660487cc9a1047c942ca31bb03e2b5ce66ac3
|
/inst/doc/PlotsAndStats.R
|
48f91abc84bb69e5f6c14eb893eda1f973aa4002
|
[] |
no_license
|
cran/cheddar
|
8f8f412e0acb74e86980f63e2c7e254489039064
|
f01d603cb35255f41c74511e93e4f395d0155bcc
|
refs/heads/master
| 2022-09-14T18:17:49.023624
| 2022-09-01T13:40:05
| 2022-09-01T13:40:05
| 17,695,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,536
|
r
|
PlotsAndStats.R
|
### R code from vignette source 'PlotsAndStats.Rnw'
###################################################
### code chunk number 1: PlotsAndStats.Rnw:33-44
###################################################
library(cheddar)
# Makes copy-paste much less painful
options(continue=' ')
options(width=90)
options(prompt='> ')
options(SweaveHooks = list(fig=function() par(mgp=c(2.5,1,0),
mar=c(4,4,2,1),
oma=c(0,0,1,0),
cex.main=0.8)))
###################################################
### code chunk number 2: PlotsAndStats.Rnw:122-124
###################################################
getOption("SweaveHooks")[["fig"]]()
data(TL84)
PlotNPS(TL84, 'Log10M', 'Log10N')
###################################################
### code chunk number 3: PlotsAndStats.Rnw:142-143
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'Log10N', show.web=FALSE, highlight.nodes=NULL)
###################################################
### code chunk number 4: PlotsAndStats.Rnw:150-151
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='labels', show.web=FALSE)
###################################################
### code chunk number 5: PlotsAndStats.Rnw:158-160
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='labels', show.web=FALSE,
node.labels='node', cex=0.5)
###################################################
### code chunk number 6: PlotsAndStats.Rnw:166-169
###################################################
getOption("SweaveHooks")[["fig"]]()
lots.of.letters <- c(letters, LETTERS, paste(LETTERS,letters,sep=''))
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='labels', show.web=FALSE,
node.labels=lots.of.letters[1:NumberOfNodes(TL84)])
###################################################
### code chunk number 7: PlotsAndStats.Rnw:174-175
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'Log10N', show.nodes.as='both', show.web=FALSE, cex=2)
###################################################
### code chunk number 8: PlotsAndStats.Rnw:185-187
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'Log10N', xlab=Log10MLabel(TL84),
ylab=Log10NLabel(TL84))
###################################################
### code chunk number 9: PlotsAndStats.Rnw:197-206
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,3))
PlotNPS(TL84, 'Log10M', 'OutDegree', show.web=FALSE)
abline(lm(OutDegree(TL84) ~ Log10M(TL84)))
PlotNPS(TL84, 'Log10M', 'InDegree', show.web=FALSE)
abline(lm(InDegree(TL84) ~ Log10M(TL84)))
PlotNPS(TL84, 'Log10M', 'Degree', show.web=FALSE)
abline(lm(Degree(TL84) ~ Log10M(TL84)))
###################################################
### code chunk number 10: PlotsAndStats.Rnw:218-219
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'PreyAveragedTrophicLevel')
###################################################
### code chunk number 11: PlotsAndStats.Rnw:226-227
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPS(TL84, 'Log10M', 'ChainAveragedTrophicLevel')
###################################################
### code chunk number 12: PlotsAndStats.Rnw:242-247
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,2))
PlotNPS(TL84, 'Log10M', 'PreyAveragedTrophicLevel', ylim=c(1, 6),
main='Prey-averaged')
PlotNPS(TL84, 'Log10M', 'ChainAveragedTrophicLevel', ylim=c(1, 6),
main='Chain-averaged')
###################################################
### code chunk number 13: PlotsAndStats.Rnw:261-266
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotMvN(TL84)
PlotNvM(TL84)
PlotBvM(TL84)
PlotMvB(TL84)
###################################################
### code chunk number 14: PlotsAndStats.Rnw:279-280
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'Log10N')
###################################################
### code chunk number 15: PlotsAndStats.Rnw:285-286
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'Log10N', rank.by='M')
###################################################
### code chunk number 16: PlotsAndStats.Rnw:294-295
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'Log10N', rank.by='M', show.web=TRUE)
###################################################
### code chunk number 17: PlotsAndStats.Rnw:300-301
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'PreyAveragedTrophicLevel', rank.by='M')
###################################################
### code chunk number 18: PlotsAndStats.Rnw:309-310
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotRankNPS(TL84, 'PreyAveragedTrophicLevel', rank.by='M', log10.rank=TRUE)
###################################################
### code chunk number 19: PlotsAndStats.Rnw:320-324
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,3))
PlotMvRankM(TL84)
PlotNvRankN(TL84)
PlotBvRankB(TL84)
###################################################
### code chunk number 20: PlotsAndStats.Rnw:338-339
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPSDistribution(TL84, 'Log10M')
###################################################
### code chunk number 21: PlotsAndStats.Rnw:345-346
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNPSDistribution(TL84, 'Log10M', density.args=list(bw=3))
###################################################
### code chunk number 22: PlotsAndStats.Rnw:366-367
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, col=1, pch=19, highlight.nodes=NULL)
###################################################
### code chunk number 23: PlotsAndStats.Rnw:374-375
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, col=1:56, pch=19, highlight.nodes=NULL)
###################################################
### code chunk number 24: PlotsAndStats.Rnw:384-385
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, colour.by='resolved.to', pch=19, highlight.nodes=NULL)
###################################################
### code chunk number 25: PlotsAndStats.Rnw:393-397
###################################################
getOption("SweaveHooks")[["fig"]]()
colour.spec <- c(Species='purple3', Genus='green3', 'red3')
PlotNvM(TL84, colour.by='resolved.to', colour.spec=colour.spec, pch=19,
highlight.nodes=NULL)
legend("topright", legend=names(colour.spec), pch=19, col=colour.spec)
###################################################
### code chunk number 26: PlotsAndStats.Rnw:408-420
###################################################
getOption("SweaveHooks")[["fig"]]()
symbol.spec = c(Bacteria=21, Plantae=22, Chromista=23,
Protozoa=24, Animalia=25, 19)
colour.spec = c(Bacteria='purple3', Plantae='green3',
Chromista='blue3', Protozoa='orange3',
Animalia='red3', 'black')
PlotNvM(TL84,
symbol.by='kingdom', symbol.spec=symbol.spec,
bg.by='kingdom', bg.spec=colour.spec,
colour.by='kingdom', colour.spec=colour.spec,
highlight.nodes=NULL)
legend("topright", legend=names(colour.spec), pch=symbol.spec,
col=colour.spec, pt.bg=colour.spec)
###################################################
### code chunk number 27: PlotsAndStats.Rnw:432-446
###################################################
getOption("SweaveHooks")[["fig"]]()
symbol.spec = c(Bacteria=21, Plantae=22, Chromista=23,
Protozoa=24, Animalia=25, 19)
colour.spec = c(Bacteria='purple3', Plantae='green3',
Chromista='blue3', Protozoa='orange3',
Animalia='red3', 'black')
PlotNvM(TL84,
symbol.by='kingdom', symbol.spec=symbol.spec,
bg.by='kingdom', bg.spec=colour.spec,
colour.by='kingdom', colour.spec=colour.spec,
highlight.nodes=NULL, show.web=FALSE)
legend("topright", legend=names(colour.spec), pch=symbol.spec,
col=colour.spec, pt.bg=colour.spec)
models <- NvMLinearRegressions(TL84, class='kingdom')
colours <- PlotLinearModels(models, colour.spec=colour.spec)
###################################################
### code chunk number 28: PlotsAndStats.Rnw:457-458
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, pch=NA, highlight.nodes=NULL)
###################################################
### code chunk number 29: PlotsAndStats.Rnw:471-480
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,2))
# Don't add ticks
options(cheddarTopAndRightTicks=FALSE)
PlotNvM(TL84)
# Add ticks
options(cheddarTopAndRightTicks=TRUE)
PlotNvM(TL84)
###################################################
### code chunk number 30: PlotsAndStats.Rnw:496-497
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, highlight.nodes=Cannibals)
###################################################
### code chunk number 31: PlotsAndStats.Rnw:503-504
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, highlight.nodes=IsolatedNodes)
###################################################
### code chunk number 32: PlotsAndStats.Rnw:510-511
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, highlight.nodes='Chaoborus punctipennis')
###################################################
### code chunk number 33: PlotsAndStats.Rnw:524-525
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, highlight.links=ResourceLargerThanConsumer)
###################################################
### code chunk number 34: PlotsAndStats.Rnw:531-533
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(TL84, highlight.nodes='Chaoborus punctipennis',
highlight.links=TrophicLinksForNodes(TL84, 'Chaoborus punctipennis'))
###################################################
### code chunk number 35: PlotsAndStats.Rnw:554-558
###################################################
getOption("SweaveHooks")[["fig"]]()
data(YthanEstuary)
par(mfrow=c(1,2))
PlotNvM(YthanEstuary)
PlotNvM(YthanEstuary, show.na=TRUE)
###################################################
### code chunk number 36: PlotsAndStats.Rnw:568-569
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotNvM(YthanEstuary, xlim=c(-10, 4), ylim=c(-10, 13), show.na=TRUE)
###################################################
### code chunk number 37: PlotsAndStats.Rnw:581-604
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
np <- NPS(TL84)
np[1,'M'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Node 1 M=NA', show.nodes.as='both', cex=2, show.na=TRUE)
np <- NPS(TL84)
np[1,'N'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Node 1 N=NA', show.nodes.as='both', cex=2, show.na=TRUE)
np <- NPS(TL84)
np[1,'M'] <- NA
np[1,'N'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Node 1 M=NA and N=NA', show.nodes.as='both', cex=2, show.na=TRUE)
np <- NPS(TL84)
np[c(10, 20, 30, 40),'M'] <- NA
np[c(10, 20, 30, 40),'N'] <- NA
PlotNvM(Community(nodes=np, trophic.links=TLPS(TL84), properties=CPS(TL84)),
main='Nodes 10, 20, 30 and 40 M=NA and N=NA', show.nodes.as='both',
cex=2, show.na=TRUE)
###################################################
### code chunk number 38: PlotsAndStats.Rnw:616-619
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,2))
PlotMvRankM(YthanEstuary)
PlotMvRankM(YthanEstuary, show.na=TRUE)
###################################################
### code chunk number 39: PlotsAndStats.Rnw:643-644
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotTLPS(TL84, 'resource.Log10M', 'consumer.Log10M')
###################################################
### code chunk number 40: PlotsAndStats.Rnw:653-654
###################################################
getOption("SweaveHooks")[["fig"]]()
PlotTLPS(TL84, 'resource.Log10M', 'consumer.Log10M', axes.limits.equal=TRUE)
###################################################
### code chunk number 41: PlotsAndStats.Rnw:675-680
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
PlotPredationMatrix(TL84)
PlotMRvMC(TL84)
PlotNCvNR(TL84)
PlotBRvBC(TL84)
###################################################
### code chunk number 42: PlotsAndStats.Rnw:694-695
###################################################
# NOTE(review): Stangle-extracted code from the cheddar package vignette
# PlotsAndStats.Rnw. TL84/TL86/BroadstoneStream are cheddar example
# community datasets; the getOption("SweaveHooks")[["fig"]]() calls set up
# figure devices for Sweave and are no-ops outside that context.
getOption("SweaveHooks")[["fig"]]()
PlotMRvMC(TL84)
###################################################
### code chunk number 43: PlotsAndStats.Rnw:703-705
###################################################
getOption("SweaveHooks")[["fig"]]()
# Resource-mass vs consumer-mass plot, styled by consumer category.
PlotMRvMC(TL84, colour.by='consumer.category', bg.by='consumer.category',
symbol.by='consumer.category')
###################################################
### code chunk number 44: PlotsAndStats.Rnw:716-719
###################################################
# Sum body mass, abundance and biomass by the default node 'category'.
SumMByClass(TL84)
SumNByClass(TL84)
SumBiomassByClass(TL84)
###################################################
### code chunk number 45: PlotsAndStats.Rnw:723-726
###################################################
# Same sums, grouped by the 'kingdom' node property instead.
SumMByClass(TL84, 'kingdom')
SumNByClass(TL84, 'kingdom')
SumBiomassByClass(TL84, 'kingdom')
###################################################
### code chunk number 46: PlotsAndStats.Rnw:732-734
###################################################
# SumBiomassByClass is a convenience wrapper around ApplyByClass with sum.
SumBiomassByClass(TL84)
ApplyByClass(TL84, 'Biomass', 'category', sum)
###################################################
### code chunk number 47: PlotsAndStats.Rnw:747-749
###################################################
# Linear regressions of log10(N) against log10(M), one per class.
models <- NvMLinearRegressions(TL84)
names(models)
###################################################
### code chunk number 48: PlotsAndStats.Rnw:752-753
###################################################
# Extract slope/intercept coefficients from each fitted model.
sapply(models, 'coef')
###################################################
### code chunk number 49: PlotsAndStats.Rnw:760-762
###################################################
# Regressions grouped by phylum instead of the default class.
models <- NvMLinearRegressions(TL84, class='phylum')
names(models)
###################################################
### code chunk number 50: PlotsAndStats.Rnw:770-771
###################################################
# Classes with too few points yield NULL instead of a model.
sapply(models, is.null)
###################################################
### code chunk number 51: PlotsAndStats.Rnw:777-780
###################################################
data(BroadstoneStream)
models <- NvMLinearRegressions(BroadstoneStream)
sapply(models, is.null)
###################################################
### code chunk number 52: PlotsAndStats.Rnw:784-787
###################################################
# Whole-community N-vs-M regression slope and intercept.
NvMSlope(TL84)
NvMIntercept(TL84)
NvMSlopeAndIntercept(TL84)
###################################################
### code chunk number 53: PlotsAndStats.Rnw:790-793
###################################################
# Per-class slopes/intercepts (default class grouping).
NvMSlopeByClass(TL84)
NvMInterceptByClass(TL84)
NvMSlopeAndInterceptByClass(TL84)
###################################################
### code chunk number 54: PlotsAndStats.Rnw:796-799
###################################################
# Per-kingdom slopes/intercepts.
NvMSlopeByClass(TL84, class='kingdom')
NvMInterceptByClass(TL84, class='kingdom')
NvMSlopeAndInterceptByClass(TL84, class='kingdom')
###################################################
### code chunk number 55: PlotsAndStats.Rnw:835-842
###################################################
# Chunks 55-61 reproduce figures from Jonsson et al. (2005) Advances in
# Ecological Research, comparing the Tuesday Lake 1984 and 1986 webs.
getOption("SweaveHooks")[["fig"]]()
data(TL86)
par(mfrow=c(1,2))
# Fig. 3: food webs drawn in mass-vs-abundance space, side by side.
PlotMvN(TL84, show.nodes.as='both', cex=2, xlim=c(-2, 10), ylim=c(-14, 0),
highlight.nodes=NULL, highlight.links=NULL, main='')
PlotMvN(TL86, show.nodes.as='both', cex=2, xlim=c(-2, 10), ylim=c(-14, 0),
highlight.nodes=NULL, highlight.links=NULL, main='')
title(main='Jonsson et al. (2005) AER, Fig. 3 (p 30)', outer=TRUE)
###################################################
### code chunk number 56: PlotsAndStats.Rnw:851-857
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(1,2))
# Fig. 4: consumer mass vs resource mass; dashed y=x marks equal masses.
PlotMCvMR(TL84, xlim=c(-14, 0), ylim=c(-14, 0), main='')
abline(a=0, b=1, lty=2)
PlotMCvMR(TL86, xlim=c(-14, 0), ylim=c(-14, 0), main='')
abline(a=0, b=1, lty=2)
title(main='Jonsson et al. (2005) AER, Fig. 4 (p 33)', outer=TRUE)
###################################################
### code chunk number 57: PlotsAndStats.Rnw:866-872
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
# Fig. 5: abundance-vs-mass (top row) and biomass-vs-mass (bottom row),
# without the web links overlaid.
PlotNvM(TL84, xlim=c(-14, 0), ylim=c(-2,10), show.web=FALSE, main='')
PlotNvM(TL86, xlim=c(-14, 0), ylim=c(-2,10), show.web=FALSE, main='')
PlotBvM(TL84, xlim=c(-14, 0), ylim=c(-8,2), show.web=FALSE, main='')
PlotBvM(TL86, xlim=c(-14, 0), ylim=c(-8,2), show.web=FALSE, main='')
title(main='Jonsson et al. (2005) AER, Fig. 5 (p 37)', outer=TRUE)
###################################################
### code chunk number 58: PlotsAndStats.Rnw:881-891
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
# Fig. 7: consumer-vs-resource abundance (top) and biomass (bottom).
PlotNCvNR(TL84, xlim=c(0, 10), ylim=c(-2,10), main='')
abline(a=0, b=1, lty=2)
PlotNCvNR(TL86, xlim=c(0, 10), ylim=c(-2,10), main='')
abline(a=0, b=1, lty=2)
PlotBCvBR(TL84, xlim=c(-8, -2), ylim=c(-8, -2), main='')
abline(a=0, b=1, lty=2)
PlotBCvBR(TL86, xlim=c(-8, -2), ylim=c(-8, -2), main='')
abline(a=0, b=1, lty=2)
title(main='Jonsson et al. (2005) AER, Fig. 7 (p 47)', outer=TRUE)
###################################################
### code chunk number 59: PlotsAndStats.Rnw:900-910
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
# Fig. 8: abundance and biomass pyramids; isolated nodes are dropped
# first and pyramid levels come from the floored trophic height.
TL84.no.iso <- RemoveIsolatedNodes(TL84)
TL86.no.iso <- RemoveIsolatedNodes(TL86)
tl84.levels <- floor(TrophicHeight(TL84.no.iso))
tl86.levels <- floor(TrophicHeight(TL86.no.iso))
PlotNPyramid(TL84.no.iso, level=tl84.levels, main='', ylab='Trophic height')
PlotNPyramid(TL86.no.iso, level=tl86.levels, main='')
PlotBPyramid(TL84.no.iso, level=tl84.levels, main='', ylab='Trophic height')
PlotBPyramid(TL86.no.iso, level=tl86.levels, main='')
title(main='Jonsson et al. (2005) AER, Fig. 8 (p 49)', outer=TRUE)
###################################################
### code chunk number 60: PlotsAndStats.Rnw:919-925
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
# Fig. 10: rank-abundance and rank-biomass curves.
PlotNvRankN(TL84, xlim=c(0,60), ylim=c(-2, 10), main='')
PlotNvRankN(TL86, xlim=c(0,60), ylim=c(-2, 10), main='')
PlotBvRankB(TL84, xlim=c(0,60), ylim=c(-8, -2), main='')
PlotBvRankB(TL86, xlim=c(0,60), ylim=c(-8, -2), main='')
title(main='Jonsson et al. (2005) AER, Fig. 10 (p 57)', outer=TRUE)
###################################################
### code chunk number 61: PlotsAndStats.Rnw:934-946
###################################################
getOption("SweaveHooks")[["fig"]]()
par(mfrow=c(2,2))
# Fig. 11: node properties plotted against log10 of mass rank.
PlotRankNPS(TL84, property='Log10N', rank.by='M', log10.rank=TRUE,
xlim=c(0,2), ylim=c(-2, 10), ylab=Log10NLabel(TL84), main='')
PlotRankNPS(TL86, property='Log10N', rank.by='M', log10.rank=TRUE,
xlim=c(0,2), ylim=c(-2, 10), ylab=Log10NLabel(TL84), main='')
PlotRankNPS(TL84, property='Log10Biomass', rank.by='M',
log10.rank=TRUE, xlim=c(0,2), ylim=c(-8, -2),
ylab=Log10BLabel(TL84), main='')
PlotRankNPS(TL86, property='Log10Biomass', rank.by='M',
log10.rank=TRUE, xlim=c(0,2), ylim=c(-8, -2),
ylab=Log10BLabel(TL84), main='')
title(main='Jonsson et al. (2005) AER, Fig. 11 (p 60)', outer=TRUE)
###################################################
### code chunk number 62: PlotsAndStats.Rnw:957-981
###################################################
# Chunk 62 defines and uses a helper to plot one community's node
# property against another's (Jonsson et al. 2005, Fig. 12).
getOption("SweaveHooks")[["fig"]]()
# Scatter-plot a node property of community `a` against the same property
# of community `b`, pairing values by node name across the union of both
# node sets; PlaceMissingPoints positions nodes absent from one community,
# and a dashed y = x reference line is drawn for comparison.
PlotCommunityVCommunity <- function(a, b, property, xlim=NULL, ylim=NULL, ...)
{
  nodes.a <- NP(a, 'node')
  nodes.b <- NP(b, 'node')
  nodes <- union(nodes.a, nodes.b)

  # Pull the property column from each community, keyed by node name so
  # the two vectors can be aligned over the combined node list.
  values.a <- NPS(a, property)[, property]
  names(values.a) <- nodes.a
  values.b <- NPS(b, property)[, property]
  names(values.b) <- nodes.b

  xy <- PlaceMissingPoints(values.a[nodes], xlim,
                           values.b[nodes], ylim)
  plot(xy[, 1], xy[, 2], xlim=xlim, ylim=ylim, ...)
  abline(a=0, b=1, lty=2)
}
par(mfrow=c(1,2))
# Fig. 12: TL84 vs TL86 abundance (left) and biomass (right) per node.
PlotCommunityVCommunity(TL84, TL86, 'Log10N', xlim=c(-2,10), ylim=c(-2,10),
xlab=~log[10]~(N~of~84), ylab=~log[10]~(N~of~86),pch=19)
PlotCommunityVCommunity(TL84, TL86, 'Log10Biomass',
xlim=c(-8,-2), ylim=c(-8,-2),
xlab=~log[10]~(B~of~84), ylab=~log[10]~(B~of~86),pch=19)
title(main='Jonsson et al. (2005) AER, Fig. 12 (p 61)', outer=TRUE)
###################################################
### code chunk number 63: PlotsAndStats.Rnw:994-1010
###################################################
getOption("SweaveHooks")[["fig"]]()
# Reproduce Layer et al. (2010) Fig. 6: for two pH webs, the N-vs-M web
# plus a density of log10 interaction strengths (M_C/M_R)^0.75 over links
# with known masses.
data(pHWebs)
par(mfrow=c(2,2))
for(community in pHWebs[1:2])
{
PlotNvM(community, xlim=c(-15, 10), ylim=c(-5,15), main='',
highlight.nodes=NULL)
text(-15, 13, with(CPS(community), paste(title, ', pH ', pH, sep='')),
adj=0, cex=1.5)
tlps <- TLPS(community, node.properties='M')
# Drop links where either endpoint lacks a body mass.
tlps <- tlps[!is.na(tlps$resource.M) & !is.na(tlps$consumer.M),]
interaction.strength <- log10( (tlps$consumer.M / tlps$resource.M)^0.75 )
plot(density(interaction.strength), xlim=c(-4,14), ylim=c(0,0.6),
main='', xlab=~log[10]((M[C]/M[R])^0.75))
rug(interaction.strength)
}
title(main='Layer et al. (2010) AER, Fig. 6 (p 282)', outer=TRUE)
###################################################
### code chunk number 64: PlotsAndStats.Rnw:1022-1037
###################################################
getOption("SweaveHooks")[["fig"]]()
# Reproduce Woodward et al. (2005) Fig. 4: Broadstone Stream M-vs-N web;
# in the right panel only links where the resource outweighs the consumer
# are drawn solid (lty 1), the rest get lty 0 (invisible).
data(BroadstoneStream)
par(mfrow=c(1,2))
PlotMvN(BroadstoneStream, show.nodes.as='labels', label.cex=0.8,
xlim=c(-2, 4.2), ylim=c(-6,2), main='', show.na=FALSE,
highlight.links=NULL)
abline(a=0, b=-1)
tlps <- TLPS(BroadstoneStream, node.properties='M')
lty <- rep(0, NumberOfTrophicLinks(BroadstoneStream))
lty[tlps$resource.M > tlps$consumer.M] <- 1
PlotMvN(BroadstoneStream, show.nodes.as='labels', label.cex=0.8,
xlim=c(-2, 4.2), ylim=c(-6,2), main='', show.na=FALSE,
highlight.links=NULL, link.lty=lty)
abline(a=0, b=-1)
title(main='Woodward et al. (2005) AER, Fig. 4 (p 108)', outer=TRUE)
###################################################
### code chunk number 65: PlotsAndStats.Rnw:1052-1055
###################################################
# Tri-trophic statistics table across three communities.
collection <- CommunityCollection(list(TL84, TL86, YthanEstuary))
table <- NvMTriTrophicTable(collection)
print(round(table,2))
###################################################
### code chunk number 66: PlotsAndStats.Rnw:1059-1088
###################################################
# Hand-rolled version of the tri-trophic table: for each community count
# the six possible mass orderings over resource/intermediate/consumer in
# three-node chains, plus pairwise mass comparisons over all links.
res <- lapply(list(TL84, TL86, YthanEstuary), function(community)
{
community <- RemoveNodes(community, remove=with(NPS(community), node[is.na(M) | is.na(N)]))
community <- RemoveCannibalisticLinks(community)
community <- RemoveIsolatedNodes(community)
chains <- ThreeNodeChains(community, node.properties='M')
MR <- chains$bottom.M
MI <- chains$intermediate.M
MC <- chains$top.M
lp <- TLPS(community, node.properties='M')
return (c('MR<=MI<=MC'=sum(MR<=MI & MI<=MC),
'MR<=MC<MI'=sum(MR<=MC & MC<MI),
'MI<MR<=MC'=sum(MI<MR & MR<=MC),
'MI<=MC<MR'=sum(MI<=MC & MC<MR),
'MC<MR<MI'=sum(MC<MR & MR<MI),
'MC<MI<MR'=sum(MC<MI & MI<MR),
'All 2-chains'=nrow(chains),
'MR<MC'=sum(lp$resource.M<lp$consumer.M),
'MR=MC'=sum(lp$resource.M==lp$consumer.M),
'MR>MC'=sum(lp$resource.M>lp$consumer.M),
'All links'=nrow(lp)))
})
res <- do.call('cbind', res)
colnames(res) <- c('TL84', 'TL86', 'Ythan Estuary')
print(round(res,2))
###################################################
### code chunk number 67: PlotsAndStats.Rnw:1095-1107
###################################################
getOption("SweaveHooks")[["fig"]]()
# Reproduce Cohen et al. (2009) Fig. 1: N-vs-M with plot symbols keyed to
# trophic role (intermediate=20, top=8, else 1), paired with the
# upper/lower-triangle link plot.
par(mfrow=c(3,2))
for(community in list(TL84, TL86, YthanEstuary))
{
community <- RemoveIsolatedNodes(community)
pch <- rep(1, NumberOfNodes(community))
pch[IsIntermediateNode(community)] <- 20
pch[IsTopLevelNode(community)] <- 8
PlotNvM(community, col=1, highlight.nodes=NULL, show.web=FALSE,
main='', pch=pch)
PlotAuppervAlower(community, main='')
}
title(main='Cohen et al. (2009) PNAS, Fig. 1 (p 22336)', outer=TRUE)
###################################################
### code chunk number 68: PlotsAndStats.Rnw:1116-1119
###################################################
# Per-node quantitative descriptors for the Chesapeake Bay web.
data(ChesapeakeBay)
res <- NodeQuantitativeDescriptors(ChesapeakeBay, 'biomass.flow')
print(round(res[1:6,],2))
###################################################
### code chunk number 69: PlotsAndStats.Rnw:1123-1125
###################################################
# Whole-web quantitative descriptors weighted by biomass flow.
res <- QuantitativeDescriptors(ChesapeakeBay, 'biomass.flow')
print(round(res,3))
|
b8a2976e748b9e9f3852a43cba3f9d72aff7626a
|
0e5605d06591219417601c639c40d4d45c665774
|
/HomeDepot/R/Long_xgb_v2.R
|
c09042bfed0f5afb4daaf5a3011f208420d51feb
|
[] |
no_license
|
nguyenhailong/Kaggle
|
2e7898d01144d76e43857328d398ca167863c5a4
|
6e36f95ae5455f003fe1c452d98a49ef28d0de35
|
refs/heads/master
| 2020-05-26T05:10:50.090619
| 2017-02-19T14:41:36
| 2017-02-19T14:41:36
| 82,465,390
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,690
|
r
|
Long_xgb_v2.R
|
# Kaggle Telstra fault-severity pipeline: load the train/test tables plus
# the long-format auxiliary tables, pivot them wide (one row per id),
# and merge everything into the modelling frames.
# Based on Ben Hamner script from Springleaf
# https://www.kaggle.com/benhamner/springleaf-marketing-response/random-forest-example
library(readr)
library(xgboost)
library(dplyr)
library(tidyr)
# NOTE(review): hard-coded working directory ties this script to one machine.
setwd('~/GitHub/Kaggle-Telstra/R')
#my favorite seed^^
set.seed(2401)
cat("reading the train and test data\n")
train <- read_csv("../data/train.csv")
test <- read_csv("../data/test.csv")
# Auxiliary tables: long format, one row per (id, attribute) pair.
event_type <- read_csv('../data/event_type.csv')
log_feature <- read_csv('../data/log_feature.csv')
resource_type <- read_csv('../data/resource_type.csv')
severity_type <- read_csv('../data/severity_type.csv')
dim(event_type)
# One-hot encode event_type: TRUE where the id has that event type.
event_type$exist = T
event_type = event_type %>% spread(event_type,exist, fill = FALSE)
# Interactive sanity check: count of TRUE cells after the pivot.
sum(event_type) - sum(event_type$id)
dim(resource_type)
resource_type$exist = T
resource_type = resource_type %>% spread(resource_type,exist, fill = FALSE)
sum(resource_type) - sum(resource_type$id)
dim(severity_type)
severity_type$exist = T
severity_type = severity_type %>% spread(severity_type,exist, fill = FALSE)
sum(severity_type) - sum(severity_type$id)
sum(log_feature$volume)
# log_feature carries a numeric volume per (id, feature); pivot wide with
# NA for missing combinations, then zero-fill.
log_feature = log_feature %>% spread(log_feature,volume, fill = NA)
sum(log_feature,na.rm = T) - sum(log_feature$id,na.rm = T)
log_feature[is.na(log_feature)] <- 0
# Join all wide tables onto train and test by id (inner join via merge).
train = merge(train, event_type,'id')
train = merge(train, log_feature,'id')
train = merge(train, resource_type,'id')
train = merge(train, severity_type,'id')
test = merge(test, event_type,'id')
test = merge(test, log_feature,'id')
test = merge(test, resource_type,'id')
test = merge(test, severity_type,'id')
# There are some NAs in the integer columns so conversion to zero
#train[is.na(train)] <- 0
#test[is.na(test)] <- 0
# Predictor columns: drop column 1 (id) and column 3 (fault_severity label).
feature.names <- names(train)[c(-1,-3)]
cat("Feature Names\n")
#feature.names
cat("assuming text variables are categorical & replacing them with numeric ids\n")
# Label-encode character columns with factor levels shared across
# train and test so codes agree between the two frames.
for (f in feature.names) {
if (class(train[[f]])=="character") {
levels <- unique(c(train[[f]], test[[f]]))
train[[f]] <- as.integer(factor(train[[f]], levels=levels))
test[[f]] <- as.integer(factor(test[[f]], levels=levels))
}
}
cat("train data column names after slight feature engineering\n")
#names(train)
cat("test data column names after slight feature engineering\n")
#names(test)
tra<-train[,feature.names]
# 20% random holdout used only as a watchlist for monitoring.
h<-sample(nrow(train),nrow(train)*0.2)
dval<-xgb.DMatrix(data=data.matrix(tra[h,]),label=train$fault_severity[h],missing = NA)
dtrainFull<-xgb.DMatrix(data=data.matrix(tra),label=train$fault_severity,missing = NA)
watchlist<-list(val=dval,train=dtrainFull)
# Hyperparameters for the 3-class softprob model; these values are also
# embedded into the output filenames below for traceability.
eta_rate = 0.023
depth = 6
sub = 0.83
col = 0.77
#parallel_tree = 1
# Upper bound on boosting rounds; early stopping picks the actual round.
round = 10000
param <- list( objective = "multi:softprob",
num_class = 3,
booster = "gbtree",
eval_metric = "mlogloss",
eta = eta_rate, # 0.06, #0.01,
max_depth = depth, #changed from default of 8
subsample = sub, # 0.7
colsample_bytree = col # 0.7
#num_parallel_tree = 2
# alpha = 0.0001,
# lambda = 1
)
# Cross-validated training: 5-fold CV with early stopping on mlogloss;
# out-of-fold predictions (prediction = T) are saved for stacking.
#modelcv
start.time = Sys.time()
gc()
clf_cv <- xgb.cv(param=param, data=dtrainFull,
#watchlist = watchlist,
early.stop.round = 300, # train with a validation set will stop if the performance keeps getting worse consecutively for k rounds
nthread = 6, # number of CPU threads
maximize = FALSE,
nrounds = round,
nfold = 5, # number of CV folds
verbose = T,
prediction = T,
print.every.n = 10)
Sys.time() - start.time
# Best round = the one minimising mean CV mlogloss.
best_round = which.min(clf_cv$dt$test.mlogloss.mean)
# Fixed label: the evaluation metric is mlogloss, not AUC.
cat('Best mlogloss: ', clf_cv$dt$test.mlogloss.mean[best_round],'+',clf_cv$dt$test.mlogloss.std[best_round], ' at round: ',best_round, '\n')
cat("saving the CV prediction file\n")
# Out-of-fold class probabilities, keyed by id, for later ensembling.
cv_pred <- data.frame(id=train$id, fault_severity=clf_cv$pred)
# Filename encodes CV score and all hyperparameters for traceability.
filename <- paste0("../model/train_",clf_cv$dt$test.mlogloss.mean[best_round],'(',clf_cv$dt$test.mlogloss.std[best_round],")_Long_xgb_features", length(feature.names),"_depth",depth,"_eta", eta_rate, "_round", best_round, "_sub",sub,"_col",col,".csv")
write_csv(cv_pred, filename)
# Final fit on the full training set for the CV-selected round count,
# followed by test-set prediction and submission file writing.
#model
gc()
clf <- xgb.train(param=param, data=dtrainFull,
watchlist = watchlist,
maximize = FALSE,
nrounds = best_round,
verbose = T,
print.every.n = 10)
# Persist the fitted model alongside its CV score and hyperparameters.
save(clf, file =paste0("../model/",clf_cv$dt$test.mlogloss.mean[best_round],'(',clf_cv$dt$test.mlogloss.std[best_round],")_Long_xgb_features", length(feature.names),"_depth",depth,"_eta", eta_rate, "_round", best_round, "_sub",sub,"_col",col,".RData"))
Sys.time() - start.time
# softprob returns a flat vector of length nrow * num_class; reshape to
# one row per test id with 3 class-probability columns.
pred1 <- predict(clf, data.matrix(test[,feature.names]))
yprob = matrix(pred1,nrow = nrow(test),ncol = 3,byrow = T)
submission <- as.data.frame(cbind(test$id,yprob))
names(submission) = c('id','predict_0','predict_1','predict_2')
cat("saving the submission file\n")
filename <- paste0("../submissions/test_",clf_cv$dt$test.mlogloss.mean[best_round],'(',clf_cv$dt$test.mlogloss.std[best_round],")_Long_xgb_features", length(feature.names),"_depth",depth,"_eta", eta_rate, "_round", best_round, "_sub",sub,"_col",col,".csv")
write_csv(submission, filename)
#Reprint
# Fixed label: the evaluation metric is mlogloss, not AUC.
cat('Best mlogloss: ', clf_cv$dt$test.mlogloss.mean[best_round],'+',clf_cv$dt$test.mlogloss.std[best_round], ' at round: ',best_round, '\n')
Sys.time() - start.time
|
03f39406e7fb2a4e1bb0e74076174698618f5604
|
665c32727f3920aaaa8e2535f66d8df09d55944a
|
/man/write_survival.Rd
|
0bedc448fa7449d481f42bf0214435dd243baaa1
|
[] |
no_license
|
cran/survivalAnalysis
|
5dd6ad5938641467b41655c955416422ca078d7a
|
ea726a3265120f159a15cede3191b892bc79bf73
|
refs/heads/master
| 2022-02-23T03:34:21.388556
| 2022-02-11T13:00:02
| 2022-02-11T13:00:02
| 147,193,710
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,593
|
rd
|
write_survival.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/output.R
\name{write_survival}
\alias{write_survival}
\title{Print the essentials of a SurvivalAnalysisUnivariateResult.}
\usage{
write_survival(
...,
file,
label = NULL,
p_precision = 3,
hr_precision = 2,
time_precision = 1,
include_end_separator = FALSE,
timespan_unit = c("days", "months", "years")
)
}
\arguments{
\item{...}{Results generated by \code{\link{analyse_survival}},
or \code{\link{analyse_multivariate}}, or lists of such objects}
\item{file}{A connection, or a character string naming the file to print to.
(see \code{\link{cat}})}
\item{label}{A label describing the result,
or a vector of the same size as results in ... (will then be mapped 1:1)}
\item{p_precision, hr_precision, time_precision}{Precision with which to print floating point values}
\item{include_end_separator}{Boolean:
Append "\\n---\\n" as separator?
Comes handy if printing multiple results following each other}
\item{timespan_unit}{Unit for time spans: "days", "months" or "years"}
}
\value{
None (invisible NULL).
}
\description{
Write complete textual information for one or multiple survival
analysis results in a text file.
}
\details{
As write_survival takes potentially multiple objects, it cannot
return its input in a cleanly defined way.
You can still elegantly combine \code{write_survival} in a pipe followed by
\code{\link{kaplan_meier_plot}} or \code{\link{kaplan_meier_grid}}
for a single input object if you apply the
tee pipe operator \code{\%T>\%} in front of \code{write_survival}.
}
|
fab367fd4d81549afc44b9ad2011673a949c4b78
|
e8c685f68dc752f5f332b44e5b4d76c048a3e436
|
/R/st.err.R
|
9006d9ba54c9105322096398624d12a4473df5f8
|
[] |
no_license
|
cran/IsoCorr
|
a8d2b82206ae741f2014d6516e1f08da00b54df0
|
e9b1a16aac0cc741ee34e0baa6bfb607242127d5
|
refs/heads/master
| 2022-12-25T20:23:38.517959
| 2020-10-01T07:30:12
| 2020-10-01T07:30:12
| 301,808,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 116
|
r
|
st.err.R
|
#' Standard error of the mean: sd(x) / sqrt(n).
#'
#' @param x A numeric vector.
#' @param na.rm Drop `NA` values before computing? Defaults to `FALSE`,
#'   in which case any `NA` in `x` propagates to the result (as `sd` does).
#' @return A single numeric value.
st.err <- function(x, na.rm = FALSE) {
  # isTRUE() is robust where `na.rm == TRUE` is not: an NA or
  # zero-length na.rm would make if() error opaquely.
  if (isTRUE(na.rm)) {
    x <- na.omit(x)
  }
  sd(x) / sqrt(length(x))
}
|
5a9db761bda740a871ed9bdba765c800310848db
|
6786bc8704dd3adfb85aad5fa881f82ac9f82032
|
/R/utils.r
|
29e2e4990d1fb391f1d189970f5becdd24f36869
|
[] |
no_license
|
hadley/mutatr
|
1d5247e8bb320ae29cfb7156f09a268873695f10
|
05d1a9bfe7dc2db970aa2d4ad588caa427f1718f
|
refs/heads/master
| 2021-01-23T13:18:21.256231
| 2010-06-28T17:11:17
| 2010-06-28T17:11:17
| 277,007
| 8
| 0
| null | 2013-03-30T13:35:17
| 2009-08-13T14:01:02
|
R
|
UTF-8
|
R
| false
| false
| 350
|
r
|
utils.r
|
#' Environment name.
#'
#' Derive an environment's name by capturing its printed representation
#' and stripping the surrounding "<environment: ...>" decoration from the
#' first output line.
#'
#' @param env environment
#' @keywords internal
envname <- function(env) {
  printed <- utils::capture.output(print(env))
  first.line <- printed[1]
  gsub("<environment: |>", "", first.line)
}
#' Is this a mutatr object?
#'
#' Tests whether `x` carries the "mutatr" S3 class (directly or via
#' inheritance).
#'
#' @param x object to test
#' @export
is.mutatr <- function(x) {
  inherits(x, "mutatr")
}
|
c8d2348e27f067d55a3100bc54af984d0a13ebfb
|
584e0856fc8c9b514e9abe4b5405572f3a1f4463
|
/R/p53sf.R
|
54028f27f49b746e232ee865b6146b26be3c682c
|
[
"MIT"
] |
permissive
|
shaoyoucheng/p53retriever
|
9af93dae438956af9f10e5887071b615f02720ad
|
108be4b09ec040d0e299dd6c067d5049caf54612
|
refs/heads/master
| 2021-12-10T15:37:30.667361
| 2016-08-24T08:28:18
| 2016-08-24T08:28:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 68,451
|
r
|
p53sf.R
|
#' Locate candidate p53 responsive elements (full and half) on a DNA sequence
#'
#' @param seq.ini A character string containing the sequence. The sequence must be composed exclusively of DNA bases (a,c,t,g)
#' @return A dataframe containing the responsive elements located on the input sequence.
#' @export
# Halves and full matrix
#--------------------------------
p53sf<-function(seq.ini){
data(bkg.models)
subject<-DNAString(seq.ini)
couples.matrix<-NULL
occurrences.matrix<-NULL
couples.flag<-0
plus.matches <- matchPWM(pwm.1, subject, min.score="80%")
if(length(plus.matches)>0){
sequence<-as.character(plus.matches)
start.seq<-start(plus.matches)
stop.seq<-end(plus.matches)
n.mut<-unlist(lapply(sequence, function(x) ((1000-PWMscoreStartingAt(pwm.1, x, starting.at=1))/100)))
#ww<-unlist(lapply(sequence, function(x) substr(x,5,6)))
occurrences.matrix<-cbind(sequence,start.seq,stop.seq,n.mut)
colnames(occurrences.matrix)<-c("seq","start","stop","n.mut")
for(n in 1:nrow(occurrences.matrix)){
n.couples.run<-which(
(is.between(as.numeric(occurrences.matrix[,"start"])-as.numeric(occurrences.matrix[n,"start"]),9,24))&
(as.numeric(occurrences.matrix[,"n.mut"])+as.numeric(occurrences.matrix[n,"n.mut"])<(3)))
if(length(n.couples.run)>0){
couples.flag<-1
first<-t(matrix(rep(occurrences.matrix[n,],length(n.couples.run)),ncol=length(n.couples.run)))
second<-matrix(occurrences.matrix[n.couples.run,],nrow=length(n.couples.run))
couples.matrix.run<-cbind(first, second, as.numeric(second[,2])-(as.numeric(first[,3])+1),
as.numeric(second[,4])+as.numeric(first[,4]))
couples.matrix<-rbind(couples.matrix,couples.matrix.run)
}
}
}
#------------------------------------------------------
### half sites
#------------------------------------------------------
halves.matrix<-NULL
resume.matrix.halves<-NULL
if(length(which(occurrences.matrix[,"n.mut"]=="0"))>0) {
halves.matrix<-matrix(occurrences.matrix[occurrences.matrix[,"n.mut"]=="0",],nrow=length(which(occurrences.matrix[,"n.mut"]=="0")))
ww.half<-apply(halves.matrix,1,function(x) substr(x[1],5,6))
halves.matrix[,ncol(halves.matrix)]<-ww.half
colnames(halves.matrix)<-c("sequence","start","stop","WW1")
n.mut.tot<-apply(halves.matrix,1,function(x) ((1000-PWMscoreStartingAt(pwm.6, DNAString(x["sequence"]), starting.at=1))/100))
if(length(which(n.mut.tot<1))>0){
halves.matrix<-matrix(halves.matrix[which(n.mut.tot==0),],nrow=length(which(n.mut.tot==0)))
colnames(halves.matrix)<-c("sequence","start","stop","WW1")
WW2<-rep("",nrow(halves.matrix))
spacer<-rep(0,nrow(halves.matrix))
n.mut.tot<-rep(0,nrow(halves.matrix))
mutations<-rep("0000000000",nrow(halves.matrix))
halves.matrix.2<-cbind(as.data.frame(halves.matrix),WW2,spacer,mutations,n.mut.tot)
halves.matrix.2[,"start"]<-as.numeric(halves.matrix[,"start"])
halves.matrix.2[,"stop"]<-as.numeric(halves.matrix[,"stop"])
halves.matrix.2[,"spacer"]<-as.numeric(spacer)
halves.matrix.2[,"n.mut.tot"]<-as.numeric(n.mut.tot)
halves.matrix.2<-transform(halves.matrix.2,
sequence=as.character(sequence),
mutations=as.character(mutations),
WW2=as.character(WW2),
WW1=as.character(WW1))
halves.matrix.2<-halves.matrix.2[,c("start","stop","spacer","n.mut.tot","sequence","mutations","WW1","WW2")]
grades<-rep(1,length=nrow(halves.matrix.2))
grades[halves.matrix.2[,"WW1"]=="AT"]<-2
labels<-rep("half",length=nrow(halves.matrix.2))
resume.matrix.halves<-cbind(halves.matrix.2,labels,grades)
}
}
#---------------------------------------------------------------------
# Analysis of full sites
#---------------------------------------------------------------------
resume.matrix.full<-NULL
if(couples.flag==1){
colnames(couples.matrix)<-c("seq.1","start.1","stop.1","n.mut.1","seq.2","start.2","stop.2","n.mut.2","spacer","n.mut.tot")
work.matrix<-couples.matrix
pattern.matrix<-matrix(0,nrow=nrow(work.matrix),ncol=20)
ww.matrix<-matrix("",nrow=nrow(work.matrix),ncol=2)
colnames(pattern.matrix)<-c("R1","R2","R3","C1","W1","W2","G1","Y1","Y2","Y3",
"R4","R5","R6","C2","W3","W4","G2","Y4","Y5","Y6")
colnames(ww.matrix)<-c("WW1","WW2")
double.pwm<-cbind(pwm.6,pwm.6)
for(p in (1:nrow(work.matrix))){
pair.seq<-paste(couples.matrix[p,"seq.1"],couples.matrix[p,"seq.2"],sep="")
line<-vector(mode="numeric",length=20)
for (l in (1:20)){
letter<-substr(pair.seq,l,l)
line[l]<-as.numeric(double.pwm[as.character(letter),l]=="0")
}
pattern.matrix[p,]<-line
ww.matrix[p,1]<-as.character(substr(pair.seq,5,6))
ww.matrix[p,2]<-as.character(substr(pair.seq,15,16))
}
pattern<-as.data.frame(pattern.matrix)
ww<-as.data.frame(ww.matrix)
couple.matrix<-cbind(work.matrix,pattern,ww)
sequence<-apply(couple.matrix,1,function(x) toupper(substr(seq.ini,x["start.1"],x["stop.2"])))
mutations<-apply(couple.matrix,1,function(x) paste(paste(x[11:20],collapse=""),paste(rep("n",length=x["spacer"]),collapse=""),paste(x[21:30],collapse=""),sep=""))
smart.matrix<-cbind(matrix(work.matrix[,c("start.1","stop.2","spacer","n.mut.tot")],nrow=nrow(couple.matrix)),sequence,mutations,ww,pattern)
colnames(smart.matrix)[1:2]<-c("start","stop")
smart.matrix[,"n.mut.tot"]<-rowSums(pattern)
smart.matrix[,"spacer"]<-as.numeric(couples.matrix[,"spacer"])
smart.matrix[,"start"]<-as.numeric(couples.matrix[,"start.1"])
smart.matrix[,"stop"]<-as.numeric(couples.matrix[,"stop.2"])
class.matrix<-transform(smart.matrix,
sequence=as.character(sequence),
mutations=as.character(mutations),
WW1=as.character(WW1),
WW2=as.character(WW2))
grades<-vector(mode="integer",length=nrow(class.matrix))
labels<-vector(mode="character",length=nrow(class.matrix))
#-----------------------
# Rules for full sites
#-----------------------
#1) O
#----------------------------------------------------
labels[which(class.matrix[,"n.mut.tot"]==0)]<-"O"
#-----------------------------------------------------
grades[which(class.matrix[,"n.mut.tot"]==0)]<-2
grades[which((class.matrix[,"n.mut.tot"]==0)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
grades[which((class.matrix[,"n.mut.tot"]==0)
&(class.matrix[,"spacer"]==0))]<-4
grades[which((class.matrix[,"n.mut.tot"]==0)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-5
#2) A
#----------------------------------------------------
mut.cols<-c("C1","G2","C2","G1")
labels[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-"A"
#----------------------------------------------------
mut.cols<-c("C1","G2")
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0))]<-3
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-4
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]!=0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#------------------------------------
mut.cols<-c("C2","G1")
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0))]<-2
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
#3) B
#----------------------------------------------------
mut.cols<-c("W1","W2","W3","W4")
labels[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-"B"
#----------------------------------------------------
mut.cols<-c("W1","W4")
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0))]<-3
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-4
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]%in%c(1,2)))]<-2
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]>2)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#------------------------------------
mut.cols<-c("W2","W3")
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0))]<-3
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-4
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]>0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#4) C
#----------------------------------------------------
mut.cols<-c("R1","R4","Y3","Y6")
labels[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-"C"
#----------------------------------------------------
mut.cols<-c("R1","Y6")
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0))]<-4
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-5
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]%in%c(1,2)))]<-2
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]>2)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#------------------------------------
mut.cols<-c("R4","Y3")
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0))]<-4
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-5
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]%in%c(1,2)))]<-2
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]>2)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#------------------------------------
#5) D
#----------------------------------------------------
mut.cols<-c("R2","R3","R5","R6","Y1","Y2","Y4","Y5")
labels[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-"D"
#----------------------------------------------------
mut.cols<-c("R2","R3","Y4","Y5")
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0))]<-3
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-4
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]>0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#------------------------------------
mut.cols<-c("R5","R6","Y1","Y2")
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0))]<-2
grades[which((class.matrix[,"n.mut.tot"]==1)
&(rowSums(class.matrix[,mut.cols])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-4
#------------------------------------
#6) AA
#----------------------------------------------------
# Class AA: exactly two mutations, both in the C/G columns.
# The code treats {C1,G1} vs {C2,G2} as the two half-sites (see the
# rowSums(...[1:2]) test below); masks run broad -> specific.
# "AT" below = (WW1=="AT" | WW2=="AT").
mut.cols<-c("C1","G1","C2","G2")
labels[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2))]<-"AA"
#----------------------------------------------------
# same half-site
# (rowSums over C1,G1 != 1 means both mutations sit in the same pair)
mut.cols<-c("C1","G1","C2","G2")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:2]])!=1))]<-1
# same half-site and AT -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:2]])!=1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
# different half-site
#----------------------------------------------------
# one mutation in each half-site -> grade 1 (no further refinement)
mut.cols<-c("C1","G1","C2","G2")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:2]])==1))]<-1
#----------------------------------------------------
# 7) BB
#----------------------------------------------------
# Class BB: exactly two mutations, both in the W columns (W1..W4).
# {W1,W2} vs {W3,W4} are treated as the two half-sites; masks run
# broad -> specific, later statements overwrite earlier ones.
# "AT" below = (WW1=="AT" | WW2=="AT").
mut.cols<-c("W1","W2","W3","W4")
labels[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2))]<-"BB"
#----------------------------------------------------
# same half site
# baseline -> grade 1
mut.cols<-c("W1","W2","W3","W4")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:2]])!=1))]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:2]])!=1)
&(class.matrix[,"spacer"]==0))]<-3
# spacer > 0 and AT -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:2]])!=1)
&(class.matrix[,"spacer"]>0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#----------------------------------------------------
#different half site
# baseline -> grade 1
mut.cols<-c("W1","W2","W3","W4")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:2]])==1))]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:2]])==1)
&(class.matrix[,"spacer"]==0))]<-2
#----------------------------------------------------
# 8) AB
#----------------------------------------------------------
# Class AB: exactly two mutations, one in a C/G column (mut.cols.1) and
# one in a W column (mut.cols.2). Four refinement passes follow:
# same half-site, different half-site, 1st/4th quarter, all-1st/all-4th
# quarter. The quarter masks overlap the half-site masks and come later,
# so they win where both apply -- do not reorder.
# "AT" below = (WW1=="AT" | WW2=="AT").
mut.cols.1<-c("C1","G1","C2","G2")
mut.cols.2<-c("W1","W2","W3","W4")
labels[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-"AB"
#-----------------------------------------------------------
# same half site
# (neither or both mutations in the first-half columns C1,G1,W1,W2)
mut.cols.1<-c("C1","G1","C2","G2")
mut.cols.2<-c("W1","W2","W3","W4")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])!=1))]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])!=1)
&(class.matrix[,"spacer"]==0))]<-3
# spacer > 0 and AT -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])!=1)
&(class.matrix[,"spacer"]>0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#----------------------------------------------------
#different half site
# baseline -> grade 1
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])==1))]<-1
# AT and spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])==1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-2
#----------------------------------------------------
# 1st or 4th quarter
# (both mutations among the outermost columns C1,G2,W1,W4)
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1,4)],mut.cols.2[c(1,4)])])==2)))]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1,4)],mut.cols.2[c(1,4)])])==2))
&(class.matrix[,"spacer"]==0))]<-3
# AT and spacer == 0 -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1,4)],mut.cols.2[c(1,4)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-4
# AT and spacer > 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1,4)],mut.cols.2[c(1,4)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]>0))]<-2
#----------------------------------------------------
# all 1st or all 4th quarter
# (both mutations in C1,W1 together, or both in G2,W4 together)
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(4)])])==2))
)]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(4)])])==2))
&(class.matrix[,"spacer"]==0))]<-3
# AT and spacer == 0 -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(4)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-4
# AT and spacer in {1,2} -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(4)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]%in%c(1,2)))]<-3
#------------------------------------------------------
# 9) BD
#----------------------------------------------------
# Class BD: two mutations, one in a W column (mut.cols.1) and one in an
# R/Y column (mut.cols.2). Same four refinement passes as section AB;
# quarter masks come after and override half-site masks where they
# overlap -- statement order is load-bearing.
# "AT" below = (WW1=="AT" | WW2=="AT").
mut.cols.1<-c("W1","W2","W3","W4")
mut.cols.2<-c("R2","R3","Y1","Y2","R5","R6","Y4","Y5")
labels[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-"BD"
#----------------------------------------------------
# same half site
# (first half = W1,W2 plus R2,R3,Y1,Y2; !=1 means same half)
mut.cols.1<-c("W1","W2","W3","W4")
mut.cols.2<-c("R2","R3","Y1","Y2","R5","R6","Y4","Y5")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])!=1))]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])!=1)
&(class.matrix[,"spacer"]==0))]<-2
# AT and spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])!=1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-3
# AT and spacer > 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])!=1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]>0))]<-2
#----------------------------------------------------
#different half site
# baseline -> grade 1
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])==1))]<-1
# AT and spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])==1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-2
#----------------------------------------------------
# 1st or 4th quarter
# (both mutations among the outer columns W1,W4,R2,R3,Y4,Y5)
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1,4)],mut.cols.2[c(1,2,7,8)])])==2)))]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1,4)],mut.cols.2[c(1,2,7,8)])])==2))
&(class.matrix[,"spacer"]==0))]<-2
# AT and spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1,4)],mut.cols.2[c(1,2,7,8)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-3
# AT and spacer > 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1,4)],mut.cols.2[c(1,2,7,8)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]>0))]<-2
#----------------------------------------------------
# All 1st or all 4th quarter
# (both in W1,R2,R3 together, or both in W4,Y4,Y5 together)
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1,2)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(7,8)])])==2))
)]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1,2)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(7,8)])])==2))
&(class.matrix[,"spacer"]==0)
)]<-3
# AT and spacer == 0 -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1,2)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(7,8)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0)
)]<-4
# AT and spacer > 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1,2)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(7,8)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]>0)
)]<-2
#--------------------------------------------------
# 10) AD
#----------------------------------------------------
# Class AD: two mutations, one in a C/G column (mut.cols.1) and one in an
# R/Y column (mut.cols.2). Same refinement passes as section BD but with
# its own grade values; later masks overwrite earlier ones.
# "AT" below = (WW1=="AT" | WW2=="AT").
mut.cols.1<-c("C1","G1","C2","G2")
mut.cols.2<-c("R2","R3","Y1","Y2","R5","R6","Y4","Y5")
labels[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-"AD"
#----------------------------------------------------
# same half site
# baseline -> grade 1
mut.cols.1<-c("C1","G1","C2","G2")
mut.cols.2<-c("R2","R3","Y1","Y2","R5","R6","Y4","Y5")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])!=1))]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])!=1)
&(class.matrix[,"spacer"]==0))]<-2
# spacer == 0 and AT -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])!=1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
# spacer > 0 and AT -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])!=1)
&(class.matrix[,"spacer"]>0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#----------------------------------------------------
#different half site
# baseline -> grade 1
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])==1))]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])==1)
&(class.matrix[,"spacer"]==0))]<-2
# AT and spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:4])])==1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-3
#----------------------------------------------------
# 1st or 4th quarter
# (both mutations among the outer columns C1,G2,R2,R3,Y4,Y5)
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1,4)],mut.cols.2[c(1,2,7,8)])])==2)))]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1,4)],mut.cols.2[c(1,2,7,8)])])==2))
&(class.matrix[,"spacer"]==0))]<-2
# AT and spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1,4)],mut.cols.2[c(1,2,7,8)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-3
#--------------------------------------------------
# All 1st or all 4th quarter
# (both in C1,R2,R3 together, or both in G2,Y4,Y5 together)
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1,2)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(7,8)])])==2))
)]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1,2)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(7,8)])])==2))
&(class.matrix[,"spacer"]==0)
)]<-3
# AT and spacer == 0 -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1,2)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(7,8)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0)
)]<-4
#-------------------------------------------------------------------------------------
# 11) DD
#----------------------------------------------------
# Class DD: exactly two mutations, both in the R/Y columns below.
# First half-site = R2,R3,Y1,Y2 (mut.cols[1:4]); refinement passes:
# same half, different half, 1st/4th quarter, all-1st/all-4th quarter.
# Later masks overwrite earlier ones -- do not reorder.
# "AT" below = (WW1=="AT" | WW2=="AT").
mut.cols<-c("R2","R3","Y1","Y2","R5","R6","Y4","Y5")
labels[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2))]<-"DD"
#----------------------------------------------------
# same half site
# baseline -> grade 1
mut.cols<-c("R2","R3","Y1","Y2","R5","R6","Y4","Y5")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:4]])!=1))]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:4]])!=1)
&(class.matrix[,"spacer"]==0))]<-2
# AT and spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:4]])!=1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-3
#----------------------------------------------------
#different half site
# baseline -> grade 1
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:4]])==1))]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:4]])==1)
&(class.matrix[,"spacer"]==0))]<-2
# AT and spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:4]])==1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-3
# AT and spacer in {1,2} -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[1:4]])==1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]%in%c(1,2)))]<-2
#----------------------------------------------------
# 1st or 4th quarter
# (both mutations among R2,R3,Y4,Y5)
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[c(1,2,7,8)]])==2))]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[c(1,2,7,8)]])==2)
&(class.matrix[,"spacer"]==0))]<-2
# AT and spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[c(1,2,7,8)]])==2)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-3
# AT and spacer in {1,2} -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(rowSums(class.matrix[,mut.cols[c(1,2,7,8)]])==2)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]%in%c(1,2)))]<-2
#--------------------------------------------------
# All 1st or all 4th quarter
# (both in R2,R3 together, or both in Y4,Y5 together)
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&((rowSums(class.matrix[,mut.cols[c(1,2)]])==2)|(rowSums(class.matrix[,mut.cols[c(7,8)]])==2))
)]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&((rowSums(class.matrix[,mut.cols[c(1,2)]])==2)|(rowSums(class.matrix[,mut.cols[c(7,8)]])==2))
&(class.matrix[,"spacer"]==0))]<-3
# AT and spacer == 0 -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&((rowSums(class.matrix[,mut.cols[c(1,2)]])==2)|(rowSums(class.matrix[,mut.cols[c(7,8)]])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-4
#--------------------------------------------------
# 12) CC
#----------------------------------------------------
# Class CC: exactly two mutations, both in R1,Y3,R4,Y6.
# No half-site split here; grades depend only on spacer and AT
# (AT = WW1=="AT" | WW2=="AT"); later masks overwrite earlier ones.
mut.cols<-c("R1","Y3","R4","Y6")
labels[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2))]<-"CC"
#----------------------------------------------------
# baseline -> grade 1
mut.cols<-c("R1","Y3","R4","Y6")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2))]<-1
# spacer == 0 -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(class.matrix[,"spacer"]==0))]<-4
# AT and spacer == 0 -> grade 5
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-5
# spacer in {1,2} -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&(class.matrix[,"spacer"]%in%c(1,2)))]<-2
# AT and spacer in {1,2} -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols])==2)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]%in%c(1,2)))]<-3
#--------------------------------------------------
# 13) BC
#----------------------------------------------------
# Class BC: two mutations, one in a W column (mut.cols.1) and one in
# R1,Y3,R4,Y6 (mut.cols.2). Passes: same half, different half, then
# all-1st/all-4th quarter; later masks overwrite earlier ones.
# "AT" below = (WW1=="AT" | WW2=="AT").
mut.cols.1<-c("W1","W2","W3","W4")
mut.cols.2<-c("R1","Y3","R4","Y6")
labels[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-"BC"
#----------------------------------------------------
# same half site
# (first half = W1,W2,R1,Y3; !=1 means both mutations in one half)
mut.cols.1<-c("W1","W2","W3","W4")
mut.cols.2<-c("R1","Y3","R4","Y6")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])!=1)
)]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])!=1)
&(class.matrix[,"spacer"]==0))]<-2
# AT and spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])!=1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-3
#----------------------------------------------------
#different half site
# baseline -> grade 1
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])==1)
)]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])==1)
&(class.matrix[,"spacer"]==0))]<-3
# AT and spacer == 0 -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])==1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-4
# AT and spacer in {1,2} -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])==1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]%in%c(1,2)))]<-2
#----------------------------------------------------
# all 1st or all 4th quarter
# (both in W1,R1 together, or both in W4,Y6 together)
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(4)])])==2))
)]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(4)])])==2))
&(class.matrix[,"spacer"]==0))]<-3
# AT and spacer == 0 -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(4)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-4
# AT and spacer in {1,2} -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(4)])])==2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]%in%c(1,2)))]<-2
#--------------------------------------------------
# 14) CD
#----------------------------------------------------
# Class CD: two mutations, one in R1,Y3,R4,Y6 (mut.cols.1) and one in
# the remaining R/Y columns (mut.cols.2). No half-site split; grades
# depend only on spacer and AT (= WW1=="AT" | WW2=="AT"); later masks
# overwrite earlier ones.
mut.cols.1<-c("R1","Y3","R4","Y6")
mut.cols.2<-c("R2","R3","Y1","Y2","R5","R6","Y4","Y5")
labels[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-"CD"
#----------------------------------------------------
# baseline -> grade 1
mut.cols.1<-c("R1","Y3","R4","Y6")
mut.cols.2<-c("R2","R3","Y1","Y2","R5","R6","Y4","Y5")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
)]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]==0))]<-3
# AT and spacer == 0 -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-4
# AT and spacer in {1,2} -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]%in%c(1,2)))]<-2
#----------------------------------------------------
# 15) AC
#----------------------------------------------------
# Class AC: two mutations, one in a C/G column (mut.cols.1) and one in
# R1,Y3,R4,Y6 (mut.cols.2). Passes: same half, different half, then
# all-1st/all-4th quarter; later masks overwrite earlier ones.
# "AT" below = (WW1=="AT" | WW2=="AT").
mut.cols.1<-c("C1","G1","C2","G2")
mut.cols.2<-c("R1","Y3","R4","Y6")
labels[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-"AC"
#----------------------------------------------------
# same half site
# (first half = C1,G1,R1,Y3; !=1 means both mutations in one half)
mut.cols.1<-c("C1","G1","C2","G2")
mut.cols.2<-c("R1","Y3","R4","Y6")
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])!=1))]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])!=1)
&(class.matrix[,"spacer"]==0))]<-2
# spacer == 0 and AT -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])!=1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
#----------------------------------------------------
#different half site
# baseline -> grade 1
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])==1))]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])==1)
&(class.matrix[,"spacer"]==0))]<-2
# spacer == 0 and AT -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,c(mut.cols.1[1:2],mut.cols.2[1:2])])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
#----------------------------------------------------
# all 1st or all 4th quarter
# (both in C1,R1 together, or both in G2,Y6 together)
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(4)])])==2))
)]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(4)])])==2))
&(class.matrix[,"spacer"]==0))]<-3
# spacer == 0 and AT -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==2)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==1)
&((rowSums(class.matrix[,c(mut.cols.1[c(1)],mut.cols.2[c(1)])])==2)|(rowSums(class.matrix[,c(mut.cols.1[c(4)],mut.cols.2[c(4)])])==2))
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-4
#--------------------------------------------------
### 16) A+C+C
#----------------------------------------------------
# Class ACC: exactly three mutations -- one in a C/G column
# (mut.cols.1) plus two in R1,Y3,R4,Y6 (mut.cols.2). Grades split on
# whether the single A-type mutation is in an outer (C1/G2) or inner
# (G1/C2) position; later masks overwrite earlier ones.
# "AT" below = (WW1=="AT" | WW2=="AT").
mut.cols.1<-c("C1","G1","C2","G2")
mut.cols.2<-c("R1","Y3","R4","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2))]<-"ACC"
#----------------------------------------------------
# A in C1 or G2
#----------------------------------------------------
# baseline -> grade 1
mut.cols.1<-c("C1","G1","C2","G2")
mut.cols.2<-c("R1","Y3","R4","Y6")
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==1)
)]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==1)
&(class.matrix[,"spacer"]==0)
)]<-3
# spacer == 0 and AT -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
)]<-4
# spacer in {1,2} and AT -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==1)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
)]<-2
#----------------------------------------------------
# A in G1 or C2
#----------------------------------------------------
# baseline -> grade 1 (the A mutation is NOT in C1/G2, so it is in G1/C2)
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==0)
)]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==0)
&(class.matrix[,"spacer"]==0)
)]<-2
# spacer == 0 and AT -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==0)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
)]<-3
#------------------------------------------------------
# 17) BCC
#----------------------------------------------------
# Class BCC: exactly three mutations -- one in a W column (mut.cols.1)
# plus two in R1,Y3,R4,Y6 (mut.cols.2). Grades split on whether the
# single B-type mutation is in an outer (W1/W4) or inner (W2/W3)
# position; later masks overwrite earlier ones.
# "AT" below = (WW1=="AT" | WW2=="AT").
mut.cols.1<-c("W1","W2","W3","W4")
mut.cols.2<-c("R1","Y3","R4","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2))]<-"BCC"
#----------------------------------------------------
# B in W1 or W4
#----------------------------------------------------
# baseline -> grade 1
mut.cols.1<-c("W1","W2","W3","W4")
mut.cols.2<-c("R1","Y3","R4","Y6")
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==1)
)]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==1)
&(class.matrix[,"spacer"]==0)
)]<-3
# spacer == 0 and AT -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
)]<-4
# spacer in {1,2} and AT -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==1)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
)]<-2
#----------------------------------------------------
# B in W2 or W3
#----------------------------------------------------
# baseline -> grade 1 (the B mutation is NOT in W1/W4, so it is in W2/W3)
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==0)
)]<-1
# spacer == 0 -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==0)
&(class.matrix[,"spacer"]==0)
)]<-2
# spacer == 0 and AT -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,4)])])==0)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
)]<-3
#--------------------------------------------------
### 18) CCC
#----------------------------------------------------
# Class CCC: exactly three mutations, all in R1,Y3,R4,Y6.
# Grades depend only on spacer and AT (= WW1=="AT" | WW2=="AT");
# later masks overwrite earlier ones.
mut.cols<-c("R1","Y3","R4","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols])==3))]<-"CCC"
#----------------------------------------------------
# baseline -> grade 1
mut.cols<-c("R1","Y3","R4","Y6")
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols])==3))]<-1
# spacer == 0 -> grade 3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols])==3)
&(class.matrix[,"spacer"]==0))]<-3
# AT and spacer == 0 -> grade 4
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols])==3)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-4
# AT and spacer in {1,2} -> grade 2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols])==3)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]%in%c(1,2)))]<-2
#--------------------------------------------------
#------------------------------------------------------
# 19) CCD
#----------------------------------------------------
mut.cols.1<-c("R2","R3","Y1","Y2","R5","R6","Y4","Y5")
mut.cols.2<-c("R1","Y3","R4","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2))]<-"CCD"
#----------------------------------------------------
# D in first or last quarter
#----------------------------------------------------
mut.cols.1<-c("R2","R3","Y1","Y2","R5","R6","Y4","Y5")
mut.cols.2<-c("R1","Y3","R4","Y6")
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,2,7,8)])])==1)
)]<-1
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,2,7,8)])])==1)
&(class.matrix[,"spacer"]==0)
)]<-3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,2,7,8)])])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
)]<-4
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,2,7,8)])])==1)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
)]<-2
#----------------------------------------------------
# D in W2 or W3
#----------------------------------------------------
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,2,7,8)])])==0)
)]<-1
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,2,7,8)])])==0)
&(class.matrix[,"spacer"]==0)
)]<-2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==1)
&(rowSums(class.matrix[,mut.cols.2])==2)
&(rowSums(class.matrix[,c(mut.cols.1[c(1,2,7,8)])])==0)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
)]<-3
#--------------------------------------------------
### 20) CCCC
#----------------------------------------------------
mut.cols<-c("R1","Y3","R4","Y6")
labels[which((class.matrix[,"n.mut.tot"]==4)
&(rowSums(class.matrix[,mut.cols])==4))]<-"CCCC"
#----------------------------------------------------
mut.cols<-c("R1","Y3","R4","Y6")
grades[which((class.matrix[,"n.mut.tot"]==4)
&(rowSums(class.matrix[,mut.cols])==4))]<-1
grades[which((class.matrix[,"n.mut.tot"]==4)
&(rowSums(class.matrix[,mut.cols])==4)
&(class.matrix[,"spacer"]==0))]<-2
grades[which((class.matrix[,"n.mut.tot"]==4)
&(rowSums(class.matrix[,mut.cols])==4)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]==0))]<-3
grades[which((class.matrix[,"n.mut.tot"]==4)
&(rowSums(class.matrix[,mut.cols])==4)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT"))
&(class.matrix[,"spacer"]%in%c(1,2)))]<-2
#-------------------------------------------------
#------------------------------------------------
### 21) ABC
#First quarter
#---------------------------------------------------------
mut.cols.1<-c("C1","W1")
mut.cols.2<-c("R1","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-"ABC"
#-----------------------------------------------------------
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#-----------------------------------------------------
#Fourth quarter
#---------------------------------------------------------
mut.cols.1<-c("G2","W4")
mut.cols.2<-c("R1","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-"ABC"
#-----------------------------------------------------------
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#-----------------------------------------------------
#------------------------------------------------
### 22) BCD
#First quarter
#---------------------------------------------------------
mut.cols.1<-c("W1","W1")
mut.cols.2<-c("R2","R3")
mut.cols.3<-c("R1","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1))]<-"BCD"
#-----------------------------------------------------------
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1)
&(class.matrix[,"spacer"]==0))]<-2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#--------------------------------------------------------
#Fourth quarter
#---------------------------------------------------------
mut.cols.1<-c("W4","W4")
mut.cols.2<-c("Y4","Y5")
mut.cols.3<-c("R1","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1))]<-"BCD"
#-----------------------------------------------------------
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1)
&(class.matrix[,"spacer"]==0))]<-2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#------------------------------------------------
#------------------------------------------------
### 23) ACD
#First quarter
#--------------------------------------------------------------
mut.cols.1<-c("C1","C1")
mut.cols.2<-c("R2","R3")
mut.cols.3<-c("R1","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1))]<-"ACD"
#--------------------------------------------------------------
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1)
&(class.matrix[,"spacer"]==0))]<-2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
#--------------------------------------------------------
#Fourth quarter
#--------------------------------------------------------------
mut.cols.1<-c("G2","G2")
mut.cols.2<-c("Y4","Y5")
mut.cols.3<-c("R1","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1))]<-"ACD"
#--------------------------------------------------------------
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1)
&(class.matrix[,"spacer"]==0))]<-2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(rowSums(class.matrix[,mut.cols.3])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
#--------------------------------------------------------
#-------------------
### 24) CDD
#-------------------
# all D and C in the 1st or 4th quarter
mut.cols.1<-c("R2","R3","Y4","Y5")
mut.cols.2<-c("R1","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-"CDD"
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#First quarter
#---------------------------------------------------------
mut.cols.1<-c("R2","R3")
mut.cols.2<-c("R1","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-"CDD"
#-----------------------------------------------------------
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]==0))]<-2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#-----------------------------------------------------
#Fourth quarter
#---------------------------------------------------------
mut.cols.1<-c("Y4","Y5")
mut.cols.2<-c("R1","Y6")
labels[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-"CDD"
#-----------------------------------------------------------
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1))]<-1
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]==0))]<-2
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]==0)
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-3
grades[which((class.matrix[,"n.mut.tot"]==3)
&(rowSums(class.matrix[,mut.cols.1])==2)
&(rowSums(class.matrix[,mut.cols.2])==1)
&(class.matrix[,"spacer"]%in%c(1,2))
&((class.matrix[,"WW1"]=="AT")|(class.matrix[,"WW2"]=="AT")))]<-2
#-----------------------------------------------------
resume.matrix<-cbind(class.matrix,labels,grades)
if(length(which(resume.matrix[,"grades"]>0))>0){
resume.matrix.full<-resume.matrix[which(resume.matrix[,"grades"]>0),c("start","stop","spacer","n.mut.tot","sequence","mutations","WW1","WW2","labels","grades")]
}
}
pre.complete<-rbind(resume.matrix.full,resume.matrix.halves)
return(pre.complete)
}
|
bd79727e646a2fd1cf96e2b25666e85adf3b9732
|
1c7545bb3e9c165b8a630422480d086000047ab2
|
/historic/template/ui.R
|
2ee8b6b792fe8ce97c9069ec787487ed8016db0f
|
[] |
no_license
|
MCTTAN/virtulis
|
b4c20a3266f0065158ce6fcdd140d971207af7e5
|
c88ad5aa1921dc9598635e77add26baa6de2130a
|
refs/heads/master
| 2020-06-29T14:51:20.104027
| 2020-04-23T00:29:00
| 2020-04-23T00:29:00
| 200,564,063
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 943
|
r
|
ui.R
|
# ui.R — Shiny UI definition: a full-page leaflet map with a draggable
# year/month selector panel. The value of the last expression
# (navbarPage(...)) is the UI object returned to Shiny.

# Attach leaflet, installing it first if it is missing.
# NOTE(review): installing packages at app load time is fragile on a server;
# consider declaring the dependency in the deployment environment instead.
if(!require(leaflet)){
  install.packages("leaflet")
  library(leaflet)
}
# FIX: removed a redundant second library(leaflet) call here — the package is
# already attached by the require()/library() guard above.

# Choices for the two selectInput widgets below.
months <- seq(1, 12)
years <- seq(2000, 2008)

# NOTE(review): this HTML() value is evaluated and discarded — it is not part
# of the returned UI object. Kept for fidelity; as written it has no effect.
HTML('<div data-iframe-height></div>')

navbarPage(
  title = "IBM Data Science Experience", id = "nav",
  tabPanel(
    # NOTE(review): this div is passed as tabPanel()'s `title` argument —
    # confirm that is intended rather than a missing title string.
    div(class = "outer",
        tags$head(
          # Include our custom CSS and the map-navigation helper script.
          includeCSS("styles.css"),
          includeScript("gomap.js")
        ),
        # Map fills the whole tab.
        leafletOutput("map", width = "100%", height = "100%"),
        # Floating, draggable control panel over the map.
        absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
                      draggable = TRUE, top = 60, left = "auto", right = 20, bottom = "auto",
                      width = 330, height = "auto",
                      h2("Choose year & month"),
                      selectInput("year", "Year", years),
                      selectInput("month", "Month", months)
        ),
        # Attribution line; `fontsize` becomes a plain HTML attribute.
        tags$div(id = "cite", 'Data Scientists: Jorge Castañón, Oscar Lara, Shahab Sheikh, Jihyoung Kim', fontsize = 20)
    )
  )
)
|
e67e5ac58b987d7eb36707f37fb2e72801c8ecff
|
f1556a59213e9dafb25db0d01760a1443c55b6b2
|
/models_old/LGBM_01/スコア.R
|
00964dfc267128d01a6ebe92c9b7b8a210240b30
|
[] |
no_license
|
you1025/probspace_youtube_view_count
|
0e53b0e6931a97b39f04d50a989a1c59522d56a7
|
f53d3acd6c4e5e6537f8236ad545d251278decaa
|
refs/heads/master
| 2022-11-13T13:22:51.736741
| 2020-07-12T04:14:35
| 2020-07-12T04:14:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,464
|
r
|
スコア.R
|
# train_rmse: xxxxxxxx, test_rmse: xxxxxxxx - xxx
# train_rmse: 1.024154, test_rmse: 1.076321 - categoryId + likes + dislikes + comment_count(ベースライン)
# train_rmse: 1.039356, test_rmse: 1.081406 - categoryId(自前 LabelEncoding)
# train_rmse: 1.024154, test_rmse: 1.076321 - comments_disabled(カテゴリ指定)
# train_rmse: 0.921678, test_rmse: 1.003369 - ↑comments_disabled(自前 LabelEncoding)
# train_rmse: 0.921678, test_rmse: 1.003369 - ratings_disabled(カテゴリ指定)
# train_rmse: 0.8779254,test_rmse: 0.9466283- ↑ratings_disabled(自前 LabelEncoding)
# めんどいからカテゴリ値は自前 Encoding で良いと思う
# train_rmse: 0.8549646, test_rmse: 0.938067 - ↑title_length
# train_rmse: 0.7769351, test_rmse: 0.8799019- ↑published_year
# train_rmse: 0.7235635, test_rmse: 0.8702272- ↑published_month
# train_rmse: 0.7252054, test_rmse: 0.8731753 - published_month_x + published_month_y
# train_rmse: 0.6992839, test_rmse: 0.8750406 - published_day
# train_rmse: 0.7186323, test_rmse: 0.8752473 - published_day_x + published_day_y
# train_rmse: 0.7098051, test_rmse: 0.8743768 - published_term_in_month
# train_rmse: 0.7132829, test_rmse: 0.8671304 - ↑published_dow
# train_rmse: 0.7034046, test_rmse: 0.8668063 - published_dow_x + published_dow_y
# train_rmse: 0.7004901, test_rmse: 0.8639794 - ↑published_hour
# train_rmse: 0.6792362, test_rmse: 0.8658031 - published_hour_x + published_hour_y
# train_rmse: 0.6357584, test_rmse: 0.8537153 - ↑channel_title_length
# train_rmse: 0.6419064, test_rmse: 0.8476512 - ↑flg_categoryId_low
# train_rmse: 0.6840687, test_rmse: 0.8508976 - flg_categoryId_high
# train_rmse: 0.6544945, test_rmse: 0.8519549 - flg_no_tags
# train_rmse: 0.6109983, test_rmse: 0.8458459 - ↑tag_characters
# train_rmse: 0.580868, test_rmse: 0.8393788 - ↑tag_count
# train_rmse: 0.5854865, test_rmse: 0.8422699 - flg_no_description
# train_rmse: 0.5939091, test_rmse: 0.8388593 - ↑description_length
# train_rmse: 0.5967388, test_rmse: 0.8393317 - flg_url
# train_rmse: 0.5630573, test_rmse: 0.8377533 - ↑url_count
# train_rmse: 0.5569464, test_rmse: 0.8354229 - ↑days_from_published
# train_rmse: 0.5755771, test_rmse: 0.8307188 - ↑diff_likes_dislikes
# train_rmse: 0.5674941, test_rmse: 0.8316972 - sum_likes_dislikes
# train_rmse: 0.5454238, test_rmse: 0.8310772 - ratio_likes
# train_rmse: 0.5354847, test_rmse: 0.8317923 - sum_likes_dislikes_comments
# train_rmse: 0.5597025, test_rmse: 0.8283579 - ↑ratio_comments_likedis
# train_rmse: 0.5323023, test_rmse: 0.8101541 - ↑flg_japanese
# train_rmse: 0.5761294, test_rmse: 0.8161116 - flg_emoji
# train_rmse: 0.5679533, test_rmse: 0.8122008 - flg_official
# train_rmse: 0.5789512, test_rmse: 0.81652 - flg_movie_number
# train_rmse: 0.5459787, test_rmse: 0.8195892 - published_hour2
# train_rmse: 0.5586361, test_rmse: 0.8104735 - published_hour2_x + published_hour2_y
# train_rmse: 0.550295, test_rmse: 0.8166714 - comments_ratings_disabled_japanese
# train_rmse: 0.4909489, test_rmse: 0.817313 - flg_comments_ratings_disabled_japanese_high
# train_rmse: 0.5347964, test_rmse: 0.8164009 - flg_comments_ratings_disabled_japanese_very_high
# train_rmse: 0.5027024, test_rmse: 0.814753 - flg_comments_ratings_disabled_japanese_low
# train_rmse: 0.5329838, test_rmse: 0.8175195 - flg_comments_ratings_disabled_japanese_very_low
# train_rmse: 0.5451223, test_rmse: 0.8095946 - ↑"mean" 全部のせ
# train_rmse: 0.5305789, test_rmse: 0.8080263 - ↑"median" 全部のせ
# train_rmse: 0.5461993, test_rmse: 0.8074823 - ↑"min" 全部のせ
# train_rmse: 0.5429794, test_rmse: 0.8121191 - "max" 全部のせ
# train_rmse: 0.5276186, test_rmse: 0.8096625 - "sd" 全部のせ
# train_rmse: 0.5523874, test_rmse: 0.8110684 - -flg_categoryId_high_*
# train_rmse: 0.5560334, test_rmse: 0.8069065 - ↑-flg_no_tags_*
# train_rmse: 0.55471, test_rmse: 0.8061451 - ↑-flg_no_description_*
# train_rmse: 0.5105766, test_rmse: 0.8112012 - -flg_url_*
# train_rmse: 0.5934399, test_rmse: 0.8087113 - -flg_emoji_*
# train_rmse: 0.5892704, test_rmse: 0.8096245 - -flg_official_*
# train_rmse: 0.5479506, test_rmse: 0.8109986 - -flg_movie_number_*
# train_rmse: 0.5256873, test_rmse: 0.808933 - -comments_ratings_disabled_japanese_*
# train_rmse: 0.5367527, test_rmse: 0.8127005 - min と max 入れ替え
# train_rmse: xxxxxxxxx, test_rmse: xxxxxxxxx - xxx
# train_rmse: xxxxxxxxx, test_rmse: xxxxxxxxx - xxx
|
7590cec06ddc16649932338c41344aa5ae041737
|
f101dadb65e8613751a8db176f23516d17b1bc09
|
/src/server/manhattan.R
|
d3e3cba5125af9df123f317aa5fcea0ae6cc3809
|
[] |
no_license
|
Raistrawby/Rshiny_project
|
017b1c3b7319c9ac309b074b60953d9e0a6afc0b
|
42fbb9c822bc960dc4af32d3d39a4ad7760ba174
|
refs/heads/main
| 2023-05-23T01:59:47.217100
| 2021-06-09T09:31:48
| 2021-06-09T09:31:48
| 339,028,705
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,105
|
r
|
manhattan.R
|
library(tidyverse)
# load_data <- function() {
# geneExpression = readFile("", T, "SYMBOL", org.Hs.eg.db)
# geneList <- get_geneList(geneExpression, 0.75)
#
# go_gse <- gse_analysis(geneList, "SYMBOL")
# go_sea <- sea_analysis(geneList, "SYMBOL")
#
# KEGG_GSEA <- get_KEGG_GSEA(geneList$GSEA, "hsa")
# KEGG_SEA <- get_SEA_KEGG(geneList$SEA, "hsa")
#
# interpro_db <- get_interpro_db(names(geneList$GSEA), "hsa")
# interpro_gsea <- get_interpro_gsea(geneList$GSEA, interpro_db)
# interpro_sea <- get_interpro_sea(names(geneList$SEA), interpro_db)
#
# return(
# list(
# "go_gse" = go_gse,
# "go_sea" = go_sea,
# "KEGG_GSEA" = KEGG_GSEA,
# "KEGG_SEA" = KEGG_SEA,
# "interpro_gsea" = interpro_gsea,
# "interpro_sea" = interpro_sea
# )
# )
# }
# Extract the p-value column from an enrichment result object and tag each
# row with its source database and analysis type.
#
# NOTE(review): the stored value is @result$pvalue (raw p-value) but the
# column is named "p.adjust"; downstream code keys on that column name, so
# the naming is preserved — confirm whether the adjusted value was intended.
create_small_df <- function(result_obj, source, analysis) {
  out <- data.frame(result_obj@result$pvalue, source, analysis)
  names(out) <- c("p.adjust", "source", "analysis")
  out
}
# Stack the six enrichment result sets held in `data` into one long data
# frame of p-values, in a fixed (source, analysis) order, via create_small_df().
create_all_set_df <- function(data) {
  pieces <- list(
    create_small_df(data$go_sea, "GO", "SEA"),
    create_small_df(data$go_gse, "GO", "GSEA"),
    create_small_df(data$KEGG_SEA, "KEGG", "SEA"),
    create_small_df(data$KEGG_GSEA, "KEGG", "GSEA"),
    create_small_df(data$interpro_sea, "INTERPRO", "SEA"),
    create_small_df(data$interpro_gsea, "INTERPRO", "GSEA")
  )
  do.call(rbind, pieces)
}
# Build a Manhattan-style plot of -log(p-value) for all six enrichment
# result sets (GO / KEGG / INTERPRO crossed with SEA / GSEA), with one
# x-facet per (source, analysis) pair and a dashed threshold at p = 0.05.
# NOTE(review): "Manatthan" is a typo for "Manhattan"; the name is kept
# because callers reference this exact symbol.
getManatthanPlot <- function(go_gse, go_sea, KEGG_GSEA, KEGG_SEA, interpro_gsea, interpro_sea){
# Bundle the six result objects under the names create_all_set_df() expects.
data = list(
"go_gse" = go_gse,
"go_sea" = go_sea,
"KEGG_GSEA" = KEGG_GSEA,
"KEGG_SEA" = KEGG_SEA,
"interpro_gsea" = interpro_gsea,
"interpro_sea" = interpro_sea
)
# Long data frame with one row per term: p.adjust, source, analysis.
test = create_all_set_df(data)
# Sequential x position for each point (Manhattan-plot style).
test$position = seq.int(nrow(test))
test %>%
group_by(source, analysis) %>%
# Shuffle x positions within each facet so points spread out evenly.
# NOTE(review): sample() is not seeded, so the layout differs between runs.
mutate(position = sample(position)) %>%
# Facet key combining source and analysis (the same pairing is reused for colour).
mutate(test = paste(source, analysis)) %>%
ggplot(aes(
x = position,
y = -log(p.adjust),
color = paste(source, analysis)
)) +
geom_point(alpha = 0.7) +
facet_grid(. ~ test,
scales = "free_x",
space = "free_x",
switch = "x") +
# NOTE(review): points with -log(p) above 20 are silently dropped by limits=.
scale_y_continuous(limits = c(0, 20), expand = c(0, 0)) +
# NOTE(review): this fill label has no visible effect since the legend is
# suppressed below (legend.position = "none").
labs(fill = "Source and analysis type") +
theme_bw() +
# Dashed significance threshold at p = 0.05 (natural log scale).
geom_hline(yintercept = -log(0.05), linetype = "dashed") +
theme(
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
panel.grid = element_blank(),
panel.grid.major = element_blank(),
axis.line.y = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(fill = NA),
strip.text = element_text(face = "bold"),
strip.text.x = element_text(angle = 90, hjust = 1),
legend.position = "none"
)
}
|
57231407f55ec7f5a76cd24bf0e259acfa5c6bc8
|
c0856a1759cd37e8537bc67762ff962d90b47ee9
|
/server.R
|
36a1b967f8fcf28b316acf14a6e7d2b155786f49
|
[] |
no_license
|
OmidAghababaei/Developing_Data_Products_Project
|
9cdcdfebe7db2782557eda946ed20e57a3eaae52
|
180ef6c46e6f739025f0538ebf0fcb5060f5435b
|
refs/heads/main
| 2023-03-05T01:21:03.914473
| 2021-02-14T05:21:37
| 2021-02-14T05:21:37
| 338,712,847
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,339
|
r
|
server.R
|
library(shiny)
library(ggplot2)
library(grid)
library(gridExtra)
library(plotly)
# server.R — Shiny server for an airquality demo: fits three simple linear
# models on the built-in `airquality` data set and renders an interactive
# plotly scatter plot with the model prediction for each slider input.
shinyServer(
  function(input, output) {
    # Plain vectors pulled out of airquality; they are referenced by name
    # inside aes() below (ggplot resolves them from this environment).
    Solar <- airquality$Solar.R
    Ozone <- airquality$Ozone
    Temperature <- airquality$Temp

    # Three univariate linear models (lm() drops rows with NA by default).
    model1 <- lm(Ozone ~ Solar.R, data = airquality)  # ozone ~ solar radiation
    model2 <- lm(Temp ~ Solar.R, data = airquality)   # temperature ~ solar radiation
    model3 <- lm(Ozone ~ Temp, data = airquality)     # ozone ~ temperature

    ##### Model 1: predict Ozone from Solar.R
    model1pred <- reactive({
      SolarInput <- input$SliderSolar1
      predict(model1, newdata = data.frame(Solar.R = SolarInput))
    })

    output$pred1 <- renderText({
      model1pred()
    })

    output$plot1 <- renderPlotly({
      SolarInput <- input$SliderSolar1
      # Axis bounds padded by 5 units, computed on complete cases only.
      # NOTE(review): bty/pch/xlim/ylim passed to ggplot(), and ylab/xlab
      # inside aes(), are ignored by ggplot2; kept for fidelity.
      minS = min(airquality[complete.cases(airquality),]$Solar.R) - 5
      maxS = max(airquality[complete.cases(airquality),]$Solar.R) + 5
      minO = min(airquality[complete.cases(airquality),]$Ozone) - 5
      maxO = max(airquality[complete.cases(airquality),]$Ozone) + 5
      g1 <- ggplot(airquality, aes( y = Ozone, x = Solar,
                                    ylab = "Ozone",xlab = "Solar.R"), bty = "n", pch = 16,
                   xlim = c(minS, maxS), ylim = c(minO, maxO)) + geom_point()
      if(input$showModel1){
        g1 <- g1 + stat_smooth(method = 'lm', color = "red", lwd = 2)
      }
      g1 <- g1 + ggtitle("Ozon Prediction with Solar predictor")
      # Overlay the model prediction for the current slider value.
      g1 <- g1 + geom_point(x = SolarInput, y = model1pred(), col = "Green", pch = 16, cex = 6)
      ggplotly(g1)
    })

    ### Model 2: predict Temp from Solar.R
    model2pred <- reactive({
      SolarInput <- input$SliderSolar2
      predict(model2, newdata = data.frame(Solar.R = SolarInput))
    })

    output$pred2 <- renderText({
      model2pred()
    })

    output$plot2 <- renderPlotly({
      SolarInput <- input$SliderSolar2
      minT = min(airquality[complete.cases(airquality),]$Temp) - 5
      maxT = max(airquality[complete.cases(airquality),]$Temp) + 5
      minS = min(airquality[complete.cases(airquality),]$Solar.R) - 5
      maxS = max(airquality[complete.cases(airquality),]$Solar.R) + 5
      # BUG FIX: the original wrote ylim = c(minO, maxO) here, but minO/maxO
      # are never defined inside this renderPlotly expression; the y variable
      # is Temperature, so the intended bounds are c(minT, maxT).
      # NOTE(review): the ylab = "Ozone" inside aes() below is inert and
      # mislabelled (y is Temperature); kept byte-identical for fidelity.
      g2 <- ggplot(airquality, aes( y = Temperature, x = Solar, ylab = "Ozone",
                                    xlab = "Solar.R"), bty = "n", pch = 16,
                   xlim = c(minS, maxS), ylim = c(minT, maxT)) + geom_point()
      if(input$showModel2){
        g2 <- g2 + stat_smooth(method = 'lm', color = "Green", lwd = 2)
      }
      # NOTE(review): "Temprature" typo is in the user-facing title string;
      # left unchanged to preserve runtime output.
      g2 <- g2+ ggtitle("Temprature Prediction with Solar Predictor")
      g2 <- g2 + geom_point(x = SolarInput, y = model2pred(), position = "identity", col = "blue", pch = 16, cex = 6)
      ggplotly(g2)
    })

    ##### Model 3: predict Ozone from Temp
    model3pred <- reactive({
      TempInput <- input$SliderTemp
      predict(model3, newdata = data.frame(Temp = TempInput))
    })

    output$pred3 <- renderText({
      model3pred()
    })

    output$plot3 <- renderPlotly({
      TempInput <- input$SliderTemp
      minT = min(airquality[complete.cases(airquality),]$Temp) - 5
      maxT = max(airquality[complete.cases(airquality),]$Temp) + 5
      minO = min(airquality[complete.cases(airquality),]$Ozone) - 5
      maxO = max(airquality[complete.cases(airquality),]$Ozone) + 5
      g3 <- ggplot(airquality, aes( y = Ozone, x = Temperature, ylab = "Ozone",
                                    xlab = "Temp"), bty = "n", pch = 16,
                   xlim = c(minT, maxT), ylim = c(minO, maxO)) + geom_point()
      if(input$showModel3){
        g3 <- g3 + stat_smooth(method = 'lm', color = "blue", lwd = 2)
      }
      g3 <- g3 + ggtitle("Ozone Prediction with Temperature Predictor")
      g3 <- g3 + geom_point(x = TempInput, y = model3pred(), position = "identity", col = "Yellow", pch = 16, cex = 6)
      ggplotly(g3)
    })
  })
|
34b58babcedac6540d7a07bcc924b7d2e7a6160d
|
1544494905a24c6f85b42438668e9bfb5a5bf5bd
|
/data/my trait data.R
|
b5a7a073d5c28091a23a8657f3afd53aed37ddc3
|
[] |
no_license
|
maudbv/Abundance-richness-correlation-BP
|
42b96e0a094ce7eb5f6c6bb6b53c8ddf3e52c862
|
6ef20818fc08866e25a574324cf5ec00f39c3089
|
refs/heads/master
| 2021-06-22T16:34:33.407196
| 2020-12-18T12:43:43
| 2020-12-18T12:43:43
| 30,317,336
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,519
|
r
|
my trait data.R
|
# Import trait data from Banks Peninsula (from summer 2014/2015)
library(doBy)
# import Seed mass data (in g)
mySM <- read.csv(file="data/traits/SM measurements.csv",na.str=c("","NA"), as.is=T, stringsAsFactor=F)
mySM.mean <- summaryBy(SM.mg. ~ Sp.code, data= mySM, na.rm= T)
# import height data (in cm)
myH<- read.csv(file="data/traits/Height measurements.csv",na.str=c("","NA"), as.is=T, stringsAsFactor=F)
myH$Hmax = apply(cbind(myH$Hrep,myH$Hveg), 1, FUN = max, na.rm=T)
myH$Hmax [myH$Hmax== -Inf] = NA
myH.mean <- summaryBy(Hveg + Hrep + Hmax ~ spcode, data= myH, na.rm= T)
## leaf traits:
# import leaf area photo numbers
leaf.pictures<- read.csv(file="data/traits/leaf area pictures.csv", sep = c(","),
na.str=c("","NA"), as.is=T, stringsAsFactor=F)
leaf.pictures$nb.leaves.num <- as.numeric(leaf.pictures$nb.leaves.num)
#import leaf mass data
leaf.mass <- read.csv(file="data/traits/leaf mass measurements.csv",na.str=c("","NA"), sep = c(","), as.is=T, stringsAsFactor=F)
# import leaf area measurements from ImageJ
files <- paste("data/traits/measures",dir(path = "data/traits/measures"), sep ="/")
area.raw <- do.call("rbind", lapply(files,
function(x) read.delim(x, header = T,sep="\t",stringsAsFactors = FALSE)))
write.csv(area.raw,"area.raw.csv")
area.sum <- summaryBy(Area~ Label,area.raw, FUN = sum)
# match leaf pictures with leaf area measurements
# For each photo number, find its total measured area in area.sum.
# NOTE(review): grep() uses the photo number as a regular expression, so one
# number can match several Labels (e.g. the same digits inside an iPhone
# "IMG_..." file name); the branches below disambiguate those collisions —
# confirm against the photo numbering scheme.
# Scalar if-conditions use the short-circuiting && instead of the
# elementwise & (both operands are always length-1 here, so the result is
# unchanged).
areas <-sapply(leaf.pictures$photo. , FUN = function(x) {
  ind <- grep(x, area.sum$Label)
  area <- NA
  # if the number is also an iPhone picture, drop the IMG match
  if (length(ind) > 1 && length(grep("IMG", area.sum$Label[ind])) == 1) {
    ind <- ind[-grep("IMG", area.sum$Label[ind])]
  }
  # if the number exists also as a picture from Mica's camera, drop the DSC match
  if (length(ind) > 1 && length(grep("DSC", area.sum$Label[ind])) == 1) {
    ind <- ind[-grep("DSC", area.sum$Label[ind])]
  }
  # if we have a normal unique index of the picture, extract the total area:
  if (length(ind) == 1) area <- area.sum$Area.sum[ind]
  # ImageJ sub-shapes labelled "label:shape" => sum the areas
  if (length(ind) > 1 && length(grep(":", area.sum$Label[ind])) == (length(ind) - 1)) {
    area <- sum(area.sum$Area.sum[ind])
    ind <- NA
  }
  # an "_alternate" re-measurement of the same photo => sum the areas
  if (length(ind) > 1 && length(grep("_alternate", area.sum$Label[ind])) == 1) {
    area <- sum(area.sum$Area.sum[ind])
    ind <- NA
  }
  # a hyphenated sub-picture of the same photo => sum the areas
  if (length(ind) > 1 && length(grep("-", area.sum$Label[ind])) == 1) {
    area <- sum(area.sum$Area.sum[ind])
    ind <- NA
  }
  # if none of the above resolved the ambiguity: leave area = NA and warn
  if (length(ind) > 1) warning(paste(length(ind), "measures for", x))
  return(area)
})
# Attach the total measured area of each photo to its row in leaf.pictures;
# match() realigns areas to the table's row order (assumes photo. is
# character so sapply named the result by photo number — TODO confirm).
leaf.pictures$area.total <- areas[match(leaf.pictures$photo., names(areas))]
# Snapshot of the photo table with areas attached, for checking by hand.
write.csv(leaf.pictures, file = "leaf.pictures.csv")
# SUM area and mass per subrep (sum the subpictures)
# One subrep may be split over several photos, so total the leaf count and
# area per spcode/obs/rep/subrep (summed columns come back with a suffix).
subrep_area<- summaryBy(nb.leaves.num + area.total ~ spcode +obs + rep + subrep ,id= ~ name + type ,leaf.pictures, FUN = sum, na.rm =F)
# Total dry mass per obs/rep/subrep; dry.mass..mg. is coerced to numeric
# inside the formula before summing.
subrep_mass<- summaryBy(as.numeric(dry.mass..mg.)~ name + obs + rep + subrep, leaf.mass, FUN = sum )
# merge leaf mass with leaf pictures
mySLA <- merge(subrep_area,subrep_mass, by.x=c("obs","rep","subrep", "name"),by.y=c("obs","rep","subrep", "name"))
mySLA <- orderBy(~ name + obs + rep + subrep, mySLA)
# NOTE(review): this rename is purely positional and depends on the exact
# column order produced by merge() above — fragile if the formulas change.
names(mySLA) <- c("obs","rep","subrep", "name","spcode", "nb.leaves","area.total","type", "dry.mass")
# merge large leaves that are spread over different subreps (ferns in particular)
# obs numbers :
# 22, 23, 93 Pteridium esculentum
# 66 Polystichum vestitum
# 10, 11, 9, 8, 7 Polystichum oculatum
obs_large_leaves <- c(22, 23,93, 66,10,11,9,8,7)
# Pool all subreps of each of these observations into a single row (sums the
# numeric columns), then rename positionally to match mySLA's columns.
tmp <- summaryBy(.~ name + obs, mySLA[mySLA$obs %in% obs_large_leaves,], id =~ spcode + rep + subrep + type, FUN = sum)
names(tmp) <- c("name","obs", "nb.leaves","area.total", "dry.mass","spcode","rep","subrep","type")
# Replace the per-subrep rows of those observations with the pooled rows.
# NA-safe logical mask: the original `-which(...)` negative indexing would
# silently drop EVERY row of mySLA if no obs matched (integer(0) index).
mySLA <- mySLA[!(mySLA$obs %in% obs_large_leaves), ]
mySLA <- rbind(mySLA, tmp[,names(mySLA)])
### Remove erroneous data :
# All three filters use NA-safe logical masks instead of the original
# `-which(...)` / `-grep(...)` negative indexing: with negative indexing an
# empty match (integer(0)) silently drops EVERY row of mySLA.  When matches
# exist, the rows removed are identical to before.
# Galium propinquum
mySLA <- mySLA[!(mySLA$spcode %in% "GALPRO"), ]
# Asplenium hookerianum : keep only rounded leaf samples
mySLA <- mySLA[!grepl("elongated", mySLA$type), ]
# Carmichaelia australis : keep only photosynthetic stems, not the small leaves
mySLA <- mySLA[!(mySLA$spcode %in% "CARAUS" & mySLA$type %in% "leaves"), ]
# Calculate SLA and LA
# Derive the per-leaf traits in a single transform() call; the three new
# columns are appended in the same order as the original assignments.
mySLA <- transform(
  mySLA,
  leaf.area     = area.total / nb.leaves,  # mean leaf area, mm2
  leaf.dry.mass = dry.mass / nb.leaves,    # mean leaf dry mass, mg
  sla           = area.total / dry.mass    # SLA, mm2/mg == m2/Kg
)
# Calculate mean leaf traits per species
# Mean and sd of leaf area, dry mass and SLA per species (columns get
# .mean / .sd suffixes; sla.mean is used by the trait merge below).
mySLA.mean <- summaryBy(leaf.area + leaf.dry.mass + sla ~ name + spcode ,data= mySLA, FUN = c(mean, sd), na.rm=T)
# Sample sizes: number of subrep rows and total leaves per species.
# NOTE(review): [,3] is positional — it assumes the summary value is the
# third column after the two grouping columns.
mySLA.mean$nreps <- summaryBy(obs ~ name + spcode ,data= mySLA, FUN = length) [,3]
mySLA.mean$nleaves <- summaryBy(nb.leaves ~ name + spcode ,data= mySLA, FUN = sum, na.rm=T) [,3]
### merge mean traits into one dataframe
# Combine per-species seed mass (mySM.mean) and height (myH.mean) summaries;
# all = TRUE keeps species present in only one of the two tables.
mytraits <- merge(mySM.mean, myH.mean, by.x = "Sp.code", by.y = "spcode", all = TRUE)
# Log-transform maximum height and seed mass.
mytraits$logHmax <- log(mytraits$Hmax.mean)
mytraits$logSM <- log(mytraits$SM.mg..mean)
# Rename positionally to short trait labels (order must match the merge
# output above plus the two log columns just added).  `<-` replaces the
# original's `=` used as top-level assignment.
names(mytraits) <- c("Sp.code", "SM", "Hveg", "Hrep", "Hmax", "logHmax", "logSM")
# Attach mean SLA per species, matched by species code (NA where a species
# has no SLA measurement).
mytraits$SLA <- mySLA.mean[match(mytraits$Sp.code, mySLA.mean$spcode), "sla.mean"]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.