blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6b4246edf59f096db1bf7e1b2966b40fa12de2a8 | fd1dcaa81dc2344f7f63dc79ecd6451b5adbc973 | /lab5/l5p2.R | c472a6506f25cfdc6e8a64d867b8abf41d21cb29 | [] | no_license | Srinjoy-Santra/Advanced-Programming-Lab | b7ba6cdcdeb7f67c4073019c547dd27820e4ba52 | cbb99b60876af031ff5702cedd6cf43fdee57b2c | refs/heads/master | 2021-10-25T11:48:46.264588 | 2019-04-04T01:57:14 | 2019-04-04T01:57:14 | 170,554,562 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 231 | r | l5p2.R | #war to extract substring of 6 characters from given string and replace that substring with "Odisha"
linkin="Linkin Park is an American rock band from Agoura Hills"
part=substr(linkin,19,25)
print(sub(part,"Odisha",linkin))
|
ca0c4a0e56f4af0b5c595b6ec9e7507ae2a56309 | a040bdcfb00ebedfba5e35a463d16d43c9569387 | /information-retrieval-bow/test_information_retrieval_bow.R | 2e9ec15d95d1db3d3a91a828a5bf6d17a0e9eedf | [] | no_license | mpudil/projects | 6a9ab02668be9ad6f5e0c4e9690026c9e41baa8f | b9795489011068a262e3e24b76fa0cc482eb7210 | refs/heads/master | 2022-07-13T09:45:24.974556 | 2021-01-29T18:52:41 | 2021-01-29T18:52:41 | 158,999,291 | 1 | 1 | null | 2022-06-22T03:06:53 | 2018-11-25T04:58:22 | Jupyter Notebook | UTF-8 | R | false | false | 7,506 | r | test_information_retrieval_bow.R | #!/usr/bin/env Rscript
## Rscript test_information_retrieval_bow.R
# Test-suite setup: load testthat/here and the implementation under test,
# then build the relative paths of the three corpora used by the tests.
library(testthat)
library(here)
source("information_retrieval_bow.R")
# NYT "art" articles, relative paths rooted at the project directory.
artfiles <- paste0("Data/nyt-collection-text/art/",
list.files(here("Data", "nyt-collection-text", "art")))
# NYT "music" articles.
musicfiles <- paste0("Data/nyt-collection-text/music/",
list.files(here("Data", "nyt-collection-text", "music")))
# Small hand-made punctuation fixtures (punct1/2/3).
punctuation <- paste0("Data/Punctuation/", list.files(here("Data", "Punctuation")))
# Vignette pt 1 tests -----------------------------------------------------
# Normal cases (with punctuation)
# Fixture string: "like" and "gum" each occur twice, buried in punctuation.
foo <- "I like %$@to*&, chew;: gum, but don't like|}{[] bubble@#^)( gum!?"
# Verify makeBoW token counting: "gum" and "like" each appear twice in foo,
# so their bag-of-words counts must be equal.
test_that("bag of words counts tokens", {
bow <- tokenize(foo) %>% makeBoW
gum_indx <- which(names(bow)=="gum")
# BUG FIX: previously this looked up "gum" a second time, so the test
# compared the "gum" count with itself and could never fail.
like_indx <- which(names(bow)=="like")
expect_equal(bow[gum_indx], bow[like_indx])
})
# Edge cases
# Single-token fixture.
bar <- "foo"
test_that("tokenize function keeps character length 1 the same", {
expect_equal(tokenize(bar), c("foo"))
})
test_that("bag of words for character length 1 returns token as name with count of 1", {
bow <- makeBoW(tokenize(bar), "foo")
expect_equal(which(colnames(bow)=="foo"), 1)
expect_equal(bow[1],1)
})
# A lexicon entry absent from the token list must not add a column.
test_that("makeBoW leaves out words in lexicon but not in tokenlist", {
lex <- tokenize(foo) %>% unique
bow_superman <- makeBoW(tokenize(foo), c(lex, "superman"))
bow_reg <- makeBoW(tokenize(foo))
expect_equal(ncol(bow_superman), ncol(bow_reg))
})
# Exact column names and counts for the three punctuation fixtures.
test_that("makeBoW can handle punctuation", {
p <- makeBoW(tokenize(punctuation[1]))
expect_equal(colnames(p), c("and", "cook", "do", "eating", "enjoy", "food",
"i", "it", "like", "make", "to", "too", "you"))
expect_equal(as.vector(p), c(2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1))
p2 <- makeBoW(tokenize(punctuation[2]))
expect_equal(colnames(p2), c("are", "both", "can", "cook", "do", "good", "hamburgers", "i", "like",
"make", "or", "pizza", "really", "to", "yes", "you"))
expect_equal(as.vector(p2), c(1, 1, 1, 2, 1, 1, 1, 2, 2, 1, 1, 1, 1, 2, 1, 1))
p3 <- makeBoW(tokenize(punctuation[3]))
expect_equal(colnames(p3), c("and", "classs", "cook", "does", "food", "likes", "mom", "my", "she", "to",
"took", "well"))
expect_equal(as.vector(p3), c(1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1))
})
# Vignette pt 2 tests -----------------------------------------------------
# analyzeCorpus builds one bag-of-words row per document over a shared lexicon.
test_that("No duplicate words and no missing words (even with punctuation", {
pac <- analyzeCorpus(punctuation)
expect_equal(colnames(pac), c("and", "cook", "do", "eating", "enjoy", "food", "i", "it", "like",
"make", "to", "too", "you", "are", "both", "can", "good", "hamburgers",
"or", "pizza", "really", "yes", "classs", "does", "likes", "mom", "my",
"she", "took", "well"))
})
test_that("Counts are correct in analyzeCorpus", {
pac <- analyzeCorpus(punctuation)
expect_equal(pac$food == c(1,0,1), rep(TRUE, 3))
expect_equal(pac$i, c(1,2,0))
expect_equal(pac$enjoy, c(1,0,0))
expect_equal(dim(pac), c(3,30))
})
test_that("rownames are basenames of the file without the extension", {
expect_equal(rownames(analyzeCorpus(musicfiles[1:3])), c("0023931.txt", "0075170.txt", "0170797.txt"))
})
test_that("No missing values or non-numeric or negative values in dataframe", {
# Note: Converting to numeric via as.numeric(as.character(.)) will convert non-numbers to NA.
expect_equal(analyzeCorpus(musicfiles)[1,] %>% as.character %>% as.numeric %>% is.na %>% sum, 0)
})
test_that("analyzeCorpus produces appropriate error when an illigitimate filename is used as input", {
expect_error(analyzeCorpus(c("doesnotexist.txt", "notafile.txt")))
# And even when only one of the filenames does not exist
expect_error(analyzeCorpus(c(artfiles[1], "doesnotexist.txt")))
})
# Vignette #3 Tests
## To run the tests, just run
## Rscript test_information_retrieval_bow.R
## NOTE(review): testthat is already attached and information_retrieval_bow.R
## already sourced at the top of this file; the duplicate library()/source()
## calls that used to sit here re-executed the sourced script a second time
## mid-suite for no benefit, so they were removed.
# Normal Cases ------------------------------------------------------------
# 3x3 test matrix; columns are the points whose pairwise distances we check.
mat <- matrix(nrow=3, ncol=3, data=c(1:9))
test_that("distMatrix function reports accurate euclidean distance between 2 vectors", {
expect_equal(round(distMatrix(mat),2), matrix(nrow=3, ncol=3, byrow=T, data=c(0, 1.73, 3.46,
1.73, 0, 1.73,
3.46, 1.73, 0)))
expect_equal(distMatrix(mat, metric="max"), matrix(nrow=3, ncol=3, byrow=T, data=c(0, 1, 2,
1, 0, 1,
2, 1, 0)))
})
# Distance-matrix invariants: zero diagonal and symmetry for both metrics.
test_that("Diagonals are 0", {
expect_equal(diag(distMatrix(mat)), rep(0,3))
expect_equal(diag(distMatrix(mat, metric="max")), rep(0,3))
})
test_that("Matrices are symmetric",{
expect_true(isSymmetric.matrix(distMatrix(mat)))
expect_true(isSymmetric.matrix(distMatrix(mat, metric="max")))
})
# Edge Cases --------------------------------------------------------------
test_that("Distance Matrix of Zero matrix is a Zero matrix", {
zeromat <- matrix(nrow=3, ncol=3, data=rep(0, 9))
expect_equal(distMatrix(zeromat), zeromat)
expect_equal(distMatrix(zeromat, metric = "max"), zeromat)
})
test_that("Small matrices work", {
smallmat <- matrix(nrow=2, ncol=1, data=c(1,2))
expect_equal(distMatrix(smallmat),
matrix(nrow=2, ncol=2, byrow=T, data=c(0,1,1,0)))
expect_equal(distMatrix(smallmat, metric = "max"),
matrix(nrow=2, ncol=2, byrow=T, data=c(0,1,1,0)))
})
test_that("Negative numbers throw an error", {
negmatrix <- matrix(nrow=2, ncol=2, data=c(-4, -3, -1, 3))
expect_error(distMatrix(negmatrix))
})
# Vignette pt 4 Tests -----------------------------------------------------
# Fixtures for IrSearch: one query document plus corpora of various sizes.
punct1 <- as.data.frame(makeBoW(tokenize(punctuation[1])))
rownames(punct1) <- basename(punctuation[1])
punct23 <- analyzeCorpus(punctuation[2:3])
music110 <- analyzeCorpus(musicfiles[1:10])
music1 <- makeBoW(tokenize(musicfiles[1])) %>% data.frame
rownames(music1) <- musicfiles[1]
music210 <- analyzeCorpus(musicfiles[2:10])
# Argument validation: k out of range, unknown method, malformed corpus.
test_that("Inputs are valid", {
expect_error(IrSearch(punct1, punct23, k=0))
expect_error(IrSearch(punct1, punct23, k=10))
expect_error(IrSearch(punct1, punct23, method = "invalid_method"))
expect_error(IrSearch(punct1, c(1:ncol(punct1))))
})
test_that("Correct searches from punct files", {
expect_equal(IrSearch(punct1, punct23), "punct3.txt")
expect_equal(IrSearch(punct1, punct23, method = "maximum"), "punct2.txt")
expect_equal(IrSearch(punct1, punct23, k=2), c("punct3.txt", "punct2.txt"))
})
# Searching with the query document present in the corpus must give the same
# result as searching the corpus without it (the query row is excluded).
test_that("Doesn't matter if doc_bow is a row in corp_bow", {
expect_equal(IrSearch(music1, music110), IrSearch(music1, music210))
expect_true(all(IrSearch(music1, music110, k=2) == IrSearch(music1, music210, k=2)))
m1 <- IrSearch(music1, music110, k=2, method="maximum")
m2 <- IrSearch(music1, music210, k=2, method="maximum")
expect_true(all(m1 == m2))
})
# FIX: a stray " |" (table-extraction artifact) trailed the closing brace and
# made the file unparseable; it has been removed.
65b4e83d7fbdbdc9b2dc7137bf52d9c4e16397e3 | a1bc3aeb3b1326408ae14864ba84fc44529705f0 | /R_Programming/Prog_Assign_4/ProramminAssignment3.R | a2bda78022cdf1aedc5474d144764d810333c8e6 | [] | no_license | vrindaprabhu/DataScience_Johns_Hopkins | fa1e7d28d11a0bea11626848561676b778e4161d | 0da47e3c9bbc86e5bbce1adbd2c9d0904beda2a6 | refs/heads/master | 2021-01-22T03:22:26.802940 | 2016-05-01T17:40:38 | 2016-05-01T17:40:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,055 | r | ProramminAssignment3.R | # http://dr-k-lo.blogspot.in/2013/11/in-in-r-underused-in-operator.html
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
# Return the hospital name(s) in `state` with the lowest 30-day mortality for
# `outcome` ("heart attack", "heart failure" or "pneumonia"), read from
# outcome-of-care-measures.csv in the working directory. Ties: all tied
# hospital names are returned. Stops with 'invalid outcome' / 'invalid state'
# for unrecognised arguments.
best <- function(state, outcome) {
  ## Column index of each supported outcome in the raw file.
  col_for <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  measures <- read.csv("outcome-of-care-measures.csv",
                       na.strings = "Not Available", stringsAsFactors = F)
  ## Validate arguments (outcome first, then state, as before).
  if (!(outcome %in% names(col_for))) {
    stop('invalid outcome')
  }
  if (!(state %in% unique(measures$State))) {
    stop('invalid state')
  }
  ## Keep only this state's rows: hospital name (col 2) + mortality column.
  in_state <- measures[measures[, 'State'] == state, c(2, col_for[[outcome]])]
  lowest <- min(as.double(in_state[, 2]), na.rm = T)
  hit <- which(in_state[, 2] == lowest, arr.ind = T)
  in_state[hit, "Hospital.Name"]
}
# Return the hospital name in `state` holding rank `num` for 30-day mortality
# on `outcome`. num may be "best", "worst", or an integer rank (1 = lowest
# mortality). Ties are broken alphabetically by hospital name via order().
# NOTE(review): rows with NA mortality sort last, so a numeric `num` beyond
# the count of non-NA hospitals returns NA — presumably intentional; confirm.
rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data
  outcome.data <- read.csv("outcome-of-care-measures.csv",na.strings = "Not Available",stringsAsFactors = F)
  # Column index in the raw file for each supported outcome.
  outcome.list <- list(11, 17, 23)
  names(outcome.list) <- c('heart attack', 'heart failure', 'pneumonia')
  check.state <- unique(outcome.data$State)
  ## Check that state and outcome are valid
  if(!(outcome %in% names(outcome.list))) {
    stop('invalid outcome')
  }
  if(!(state %in% check.state)) {
    stop('invalid state')
  }
  ## Return hospital name in that state with the given rank ## 30-day death rate
  # Subset to this state: hospital name (col 2) + the chosen mortality column.
  data.subset <- outcome.data[(outcome.data[,'State'] == state),c(2,outcome.list[[outcome]])]
  # Sort by mortality, then alphabetically by hospital name (tie-break).
  rank.data <- data.subset[order(data.subset[,2],data.subset[,'Hospital.Name']),]
  if(num == 'best'){
    indices <- which(rank.data[,2] == min(as.double(rank.data[,2]),na.rm = T),arr.ind = T)
    return(rank.data[indices,"Hospital.Name"])
  }
  if(num == 'worst'){
    indices <- which(rank.data[,2] == max(as.double(rank.data[,2]),na.rm = T),arr.ind = T)
    return(rank.data[indices,"Hospital.Name"])
  }
  # Numeric rank: simple positional lookup in the sorted frame.
  rank.data[num,"Hospital.Name"]
}
# For every state, return the hospital holding rank `num` ("best", "worst" or
# an integer) for 30-day mortality on `outcome`. Result: data.frame with
# columns 'hospital' and 'state', one row per state.
# Built by sapply over states (yields a 2-row matrix) then transposed.
rankall <- function(outcome, num = "best") {
  ## Read outcome data
  outcome.data <- read.csv("outcome-of-care-measures.csv",na.strings = "Not Available",stringsAsFactors = F)
  # Column index in the raw file for each supported outcome.
  outcome.list <- list(11, 17, 23)
  names(outcome.list) <- c('heart attack', 'heart failure', 'pneumonia')
  check.state <- unique(outcome.data$State)
  ## Check that outcome is valid
  if(!(outcome %in% names(outcome.list))) {
    stop('invalid outcome')
  }
  ## Return hospital name in that state with the given rank ## 30-day death rate
  # Sort all rows by mortality then hospital name; keep name, mortality, State.
  data.subset <- outcome.data[order(outcome.data[,outcome.list[[outcome]]],outcome.data[,'Hospital.Name']),
                              c(2,outcome.list[[outcome]],grep("State", colnames(outcome.data)))]
  # One (already sorted) data.frame per state.
  rank.data <- split(data.subset,data.subset$State )
  if(num == 'best') {
    # Per state: rows matching the minimum mortality, first alphabetically.
    hospital.matrix <- sapply(check.state,function(x){
      indices <- which(rank.data[[x]][,2] == min(as.double(rank.data[[x]][,2]),na.rm = T),arr.ind = T)
      hospital.name <- rank.data[[x]][indices,c("Hospital.Name","State")]
      hospital.name[order(hospital.name[,1])[1],]
    })
    rownames(hospital.matrix) <- c('hospital','state')
    return(as.data.frame(t(hospital.matrix)))
  }
  if(num == 'worst') {
    # Same as 'best' but against the maximum mortality.
    hospital.matrix <- sapply(check.state,function(x){
      indices <- which(rank.data[[x]][,2] == max(as.double(rank.data[[x]][,2]),na.rm = T),arr.ind = T)
      hospital.name <- rank.data[[x]][indices,c("Hospital.Name","State")]
      hospital.name[order(hospital.name[,1])[1],]
    })
    rownames(hospital.matrix) <- c('hospital','state')
    return(as.data.frame(t(hospital.matrix)))
  }
  # Numeric rank: positional row lookup in each state's sorted frame
  # (NA mortality rows sort last, so an out-of-range rank yields NA).
  hospital.matrix <- sapply(check.state,function(x){c(rank.data[[x]][num,c("Hospital.Name")],x)})
  rownames(hospital.matrix) <- c('hospital','state')
  return(as.data.frame(t(hospital.matrix)))
}
|
966c9f6483eec0bf44493524efbfdc12e66fc699 | e370cb059339a541ae1b0b4a649eb5e43acf0609 | /sparseSingleCell.R | abd2fe055d94db1f0f497193b6a48c6dc2269e2e | [] | no_license | pmb59/sparseSingleCell | fa6fb9d8a9314a9fe625b56e5e4da3320ebf040b | 0937e0e622cdccf0bf2787b879857d80dccf2464 | refs/heads/master | 2023-04-13T16:20:38.787059 | 2023-04-01T18:21:52 | 2023-04-01T18:21:52 | 161,413,482 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,109 | r | sparseSingleCell.R | library(fdapace)
library(data.table)
# fgf4 (mm10), '+' strand
chr <- 7
start <- 144861386
end <- 144865243
EXT <- 500
# read chormatin accessibility scNMT-seq data
files <- list.files(
path = ".", pattern = "acc_processed.tsv", all.files = FALSE,
full.names = FALSE, recursive = FALSE,
ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE
)
length(files)
#---------------------------------
# Prepare Input lists for FPCA
#---------------------------------
Ly <- list()
Lt <- list()
counter <- 0
for (i in 1:length(files)) {
c1 <- fread(files[i], head = FALSE)
c1f <- c1[which(c1$V1 == chr & c1$V2 >= start - EXT & c1$V2 <= start + EXT), ]
if (length(c1f$V3) > 0) {
counter <- counter + 1
Ly[[counter]] <- c1f$V3
Lt[[counter]] <- c1f$V2
}
rm(c1, c1f)
}
# Number of cells with at least one GpC value
length(Ly)
# Proportion of cells with data
100 * (length(Ly) / length(files))
# Each vector in t should be in ascending order in fdapace
# Re-sort every cell's positions ascending and permute the matching values.
Ly_sorted <- list()
Lt_sorted <- list()
ID <- list()
for (j in 1:length(Lt)) {
temp <- sort(Lt[[j]], index.return = TRUE, decreasing = FALSE)$ix
Lt_sorted[[j]] <- Lt[[j]][temp]
Ly_sorted[[j]] <- Ly[[j]][temp]
rm(temp)
}
#---------------------------------
# fdapace
#---------------------------------
# Functional PCA on the sparse per-cell accessibility profiles.
pace <- FPCA(Ly = Ly_sorted, Lt = Lt_sorted, optns = list(maxK = 30, nRegGrid = 100, plot = TRUE, outPercent = c(0.06, 1)))
# Design plot: coverage of observation-time pairs across cells.
pdf("Fig1_design_plot.pdf", height = 6, width = 6)
CreateDesignPlot(Lt_sorted, obsGrid = NULL, isColorPlot = TRUE, noDiagonal = TRUE, addLegend = TRUE)
dev.off()
# Figure 2: per-cell fitted accessibility paths (cells 2-5), scree plot and
# functional box-plot, laid out on a 2x3 grid.
library(wesanderson)
cellcolor <- wes_palette("BottleRocket2", n = 5, type = "discrete")
pdf("Fig2.pdf", height = 5, width = 7)
par(mfrow = c(2, 3))
par(mar = c(5, 4, 2, 1))
for (i in 2:5) {
  # BUG FIX: `main` was supplied twice (main = "" and the "cell N" title),
  # which errors with "matched by multiple actual arguments"; the empty one
  # was dropped and the informative title kept.
  CreatePathPlot(pace, K = 9, subset = i, pch = 16, showMean = FALSE, col = cellcolor[i - 1], xlab = "chr7", ylab = "GpC accessibility", main = paste("cell ", i - 1))
}
CreateScreePlot(pace)
CreateFuncBoxPlot(pace, xlab = "chr7", ylab = "GpC accessibility", main = "Functional box-plot", optns = list(variant = "pointwise"))
dev.off()
|
3ded7a524b3390bc52ee68db55db5cc3ec24b2a2 | 2b3cbc05953d0502cfd03db9cc8818ceff5783c2 | /80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/aV8MoC1aZQAmo.R | 43cfc893c876764546a0a4c729d0b2e7cd87a1f4 | [] | no_license | ayanmanna8/test | 89124aa702fba93a0b6a02dbe6914e9bc41c0d60 | 4f49ec6cc86d2b3d981940a39e07c0aeae064559 | refs/heads/master | 2023-03-11T19:23:17.704838 | 2021-02-22T18:46:13 | 2021-02-22T18:46:13 | 341,302,242 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 850 | r | aV8MoC1aZQAmo.R | with(a4155cdb4d45441a8925ebefb1984cbf9, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';source("D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/R/Recommendations/advanced_federation_blend.r");a2Hrpdwy3col1<- as.character(FRAME878836$location);linka6mzFw <- data.table("col1"=c("null"), "col2"=c("null")); linka6mzFw <- unique(linka6mzFw);asJKqNdie<- curate(a2Hrpdwy3col1,linka6mzFw);asJKqNdie <- as.data.table(asJKqNdie);names(asJKqNdie)<-"aEje4KxyL";FRAME878836 <- cbind(FRAME878836,asJKqNdie);FRAME878836 <- FRAME878836[,-c("location")];colnames(FRAME878836)[colnames(FRAME878836)=="aEje4KxyL"] <- "location";rm(asJKqNdie,linka6mzFw,a2Hrpdwy3col1,a2Hrpdwy3, best_match, best_match_nonzero, best_match_zero, blend, curate, self_match );}); |
b90e9591ebebdf96ec7a3fb9ee30e3cf999ffa21 | e1986ad57cf85a086abb699dcb1a0ae23dd54be7 | /inst/examples/data/linreg/example_bptest.R | c003d42068d3396c78e478aaca8d9800a5ed5a97 | [] | no_license | Kale14/mmstat4 | 4fb108216f768bc404a7f353621f4f129258ba0a | 5ee81b9f5452e043b3a43708801997c72af3cda2 | refs/heads/main | 2023-03-29T22:25:45.841324 | 2021-04-07T09:15:41 | 2021-04-07T09:15:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 264 | r | example_bptest.R | library("rio")
x <- import("https://shinyapps.wiwi.hu-berlin.de/d/StaedteMietenR.sav")
x <- x[complete.cases(x),]
lm <- lm (Miete~Fläche, data=x)
summary(lm)
plot(x$Fläche, residuals(lm))
abline(h=0, col="red")
#
library("lmtest")
bptest(Miete~Fläche, data=x) |
5adb801e0280af8e5e38cd376e09d268b740f9eb | a4e9ab2f19e9858a6791072e0343fc13044a6053 | /cnv_histone_heatmap.R | 060757d30672e2cfe1412bafd2ca51afb5df9584 | [] | no_license | 18853857973/CRC_lncRNA | 02e617ec6c4386df6cfc8a68aec72c26e9a5532e | f3e3080ec1df743bf61b9d8cff20471f44152f26 | refs/heads/master | 2020-03-10T21:15:16.715549 | 2018-03-06T13:32:04 | 2018-03-06T13:32:04 | 129,589,228 | 0 | 1 | null | 2018-04-15T07:59:58 | 2018-04-15T07:59:57 | null | GB18030 | R | false | false | 25,420 | r | cnv_histone_heatmap.R | setwd('D:\\CRC_lncRNA\\cnv\\percentCNV')
############################
#全部lncRNA
all_novel=read.table('D:\\CRC_lncRNA\\filter\\lncRNA\\lncRNA.final.v2.novel.geneid.txt')
all_novel=all_novel[,1]
all_novel_num=length(all_novel)
all_known=read.table('D:\\CRC_lncRNA\\filter\\lncRNA\\lncRNA.final.v2.known.geneid.txt')
all_known=all_known[,1]
all_known_num=length(all_known)
#大于25的
per_novel=read.table('percentages25novel.geneid.txt')
per_novel=unique(per_novel[,1])
per_novel_num=length(per_novel)
per_known=read.table('percentages25known.geneid.txt')
per_known=unique(per_known[,1])
per_known_num=length(per_known)
#剩下的
novel_less=all_novel_num-per_novel_num
known_less=all_known_num-per_known_num
type=c(rep("novel",all_novel_num),rep("known",all_known_num))
num=c(rep("CNV",per_novel_num),rep("NON_CNV",novel_less),rep("CNV",per_known_num),rep("NON_CNV",known_less))
per_df=data.frame(type=type,num=num)
pdf(file='D:\\CRC_lncRNA\\cnv\\percentCNV\\num2_lncRNA_CNV_percent_bar.pdf')
sp=ggplot(per_df,aes(type,fill=factor(num))) + geom_bar(position='fill',width=0.5)+labs(x="",y="percent")+ggtitle("CNV_percent_in_novel_known")
sp+theme_bw() + theme(title=element_text(size=15,color="black"
),plot.title = element_text(hjust = 0.5),legend.title=element_blank(),panel.border = element_blank(),panel.grid.major = element_blank(),panel.grid.minor = element_blank(),axis.line = element_line(colour = "black"),axis.title.x = element_text(size = 20, face = "bold"),axis.title.y= element_text(size = 30, face = "bold"),axis.text.x=element_text(size=25,color="black"))
dev.off()
#####################################################
# Venn diagrams: overlap of differentially expressed lncRNAs with
# CNV lncRNAs (> 25% recurrence).
lncRNAs with CNV above the 25% threshold, novel and known.
per_novel_gene=read.table('percentages25novel.geneid.txt')
per_novel_gene=unique(per_novel_gene[,1])
per_known_gene=read.table('percentages25known.geneid.txt')
per_known_gene=unique(per_known_gene[,1])
# DE lncRNAs shared by DESeq2 and edgeR: tumor-vs-normal and rec-vs-nonrec.
normal_DESeq_edgR_intersect=read.table('D:\\CRC_lncRNA\\diffexp\\normal_DESeq_edgR_intersect_gene.txt',sep='\t')
normal_DESeq_edgR_intersect=normal_DESeq_edgR_intersect[,1]
rec_DESeq_edgR_intersect=read.table('D:\\CRC_lncRNA\\diffexp\\rec_DESeq_edgR_intersect_gene.txt',sep='\t')
rec_DESeq_edgR_intersect=rec_DESeq_edgR_intersect[,1]
union_per_novel_known_gene=union(per_novel_gene,per_known_gene)
venn.diagram(list(normal_tumor_differentlncRNA=normal_DESeq_edgR_intersect,CNVlncRNA=union_per_novel_known_gene),cat.cex=c(1,1),lwd=c(1,1),cex=2,fill=c("red","blue"),"D:\\CRC_lncRNA\\cnv\\normal_tumor_CNV_intersectgene.pdf")
venn.diagram(list(recornot_differentlncRNA=rec_DESeq_edgR_intersect,CNVlncRNA=union_per_novel_known_gene),cat.cex=c(1,1),lwd=c(1,1),cex=2,fill=c("red","blue"),"D:\\CRC_lncRNA\\cnv\\rec_ornot_CNV_intersectgene.pdf")
# DE (tumor-vs-normal) lncRNAs that also carry CNV.
intersect_normal_cnv=(intersect(normal_DESeq_edgR_intersect,union_per_novel_known_gene))
# Find the nearby protein-coding gene for each lncRNA (for functional
# enrichment). The coding-gene symbol is parsed out of the lncRNA name:
# "LINC-<gene>" takes the second token, otherwise the first token before "-".
nearcoding=c()
L=strsplit(intersect_normal_cnv, "-")
for (k in 1:length(intersect_normal_cnv)){
if (L[[k]][1]=="LINC"){
nearcoding=c(nearcoding,L[[k]][2])
}else{
nearcoding=c(nearcoding,L[[k]][1])
}
}
nearcoding=unique(nearcoding)
write.table(nearcoding,'D:\\CRC_lncRNA\\cnv\\percentCNV\\nearcoding.txt',quote=F,col.names = F,row.names = F)
# Up/down regulated tumor-vs-normal lncRNAs (DESeq2 n edgeR intersection).
normal_intersect_down=read.table('D:\\CRC_lncRNA\\diffexp\\tumor_vs_normal_DESeq2_edgeR_intersect_down.txt',sep='\t')
normal_intersect_up=read.table('D:\\CRC_lncRNA\\diffexp\\tumor_vs_normal_DESeq2_edgeR_intersect_up.txt',sep='\t')
normal_intersect_up=normal_intersect_up[,1]
normal_intersect_down=normal_intersect_down[,1]
# Tumor-vs-normal DE lncRNAs that also have CNV, split by direction.
intersect_normal_cnv_up=intersect(intersect_normal_cnv,normal_intersect_up)
intersect_normal_cnv_down=intersect(intersect_normal_cnv,normal_intersect_down)
intersect_normal_cnv_up_down=c(intersect_normal_cnv_up,intersect_normal_cnv_down)
# Export these lncRNAs' coordinates from the GTF as a BED file.
intersect_normal_cnv_up_down=data.frame(lncRNA=intersect_normal_cnv_up_down)
lncRNA_gtf=read.table('D:\\CRC_lncRNA\\filter\\lncRNA\\only_min_max_position_lncRNA.final.v2.gtf',stringsAsFactors = F)
colnames(lncRNA_gtf)=c("chr","start","end","lncRNA")
intersect_normal_cnv_up_down_gtf=merge(intersect_normal_cnv_up_down,lncRNA_gtf,by='lncRNA',sort=F)
intersect_normal_cnv_up_down_gtf=intersect_normal_cnv_up_down_gtf[,c(2,3,4,1)]
write.table(intersect_normal_cnv_up_down_gtf,'D:\\CRC_lncRNA\\diffexp\\num2_intersect_normal_cnv_up_down_gtf.bed',quote = F,col.names = F,row.names = F,sep = '\t')
countData_normal_cnv=read.table("D:\\CRC_lncRNA\\diffexp\\data_normal_num2.txt",sep='\t',stringsAsFactors = F)
############################
# Collect logFC values for DE-and-CNV lncRNAs in both contrasts and draw a
# two-column heatmap (tumor-vs-normal, rec-vs-nonrec).
# NOTE(review): `res_normal` and `res_rec` are not defined in this file —
# presumably DESeq2 results objects from a companion diffexp script; the code
# assumes column 2 holds the log2 fold change. Verify before re-running.
intersect_normal_cnv_up_logFC=res_normal[rownames(res_normal)%in%intersect_normal_cnv_up,]
intersect_normal_cnv_up_logFC=intersect_normal_cnv_up_logFC[order(intersect_normal_cnv_up_logFC[,2],decreasing=T),]
intersect_normal_cnv_down_logFC=res_normal[rownames(res_normal)%in%intersect_normal_cnv_down,]
intersect_normal_cnv_down_logFC=intersect_normal_cnv_down_logFC[order(intersect_normal_cnv_down_logFC[,2],decreasing=F),]
write.table(intersect_normal_cnv_up_logFC,'D:\\CRC_lncRNA\\cnv\\differentgene_updown_heatmap\\num2_intersect_normal_cnv_up_logFC.txt',quote = F)
write.table(intersect_normal_cnv_down_logFC,'D:\\CRC_lncRNA\\cnv\\differentgene_updown_heatmap\\num2_intersect_normal_cnv_down_logFC.txt',quote = F)
intersect_normal_cnv_up_down_logFC=rbind(intersect_normal_cnv_up_logFC,intersect_normal_cnv_down_logFC)
#rec_to_not_rec
# intersect_rec_cnv=(intersect(rec_DESeq_edgR_intersect,union_per_novel_known_gene))
# intersect_rec_cnv_up=intersect(intersect_rec_cnv,rec_intersect_up)
# intersect_rec_cnv_down=intersect(intersect_rec_cnv,rec_intersect_down)
# intersect_normal_cnv_up_down=c(intersect_rec_cnv_up,intersect_rec_cnv_down)
# Same lncRNA sets looked up in the recurrence contrast.
intersect_rec_cnv_up_logFC=res_rec[rownames(res_rec)%in%intersect_normal_cnv_up,]
intersect_rec_cnv_up_logFC=intersect_rec_cnv_up_logFC[order(intersect_rec_cnv_up_logFC[,2],decreasing=T),]
intersect_rec_cnv_down_logFC=res_rec[rownames(res_rec)%in%intersect_normal_cnv_down,]
intersect_rec_cnv_down_logFC=intersect_rec_cnv_down_logFC[order(intersect_rec_cnv_down_logFC[,2],decreasing=T),]
intersect_rec_cnv_up_down_logFC=rbind(intersect_rec_cnv_up_logFC,intersect_rec_cnv_down_logFC)
# Column 1 is negated — NOTE(review): presumably to flip the contrast
# direction so both columns read tumor/rec-relative; confirm.
intersect_normal_rec_cnv_up_down_logFC=cbind(-intersect_normal_cnv_up_down_logFC[,2],intersect_rec_cnv_up_down_logFC[,2])
colnames(intersect_normal_rec_cnv_up_down_logFC)=c("tumor-normal","rec-nonrec")
# Heatmap: top rows up-regulated, bottom rows down-regulated.
library(pheatmap)
pheatmap(intersect_normal_rec_cnv_up_down_logFC,cluster_cols = F,cluster_rows =F ,
colorRampPalette(c("green", "black", "red"))(50),show_rownames=F,show_colnames=F)
# Three-way overlap: tumor-vs-normal DE lncRNAs, rec-vs-nonrec DE lncRNAs,
# and CNV lncRNAs (>25% recurrence).
venn.diagram(list(normal_differentlncRNA=normal_DESeq_edgR_intersect,rec_differentlncRNA=rec_DESeq_edgR_intersect,CNVlncRNA=union_per_novel_known_gene),cat.cex=c(1,1,1),lwd=c(1,1,1),cex=2,fill=c("red","blue","yellow"),"D:\\CRC_lncRNA\\cnv\\TF_normal_rec_ornot_CNV_intersectgene.pdf")
lncRNA_CNV2=intersect(normal_DESeq_edgR_intersect,rec_DESeq_edgR_intersect)
lncRNA_CNV=intersect(lncRNA_CNV2,union_per_novel_known_gene)
write.table(lncRNA_CNV,'D:\\CRC_lncRNA\\cnv\\percentCNV\\num2_normal_rec_0.25CNV_lncRNA.txt',quote = F,col.names = F,row.names = F)
# Parse the nearby coding-gene symbol out of each lncRNA name (same scheme
# as above: "LINC-<gene>" -> second token, otherwise first token).
lncRNA_CNV_nearcoding=c()
L=strsplit(lncRNA_CNV, "-")
for (k in 1:length(lncRNA_CNV)){
if (L[[k]][1]=="LINC"){
lncRNA_CNV_nearcoding=c(lncRNA_CNV_nearcoding,L[[k]][2])
}else{
lncRNA_CNV_nearcoding=c(lncRNA_CNV_nearcoding,L[[k]][1])
}
}
lncRNA_CNV_nearcoding=unique(lncRNA_CNV_nearcoding)
write.table(lncRNA_CNV_nearcoding,'D:\\CRC_lncRNA\\cnv\\percentCNV\\num2_normal_rec_0.25CNV_lncRNA_nearcoding.txt',quote=F,col.names = F,row.names = F)
# Tumor relative to normal (kept for reference, currently disabled)
# tumor_vs_normal_rec_0.25CNV_lncRNA_down=intersect(lncRNA_CNV,normal_intersect_up)
# tumor_vs_normal_rec_0.25CNV_lncRNA_up=intersect(lncRNA_CNV,normal_intersect_down)
#
# rec_tumor_vs_normal_0.25CNV_lncRNA_up=intersect(lncRNA_CNV,rec_intersect_up)
# rec_tumor_vs_normal_0.25CNV_lncRNA_down=intersect(lncRNA_CNV,rec_intersect_down)
# Intersect DE lncRNAs with CNV lncRNAs and pull their count data
# (countData_normal/countData_rec and rec_intersect_* come from the diffexp
# script — not defined in this file).
intersect_normal_dflncRNA_CNV_up=intersect(normal_intersect_up,union_per_novel_known_gene)
intersect_normal_dflncRNA_CNV_down=intersect(normal_intersect_down,union_per_novel_known_gene)
intersect_normal_dflncRNA_CNV_up_data=countData_normal[rownames(countData_normal)%in%intersect_normal_dflncRNA_CNV_up,]
intersect_normal_dflncRNA_CNV_down_data=countData_normal[rownames(countData_normal)%in%intersect_normal_dflncRNA_CNV_down,]
intersect_normal_dflncRNA_CNV_up_down_data=rbind(intersect_normal_dflncRNA_CNV_up_data,intersect_normal_dflncRNA_CNV_down_data)
write.table(intersect_normal_dflncRNA_CNV_up_down_data,'intersect_normal_dflncRNA_CNV_up_down_data.txt',quote = F)
intersect_rec_dflncRNA_CNV_up=intersect(rec_intersect_up,union_per_novel_known_gene)
intersect_rec_dflncRNA_CNV_down=intersect(rec_intersect_down,union_per_novel_known_gene)
intersect_rec_dflncRNA_CNV_up_data=countData_rec[rownames(countData_rec)%in%intersect_rec_dflncRNA_CNV_up,]
intersect_rec_dflncRNA_CNV_down_data=countData_rec[rownames(countData_rec)%in%intersect_rec_dflncRNA_CNV_down,]
intersect_rec_dflncRNA_CNV_up_down_data=rbind(intersect_rec_dflncRNA_CNV_up_data,intersect_rec_dflncRNA_CNV_down_data)
write.table(intersect_rec_dflncRNA_CNV_up_down_data,'intersect_rec_dflncRNA_CNV_up_down_data.txt',quote = F)
#heatmap
# The sourced heatmap.R script reads its inputs from the global variables
# upregulateMatrix, sampleInfo and colnum set just before each source() call.
upregulateMatrix=intersect_normal_dflncRNA_CNV_up_down_data
sampleInfo=data.frame(colnames(intersect_normal_dflncRNA_CNV_up_down_data),Subset=group_list_normal)
colnum=2
pdf("D:\\CRC_lncRNA\\cnv\\percentCNV\\intersect_normal_dflncRNA_CNV.pdf")
source('D:\\R\\heatmap.R')
dev.off()
# Same plot for the recurrence contrast.
upregulateMatrix=intersect_rec_dflncRNA_CNV_up_down_data
sampleInfo=data.frame(colnames(intersect_rec_dflncRNA_CNV_up_down_data),Subset=group_list_rec)
colnum=2
pdf("D:\\CRC_lncRNA\\cnv\\percentCNV\\intersect_rec_dflncRNA_CNV.pdf")
source('D:\\R\\heatmap.R')
dev.off()
# Three-way intersection set (tumor-vs-normal DE n rec DE n CNV).
# Heatmaps for these lncRNAs ("9 lncRNAs") in both contrasts.
lncRNA_CNV=read.table('D:\\CRC_lncRNA\\cnv\\percentCNV\\num2_normal_rec_0.25CNV_lncRNA.txt',check.names = F,stringsAsFactors = F)
lncRNA_CNV=lncRNA_CNV[,1]
intersect_normal_cnv_up_logFC=read.table('D:\\CRC_lncRNA\\cnv\\differentgene_updown_heatmap\\num2_intersect_normal_cnv_up_logFC.txt',check.names = F,stringsAsFactors = F)
intersect_normal_cnv_down_logFC=read.table('D:\\CRC_lncRNA\\cnv\\differentgene_updown_heatmap\\num2_intersect_normal_cnv_down_logFC.txt',check.names = F,stringsAsFactors = F)
# lncRNA_CNV_up=intersect_normal_cnv_up_logFC[rownames(intersect_normal_cnv_up_logFC)%in%lncRNA_CNV,]
# lncRNA_CNV_down=intersect_normal_cnv_down_logFC[rownames(intersect_normal_cnv_down_logFC)%in%lncRNA_CNV,]
# Normal vs tumor heatmap. countData_all comes from the diffexp script;
# columns are reordered c(1:10,31:40,11:30) — NOTE(review): presumably
# normal / tumor-paired / rec+nonrec sample blocks; confirm against the
# sample sheet.
countData_all_lncRNA=countData_all[rownames(countData_all)%in%lncRNA_CNV,]
group_list_normal<- factor(c(rep('normal',20),rep('tumor',20)))
# Coerce the subset to a plain numeric matrix, preserving dimnames.
countData_all_lncRNA2=countData_all_lncRNA
countData_all_lncRNA2=matrix(as.numeric(unlist(countData_all_lncRNA2)),ncol=ncol(countData_all_lncRNA2))
rownames(countData_all_lncRNA2)=rownames(countData_all_lncRNA)
colnames(countData_all_lncRNA2)=colnames(countData_all_lncRNA)
upregulateMatrix=countData_all_lncRNA2[,c(1:10,31:40,11:30)]
lncRNA_rec_normal_cnv=countData_all_lncRNA2[,c(1:10,31:40,11:30)]
write.table(lncRNA_rec_normal_cnv,"D:\\CRC_lncRNA\\cnv\\percentCNV\\lncRNA_rec_normal_cnv.txt",quote=F,sep='\t')
sampleInfo=data.frame(colnames(upregulateMatrix),Subset=group_list_normal)
colnum=2
pdf("D:\\CRC_lncRNA\\cnv\\percentCNV\\9lncRNAlncRNA_CNV_nomal.pdf")
source('D:\\R\\heatmap.R')
dev.off()
# Recurrence vs non-recurrence heatmap, rows ordered by logFC.
rec_DESeq2_edgeR_res_intersect_down=read.table('D:\\CRC_lncRNA\\diffexp\\rec_DESeq2_edgeR_res_intersect_down.txt',check.names = F,stringsAsFactors = F)
rec_DESeq2_edgeR_res_intersect_up=read.table('D:\\CRC_lncRNA\\diffexp\\rec_DESeq2_edgeR_res_intersect_up.txt',check.names = F,stringsAsFactors = F)
rec_DESeq2_edgeR_res_intersect_up_down=rbind(rec_DESeq2_edgeR_res_intersect_up,rec_DESeq2_edgeR_res_intersect_down)
rec_DESeq2_edgeR_res_intersect_up_down_logFC=rec_DESeq2_edgeR_res_intersect_up_down[rownames(rec_DESeq2_edgeR_res_intersect_up_down)%in%lncRNA_CNV,c(2,3)]
rec_DESeq2_edgeR_res_intersect_up_down_logFC=rec_DESeq2_edgeR_res_intersect_up_down_logFC[order(rec_DESeq2_edgeR_res_intersect_up_down_logFC[,1],decreasing = T),]
group_list_rec=factor(c(rep('rec',10),rep('norec',10)))
upregulateMatrix2=countData_all_lncRNA2[,c(11:30)]
upregulateMatrix=upregulateMatrix2[(rownames(rec_DESeq2_edgeR_res_intersect_up_down_logFC)),]
sampleInfo=data.frame(colnames(upregulateMatrix),Subset=group_list_rec)
colnum=2
pdf("D:\\CRC_lncRNA\\cnv\\percentCNV\\9lncRNAlncRNA_CNV_rec.pdf")
source('D:\\R\\heatmap.R')
dev.off()
####################
# Correlation scatter plots between each lncRNA and its nearby protein-coding
# gene: re-read the saved matrix and parse the coding symbols from the names.
lncRNA_rec_normal_cnv=read.table("D:\\CRC_lncRNA\\cnv\\percentCNV\\lncRNA_rec_normal_cnv.txt",check.names = F,sep='\t')
lncRNA_CNV_nearcoding=c()
L=strsplit(rownames(lncRNA_rec_normal_cnv), "-")
for (k in 1:length(rownames(lncRNA_rec_normal_cnv))){
if (L[[k]][1]=="LINC"){
lncRNA_CNV_nearcoding=c(lncRNA_CNV_nearcoding,L[[k]][2])
}else{
lncRNA_CNV_nearcoding=c(lncRNA_CNV_nearcoding,L[[k]][1])
}
}
# FPKM matrix of protein-coding genes, reordered to match the lncRNA columns.
nearcoding=read.table('D:\\CRC_lncRNA\\filter\\RSEM_expression\\pcRNA.rsem.FPKM_sort.txt',check.names = F,sep='\t')
nearcoding=nearcoding[,c(1:10,31:40,11:30)]
# nearcodingene=read.table('D:\\CRC_lncRNA\\cnv\\percentCNV\\num2_normal_rec_0.25CNV_lncRNA_nearcoding.txt',check.names = F,sep='\t')
nearcoding_data=nearcoding[rownames(nearcoding)%in%lncRNA_CNV_nearcoding,]
nearcoding_data_order=nearcoding_data[lncRNA_CNV_nearcoding,]
library(ggplot2)
# One PDF per lncRNA/coding-gene pair: scatter of expression across samples,
# Pearson correlation in the title.
# NOTE(review): the loop starts at 6, skipping the first five pairs —
# presumably those were generated earlier; confirm the offset is intentional.
for (i in c(6:length(lncRNA_CNV_nearcoding))){
  print (i)
  cor_num=cor(as.numeric(lncRNA_rec_normal_cnv[i,]),as.numeric(nearcoding_data_order[i,]))
  gendata=rbind(lncRNA_rec_normal_cnv[i,],nearcoding_data_order[i,])
  gendata_t=data.frame(t(gendata))
  colnames(gendata_t)=c("lncRNA","coding")
  print (rownames(gendata)[1])
  pdf(paste('D:\\CRC_lncRNA\\TCGA_survive\\cor_with_nearcodinggene\\',rownames(gendata)[1],"_point_cor.pdf",sep=''))
  sp2=ggplot(gendata_t, aes(x=lncRNA, y=coding)) +geom_point()+labs(title = paste("cor:",cor_num,sep = ''))
  # BUG FIX: inside a for loop ggplot objects are NOT auto-printed, so the
  # original code produced empty PDFs; print() forces rendering on the device.
  print(sp2+theme_bw() + theme(legend.title=element_blank(),legend.position=c(0.8,0.3),panel.border = element_blank(),panel.grid.major = element_blank(),panel.grid.minor = element_blank(),axis.line = element_line(colour = "black"),axis.title.x = element_text(size = 15, face = "bold"),axis.title.y= element_text(size = 15, face = "bold")))
  dev.off()
}
#############
# Chi-square test: are up/down regulated lncRNA counts distributed like the
# CNV amplification/deletion counts?
setwd('D:\\CRC_lncRNA\\cnv\\percentCNV')
res_up_normal=read.table("D:\\CRC_lncRNA\\diffexp\\tumor_vs_normal_lfc_1_pval_0.05.deseq.up_regulate.xls",sep='\t')
res_down_normal=read.table("D:\\CRC_lncRNA\\diffexp\\tumor_vs_normal_lfc_1_pval_0.05.deseq.down_regulate.xls",sep='\t')
diff_gene_edgeR_up_normal=read.csv( "D:\\CRC_lncRNA\\diffexp\\up_PValue0.05_diff_gene_edgeR_tumor_vs_normal_edgeR.csv",header=T,row.names = 1)
diff_gene_edgeR_down_normal=read.csv( "D:\\CRC_lncRNA\\diffexp\\down_PValue0.05_diff_gene_edgeR_tumor_vs_normal_edgeR.csv",header=T,row.names = 1)
# NOTE(review): DESeq2 "up" is intersected with edgeR "down" (and vice
# versa) — presumably because the two tools used opposite contrast
# directions here; confirm against the diffexp script before reuse.
normal_intersect_up=intersect(rownames(res_up_normal),rownames(diff_gene_edgeR_down_normal))
normal_intersect_down=intersect(rownames(res_down_normal),rownames(diff_gene_edgeR_up_normal))
normal_intersect_up_length=length(normal_intersect_up)
normal_intersect_down_length=length(normal_intersect_down)
# Counts of amplified / deleted lncRNAs (>25% recurrence).
cnv_known_novel_Amp_geneid=read.table('D:\\CRC_lncRNA\\cnv\\percentCNV\\percentages25.Amp.geneid.txt')
cnv_known_novel_Amp_geneid=cnv_known_novel_Amp_geneid[,1]
cnv_known_novel_Del_geneid=read.table('D:\\CRC_lncRNA\\cnv\\percentCNV\\percentages25.Del.geneid.txt')
cnv_known_novel_Del_geneid=cnv_known_novel_Del_geneid[,1]
cnv_known_novel_Amp_geneid_length=length(cnv_known_novel_Amp_geneid)
cnv_known_novel_Del_geneid_length=length(cnv_known_novel_Del_geneid)
# 2x2 contingency table: (Amp, Del) vs (up, down).
x5 = matrix(c(cnv_known_novel_Amp_geneid_length,cnv_known_novel_Del_geneid_length,normal_intersect_up_length,normal_intersect_down_length),nc = 2 , byrow = T)
chisq.test(x5)
#p-value = 3.072e-13
diff_gene_edgeR_up_rec=read.csv("D:\\CRC_lncRNA\\diffexp\\up_PValue0.05_diff_gene_edgeR_rec_vs_norec_edgeR.csv")
diff_gene_edgeR_down_rec=read.csv("D:\\CRC_lncRNA\\diffexp\\down_PValue0.05_diff_gene_edgeR_rec_vs_norec_edgeR.csv")
# Heatmap across 13 cancer types for the lncRNAs with >25% CNV frequency that
# intersect the normal-vs-tumor DE gene sets.
###################################################################################################################################
known_novel="novel_known"
setwd(paste("D:\\CRC_lncRNA\\cnv\\differentgene_updown_heatmap"))
cancername=read.table("D:\\CRC_lncRNA\\cnv\\percentCNV\\cancer13.txt",stringsAsFactors = F)
cancername=cancername[,1]
# Seed the merged table with COADREAD Amp/Del frequencies (columns 3-4 of the gistic files).
COAD_amp_df=read.table(paste('COADREAD.res_',known_novel,'.Amp.geneid_precent_sorted.gistic',sep=''),sep='\t',stringsAsFactors = F)
COAD_amp=COAD_amp_df[,c(3,4)]
colnames(COAD_amp)=c("gene","COADREAD_Amp")
COAD_del_df=read.table(paste('COADREAD.res_',known_novel,'.Del.geneid_precent_sorted2.gistic',sep=''),sep='\t',stringsAsFactors = F)
COAD_del=COAD_del_df[,c(3,4)]
colnames(COAD_del)=c("gene","COADREAD_Del")
all_cancer_amp_del=merge(COAD_amp,COAD_del,by='gene',sort = F)
dim(all_cancer_amp_del)
# lncRNAs from the CNV/DE intersection (up- and down-regulated), used to subset rows below.
intersect_normal_cnv_up_logFC_lncRNA=read.table('D:\\CRC_lncRNA\\cnv\\differentgene_updown_heatmap\\TF_intersect_normal_cnv_up_logFC.txt',sep=' ',stringsAsFactors = F)
intersect_normal_cnv_down_logFC_lncRNA=read.table('D:\\CRC_lncRNA\\cnv\\differentgene_updown_heatmap\\TF_intersect_normal_cnv_down_logFC.txt',sep=' ',stringsAsFactors = F)
#all_cancer_amp_del_sorted=all_cancer_amp_del[order(intersect_normal_cnv_up_down),]
# Append each remaining cancer type's Amp/Del frequency columns by gene id.
for (can in cancername){
  print (can)
  COAD_amp_df=read.table(paste(can,'.res_',known_novel,'.Amp.geneid_precent_sorted.gistic',sep=''),sep='\t',stringsAsFactors = F)
  COAD_amp=COAD_amp_df[,c(3,4)]
  colnames(COAD_amp)=c("gene",paste(can,"_Amp",sep=''))
  COAD_del_df=read.table(paste(can,'.res_',known_novel,'.Del.geneid_precent_sorted2.gistic',sep=''),sep='\t',stringsAsFactors = F)
  COAD_del=COAD_del_df[,c(3,4)]
  colnames(COAD_del)=c("gene",paste(can,"_Del",sep=''))
  COAD_amp_del=merge(COAD_amp,COAD_del,by='gene',sort = F)
  dim(COAD_amp_del)
  all_cancer_amp_del=merge(all_cancer_amp_del,COAD_amp_del,by='gene',sort = F)
}
all_cancer_amp_del2=all_cancer_amp_del
# Split into up- and down-regulated lncRNA rows and sort each by COADREAD Amp/Del frequency.
all_cancer_amp_del_up=all_cancer_amp_del2[all_cancer_amp_del2[,1]%in%rownames(intersect_normal_cnv_up_logFC_lncRNA),]
all_cancer_amp_del_down=all_cancer_amp_del2[all_cancer_amp_del2[,1]%in%rownames(intersect_normal_cnv_down_logFC_lncRNA),]
# all_cancer_amp_del_up=all_cancer_amp_del2[1:1098,]
# all_cancer_amp_del_down=all_cancer_amp_del2[1099:nrow(all_cancer_amp_del2),]
all_cancer_amp_del_up_sorted=all_cancer_amp_del_up[order(all_cancer_amp_del_up[,2],decreasing = T),]
all_cancer_amp_del_down_sorted=all_cancer_amp_del_down[order(all_cancer_amp_del_down[,3],decreasing = F),]
all_cancer_amp_del_up_down_sorted=rbind(all_cancer_amp_del_up_sorted,all_cancer_amp_del_down_sorted)
#all_cancer_amp_del3=merge(intersect_normal_cnv_up_down,all_cancer_amp_del2,by.x='lncRNA',by.y='gene',sort=F)
dim(all_cancer_amp_del_up_down_sorted)
# NOTE(review): pdf() width/height are in inches, so 2000x1500 looks like pixel
# values copied from the png() call below — confirm the intended device size.
pdf(paste("D:\\CRC_lncRNA\\cnv\\differentgene_updown_heatmap\\",known_novel,"_heatmapinallcancer13.pdf",sep=''),width = 2000, height = 1500)
#png(paste("D:\\CRC_lncRNA\\cnv\\differentgene_updown_heatmap\\",known_novel,"_heatmapinallcancer13.png",sep=''),width = 2000, height = 1500)
pheatmap(all_cancer_amp_del_up_down_sorted[,-1],gaps_row=(nrow(all_cancer_amp_del_up_sorted)+1),cluster_cols = F,cluster_rows =F,show_rownames = F,
         color = colorRampPalette(c("navy", "white", "firebrick3"))(100))
dev.off()
all_cancer_amp_del_up_sorted[1:5,1:3]
# Gene ids passing the CNV-frequency thresholds in COADREAD: column 2 is the
# Amp frequency (> 0.25) and column 3 the Del frequency (< -0.25); column 1 is the gene id.
all_cancer_amp_del_up_sorted_amp=all_cancer_amp_del_up_sorted[all_cancer_amp_del_up_sorted[,2]>0.25,1]
all_cancer_amp_del_up_sorted_del=all_cancer_amp_del_up_sorted[all_cancer_amp_del_up_sorted[,3]<(-0.25),1]
all_cancer_amp_del_down_sorted[1:5,1:3]
all_cancer_amp_del_down_sorted_amp=all_cancer_amp_del_down_sorted[all_cancer_amp_del_down_sorted[,2]>0.25,1]
all_cancer_amp_del_down_sorted_del=all_cancer_amp_del_down_sorted[all_cancer_amp_del_down_sorted[,3]<(-0.25),1]
all_cancer_amp_del_up_sorted_amp_del_length=length(all_cancer_amp_del_up_sorted_amp)+length(all_cancer_amp_del_up_sorted_del)
all_cancer_amp_del_down_sorted_amp_del_length=length(all_cancer_amp_del_down_sorted_amp)+length(all_cancer_amp_del_down_sorted_del)
# Chi-square test on the 2x2 contingency table: rows = up/down regulated, columns = Amp/Del.
# BUG FIX: the second entry previously repeated the up-Amp count; it must be the
# up-Del count for the table to mirror the x5 test above and the bar plot below.
x3 = matrix(c(length(all_cancer_amp_del_up_sorted_amp),length(all_cancer_amp_del_up_sorted_del),length(all_cancer_amp_del_down_sorted_amp),length(all_cancer_amp_del_down_sorted_del)),nc = 2 , byrow = T)
chisq.test(x3)
# Stacked bar chart of Amp/Del composition within the up- and down-regulated groups.
noraml_bar_df=data.frame(normal=c(rep("up",all_cancer_amp_del_up_sorted_amp_del_length),rep("down",all_cancer_amp_del_down_sorted_amp_del_length)),normal_val=c(rep('Del',length(all_cancer_amp_del_up_sorted_del)),rep('Amp',length(all_cancer_amp_del_up_sorted_amp)),rep('Del',length(all_cancer_amp_del_down_sorted_del)),rep('Amp',length(all_cancer_amp_del_down_sorted_amp))))
#,per=c(per_normal_novel,per_normal_known)
pdf(file='D:\\CRC_lncRNA\\cnv\\differentgene_updown_heatmap\\TF_amp_del_length_bar_percent.pdf')
sp=ggplot(noraml_bar_df,aes(normal,fill=factor(normal_val))) + geom_bar(position='fill',width=0.5)+labs(x="",y="percent")+ggtitle("25%cnv_in_up_down")
sp+theme_bw() + theme(title=element_text(size=15,color="black"
),plot.title = element_text(hjust = 0.5),legend.title=element_blank(),panel.border = element_blank(),panel.grid.major = element_blank(),panel.grid.minor = element_blank(),axis.line = element_line(colour = "black"),axis.title.x = element_text(size = 20, face = "bold"),axis.title.y= element_text(size = 30, face = "bold"),axis.text.x=element_text(size=25,color="black"))
dev.off()
#transcriptid
###########################################################################################################################
# All lncRNA transcripts (novel and known).
all_novel=read.table('lncRNA.final.v2.novel.transcriptid.txt')
all_novel=all_novel[,1]
all_novel_num=length(all_novel)
all_known=read.table('lncRNA.final.v2.known.transcriptid.txt')
all_known=all_known[,1]
all_known_num=length(all_known)
# Transcripts with CNV frequency above 25%.
per_novel=read.table('percentages25novel.transcriptid.txt')
per_novel=per_novel[,1]
per_novel_num=length(per_novel)
per_known=read.table('percentages25known.transcriptid.txt')
per_known=per_known[,1]
per_known_num=length(per_known)
# The remainder (transcripts without CNV).
novel_less=all_novel_num-per_novel_num
known_less=all_known_num-per_known_num
# Long-format data frame: one row per transcript, labelled by class and CNV status.
type=c(rep("novel",all_novel_num),rep("known",all_known_num))
num=c(rep("CNV",per_novel_num),rep("NON_CNV",novel_less),rep("CNV",per_known_num),rep("NON_CNV",known_less))
per_df=data.frame(type=type,num=num)
png(file='lncRNA_CNV_percent.png',bg="transparent")
sp=ggplot(per_df,aes(type,fill=factor(num))) + geom_bar(position='fill',width=0.5)+labs(x="",y="percent")+ggtitle("CNV_percent_in_novel_known")
sp+theme_bw() + theme(title=element_text(size=15,color="black"
),plot.title = element_text(hjust = 0.5),legend.title=element_blank(),panel.border = element_blank(),panel.grid.major = element_blank(),panel.grid.minor = element_blank(),axis.line = element_line(colour = "black"),axis.title.x = element_text(size = 20, face = "bold"),axis.title.y= element_text(size = 30, face = "bold"),axis.text.x=element_text(size=25,color="black"))
dev.off()
# Subset the count matrix to the CNV/DE intersection genes (up + down) for inspection.
# NOTE(review): countData_normal_cnv, intersect_normal_cnv_up/_down come from
# earlier in this script.
normal_cnv_intersect_up_data=countData_normal_cnv[rownames(countData_normal_cnv)%in%intersect_normal_cnv_up,]
normal_cnv_intersect_down_data=countData_normal_cnv[rownames(countData_normal_cnv)%in%intersect_normal_cnv_down,]
normal_cnv_intersect_data=rbind(normal_cnv_intersect_up_data,normal_cnv_intersect_down_data)
dim(normal_cnv_intersect_up_data)
dim(normal_cnv_intersect_down_data)
dim(normal_cnv_intersect_data)
# # Draw heatmaps (disabled; kept for reference)
# # normal vs CNV
# upregulateMatrix=normal_cnv_intersect_data
# group_list_normal<- factor(c(rep('normal',20),rep('tumor',20)))
# sampleInfo=data.frame(colnames(normal_cnv_intersect_data),Subset=group_list_normal)
# colnum=2
# pdf(file=paste("D:\\CRC_lncRNA\\cnv\\percentCNV\\normal_cnv_heatmap.pdf",sep=''))
# source('D:\\R\\heatmap.R')
# dev.off()
#
#
# # recurrent vs non-recurrent
# countData_rec=read.table('D:\\CRC_lncRNA\\diffexp\\lncRNA.rsem.count_sort_rec_not_TF.txt',sep='\t',header = T,stringsAsFactors = F)
# colData=data.frame(sample=colnames(countData_rec),Type=c(rep('recu',10),rep('unrecu',10)))
# rec_up_gene=read.table('D:\\CRC_lncRNA\\diffexp\\rec_DESeq2_edgeR_intersect_up.txt',sep='\t',stringsAsFactors = F)
# rec_up_gene=rec_up_gene[,1]
# rec_down_gene=read.table('D:\\CRC_lncRNA\\diffexp\\rec_DESeq2_edgeR_intersect_down.txt',sep='\t',stringsAsFactors = F)
# rec_down_gene=rec_down_gene[,1]
#
#
#
# rec_cnv_intersect_up_data=countData_normal_cnv[rownames(countData_normal_cnv)%in%intersect_normal_cnv_up,]
# rec_cnv_intersect_down_data=countData_normal_cnv[rownames(countData_normal_cnv)%in%intersect_normal_cnv_down,]
# rec_cnv_intersect_data=rbind(normal_cnv_intersect_up_data,normal_cnv_intersect_down_data)
# dim(normal_cnv_intersect_up_data)
# dim(normal_cnv_intersect_down_data)
# dim(normal_cnv_intersect_data)
|
226bb483019f1de76dfb5dc5ff39bcad6460e3ac | f4a9cb3be91d66eacdd00fb693f1dabd2843ef40 | /MIdterm/easyRasch/R/Probability.R | b90c04b2685abbdc37435241f2afd42908c19916 | [] | no_license | domlockett/MIdterm | f36443cbb04eb8f1f3036cfa4f959124c2f8c174 | 983c5ddc0b69270abefe45f31ee54045dbaf0228 | refs/heads/master | 2021-04-06T06:27:07.258874 | 2018-03-14T23:15:04 | 2018-03-14T23:15:04 | 125,237,208 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,603 | r | Probability.R | #' Probability
#'
#' Returns probability that a student gets a question right given their ability, and the difficulty of the question
#'
#' @param raschObj An objecto of class Rasch
#' @param theta A proposed value representing a person's ability
#'
#' @return A vector of P and a vector of PQ
#' @author Dominique Lockett
#'
#' @examples
#' dom<-new("Rasch",name="dom", a=c(4,2,3,4,5,6,7,8,9), y=c(1,1,1,1,1,0,1,1,1))
#' probability(dom,.2)
#'
#' @note This is a help session file
#' @export
setGeneric(name="probability",
def=function(raschObj, theta){
standardGeneric("probability")
}
)
#' @export
setMethod("probability",
definition=function(raschObj, theta){
theta<-.2
#set up the whole deal like an S4 to save the trouble of redoing it all.
#Set up a function which cycles through each row of our dataset and return the evaluation of Rasch formula
P<- apply(d, 1, function(d) {return(exp(theta-d)/ 1+ exp(theta-d))})
#Now we need to also consider y, so we add it to our dataset
dat<-cbind(raschObj@a, raschObj@y)
#I can't figure out how to do simple evaluation like above so we manually add the function so
#we have a working If Else statement
PQ<- apply(dat, 1, function(dat) if (dat[2]==1) {return(exp(theta-dat[1])/ 1+ exp(theta-dat[1]))} else {return(1-(exp(theta-dat[1])/ 1+ exp(theta-dat[1])))})
#Now develop our list of return and give them names!
r<-list(P,PQ)
names(r)<-c("P","PQ")
return(r)})
|
28abe0ca0f0052834c5c28890418adcc5faaca43 | 52a9cd42e569609451cf803f24caeb98902679ec | /scRNA-seq/codes/practice11_smart-seq2_mouse_hsc_26.R | aa4b1c31c6c6de4290a7753250e9b173f72624ac | [] | no_license | hsuh001/project | 478135d6b2aa27bb05f00a0b31b415a6fc9f172c | ebd1980e0d045488674821b7420ce05fc3066c39 | refs/heads/main | 2023-02-28T03:04:29.268325 | 2021-02-05T14:29:34 | 2021-02-05T14:29:34 | 326,706,671 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,725 | r | practice11_smart-seq2_mouse_hsc_26.R | ########################################
# practice 11, Smart-seq2, mouse haematopoietic stem cell
# date: 2021.02.04 - 02.04
# author: Jing Xiao
# ref: https://jieandze1314.osca.top/04/04-11
########################################
# rm all objects --------------------------------------------------------------
rm(list = ls())
# set work directory ----------------------------------------------------------
# work_dir <- "/home1/jxiao/project/scRNA-seq/data/test_data"
work_dir <- "D:/JLab/project/scRNA-seq/data/test_data"
setwd(work_dir)
# load data -------------------------------------------------------------------
library(scRNAseq)
sce_nest_hsc <- NestorowaHSCData()
sce_nest_hsc
# class: SingleCellExperiment
# dim: 46078 1920
# metadata(0):
# assays(1): counts
# rownames(46078): ENSMUSG00000000001 ENSMUSG00000000003 ... ENSMUSG00000107391
# ENSMUSG00000107392
# rowData names(0):
# colnames(1920): HSPC_007 HSPC_013 ... Prog_852 Prog_810
# colData names(2): cell.type FACS
# reducedDimNames(1): diffusion
# altExpNames(1): ERCC
# gene annotation -------------------------------------------------------------
library(AnnotationHub)
ens_mm_v97 <- AnnotationHub(localHub = TRUE)[["AH73905"]]
anno <- select(
ens_mm_v97,
keys = rownames(sce_nest_hsc),
keytype = "GENEID",
columns = c("SYMBOL", "SEQNAME")
)
# 全部对应
sum(is.na(anno$SYMBOL))
# [1] 0
sum(is.na(anno$SEQNAME))
# [1] 0
# 接下来只需要匹配顺序即可
rowData(sce_nest_hsc) <- anno[match(rownames(sce_nest_hsc), anno$GENEID),]
sce_nest_hsc
# class: SingleCellExperiment
# dim: 46078 1920
# metadata(0):
# assays(1): counts
# rownames(46078): ENSMUSG00000000001 ENSMUSG00000000003 ... ENSMUSG00000107391
# ENSMUSG00000107392
# rowData names(3): GENEID SYMBOL SEQNAME
# colnames(1920): HSPC_007 HSPC_013 ... Prog_852 Prog_810
# colData names(2): cell.type FACS
# reducedDimNames(1): diffusion
# altExpNames(1): ERCC
# qc --------------------------------------------------------------------------
# Mitochondrial genes exist, but grep("MT", ...) also matches other seqnames
# such as CHR_MG4151_PATCH.
grep("MT", rowData(sce_nest_hsc)$SEQNAME)
#  [1] 17989 17990 17991 17992 17993 17994 17995 17996 17997 17998 17999 18000 18001 18002
# [15] 18003 18004 18005 18006 18007 18008 18009 18010 18011 18012 18013 18014 18015 18016
# [29] 18017 18018 18019 18020 18021 18022 18023 18024 18974
library(scater)
# Per-cell QC metrics; cells are discarded on low library size, low detected
# features, or high ERCC spike-in percentage.
stats <- perCellQCMetrics(sce_nest_hsc)
qc <- quickPerCellQC(
    stats,
    percent_subsets = c("altexps_ERCC_percent")
)
colSums(as.matrix(qc), na.rm = TRUE)
# low_lib_size            low_n_features high_altexps_ERCC_percent
#          146                        28                       241
#      discard
#          264
sce_nest_hsc_filtered <- sce_nest_hsc[, !qc$discard]
dim(sce_nest_hsc_filtered)
# [1] 46078  1656
##### Plot the QC metrics on the unfiltered data, coloured by discard status.
colData(sce_nest_hsc) <- cbind(colData(sce_nest_hsc), stats)
sce_nest_hsc$discard <- qc$discard
# Visualise which cells each QC criterion flagged.
gridExtra::grid.arrange(
    plotColData(sce_nest_hsc, y = "sum", colour_by = "discard") +
        scale_y_log10() + ggtitle("Total count"),
    plotColData(sce_nest_hsc, y = "detected", colour_by = "discard") +
        scale_y_log10() + ggtitle("Detected features"),
    plotColData(sce_nest_hsc, y = "altexps_ERCC_percent",
                colour_by = "discard") + ggtitle("ERCC percent"),
    ncol = 3
)
# normalization by deconvolution -----------------------------------------------
library(scran)
set.seed(101000110)
# Pre-cluster cells, then compute deconvolution size factors within clusters.
cluster_nest_hsc <- quickCluster(sce_nest_hsc_filtered)
sce_nest_hsc_filtered <- computeSumFactors(
    sce_nest_hsc_filtered,
    cluster = cluster_nest_hsc
)
# logNormCounts()
sce_nest_hsc_filtered <- logNormCounts(sce_nest_hsc_filtered)
summary(sizeFactors(sce_nest_hsc_filtered))
#    Min.  1st Qu.   Median     Mean  3rd Qu.     Max.
# 0.04368  0.42180  0.74844  1.00000  1.24926 15.92737
# measure the degree of change ------------------------------------------------
# and HVGs selection by proportion
# Model technical variance from ERCC spike-ins, then take the top 10% of genes.
dec_nest_hsc_spike <- modelGeneVarWithSpikes(
    sce_nest_hsc_filtered,
    spikes = "ERCC"
)
top_hvgs_nest_hsc <- getTopHVGs(dec_nest_hsc_spike, prop = 0.1)
length(top_hvgs_nest_hsc)
# [1] 384
# Inspect the fitted mean-variance trend (red points = spike-ins).
plot(
    dec_nest_hsc_spike$mean,
    dec_nest_hsc_spike$total,
    main = "Smart-seq2_mouse_hsc", pch = 16, cex = 0.5,
    xlab = "Mean of log-expression",
    ylab = "Variance of log-expression"
)
cur_fit <- metadata(dec_nest_hsc_spike)
points(cur_fit$mean, cur_fit$var, col = "red", pch = 16)
curve(cur_fit$trend(x), col = "dodgerblue", add = TRUE, lwd = 2)
names(cur_fit)
# [1] "mean"    "var"     "trend"   "std.dev"
# 92 unique ERCC spike-ins in total.
length(unique(names(cur_fit$mean)))
# [1] 92
# dimension reduce ------------------------------------------------------------
set.seed(101010011)
# denoisePCA keeps only the PCs attributable to biological variation.
sce_nest_hsc_filtered <- denoisePCA(
    sce_nest_hsc_filtered,
    subset.row = top_hvgs_nest_hsc,
    technical = dec_nest_hsc_spike
)
# Number of PCs retained by denoisePCA.
ncol(reducedDim(sce_nest_hsc_filtered, "PCA"))
# [1] 9
sce_nest_hsc_filtered <- runTSNE(sce_nest_hsc_filtered, dimred = "PCA")
# clustering, graph-based -----------------------------------------------------
snn_gr_nest_hsc <- buildSNNGraph(sce_nest_hsc_filtered, use.dimred = "PCA")
# Assign cluster labels via walktrap community detection on the SNN graph.
cluster_nest_hsc <- igraph::cluster_walktrap(snn_gr_nest_hsc)$membership
colLabels(sce_nest_hsc_filtered) <- factor(cluster_nest_hsc)
table(cluster_nest_hsc)
# cluster_nest_hsc
#   1   2   3   4   5   6   7   8   9
# 203 472 258 175 142 229  20  83  74
# t-SNE coloured by cluster to inspect the partition.
plotTSNE(sce_nest_hsc_filtered, colour_by = "label")
# detecting markers -----------------------------------------------------------
# Pairwise Wilcoxon tests for upregulated markers with a log-FC threshold.
markers_nest_hsc <- findMarkers(
    sce_nest_hsc_filtered,
    groups = colLabels(sce_nest_hsc_filtered),
    test.type = "wilcox",
    direction = "up",
    lfc = 0.5,
    row.data = rowData(sce_nest_hsc_filtered)[, "SYMBOL", drop = FALSE]
)
# use cluster 8 as an explanation
chosen_cluster <- "8"
markers_cluster_8 <- markers_nest_hsc[[chosen_cluster]]
# Top-10 markers for cluster 8.
interest_markers <- markers_cluster_8[markers_cluster_8$Top <= 10, ]
# NOTE(review): length() on a DataFrame counts COLUMNS (13 here), not the
# number of top markers — use nrow() for the marker count.
length(interest_markers)
# [1] 13
# Extract AUCs of cluster 8 versus every other cluster.
aucs <- getMarkerEffects(interest_markers, prefix = "AUC")
rownames(aucs) <- interest_markers$SYMBOL
library(pheatmap)
pheatmap(aucs, color = viridis::plasma(100))
# annotating cell type --------------------------------------------------------
library(SingleR)
mm_ref <- MouseRNAseqData()
mm_ref
# class: SummarizedExperiment
# dim: 21214 358
# metadata(0):
# assays(1): logcounts
# rownames(21214): Xkr4 Rp1 ... LOC100039574 LOC100039753
# rowData names(0):
# colnames(358): ERR525589Aligned ERR525592Aligned ... SRR1044043Aligned
#   SRR1044044Aligned
# colData names(3): label.main label.fine label.ont
# Convert rownames to gene symbols for the comparison.
renamed <- sce_nest_hsc_filtered
# mm_ref uses gene symbols, so the Ensembl rownames must be converted.
rownames(renamed) <- uniquifyFeatureNames(
    rownames(renamed),
    rowData(sce_nest_hsc_filtered)$SYMBOL
)
# Predict each cell's type from the reference dataset.
predict <- SingleR(
    test = renamed,
    ref = mm_ref,
    labels = mm_ref$label.fine
)
table(predict$labels)
#           B cells Endothelial cells      Erythrocytes      Granulocytes       Macrophages
#                61                 1              1005                 1                 2
#         Monocytes          NK cells           T cells
#               500                 1                85
# Cross-tabulate predicted cell types against graph-based clusters.
tab <- table(
    Pred = predict$labels,
    Cluster = sce_nest_hsc_filtered$label
)
pheatmap::pheatmap(
    log10(tab + 10),
    color = viridis::viridis(100)
)
|
196ae35bc94c5795eb2d26b72d74c6b6e3078808 | 7c93da7aba0814728b53cd2f0f307070d0260d44 | /man/my_lm.Rd | 88b401f62f2bd5f885f5f61014901f31c191d8fa | [] | no_license | ZhiqingYang/package302 | 738171326e54a00fa0a3511d137571f6a5c8b154 | 229d94a97c6f417020797f7bd725a0a57101daed | refs/heads/master | 2023-07-01T10:12:05.968289 | 2021-08-10T14:12:08 | 2021-08-10T14:12:08 | 373,801,783 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 484 | rd | my_lm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/my_lm.R
\name{my_lm}
\alias{my_lm}
\title{Fit linear models}
\usage{
my_lm(formula, data)
}
\arguments{
\item{formula}{An object of class \code{"formula"}: a symbolic description of the model to be fitted}

\item{data}{A data frame containing the variables in the model}
}
\value{
A data frame summarizing the fit, with one row per coefficient and columns for the
Estimate, Std. Error, t value, and Pr(>|t|).
}
\description{
my_lm is used to fit linear models.
}
\examples{
my_lm(mpg ~ hp + wt, mtcars)
}
\keyword{prediction}
|
0cf5089a629cccf798c77af730a941b98e68d007 | 898f6a55b8c3565ecf88e69c77a10e6e76872f53 | /src/31-dpi-reduction.R | e340a484f3939935f62a46900add3782f278b52c | [] | no_license | DIGI-VUB/HTR-tests | 93a47b578d656fc4844bb08b953f7b6ded86a8b7 | efa8fae88e4848c70a7828a8e724ecb96d421a7c | refs/heads/master | 2023-03-05T17:42:43.319745 | 2021-02-17T11:24:30 | 2021-02-17T11:24:30 | 283,727,132 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 685 | r | 31-dpi-reduction.R | library(zip)
library(magick)
setwd("/home/jwijffels/magick/imgs")
unzip("text_en_foto.zip", exdir = getwd())
setwd("/home/jwijffels/magick/imgs/text_en_foto")
x <- list.files(pattern = ".jpg$")
for(i in seq_along(x)){
cat(sprintf("%s/%s: %s ", i, length(x), x[i]), sep = "\n")
from <- x[i]
to <- sprintf("converted-%s", from)
info <- image_info(image_read(from))
system(sprintf("convert -resample 70 %s %s", from, to))
#convert -resample 70 RABrugge_TBO119_693_088.jpg output.jpg
img <- image_read(to)
img <- image_resize(img, sprintf("%sx%s", info$width, info$height))
image_write(img, path = from, quality = 100)
file.remove(to)
}
zip("img-dpi70.zip", files = x)
|
61e0f4b8b864904ae1f89f6ae1e3d2b118da7cf8 | 0cc6e78c988fabb6e2e1eb8bfe7d2515bf1e4367 | /R/pbo-package.R | 194e4bad5c1013105af9eb2213ffbe414b68ade2 | [] | no_license | mrbcuda/pbo | 9f90799855ea80eab6ab75f1d49acdf3575a7c3a | b132dda7b29e573ba86bfb378798706f6a6a54c1 | refs/heads/master | 2022-07-03T10:37:25.975465 | 2022-05-26T14:19:44 | 2022-05-26T14:19:44 | 15,350,480 | 41 | 13 | null | 2016-08-25T21:23:44 | 2013-12-20T23:16:39 | R | UTF-8 | R | false | false | 870 | r | pbo-package.R | #' Probability of backtest overfitting.
#' @description Computes the probability of backtest overfitting
#' @details Implements algorithms for computing the probability of
#' backtest overfitting, performance degradation and probability of loss,
#' and first- and second-order stochastic dominance,
#' based on the approach specified in Bailey et al., September 2013.
#' Provides a collection of pre-configured plots based on \code{lattice} graphics.
#' @author Matt Barry \email{mrb@@softisms.com}
#' @references See Bailey, David H. and Borwein, Jonathan M. and
#' Lopez de Prado, Marcos and Zhu, Qiji Jim, The Probability of Back-Test
#' Overfitting (September 1, 2013). Available at SSRN. See
#' \url{https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2326253}.
#' @keywords probability backtest overfitting PBO CSCV
#' @name pbo-package
#' @docType package
NULL
|
e6d8f70858b3eb32dc2d4b47de82c288a4e89fc1 | 74b3857116ff10aad01fcabeed10e84ee23ca918 | /Frecuencias/Graficos/Prueba_Sebastian.R | c5faa473da334c7a60f0e15c6f0733df51993df9 | [] | no_license | TBmex/baps_1177_linnage4 | b492591a636d47cb7cf8f6531fdff146d904e5d9 | 28f6bc89e4bc43e746745868a412eb0aac2b6c47 | refs/heads/master | 2023-04-17T00:57:06.228653 | 2021-04-27T08:17:33 | 2021-04-27T08:17:33 | 316,462,878 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,446 | r | Prueba_Sebastian.R | #Tabla de Sebastian
Tabla_Sebastian <- Transmission %>% select(Genotipo, N, N_incluster) %>%
mutate(N_NO_incluster = N - N_incluster)
Genotipos_Españoles_S <- Tabla_Sebastian %>%
filter(Genotipo %in% c(5,9,7,8,15,4,1)) %>%
select(-Genotipo) %>% summarise_all(funs(sum))
Genotipos_mixtos_S <- Tabla_Sebastian %>%
filter(Genotipo %in% c(2,3,10,6,14,12)) %>%
select(-Genotipo) %>% summarise_all(funs(sum))
Genotipos_NO_Españoles_S <- Tabla_Sebastian %>%
filter(Genotipo %in% c(13, 11, 16)) %>%
select(-Genotipo) %>% summarise_all(funs(sum))
# Tabla y subset de genotipos Genotipos_Españoles, Genotipos_mixtos, Genotipos_NO_Españoles
Genotipos_3_grupos_S <- bind_rows(Genotipos_Españoles_S, Genotipos_mixtos_S, Genotipos_NO_Españoles_S) %>%
mutate(Genotipos = c("Genotipos_Españoles", "Genotipos_mixtos", "Genotipos_NO_Españoles")) %>%
select(Genotipos, N, N_incluster, N_NO_incluster)
write_csv(Genotipos_3_grupos_S, file = "Genotipos_3_grupos_S")
Gen_RefGen <- Genotipos_3_grupos_S[c(1,2),c(3,4)]
fisher.test(Gen_RefGen)
Subset <- Subset %>%
mutate(odds_ratio = c(Genotipos_3_grupos_OR[["estimate"]], "reference", NA),
conf.low = c(Genotipos_3_grupos_OR[["conf.int"]][[1]],"reference", NA),
conf.high = c(Genotipos_3_grupos_OR[["conf.int"]][[2]],"reference", NA),
pvalue = c(Genotipos_3_grupos_OR[["p.value"]],"reference", NA))
fisher.test(Gen_RefGen, alternative = "greater") |
710b2942fc148540dc310a6270faeee23095e53b | 3d6ce5aeec4d36945cb1fbedcf4107425322358d | /code/protein_combine_single_model.R | 1530537b8d7302773b54b708425b8a7cadcb0a97 | [] | no_license | weidai00/SPNG | 1bcd398c4a3b19280819f34d9375880a12478b7e | 85aee84d693c72b1bb714351f4f40c427e10849a | refs/heads/main | 2023-04-09T10:04:02.439793 | 2021-04-19T00:36:21 | 2021-04-19T00:36:21 | 321,524,483 | 0 | 0 | null | 2021-04-14T11:18:22 | 2020-12-15T01:58:19 | null | UTF-8 | R | false | false | 1,087 | r | protein_combine_single_model.R |
Type = "SPNG"
#stacking_test
test_data = vector()
data1 = vector()
data2 = vector()
data3 = vector()
data4 = vector()
data5 = vector()
data6 = vector()
xgboost = vector()
svm = vector()
rf = vector()
nb = vector()
knn = vector()
lightgbm = vector()
data1 =read.csv(sprintf("result/%s_single_feature_xgboost_test_label.csv",Type),header = T)
xgboost = data1[,-1]
data2 =read.csv(sprintf("result/%s_single_feature_svm_test_label.csv",Type),header = T)
svm = data2[,-1]
data3 =read.csv(sprintf("result/%s_single_feature_rf_test_label.csv",Type),header = T)
rf = data3[,-1]
data4 =read.csv(sprintf("result/%s_single_feature_nb_test_label.csv",Type),header = T)
Class = data4$Class
nb = data4[,-1]
data5 =read.csv(sprintf("result/%s_single_feature_knn_test_label.csv",Type),header = T)
knn = data5[,-1]
data6 =read.csv(sprintf("result/%s_single_feature_lightgbm_test_label.csv",Type),header = T)
lightgbm = data6[,-1]
test_data = cbind(Class,nb,rf,svm,xgboost,lightgbm,knn)
write.csv(test_data,sprintf("feature_test/%s_test_stacking_label.csv",Type),row.names = F)
|
8ed1da9196ac4b696b530a194bc8272053ebc590 | a809165d0a0dc39e01b70f775f48c5d2032c4862 | /analysis/Figure-8E_Reprocess_Wang_2019.r | d4945c213d800c67fb1e1dff4f2ed58368bdeca3 | [] | no_license | malihhhh/su2c-gsc-scrna | cf22c5784da244cb9c12baa911155533ac2c052f | 4f3ba85d49d1d138553169f0c953a643e10da108 | refs/heads/master | 2023-04-02T00:28:22.424088 | 2023-03-18T16:31:06 | 2023-03-18T16:31:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,177 | r | Figure-8E_Reprocess_Wang_2019.r | ###############################################################################
library(Seurat)
library(biomaRt)
source('whitley_scRNA_helpers.R')
top_dir <- '~/projects/su2c_v2'
raw_data_dir <- file.path(top_dir, 'data/raw/scRNA/Wang_2019_CancerDiscov')
preproc_data_dir <- file.path(top_dir, 'data/preprocessed/scRNA/Wang_2019_CancerDiscov')
gene_sets_dir <- file.path(top_dir, '/data/preprocessed/GeneSets')
if (!dir.exists(preproc_data_dir)) {
dir.create(preproc_data_dir)
}
# Load Genesets
gene_sets_file <- 'genesets_and_info.rds'
genesets_and_info <- readRDS(file = file.path(gene_sets_dir, gene_sets_file))
genesets <- genesets_and_info$gene_set_list
# rename RNA.GSC.c1, RNA.GSC.c2 to Developmental, Injury Response, repectively
genesets$Developmental <- genesets$RNA.GSC.c1
genesets$RNA.GSC.c1 <- NULL
genesets$Injury_Response <- genesets$RNA.GSC.c2
genesets$RNA.GSC.c2 <- NULL
all_files <- dir(raw_data_dir, recursive = TRUE)
matrix_files <- all_files[grep('matrix.gene_vs_barcode.tsv$', all_files)]
meta_data <- data.frame(CellID = character(0), SampleID = character(0))
ensembl_current <- useMart(host="www.ensembl.org",
biomart='ENSEMBL_MART_ENSEMBL')
# listDatasets(ensembl_current)[grep('hsapiens', listDatasets(ensembl_current)$dataset), ]
ensembl_current <- useDataset(dataset = 'hsapiens_gene_ensembl',
mart = ensembl_current)
attr <- listAttributes(ensembl_current)
download_biomart <- TRUE
if (!download_biomart) {
reload <- TRUE
tryCatch({
BM.mapping <- readRDS(file = file.path(preproc_data_dir, 'BM_mapping.rds'))
reload <- FALSE
})
if (reload) {
print('reloading ENSEMBL biomart mappings as none detected in preproc_data_dir')
}
download_biomart <- reload
}
if (download_biomart) {
BM.mapping <- getBM(attributes = c('hgnc_symbol',
'chromosome_name'),
mart = ensembl_current)
saveRDS(BM.mapping, file = file.path(preproc_data_dir, 'BM_mapping.rds'))
}
###############################################################################
## Define Routines
# Append all-zero rows to a matrix for genes it does not yet contain, keeping
# the existing rows untouched and naming the new rows after `new_genes`.
add_zeros <- function(x, new_genes) {
  existing_names <- rownames(x)
  padded <- rbind(x, matrix(0, nrow = length(new_genes), ncol = ncol(x)))
  rownames(padded) <- c(existing_names, new_genes)
  padded
}
# End-to-end preprocessing for one set of Wang 2019 matrices: merge per-sample
# gene-by-barcode matrices onto a common gene universe, run the Seurat pipeline
# (normalisation, TSNE, clustering), score gene sets, compute z-scored average
# chromosome expression, and save the resulting Seurat object.
# NOTE(review): this function reads the global `meta_data` accumulator (defined
# at top level) rather than a local one, and `scoring_subroutine` is passed the
# global `preproc_data_dir` instead of the `output_dir` parameter — both look
# accidental; confirm before reuse outside this script.
Wang_preprocessing_routine <- function(input_dir, files_use, output_dir, genesets, BM.mapping, prefix) {
  print('combining data')
  print(Sys.time())
  first_loaded <- TRUE
  pb <- txtProgressBar(min = 0, max = length(files_use), style = 3)
  for (i in 1:length(files_use)) {
    f <- files_use[i]
    loaded_mat <- as.matrix(read.delim(file = file.path(input_dir, f), header = TRUE, row.names = 1))
    # SampleID is the leading GSM accession in the file name.
    base_filename <- basename(f)
    SampleID <- regmatches(base_filename, regexpr('^GSM[0-9]*', base_filename))
    meta_data <- rbind(meta_data, data.frame(CellID = paste0(SampleID, colnames(loaded_mat)),
                                             SampleID = rep(SampleID, ncol(loaded_mat))))
    if (first_loaded) {
      first_loaded <- FALSE
      all_genes <- rownames(loaded_mat)
      combined_mat <- loaded_mat
    } else {
      # Expand both matrices to the union of genes, padding missing genes with
      # zero counts, before column-binding the new sample.
      all_genes <- union(rownames(loaded_mat), all_genes)
      diff_genes_1 <- setdiff(all_genes, rownames(loaded_mat))
      diff_genes_2 <- setdiff(all_genes, combined_mat)
      # set zeros for genes where no counts detected
      loaded_mat <- add_zeros(loaded_mat, diff_genes_1)
      combined_mat <- add_zeros(combined_mat, diff_genes_2)
      combined_mat <- cbind(combined_mat[all_genes,], loaded_mat[all_genes, ])
    }
    rm(loaded_mat)
    gc(full = TRUE)
    setTxtProgressBar(pb, i)
  }
  rownames(meta_data) <- colnames(combined_mat) <- meta_data$CellID
  rownames(combined_mat) <- all_genes
  # Run Seurat Pipeline
  seurat_obj <- seurat_subroutine(combined_mat, meta_data)
  rm(combined_mat)
  rm(meta_data)
  gc(full = TRUE)
  # Run TSNE
  seurat_obj <- Seurat::RunTSNE(seurat_obj)
  # Run Clustering
  seurat_obj <- Seurat::FindClusters(seurat_obj, force.recalc = TRUE, print.output = FALSE)
  seurat_obj@meta.data$cluster <- seurat_obj@meta.data$res.0.8
  # scoring
  seurat_obj <- scoring_subroutine(seurat_obj, genesets, preproc_data_dir, paste0(prefix, '_full'))
  # calculate z-scored avg chromosome expression
  chr_avg_output_list <- calcAvgChrMat(seurat_obj, BM.mapping, chr.use = as.character(1:22))
  chr_mat <- chr_avg_output_list$output.mat
  rownames(chr_mat) <- paste0('chr.', rownames(chr_mat))
  seurat_obj <- Seurat::AddMetaData(seurat_obj, as.data.frame(t(chr_mat)))
  # write.csv(chr_avg_output_list$chr.mapping, file = file.path(output_dir, paste0(prefix, '_full_chr_mapping.csv')))
  # write.csv(chr_avg_output_list$chr.summary, file = file.path(output_dir, paste0(prefix, '_full_chr_summary.csv')))
  # seurat_obj <- identify.glioma(seurat_obj)
  # # save results
  print('Saving Full Data')
  print(Sys.time())
  saveRDS(seurat_obj, file = file.path(output_dir, paste0(prefix, '_full_seurat.rds')))
  # ## Rerun Seurat pipeline on just glioma cells (disabled)
  # # Run Seurat Pipeline
  # obj_raw_data <- seurat_obj@raw.data
  # obj_meta_data <- seurat_obj@meta.data
  # rm(seurat_obj)
  # gc(full = TRUE)
  # glioma_cells <- rownames(obj_meta_data)[obj_meta_data$is.glioma == 'glioma']
  # obj_raw_data <- obj_raw_data[,glioma_cells]
  # obj_meta_data <- obj_meta_data[glioma_cells,]
  # gc(full = TRUE)
  # seurat_obj_glioma <- seurat_subroutine(obj_raw_data, obj_meta_data)
  # # Run TSNE
  # seurat_obj_glioma <- Seurat::RunTSNE(seurat_obj_glioma)
  # # Run Clustering
  # seurat_obj_glioma <- Seurat::FindClusters(seurat_obj_glioma, force.recalc = TRUE, print.output = FALSE)
  # seurat_obj_glioma@meta.data$cluster <- seurat_obj_glioma@meta.data$res.0.8
  # # scoring
  # seurat_obj_glioma <- scoring_subroutine(seurat_obj_glioma, genesets, preproc_data_dir, paste0(prefix, '_glioma'))
  # print('Saving Filtered Data')
  # print(Sys.time())
  # saveRDS(seurat_obj_glioma, file = file.path(output_dir, paste0(prefix, '_glioma_seurat.rds')))
  # rm(seurat_obj_glioma)
  # gc(full = TRUE)
  # print('Finished')
  # print(Sys.time())
}
###############################################################################
## Do for snRNA-seq samples
snRNA_samples <- matrix_files[1:10]
# snRNA_samples <- matrix_files[1:2]
print('snRNA samples')
basename(snRNA_samples)
Wang_preprocessing_routine(input_dir = raw_data_dir,
files_use = snRNA_samples,
output_dir = preproc_data_dir,
genesets = genesets,
BM.mapping = BM.mapping,
prefix = 'Wang_snRNA')
gc(full = TRUE)
## Do for scRNA-seq samples
scRNA_samples <- matrix_files[11:length(matrix_files)]
# scRNA_samples <- matrix_files[11:12]
print('snRNA samples')
basename(scRNA_samples)
Wang_preprocessing_routine(input_dir = raw_data_dir,
files_use = scRNA_samples,
output_dir = preproc_data_dir,
genesets = genesets,
BM.mapping = BM.mapping,
prefix = 'Wang_scRNA')
|
aaed5f2d30df542261a19ea38040b8ff8a2b5940 | 6088e2bb2b05dd8ab9f88e4873a18788d99d7a74 | /man/wavPacketBasis.Rd | 39efb1c8399f2bd5f3d10778a07900fb5ed76d46 | [] | no_license | wconstan/wmtsa | bb0c1ff3be00ef0d719bcd559945303a6949505c | 3329d400256153490f8e7015b3dee6a531ea348f | refs/heads/master | 2021-01-01T05:24:42.540549 | 2017-12-06T02:59:32 | 2017-12-06T02:59:32 | 58,664,317 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,594 | rd | wavPacketBasis.Rd | %% WARNING: This file was automatically generated from the associated
%% wav_xform.mid file. Do NOT edit this Rd file to make a change. Instead,
%% edit the wav_xform.mid file in the project MID directory. Once the
%% wav_xform.mid file has been updated, this Rd file, and all other
%% documentation (such as corresponding LaTeX, SGML and HTML documentation)
%% should be regenerated using the mid.pl Perl script.
%% R documentation for the wavPacketBasis function
\name{wavPacketBasis}
\alias{wavPacketBasis}
\title{Extract wavelet packet basis from a DWPT}
\concept{waveletwavelet packet basistransforms, discrete wavelet packet transform}
\usage{wavPacketBasis(x, indices=0)}
\description{Returns the DWPT crystals (in a list) corresponding to the
basis specified by the indices vector. The indices
are mapped as follows:
\describe{
\item{0}{original series}
\item{1:2}{\eqn{\{W_{1,0}, W_{1,1}\}}{W(1,0), W(1,1)}, i.e., all level 1 crystals}
\item{3:6}{\eqn{\{W_{2,0},\ldots, W_{2,3}\}}{W(2,0),...,W(2,3)}, i.e., all level 2 crystals}}
and so on. If the indices do not form a basis, an error is issued.}
\arguments{
\item{x}{an object of class \code{wavTransform} associated with the output
of the \code{wavDWPT} function.}
\item{indices}{an integer vector. Each integer denotes a particular crystal of the DWPT
to extract. The set of crystals shoudl form a basis, i.e., the collective frequency ranges
associated with the set of crystals should span normalized frequencies [0, 1/2]. The indices for each
DWPT level and the corresponding (ideal) normalized frequency ranges are listed in the table below:
\describe{
\item{0}{Frequency range: [0, 1/2], associated with crystal \eqn{W_{0,0}}{W(0,0)} (the original series).}
\item{1,2}{Frequency range: [0,1/4],[1/4, 1/2], associated with crystals \eqn{W_{1,0}$, $W_{1,1}}{W(1,0), W(1,1)}, respectively.}
\item{3,4,5,6}{Frequency range: [0,1/8],[1/8, 1/4],[1/4,3/8],[3/8, 1/2], associated with crystals
\eqn{W_{2,0}$,$W_{2,1}$,$W_{2,2}$,$W_{2,3}}{W(2,0),W(2,1),W(2,2),W(2,3)}, respectively.}}
and so forth.}
}
\seealso{
\code{\link{wavDWPT}}, \code{\link{wavBestBasis}}.}
\examples{
## calculate a 3-level DWPT of the sunspots series
W <- wavDWPT(sunspots, n.level=3)
## extract the level 1 basis
W12 <- wavPacketBasis(W, 1:2)
## obtain the names of the crystals that were
## extracted: "w1.0" "w1.1"
names(W12$data)
## extract basis corresponding to crystal set:
## "w2.0" "w2.1" "w1.1". This set comprises a
## split-level basis
Wsplit <- wavPacketBasis(W, c(3,4,2))
names(Wsplit$data)
}
\keyword{univar}
|
bbc76c3d48a996681a3f710cff1e1f8b4ce005c7 | d8963252a704cf30857e1cb43bc44bc14c468652 | /R/AdvancedData Analyis HW/HW06_MinxiangPan_mp3335.R | 1c0cc0dc22bde3d479818b3dc6dfab4aa8d6fd85 | [] | no_license | panda4869/My-Project | e2290ab8adb25b181885ae37a28e2c990059ee26 | 1ec0b1d77c0964793c980a95070b006795d369aa | refs/heads/master | 2021-01-10T05:24:06.427466 | 2018-02-27T01:39:53 | 2018-02-27T01:39:53 | 51,176,985 | 0 | 0 | null | 2017-08-16T17:00:40 | 2016-02-05T22:04:31 | R | UTF-8 | R | false | false | 2,318 | r | HW06_MinxiangPan_mp3335.R | #problem 1
data(ChickWeight)
View(ChickWeight)
class(ChickWeight$Diet)
#anova on original data
fit1<-lm(weight~Diet,data=ChickWeight[which(ChickWeight$Time==18),])
anova(fit1)
row.mat<-match(ChickWeight[which(ChickWeight$Time==18),]$Chick,ChickWeight[which(ChickWeight$Time==0),]$Chick)
#adjust for birthweight
#combine the data
Weight.b<-ChickWeight[which(ChickWeight$Time==0),]$weight[row.mat]
Diet.ad<-ChickWeight[which(ChickWeight$Time==18),]$Diet
dat<-cbind(ChickWeight[which(ChickWeight$Time==18),],Weight.b)
plot(weight~Diet,data=dat)
View(ChickWeight[which(ChickWeight$Time==18),])
View(ChickWeight[which(ChickWeight$Time==0),])
#add an additional varaible
fit2<-lm(weight~Weight.b+Diet,data=dat)
summary(fit2)
summary(aov(weight~Weight.b+Diet,data=dat))
##lsmean
install.packages("lsmeans")
library("lsmeans")
library("estimability")
fit1.rg1<-ref.grid(fit1)
lsmeans(fit1.rg1,"Diet")
fit2.rg1<-ref.grid(fit2)
lsmeans(fit2.rg1,"Diet")
tapply(dat$weight, dat$Diet, FUN=mean)
tapply(dat$Weight.b, dat$Diet, FUN=mean)
# normality
hist(resid(fit1))
qqnorm(resid(fit1))
qqline(resid(fit1))
shapiro.test(resid(fit1))
#it is normal
#
par(mfrow=c(1,2))
hist(resid(fit2))
qqnorm(resid(fit2))
qqline(resid(fit2))
shapiro.test(resid(fit2))
#unequal variance
bartlett.test(weight~Diet,data=ChickWeight[which(ChickWeight$Time==18),])
bartlett.test(weight-Weight.b~Diet,data=dat)
#variance are equal
#Test for Parallelism
summary(aov(weight~Weight.b*Diet,data=dat))
#no parallelism
#Problem 2
library(nlme)
#compound symmetry structure
fit.gls<-gls(weight~Diet*Time,data=ChickWeight[which(ChickWeight$Time==10 |ChickWeight$Time==18 | ChickWeight$Time==21),],
correlation=corCompSymm(form=~1|Chick),method="REML")
summary(fit.gls)
anova(fit.gls)
#unstructured covariance
fit.ustr<-gls(weight~Diet*Time,data=ChickWeight[which(ChickWeight$Time==10 |ChickWeight$Time==18 | ChickWeight$Time==21),],
correlation=corSymm(form=~1|Chick),weights=varIdent(form=~1|Time),method="REML")
summary(fit.ustr)
anova(fit.ustr)
anova(fit.gls,fit.ustr)
# normality test
hist(resid(fit.gls))
qqnorm(resid(fit.gls))
qqline(resid(fit.gls))
shapiro.test(resid(fit.gls))
hist(resid(fit.ustr))
qqnorm(resid(fit.ustr))
qqline(resid(fit.ustr))
shapiro.test(resid(fit.ustr))
# maybe we should try Mauchly's sphericity test |
e34b3277ba5197eff4fc1d9c93110967b70356c4 | c5a439705bfeeab8c207e63b53f32e29ae2b81e5 | /Archive/code scraps from makePointsQuest().R | 25e76ac2cbed321d1193ff225458b4e48e63e208 | [] | no_license | Brent-Dickinson/estimation-package | e98b8de8db442170567aa7b099ea75c534d7ee05 | b53850d9d61853861ae21f9634efe1a21ad7d9b5 | refs/heads/master | 2021-01-22T09:47:41.915459 | 2013-04-10T13:41:05 | 2013-04-10T13:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,317 | r | code scraps from makePointsQuest().R | plot_su = read.csv(paste(datawd, 'delete me/sample_point.csv', sep = ''), stringsAsFactors = F)
quest_su = quest_extra
quest_su = quest_su[-which(quest_su$QUEST_NUMBER %in% int),]
quest_su = quest_su[-which(is.na(quest_su$OWNER)),]
quest_su = quest_su[quest_su$AC_WOOD >= 1,]
ref_su = read.csv(paste(datawd, 'state_county_su_reference.csv', sep = ''), stringsAsFactors = F)
source('functions/makeSu.R')
quest_expanded = makeSu(quest = quest_su, plot = plot_su, ref = ref_su)
# to fix intensified sample:
#pc = read.csv('c:/users/ffrc_brent/dropbox/mary ct/point count by owner.csv', header = T, stringsAsFactors = F)
#pc_imp = pc[pc$CountOfPLOT > 1,]
#nums = quest_11$QUEST_NUMBER[quest_11$QUEST_NUMBER %in% pc_imp$QUEST]
#quest_11$POINT_COUNT[quest_11$QUEST_NUMBER %in% pc_imp$QUEST] = pc_imp$CountOfPLOT[pc_imp$QUEST %in% nums]
## notes on the above fix:
# the above pc file does not contain all of the QUEST_NUMBER's from the intensified sample. the reason for this is unknown at present.
# those ownerships from the intensified sample without a POINT_COUNT value from pc were assigned a POINT_COUNT value of 1 above in line 33.
# that approach is reasonable because the sampling intensity is about 1 point per 760 acres and all the ownerships without POINT_COUNT values from pc are under 300 acres in size.
|
dea1809f311ce2ec2e1728edc38d978e570776a3 | c47a9dab242120ea05ad8877af7c7efdec48dcb8 | /egssimtools.Rcheck/00_pkg_src/egssimtools/R/read_arch_genome.R | b0abb5c68f0f291325e688906bc9ece57ebba489 | [] | no_license | rscherrer/ExplicitGenomeSpeciation | 4ee1b8fdf3c19a085d750e73ce94ae4f2b25a070 | 55b3d4cf79f97bf6c91d366ce7274e411d612510 | refs/heads/raph | 2021-08-10T20:54:15.903930 | 2021-04-14T14:12:54 | 2021-04-14T14:12:54 | 184,590,896 | 1 | 1 | null | 2021-04-14T14:12:55 | 2019-05-02T13:59:12 | C++ | UTF-8 | R | false | false | 804 | r | read_arch_genome.R | #' Read locus-specific genome architecture
#'
#' Make a table from locus-specific genetic architecture details
#'
#' @param folder Path to the simulation
#' @param filename Name of the architecture file
#'
#' @return A data frame with, for each locus, its location, trait, effect, dominance, chromosome and degree
#'
#' @examples
#'
#' root <- system.file("extdata", "example_1", package = "egssimtools")
#' read_arch_genome(root)
#'
#' @export
read_arch_genome <- function(folder, filename = "architecture.txt") {
arch <- read_arch(folder, filename)
data.frame(
locus = seq(arch$location),
location = arch$locations,
trait = factor(arch$traits),
effect = arch$effects,
dominance = arch$dominances,
chromosome = get_chromosomes(arch),
degree = get_degrees(arch)
)
}
|
6f5c3a666f1ca294eef6f2723617f8c32baa5194 | 9c27c6dd264cc1699632f571a067a31419a67bac | /predictive graduation model.R | f49a10875ade36d2cee9d9ffd86ad7f1303b24a4 | [] | no_license | jasonmpfaff/Predictive-models-in-R | bf66b6c015eabd2663ff5e8e2aac1b4756f41d91 | b4251df7fecd1f57c2b4e532fe3ceb9001ae7bd0 | refs/heads/master | 2021-01-18T23:58:00.721864 | 2018-04-12T10:20:46 | 2018-04-12T10:20:46 | 47,942,926 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,819 | r | predictive graduation model.R | ##read in base data set##
boosted<-read.csv("modeldata.csv")
install.packages("xlsx")
install.packages("ada")
install.packages("lattice")
install.packages("caret")
install.packages("e1071")
##load the below packages and install any not already installed using install.packages##
library(xlsx)
library(ada)
library(lattice)
library(caret)
library(e1071)
##run the model ensemble first##
#random forest##
rfmodel<-train(as.factor(grad) ~ age+marital+mom+efc+single+ged+year+clock+reenter+sex,data=boosted, method="rf")
rfmodel
rfmodel$finalModel
rfpredictions<-predict(rfmodel, newdata=boosted, type="raw")
rfpredictionsII<-data.frame(rfpredictions, boosted$SyStudentID)
rfpredictionsII
##bayes glm##
bglmmodel<-train(as.factor(grad) ~ age+marital+mom+efc+single+ged+year+clock+reenter+sex,data=boosted, method="bayesglm")
bglmmodel
bglmmodel$finalModel
bglmpredictions<-predict(bglmmodel, newdata=boosted, type="raw")
bglmpredictionsII<-data.frame(bglmpredictions, boosted$SyStudentID)
bglmpredictionsII
confusionMatrix(bglmmodel)
##NaiveBayes##
nbmodel<-train(as.factor(grad) ~ age+marital+mom+efc+single+ged+year+clock+reenter+sex,data=boosted, method="nb")
nbmodel
nbmodel$finalModel
nbpredictions<-predict(nbmodel, newdata=boosted, type="raw")
nbpredictionsII<-data.frame(nbpredictions, boosted$SyStudentID)
nbpredictionsII
confusionMatrix(nbmodel)
##neural network##
nnetmodel<-train(as.factor(grad) ~ age+marital+mom+efc+single+ged+year+clock+reenter+sex,data=boosted, method="nnet")
nnetmodel
nnetmodel$finalModel
nnetpredictions<-predict(nnetmodel, newdata=boosted, type="raw")
nnetpredictionsII<-data.frame(nnetpredictions, boosted$SyStudentID)
nnetpredictionsII
confusionMatrix(nnetmodel)
##boosted ada##
adamodel<-ada(grad~age+marital+mom+efc+single+ged+year+clock+reenter+sex+bglm+nb+nnet, data=boosted, loss="logistic", type="discrete")
adamodel
adamodelpredictions<-predict(adamodel, newdata=boosted)
adamodelpredictionsII<-data.frame(adamodelpredictions, boosted$SyStudentID)
adamodelpredictionsII
confusionMatrix(adamodel)
##traditional additive multivariate logistic regression##
##this is the base scoring model##
model <- glm(grad ~ age+marital+mom+efc+single+ged+year+clock+reenter+sex+bglm+nb+nnet+bonus, data=boosted, family=binomial)
evmodel <- evtree(grad ~ age+marital+mom+efc+single+ged+year+clock+reenter+sex+bglm+nb+nnet+bonus, data=boosted)
install.packages("partykit")
install.packages("rpart")
install.packages("evtree")
library(evtree)
library(partykit)
summary(model)
coeffs1<-coefficients(model)
coeffs2<-exp(coeffs1)
coeffs2
##final results frame
#modeldata<-data.frame(bonuslist, rfpredictionsII, bglmpredictionsII, nbpredictionsII,nnetpredictionsII, boosted)
#modeldata
## GLM score and format output and output scores to spreadsheet##
vscores<-predict(model, newdata=boosted, type="response")
vscores1<-exp(vscores)
vscores2<-(vscores1-1)
vscorepredict<-data.frame(vscores2, boosted$SyStudentID, boosted$grad)
colnames(vscorelist)<-c("V-score", "studentID", "grad")
vscorepredict
##visuals##
varplot(adamodel, TRUE, FALSE)
plot(adamodel)
plot(adamodel, FALSE, FALSE)
plot(adamodel, TRUE, FALSE)
plot(jitter(grad)~age, boosted)
boxplot(jitter(grad)~clock,boosted)
forplot<-data.frame(boosted$grad,boosted$age, boosted$marital, boosted$mom, boosted$efc, boosted$single, boosted$ged, boosted$year, boosted$clock, boosted$reenter, boosted$sex)
splom(forplot)
write.xlsx(c(vscorepredict),"\\\\deltafile01/DeltaUsers/001VIR/NonVABeach/jason.pfaff/My Documents/finalscorelist.xlsx")
write.xlsx(c(modeldata),"\\\\deltafile01/DeltaUsers/001VIR/NonVABeach/jason.pfaff/My Documents/modeldata.xlsx")
write.xlsx(c(bglmpredictionsII, nbpredictionsII, nnetpredictionsII,bonuslist ),"\\\\deltafile01/DeltaUsers/001VIR/NonVABeach/jason.pfaff/My Documents/masterlist.xlsx")
|
b1941b4a8dad82ea469b25eda8937f9ee8eca747 | bf1d60b316a9770810c18e29c33493533b80d039 | /PublicPlot.R | f0cd06f310d49f685011e56ac0a6995a7f08b610 | [] | no_license | dnbarron/PublicPrivatePay | f0b17d50386fdff62fa5d54046c022c0384965ad | 8b4dd84e5813aedba96bcb88e672c208f59b4a02 | refs/heads/master | 2020-12-24T15:49:28.094289 | 2012-09-11T09:47:36 | 2012-09-11T09:47:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,582 | r | PublicPlot.R | pl.segs <- c("Personal service","Unskilled manual","Semi-skilled, manual",
"Junior non-manual","Skilled manual","Int. non-man, foreman",
"Foreman manual","Managers, small","Int. non-manual",
"Professional empolyees","Managers, large")
b0 <- c(0.115,0.093,0.081,0.033,0.021,0.157,0.049,0.031,0.016,-0.114,0.002)
b0.se <- c(0.017,0.019,0.013,0.01,0.02,0.022,0.022,0.025,0.012,0.025,0.016)
const <- c(0.955,1.142,1.067,1.181,0.843,1.368,1.25,1.17,1.202,1.456,1.38)
public <- c(0.118,0.116,0.18,0.024,0.097,0.154,0.162,0.061,0.04,-0.1,0.058)
male <- c(0.061,0.087,0.173,0.079,0.244,0.144,0.258,0.202,0.148,0.073,0.193)
int <- c(-0.031,-0.077,-0.192,0.041,-0.085,0.008,-0.154,-0.072,-0.068,-0.021,-0.108)
pvt.fem <- const
pub.fem <- const + public
pvt.male <- const + male
pub.male <- const + public + male + int
ll <- gl(4,11,labels=c("Private, Female","Public, Female","Private, Male","Public, Male"))
seg <- gl(11,k=1,length=44,labels=pl.segs)
plot.data <- data.frame(SEG=seg,Type=ll,Prediction=c(pvt.fem,pub.fem,pvt.male,pub.male))
ix <- order(plot.data$Prediction)
plot.data <- plot.data[ix,]
nn <- ggplot(data=plot.data,aes(x=SEG))
nn + geom_point(aes(y=Prediction,shape=Type)) + coord_flip()
plt.dta <- data.frame(SEG=as.character(pl.segs),Public0=b0,Public0se=b0.se,Public1=b0.1,
Public1se=b0.1se,Male=male,Malese=male.se,Interaction=intact,Interactionse=intact.se)
plt.dta2 <- transform(plt.dta,pred=Public1+Male+Interaction)
pp <- ggplot(data=plt.dta2, aes(x=SEG))
pp + geom_pointrange(aes(y=Public0,ymin=Public0-2*Public0se,ymax=Public0+2*Public0se))
pp + geom_point(aes(y=Public1)) + geom_point(aes(y=pred),colour="red")
pp + geom_pointrange(aes(y=Public1))
pvt <- c(0.068,0.042,0.115,0.247,0.142,0.043,0.054,0.071,0.078,0.041,0.100)
pblc <- c(0.095,0.041,0.072,0.192,0.025,0.038,0.024,0.036,0.306,0.064,0.108)
bar.dta <- data.frame(Employees=c(pvt,pblc),Sector=gl(2,11,labels=c("Private","Public")),
SEG=gl(11,k=1,length=22,labels=pl.segs))
bb <- ggplot(data=bar.dta,aes(x=SEG)) + opts(axis.text.x=theme_text(size=15),axis.text.y=theme_text(size=15),axis.title.x=theme_text(size=15,vjust=0)) + theme_bw()
bb + geom_bar(aes(y=Employees,fill=Sector),position="dodge") + coord_flip() + xlab("") + scale_y_continuous(name="Employees",formatter="percent")
xtabs(~jbseg+PrivateSect,data=dta3,subset=wave==17&ss)
#### baseline
b0 <- c(1.16,1.28,1.43,1.46,1.61,1.78,1.82,1.85,2.04,2.13)
b1 <- c(.171,.074,.137,.095,.037,.046,.126,.042,.065,-.045)
(b1)/b0
levels(dta3$jbseg) <- str_trim(dta3$jbseg)
ss.pl <- pl.segs %in% dta3$jbseg
pl.segs <- c("personal service wks","unskilled manual","semi-skilled, manual",
"junior non-manual","skilled manual","int. Non-man, foreman",
"foreman manual","managers, small","int. Non-manual",
"professional empolyees","managers, large")
emp.wv17 <- c(357,234,
133,66,
460,140,
1130,421,
624,39,
157,65,
204,36,
319,68,
548,865,
143,143,
389,118)
emp.wv17 <- matrix(emp.wv17,ncol=2,byrow=TRUE)
emp.wv1 <- c(168,122,
138,91,
386,111,
863,269,
564,57,
195,74,
211,56,
200,47,
206,398,
144,83,
277,146)
emp.wv1 <- matrix(emp.wv1,ncol=2,byrow=TRUE)
pr.emp.wv1 <- prop.table(emp.wv1,2)
pr.emp.wv17 <- prop.table(emp.wv17,2)
apply(pr.emp.wv17,2,sum)
bar.dta2 <- data.frame(Employees=c(as.vector(pr.emp.wv1),as.vector(pr.emp.wv17)),
Sector=gl(2,11,44,labels=c("Private","Public")),
SEG=gl(11,k=1,length=44,labels=pl.segs),
Year=gl(2,22,labels=c("1991","2008")))
bb2 <- ggplot(data=bar.dta2,aes(x=SEG,y=Employees)) + opts(axis.text.x=theme_text(size=15),axis.text.y=theme_text(size=15),axis.title.x=theme_text(size=15,vjust=0)) + theme_bw()
bb2 + geom_bar(aes(fill=Sector),position="dodge") + coord_flip() + facet_grid(.~Year) + scale_y_continuous(name="Employees",formatter="percent") + xlab("")
const <- c(.955,1.14,1.07,1.18,.843,1.25,1.17,1.20,1.46,1.38)
pub <- c(.118,.116,.180,.024,.097,.162,.061,.040,-.100,.058)
sgs <- c("Personal service","Unskilled manual","Junior non-manual","Semi-skilled manual","Skilled manual","Foreman, manual","Manager, small","Intermediate non-manual","Manager, large", "Professional")
pdta <- data.frame(LHRWAGE=c(const,const+pub),Sector=gl(2,10,labels=c("Private","Public")),SEG=c(sgs,sgs))
gg <- ggplot(data=pdta,aes(x=SEG))
gg + geom_point(aes(y=LHRWAGE,colour=Sector)) + coord_flip() |
6e6401416cb69a48ca17623a4ca0e7d54fdc3b6d | 244fa469814afd6b479247b48c746912b2b573d8 | /plot4.R | d418d308a0ef2d14e5c894fdcf80518e57f074a0 | [] | no_license | Glabenweek/ExData_Plotting1 | 3cf4bcdf5c1315e1cef7e580c8cbc58e5f8ed13e | b23e34e6882cc996c325fd83b0b08e30dfda1bfd | refs/heads/master | 2021-01-09T06:46:58.475082 | 2015-10-06T15:02:29 | 2015-10-06T15:02:29 | 35,348,321 | 0 | 0 | null | 2015-05-09T22:45:27 | 2015-05-09T22:45:26 | null | UTF-8 | R | false | false | 2,738 | r | plot4.R | ### 06/10/2015
### Script for plot 4
### Remark: The part up to line 44 is common to the 4 R code files
# Create the folder to save the original data
if (!file.exists("data")) {
dir.create("data")
}
# Download the original data in the data folder and unzip the file
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile="./data/original_data.zip")
unzip("./data/original_data.zip",
exdir = "./data")
# Read the txt file
data <- read.table("./data/household_power_consumption.txt",
sep=";",
header=T,
stringsAsFactors=F)
# paste time and date columns
data$Date<-strptime(paste(data$Date,data$Time,sep=" "),"%d/%m/%Y %H:%M:%S")
# define as numerical the character columns
data$Global_active_power<-as.numeric(data$Global_active_power)
data$Global_reactive_power<-as.numeric(data$Global_reactive_power)
data$Voltage<-as.numeric(data$Voltage)
data$Global_intensity<-as.numeric(data$Global_intensity)
data$Sub_metering_1<-as.numeric(data$Sub_metering_1)
data$Sub_metering_2<-as.numeric(data$Sub_metering_2)
# subset the data from the 2 dates requested
sub_data<-subset(data,Date<"2007-02-03")
sub_data<-subset(sub_data,Date>="2007-02-01")
# remove the "data" object from working environment
rm(data)
### Create plot4
# set local system to have english weekday names (mine is in french)
Sys.setlocale("LC_TIME", "English")
png("plot4.png")
# Setup multiple plots in a 2x2 grid
par(mfrow=c(2,2))
plot(sub_data$Date,
sub_data$Global_active_power,
xlab="",
ylab="Global Active Power",
type = "l")
plot(sub_data$Date,
sub_data$Voltage,
xlab="datetime",
ylab="Voltage",
type = "l")
## Get the vertical limits for the third plot
ymax <- max(max(sub_data$Sub_metering_1),
max(sub_data$Sub_metering_2),
max(sub_data$Sub_metering_3))
ymin <- min(min(sub_data$Sub_metering_1),
min(sub_data$Sub_metering_2),
min(sub_data$Sub_metering_3))
plot(sub_data$Date,
sub_data$Sub_metering_1,
xlab="",
ylab="Energy sub metering",
type = "l",
ylim = c(ymin,ymax))
points(sub_data$Date,
sub_data$Sub_metering_2,
col = "red",
type = "l")
points(sub_data$Date,
sub_data$Sub_metering_3,
col = "blue",
type = "l")
legend(par("usr")[2],
par("usr")[4],
yjust=1,
xjust=1,
c(colnames(sub_data)[7:9]),
lwd=1,
lty=1,
col=c('black','red', 'blue'),
bty="n")
plot(sub_data$Date,
sub_data$Global_reactive_power,
xlab="datetime",
ylab="Global_reactive_power",
type = "l")
dev.off() |
e805263d4645e91e3ef243d1eb0d5125c3920c27 | 148a64186852c34c4d19d212f127fbe525f0e691 | /R/appendixA.R | bfb0f1b4cf2f0a93e8fc9934f6ccc516c04fad39 | [] | no_license | ppernot/2022_Tightness | ba34a093e0940569f56fdb2495ce28d769b89b15 | a83586dc700511b45dd42a2b86332db1ea1588c5 | refs/heads/main | 2023-04-12T14:37:32.937410 | 2022-09-08T07:33:56 | 2022-09-08T07:33:56 | 485,421,586 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,954 | r | appendixA.R | figDir = '../Figs'
library(ErrViewLib)
gPars = ErrViewLib::setgPars(type = 'publish')
scalePoints = 0.2
set.seed(123)
## Unit variance distributions ####
Normal = function(N)
rnorm(N)
T3 = function(N,df=3)
rt(N, df = 3) / sqrt(3)
Uniform = function(N)
runif(N, -sqrt(3), sqrt(3))
Laplace = function(N, df = 1)
normalp::rnormp(N, p = df) / sqrt(df^(2/df)*gamma(3/df)/gamma(1/df))
ftab = c('Uniform','Normal','Laplace','T3')
# Oracle = function(N) # Scaled-shifted Bernoulli
# 2*rbinom(N,size=1,prob=0.5) - 1
# Beta = function(N,p=1e-2)
# 2*rbeta(N,shape1=p,shape2=p) -1
nMC = 10^5
N = 5
uTrue = 1/sqrt(N)
resu = resm = rest = resz = list()
for (k in seq_along(ftab)) {
fun = get(ftab[k])
resu[[ftab[k]]] = resm[[ftab[k]]] = rest[[ftab[k]]] = resz[[ftab[k]]] = rep(0,nMC)
for(j in 1:nMC) {
S = fun(N) # Random sample
mu = mean(S)
umu = sd(S)/sqrt(N)
resm[[ftab[k]]][j] = mu
resu[[ftab[k]]][j] = umu
rest[[ftab[k]]][j] = mu/umu
resz[[ftab[k]]][j] = mu/uTrue
}
}
# Fig_A01 ####
png(file = paste0(figDir,'/Fig_A01.png'),
width = 2*gPars$reso, height = 2*gPars$reso)
par(mfrow = c(2,2),
mar = c(3,3,2,1),
tcl = gPars$tcl,
mgp = gPars$mgp,
pty = gPars$pty,
lwd = 2*gPars$lwd,
cex = gPars$cex)
for (k in seq_along(ftab)) {
D = density(rest[[ftab[k]]])
D$y = D$y / max(D$y) * dt(0, df = N - 1)
plot(
D$x, D$y,
type = 'l',
main = ftab[k],
xlim = c(-3,4),
xlab = 'Score',
xaxs = 'i',
ylim = c(0, 0.5),
yaxs = 'i',
ylab = 'Density',
col = gPars$cols[2]
)
grid()
curve(
dt(x, df = N - 1),
from = -4,
to = 4,
n = 1000,
add = TRUE,
lty = 2,
col = gPars$cols[2]
)
D = density(resz[[ftab[k]]])
D$y = D$y / max(D$y) * dnorm(0)
lines(D$x, D$y,
col = gPars$cols[5])
curve(
dnorm(x),
from = -4,
to = 4,
n = 1000,
add = TRUE,
lty = 2,
col = gPars$cols[5]
)
legend(
c(-3,0.45), bty = 'n', cex = 1, xjust = 0,
title = paste0(
'Var(T)=',signif(var(rest[[ftab[k]]]),2),'\n',
'Var(Z)=',signif(var(resz[[ftab[k]]]),2)
),
legend =c('t-score','z-score'),
col = gPars$cols[c(2,5)],
lwd = 2*gPars$lwd,
pch = NA
)
box()
}
dev.off()
# Fig_A03 ####
png(file = paste0(figDir,'/Fig_A03a.png'),
width = gPars$reso, height = gPars$reso)
sel = sample.int(nMC,size = 1000)
X = resu[['Normal']][sel]
Y = resm[['Normal']][sel]
ErrViewLib::plotEvsPU(
X , Y ,
runQuant = TRUE,
# cumMAE = TRUE,
scalePoints = scalePoints,
label = 1,
gPars = gPars
)
dev.off()
png(file = paste0(figDir,'/Fig_A03b.png'),
width = gPars$reso, height = gPars$reso)
uE = resu[['Normal']]
E = resm[['Normal']]
ErrViewLib::plotConfidence(
E, uE,
legend = 'Noisy data',
oracle = FALSE,
probref = TRUE,
conf_probref = TRUE,
label = 2, ylim = c(0,1.1),
gPars = gPars
)
dev.off()
png(file = paste0(figDir,'/Fig_A03c.png'),
width = gPars$reso, height = gPars$reso)
uE = resu[['Normal']]
Z = rest[['Normal']]
ErrViewLib::plotLZV(
uE, Z,
method = 'cho',
xlab = 'Prediction uncertainty, uE',
varZ = (N-1)/(N-3),
label = 3,
gPars = gPars
)
dev.off()
png(file = paste0(figDir,'/Fig_A03d.png'),
width = gPars$reso, height = gPars$reso)
uE = resu[['Normal']]
Z = rest[['Normal']]
ErrViewLib::plotLZV(
1:length(Z), Z,
method = 'cho',
xlab = 'Point index',
nBin = 10,
label = 3,
gPars = gPars
)
abline(h=(N-1)/(N-3),lwd = gPars$lwd, col=2, lty=2)
dev.off()
png(file = paste0(figDir,'/Fig_A03e.png'),
width = gPars$reso, height = gPars$reso)
uE = resu[['Normal']]
E = resm[['Normal']]
ErrViewLib::plotRelDiag(
uE, E,
nBin = 10,
nBoot = 1000,
BSmethod = 'perc',
label = 4,
gPars = gPars
)
dev.off()
# Convergence of Var(T) ####
## Distributions
Normal = function(N)
rnorm(N)
T3 = function(N, df = 3)
rt(N, df = df)
Uniform = function(N)
runif(N, -1, 1)
Exp1 = function(N, df = 1)
normalp::rnormp(N, p = df)
Oracle = function(N)
# Scaled-shifted Bernoulli
2 * rbinom(N, size = 1, prob = 0.5) - 1
Beta = function(N, p = 0.5)
2 * rbeta(N, shape1 = p, shape2 = p) - 1
Exp4 = function(N, df = 4)
normalp::rnormp(N, p = df)
nMC = 10^5
ftab = c('Beta','Uniform','Exp4','Normal','Exp1','T3')
nSeq= c(5:14,seq(15,30,by=5))
resuVarT = resuMeanT = list()
for (k in seq_along(ftab)) {
fun = get(ftab[k])
resuVarT[[ftab[k]]] = rep(0,length(nSeq))
resuMeanT[[ftab[k]]] = rep(0,length(nSeq))
for(i in seq_along(nSeq)) {
N = nSeq[i]
mu = umu = rep(0,nMC)
for(j in 1:nMC) {
S = fun(N) # Random sample
mu[j] = mean(S)
umu[j] = sd(S)/sqrt(N)
}
sel = umu != 0
t = mu[sel]/umu[sel]
resuMeanT[[ftab[k]]][i] = mean(t)
resuVarT[[ftab[k]]][i] = var(t)
}
}
for (k in seq_along(ftab)) {
print(c(resuMeanT[[ftab[k]]][1],resuVarT[[ftab[k]]][1]))
}
# Fig_A02 ####
ftabp = c('Uniform','Exp4','Normal','Exp1','T3')
png(file = paste0(figDir,'/Fig_A02.png'),
width = gPars$reso, height = gPars$reso)
par(mfrow = c(1,1),
mar = c(3,3,2,1),
tcl = gPars$tcl,
mgp = gPars$mgp,
pty = gPars$pty,
lwd = 2*gPars$lwd,
cex = gPars$cex)
for (k in seq_along(ftabp)) {
if(k==1) {
plot(
nSeq,resuVarT[[ftabp[k]]],
type = 'l', log = 'x',
xlab = 'Sample size, n',
ylab = 'Var(T)',
ylim = c(0.95,3),
col=gPars$cols[k])
grid(lwd=2)
} else {
lines(
nSeq,resuVarT[[ftabp[k]]],
col=gPars$cols[k])
}
}
law = (nSeq-1)/(nSeq-3)
icol = which(ftabp=='Normal')
points(nSeq,law,
pch=19, col = gPars$cols[icol])
abline(h=1, lty =2)
box()
legend(
'topright', bty = 'n',
legend = ftabp,
col = gPars$cols,
lty = 1,
pch = NA
)
dev.off()
# Heteroscedastic case ####
set.seed(123)
nMC = 10^4
N = 5
resvH = reseH = resuH = resmH = ressH = restH = reszH = rep(0,nMC)
for(i in 1:nMC) {
V = runif(1,-2,2)
uE = 0.01*(1 + V^2)
E = rnorm(1, 0, uE)
S = rnorm(N, 0, uE)
mu = mean(S)
umu = sd(S)
reseH[i] = E
resvH[i] = V
resmH[i] = mu
resuH[i] = umu / sqrt(N)
ressH[i] = uE
restH[i] = mu / (umu / sqrt(N))
reszH[i] = V / uE
}
# Fig_A04 ####
sel = sample.int(nMC,size = 1000)
png(file = paste0(figDir,'/Fig_A04a.png'),
width = gPars$reso, height = gPars$reso)
X = resuH[sel]
Y = resmH[sel]
ErrViewLib::plotEvsPU(
X , Y ,
xlim = c(0,0.04),
runQuant = TRUE,
# cumMAE = TRUE,
scalePoints = scalePoints,
label = 1,
# xlim = c(0,3),
title = 'n = 5',
gPars = gPars
)
dev.off()
png(file = paste0(figDir,'/Fig_A04b.png'),
width = gPars$reso, height = gPars$reso)
ErrViewLib::plotConfidence(
resmH, resuH,
legend = 'Noisy data',
oracle = FALSE,
probref = TRUE,
conf_probref = TRUE,
label = 2,
gPars = gPars
)
dev.off()
png(file = paste0(figDir,'/Fig_A04c.png'),
width = gPars$reso, height = gPars$reso)
ErrViewLib::plotLZV(
resuH, restH,
method = 'cho',
xlab = 'Prediction uncertainty, uE',
nBin = 10,
slide = FALSE,
ylim = c(0,4),
varZ =(N-1)/(N-3),
label = 3,
gPars = gPars
)
dev.off()
png(file = paste0(figDir,'/Fig_A04d.png'),
width = gPars$reso, height = gPars$reso)
ErrViewLib::plotLZV(
resvH, restH,
method = 'cho',
nBin = 10,
slide = FALSE,
ylim = c(0,4),
xlab = 'Predicted value, V',
varZ =(N-1)/(N-3),
label = 4,
gPars = gPars
)
dev.off()
set.seed(123)
nMC = 10^4
N = 10
resvH = reseH = resuH = resmH = ressH = restH = reszH = rep(0,nMC)
for(i in 1:nMC) {
V = runif(1,-2,2)
uE = 0.01*(1 + V^2)
E = rnorm(1, 0, uE)
S = rnorm(N, 0, uE)
mu = mean(S)
umu = sd(S)
reseH[i] = E
resvH[i] = V
resmH[i] = mu
resuH[i] = umu / sqrt(N)
ressH[i] = uE
restH[i] = mu / (umu / sqrt(N))
reszH[i] = V / uE
}
# Fig_A05 ####
sel = sample.int(nMC,size = 1000)
png(file = paste0(figDir,'/Fig_A05a.png'),
width = gPars$reso, height = gPars$reso)
X = resuH[sel]
Y = resmH[sel]
ErrViewLib::plotEvsPU(
X , Y ,
xlim = c(0,0.03),
runQuant = TRUE,
# cumMAE = TRUE,
scalePoints = scalePoints,
label = 1,
# xlim = c(0,3),
title = 'n = 10',
gPars = gPars
)
dev.off()
png(file = paste0(figDir,'/Fig_A05b.png'),
width = gPars$reso, height = gPars$reso)
ErrViewLib::plotConfidence(
resmH, resuH,
legend = 'Noisy data',
oracle = FALSE,
probref = TRUE,
conf_probref = TRUE,
label = 2,
gPars = gPars
)
dev.off()
png(file = paste0(figDir,'/Fig_A05c.png'),
width = gPars$reso, height = gPars$reso)
ErrViewLib::plotLZV(
resuH, restH,
method = 'cho',
xlab = 'Prediction uncertainty, uE',
nBin = 10,
slide = FALSE,
ylim = c(0,4),
varZ =(N-1)/(N-3),
label = 3,
gPars = gPars
)
dev.off()
png(file = paste0(figDir,'/Fig_A05d.png'),
width = gPars$reso, height = gPars$reso)
ErrViewLib::plotLZV(
resvH, restH,
method = 'cho',
nBin = 10,
slide = FALSE,
xlab = 'Predicted value, V',
ylim = c(0,4),
varZ =(N-1)/(N-3),
label = 4,
gPars = gPars
)
dev.off()
|
1f73709a3453d171da600ac03ad7e74f59c79d31 | 0f532d15ddbcaa1f9e6ae7c6e72041348bde9807 | /devScripts/read_YujieHe2016.R | 45069f6b44f517a12f128a322586244a89b4fe6c | [] | no_license | xiajz/ISRaD | a4567e95d23bfea31a1ff032c700331f618ba501 | 203078df68da22d5d091cb82204f16047fc98e03 | refs/heads/master | 2020-12-08T15:01:46.769054 | 2019-12-21T06:45:51 | 2019-12-21T06:45:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,319 | r | read_YujieHe2016.R | #' Read He 2016
#'
#' Read in the data from Yujie He's 2016 Science paper as a raw csv file
#'
#' @param Yujie_file The raw csv data
#'
#' @return ISRaD compliant file structure with only columns that overlap with original data
#'
#' @importFrom rcrossref cr_citation
read_YujieHe2016 <- function(Yujie_file = NULL){
  # NOTE(review): requireNamespace() does not attach the package, yet the
  # code below uses unqualified dplyr/tidyr verbs (mutate, rename, select,
  # %>%) -- it presumably relies on tidyverse being attached by the caller.
  requireNamespace('tidyverse')
  # Bug fix: this branch previously tested/assigned the misspelled name
  # 'Yuijie_file', so calling the function with the default NULL argument
  # errored ("object 'Yuijie_file' not found") instead of falling back to
  # the local copy of the raw csv.
  if(is.null(Yujie_file)){
    Yujie_file<- "~/Dropbox/ISRaD_data/Compilations/Yujie/raw/Yujie_dataset2.csv"
  }
  # Read the raw compilation as character columns, build unique
  # site/profile/layer identifiers, and rename columns to ISRaD vocabulary.
  Yujie_dataset <- utils::read.csv(Yujie_file, na.strings = c("","NA"),
                                   stringsAsFactors = FALSE, colClasses='character') %>%
    #replace NA pc_dataset_name with 'no_ref'
    dplyr::mutate(pc_dataset_name = as.factor(if_else(is.na(.data$pc_dataset_name),
                                                      "no_ref", gsub('\\s+', '_',
                                                                     as.character(.data$pc_dataset_name))))) %>%
    #remove sites without longitude specified
    dplyr::group_by(.data$Site) %>% #some sites have multiple lat-lon, rename them
    dplyr::mutate(site_name = ifelse(length(.data$Site) == 1,
                                     as.character(.data$Site), sprintf('%s:%s,%s', as.character(.data$Site), .data$Lat, .data$Lon))) %>%
    #create profile names from the site name and profile ID
    dplyr::mutate(profile_name = paste(.data$site_name, .data$ProfileID, sep="_")) %>%
    #create layer names from profile name and top-bottom depths
    dplyr::mutate(layer_name = paste(.data$profile_name, .data$Layer_top, .data$Layer_bottom, sep="_")) %>%
    ungroup() %>%
    mutate(pro_veg_note=paste(.data$VegTypeCodeStr_Local, .data$VegLocal,
                              .data$VegType_Species, sep=";")) %>%
    rename(entry_name=.data$pc_dataset_name,
           site_lat=.data$Lat,
           site_long=.data$Lon,
           site_elevation=.data$Elevation,
           pro_name=.data$profile_name,
           pro_MAT=.data$MAT_original,
           pro_MAP=.data$MAP_original,
           pro_soil_age=.data$Soil_Age,
           pro_soil_taxon=.data$SoilOrder_LEN_USDA_original,
           pro_parent_material_notes=.data$ParentMaterial,
           pro_slope=.data$Slope,
           pro_slope_shape=.data$SlopePosition,
           pro_aspect=.data$Aspect,
           pro_land_cover=.data$VegTypeCodeStr_Local,
           lyr_name=.data$layer_name,
           lyr_obs_date_y=.data$SampleYear,
           lyr_top=.data$Layer_top_norm,
           lyr_bot=.data$Layer_bottom_norm,
           lyr_hzn=.data$HorizonDesignation,
           lyr_rc_year=.data$Measurement_Year,
           lyr_13c=.data$d13C,
           lyr_14c=.data$D14C_BulkLayer,
           lyr_14c_sigma=.data$D14C_err,
           lyr_fraction_modern=.data$FractionModern,
           lyr_fraction_modern_sigma=.data$FractionModern_sigma,
           lyr_bd_samp=.data$BulkDensity_original,
           lyr_bet_surface_area=.data$SpecificSurfaceArea,
           lyr_ph_h2o=.data$PH_H2O,
           lyr_c_tot=.data$pct_C_original,
           lyr_n_tot=.data$pct_N,
           lyr_c_to_n=.data$CN,
           lyr_sand_tot_psa=.data$sand_pct,
           lyr_silt_tot_psa=.data$silt_pct,
           lyr_clay_tot_psa=.data$clay_pct,
           lyr_cat_exch=.data$cation_exch,
           lyr_fe_dith=.data$Fe_d,
           lyr_fe_ox=.data$Fe_o,
           lyr_fe_py=.data$Fep,
           lyr_al_py=.data$Alp,
           lyr_al_dith=.data$Ald,
           lyr_al_ox=.data$Alo,
           lyr_smect_vermic=.data$Smectite)
  #scrub non ascii chacaters
  #ans <- lapply(ans, function(x) stringi::stri_trans_general(as.character(x), "latin-ascii"))
  # Split the flat table into the four ISRaD sheets (metadata, site,
  # profile, layer), filling in curator/contact boilerplate.
  ans <- list(metadata=Yujie_dataset %>%
                select(.data$entry_name, .data$doi) %>% unique() %>%
                group_by(.data$entry_name) %>%
                mutate(curator_name="Yujie He",
                       curator_organization = "ISRaD",
                       curator_email = "info.israd@gmail.com",
                       modification_date_d = format(as.Date(Sys.Date(),format="%Y-%m-%d"), "%d"),
                       modification_date_m = format(as.Date(Sys.Date(),format="%Y-%m-%d"), "%m"),
                       modification_date_y = format(as.Date(Sys.Date(),format="%Y-%m-%d"), "%Y"),
                       contact_name = "Yujie He",
                       contact_email = "yujiehe.pu@gmail.com",
                       compilation_doi = "10.1126/science.aad4273"),
              site=Yujie_dataset %>%
                select(.data$entry_name, starts_with('site_')) %>% unique(),
              profile=Yujie_dataset %>%
                select(.data$entry_name, .data$site_name, starts_with('pro_')) %>% unique() %>%
                mutate(pro_treatment = "control",
                       pro_soil_taxon_sys = "USDA"),
              layer=Yujie_dataset %>%
                select(.data$entry_name, .data$site_name, .data$pro_name, starts_with('lyr_')) %>% unique())
  ##Fill in bib with doi citations from rcrossref
  temp <- rcrossref::cr_cn(ans$metadata$doi, format='text', raw=TRUE)
  ans$metadata$bibliographical_reference <- unlist(lapply(temp, function(x){
    return(dplyr::if_else(is.null(x), 'NA', x))
  }))
  ##drop 'modern' notation from faction modern
  #ans$layer$lyr_fraction_modern <- as.numeric(ans$layer$lyr_fraction_modern)
  ##convert the land cover vocab to the controlled vocabulary via lookup table
  land_cover <- openxlsx::read.xlsx(
    "~/Dropbox/ISRaD_data/Compilations/Yujie/info/vegetation_class_code.xlsx")
  ans$profile$pro_land_cover <- stats::setNames(land_cover$Controlled,
                                                land_cover$VegTypeCodeStr_Local)[ans$profile$pro_land_cover]
  ## pull in the ISRaD master template (downloaded from GitHub) so every
  ## exported workbook carries the full sheet structure
  utils::download.file(url='https://github.com/International-Soil-Radiocarbon-Database/ISRaD/raw/master/inst/extdata/ISRaD_Master_Template.xlsx',
                       destfile="~/Dropbox/ISRaD_data/Compilations/Yujie/ISRaD_Master_Template.xlsx")
  template <- lapply(list( metadata = 'metadata', site='site', profile='profile',
                           flux="flux", layer="layer", interstitial="interstitial",
                           fraction="fraction", incubation="incubation",
                           `controlled vocabulary`="controlled vocabulary"),
                     function(x){openxlsx::read.xlsx(
                       "~/Dropbox/ISRaD_data/Compilations/Yujie/ISRaD_Master_Template.xlsx",
                       sheet=x) %>% mutate_all(as.character)})
  #Deal with template versions nicely
  template_version <- 0
  if('template_version' %in% names(template$metadata)){
    template_version <- template$metadata$template_version[3]
    template$metadata <- template$metadata[1:2,]
  }
  ans$metadata$template_version <- template_version
  ##pull the studies appart for curation: write one xlsx per entry_name,
  ##stacking that entry's rows under the template header rows
  #currentEntry <- ans$metadata$entry_name[1]
  for(currentEntry in as.character(ans$metadata$entry_name)){
    sliceEntry <- template
    for(mySheet in names(ans)){
      sliceEntry[[mySheet]] <- template[[mySheet]] %>%
        bind_rows(
          ans[[mySheet]] %>%
            filter(.data$entry_name == currentEntry) %>%
            mutate_all(as.character))
    }
    openxlsx::write.xlsx(sliceEntry, file =
                           file.path("~/Dropbox/ISRaD_data/Compilations/Yujie/read_YujiHe2016_out",
                                     paste0(currentEntry, ".xlsx")))
  }
  return(ans)
}
|
34671796215c01819e2a4e1a0b9054b0bc3e18cb | e2ccff462a561b4de986c65897106600848fec89 | /plot3.R | a8371b4dca5825ab214a60cae6125b7f7027faa0 | [] | no_license | daviddamen/ExData_Plotting1 | 4f0bddab2c5be4bc4ea9c64328db9abddc34089b | d7f151a91af337dbd1db626f962cad0ca559c214 | refs/heads/master | 2021-01-09T20:55:22.547226 | 2014-06-07T16:38:49 | 2014-06-07T16:38:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,160 | r | plot3.R | #
#
# plot3.R -- energy sub-metering over time for 2007-02-01 and 2007-02-02.
# Rows for those two days are known to start at line 66637 of the raw file,
# so we skip straight to them rather than filtering after loading.
#
header_names <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
                  "Voltage", "Global_intensity", "Sub_metering_1",
                  "Sub_metering_2", "Sub_metering_3")
power <- read.csv2("household_power_consumption.txt", skip = 66636, nrow = 2880,
                   col.names = header_names, stringsAsFactors = FALSE)
#
# Combine the character date and time columns into a single timestamp and
# coerce the three sub-metering readings to numeric.
#
power$Timestamp <- strptime(paste(power$Date, power$Time), format = "%d/%m/%Y %H:%M:%S")
for (metering_col in c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) {
  power[[metering_col]] <- as.double(power[[metering_col]])
}
#
# Draw the three sub-metering traces to a PNG device:
# black = sub-metering 1, red = sub-metering 2, blue = sub-metering 3.
#
png(filename = "plot3.png", width = 500, height = 500, pointsize = 12)
plot(power$Timestamp, power$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(power$Timestamp, power$Sub_metering_2, col = "red")
lines(power$Timestamp, power$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1)
dev.off()
|
a9a3cb5325b853681a276455675a20e552c40768 | c1ac0e0d0ba41f7abf1c074b21b3fca225364509 | /plot3.R | b705f70dcce9277ca2f9cd5440cb7ac5a5662639 | [] | no_license | TPopo/ExData_Plotting1 | 5b8ec76989a46d8e3f72d452ffac551538d14e87 | c56f3c68e8247cca72de716df8dd08a18b84a4a0 | refs/heads/master | 2021-05-02T07:26:54.469844 | 2018-02-12T18:53:02 | 2018-02-12T18:53:02 | 120,828,366 | 0 | 0 | null | 2018-02-08T22:57:46 | 2018-02-08T22:57:45 | null | UTF-8 | R | false | false | 1,381 | r | plot3.R | ## set working directory with data
setwd("C:/Users/TonyP/Desktop/Coursera/elecpwrcons")
## read in data using read.table
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
colClasses = c("character","character", rep("numeric",7)),
na.strings = "?")
## subset the data because we are only interested in two dates, also
## create POSIX date/time
data.subset <- data[data$Date %in% c("1/2/2007", "2/2/2007"),]
data.subset$Date <- as.Date(data.subset$Date, format = "%d/%m/%Y")
data.subset$DateTime <- as.POSIXct(paste(data.subset$Date, data.subset$Time))
## Create 3 vectors to graph separately
subMetering1 <- as.numeric(data.subset$Sub_metering_1)
subMetering2 <- as.numeric(data.subset$Sub_metering_2)
subMetering3 <- as.numeric(data.subset$Sub_metering_3)
## create vector to plot, open device, plot to device, and turn device off
globalActivePower <- as.numeric(data.subset$Global_active_power)
png("plot3.png", width=480, height=480)
plot(data.subset$DateTime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(data.subset$DateTime, subMetering2, type="l", col="red")
lines(data.subset$DateTime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off() |
f645392e611c0f52fed59af1cdea50f4aa6e1617 | 412ba9126fc2145813ab7237cda0c5c6b1b89207 | /man/trt_paneldata.Rd | 4b3fe605749a92bbf5db7b87d234170b86dd670f | [] | no_license | PatrickPfeifferDSc/bite | a739b10904f56e9a2d5ff21c5437edca6b2b24b2 | 3e3f21c10386ad81746a1bd05c52dbd9387fd75b | refs/heads/master | 2020-03-08T03:26:52.619261 | 2019-08-21T21:04:28 | 2019-08-21T21:04:28 | 127,891,241 | 0 | 3 | null | null | null | null | UTF-8 | R | false | true | 1,068 | rd | trt_paneldata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trt_paneldata.R
\docType{data}
\name{trt_paneldata}
\alias{trt_paneldata}
\title{Example dataset containing the panel information}
\format{A data frame with subject related feature columns:
\describe{
\item{ID}{ID of subject/item}
\item{panelT}{timepoint t of measured features and dependent variable}
\item{y}{dependent variable, outcome}
\item{V1}{variable 1, equivalent to V1 in baseline}
\item{V2}{variable 2, equivalent to V2 in baseline}
\item{t2}{dummy for panel time = 2}
\item{t3}{dummy for panel time = 3}
\item{t4}{dummy for panel time = 4}
}}
\source{
This dataset stems from a simulation process and represents fictive data.
}
\usage{
trt_paneldata
}
\description{
A simulated dataset (from the framework of the shared factor model) to
demonstrate the use of the SR model in package 'bite'. The file contains
5000 subjects, each of which has 4 panel observations on simulated variables.
The second of 2 argument datasets for\code{\link{bayesTrtEffects}}.
}
\keyword{datasets}
|
f431b17adfc1b48128ddd945042a57b1cb64c5bb | 0877d83cdf78f6e3bb122c7d2c031791684506d3 | /man/pct_acid_tol.Rd | cabcaca5760e1acaaa966e8af6654babc72c6e24 | [] | no_license | BWAM/BAP | fec1dbe4475f3869f8007894e9ad9a5581cb1277 | 9dd041516b2f4c8a2269516c57d7ade41746d7e9 | refs/heads/master | 2023-04-30T00:25:15.586434 | 2023-04-26T16:17:49 | 2023-04-26T16:17:49 | 180,187,817 | 0 | 1 | null | 2023-04-17T16:54:43 | 2019-04-08T16:18:52 | R | UTF-8 | R | false | true | 364 | rd | pct_acid_tol.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acid_tol_index.R
\name{pct_acid_tol}
\alias{pct_acid_tol}
\title{Acid Tolerance Index (ATI)}
\usage{
pct_acid_tol(Genus)
}
\arguments{
\item{Genus}{= Genus level taxa count data.}
}
\value{
The percentage of Acid Tolerant Individuals (ATI).
}
\description{
Acid Tolerance Index (ATI)
}
|
ffb5088c2fbb0a8563d146cfdda3d568d3f780ab | a546edb72260612a371847728a903f704cd15918 | /man/topmiRNA_toptarget.Rd | 231db29435c37ed6b3e1da38f0ffd24674a6a00a | [
"MIT"
] | permissive | wizbionet/wizbionet | adcf0366d002892a67209357a6802cd6a179348c | b5fe22074d770df36b3afc47805cf899c69a7bfa | refs/heads/master | 2022-12-08T07:18:00.668772 | 2020-09-02T21:20:01 | 2020-09-02T21:20:01 | 292,099,931 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,180 | rd | topmiRNA_toptarget.Rd | \name{topmiRNA_toptarget}
\alias{topmiRNA_toptarget}
\alias{topmiRNA_toptarget}
\alias{wizbionet::topmiRNA_toptarget}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Prioritizes microRNA-target interactions from the the multiMiR Package
}
\description{
This function retrieves miRNA-target interactions and identifies miRNAs and genes with the highest number of analyzed interactors.
}
\usage{
topmiRNA_toptarget(DEmir_up,DEgenes_down,DEmir_down,
DEgenes_up, multimir_args, mirna_type)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{DEmir_up}{
vector with up-regulated miRNAs. To see ID examples see help(get_multimir) and field mirna= .
If you don't want to define miRNA put NULL than multimir will analyze all possible pairs
}
\item{DEgenes_down}{
vector with down-regulated genes. To see ID examples examples see help(get_multimir) and field target= .
If you don't want to define targets put NULL than multimir will analyze all possible pairs
}
\item{DEmir_down}{
vector with down-regulated miRNAs
}
\item{DEgenes_up}{
vector with up-regulated genes
}
\item{mirna_type}{
"mature_mir" or "pre_mir" mirna_type will be used for deduplication and data aggregation
}
\item{multimir_args}{
#This parameter is a component of the multiMiR::get_multimir function .
You can see description using command:
help(get_multimir)
#You can modify all components using
multimir_args<- as.list(args(multiMiR::get_multimir))
#Important: Don't add mirna= and target= fields they are already included as DEmir_up,DEgenes_down,DEmir_down, DEgenes_up!
}
}
\value{
This function generates a list with three data frames:
1) multimir_output - a data frame with the results from the get_multimir function.
It provides information about up- and down-regulation of the miRNA-target pairs.
It can be used for constructing an interaction network in Cytoscape.
2) top_miR - a data frame with aggregated and prioritized results from the get_multimir function showing the number of genes associated with pre-miRNAs.
It has columns named clus_... providing logical information on whether a gene was in the top 2 clusters (cl1 and cl2),
i.e. roughly the top 20 percent, and columns named clusNR_... providing information on which cluster the gene was in (cl1, cl2, cl3, cl4).
3) top_gene - a data frame with aggregated and prioritized results from the get_multimir function showing the number of genes associated with the analyzed targets. It also has columns indicating whether a gene is in the top cluster.
}
\references{
Ru Y, Kechris KJ, Tabakoff B, Hoffman P, Radcliffe RA, Bowler R, Mahaffey S, Rossi S, Calin GA, Bemis L, Theodorescu D (2014). “The multiMiR R package and database: integration of microRNA–target interactions along with their disease and drug associations.” Nucleic Acids Research, 42(17), e133. doi: 10.1093/nar/gku631, http://dx.doi.org/10.1093/nar/gku631.
Ru Y, Mulvahill M, Mahaffey S, Kechris K. multiMiR: Integration of multiple microRNA-target databases with their disease and drug associations. https://github.com/KechrisLab/multiMiR.
}
\author{
Zofia Wicik
zofiawicik@gmail.com
}
\examples{
#Example###
#set parameters
DEmir_up<-c('hsa-miR-150-5p','hsa-miR-448-5p','hsa-miR-448-3p',
'hsa-miR-493-5p','hsa-miR-493-3p') # example DE miRNAs
DEgenes_down<-c('5797','8826','7994','2775','7182','79647','5733',
'158158','9480','8626','50636') # example DE genes
DEmir_down<-c('hsa-miR-4731-5p','hsa-miR-541-3p','hsa-miR-449b-5p','hsa-miR-541-5p')
DEgenes_up<-c('203859','4745','4916','126298','2258','8464','55917','23450','29767')
mirna_type<-"pre_mir" # "mature_mir"
multimir_args= list(url = NULL,
org = "hsa",
table = "all",
predicted.cutoff = 10,
predicted.cutoff.type = "p",
predicted.site = "conserved"
)
#execute function
output<- wizbionet::topmiRNA_toptarget(DEmir_up,DEgenes_down,
DEmir_down, DEgenes_up, multimir_args,mirna_type)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ miRNA}% use one of RShowDoc("KEYWORDS")
\keyword{ predictions }% __ONLY ONE__ keyword per line
|
28ad61391d95c293be534774c06f76aa985c0ec4 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/OpenMx/man/mxComputeReportDeriv.Rd | 6e38f4ca3b071ee608818a0fcf16930043a3f4f7 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 405 | rd | mxComputeReportDeriv.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MxCompute.R
\name{mxComputeReportDeriv}
\alias{mxComputeReportDeriv}
\alias{MxComputeReportDeriv-class}
\title{Report derivatives}
\usage{
mxComputeReportDeriv(freeSet = NA_character_)
}
\arguments{
\item{freeSet}{names of matrices containing free variables}
}
\description{
Copy the internal gradient and Hessian back to R.
}
|
d0df9cb98cf115b9dd33ee27c44660cc8a708cc8 | b332ef10b161db840062a087e917bdc549f8e768 | /utils_regex.R | d3919d83dbdf9aa84f982f5ccbc73ae2be4e0039 | [] | no_license | sdobbins/utils | b5551ad467874bce405fb1f598d31f9ca092a268 | b41322c1869f770e02ca247b7298aabc123b7f01 | refs/heads/master | 2021-05-05T23:49:13.447778 | 2018-02-20T00:44:21 | 2018-02-20T00:44:21 | 116,899,926 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,411 | r | utils_regex.R | # @author Scott Dobbins
# @version 0.9.9.6
# @date 2017-11-19 01:00
### Constants ---------------------------------------------------------------
# Characters with special meaning in regular expressions.
regex_metacharacters <- c("$", "(", ")", "*", "+", ".", "?", "[", "\\", "]", "^", "{", "|", "}")
# The same characters, each prefixed with a backslash for literal matching.
regex_metacharacters_escaped <- paste0("\\", regex_metacharacters)
# A character class matching any single regex metacharacter.
regex_metacharacters_set <- "[]|{}$()*+.?\\[^]"
# The same character class wrapped in a capturing group (used by literal()).
regex_metacharacters_set_captured <- paste0("(", regex_metacharacters_set, ")")
### Regex -------------------------------------------------------------------
# Delete the first match of `pattern` from each element of `strings`.
# `exact = TRUE` matches the pattern as a fixed string rather than a regex.
rem <- function(strings, pattern, exact = FALSE) {
  sub(pattern, "", strings, fixed = exact)
}
# Delete every match of `pattern` from each element of `strings`.
# `exact = TRUE` matches the pattern as a fixed string rather than a regex.
grem <- function(strings, pattern, exact = FALSE) {
  gsub(pattern, "", strings, fixed = exact)
}
# Delete every match of each pattern in `patterns` from `strings`, applying
# the patterns in order.  `exacts` is recycled to the length of `patterns`
# via the file-local recycle_arguments() helper.
grems <- function(strings, patterns, exacts = FALSE) {
  exacts <- recycle_arguments(exacts, length(patterns))
  for (i in seq_along(patterns)) {
    strings <- gsub(pattern = patterns[[i]], replacement = "", x = strings, fixed = exacts[[i]])
  }
  return (strings)
}
# Apply a sequence of first-match substitutions: each element of `changes` is
# a pattern whose *name* is the replacement text.  Names come from the
# file-local get_names() helper -- presumably it fills unnamed entries with
# "" (i.e. removal); TODO confirm against its definition.
subs <- function(strings, changes, exacts = FALSE) {
  changes_names <- get_names(changes)
  exacts <- recycle_arguments(exacts, length(changes))
  for (i in seq_along(changes)) {
    strings <- sub(pattern = changes[[i]], replacement = changes_names[[i]], x = strings, fixed = exacts[[i]])
  }
  return (strings)
}
# As subs(), but replaces *all* matches of each pattern (gsub), in order.
gsubs <- function(strings, changes, exacts = FALSE) {
  changes_names <- get_names(changes)
  exacts <- recycle_arguments(exacts, length(changes))
  for (i in seq_along(changes)) {
    strings <- gsub(pattern = changes[[i]], replacement = changes_names[[i]], x = strings, fixed = exacts[[i]])
  }
  return (strings)
}
# Run grep() once per pattern; returns a list of match-index vectors keyed by
# pattern text (duplicate patterns overwrite earlier entries).  Note that
# `strings` fills grep's `x` argument positionally because `pattern` is named.
greps <- function(strings, patterns, exacts = FALSE) {
  exacts <- recycle_arguments(exacts, length(patterns))
  results <- list()
  for (i in seq_along(patterns)) {
    results[[patterns[[i]]]] <- grep(strings, pattern = patterns[[i]], fixed = exacts[[i]])
  }
  return (results)
}
# As greps(), but returns logical match vectors (grepl) per pattern.
grepls <- function(strings, patterns, exacts = FALSE) {
  exacts <- recycle_arguments(exacts, length(patterns))
  results <- list()
  for (i in seq_along(patterns)) {
    results[[patterns[[i]]]] <- grepl(strings, pattern = patterns[[i]], fixed = exacts[[i]])
  }
  return (results)
}
# As greps(), but returns first-match positions (regexpr) per pattern.
regexprs <- function(strings, patterns, exacts = FALSE) {
  exacts <- recycle_arguments(exacts, length(patterns))
  results <- list()
  for (i in seq_along(patterns)) {
    results[[patterns[[i]]]] <- regexpr(strings, pattern = patterns[[i]], fixed = exacts[[i]])
  }
  return (results)
}
# As greps(), but returns all-match positions (gregexpr) per pattern.
gregexprs <- function(strings, patterns, exacts = FALSE) {
  exacts <- recycle_arguments(exacts, length(patterns))
  results <- list()
  for (i in seq_along(patterns)) {
    results[[patterns[[i]]]] <- gregexpr(strings, pattern = patterns[[i]], fixed = exacts[[i]])
  }
  return (results)
}
# TRUE for each element of `strings` whose start matches `pattern`.
begins_with <- function(strings, pattern) {
  grepl(beginning_with(pattern), strings)
}
# As begins_with(), but also requires a word boundary after the match.
begins_with_word <- function(strings, pattern) {
  grepl(beginning_with_word(pattern), strings)
}
# TRUE for each element of `strings` whose end matches `pattern`.
ends_with <- function(strings, pattern) {
  grepl(ending_with(pattern), strings)
}
# As ends_with(), but also requires a word boundary before the match.
ends_with_word <- function(strings, pattern) {
  grepl(ending_with_word(pattern), strings)
}
### Regex Helpers -----------------------------------------------------------
### Simple Repeats
possible <- function(strings) {
return (paste0(non_capturing_group(strings), "?"))
}
multiple <- function(strings) {
return (paste0(non_capturing_group(strings), "+"))
}
some <- function(strings) {
return (paste0(non_capturing_group(strings), "*"))
}
### Complex Repeats
as_many_of <- function(strings, spacing = " ") {
return (paste0(non_capturing_group(paste0(strings, spacing, "?")), "*"))
}
n_or_fewer <- function(strings, n) {
return (paste0(non_capturing_group(strings), "{,", n, "}"))
}
n_or_more <- function(strings, n) {
return (paste0(non_capturing_group(strings), "{", n, ",}"))
}
m_to_n <- function(strings, m, n) {
return (paste0(non_capturing_group(strings), "{", m, ",", n, "}"))
}
### Selectors/Modifiers
# Regex alternation over the elements of `strings`, non-capturing.
any_of <- function(strings) {
  return (non_capturing_group(paste0(strings, collapse = "|")))
}
# Backslash-escape every regex metacharacter so the string matches literally.
literal <- function(strings) {
  return (gsub(pattern = regex_metacharacters_set_captured, "\\\\\\1", strings))
}
# Wrap a pattern in word boundaries ("\\b...\\b").
word <- function(strings) {
  return (paste0("\\b", strings, "\\b"))
}
# Wrap in a capturing group "(...)".
capturing_group <- function(strings) {
  return (paste0("(", strings, ")"))
}
# Wrap in a non-capturing group "(?:...)".
# NOTE(review): "(?:" is PCRE syntax; base-R regex calls consuming these
# patterns presumably need perl = TRUE -- confirm downstream usage.
non_capturing_group <- function(strings) {
  return (paste0("(?:", strings, ")"))
}
# Build a character class "[...]"; not = TRUE negates it ("[^...]").
# Depends on assert_that() (assertthat) and if_else() (dplyr) being attached.
selection_group <- function(strings, not = FALSE) {
  if (length(not) == 1L) {
    if (not) {
      invert <- "^"
    } else {
      invert <- ""
    }
    return (paste0("[", invert, strings, "]"))
  } else {
    assert_that(length(strings) == length(not), 
                msg = "Length of negation condition doesn't match length of captured strings")
    return (paste0("[", if_else(not, "^", ""), strings, "]"))
  }
}
# Anchor pattern(s) at the start of the string.
beginning_with <- function(patterns) {
  return (paste0("^", patterns))
}
# Anchor at the start, followed by a word boundary.
beginning_with_word <- function(patterns) {
  return (paste0("^", patterns, "\\b"))
}
# Anchor pattern(s) at the end of the string.
ending_with <- function(patterns) {
  return (paste0(patterns, "$"))
}
# Anchor at the end, preceded by a word boundary.
ending_with_word <- function(patterns) {
  return (paste0("\\b", patterns, "$"))
}
### Grabbers
# Prefix `strings` with `marks` (each multi-character mark is grouped);
# mandatory = FALSE makes the mark optional via "?".
with_preceding <- function(strings, marks = " ", mandatory = FALSE) {
  needs_grouping <- nchar(marks) > 1L
  marks[needs_grouping] <- non_capturing_group(marks[needs_grouping])
  if (mandatory) {
    return (paste0(marks, non_capturing_group(strings)))
  } else {
    return (paste0(marks, "?", non_capturing_group(strings)))
  }
}
# Suffix counterpart of with_preceding().
with_following <- function(strings, marks = " ", mandatory = FALSE) {
  needs_grouping <- nchar(marks) > 1L
  marks[needs_grouping] <- non_capturing_group(marks[needs_grouping])
  if (mandatory) {
    return (paste0(non_capturing_group(strings), marks))
  } else {
    return (paste0(non_capturing_group(strings), marks, "?"))
  }
}
# Pattern matching `strings` together with any run of `precedings`-separated
# tokens before it, stopping at `stoppers`.  Uses dplyr::if_else -- assumes
# dplyr is attached.
and_preceding <- function(strings, precedings = " ", succeedings = " ?", exact = FALSE, greedy = FALSE, stoppers = "") {
  if (exact) {
    strings <- literal(strings)
  }
  greed <- if_else(greedy, "", "?")
  return (paste0("([^", precedings, stoppers, "]*", precedings, ")*", greed, strings, succeedings))
}
# Pattern matching `strings` and everything after it up to any `stoppers`;
# greedy = FALSE adds a negative lookahead so the capture stops before the
# next occurrence of `strings`.  Lookaheads ("(?!") are PCRE-only -- the
# consuming regex call presumably uses perl = TRUE; TODO confirm.
and_after <- function(strings, precedings = "", exact = FALSE, greedy = TRUE, stoppers = "") {
  if (exact) {
    strings <- literal(strings)
  }
  if (length(stoppers) == 1L) {
    stoppers <- rep(stoppers, length(strings))
  }
  if (length(greedy) == 1L) {
    greedy <- rep(greedy, length(strings))
  }
  chars <- if_else(stoppers == "", "[\n\r\t -~]*", paste0("[^", stoppers, "]*"))
  greed <- if_else(greedy, "", paste0("(?!", chars, strings, ")"))
  return (paste0(precedings, strings, greed, chars))
}
# Pattern matching a `starts`...`ends` span plus the delimiters around it.
# NOTE(review): the `stoppers` parameter is accepted but never used in the
# body -- possibly an unfinished feature.
and_between <- function(starts, ends, preceding_starts = " ", precedings = " ", succeedings = " ", exact = FALSE, greedy = FALSE, stoppers = "") {
  if (exact) {
    starts <- literal(starts)
    ends <- literal(ends)
  }
  greed <- if_else(greedy, "", "?")
  preceding_starts_chars <- grem(preceding_starts, "[]\\[]")
  stop_sets <- paste0("[", preceding_starts_chars, precedings, "]")
  not_stop_sets <- paste0("[^", preceding_starts_chars, precedings, "]")
  look_aheads <- if_else(greedy, "", paste0("(?!", starts, ")"))
  return (paste0(preceding_starts, starts, "(", not_stop_sets, "*", stop_sets, look_aheads, ")*", greed, ends, succeedings))
}
#and_between_containing <- function()#*** not implemented yet
### Formatters -------------------------------------------------------------
### Simple removers
fix_spaces <- function(strings) {
return (gsubs(changes = c(" " = " +",
"^ ",
" $"),
strings))
}
remove_parentheticals <- function(strings) {
return (grem(pattern = " ?\\([^)]*\\)", strings))
}
remove_square_brackets <- function(strings) {
return (grem(pattern = " ?\\[[^]]*\\]", strings))
}
fix_parentheses <- function(strings) {
return (gsub(pattern = "(\\w)\\(", replacement = "\\1 \\(", strings))
}
remove_quotes <- function(strings) {
return (grem(pattern = "\"", strings))
}
remove_nonASCII_chars <- function(strings) {
return (grem(pattern = "[^ -~]+", strings))
}
remove_extra_whitespace <- function(strings) {
return (gsub(pattern = "\\s{2,}", replacement = " ", strings))
}
remove_bad_formatting <- trimws %.% remove_extra_whitespace %.% remove_quotes %.% remove_nonASCII_chars
remove_duplicate <- function(strings, duplicate, exact = FALSE) {
return (gsub(pattern = multiple(duplicate), replacement = duplicate, x = strings, fixed = exact))
}
remove_duplicates <- function(strings, duplicates, exact = FALSE) {
exact <- recycle_arguments(exact, length(duplicates))
for (i in seq_along(duplicates)) {
strings <- gsub(pattern = multiple(duplicates[[i]]), replacement = duplicates[[i]], x = strings, fixed = exact[[i]])
}
return (strings)
}
remove_hanging_punctuation <- function(strings, chars, exact = FALSE, isolated = TRUE) {
if (isolated) {
changes <- c("\\2" = paste0("(^| )", chars, "([A-Za-z]*)"),
"\\1" = paste0("([A-Za-z]*)", chars, "( |$)"),
" " = paste0(" ?", chars, "( |$)"))
} else {
changes <- c("\\2" = paste0("(^| )", chars, "([A-Za-z]*)"),
"\\1" = paste0("([A-Za-z]*)", chars, "( |$)"))
}
return (gsubs(strings = strings, changes = changes, exact = exact))
}
remove_single_letters <- function(strings, ...) {
return (remove_words_with_fewer_than_n_letters(strings = strings, n = 2L, ...))
}
remove_words_with_fewer_than_n_letters <- function(strings, n, with_punctuation = "", only_lower_case = FALSE, only_upper_case = FALSE) {
assert_that(!(only_lower_case && only_upper_case),
msg = "Only lower case and only upper case are mutually exclusive.")
assert_that(n >= 2L,
msg = "There are no words with less than 1 letter.")
if (with_punctuation == "") {
punctuation <- ""
} else {
punctuation <- possible(non_capturing_group(with_punctuation))
}
if (only_lower_case) {
letters_used <- "[a-z]"
} else if (only_upper_case) {
letters_used <- "[A-Z]"
} else {
letters_used <- "[A-Za-z]"
}
basic_pattern <- paste0(punctuation, word(m_to_n(letters_used, 1L, n - 1L)), punctuation)
return (gsub(pattern = paste0(non_capturing_group(paste0(basic_pattern, " ")), "|", non_capturing_group(paste0(possible(" "), basic_pattern))), replacement = "", x = strings))
}
### Specific formatters
# Normalize comma spacing: no space before a comma, exactly one after.
format_commas <- function(strings) {
  gsub(" *, *", ", ", strings)
}
# Surround every occurrence of each pattern in `chars` with `buffers` on both
# sides (default a single space), e.g. "a-b" -> "a - b" for chars = "-".
# Bug fixes: the vector branch referenced an undefined `buffer` and recycled
# to the wrong length, and both branches wrapped the pattern in a
# NON-capturing group while the replacement used "\\1" -- there was no group
# to reference (and "(?:" is invalid in R's default TRE engine).  A capturing
# group is used instead.
# NOTE(review): `exact = TRUE` is incompatible with the "\\1" backreference
# (gsub treats the replacement literally when fixed = TRUE) -- regex use only.
provide_buffers_around <- function(strings, chars, buffers = " ", exact = FALSE) {
  if (length(chars) > 1L) {
    exact <- recycle_arguments(exact, length(chars))
    buffers <- recycle_arguments(buffers, length(chars))
    for (i in seq_along(chars)) {
      # capturing group so "\\1" can echo the matched text
      strings <- gsub(pattern = paste0("(", chars[[i]], ")"),
                      replacement = paste0(buffers[[i]], "\\1", buffers[[i]]),
                      x = strings, fixed = exact[[i]])
    }
    return (strings)
  } else {
    return (gsub(pattern = paste0("(", chars, ")"),
                 replacement = paste0(buffers, "\\1", buffers),
                 x = strings, fixed = exact))
  }
}
# Inverse of provide_buffers_around(): collapse "buffer <char> buffer" back
# to just the character, e.g. "a - b" -> "a-b" for chars = "-".
# Bug fixes mirror provide_buffers_around(): undefined `buffer` / wrong
# recycle length in the vector branch, and a non-capturing group paired with
# a "\\1" backreference; a capturing group is used instead.
# NOTE(review): as above, `exact = TRUE` cannot work with "\\1".
remove_buffers_around <- function(strings, chars, buffers = " ", exact = FALSE) {
  if (length(chars) > 1L) {
    exact <- recycle_arguments(exact, length(chars))
    buffers <- recycle_arguments(buffers, length(chars))
    for (i in seq_along(chars)) {
      # capturing group so "\\1" can echo the matched text
      strings <- gsub(pattern = paste0(buffers[[i]], "(", chars[[i]], ")", buffers[[i]]),
                      replacement = "\\1",
                      x = strings, fixed = exact[[i]])
    }
    return (strings)
  } else {
    return (gsub(pattern = paste0(buffers, "(", chars, ")", buffers),
                 replacement = "\\1",
                 x = strings, fixed = exact))
  }
}
### Complex Removers --------------------------------------------------------
# Remove everything before the match of `points` in each string.
# greedy = TRUE anchors on the *last* occurrence (removes as much as
# possible); otherwise the first occurrence is used.  inclusive = TRUE also
# removes the matched text itself.  Strings without a match pass through
# unchanged.  (The greedy path requires purrr's map_int/last.)
# Bug fixes: the non-inclusive branch used the typo `nchar(srings)`, and the
# inclusive branch used the full-length `positions`/`nchar(strings)` vectors
# against the subset strings[slicer], misaligning the vectorized substr().
remove_before <- function(strings, points, exact = FALSE, greedy = FALSE, inclusive = FALSE) {
  if (greedy) {
    positions <- map_int(gregexpr(points, strings, fixed = exact), last)
  } else {
    positions <- as.integer(regexpr(points, strings, fixed = exact))
  }
  slicer <- positions != -1L  # which elements actually matched
  if (is.factor(strings)) {
    strings <- as.character(strings)
  }
  if (inclusive) {
    strings[slicer] <- sub(points, "", substr(strings[slicer], start = positions[slicer], stop = nchar(strings[slicer])), fixed = exact)
  } else {
    strings[slicer] <- substr(strings[slicer], start = positions[slicer], stop = nchar(strings[slicer]))
  }
  return (strings)
}
# Remove everything after the match of `points` in each string.
# greedy = TRUE anchors on the *first* occurrence (removes as much as
# possible); otherwise the last occurrence is used.  inclusive = TRUE also
# removes the matched text itself.  Unmatched strings pass through.
# (The non-greedy path requires purrr's map_int/last and rlang's %@%.)
# Bug fix: the keep-the-match branch stopped at positions + match_lengths,
# one character past the end of the match; the match ends at
# positions + match_lengths - 1.
remove_after <- function(strings, points, exact = FALSE, greedy = FALSE, inclusive = FALSE) {
  if (greedy) {
    matches <- regexpr(points, strings, fixed = exact)
    match_lengths <- attr(matches, "match.length")
    positions <- as.integer(matches)
  } else {
    matches <- gregexpr(points, strings, fixed = exact)
    match_lengths <- map_int(matches, ~last(. %@% "match.length"))
    positions <- map_int(matches, last)
  }
  slicer <- positions != -1L  # which elements actually matched
  if (is.factor(strings)) {
    strings <- as.character(strings)
  }
  if (inclusive) {
    strings[slicer] <- substr(strings[slicer], start = 1L, stop = positions[slicer] - 1L)
  } else {
    strings[slicer] <- substr(strings[slicer], start = 1L, stop = positions[slicer] + match_lengths[slicer] - 1L)
  }
  return (strings)
}
|
a3049aad16748db61dde93fd336080774efe79fe | 405ca64b0c4518cb40238e56ab7e65c55d1a648f | /R/grid_arrange_shared_legend.R | 145ceb6c535c62863d4eea7413a6c58bb3632436 | [] | no_license | jon-mellon/mellonMisc | 08b4cb332acd89f0f4fa88b6ee6a1c433e63be85 | bd8201370f037bddc24f57365d233c47573a6eeb | refs/heads/master | 2022-07-29T02:23:00.632699 | 2022-06-29T14:45:26 | 2022-06-29T14:45:26 | 33,554,731 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,321 | r | grid_arrange_shared_legend.R | #' @export grid_arrange_shared_legend
# Arrange several ggplots in a grid that share a single legend, which is
# extracted from plots[[legend.index]] and placed at `position` ("bottom" or
# "right").  left/bottom/right/top are passed through to arrangeGrob as
# margin annotations.  Relies on ggplot2 (ggplotGrob, theme), gridExtra
# (arrangeGrob) and grid being available in the calling environment.
grid_arrange_shared_legend <- function (..., nrow = 1, ncol = length(plots), 
                                        position = c("bottom", "right"), legend.index = 1,
                                        left = NULL, bottom = NULL, right = NULL, top = NULL) {
  # browser()
  # NOTE: the default for `ncol` is evaluated lazily, after `plots` exists.
  plots <- list(...)
  position <- match.arg(position)
  # Render the reference plot with its legend shown, then pull the legend
  # grob ("guide-box") out of its gtable.
  g <- ggplotGrob(plots[[legend.index]] + theme(legend.position = position))$grobs
  legend <- g[[which(sapply(g, function(x) x$name) == "guide-box")]]
  # Widen the legend key area -- presumably to keep long labels readable;
  # TODO confirm the intent of the `ncol + 5` magic number.
  legend$grobs[[1]]$grobs[[1]]$width[[1]] <- grid::unit(ncol + 5, "npc")
  lheight <- sum(legend$height)
  lwidth <- sum(legend$width)
  # Strip legends from all plots and lay them out in the requested grid,
  # then append the shared legend below or to the right.
  gl <- lapply(plots, function(x) x + theme(legend.position = "none"))
  gl <- c(gl, nrow = nrow, ncol = ncol)
  combined <- switch(position, bottom = arrangeGrob(do.call(arrangeGrob, gl), 
                                                    legend, ncol = 1, 
                                                    heights = grid::unit.c(grid::unit(1, "npc") - lheight, lheight), 
                                                    top = top, bottom = bottom, left = left, right = right), 
                     right = arrangeGrob(do.call(arrangeGrob, gl), 
                                         legend, ncol = 2, 
                                         widths = grid::unit.c(grid::unit(1, "npc") - lwidth, lwidth), 
                                         top = top, bottom = bottom, left = left, right = right))
  return(combined)
}
|
f20453e8b7a2ad805a2266e85f4dbb2a42d250e1 | e22a88797c78d37415711b114c3a161499468c01 | /KalmiaPollenKinematics_SensitivityAnalysis.R | eb3311b4fdf8f1cb5820c7455950607de11de6cd | [] | no_license | callinSwitzer/Kalmia | 704417b6819410a430c0c73fab084df3eb686032 | bd78fa268cd357f5df945c0c6e44503f100afefc | refs/heads/master | 2020-07-27T11:41:35.687372 | 2017-10-20T21:35:21 | 2017-10-20T21:35:21 | 73,429,437 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,980 | r | KalmiaPollenKinematics_SensitivityAnalysis.R | ## Callin Switzer
## 29 Nov 2016
## Kalmia pollen and anther kinematics
# 1. Read in digitized files
# 2. Smooth digitized points, and impute
# 3. Use imputed points to calculate velocity and acceleration (normal and tangential)
# 4.
## TODO:
# use cross-validation to decide smoothing parameters or plot noise/resoluation tradeoff
# or simply justify the choice -- by using visual inspection
# compute acceleration and velocity for different values of smoothing parameters
# idea -- show video with smoothed vs. unsmoothed points added -- background subtracted.
# Setup
ipak <- function(pkg){
  # Install any packages in `pkg` that are not yet installed, then load them
  # all, returning the named logical load-success vector from require().
  not_installed <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  if (length(not_installed) > 0) {
    install.packages(not_installed, dependencies = TRUE)
  }
  sapply(pkg, require, character.only = TRUE)
}
# Packages used throughout the analysis (installed/loaded via ipak above).
packages <- c("ggplot2", "scales", "multcomp", "plyr", "car", "lme4", "signal", "reshape2", "viridis")
ipak(packages)
# read in metadata
# NOTE(review): hard-coded, user-specific path -- the script is not portable.
dfile <- "/Users/callinswitzer/Dropbox/ExperSummer2015/LaurelsOnly.csv"
metDat <- read.csv(dfile)
# keep only trials that were actually digitized
metDat <- metDat[metDat$digitizedFile!= "", ]
# set constants:
fps <- 5000 # frames per second
ii = 11  # scratch index -- overwritten by the loops below
# read in each .csv file for analysis
# make a list of data frames
digdirect <- "/Users/callinswitzer/Dropbox/ExperSummer2015/AllLaurelsDigitizations/"
maxVals = data.frame()  # accumulator for per-smoothing-parameter results
for(kk in seq(from = 0.9, to = 0.05, by = -0.05)){
newDF <- data.frame()
for(ii in 1:nrow(metDat)){
# ignore ii ==7, because the video started too late
if(ii == 7) next
ddfile <- paste0(digdirect, metDat$digitizedFile[ii])
dp <- read.csv(ddfile)
# calibrate locations, based on digitized pin or other object
# calibration points
pin <- data.frame(dp$pt1_cam1_X, dp$pt1_cam1_Y, dp$pt2_cam1_X, dp$pt2_cam1_Y)
pin <- pin[complete.cases(pin), ]
# get the number of pixels in the calibration
PixInPin <- (sqrt((pin$dp.pt1_cam1_X - pin$dp.pt2_cam1_X)^2 +
(pin$dp.pt1_cam1_Y-pin$dp.pt2_cam1_Y)^2)) /
metDat$CalSizeMM[ii] # to get to mm
# get anther and pollen locations
antherPoll <- data.frame(anthx = dp$pt3_cam1_X, anthy= dp$pt3_cam1_Y,
polx = dp$pt4_cam1_X, poly= dp$pt4_cam1_Y)
# get frame where pollen starts and leaves
antherPoll$polStart = 1:nrow(antherPoll) == metDat$framePollenStartedLeaving[ii]
antherPoll$polEnd = 1:nrow(antherPoll) == metDat$framePollenReleaseComplete[ii]
# gives only rows where either anth and pollen are complete
antherPoll = antherPoll[ complete.cases(antherPoll[c('anthx')]) |
complete.cases(antherPoll[c('polx')]), ]
# if x value starts to right of screen, reverse points,
# so all x values start on the left part of the screen at 0
if(lm(antherPoll[,1] ~ I(1:length( antherPoll[,1])))$coefficients[2] < 0 ){
antherPoll$anthx <- metDat[ii,'vidWidth'] - antherPoll$anthx
antherPoll$polx <- metDat[ii,'vidWidth'] - antherPoll$polx
}
# cbind data frame, to add smoothed columns
antherPoll <- data.frame(cbind(antherPoll, antherPoll))
# plot(x = antherPoll$anthx.1, y = antherPoll$anthy.1)
# plot(antherPoll$anthx.1)
# smooth with SG is based on the least-squares fitting of
# polynomials to segments of the data
# other options include smoothing splines (tend to "cut the corners" of curves)
# butterworth filters (inaccurate at endpoints)
# Kernel smoothing
x <- na.omit(antherPoll$anthy.1)
xx <- c(x[round(length(x)/ 2):1], x, x[round(length(x)):round(length(x)/ 2)])
want = c(rep(FALSE, round(length(x)/ 2)), rep(TRUE, length(x)), rep(FALSE, round(length(x)/2)))
sg <- sgolayfilt(xx, p = 3, n = 11) # Savitzky-Golay filter
# plot(xx[want], type="b", col = 'red', pch = 20)
# points(sg[want], pch = 20, type = 'o') # smoothed SG data
W = 0.99
b1 <- butter(5, W, type = 'low')
y1 <- filtfilt(b1, xx)
# points(y1[want], pch=20, col='grey')
# filter with Savitzky-Golay filter or Butterworth filter
# degree = 3, frame size = 11 points
foo = sapply(X = c("anthx.1", "anthy.1", "polx.1", "poly.1"), FUN = function(y){
#sm1 <- sgolayfilt(na.omit(antherPoll[, x]), p = 3, n = 51)
# butterworth filter
x <- na.omit(antherPoll[, y])
xx <- c(x[round(length(x)/ 2):1], x, x[round(length(x)):round(length(x)/ 2)])
want = c(rep(FALSE, round(length(x)/ 2)), rep(TRUE, length(x)), rep(FALSE, round(length(x)/2)))
W = kk # sweet spot seems to be about 0.2
b1 <- butter(5, W, type = 'low')
y1 <- filtfilt(b1, xx)
sm1 <- y1[want]
antherPoll[, y][complete.cases(antherPoll[, y])] <<- sm1
})
# add time to data frame
antherPoll$tme = 0: (nrow(antherPoll) - 1) / fps # time
# add columns with absolute position into dataframe (calculated from smoothed data)
# calculate position from starting point, not from minimum point
bar = sapply(X = c("anthx.1", "anthy.1", "polx.1", "poly.1"), FUN = function(x){
newName = paste0(x, ".abs")
tmp <- antherPoll[,x] / PixInPin / 1000
antherPoll[,newName] <<- tmp - na.omit(tmp)[1]
#antherPoll[,newName] <<- tmp - min(na.omit(tmp))
})
# add columns to show velocity, based on smoothed, absolute position
# velocity is in m/s
bat = sapply(X = c("anthx.1.abs", "anthy.1.abs", "polx.1.abs", "poly.1.abs"),
FUN = function(x){
newName = paste0(x, ".vel")
tmp <- c(NaN, diff(antherPoll[,x])) * fps # add a NaN to beginning of data
antherPoll[,newName] <<- tmp
})
# calculate speed
antherPoll$anthspeed = sqrt(antherPoll$anthx.1.abs.vel^2 + antherPoll$anthy.1.abs.vel^2)
antherPoll$polspeed = sqrt(antherPoll$polx.1.abs.vel^2 + antherPoll$poly.1.abs.vel^2)
# plot(antherPoll$anthspeed)
###########################################
# pollen acceleration
polVelocity = cbind(antherPoll$polx.1.abs.vel, antherPoll$poly.1.abs.vel)
polSpeed = antherPoll$polspeed
# plot(polSpeed)
tme = antherPoll$tme
polAccel = data.frame(rbind(c(NA, NA), apply(polVelocity, MARGIN = 2, FUN = diff))) * fps
# par(mfrow =c(2,2))
# plot(polAccel[,1], x = antherPoll$tme, type = 'l') # calculated
# plot(polAccel[,2], x = antherPoll$tme, type = 'l')
# unit tangent vector
T_t = polVelocity / polSpeed
DT = data.frame(rbind(c(NA, NA), apply(T_t, MARGIN = 2, FUN = diff))) * fps
NormDT = sqrt(DT[,1]^2 + DT[,2]^2)
Curvature = NormDT / polSpeed
# compute a_N (normal acceleration) and a_T (tangential acceleration)
# a_T = ds/dt
a_T = c(NA, diff(polSpeed) * fps)
N_t = data.frame(t(sapply(1:nrow(DT), FUN = function(x) unlist(DT[x, ] / NormDT[x]))))
# plot(a_T, type = "l", ylim = c(-3000, 3000))
# a_N = speed^2 * curvature
a_N = polSpeed^2 * Curvature
# check total accel by adding normal and tangential accelerations
# a_total = a_T * T_t + a_N * N_t
a_total = as.data.frame(t(sapply(X = 1:nrow(polAccel), FUN = function(x) a_T[x] * T_t[x, ] + a_N[x] * N_t[x,] )))
# plot(a_total) # includes both x and y coordinates
# plot(polAccel)
# par(mfrow = c(2,2))
# plot(unlist(a_total[,1]))
# plot(unlist(a_total[,2]))
# plot(polAccel[,1])
# plot(polAccel[,2])
# plot(a_N)
# plot(a_T)
#
a_T_Pol = a_T
# plot(a_T, x = tme)
# plot(a_N, x = tme)
# calculate magnitude of acceleration, using two methods
# 1. Normal and tangential acceleration
a_mag1 = sqrt(a_T^2 + a_N^2)
# plot(a_mag1)
amag2 = sqrt(polAccel[,1]^2 + polAccel[,2]^2)
# plot(amag2, type = 'l')
# plot(polVelocity[,1])
########################################
###########################################
# anther acceleration
anthVelocity = cbind(antherPoll$anthx.1.abs.vel, antherPoll$anthy.1.abs.vel)
anthSpeed = antherPoll$anthspeed
# plot(anthSpeed)
tme = antherPoll$tme
anthAccel = data.frame(rbind(c(NA, NA), apply(anthVelocity, MARGIN = 2, FUN = diff))) * fps
# par(mfrow =c(2,2))
# # plot(anthAccel[,1], x = antherPoll$tme, type = 'l') # calculated
# plot(anthAccel[,2], x = antherPoll$tme, type = 'l')
# unit tangent vector
T_t = anthVelocity / anthSpeed
DT = data.frame(rbind(c(NA, NA), apply(T_t, MARGIN = 2, FUN = diff))) * fps
NormDT = sqrt(DT[,1]^2 + DT[,2]^2)
Curvature = NormDT / anthSpeed
# compute a_N (normal acceleration) and a_T (tangential acceleration)
# a_T = ds/dt
a_T = c(NA, diff(anthSpeed) * fps)
a_T_anth = a_T
N_t = data.frame(t(sapply(1:nrow(DT), FUN = function(x) unlist(DT[x, ] / NormDT[x]))))
# plot(a_T, type = "l", ylim = c(-3000, 3000))
# a_N = speed^2 * curvature
a_N = anthSpeed^2 * Curvature
# check total accel by adding normal and tangential accelerations
# a_total = a_T * T_t + a_N * N_t
a_total = as.data.frame(t(sapply(X = 1:nrow(anthAccel), FUN = function(x) a_T[x] * T_t[x, ] + a_N[x] * N_t[x,] )))
# plot(a_total) # includes both x and y coordinates
# plot(anthAccel)
# par(mfrow = c(2,2))
# plot(unlist(a_total[,1]))
# plot(unlist(a_total[,2]))
# plot(anthAccel[,1])
# plot(anthAccel[,2])
# plot(a_N)
# plot(a_T)
# par(mfrow = c(2,1))
# plot(a_T, x = tme, type = 'l')
# max(a_T, na.rm = TRUE)
# which.max(a_T)
# plot(a_T_Pol, x = tme, type = 'l')
# max(a_T_Pol, na.rm = TRUE)
# which.max(a_T_Pol)
#
tmeRoll <- seq(from = -which(antherPoll$polStart) + 1, length.out = length(tme)) / fps
dfi <- data.frame(anthSpeed, polSpeed, a_T_anth, a_T_Pol, tme,
trial = metDat$VideoName[ii],
tmeStart = antherPoll$polStart,
tmeEnd = antherPoll$polEnd,
centeredTime = tmeRoll)
newDF <- rbind(newDF,dfi)
print(ii)
}
antherPoll$frame <- 1:nrow(antherPoll)
ggplot(na.omit(antherPoll)) +
geom_point(aes(x = anthx, y = anthy), colour = "grey", alpha = 0.3) +
geom_path(aes(x = anthx, y = anthy), color = "grey", alpha = 0.3) +
geom_point(aes(x = anthx.1, y = anthy.1, colour = frame)) +
geom_path(aes(x = anthx.1, y = anthy.1, color = frame)) +
scale_color_viridis()
ggplot((antherPoll)) +
geom_point(aes(x = polx, y = poly), colour = "grey", alpha = 0.3) +
geom_path(aes(x = polx, y = poly), color = "grey", alpha = 0.3) +
geom_point(aes(x = polx.1, y = poly.1, colour = frame)) +
geom_path(aes(x = polx.1, y = poly.1, color = frame)) +
scale_color_viridis() +
coord_fixed(ratio = 1)
plot(antherPoll$anthx, antherPoll$anthy)
points(antherPoll$anthx.1, antherPoll$anthy.1, type = 'b', pch = 20)
theme_set(theme_classic())
savePath = "/Users/callinswitzer/Dropbox/ExperSummer2015/Kalmia2015FiguresAndData/"
# anther speed
ggplot(newDF, aes(x = centeredTime, y = anthSpeed, group = trial)) +
geom_line(alpha = 0.5) +
xlim(c(-0.01, 0.02)) +
ylim(c(0,6)) +
labs(x = "Time (s)", y = "Anther speed (m/s)")
ggsave(paste0(savePath, "antherSpeed", "filt_", kk, ".pdf"), width = 5, height = 4)
# pollen speed
ggplot(newDF, aes(x = centeredTime, y = polSpeed, group = trial)) +
geom_line(alpha = 0.5) +
xlim(c(-0.01, 0.02)) +
ylim(c(0,6)) +
labs(x = "Time (s)", y = "Pollen speed (m/s)")
ggsave(paste0(savePath, "pollenSpeed", "filt_", kk, ".pdf"), width = 5, height = 4)
# anther tangential acceleration
ggplot(newDF, aes(x = centeredTime, y = a_T_anth, group = trial)) +
geom_line(alpha = 0.5) +
#ylim(c(-2500, 4000)) +
xlim(c(-0.01, 0.02)) +
labs(x = "Time (s)", y = "Anther tangential acceleration (m/s/s)")
ggsave(paste0(savePath, "antherTangAccel", "filt_", kk, ".pdf"), width = 5, height = 4)
# pollen tangential acceleration
# anther tangential acceleration
ggplot(newDF, aes(x = centeredTime, y = a_T_Pol, group = trial)) +
geom_line(alpha = 0.5) +
#ylim(c(-2500, 4000)) +
xlim(c(-0.01, 0.02)) +
labs(x = "Time (s)", y = "Pollen tangential acceleration (m/s/s)")
ggsave(paste0(savePath, "PollenTangAccel", "filt_", kk, ".pdf"), width = 5, height = 4)
# find max for each measurement for each trial
# anther speed
mmx <- as.data.frame(t(sapply(unique(as.character(newDF$trial)), FUN = function(x){
tmp <- newDF[newDF$trial == x, ]
return (unlist(tmp[which.max(tmp$anthSpeed),]))
})))
mmx$trial <- row.names(mmx)
ggplot() +
geom_line(data = newDF, aes(x = centeredTime, y = anthSpeed, group = as.factor(trial)), alpha = 0.5) +
xlim(c(-0.01, 0.02)) +
ylim(c(0,6)) +
labs(x = "Time (s)", y = "Anther speed (m/s)") +
geom_point(data = mmx, aes(x = centeredTime, y = anthSpeed), color = 'red', alpha = 0.5) +
theme(legend.position = "none")
#+ facet_wrap(~ trial)
ggsave(paste0(savePath, "antherSpeedMax", "filt_", kk, ".pdf"), width = 5, height = 4)
# pollen speed
mmp <- as.data.frame(t(sapply(unique(as.character(newDF$trial)), FUN = function(x){
tmp <- newDF[newDF$trial == x, ]
tmp <- tmp[abs(tmp$centeredTime) < 0.01, ]
return (unlist(tmp[which.max(tmp$polSpeed),]))
})))
mmp$trial <- row.names(mmp)
# pollen speed
ggplot() +
geom_line(data = newDF, aes(x = centeredTime, y = polSpeed, group = trial), alpha = 0.5) +
xlim(c(-0.01, 0.02)) +
ylim(c(0,6)) +
labs(x = "Time (s)", y = "Pollen speed (m/s)") +
geom_point(data = mmp, aes(x = centeredTime, y = polSpeed), color = 'red', alpha = 0.5) +
theme(legend.position = "none")
ggsave(paste0(savePath, "pollenSpeedMax", "filt_", kk, ".pdf"), width = 5, height = 4)
# anther acceleration
mma <- as.data.frame(t(sapply(unique(as.character(newDF$trial)), FUN = function(x){
tmp <- newDF[newDF$trial == x, ]
# get only points that are within 0.05 seconds of the centered time
# to ignore the anthers hitting the other side of the flower
tmp <- tmp[abs(tmp$centeredTime) < 0.005, ]
return (unlist(tmp[which.max(tmp$a_T_anth),]))
})))
mma$trial <- row.names(mma)
ggplot() +
geom_line(data = newDF, aes(x = centeredTime, y = a_T_anth, group = trial), alpha = 0.5) +
#ylim(c(-2500, 4000)) +
xlim(c(-0.01, 0.02)) +
labs(x = "Time (s)", y = "Anther tangential acceleration (m/s/s)") +
geom_point(data = mma, aes(x = centeredTime, y = a_T_anth), color = 'red', alpha = 0.5)
ggsave(paste0(savePath, "antherTangAccelMax", "filt_", kk, ".pdf"), width = 5, height = 4)
# pollen acceleration
mmpp <- as.data.frame(t(sapply(unique(as.character(newDF$trial)), FUN = function(x){
tmp <- newDF[newDF$trial == x, ]
tmp <- tmp[abs(tmp$centeredTime) < 0.007, ]
return (unlist(tmp[which.max(tmp$a_T_Pol),]))
})))
mmpp$trial <- row.names(mmpp)
ggplot() +
geom_line(data = newDF, aes(x = centeredTime, y = a_T_Pol, group = trial), alpha = 0.5) +
#ylim(c(-2500, 4000)) +
xlim(c(-0.01, 0.02)) +
labs(x = "Time (s)", y = "Pollen tangential acceleration (m/s/s)") +
geom_point(data = mmpp, aes(x = centeredTime, y = a_T_Pol), color = 'red', alpha = 0.5)
ggsave(paste0(savePath, "pollenTangAccelMax", "filt_", kk, ".pdf"), width = 5, height = 4)
# estimate ranges for acceleration, and speed
md = merge(x = mmx[, c('trial', 'anthSpeed')], metDat, by.x = "trial", by.y =
"VideoName")
md = merge(x = mmp[, c('trial', 'polSpeed')], md, by = "trial")
md = merge(x = mmpp[, c('trial', 'a_T_Pol')], md, by = "trial")
md = merge(x = mma[, c('trial', 'a_T_anth')], md, by = "trial")
#LMER
modVelMaxAnth <- lmer(formula = anthSpeed ~ (1|plant/FlowerNumber), data = md)
summary(modVelMaxAnth)
#confint(modVelMaxAnth)
modVelMaxPol <- lmer(formula = polSpeed ~ (1|plant/FlowerNumber), data = md)
summary(modVelMaxPol)
#confint(modVelMaxPol)
modAccMaxPol <- lmer(formula = a_T_Pol ~ (1|plant/FlowerNumber), data = md)
summary(modAccMaxPol)
#confint(modAccMaxPol)
modAccMaxAnth <- lmer(formula = a_T_anth ~ (1|plant/FlowerNumber), data = md)
summary(modAccMaxAnth)
#confint(modAccMaxAnth)
newRow = c(summary(modVelMaxAnth)$coef[1], summary(modVelMaxPol)$coef[1],
summary(modAccMaxPol)$coef[1], summary(modAccMaxAnth)$coef[1])
maxVals <- rbind(maxVals, newRow)
}
# Summarize the sensitivity analysis: `maxVals` was filled inside the loop
# above with one row of population-level maxima (from the lmer intercepts)
# per Butterworth filter parameter value.
colnames(maxVals) <- c("Max Veloc Anth (m/s)", "Max Veloc Pol (m/s)", "Max Acc Pol (m/s)", "Max Acc Anth (m/s)")
# Filter parameters tested, in the same (descending) order as the loop ran.
maxVals$filtParameter <- seq(from = 0.9, to = 0.05, by = -0.05)
# Drop any columns that ended up with an NA name.
maxVals[, is.na(colnames(maxVals)) ] <- NULL
# Reshape wide -> long for faceted plotting (reshape2::melt).
maxV_long <- melt(maxVals, id.vars = "filtParameter")
# Flag acceleration vs velocity variables by matching "Acc" in the name;
# grep() returns integer(0) for non-matches, which becomes NA below.
maxV_long$acc = as.numeric(sapply(maxV_long$variable, function(x) grep(pattern = "Acc", as.character(x)) == 1))
library(plyr)
# Recode the 1/NA flag to readable facet labels.
maxV_long$acc <- mapvalues(as.character(maxV_long$acc), from = c("1", NA), to = c("Acceleration", "Velocity"))
# Show how each maximum estimate depends on the filter parameter choice.
ggplot(maxV_long, aes(x = filtParameter, y = value, color = variable)) +
  geom_line(size = 2) +
  facet_wrap(~acc, scales = 'free') +
  labs(x = "filter parameter", y = "Value") +
  scale_color_viridis(discrete = TRUE, name = "Measured Variable")
|
38149430f8daf4c04ad66d79e55172c4f56a036d | def84d2fc19445079483f0d6afa54431a222f6be | /limerscript.R | 0ed2a0337fc2cabb11f5b6e1eb1b311851aa0a7e | [] | no_license | usaidoti/NCCI | f6a71dc6b0a6ff9bc0a17fb3a7dceb04e539ab44 | 7c98efcb56228cba4859773de71eb7bb1a33fef5 | refs/heads/master | 2021-01-14T06:22:05.187017 | 2017-01-26T19:15:20 | 2017-01-26T19:15:20 | 81,868,446 | 0 | 0 | null | 2017-02-13T20:29:54 | 2017-02-13T20:29:54 | null | UTF-8 | R | false | false | 10,931 | r | limerscript.R | #Use setwd('path/to/project') to set working directory
#Load packages
library(limer)
library(plyr)
library(reshape2)
#connect to limer, change api link, username and password where necessary
# NOTE(review): credentials are placeholders to be filled in locally; do not
# commit real credentials to version control.
options(lime_api = 'http://survey.itechcenter.ne/index.php/admin/remotecontrol')
options(lime_username = 'your_username')
options(lime_password = 'your_password')
# Authenticate against the LimeSurvey RemoteControl API.
get_session_key()
# List all surveys on the server; used to spot new surveys to add below.
survey_df<-call_limer(method='list_surveys')
View(survey_df)
#save date
# Timestamp of this export run (note: shadows base::date within this session).
date<-Sys.time()
#Download surveys. Check 'survey_df' table for any new files and add to list below.
# Each call pulls the short-form French responses for one survey by its
# LimeSurvey ID; the variable name is the project's internal survey code.
AGA046<-get_responses(iSurveyID= 954197, sLanguageCode = 'fr', sResponseType = 'short')
DIF010<-get_responses(iSurveyID= 397193, sLanguageCode = 'fr', sResponseType = 'short')
AGA051<-get_responses(iSurveyID= 335852, sLanguageCode = 'fr', sResponseType = 'short')
DIF018<-get_responses(iSurveyID= 669388, sLanguageCode = 'fr', sResponseType = 'short')
NIA029<-get_responses(iSurveyID= 215531, sLanguageCode = 'fr', sResponseType = 'short')
AGA045<-get_responses(iSurveyID= 139427, sLanguageCode = 'fr', sResponseType = 'short')
AGA041<-get_responses(iSurveyID= 318229, sLanguageCode = 'fr', sResponseType = 'short')
DIF044<-get_responses(iSurveyID= 251232, sLanguageCode = 'fr', sResponseType = 'short')
AGA055<-get_responses(iSurveyID= 731787, sLanguageCode = 'fr', sResponseType = 'short')
TILL006<-get_responses(iSurveyID= 828848, sLanguageCode = 'fr', sResponseType = 'short')
AGA060<-get_responses(iSurveyID= 675145, sLanguageCode = 'fr', sResponseType = 'short')
AGA059<-get_responses(iSurveyID= 212898, sLanguageCode = 'fr', sResponseType = 'short')
AGA061<-get_responses(iSurveyID= 191773, sLanguageCode = 'fr', sResponseType = 'short')
DIF059<-get_responses(iSurveyID= 485985, sLanguageCode = 'fr', sResponseType = 'short')
TILL007<-get_responses(iSurveyID= 943461, sLanguageCode = 'fr', sResponseType = 'short')
TILL009<-get_responses(iSurveyID= 541311, sLanguageCode = 'fr', sResponseType = 'short')
DIF040<-get_responses(iSurveyID= 448111, sLanguageCode = 'fr', sResponseType = 'short')
DIF048<-get_responses(iSurveyID= 716422, sLanguageCode = 'fr', sResponseType = 'short')
DIF045<-get_responses(iSurveyID= 259664, sLanguageCode = 'fr', sResponseType = 'short')
AGA062<-get_responses(iSurveyID= 517332, sLanguageCode = 'fr', sResponseType = 'short')
DIF038<-get_responses(iSurveyID= 852367, sLanguageCode = 'fr', sResponseType = 'short')
AGA031<-get_responses(iSurveyID= 584813, sLanguageCode = 'fr', sResponseType = 'short')
CFWNM2<-get_responses(iSurveyID= 376845, sLanguageCode = 'fr', sResponseType = 'short')
TIL016<-get_responses(iSurveyID= 521157, sLanguageCode = 'fr', sResponseType = 'short')
# NOTE(review): DIF059 is downloaded above but never subset or merged below --
# presumably an empty survey; confirm before deleting the download.
AGA066<-get_responses(iSurveyID= 854153, sLanguageCode = 'fr', sResponseType = 'short')
#Make copies with only the columns of interest, also excluding empty surveys
# The "c"-suffixed copies keep only the demographic questions (Q1-Q10),
# the fairness question (mapped to Q17 later), and the multiple-selection
# "how did you hear" question (mapped to Q41.1.-Q41.4. later). Column codes
# differ per survey version; they are aligned in the remapping block below.
# AGA046 and DIF040 are commented out (empty surveys, per the note above).
AGA041c <- AGA041[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10")]
AGA045c <- AGA045[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10")]
#AGA046c <- AGA046[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q17")]
AGA051c <- AGA051[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q17","Q41.1.","Q41.2.","Q41.3.","Q41.4.")]
AGA055c <- AGA055[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q41.1.","Q41.2.","Q41.3.","Q41.4.")]
AGA059c <- AGA059[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q23.1.","Q23.2.","Q23.3.","Q23.4.")]
AGA060c <- AGA060[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q16","Q23.1.","Q23.2.","Q23.3.","Q23.4.")]
AGA061c <- AGA061[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10")]
DIF010c <- DIF010[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q17","Q41.1.","Q41.2.","Q41.3.","Q41.4.")]
DIF018c <- DIF018[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q17","Q41.1.","Q41.2.","Q41.3.","Q41.4.")]
DIF044c <- DIF044[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q41.1.","Q41.2.","Q41.3.","Q41.4.")]
NIA029c <- NIA029[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q41.1.","Q41.2.","Q41.3.","Q41.4.")]
TILL006c <- TILL006[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10")]
TILL007c <- TILL007[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q23.1.","Q23.2.","Q23.3.","Q23.4.")]
TILL009c <- TILL009[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q21","Q25.1.","Q25.2.","Q25.3.","Q25.4.")]
#DIF040c <- DIF040[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q15","Q16.1.","Q16.2.","Q16.3.")]
DIF048c <- DIF048[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q15","Q16.1.","Q16.2.","Q16.3.")]
DIF045c <- DIF045[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q15","Q16.1.","Q16.2.","Q16.3.")]
AGA062c <- AGA062[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q18","Q32.1.","Q32.2.","Q32.3.","Q32.4.")]
DIF038c <- DIF038[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q16","Q32.1.","Q32.2.","Q32.3.","Q32.4.")]
AGA031c <- AGA031[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q45","Q53.1.","Q53.2.","Q53.3.","Q53.4.")]
CFWNM2c <- CFWNM2[c("Q1","Q2","Q6","Q7","Q8","Q9","Q11","Q33","Q34.1.","Q34.2.","Q34.3.","Q34.4.")]
TIL016c <- TIL016[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q21","Q27.1.","Q27.2.","Q27.3.","Q27.4.")]
AGA066c <- AGA066[c("Q1","Q2","Q6","Q7","Q8","Q9","Q10","Q17","Q41.1.","Q41.2.","Q41.3.","Q41.4.")]
#Add identification column
# Tag every response with the code of the survey it came from, so rows remain
# identifiable after the per-survey data frames are merged with rbind.fill().
AGA041c$Survey <- 'AGA041'
AGA045c$Survey <- 'AGA045'
#AGA046c$Survey <- 'AGA046'
AGA051c$Survey <- 'AGA051'
AGA055c$Survey <- 'AGA055'
AGA059c$Survey <- 'AGA059'
AGA060c$Survey <- 'AGA060'
AGA061c$Survey <- 'AGA061'
DIF010c$Survey <- 'DIF010'
DIF018c$Survey <- 'DIF018'
DIF044c$Survey <- 'DIF044'
NIA029c$Survey <- 'NIA029'
TILL006c$Survey <- 'TILL006'
TILL007c$Survey <- 'TILL007'
TILL009c$Survey <- 'TILL009'
#DIF040c$Survey <- 'DIF040'
DIF048c$Survey <- 'DIF048'
DIF045c$Survey <- 'DIF045'
AGA062c$Survey <- 'AGA062'
DIF038c$Survey <- 'DIF038'
AGA031c$Survey <- 'AGA031'
# Fix: these three labels previously carried a stray trailing "c" (the name of
# the R copy variable, e.g. 'CFWNM2c') leaked into the data. The label should
# be the bare survey code, consistent with the 19 surveys above.
CFWNM2c$Survey <- 'CFWNM2'
TIL016c$Survey <- 'TIL016'
AGA066c$Survey <- 'AGA066'
#Move misaligned columns to desired, consistent location
# Different survey versions exported the same questions under different codes.
# This block copies each survey's columns into the canonical names used for
# merging (Q17 = fairness of beneficiary selection; Q41.1.-Q41.4. = how the
# respondent heard about the activity) and then deletes the originals.
# NOTE(review): the statement order matters (copy first, NULL-delete second);
# do not reorder. A data-driven rename map would be a safer long-term
# refactor than this copy-paste block.
# AGA059 / AGA060 / TILL007: "how heard" exported as Q23.*
AGA059c$Q41.1.<-AGA059c$Q23.1.
AGA059c$Q41.2.<-AGA059c$Q23.2.
AGA059c$Q41.3.<-AGA059c$Q23.3.
AGA059c$Q41.4.<-AGA059c$Q23.4.
AGA060c$Q41.1.<-AGA060c$Q23.1.
AGA060c$Q41.2.<-AGA060c$Q23.2.
AGA060c$Q41.3.<-AGA060c$Q23.3.
AGA060c$Q41.4.<-AGA060c$Q23.4.
TILL007c$Q41.1.<-TILL007c$Q23.1.
TILL007c$Q41.2.<-TILL007c$Q23.2.
TILL007c$Q41.3.<-TILL007c$Q23.3.
TILL007c$Q41.4.<-TILL007c$Q23.4.
# TILL009: "how heard" exported as Q25.*, fairness as Q21
TILL009c$Q41.1.<-TILL009c$Q25.1.
TILL009c$Q41.2.<-TILL009c$Q25.2.
TILL009c$Q41.3.<-TILL009c$Q25.3.
TILL009c$Q41.4.<-TILL009c$Q25.4.
TILL009c$Q17 <-TILL009c$Q21
TILL009c$Q21<-NULL
# NOTE(review): the commented DIF040 line below references DIF048c -- a latent
# copy-paste mistake; fix before ever re-enabling this block.
#DIF040c$Q41.1.<-DIF040c$Q16.1.
#DIF040c$Q41.2.<-DIF040c$Q16.2.
#DIF040c$Q41.3.<-DIF040c$Q16.3.
#DIF040c$Q41.4.<- NA
#DIF040c$Q17<-DIF048c$Q15
# DIF048 / DIF045: only three "how heard" options existed; Q41.4. padded NA
DIF048c$Q41.1.<-DIF048c$Q16.1.
DIF048c$Q41.2.<-DIF048c$Q16.2.
DIF048c$Q41.3.<-DIF048c$Q16.3.
DIF048c$Q41.4.<- NA
DIF048c$Q17<-DIF048c$Q15
DIF045c$Q41.1.<-DIF045c$Q16.1.
DIF045c$Q41.2.<-DIF045c$Q16.2.
DIF045c$Q41.3.<-DIF045c$Q16.3.
DIF045c$Q41.4.<- NA
DIF045c$Q17<-DIF045c$Q15
# AGA062: "how heard" exported as Q32.*, fairness as Q18
AGA062c$Q41.1.<-AGA062c$Q32.1.
AGA062c$Q41.2.<-AGA062c$Q32.2.
AGA062c$Q41.3.<-AGA062c$Q32.3.
AGA062c$Q41.4.<- AGA062c$Q32.4.
AGA062c$Q17<-AGA062c$Q18
# Remove the now-duplicated source columns.
AGA059c$Q23.1.<-NULL
AGA059c$Q23.2.<-NULL
AGA059c$Q23.3.<-NULL
AGA059c$Q23.4.<-NULL
AGA060c$Q23.1.<-NULL
AGA060c$Q23.2.<-NULL
AGA060c$Q23.3.<-NULL
AGA060c$Q23.4.<-NULL
TILL007c$Q23.1.<-NULL
TILL007c$Q23.2.<-NULL
TILL007c$Q23.3.<-NULL
TILL007c$Q23.4.<-NULL
AGA060c$Q17<-AGA060c$Q16
AGA060c$Q16<-NULL
TILL009c$Q25.1.<-NULL
TILL009c$Q25.2.<-NULL
TILL009c$Q25.3.<-NULL
TILL009c$Q25.4.<-NULL
#DIF040c$Q16.1.<-NULL
#DIF040c$Q16.2.<-NULL
#DIF040c$Q16.3.<-NULL
#DIF040c$Q15<-NULL
DIF048c$Q16.1.<-NULL
DIF048c$Q16.2.<-NULL
DIF048c$Q16.3.<-NULL
DIF048c$Q15<-NULL
DIF045c$Q16.1.<-NULL
DIF045c$Q16.2.<-NULL
DIF045c$Q16.3.<-NULL
DIF045c$Q15<-NULL
AGA062c$Q32.1.<-NULL
AGA062c$Q32.2.<-NULL
AGA062c$Q32.3.<-NULL
AGA062c$Q32.4.<-NULL
AGA062c$Q18<-NULL
# DIF038: "how heard" exported as Q32.*, fairness as Q16
DIF038c$Q41.1.<-DIF038c$Q32.1.
DIF038c$Q41.2.<-DIF038c$Q32.2.
DIF038c$Q41.3.<-DIF038c$Q32.3.
DIF038c$Q41.4.<-DIF038c$Q32.4.
DIF038c$Q17<-DIF038c$Q16
DIF038c$Q32.1.<- NULL
DIF038c$Q32.2.<- NULL
DIF038c$Q32.3.<- NULL
DIF038c$Q32.4.<- NULL
DIF038c$Q16<- NULL
# AGA031: "how heard" exported as Q53.*, fairness as Q45
AGA031c$Q41.1.<-AGA031c$Q53.1.
AGA031c$Q41.2.<-AGA031c$Q53.2.
AGA031c$Q41.3.<-AGA031c$Q53.3.
AGA031c$Q41.4.<-AGA031c$Q53.4.
AGA031c$Q17<-AGA031c$Q45
AGA031c$Q53.1.<- NULL
AGA031c$Q53.2.<- NULL
AGA031c$Q53.3.<- NULL
AGA031c$Q53.4.<- NULL
AGA031c$Q45<- NULL
# CFWNM2: participation exported as Q11, fairness as Q33, "how heard" as Q34.*
CFWNM2c$Q10<-CFWNM2c$Q11
CFWNM2c$Q17<-CFWNM2c$Q33
CFWNM2c$Q41.1.<-CFWNM2c$Q34.1.
CFWNM2c$Q41.2.<-CFWNM2c$Q34.2.
CFWNM2c$Q41.3.<-CFWNM2c$Q34.3.
CFWNM2c$Q41.4.<-CFWNM2c$Q34.4.
CFWNM2c$Q11<-NULL
CFWNM2c$Q33<-NULL
CFWNM2c$Q34.1.<-NULL
CFWNM2c$Q34.2.<-NULL
CFWNM2c$Q34.3.<-NULL
CFWNM2c$Q34.4.<-NULL
# TIL016: fairness exported as Q21, "how heard" as Q27.*
TIL016c$Q17<-TIL016c$Q21
TIL016c$Q41.1.<-TIL016c$Q27.1.
TIL016c$Q41.2.<-TIL016c$Q27.2.
TIL016c$Q41.3.<-TIL016c$Q27.3.
TIL016c$Q41.4.<-TIL016c$Q27.4.
TIL016c$Q21<-NULL
TIL016c$Q27.1.<-NULL
TIL016c$Q27.2.<-NULL
TIL016c$Q27.3.<-NULL
TIL016c$Q27.4.<-NULL
#Combine tables
# rbind.fill() pads columns that are missing from a given survey with NA.
NCCIbind <- rbind.fill(AGA041c, AGA045c, AGA051c, AGA055c, AGA059c, AGA060c, AGA061c, DIF010c, DIF018c, DIF044c,NIA029c, TILL006c, TILL007c, TILL009c, DIF048c, DIF045c, AGA062c, DIF038c, AGA031c, CFWNM2c, TIL016c, AGA066c)
#Replace numerical codes with text labels
# Factor labels below are intentionally in French (respondent-facing values);
# they are part of the output data and must not be translated.
NCCIbind$Language <- factor(NCCIbind$Q1, levels = c(1,2,3,4,5,6,7,8), labels = c("Haussa", "Français", "Toubou", "Tamasheq","Kanouri","Zarma","Fulfulde Adamawa","Arabe"))
NCCIbind$Region <- factor(NCCIbind$Q2, levels = c(1,2,3,4,"-oth-"), labels = c("Agadez","Diffa","Niamey","Tillabery","Other"))
# Age bins are (0,17], (17,30], (30,45], (45,60], (60,100] -- right-closed.
NCCIbind$Age.Group<-cut(NCCIbind$Q7,breaks=c(0,17,30,45,60,100), labels=c("Under 18","18-30","31-45","46-60","60+" ))
NCCIbind$Ethnicity <- factor(NCCIbind$Q9, levels = c(1,2,3,4,5,6,7,8), labels = c("Haussa", "Touareg", "Peul", "Zarma/Songhai","Toubou","Kanouri","Arabe","Je préfère ne pas répondre"))
NCCIbind$Gender<-NCCIbind$Q8
# NOTE(review): no `levels=` given here, so labels map onto sort(unique(Q10));
# this assumes Q10 always has exactly four distinct values -- verify.
NCCIbind$Participation<-factor(NCCIbind$Q10, labels = c("Je suis un jeune participant à la formation et un membre de l'équipe qui va bénéficier des unités de production d'eau","Je suis un organisateur (autorité, partenaire)","Je suis un membre de la communauté (spectateur de processus)", "Sans réponse" ))
NCCIbind$Selection.Beneficiaries <- factor(NCCIbind$Q17, levels = c(1,2,3,4,5,6), labels = c("Très juste", "Juste", "Neutre", "Peu juste","Très injuste","Sans Response"))
#Create separate table for 'multiple selection' questions
# Keep only respondents who ticked at least one "how heard" option, then
# melt to one row per (respondent, option).
NCCImelt2 = melt(subset(NCCIbind, Q41.1.=="Y" | Q41.2.=="Y" | Q41.3.=="Y" | Q41.4.=="Y"), id.vars=c("Q1","Q2","Q6","Q7","Q8","Q9","Q10"), measure.vars=c("Q41.1.","Q41.2.","Q41.3.","Q41.4."))
NCCImelt2$QForm <- paste(NCCImelt2$variable,NCCImelt2$value)
# Only "option Y" combinations get a label; "option N"/NA become factor NA.
NCCImelt2$Comment.Entendu <- factor(NCCImelt2$QForm, levels = c("Q41.1. Y","Q41.2. Y", "Q41.3. Y", "Q41.4. Y"), labels = c("Via les cartes d'information ", "Au travers d'annonces pendant l'activité ", "A la radio", "Par le bouche à oreille"))
NCCImelt2$QForm<-NULL
# Rename the raw option columns in the wide table to descriptive names.
NCCIbind$Cartes.information<-NCCIbind$Q41.1.
NCCIbind$Annonces.pendant<-NCCIbind$Q41.2.
NCCIbind$radio<-NCCIbind$Q41.3.
NCCIbind$bouche.orielle<-NCCIbind$Q41.4.
NCCIbind$Q41.1.<-NULL
NCCIbind$Q41.2.<-NULL
NCCIbind$Q41.3.<-NULL
NCCIbind$Q41.4.<-NULL
# Persist the whole workspace (.RData) for downstream reporting.
save.image()
|
f17a09d79ac24df9c67d65c2749339d42f786b90 | 18492b283f897173eea63670167c9cf217243b89 | /Part3_graph1.R | 2165dd7d478440cf6a1587ed5131b59dc39261dc | [] | no_license | cmm16/stat405_project | d28c8af47274a7e8815498df3f85004de8f181b2 | e462e46b679da200c24882ff78dbecb4cfe03613 | refs/heads/master | 2020-07-31T18:08:05.010594 | 2019-12-06T02:06:51 | 2019-12-06T02:06:51 | 210,705,210 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 427 | r | Part3_graph1.R | library(RSQLite)
# Plot the average departure delay per day of week for flights out of DFW,
# reading from the shared SQLite database built for the project.
dcon <- dbConnect(SQLite(), dbname = "group10.db")
dbListTables(dcon)
res <- dbSendQuery(conn = dcon, "
                   SELECT DAY_OF_WEEK, avg(DEP_DELAY)
                   FROM flights
                   WHERE ORIGIN = 'DFW'
                   GROUP BY DAY_OF_WEEK;")
# Fetch all rows (-1), then release the prepared statement.
avg_delays <- dbFetch(res, -1)
dbClearResult(res)
plot(avg_delays$DAY_OF_WEEK, avg_delays$`avg(DEP_DELAY)`, xlab = "Day of Week",
     ylab = 'Avg. Delays', main = "Average Delay Times for DFW", col = "blue")
# Fix: the connection was previously never closed (resource leak).
dbDisconnect(dcon)
|
8138c1b7f5a768a44d5a352066a2cc3669da7c91 | 2407690f9e04b517096a826cd63634b346e1770f | /changes.R | 71c80ac3d69cead1aa3ed1ecd4a99f7c8f943044 | [] | no_license | tylerlau07/romance_nominal_change | b9a94ff6b47056ed0f858a5fe61ec48dcce835c8 | 1c33aaf2cba7b045f162bde9008d65b53925098a | refs/heads/master | 2020-05-21T16:45:44.633613 | 2016-09-30T06:45:25 | 2016-09-30T06:45:25 | 61,847,094 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,058 | r | changes.R | # This program will look at which specific words changed to what classes
library(readr)
library(plyr)
library(ggplot2)
library(reshape2)
library(gtools)
### Read stats files ###
# One CSV per simulation run in the working directory.
files <- list.files(pattern = '\\.csv')
# We want to make a data frame showing what each word became, start with counts
df_wordcount <- data.frame()
# Add counts for each word
file_originfo <- read_csv(files[1])
# Original state: record each word's starting Gender / Declension / Case /
# Number from the space-separated feature string in column `0` of the first
# file (generation 0; assumed identical across runs -- TODO confirm).
# seq_len() is safe when the table has zero rows (1:nrow would give c(1, 0)),
# and the feature string is now split once per row instead of four times.
for (i in seq_len(nrow(file_originfo))) {
  word <- file_originfo$`Declined Noun`[i]
  orig_features <- unlist(strsplit(file_originfo$`0`[i], ' '))
  df_wordcount[word, 'Gender'] <- orig_features[1]
  df_wordcount[word, 'Declension'] <- orig_features[2]
  df_wordcount[word, 'Case'] <- orig_features[3]
  df_wordcount[word, 'Number'] <- orig_features[4]
}
# Now we want to go through files and add 1 to each change in declension and gender that takes place
# For every run, read the final column (last generation) of each word's row
# and tally its final gender, declension, and case.number combination into
# new columns of df_wordcount (column created on first sight via invalid()).
for (file in files) {
  read <- read_csv(file)
  for (row in 1:nrow(read)) {
    word <- read$`Declined Noun`[row]
    # Generation 15 info (Gen, Dec, Case, Num)
    # NOTE(review): read_csv returns a tibble, so read[row, ncol(read)] is a
    # 1x1 tibble, not a character scalar -- confirm strsplit() accepts this
    # in the readr version used; read[[ncol(read)]][row] would be safer.
    final_info <- unlist(strsplit(read[row, ncol(read)], ' '))
    gen <- final_info[1]
    dec <- final_info[2]
    case <- final_info[3]
    num <- final_info[4]
    # Assign
    # gtools::invalid() is TRUE for missing/NA cells, i.e. first occurrence.
    df_wordcount[word, gen] <- ifelse(invalid(df_wordcount[word, gen]), 1, df_wordcount[word, gen] + 1)
    df_wordcount[word, dec] <- ifelse(invalid(df_wordcount[word, dec]), 1, df_wordcount[word, dec] + 1)
    df_wordcount[word, paste(case, num, sep = ".")] <- ifelse(invalid(df_wordcount[word, paste(case, num, sep = ".")]), 1, df_wordcount[word, paste(case, num, sep = ".")] + 1)
  }
}
# Convert tallies to percentages. The divisor is the number of simulation-run
# files actually read above (the counting loop adds one tally per file), so we
# use length(files) instead of the previous hard-coded 50 -- same result for
# 50 runs, and correct for any other number of runs.
df_wordpercent <- cbind(df_wordcount[ , 1:4], df_wordcount[5:ncol(df_wordcount)]/length(files)*100)
# Outcomes a word never reached were left NA by the tally loop; treat as 0%.
df_wordpercent[is.na(df_wordpercent)] <- 0
### Now we want to see what happened
# Interactive inspection: subset df_wordpercent by original gender/declension
# to examine where each class of Latin nouns ended up.
# Declension IV nouns: basically all went to M or rarely N
IV <- df_wordpercent[df_wordpercent$Declension == "IV", ]
# Declension V nouns: goes to F only ~20% of the time, M otherwise
V <- df_wordpercent[df_wordpercent$Declension == "V", ]
# Feminine I nouns that went to F less than 90% of the time
fI <- df_wordpercent[df_wordpercent$Gender == "f" & df_wordpercent$Declension == "I" & df_wordpercent$f < 90, ]
# Masculine II nouns that went to F more than 10% of the time
mII <- df_wordpercent[df_wordpercent$Gender == "m" & df_wordpercent$Declension == "II" & df_wordpercent$f > 10, ]
# Neuter II nouns that went to F more than 10% of the time
nIIf <- df_wordpercent[df_wordpercent$Gender == "n" & df_wordpercent$Declension == "II" & df_wordpercent$f > 10, ]
# Neuter II nouns:
nII <- df_wordpercent[df_wordpercent$Gender == "n" & df_wordpercent$Declension == "II", ]
# Masculine III nouns:
mIII <- df_wordpercent[df_wordpercent$Gender == "m" & df_wordpercent$Declension == "III", ]
# Neuter III nouns:
nIII <- df_wordpercent[df_wordpercent$Gender == "n" & df_wordpercent$Declension == "III", ]
# Feminine III nouns:
fIII <- df_wordpercent[df_wordpercent$Gender == "f" & df_wordpercent$Declension == "III", ]
# Open the feminine III subset in the RStudio viewer.
View(fIII)
|
cb43d66017a1d66318b420ea8fb9934a1ceb9f22 | b5955887c1a960b1e8dafb55590a606fd15cbfbb | /bloomrs/R/snap_points.R | 59f37bc58c3a980844e7a8066ac5a901e92b5d5c | [] | no_license | clarkwrks/cyanohabs | bddded0f90f08e4b9c653ce052c9c44f9b10fd32 | f33cf4d77a8c9b4130e027414346885110f39a24 | refs/heads/master | 2021-06-26T13:50:51.676216 | 2019-12-04T22:59:59 | 2019-12-04T22:59:59 | 225,966,570 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,448 | r | snap_points.R | library(sp)
library(rgdal)
library(raster)
library(rgeos)
#' Snap an input location to the nearest available satellite data.
#'
#' Tries each requested snapping case in turn and collects every case that
#' succeeds. "adjacent"/"proximate" return a 3x3 pixel window centered on the
#' nearest offshore candidate within 300 m / 900 m; "waterbody" returns all
#' candidates on the nearest NHD waterbody (if it has at least 9 candidates);
#' "watershed" returns all candidates in the focal point's HUC12 (if at least
#' 9). Each successful case is also written to a shapefile under
#' "<output.prefix>_snapping/". See the project powerpoint for diagrams.
#'
#' @param focal.point reported location of feature (intake). Required fields: unique_id, huc12, wb_comid
#' @param candidate.points shapefile of locations with available data (created by bloomr::GenCandidatePoints)
#' @param output.prefix character; prefix for the "<prefix>_snapping/" output directory.
#' @param snap.cases character vector of snapping cases to attempt, any of
#'   "proximate", "adjacent", "waterbody", "watershed".
#' @return list with one SpatialPointsDataFrame for each snapping case, possibly empty.
#'   If no case succeeds, returns (early) a single one-row
#'   SpatialPointsDataFrame flagged case = "unresolved" instead of a list.
SnapPoints <- function(focal.point, candidate.points, output.prefix = "test",
                       snap.cases = c("proximate", "adjacent", "waterbody", "watershed")){
  # assign focal.point's unique_id to all candidate.points
  candidate.points$unique_id <- focal.point$unique_id
  # calculate the distance from each candidate point to the focal point
  candidate.points$snap_dist <- spDistsN1(candidate.points, focal.point)
  # select all candidate points at least 600 m from shore. For the spatial
  # coverage estimate a value of 636 m is used. Here we round down to the
  # nearest pixel (300 m).
  core.points <- candidate.points[candidate.points$shr_dst_m >= 600, ]
  # initialize an empty list for appending valid snap cases
  snap.results <- list()
  if("adjacent" %in% snap.cases){
    # select all core.points within 300 m of focal.point
    near.points <- core.points[core.points$snap_dist <= 300, ]
    if(nrow(near.points) > 0){
      print(paste0(focal.point$unique_id, ": Snapping to 3x3 within 300 m"))
      # find closest point
      snap.point <- near.points[which.min(near.points$snap_dist), ]
      # calculate the distance from candidate.points to the snap.point
      candidate.points$window.dist <- spDistsN1(candidate.points, snap.point)
      # select the 9 points closest to the snap.point (ties broken by
      # distance to the focal point), i.e. the 3x3 pixel window
      adjacent.points <- candidate.points[with(candidate.points@data, order(window.dist, snap_dist)), ][1:9,]
      adjacent.points$case <- "adjacent"
      # not all cases will add a window.dist value, remove to avoid errors later
      adjacent.points$window.dist <- NULL
      candidate.points$window.dist <- NULL
      # adjacent.points$snap_dist <- snap.point$snap_dist
      snap.results <- c(snap.results, adjacent.points)
    } else {
      print(paste0(focal.point$unique_id, ": No adjacent points available"))
    }
  }
  if("proximate" %in% snap.cases){
    # this is identical to the code block for the adjacent case, except that
    # we select all core.point within 900 m of the focal.point
    near.points <- core.points[core.points$snap_dist <= 900, ]
    if(nrow(near.points) > 0){
      print(paste0(focal.point$unique_id, ": Snapping to 3x3 within 900 m"))
      snap.point <- near.points[which.min(near.points$snap_dist), ]
      candidate.points$window.dist <- spDistsN1(candidate.points, snap.point)
      proximate.points <- candidate.points[with(candidate.points@data, order(window.dist, snap_dist)), ][1:9,]
      proximate.points$case <- "proximate"
      proximate.points$window.dist <- NULL
      candidate.points$window.dist <- NULL
      # proximate.points$snap_dist <- snap.point$snap_dist
      snap.results <- c(snap.results, proximate.points)
    } else {
      print(paste0(focal.point$unique_id, ": No proximate points available"))
    }
  }
  if("waterbody" %in% snap.cases){
    # select candidate.points within 900 m of poi.point
    waterbody.points <- candidate.points[candidate.points$snap_dist <= 900, ]
    # expand selection to all candidate.points with matching comids
    waterbody.points <- candidate.points[candidate.points$wb_comid %in% waterbody.points$wb_comid, ]
    # select only comids with >= 9 candidate.points
    waterbody.points <- waterbody.points[as.data.frame(table(waterbody.points$wb_comid))$Freq > 8, ]
    # select closest comid
    wb.point <- waterbody.points[which.min(waterbody.points$snap_dist), ]
    # select all candidate.points with matching comid
    waterbody.points <- candidate.points[candidate.points$wb_comid %in% wb.point$wb_comid,]
    if(nrow(waterbody.points) > 8){
      print(paste0(focal.point$unique_id, ": Snapping to nearest waterbody"))
      waterbody.points$case <- "waterbody"
      snap.results <- c(snap.results, waterbody.points)
    } else {
      print(paste0(focal.point$unique_id, ": No waterbody points available"))
    }
  }
  if("watershed" %in% snap.cases){
    # select all candidate.points matching focal.point comid (HUC12 match)
    watershed.points <- candidate.points[which(candidate.points$huc12 == focal.point$huc12), ]
    if(nrow(watershed.points) > 8){
      print(paste0(focal.point$unique_id, ": Snapping to nearest watershed"))
      watershed.points$case <- "watershed"
      snap.results <- c(snap.results, watershed.points)
    } else {
      print(paste0(focal.point$unique_id, ": No watershed points available"))
    }
  }
  # Fallback: no case succeeded -- return the single nearest candidate,
  # flagged "unresolved" (note: this early return is NOT written to disk and
  # is a SpatialPointsDataFrame, not a list, unlike the normal return).
  if(length(unlist(snap.results)) == 0){
    null.point <- candidate.points[which.min(candidate.points$snap_dist), ]
    null.point$case <- "unresolved"
    print(paste0(focal.point$unique_id, ": Unable to resolve"))
    return(null.point)
  }
  snap.dir <- paste0(output.prefix, "_snapping/")
  dir.create(snap.dir, showWarnings = FALSE)
  # write each set of snap points to a separate shapefile "/snapping/output.prefix_case_uniqueid.shp"
  lapply(1:length(snap.results), function(i) shapefile(x = snap.results[[i]],
    filename = paste0(snap.dir, snap.results[[i]]$case[1], "_", snap.results[[i]]$unique_id[1])))
  return(snap.results)
}
# Reduce a set of snapped points to the single row nearest the focal
# location, i.e. the one with the smallest snap_dist. The result is
# returned invisibly (matching the original assignment-as-last-expression).
MultipointToSingle <- function(snap.points) {
  nearest.idx <- which.min(snap.points$snap_dist)
  invisible(snap.points[nearest.idx, ])
}
# # create dummy points and required attributes
# test.coords <- data.frame(x = c(1451894, 1446547, 1448165, 1450797), y = c(579348.6, 586258.3, 580445.4, 574166.3))
# test.attrs <- data.frame(unique_id=as.factor(c("a111", "b222", "c333", "d444")),
# huc12 = rep("030901011703", 4),
# wb_comid = c(21489874, 21489752, 21489802, NA))
# proj.albers <- CRS("+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ")
# test.spdf <- SpatialPointsDataFrame(test.coords, test.attrs, proj4string=proj.albers)
#
# # need candidate.points pre-generated by "gen_candidate_points.R"
# fl.candidate.points <- GenCandidatePoints(fl.water.mask, "test", fl.nhd.wb, fl.nhd.huc12)
#
# # a single focal.point and one snapping case
# test.snap <- SnapPoints(test.spdf[3,], fl.candidate.points, c("proximate"), output.prefix = "test")
# # loop through all points in a shapefile and all snapping cases
# test.snaps <- lapply(1:length(test.spdf), function(i)
# SnapPoints(test.spdf[i,], fl.candidate.points, output.prefix = "test1"))
#
#
# pws <- shapefile(file.choose())
# # discard pws locations >100 m from NHD
# pws <- pws[pws$COMID_JCjo != 0,]
# pws$unique_id <- paste0(pws$PWSID, pws$FACILITY_I)
# crs(pws) <- crs(fl.water.mask)
# fl.pws <- crop(pws, fl.water.mask)
# fl.pws$huc12 <- over(fl.pws, fl.nhd.huc12)$HUC_12
# fl.pws$wb_comid <- over(fl.pws, fl.nhd.wb)$COMID
#
# fl.pws.snaps <- lapply(1:length(fl.pws), function(i)
# SnapPoints(fl.pws[i,], fl.candidate.points, output.prefix = "flpwstest")) |
59efd50ea810cc95ad84219f5ba69ca17a9a24db | 873e385bc941e28ca345ab1e0a1d4c925e79bdba | /run_analysis.R | a0c59a2dc09303e55cff622f5dbfb4d20d5b7420 | [] | no_license | reejoe/gettingcleaningdata | 3fef0d31bd73c05fa9970acbbcd13d023c0f1b51 | fbc5c65c51db6e44de74a0b66f1c7a5ef7579d7e | refs/heads/master | 2021-01-10T18:30:18.021272 | 2015-01-27T20:05:09 | 2015-01-27T20:05:09 | 29,832,536 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,359 | r | run_analysis.R | # run_analysis.R - source code
# set the working dir where the data is present
# NOTE(review): setwd() in a shared script is fragile; paths below assume the
# UCI HAR dataset has been unpacked under ./data.
setwd("~/R/courseproj")
pathData <- file.path("./data","UCI HAR Dataset")
#read the activity, subject and feature data from files into variables
# Y_* = activity codes, subject_* = subject ids, X_* = 561 feature columns.
TestData <- read.table(file.path(pathData,"test","Y_test.txt"),header = FALSE)
trainData <- read.table(file.path(pathData,"train","Y_train.txt"), header = FALSE)
subjTestData <- read.table(file.path(pathData,"test","subject_test.txt"), header = FALSE)
subjTrainData <- read.table(file.path(pathData,"train","subject_train.txt"), header = FALSE)
featTestData <- read.table(file.path(pathData,"test","X_test.txt"),header = FALSE)
featTrainData <- read.table(file.path(pathData,"train","X_train.txt"), header = FALSE)
# Merge the training and test data into one
# Row order (train first, then test) must match across all three tables so
# that the cbind below keeps rows aligned.
subjData <- rbind(subjTrainData,subjTestData)
actData <- rbind(trainData,TestData)
featureData <- rbind(featTrainData,featTestData)
# set the names to variables
names(subjData) <- c("Subject")
names(actData) <- c("Activity")
# NOTE(review): `head=FALSE` relies on partial matching of `header`; spell it
# out as header = FALSE.
featureDataName <- read.table(file.path(pathData,"features.txt"),head=FALSE)
names(featureData) <- featureDataName$V2
# Merge columns to produce data frame
combineData <- cbind(subjData,actData)
processData <- cbind(featureData,combineData)
#subset fetures by measurement on mean and SD
# Keep only features named like "...mean()..." or "...std()...".
subFeatureNameData <- featureDataName$V2[grep("mean\\(\\)|std\\(\\)", featureDataName$V2)]
selectedNames <-c(as.character(subFeatureNameData), "Subject", "Activity" )
# Produce the data based on selected names
processData <-subset(processData,select=selectedNames)
# Assign labels
# Expand the dataset's abbreviated feature-name prefixes/tokens.
names(processData) <-gsub("^t", "Time", names(processData))
names(processData) <-gsub("^f", "Frequency", names(processData))
names(processData) <-gsub("Acc", "Accelerometer", names(processData))
names(processData) <-gsub("Gyro", "Gyroscope", names(processData))
names(processData) <-gsub("Mag", "Magnitude", names(processData))
names(processData) <-gsub("BodyBody", "Body", names(processData))
#produce output tiny data set
library(plyr)
Data2 <-aggregate(. ~subject + Activity, processData, mean)
Data2 <-Data2[order(Data2$subject,Data2$Activity),]
write.table(Data2, file = "tidydata.txt",row.name=FALSE) |
f86e92b7938f771a73f7f5e7709d92a311d61301 | 9dc1278807d585d24cf5b9ba2f74b9b5f40d8c2d | /tests/testthat/test_addClusterCols.R | 308575d4e15a2064736a6a69b8a3371bcfff9555 | [
"MIT"
] | permissive | stephenwilliams22/Spaniel | b6387e686d9e280deeab89d63655a93bb5476f05 | 6dada98d8a9eddde4a4610457b8d4311f9ecb2ec | refs/heads/master | 2020-08-01T22:26:42.014191 | 2019-09-25T14:09:30 | 2019-09-25T14:09:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,104 | r | test_addClusterCols.R | # Tests for markClusterCol function
# ------------------------------------------------------------------------------
context("Testing markClusterCol")
# These tests were created to ensure that the markClusterCol functions works
# correctly and marks the correct column with Cluster_ prefix
# Test markClusterCol with Seurat and sce objects
# ------------------------------------------------------------------------------
# Create test data
set.seed(1234)
counts <- sample(seq(0, 4),
625,
replace = TRUE,
prob = c(0.65, 0.25, 0.04, 0.008, 0.002)) %>%
matrix(nrow = 25) %>%
data.frame()
colnames(counts) <- paste0("cell_", seq(1, 25))
rownames(counts) <- paste0("gene.", seq(1, 25))
# test metadata with 5 columns the last three columns contain the
# clustering information
md <- counts %>%
t() %>%
data.frame() %>%
select(c(gene.1, gene.2, gene.3, gene.4, gene.5)) %>%
dplyr::rename(col1 = gene.1,
col2 = gene.2,
res.0.6 = gene.3,
res.0.8 = gene.4,
res.1.0 = gene.5)
# Test with Seurat object
# ------------------------------------------------------------------------------
test.seurat <- Seurat::CreateSeuratObject(counts = counts,
meta.data = md)
test.md.before <- getMetadata(test.seurat)
### prefix all columns containing patten with "cluster_"
pat <- "res"
test.seurat <- markClusterCol(test.seurat, pattern = pat)
test.md.after <- getMetadata(test.seurat)
# check that no columns are marked before running markClusterCol
test.marked.before <- colnames(test.md.before) %>%
grepl("cluster_", .) %>%
sum()
#checked that columns containing cluser columns are marked
test.marked.after <- colnames(test.md.after) %>%
grepl("cluster_", .) %>%
sum()
#check that columns not containing cluster info remain unmarked
test.marked.after.other <- colnames(test.md.after)[1:5]%>%
grepl("cluster_", .) %>%
sum()
test_that("markClusterCol check that columns
are marked with cluster_ correctly, Seurat", {
expect_is(test.seurat, "Seurat")
expect_is(test.md.before, "data.frame")
expect_is(test.md.after, "data.frame")
expect_equal(colnames(test.md.before)[6], "res.0.6")
expect_equal(colnames(test.md.after)[6], "cluster_res.0.6")
expect_equal(test.marked.before, 0)
expect_equal(test.marked.after, 3)
expect_equal(test.marked.after.other, 0)
})
# Test with SingleCellExperiment object
# ------------------------------------------------------------------------------
test.sce <- SingleCellExperiment(assays = list(counts = as.matrix(counts)),
colData = md)
test.md.before <- getMetadata(test.sce)
### prefix all columns containing patten with "cluster_"
pat <- "res"
test.sce <- markClusterCol(test.sce, pattern = pat)
test.md.after <- getMetadata(test.sce)
# check that no columns are marked before running markClusterCol
test.marked.before <- colnames(test.md.before) %>%
grepl("cluster_", .) %>%
sum()
#checked that columns containing cluser columns are marked
test.marked.after <- colnames(test.md.after) %>%
grepl("cluster_", .) %>%
sum()
#check that columns not containing cluster info remain unmarked
test.marked.after.other <- colnames(test.md.after)[1:2]%>%
grepl("cluster_", .) %>%
sum()
test_that("markClusterCol check that columns
are marked with cluster_ correctly, SCE", {
expect_is(test.sce, "SingleCellExperiment")
expect_is(test.md.before, "data.frame")
expect_is(test.md.after, "data.frame")
expect_equal(colnames(test.md.before)[3], "res.0.6")
expect_equal(colnames(test.md.after)[3], "cluster_res.0.6")
expect_equal(test.marked.before, 0)
expect_equal(test.marked.after, 3)
expect_equal(test.marked.after.other, 0)
})
|
60508f229d6be1c4b90b9721edde70d40927d8ff | 349f3040c82503673f11fecfa41c90664bd8b243 | /BasketAnalysis.R | 56c1413614b7fe6db22ef7a820364f94d65bf5e6 | [] | no_license | RasLillebo/BasketAnalysis | c5eb3af33eba782684e5f71f7de7b44ea959b9e5 | 82d0fdb0dcfae177e451a4468762d0a89bf4b91d | refs/heads/master | 2022-12-13T13:43:12.444985 | 2020-08-31T19:18:51 | 2020-08-31T19:18:51 | 289,509,170 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,174 | r | BasketAnalysis.R | #Basket Analysis
#Inspired by: https://www.datacamp.com/community/tutorials/market-basket-analysis-r
Packages <- c("arules", "arulesViz", "tidyverse", "readr", "knitr",
"ggplot2", "lubridate", "plyr", "dplyr")
#install.packages(Packages)
lapply(Packages, library, character.only=TRUE)
#InvoiceNo, StockCode, Description, Quantity, InvoiceDate, UnitPrice, CustomerID, Country
retail <- read.csv('C:/Users/rasmu/OneDrive/Skrivebord/Github/Data/Online_retail')
retail <- retail[complete.cases(retail), ]
retail = retail %>% mutate(Description = as.factor(Description), Country = as.factor(Country),
Date = as.Date(retail$InvoiceDate), TransTime = format(retail$InvoiceDate,"%H:%M:%S"),
InvoiceNo = as.numeric(as.character(retail$InvoiceNo)))
transactionData <- ddply(retail,c("InvoiceNo","Date"),
function(retail)paste(retail$Description,
collapse = ","))
transactionData$InvoiceNo <- NULL
transactionData$Date <- NULL
write.csv(transactionData,"C:/Users/rasmu/OneDrive/Skrivebord/Github/Data/OnlineRetailtr.csv",
quote = FALSE, row.names = FALSE)
tr <- read.transactions('C:/Users/rasmu/OneDrive/Skrivebord/Github/Data/OnlineRetailtr.csv',
format = 'basket', sep=',')
summary(tr)
# Create an item frequency plot for the top 20 items
if (!require("RColorBrewer")) {
# install color package of R
install.packages("RColorBrewer")
#include library RColorBrewer
library(RColorBrewer)
}
windows()
par(mfrow=c(2, 1))
itemFrequencyPlot(tr,topN=20,type="absolute",col=brewer.pal(8,'Pastel2'), main="Absolute Item Frequency Plot")
itemFrequencyPlot(tr,topN=20,type="relative",col=brewer.pal(8,'Pastel2'),main="Relative Item Frequency Plot")
association.rules <- apriori(tr, parameter = list(supp=0.001, conf=0.8,maxlen=10))
inspect(association.rules[1:10])
shorter.association.rules <- apriori(tr, parameter = list(supp=0.001, conf=0.8,maxlen=3))
subset.rules <- which(colSums(is.subset(association.rules, association.rules)) > 1) # get subset rules in vector
length(subset.rules)
subset.association.rules. <- association.rules[-subset.rules]
metal.association.rules <- apriori(tr, parameter = list(supp=0.001, conf=0.8),appearance = list(default="lhs",rhs="METAL"))
inspect(head(metal.association.rules))
metal.association.rules <- apriori(tr, parameter = list(supp=0.001, conf=0.8),appearance = list(lhs="METAL",default="rhs"))
inspect(head(metal.association.rules))
subRules<-association.rules[quality(association.rules)$confidence>0.4]
#Plot SubRules
windows()
par(mfrow=c(2, 1))
plot(subRules, jitter=0)
plot(subRules,method="two-key plot", jitter=0)
windows()
par(mfrow=c(2, 1))
top10subRules <- head(subRules, n = 10, by = "confidence")
plot(top10subRules, method = "graph", engine = "htmlwidget")
saveAsGraph(head(subRules, n = 1000, by = "lift"), file = "rules.graphml")
subRules2<-head(subRules, n=20, by="lift")
windows()
plot(subRules2, method="paracoord")
|
1b80df83b3cf9b051cab8281a3e2a5c7e8a19a77 | c0c468863a8e46cb61a8eff37de1ffc4b7d4fd89 | /man/getAreaData.Rd | dec4b1a70326936dba81b818576fb72adbcde2d4 | [] | no_license | SWS-Methodology/faoswsSeed | 91357a1b56fdc452a3de975ff38092ac3ced6ae4 | 5d6d2d939bc66367c8d060a82fd1861848189149 | refs/heads/master | 2021-01-14T13:57:55.046420 | 2020-11-17T11:27:36 | 2020-11-17T11:27:36 | 29,237,978 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,381 | rd | getAreaData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAreaData.R
\name{getAreaData}
\alias{getAreaData}
\title{Function for obtaining the area harvested/sown data}
\usage{
getAreaData(dataContext, areaSownElementCode = "5025",
areaHarvestedElementCode = "5312", seedElementCode = "5525")
}
\arguments{
\item{dataContext}{The context for the data, as generated by the SWS. This
object can be created via a call like swsContext.datasets[[1]] (assuming
the user is running this script on the SWS or after a call to
GetTestEnvironment).}
\item{areaSownElementCode}{The element code providing the dimension which
corresponds to the area sown variable in the database.}
\item{areaHarvestedElementCode}{The element code providing the dimension
which corresponds to the area harvested variable in the database.}
\item{seedElementCode}{The element code providing the dimension which
corresponds to the seed variable in the database.}
}
\value{
A data.table object containing the data queried from the database.
}
\description{
This function pulls the trade data from the database. The main function
pulling the data is faosws::GetData, but additional steps are performed by
this function (such as setting up the appropriate pivot, adding variables
which are missing from the data as NA's, and setting data with missing
flags and 0 values to NA values).
}
|
57376a29fcd8af6eeffa149e32ba46fcd93cfe44 | 1d85ea0fd495bbb892175f20676ae38f61baa475 | /R/compareAVHRRimages.R | 336fa67ca2f8818419329698d1e403d1b1f39884 | [] | no_license | steingod/R-mipolsat | e6a3ddedd31f0eaf26f6f56bb5b30219cc63968a | a19c0c34557cb81faa4f9297c44413af8e59488b | refs/heads/master | 2021-01-19T20:29:57.560832 | 2013-05-28T20:33:58 | 2013-05-28T20:33:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,622 | r | compareAVHRRimages.R | #
# NAME:
# NA
#
# PURPOSE:
# NA
#
# REQUIREMENTS:
# NA
#
# INPUT:
# NA
#
# OUTPUT:
# NA
#
# NOTES:
# NA
#
# BUGS:
# NA
#
# AUTHOR:
# Ãystein Godøy, METNO/FOU
#
# MODIFIED:
# NA
#
# CVS_ID:
# $Id: compareAVHRRimages.R,v 1.3 2013-04-11 20:29:04 steingod Exp $
compareAVHRRimages <- function(dataset1, dataset2, channel=1, map=TRUE) {
if (missing(dataset1) || missing(dataset2)) {
cat("Remember to provide an object from readosisaf.\n")
return;
}
if ((dataset1$header$ucs_ul_x != dataset2$header$ucs_ul_x) ||
(dataset1$header$ucs_ul_y != dataset2$header$ucs_ul_y) ||
(dataset1$header$ucs_dx != dataset2$header$ucs_dx) ||
(dataset1$header$ucs_dy != dataset2$header$ucs_dy)) {
return("Datasets do not match geographically")
}
if ((dataset1$header$xsize != dataset2$header$xsize) ||
(dataset1$header$ysize != dataset2$header$ysize)) {
return("Datasets do not match in size")
}
if ((dataset1$header$year != dataset2$header$year) ||
(dataset1$header$month != dataset2$header$month) ||
(dataset1$header$day != dataset2$header$day) ||
(dataset1$header$hour != dataset2$header$hour) ||
(dataset1$header$minute != dataset2$header$minute)) {
return("Datasets do not match in time")
}
eastings <- dataset1$header$ucs_ul_x+
(dataset1$header$ucs_dx*(0:(dataset1$header$xsize-1)))
northings <- dataset1$header$ucs_ul_y-
(dataset1$header$ucs_dy*(0:(dataset1$header$ysize-1)))
eastings <- sort(eastings)
northings <- sort(northings)
t <- matrix(dataset1$data[,channel]-dataset2$data[,channel],
ncol=dataset1$header$ysize,nrow=dataset1$header$xsize)
aspectratio <- dataset1$header$ysize/dataset1$header$xsize
par(fin=c(5,5*aspectratio))
##image(eastings,northings,t[,dataset1$header$ysize:1])
if (map==TRUE) {
data(gshhsmapdata)
mapdata <- milatlon2ucs(gshhsmapdata$lat,gshhsmapdata$lon)
##lines(mapdata$eastings,mapdata$northing)
filled.contour(eastings,northings,t[,dataset1$header$ysize:1],
asp=aspectratio,
plot.axes={axis(1);axis(2);
lines(mapdata$eastings,mapdata$northing)},
color.palette=topo.colors)
} else {
filled.contour(eastings,northings,t[,dataset1$header$ysize:1],
asp=aspectratio,color.palette=topo.colors)
}
title(paste("Comparison of channel",
channel,
"for products","\n",
dataset1$header$filename,
"and","\n",
dataset2$header$filename),
sub=sprintf("%4d-%02d-%02d %02d:%02d UTC",
dataset1$header$year,dataset1$header$month,dataset1$header$day,
dataset1$header$hour,dataset1$header$minute),
cex.main=0.75, cex.sub=0.8)
}
|
8327b25d901bbe1bc027228290aa83d666b15e3b | d20511398fec50d8bfc5433c8903b5471da51f1d | /searchForWord3.R | 0cfa28560194789ba7005daba9290b3dd1decb0b | [] | no_license | TechyTrickster/GuttenbergPressAnalysisInR | 0c3bcab2b956ad8f8c8a846ec7cd80631e0664f4 | 2cb3b7fccb4fcec55a15385569a746604141840c | refs/heads/master | 2023-02-05T23:24:01.501873 | 2020-12-30T19:20:59 | 2020-12-30T19:20:59 | 318,933,322 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 982 | r | searchForWord3.R | options(warn = -1)
options(readr.num_columns = 0)
args = commandArgs(trailingOnly = T)
table = read.table(args[3], sep = ",")
titles = readr::read_delim(args[1], ":", escape_double = F, trim_ws = T, col_names = F)
searchWord = tolower(args[2])
fileName = tools::file_path_sans_ext(basename(args[3])) #calculate the number of times the word appears in the title
index = stringr::str_replace_all(fileName, '-8', "")
index = stringr::str_replace_all(index, '-0', "")
index = stringr::str_replace_all(index, ".txt.processed", "")
title = titles$X2[which(titles$X1 == index)][1]
countInTitle = length(which(unlist(strsplit(tolower(as.character(title)), " ")) == as.character(args[2])))
countInBody = sum(as.integer(table$V2[which(table$V1 == searchWord)]))
output = paste0(args[3], ":", countInBody, ":", countInTitle, ":", searchWord, ":", title, ":", index) #reference a pre made frequency table to find the number of times the word appears in the body
print(output, max.levels=F)
|
e8be5ba48a0a7a5604af033d6ebca5ee532a6e9a | 4c9d2d93b2fa8661cc959320bef1d31384f9e89a | /man/loadDatainEnvironment.Rd | 6bb231b8956799a4bda0c2c7df821188807e2ceb | [
"MIT"
] | permissive | abdala9512/dareML | 5a0d602fc28821e7f024543f4291bd172838ef16 | 9473f1eb81e277419e42cd1fdfef72b259c19c08 | refs/heads/main | 2023-06-20T08:22:04.492566 | 2021-07-09T02:49:12 | 2021-07-09T02:49:12 | 324,870,579 | 0 | 0 | null | 2020-12-28T00:43:49 | 2020-12-27T23:47:25 | null | UTF-8 | R | false | true | 275 | rd | loadDatainEnvironment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{loadDatainEnvironment}
\alias{loadDatainEnvironment}
\title{Title}
\usage{
loadDatainEnvironment(data, varname, ...)
}
\arguments{
\item{varname}{}
}
\value{
}
\description{
Title
}
|
829815bdd807b1537facd5a0240715fcd09018e0 | bfa627b1c454c7109b2db7aba6c0b8e0c0ec4518 | /unsupervised_learning.R | b840075701a0ff3da0c6fb97c6c55b229869bc05 | [] | no_license | noahhhhhh/IntroToStatsLearning_R | dbe85800982669ca55c6f665dac772dde75879db | 8135ebab79157571e75492edd1298fa8647850b7 | refs/heads/master | 2021-01-10T05:46:19.486130 | 2015-12-22T23:02:31 | 2015-12-22T23:02:31 | 48,078,465 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,604 | r | unsupervised_learning.R |
#####################################################
## principal component analysis (PCA) ###############
#####################################################
states <- row.names(USArrests)
states
# [1] "Alabama" "Alaska" "Arizona" "Arkansas" "California"
# [6] "Colorado" "Connecticut" "Delaware" "Florida" "Georgia"
# [11] "Hawaii" "Idaho" "Illinois" "Indiana" "Iowa"
# [16] "Kansas" "Kentucky" "Louisiana" "Maine" "Maryland"
# [21] "Massachusetts" "Michigan" "Minnesota" "Mississippi" "Missouri"
# [26] "Montana" "Nebraska" "Nevada" "New Hampshire" "New Jersey"
# [31] "New Mexico" "New York" "North Carolina" "North Dakota" "Ohio"
# [36] "Oklahoma" "Oregon" "Pennsylvania" "Rhode Island" "South Carolina"
# [41] "South Dakota" "Tennessee" "Texas" "Utah" "Vermont"
# [46] "Virginia" "Washington" "West Virginia" "Wisconsin" "Wyoming"
# col names
names(USArrests)
# [1] "Murder" "Assault" "UrbanPop" "Rape"
# examine the data
apply(USArrests, 2, mean)
# Murder Assault UrbanPop Rape
# 7.788 170.760 65.540 21.232
apply(USArrests, 2, sd)
# Murder Assault UrbanPop Rape
# 4.355510 83.337661 14.474763 9.366385
# pca
pr.out <- prcomp(USArrests, scale = T)
names(pr.out)
# [1] "sdev" "rotation" "center" "scale" "x"
# rotation matrix provides the principal component loadings
pr.out$rotation
# PC1 PC2 PC3 PC4
# Murder -0.5358995 0.4181809 -0.3412327 0.64922780
# Assault -0.5831836 0.1879856 -0.2681484 -0.74340748
# UrbanPop -0.2781909 -0.8728062 -0.3780158 0.13387773
# Rape -0.5434321 -0.1673186 0.8177779 0.08902432
# score
pr.out$x
# PC1 PC2 PC3 PC4
# Alabama -0.97566045 1.12200121 -0.43980366 0.154696581
# Alaska -1.93053788 1.06242692 2.01950027 -0.434175454
# Arizona -1.74544285 -0.73845954 0.05423025 -0.826264240
# Arkansas 0.13999894 1.10854226 0.11342217 -0.180973554
# California -2.49861285 -1.52742672 0.59254100 -0.338559240
# Colorado -1.49934074 -0.97762966 1.08400162 0.001450164
# Connecticut 1.34499236 -1.07798362 -0.63679250 -0.117278736
# Delaware -0.04722981 -0.32208890 -0.71141032 -0.873113315
# Florida -2.98275967 0.03883425 -0.57103206 -0.095317042
# Georgia -1.62280742 1.26608838 -0.33901818 1.065974459
# Hawaii 0.90348448 -1.55467609 0.05027151 0.893733198
# Idaho 1.62331903 0.20885253 0.25719021 -0.494087852
# Illinois -1.36505197 -0.67498834 -0.67068647 -0.120794916
# Indiana 0.50038122 -0.15003926 0.22576277 0.420397595
# Iowa 2.23099579 -0.10300828 0.16291036 0.017379470
# Kansas 0.78887206 -0.26744941 0.02529648 0.204421034
# Kentucky 0.74331256 0.94880748 -0.02808429 0.663817237
# Louisiana -1.54909076 0.86230011 -0.77560598 0.450157791
# Maine 2.37274014 0.37260865 -0.06502225 -0.327138529
# Maryland -1.74564663 0.42335704 -0.15566968 -0.553450589
# Massachusetts 0.48128007 -1.45967706 -0.60337172 -0.177793902
# Michigan -2.08725025 -0.15383500 0.38100046 0.101343128
# Minnesota 1.67566951 -0.62590670 0.15153200 0.066640316
# Mississippi -0.98647919 2.36973712 -0.73336290 0.213342049
# Missouri -0.68978426 -0.26070794 0.37365033 0.223554811
# Montana 1.17353751 0.53147851 0.24440796 0.122498555
# Nebraska 1.25291625 -0.19200440 0.17380930 0.015733156
# Nevada -2.84550542 -0.76780502 1.15168793 0.311354436
# New Hampshire 2.35995585 -0.01790055 0.03648498 -0.032804291
# New Jersey -0.17974128 -1.43493745 -0.75677041 0.240936580
# New Mexico -1.96012351 0.14141308 0.18184598 -0.336121113
# New York -1.66566662 -0.81491072 -0.63661186 -0.013348844
# North Carolina -1.11208808 2.20561081 -0.85489245 -0.944789648
# North Dakota 2.96215223 0.59309738 0.29824930 -0.251434626
# Ohio 0.22369436 -0.73477837 -0.03082616 0.469152817
# Oklahoma 0.30864928 -0.28496113 -0.01515592 0.010228476
# Oregon -0.05852787 -0.53596999 0.93038718 -0.235390872
# Pennsylvania 0.87948680 -0.56536050 -0.39660218 0.355452378
# Rhode Island 0.85509072 -1.47698328 -1.35617705 -0.607402746
# South Carolina -1.30744986 1.91397297 -0.29751723 -0.130145378
# South Dakota 1.96779669 0.81506822 0.38538073 -0.108470512
# Tennessee -0.98969377 0.85160534 0.18619262 0.646302674
# Texas -1.34151838 -0.40833518 -0.48712332 0.636731051
# Utah 0.54503180 -1.45671524 0.29077592 -0.081486749
# Vermont 2.77325613 1.38819435 0.83280797 -0.143433697
# Virginia 0.09536670 0.19772785 0.01159482 0.209246429
# Washington 0.21472339 -0.96037394 0.61859067 -0.218628161
# West Virginia 2.08739306 1.41052627 0.10372163 0.130583080
# Wisconsin 2.05881199 -0.60512507 -0.13746933 0.182253407
# Wyoming 0.62310061 0.31778662 -0.23824049 -0.164976866
biplot(pr.out, scale = 0) # The scale=0 argument to biplot() ensures that the arrows are scaled to represent the loadings
# change the sign
pr.out$rotation <- -pr.out$rotation
pr.out$x <- - pr.out$x
biplot(pr.out, scale = 0)
# sd of each PC
pr.out$sdev
# [1] 1.5748783 0.9948694 0.5971291 0.4164494
# var of each PC
pr.var <- pr.out$sdev^2
pr.var
# [1] 2.4802416 0.9897652 0.3565632 0.1734301
# var explained by each PC
pve <- pr.var/sum(pr.var)
# [1] 0.62006039 0.24744129 0.08914080 0.04335752
# plot it
plot(pve , xlab =" Principal Component ", ylab=" Proportion of
Variance Explained ", ylim=c(0,1) ,type = 'b')
plot(cumsum(pve), xlab=" Principal Component ", ylab ="Cumulative Proportion of Variance Explained ", ylim=c(0,1) ,type = 'b')
#####################################################
## Clustering #######################################
#####################################################
# k-means
set.seed(2)
x <- matrix(rnorm(50 * 2), ncol = 2)
x[1:25, 1] <- x[1:25, 1] + 3
x[1:25, 2] <- x[1:25, 2] - 4
# k = 2
km.out <- kmeans(x, 2, nstart = 20)
km.out$cluster
# [1] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# [44] 1 1 1 1 1 1 1
plot(x, col = (km.out$cluster + 1), main = "K-Means Clustiner Results with K = 2"
, xlab = ""
, ylab = ""
, pch = 20
, cex = 2)
# k = 3
set.seed(4)
km.out <- kmeans(x, 3, nstart = 20)
km.out
plot(x, col = (km.out$cluster + 1), main="K-Means Clustering Results with K=3"
, xlab ="", ylab="", pch = 20, cex = 2)
# perform 20 times of random initial cluster setting
set.seed(3)
km.out <- kmeans(x, 3, nstart = 1)
km.out$tot.withinss
# [1] 104.3319
km.out <- kmeans(x, 3, nstart = 20) # the best one will be selected over these 20 random init clusters
km.out$tot.withinss
# [1] 97.97927
# individual withiness
km.out$withinss
# [1] 25.74089 19.56137 52.67700
# hierarchical clustering
# use dist() to get a 50*50 euclidean distance matrix
hc.complete <- hclust(dist(x), method = "complete")
hc.average <- hclust(dist(x), method = "average")
hc.single <- hclust(dist(x), method = "single")
# plot
par(mfrow = c(1, 3))
plot(hc.complete, main = "Complete Linkage", xlab= "", sub = "", cex =.9)
plot(hc.average, main = "Average Linkage", xlab= "", sub = "", cex =.9)
plot(hc.single, main = "Single Linkage", xlab= "", sub = "", cex =.9)
# cut tree, k = 2
cutree(hc.complete, 2)
cutree(hc.average, 2)
cutree(hc.single, 2)
# scale it before performing hc
xsc <- scale(x)
plot(hclust(dist(xsc), method = "complete"), main = "Hierarchical Clustering with Scaled Features")
# use coorelation distance
x <- matrix(rnorm(30*3), ncol = 3)
dd <- as.dist(1 - cor(t(x)))
plot(hclust(dd, method = "complete"), main = "Complete Linkage with Correlation-Based Distance")
#####################################################
## A NCI60 Data Example #############################
#####################################################
library(ISLR)
nci.labs <- NCI60$labs
nci.data <- NCI60$data
dim(nci.data)
# [1] 64 6830
nci.labs[1:4]
# [1] "CNS" "CNS" "CNS" "RENAL"
table(nci.labs)
# BREAST CNS COLON K562A-repro K562B-repro LEUKEMIA MCF7A-repro
# 7 5 7 1 1 6 1
# MCF7D-repro MELANOMA NSCLC OVARIAN PROSTATE RENAL UNKNOWN
# 1 8 9 6 2 9 1
# PCA
pr.out <- prcomp(nci.data, scale = T)
Cols <- function(vec){
cols <- rainbow(length(unique(vec)))
return(cols[as.numeric(as.factor(vec))])
}
par(mfrow = c(1, 2))
plot(pr.out$x[, 1:2], col = Cols(nci.labs), pch = 19, xlab = "Z1", ylab = "Z2")
plot(pr.out$x[, 1:3], col = Cols(nci.labs), pch = 19, xlab = "Z1", ylab = "Z3")
summary(pr.out)
plot(pr.out)
# PVE
pve <- 100 * pr.out$sdev^2/sum(pr.out$sdev^2)
par(mfrow = c(1, 2))
plot(pve, type = "o", ylab = "PVE", xlab = "PC", col = "blue")
plot(cumsum(pve), type = "o", ylab = "Cum PVE", xlab = "PC", col = "brown3")
# Clustering
# hcust
sd.data <- scale(nci.data)
par(mfrow = c(1, 3))
data.dist <- dist(sd.data)
plot(hclust(data.dist), labels = nci.labs, main = "Complete Linkage", xlab = "", ylab = "")
plot(hclust(data.dist, method = "average"), labels = nci.labs, main = "Average Linkage", xlab = "", ylab = "")
plot(hclust(data.dist, method = "single"), labels = nci.labs, main = "Single Linkage", xlab = "", ylab = "")
hc.out <- hclust(dist(sd.data))
hc.clusters <- cutree(hc.out, 4)
table(hc.clusters, nci.labs)
par(mfrow = c(1, 1))
plot(hc.out, labels = nci.labs)
abline(h = 139, col = "red")
hc.out
# Call:
# hclust(d = dist(sd.data))
#
# Cluster method : complete
# Distance : euclidean
# Number of objects: 64
# kmeans
set.seed(2)
km.out <- kmeans(sd.data, 4, nstart = 20)
km.clusters <- km.out$cluster
table(km.clusters, hc.clusters)
# hc.clusters
# km.clusters 1 2 3 4
# 1 11 0 0 9
# 2 0 0 8 0
# 3 9 0 0 0
# 4 20 7 0 0
# perform hcust on PCs
hc.out <- hclust(dist(pr.out$x[, 1:5]))
plot(hc.out, labels = nci.labs, main = "Hier.Cust. on First 5 Score Vectors")
table(cutree(hc.out, 4), nci.labs)
# Sometimes
# performing clustering on the first few principal component score vectors
# can give better results than performing clustering on the full data.
|
2e059579a754ae5055755ff9a4f19afefc3bb311 | 20b53f6afe1e9e6f300e4b61153ce4ef19f07c4a | /man/BAtable.Rd | 64c324949a0cba2340c5ae61e90b88ff8742b54a | [] | no_license | MrConradHarrison/cleftqCATsim | 92f31a920fd7186fd40bee870cdb00c1d18040bb | 59685e68022d01f19e9b93d62737036f35fbcb5c | refs/heads/main | 2023-08-18T20:04:15.587322 | 2021-09-21T09:35:00 | 2021-09-21T09:35:00 | 315,989,002 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 500 | rd | BAtable.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BAtable.R
\name{BAtable}
\alias{BAtable}
\title{BAtable}
\usage{
BAtable(x, y)
}
\arguments{
\item{x}{A vector of linear assessment scores}
\item{y}{A vector of CAT assessment scores}
}
\value{
A data frame
}
\description{
Creates a data frame of linear assessment factor scores, CAT factor scores, and the mean and difference of paired scores. Used for calculating limits of agreement and creating Bland Altman plots.
}
|
4173214bdb98729b241cc97bd6fc3d7dae48eb7d | 780ae51ce6f9450a65b6a16863f47f04bad82717 | /getTICs.V2.function.R | 361eadb8e25267ed0539e1375424ce9dd63e09db | [] | no_license | zyleeyang/Untargeted-metabolomics | 09a4fe07bbb035bdff77d5dcf9444290df7bb369 | 71d857a734632961aa00600a966188786c66032e | refs/heads/master | 2023-04-17T04:15:24.064390 | 2018-01-17T23:40:05 | 2018-01-17T23:40:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,196 | r | getTICs.V2.function.R | #getTIC---
getTIC <- function(file,rtcor=NULL) {
object <- xcmsRaw(file)
cbind(if (is.null(rtcor)) object@scantime else rtcor, rawEIC(object,mzrange=range(object@env$mz))$intensity)
}
#overlay TIC from all files in current folder or from xcmsSet, create pdf----
getTICs <- function(xcmsSet=NULL,files=NULL, pdfname="TICs.pdf",rt=c("raw","corrected")) {
for (j in 1:length(FractionList)){
Fraction <- FractionList[j]
print(paste(Fraction, "start", sep=" "))
ResultsDIR <- as.character(Dirs[Fraction, "ResultsDIR"])
setwd(ResultsDIR)
load(paste(Fraction, "xset3.RData", sep="."))
xcmsSet <- xset3
if (is.null(xcmsSet)) {
filepattern <- c("[Cc][Dd][Ff]", "[Nn][Cc]", "([Mm][Zz])?[Xx][Mm][Ll]",
"[Mm][Zz][Dd][Aa][Tt][Aa]", "[Mm][Zz][Mm][Ll]")
filepattern <- paste(paste("\\.", filepattern, "$", sep = ""), collapse = "|")
if (is.null(files))
files <- getwd()
info <- file.info(files)
listed <- list.files(files[info$isdir], pattern = filepattern,
recursive = TRUE, full.names = TRUE)
files <- c(files[!info$isdir], listed)
} else {
files <- filepaths(xcmsSet)
}
N <- length(files)
TIC <- vector("list",N)
for (i in 1:N) {
cat(files[i],"n")
if (!is.null(xcmsSet) && rt == "corrected")
rtcor <- xcmsSet@rt$corrected[[i]] else
rtcor <- NULL
TIC[[i]] <- getTIC(files[i],rtcor=rtcor)
}
setwd(ResultsDIR)
pdfname= paste(Fraction, "TICs.pdf", sep=".")
pdf(pdfname,w=16,h=10)
cols <- rainbow(N)
lty = 1:N
pch = 1:N
xlim = range(sapply(TIC, function(x) range(x[,1])))
ylim = range(sapply(TIC, function(x) range(x[,2])))
plot(0, 0, type="n", xlim = xlim, ylim = ylim, main = paste(Fraction, Experiment, "TICs", sep=" "), xlab = "Retention Time", ylab = "TIC")
for (i in 1:N) {
tic <- TIC[[i]]
points(tic[,1], tic[,2], col = cols[i], pch = pch[i], type="l")
}
legend("topright",paste(basename(files)), col = cols, lty = lty, pch = pch)
dev.off()
invisible(TIC)
print(paste(Fraction, "done", sep=" "))
}
}
#Example
#getTICs(xcmsSet=xset3, pdfname="TICs.pdf",rt="corrected") |
e72c54fefc2b641491cb30740aa29ce0594dce08 | d1a360fc9e6f2415d4b9c5a7964d2cb7b4c01e45 | /Bayesian Baseball 2016/Scripts/04- Conditional Model.r | 943ce48a9e047ebeb0a12314511ec99ac0afe77a | [] | no_license | blakeshurtz/Bayesian-Baseball | 8ea89ac5d191c9fb2558ef2776f5e6e71cc88ddc | 86d0cf8cd8fba05d108c0a7a677de6158f856b1a | refs/heads/master | 2022-01-30T00:14:25.207927 | 2019-07-02T19:34:25 | 2019-07-02T19:34:25 | 146,781,829 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 11,167 | r | 04- Conditional Model.r | library(rethinking)
library(tidyverse)
#Starting Fresh
urlfile<-'https://raw.githubusercontent.com/blakeobeans/Bayesian-Baseball/master/Cubs/Season/cubs.csv'
data<-read.csv(urlfile)
#Pitcher Stats
urlfile<-'https://raw.githubusercontent.com/blakeobeans/Bayesian-Baseball/master/Cubs/Pitching/pitching.csv'
pitcher<-read.csv(urlfile)
data<- left_join(data, pitcher,by="Pit")
#WL Record
urlfile<-'https://raw.githubusercontent.com/blakeobeans/Bayesian-Baseball/master/Cubs/Team%20Rankings/rankings.csv'
WL<-read.csv(urlfile)
data<- left_join(data, WL,by="Opp")
#Transform Data
data$score <- data$R-data$RA #Positive score indicates Giants win. Beats logistic. No ties.
data$opp_team <- coerce_index(data$Opp) #ID for team (function from rethinking)
data$pit_id <- coerce_index(data$Pit) #Home pitcher
names(data) <- c("tm", "opp", "R", "RA", "pit", "pitera", "wl", "score", "opp_team", "pit_id")
data$pitera_norm <- (data$pitera - mean(data$pitera))/sd(data$pitera) #normalize ERA
data$wl_norm <- (data$wl - mean(data$wl))/sd(data$wl) #normalize WL
data <- as.data.frame(data)
#MLM. Pitcher ERA, WL record, how many levels in baseball?
#Note hyperparameters
# Baseline multilevel model: score margin regressed on a varying intercept
# per opposing team (with an adaptive prior learned from the data), the
# opposing starter's normalized ERA, and the opponent's normalized W/L record.
standata <- data[,c("score", "opp_team", "pitera_norm", "wl_norm")]
set.seed(1234)
model2 <- map2stan(
  alist(
    score ~ dnorm( mu , sigma ) ,
    mu <- a + a_team[opp_team] + b * pitera_norm + c * wl_norm,
    sigma ~ dcauchy(0, 2.5),
    a ~ dnorm(0,3),
    a_team[opp_team] ~ dnorm( ai , as ), #adaptive prior from the data
    ai ~ dnorm(0, 1),
    as ~ dcauchy(0,2),
    b ~ dnorm( 0, 1 ),
    c ~ dnorm(0,1)
  ),
  data=standata, iter=12000, warmup=3000, chains=4, cores=4)
#Predicting game1
# Cleveland is unseen in the training data, so opp_team = 0 draws the
# varying intercept from the adaptive prior rather than a fitted team effect.
d.pred <- list(
  pitera_norm = -.63, #Lester
  wl_norm = 1.29, #WL normalized for CLE (from WL dataset)
  opp_team = 0) #placeholder
#Posterior Simulation
set.seed(1234)
sim.model <- sim( model2 , data=d.pred, n=6000)
sim <- as.data.frame(sim.model)
prob_success1 <- sum(sim$V1 > 0)/nrow(sim); prob_success1 #percent of scores that are wins
prob_fail <- sum(sim$V1 < 0)/nrow(sim); prob_fail #percent of scores that are losses
#Win 4 out of 7
dbinom(4, size=7, prob=prob_success1) + dbinom(5, size=7, prob=prob_success1) + dbinom(6, size=7, prob=prob_success1) + dbinom(7, size=7, prob=prob_success1)
prob_win1 <- pbinom(3, size=7, prob=prob_success1, lower.tail = FALSE) ; prob_win1
exp1 <- 7*prob_win1; exp1 # expected wins over a 7-game horizon
var1 <- 7*prob_win1*(1-prob_win1); var1 # binomial variance of that count
binomplot <- as.data.frame(cbind(exp1, var1))
#Predicting 2016 Games 2 through 7
# The original script repeated the identical fit/predict block six times.
# The per-game inputs are tabulated once and the block is run in a loop:
# after each game the observed result is appended to the training data,
# the model is refit, and the next game is predicted.
game_info <- data.frame(
  game        = 2:7,                                           # game being predicted
  prev_score  = c(-6, 4, -1, -5, 1, 6),                        # Cubs margin in the game just played
  prev_pitera = c(-0.63, -0.004, 0.616, 0.24, -0.64, -0.004),  # normalized ERA of the starter in the game just played
  pred_pitera = c(-0.004, 0.616, 0.24, -0.64, -0.004, -2),     # normalized ERA of the starter in the game predicted
  games_left  = c(6, 5, 4, 3, 2, 1),                           # games remaining in the series
  need_more   = c(4, 3, 3, 3, 2, 1)                            # wins still required to take the series
)
running_data <- standata
for (i in seq_len(nrow(game_info))) {
  g <- game_info[i, ]
  # Fold the game just played into the data (opp_team 21 = Indians,
  # wl_norm 1.29 = Cleveland's normalized W/L record).
  running_data <- rbind(running_data, c(g$prev_score, 21, g$prev_pitera, 1.29))
  assign(paste0("standata", g$game), running_data)  # keep original names (standata2..standata7)
  # Refit the multilevel model on the updated data.
  set.seed(1234)
  fit <- map2stan(
    alist(
      score ~ dnorm( mu , sigma ) ,
      mu <- a + a_team[opp_team] + b * pitera_norm + c * wl_norm,
      sigma ~ dcauchy(0, 2.5),
      a ~ dnorm(0,3),
      a_team[opp_team] ~ dnorm( ai , as ), #adaptive prior from the data
      ai ~ dnorm(0, 1),
      as ~ dcauchy(0,2),
      b ~ dnorm( 0, 1 ),
      c ~ dnorm(0,1)
    ),
    data=running_data, iter=12000, warmup=3000, chains=4, cores=4)
  assign(paste0("model", g$game + 1), fit)  # original naming: model3 predicts game 2, ..., model8 predicts game 7
  # Simulate the score distribution for the upcoming game against CLE.
  d.pred <- list(
    pitera_norm = g$pred_pitera,
    wl_norm = 1.29,
    opp_team = 21)
  set.seed(1234)
  sim_draws <- as.data.frame(sim(fit, data = d.pred, n = 6000))
  p_game <- sum(sim_draws$V1 > 0) / nrow(sim_draws)  # P(Cubs win this game)
  assign(paste0("prob_success", g$game), p_game)
  # P(Cubs win at least `need_more` of the remaining `games_left` games),
  # i.e. P(Cubs win the series from here).
  p_series <- pbinom(g$need_more - 1, size = g$games_left, prob = p_game,
                     lower.tail = FALSE)
  assign(paste0("prob_win", g$game), p_series)
  # Expected wins / variance over a 7-game horizon, as in the original script.
  assign(paste0("exp", g$game), 7 * p_series)
  assign(paste0("var", g$game), 7 * p_series * (1 - p_series))
  binomplot <- rbind(binomplot, c(7 * p_series, 7 * p_series * (1 - p_series)))
}
#Plot Single-Game Probabilities
probabilities <- c(prob_success1, prob_success2, prob_success3, prob_success4, prob_success5, prob_success6, prob_success7)
# NOTE(review): the next line overwrites the computed probabilities with
# hard-coded constants (presumably saved from an earlier MCMC run, since the
# simulations are stochastic); the plot therefore ignores the vector built
# above. Remove one of the two assignments once the intent is confirmed.
probabilities <- c(.597, .527, .518, .528, .5515, .5215, .641)
plot(probabilities, type="b", xlab="Game", main="Conditional Probability of Single-Game Success",
     sub="Games 1 through 7", ylim=c(.4,.7))
abline(h=.5, col="red") # 50/50 reference line
#Plot Binomial Probabilities
probabilities <- c(prob_win1, prob_win2, prob_win3, prob_win4, prob_win5, prob_win6, prob_win7)
# NOTE(review): same hard-coded override as above.
probabilities <- c(.7, .4, .54, .36, .17, .27, .94)
plot(probabilities, type="b", xlab="Game", main="Conditional Probability of Cubs Winning World Series",
     sub="Games 1 through 7", ylim=c(0,1))
abline(h=.5, col="red") # 50/50 reference line
|
375de80577e2b756b93bdd252963b1c10c815426 | 2da85138f00ce42f78a27a323a48c2a41eee3576 | /tests/testthat/test_cindex.R | bddfab94cfb3e85ae18f5a2cbb7773c365c8be9b | [
"MIT"
] | permissive | Ppower123/survivalmodels | 91d6eef5a0f1565877f6b18578ffd69008c311f4 | aef24ed54cfebf6c8ffb43f6a0f54bdb57262ea1 | refs/heads/main | 2023-08-10T15:17:16.419503 | 2021-09-10T14:39:26 | 2021-09-10T14:39:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 179 | r | test_cindex.R | skip_if_not_installed("survival")
# Sanity checks for cindex(): perfectly concordant risk scores yield 1,
# perfectly discordant scores yield 0, and mismatched input lengths error.
test_that("cindex", {
  risk <- 1:10
  expect_equal(cindex(risk, rev(risk)), 1)
  expect_equal(cindex(risk, risk), 0)
  expect_error(cindex(1:5, 1:6), "length")
})
|
93effbb2c7220c9ee1211484db3b3cd9d592f278 | 2cc56a6341f179923977128ad90bb31419e033d0 | /man/find_terms.Rd | 1c22c31bd135007e6992f9d9e071f9c62c441e56 | [] | no_license | cran/insight | 5e1d2d1c46478c603b491f53aa80de57bc8f54b4 | 247206683ad374a1ba179356410d095f6861aede | refs/heads/master | 2023-07-19T11:33:37.490704 | 2023-06-29T13:30:02 | 2023-06-29T13:30:02 | 174,554,249 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,578 | rd | find_terms.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_terms.R
\name{find_terms}
\alias{find_terms}
\alias{find_terms.default}
\title{Find all model terms}
\usage{
find_terms(x, ...)
\method{find_terms}{default}(x, flatten = FALSE, as_term_labels = FALSE, verbose = TRUE, ...)
}
\arguments{
\item{x}{A fitted model.}
\item{...}{Currently not used.}
\item{flatten}{Logical, if \code{TRUE}, the values are returned
as character vector, not as list. Duplicated values are removed.}
\item{as_term_labels}{Logical, if \code{TRUE}, extracts model formula and tries to
access the \code{"term.labels"} attribute. This should better mimic the \code{terms()}
behaviour even for those models that do not have such a method, but may be
insufficient, e.g. for mixed models.}
\item{verbose}{Toggle warnings.}
}
\value{
A list with (depending on the model) following elements (character
vectors):
\itemize{
\item \code{response}, the name of the response variable
\item \code{conditional}, the names of the predictor variables from the \emph{conditional}
model (as opposed to the zero-inflated part of a model)
\item \code{random}, the names of the random effects (grouping factors)
\item \code{zero_inflated}, the names of the predictor variables from the \emph{zero-inflated} part of the model
\item \code{zero_inflated_random}, the names of the random effects (grouping factors)
\item \code{dispersion}, the name of the dispersion terms
\item \code{instruments}, the names of instrumental variables
}
Returns \code{NULL} if no terms could be found (for instance, due to
problems in accessing the formula).
}
\description{
Returns a list with the names of all terms, including response
value and random effects, "as is". This means, on-the-fly tranformations
or arithmetic expressions like \code{log()}, \code{I()}, \code{as.factor()} etc. are
preserved.
}
\note{
The difference to \code{\link[=find_variables]{find_variables()}} is that \code{find_terms()}
may return a variable multiple times in case of multiple transformations
(see examples below), while \code{find_variables()} returns each variable
name only once.
}
\examples{
if (require("lme4")) {
data(sleepstudy)
m <- suppressWarnings(lmer(
log(Reaction) ~ Days + I(Days^2) + (1 + Days + exp(Days) | Subject),
data = sleepstudy
))
find_terms(m)
}
# sometimes, it is necessary to retrieve terms from "term.labels" attribute
m <- lm(mpg ~ hp * (am + cyl), data = mtcars)
find_terms(m, as_term_labels = TRUE)
}
|
aec974359c317515935be9c9f4f8d052e5a7176a | e0a60bc74db826bf7071c24fa8ad5dc90bacbc4c | /20150322temp.R | 638243d6023f4d84dcdec3851777ade9112a7446 | [] | no_license | vstarkweather/RepRes_PA2 | f234bc6f67a6211705b66e35c4e6e87a0cc16254 | 2f4872cc21b935e58ac2d1b997b31f8b87a7c726 | refs/heads/master | 2020-05-17T05:19:14.812355 | 2015-05-28T21:33:47 | 2015-05-28T21:33:47 | 33,049,427 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,805 | r | 20150322temp.R | cropPrint <- print(data.frame(event = cropDamageSummary$event,
cropDamage_USD = format(cropDamageSummary$cropDamage,
big.mark = ",")))
# Pretty-print each damage/casualty summary with thousands separators,
# then keep the top ten rows of each, and aggregate damages by event type.
with_commas <- function(x) format(x, big.mark = ",")
propertyPrint <- print(data.frame(event = propertyDamageSummary$event,
                                  propertyDamage_USD = with_commas(propertyDamageSummary$propertyDamage)))
damagesPrint <- print(data.frame(event = damageSummary$event,
                                 damages_USD = with_commas(damageSummary$damage)))
injuryPrint <- print(data.frame(event = injurySummary$event,
                                injuries = with_commas(injurySummary$injuries)))
fatalityPrint <- print(data.frame(event = fatalitySummary$event,
                                  fatalities = with_commas(fatalitySummary$fatalities)))
casualityPrint <- print(data.frame(event = casualitySummary$event,
                                   casualities = with_commas(casualitySummary$casualities)))
# Top-ten slices of each summary table ("casuality" follows the upstream
# object names; the conventional spelling is "casualty").
injuryLast <- slice(injuryPrint, 1:10)
fatalityLast <- slice(fatalityPrint, 1:10)
casualityLast <- slice(casualityPrint, 1:10)
cropLLast <- slice(cropPrint, 1:10)
propertyLast <- slice(propertyPrint, 1:10)
damagesLast <- slice(damagesPrint, 1:10)
# Crop, property, and total damage totals per (fixed) event type.
damageSum <- summarize(group_by(stormDamage, fixedEvent),
                       allCrop = sum(cropDmgD),
                       allProperty = sum(propDmgD),
                       allDamage = sum(cropDmgD + propDmgD))
138670309ec4eb6ff0ca65a332da030d27591a6a | 72778c1b19a668cabba6969300276f6666bbbd63 | /R/merge.R | 80682446a16312760457808d4dd87642bf42cc80 | [] | no_license | AndreMikulec/xts | 3fb9cf89fc68eda31f8368f6a60b157ca9a7878a | 57b00a3fef07a6210c6b91fd9b5a46697ba0e75b | refs/heads/master | 2020-03-23T21:08:21.848900 | 2018-10-06T14:10:53 | 2018-10-06T14:10:53 | 142,084,302 | 0 | 0 | null | 2018-07-24T00:39:45 | 2018-07-24T00:39:44 | null | UTF-8 | R | false | false | 11,302 | r | merge.R | #
# xts: eXtensible time-series
#
# Copyright (C) 2008 Jeffrey A. Ryan jeff.a.ryan @ gmail.com
#
# Contributions from Joshua M. Ulrich
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# merge.xts: merge one or more time-series objects by their time index.
#
# Arguments (mirroring base::merge()/zoo's merge.zoo):
#   all      - logical, length 1 or 2: keep non-matching rows from the
#              left/right side (outer- vs inner-style joins).
#   fill     - value used for missing cells, or a function (e.g. na.locf)
#              applied to the merged result at the end.
#   suffixes - one column-name suffix per merged object.
#   join     - "outer", "left", "right" or "inner"; convenience setting
#              that populates `all` (two-object merges only).
#   retside  - which sides' columns to keep in the result.
#   retclass - class of the returned object; FALSE returns the raw result.
#   tzone    - timezone for the merged index.
# The heavy lifting is done in C via .External("mergeXts", ...).
merge.xts <- function(...,
                      all=TRUE,
                      fill=NA,
                      suffixes=NULL,
                      join="outer",
                      retside=TRUE,
                      retclass="xts",
                      tzone=NULL,
                      drop=NULL,
                      check.names=NULL) {
  if(is.logical(retclass) && !retclass) {
    setclass <- FALSE
  } else setclass <- TRUE
  # A function passed as `fill` is applied to the merged object at the end;
  # cells are filled with NA during the merge itself.
  fill.fun <- NULL
  if(is.function(fill)) {
    fill.fun <- fill
    fill <- NA
  }
  # Derive per-object column-name stems from the unevaluated call
  # (this is how zoo handles colnames as well).
  mc <- match.call(expand.dots=FALSE)
  dots <- mc$...
  if(is.null(suffixes)) {
    syms <- names(dots)
    # Unnamed arguments fall back to their deparsed expressions.
    syms[nchar(syms)==0] <- as.character(dots)[nchar(syms)==0]
    if(is.null(syms)) syms <- as.character(dots)
  } else
  if(length(suffixes) != length(dots)) {
    warning("length of 'suffixes' does not match the number of merged objects")
    syms <- as.character(dots)
  } else {
    syms <- as.character(suffixes)
    sfx <- as.character(suffixes)
  }
  # Repeat each stem once per column of the corresponding object.
  .times <- .External('number_of_cols', ..., PACKAGE="xts")
  symnames <- rep(syms, .times) # make.names is applied inside mergeXts/do_merge_xts
  if(length(dots) == 1) {
    # zoo compatibility: "merging" a single *named* object just sets colnames.
    if(!is.null(names(dots))) {
      x <- list(...)[[1]]
      if(is.null(colnames(x)))
        colnames(x) <- symnames
      return(x)
    }
  }
  if( !missing(join) ) {
    # Translate SQL-style join names into the `all` vector:
    # inspired by: http://blogs.msdn.com/craigfr/archive/2006/08/03/687584.aspx
    #
    # (full) outer - all cases, equivalent to all=c(TRUE,TRUE)
    # left  - all x, plus y's that match x
    # right - all y, plus x's that match y
    # inner - only x and y where index(x)==index(y)
    all <- switch(pmatch(join,c("outer","left","right","inner")),
                  c(TRUE, TRUE ), # outer
                  c(TRUE, FALSE), # left
                  c(FALSE, TRUE ), # right
                  c(FALSE, FALSE) # inner
                 )
    if( length(dots) > 2 ) {
      all <- all[1]
      warning("'join' only applicable to two object merges")
    }
  }
  if( length(all) != 2 ) {
    if( length(all) > 2 )
      warning("'all' must be of length two")
    all <- rep(all[1], 2)
  }
  # With more than two objects only full retention of sides is supported.
  if( length(dots) > 2 )
    retside <- TRUE
  if( length(retside) != 2 )
    retside <- rep(retside[1], 2)
  x <- .External('mergeXts',
                 all=all[1:2],
                 fill=fill,
                 setclass=setclass,
                 symnames=symnames,
                 suffixes=suffixes,
                 retside=retside,
                 env=new.env(),
                 tzone=tzone,
                 ..., PACKAGE="xts")
  # Coerce to the requested class via as.<retclass>(), when available;
  # fall back to the xts result if the coercion function is missing/fails.
  if(!is.logical(retclass) && retclass != 'xts') {
    asFun <- paste("as", retclass, sep=".")
    if(!exists(asFun)) {
      warning(paste("could not locate",asFun,"returning 'xts' object instead"))
      return(x)
    }
    xx <- try(do.call(asFun, list(x)))
    if(!inherits(xx,'try-error')) {
      return(xx)
    }
  }
  if(!is.null(fill.fun)) {
    fill.fun(x)
  } else
    return(x)
}
# .merge.xts: legacy/alternate R-level merge with two explicit arguments
# (x, y) plus ..., kept alongside merge.xts. Index alignment is delegated
# to the C routine do_merge_xts, one pairwise merge at a time.
.merge.xts <- function(x,y,...,
                       all=TRUE,
                       fill=NA,
                       suffixes=NULL,
                       join="outer",
                       retside=TRUE,
                       retclass="xts") {
  # Merging a single object is the identity.
  if(missing(y))
    return(x)
  if(is.logical(retclass) && !retclass) {
    setclass <- FALSE
  } else setclass <- TRUE
  mc <- match.call(expand.dots=FALSE)
  xName <- deparse(mc$x)
  yName <- deparse(mc$y)
  dots <- mc$...
  # Mixed TRUE/FALSE `all` over more than two objects: merge the all=TRUE
  # objects and the all=FALSE objects separately via rmerge0, left-join the
  # two results, then restore the original column order.
  if(!missing(...) && length(all) > 2) {
    xx <- list(x,y,...)
    all <- rep(all, length.out=length(xx))
    if(!base::all(all==TRUE) && !base::all(all==FALSE) ) {
    xT <- xx[which(all)]
    xF <- xx[which(!all)]
    return((rmerge0(do.call('rmerge0',xT),
            do.call('rmerge0',xF), join="left"))[,c(which(all),which(!all))])
    }
  }
  # Coerce y to xts; non-coercible objects of matching length borrow x's
  # index, and 1x1 scalars are recycled across x's index.
  tryXts <- function(y) {
    if(!is.xts(y)) {
      y <- try.xts(y, error=FALSE)
      if(!is.xts(y)) {
        if (NROW(y) == NROW(x)) {
          y <- structure(y, index = .index(x))
        }
        else if (NROW(y) == 1 && NCOL(y) == 1) {
          y <- structure(rep(y, length.out = NROW(x)), index = .index(x))
        }
        else stop(paste("cannot convert", deparse(substitute(y)),
                  "to suitable class for merge"))
      }
    }
    return(y)
  }
  if( !missing(join) ) {
    # join logic applied to index:
    # inspired by: http://blogs.msdn.com/craigfr/archive/2006/08/03/687584.aspx
    #
    # (full) outer - all cases, equivalent to all=c(TRUE,TRUE)
    # left - all x, plus y's that match x
    # right - all y, plus x's that match y
    # inner - only x and y where index(x)==index(y)
    all <- switch(pmatch(join,c("outer","left","right","inner")),
                  c(TRUE, TRUE ), # outer
                  c(TRUE, FALSE), # left
                  c(FALSE, TRUE ), # right
                  c(FALSE, FALSE) # inner
          )
  }
  # Build unique column names; `suffixes` is used only when exactly one
  # suffix per merged object was supplied, otherwise make.unique().
  makeUnique <- function(cnames, nc, suff, dots) {
    if(is.null(suff) || length(suff) != (length(dots)+2)) return(make.unique(cnames))
    paste(cnames, rep(suff, times=nc),sep=".")
  }
  if( length(all) == 1 )
    all <- rep(all, length.out=length(dots)+2)
  if( length(retside) == 1 )
    retside <- rep(retside, length.out=length(dots)+2)
  y <- tryXts(y)
  # Column-name stems: real colnames when present, else the deparsed
  # argument expressions.
  COLNAMES <- c(colnames(x),colnames(y))
  if(length(COLNAMES) != (NCOL(x)+NCOL(y)))
    COLNAMES <- c(rep(xName,NCOL(x)), rep(yName,NCOL(y)))
  xCOLNAMES <- colnames(x)
  if(is.null(xCOLNAMES))
    xCOLNAMES <- rep(xName,NCOL(x))
  yCOLNAMES <- colnames(y)
  if(is.null(yCOLNAMES))
    yCOLNAMES <- rep(yName,NCOL(y))
  COLNAMES <- c(xCOLNAMES,yCOLNAMES)
  nCOLS <- c(NCOL(x), NCOL(y), sapply(dots, function(x) NCOL(eval.parent(x))))
  CNAMES <- if(length(dots)==0) {
    makeUnique(COLNAMES, nCOLS, suffixes, dots)
  } else NULL
  # First pairwise merge (x with y); further objects in ... are folded in
  # one at a time below, with colnames set only on the final merge.
  x <- .Call("do_merge_xts",
         x, y, all, fill[1], setclass, CNAMES, retside, PACKAGE="xts")
  if(length(dots) > 0) {
    for(i in 1:length(dots)) {
      currentCOLNAMES <- colnames(eval.parent(dots[[i]]))
      if(is.null(currentCOLNAMES))
        currentCOLNAMES <- rep(deparse(dots[[i]]),NCOL(eval.parent(dots[[i]])))
      COLNAMES <- c(COLNAMES, currentCOLNAMES)
      if( i==length(dots) ) #last merge, set colnames now
        CNAMES <- makeUnique(COLNAMES, nCOLS, suffixes, dots)
      x <- .Call("do_merge_xts",
             x, tryXts(eval.parent(dots[[i]])), all,
             fill[1], setclass, CNAMES, retside, PACKAGE="xts")
    }
  }
  # Optionally coerce the result to another class via as.<retclass>().
  if(!is.logical(retclass) && retclass != 'xts') {
    xx <- try(do.call(paste("as",retclass,sep="."), list(x)))
    if(!inherits(xx,'try-error')) {
      return(xx)
    }
  }
  return(x)
}
# rmerge0: experimental/raw recursive merge used internally (see the
# multi-object branch of .merge.xts). Unlike merge.xts it never sets
# column names (CNAMES is forced to NULL) and returns the raw merged
# object without retclass coercion. Commented-out sections are kept as
# a record of the earlier design.
rmerge0 <- function(x,y,...,
                    all=TRUE,
                    fill=NA,
                    suffixes=NULL,
                    join="outer",
                    retside=TRUE,
                    retclass="xts") {
  # Merging with nothing is the identity.
  if(missing(y) || is.null(y))
    return(x)
  if(is.logical(retclass) && !retclass) {
    setclass <- FALSE
  } else setclass <- TRUE
  mc <- match.call(expand.dots=FALSE)
  xName <- deparse(mc$x)
  yName <- deparse(mc$y)
  dots <- mc$...
#  if(!missing(...) && length(all) > 2) {
#    x <- list(x,y,...)
#    all <- rep(all, length.out=length(x))
#    xT <- x[which(all)]
#    xF <- x[which(!all)]
#    return((rmerge0(do.call('rmerge0',xT), do.call('rmerge0',xF), join="left"))[,c(which(all),which(!all))])
#  }
  # Coerce y to xts; non-coercible objects of matching length borrow x's
  # index, and 1x1 scalars are recycled across x's index.
  tryXts <- function(y) {
    if(!is.xts(y)) {
      y <- try.xts(y, error=FALSE)
      if(!is.xts(y)) {
        if (NROW(y) == NROW(x)) {
          y <- structure(y, index = .index(x))
        }
        else if (NROW(y) == 1 && NCOL(y) == 1) {
          y <- structure(rep(y, length.out = NROW(x)), index = .index(x))
        }
        else stop(paste("cannot convert", deparse(substitute(y)),
                  "to suitable class for merge"))
      }
    }
    return(y)
  }
  if( !missing(join) ) {
    # join logic applied to index:
    # inspired by: http://blogs.msdn.com/craigfr/archive/2006/08/03/687584.aspx
    #
    # (full) outer - all cases, equivalent to all=c(TRUE,TRUE)
    # left - all x, plus y's that match x
    # right - all y, plus x's that match y
    # inner - only x and y where index(x)==index(y)
    all <- switch(pmatch(join,c("outer","left","right","inner")),
                  c(TRUE, TRUE ), # outer
                  c(TRUE, FALSE), # left
                  c(FALSE, TRUE ), # right
                  c(FALSE, FALSE) # inner
          )
  }
  # Retained for parity with .merge.xts, though CNAMES is forced NULL below.
  makeUnique <- function(cnames, nc, suff, dots) {
    if(is.null(suff) || length(suff) != (length(dots)+2)) return(make.unique(cnames))
    paste(cnames, rep(suff, times=nc),sep=".")
  }
  if( length(all) == 1 )
    all <- rep(all, length.out=length(dots)+2)
  if( length(retside) == 1 )
    retside <- rep(retside, length.out=length(dots)+2)
  y <- tryXts(y)
  COLNAMES <- c(colnames(x),colnames(y))
  if(length(COLNAMES) != (NCOL(x)+NCOL(y)))
    COLNAMES <- c(rep(xName,NCOL(x)), rep(yName,NCOL(y)))
  xCOLNAMES <- colnames(x)
  if(is.null(xCOLNAMES))
    xCOLNAMES <- rep(xName,NCOL(x))
  yCOLNAMES <- colnames(y)
  if(is.null(yCOLNAMES))
    yCOLNAMES <- rep(yName,NCOL(y))
  COLNAMES <- c(xCOLNAMES,yCOLNAMES)
  nCOLS <- c(NCOL(x), NCOL(y), sapply(dots, function(x) NCOL(eval.parent(x))))
#  CNAMES <- if(length(dots)==0) {
#    makeUnique(COLNAMES, nCOLS, suffixes, dots)
#  } else NULL
  CNAMES <- NULL
  # First pairwise merge (x with y); further objects in ... are folded in
  # one at a time below.
  x <- .Call("do_merge_xts",
         x, y, all, fill[1], setclass, CNAMES, retside, PACKAGE="xts")
  if(length(dots) > 0) {
    for(i in 1:length(dots)) {
      currentCOLNAMES <- colnames(eval.parent(dots[[i]]))
      if(is.null(currentCOLNAMES))
        currentCOLNAMES <- rep(deparse(dots[[i]]),NCOL(eval.parent(dots[[i]])))
      COLNAMES <- c(COLNAMES, currentCOLNAMES)
#      if( i==length(dots) ) #last merge, set colnames now
#        CNAMES <- makeUnique(COLNAMES, nCOLS, suffixes, dots)
      x <- .Call("do_merge_xts",
             x, tryXts(eval.parent(dots[[i]])), all,
             fill[1], setclass, CNAMES, retside, PACKAGE="xts")
    }
  }
  return(x)
}
#library(xts)
#x <- .xts(1:10, 1:10)
#rmerge(x,x,x)
#rmerge(x,x,1)
#z <- as.zoo(x)
#rmerge(x,z)
#rmerge(x,x,z)
#rmerge(x,1,z,z)
#X <- .xts(1:1e6, 1:1e6)
#system.time(rmerge(X,X,X,X,X,X,X))
|
bf91351c331aaa586c8ae241ab738f1cdc8ceb92 | efed85c519c7a02278315ca4a8cac26da48ac09f | /DIVERSITY_PCOA_BOX_PLOTS.R | 1acdea994878515368e94117bc3a299dc9294dc8 | [] | no_license | eertekin/gypsum_paper | fd0c31946bd5bbeb978ac1e386c71c01e1cf6b79 | c40961dd1633d9a884700184e8e0006a44b8be7e | refs/heads/master | 2022-11-16T01:35:40.289710 | 2020-07-13T01:56:36 | 2020-07-13T01:56:36 | 254,787,115 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,178 | r | DIVERSITY_PCOA_BOX_PLOTS.R | library(ggplot2) ; library(tidyr) ; library(dplyr) ; library(stringr) ; library(cowplot) ; library(gridExtra)
## OTU PCOA PLOT ##
# Principal coordinates of the Bray-Curtis distances between CSS-normalized
# OTU profiles; points colored by collection site.
OTU_pcoa <- pcoa(bray_curtis_CSS_normalized_otu_table)
otu_vectors <- as.data.frame(OTU_pcoa$vectors[, 1:2])
otu_vectors$site <- c(rep("CL", 17), rep("MTQ", 8), rep("KM", 9))
site_colors <- c("#FFC000", "#00B0F0", "#92D050")
ggplot(otu_vectors, aes(x = Axis.1, y = Axis.2, fill = site)) +
  geom_point(size = 8, shape = 21, color = "gray41") +
  scale_fill_manual(values = site_colors) +
  theme_bw() +
  theme(axis.title = element_text(size = 15),
        axis.text = element_text(size = 15),
        aspect.ratio = 1,
        legend.position = "none") +
  xlab("Variance explained = 10%") +
  ylab("Variance explained = 47%")
ggsave("otu_pcoaplot.pdf", height = 5, width = 5)
## OTU OBSERVED RICHNESS BOX PLOT ##
# Observed OTU richness per site, boxplot with error bars.
ggplot(Obs_richness_site, aes(x = Site, y = Richness, fill = Site)) +
  stat_boxplot(geom = "errorbar", width = 0.2, color = "gray41") +
  geom_boxplot(color = "gray41") +
  scale_fill_manual(values = site_colors) +
  scale_x_discrete(limits = c("CL", "MTQ", "KM")) +
  theme_bw() +
  theme(axis.title = element_text(size = 15),
        axis.text = element_text(size = 15),
        aspect.ratio = 1,
        legend.position = "none") +
  coord_cartesian(ylim = c(200, 700)) +
  xlab("") +
  ylab("Observed richness")
ggsave("otu_boxplot.pdf", height = 5, width = 5)
## METAGENOME PCOA PLOT ##
# PCoA of Bray-Curtis distances between phylum-level metagenome summaries.
gypsum_dist <- vegdist(t(gypsum.phylum.summary[, 2:ncol(gypsum.phylum.summary)]), method = "bray")
gypsum.phylum.pcoa <- pcoa(gypsum_dist)
phylum_vectors <- as.data.frame(gypsum.phylum.pcoa$vectors[, 1:2])
phylum_vectors$site <- c(rep("CL", 3), rep("KM", 3), rep("MTQ", 3))
ggplot(phylum_vectors, aes(x = Axis.1, y = Axis.2, fill = site)) +
  geom_point(size = 8, shape = 21, color = "gray41") +
  scale_fill_manual(values = site_colors) +
  theme_bw() +
  theme(axis.title = element_text(size = 15),
        axis.text = element_text(size = 15),
        aspect.ratio = 1,
        legend.position = "none") +
  xlab("Variance explained = 59%") +
  ylab("Variance explained = 30%")
ggsave("phylum_pcoaplot.pdf", height = 5, width = 5)
## FUNCTIONS OBSERVED RICHNESS BOXPLOT##
# Observed KEGG-ortholog richness per community, boxplot by site.
KO_richness <- data.frame(
  community = c("CL1", "CL2", "CL3", "KM1", "KM2", "KM3", "MTQ1", "MTQ2", "MTQ3"),
  site = c(rep("CL", 3), rep("KM", 3), rep("MTQ", 3)),
  richness = c(4505, 4566, 4689, 4318, 4210, 4504, 4495, 4508, 4695))
ggplot(KO_richness, aes(x = site, y = richness, fill = site)) +
  stat_boxplot(geom = "errorbar", width = 0.2, color = "gray41") +
  geom_boxplot(color = "gray41") +
  scale_fill_manual(values = site_colors) +
  scale_x_discrete(limits = c("CL", "MTQ", "KM")) +
  theme_bw() +
  theme(axis.title = element_text(size = 20),
        axis.text = element_text(size = 15),
        aspect.ratio = 1,
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.5, 0.5, 0.5), "in")) +
  coord_cartesian(ylim = c(4200, 5000)) +
  xlab("") +
  ylab("Observed richness")
ggsave("functions_boxplot.pdf", height = 5, width = 5)
## FUNCTIONS PCOA PLOT ##
# PCoA of Bray-Curtis distances between KEGG functional profiles.
functions_dist <- vegdist(t(gypsum_kegg_sums[, 2:ncol(gypsum_kegg_sums)]), method = "bray")
functions_pcoa <- pcoa(functions_dist)
functions_vectors <- as.data.frame(functions_pcoa$vectors[, 1:2])
functions_vectors$site <- c(rep("CL", 3), rep("KM", 3), rep("MTQ", 3))
ggplot(functions_vectors, aes(x = Axis.1, y = Axis.2, fill = site)) +
  geom_point(size = 8, shape = 21, color = "gray41") +
  scale_fill_manual(values = site_colors) +
  theme_bw() +
  scale_x_continuous(breaks = c(-0.1, 0, 0.1)) +
  theme(axis.title = element_text(size = 20),
        axis.text = element_text(size = 15),
        plot.margin = unit(c(0.35, 0.35, 0.35, 0.35), "in"),
        aspect.ratio = 1,
        legend.position = "top") +
  xlab("Variance explained = 67%") +
  ylab("Variance explained = 21%")
ggsave("functions_pcoaplot_2.pdf", height = 5, width = 5)
|
a639f1eda2459cdde188b73dd1454158748148f3 | ec98d6494de23f1a71dc579379e84c817abe0a95 | /run_analysis.R | f4da70d50bfac9ad0fd7d0f05cccaff0b19fe02a | [] | no_license | jdmercado/getting-cleaning-data | 0b99a289a5b49386207e05fd793c038de537b703 | 369a3e758506b7a43fd5dc98ddcd0a630b8288ee | refs/heads/master | 2021-01-19T13:32:53.069599 | 2014-08-24T23:27:51 | 2014-08-24T23:27:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,976 | r | run_analysis.R | # run_analysis -- Based on data downloaded from a Human Activity Recognition
# data set using smartphones (see readme for credit information), the
# script merges some tables, extracts variables related to mean and
# standard deviation of different measurements, reshapes them, and
# generates a tidy data set with the averages of the variables extracted.
# This data set is written to disk.
#
run_analysis <- {
  # NOTE(review): `run_analysis <- { ... }` evaluates this block immediately
  # when the file is sourced and stores the block's last value (the result of
  # write.table) in `run_analysis`; it does NOT define a callable function.
  # If on-demand execution was intended, this should be
  # `run_analysis <- function() { ... }` -- confirm before changing, since
  # the current form does run the analysis on source().
  setwd("~")
  # Data should have already been downloaded to a directory datProj
  dirf <- "datProj"
  if (!file.exists(dirf)) {
    stop("First download and unzip data to \"~/datProj\"")
  }
  setwd("datProj")
  options(stringsAsFactors = FALSE) # disable stringsAsFactors as TRUE
  # Load general data: feature names and activity-id/label lookup.
  ftr <- read.table("./UCI HAR Dataset/features.txt")
  activ <- read.table("./UCI HAR Dataset/activity_labels.txt")
  # Load test data: subject ids, measurements, activity ids.
  tsSub <- read.table("./UCI HAR Dataset/test/subject_test.txt")
  tsX <- read.table("./UCI HAR Dataset/test/X_test.txt",colClasses="numeric")
  tsY <- read.table("./UCI HAR Dataset/test/Y_test.txt")
  # Load train data: same layout as the test set.
  trSub <- read.table("./UCI HAR Dataset/train/subject_train.txt")
  trX <- read.table("./UCI HAR Dataset/train/X_train.txt",colClasses="numeric")
  trY <- read.table("./UCI HAR Dataset/train/Y_train.txt")
  # Put together training data set
  tr <- data.frame(trSub[], trY[], trX[,])
  names(tr) <- c("Subject","idActiv",ftr[,2]) # assigns column names
  # Put together test data set
  ts <- data.frame(tsSub[], tsY[], tsX[,])
  names(ts) <- c("Subject","idActiv",ftr[,2]) # assigns column names
  # Free space from data not needed anymore
  rm(trSub)
  rm(trY)
  rm(trX)
  rm(tsSub)
  rm(tsY)
  rm(tsX)
  # Create one data set from training and test data sets
  dat <- rbind(tr, ts)
  # Select features for mean() and standard deviation or std()
  var <- ftr$V2[grep("mean\\(\\)|std\\(\\)",ftr$V2)]
  # Extract new data frame only with those measurements
  df <- dat[,c("Subject","idActiv",var[])] # include also subject and activity
  # Free space not needed
  rm(dat)
  rm(tr)
  rm(ts)
  # Merge data frame with activity labels to get descriptive activity names
  dfm <- merge(activ, df[order(df$idActiv),], by.x="V1", by.y="idActiv")
  dfm <- dfm[,2:69] # drop the numeric activity-id column (V1)
  rm(df) # Free space of data not needed
  # Improve variable names to make them more descriptive
  var <- gsub('^t','Time',var)
  var <- gsub('^f','Freq',var)
  var <- gsub('([[:upper:]])', ' \\1', var) # split on uppercase letters
  names(dfm) <- c("Activity","Subject",var[]) # replace column names
  # Reshape data to create a second data set with averages of variables
  # by activity and subject
  library(reshape2)
  dfMelt <- melt(dfm, id=c("Activity","Subject"), measure.vars=var[])
  dfMean <- dcast(dfMelt, Activity + Subject ~ variable, mean)
  # Write text file with tidy data set of summary found
  write.table(dfMean, file="tidyDat2.txt", row.names=FALSE)
}
b1b03ca66882bf680baa3cc0114ad4e1b648c17d | 284160a67638e032ec00c4591e9d56b733eaa3a2 | /ui.R | d9f9b4178dd31b1c796bf834e032e42d6e637124 | [] | no_license | alexplocik/PicoGreen | f80a4dc9ea0dacb3d240a63fdabb9533af6912c3 | 86d3221eb5b4a89d055275c1e2a8c78d11ff14d3 | refs/heads/master | 2016-08-12T06:22:55.572334 | 2015-12-28T22:54:50 | 2015-12-28T22:54:50 | 48,707,312 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,164 | r | ui.R | library(shiny)
# UI definition for the Tecan PicoGreen Shiny app: a navbar with three tabs.
#   "Measurements": raw plate readings plus plate-layout inputs, and plots
#   "Concentration": table of computed concentrations
#   "Summary Stats": table of per-group summary statistics
# Well positions are entered as space/comma-separated labels (e.g. "A12 B12").
# NOTE(review): output IDs ("plate", "std_curve", "sample_plot", "table",
# "summary_stats") must match the render* calls in server.R -- confirm there.
shinyUI(
  # titlePanel("Tecan PicoGreen GUI"),
  navbarPage("Tecan PicoGreen GUI",
    # Tab 1: data entry. Default values document the expected input format;
    # the "measurements" default is a full 96-value example plate read in
    # row-major order (A1..A12, B1..B12, ...).
    tabPanel('Measurements',
      # Panel 1: raw fluorescence values, background wells, dilution factor
      wellPanel(fluidRow(column(12,
        column(2, textInput(inputId = "measurements", label = "Measurements (96-well plate)", value = "2740 1255 677 306 174 103 66 46 44 43 45 43 2730 1414 622 306 168 97 56 45 43 45 45 42 438 752 1064 454 609 929 360 538 566 35 40 37 575 714 968 483 618 957 406 611 606 38 37 37 192 40 222 1047 25 24 25 24 23 24 22 23 383 39 242 1039 22 23 24 24 25 23 23 23 24 21 23 23 23 22 26 23 23 22 23 22 22 22 24 23 22 22 23 24 23 22 22 24")),
        column(2, textInput(inputId = "background.position", label = "Background wells", value = "A12 B12")),
        column(2, textInput(inputId = "sample.dilution.factor", label = "Sample dilution factor value", value = "10"))))),
      # Panel 2: two standard curves, each defined by its wells, the starting
      # concentration, and the serial dilution factor between wells
      wellPanel(fluidRow(column(12,
        column(2, textInput(inputId = "std.curve.position.1", label = "Standard curve wells", value = "A1 A2 A3 A4 A5 A6 A7 A8")),
        column(2, textInput(inputId = "starting.conc.1", label = "Starting concentration", value = "1")),
        column(2, textInput(inputId = "serial.dilution.factor.1", label = "Serial dilution factor", value = "2"))),
        column(12,
        column(2, textInput(inputId = "std.curve.position.2", label = "Standard curve wells", value = "B1 B2 B3 B4 B5 B6 B7 B8")),
        column(2, textInput(inputId = "starting.conc.2", label = "Starting concentration", value = "1")),
        column(2, textInput(inputId = "serial.dilution.factor.2", label = "Serial dilution factor", value = "2"))
        ))),
      # Panel 3: up to six sample groups, each with a display name and the
      # wells holding its replicates
      wellPanel(fluidRow(column(12,
        column(2, textInput(inputId = "group1", label = "Group 1 name", value = "Group 1"), textInput(inputId = "group1.pos", label = "Group 1 wells", value = "C1, C4, C7 D1, D4, D7")),
        column(2, textInput(inputId = "group2", label = "Group 2 name", value = "Group 2"), textInput(inputId = "group2.pos", label = "Group 2 wells", value = "C2, C5, C8 D2, D5, D8")),
        column(2, textInput(inputId = "group3", label = "Group 3 name", value = "Group 3"), textInput(inputId = "group3.pos", label = "Group 3 wells", value = "C3, C6, C9 D3, D6, D9")),
        column(2, textInput(inputId = "group4", label = "Group 4 name", value = "Group 4"), textInput(inputId = "group4.pos", label = "Group 4 wells", value = "C10, C11, C12 D10, D11, D12")),
        column(2, textInput(inputId = "group5", label = "Group 5 name", value = "Group 5"), textInput(inputId = "group5.pos", label = "Group 5 wells", value = "E1, E3, F1, F3")),
        column(2, textInput(inputId = "group6", label = "Group 6 name", value = "Group 6"), textInput(inputId = "group6.pos", label = "Group 6 wells", value = "E4, F4"))
        ))),
      # submitButton: inputs only take effect when "Submit" is pressed
      submitButton("Submit"),
      hr(),
      # Result displays: plate layout (text), standard curve and group plots
      fluidRow(column(12, h5("96-well plate"), verbatimTextOutput("plate"))),
      fluidRow(column(4, h5("Standard curve"), plotOutput("std_curve")), column(8, h5("Summary stats"), plotOutput("sample_plot")))
    ),
    # Tab 2: computed concentration table
    tabPanel('Concentration',
      fluidRow(column(6, tableOutput("table")))
    ),
    # Tab 3: group summary statistics table
    tabPanel('Summary Stats',
      fluidRow(column(6, tableOutput("summary_stats")))
    )
  )
)# End Shiny
9fcf91f285409eb65ffa5d4f1f7c7e404c05ee84 | cba10b84d2cc708dd66148a4511451d77a92a7c5 | /R/SS_output.R | 7db04f4a8605a945768ca5a287df03d4b15baf0b | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | r4ss/r4ss | 03e626ae535ab959ff8109a1de37e3e8b44fe7ad | 0ef80c1a57e4a05e6172338ddcb0cda49530fa93 | refs/heads/main | 2023-08-17T08:36:58.041402 | 2023-08-15T21:42:05 | 2023-08-15T21:42:05 | 19,840,143 | 35 | 57 | null | 2023-07-24T20:28:49 | 2014-05-16T00:51:48 | R | UTF-8 | R | false | false | 159,683 | r | SS_output.R | #' A function to create a list object for the output from Stock Synthesis
#'
#' Reads the Report.sso and (optionally) the covar.sso, CompReport.sso and
#' other files produced by Stock Synthesis and formats the important
#' content of these files into a list in the R workspace. A few statistics
#' unavailable elsewhere are taken from the .par file. Summary
#' information and statistics can be returned to the R console or just
#' contained within the list produced by this function.
#'
#'
#' @template dir
#' @param dir.mcmc Optional directory containing MCMC output. This can either be
#' relative to `dir`, such that `file.path(dir, dir.mcmc)`
#' will end up in the right place, or an absolute path.
#' @param repfile Name of the big report file (could be renamed by user).
#' @param compfile Name of the composition report file.
#' @param covarfile Name of the covariance output file.
#' @param forefile Name of the forecast file.
#' @param wtfile Name of the file containing weight at age data.
#' @param warnfile Name of the file containing warnings.
#' @param ncols Deprecated. This value is now calculated automatically.
#' @param forecast Read the forecast-report file?
#' @param warn Read the Warning.sso file?
#' @param covar Read covar.sso?
#' @param readwt Read the weight-at-age file?
#' @template verbose
#' @param printstats Print summary statistics about the output to the R GUI?
#' @param hidewarn Hides some warnings output from the R GUI.
#' @param NoCompOK Allow the function to work without a CompReport file.
#' @param aalmaxbinrange The largest length bin range allowed for composition
#' data to be considered as conditional age-at-length data.
#' @return Many values are returned. Complete list would be quite long, but
#' should probably be created at some point in the future.
#' @author Ian Stewart, Ian Taylor
#' @export
#' @seealso [SS_plots()]
#' @examples
#' \dontrun{
#' # read model output
#' myreplist <- SS_output(dir = "c:/SS/Simple/")
#' # make a bunch of plots
#' SS_plots(myreplist)
#'
#' # read model output and also read MCMC results (if run), which in
#' # this case would be stored in c:/SS/Simple/mcmc/
#' myreplist <- SS_output(dir = "c:/SS/Simple/", dir.mcmc = "mcmc")
#' }
#'
SS_output <-
function(dir = "C:/myfiles/mymodels/myrun/",
dir.mcmc = NULL,
repfile = "Report.sso",
compfile = "CompReport.sso",
covarfile = "covar.sso",
forefile = "Forecast-report.sso",
wtfile = "wtatage.ss_new",
warnfile = "warning.sso",
ncols = lifecycle::deprecated(),
forecast = TRUE,
warn = TRUE,
covar = TRUE,
readwt = TRUE,
verbose = TRUE,
printstats = TRUE,
hidewarn = FALSE,
NoCompOK = TRUE,
aalmaxbinrange = 4) {
flush.console()
#################################################################################
## embedded functions: emptytest, match_report_line and match_report_table
#################################################################################
emptytest <- function(x) {
# function to help test for empty columns
sum(!is.na(x) & x == "") / length(x)
}
match_report_line <- function(string, obj = rawrep[, 1], substr1 = TRUE) {
# return a line number from the report file (or other file)
# substr1 controls whether to compare subsets or the whole line
match(string, if (substr1) {
substring(obj, 1, nchar(string))
} else {
obj
})
}
    match_report_table <- function(string1,
                                   adjust1,
                                   string2 = NULL,
                                   adjust2 = -1,
                                   which_blank = 1,
                                   cols = "nonblank",
                                   matchcol1 = 1,
                                   matchcol2 = 1,
                                   obj = rawrep,
                                   blank_lines = rep_blank_or_hash_lines,
                                   substr1 = TRUE,
                                   substr2 = TRUE,
                                   header = FALSE,
                                   type.convert = FALSE) {
      # extract a table from Report.sso by matching a keyword
      #
      # Returns a subset of rows from the report matrix. The subset is
      # bounded by a keyword at the start (and either a second keyword or a
      # blank/"#" line at the end), with integer adjustments shifting the
      # boundaries up or down relative to the matched lines.
      # Returns NULL when either boundary keyword is not found.
      #
      # @param string1 keyword near top of table
      # @param adjust1 integer for number of rows after string1 to start table
      # @param string2 keyword near bottom of table
      #   (or NULL to use blank line to end table)
      # @param adjust2 integer for number of rows after string2 to end table
      #   (often a negative value)
      # @param which_blank which blank line (after string1) to use as the end
      #   of the table (if using string2 = NULL)
      # @param cols which columns to return, can be an integer, a vector, "all",
      #   or 'nonblank' (where this last returns all columns with at least one
      #   non-blank value in it)
      # @param matchcol1 which column to search for string1
      # @param matchcol2 which column to search for string2
      # @param obj matrix object in which to search (always rawrep so far)
      # @param blank_lines vector of line numbers of obj which are blank
      #   (to save the time of replicating this in each function call)
      # @param substr1 allow string1 to be a leading substring of the text in
      #   matchcol1?
      # @param substr2 allow string2 to be a leading substring of the text in
      #   matchcol2?
      # @param header Is the first row of the table a header? If so, use it
      #   for column names (empty header cells become "NoName") and drop it.
      # @param type.convert apply the type.convert() function to the
      #   resulting table?
      # find the row matching string1 (optionally on a truncated comparison)
      line1 <- match(
        string1,
        if (substr1) {
          substring(obj[, matchcol1], 1, nchar(string1))
        } else {
          obj[, matchcol1]
        }
      )
      if (is.null(string2)) {
        # get first blank or "#" line after the start
        line2 <- blank_lines[blank_lines > line1][which_blank]
        # if no remaining blank lines, use the end of the file
        if (is.na(line2)) {
          line2 <- nrow(obj)
        }
      } else {
        # find the row matching string2 the same way as string1
        line2 <- match(
          string2,
          if (substr2) {
            substring(obj[, matchcol2], 1, nchar(string2))
          } else {
            obj[, matchcol2]
          }
        )
      }
      # either keyword missing: no table to return
      if (is.na(line1) | is.na(line2)) {
        return(NULL)
      }
      # column selection: explicit indices, everything, or non-blank columns
      if (is.numeric(cols)) {
        out <- obj[(line1 + adjust1):(line2 + adjust2), cols]
      }
      if (cols[1] == "all") {
        out <- obj[(line1 + adjust1):(line2 + adjust2), ]
      }
      if (cols[1] == "nonblank") {
        # returns only columns that contain at least one non-empty value
        out <- obj[(line1 + adjust1):(line2 + adjust2), ]
        out <- out[, apply(out, 2, emptytest) < 1]
      }
      if (header && nrow(out) > 0) {
        # promote first row to column names; blank cells become "NoName"
        out[1, out[1, ] == ""] <- "NoName"
        names(out) <- out[1, ]
        out <- out[-1, ]
      }
      if (type.convert) {
        # convert character columns to numeric/logical where possible
        out <- type.convert(out, as.is = TRUE)
      }
      return(out)
    } # end match_report_table
df.rename <- function(df, oldnames, newnames) {
# function to replace names in dataframes
# added to clean up adaptation to more consistent
# syntax in Report.sso as of SS version 3.30.01.15.
if (!is.null(df)) {
for (iname in seq_along(oldnames)) {
names(df)[names(df) == oldnames[iname]] <- newnames[iname]
}
}
return(df)
}
# check inputs
if (lifecycle::is_present(ncols)) {
lifecycle::deprecate_warn(
when = "1.46.0",
what = "SS_output(ncols)",
details = "Input 'ncols' no longer needed."
)
}
# check to make sure the first input is in the corect format
if (!is.character(dir) | length(dir) != 1) {
stop("Input 'dir' should be a character string for a directory")
}
# get info on output files created by Stock Synthesis
shortrepfile <- repfile
repfile <- file.path(dir, repfile)
# figure out which par file to read
parfile <- dir(dir, pattern = ".par$")
if (length(parfile) > 1) {
parinfo <- file.info(file.path(dir, parfile))
parfile <- parfile[!parinfo[["isdir"]] & # exclude directories
parinfo[["mtime"]] == max(parinfo[["mtime"]][!parinfo[["isdir"]]])] # pick most recently changed file
# if there are still duplicates (with the same 'mtime' value),
# choose anything called "ss.par"
if (length(parfile) > 1 && any(parfile == "ss.par")) {
parfile <- "ss.par"
}
# if there are still duplicates after all that, choose the first one
if (length(parfile) > 1) {
parfile <- parfile[1]
}
if (verbose) {
message(
"Multiple files in directory match pattern *.par\n",
"choosing most recently modified:", parfile
)
}
}
if (length(parfile) == 0) {
if (!hidewarn) {
message("Some stats skipped because the .par file not found.")
}
parfile <- NA
} else {
parfile <- file.path(dir, parfile)
}
# read three rows to get start time and version number from rep file
if (file.exists(repfile)) {
if (file.info(repfile)$size > 0) {
if (verbose) {
message("Getting header info from:\n ", repfile)
}
} else {
stop("report file is empty: ", repfile)
}
} else {
stop("can't find report file: ", repfile)
}
rephead <- readLines(con = repfile, n = 50)
# warn if SS version used to create rep file is too old or too new for this code
# note: SS_versionCode is new with V3.20
# perhaps in the future we will use it to replace SS_versionshort throughout r4ss?
SS_versionCode <- rephead[grep("#V", rephead)]
SS_version <- rephead[grep("Stock_Synthesis", rephead)]
SS_version <- SS_version[substring(SS_version, 1, 2) != "#C"] # remove any version numbering in the comments
SS_version <- SS_version[1]
if (substring(SS_version, 1, 2) == "#V") {
SS_version <- substring(SS_version, 3)
}
if (substring(SS_version, 1, 4) == "3.30") {
SS_versionshort <- "3.30"
SS_versionNumeric <- as.numeric(SS_versionshort)
} else {
# typically something like "SS-V3.24"
SS_versionshort <- toupper(substr(SS_version, 1, 8))
SS_versionNumeric <- as.numeric(substring(SS_versionshort, 5))
}
SS_versionMax <- 3.30
SS_versionMin <- 3.24
# test for version compatibility with this code
if (SS_versionNumeric < SS_versionMin | SS_versionNumeric > SS_versionMax) {
warning(
"This function tested on SS versions 3.24 and 3.30.\n",
" You are using ", strsplit(SS_version, split = ";")[[1]][1],
" which MIGHT NOT WORK with this package."
)
} else {
if (verbose) {
message(
"This function tested on SS versions 3.24 and 3.30.\n",
" You are using ", strsplit(SS_version, split = ";")[[1]][1],
" which SHOULD work with this package."
)
}
}
findtime <- function(lines) {
# quick function to get model start time from SS output files
time <- strsplit(lines[grep("ime", lines)], "ime: ")[[1]]
if (length(time) < 2) {
return()
} else {
return(time[2])
}
}
repfiletime <- findtime(rephead)
if (verbose) {
message("Report file time:", repfiletime)
}
# time check for CompReport file
comp <- FALSE
if (is.null(compfile)) {
if (verbose) {
message("Skipping CompReport because 'compfile = NULL'")
}
} else {
if (file.exists(file.path(dir, compfile))) {
# non-NULL compfile input provided and file exists
compfile <- file.path(dir, compfile)
comphead <- readLines(con = compfile, n = 30)
compskip <- grep("Composition_Database", comphead)
if (length(compskip) == 0) {
if (verbose) {
message(
"No composition data, possibly because detailed output",
" is turned off in the starter file."
)
}
} else {
# compend value helps diagnose when no comp data exists in CompReport.sso file.
compend <- grep(" end ", comphead)
if (length(compend) == 0) {
compend <- 999
}
comptime <- findtime(comphead)
if (is.null(comptime) || is.null(repfiletime)) {
message(
"problem comparing the file creation times:\n",
" Report.sso:", repfiletime, "\n",
" CompReport.sso:", comptime, "\n"
)
} else {
if (comptime != repfiletime) {
message("CompReport time:", comptime, "\n")
stop(shortrepfile, " and ", compfile, " were from different model runs.")
}
}
comp <- TRUE
}
} else {
# non-NULL compfile input provided and file DOESN'T exist
if (!is.null(compfile)) {
if (!NoCompOK) {
stop(
"Missing ", compfile,
". Change the 'compfile' input, rerun model to get the file,",
" or change input to 'NoCompOK = TRUE'"
)
} else {
message("Composition file not found: ", compfile)
}
}
}
} # end check for NULL compfile input
# read report file
if (verbose) {
message("Reading full report file")
}
flush.console()
ncols <- get_ncol(repfile)
rawrep <- read.table(
file = repfile, col.names = 1:ncols, fill = TRUE, quote = "",
colClasses = "character", nrows = -1, comment.char = "",
blank.lines.skip = FALSE
)
# which lines in report file are all blank (either spaces or empty)
rep_blank_lines <- which(apply(rawrep, 1, emptytest) == 1)
# which lines in report file have hash in first column and blank after
rep_hash_lines <- which(rawrep[, 1] == "#" & apply(rawrep[, -1], 1, emptytest) == 1)
# combine both types (could be modified in the future to focus on just one type
rep_blank_or_hash_lines <- sort(unique(c(rep_blank_lines, rep_hash_lines)))
# check empty columns
# these checks should not be triggered thanks to use of get_ncol() above,
# added in December 2019
nonblanks <- apply(rawrep, 2, emptytest) < 1
maxnonblank <- max(0, (1:ncols)[nonblanks == TRUE])
if (maxnonblank == ncols) {
stop(
"all columns are used and some data may been missed,\n",
" increase 'ncols' input above current value (ncols=", ncols, ")"
)
}
# check for revised format to facilitate custom reporting
# added with 3.30.15.06
custom <- !is.na(match_report_line(string = "report:1", obj = rawrep[, 2]))
if (verbose) {
if ((maxnonblank + 1) == ncols) {
message("Got all columns using ncols = ", ncols)
}
if ((maxnonblank + 1) < ncols) {
message(
"Got all columns. To speed code, use ncols = ", maxnonblank + 1,
" in the future."
)
}
message("Got Report file")
}
flush.console()
# read forecast report file
# (this function no longer supports reading yield curve from forecast file
# where it occurred in older SS versions)
if (forecast) {
forecastname <- file.path(dir, forefile)
temp <- file.info(forecastname)$size
if (is.na(temp) | temp == 0) {
if (verbose) {
message("Forecast-report.sso file is missing or empty.")
}
} else {
# read the file
rawforecast1 <- read.table(
file = forecastname, col.names = 1:ncols, fill = TRUE, quote = "",
colClasses = "character", nrows = -1
)
# forecast
grab <- rawforecast1[, 1]
nforecastyears <- as.numeric(rawforecast1[grab %in% c("N_forecast_yrs:"), 2])
nforecastyears <- nforecastyears[1]
# get SPR target
sprtarg <- as.numeric(rawforecast1[match_report_line(
"SPR_target",
rawforecast1[, 1]
), 2])
# starting in SSv3.30.10.00, the Forecast-report file has been restructured
target_definitions <- grep("_as_target", rawforecast1[, 1], value = TRUE)
if (length(target_definitions) == 0) {
# old setup (prior to 3.30.10.00)
btarg <- as.numeric(rawforecast1[match_report_line(
"Btarget",
rawforecast1[, 1]
), 2])
} else {
# new setup with biomass target
if ("Ratio_SSB/B0_as_target" %in% target_definitions) {
btarg <- as.numeric(rawforecast1[match_report_line(
"Ratio_target",
rawforecast1[, 1]
), 2])
}
# new setup with F0.1_as target
if ("F0.1_as_target" %in% target_definitions) {
btarg <- -999
}
}
}
} else {
if (verbose) {
message("You skipped the forecast file.")
}
}
if (!exists("btarg")) {
nforecastyears <- NA
sprtarg <- -999
btarg <- -999
if (verbose) {
message(
" setting SPR target and Biomass target to -999.",
" Lines won't be drawn for these targets by SS_plots unless",
" 'sprtarg' and 'btarg' are provided as inputs."
)
}
}
# set default minimum biomass thresholds based on typical west coast groundfish
minbthresh <- -999
if (!is.na(btarg) & btarg == 0.4) {
if (verbose) {
message(
"Setting minimum biomass threshhold to 0.25",
" based on US west coast assumption associated with biomass target of 0.4.",
" (can replace or override in SS_plots by setting 'minbthresh')"
)
}
minbthresh <- 0.25 # west coast assumption for non flatfish
}
if (!is.na(btarg) & btarg == 0.25) {
if (verbose) {
message(
"Setting minimum biomass threshhold to 0.125",
" based on US west coast assumption associated with flatfish target of 0.25.",
" (can replace or override in SS_plots by setting 'minbthresh')"
)
}
minbthresh <- 0.125 # west coast assumption for flatfish
}
flush.console()
# check for use of temporary files
logfile <- dir(dir, pattern = ".log$")
logfile <- logfile[logfile != "fmin.log"]
if (length(logfile) > 1) {
filetimes <- file.info(file.path(dir, logfile))$mtime
logfile <- logfile[filetimes == max(filetimes)]
if (verbose) {
message(
"Multiple files in directory match pattern *.log\n",
"choosing most recently modified file:", logfile, "\n"
)
}
}
if (length(logfile) == 1 && file.info(file.path(dir, logfile))$size > 0) {
logfile <- readLines(file.path(dir, logfile))
logfile <- grep("^size", logfile, value = TRUE)
if (length(logfile) == 0) {
warning("Error reading ss.log. Check the file, it should contain rows starting with 'size'")
logfile <- NA
} else {
logfile <- tidyr::separate(as.data.frame(logfile),
col = 1,
into = c("File", "Size"),
sep = " = "
)
names(logfile) <- c("TempFile", "Size")
logfile[["Size"]] <- as.numeric(logfile[["Size"]])
maxtemp <- max(logfile[["Size"]])
if (verbose) {
if (maxtemp == 0) {
message(
"Got log file. There were NO temporary files were written",
" in this run."
)
} else {
message("Temporary files were written in this run.")
}
}
}
} else {
logfile <- NA
if (verbose) {
message(
"No non-empty log file in directory or too many files ",
" matching pattern *.log"
)
}
}
# read warnings file
if (warn) {
warnname <- file.path(dir, warnfile)
if (!file.exists(warnname)) {
# no warnings.sso file
message(warnfile, " file not found")
warnrows <- NA
warnlines <- NA
} else {
# read warning.sso file
warnlines <- readLines(warnname, warn = FALSE)
# number of rows isn't equal to number of warnings, just used to
# detect empty file
warnrows <- length(warnlines)
if (verbose && warnrows > 0) {
message("Got warning file. Final line:", tail(warnlines, 1))
}
}
} else {
# chose not to read warning.sso file
if (verbose) {
message("You skipped the warnings file")
}
warnrows <- NA
warnlines <- NA
}
if (verbose) {
message("Finished reading files")
}
flush.console()
# length selectivity is read earlier than other tables because it was used
# to get fleet info this can be moved to join rest of selex stuff after
# SSv3.11 is not supported any more
sizeselex <- match_report_table("LEN_SELEX", 6, header = TRUE, type.convert = TRUE)
# update to size selectivity to naming convention associated with 3.30.01.15
sizeselex <- df.rename(sizeselex,
oldnames = c("fleet", "year", "seas", "gender", "morph", "label"),
newnames = c("Fleet", "Yr", "Seas", "Sex", "Morph", "Label")
)
## read DEFINITIONS section (new in SSv3.20)
## (which_blank = 2 skips the "#" near the end to include the final table)
rawdefs <- match_report_table("DEFINITIONS", 1,
which_blank = 1,
blank_lines = rep_blank_lines
)
# # re-read that section for older models which didn't have a hash
# if ("LIKELIHOOD" %in% rawdefs[, 1]) {
# rawdefs <- match_report_table("DEFINITIONS", 1, which_blank = 1)
# }
# four eras for DEFINITIONS section
# - prior to 3.20: section didn't exist
# - these versions not really supported by r4ss, but might work anyway
# - 3.20 up through 3.24: section was brief with fleet info in rows
# - identify by version < 3.30 & presence of DEFINITIONS
# - 3.30 up through 3.30.11: table of fleet info by column was added
# - identify by version >= 3.30, absence of "Jitter"
# - 3.30.12 to 3.30.20: lots more definitions added
# - identify by presence of "Jitter" and "Fleet_names:" in first column
# - 3.30.21+: fleet info in rows removed, Length_ & Age_comp_error_controls added
# - identify by presence of "Jitter" and absence of "Fleet_names:" in first column
# check for new format for definitions (starting with 3.30.12)
# ("Jitter" is an indicator of the new format)
# placeholders for tables added in 3.30.21
Length_comp_error_controls <- NULL
Age_comp_error_controls <- NULL
if ("Jitter:" %in% rawdefs[["X1"]]) {
get.def <- function(string) {
# function to grab numeric value from 2nd column matching string in 1st column
row <- grep(string, rawdefs[["X1"]])[1]
if (length(row) > 0) {
return(as.numeric(rawdefs[row, 2]))
} else {
return(NULL)
}
}
# apply function above to get a bunch of things
# in some cases, duplicate names are used for backward compatibility
N_seasons <- nseasons <- get.def("N_seasons")
N_sub_seasons <- get.def("N_sub_seasons")
Season_Durations <- seasdurations <- as.numeric(rawdefs[
grep(
"Season_Durations",
rawdefs[["X1"]]
),
1 + 1:nseasons
])
Spawn_month <- spawnmonth <- get.def("Spawn_month")
Spawn_seas <- spawnseas <- get.def("Spawn_seas")
Spawn_timing_in_season <- get.def("Spawn_timing_in_season")
N_areas <- nareas <- get.def("N_areas")
Start_year <- startyr <- get.def("Start_year")
End_year <- endyr <- get.def("End_year")
Retro_year <- get.def("Retro_year")
N_forecast_yrs <- get.def("N_forecast_yrs")
N_sexes <- nsexes <- get.def("N_sexes")
Max_age <- accuage <- get.def("Max_age")
Empirical_wt_at_age <- get.def("Empirical_wt_at_age")
N_bio_patterns <- get.def("N_bio_patterns")
N_platoons <- get.def("N_platoons")
# following quants added in 3.30.13
NatMort_option <- get.def("NatMort")
GrowthModel_option <- get.def("GrowthModel")
Maturity_option <- get.def("Maturity")
Fecundity_option <- get.def("Fecundity")
# end quants added in 3.30.13
Start_from_par <- get.def("Start_from_par")
Do_all_priors <- get.def("Do_all_priors")
Use_softbound <- get.def("Use_softbound")
N_nudata <- get.def("N_nudata")
Max_phase <- get.def("Max_phase")
Current_phase <- get.def("Current_phase")
Jitter <- get.def("Jitter")
ALK_tolerance <- get.def("ALK_tolerance")
# fleetdefs table starts with final "Fleet" in column 1 (within DEFINITIONS)
fleetdefs <- rawdefs[tail(grep("Fleet", rawdefs[["X1"]]), 1):nrow(rawdefs), ]
names(fleetdefs) <- fleetdefs[1, ] # set names equal to first row
fleetdefs <- fleetdefs[-1, ] # remove first row
# remove any blank columns beyond Fleet_name
fleetdefs <- fleetdefs[, 1:grep("fleet_name", tolower(names(fleetdefs)))]
# make values numeric (other than Fleet_name)
fleetdefs <- type.convert(fleetdefs, as.is = TRUE)
fleetdefs <- df.rename(fleetdefs,
oldnames = c("fleet_name"),
newnames = c("Fleet_name")
)
# fleet_type definitions from TPL:
# 1=fleet with catch; 2=discard only fleet with F;
# 3=survey(ignore catch); 4=ignore completely
fleet_type <- fleetdefs[["fleet_type"]]
fleet_timing <- fleetdefs[["timing"]]
fleet_area <- fleetdefs[["area"]]
catch_units <- fleetdefs[["catch_units"]]
## equ_catch_se <- fleetdefs[["equ_catch_se"]]
## catch_se <- fleetdefs[["catch_se"]]
survey_units <- fleetdefs[["survey_units"]]
survey_error <- fleetdefs[["survey_error"]]
fleet_ID <- fleetdefs[["Fleet"]]
IsFishFleet <- fleet_type <= 2 # based on definitions above
nfishfleets <- sum(IsFishFleet)
FleetNames <- fleetdefs[["Fleet_name"]]
nfleets <- max(fleet_ID)
# process some season info
seasfracs <- round(12 * cumsum(seasdurations)) / 12
seasfracs <- seasfracs - seasdurations / 2 # should be mid-point of each season as a fraction of the year
# end DEFINITIONS elements in 3.30.12-3.30.20
if ("Length_comp_error_controls" %in% rawdefs[["X1"]]) {
# read table of length comp error controls (added 3.30.21)
Length_comp_error_controls <-
match_report_table("Length_comp_error_controls",
adjust1 = 1,
header = TRUE, type.convert = TRUE
)
if (nrow(Length_comp_error_controls) > 0) {
present_Length_comp_error_controls <- TRUE
}
}
# if that table has information in it then proceed with renaming columns
if (exists("Length_comp_error_controls") & exists("present_Length_comp_error_controls")) {
# rename "NoName" columns
names(Length_comp_error_controls)[names(Length_comp_error_controls) == "NoName"] <-
c("NoName", "Fleet_name")
# remove extra column with hash symbols
Length_comp_error_controls <- Length_comp_error_controls %>%
dplyr::select(-NoName)
}
if ("Age_comp_error_controls" %in% rawdefs[["X1"]]) {
# read table of age comp error controls (added 3.30.21)
Age_comp_error_controls <-
match_report_table("Age_comp_error_controls",
adjust1 = 1,
header = TRUE, type.convert = TRUE
)
if (nrow(Age_comp_error_controls) > 0) {
present_Age_comp_error_controls <- TRUE
}
}
# if that table has information in it then proceed with renaming columns
if (exists("Age_comp_error_controls") & exists("present_Age_comp_error_controls") > 0) {
# rename "NoName" columns
names(Age_comp_error_controls)[names(Age_comp_error_controls) == "NoName"] <-
c("NoName", "Fleet_name")
# remove extra column with hash symbols
Age_comp_error_controls <- Age_comp_error_controls %>%
dplyr::select(-NoName)
}
# end read of 3.30.12+ DEFINITIONS
} else {
# old format for DEFINITIONS (up through 3.30.11)
# get season stuff
nseasons <- as.numeric(rawdefs[grep("N_seasons", rawdefs[, 1]), 2])
seasdurations <- as.numeric(rawdefs[grep("Season_Durations", rawdefs[, 1]), 1 + 1:nseasons])
seasfracs <- round(12 * cumsum(seasdurations)) / 12
seasfracs <- seasfracs - seasdurations / 2 # should be mid-point of each season as a fraction of the year
if (SS_versionNumeric >= 3.30) {
# version 3.3 (fleet info switched from columns to rows starting with 3.30)
FleetNames <- as.character(rawdefs[grep("fleet_names", rawdefs[["X1"]]), -1])
FleetNames <- FleetNames[!is.na(FleetNames) & FleetNames != ""]
# get fleet info
nfleets <- length(FleetNames)
fleet_ID <- 1:nfleets
fleetdefs <- tail(rawdefs, nfleets + 1)
fleetdefs <- fleetdefs[, apply(rawdefs[-(1:3), ], 2, emptytest) < 1]
fleetdefs[fleetdefs == ""] <- NA
if (fleetdefs[1, 1] == "#_rows") { # up to version 3.30.11
fleetdefs <- fleetdefs[-1, 1:7] # hardwiring dimensions and names
names(fleetdefs) <- c(
"fleet_type", "timing", "area", "catch_units",
"catch_mult", "survey_units", "survey_error"
)
} else {
# additional columns starting with 3.30.12
# column names are now dynamic
names(fleetdefs) <- fleetdefs[1, ]
names(fleetdefs)[1] <- "fleet"
fleetdefs <- fleetdefs[-1, ]
}
fleetdefs <- type.convert(fleetdefs, as.is = TRUE)
# fleet_type definitions from TPL:
# 1=fleet with catch; 2=discard only fleet with F;
# 3=survey(ignore catch); 4=ignore completely
fleet_type <- fleetdefs[["fleet_type"]]
fleet_timing <- fleetdefs[["timing"]]
fleet_area <- fleetdefs[["area"]]
catch_units <- fleetdefs[["catch_units"]]
equ_catch_se <- fleetdefs[["equ_catch_se"]]
catch_se <- fleetdefs[["catch_se"]]
survey_units <- fleetdefs[["survey_units"]]
survey_error <- fleetdefs[["survey_error"]]
IsFishFleet <- fleet_type <= 2 # based on definitions above
# end of 3.30 - 3.30.11 version of DEFINITIONS
} else {
# version 3.20-3.24
# get fleet info
fleetdefs <- rawdefs[-(1:3), apply(rawdefs[-(1:3), ], 2, emptytest) < 1]
fleetdefs[fleetdefs == ""] <- NA
lab <- fleetdefs[["X1"]]
fleet_ID <- as.numeric(fleetdefs[grep("fleet_ID", lab), -1])
names(fleetdefs) <- c("Label", paste("Fleet", fleet_ID, sep = ""))
FleetNames <- as.character(fleetdefs[grep("fleet_names", lab), -1])
fleet_area <- as.numeric(fleetdefs[grep("fleet_area", lab), -1])
catch_units <- as.numeric(fleetdefs[grep("Catch_units", lab), -1])
catch_error <- as.numeric(fleetdefs[grep("Catch_error", lab), -1])
survey_units <- as.numeric(fleetdefs[grep("Survey_units", lab), -1])
survey_error <- as.numeric(fleetdefs[grep("Survey_error", lab), -1])
IsFishFleet <- !is.na(catch_units)
nfleets <- length(FleetNames)
}
# positions of timeseries section (used in various places below)
begin <- match_report_line("TIME_SERIES") + 2
end <- match_report_line("SPR_series") - 2
# more dimensions
nfishfleets <- sum(IsFishFleet)
nsexes <- length(unique(as.numeric(sizeselex[["Sex"]])))
nareas <- max(as.numeric(rawrep[begin:end, 1]))
# startyr is the 'initial' year not including VIRG or INIT years
startyr <- min(as.numeric(rawrep[begin:end, 2])) + 2
temptime <- rawrep[begin:end, 2:3]
# endyr is the beginning of the last year of the normal timeseries
endyr <- max(as.numeric(temptime[temptime[, 2] == "TIME", 1]))
tempaccu <- as.character(rawrep[match_report_line("Natural_Mortality") + 1, -(1:5)])
accuage <- max(as.numeric(tempaccu[tempaccu != ""]))
} # end read of DEFINITIONS
# compositions
if (comp) { # skip this stuff if no CompReport.sso file
# read header section of file to get bin information
# first, figure out how many columns are needed
ncols.compfile <- get_ncol(compfile, skip = 3)
# now read table using the appropriate number of columns
allbins <- read.table(
file = compfile, col.names = 1:ncols.compfile, fill = TRUE,
colClasses = "character", skip = 3, nrows = 25
)
# lbins is data length bins
lbins <- as.numeric(allbins[grep("Size_Bins_dat", allbins[, 1]) + 2, -1])
lbins <- lbins[!is.na(lbins)]
nlbins <- length(lbins)
# lbinspop is Pop_len_mid used for selex and bio quantities
lbinspop <- as.numeric(allbins[grep("Size_Bins_pop", allbins[, 1]) + 2, -1])
lbinspop <- lbinspop[!is.na(lbinspop)]
nlbinspop <- length(lbinspop)
Lbin_method <- as.numeric(allbins[match_report_line(
"Method_for_Lbin_definition",
allbins[, 1]
), 2])
if (compend == compskip + 2) {
message("It appears that there is no composition data in CompReport.sso")
comp <- FALSE # turning off switch to function doesn't look for comp data later on
agebins <- NA
sizebinlist <- NA
nagebins <- length(agebins)
} else {
# read composition database
# figure out number of columns based on header row
col.names <- as.character(read.table(
file = compfile, skip = compskip,
nrows = 1, colClasses = "character"
))
rawcompdbase <- read.table(
file = compfile, col.names = col.names, fill = TRUE,
colClasses = "character", skip = compskip, nrows = -1
)
names(rawcompdbase) <- rawcompdbase[1, ]
names(rawcompdbase)[names(rawcompdbase) == "Used?"] <- "Used"
endfile <- grep("End_comp_data", rawcompdbase[, 1])
compdbase <- rawcompdbase[2:(endfile - 2), ] # subtract header line and last 2 lines
# update to naming convention associated with current SS version
# most changes associated with 3.30.12,
# Nsamp_adj added in 3.30.15
compdbase <- df.rename(compdbase,
oldnames = c("Pick_sex", "Pick_gender", "Gender", "N", "Rep"),
newnames = c("Sexes", "Sexes", "Sex", "Nsamp_adj", "Repl.")
)
# remove duplicate rows for unsexed fish
# (issue was introduced in SS3 version 3.30.20 and discovered
# after the release of 3.30.21)
# all values identical except for Cum_obs and Cum_exp
duplicates <- compdbase %>%
dplyr::select(-Cum_obs, -Cum_exp) %>%
duplicated()
if (verbose) {
message(
"Removing ", sum(duplicates), " out of ", nrow(compdbase),
" rows in CompReport.sso which are duplicates."
)
}
compdbase <- compdbase[!duplicates, ]
# done removing duplicates
# "Sexes" (formerly "Pick_sex" or "Pick_gender"):
# 0 (unknown), 1 (female), 2 (male), or 3 (females and then males)
# this is the user input in the data file
#
# "Sex" (formerly "Gender"): 1 (unknown or female), or 2 (male)
# this is a code used internally by SS
#
# add new column in code below:
# "sex": 0 (unknown), 1 (female), or 2 (male)
# this is the code used by r4ss
compdbase[["sex"]] <- compdbase[["Sexes"]]
compdbase[["sex"]][compdbase[["Sexes"]] == 3] <- compdbase[["Sex"]][compdbase[["Sexes"]] == 3]
# make correction to tag output associated with 3.24f (fixed in later versions)
if (substr(SS_version, 1, 9) == "SS-V3.24f") {
if (!hidewarn) {
message("Correcting for bug in tag data output associated with SSv3.24f\n")
}
tag1rows <- compdbase[["Sexes"]] == "TAG1"
if (any(tag1rows)) {
tag1 <- compdbase[tag1rows, ]
tag1new <- tag1
tag1new[, 4:23] <- tag1new[, 3:22] # shift columns over
tag1new[["Yr.S"]] <- tag1new[["Yr"]] # move Yr.S
tag1new[["Yr"]] <- floor(as.numeric(tag1new[["Yr"]])) # turn Yr.S into Yr
compdbase[tag1rows, ] <- tag1new
}
}
# remove rows within missing observations (beginning of each section)
compdbase <- compdbase[compdbase[["Obs"]] != "", ]
# replace underscores with NA
compdbase[compdbase == "_"] <- NA
# replace any NA values in the Used? column with "yes".
compdbase[["Used"]][is.na(compdbase[["Used"]])] <- "yes"
# add SuprPer column for versions where it didn't exist
if (!("SuprPer" %in% names(compdbase))) {
compdbase[["SuprPer"]] <- "No"
}
compdbase[["SuprPer"]][is.na(compdbase[["SuprPer"]])] <- "No"
n <- sum(is.na(compdbase[["Nsamp_adj"]]) &
compdbase[["Used"]] != "skip" &
compdbase[["Kind"]] != "TAG2")
if (n > 0) {
warning(
n, " rows from composition database have NA sample size\n",
"but are not part of a super-period. (Maybe input as N=0?)\n"
)
}
compdbase <- type.convert(compdbase, as.is = TRUE)
# configure seasons
if (nseasons > 1) {
compdbase[["YrSeasName"]] <- paste(floor(compdbase[["Yr"]]), "s", compdbase[["Seas"]], sep = "")
} else {
compdbase[["YrSeasName"]] <- compdbase[["Yr"]]
}
# starting with SSv3.24a, the Yr.S column is already in the output, otherwise fill it in
if (!"Yr.S" %in% names(compdbase)) {
if (any(floor(compdbase[["Yr"]]) != compdbase[["Yr"]])) {
# in some cases, year is already a decimal number
compdbase[["Yr.S"]] <- compdbase[["Yr"]]
compdbase[["Yr"]] <- floor(compdbase[["Yr"]])
} else {
# add fraction of season to distinguish between samples
compdbase[["Yr.S"]] <- compdbase[["Yr"]] + (0.5 / nseasons) * compdbase[["Seas"]]
}
}
# deal with Lbins
compdbase[["Lbin_range"]] <- compdbase[["Lbin_hi"]] - compdbase[["Lbin_lo"]]
compdbase[["Lbin_mid"]] <- 0.5 * (compdbase[["Lbin_lo"]] + compdbase[["Lbin_hi"]])
# divide into objects by kind
Lbin_range <- compdbase[["Lbin_range"]]
if (is.null(Lbin_range)) { # if/else required to avoid warning if no comp data at all
notconditional <- TRUE
conditional <- FALSE
} else {
notconditional <- !is.na(Lbin_range) & Lbin_range > aalmaxbinrange
conditional <- !is.na(Lbin_range) & Lbin_range <= aalmaxbinrange
}
if ("skip" %in% compdbase[["SuprPer"]]) {
# formatting error in some SS 3.30 versions caused skip to appear in
# the wrong column, so copy to the right one
compdbase[["Used"]][compdbase[["SuprPer"]] == "skip"] <- "skip"
# probability of being a super-period is low, so assigning "No"
# to assist with identification of ghost comps below
compdbase[["SuprPer"]][compdbase[["SuprPer"]] == "No"]
}
if (SS_versionNumeric >= 3.22) {
# new designation of ghost fleets from negative samp size to negative fleet
lendbase <- compdbase[compdbase[["Kind"]] == "LEN" &
compdbase[["Used"]] != "skip", ]
sizedbase <- compdbase[compdbase[["Kind"]] == "SIZE" &
compdbase[["Used"]] != "skip", ]
agedbase <- compdbase[compdbase[["Kind"]] == "AGE" &
compdbase[["Used"]] != "skip" & notconditional, ]
condbase <- compdbase[compdbase[["Kind"]] == "AGE" &
compdbase[["Used"]] != "skip" & conditional, ]
morphcompdbase <- compdbase[compdbase[["Kind"]] == "GP%" &
compdbase[["Used"]] != "skip", ]
} else {
# older designation of ghost fleets from negative samp size to negative fleet
lendbase <- compdbase[compdbase[["Kind"]] == "LEN" &
(compdbase[["SuprPer"]] == "Sup" |
(!is.na(compdbase[["Nsamp_adj"]]) & compdbase[["Nsamp_adj"]] > 0)), ]
sizedbase <- compdbase[compdbase[["Kind"]] == "SIZE" &
(compdbase[["SuprPer"]] == "Sup" |
(!is.na(compdbase[["Nsamp_adj"]]) & compdbase[["Nsamp_adj"]] > 0)), ]
agedbase <- compdbase[compdbase[["Kind"]] == "AGE" &
(compdbase[["SuprPer"]] == "Sup" |
(!is.na(compdbase[["Nsamp_adj"]]) & compdbase[["Nsamp_adj"]] > 0)) &
notconditional, ]
condbase <- compdbase[compdbase[["Kind"]] == "AGE" &
(compdbase[["SuprPer"]] == "Sup" |
(!is.na(compdbase[["Nsamp_adj"]]) & compdbase[["Nsamp_adj"]] > 0)) &
conditional, ]
}
ghostagedbase <- compdbase[compdbase[["Kind"]] == "AGE" &
compdbase[["Used"]] == "skip" &
compdbase[["SuprPer"]] == "No" & notconditional, ]
ghostcondbase <- compdbase[compdbase[["Kind"]] == "AGE" &
compdbase[["Used"]] == "skip" &
compdbase[["SuprPer"]] == "No" & conditional, ]
ghostlendbase <- compdbase[compdbase[["Kind"]] == "LEN" &
compdbase[["Used"]] == "skip" &
compdbase[["SuprPer"]] == "No", ]
compdbase[["Kind"]][compdbase[["Kind"]] == "L@A" & compdbase[["Ageerr"]] < 0] <- "W@A"
# extra processing for sizedbase (generalized size composition data):
# SS overloads some columns with integer codes, decoded here by indexing
if (!is.null(sizedbase) && nrow(sizedbase) > 0) {
  # Lbin_lo is used as a code: 1 -> "bio" (biomass), 2 -> "num" (numbers)
  sizedbase[["bio.or.num"]] <- c("bio", "num")[sizedbase[["Lbin_lo"]]]
  # Lbin_hi is used as a code for bin units: 1=kg, 2=lb, 3=cm, 4=in
  sizedbase[["units"]] <- c("kg", "lb", "cm", "in")[sizedbase[["Lbin_hi"]]]
  # for generalized size comps the Ageerr column holds the size method number
  sizedbase[["method"]] <- sizedbase[["Ageerr"]]
  if (any(sizedbase[["units"]] %in% c("lb", "in"))) {
    if (verbose) {
      message(
        "Note: converting bins in generalized size comp data ",
        " in sizedbase back to the original units of lbs or inches."
      )
    }
  }
  # convert bins from kg to lbs when that was the original unit
  sizedbase[["Bin"]][sizedbase[["units"]] == "lb"] <-
    sizedbase[["Bin"]][sizedbase[["units"]] == "lb"] / 0.4536
  # convert bins from cm to inches when that was the original unit
  sizedbase[["Bin"]][sizedbase[["units"]] == "in"] <-
    sizedbase[["Bin"]][sizedbase[["units"]] == "in"] / 2.54
  # build a list of the unique bins associated with each size method;
  # methods with no data get a NULL entry
  # NOTE(review): 1:max(...) assumes method numbers are >= 1 -- confirm
  sizebinlist <- list()
  for (imethod in 1:max(sizedbase[["method"]])) {
    tmp <- sort(unique(sizedbase[["Bin"]][sizedbase[["method"]] == imethod]))
    if (length(tmp) == 0) tmp <- NULL
    sizebinlist[[paste("size_method_", imethod, sep = "")]] <- tmp
  }
} else {
  # no generalized size comp data present
  sizebinlist <- NA
}
if (is.null(compdbase[["Nsamp_adj"]])) {
good <- TRUE
} else {
good <- !is.na(compdbase[["Nsamp_adj"]])
}
ladbase <- compdbase[compdbase[["Kind"]] == "L@A" & good, ]
wadbase <- compdbase[compdbase[["Kind"]] == "W@A" & good, ]
tagdbase1 <- compdbase[compdbase[["Kind"]] == "TAG1", ]
tagdbase2 <- compdbase[compdbase[["Kind"]] == "TAG2", ]
# consider range of bins for conditional age at length data
if (verbose) {
message(
"CompReport file separated by this code as follows",
" (rows = Ncomps*Nbins):\n",
if (nrow(lendbase) > 0) {
paste0(
" ", nrow(lendbase),
" rows of length comp data\n"
)
},
if (nrow(sizedbase) > 0) {
paste0(
" ", nrow(sizedbase),
" rows of generalized size comp data\n"
)
},
if (nrow(agedbase) > 0) {
paste0(
" ", nrow(agedbase),
" rows of age comp data\n"
)
},
if (nrow(condbase) > 0) {
paste0(
" ", nrow(condbase),
" rows of conditional age-at-length data\n"
)
},
if (nrow(ghostagedbase) > 0) {
paste0(
" ", nrow(ghostagedbase),
" rows of ghost fleet age comp data\n"
)
},
if (nrow(ghostcondbase) > 0) {
paste0(
" ", nrow(ghostcondbase),
" rows of ghost fleet conditional age-at-length data\n"
)
},
if (nrow(ghostlendbase) > 0) {
paste0(
" ", nrow(ghostlendbase),
" rows of ghost fleet length comp data\n"
)
},
if (nrow(ladbase) > 0) {
paste0(
" ", nrow(ladbase),
" rows of mean length at age data\n"
)
},
if (nrow(wadbase) > 0) {
paste0(
" ", nrow(wadbase),
" rows of mean weight at age data\n"
)
},
if (nrow(tagdbase1) > 0) {
paste0(
" ", nrow(tagdbase1),
" rows of 'TAG1' comp data\n"
)
},
if (nrow(tagdbase2) > 0) {
paste0(
" ", nrow(tagdbase2),
" rows of 'TAG2' comp data"
)
},
if (nrow(morphcompdbase) > 0) {
paste0(
" ", nrow(morphcompdbase),
" rows of morph comp data"
)
}
)
}
# convert bin indices to true lengths
if (nrow(agedbase) > 0) {
Lbin_ranges <- as.data.frame(table(agedbase[["Lbin_range"]]))
names(Lbin_ranges)[1] <- "Lbin_hi-Lbin_lo"
if (length(unique(agedbase[["Lbin_range"]])) > 1) {
warning(
"different ranges of Lbin_lo to Lbin_hi found in age comps.\n",
paste(utils::capture.output(print(Lbin_ranges)), collapse = "\n"),
"\n consider increasing 'aalmaxbinrange' to designate\n",
"some of these data as conditional age-at-length."
)
}
agebins <- sort(unique(agedbase[["Bin"]][!is.na(agedbase[["Bin"]])]))
} else {
if (nrow(condbase) > 0) {
agebins <- sort(unique(condbase[["Bin"]][!is.na(condbase[["Bin"]])]))
} else {
agebins <- NA
}
}
nagebins <- length(agebins)
}
} else {
# if comp option is turned off
lbins <- NA
nlbins <- NA
#### need to get length bins from somewhere
## temp <- rawrep[grep("NUMBERS_AT_LENGTH",rawrep[,1])+1,]
## lbinspop <- as.numeric(temp[temp!=""][-(1:11)])
## nlbinspop <- length(lbinspop)
##
#### if natlen were already defined, it could be
## lbinspop <- as.numeric(names(natlen)[-c(1:11)])
lbinspop <- NA
nlbinspop <- ncol(sizeselex) - 5 # hopefully this works alright
agebins <- NA
nagebins <- NA
Lbin_method <- 2
sizebinlist <- NA
}
# info on growth morphs (see also section setting mainmorphs below)
morph_indexing <- match_report_table("MORPH_INDEXING", 1,
header = TRUE, type.convert = TRUE
)
# rename some headers to match output from most recent SS versions
morph_indexing <- df.rename(morph_indexing,
oldnames = c("Gpattern", "Bseas", "BirthSeason", "Gender"),
newnames = c("GP", "BirthSeas", "BirthSeas", "Sex")
)
if (!is.null(morph_indexing)) {
# calculate number of growth patterns
ngpatterns <- max(morph_indexing[["GP"]])
} else {
ngpatterns <- NULL
}
if (verbose) {
message("Finished dimensioning")
}
flush.console()
# stats list: items that are output to the GUI (if printstats==T) for a quick summary of results
stats <- list()
stats[["SS_version"]] <- SS_version
stats[["SS_versionshort"]] <- SS_versionshort
stats[["SS_versionNumeric"]] <- SS_versionNumeric
stats[["StartTime"]] <- paste(as.character(match_report_table("StartTime", 0, "StartTime", 0, cols = 1:6)), collapse = " ")
stats[["RunTime"]] <- paste(as.character(match_report_table("StartTime", 2, "StartTime", 2, cols = 4:9)), collapse = " ")
# data return object to fill in various things
returndat <- list()
# input files
tempfiles <- match_report_table("Data_File", 0, "Control_File", 0, cols = 1:2)
stats[["Files_used"]] <- paste(c(tempfiles[1, ], tempfiles[2, ]), collapse = " ")
returndat[["Data_File"]] <- tempfiles[1, 2]
returndat[["Control_File"]] <- tempfiles[2, 2]
# log determinant of the Hessian (previously was from ss.cor file)
log_det_hessian <- match_report_table("Hessian", 0,
  "Hessian", 0,
  cols = 2
)
# "Not" is the first word of "Not requested." which appears when the
# Hessian was not estimated (e.g. run with -nohess).
# isTRUE() guards against a NULL or NA result when the "Hessian" line
# is absent from the report file; the bare comparison would otherwise
# produce a zero-length condition and an error in the if().
if (isTRUE(log_det_hessian == "Not")) { # first part of "Not requested."
  covar <- FALSE
  log_det_hessian <- NA
}
# as.numeric() doesn't give warning if value is NA
stats[["log_det_hessian"]] <- as.numeric(log_det_hessian)
# two additional outputs added in 3.30.20
# (also "total_LogL" which is redundant with value in LIKELIHOOD
# table read later)
Final_phase <- match_report_table("Final_phase", 0,
"Final_phase", 0,
cols = 2
)
if (!is.null(Final_phase)) {
stats[["Final_phase"]] <- as.numeric(Final_phase)
}
N_iterations <- match_report_table("N_iterations", 0,
"N_iterations", 0,
cols = 2
)
if (!is.null(N_iterations)) {
stats[["N_iterations"]] <- as.numeric(N_iterations)
}
# check warnings: store count and (possibly truncated) list of warning lines
stats[["Nwarnings"]] <- warnrows
# truncate long warning lists to the first 20 entries plus a note
if (length(warn) > 20) {
  warn <- c(warn[1:20], paste(
    "Note:", length(warn) - 20,
    "additional lines truncated. Look in",
    warnfile,
    "file to see full list."
  ))
}
# NOTE(review): the truncated vector 'warn' is never used below;
# 'warnlines' (read elsewhere in this function) is stored instead,
# so the truncation above appears to have no effect -- confirm whether
# 'warn' was intended here.
stats[["warnings"]] <- warnlines
# likelihoods: read the table of likelihood components, spanning from
# 2 lines below the "LIKELIHOOD" label to 2 lines above the "Fleet:" row
rawlike <- match_report_table("LIKELIHOOD", 2, "Fleet:", -2)
# check for new section added in SS version 3.30.13.04 (2019-05-31)
laplace_line <- which(rawlike[, 1] == "#_info_for_Laplace_calculations")
if (length(laplace_line) > 0) {
  # drop the section-marker row itself from the table
  rawlike <- rawlike[-laplace_line, ]
}
# make numeric, clean up blank values
# (column 2 holds the likelihood values, rounded to 7 significant digits)
like <- data.frame(signif(as.numeric(rawlike[, 2]), digits = 7))
names(like) <- "values"
rownames(like) <- rawlike[, 1]
# column 3 holds the lambda value for each component; blanks become NA
lambdas <- rawlike[, 3]
lambdas[lambdas == ""] <- NA
lambdas <- as.numeric(lambdas)
like[["lambdas"]] <- lambdas
# separate new section added in SS version 3.30.13.04 (2019-05-31):
# rows at/below the Laplace marker position go into their own table
if (length(laplace_line) > 0) {
  stats[["likelihoods_used"]] <- like[1:(laplace_line - 1), ]
  stats[["likelihoods_laplace"]] <- like[laplace_line:nrow(like), ]
} else {
  stats[["likelihoods_used"]] <- like
  stats[["likelihoods_laplace"]] <- NULL
}
# read fleet-specific likelihoods
likelihoods_by_fleet <- match_report_table("Fleet:", 0, header = TRUE)
# there was no space before "Parm_devs_detail" prior to 3.30.15.06
if (!is.null(likelihoods_by_fleet) &&
"Parm_devs_detail" %in% likelihoods_by_fleet[, 1]) {
likelihoods_by_fleet <- match_report_table("Fleet:", 0,
"Parm_devs_detail", -1,
header = TRUE
)
}
# clean up fleet-specific likelihoods
likelihoods_by_fleet[likelihoods_by_fleet == "_"] <- NA
likelihoods_by_fleet <- type.convert(likelihoods_by_fleet, as.is = TRUE)
# replace numeric column names with fleet names
names(likelihoods_by_fleet) <- c("Label", "ALL", FleetNames)
labs <- likelihoods_by_fleet[["Label"]]
# removing ":" at the end of likelihood components
for (irow in seq_along(labs)) {
labs[irow] <- substr(labs[irow], 1, nchar(labs[irow]) - 1)
}
likelihoods_by_fleet[["Label"]] <- labs
stats[["likelihoods_by_fleet"]] <- likelihoods_by_fleet
likelihoods_by_tag_group <- match_report_table("Tag_Group:", 0, header = TRUE)
# check for presence of tag data likelihood which has different column structure
if (!is.null(likelihoods_by_tag_group)) {
# clean up tag group likelihoods
likelihoods_by_tag_group[likelihoods_by_tag_group == "_"] <- NA
likelihoods_by_tag_group <- type.convert(likelihoods_by_tag_group,
as.is = TRUE
)
# rename columns from numbers to "TagGroup_1", etc.
names(likelihoods_by_tag_group) <- c(
"Label", "ALL",
paste0(
"TagGroup_",
names(likelihoods_by_tag_group)[-(1:2)]
)
)
# remove colon from "Tag_Group:"
likelihoods_by_tag_group[["Label"]][1] <- "Tag_Group"
stats[["likelihoods_by_tag_group"]] <- likelihoods_by_tag_group
}
# read detail on parameters devs (if present, 3.30 only)
Parm_devs_detail <- match_report_table("Parm_devs_detail", 1,
header = TRUE, type.convert = TRUE
)
stats[["Parm_devs_detail"]] <- Parm_devs_detail
# parameters
parameters <- match_report_table("PARAMETERS", 1, header = TRUE)
parameters <- df.rename(parameters,
oldnames = c("PR_type", "Prior_Like"),
newnames = c("Pr_type", "Pr_Like")
)
parameters[parameters == "_"] <- NA
parameters[parameters == " "] <- NA
parameters[parameters == "1.#INF"] <- Inf # set infinite values equal to R's infinity
# fix for issue with SSv3.21f
if (SS_versionNumeric == 3.21) {
temp <- names(parameters)
message(
"Inserting new 13th column heading in parameters section",
"due to error in Report.sso in SSv3.21f"
)
temp <- c(temp[1:12], "PR_type_code", temp[-(1:12)])
temp <- temp[-length(temp)]
names(parameters) <- temp
}
# fix issue with missing column in dev output
# associated with at least SS versions 3.30.01 and 3.30.13
if ("Gradient" %in% names(parameters) &&
any(parameters[["Gradient"]] %in% c("dev", "F"))) {
bad <- parameters[["Gradient"]] %in% c("dev", "F")
parameters[["Pr_type"]][bad] <- parameters[["Gradient"]][bad]
parameters[["Gradient"]][bad] <- NA
}
# make values numeric
parameters <- type.convert(parameters, as.is = TRUE)
# convert really old numeric codes to names
# note that codes used in control file for SS version 3.30 don't match
# these from earlier models
# it's possible that SS_output doesn't work for models prior to 3.21, in
# which case this section could be removed
if (SS_versionNumeric < 3.21) {
parameters[["Pr_type_numeric"]] <- parameters[["Pr_type"]]
parameters[["Pr_type"]][parameters[["Pr_type_numeric"]] == -1] <- "No_prior"
parameters[["Pr_type"]][parameters[["Pr_type_numeric"]] == 0] <- "Normal"
parameters[["Pr_type"]][parameters[["Pr_type_numeric"]] == 1] <- "Sym_Beta"
parameters[["Pr_type"]][parameters[["Pr_type_numeric"]] == 2] <- "Full_Beta"
parameters[["Pr_type"]][parameters[["Pr_type_numeric"]] == 3] <- "Log_Norm"
parameters[["Pr_type"]][parameters[["Pr_type_numeric"]] == 4] <- "Log_Norm_adjusted"
}
# fix for duplicate parameter labels in 3.30.03.03,
# not robust to more than 2 growth patterns but probably will be fixed soon
ParmLabels <- parameters[["Label"]]
ParmLabels[duplicated(ParmLabels)] <- paste0(ParmLabels[duplicated(ParmLabels)], "_2")
# end fix
rownames(parameters) <- ParmLabels
if (!is.na(parfile)) {
parline <- read.table(parfile, fill = TRUE, comment.char = "", nrows = 1)
} else {
parline <- matrix(NA, 1, 16)
}
stats[["N_estimated_parameters"]] <- parline[1, 6]
# subset to active parameters only
pars <- parameters[!is.na(parameters[["Active_Cnt"]]), ]
if (nrow(pars) > 0) {
  # flag active parameters whose estimates ended up very close to a bound
  pars[["Afterbound"]] <- ""
  # distance above the lower bound
  pars[["checkdiff"]] <- pars[["Value"]] - pars[["Min"]]
  # distance below the upper bound
  pars[["checkdiff2"]] <- pars[["Max"]] - pars[["Value"]]
  # distance from the midpoint between the bounds
  pars[["checkdiff3"]] <- abs(pars[["Value"]] - (pars[["Max"]] - (pars[["Max"]] - pars[["Min"]]) / 2))
  # Bug fix: the original condition tested checkdiff2 twice and never
  # used checkdiff3; the third test now uses checkdiff3 as apparently
  # intended (value essentially unchanged from the bound midpoint).
  pars[["Afterbound"]][pars[["checkdiff"]] < 0.001 |
    pars[["checkdiff2"]] < 0.001 |
    pars[["checkdiff3"]] < 0.001] <- "CHECK"
  pars[["Afterbound"]][!pars[["Afterbound"]] %in% "CHECK"] <- "OK"
}
stats[["table_of_phases"]] <- table(parameters[["Phase"]])
# subset columns for printed table of estimated parameters
estimated_non_dev_parameters <- pars[, names(pars) %in%
c(
"Value", "Phase", "Min", "Max", "Init", "Prior", "Gradient", "Pr_type",
"Pr_SD", "Pr_Like", "Parm_StDev", "Status", "Afterbound"
)]
# exclude parameters that represent recdevs or other deviations
devnames <- c(
"RecrDev", "InitAge", "ForeRecr",
"DEVadd", "DEVmult", "DEVrwalk", "DEV_MR_rwalk", "ARDEV"
)
# look for rows in table of parameters that have label indicating deviation
devrows <- NULL
for (iname in seq_along(devnames)) {
devrows <- unique(c(devrows, grep(
devnames[iname],
rownames(estimated_non_dev_parameters)
)))
}
# remove any dev rows from table
if (!is.null(devrows) & length(devrows) > 0) {
estimated_non_dev_parameters <- estimated_non_dev_parameters[-devrows, ]
}
# add table to stats that get printed in console
stats[["estimated_non_dev_parameters"]] <- estimated_non_dev_parameters
# Semi-parametric (2D-AR1) selectivity parameters
seldev_pars <- parameters[
grep("ARDEV", parameters[["Label"]], fixed = TRUE),
names(parameters) %in% c("Label", "Value")
]
if (nrow(seldev_pars) == 0) {
# if semi-parametric selectivity IS NOT used
seldev_pars <- NULL
seldev_matrix <- NULL
} else {
# if semi-parametric selectivity IS used
if (any(duplicated(FleetNames))) {
warning(
"Duplicated fleet names will cause only the semi-parametric",
" selectivity to be available for the first of the duplicates."
)
}
# parse parameter labels to get info
# the parameter labels look like like
# Fishery_ARDEV_y1991_A3 (for age-based selectivity)
# or
# Fishery_ARDEV_y1991_Lbin3 (for length-based selectivity)
#
# the code below parses those strings to figure out age vs. length,
# separate the numeric year value and bin number
seldev_label_info <- strsplit(seldev_pars[["Label"]], split = "_")
seldev_label_info <- data.frame(do.call(rbind, lapply(seldev_label_info, rbind)))
# add columns to pars data.frame with info from labels
seldev_pars[["Fleet"]] <- seldev_label_info[["X1"]]
yr_col <- grep("^y\\d\\d\\d\\d$", seldev_label_info[1, ])
type_bin_col <- grep("^[aAlL][[:alpha:]]{0,3}\\d$", seldev_label_info[1, ])
seldev_pars[["Year"]] <- as.numeric(substring(seldev_label_info[[yr_col]], 2))
# note: bin was indicated by "a" for length- and age-based selectivity
# until early 2020 when separate "A" or "Lbin" codes were used
seldev_pars[["Type"]] <- ifelse(
substring(seldev_label_info[[type_bin_col]], 1, 1) %in%
c("A", "a"),
yes = "age",
no = "length"
)
# how many non-numeric digits to skip over in parsing bin value
first_bin_digit <- ifelse(seldev_pars[["Type"]] == "age", 2, 5)
# parse bin (age or length bin)
seldev_pars[["Bin"]] <- as.numeric(substring(seldev_label_info[[type_bin_col]], first_bin_digit))
# remove label column which is redundant with rownames
seldev_pars <- seldev_pars[, -1]
# make matrix
seldev_matrix <- list()
for (fleet in sort(unique(seldev_pars[["Fleet"]]))) {
# subset for specific fleet
seldev_pars_f <- seldev_pars[seldev_pars[["Fleet"]] == fleet, ]
for (type in unique(seldev_pars_f[["Type"]])) {
# subset for type (unlikely to have more than 1 per fleet, but safer this way)
seldev_pars_sub <- seldev_pars_f[seldev_pars_f[["Type"]] == type, ]
seldev_label <- paste0(fleet, "_", type, "_seldevs")
seldev_yrs <- sort(unique(seldev_pars_sub[["Year"]]))
seldev_bins <- sort(unique(seldev_pars_sub[["Bin"]]))
# create empty matrix with labels on each dimension
if (type == "length") {
seldev_matrix[[seldev_label]] <-
matrix(
nrow = length(seldev_yrs), ncol = length(seldev_bins),
dimnames = list(Year = seldev_yrs, Lbin = seldev_bins)
)
}
if (type == "age") {
seldev_matrix[[seldev_label]] <-
matrix(
nrow = length(seldev_yrs), ncol = length(seldev_bins),
dimnames = list(Year = seldev_yrs, Age = seldev_bins)
)
}
# loop over years and bins to fill in matrix
for (y in seldev_yrs) {
for (bin in seldev_bins) {
seldev_matrix[[seldev_label]][paste(y), paste(bin)] <-
seldev_pars_sub[["Value"]][seldev_pars_sub[["Year"]] == y & seldev_pars_sub[["Bin"]] == bin][1]
}
} # end loop over years
} # end loop over types
} # end loop over fleets
} # end check for semi-parametric selectivity
# Dirichlet-Multinomial parameters
# more processing of these parameters is done later in SS_output()
# after info on the comps has been read
DM_pars <- parameters[
grep("ln\\((EffN_mult)|(DM_theta)\\)", parameters[["Label"]]),
names(parameters) %in% c("Value", "Phase", "Min", "Max")
]
# calculate additional values based on estimate parameter
# non-log Theta
DM_pars[["Theta"]] <- exp(DM_pars[["Value"]])
# Theta ratio related to weighting
DM_pars$"Theta/(1+Theta)" <- DM_pars[["Theta"]] / (1 + DM_pars[["Theta"]])
# check the covar.sso file
# this section moved down within SS_output for 3.30.20 to avoid
# reading covar if -nohess used
if (covar) {
covarfile <- file.path(dir, covarfile)
if (!file.exists(covarfile)) {
message("covar file not found, input 'covar' changed to FALSE")
covar <- FALSE
} else {
# time check for CoVar file
covarhead <- readLines(con = covarfile, n = 10)
covarskip <- grep("active-i", covarhead) - 1
covartime <- findtime(covarhead)
# the conversion to R time class below may no longer be necessary as strings should match
if (is.null(covartime) || is.null(repfiletime)) {
message(
"problem comparing the file creation times:\n",
" Report.sso:", repfiletime, "\n",
" covar.sso:", covartime
)
} else {
if (covartime != repfiletime) {
message("covar time:", covartime)
stop(
shortrepfile, " and ", covarfile,
" were from different model runs. Change input to covar=FALSE"
)
}
}
# covar file exists, but has problems
nowrite <- grep("do not write", covarhead)
if (length(nowrite) > 0) {
warning(
"covar file contains the warning\n",
" '", covarhead[nowrite], "'\n",
" input 'covar' changed to FALSE.\n"
)
covar <- FALSE
}
}
}
# read covar.sso file
if (covar) {
CoVar <- read.table(covarfile, header = TRUE, colClasses = c(rep("numeric", 4), rep("character", 4), "numeric"), skip = covarskip)
if (verbose) {
message("Got covar file.")
}
stdtable <- CoVar[CoVar[["Par..j"]] == "Std", c(7, 9, 5)]
names(stdtable) <- c("name", "std", "type")
N_estimated_parameters2 <- sum(stdtable[["type"]] == "Par")
# this section was muddling Derived Quants with Parameters in early version of SSv3.20
# got work-around pending fix from Rick to use of "Par" vs. "Der" in covar file.
if (is.na(stats[["N_estimated_parameters"]])) {
stats[["N_estimated_parameters"]] <- N_estimated_parameters2
} else {
if (stats[["N_estimated_parameters"]] != N_estimated_parameters2) {
warning(
stats[["N_estimated_parameters"]],
" estimated parameters indicated by the par file\n ",
N_estimated_parameters2,
" estimated parameters shown in the covar file\n ",
"Returning the par file value: ", stats[["N_estimated_parameters"]]
)
}
}
# check for NA values (see https://github.com/r4ss/r4ss/issues/830)
if (any(is.na(stdtable[["std"]]))) {
warning(
"NA value for parameter uncertainty found in ",
sum(is.na(stdtable[["std"]])),
" rows of covar.sso file. ",
"First par with NA: ",
stdtable[["name"]][is.na(stdtable[["std"]])]
)
}
Nstd <- sum(stdtable[["std"]] > 0, na.rm = TRUE)
checkbadrun <- unique(stdtable[["std"]])
if (length(checkbadrun) == 1) {
if (checkbadrun %in% c(NA, "NaN", "na")) {
stop(paste0(
"No quantities were estimated in the covar file \nand all",
"estimates of standard deviation are ", checkbadrun, ". \nTry re-running",
"stock synthesis."
))
}
}
if (Nstd <= 1) {
stop("Too few estimated quantities in covar file (n=", Nstd, "). Change input to covar=FALSE.")
}
} else {
if (verbose) {
message("You skipped the covar file")
}
}
flush.console()
# read weight-at-age file
wtatage <- NULL
if (readwt) {
wtfile <- file.path(dir, wtfile)
wtatage <- SS_readwtatage(file = wtfile, verbose = verbose)
}
# read MCMC output
if (is.null(dir.mcmc)) {
# if no directory provided, set results to NULL
mcmc <- NULL
} else {
# directory provided, check to make sure it exsists
dir.mcmc.full <- NULL
if (dir.exists(dir.mcmc)) {
dir.mcmc.full <- dir.mcmc
}
if (dir.exists(file.path(dir, dir.mcmc))) {
dir.mcmc.full <- file.path(dir, dir.mcmc)
}
# warn if directory doesn't exist
if (is.null(dir.mcmc.full)) {
warning(
"'dir.mcmc' directory not found either as an absolute path ",
"or relative to the 'dir' input"
)
mcmc <- NULL
} else {
# check for presence of posteriors file
if ("posteriors.sso" %in% dir(dir.mcmc.full)) {
# run function to read posteriors.sso and derived_posteriors.sso
if (verbose) {
message("Running 'SSgetMCMC' to get MCMC output")
}
mcmc <- SSgetMCMC(dir = dir.mcmc.full)
} else {
warning(
"skipping reading MCMC output because posterior.sso file",
" not found in \n",
dir.mcmc.full
)
mcmc <- NULL
}
}
}
# derived quantities
der <- match_report_table("DERIVED_QUANTITIES", 4, header = TRUE)
# make older SS output names match current SS output conventions
der <- df.rename(der, oldnames = "LABEL", newnames = "Label")
# remove extra row (don't remember why it occurs)
der <- der[der[["Label"]] != "Bzero_again", ]
der[der == "_"] <- NA
der[der == ""] <- NA
# remove bad rows that were present in 3.30-beta in September 2016
# (note that spelling differs from "Parm_devs_detail" after likelihood)
test <- grep("Parm_dev_details", der[["Label"]])
if (length(test) > 0) {
der <- der[1:(min(test) - 1), ]
}
# convert columns to numeric
der <- type.convert(der, as.is = TRUE)
# replace SPB with SSB as changed in SS version 3.30.10.00 (29 Nov. 2017)
der[["Label"]] <- gsub("SPB_", "SSB_", der[["Label"]], fixed = TRUE)
# set rownames equal to Label column
# (skipping any duplicates, such as ln(SPB)_YYYY for models with limited year range)
rownames(der)[!duplicated(der[["Label"]])] <- der[["Label"]][!duplicated(der[["Label"]])]
# get management ratio labels from top of DERIVED_QUANTITIES
managementratiolabels <- match_report_table("DERIVED_QUANTITIES", 1, "DERIVED_QUANTITIES", 3, cols = 1:2)
names(managementratiolabels) <- c("Ratio", "Label")
# new message about how forecast selectivity is modeled added in 3.30.06
# (has impact on read of time-varying parameters below)
forecast_selectivity <- grep("forecast_selectivity", rawrep[, 1], value = TRUE)
if (length(forecast_selectivity) == 0) {
forecast_selectivity <- NA
offset <- -1
} else {
offset <- -2
}
# time-varying parameters
MGparmAdj <- match_report_table("MGparm_By_Year_after_adjustments", 1,
header = TRUE, type.convert = TRUE
)
# make older SS output names match current SS output conventions
MGparmAdj <- df.rename(MGparmAdj, oldnames = "Year", newnames = "Yr")
# time-varying size-selectivity parameters
SelSizeAdj <- match_report_table("selparm(Size)_By_Year_after_adjustments", 2)
if (is.null(SelSizeAdj) || nrow(SelSizeAdj) <= 2) {
SelSizeAdj <- NULL
} else {
SelSizeAdj <- SelSizeAdj[, apply(SelSizeAdj, 2, emptytest) < 1]
SelSizeAdj[SelSizeAdj == ""] <- NA
# make values numeric
SelSizeAdj <- type.convert(SelSizeAdj, as.is = TRUE)
# provide column names (first test for extra column added in 3.30.06.02)
if (rawrep[match_report_line("selparm(Size)_By_Year_after_adjustments") + 1, 3]
== "Change?") {
names(SelSizeAdj) <- c(
"Fleet", "Yr", "Change?",
paste0("Par", 1:(ncol(SelSizeAdj) - 3))
)
} else {
names(SelSizeAdj) <- c(
"Fleet", "Yr",
paste0("Par", 1:(ncol(SelSizeAdj) - 2))
)
}
}
# time-varying age-selectivity parameters
SelAgeAdj <- match_report_table("selparm(Age)_By_Year_after_adjustments", 2)
if (!is.null(SelAgeAdj) && nrow(SelAgeAdj) > 2) {
SelAgeAdj <- SelAgeAdj[, apply(SelAgeAdj, 2, emptytest) < 1]
SelAgeAdj[SelAgeAdj == ""] <- NA
# test for empty table
if (SelAgeAdj[1, 1] == "RECRUITMENT_DIST") {
SelAgeAdj <- NA
} else {
# make values numeric
SelAgeAdj <- type.convert(SelAgeAdj, as.is = TRUE)
names(SelAgeAdj) <- c("Flt", "Yr", paste0("Par", 1:(ncol(SelAgeAdj) - 2)))
# provide rownames (after testing for extra column added in 3.30.06.02)
if (rawrep[match_report_line("selparm(Age)_By_Year_after_adjustments") + 1, 3]
== "Change?") {
names(SelAgeAdj) <- c(
"Fleet", "Yr", "Change?",
paste0("Par", 1:(ncol(SelAgeAdj) - 3))
)
} else {
names(SelAgeAdj) <- c(
"Fleet", "Yr",
paste0("Par", 1:(ncol(SelAgeAdj) - 2))
)
}
}
} else {
SelAgeAdj <- NULL
}
# recruitment distribution
recruitment_dist <- match_report_table("RECRUITMENT_DIST", 1,
header = TRUE, type.convert = TRUE
)
if (!is.null(recruitment_dist)) {
# calculate first season with recruitment
if ("Frac/sex" %in% names(recruitment_dist)) {
first_seas_with_recruits <-
min(recruitment_dist[["Seas"]][recruitment_dist$"Frac/sex" > 0])
} else {
first_seas_with_recruits <-
min(recruitment_dist[["Seas"]][recruitment_dist[["Value"]] > 0])
}
# starting in SSv3.24Q there are additional tables
# (in v3.30 RECRUITMENT_DIST_BENCHMARK was renamed RECRUITMENT_DIST_Bmark
# and RECRUITMENT_DIST_FORECAST was renamed RECRUITMENT_DIST_endyr)
recruit_dist_Bmark <- match_report_table("RECRUITMENT_DIST_B", 1,
header = TRUE, type.convert = TRUE
)
if (!is.null(recruit_dist_Bmark)) {
if (SS_versionNumeric < 3.30) {
recruit_dist_endyr <- match_report_table("RECRUITMENT_DIST_FORECAST", 1,
header = TRUE, type.convert = TRUE
)
} else {
recruit_dist_endyr <- match_report_table("RECRUITMENT_DIST_endyr", 1,
header = TRUE, type.convert = TRUE
)
# fix needed for 3.30.19 and 3.30.19.01 (fixed in future versions of SS3)
if (length(grep("RECRUITMENT_DIST_TIMESERIES", recruit_dist_endyr[["Settle#"]])) == 1) {
tmp_brk_line <- grep("RECRUITMENT_DIST_TIMESERIES", recruit_dist_endyr[["Settle#"]]) - 1
recruit_dist_endyr <- recruit_dist_endyr[seq_len(tmp_brk_line), ]
}
}
# bundle original and extra tables into a list
recruitment_dist <- list(
recruit_dist = recruitment_dist,
recruit_dist_Bmark = recruit_dist_Bmark,
recruit_dist_endyr = recruit_dist_endyr
)
}
}
# max gradient
stats[["maximum_gradient_component"]] <-
as.numeric(match_report_table("Convergence_Level", 0,
"Convergence_Level", 0,
cols = 2
))
# parameters with highest gradients (3.30 only)
if ("Gradient" %in% names(parameters)) {
if (any(!is.na(parameters[["Gradient"]]))) {
# number of gradients to report is 5 (an arbitrary choice),
# or fewer if fewer than 5 parameters estimated.
ngrads <- min(5, max(parameters[["Active_Cnt"]], na.rm = TRUE))
# add highest gradients to table of stats that get printed to the console
stats[["parameters_with_highest_gradients"]] <-
head(parameters[
order(abs(parameters[["Gradient"]]), decreasing = TRUE),
c("Value", "Gradient")
], n = 5)
}
}
# sigma_R
# accounting for additional Bmsy/Bzero line introduced in 3.24U
# should be now robust up through 3.24AZ (if that ever gets created)
if (SS_versionNumeric >= 3.30 |
substring(SS_version, 1, 9) %in% paste0("SS-V3.24", LETTERS[21:26]) |
substring(SS_version, 1, 10) %in% paste0("SS-V3.24A", LETTERS)) {
last_row_index <- 11
} else {
last_row_index <- 10
}
srhead <- match_report_table("SPAWN_RECRUIT", 0,
"SPAWN_RECRUIT", last_row_index,
cols = 1:6
)
# account for extra blank line in early 3.30 versions (at least 3.30.01)
if (all(srhead[7, ] == "")) {
last_row_index <- 12
srhead <- match_report_table("SPAWN_RECRUIT", 0,
"SPAWN_RECRUIT", last_row_index,
cols = 1:6
)
}
if (is.null(srhead)) {
# if there's no SPAWN_RECRUIT section (presumably because minimal
# output was chosen in the starter file)
rmse_table <- NULL
breakpoints_for_bias_adjustment_ramp <- NULL
sigma_R_in <- parameters["SR_sigmaR", "Value"]
} else {
# if SPAWN_RECRUIT is present
# get table of info on root mean squared error of recdevs (rmse)
rmse_table <- as.data.frame(srhead[-(1:(last_row_index - 1)), 1:5])
rmse_table <- rmse_table[!grepl("SpawnBio", rmse_table[, 2]), ]
rmse_table <- type.convert(rmse_table, as.is = TRUE)
names(rmse_table) <- srhead[last_row_index - 1, 1:5]
names(rmse_table)[4] <- "RMSE_over_sigmaR"
row.names(rmse_table) <- NULL
# info on sigmaR as input or estimated
sigma_R_in <- as.numeric(srhead[grep("sigmaR", srhead[, 2]), 1])
# info on recdev method
if (any(srhead[1, ] == "RecDev_method:")) {
RecDev_method <- srhead[1, which(srhead[1, ] == "RecDev_method:") + 1] %>% as.numeric()
} else {
RecDev_method <- NULL
}
# Bias adjustment ramp
biascol <- grep("breakpoints_for_bias", srhead)
breakpoints_for_bias_adjustment_ramp <- srhead[
grep("breakpoints_for_bias", srhead[, biascol]), 1:5
]
colnames(breakpoints_for_bias_adjustment_ramp) <- c(
"last_yr_early",
"first_yr_full", "last_yr_full", "first_yr_recent", "max_bias_adj"
)
rownames(breakpoints_for_bias_adjustment_ramp) <- NULL
}
## Spawner-recruit curve
# read SPAWN_RECRUIT table
raw_recruit <- match_report_table("SPAWN_RECRUIT", last_row_index + 1)
if (!is.null(raw_recruit) && raw_recruit[1, 1] == "S/Rcurve") {
raw_recruit <- match_report_table("SPAWN_RECRUIT", last_row_index)
}
# account for extra blank line in 3.30.01 (and maybe similar versions)
if (!is.null(raw_recruit) &&
nrow(raw_recruit) < length(startyr:endyr)) {
raw_recruit <- match_report_table("SPAWN_RECRUIT", last_row_index + 1,
which_blank = 2
)
if (raw_recruit[1, 1] == "S/Rcurve") {
raw_recruit <- match_report_table("SPAWN_RECRUIT", last_row_index,
which_blank = 2
)
}
}
if (is.null(raw_recruit)) {
recruit <- NULL
} else {
# process SPAWN_RECRUIT table
names(raw_recruit) <- raw_recruit[1, ]
raw_recruit[raw_recruit == "_"] <- NA
raw_recruit <- raw_recruit[-(1:2), ] # remove header rows
recruit <- raw_recruit[-(1:2), ] # remove rows for Virg and Init
# temporary change for model that has bad values in dev column
recruit[["dev"]][recruit[["dev"]] == "-nan(ind)"] <- NA
# make values numeric
recruit <- type.convert(recruit, as.is = TRUE)
# make older SS output names match current SS output conventions
recruit <- df.rename(recruit,
oldnames = c("year", "spawn_bio", "adjusted", "biasadj"),
newnames = c("Yr", "SpawnBio", "bias_adjusted", "biasadjuster")
)
}
# starting in 3.30.11.00, a table with the full spawn recr curve was added
SPAWN_RECR_CURVE <- NULL
if (!is.na(match_report_line("Full_Spawn_Recr_Curve"))) {
SPAWN_RECR_CURVE <- match_report_table("Full_Spawn_Recr_Curve", 1,
header = TRUE, type.convert = TRUE
)
}
# section was renamed in 3.30.15.06
if (!is.na(match_report_line("SPAWN_RECR_CURVE"))) {
SPAWN_RECR_CURVE <- match_report_table("SPAWN_RECR_CURVE", 1,
header = TRUE, type.convert = TRUE
)
}
## FIT_LEN_COMPS
if (SS_versionNumeric >= 3.30) {
# This section existed but wasn't read prior to 3.30
fit_len_comps <- match_report_table("FIT_LEN_COMPS", 1, header = TRUE)
} else {
fit_len_comps <- NULL
}
if (!is.null(dim(fit_len_comps)) && nrow(fit_len_comps) > 0) {
# replace underscores with NA
fit_len_comps[fit_len_comps == "_"] <- NA
# make columns numeric (except "Used", which may contain "skip")
fit_len_comps <- type.convert(fit_len_comps, as.is = TRUE)
} else {
fit_len_comps <- NULL
}
# Length_Comp_Fit_Summary
if (SS_versionNumeric < 3.30) {
# old way didn't have key word and had parentheses and other issues with column names
lenntune <- match_report_table("FIT_AGE_COMPS", -(nfleets + 2),
"FIT_AGE_COMPS", -1,
cols = 1:10, header = TRUE
)
names(lenntune)[10] <- "FleetName"
# convert underscores
lenntune[lenntune == "_"] <- NA
# reorder columns (leaving out sample sizes perhaps to save space)
lenntune <- lenntune[lenntune[["N"]] > 0, c(10, 1, 4:9)]
# avoid NA warnings by removing #IND values
lenntune$"MeaneffN/MeaninputN"[lenntune$"MeaneffN/MeaninputN" == "-1.#IND"] <- NA
lenntune <- type.convert(lenntune, as.is = TRUE)
lenntune$"HarMean/MeanInputN" <- lenntune$"HarMean(effN)" / lenntune$"mean(inputN*Adj)"
} else {
# new in 3.30 has keyword at top
lenntune <- match_report_table("Length_Comp_Fit_Summary", 1, header = TRUE)
if (!is.null(lenntune)) {
lenntune <- df.rename(lenntune,
oldnames = c("FleetName", "Factor", "HarMean_effN"),
newnames = c("Fleet_name", "Data_type", "HarMean")
)
if ("Data_type" %in% names(lenntune)) {
# format starting with 3.30.12 doesn't need adjustment, just convert to numeric
# ("Factor", introduced in 3.30.12, was renamed "Data_type" in 3.30.20)
lenntune <- type.convert(lenntune, as.is = TRUE)
} else {
# process 3.30 versions prior to 3.30.12
# reorder columns (leaving out sample sizes perhaps to save space)
lenntune <- lenntune[lenntune[["Nsamp_adj"]] > 0, ]
lenntune <- type.convert(lenntune, as.is = TRUE)
## new column "Recommend_Var_Adj" in 3.30 now matches calculation below
# lenntune$"HarMean/MeanInputN" <- lenntune$"HarMean"/lenntune$"mean_inputN*Adj"
lenntune$"HarMean(effN)/mean(inputN*Adj)" <-
lenntune$"HarMean" / lenntune$"mean_inputN*Adj"
# change name to make it clear what the harmonic mean is based on
lenntune <- df.rename(lenntune,
oldnames = c("HarMean", "mean_inputN*Adj"),
newnames = c("HarMean(effN)", "mean(inputN*Adj)")
)
# drop distracting column
lenntune <- lenntune[, names(lenntune) != "mean_effN"]
# put recommendation and fleetnames at the end
# (probably a more efficient way to do this)
end.names <- c("Recommend_Var_Adj", "Fleet_name")
lenntune <- lenntune[, c(
which(!names(lenntune) %in% end.names),
which(names(lenntune) %in% end.names)
)]
} # end pre-3.30.12 version of processing Length_Comp_Fit_Summary
} # end 3.30 version of processing Length_Comp_Fit_Summary
}
stats[["Length_Comp_Fit_Summary"]] <- lenntune
## FIT_AGE_COMPS
fit_age_comps <- match_report_table("FIT_AGE_COMPS", 1, header = TRUE)
# process FIT_AGE_COMPS
if (!is.null(dim(fit_age_comps)) && nrow(fit_age_comps) > 0) {
# replace underscores with NA
fit_age_comps[fit_age_comps == "_"] <- NA
# make columns numeric (except "Used", which may contain "skip")
fit_age_comps <- type.convert(fit_age_comps, as.is = TRUE)
} else {
fit_age_comps <- NULL
}
# Age_Comp_Fit_Summary
if (SS_versionNumeric < 3.30) {
# 3.24 and before had no keyword for tuning info below FIT_AGE_COMPS
# so working backwards from the following section to get it
agentune <- match_report_table("FIT_SIZE_COMPS", -(nfleets + 2),
"FIT_SIZE_COMPS", -2,
cols = 1:10, header = TRUE
)
} else {
# 3.30 version has keyword (if included in output)
# and requires little processing
start <- match_report_line("Age_Comp_Fit_Summary")
if (is.na(start)) {
agentune <- NULL
} else {
if (rawrep[start + 1, 1] == "") {
adjust1 <- 2
which_blank <- 2
} else {
adjust1 <- 1
which_blank <- 1
}
agentune <- match_report_table("Age_Comp_Fit_Summary",
adjust1 = adjust1,
header = TRUE, which_blank = which_blank
)
}
} # end 3.30 version
agentune <- df.rename(agentune,
oldnames = c("FleetName", "N", "Factor", "HarMean_effN"),
newnames = c("Fleet_name", "Nsamp_adj", "Data_type", "HarMean")
)
if ("Data_type" %in% names(agentune)) {
# format starting with 3.30.12 doesn't need adjustment, just
# convert to numeric
# ("Factor", introduced in 3.30.12, was renamed "Data_type" in 3.30.20)
agentune <- type.convert(agentune, as.is = TRUE)
} else {
if (!is.null(dim(agentune))) {
names(agentune)[ncol(agentune)] <- "Fleet_name"
# convert underscores
agentune[agentune == "_"] <- NA
# remove empty rows with NA or zero sample size
agentune <- agentune[!is.na(agentune[["Nsamp_adj"]]) &
agentune[["Nsamp_adj"]] > 0, ]
# avoid NA warnings by removing #IND values
agentune$"MeaneffN/MeaninputN"[agentune$"MeaneffN/MeaninputN" == "-1.#IND"] <- NA
agentune <- type.convert(agentune, as.is = TRUE)
# calculate ratio to be more transparent
agentune$"HarMean(effN)/mean(inputN*Adj)" <-
agentune$"HarMean(effN)" / agentune$"mean(inputN*Adj)"
# calculate recommended value (for length data this is done internally in SS)
agentune[["Recommend_Var_Adj"]] <-
agentune[["Var_Adj"]] * agentune$"HarMean(effN)/mean(inputN*Adj)"
# remove distracting columns (no longer present in recent versions of SS)
badnames <- c("mean_effN", "Mean(effN/inputN)", "MeaneffN/MeaninputN")
agentune <- agentune[, !names(agentune) %in% badnames]
# put fleetnames column at the end (probably a more efficient way to do this)
agentune <- agentune[, c(
which(names(agentune) != "Fleet_name"),
which(names(agentune) == "Fleet_name")
)]
# change name to make it clear what's reported and be constent with lengths
agentune <- df.rename(agentune,
oldnames = c("Var_Adj"),
newnames = c("Curr_Var_Adj")
)
} else {
agentune <- NULL
}
}
stats[["Age_Comp_Fit_Summary"]] <- agentune
## FIT_SIZE_COMPS
fit_size_comps <- NULL
if (SS_versionNumeric >= 3.30) {
# test for SS version 3.30.12 and beyond
if (!is.na(match_report_line("FIT_SIZE_COMPS"))) {
# note that there are hashes in between sub-sections,
# so using rep_blank_lines instead of default
# rep_blank_or_hash_lines to find ending
fit_size_comps <- match_report_table("FIT_SIZE_COMPS", 1,
header = FALSE,
blank_lines = rep_blank_lines
)
if (!is.null(dim(fit_size_comps)) &&
nrow(fit_size_comps) > 0 &&
fit_size_comps[1, 1] != "#_none") {
# column names
names(fit_size_comps) <- fit_size_comps[2, ]
# add new columns for method-specific info
fit_size_comps[["Method"]] <- NA
fit_size_comps[["Units"]] <- NA
fit_size_comps[["Scale"]] <- NA
fit_size_comps[["Add_to_comp"]] <- NA
# find the lines with the method-specific info
method_lines <- grep("#Method:", fit_size_comps[, 1])
# method info is table to store info from only those lines
method_info <- fit_size_comps[method_lines, ]
# find the lines with the fit summary
if (any(grepl("Size_Comp_Fit_Summary", fit_size_comps[, 1]))) {
# new header line added in version 3.30.20
tune_lines <- grep("Size_Comp_Fit_Summary", fit_size_comps[, 1]) + 1
} else {
tune_lines <- grep("Factor", fit_size_comps[, 1])
}
# place to store fit summary which is split across methods
sizentune <- NULL
# loop over methods to fill in new columns
for (imethod in seq_along(method_lines)) {
start <- method_lines[imethod]
if (imethod != length(method_lines)) {
end <- method_lines[imethod + 1] - 1
} else {
end <- nrow(fit_size_comps)
}
fit_size_comps[["Method"]][start:end] <- method_info[imethod, 2]
fit_size_comps[["Units"]][start:end] <- method_info[imethod, 4]
fit_size_comps[["Scale"]][start:end] <- method_info[imethod, 6]
fit_size_comps[["Add_to_comp"]][start:end] <- method_info[imethod, 8]
# split out rows with info on tuning
sizentune <- rbind(sizentune, fit_size_comps[tune_lines[imethod]:end, ])
}
# format sizentune (info on tuning) has been split into
# a separate data.frame, needs formatting: remove extra columns, change names
goodcols <- c(
# grab columns up through Fleet_name + added Method column
1:grep("name", tolower(sizentune[1, ])),
grep("Method", names(sizentune))
)
# fill in header for Method in first row
sizentune[1, max(goodcols)] <- "Method"
sizentune <- sizentune[, goodcols]
# use first row for names
names(sizentune) <- sizentune[1, ]
# rename Factor to Data_type (changed in 3.30.20)
sizentune <- df.rename(sizentune,
oldnames = c("Factor", "HarMean_effN"),
newnames = c("Data_type", "HarMean")
)
# subset for rows with single-character value for
# Data_type (should always be 7 but seems to have been
# 6 in some earlier models)
# this should filter out extra header rows
sizentune <- sizentune[nchar(sizentune[["Data_type"]]) == 1, ]
# convert to numeric values as needed
sizentune <- type.convert(sizentune, as.is = TRUE)
stats[["Size_Comp_Fit_Summary"]] <- sizentune
# remove extra summary rows of fit_size_comps
fit_size_comps <- fit_size_comps[fit_size_comps[["Fleet_Name"]] %in% FleetNames, ]
} # end check for non-empty fit_size_comps
} else {
# formatting used for earlier 3.30 versions (prior to 3.30.12)
fit_size_comps <- match_report_table("FIT_SIZE_COMPS", 1,
"Size_Comp_Fit_Summary", -(nfleets + 2),
header = TRUE
)
}
}
# extra formatting for all versions
if (!is.null(dim(fit_size_comps)) && nrow(fit_size_comps) > 0) {
# replace underscores with NA
fit_size_comps[fit_size_comps == "_"] <- NA
# make columns numeric (except "Used", which may contain "skip")
fit_size_comps <- type.convert(fit_size_comps, as.is = TRUE)
}
# Size comp effective N tuning check
# (only available in version 3.30.01.12 and above)
if (SS_versionNumeric >= 3.30) {
if (!exists("sizentune")) {
# if this table hasn't already been parsed from fit_size_comps above
sizentune <- match_report_table("Size_Comp_Fit_Summary", 1, "OVERALL_COMPS", -1,
cols = 1:10, header = TRUE
)
if (!is.null(dim(sizentune))) {
sizentune[, 1] <- sizentune[, 10]
sizentune <- sizentune[sizentune[["Npos"]] > 0, c(1, 3, 4, 5, 6, 8, 9)]
} else {
sizentune <- NULL
}
}
stats[["Size_comp_Eff_N_tuning_check"]] <- sizentune
}
# placeholders for tables read from data file
# in versions prior to 3.30.21
age_data_info <- NULL
len_data_info <- NULL
# if D-M parameters present
if (nrow(DM_pars) > 0) {
if (!is.null(Length_comp_error_controls) |
!is.null(Age_comp_error_controls)) {
# approach used from 3.30.21+ when all info was available in Report.sso
# (info was added earlier but r4ss didn't switch right away)
# loop over fleets within each comp database
# to copy DM sample size over from one table to another
# surely there are far better ways of doing this with merge
# or dplyr function
if (comp) { # only possible if CompReport.sso was read
# map select columns from fit_len_comps to lendbase
# (can expand to other columns like MV_T_parm in the future)
if (nrow(lendbase) > 0) {
lendbase <- fit_len_comps %>%
dplyr::rename(Like_sum = Like) %>% # like for vector not bin
dplyr::select(Fleet, Time, Sexes, Part, Nsamp_DM) %>%
dplyr::left_join(lendbase, .)
}
# repeat for other parts of CompReport.sso
if (nrow(agedbase) > 0) {
agedbase <- fit_age_comps %>%
dplyr::rename(Like_sum = Like) %>% # like for vector not bin
dplyr::select(Fleet, Time, Sexes, Part, Nsamp_DM) %>%
dplyr::left_join(agedbase, .)
}
if (nrow(condbase) > 0) {
condbase <- fit_age_comps %>%
dplyr::rename(Like_sum = Like) %>% # like for vector not bin
dplyr::select(Fleet, Time, Sexes, Part, Nsamp_DM) %>%
dplyr::left_join(condbase, .)
}
# IGT 28 Jan 2023: need to add support for DM for generalized size comps
} # end test for whether CompReport.sso info is available
# end approach used starting in 3.30.21
} else {
# approach prior to 3.30.21 when info was needed from
# the data file
# figure out which fleet uses which parameter,
# currently (as of SS version 3.30.10.00), requires reading data file
if (verbose) {
message("Reading data.ss_new (or data_echo.ss_new) for info on Dirichlet-Multinomial parameters")
}
datname <- get_dat_new_name(dir)
datfile <- SS_readdat(
file = file.path(dir, datname),
verbose = verbose,
)
# when new data file is empty, find input data file
if (is.null(datfile)) {
starter <- SS_readstarter(
file = file.path(dir, "starter.ss"),
verbose = verbose
)
datfile <- SS_readdat(
file = file.path(dir, starter[["datfile"]]),
verbose = verbose, version = "3.30"
)
}
age_data_info <- datfile[["age_info"]]
len_data_info <- datfile[["len_info"]]
if (!is.null(age_data_info) & !is.null(len_data_info)) {
age_data_info[["CompError"]] <- as.numeric(age_data_info[["CompError"]])
age_data_info[["ParmSelect"]] <- as.numeric(age_data_info[["ParmSelect"]])
len_data_info[["CompError"]] <- as.numeric(len_data_info[["CompError"]])
len_data_info[["ParmSelect"]] <- as.numeric(len_data_info[["ParmSelect"]])
if (!any(age_data_info[["CompError"]] > 0) & !any(len_data_info[["CompError"]] > 0)) {
stop(
"Problem with Dirichlet-Multinomial parameters: \n",
" Report file indicates parameters exist, but no CompError values\n",
" in data.ss_new are > 0."
)
}
} # end check for no Length_ or Age_comp_error_controls tables
# get Dirichlet-Multinomial parameter values and adjust input N
# the old way before that info was available in fit_len_comps
# and fit_age_comps
get_DM_sample_size <- function(CompError,
f,
sub,
data_info,
dbase) {
ipar <- data_info[["ParmSelect"]][f]
if (ipar %in% 1:nrow(DM_pars)) {
if (CompError == 1) {
Theta <- DM_pars[["Theta"]][ipar]
}
if (CompError == 2) {
beta <- DM_pars[["Theta"]][ipar]
}
} else {
stop(
"Issue with Dirichlet-Multinomial parameter:",
"Fleet = ", f, "and ParmSelect = ", ipar
)
}
if (CompError == 1) {
Nsamp_DM <-
1 / (1 + Theta) +
dbase[["Nsamp_adj"]][sub] * Theta / (1 + Theta)
}
if (CompError == 2) {
Nsamp_DM <-
dbase[["Nsamp_adj"]][sub] * (1 + beta) /
(dbase[["Nsamp_adj"]][sub] + beta)
}
Nsamp_DM
} # end get_DM_sample_size()
if (comp) { # only possible if CompReport.sso was read
if (nrow(agedbase) > 0) {
agedbase[["Nsamp_DM"]] <- NA
}
if (nrow(lendbase) > 0) {
lendbase[["Nsamp_DM"]] <- NA
}
if (nrow(condbase) > 0) {
condbase[["Nsamp_DM"]] <- NA
}
# loop over fleets within agedbase
for (f in unique(agedbase[["Fleet"]])) {
# D-M likelihood for age comps
if (age_data_info[["CompError"]][f] > 0) {
sub <- agedbase[["Fleet"]] == f
agedbase[["Nsamp_DM"]][sub] <-
get_DM_sample_size(
CompError = age_data_info[["CompError"]][f],
f = f,
sub = sub,
data_info = age_data_info,
dbase = agedbase
)
} # end test for D-M likelihood in age comp
} # end loop over fleets within agedbase
# loop over fleets within lendbase
for (f in unique(lendbase[["Fleet"]])) {
# D-M likelihood for len comps
if (len_data_info[["CompError"]][f] > 0) {
sub <- lendbase[["Fleet"]] == f
lendbase[["Nsamp_DM"]][sub] <-
get_DM_sample_size(
CompError = len_data_info[["CompError"]][f],
f = f,
sub = sub,
data_info = len_data_info,
dbase = lendbase
)
} # end test for D-M likelihood in len comp
} # end loop over fleets within lendbase
# loop over fleets within condbase
for (f in unique(condbase[["Fleet"]])) {
# D-M likelihood for age comps
if (age_data_info[["CompError"]][f] > 0) {
sub <- condbase[["Fleet"]] == f
condbase[["Nsamp_DM"]][sub] <-
get_DM_sample_size(
CompError = age_data_info[["CompError"]][f],
f = f,
sub = sub,
data_info = age_data_info,
dbase = condbase
)
} # end test for D-M likelihood in age comp
} # end loop over fleets within condbase
} # end test for whether CompReport.sso info is available
} # end processing DM pars & samples prior to 3.30.21
} # end if DM pars are present
# get information that will help diagnose jitter coverage and bad bounds
jitter_info <- parameters[
!is.na(parameters[["Active_Cnt"]]) &
!is.na(parameters[["Min"]]),
c("Value", "Min", "Max", "Init")
]
jitter_info[["sigma"]] <- (jitter_info[["Max"]] - jitter_info[["Min"]]) / (2 * qnorm(.999))
jitter_info[["CV"]] <- jitter_info[["sigma"]] / jitter_info[["Init"]]
jitter_info[["InitLocation"]] <- pnorm(
q = jitter_info[["Init"]],
mean = (jitter_info[["Max"]] + jitter_info[["Min"]]) / 2,
sd = jitter_info[["sigma"]]
)
if (verbose) {
message("Finished primary run statistics list")
}
flush.console()
# add stuff to list to return
if (SS_versionNumeric <= 3.24) {
returndat[["definitions"]] <- fleetdefs
returndat[["fleet_ID"]] <- fleet_ID
returndat[["fleet_area"]] <- fleet_area
returndat[["catch_units"]] <- catch_units
returndat[["catch_error"]] <- catch_error
}
if (SS_versionNumeric >= 3.30) {
returndat[["definitions"]] <- fleetdefs
returndat[["fleet_ID"]] <- fleet_ID
returndat[["fleet_type"]] <- fleet_type
returndat[["fleet_timing"]] <- fleet_timing
returndat[["fleet_area"]] <- fleet_area
returndat[["catch_units"]] <- catch_units
if (exists("catch_se")) {
returndat[["catch_se"]] <- catch_se
returndat[["equ_catch_se"]] <- equ_catch_se
} else {
returndat[["catch_se"]] <- NA
returndat[["equ_catch_se"]] <- NA
}
}
# simple function to return additional things from the DEFINITIONS
# section that were added with SS version 3.30.12
return.def <- function(x) {
if (exists(x)) {
get(x)
} else {
NULL
}
}
returndat[["mcmc"]] <- mcmc
returndat[["survey_units"]] <- survey_units
returndat[["survey_error"]] <- survey_error
returndat[["IsFishFleet"]] <- IsFishFleet
returndat[["nfishfleets"]] <- nfishfleets
returndat[["nfleets"]] <- nfleets
returndat[["nsexes"]] <- nsexes
returndat[["ngpatterns"]] <- ngpatterns
returndat[["lbins"]] <- lbins
returndat[["Lbin_method"]] <- Lbin_method
returndat[["nlbins"]] <- nlbins
returndat[["lbinspop"]] <- lbinspop
returndat[["nlbinspop"]] <- nlbinspop
returndat[["sizebinlist"]] <- sizebinlist
returndat[["age_data_info"]] <- age_data_info
returndat[["len_data_info"]] <- len_data_info
returndat[["agebins"]] <- agebins
returndat[["nagebins"]] <- nagebins
returndat[["accuage"]] <- accuage
returndat[["nareas"]] <- nareas
returndat[["startyr"]] <- startyr
returndat[["endyr"]] <- endyr
returndat[["nseasons"]] <- nseasons
returndat[["seasfracs"]] <- seasfracs
returndat[["seasdurations"]] <- seasdurations
returndat[["N_sub_seasons"]] <- return.def("N_sub_seasons")
returndat[["Spawn_month"]] <- return.def("Spawn_month")
returndat[["Spawn_seas"]] <- return.def("Spawn_seas")
returndat[["Spawn_timing_in_season"]] <- return.def("Spawn_timing_in_season")
returndat[["Retro_year"]] <- return.def("Retro_year")
returndat[["N_forecast_yrs"]] <- return.def("N_forecast_yrs")
returndat[["Empirical_wt_at_age"]] <- return.def("Empirical_wt_at_age")
returndat[["N_bio_patterns"]] <- return.def("N_bio_patterns")
returndat[["N_platoons"]] <- return.def("N_platoons")
returndat[["NatMort_option"]] <- return.def("NatMort_option")
returndat[["GrowthModel_option"]] <- return.def("GrowthModel_option")
returndat[["Maturity_option"]] <- return.def("Maturity_option")
returndat[["Fecundity_option"]] <- return.def("Fecundity_option")
returndat[["Start_from_par"]] <- return.def("Start_from_par")
returndat[["Do_all_priors"]] <- return.def("Do_all_priors")
returndat[["Use_softbound"]] <- return.def("Use_softbound")
returndat[["N_nudata"]] <- return.def("N_nudata")
returndat[["Max_phase"]] <- return.def("Max_phase")
returndat[["Current_phase"]] <- return.def("Current_phase")
returndat[["Jitter"]] <- return.def("Jitter")
returndat[["ALK_tolerance"]] <- return.def("ALK_tolerance")
returndat[["Length_comp_error_controls"]] <- Length_comp_error_controls
returndat[["Age_comp_error_controls"]] <- Age_comp_error_controls
returndat[["nforecastyears"]] <- nforecastyears
returndat[["morph_indexing"]] <- morph_indexing
returndat[["MGparmAdj"]] <- MGparmAdj
returndat[["forecast_selectivity"]] <- forecast_selectivity
returndat[["SelSizeAdj"]] <- SelSizeAdj
returndat[["SelAgeAdj"]] <- SelAgeAdj
returndat[["recruitment_dist"]] <- recruitment_dist
returndat[["recruit"]] <- recruit
returndat[["SPAWN_RECR_CURVE"]] <- SPAWN_RECR_CURVE
returndat[["breakpoints_for_bias_adjustment_ramp"]] <-
breakpoints_for_bias_adjustment_ramp
# Static growth
# note: keyword "BIOLOGY" was not unique enough at some point
# but revision on 11 June 2020 seems to be working so far
# formatting change in 3.30.15.06 puts table one line lower
biology <- match_report_table("BIOLOGY",
adjust1 = ifelse(custom, 2, 1),
header = TRUE, type.convert = TRUE
)
# updated BIOLOGY table names based on change July 2022 change
# https://github.com/nmfs-stock-synthesis/stock-synthesis/issues/348
biology <- df.rename(biology,
oldnames = c("Low", "Mean_Size", "Wt_len", "Wt_len_F", "Mat_len", "Spawn", "Wt_len_M", "Fecundity"),
newnames = c("Len_lo", "Len_mean", "Wt_F", "Wt_F", "Mat", "Mat*Fec", "Wt_M", "Fec")
)
# determine fecundity type
FecType <- 0
# get parameter labels
pl <- parameters[["Label"]]
FecGrep1 <- grep("Eggs/kg_slope_wt_Fem", pl)
FecGrep2 <- grep("Eggs_exp_len_Fem", pl)
FecGrep3 <- grep("Eggs_exp_wt_Fem", pl)
FecGrep4 <- grep("Eggs_slope_len_Fem", pl)
FecGrep5 <- grep("Eggs_slope_Wt_Fem", pl)
if (length(FecGrep1) > 0) {
FecType <- 1
FecPar1name <- grep("Eggs/kg_inter_Fem", pl, value = TRUE)[1]
FecPar2name <- pl[FecGrep1[1]]
}
if (length(FecGrep2) > 0) {
FecType <- 2
FecPar1name <- grep("Eggs_scalar_Fem", pl, value = TRUE)[1]
FecPar2name <- pl[FecGrep2[1]]
}
if (length(FecGrep3) > 0) {
FecType <- 3
FecPar1name <- grep("Eggs_scalar_Fem", pl, value = TRUE)[1]
FecPar2name <- pl[FecGrep3[1]]
}
if (length(FecGrep4) > 0) {
FecType <- 4
FecPar1name <- grep("Eggs_intercept_Fem", pl, value = TRUE)[1]
FecPar2name <- pl[FecGrep4[1]]
}
if (length(FecGrep5) > 0) {
FecType <- 5
FecPar1name <- grep("Eggs_intercept_Fem", pl, value = TRUE)[1]
FecPar2name <- pl[FecGrep5[1]]
}
if (is.na(lbinspop[1])) {
lbinspop <- biology[["Len_lo"]][biology[["GP"]] == 1]
}
# warning for 3.30 models with multiple growth patterns that have
# repeat fecundity values, likely to be sorted out in new SS version
if (length(returndat[["FecPar1"]]) > 1) {
warning(
"Plots will only show fecundity and related quantities",
"for Growth Pattern 1"
)
returndat[["FecPar1"]] <- returndat[["FecPar1"]][1]
returndat[["FecPar2"]] <- returndat[["FecPar2"]][2]
}
# cleanup and tests related to biology at length table
if (!is.null(biology)) {
# fix for extra header associated with extra column header
# for single sex models that got fixed in 3.30.16
if (nsexes == 1 &&
is.na(biology[["Fec"]][1]) &&
"Wt_M" %in% names(biology)) {
# copy Wt_len_M to Fecundity
biology[["Fec"]] <- biology[["Wt_M"]]
# remove Wt_len_M
biology <- biology[, !names(biology) %in% "Wt_M"]
}
# test to figure out if fecundity is proportional to spawning biomass
# check for any mismatch between weight-at-length and fecundity
returndat[["SpawnOutputUnits"]] <-
ifelse(!is.null(biology[["Fec"]][1]) &&
!is.na(biology[["Fec"]][1]) &&
any(biology[["Wt_F"]] != biology[["Fec"]]),
"numbers", "biomass"
)
}
# add biology and fecundity varibles to list getting returned
returndat[["biology"]] <- biology
returndat[["FecType"]] <- FecType
returndat[["FecPar1name"]] <- FecPar1name
returndat[["FecPar2name"]] <- FecPar2name
returndat[["FecPar1"]] <- parameters[["Value"]][parameters[["Label"]] == FecPar1name]
returndat[["FecPar2"]] <- parameters[["Value"]][parameters[["Label"]] == FecPar2name]
# get natural mortality type and vectors of M by age
adjust1 <- ifelse(custom, 2, 1)
M_type <- rawrep[match_report_line("Natural_Mortality") + adjust1 - 1, 2]
M_type <- as.numeric(gsub(
pattern = ".*([0-9]+)",
replacement = "\\1",
x = M_type
))
# in SS 3.30 the number of rows of Natural_Mortality is the product of
# the number of sexes, growth patterns, settlement events but settlement
# events didn't exist in 3.24
# this first table includes all time periods as of 3.30.20
Natural_Mortality <- match_report_table("Natural_Mortality",
adjust1 = adjust1,
header = TRUE,
type.convert = TRUE
)
# the Bmark and endyr tables have been subsumed into the table above
# in 3.30.20
Natural_Mortality_Bmark <- match_report_table("Natural_Mortality_Bmark",
adjust1 = 1,
header = TRUE,
type.convert = TRUE
)
Natural_Mortality_endyr <- match_report_table("Natural_Mortality_endyr",
adjust1 = 1,
header = TRUE,
type.convert = TRUE
)
returndat[["M_type"]] <- M_type
returndat[["Natural_Mortality"]] <- Natural_Mortality
returndat[["Natural_Mortality_Bmark"]] <- Natural_Mortality_Bmark
returndat[["Natural_Mortality_endyr"]] <- Natural_Mortality_endyr
# get growth parameters
Growth_Parameters <- match_report_table("Growth_Parameters", 1,
"Growth_Parameters", 1 + ngpatterns * nsexes,
header = TRUE, type.convert = TRUE
)
returndat[["Growth_Parameters"]] <- Growth_Parameters
Seas_Effects <- match_report_table("Seas_Effects", 1,
header = TRUE, type.convert = TRUE
)
returndat[["Seas_Effects"]] <- Seas_Effects
# ending year growth, including pattern for the CV (added in SSv3.22b_Aug3)
# CVtype will occur on same line or following
growthCVtype <- match_report_table("Biology_at_age", 0,
"Biology_at_age", 1,
header = FALSE
)
growthCVtype <- grep("endyr_with_", unlist(growthCVtype), value = TRUE)
if (length(growthCVtype) > 0) {
returndat[["growthCVtype"]] <- strsplit(growthCVtype,
split = "endyr_with_"
)[[1]][2]
} else {
returndat[["growthCVtype"]] <- "unknown"
}
# formatting change in 3.30.15.06 puts table one line lower
growdat <- match_report_table("Biology_at_age",
adjust1 = ifelse(custom, 2, 1),
header = TRUE, type.convert = TRUE
)
if (!is.null(growdat)) {
# make older SS output names match current SS output conventions
growdat <- df.rename(growdat,
oldnames = c("Gender"),
newnames = c("Sex")
)
# extract a few quantities related to growth morphs/platoons
# note 16-June-2020: these values don't seem to be used anywhere
nmorphs <- max(growdat[["Morph"]])
midmorphs <- c(c(0, nmorphs / nsexes) + ceiling(nmorphs / nsexes / 2))
}
returndat[["endgrowth"]] <- growdat
# test for use of empirical weight-at-age input file (wtatage.ss)
# should match only "MEAN_BODY_WT(Begin)" or "MEAN_BODY_WT(begin)"
test <- match_report_table("MEAN_BODY_WT(", 0,
"MEAN_BODY_WT(", 1,
header = FALSE
)
wtatage_switch <- length(grep("wtatage.ss", test)) > 0
returndat[["wtatage_switch"]] <- wtatage_switch
# mean body weight
mean_body_wt <- match_report_table("MEAN_BODY_WT(begin)", 1,
header = TRUE, type.convert = TRUE
)
returndat[["mean_body_wt"]] <- mean_body_wt
# get time series of mean length at age
mean_size <- match_report_table("MEAN_SIZE_TIMESERIES", 1,
"mean_size_Jan_1", -2,
cols = 1:(4 + accuage + 1),
header = TRUE,
type.convert = TRUE
)
# filter values for range of years in time series
# (may not be needed in more recent SS versions)
growthvaries <- FALSE
if (!is.null(mean_size)) {
if (SS_versionNumeric < 3.30) {
mean_size <- mean_size[mean_size[["Beg"]] == 1 &
mean_size[["Yr"]] >= startyr &
mean_size[["Yr"]] < endyr, ]
} else {
mean_size <- mean_size[mean_size[["SubSeas"]] == 1 &
mean_size[["Yr"]] >= startyr &
mean_size[["Yr"]] < endyr, ]
}
if (nseasons > 1) {
mean_size <- mean_size[mean_size[["Seas"]] == 1, ]
}
# loop over morphs to check for time-varying growth
# (typically only 1 or 1:2 for females and males)
for (morph in unique(mean_size[["Morph"]])) {
# check is based on ages 0 up to accuage-1, because the mean
# length in the plus group can vary over time as a function of changes
# in the numbers at age (where fishing down the old fish causes
# fewer additional ages lumped into that group)
if (sum(!duplicated(mean_size[
mean_size[["Morph"]] == morph,
paste(0:(accuage - 1))
])) > 1) {
growthvaries <- TRUE
}
}
returndat[["growthseries"]] <- mean_size
returndat[["growthvaries"]] <- growthvaries
}
  # Length-based selectivity and retention
  # drop forecast-year rows unless forecast output was requested
  if (!forecast) {
    sizeselex <- sizeselex[sizeselex[["Yr"]] <= endyr, ]
  }
  returndat[["sizeselex"]] <- sizeselex
# Age-based selectivity
# Updated for 3.30.17 which added an additional row in the AGE_SELEX header
ageselex <- match_report_table("COMBINED_ALK*selL*selA", 1, header = TRUE)
if (!is.null(ageselex)) {
# account for additional header row added in March 2021
# SS commit: 31ae478d1bae53235e14912d8c5c452a62c71adb
# (not the most efficient way to do this)
if (any(grepl("COMBINED_ALK", names(ageselex)))) {
ageselex <- match_report_table("AGE_SELEX", 5, header = TRUE)
}
ageselex <- df.rename(ageselex,
oldnames = c(
"fleet", "year", "seas", "gender",
"morph", "label", "factor"
),
newnames = c(
"Fleet", "Yr", "Seas", "Sex",
"Morph", "Label", "Factor"
)
)
# filter forecast years from selectivity if no forecast
# NOTE: maybe refine this in 3.30
if (!forecast) {
ageselex <- ageselex[ageselex[["Yr"]] <= endyr, ]
}
# make values numeric
ageselex <- type.convert(ageselex, as.is = TRUE)
}
returndat[["ageselex"]] <- ageselex
# EXPLOITATION
# read first 20 rows to figure out where meta-data ends
exploitation_head <- match_report_table("EXPLOITATION", 1,
"EXPLOITATION", 20,
header = FALSE
)
# check for new header info added in 3.30.13_beta (14 Feb. 2019)
if (exploitation_head[1, 1] == "Info:") {
# NOTE: add read of additional header info here
exploitation <- match_report_table("EXPLOITATION",
which(exploitation_head[, 1] == "Yr"),
header = TRUE,
# using rep_blank_lines instead of default
# rep_blank_or_hash_lines to find ending because of hash
blank_lines = rep_blank_lines
)
# remove meta-data about fleets (filtered by color in 1st column):
# "Catchunits:","FleetType:","FleetArea:","FleetID:"
exploitation <- exploitation[-grep(":", exploitation[, 1]), ]
# find line with F_method like this "Info: F_Method:=3;.Continuous_F;..."
# F_method info contains additional information that might be useful elsewhere
F_method_info <- exploitation_head[grep(
"F_Method:",
exploitation_head[, 2]
), 2]
F_method_info <- gsub(
pattern = ".",
replacement = " ",
x = F_method_info,
fixed = TRUE
)
F_method_info <- strsplit(F_method_info,
split = ";",
fixed = TRUE
)[[1]]
# get numeric value for F_method
F_method <- as.numeric(strsplit(F_method_info[[1]],
split = "=",
fixed = TRUE
)[[1]][2])
} else {
# old format prior to 3.30.13
exploitation <- match_report_table("EXPLOITATION", 5, header = TRUE)
# get numeric value for F_method
F_method <- as.numeric(rawrep[match_report_line("F_Method"), 2])
}
returndat[["F_method"]] <- F_method
if (!is.null(exploitation)) {
# more processing of exploitation (if present)
exploitation[exploitation == "_"] <- NA
# make text numeric
# "init_yr" not used as of 3.30.13, but must have been in the past
# "INIT" appears to be used in 3.30.13 and beyond
exploitation[["Yr"]][exploitation[["Yr"]] %in% c("INIT", "init_yr")] <- startyr - 1
# make columns numeric
exploitation <- type.convert(exploitation, as.is = TRUE)
}
returndat[["exploitation"]] <- exploitation
# catch
catch <- match_report_table("CATCH", 1, substr1 = FALSE, header = TRUE)
# if table is present, then do processing of it
if (!is.null(catch)) {
# update to new column names used starting with 3.30.13
catch <- df.rename(catch,
oldnames = c("Name", "Yr.frac"),
newnames = c("Fleet_Name", "Time")
)
# fix likelihood associated with 0 catch
catch[["Like"]][catch[["Like"]] == "-1.#IND"] <- NA
# change "INIT" or "init" to year value following convention used elsewhere
catch[["Yr"]][tolower(catch[["Yr"]]) == "init"] <- startyr - 1
# make columns numeric
catch <- type.convert(catch, as.is = TRUE)
}
returndat[["catch"]] <- catch
# age associated with summary biomass
summary_age <- rawrep[match_report_line("TIME_SERIES"), ifelse(custom, 3, 2)]
summary_age <- as.numeric(substring(summary_age, nchar("BioSmry_age:_") + 1))
returndat[["summary_age"]] <- summary_age
# time series
timeseries <- match_report_table("TIME_SERIES", 1, header = TRUE)
# temporary fix for 3.30.03.06
timeseries <- timeseries[timeseries[["Seas"]] != "recruits", ]
timeseries[timeseries == "_"] <- NA
timeseries <- type.convert(timeseries, as.is = TRUE)
## # sum catches and other quantities across fleets
## # commented out pending additional test for more than one fleet with catch,
## # without which the apply function has errors
## timeseries[["dead_B_sum"]] <- apply(timeseries[,grep("dead(B)",names(timeseries),
## fixed=TRUE)], 1, sum)
## timeseries[["dead_N_sum"]] <- apply(timeseries[,grep("dead(N)",names(timeseries),
## fixed=TRUE)], 1, sum)
## timeseries[["retain_B_sum"]] <- apply(timeseries[,grep("retain(B)",names(timeseries),
## fixed=TRUE)], 1, sum)
## timeseries[["retain_N_sum"]] <- apply(timeseries[,grep("retain(N)",names(timeseries),
## fixed=TRUE)], 1, sum)
## timeseries[["sel_B_sum"]] <- apply(timeseries[,grep("sel(B)",names(timeseries),
## fixed=TRUE)], 1, sum)
## timeseries[["sel_N_sum"]] <- apply(timeseries[,grep("sel(N)",names(timeseries),
## fixed=TRUE)], 1, sum)
## timeseries[["obs_cat_sum"]] <- apply(timeseries[,grep("obs_cat",names(timeseries),
## fixed=TRUE)], 1, sum)
returndat[["timeseries"]] <- timeseries
# get spawning season
# currently (v3.20b), Spawning Biomass is only calculated
# in a unique spawning season within the year
if (!exists("spawnseas")) {
spawnseas <- unique(timeseries[["Seas"]][!is.na(timeseries[["SpawnBio"]])])
# problem with spawning season calculation when NA values in SpawnBio
if (length(spawnseas) == 0) {
spawnseas <- NA
}
}
returndat[["spawnseas"]] <- spawnseas
# set mainmorphs as those morphs born in the first season with recruitment
# and the largest fraction of the platoons (should equal middle platoon when present)
if (is.null(morph_indexing)) {
mainmorphs <- NULL
} else {
if (SS_versionNumeric >= 3.30) {
# new "platoon" label
temp <- morph_indexing[morph_indexing[["BirthSeas"]] ==
first_seas_with_recruits &
morph_indexing[["Platoon_Dist"]] ==
max(morph_indexing[["Platoon_Dist"]]), ]
mainmorphs <- min(temp[["Index"]][temp[["Sex"]] == 1])
if (nsexes == 2) {
mainmorphs <- c(mainmorphs, min(temp[["Index"]][temp[["Sex"]] == 2]))
}
}
if (SS_versionNumeric < 3.30) {
# old "sub_morph" label
temp <- morph_indexing[morph_indexing[["BirthSeas"]] ==
first_seas_with_recruits &
morph_indexing[["Sub_Morph_Dist"]] ==
max(morph_indexing[["Sub_Morph_Dist"]]), ]
mainmorphs <- min(temp[["Index"]][temp[["Sex"]] == 1])
if (nsexes == 2) {
mainmorphs <- c(mainmorphs, min(temp[["Index"]][temp[["Sex"]] == 2]))
}
}
if (length(mainmorphs) == 0) {
warning("Error with morph indexing")
}
}
returndat[["mainmorphs"]] <- mainmorphs
# get birth seasons as vector of seasons with non-zero recruitment
birthseas <- sort(unique(timeseries[["Seas"]][timeseries[["Recruit_0"]] > 0]))
# temporary fix for model with missing Recruit_0 values
# (so far this has only been seen in one 3.30 model with 2 GPs)
if (length(birthseas) == 0) {
birthseas <- sort(unique(morph_indexing[["BirthSeas"]]))
}
returndat[["birthseas"]] <- birthseas
# stats and dimensions
timeseries[["Yr"]] <- timeseries[["Yr"]] + (timeseries[["Seas"]] - 1) / nseasons
ts <- timeseries[timeseries[["Yr"]] <= endyr + 1, ]
tsyears <- ts[["Yr"]][ts[["Seas"]] == 1]
# Depletion
tsspaw_bio <- ts[["SpawnBio"]][ts[["Seas"]] == spawnseas & ts[["Area"]] == 1]
if (nareas > 1) # loop over areas if necessary to sum spawning biomass
{
for (a in 2:nareas) {
tsspaw_bio <- tsspaw_bio + ts[["SpawnBio"]][ts[["Seas"]] == spawnseas &
ts[["Area"]] == a]
}
}
if (nsexes == 1) {
tsspaw_bio <- tsspaw_bio / 2
}
depletionseries <- tsspaw_bio / tsspaw_bio[1]
stats[["SBzero"]] <- tsspaw_bio[1]
stats[["current_depletion"]] <- depletionseries[length(depletionseries)]
  # total landings
  # last row index used for summing (final row excluded)
  ls <- nrow(ts) - 1
  # matrix of retained-biomass columns, identified by "retain(B)" name prefix
  totretainedmat <- as.matrix(ts[, substr(
    names(ts), 1,
    nchar("retain(B)")
  ) == "retain(B)"])
  ts[["totretained"]] <- 0
  # rows 1-2 left at zero (presumably equilibrium rows — confirm)
  ts[["totretained"]][3:ls] <- rowSums(totretainedmat)[3:ls]
  # total catch
  # matrix of encountered-biomass columns, identified by "enc(B)" name prefix
  totcatchmat <- as.matrix(ts[, substr(
    names(ts), 1,
    nchar("enc(B)")
  ) == "enc(B)"])
  ts[["totcatch"]] <- 0
  ts[["totcatch"]][3:ls] <- rowSums(totcatchmat)[3:ls]
  # harvest rates
  # column-name prefix depends on F method (Pope's approximation vs. F)
  if (F_method == 1) {
    stringmatch <- "Hrate:_"
  } else {
    stringmatch <- "F:_"
  }
  # matrix of per-fleet harvest rate columns matching the chosen prefix
  Hrates <- as.matrix(ts[, substr(
    names(ts), 1,
    nchar(stringmatch)
  ) == stringmatch])
  # maximum harvest rate across all fleets and times
  fmax <- max(Hrates)
# depletion basis
depletion_basis <- as.numeric(rawrep[match_report_line("Depletion_basis"), 2])
if (is.na(depletion_basis)) {
# older versions had a different string
depletion_basis <- as.numeric(rawrep[match_report_line("Depletion_method"), 2])
}
if (depletion_basis %in% c(1, 3:4)) {
starter <- SS_readstarter(
file = file.path(dir, "starter.ss"),
verbose = verbose
)
depletion_multiplier <- starter[["depl_denom_frac"]]
} else {
depletion_multiplier <- 1
}
Bratio_denominator <- rawrep[match_report_line("B_ratio_denominator"), 2]
if (Bratio_denominator == "no_depletion_basis") {
Bratio_label <- "no_depletion_basis"
} else {
# create Bratio label for use in various plots
if (grepl(pattern = "100", x = Bratio_denominator)) {
# exclude 100% if present
Bratio_label <- paste0(
"B/",
substring(Bratio_denominator, 6)
)
} else {
Bratio_label <- paste0(
"B/(",
Bratio_denominator,
")"
)
}
if (Bratio_label == "B/Virgin_Biomass") {
Bratio_label <- "B/B_0"
}
}
returndat[["depletion_basis"]] <- depletion_basis
returndat[["depletion_multiplier"]] <- depletion_multiplier
returndat[["Bratio_denominator"]] <- Bratio_denominator
returndat[["Bratio_label"]] <- Bratio_label
## discard fractions ###
# degrees of freedom for T-distribution
# (or indicator 0, -1, -2 for other distributions)
if (SS_versionNumeric < 3.20) {
# old header from 3.11
DF_discard <- rawrep[match_report_line("DISCARD_OUTPUT"), 3]
if (length(grep("T_distribution", DF_discard)) > 0) {
DF_discard <- as.numeric(strsplit(DF_discard, "=_")[[1]][2])
}
if (length(grep("_normal_with_Std_in_as_CV", DF_discard)) > 0) {
DF_discard <- 0
}
if (length(grep("_normal_with_Std_in_as_stddev", DF_discard)) > 0) {
DF_discard <- -1
}
if (length(grep("_lognormal", DF_discard)) > 0) {
DF_discard <- -2
}
shift <- 2
discard_spec <- NULL
} else { # newer header in 3.20 and beyond
DF_discard <- NA
shift <- 1
# read first 20 lines
discard_header <- match_report_table(
"DISCARD_SPECIFICATION", 1,
"DISCARD_SPECIFICATION", 20
)
if (!is.null(discard_header)) {
# read table of discard info by fleet at bottom of header
discard_spec <- match_report_table("DISCARD_SPECIFICATION",
which(discard_header[, 3] == "errtype"),
header = TRUE, type.convert = TRUE
)
discard_spec <- type.convert(discard_spec, as.is = TRUE)
# not sure under what circumstances this first name wasn't "Fleet" already
names(discard_spec)[1] <- "Fleet"
} else {
discard_spec <- NULL
}
}
# read DISCARD_OUTPUT table
discard <- match_report_table("DISCARD_OUTPUT", shift, header = TRUE)
# rerun read of discard with header = FALSE
# if in SSv3.20b which had missing line break
if (!is.null(discard) && names(discard)[1] != "Fleet") {
discard <- match_report_table("DISCARD_OUTPUT", shift, header = FALSE)
# note that these column names are from 3.20b and have changed since that time
names(discard) <- c(
"Fleet", "Yr", "Seas", "Obs", "Exp",
"Std_in", "Std_use", "Dev"
)
}
# rename columns to standard used with 3.30.13 (starting Feb 14, 2019)
discard <- df.rename(discard,
oldnames = c("Name", "Yr.frac"),
newnames = c("Fleet_Name", "Time")
)
# process discard info if table was present
if (!is.null(discard) && nrow(discard) > 1) {
discard[discard == "_"] <- NA
# v3.23 and before had things combined under "Name"
# which has been renamed above to "Fleet_Name"
if (SS_versionNumeric <= 3.23) {
discard <- type.convert(discard, as.is = TRUE)
if (!"Fleet_Name" %in% names(discard)) {
discard[["Fleet_Name"]] <- discard[["Fleet"]]
}
discard[["Fleet"]] <- NA
for (i in 1:nrow(discard)) {
discard[["Fleet"]][i] <- strsplit(discard[["Fleet_Name"]][i], "_")[[1]][1]
discard[["Fleet_Name"]][i] <- substring(
discard[["Fleet_Name"]][i],
nchar(discard[["Fleet"]][i]) + 2
)
}
discard_tuning_info <- NULL # not bothering to support this for 3.23 and before
} else {
# v3.24 and beyond has separate columns
# for fleet number and fleet name
discard <- type.convert(discard, as.is = TRUE)
# get info on variance adjustments for discards
discard_tuning_info <- calc_var_adjust(discard, type = "sd")
}
} else {
discard <- NA # IGT 23-04-2023: not sure why this is NA instead of NULL
discard_tuning_info <- NULL
}
returndat[["discard"]] <- discard
returndat[["discard_spec"]] <- discard_spec
returndat[["discard_tuning_info"]] <- discard_tuning_info
returndat[["DF_discard"]] <- DF_discard
## Average body weight observations
# degrees of freedom for T-distribution
DF_mnwgt <- rawrep[match_report_line("log(L)_based_on_T_distribution"), 1]
if (!is.na(DF_mnwgt)) {
DF_mnwgt <- as.numeric(strsplit(DF_mnwgt, "=_")[[1]][2])
mnwgt <- match_report_table("MEAN_BODY_WT_OUTPUT", 2, header = TRUE)
mnwgt <- df.rename(mnwgt,
oldnames = c("Name"),
newnames = c("Fleet_Name")
)
mnwgt[mnwgt == "_"] <- NA
# v3.23 and before had things combined under "Name"
# which has been renamed above to "Fleet_Name"
if (SS_versionNumeric <= 3.23) {
mnwgt <- type.convert(mnwgt, as.is = TRUE)
if (!"Fleet_Name" %in% names(mnwgt)) {
mnwgt[["Fleet_Name"]] <- mnwgt[["Fleet"]]
}
mnwgt[["Fleet"]] <- NA
for (i in 1:nrow(mnwgt)) {
mnwgt[["Fleet"]][i] <- strsplit(mnwgt[["Fleet_Name"]][i], "_")[[1]][1]
mnwgt[["Fleet_Name"]][i] <- substring(
mnwgt[["Fleet_Name"]][i],
nchar(mnwgt[["Fleet_Name"]][i]) + 2
)
}
mnwgt_tuning_info <- NULL
} else { # v3.24 and beyond has separate columns for fleet number and fleet name
mnwgt <- type.convert(mnwgt, as.is = TRUE)
# get info on variance adjustments for mean body weight
mnwgt_tuning_info <- calc_var_adjust(mnwgt, type = "CV")
}
} else {
DF_mnwgt <- NA
mnwgt <- NA
mnwgt_tuning_info <- NULL
}
returndat[["mnwgt"]] <- mnwgt
returndat[["mnwgt_tuning_info"]] <- mnwgt_tuning_info
returndat[["DF_mnwgt"]] <- DF_mnwgt
# Yield and SPR time-series
spr <- match_report_table("SPR_SERIES", 5, header = TRUE)
# read again if missing using capitalization prior to 3.30.15.06
if (is.null(spr)) {
spr <- match_report_table("SPR_series", 5, header = TRUE)
}
if (!is.null(spr)) {
# clean up SPR output
# make older SS output names match current SS output conventions
names(spr) <- gsub(pattern = "SPB", replacement = "SSB", names(spr))
spr <- df.rename(spr,
oldnames = c("Year", "spawn_bio", "SPR_std", "Y/R", "F_std"),
newnames = c("Yr", "SpawnBio", "SPR_report", "YPR", "F_report")
)
spr[spr == "_"] <- NA
spr[spr == "&"] <- NA
spr[spr == "-1.#IND"] <- NA
spr <- type.convert(spr, as.is = TRUE)
# spr <- spr[spr[["Year"]] <= endyr,]
spr[["spr"]] <- spr[["SPR"]]
stats[["last_years_SPR"]] <- spr[["spr"]][nrow(spr)]
stats[["SPRratioLabel"]] <- managementratiolabels[1, 2]
stats[["last_years_SPRratio"]] <- spr[["SPR_std"]][nrow(spr)]
}
returndat[["sprseries"]] <- spr
returndat[["managementratiolabels"]] <- managementratiolabels
returndat[["F_report_basis"]] <- managementratiolabels[["Label"]][2]
returndat[["sprtarg"]] <- sprtarg
returndat[["btarg"]] <- btarg
# override minbthresh = 0.25 if it looks like hake
if (!is.na(btarg) & btarg == 0.4 & startyr == 1966 & sprtarg == 0.4 &
accuage == 20 & wtatage_switch) {
if (verbose) {
message(
"Setting minimum biomass threshhold to 0.10",
" because this looks like the Pacific Hake model.",
" You can replace or override in SS_plots via the",
" 'minbthresh' input."
)
}
minbthresh <- 0.1 # treaty value for hake
}
returndat[["minbthresh"]] <- minbthresh
# read Kobe plot
if (length(grep("Kobe_Plot", rawrep[, 1])) != 0) {
# head of Kobe_Plot section differs by SS version,
# but I haven't kept track of which is which
# read first 5 lines to figure out which one is the header
Kobe_head <- match_report_table("Kobe_Plot", 0, "Kobe_Plot", 5, header = TRUE)
shift <- grep("^Y(ea)?r", Kobe_head[, 1]) # may be "Year" or "Yr"
if (length(shift) == 0) {
# work around for bug in output for 3.24z (and some other versions)
shift <- grep("MSY_basis:_Y(ea)?r", Kobe_head[, 1])
if (length(shift) == 0) {
stop("Bug: r4ss cannot find the start of table for the Kobe plot.")
}
}
Kobe_warn <- NA
Kobe_MSY_basis <- NA
if (length(grep("_basis_is_not", Kobe_head[1, 1])) > 0) {
Kobe_warn <- Kobe_head[1, 1]
}
if (length(grep("MSY_basis", Kobe_head[2, 1])) > 0) {
Kobe_MSY_basis <- Kobe_head[2, 1]
}
Kobe <- match_report_table("Kobe_Plot", shift, header = TRUE)
Kobe[Kobe == "_"] <- NA
Kobe[Kobe == "1.#INF"] <- NA
Kobe[Kobe == "-1.#IND"] <- NA
names(Kobe) <- gsub("/", ".", names(Kobe), fixed = TRUE)
Kobe[, 1:3] <- lapply(Kobe[, 1:3], as.numeric)
} else {
Kobe <- NA
Kobe_warn <- NA
Kobe_MSY_basis <- NA
}
returndat[["Kobe_warn"]] <- Kobe_warn
returndat[["Kobe_MSY_basis"]] <- Kobe_MSY_basis
returndat[["Kobe"]] <- Kobe
flush.console()
## variance and sample size tuning information
INDEX_1 <- match_report_table("INDEX_1", 1, "INDEX_1", (nfleets + 1), header = TRUE)
# fill in column name that was missing in SS 3.24 (and perhaps other versions)
# and replace inconsistent name in some 3.30 versions with standard name
INDEX_1 <- df.rename(INDEX_1,
oldnames = c("NoName", "fleetname"),
newnames = c("Name", "Name")
)
# which column of INDEX_1 has number of CPUE values (used in reading INDEX_2)
if (SS_versionNumeric >= 3.30) {
ncpue_column <- 11
INDEX_1 <- match_report_table("INDEX_1", 1, "INDEX_3", -4, header = TRUE)
# remove any comments at the bottom of table
INDEX_1 <- INDEX_1[substr(INDEX_1[["Fleet"]], 1, 1) != "#", ]
# count of observations per index
ncpue <- sum(as.numeric(INDEX_1[["N"]]), na.rm = TRUE)
} else {
ncpue_column <- 11
ncpue <- sum(as.numeric(rawrep[
match_report_line("INDEX_1") + 1 + 1:nfleets,
ncpue_column
]))
}
# add to list of stuff that gets returned
returndat[["index_variance_tuning_check"]] <- INDEX_1
# CPUE/Survey series - will not match if not found
cpue <- match_report_table("INDEX_2", 1, "INDEX_2", ncpue + 1, header = TRUE)
cpue[cpue == "_"] <- NA
if (length(cpue) > 0) {
# make older SS output names match current SS output conventions
# note: "Fleet_name" (formerly "Name") introduced in 3.30.12
# and might change as result of discussion on inconsistent use of
# similar column names.
cpue <- df.rename(cpue,
oldnames = c("Yr.S", "Yr.frac", "Supr_Per", "Name"),
newnames = c("Time", "Time", "SuprPer", "Fleet_name")
)
# process old fleet number/name combo (e.g. "2_SURVEY")
if (SS_versionNumeric < 3.24) {
cpue[["Name"]] <- NA
for (i in 1:nrow(cpue)) {
cpue[["Fleet"]][i] <- strsplit(cpue[["Fleet"]][i], "_")[[1]][1]
cpue[["Name"]][i] <- substring(cpue[["Fleet"]][i], nchar(cpue[["Fleet"]][i]) + 2)
}
}
# replace any bad values (were present in at least one 3.24s model)
if (any(cpue[["Exp"]] == "1.#QNAN")) {
cpue[["Exp"]][cpue[["Exp"]] == "1.#QNAN"] <- NA
cpue[["Calc_Q"]][cpue[["Calc_Q"]] == "1.#QNAN"] <- NA
cpue[["Eff_Q"]][cpue[["Eff_Q"]] == "1.#QNAN"] <- NA
}
# work-around for missing SE_input values 3.30.16
# https://github.com/nmfs-stock-synthesis/stock-synthesis/issues/169
# https://github.com/r4ss/r4ss/issues/324
badrows <- which(cpue[["Use"]] == "")
if (length(badrows) > 0) {
# shift columns to the right
columns <- which(names(cpue) == "SE_input"):which(names(cpue) == "Use")
cpue[badrows, columns] <- cpue[badrows, columns - 1]
# add NA value for missing column
cpue[badrows, "SE_input"] <- NA
}
# make columns numeric
cpue <- type.convert(cpue, as.is = TRUE)
} else {
# if INDEX_2 not present
cpue <- NULL
}
returndat[["cpue"]] <- cpue
# Numbers at age
natage <- match_report_table("NUMBERS_AT_AGE", 1,
substr1 = FALSE,
header = TRUE, type.convert = TRUE
)
if (is.null(natage) || nrow(natage) == 0) {
natage <- NULL
} else {
# make older SS output names match current SS output conventions
natage <- df.rename(natage,
oldnames = c("Gender", "SubMorph"),
newnames = c("Sex", "Platoon")
)
}
returndat[["natage"]] <- natage
# NUMBERS_AT_AGE_Annual with and without fishery
natage_annual_1_no_fishery <- match_report_table("NUMBERS_AT_AGE_Annual_1", 1,
header = TRUE, type.convert = TRUE
)
natage_annual_2_with_fishery <- match_report_table("NUMBERS_AT_AGE_Annual_2", 1,
header = TRUE, type.convert = TRUE
)
returndat[["natage_annual_1_no_fishery"]] <- natage_annual_1_no_fishery
returndat[["natage_annual_2_with_fishery"]] <- natage_annual_2_with_fishery
# Biomass at age (introduced in 3.30)
batage <- match_report_table("BIOMASS_AT_AGE", 1,
substr1 = FALSE,
header = TRUE, type.convert = TRUE
)
returndat[["batage"]] <- batage
# Numbers at length
col.adjust <- 12
if (SS_versionNumeric < 3.30) {
col.adjust <- 11
}
# test ending based on text because sections changed within 3.24 series
natlen <- match_report_table("NUMBERS_AT_LENGTH", 1,
substr1 = FALSE,
header = TRUE, type.convert = TRUE
)
# make older SS output names match current SS output conventions
natlen <- df.rename(natlen,
oldnames = c("Gender", "SubMorph"),
newnames = c("Sex", "Platoon")
)
returndat[["natlen"]] <- natlen
# Biomass at length (first appeared in version 3.24l, 12-5-2012)
batlen <- match_report_table("BIOMASS_AT_LENGTH", 1,
substr1 = FALSE,
header = TRUE, type.convert = TRUE
)
returndat[["batlen"]] <- batlen
# F at age (first appeared in version 3.30.13, 8-Mar-2019)
fatage <- match_report_table("F_AT_AGE", 1, header = TRUE, type.convert = TRUE)
returndat[["fatage"]] <- fatage
# read discard at age (added with 3.30.12, 29-Aug-2018)
discard_at_age <- match_report_table("DISCARD_AT_AGE", 1,
header = TRUE, type.convert = TRUE
)
returndat[["discard_at_age"]] <- discard_at_age
# catch at age
catage <- match_report_table("CATCH_AT_AGE", 1,
header = TRUE, type.convert = TRUE
)
returndat[["catage"]] <- catage
# Movement
movement <- match_report_table("MOVEMENT", 1, substr1 = FALSE, header = TRUE)
if (!is.null(movement)) {
names(movement) <- c(
names(movement)[1:6],
paste("age", names(movement)[-(1:6)], sep = "")
)
movement <- df.rename(movement,
oldnames = c("Gpattern"),
newnames = c("GP")
)
for (i in 1:ncol(movement)) {
movement[, i] <- as.numeric(movement[, i])
}
}
returndat[["movement"]] <- movement
# tag reporting rates
tagreportrates <- match_report_table("Reporting_Rates_by_Fishery", 1,
"See_composition_data_output", -1,
substr2 = TRUE,
header = TRUE,
type.convert = TRUE
)
returndat[["tagreportrates"]] <- tagreportrates
# tag release table
# (no space after this table before Tags_Alive table)
tagrelease <- match_report_table("TAG_Recapture", 1,
"Tags_Alive", -1,
cols = 1:10
)
if (!is.null(tagrelease)) {
# strip off info from header
tagfirstperiod <- as.numeric(tagrelease[1, 1])
tagaccumperiod <- as.numeric(tagrelease[2, 1])
# remove header and convert to numeric
names(tagrelease) <- tagrelease[4, ]
tagrelease <- tagrelease[-(1:4), ]
tagrelease <- type.convert(tagrelease, as.is = TRUE)
} else {
tagrelease <- NULL
tagfirstperiod <- NULL
tagaccumperiod <- NULL
}
returndat[["tagrelease"]] <- tagrelease
returndat[["tagfirstperiod"]] <- tagfirstperiod
returndat[["tagaccumperiod"]] <- tagaccumperiod
# tags alive
# (no space after this table before Total_recaptures table)
tagsalive <- match_report_table(
"Tags_Alive", 1,
"Total_recaptures", -1
)
if (!is.null(tagsalive)) {
tagcols <- ncol(tagsalive)
names(tagsalive) <- c("TG", paste0("period", 0:(tagcols - 2)))
tagsalive[tagsalive == ""] <- NA
tagsalive <- type.convert(tagsalive, as.is = TRUE)
}
returndat[["tagsalive"]] <- tagsalive
# total recaptures
tagtotrecap <- match_report_table("Total_recaptures", 1)
if (!is.null(tagtotrecap)) {
tagcols <- ncol(tagtotrecap)
names(tagtotrecap) <- c("TG", paste0("period", 0:(tagcols - 2)))
tagtotrecap[tagtotrecap == ""] <- NA
tagtotrecap <- type.convert(tagtotrecap, as.is = TRUE)
}
returndat[["tagtotrecap"]] <- tagtotrecap
# age-length matrix
# this section is more complex because of blank lines internally
# first look for rows like " Seas: 12 Sub_Seas: 2 Morph: 12"
sdsize_lines <- grep("^sdsize", rawrep[, 1])
# check for presence of any lines with that string
if (length(sdsize_lines) > 0) {
# the section ends with first blank line after the last of the sdsize_lines
# so count the blanks as 1 greater than those in between the keyword
# and the last of those sdsize_lines
# an alternative here would be to modify match_report_table to allow input of a
# specific line number to end the section
which_blank <- 1 + length(rep_blank_or_hash_lines[
rep_blank_or_hash_lines > match_report_line("AGE_LENGTH_KEY") &
rep_blank_or_hash_lines < max(sdsize_lines)
])
# because of rows like " Seas: 12 Sub_Seas: 2 Morph: 12", the number of columns
# needs to be at least 6 even if there are fewer ages
rawALK <- match_report_table("AGE_LENGTH_KEY", 4,
cols = 1:max(6, accuage + 2),
header = FALSE,
which_blank = which_blank
)
# confirm that the section is present
if (length(rawALK) > 1 && # this should filter NULL values
length(grep("AGE_AGE_KEY", rawALK[, 1])) == 0) {
morph_col <- 5
if (SS_versionNumeric < 3.30 &
length(grep("Sub_Seas", rawALK[, 3])) == 0) {
morph_col <- 3
}
starts <- grep("Morph:", rawALK[, morph_col]) + 2
ends <- grep("mean", rawALK[, 1]) - 1
N_ALKs <- length(starts)
# 3rd dimension should be either nmorphs or nmorphs*(number of Sub_Seas)
ALK <- array(NA, c(nlbinspop, accuage + 1, N_ALKs))
dimnames(ALK) <- list(
Length = rev(lbinspop),
TrueAge = 0:accuage,
Matrix = 1:N_ALKs
)
# loop over subsections within age-length matrix
for (i in 1:N_ALKs) {
# get matrix of values
ALKtemp <- rawALK[starts[i]:ends[i], 2 + 0:accuage]
# loop over ages to convert values to numeric
ALKtemp <- type.convert(ALKtemp, as.is = TRUE)
# fill in appropriate slice of array
ALK[, , i] <- as.matrix(ALKtemp)
# get info on each matrix (such as "Seas: 1 Sub_Seas: 1 Morph: 1")
Matrix.Info <- rawALK[starts[i] - 2, ]
# filter out empty elements
Matrix.Info <- Matrix.Info[Matrix.Info != ""]
# combine elements to form a label in the dimnames
dimnames(ALK)$Matrix[i] <- paste(Matrix.Info, collapse = " ")
}
returndat[["ALK"]] <- ALK
} # end check for keyword present
} # end check for length(sdsize_lines) > 0
# ageing error matrices
rawAAK <- match_report_table("AGE_AGE_KEY", 1)
if (!is.null(rawAAK)) {
# some SS versions output message,
# others just had no values resulting in a string with NULL dimension
if (rawAAK[[1]][1] == "no_age_error_key_used" |
is.null(dim(rawAAK))) {
N_ageerror_defs <- 0
} else {
starts <- grep("KEY:", rawAAK[, 1])
N_ageerror_defs <- length(starts)
if (N_ageerror_defs > 0) {
# loop over ageing error types to get definitions
nrowsAAK <- nrow(rawAAK) / N_ageerror_defs - 3
AAK <- array(NA, c(N_ageerror_defs, nrowsAAK, accuage + 1))
age_error_mean <- age_error_sd <- data.frame(age = 0:accuage)
for (i in 1:N_ageerror_defs) {
AAKtemp <- rawAAK[starts[i] + 2 + 1:nrowsAAK, -1]
rownames.tmp <- rawAAK[starts[i] + 2 + 1:nrowsAAK, 1]
AAKtemp <- type.convert(AAKtemp, as.is = TRUE)
AAK[i, , ] <- as.matrix(AAKtemp)
age_error_mean[[paste("type", i, sep = "")]] <-
as.numeric((rawAAK[starts[i] + 1, -1]))
age_error_sd[[paste("type", i, sep = "")]] <-
as.numeric((rawAAK[starts[i] + 2, -1]))
}
# add names to 3 dimensions of age-age-key
if (!is.null(AAK)) {
dimnames(AAK) <- list(
AgeingErrorType = 1:N_ageerror_defs,
ObsAgeBin = rownames.tmp,
TrueAge = 0:accuage
)
}
returndat[["AAK"]] <- AAK
returndat[["age_error_mean"]] <- age_error_mean
returndat[["age_error_sd"]] <- age_error_sd
}
} # end check for ageing error matrices
returndat[["N_ageerror_defs"]] <- N_ageerror_defs
} # end check for NULL output of ageing error info
# get equilibrium yield for newer versions of SS (some 3.24 and all 3.30),
# which have SPR/YPR profile in Report.sso
# (this was previously in Forecast-report.sso, but reading this info
# is no longer supported for those older versions)
if (SS_versionNumeric >= 3.30) {
# 3.30 models have "Finish SPR/YPR profile" followed by some additional comments
yieldraw <- match_report_table("SPR/YPR_Profile", 1, "Finish", -2)
} else {
# 3.24 models and earlier use blank line to end table
yieldraw <- match_report_table("SPR/YPR_Profile", 1)
}
if (!is.null(yieldraw)) {
names <- yieldraw[1, ]
names[names == "SSB/Bzero"] <- "Depletion"
yielddat <- yieldraw[c(2:(as.numeric(length(yieldraw[, 1]) - 1))), ]
yielddat[yielddat == "-nan(ind)"] <- NA # this value sometimes occurs in 3.30 models
names(yielddat) <- names
yielddat <- type.convert(yielddat, as.is = TRUE)
} else {
yielddat <- NA
}
returndat[["equil_yield"]] <- yielddat
# Z at age
# With_fishery
# No_fishery_for_Z=M_and_dynamic_Bzero
Z_at_age <- match_report_table("Z_AT_AGE_Annual_2", 1, header = TRUE)
if (!is.null(Z_at_age)) {
Z_at_age[Z_at_age == "_"] <- NA
# if birth season is not season 1, you can get infinite values
Z_at_age[Z_at_age == "-1.#INF"] <- NA
Z_at_age <- type.convert(Z_at_age, as.is = TRUE)
}
returndat[["Z_at_age"]] <- Z_at_age
if (!is.na(match_report_line("Report_Z_by_area_morph_platoon"))) {
# from 3.30.16.03 onward the old end of the Z_AT_AGE_Annual 1 table
# doesn't work so should just use the blank line
# (not available in early versions)
M_at_age <- match_report_table("Z_AT_AGE_Annual_1", 1, header = TRUE)
} else {
# In earlier versions the M at age table ended with comments
# Note: Z calculated as -ln(Nt+1 / Nt)
# Note: Z calculation for maxage not possible, for maxage-1 includes numbers at maxage, so is approximate
M_at_age <- match_report_table("Z_AT_AGE_Annual_1", 1,
"-ln(Nt+1", -1,
matchcol2 = 5,
header = TRUE
)
}
if (!is.null(M_at_age)) {
M_at_age[M_at_age == "_"] <- NA
# if birth season is not season 1, you can get infinite values
M_at_age[M_at_age == "-1.#INF"] <- NA
M_at_age <- type.convert(M_at_age, as.is = TRUE)
}
returndat[["M_at_age"]] <- M_at_age
# new section added in SSv3.30.16.03
if (is.na(match_report_line("Report_Z_by_area_morph_platoon"))) {
Z_by_area <- NULL
M_by_area <- NULL
} else {
if (!is.na(match_report_line("Report_Z_by_area_morph_platoon_2"))) {
# format associated with 3.30.19 and beyond (separate tables with/without fishery)
Z_by_area <- match_report_table("Report_Z_by_area_morph_platoon_2",
adjust1 = 1,
header = TRUE,
type.convert = TRUE
)
M_by_area <- match_report_table("Report_Z_by_area_morph_platoon_1",
adjust1 = 1,
adjust2 = -3, # remove 2 lines at end ("Note: Z calculated as -ln(Nt+1 / Nt)")
header = TRUE,
type.convert = TRUE
)
} else {
# format associated with 3.30.16.03 to 3.30.18.00 (tables under common header)
Report_Z_by_area_morph_platoon <-
match_report_table("Report_Z_by_area_morph_platoon",
adjust1 = 1,
header = FALSE
)
Z_by_area <- match_report_table("With_fishery",
adjust1 = 1,
"No_fishery_for_Z=M",
adjust2 = -1,
matchcol1 = 2,
matchcol2 = 2,
obj = Report_Z_by_area_morph_platoon,
header = TRUE,
type.convert = TRUE
)
M_by_area <- match_report_table("No_fishery_for_Z=M",
blank_lines = nrow(Report_Z_by_area_morph_platoon) + 1,
adjust1 = 1,
matchcol1 = 2,
obj = Report_Z_by_area_morph_platoon,
header = TRUE,
type.convert = TRUE
)
}
returndat["Z_by_area"] <- list(Z_by_area)
returndat["M_by_area"] <- list(M_by_area)
}
  # Dynamic_Bzero output "with fishery"
  Dynamic_Bzero <- match_report_table("Spawning_Biomass_Report_2", 1)
  # Dynamic_Bzero output "no fishery"
  Dynamic_Bzero2 <- match_report_table("Spawning_Biomass_Report_1", 1)
  if (!is.null(Dynamic_Bzero)) {
    # combine the two tables side by side, dropping the repeated
    # first two columns (year/era) from the no-fishery table
    Dynamic_Bzero <- cbind(Dynamic_Bzero, Dynamic_Bzero2[, -(1:2)])
    # drop the first two rows and convert remaining values to numeric
    Dynamic_Bzero <- type.convert(Dynamic_Bzero[-(1:2), ], as.is = TRUE)
    # if (nareas == 1 & ngpatterns == 1) { # for simpler models, do some cleanup
    if (ncol(Dynamic_Bzero) == 4) {
      names(Dynamic_Bzero) <- c("Yr", "Era", "SSB", "SSB_nofishing")
    }
    if (nareas > 1 & !is.null(ngpatterns) && ngpatterns == 1) { # for spatial models, do some cleanup
      names(Dynamic_Bzero) <- c(
        "Yr", "Era", paste0("SSB_area", 1:nareas),
        paste0("SSB_nofishing_area", 1:nareas)
      )
      # sum spawning biomass across areas, with and without fishing
      Dynamic_Bzero[["SSB"]] <- apply(Dynamic_Bzero[, 2 + 1:nareas], 1, sum)
      Dynamic_Bzero[["SSB_nofishing"]] <-
        apply(Dynamic_Bzero[, 2 + nareas + 1:nareas], 1, sum)
    }
  }
  returndat[["Dynamic_Bzero"]] <- Dynamic_Bzero
# adding stuff to list which gets returned by function
if (comp) {
returndat[["comp_data_exists"]] <- TRUE
returndat[["lendbase"]] <- lendbase
returndat[["sizedbase"]] <- sizedbase
returndat[["agedbase"]] <- agedbase
returndat[["condbase"]] <- condbase
returndat[["ghostagedbase"]] <- ghostagedbase
returndat[["ghostcondbase"]] <- ghostcondbase
returndat[["ghostlendbase"]] <- ghostlendbase
returndat[["ladbase"]] <- ladbase
returndat[["wadbase"]] <- wadbase
returndat[["tagdbase1"]] <- tagdbase1
returndat[["tagdbase2"]] <- tagdbase2
returndat[["morphcompdbase"]] <- morphcompdbase
} else {
returndat[["comp_data_exists"]] <- FALSE
}
# tables on fit to comps and mean age stuff from within Report.sso
returndat[["len_comp_fit_table"]] <- fit_len_comps
returndat[["age_comp_fit_table"]] <- fit_age_comps
returndat[["size_comp_fit_table"]] <- fit_size_comps
returndat[["derived_quants"]] <- der
returndat[["parameters"]] <- parameters
returndat[["Dirichlet_Multinomial_pars"]] <- DM_pars
returndat[["FleetNames"]] <- FleetNames
returndat[["repfiletime"]] <- repfiletime
# type of stock recruit relationship
SRRtype <- rawrep[match_report_line("SPAWN_RECRUIT"), 3]
if (!is.na(SRRtype) && SRRtype == "Function:") {
SRRtype <- as.numeric(rawrep[match_report_line("SPAWN_RECRUIT"), 4])
}
returndat[["SRRtype"]] <- SRRtype
# get "sigma" used by Pacific Council in P-star calculations
SSB_final_Label <- paste0("SSB_", endyr + 1)
if (SSB_final_Label %in% der[["Label"]]) {
SSB_final_EST <- der[["Value"]][der[["Label"]] == SSB_final_Label]
SSB_final_SD <- der[["StdDev"]][der[["Label"]] == SSB_final_Label]
returndat[["Pstar_sigma"]] <- sqrt(log((SSB_final_SD / SSB_final_EST)^2 + 1))
} else {
returndat[["Pstar_sigma"]] <- NULL
}
# get alternative "sigma" based on OFL catch used by Pacific Council
# (added 23 Sept 2019 based on decision by PFMC SSC)
OFL_final_Label <- paste0("OFLCatch_", endyr + 1)
if (OFL_final_Label %in% der[["Label"]]) {
OFL_final_EST <- der[["Value"]][der[["Label"]] == OFL_final_Label]
OFL_final_SD <- der[["StdDev"]][der[["Label"]] == OFL_final_Label]
returndat[["OFL_sigma"]] <- sqrt(log((OFL_final_SD / OFL_final_EST)^2 + 1))
} else {
returndat[["OFL_sigma"]] <- NULL
}
if (covar) {
returndat[["CoVar"]] <- CoVar
returndat[["stdtable"]] <- stdtable
}
# extract parameter lines representing annual recruit devs
recdevEarly <- parameters[substring(parameters[["Label"]], 1, 13) == "Early_RecrDev", ]
early_initage <- parameters[substring(parameters[["Label"]], 1, 13) == "Early_InitAge", ]
main_initage <- parameters[substring(parameters[["Label"]], 1, 12) == "Main_InitAge", ]
recdev <- parameters[substring(parameters[["Label"]], 1, 12) == "Main_RecrDev", ]
recdevFore <- parameters[substring(parameters[["Label"]], 1, 8) == "ForeRecr", ]
recdevLate <- parameters[substring(parameters[["Label"]], 1, 12) == "Late_RecrDev", ]
# empty variable to fill in sections
recruitpars <- NULL
# assign "type" label to each one and identify year
if (nrow(early_initage) > 0) {
early_initage[["type"]] <- "Early_InitAge"
early_initage[["Yr"]] <- startyr - as.numeric(substring(early_initage[["Label"]], 15))
recruitpars <- rbind(recruitpars, early_initage)
}
if (nrow(recdevEarly) > 0) {
recdevEarly[["type"]] <- "Early_RecrDev"
recdevEarly[["Yr"]] <- as.numeric(substring(recdevEarly[["Label"]], 15))
recruitpars <- rbind(recruitpars, recdevEarly)
}
if (nrow(main_initage) > 0) {
main_initage[["type"]] <- "Main_InitAge"
main_initage[["Yr"]] <- startyr - as.numeric(substring(main_initage[["Label"]], 14))
recruitpars <- rbind(recruitpars, main_initage)
}
if (nrow(recdev) > 0) {
recdev[["type"]] <- "Main_RecrDev"
recdev[["Yr"]] <- as.numeric(substring(recdev[["Label"]], 14))
recruitpars <- rbind(recruitpars, recdev)
}
if (nrow(recdevFore) > 0) {
recdevFore[["type"]] <- "ForeRecr"
recdevFore[["Yr"]] <- as.numeric(substring(recdevFore[["Label"]], 10))
recruitpars <- rbind(recruitpars, recdevFore)
}
if (nrow(recdevLate) > 0) {
recdevLate[["type"]] <- "Late_RecrDev"
recdevLate[["Yr"]] <- as.numeric(substring(recdevLate[["Label"]], 14))
recruitpars <- rbind(recruitpars, recdevLate)
}
# sort by year and remove any retain only essential columns
if (!is.null(recruitpars)) {
recruitpars <- recruitpars[
order(recruitpars[["Yr"]]),
c("Value", "Parm_StDev", "type", "Yr")
]
}
# add recruitpars to list of stuff that gets returned
returndat[["recruitpars"]] <- recruitpars
if (is.null(recruitpars)) {
sigma_R_info <- NULL
} else {
# calculating values related to tuning SigmaR
sigma_R_info <- data.frame(
period = c("Main", "Early+Main", "Early+Main+Late"),
N_devs = 0,
SD_of_devs = NA,
Var_of_devs = NA,
mean_SE = NA,
mean_SEsquared = NA
)
# calculate recdev stats for Main period
subset <- recruitpars[["type"]] %in% c("Main_InitAge", "Main_RecrDev")
within_period <- sigma_R_info[["period"]] == "Main"
sigma_R_info[["N_devs"]][within_period] <- sum(subset)
sigma_R_info[["SD_of_devs"]][within_period] <- sd(recruitpars[["Value"]][subset])
sigma_R_info[["mean_SE"]][within_period] <- mean(recruitpars[["Parm_StDev"]][subset])
sigma_R_info[["mean_SEsquared"]][within_period] <-
mean((recruitpars[["Parm_StDev"]][subset])^2)
# calculate recdev stats for Early+Main periods
subset <- recruitpars[["type"]] %in% c(
"Early_RecrDev", "Early_InitAge",
"Main_InitAge", "Main_RecrDev"
)
within_period <- sigma_R_info[["period"]] == "Early+Main"
sigma_R_info[["N_devs"]][within_period] <- sum(subset)
sigma_R_info[["SD_of_devs"]][within_period] <- sd(recruitpars[["Value"]][subset])
sigma_R_info[["mean_SE"]][within_period] <- mean(recruitpars[["Parm_StDev"]][subset])
sigma_R_info[["mean_SEsquared"]][within_period] <-
mean((recruitpars[["Parm_StDev"]][subset])^2)
# calculate recdev stats for Early+Main+Late periods
subset <- recruitpars[["type"]] %in% c(
"Early_RecrDev", "Early_InitAge",
"Main_InitAge", "Main_RecrDev", "Late_RecrDev"
)
within_period <- sigma_R_info[["period"]] == "Early+Main+Late"
sigma_R_info[["N_devs"]][within_period] <- sum(subset)
sigma_R_info[["SD_of_devs"]][within_period] <- sd(recruitpars[["Value"]][subset])
sigma_R_info[["mean_SE"]][within_period] <- mean(recruitpars[["Parm_StDev"]][subset])
sigma_R_info[["mean_SEsquared"]][within_period] <-
mean((recruitpars[["Parm_StDev"]][subset])^2)
# add variance as square of SD
sigma_R_info[["Var_of_devs"]] <- sigma_R_info[["SD_of_devs"]]^2
# add sqrt of sum
sigma_R_info[["sqrt_sum_of_components"]] <- sqrt(sigma_R_info[["Var_of_devs"]] +
sigma_R_info[["mean_SEsquared"]])
# ratio of sqrt of sum to sigmaR
sigma_R_info[["SD_of_devs_over_sigma_R"]] <- sigma_R_info[["SD_of_devs"]] / sigma_R_in
sigma_R_info[["sqrt_sum_over_sigma_R"]] <- sigma_R_info[["sqrt_sum_of_components"]] / sigma_R_in
sigma_R_info[["alternative_sigma_R"]] <- sigma_R_in * sigma_R_info[["sqrt_sum_over_sigma_R"]]
# if there's no uncertainty in the recdevs (probably because of -nohess)
# then don't report alternative sigma R values
# could also use [["log_det_hessian"]] as the filter
sigma_R_info[["alternative_sigma_R"]][sigma_R_info[["mean_SE"]] == 0] <- "needs_Hessian"
}
stats[["sigma_R_in"]] <- sigma_R_in
stats[["sigma_R_info"]] <- sigma_R_info
stats[["rmse_table"]] <- rmse_table
stats[["RecDev_method"]] <- RecDev_method
# process adjustments to recruit devs
RecrDistpars <- parameters[substring(parameters[["Label"]], 1, 8) == "RecrDist", ]
returndat[["RecrDistpars"]] <- RecrDistpars
# adding read of wtatage file
returndat[["wtatage"]] <- wtatage
# adding new jitter info table
returndat[["jitter_info"]] <- jitter_info
# add list of stats to list that gets returned
returndat <- c(returndat, stats)
# add info on semi-parametric selectivity deviations
returndat[["seldev_pars"]] <- seldev_pars
returndat[["seldev_matrix"]] <- seldev_matrix
# print list of statistics
if (printstats) {
message("\nStatistics shown below (to turn off, change input to printstats=FALSE)")
# remove scientific notation (only for display, not returned values,
# which were added to returndat already)
stats[["likelihoods_used"]] <- format(stats[["likelihoods_used"]], scientific = 20)
stats[["estimated_non_dev_parameters"]] <- format(stats[["estimated_non_dev_parameters"]],
scientific = 20
)
print(stats)
}
# add log file to list that gets returned
returndat[["logfile"]] <- logfile
# return the inputs to this function so they can be used by SS_plots
# or other functions
inputs <- list()
inputs[["dir"]] <- dir
inputs[["repfile"]] <- repfile
inputs[["forecast"]] <- forecast
inputs[["warn"]] <- warn
inputs[["covar"]] <- covar
inputs[["verbose"]] <- verbose
returndat[["inputs"]] <- inputs
if (verbose) {
message("completed SS_output")
}
invisible(returndat)
} # end function
|
da40b1d3e5d3143f62828d7878edb0899e22af50 | 936382417fd1a2fc071e004a2da206cfba62bff2 | /man/scale_color_vatech.Rd | cef3d3d7ebad0920dc96ef50e11ad53740accfa0 | [] | no_license | McCartneyAC/university | 7f2575868abe45ec87bfefc361aa5a3709f16424 | 7770fe7c9de6226056929c2f7e747d1d6c124fa2 | refs/heads/master | 2021-06-30T08:38:24.535591 | 2020-10-05T18:30:29 | 2020-10-05T18:30:29 | 171,352,272 | 6 | 0 | null | null | null | null | UTF-8 | R | false | true | 329 | rd | scale_color_vatech.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scale_color_vatech.R
\name{scale_color_vatech}
\alias{scale_color_vatech}
\title{Scale Colors as Virginia Polytechnic}
\usage{
scale_color_vatech(palette = c("vatech"), alpha = 1, ...)
}
\description{
Scale Colors as Virginia Polytechnic
}
|
9e7fe94d5abcb535baf5f9a8575bd9e0fc9b8823 | 40176bf2c09c491aef2492be832fa31b9fc3f4a2 | /helpers/save_as_geotiff.R | a851272e7b3da6e2512d2b22458a994ca8b6ba93 | [] | no_license | ihough/temperature-france | 1b5dadccf0bbcfc801a8b31f7a443c7e1d743867 | 97dec45db3d7b620f8e0da93540064df9ab461e5 | refs/heads/master | 2021-01-19T22:46:41.639349 | 2018-02-21T10:18:58 | 2018-02-21T10:18:58 | 88,866,625 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,096 | r | save_as_geotiff.R | library(raster)
source("helpers/report.R")
save_as_geotiff <- function(extracted_data, grid_pixels, tif_path) {
  # The extracted rows must line up one-to-one with the grid pixels;
  # otherwise the column assignment below would silently misalign values.
  if (!identical(grid_pixels$index, extracted_data$index)) {
    stop("extracted_data$index != grid_pixels$index")
  }
  # Rasters cannot store strings, so character columns must be excluded
  # (they would otherwise be coerced to NA).
  all_cols <- colnames(extracted_data)
  excluded <- all_cols[sapply(extracted_data, class) == "character"]
  # Report when a character column other than an id is being dropped.
  if (any(!grepl("(^|_)id$", excluded))) {
    report(paste("Raster cannot contain strings; excluding", excluded))
  }
  # Copy every remaining column (except the grid index itself) onto the
  # spatial pixels object, then write the multi-band raster as a GeoTIFF.
  cols_to_add <- setdiff(setdiff(all_cols, excluded), "index")
  grid_pixels@data[cols_to_add] <- extracted_data[, cols_to_add, with = FALSE]
  writeRaster(stack(grid_pixels), tif_path, overwrite = TRUE)
}
|
9cba95ebb9b1d14149f04c19a3141109050534e4 | 292913980173140e473e5d79159f0a632014d339 | /baser/plot4.R | b826aa4730f0b181d487a2452c8d1f371862116d | [] | no_license | robertncrampton/ExData_Plotting1 | 6b8932dbb6f51e27969f05c249450701b1caff55 | 366d51601c7b774f4296e2cdc6f0305b1f46af2d | refs/heads/master | 2020-12-25T04:53:30.272765 | 2016-02-28T18:39:29 | 2016-02-28T18:39:29 | 52,730,633 | 0 | 0 | null | 2016-02-28T16:23:23 | 2016-02-28T16:23:23 | null | UTF-8 | R | false | false | 979 | r | plot4.R | ##Read the data from the text file into R
## Read the household power data from the text file into R.
## Fix: the original called `readtable()`, which does not exist; the
## base function is `read.table()`. Values are ";"-separated with a header.
hp <- read.table("hp.txt", header = TRUE, sep = ";")

## Subset to only the rows that are over the time period we're looking at
hpnew <- hp[66637:69516, ]

## Create a new column that combines the Date and Time variables
hpnew$Date.Time <- paste(hpnew$Date, hpnew$Time)

## Parse to POSIXct (POSIXlt does not store well in a data frame column).
## Fix: the original format string had an unclosed quote (syntax error).
## NOTE(review): assumes dates are day/month/year as in the UCI household
## power data set -- confirm against the actual file contents.
hpnew$Date.Time <- as.POSIXct(strptime(hpnew$Date.Time,
                                       format = "%d/%m/%Y %H:%M:%S"))

## Draw the 2 x 2 panel of plots into a PNG file
png("plot4.png")
par(mfrow = c(2, 2))
plot(hpnew$Date.Time, hpnew$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
## Fix: the Voltage panel had a copy-pasted "Global Active Power" y label.
plot(hpnew$Date.Time, hpnew$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")
plot(hpnew$Date.Time, hpnew$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(hpnew$Date.Time, hpnew$Sub_metering_2, col = "red")
lines(hpnew$Date.Time, hpnew$Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       bty = "n")
plot(hpnew$Date.Time, hpnew$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")

## Fix: close the graphics device so the PNG file is actually written.
dev.off()
|
3895c909d56a866435c696d89c1b0c05e9686de3 | 02ba845f08038f1b0fb6d8d03d2affc6c820a7a3 | /man/subset_focals.Rd | 578d63992fccc289bf3210b374f0bbacac202a25 | [] | no_license | amboseli/ramboseli | 21b238dd61b2f1d63b3e19abaec2cb6ea58eb2af | 2b0bcc264d08f0dfef3f97e801382dee6da78f8b | refs/heads/master | 2023-03-16T06:31:27.022772 | 2021-03-12T01:14:11 | 2021-03-12T01:14:11 | 103,771,434 | 0 | 3 | null | null | null | null | UTF-8 | R | false | true | 621 | rd | subset_focals.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biographical-data.R
\name{subset_focals}
\alias{subset_focals}
\title{Obtain a subset of adult female focal samples that excludes behavioral observation gaps.}
\usage{
subset_focals(babase, members_l)
}
\arguments{
\item{babase}{A DBI connection to the babase database}
\item{members_l}{A subset of members table produced by the function 'subset_members'}
}
\value{
A subset of focal samples that excludes behavioral observation gaps.
}
\description{
Obtain a subset of adult female focal samples that excludes behavioral observation gaps.
}
|
7dd3d9b2a3d72dbdc48d524618c4a61c466c4aa7 | 9262e777f0812773af7c841cd582a63f92d398a4 | /inst/userguide/figures/CS7--Cs25_prep-covariates.R | bd93d3f82e986b6321ca9ecf2919c7d287ef3004 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | nwfsc-timeseries/MARSS | f0124f9ba414a28ecac1f50c4596caaab796fdd2 | a9d662e880cb6d003ddfbd32d2e1231d132c3b7e | refs/heads/master | 2023-06-07T11:50:43.479197 | 2023-06-02T19:20:17 | 2023-06-02T19:20:17 | 438,764,790 | 1 | 2 | NOASSERTION | 2023-06-02T19:17:41 | 2021-12-15T20:32:14 | R | UTF-8 | R | false | false | 368 | r | CS7--Cs25_prep-covariates.R | ###################################################
### code chunk number 28: Cs25_prep-covariates
###################################################
# transpose to make time go across columns
# drop=FALSE so that R doesn't change our matrix to a vector
# Log-transform the phosphorus covariate and transpose so that time runs
# across the columns (drop = FALSE keeps the single-column result a matrix
# so that t() yields a 1 x T matrix rather than a plain vector).
phos <- t(log(ivesDataByWeek[, "Phosph", drop = FALSE]))
# Center the covariate by subtracting each row's mean; na.rm = TRUE skips
# weeks with missing observations when computing the mean.
d.phos <- (phos - apply(phos, 1, mean, na.rm = TRUE))
|
3ad4c843d04c2e078032081aa21eda7be3544139 | a85e536f8cbe2af99fab307509920955bd0fcf0a | /R/mplot.R | cbd70ca46963f969dc83c62db7b569fb8d130758 | [] | no_license | ProjectMOSAIC/mosaic | 87ea45d46fb50ee1fc7088e42bd35263e3bda45f | a64f2422667bc5f0a65667693fcf86d921ac7696 | refs/heads/master | 2022-12-13T12:19:40.946670 | 2022-12-07T16:52:46 | 2022-12-07T16:52:46 | 3,154,501 | 71 | 27 | null | 2021-02-17T21:52:00 | 2012-01-11T14:58:31 | HTML | UTF-8 | R | false | false | 25,293 | r | mplot.R | utils::globalVariables(c('pair','lwr','upr','fitted','.resid',
'.std.resid', '.cooksd', '.fitted',
'lower', 'upper',
'fcoef', 'density', 'probability',
'.hat', 'grid.arrange', 'estimate','se'))
#' @importFrom ggplot2 fortify
#' @importFrom stats qqnorm
# #' @importFrom broom augment
#'
NA
#' Generic plotting
#'
#' Generic function plotting for R objects. Currently plots exist for
#' `data.frame`s, `lm`s, (including `glm`s).
#'
#' @rdname mplot
#' @param object an R object from which a plot will be constructed.
#' @param data_text text representation of the data set. In typical use cases, the default value should suffice.
#' @export
mplot <- function(object, ...) {
  # Data frames bypass S3 dispatch and go straight to the interactive
  # mPlot() interface; the deparsed call is passed along so generated
  # code can refer to the data set by name.
  if (inherits(object, "data.frame")) {
    data_text <- rlang::expr_deparse(substitute(object))
    return(mPlot(object, ..., data_text = data_text))
  }
  # Everything else uses ordinary S3 method dispatch.
  UseMethod("mplot")
}
#' @rdname mplot
#' @export
mplot.default <- function(object, ...) {
  # Fallback method: no mplot() implementation exists for this class.
  # Each note is emitted as its own message() condition, matching the
  # original behavior (two separate message signals).
  notes <- c(
    "mplot() doesn't know how to handle this kind of input.",
    'use methods("mplot") to see a list of available methods.'
  )
  for (note in notes) message(note)
}
#' @rdname mplot
#' @param data a data frame containing the variables that might be used in the plot.
# Note that for maps, the data frame must contain coordinates of the polygons
# comprising the map and a variable for determining which coordinates are part
# of the same region. See \code{\link{sp2df}} for one way to create such
# a data frame. Typically \code{\link{merge}} will be used to combine the map
# data with some auxiliary data to be displayed as fill color on the map, although
# this is not necessary if all one wants is a map.
#' @param format,default default type of plot to create; one of
#' `"scatter"`,
#' `"jitter"`,
#' `"boxplot"`,
#' `"violin"`,
#' `"histogram"`,
#' `"density"`,
#' `"frequency polygon"`,
#' or
# \code{"xyplot"}.
# or
#' `"map"`.
#' Unique prefixes suffice.
#' @param system which graphics system to use (initially) for plotting (\pkg{ggplot2}
#' or \pkg{lattice}). A check box will allow on the fly change of plotting system.
#' @param show a logical, if `TRUE`, the code will be displayed each time the plot is
#' changed.
#' @return Nothing. Just for side effects.
#' @param which a numeric vector used to select from 7 potential plots
#' @param ask if TRUE, each plot will be displayed separately after the user
#' responds to a prompt.
#' @param multiplot if TRUE and `ask == FALSE`, all plots will be
#' displayed together.
#' @param title title for plot
#' @param ... additional arguments. If `object` is an `lm`, subsets
#' of these arguments are passed to `gridExtra::grid.arrange` and to the
#' \pkg{lattice} plotting routines; in particular,
#' `nrow` and `ncol` can be used to control the number of rows
#' and columns used.
#' @param id.nudge a numeric used to increase (>1) or decrease (<1) the amount that observation labels are
#' nudged. Use a negative value to nudge down instead of up.
#' @param id.n Number of id labels to display.
#' @param id.size Size of id labels.
#' @param id.color Color of id labels.
#' @param add.smooth A logicial indicating whether a LOESS smooth should be added
#' (where this makes sense to do).
#' Currently ignored for lattice plots.
#' @param span A positive number indicating the amount of smoothing.
#' A larger number indicates more smoothing. See [`stats::loess()`] for details.
#' Currently ignored for lattice plots.
#' @param smooth.color,smooth.size,smooth.alpha Color, size, and alpha used for
#' LOESS curve. Currently ignored for lattice plots.
#' @details
#' The method for models (lm and glm) is still a work in progress, but should be usable for
#' relatively simple models. When the results for a logistic regression model created with
#' [glm()] are satisfactory will depend on the format and structure of the data
#' used to fit the model.
#'
#' Due to a bug in RStudio 1.3, the method for data frames may not display the controls
#' consistently. We have found that executing this code usually fixes the problem:
#'
#' ```
#' library(manipulate)
#' manipulate(plot(A), A = slider(1, 10))
#' ```
#'
#'
#' @examples
#' lm( width ~ length * sex, data = KidsFeet) %>%
#' mplot(which = 1:3, id.n = 5)
#' lm( width ~ length * sex, data = KidsFeet) %>%
#' mplot(smooth.color = "blue", smooth.size = 1.2, smooth.alpha = 0.3, id.size = 3)
#' lm(width ~ length * sex, data = KidsFeet) %>%
#' mplot(rows = 2:3, which = 7)
# #' @importFrom ggrepel geom_text_repel
#' @export
mplot.lm <-
  function(
    object,
    which = c(1:3, 7),
    system = c("ggplot2", "lattice", "base"),
    ask = FALSE,
    multiplot = "package:gridExtra" %in% search(),
    par.settings = theme.mosaic(),
    level = .95,
    title = paste("model: ", deparse(object$call), "\n"),
    rows = TRUE,
    id.n = 3L,
    id.size = 5,
    id.color = "red",
    id.nudge = 1,
    add.smooth = TRUE,
    smooth.color = "red",
    smooth.alpha = 0.6,
    smooth.size = 0.7,
    span = 3/4,
    ...) {
  # Builds up to 7 diagnostic plots for an lm/glm fit (residuals vs fitted,
  # normal Q-Q, scale-location, Cook's distance, residuals vs leverage,
  # Cook's dist vs leverage, coefficient confidence intervals), in either
  # ggplot2 or lattice, then returns/arranges the subset selected by `which`.
  system <- match.arg(system)
  check_installed('ggrepel')
  # Optional LOESS smooth layer shared by several ggplot2 panels;
  # geom_blank() is a no-op stand-in when add.smooth = FALSE.
  geom_smooth_or_not <-
    if (add.smooth)
      geom_line(stat = "smooth", method = "loess", span = span,
                alpha = smooth.alpha, color = smooth.color, size = smooth.size)
    else
      geom_blank()
  # Extra args are forwarded to the lattice calls; only a single color
  # makes sense there, so trim `col` to its first element.
  dots <- list(...)
  if ("col" %in% names(dots)) {
    dots$col <- dots$col[1]
  }
  if (multiplot && ! "package:gridExtra" %in% search()) {
    message("multiplot = TRUE only works when 'gridExtra' is loaded.")
    message("  I'm setting multiplot = FALSE and continuing.")
    multiplot <- FALSE
  }
  # Base graphics delegate entirely to stats::plot.lm (plots 1-6 only).
  if (system == "base") {
    return(plot( object, which = intersect(which, 1:6)))
  }
  # Per-observation diagnostics (.fitted, .resid, .std.resid, .hat,
  # .cooksd) come from broom; .row is the observation number used for
  # labeling extreme points.
  rlang::check_installed('broom')
  fdata <- broom::augment(object)
  fdata <-
    fdata %>%
    mutate(
      .row = 1L:nrow(fdata)
    )
  # broom::augment() does not always supply .resid :-/
  if (is.null(fdata[[".resid"]])) {
    fdata <- fdata %>% mutate(.resid = resid(object))
  }
  # Observations with undefined standardized residuals (leverage 1) are
  # dropped from the plots that use .std.resid; warn when that happens.
  fdata_clean <- fdata %>% filter(!is.na(.std.resid))
  removed_idx <- which(fdata$.hat >= 1)
  if (any(c(2, 3, 5, 6) %in% which) && length(removed_idx)) {
    warning("Observations with leverage 1 not plotted: ",
            paste(removed_idx, collapse = ", "),
            call. = FALSE)
  }
  # fdata <- cbind(fdata, row = 1:nrow(fdata))
  if (!inherits(object, "lm"))
    stop("use only with \"lm\" objects")
  if (!is.numeric(which) || any(which < 1) || any(which > 7))
    stop("'which' must be in 1:7")
  isGlm <- inherits(object, "glm")
  show <- rep(FALSE, 7)
  show[which] <- TRUE
  # y-axis label for plots 2 and 3 depends on the model family.
  ylab23 <- if (isGlm)
    "Std. deviance resid."
  else "Standardized residuals"
  # Plot 1: residuals vs fitted (g* = ggplot2 version, l* = lattice version).
  g1 <- ggplot(fdata, aes(.fitted, .resid)) +
    geom_point() +
    geom_smooth_or_not +
    geom_hline(linetype = 2, size = .2, yintercept = 0) +
    ggrepel::geom_text_repel(
      data = fdata %>% arrange(-abs(.std.resid)) %>% head(id.n),
      aes(label = .row),
      color = id.color,
      segment.color = id.color,
      size = id.size) +
    scale_x_continuous("Fitted Value") +
    scale_y_continuous("Residual") +
    labs(title = "Residuals vs Fitted")
  l1 <- do.call(xyplot,
                c(list( .std.resid ~ .fitted, data = fdata,
                        type = c("p","smooth"),
                        panel = function(x,y,...) {
                          panel.abline(h = 0, linetype = 2, lwd = .5)
                          panel.xyplot(x,y,...)
                        },
                        main = "Residuals vs Fitted",
                        xlab = "Fitted Value",
                        ylab = "Residual",
                        par.settings = par.settings),
                  dots)
  )
  # Plot 2: normal Q-Q. The reference line passes through the first and
  # third quartiles (same construction as qqline()).
  # remove NAs and NaNs before computing quantiles
  a <- quantile(fdata$.std.resid, c(0.25, 0.75), na.rm = TRUE)
  b <- qnorm(c(0.25, 0.75))
  slope <- diff(a)/diff(b)
  int <- a[1] - slope * b[1]
  QN <-
    as.data.frame(qqnorm(fdata$.std.resid, plot.it = FALSE)) %>%
    mutate(.row = 1:nrow(fdata))
  g2 <- ggplot(fdata_clean, aes(sample = .std.resid)) +
    stat_qq() +
    geom_abline(slope = slope, intercept = int, linetype = "dashed") +
    ggrepel::geom_text_repel(
      inherit.aes = FALSE,
      data = QN %>% arrange(-abs(y)) %>% head(id.n),
      aes(y = y, x = x, label = .row),
      color = id.color,
      segment.color = id.color,
      size = id.size) +
    scale_x_continuous("Theoretical Quantiles") +
    scale_y_continuous("Standardized Residuals") +
    labs(title = "Normal Q-Q")
  l2 <- do.call(qqmath,
                c(list( ~ .std.resid, data = fdata_clean,
                        panel = function(x,...) {
                          panel.abline(a = int, b = slope)
                          panel.qqmath(x,...)
                        },
                        main = "Normal Q-Q",
                        xlab = "Theoretical Quantiles",
                        ylab = ylab23,
                        par.settings = par.settings),
                  dots)
  )
  # Plot 3: scale-location (sqrt of |standardized residual| vs fitted).
  g3 <- ggplot(fdata_clean, aes(.fitted, sqrt(abs(.std.resid)))) +
    geom_point() +
    geom_smooth_or_not +
    ggrepel::geom_text_repel(
      data = fdata_clean %>% arrange(-abs(.std.resid)) %>% head(id.n),
      aes(label = .row),
      color = id.color,
      segment.color = id.color,
      size = id.size) +
    scale_x_continuous("Fitted Values") +
    scale_y_continuous(as.expression(
      substitute(sqrt(abs(YL)), list(YL = as.name(ylab23))) )) +
    labs(title = "Scale-Location")
  l3 <- do.call(xyplot,
                c(list( sqrt(abs(.std.resid)) ~ .fitted, data = fdata_clean,
                        type = c("p","smooth"),
                        main = "Scale-Location",
                        xlab = "Fitted Value",
                        ylab = as.expression(
                          substitute(sqrt(abs(YL)), list(YL = as.name(ylab23)))
                        ),
                        par.settings = par.settings),
                  dots)
  )
  # Plot 4: Cook's distance by observation number.
  g4 <-
    ggplot(data = fdata, aes(.row, .cooksd, ymin = 0, ymax = .cooksd)) +
    geom_point() +
    geom_linerange() +
    scale_x_continuous("Observation Number", limits = c(0, NA)) +
    scale_y_continuous("Cook's distance") +
    labs(title = "Cook's Distance")
  if (id.n > 0L) {
    g4 <- g4 +
      ggrepel::geom_text_repel(
        data = fdata %>% arrange(-abs(.cooksd)) %>% head(id.n),
        aes(x = .row, y = .cooksd, label = .row),
        color = id.color,
        segment.color = id.color,
        size = id.size)
  }
  l4 <- do.call( xyplot,
                 c(list( .cooksd ~ .row, data = fdata,
                         type = c("p","h"),
                         main = "Cook's Distance",
                         xlab = "Observation number",
                         ylab = "Cook's distance",
                         par.settings = par.settings),
                   dots)
  )
  # Plot 5: standardized residuals vs leverage.
  g5 <-
    ggplot(fdata_clean, aes(x = .hat, y = .std.resid)) +
    geom_point() +
    geom_smooth_or_not +
    ggrepel::geom_text_repel(
      data = fdata_clean %>% arrange(-abs(.std.resid)) %>% head(id.n),
      aes(label = .row),
      color = id.color,
      segment.color = id.color,
      size = id.size) +
    geom_hline(linetype = 2, size = .2, yintercept = 0) +
    labs(title = "Residuals vs Leverage",
         x = "Leverage",
         y = "Standardized Residual") +
    lims(x = c(0, NA))
  l5 <- do.call( xyplot,
                 c(list( .std.resid ~ .hat, data = fdata_clean,
                         type = c('p','smooth'),
                         panel = function(x,y,...) {
                           panel.abline( h = 0, lty = 2, lwd = .5)
                           panel.xyplot( x, y, ...)
                         },
                         main = "Residuals vs Leverage",
                         xlab = "Leverage",
                         ylab = "Standardized Residuals",
                         par.settings = par.settings),
                   dots)
  )
  # Plot 6: Cook's distance vs leverage.
  g6 <- ggplot(fdata_clean, aes(.hat, .cooksd)) +
    geom_point() +
    geom_smooth_or_not +
    ggrepel::geom_text_repel(
      data = fdata_clean %>% arrange(-abs(.std.resid)) %>% head(id.n),
      aes(label = .row),
      color = id.color,
      segment.color = id.color,
      size = id.size) +
    scale_x_continuous("Leverage") +
    scale_y_continuous("Cook's distance") +
    labs(title = "Cook's dist vs Leverage")
  l6 <- do.call(xyplot,
                c(list( .cooksd ~ .hat, data = fdata_clean,
                        type = c("p", "smooth"),
                        main = "Cook's dist vs Leverage",
                        xlab = "Leverage",
                        ylab = "Cook's distance",
                        par.settings = par.settings),
                  dots)
  )
  # Plot 7: coefficient confidence intervals (delegates to
  # mplot.summary.lm / mplot.summary.glm).
  g7 <- mplot(summary(object), level = level, rows = rows, ..., system = "ggplot2")
  l7 <- mplot(summary(object), level = level, rows = rows, ..., system = "lattice")
  plots <- if (system == "ggplot2") {
    list(g1, g2, g3, g4, g5, g6, g7)
  } else {
    lapply( list(l1, l2, l3, l4, l5, l6, l7),
            function(x) update(x, par.settings = par.settings))
  }
  plots <- plots[which]
  if (ask) {
    for (p in plots) {
      readline("Hit <RETURN> for next plot")
      print(p)
    }
  }
  if (multiplot) {
    rlang::check_installed('gridExtra')
    # Only pass through the ... arguments that grid.arrange/grid.layout
    # actually accept (e.g. nrow, ncol).
    dots <- list(...)
    nn <- intersect(
      union(names(formals(gridExtra::arrangeGrob)), names(formals(grid.layout))),
      names(dots)
    )
    dots <- dots[ nn ]
    return(do.call(gridExtra::grid.arrange, c(plots, dots)))
    # NOTE(review): the code below is unreachable because of the return()
    # just above -- it appears to be a leftover alternative implementation.
    result <- do.call(
      gridExtra::arrangeGrob,
      c(plots, dots) # , c(list(main = title), dots))
    )
    plot(result)
    return(result)
  }
  # Question: should a single plot be returned as is or in a list of length 1?
  if (length(plots) == 1) {
    return(plots[[1]])
  }
  return(plots)
}
#' @rdname mplot
#' @examples
#' \dontrun{
#' mplot( HELPrct )
#' mplot( HELPrct, "histogram" )
#' }
#' @export
mplot.data.frame <-
  function(
    object, format, default = format,
    system = c("ggformula", "ggplot2", "lattice"),
    show = FALSE,
    data_text = rlang::expr_deparse(substitute(object)),
    title = "", ...
  ) {
    # Delegate to the interactive mPlot() interface, passing along a text
    # representation of the data set so the generated code can refer to it
    # by name.
    # Fix: removed a stray debugging `print(data_text)` that echoed the
    # data set name to the console on every call.
    mPlot(object,
      format = format, default = default, system = system,
      show = show, title = title, data_text = data_text, ...
    )
  }
# plotTypes <- c('scatter', 'jitter', 'boxplot', 'violin', 'histogram',
# 'density', 'frequency polygon', 'xyplot')
# if (missing(default) & missing(format)) {
# choice <-
# menu(title = "Choose a plot type.",
# choices = c(
# "1-variable (histogram, density plot, etc.)",
# "2-variable (scatter, boxplot, etc.)"
# )
# )
# default <- c("histogram", "scatter") [choice]
# }
# default <- match.arg(default, plotTypes)
# system <- match.arg(system)
#
# dataName <- substitute(object)
# if (default == "xyplot")
# default <- "scatter"
# if (default %in% c("scatter", "jitter", "boxplot", "violin")) {
# return(
# mScatter(lazy_data, default = default, system = system, show = show, title = title)
# )
# }
# # if (default == "map") {
# # return(eval(parse(
# # text = paste("mMap(", dataName,
# # ", default = default, system = system, show = show, title = title)"))
# # ))
# # }
# return(eval(parse(
# text = paste("mUniplot(", dataName,
# ", default = default, system = system, show = show, title = title)"))
# ))
# }
#' Extract data from R objects
#'
#' @rdname fortify
#' @param level confidence level
#' @param ... additional arguments
#' @export
fortify.summary.lm <- function(model, data = NULL, level = 0.95, ...) {
  # Coefficient table as a data frame; one row per model term.
  res <- as.data.frame(coef(model, level = level))
  # The third column holds the test statistic; keep only the word before
  # the first space (e.g. "t" from "t value") as its short name.
  stat_name <- strsplit(names(res)[3], split = " ")[[1]][1]
  names(res) <- c("estimate", "se", "stat", "pval")
  # Record the term names and the statistic name as ordinary columns.
  res$coef <- row.names(res)
  res$statName <- stat_name
  # t-based confidence limits for the requested level.
  ci <- confint(model, level = level, ...)
  res$lower <- ci[, 1]
  res$upper <- ci[, 2]
  res$level <- level
  res
}
#' @rdname fortify
#' @export
fortify.summary.glm <- function(model, data = NULL, level = 0.95, ...) {
  # Coefficient table as a data frame; one row per model term.
  res <- as.data.frame(coef(model, level = level))
  # The third column holds the test statistic; keep only the word before
  # the first space (e.g. "z" from "z value") as its short name.
  stat_name <- strsplit(names(res)[3], split = " ")[[1]][1]
  names(res) <- c("estimate", "se", "stat", "pval")
  res$coef <- row.names(res)
  res$statName <- stat_name
  # Wald (normal-theory) confidence limits for the requested level.
  res$lower <- res$estimate + qnorm((1 - level) / 2) * res$se
  res$upper <- res$estimate + qnorm(1 - (1 - level) / 2) * res$se
  res$level <- level
  return(res)
}
#' @rdname confint
#' @param object and R object
#' @param parm a vector of parameters
#' @param level a confidence level
#' @examples
#' lm(width ~ length * sex, data = KidsFeet) %>%
#' summary() %>%
#' confint()
#' @export
confint.summary.lm <- function(object, parm, level = 0.95, ...) {
  # Point estimates live in the first column of the coefficient table.
  estimates <- coef(object)[, 1]
  coef_names <- names(estimates)
  # Resolve `parm` to coefficient names: default is all terms; a numeric
  # argument selects terms by position.
  if (missing(parm)) {
    parm <- coef_names
  } else if (is.numeric(parm)) {
    parm <- coef_names[parm]
  }
  # Lower/upper tail probabilities and the matching t quantiles; the
  # residual degrees of freedom are the second element of object$df.
  tails <- (1 - level) / 2
  tails <- c(tails, 1 - tails)
  quantiles <- qt(tails, object$df[2])
  # Column labels like "2.5 %" / "97.5 %", matching stats::confint().
  col_labels <- paste(
    format(100 * tails, digits = 3, trim = TRUE, scientific = FALSE), "%"
  )
  result <- array(NA, dim = c(length(parm), 2L),
                  dimnames = list(parm, col_labels))
  std_errors <- sqrt(diag(vcov(object)))[parm]
  # Outer product gives estimate + se * q for both tails at once.
  result[] <- estimates[parm] + std_errors %o% quantiles
  result
}
#' @rdname mplot
#' @param level a confidence level
#' @param par.settings \pkg{lattice} theme settings
#' @param rows rows to show. This may be a numeric vector,
#' `TRUE` (for all rows), or a character vector of row names.
#' @examples
#' lm(width ~ length * sex, data = KidsFeet) %>%
#' summary() %>%
#' mplot()
#'
#' lm(width ~ length * sex, data = KidsFeet) %>%
#' summary() %>%
#' mplot(rows = c("sex", "length"))
#'
#' lm(width ~ length * sex, data = KidsFeet) %>%
#' summary() %>%
#' mplot(rows = TRUE)
#' @export
mplot.summary.lm <- function(object,
                             system = c("ggplot2", "lattice"),
                             level = 0.95,
                             par.settings = trellis.par.get(),
                             rows = TRUE,
                             ...){
  # Plots the model coefficients with confidence intervals (one row per
  # term, intervals drawn horizontally), flagging terms whose p-value is
  # below 1 - level.  Builds both a ggplot2 and a lattice version and
  # returns the one matching `system`.
  system <- match.arg(system)
  # fortify() turns the summary into a data frame with estimate / lower /
  # upper / pval columns; fcoef preserves the original term order.
  fdata <- fortify(object, level = level) %>%
    mutate(signif = pval < (1-level),
           fcoef = factor(coef, levels = coef)
    )
  # Select the requested rows (by name, position, or TRUE for all) and
  # reverse them so the first coefficient appears at the top of the plot.
  row.names(fdata) <- fdata$coef
  fdata <- fdata[rows, ]
  fdata <- fdata[nrow(fdata):1, ]
  g <- ggplot(data = fdata,
              aes(x = fcoef, y = estimate,
                  ymin = lower, ymax = upper,
                  color = signif)) + # (pval < (1-level)/2))) +
    geom_pointrange(size = 1.2) +
    geom_hline(yintercept = 0, color = "red", alpha = .5, linetype = 2) +
    labs(x = "coefficient", title = paste0(format(100*level), "% confidence intervals") ) +
    theme(legend.position = "none") +
    coord_flip()
  # Lattice version: pick one of two theme colors per row depending on
  # significance (2 - signif maps TRUE -> 1, FALSE -> 2).
  cols <- rep( par.settings$superpose.line$col, length.out = 2)
  cols <- cols[2 - fdata$signif]
  l <- xyplot( fcoef ~ estimate + lower + upper,
               data = fdata,
               fdata = fdata,
               xlab = "estimate",
               ylab = "coefficient",
               main = paste0(format(100 * level), "% confidence intervals"),
               ...,
               panel = function(x, y, fdata, ...) {
                 # Merge user-supplied graphical settings over the
                 # defaults; a user `col` is also mapped by significance.
                 dots <- list(...)
                 if ("col" %in% names(dots)) {
                   dots$col <- rep(dots$col, length.out = 2) [2 - fdata$signif]
                 }
                 dots <- .updateList(
                   list(lwd = 2, alpha = 0.6, cex = 1.4, col = cols),
                   dots
                 )
                 # `type` is meaningless for panel.points/segments below.
                 dots[["type"]] <- NULL
                 # Reference line at 0, then points for the estimates and
                 # horizontal segments for the confidence intervals.
                 panel.abline(v = 0, col = "red", alpha = .5, lty = 2)
                 do.call( panel.points,
                          c( list (x = fdata$estimate, y = y),
                             dots )
                 )
                 do.call( panel.segments,
                          c( list(y0 = y, y1 = y, x0 = fdata$lower,
                                  x1 = fdata$upper),
                             dots )
                 )
               }
  )
  if (system == "ggplot2") {
    return(g)
  } else {
    return(l)
  }
}
#' @export
# glm summaries are plotted exactly like lm summaries: both fortify() to a
# coefficient table with confidence limits, so the method can be shared.
mplot.summary.glm <- mplot.summary.lm
#' @rdname fortify
#' @param model an R object
#' @param data original data set, if needed
#' @param order one of `"pval"`, `"diff"`, or `"asis"` determining the
#' order of the `pair` factor, which determines the order in which the differences
#' are displayed on the plot.
#' @export
# fortify.TukeyHSD <- function(model, data, ...) {
# nms <- names(model)
# l <- length(model)
# plotData <- do.call(
# rbind,
# lapply(seq_len(l), function(i) {
# res <- transform(as.data.frame(model[[i]]),
# var = nms[[i]],
# pair = row.names(model[[i]]) )
# } )
# )
# names(plotData) <- c("diff", "lwr", "upr", "pval", "var", "pair")
# return(plotData)
# }
fortify.TukeyHSD <- function(model, data, order = c("asis", "pval", "difference"), ...) {
  # Flattens a TukeyHSD object (one matrix per model term) into a single
  # data frame with columns diff / lwr / upr / pval / var / pair, where
  # `pair` is a factor whose level order controls plotting order.
  order <- match.arg(order)
  term_names <- names(model)
  # One data frame per term, carrying the term name and the comparison
  # ("pair") labels taken from the matrix row names.
  pieces <- lapply(seq_along(model), function(i) {
    piece <- as.data.frame(model[[i]])
    piece$var <- term_names[[i]]
    piece$pair <- row.names(model[[i]])
    piece
  })
  out <- do.call(rbind, pieces)
  names(out) <- c("diff", "lwr", "upr", "pval", "var", "pair")
  # Reorder the `pair` factor levels by the requested key: original row
  # order ("asis"), adjusted p-value, or size of the difference.
  ordering_key <- switch(order,
    "asis" = seq_len(nrow(out)),
    "pval" = out$pval,
    "difference" = out$diff
  )
  out$pair <- reorder(out$pair, ordering_key)
  return(out)
}
#' @rdname mplot
#' @param xlab label for x-axis
#' @param ylab label for y-axis
#' @param order one of `"pval"`, `"diff"`, or `"asis"` determining the
#' order of the `pair` factor, which determines the order in which the differences
#' are displayed on the plot.
#' @examples
#' lm(age ~ substance, data = HELPrct) %>%
#' TukeyHSD() %>%
#' mplot()
#' lm(age ~ substance, data = HELPrct) %>%
#' TukeyHSD() %>%
#' mplot(system = "lattice")
#' @export
mplot.TukeyHSD <- function(object, system = c("ggplot2", "lattice"),
                        ylab = "", xlab = "difference in means",
                        title = paste0(attr(object, "conf.level") * 100, "% family-wise confidence level"),
                        par.settings = trellis.par.get(),
                        order = c("asis", "pval", "difference"),
                        # which = 1L:2L,
                        ...) {
  # Resolve plotting system and interval ordering to one of the allowed
  # values (the first element is the default).
  system <- match.arg(system)
  order <- match.arg(order)
  # Flatten the TukeyHSD object into a data frame with columns
  # diff / lwr / upr / pval / var / pair (see fortify.TukeyHSD above).
  fdata <- fortify(object, order = order)
  # res <- list()
  if (system == "ggplot2") {
    # One point per pairwise comparison with a segment for its confidence
    # interval; color encodes log10(p-value), a dashed red vertical line
    # marks a zero difference, and facets split by model term ("var").
    # The y factor is reversed so the first comparison appears at the top.
    # if (1 %in% which) {
    p1 <-
      ggplot( data = fdata,
              aes(x = diff, color = log10(pval), y = factor(pair, levels = rev(levels(pair)))) ) +
      geom_point(size = 2) +
      geom_segment(aes(x = lwr, xend = upr, y = pair, yend = pair) ) +
      geom_vline( xintercept = 0, color = "red", linetype = 2, alpha = .5 ) +
      facet_grid( var ~ ., scales = "free_y") +
      labs(x = xlab, y = ylab, title = title)
    # res <- c(res, list(p1))
    # }
    # if (2 %in% which) {
    #   p2 <-
    #     ggplot( data = fdata,
    #             aes(x = diff, color = log10(pval), y = factor(pair, levels = rev(levels(pair)))) ) +
    #     geom_point(size = 2) +
    #     geom_segment(aes(x = lwr, xend = upr, y = pair, yend = pair) ) +
    #     geom_vline( xintercept = 0, color = "red", linetype = 2, alpha = .5 ) +
    #     facet_grid( var ~ ., scales = "free_y") +
    #     labs(x = xlab, y = ylab, title = title)
    #   res <- c(res, list(p2))
    # }
    return(p1)
  }
  # lattice fallback: pick one of two colors per comparison depending on
  # whether the interval crosses zero (sign(lwr) * sign(upr) < 0).
  cols <- par.settings$superpose.line$col[1 +
           as.numeric( sign(fdata$lwr) * sign(fdata$upr) < 0)]
  # The formula stacks diff, lwr, and upr, so inside the panel the x vector
  # holds three equal-length blocks: points (diff) then segment end points
  # (lwr, upr); m below is the length of one block.
  xyplot( factor(pair, levels = rev(levels(pair))) ~ diff + lwr + upr | var, data = fdata,
          panel = function(x,y,subscripts,...) {
            n <- length(x)
            m <- round(n/3)
            panel.abline(v = 0, col = "red", lty = 2, alpha = .5)
            panel.segments(x0 = x[(m+1):(2*m)], x1 = x[(2*m+1):(3*m)], y0 = y, y1 = y, col = cols[subscripts])
            panel.xyplot(x[1:m], y, cex = 1.4, pch = 16, col = cols[subscripts])
          },
          scales = list( y = list(relation = "free", rot = 30) ),
          xlab = xlab,
          ylab = ylab,
          main = title,
          ...
  )
}
|
b7201a8f81aff1414c14247bacb250dc62e26088 | a18669233ba8da5bb14eff99a7efb49d0822f1e4 | /man/li_powell.Rd | b326d547ca851b88ef9512f201950c76d6cc04c7 | [] | no_license | cran/AdjBQR | dd4ffecc9cbf7432cf08414a430f5c78dd243b48 | 892f239ff2cbf52a22e151fc654fda1c916fdf7f | refs/heads/master | 2021-01-13T12:46:49.517358 | 2016-10-30T16:58:03 | 2016-10-30T16:58:03 | 72,359,434 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,226 | rd | li_powell.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/AdjBQR.R
\name{li_powell}
\alias{li_powell}
\title{Asymmetric-Laplace-type Working Likelihood For Censored Quantile Regression}
\usage{
li_powell(pars, y, x, tau, sig)
}
\arguments{
\item{pars}{regression coefficient vector}
\item{y}{the response vector}
\item{x}{the design matrix with one in the first column corresponding to the intercept}
\item{tau}{the quantile level}
\item{sig}{scale parameter sigma}
}
\value{
the working log (asymmetric Laplace-type) likelihood function (the part involving the regression coefficients)
}
\description{
Asymmetric-Laplace-type working likelihood for linear
quantile regression with responses
subject to left censoring at zero
}
\details{
The asymmetric-Laplace-type working likelihood is proportional to exponential
of the negative Powell objective function for censored quantile regression
}
\references{
Powell, J. L. (1986). Censored regression quantiles. Journal of Econometrics, 32, 143-155.
Yang, Y., Wang, H. and He, X. (2015). Posterior inference in Bayesian quantile regression with asymmetric Laplace
likelihood. International Statistical Review, 2015. doi: 10.1111/insr.12114.
}
|
dd7600e274724778f35d1cbb6a6e368c765f98eb | 862c4bca74786b462929176b28f2f54c4021c5ec | /man/compute.atac.network.Rd | 6e7fad2bc6cfaf999e21031d17bd60a367a7bb81 | [] | no_license | iaconogi/bigSCale2 | 1d94d232781f08e28ee2a0c43214798a10cc9301 | e47f0cd4b6374e5bcc52d99f4c50d0671aada811 | refs/heads/master | 2023-07-06T21:52:31.163393 | 2020-07-12T08:52:34 | 2020-07-12T08:52:34 | 169,756,139 | 109 | 42 | null | 2023-07-03T12:18:59 | 2019-02-08T15:31:16 | R | UTF-8 | R | false | true | 3,621 | rd | compute.atac.network.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{compute.atac.network}
\alias{compute.atac.network}
\title{ATAC-seq Gene regulatory network}
\usage{
compute.atac.network(expr.data, feature.file, quantile.p = 0.998)
}
\arguments{
\item{expr.data}{matrix of expression counts. Works also with sparse matrices of the \pkg{Matrix} package.}
\item{quantile.p}{only the first \eqn{1 - quantile.p} correlations are used to create the edges of the network. If the network is too sparse (dense) decrease (increase) \eqn{quantile.p}}
\item{gene.names}{character of gene names, now it supports Gene Symbols or Ensembl, Mouse and Human.}
\item{clustering}{type of clustering and correlations computed to infer the network.
\itemize{
\item {\bold{recursive}} Best quality at the expenses of computational time. If the dataset is larger than 10-15K cells and is highly heterogeneous this can lead to very long computational times (24-48 hours depending of the hardware).
\item {\bold{direct}} Best trade-off between quality and computational time. If you want to get a quick output not much dissimilar from the top quality of \bold{recursive} one use this option. Can handle quickly also large datasets (>15-20K cells in 30m-2hours depending on hardware)
\item {\bold{normal}} To be used if the correlations (the output value \bold{cutoff.p}) detected with either \bold{direct} or \bold{recursive} are too low. At the moment, bigSCale displays a warning if the correlation cutoff is lower than 0.8 and suggests to either use \bold{normal} clustering or increase the input parameter \bold{quantile.p}
}}
\item{speed.preset}{Used only if \code{clustering='recursive'}. It regulates the speed vs. accuracy of the Zscores calculations. To have a better network quality it is recommended to use the default \bold{slow}.
\itemize{
\item {\bold{slow}} {Highly recommended, the best network quality but the slowest computational time.}
\item {\bold{normal}} {A balance between network quality and computational time. }
\item {\bold{fast}} {Fastest computational time, worst network quality.}
}}
\item{previous.output}{previous output of \code{compute.network()} can be passed as input to evaluate networks with a different quantile.p without re-running the code. Check the online tutorial at https://github.com/iaconogi/bigSCale2.}
}
\value{
A list with the following items:
\itemize{
\item {\bold{centrality}} {Main output: a Data-frame with the network centrality (Degree,Betweenness,Closeness,PAGErank) for each gene(node) of the network}
\item {\bold{graph}} {The regulatory network in iGraph object}
\item {\bold{correlations}} {All pairwise correlations between genes. The correlation is an average between \emph{Pearson} and \emph{Spearman}. Note that it is stored in single precision format (to save memory space) using the package \pkg{float32}. To make any operation or plot on the correlations first transform it to the standard double precision by running \code{correlations=dbl(correlations)} }
\item {\bold{cutoff.p}} {The adptive cutoff used to select significant correlations}
\item {\bold{tot.scores}} {The Z-scores over which the correlations are computed. The visually check the correlation between to genes \emph{i} and \emph{j} run \code{plot(tot.scores[,i],tot.scores[,j])} }
\item {\bold{clusters}} {The clusters in which the cells have been partitioned}
\item {\bold{model}} {Bigscale numerical model of the noise}
}
}
\description{
Infers the gene regulatory network from single cell ATAC-seq data
}
\examples{
out=compute.atac.network(expr.data, feature.file)
}
|
c7176532d70ba4c93f9e2927930ce14f26efe93e | eb59d9f92cd907aaad4881992f323cc1529b39fa | /R/testing_prior.R | 31f58483494aa5592bed17fc636d7f46b6c9bb98 | [] | no_license | cran/TeachBayes | a20021b9fd698140185aeb5916a9f5e42b901826 | 47f45100474dd8e8288b06386ca91a16288b5922 | refs/heads/master | 2021-01-11T22:12:37.681770 | 2017-03-25T09:58:44 | 2017-03-25T09:58:44 | 78,935,724 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 460 | r | testing_prior.R | testing_prior <- function(lo=.1, hi=.9, n_values=9,
pequal=0.5, uniform=FALSE){
p1 <- seq(lo, hi, length = n_values)
p2 <- p1
n_diagonal <- n_values
n_off_diag <- n_values ^ 2 - n_values
prior <- matrix(0, n_values, n_values) +
(1 - pequal) / n_off_diag
diag(prior) <- pequal / n_values
if(uniform==TRUE)
prior <- 0 * prior + 1 / n_values ^ 2
dimnames(prior)[[1]] <- p1
dimnames(prior)[[2]] <- p2
prior
}
|
dbd0ccaf610cb2b5501f0569639b563719054b70 | b3c39d9bc7cdd82f225cc1707c69c55513519a1d | /R/zzz.R | b3cdb1144f34d877694a16f84b6123277929a44f | [] | no_license | teyden/MiRKC | d81e02a0e2b349635faea46102b5ee69fbafe740 | 7de32668537ff68d7cbebadafb8d70a338525de9 | refs/heads/master | 2020-12-27T09:21:48.191058 | 2020-06-21T08:22:56 | 2020-06-21T08:22:56 | 237,850,361 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 81 | r | zzz.R | .onLoad <- function(libname, pkg){
  # Register the package's compiled code: library.dynam(chname, package,
  # lib.loc) loads the MiRKC shared library when the package is loaded.
  # The hook's arguments arrive as (libname, pkgname), so `pkg` is the
  # package name and `libname` the library path - passed in that order here.
  library.dynam("MiRKC", pkg, libname)
}
|
228da81481d799db89342c5908c0818aeb697a3a | 9bbdcb3936c5063edf237fe550fba4f5bf0a9b49 | /man/cpBodyGetCenterOfGravity.Rd | 6b53ee54e6945f47bec90da83cbb453e0b5a3e1e | [
"MIT"
] | permissive | coolbutuseless/chipmunkcore | b2281f89683e0b9268f26967496f560ea1b5bb99 | 97cc78ad3a68192f9c99cee93203510e20151dde | refs/heads/master | 2022-12-10T17:56:15.459688 | 2020-09-08T22:40:10 | 2020-09-08T22:40:10 | 288,990,789 | 17 | 1 | null | null | null | null | UTF-8 | R | false | true | 528 | rd | cpBodyGetCenterOfGravity.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpBody.R
\name{cpBodyGetCenterOfGravity}
\alias{cpBodyGetCenterOfGravity}
\title{Get the offset of the center of gravity in body local coordinates.}
\usage{
cpBodyGetCenterOfGravity(body)
}
\arguments{
\item{body}{[\code{cpBody *}]}
}
\value{
[\code{cpVect *}]
}
\description{
Get the offset of the center of gravity in body local coordinates.
}
\details{
C function prototype: \code{CP_EXPORT cpVect cpBodyGetCenterOfGravity(const cpBody *body);}
}
|
d08617fe3a502c0df03652b9acd872c4818a723e | 75c27c5ae72919555352c9b7585723f530edc096 | /manual/KSSL-snapshot-tutorial.R | 3ff12d8d366341330f48d65fa175547f54da4849 | [] | no_license | ncss-tech/lab-data-delivery | d641944c9dc27639e2d30440e7d8d1a3f5419ac7 | a1f22d17a5aa610e30bd256d9ba6bbefe1969e34 | refs/heads/master | 2023-07-28T03:27:36.045632 | 2023-07-08T02:45:05 | 2023-07-08T02:45:05 | 97,866,682 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,996 | r | KSSL-snapshot-tutorial.R | library(DBI)
library(RSQLite)
# connect
# Open the local KSSL/LDM SQLite snapshot (path is machine-specific).
db <- dbConnect(RSQLite::SQLite(), 'E:/NASIS-KSSL-LDM/LDM/LDM-compact.sqlite')
# list tables
dbListTables(db)
# list fields
dbListFields(db, 'nasis_ncss')
dbListFields(db, 'physical')
# get data
# Look up a site by user-facing ID, then pull physical / chemical /
# calculated results and layer (horizon) records by key or lab sample number.
dbGetQuery(db, "SELECT site_key, user_site_id from nasis_site WHERE user_site_id = 'S1999NY061001' ;")
dbGetQuery(db, "SELECT result_source_key, prep_code, labsampnum, clay_total, particle_size_method from physical WHERE result_source_key = 1 ;")
dbGetQuery(db, "SELECT result_source_key, prep_code, labsampnum, ca_nh4_ph_7, ca_nh4_ph_7_method from chemical WHERE result_source_key = 1 ;")
dbGetQuery(db, "SELECT * from calculations WHERE result_source_key = 1 ;")
dbGetQuery(db, "SELECT layer_key, natural_key, pedon_key, hzn_top, hzn_bot, hzn_desgn from layer WHERE pedon_key = 1 ;")
dbGetQuery(db, "SELECT layer_key, labsampnum, pedon_key, hzn_top, hzn_bot, hzn_desgn from layer WHERE pedon_key = 52931 ;")
dbGetQuery(db, "SELECT * from layer WHERE labsampnum = 'UCD03792' ;")
dbGetQuery(db, "SELECT * from layer WHERE labsampnum = 'UCD03792' ;")
# Note: `x` is reassigned repeatedly below; only the most recent query
# result is inspected each time.
x <- dbGetQuery(db, "SELECT * from physical WHERE labsampnum = '78P00891' ;")
x <- dbGetQuery(db, "SELECT pedobjupdate FROM nasis_ncss; ")
nrow(x)
head(x)
# none in here...
dbGetQuery(db, "SELECT * from calculations WHERE labsampnum = 'UCD03792' ;")
x.c <- dbGetQuery(db, "SELECT * from chemical WHERE labsampnum = 'UCD03792' ;")
x.p <- dbGetQuery(db, "SELECT * from physical WHERE labsampnum = 'UCD03792' ;")
# Helper: TRUE for columns that contain at least one non-NA value.
f <- function(i) {
  ! all(is.na(i))
}
# Keep only the populated columns of each result set.
idx <- which(sapply(x.c, f))
x.c[, idx]
idx <- which(sapply(x.p, f))
x.p[, idx]
# Lab sample ID, prep code, plus any bulk-density columns.
x.p[, c('labsampnum', 'prep_code', names(x.p)[grep('density', names(x.p))])]
## priority columns
x <- dbGetQuery(db, "SELECT * from nasis_ncss;")
table(x$priority, x$priority2)
table(x$labdatadescflag, x$priority2)
table(x$labdatadescflag, x$priority)
x <- dbGetQuery(db, "SELECT * FROM layer LIMIT 10 ;")
str(x)
# close file
dbDisconnect(db)
|
307e31f142756c47fa1ccbf099f10c8be87fe85b | 5a9beb9f519afb900b0329ace2d0f132c2848cc8 | /Using R, tidyverse and mlr/Chapter #3 KNN.R | df5d95e20745e36105780be1e56b03c0f47079d2 | [] | no_license | ZehongZ/R-Studio | d6d8525d29c4fc005f07a6db252f427f844ad3b1 | 1c06ea907552e8958f476e1ad3e9a9efe31e8549 | refs/heads/master | 2021-07-09T10:58:00.965761 | 2020-08-28T07:54:16 | 2020-08-28T07:54:16 | 173,672,330 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,180 | r | Chapter #3 KNN.R | #Build KNN model
library(mlr)
library(tidyverse)
# Loading the diabetes data
data(diabetes, package="mclust")
diabetesTib<-as_tibble(diabetes)
summary(diabetesTib)
# Plotting the diabetes data
library(ggplot2)
# NOTE(review): this first plot uses the raw `diabetes` data frame rather
# than diabetesTib; both hold the same columns here (diabetesTib is just
# as_tibble(diabetes)).
ggplot(diabetes, aes(x=glucose, y=insulin, col=class))+
  geom_point()+
  theme_bw()
ggplot(diabetesTib, aes(sspg, insulin, col=class))+
  geom_point()+
  theme_bw()
ggplot(diabetesTib, aes(sspg, glucose, col=class))+
  geom_point()+
  theme_bw()
# Define the classification task: predict `class` from the other columns.
diabetesTask<-makeClassifTask(data=diabetesTib, target = "class")
diabetesTask
# Defining the learner (k-nearest neighbors with k = 2)
knn<-makeLearner("classif.knn", par.vals = list("k"=2))
# Training the model
knnModel<-train(knn, diabetesTask)
# Predicting on the training data itself (optimistic performance estimate).
knnPred<-predict(knnModel, newdata=diabetesTib)
performance(knnPred, measures=list(mmce, acc))
# Creating a hold-out cross validation resampling description
holdout<-makeResampleDesc(method="Holdout", split=2/3, stratify = TRUE)
# Performing hold-out cross-validation
holdoutCV<-resample(learner=knn, task=diabetesTask,
                    resampling = holdout, measures=list(mmce, acc))
# Confusion matrix for hold-out cross-validation
calculateConfusionMatrix(holdoutCV$pred, relative=TRUE)
# Creating a repeated k-fold cross validation resampling description
kFold<-makeResampleDesc(method="RepCV", folds=10, reps=50, stratify=TRUE)
kFoldCV<-resample(learner=knn, task=diabetesTask, resampling=kFold, measures=list(mmce,acc))
kFoldCV$aggr
# Calculating a confusion matrix
calculateConfusionMatrix(kFoldCV$pred, relative = TRUE)
# Creating a leave-one-out cross validation resampling description
LOO<-makeResampleDesc(method="LOO")
LOOCV<-resample(learner=knn, task=diabetesTask, resampling=LOO,
                measures=list(mmce, acc))
LOOCV$aggr
# Calculating a confusion matrix
calculateConfusionMatrix(LOOCV$pred, relative=TRUE)
# Tuning k to improve our model: grid search over k = 1..10, scored by
# repeated 10-fold cross-validation.
knnParamSpace<-makeParamSet(makeDiscreteParam("k", values=1:10))
gridSearch<-makeTuneControlGrid()
cvForTuning<-makeResampleDesc("RepCV", folds=10, reps=20)
tunedK<-tuneParams("classif.knn", task=diabetesTask,
                   resampling = cvForTuning,
                   par.set=knnParamSpace, control=gridSearch)
tunedK
# Visualize the tuning process
knnTuningData<-generateHyperParsEffectData(tunedK)
plotHyperParsEffect(knnTuningData, x="k", y= "mmce.test.mean",
                    plot.type = "line")+
  theme_bw()
# Train the final model with the tuned k
tunedKnn<-setHyperPars(makeLearner('classif.knn'), par.vals=tunedK$x)
tunedModel<-train(tunedKnn, diabetesTask)
# Including hyperparameter tuning in our cross-validation (nested CV:
# inner loop tunes k, outer loop estimates generalization performance).
inner<-makeResampleDesc("CV")
outer<-makeResampleDesc("RepCV", folds=10, reps=5)
knnWrapper<-makeTuneWrapper("classif.knn", resampling=inner,
                            par.set=knnParamSpace,
                            control = gridSearch)
cvWithTuning<-resample(knnWrapper, diabetesTask, resampling = outer)
cvWithTuning
# Using the model to make predictions for three new patients
newDiabetesPatients<-tibble(glucose=c(82,108,300),
                            insulin=c(361, 288, 1052),
                            sspg=c(200, 186, 135))
newDiabetesPatients
newPatientsPred<-predict(tunedModel, newdata=newDiabetesPatients)
getPredictionResponse(newPatientsPred)
|
f1928ac254326b217b34757cc5f7a119961847a8 | 856153d54cc9110b94417c7ac62502cda5afd21f | /BFX/AGGA/HW2.R | 4a3e28669c360545a9a6c06988835a47fcde8833 | [] | no_license | drsaeva/JHU-Course-Code | c63cc5fea63988dab2b5fb9cd4179cc12fc883be | 17a29d9e22588b62c46ee9530ae9e94dfc241546 | refs/heads/master | 2020-04-05T22:57:00.684524 | 2017-11-05T21:42:36 | 2017-11-05T21:42:36 | 68,043,538 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,478 | r | HW2.R | ## Plink code
# filtering by <10% missing rate per snp (geno)
# maf > 10% (maf)
# <30% missing rate per individual (mind)
# hwe at signficance <0.001 (hwe)
# NOTE(review): the unquoted `plink ...` lines in this file are shell
# commands kept as lab notes; they are not valid R and must be run in a
# terminal, not sourced.
plink --map subjects_153.map --ped subjects_153.ped --geno 0.1 --maf 0.1 -hwe 0.001 --mind 0.3 --recode --out m_p_1
# genome file, MDS
plink --ped subjects_153.ped --map subjects_153.map --genome
plink --ped subjects_153.ped --map subjects_153.map --read-genome plink.genome --cluster --mds-plot 2
## R code to plot MDS
mds <- read.table("D:/Data/hw2/plink.mds", header=T)
cli <- read.table("D:/Data/hw2/clinical_table.txt", header=T, sep="\t")
# 153-row frame of numeric class codes; columns start as 3 (Profile,
# recycled), 1:153 (Sex placeholder), 4 (Suicide_Status, recycled) and are
# overwritten row by row in the loop below.
classes <- data.frame(3, 1:153, 4)
colnames(classes) <- c("Profile", "Sex", "Suicide_Status")
row.names(classes) <- cli[,1]
# Map clinical text values (col 7 = diagnosis, col 5 = sex,
# col 13 = suicide status) to integer codes usable as plot colors.
for (i in 1:nrow(cli)) {
  # profile
  if (cli[i, 7] == "Unaffected control") {
    classes$Profile[i] = 1
  }
  if (cli[i, 7] == "Schiz.") {
    classes$Profile[i] = 2
  }
  if (cli[i, 7] == "BP") {
    classes$Profile[i] = 3
  }
  if (cli[i, 7] == "Dep.") {
    classes$Profile[i] = 4
  }
  # sex
  if (cli[i, 5] == "M") {
    classes$Sex[i] = 1
  }
  if (cli[i, 5] == "F") {
    classes$Sex[i] = 2
  }
  # suicide_status
  if(cli[i,13] == "Yes") {
    classes$Suicide_Status[i] = 1
  }
  if(cli[i,13] == "No") {
    classes$Suicide_Status[i] = 2
  }
}
# Scatter the first two MDS eigenvectors (columns 4 and 5 of plink.mds),
# colored by each clinical classification in turn.
plot(mds[,4:5], col=classes$Profile, pch=16, xlab="p1",
     ylab="p2",main="Two eigenvectors from MDS of 153 GWAS subjects colored by disease profile")
legend("center", pch=16, col=c(1:4), c("Unaffected control","Schiz.","BP","Dep."))
par(mfrow=c(1,2))
plot(mds[,4:5], col=classes$Sex, pch=16, xlab="p1",
     ylab="p2",main="Two eigenvectors from MDS of 153 GWAS\n subjects colored by sex")
legend("center", pch=16, col=c(1:2), c("M","F"))
plot(mds[,4:5], col=classes$Suicide_Status, pch=16, xlab="p1",
     ylab="p2",main="Two eigenvectors from MDS of 153 GWAS\n subjects colored by suicide status")
legend("center", pch=16, col=c(1:2), c("Y","N"))
# identify Lifetime_Drug_Use variables, loop into classes matrix
unique(cli[,19])
# NOTE(review): the two bracketed lines below are pasted console output,
# not R code; sourcing this file would fail on them.
[1] Moderate drug use in present Social                       Heavy drug use in present
[4] Little or none               Moderate drug use in past    Heavy drug use in past
[7] Unknown
# Encode lifetime drug use (clinical col 19) as integers 1-7.
Lifetime_Drug_Use <- c()
for (i in 1:nrow(cli)) {
  if (cli[i, 19] == "Unknown") {
    Lifetime_Drug_Use[i] = 1
  }
  if (cli[i, 19] == "Social") {
    Lifetime_Drug_Use[i] = 2
  }
  if (cli[i, 19] == "Little or none") {
    Lifetime_Drug_Use[i] = 3
  }
  if (cli[i, 19] == "Moderate drug use in present") {
    Lifetime_Drug_Use[i] = 4
  }
  if (cli[i, 19] == "Moderate drug use in past") {
    Lifetime_Drug_Use[i] = 5
  }
  if (cli[i, 19] == "Heavy drug use in present") {
    Lifetime_Drug_Use[i] = 6
  }
  if (cli[i, 19] == "Heavy drug use in past") {
    Lifetime_Drug_Use[i] = 7
  }
}
classes <- cbind(classes, Lifetime_Drug_Use)
plot(mds[,4:5], col=classes$Lifetime_Drug_Use, pch=16, xlab="p1",
     ylab="p2",main="Two eigenvectors from MDS of 153 GWAS\n subjects colored by drug use")
legend("center", pch=16, col=c(1:7), c("Unk","Soc", "L/N", "M/Pr", "M/Pa", "H/Pr", "H/Pa"))
# Encode psychotic feature (clinical col 14) as integers 1-3.
Psychotic_Feature <- c()
for (i in 1:nrow(cli)) {
  if (cli[i, 14] == "Unknown") {
    Psychotic_Feature[i] = 1
  }
  if (cli[i, 14] == "Yes") {
    Psychotic_Feature[i] = 2
  }
  if (cli[i, 14] == "No") {
    Psychotic_Feature[i] = 3
  }
}
classes <- cbind(classes, Psychotic_Feature)
plot(mds[,4:5], col=classes$Psychotic_Feature, pch=16, xlab="p1",
     ylab="p2",main="Two eigenvectors from MDS of 153 GWAS\n subjects colored by psychotic feature")
legend("center", pch=16, col=c(1:7), c("Unk","Y", "N"))
plot(mds[,4:5], col=cli[,4]/5, pch=16, xlab="p1",
     ylab="p2",main="Two eigenvectors from MDS of 153 GWAS subjects")
# Linearly rescale a numeric vector onto [0, 10].
range10 <- function(x){
  (10*(x-min(x))/(max(x)-min(x)))
}
# Brain pH (clinical col 11) rescaled to 0-10, rounded, then shifted to
# 1-11 so it can index an 11-color red-to-blue ramp.
Brain_PH <- range10(cli[,11])
classes <- cbind(classes, Brain_PH)
classes$Brain_PH <- round(classes$Brain_PH)
for (i in 1:nrow(classes)) {
  classes$Brain_PH[i] <- classes$Brain_PH[i]+1
}
rbPal <- colorRampPalette(c('red','blue'))
classes$Brain_PH <- rbPal(11)[classes$Brain_PH]
plot(mds[,4:5], col=classes$Brain_PH, pch=16, xlab="p1",
     ylab="p2",main="Two eigenvectors from MDS of 153 GWAS\n subjects colored by brain pH")
legend("center", pch=16, col=c('red', 'blue'), c("Acidic","Neutral"))
# Simplified two-color coding: blue at/above pH 6.395, red below.
classes$Brain_PH_Simple <- cli[,11]
for (i in 1:nrow(classes)) {
  if (classes$Brain_PH_Simple[i] >= 6.395) {
    classes$Brain_PH_Simple[i] <- 'blue'
  } else {
    classes$Brain_PH_Simple[i] <- 'red'
  }
}
plot(mds[,4:5], col=classes$Brain_PH_Simple, pch=16, xlab="p1",
     ylab="p2",main="Two eigenvectors from MDS of 153 GWAS\n subjects colored by simplified brain pH")
legend("center", pch=16, col=c('red', 'blue'), c("Acidic","Neutral"))
# create keep file from list of subjects either BP or control and generate phenotype file
# The matrices start with a single all-NA row (matrix(,,2)) that is grown
# by rbind and then dropped via [2:nrow(.),] after each loop.
keep.bp <- matrix(,,2)
for (i in 1:nrow(cli)) {
  if (cli[i,7] == "BP" || cli[i,7] == "Unaffected control") {
    keep.bp <- rbind(keep.bp, c(as.character(cli[i,1]), 1))
  }
}
keep.bp <- keep.bp[2:nrow(keep.bp),]
# Case/control coding for plink: 2 = affected (BP), 1 = unaffected.
affected.bp <- matrix(,,2)
for (i in 1:nrow(cli)) {
  if (cli[i,7] == "BP") {
    affected.bp <- rbind(affected.bp, c(as.character(cli[i,1]),2))
  } else {
    affected.bp <- rbind(affected.bp, c(as.character(cli[i,1]),1))
  }
}
affected.bp <- affected.bp[2:nrow(affected.bp),]
pheno.bp <- cbind(affected.bp[,1], c(rep(1,nrow(affected.bp))), affected.bp[,2])
colnames(pheno.bp) <- c("FID", "IID", "pheno")
pheno.bp <- as.data.frame(pheno.bp)
# generate covariate file for plink usage with eigenvector 1, sex, and left brain status
cov <- matrix(,,5)
for (i in 1:nrow(cli)) {
  cov <- rbind(cov, c(as.character(cli$Database_ID[i]), 1, mds[i,4], as.character(cli[i,5]),
                      as.character(cli[i,12])))
}
cov <- cov[2:nrow(cov),]
colnames(cov) <- c("Family_ID", "Individual_ID", "covariate_1", "covariate_2", "covariate_3")
# Recode sex (M/F -> 0/1) and tissue preservation (Fixed/Frozen -> 0/1)
# into numeric covariates.
for (i in 1:nrow(cov)) {
  if (cov[i,4] == "M") {
    cov [i,4] <- 0
  }
  if (cov[i,4] == "F") {
    cov [i,4] <- 1
  }
  if (cov[i,5] == "Fixed") {
    cov [i,5] <- 0
  }
  if (cov[i,5] == "Frozen") {
    cov [i,5] <- 1
  }
}
cov <- as.data.frame(cov)
write.table(cov, file="D:/Data/hw2/mycov.txt", sep="\t", col.names=T, row.names=F, quote=F)
write.table(pheno.bp, file="D:/Data/hw2/pheno_bp.txt", sep="\t", col.names=T, row.names=F, quote=F)
write.table(keep.bp, file="D:/Data/hw2/keep_bp.txt", sep="\t", col.names=T, row.names=F, quote=F)
## Plink code
# linear regression using files from above and map/ped files
plink --ped subjects_153.ped --map subjects_153.map --linear --covar mycov.txt --keep keep_bp.txt --pheno pheno_bp.txt --all-pheno
## R code
# read in assoc file, identify SNPs with p < 0.01 and most prevalent chr among top 100 SNPs
# NOTE(review): the command above uses --linear but the file read below is
# named *.assoc.logistic - confirm which model plink actually produced.
snp.lin <- read.table("D:/Data/hw2/plink.pheno.assoc.logistic", header=T)
snp.lin.0_01 <- snp.lin[snp.lin[,9] < .01,]
nrow(snp.lin)
nrow(snp.lin.0_01)
snp.lin.sorted <- snp.lin.0_01[order(snp.lin.0_01$P),]
table(snp.lin.sorted$CHR[1:100])
# create keep file from list of subjects either schizophrenic or control and generate phenotype file
# Same procedure as the BP analysis above, with Schiz. as the case group.
keep.sc <- matrix(,,2)
for (i in 1:nrow(cli)) {
  if (cli[i,7] == "Schiz." || cli[i,7] == "Unaffected control") {
    keep.sc <- rbind(keep.sc, c(as.character(cli[i,1]), 1))
  }
}
keep.sc <- keep.sc[2:nrow(keep.sc),]
affected.sc <- matrix(,,2)
for (i in 1:nrow(cli)) {
  if (cli[i,7] == "Schiz.") {
    affected.sc <- rbind(affected.sc, c(as.character(cli[i,1]),2))
  } else {
    affected.sc <- rbind(affected.sc, c(as.character(cli[i,1]),1))
  }
}
affected.sc <- affected.sc[2:nrow(affected.sc),]
pheno.sc <- cbind(affected.sc[,1], c(rep(1,nrow(affected.sc))), affected.sc[,2])
colnames(pheno.sc) <- c("FID", "IID", "pheno")
pheno.sc <- as.data.frame(pheno.sc)
write.table(pheno.sc, file="D:/Data/hw2/pheno_sc.txt", sep="\t", col.names=T, row.names=F, quote=F)
write.table(keep.sc, file="D:/Data/hw2/keep_sc.txt", sep="\t", col.names=T, row.names=F, quote=F)
## Plink code
# linear regression using files from above and map/ped files
plink --ped subjects_153.ped --map subjects_153.map --linear --covar mycov.txt --keep keep_sc.txt --pheno pheno_sc.txt --all-pheno
## R code
# read in assoc file, identify SNPs with p < 0.01 and most prevalent chr among top 100 SNPs
snp.lin <- read.table("D:/Data/hw2/plink.pheno.assoc.logistic", header=T)
snp.lin.0_01 <- snp.lin[snp.lin[,9] < .01,]
nrow(snp.lin)
nrow(snp.lin.0_01)
snp.lin.sorted <- snp.lin.0_01[order(snp.lin.0_01$P),]
table(snp.lin.sorted$CHR[1:100])
table(snp.lin.sorted$CHR[1:100])
|
52d231db0f34f2bd739bc6ff85cb58cbdcfc3f29 | 696db476049ebf1c61606c29208bba638eb1d952 | /code/xx_scratch.R | 47cb4a38dc2780e29d53037333934c9312c0c770 | [] | no_license | bcjaeger/INTERMACS-Conditional-RPEs | d71a49910bc0b41d39ee9e5612281307b2291263 | e432640944c09b8847a193dcd6e562ff6cedcbd8 | refs/heads/master | 2020-08-30T13:04:14.072793 | 2019-10-30T01:16:25 | 2019-10-30T01:16:25 | 218,389,450 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,137 | r | xx_scratch.R |
# factor: m0_primary_dgn?
# mdl_full <- xgboost(
# params = params,
# data = as.matrix(trn_mat),
# label = trn_lab,
# nrounds = cv_full$best_iteration,
# print_every_n = 50
# )
#
# ftr_rdcd <- xgb.importance(model = mdl_full) %>%
# pull(Feature) %>%
# .[1:50]
#
# trn_mat_rdcd <- select_at(trn_mat, ftr_rdcd)
# tst_mat_rdcd <- select_at(tst_mat, ftr_rdcd)
#
# cv_rdcd <- xgb.cv(
# params = params,
# data = as.matrix(trn_mat_rdcd),
# label = trn_lab,
# nfold = 10,
# nrounds = 5000,
# print_every_n = 50,
# early_stopping_rounds = 100
# )
#
# mdl_rdcd <- xgboost(
# params = params,
# data = as.matrix(trn_mat_rdcd),
# label = trn_lab,
# nrounds = cv_rdcd$best_iteration,
# print_every_n = 50
# )
#
# library(midytrees)
#
# impute_init <- list(training = trn_mat_rdcd, testing = tst_mat_rdcd) %>%
# map(kNN_mi, nimpute = 10, composition = 'midy')
#
# imputes <- impute_init %>%
# bind_rows(.id = 'role') %>%
# mutate(data = as_xmats(data, formula = ~.)) %>%
# select(-miss_strat) %>%
# deframe()
#
# imputes$training_label <- expand_label(trn_lab, 'midy', 10)
#
# imputes$folds <- gen_fold_indices(
# ntrain = nrow(trn_mat),
# nfolds = 10
# )
#
# cv_midy <- xgb_cv(
# params = params,
# data = imputes$training,
# label = imputes$training_label,
# folds = imputes$folds,
# nrounds = 5000,
# print_every_n = 50,
# early_stopping_rounds = 100
# )
#
# mdl_midy <- midytrees::xgb_train(
# params = params,
# data = xgb.DMatrix(
# data = imputes$training,
# label = imputes$training_label
# ),
# nrounds = cv_midy$best_iteration,
# print_every_n = 50
# )
#
# midy_trn_prd <- predict(
# object = mdl_midy,
# newdata = imputes$training,
# outputmargin = TRUE
# ) %>%
# pool_preds(
# nobs = nrow(trn_mat),
# nimpute = 10,
# miss_strat = 'midy'
# )
#
# midy_tst_prd <- predict(
# object = mdl_midy,
# newdata = imputes$testing,
# outputmargin = TRUE
# ) %>%
# pool_preds(
# nobs = nrow(tst_mat),
# nimpute = 10,
# miss_strat = 'midy'
# )
#
# bh <- basehaz.gbm(
# t = training$time,
# delta = training$status,
# f.x = midy_trn_prd,
# t.eval = eval_times,
# smooth = TRUE,
# cumulative = TRUE
# )
#
# midy_tst_prb <- matrix(
# data = 0,
# nrow=nrow(tst_mat),
# ncol=length(eval_times)
# )
#
# for(i in 1:length(bh)){
# midy_tst_prb[,i] <- exp(-exp(midy_tst_prd) * (bh[i]))
# }
#
# midy_rslt <- tibble(
# type = 'midy',
# mdl = list(mdl_midy),
# mat = list(trn_mat_impt),
# ftr = list(ftr_rdcd),
# prb = list(midy_tst_prb)
# )
#
# rslt <- tibble(
# type = c("full", "rdcd"),
# mdl = list(mdl_full, mdl_rdcd),
# mat = list(trn_mat, trn_mat_rdcd)
# ) %>%
# mutate(
# ftr = map(mat, names),
# prb = pmap(
# .l = list(mdl, mat, ftr),
# .f = function(.mdl, .mat, .ftr){
#
# bh <- basehaz.gbm(
# t = training$time,
# delta = training$status,
# f.x = predict(
# .mdl,
# newdata = as.matrix(.mat),
# outputmargin = TRUE
# ),
# t.eval = eval_times,
# smooth = TRUE,
# cumulative = TRUE
# )
#
# tst_prd <- predict(
# .mdl,
# newdata = as.matrix(tst_mat[,.ftr]),
# outputmargin = TRUE
# )
#
# tst_prb <- matrix(
# data = 0,
# nrow=nrow(tst_mat),
# ncol=length(eval_times)
# )
#
# for(i in 1:length(bh)){
# tst_prb[,i] <- exp(-exp(tst_prd) * (bh[i]))
# }
#
# tst_prb
#
# }
# )
# ) %>%
# bind_rows(midy_rslt)
#
# concordance <- pec::cindex(
# object = rslt$prb,
# formula = Surv(time, status) ~ 1,
# cens.model = 'cox',
# data = testing,
# eval.times = eval_times
# ) %>%
# use_series("AppCindex") %>%
# set_names(rslt$type) %>%
# bind_cols() %>%
# mutate(time = eval_times) %>%
# filter(time == max(time)) %>%
# select(-time) %>%
# mutate_all(as.numeric) %>%
# rename_all(~paste0("cstat_",.x))
#
# int_brier <- pec::pec(
# object = set_names(rslt$prb, rslt$type),
# formula = Surv(time, status) ~ 1,
# cens.model = 'cox',
# data = testing,
# exact = FALSE,
# times = eval_times,
# start = eval_times[1],
# maxtime = eval_times[length(eval_times)]
# ) %>%
# ibs() %>%
# tibble(
# name = rownames(.),
# value = as.numeric(.)
# ) %>%
# dplyr::select(name, value) %>%
# mutate(
# value = 1 - value / value[name=='Reference'],
# value = format(round(100 * value, 1), nsmall=1)
# ) %>%
# filter(name!='Reference') %>%
# spread(name, value) %>%
# mutate_all(as.numeric) %>%
# rename_all(~paste0("bstat_",.x))
#
# results[[i]] <- bind_cols(
# int_brier, concordance
# )
#
# }
#
# output <- bind_rows(results) %>%
# mutate_all(as.numeric) %>%
# summarize_all(mean) %>%
# mutate(params = list(params))
#
# output
# split_names <-
# gsub(
# ".csv",
# "",
# data_files,
# fixed = TRUE
# )
#
# train_proportion <- 0.75
#
# set.seed(329)
#
# for(f in data_files){
#
# train_test_splits <- vector(mode='list', length = 1000)
# analysis <- read_csv(paste0("data/analysis/",f))
#
# for(i in seq_along(train_test_splits)){
#
# trn_indx <- sample(
# x = nrow(analysis),
# size = round(nrow(analysis)*train_proportion),
# replace = FALSE
# )
#
# train_test_splits[[i]] <- list(
# training = trn_indx,
# testing = setdiff(1:nrow(analysis), trn_indx)
# )
#
# }
#
# outfile <- gsub('.csv', '.rds', f)
#
# write_rds(
# train_test_splits,
# file.path("data","R objects",paste0("train_test_splits_",outfile))
# )
#
# }
#
# library(tidyverse)
# library(xgboost)
# library(glue)
# library(midy)
# library(rBayesianOptimization)
#
# source("code/functions/xgb_bayes_opt.R")
#
# target <- 'dead'
# time <- 'M0_25MD'
#
# training <- read_csv(
# glue("data/training/train_{target}_{time}.csv")
# ) %>%
# mutate(label = label_for_survival(time, status))
#
#
# xgb_label <- training$label
#
# xgb_data <- training %>%
# select(-time, -status, -label) %>%
# spread_cats() %>%
# as.matrix() %>%
# xgb.DMatrix(label = xgb_label)
#
# opt_xgb <- xgb_bayes_opt(
# trn_dmat = xgb_data,
# trn_y = xgb_label,
# objective = 'survival:cox',
# eval_metric = 'cox-nloglik',
# eval_maximize = FALSE,
# nfolds = 15,
# init_points = 100,
# n_iter = 100
# )
#
# params <- opt_xgb %>%
# use_series('Best_Par') %>%
# enframe() %>%
# spread(name, value) %>%
# mutate(
# eta = eta / 5,
# objective = 'binary:logistic',
# eval_metric = 'auc'
# ) %>%
# as.list()
#
# xgb_cv <- xgboost::xgb.cv(
# data=xgb_data,
# nrounds = 5000,
# early_stopping_rounds = 100,
# print_every_n = 100,
# nfold = 15,
# params = params
# )
#
# xgb_mdl <- xgboost(
# data = xgb_data,
# nrounds = xgb_cv$best_iteration,
# params = params,
# verbose = FALSE
# ) |
896ac59559335e4a846e0ba3125358f53ad363fd | a99306823f0fc75efccc0ddcf826e7e958117f13 | /man/limit.Rd | 81c3e44c3daaea7c5770270417abbb6aa1e62799 | [] | no_license | shearer/PropCIs | 57890579b8a12674e7a489fcb2d6a8090f7e5358 | b9ee93571772eed551c184d1d86269ec44f4a610 | refs/heads/master | 2022-12-19T08:48:30.438508 | 2018-08-23T19:01:15 | 2018-08-23T19:01:20 | 11,749,170 | 7 | 1 | null | 2018-08-23T18:47:36 | 2013-07-29T20:33:25 | R | UTF-8 | R | false | false | 112 | rd | limit.Rd | \name{limit}
\alias{limit}
\title{
Internal helper for orscoreci
}
\description{
Internal function used by \code{orscoreci}; not intended to be called
directly by users.
}
|
fd8ad12b5fd11bcb2c9f7eb255127d48c4a62d48 | 0084280ad5d1400c280c110c402d3018b7a129af | /R/manifest/nested-list-example.R | 9918ee24c0f1d2597f3735d93bff645106739470 | [
"MIT"
] | permissive | fpbarthel/GLASS | 457626861206a5b6a6f1c9541a5a7c032a55987a | 333d5d01477e49bb2cf87be459d4161d4cde4483 | refs/heads/master | 2022-09-22T00:45:41.045137 | 2020-06-01T19:12:30 | 2020-06-01T19:12:47 | 131,726,642 | 24 | 10 | null | null | null | null | UTF-8 | R | false | false | 2,733 | r | nested-list-example.R |
## Working with nested lists in R/tidyverse example code
## Related to question asked on stackoverflow
## URL: https://stackoverflow.com/questions/50477156/convert-a-tidy-table-to-deeply-nested-list-using-r-and-tidyverse
## @Author FLoris Barthel
library(tidyverse)
library(gapminder)
# json = gapminder %>%
# filter(continent == "Oceania") %>% ## Limit data to Oceania to get a smaller table
# nest(-continent, .key = countries) %>%
# mutate(countries = map(countries, nest, -country, .key=years))
#
# jsonlite::toJSON(json, pretty = T)
library(tidyverse)
library(stringi)
n_patient = 2
n_samples = 3
n_readgroup = 4
n_mate = 2
df = data.frame(patient = rep(rep(LETTERS[1:n_patient], n_samples),2),
sample = rep(rep(seq(1:n_samples), each = n_patient),2),
readgroup = rep(stri_rand_strings(n_patient * n_samples * n_readgroup, 6, '[A-Z]'),2),
mate = rep(1:n_mate, each = n_patient * n_samples * n_readgroup)) %>%
mutate(file = sprintf("%s.%s.%s_%s", patient, sample, readgroup, mate)) %>%
arrange(file)
json = df %>%
nest(-patient, .key = samples) %>%
mutate(samples = map(samples, nest, -sample, .key=readgroups))
json3 <- df %>% nest(-(1:3),.key=mate) %>% nest(-(1:2),.key=readgroups) %>% nest(-1,.key=samples)
jsonlite::toJSON(json3,pretty=T)
vars <- names(df)[-1] # or whatever variables you want to nest, order matters!
nest_by <- function(df, ..., reverse = T) {
var_pairs <- map((length(vars)-1):1,~vars[.x:(.x+1)])
json4 <- reduce(var_pairs,~{nm<-.y[1];nest(.x,.y,.key=!!enquo(nm))},.init=df)
}
test <- function(...) {
enquo(...)
}
jsonlite::toJSON(json4,pretty=T)
json = df %>%
group_by(patient) %>%
group_by(sample, add = T) %>%
nest()
jsonlite::toJSON(json, pretty = T)
# [
# {
# "patient" : "A",
# "samples" : [
# {
# "sample" : "P",
# "files" : [
# {
# "file" : "ZZEVYQ"
# },
# {
# "file" : "XRYBUE"
# }
# ]
# },
# {
# "sample" : "R",
# "files" : [
# {
# "file" : "KIUXRU"
# },
# {
# "file" : "ZCHBKN"
# }
# ]
# }
# ]
# },
# {
# "patient" : "B",
# "samples" : [
# {
# "sample" : "P",
# "files" : [
# {
# "file" : "WZYAPM"
# },
# {
# "file" : "CYEJCK"
# }
# ]
# },
# {
# "sample" : "R",
# "files" : [
# {
# "file" : "EKDFYT"
# },
# {
# "file" : "XFAYXX"
# }
# ]
# }
# ]
# }
# ] |
e118122056823c2416efca19977a4444eb17d45b | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/gbp/man/gbp3d_solver_dpp_filt.Rd | 24bb65ca91831fac8a40497c17c223343d80cd64 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,172 | rd | gbp3d_solver_dpp_filt.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gbp3d_cpp_rd.r
\name{gbp3d_solver_dpp_filt}
\alias{gbp3d_solver_dpp_filt}
\title{gbp3d_solver_dpp_filt}
\usage{
gbp3d_solver_dpp_filt(ldh, m)
}
\arguments{
\item{ldh}{it scale <matrix>
- l, d, h it scale along x, y, z <numeric>}
\item{m}{bn scale <matrix>
- l, d, h bn scale along x, y, z <numeric>
- l, d, h in row and each col is a single bn
should make sure bn list are sorted via volume
so that the first col is the most prefered smallest bn, and also
the last col is the least prefered largest and often dominant bn
should make sure no X in front of Y if bnX dominant bnY,
bnX dominant bnY if all(X(l, d, h) > Y(l, d, h)) and should always prefer Y.
should make sure bn such that l >= d >= h or vice versa.}
}
\value{
gbp3q
a gbp3q object instantiated with p profit, it item (x, y, z, l, d, h) position and scale matrix, bn bin (l, d, h) scale matrix,
k item selection, o objective, f bin selection, and ok an indicator of whether all items fit.
}
\description{
solve gbp3d w.r.t select most preferable often smallest bin from bn list
}
\details{
gbp3d_solver_dpp_filt is built on top of gbp3d_solver_dpp
aims to select the most preferable bn from a list of bn that can fit all or most it
gbp3d_solver_dpp()'s objective is fit all or most it into a single given bn (l, d, h)
gbp3d_solver_dpp_filt()'s objective is select the most preferable given a list of bn
where bn list is specified in 3xN matrix that the earlier column the more preferable
gbp3d_solver_dpp_filt() use an approx binary search and determine f w.r.t bn.n_cols
where f = 1 indicate the bn being selected and only one of 1 in result returned.
ok = true if any bin can fit all it and algorithm will select smallest bn can fit all
otherwise ok = false and algorithm will select a bn can maximize volume of fitted it
often recommend to make the last and least preferable bn dominate all other bn in list
when design bn list, bnX dominant bnY if all(X(l, d, h) > Y(l, d, h)).
}
\seealso{
Other gbp3q: \code{\link{gbp3q_checkr}},
\code{\link{gbp3q}}
}
|
fb89704759ca96fbe99516f54cf65a890f0dc0f4 | b4d2b30c3a97b82757b3bb4c1846a9d953aedf86 | /Sports-Analytics/college_shooting/scraping.R | b4798b0b4048a340d8da5b081da78799d2093025 | [] | no_license | CoderShubham2000/MLH-Local-HackDay | e2a1aeac5f7a89c9ecf0c3c4d1e9fa296672ca25 | 51fa6d2d07c581501b631dd0fb62bc3fea7119d8 | refs/heads/main | 2023-04-06T23:45:00.990516 | 2021-04-04T06:22:29 | 2021-04-04T06:22:29 | 352,527,995 | 3 | 8 | null | null | null | null | UTF-8 | R | false | false | 838 | r | scraping.R | # Change working directory
# NOTE(review): setwd() and install_github() inside a script are
# side-effecting and machine-specific; prefer running them interactively
# (or guarding them) and using relative paths instead.
# Change working directory
setwd("C:/Users/hashi/basketball_ref")
# Install the bbr package (a basketball-reference.com scraper) from GitHub
devtools::install_github("mbjoseph/bbr")
# Activate the package
library(bbr)
# Create an empty data frame to accumulate the scraped season data
seasons<- data.frame(Data=as.Date(character()),
                     File = character(),
                     User = character(),
                     stringsAsFactors = FALSE)
# Arbitrary starting point: the first season to scrape
season_num<- 1990
# Scrape 26 consecutive seasons (1990-2015) and append each to `seasons`.
# NOTE(review): rbind-in-a-loop copies the data frame on every iteration;
# do.call(rbind, lapply(1990:2015, get_season)) would avoid that.
for(i in 1:26){
  seasons<-rbind(seasons, get_season(season_num))
  season_num<- season_num + 1
}
# Get the name of our variables
names(seasons)
# We can subset our data to select a player
LebronJames<-subset(seasons, seasons$player == 'LeBron James')
BenSimmons<-subset(seasons, seasons$player == 'Ben Simmons')
|
99971c118a46244b63eb4ff3ed1f8755bdb00a6c | ace27a97e2012c2c3c275e78f3394fc5ca11b736 | /R/igc.R | e09f9eb77a4114179c070da254bb3a1788b5f44d | [] | no_license | pem725/MRES | 6a27ed4884d2ea31524307b0703ee0d097097f14 | 01b0d92577a344614b04a357a820357082896e84 | refs/heads/master | 2016-09-05T10:40:03.284385 | 2013-09-16T15:42:34 | 2013-09-16T15:42:34 | 4,679,282 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,431 | r | igc.R |
#################################################################
### igc - compute individual growth curve parameters using lm
### updated: 11/05/2008
###
### usage igc(dat,idvar="",ivar="",dvar="",parms=2 or 3
# S3 generic for individual growth curve (IGC) estimation; dispatches on
# the class of `x` (see igc.default for the reference implementation).
igc <- function(x, ...) UseMethod("igc")
# Default method for igc(): estimate an individual growth curve (IGC) for
# every subject in a long-format data set.
#
# Args:
#   x      data frame with one row per observation (long format)
#   idvar  name of the subject identifier column
#   ivar   name of the predictor/time column (IV)
#   dvar   name of the outcome column (DV)
#   byvar  optional grouping variable name (matched below but not otherwise
#          used in this function)
#   cvar   optional covariate name; reduced to one value per subject and
#          carried through on the returned object
#   parms  2 = intercept + linear slope; 3 = adds a quadratic term
#   method "OLS" = separate lm() per subject; "ML" = mixed model (lme4/nlme)
#
# Returns an object of class "igc": a list with params (per-subject
# estimates), parms, method, xlim, ylim, fixed.parms, cvar and dvar.
#
# NOTE(review): the ID/IV/DV renaming below only happens when BOTH cvar and
# byvar are NULL; with a non-NULL cvar or byvar the later x$DV / x$ID
# references presumably break -- confirm intended behavior before relying
# on those arguments.
igc.default <- function(x,idvar,ivar,dvar,byvar=NULL,cvar=NULL,parms=2,method="OLS"){
  # First index the data file (x) with the names
  dat.orig <- x
  idv <- match(idvar,names(x))
  ID <- match(idvar,names(x))
  IV <- match(ivar,names(x))
  DV <- match(dvar,names(x))
  if(!is.null(cvar)){
    CV <- match(cvar,names(x))
  }
  if(!is.null(byvar)){
    BV <- match(byvar,names(x))
  }
  # subset data so we have only the three relevant variables
  if (is.null(cvar) & is.null(byvar)){
    x <- data.frame(ID=x[,ID],IV=x[,IV],DV=x[,DV])
    rm(ID,IV,DV) # get rid of these values because they mess up things below
  } ## else if (is.null(cvar)
  #### Missing Data Handling
  # Second get rid of NA's in the dv vector
  x <- x[!is.na(x$DV),]
  # Third, make sure we have more than 3 observations per subject and only retain those that do have more than 3
  # (rowSums of the ID x IV table = number of non-missing observations per subject)
  x.sums <- data.frame(ID=row.names(table(x$ID,x$IV)),counts=rowSums(table(x$ID,x$IV)))
  x.lim <- x.sums[x.sums[,2] > 2,]
  x <- x[x$ID %in% x.lim[,1],]
  IDuniq <- unique(x$ID)
  # Fourth, store some useful values for later analysis
  # (axis limits for plot.igc: anchored at 0 unless negative values occur)
  if (min(x$IV,na.rm=T) < 0){
    xlim <- c(min(x$IV,na.rm=T),max(x$IV,na.rm=T))
  } else {
    xlim <- c(0,max(x$IV,na.rm=T))
  }
  if (min(x$DV,na.rm=T) < 0){
    ylim <- c(min(x$DV,na.rm=T),max(x$DV,na.rm=T))
  } else {
    ylim <- c(0,max(x$DV,na.rm=T))
  }
  ### Now get the parameter estimates
  if (parms == 2){
    if (method == "OLS"){ # Compute the IGC's using OLS via lm
      gc.out <- data.frame(id=IDuniq,Intparm=0,Lparm=0,IntSE=0,LparmSE=0,Rsq=0)
      for (i in 1:length(IDuniq)){
        dat <- subset(x,x$ID == IDuniq[i])
        lm.tmp <- lm(DV~IV,data=dat)
        gc.out[i,2] <- coef(lm.tmp)[[1]]
        gc.out[i,3] <- coef(lm.tmp)[[2]]
        # column-major positions 3 and 4 of the 2x4 coefficient table are
        # the standard errors of the intercept and slope
        gc.out[i,4] <- summary(lm.tmp)$coefficients[3]
        gc.out[i,5] <- summary(lm.tmp)$coefficients[4]
        # NOTE(review): component [[9]] of a summary.lm object is
        # adj.r.squared in current R (r.squared is [[8]]) -- confirm which
        # was intended; summary(lm.tmp)$r.squared would be unambiguous.
        gc.out[i,6] <- summary(lm.tmp)[[9]]
      }
    }
    if (method == "ML"){ # Compute the IGC's using ML via lmer
      library(lme4)
      lme.tmp <- lmer(DV~IV + (IV|ID),data=x,na.action=na.exclude)
      # coef() on the fitted model returns per-group coefficient data
      # frames: column 1 = intercept, column 2 = slope
      gc.out <- data.frame(id=as.numeric(as.character(rownames(coef(lme.tmp)[[1]]))),Intparm=coef(lme.tmp)[[1]][,1],Lparm=coef(lme.tmp)[[1]][,2])
    }
    # compute fixed parameters for graphiing the results
    # (pooled sample-level intercept/slope and their SEs, used by plot.igc)
    lm.fixed <- lm(DV~IV,data=x)
    fixed.parms <- c(coef(lm.fixed)[[1]],coef(lm.fixed)[[2]],summary(lm.fixed)$coefficients[3],summary(lm.fixed)$coefficients[4])
  }
  if (parms == 3){
    if (method == "OLS"){
      gc.out <- data.frame(id=IDuniq,Intparm=0,Lparm=0,Qparm=0,IntSE=0,LparmSE=0,QparmSE=0,Rsq=0)
      for (i in 1:length(IDuniq)){
        dat <- subset(x,x$ID == IDuniq[i])
        lm.tmp <- lm(DV~IV+I(IV*IV),data=dat)
        gc.out[i,2] <- coef(lm.tmp)[[1]]
        gc.out[i,3] <- coef(lm.tmp)[[2]]
        gc.out[i,4] <- coef(lm.tmp)[[3]]
        # positions 4-6 of the 3x4 coefficient table are the three SEs
        gc.out[i,5] <- summary(lm.tmp)$coefficients[4]
        gc.out[i,6] <- summary(lm.tmp)$coefficients[5]
        gc.out[i,7] <- summary(lm.tmp)$coefficients[6]
        # see NOTE(review) above about [[9]] vs $r.squared
        gc.out[i,8] <- summary(lm.tmp)[[9]]
      }
    }
    if (method == "ML"){
      library(nlme)
      dat.grpd <- groupedData(DV~IV | ID, data=x)
      lme.tmp <- lme(DV~IV + I(IV*IV),data=dat.grpd)
      gc.out <- data.frame(id=rownames(coef(lme.tmp)),Intparm=coef(lme.tmp)[,1],Lparm=coef(lme.tmp)[,2],Qparm=coef(lme.tmp)[,3])
    }
    # compute fixed parameters for graphing the results
    lm.fixed <- lm(DV~IV+I(IV*IV),data=x)
    fixed.parms <- c(coef(lm.fixed)[[1]],coef(lm.fixed)[[2]],coef(lm.fixed)[[3]],summary(lm.fixed)$coefficients[4],summary(lm.fixed)$coefficients[5],summary(lm.fixed)$coefficients[6])
  }
  if(!is.null(cvar)){
    # reduce the covariate to a single value per retained subject
    cvar <- dat.orig[,c(idv,CV)]
    cvar <- cvar[cvar[,1] %in% gc.out$id,]
    cvar <- cvar[!duplicated(cvar),2]
  }
  res <- list(params=gc.out,parms=parms,method=method,xlim=xlim,ylim=ylim,fixed.parms=fixed.parms,cvar=cvar,dvar=dvar)
  class(res) <- "igc"
  return(res)
}
#' Plot method for "igc" objects: draws each subject's fitted growth curve,
#' the fixed (sample-average) curve in red, and optionally dashed red
#' +/- 1.96*SE band lines around the fixed curve.
#'
#' Args: x an "igc" object; xlab/ylab/main passed to the first curve() call;
#' selines logical, draw the SE band lines; cplot accepted for backward
#' compatibility but currently unused; ... reserved for future use.
#'
#' Bug fix: the original signature read `cplot=F...` (missing comma), which
#' is a parse error; `2:nrow(gcdat)` also mis-iterated as c(2, 1) when only
#' one subject was fit.
plot.igc <- function(x, xlab="", ylab="", main="", selines=TRUE, cplot=FALSE, ...){
  ylim <- x$ylim
  xlim <- x$xlim
  gcdat <- x$params
  fixed <- x$fixed.parms
  cvar <- x$cvar  # retained for a planned covariate plot (cplot); unused for now
  if (x$parms == 2){
    # first subject's line sets up the plotting region
    curve(gcdat[1,3]*x + gcdat[1,2], min(xlim), max(xlim), ylim=ylim,
          xlab=xlab, ylab=ylab, main=main)
    # remaining subjects; seq_len()[-1] is empty when there is one subject
    for (i in seq_len(nrow(gcdat))[-1]){
      curve(gcdat[i,3]*x + gcdat[i,2], min(xlim), max(xlim), add=TRUE)
    }
    # fixed (sample-average) trajectory
    curve(fixed[2]*x + fixed[1], min(xlim), max(xlim), lwd=2, col="red", add=TRUE)
    if (isTRUE(selines)){
      # approximate 95% band: intercept and slope each shifted by 1.96*SE
      curve((fixed[2]+1.96*fixed[4])*x + fixed[1]+1.96*fixed[3], min(xlim), max(xlim), lwd=2, lty=2, col="red", add=TRUE)
      curve((fixed[2]-1.96*fixed[4])*x + fixed[1]-1.96*fixed[3], min(xlim), max(xlim), lwd=2, lty=2, col="red", add=TRUE)
    }
  }
  if (x$parms == 3){
    curve(gcdat[1,3]*x + gcdat[1,4]*x^2 + gcdat[1,2], min(xlim), max(xlim), ylim=ylim,
          xlab=xlab, ylab=ylab, main=main)
    for (i in seq_len(nrow(gcdat))[-1]){
      curve(gcdat[i,3]*x + gcdat[i,4]*x^2 + gcdat[i,2], min(xlim), max(xlim), lwd=2, add=TRUE)
    }
    curve(fixed[3]*x^2 + fixed[2]*x + fixed[1], min(xlim), max(xlim), lwd=2, col="red", add=TRUE)
    if (isTRUE(selines)){
      curve((fixed[3]+1.96*fixed[6])*x^2 + (fixed[2]+1.96*fixed[5])*x + fixed[1]+1.96*fixed[4], min(xlim), max(xlim), lwd=2, lty=2, col="red", add=TRUE)
      curve((fixed[3]-1.96*fixed[6])*x^2 + (fixed[2]-1.96*fixed[5])*x + fixed[1]-1.96*fixed[4], min(xlim), max(xlim), lwd=2, lty=2, col="red", add=TRUE)
    }
  }
  invisible(NULL)
}
#' Summary method for "igc" objects: mean and SD of each estimated
#' growth-curve parameter across subjects (plus R-squared for OLS fits,
#' which is the only method that produces an Rsq column).
#'
#' Bug fix: the original tested length(x) against 6/7, which never matches
#' the 8-element list returned by igc.default(), and then averaged the list
#' metadata (parms, method, ...) instead of the per-subject estimates.
#' This version summarizes the columns of x$params by name.
#'
#' Returns a matrix with one row per parameter ("Intercept", "Linear.Slope",
#' optionally "Quadratic.Slope" and "R-squared") and columns "Mean"/"SD",
#' rounded to 2 decimals.
summary.igc <- function(x){
  est <- x$params
  if (x$parms == 2){
    cols <- c(Intercept = "Intparm", Linear.Slope = "Lparm")
  } else {
    cols <- c(Intercept = "Intparm", Linear.Slope = "Lparm",
              Quadratic.Slope = "Qparm")
  }
  # R-squared exists only for OLS fits; ML output carries no Rsq column
  if ("Rsq" %in% names(est)){
    cols <- c(cols, `R-squared` = "Rsq")
  }
  out <- matrix(0, nrow = length(cols), ncol = 2,
                dimnames = list(names(cols), c("Mean", "SD")))
  for (i in seq_along(cols)){
    v <- est[[cols[[i]]]]
    out[i, 1] <- round(mean(v, na.rm = TRUE), 2)
    out[i, 2] <- round(sd(v, na.rm = TRUE), 2)
  }
  return(out)
}
#' Extract the per-subject parameter estimates from an "igc" object as a
#' data frame, renaming the columns with a prefix (the DV name by default):
#' "<prefix>I" (intercept), "<prefix>L" (linear), "<prefix>Q" (quadratic),
#' "<prefix>*se" (standard errors, OLS only) and "<prefix>Rsq" (OLS only).
#'
#' Bug fix: the 3-parameter OLS branch originally fused the "Qse" and "Rsq"
#' labels into one malformed name and supplied only 7 names for the 8
#' columns of the output, so names(dat) <- ... failed.
coef.igc <- function(x, prefix = x$dvar){
  dat <- x$params
  if (x$parms == 2){
    if (x$method == "OLS"){
      names(dat) <- c("id", paste0(prefix, "I"), paste0(prefix, "L"),
                      paste0(prefix, "Ise"), paste0(prefix, "Lse"),
                      paste0(prefix, "Rsq"))
    }
    if (x$method == "ML"){
      names(dat) <- c("id", paste0(prefix, "I"), paste0(prefix, "L"))
    }
  }
  if (x$parms == 3){
    if (x$method == "OLS"){
      names(dat) <- c("id", paste0(prefix, "I"), paste0(prefix, "L"),
                      paste0(prefix, "Q"), paste0(prefix, "Ise"),
                      paste0(prefix, "Lse"), paste0(prefix, "Qse"),
                      paste0(prefix, "Rsq"))
    }
    if (x$method == "ML"){
      names(dat) <- c("id", paste0(prefix, "I"), paste0(prefix, "L"),
                      paste0(prefix, "Q"))
    }
  }
  return(dat)
}
|
56f49f7190c1b30914d1c3d989185cd865bd1f5f | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.game.development/tests/testthat/test_gamelift.R | eab4f4c6f031c8f93758ee37aa4213cdbfc2a031 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | false | 1,344 | r | test_gamelift.R | svc <- paws::gamelift()
# Smoke tests for the paws GameLift client: each read-only API call on the
# `svc` client created above is expected to complete without raising an
# error. expect_error(expr, NA) is testthat's idiom for "expect NO error".
test_that("describe_ec2_instance_limits", {
  expect_error(svc$describe_ec2_instance_limits(), NA)
})
test_that("describe_fleet_attributes", {
  expect_error(svc$describe_fleet_attributes(), NA)
})
test_that("describe_fleet_capacity", {
  expect_error(svc$describe_fleet_capacity(), NA)
})
test_that("describe_fleet_utilization", {
  expect_error(svc$describe_fleet_utilization(), NA)
})
test_that("describe_game_session_queues", {
  expect_error(svc$describe_game_session_queues(), NA)
})
test_that("describe_matchmaking_configurations", {
  expect_error(svc$describe_matchmaking_configurations(), NA)
})
test_that("describe_matchmaking_rule_sets", {
  expect_error(svc$describe_matchmaking_rule_sets(), NA)
})
test_that("describe_vpc_peering_authorizations", {
  expect_error(svc$describe_vpc_peering_authorizations(), NA)
})
test_that("describe_vpc_peering_connections", {
  expect_error(svc$describe_vpc_peering_connections(), NA)
})
test_that("list_aliases", {
  expect_error(svc$list_aliases(), NA)
})
test_that("list_builds", {
  expect_error(svc$list_builds(), NA)
})
test_that("list_fleets", {
  expect_error(svc$list_fleets(), NA)
})
test_that("list_game_server_groups", {
  expect_error(svc$list_game_server_groups(), NA)
})
test_that("list_scripts", {
  expect_error(svc$list_scripts(), NA)
})
|
d41f9a7958e378f62a13f4e1b7b829700c0b389c | f16b7412963b61d5f5412714ae4ef0a9cac90578 | /R/write_league_history.R | ed8e8d2217cc98ba2dfee66bca34764b632c0bce | [] | no_license | tjconstant/ffs.query | 50d69e065793ee65f8337a789a853ed3314b6a9b | a37179419380170c45fef02af1dae9373c16986a | refs/heads/master | 2020-03-26T07:10:34.125290 | 2018-08-28T19:43:42 | 2018-08-28T19:43:42 | 144,639,807 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 271 | r | write_league_history.R | write_league_history <- function(league_ids, filename = "data-raw/master.csv"){
history <- tibble::tibble()
for(leage_id in league_ids){
history <- dplyr::bind_rows(history, get_league_history(leage_id))
}
readr::write_csv(x = history, path = filename)
}
|
45aca089bd46b400d269987768545cfa22d2c0e5 | 34711b0d14ec3da4118109de00b19eff88d12eb1 | /Luciano Bicaku/Lidhja midis Moshes dhe masave te marra/LIdhjaMidisMoshesMasave.r | 4dd59d70a750adf243874f9e810116baefdd318a | [] | no_license | LucianoBicaku/projektSatistike | b3694600c8360519395c6c9c08f48afe21d115ed | 3a26f423495c1ba709010d3d8250f1e58df3912c | refs/heads/master | 2022-07-31T23:52:39.528334 | 2020-05-25T17:20:39 | 2020-05-25T17:20:39 | 266,548,739 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 347 | r | LIdhjaMidisMoshesMasave.r | png(file = "LidhjaMosheMasa.png")
# Palette: one colour per category of the grouping variable.
color <- c(
  "#f17bc7",
  "#6d194b",
  "#796bb6",
  "#7d6e8d",
  "#6c5a9b",
  "#d7c7d8",
  "#8f8f8f"
)
# Divided (stacked) bar chart of preventive measures taken
# (Masat.Parandaluese) broken down by age group (Mosha); the y-axis label
# "Nr i personve" is Albanian for "number of persons".
# NOTE(review): Barplot() with a capital B is presumably RcmdrMisc::Barplot
# (R Commander) -- confirm that package is attached and that `dataset`
# exists in the calling environment before this runs.
with(dataset, Barplot(Masat.Parandaluese,
  by = Mosha, style = "divided",
  legend.pos = "above", xlab = "",
  ylab = "Nr i personve", main = "", col = color, border = "white"
))
# Close the PNG device opened above so the plot file is written to disk.
dev.off()
b0f8b2ea5be1c951202c65d039279e47639f6670 | c00bab3c856df375472c5ea86c37b0c9d85583fb | /man/fars_summarize_years.Rd | 28c7f70294bf6373c2ab762ceeed0aa57c6220c1 | [] | no_license | adrHuerta/hw_coursera | 623dc0677f072a84c3540243884e97f46295f7b3 | a29ce3d1050a3e405c144ca13bbb5d47b64fd0b8 | refs/heads/master | 2021-01-22T05:10:55.130130 | 2017-02-12T09:22:17 | 2017-02-12T09:22:17 | 81,631,255 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 498 | rd | fars_summarize_years.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r_functions_files.R
\name{fars_summarize_years}
\alias{fars_summarize_years}
\title{Read information of specific year giving summarize information}
\usage{
fars_summarize_years(years)
}
\arguments{
\item{years}{A character or integer value that represents a year}
}
\value{
an object of tbl_df class with two variables: year and n (count)
}
\description{
Reads the data for the specified year(s) and summarizes the number of
observations per year.
}
|
40068350d5a4b0adf2e7c41e5951e8a67dd04433 | 7fd36ab86277d0e9ff157b723fcb8ab82b3b9488 | /tests/test1.R | d777e5f5f0149d9cbce941c64e85f0243cf47c2e | [] | no_license | JoeHarkness/FARS | 2cbfaa4261b7d3b154a164957500e5d2f94e80ef | 3e33f9a5c418ecabd98c51bb047bacd2a6f74c1b | refs/heads/master | 2021-01-21T14:35:56.220258 | 2017-06-24T19:47:45 | 2017-06-24T19:47:45 | 95,312,221 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 66 | r | test1.R | expect_that(make_filename(2012),matches("accident_2012.csv.bz2"))
|
39b2aaf2730fc1e981f7b2f081e4a9de3d631be7 | c8668c41f68a561a78ce6e0baace051147d50a40 | /R/getStdVars.R | b4f1fd58ef96467532ecaf093eadbf9fff8a766c | [
"MIT"
] | permissive | wStockhausen/wtsDisMELSConn | fb3f7dfcccd71aaf72c95548cec9f4d5781b3907 | 57f9c18bccb0679eadf7b99bc946f63a206d3dd8 | refs/heads/master | 2021-01-18T23:10:10.632878 | 2017-04-04T16:10:08 | 2017-04-04T16:10:08 | 19,750,888 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,718 | r | getStdVars.R | #'
#'@title Get standard variable names for DisMELS output
#'
#'@description Returns the ordered set of standard output column names for
#'DisMELS results files together with their R storage modes.
#'
#'@param newResType - flag ("NEW" or "OLD", case-insensitive) selecting the
#'new or old DisMELS results format; any value other than "NEW" selects the
#'old format.
#'
#'@return data frame with character columns 'vars' (standard variable names)
#'and 'types' (corresponding storage modes), one row per output column.
#'
#'@details none
#'
#'@export
#'
getStdVars<-function(newResType){
    if (toupper(newResType)=='NEW'){
        # new-format output columns and their storage modes
        var.names <- c('typeName','id','parentID','origID','startTime','time',
                       'horizType','vertType','horizPos1','horizPos2','vertPos','gridCellID','track',
                       'active','alive','attached','age','ageInStage','number')
        var.types <- c('character','integer','integer','integer','character','character',
                       'integer','integer','numeric','numeric','numeric','character','character',
                       'character','character','character','numeric','numeric','numeric')
    } else {
        # old-format output columns and their storage modes
        var.names <- c('typeName','id','parentID','origID','horizType','vertType',
                       'active','alive','attached','startTime','time',
                       'age','ageInStage','size','number','horizPos1','horizPos2','vertPos',
                       'temp','salinity','gridCellID','track')
        var.types <- c('character','integer','integer','integer','integer','integer',
                       'character','character','character','character','character',
                       'numeric','numeric','numeric','numeric','numeric','numeric','numeric',
                       'numeric','numeric','character','character')
    }
    # keep plain character columns (no factor conversion)
    data.frame(vars = var.names, types = var.types, stringsAsFactors = FALSE)
}
4c480f674fd9bc2ba93c26b09fd02f7028bdfda5 | 1f2386bf37e4442ed70adb41ce4a744230acd08e | /R/data_transformation2.R | cda82080e41ffba075353e84d11475dd459d4518 | [] | no_license | limbo1996/R_and_statics | 804ccbc77db9eb41d9de6589c2ebe776ea89f868 | 747fb50b6fbfd71ae4516e2c240202957e7f124b | refs/heads/master | 2022-09-21T08:39:17.599797 | 2020-06-01T13:30:13 | 2020-06-01T13:30:13 | 218,050,333 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,236 | r | data_transformation2.R | library(nycflights13)
library(tidyverse)
library(dplyr)
filght <- flights
by_day <- group_by(flights, year, month, day)
a <- summarise(by_day, delay = mean(dep_delay, na.rm = T))
#非管道操作
by_dest <- group_by(flights, dest)
delay <- summarise(by_dest,
count = n(),
dist = mean(distance, na.rm = T),
delay = mean(arr_delay, na.rm = T)
)
delay <- filter(delay, count > 20, dest != "HNL")
ggplot(data = delay, mapping = aes(x = dist, y = delay)) +
geom_point(aes(size = count), alpha = 1/3)+
geom_smooth(se = F)
# 管道操作
delay <- flights %>%
group_by(dest) %>%
summarise(
count = n(),
dist = mean(distance, na.rm = T),
delay = mean(arr_delay, na.rm = T)
) %>%
filter(count >20, dest != 'HNL')
#管道操作可以避免中间环节,比如上面不使用管道时需要对中间数据框命名,浪费时间且不易读
# 注意要去掉缺失值,否则,结果都是NA
# 计数
not_cancelled <- flights %>%
filter(!is.na(dep_delay), !is.na(arr_delay))
delays <- not_cancelled %>%
group_by(tailnum) %>%
summarise(
delay = mean(arr_delay)
)
ggplot(data = delays, mapping = aes(x = delay)) +
geom_freqpoly(binwidth = 10)
# 有些航班延误达到5个小时,但是曲线看不出具体数目,可以使用计数
delays <- not_cancelled %>%
group_by(tailnum) %>%
summarise(
delay = mean(arr_delay, na.rm = T),
n = n()
)
ggplot(data = delays, mapping = aes(x = n, y = delay)) +
geom_point(alpha = 1/10)
#调整样本大小
delays %>%
filter(n > 25) %>%
ggplot(mapping = aes(x = n, y = delay)) +
geom_point(alpha = 1/10)
# 查看棒球手击球的数量与表现之间的关系
# convert to a tibble so it print nicely
batting <- as_tibble(Lahman::Batting)
batters <- batting %>%
group_by(playerID) %>%
summarise(
ba = sum(H, na.rm = T) / sum(AB, na.rm = T),
ab = sum(AB, na.rm = T)
)
batters %>%
filter(ab > 100) %>%
ggplot(mapping = aes(x = ab, y = ba))+
geom_point() +
geom_smooth(se = FALSE)
# 如果只想计数 计算不同机场的航班数
not_cancelled %>%
count(dest)
# 或者添加一些条件 计算不同航班的飞行距离总和
not_cancelled %>%
count(tailnum, wt = distance)
# 逻辑值在sum 和mean中也可以使用
not_cancelled %>%
group_by(year, month, day) %>%
summarise(n_early = sum(dep_time < 500))
# 原因是与数字函数使用时,TRUE转换为1, FALSE转换为0,也就是说只有
#小于500的是1被计数,这样可以免于一步filter
# 不使用上面的方法,而是先过滤
not_cancelled %>%
filter(dep_time < 500) %>%
group_by(year, month, day) %>%
summarise(n_early = n())
# 同上计算延迟大于一小时的航班的比例
not_cancelled %>%
group_by(year, month, day) %>%
summarise(hour_delay = mean(arr_time > 60))
# 数目
not_cancelled %>%
group_by(year, month, day) %>%
summarise(hour_delay = sum(arr_time > 60))
# 按多变量分组
# 每次的summary都会去除一个group
daily <- group_by(filght, year, month, day)
(per_day <- summarise(daily, flights = n()))
(per_month <- summarise(per_day, flights = sum(flights)))
(per_year <- summarise(per_month, flights = sum(flights)))
#per_month 和per_year的flight如果也是用n()会出错
# 因为n()只计算行数,所以如果使用n() Per_month得到的flight
# 是每个月的天数(因为合并为月一组了所有天数加在一起而不是航班书)
# Per_year同理,所以需要使用sum(flight)将每个行的航班数加起来
# 删除分组
daily %>%
ungroup() %>% # no longer grouped by date
summarise(flight = n()) # all flights
#练习题
# 不使用count得到相同结果
not_cancelled %>%
count(dest)
not_cancelled %>%
group_by(dest) %>%
summarise(n = n())
# 同上
not_cancelled %>%
count(tailnum, wt = distance)
not_cancelled %>%
group_by(tailnum) %>%
summarise(n = sum(distance))
# group和filter以及mutate同时使用也很方便
flights %>%
group_by(year, month, day) %>%
filter(rank(desc(arr_delay)) < 10)
# 筛选大于阈值的group
flights %>%
group_by(dest) %>%
filter(n() > 365)
|
d081e03a58a7d54f7146964322936ce9ee7636f6 | ebc7589f4f894059d84253e3f394b243bea05049 | /LD_project/RDist.R | d997ebc9a839a218da6641ab8bfdc7be97439cbc | [] | no_license | reworkhow/MPI | 2eb993c8e18309eb7b35c4dfc726f0804823082c | 2fce76d891972c1b7d4bc4c34e9245b8568533bf | refs/heads/master | 2020-06-23T23:05:57.430870 | 2016-12-08T07:29:16 | 2016-12-08T07:29:16 | 74,638,345 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,972 | r | RDist.R | # Input parameters
Ne = 5 #effective population size
u = 0.0025 #mutation rate
r = 0.01 #recombination rate
nGenerations = 400 #number of generations of random mating
interval = 50 #output frequency
# Setting up the matrices
n = 2*Ne #there are 2*Ne gamets in the population
x = matrix(ncol=4,nrow=0) #x stores all combinations of 4 haplotypes possible with n gametes
for (i in 0:n){
for (j in 0:(n-i)){
for (k in 0:(n-i-j)){
l = n-i-j-k
x = rbind(x,c(i,j,k,l))
}
}
}
# Possibilities of mutations
a = (1-u)*(1-u) #no mutation and locus 1 abd no mutation at locus 1
b = (1-u)*u #mutation at one locus
c = u*u #mutation at both locci
Mu = matrix(nrow=4,ncol=4, #possibilities to get a haplotype given mutation
c(a,b,b,c, #00 can come without mutations, from single mutation in 01, from single mutation in 10, from 2 mutations in 11
b,a,c,b, #01 can come from single mutation in 00, without mutations in 01 ,from 2 mutations in 10, from single mutation in 11
b,c,a,b, #10 can come from single mutation in 00, from 2 mutations in 01, without mutations in 10 ,from single mutation in 11
c,b,b,a),byrow=T) #11 from 2 mutations in 00, from single mutation in 01, from single mutation in 10, without mutations 11
sizeX = nrow(x) #number of combinations
A = matrix(nrow=sizeX,ncol=sizeX)
nMinus1Inv = 1/(n-1)
rsqr = c()
for (i in 1:sizeX){
x00 = x[i,1] #count for 00 haplotype from x matrix
x01 = x[i,2] #count for 01 haplotype from x matrix
x10 = x[i,3] #count for 10 haplotype from x matrix
x11 = x[i,4] #count for 11 haplotype from x matrix
p00 = x00/n #probability for 00 haplotype from x matrix
p01 = x01/n #probability for 01 haplotype from x matrix
p10 = x10/n #probability for 10 haplotype from x matrix
p11 = x11/n #probability for 11 haplotype from x matrix
p1 = p10 + p11 #total probability of 1 at locus 1
p2 = p01 + p11 #total probability of 1 at locus 2
cov = p11 - p1*p2 #covariance between the locci
rsqr = c(rsqr,cov^2/(p1*(1-p1)*p2*(1-p2))) #squared correlation between the locci
# Adding recombinations
p.recomb = c(p00*(1-r) + r*nMinus1Inv*( p00*(x00-1 + x10) + p01*(x00 + x10) ), #00 can come from 00 without recombination, from recombination of 00 with 00 or 10, recombination of 01 with 00 or 10
p01*(1-r) + r*nMinus1Inv*( p00*(x01 + x11) + p01*(x01-1 + x11) ), #01 can come from 01 without recombination, from recombination of 00 with 01 or 11, recombination of 01 with 01 or 11
p10*(1-r) + r*nMinus1Inv*( p10*(x10-1 + x00) + p11*(x10 + x00) ), #10 can come from 10 without recombination, from recombination of 10 with 10 or 00, recombination of 11 with 10 or 00
p11*(1-r) + r*nMinus1Inv*( p10*(x11 + x01) + p11*(x11-1 + x01) ) #11 can come form 11 without recombination, from recombination of 10 with 11 or 01, recombination of 11 with 11 or 01
)
#putting together mutations and recombinations
p.mut = Mu%*%p.recomb
#print(x[i,])
#print(p.mut)
#getting multinomial probabilities for all possible combinations of haplotypes
#transform matrx 286*286 -----> A%*%P to get new P in new generation, p is a vectot of 286 elements
for (j in 1:sizeX){
A[j,i] = dmultinom(x[j,],prob=p.mut,size=n)
}
}
#Getting transition matrix probabilities, starting with equal probabilities for all haplotypes
p = c()
for (i in 1:sizeX){
p = c(p,dmultinom(x[i,],prob=c(1,1,1,1),size=n))
}
count = interval
quartz(1)
par(mfrow=c(2,5))
expr2Vec = c();
for (i in 1:nGenerations){
data = cbind(rsqr,p)
data=data[complete.cases(data),] #exclude the combinations where there is no segregation at both loci
r2=round(data[,1],4)
probs = data[,2]/sum(data[,2]) #standardize probabilities to sum to one for segregating locci
cat("generation",i,"\n")
sumPr = by(probs,r2,sum) #summing probabilities for given rsq
res =cbind(sort(unique(r2)),as.matrix(sumPr)[,1])
expr2=sum(as.matrix(res[,1]*res[,2])[,1]) #expected rsq is weighted mean of rsqs
print(expr2)
expr2Vec = cbind(expr2Vec,c(i,expr2)) #storing expected rsq for plotting
if (count==interval){
plot(sort(unique(r2)),sumPr,yaxp=c(0,1,10),ylim=c(0,1)) #plotting rsq for generations determined by interval
count = 0
}
count = count + 1
p = A%*%p #recalculating new probabilities every generation
}
cat("generation",nGenerations,"\n")
print(cbind(x,rsqr,p)) #output with haplotype combination,rsq and probability
quartz(2)
windows(2)
plot(expr2Vec[1,],expr2Vec[2,]) #plotting expected rsq every generation
|
58d489e7a83782c4da0654fd70b7a4ed8e0eeab9 | 692a5ee984fa4fdb1c7d797e722ecd94687f9e4d | /1_3_sock_pair_count.R | e0e5617bf266fbd32d506792a7a805efa29f2d03 | [] | no_license | saajanrajak/hacker_rank | cae4f628fc6d45bea1c30f2b7d270543e36ca1b1 | 6fd47742eb8cfb9d5dfbad5bb84e333be25319f8 | refs/heads/main | 2023-01-30T14:28:00.652271 | 2020-12-18T18:26:27 | 2020-12-18T18:26:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 394 | r | 1_3_sock_pair_count.R | # Sock pair Count
# https://www.hackerrank.com/challenges/sock-merchant/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=warmup
# Sock Merchant: count the number of same-colour pairs in a pile of socks.
#
# Args:
#   n_col  number of socks in the pile (part of the HackerRank signature;
#          not needed, since arr carries its own length)
#   arr    vector of sock colours
#
# Returns the total number of pairs. Base-R replacement for the original
# dplyr pipeline: tabulate the colours and floor-divide each count by 2.
pair_function <- function(n_col, arr) {
  # useNA = "ifany" keeps NA colours countable, matching dplyr::count(),
  # which also groups NA values
  sum(table(arr, useNA = "ifany") %/% 2)
}
pair_function(5, c(2,3,4,2,4,4,4,4))
|
0d6be712e46de4e9b1d3c025857eca7e53c2b5ed | 830f467753e1a4ae9b6306cd10259624c3b40281 | /met_aq_merged.R | e93eee809bf6f6a71a552e954da242f64bdfbe09 | [] | no_license | kobajuluwa-eq/AirQualityScripts | d6c13b56704c7c5558fd5542a77ac4631b690324 | d1b58bd1cf906eab0700037611282c227d61fea4 | refs/heads/main | 2023-03-18T22:40:19.624188 | 2021-03-13T06:36:54 | 2021-03-13T06:36:54 | 347,293,685 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 124,810 | r | met_aq_merged.R | library(tidyverse);library(plyr);library(data.table)
library(openair);library(sqldf);library(ggplot2);library(dplyr)
library(openxlsx);library(readxl);library(ggpubr);library(lubridate)
#function to replace all NaN with NA
# is.nan() method for data frames: apply is.nan() to every column, then
# bind the per-column logical vectors into one matrix the same shape as x.
is.nan.data.frame <- function(x) {
  nan_by_col <- lapply(x, is.nan)
  do.call(cbind, nan_by_col)
}
# import AQ data ####################
# One column-name vector per monitoring site, each name carrying a site
# prefix (abe = Abesan, iko = Ikorodu, jan = Jankara, las = LASEPA,
# ncf = NCF, uni = Unilag) so the six per-site frames can later be merged
# wide on "date" without column-name clashes.
abe_colnames <- c("date","abeno2","abeo3","abeno","abeso2","abelat","abelon","abepm1","abepm25","abepm10","abeco","abetvoc","abeco2","abesite")
iko_colnames <- c("date","ikono2","ikoo3","ikono","ikoso2","ikolat","ikolon","ikopm1","ikopm25","ikopm10","ikoco","ikotvoc","ikoco2","ikosite")
jan_colnames <- c("date","janno2","jano3","janno","janso2","janlat","janlon","janpm1","janpm25","janpm10","janco","jantvoc","janco2","jansite")
las_colnames <- c("date","lasno2","laso3","lasno","lasso2","laslat","laslon","laspm1","laspm25","laspm10","lasco","lastvoc","lasco2","lassite")
ncf_colnames <- c("date","ncfno2","ncfo3","ncfno","ncfso2","ncflat","ncflon","ncfpm1","ncfpm25","ncfpm10","ncfco","ncftvoc","ncfco2","ncfsite")
uni_colnames <- c("date","unino2","unio3","unino","uniso2","unilat","unilon","unipm1","unipm25","unipm10","unico","unitvoc","unico2","unisite")
# import aug 5min
# One workbook per month; sheets 1..6 hold the six sites in the fixed order
# above. "-999" is the logger's missing-data sentinel and is read as NA;
# skip = 1 drops the workbook's own header row in favour of the name vectors.
aug5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/August Database/AQ_5min_August_QC.xlsx"
excel_sheets(aug5minfile)[1:6]
augabe <- read_xlsx(aug5minfile,sheet = excel_sheets(aug5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames, skip = 1)
augiko <- read_xlsx(aug5minfile,sheet = excel_sheets(aug5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames, skip = 1)
augjan <- read_xlsx(aug5minfile,sheet = excel_sheets(aug5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames, skip = 1)
auglas <- read_xlsx(aug5minfile,sheet = excel_sheets(aug5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames, skip = 1)
augncf <- read_xlsx(aug5minfile,sheet = excel_sheets(aug5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames, skip = 1)
auguni <- read_xlsx(aug5minfile,sheet = excel_sheets(aug5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames, skip = 1)
# import sep 5min
sep5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/September Database/AQ_5min_September_QC.xlsx"
excel_sheets(sep5minfile)[1:6]
sepabe <- read_xlsx(sep5minfile,sheet = excel_sheets(sep5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames, skip = 1)
sepiko <- read_xlsx(sep5minfile,sheet = excel_sheets(sep5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames, skip = 1)
sepjan <- read_xlsx(sep5minfile,sheet = excel_sheets(sep5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames, skip = 1)
seplas <- read_xlsx(sep5minfile,sheet = excel_sheets(sep5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames, skip = 1)
sepncf <- read_xlsx(sep5minfile,sheet = excel_sheets(sep5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames, skip = 1)
sepuni <- read_xlsx(sep5minfile,sheet = excel_sheets(sep5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames, skip = 1)
# import oct 5min
oct5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/October Database/AQ_5min_October_QC.xlsx"
excel_sheets(oct5minfile)[1:6]
octabe <- read_xlsx(oct5minfile,sheet = excel_sheets(oct5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames, skip = 1)
octiko <- read_xlsx(oct5minfile,sheet = excel_sheets(oct5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames, skip = 1)
octjan <- read_xlsx(oct5minfile,sheet = excel_sheets(oct5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames, skip = 1)
octlas <- read_xlsx(oct5minfile,sheet = excel_sheets(oct5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames, skip = 1)
octncf <- read_xlsx(oct5minfile,sheet = excel_sheets(oct5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames, skip = 1)
octuni <- read_xlsx(oct5minfile,sheet = excel_sheets(oct5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames, skip = 1)
# import nov 5min
nov5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/November Database/AQ_5min_November_QC.xlsx"
excel_sheets(nov5minfile)[1:6]
novabe <- read_xlsx(nov5minfile,sheet = excel_sheets(nov5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames, skip = 1)
noviko <- read_xlsx(nov5minfile,sheet = excel_sheets(nov5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames, skip = 1)
novjan <- read_xlsx(nov5minfile,sheet = excel_sheets(nov5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames, skip = 1)
novlas <- read_xlsx(nov5minfile,sheet = excel_sheets(nov5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames, skip = 1)
novncf <- read_xlsx(nov5minfile,sheet = excel_sheets(nov5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames, skip = 1)
novuni <- read_xlsx(nov5minfile,sheet = excel_sheets(nov5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames, skip = 1)
# import dec 5min
dec5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/December Database/AQ_5min_December_QC.xlsx"
excel_sheets(dec5minfile)[1:6]
decabe <- read_xlsx(dec5minfile,sheet = excel_sheets(dec5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames, skip = 1)
deciko <- read_xlsx(dec5minfile,sheet = excel_sheets(dec5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames, skip = 1)
decjan <- read_xlsx(dec5minfile,sheet = excel_sheets(dec5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames, skip = 1)
declas <- read_xlsx(dec5minfile,sheet = excel_sheets(dec5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames, skip = 1)
decncf <- read_xlsx(dec5minfile,sheet = excel_sheets(dec5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames, skip = 1)
decuni <- read_xlsx(dec5minfile,sheet = excel_sheets(dec5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames, skip = 1)
# import jan 5min
jan5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/January Database/AQ_5min_January_QC.xlsx"
excel_sheets(jan5minfile)[1:6]
janabe <- read_xlsx(jan5minfile,sheet = excel_sheets(jan5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames, skip = 1)
janiko <- read_xlsx(jan5minfile,sheet = excel_sheets(jan5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames, skip = 1)
janjan <- read_xlsx(jan5minfile,sheet = excel_sheets(jan5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames, skip = 1)
janlas <- read_xlsx(jan5minfile,sheet = excel_sheets(jan5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames, skip = 1)
janncf <- read_xlsx(jan5minfile,sheet = excel_sheets(jan5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames, skip = 1)
januni <- read_xlsx(jan5minfile,sheet = excel_sheets(jan5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames, skip = 1)
# bind each site for all six months (Aug 2020 .. Jan 2021, chronological)
abe_bind <- rbind(augabe,sepabe,octabe,novabe,decabe,janabe)
iko_bind <- rbind(augiko,sepiko,octiko,noviko,deciko,janiko)
jan_bind <- rbind(augjan,sepjan,octjan,novjan,decjan,janjan)
las_bind <- rbind(auglas,seplas,octlas,novlas,declas,janlas)
ncf_bind <- rbind(augncf,sepncf,octncf,novncf,decncf,janncf)
uni_bind <- rbind(auguni,sepuni,octuni,novuni,decuni,januni)
# Study window used to pad every site onto the same regular time axis.
startDate <- "2020-08-01 00:00:00" # 1 August 2020, start of study window
endDate <- "2021-01-31 23:59:00" # 23:59 on 31 January 2021, end of study window (original comment wrongly said "february 1 2021")
# average to 5 min
# openair::timeAverage regularises each site onto an identical 5-minute grid.
# NOTE(review): timeAverage with statistic = "mean" appears to drop the
# character *site columns here -- the 23-name rename vector later in the file
# only balances if it does; confirm against the openair version in use.
abe_5min <- timeAverage(abe_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
iko_5min <- timeAverage(iko_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
jan_5min <- timeAverage(jan_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
las_5min <- timeAverage(las_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
ncf_5min <- timeAverage(ncf_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
uni_5min <- timeAverage(uni_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
names(abe_5min)
summary(abe_5min)
# Full outer join of all six sites on "date": one wide AQ frame.
aq_allsites_5min <- Reduce(function(x, y) merge(x, y,by = "date", all=TRUE), list(abe_5min,iko_5min,jan_5min,las_5min,ncf_5min,uni_5min))
summary(aq_allsites_5min)
nrow(aq_allsites_5min)
# import met data ##############################################
# Same layout as the AQ import above, but for the meteorological loggers:
# pressure, temperature, relative humidity, wet-bulb temperature, wind
# speed/direction, rain and GPS position, site-prefixed per station.
abe_colnames_met <- c("date","abepress","abetemp","aberh","abewb_temp","abews","abewd","aberain","abelat","abelon","abesite")
iko_colnames_met <- c("date","ikopress","ikotemp","ikorh","ikowb_temp","ikows","ikowd","ikorain","ikolat","ikolon","ikosite")
jan_colnames_met <- c("date","janpress","jantemp","janrh","janwb_temp","janws","janwd","janrain","janlat","janlon","jansite")
las_colnames_met <- c("date","laspress","lastemp","lasrh","laswb_temp","lasws","laswd","lasrain","laslat","laslon","lassite")
ncf_colnames_met <- c("date","ncfpress","ncftemp","ncfrh","ncfwb_temp","ncfws","ncfwd","ncfrain","ncflat","ncflon","ncfsite")
uni_colnames_met <- c("date","unipress","unitemp","unirh","uniwb_temp","uniws","uniwd","unirain","unilat","unilon","unisite")
# import aug 5min
met_aug5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/August Database/MET_5min_August.xlsx"
excel_sheets(met_aug5minfile)[1:6]
met_augabe <- read_xlsx(met_aug5minfile,sheet = excel_sheets(met_aug5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames_met, skip = 1)
met_augiko <- read_xlsx(met_aug5minfile,sheet = excel_sheets(met_aug5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames_met, skip = 1)
met_augjan <- read_xlsx(met_aug5minfile,sheet = excel_sheets(met_aug5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames_met, skip = 1)
met_auglas <- read_xlsx(met_aug5minfile,sheet = excel_sheets(met_aug5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames_met, skip = 1)
met_augncf <- read_xlsx(met_aug5minfile,sheet = excel_sheets(met_aug5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames_met, skip = 1)
met_auguni <- read_xlsx(met_aug5minfile,sheet = excel_sheets(met_aug5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames_met, skip = 1)
# import sep 5min
met_sep5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/September Database/MET_5min_September.xlsx"
excel_sheets(met_sep5minfile)[1:6]
met_sepabe <- read_xlsx(met_sep5minfile,sheet = excel_sheets(met_sep5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames_met, skip = 1)
met_sepiko <- read_xlsx(met_sep5minfile,sheet = excel_sheets(met_sep5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames_met, skip = 1)
met_sepjan <- read_xlsx(met_sep5minfile,sheet = excel_sheets(met_sep5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames_met, skip = 1)
met_seplas <- read_xlsx(met_sep5minfile,sheet = excel_sheets(met_sep5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames_met, skip = 1)
met_sepncf <- read_xlsx(met_sep5minfile,sheet = excel_sheets(met_sep5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames_met, skip = 1)
met_sepuni <- read_xlsx(met_sep5minfile,sheet = excel_sheets(met_sep5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames_met, skip = 1)
# import oct 5min
met_oct5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/October Database/MET_5min_October.xlsx"
excel_sheets(met_oct5minfile)[1:6]
met_octabe <- read_xlsx(met_oct5minfile,sheet = excel_sheets(met_oct5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames_met, skip = 1)
met_octiko <- read_xlsx(met_oct5minfile,sheet = excel_sheets(met_oct5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames_met, skip = 1)
met_octjan <- read_xlsx(met_oct5minfile,sheet = excel_sheets(met_oct5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames_met, skip = 1)
met_octlas <- read_xlsx(met_oct5minfile,sheet = excel_sheets(met_oct5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames_met, skip = 1)
met_octncf <- read_xlsx(met_oct5minfile,sheet = excel_sheets(met_oct5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames_met, skip = 1)
met_octuni <- read_xlsx(met_oct5minfile,sheet = excel_sheets(met_oct5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames_met, skip = 1)
# import nov 5min
met_nov5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/November Database/MET_5min_November.xlsx"
excel_sheets(met_nov5minfile)[1:6]
met_novabe <- read_xlsx(met_nov5minfile,sheet = excel_sheets(met_nov5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames_met, skip = 1)
met_noviko <- read_xlsx(met_nov5minfile,sheet = excel_sheets(met_nov5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames_met, skip = 1)
met_novjan <- read_xlsx(met_nov5minfile,sheet = excel_sheets(met_nov5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames_met, skip = 1)
met_novlas <- read_xlsx(met_nov5minfile,sheet = excel_sheets(met_nov5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames_met, skip = 1)
met_novncf <- read_xlsx(met_nov5minfile,sheet = excel_sheets(met_nov5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames_met, skip = 1)
met_novuni <- read_xlsx(met_nov5minfile,sheet = excel_sheets(met_nov5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames_met, skip = 1)
# import dec 5min
met_dec5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/December Database/MET_5min_December.xlsx"
excel_sheets(met_dec5minfile)[1:6]
met_decabe <- read_xlsx(met_dec5minfile,sheet = excel_sheets(met_dec5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames_met, skip = 1)
met_deciko <- read_xlsx(met_dec5minfile,sheet = excel_sheets(met_dec5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames_met, skip = 1)
met_decjan <- read_xlsx(met_dec5minfile,sheet = excel_sheets(met_dec5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames_met, skip = 1)
met_declas <- read_xlsx(met_dec5minfile,sheet = excel_sheets(met_dec5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames_met, skip = 1)
met_decncf <- read_xlsx(met_dec5minfile,sheet = excel_sheets(met_dec5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames_met, skip = 1)
met_decuni <- read_xlsx(met_dec5minfile,sheet = excel_sheets(met_dec5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames_met, skip = 1)
# import jan 5min
met_jan5minfile <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/January Database/MET_5min_January.xlsx"
excel_sheets(met_jan5minfile)[1:6]
met_janabe <- read_xlsx(met_jan5minfile,sheet = excel_sheets(met_jan5minfile)[1], na = c("NA","-999"),guess_max = 10000,col_names = abe_colnames_met, skip = 1)
met_janiko <- read_xlsx(met_jan5minfile,sheet = excel_sheets(met_jan5minfile)[2], na = c("NA","-999"),guess_max = 10000,col_names = iko_colnames_met, skip = 1)
met_janjan <- read_xlsx(met_jan5minfile,sheet = excel_sheets(met_jan5minfile)[3], na = c("NA","-999"),guess_max = 10000,col_names = jan_colnames_met, skip = 1)
met_janlas <- read_xlsx(met_jan5minfile,sheet = excel_sheets(met_jan5minfile)[4], na = c("NA","-999"),guess_max = 10000,col_names = las_colnames_met, skip = 1)
met_janncf <- read_xlsx(met_jan5minfile,sheet = excel_sheets(met_jan5minfile)[5], na = c("NA","-999"),guess_max = 10000,col_names = ncf_colnames_met, skip = 1)
met_januni <- read_xlsx(met_jan5minfile,sheet = excel_sheets(met_jan5minfile)[6], na = c("NA","-999"),guess_max = 10000,col_names = uni_colnames_met, skip = 1)
# bind each site for all six months (chronological order)
met_abe_bind <- rbind(met_augabe,met_sepabe,met_octabe,met_novabe,met_decabe,met_janabe)
met_iko_bind <- rbind(met_augiko,met_sepiko,met_octiko,met_noviko,met_deciko,met_janiko)
met_jan_bind <- rbind(met_augjan,met_sepjan,met_octjan,met_novjan,met_decjan,met_janjan)
met_las_bind <- rbind(met_auglas,met_seplas,met_octlas,met_novlas,met_declas,met_janlas)
met_ncf_bind <- rbind(met_augncf,met_sepncf,met_octncf,met_novncf,met_decncf,met_janncf)
met_uni_bind <- rbind(met_auguni,met_sepuni,met_octuni,met_novuni,met_decuni,met_januni)
# average to 5 min (same window and grid as the AQ frames above)
met_abe_5min <- timeAverage(met_abe_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
met_iko_5min <- timeAverage(met_iko_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
met_jan_5min <- timeAverage(met_jan_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
met_las_5min <- timeAverage(met_las_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
met_ncf_5min <- timeAverage(met_ncf_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
met_uni_5min <- timeAverage(met_uni_bind, avg.time = "5 min", statistic = "mean", start.date = startDate, end.date = endDate)
# Full outer join of all six met frames on "date": one wide met frame.
met_allsites_5min <- Reduce(function(x, y) merge(x, y,by = "date", all=TRUE), list(met_abe_5min,met_iko_5min,met_jan_5min,met_las_5min,met_ncf_5min,met_uni_5min))
summary(met_allsites_5min)
# merge aq and met for all sites ###########################
# Inner join on "date". Columns present in both frames (e.g. abelat/abelon)
# get merge()'s default ".x" (AQ) / ".y" (met) suffixes here.
allsites_5min <- merge(aq_allsites_5min,met_allsites_5min, by = "date")
summary(allsites_5min)
nrow(allsites_5min)
ncol(allsites_5min)
# Replace NaN (produced by mean() over all-NA windows) with NA, using the
# is.nan.data.frame S3 method defined at the top of the file.
allsites_5min[is.nan(allsites_5min)] <- NA
summary(allsites_5min)
#
# tempnames <-grep("*temp", names(met_allsites_5min), value = TRUE)
# tempcols <- select(met_allsites_5min, all_of(tempnames))
# summary(tempcols[,seq_len(ncol(tempcols)) %% 2 != 0])
# average to x hours ###############
# Derive daily, hourly and 8-hourly frames from the common 5-minute frame.
allsites_24h <- timeAverage(allsites_5min, avg.time = "1 day", statistic = "mean")
allsites_1h <- timeAverage(allsites_5min, avg.time = "1 hour", statistic = "mean")
allsites_8h <- timeAverage(allsites_5min, avg.time = "8 hour", statistic = "mean")
# format, add site column, filter by site, then stack ##################
# Split the merged wide frames into one frame per site by column-name prefix.
# Full site prefixes are used (instead of the original single letters) so the
# selection cannot accidentally capture an unrelated column that merely
# starts with the same letter; on the current columns the result is
# identical. "date" is prepended since it carries no site prefix.
hnames <- names(allsites_24h)
abenames <- c("date", grep("^abe", hnames, value = TRUE))
ikonames <- c("date", grep("^iko", hnames, value = TRUE))
lasnames <- c("date", grep("^las", hnames, value = TRUE))
jannames <- c("date", grep("^jan", hnames, value = TRUE))
ncfnames <- c("date", grep("^ncf", hnames, value = TRUE))
uninames <- c("date", grep("^uni", hnames, value = TRUE))
# Per-site 5-minute frames (kept for ad-hoc exports; see commented block below)
abe_5m <- select(allsites_5min, all_of(abenames))
iko_5m <- select(allsites_5min, all_of(ikonames))
las_5m <- select(allsites_5min, all_of(lasnames))
jan_5m <- select(allsites_5min, all_of(jannames))
ncf_5m <- select(allsites_5min, all_of(ncfnames))
uni_5m <- select(allsites_5min, all_of(uninames))
############## quick export for mr Ganiyu ################
# One-off export of selected PM + met columns per site, retained commented
# out for reference.
#
# abe_5m_drop <- abe_5m %>% drop_na(abepm25) %>% select(c(1,8,9,10,14,15,16,18,20)) %>% drop_na(abetemp)
# iko_5m_drop <- iko_5m %>% drop_na(ikopm25) %>% select(c(1,8,9,10,14,15,16,18,20)) %>% drop_na(ikotemp)
# las_5m_drop <- las_5m %>% drop_na(laspm25) %>% select(c(1,8,9,10,14,15,16,18,20)) %>% drop_na(lastemp)
# jan_5m_drop <- jan_5m %>% drop_na(janpm25) %>% select(c(1,8,9,10,14,15,16,18,20)) %>% drop_na(jantemp)
# ncf_5m_drop <- ncf_5m %>% drop_na(ncfpm25) %>% select(c(1,8,9,10,14,15,16,18,20)) %>% drop_na(ncftemp)
# uni_5m_drop <- uni_5m %>% drop_na(unipm25) %>% select(c(1,8,9,10,14,15,16,18,20)) %>% drop_na(unitemp)
#
# names(abe_5m_drop)
# explistxl <- list('abesan' = abe_5m_drop,
#                   'ikorodu' = iko_5m_drop,
#                   'jankara' = jan_5m_drop,
#                   'lasepa' = las_5m_drop,
#                   'ncf' = ncf_5m_drop,
#                   'unilag' = uni_5m_drop)
# getwd()
#
# write.xlsx(explistxl, "Met_AQ_data_for_correlation.xlsx", row.names = FALSE)
############
# Per-site daily frames, tagged with a lowercase site label for later
# long-format stacking via force_bind().
abe_24h <- select(allsites_24h,all_of(abenames))
iko_24h <- select(allsites_24h,all_of(ikonames))
las_24h <- select(allsites_24h,all_of(lasnames))
jan_24h <- select(allsites_24h,all_of(jannames))
ncf_24h <- select(allsites_24h,all_of(ncfnames))
uni_24h <- select(allsites_24h,all_of(uninames))
abe_24h$site <- "abesan"
iko_24h$site <- "ikorodu"
las_24h$site <- "lasepa"
jan_24h$site <- "jankara"
ncf_24h$site <- "ncf"
uni_24h$site <- "unilag"
summary(abe_24h)
colnames(abe_24h)
# Per-site hourly frames, same site tags.
abe_1h <- select(allsites_1h,all_of(abenames))
iko_1h <- select(allsites_1h,all_of(ikonames))
las_1h <- select(allsites_1h,all_of(lasnames))
jan_1h <- select(allsites_1h,all_of(jannames))
ncf_1h <- select(allsites_1h,all_of(ncfnames))
uni_1h <- select(allsites_1h,all_of(uninames))
abe_1h$site <- "abesan"
iko_1h$site <- "ikorodu"
las_1h$site <- "lasepa"
jan_1h$site <- "jankara"
ncf_1h$site <- "ncf"
uni_1h$site <- "unilag"
# Per-site 8-hourly frames, same site tags.
abe_8h <- select(allsites_8h,all_of(abenames))
iko_8h <- select(allsites_8h,all_of(ikonames))
las_8h <- select(allsites_8h,all_of(lasnames))
jan_8h <- select(allsites_8h,all_of(jannames))
ncf_8h <- select(allsites_8h,all_of(ncfnames))
uni_8h <- select(allsites_8h,all_of(uninames))
abe_8h$site <- "abesan"
iko_8h$site <- "ikorodu"
las_8h$site <- "lasepa"
jan_8h$site <- "jankara"
ncf_8h$site <- "ncf"
uni_8h$site <- "unilag"
# Stack per-site data frames on top of each other. Every frame after the
# first has its column names overwritten with those of the first, so
# bind_rows() aligns columns by position (the per-site frames carry
# site-prefixed names that would otherwise not match).
#
# Generalized from the original fixed six-argument version to accept any
# number of frames after the first; existing six-argument calls are
# unaffected. All frames must have the same number and order of columns
# as df1.
#
# Returns one stacked data frame (a tibble, as produced by dplyr::bind_rows).
force_bind <- function(df1, ...) {
  rest <- lapply(list(...), function(df) {
    colnames(df) <- colnames(df1)
    df
  })
  bind_rows(c(list(df1), rest))
}
# Stack the six per-site frames into one long frame per averaging period.
all_24h <- data.frame(force_bind(abe_24h, iko_24h, las_24h, jan_24h, ncf_24h, uni_24h))
all_1h <- data.frame(force_bind(abe_1h, iko_1h, las_1h, jan_1h, ncf_1h, uni_1h))
all_8h <- data.frame(force_bind(abe_8h, iko_8h, las_8h, jan_8h, ncf_8h, uni_8h))
# NaN -> NA via the is.nan.data.frame S3 method defined at the top of file.
all_24h[is.nan(all_24h)] <- NA
all_1h[is.nan(all_1h)] <- NA
all_8h[is.nan(all_8h)] <- NA
names(all_24h)
# Generic (site-prefix-free) column names, in the column order produced by
# force_bind(): AQ columns first, then met. "lat.x"/"lon.x" are the AQ
# logger's GPS fix and "lat.y"/"lon.y" the met logger's -- the suffixes come
# from the earlier merge() of the AQ and met frames.
nnames <- c("date","no2","o3","no","so2","lat.x","lon.x","pm1","pm25","pm10","co","tvoc","co2",
"press","temp","rh","wb_temp","ws","wd","rain","lat.y","lon.y","site")
all_24h <- setNames(all_24h, nnames)
all_1h <- setNames(all_1h, nnames)
all_8h <- setNames(all_8h, nnames)
# add met categories ################################
# Bin the continuous met variables into categorical ranges used by the
# exceedance bar plots below. The same four cuts apply to the daily, hourly
# and 8-hourly frames, so the binning is factored into one helper instead of
# the original twelve copy-pasted cut() calls.
#
# NOTE(review): with cut()'s default right-closed intervals the first
# temperature bin is (-Inf, 28.9] although its label reads "<28.9" (and
# similarly for the other first bins); breaks and labels are kept exactly
# as in the original so the plots are unchanged.
add_met_ranges <- function(df) {
  # Temperature (deg C), four bands
  df$temprange <- cut(df$temp, breaks = c(-Inf, 28.9, 30.9, 35.9, Inf),
                      labels = c("<28.9", "29-30.9", "31-35.9", ">36"),
                      include.lowest = TRUE)
  # Relative humidity (%), three bands
  df$rhrange <- cut(df$rh, breaks = c(-Inf, 68.08, 82.6, Inf),
                    labels = c("<68.08", "68.09-82.60", ">82.61"),
                    include.lowest = TRUE)
  # Precipitation (mm): zero ("dry") vs non-zero ("wet")
  df$rainrange <- cut(df$rain, breaks = c(-Inf, 0, Inf),
                      labels = c("dry", "wet"), include.lowest = TRUE)
  # Wind speed (m/s), three bands
  df$wsrange <- cut(df$ws, breaks = c(-Inf, 1.39, 2.79, Inf),
                    labels = c("<1.39", "1.40-2.79", ">2.80"),
                    include.lowest = TRUE)
  df
}
all_24h <- add_met_ranges(all_24h)
all_1h <- add_met_ranges(all_1h)
all_8h <- add_met_ranges(all_8h)
#######################################
#exceedance plots for all met parameters ########
# PM plots #################
# Output directory for the PM exceedance plots.
met_plt_dir_pm <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/Six-Month Interim Report/plots and tables/Met_Exceedances Plots/PM plots"
dir.create(met_plt_dir_pm)
# NOTE(review): setwd() changes global state for the rest of the session;
# all jpeg() calls below write into this directory.
setwd(met_plt_dir_pm)
# Keep only the days that exceed the 24-h thresholds (25 ug/m3 for PM2.5,
# 50 ug/m3 for PM10 -- presumably the WHO 2005 24-h guideline values; confirm).
pm25_all_24h <- filter(all_24h, pm25 > 25)
pm10_all_24h <- filter(all_24h, pm10 > 50)
# add a pollutant column
pm25_all_24h$pollutant <- "PM2.5"
pm10_all_24h$pollutant <- "PM10"
# bind for multiple pollutants
pm_all_24h <- rbind(pm25_all_24h,pm10_all_24h)
# fix the pollutant ordering (PM2.5 before PM10 in legends/facets)
pm_all_24h$pollutant <- factor(pm_all_24h$pollutant, levels = c("PM2.5","PM10"))
# ABESAN #####################################
# group by and summarise events into bins
pm_all_24h_temp_tbl <- pm_all_24h %>%
group_by(temprange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_pm_temp_pl <- ggplot(na.omit(filter(pm_all_24h_temp_tbl, site == "abesan")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
abe_pm_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
pm_all_24h_rh_tbl <- pm_all_24h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_pm_rh_pl <- ggplot(na.omit(filter(pm_all_24h_rh_tbl, site == "abesan")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
abe_pm_rh_pl
# PRECIPITATION
# group by and summarise events into bins
pm_all_24h_rain_tbl <- pm_all_24h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_pm_rain_pl <- ggplot(na.omit(filter(pm_all_24h_rain_tbl, site == "abesan")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
abe_pm_rain_pl
# WIND SPEED
# group by and summarise events into bins
pm_all_24h_ws_tbl <- pm_all_24h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_pm_ws_pl <- ggplot(na.omit(filter(pm_all_24h_ws_tbl, site == "abesan")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
abe_pm_ws_pl
abe_met_pm <- ggarrange(abe_pm_temp_pl,abe_pm_rh_pl,abe_pm_rain_pl,abe_pm_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
abe_met_pm
abe_met_pm <- annotate_figure(abe_met_pm, top = text_grob(bquote(Abesan~PM~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# Open a tiff file no2
jpeg(file="abe_met_pm.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# 2. Create a plot
abe_met_pm
# Close the pdf file
dev.off()
# IKORODU ####################################################################
# barplot
iko_pm_temp_pl <- ggplot(na.omit(filter(pm_all_24h_temp_tbl, site == "ikorodu")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_pm_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
pm_all_24h_rh_tbl <- pm_all_24h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
iko_pm_rh_pl <- ggplot(na.omit(filter(pm_all_24h_rh_tbl, site == "ikorodu")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_pm_rh_pl
# PRECIPITATION
# group by and summarise events into bins
pm_all_24h_rain_tbl <- pm_all_24h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
iko_pm_rain_pl <- ggplot(na.omit(filter(pm_all_24h_rain_tbl, site == "ikorodu")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_pm_rain_pl
# WIND SPEED
# group by and summarise events into bins
pm_all_24h_ws_tbl <- pm_all_24h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
iko_pm_ws_pl <- ggplot(na.omit(filter(pm_all_24h_ws_tbl, site == "ikorodu")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_pm_ws_pl
iko_met_pm <- ggarrange(iko_pm_temp_pl,iko_pm_rh_pl,iko_pm_rain_pl,iko_pm_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
iko_met_pm
iko_met_pm <- annotate_figure(iko_met_pm, top = text_grob(bquote(Ikorodu~PM~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# Open a tiff file no2
jpeg(file="iko_met_pm.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# 2. Create a plot
iko_met_pm
# Close the pdf file
dev.off()
# JANKARA ####################################################################
# barplot: PM exceedance day counts per temperature bin (reuses the shared
# pm_all_24h_temp_tbl computed earlier in the script)
jan_pm_temp_pl <- ggplot(na.omit(filter(pm_all_24h_temp_tbl, site == "jankara")), aes(x=temprange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Temperature range (\u00B0C)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
jan_pm_temp_pl
# RELATIVE HUMIDITY
# group by and summarise exceedance events into relative-humidity bins
# NOTE(review): identical to the table built in the previous site section --
# redundant recomputation, but harmless
pm_all_24h_rh_tbl <- pm_all_24h %>%
  group_by(rhrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per relative-humidity bin
jan_pm_rh_pl <- ggplot(na.omit(filter(pm_all_24h_rh_tbl, site == "jankara")), aes(x=rhrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Range of Relative Humidity (%)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
jan_pm_rh_pl
# PRECIPITATION
# group by and summarise exceedance events into precipitation bins
pm_all_24h_rain_tbl <- pm_all_24h %>%
  group_by(rainrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per precipitation bin
jan_pm_rain_pl <- ggplot(na.omit(filter(pm_all_24h_rain_tbl, site == "jankara")), aes(x=rainrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Precipitation (mm)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
jan_pm_rain_pl
# WIND SPEED
# group by and summarise exceedance events into wind-speed bins
pm_all_24h_ws_tbl <- pm_all_24h %>%
  group_by(wsrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per wind-speed bin
jan_pm_ws_pl <- ggplot(na.omit(filter(pm_all_24h_ws_tbl, site == "jankara")), aes(x=wsrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Wind speed range (m/sec)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
jan_pm_ws_pl
# combine the four met-condition panels into one 2x2 figure
jan_met_pm <- ggarrange(jan_pm_temp_pl,jan_pm_rh_pl,jan_pm_rain_pl,jan_pm_ws_pl,
                        labels = c("(a)","(b)","(c)","(d)"),
                        ncol = 2,nrow = 2)
jan_met_pm
jan_met_pm <- annotate_figure(jan_met_pm, top = text_grob(bquote(Jankara~PM~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a jpeg device for the combined Jankara PM figure
# NOTE(review): type="windows" makes this Windows-only -- confirm portability is not required
jpeg(file="jan_met_pm.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
     type="windows", antialias="cleartype")
# draw the figure into the device
# NOTE(review): a bare symbol only auto-prints interactively; use print() if
# this script is run via source()
jan_met_pm
# close the jpeg device (writes the file)
dev.off()
# Lasepa ####################################################################
# barplot: PM exceedance day counts per temperature bin (reuses the shared
# pm_all_24h_temp_tbl computed earlier in the script)
las_pm_temp_pl <- ggplot(na.omit(filter(pm_all_24h_temp_tbl, site == "lasepa")), aes(x=temprange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Temperature range (\u00B0C)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
las_pm_temp_pl
# RELATIVE HUMIDITY
# group by and summarise exceedance events into relative-humidity bins
# NOTE(review): identical recomputation of a table already built above --
# redundant but harmless
pm_all_24h_rh_tbl <- pm_all_24h %>%
  group_by(rhrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per relative-humidity bin
las_pm_rh_pl <- ggplot(na.omit(filter(pm_all_24h_rh_tbl, site == "lasepa")), aes(x=rhrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Range of Relative Humidity (%)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
las_pm_rh_pl
# PRECIPITATION
# group by and summarise exceedance events into precipitation bins
pm_all_24h_rain_tbl <- pm_all_24h %>%
  group_by(rainrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per precipitation bin
las_pm_rain_pl <- ggplot(na.omit(filter(pm_all_24h_rain_tbl, site == "lasepa")), aes(x=rainrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Precipitation (mm)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
las_pm_rain_pl
# WIND SPEED
# group by and summarise exceedance events into wind-speed bins
pm_all_24h_ws_tbl <- pm_all_24h %>%
  group_by(wsrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per wind-speed bin
las_pm_ws_pl <- ggplot(na.omit(filter(pm_all_24h_ws_tbl, site == "lasepa")), aes(x=wsrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Wind speed range (m/sec)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
las_pm_ws_pl
# combine the four met-condition panels into one 2x2 figure
las_met_pm <- ggarrange(las_pm_temp_pl,las_pm_rh_pl,las_pm_rain_pl,las_pm_ws_pl,
                        labels = c("(a)","(b)","(c)","(d)"),
                        ncol = 2,nrow = 2)
las_met_pm
las_met_pm <- annotate_figure(las_met_pm, top = text_grob(bquote(LASEPA~PM~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a jpeg device for the combined LASEPA PM figure
# NOTE(review): type="windows" makes this Windows-only -- confirm portability is not required
jpeg(file="las_met_pm.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
     type="windows", antialias="cleartype")
# draw the figure into the device
# NOTE(review): a bare symbol only auto-prints interactively; use print() if
# this script is run via source()
las_met_pm
# close the jpeg device (writes the file)
dev.off()
# NCF ####################################################################
# barplot: PM exceedance day counts per temperature bin (reuses the shared
# pm_all_24h_temp_tbl computed earlier in the script)
ncf_pm_temp_pl <- ggplot(na.omit(filter(pm_all_24h_temp_tbl, site == "ncf")), aes(x=temprange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Temperature range (\u00B0C)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
ncf_pm_temp_pl
# RELATIVE HUMIDITY
# group by and summarise exceedance events into relative-humidity bins
# NOTE(review): identical recomputation of a table already built above --
# redundant but harmless
pm_all_24h_rh_tbl <- pm_all_24h %>%
  group_by(rhrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per relative-humidity bin
ncf_pm_rh_pl <- ggplot(na.omit(filter(pm_all_24h_rh_tbl, site == "ncf")), aes(x=rhrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Range of Relative Humidity (%)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
ncf_pm_rh_pl
# PRECIPITATION
# group by and summarise exceedance events into precipitation bins
pm_all_24h_rain_tbl <- pm_all_24h %>%
  group_by(rainrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per precipitation bin
ncf_pm_rain_pl <- ggplot(na.omit(filter(pm_all_24h_rain_tbl, site == "ncf")), aes(x=rainrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Precipitation (mm)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
ncf_pm_rain_pl
# WIND SPEED
# group by and summarise exceedance events into wind-speed bins
pm_all_24h_ws_tbl <- pm_all_24h %>%
  group_by(wsrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per wind-speed bin
ncf_pm_ws_pl <- ggplot(na.omit(filter(pm_all_24h_ws_tbl, site == "ncf")), aes(x=wsrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Wind speed range (m/sec)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
ncf_pm_ws_pl
# combine the four met-condition panels into one 2x2 figure
ncf_met_pm <- ggarrange(ncf_pm_temp_pl,ncf_pm_rh_pl,ncf_pm_rain_pl,ncf_pm_ws_pl,
                        labels = c("(a)","(b)","(c)","(d)"),
                        ncol = 2,nrow = 2)
ncf_met_pm
ncf_met_pm <- annotate_figure(ncf_met_pm, top = text_grob(bquote(NCF~PM~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a jpeg device for the combined NCF PM figure
# NOTE(review): type="windows" makes this Windows-only -- confirm portability is not required
jpeg(file="ncf_met_pm.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
     type="windows", antialias="cleartype")
# draw the figure into the device
# NOTE(review): a bare symbol only auto-prints interactively; use print() if
# this script is run via source()
ncf_met_pm
# close the jpeg device (writes the file)
dev.off()
# UNILAG ####################################################################
# barplot: PM exceedance day counts per temperature bin (reuses the shared
# pm_all_24h_temp_tbl computed earlier in the script)
uni_pm_temp_pl <- ggplot(na.omit(filter(pm_all_24h_temp_tbl, site == "unilag")), aes(x=temprange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Temperature range (\u00B0C)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
uni_pm_temp_pl
# RELATIVE HUMIDITY
# group by and summarise exceedance events into relative-humidity bins
# NOTE(review): identical recomputation of a table already built above --
# redundant but harmless
pm_all_24h_rh_tbl <- pm_all_24h %>%
  group_by(rhrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per relative-humidity bin
uni_pm_rh_pl <- ggplot(na.omit(filter(pm_all_24h_rh_tbl, site == "unilag")), aes(x=rhrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Range of Relative Humidity (%)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
uni_pm_rh_pl
# PRECIPITATION
# group by and summarise exceedance events into precipitation bins
pm_all_24h_rain_tbl <- pm_all_24h %>%
  group_by(rainrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per precipitation bin
uni_pm_rain_pl <- ggplot(na.omit(filter(pm_all_24h_rain_tbl, site == "unilag")), aes(x=rainrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Precipitation (mm)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
uni_pm_rain_pl
# WIND SPEED
# group by and summarise exceedance events into wind-speed bins
pm_all_24h_ws_tbl <- pm_all_24h %>%
  group_by(wsrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: PM exceedance day counts per wind-speed bin
uni_pm_ws_pl <- ggplot(na.omit(filter(pm_all_24h_ws_tbl, site == "unilag")), aes(x=wsrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Wind speed range (m/sec)") +
  scale_fill_manual("legend", values = c("PM2.5" = "darkgrey", "PM10" = "gray18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
uni_pm_ws_pl
# combine the four met-condition panels into one 2x2 figure
uni_met_pm <- ggarrange(uni_pm_temp_pl,uni_pm_rh_pl,uni_pm_rain_pl,uni_pm_ws_pl,
                        labels = c("(a)","(b)","(c)","(d)"),
                        ncol = 2,nrow = 2)
uni_met_pm
uni_met_pm <- annotate_figure(uni_met_pm, top = text_grob(bquote(UNILAG~PM~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a jpeg device for the combined UNILAG PM figure
# NOTE(review): type="windows" makes this Windows-only -- confirm portability is not required
jpeg(file="uni_met_pm.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
     type="windows", antialias="cleartype")
# draw the figure into the device
# NOTE(review): a bare symbol only auto-prints interactively; use print() if
# this script is run via source()
uni_met_pm
# close the jpeg device (writes the file)
dev.off()
#########################################
########################################
# SO2 plots ###########################################
# Output directory for the SO2 exceedance figures; subsequent jpeg() calls
# write relative to this directory via setwd().
met_plt_dir_so2 <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/Six-Month Interim Report/plots and tables/Met_Exceedances Plots/SO2 plots"
# showWarnings = FALSE: avoid the spurious warning when the directory already
# exists (e.g. on a re-run); behaviour is otherwise unchanged
dir.create(met_plt_dir_so2, showWarnings = FALSE)
setwd(met_plt_dir_so2)
# filter SO2 exceedance events (daily means above 20)
# NOTE(review): the 20 threshold's source (guideline/standard) is not shown
# here -- confirm against the project's reference limit
so2_all_24h <- filter(all_24h, so2 > 20)
# add a pollutant column (used as the fill aesthetic in the barplots)
so2_all_24h$pollutant <- "SO2"
# bind for multiple pollutants
# pm_all_24h <- rbind(pm25_all_24h,pm10_all_24h)
# arrange pollutants as per preference
# pm_all_24h$pollutant <- factor(pm_all_24h$pollutant, levels = c("PM2.5","PM10"))
# ABESAN #####################################
# group by and summarise SO2 exceedance events into temperature bins
so2_all_24h_temp_tbl <- so2_all_24h %>%
  group_by(temprange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per temperature bin
abe_so2_temp_pl <- ggplot(na.omit(filter(so2_all_24h_temp_tbl, site == "abesan")), aes(x=temprange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Temperature range (\u00B0C)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
abe_so2_temp_pl
# RELATIVE HUMIDITY
# group by and summarise SO2 exceedance events into relative-humidity bins
so2_all_24h_rh_tbl <- so2_all_24h %>%
  group_by(rhrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per relative-humidity bin
abe_so2_rh_pl <- ggplot(na.omit(filter(so2_all_24h_rh_tbl, site == "abesan")), aes(x=rhrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Range of Relative Humidity (%)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
abe_so2_rh_pl
# PRECIPITATION
# group by and summarise SO2 exceedance events into precipitation bins
so2_all_24h_rain_tbl <- so2_all_24h %>%
  group_by(rainrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per precipitation bin
abe_so2_rain_pl <- ggplot(na.omit(filter(so2_all_24h_rain_tbl, site == "abesan")), aes(x=rainrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Precipitation (mm)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
abe_so2_rain_pl
# WIND SPEED
# group by and summarise SO2 exceedance events into wind-speed bins
so2_all_24h_ws_tbl <- so2_all_24h %>%
  group_by(wsrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per wind-speed bin
abe_so2_ws_pl <- ggplot(na.omit(filter(so2_all_24h_ws_tbl, site == "abesan")), aes(x=wsrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Wind speed range (m/sec)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
abe_so2_ws_pl
# combine the four met-condition panels into one 2x2 figure
abe_met_so2 <- ggarrange(abe_so2_temp_pl,abe_so2_rh_pl,abe_so2_rain_pl,abe_so2_ws_pl,
                         labels = c("(a)","(b)","(c)","(d)"),
                         ncol = 2,nrow = 2)
abe_met_so2
abe_met_so2 <- annotate_figure(abe_met_so2, top = text_grob(bquote(Abesan~SO[2]~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a jpeg device for the combined Abesan SO2 figure
# NOTE(review): type="windows" makes this Windows-only -- confirm portability is not required
jpeg(file="abe_met_so2.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
     type="windows", antialias="cleartype")
# draw the figure into the device
# NOTE(review): a bare symbol only auto-prints interactively; use print() if
# this script is run via source()
abe_met_so2
# close the jpeg device (writes the file)
dev.off()
# IKORODU ####################################################################
# barplot: SO2 exceedance day counts per temperature bin (reuses
# so2_all_24h_temp_tbl built in the Abesan section)
iko_so2_temp_pl <- ggplot(na.omit(filter(so2_all_24h_temp_tbl, site == "ikorodu")), aes(x=temprange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Temperature range (\u00B0C)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
iko_so2_temp_pl
# RELATIVE HUMIDITY
# group by and summarise SO2 exceedance events into relative-humidity bins
# NOTE(review): identical recomputation of a table already built above --
# redundant but harmless
so2_all_24h_rh_tbl <- so2_all_24h %>%
  group_by(rhrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per relative-humidity bin
iko_so2_rh_pl <- ggplot(na.omit(filter(so2_all_24h_rh_tbl, site == "ikorodu")), aes(x=rhrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Range of Relative Humidity (%)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
iko_so2_rh_pl
# PRECIPITATION
# group by and summarise SO2 exceedance events into precipitation bins
so2_all_24h_rain_tbl <- so2_all_24h %>%
  group_by(rainrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per precipitation bin
iko_so2_rain_pl <- ggplot(na.omit(filter(so2_all_24h_rain_tbl, site == "ikorodu")), aes(x=rainrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Precipitation (mm)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
iko_so2_rain_pl
# WIND SPEED
# group by and summarise SO2 exceedance events into wind-speed bins
so2_all_24h_ws_tbl <- so2_all_24h %>%
  group_by(wsrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per wind-speed bin
iko_so2_ws_pl <- ggplot(na.omit(filter(so2_all_24h_ws_tbl, site == "ikorodu")), aes(x=wsrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Wind speed range (m/sec)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
iko_so2_ws_pl
# combine the four met-condition panels into one 2x2 figure
iko_met_so2 <- ggarrange(iko_so2_temp_pl,iko_so2_rh_pl,iko_so2_rain_pl,iko_so2_ws_pl,
                         labels = c("(a)","(b)","(c)","(d)"),
                         ncol = 2,nrow = 2)
iko_met_so2
iko_met_so2 <- annotate_figure(iko_met_so2, top = text_grob(bquote(Ikorodu~SO[2]~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a jpeg device for the combined Ikorodu SO2 figure
# NOTE(review): type="windows" makes this Windows-only -- confirm portability is not required
jpeg(file="iko_met_so2.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
     type="windows", antialias="cleartype")
# draw the figure into the device
# NOTE(review): a bare symbol only auto-prints interactively; use print() if
# this script is run via source()
iko_met_so2
# close the jpeg device (writes the file)
dev.off()
# JANKARA ####################################################################
# barplot: SO2 exceedance day counts per temperature bin (reuses
# so2_all_24h_temp_tbl built in the Abesan section)
jan_so2_temp_pl <- ggplot(na.omit(filter(so2_all_24h_temp_tbl, site == "jankara")), aes(x=temprange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Temperature range (\u00B0C)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
jan_so2_temp_pl
# RELATIVE HUMIDITY
# group by and summarise SO2 exceedance events into relative-humidity bins
jan_so2_rh_tbl <- so2_all_24h %>%
  group_by(rhrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# keep the shared table name in sync so later sections behave as before
so2_all_24h_rh_tbl <- jan_so2_rh_tbl
# barplot: SO2 exceedance day counts per relative-humidity bin
jan_so2_rh_pl <- ggplot(na.omit(filter(so2_all_24h_rh_tbl, site == "jankara")), aes(x=rhrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Range of Relative Humidity (%)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
jan_so2_rh_pl
# PRECIPITATION
# group by and summarise SO2 exceedance events into precipitation bins
so2_all_24h_rain_tbl <- so2_all_24h %>%
  group_by(rainrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per precipitation bin
jan_so2_rain_pl <- ggplot(na.omit(filter(so2_all_24h_rain_tbl, site == "jankara")), aes(x=rainrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Precipitation (mm)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
jan_so2_rain_pl
# WIND SPEED
# group by and summarise SO2 exceedance events into wind-speed bins
so2_all_24h_ws_tbl <- so2_all_24h %>%
  group_by(wsrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per wind-speed bin
jan_so2_ws_pl <- ggplot(na.omit(filter(so2_all_24h_ws_tbl, site == "jankara")), aes(x=wsrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Wind speed range (m/sec)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
jan_so2_ws_pl
# combine the four met-condition panels into one 2x2 figure
jan_met_so2 <- ggarrange(jan_so2_temp_pl,jan_so2_rh_pl,jan_so2_rain_pl,jan_so2_ws_pl,
                         labels = c("(a)","(b)","(c)","(d)"),
                         ncol = 2,nrow = 2)
jan_met_so2
# FIX: use SO[2] (plotmath subscript) in the title, consistent with the
# Abesan/Ikorodu/NCF sections; previously this section rendered a plain "SO2"
jan_met_so2 <- annotate_figure(jan_met_so2, top = text_grob(bquote(Jankara~SO[2]~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a jpeg device for the combined Jankara SO2 figure
# NOTE(review): type="windows" makes this Windows-only -- confirm portability is not required
jpeg(file="jan_met_so2.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
     type="windows", antialias="cleartype")
# FIX: explicit print() so the figure is drawn into the device even when the
# script is run via source() (bare symbols only auto-print interactively)
print(jan_met_so2)
# close the jpeg device (writes the file)
dev.off()
# Lasepa ####################################################################
# barplot: SO2 exceedance day counts per temperature bin (reuses
# so2_all_24h_temp_tbl built in the Abesan section)
las_so2_temp_pl <- ggplot(na.omit(filter(so2_all_24h_temp_tbl, site == "lasepa")), aes(x=temprange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Temperature range (\u00B0C)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
las_so2_temp_pl
# RELATIVE HUMIDITY
# group by and summarise SO2 exceedance events into relative-humidity bins
so2_all_24h_rh_tbl <- so2_all_24h %>%
  group_by(rhrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per relative-humidity bin
las_so2_rh_pl <- ggplot(na.omit(filter(so2_all_24h_rh_tbl, site == "lasepa")), aes(x=rhrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Range of Relative Humidity (%)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
las_so2_rh_pl
# PRECIPITATION
# group by and summarise SO2 exceedance events into precipitation bins
so2_all_24h_rain_tbl <- so2_all_24h %>%
  group_by(rainrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per precipitation bin
las_so2_rain_pl <- ggplot(na.omit(filter(so2_all_24h_rain_tbl, site == "lasepa")), aes(x=rainrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Precipitation (mm)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
las_so2_rain_pl
# WIND SPEED
# group by and summarise SO2 exceedance events into wind-speed bins
so2_all_24h_ws_tbl <- so2_all_24h %>%
  group_by(wsrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per wind-speed bin
las_so2_ws_pl <- ggplot(na.omit(filter(so2_all_24h_ws_tbl, site == "lasepa")), aes(x=wsrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Wind speed range (m/sec)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
las_so2_ws_pl
# combine the four met-condition panels into one 2x2 figure
las_met_so2 <- ggarrange(las_so2_temp_pl,las_so2_rh_pl,las_so2_rain_pl,las_so2_ws_pl,
                         labels = c("(a)","(b)","(c)","(d)"),
                         ncol = 2,nrow = 2)
las_met_so2
# FIX: use SO[2] (plotmath subscript) in the title, consistent with the
# Abesan/Ikorodu/NCF sections; previously this section rendered a plain "SO2"
las_met_so2 <- annotate_figure(las_met_so2, top = text_grob(bquote(LASEPA~SO[2]~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a jpeg device for the combined LASEPA SO2 figure
# NOTE(review): type="windows" makes this Windows-only -- confirm portability is not required
jpeg(file="las_met_so2.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
     type="windows", antialias="cleartype")
# FIX: explicit print() so the figure is drawn into the device even when the
# script is run via source() (bare symbols only auto-print interactively)
print(las_met_so2)
# close the jpeg device (writes the file)
dev.off()
# NCF ####################################################################
# barplot: SO2 exceedance day counts per temperature bin (reuses
# so2_all_24h_temp_tbl built in the Abesan section)
ncf_so2_temp_pl <- ggplot(na.omit(filter(so2_all_24h_temp_tbl, site == "ncf")), aes(x=temprange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Temperature range (\u00B0C)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.9,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
ncf_so2_temp_pl
# RELATIVE HUMIDITY
# group by and summarise SO2 exceedance events into relative-humidity bins
# NOTE(review): identical recomputation of a table already built above --
# redundant but harmless
so2_all_24h_rh_tbl <- so2_all_24h %>%
  group_by(rhrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per relative-humidity bin
ncf_so2_rh_pl <- ggplot(na.omit(filter(so2_all_24h_rh_tbl, site == "ncf")), aes(x=rhrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Range of Relative Humidity (%)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
ncf_so2_rh_pl
# PRECIPITATION
# group by and summarise SO2 exceedance events into precipitation bins
so2_all_24h_rain_tbl <- so2_all_24h %>%
  group_by(rainrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per precipitation bin
ncf_so2_rain_pl <- ggplot(na.omit(filter(so2_all_24h_rain_tbl, site == "ncf")), aes(x=rainrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Precipitation (mm)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
ncf_so2_rain_pl
# WIND SPEED
# group by and summarise SO2 exceedance events into wind-speed bins
so2_all_24h_ws_tbl <- so2_all_24h %>%
  group_by(wsrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot: SO2 exceedance day counts per wind-speed bin
ncf_so2_ws_pl <- ggplot(na.omit(filter(so2_all_24h_ws_tbl, site == "ncf")), aes(x=wsrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Wind speed range (m/sec)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
ncf_so2_ws_pl
# combine the four met-condition panels into one 2x2 figure
ncf_met_so2 <- ggarrange(ncf_so2_temp_pl,ncf_so2_rh_pl,ncf_so2_rain_pl,ncf_so2_ws_pl,
                         labels = c("(a)","(b)","(c)","(d)"),
                         ncol = 2,nrow = 2)
ncf_met_so2
ncf_met_so2 <- annotate_figure(ncf_met_so2, top = text_grob(bquote(NCF~SO[2]~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a jpeg device for the combined NCF SO2 figure
# NOTE(review): type="windows" makes this Windows-only -- confirm portability is not required
jpeg(file="ncf_met_so2.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
     type="windows", antialias="cleartype")
# draw the figure into the device
# NOTE(review): a bare symbol only auto-prints interactively; use print() if
# this script is run via source()
ncf_met_so2
# close the jpeg device (writes the file)
dev.off()
# UNILAG ####################################################################
# UNILAG site: daily SO2 exceedance counts binned by temperature, relative
# humidity, precipitation and wind speed; the four bar charts are arranged
# 2 x 2, titled, and exported as a JPEG.
# barplot (temperature bins; so2_all_24h_temp_tbl is built earlier in the script)
uni_so2_temp_pl <- ggplot(na.omit(filter(so2_all_24h_temp_tbl, site == "unilag")), aes(x=temprange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Temperature range (\u00B0C)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
uni_so2_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
# (recomputed identically to the NCF section above; kept for script independence)
so2_all_24h_rh_tbl <- so2_all_24h %>%
  group_by(rhrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot
uni_so2_rh_pl <- ggplot(na.omit(filter(so2_all_24h_rh_tbl, site == "unilag")), aes(x=rhrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Range of Relative Humidity (%)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
uni_so2_rh_pl
# PRECIPITATION
# group by and summarise events into bins
so2_all_24h_rain_tbl <- so2_all_24h %>%
  group_by(rainrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot
uni_so2_rain_pl <- ggplot(na.omit(filter(so2_all_24h_rain_tbl, site == "unilag")), aes(x=rainrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Precipitation (mm)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
uni_so2_rain_pl
# WIND SPEED
# group by and summarise events into bins
so2_all_24h_ws_tbl <- so2_all_24h %>%
  group_by(wsrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot
uni_so2_ws_pl <- ggplot(na.omit(filter(so2_all_24h_ws_tbl, site == "unilag")), aes(x=wsrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Wind speed range (m/sec)") +
  scale_fill_manual("legend", values = c("SO2" = "grey18")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
uni_so2_ws_pl
# Combine the four panels into one labelled 2x2 figure.
uni_met_so2 <- ggarrange(uni_so2_temp_pl,uni_so2_rh_pl,uni_so2_rain_pl,uni_so2_ws_pl,
                         labels = c("(a)","(b)","(c)","(d)"),
                         ncol = 2,nrow = 2)
uni_met_so2
# FIX: use the subscripted plotmath form SO[2] (was plain "SO2"), matching the
# NCF figure title built earlier in the script.
uni_met_so2 <- annotate_figure(uni_met_so2, top = text_grob(bquote(UNILAG~SO[2]~Exceedances~at~various~Meteorological~conditions~from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# Open a JPEG graphics device for the export.
jpeg(file="uni_met_so2.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
     type="windows", antialias="cleartype")
# Print the figure into the open device.
uni_met_so2
# Close the device, flushing the file to disk.
dev.off()
##########################################
##########################################
# O3 plots 1hr #################
# Build the 1-hour exceedance data set (O3, NO2 and CO) used by the per-site
# meteorology bar charts below, and point the working directory at the O3
# figure export folder.
# expdir for O3 plots
met_plt_dir_o3 <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/Six-Month Interim Report/plots and tables/Met_Exceedances Plots/O3 plots"
# showWarnings = FALSE keeps reruns quiet when the folder already exists;
# recursive = TRUE also creates any missing parent directories.
dir.create(met_plt_dir_o3, showWarnings = FALSE, recursive = TRUE)
# NOTE(review): setwd() in a script is fragile -- the jpeg() calls below rely
# on the working directory set here.
setwd(met_plt_dir_o3)
# Keep only the hours above each pollutant's 1-h exceedance threshold
# (presumably regulatory 1-h limit values -- confirm units and source).
o3_1h <- filter(all_1h, o3 > 180)
no2_all_1h <- filter(all_1h, no2 > 188.1)
co_all_1h <- filter(all_1h, co > 10)
# Tag each subset with its pollutant name so they share one fill legend
o3_1h$pollutant <- "O3"
no2_all_1h$pollutant <- "NO2"
co_all_1h$pollutant <- "CO"
# Stack the three pollutants into one long data frame
o3_all_1h <- rbind(o3_1h,no2_all_1h,co_all_1h)
# Fix the legend ordering of the pollutants
o3_all_1h$pollutant <- factor(o3_all_1h$pollutant, levels = c("NO2","CO","O3"))
# ABESAN #####################################
# Abesan site: hourly O3 / NO2 / CO exceedance counts binned by four
# meteorological variables (temperature, RH, rain, wind speed), arranged
# 2x2 and exported as a JPEG.
# group by and summarise events into bins
o3_all_1h_temp_tbl <- o3_all_1h %>%
group_by(temprange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_o3_temp_pl <- ggplot(na.omit(filter(o3_all_1h_temp_tbl, site == "abesan")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
abe_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
o3_all_1h_rh_tbl <- o3_all_1h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_o3_rh_pl <- ggplot(na.omit(filter(o3_all_1h_rh_tbl, site == "abesan")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
abe_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_1h_rain_tbl <- o3_all_1h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_o3_rain_pl <- ggplot(na.omit(filter(o3_all_1h_rain_tbl, site == "abesan")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
abe_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_1h_ws_tbl <- o3_all_1h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_o3_ws_pl <- ggplot(na.omit(filter(o3_all_1h_ws_tbl, site == "abesan")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
abe_o3_ws_pl
# Combine the four panels into one labelled 2x2 figure.
abe_met_o3 <- ggarrange(abe_o3_temp_pl,abe_o3_rh_pl,abe_o3_rain_pl,abe_o3_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
abe_met_o3
# NOTE(review): annotate_figure() is called twice, producing a two-row header
# (site/pollutant title above the date-range line) -- confirm this is intended.
abe_met_o3 <- annotate_figure(abe_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
abe_met_o3 <- annotate_figure(abe_met_o3, top = text_grob(bquote(Abesan~Hourly~O[3]~CO~NO[2]~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# Open a JPEG graphics device (previous comment incorrectly said "tiff")
jpeg(file="abe_met_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# Print the figure into the open device
abe_met_o3
# Close the JPEG device, flushing the file to disk
dev.off()
# IKORODU ####################################################################
# Ikorodu site: hourly O3 / NO2 / CO exceedance counts binned by meteorology,
# arranged 2x2 and exported as a JPEG. The temperature plot reuses
# o3_all_1h_temp_tbl built in the Abesan section.
# barplot
iko_o3_temp_pl <- ggplot(na.omit(filter(o3_all_1h_temp_tbl, site == "ikorodu")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins (same expression as in the Abesan section)
o3_all_1h_rh_tbl <- o3_all_1h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
iko_o3_rh_pl <- ggplot(na.omit(filter(o3_all_1h_rh_tbl, site == "ikorodu")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_1h_rain_tbl <- o3_all_1h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
iko_o3_rain_pl <- ggplot(na.omit(filter(o3_all_1h_rain_tbl, site == "ikorodu")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_1h_ws_tbl <- o3_all_1h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
iko_o3_ws_pl <- ggplot(na.omit(filter(o3_all_1h_ws_tbl, site == "ikorodu")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_o3_ws_pl
# Combine the four panels into one labelled 2x2 figure.
iko_met_o3 <- ggarrange(iko_o3_temp_pl,iko_o3_rh_pl,iko_o3_rain_pl,iko_o3_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
iko_met_o3
# NOTE(review): two annotate_figure() calls stack the title above the date line.
iko_met_o3 <- annotate_figure(iko_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
iko_met_o3 <- annotate_figure(iko_met_o3, top = text_grob(bquote(Ikorodu~Hourly~O[3]~CO~NO[2]~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# Open a JPEG graphics device (previous comment incorrectly said "tiff")
jpeg(file="iko_met_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# Print the figure into the open device
iko_met_o3
# Close the JPEG device
dev.off()
# JANKARA ####################################################################
# Jankara site: hourly O3 / NO2 / CO exceedance counts binned by meteorology,
# arranged 2x2 and exported as a JPEG.
# barplot
jan_o3_temp_pl <- ggplot(na.omit(filter(o3_all_1h_temp_tbl, site == "jankara")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
jan_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
o3_all_1h_rh_tbl <- o3_all_1h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
jan_o3_rh_pl <- ggplot(na.omit(filter(o3_all_1h_rh_tbl, site == "jankara")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
jan_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_1h_rain_tbl <- o3_all_1h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
jan_o3_rain_pl <- ggplot(na.omit(filter(o3_all_1h_rain_tbl, site == "jankara")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
jan_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_1h_ws_tbl <- o3_all_1h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
jan_o3_ws_pl <- ggplot(na.omit(filter(o3_all_1h_ws_tbl, site == "jankara")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
jan_o3_ws_pl
# Combine the four panels into one labelled 2x2 figure.
jan_met_o3 <- ggarrange(jan_o3_temp_pl,jan_o3_rh_pl,jan_o3_rain_pl,jan_o3_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
jan_met_o3
# NOTE(review): title below uses all-caps "JANKARA" while the Abesan/Ikorodu
# figures use title case -- confirm intended casing.
jan_met_o3 <- annotate_figure(jan_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
jan_met_o3 <- annotate_figure(jan_met_o3, top = text_grob(bquote(JANKARA~Hourly~O[3]~CO~NO[2]~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# Open a JPEG graphics device (previous comment incorrectly said "tiff")
jpeg(file="jan_met_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# Print the figure into the open device
jan_met_o3
# Close the JPEG device
dev.off()
# Lasepa ####################################################################
# LASEPA site: hourly O3 / NO2 / CO exceedance counts binned by meteorology,
# arranged 2x2 and exported as a JPEG.
# barplot
las_o3_temp_pl <- ggplot(na.omit(filter(o3_all_1h_temp_tbl, site == "lasepa")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
las_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
o3_all_1h_rh_tbl <- o3_all_1h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
las_o3_rh_pl <- ggplot(na.omit(filter(o3_all_1h_rh_tbl, site == "lasepa")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
las_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_1h_rain_tbl <- o3_all_1h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
las_o3_rain_pl <- ggplot(na.omit(filter(o3_all_1h_rain_tbl, site == "lasepa")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
las_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_1h_ws_tbl <- o3_all_1h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
las_o3_ws_pl <- ggplot(na.omit(filter(o3_all_1h_ws_tbl, site == "lasepa")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
las_o3_ws_pl
# Combine the four panels into one labelled 2x2 figure.
las_met_o3 <- ggarrange(las_o3_temp_pl,las_o3_rh_pl,las_o3_rain_pl,las_o3_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
las_met_o3
# NOTE(review): two annotate_figure() calls stack the title above the date line.
las_met_o3 <- annotate_figure(las_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
las_met_o3 <- annotate_figure(las_met_o3, top = text_grob(bquote(LASEPA~Hourly~O[3]~CO~NO[2]~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# Open a JPEG graphics device (previous comment incorrectly said "tiff")
jpeg(file="las_met_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# Print the figure into the open device
las_met_o3
# Close the JPEG device
dev.off()
# NCF ####################################################################
# NCF site: hourly O3 / NO2 / CO exceedance counts binned by meteorology,
# arranged 2x2 and exported as a JPEG.
# barplot
ncf_o3_temp_pl <- ggplot(na.omit(filter(o3_all_1h_temp_tbl, site == "ncf")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
ncf_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
o3_all_1h_rh_tbl <- o3_all_1h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
ncf_o3_rh_pl <- ggplot(na.omit(filter(o3_all_1h_rh_tbl, site == "ncf")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
ncf_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_1h_rain_tbl <- o3_all_1h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
ncf_o3_rain_pl <- ggplot(na.omit(filter(o3_all_1h_rain_tbl, site == "ncf")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
ncf_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_1h_ws_tbl <- o3_all_1h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
ncf_o3_ws_pl <- ggplot(na.omit(filter(o3_all_1h_ws_tbl, site == "ncf")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
ncf_o3_ws_pl
# Combine the four panels into one labelled 2x2 figure.
ncf_met_o3 <- ggarrange(ncf_o3_temp_pl,ncf_o3_rh_pl,ncf_o3_rain_pl,ncf_o3_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
ncf_met_o3
# NOTE(review): two annotate_figure() calls stack the title above the date line.
ncf_met_o3 <- annotate_figure(ncf_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
ncf_met_o3 <- annotate_figure(ncf_met_o3, top = text_grob(bquote(NCF~Hourly~O[3]~CO~NO[2]~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# Open a JPEG graphics device (previous comment incorrectly said "tiff")
jpeg(file="ncf_met_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# Print the figure into the open device
ncf_met_o3
# Close the JPEG device
dev.off()
# UNILAG ####################################################################
# UNILAG site: hourly O3 / NO2 / CO exceedance counts binned by meteorology,
# arranged 2x2 and exported as a JPEG.
# barplot
uni_o3_temp_pl <- ggplot(na.omit(filter(o3_all_1h_temp_tbl, site == "unilag")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
uni_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
o3_all_1h_rh_tbl <- o3_all_1h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
uni_o3_rh_pl <- ggplot(na.omit(filter(o3_all_1h_rh_tbl, site == "unilag")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
uni_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_1h_rain_tbl <- o3_all_1h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
uni_o3_rain_pl <- ggplot(na.omit(filter(o3_all_1h_rain_tbl, site == "unilag")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
uni_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_1h_ws_tbl <- o3_all_1h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
uni_o3_ws_pl <- ggplot(na.omit(filter(o3_all_1h_ws_tbl, site == "unilag")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
uni_o3_ws_pl
# Combine the four panels into one labelled 2x2 figure.
uni_met_o3 <- ggarrange(uni_o3_temp_pl,uni_o3_rh_pl,uni_o3_rain_pl,uni_o3_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
uni_met_o3
# NOTE(review): two annotate_figure() calls stack the title above the date line.
uni_met_o3 <- annotate_figure(uni_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
uni_met_o3 <- annotate_figure(uni_met_o3, top = text_grob(bquote(UNILAG~Hourly~O[3]~CO~NO[2]~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# Open a JPEG graphics device (previous comment incorrectly said "tiff")
jpeg(file="uni_met_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# Print the figure into the open device
uni_met_o3
# Close the JPEG device
dev.off()
#########################################
##########################################
# O3 plots 8hr #################
# Build the 8-hour exceedance data set (O3 and CO only -- the NO2 filter is
# deliberately commented out below) and re-point the working directory at
# the O3 export folder. met_plt_dir_o3 is the same path used for the 1-h
# plots; the 8-h figures are distinguished by "_8h_" in their file names.
# expdir for O3 plots
met_plt_dir_o3 <- "C:/Users/Obajuluwa/OneDrive/OneDrive Collaboration for August to December Reports 290121/Rev0/Six-Month Interim Report/plots and tables/Met_Exceedances Plots/O3 plots"
# NOTE(review): dir.create() warns when the directory already exists (it does
# after the 1-h section) -- consider showWarnings = FALSE.
dir.create(met_plt_dir_o3)
setwd(met_plt_dir_o3)
# filter 8-h exceedance events (thresholds presumably regulatory 8-h limits -- confirm)
o3_8h <- filter(all_8h, o3 > 100)
# no2_all_8h <- filter(all_8h, no2 > 188.1)
co_all_8h <- filter(all_8h, co > 10.35)
# add a pollutant column
o3_8h$pollutant <- "O3"
# no2_all_8h$pollutant <- "NO2"
co_all_8h$pollutant <- "CO"
# bind for multiple pollutants
o3_all_8h <- rbind(o3_8h,co_all_8h)
# arrange pollutants as per preference (controls legend order)
o3_all_8h$pollutant <- factor(o3_all_8h$pollutant, levels = c("CO","O3"))
# ABESAN #####################################
# Abesan site: 8-hour O3 / CO exceedance counts binned by four meteorological
# variables, arranged 2x2 and exported as a JPEG.
# group by and summarise events into bins
o3_all_8h_temp_tbl <- o3_all_8h %>%
  group_by(temprange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_o3_temp_pl <- ggplot(na.omit(filter(o3_all_8h_temp_tbl, site == "abesan")), aes(x=temprange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Temperature range (\u00B0C)") +
  scale_fill_manual("legend", values = c("O3" = "chocolate","CO" = "darkgrey")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
abe_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
o3_all_8h_rh_tbl <- o3_all_8h %>%
  group_by(rhrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_o3_rh_pl <- ggplot(na.omit(filter(o3_all_8h_rh_tbl, site == "abesan")), aes(x=rhrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Range of Relative Humidity (%)") +
  scale_fill_manual("legend", values = c("O3" = "chocolate","CO" = "darkgrey")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
abe_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_8h_rain_tbl <- o3_all_8h %>%
  group_by(rainrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_o3_rain_pl <- ggplot(na.omit(filter(o3_all_8h_rain_tbl, site == "abesan")), aes(x=rainrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Precipitation (mm)") +
  scale_fill_manual("legend", values = c("O3" = "chocolate","CO" = "darkgrey")) +
  theme_bw() +
  theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
abe_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_8h_ws_tbl <- o3_all_8h %>%
  group_by(wsrange,pollutant, site) %>%
  dplyr::summarise(counts = n(),.groups = "keep")
# barplot
abe_o3_ws_pl <- ggplot(na.omit(filter(o3_all_8h_ws_tbl, site == "abesan")), aes(x=wsrange,y = counts, fill = pollutant)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  ylab("Day Count") +
  xlab("Wind speed range (m/sec)") +
  scale_fill_manual("legend", values = c("O3" = "chocolate","CO" = "darkgrey")) +
  theme_bw() +
  theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
        legend.background = element_rect(linetype = "solid", colour = "black")) +
  ggtitle(label = "")
abe_o3_ws_pl
# Combine the four panels into one labelled 2x2 figure.
abe_met_o3 <- ggarrange(abe_o3_temp_pl,abe_o3_rh_pl,abe_o3_rain_pl,abe_o3_ws_pl,
                        labels = c("(a)","(b)","(c)","(d)"),
                        ncol = 2,nrow = 2)
abe_met_o3
abe_met_o3 <- annotate_figure(abe_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
# FIX 1: "8-Hourly" must be a quoted string inside bquote(); the bare token
# 8-Hourly parses as the plotmath expression 8 - Hourly and renders with a
# minus sign ("8 - Hourly") instead of a hyphen.
# FIX 2: dropped NO2 from the title -- the 8-h data set (o3_all_8h) contains
# only O3 and CO (the NO2 filter is commented out in the 8-h setup).
abe_met_o3 <- annotate_figure(abe_met_o3, top = text_grob(bquote(Abesan~"8-Hourly"~O[3]~CO~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# Open a JPEG graphics device for the export.
jpeg(file="abe_met_8h_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
     type="windows", antialias="cleartype")
# Print the figure into the open device.
abe_met_o3
# Close the device, flushing the file to disk.
dev.off()
# IKORODU ####################################################################
# TEMPERATURE
# barplot (reuses o3_all_8h_temp_tbl computed in the Abesan section above)
# NOTE: "NO2" added to the temperature fill scale for consistency with the
# rh/rain/ws panels below; unused named values are ignored by ggplot2.
iko_o3_temp_pl <- ggplot(na.omit(filter(o3_all_8h_temp_tbl, site == "ikorodu")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
o3_all_8h_rh_tbl <- o3_all_8h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
iko_o3_rh_pl <- ggplot(na.omit(filter(o3_all_8h_rh_tbl, site == "ikorodu")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_8h_rain_tbl <- o3_all_8h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
iko_o3_rain_pl <- ggplot(na.omit(filter(o3_all_8h_rain_tbl, site == "ikorodu")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_8h_ws_tbl <- o3_all_8h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
iko_o3_ws_pl <- ggplot(na.omit(filter(o3_all_8h_ws_tbl, site == "ikorodu")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
iko_o3_ws_pl
# combine the four panels into a 2x2 figure with stacked title rows
iko_met_o3 <- ggarrange(iko_o3_temp_pl,iko_o3_rh_pl,iko_o3_rain_pl,iko_o3_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
iko_met_o3
iko_met_o3 <- annotate_figure(iko_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
iko_met_o3 <- annotate_figure(iko_met_o3, top = text_grob(bquote(Ikorodu~8-Hourly~O[3]~CO~NO[2]~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a JPEG graphics device (earlier comment incorrectly said "tiff")
jpeg(file="iko_met_8h_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# print the arranged figure into the open device
iko_met_o3
# close the device, flushing the jpeg to disk
dev.off()
# JANKARA ####################################################################
# TEMPERATURE
# barplot (reuses o3_all_8h_temp_tbl computed in the Abesan section above)
# NOTE: "NO2" added to every fill scale for consistency with the Ikorodu
# section; unused named values are ignored by ggplot2, but a missing one
# would error if NO2 rows are present in the data.
jan_o3_temp_pl <- ggplot(na.omit(filter(o3_all_8h_temp_tbl, site == "jankara")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
jan_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
o3_all_8h_rh_tbl <- o3_all_8h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
jan_o3_rh_pl <- ggplot(na.omit(filter(o3_all_8h_rh_tbl, site == "jankara")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
jan_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_8h_rain_tbl <- o3_all_8h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
jan_o3_rain_pl <- ggplot(na.omit(filter(o3_all_8h_rain_tbl, site == "jankara")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
jan_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_8h_ws_tbl <- o3_all_8h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
jan_o3_ws_pl <- ggplot(na.omit(filter(o3_all_8h_ws_tbl, site == "jankara")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
jan_o3_ws_pl
# combine the four panels into a 2x2 figure with stacked title rows
jan_met_o3 <- ggarrange(jan_o3_temp_pl,jan_o3_rh_pl,jan_o3_rain_pl,jan_o3_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
jan_met_o3
jan_met_o3 <- annotate_figure(jan_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
jan_met_o3 <- annotate_figure(jan_met_o3, top = text_grob(bquote(JANKARA~8-Hourly~O[3]~CO~NO[2]~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a JPEG graphics device (earlier comment incorrectly said "tiff")
jpeg(file="jan_met_8h_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# print the arranged figure into the open device
jan_met_o3
# close the device, flushing the jpeg to disk
dev.off()
# Lasepa ####################################################################
# TEMPERATURE
# barplot (reuses o3_all_8h_temp_tbl computed in the Abesan section above)
# NOTE: "NO2" added to every fill scale for consistency with the Ikorodu
# section; unused named values are ignored by ggplot2.
las_o3_temp_pl <- ggplot(na.omit(filter(o3_all_8h_temp_tbl, site == "lasepa")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
las_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
o3_all_8h_rh_tbl <- o3_all_8h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
las_o3_rh_pl <- ggplot(na.omit(filter(o3_all_8h_rh_tbl, site == "lasepa")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
las_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_8h_rain_tbl <- o3_all_8h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
las_o3_rain_pl <- ggplot(na.omit(filter(o3_all_8h_rain_tbl, site == "lasepa")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
las_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_8h_ws_tbl <- o3_all_8h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
las_o3_ws_pl <- ggplot(na.omit(filter(o3_all_8h_ws_tbl, site == "lasepa")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
las_o3_ws_pl
# combine the four panels into a 2x2 figure with stacked title rows
las_met_o3 <- ggarrange(las_o3_temp_pl,las_o3_rh_pl,las_o3_rain_pl,las_o3_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
las_met_o3
las_met_o3 <- annotate_figure(las_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
las_met_o3 <- annotate_figure(las_met_o3, top = text_grob(bquote(LASEPA~8-Hourly~O[3]~CO~NO[2]~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a JPEG graphics device (earlier comment incorrectly said "tiff")
jpeg(file="las_met_8h_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# print the arranged figure into the open device
las_met_o3
# close the device, flushing the jpeg to disk
dev.off()
# NCF ####################################################################
# TEMPERATURE
# barplot (reuses o3_all_8h_temp_tbl computed in the Abesan section above)
# NOTE: "NO2" added to every fill scale for consistency with the Ikorodu
# section; unused named values are ignored by ggplot2.
ncf_o3_temp_pl <- ggplot(na.omit(filter(o3_all_8h_temp_tbl, site == "ncf")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
ncf_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
o3_all_8h_rh_tbl <- o3_all_8h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
ncf_o3_rh_pl <- ggplot(na.omit(filter(o3_all_8h_rh_tbl, site == "ncf")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
ncf_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_8h_rain_tbl <- o3_all_8h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
ncf_o3_rain_pl <- ggplot(na.omit(filter(o3_all_8h_rain_tbl, site == "ncf")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
ncf_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_8h_ws_tbl <- o3_all_8h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
ncf_o3_ws_pl <- ggplot(na.omit(filter(o3_all_8h_ws_tbl, site == "ncf")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
ncf_o3_ws_pl
# combine the four panels into a 2x2 figure with stacked title rows
ncf_met_o3 <- ggarrange(ncf_o3_temp_pl,ncf_o3_rh_pl,ncf_o3_rain_pl,ncf_o3_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
ncf_met_o3
ncf_met_o3 <- annotate_figure(ncf_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
ncf_met_o3 <- annotate_figure(ncf_met_o3, top = text_grob(bquote(NCF~8-Hourly~O[3]~CO~NO[2]~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a JPEG graphics device (earlier comment incorrectly said "tiff")
jpeg(file="ncf_met_8h_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# print the arranged figure into the open device
ncf_met_o3
# close the device, flushing the jpeg to disk
dev.off()
# UNILAG ####################################################################
# TEMPERATURE
# barplot (reuses o3_all_8h_temp_tbl computed in the Abesan section above)
# NOTE: "NO2" added to every fill scale for consistency with the Ikorodu
# section; unused named values are ignored by ggplot2.
uni_o3_temp_pl <- ggplot(na.omit(filter(o3_all_8h_temp_tbl, site == "unilag")), aes(x=temprange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Temperature range (\u00B0C)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",legend.key = element_rect(size = 3),
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
uni_o3_temp_pl
# RELATIVE HUMIDITY
# group by and summarise events into bins
o3_all_8h_rh_tbl <- o3_all_8h %>%
group_by(rhrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
uni_o3_rh_pl <- ggplot(na.omit(filter(o3_all_8h_rh_tbl, site == "unilag")), aes(x=rhrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Range of Relative Humidity (%)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
uni_o3_rh_pl
# PRECIPITATION
# group by and summarise events into bins
o3_all_8h_rain_tbl <- o3_all_8h %>%
group_by(rainrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
uni_o3_rain_pl <- ggplot(na.omit(filter(o3_all_8h_rain_tbl, site == "unilag")), aes(x=rainrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Precipitation (mm)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.8,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
uni_o3_rain_pl
# WIND SPEED
# group by and summarise events into bins
o3_all_8h_ws_tbl <- o3_all_8h %>%
group_by(wsrange,pollutant, site) %>%
dplyr::summarise(counts = n(),.groups = "keep")
# barplot
uni_o3_ws_pl <- ggplot(na.omit(filter(o3_all_8h_ws_tbl, site == "unilag")), aes(x=wsrange,y = counts, fill = pollutant)) +
geom_bar(stat = 'identity', position = 'dodge') +
ylab("Day Count") +
xlab("Wind speed range (m/sec)") +
scale_fill_manual("legend", values = c("O3" = "chocolate", "NO2" = "gray18","CO" = "darkgrey")) +
theme_bw() +
theme(legend.position = c(0.18,0.9),legend.title = element_blank(), legend.text = element_text(size = 10),legend.direction = "horizontal",
legend.background = element_rect(linetype = "solid", colour = "black")) +
ggtitle(label = "")
uni_o3_ws_pl
# combine the four panels into a 2x2 figure with stacked title rows
uni_met_o3 <- ggarrange(uni_o3_temp_pl,uni_o3_rh_pl,uni_o3_rain_pl,uni_o3_ws_pl,
labels = c("(a)","(b)","(c)","(d)"),
ncol = 2,nrow = 2)
uni_met_o3
uni_met_o3 <- annotate_figure(uni_met_o3, top = text_grob(bquote(from~August~2020~to~January~2021), color = "black", face = "bold", size = 18))
uni_met_o3 <- annotate_figure(uni_met_o3, top = text_grob(bquote(UNILAG~8-Hourly~O[3]~CO~NO[2]~Exceedances~at~various~Meteorological~conditions), color = "black", face = "bold", size = 18))
metpltwidth <- 8000
metpltheight <- 5000
# open a JPEG graphics device (earlier comment incorrectly said "tiff")
jpeg(file="uni_met_8h_o3.jpeg", res=700, width=metpltwidth, height=metpltheight, pointsize=10,
type="windows", antialias="cleartype")
# print the arranged figure into the open device
uni_met_o3
# close the device, flushing the jpeg to disk
dev.off()
#########################################
#####pm-met regression tables ##############
#
# # define function to extract regression equation
#
# regEq <- function(lmObj, dig) {
# gsub(":", "*",
# paste0(
# names(lmObj$model)[1]," = ",
# paste0(
# c(round(lmObj$coef[1], dig), round(sign(lmObj$coef[-1])*lmObj$coef[-1], dig)),
# c("", rep("*", length(lmObj$coef)-1)),
# paste0(c("", names(lmObj$coef)[-1]), c(ifelse(sign(lmObj$coef)[-1]==1," + "," - "), "")),
# collapse=""
# )
# )
# )
# }
#
#
# names(abe_5m)
# pmlist <- c(9,10)
# metlist <- names(abe_5m)[15:18]
#
#
# abepm25_mdl <- lapply(metlist,function(x){
# lm(substitute(abepm25 ~ i, list(i = as.name(x))), data = abe_5m)
# })
#
# summary(abepm25_mdl[[4]])[[8]]
#
# abepm10_mdl <- lapply(metlist,function(x){
# lm(substitute(abepm10 ~ i, list(i = as.name(x))), data = abe_5m)
# })
#
#
# ############################################# DEPRECATED
# extractfun <- function (x){
# summlist <- list()
#
# for (j in 1:4){
# dat <- c(`Regression Equation` = regEq(x[[j]]),
# `R2`= round(summary(x[[j]])[[8]] * 100,2)
# )
# summlist[[j]] <- dat
# }
#
# final <- do.call(rbind,summlist)
# return(final)
# }
#
# extractfun(abepm25_mdl)
# ####################################################
#
#
|
9ef4ad73787bdbd6f9cdbd9f3dd4bfcd55a72c2c | 4e263337af30425e2bfc61284f45f611cec6cd0e | /Analysis/0_clean_data.R | c3483c205a030042568b63967fe1de994ea32803 | [] | no_license | yeatmanlab/Parametric_speech_public | c9ce4f443783c11355a07d4d5c3c87f5a0936bb6 | 8df268acda5c9e425c6df43291191207082d91a4 | refs/heads/master | 2020-04-23T17:47:01.871970 | 2019-02-18T19:39:35 | 2019-02-18T19:39:35 | 171,344,531 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,214 | r | 0_clean_data.R | # Process data
#!/usr/bin/env Rscript
## prep_data.R
## loads, cleans, aggregates, and saves raw response data and psychometric fits.
library(dplyr)
#setwd('/home/eobrien/bde/Projects/Parametric_Speech_public/Speech')
## When loading/saving, this script assumes the working directory is set to the
## root directory of the repo. Relative to this script's location that is:
#setwd("..")
## load the raw response data; response_df is filled by the read loop below
response_df <- data.frame()
data_dir <- file.path("Results", "Raw")
raw_files <- list.files(path=data_dir)
## keep only categorization data (not discrimination); remove practice blocks
## and pilot data (subject "nnn"), plus any Pilot/Plots directories
raw_files <- raw_files[!grepl("practice", raw_files)]
raw_files <- raw_files[!grepl("nnn", raw_files)]
raw_files <- raw_files[!grepl("Pilot", raw_files)]
raw_files <- raw_files[!grepl("Plots", raw_files)]
opts <- options(warn=2) # convert warnings to errors, while reading in files
for (fname in raw_files) {
## skip=1 because first row of each file is a timestamp
df_tmp <- tryCatch(read.csv(file.path(data_dir, fname), row.names=1, skip=1),
error=function(e) {print(paste("skipping", fname,
conditionMessage(e))); e})
if(inherits(df_tmp, "error")) next
## only keep complete blocks
if(dim(df_tmp)[1] == 105) {
df_tmp$subject_id <- strsplit(fname, "_")[[1]][1]
df_tmp$continuum <- "/ʃa/-/sa/"
df_tmp$duration <- ifelse(grepl("100", df_tmp$stimulus), "100","300")
df_tmp$run_id <- paste0(df_tmp$subject_id, '_', df_tmp$sound2)
df_tmp$stimulus <- as.character(df_tmp$stimulus)
df_tmp$step <- sapply(df_tmp$stimulus,
function(i) strsplit(strsplit(i, "_")[[1]][2],
".", fixed=TRUE)[[1]][1])
df_tmp$response <- ifelse(df_tmp$selection %in% c("Sa"), 1, 0)
## get the timestamp
conn <- file(file.path(data_dir, fname), "r")
df_tmp$psych_date <- strsplit(readLines(conn, 1), ",")[[1]][1]
close(conn)
## concatenate with other files
response_df <- rbind(response_df, df_tmp)
} else {
print(paste("skipping", fname, "(incomplete block)"))
}
}
options(opts) # restore default options
# Fix mistyped subject IDs recorded at testing
# (GB240 -> HB240, GB241 -> KB241, KB578 -> JB578)
response_df$subject_id <- gsub("GB240", "HB240",response_df$subject_id )
response_df$subject_id <- gsub("GB241", "KB241",response_df$subject_id )
response_df$subject_id <- gsub("KB578", "JB578",response_df$subject_id )
## load the repository / registry data and join them on the record id
repository_df <- read.csv("../RDRPRepository_DATA_2018-10-05_1343.csv")
registry_df <- read.csv("../RDRPRegistry_DATA_2018-10-05_1402.csv")
demog_df <- merge(repository_df, registry_df, by = "record_id")
## filter out subjects not in our sample
subject_ids <- unique(response_df$subject_id)
record_ids <- demog_df %>% filter(sid.x %in% subject_ids) %>% dplyr::select(record_id)
subject_df <- demog_df %>% filter(record_id %in% record_ids$record_id)
## get reading scores
## NOTE(review): `names` shadows base::names() and is not used in this
## section -- kept in case later code relies on it; confirm before removing.
names <- colnames(demog_df)
wj_cols <- c("wj_brs","wj_wa_ss","wj_lwid_ss")
ctopp_cols <- c("ctopp_pa","ctopp_rapid","ctopp_pm")
twre_cols <- c("twre_index","twre_pde_ss","twre_swe_ss")
wasi_cols <- c("wasi_fs2", "wasi_mr_ts")
reading_columns <- c("record_id", "dys_dx", "adhd_dx", "brain_injury", "aud_dis",
"psych_dx", wj_cols, ctopp_cols, twre_cols, wasi_cols)
## all_of() makes selecting by an external character vector explicit and
## errors loudly if a column goes missing
reading_df <- subject_df %>% dplyr::select(all_of(reading_columns))
reading_df <- reading_df[!duplicated(reading_df),]
## combine scores from distinct sessions; lambda notation replaces the
## deprecated funs(). mean() over all-NA sessions yields NaN, which the
## is.nan() screens below rely on.
reading_df <- reading_df %>% group_by(record_id) %>%
summarise_all(~ mean(as.numeric(.), na.rm=TRUE))
## biographic details
bio_df <- subject_df[c("sid.x", "dob", "record_id","gender")]
bio_df[bio_df==""] <- NA
bio_df <- na.omit(bio_df)
bio_df$dob <- as.POSIXct(bio_df$dob, format="%Y-%m-%d")
colnames(bio_df)[colnames(bio_df) == "sid.x"] <- "subject_id"
## merge biographic info, reading scores, and psychometric data
use_df <- merge(bio_df, response_df)
use_df <- merge(use_df, reading_df)
## compute age at testing in years (weeks / 52.25)
use_df$age_at_testing <- with(use_df, difftime(psych_date, dob, units="weeks"))
use_df$age_at_testing <- as.numeric(use_df$age_at_testing) / 52.25
# Subjects who did not pass the hearing screening
hearing <- c("JB724")
# Num subjects before any exclusions (auto-printed when the script is run)
length(unique(use_df$subject_id))
# How many subjects were in the age group and had no auditory disorder- ie, were eligible for the study
# (is.nan() keeps subjects whose aud_dis averaged to NaN above, i.e. all-NA)
use_df <- use_df %>%
filter(age_at_testing >= 8) %>%
filter(age_at_testing < 13) %>%
filter(aud_dis == 0 | is.nan(aud_dis))
length(unique(use_df$subject_id))
## How many passed the hearing and WASI screens?
use_df <- use_df %>%
filter(!(subject_id %in% hearing)) %>%
# hearing screen above; WASI screens below
filter(wasi_fs2 >= 80 | is.nan(wasi_fs2)) %>% # WASI criterion
filter(wasi_mr_ts > 30) # WASI nonverbal not less than 2 sd below mean
length(unique(use_df$subject_id))
## assign to reading-ability groups using the mean of WJ-BRS and TOWRE index
use_df$read <- (use_df$wj_brs + use_df$twre_index)/2
use_df$group <- with(use_df, ifelse(read<= 85, "Dyslexic",
ifelse(read >= 100, "Above Average",
"Below Average")))
## drop identifying information
use_df <- use_df[ , !(names(use_df) == "dob")]
## ## ## ## ## ## ## ## ## ## ##
## LOAD PSYCHOMETRIC FIT DATA ##
## ## ## ## ## ## ## ## ## ## ##
#setwd("..")
fpath <- file.path("Results", "Psychometrics", "Fit15")
flist <- list.files(fpath)
## read every fit file and stack them row-wise
psychometric_df <- do.call(rbind, lapply(file.path(fpath, flist), read.csv))
## make subject_id & asymptote column names consistent
psychometric_df <- rename(psychometric_df, subject_id=SubjectID,
lo_asymp=guess, hi_asymp=lapse)
psychometric_df$continuum <- '/ʃa/-/sa/'
## apply the same subject-ID corrections as for the raw response data
psychometric_df$subject_id <- gsub("GB240", "HB240",psychometric_df$subject_id )
psychometric_df$subject_id <- gsub("GB241", "KB241",psychometric_df$subject_id )
psychometric_df$subject_id <- gsub("KB578", "JB578",psychometric_df$subject_id )
## na.locf is "last observation carry forward". This works because we know the
## rows of the psychometrics dataframe are loaded in groups of 3, where all 3
## rows of the CSV file are the same contrast, and "single" is the last row.
## NOTE(review): continuum was assigned a constant just above, so this na.locf
## appears to be a no-op here -- confirm before removing.
psychometric_df$continuum <- zoo::na.locf(psychometric_df$continuum)
## add group and reading ability to psychometrics dataframe
columns <- c("subject_id", "group", "wj_brs","twre_index","adhd_dx", "wasi_mr_ts","age_at_testing",
"ctopp_pa", "ctopp_pm", "ctopp_rapid","gender")
group_table <- unique(use_df[columns])
# Which subjects are in use_df, but not psychometric df?
# (`comp` is computed for inspection; it is not used in this section)
use_list <- unique(psychometric_df$subject_id)
qual_list <- unique(use_df$subject_id)
comp <- setdiff(qual_list, use_list)
psychometric_df <- subset(psychometric_df,
subject_id %in% use_df$subject_id)
psychometric_df <- merge(psychometric_df, group_table, all.x=TRUE, all.y=FALSE)
length(unique(psychometric_df$subject_id))
## keep only plausible fits: threshold within the 1-7 stimulus range and
## acceptable model deviance
psychometric_df <- psychometric_df %>%
filter(threshold >= 1) %>%
filter(threshold <= 7) %>%
filter(deviance < 30)
length(unique(psychometric_df$subject_id))
# Drop duplicate rows (the original comment incorrectly said "columns")
psychometric_df <- psychometric_df[!duplicated(psychometric_df), ]
write.table(psychometric_df, file="cleaned_psychometrics.csv", sep=",",
quote=FALSE, row.names=FALSE)
# For the purposes of the publication table, get gender and age distributions
subj_sum <- psychometric_df %>%
group_by(subject_id) %>%
summarise(group = unique(group),
num_girls = unique(gender) - 1)
table(subj_sum$group)
table(subj_sum$group, subj_sum$num_girls)
# Now, save only the data for subjects we have full data for
use_df <- subset(use_df, subject_id %in% psychometric_df$subject_id)
write.table(use_df, file="cleaned_data.csv", sep=",", quote=FALSE, row.names=FALSE)
setwd("./Analysis")
####### See any disorders
disorders <- repository_df %>%
dplyr::select(c("record_id","learning_dis_notes","other_dis")) %>%
subset(record_id %in% use_df$record_id)%>%
subset(learning_dis_notes != "" | other_dis != "")
|
7ab832323b44f99af298a9b53767930fbb97309c | 4a2c6f223ff6063640475840209927bf85a9f33b | /medicago/compare-parameter-runs.R | 4f350aaca9e7031f5b5cd24938766b251abb2734 | [] | no_license | petrelharp/local_pca | d69cc4122c381bf981af65a8beb8914fabede4d5 | abf0c31da5cd74a1de62083580d482f5bd08d7de | refs/heads/master | 2023-06-25T18:12:39.355780 | 2023-06-14T04:39:12 | 2023-06-14T04:39:12 | 47,361,457 | 61 | 13 | null | 2021-02-25T17:20:18 | 2015-12-03T21:23:41 | HTML | UTF-8 | R | false | false | 4,601 | r | compare-parameter-runs.R | #/bin/env Rscript
usage <- "
Gets some summary statistics comparing the results (MDS coordinates)
across runs of the algorithm with different parameters.
"
library(jsonlite)
# Result directories: one per lostruct run; window type/size and job id are
# encoded in each directory name.
dirs <- c("./lostruct_results/type_snp_size_10000_weights_none_jobid_278544",
          "./lostruct_results/type_snp_size_1000_weights_none_jobid_450751",
          "./lostruct_results/type_snp_size_10000_weights_none_jobid_080290",
          "./lostruct_results/type_bp_size_100000_weights_none_jobid_381845",
          "./lostruct_results/type_bp_size_10000_weights_none_jobid_519007")
# Read each run's config.json to recover the parameters it was run with.
param.list <- lapply(dirs, function (dd) {
                fromJSON(file.path(dd,"config.json")) } )
# One row per run: output dir, window type (snp/bp), window size, #PCs, #MDS.
params <- data.frame(
    outdir=sapply(param.list,"[[","outdir"),
    type=sapply(param.list,"[[","type"),
    size=sapply(param.list,"[[","size"),
    npc=sapply(param.list,"[[","npc"),
    nmds=sapply(param.list,"[[","nmds")
    )
chrom.names <- paste0("chr",1:8)
region.file.names <- paste0(chrom.names,"-filtered-set-2014Apr15.regions.csv")
# Chromosome lengths (bp) for Medicago; used to place windows on a single
# cumulative genome-wide coordinate axis.
chrom.lens <- c( chr1=52991155, chr2=45729672, chr3=55515152, chr4=56582383, chr5=43630510, chr6=35275713, chr7=49172423, chr8=45569985, chl_Mt=124033 )
# Cumulative start offset of each chromosome on the genome-wide axis.
chrom.starts <- cumsum(c(0,chrom.lens[-length(chrom.lens)]))
names(chrom.starts) <- names(chrom.lens)
# Convert a (chromosome, within-chromosome position) pair into a single
# cumulative genome-wide coordinate using the global `chrom.starts` offsets.
chrom_pos <- function (chrom,pos) {
    pos + chrom.starts[chrom]
}
# Read the per-chromosome window tables for one run directory and add
# genome-wide coordinates.
#
# dd: directory containing one "<chrom>-...regions.csv" per chromosome
#     (file names come from the global `region.file.names`).
#
# Each window is widened out to the midpoints between neighboring windows,
# then shifted by the global `chrom.starts` offset, giving cumulative
# `real_start`/`real_end` columns. Returns one data frame with all
# chromosomes stacked.
#
# Fixes vs. original: (1) the result is returned directly instead of being
# bound to a dead local `out` (which also made the return value invisible);
# (2) `real_start` now drops the LAST of the nrow+1 breaks
# (`breaks[-length(breaks)]`) -- the original `breaks[-nrow(z)]` dropped the
# second-to-last break, mis-assigning the final two window starts (latent,
# since downstream code only uses `real_end`).
get_regions <- function (dd) {
    do.call(rbind, lapply( file.path(dd,region.file.names), function (dn) {
        z <- read.csv(dn, header=TRUE, stringsAsFactors=FALSE)
        this.chrom <- z$chrom[1]
        # window boundaries: 0, midpoints between consecutive windows, chrom end
        breaks <- c(0,(1/2)*(z$start[-1]+z$end[-nrow(z)]),chrom.lens[this.chrom])
        z$real_start <- chrom.starts[this.chrom]+breaks[-length(breaks)]
        z$real_end <- chrom.starts[this.chrom]+breaks[-1]
        z
    } ) )
}
# For each (chrom, pos) pair, return the index of the window in `reg` whose
# [previous real_end, real_end] interval contains the cumulative genome
# coordinate of that position.
match_window <- function (chrom,pos,reg) {
    genome.pos <- chrom_pos(chrom,pos)
    findInterval(genome.pos, c(0, reg$real_end))
}
# Correlate MDS coordinate k of run d1 with run d2, after mapping d2's
# windows onto d1's windows.
#
# d1, d2: run directories (each must contain regions CSVs and mds_coords.csv)
# k:      which MDS coordinate ("MDSk" column) to compare
#
# Returns the pairwise-complete correlation between d1's MDSk values and the
# mean of d2's MDSk values within each of d1's windows, or NA when either run
# has fewer than k MDS coordinates.
compare_mds <- function (d1,d2,k) {
    # correlation of d1 with mean of matching windows in d2
    reg1 <- get_regions(d1)
    reg2 <- get_regions(d2)
    # assign each d2 window (by its midpoint) to a d1 window index
    win2 <- factor(match_window(reg2$chrom,(reg2$start+reg2$end)/2,reg1),levels=1:nrow(reg1))
    mds1 <- read.csv(file.path(d1,"mds_coords.csv"),header=TRUE)
    mds2 <- read.csv(file.path(d2,"mds_coords.csv"),header=TRUE)
    # number of MDS columns available in each run
    nmds1 <- sum(grepl("MDS",colnames(mds1)))
    nmds2 <- sum(grepl("MDS",colnames(mds2)))
    nmds <- min(nmds1,nmds2)
    if (k>nmds) { return(NA) }
    # mean of d2's MDSk within each d1 window
    this.mds2 <- tapply(mds2[,paste0("MDS",k)],win2,mean,na.rm=TRUE)
    return( cor( mds1[,paste0("MDS",k)], this.mds2, use="pairwise" ) )
}
# Produces a matrix with upper triangle correlations in MDS1, and lower triangle in MDS2
# FIX: the two matrices must be (number of runs) x (number of runs); the
# original used ncol(params) for the column count, which only worked because
# this params table happens to have as many columns (5) as runs (5).
mds.cors <- list( matrix(NA,nrow=nrow(params),ncol=nrow(params)) )[c(1,1)]
for (i in seq_len(nrow(params))) {
    for (j in seq_len(nrow(params))) {
        mds.cors[[1]][i,j] <- compare_mds(params$outdir[i],params$outdir[j],1)
        mds.cors[[2]][j,i] <- compare_mds(params$outdir[i],params$outdir[j],2)
    }
}
# Label rows/columns with a readable description of each run's parameters.
for (k in 1:2) {
    colnames(mds.cors[[k]]) <- rownames(mds.cors[[k]]) <- sprintf("%d%s, %d PCs", params$size, params$type, params$npc)
}
library(xtable)
options(digits=2)
# Render both correlation matrices as LaTeX tables.
lapply(mds.cors,xtable)
# % latex table generated in R 3.3.1 by xtable 1.8-2 package
# % Wed Feb 8 16:17:12 2017
# \begin{table}[ht]
# \centering
# \begin{tabular}{rrrrrr}
# \hline
# & 10000snp, 2 PCs & 1000snp, 2 PCs & 10000snp, 5 PCs & 100000bp, 2 PCs & 10000bp, 2 PCs \\
# \hline
# 10000snp, 2 PCs & 1.00 & 0.87 & 0.96 & 0.90 & 0.88 \\
# 1000snp, 2 PCs & 0.68 & 1.00 & 0.73 & 0.68 & 0.94 \\
# 10000snp, 5 PCs & 0.96 & 0.92 & 1.00 & 0.88 & 0.93 \\
# 100000bp, 2 PCs & 0.90 & 0.87 & 0.88 & 1.00 & 0.87 \\
# 10000bp, 2 PCs & 0.68 & 0.93 & 0.72 & 0.67 & 1.00 \\
# \hline
# \end{tabular}
# \end{table}
#
# [[2]]
# % latex table generated in R 3.3.1 by xtable 1.8-2 package
# % Wed Feb 8 16:17:12 2017
# \begin{table}[ht]
# \centering
# \begin{tabular}{rrrrrr}
# \hline
# & 10000snp, 2 PCs & 1000snp, 2 PCs & 10000snp, 5 PCs & 100000bp, 2 PCs & 10000bp, 2 PCs \\
# \hline
# 10000snp, 2 PCs & 1.00 & 0.54 & 0.93 & 0.87 & 0.56 \\
# 1000snp, 2 PCs & 0.82 & 1.00 & 0.76 & 0.83 & 0.92 \\
# 10000snp, 5 PCs & 0.93 & 0.50 & 1.00 & 0.83 & 0.52 \\
# 100000bp, 2 PCs & 0.87 & 0.59 & 0.84 & 1.00 & 0.58 \\
# 10000bp, 2 PCs & 0.83 & 0.92 & 0.77 & 0.84 & 1.00 \\
# \hline
# \end{tabular}
# \end{table}
#
|
513f80792b0adaca1ec49f01dd37a82729f01d38 | 68a9979822adbf0ab71997e4c0bec88cc2845249 | /data-raw/g_nongradient.R | 5d5295b17912abe949f86637fc269f6676c3a5ae | [
"MIT"
] | permissive | edwindj/hodgedecompose | 21cdaa722c0b8ac0c63c7b16028f0dcc4e90e6ba | c6e3d1f40b8031ea66fd2c1f96ecef5c64cf7732 | refs/heads/main | 2023-07-29T17:40:11.769612 | 2021-09-01T10:08:54 | 2021-09-01T10:08:54 | 364,612,103 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 231 | r | g_nongradient.R | "from,to,weight
2,1,1
1,8,4.2
3,2,2
2,6,8.1
3,4,3.1
3,5,5.9
3,6,9.8
3,8,7.1
4,5,3.1
4,6,6.9
5,6,4.1
7,6,1
8,7,2
" -> csv
g_nongradient <- read.csv(text = csv, strip.white = TRUE)
usethis::use_data(g_nongradient, overwrite = TRUE)
|
80e0369e05f718554d049c18a2192a5609eec5b1 | 509d648580adfb17d0a33157ca1dd551320b7112 | /R/validate_geos.R | d27c24b53bbea176b43f7e3b9dde99154b49e1db | [
"MIT"
] | permissive | CT-Data-Haven/cwi | 55beb8668edffba5c46314dcb8b9e7f8e6d3e96e | 8dabdd8bbcfad40db8969bfc37ea4fc557281002 | refs/heads/main | 2023-09-04T15:17:01.766095 | 2023-08-25T18:27:12 | 2023-08-25T18:27:12 | 138,631,910 | 7 | 4 | NOASSERTION | 2023-02-12T17:53:16 | 2018-06-25T18:03:54 | R | UTF-8 | R | false | false | 2,807 | r | validate_geos.R | # this should still work even for null counties--that should just mean erasing counties out of returned data
# Look up the counties of one state in tidycensus::fips_codes.
#
# st:       state FIPS code, full name, or two-letter abbreviation.
# counties: vector of 5-digit county GEOIDs to keep, or NULL/"all" for all.
#
# Returns a data frame with columns: state (full name), county_geoid, county.
county_x_state <- function(st, counties) {
  # NULL means "no county filter"
  if (is.null(counties)) {
    counties <- "all"
  }
  # match the state by code, full name, or abbreviation
  matched <- dplyr::filter(
    tidycensus::fips_codes,
    state_code == st | state_name == st | state == st
  )
  # 5-digit county GEOID = 2-digit state code + 3-digit county code
  matched$county_geoid <- paste0(matched$state_code, matched$county_code)
  # optionally restrict to the requested GEOIDs
  if (!identical(counties, "all")) {
    matched <- dplyr::filter(matched, county_geoid %in% counties)
  }
  dplyr::select(matched, state = state_name, county_geoid, county)
}
# Normalize a state identifier to its 2-digit FIPS code string.
#
# state: a FIPS code (numeric, or a string with or without zero padding),
#        a two-letter abbreviation, or a full state name.
#
# Returns the 2-digit state FIPS string, or NULL when nothing matches.
get_state_fips <- function(state) {
  xw <- dplyr::distinct(tidycensus::fips_codes, state, state_code, state_name)
  # a bare single-digit string is really a numeric code that needs padding
  if (grepl("^\\d$", state)) {
    state <- as.numeric(state)
  }
  if (is.numeric(state)) {
    unpad <- state
    state <- sprintf("%02d", state)
    cli::cli_inform("Converting state {unpad} to {state}.")
  }
  # resolve in order: already a code, then abbreviation (the `state` column),
  # then full name; fall through to NULL when nothing matches
  if (state %in% xw$state_code) {
    state
  } else if (state %in% xw$state) {
    xw$state_code[xw$state == state]
  } else if (state %in% xw$state_name) {
    xw$state_code[xw$state_name == state]
  } else {
    NULL
  }
}
# Resolve a vector of county identifiers to 5-digit county FIPS codes.
#
# state:    2-digit state FIPS string (as produced by get_state_fips()).
# counties: county names (with or without the " County" suffix), 3-digit
#           county codes (numeric or string), 5-digit GEOIDs, or
#           NULL/"all"/"*" for every county in the state.
#
# Warns about requested counties that match nothing, and for Connecticut
# ("09") keeps only the "090xx" codes (dropping COGs, per the original
# comment). Returns a character vector of county GEOIDs.
#
# Fix vs. original: the "all"/"*" test compared two scalars with the
# vectorized `|`; it now uses the short-circuit `||`.
get_county_fips <- function(state, counties) {
  xw <- county_x_state(state, "all")
  if (is.null(counties)) {
    counties <- "all"
  }
  if (identical(counties, "all") || identical(counties, "*")) {
    counties <- xw$county_geoid
  } else {
    # numeric inputs are 3-digit county codes; prepend the state code
    if (is.numeric(counties)) {
      counties <- sprintf("%s%03d", state, counties)
    }
    # normalize the remaining strings to either a GEOID or a "... County" name
    counties <- dplyr::case_when(
      grepl("^\\d{3}$", counties) ~ paste0(state, counties),
      !grepl("\\d", counties) & !grepl(" County$", counties) ~ paste(counties, "County"),
      TRUE ~ counties
    )
    cty_from_name <- xw[xw$county %in% counties, ]
    cty_from_fips <- xw[xw$county_geoid %in% counties, ]
    # any counties requested that didn't match?
    matches <- unique(rbind(cty_from_name, cty_from_fips))
    mismatch <- setdiff(counties, c(matches$county, matches$county_geoid))
    if (length(mismatch) > 0) {
      cli::cli_warn("Some counties you requested didn't match for the state {state}: {mismatch}")
    }
    counties <- matches$county_geoid
  }
  # remove COGs
  if (state == "09") {
    counties <- stringr::str_subset(counties, "^090")
  }
  counties
}
# Check that FIPS codes have the expected number of characters.
#
# fips:      character vector of FIPS codes, or NULL, or the sentinel "all".
# n_correct: required number of characters per code.
#
# Returns TRUE when fips is NULL, is the sentinel "all", or every code has
# exactly n_correct characters; FALSE otherwise.
#
# Fix vs. original: the scalar condition used the vectorized `&`; the logic
# is rewritten with scalar short-circuit operators and without the redundant
# if/else-return ladder.
check_fips_nchar <- function(fips, n_correct) {
  # NULL and the "all" sentinel are vacuously valid
  if (is.null(fips) || identical(fips, "all")) {
    return(TRUE)
  }
  all(nchar(fips) == n_correct)
}
# takes e.g. list(tracts = 11, bgs = 12)
# Run check_fips_nchar() once per expected width, e.g.
# nhood_fips_type(fips, list(tracts = 11, bgs = 12)).
# Returns one TRUE/FALSE per geography type, names carried over from n_list.
nhood_fips_type <- function(fips, n_list) {
  lapply(n_list, function(width) check_fips_nchar(fips, width))
}
|
819686952c05f1f3430305e4f5db0966068609df | f36b2ad1dc17ec05278f13c7fa72a1fd8343ee19 | /R/chk-character-or-factor.R | 16cae65e73a9b2a988f2675f5285d1f4501f6e08 | [
"MIT"
] | permissive | poissonconsulting/chk | 45f5d81df8a967aad6e148f0bff9a9f5b89a51ac | c2545f04b23e918444d4758e4362d20dfaa8350b | refs/heads/main | 2023-06-14T19:32:17.452025 | 2023-05-27T23:53:25 | 2023-05-27T23:53:25 | 199,894,184 | 43 | 3 | NOASSERTION | 2023-01-05T18:50:23 | 2019-07-31T16:42:59 | R | UTF-8 | R | false | false | 1,069 | r | chk-character-or-factor.R | #' Check Character or Factor
#'
#' @description
#' Checks if character or factor using
#'
#' `is.character(x) || is.factor(x)`
#'
#' @inheritParams params
#' @inherit params return
#'
#' @family chk_typeof
#'
#' @examples
#' # chk_character_or_factor
#' chk_character_or_factor("1")
#' chk_character_or_factor(factor("1"))
#' try(chk_character(1))
#' @export
chk_character_or_factor <- function(x, x_name = NULL) {
  # fast path: input already satisfies the check, return it invisibly
  if (vld_character_or_factor(x)) {
    return(invisible(x))
  }
  # derive a backticked name from the caller's expression when not supplied
  if (is.null(x_name)) {
    x_name <- deparse_backtick_chk((substitute(x)))
  }
  abort_chk(x_name, " must be character or factor", x = x)
}
#' @describeIn chk_character_or_factor Validate Character or Factor
#'
#' @examples
#' # vld_character_or_factor
#' vld_character_or_factor("1")
#' vld_character_or_factor(matrix("a"))
#' vld_character_or_factor(character(0))
#' vld_character_or_factor(NA_character_)
#' vld_character_or_factor(1)
#' vld_character_or_factor(TRUE)
#' vld_character_or_factor(factor("text"))
#' @export
# TRUE when `x` is either a character vector or a factor.
vld_character_or_factor <- function(x) {
  is.factor(x) || is.character(x)
}
|
84a6cbda92cb713f42fbce641a4d314bed54e427 | 4da94238447c5ed5188163d9a4b2098de09bfa9a | /RandomizedMatrx.R | d9ceb6f2d3352a0024183a8baf62296abf3038f7 | [] | no_license | ShawnQin/OlfactoryCode | d3b7847ad62f9eb76bdfb96a4c0e181081de54d3 | 39454887000de44c1b6b8e646c78dcc18a584378 | refs/heads/master | 2020-12-24T09:56:50.864823 | 2017-05-19T07:44:31 | 2017-05-19T07:44:31 | 73,254,572 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,772 | r | RandomizedMatrx.R | RandomizedMatrx <- function(OldMatrix,method="switch",...){
# this function return randomized interaction network
# matrix old interaction network
# method mathod used to randomize the matrix, can be "switch","bnOdorWise" and "bngloble"
# ====================================
if(method=='switch'){
MaxTry <- 100*sum(OldMatrix != 0) # maximum number of try
# all interaction pairs index
allInx <- which(OldMatrix !=0,arr.ind = TRUE)
LEN <- dim(allInx)[1] #total number of edges
# ValueInter <- OldMatrix[allInx] # store the interaction type or value
# RandomizedMatrx <- matrix(0,nrow = nrow(OldMatrix),ncol = ncol(OldMatrix))
# select pairs of interaction
count <- 0 # how many times has been tried
UpdateMat <- OldMatrix
while (count <= MaxTry) {
sampleFlag <- 1
while(sampleFlag){
temp <- sample(LEN,2) # which two rows
InxTry <- allInx[temp,]
if(InxTry[1,1] != InxTry[2,1] && InxTry[1,2] != InxTry[2,2]){
sampleFlag <- 0
}
}
# switch or not
if(UpdateMat[InxTry[1,1],InxTry[2,2]] == 0 && UpdateMat[InxTry[2,1],InxTry[1,2]] == 0){
allInx[temp[1],] <- c(InxTry[1,1],InxTry[2,2])
allInx[temp[2],] <- c(InxTry[2,1],InxTry[1,2])
UpdateMat[allInx[temp,]] <- UpdateMat[InxTry]
UpdateMat[InxTry] <- 0
# ValueInter[temp] <- ValueInter[c(temp[2],temp[1])]
}
count <- count + 1
# RandomizedMatrx <- matrix(0,nrow = nrow(OldMatrix),ncol = ncol(OldMatrix))
# RandomizedMatrx[allInx] <- ValueInter
# if(any(colSums(abs(UpdateMat)) != colSums(abs(OldMatrix))) | any(rowSums(abs(UpdateMat)) != rowSums(abs(OldMatrix)))){
# browser()
# }
}
}
return(UpdateMat)
} |
d99971cdf80f707364d8e041dd82c1858da3ab73 | 0dfe50e7f553927442a27ed4b1cf366216b06727 | /univariate/check-if-normal.R | 78860d7812f018b321e35c38dc4387a0cde7674c | [] | no_license | kgdunn/figures | 3543d2bcb96cc61cc9c2217da3a4210dd23b1103 | 662076362df316069ba9c903a0a71344da887142 | refs/heads/main | 2021-07-06T06:59:34.977099 | 2021-06-14T20:47:11 | 2021-06-14T20:47:11 | 244,129,830 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 292 | r | check-if-normal.R | N = 100
# Open a PNG bitmap device for the figure.
bitmap('check-if-normal.png', type="png256", width=10, height=7, res=300, pointsize=14)
# Draw N values from an F(20, 20) distribution -- note these are NOT normal,
# while the axis label asks the reader to judge normality (presumably an
# exercise figure; N is defined earlier in the script).
s1 <- rf(N, 20, 20)
plot(s1, main="", xlab="A sequence of normal values?", ylab="", cex.lab=1.5, cex.main=1.8, lwd=2, cex.sub=1.8, cex.axis=1.8)
# Save the sampled values alongside the figure, then close the device.
write.table(s1, 'check-if-normal.dat')
dev.off()
8fc1d89894110e52f87c86822050f9fbb3dd7384 | c3826e89c7c78acdcc4596820d03fa96c8710b38 | /R/MathGenerics.R | 907046234f3487b1f148dc7cd7c7bcc47a56d5f8 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | chen496/SomaDataIO | 7b393fad010774e17e086555a026c2a38de06415 | b8f00329aaa283f8243d1064a7bda19b873fdd67 | refs/heads/master | 2023-06-24T21:22:02.222540 | 2021-07-27T20:45:52 | 2021-07-27T20:45:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,355 | r | MathGenerics.R | #' Mathematical Group Generics for ADAT Object
#'
#' This is the S3 group generic method to apply mathematical functions
#' to the RFU data of `soma_adat` objects.
#' The clinical meta data is *not* transformed and remains in
#' the returned object. Typical generic functions include:
#' * `log()`
#' * `abs()`
#' * `sign()`
#' * `floor()`
#' * `sqrt()`
#' * `exp()`
#' * See [groupGeneric()] (\emph{Math}) for full listing
#' @name MathGenerics
#' @param x The `soma_adat` class object to perform the transformation.
#' @param ... Additional arguments passed to the various group generics
#' as appropriate.
#' @return A `soma_adat` object with the same dimensions of the input
#' object with the feature columns transformed by the specified generic.
#' @author Stu Field
#' @seealso [groupGeneric()]
#' @examples
#' example_data$seq.3343.1
#'
#' # log-transformation
#' a <- log(example_data)
#' a$seq.3343.1
#' b <- log10(example_data)
#' b$seq.3343.1
#' isTRUE(all.equal(b, log(example_data, base = 10)))
#'
#' # floor
#' c <- floor(example_data)
#' c$seq.3343.1
#'
#' # square-root
#' d <- sqrt(example_data)
#' d$seq.3343.1
#'
#' # rounding
#' e <- round(example_data)
#' e$seq.3343.1
#' @importFrom usethis ui_stop ui_value
#' @export
Math.soma_adat <- function(x, ...) {
  # feature (RFU) columns; remaining meta data columns are left untouched
  .apts <- getAnalytes(x)
  cls <- class(x)
  # each feature column must be numeric or complex before applying the generic
  mode_ok <- vapply(x[, .apts], function(.x)
                    is.numeric(.x) || is.complex(.x), NA)
  if ( !all(mode_ok) ) {
    usethis::ui_stop(
      "Non-numeric variable(s) in `soma_adat` object \\
      where RFU values should be: {ui_value(names(x[, .apts])[ !mode_ok ])}."
    )
  }
  # apply the triggering generic (log, sqrt, floor, ...) column by column
  x[, .apts] <- lapply(X = x[, .apts], FUN = .Generic, ...)
  structure(x, class = cls)
}
#' @importFrom stringr str_glue
#' @importFrom lifecycle deprecate_warn
#' @method Math soma.adat
#' @export
Math.soma.adat <- function(x, ...) {
  # Deprecation shim for the legacy `soma.adat` class: warn once, re-class
  # the object to the current `soma_adat` class, then re-dispatch the same
  # group generic (its name is held in `.Generic`).
  .msg <- stringr::str_glue(
    "The {ui_value('soma.adat')} class is now {ui_value('soma_adat')}. \\
    This math generic `{.Generic}` will be deprecated.
    Please either:
      1) Re-class with x %<>% addClass('soma_adat')
      2) Re-call 'read_adat(file)' to pick up the new 'soma_adat' class."
  )
  # lifecycle warning; the date records when deprecation began
  deprecate_warn("2019-01-31", "SomaRead::Math.soma.adat()", details = .msg)
  class(x) <- c("soma_adat", "data.frame")
  # re-invoke the generic on the re-classed object
  do.call(.Generic, list(x = x, ...))
}
|
2240f32a53510b8cc1446d1c83bf769b3561d0f1 | 14c3d1d0f0859cd48eef9725ccd7c18dd128288a | /Analysis.R | 637f5cedaef61ea23e5d28c492336ffd02bf4262 | [] | no_license | vishmaram/RepData_PeerAssessment1 | 9968d047045b279d5f674c69992692c76595231f | a7e81499a1b6f22ffbcd4e2a83cbb706cc6f08a7 | refs/heads/master | 2021-01-22T21:32:59.664270 | 2016-04-25T04:58:17 | 2016-04-25T04:58:17 | 57,007,287 | 0 | 0 | null | 2016-04-25T02:54:57 | 2016-04-25T02:54:57 | null | UTF-8 | R | false | false | 2,472 | r | Analysis.R | unzip("activity.zip", exdir = "data/", overwrite = TRUE)
# NOTE(review): assumes data/activity.csv exists (extracted earlier in the
# script). Columns used below: steps, date, interval.
activity <- read.csv("data/activity.csv")
head(activity)
# Keep only rows with a recorded step count for the first set of summaries.
naLogical <- !is.na(activity$steps)
activityEdited <- activity[naLogical,]
sum(is.na(activity$steps))
# Total steps taken per day (missing values removed above).
activityByDay <- aggregate(activityEdited$steps,list(activityEdited$date), sum)
names(activityByDay) <- c("Date", "Total Steps")
hist(activityByDay$`Total Steps`, main="Total number of steps taken each day")
summary(activityByDay$`Total Steps`)
# Average steps per interval across all days.
activityByInterval <- aggregate(activityEdited$steps, list(activityEdited$interval), mean)
head(activityByInterval)
names(activityByInterval) <- c("Interval", "Avg. Steps")
plot(activityByInterval$Interval,activityByInterval$`Avg. Steps`,type = "l",xlab="Interval",ylab="Avg. Steps Taken")
title(main = "Average Steps by interval")
# Locate the interval with the maximum average step count.
which.max(activityByInterval$`Avg. Steps`)
activityByInterval[104,]
activityByInterval[activityByInterval$`Avg. Steps` == 206.000,]
# Mean steps per day (not used further below; shown for comparison).
activityByDayAvg <- aggregate(activityEdited$steps,list(activityEdited$date), mean)
names(activityByDayAvg) <- c("Date", "Mean Steps")
head(activityByDayAvg)
# Impute missing step counts with the interval-wise average: after the merge,
# column 2 is `steps` and column 4 is the interval's `Avg. Steps`.
activityMerged <- merge(activity,activityByInterval,by.x = "interval", by.y="Interval")
head(activityMerged)
activityMerged[is.na(activityMerged$steps),2] <- activityMerged[is.na(activityMerged$steps),4]
newActivitySet <- activityMerged
# Re-run the daily totals on the imputed data set.
newActivityByDay <- aggregate(newActivitySet$steps,list(newActivitySet$date), sum)
names(newActivityByDay) <- c("Date", "Total Steps")
hist(newActivityByDay$`Total Steps`)
summary(newActivityByDay$`Total Steps`)
# Add the weekday name plus a DayType column (initialized to "Weekend",
# then overwritten to "Weekday" for Monday-Friday rows).
newActivitySet <- cbind(newActivitySet,weekdays(as.POSIXlt(newActivitySet$date)),"Weekend")
head(newActivitySet)
names(newActivitySet) <- c("Interval", "Steps","Date","AvgSteps", "WeekDay","DayType")
newActivitySet$DayType <- as.character(newActivitySet$DayType)
newActivitySet[newActivitySet$WeekDay != "Sunday" & newActivitySet$WeekDay !="Saturday", 6] <- "Weekday"
newActivitySet$DayType <- as.factor(newActivitySet$DayType)
# Average steps per interval, split by weekday/weekend.
newActivitySetByInterval <- aggregate(newActivitySet$Steps, list(newActivitySet$Interval, newActivitySet$DayType), mean)
head(newActivitySetByInterval)
names(newActivitySetByInterval) <- c("Interval", "DayType", "Average")
library(ggplot2)
# Panel plot of average steps by interval, faceted by day type.
ggplot(newActivitySetByInterval, mapping = aes(Interval,Average, col = DayType,title='Average Steps by interval by day type')) +
  ylab("Average Steps") +
  geom_point(size=3)+ geom_smooth(method="lm") + facet_grid(facets = DayType~.)
|
5b1368482adf018bd9a20ca8b8366b4b9ada24d7 | 983fc432d61023469f68452de3e5b1fa3e0dd5d2 | /bayesian_immigration_estimates.R | 53cca831a714b6dd6194342bf8f57c580fdbd127 | [] | no_license | guittarj/MS_TraitsTransplants | 00127715eae477960e50f243362a41d9f4868ab3 | 0f849a0b4e22809637e3287b2da5da9a266f63c6 | refs/heads/master | 2021-06-05T08:24:56.479628 | 2016-10-24T14:21:14 | 2016-10-24T14:21:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,751 | r | bayesian_immigration_estimates.R | # A script that estimates immigration rates using JAGS, given the rates of turnover
# we observe in local controls, and the composition of controls at the site level
# NOTE(review): `wd`, `cover`, and `cover.meta` must already exist in the
# workspace; this script is assumed to be sourced after the data are loaded.
setwd(wd)
#load packages
loadpax(c("scales","R2jags", "lattice"))
# round cover to nearest unit
cover0 <- round((cover * 100) / rowSums(cover))
# initialize accumulators: per-chain m estimates, per-site posterior
# predictions, and Rhat convergence diagnostics
mvals <- data.frame(site = character(), m = numeric(), rep = integer())
posts <- list()
rhat <- data.frame(deviance = numeric(), m = numeric())
# for each site...
for(site in unique(cover.meta$siteID)) {
  # a filter vector for TTCs by site
  site.filt <- cover.meta$TTtreat == 'TTC' &
    cover.meta$destSiteID == site &
    cover.meta$Year %in% c(2011:2013)
  # a filter vector for TT1s by site
  turf.filt <- cover.meta$TTtreat == 'TT1' &
    cover.meta$destSiteID == site &
    cover.meta$Year %in% c(2011:2013)
  # relative abundances in TT1s by year
  N <- as.data.frame(cbind(cover0[turf.filt, ], cover.meta[turf.filt, c('Year','turfID')]))
  # N0 = this year's count; N1 = next year's count (NA for the final year)
  N <- N %>%
    gather(sp, N0, -turfID, -Year) %>%
    group_by(turfID, sp) %>%
    mutate(N1 = ifelse(Year == 2013, NA,
                ifelse(Year == 2012, N0[Year == 2013], N0[Year == 2012])))
  # within-turf relative abundances
  turfabun <- N %>%
    group_by(turfID, Year) %>%
    mutate(abun = N0 / sum(N0))
  # matrix of total cover for each turf * year (using TT1s)
  commN <- N %>%
    group_by(turfID, Year) %>%
    summarise(N = sum(N0))
  commN <- commN[match(N$turfID, commN$turfID), ]
  # Using mean relative abundances in TTCs for local flora source
  siteabun <- as.data.frame(cbind(cover0[site.filt, ], cover.meta[site.filt, c('turfID','Year')]))
  siteabun <- siteabun %>%
    gather(sp, abun, -turfID, -Year) %>%
    group_by(turfID, Year) %>%
    mutate(abun = abun / sum(abun)) %>%
    group_by(sp, Year) %>%
    summarize(abun = mean(abun))
  siteabun <- siteabun[match(N$sp, siteabun$sp), ]
  #filter out situations where the spp aren't in the site flora at all (bc errors)
  filt <- siteabun %>%
    group_by(sp) %>%
    mutate(filt = ifelse(sum(abun == 0) > 0, FALSE, TRUE))
  # also drop rows with no next-year observation
  filt <- filt$filt & !is.na(N$N1)
  N1 <- N$N1[filt]
  commN <- commN$N[filt]
  turfabun <- turfabun$abun[filt]
  siteabun <- siteabun$abun[filt]
  N1P <- N1
  # organize into a list
  data <- list('N1','commN','turfabun','siteabun')
  # Initialize: three chains with different starting values for m
  inits <- lapply(as.list(c(0.1, 0.9, 0.5)), function(x) list(m = x))
  reps <- length(inits)
  parameters <- c('m', 'N1P')
  # Write the JAGS model: next-year counts are Poisson with mean given by a
  # mixture of local (turf) and site-level abundances, weighted by the
  # immigration rate m; zN1P generates posterior-predictive counts.
  sink(paste0(wd, "\\model.txt"))
  cat("
    model {
      for (i in 1:length(N1)) {
        N1[i] ~ dpois(commN[i] * ((1 - m) * turfabun[i] + m * siteabun[i]))
      }
      m ~ dunif(0, 1) # nothing changes if m ~ dunif(0, 0.5)
      for (i in 1:length(N1)) {
        zN1P[i] ~ dpois(commN[i] * ((1 - m) * turfabun[i] + m * siteabun[i]))
        N1P[i] <- zN1P[i]
      }
    }
    ", fill=TRUE)
  sink()
  m1 <- jags(data, inits, parameters, "model.txt", n.thin = 50,
           n.chains = reps, n.burnin = 1000, n.iter = 10000)
  # record convergence diagnostics and per-chain posterior means of m
  rhat <- rbind(rhat, m1$BUGSoutput$summary[, 'Rhat'])
  m1.mcmc <- as.mcmc(m1)
  tmp <- sapply(m1.mcmc, function(x) mean(x[, 'm']))
  mvals <- rbind(mvals, data.frame(site, m = tmp, rep = 1:length(inits)))
  # posterior-predicted N1 values: strip deviance/m columns, then reorder the
  # N1P[i] columns by their index parsed from the column names
  tmp <- as.matrix(m1.mcmc[[2]])[, -c(1,2)]
  tmp.ordr <- gsub(pattern = 'N1P\\[', '', colnames(tmp))
  tmp.ordr <- as.numeric(gsub(pattern = '\\]', '', tmp.ordr))
  tmp <- colMeans(tmp[, match(1:ncol(tmp), tmp.ordr)])
  tmp <- data.frame(N1 = N1, N1_predicted = tmp)
  posts[[site]] <- tmp
}
# Stack per-site observed vs posterior-predicted abundances and compute the
# overall goodness of fit (R^2 of observed on predicted).
rsq <- do.call('rbind', posts)
rsq <- summary(lm(N1 ~ N1_predicted, rsq))$r.squared
rsq
# Per-site immigration estimate: mean of m across the three chains.
m.bayes <- mvals %>%
  group_by(site) %>%
  summarise(m = mean(m))
write.csv(m.bayes, file = "data\\m.bayes.csv", row.names = FALSE)
|
fa371db95e9acae1f351dd851861cde21720beb2 | 4bf6efaf2507ad57a4c79ebf90a837d2cae70527 | /man/jonckheere.Rd | e4e5050ea81f4981d1c7ecedba0518d9a1320b73 | [] | no_license | kloke/npsm | c340a40a54cc852fc5c938128ae181e6603838bc | 2d47c5c6352a46eaae2dfbd3ab833ab2b6637805 | refs/heads/master | 2021-12-02T11:27:11.446950 | 2021-11-29T22:22:49 | 2021-11-29T22:22:49 | 25,049,501 | 0 | 4 | null | null | null | null | UTF-8 | R | false | false | 996 | rd | jonckheere.Rd | \name{jonckheere}
\alias{jonckheere}
\title{ Jonckheere's Test for Ordered Alternatives}
\description{
Computes Jonckheere's Test for Ordered Alternatives; see Section 5.6 of Kloke and McKean (2014).
}
\usage{
jonckheere(y, groups)
}
\arguments{
\item{y}{vector of responses}
\item{groups}{vector of associated groups (levels)}
}
\details{
Computes Jonckheere's Test for Ordered Alternatives.
The main source was downloaded from the site:
smtp.biostat.wustl.edu/sympa/biostat/arc/s-news/2000-10/msg00126.html
}
\value{
\item{Jonckheere}{test statistic}
\item{ExpJ}{null expectation}
\item{VarJ}{null variance}
\item{p}{p-value}
}
\references{
Kloke, J. and McKean, J.W. (2014), \emph{Nonparametric statistcal methods using R}, Boca Raton, FL: Chapman-Hall.
smtp.biostat.wustl.edu/sympa/biostat/arc/s-news/2000-10/msg00126.html
}
\author{ John Kloke \email{kloke@biostat.wisc.edu}, Joseph McKean}
\examples{
r<-rnorm(30)
gp<-c(rep(1,10),rep(2,10),rep(3,10))
jonckheere(r,gp)
}
|
837422cfe8047c46efc50fab6b4e7e7b16b8a964 | d2682a3d2004a473456b95019af095397063645e | /man/clean_data.Rd | 627baee6fc6ab9ea6c76719e8683577a45cf969e | [] | no_license | kattaoa/oktennis | dbb6879713d7c5450008f9cb9c63ff1d1c32a083 | 9c0868714d907e650c7694691ad85304bef03247 | refs/heads/master | 2020-06-19T06:58:35.014913 | 2018-12-08T10:11:11 | 2018-12-08T10:11:11 | 160,927,133 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,957 | rd | clean_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_clean_functions.R
\name{clean_data}
\alias{clean_data}
\title{Cleans data extracted from ATP player stats website}
\usage{
clean_data(x)
}
\arguments{
\item{x}{Data from ATP site}
}
\value{
Cleans the data to have the following columns:
\itemize{
\item \code{name}: First and Last Name
\item \code{rank}: Rank
\item \code{age}: Age and Birth Date
\item \code{pro_start}: Pro_start
\item \code{weight}: Weight
\item \code{height}: Height
\item \code{residence}: Residence
\item \code{hand}: Hand
\item \code{coach}: Coach
\item \code{aces}: Aces
\item \code{df}: Double Faults
\item \code{first_serve}: 1st Serve
\item \code{first_serve_won}: 1st Serve Points Won
\item \code{second_serve_won}: 2nd Serve Points Won
\item \code{bp_faced}: Break Points Faced
\item \code{bp_saved}: Break Points Saved
\item \code{serv_game_played}: Service Games Played
\item \code{serv_game_won}: Service Games Won
\item \code{total_serv_won}: Total Service Points Won
\item \code{first_return}: 1st Serve Return Points Won
\item \code{second_return}: 2nd Serve Return Points Won
\item \code{bp_opp}: Break Points Opportunities
\item \code{bp_conv}: Break Points Converted
\item \code{ret_game_played}: Return Games Played
\item \code{ret_game_won}: Return Games Won
\item \code{ret_won}: Return Points Won
\item \code{total_ret_won}: Total Points Won
}
}
\description{
After having retrieved data from the ATP website, this function cleans the
player stats in a systematic way given that all the information has been
found and is in the correct format.
}
\seealso{
Other web scraping functions: \code{\link{extract_data}},
\code{\link{get_ATP_code}}, \code{\link{get_ATP_url}},
\code{\link{get_plus_name}},
\code{\link{manipulate_data}}
}
\concept{web scraping functions}
|
a05b586ec5bd9a3e55dbbc90a8c1860d2b385ed5 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /HardyWeinberg/R/HWGenotypePlot.R | 62642f6db163744cf3723301dfc263cce3e18615 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,518 | r | HWGenotypePlot.R | HWGenotypePlot <- function(X,plottype=1,xlab=expression(f[AA]),ylab=ifelse(plottype==1,expression(f[AB]),
expression(f[BB])),asp=1,pch=19,xlim=c(0,1),ylim=c(0,1),cex=1,cex.axis=2,cex.lab=2,...) {
# Makes a scatter plot of genotype frequencies.
# X:        vector of 3 genotype counts/frequencies, or a matrix with one
#           sample per row and three columns.
# plottype: 1 plots column 2 vs column 1 (heterozygote vs homozygote);
#           2 plots column 3 vs column 1 (homozygote vs homozygote);
#           any other value is an error.
# The red line marks the simplex boundary (the two plotted frequencies sum
# to 1); the blue curve is the Hardy-Weinberg locus. Returns NULL.
# FIX: the `asp` argument was accepted but never forwarded to plot(); it is
# now passed through. The unused local `nr` was removed.
  if (is.vector(X)) {
    if (length(X) != 3) {
      stop("X must have three elements")
    }
    else {
      X <- matrix(X, ncol = 3, dimnames = list(c("1"),
        names(X)))
    }
  }
  nc <- ncol(X)
  if (any(X < 0))
    stop("X must be non-negative")
  if (nc != 3)
    stop("X must have three columns")
  # single sample: normalize to frequencies directly; otherwise use HWClo
  if (nrow(X) == 1) {
    Xcom <- X/sum(X)
  } else {
    Xcom <- HWClo(X)
  }
  # Hardy-Weinberg curve: fAB = 2(sqrt(fAA) - fAA), fBB = (1 - sqrt(fAA))^2
  fAA <- seq(0,1,by=0.01)
  fAB <- 2*(sqrt(fAA)-fAA)
  fBB <- (1-sqrt(fAA))^2
  if(is.element(plottype,c(1,2))) {
    opar <- par(mar=c(5,5,2,1))
    if(plottype==1) { # heterozygote versus homozygote
      plot(Xcom[,1],Xcom[,2],pch=pch,asp=asp,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,cex=cex,cex.axis=cex.axis,cex.lab=cex.lab,...)
      lines(c(0,1),c(1,0),lwd=2,col="red")
      points(fAA,fAB,pch=19,col="blue",type="l",lwd=2)
    }
    if(plottype==2) { # homozygote versus homozygote
      plot(Xcom[,1],Xcom[,3],pch=pch,asp=asp,xlim=xlim,ylim=ylim,xlab=xlab,ylab=ylab,cex=cex,cex.axis=cex.axis,cex.lab=cex.lab,...)
      lines(c(0,1),c(1,0),lwd=2,col="red")
      points(fAA,fBB,pch=19,col="blue",type="l",lwd=2)
    }
    par(opar)
  } else stop("HWGenotypePlot: invalid argument for plottype")
  return(NULL)
}
|
d2148ba2e5057315fc0336ca653d09d9d5cdaae1 | 9ca1c15ff4731aa0abcf8197f7c6e496db4ea9fa | /man/MetaQC.Rd | 33a734d67161de22e1426943925b5690c8d6c5d3 | [] | no_license | donkang75/MetaQC | f1337b35ab2c91443ba6500cd1e5111658a768f4 | 854fc1cb4098e85ef7e8a8fb98a444fbe26265eb | refs/heads/master | 2020-04-10T22:47:57.561805 | 2013-02-22T04:42:10 | 2013-02-22T04:42:10 | 1,641,558 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,830 | rd | MetaQC.Rd | \name{MetaQC}
\alias{MetaQC}
\title{
MetaQC: Objective Quality Control and Inclusion/Exclusion Criteria for Genomic Meta-Analysis
}
\description{
MetaQC implements our proposed quantitative quality control measures: (1) internal homogeneity of co-expression structure among studies (internal quality control; IQC); (2) external consistency of co-expression structure correlating with pathway database (external quality control; EQC); (3) accuracy of differentially expressed gene detection (accuracy quality control; AQCg) or pathway identification (AQCp); (4) consistency of differential expression ranking in genes (consistency quality control; CQCg) or pathways (CQCp). (See the reference for detailed explanation.)
For each quality control index, the p-values from statistical hypothesis testing are minus log transformed and PCA biplots are applied to assist visualization and decision. (See the reference for detailed explanation.)
}
\usage{
MetaQC(DList, GList, isParallel = FALSE, nCores = NULL,
useCache = TRUE, filterGenes = TRUE,
maxNApctAllowed=.3, cutRatioByMean=.4, cutRatioByVar=.4, minNumGenes=5,
verbose = FALSE, resp.type = c("Twoclass", "Multiclass", "Survival"))
}
\arguments{
\item{DList}{
Either a list of all data matrices (Case 1) or a list of lists (Case 2); The first case is simplified input data structure only for two classes comparison. Each data name should be set as the name of each list element. Each data should be a numeric matrix that has genes in the rows and samples in the columns. Row names should be official gene symbols and column names be class labels. For the full description of input data, you can use the second data format. Each data is represented as a list which should have x, y, and geneid (geneid can be replaced to row names of matrix x) elements, representing expression data, outcome or class labels, and gene ids, respectively. Additionally, in the survival analysis, censoring.status should be set.
}
\item{GList}{
The location of a file which has sets of gene symbol lists such as gmt files. By default, the gmt file will be converted to list object and saved with the same name with ".rda". Alternatively, a list of gene sets is allowed; the name of each element of the list should be set as a unique pathway name, and each pathway should have a character vector of gene symbols.
}
\item{isParallel}{
Whether to use multiple cores in parallel for fast computing. By default, it is false.
}
\item{nCores}{
When isParallel is true, the number of cores can be set. By default, all cores in the machine are used in the unix-like machine, and 2 cores are used in windows.
}
\item{useCache}{
Whether imported gmt file should be saved for the next use. By default, it is true.
}
\item{filterGenes}{
Whether to use gene filtering (recommended).
}
\item{maxNApctAllowed}{
Filtering out genes which have missing values more than specified ratio (Default .3). Applied if filterGenes is TRUE.
}
\item{cutRatioByMean}{
Filtering out specified ratio of genes which have least expression value (Default .4). Applied if filterGenes is TRUE.
}
\item{cutRatioByVar}{
Filtering out specified ratio of genes which have least sample wise expression variance (Default .4). Applied if filterGenes is TRUE.
}
\item{minNumGenes}{
Minimum number of genes in a pathway. A pathway which has fewer members than the specified value will be removed.
}
\item{verbose}{
Whether to print out logs.
}
\item{resp.type}{
The type of response variable. Three options are: "Twoclass" (unpaired), "Multiclass", "Survival." By default, Twoclass is used
}
}
\value{
A proto R object.
Use RunQC function to run QC procedure.
Use Plot function to plot PCA figure.
Use Print function to view various information.
See examples below.
}
\references{
Dongwan D. Kang, Etienne Sibille, Naftali Kaminski, and George C. Tseng. (Nucleic Acids Res. 2012) MetaQC: Objective Quality Control and Inclusion/Exclusion Criteria for Genomic Meta-Analysis.
}
\author{
Don Kang (donkang75@gmail.com) and George Tseng (ctseng@pitt.edu)
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{runQC}}
}
\examples{
\dontrun{
requireAll(c("proto", "foreach"))
## Toy Example
data(brain) #already hugely filtered
#Two default gmt files are automatically downloaded,
#otherwise it is required to locate it correctly.
#Refer to http://www.broadinstitute.org/gsea/downloads.jsp
brainQC <- MetaQC(brain, "c2.cp.biocarta.v3.0.symbols.gmt",
filterGenes=FALSE, verbose=TRUE)
#B is recommended to be >= 1e4 in real application
runQC(brainQC, B=1e2, fileForCQCp="c2.all.v3.0.symbols.gmt")
brainQC
plot(brainQC)
## For parallel computation with only 2 cores
## R >= 2.14.0 in windows to use parallel computing
brainQC <- MetaQC(brain, "c2.cp.biocarta.v3.0.symbols.gmt",
filterGenes=FALSE, verbose=TRUE, isParallel=TRUE, nCores=2)
#B is recommended to be >= 1e4 in real application
runQC(brainQC, B=1e2, fileForCQCp="c2.all.v3.0.symbols.gmt")
plot(brainQC)
## For parallel computation with half cores
## In windows, only 3 cores are used if not specified explicitly
brainQC <- MetaQC(brain, "c2.cp.biocarta.v3.0.symbols.gmt",
filterGenes=FALSE, verbose=TRUE, isParallel=TRUE)
#B is recommended to be >= 1e4 in real application
runQC(brainQC, B=1e2, fileForCQCp="c2.all.v3.0.symbols.gmt")
plot(brainQC)
## Real Example which is used in the paper
#download the brainFull file
#from https://github.com/downloads/donkang75/MetaQC/brainFull.rda
load("brainFull.rda")
brainQC <- MetaQC(brainFull, "c2.cp.biocarta.v3.0.symbols.gmt", filterGenes=TRUE,
verbose=TRUE, isParallel=TRUE)
runQC(brainQC, B=1e4, fileForCQCp="c2.all.v3.0.symbols.gmt") #B was 1e5 in the paper
plot(brainQC)
## Survival Data Example
#download Breast data
#from https://github.com/downloads/donkang75/MetaQC/Breast.rda
load("Breast.rda")
breastQC <- MetaQC(Breast, "c2.cp.biocarta.v3.0.symbols.gmt", filterGenes=FALSE,
verbose=TRUE, isParallel=TRUE, resp.type="Survival")
runQC(breastQC, B=1e4, fileForCQCp="c2.all.v3.0.symbols.gmt")
breastQC
plot(breastQC)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ QualityControl }
\keyword{ MetaAnalysis }% __ONLY ONE__ keyword per line
\keyword{ Microarray }