blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7ffd81a943ff7ad4696848ec796fcb34fdd9768e | 28583c4f03ab95aa8396fd35aa8e2a7f47ebf712 | /man/test_package.Rd | 13483bd733120d2ced55d8a205490cdd8ab588bd | [] | no_license | vishalbelsare/autotest | 3f2b0ee5f767722f6dec19239538c9fb9dc73e48 | b9aa8528088a2c5faa56fc38754138cde56b74dc | refs/heads/master | 2020-03-27T02:39:46.694035 | 2018-04-06T03:47:22 | 2018-04-06T03:47:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,307 | rd | test_package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test-package.R
\name{test_package}
\alias{test_package}
\alias{test_check}
\title{Run all tests in an installed package.}
\usage{
test_package(package, filter = NULL, reporter = default_reporter(), ...)
test_check(package, filter = NULL, reporter = "check", ...)
}
\arguments{
\item{package}{package name}
\item{filter}{If not \code{NULL}, only tests with file names matching this
regular expression will be executed. Matching will take on the file
name after it has been stripped of \code{"test-"} and \code{".R"}.}
\item{reporter}{reporter to use}
\item{...}{Additional arguments passed to \code{\link[=grepl]{grepl()}} to control filtering.}
}
\value{
the results as a "autotest_results" (list)
}
\description{
Test are run in an environment that inherits from the package's namespace
environment, so that tests can access non-exported functions and variables.
Tests should be placed in \code{tests/autotest}. Use \code{test_check()} with
\code{R CMD check} and \code{test_package()} interactively at the console.
}
\section{R CMD check}{
Create \code{tests/autotest.R} that contains:
\preformatted{
library(autotest)
library(yourpackage)
test_check("yourpackage")
}
}
\examples{
\dontrun{test_package("autotest")}
}
|
c6633e0477735200bcdf7094e9ec331e8b5f786d | b5f8227ec8d3fa529551fd45d6123baedc099d1d | /R/filters.R | 06cd6d52855d24120f213a05c954bbf071103e78 | [] | no_license | alb202/PACER | 9440ae37afda9abcd6441fcf584e76dfcca7bdd6 | b6407405347b8561bcf5642f0ba118ed1cc1925f | refs/heads/master | 2021-01-25T11:27:27.787253 | 2018-06-09T00:57:06 | 2018-06-09T00:57:06 | 93,922,655 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,997 | r | filters.R | filter_unique_positions <- function(gr){
gr <- sort.GenomicRanges(gr)
return(sort.GenomicRanges(gr[!(GenomicRanges::duplicated.GenomicRanges(x = gr))]))
}
filter_by_gene <- function(gr, gene_list, invert=FALSE){
  # Subset `gr` to entries whose gene ID occurs in `gene_list` (or does NOT
  # occur, when invert = TRUE). The annotation column compared against is
  # chosen automatically: ensembl_gene_id if `gene_list` looks like Ensembl
  # IDs for this `gr`, external_gene_name otherwise (see is_ensembl_gene_name).
  #
  # An empty or blank gene list means "no filtering": return `gr` unchanged.
  # The length-0 guard plus short-circuiting `&&` fixes the original scalar
  # `&`, which crashed with "argument is of length zero" when gene_list was
  # NULL; isTRUE() additionally shields against an NA first element.
  if (length(gene_list) == 0 || (length(gene_list) == 1 && isTRUE(gene_list[1] == ""))) {
    return(gr)
  }
  # Compare against whichever annotation column the list appears to use
  if (isTRUE(is_ensembl_gene_name(gr = gr, gene_list = gene_list))) {
    matches <- mcols(gr)$ensembl_gene_id %in% gene_list
  } else {
    matches <- mcols(gr)$external_gene_name %in% gene_list
  }
  # invert = TRUE keeps the entries whose gene is NOT in the list
  if (isTRUE(invert)) {
    matches <- !matches
  }
  return(gr[matches])
}
is_ensembl_gene_name <- function(gr, gene_list){
  # Heuristic: does `gene_list` contain Ensembl gene IDs for this `gr`?
  # TRUE when at least as many list entries match mcols(gr)$ensembl_gene_id
  # as match mcols(gr)$external_gene_name (ties favor Ensembl, as before).
  # Returns the comparison directly instead of the original
  # if (...) return(TRUE) else return(FALSE) construction.
  ensembl_matches <- sum(gene_list %in% mcols(gr)$ensembl_gene_id)
  external_matches <- sum(gene_list %in% mcols(gr)$external_gene_name)
  return(ensembl_matches >= external_matches)
}
assign_5prime_to_a_length <- function(gr, primary_length){
  # Assign each 5' position to reads of `primary_length`: every alignment of
  # the primary length is kept, and alignments of any other length survive
  # only if their 5' end (range start on "+", range end on "-") is NOT shared
  # with a primary-length alignment.
  # Split by strand so the 5' end can be matched with the right overlap type
  pos <- gr[strand(gr)=="+"]
  neg <- gr[strand(gr)=="-"]
  # Alignments of the primary length
  pos_primary <- pos[width(pos)==primary_length]
  neg_primary <- neg[width(neg)==primary_length]
  # Alignments of every other length
  pos_secondary <- pos[width(pos)!=primary_length]
  neg_secondary <- neg[width(neg)!=primary_length]
  # Indices of secondary alignments sharing a 5' end with a primary alignment
  pos_hits <- unique(subjectHits(findOverlaps(query = pos_primary, subject = pos_secondary, minoverlap = 1, type = "start", select = "all")))
  neg_hits <- unique(subjectHits(findOverlaps(query = neg_primary, subject = neg_secondary, minoverlap = 1, type = "end", select = "all")))
  # Drop them, guarding the zero-hit case: x[-integer(0)] selects NOTHING in
  # R, so the original silently discarded ALL secondary alignments whenever
  # findOverlaps returned no hits.
  if (length(pos_hits) > 0) pos_secondary <- pos_secondary[-pos_hits]
  if (length(neg_hits) > 0) neg_secondary <- neg_secondary[-neg_hits]
  # Combine the primary-length alignments with the surviving secondaries, sorted
  return(sort.GenomicRanges(c(pos_primary, neg_primary, pos_secondary, neg_secondary)))
}
#
# assign_5prime_to_longer_slower <- function(gr){
# chromosomes <- sort(unique.Vector(seqnames(gr)))
# lengths <- sort(unique.Vector(width(gr)), decreasing = TRUE)
# results <- GAlignments()
# for(j in 1:length(chromosomes)){
# #print(chromosomes[j])
# # Filter alignments by strand
# pos <- gr[strand(gr)=="+" & seqnames(gr)==chromosomes[j]]
# neg <- gr[strand(gr)=="-" & seqnames(gr)==chromosomes[j]]
# #results <- c(gr[width(gr)==lengths[1]])
# for (i in 1:(length(lengths))){
# #print(lengths[i])
# #print(length(pos))
# #print(length(neg))
# # Filter the primary length alignments
# pos_primary <- pos[width(pos)==lengths[i]]
# neg_primary <- neg[width(neg)==lengths[i]]
# #print(length(pos_primary))
# #print(length(neg_primary))
# # Remove the alignments that share a 5' end with a primary alignment
# pos <- pos[!start(pos) %in% as.vector(start(pos_primary))]
# neg <- neg[!end(neg) %in% as.vector(end(neg_primary))]
# # Concatenate, sort and return results
# results <- c(results, pos_primary, neg_primary)
# }
# }
# #print(class(results))
# return(sort.GenomicRanges(results))
# }
assign_5prime_to_longer <- function(gr){
  # Greedy 5'-end assignment favoring longer reads: starting from the longest
  # read length and working down, a shorter read is kept only if its 5' end
  # (range start on "+", range end on "-") has not already been claimed by an
  # accepted longer read.
  lengths <- sort(unique.Vector(width(gr)), decreasing = TRUE)
  # All reads of the longest length are accepted unconditionally
  results <- c(gr[width(gr)==lengths[1]])
  # seq_len() guards the single-length case: the original `1:(length(lengths)-1)`
  # iterated over c(1, 0) when only one read length was present.
  for (i in seq_len(length(lengths) - 1)){
    lower <- gr[width(gr) %in% lengths[(i+1)]]
    # Index (within the accepted set) of the first accepted read sharing each
    # 5' end; NA means that 5' end is still unclaimed on that strand
    positive_results <- findOverlaps(query = lower,
                                     subject = results[strand(results)=="+"],
                                     type = "start",
                                     select = "first",
                                     ignore.strand=FALSE)
    negative_results <- findOverlaps(query = lower,
                                     subject = results[strand(results)=="-"],
                                     type = "end",
                                     select = "first",
                                     ignore.strand=FALSE)
    # Convert "hit index or NA" into "keep?": NA (unclaimed) -> TRUE
    positive_results <- replace(x = positive_results, !is.na(positive_results), FALSE)
    positive_results <- replace(x = positive_results, is.na(positive_results), TRUE)
    negative_results <- replace(x = negative_results, !is.na(negative_results), FALSE)
    negative_results <- replace(x = negative_results, is.na(negative_results), TRUE)
    # Keep reads whose 5' end is unclaimed on their own strand
    results <- c(results, lower[positive_results & negative_results])
  }
  return(sort.GenomicRanges(results))
}
filter_alignments_by_size <- function(alignments, minimum=10, maximum=30){
  # Keep alignments whose query width lies in the closed interval
  # [minimum, maximum]; the surviving alignments are returned sorted.
  widths <- qwidth(alignments)
  in_range <- widths >= minimum & widths <= maximum
  return(sort.GenomicRanges(alignments[in_range]))
}
filter_BAM_tags <- function(gr){
  # Build three logical filters from the BAM NM (edit distance) and MD
  # (mismatch string) tags of `gr`:
  #   no_mm      -- alignments with no mismatches at all
  #   two_mm     -- alignments with at most two mismatches
  #   no_mm_seed -- alignments with no mismatch in the first 22 bases of the
  #                 read (the 5'-most bases, i.e. the "seed" region)
  # Get the index for alignments with no mismatches
  no_mismatches_index <- mcols(gr)$NM==0
  # Get the index for alignments with up to 2 mismatches
  two_mismatches_index <- mcols(gr)$NM<=2
  # Split MD on mismatch letters; each numeric chunk is a run of matches
  MD_split <- strsplit(mcols(gr)$MD, split = "[A-Z]")
  # setA: zero mismatches overall, so the seed is trivially clean
  setA <- ifelse(as.numeric(mcols(gr)$NM)<=0, TRUE, FALSE)
  # setB: "+" strand reads whose first matching run covers >= 22 bases.
  # NOTE(review): an MD string that *starts* with a letter makes
  # as.numeric("") = NA here (with a warning), which propagates through the
  # ifelse -- confirm MD always begins with a (possibly zero) match count,
  # as samtools emits.
  setB <- ifelse(strand(gr)=="+"&unlist(lapply(X = MD_split, FUN = function(x) as.numeric(x[[1]][1])>=22)), TRUE, FALSE)
  # setC: "-" strand reads -- the read's 5' end corresponds to the LAST
  # matching run of the reference-oriented MD string
  setC <- ifelse(strand(gr)=="-"&unlist(lapply(X = MD_split, FUN = function(x) as.numeric(x[[length(x)]][1])>=22)), TRUE, FALSE)
  # Seed is clean if mismatch-free overall, or clean on the proper 5' side
  no_mismatches_in_seed_index <- setA | setB | setC
  return(list(no_mm=no_mismatches_index, two_mm=two_mismatches_index, no_mm_seed=no_mismatches_in_seed_index))
}
#
# filter_MD_tag <- function(strand, NM, MD){
# if(NM==0)
# return(TRUE)
# MD_split <- strsplit(x = MD, split = "[A-Z]")
# # return(filter_MD_tags3(strand = strand, MD = MD_split))
# if(strand=="-")
# MD_split <- rev(MD_split[[1]])
# if((as.numeric(MD_split[[1]][1])>=as.numeric(22)))
# return(TRUE)
# else
# return(FALSE)
# MD <- strsplit(x = mcols(gr)$MD, split = "[A-Z]")
# ((strand(gr)=="+" & unlist(lapply(X = MD, FUN = function(x) as.numeric(x[[1]])))>=22) |
# (strand(gr)=="-" & unlist(lapply(X = rev(MD), FUN = function(x) as.numeric(x[[1]])))>=22) | )
#else()
# # return(FALSE)
# }
filter_by_metadata <- function(target, source, column){
  # Keep entries of `target` whose value in metadata column `column` also
  # occurs in the same metadata column of `source`; returned sorted.
  keep <- mcols(target)[, column] %in% mcols(source)[, column]
  return(sort.GenomicRanges(target[keep]))
}
filter_by_regions <- function(gr, regions, type=c("both", "sense", "antisense"), invert=FALSE){
  # Subset `gr` to ranges overlapping `regions` (or NOT overlapping, when
  # invert = TRUE).
  #   type = "both"      : strand is ignored
  #   type = "sense"     : same-strand overlaps only
  #   type = "antisense" : overlaps with the opposite strand of `regions`
  # match.arg() resolves the default vector to "both" and rejects invalid
  # values up front; the original compared the full default vector with `==`
  # and an unknown `type` fell through to "object 'results' not found".
  type <- match.arg(type)
  if (type == "both") {
    results <- subsetByOverlaps(query = gr, subject = regions, invert = invert, ignore.strand=TRUE)
  } else if (type == "sense") {
    results <- subsetByOverlaps(query = gr, subject = regions, invert = invert, ignore.strand=FALSE)
  } else {
    # antisense: flip the region strands, then require strand agreement
    strand(regions) <- invert_vector(as.character(strand(regions)))
    results <- subsetByOverlaps(query = gr, subject = regions, invert = invert, ignore.strand=FALSE)
  }
  return(sort.GenomicRanges(results))
}
filter_RNA_from_intervals <- function(gr){
  # Remove structural/non-coding RNA annotations (snoRNA, miRNA, rRNA, tRNA,
  # snRNA) from `gr`, returning the remainder sorted.
  # `gene_biotype` is resolved in the metadata columns by subset(). Note that
  # entries with an NA biotype make the condition NA and are therefore also
  # dropped (subset() treats NA as FALSE) -- preserved as-is.
  results <- subset(x = gr, 
                    gene_biotype!="snoRNA" &
                      gene_biotype!="miRNA" &
                      gene_biotype!="rRNA" &
                      gene_biotype!="tRNA" &
                      gene_biotype!="snRNA")
  return(sort.GenomicRanges(results))
}
remove_overrepresented_sequences <- function(alignments, cutoff=0.001){
  # Drop alignments whose read sequence occurs in strictly more than `cutoff`
  # (as a fraction) of all alignments -- e.g. adapter or contaminant reads.
  # table() replaces the original rle(sort(...)) + data.frame round-trip;
  # the per-sequence counts and the > threshold are identical.
  counts <- table(as.character(mcols(alignments)$seq))
  overrepresented <- names(counts)[counts > cutoff * length(alignments)]
  # `seq` is resolved in the alignments' metadata columns by subset()
  results <- subset(alignments, !(seq %in% overrepresented))
  return(sort.GenomicRanges(results))
}
subsample_gr <- function(gr, size){
  # Return `size` ranges drawn uniformly without replacement, sorted.
  # sample.int() avoids the 1:length(gr) trap of the original: on an empty
  # input, sample(1:0, ...) drew from c(1, 0) instead of failing cleanly.
  idx <- sample.int(length(gr), size = size, replace = FALSE)
  return(sort.GenomicRanges(gr[idx]))
}
filter_ambiguous_bases <- function(seqs){
  # TRUE for sequences containing only unambiguous bases, FALSE for any
  # sequence containing an IUPAC ambiguity code (R/Y/S/W/K/M/B/D/H/V/N,
  # either case) or a '.' placeholder.
  # grepl() is already vectorized, so the original per-element mclapply loop
  # was pure overhead; a single call returns the identical (unnamed) logical.
  return(!grepl(pattern = "R|Y|S|W|K|M|B|D|H|V|N|\\.",
                x = as.character(seqs),
                ignore.case = TRUE,
                fixed = FALSE))
}
# Test the 5' filter
# table(start(two_mm_5prime_filtered[width(two_mm_5prime_filtered)!=22 & strand(two_mm_5prime_filtered)=="+" &
# seqnames(two_mm_5prime_filtered)=="I"]) %in% start(two_mm_5prime_filtered[width(two_mm_5prime_filtered)==22 &
# strand(two_mm_5prime_filtered)=="+" & seqnames(two_mm_5prime_filtered)=="I"]))
|
bb7dcc7c3d100b6e45fd60d39bbce585c46bd9e7 | 6ce79966b1b89de1a6d6eb29cea945188c18652c | /man/delta.sumqlogdetK.Rd | 381b5827d03902dbacb86a614231b958801bc946 | [] | no_license | feng-li/movingknots | d3041a0998f0873459814a09e413c714fff700c6 | 5f921070e4cd160a831c5191255f88dd7d4c850c | refs/heads/master | 2021-06-10T00:18:57.172246 | 2021-03-22T05:56:44 | 2021-03-22T05:56:44 | 145,708,629 | 4 | 3 | null | null | null | null | UTF-8 | R | false | true | 512 | rd | delta.sumqlogdetK.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/models__linear__etc__delta.sumqlogdetK.R
\name{delta.sumqlogdetK}
\alias{delta.sumqlogdetK}
\title{gradient for sum q_i log|K_i|}
\usage{
delta.sumqlogdetK(q.i, diag.K)
}
\arguments{
\item{q.i}{NA}
\item{diag.K}{NA}
\item{args}{"list" args$subset: the subsets of the diagonal K matrix.}
}
\value{
"matrix" 1-by-
}
\description{
Details
}
\references{
NA
}
\author{
Feng Li, Department of Statistics, Stockholm University, Sweden.
}
|
95bd1504922515708d53d012c4e6a10d0926ac2e | 0f48862850cbe23f93ff6a2dbb95337aa0fadb94 | /tests/testthat/test_samtools_pileup.R | ad5516388e0133e6a7817a9eb46c5f32670636b3 | [] | no_license | iansealy/MMAPPR2 | 3d9f52f53864dbff5fcbe5ba0165bd572579f526 | 06ef8906ba70d650af588cc2ba845d9068746259 | refs/heads/master | 2022-07-20T07:35:42.544745 | 2019-09-30T16:50:45 | 2019-09-30T16:50:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,977 | r | test_samtools_pileup.R | context('Samtools Pileup')
test_that("Simple pileup works", {
    skip_if_not_installed('mockery')
    # Three rows of canned samtools mpileup output: depth 5 at each position,
    # one C mismatch per row.
    pileup_lines <- paste(
        c('1 1 A 5 ....C AAAAA',
          '1 2 A 5 ...C. AAAAA',
          '1 3 A 5 ..C.. AAAAA'),
        collapse = '\n'
    )
    # Replace the system2() call inside .samtoolsPileup with the canned output
    mockery::stub(.samtoolsPileup, 'system2', pileup_lines)
    bam_files <- Rsamtools::BamFileList(c(Rsamtools::BamFile("f")))
    result <- .samtoolsPileup(bam_files, new('MmapprParam', minDepth = 0), new("GRanges"))
    # One counts column per input file plus the shared identifier columns
    expect_true(all(names(result) %in% c('nucleotide', 'pos', 'f')))
    # 3 positions x 5 rows (A, C, G, T, coverage) each
    expect_equal(nrow(result), 15)
    expect_equal(as.character(result$nucleotide[1:5]), c('A', 'C', 'G', 'T', 'cvg'))
})
test_that("Pileup filters by depth", {
    skip_if_not_installed('mockery')
    # Canned mpileup output: positions 1-2 have depth 5, position 3 has
    # depth 6, so minDepth = 6 should keep only position 3.
    output <- paste(
        '1 1 A 5 ....C AAAAA',
        '1 2 A 5 ...C. AAAAA',
        '1 3 A 6 ..C... AAAAAA',
        sep='\n'
    )
    # Replace the system2() call inside .samtoolsPileup with the canned output
    mockOutput <- mockery::stub(.samtoolsPileup, 'system2', output)
    files <- Rsamtools::BamFileList(c(Rsamtools::BamFile("f")))
    pileup <- .samtoolsPileup(files, new('MmapprParam', minDepth=6), new("GRanges"))
    expect_true(all(names(pileup) %in% c('nucleotide', 'pos', 'f')))
    # Only the depth-6 position survives: 1 position x 5 rows (A, C, G, T, cvg)
    expect_equal(nrow(pileup), 5)
    expect_equal(as.character(pileup$nucleotide[1:5]), c('A', 'C', 'G', 'T', 'cvg'))
})
test_that("Pileup for multiple files is read correctly", {
    skip_if_not_installed('mockery')
    output <- paste(
        '1 1 A 5 ....C AAAAA',
        '1 2 A 5 ...C. AAAAA',
        '1 3 A 5 ..C.. AAAAA',
        sep='\n'
    )
    # One canned response per BAM file; expect_called() below verifies that
    # both were consumed (i.e. system2 ran once per file).
    mockOutput <- mockery::mock(output, output)
    mockery::stub(.samtoolsPileup, 'system2', mockOutput)
    files <- Rsamtools::BamFileList(c(Rsamtools::BamFile("f1"), Rsamtools::BamFile("f2")))
    pileup <- .samtoolsPileup(files, new('MmapprParam', minDepth=5), new("GRanges"))
    # One counts column per input file plus the shared identifier columns
    expect_true(all(names(pileup) %in% c('nucleotide', 'pos', 'f1', 'f2')))
    # 3 positions x 5 rows (A, C, G, T, cvg) each
    expect_equal(nrow(pileup), 15)
    expect_equal(as.character(pileup$nucleotide[1:5]), c('A', 'C', 'G', 'T', 'cvg'))
    mockery::expect_called(mockOutput, 2)
})
test_that("Depth filter leaves NAs with outer join", {
    skip_if_not_installed('mockery')
    # f1 is below minDepth at position 1, f2 at position 2; positions retained
    # through the outer join should carry NA for the file that failed the
    # depth filter. Position 3 fails in both files and should be gone.
    output1 <- paste(
        '1 1 A 4 .... AAAA',
        '1 2 A 5 ...C. AAAAA',
        '1 3 A 4 ..C. AAAA',
        sep='\n'
    )
    output2 <- paste(
        '1 1 A 5 ....C AAAAA',
        '1 2 A 4 ...C AAAA',
        '1 3 A 4 ..C. AAAA',
        sep='\n'
    )
    mockOutput <- mockery::mock(output1, output2)
    mockery::stub(.samtoolsPileup, 'system2', mockOutput)
    files <- Rsamtools::BamFileList(c(Rsamtools::BamFile("f1"), Rsamtools::BamFile("f2")))
    pileup <- .samtoolsPileup(files, new('MmapprParam', minDepth=5), new("GRanges"))
    expect_true(all(names(pileup) %in% c('nucleotide', 'pos', 'f1', 'f2')))
    # Two surviving positions x 5 rows (A, C, G, T, cvg)
    expect_equal(nrow(pileup), 10)
    expect_equal(as.character(pileup$nucleotide[1:5]), c('A', 'C', 'G', 'T', 'cvg'))
    expect_equal(sum(pileup$pos == 1), 5)
    expect_equal(sum(pileup$pos == 2), 5)
    # The low-depth side of each retained position is all-NA
    expect_equal(as.numeric(pileup$f1[pileup$pos == 1]), rep(as.numeric(NA), times=5))
    expect_equal(as.numeric(pileup$f2[pileup$pos == 2]), rep(as.numeric(NA), times=5))
    mockery::expect_called(mockOutput, 2)
    # FIX: removed a leftover `mockery::stub(.calcDistForChr, ...)` call that
    # sat after the final expectation; it stubbed an unrelated function and
    # had no effect on this test.
})
test_that("Bad data throws error", {
    skip_if_not_installed('mockery')
    # Malformed mpileup rows: extra junk tokens, a non-numeric position and a
    # garbled depth field -- parsing is expected to fail.
    output <- paste(
        '1 1 A 5 $$%^#Q ....C AAAAA',
        '1 2 3sdfsd 5 ...C. AAAAA',
        'Z 3 GARBAGE..C.. AAAAA',
        sep='\n'
    )
    mockOutput <- mockery::stub(.samtoolsPileup, 'system2', output)
    # FIX: the original called `samtoolsPileup` (undefined), so expect_error()
    # passed for the wrong reason ("could not find function"). Calling
    # .samtoolsPileup, as every other test here does, makes the expected error
    # actually come from parsing the bad output.
    expect_error(.samtoolsPileup(
        Rsamtools::BamFile("f"), new('MmapprParam', minDepth=0), new("GRanges"))
    )
})
bb0125bd3d4d61e2ee75ab2a3880fcdd34bf011a | 02754f51d7970c6c76097084e8fa1a75bd7bf8dc | /week4-cluster-sum2/utils.R | 53cb2cee6a36047baf7e950c8a6d9d8d70d789bf | [] | no_license | tutrunghieu/html2015b | eea165eaddc2953ae7097446c393e5433307febd | 10e0933d4d5d7b5fd5a4348ac90929eb3ffbad85 | refs/heads/master | 2016-09-03T07:01:02.903848 | 2015-10-17T15:11:01 | 2015-10-17T15:11:01 | 42,796,729 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,287 | r | utils.R |
eval <- function(X, L, C)
{
  # Total clustering cost: for every row k of X, the mean absolute deviation
  # from its assigned center C[L[k], ], summed over all rows.
  # NOTE: this name masks base::eval in any session that sources this file.
  #
  # X : numeric matrix of observations, one row per point
  # L : vector of cluster labels, one per row of X (used to index rows of C)
  # C : numeric matrix of cluster centers, same number of columns as X
  #
  # Vectorized replacement for the original row-by-row loop: identical
  # result in one pass over the matrix, and the zero-row case now returns 0
  # instead of failing on the 1:0 loop.
  deviations <- abs(X - C[L, , drop = FALSE])
  return(sum(rowMeans(deviations)))
}
colorAvg <- function(img)
{
  # Mean intensity of each color channel of a H x W x 3 image array,
  # returned as an unnamed numeric vector c(R, G, B).
  vapply(1:3, function(channel) mean(img[, , channel]), numeric(1))
}
colorAvg2 <- function(img)
{
  # Average color (via colorAvg) of each of the four quadrants of `img`,
  # returned as a 4x3 matrix: rows v1..v4 = top-left, bottom-left, top-right,
  # bottom-right quadrant; columns = R, G, B.
  # Integer division replaces the original fractional halves (nrow/2 on odd
  # dimensions produced non-integer indices that R silently truncated); the
  # pixels selected are identical, the intent is just explicit now.
  mr <- nrow(img) %/% 2
  mc <- ncol(img) %/% 2
  a1 <- img[1:mr, 1:mc, ]
  a2 <- img[1:mr + mr, 1:mc, ]
  a3 <- img[1:mr, 1:mc + mc, ]
  a4 <- img[1:mr + mr, 1:mc + mc, ]
  v1 <- colorAvg(a1)
  v2 <- colorAvg(a2)
  v3 <- colorAvg(a3)
  v4 <- colorAvg(a4)
  # rbind of the named vectors keeps the v1..v4 rownames of the original
  m <- rbind(v1, v2, v3, v4)
  return(m)
}
colorAvg3 <- function(img)
{
  # Two-level quadrant averages: split `img` into four quadrants and stack
  # the 4x3 colorAvg2 matrix of each, giving a 16x3 matrix (sub-quadrant
  # order: top-left, bottom-left, top-right, bottom-right).
  # %/% replaces the original fractional halves -- same pixels, explicit
  # integer indices (see colorAvg2).
  mr <- nrow(img) %/% 2
  mc <- ncol(img) %/% 2
  a1 <- img[1:mr, 1:mc, ]
  a2 <- img[1:mr + mr, 1:mc, ]
  a3 <- img[1:mr, 1:mc + mc, ]
  a4 <- img[1:mr + mr, 1:mc + mc, ]
  v1 <- colorAvg2(a1)
  v2 <- colorAvg2(a2)
  v3 <- colorAvg2(a3)
  v4 <- colorAvg2(a4)
  m <- rbind(v1, v2, v3, v4)
  return(m)
}
colorAvg123 <- function(img)
{
  # Read a JPEG from path `img` and return the whole-image, quadrant and
  # sub-quadrant color averages stacked and flattened into one numeric
  # feature vector.
  pixels <- readJPEG(img)
  features <- rbind(colorAvg(pixels), colorAvg2(pixels), colorAvg3(pixels))
  return(as.vector(features))
}
|
aaaaefd8f168e400789a82da0e59fbcc087509c6 | 7f7b6c972c584b10bfb41e16d585140461619936 | /StaticDynamicStreamMapping/NHD_compare.R | b92b6249ba988ba3399f2ff4dfa16fa2ccb836c0 | [] | no_license | khafen74/ProjectAnalysisFiguresR | 288c291a122f34bfc17b2f05b774554f0ec9b88b | 60f375b75af3faa2c5f44e659923d179a32ffc56 | refs/heads/master | 2021-01-02T22:43:25.755166 | 2017-11-03T11:12:29 | 2017-11-03T11:12:29 | 99,379,837 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 744 | r | NHD_compare.R | # Do setup ----------------------------------------------------------------
rm(list=ls())  # NOTE(review): wipes the caller's global environment -- avoid in shared scripts
setwd("C:\\konrad\\USGS\\PROSPER_NHD\\data\\csv")  # NOTE(review): machine-specific absolute path
fn <- "MR_HR_join.csv"
dat <- read.csv(fn)
# Differences between MR and HR classifications ---------------------------
# perInt = 1 where the MR FCODE is perennial (46006/55800) but the HR FCODE
# is intermittent/ephemeral (46003/46007); 0 otherwise
dat$perInt <- ifelse((dat$FCODE==46006 | dat$FCODE==55800)&
                       (dat$FCODE_1==46003 | dat$FCODE_1==46007), 1, 0)
# intPer = 1 for the reverse conversion (MR intermittent -> HR perennial)
dat$intPer <- ifelse((dat$FCODE==46003 | dat$FCODE==46007)&
                       (dat$FCODE_1==46006 | dat$FCODE_1==55800), 1, 0)
# Restrict to August-October records (Month 8, 9, 10)
datsub <- subset(dat, dat$Month > 7 & dat$Month <11)
# Calculate classification conversions (MR ---> HR) -----------------------
# Fractions of the late-season subset that changed classification
perInt <- sum(datsub$perInt)/nrow(datsub)
intPer <- sum(datsub$intPer)/nrow(datsub)
|
93456ea0db79bd10b13b7aea16d26954d67f9a16 | 4d07a036ce45095d2cb210400886f3dd4accaa95 | /MetaboAnalyst/src/main/webapp/resources/rscripts/metaboanalystr/correlation_ml.R | 4755a21b21cf2d28d599347769ec07f91ef5866c | [] | no_license | Sam-Stuart/Wegan | 15d293a997885f38dfc19c73a22333637f3b4c64 | 67325369d0e41750f6c7a2a663fae0f9f86318de | refs/heads/master | 2023-05-11T20:18:11.212834 | 2022-09-12T16:08:25 | 2022-09-12T16:08:25 | 254,185,020 | 3 | 2 | null | 2023-05-10T21:46:26 | 2020-04-08T19:45:57 | R | UTF-8 | R | false | false | 13,901 | r | correlation_ml.R | #'Perform Machine Learning Regression'
#'@description Build a linear regression model for one user selected predictor variable
#'@usage reg.machine.anal(mSetObj=NA, method=method)
#'@param mSetObj Input the name of the created mSetObj
#'@param method Set ML regression method, default is random forest
#'@author Louisa Normington\email{normingt@ualberta.ca}
#'University of Alberta, Canada
#'License: GNU GPL (>= 2)
#'@export
ml.reg.anal <- function(mSetObj=NA, 
                        method="random forest", 
                        data="false"
                        ) {
  # Fit a machine-learning regression (SVM or random forest) of the FIRST
  # data column on all remaining columns. The fitted objects are stored in
  # mSetObj$analSet$svmReg or mSetObj$analSet$rfReg and a plain-text summary
  # is written to the working directory.
  #
  # mSetObj : MetaboAnalyst state object (resolved via .get.mSet)
  # method  : "SVM" -> e1071::svm tuned with tune(); any other value ->
  #           random forest via randomForest::tuneRF
  # data    : "false" (default) uses the normalized table; otherwise original
  #install.packages(c("e1071", "randomForest"))
  library("e1071")
  library("randomForest")
  library("Metrics")
  mSetObj <- .get.mSet(mSetObj)
  ### SET DATA (whether to use original data or not)
  if (data == "false") {
    input <- mSetObj$dataSet$norm #default: use the normalized data
  } else {
    input <- mSetObj$dataSet$orig
  }
  #Text should be visible to user
  AddErrMsg("The first column will be the response variable. The remaining columns will be the predictor variables.")
  AddErrMsg("Response variable must be numeric for machine regression analysis. Predictor variables can be numeric or categorical.")
  AddErrMsg("For categorical variables, make sure to use characters for the levels and not numbers. For example, if you have levels 1, 2 and 3, change the level labels to I, II and III.")
  #Generate test and train data for model building (fixed seed -> reproducible split)
  set.seed(37)
  index <- sample(1:nrow(input), 0.7*nrow(input))
  train_data <- input[index,]
  test_data <- input[-index,]
  predictors_train <- model.matrix(train_data[,1] ~ ., train_data)[,-1] # Train predictor variables, creating dummy variables for categorical variables; [,-1] drops the intercept column
  predictors_test <- model.matrix(test_data[,1] ~ ., test_data)[,-1] # Test predictor variables, creating dummy variables for categorical variables
  response_train_name <- colnames(input)[1] #response variable name (first column)
  predictors_train_name <- colnames(predictors_train)[-1] #NOTE(review): drops the FIRST predictor column's name on top of the intercept already removed above -- confirm intentional
  #Text should be visible to user
  cat("The train data for model building is 70% of the dataset, while the test data for model testing is 30% of the dataset.")
  #Generate formula: response ~ predictor1 + predictor2 + ...
  formula <- as.formula(paste(response_train_name, "~", paste(predictors_train_name, collapse = "+")))
  if (method == "SVM") {
    #Build model: grid-search over epsilon and cost, keep the best SVM.
    #NOTE(review): `data` here is the predictor matrix only -- it does not
    #contain the response column named in `formula`; confirm tune() can
    #actually resolve the response in this call.
    model <- e1071::tune(e1071::svm, formula, data = as.data.frame(predictors_train), ranges = list(epsilon = seq(0,1,0.1), cost = 2^(seq(0.5,8,.5))))
    tunedModel <- model$best.model
    model_name <- "SVM Regression"
    #Extract predicted values on the held-out test set
    prediction <- predict(tunedModel, newdata = as.matrix(predictors_test)) #Need to create loop for when family="multinomial"
    #Store results for plotting (read back by ml.pred.plot)
    mSetObj$analSet$svmReg$meth <- model_name
    mSetObj$analSet$svmReg$pred <- prediction
    mSetObj$analSet$svmReg$test <- test_data
    #Generate and download summary of parameter testing and write to txt document
    summary <- summary(tunedModel)
    residuals <- residuals(tunedModel)
    decision_values <- tunedModel[["decision.values"]]
    fitted <- predict(tunedModel)  # training-set fits
    #NOTE(review): RMSE is computed against predictors_train[,1] -- the first
    #PREDICTOR column, not the response train_data[,1]; confirm.
    svm_RMSE <- Metrics::rmse(predictors_train[,1], fitted)
    fileName <- "ML_regression_summary.txt"#"SVM_regression_summary.txt"
    #Store results ("RSME" key name kept as-is for downstream compatibility, though it is a typo of RMSE)
    mSetObj$analSet$svmReg$res <- list(summary = summary, predicted.values = fitted, residuals = residuals, decision.values = decision_values, RSME = svm_RMSE, fileName = fileName)
    mSetObj$analSet$svmReg$mod <- list(model_name = model_name, model = model, response = response_train_name, predictor = predictors_train_name)
    #Write the summary text file into the working directory so it can be
    #picked up for the report. NOTE(review): sink() is not protected by
    #on.exit(), so an error while printing leaves output redirected.
    sink(fileName)
    cat("Formula:\n")
    print(formula)
    # cat("\nReference category:\n")
    # cat(paste0(reference))
    print(summary)
    cat("Residuals:\n")
    print(residuals)
    cat("\nDecision values:\n")
    print(decision_values)
    cat("\nPredicted values:\n")
    print(fitted)
    cat("\nRMSE:\n")
    cat(paste0(svm_RMSE))
    sink()
  } else { #Method is random forest (the default)
    #Build model: tuneRF searches mtry and (doBest = TRUE) returns the fitted
    #forest. NOTE(review): x = predictors_train[,-1] drops the first predictor
    #column even though the intercept was already removed -- confirm.
    model <- randomForest::tuneRF(y = train_data[,1], x = predictors_train[,-1], ntreeTry = 500, stepFactor = 2, improve = 0.05, trace = TRUE, doBest = TRUE, plot = FALSE, importance = TRUE)
    model_name <- "Random Forest Regression"
    #Extract predicted values on the held-out test set
    prediction <- predict(model, newdata = as.matrix(predictors_test)) #Need to create loop for when family="multinomial"
    #Store results for plotting (read back by ml.pred.plot)
    mSetObj$analSet$rfReg$meth <- model_name
    mSetObj$analSet$rfReg$pred <- prediction
    mSetObj$analSet$rfReg$test <- test_data
    #Generate and download summary of parameter testing and write to txt document
    summary <- model
    predictor_importance <- randomForest::importance(model)
    fitted <- predict(model)  # out-of-bag training predictions
    #NOTE(review): same concern as above -- RMSE uses predictors_train[,1]
    #rather than the response; confirm.
    svm_RMSE <- Metrics::rmse(predictors_train[,1], fitted)
    fileName <- "ml_regression_summary.txt"#"random_forest_regression_summary.txt"
    #Store results ("RSME" key name kept as-is; typo of RMSE)
    mSetObj$analSet$rfReg$res <- list(summary = summary, predicted.values = fitted, RSME = svm_RMSE, predictor.importance = predictor_importance, fileName = fileName)
    mSetObj$analSet$rfReg$mod <- list(model_name = model_name, model = model, response = response_train_name, predictor = predictors_train_name)
    #Write the summary text file into the working directory (see sink() note above)
    sink(fileName)
    cat("Formula:\n")
    print(formula)
    # cat("\nReference category:\n")
    # cat(paste0(reference))
    print(model)
    cat("\nPredicted values:\n")
    print(fitted)
    cat("\nRMSE:\n")
    cat(paste0(svm_RMSE, "\n"))
    cat("\nPredictor variable importance:\n")
    print(predictor_importance)
    sink()
  }
  return(.set.mSet(mSetObj))
}
#'Plot svm predicted vs actual data plot with line of best fit
#'@description Scatter plot with line of best fit, where response variable is y and predictor variable is x
#'@usage plot.pred.svmReg(mSetObj, method=method, imgName, format="png", dpi=72, width=NA)
#'@param mSetObj Input the name of the created mSetObj (see InitDataObjects)
#'@param method Set ML regression method, default is random forest
#'@param imgName Input the image name
#'@param format Select the image format, "png" or "pdf", default is "png"
#'@param dpi Input the dpi. If the image format is "pdf", users need not define the dpi. For "png" images,
#'the default dpi is 72. It is suggested that for high-resolution images, select a dpi of 300.
#'@param width Input the width, there are 2 default widths. The first, width=NULL, is 10.5.
#'The second default is width=0, where the width is 7.2. Otherwise users can input their own width.
#'@author Louisa Normington\email{normingt@ualberta.ca}
#'University of Alberta, Canada
#'License: GNU GPL (>= 2)
#'@export
ml.pred.plot <- function(mSetObj=NA,
                         method="random forest",
                         facA = "NULL",
                         data="false",
                         col_dots="NULL",
                         col_line="NULL",
                         plot_ci="false",
                         plot_title=" ",
                         plot_ylab=" ",
                         plot_xlab=" ",
                         imgName, format="png", dpi=72, width=NA){
  ## Plot predicted vs actual response values for the model fitted by
  ## ml.reg.anal(): test-set scatter plus a smoothed trend line, written via
  ## Cairo, together with a JSON description of the plot for the front end.
  ## (used to be called: plot.pred.MLReg)
  mSetObj <- .get.mSet(mSetObj)
  # Remember which model was *requested* before `method` is overwritten below
  # with the stored display name ("SVM Regression"/"Random Forest Regression").
  # FIX: the original re-tested `method == "SVM"` after that overwrite, so the
  # SVM plot object was always stored under analSet$rfReg.
  is_svm <- method == "SVM"
  # SET POINT COLOR ("NULL" -> black)
  col_dots1 <-
    switch(
      col_dots,
      "NULL" = "black",
      "blue" = "blue",
      "red" = "red",
      "green" = "green",
      "grey" = "grey",
      NULL
    )
  # SET LINE COLOR ("NULL" -> black)
  col_line1 <-
    switch(
      col_line,
      "NULL" = "black",
      "blue" = "blue",
      "red" = "red",
      "green" = "green",
      "grey" = "grey",
      NULL
    )
  # Whether to draw the confidence band around the smoother
  if (plot_ci == "false") {
    plot_ci1 <- FALSE # default
  } else {
    plot_ci1 <- TRUE
  }
  # Axis labels: " " means "use the default"
  if (plot_ylab == " ") {
    plot_ylab1 <- "Actual"     # y holds the observed response values
  } else {
    plot_ylab1 <- plot_ylab
  }
  if (plot_xlab == " ") {
    plot_xlab1 <- "Predicted"  # x holds the model's test-set predictions
  } else {
    plot_xlab1 <- plot_xlab
  }
  # Plot dimensions in inches
  if (is.na(width)) {
    w <- 10.5
  } else if (width == 0) {
    w <- 7.2
  } else {
    w <- width
  }
  h <- w
  # Pull the fitted-model objects stored by ml.reg.anal()
  if (is_svm) {
    reg <- mSetObj$analSet$svmReg
  } else {
    reg <- mSetObj$analSet$rfReg
  }
  facA <- reg$mod$response   # response variable name
  method <- reg$meth         # display name of the model
  # FIX: the original read reg$predicted.values, which does not exist at this
  # level (the test-set predictions are stored as reg$pred; the TRAINING fits
  # live in reg$res$predicted.values) and therefore evaluated to NULL.
  prediction <- reg$pred
  test_data <- reg$test
  input <- test_data
  # Default plot title, built here because the model's display name is only
  # known after the stored objects are retrieved. FIX: the original built the
  # default before this point from an undefined `formula`, which resolved to
  # the base::formula function and produced garbage in the title.
  if (plot_title == " ") {
    plot_title1 <- paste0("Predicted vs Actual\n(", method, ")")
  } else {
    plot_title1 <- plot_title
  }
  # Predicted (x) vs actual (y) pairs on the held-out test set, plus a simple
  # linear fit used for the R^2/slope/intercept values in the JSON output
  dfpred <- data.frame(fpred = prediction, fA = input[, facA])
  formula2 <- as.formula("fA ~ fpred")
  model2 <- lm(formula = formula2, data = dfpred)
  # Output names: the JSON name must be derived BEFORE imgName is extended
  # with the dpi/format suffix
  imgName2 <- paste(gsub("\\_\\d+\\_", "", imgName), ".json", sep = "")
  imgName <- paste(imgName, "dpi", dpi, ".", format, sep = "")
  if (is_svm) {
    mSetObj$imgSet$plot.pred.svmReg <- imgName
  } else {
    mSetObj$imgSet$plot.pred.rfReg <- imgName
  }
  ## MAKE PLOT
  a0 <- ggplot(data = dfpred, aes(x = fpred, y = fA)) +
    labs(title = plot_title1) +
    ylab(plot_ylab1) + xlab(plot_xlab1) +
    geom_smooth(se = plot_ci1, color = col_line1, fullrange = TRUE) +
    geom_point(shape = 16, color = col_dots1) +
    theme_bw() +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          axis.text = element_text(size = 12, colour = "black"),
          axis.title = element_text(size = 12),
          plot.title = element_text(face = 'bold', hjust = 0.5)
    )
  # GENERATE PLOT file
  Cairo::Cairo(file = imgName, unit = "in", dpi = dpi, width = w, height = h, type = format, bg = "white")
  print(a0)
  dev.off()
  # STORE the ggplot object and labels in mSet for later reuse
  if (is_svm) {
    mSetObj$analSet$svmReg$plotpred <- list(plot = a0, title = plot_title1, xlab = plot_xlab1, ylab = plot_ylab1)
  } else {
    mSetObj$analSet$rfReg$plotpred <- list(plot = a0, title = plot_title1, xlab = plot_xlab1, ylab = plot_ylab1)
  }
  # JSON OBJECT MAKING: serialize the rendered layers for the web front end
  build <- ggplot_build(a0)
  build_line <- build$data[[1]]    # layer 1: geom_smooth
  build_points <- build$data[[2]]  # layer 2: geom_point
  linear_plot_json <- list()
  linear_plot_json$main <- plot_title1                 # title
  linear_plot_json$axis <- c(plot_xlab1, plot_ylab1)   # axis titles
  linear_plot_json$points$coords <- build_points[, c("x", "y")]
  # NOTE(review): point colours are read from the *smooth* layer using the
  # point layer's column mask, exactly as in the original -- confirm intended.
  linear_plot_json$points$cols <- build$data[[1]][, grepl("col", colnames(build_points))]
  linear_plot_json$points$shape <- build_points[, c("group")]
  linear_plot_json$points$size <- build_points[, c("size")]
  linear_plot_json$lines$cols <- build_line[, grepl("col", colnames(build_line))]
  # Confidence-interval band of the smoother, if ggplot produced one
  if (any(grepl("ymin", colnames(build_line))) && any(grepl("ymax", colnames(build_line)))) {
    ci <- build_line[, c("x", "y", "ymin", "ymax")]
    colnames(ci) <- c("x", "y", "CI_down", "CI_up")
    linear_plot_json$lines$ci <- ci
  } else {
    linear_plot_json$lines$ci <- data.frame(x = build_line[, c("x")], y = build_line[, c("y")], CI_down = 0, CI_up = 0)
  }
  ## BOOLEANS
  linear_plot_json$bool_ci <- isTRUE(plot_ci1)
  # Fit statistics from the predicted-vs-actual linear model
  linear_plot_json$model$r_sq <-
    summary(model2)[["r.squared"]]        # R^2
  linear_plot_json$model$r_sq_adj <-
    summary(model2)[["adj.r.squared"]]    # adjusted R^2
  linear_plot_json$model$slope <-
    summary(model2)[["coefficients"]][2]  # beta
  linear_plot_json$model$yint <-
    summary(model2)[["coefficients"]][1]  # alpha
  json.obj <- RJSONIO::toJSON(linear_plot_json, .na = 'null')
  sink(imgName2)
  cat(json.obj)
  sink()
  if (!.on.public.web) {
    return(.set.mSet(mSetObj))
  }
}
15980c759b1bff207e7c9f5710b1a4bd7bbb5937 | 5debb7ef018afc3e17890b9bfeb89bfe51c3030b | /R/view_help.R | 33dd636b9bd31a10fe61a0a9c3bef46ee5793a7e | [] | no_license | shs576/RDocumentation | ed15c1be42fc62c28bddff69fd27760c383cc8ed | 9c9bd8c714e633fdbe5739dc9c89fee5ab1055ce | refs/heads/master | 2021-09-15T12:37:47.606145 | 2018-06-01T14:13:29 | 2018-06-01T14:13:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,951 | r | view_help.R | get_help_search_body <- function(paths) {
lut <- c(alias = "aliases", concept = "concept", keyword = "keywords", name = "name", title = "title")
body <- paths
body$fields <- concat(lut[body$fields])
body$matching_titles <- concat(unique(body$matches$Topic))
body$matching_packages <- concat(unique(body$matches$Package))
body$called_function <- "help_search"
body[c("lib.loc", "matches", "types", "package")] <- NULL
body
}
#' @importFrom utils tail
get_help_body <- function(paths, package = "", topic = "") {
  # Build the request body for a "help" call to the RDocumentation service.
  #
  # paths:   character vector of local help paths (may carry a "topic"
  #          attribute); zero length means no local documentation was found.
  # package: fallback package name(s) used when `paths` is empty.
  # topic:   fallback topic name used when `paths` is empty.
  # Returns a named list ready for JSON serialisation.
  if (length(paths) == 0) {
    # No documentation found locally: fall back to the caller-supplied names.
    packages <- if (length(package) == 0) "" else package
    topic_names <- ""
    topic <- if (length(topic) == 0) "" else topic
  } else {
    # Documentation was found. The indexing below takes the package name as
    # the third-from-last path component and the topic name as the last one
    # (paths look like .../<package>/<dir>/<topic>).
    # vapply() (not sapply()) guarantees a character vector even for empty
    # or unusual input.
    parts <- strsplit(paths, "/")
    packages <- vapply(parts, function(p) p[length(p) - 2L], character(1))
    topic_names <- vapply(parts, tail, character(1), n = 1)
    topic <- attr(paths, "topic")
  }
  list(packages = concat(packages),
       topic_names = concat(topic_names),
       topic = topic,
       called_function = "help")
}
get_find_package_body <- function(package) {
  # Assemble the request body for a "find package" call to the
  # RDocumentation service.
  #
  # package: name of the package to look up.
  # Returns a named list ready for JSON serialisation.
  body <- list(called_function = "find_package",
               package_name = package)
  body
}
#' @importFrom httr POST
#' @importFrom httr GET
#' @importFrom httr status_code
#' @importFrom httr content
#' @importFrom httr user_agent
#' @importFrom httr content_type_json
#' @importFrom httr timeout
#' @importFrom httr cookies
#' @importFrom httr add_headers
#' @importFrom rjson toJSON
#' @importFrom utils browseURL
#' @importFrom utils read.table
view_help <- function(body){
  # Render RDocumentation help in the viewer pane.
  #
  # Sends the request `body` (built by one of the get_*_body() helpers) to
  # the RDocumentation view endpoint, caches the returned HTML page locally,
  # and opens it through R's dynamic help server.
  #
  # body: a list describing the help request; serialised to JSON for the POST.
  # Returns invisibly on success; stops with an error on a non-200 response.

  # create doc directory if doesn't exist yet
  dir.create(get_rdocs_dir(), showWarnings = FALSE)
  go_to_url <- "https://www.rdocumentation.org/rstudio/view?viewer_pane=1"
  resp <- POST(go_to_url,
               add_headers(Accept = "text/html"),
               user_agent("rstudio"),
               config = content_type_json(),
               body = rjson::toJSON(body),
               encode = "json",
               timeout(getOption("RDocumentation.timeOut")))
  if (status_code(resp) == 200) {
    # Cache the rendered page where the dynamic help server can serve it.
    writeBin(content(resp, "raw"), get_html_file())
    browser <- getOption("browser")
    # Start (or reuse) R's dynamic help HTTP server; `p` is used below as
    # the port to reach it on.
    p <- tools::startDynamicHelp(NA)
    url <- build_local_url(p)
    browseURL(url, browser)
    return(invisible())
  } else{
    # Include the HTTP status so failures are diagnosable.
    stop("RDocumentation returned HTTP status ", status_code(resp), call. = FALSE)
  }
}
#' @importFrom httr parse_url
build_local_url <- function(p) {
  # Build the URL of the locally served RDocumentation help page.
  #
  # p: port of R's dynamic help HTTP server.
  # The query string always carries viewer_pane=1; the RStudio session port
  # and shared secret are appended only when the corresponding environment
  # variables are set.
  base <- sprintf("http://127.0.0.1:%s/library/RDocumentation/doc/index.html", p)
  params <- "?viewer_pane=1"
  port <- Sys.getenv("RSTUDIO_SESSION_PORT")
  if (nchar(port) > 0) {
    params <- c(params, paste0("Rstudio_port=", port))
  }
  secret <- Sys.getenv("RS_SHARED_SECRET")
  if (nchar(secret) > 0) {
    params <- c(params, paste0("RS_SHARED_SECRET=", secret))
  }
  paste0(base, paste0(params, collapse = "&"))
}
be61732da669f9ca15abe02e43d43e429e770dd4 | d3430da51e26f51bec0b1fe12dd23c12c279e1f0 | /man/Naturvernomrade.Rd | 8bc2cf74e43995a1c37dd4cef3e57aa061e52106 | [
"CC-BY-4.0"
] | permissive | hmalmedal/N1000 | cd3f9136762a7b41eddf87e56e6492413d0f92b2 | a5c001b6522efb190fe638075635ab1fc55cf218 | refs/heads/master | 2023-05-12T18:19:31.892163 | 2019-02-24T10:56:25 | 2023-05-01T08:14:33 | 172,326,587 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,855 | rd | Naturvernomrade.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datadoc.R
\docType{data}
\name{Naturvernomrade}
\alias{Naturvernomrade}
\title{Naturvernomrade}
\format{
\if{html}{\out{<div class="sourceCode">}}\preformatted{Simple feature collection with 42 features and 4 fields
Geometry type: POLYGON
Dimension: XY
Bounding box: xmin: 1183.26 ymin: 6472365 xmax: 1088054 ymax: 7893205
Projected CRS: ETRS89 / UTM zone 33N
# A tibble: 42 × 5
oppdateringsdato navn vernedato verneform geometry
* <date> <chr> <date> <chr> <POLYGON [m]>
1 2017-01-05 Sjunkhatten 2010-02-05 NP ((494981.1 7472050, 495072.8 74…
2 2017-01-05 Rohkunborri 2011-02-25 NP ((642141.4 7608935, 645026.2 76…
3 2017-01-05 Hallingskarvet 2006-12-22 NP ((92235.33 6740841, 92255.15 67…
4 2017-01-05 Folgefonna 2005-04-29 NP ((28219.27 6691623, 28162.51 66…
5 2017-01-05 Ånderdalen 2004-06-04 NP ((585497 7677287, 586045 767750…
6 2017-01-05 Femundsmarka 2003-02-21 NP ((341250.5 6931621, 341307 6931…
7 2017-01-05 Fulufjellet 2012-04-27 NP ((373520.6 6824409, 370912.8 68…
8 2017-01-05 Ytre Hvaler 2009-06-26 NP ((271280.2 6550636, 271187.1 65…
9 2017-01-05 Øvre Pasvik 2003-08-29 NP ((1043856 7739675, 1044377 7737…
10 2017-01-05 Stabbursdalen 2002-12-20 NP ((845960.9 7801580, 846028.5 77…
# ℹ 32 more rows
# ℹ Use `print(n = ...)` to see more rows
}\if{html}{\out{</div>}}
}
\source{
\code{Basisdata_0000_Norge_25833_N1000Restriksjonsomrader_GML.gml}
}
\usage{
Naturvernomrade
}
\description{
Naturvernområde
}
\author{
© \href{https://kartverket.no/}{Kartverket}
}
\keyword{datasets}
|
59555b31cbe3ac017a5e28bc215e8a6ac3b3e0d3 | 177e0503f7a1f7c63963b62e318f7980d273399d | /cachematrix.R | 7fbe8eab09ab164bee7ae7d98c98b163512fdffa | [] | no_license | tklinger123/ProgrammingAssignment2 | 41f9584681f2b9b8f39190590abbf73cc074fd32 | 7c66b75d48a53596889070f30ecd9ed98fb39209 | refs/heads/master | 2021-04-18T22:10:54.499136 | 2018-03-26T14:22:16 | 2018-03-26T14:22:16 | 126,831,026 | 0 | 0 | null | 2018-03-26T13:12:43 | 2018-03-26T13:12:42 | null | UTF-8 | R | false | false | 1,231 | r | cachematrix.R | ##################################
## programming assignment 2:
## caching the inverse of a matrix
## done by thomas klinger
##################################
# this function creates a matrix and its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix in a list of accessors that can also cache its inverse.
  # All four functions close over this environment, so they share `x` and
  # the cached inverse `inv`.
  inv <- NULL
  list(
    # Replace the stored matrix and drop any cached inverse (now stale).
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a computed inverse in the cache.
    setinverse = function(inverse) inv <<- inverse,
    # Return the cached inverse, or NULL if none has been stored yet.
    getinverse = function() inv
  )
}
# cacheSolve() complements makeCacheMatrix(): given the special "matrix"
# object produced by that constructor, it returns the matrix inverse,
# computing it at most once and serving the cached copy on later calls.
cacheSolve <- function(x, ...) {
  # Serve the cached inverse when one has already been computed.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # First request: compute the inverse, store it for next time, return it.
  result <- solve(x$get(), ...)
  x$setinverse(result)
  result
}
|
4f94c23c45fd945b406b9ff023a882c34b8c5eb1 | c3928a8d0427c0c037d8a94858acfb9b85ee3eef | /backend/src/main/resources/r_script_temp/readCategoryCounts.R | 37e3951d9ad5fa98ee6fe4d4484236d37450fd24 | [
"MIT"
] | permissive | ambro01/NanoporeQC | f6fdd61c727d0acdb533d9be616187b14af92cae | f5d4ebec8b735a0eb81a53ff3cbde565a8967092 | refs/heads/master | 2020-03-28T00:26:35.739541 | 2018-09-20T19:14:09 | 2018-09-20T19:14:09 | 147,413,512 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,733 | r | readCategoryCounts.R | # category, count
# ilosc nukleotydow w zaleznosci od typu odczytu
#' Plot the proportion of template, complement and 2D reads found a dataset.
#'
#' Generates a bar plot showing the breakdown of read types found in a set of fast5 files. There is a strict hierarchy to the types of read that can be found in a fast5 file. A full 2D read requires both a complement and template strand to have been read correctly. Similarly, a complement strand can only be present if the template was read successfully. Finally, you can encounter a file containing now called bases on either strand.
#' Here we visualise the total number of fast5 files, along with the counts containing each of the categories above. For an ideal dataset all four bars will be the same height. This is unlikely, but the drop between bars can give some indication of data quality.
#' @param summaryData Object of class \linkS4class{Fast5Summary}.
#' @return Returns an object of class \code{gg} representing the plot.
#' @examples
#' if( require(minionSummaryData) ) {
#' data(s.typhi.rep2, package = 'minionSummaryData')
#' plotReadCategoryCounts( s.typhi.rep2 )
#' }
#' @export
#' @importFrom dplyr summarise count
# Summarise read-category counts from the fast5 summary object.
# `readCategoryCounts()` and `summaryData` are expected to be supplied by the
# calling environment (presumably the IONiseR-based backend) — TODO confirm.
# On any error, fall back to an empty tibble so downstream selects don't crash.
out <- tryCatch(readCategoryCounts(summaryData), error = function(cond){return (tibble())})
# Extract the category labels; empty typed tibble if the column is absent.
temp <- tryCatch(select(out, category), error = function(cond){return (tibble(category = character()))})
category <- matrix(as.character(unlist(temp)), nrow = nrow(temp))
# Extract the per-category counts with the same fallback.
temp <- tryCatch(select(out, count), error = function(cond){return (tibble(count = numeric()))})
count <- matrix(as.numeric(unlist(temp)), nrow = nrow(temp))
# Rows are unpacked positionally: total files, then reads with a template
# strand, a complement strand, and a full 2D call.
# NOTE(review): this indexing assumes at least four rows; on the empty
# fallback above these subscripts error — confirm upstream guarantees.
files_count <- count[1,1]
template_count <- count[2,1]
complement_count <- count[3,1]
full_2d_count <- count[4,1]
269a615e60bf26e2b64db9a00f84bd2d767bc47c | 2e1754a5130e4f3f5725baace28b15a078072cd9 | /app/library/spam/demo/article-jss-example1.R | dd04f334fe3d3fa70378db93744512b1a3b97676 | [
"Apache-2.0"
] | permissive | agreenville/Bipartite_Network_win | b4399e1150439373a886bf677868584abc0f635a | ce9c5dc2fb7707314ddcbe05342f06a6afc728bc | refs/heads/master | 2021-05-11T05:37:10.926023 | 2018-01-18T23:57:02 | 2018-01-18T23:57:02 | 117,963,829 | 0 | 0 | Apache-2.0 | 2018-01-18T23:57:03 | 2018-01-18T09:50:02 | R | UTF-8 | R | false | false | 269 | r | article-jss-example1.R | # This is file ../spam/demo/article-jss-example1.R
# This file is part of the spam package,
# http://www.math.uzh.ch/furrer/software/spam/
# by Reinhard Furrer [aut, cre], Florian Gerber [ctb]
# This demo is deprecated. Please run the replacement demo, which this
# stub launches directly:
demo('jss10-example1')
|
1b47960819197447864c57b08694bcee4cd21600 | 75ae7bd581c200f31825edb298d66195b3d6e114 | /alpha-view/SVM.R | 9276bb26037e73d3e7fd4ef29f88df272090fe13 | [
"MIT"
] | permissive | joseph97git/alpha-learn | 58468f0a144216da761d81015bd6231010dd32a2 | 3ab6339e60f0ca0194f2e32b575d28c57ac0aeb4 | refs/heads/master | 2020-05-09T19:30:52.111462 | 2020-02-17T21:22:59 | 2020-02-17T21:22:59 | 181,380,730 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 621 | r | SVM.R | getSVMFit <- function(train, var1, var2, kern, deg) {
# fits svm model given the two input variable parameters
# create formula
svmFormula <- as.formula(paste("signal ~", var1, "+", var2))
# fit svm model
svm_fit <- svm(svmFormula, data = train, kernel = kern, degree = deg)
return(svm_fit)
}
getSVMPredict <- function(model, test) {
  # Collect the observed class labels of `test` for later comparison against
  # the model's predictions (accuracy checking by the caller).
  #
  # model: a fitted classifier, e.g. the result of getSVMFit().
  # test:  data frame containing the predictor columns and a `signal` column
  #        holding the true class labels.
  # Returns a one-column data frame ("signal", as a factor) with row names
  # renumbered from 1.
  pred <- predict(model, test)
  # NOTE(review): `pred` is computed but never used or returned; given the
  # function name, the predictions may have been meant to be part of the
  # result — confirm before relying on this behaviour.
  test_signal <- data.frame("signal" = factor(test$signal))
  # seq_len() handles the zero-row case safely, unlike 1:nrow() which would
  # produce c(1, 0) and fail.
  rownames(test_signal) <- seq_len(nrow(test_signal))
  return(test_signal)
}
|
b40071b1c10775b4b87d85caa6fa456228f98a62 | 13f974b729d9c355dc639a6783ecca2b612bf152 | /machine-learning-app/app.R | b55f1340c214c0b9035e65dd6a93c68023f400ec | [] | no_license | fonluiz/decision-trees-app | 9a3cdd3aa4922db6d55ab5e9f7a6ab44238dd073 | 3e2654ce31bca9e12646ad6cd71ebb71a9e017cc | refs/heads/master | 2021-04-28T15:11:33.675964 | 2018-03-20T18:17:27 | 2018-03-20T18:17:27 | 121,983,454 | 0 | 0 | null | 2018-03-20T18:17:28 | 2018-02-18T19:32:39 | R | UTF-8 | R | false | false | 1,040 | r | app.R | library(shinydashboard)
library(shinyjs)
library(tidyverse)
source("ui_scripts.R")
source("decision-tree/server.R")
source("neural-network/server.R")
# Dashboard layout: header, sidebar menu, and one body panel per tab.
# The tabN() content builders are presumably defined in the sourced
# ui_scripts.R — TODO confirm.
ui <- dashboardPage(
  dashboardHeader(title = "Machine Learning App - chess", titleWidth = 300),
  dashboardSidebar(
    width = 300,
    useShinyjs(),  # enable shinyjs helpers for this UI
    sidebarMenu(id = "menu",
                # Menu labels are Portuguese: "Modelos" = models,
                # "Resultados" = results.
                menuItem("Modelos", tabName = "tab1", icon = icon("bookmark")),
                menuItem("Resultados", tabName = "tab2", icon = icon("bookmark"))
    )
  ),
  dashboardBody(
    tabItems(
      tabItem(tabName = "tab1",
              tab1()
      ),
      tabItem(tabName = "tab2",
              tab2()
      ),
      # NOTE(review): "tab3" has no matching menuItem in the sidebar above,
      # so this panel appears unreachable from the menu — confirm intent.
      tabItem(tabName = "tab3",
              tab3()
      )
    )
  )
)
# Server: wire each output slot to its render helper. The render functions
# come from the sourced decision-tree/server.R and neural-network/server.R
# scripts — TODO confirm their signatures.
server <- shinyServer(function(input, output) {
  # plot tree
  output$treePlot <- renderDecisionTreePlot(input)
  # plot neural network
  output$neuralNetworkPlot <- renderNeuralNetworkPlot(input)
})
# Run the application
shinyApp(ui = ui, server = server)
|
a3394efac82655c7d83f9977bab35daa66cb6a84 | 4641658628f93059b76ba797024cec234dc1c00e | /data_cleaning.R | 6a837b48cc2be2b4664f2861ac18ea02988108f9 | [] | no_license | mirandamanschot/demo-tidytext-02 | 35bf3f3c0f6d14128f1f6c2083aea252ea51764b | 434d957603a07989d0ea0f855b5108b862b9920e | refs/heads/main | 2023-04-22T03:17:34.778099 | 2021-05-19T12:45:47 | 2021-05-19T12:45:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,477 | r | data_cleaning.R | #' data_cleaning.R
#'
#' Clean up and select a date range of tweets
#' from donald trump for analysis
#'
#'
# --- Library --- #
library(readr)
library(readr)
library(dplyr)
library(tibble)
library(tidyr)
library(tidytext)
library(textstem)
library(vader)
library(lubridate)
library(stm)
# --- load the data --- #
# Raw tweet archive; must contain at least `date` and `text` columns —
# TODO confirm full schema against the CSV.
tweets <- read_csv('data/tweets_01-08-2021.csv')
# let's focus on tweets around when corona started
# why? i needed to make the code run faster somehow
# for teaching purposes
start_date <- as.Date("2020-01-01")
end_date <- as.Date("2020-04-30")
# Business-day boundaries used to label each tweet below.
bus_day_start <- hms::as_hms("09:00:00")
bus_day_end <- hms::as_hms("20:00:00")
tweets_covid <-
  tweets %>%
  # Treat timestamps as UTC, then convert to US Eastern; derive day-level,
  # hour-level, and time-of-day versions of the posting time.
  mutate(date = force_tz(date, "UTC"),
         date_est = with_tz(date, "America/New_York"),
         date_posted = floor_date(date_est, unit = 'day'),
         hour_posted = floor_date(date_est, unit = 'hour'),
         time = hms::as_hms(hour_posted)
  ) %>%
  # Keep only tweets posted within the study window (inclusive bounds).
  filter(between(as.Date(date_posted),
                 start_date,
                 end_date)
  ) %>%
  # Label each tweet by whether it falls inside 09:00-20:00 Eastern.
  # NOTE(review): `time` is floored to the hour, so e.g. 20:59 becomes 20:00
  # and is classified as "business hours" — confirm that is intended.
  mutate(business_hours =
           case_when(
             time < bus_day_start | time > bus_day_end ~ "non-business hours",
             TRUE ~ "business hours"
           )
  ) %>%
  # Row number doubles as a stable per-tweet identifier.
  rownames_to_column("text_id")
trump_tweets <-
  tweets_covid %>%
  select(text_id, text, date_est, business_hours)
write_csv(trump_tweets, 'data/trump_early_2020_tweets.csv')
|
1cbee10c3c7512390c00961cd3511f382308c1ac | 8e9f9bc21f7d3137d1913fd5bec802f936b5462d | /Life cycle function.R | f36b370fe6fdea6d089a0b698ed84d73bdd7f189 | [] | no_license | klwilson23/LifeCycleDiversity | 3087aa80b4b19655db4dae3fe3462f33381927e4 | 71134716ac29340482d2ad4609049ba97340ca79 | refs/heads/master | 2021-07-12T11:10:46.495751 | 2020-07-29T00:29:32 | 2020-07-29T00:29:32 | 188,898,890 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,622 | r | Life cycle function.R | # notes: age-structure a 4.2 is 1 in Freshwater, and 2 ocean winter survivals (not 3 ocean winters)
# notes: double-check the bad-bad correlation
LifeCycle <- function(Nyears=100,CR=4,N0=100,propAge=rep(1/4,4),freshMarAges,recCV=0.25,Ncycles=4,freshSurvMn=0.7,marSurvMn=0.5,freshSurvCV=0.1,marSurvCV=0.1,freshRho=0,marRho=0,lifeCycleNames,propRisk,marSurv_Scen="none",probGood=0.8,probBad=0.2,goodSurv=0.077,badSurv=0.0125,startProb=0.5)
{
marSurvMn <- goodSurv
freshSurv <- sapply(1:Ncycles,function(x){exp(-(-log(freshSurvMn)/freshMarAges[x,1]))})
mnfreshSurv <- exp(--log(freshSurvMn)/sum((propAge*freshMarAges[,1])))
marSurv <- sapply(1:Ncycles,function(x){exp(-(-log(marSurvMn)/freshMarAges[x,2]))})
mnmarSurv <- exp(--log(marSurvMn)/sum((propAge*freshMarAges[,2])))
# matrix of good and bad survivals
goodSurvYr <- exp(--log(goodSurv)/sum((propAge*freshMarAges[,2])))
badSurvYr <- exp(--log(badSurv)/sum((propAge*freshMarAges[,2])))
if(marSurv_Scen=="none")
{
marSurvYr <- rep(0,Nyears)
}else{
marSurvYr <- array(NA,dim=c(Nyears,2,Ncycles),dimnames=list("Years"=1:Nyears,"Survival"=c("Good","Bad"),"Life cycles"=lifeCycleNames))
survs <- sapply(1:Ncycles,function(x){exp(-(-log(c(goodSurv,badSurv))/freshMarAges[x,2]))})
for(i in 1:Ncycles)
for(j in 1:2){
marSurvYr[1:Nyears,j,i] <- survs[j,i]
}
marSurvYrNm <- rep(NA,Nyears)
marSurvYrNm[1] <- ifelse(rbinom(1,1,prob=startProb)==1,"Good","Bad")
}
popDyn <- array(NA,dim=c(Nyears,Ncycles,max(freshMarAges[,"Freshwater"])+max(freshMarAges[,"Marine"])+1),dimnames=list("Year"=1:Nyears,"Life cycles"=lifeCycleNames,"Age"=1:(max(freshMarAges[,"Freshwater"])+max(freshMarAges[,"Marine"])+1)))
RecruitsLC <- SpawnersLC <- matrix(NA,nrow=Nyears,ncol=Ncycles)
Spawners <- rep(NA,Nyears)
Recruits <- rep(NA,Nyears)
freshSurvYr <- rep(0,Nyears)
surv.start <- sapply(1:Ncycles,function(x){rep(c(freshSurv[x],marSurv[x],1),times=c(freshMarAges[x,1],freshMarAges[x,2],1))})
surv.actual <- sapply(1:Ncycles,function(x){rep(c(freshSurv[x],marSurvYr[1,marSurvYrNm[1],x],1),times=c(freshMarAges[x,1],freshMarAges[x,2],1))})
stage.start <- sapply(1:Ncycles,function(x){rep(c(1,2,3),times=c(freshMarAges[x,1],freshMarAges[x,2],1))})
survivorship <- survivorshipStart <- survival <- survivalstage <- matrix(0,nrow=Ncycles,ncol=length(popDyn[1,1,]))
fecundity <- matrix(0,nrow=Ncycles,ncol=length(popDyn[1,1,]))
for(i in 1:Ncycles){
survival[i,1:length(surv.start[[i]])] <- surv.start[[i]]
survivalstage[i,1:length(surv.start[[i]])] <- stage.start[[i]]
survivorship[i,1:length(surv.start[[i]])] <- cumprod(c(1,surv.start[[i]][-length(surv.start[[i]])]))
survivorshipStart[i,1:length(surv.start[[i]])] <- cumprod(c(1,surv.actual[[i]][-length(surv.actual[[i]])]))
fecundity[i,length(surv.start[[i]])] <- 1
}
recProp <- as.vector((t(t(survivorship*fecundity)[t(survivorship*fecundity)!=0]))/sum(survivorship*fecundity))
#recProp <- rowSums(survivorship)/sum(survivorship)
# in order to get the proportions-at-age observed in spawners, we need recruits to be allocated differentially to those proportions-at-age and survivorship vector
recProp <- (propAge/recProp)/sum((propAge/recProp))
SPR0 <- sum(survivorship*fecundity*recProp) #spawner per recruit
R0 <- N0/SPR0 # equilibrium recruits
alpha.R <- CR/SPR0
beta.R <- -log(alpha.R*SPR0)/(R0*SPR0)
if(marSurv_Scen=="none"){
survivorshipStart <- survivorship
}
popDyn[1,,] <- R0*survivorshipStart*recProp
Spawners[1] <- sum(sapply(1:Ncycles,function(x){popDyn[1,x,sum(freshMarAges[x,])+1]}))
Recruits[1] <- alpha.R*Spawners[1]*exp(beta.R*Spawners[1])
RecruitsLC[1,] <- (alpha.R*Spawners[1]*exp(beta.R*Spawners[1]))*recProp
SpawnersLC[1,] <- sapply(1:Ncycles,function(x){popDyn[1,x,sum(freshMarAges[x,])+1]})
for(Iyear in 2:Nyears)
{
# get recruitment from last years' spawners
Spawners[Iyear] <- max(0,sum(sapply(1:Ncycles,function(x){popDyn[Iyear-1,x,sum(freshMarAges[x,])+1]})))
SpawnersLC[Iyear,] <- sapply(1:Ncycles,function(x){popDyn[Iyear-1,x,sum(freshMarAges[x,])+1]})
#recProp <- SpawnersLC[Iyear,]/Spawners[Iyear]
expectedRec <- alpha.R*Spawners[Iyear]*exp(beta.R*Spawners[Iyear])
#Recruits[Iyear] <- rlnorm(1,log(expectedRec),recCV)
Recruits[Iyear] <- pmax(0,rnorm(1,expectedRec,expectedRec*recCV))
RecruitsLC[Iyear,] <- Recruits[Iyear]*recProp
# calculate time-varying freshwater and marine survival
if(marSurv_Scen=="none")
{
freshSurvYr[Iyear] <- rnorm(1,0,freshSurvCV)
freshSurvYr[Iyear] <- (freshRho)*freshSurvYr[Iyear-1]+(1-freshRho)*freshSurvYr[Iyear]
#marbetaPars <- get_beta(marSurv,marSurvCV)
marSurvYr[Iyear] <- rnorm(1,0,marSurvCV)
marSurvYr[Iyear] <- (marRho)*marSurvYr[Iyear-1]+(1-marRho)*marSurvYr[Iyear]
}else{
freshSurvYr[Iyear] <- 0
marSurvYrNm[Iyear] <- ifelse(marSurvYrNm[Iyear-1]=="Good",sample(c("Good","Bad"),1,prob=c(probGood,1-probGood)),sample(c("Good","Bad"),1,prob=c(1-probBad,probBad)))
}
for(Ilife in 1:Ncycles)
{
popDyn[Iyear,Ilife,1] <- Recruits[Iyear]*recProp[Ilife]
for(Iage in 2:length(popDyn[Iyear,Ilife,]))
{
if(marSurv_Scen=="none"){
surv <- ifelse(survivalstage[Ilife,Iage-1]==1,
exp(log(freshSurv[Ilife]/(1-freshSurv[Ilife]))+freshSurvYr[Iyear])/(1+exp(log(freshSurv[Ilife]/(1-freshSurv[Ilife]))+freshSurvYr[Iyear])),
ifelse(survivalstage[Ilife,Iage-1]==2,exp(log(marSurv[Ilife]/(1-marSurv[Ilife]))+marSurvYr[Iyear])/(1+exp(log(marSurv[Ilife]/(1-marSurv[Ilife]))+marSurvYr[Iyear])),0))
}else{
surv <- ifelse(survivalstage[Ilife,Iage-1]==1,
freshSurv[Ilife],
ifelse(survivalstage[Ilife,Iage-1]==2,
marSurvYr[Iyear,marSurvYrNm[Iyear],Ilife],
0))
}
popDyn[Iyear,Ilife,Iage] <- popDyn[Iyear-1,Ilife,Iage-1]*surv
}
}
}
closureRisk <- sum(ifelse(Spawners>=(propRisk*N0),0,1))/Nyears
marSurvYrNm <- factor(marSurvYrNm,levels=c("Good","Bad"))
return(list("closureRisk"=closureRisk,"RecruitsLC"=RecruitsLC,"SpawnersLC"=SpawnersLC,"survival"=survival,"survivorship"=survivorship,"Spawners"=Spawners,"Recruits"=Recruits,"marSurvYr"=exp(log(mnmarSurv/(1-mnmarSurv))+marSurvYr)/(1+exp(log(mnmarSurv/(1-mnmarSurv))+marSurvYr)),"freshSurvYr"=exp(log(mnfreshSurv/(1-mnfreshSurv))+freshSurvYr)/(1+exp(log(mnfreshSurv/(1-mnfreshSurv))+freshSurvYr)),"marSurvYrNm"=marSurvYrNm))
} |
42bd1f51c892ad3df61a28f256209bba6606dbbf | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/colorscience/examples/compuphaseDifferenceRGB.Rd.R | 46af44e30eee85b24c0cc29390434b3c4e9522eb | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 227 | r | compuphaseDifferenceRGB.Rd.R | library(colorscience)
### Name: compuphaseDifferenceRGB
### Title: compuphase Difference RGB
### Aliases: compuphaseDifferenceRGB
### Keywords: datasets
### ** Examples
compuphaseDifferenceRGB(c(124,63,78),c(241,65,78))
|
2c22284f5569787a386eb022247b1d76f8015e20 | 9484b93da2ee67dd0989745745b0ab3ce4567324 | /code/old/Run_Combine_Testing_pull_imp_OTUs.R | 271406a9c0477e84cdb918fe25e46a2e3fa4f54d | [
"MIT"
] | permissive | SchlossLab/Sze_FollowUps_Microbiome_2017 | 0d8d2abefbec52914d0fce24c3f977d266583913 | 8ef69b722a0610d27ef898fa9ea0ab1385f6807b | refs/heads/master | 2021-01-18T23:39:58.668847 | 2017-09-16T20:37:43 | 2017-09-16T20:37:43 | 46,723,430 | 1 | 6 | null | 2017-09-16T20:37:44 | 2015-11-23T13:53:29 | HTML | UTF-8 | R | false | false | 9,135 | r | Run_Combine_Testing_pull_imp_OTUs.R | ### Combine models from the Training runs
### Specifically identify mean and SD of model
### and pull out most important OTUs used within it
## Marc Sze
### Note:
## Random Forest: from the R package: “For each tree, the prediction accuracy
## on the out-of-bag portion of the data is recorded. Then the same is done after permuting
## each predictor variable. The difference between the two accuracies are then averaged over all trees,
## and normalized by the standard error. For regression, the MSE is computed on the out-of-bag data for each
## tree, and then the same computed after permuting a variable. The differences are averaged and normalized
## by the standard error. If the standard error is equal to 0 for a variable, the division is not done.”
## Cutoff of 0.5 (default was used for this) for RF model
###Load needed Libraries and functions
source('code/functions.R')
loadLibs(c("dplyr", "reshape2", "scales", "caret", "pROC"))
# Set up relevent environment variables
imp_vars_list <- list()
run_info_list <- list()
run_predictions <- list()
best_tune <- list()
probs_predictions <- list()
n <- 100
best_model_data <- as.data.frame(matrix(nrow = 100, ncol = 6))
for(i in 1:n){
load(paste("exploratory/RF_model_", i, ".RData", sep=""))
if(i == 1){
write.csv(eighty_twenty_splits, "data/process/tables/test_data_splits.csv",
row.names = F)
write.csv(test_data, "data/process/tables/test_tune_data.csv",
row.names = F)
}
probs_predictions[[paste("run_", i, sep = "")]] <-
predict(test_tune_list[[paste("data_split", i, sep = "")]],
test_test_data, type = 'prob')
rm(list = setdiff(ls(),
c("test_tune_list", "test_predictions", "best_tune",
"best_model_data", "imp_vars_list", "run_info_list",
"run_predictions", "n", "i", "probs_predictions", "all_runs_list")))
best_tune[paste("run_", i, sep = "")] <- test_tune_list[[paste(
"data_split", i, sep = "")]]$bestTune
run_info_list[[paste("run_", i, sep = "")]] <-
test_tune_list[[paste("data_split", i, sep = "")]]$results
imp_vars_list[[paste("run_", i, sep = "")]] <-
varImp(test_tune_list[[paste("data_split", i, sep = "")]],
scale = FALSE)$importance %>%
mutate(Variable = rownames(.)) %>% arrange(desc(Overall))
run_predictions[[paste("run_", i, sep = "")]] <- test_predictions[[paste(
"data_split", i, sep = "")]]
best_model_data[i, ] <- filter(run_info_list[[i]],
mtry == best_tune[[i]]) %>% select(-mtry)
colnames(best_model_data) <- colnames(select(run_info_list[[i]], -mtry))
rownames(best_model_data)[i] <- paste("run_", i, sep = "")
rm(test_predictions, test_tune_list)
}
# Write out ROC summary table
write.csv(
mutate(best_model_data, run = rownames(best_model_data),
best_mtry = t(as.data.frame.list(best_tune))),
"data/process/tables/ROC_model_summary.csv", row.names = F)
# Calculate number of times an OTU is in the top 10% of overall importance
OTU_appearance_table <- as.data.frame(data_frame(
Variable = imp_vars_list[["run_1"]]$Variable) %>%
mutate(total_appearance = 0))
rownames(OTU_appearance_table) <- OTU_appearance_table$Variable
for(j in 1:length(imp_vars_list)){
tempVars <- imp_vars_list[[j]][c(1:round(length(
rownames(imp_vars_list[[j]]))*0.10)), ][, "Variable"]
for(i in tempVars){
OTU_appearance_table[i, "total_appearance"] <-
OTU_appearance_table[i, "total_appearance"] + 1
}
}
OTU_appearance_table <- arrange(OTU_appearance_table,
desc(total_appearance))
# Keep Those over 50% of the total 100 runs of 80/20 splits
OTU_appearance_table <- filter(OTU_appearance_table, total_appearance > 50)
# Write out the important variables to a table
write.csv(OTU_appearance_table,
"data/process/tables/rf_wCV_imp_vars_summary.csv", row.names = F)
# Collect the mean and SD for the MDA of the most important variables
top_vars_MDA <- lapply(imp_vars_list, function(x)
x[order(x[, "Variable"]), ] %>% filter(Variable %in% OTU_appearance_table$Variable))
top_vars_MDA_by_run <- as.data.frame(matrix(nrow = length(OTU_appearance_table$Variable),
ncol = length(imp_vars_list),
dimnames = list(
nrow = top_vars_MDA[["run_1"]]$Variable[
order(top_vars_MDA[["run_1"]]$Overall, decreasing = T)],
ncol = paste("run_", seq(1:100), sep = ""))))
for(i in 1:length(top_vars_MDA_by_run)){
tempData <- top_vars_MDA[[i]]
rownames(tempData) <- tempData$Variable
top_vars_MDA_by_run[, i] <- tempData[rownames(top_vars_MDA_by_run), "Overall"]
rm(tempData)
}
# "1" pulls the value of mean or sd from the data frame
MDA_vars_summary <- cbind(
mean_MDA = t(summarise_each(as.data.frame(t(top_vars_MDA_by_run)), funs(mean)))[, 1],
sd_MDA = t(summarise_each(as.data.frame(t(top_vars_MDA_by_run)), funs(sd)))[, 1],
variable = rownames(top_vars_MDA_by_run))
write.csv(MDA_vars_summary[order(MDA_vars_summary[, "mean_MDA"], decreasing = TRUE), ],
"data/process/tables/lesion_model_top_vars_MDA_Summary.csv", row.names = F)
lesion_model_top_vars_MDA_full_data <-
mutate(top_vars_MDA_by_run, variables = rownames(top_vars_MDA_by_run)) %>%
melt(id = c("variables"))
write.csv(lesion_model_top_vars_MDA_full_data,
"data/process/tables/lesion_model_top_vars_MDA_full_data.csv", row.names = F)
# Pull out middle(ish) model from runs and use that in the prediction of lesion in
middle_run <- as.numeric(
strsplit((best_model_data[order(desc(best_model_data$ROC)), ] %>%
mutate(run = rownames(.)) %>%
slice(length(rownames(best_model_data))/2) %>%
select(run))[1,], "_")[[1]][2])
# Get Ranges of 100 10-fold 20 times CV data (worse, middle, best)
actual_data <- read.csv("data/process/tables/full_test_data.csv", header = T, row.names = 1)
data_splits <- read.csv("data/process/tables/test_data_splits.csv",
header = T, stringsAsFactors = F)
best_run <- as.numeric(strsplit((
mutate(best_model_data, run = rownames(best_model_data)) %>%
filter(ROC == max(best_model_data$ROC)) %>%
select(run))[1,], "_")[[1]][2])
worse_run <- as.numeric(strsplit((mutate(best_model_data,
run = rownames(best_model_data)) %>%
filter(ROC == min(best_model_data$ROC)) %>%
select(run))[1,], "_")[[1]][2])
best_split <- data_splits[, best_run]
worse_split <- data_splits[, worse_run]
middle_split <- data_splits[, middle_run]
roc_data_list <- list(
best_roc = roc(
actual_data[-best_split, ]$lesion ~
probs_predictions[[best_run]][, "Yes"]),
middle_roc = roc(actual_data[-middle_split, ]$lesion ~
probs_predictions[[middle_run]][, "Yes"]),
worse_roc = roc(actual_data[-worse_split, ]$lesion ~
probs_predictions[[worse_run]][, "Yes"]))
# Build data table for figure 3
test_roc_data <- cbind(
sensitivities = c(roc_data_list[["best_roc"]]$sensitivities,
roc_data_list[["middle_roc"]]$sensitivities,
roc_data_list[["worse_roc"]]$sensitivities),
specificities = c(roc_data_list[["best_roc"]]$specificities,
roc_data_list[["middle_roc"]]$specificities,
roc_data_list[["worse_roc"]]$specificities),
run = c(rep("best_roc",
length(roc_data_list[["best_roc"]]$sensitivities)),
rep("middle_roc", length(roc_data_list[["middle_roc"]]$sensitivities)),
rep("worse_roc", length(roc_data_list[["worse_roc"]]$sensitivities))))
write.csv(test_roc_data,
"data/process/tables/test_data_roc.csv", row.names = F)
# Create AUC data table for figure 3
auc_data_table <- as.data.frame(matrix(
nrow = 3, ncol = 4, dimnames = list(
nrows = c("best", "middle", "worse"), ncols = c("AUC", "ROC_cv", "Sens_cv", "Spec_cv"))))
auc_data_table[, "AUC"] <- c(
roc_data_list[["best_roc"]]$auc,
roc_data_list[["middle_roc"]]$auc,
roc_data_list[["worse_roc"]]$auc)
auc_data_table[, "ROC_cv"] <- c(
best_model_data[paste("run_", best_run, sep = ""), "ROC"],
best_model_data[paste("run_", middle_run, sep = ""), "ROC"],
best_model_data[paste("run_", worse_run, sep = ""), "ROC"])
auc_data_table[, "Sens_cv"] <- c(
best_model_data[paste("run_", best_run, sep = ""), "Sens"],
best_model_data[paste("run_", middle_run, sep = ""), "Sens"],
best_model_data[paste("run_", worse_run, sep = ""), "Sens"])
auc_data_table[, "Spec_cv"] <- c(
best_model_data[paste("run_", best_run, sep = ""), "Spec"],
best_model_data[paste("run_", middle_run, sep = ""), "Spec"],
best_model_data[paste("run_", worse_run, sep = ""), "Spec"])
write.csv(auc_data_table, "data/process/tables/auc_summary.csv")
# Keep everything but roc_data_list in memory
rm(list = setdiff(ls(), "roc_data_list"))
save.image("exploratory/rocs.RData")
|
e1bb4d5f8f3bff6f87d01935f7bcae158a9f96e4 | fe14f07a765d50471a87487f26de32bffe709912 | /server.R | ce8cf64f7dceaf0f2c5f799dc2a35fe4e3ad51e1 | [] | no_license | fipinoch/Mi-Indicador | dcc45eccb5dc726074a94a56c58026a4d62cd46c | 38c687b096eb411dcdf1581906b23a89f5b140da | refs/heads/master | 2020-04-07T10:28:48.657302 | 2018-11-19T20:56:33 | 2018-11-19T20:56:33 | 158,288,290 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 942 | r | server.R | server <- function(input, output, session){
df <- reactive({
df <- df_datos %>%
filter(nombre == input$indicador,
fecha >= input$daterange[1], fecha <= input$daterange[2])
})
output$grafico <- plotly::renderPlotly({
plotly::ggplotly(
df() %>%
ggplot2::ggplot(ggplot2::aes(x = fecha, y = valor)) +
ggplot2::geom_line() +
ggplot2::labs(x = '',
y = df_var$unidad_medida[df_var$nombre == input$indicador],
title = input$indicador)
)
})
output$datos <- renderDataTable({df()})
output$downloaddatos <- downloadHandler(
filename = function(){
paste0(input$indicador, '.csv')
},
content = function(file){
write.csv(df(), file, row.names = FALSE, fileEncoding = 'latin1')
}
)
} |
846c6b8753aa91a16d91a0cf84285e7210b8d83f | 40c7e4c11a8b91e70e8d3b6bdea98f90b4add997 | /MTA201039 RStudio script.R | e4200ac8b82f01d129b0a23bfa156cd815267beb | [] | no_license | hendrikknoche/BCI-fishMed10 | 3166cf267e06a6f221ef741625423fa58a45f4c9 | b0079ef2e22813919eea6b5762865120acabaf35 | refs/heads/master | 2023-03-05T01:56:44.254789 | 2021-02-17T13:36:13 | 2021-02-17T13:36:13 | 261,782,937 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,417 | r | MTA201039 RStudio script.R | library(pgirmess)
library(tidyverse)
library(reshape2)
#library(MASS)
library(tidyr)
library(car)
library(ggplot2)
library(normalr)
library(dplyr)
library(clinfun)
library(pastecs)
library(QuantPsyc)
library(Hmisc)
#' Min-max normalize a numeric vector to the [0, 1] range.
#'
#' @param x Numeric vector to rescale.
#' @param na.rm Ignore NA values when computing the range? Defaults to FALSE,
#'   which preserves the original behavior (any NA in `x` makes the whole
#'   result NA). Pass TRUE to rescale around the observed values only.
#' @return A numeric vector the same length as `x`, with min(x) mapped to 0
#'   and max(x) mapped to 1. A constant vector yields NaN (0 / 0), as before.
normalize <- function(x, na.rm = FALSE) {
  rng <- range(x, na.rm = na.rm)
  (x - rng[1]) / (rng[2] - rng[1])
}
#' Effect size r for a Wilcoxon / Mann-Whitney test (z / sqrt(N)).
#'
#' Converts the two-sided p-value of a wilcox.test result into a z score via
#' qnorm(p / 2) and standardizes by sqrt(N). Prints the result (as before)
#' and additionally returns it invisibly, fixing the original defect that the
#' computed r could not be captured (the function returned cat()'s NULL).
#'
#' @param wilcoxModel An object returned by `wilcox.test()`.
#' @param N Total number of observations underlying the test.
#' @return The effect size r (negative by construction), invisibly.
rFromWilcox <- function(wilcoxModel, N){
  z <- qnorm(wilcoxModel$p.value / 2)
  r <- z / sqrt(N)
  cat(wilcoxModel$data.name, "Effect Size, r = ", r)
  invisible(r)
}
# Load the raw questionnaire responses: read every CSV in 'Questionnaire data'
# into one long data frame (readbulk stacks the files row-wise).
# Fix: use `<-` for assignment, consistent with the rest of this script.
data <- readbulk::read_bulk('Questionnaire data', sep = ';', na.strings = 'NA', stringsAsFactors = FALSE, row.names = NULL)
# Rename the verbose questionnaire item columns to short analysis codes:
#   PC / FR          = perceived control / frustration for the condition overall
#   R_ / T_ / E_     = reeling, tugging, escape sub-items
#   OFR              = overall frustration since the experiment started
#   Compare_*        = within-condition comparison items
#   *_Sham           = items about help from the other character
#   *_AS             = items about the character reeling the fish up two lanes
#   *_AF             = items about the clamp preventing the fish's escape
#   Estimate         = self-rated success likelihood on a 1-100 scale
data <- data %>% rename("ID" = "Participant.no.")
data <- data %>% rename("PC" = "I.felt.in.control.of.the.fisherman.s.actions.")
data <- data %>% rename("R_PC" = "I.felt.in.control.....while.trying.to.reel.in.the.fish..performing.the.key.sequence...")
data <- data %>% rename("T_PC" = "I.felt.in.control..0...when.the.fish.was.tugging.away.from.me..moving.a.column.away...")
data <- data %>% rename("E_PC" = "I.felt.in.control.....when.the.fish.escaped..")
data <- data %>% rename("Compare_PC" = "In.this.condition...")
data <- data %>% rename("FR" = "How.much.frustration.did.you.feel.....during.this.condition..")
data <- data %>% rename("OFR" = "How.much.frustration.did.you.feel.....overall.since.we.started.the.experiment..")
data <- data %>% rename("R_FR" = "How.much.frustration.did.you.feel.....while.you.were.trying.to.reel.in.the.fish..perform.the.key.sequence..")
data <- data %>% rename("T_FR" = "How.much.frustration.did.you.feel.....while.the.fish.was.tugging.away.from.you..moving.a.column.away..")
data <- data %>% rename("E_FR" = "How.much.frustration.did.you.feel.....when.the.fish.escaped..")
data <- data %>% rename("Compare_FR" = "In.this.condition....1")
data <- data %>% rename("Estimate" = "How.likely.do.you.think.you.were.to.succeed.in.reeling.the.fish.up.by.one.lane.in.this.condition..Provide.the.answer.on.a.scale.of.1...100.")
data <- data %>% rename("PC_Sham" = "I.felt.in.control.when.I.got.help.from.the.other.character.")
data <- data %>% rename("FR_Sham" = "I.felt.frustrated.when.I.got.help.from.the.other.character.")
data <- data %>% rename("PC_AS" = "I.felt.in.control.when.when.my.character.reeled.the.fish.up.by.two.lanes.")
data <- data %>% rename("FR_AS" = "I.felt.frustrated.when.my.character.reeled.the.fish.up.by.two.lanes.")
data <- data %>% rename("PC_AF" = "I.felt.in.control.when.the.big.clamp.prevented.the.fish.from.swimming.away.from.me.")
data <- data %>% rename("FR_AF" = "I.felt.frustrated.when.the.big.clamp.prevented.the.fish.from.swimming.away.from.me.")
# Treat a missing blame answer as "neutral", and make Condition a factor
# (levels "1".."4"), which later recoding blocks rely on.
data$Blame <- ifelse(is.na(data$Blame), "neutral", data$Blame)
data$Condition <- as.factor(data$Condition)
# Compare normalized perceived control while reeling (R_PC) against
# participants' normalized success estimates (Mann-Whitney test + plot).
R_PC_var <- c(data$R_PC)
Estimate_var <- c(data$Estimate)
R_PC_var <- normalize(R_PC_var)
Estimate_var <- normalize(Estimate_var)
# Stack both measures into long format. Using each = length(R_PC_var) instead
# of the hard-coded 64 keeps the group labels aligned with the data even if
# the number of rows changes.
R_PC_Estimate_data <- data.frame(coding_var = rep(c("R_PC", "Estimate"), each = length(R_PC_var)),
                                 score = c(R_PC_var, Estimate_var))
# BUG FIX: the original wrapped both sides in as.numeric(). as.numeric() on
# the character grouping column produces NAs (strings are no longer
# auto-converted to factors since R 4.0), which invalidates the two-group
# comparison. Pass the score and the grouping column directly instead.
wilcox.test(R_PC_Estimate_data$score ~ R_PC_Estimate_data$coding_var)
# Jittered points with bootstrapped mean confidence intervals per measure.
# NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 in favor of `fun`;
# kept here because the installed ggplot2 version is unknown.
R_PC_Estimate_boxplot <- ggplot(R_PC_Estimate_data, aes(coding_var, score), inherit.aes = FALSE)
R_PC_Estimate_boxplot + geom_jitter(width = 0.05, height = 0.05) + stat_summary(fun.data = mean_cl_boot, geom = "errorbar", colour = "red") +
  stat_summary(fun.y = mean, geom = "point", colour = "red", size = 4) + labs(x = "", y = "Normalized scores/estimates")
# Success estimates by playthrough order: reshape to wide format (one column
# per playthrough position, one row per participant) and run a Friedman test
# for an ordering effect.
Estimate_Playthrough <- data[, c("ID", "Estimate", "Playthrough_Order")] %>%
  spread(Playthrough_Order, Estimate)
Estimate_Playthrough$ID <- NULL
summary(Estimate_Playthrough)
friedman.test(as.matrix(Estimate_Playthrough))
# Frustration (FR) by playthrough order: jittered boxplot, then a Friedman
# test for an ordering effect on the wide-format data.
FR_Playthrough <- data[, c("ID", "FR", "Playthrough_Order")]
# Playthrough order is plotted as a categorical axis via as.character().
FR_Playthrough_boxplot <- ggplot(FR_Playthrough, aes(as.character(Playthrough_Order), FR))
FR_Playthrough_boxplot +
  geom_boxplot() +
  geom_jitter(width = 0.15, height = 0.1) +
  labs(x = as.character("Playthrough Order"), y = "Frustration")
# Wide format: one column per playthrough position, one row per participant.
FR_Playthrough <- spread(FR_Playthrough, Playthrough_Order, FR)
FR_Playthrough$ID <- NULL
summary(FR_Playthrough)
friedman.test(as.matrix(FR_Playthrough))
# Perceived control (PC) by playthrough order: boxplot, then a Friedman test
# for an ordering / learning effect.
PC_Playthrough <- data[,c("ID","PC","Playthrough_Order")]
# Playthrough order is plotted as a categorical axis via as.character().
PC_Playthrough_boxplot <- ggplot(PC_Playthrough, aes(as.character(Playthrough_Order), PC))
PC_Playthrough_boxplot + geom_boxplot() + labs(x = as.character("Playthrough Order"), y = "Perceived Control")
# Wide format: one column per playthrough position, one row per participant.
PC_Playthrough <- spread(PC_Playthrough,Playthrough_Order, PC)
PC_Playthrough$ID<-NULL
summary(PC_Playthrough)
# Repeated-measures comparison of PC across playthrough positions.
friedman.test(as.matrix(PC_Playthrough))
# Scatterplot of frustration vs perceived control across all four conditions,
# colored/shaped by blame attribution, with one regression line per group.
scatter_FR_PC_data <- data[,c("Blame", "Condition","PC","FR")]
# Two-step recode of the factor column Condition into readable labels:
# assigning a non-level string into a factor yields NA (with a warning), and
# the following ifelse() backfills those NAs with the label while coercing
# the column. NOTE(review): ifelse() on a factor column is fragile — the
# non-NA branch's values depend on ifelse()'s coercion rules and may come
# back as codes rather than labels in some R versions; verify the resulting
# labels before relying on them downstream.
# NOTE(review): the backfill labels below do not match the assigned labels
# for conditions 2-4 (e.g. "2.Sham" assigned but "1.Sham" backfilled) —
# confirm which naming is intended.
scatter_FR_PC_data$Condition[which(scatter_FR_PC_data$Condition == "1")] = as.character("4.Control")
scatter_FR_PC_data$Condition<-ifelse(is.na(scatter_FR_PC_data$Condition),"4.Control",scatter_FR_PC_data$Condition)
scatter_FR_PC_data$Condition[which(scatter_FR_PC_data$Condition == "2")] = as.character("2.Sham")
scatter_FR_PC_data$Condition<-ifelse(is.na(scatter_FR_PC_data$Condition),"1.Sham",scatter_FR_PC_data$Condition)
scatter_FR_PC_data$Condition[which(scatter_FR_PC_data$Condition == "3")] = as.character("3.AS")
scatter_FR_PC_data$Condition<-ifelse(is.na(scatter_FR_PC_data$Condition),"2.AS",scatter_FR_PC_data$Condition)
scatter_FR_PC_data$Condition[which(scatter_FR_PC_data$Condition == "4")] = as.character("4.AF")
scatter_FR_PC_data$Condition<-ifelse(is.na(scatter_FR_PC_data$Condition),"3.AF",scatter_FR_PC_data$Condition)
# Per-blame-group regression lines (geom_smooth fits FR on PC).
scatter_FR_PC <-ggplot(scatter_FR_PC_data, aes(PC, FR, color=Blame, shape=Blame))
scatter_FR_PC + geom_point() + xlim(1,7) + ylim(1,7) + geom_smooth(method=lm, se=FALSE) +geom_jitter(width = .1)+ labs(x="Perceived Control", y="Frustration")+theme_bw()
# Linear regression of perceived control on frustration (PC ~ FR), overall
# and within subgroups. lm.beta() (QuantPsyc) reports the standardized
# coefficient. NOTE(review): the scatterplots above draw PC on x and FR on y,
# so their geom_smooth lines fit FR ~ PC — the opposite direction of these
# models; confirm which direction is intended for reporting.
FR_PC.lm = lm(PC ~ FR, data=scatter_FR_PC_data)
summary(FR_PC.lm)
lm.beta(FR_PC.lm)
# Sham condition only. NOTE(review): this filter matches the label "2.Sham"
# produced by the recode block above — it silently drops everything if the
# recode yields different labels.
scatter_FR_PC_Sham_data<- scatter_FR_PC_data%>%filter(Condition=="2.Sham")
FR_PC.lm = lm(PC ~ FR, data=scatter_FR_PC_Sham_data)
summary(FR_PC.lm)
lm.beta(FR_PC.lm)
# Participants who blamed themselves.
scatter_FR_PC_Self_data<- scatter_FR_PC_data%>%filter(Blame=="Self")
FR_PC.lm = lm(PC ~ FR, data=scatter_FR_PC_Self_data)
summary(FR_PC.lm)
lm.beta(FR_PC.lm)
# Participants who blamed the system.
scatter_FR_PC_System_data<- scatter_FR_PC_data%>%filter(Blame=="System")
FR_PC.lm = lm(PC ~ FR, data=scatter_FR_PC_System_data)
summary(FR_PC.lm)
lm.beta(FR_PC.lm)
# Participants with no blame attribution (recoded "neutral").
scatter_FR_PC_Neutral_data<- scatter_FR_PC_data%>%filter(Blame=="neutral")
FR_PC.lm = lm(PC ~ FR, data=scatter_FR_PC_Neutral_data)
summary(FR_PC.lm)
lm.beta(FR_PC.lm)
# Frustration vs perceived control for the PAM-specific questionnaire items,
# within the three PAM conditions (Sham, AS, AF).
scatter_data_PAM <- data[,c("Condition","PC_Sham","FR_Sham","PC_AS","FR_AS","PC_AF","FR_AF")]
# Drop the first 16 rows. NOTE(review): presumably these are the control
# condition (no PAM items) — confirm the row ordering assumption.
scatter_data_PAM<-scatter_data_PAM[-c(1:16),]
# Coalesce the three per-PAM columns into one: each row answered exactly one
# PAM's items, so take the AS/AF value where present, else keep the Sham one.
scatter_data_PAM$PC_Sham<-ifelse(is.na(scatter_data_PAM$PC_AS), scatter_data_PAM$PC_Sham, scatter_data_PAM$PC_AS)
scatter_data_PAM$PC_AS<-NULL
scatter_data_PAM$PC_Sham<-ifelse(is.na(scatter_data_PAM$PC_AF), scatter_data_PAM$PC_Sham, scatter_data_PAM$PC_AF)
scatter_data_PAM$PC_AF<-NULL
scatter_data_PAM$FR_Sham<-ifelse(is.na(scatter_data_PAM$FR_AS), scatter_data_PAM$FR_Sham, scatter_data_PAM$FR_AS)
scatter_data_PAM$FR_AS<-NULL
scatter_data_PAM$FR_Sham<-ifelse(is.na(scatter_data_PAM$FR_AF), scatter_data_PAM$FR_Sham, scatter_data_PAM$FR_AF)
scatter_data_PAM$FR_AF<-NULL
scatter_data_PAM <- scatter_data_PAM%>%rename("FR" = "FR_Sham")
scatter_data_PAM <- scatter_data_PAM %>% rename("PC" = "PC_Sham")
# Two-step factor recode into readable labels (assign non-level -> NA, then
# backfill via ifelse). NOTE(review): ifelse() on a factor column is fragile;
# verify the resulting labels match the "1.Sham"/"2.AS"/"3.AF" strings that
# the filter below depends on.
scatter_data_PAM$Condition[which(scatter_data_PAM$Condition == "2")] = as.character("1.Sham")
scatter_data_PAM$Condition<-ifelse(is.na(scatter_data_PAM$Condition),"1.Sham",scatter_data_PAM$Condition)
scatter_data_PAM$Condition[which(scatter_data_PAM$Condition == "3")] = as.character("2.AS")
scatter_data_PAM$Condition<-ifelse(is.na(scatter_data_PAM$Condition),"2.AS",scatter_data_PAM$Condition)
scatter_data_PAM$Condition[which(scatter_data_PAM$Condition == "4")] = as.character("3.AF")
scatter_data_PAM$Condition<-ifelse(is.na(scatter_data_PAM$Condition),"3.AF",scatter_data_PAM$Condition)
# Scatter with one regression line per PAM condition.
scatter_data_PAM_plot <-ggplot(scatter_data_PAM , aes(PC, FR, color=Condition, shape=Condition))
scatter_data_PAM_plot + geom_point() + xlim(1,7) + ylim(1,7) + geom_smooth(method=lm, se=FALSE) +geom_jitter(width = .1)+ labs(x="Perceived Control", y="Frustration")+theme_bw()
# Regression of PC on FR across all PAM items (standardized beta via lm.beta).
FR_PC_PAM.lm = lm(PC ~ FR, data=scatter_data_PAM)
summary(FR_PC_PAM.lm)
lm.beta(FR_PC_PAM.lm)
# Same regression restricted to the Sham PAM.
scatter_Sham_data<- scatter_data_PAM%>%filter(Condition=="1.Sham")
FR_PC.lm = lm(PC ~ FR, data=scatter_Sham_data)
summary(FR_PC.lm)
lm.beta(FR_PC.lm)
# Per-PAM scatterplots of perceived control vs frustration, one per PAM item
# set; rows without an answer for that PAM are dropped.
# Sham help items.
scatter_FR_PC_Sham_data <- data[, c("Condition", "PC_Sham", "FR_Sham")] %>%
  filter(!is.na(FR_Sham))
scatter_FR_PC_Sham <- ggplot(scatter_FR_PC_Sham_data, aes(PC_Sham, FR_Sham))
scatter_FR_PC_Sham +
  geom_point() +
  xlim(1, 7) +
  ylim(1, 7) +
  geom_smooth(method = lm, se = FALSE) +
  geom_jitter(width = .1) +
  labs(x = "Perceived Control", y = "Frustration") +
  theme_bw()
# AS (reeled up two lanes) items.
scatter_FR_PC_AS_data <- data[, c("Condition", "PC_AS", "FR_AS")] %>%
  filter(!is.na(FR_AS))
scatter_FR_PC_AS <- ggplot(scatter_FR_PC_AS_data, aes(PC_AS, FR_AS))
scatter_FR_PC_AS +
  geom_point() +
  xlim(1, 7) +
  ylim(1, 7) +
  geom_smooth(method = lm, se = FALSE) +
  geom_jitter(width = .1) +
  labs(x = "Perceived control", y = "Frustration") +
  theme_bw()
# AF (clamp prevented escape) items.
scatter_FR_PC_AF_data <- data[, c("Condition", "PC_AF", "FR_AF")] %>%
  filter(!is.na(FR_AF))
scatter_FR_PC_AF <- ggplot(scatter_FR_PC_AF_data, aes(PC_AF, FR_AF))
scatter_FR_PC_AF +
  geom_point() +
  xlim(1, 7) +
  ylim(1, 7) +
  geom_smooth(method = lm, se = FALSE) +
  geom_jitter(width = .1) +
  labs(x = "Perceived control", y = "Frustration") +
  theme_bw()
# Does frustration with the PAMs depend on blame attribution, within each
# individual PAM? "neutral" responses are excluded so each Mann-Whitney test
# compares Self vs System.
Blame_FR_PAM <- data[,c("ID","Condition", "Blame", "FR_Sham", "FR_AS", "FR_AF")]
# Coalesce the three per-PAM frustration columns into FR_Sham: keep the
# AS/AF value where present, else the Sham one (each row has at most one).
Blame_FR_PAM$FR_Sham<-ifelse(is.na(Blame_FR_PAM$FR_AS), Blame_FR_PAM$FR_Sham, Blame_FR_PAM$FR_AS)
Blame_FR_PAM$FR_AS<-NULL
Blame_FR_PAM$FR_Sham<-ifelse(is.na(Blame_FR_PAM$FR_AF), Blame_FR_PAM$FR_Sham, Blame_FR_PAM$FR_AF)
Blame_FR_PAM$FR_AF<-NULL
#Blame_FR <- Blame_FR%>%filter(!is.na(FR_Sham))%>%pivot_wider(names_from = "Blame", values_from = "FR_Sham")
# Columns 2:4 are Condition, Blame, FR_Sham (ID dropped); one subset per
# PAM condition, renamed so the printed test output shows the right PAM.
B2<-Blame_FR_PAM[,c(2,3,4)]%>%filter(!Blame=="neutral" & Condition=="2")
B3<-Blame_FR_PAM[,c(2,3,4)]%>%filter(!Blame=="neutral" & Condition=="3")
B3 <- B3 %>% rename("FR_AS" = "FR_Sham")
B4<-Blame_FR_PAM[,c(2,3,4)]%>%filter(!Blame=="neutral" & Condition=="4")
B4 <- B4 %>% rename("FR_AF" = "FR_Sham")
# Mann-Whitney tests of PAM frustration by blame group, per condition.
wilcox.test(as.numeric(B2$FR_Sham) ~ B2$Blame)
wilcox.test(as.numeric(B3$FR_AS) ~ B3$Blame)
wilcox.test(as.numeric(B4$FR_AF) ~ B4$Blame)
# Does overall frustration differ by blame attribution (Self vs System)?
# Tested separately within each condition; "neutral" responses are excluded
# so every Mann-Whitney test compares exactly two groups. The wilcox.test()
# calls are kept verbatim so their printed data names stay unchanged.
Blame_FR <- data[, c("Condition", "Blame", "FR")]
# Condition 1: Control
B_FR1 <- Blame_FR %>% filter(Blame != "neutral", Condition == "1")
wilcox.test(as.numeric(B_FR1$FR) ~ B_FR1$Blame)
# Condition 2: Sham
B_FR2 <- Blame_FR %>% filter(Blame != "neutral", Condition == "2")
wilcox.test(as.numeric(B_FR2$FR) ~ B_FR2$Blame)
# Condition 3: AS
B_FR3 <- Blame_FR %>% filter(Blame != "neutral", Condition == "3")
wilcox.test(as.numeric(B_FR3$FR) ~ B_FR3$Blame)
# Condition 4: AF
B_FR4 <- Blame_FR %>% filter(Blame != "neutral", Condition == "4")
wilcox.test(as.numeric(B_FR4$FR) ~ B_FR4$Blame)
# Does perceived control with the PAMs depend on blame attribution, within
# each individual PAM? "neutral" responses are excluded (Self vs System).
Blame_PC_PAM <- data[,c("ID","Condition", "Blame", "PC_Sham", "PC_AS", "PC_AF")]
# Coalesce the three per-PAM perceived-control columns into PC_Sham.
Blame_PC_PAM$PC_Sham<-ifelse(is.na(Blame_PC_PAM$PC_AS), Blame_PC_PAM$PC_Sham, Blame_PC_PAM$PC_AS)
Blame_PC_PAM$PC_AS<-NULL
Blame_PC_PAM$PC_Sham<-ifelse(is.na(Blame_PC_PAM$PC_AF), Blame_PC_PAM$PC_Sham, Blame_PC_PAM$PC_AF)
Blame_PC_PAM$PC_AF<-NULL
# Columns 2:4 are Condition, Blame, PC_Sham (ID dropped); one subset per
# PAM condition, renamed so the printed test output names the right PAM.
C2<-Blame_PC_PAM[,c(2,3,4)]%>%filter(!Blame=="neutral" & Condition=="2")
C3<-Blame_PC_PAM[,c(2,3,4)]%>%filter(!Blame=="neutral" & Condition=="3")
C3 <- C3 %>% rename("PC_AS" = "PC_Sham")
C4<-Blame_PC_PAM[,c(2,3,4)]%>%filter(!Blame=="neutral" & Condition=="4")
C4 <- C4 %>% rename("PC_AF" = "PC_Sham")
# Mann-Whitney tests of PAM perceived control by blame group, per condition.
wilcox.test(as.numeric(C2$PC_Sham) ~ C2$Blame)
wilcox.test(as.numeric(C3$PC_AS) ~ C3$Blame)
wilcox.test(as.numeric(C4$PC_AF) ~ C4$Blame)
# Does perceived control differ by blame attribution (Self vs System)?
# Tested separately within each condition; "neutral" responses are excluded.
# The wilcox.test() calls are kept verbatim so their printed data names
# stay unchanged.
Blame_PC <- data[, c("ID", "Condition", "Blame", "PC")]
# Condition 1: Control (ID column dropped before testing)
B_PC1 <- Blame_PC %>% select(Condition, Blame, PC) %>% filter(Blame != "neutral", Condition == "1")
wilcox.test(as.numeric(B_PC1$PC) ~ B_PC1$Blame)
# Condition 2: Sham
B_PC2 <- Blame_PC %>% select(Condition, Blame, PC) %>% filter(Blame != "neutral", Condition == "2")
wilcox.test(as.numeric(B_PC2$PC) ~ B_PC2$Blame)
# Condition 3: AS
B_PC3 <- Blame_PC %>% select(Condition, Blame, PC) %>% filter(Blame != "neutral", Condition == "3")
wilcox.test(as.numeric(B_PC3$PC) ~ B_PC3$Blame)
# Condition 4: AF
B_PC4 <- Blame_PC %>% select(Condition, Blame, PC) %>% filter(Blame != "neutral", Condition == "4")
wilcox.test(as.numeric(B_PC4$PC) ~ B_PC4$Blame)
# Friedman test: did blame attribution change with playthrough order?
#Data prep
Blame_Conditions_or_Playthrough <- data[,c("ID","Blame","Playthrough_Order","Condition")]
# Map the blame labels onto a numeric score: System = -1, neutral = 0,
# Self = +1 (assignments below produce strings, converted afterwards).
Blame_Conditions_or_Playthrough$Blame[Blame_Conditions_or_Playthrough$Blame == "neutral"] <- 0
Blame_Conditions_or_Playthrough$Blame[Blame_Conditions_or_Playthrough$Blame == "Self"] <- 1
Blame_Conditions_or_Playthrough$Blame[Blame_Conditions_or_Playthrough$Blame == "System"] <- -1
Blame_Conditions_or_Playthrough$Blame<-as.numeric(Blame_Conditions_or_Playthrough$Blame)
# Condition is irrelevant for the order analysis.
Blame_Conditions_or_Playthrough$Condition<-NULL
# Wide format: one column per playthrough position, one row per participant.
Blame_Conditions_or_Playthrough <- spread(Blame_Conditions_or_Playthrough,Playthrough_Order, Blame)
Blame_Conditions_or_Playthrough$ID<-NULL
friedman.test(as.matrix(Blame_Conditions_or_Playthrough))
# Post-hoc multiple comparisons (pgirmess::friedmanmc).
friedmanmc(as.matrix(Blame_Conditions_or_Playthrough))
# Friedman test: did blame attribution differ between conditions?
#Data prep (same numeric recode as above, but keeping Condition)
Blame_Conditions_or_Playthrough <- data[,c("ID","Blame","Playthrough_Order","Condition")]
Blame_Conditions_or_Playthrough$Blame[Blame_Conditions_or_Playthrough$Blame == "neutral"] <- 0
Blame_Conditions_or_Playthrough$Blame[Blame_Conditions_or_Playthrough$Blame == "Self"] <- 1
Blame_Conditions_or_Playthrough$Blame[Blame_Conditions_or_Playthrough$Blame == "System"] <- -1
Blame_Conditions_or_Playthrough$Blame<-as.numeric(Blame_Conditions_or_Playthrough$Blame)
# Playthrough order is irrelevant for the condition analysis.
Blame_Conditions_or_Playthrough$Playthrough_Order<-NULL
# Wide format: one column per condition, one row per participant.
Blame_Conditions_or_Playthrough <- spread(Blame_Conditions_or_Playthrough,Condition, Blame)
Blame_Conditions_or_Playthrough$ID<-NULL
friedman.test(as.matrix(Blame_Conditions_or_Playthrough))
friedmanmc(as.matrix(Blame_Conditions_or_Playthrough))
# Paired follow-up: blame scores in condition "2" vs condition "3"
# (wide columns are named after the factor levels).
Blame_Condition_wilcox_model <- wilcox.test(Blame_Conditions_or_Playthrough$"2", Blame_Conditions_or_Playthrough$"3", paired=TRUE, correct=FALSE)
Blame_Condition_wilcox_model
# Effect size via rFromWilcox. NOTE(review): N = 32 is presumably
# 16 participants x 2 paired measurements — confirm.
rFromWilcox(Blame_Condition_wilcox_model, 32)
# Boxplots of blame scores per condition.
# Recode blame labels to a numeric score: System = -1, neutral = 0, Self = +1.
Blame_Conditions <- data[, c("Blame", "Condition")]
Blame_Conditions$Blame[Blame_Conditions$Blame == "neutral"] <- 0
Blame_Conditions$Blame[Blame_Conditions$Blame == "Self"] <- 1
Blame_Conditions$Blame[Blame_Conditions$Blame == "System"] <- -1
Blame_Conditions$Blame <- as.numeric(Blame_Conditions$Blame)
# One data frame per condition, Condition column dropped after splitting.
BC1 <- Blame_Conditions %>% filter(Condition == "1")
BC1$Condition <- NULL
BC2 <- Blame_Conditions %>% filter(Condition == "2")
BC2$Condition <- NULL
BC3 <- Blame_Conditions %>% filter(Condition == "3")
BC3$Condition <- NULL
BC4 <- Blame_Conditions %>% filter(Condition == "4")
BC4$Condition <- NULL
BC1_var <- c(BC1$Blame)
BC2_var <- c(BC2$Blame)
BC3_var <- c(BC3$Blame)
BC4_var <- c(BC4$Blame)
# Long format with readable condition labels (16 participants per condition).
Blame_Conditions_long <- data.frame(
  coding_var = rep(c("1 - Control", "2 - Sham", "3 - AS", "4 - AF"), each = 16),
  score = c(BC1_var, BC2_var, BC3_var, BC4_var)
)
Blame_Conditions <- ggplot(Blame_Conditions_long, aes(coding_var, score), inherit.aes = FALSE)
Blame_Conditions +
  geom_boxplot() +
  geom_jitter(width = 0.3, height = 0.05) +
  labs(x = "", y = "Blame scores")
|
39aa0550211d0195e561c3e9a97de29e95acb03c | 9d6098e1d565e569c77ac61806a6a7386ebe84bc | /Individual_assignment_movie_sentiment_cloud.R | b7df14b0c82bcfb1c6cef2d2c5ba026efe51cf55 | [] | no_license | yolonda520/Movies_analysis_R | 33a8ea841e4ba34115dcd381efdcabdaf33bfdcd | fe6d7dafe102d825ed0929e4885ac4042fcda511 | refs/heads/master | 2022-04-24T20:36:04.213919 | 2020-04-28T02:00:31 | 2020-04-28T02:00:31 | 259,476,175 | 0 | 0 | null | 2020-04-27T23:46:47 | 2020-04-27T22:59:37 | R | UTF-8 | R | false | false | 2,110 | r | Individual_assignment_movie_sentiment_cloud.R | library(magrittr)
library(rvest)
library(dplyr)
library(tidyverse)
library(tidytext)
library(stringr)
library(ggplot2)
library(reshape2)
library(wordcloud)
library(tidyverse)
library(tidyr)
library(wordcloud)
Joker <- xml2::read_html("https://www.imdb.com/title/tt7286456/reviews?ref_=tt_urv")
Joker_review <- Joker %>%
html_nodes('.text') %>%
html_text()
#View(Joker_review)
The_Dark_Knight <-xml2::read_html("https://www.imdb.com/title/tt0468569/reviews?ref_=tt_urv")
The_Dark_Knight_review <- The_Dark_Knight %>%
html_nodes('.text') %>%
html_text()
#View(The_Dark_Knight_review)
df_Joker <- data_frame(id=1:25, text=Joker_review)
#View(df_Joker)
df_The_Dark_Knight <- data_frame(id=1:25, text= The_Dark_Knight_review)
#View(df_The_Dark_Knight)
cust_stop <- data_frame(
word=c("movie","film","movies"),
lexicon=rep("custom",each=3)
)
afinn <- get_sentiments("afinn")
nrc <- get_sentiments("nrc")
bing <- get_sentiments("bing")
# Joker_review_frequencies <- df_Joker %>%
# unnest_tokens(word, text)%>%
# anti_join(stop_words) %>%
# anti_join(cust_stop) %>%
# inner_join(get_sentiments("nrc")) %>% #pizza flavor word cloud
# count(word, sentiment, sort=TRUE) %>%
# acast(word ~sentiment, value.var="n", fill=0) %>%
# comparison.cloud(colors = c("black", "red","pink","yellow","orange","grey","blue","green"),
# max.words=100,
# scale = c(0.8,0.8),
# fixed.asp=TRUE, #True将长宽比例固定
# title.size=1
# )
The_Dark_Knight_review_frequencies <- df_The_Dark_Knight %>%
unnest_tokens(word, text)%>%
anti_join(stop_words) %>%
anti_join(cust_stop)%>%
inner_join(get_sentiments("nrc")) %>% #pizza flavor word cloud
count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>%
comparison.cloud(colors = c("black", "red","pink","yellow","orange","grey","blue","green"),
max.words=100,
scale = c(0.8,0.8),
fixed.asp=TRUE, #True将长宽比例固定
title.size=1
)
|
a4cd70e19460d6a61d9792cccebece4131a165f5 | f991c879a03a6e5a81141ae59a4fca3a708a55b9 | /R/05_projections_functions.R | 226f90717d98f82b6ff7311c5df9affcc0ecba2d | [
"MIT"
] | permissive | SC-COSMO/sccosmomcma | d1469077b9182d803320ca7ca89b4851a5e81a0f | 0e051ae94a2ff0641a9fee09d00097205ea19748 | refs/heads/main | 2023-04-14T03:12:50.174840 | 2021-10-11T22:52:25 | 2021-10-11T22:52:25 | 371,769,010 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 81,784 | r | 05_projections_functions.R | #' Generates interventions input list for performing projections
#' with the SC-COSMO model
#'
#' \code{get_projection_scenarios} generates interventions input
#' list for performing projections with the SC-COSMO model
#' for selected state.
#' @param n_t Simulation total time.
#' @param v_soc_dist_factor Vector with social distancing multipliers for the
#' different mobility segments calibrated to.
#' @param v_mean_soc_dist_factor Vector with mean social distancing multipliers
#' for different mobility segments calibrated to.
#' @param v_n_date0_NPI Vector with the time steps (0 = \code{date_init}) at
#' which effect of NPI changed in the calibration period.
#' @param date_proj0 the time step (0 = \code{date_init}) where projection starts
#' (calibration period is over).
#' @return
#' A list of named (scenarios) of intervention lists formatted for
#' input into the SC-COSMO model.
#' @export
get_projection_scenarios <- function(n_t ,
v_soc_dist_factor,
v_mean_soc_dist_factor,
v_n_date0_NPI,
date_proj0) {
v_ind_red_factor <- 1 - v_mean_soc_dist_factor
l_projection_scenarios <- list()
# No Intervention ---------------------------------------------------------
# ## Begin from calibration start
# i1 <- make_intervention(intervention_type = "StatusQuo",
# time_start = 0,
# time_stop = v_n_date0_NPI[1] + n_lag_inf)
#
# i2 <- make_intervention(intervention_type = "SocialDistancing",
# time_start = v_n_date0_NPI[1] + n_lag_inf,
# time_stop = v_n_date0_NPI[2] + n_lag_inf,
# intervention_factor = 1,
# intervention_change_rate = 0.5,
# resume_school = FALSE)
#
# i3 <- make_intervention(intervention_type = "SocialDistancing",
# time_start = v_n_date0_NPI[2] + n_lag_inf,
# time_stop = v_n_date0_NPI[3] + n_lag_inf,
# intervention_factor = 1,
# intervention_change_rate = 0.5,
# resume_school = FALSE)
#
# i4 <- make_intervention(intervention_type = "SocialDistancing",
# time_start = v_n_date0_NPI[3] + n_lag_inf,
# time_stop = v_n_date0_NPI[4] + n_lag_inf,
# intervention_factor = 1,
# intervention_change_rate = 0.5,
# resume_school = FALSE)
#
# i5 <- make_intervention(intervention_type = "SocialDistancing",
# time_start = v_n_date0_NPI[4] + n_lag_inf,
# time_stop = v_n_date0_NPI[5] + n_lag_inf,
# intervention_factor = 1,
# intervention_change_rate = 0.5,
# resume_school = FALSE)
#
# i6 <- make_intervention(intervention_type = "SocialDistancing",
# time_start = v_n_date0_NPI[5] + n_lag_inf,
# time_stop = date_proj0 + n_lag_inf,
# intervention_factor = 1,
# intervention_change_rate = 0.5,
# resume_school = FALSE)
#
# ## Start projection
# i7 <- make_intervention(intervention_type = "SocialDistancing",
# time_start = date_proj0 + n_lag_inf,
# time_stop = n_t + 1,
# intervention_factor = 1,
# intervention_change_rate = 0.5,
# resume_school = FALSE)
#
# l_interventions <- add_intervention(interventions = NULL, intervention = i1)
# l_interventions <- add_intervention(interventions = l_interventions, intervention = i2)
# l_interventions <- add_intervention(interventions = l_interventions, intervention = i3)
# l_interventions <- add_intervention(interventions = l_interventions, intervention = i4)
# l_interventions <- add_intervention(interventions = l_interventions, intervention = i5)
# l_interventions <- add_intervention(interventions = l_interventions, intervention = i6)
# l_interventions <- add_intervention(interventions = l_interventions, intervention = i7)
#
# name_int <- "No NPIs implemented"
#
# l_projection_scenarios[[name_int]] <- l_interventions
#
# Base Case ---------------------------------------------------------------
## Begin from calibration start
i1 <- make_intervention(intervention_type = "StatusQuo",
time_start = 0,
time_stop = v_n_date0_NPI[1] + n_lag_inf)
i2 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[1] + n_lag_inf, # date_sd0
time_stop = v_n_date0_NPI[2] + n_lag_inf,
intervention_factor = v_soc_dist_factor[1],
intervention_change_rate = 0.5,
resume_school = FALSE)
i3 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[2] + n_lag_inf,
time_stop = v_n_date0_NPI[3] + n_lag_inf,
intervention_factor = v_soc_dist_factor[2],
intervention_change_rate = 0.5,
resume_school = FALSE)
i4 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[3] + n_lag_inf,
time_stop = v_n_date0_NPI[4] + n_lag_inf,
intervention_factor = v_soc_dist_factor[3],
intervention_change_rate = 0.5,
resume_school = FALSE)
i5 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[4] + n_lag_inf,
time_stop = v_n_date0_NPI[5] + n_lag_inf,
intervention_factor = v_soc_dist_factor[4],
intervention_change_rate = 0.5,
resume_school = FALSE)
i6 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[5] + n_lag_inf,
time_stop = date_proj0 + n_lag_inf,
intervention_factor = v_soc_dist_factor[5],
intervention_change_rate = 0.5,
resume_school = FALSE)
## Start projection
i7 <- make_intervention(intervention_type = "SocialDistancing",
time_start = date_proj0 + n_lag_inf,
time_stop = n_t + 1,
intervention_factor = v_soc_dist_factor[5],
intervention_change_rate = 0.5,
resume_school = FALSE)
l_interventions <- add_intervention(interventions = NULL, intervention = i1)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i2)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i3)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i4)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i5)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i6)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i7)
name_int <- "Social distancing: status quo; Schooling: not in-person; Holiday bump: no"
l_projection_scenarios[[name_int]] <- l_interventions
# Interventions -----------------------------------------------------------
# Create vector with effective contact rates
v_ind_red_factor_proj <- c(BaseCase = as.numeric(v_soc_dist_factor[5]), # continue with estimated SD
IncreaseSD = as.numeric(min(v_soc_dist_factor)) # increase SD by applying the min SD observed
)
school_factor <- as.numeric(v_soc_dist_factor[5])
## Scenario: STATUS QUO --------------------------------------------------
for(int_factor in v_ind_red_factor_proj){ # int_factor = v_soc_dist_factor[5]
name_int_red_factor <- names(v_ind_red_factor_proj)[which(v_ind_red_factor_proj == int_factor)]
for(resume_school in c(T,F)){ # resume_school = T
if(resume_school == F & name_int_red_factor == "BaseCase"){
next
}
### Intervention: resume_school and/or social distancing
n_date_NPI_proj <- "2021-01-10"
n_date0_NPI_proj <- as.numeric(difftime(as.Date(n_date_NPI_proj),
n_date_ini, units = "days"))
i1 <- make_intervention(intervention_type = "StatusQuo",
time_start = 0,
time_stop = v_n_date0_NPI[1] + n_lag_inf)
i2 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[1] + n_lag_inf, # date_sd0
time_stop = v_n_date0_NPI[2] + n_lag_inf,
intervention_factor = v_soc_dist_factor[1],
intervention_change_rate = 0.5,
resume_school = FALSE)
i3 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[2] + n_lag_inf,
time_stop = v_n_date0_NPI[3] + n_lag_inf,
intervention_factor = v_soc_dist_factor[2],
intervention_change_rate = 0.5,
resume_school = FALSE)
i4 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[3] + n_lag_inf,
time_stop = v_n_date0_NPI[4] + n_lag_inf,
intervention_factor = v_soc_dist_factor[3],
intervention_change_rate = 0.5,
resume_school = FALSE)
i5 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[4] + n_lag_inf,
time_stop = v_n_date0_NPI[5] + n_lag_inf,
intervention_factor = v_soc_dist_factor[4],
intervention_change_rate = 0.5,
resume_school = FALSE)
i6 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[5] + n_lag_inf,
time_stop = date_proj0 + n_lag_inf,
intervention_factor = v_soc_dist_factor[5],
intervention_change_rate = 0.5,
resume_school = FALSE)
## Start projection
# Continue with social distancing until 2021-01-11
i7 <- make_intervention(intervention_type = "SocialDistancing",
time_start = date_proj0 + n_lag_inf,
time_stop = n_date0_NPI_proj + n_lag_inf,
intervention_factor = v_soc_dist_factor[5],
intervention_change_rate = 0.5,
resume_school = FALSE)
if(resume_school){
# Add interventions
i8 <- make_intervention(intervention_type = "SocialDistancing",
time_start = n_date0_NPI_proj + n_lag_inf,
time_stop = n_t + 1,
intervention_factor = int_factor,
intervention_change_rate = 0.5,
resume_school = resume_school,
school_intervention_factor = school_factor)
l_interventions <- add_intervention(interventions = NULL, intervention = i1)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i2)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i3)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i4)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i5)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i6)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i7)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i8)
if(name_int_red_factor == "IncreaseSD"){
name_int <- "Social distancing: stricter; Schooling: in-person; Holiday bump: no"
}
if(name_int_red_factor == "BaseCase"){
name_int <- "Social distancing: status quo; Schooling: in-person; Holiday bump: no"
}
l_projection_scenarios[[name_int]] <- l_interventions
}else{
# Add interventions: school and/or work at 50% and 75%
i8 <- make_intervention(intervention_type = "SocialDistancing",
time_start = n_date0_NPI_proj + n_lag_inf,
time_stop = n_t + 1,
intervention_factor = int_factor,
intervention_change_rate = 0.5,
resume_school = resume_school)
l_interventions <- add_intervention(interventions = NULL, intervention = i1)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i2)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i3)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i4)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i5)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i6)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i7)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i8)
if(name_int_red_factor == "IncreaseSD"){
name_int <- "Social distancing: stricter; Schooling: not in-person; Holiday bump: no"
}
if(name_int_red_factor == "BaseCase"){
name_int <- "Social distancing: status quo; Schooling: not in-person; Holiday bump: no"
}
l_projection_scenarios[[name_int]] <- l_interventions
}
}
}
## Scenario: increase in contacts - HOLIDAYS ------------------------------
for(int_factor in v_ind_red_factor_proj){
name_int_red_factor <- names(v_ind_red_factor_proj)[which(v_ind_red_factor_proj == int_factor)]
for(resume_school in c(T,F)){
### Intervention: resume_school and/or social distancing
n_date_NPI_proj <- "2021-01-10"
n_date0_NPI_proj <- as.numeric(difftime(as.Date(n_date_NPI_proj),
n_date_ini, units = "days"))
### Intervention: increase effective contacts on holidays
sd_holidays <- min(max(v_soc_dist_factor[5]+0.30, 0),1)
n_date_holidays_init <- "2020-12-24"
n_date0_holidays_init <- as.numeric(difftime(as.Date(n_date_holidays_init),
n_date_ini, units = "days"))
n_date_holidays_end <- "2021-01-06"
n_date0_holidays_end <- as.numeric(difftime(as.Date(n_date_holidays_end),
n_date_ini, units = "days"))
i1 <- make_intervention(intervention_type = "StatusQuo",
time_start = 0,
time_stop = v_n_date0_NPI[1] + n_lag_inf)
i2 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[1] + n_lag_inf, # date_sd0
time_stop = v_n_date0_NPI[2] + n_lag_inf,
intervention_factor = v_soc_dist_factor[1],
intervention_change_rate = 0.5,
resume_school = FALSE)
i3 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[2] + n_lag_inf,
time_stop = v_n_date0_NPI[3] + n_lag_inf,
intervention_factor = v_soc_dist_factor[2],
intervention_change_rate = 0.5,
resume_school = FALSE)
i4 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[3] + n_lag_inf,
time_stop = v_n_date0_NPI[4] + n_lag_inf,
intervention_factor = v_soc_dist_factor[3],
intervention_change_rate = 0.5,
resume_school = FALSE)
i5 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[4] + n_lag_inf,
time_stop = v_n_date0_NPI[5] + n_lag_inf,
intervention_factor = v_soc_dist_factor[4],
intervention_change_rate = 0.5,
resume_school = FALSE)
i6 <- make_intervention(intervention_type = "SocialDistancing",
time_start = v_n_date0_NPI[5] + n_lag_inf,
time_stop = date_proj0 + n_lag_inf,
intervention_factor = v_soc_dist_factor[5],
intervention_change_rate = 0.5,
resume_school = FALSE)
## Start projection
# Continue with social distancing until 2020-12-24
i7 <- make_intervention(intervention_type = "SocialDistancing",
time_start = date_proj0 + n_lag_inf,
time_stop = n_date0_holidays_init + n_lag_inf,
intervention_factor = v_soc_dist_factor[5],
intervention_change_rate = 0.5,
resume_school = FALSE)
# Decrease SD on holidays from 2020-12-24 to 2021-01-06
i8 <- make_intervention(intervention_type = "SocialDistancing",
time_start = n_date0_holidays_init + n_lag_inf,
time_stop = n_date0_holidays_end + n_lag_inf,
intervention_factor = sd_holidays,
intervention_change_rate = 0.5,
resume_school = FALSE)
# Return to estimated SD at 12/07 from 2021-01-06 to 2021-01-10
i9 <- make_intervention(intervention_type = "SocialDistancing",
time_start = n_date0_holidays_end + n_lag_inf,
time_stop = n_date0_NPI_proj + n_lag_inf,
intervention_factor = v_soc_dist_factor[5],
intervention_change_rate = 0.5,
resume_school = FALSE)
if(resume_school){
# Add interventions: school and/or work at 50% and 75%
i10 <- make_intervention(intervention_type = "SocialDistancing",
time_start = n_date0_NPI_proj + n_lag_inf,
time_stop = n_t + 1,
intervention_factor = int_factor,
intervention_change_rate = 0.5,
resume_school = resume_school,
school_intervention_factor = school_factor)
l_interventions <- add_intervention(interventions = NULL, intervention = i1)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i2)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i3)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i4)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i5)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i6)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i7)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i8)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i9)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i10)
if(name_int_red_factor == "IncreaseSD"){
name_int <- "Social distancing: stricter; Schooling: in-person; Holiday bump: yes"
}
if(name_int_red_factor == "BaseCase"){
name_int <- "Social distancing: status quo; Schooling: in-person; Holiday bump: yes"
}
l_projection_scenarios[[name_int]] <- l_interventions
}else{
# Add interventions: school and/or work at 50% and 75%
i10 <- make_intervention(intervention_type = "SocialDistancing",
time_start = n_date0_NPI_proj + n_lag_inf,
time_stop = n_t + 1,
intervention_factor = int_factor,
intervention_change_rate = 0.5,
resume_school = resume_school)
l_interventions <- add_intervention(interventions = NULL, intervention = i1)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i2)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i3)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i4)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i5)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i6)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i7)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i8)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i9)
l_interventions <- add_intervention(interventions = l_interventions, intervention = i10)
if(name_int_red_factor == "IncreaseSD"){
name_int <- "Social distancing: stricter; Schooling: not in-person; Holiday bump: yes"
}
if(name_int_red_factor == "BaseCase"){
name_int <- "Social distancing: status quo; Schooling: not in-person; Holiday bump: yes"
}
l_projection_scenarios[[name_int]] <- l_interventions
}
}
}
return(l_projection_scenarios)
}
#' Calculate epidemiological outcomes projections from SC-COSMO
#'
#' \code{project_epi_out} projects epidemiological outcomes from the
#' SC-COSMO model for selected states in Mexico.
#'
#' @param v_states_project Vector specifying the name of the states to be
#' projected.
#' @param l_params_all List of all SC-COSMO model parameters.
#' @param n_date_ini Initial calendar date of the simulation.
#' @param n_lag_inf Lag in time series of infectious individuals.
#' @return
#' A list with two elements: \code{Total}, a long-format data frame of
#' aggregate epidemiological outcomes over time, and \code{Ages}, a data
#' frame of age-stratified outcomes.
#' @export
project_epi_out <- function(v_states_project,
                            l_params_all,
                            n_date_ini,
                            n_lag_inf = NULL){ # User defined
  ## Pattern used throughout: each epidemiological outcome is accumulated into
  ## its own long-format data.frame with columns
  ## (county, Outcome, dates, dates0, time, value). When `n_lag_inf` is
  ## supplied, the first `n_lag_inf` rows of every model output are dropped so
  ## that model time aligns with the calendar dates in `v_dates`.
  ## NOTE(review): this function reads several objects from the calling/global
  ## environment (n_t_project, n_death_delay, l_dates_targets,
  ## n_date_end_project, and the calc_* / prep_* helpers) -- confirm they are
  ## defined wherever this is called, including inside parallel workers.
  # Initialize empty accumulators for each outcome (bind_rows() appends later)
  df_DXCumtot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_DXInctot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_DXtot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_DXDcov_tot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_DXDIncCov_tot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_Hosp_tot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_NonICU_tot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_ICU_tot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_InfCumtot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_InfCumprop <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_InfInctot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_Inftot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_ExpInf_tot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_Dcov_tot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_Rec_tot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_Rec_prop <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_Rt_tot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  df_CDR_tot <- data.frame(county = NULL, Outcome = NULL, time = NULL, value = NULL)
  # Age-stratified accumulators (column sets vary, so start from empty vectors)
  df_DXCumages <- c()
  df_DXIncages <- c()
  df_InfCumages <- c()
  # print(paste0(l_interventions[[4]]$intervention_factor,
  #              "; r_beta = ", round(l_params_all$r_beta, 3),
  #              "; r_tau = ", round(l_params_all$r_tau, 3),
  #              # "; r_nu_exp2_dx_lb = ", round(l_params_all$r_nu_exp2_dx_lb, 3),
  #              # "; r_nu_exp2_dx_ub = ", round(l_params_all$r_nu_exp2_dx_ub, 3),
  #              # "; r_nu_exp2_dx_rate = ", round(l_params_all$r_nu_exp2_dx_rate, 3),
  #              # "; n_nu_exp2_dx_mid = ", round(l_params_all$n_nu_exp2_dx_mid, 3),
  #              "; n_date_ini = ", n_date_ini))
  #
  ### Run SC-COSMO model with updated calibrated parameters
  l_out_cosmo <- sccosmomcma::cosmo(l_params_all = l_params_all)
  ### Population (used as denominator for the "proportion" outcomes below)
  df_popize <- calc_popsize_totals(l_out_cosmo)
  ####### Epidemiological Output ###########################################
  # Calendar dates and day offsets spanning the projection horizon.
  # NOTE(review): sum() on the scalar n_t_project is a no-op here; also,
  # v_dates/v_dates0 keep length n_t_project + 1 while lagged series are
  # shortened by n_lag_inf -- confirm the lengths agree when n_lag_inf > 0,
  # otherwise data.frame() would recycle or error.
  v_dates <- n_date_ini + 0:n_t_project
  v_dates0 <- 0:sum(n_t_project)
  #### Cumulative infections total ####
  df_infcum_ages <- calc_infcum_totals(l_out_cosmo)
  if(is.null(n_lag_inf)){
    # No lag: use the full model time series
    df_InfCumtot <- bind_rows(df_InfCumtot,
                              data.frame(county = v_states_project,
                                         Outcome = "Cumulative infections",
                                         dates = v_dates,
                                         dates0 = v_dates0,
                                         time = df_infcum_ages$time,
                                         value = df_infcum_ages[, "All"])) # (l_params_all$n_ages + 2)
    df_InfCumages <- bind_rows(df_InfCumages,
                               data.frame(county = v_states_project,
                                          Outcome = "Cumulative infections",
                                          dates = v_dates,
                                          dates0 = v_dates0,
                                          df_infcum_ages,
                                          check.names = FALSE))
  } else {
    # Lagged: drop the first n_lag_inf time steps of the model output
    df_InfCumtot <- bind_rows(df_InfCumtot,
                              data.frame(county = v_states_project,
                                         Outcome = "Cumulative infections",
                                         dates = v_dates,
                                         dates0 = v_dates0,
                                         time = df_infcum_ages$time[-c(1:n_lag_inf)],
                                         value = df_infcum_ages[-c(1:n_lag_inf), "All"])) # (l_params_all$n_ages + 2)
    df_InfCumages <- bind_rows(df_InfCumages,
                               data.frame(county = v_states_project,
                                          Outcome = "Cumulative infections",
                                          dates = v_dates,
                                          dates0 = v_dates0,
                                          df_infcum_ages[-c(1:n_lag_inf), ],
                                          check.names = FALSE))
  }
  #### Cumulative infections proportion ####
  # Cumulative infections divided by total population (last column of df_popize)
  if(is.null(n_lag_inf)){
    df_InfCumprop <- bind_rows(df_InfCumprop,
                               data.frame(county = v_states_project,
                                          Outcome = "Cumulative infections proportion",
                                          dates = v_dates,
                                          dates0 = v_dates0,
                                          time = df_infcum_ages$time,
                                          value = df_infcum_ages[, "All"]/df_popize[, ncol(df_popize)])) # (l_params_all$n_ages + 2)
  } else {
    df_InfCumprop <- bind_rows(df_InfCumprop,
                               data.frame(county = v_states_project,
                                          Outcome = "Cumulative infections proportion",
                                          dates = v_dates,
                                          dates0 = v_dates0,
                                          time = df_infcum_ages$time[-c(1:n_lag_inf)],
                                          value = df_infcum_ages[-c(1:n_lag_inf), "All"]/df_popize[-c(1:n_lag_inf), ncol(df_popize)])) # (l_params_all$n_ages + 2)
  }
  #### Incident infections total ####
  df_infinc_ages <- calc_infinc_totals(l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_InfInctot <- bind_rows(df_InfInctot,
                              data.frame(county = v_states_project,
                                         Outcome = "Incident infections",
                                         dates = v_dates,
                                         dates0 = v_dates0,
                                         time = df_infinc_ages$time,
                                         value = df_infinc_ages[, "All"])) # (l_params_all$n_ages + 2)
  } else {
    df_InfInctot <- bind_rows(df_InfInctot,
                              data.frame(county = v_states_project,
                                         Outcome = "Incident infections",
                                         dates = v_dates,
                                         dates0 = v_dates0,
                                         time = df_infinc_ages$time[-c(1:n_lag_inf)],
                                         value = df_infinc_ages[-c(1:n_lag_inf), "All"])) # (l_params_all$n_ages + 2)
  }
  #### Prevalence: Total COVID Infections ####
  # Column (n_ages + 2) holds the all-ages total in the calc_* outputs below
  df_inftot_ages <- calc_inf_totals(l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_Inftot <- bind_rows(df_Inftot,
                           data.frame(county = v_states_project,
                                      Outcome = "Prevalent infections",
                                      time = df_inftot_ages$time,
                                      dates = v_dates,
                                      dates0 = v_dates0,
                                      value = df_inftot_ages[, (l_params_all$n_ages + 2)]))
  } else {
    df_Inftot <- bind_rows(df_Inftot,
                           data.frame(county = v_states_project,
                                      Outcome = "Prevalent infections",
                                      dates = v_dates,
                                      dates0 = v_dates0,
                                      time = df_inftot_ages$time[-c(1:n_lag_inf)],
                                      value = df_inftot_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
  }
  #### Prevalence: Total COVID Infections (Es and Is) ####
  # Exposed + infectious; note: no dates/dates0 columns here -- this frame is
  # only used as the CDR denominator, where merging is on shared columns.
  df_expinftot_ages <- calc_expinf_totals(l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_ExpInf_tot <- bind_rows(df_ExpInf_tot,
                               data.frame(county = v_states_project,
                                          Outcome = "Infections (Es and Is)",
                                          time = df_expinftot_ages$time,
                                          value = df_expinftot_ages[, (l_params_all$n_ages + 2)]))
  } else {
    df_ExpInf_tot <- bind_rows(df_ExpInf_tot,
                               data.frame(county = v_states_project,
                                          Outcome = "Infections (Es and Is)",
                                          time = df_expinftot_ages$time[-c(1:n_lag_inf)],
                                          value = df_expinftot_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
  }
  #### Cumulative detected cases total ####
  df_dxcum_ages <- calc_dxcum_totals(l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_DXCumtot <- bind_rows(df_DXCumtot,
                             data.frame(county = v_states_project,
                                        Outcome = "Cumulative detected cases",
                                        dates = v_dates,
                                        dates0 = v_dates0,
                                        time = df_dxcum_ages$time,
                                        value = df_dxcum_ages[, (l_params_all$n_ages + 2)]))
    df_DXCumages <- bind_rows(df_DXCumages,
                              data.frame(county = v_states_project,
                                         Outcome = "Cumulative detected cases",
                                         dates = v_dates,
                                         dates0 = v_dates0,
                                         df_dxcum_ages,
                                         check.names = FALSE))
  } else {
    df_DXCumtot <- bind_rows(df_DXCumtot,
                             data.frame(county = v_states_project,
                                        Outcome = "Cumulative detected cases",
                                        dates = v_dates,
                                        dates0 = v_dates0,
                                        time = df_dxcum_ages$time[-c(1:n_lag_inf)],
                                        value = df_dxcum_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
    df_DXCumages <- bind_rows(df_DXCumages,
                              data.frame(county = v_states_project,
                                         Outcome = "Cumulative detected cases",
                                         dates = v_dates,
                                         dates0 = v_dates0,
                                         df_dxcum_ages[-c(1:n_lag_inf), ],
                                         check.names = FALSE))
  }
  #### Incident detected total cases ####
  df_dxinc_ages <- calc_dxinc_totals(l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_DXInctot <- bind_rows(df_DXInctot,
                             data.frame(county = v_states_project,
                                        Outcome = "Incident detected cases",
                                        dates = v_dates,
                                        dates0 = v_dates0,
                                        time = df_dxinc_ages$time,
                                        value = df_dxinc_ages[, (l_params_all$n_ages + 2)]))
    df_DXIncages <- bind_rows(df_DXIncages,
                              data.frame(county = v_states_project,
                                         Outcome = "Incident detected cases",
                                         dates = v_dates,
                                         dates0 = v_dates0,
                                         df_dxinc_ages,
                                         check.names = FALSE))
  } else {
    df_DXInctot <- bind_rows(df_DXInctot,
                             data.frame(county = v_states_project,
                                        Outcome = "Incident detected cases",
                                        dates = v_dates,
                                        dates0 = v_dates0,
                                        time = df_dxinc_ages$time[-c(1:n_lag_inf)],
                                        value = df_dxinc_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
    df_DXIncages <- bind_rows(df_DXIncages,
                              data.frame(county = v_states_project,
                                         Outcome = "Incident detected cases",
                                         dates = v_dates,
                                         dates0 = v_dates0,
                                         df_dxinc_ages[-c(1:n_lag_inf), ],
                                         check.names = FALSE))
  }
  #### Prevalent detected cases total ####
  df_dx_ages <- calc_dx_totals(l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_DXtot <- bind_rows(df_DXtot,
                          data.frame(county = v_states_project,
                                     Outcome = "Detected cases",
                                     dates = v_dates,
                                     dates0 = v_dates0,
                                     time = df_dx_ages$time,
                                     value = df_dx_ages[, (l_params_all$n_ages + 2)]))
  } else {
    df_DXtot <- bind_rows(df_DXtot,
                          data.frame(county = v_states_project,
                                     Outcome = "Detected cases",
                                     dates = v_dates,
                                     dates0 = v_dates0,
                                     time = df_dx_ages$time[-c(1:n_lag_inf)],
                                     value = df_dx_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
  }
  #### Prevalence Recovered total ####
  df_Rec_ages <- calc_rec_totals(l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_Rec_tot <- bind_rows(df_Rec_tot,
                            data.frame(county = v_states_project,
                                       Outcome = "Recovered prevalence",
                                       dates = v_dates,
                                       dates0 = v_dates0,
                                       time = df_Rec_ages$time,
                                       value = df_Rec_ages[, (l_params_all$n_ages + 2)]))
  } else {
    df_Rec_tot <- bind_rows(df_Rec_tot,
                            data.frame(county = v_states_project,
                                       Outcome = "Recovered prevalence",
                                       dates = v_dates,
                                       dates0 = v_dates0,
                                       time = df_Rec_ages$time[-c(1:n_lag_inf)],
                                       value = df_Rec_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
  }
  #### Prevalence Recovered proportion ####
  # Recovered prevalence divided by total population
  if(is.null(n_lag_inf)){
    df_Rec_prop <- bind_rows(df_Rec_prop,
                             data.frame(county = v_states_project,
                                        Outcome = "Recovered prevalence proportion",
                                        dates = v_dates,
                                        dates0 = v_dates0,
                                        time = df_Rec_ages$time,
                                        value = df_Rec_ages[, (l_params_all$n_ages + 2)]/df_popize[, ncol(df_popize)]))
  } else {
    df_Rec_prop <- bind_rows(df_Rec_prop,
                             data.frame(county = v_states_project,
                                        Outcome = "Recovered prevalence proportion",
                                        dates = v_dates,
                                        dates0 = v_dates0,
                                        time = df_Rec_ages$time[-c(1:n_lag_inf)],
                                        value = df_Rec_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]/df_popize[-c(1:n_lag_inf), ncol(df_popize)]))
  }
  #### Case Detection Ratio ####
  # CDR = detected cases / (exposed + infectious); merge() joins on the
  # columns shared by both frames (county, time); dates come from df_DXtot.
  df_CDRdenom <- df_ExpInf_tot %>%
    rename(denom = value)
  df_CDR_tot <- merge((df_DXtot %>% select(-Outcome)), (df_CDRdenom %>% select(-Outcome))) %>%
    mutate(value_orig = value) %>%
    mutate(value = value_orig/denom) %>%
    mutate(Outcome = "CDR proportion") %>%
    select(-value_orig, -denom)
  # Reorder columns to the canonical layout used by all other outcome frames
  df_CDR_tot <- df_CDR_tot[, c("county", "Outcome", "dates", "dates0", "time", "value")]
  #relocate(county, Outcome, dates, dates0, time, value)
  #### Incident COVID19 deaths infections ####
  df_DXDIncCov_ages <- calc_incdeathsdx_totals(l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_DXDIncCov_tot <- bind_rows(df_DXDIncCov_tot,
                                  data.frame(county = v_states_project,
                                             Outcome = "Incident COVID19 deaths infections",
                                             time = df_DXDIncCov_ages$time,
                                             dates = v_dates,
                                             dates0 = v_dates0,
                                             value = df_DXDIncCov_ages[, (l_params_all$n_ages + 2)]))
  } else {
    df_DXDIncCov_tot <- bind_rows(df_DXDIncCov_tot,
                                  data.frame(county = v_states_project,
                                             Outcome = "Incident COVID19 deaths infections",
                                             dates = v_dates,
                                             dates0 = v_dates0,
                                             time = df_DXDIncCov_ages$time[-c(1:n_lag_inf)],
                                             value = df_DXDIncCov_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
  }
  ### Apply delay in deaths
  # Shift dates forward by n_death_delay, keep the target window, and pad any
  # missing leading days with the first modeled value via tidyr::complete().
  # NOTE(review): when n_lag_inf is NULL the time recomputation below
  # (n_lag_inf:...) would fail -- presumably this branch is only reached with
  # a non-NULL lag; confirm against callers.
  df_DXDIncCov_tot <- df_DXDIncCov_tot %>%
    mutate(dates = dates + n_death_delay) %>%
    filter(dates <= n_date_end_project & dates >= l_dates_targets$deaths[1]) %>%
    complete(dates = seq.Date(from = as.Date(l_dates_targets$deaths[1]),
                              to = as.Date(n_date_end_project),
                              by = "day"),
             fill = list(county = v_states_project,
                         Outcome = "Incident COVID19 deaths infections",
                         value = df_DXDIncCov_tot$value[1]
             )) %>%
    mutate(dates0 = as.numeric(dates - dates[1]),
           time = n_lag_inf:(as.Date(n_date_end_project) -
                               as.Date(l_dates_targets$deaths[1]) + n_lag_inf))
  #### Cumulative COVID Deaths ####
  df_DCov_ages <- calc_deaths_totals(l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_Dcov_tot <- bind_rows(df_Dcov_tot,
                             data.frame(county = v_states_project,
                                        Outcome = "COVID deaths",
                                        time = df_DCov_ages$time,
                                        dates = v_dates,
                                        dates0 = v_dates0,
                                        value = df_DCov_ages[, (l_params_all$n_ages + 2)]))
  } else {
    df_Dcov_tot <- bind_rows(df_Dcov_tot,
                             data.frame(county = v_states_project,
                                        Outcome = "COVID deaths",
                                        dates = v_dates,
                                        dates0 = v_dates0,
                                        time = df_DCov_ages$time[-c(1:n_lag_inf)],
                                        value = df_DCov_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
  }
  ### Apply delay in deaths
  # Same date-shift / window / padding treatment as incident deaths above
  df_Dcov_tot <- df_Dcov_tot %>%
    mutate(dates = dates + n_death_delay) %>%
    filter(dates <= n_date_end_project & dates >= l_dates_targets$deaths[1]) %>%
    complete(dates = seq.Date(from = as.Date(l_dates_targets$deaths[1]),
                              to = as.Date(n_date_end_project),
                              by = "day"),
             fill = list(county = v_states_project,
                         Outcome = "COVID deaths",
                         value = df_Dcov_tot$value[1]
             )) %>%
    mutate(dates0 = as.numeric(dates - dates[1]),
           time = n_lag_inf:(as.Date(n_date_end_project) -
                               as.Date(l_dates_targets$deaths[1]) + n_lag_inf))
  ###################### Effective Reproduction Number Rt ######################
  # Rt starts at time step 1; the first (rt_start + 1) calendar dates are
  # dropped to align with the Rt series returned by the helper
  rt_start <- 1
  system.time(df_Rt_raw <- calc_reproduction_number_wt(l_out_cosmo,
                                                       v_time = rt_start:(l_params_all$n_t),
                                                       nsim_chosen = 100))
  if(is.null(n_lag_inf)){
    df_Rt_tot <- bind_rows(df_Rt_tot,
                           data.frame(county = v_states_project,
                                      Outcome = "R effective",
                                      dates = v_dates[-c(1:(rt_start + 1))],
                                      dates0 = v_dates0[-c(1:(rt_start + 1))],
                                      time = df_Rt_raw$time,
                                      value = df_Rt_raw$Rt))
  } else {
    df_Rt_tot <- bind_rows(df_Rt_tot,
                           data.frame(county = v_states_project,
                                      Outcome = "R effective",
                                      dates = v_dates[-c(1:(rt_start + 1))],
                                      dates0 = v_dates0[-c(1:(rt_start + 1))],
                                      time = df_Rt_raw$time[-c(1:(n_lag_inf))],
                                      value = df_Rt_raw$Rt[-c(1:(n_lag_inf))]))
  }
  # print(paste0("Rt calculated in ",
  #              round(Rt_time[3]/60, 2), " minutes"))
  ############################# Hospitalizations ##############################
  #### PREPPING HOSPITAL CALCS
  l_hosp <- prep_dx_hospitalizations(l_out_cosmo, use_prevalence = FALSE)
  #### All Hospitalization Prevalence ####
  df_Hosp_ages <- calc_dx_hosp(l_hosp, l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_Hosp_tot <- bind_rows(df_Hosp_tot,
                             data.frame(county = v_states_project,
                                        Outcome = "Total hospitalizations",
                                        dates = v_dates,
                                        dates0 = v_dates0,
                                        time = df_Hosp_ages$time,
                                        value = df_Hosp_ages[, (l_params_all$n_ages + 2)]))
  } else {
    df_Hosp_tot <- bind_rows(df_Hosp_tot,
                             data.frame(county = v_states_project,
                                        Outcome = "Total hospitalizations",
                                        dates = v_dates,
                                        dates0 = v_dates0,
                                        time = df_Hosp_ages$time[-c(1:n_lag_inf)],
                                        value = df_Hosp_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
  }
  #### Hospitalizations without ventilator ####
  df_NonICU_ages <- calc_dx_hosp_nonicu(l_hosp, l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_NonICU_tot <- bind_rows(df_NonICU_tot,
                               data.frame(county = v_states_project,
                                          Outcome = "Hospitalizations without ventilator",
                                          dates = v_dates,
                                          dates0 = v_dates0,
                                          time = df_NonICU_ages$time,
                                          value = df_NonICU_ages[, (l_params_all$n_ages + 2)]))
  } else {
    df_NonICU_tot <- bind_rows(df_NonICU_tot,
                               data.frame(county = v_states_project,
                                          Outcome = "Hospitalizations without ventilator",
                                          dates = v_dates,
                                          dates0 = v_dates0,
                                          time = df_NonICU_ages$time[-c(1:n_lag_inf)],
                                          value = df_NonICU_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
  }
  #### ICU Prevalence ####
  df_ICU_ages <- calc_dx_hosp_icu(l_hosp, l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_ICU_tot <- bind_rows(df_ICU_tot,
                            data.frame(county = v_states_project,
                                       Outcome = "Hospitalizations with ventilator",
                                       dates = v_dates,
                                       dates0 = v_dates0,
                                       time = df_ICU_ages$time,
                                       value = df_ICU_ages[, (l_params_all$n_ages + 2)]))
  } else {
    df_ICU_tot <- bind_rows(df_ICU_tot,
                            data.frame(county = v_states_project,
                                       Outcome = "Hospitalizations with ventilator",
                                       dates = v_dates,
                                       dates0 = v_dates0,
                                       time = df_ICU_ages$time[-c(1:n_lag_inf)],
                                       value = df_ICU_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
  }
  #### DXCOVID19 deaths ####
  df_DXDCov_ages <- calc_deathsdx_totals(l_out_cosmo)
  if(is.null(n_lag_inf)){
    df_DXDcov_tot <- bind_rows(df_DXDcov_tot,
                               data.frame(county = v_states_project,
                                          Outcome = "Detected COVID deaths",
                                          dates = v_dates,
                                          dates0 = v_dates0,
                                          time = df_DXDCov_ages$time,
                                          value = df_DXDCov_ages[, (l_params_all$n_ages + 2)]))
  } else {
    df_DXDcov_tot <- bind_rows(df_DXDcov_tot,
                               data.frame(county = v_states_project,
                                          Outcome = "Detected COVID deaths",
                                          dates = v_dates,
                                          dates0 = v_dates0,
                                          time = df_DXDCov_ages$time[-c(1:n_lag_inf)],
                                          value = df_DXDCov_ages[-c(1:n_lag_inf), (l_params_all$n_ages + 2)]))
  }
  ### Apply delay in deaths
  # Same date-shift / window / padding treatment as the other death series
  df_DXDcov_tot <- df_DXDcov_tot %>%
    mutate(dates = dates + n_death_delay) %>%
    filter(dates <= n_date_end_project & dates >= l_dates_targets$deaths[1]) %>%
    complete(dates = seq.Date(from = as.Date(l_dates_targets$deaths[1]),
                              to = as.Date(n_date_end_project),
                              by = "day"),
             fill = list(county = v_states_project,
                         Outcome = "Detected COVID deaths",
                         value = df_DXDcov_tot$value[1]
             )) %>%
    mutate(dates0 = as.numeric(dates - dates[1]),
           time = n_lag_inf:(as.Date(n_date_end_project) -
                               as.Date(l_dates_targets$deaths[1]) + n_lag_inf))
  ### Combine outputs and generate dates for each county
  # Stack all aggregate outcomes into one long data frame
  df_out_mex_tot <- bind_rows(df_InfCumtot,
                              df_InfCumprop,
                              df_InfInctot,
                              df_Inftot,
                              df_Dcov_tot,
                              df_DXCumtot,
                              df_DXInctot,
                              df_DXtot,
                              df_Rec_tot,
                              df_Rec_prop,
                              df_Rt_tot,
                              df_Hosp_tot,
                              df_NonICU_tot,
                              df_ICU_tot,
                              df_DXDcov_tot,
                              df_CDR_tot,
                              df_DXDIncCov_tot)
  # Stack the age-stratified outcomes separately
  df_out_mex_ages <- bind_rows(df_InfCumages,
                               df_DXCumages,
                               df_DXIncages)
  ### Return data.frame
  # Total: aggregate outcomes; Ages: age-stratified outcomes
  return(list(Total = df_out_mex_tot,
              Ages = df_out_mex_ages))
}
acomb <- function(...) abind::abind(..., along=3)
#' Probabilistic projections of interventions
#'
#' \code{project_interventions_probabilistic} produces probabilistic projections
#' of interventions
#'
#' @param m_calib_post Matrix with calibrated parameters from posterior
#' distribution.
#' @param n_date_ini Initial date of calibration.
#' @param v_n_date0_NPI Vector with the time steps (\code{0 = date_init}) at
#' which effect of NPI changed in the calibration period.
#' @param n_t_calib Number of calibration days.
#' @param n_t_project Number of projection days.
#' @param n_lag_inf Lag in time series of infectious individuals.
#' @return
#' A list with probabilistic projections of interventions.
#' @export
project_interventions_probabilistic <- function(m_calib_post,
n_date_ini,
v_n_date0_NPI,
n_t_calib,
n_t_project,
n_lag_inf){
if(is.null(dim(m_calib_post))) { # If vector, change to matrix
m_calib_post <- t(m_calib_post)
}
### Number of posterior samples
n_samp <- nrow(m_calib_post)
  #### Compute model-predicted outputs for all interventions for each sample of posterior distribution ####
v_mean_soc_dist_factor <- colMeans(m_calib_post[, c("r_soc_dist_factor",
"r_soc_dist_factor_2",
"r_soc_dist_factor_3",
"r_soc_dist_factor_4",
"r_soc_dist_factor_5"), drop = FALSE])
### Get OS
os <- get_os()
print(paste0("Parallelized projections on ", os))
### Get cores
no_cores <- 50
  ### Evaluate model at each posterior sample and store results
if(os == "macosx" | os == "linux"){
cl <- makeForkCluster(no_cores)
registerDoParallel(cl)
time_foreach <- system.time(
df_out_projection_post_all <- foreach(i = 1:n_samp, .combine = c) %dopar% { # i = 1
# ### Progress bar
# if(!exists("pb")) pb <- tcltk::tkProgressBar(title = "Parallel task for Target coverage",
# min = 1, max = n_samp)
# info <- sprintf("%s%% done", round(i/n_samp*100))
# tcltk::setTkProgressBar(pb, i, label = sprintf("Progress of simulations (%s)", info))
# write_log_file(msg=paste(Sys.time(),": INITIATING iteration:",i,"\n"),log_flag=GLOBAL_LOGGING_ENABLED)
### Call projection scenarios
l_interventions_scenarios <- get_projection_scenarios(n_t = n_t_project + n_lag_inf,
v_soc_dist_factor = m_calib_post[i,
c("r_soc_dist_factor",
"r_soc_dist_factor_2",
"r_soc_dist_factor_3",
"r_soc_dist_factor_4",
"r_soc_dist_factor_5")],
v_mean_soc_dist_factor = v_mean_soc_dist_factor,
v_n_date0_NPI = v_n_date0_NPI,
date_proj0 = n_t_calib + n_lag_inf)
df_out_mex_total <- c()
df_out_mex_ages <- c()
### Iterate over projection scenarios
for(scenario_name in names(l_interventions_scenarios)) { # scenario_name <- names(l_interventions_scenarios)[3]
print(paste0("Running scenario ", scenario_name))
l_interventions <- l_interventions_scenarios[[scenario_name]]
### Initialize parameters
l_params_init <- sccosmomcma::load_params_init(n_t = n_t_project + n_lag_inf, # Number of days
ctry = "Mexico",
ste = v_states_project,
cty = v_states_project,
r_beta = m_calib_post[i,"r_beta"],
l_nu_exp2_dx = add_period(l_period_def = NULL,
l_period_add = make_period(
functional_form = "general logit",
time_start = 0,
time_stop = n_t_project + n_lag_inf,
val_start = as.numeric(m_calib_post[i,"r_nu_exp2_dx_lb"]),
val_end = as.numeric(m_calib_post[i,"r_nu_exp2_dx_ub"]),
v_logit_change_rate = as.numeric(m_calib_post[i,"r_nu_exp2_dx_rate"]),
v_logit_change_mid = as.numeric(m_calib_post[i,"n_nu_exp2_dx_mid"]))),
l_nu_inf2_dx = add_period(l_period_def = NULL,
l_period_add = make_period(
functional_form = "general logit",
time_start = 0,
time_stop = n_t_project + n_lag_inf,
val_start = as.numeric(m_calib_post[i,"r_nu_exp2_dx_lb"]),
val_end = as.numeric(m_calib_post[i,"r_nu_exp2_dx_ub"]),
v_logit_change_rate = as.numeric(m_calib_post[i,"r_nu_exp2_dx_rate"]),
v_logit_change_mid = as.numeric(m_calib_post[i,"n_nu_exp2_dx_mid"]))),
v_inf_init_ages = v_inf_init_ages,
l_contact_info = l_contact_matrices,
l_interventions = l_interventions,
n_hhsize = n_hhsize,
r_tau = m_calib_post[i,"r_tau"],
r_omega = 0, #1/200
l_cfr = get_non_const_multiage_list(v_time_stop = 1:(n_t_project+n_lag_inf), m_ageval = m_cfr_proj),
m_r_exit_tot = v_hosp_map["m_r_exit_tot"],
m_r_exit_icu = v_hosp_map["m_r_exit_icu"],
m_r_exit_nonicu = v_hosp_map["m_r_exit_nonicu"],
m_sigma_tot = v_hosp_map["m_sigma_tot"],
m_sigma_nonicu = v_hosp_map["m_sigma_nonicu"],
m_sigma_icu = v_hosp_map["m_sigma_icu"]
)
## Load all parameter values
l_params_all <- sccosmomcma::load_all_params(l_params_init = l_params_init)
df_out_scenario <- project_epi_out(v_states_project = v_states_project,
l_params_all = l_params_all,
n_date_ini = n_date_ini,
n_lag_inf = n_lag_inf)
### Store interventions for total population and by age groups
df_out_scenario_tot <- df_out_scenario$Total
df_out_scenario_ages <- df_out_scenario$Ages
### Add intervention names
df_out_scenario_tot$Intervention <- scenario_name
df_out_scenario_ages$Intervention <- scenario_name
### Add intervention and base-case type
df_out_scenario_tot$intervention_type = ""
df_out_scenario_tot$BaseCase_type = ""
if(scenario_name == "No NPIs implemented"){
df_out_scenario_tot$intervention_type <- "NoNPI"
df_out_scenario_tot$BaseCase_type <- NA
}else if(scenario_name == "Social distancing: status quo; Schooling: not in-person; Holiday bump: no"){
df_out_scenario_tot$intervention_type <- "BaseCase"
df_out_scenario_tot$BaseCase_type <- "StatusQuo"
}else if(scenario_name == "Social distancing: status quo; Schooling: not in-person; Holiday bump: yes"){
df_out_scenario_tot$intervention_type <- "BaseCase"
df_out_scenario_tot$BaseCase_type <- "Holidays"
}else if(scenario_name == "Social distancing: status quo; Schooling: in-person; Holiday bump: no"){
df_out_scenario_tot$intervention_type <- "SchoolSD"
df_out_scenario_tot$BaseCase_type <- "StatusQuo"
}else if(scenario_name == "Social distancing: status quo; Schooling: in-person; Holiday bump: yes"){
df_out_scenario_tot$intervention_type <- "SchoolSD"
df_out_scenario_tot$BaseCase_type <- "Holidays"
}else if(scenario_name == "Social distancing: stricter; Schooling: not in-person; Holiday bump: no"){
df_out_scenario_tot$intervention_type <- "IncreaseSD"
df_out_scenario_tot$BaseCase_type <- "StatusQuo"
}else if(scenario_name == "Social distancing: stricter; Schooling: not in-person; Holiday bump: yes"){
df_out_scenario_tot$intervention_type <- "IncreaseSD"
df_out_scenario_tot$BaseCase_type <- "Holidays"
}else if(scenario_name == "Social distancing: stricter; Schooling: in-person; Holiday bump: no"){
df_out_scenario_tot$intervention_type <- "IncreaseSDSchoolSD"
df_out_scenario_tot$BaseCase_type <- "StatusQuo"
}else if(scenario_name == "Social distancing: stricter; Schooling: in-person; Holiday bump: yes"){
df_out_scenario_tot$intervention_type <- "IncreaseSDSchoolSD"
df_out_scenario_tot$BaseCase_type <- "Holidays"
}
# Combine scenarios
df_out_scenario_tot <- df_out_scenario_tot %>%
mutate(type = "Model-predicted",
simulation = i)
df_out_scenario_ages <- df_out_scenario_ages %>%
mutate(type = "Model-predicted",
simulation = i)
df_out_mex_total <- bind_rows(df_out_mex_total,
df_out_scenario_tot)
# df_out_mex_ages <- bind_rows(df_out_mex_ages,
# df_out_scenario_ages)
}
# Return data.frame
df_out_mex_total
}
)
}else if(os == "windows"){
cl <- makeCluster(no_cores) # initialize cluster object
registerDoParallel(cl)
opts <- list(attachExportEnv = TRUE)
time_foreach <- system.time(
df_out_projection_post_all <- foreach(i = 1:n_samp, .combine = rbind, .export = ls(globalenv()), # i = 1
.packages=c("sccosmomcma",
"tidyverse",
"dplyr",
"lubridate",
"dampack",
"epitools"),
.options.snow = opts) %dopar% { # i = 1
# ### Progress bar
# if(!exists("pb")) pb <- tcltk::tkProgressBar(title = "Parallel task for Target coverage",
# min = 1, max = n_samp)
# info <- sprintf("%s%% done", round(i/n_samp*100))
# tcltk::setTkProgressBar(pb, i, label = sprintf("Progress of simulations (%s)", info))
# write_log_file(msg=paste(Sys.time(),": INITIATING iteration:",i,"\n"),log_flag=GLOBAL_LOGGING_ENABLED)
### Call projection scenarios
l_interventions_scenarios <- get_projection_scenarios(n_t = n_t_project + n_lag_inf,
v_soc_dist_factor = m_calib_post[i,
c("r_soc_dist_factor",
"r_soc_dist_factor_2",
"r_soc_dist_factor_3",
"r_soc_dist_factor_4",
"r_soc_dist_factor_5")],
v_mean_soc_dist_factor = v_mean_soc_dist_factor,
v_n_date0_NPI = v_n_date0_NPI,
date_proj0 = n_t_calib + n_lag_inf)
df_out_mex_total <- c()
df_out_mex_ages <- c()
### Iterate over projection scenarios
for(scenario_name in names(l_interventions_scenarios)) { # scenario_name <- names(l_interventions_scenarios)[1]
print(paste0("Running scenario ", scenario_name))
l_interventions <- l_interventions_scenarios[[scenario_name]]
### Initialize parameters
l_params_init <- sccosmomcma::load_params_init(n_t = n_t_project + n_lag_inf, # Number of days
ctry = "Mexico",
ste = v_states_project,
cty = v_states_project,
v_reduced_sus = v_reduced_sus,
r_beta = m_calib_post[i,"r_beta"],
l_nu_exp2_dx = add_period(l_period_def = NULL,
l_period_add = make_period(
functional_form = "general logit",
time_start = 0,
time_stop = n_t_project + n_lag_inf,
val_start = as.numeric(m_calib_post[i,"r_nu_exp2_dx_lb"]),
val_end = as.numeric(m_calib_post[i,"r_nu_exp2_dx_ub"]),
v_logit_change_rate = as.numeric(m_calib_post[i,"r_nu_exp2_dx_rate"]),
v_logit_change_mid = as.numeric(m_calib_post[i,"n_nu_exp2_dx_mid"]))),
l_nu_inf2_dx = add_period(l_period_def = NULL,
l_period_add = make_period(
functional_form = "general logit",
time_start = 0,
time_stop = n_t_project + n_lag_inf,
val_start = as.numeric(m_calib_post[i,"r_nu_exp2_dx_lb"]),
val_end = as.numeric(m_calib_post[i,"r_nu_exp2_dx_ub"]),
v_logit_change_rate = as.numeric(m_calib_post[i,"r_nu_exp2_dx_rate"]),
v_logit_change_mid = as.numeric(m_calib_post[i,"n_nu_exp2_dx_mid"]))),
v_inf_init_ages = v_inf_init_ages,
l_contact_info = l_contact_matrices,
l_interventions = l_interventions,
n_hhsize = n_hhsize,
r_tau = m_calib_post[i,"r_tau"],
r_omega = 0, #1/200
l_cfr = get_non_const_multiage_list(v_time_stop = 1:(n_t_project+n_lag_inf), m_ageval = m_cfr_proj),
m_r_exit_tot = v_hosp_map["m_r_exit_tot"],
m_r_exit_icu = v_hosp_map["m_r_exit_icu"],
m_r_exit_nonicu = v_hosp_map["m_r_exit_nonicu"],
m_sigma_tot = v_hosp_map["m_sigma_tot"],
m_sigma_nonicu = v_hosp_map["m_sigma_nonicu"],
m_sigma_icu = v_hosp_map["m_sigma_icu"]
)
## Load all parameter values
l_params_all <- load_all_params(l_params_init = l_params_init)
df_out_scenario <- project_epi_out(v_states_project = v_states_project,
l_params_all = l_params_all,
n_date_ini = n_date_ini,
n_lag_inf = n_lag_inf)
### Store interventions for total population and by age groups
df_out_scenario_tot <- df_out_scenario$Total
df_out_scenario_ages <- df_out_scenario$Ages
### Add intervention names
df_out_scenario_tot$Intervention <- scenario_name
df_out_scenario_ages$Intervention <- scenario_name
### Add intervention and base-case type
df_out_scenario_tot$intervention_type = ""
df_out_scenario_tot$BaseCase_type = ""
if(scenario_name == "No NPIs implemented"){
df_out_scenario_tot$intervention_type <- "NoNPI"
df_out_scenario_tot$BaseCase_type <- NA
}else if(scenario_name == "Social distancing: status quo; Schooling: not in-person; Holiday bump: no"){
df_out_scenario_tot$intervention_type <- "BaseCase"
df_out_scenario_tot$BaseCase_type <- "StatusQuo"
}else if(scenario_name == "Social distancing: status quo; Schooling: not in-person; Holiday bump: yes"){
df_out_scenario_tot$intervention_type <- "BaseCase"
df_out_scenario_tot$BaseCase_type <- "Holidays"
}else if(scenario_name == "Social distancing: status quo; Schooling: in-person; Holiday bump: no"){
df_out_scenario_tot$intervention_type <- "SchoolSD"
df_out_scenario_tot$BaseCase_type <- "StatusQuo"
}else if(scenario_name == "Social distancing: status quo; Schooling: in-person; Holiday bump: yes"){
df_out_scenario_tot$intervention_type <- "SchoolSD"
df_out_scenario_tot$BaseCase_type <- "Holidays"
}else if(scenario_name == "Social distancing: stricter; Schooling: not in-person; Holiday bump: no"){
df_out_scenario_tot$intervention_type <- "IncreaseSD"
df_out_scenario_tot$BaseCase_type <- "StatusQuo"
}else if(scenario_name == "Social distancing: stricter; Schooling: not in-person; Holiday bump: yes"){
df_out_scenario_tot$intervention_type <- "IncreaseSD"
df_out_scenario_tot$BaseCase_type <- "Holidays"
}else if(scenario_name == "Social distancing: stricter; Schooling: in-person; Holiday bump: no"){
df_out_scenario_tot$intervention_type <- "IncreaseSDSchoolSD"
df_out_scenario_tot$BaseCase_type <- "StatusQuo"
}else if(scenario_name == "Social distancing: stricter; Schooling: in-person; Holiday bump: yes"){
df_out_scenario_tot$intervention_type <- "IncreaseSDSchoolSD"
df_out_scenario_tot$BaseCase_type <- "Holidays"
}
######## Combine scenarios #######
df_out_scenario_tot <- df_out_scenario_tot %>%
mutate(type = "Model-predicted",
simulation = i)
df_out_scenario_ages <- df_out_scenario_ages %>%
mutate(type = "Model-predicted",
simulation = i)
df_out_mex_total <- bind_rows(df_out_mex_total,
df_out_scenario_tot)
# df_out_mex_ages <- bind_rows(df_out_mex_ages,
# df_out_scenario_ages)
}
# Return data.frame
df_out_mex_total
}
)
}
stopCluster(cl)
print(paste0("Model evaluated ", scales::comma(n_samp), " times in ",
round(time_foreach[3]/60, 2), " minutes"))
df_out_projection_post_all_summ <- df_out_projection_post_all %>%
group_by(type, Intervention, intervention_type, BaseCase_type, Outcome, dates) %>%
summarise(mean = mean(value),
median = quantile(value, probs = 0.5, names = FALSE),
sd = sd(value),
lb = quantile(value, probs = 0.025, names = FALSE),
ub = quantile(value, probs = 0.975, names = FALSE)
)
colnames(df_out_projection_post_all_summ)[colnames(df_out_projection_post_all_summ)=="mean"] <- "value"
return(list(df_all = df_out_projection_post_all,
df_summ = df_out_projection_post_all_summ))
}
|
5ceae004a0f18c61391943ef07390661fb02789e | ee8dd63922e47711a5911d282472e6784c5d67c0 | /R/plot-.R | 49f67c49265896c62d97cec074ca74fecb9669c3 | [
"MIT"
] | permissive | atusy/qntmap | 07ff96149b4d8fb5ee2386b0892d524d1f55aa34 | 5b6a349ac12b600daad7e806e22982e514150b86 | refs/heads/master | 2021-06-04T06:01:19.809161 | 2021-04-06T13:54:15 | 2021-04-06T13:54:15 | 97,662,265 | 2 | 0 | MIT | 2021-04-06T13:54:15 | 2017-07-19T02:07:09 | R | UTF-8 | R | false | false | 2,050 | r | plot-.R | #' @name plot-qntmap
#'
#' @title Plot methods for `qntmap` package
#' @description
#' S3 methods to plot object with original classes in `qntmap` package.
#' See [`graphics::plot()`] for general use of `plot`.
#' Mapping data (`qm_xmap` and `qm_qntmap` classes) are visualized by heat maps.
#'
#' @param x
#' An object of class `qntmap`, `qm_cluster`, or `qm_xmap`,
#' returned by [quantify()], [qntmap()], and [read_xmap()], respectively.
#' @param zname,y
#' A string specifying a component of `x` to determine colors to fill the map.
#' `y` is the alias of `zname`.
#' @param zlim
#' A range of z.
#' @param colors
#' A color scale: "magma" (the default), "viridis", or "gray"; applicable when fill is continuous.
#' @param interactive
#' `TRUE` (default) produces plots with shiny WebUI, and
#' `FALSE` produces plots with [`ggplot2::ggplot()`].
#' @param unit
#' Unit of x- and y-axis ("px", "um", "nm" or "cm").
#' @param ...
#' Arguments passed to internal functions.
#'
#' @seealso [`graphics::plot()`]
#'
#' @importFrom graphics plot
NULL
#' @rdname plot-qntmap
#' @examples
#' # qm_raster class object
#' d <- data.frame(expand.grid(x = 1:5, y = 1:5), fill = runif(5))
#' class(d) <- c("qm_raster", class(d))
#' plot(d, "fill", interactive = FALSE)
#' @export
plot.qntmap <- function(
  x,
  y = setdiff(names(x), c("x", "y"))[1L],
  zname = y,
  zlim = NULL,
  colors = c("magma", "viridis", "gray"),
  interactive = TRUE,
  unit = c("px", "um", "mm", "cm"),
  ...
) {
  # Interactive mode delegates to the shiny front-end and yields its value.
  # NOTE(review): `colors == "viridis"` is evaluated on the *unmatched*
  # argument, so with the default vector `pcol` is a length-3 logical —
  # confirm this is what plot_shiny() expects.
  if (interactive) {
    plot_shiny(x, y, pcol = colors == "viridis", ...)
  } else {
    # Static mode: validate unit and colour scale, build the ggplot heat map
    # via autoplot(), and print it explicitly so it renders inside loops
    # and functions as well.
    axis_unit <- match.arg(unit)
    gg <- autoplot(
      object = x, zname = zname, zlim = zlim,
      colors = match.arg(colors), unit = axis_unit, ...
    )
    print(gg)
  }
}
# qm_xmap and qm_cluster objects are rendered by the same engine as qntmap
# objects, so the S3 methods below simply reuse plot.qntmap.
#' @rdname plot-qntmap
#' @export
plot.qm_xmap <- plot.qntmap
#' @rdname plot-qntmap
#' @export
plot.qm_cluster <- plot.qntmap
# For cluster maps the first non-coordinate column is not the natural fill
# variable, so override the default of `y` to "cluster" by patching the copied
# function's formals instead of duplicating the whole body.
formals(plot.qm_cluster)$y <- "cluster"
|
24f8c2d10bddd095e6338cfd645a8b1468df90eb | 99afed5c94a31a683f41d46b1210f0d30c0807c0 | /MROI/projects/data4coding.R | 6578315919c85b3ada5fd469e82d31daa083dc67 | [] | no_license | wenrurumon/AAC_tradition | d157bec40bc93d764d0a6836908a8f0dcca22d4c | 4b8107741de3491527c10be51a3e08bb06e66e2e | refs/heads/master | 2023-04-17T13:05:07.743416 | 2023-03-29T11:14:45 | 2023-03-29T11:14:45 | 135,133,458 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,073 | r | data4coding.R |
# Build a combined EC + OTC sales frame for manual coding.
# Reads two sheets of data4coding.xlsx, aggregates SKU-level value sales to
# Brand/Subbrand/ProductLine/TA level, and stacks the two channels.
rm(list=ls())
library(openxlsx)
library(data.table)
library(dplyr)
# --- EC channel (sheet 3) ---
x <- read.xlsx('data4coding.xlsx',sheet=3)
x2 <- x %>%
	filter(Channel!='C2C'&Month>=201806&Skuname!='Others') %>%
	select(
		Brand=Brandname,Subbrand=ProductLine,
		ProductLine=Channel,SKU=Skuname,
		TA=Target.Audience,value=`Value(RMB)`
	) %>%
	# ProductLine is rebuilt from the SKU name with any "(...)" suffix removed,
	# overwriting the Channel value selected above.
	mutate(ProductLine=gsub('\\(.+?\\)','',SKU)) %>%
	group_by(Brand,Subbrand,ProductLine,SKU,TA) %>%
	summarise(value=sum(value))
# Collapse SKUs into a comma-separated list per Brand/Subbrand/ProductLine/TA.
x2 <- x2 %>% group_by(Brand,Subbrand,ProductLine,TA) %>%
	summarise(SKU=paste(SKU,collapse=', '),value=sum(value),n=n())
# --- OTC channel (sheet 4) ---
x <- read.xlsx('data4coding.xlsx',sheet=4)
colnames(x)[19] <- 'value'  # column 19 holds value sales; give it a usable name
x3 <- x %>%
	filter(Year*100+Month>=201806) %>%
	group_by(Brand=Brand.CN,Subbrand=gene.CN,ProductLine=gene.CN
			,TA=Consumer.Type,SKU=SKU.Name) %>% summarise(value=sum(as.numeric(value)))
x3 <- x3 %>% group_by(Brand,Subbrand,ProductLine,TA) %>%
	summarise(SKU=paste(SKU,collapse=', '),value=sum(value),n=n())
# Tag each aggregate with its channel and stack them.
x2 <- data.table(Channel='EC',x2)
x3 <- data.table(Channel='OTC',x3)  # fix: closing parenthesis was missing, file did not parse
x <- rbind(x2,x3) %>% select(Channel,Brand,Subbrand,ProductLine,TA)
|
82a8c94373aa8de37daac518e0cb1af6b6fc0fa7 | f537078788fada8d2658f0c45b7c3f21808ab447 | /Codigo/Main.R | 4a88e5ba4e5ba19624069e6d145b72900ccaf935 | [] | no_license | Jerry-Master/ProjectAA1 | c252512c564bc195a17d684bc96d45f2de600ff3 | 963b0daf02527f3fb0dd56d7284794ca1820ddc2 | refs/heads/master | 2023-06-04T07:34:50.911854 | 2021-06-26T16:55:40 | 2021-06-26T16:55:40 | 265,807,457 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,556 | r | Main.R | # LDA/ QDA
# Main analysis script for the heart-disease data (Cleveland and Hungarian).
# Project helpers (Preprocessing.R, Visualizations.R, Resampling.R) are
# sourced below; they define much.na.cols, remove.var, knn.imputation, etc.
library(MASS)  # LDA / QDA
# RDA
library(klaR)
# Multinomial
library(nnet)
# Cross-Validation
library(TunePareto)
# Naive Bayes
library(e1071)
# k-NN
library(class)
# Correspondence analysis
library(ca)
# Cross-validation nn
library(caret)
### 1. Read data
# Seed fixed up-front so the kNN imputation below is reproducible.
set.seed(2105)
# The Cleveland file has no header row; columns keep default names V1..Vn.
clev <- read.csv("../data/cleveland.csv", header=F)
### 2. Preprocess data
source("Preprocessing.R")
# Missings
# Keep only columns with enough observed values (threshold 60 — presumably a
# percentage; see much.na.cols in Preprocessing.R — TODO confirm), drop the
# hand-picked useless columns, then impute remaining gaps with 7-NN.
clev <- clev[,much.na.cols(clev,60)]
dummy <- c("V1", "V2", "V36", "V69", "V70", "V71", "V72", "V73", "V28", "location")
clev <- remove.var(clev, dummy)
clev <- knn.imputation(clev, 7)
# Multicollinearity
# Uncomment this lines to see which variables have big correlation
#corr.factors <- cor(clev)
#which(abs(corr.factors)-diag(diag(corr.factors))>0.9, arr.ind=T)
# V57 and V55 are dropped because they correlate |r| > 0.9 with other columns
# (found with the commented diagnostic above).
clev <- remove.var(clev, c("V57", "V55"))
# Factors
factores <- c("V58", "V4", "V9", "V16", "V18", "V19", "V20", "V21", "V22", "V23", "V24", "V25", "V26", "V27", "V38", "V39", "V41", "V51", "V56", "V11", "V59", "V60", "V61", "V63", "V65", "V67", "V68")
for (f in factores){
    clev[,f] <- as.factor(clev[,f])
}
# Merge level 2 of V25 into level 1 (presumably a rare level — TODO confirm).
clev <- move.value(clev, "V25", 2, 1)
#### 2.1 Visualizations
source("Visualizations.R")
histograms(clev)
boxplot.num(clev)
histograms(clev, F)
show.cor(clev)
#### 2.2 Modification of values
# Normality diagnostics used to choose the transformations applied below.
qqplots(clev)
boxcox.plots(clev)
# Variables with many zeros:
boxcox.plot.special(clev, c("V14", "V15", "V40"))
# Apply log / sqrt transformation
clev <- apply.trans(clev, sqrt.neg.vars=c("V10", "V12", "V31", "V43"), sqrt.vars = c("V14", "V40"))
# Standardize the numeric columns.
clev <- scale.num(clev)
#### 2.3. Feature extraction
# Separe train and test data, seed for reproducibility.
set.seed(2000)
n <- nrow(clev)
train.lenght <- round(2*n/3)
# Shuffle once, then first 2/3 rows are training and the rest are test.
clev <- clev[sample(n),]
train <- clev[1:train.lenght,]
test <- clev[(train.lenght+1):n,]
# Map the response levels of V58 to plotting colours (one colour per class).
col.class <- as.numeric(train$V58)
col.class[col.class==1] <- "red"
col.class[col.class==2] <- "green"
col.class[col.class==3] <- "blue"
col.class[col.class==4] <- "yellow"
col.class[col.class==5] <- "purple"
pca <- pca.num(train)
par(mfrow=c(1,1))
plot.pca(train, col.class, pca = pca)
# Fisher discriminant analysis fitted on train only; V21/V22/V59 are excluded
# from the formula (presumably problematic columns — TODO confirm why).
fda <- plot.fda(train, V58~.-V21-V22-V59, col.class)
# Append the discriminant coordinates (LD1..LD4) to both partitions.
train <- extract.fda(fda, train)
test <- extract.fda(fda, test)
# Correspondence analysis
mca.features <- mcaplot(train, factores)
### 3. Resampling protocol
source("Resampling.R")
### 4. Models
# RDA on the raw predictors (the explicit formula leaves out the FDA features).
rda.model <- rda(V58~V3+V4+V9+V10+V11+V12+V14+V15+V16+V18+V19+V20+V21+V22+V23+V24+V25+V26+V27+V29+V31+V32+V33+V34+V35+V37+V38+V39+V40+V41+V43+V44+V51+V56+V60+V61+V63+V65+V67+V68, data=train)
cross.validation(train, train$V58, rda.model, 1, 10, T)
# RDA on everything, including the LD1..LD4 discriminant features.
rda.model.fda <- rda(V58~.,data=train)
cross.validation(train, train$V58, rda.model.fda, 2, 10, T)
# NOTE(review): naive.model is not defined in this script — presumably created
# in Resampling.R; confirm before running non-interactively.
cross.validation.naive(train, train$V58, naive.model, 10, 10)
# k-NN: CV error for k = 1..20, plotted to pick the best k by eye.
err <- c()
for (k in 1:20){
    err <- c(err, cross.validation.knn(train, train$V58, 10,10, k))
}
plot(err, type = "l")
err
# Multinomial logistic regression, with and without the FDA features, each
# also reduced by stepwise AIC selection.
multinomial.model <- multinom(V58~., data=train)
cross.validation(train, train$V58, multinomial.model, 10, 10, F)
multinomial.model.step <- step(multinomial.model)
cross.validation(train, train$V58, multinomial.model.step, 10, 10, F)
multinomial.model.noFDA <- multinom(V58~.-LD1-LD2-LD3-LD4, data=train)
cross.validation(train, train$V58, multinomial.model.noFDA, 10, 10, F)
multinomial.model.noFDA.step <- step(multinomial.model.noFDA)
cross.validation(train, train$V58, multinomial.model.noFDA.step, 10, 10, F)
### Test error
# NOTE(review): the refit is assigned to rda.model but predictions are taken
# from rda.model.fda — confirm which model was meant to be evaluated.
rda.model <- update(rda.model.fda, data=train)
pred.test <- predict(rda.model.fda, test)
pred.test <- pred.test$class
(err.table <- table(True=test$V58, Pred=pred.test))
(err.test <- 1-sum(diag(err.table))/sum(err.table))
## Hungarian data
# Same pipeline as for Cleveland, adapted to this file's quirks (different
# factor list, extra transformations, two troublesome variables removed).
hung <- read.csv("../data/hungarian.csv", header=F)
# Missings
hung <- hung[,much.na.cols(hung,60)]
dummy <- c("V1", "V2", "V36", "V69", "V70", "V71", "V72", "V73", "V28")
hung <- remove.var(hung, dummy)
hung <- knn.imputation(hung, 7)
# Multicollinearity
#corr.factors <- cor(hung)
#which(abs(corr.factors)-diag(diag(corr.factors))>0.9, arr.ind=T)
hung <- remove.var(hung, c("V57", "V55"))
# Factors
factores <- c("V58", "V4", "V9", "V16", "V19", "V20", "V21", "V22", "V23", "V24", "V25", "V26", "V27", "V38", "V39", "V56", "V11")
for (f in factores){
    hung[,f] <- as.factor(hung[,f])
}
hung <- move.value(hung, "V25", 2, 1)
# Exploratory plots mirroring sections 2.1/2.2 above.
histograms(hung)
boxplot.num(hung)
histograms(hung, F)
show.cor(hung)
qqplots(hung)
boxcox.plots(hung)
# Variable with many 0
boxcox.plot.special(hung, c("V40"))
# Apply transformations
hung <- apply.trans(hung, sqrt.neg.vars=c("V10", "V12"), sqrt.vars = c("V31", "V29", "V42", "V43"), log.vars = c("V6", "V7", "V40"))
hung <- scale.num(hung)
# Remove variables and levels which causes troubles
hung <- remove.var(hung, c("V23", "V39"))
hung <- move.value(hung, "V19", 2, 1)
# Train / test
set.seed(2000)
n <- nrow(hung)
train.lenght <- round(2*n/3)
hung <- hung[sample(n),]
train <- hung[1:train.lenght,]
test <- hung[(train.lenght+1):n,]
# Colour codes for the response classes, built on the training partition.
col.class <- as.numeric(train$V58)
col.class[col.class==1] <- "red"
col.class[col.class==2] <- "green"
col.class[col.class==3] <- "blue"
col.class[col.class==4] <- "yellow"
col.class[col.class==5] <- "purple"
# NOTE(review): col.class2 is computed exactly like col.class (also on train)
# but is passed to plot.pca for the full `hung` data — lengths may disagree;
# confirm whether this was meant to use hung$V58.
col.class2 <- as.numeric(train$V58)
col.class2[col.class2==1] <- "red"
col.class2[col.class2==2] <- "green"
col.class2[col.class2==3] <- "blue"
col.class2[col.class2==4] <- "yellow"
col.class2[col.class2==5] <- "purple"
pca <- pca.num(hung)
par(mfrow=c(1,1))
plot.pca(hung, col.class2, pca = pca)
# Uncomment if you want to try the result with PCA features (Disclaimer: results are bad)
# If you execute this line, you will have to remove the pca features later on.
# hung <- extract.pca(pca, hung)
# FDA without V56, then project train/test onto the discriminant axes.
fda <- plot.fda(train, V58~.-V56, col.class)
train <- extract.fda(fda, train)
test <- extract.fda(fda, test)
# Visual check: test samples in the first two discriminant coordinates.
plot(test$LD1, test$LD2, col=c("red","green","yellow","blue","purple")[as.numeric(test$V58)], xlab="LD1", ylab="LD2")
legend("topleft", legend=c("0","1","2","3","4"), fill=c("red","green","yellow","blue","purple"))
mca.features <- mcaplot(train, factores)
# Models
# RDA without / with the FDA coordinates, each cross-validated.
rda.model <- rda(V58~.-LD1-LD2-LD3-LD4, data=train)
cross.validation(train, train$V58, rda.model, 1, 10, T)
rda.model.fda <- rda(V58~.,data=train)
cross.validation(train, train$V58, rda.model.fda, 1, 10, T)
cross.validation.naive(train, train$V58, naive.model, 10, 10)
# k-NN error curve over k = 1..20.
err <- c()
for (k in 1:20){
    err <- c(err, cross.validation.knn(train, train$V58, 10,10, k))
}
plot(err, type = "l")
err
# Multinomial logit variants (full / stepwise / without FDA features).
multinomial.model <- multinom(V58~., data=train)
cross.validation(train, train$V58, multinomial.model, 10, 10, F)
multinomial.model.step <- step(multinomial.model)
cross.validation(train, train$V58, multinomial.model.step, 10, 10, F)
multinomial.model.noFDA <- multinom(V58~.-LD1-LD2-LD3-LD4, data=train)
cross.validation(train, train$V58, multinomial.model.noFDA, 10, 10, F)
multinomial.model.noFDA.step <- step(multinomial.model.noFDA)
cross.validation(train, train$V58, multinomial.model.noFDA.step, 10, 10, F)
# Uncomment this line to see that there are small groups
#qda.model <- qda(V58~.-LD1-LD2-LD3-LD4, data=train)
## Neural networks (single hidden layer, fitted with caret + nnet)
# 10x10 CV, is temporary
trc <- trainControl (method="repeatedcv", number=10, repeats=10)
# First try, only FDA features
# Grid search over weight decay with a fixed hidden-layer size of 9.
decays <- c(0.0001, 0.001, 0.01, 0.1, 1)
nn.model10x10CV <- train(V58~LD1+LD2+LD3+LD4, data = train, method = 'nnet',
                         trace=F, maxit=1000,
                         tuneGrid = expand.grid(.size=9,.decay=decays), trControl=trc)
nn.model10x10CV$results
nn.model10x10CV$bestTune
# Second try, with all the data
# Cheaper 1x5 CV because the full design matrix is much wider.
trc <- trainControl (method="repeatedcv", number=5, repeats=1)
decays <- c(0, 0.01, 0.1, 1)
nn.model1x5CV <- train(V58~., data = train, method = 'nnet',
                       trace=F, maxit=1000, MaxNWt=2000,
                       tuneGrid = expand.grid(.size=9,.decay=decays), trControl=trc)
nn.model1x5CV$results
nn.model1x5CV$bestTune
# Train on the rest of the data
# NOTE(review): the weight-cap argument is spelled MaxNWt here but MaxNWts
# elsewhere — nnet documents MaxNWts; confirm the intended cap is applied.
nn <- nnet(V58~., data=train, maxit=1000, size=9, decay=1, MaxNWt=2000)
# Training error
table(train$V58, apply(nn$fitted.values, 1, which.max)-1)
# Third try, more neurons, and quittid variables with many levels
trc <- trainControl (method="repeatedcv", number=10, repeats=1)
decays <- c(0.5, 0.67, 0.66, 0.68, 0.7, 1)
nn.model1x10CV <- train(V58~.-LD1-LD2-LD3-LD4-V56-V20-V21-V22, data = train,
                        method = 'nnet',
                        trace=F, maxit=1000, MaxNWts=10000,
                        tuneGrid = expand.grid(.size=20,.decay=decays),
                        trControl=trc)
nn.model1x10CV$results
nn.model1x10CV$bestTune
# Train on the rest of the data
nn <- nnet(V58~.-LD1-LD2-LD3-LD4-V56-V20-V21-V22, data=train, maxit=1000, size=20, decay=0.68, MaxNWts=10000)
# Training error
(tab <- table(train$V58, apply(nn$fitted.values, 1, which.max)-1))
(err <- 1 - sum(diag(tab))/sum(tab))
# Test error
pred <- predict(nn, test)
(tab <- table(test$V58, apply(pred, 1, which.max)-1))
(err <- 1 - sum(diag(tab))/sum(tab))
# Joining labels to binarize
# Collapse severities 1-4 into a single "disease" class and re-score.
target <- as.numeric(test$V58)-1
target[target > 1] <- 1
pred <- apply(pred, 1, which.max)-1
pred[pred>1] <- 1
(tab <- table(target, pred))
(err <- 1 - sum(diag(tab))/sum(tab))
## Two-classes
# Be careful to remove PCA features if they have been added
# If they do are added, try simply reexecuting the hung preprocessing part of the code.
# Binary response: 0 = no disease, 1 = any positive severity (levels merged).
aux <- hung
aux$V58[aux$V58 != 0] <- 1
aux$V58 <- droplevels(aux$V58)
# Train / test
set.seed(2000)
n <- nrow(aux)
train.lenght <- round(2*n/3)
aux <- aux[sample(n),]
train_aux <- aux[1:train.lenght,]
test_aux <- aux[(train.lenght+1):n,]
# A model that achieves 0 training error
nn <- nnet(V58~.-V56-V20-V21-V22, data=train_aux, maxit=1000, size=30, decay=0, MaxNWts = 10000)
(tab<- table(train_aux$V58, (nn$fitted.values > 0.5)*1))
(err.train <- 1 - sum(diag(tab))/sum(tab))
# Tuning the decay for it
# NOTE(review): trc still holds the last trainControl from the multi-class
# section (10-fold, 1 repeat) — confirm that protocol is intended here.
decays <- c(0.4, 0.52, 0.55, 0.57, 0.6, 0.7)
nn.model1x10CV <- train(V58~.-V56-V20-V21-V22, data = train_aux,
                        method = 'nnet',
                        trace=F, maxit=1000, MaxNWts=10000000,
                        tuneGrid = expand.grid(.size=30,.decay=decays), trControl=trc)
nn.model1x10CV$results
nn.model1x10CV$bestTune
# Update weights
nn <- nnet(V58~.-V56-V20-V21-V22, data=train_aux, maxit=1000, size=30, decay=0.55, MaxNWt=10000)
# Training error
(tab<- table(train_aux$V58, (nn$fitted.values > 0.5)*1))
(err.train <- 1 - sum(diag(tab))/sum(tab))
# Test error
pred <- predict(nn, test_aux)
(tab<- table(test_aux$V58, (pred > 0.5)*1))
(err.test <- 1 - sum(diag(tab))/sum(tab))
|
c81d3a1e17e777d1869384ac49ddf83b995d9b13 | e3f2ffe6640fff8bd617d26d2d8ffc70dd8345ba | /0.2_PreprocessCSI_Universal.R | f29cbd598951b7d69d0445d3ad095bd2f8d463dc | [] | no_license | charleshjw/CISM_Enhanced | 3818e2797fa83b958e4c5c91d6023ba784ca1b86 | 2041bc8705bd30f93f394e3265f41b4725802362 | refs/heads/master | 2020-05-27T22:17:13.985329 | 2019-05-31T02:00:21 | 2019-05-31T02:00:21 | 81,352,049 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,659 | r | 0.2_PreprocessCSI_Universal.R | # To take updated raw downlodas from market research providers (BoA, JPM, Citi
# and Barclays, and standardize observations and save to target Access database.
# It needs to be run once every time data is downloaded or updated. Take 5-10
# minutes to run.
#
# Author: E620927
###############################################################################
# Output folder for the per-curve diagnostic plots; created on first run.
Output_curve = file.path(Output_root,"CurveInfo")
if(!file.exists(Output_curve)) {
	dir.create(Output_curve)
}
# Target Access tables holding the standardized CSI history
# (quarter average, month average, month end).
access_table_names <- c("I_HIST_CSI_QTRAVE","I_HIST_CSI_MONAVE","I_HIST_CSI_MONEND");
#Process CSI if table names cannot be found in DB or REPROCESS is TRUE
# The whole ingestion/standardization pass runs only when the standardized
# history tables are missing from Access or REPROCESS is TRUE.
if (!all(dbHasTable(dbc,access_table_names)) | REPROCESS) {
	#read data inputs
	# Raw provider downloads, located via the list_files control table
	# (paths built by concat_file_path()).
	data_jpm <- read.csv(concat_file_path(tableFilter(list_files,"VariableName","data_jpm")))
	data_barclays <- read.xlsx(concat_file_path(tableFilter(list_files,"VariableName","data_barclays")),sheetIndex=1)
	#data_citi <- read.xlsx(concat_file_path(tableFilter(list_files,"VariableName","data_citi")),sheetIndex=2)
	data_boa <- read.csv(concat_file_path(tableFilter(list_files,"VariableName","data_boa")))
	data_mmd <- read.xlsx(concat_file_path(tableFilter(list_files,"VariableName","data_mmd")),sheetIndex=1)
	# data_armbs_arms <- read.xlsx(concat_file_path(tableFilter(list_files,"VariableName","data_ambs_arms")),sheetIndex=1)
	# data_nambs_subprime <- read.xlsx(concat_file_path(tableFilter(list_files,"VariableName","data_subprime")),sheetIndex=1)
	##
	# Accumulator: one cleaned daily xts series per CSI id, filled per provider.
	csi_ts_list <- list()
	##################################################################################
	###Process JPM
	# JPM uses "N/A" strings and day-month-year dates; fix both before xts.
	data_jpm[data_jpm=="N/A"] <- NA
	data_jpm$Date <- as.Date(as.character(data_jpm$Date),format="%d-%b-%y")
	# CSI_ALL_DailyClean <- list()
	#j <-5
	for (j in 2:ncol(data_jpm)) {
		# Column name is the CSI id; dots introduced by make.names -> dashes.
		csi_name <- colnames(data_jpm)[j]
		csi_name <- gsub("[.]","-",csi_name)
		data_jpm_index_original <- as.xts(as.numeric(as.character(data_jpm[,j])),data_jpm[,1])
		data_jpm_index_original <- na.omit(data_jpm_index_original)
		# Save a daily diagnostic plot per series.
		file_name = paste(Output_curve,"/",csi_name,"_daily.png",sep="")
		png(file_name, width=600,height=400)
		# file_name = paste(Output_curve,"/",csi_name,"_daily.jpg",sep="")
		# jpeg(file_name, width=600,height=400)
		# plot(data_jpm_index_original,main=paste0(csi_name,": JPMM"))
		print(plot.xts(data_jpm_index_original))
		dev.off()
		csi_ts_list[[csi_name]] <- data_jpm_index_original
	}
	###Process Barclays
	#CSI_ALL <- list()
	#delete rows with n/a
	# Libor OAS series are only used from Apr-2010 on; earlier history is
	# truncated inside the loop below.
	cutoff_liborOAS <- as.Date("2010-04-01")
	data_barclays<-data_barclays[data_barclays[,2]!="n/a",]
	for (j in 2:ncol(data_barclays)) {
		# j <- 4
		csi_name <- colnames(data_barclays)[j]
		csi_name <- gsub("[.]","-",csi_name)
		data_barclays_index_original <- as.xts(as.numeric(as.character(data_barclays[,j])),data_barclays[,1])
		data_barclays_index_original <- na.omit(data_barclays_index_original)
		if (grepl("LiborOAS",csi_name)) {
			data_barclays_index_original <- data_barclays_index_original[time(data_barclays_index_original) >= cutoff_liborOAS]
		}
		# Scale by 100 (presumably percent -> basis points; confirm with
		# the provider's file spec).
		data_barclays_index_original <- data_barclays_index_original*100
		file_name = paste(Output_curve,"/",csi_name,"_daily.png",sep="")
		png(file_name, width=600,height=400)
		print(plot(data_barclays_index_original,main=paste0("ORIGINAL ",csi_name,"/Barlcays")))
		dev.off()
		csi_ts_list[[csi_name]] <- data_barclays_index_original
	}
	# Quick interactive sanity check of the first stored series.
	csi_ts_list[[1]]
	###################################################################################
	####Citi
	##colnames(data_citi)
	##head(data_citi)
	#data_citi <- data_citi[1:(nrow(data_citi)-1),]
	#data_citi$Date <- as.Date(as.numeric(as.character(data_citi$Date)), origin="1899-12-30")
	#
	#for (j in 2:ncol(data_citi)) {
	## j <- 2
	#	csi_name <- colnames(data_citi)[j]
	#	data_citi_index_original <- as.xts(as.numeric(as.character(data_citi[,j])),data_citi[,1])
	#	file_name = paste(Output_curve,"/",gsub("[.]","-",csi_name),"_daily.png",sep="")
	#	png(file_name, width=600,height=400)
	#	plot(na.omit(data_citi_index_original),main=paste0(csi_name,": Citi"))
	#	dev.off()
	#
	#	CSI_ALL_DailyClean <- rbind(CSI_ALL_DailyClean,cbind(DATE=format(index(data_citi_index_original),"%m/%d/%Y"),MONTH=gsub(" ","",as.yearmon(index(data_citi_index_original))),CSI_ID=gsub("[.]","-",csi_name),CSI_VALUE=as.vector(data_citi_index_original)))
	#}
	##################################################################################
	###BoA
	# BoA file stacks several bond indices; split by the Bond.Index column.
	segments <- unique(data_boa$Bond.Index)
	segments <- segments[segments!=""]
	data_boa$Date <- as.Date(data_boa$Date,format="%m/%d/%Y")
boa_retrieve_csiname <- function(name) {
# name <- tmp[,"Description"]
name <- as.character(unique(name))
name <- name[name!=""]
if (length(name) != 1) {
stop("BOA data error. Series name cannot be identified.")
}
if (grepl("Euro Covered Bond French Issuers Index",name)) {
return("CVB_FRA_EUR")
} else if (grepl("Euro Covered Bond Nordic Issuers Index",name)) {
return("CVB_NOR_EUR")
} else if (grepl("Euro Covered Bond UK Issuers Index",name)) {
return("CVB_UK_EUR")
} else if (grepl("Sterling Non-Gilts Covered",name)) {
return("CVB_GBP")
} else if (grepl("Australian Quasi Govt",name)) {
return("QGOV_AUD")
} else if (grepl("Euro Quasi-Govt",name)) {
return("QGOV_EUR")
} else if (grepl("Sterling Quasi Govt",name)) {
return("QGOV_GBP")
} else if (grepl("Global Broad Mkt Quasi-Govt",name)) {
return("QGOV_GLOBE")
} else if (grepl("MBS GNMA 30 Year Current Coupon",name)) {
return("AMBS_GNMA_30Y_CurrentCoupon_OAS_ICE")
} else if (grepl("Mortgages GNMA All 15 Year",name)) {
return("AMBS_GNMA_15Y_OAS_ICE")
} else if (grepl("Mortgages GNMA All 30 Year",name)) {
return("AMBS_GNMA_30Y_OAS_ICE")
} else if (grepl("Mortgages GNMA 15 & 30 Yr Current Coupon",name)) {
return("AMBS_GNMA_15Y_30Y_OAS_ICE")
} else if (grepl("Mortgages GNMA Master",name)) {
return("AMBS_GNMA_ALL_OAS_ICE")
} else if (grepl("Mortgages All FHLMC & FNMA 30 Yr",name)) {
return("AMBS_FNMA_FHLMC_30Y_OAS_ICE")
} else if (grepl("MBS All FNMA Current Coupon",name)) {
return("AMBS_FNMA_ALL_CurrentCoupon_OAS_ICE")
} else if (grepl("Mortgages FNMA All 15 Yr",name)) {
return("AMBS_FNMA_15Y_OAS_ICE")
} else if (grepl("Mortgages FNMA All 30 Yr",name)) {
return("AMBS_FNMA_30Y_OAS_ICE")
} else if (grepl("Mortgages FNMA Master",name)) {
return("AMBS_FNMA_ALL_OAS_ICE")
} else if (grepl("MBS FNMA 15 Year Current Coupon",name)) {
return("AMBS_FNMA_15Y_CurrentCoupon_OAS_ICE")
} else if (grepl("MBS FNMA 30 Year Current Coupon",name)) {
return("AMBS_FNMA_30Y_CurrentCoupon_OAS_ICE")
} else if (grepl("MBS FHLMC 30 Yr Current Coupon",name)) {
return("AMBS_FHLMC_30Y_CurrentCoupon_OAS_ICE")
} else if (grepl("Mortgages FHLMC All 30 Yr",name)) {
return("AMBS_FHLMC_30Y_OAS_ICE")
}
}
	# One BoA segment per bond index: extract its OAS history and store it
	# under the internal CSI id resolved by boa_retrieve_csiname().
	for (j in 1:length(segments)) {
		# j <- 10
		print(j)
		tmp <- data_boa[data_boa$Bond.Index==segments[j],]
		tmp <- tmp[tmp[,"Description"]!="",]
		csi_name <- boa_retrieve_csiname(tmp[,"Description"])
		csi_name <- gsub("[.]","-",csi_name)
		data_boa_index_original <- as.xts(as.numeric(tmp[,"OAS"]),tmp[,"Date"])
		data_boa_index_original <- na.omit(data_boa_index_original)
		file_name = paste(Output_curve,"/",csi_name,"_daily.png",sep="")
		png(file_name, width=600,height=400)
		print(plot(data_boa_index_original, main=paste0(csi_name,": BOA")))
		dev.off()
		csi_ts_list[[csi_name]] <- data_boa_index_original
		# if ("Libor.OAS" %in% colnames(tmp) & grepl("OAS",csi_name)) {
		# 	csi_name <- gsub("OAS","LiborOAS",csi_name)
		# 	data_boa_index_original <- as.xts(as.numeric(tmp[,"Libor.OAS"]),tmp[,"Date"])
		# 	data_boa_index_original <- na.omit(data_boa_index_original)
		# 	file_name = paste(Output_curve,"/",csi_name,"_daily.png",sep="")
		# 	png(file_name, width=600,height=400)
		# 	print(plot(data_boa_index_original, main=paste0(csi_name,": BOA")))
		# 	dev.off()
		# 	csi_ts_list[[csi_name]] <- data_boa_index_original
		# }
	}
	##################################################################################
	###MMD
	# MMD municipal GO yields: rename rating/tenor columns to CSI ids.
	colnames(data_mmd)[colnames(data_mmd)=="AAA.GO.5.yr"] = "MUNI_GO_AAA_5Y"
	colnames(data_mmd)[colnames(data_mmd)=="AA.GO.5.yr"] = "MUNI_GO_AA_5Y"
	colnames(data_mmd)[colnames(data_mmd)=="A.GO.5.yr"] = "MUNI_GO_A_5Y"
	colnames(data_mmd)[colnames(data_mmd)=="BAA.GO.5.yr"] = "MUNI_GO_BAA_5Y"
	colnames(data_mmd)[colnames(data_mmd)=="AAA.GO.10.yr"] = "MUNI_GO_AAA_10Y"
	colnames(data_mmd)[colnames(data_mmd)=="AA.GO.10.yr"] = "MUNI_GO_AA_10Y"
	data_mmd <- data_mmd[!is.na(data_mmd[,1]),]
	#csi_name = "MUNI_GO_AAA_5Y"
	# 5Y series are stored as yield spreads over the 5Y Treasury, scaled
	# by 100 (presumably percent -> basis points).
	mmd_csi_5y <- c("MUNI_GO_AAA_5Y","MUNI_GO_AA_5Y","MUNI_GO_A_5Y","MUNI_GO_BAA_5Y")
	for (csi_name in mmd_csi_5y) {
		# csi_name <- mmd_csi_5y[1]
		tmp_data <- 100*(data_mmd[,csi_name]-data_mmd[,"Treasury.5.yr"])
		data_mmd_index_original <- as.xts(as.numeric(tmp_data),data_mmd[,1])
		data_mmd_index_original <- na.omit(data_mmd_index_original)
		file_name = paste(Output_curve,"/",csi_name,"_daily.png",sep="")
		png(file_name, width=600,height=400)
		print(plot(data_mmd_index_original,main=paste0(csi_name,": MMD")))
		dev.off()
		csi_ts_list[[csi_name]] <- data_mmd_index_original
	}
	# 10Y series: same construction against the 10Y Treasury.
	mmd_csi_10y <- c("MUNI_GO_AAA_10Y","MUNI_GO_AA_10Y")
	for (csi_name in mmd_csi_10y) {
		tmp_data <- 100*(data_mmd[,csi_name]-data_mmd[,"Treasury.10.yr"])
		data_mmd_index_original <- as.xts(as.numeric(tmp_data),data_mmd[,1])
		data_mmd_index_original <- na.omit(data_mmd_index_original)
		file_name = paste(Output_curve,"/",csi_name,"_daily.png",sep="")
		png(file_name, width=600,height=400)
		print(plot(data_mmd_index_original,main=paste0(csi_name,": MMD")))
		dev.off()
		csi_ts_list[[csi_name]] <- data_mmd_index_original
	}
###################################################################################
####Process self constructured NAMBS Subprime########
#for (j in 2:ncol(data_nambs_subprime)) {
## j <- 2
# csi_name <- colnames(data_nambs_subprime)[j]
# csi_name <- gsub("[.]","-",csi_name)
#
# data_armbs_arm <- as.xts(as.numeric(as.character(data_nambs_subprime[,j])),data_nambs_subprime[,1])
# data_armbs_arm <- na.omit(data_armbs_arm)
# file_name = paste(Output_curve,"/",gsub("[.]","-",csi_name),"_daily.png",sep="")
# png(file_name, width=600,height=400)
# plot(data_armbs_arm,main=paste0("ORIGINAL ",csi_name,"/Self-Constructed"))
# dev.off()
#
# csi_ts_list[[csi_name]] <- data_armbs_arm
#}
##################################################################################
##Outlier Adjustments
#adjustment for JPM outliers
outliers <- c("AMBS_CMO_PAC_2Y","AMBS_CMO_SEQ_5Y")
for (csi_name in outliers) {
# csi_name = outliers[1]
orig <- csi_ts_list[[csi_name]]
csi_ts_list[[csi_name]] <- csi_ts_list[[csi_name]][csi_ts_list[[csi_name]]!=0]
file_name = paste(Output_curve,"/",csi_name,"_daily_outliers_removed.png",sep="")
png(file_name, width=800,height=600)
par(mfrow=c(1,2))
print(plot(orig,main=paste0("Original ",csi_name,"/JPMM")))
print(plot(csi_ts_list[[csi_name]],main=paste0("OUTLIER REMOVED ",csi_name,"/JPMM")))
dev.off()
}
# Smooth known outliers in the Barclays Agency Debt ("AD*") series: each
# observation falling on one of the flagged dates is replaced by the average
# of its two neighbouring observations. (Note: six dates are flagged, not
# four as the old comment claimed.)
outlier_csi <- names(csi_ts_list)[grepl("^AD",names(csi_ts_list))]
outlier_dates <- as.Date(c("10/12/2001","10/11/2002","05/24/2004","10/12/2004","5/23/2005","10/12/2005"),format="%m/%d/%Y")
for (csi_name in outlier_csi) {
# csi_name = outlier_csi[1]   # debug helper
orig <- csi_ts_list[[csi_name]]
# positions of the flagged dates actually present in this series
select <- which(index(csi_ts_list[[csi_name]])%in%outlier_dates)
# guard: neighbour averaging needs both a previous and a next observation;
# drop any flagged point sitting at the very start or end of the series
# (previously select-1 could be 0 / select+1 out of range, causing an error)
select <- select[select > 1 & select < length(index(csi_ts_list[[csi_name]]))]
# replace each flagged value by the mean of its immediate neighbours
csi_ts_list[[csi_name]][select] <- (as.numeric(csi_ts_list[[csi_name]][select-1])+as.numeric(csi_ts_list[[csi_name]][select+1]))/2
# csi_ts_list[[csi_name]] <- csi_ts_list[[csi_name]][csi_ts_list[[csi_name]]>=0]
file_name = paste(Output_curve,"/",csi_name,"_daily_outliers_removed.png",sep="")
png(file_name, width=800,height=600)
# two panels: original on the left, smoothed on the right
par(mfrow=c(1,2))
print(plot(orig,main=paste0("Original ",csi_name,"/Barclays")))
# BUGFIX: right-panel title previously read "Barlcays"
print(plot(csi_ts_list[[csi_name]],main=paste0("OUTLIER REMOVED ",csi_name,"/Barclays")))
dev.off()
}
##################################################################################
########################overlays
# Manual overrides: read up to three Excel files of replacement values (aux,
# CLO, NAMBS) and splice each override window into the matching series in
# csi_ts_list, keeping the original data before and after the override window.
file_names <- list()
overrides <- list()
# concat_file_path / tableFilter are project-defined helpers (defined elsewhere)
file_names[[1]] <- concat_file_path(tableFilter(list_files,"VariableName","data_overrides_aux"))
file_names[[2]] <- concat_file_path(tableFilter(list_files,"VariableName","data_overrides_clo"))
file_names[[3]] <- concat_file_path(tableFilter(list_files,"VariableName","data_overrides_nambs"))
for (i in 1:length(file_names)) {
# i <- 3   # debug helper
if (file.exists(file_names[[i]])) {
data <- read.xlsx(file_names[[i]],sheetIndex=1)
# column 1 is the date index; each remaining column is one override series
for (j in 2:ncol(data)) {
csi_name <- colnames(data)[j]
# restore dashes that read.xlsx converted to dots in the column names
csi_name <- gsub("[.]","-",csi_name)
overrides[[csi_name]] <- as.xts(as.numeric(as.character(data[,j])),data[,1])
}
}
}
for (i in 1:length(overrides)) {
# i <- 2   # debug helper
csi_name = names(overrides)[i]
csi_name <- gsub("[.]","-",csi_name)
override <- overrides[[csi_name]]
orig <- csi_ts_list[[csi_name]]
# NOTE(review): seg1/seg2 are computed but never used — the same conditions
# are re-evaluated inline in the rbind() below; candidates for removal
seg1 <- index(orig)<min(index(override))
seg2 <- index(orig)>max(index(override))
# splice: original before the override window + override + original after it
tsnew <- rbind(orig[index(orig)<min(index(override))],override,orig[index(orig)>max(index(override))])
file_name = paste(Output_curve,"/",csi_name,"_override.png",sep="")
png(file_name, width=1200,height=800)
# three stacked panels: original, override window, spliced result
par(mfrow=c(3,1))
print(plot(orig,main=paste("Original:",csi_name)))
print(plot(override,main=paste("Overlays:",csi_name)))
print(plot(tsnew,main=paste("Overlaid:",csi_name)))
# NOTE(review): abline() after plot.xts may not draw on the intended panel —
# verify the red marker actually appears in the output
abline(v=.index(orig)[1],col="red")
dev.off()
# replace the stored series with the spliced version
csi_ts_list[[csi_name]] <- tsnew
}
##################################################################################
######################Synthetic indexes
# Build synthetic ARM series for each agency as (15Y fixed AMBS series minus
# the 5Y swap spread series), plot each, and add it to csi_ts_list.
rmbs_fix <- names(csi_ts_list)[grepl("^AMBS_.*_15Y$",names(csi_ts_list))]
# NOTE(review): this shadows base::names() with a character vector; calls to
# names(...) below still resolve to the function, but renaming would be safer
names <- c("GNMA","FHLMC","FNMA")
for (name in names) {
# name = names[1]   # debug helper
csi_name <- paste0("AMBS_ARMS_",name)
# synthetic = agency's 15Y fixed series minus the 5Y swap spread
csi_ts_list[[csi_name]] <- csi_ts_list[[rmbs_fix[grepl(name,rmbs_fix)]]]-csi_ts_list[["SWAP_SPREAD_5Y"]]
file_name = paste(Output_curve,"/",csi_name,"_daily.png",sep="")
png(file_name, width=600,height=400)
print(plot(csi_ts_list[[csi_name]],main=paste0(csi_name,": Synthetic")))
dev.off()
}
#####################Delete Swap Spread 5Y index which is solely used for creating synthetic indexes
csi_ts_list = csi_ts_list[names(csi_ts_list)!="SWAP_SPREAD_5Y"]
##################################################################################
###Output the daily data after adjustments for outliers, overlay and synthetic indexes
# xtslist_to_df / xtslist_to_db / saveTable are project-defined helpers
CSI_ALL_DailyTs_Merge <- xtslist_to_df(csi_ts_list)
file_name = paste(Output_curve,"/CSI_ALL_DailyClean.csv",sep="")
write.csv(as.data.frame(CSI_ALL_DailyTs_Merge), file_name)
CSI_ALL_DailyDB <- xtslist_to_db(csi_ts_list)
file_name = paste(Output_curve,"/CSI_ALL_DailyCleanDB.csv",sep="")
write.csv(CSI_ALL_DailyDB, file_name)
table_name <- "I_HIST_CSI_DailyClean";
# try(): ignore the error if the table does not exist yet
try(sqlDrop(dbc, table_name))
saveTable(dbc,table_name,data.frame(Date=as.character(index(CSI_ALL_DailyTs_Merge)),CSI_ALL_DailyTs_Merge))
##################################################################################
#calculation
# Aggregate each daily series three ways — month-end value, monthly average,
# and quarterly average — re-stamp each aggregate on its period's last
# calendar day, and write a comparison plot per series.
CSI_MONTH_END <- list()
CSI_MONTH_AVE <- list()
CSI_QUARTER_AVE <- list()
for (i in 1:length(csi_ts_list)) {
# i <- 2   # debug helper
csi <- csi_ts_list[[i]]
# truncate history at History_END_DATE
# (as.yearperiod looks like a project-defined helper — confirm its units)
csi <- csi[as.numeric(as.yearperiod(index(csi)))<=History_END_DATE]
#Monthly End
# last observation of each month
ep1 <- endpoints(csi,on="months")
csi_monthend <- csi[ep1]
# re-stamp on the last calendar day of the month:
# (first day of next month) - 1 day
index(csi_monthend) <- as.Date(as.yearmon(index(csi_monthend))+1/12)-1
CSI_MONTH_END[[i]] <- csi_monthend
#Monthly Ave
#trim to month start so partial first months are excluded from the averages
csi_trimed <- trimSeriesToMonthStart(csi)
# mean_na_ignored is a project-defined helper (presumably mean with na.rm)
csi_mean_mon <- period.apply(csi_trimed,INDEX=endpoints(csi_trimed,'months'),FUN=mean_na_ignored)
# re-stamp monthly averages on the last calendar day of the month
index(csi_mean_mon) <- as.Date(as.yearmon(index(csi_mean_mon))+1/12)-1
# csi_mean_mon[is.na(csi_mean_mon)]<-""
CSI_MONTH_AVE[[i]] <- csi_mean_mon
#Quarterly Ave
#trim to quarter start so partial first quarters are excluded
csi_trimed <- trimSeriesToQuarterStart(csi)
#trim according to the end date
# csi_trimed <- csi_trimed[as.numeric(as.yearqtr(index(csi_trimed)))<=History_END_DATE]
csi_mean_qtr <- period.apply(csi_trimed,INDEX=endpoints(csi_trimed,'quarters'),FUN=mean_na_ignored)
# csi_mean_qtr[is.na(csi_mean_qtr)]<-""
# re-stamp quarterly averages on the last calendar day of the quarter
index(csi_mean_qtr) <- as.Date(as.yearqtr(index(csi_mean_qtr))+1/4)-1
CSI_QUARTER_AVE[[i]] <- csi_mean_qtr
# overlay the three aggregations for visual sanity-checking
csi_merged <- merge(csi_monthend,csi_mean_mon,csi_mean_qtr)
file_name = paste(Output_curve,"/",gsub("[.]","-",names(csi_ts_list)[i]),"_comparison.png",sep="")
png(file_name, width=600,height=400)
ts.plot(csi_merged,col=1:3,type="b",main=names(csi_ts_list)[i])
dev.off()
}
# carry the series names over to the aggregated lists
names(CSI_MONTH_END) <- names(csi_ts_list)
names(CSI_MONTH_AVE) <- names(csi_ts_list)
names(CSI_QUARTER_AVE) <- names(csi_ts_list)
# CSI_MONTH_END_df <- xtslist_to_df(CSI_MONTH_END)
# CSI_MONTH_AVE_df <- xtslist_to_df(CSI_MONTH_AVE)
# CSI_QUARTER_AVE_df <- xtslist_to_df(CSI_QUARTER_AVE)
#
# file_name = paste0(Output_curve,"/CSI_MONTH_END.csv")
# write.csv(as.data.frame(CSI_MONTH_END_df),file_name)
#
# file_name = paste0(Output_curve,"/CSI_MONTH_AVE.csv")
# write.csv(as.data.frame(CSI_MONTH_AVE_df),file_name)
#
# file_name = paste0(Output_curve,"/CSI_QUARTER_AVE.csv")
# write.csv(as.data.frame(CSI_QUARTER_AVE_df),file_name)
# Persist the three aggregated datasets: convert each to the long/DB layout,
# replace the corresponding database table (delete then save a versioned,
# characterized copy), and also write a CSV snapshot next to the plots.
# xtslist_to_db / deleteTable / saveTable / characterizeTable / versionDataFrame
# are project-defined helpers.
CSI_MONTH_END_db <- xtslist_to_db(CSI_MONTH_END)
CSI_MONTH_AVE_db <- xtslist_to_db(CSI_MONTH_AVE)
CSI_QUARTER_AVE_db <- xtslist_to_db(CSI_QUARTER_AVE)
# quarterly averages
table_name <- "I_HIST_CSI_QTRAVE";
deleteTable(dbc,table_name)
saveTable(dbc,table_name,characterizeTable(versionDataFrame(CSI_QUARTER_AVE_db,current_version)))
file_name = paste0(Output_curve,"/",table_name,".csv")
write.csv(CSI_QUARTER_AVE_db,file_name)
# monthly averages
table_name <- "I_HIST_CSI_MONAVE";
deleteTable(dbc,table_name)
saveTable(dbc,table_name,characterizeTable(versionDataFrame(CSI_MONTH_AVE_db,current_version)))
file_name = paste0(Output_curve,"/",table_name,".csv")
write.csv(CSI_MONTH_AVE_db,file_name)
# month-end values
table_name <- "I_HIST_CSI_MONEND";
deleteTable(dbc,table_name)
saveTable(dbc,table_name,characterizeTable(versionDataFrame(CSI_MONTH_END_db,current_version)))
file_name = paste0(Output_curve,"/",table_name,".csv")
write.csv(CSI_MONTH_END_db,file_name)
}
|
e3197859c60a9dc9ca7f00337b949c8b5a18205d | 3fd21aa0843f2ad47158d0b17044992c1449135e | /man/wcagepredssumstats.Rd | 39635df0c80fd4cfa1421b3b2fcbf7672c5b33aa | [] | no_license | mrm10/HHFindAgeProjectFinal | 20c1c48f9ae700259365aed48bb5c2a7f8aa22b1 | 5c00c0cbe6ad80a71e9e36ffc57f91bc45c30570 | refs/heads/master | 2021-01-13T09:56:58.888210 | 2016-02-03T11:45:49 | 2016-02-03T11:45:49 | 50,967,043 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 815 | rd | wcagepredssumstats.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/SumStats.R
\name{wcagepredssumstats}
\alias{sumstats}
\alias{wcagepredssumstats}
\title{sumstats}
\usage{
sumstats(academicyear, sumstats = TRUE)
}
\arguments{
\item{academicyear}{The academic year for which the average age and other characteristics of the faculty should be found}
\item{sumstats}{When set to TRUE, displays the summary statistics of the Williams College faculty in the given academic year; when set to FALSE, outputs only the average age for that academic year}
}
\description{
Allows a user to find the summary statistics such as the minimum, first quartile, median, mean, third quartile, and maximum of the faculty age distribution at Williams College in a given academic year
}
|
4903cc5ad62693f174e6800fb9d362ff1b1a45d7 | 29585dff702209dd446c0ab52ceea046c58e384e | /NPflow/R/DPMpost.R | 07337acae1f4c17100567262648d45a71ea1e585 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,647 | r | DPMpost.R | #'Posterior estimation for Dirichlet process mixture of multivariate (potentially skew) distibutions models
#'
#'Partially collapse slice Gibbs sampling for Dirichlet process mixture of multivariate
#'normal, skew normal or skew t distributions.
#'
#'@details This function is a wrapper around \code{\link{DPMGibbsN}}, \code{\link{DPMGibbsN_parallel}},
#'\code{\link{DPMGibbsN_SeqPrior}}, \code{\link{DPMGibbsSkewN}}, \code{\link{DPMGibbsSkewN_parallel}},
#'\code{\link{DPMGibbsSkewT}}, \code{\link{DPMGibbsSkewT_parallel}},
#'\code{\link{DPMGibbsSkewT_SeqPrior}}, \code{\link{DPMGibbsSkewT_SeqPrior_parallel}}.
#'
#'@param data data matrix \code{d x n} with \code{d} dimensions in rows
#'and \code{n} observations in columns.
#'
#'@param hyperG0 prior mixing distribution.
#'
#'@param a shape hyperparameter of the Gamma prior
#'on the concentration parameter of the Dirichlet Process. Default is \code{0.0001}.
#'
#'@param b scale hyperparameter of the Gamma prior
#'on the concentration parameter of the Dirichlet Process. Default is \code{0.0001}. If \code{0},
#'then the concentration is fixed set to \code{a}.
#'
#'@param N number of MCMC iterations.
#'
#'@param doPlot logical flag indicating wether to plot MCMC iteration or not.
#'Default to \code{TRUE}.
#'
#'@param nbclust_init number of clusters at initialisation.
#'Default to 30 (or less if there are less than 30 observations).
#'
#'@param plotevery an integer indicating the interval between plotted iterations when \code{doPlot}
#' is \code{TRUE}.
#'
#'@param diagVar logical flag indicating wether the variance of each cluster is
#'estimated as a diagonal matrix, or as a full matrix.
#'Default is \code{TRUE} (diagonal variance).
#'
#'@param verbose logical flag indicating wether partition info is
#'written in the console at each MCMC iteration.
#'
#'@param distrib the distribution used for the clustering. Current possibilities are
#'\code{"gaussian"}, \code{"skewnorm"} and \code{"skewt"}.
#'
#'@param ncores number of cores to use.
#'
#'@param type_connec The type of connection between the processors. Supported
#'cluster types are \code{"SOCK"}, \code{"FORK"}, \code{"MPI"}, and
#'\code{"NWS"}. See also \code{\link[parallel:makeCluster]{makeCluster}}.
#'
#'@param informPrior an optional informative prior such as the approximation computed
#'by \code{summary.DPMMclust}.
#'
#'@param ... additional arguments to be passed to \code{\link{plot_DPM}}.
#'Only used if \code{doPlot} is \code{TRUE}.
#'
#'@return a object of class \code{DPMclust} with the following attributes:
#' \itemize{
#' \item{\code{mcmc_partitions}:}{ a list of length \code{N}. Each
#' element \code{mcmc_partitions[n]} is a vector of length
#' \code{n} giving the partition of the \code{n} observations.}
#' \item{\code{alpha}:}{a vector of length \code{N} giving the value of the
#' concentration parameter of the Dirichlet Process at each MCMC iteration}
#' \item{\code{U_SS_list}:}{a list of length \code{N} containing the lists of
#' sufficient statistics for all the mixture components at each MCMC iteration}
#' \item{\code{weights_list}:}{a list of length \code{N} containing the weights of each
#' mixture component for each MCMC iterations}
#' \item{\code{logposterior_list}:}{a list of length \code{N} containing the logposterior values
#' at each MCMC iteration}
#' \item{\code{data}:}{the data matrix \code{d x n} with \code{d} dimensions in rows
#'and \code{n} observations in columns}
#' \item{\code{nb_mcmcit}:}{the number of MCMC iterations}
#' \item{\code{clust_distrib}:}{the parametric distribution of the mixture component}
#' \item{\code{hyperG0}:}{the prior on the cluster location}
#' }
#'
#'@author Boris Hejblum
#'
#'@references Hejblum BP, Alkhassim C, Gottardo R, Caron F, Thiebaut R, Sequential Dirichlet
#'Process Mixtures of Multivariate Skew t-distributions for Model-based Clustering
#'of Flow Cytometry Data, in preparation.
#'
#'@export
#'
#'@examples
#' rm(list=ls())
#' library(ggplot2)
#' library(truncnorm)
#'
#' #Number of data
#' n <- 2000
#' set.seed(123)
#' set.seed(4321)
#'
#'
#' d <- 2
#' ncl <- 4
#'
#' # Sample data
#'
#' sdev <- array(dim=c(d,d,ncl))
#'
#' xi <- matrix(nrow=d, ncol=ncl, c(-1.5, 1.5, 1.5, 1.5, 2, -2.5, -2.5, -3))
#' psi <- matrix(nrow=d, ncol=4, c(0.3, -0.7, -0.8, 0, 0.3, -0.7, 0.2, 0.9))
#' nu <- c(100,25,8,5)
#' p <- c(0.15, 0.05, 0.5, 0.3) # cluster frequencies
#' sdev[, ,1] <- matrix(nrow=d, ncol=d, c(0.3, 0, 0, 0.3))
#' sdev[, ,2] <- matrix(nrow=d, ncol=d, c(0.1, 0, 0, 0.3))
#' sdev[, ,3] <- matrix(nrow=d, ncol=d, c(0.3, 0, 0, 0.2))
#' sdev[, ,4] <- .3*diag(2)
#'
#'
#' c <- rep(0,n)
#' w <- rep(1,n)
#' z <- matrix(0, nrow=d, ncol=n)
#' for(k in 1:n){
#' c[k] = which(rmultinom(n=1, size=1, prob=p)!=0)
#' w[k] <- rgamma(1, shape=nu[c[k]]/2, rate=nu[c[k]]/2)
#' z[,k] <- xi[, c[k]] + psi[, c[k]]*rtruncnorm(n=1, a=0, b=Inf, mean=0, sd=1/sqrt(w[k])) +
#' (sdev[, , c[k]]/sqrt(w[k]))%*%matrix(rnorm(d, mean = 0, sd = 1), nrow=d, ncol=1)
#' #cat(k, "/", n, " observations simulated\n", sep="")
#' }
#'
#' # Set parameters of G0
#' hyperG0 <- list()
#' hyperG0[["b_xi"]] <- rowMeans(z)
#' hyperG0[["b_psi"]] <- rep(0,d)
#' hyperG0[["kappa"]] <- 0.001
#' hyperG0[["D_xi"]] <- 100
#' hyperG0[["D_psi"]] <- 100
#' hyperG0[["nu"]] <- d+1
#' hyperG0[["lambda"]] <- diag(apply(z,MARGIN=1, FUN=var))/3
#'
#' # hyperprior on the Scale parameter of DPM
#' a <- 0.0001
#' b <- 0.0001
#'
#'
#'
#' ## Data
#' ########
#' library(ggplot2)
#' p <- (ggplot(data.frame("X"=z[1,], "Y"=z[2,]), aes(x=X, y=Y))
#' + geom_point()
#' #+ ggtitle("Simple example in 2d data")
#' +xlab("D1")
#' +ylab("D2")
#' +theme_bw())
#' p #pdf(height=8.5, width=8.5)
#'
#' c2plot <- factor(c)
#' levels(c2plot) <- c("4", "1", "3", "2")
#' pp <- (ggplot(data.frame("X"=z[1,], "Y"=z[2,], "Cluster"=as.character(c2plot)))
#' + geom_point(aes(x=X, y=Y, colour=Cluster, fill=Cluster))
#' #+ ggtitle("Slightly overlapping skew-normal simulation\n")
#' + xlab("D1")
#' + ylab("D2")
#' + theme_bw()
#' + scale_colour_discrete(guide=guide_legend(override.aes = list(size = 6, shape=22))))
#' pp #pdf(height=7, width=7.5)
#'
#'\dontrun{
#' MCMCsample_st <- DPMpost(data=z, hyperG0=hyperG0, N=2000,
#' distrib="skewt",
#' gg.add=list(theme_bw(),
#' guides(shape=guide_legend(override.aes = list(fill="grey45"))))
#' )
#' s <- summary(MCMCsample_st, burnin = 1500, thin=5, lossFn = "Binder")
#' s
#' plot(s)
#' #plot(s, hm=TRUE) #pdf(height=8.5, width=10.5) #png(height=700, width=720)
#'}
#'
#'
#'
#'
#'
#'
DPMpost <- function (data, hyperG0, a=0.0001, b=0.0001, N, doPlot=TRUE,
                     nbclust_init=30, plotevery=floor(N/10),
                     diagVar=TRUE, verbose=TRUE,
                     distrib=c("gaussian", "skewnorm", "skewt"),
                     ncores = 1,
                     type_connec = "SOCK",
                     informPrior=NULL,
                     ...
){
  # Dispatcher over the DPMGibbs* sampler family: picks the appropriate
  # implementation from (distrib, ncores, informPrior). Argument documentation
  # is in the roxygen block above.

  # Resolve 'distrib' to a single value. Without this, calling DPMpost() with
  # the default (a length-3 character vector) would make switch() fail, since
  # its EXPR must be a single string; match.arg() also validates user input.
  distrib <- match.arg(distrib)

  if (ncores > 1) {
    # BUGFIX: this comparison was inverted ('<'), which rejected every *valid*
    # parallel request (fewer cores than available) and silently accepted
    # oversubscription.
    if (ncores > parallel::detectCores()) {
      stop("Number of requested cores is higher than what is available")
    }
  }

  if (ncores < 2) {
    # ---- sequential samplers ----
    if (is.null(informPrior)) {
      res <- switch(distrib,
                    "gaussian"=DPMGibbsN(data, hyperG0, a, b, N, doPlot, nbclust_init, plotevery, diagVar,
                                         verbose, ...),
                    "skewnorm"=DPMGibbsSkewN(data, hyperG0, a, b, N, doPlot, nbclust_init, plotevery, diagVar,
                                             verbose, ...),
                    "skewt"=DPMGibbsSkewT(data, hyperG0, a, b, N, doPlot, nbclust_init, plotevery, diagVar,
                                          verbose, ...)
      )
    } else {
      # sequential samplers with an informative prior (skew normal unsupported)
      res <- switch(distrib,
                    "gaussian"=DPMGibbsN_SeqPrior(data, informPrior, hyperG0, N,
                                                  nbclust_init, doPlot=doPlot, plotevery=plotevery,
                                                  diagVar=diagVar, verbose=verbose, ...),
                    "skewnorm"=stop("Skew normal distributions with informative prior is not implemented yet.\n",
                                    "Contact the maintainer if you would like to see this feature implemented.\n",
                                    "In the meantime, try the skew t distribution with 'skewt' which is a generalization ",
                                    "of the skew normal distribution."),
                    "skewt"=DPMGibbsSkewT_SeqPrior(data, informPrior, hyperG0, N, nbclust_init,
                                                   doPlot=doPlot, plotevery=plotevery, diagVar=diagVar,
                                                   verbose=verbose, ...)
      )
    }
  } else {
    # ---- parallel samplers ----
    if (is.null(informPrior)) {
      if (distrib == "skewnorm") {
        # no parallel skew-normal sampler exists: fall back to the sequential one
        warning("Parallel implementation with skew normal distributions is not available yet.\n",
                "Contact the maintainer if you would like to see this feature implemented.\n",
                "In the meantime, the non-parallel implementation is being run instead")
      }
      res <- switch(distrib,
                    "gaussian"=DPMGibbsN_parallel(ncores, type_connec, data, hyperG0, a, b, N,
                                                  doPlot, nbclust_init, plotevery, diagVar,
                                                  verbose, ...),
                    "skewnorm"=DPMGibbsSkewN(data, hyperG0, a, b, N, doPlot, nbclust_init, plotevery, diagVar,
                                             verbose, ...),
                    "skewt"=DPMGibbsSkewT_parallel(ncores, type_connec, data, hyperG0, a, b, N,
                                                   doPlot, nbclust_init, plotevery, diagVar,
                                                   verbose, ...)
      )
    } else {
      res <- switch(distrib,
                    # No parallel informative-prior gaussian sampler: warn and
                    # run the sequential one. (BUGFIX: the warning used to fire
                    # for every distribution in this branch, including "skewt",
                    # which *does* have a parallel implementation; it is now
                    # emitted only when it actually applies.)
                    "gaussian"={
                      warning("Parallel implementation with an informative prior for gaussian distributions is not available yet.\n",
                              "Contact the maintainer if you would like to see this feature implemented.\n",
                              "In the meantime, the non-parallel implementation is being run instead.")
                      DPMGibbsN_SeqPrior(data, informPrior, hyperG0, N,
                                         nbclust_init, doPlot=doPlot, plotevery=plotevery,
                                         diagVar=diagVar, verbose=verbose, ...)
                    },
                    "skewnorm"=stop("Skew normal distributions with informative prior is not implemented yet.\n",
                                    "Contact the maintainer if you would like to see this feature implemented.\n",
                                    "In the meantime, try the skew t distribution with 'skewt' which is a generalization ",
                                    "of the skew normal distribution."),
                    "skewt"=DPMGibbsSkewT_SeqPrior_parallel(ncores, type_connec, data, informPrior,
                                                            hyperG0, N, nbclust_init, doPlot=doPlot,
                                                            plotevery=plotevery, diagVar=diagVar,
                                                            verbose=verbose, ...)
      )
    }
  }
  return(res)
}
|
e7ed6db5484263c6682092c9dc581daab245b439 | b548999a84c8e3ec3a6d675208dd1772b22d820f | /google-trends-plot.R | ed3ee54f86130a78ad7e6705e8e3281ef6027cb8 | [] | no_license | zremek/google-trends-ds-ml-ai | 083d1a539c69b2caca85ba1acb3978f94300f98f | 7ce6d9910e9cd8279ab2e8ede933010d21779bdc | refs/heads/main | 2023-03-11T10:48:13.335413 | 2023-03-08T09:49:32 | 2023-03-08T09:49:32 | 315,697,410 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,516 | r | google-trends-plot.R | library(tidyverse)
library(cowplot)
library(PerformanceAnalytics)
# Google Trends comparison of "big data" / "data science" / "machine learning"
# (uczenie maszynowe) / "artificial intelligence" (sztuczna inteligencja),
# 2008-01 to 2020-09, worldwide. Reads two Trends CSV exports, cleans them,
# and draws two stacked line charts (1A with AI, 1B without AI).
# export with the AI topic included:
# https://trends.google.com/trends/explore?date=2008-01-01%202020-09-30&q=%2Fm%2F0bs2j8q,%2Fm%2F01hyh_,%2Fm%2F0jt3_q3,%2Fm%2F0mkz&hl=en-US
with_ai <- read_csv("multiTimeline.csv",
skip = 2, col_types = "ccccc")
# export without the AI topic:
# https://trends.google.com/trends/explore?date=2008-01-01%202020-09-30&q=%2Fm%2F0bs2j8q,%2Fm%2F01hyh_,%2Fm%2F0jt3_q3&hl=en-US
no_ai <- read_csv("multiTimeline(1).csv",
skip = 2, col_types = "cccc")
# Google Trends reports values below 1 as "<1"; recode as 0.5 before coercion
with_ai[with_ai == "<1"] <- "0.5"
no_ai[no_ai == "<1"] <- "0.5"
# convert the topic columns (everything but the month column) to numeric
with_ai[,2:5] <- apply(with_ai[,2:5], 2, as.numeric)
no_ai[,2:4] <- apply(no_ai[,2:4], 2, as.numeric)
# rename the Polish-locale export headers to short topic labels
with_ai <- rename(with_ai,
`big data` = `Big data: (Cały świat)`,
`uczenie maszynowe` = `Uczenie maszynowe: (Cały świat)`,
`data science` = `Danologia: (Cały świat)`,
`sztuczna inteligencja` = `Sztuczna inteligencja: (Cały świat)`)
no_ai <- rename(no_ai,
`big data` = `Big data: (Cały świat)`,
`uczenie maszynowe` = `Uczenie maszynowe: (Cały świat)`,
`data science` = `Danologia: (Cały świat)`)
# exploratory checks kept for reference (correlations, log-linear trend fit):
# chart.Correlation(with_ai[,2:5], histogram = TRUE, method = "pearson")
# chart.Correlation(no_ai[,2:4], histogram = TRUE, method = "spearman")
#
# summary(lm(log(`data science`) ~ parse_date(Miesiąc, "%Y-%m"), no_ai))
#
# ggplot(no_ai, aes(parse_date(Miesiąc, "%Y-%m"), `data science`, 10)) + geom_point() +
# geom_smooth()
# reshape wide -> long: one row per (month, topic); Temat = topic,
# Popularność = popularity
with_ai <- gather(with_ai, "Temat", "Popularność", -Miesiąc)
no_ai <- gather(no_ai, "Temat", "Popularność", -Miesiąc)
# fix the legend/series order for panel 1A
with_ai$Temat <- fct_relevel(with_ai$Temat,
"big data",
"data science",
"uczenie maszynowe",
"sztuczna inteligencja")
# panel 1A: all four topics including AI (legend below the plot)
w <- ggplot(with_ai) +
geom_line(aes(x = parse_date(Miesiąc, "%Y-%m"),
y = Popularność, colour = Temat),
size = 1) +
labs(title = '1A', x = "",
subtitle = 'Temat "sztuczna inteligencja" był najbardziej popularnym wśród analizowanych') +
scale_colour_brewer(palette = "Set1", name = "Temat w wyszukiwarce\nGoogle") +
scale_x_date(date_breaks = "1 year", date_labels = "%Y", minor_breaks = NULL) +
theme_minimal(base_family = "serif", base_size = 10) +
theme(legend.position = "bottom", legend.direction = "horizontal", legend.box.just = "left")
# panel 1B: the three remaining topics, no legend (shares 1A's colour scale)
n <- ggplot(no_ai) +
geom_line(aes(x = parse_date(Miesiąc, "%Y-%m"),
y = Popularność, colour = Temat),
size = 1) +
labs(title = '1B', x = "",
subtitle = 'Pomijając "sztuczną inteligencję", temat "big data" był od 02.2012 do 03.2017 najpopularniejszym.\nPóźniej najbardziej popularny stał się temat "uczenia maszynowego".\nŚrednio "data science" było najmniej popularnym tematem. Jednak od grudnia 2018 jego popularność\nprzekracza popularność tematu "big data"',
caption = 'Dane z Google Trends od 01.01.2008 do 30.09.2020, zakres geograficzny "Cały świat"') +
scale_colour_brewer(palette = "Set1") +
scale_x_date(date_breaks = "1 year", date_labels = "%Y", minor_breaks = NULL) +
theme_minimal(base_family = "serif", base_size = 10) +
guides(colour = "none")
# final assembly and PNG export, left commented out for manual runs:
# (p <- cowplot::plot_grid(w, n, nrow = 2))
#
# png("google-trends.png", width = 160, height = 190, units = "mm", res = 300)
# plot(p) # Fig. 1.
# dev.off()
25c9e81364b76eae576f262f35908572c3d0a6a3 | 691a1a785b2f0a47a04777ada08cb1a8bf4b94ef | /Sourced_Functions/MainPipeLineFunctionsToSource_v3.R | 3a53059b1c8cd7c60c77c73ac85c48ca572daa51 | [] | no_license | arthurvickie/Multi-Marker_Method | cbafc3e6a9a16c703b3d241d19234d0dfdb38e89 | d1d90c3c6f99d587a987f4be761c8dbe042f176a | refs/heads/main | 2023-07-15T10:17:00.234816 | 2021-08-30T20:49:22 | 2021-08-30T20:49:22 | 377,881,258 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 95,778 | r | MainPipeLineFunctionsToSource_v3.R | #############################################
## File to source for whole pipelines
## Includes TIE and Power pipelines
## Using long form eqn and Ghat for calculations
#############################################
#edit 5/21/20 added in ability to standardize scores and weight scores to ghat pipelines
#edited version for HapGen Data, Only using Ghat versions
#edit 6/18/2020 added in code for running SKAT for comparison
#edited 9/24/2020 to run with ProjectIISourceFunctions_v2.R so that s is equal to the number of PCs, not the PVE (this was changed back!)
#Relies on ProjectIISourceFunctions_v2.R
##################################################
## Original Pipelines
##################################################
# TIE Pipeline
#TIE Pipeline with Ghat
RunTIEPipelineLocalGHat = function(chr, gene, numPairs, YPrev, s, standardizeScores = FALSE, weightedScores = FALSE, scoreWeights){
#function to run whole TIE pipeline locally, uses Ghat instead of long form eqn for calculations
#Inputs:
#chr = chromosome number
#gene = gene name, in quotes
#numPairs = number of D/R pairs
#YPrev = prevalence of binary outcome Y
#s = PVE for choosing the number of PCs
#standardizeScores = T or F whether the scores should be standardized based on maximum score value
#weightedScores = T or F whether the scores will be weighted
#scoreWeights = m x 1 vector of weights, one weight for each SNP
#Outputs:
#No direct outputs, writes scores and pvalues to csv files
#also writes TIE values to csv files
library(parallel)
#always the same
numSims = 5000
#define path to data
#for HapGen generated data
path = paste0("/home/vlynn/Paper_II_Sims/HapGen_Files/",gene,"_Results_",numPairs,"Pairs")
#source the needed functions
source("/home/vlynn/Paper_II_Sims/HapGen_Files/Scripts/ProjectIISourceFunctions_v2.R")
myList = lapply(1:numSims, rep, times = 1)
statsAndPVals = mclapply(myList, function(ii){
#define matrix to hold all Stats and Pvalues
statsAndPVals = matrix(NA, nrow = numSims, ncol = 16)
#pull recipient and donor genotypes
RGenos = obtainRGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
DGenos = obtainDGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
#calculate single snp scores
IBS.snp = calcIBSMismatch(RGenosMat = RGenos, DGenosMat = DGenos)
Incomp.snp = calcIncompatibilityScore(RGenosMat = RGenos, DGenosMat = DGenos)
AMS.snp = calcAMS(RGenosMat = RGenos, DGenosMat = DGenos)
BinMM.snp = calcBinaryMM(RGenosMat = RGenos, DGenosMat = DGenos)
#calculate gene based scores
#check to see if weights are used for scores
if(weightedScores == FALSE){
#check to see if scores should be standardized (can't be weighted and standardized)
if(standardizeScores == FALSE){
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = FALSE)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = FALSE)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = FALSE)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = FALSE)
} else {
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = TRUE, useWeights = FALSE)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = TRUE, useWeights = FALSE)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = TRUE, useWeights = FALSE)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = TRUE, useWeights = FALSE)
}
} else {
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
}
#generate covariates
#for now, a single binary and a single continous covariate
CovData = GenCovData(SampleSize = numPairs, BinaryValues = 1, ContinuousValues = 1)
#generate phenotypes, both continuous and binary
CatPhenos = GenNullPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = TRUE, YPrev = YPrev, Covariates = CovData)
ContPhenos = GenNullPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = FALSE, Covariates = CovData)
#Need to calculate UR and US scores
#will have then for continuous and binary phenos
#need separate US for each score type
#Recipient genotype UR values:
UR_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = CatPhenos, BinPhenos = TRUE)
UR_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = ContPhenos, BinPhenos = FALSE)
#US for IBS Score
US_IBS_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
US_IBS_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#US for Incomp Score
US_Incomp_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = CatPhenos, BinPhenos = TRUE)
US_Incomp_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#US for AMS Score
US_AMS_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
US_AMS_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#US for Bin MM Score
US_BinMM_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = CatPhenos, BinPhenos = TRUE)
US_BinMM_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#Need to calculate Q values
#R geno QR values
QR_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = CatPhenos, BinPhenos = TRUE)
QR_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = ContPhenos, BinPhenos = FALSE)
#QS for IBS Score
QS_IBS_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
QS_IBS_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#QS for Incomp Score
QS_Incomp_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = CatPhenos, BinPhenos = TRUE)
QS_Incomp_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#QS for AMS score
QS_AMS_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
QS_AMS_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#QS for Bin MM Score
QS_BinMM_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = CatPhenos, BinPhenos = TRUE)
QS_BinMM_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#Create combined Qs (QR, QS)
#R geno and IBS score
Q_IBS_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_IBS_CatPhenos_Ghat)
Q_IBS_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_IBS_ContPhenos_Ghat)
#R geno and Incomp Score
Q_Incomp_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_Incomp_CatPhenos_Ghat)
Q_Incomp_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_Incomp_ContPhenos_Ghat)
#R geno and AMS score
Q_AMS_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_AMS_CatPhenos_Ghat)
Q_AMS_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_AMS_ContPhenos_Ghat)
#R geno and Bin MM score
Q_BinMM_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_BinMM_CatPhenos_Ghat)
Q_BinMM_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_BinMM_ContPhenos_Ghat)
#calculate full variance
#each set of Qs will have a variance calculation
#R genos + IBS Score
OrgVar_Q_IBS_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_IBS_CatPhenos_Ghat)
OrgVar_Q_IBS_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_IBS_ContPhenos_Ghat)
#R genos + Incomp Score
OrgVar_Q_Incomp_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_Incomp_CatPhenos_Ghat)
OrgVar_Q_Incomp_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_Incomp_ContPhenos_Ghat)
#R genos + AMS Score
OrgVar_Q_AMS_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_AMS_CatPhenos_Ghat)
OrgVar_Q_AMS_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_AMS_ContPhenos_Ghat)
#R genos + Bin MM Score
OrgVar_Q_BinMM_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_BinMM_CatPhenos_Ghat)
OrgVar_Q_BinMM_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_BinMM_ContPhenos_Ghat)
#calculate final stat and p value
#R geno and IBS score
Stat_IBS_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_IBS_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_IBS_CatPhenos_Ghat, s = s)
Stat_IBS_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_IBS_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_IBS_ContPhenos_Ghat, s = s)
#R geno and Incomp score
Stat_Incomp_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_Incomp_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_Incomp_CatPhenos_Ghat, s = s)
Stat_Incomp_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_Incomp_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_Incomp_ContPhenos_Ghat, s = s)
#R geno and AMS Score
Stat_AMS_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_AMS_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_AMS_CatPhenos_Ghat, s = s)
Stat_AMS_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_AMS_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_AMS_ContPhenos_Ghat, s = s)
#R geno and Bin MM score
Stat_BinMM_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_BinMM_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_BinMM_CatPhenos_Ghat, s = s)
Stat_BinMM_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_BinMM_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_BinMM_ContPhenos_Ghat, s = s)
#fill columns in order
##Binary Phenos
##IBS Cat, Incomp Cat, AMS Cat, Bin MM Cat,
statsAndPValsCatPhenos_IBS = Stat_IBS_CatPhenos[1:2]
statsAndPValsCatPhenos_Incomp = Stat_Incomp_CatPhenos[1:2]
statsAndPValsCatPhenos_AMS = Stat_AMS_CatPhenos[1:2]
statsAndPValsCatPhenos_BinMM = Stat_BinMM_CatPhenos[1:2]
##Cont Phenos
##IBS Cont, Incomp Cont, AMS Cont, Bin MM Cont
statsAndPValsContPhenos_IBS = Stat_IBS_ContPhenos[1:2]
statsAndPValsContPhenos_Incomp = Stat_Incomp_ContPhenos[1:2]
statsAndPValsContPhenos_AMS = Stat_AMS_ContPhenos[1:2]
statsAndPValsContPhenos_BinMM = Stat_BinMM_ContPhenos[1:2]
IBSCat.mat = as.matrix(statsAndPValsCatPhenos_IBS)
IncompCat.mat = as.matrix(statsAndPValsCatPhenos_Incomp)
AMSCat.mat = as.matrix(statsAndPValsCatPhenos_AMS)
BinMMCat.mat = as.matrix(statsAndPValsCatPhenos_BinMM)
IBSCont.mat = as.matrix(statsAndPValsContPhenos_IBS)
IncompCont.mat = as.matrix(statsAndPValsContPhenos_Incomp)
AMSCont.mat = as.matrix(statsAndPValsContPhenos_AMS)
BinMMCont.mat = as.matrix(statsAndPValsContPhenos_BinMM)
statsAndPVals = cbind(t(IBSCat.mat),t(IncompCat.mat),t(AMSCat.mat),t(BinMMCat.mat),t(IBSCont.mat),t(IncompCont.mat),t(AMSCont.mat),t(BinMMCont.mat))
print(paste0("Simulation ",ii," is complete."))
statsAndPVals
}, mc.cores=4)
statsAndPValsAll = matrix(unlist(statsAndPVals),nrow = numSims, ncol = 16, byrow = TRUE)
statsAndPValsCatPhenos = statsAndPValsAll[,1:8]
statsAndPValsContPhenos = statsAndPValsAll[,9:16]
#write out the Stats and p values
if(weightedScores == FALSE){
if(standardizeScores == FALSE){
write.csv(statsAndPValsCatPhenos, file = paste0(path,"/TIE_CatPhenos_Prev",YPrev*100,"_Ghat_StatsAndPValues_s",s*100,".csv"))
write.csv(statsAndPValsContPhenos, file = paste0(path,"/TIE_ContPhenos_Ghat_StatsAndPValues_s",s*100,".csv"))
} else { #scores are standardized
write.csv(statsAndPValsCatPhenos, file = paste0(path,"/TIE_CatPhenos_Prev",YPrev*100,"_Ghat_StandardizedScores_StatsAndPValues_s",s*100,".csv"))
write.csv(statsAndPValsContPhenos, file = paste0(path,"/TIE_ContPhenos_Ghat_StandardizedScores_StatsAndPValues_s",s*100,".csv"))
}
} else { #scores are weighted
write.csv(statsAndPValsCatPhenos, file = paste0(path,"/TIE_CatPhenos_Prev",YPrev*100,"_Ghat_WeightedScores_StatsAndPValues_s",s*100,".csv"))
write.csv(statsAndPValsContPhenos, file = paste0(path,"/TIE_ContPhenos_Ghat_WeightedScores_StatsAndPValues_s",s*100,".csv"))
}
#calculate the type I errors for each combination of score and r geno
#first column for cat phenos, second for cont phenos
#rows go IBS, Incomp, AMS, Bin MM
ties = matrix(nrow = 4, ncol = 2)
#pull only the pvalues
PValsCatPhenos = statsAndPValsCatPhenos[,c(2,4,6,8)]
PValsContPhenos = statsAndPValsContPhenos[,c(2,4,6,8)]
#calc type I error
for(jj in 1:4){
ties[jj,1] = sum(PValsCatPhenos[,jj] <= 0.05)/numSims
ties[jj,2] = sum(PValsContPhenos[,jj] <= 0.05)/numSims
}
#write out type I error results
if(weightedScores == FALSE){
if(standardizeScores == FALSE){
write.csv(ties, file = paste0(path,"/TIE_Results_Ghat_CatAndContPhenos_Prev",YPrev*100,"_s",s*100,".csv"))
} else { #scores are standardized
write.csv(ties, file = paste0(path,"/TIE_Results_Ghat_StandardizedScores_CatAndContPhenos_Prev",YPrev*100,"_s",s*100,".csv"))
}
} else { #scores are weighted
write.csv(ties, file = paste0(path,"/TIE_Results_Ghat_WeightedScores_CatAndContPhenos_Prev",YPrev*100,"_s",s*100,".csv"))
}
}
#Power Pipeline with Ghat for Scores being true
RunPowerPipelineLocalGhat_Scores = function(chr, gene, numPairs, YPrev, s, Gamma, TrueScore, ORSize, standardizeScores = FALSE, weightedScores = FALSE, scoreWeights, percentageAssoc, LowLD){
  #Run the whole power pipeline locally, using Ghat (instead of the long-form
  #equation) for the U-score / Q-value / variance calculations.
  #Here the TRUE association is carried by a gene-based score built from only
  #the associated subset of SNPs (calcGeneScorePercentOfSNPs), while testing
  #is done with the full gene-based scores.
  #Inputs:
  #  chr = chromosome number
  #  gene = gene name, in quotes
  #  numPairs = number of D/R pairs
  #  YPrev = prevalence of binary outcome Y
  #  s = PVE for choosing the number of PCs
  #  Gamma = effect size for score, length 1, can be 0
  #  TrueScore = "IBS.gene", "Incomp.gene", "AMS.gene", or "BinMM.gene"
  #  ORSize = Small, Medium, or Large label for the OR used for the associated score
  #  standardizeScores = TRUE/FALSE whether scores are standardized by the maximum score value
  #  weightedScores = TRUE/FALSE whether the scores are weighted (cannot also be standardized)
  #  scoreWeights = m x 1 vector of weights, one per SNP (used only when weightedScores = TRUE)
  #  percentageAssoc = percentage of SNPs associated with outcome (5, 25, 50, 75, or 100)
  #  LowLD = TRUE/FALSE whether the associated SNPs are in low LD or high LD
  #Outputs:
  #  No direct outputs; writes per-simulation stats/p-values and the final
  #  power estimates to csv files in the gene-specific results directory.
  library(parallel)
  #recorded in output file names: which score generated the phenotypes
  snpOrScore = TrueScore
  #number of sims always the same
  numSims = 5000
  #define path to data
  #for sampled haplotype data (kept for reference)
  # path = paste0("/home/vlynn/Paper_II_Sims/",gene,"_Results_",numPairs,"Pairs")
  #for HapGen generated data
  path = paste0("/home/vlynn/Paper_II_Sims/HapGen_Files/",gene,"_Results_",numPairs,"Pairs")
  #source the needed functions
  source("/home/vlynn/Paper_II_Sims/HapGen_Files/Scripts/ProjectIISourceFunctions_v2.R")
  #one worker call per simulation; each returns a 1 x 24 matrix holding a
  #triple of values (with the p-value in the 2nd position of each triple)
  #for the 8 score/phenotype combinations
  statsAndPValsMat = mclapply(as.list(seq_len(numSims)), function(ii){
    statsAndPValsMat = matrix(NA, ncol = 24)
    #pull recipient and donor genotypes for this simulated data set
    RGenos = obtainRGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
    DGenos = obtainDGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
    #calculate single snp scores (one mismatch kernel per score type)
    IBS.snp = calcIBSMismatch(RGenosMat = RGenos, DGenosMat = DGenos)
    Incomp.snp = calcIncompatibilityScore(RGenosMat = RGenos, DGenosMat = DGenos)
    AMS.snp = calcAMS(RGenosMat = RGenos, DGenosMat = DGenos)
    BinMM.snp = calcBinaryMM(RGenosMat = RGenos, DGenosMat = DGenos)
    #gene-based scores used for TESTING (all SNPs);
    #weighting and standardization are mutually exclusive options
    if(weightedScores == FALSE){
      if(standardizeScores == FALSE){
        IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = FALSE)
        Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = FALSE)
        AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = FALSE)
        BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = FALSE)
      } else {
        IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = TRUE, useWeights = FALSE)
        Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = TRUE, useWeights = FALSE)
        AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = TRUE, useWeights = FALSE)
        BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = TRUE, useWeights = FALSE)
      }
    } else {
      IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
      Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
      AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
      BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
    }
    #gene-based scores built from ONLY the associated subset of SNPs;
    #these drive the phenotype generation below
    IBS.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = IBS.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
    Incomp.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = Incomp.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
    AMS.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = AMS.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
    BinMM.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = BinMM.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
    #TrueScore selects which partial-SNP score generates the phenotypes
    if(TrueScore == "IBS.gene"){
      PhenoScore = IBS.gene.PercentOfSNPs
    } else if(TrueScore == "Incomp.gene"){
      PhenoScore = Incomp.gene.PercentOfSNPs
    } else if(TrueScore == "AMS.gene"){
      PhenoScore = AMS.gene.PercentOfSNPs
    } else {
      PhenoScore = BinMM.gene.PercentOfSNPs
    }
    #generate covariates: for now, a single binary and a single continuous covariate
    CovData = GenCovData(SampleSize = numPairs, BinaryValues = 1, ContinuousValues = 1)
    #null SNP effects (all-zero Betas): the alternative signal enters only
    #through the score effect Gamma
    nSNP = ncol(RGenos) #number of SNPs
    Betas = as.matrix(rep(0, nSNP)) #column matrix of null beta values
    #generate phenotypes, both continuous (Cont) and binary (Cat),
    #based on the single true score
    CatPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = TRUE, YPrev = YPrev, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
    ContPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = FALSE, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
    #U scores: one UR per phenotype type (recipient genotypes) and one US per
    #score type and phenotype type
    #Recipient genotype UR values:
    UR_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = CatPhenos, BinPhenos = TRUE)
    UR_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = ContPhenos, BinPhenos = FALSE)
    #US for IBS Score
    US_IBS_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
    US_IBS_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
    #US for Incomp Score
    US_Incomp_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = CatPhenos, BinPhenos = TRUE)
    US_Incomp_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = ContPhenos, BinPhenos = FALSE)
    #US for AMS Score
    US_AMS_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
    US_AMS_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
    #US for Bin MM Score
    US_BinMM_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = CatPhenos, BinPhenos = TRUE)
    US_BinMM_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = ContPhenos, BinPhenos = FALSE)
    #Q values, mirroring the U-score layout
    #R geno QR values
    QR_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = CatPhenos, BinPhenos = TRUE)
    QR_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = ContPhenos, BinPhenos = FALSE)
    #QS for IBS Score
    QS_IBS_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
    QS_IBS_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
    #QS for Incomp Score
    QS_Incomp_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = CatPhenos, BinPhenos = TRUE)
    QS_Incomp_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = ContPhenos, BinPhenos = FALSE)
    #QS for AMS score
    QS_AMS_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
    QS_AMS_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
    #QS for Bin MM Score
    QS_BinMM_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = CatPhenos, BinPhenos = TRUE)
    QS_BinMM_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = ContPhenos, BinPhenos = FALSE)
    #Create combined Qs (QR, QS) for each R-geno + score pairing
    #R geno and IBS score
    Q_IBS_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_IBS_CatPhenos_Ghat)
    Q_IBS_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_IBS_ContPhenos_Ghat)
    #R geno and Incomp Score
    Q_Incomp_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_Incomp_CatPhenos_Ghat)
    Q_Incomp_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_Incomp_ContPhenos_Ghat)
    #R geno and AMS score
    Q_AMS_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_AMS_CatPhenos_Ghat)
    Q_AMS_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_AMS_ContPhenos_Ghat)
    #R geno and Bin MM score
    Q_BinMM_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_BinMM_CatPhenos_Ghat)
    Q_BinMM_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_BinMM_ContPhenos_Ghat)
    #full variance: each combined set of Qs gets its own variance calculation
    #R genos + IBS Score
    OrgVar_Q_IBS_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_IBS_CatPhenos_Ghat)
    OrgVar_Q_IBS_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_IBS_ContPhenos_Ghat)
    #R genos + Incomp Score
    OrgVar_Q_Incomp_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_Incomp_CatPhenos_Ghat)
    OrgVar_Q_Incomp_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_Incomp_ContPhenos_Ghat)
    #R genos + AMS Score
    OrgVar_Q_AMS_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_AMS_CatPhenos_Ghat)
    OrgVar_Q_AMS_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_AMS_ContPhenos_Ghat)
    #R genos + Bin MM Score
    OrgVar_Q_BinMM_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_BinMM_CatPhenos_Ghat)
    OrgVar_Q_BinMM_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_BinMM_ContPhenos_Ghat)
    #final statistic and p-value for each combination
    #R geno and IBS score
    Stat_IBS_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_IBS_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_IBS_CatPhenos_Ghat, s = s)
    Stat_IBS_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_IBS_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_IBS_ContPhenos_Ghat, s = s)
    #R geno and Incomp score
    Stat_Incomp_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_Incomp_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_Incomp_CatPhenos_Ghat, s = s)
    Stat_Incomp_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_Incomp_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_Incomp_ContPhenos_Ghat, s = s)
    #R geno and AMS Score
    Stat_AMS_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_AMS_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_AMS_CatPhenos_Ghat, s = s)
    Stat_AMS_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_AMS_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_AMS_ContPhenos_Ghat, s = s)
    #R geno and Bin MM score
    Stat_BinMM_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_BinMM_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_BinMM_CatPhenos_Ghat, s = s)
    Stat_BinMM_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_BinMM_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_BinMM_ContPhenos_Ghat, s = s)
    #fill columns in order
    ##Binary Phenos: IBS Cat, Incomp Cat, AMS Cat, Bin MM Cat
    statsAndPValsMat[,1:3] = Stat_IBS_CatPhenos
    statsAndPValsMat[,4:6] = Stat_Incomp_CatPhenos
    statsAndPValsMat[,7:9] = Stat_AMS_CatPhenos
    statsAndPValsMat[,10:12] = Stat_BinMM_CatPhenos
    ##Cont Phenos: IBS Cont, Incomp Cont, AMS Cont, Bin MM Cont
    statsAndPValsMat[,13:15] = Stat_IBS_ContPhenos
    statsAndPValsMat[,16:18] = Stat_Incomp_ContPhenos
    statsAndPValsMat[,19:21] = Stat_AMS_ContPhenos
    statsAndPValsMat[,22:24] = Stat_BinMM_ContPhenos
    print(paste0("Simulation ",ii," is complete."))
    statsAndPValsMat
  }, mc.cores=4)
  #stack the per-simulation rows into a numSims x 24 matrix and split by phenotype type
  statsAndPValsAll = matrix(unlist(statsAndPValsMat), nrow = numSims, ncol = 24, byrow = TRUE)
  statsAndPValsCatPhenos = statsAndPValsAll[,1:12]
  statsAndPValsContPhenos = statsAndPValsAll[,13:24]
  #define values for naming conventions
  if(LowLD == TRUE){
    ld = "LowLD"
  } else {
    ld = "HighLD"
  }
  if(weightedScores == FALSE){
    weighted = ""
  } else {
    weighted = "WeightedScores"
  }
  if(standardizeScores == FALSE){
    standardized = ""
  } else {
    standardized = "StandardizedScores"
  }
  #write out the Stats and p values
  write.csv(statsAndPValsCatPhenos, file = paste0(path,"/Power_CatPhenos_",weighted,"_",standardized,"_Prev",YPrev*100,"_Ghat_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_s",s,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
  write.csv(statsAndPValsContPhenos, file = paste0(path,"/Power_ContPhenos__",weighted,"_",standardized,"_Ghat_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_s",s,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
  #calculate the power for each combination of score and R geno
  #first column for cat phenos, second for cont phenos
  #rows go IBS, Incomp, AMS, Bin MM
  power = matrix(nrow = 4, ncol = 2)
  #pull only the pvalues (2nd entry of each 3-column triple)
  PValsCatPhenos = statsAndPValsCatPhenos[,c(2,5,8,11)]
  PValsContPhenos = statsAndPValsContPhenos[,c(2,5,8,11)]
  #calc power = proportion of sims rejecting at the 0.05 level
  for(jj in 1:4){
    power[jj,1] = sum(PValsCatPhenos[,jj] <= 0.05)/numSims
    power[jj,2] = sum(PValsContPhenos[,jj] <= 0.05)/numSims
  }
  #write out power results (fixed: this file was previously written twice in a row)
  write.csv(power, file = paste0(path,"/Power_Results_",percentageAssoc,"SNPsAssociated_",ld,"_",weighted,"_",standardized,"_Ghat_CatAndContPhenos_Prev",YPrev*100,"_s",s,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
}
#Power Pipeline with Ghat for RSNPs being true
RunPowerPipelineLocalGhat_RSNPs = function(chr, gene, numPairs, YPrev, s, Gamma, TrueScore, ORSize, standardizeScores = FALSE, weightedScores = FALSE, scoreWeights, percentageAssoc, LowLD){
#function to run whole power pipeline locally, uses Ghat instead of long form eqn for calculations
#Inputs:
#chr = chromosome number
#gene = gene name, in quotes
#numPairs = number of D/R pairs
#YPrev = prevalence of binary outcome Y
#s = PVE for choosing the number of PCs
#Gamma = effect size for score, length 1, can be 0
#TrueScore = IBS.gene, Incomp.gene, AMS.gene, or BinMM.gene
#ORSize = small, medium or large for effect size of associated R geno SNP
#standardizeScores = T or F whether the scores should be standardized based on maximum score value
#weightedScores = T or F whether the scores will be weighted
#scoreWeights = m x 1 vector of weights, one weight for each SNP
#### This value shows which score is potentially being used to create phenotypes
#percentageAssoc = percentage of SNPs associated with outcome (either 5, 25, 50, 75, or 100)
#LowLD = True or FALSE whether the associated SNPs are in low LD or high LD
#Outputs:
#No direct outputs, writes scores and pvalues to csv files
#also writes power values to csv files
library(parallel)
#need to define this for naming at the end
snpOrScore = "RSNP"
#define effect based on OR size
if(ORSize == "small"){
effect = 0.14
} else if(ORSize == "medium"){
effect = 0.41
} else {
effect = 0.69
}
#number of sims always the same
numSims = 5000
#define path to data
#for sampled haplotype data
# path = paste0("/home/vlynn/Paper_II_Sims/",gene,"_Results_",numPairs,"Pairs")
#for HapGen generated data
path = paste0("/home/vlynn/Paper_II_Sims/HapGen_Files/",gene,"_Results_",numPairs,"Pairs")
#source the needed functions
source("/home/vlynn/Paper_II_Sims/HapGen_Files/Scripts/ProjectIISourceFunctions.R")
#determine which SNPs to actually set as assoc. based on gene
assocSNPs = DetermineAssocRSNPs(gene = gene, LowLD = LowLD, percentageAssoc = percentageAssoc)
#turn output matrices into output lists
myList = lapply(1:numSims, rep, times = 1)
statsAndPValsMat = matrix(NA,ncol=24)
statsAndPValsMat = mclapply(myList, function(ii){
statsAndPValsMat = matrix(NA,ncol=24)
#pull recipient and donor genotypes
RGenos = obtainRGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
DGenos = obtainDGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
#calculate single snp scores
IBS.snp = calcIBSMismatch(RGenosMat = RGenos, DGenosMat = DGenos)
Incomp.snp = calcIncompatibilityScore(RGenosMat = RGenos, DGenosMat = DGenos)
AMS.snp = calcAMS(RGenosMat = RGenos, DGenosMat = DGenos)
BinMM.snp = calcBinaryMM(RGenosMat = RGenos, DGenosMat = DGenos)
#calculate gene based scores
#check to see if weights are used for scores
if(weightedScores == FALSE){
#check to see if scores should be standardized (can't be weighted and standardized)
if(standardizeScores == FALSE){
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = FALSE)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = FALSE)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = FALSE)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = FALSE)
} else {
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = TRUE, useWeights = FALSE)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = TRUE, useWeights = FALSE)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = TRUE, useWeights = FALSE)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = TRUE, useWeights = FALSE)
}
} else {
IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
}
#need to use TrueScore to pull gene based scores matrix for generating phenotypes
if(TrueScore == "IBS.gene"){
PhenoScore = IBS.gene
} else if(TrueScore == "Incomp.gene"){
PhenoScore = Incomp.gene
} else if(TrueScore == "AMS.gene"){
PhenoScore = AMS.gene
} else {
PhenoScore = BinMM.gene
}
#generate covariates
#for now, a single binary and a single continous covariate
CovData = GenCovData(SampleSize = numPairs, BinaryValues = 1, ContinuousValues = 1)
#need to define null Betas for phenotype generation
nSNP = ncol(RGenos) #this should be the number of SNPs
nullBetas = rep(0,nSNP) #generate null beta values
Betas = nullBetas
#set assoc Betas
#all betas have same effect for now
for(jj in assocSNPs){
Betas[jj] = effect
Betas = as.matrix(Betas, ncol = 1)
}
#generate phenotypes, both continuous and binary
#Based on single true score
CatPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = TRUE, YPrev = YPrev, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
ContPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = FALSE, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
#Need to calculate UR and US scores
#will have then for continuous and binary phenos
#need separate US for each score type
#Recipient genotype UR values:
UR_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = CatPhenos, BinPhenos = TRUE)
UR_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = ContPhenos, BinPhenos = FALSE)
#US for IBS Score
US_IBS_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
US_IBS_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#US for Incomp Score
US_Incomp_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = CatPhenos, BinPhenos = TRUE)
US_Incomp_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#US for AMS Score
US_AMS_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
US_AMS_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#US for Bin MM Score
US_BinMM_CatPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = CatPhenos, BinPhenos = TRUE)
US_BinMM_ContPhenos_Ghat = CalcUScoreGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#Need to calculate Q values
#R geno QR values
QR_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = CatPhenos, BinPhenos = TRUE)
QR_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = TRUE, RGenoData = RGenos, Phenos = ContPhenos, BinPhenos = FALSE)
#QS for IBS Score
QS_IBS_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
QS_IBS_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = IBS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#QS for Incomp Score
QS_Incomp_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = CatPhenos, BinPhenos = TRUE)
QS_Incomp_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = Incomp.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#QS for AMS score
QS_AMS_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = CatPhenos, BinPhenos = TRUE)
QS_AMS_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = AMS.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#QS for Bin MM Score
QS_BinMM_CatPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = CatPhenos, BinPhenos = TRUE)
QS_BinMM_ContPhenos_Ghat = CalcQValuesGhat(SampleSize = numPairs, includeCov = TRUE, CovData = CovData, CalcUR = FALSE, ScoreData = BinMM.gene, Phenos = ContPhenos, BinPhenos = FALSE)
#Create combined Qs (QR, QS)
#R geno and IBS score
Q_IBS_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_IBS_CatPhenos_Ghat)
Q_IBS_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_IBS_ContPhenos_Ghat)
#R geno and Incomp Score
Q_Incomp_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_Incomp_CatPhenos_Ghat)
Q_Incomp_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_Incomp_ContPhenos_Ghat)
#R geno and AMS score
Q_AMS_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_AMS_CatPhenos_Ghat)
Q_AMS_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_AMS_ContPhenos_Ghat)
#R geno and Bin MM score
Q_BinMM_CatPhenos_Ghat = cbind(QR_CatPhenos_Ghat, QS_BinMM_CatPhenos_Ghat)
Q_BinMM_ContPhenos_Ghat = cbind(QR_ContPhenos_Ghat, QS_BinMM_ContPhenos_Ghat)
#calculate full variance
#each set of Qs will have a variance calculation
#R genos + IBS Score
OrgVar_Q_IBS_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_IBS_CatPhenos_Ghat)
OrgVar_Q_IBS_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_IBS_ContPhenos_Ghat)
#R genos + Incomp Score
OrgVar_Q_Incomp_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_Incomp_CatPhenos_Ghat)
OrgVar_Q_Incomp_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_Incomp_ContPhenos_Ghat)
#R genos + AMS Score
OrgVar_Q_AMS_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_AMS_CatPhenos_Ghat)
OrgVar_Q_AMS_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_AMS_ContPhenos_Ghat)
#R genos + Bin MM Score
OrgVar_Q_BinMM_CatPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_BinMM_CatPhenos_Ghat)
OrgVar_Q_BinMM_ContPhenos_Ghat = CalcVariance(SampleSize = numPairs, QValues = Q_BinMM_ContPhenos_Ghat)
#calculate final stat and p value
#R geno and IBS score
Stat_IBS_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_IBS_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_IBS_CatPhenos_Ghat, s = s)
Stat_IBS_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_IBS_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_IBS_ContPhenos_Ghat, s = s)
#R geno and Incomp score
Stat_Incomp_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_Incomp_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_Incomp_CatPhenos_Ghat, s = s)
Stat_Incomp_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_Incomp_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_Incomp_ContPhenos_Ghat, s = s)
#R geno and AMS Score
Stat_AMS_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_AMS_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_AMS_CatPhenos_Ghat, s = s)
Stat_AMS_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_AMS_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_AMS_ContPhenos_Ghat, s = s)
#R geno and Bin MM score
Stat_BinMM_CatPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_BinMM_CatPhenos_Ghat, UscoresR = UR_CatPhenos_Ghat, UscoreS = US_BinMM_CatPhenos_Ghat, s = s)
Stat_BinMM_ContPhenos = CalcStatisticPVal(SampleSize = numPairs, Variance = OrgVar_Q_BinMM_ContPhenos_Ghat, UscoresR = UR_ContPhenos_Ghat, UscoreS = US_BinMM_ContPhenos_Ghat, s = s)
#fill columns in order
##Binary Phenos
##IBS Cat, Incomp Cat, AMS Cat, Bin MM Cat,
statsAndPValsMat[,1:3] = Stat_IBS_CatPhenos
statsAndPValsMat[,4:6] = Stat_Incomp_CatPhenos
statsAndPValsMat[,7:9] = Stat_AMS_CatPhenos
statsAndPValsMat[,10:12] = Stat_BinMM_CatPhenos
##Cont Phenos
##IBS Cont, Incomp Cont, AMS Cont, Bin MM Cont
statsAndPValsMat[,13:15] = Stat_IBS_ContPhenos
statsAndPValsMat[,16:18] = Stat_Incomp_ContPhenos
statsAndPValsMat[,19:21] = Stat_AMS_ContPhenos
statsAndPValsMat[,22:24] = Stat_BinMM_ContPhenos
print(paste0("Simulation ",ii," is complete."))
#reset Betas
Betas = nullBetas
statsAndPValsMat
}, mc.cores = 4)
statsAndPValsAll = matrix(unlist(statsAndPValsMat),nrow = numSims, ncol = 24, byrow = TRUE)
statsAndPValsCatPhenos = statsAndPValsAll[,1:12]
statsAndPValsContPhenos = statsAndPValsAll[,13:24]
#define values for naming conventions
if(LowLD == TRUE){
ld = "LowLD"
} else {
ld = "HighLD"
}
if(weightedScores == FALSE){
weighted = ""
} else{
weighted = "WeightedScores"
}
if(standardizeScores == FALSE){
standardized = ""
} else {
standardized = "StandardizedScores"
}
#write out the Stats and p values
write.csv(statsAndPValsCatPhenos, file = paste0(path,"/Power_CatPhenos_",weighted,"_",standardized,"_Prev",YPrev*100,"_Ghat_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_s",s,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
write.csv(statsAndPValsContPhenos, file = paste0(path,"/Power_ContPhenos__",weighted,"_",standardized,"_Ghat_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_s",s,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
#calculate the power for each combination of score and r geno
#first column for cat phenos, second for cont phenos
#rows go IBS, Incomp, AMS, Bin MM
power = matrix(nrow = 4, ncol = 2)
#pull only the pvalues
PValsCatPhenos = statsAndPValsCatPhenos[,c(2,5,8,11)]
PValsContPhenos = statsAndPValsContPhenos[,c(2,5,8,11)]
#calc power
for(jj in 1:4){
power[jj,1] = sum(PValsCatPhenos[,jj] <= 0.05)/numSims
power[jj,2] = sum(PValsContPhenos[,jj] <= 0.05)/numSims
}
#write out power results
write.csv(power, file = paste0(path,"/Power_Results_",percentageAssoc,"SNPsAssociated_",ld,"_",weighted,"_",standardized,"_Ghat_CatAndContPhenos_Prev",YPrev*100,"_s",s,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
write.csv(power, file = paste0(path,"/Power_Results_",percentageAssoc,"SNPsAssociated_",ld,"_",weighted,"_",standardized,"_Ghat_CatAndContPhenos_Prev",YPrev*100,"_s",s,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
}
##################################################
## SKAT Pipelines
##################################################
#TIE Pipeline with SKAT
#uses N x (m+1) design matrices (R genotypes plus one gene-level score column)
RunTIEPipelineSKAT = function(chr, gene, numPairs, YPrev, kernel, kernelWeights=c(), standardizeScores = FALSE, weightedScores = FALSE, scoreWeights){
  # Run the full type-I-error (TIE) pipeline, computing SKAT statistics and
  # p-values for every combination of mismatch score and outcome type under
  # null (no genetic effect) phenotypes.
  # Design matrices handed to SKAT are N x (m+1): recipient genotypes plus
  # one gene-level score column.
  #
  # Inputs:
  #   chr               = chromosome number
  #   gene              = gene name, in quotes
  #   numPairs          = number of donor/recipient (D/R) pairs
  #   YPrev             = prevalence of the binary outcome Y
  #   kernel            = SKAT kernel (linear, linear.weighted, IBS, IBS.weighted)
  #   kernelWeights     = numeric vector of weights for weighted kernels;
  #                       an empty vector means unweighted SKAT
  #   standardizeScores = TRUE/FALSE, standardize scores by the maximum score value
  #   weightedScores    = TRUE/FALSE, weight the gene-level scores
  #   scoreWeights      = m x 1 vector of per-SNP weights (used only when
  #                       weightedScores is TRUE)
  # Outputs:
  #   No direct return value; writes statistics/p-values and TIE estimates
  #   to csv files in the gene/sample-size results directory.
  library(SKAT)
  library(parallel)
  # number of simulation replicates is fixed for all runs
  numSims = 5000
  # path to the HapGen-generated data for this gene / sample size
  path = paste0("/home/vlynn/Paper_II_Sims/HapGen_Files/",gene,"_Results_",numPairs,"Pairs")
  # source the project helper functions (genotype readers, score calculators,
  # covariate/phenotype generators)
  source("/home/vlynn/Paper_II_Sims/HapGen_Files/Scripts/ProjectIISourceFunctions_v2.R")
  # one list element per simulation index so mclapply can parallelize over sims
  myList = lapply(1:numSims, rep, times = 1)
  statsAndPVals = mclapply(myList, function(ii){
    # pull recipient and donor genotypes for simulation ii
    RGenos = obtainRGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
    DGenos = obtainDGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
    # single-SNP mismatch scores
    IBS.snp = calcIBSMismatch(RGenosMat = RGenos, DGenosMat = DGenos)
    Incomp.snp = calcIncompatibilityScore(RGenosMat = RGenos, DGenosMat = DGenos)
    AMS.snp = calcAMS(RGenosMat = RGenos, DGenosMat = DGenos)
    BinMM.snp = calcBinaryMM(RGenosMat = RGenos, DGenosMat = DGenos)
    # gene-based scores; scores cannot be both weighted and standardized, so
    # the standardize flag is only honored on the unweighted branch
    if(weightedScores == FALSE){
      IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = standardizeScores, useWeights = FALSE)
      Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = standardizeScores, useWeights = FALSE)
      AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = standardizeScores, useWeights = FALSE)
      BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = standardizeScores, useWeights = FALSE)
    } else {
      IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
      Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
      AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
      BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = TRUE, scoreWeights)
    }
    # covariates: for now, a single binary and a single continuous covariate
    CovData = GenCovData(SampleSize = numPairs, BinaryValues = 1, ContinuousValues = 1)
    # null phenotypes (no genetic effect), binary and continuous
    CatPhenos = GenNullPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = TRUE, YPrev = YPrev, Covariates = CovData)
    ContPhenos = GenNullPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = FALSE, Covariates = CovData)
    # combine R genotypes and each score into 4 datasets, size N x (m+1)
    RGeno.IBS.gene = cbind(RGenos, IBS.gene)
    RGeno.Incomp.gene = cbind(RGenos, Incomp.gene)
    RGeno.AMS.gene = cbind(RGenos, AMS.gene)
    RGeno.BinMM.gene = cbind(RGenos, BinMM.gene)
    ## SKAT null models: Y ~ covariates, dichotomous and continuous outcomes
    obj_dich=SKAT_Null_Model(CatPhenos~CovData, out_type="D")
    obj_cont=SKAT_Null_Model(ContPhenos~CovData, out_type="C")
    # SKAT for all 8 combinations of score and cont/dich outcome
    if(length(kernelWeights) == 0){ # unweighted SKAT
      Stat_IBS_CatPhenos = SKAT(RGeno.IBS.gene, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_Incomp_CatPhenos = SKAT(RGeno.Incomp.gene, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_AMS_CatPhenos = SKAT(RGeno.AMS.gene, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_BinMM_CatPhenos = SKAT(RGeno.BinMM.gene, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_IBS_ContPhenos = SKAT(RGeno.IBS.gene, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.gene, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_AMS_ContPhenos = SKAT(RGeno.AMS.gene, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.gene, obj_cont, kernel = kernel, is_check_genotype = FALSE)
    } else { # weighted SKAT
      Stat_IBS_CatPhenos = SKAT(RGeno.IBS.gene, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_Incomp_CatPhenos = SKAT(RGeno.Incomp.gene, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_AMS_CatPhenos = SKAT(RGeno.AMS.gene, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_BinMM_CatPhenos = SKAT(RGeno.BinMM.gene, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_IBS_ContPhenos = SKAT(RGeno.IBS.gene, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.gene, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_AMS_ContPhenos = SKAT(RGeno.AMS.gene, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.gene, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
    }
    # assemble this simulation's row as (Q, p) pairs in fixed order:
    # binary phenos first, then continuous; within each, IBS, Incomp, AMS, BinMM.
    # This c() yields the same 16-value ordering the previous
    # as.matrix/t/cbind construction produced once unlist()ed below.
    rowStats = c(Stat_IBS_CatPhenos$Q, Stat_IBS_CatPhenos$p.value,
                 Stat_Incomp_CatPhenos$Q, Stat_Incomp_CatPhenos$p.value,
                 Stat_AMS_CatPhenos$Q, Stat_AMS_CatPhenos$p.value,
                 Stat_BinMM_CatPhenos$Q, Stat_BinMM_CatPhenos$p.value,
                 Stat_IBS_ContPhenos$Q, Stat_IBS_ContPhenos$p.value,
                 Stat_Incomp_ContPhenos$Q, Stat_Incomp_ContPhenos$p.value,
                 Stat_AMS_ContPhenos$Q, Stat_AMS_ContPhenos$p.value,
                 Stat_BinMM_ContPhenos$Q, Stat_BinMM_ContPhenos$p.value)
    print(paste0("Simulation ",ii," is complete."))
    rowStats
  }, mc.cores=4)
  # one row per simulation, 16 columns (Q and p for each of the 8 analyses)
  statsAndPValsAll = matrix(unlist(statsAndPVals),nrow = numSims, ncol = 16, byrow = TRUE)
  # separate into categorical and continuous phenotype results
  statsAndPValsCatPhenos = statsAndPValsAll[,1:8]
  statsAndPValsContPhenos = statsAndPValsAll[,9:16]
  # write out the stats and p-values; file names encode the score options
  if(weightedScores == FALSE){
    if(standardizeScores == FALSE){
      write.csv(statsAndPValsCatPhenos, file = paste0(path,"/TIE_mPlus1_CatPhenos_Prev",YPrev*100,"_SKAT_kernel",kernel,"_StatsAndPValues.csv"))
      write.csv(statsAndPValsContPhenos, file = paste0(path,"/TIE_mPlus1_ContPhenos_SKAT_kernel",kernel,"_StatsAndPValues.csv"))
    } else { #scores are standardized
      write.csv(statsAndPValsCatPhenos, file = paste0(path,"/TIE_mPlus1_CatPhenos_Prev",YPrev*100,"_SKAT_kernel",kernel,"_StandardizedScores_StatsAndPValues.csv"))
      write.csv(statsAndPValsContPhenos, file = paste0(path,"/TIE_mPlus1_ContPhenos_SKAT_kernel",kernel,"_StandardizedScores_StatsAndPValues.csv"))
    }
  } else { #scores are weighted
    write.csv(statsAndPValsCatPhenos, file = paste0(path,"/TIE_mPlus1_CatPhenos_Prev",YPrev*100,"_SKAT_kernel",kernel,"_WeightedScores_StatsAndPValues.csv"))
    write.csv(statsAndPValsContPhenos, file = paste0(path,"/TIE_mPlus1_ContPhenos_SKAT_kernel",kernel,"_WeightedScores_StatsAndPValues.csv"))
  }
  # type I error for each combination of score and phenotype type
  # column 1 = categorical phenos, column 2 = continuous phenos
  # rows: IBS, Incomp, AMS, BinMM
  ties = matrix(nrow = 4, ncol = 2)
  # p-values sit in even-numbered columns (Q, p pairs)
  PValsCatPhenos = statsAndPValsCatPhenos[,c(2,4,6,8)]
  PValsContPhenos = statsAndPValsContPhenos[,c(2,4,6,8)]
  # empirical TIE = proportion of sims with p <= 0.05
  for(jj in 1:4){
    ties[jj,1] = sum(PValsCatPhenos[,jj] <= 0.05)/numSims
    ties[jj,2] = sum(PValsContPhenos[,jj] <= 0.05)/numSims
  }
  # write out type I error results
  if(weightedScores == FALSE){
    if(standardizeScores == FALSE){
      write.csv(ties, file = paste0(path,"/TIE_Results_mPlus1_SKAT_kernel",kernel,"_CatAndContPhenos_Prev",YPrev*100,".csv"))
    } else { #scores are standardized
      write.csv(ties, file = paste0(path,"/TIE_Results_mPlus1_SKAT_kernel",kernel,"_StandardizedScores_CatAndContPhenos_Prev",YPrev*100,".csv"))
    }
  } else { #scores are weighted
    write.csv(ties, file = paste0(path,"/TIE_Results_mPlus1_SKAT_kernel",kernel,"_WeightedScores_CatAndContPhenos_Prev",YPrev*100,".csv"))
  }
}
#Power Pipeline with SKAT for one of the Scores being true
# N x (m+1) instead of N x 2m
RunPowerPipelineSKAT_Scores = function(chr, gene, numPairs, YPrev, kernel, kernelWeights=c(), Gamma, TrueScore, ORSize, standardizeScores = FALSE, weightedScores = FALSE, scoreWeights, percentageAssoc, LowLD){
  # Run the full power pipeline where one of the gene-level SCORES drives the
  # phenotypes; computes SKAT statistics and p-values for comparison.
  # Design matrices handed to SKAT are N x (m+1): recipient genotypes plus
  # one gene-level score column.
  #
  # Inputs:
  #   chr               = chromosome number
  #   gene              = gene name, in quotes
  #   numPairs          = number of donor/recipient (D/R) pairs
  #   YPrev             = prevalence of the binary outcome Y
  #   kernel            = SKAT kernel (linear, linear.weighted, IBS, IBS.weighted)
  #   kernelWeights     = numeric vector of weights for weighted kernels;
  #                       an empty vector means unweighted SKAT
  #   Gamma             = effect size for the true score, length 1, can be 0
  #   TrueScore         = "IBS.gene", "Incomp.gene", "AMS.gene", or "BinMM.gene";
  #                       selects which score generates the phenotypes
  #   ORSize            = Small, Medium, or Large (OR used for the associated score)
  #   standardizeScores = TRUE/FALSE, standardize scores by the maximum score value
  #   weightedScores    = TRUE/FALSE, weight the scores
  #   scoreWeights      = m x 1 vector of per-SNP weights
  #   percentageAssoc   = percentage of SNPs associated with outcome (5/25/50/75/100)
  #   LowLD             = TRUE/FALSE, associated SNPs in low vs high LD
  # Outputs:
  #   No direct return value; writes statistics/p-values and power estimates
  #   to csv files in the gene/sample-size results directory.
  #
  # NOTE(review): weightedScores and scoreWeights only affect output file
  # naming here — every calcGeneScore call below uses useWeights = FALSE.
  # Confirm whether weighted scores were intended for this pipeline.
  library(SKAT)
  library(parallel)
  # used in output file names to record what drove the phenotypes
  snpOrScore = TrueScore
  # number of simulation replicates is fixed for all runs
  numSims = 5000
  # path to the HapGen-generated data for this gene / sample size
  path = paste0("/home/vlynn/Paper_II_Sims/HapGen_Files/",gene,"_Results_",numPairs,"Pairs")
  # source the project helper functions
  source("/home/vlynn/Paper_II_Sims/HapGen_Files/Scripts/ProjectIISourceFunctions_v2.R")
  # one list element per simulation index so mclapply can parallelize over sims
  myList = lapply(1:numSims, rep, times = 1)
  statsAndPValsMat = mclapply(myList, function(ii){
    # pull recipient and donor genotypes for simulation ii
    RGenos = obtainRGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
    DGenos = obtainDGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
    # single-SNP mismatch scores
    IBS.snp = calcIBSMismatch(RGenosMat = RGenos, DGenosMat = DGenos)
    Incomp.snp = calcIncompatibilityScore(RGenosMat = RGenos, DGenosMat = DGenos)
    AMS.snp = calcAMS(RGenosMat = RGenos, DGenosMat = DGenos)
    BinMM.snp = calcBinaryMM(RGenosMat = RGenos, DGenosMat = DGenos)
    # gene-based scores over ALL SNPs (used as the analysis scores)
    IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = standardizeScores, useWeights = FALSE)
    Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = standardizeScores, useWeights = FALSE)
    AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = standardizeScores, useWeights = FALSE)
    BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = standardizeScores, useWeights = FALSE)
    # gene-based scores restricted to the associated subset of SNPs
    # (used only to GENERATE phenotypes when not all SNPs are associated)
    IBS.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = IBS.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
    Incomp.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = Incomp.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
    AMS.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = AMS.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
    BinMM.gene.PercentOfSNPs = calcGeneScorePercentOfSNPs(SingleSNPKernel = BinMM.snp, gene = gene, percentageAssoc = percentageAssoc, LowLD = LowLD, standardize = FALSE, useWeights = FALSE)
    # TrueScore selects which restricted score generates the phenotypes
    if(TrueScore == "IBS.gene"){
      PhenoScore = IBS.gene.PercentOfSNPs
    } else if(TrueScore == "Incomp.gene"){
      PhenoScore = Incomp.gene.PercentOfSNPs
    } else if(TrueScore == "AMS.gene"){
      PhenoScore = AMS.gene.PercentOfSNPs
    } else {
      PhenoScore = BinMM.gene.PercentOfSNPs
    }
    # covariates: for now, a single binary and a single continuous covariate
    CovData = GenCovData(SampleSize = numPairs, BinaryValues = 1, ContinuousValues = 1)
    # null (all-zero) SNP effects: under this pipeline only the score has an
    # effect (via Gamma), so every per-SNP Beta is 0
    nSNP = ncol(RGenos) #number of SNPs
    Betas = rep(0,nSNP)
    Betas = as.matrix(Betas, ncol = 1)
    # alternative phenotypes driven by the single true score
    CatPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = TRUE, YPrev = YPrev, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
    ContPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = FALSE, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
    # combine R genotypes and each score into 4 datasets, size N x (m+1)
    RGeno.IBS.snp = cbind(RGenos, IBS.gene)
    RGeno.Incomp.snp = cbind(RGenos, Incomp.gene)
    RGeno.AMS.snp = cbind(RGenos, AMS.gene)
    RGeno.BinMM.snp = cbind(RGenos, BinMM.gene)
    ## SKAT null models: Y ~ covariates, small-sample adjustment disabled
    obj_dich=SKAT_Null_Model(CatPhenos~CovData, out_type="D", Adjustment = FALSE)
    obj_cont=SKAT_Null_Model(ContPhenos~CovData, out_type="C", Adjustment = FALSE)
    # SKAT for all 8 combinations of score and cont/dich outcome
    if(length(kernelWeights) == 0){ # unweighted SKAT
      Stat_IBS_CatPhenos = SKAT(RGeno.IBS.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_Incomp_CatPhenos = SKAT(RGeno.Incomp.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_AMS_CatPhenos = SKAT(RGeno.AMS.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_BinMM_CatPhenos = SKAT(RGeno.BinMM.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_IBS_ContPhenos = SKAT(RGeno.IBS.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_AMS_ContPhenos = SKAT(RGeno.AMS.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
    } else { # weighted SKAT
      Stat_IBS_CatPhenos = SKAT(RGeno.IBS.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_Incomp_CatPhenos = SKAT(RGeno.Incomp.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_AMS_CatPhenos = SKAT(RGeno.AMS.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_BinMM_CatPhenos = SKAT(RGeno.BinMM.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_IBS_ContPhenos = SKAT(RGeno.IBS.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_AMS_ContPhenos = SKAT(RGeno.AMS.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
    }
    # assemble this simulation's 16 values as (Q, p) pairs in fixed order:
    # binary phenos first, then continuous; within each, IBS, Incomp, AMS, BinMM.
    # This c() reproduces the column-major order of the previous cbind-of-pairs
    # construction once unlist()ed below.
    rowStats = c(Stat_IBS_CatPhenos$Q, Stat_IBS_CatPhenos$p.value,
                 Stat_Incomp_CatPhenos$Q, Stat_Incomp_CatPhenos$p.value,
                 Stat_AMS_CatPhenos$Q, Stat_AMS_CatPhenos$p.value,
                 Stat_BinMM_CatPhenos$Q, Stat_BinMM_CatPhenos$p.value,
                 Stat_IBS_ContPhenos$Q, Stat_IBS_ContPhenos$p.value,
                 Stat_Incomp_ContPhenos$Q, Stat_Incomp_ContPhenos$p.value,
                 Stat_AMS_ContPhenos$Q, Stat_AMS_ContPhenos$p.value,
                 Stat_BinMM_ContPhenos$Q, Stat_BinMM_ContPhenos$p.value)
    print(paste0("Simulation ",ii," is complete."))
    rowStats
  }, mc.cores = 4)
  # one row per simulation, 16 columns (Q and p for each of the 8 analyses)
  statsAndPValsAll = matrix(unlist(statsAndPValsMat),nrow = numSims, ncol = 16, byrow = TRUE)
  # separate into categorical and continuous phenotype results
  statsAndPValsCatPhenos = statsAndPValsAll[,1:8]
  statsAndPValsContPhenos = statsAndPValsAll[,9:16]
  # tokens used in output file naming
  if(LowLD == TRUE){
    ld = "LowLD"
  } else {
    ld = "HighLD"
  }
  if(weightedScores == FALSE){
    weighted = ""
  } else{
    weighted = "WeightedScores"
  }
  if(standardizeScores == FALSE){
    standardized = ""
  } else {
    standardized = "StandardizedScores"
  }
  # write out the stats and p-values
  write.csv(statsAndPValsCatPhenos, file = paste0(path,"/Power_CatPhenos_",weighted,"_",standardized,"_Prev",YPrev*100,"_mPlus1_SKAT_kernel",kernel,"_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
  write.csv(statsAndPValsContPhenos, file = paste0(path,"/Power_ContPhenos_",weighted,"_",standardized,"_mPlus1_SKAT_kernel",kernel,"_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
  # power for each combination of score and phenotype type
  # column 1 = categorical phenos, column 2 = continuous phenos
  # rows: IBS, Incomp, AMS, BinMM
  power = matrix(nrow = 4, ncol = 2)
  # p-values sit in even-numbered columns (Q, p pairs)
  PValsCatPhenos = statsAndPValsCatPhenos[,c(2,4,6,8)]
  PValsContPhenos = statsAndPValsContPhenos[,c(2,4,6,8)]
  # empirical power = proportion of sims with p <= 0.05
  for(jj in 1:4){
    power[jj,1] = sum(PValsCatPhenos[,jj] <= 0.05)/numSims
    power[jj,2] = sum(PValsContPhenos[,jj] <= 0.05)/numSims
  }
  # write out power results
  write.csv(power, file = paste0(path,"/Power_Results_",percentageAssoc,"SNPsAssociated_",ld,"_",weighted,"_",standardized,"_mPlus1_SKAT_kernel",kernel,"_CatAndContPhenos_Prev",YPrev*100,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
}
#Power Pipeline with SKAT for one of the RSNPs being true
# N x (m+1) instead of N x 2m
RunPowerPipelineSKAT_RSNPs = function(chr, gene, numPairs, YPrev, kernel, kernelWeights=c(), Gamma, TrueScore, ORSize, standardizeScores = FALSE, weightedScores = FALSE, scoreWeights, percentageAssoc, LowLD){
  # Run the full power pipeline where a subset of recipient SNPs drives the
  # phenotypes (non-zero Betas on the associated SNPs); computes SKAT
  # statistics and p-values for comparison.
  # Design matrices handed to SKAT are N x (m+1): recipient genotypes plus
  # one gene-level score column.
  #
  # Inputs:
  #   chr               = chromosome number
  #   gene              = gene name, in quotes
  #   numPairs          = number of donor/recipient (D/R) pairs
  #   YPrev             = prevalence of the binary outcome Y
  #   kernel            = SKAT kernel (linear, linear.weighted, IBS, IBS.weighted)
  #   kernelWeights     = numeric vector of weights for weighted kernels;
  #                       an empty vector means unweighted SKAT
  #   Gamma             = effect size for the score term, length 1, can be 0
  #   TrueScore         = "IBS.gene", "Incomp.gene", "AMS.gene", or "BinMM.gene";
  #                       selects which score is passed into phenotype generation
  #   ORSize            = "small", "medium", or anything else (treated as large);
  #                       sets the per-SNP log-OR effect (0.14 / 0.41 / 0.69)
  #   standardizeScores = TRUE/FALSE, standardize scores by the maximum score value
  #   weightedScores    = TRUE/FALSE, weight the scores
  #   scoreWeights      = m x 1 vector of per-SNP weights
  #   percentageAssoc   = percentage of SNPs associated with outcome (5/25/50/75/100)
  #   LowLD             = TRUE/FALSE, associated SNPs in low vs high LD
  # Outputs:
  #   No direct return value; writes statistics/p-values and power estimates
  #   to csv files in the gene/sample-size results directory.
  #
  # NOTE(review): weightedScores and scoreWeights only affect output file
  # naming here — every calcGeneScore call below uses useWeights = FALSE.
  library(SKAT)
  library(parallel)
  # used in output file names: here the associated units are recipient SNPs
  snpOrScore = "RSNP"
  # per-SNP effect (log odds ratio) chosen from the requested OR size;
  # any value other than "small"/"medium" falls through to the large effect
  if(ORSize == "small"){
    effect = 0.14
  } else if(ORSize == "medium"){
    effect = 0.41
  } else {
    effect = 0.69
  }
  # number of simulation replicates is fixed for all runs
  numSims = 5000
  # path to the HapGen-generated data for this gene / sample size
  path = paste0("/home/vlynn/Paper_II_Sims/HapGen_Files/",gene,"_Results_",numPairs,"Pairs")
  # source the project helper functions
  # NOTE(review): this sources ProjectIISourceFunctions.R while the sibling
  # pipelines source ProjectIISourceFunctions_v2.R — confirm which version
  # is intended here.
  source("/home/vlynn/Paper_II_Sims/HapGen_Files/Scripts/ProjectIISourceFunctions.R")
  # indices of the SNPs set as truly associated, based on gene/LD/percentage
  assocSNPs = DetermineAssocRSNPs(gene = gene, LowLD = LowLD, percentageAssoc = percentageAssoc)
  # one list element per simulation index so mclapply can parallelize over sims
  myList = lapply(1:numSims, rep, times = 1)
  statsAndPValsMat = mclapply(myList, function(ii){
    # pull recipient and donor genotypes for simulation ii
    RGenos = obtainRGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
    DGenos = obtainDGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
    # single-SNP mismatch scores
    IBS.snp = calcIBSMismatch(RGenosMat = RGenos, DGenosMat = DGenos)
    Incomp.snp = calcIncompatibilityScore(RGenosMat = RGenos, DGenosMat = DGenos)
    AMS.snp = calcAMS(RGenosMat = RGenos, DGenosMat = DGenos)
    BinMM.snp = calcBinaryMM(RGenosMat = RGenos, DGenosMat = DGenos)
    # gene-based scores over all SNPs
    IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = standardizeScores, useWeights = FALSE)
    Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = standardizeScores, useWeights = FALSE)
    AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = standardizeScores, useWeights = FALSE)
    BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = standardizeScores, useWeights = FALSE)
    # TrueScore selects which score is supplied to phenotype generation
    if(TrueScore == "IBS.gene"){
      PhenoScore = IBS.gene
    } else if(TrueScore == "Incomp.gene"){
      PhenoScore = Incomp.gene
    } else if(TrueScore == "AMS.gene"){
      PhenoScore = AMS.gene
    } else {
      PhenoScore = BinMM.gene
    }
    # covariates: for now, a single binary and a single continuous covariate
    CovData = GenCovData(SampleSize = numPairs, BinaryValues = 1, ContinuousValues = 1)
    # start from null (zero) Betas, then set the associated SNPs;
    # all associated SNPs share the same effect size for now
    nSNP = ncol(RGenos) #number of SNPs
    Betas = rep(0,nSNP)
    Betas[assocSNPs] = effect
    Betas = as.matrix(Betas, ncol = 1)
    # alternative phenotypes driven by the associated recipient SNPs
    CatPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = TRUE, YPrev = YPrev, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
    ContPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = FALSE, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
    # combine R genotypes and each score into 4 datasets, size N x (m+1)
    RGeno.IBS.snp = cbind(RGenos, IBS.gene)
    RGeno.Incomp.snp = cbind(RGenos, Incomp.gene)
    RGeno.AMS.snp = cbind(RGenos, AMS.gene)
    RGeno.BinMM.snp = cbind(RGenos, BinMM.gene)
    ## SKAT null models: Y ~ covariates, small-sample adjustment disabled
    obj_dich=SKAT_Null_Model(CatPhenos~CovData, out_type="D", Adjustment = FALSE)
    obj_cont=SKAT_Null_Model(ContPhenos~CovData, out_type="C", Adjustment = FALSE)
    # SKAT for all 8 combinations of score and cont/dich outcome
    if(length(kernelWeights) == 0){ # unweighted SKAT
      Stat_IBS_CatPhenos = SKAT(RGeno.IBS.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_Incomp_CatPhenos = SKAT(RGeno.Incomp.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_AMS_CatPhenos = SKAT(RGeno.AMS.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_BinMM_CatPhenos = SKAT(RGeno.BinMM.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_IBS_ContPhenos = SKAT(RGeno.IBS.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_AMS_ContPhenos = SKAT(RGeno.AMS.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
    } else { # weighted SKAT
      Stat_IBS_CatPhenos = SKAT(RGeno.IBS.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_Incomp_CatPhenos = SKAT(RGeno.Incomp.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_AMS_CatPhenos = SKAT(RGeno.AMS.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_BinMM_CatPhenos = SKAT(RGeno.BinMM.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_IBS_ContPhenos = SKAT(RGeno.IBS.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_AMS_ContPhenos = SKAT(RGeno.AMS.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
    }
    # assemble this simulation's 16 values as (Q, p) pairs in fixed order:
    # binary phenos first, then continuous; within each, IBS, Incomp, AMS, BinMM.
    # This c() reproduces the column-major order of the previous cbind-of-pairs
    # construction once unlist()ed below.
    rowStats = c(Stat_IBS_CatPhenos$Q, Stat_IBS_CatPhenos$p.value,
                 Stat_Incomp_CatPhenos$Q, Stat_Incomp_CatPhenos$p.value,
                 Stat_AMS_CatPhenos$Q, Stat_AMS_CatPhenos$p.value,
                 Stat_BinMM_CatPhenos$Q, Stat_BinMM_CatPhenos$p.value,
                 Stat_IBS_ContPhenos$Q, Stat_IBS_ContPhenos$p.value,
                 Stat_Incomp_ContPhenos$Q, Stat_Incomp_ContPhenos$p.value,
                 Stat_AMS_ContPhenos$Q, Stat_AMS_ContPhenos$p.value,
                 Stat_BinMM_ContPhenos$Q, Stat_BinMM_ContPhenos$p.value)
    print(paste0("Simulation ",ii," is complete."))
    rowStats
  }, mc.cores = 4)
  # one row per simulation, 16 columns (Q and p for each of the 8 analyses)
  statsAndPValsAll = matrix(unlist(statsAndPValsMat),nrow = numSims, ncol = 16, byrow = TRUE)
  # separate into categorical and continuous phenotype results
  statsAndPValsCatPhenos = statsAndPValsAll[,1:8]
  statsAndPValsContPhenos = statsAndPValsAll[,9:16]
  # tokens used in output file naming
  if(LowLD == TRUE){
    ld = "LowLD"
  } else {
    ld = "HighLD"
  }
  if(weightedScores == FALSE){
    weighted = ""
  } else{
    weighted = "WeightedScores"
  }
  if(standardizeScores == FALSE){
    standardized = ""
  } else {
    standardized = "StandardizedScores"
  }
  # write out the stats and p-values
  write.csv(statsAndPValsCatPhenos, file = paste0(path,"/Power_CatPhenos_",weighted,"_",standardized,"_Prev",YPrev*100,"_mPlus1_SKAT_kernel",kernel,"_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
  write.csv(statsAndPValsContPhenos, file = paste0(path,"/Power_ContPhenos_",weighted,"_",standardized,"_mPlus1_SKAT_kernel",kernel,"_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
  # power for each combination of score and phenotype type
  # column 1 = categorical phenos, column 2 = continuous phenos
  # rows: IBS, Incomp, AMS, BinMM
  power = matrix(nrow = 4, ncol = 2)
  # p-values sit in even-numbered columns (Q, p pairs)
  PValsCatPhenos = statsAndPValsCatPhenos[,c(2,4,6,8)]
  PValsContPhenos = statsAndPValsContPhenos[,c(2,4,6,8)]
  # empirical power = proportion of sims with p <= 0.05
  for(jj in 1:4){
    power[jj,1] = sum(PValsCatPhenos[,jj] <= 0.05)/numSims
    power[jj,2] = sum(PValsContPhenos[,jj] <= 0.05)/numSims
  }
  # write out power results
  write.csv(power, file = paste0(path,"/Power_Results_",percentageAssoc,"SNPsAssociated_",ld,"_",weighted,"_",standardized,"_mPlus1_SKAT_kernel",kernel,"_CatAndContPhenos_Prev",YPrev*100,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
}
#########################################################################################################################################################
## These two functions were for testing whether standardizing the gene score to between 0-1 would improve power for the SKAT with linear kernel
RunPowerPipelineSKAT_RSNPs_standardized = function(chr, gene, numPairs, YPrev, kernel, kernelWeights=c(), Gamma, TrueScore, ORSize, standardizeScores = FALSE, weightedScores = FALSE, scoreWeights, percentageAssoc, LowLD){
  #Run the whole power-simulation pipeline for SKAT with associated recipient SNPs,
  #standardizing each gene-based mismatch score to the 0-1 range before it enters SKAT.
  #Calculates the SKAT statistic and p-value for every combination of the four
  #scores (IBS, Incompatibility, AMS, Binary MM) and the two outcome types
  #(categorical/binary and continuous).
  #Inputs:
  #chr = chromosome number
  #gene = gene name, in quotes
  #numPairs = number of D/R pairs
  #YPrev = prevalence of binary outcome Y
  #kernel = which kernel to use for SKAT (linear, linear.weighted, IBS, IBS.weighted)
  #kernelWeights = numeric vector of weights for weighted kernels; length 0 runs unweighted SKAT
  #Gamma = effect size for score, length 1, can be 0
  #TrueScore = IBS.gene, Incomp.gene, AMS.gene, or BinMM.gene; which score drives phenotype generation
  #ORSize = small, medium, or large for what OR was used for the associated SNPs
  #standardizeScores = T or F; only used for output file naming in this function
  #weightedScores = T or F; only used for output file naming in this function
  #scoreWeights = m x 1 vector of weights, one weight for each SNP
  #  NOTE(review): not referenced in this function body -- TODO confirm it can be dropped
  #percentageAssoc = percentage of SNPs associated with outcome (either 5, 25, 50, 75, or 100)
  #LowLD = TRUE or FALSE whether the associated SNPs are in low LD or high LD
  #Outputs:
  #No direct return value; writes stats/p-values and power results to csv files under `path`.
  library(SKAT)
  library(parallel)
  #need to define this for naming at the end
  snpOrScore = "RSNP"
  #translate the requested odds-ratio size into a per-SNP log-odds coefficient
  if(ORSize == "small"){
    effect = 0.14
  } else if(ORSize == "medium"){
    effect = 0.41
  } else {
    effect = 0.69
  }
  #number of sims always the same
  numSims = 5000
  #path to the HapGen-generated data for this gene / sample size
  path = paste0("/home/vlynn/Paper_II_Sims/HapGen_Files/",gene,"_Results_",numPairs,"Pairs")
  #source the needed helper functions (genotype readers, score calculators, phenotype generators)
  source("/home/vlynn/Paper_II_Sims/HapGen_Files/Scripts/ProjectIISourceFunctions.R")
  #determine which SNPs to actually set as assoc. based on gene
  assocSNPs = DetermineAssocRSNPs(gene = gene, LowLD = LowLD, percentageAssoc = percentageAssoc)
  #one worker task per simulation; each returns a 2 x 8 block (row 1 = Q stats, row 2 = p-values)
  #(replaces the old myList = lapply(1:numSims, rep, times = 1) indirection and the
  # dead matrix(NA, ncol = 16) pre-assignments, which were never read)
  statsAndPValsMat = mclapply(seq_len(numSims), function(ii){
    #pull recipient and donor genotypes for this simulation
    RGenos = obtainRGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
    DGenos = obtainDGenotypes(chr = chr, numSamples = numPairs, simNum = ii, gene = gene, path = path)
    #calculate single snp scores
    IBS.snp = calcIBSMismatch(RGenosMat = RGenos, DGenosMat = DGenos)
    Incomp.snp = calcIncompatibilityScore(RGenosMat = RGenos, DGenosMat = DGenos)
    AMS.snp = calcAMS(RGenosMat = RGenos, DGenosMat = DGenos)
    BinMM.snp = calcBinaryMM(RGenosMat = RGenos, DGenosMat = DGenos)
    #calc gene based score for all SNPs (unstandardized, unweighted at this stage)
    IBS.gene = calcGeneScore(SingleSNPKernel = IBS.snp, standardize = FALSE, useWeights = FALSE)
    Incomp.gene = calcGeneScore(SingleSNPKernel = Incomp.snp, standardize = FALSE, useWeights = FALSE)
    AMS.gene = calcGeneScore(SingleSNPKernel = AMS.snp, standardize = FALSE, useWeights = FALSE)
    BinMM.gene = calcGeneScore(SingleSNPKernel = BinMM.snp, standardize = FALSE, useWeights = FALSE)
    #use TrueScore to pick the gene based score used for generating phenotypes
    if(TrueScore == "IBS.gene"){
      PhenoScore = IBS.gene
    } else if(TrueScore == "Incomp.gene"){
      PhenoScore = Incomp.gene
    } else if(TrueScore == "AMS.gene"){
      PhenoScore = AMS.gene
    } else {
      PhenoScore = BinMM.gene
    }
    #generate covariates: for now, a single binary and a single continuous covariate
    CovData = GenCovData(SampleSize = numPairs, BinaryValues = 1, ContinuousValues = 1)
    #build the SNP effect vector: zero everywhere except the associated SNPs,
    #which all share the same effect size for now
    nSNP = ncol(RGenos) #this should be the number of SNPs
    Betas = rep(0, nSNP)
    Betas[assocSNPs] = effect
    Betas = as.matrix(Betas, ncol = 1)
    #generate phenotypes, both continuous and binary, based on the single true score
    CatPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = TRUE, YPrev = YPrev, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
    ContPhenos = GenAltPhenos(SampleSize = numPairs, includeCov = TRUE, YCat = FALSE, Covariates = CovData, RGenoData = RGenos, ScoreData = PhenoScore, Betas = Betas, Gamma = Gamma)
    #standardize each gene score to range 0-1 by dividing by its maximum attainable value
    #(IBS and AMS can reach 2 per SNP; Incomp and BinMM can reach 1 per SNP)
    IBS.stand = IBS.gene/(nSNP*2)
    Incomp.stand = Incomp.gene/(nSNP)
    AMS.stand = AMS.gene/(nSNP*2)
    BinMM.stand = BinMM.gene/(nSNP)
    #Combine R geno and Scores into 4 separate datasets, size: N x (m+1)
    RGeno.IBS.snp = cbind(RGenos, IBS.stand)
    RGeno.Incomp.snp = cbind(RGenos, Incomp.stand)
    RGeno.AMS.snp = cbind(RGenos, AMS.stand)
    RGeno.BinMM.snp = cbind(RGenos, BinMM.stand)
    ## Generate SKAT Null Models
    # formulas will be Y ~ covariates for continuous and dichotomous Y
    obj_dich=SKAT_Null_Model(CatPhenos~CovData, out_type="D", Adjustment = FALSE)
    obj_cont=SKAT_Null_Model(ContPhenos~CovData, out_type="C", Adjustment = FALSE)
    #Perform SKAT for all 8 combos of score and cont/dich outcome;
    #an empty kernelWeights vector means the unweighted form of the kernel
    if(length(kernelWeights) == 0){
      Stat_IBS_CatPhenos = SKAT(RGeno.IBS.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_Incomp_CatPhenos = SKAT(RGeno.Incomp.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_AMS_CatPhenos = SKAT(RGeno.AMS.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_BinMM_CatPhenos = SKAT(RGeno.BinMM.snp, obj_dich, kernel = kernel, is_check_genotype = FALSE)
      Stat_IBS_ContPhenos = SKAT(RGeno.IBS.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_AMS_ContPhenos = SKAT(RGeno.AMS.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
      Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.snp, obj_cont, kernel = kernel, is_check_genotype = FALSE)
    } else {
      Stat_IBS_CatPhenos = SKAT(RGeno.IBS.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_Incomp_CatPhenos = SKAT(RGeno.Incomp.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_AMS_CatPhenos = SKAT(RGeno.AMS.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_BinMM_CatPhenos = SKAT(RGeno.BinMM.snp, obj_dich, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_IBS_ContPhenos = SKAT(RGeno.IBS.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_Incomp_ContPhenos = SKAT(RGeno.Incomp.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_AMS_ContPhenos = SKAT(RGeno.AMS.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
      Stat_BinMM_ContPhenos = SKAT(RGeno.BinMM.snp, obj_cont, kernel = kernel, weights = kernelWeights, is_check_genotype = FALSE)
    }
    #fill columns in order: each column is c(Q statistic, p-value) for one test
    ##Binary Phenos: IBS Cat, Incomp Cat, AMS Cat, Bin MM Cat
    IBSCat = c(Stat_IBS_CatPhenos$Q, Stat_IBS_CatPhenos$p.value)
    IncompCat = c(Stat_Incomp_CatPhenos$Q, Stat_Incomp_CatPhenos$p.value)
    AMSCat = c(Stat_AMS_CatPhenos$Q, Stat_AMS_CatPhenos$p.value)
    BinMMCat = c(Stat_BinMM_CatPhenos$Q, Stat_BinMM_CatPhenos$p.value)
    ##Cont Phenos: IBS Cont, Incomp Cont, AMS Cont, Bin MM Cont
    IBSCont = c(Stat_IBS_ContPhenos$Q, Stat_IBS_ContPhenos$p.value)
    IncompCont = c(Stat_Incomp_ContPhenos$Q, Stat_Incomp_ContPhenos$p.value)
    AMSCont = c(Stat_AMS_ContPhenos$Q, Stat_AMS_ContPhenos$p.value)
    BinMMCont = c(Stat_BinMM_ContPhenos$Q, Stat_BinMM_ContPhenos$p.value)
    statsAndPValsMat = cbind(IBSCat,IncompCat,AMSCat,BinMMCat,IBSCont,IncompCont,AMSCont,BinMMCont)
    print(paste0("Simulation ",ii," is complete."))
    #return the 2x8 block for this simulation
    #(the old `Betas = nullBetas` reset here was dead code: Betas is local to this closure)
    statsAndPValsMat
  }, mc.cores = 4)
  #stack the per-simulation 2x8 blocks into a numSims x 16 matrix; column-major
  #unlisting of each block gives (Q, p) pairs per test, Cat tests in cols 1-8
  statsAndPValsAll = matrix(unlist(statsAndPValsMat),nrow = numSims, ncol = 16, byrow = TRUE)
  #separate into cat and cont phenos
  statsAndPValsCatPhenos = statsAndPValsAll[,1:8]
  statsAndPValsContPhenos = statsAndPValsAll[,9:16]
  #define values for naming conventions
  if(LowLD == TRUE){
    ld = "LowLD"
  } else {
    ld = "HighLD"
  }
  if(weightedScores == FALSE){
    weighted = ""
  } else{
    weighted = "WeightedScores"
  }
  if(standardizeScores == FALSE){
    standardized = ""
  } else {
    standardized = "StandardizedScores"
  }
  #write out the Stats and p values
  write.csv(statsAndPValsCatPhenos, file = paste0(path,"/Power_CatPhenos_",weighted,"_",standardized,"_Prev",YPrev*100,"_mPlus1_SKAT_kernel",kernel,"standardized_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
  write.csv(statsAndPValsContPhenos, file = paste0(path,"/Power_ContPhenos_",weighted,"_",standardized,"_mPlus1_SKAT_kernel",kernel,"standardized_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
  #calculate the power for each combination of score and outcome type:
  #rows go IBS, Incomp, AMS, Bin MM; col 1 = cat phenos, col 2 = cont phenos
  power = matrix(nrow = 4, ncol = 2)
  #pull only the pvalues (the even-numbered columns)
  PValsCatPhenos = statsAndPValsCatPhenos[,c(2,4,6,8)]
  PValsContPhenos = statsAndPValsContPhenos[,c(2,4,6,8)]
  #power = share of simulations significant at alpha = 0.05
  for(jj in 1:4){
    power[jj,1] = sum(PValsCatPhenos[,jj] <= 0.05)/numSims
    power[jj,2] = sum(PValsContPhenos[,jj] <= 0.05)/numSims
  }
  #write out power results
  write.csv(power, file = paste0(path,"/Power_Results_",percentageAssoc,"SNPsAssociated_",ld,"_",weighted,"_",standardized,"_mPlus1_SKAT_kernel",kernel,"standardized_CatAndContPhenos_Prev",YPrev*100,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
}
RunPowerPipelineSKAT_RSNPs_WFunctions = function(chr, gene, numPairs, YPrev, kernel, kernelWeights=c(), Gamma, TrueScore, ORSize, standardizeScores = FALSE, weightedScores = FALSE, scoreWeights, percentageAssoc, LowLD){
  #Run the whole power pipeline using the refactored helpers in
  #ProjectIISourceFunctions_v2.R; calculates SKAT stat and pvalue for comparison.
  #Inputs:
  #chr = chromosome number
  #gene = gene name, in quotes
  #numPairs = number of D/R pairs
  #YPrev = prevalence of binary outcome Y
  #kernel = which kernel to use for SKAT (linear, linear.weighted, IBS, IBS.weighted)
  #kernelWeights = numeric vector of weights for weighted kernels
  #  NOTE(review): not referenced in this body -- weighting is presumably handled
  #  inside RunSKATBinaryAnalysis; TODO confirm
  #Gamma = effect size for score, length 1, can be 0
  #  NOTE(review): not referenced in this body -- TODO confirm
  #TrueScore = IBS.gene, Incomp.gene, AMS.gene, or BinMM.gene
  #ORSize = Small, Medium, or Large for what OR was used for the associated SNP/score
  #standardizeScores = T or F; only used for output file naming in this function
  #weightedScores = T or F; only used for output file naming in this function
  #scoreWeights = m x 1 vector of weights, one weight for each SNP (unused here -- TODO confirm)
  #percentageAssoc = percentage of SNPs associated with outcome (either 5, 25, 50, 75, or 100)
  #LowLD = TRUE or FALSE whether the associated SNPs are in low LD or high LD
  #Outputs:
  #No direct return value; writes stats/p-values and power results to csv files under `path`.
  library(SKAT)
  library(parallel)
  #need to define this for naming at the end
  snpOrScore = "RSNP"
  #number of sims always the same
  numSims = 100
  #path to the HapGen-generated data for this gene / sample size
  path = paste0("/home/vlynn/Paper_II_Sims/HapGen_Files/",gene,"_Results_",numPairs,"Pairs")
  #source the needed functions (v2 of the helper library)
  source("/home/vlynn/Paper_II_Sims/HapGen_Files/Scripts/ProjectIISourceFunctions_v2.R")
  #one worker task per simulation; each returns a 2 x 8 block of stats/p-values
  statsAndPValsMat = mclapply(seq_len(numSims), function(ii){
    #generate alternate phenotypes with R SNPs associated
    #BUG FIX: percentAssoc was previously passed the undefined name `percentAssoc`
    #(the function parameter is `percentageAssoc`), so every worker errored.
    PhenosList = CalcAltPhenotypeData_RSNPs(chr = chr, numPairs = numPairs, simNum = ii, YPrev = YPrev, gene = gene, path = path, ORSize = ORSize, LowLD = LowLD, percentAssoc = percentageAssoc, TrueScore = TrueScore)
    #run SKAT analysis
    # statsAndPValsMat = RunSKATAnalysis(PhenoList = PhenosList, kernel = kernel)
    statsAndPValsMat = RunSKATBinaryAnalysis(PhenoList = PhenosList, kernel = kernel)
    print(paste0("Simulation ",ii," is complete."))
    statsAndPValsMat
  }, mc.cores = 4)
  #stack the per-simulation blocks into a numSims x 16 matrix (Cat tests in cols 1-8)
  statsAndPValsAll = matrix(unlist(statsAndPValsMat),nrow = numSims, ncol = 16, byrow = TRUE)
  #separate into cat and cont phenos
  statsAndPValsCatPhenos = statsAndPValsAll[,1:8]
  statsAndPValsContPhenos = statsAndPValsAll[,9:16]
  #define values for naming conventions
  if(LowLD == TRUE){
    ld = "LowLD"
  } else {
    ld = "HighLD"
  }
  if(weightedScores == FALSE){
    weighted = ""
  } else{
    weighted = "WeightedScores"
  }
  if(standardizeScores == FALSE){
    standardized = ""
  } else {
    standardized = "StandardizedScores"
  }
  #write out the Stats and p values
  write.csv(statsAndPValsCatPhenos, file = paste0(path,"/Power_CatPhenos_",weighted,"_",standardized,"_Prev",YPrev*100,"_mPlus1_SKAT_kernel",kernel,"_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
  write.csv(statsAndPValsContPhenos, file = paste0(path,"/Power_ContPhenos_",weighted,"_",standardized,"_mPlus1_SKAT_kernel",kernel,"_StatsAndPValues_",percentageAssoc,"SNPsAssociated_",ld,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
  #calculate the power for each combination of score and outcome type:
  #rows go IBS, Incomp, AMS, Bin MM; col 1 = cat phenos, col 2 = cont phenos
  power = matrix(nrow = 4, ncol = 2)
  #pull only the pvalues (the even-numbered columns)
  PValsCatPhenos = statsAndPValsCatPhenos[,c(2,4,6,8)]
  PValsContPhenos = statsAndPValsContPhenos[,c(2,4,6,8)]
  #power = share of simulations significant at alpha = 0.05
  for(jj in 1:4){
    power[jj,1] = sum(PValsCatPhenos[,jj] <= 0.05)/numSims
    power[jj,2] = sum(PValsContPhenos[,jj] <= 0.05)/numSims
  }
  #write out power results
  write.csv(power, file = paste0(path,"/Power_Results_",percentageAssoc,"SNPsAssociated_",ld,"_",weighted,"_",standardized,"_mPlus1_SKAT_kernel",kernel,"_CatAndContPhenos_Prev",YPrev*100,"_TrueScore",TrueScore,"_",ORSize,"OR_assocSNPOrScore",snpOrScore,".csv"))
}
|
d83b84e6c1eddda7f4806fb9a15daeeca81041de | b08b51f68b7a3e8adfb448075b766c9c5c2b90af | /plot3.R | 217a74d82fee5ac287670e3edbc9105a7a55ad15 | [] | no_license | zchen2015/ExData_Plotting1 | 4bf9b9abdfea0c2c101d4b2002d9625f7e057d4c | f0aa1782d6fcb0bd92b90656cb996c7cb62f7b9e | refs/heads/master | 2021-01-15T22:39:04.753343 | 2015-08-09T13:21:14 | 2015-08-09T13:21:14 | 40,303,967 | 0 | 0 | null | 2015-08-06T12:37:41 | 2015-08-06T12:37:41 | null | UTF-8 | R | false | false | 1,185 | r | plot3.R | ## download data file and unzip the file
# Exploratory-data-analysis project, plot 3: download the UCI household power
# consumption data, subset it to 2007-02-01 / 2007-02-02, and draw the three
# sub-metering series into plot3.png (480x480).
print("downloading data ...")
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, destfile="exdata-data-household_power_consumption.zip", method="libcurl", quiet=TRUE)
unzip("exdata-data-household_power_consumption.zip")
## load data and get subset
print("loading data ...")
# infer the column classes from the first 10 rows, then reuse them for the full
# read (faster and keeps types stable); "?" marks missing values in this file
cla <- sapply(read.csv("household_power_consumption.txt", sep=";", na.strings="?", nrow=10), class)
data <- read.csv("household_power_consumption.txt", sep=";", na.strings="?", colClasses=cla)
data[,"Date"] <- as.Date(data$Date, "%d/%m/%Y")
# keep only the two target days (left-assignment replaces the old `->` form)
da1 <- subset(data, Date=="2007-02-01" | Date=="2007-02-02")
## create a combined datetime column for the x axis
da2 <- cbind(da1, datetime=as.POSIXct(paste(da1$Date, da1$Time, sep=" ")))
## plot3: three sub-metering lines plus legend
print("make plot ...")
# spell out width/height instead of relying on partial argument matching (w=, h=)
png("plot3.png", width=480, height=480)
plot(da2$datetime, da2$Sub_metering_1, ylab="Energy sub metering", type="l", xlab="")
# lines() is the idiomatic form of points(..., type="l")
lines(da2$datetime, da2$Sub_metering_2, col="red")
lines(da2$datetime, da2$Sub_metering_3, col="blue")
legend("topright", col=c("black","red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1)
dev.off()
|
ff0a35a3a92f2a178fdd9929924d29cec0513076 | c3e542e5b10011f2f209779def58a2a2df393335 | /man/addInterceptDS.Rd | c4764a1b9a5eeb3388c1ca554de9f4a7bfef68fd | [] | no_license | YouchengZHANG/dsMTLBase | 2d7b738b44e6cb54b9477ba416fbb1839672436b | 791ac77ff8146af611e53691e646c9056ba5eb7a | refs/heads/main | 2023-07-12T19:21:50.208248 | 2021-08-07T16:10:19 | 2021-08-07T16:10:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 826 | rd | addInterceptDS.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addInterceptDS.R
\name{addInterceptDS}
\alias{addInterceptDS}
\title{Set intercept model}
\usage{
addInterceptDS(x.mat, intercept = FALSE)
}
\arguments{
\item{x.mat}{The name of design matrix}
\item{intercept}{The indicator to turn on (=TRUE) or off (=FALSE) the intercept model}
}
\value{
The design matrix
}
\description{
Option to include or omit an intercept column in the design matrix.
}
\details{
If intercept==TRUE, a column of ones is attached on the left side of x.mat and the combined matrix is returned. If intercept==FALSE,
x.mat is returned unchanged. In the linear regression \eqn{y=x \beta+b}, the non-intercept model corresponds to b==0; in this case, the design matrix
and response should be z-standardized. The default is the non-intercept model.
}
\author{
Han Cao
}
|
d10d70e866725ca278e3f12c2fed1f35d09d7e42 | 48a44530ac143182464470e03b34a0c05b85f2ae | /data/data_clean/data_for_figures.R | 11431e22e185dfb1edc5a05cea522ff96e5abb65 | [] | no_license | xlulu/inls641_OlympicAnalysisVis | 0a9ffd5f397f3aa50dd5a8f0eec3027b4f97d170 | 94eb495526984242cfc3ccbdf3c57e5a8b89bf3c | refs/heads/master | 2020-03-29T12:46:17.750430 | 2019-03-26T13:46:12 | 2019-03-26T13:46:12 | 149,918,297 | 1 | 0 | null | 2018-11-10T19:51:39 | 2018-09-22T20:54:51 | R | UTF-8 | R | false | false | 867 | r | data_for_figures.R | library(tidyverse)
# Prepare data for the Olympic-analysis visualisation figures: builds
# athlete_board_data.csv (athlete characteristics) and medal_board_data.csv
# (medal counts by country/year/sport). Assumes tidyverse is already attached.
library(readr)
library(jsonlite)
setwd("/Users/ltl/Desktop/2018-Fall/INLS641/Project/data_for_figures")
pdata <- read.csv("./Project_Data.csv")
# as.tibble() is deprecated; as_tibble() is the supported spelling
pdata_t <- as_tibble(pdata)
# Athlete board: one row per athlete/event with demographic columns
athlete_board_data <-
  pdata_t %>% select(ID, Sex, Age, Height, Weight, Sport, Event)
colnames(athlete_board_data)[1] <- "Athlete_Id"
write.csv(athlete_board_data, '/Users/ltl/Desktop/2018-Fall/INLS641/Project/data_for_figures/athlete_board_data.csv')
# Medal board: drop rows with any missing value before writing
mdata <- read.csv("/Users/ltl/Desktop/2018-Fall/INLS641/Project/data_for_figures/Metal.csv")
mdata_t <- as_tibble(mdata)
medal_board_data <-
  mdata_t %>%
  select(NOC, Medal, Year, Sport, Event) %>%
  na.omit()
write.csv(medal_board_data, '/Users/ltl/Desktop/2018-Fall/INLS641/Project/data_for_figures/medal_board_data.csv')
# re-read the file just written (relative path resolves to the same directory)
data <- read.csv("./medal_board_data.csv")
data_t <- as_tibble(data)
3670799a3c71ba66576832c774022abc45e935b0 | bccbc580218478dedd290ff5559ad82956c6e1a4 | /data/CSV to JSON.R | 17ede952a1b9a533a9996b66ce435f81c5a7ebf8 | [] | no_license | afdta/youth-employment-draft4 | 373b51e1514132a3f64add9b8efbe5c1e6aeafa4 | 5c57315b7b86f571a1a72f1b69d781e5dc1ba26d | HEAD | 2016-09-14T14:48:07.670824 | 2016-05-12T19:35:41 | 2016-05-12T19:35:41 | 58,666,488 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,492 | r | CSV to JSON.R | #to do: test the geo codes here
#question: why are some obs missing? if num, denom are both NA? is 2000 MOE zero always?
library("reshape2")
library("jsonlite")
# NOTE(review): two setwd() calls in a row -- only the second takes effect;
# the first looks like a stale path from another machine. TODO confirm and drop one.
setwd("~/Projects/Brookings/DataViz/youth-employment/data/CSV/")
setwd("~/Projects/Brookings/youth-employment/data/CSV/")
# Read every file in the working directory into a data.frame named after the
# file with ".csv" stripped, e.g. "DY_Race.csv" -> DY_Race.
# Assumes the directory contains only CSV files -- TODO confirm.
for(f in list.files()){
	assign(sub("\\.csv","",f), read.csv(f, na.strings=c(""," "), stringsAsFactors=FALSE) )
}
# drop the loop variable so it does not linger in the global environment
rm(list=c("f"))
lv <- function(df){
  #Print the column names of `df` (or element names of a list) as a numbered
  #listing, one per line, in the form "(i)...name". Interactive helper for
  #inspecting wide data frames. Invisibly returns NULL.
  nm <- names(df)
  if(length(nm) == 0){
    #nothing to list; the old 1:length(nm) form printed garbage here
    return(invisible(NULL))
  }
  #seq_along() instead of 1:length(); dead variable `nl` removed
  s <- paste("(", seq_along(nm), ")...", nm, sep="", collapse="\n")
  cat(s)
}
#store final processed data in three lists:
#dy = disconnected youth, er = employment rate, ur = unemployment rate
dy <- list()
er <- list()
ur <- list()
#slim down the DY data -- the numeric indices are column positions tied to the
#raw CSV layouts; NOTE(review): verify them if the upstream CSVs ever change
dy$char_edu <- DY_Char_Edu[,c(1,3,5,10,12,17,19,24,26,31:37)]
dy$char_nativity <- DY_Char_Nativity[,c(1,3,5,10,12,17,19,24:28)]
dy$char_race <- DY_Char_Race[,c(1,3,5,10,12,17,19,24,26,31,33,38,40,45,46:55)]
dy$char_sex <- DY_Char_Sex[,c(1,3,5,10,12,17,19,24:28)]
dy$nativity <- DY_Nativity[,c(1,3,5,7,12,14,19,20,21)]
dy$race <- DY_Race[,c(1,3,5,7,12,14,19,20,21)]
dy$sex <- DY_Sex[,c(1,3,5,7,12,14,19,20,21)]
dy$overall <- DY_Overall[,c(1,3,5,10,12,17,18,19)]
#slim down the ER data; for 16-19 year olds, drop education levels that age
#group cannot have (some college / associate / BA+)
er$edu <- ER_Edu[!(ER_Edu$age_recode=="16-19" & (ER_Edu$ed_label %in% c("SC","AA","BA+"))),c(1,2,4,13,7,12,15,20,21,22)]
er$race <- ER_Race[,c(1,2,4,13,7,12,15,20,21,22)]
er$sex <- ER_Sex[,c(1,2,4,13,7,12,15,20,21,22)]
er$overall <- ER_Overall[,c(1,2,4,6,11,13,18,19,20)]
#slim down the UR data (same column scheme and 16-19 edu filter as ER)
ur$edu <- UR_Edu[!(UR_Edu$age_recode=="16-19" & (UR_Edu$ed_label %in% c("SC","AA","BA+"))),c(1,2,4,13,7,12,15,20,21,22)]
ur$race <- UR_Race[,c(1,2,4,13,7,12,15,20,21,22)]
ur$sex <- UR_Sex[,c(1,2,4,13,7,12,15,20,21,22)]
ur$overall <- UR_Overall[,c(1,2,4,6,11,13,18,19,20)]
#make rectangular -- give every table the full set of label columns by filling
#the dimensions a table does not vary over with "all"-placeholder codes
#(ae = all edu, ar = all race, bs = both sexes), so the pieces rbind cleanly
er$edu[,c("re_label","sex_label")] <- list(a="ar",b="bs")
er$edu$ed_label <- sub("\\+","Plus",er$edu$ed_label)   #"BA+" -> "BAPlus" (JSON-safe key)
er$race[,c("ed_label","sex_label")] <- list(a="ae",b="bs")
er$sex[,c("ed_label","re_label")] <- list(a="ae",b="ar")
er$overall[,c("ed_label","sex_label","re_label")] <- list(a="ae",b="bs",c="ar")
er$all <- rbind(er$edu,er$race,er$sex,er$overall)
#interactive sanity check (value printed, not stored)
max(er$all$ER, na.rm=TRUE)
#recode labels to short JSON-friendly tokens: "16-19"->"16to19",
#"Male"/"Female"->"M"/"F", "Asian"->"A", "Black"->"B", etc.
er$all$age_recode <- sub("-","to",er$all$age_recode)
er$all$sex_label <- sub("ale|emale","",er$all$sex_label)
er$all$re_label <- sub("sian|lack|hite|ther|atino","",er$all$re_label)
#same rectangularization and recoding for the UR tables
ur$edu[,c("re_label","sex_label")] <- list(a="ar",b="bs")
ur$edu$ed_label <- sub("\\+","Plus",ur$edu$ed_label)
ur$race[,c("ed_label","sex_label")] <- list(a="ae",b="bs")
ur$sex[,c("ed_label","re_label")] <- list(a="ae",b="ar")
ur$overall[,c("ed_label","sex_label","re_label")] <- list(a="ae",b="bs",c="ar")
ur$all <- rbind(ur$edu,ur$race,ur$sex,ur$overall)
max(ur$all$UR, na.rm=TRUE)
#hold aside implausibly high unemployment rates for inspection
#NOTE(review): highUR is not used again in this section -- diagnostic only
highUR <- ur$all[ur$all$UR>0.9 & !is.na(ur$all$UR), ]
ur$all$age_recode <- sub("-","to",ur$all$age_recode)
ur$all$sex_label <- sub("ale|emale","",ur$all$sex_label)
ur$all$re_label <- sub("sian|lack|hite|ther|atino","",ur$all$re_label)
#DY tables vary over nativity instead of education (an = all nativity)
dy$nativity[,c("re_label","sex_label")] <- list(a="ar",b="bs")
dy$race[,c("fb_label","sex_label")] <- list(a="an",b="bs")
dy$sex[,c("re_label","fb_label")] <- list(a="ar",b="an")
dy$overall[,c("re_label","sex_label","fb_label")] <- list(a="ar",b="bs",c="an")
dy$all <- rbind(dy$overall, dy$nativity, dy$race, dy$sex)
dy$all$age_recode <- sub("-","to",dy$all$age_recode)
dy$all$sex_label <- sub("ale|emale","",dy$all$sex_label)
dy$all$re_label <- sub("sian|lack|hite|ther|atino","",dy$all$re_label)
max(dy$all$ShareDY, na.rm=TRUE)
splitter <- function(df, keep, renames = keep, dy = FALSE) {
  # Nest a flat indicator table into the list hierarchy used for JSON output:
  # metro (Assigned_CBSA) -> age group -> sex -> race -> education (or nativity
  # when dy = TRUE). Each leaf is a data frame holding the `keep` columns,
  # renamed to `renames`; the SH and SH_M columns (which must be among the
  # renamed names) are rescaled from proportions to one-decimal percentages.
  # If the table has a YEAR column, rows are restricted to 2008-2014 and YEAR
  # is recoded as an offset from 2000 (2008 -> 8).
  working <- df
  if ("YEAR" %in% names(working)) {
    working <- working[working$YEAR %in% 2008:2014, ]
    working$YEAR <- working$YEAR - 2000
  }
  # the innermost split dimension differs for the disconnected-youth data
  level_cols <- c("Assigned_CBSA", "age_recode", "sex_label", "re_label",
                  if (dy) "fb_label" else "ed_label")
  # recursive descent replaces the original five-deep lapply pyramid
  nest_by <- function(chunk, cols) {
    if (length(cols) == 0L) {
      leaf <- chunk[keep]
      names(leaf) <- renames
      leaf$SH <- round(leaf$SH * 100, 1)
      leaf$SH_M <- round(leaf$SH_M * 100, 1)
      return(leaf)
    }
    lapply(split(chunk, chunk[[cols[1]]]), nest_by, cols = cols[-1])
  }
  nest_by(working, level_cols)
}
splitDY <- function(df) {
  # Nest a disconnected-youth characteristics table two levels deep:
  # metro (Assigned_CBSA) -> age group, recoding "16-19" style labels to
  # JSON-friendly "16to19" first. Returns a list of lists of data frames.
  df$age_recode <- sub("-", "to", df$age_recode)
  lapply(split(df, df$Assigned_CBSA),
         function(metro) split(metro, metro$age_recode))
}
#Nest each dataset for JSON output (metro -> age -> sex -> race -> edu/nativity),
#keeping the listed columns under short names (Y=year, E/U=numerator, P=denominator,
#SH=share in percent, SH_M=share margin of error)
ERSplit = splitter(er$all, c("YEAR","EMP","denom","ER","MOE_ER"), c("Y","E","P","SH","SH_M"))
URSplit = splitter(ur$all, c("YEAR","UNEMP","denom","UR","MOE_UR"), c("Y","U","P","SH","SH_M"))
DYSplits <- list()
DYSplits$Rates <- splitter(dy$all, c("DY","denom","ShareDY","MOE_ShareDY"), c("DY","P","SH","SH_M"), TRUE)
DYSplits$Char <- list()
DYSplits$Char$Edu <- splitDY(dy$char_edu)
DYSplits$Char$Race <- splitDY(dy$char_race)
DYSplits$Char$Nativity <- splitDY(dy$char_nativity)
DYSplits$Char$Sex <- splitDY(dy$char_sex)
#write the nested lists to JSON, one file per measure (paths relative to the CSV dir)
writeLines(toJSON(ERSplit, digits=5, na="null"), "../er.json")
writeLines(toJSON(URSplit, digits=5, na="null"), "../ur.json")
writeLines(toJSON(DYSplits, digits=5, na="null"), "../dy.json")
#pull the all-group leaves (bs = both sexes, ar = all races, ae = all edu /
#an = all nativity) to build a compact "overall" summary per metro and age group
overall <- list()
overall$er <- lapply(ERSplit, function(e){
	r <- list("16to19"=e[["16to19"]]$bs$ar$ae, "20to24"=e[["20to24"]]$bs$ar$ae, "25to54"=e[["25to54"]]$bs$ar$ae )
	return(r)
})
overall$ur <- lapply(URSplit, function(e){
	r <- list("16to19"=e[["16to19"]]$bs$ar$ae, "20to24"=e[["20to24"]]$bs$ar$ae, "25to54"=e[["25to54"]]$bs$ar$ae )
	return(r)
})
overall$dy <- lapply(DYSplits$Rates, function(e){
	r <- list("16to19"=e[["16to19"]]$bs$ar$an, "20to24"=e[["20to24"]]$bs$ar$an, "25to54"=e[["25to54"]]$bs$ar$an )
	return(r)
})
writeLines(toJSON(overall, digits=5, na="null"), "../overall.json")
#interactive sanity checks: metro-name order should match between the overall
#summaries and the full splits (counts printed at the console, not stored)
sum(names(overall$er) == names(ERSplit))
sum(names(overall$ur) == names(URSplit))
sum(names(overall$dy) == names(DYSplits$Rates))
#some checks (ad-hoc diagnostics; results are printed or stored for inspection only)
unique(er$all[is.na(er$all$MOE_ER) & !is.na(er$all$ER),c("YEAR")]) #MOE is only NA in 2000
#why aren't there a multiple of metros observations?
Akron <- er$edu[er$edu$Assigned_CBSA==10420,]   #single-metro case study (Akron, OH)
AkronNA <- Akron[is.na(Akron$EMP) | is.na(Akron$denom),]
with(Akron, table(age_recode, ed_label))
with(er$edu, table(age_recode, ed_label))
allNA <- er$edu[is.na(er$edu$EMP) & is.na(er$edu$denom),] #no obs with both num and denom missing
lapply(ur, function(e){with(e, table(YEAR))})
Akron <- ur$sex[ur$sex$Assigned_CBSA==10420,]   #note: reuses/overwrites the Akron variable
#ALTERNATIVE STRUCTURE
# Cast one employment-rate measure (column v.var of er$all) into wide
# form -- one column per sex/race/education combination -- then nest the
# result with doublesplit().
puller <- function(v.var) {
  wide <- dcast(
    er$all,
    YEAR + Assigned_CBSA + age_recode ~ sex_label + re_label + ed_label,
    value.var = v.var
  )
  doublesplit(wide)
}
# Same as puller(), but for the unemployment-rate table ur$all.
pullur <- function(v.var) {
  wide <- dcast(
    ur$all,
    YEAR + Assigned_CBSA + age_recode ~ sex_label + re_label + ed_label,
    value.var = v.var
  )
  doublesplit(wide)
}
# Same idea for the disconnected-youth table dy$all: no YEAR dimension,
# and nativity (fb_label) replaces education in the column split.
pulldy <- function(v.var) {
  wide <- dcast(
    dy$all,
    Assigned_CBSA + age_recode ~ sex_label + re_label + fb_label,
    value.var = v.var
  )
  doublesplit(wide)
}
## fer: the "ALTERNATIVE STRUCTURE" export of the employment-rate data --
## one doubly-split wide table per value column of er$all
fer <- list()
fer$emp <- puller("EMP")
fer$emp_moe <- puller("moe_n")
fer$tot <- puller("denom")
fer$tot_moe <- puller("moe_d")
fer$er <- puller("ER")
fer$er_moe <- puller("MOE_ER")
fer$json <- toJSON(fer, digits=5, na="null") |
8841b658312bcefd072f47f6927b6129bdb16968 | 990a049d3ad2341cc32b82e14ee94a342f9d3a8f | /man/day4CheckSequence.Rd | 4c34726bd921f7037341059852549ad5203d2a6c | [
"Apache-2.0"
] | permissive | JDOsborne1/AOC2019 | 7fb5644589a283afb5e565b4e3a4162922c2315e | 5c3e50a8b2c1b0de7ea0499a713fea396e60cc87 | refs/heads/master | 2020-09-23T08:12:24.999663 | 2019-12-14T12:21:28 | 2019-12-14T12:21:28 | 225,449,016 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 429 | rd | day4CheckSequence.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Day_4.R
\name{day4CheckSequence}
\alias{day4CheckSequence}
\title{Sequence checker}
\usage{
day4CheckSequence(vector_split_sequence, min_val, max_val, part2 = FALSE)
}
\arguments{
\item{vector_split_sequence}{the vector sequence to check}
\item{min_val}{the lower bound}
\item{max_val}{the upper bound}

\item{part2}{logical; if \code{TRUE}, apply the part-2 variant of the check (defaults to \code{FALSE})}
}
\value{
The result of checking the supplied sequence against the given bounds.
}
\description{
Sequence checker
}
|
0b5555397a402a5271387da9a0dcf17bb5a1b1ca | 53d7e351e21cc70ae0f2b746dbfbd8e2eec22566 | /man/xmu_twin_add_WeightMatrices.Rd | a557dedddb26d0e77e8fe55a1498210af16396b4 | [] | no_license | tbates/umx | eaa122285241fc00444846581225756be319299d | 12b1d8a43c84cc810b24244fda1a681f7a3eb813 | refs/heads/master | 2023-08-31T14:58:18.941189 | 2023-08-31T09:52:02 | 2023-08-31T09:52:02 | 5,418,108 | 38 | 25 | null | 2023-09-12T21:09:45 | 2012-08-14T20:18:01 | R | UTF-8 | R | false | true | 4,342 | rd | xmu_twin_add_WeightMatrices.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xmu_make_top_twin_models.R
\name{xmu_twin_add_WeightMatrices}
\alias{xmu_twin_add_WeightMatrices}
\title{Add weight matrices to twin models.}
\usage{
xmu_twin_add_WeightMatrices(model, mzWeights = NULL, dzWeights = NULL)
}
\arguments{
\item{model}{umx-style twin model}
\item{mzWeights}{data for MZ weights matrix}
\item{dzWeights}{data for DZ weights matrix}
}
\value{
\itemize{
\item model
}
}
\description{
Add weight models (MZw, DZw) with matrices (e.g. mzWeightMatrix) to a twin model, and
update \code{mxFitFunctionMultigroup}. This yields a weighted model with vector objective.
To weight objective functions in OpenMx, you specify a container model that applies the weights.
m1 is the model with no weights, but with the "vector = TRUE" option added to the FIML objective.
This option makes FIML return individual likelihoods for each row of the data (rather than a single
-2LL value for the model).
You then optimize weighted versions of these likelihoods by building additional models containing
weight data and an algebra that multiplies the likelihoods from the first model by the weight vector.
}
\examples{
tmp = umx_make_twin_data_nice(data=twinData, sep="", zygosity="zygosity", numbering= 1:2)
m1 = umxACE(selDVs = "wt", data = tmp, dzData = "DZFF", mzData = "MZFF", autoRun= FALSE)
m1$MZ$fitfunction$vector= TRUE
tmp = xmu_twin_add_WeightMatrices(m1,
mzWeights= rnorm(nrow(m1$MZ$data$observed)),
dzWeights= rnorm(nrow(m1$DZ$data$observed))
)
}
\seealso{
Other xmu internal not for end user:
\code{\link{umxModel}()},
\code{\link{umxRenameMatrix}()},
\code{\link{umx_APA_pval}()},
\code{\link{umx_fun_mean_sd}()},
\code{\link{umx_get_bracket_addresses}()},
\code{\link{umx_make}()},
\code{\link{umx_standardize}()},
\code{\link{umx_string_to_algebra}()},
\code{\link{xmuHasSquareBrackets}()},
\code{\link{xmuLabel_MATRIX_Model}()},
\code{\link{xmuLabel_Matrix}()},
\code{\link{xmuLabel_RAM_Model}()},
\code{\link{xmuMI}()},
\code{\link{xmuMakeDeviationThresholdsMatrices}()},
\code{\link{xmuMakeOneHeadedPathsFromPathList}()},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}()},
\code{\link{xmuMaxLevels}()},
\code{\link{xmuMinLevels}()},
\code{\link{xmuPropagateLabels}()},
\code{\link{xmuRAM2Ordinal}()},
\code{\link{xmuTwinSuper_Continuous}()},
\code{\link{xmuTwinSuper_NoBinary}()},
\code{\link{xmuTwinUpgradeMeansToCovariateModel}()},
\code{\link{xmu_CI_merge}()},
\code{\link{xmu_CI_stash}()},
\code{\link{xmu_DF_to_mxData_TypeCov}()},
\code{\link{xmu_PadAndPruneForDefVars}()},
\code{\link{xmu_bracket_address2rclabel}()},
\code{\link{xmu_cell_is_on}()},
\code{\link{xmu_check_levels_identical}()},
\code{\link{xmu_check_needs_means}()},
\code{\link{xmu_check_variance}()},
\code{\link{xmu_clean_label}()},
\code{\link{xmu_data_missing}()},
\code{\link{xmu_data_swap_a_block}()},
\code{\link{xmu_describe_data_WLS}()},
\code{\link{xmu_dot_make_paths}()},
\code{\link{xmu_dot_make_residuals}()},
\code{\link{xmu_dot_maker}()},
\code{\link{xmu_dot_move_ranks}()},
\code{\link{xmu_dot_rank_str}()},
\code{\link{xmu_extract_column}()},
\code{\link{xmu_get_CI}()},
\code{\link{xmu_lavaan_process_group}()},
\code{\link{xmu_make_TwinSuperModel}()},
\code{\link{xmu_make_bin_cont_pair_data}()},
\code{\link{xmu_make_mxData}()},
\code{\link{xmu_match.arg}()},
\code{\link{xmu_name_from_lavaan_str}()},
\code{\link{xmu_path2twin}()},
\code{\link{xmu_path_regex}()},
\code{\link{xmu_print_algebras}()},
\code{\link{xmu_rclabel_2_bracket_address}()},
\code{\link{xmu_safe_run_summary}()},
\code{\link{xmu_set_sep_from_suffix}()},
\code{\link{xmu_show_fit_or_comparison}()},
\code{\link{xmu_simplex_corner}()},
\code{\link{xmu_standardize_ACEcov}()},
\code{\link{xmu_standardize_ACEv}()},
\code{\link{xmu_standardize_ACE}()},
\code{\link{xmu_standardize_CP}()},
\code{\link{xmu_standardize_IP}()},
\code{\link{xmu_standardize_RAM}()},
\code{\link{xmu_standardize_SexLim}()},
\code{\link{xmu_standardize_Simplex}()},
\code{\link{xmu_start_value_list}()},
\code{\link{xmu_starts}()},
\code{\link{xmu_summary_RAM_group_parameters}()},
\code{\link{xmu_twin_check}()},
\code{\link{xmu_twin_get_var_names}()},
\code{\link{xmu_twin_make_def_means_mats_and_alg}()},
\code{\link{xmu_twin_upgrade_selDvs2SelVars}()}
}
\concept{xmu internal not for end user}
|
e9fa6772ab8ab8d4e35070e3151836966246dd82 | 912ad27fe0c462026131613a6cf838d074c6dd59 | /man/updateFit.Rd | d6dd83178169cfd59225b8297092567dbf7f0fe5 | [] | no_license | OakleyJ/MUCM | 8265ee0cf1bbf29f701a1d883aa8ab1f8074655b | 28782a74ef18ce087b1a23fe15df5a67b156083e | refs/heads/master | 2021-01-11T18:17:10.733210 | 2017-10-20T14:45:18 | 2017-10-20T14:45:18 | 69,334,386 | 5 | 7 | null | null | null | null | UTF-8 | R | false | true | 495 | rd | updateFit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/updateFit.R
\name{updateFit}
\alias{updateFit}
\title{Update fit}
\usage{
updateFit(object)
}
\arguments{
\item{object}{An \code{"emulatorFit"} object to update.}
}
\value{
Returns the updated \code{"emulatorFit"} object.
}
\description{
Updates old \code{"emulatorFit"} objects to work with latest
code to allow for improvements to be made within the code.
}
\note{
A warning is issued if the object is updated.
}
|
be3882b8359069ff71d1491a31d17b1ffe29afd8 | f1d6b12c86bfa31aad41fb3cd0bdcc41a4cd55bd | /R/Snowdoop.R | 082d822101349298ca2fd6330d0f87d5af319cc4 | [] | no_license | edwardt/partools | da18d23c481ccdc4823293f4a61fd35217fc67d3 | 23b1a3259f0e14cc79ea2af4990803c72840fae0 | refs/heads/master | 2020-04-01T20:05:02.958542 | 2015-01-31T06:58:18 | 2015-01-31T06:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,211 | r | Snowdoop.R |
# suppose we have a file basename, stored in chunks, say basename.001,
# basename.002 etc.; this function determine the file name for the chunk
# to be handled by node nodenum; the latter is the ID for the executing
# node, partoolsenv$myid, set by setclsinfo()
# Build the file name of the chunk handled by a given node: basename,
# a dot, and the node number zero-padded to ndigs digits, e.g.
# filechunkname("data", 3, 7) == "data.007".  If nodenum is NULL, the
# executing node's own ID (partoolsenv$myid, set by setclsinfo()) is used.
filechunkname <- function (basename, ndigs, nodenum = NULL)
{
    if (is.null(nodenum)) {
        pte <- getpte()
        nodenum <- pte$myid
    }
    # formatC() zero-pads to the requested width; unlike the previous
    # rep("0", ndigs - nchar(...)) construction it does not error when
    # the node number already has ndigs or more digits.  (The unused
    # 'tmp <- basename' assignment has also been dropped.)
    paste0(basename, ".", formatC(nodenum, width = ndigs, format = "d", flag = "0"))
}
# distributed file sort on cls, based on column number colnum of input;
# file name from basename, ndigs; bucket sort, with categories
# determined by first sampling nsamp from each chunk; each node's output
# chunk written to file outname (plus suffix based on node number) in
# the node's global space
# Distributed bucket sort of a chunked file on cluster 'cls', keyed on
# column 'colnum'; the input chunk names come from (basename, ndigs).
# Bucket boundaries are estimated from 'nsamp' sampled rows per chunk.
# Each node's sorted bucket is stored in the global variable 'outname'
# in that node's workspace.  Returns 0 on completion.
filesort <- function(cls,basename,ndigs,colnum,
      outname,nsamp=1000,header=FALSE,sep="")
{
   # load partools on every worker and record cluster info (node IDs)
   clusterEvalQ(cls,library(partools))
   setclsinfo(cls)
   # sample the sort column of each chunk to estimate its distribution
   samps <- clusterCall(cls,getsample,basename,ndigs,colnum,
      header=header,sep=sep,nsamp)
   samp <- Reduce(c,samps)
   # per-node bucket boundaries from the pooled sample's quantiles
   bds <- getbounds(samp,length(cls))
   # each node gathers, filters and sorts the rows in its bucket
   clusterApply(cls,bds,mysortedchunk,
      basename,ndigs,colnum,outname,header,sep)
   0
}
# Read the first nsamp records of this node's chunk of the distributed
# file and return the values of column colnum (used by filesort() to
# estimate the distribution of the sort key).
getsample <- function(basename, ndigs, colnum,
                      header = FALSE, sep = "", nsamp)
{
    chunk <- read.table(filechunkname(basename, ndigs),
                        nrows = nsamp, header = header, sep = sep)
    chunk[, colnum]
}
# Compute per-node bucket boundaries for the distributed sort.  Given a
# pooled sample of the sort-key column, returns a list with one element
# per node: c(lower, upper).  The lower bound of node 1 and the upper
# bound of the last node are NA (unbounded); node i keeps values in
# (lower, upper].  Interior cut points are sample quantiles.
getbounds <- function(samp, numnodes) {
   q <- quantile(samp, ((2:numnodes) - 1) / numnodes)
   # preallocate the result instead of growing it; the original also
   # sorted 'samp' here, but the sorted copy was never used (dead work)
   bds <- vector("list", numnodes)
   for (i in seq_len(numnodes)) {
      mylo <- if (i > 1) q[i-1] else NA
      myhi <- if (i < numnodes) q[i] else NA
      bds[[i]] <- c(mylo,myhi)
   }
   bds
}
# Worker-side step of filesort(): scan every chunk of the distributed
# file, keep only rows whose sort-key value falls in this node's bucket
# (mybds = c(lower, upper), NA meaning unbounded at that end), sort the
# collected rows on column colnum, and store the result in this worker's
# global variable named 'outname'.
mysortedchunk <- function(mybds, basename, ndigs, colnum, outname, header, sep) {
   pte <- getpte()
   me <- pte$myid     # this node's ID (1-based)
   ncls <- pte$ncls   # total number of nodes / chunks
   mylo <- mybds[1]
   myhi <- mybds[2]
   for (i in seq_len(ncls)) {
      # 'sep' is now passed by name; it was previously supplied
      # positionally, silently relying on read.table's argument order
      tmp <- read.table(filechunkname(basename, ndigs, i),
                        header = header, sep = sep)
      tmpcol <- tmp[, colnum]
      if (me == 1) {                 # first bucket: no lower bound
         tmp <- tmp[tmpcol <= myhi, ]
      } else if (me == ncls) {       # last bucket: no upper bound
         tmp <- tmp[tmpcol > mylo, ]
      } else {
         tmp <- tmp[tmpcol > mylo & tmpcol <= myhi, ]
      }
      mychunk <- if (i == 1) tmp else rbind(mychunk, tmp)
   }
   sortedmchunk <- mychunk[order(mychunk[, colnum]), ]
   # deposit the sorted bucket in the worker's global environment
   assign(outname, sortedmchunk, envir = .GlobalEnv)
}
# split a file into chunks, one per cluster node
# Split a text file into contiguous chunks, one per cluster node; chunk i
# is written to filechunkname(basename, ndigs, i).  If header is TRUE the
# first line is treated as a header and replicated atop every chunk.
filesplit <- function(cls, basename, header = FALSE) {
   # line count via 'wc -l'; the count is the second-to-last token of the
   # output (the last token is the file name -- assumes no spaces in it)
   cmdout <- system(paste("wc -l", basename), intern = TRUE)
   tmp <- strsplit(cmdout[[1]][1], " ")[[1]]
   nlines <- as.integer(tmp[length(tmp) - 1])
   con <- file(basename, open = "r")
   # close the input connection even on error; the original version
   # never closed it, leaking the connection
   on.exit(close(con), add = TRUE)
   if (header) {
      hdr <- readLines(con, 1)
      nlines <- nlines - 1
   }
   lcls <- length(cls)
   # digits needed for the chunk suffix; ceiling(log10(lcls)) was wrong
   # at exact powers of 10 (0 digits for 1 node, 1 digit for 10 nodes)
   ndigs <- nchar(as.character(lcls))
   chunks <- clusterSplit(cls, 1:nlines)
   chunksizes <- sapply(chunks, length)
   for (i in seq_len(lcls)) {
      chunk <- readLines(con, chunksizes[i])
      fn <- filechunkname(basename, ndigs, i)
      conout <- file(fn, open = "w")
      if (header) writeLines(hdr, conout)
      writeLines(chunk, conout)
      close(conout)
   }
}
|
## Find the best hospital in a state.
##
## Reads "outcome-of-care-measures.csv" from the working directory and
## returns the name of the hospital in 'state' with the lowest 30-day
## mortality rate for 'outcome' ("heart attack", "heart failure" or
## "pneumonia").  Ties are broken alphabetically by hospital name.
## Invalid state/outcome arguments raise an error.
best <- function(state, outcome) {
    ## 30-day mortality-rate column index for each supported outcome
    ## (replaces the three duplicated if/else branches of the original)
    outcome_cols <- c("heart attack" = 11,
                      "heart failure" = 17,
                      "pneumonia" = 23)

    ## Read outcome data (all columns as character; rates converted below)
    stored_data <- read.csv("outcome-of-care-measures.csv",
                            colClasses = "character")

    ## Validate inputs.  BUG FIX: the original wrapped these checks in
    ## try(), which caught the stop() itself, so bad input never actually
    ## aborted execution.
    if (!(state %in% stored_data$State)) {
        stop("invalid state")
    }
    if (!(outcome %in% names(outcome_cols))) {
        stop("invalid outcome")
    }

    ## Keep hospitals in the requested state (vectorized subsetting;
    ## the original grew a data frame row by row with rbind in a loop)
    df <- stored_data[stored_data$State == state, ]

    ## Convert the relevant rate column; "Not Available" becomes NA
    ## (warnings from the coercion are expected and suppressed)
    rates <- suppressWarnings(as.numeric(df[[outcome_cols[[outcome]]]]))

    ## Hospital name(s) (column 2) with the minimum rate; return the
    ## alphabetically first name when there is a tie
    winners <- df[which(rates == min(rates, na.rm = TRUE)), 2]
    sort(winners)[1]
}
|
cb3403fefdb5c5884fae62d9224ef25b55e7e4b1 | 5e602844e9bdf90b2ec51b4f8b985f765934a63d | /ExceptionalDayandEffectFormat.R | d81bffe51eb6d2c952c0521e5ffa40f681f3a975 | [] | no_license | Xiaoxi-X-G/CallCentreSS | 2a69927998208796240b032fe1590f3281857e4d | fc82a9430b3db91597a04f09005d4a365a7c56f3 | refs/heads/master | 2021-01-24T11:28:14.634839 | 2016-10-07T01:43:42 | 2016-10-07T01:43:42 | 70,206,941 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,619 | r | ExceptionalDayandEffectFormat.R | ExceptionalDayandEffectFormat<-function(ExceptionalDays, FirstDate, FinishDateT){
## ExceptionalDatesCSV = ["ExceptionalDate","Annual","ForecastIgnoreHistory","ForecastDateSpecific","ExceptionalDayTypeID"]
## FirstDate, FinishDate.T = Character
## Output = list(ExceptionalDays, ProximityDays)
#########################################################################
##### I: Complete Exceptional Days, from FirstDate to FinishDate.T
##### II: Find proximity days
#########################################################################
##### I: Complete Exceptional Days, from FirstDate to FinishDate.T
#ExceptionalDayandEffects<-ExceptionalDayandEffectFormatV2(ExceptionalDates, FirstDate, FinishDateT)
#################
## extract the calendar years covered by the forecast window from the
## "YYYY-MM-DD" date strings
FirstYear.temp<- strsplit(FirstDate, split = "-")
FirstYear <- FirstYear.temp[[1]]
LastYear.temp<- strsplit(FinishDateT, split = "-")
LastYear <- LastYear.temp[[1]]
AllYears<-seq(from = as.integer(FirstYear[1]), to = as.integer(LastYear[1]), by = 1)
## normalise column types; missing Annual flags are treated as FALSE
ExceptionalDays$ExceptionalDate<- as.Date(ExceptionalDays$ExceptionalDate)
ExceptionalDays$Annual <- as.logical((ExceptionalDays$Annual))
ExceptionalDays$Annual[is.na(ExceptionalDays$Annual)]<-FALSE
###### Deal with duplicated FALSE and TRUE at Annual: delete FALSE row if same day Annual is TRUE
FalseAnnulInd <- which(! ExceptionalDays$Annual)
TrueAnnulInd <- which(ExceptionalDays$Annual)
if (length(FalseAnnulInd)>0){
DeleteInd <-c()
for (i in 1 : length(FalseAnnulInd)){
#print(format(ExceptionalDays$ExceptionalDate[FalseAnnulInd[i]], "%m-%d"))
## drop the non-annual row when the same month-day also appears as annual
if(format(ExceptionalDays$ExceptionalDate[FalseAnnulInd[i]], "%m-%d") %in% format(ExceptionalDays$ExceptionalDate[TrueAnnulInd], "%m-%d")){
DeleteInd<-c(DeleteInd, FalseAnnulInd[i])
}
}
#print(DeleteInd)
if (length(DeleteInd)>0){
ExceptionalDays <- ExceptionalDays[0-DeleteInd,]
}
}
## Define Unique Index for ExceptionalDayTypeID
## annual dates sharing the same month-day get one shared random ID drawn
## from values not already used as type IDs; NOTE(review): these IDs are
## random, so output is not reproducible without a fixed RNG seed
Annual.Dates<-unique(format(ExceptionalDays$ExceptionalDate[which(ExceptionalDays$Annual)], "%m-%d"))
UniqueInd<-setdiff(sample(1:(nrow(ExceptionalDays)+1), nrow(ExceptionalDays), replace=F),
unique(ExceptionalDays$ExceptionalDayTypeID[!is.na(ExceptionalDays$ExceptionalDayTypeID)]))
if (length(Annual.Dates) > 0){
for (i in 1:length(Annual.Dates)){
ExceptionalDays$ExceptionalDayTypeID[which(format(ExceptionalDays$ExceptionalDate, "%m-%d")
== Annual.Dates[i])] <- UniqueInd[i]
}
ExceptionalDays$ExceptionalDayTypeID[is.na(ExceptionalDays$ExceptionalDayTypeID)]<-
format(round(runif(length(which(is.na(ExceptionalDays$ExceptionalDayTypeID))), min=0, max=9), 3),nsmall = 4) #fill-in a random number if NA
}
## keep only date / annual-flag / type-ID columns (by position 1, 2, 5)
ExceptionalDays2 <- ExceptionalDays[,c(1,2,5)]
ExceptionalDays2$ExceptionalDate<- as.Date(ExceptionalDays2$ExceptionalDate)
## Add missing information
## replicate every annual exceptional day into each year of the window
TrueAnnulInd <- c()
TrueAnnulInd <- which(ExceptionalDays$Annual)
if (length(TrueAnnulInd)>0){
for (i in 1:length(TrueAnnulInd)){
ExceptionalDate <-as.Date(paste(as.character(AllYears), "-", as.character(format(ExceptionalDays$ExceptionalDate[TrueAnnulInd[i]], "%m-%d")), sep=""))
ExceptionalDayTypeID <- rep(ExceptionalDays$ExceptionalDayTypeID[TrueAnnulInd[i]], length=length(ExceptionalDate))
Annual <- rep("TRUE", length(ExceptionalDate))
ExceptionalDays2 <- rbind(ExceptionalDays2, data.frame(ExceptionalDate, Annual, ExceptionalDayTypeID))
# }
}
## de-duplicate by date, order chronologically, restore logical flag
ExceptionalDays2 <- ExceptionalDays2[(! duplicated(ExceptionalDays2$ExceptionalDate)) ,]
ExceptionalDays2 <- ExceptionalDays2[order(ExceptionalDays2$ExceptionalDate), ]
ExceptionalDays2$Annual <- as.logical(ExceptionalDays2$Annual)
}
##### II: Find proximity days
## pre-size with 2 rows per exceptional day (day before + day after);
## "???" marks slots that remain unused and are removed at the end
ProximityDays <- data.frame(Dates = rep(as.Date("2000-01-01"),length= 2*nrow(ExceptionalDays2)),
Annual = rep(FALSE, length = 2*nrow(ExceptionalDays2)),
ProximityDaysTypeID = rep("???", length = 2*nrow(ExceptionalDays2)))
ProximityDays$ProximityDaysTypeID <- as.character(ProximityDays$ProximityDaysTypeID)
if (nrow(ExceptionalDays) > 0){
## day-before proximity days ("-" suffix), only when the previous day is
## not itself an exceptional day; filled from the front of the frame
for (i in 1:nrow(ExceptionalDays2)){
if (!(as.character(ExceptionalDays2$ExceptionalDate[i]-1) %in% as.character(ExceptionalDays2$ExceptionalDate))){
ProximityDays$Dates[i] <- ExceptionalDays2$ExceptionalDate[i]-1
ProximityDays$Annual[i] <- ExceptionalDays2$Annual[i]
ProximityDays$ProximityDaysTypeID[i] <- paste(as.character(ExceptionalDays2$ExceptionalDayTypeID[i]), "-", sep="")
}
}
## day-after proximity days ("+" suffix), skipping dates already present
## either as exceptional days or as day-before entries; filled from the back
for (i in 1:nrow(ExceptionalDays2)){
if ((!(as.character(ExceptionalDays2$ExceptionalDate[i]+1) %in% as.character(ExceptionalDays2$ExceptionalDate)))
&(!(as.character(ExceptionalDays2$ExceptionalDate[i]+1) %in% as.character(ProximityDays$Dates) )) ){
ProximityDays$Dates[nrow(ProximityDays)-i+1] <- ExceptionalDays2$ExceptionalDate[i]+1
ProximityDays$Annual[nrow(ProximityDays)-i+1] <- ExceptionalDays2$Annual[i]
ProximityDays$ProximityDaysTypeID[nrow(ProximityDays)-i+1] <- paste(as.character(ExceptionalDays2$ExceptionalDayTypeID[i]), "+", sep="")
}
}
## drop unused placeholder rows and order by date
if (length(which(ProximityDays$ProximityDaysTypeID == "???")) != 0){
ProximityDays<-ProximityDays[0-which(ProximityDays$ProximityDaysTypeID == "???"), ]
}
ProximityDays<- ProximityDays[order(ProximityDays$Dates), ]
}
return(list(ExceptionalDays2, ProximityDays))
} |
4cce95b955ecfe21c5d372ca8c39debe443ff50a | 900cd462c5476c83998b61cee8d258882fd48009 | /_build.sh | 8a0494176b29a2d9fb2705a2bf71779c2f9d8815 | [
"CC0-1.0"
] | permissive | sameerbhatnagar/parea_final_report | 25048d4675cc208392a501038f61b448f60101e6 | 6971edaf076e52498bcd8cf1ca898c29b73410c3 | refs/heads/master | 2021-01-21T19:07:07.563759 | 2017-05-23T02:20:20 | 2017-05-23T02:20:20 | 92,117,335 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 103 | sh | _build.sh | #!/usr/bin/env Rscript
# Install the development version of bookdown from GitHub before building;
# the following render_book() call then produces the report from index.Rmd.
devtools::install_github('rstudio/bookdown')
bookdown::render_book("index.Rmd") |
197b099d15766758f48fc10907e0c5ad5e841097 | 4cf0636708dd7d1a4afab0ba56de36498f0b16bc | /src/figure-dmem-decay.R | ce1de17553dc69d59f08aedc3e437989d3bf0238 | [] | no_license | dylanhmorris/heat-inactivation | ec093fc6131c8ac186f6189518dc3e7c4f2142ea | e8bd816b54a9dcb2f522d0837cdf72087c12e189 | refs/heads/main | 2023-06-18T05:16:04.143711 | 2021-01-14T04:54:23 | 2021-01-14T04:54:23 | 286,523,386 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,300 | r | figure-dmem-decay.R | #!/usr/bin/env Rscript
########################################
## filename: figure-dmem-decay.R
## author: Dylan Morris <dhmorris@princeton.edu>
## plot figure showing decay in heat-treated
## dmem for all dmem options
#######################################
script_packages <- c(
'rstan', # stan interface
'readr', # csv read-in
'dplyr', # for filter()
'tidybayes', # for spread_draws(), etc.
'ggplot2', # for plotting
'tidyr', # for crossing()
'cowplot', # publication ready ggplot
'extrafont',
'heatinactivation'
)
## load in packages without messages
for (package in script_packages){
suppressPackageStartupMessages(
library(package,
character.only = TRUE))
}
#################################
# read in needed data
#################################
## read command line args: (1) decay data csv, (2) decay MCMC chains,
## (3) titer MCMC chains, (4) output figure path
args <- commandArgs(trailingOnly=TRUE)
decay_data_path <- args[1]
decay_results_path <- args[2]
titer_path <- args[3]
outpath <- args[4]
decay_chains <- readRDS(decay_results_path)
titer_chains <- readRDS(titer_path)
dat <- read_csv(decay_data_path,
col_types = cols())
#################################
## overall plot styling
#################################
set.seed(989327) # reproducible! (since we use random draws)
n_lines <- 10        # number of posterior regression lines to overplot
line_alpha <- 0.1    # transparency of each posterior line
material_colors <- get_params("material_colors")
titer_ylab <- expression("virus titer (TCID"[50] * "/mL media)")
## fix the facet/legend ordering of the materials before renaming them
material_order <- c(
"DMEM uncovered plate oven",
"DMEM covered plate oven",
"DMEM closed vial oven",
"DMEM closed vial heat block",
"DMEM")
dat$material <- factor(
dat$material,
levels = material_order)
## shorten the material labels for display (drop the "DMEM " prefix)
dat <- dat %>%
mutate(material = material %>%
recode_factor(
"DMEM uncovered plate oven" = "Uncovered plate oven",
"DMEM covered plate oven" = "Covered plate oven",
"DMEM closed vial oven" = "Closed vial oven",
"DMEM closed vial heat block" = "Closed vial heat block")
)
##################################################
## calculate posterior draws for regression lines
##################################################
plot_times <- tibble(time = seq(0, 2, length.out = 1000))
## get needed draws and add human readable names
int_draws <- decay_chains %>%
spread_draws(intercept[titer_id]) %>%
add_titer_metadata(dat)
decay_draws <- decay_chains %>%
spread_draws(decay_rate[experiment_id])
## pair each titer intercept with its experiment's decay rate, draw by draw
draws <- int_draws %>%
inner_join(decay_draws,
by = c(".draw", "experiment_id"))
## count positive wells per titer; a titer is "detectable" if >1 well positive
pos_wells <- dat %>%
group_by(titer_id) %>%
summarise(
n_wells = n(),
n_pos = sum(virus_detect))
titer_draws <- titer_chains %>%
spread_draws(log10_titer[titer_id]) %>%
add_titer_metadata(dat) %>%
inner_join(pos_wells,
by = "titer_id") %>%
mutate(detectable = n_pos > 1)
## convert from TCID50/(0.1mL) to TCID50/mL
## and visualize 0 positive well titers at
## the traditional LOD
LOD_log10_per_ml = 0.5
LOD = 10^LOD_log10_per_ml
titer_draws <- titer_draws %>%
mutate(log10_titer_per_ml = ifelse(
detectable,
log10_titer + 1,
LOD_log10_per_ml)) %>%
ungroup() %>%
arrange(desc(time), desc(material))
###################################
## plot panel showing raw surface
## data
###################################
cat('plotting raw data...\n')
###################################
## plot panel showing fit of
## regression lines to real data
###################################
cat('plotting regression lines...\n')
## draw n_lines random regression lines
func_samples <- draws %>%
group_by(titer_id) %>%
sample_n(n_lines) %>%
ungroup()
## annotate lines so that each
## has a unique id for ggplot overplotting
## (else two lines from the same draw but
## different replicates can get confused
## with each other)
func_samples <- func_samples %>%
mutate(line_id = as.numeric(rownames(func_samples)))
## cross product decay_rates with x (time) values
## and calculate y (titer) values
cat('setting up x values...\n')
to_plot <- func_samples %>%
rename(t = time) %>%
crossing(plot_times)
## adding one to convert to per mL from per mL/10
to_plot <- to_plot %>%
mutate(predicted_titer = 10^(1 + intercept - decay_rate * time))
## detectable titers plotted as circles (21), non-detectable as
## downward triangles (25)
shape_scale = scale_shape_manual(
values = unlist(list("FALSE" = 25,
"TRUE" = 21)))
fit_panel <- to_plot %>%
ggplot(aes(x = time,
y = predicted_titer,
group = line_id)) +
geom_hline(aes(yintercept = LOD),
size = 2,
linetype = "dotted") +
geom_line(
aes(color = material),
alpha = line_alpha) +
stat_pointinterval(
.width = 0.95,
mapping = aes(
x = time,
y = 10^log10_titer_per_ml,
shape = detectable,
fill = material,
group = titer_id),
size = 4,
data = titer_draws,
stroke = 2) +
stat_pointinterval(
.width = 0.6827,
mapping = aes(
x = time,
y = 10^log10_titer_per_ml,
shape = detectable,
fill = material,
group = titer_id),
size = 8,
fatten_point = 2,
data = titer_draws,
stroke = 2) +
scale_fill_manual(values = unlist(material_colors)) +
scale_fill_manual(values = unlist(material_colors),
aesthetics = "point_fill") +
scale_color_manual(values = unlist(material_colors)) +
shape_scale +
scale_y_log10_mathformat() +
coord_cartesian(
ylim = c(1e0, 1e6),
xlim = c(0, 2)) +
facet_wrap(vars(material))
# styling: no facet labels because is background plot
fit_panel <- fit_panel +
theme_project() +
theme(legend.position = "none") +
xlab("time (hrs)") +
ylab(titer_ylab)
####################################
## compose full figure from panels
####################################
cat('making full figure...\n')
## save the plot to outpath
cat('saving figure to ', outpath, '...\n')
save_plot(outpath,
fit_panel,
base_width = 10,
base_height = 8)
warnings()
|
# Set the length of the adaptive phase for a given updater factory by
# issuing the corresponding command to the BUGS command interpreter.
modelSetAP <- function(factoryName, adaptivePhase)
{
    quotedName <- sQuote(factoryName)
    command <- paste0(
        "UpdaterMethods.SetFactory(", quotedName,
        ") ;UpdaterMethods.AdaptivePhaseGuard;",
        "UpdaterMethods.SetAdaptivePhase(", adaptivePhase, ")"
    )
    .CmdInterpreter(command)
}
|
1f09b255d34ff785ca615e7e1072cdf012e51972 | 7988d042d1b710dbf2f37ed5a6627a6f6ecaf4e5 | /R/runCmd.R | dce6b61203ecbe98ed64f015f43a29dfc5426c59 | [] | no_license | anilchalisey/juggleR | c67231aa114c41a7d9d25fd344556d230c28746c | 253bc5907ea29e334005cc97fb5e21d7cec2ad97 | refs/heads/master | 2020-03-29T06:07:16.738113 | 2018-09-20T13:30:51 | 2018-09-20T13:30:51 | 149,610,513 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,752 | r | runCmd.R | #' Run command via Bash directly from R
#'
#' Allows users to run Bash commands directly from within R.
#' On Windows this will only work if WSL has been set up, since the
#' command is dispatched to bash via \code{shell()}.
#'
#' @param cmd the system command to be invoked, as a character string.
#' @param intern a logical (not NA) which indicates whether to capture the
#' output of the command as an R character vector.
#' @param wait a logical (not NA) indicating whether the R interpreter
#' should wait for the command to finish, or run it asynchronously. This
#' will be ignored (and the interpreter will always wait) if
#' intern = TRUE.
#'
#' @return
#' If intern = TRUE, a character vector giving the output of the command,
#' one line per character string. (Output lines of more than 8095 bytes
#' will be split). If the command could not be run an R error is generated.
#' Under the Rgui console intern = TRUE also captures stderr. If command runs
#' but gives a non-zero exit status this will be reported with a warning and
#' in the attribute "status" of the result: an attribute "errmsg" may also
#' be available.
#' If intern = FALSE, the return value is an error code (0 for success), given
#' the invisible attribute (so needs to be printed explicitly). If the command
#' could not be run for any reason, the value is 127. Otherwise if wait = TRUE
#' the value is the exit status returned by the command, and if wait = FALSE it
#' is 0 (the conventional success value).
#'
#' @export
#'
#' @examples
#' runCmd("ls")
#' runCmd("ls", intern = TRUE)
runCmd <- function(cmd, intern = FALSE, wait = TRUE) {
  if (.Platform$OS.type != "windows") {
    ## BUG FIX: 'wait' was documented but never forwarded, so
    ## runCmd(..., wait = FALSE) still blocked; it is now passed through.
    system(command = cmd, intern = intern, wait = wait)
  } else {
    ## shell() forwards extra arguments (including 'wait') on to system()
    shell(cmd = shQuote(cmd), shell = "bash", intern = intern, wait = wait)
  }
}
|
92166af629735b0238bb3626a1572553d52fbd0d | 9c897a22a561ca7735825bcd8c2fcc3419399e2f | /app/R/messages.R | 6e17f7388b7aff4442d15f767eff01b3a3c3d867 | [
"Apache-2.0"
] | permissive | FujitsuLaboratories/COMEVIZZ | d2fafb57dd61e0e7562d10eff2dba86b7ddbe3ef | 1b81556d6c04c2cebe7613d16e17981f98d01a1f | refs/heads/master | 2021-09-13T07:09:34.649744 | 2018-04-26T11:08:49 | 2018-04-26T11:08:49 | 106,528,776 | 92 | 9 | Apache-2.0 | 2018-04-26T11:08:50 | 2017-10-11T08:45:46 | R | UTF-8 | R | false | false | 3,959 | r | messages.R | Messages <- setRefClass(
Class = "Messages",
# reference-class fields: the current UI language code and the nested
# message catalogue (message key -> language code -> display string)
fields = list(
lang = "character",
translation = "list"
),
methods = list(
# initialize(): default the language to English and install the
# built-in message catalogue (only "en" entries are bundled)
initialize = function() {
lang <<- "en"
translation <<- list(
"title" = list(
"en" = "COMEVIZZ"
),
"projects.datasources.title" = list(
"en" = "Datasources"
),
"projects.datasources.metrics_data_file.label" = list(
"en" = "Metrics Datafile(.csv)"
),
"projects.datasources.calculate_density.label" = list(
"en" = "Normalized by Lines"
),
"projects.datasources.calculate_boxcox.label" = list(
"en" = "BoxCox Transform"
),
"projects.datasources.filter.title" = list(
"en" = "Filtering Projects"
),
"projects.datasources.filter.help" = list(
"en" = "You can filter projects for selections analyzation targets."
),
"projects.datasources.filter.population.label" = list(
"en" = "Population"
),
"projects.datasources.filter.target.label" = list(
"en" = "Target"
),
"projects.datasources.project_info.title" = list(
"en" = "Projects info"
),
"tab.metrics.display_name" = list(
"en" = "Metrics Stats"
),
"tab.metrics.select_metrics" = list(
"en" = "Select metrics item for analyzing"
),
"tab.metrics.select_metrics.help1" = list(
"en" = "Firstly please select datasource files from left pane."
),
"tab.metrics.select_metrics.label" = list(
"en" = "Select Metrics"
),
"tab.metrics.calculate_density.label" = list(
"en" = "Normalized by Lines"
),
"tab.metrics.main.title" = list(
"en" = "Statistics"
),
"tab.metrics.main.metrics.unselectable_metrics" = list(
"en" = "You cannot select non-numeric metrics"
),
"tab.metrics.main.metrics.no_data_error" = list(
"en" = "This Metrics has no data and cannot be displayed."
),
"tab.metrics.main.metrics.not_numeric_error" = list(
"en" = "This Metrics isn't numeric value, so can't be displayed."
),
"tab.metrics.main.statistics.average.label" = list(
"en" = "Average"
),
"tab.metrics.main.statistics.median.label" = list(
"en" = "Median"
),
"tab.metrics.main.statistics.stddev.label" = list(
"en" = "Standard Deviations"
),
"tab.metrics.main.statistics.row.label1" = list(
"en" = "Population"
),
"tab.metrics.main.statistics.row.label2" = list(
"en" = "Target"
),
"tab.metrics.main.target_project.title" = list(
"en" = "Target Project"
),
"tab.metrics.main.all_project.title" = list(
"en" = "All Project"
),
"tab.zscore.display_name" = list(
"en" = "Z-Score"
),
"tab.zscore.title" = list(
"en" = "Z-Score"
),
"tab.zscore.header.help1" = list(
"en" = "The number of metrics items must be 3 or more."
),
"tab.zscore.header.help2" = list(
"en" = "Please select metrics item for displaying z-score radarchart from right hand side."
),
"tab.zscore.right.help" = list(
"en" = "Select Metrics Sets"
),
"tab.zscore.right.select_metrics.label" = list(
"en" = "Select Metrics"
),
"tab.all_metrics.display_name" = list(
"en" = "All Metrics"
),
"tab.all_metrics.title" = list(
"en" = "All Metrics"
)
)
},
tr = function(text) {
sapply(text, function(s) translation[[s]][[lang]], USE.NAMES = FALSE)
},
set_lang = function(language = "en") {
lang <<- language
}
)
)
|
4a4eb45a632bf1ccb896fb567c2e21063f081129 | 5e14f843a1632ad9a1f39fae30b409944c56d931 | /WorkingCodes/generateClassifierAttributes.R | 6e15babeaa1b29d2948397eaa7112e7ce76a218a | [
"MIT"
] | permissive | sahas3/turbine-damage-probability | 1bd28acc0c3bba160786267a02cffdc47be084b6 | 911c0a2b215d29244a4379c536b5af8b53e72578 | refs/heads/master | 2021-01-11T12:26:27.875272 | 2016-12-16T16:03:55 | 2016-12-16T16:03:55 | 76,664,226 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,218 | r | generateClassifierAttributes.R | # generate all attributes for the turbines
# Generate all classifier attributes (features) for the turbines from the
# lightning-strike data: overlap-area features, bivariate-Gaussian
# probability features, and a damage class label per turbine.  When no
# ground-truth labels are supplied, damage is simulated using randomly
# drawn amplitude/distance thresholds.  Returns a data frame with one row
# per turbine; the class label is the last column.
generateClassifierAttributes <- function(StormData, # lightning data set : data-frame with columns
# lon, lat, x, y, majorRad, minorRad, covMat, angle, area, peakAmp
TurbineData, # turbine data set : data-frame with columns lot, lat, x, y
groundTruthDataClass = NULL, # ground truth data for Turbine Damage
# in absence of ground truth simulate the ground truth
peakAmpThresh = 20, # peak amplitude threshold for generating attributes
distThresh = 0.45, # distance threshold for generating attributes
peakAmpThreshGnd = runif(1, 30, 50), # peak amplitude threshold for simulating ground truth
distThreshGnd = runif(1, 0.3, 0.6)# distance threshold for simulating ground truth
)
{
# create variables for easy access to data
# numLightStrikes = length(StormData$x); # number of light strikes
lightStrikeCenters <- cbind(StormData$x, StormData$y) # cartesian co-ordinates
lightStrikeCentersLonLat <- cbind(StormData$lon, StormData$lat) # longitude-latitude coordinates
lightStrikeRadius <- cbind(StormData$majorRad, StormData$minorRad)
turbineCenters <- cbind(TurbineData$x, TurbineData$y)
turbineCentersLonLat <- cbind(TurbineData$lon, TurbineData$lat)
turbineRadius = rep(distThresh, 2) # defines attractive region of the turbine
if (is.null(groundTruthDataClass))
{
#### Simulate turbine damage if ground truth is not provided
## (note: the simulation thresholds default to random draws, so repeated
## calls will label turbines differently unless the RNG seed is fixed)
turbineClass <- simulateTurbineDamage(lightStrikeCenters, lightStrikeCentersLonLat, lightStrikeRadius,
StormData$covMat, StormData$angle, StormData$peakAmp,
turbineCenters, turbineCentersLonLat,
peakAmpThreshGnd, distThreshGnd,
plotFlag = F)
} else {turbineClass <- groundTruthDataClass}
#### Generate attribute values based on overlap of circle around turbines and light strike ellipses
overlapAreaData <- turbineStrikeOverLapAreaAttributes(lightStrikeCenters,
lightStrikeCentersLonLat,
lightStrikeRadius,
StormData$area,
StormData$angle,
StormData$peakAmp,
turbineCenters,
turbineCentersLonLat,
turbineRadius,
turbineClass,
peakAmpThresh)
#### Generate attribute values based on Bivariate Gaussian Probability Distribution
probabilityData <- turbineStrikeProbabilityAttributes(lightStrikeCenters,
lightStrikeRadius,
StormData$angle,
StormData$peakAmp,
StormData$covMat,
turbineCenters,
turbineCentersLonLat,
turbineRadius,
turbineClass,
peakAmpThresh,
plotHeatMapFlag = F)
# combine the attributes into a single feature table; the damage class
# label becomes the final column
turbineDamageData <- cbind(data.frame(overlapAreaData), data.frame(probabilityData), turbineClass)
return(turbineDamageData)
}
|
c4b519c55a59706e5beb808509a32ddc9d10202e | b6bc8e750b13025ada2dc74c1121df4f7b39a20e | /R/Brazil.R | ba47b4627a97e2874e9fe089955bdb9787daeb52 | [
"MIT"
] | permissive | epiforecasts/covidregionaldata | 7f299a792fb8298c4dd14c9aaa9ad0593f1f02a8 | bc7bc24761ccc8acbfcd9380553696eaf5ce524e | refs/heads/master | 2023-04-19T05:33:04.357360 | 2022-05-25T14:37:00 | 2022-06-20T09:56:40 | 271,601,189 | 36 | 26 | NOASSERTION | 2022-06-20T09:56:42 | 2020-06-11T16:58:38 | R | UTF-8 | R | false | false | 4,894 | r | Brazil.R | #' Brazil Class for downloading, cleaning and processing notification data
#' @description Information for downloading, cleaning
#' and processing COVID-19 region data for Brazil.
#'
#'
#' Data available on Github, curated by Wesley Cota:
#' DOI 10.1590/SciELOPreprints.362
#'
#' @source \url{https://github.com/wcota/covid19br}
#' @concept dataset
#' @family subnational
#' @export
#' @examples
#' \dontrun{
#' region <- Brazil$new(verbose = TRUE, steps = TRUE, get = TRUE)
#' region$return()
#' }
Brazil <- R6::R6Class("Brazil",
  inherit = DataClass,
  public = list(
    # Core Attributes
    #' @field origin name of origin to fetch data for
    origin = "Brazil",
    #' @field supported_levels A list of supported levels.
    supported_levels = list("1", "2"),
    #' @field supported_region_names A list of region names in order of level.
    supported_region_names = list(
      "1" = "state",
      "2" = "city"
    ),
    #' @field supported_region_codes A list of region codes in order of level.
    supported_region_codes = list(
      "1" = "iso_3166_2"
    ),
    #' @field common_data_urls List of named links to raw data. Data is
    #' available at the city level and is aggregated to provide state data.
    common_data_urls = list(
      "main" = "https://github.com/wcota/covid19br/raw/master/cases-brazil-cities-time.csv.gz" # nolint
    ),
    #' @field source_data_cols existing columns within the raw data
    source_data_cols = c("cases_total", "deaths_total"),
    #' @field source_text Plain text description of the source of the data
    source_text = "Wesley Cota",
    #' @field source_url Website address for explanation/introduction of the
    #' data
    source_url = "https://github.com/wcota/covid19br/blob/master/README.en.md",
    #' @description Set up a table of region codes for clean data
    #' @importFrom dplyr tibble
    set_region_codes = function() {
      # Lookup mapping full state names to their two-letter codes; joined
      # against the raw data's "state" column in clean_common().
      self$codes_lookup <- tibble(
        state_name = c(
          "Acre", "Amap\u00e1", "Amazonas", "Par\u00e1", "Rond\u00f4nia",
          "Roraima", "Tocantins", "Alagoas", "Bahia", "Cear\u00e1",
          "Maranh\u00e3o", "Para\u00edba", "Pernambuco", "Piau\u00ed",
          "Rio Grande do Norte", "Sergipe", "Espirito Santo", "Minas Gerais",
          "Rio de Janeiro", "S\u00e3o Paulo", "Paran\u00e1",
          "Rio Grande do Sul", "Santa Catarina", "Distrito Federal",
          "Goi\u00e1s", "Mato Grosso", "Mato Grosso do Sul"
        ),
        level_1_region_code = c(
          "AC", "AP", "AM", "PA", "RO", "RR", "TO", "AL", "BA", "CE",
          "MA", "PB", "PE", "PI", "RN", "SE", "ES", "MG", "RJ", "SP",
          "PR", "RS", "SC", "DF", "GO", "MT", "MS"
        )
      )
    },
    #' @description Common data cleaning for both levels
    #' @importFrom dplyr mutate filter select left_join group_by summarise
    #' @importFrom lubridate ymd
    clean_common = function() {
      self$data$clean <- self$data$raw$main %>%
        mutate(date = ymd(date)) %>%
        # Drop the source's country-wide aggregate rows (state == "TOTAL").
        filter(state != "TOTAL") %>%
        # Attach full state names from the lookup built in set_region_codes().
        left_join(self$codes_lookup,
          by = c("state" = "level_1_region_code")
        ) %>%
        # Substituting at the start-of-string anchor prepends "BR-" to the
        # two-letter code, giving ISO 3166-2 style codes (e.g. "BR-SP").
        mutate(state = gsub("^", "BR-", state)) %>%
        select(date,
          level_1_region = state_name,
          level_2_region = city,
          level_1_region_code = state,
          cases_new = newCases,
          cases_total = totalCases,
          deaths_new = newDeaths,
          deaths_total = deaths
        )
    },
    #' @description State Level Data Cleaning
    #' @importFrom dplyr select left_join group_by summarise ungroup
    clean_level_1 = function() {
      # The raw feed is city-level only: drop the city column and sum counts
      # per state and date to produce the state (level 1) series.
      self$data$clean <- self$data$clean %>%
        select(-level_2_region) %>%
        group_by(date, level_1_region, level_1_region_code) %>%
        summarise(
          cases_new = sum(as.numeric(cases_new)),
          cases_total = sum(as.numeric(cases_total)),
          deaths_new = sum(as.numeric(deaths_new)),
          deaths_total = sum(as.numeric(deaths_total)),
          .groups = "drop_last"
        ) %>%
        ungroup()
    },
    #' @description City Level Data Cleaning
    # nolint start
    #' @importFrom dplyr mutate select left_join group_by summarise recode ungroup
    # nolint end
    clean_level_2 = function() {
      self$data$clean <- self$data$clean %>%
        # Strip the trailing "/XX" suffix (slash plus capital letters,
        # presumably the state abbreviation -- verify against raw data) from
        # city names.
        mutate(level_2_region = gsub("/[A-Z]*", "", level_2_region)) %>%
        # The source flags unallocated cases with a "CASO SEM ... DEFINIDA"
        # placeholder; relabel those rows as "Unknown City".
        mutate(level_2_region = gsub(
          "^CASO SEM.*DEFINIDA",
          "Unknown City",
          level_2_region
        )) %>%
        # Re-aggregate because the renaming above can merge several raw rows
        # into one city per date.
        group_by(date, level_1_region, level_1_region_code, level_2_region) %>%
        summarise(
          cases_new = sum(as.numeric(cases_new)),
          cases_total = sum(as.numeric(cases_total)),
          deaths_new = sum(as.numeric(deaths_new)),
          deaths_total = sum(as.numeric(deaths_total)),
          .groups = "drop_last"
        ) %>%
        ungroup()
    }
  )
)
|
8a28f51e5ff8a80a2ca9b26c075f05764a972c59 | 4add474db49f93935f1d0e22ca4158345c172f5c | /R/pcaMatrix.R | 8030ab79f2a5d64ec16ac73401c9367b3f9f2236 | [] | no_license | joeburns06/hocuspocus | f0b46d7845d1dbf1636a0279fc6c382cb2d68e02 | 67ab848e93fbafdadb3759bf64f18e4ac4717bfc | refs/heads/master | 2021-01-18T23:50:16.776976 | 2016-06-22T12:07:36 | 2016-06-22T12:07:36 | 45,411,569 | 3 | 4 | null | 2016-06-22T12:04:22 | 2015-11-02T17:51:24 | R | UTF-8 | R | false | false | 33,645 | r | pcaMatrix.R | #' Generate a series of plots comparing the first 10 PCs of a PCA analysis.
#'
#' Takes ExpressionSet object, performs PCA on the transposed expression matrix,
#' then plots the projections of the sample scores on the first 10 PCs.
#'
#' @param cellData ExpressionSet object created with readCells (and preferably
#' transformed with prepCells). It is also helpful to first run
#' reduceGenes_var.
#' @param scree Boolean specifying whether to generate a scree plot showing the
#' amount of variance contributed by each PC.
#' @param center Boolean specifying whether to the center the data prior to PCA.
#' This is generally recommended.
#' @param scale Boolean specifying whether the data should be scaled prior to
#' PCA. This is generally not recommended unless samples have different units
#' (e.g. some samples are counts and some are TPMs).
#' @param groups Character string specifying the title of the column in pData
#' that contains the names of the groups to which each sample belongs. The
#' dots representing each sample in the plots will be colored by group. The
#' column length should be the same length as the number of samples.
#' @param values Character string specifying the title of the column in pData
#' that contains a vector of numeric values to be plotted as a color gradient
#' on the dots in the plots. The column length should be the same length as
#' the number of samples. Gene and values cannot be specified simultaneously.
#' If groups and values are both specified, gene will be used for coloring.
#' Set bubble to TRUE to display group information as well.
#' @param gene Character string specifying a gene whose expression values will
#' be plotted a color gradient on the dots in the plots. One gene name should
#' be specified, and the gene must be present within the expression table.
#' Gene and values cannot be specified simultaneously. If groups and gene are
#' both specified, gene will be used for coloring. Set bubble to TRUE to
#' display group information as well.
#' @param colors Vector of character strings of length 2 specifying the color range for values or gene. If
#' not specified, a default black-to-yellow color gradient will be used.
#' @param logNumeric Boolean specifying whether the numbers in values should be
#' transformed to log2 space.
#' @param refPC Character string specifying the PC to be used as the reference
#' PC that is plotted on the x-axis of every plot.
#' @param alpha Numeric specifying the transparency (from 0 to 1) level of the
#' dot colors.
#' @param dotsize Numeric specifying the size of the dots in the plots.
#' @param bubble Boolean specifying whether dots of different sizes should be
#' plotted to display information about another variable on the plot.
#' @param bubbleSizes Vector of numerics specifying the dot sizes for each group
#' within groups. Length of bubbleSizes should be the same length as groups.
#' If bubbleSizes is not specified, default sizes will be generated according
#' to the levels in the groups column. Useful for displaying group
#' information along with values or gene. Additionally, can be used to
#' indicate additional information about groups. E.g. if the levels in groups
#' are 'A1', 'B1', 'C2', and 'D2', bubbleSizes could be c(3,3,6,6) to indicate
#' the '1' and '2' components of the groups with different dot sizes.
#' @param print Boolean specifying whether the genes with the most positive and
#' negative loadings on each of the 10 PCs should be printed in the terminal
#' window.
#' @param printNum Integer specifying the number of genes from each PC to print.
#' @param save Boolean specifying whether to save the resultant plots as a .tiff
#' file.
#' @return Matrix of PCA plots for the first 10 PCs.
#' @export
pcaMatrix <- function(cellData, scree = FALSE, center = TRUE, scale = FALSE, ICA = FALSE,
    groups, values, gene, colors, logNumeric = TRUE, refPC = "PC1", alpha = 0.7, dotsize = 2,
    bubble = FALSE, bubbleSizes, print = FALSE, printNum = 50, save = FALSE) {
    ## --- device shim ---------------------------------------------------------
    # quartz() exists only on macOS; on Windows fall back to windows().
    # NOTE(review): when scree = TRUE and save = TRUE this shim is still called
    # with type/file arguments that windows() does not accept -- pre-existing
    # limitation, left unchanged here.
    if (.Platform$OS.type == "windows") {
        quartz <- function() windows()
    }
    ## --- input validation ----------------------------------------------------
    if (cellData@logData$prepCells[1] == "No") {
        warning("It would be wise to run prepCells prior to pcaMatrix.", call. = FALSE)
    }
    if (!missing(groups) && !all((groups %in% colnames(pData(cellData))))) {
        stop("The column name specified for groups is not found in phenoData. If absent, add a column to phenoData containing the group for each cell, even if it only consists of 1 group.",
            call. = FALSE)
    }
    if (!missing(values) && !all((values %in% colnames(pData(cellData))))) {
        stop("The column name specified for values is not found in phenoData.",
            call. = FALSE)
    }
    if (!missing(groups) && !all((names(cData(cellData)) %in% colnames(pData(cellData))))) {
        stop("Some or all of the column names in colorData do not have matching column names in phenoData.",
            call. = FALSE)
    }
    if (!missing(values) && !missing(gene)) {
        stop("Cannot specify both values and gene.", call. = FALSE)
    }
    ## --- dimensionality reduction --------------------------------------------
    # PCA on samples as rows. BUG FIX: prcomp() has no 'save' argument; the
    # stray 'save = FALSE' the original passed was silently swallowed by '...'
    # and has been removed.
    PCA.allgenes <- prcomp(t(exprs(cellData)), center = center, scale. = scale)
    if (scree == TRUE) {
        quartz()
        screeplot(PCA.allgenes, type = "lines", main = "Scree Plot")
        if (save == TRUE) {
            quartz(type = "pdf", file = "Scree Plot.pdf")
            screeplot(PCA.allgenes, type = "lines")
            # BUG FIX: the original had 'dev.off' without parentheses, so the
            # PDF device was never closed and the file was never written out.
            dev.off()
        }
    }
    if (ICA == FALSE) {
        # Sample scores on the first 10 principal components.
        comp.cell.type2 <- data.frame(PCA.allgenes$x[, 1:10])
    } else {
        # BUG FIX: the original ICA branch referenced undefined objects
        # ('exprs_table', 'samples') and kept fastICA's default X1..X10 column
        # names, which broke every aes_string(y = "PC1", ...) mapping below.
        # The independent components are renamed PC1..PC10 so the shared
        # plotting code works for both projection methods.
        ica1 <- fastICA::fastICA(t(exprs(cellData)), 10)
        comp.cell.type2 <- data.frame(ica1$S[, 1:10])
        colnames(comp.cell.type2) <- paste0("PC", 1:10)
    }
    ## --- group annotation and dot sizes --------------------------------------
    if (missing(groups)) {
        cell_colors <- "blue"
        comp.cell.type2["Cell_Type"] <- rep("All", nrow(pData(cellData)))
    } else {
        # Group colors come from the colorData slot; blank/NA rows are dropped.
        cell_colors <- data.frame(cellData@colorData[[groups]], stringsAsFactors = FALSE)
        cell_colors <- as.character(cell_colors[!apply(is.na(cell_colors) | cell_colors == "", 1, all), ])
        groupz <- data.frame(pData(cellData)[, groups], stringsAsFactors = FALSE)
        # Factor levels follow order of first appearance so fills line up with
        # the colorData ordering.
        comp.cell.type2["Cell_Type"] <- factor(groupz[, colnames(groupz)],
            levels = unique(groupz[, colnames(groupz)]), ordered = FALSE)
    }
    if (bubble == TRUE) {
        # Default bubble sizes: one size per group level, 1..k.
        sizes <- if (missing(bubbleSizes)) 1:length(levels(comp.cell.type2$Cell_Type)) else bubbleSizes
    } else {
        # Uniform dot size, one entry per group color.
        sizes <- rep(dotsize, length(cell_colors))
    }
    ## --- shared plotting helpers ---------------------------------------------
    # aes_string() expression that keeps group order stable in the legend.
    factor_expr <- "factor(Cell_Type,levels=unique(Cell_Type),ordered=FALSE)"
    legend_theme <- theme(legend.key.size = grid::unit(0.35, "cm"),
        legend.text = element_text(size = 8))
    # Builds one plot per PC1..PC9 (each against refPC on the x-axis) with the
    # supplied plot constructor, optionally saves the 3x3 panel as a tiff, then
    # draws it on a fresh device.  Replaces the nine near-identical copies of
    # the ggplot call that the original repeated in every branch.
    show_panel <- function(make_plot) {
        plots <- lapply(paste0("PC", 1:9), make_plot)
        if (save == TRUE) {
            PCA_grob <- do.call(arrangeGrob, c(plots, ncol = 3))
            ggsave(PCA_grob, file = "PCA Matrix.tiff")
        }
        quartz()
        do.call(grid.arrange, c(plots, ncol = 3))
    }
    if (missing(values) && missing(gene)) {
        ## Mode 1: dots colored (and optionally sized) by group membership.
        if (bubble == TRUE) {
            warning("Bubble sizes currently reflect the number of levels in groups and are not adding additional information to the plot.", call. = FALSE)
        }
        show_panel(function(pc) {
            ggplot(comp.cell.type2) +
                geom_point(aes_string(x = refPC, y = pc, fill = factor_expr, size = factor_expr),
                    shape = 21, color = "black", alpha = alpha) +
                scale_fill_manual(name = "Cell Type", values = cell_colors) +
                scale_size_manual(name = "Cell Type", values = sizes) +
                guides(fill = guide_legend("Cell Type"), color = guide_legend("Cell Type"),
                    size = guide_legend("Cell Type")) +
                legend_theme
        })
    } else {
        ## Mode 2/3: dots colored by a numeric gradient -- either a phenoData
        ## column (values) or the expression of one gene.
        if (!missing(values)) {
            legend_title <- "Values"
            fill_var <- "Values"
            valuez <- data.frame(pData(cellData)[, values], stringsAsFactors = FALSE)
            if (logNumeric == TRUE) {
                valuez <- log2(valuez)
            }
            comp.cell.type2["Values"] <- valuez
        } else {
            if (!gene %in% row.names(exprs(cellData))) {
                stop("Specified gene is not present in expression table", call. = FALSE)
            }
            # NOTE(review): aes_string() parses the gene name as an expression,
            # so non-syntactic names (e.g. containing '-') would fail here, as
            # in the original.
            legend_title <- gene
            fill_var <- gene
            comp.cell.type2[gene] <- data.frame(exprs(cellData)[gene, ], stringsAsFactors = FALSE)[, 1]
        }
        if (!missing(colors)) {
            cell_colors <- colors
        }
        if (length(cell_colors) != 2) {
            warning("Specify a vector of two colors to control color, using default colors instead", call. = FALSE)
            cell_colors[1:2] <- c("black", "yellow")
        }
        # Fresh gradient scale per plot (ggplot scales should not be shared
        # across plot objects).
        gradient <- function() scale_fill_gradient(low = cell_colors[1], high = cell_colors[2])
        if (bubble == TRUE) {
            show_panel(function(pc) {
                ggplot(comp.cell.type2) +
                    geom_point(aes_string(x = refPC, y = pc, fill = fill_var, size = factor_expr),
                        shape = 21, color = "black", alpha = alpha) +
                    scale_size_manual(name = "Groups", values = sizes) +
                    guides(color = guide_legend(legend_title), size = guide_legend("Groups")) +
                    gradient() + legend_theme
            })
        } else {
            show_panel(function(pc) {
                ggplot(comp.cell.type2) +
                    geom_point(aes_string(x = refPC, y = pc, fill = fill_var),
                        size = dotsize, shape = 21, color = "black", alpha = alpha) +
                    guides(color = guide_legend(title = legend_title)) +
                    gradient() + legend_theme
            })
        }
    }
    ## --- optional loading report ---------------------------------------------
    # Print the genes with the most positive and most negative loadings on each
    # of the first 10 PCs (printNum/2 from each end).
    if (print == TRUE) {
        if (printNum > nrow(exprs(cellData))) {
            stop("There are fewer genes than printNum in the expression matrix.")
        }
        n <- printNum / 2
        comp3 <- data.frame(PCA.allgenes$rotation[, 1:10])
        # Per-PC ordering of gene loadings, most positive first.
        comp3_order <- apply(comp3, 2, order, decreasing = TRUE)
        terminal_output <- data.frame()
        for (i in seq_len(10)) {
            terminal_output[1:nrow(comp3), i] <- row.names(comp3)[comp3_order[, i]]
        }
        colnames(terminal_output) <- colnames(comp3)
        # BUG FIX: the original bottom window '(nrow - n):nrow' selected n + 1
        # rows (printNum + 1 genes in total); '+ 1' keeps it at exactly printNum.
        terminal_output_final <- terminal_output[c(1:n, (nrow(terminal_output) - n + 1):nrow(terminal_output)), ]
        print(terminal_output_final)
    }
}
|
c2eb8f21fbdf2b48d8e42a98a279b655c564e33a | 08eef8b893084b8353af55401f0f7ef67c285d64 | /Compare_2_db_result.R | ce33d720e972db75b35669077d66eb824a52838a | [] | no_license | imxiaow/IncRNA_2018_XW_430 | 8a174340bb6d275504cbb70d55c3252bbf36378a | 7b4095d47965e75046e29cad4c6a55d1636ae3b8 | refs/heads/master | 2022-01-06T20:15:09.753881 | 2019-06-01T17:41:21 | 2019-06-01T17:41:21 | 189,761,075 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,125 | r | Compare_2_db_result.R | #==================================================
if (! require(biomaRt, quietly=TRUE)) {
if (! exists("biocLite")) {
source("https://bioconductor.org/biocLite.R")
}
biocLite("biomaRt")
library(biomaRt)
}
if (!require(igraph, quietly=TRUE)) {
install.packages("igraph")
library(igraph)
}
if (!require(readr, quietly=TRUE)) {
install.packages("readr")
library(readr)
}
# Install Packages
#==================================================
# Load the data
#================================================================
myMart <- useMart("ensembl", dataset="hsapiens_gene_ensembl")
(filters <- listFilters(myMart))
(attributes <- listAttributes(myMart)) # View possible filters and attributes in Biomart
filters[grep("ENSG", filters$description), ] # Specific filter for ensemble IDs (ensemble_gene_id)
attributes[grep("symbol", attributes$description, ignore.case=TRUE), ] # Attribute description for HUGO symbol for mapping
#=============================
test_result_temp <- list()
result <- c()
for (ID in shared_geneID_005_NM) {
print(ID)
# retrieves information from the Ensembl database. by <getBM>.
test_result_temp[[ID]] <- getBM(filters = "ensembl_gene_id",
attributes = c("hgnc_symbol"),
values = ID,
mart = myMart)
cat(sprintf("Ensemble gene ID - %s \n ", ID)) # Format output
cat(sprintf("HGNC Symbol - %s \n ", test_result_temp[[ID]]["hgnc_symbol"][1,]))
cat("\n")
need_store <- test_result_temp[[ID]]["hgnc_symbol"][1,]
# print(test_result_temp[[ID]]["hgnc_symbol"][1,])
result <- c(result, need_store)
}
length(result) #1874
length(which(is.na(result))) # 57
# make it into dictionary
dict_TCGA_NM_result <- as.data.frame(cbind(shared_geneID_005_NM, result), stringsAsFactors=FALSE)
save(dict_TCGA_NM_result, file="dict_TCGA_NM_result_gene_ID_symbol.Rda")
# check number of NAs in the results,
length(which(is.na(dict_TCGA_NM_result$result))) #57
# dealing with NAs.
sel <-which(!is.na(dict_TCGA_NM_result$result))
newEnsemID <- dict_TCGA_NM_result$shared_geneID_005_NM[sel]
newHUGOid <- dict_TCGA_NM_result$result[sel]
newGeneSymToEnsm <- as.data.frame(cbind(newEnsemID, newHUGOid), stringsAsFactors = FALSE)
# compare how many gene symbols in the 259 df.
length(which(duplicated(newGeneSymToEnsm$newHUGOid))) # 0
length(unique(newGeneSymToEnsm$newHUGOid))# 1817
# compare result.
length(which(newGeneSymToEnsm$newHUGOid %in% common_fd_g)) #224
length(which(common_fd_g%in% newGeneSymToEnsm$newHUGOid)) #224
length(which(!common_fd_g%in% newGeneSymToEnsm$newHUGOid))#35
# what are these common genes results between these 2 database? what are they?
# what are the different genes (e.g. in PWCAGE but not in TCGA)
common_result_2_database <- newGeneSymToEnsm$newHUGOid[which(newGeneSymToEnsm$newHUGOid %in% common_fd_g)]
result_in_PWCAG_not_TCGA <- common_fd_g[which(!common_fd_g %in% newGeneSymToEnsm$newHUGOid)]
save(common_result_2_database, file = "common_result_2_database.Rda")
save(result_in_PWCAG_not_TCGA, file= "result_in_PWCAG_not_TCGA.Rda")
save(newGeneSymToEnsm, file="result_dict_no_NA.Rda")
write.table(common_result_2_database, "common_result_2_database.txt", sep="\t")
#=========
library(gProfileR)
gprofiler(newGeneSymToEnsm$newHUGOid, organism = "hsapiens",ordered_query = TRUE)
newGeneSymToEnsm$newHUGOid
write.table(newGeneSymToEnsm$newHUGOid, file="/Users/xiaowang/Desktop/common_genelistforgprofiler_TCGA.txt", quote=FALSE, append=FALSE,sep='\n', row.names = FALSE, col.names = FALSE)
all_pathways <- readLines('gmt_ids.txt') #17335 GO ID
common_NM_pathways_TCGA <- read.table("/Users/xiaowang/Desktop/gprofiler_results_common_TCGA.txt", sep="\t", header = TRUE, quote = "")
#294 GO total
Final_common_NM_pathways_TCGA <- common_NM_pathways_TCGA[which(common_NM_pathways_TCGA$GO.ID %in% all_pathways),] #206 in total
write.table(Final_common_NM_pathways_TCGA, file="/Users/xiaowang/Desktop/final_common_genelistforgprofiler_TCGA.txt", sep='\t',quote=FALSE,row.names = FALSE, col.names = TRUE)
#==========
#END |
9c6168e616039d12eede00905ee1eef961c556c1 | bf3f6ce3ce546b0bd8c684b735503f72bc9bdc8a | /R/data.R | c5b88b3778864f7f9e0f62dd34740899a73e1cc8 | [] | no_license | DudbridgeLab/indexevent | e40e958792fbab9c284655455145918219ddbf50 | 381e5f622f3b05d586e11892880d0b1895b96e58 | refs/heads/master | 2021-06-19T01:58:04.780623 | 2021-01-22T14:23:11 | 2021-01-22T14:23:11 | 141,335,840 | 2 | 1 | null | 2021-01-22T14:23:12 | 2018-07-17T19:44:06 | R | UTF-8 | R | false | false | 1,696 | r | data.R | #' Simulated effects on incidence and prognosis
#'
#' A simulated dataset consisting of regression coefficients on incidence and prognosis, with their standard errors,
#' for 10,000 variables (eg SNPs). 500 variables have effects on incidence only, 500 on prognosis only, and 500 on both.
#' The effects on incidence and prognosis are independent.
#' The estimates are obtained from linear regression in a simulated dataset of 20,000 individuals.
#'
#' @format A data frame with 10,000 rows and 4 variables:
#' \describe{
#' \item{xbeta}{Regression coefficient on incidence}
#' \item{xse}{Standard error of xbeta}
#' \item{ybeta}{Regression coefficient on prognosis}
#' \item{yse}{Standard error of ybeta}
#' }
#'
#' @examples
#' # Default analysis with Hedges-Olkin adjustment for regression dilution
#' # Does not calculate a standard error
#' indexevent(testData$xbeta,testData$xse,testData$ybeta,testData$yse)
#' # [1] "Coefficient -0.441061156526639"
#' # [1] "Standard error 0"
#' # [1] "95% CI -0.441061156526639 -0.441061156526639"
#'
#' # SIMEX adjustment with 100 simulations for each step
#' indexevent(testData$xbeta,testData$xse,testData$ybeta,testData$yse,method="SIMEX",B=100)
#' # [1] "Coefficient -0.446543628582032"
#' # [1] "Standard error 0.011576233488927"
#' # [1] "95% CI -0.470301533547 -0.424923532117153"
#'
#' # First few unadjusted effects on prognosis
#' testData$ybeta[1:5]
#' # [1] 0.032240 0.057070 -0.006959 0.080460 0.032820
#' # Adjusted effects
#' indexevent(testData$xbeta,testData$xse,testData$ybeta,testData$yse)$ybeta.adj[1:5]
#' # [1] 0.05219361 0.06110395 -0.01489810 0.08982814 0.01328099
"testData"
|
aef28eb686e74f7d91c3594296a3915709206589 | 2d00505c7940f1bd1dcff122aa1e8bbd5d2edea2 | /R/convertRValue.R | 57b0658a7ccb536185ed2379f11beda25bdc7f16 | [] | no_license | kashenfelter/RGCCTranslationUnit | cb647e6c57e78656bb196e68834d60fd664e66cd | 1bd45f5589516334afa57a75e936d2a36ff943b6 | refs/heads/master | 2020-03-23T00:32:22.815755 | 2013-04-21T21:38:45 | 2013-04-21T21:38:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,721 | r | convertRValue.R |
setGeneric("convertRValue",
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
if(is(parm, "ResolvedTypeReference"))
parm = resolveType(parm)
if(is(parm, "PendingType")) {
parm = resolvePendingType(parm)
return(convertRValue(to, name, parm, parameters, typeMap))
}
map = lookupTypeMap(parm, typeMap)
if(!is.null(map) && !is.null(op <- map$convertRValue )) {
ans = userConversion(op, to, name, parm, parameters, typeMap)
if(!is.null(ans)) # add the ;
return( ans )
}
ans = standardGeneric("convertRValue")
if(!inherits(ans, "Statement"))
paste(to, if(to != "") " = ", ans, ";", sep = "")
else
ans
})
setMethod("convertRValue", c(parm = "SEXP"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
return(name)
})
setMethod("convertRValue", c(parm = "intType"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
paste("asInteger(", name, ");")
})
setMethod("convertRValue", c(parm = "unsignedIntType"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
paste("asReal(", name, ");")
})
setMethod("convertRValue", c(parm = "doubleType"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
# Cast for inheritance
paste("(", parm@name, ")", "asReal(", name, ")")
})
setMethod("convertRValue", c(parm = "unsignedCharType"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
paste("(", parm@name, ")", "RAW(", name, ")[0]")
})
setMethod("convertRValue", c(parm = "boolType"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
paste("asLogical(", name, ")")
})
setMethod("convertRValue", c(parm = "complexType"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
paste("asComplex(", name, ")")
})
setMethod("convertRValue", c(parm = "EnumerationDefinition"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
if(is(parm@name, "TypedefEnumerationName"))
parm@name = parm@name[2]
paste('(', parm@name, ')', "INTEGER(", name, ")[0]")
})
#
# We get ourselves into trouble here if we try to work with the
# object and not the pointer to it _if_ the = operator is not available
# to us. See DECLARE_NO_COPY_CLASS. We don't seem to be able to detect
# this by looking for private operator()= method since g++ is not giving us this.
#
# So the approach is treat it as a pointer and change the call to dereference the pointer
# e.g.
# bool CanRead(const wxFSFile& file)
# wxFSFile *file;
# file = R_GET_REF_TYPE();
# This->CanRead(*file);
#
setMethod("convertRValue", c(parm = "C++ReferenceType"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
# note the preceeding * XXX
# removed for now.
# Want the type not to be a basic primitive type.
typeName = parm@type@name
refName = parm@type@name
if(is(parm@type, "PointerType")) {
typeName = getReferenceClassName(parm@type)
refName = parm@type@typeName
}
decl = getNativeDeclaration("", PointerType(parm@type), addSemiColon = FALSE, const = FALSE)
paste("(", decl, ")", derefNativeReference(name, parm@type, refName) )
})
derefNativeReference =
function(name, type, refName)
{
paste(
" R_getNativeReference(", name, ", ",
# paste('"', refName, '"', sep = ""),
dQuote(refName),
", ",
# paste('"', getReferenceClassName(type), '"', sep = ''),
"NULL", #XXX this is to allow for inheritance. Perhaps want other mechanism.
")", sep = "")
}
setMethod("convertRValue", c(parm = "PointerType"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
if(parm@depth == 1 && is(parm@type, "voidType"))
return(paste("TYPEOF(", name, ") == RAWSXP ? (GET_LENGTH(", name, ") ? RAW(", name, ") : NULL) : ", derefNativeReference(name, "NULL", "NULL")))
if(parm@depth == 1 && length(parm@typeName) && parm@typeName == "char")
return(paste("GET_LENGTH(", name, ") > 0 ? CHAR(STRING_ELT(", name, ", 0)) : ", "NULL"))
if(is(parm@type, "BuiltinPrimitiveType")) {
op = BuiltinTypeTable[BuiltinTypeTable$RTypeClass == class(parm@type), "Caccessor"]
if(!is.na(op)) #??? When would it ever be NA??
return(paste(op, "(", name, ")"))
# Should be an paste(id, "ArrayRef"), not a regular Ref.
# Not quite sure id is supposed to be. Looks like the class
# name to which we add a Ref.
id = parm@type@name #? check
ptrType = dQuote(paste(id, "Ptr", sep = ""))
ptrName = paste("_p_", sub("^r_", "", name), sep = "")
txt = c(paste("if(IS_S4_INSTANCE(", name, ", ", ptrType, "))"),
paste("\t", ptrName, "= R_getNativeReference(", name, ", ", ptrType, ",", ptrType, ");"))
txt = c(txt, "else", paste("\t", ptrName, "=", op, "(", name, ");"))
txt = paste(txt, collapse = "\n\t\t\t")
class(txt) = c("IfStatement", "Statement")
return(txt)
}
if(length(grep("^struct ", parm@typeName)) > 0) {
tmp = gsub("^struct ", "", parm@typeName)
# paste("(", parm@typeName, " *) R_getNativeReference(", name, ", \"", tmp, "\", \"", tmp, "\")", sep = "")
#XXX consolidate this with the same pattern in the else below!
paste("(", parm@typeName, " *) ", derefNativeReference(name, tmp, parm@typeName), sep = "")
} else
# paste("R_GET_REF_TYPE(", name, ", ", parm@typeName, rep("*", parm@depth - 1), ")")
paste("(", getNativeDeclaration("", parm, addSemiColon = FALSE, const = FALSE), ")",
derefNativeReference(name, parm, getReferenceClassName(parm)),
sep = "")
#
# if(is(parm, "TypedefDefinition"))
# else {
# tmp = paste('"', parm@typeName, '"', sep = "")
# paste("(",
# ifelse(is(parm, "StructDefinition"), "struct", "union"),
# parm@typeName, "*) R_getNativeReference(", name, ",", tmp, ",", tmp, ");")
# }
})
setMethod("convertRValue", c(parm = "TypedefDefinition"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
# txt = paste('FAIL("cannot convert C/C++ typedef yet:', parm@name, '")')
# class(txt) = "FailedConversion"
if(is(parm@type, "FunctionPointer"))
return(convertRValue("", name, parm@type, parameters, typeMap, helperInfo))
if(is(parm@type, "PointerType") && !is(parm@type@type, "RoutineDefinition")) {
paste('DEREF_REF_PTR(', name, ", ", parm@name, ')')
} else
paste('DEREF_REF(', name, ", ", parm@name, ')') # name
})
setMethod("convertRValue", c(parm = "StructDefinition"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
# warning("Need converter for struct type ", parm@name)
rtype = dQuote(getReferenceClassName(parm))
cast = getNativeDeclaration("", parm, addSemiColon = FALSE, const = FALSE)
txt = paste("* (", cast, ' *) R_getNativeReference(', name, ',', rtype, ",", rtype, ")")
# class(txt) = "FailedConversion"
txt
})
#XXXX
setMethod("convertRValue", c(parm = "C++ClassDefinition"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
warning("convertRValue for C++ClassDefinition is ignored currently as there is no obvious general, faithful mapping: class ", parm@name)
"" # XXX character(0)
})
setMethod("convertRValue", c(parm = "UnionDefinition"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
txt = paste('FAIL("cannot convert raw C/C++ struct yet, only pointers to structs: ', parm@name, '")')
class(txt) = "FailedConversion"
txt
})
setMethod("convertRValue", c(parm = "ArrayType"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
# If we have a pointer object, dereference that, checking it is of the correct type.
# Otherwise, if it is a builtin type, use that
if(is(parm@type, "CString")) {
#XXX Handle case we have a char **
code = c(" {",
" int i;",
" char **els = NULL;",
paste(" int isStringVector = TYPEOF(", name, ") == STRSXP;"),
paste(" if(!isStringVector)"),
paste(' els = DEREF_REF_PTR(', name, ", ", getNativeDeclaration("", parm@type, , FALSE, FALSE), ');'),
paste(" for(i=0; i <", parm@length, "; i++) {"),
#XXX strdup ? Need allocation
paste(" ", to, "[i] = isStringVector ? strdup(CHAR(STRING_ELT(", name, ", i))) : els[i];" ),
" }",
" }")
return(structure(code, class = c("StatementBlock", "Statement")))
}
txt = paste('DEREF_REF_PTR(', name, ", ", getNativeDeclaration("", parm@type, , FALSE, FALSE), ')')
ptype = new("PointerType", type = parm@type, typeName = parm@type@name, depth = as.integer(1))
if(is(parm@type, "BuiltinPrimitiveType"))
txt = paste("IS_S4_OBJECT(", name, ") ? ", txt, ":", convertRValue("", name, ptype, parameters, typeMap)) #XXX
setEls = if(is(parm@type, "BuiltinPrimitiveType")) {
# && Make the test check the types are not the same != )
paste(" ", copyRArrayElementsToNative(parm, to, name))
} else
c(getNativeDeclaration("_tmp", parm),
paste("_tmp = ", txt, ";"),
paste("memcpy(", to, ", _tmp, sizeof(", parm@type@name, ") *", parm@length, ");"))
structure(
c(" {", # extra space to avoid formatCode thinking this is a potential beginning!
setEls,
" }"), class = c("StatementBlock", "Statement"))
})
setMethod("convertRValue", c(parm = "FunctionPointer"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
# Need to know the name of the routine in which this is being used so that we can
# access the relevant stack.
# Could use GCC when compiling the resulting code to find the name of the routine !!!! :-)
# But we arrange for the code that calls this generic convertRValue to supply the information
# as an attribute on the type.
m = attr(parm, "Caller")
decl = if(length(parm@alias))
parm@alias
else
RoutineDefinitionDeclaration(parm)
funName = if(!is.null(m)) paste(m$name, c("fun", "stack"), sep = "_")
else c("NULL", "NULL")
paste("(", decl, ") R_asFunctionPointer(", name, ", ", funName[1], ", ", funName[2], ");")
})
setMethod("convertRValue", c(parm = "UserDataType"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
paste("TYPEOF(", name, ") == EXTPTRSXP ? R_ExternalPtrAddr(", name, ") : (void *)", name)
})
setMethod("convertRValue", c(parm = "Field"),
function(to, name, parm, parameters, typeMap = list(), helperInfo = NULL) {
convertRValue(to, name, parm@type, parameters, typeMap, helperInfo)
})
|
180c416754fee443b05184a19660943f967c815b | b8dd5c4b5a58c85058f697adf2ae3305e8cf770f | /stephen_kiilu_SRR1.R | ae8684b71259c71d60f4c53f3ba4e2f6c0a142c6 | [] | no_license | stephenkiilu/STATISTICAL-REGRESSION-WITH-R | 569975fa1f6c214071a5299541251a0a31c09e81 | de35ac5997e9465ee6bc971681d7d29c3dfbf606 | refs/heads/main | 2023-02-13T18:00:57.897158 | 2021-01-02T13:38:51 | 2021-01-02T13:38:51 | 326,189,374 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,690 | r | stephen_kiilu_SRR1.R | ##loading data
library(ggplot2)
death_rates <- read.csv("~/AIMS/REVIEW PHASE/REGRESSION WITH R/Assignment/Stats Regr - Homework1/death_rates.csv")
attach(death_rates)
death_rates
##Slicing the data
data=death_rates
attach(data)
male=data[1:31,]
female=data[32:62,]
##Question ONE
##Summary of age in the dataset
summary(Age)# summary statistics
n=length(Age)# sample size
n
mean_age=mean(Age)
var_age=sum((Age-mean_age)^2)/(n-1)#variance of the Age
var_age
sd_age=var_age^0.5#standard deviation of Age
sd_age
mean_death=mean( DeathRate)#mean DeathRate per 10,000
mean_death
summary(DeathRate)#summary of Death Rates
var_death=sum((DeathRate-mean_death)^2)/(n-1)#variance of DeathRates per 10,000
var_death
sd_age=var_age^0.5#standard deviation of Age
sd_death=var_death^0.5#standard deviation of DeathRates per 10,000
sd_death
cor(data$DeathRate,data$Age)
##Graphics
par(mfrow=c(1,2))## Boxplot of Age and DeathRate side by side
boxplot(Age,main="boxplot of Age ",xlab="Age",ylab="years")
boxplot(DeathRate,main="boxplot of DeathRate ",xlab="DeathRate",ylab="Number of deaths in 10,000")
plot(Age,DeathRate)##scatter diagram of Age and DeathRate
par(mfrow=c(2,1))## Histogram of Age and DeathRate in 10,000 side by side
hist(Age)
hist(DeathRate)
boxplot(Age,DeathRate,names=c("Age","DeathRate"),main="Boxplot of Age and DeathRate")
##QUESTION 2
#Side by side scatter diagrams of DeathRate aginst Age in females and males.
par(mfrow=c(2,1))
plot(female$Age,female$DeathRate,main = "scatter plot of DeathRate against Age in females",xlab="Age",ylab = "DeathRate")
plot(male$Age,male$DeathRate,main = "scatter plot of DeathRate against Age in males",xlab="Age",ylab = "DeathRate")
#male
mean_male_age=mean(male$Age)
var_male_age=sum((male$Age-mean_male_age)^2)/(31-1)
mean_male_DeathRate=mean(male$DeathRate)
var_male_DeathRate=sum((male$DeathRate-mean_male_DeathRate)^2)/(31-1)
cov_age_death=sum((male$Age-mean_male_age)*(male$DeathRate-mean_male_DeathRate))/(31-1)
cor_male=cov_age_death/(var_male_age*var_male_DeathRate)^0.5
cor_male##correlation between Age and DeathRate in males
#female
mean_female_age=mean(female$Age)
var_female_age=sum((female$Age-mean_female_age)^2)/(31-1)
mean_female_DeathRate=mean(female$DeathRate)
var_female_DeathRate=sum((female$DeathRate-mean_female_DeathRate)^2)/(31-1)
cov_age_death=sum((female$Age-mean_female_age)*(female$DeathRate-mean_female_DeathRate))/(31-1)
cor_female=cov_age_death/(var_female_age*var_female_DeathRate)^0.5
corr=cor(female$Age,female$DeathRate)
corr##correlation between Age and DeathRate in female
##QUESTION 3
beta=cov_age_death/var_female_age##Gradient of regression line
alpha=mean_female_DeathRate-(beta*mean_female_age)##Intercept of regression line
#DeathRate_female=0.7089*female_age-15.4879 rregression line equation
ggplot(female,aes(female$Age,female$DeathRate))+geom_point(color="tomato3")+geom_abline(method=lm)+geom_smooth(method = lm,color="tomato3")+
ggtitle("Regression line of Age against DeatRate in females")+ylab("DeathRate")+xlab("Age")
##QUESTION 4
y=0.7089*(51)-15.4879
y##Predicted DeathRates of females aged 51
##QUESTION 5
##Indicators of quality of the model
##Coefficient of Determination R-Squared
y_est=0.7089*female$Age-15.4879 ## y estimated
SSE=sum((female$DeathRate-y_est)^2)
SST=sum((female$DeathRate-mean_female_DeathRate)^2)
R_squared=1-SSE/SST
R_squared##Coefficient of Determination, R-Squared
#Mean square error,MSE
MSE=sum((female$DeathRate-y_est)^2)/29
MSE
plot(death_rates$Age,death_rates$DeathRate)
f=lm(death_rates$DeathRate~death_rates$Age)
abline(reg=f)
|
780d1cdbd69800e471fdb2f42e0a9bc1a247b860 | e5c25fe0ac126440c0f1f44c46adbb06908627ce | /scratch/OldScratch/BackupBeforeBigChanges20190310/R/SGGP_append_fs.R | 47f4793e89b19ae2acd2d78e8f046cbbb8fe685f | [] | no_license | CollinErickson/CGGP | e36638c490f3a6c61566d8533e821f9b7e7906ba | 0752b6cef2cf991d8cceef0f5df783bb0637f51e | refs/heads/master | 2022-09-06T23:36:29.559748 | 2021-05-09T15:00:53 | 2021-05-09T15:00:53 | 150,596,466 | 2 | 2 | null | 2022-08-30T08:19:15 | 2018-09-27T14:04:24 | HTML | UTF-8 | R | false | false | 26,706 | r | SGGP_append_fs.R | #' Calculate MSE over single dimension
#'
#' Calculated using grid of integration points.
#' Can be calculated exactly, but not much reason in 1D.
#'
#' @param xl Vector of points in 1D
#' @param theta Correlation parameters
#' @param CorrMat Function that gives correlation matrix for vectors of 1D points.
#'
#' @return MSE value
#' @export
#'
#' @examples
#' SGGP_internal_calcMSE(xl=c(0,.5,.9), theta=c(1,2,3),
#' CorrMat=SGGP_internal_CorrMatCauchySQT)
SGGP_internal_calcMSE <- function(xl, theta, CorrMat) {
S = CorrMat(xl, xl, theta)
xp = seq(0,1,l=101)
Cp = CorrMat(xp,xl,theta)
n = length(xl)
cholS = chol(S)
CiCp = backsolve(cholS,backsolve(cholS,t(Cp), transpose = TRUE))
MSE_MAPal = mean(1 - rowSums(t(CiCp)*Cp))
MSE_MAPal
}
#' Calculate MSE over blocks
#'
#' Delta of adding block is product over i=1..d of IMSE(i,j-1) - IMSE(i,j),
#' i.e. the product over dimensions of the variance reduction gained by
#' refining from the previous level. Level 1 has no ancestor, so the
#' "previous" IMSE is 1 (no-data variance under the correlation scaling).
#' Computed on the log scale and exponentiated at the end.
#'
#' @param valsinds Block levels to calculate MSEs for: either a matrix with
#'   one row per block and one column per dimension, or a single vector of
#'   levels (one block).
#' @param MSE_MAP Matrix of MSE values, dimensions by levels
#'
#' @return All MSE deltas: a vector with one value per row of \code{valsinds},
#'   or a scalar when \code{valsinds} is a vector
#' @export
#'
#' @examples
#' SG <- SGGPcreate(d=3, batchsize=100)
#' y <- apply(SG$design, 1, function(x){x[1]+x[2]^2})
#' SG <- SGGPfit(SG, Y=y)
#' MSE_MAP <- outer(1:SG$d, 1:8,
#'      Vectorize(function(dimlcv, lcv1) {
#'         SGGP_internal_calcMSE(SG$xb[1:SG$sizest[dimlcv]],
#'            theta=SG$thetaMAP[(dimlcv-1)*SG$numpara+1:SG$numpara],
#'            CorrMat=SG$CorrMat)
#'      }))
#' SGGP_internal_calcMSEde(SG$po[1:SG$poCOUNT, ], MSE_MAP)
SGGP_internal_calcMSEde <- function(valsinds, MSE_MAP) {
  # Lower bound applied to each log-term. -Inf disables it: it was set
  # to -10 previously and that ruined the calculation.
  maxparam <- -Inf
  # Log of the total variance reduction for one block (one vector of
  # per-dimension level indices).
  logdelta_row <- function(levels) {
    total <- 0
    for (dimlcv in seq_along(levels)) {
      lev <- levels[dimlcv]
      # Ancestor IMSE: previous level if one exists, otherwise 1
      # (the integrated no-data variance; depends on correlation function).
      prev <- if (lev > 1.5) MSE_MAP[dimlcv, lev - 1] else 1
      total <- total + max(log(prev - MSE_MAP[dimlcv, lev]), maxparam)
    }
    total
  }
  MSE_de <- if (is.matrix(valsinds)) {
    apply(valsinds, 1, logdelta_row)
  } else {
    logdelta_row(valsinds)
  }
  exp(MSE_de)
}
#' Add points to SGGP
#'
#' Add `batchsize` points to `SG` using `theta`.
#'
#' @param SGGP Sparse grid object
#' @param batchsize Number of points to add
#' @param selectionmethod How points will be selected: one of `UCB`, `TS`,
#' `Greedy`, `Oldest`, `Random`, or `Lowest`
#' @param RIMSEperpoint Should RIMSE per point be used?
#' @param multioutputdim_weights Weights for each output dimension.
#' @importFrom stats quantile sd var
#'
#' @return SG with new points added.
#' @export
#' @family SGGP core functions
#'
#' @examples
#' SG <- SGGPcreate(d=3, batchsize=100)
#' y <- apply(SG$design, 1, function(x){x[1]+x[2]^2})
#' SG <- SGGPfit(SG, Y=y)
#' SG <- SGGPappend(SGGP=SG, batchsize=20)
#' # UCB,TS,Greedy
SGGPappend <- function(SGGP,batchsize, selectionmethod = "UCB", RIMSEperpoint=TRUE,
multioutputdim_weights=1){
if (!(selectionmethod %in% c("UCB", "TS", "Greedy", "Oldest", "Random", "Lowest"))) {
stop("selectionmethod in SGGPappend must be one of UCB, TS, Greedy, Oldest, Random, or Lowest")
}
if (is.numeric(multioutputdim_weights)) {
if (length(multioutputdim_weights) != 1 && length(multioutputdim_weights) != ncol(SGGP$y)) {
stop("multioutputdim_weights if numeric must have length 1 or number of outputs")
}
} else if (multioutputdim_weights == "/range^2") {
multioutputdim_weights <- 1 / (apply(SGGP$Y, 2, max) - apply(SGGP$Y, 2, min))^2
if (any(is.na(multioutputdim_weights)) || any(is.infinite(multioutputdim_weights))) {
stop("multioutputdim_weights = '/range^2' not available when range is 0.")
}
} else if (multioutputdim_weights == "/sigma2MAP") {
multioutputdim_weights <- 1 / SGGP$sigma2MAP
} else {
stop("multioutputdim_weights not acceptable")
}
if (!is.null(SGGP$design_unevaluated)) {
stop("Can't append if SGGP has unevaluated design points.")
}
n_before <- if (is.null(SGGP[["design"]]) || length(SGGP$design)==0) {0} else {nrow(SGGP$design)}
max_polevels = apply(SGGP$po[1:SGGP$poCOUNT, ,drop=FALSE], 2, max)
separateoutputparameterdimensions <- is.matrix(SGGP$thetaMAP)
# nopd is numberofoutputparameterdimensions
nopd <- if (separateoutputparameterdimensions) {
if (length(SGGP$y)>0) {ncol(SGGP$y)} else {ncol(SGGP$ys)}
} else {
1
}
if(selectionmethod=="Greedy"){
# Set up blank matrix to store MSE values
# MSE_MAP = matrix(0, SGGP$d, SGGP$maxlevel)
# Now use an array for nopd
MSE_MAP = array(0, dim=c(SGGP$d, SGGP$maxlevel,nopd))
# Loop over dimensions and design refinements
for (opdlcv in 1:nopd) {
thetaMAP.thisloop <- if (nopd==1) SGGP$thetaMAP else SGGP$thetaMAP[, opdlcv]
for (dimlcv in 1:SGGP$d) {
for (levellcv in 1:max_polevels[dimlcv]) {
# Calculate some sort of MSE from above, not sure what it's doing
MSE_MAP[dimlcv, levellcv, opdlcv] = max(0,
abs(SGGP_internal_calcMSE(SGGP$xb[1:SGGP$sizest[levellcv]],
thetaMAP.thisloop[(dimlcv-1)*SGGP$numpara+1:SGGP$numpara],
SGGP$CorrMat)))
if (levellcv > 1.5) { # If past first level, it is as good as one below it. Why isn't this a result of calculation?
MSE_MAP[dimlcv, levellcv, opdlcv] = min(MSE_MAP[dimlcv, levellcv, opdlcv], MSE_MAP[dimlcv, levellcv - 1, opdlcv])
}
}
}
}
# What is this? Integrate MSE
IMES_MAP = rep(0, SGGP$ML)
# For all possible blocks, calculate MSE_MAP? Is that all that MSE_de does?
# IMES_MAP[1:SGGP$poCOUNT] = SGGP_internal_calcMSEde(SGGP$po[1:SGGP$poCOUNT, ], MSE_MAP)
# Need to apply it over nopd
IMES_MAP_beforemean = (apply(MSE_MAP, 3, function(x) SGGP_internal_calcMSEde(SGGP$po[1:SGGP$poCOUNT, ,drop=F], x)))
if (SGGP$poCOUNT==1) {
IMES_MAP_beforemean <- matrix(IMES_MAP_beforemean, nrow=1)
}
if (!is.matrix(IMES_MAP_beforemean)) {stop("Need a matrix here 0923859")}
# Need as.matrix in case of single value, i.e. when only supp data and only po is initial point
# IMES_MAP_apply has a column for each opdlcv, so take rowMeans
# IMES_MAP[1:SGGP$poCOUNT] = rowMeans(IMES_MAP_beforemean)
# Need to include sigma2MAP here because weird things can happen if just using correlation.
# IMES_MAP[1:SGGP$poCOUNT] = rowMeans(sweep(IMES_MAP_beforemean, 2, SGGP$sigma2MAP * multioutputdim_weights, "*"))
# If multiple output but single opd, need to take mean
sigma2MAP.thisloop <- if (nopd==1) {mean(SGGP$sigma2MAP)} else {SGGP$sigma2MAP}
IMES_MAP[1:SGGP$poCOUNT] = rowMeans(sweep(IMES_MAP_beforemean, 2, sigma2MAP.thisloop * multioutputdim_weights, "*"))
# Clean up to avoid silly errors
rm(opdlcv, thetaMAP.thisloop, sigma2MAP.thisloop)
} else if (selectionmethod %in% c("UCB", "TS")) { # selectionmethod is UCB or TS
# MSE_PostSamples = array(0, c(SGGP$d, SGGP$maxlevel,SGGP$numPostSamples))
# Array needs another dimension for nopd
MSE_PostSamples = array(0, c(SGGP$d, SGGP$maxlevel,SGGP$numPostSamples, nopd))
# MSE_UCB = matrix(0, SGGP$d, SGGP$maxlevel)
# Dimensions can be considered independently
# Loop over dimensions and design refinements
for (opdlcv in 1:nopd) { # Loop over output parameter dimensions
thetaPostSamples.thisloop <- if (nopd==1) SGGP$thetaPostSamples else SGGP$thetaPostSamples[, , opdlcv]
for (dimlcv in 1:SGGP$d) {
for (levellcv in 1:max_polevels[dimlcv]) {
for(samplelcv in 1:SGGP$numPostSamples){
# Calculate some sort of MSE from above, not sure what it's doing
MSE_PostSamples[dimlcv, levellcv,samplelcv, opdlcv] =
max(0,
abs(
SGGP_internal_calcMSE(
SGGP$xb[1:SGGP$sizest[levellcv]],
thetaPostSamples.thisloop[(dimlcv-1)*SGGP$numpara+1:SGGP$numpara,
samplelcv],
SGGP$CorrMat)
)
)
if (levellcv > 1.5) { # If past first level, it is as good as one below it. Why isn't this a result of calculation?
MSE_PostSamples[dimlcv, levellcv,samplelcv, opdlcv] = min(MSE_PostSamples[dimlcv, levellcv,samplelcv, opdlcv],
MSE_PostSamples[dimlcv, levellcv - 1,samplelcv, opdlcv])
}
}
# done below MSE_UCB[dimlcv, levellcv] = quantile(MSE_PostSamples[dimlcv, levellcv,],0.99)
}
}
}
rm(opdlcv, dimlcv, levellcv, samplelcv) # Avoid dumb mistakes
IMES_PostSamples = matrix(0, SGGP$ML,SGGP$numPostSamples)
# Calculate sigma2 for all samples if needed
sigma2.allsamples.alloutputs <-
if (is.null(SGGP[["y"]]) || length(SGGP$y)==0) { # Only supp data
# Not sure this is right
matrix(SGGP$sigma2MAP, byrow=T, nrow=SGGP$numPostSamples, ncol=length(SGGP$sigma2MAP))
} else if (nopd == 1 && length(SGGP$sigma2MAP)==1) { # 1 opd and 1 od
as.matrix(
apply(SGGP$thetaPostSamples, 2,
function(th) {
SGGP_internal_calcsigma2(SGGP,
SGGP$y,
th
)$sigma2
}
)
)
} else if (nopd == 1) { # 1 opd but 2+ od
t(
apply(SGGP$thetaPostSamples, 2,
function(th) {
SGGP_internal_calcsigma2(SGGP,
SGGP$y,
th
)$sigma2
}
)
)
} else { # 2+ opd, so must be 2+ od
outer(1:SGGP$numPostSamples, 1:nopd,
Vectorize(function(samplenum, outputdim) {
SGGP_internal_calcsigma2(SGGP,
if (nopd==1) {SGGP$y} else {SGGP$y[,outputdim]},
if (nopd==1) {SGGP$thetaPostSamples[,samplenum]
} else {SGGP$thetaPostSamples[,samplenum,outputdim]}
)$sigma2
})
)}
for(samplelcv in 1:SGGP$numPostSamples){
# IMES_PostSamples[1:SGGP$poCOUNT,samplelcv] = SGGP_internal_calcMSEde(SGGP$po[1:SGGP$poCOUNT,], MSE_PostSamples[,,samplelcv])
# It's an array, need to average over opdlcv. Over 3rd dim since samplelcv removes 3rd dim of array.
if (nopd == 1) { # Will be a matrix
# Multiply by sigma2. If multiple output dimensions with shared parameters, take mean,
# Needed because each thetasample will have a different sigma2.
sigma2.thistime <- mean(sigma2.allsamples.alloutputs[samplelcv,])
IMES_PostSamples[1:SGGP$poCOUNT,samplelcv] = sigma2.thistime *
SGGP_internal_calcMSEde(SGGP$po[1:SGGP$poCOUNT,], MSE_PostSamples[,,samplelcv,])
rm(sigma2.thistime)
# IMES_PostSamples[1:SGGP$poCOUNT,samplelcv] = SGGP_internal_calcMSEde(SGGP$po[1:SGGP$poCOUNT,], MSE_PostSamples[,,samplelcv,])
} else { # Is a 3d array, need to use an apply and then apply again with mean
IMES_PostSamples_beforemean <- apply(MSE_PostSamples[,,samplelcv,], 3,
function(x){SGGP_internal_calcMSEde(SGGP$po[1:SGGP$poCOUNT,,drop=F], x)})
if (!is.matrix(IMES_PostSamples_beforemean)) { # Happens when SGGP$poCOUNT is 1, when only initial block avail
if (SGGP$poCOUNT!=1) {stop("Something is wrong here")}
IMES_PostSamples_beforemean <- matrix(IMES_PostSamples_beforemean, nrow=1)
}
# Need sigma2 for this theta sample, already calculated in sigma2.allsamples.alloutputs
IMES_PostSamples[1:SGGP$poCOUNT,samplelcv] <- apply(IMES_PostSamples_beforemean, 1,
function(x) {
# mean(multioutputdim_weights*x)
# Now weight by sigma2 samples
mean(sigma2.allsamples.alloutputs[samplelcv,] *
multioutputdim_weights*x)
})
}
}; rm(samplelcv)
# IMES_UCB = matrix(0, SGGP$ML) # Why was this matrix but other vector?
IMES_UCB = numeric(SGGP$ML)
# browser()
IMES_UCB[1:SGGP$poCOUNT] = apply(IMES_PostSamples[1:SGGP$poCOUNT,, drop=F],1,quantile, probs=0.9)
} else {
# Can be Oldest or Random or Lowest
}
# Removing bss entirely, was wrong and redundant.
# Increase count of points evaluated. Do we check this if not reached exactly???
# SGGP$bss = SGGP$bss + batchsize
max_design_points = SGGP$ss + batchsize
# Keep adding points until reaching bss
while (max_design_points > (SGGP$ss + min(SGGP$pogsize[1:SGGP$poCOUNT]) - 0.5)) {
if(selectionmethod=="Greedy"){
IMES = IMES_MAP
} else if(selectionmethod=="UCB"){
IMES = IMES_UCB
} else if(selectionmethod=="TS"){
IMES = IMES_PostSamples[,sample(1:SGGP$numPostSamples,1)]
} else if(selectionmethod=="Oldest"){
IMES = seq.int(from=SGGP$poCOUNT, to=1, by=-1)
# Multiply by size so it gets undone below
IMES <- IMES * SGGP$pogsize[1:SGGP$poCOUNT]
} else if(selectionmethod=="Random"){
IMES = rep(1,SGGP$poCOUNT)
# Multiply by size so it gets undone below
IMES <- IMES * SGGP$pogsize[1:SGGP$poCOUNT]
} else if(selectionmethod=="Lowest"){
IMES = rowSums(SGGP$po[1:SGGP$poCOUNT,])
# Make the lowest the highest value
IMES <- max(IMES) + 1 - IMES
# Multiply by size so it gets undone below
IMES <- IMES * SGGP$pogsize[1:SGGP$poCOUNT]
} else {
stop("Selection method not acceptable")
}
SGGP$uoCOUNT = SGGP$uoCOUNT + 1 #increment used count
# Old way, no RIMSEperpoint option
# # Find the best one that still fits
# M_comp = max(IMES[which(SGGP$pogsize[1:SGGP$poCOUNT] < (SGGP$bss - SGGP$ss + 0.5))])
# # Find which ones are close to M_comp and
# possibleO =which((IMES[1:SGGP$poCOUNT] >= 0.99*M_comp)&(SGGP$pogsize[1:SGGP$poCOUNT] < (SGGP$bss - SGGP$ss + 0.5)))
# New way, now you can pick best IMES per point in the block, more efficient
stillpossible <- which(SGGP$pogsize[1:SGGP$poCOUNT] < (max_design_points - SGGP$ss + 0.5))
# Either pick block with max IMES or with max IMES per point in the block.
if (RIMSEperpoint) {
metric <- IMES[1:SGGP$poCOUNT] / SGGP$pogsize[1:SGGP$poCOUNT]
} else {
metric <- IMES[1:SGGP$poCOUNT]
}
# Find the best one that still fits
M_comp = max(metric[stillpossible])
# Find which ones are close to M_comp and
# possibleO =which((IMES[stillpossible] >= 0.99*M_comp)&(SGGP$pogsize[1:SGGP$poCOUNT] < (SGGP$bss - SGGP$ss + 0.5)))
possibleO = stillpossible[metric[stillpossible] >= 0.99*M_comp]
# If more than one is possible and near the best, randomly pick among them.
if(length(possibleO)>1.5){
pstar = sample(possibleO,1)
} else{
pstar = possibleO
}
l0 = SGGP$po[pstar,] # Selected block
# Need to make sure there is still an open row in uo to set with new values
if (SGGP$uoCOUNT > nrow(SGGP$uo)) {
SGGP <- SGGP_internal_addrows(SGGP)
}
# print(list(dim(SGGP$uo), SGGP$uoCOUNT, SGGP$uo[SGGP$uoCOUNT,], l0))
SGGP$uo[SGGP$uoCOUNT,] = l0 # Save selected block
SGGP$ss = SGGP$ss + SGGP$pogsize[pstar] # Update selected size
# New ancestors???
# Protect against initial block which has no ancestors
# browser()
if (SGGP$pilaCOUNT[pstar] > 0) { # Protect for initial block
# new_an = if (SGGP$pilaCOUNT[pstar]>0 ){SGGP$pila[pstar, 1:SGGP$pilaCOUNT[pstar]]} else{numeric(0)}
new_an = SGGP$pila[pstar, 1:SGGP$pilaCOUNT[pstar]]
total_an = new_an
for (anlcv in 1:length(total_an)) { # Loop over ancestors
if (total_an[anlcv] > 1.5) { # If there's more than 1, do ???
total_an = unique(c(total_an, SGGP$uala[total_an[anlcv], 1:SGGP$ualaCOUNT[total_an[anlcv]]]))
}
}
SGGP$ualaCOUNT[SGGP$uoCOUNT] = length(total_an)
SGGP$uala[SGGP$uoCOUNT, 1:length(total_an)] = total_an
# Loop over all ancestors, why???
for (anlcv in 1:length(total_an)) {
lo = SGGP$uo[total_an[anlcv],]
if (max(abs(lo - l0)) < 1.5) {
SGGP$w[total_an[anlcv]] = SGGP$w[total_an[anlcv]] + (-1)^abs(round(sum(l0-lo)))
}
}
}
SGGP$w[SGGP$uoCOUNT] = SGGP$w[SGGP$uoCOUNT] + 1
# Update data. Remove selected item, move rest up.
# First get correct indices to change. Protect when selecting initial point
new_indices <- if (SGGP$poCOUNT>1) {1:(SGGP$poCOUNT - 1)} else {numeric(0)}
if (SGGP$poCOUNT < 1.5) { # Only option is first block, nothing else to move
old_indices <- numeric(0)
} else if (pstar < 1.5) {
old_indices <- 2:SGGP$poCOUNT
} else if (pstar > (SGGP$poCOUNT - 0.5)) {
old_indices <- 1:(pstar - 1)
} else if (pstar < (SGGP$poCOUNT - 0.5) && pstar > 1.5) {
old_indices <- c(1:(pstar - 1), (pstar + 1):SGGP$poCOUNT)
} else {stop("Not possible #729588")}
# Then change the data
# browser()
SGGP$po[new_indices,] = SGGP$po[old_indices,]
SGGP$pila[new_indices,] = SGGP$pila[old_indices,]
SGGP$pilaCOUNT[new_indices] = SGGP$pilaCOUNT[old_indices]
SGGP$pogsize[new_indices] = SGGP$pogsize[old_indices]
if(selectionmethod=="Greedy"){
IMES_MAP[new_indices] = IMES_MAP[old_indices]
}
if(selectionmethod=="UCB"){
IMES_UCB[new_indices] = IMES_UCB[old_indices]
}
if(selectionmethod=="TS"){
IMES_PostSamples[new_indices,] = IMES_PostSamples[old_indices,]
}
# And reduce number of available blocks by one.
SGGP$poCOUNT = SGGP$poCOUNT - 1
# Loop over possible descendents of selected block, add them if possible
for (dimlcv in 1:SGGP$d) {
lp = l0
lp[dimlcv] = lp[dimlcv] + 1
if (max(lp) < SGGP$maxlevel && SGGP$poCOUNT < 4 * SGGP$ML) {
kvals = which(lp > 1.5) # Dimensions above base level
canuse = 1
ap = rep(0, SGGP$d)
nap = 0
for (activedimlcv in 1:length(kvals)) {
lpp = lp
lpp[kvals[activedimlcv]] = lpp[kvals[activedimlcv]] - 1
ismem = rep(1, SGGP$uoCOUNT)
for (dimdimlcv in 1:SGGP$d) {
ismem = ismem * (SGGP$uo[1:SGGP$uoCOUNT, dimdimlcv] == lpp[dimdimlcv])
}
if (max(ismem) > 0.5) {
ap[activedimlcv] = which(ismem > 0.5)
nap = nap + 1
} else{
canuse = 0
}
}
if (canuse > 0.5) { # If it can be used, add to possible blocks
SGGP$poCOUNT = SGGP$poCOUNT + 1
SGGP$po[SGGP$poCOUNT,] = lp
SGGP$pogsize[SGGP$poCOUNT] = prod(SGGP$sizes[lp])
SGGP$pila[SGGP$poCOUNT, 1:nap] = ap[1:nap]
SGGP$pilaCOUNT[SGGP$poCOUNT] = nap
max_polevels_old = max_polevels
max_polevels = apply(SGGP$po[1:SGGP$poCOUNT, ,drop=F], 2, max)
if(selectionmethod=="Greedy"){
for (opdlcv in 1:nopd) { # Loop over output parameter dimensions
thetaMAP.thisloop <- if (nopd==1) SGGP$thetaMAP else SGGP$thetaMAP[, opdlcv]
for (dimlcv in 1:SGGP$d) {
if((max_polevels_old[dimlcv]+0.5)<max_polevels[dimlcv]){
levellcv = max_polevels[dimlcv]
MSE_MAP[dimlcv, levellcv,
opdlcv] = max(0, abs(SGGP_internal_calcMSE(SGGP$xb[1:SGGP$sizest[levellcv]],
thetaMAP.thisloop[(dimlcv-1)*SGGP$numpara+1:SGGP$numpara],
SGGP$CorrMat)))
if (levellcv > 1.5) { # If past first level, it is as good as one below it. Why isn't this a result of calculation?
MSE_MAP[dimlcv, levellcv, opdlcv] = min(MSE_MAP[dimlcv, levellcv, opdlcv], MSE_MAP[dimlcv, levellcv - 1, opdlcv])
}
}
}
}
# Clean up
rm(thetaMAP.thisloop, opdlcv)
} else if (selectionmethod %in% c("UCB", "TS")){ # selection method is UCB or TS
for (opdlcv in 1:nopd) {
thetaPostSamples.thisloop <- if (nopd==1) SGGP$thetaPostSamples else SGGP$thetaPostSamples[, , opdlcv]
for (dimlcv_2 in 1:SGGP$d) { # dimlcv is already used for which descendent to add
if((max_polevels_old[dimlcv_2]+0.5)<max_polevels[dimlcv_2]){
levellcv = max_polevels[dimlcv_2]
for(samplelcv in 1:SGGP$numPostSamples){
# Calculate some sort of MSE from above, not sure what it's doing
MSE_PostSamples[dimlcv_2, levellcv,
samplelcv, opdlcv] = max(0,
abs(SGGP_internal_calcMSE(
SGGP$xb[1:SGGP$sizest[levellcv]],
thetaPostSamples.thisloop[(dimlcv_2-1)*SGGP$numpara+1:SGGP$numpara,
samplelcv],
SGGP$CorrMat)))
if (levellcv > 1.5) { # If past first level, it is as good as one below it. Why isn't this a result of calculation?
MSE_PostSamples[dimlcv_2, levellcv,
samplelcv, opdlcv] = min(MSE_PostSamples[dimlcv_2, levellcv,samplelcv, opdlcv],
MSE_PostSamples[dimlcv_2, levellcv - 1,samplelcv, opdlcv])
}
}; rm(samplelcv)
}
}; rm(dimlcv_2)
}
# Clean up
rm(thetaPostSamples.thisloop, opdlcv)
} else {
# Can be Oldest or Random or Lowest
}
if(selectionmethod=="Greedy"){
# IMES_MAP[SGGP$poCOUNT] = SGGP_internal_calcMSEde(as.vector(SGGP$po[SGGP$poCOUNT, ]), MSE_MAP)
# Need to apply first
IMES_MAP_beforemeannewpoint <- apply(MSE_MAP, 3,
function(x) {SGGP_internal_calcMSEde(as.vector(SGGP$po[SGGP$poCOUNT, ]), x)})
# Take weighted mean over dimensions
IMES_MAP[SGGP$poCOUNT] <- mean(SGGP$sigma2MAP * IMES_MAP_beforemeannewpoint * multioutputdim_weights)
} else if (selectionmethod=="UCB" || selectionmethod=="TS"){
for(samplelcv in 1:SGGP$numPostSamples){
# IMES_PostSamples[SGGP$poCOUNT,samplelcv] = SGGP_internal_calcMSEde(as.vector(SGGP$po[SGGP$poCOUNT, ]),
# MSE_PostSamples[,,samplelcv])
if (nopd == 1) { # is a matrix
# Each sample has different sigma2, so use. If multiple output
# parameter dimensions, take mean over sigma2.
sigma2.thistime <- mean(sigma2.allsamples.alloutputs[samplelcv,])
IMES_PostSamples[SGGP$poCOUNT,samplelcv] = sigma2.thistime *
SGGP_internal_calcMSEde(as.vector(SGGP$po[SGGP$poCOUNT, ]),
MSE_PostSamples[,,samplelcv,])
rm(sigma2.thistime)
# IMES_PostSamples[SGGP$poCOUNT,samplelcv] = SGGP_internal_calcMSEde(as.vector(SGGP$po[SGGP$poCOUNT, ]),
# MSE_PostSamples[,,samplelcv,])
} else { # is an array, need to apply
IMES_PostSamples_beforemeannewpoint = apply(MSE_PostSamples[,,samplelcv,],
3, # 3rd dim since samplelcv removes 3rd
function(x) {
SGGP_internal_calcMSEde(as.vector(SGGP$po[SGGP$poCOUNT, ]), x)
}
)
IMES_PostSamples[SGGP$poCOUNT,samplelcv] <- mean(sigma2.allsamples.alloutputs[samplelcv,] *
multioutputdim_weights * IMES_PostSamples_beforemeannewpoint)
}
}; rm(samplelcv)
IMES_UCB[SGGP$poCOUNT] = quantile(IMES_PostSamples[SGGP$poCOUNT,],probs=0.9)
} else if (selectionmethod %in% c("Oldest", "Random", "Lowest")) {
# nothing needed
} else {stop("Not possible #9235058")}
}
}
}
}
# THIS OVERWRITES AND RECALCULATES design EVERY TIME, WHY NOT JUST DO FOR NEW ROWS?
SGGP <- SGGP_internal_getdesignfromSGGP(SGGP)
# Check if none were added, return warning/error
if (n_before == nrow(SGGP$design)) {
warning("No points could be added. You may need a larger batch size.")
} else {
# Save design_unevaluated to make it easy to know which ones to add
SGGP$design_unevaluated <- SGGP$design[(n_before+1):nrow(SGGP$design),]
}
return(SGGP)
}
|
ef3d79335847328d3b3630c83f04aac5e2d19d90 | db78542ec83aa66cb8a543a94463bb99c58151e7 | /02 Intermediate R/302 Func inside Functions.r | a6527b7111f3373ad4bf62234632bc603580ab1c | [] | no_license | chunhuayu/R-Learning | 59ee2567fb910c5124492da84603069ee5b9e2f1 | 36ede3bb562dca07029a8411e230b970e69f22e5 | refs/heads/master | 2020-05-09T14:59:05.094442 | 2019-06-29T03:06:52 | 2019-06-29T03:06:52 | 181,216,574 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 958 | r | 302 Func inside Functions.r | ### Functions inside functions
# You already know that R functions return objects that you can then use somewhere else.
# This makes it easy to use functions inside functions, as you've seen before:
speed <- 31
print(paste("Your speed is", speed))
# Notice that both the print() and paste() functions use the ellipsis - ... - as an argument.
# Can you figure out how they're used?
### INSTRUCTIONS
# Use abs() on linkedin - facebook to get the absolute differences between the daily Linkedin and Facebook profile views.
# Next, use this function call inside mean() to calculate the Mean Absolute Deviation.
# In the mean() call, make sure to specify na.rm to treat missing values correctly!
### R
> # The linkedin and facebook vectors have already been created for you
> linkedin <- c(16, 9, 13, 5, NA, 17, 14)
> facebook <- c(17, NA, 5, 16, 8, 13, 14)
>
> # Calculate the mean absolute deviation
> mean(abs(linkedin-facebook),na.rm=TRUE)
[1] 4.8
>
|
cc5079385ad336527c921382fb936be521872e14 | 6e7af9b27cf18bb4633ad9d0b63a7e8ed9a887fb | /man/plot_ranges_pLigand.Rd | 0b0b18dbce47d50fc2f16639269e9432baa9da18 | [
"MIT"
] | permissive | ApfeldLab/SensorOverlord | 0fc62dd3c11b702cd477d0692085ea7be46911a7 | 2fbe7e0d0963561241d5c1e78dd131211e1b31a0 | refs/heads/master | 2022-12-27T15:20:27.343783 | 2020-10-13T23:28:48 | 2020-10-13T23:28:48 | 176,821,341 | 2 | 0 | null | 2020-06-14T15:37:09 | 2019-03-20T21:40:17 | R | UTF-8 | R | false | true | 1,343 | rd | plot_ranges_pLigand.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{plot_ranges_pLigand}
\alias{plot_ranges_pLigand}
\title{Takes in a ranges_df dataframe and makes a plot (for pLigand).}
\usage{
plot_ranges_pLigand(ranges, ylim = c(1, 14), by = 1, ylab = "pLigand")
}
\arguments{
\item{ranges}{A dataframe of ranges with at least these columns:
'Sensor_Name': the name of the sensor
'Minimum': the minimum pLigand measurable at the given inaccuracy
'Maximum': the maximum pLigand measurable at the given inaccuracy
'Inaccuracy': the inaccuracy associated with this row (relative)
'error_thresh': the error threshold associated with this row}
\item{ylim}{The limits of the ranges plot}
\item{by}{the 'by' argument of the limits axis tick marks}
\item{ylab}{The label of the ranges plot}
}
\value{
A ggplot object
}
\description{
Takes in a ranges_df dataframe and makes a plot (for pLigand).
}
\examples{
error_df <- create_error_df_pLigand_multiple(
c(0.01, 0.02), 2, 10,
data.frame(
"Rmin" = c(1, 2),
"Rmax" = c(5, 6),
"delta" = c(0.2, 1.2),
"name" = c("normal", "plusOne"),
"pKd" = c(7, 8)
),
ligand_name = "NADPH"
)
ranges_df <- create_ranges_multiple(error_df,
parameter = "NADPH",
thresholds = c(0.01, 0.05, 0.10, 0.15, 0.20)
)
plot_ranges_pLigand(ranges_df, ylab = "pNADPH")
}
|
e8805a9697b3964840e31c1b1bfbb78ef69b0e4e | a0a00d56191541ecd569b2f4ba6310ee4abe073b | /man/var.sel.boruta.Rd | 02aefe4d9f49b08b1d6dc1912b20c5b2e87f9c79 | [] | no_license | silkeszy/Pomona | 72bcdac9395e742a6ad51c2de7601842e0889508 | cdb2c35793c2e05729f8be205eac7e0319c4266c | refs/heads/master | 2023-01-24T17:34:28.307309 | 2022-02-23T13:14:00 | 2022-02-23T13:14:00 | 89,244,056 | 6 | 5 | null | null | null | null | UTF-8 | R | false | true | 2,723 | rd | var.sel.boruta.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variable_selection_boruta.R
\name{var.sel.boruta}
\alias{var.sel.boruta}
\title{Variable selection using Boruta function.}
\usage{
var.sel.boruta(
x,
y,
pValue = 0.01,
maxRuns = 100,
ntree = 500,
mtry.prop = 0.2,
nodesize.prop = 0.1,
no.threads = 1,
method = "ranger",
type = "regression",
importance = "impurity_corrected",
case.weights = NULL
)
}
\arguments{
\item{x}{matrix or data.frame of predictor variables with variables in
columns and samples in rows (Note: missing values are not allowed).}
\item{y}{vector with values of phenotype variable (Note: will be converted to factor if
classification mode is used).}
\item{pValue}{confidence level (default: 0.01 based on Boruta package)}
\item{maxRuns}{maximal number of importance source runs (default: 100 based on Boruta package)}
\item{ntree}{number of trees.}
\item{mtry.prop}{proportion of variables that should be used at each split.}
\item{nodesize.prop}{proportion of minimal number of samples in terminal
nodes.}
\item{no.threads}{number of threads used for parallel execution.}
\item{method}{implementation to be used ("ranger").}
\item{type}{mode of prediction ("regression", "classification" or "probability").}
\item{importance}{Variable importance mode ('none', 'impurity',
'impurity_corrected' or 'permutation'). Default is 'impurity_corrected'.}
\item{case.weights}{Weights for sampling of training observations. Observations with larger weights will be selected with higher probability in the bootstrap (or subsampled) samples for the trees.}
}
\value{
List with the following components:
\itemize{
\item \code{info} data.frame
with information of each variable
\itemize{
\item run.x = original variable importance (VIM) in run x
(includes min, mean and max of VIM of shadow variables)
\item decision = Boruta decision (Confirmed, Rejected or Tentative)
\item selected = variable has been selected
}
\item \code{var} vector of selected variables
\item \code{info.shadow.var} data.frame with information about
minimal, mean and maximal shadow variables of each run
}
@examples
# simulate toy data set
data = simulation.data.cor(no.samples = 100, group.size = rep(10, 6), no.var.total = 200)
# select variables
res = var.sel.boruta(x = data[, -1], y = data[, 1])
res$var
}
\description{
Variable selection using the Boruta function in the R package \code{\link[Boruta]{Boruta}}.
}
\details{
This function selects only variables that are confirmed based on Boruta implementation.
For more details see \code{\link[Boruta]{Boruta}}.
Note that this function uses the ranger implementation for variable selection.
}
|
14184ab50ddfbf1a4a95c13c4c59a9db8f7fb01d | cf294059ae844d1d2abfcc989e5e04ee124fe37b | /man/rmPeakList.Rd | 8d4c6fd1ccf1d5618d7d729a4f257f8285e2e5ef | [] | no_license | camilleroquencourt/ptairMS | 3557ba24e722508590a79c35f0c999d6ae186124 | 491ff08df3b2a07808db363dca961459a8aa3944 | refs/heads/master | 2022-08-19T04:05:01.232422 | 2022-06-30T14:11:08 | 2022-06-30T14:11:08 | 226,823,108 | 7 | 2 | null | 2020-03-18T10:30:49 | 2019-12-09T08:35:15 | R | UTF-8 | R | false | true | 757 | rd | rmPeakList.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SetMethods.R
\name{rmPeakList}
\alias{rmPeakList}
\title{remove the peakList of an ptrSet object}
\usage{
rmPeakList(object)
}
\arguments{
\item{object}{ptrSet object}
}
\value{
a ptrSet
}
\description{
This function is useful when you want to change the parameters of the detect
peak function. First delete the peakList with \code{rmPeakList}, and apply
\code{detectPeak}with the new parameters.
}
\examples{
library(ptairData)
dirRaw <- system.file("extdata/exhaledAir", package = "ptairData")
exhaledPtrset <- createPtrSet(dir=dirRaw, setName="exhaledPtrset",
mzCalibRef = c(21.022, 60.0525), fracMaxTIC = 0.7, saveDir = NULL )
exhaledPtrset <-rmPeakList(exhaledPtrset )
}
|
f5cb8c22c16817182307215a2695a4cd8b3b68de | 874264d93ac74c663edf4574460ce3cb29078273 | /CuffDiff/150520_find noncoding.R | 3e1a0495f462533910ea10d57016f0c5636f41fc | [] | no_license | PaulEssers/RNAseq_Irs1 | 12203e5f062f04705d3c5b7496aca045a49dba89 | f38ac967d3a7c9edb1722510bfe9584f19f6eaa7 | refs/heads/master | 2021-01-10T16:03:22.228933 | 2015-11-03T14:48:12 | 2015-11-03T14:48:12 | 45,472,328 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,443 | r | 150520_find noncoding.R | load("C:/Users/PEssers/PowerFolders/Projects/IRS1-hypothalamus/Analysis/150513 CuffDiff (no bias correction)/cuffdiff_no_options/allGenes.Rdata")
load("C:/Users/PEssers/PowerFolders/Projects/IRS1-hypothalamus/Analysis/150513 CuffDiff (no bias correction)/cuffdiff_no_options/regulatedGenes.Rdata")
all.ncRNA<-allGenes[allGenes$gene_biotype!="protein_coding",]
regulated.ncRNA<-regulatedGenes[regulatedGenes$gene_biotype!="protein_coding",]
novel<-regulatedGenes[is.na(regulatedGenes$gene_biotype),]
known<-regulatedGenes[regulatedGenes$gene!="-",]
known<-known[known$gene_biotype!="protein_coding",]
#the following types of ncRNAs are present:
table(known$gene_biotype)
hist(known$transcript_length, main="transcript lengths", breaks=50)
hist(known$transcript_length, main="transcript lengths", breaks=2000,xlim=c(0,300))
#So a lot of the ncRNAs are smaller than 200nt. How reliable are these?
known<-known[-c(4),]
lncRNA<-known[which(as.integer(known$transcript_length)>200),]
lncRNA<-lncRNA[-1,]#duplicate entry for some reason, this messes up the names on the axis later
library("cummeRbund")
cuff<-readCufflinks()
id<-as.character(lncRNA$gene_id)
cb_genes<-cummeRbund::getGenes(cuff,id)
print(expressionBarplot(cb_genes,labRow=T, replicates=T,fullnames=FALSE,logMode=FALSE)+scale_x_discrete(labels=lncRNA[order(lncRNA$gene_id),]$external_gene_name))
geneNamesBarplot(myGeneNames=id,all_genes=allGenes,field="gene_id",title="")
id
lncRNAfollow_up<-lncRNA[c(1,3,6),]
View(lncRNAfollow_up)
setwd("C:/Users/PEssers/PowerFolders/Projects/IRS1-hypothalamus/Analysis/150513 CuffDiff (no bias correction)/cuffnorm_no_options")
gene.count<-read.table("genes.count_table",header=T)
gene.count<-gene.count[which(gene.count$tracking_id %in% lncRNAfollow_up$test_id),]
View(gene.count)
## novel ncRNAs. The following are multi-exon ncRNAs found to be differentially regulated:
novel_id<-c("XLOC_004849","XLOC_015211","XLOC_024455","XLOC_027797","XLOC_038391","XLOC_042045")
cb_genes<-cummeRbund::getGenes(cuff,novel_id)
print(expressionBarplot(cb_genes,labRow=T, replicates=T,fullnames=FALSE,logMode=FALSE))
### also do some general expression tests
allGenesHigh10<-allGenes[allGenes$value_1>quantile(allGenes$value_1,0.90),]
plot(density(all.novel.over1$value_1),xlim=c(0,10),col="red", main="ncRNA expression density")
lines(density(all.ncRNA$value_1),xlim=c(0,10),col="black")
|
3f67b76e28ee376fb32d2d2fb65e5cc322962fec | f3bbc879600ff372f713d7570ed25c307f09a75b | /names/changeInSlope.R | 89662b5d3f145aec5b66829a61295953c0a4a485 | [] | no_license | lauracova/senior_project | 3f73fd3368087c848f0b4ec2d3939a8d51fb0bc5 | 9d15734b6c36c9c72ff65440df4462f95e09f106 | refs/heads/master | 2020-05-03T10:56:51.433338 | 2019-12-03T23:05:57 | 2019-12-03T23:05:57 | 178,590,651 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,052 | r | changeInSlope.R |
name <- read.csv(paste('data/splitnames/',"Laura",sep='','.csv')) %>%
filter(Gender == "F") %>%
group_by(Name, Gender, Year) %>%
summarise(Count=sum(Count))
p <- name %>% ggplot(aes(x=Year, y=Count))+
geom_line(size=2, color="lightpink2")+
geom_line(data=dat, aes(x=Year, y=slope))+
theme_minimal()+
labs(title="Trend in Name Over Time")+
coord_cartesian(xlim=c(1910,2017))
ggplotly(p)
#########################################################
# working with the slope
y<-name$Count
x<-name$Year
n<- nrow(name)
y_n <- y[1:(n-1)]-y[2:n]
y_n<-append(y_n, NA, after = length(y_n))
x_n <- x[1:(n-1)]-x[2:n]
x_n <- append(x_n, NA, after = length(x_n))
#y_n2 <- y[1:(n-5)]-y[6:n]
#y_n2<-append(y_n, NA, after = length(y_n))
#x_n2 <- x[1:(n-5)]-x[6:n]
#x_n2 <- append(x_n, NA, after = length(x_n))
# BUGFIX: the 5-step differences (x_n2/y_n2) are commented out above, so they
# must not appear here -- data.frame(x, y, x_n, y_n, x_n2, y_n2) failed with
# "object 'x_n2' not found". Keep only the columns that are actually computed.
dat <- data.frame(x, y, x_n, y_n)
dat <- dat %>% mutate(slope = y_n/x_n) %>% rename(Year=x, Count=y, year_n=x_n, count_n=y_n)
dat %>% filter(Count == max(dat$Count)) # gives the highest count the name reached
|
09696efc2b6b237b8315d99d3a9542d01cb8a066 | 91cccf3e851b5077d78574a2f012cc8254a8ba33 | /erikb-2018/samples_model-analysis_mle.R | 4c46ae52af4b1078d073ce111e4261f1ea39854a | [] | no_license | vullab/numberline | 5a28882c68a61145f0d9ffd13a261ef6fb573d35 | 43e599402b096cd050d056fd2caaaadd6da4e07c | refs/heads/master | 2020-04-04T00:05:21.995804 | 2019-11-15T22:36:20 | 2019-11-15T22:36:20 | 155,639,921 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,266 | r | samples_model-analysis_mle.R | ### README ###
#' This file examines the slope and cutoff fit across subject and model estimates
#' by running iterated fits of slope and cutoff and looking at how much fits
#' vary by subject.
#'
#' It uses functions in `samples_model-fxns_basic` to read the data and
#' then uses more advanced functions in `samples_model-fxns_drift` to
#' fit lines to the model and human estimates
# NOTE(review): setwd() with an absolute user-specific path and rm(list=ls())
# make this script machine-specific and clear the caller's workspace --
# acceptable for a personal analysis script, but be aware before sourcing it.
setwd("/Users/erikbrockbank/web/vullab/numberline/erikb-2018/")
rm(list=ls())
library(Rmisc) # needed for call to `multiplot`
# Fetch relevant model functions from samples_model
source('samples_model-fxns_basic.R')
# Fetch relevant functions for fitting lines to model data
source('samples_model-fxns_drift.R')
##########################
### ANALYSIS FUNCTIONS ###
##########################
# Scatter of fitted slope against fitted cutoff, faceted one panel per subject,
# so the spread of the iterated fits is visible for each participant.
# `subj.slopes` must carry columns: subj, cutoff, slope. Returns a ggplot object.
plot.human.fit.scatter = function(subj.slopes) {
  scatter = ggplot(data = subj.slopes, aes(x = cutoff, y = slope)) +
    geom_point(size = 3, alpha = 0.5, color = "blue")
  labeled = scatter +
    facet_wrap(~subj, ncol = 5, scales = "free") +
    ggtitle("") +
    labs(x = "Fitted cutoff", y = "Fitted slope")
  # Strip the default grey theme and enlarge axis/strip text for figure export.
  labeled +
    theme(panel.background = element_blank(),
          strip.background = element_blank(),
          panel.grid = element_line(color = "gray"),
          axis.line = element_line(color = "black"),
          axis.title.y = element_text(face = "bold", size = 24),
          axis.title.x = element_text(face = "bold", size = 24),
          strip.text = element_text(face = "bold", size = 20))
}
################
### ANALYSIS ###
################
# Read subject data and extract relevant columns
data = read.data(DATA, TRIALS)
subj.data = data %>%
select(subject, trial, num_dots, answer)
# Run multiple iterations of slope fitting
ITERS = 25
slope.fits = data.frame(subj = character(),
iter = numeric(),
cutoff = numeric(),
slope = numeric(),
se = numeric())
for (iter_index in seq(ITERS)) {
print(paste("Fitting slopes: iteration ", iter_index))
PARAMS = c(0.7, 1.5, -0.5, 0.2, -0.7, 0.2)
names(PARAMS) = c("ma", "sa", "mb", "sb", "ms", "ss")
PRIORS = list()
# Uninformative priors
# PRIORS[[1]] = function(x){-dnorm(x, 2, 3.5, log = T)}
# PRIORS[[2]] = function(x){-dnorm(x, 0, 0.5, log = T)}
# PRIORS[[3]] = function(x){-dnorm(x, -1, 0.25, log = T)}
# More rigid priors
PRIORS[[1]] = function(x){-dnorm(x, 1.5, 0.1, log = T)} #
PRIORS[[2]] = function(x){-dnorm(x, -0.2, 0.1, log = T)} #
PRIORS[[3]] = function(x){-dnorm(x, -1, 0.1, log = T)} #
# Fit static subject data
bipower.fits.subj = data.frame(do.call(rbind, by(subj.data, subj.data$subject, brutefit)))
print(paste("Failed bipower fits:", sum(bipower.fits.subj$logL == -9999)))
slope.fits = rbind(slope.fits, data.frame(subj = bipower.fits.subj$subject,
iter = iter_index,
cutoff = 10^bipower.fits.subj$a,
slope = 10^bipower.fits.subj$b,
se = 10^bipower.fits.subj$s))
}
#############
### PLOTS ###
#############
plot.human.fit.scatter(slope.fits) # NB: save plot with size 1800x1500 for ideal proportions
|
88514bf6c57a3c77417d9cda9bc5dc12a2f643f8 | 3edd2989d3908a4a738267124e6a3c0f30fbaff8 | /codes/train.R | ebdf02c11033ce8fa60ebb5934fe8905af27e86b | [] | no_license | heyuan7676/Predict_contacts | 69588526424df576fd36bd84b6c761e43bed2956 | a202a1f31ddb1b9ccf6be616ea5a5ef8dc6cd107 | refs/heads/master | 2020-08-15T18:23:16.006358 | 2019-10-15T20:09:33 | 2019-10-15T20:09:33 | 215,387,295 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,815 | r | train.R | library(BayesTree)
library(neuralnet)
library(AUC)
library(randomForest)
library(e1071)
# Chromosome used for the training/in-chromosome test data (chr21 below serves
# as a cross-chromosome test set).
chr = "chr22"
### read in train data and test data
base_dir = "/scratch1/battle-fs1/heyuan/HiC/chromovar3d.stanford.edu/project"
# Training set and same-chromosome test set: tables with the 9 histone-mark
# feature columns (see the neuralnet formula below) plus a 0/1 label `y`.
data = read.table(paste0(base_dir,"/histone/chr22_60smp_train.txt"),header=T)
test_data1 = read.table(paste0(base_dir,"/histone/chr22_60smp_test.txt"),header=T)
# Cross-chromosome test set: chr21 positives and negatives live in separate,
# unlabeled files, so attach y = "1"/"0" before stacking them into one frame.
chr21_1 = read.table(paste0(base_dir,"/histone/chr21_pos.csv"),header=T)
chr21_1 = cbind(chr21_1,"y"=rep("1",nrow(chr21_1)))
chr21_2 = read.table(paste0(base_dir,"/histone/chr21_neg.csv"),header=T)
chr21_2 = cbind(chr21_2,"y"=rep("0",nrow(chr21_2)))
test_data2 = rbind(chr21_1,chr21_2)
#################################################################################
## Feature set 1: 9 features
#################################################################################
###########################
## logistic model
###########################
# Baseline classifier: logistic regression on all feature columns (y ~ .).
logit_model = glm(y~.,data=data,family="binomial")
print("Print the odds ratios and 95% CIs for the coefficients")
# Exponentiating the coefficients and their profile-likelihood CIs gives
# odds ratios per unit change of each histone feature.
print(exp(cbind(OR = coef(logit_model), confint(logit_model))))
# NOTE(review): when this script is run via source(), a bare summary() call is
# not auto-printed; wrap it in print() if the coefficient table is wanted.
summary(logit_model)
# Evaluate a fitted glm on held-out data: compute predicted P(y = 1), build the
# ROC curve with the AUC package, print the area under it, and return the ROC
# object so the caller can extract fpr/tpr for plotting.
logit_function <- function(model,new_data){
  # Predicted probability of class 1 for every row of the evaluation set.
  prob_hat <- predict(model, newdata = new_data, type = "response")
  truth <- factor(new_data$y, levels = c(0, 1), labels = c(0, 1))
  scored <- data.frame(predictions = prob_hat, response = truth)
  roc_curve <- roc(scored$predictions, scored$response)
  print(paste0("The AUC for the logistic model is ", auc(roc_curve)))
  return(roc_curve)
}
## predict
roc1_logit = logit_function(logit_model,test_data1) ### 0.800
roc2_logit = logit_function(logit_model,test_data2) ### 0.801
#########################
## random forest
#########################
# Evaluate a fitted randomForest classifier on held-out data: take the class-1
# probability, build the ROC curve with the AUC package, print the AUC, and
# return the ROC object for plotting.
rf_function <- function(model,new_data){
  # Column 2 of the probability matrix is P(y = 1).
  predictions = predict(model,newdata=new_data,type="prob")[,2]
  ys = data.frame("predictions"=predictions,"response"=factor(new_data$y,levels=c(0,1),labels=c(0,1)))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # BUGFIX: message previously said "logistic model" (copy-paste from logit_function).
  print(paste0("The AUC for the random forest model is ",auc_area))
  return(roc_object)
}
# Fit a 200-tree random forest on the chr22 training set, then score both the
# same-chromosome (test_data1) and cross-chromosome (test_data2) test sets.
ntree = 200
rf = randomForest(factor(y) ~ ., data, importance=T, ntree=ntree, norm.votes=FALSE)
roc1_rf = rf_function(rf,test_data1)
roc2_rf = rf_function(rf,test_data2)
#########################
## neural network
#########################
# Evaluate a fitted neuralnet model on held-out data: run a forward pass over
# the feature columns, build the ROC curve with the AUC package, print the AUC,
# and return the ROC object for plotting.
nn_function <- function(model,new_data){
  # compute() performs a forward pass of the trained network. Assumes the
  # 9 feature columns are columns 1:9 of new_data, in training order.
  predictions = compute(model,new_data[,1:9])$net.result
  ys = data.frame("predictions" = predictions,"response" = factor(new_data$y,levels=c(0,1),labels=c("0","1")))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # BUGFIX: message previously said "logistic model" (copy-paste from logit_function).
  print(paste0("The AUC for the neural network model is ",auc_area))
  return(roc_object)
}
# Single hidden layer with 5 units; linear.output=FALSE applies the activation
# to the output node so net.result can be read as a class-1 score.
i=5
nn = neuralnet(y~H3K4ME3_1+H3K4ME3_2+H3K4ME3_3+H3K4ME1_1+H3K4ME1_2+H3K4ME1_3+H3K27AC_1+H3K27AC_2+H3K27AC_3,data=data,hidden=i,linear.output=FALSE)
roc1_nn = nn_function(nn,test_data1)
roc2_nn = nn_function(nn,test_data2)
#########################
## SVM
#########################
#### Read in SVM ROC curves manually (computed externally, not in this script)
roc1_svm = read.table("/home/qliu24/ML_project/SVM/ROC_60smp.txt",header=T)
roc2_svm = read.table("/home/qliu24/ML_project/SVM/chr21/ROC_chr21.txt",header=T)
# Name the columns fpr/tpr so these frames expose the same fields ($fpr, $tpr)
# that roc_plot() reads from the AUC-package ROC objects.
colnames(roc1_svm) = c("fpr","tpr")
colnames(roc2_svm) = c("fpr","tpr")
#########################
## naiveBayes
#########################
# Naive Bayes classifier (e1071) on the same 9 histone features.
nBayes = naiveBayes(y~H3K4ME3_1+H3K4ME3_2+H3K4ME3_3+H3K4ME1_1+H3K4ME1_2+H3K4ME1_3+H3K27AC_1+H3K27AC_2+H3K27AC_3,data=data)
# Evaluate a fitted e1071 naiveBayes model on held-out data: take the class-1
# posterior probability, build the ROC curve with the AUC package, print the
# AUC, and return the ROC object for plotting.
nBayes_function <- function(model,new_data){
  # Column 2 of the raw posterior matrix is P(y = 1).
  predictions = predict(model,newdata=new_data,type="raw")[,2]
  ys = data.frame("predictions"=predictions,"response"=factor(new_data$y,levels=c(0,1),labels=c(0,1)))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # BUGFIX: message previously said "logistic model" (copy-paste from logit_function).
  print(paste0("The AUC for the naive Bayes model is ",auc_area))
  return(roc_object)
}
roc1_nB = nBayes_function(nBayes,test_data1) ### 0.7639
roc2_nB = nBayes_function(nBayes,test_data2) ### 0.7523
#########################
## BART model
#########################
# Fit a BART probit model on the 9 feature columns of train_data and score
# test_data: average the probit-transformed posterior draws to get P(y = 1)
# per test row, build the ROC curve with the AUC package, print the AUC, and
# return the ROC object for plotting.
bart_function <- function(train_data,test_data){
  bart_model = bart(train_data[,c(1:9)],train_data$y,x.test = test_data[,c(1:9)],verbose=FALSE)
  # Removed dead code: varcount-based importance (times/prop) and the training
  # predictions (yhat_train_mean) were computed but never used or returned.
  # Posterior mean of P(y = 1) for each test row.
  yhat_test_mean = apply(pnorm(bart_model$yhat.test),2,mean)
  ys = data.frame("predictions"=yhat_test_mean,"response"=factor(test_data$y,levels=c(0,1),labels=c(0,1)))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # BUGFIX: message previously said "logistic model" (copy-paste from logit_function).
  print(paste0("The AUC for the BART model is ",auc_area))
  return(roc_object)
}
# Fit/evaluate BART on both test sets (trailing comments record observed AUCs).
roc1_bart = bart_function(data,test_data1) ### 0.9294
roc2_bart = bart_function(data,test_data2) ### 0.9352
#########################
## plot
#########################
library(ggplot2)
# Assemble a ROC curve into a long-format data frame for ggplot, tagging
# every (fpr, tpr) point with the model's display label.
roc_plot <- function(model, roc_object) {
  fpr <- roc_object$fpr
  tpr <- roc_object$tpr
  data.frame(model = rep(model, length(fpr)), fpr = fpr, tpr = tpr)
}
# Build the long-format ROC data frame for test set 1; the AUC shown in
# each legend label was filled in by hand from the printed output above.
logit_plot1 = roc_plot("logistic(0.80)",roc1_logit)
rf_plot1 = roc_plot("random_forest(0.98)",roc1_rf)
nn_plot1 = roc_plot("neural_network(0.90)",roc1_nn)
svm_plot1 = roc_plot("SVM (0.98)",roc1_svm)
nB_plot1 = roc_plot("Naives_Bayes(0.76)",roc1_nB)
bart_plot1 = roc_plot("BART(0.93)",roc1_bart)
plot_df1 = rbind(logit_plot1,rf_plot1,nn_plot1,svm_plot1,nB_plot1,bart_plot1)
g1 = ggplot(plot_df1,aes(x=fpr,y=tpr,col=model))+
  geom_line(aes(y=tpr))+
  labs(title="Fig 2a. ROC curve for the test sample 1(Feature set 1)",x="False Positive Rate",y="True Positive Rate") +
  theme(plot.title=element_text(size=7),axis.text.x=element_text(angle=45, hjust=1, size=5),axis.text.y=element_text(vjust=0, size=5),legend.text=element_text(size=5)) +
  theme(legend.title=element_text(size=5),axis.title.x=element_text(size=6),axis.title.y=element_text(size=6))
# Same figure for test set 2.
logit_plot2 = roc_plot("logistic(0.69)",roc2_logit)
rf_plot2 = roc_plot("random_forest(0.81)",roc2_rf)
nn_plot2 = roc_plot("neural_network(0.90)",roc2_nn)
svm_plot2 = roc_plot("SVM (0.80)",roc2_svm)
nB_plot2 = roc_plot("Naives_Bayes(0.75)",roc2_nB)
bart_plot2 = roc_plot("BART(0.83)",roc2_bart)
plot_df2 = rbind(logit_plot2,rf_plot2,nn_plot2,svm_plot2,nB_plot2,bart_plot2)
g2 = ggplot(plot_df2,aes(x=fpr,col=model))+
  geom_line(aes(y=tpr))+
  labs(title="Fig 2b. ROC curve for the test sample 2(Feature set 1)",x="False Positive Rate",y="True Positive Rate") +
  theme(plot.title=element_text(size=7),axis.text.x=element_text(angle=45, hjust=1, size=5),axis.text.y=element_text(vjust=0, size=5),legend.text=element_text(size=5)) +
  theme(legend.title=element_text(size=5),axis.title.x=element_text(size=6),axis.title.y=element_text(size=6))
#################################################################################
## Feature set 2: 3 sums
#################################################################################
### read in train data and test data
# Re-read the raw per-bin data; chr22 provides train + test set 1, chr21
# provides test set 2 (positives labelled "1", negatives "0").
base_dir = "/scratch1/battle-fs1/heyuan/HiC/chromovar3d.stanford.edu/project"
data = read.table(paste0(base_dir,"/histone/chr22_60smp_train.txt"),header=T)
test_data1 = read.table(paste0(base_dir,"/histone/chr22_60smp_test.txt"),header=T)
chr21_1 = read.table(paste0(base_dir,"/histone/chr21_pos.csv"),header=T)
chr21_1 = cbind(chr21_1,"y"=rep("1",nrow(chr21_1)))
chr21_2 = read.table(paste0(base_dir,"/histone/chr21_neg.csv"),header=T)
chr21_2 = cbind(chr21_2,"y"=rep("0",nrow(chr21_2)))
test_data2 = rbind(chr21_1,chr21_2)
# Collapse the nine per-bin histone columns (three consecutive bins per
# mark, in fixed column order) into one summed column per mark, keeping
# the response from column 10 as `y`.
computeSum <- function(data) {
  h3k4me3 <- data[, 1] + data[, 2] + data[, 3]
  h3k4me1 <- data[, 4] + data[, 5] + data[, 6]
  h3k27ac <- data[, 7] + data[, 8] + data[, 9]
  data.frame(H3K4ME3 = h3k4me3, H3K4ME1 = h3k4me1, H3K27AC = h3k27ac, y = data[, 10])
}
# Reduce all three data sets to the 3 summed-mark features.
data = computeSum(data)
test_data1 = computeSum(test_data1)
test_data2 = computeSum(test_data2)
###########################
## logistic model
###########################
logit_model = glm(y~.,data=data,family="binomial")
print("Print the odds ratios and 95% CIs for the coefficients")
print(exp(cbind(OR = coef(logit_model), confint(logit_model))))
# NOTE(review): a bare summary() call at top level is only auto-printed
# interactively; wrap in print() if this script is run via source().
summary(logit_model)
# Score a fitted logistic regression on new data, report its AUC, and
# return the ROC object for later plotting.
logit_function <- function(model, new_data) {
  preds <- predict(model, newdata = new_data, type = "response")
  resp <- factor(new_data$y, levels = c(0, 1), labels = c(0, 1))
  ys <- data.frame(predictions = preds, response = resp)
  roc_object <- roc(ys$predictions, ys$response)
  print(paste0("The AUC for the logistic model is ", auc(roc_object)))
  roc_object
}
## predict
# Evaluate the logistic model on both test sets (trailing comments
# record the AUC values observed when the script was last run).
roc1_logit = logit_function(logit_model,test_data1) ### 0.8151
roc2_logit = logit_function(logit_model,test_data2) ### 0.7997
#########################
## random forest
#########################
# Evaluate a fitted random forest on new data.
#
# model    -- a fitted randomForest classification object
# new_data -- data.frame with the predictor columns and a 0/1 response `y`
#
# Uses the class-"1" probability (column 2 of the probability matrix) as
# the score, builds an ROC curve against the response, prints the AUC,
# and returns the roc object.
rf_function <- function(model,new_data){
  predictions = predict(model,newdata=new_data,type="prob")[,2]
  ys = data.frame("predictions"=predictions,"response"=factor(new_data$y,levels=c(0,1),labels=c(0,1)))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # Fixed copy-pasted message: this evaluates the random forest, not the
  # logistic model.
  print(paste0("The AUC for the random forest model is ",auc_area))
  return(roc_object)
}
# Fit a 200-tree random forest on the summed features and evaluate on
# both test sets (trailing comments record observed AUCs).
i=10
ntree = 20*i
rf = randomForest(factor(y) ~ ., data, importance=T, ntree=ntree, norm.votes=FALSE)
roc1_rf = rf_function(rf,test_data1) ### 0.97
roc2_rf = rf_function(rf,test_data2) ### 0.94
#########################
## neural network
#########################
# Evaluate a fitted neuralnet model on new data (3-feature variant).
#
# model    -- a fitted neuralnet object
# new_data -- data.frame whose first 3 columns are the predictors and
#             which carries a 0/1 response column `y`
#
# Predicts on the 3 feature columns, builds an ROC curve of prediction
# vs. response, prints the AUC, and returns the roc object.
nn_function <- function(model,new_data){
  predictions = compute(model,new_data[,1:3])$net.result
  ys = data.frame("predictions" = predictions,"response" = factor(new_data$y,levels=c(0,1),labels=c("0","1")))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # Fixed copy-pasted message: this function evaluates the neural
  # network, not the logistic model.
  print(paste0("The AUC for the neural network model is ",auc_area))
  return(roc_object)
}
# Train a 2-hidden-unit neural network on the 3 summed features and
# evaluate (trailing comments record observed AUCs).
nn = neuralnet(y~H3K4ME3+H3K4ME1+H3K27AC,data=data,hidden=2,linear.output=FALSE)
roc1_nn = nn_function(nn,test_data1) ### 0.86
roc2_nn = nn_function(nn,test_data2) ### 0.88
#########################
## SVM
#########################
#### read in SVM manullay
# Precomputed (fpr, tpr) SVM curves for this feature set; absolute paths
# only resolve on the original analysis machine.
roc1_svm = read.table("/home/qliu24/ML_project/SVM/3binsumup/ROC_60smp.txt",header=T)
roc2_svm = read.table("/home/qliu24/ML_project/SVM/chr21/ROC_chr21_b1.txt",header=T)
colnames(roc1_svm) = c("fpr","tpr")
colnames(roc2_svm) = c("fpr","tpr")
#########################
## naiveBayes
#########################
# Fit naive Bayes on the 3 summed features.
nBayes = naiveBayes(y~H3K4ME3 + H3K4ME1 + H3K27AC,data=data)
# Evaluate a fitted naive Bayes classifier on new data.
#
# model    -- a fitted naiveBayes object
# new_data -- data.frame with the predictor columns and a 0/1 response `y`
#
# Uses the posterior probability of the positive class (column 2 of the
# raw prediction matrix) as the score, builds an ROC curve against the
# response, prints the AUC, and returns the roc object.
nBayes_function <- function(model,new_data){
  predictions = predict(model,newdata=new_data,type="raw")[,2]
  ys = data.frame("predictions"=predictions,"response"=factor(new_data$y,levels=c(0,1),labels=c(0,1)))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # Fixed copy-pasted message: this evaluates the naive Bayes model,
  # not the logistic model.
  print(paste0("The AUC for the naive Bayes model is ",auc_area))
  return(roc_object)
}
# Evaluate naive Bayes on both test sets (trailing comments record observed AUCs).
roc1_nB = nBayes_function(nBayes,test_data1) ### 0.79
roc2_nB = nBayes_function(nBayes,test_data2) ### 0.77
#########################
## BART model
#########################
# Fit a BART model on the training features (columns 1:3) and score the
# test set.
#
# train_data -- data.frame: columns 1:3 are predictors, `y` is the 0/1 response
# test_data  -- data.frame with the same layout
#
# The per-observation posterior-mean probability on the test set is
# compared against the response via ROC/AUC; prints the AUC and returns
# the roc object.
bart_function <- function(train_data,test_data){
  bart_model = bart(train_data[,c(1:3)],train_data$y,x.test = test_data[,c(1:3)],verbose=F)
  ### variable importance (computed but currently unused downstream)
  times = apply(bart_model$varcount,2,sum)
  prop = times/sum(times)
  # pnorm maps the latent scale to probabilities; averaging over posterior
  # draws (rows) yields one probability per observation.
  yhat_train_mean = apply(pnorm(bart_model$yhat.train),2,mean)
  yhat_test_mean = apply(pnorm(bart_model$yhat.test),2,mean)
  ys = data.frame("predictions"=yhat_test_mean,"response"=factor(test_data$y,levels=c(0,1),labels=c(0,1)))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # Fixed copy-pasted message: this evaluates the BART model, not the
  # logistic model.
  print(paste0("The AUC for the BART model is ",auc_area))
  return(roc_object)
}
# Fit/evaluate BART on both test sets (trailing comments record observed AUCs).
roc1_bart = bart_function(data,test_data1) ### 0.92
roc2_bart = bart_function(data,test_data2) ### 0.91
#########################
## plot
#########################
library(ggplot2)
# Turn one ROC curve into a tidy data frame (model label, fpr, tpr) so
# several curves can be stacked with rbind and drawn by ggplot.
roc_plot <- function(model, roc_object) {
  n <- length(roc_object$fpr)
  out <- data.frame(model = rep(model, n),
                    fpr = roc_object$fpr,
                    tpr = roc_object$tpr)
  return(out)
}
# Long-format ROC data frames for feature set 2; AUC values in the
# legend labels were transcribed by hand from the printed output.
logit_plot1 = roc_plot("logistic(0.82)",roc1_logit)
rf_plot1 = roc_plot("random_forest(0.97)",roc1_rf)
nn_plot1 = roc_plot("neural_network(0.85)",roc1_nn)
svm_plot1 = roc_plot("SVM(0.95)",roc1_svm)
nB_plot1 = roc_plot("Naives_Bayes(0.79)",roc1_nB)
bart_plot1 = roc_plot("BART(0.92)",roc1_bart)
plot_df1 = rbind(logit_plot1,rf_plot1,nn_plot1,svm_plot1,nB_plot1,bart_plot1)
g3 = ggplot(plot_df1,aes(x=fpr,y=tpr,col=model))+
  geom_line(aes(y=tpr))+
  labs(title="Fig 2c. ROC curve for the test sample 1 (Feature set 2)",x="False Positive Rate",y="True Positive Rate") +
  theme(plot.title=element_text(size=7),axis.text.x=element_text(angle=45, hjust=1, size=5),axis.text.y=element_text(vjust=0, size=5),legend.text=element_text(size=5)) +
  theme(legend.title=element_text(size=5),axis.title.x=element_text(size=6),axis.title.y=element_text(size=6))
# Same figure for test set 2.
logit_plot2 = roc_plot("logistic(0.62)",roc2_logit)
rf_plot2 = roc_plot("random_forest(0.76)",roc2_rf)
nn_plot2 = roc_plot("neural_network(0.85)",roc2_nn)
svm_plot2 = roc_plot("SVM(0.79)",roc2_svm)
nB_plot2 = roc_plot("Naives_Bayes(0.52)",roc2_nB)
bart_plot2 = roc_plot("BART(0.81)",roc2_bart)
plot_df2 = rbind(logit_plot2,rf_plot2,nn_plot2,svm_plot2,nB_plot2,bart_plot2)
g4 = ggplot(plot_df2,aes(x=fpr,col=model))+
  geom_line(aes(y=tpr))+
  labs(title="Fig 2d. ROC curve for the test sample 2(Feature set2)",x="False Positive Rate",y="True Positive Rate") +
  theme(plot.title=element_text(size=7),axis.text.x=element_text(angle=45, hjust=1, size=5),axis.text.y=element_text(vjust=0, size=5),legend.text=element_text(size=5)) +
  theme(legend.title=element_text(size=5),axis.title.x=element_text(size=6),axis.title.y=element_text(size=6))
#################################################################################
## Feature set 2: 3 best
#################################################################################
# NOTE(review): the banner says "Feature set 2" but the plot titles below
# call this "Feature set 3" -- presumably a copy-paste slip in the banner.
### read in train data and test data
base_dir = "/scratch1/battle-fs1/heyuan/HiC/chromovar3d.stanford.edu/project"
data = read.table(paste0(base_dir,"/histone/chr22_60smp_train.txt"),header=T)
test_data1 = read.table(paste0(base_dir,"/histone/chr22_60smp_test.txt"),header=T)
chr21_1 = read.table(paste0(base_dir,"/histone/chr21_pos.csv"),header=T)
chr21_1 = cbind(chr21_1,"y"=rep("1",nrow(chr21_1)))
chr21_2 = read.table(paste0(base_dir,"/histone/chr21_neg.csv"),header=T)
chr21_2 = cbind(chr21_2,"y"=rep("0",nrow(chr21_2)))
test_data2 = rbind(chr21_1,chr21_2)
# Keep only the three selected per-bin features plus the response.
pickout = c("H3K4ME3_1","H3K4ME3_2","H3K4ME1_1")
data = data[,c(pickout,"y")]
test_data1 = test_data1[,c(pickout,"y")]
test_data2 = test_data2[,c(pickout,"y")]
###########################
## logistic model
###########################
logit_model = glm(y~.,data=data,family="binomial")
print("Print the odds ratios and 95% CIs for the coefficients")
print(exp(cbind(OR = coef(logit_model), confint(logit_model))))
# NOTE(review): a bare summary() call at top level is only auto-printed
# interactively; wrap in print() if this script is run via source().
summary(logit_model)
# Score a fitted logistic regression on new data, report its AUC, and
# return the ROC object for later plotting.
logit_function <- function(model, new_data) {
  preds <- predict(model, newdata = new_data, type = "response")
  resp <- factor(new_data$y, levels = c(0, 1), labels = c(0, 1))
  ys <- data.frame(predictions = preds, response = resp)
  roc_object <- roc(ys$predictions, ys$response)
  print(paste0("The AUC for the logistic model is ", auc(roc_object)))
  roc_object
}
## predict
# Evaluate the logistic model on both test sets.
# NOTE(review): the trailing AUC comments appear copied from the previous
# feature set; the legend labels below record different values (0.79/0.49).
roc1_logit = logit_function(logit_model,test_data1) ### 0.8151
roc2_logit = logit_function(logit_model,test_data2) ### 0.7997
#########################
## random forest
#########################
# Evaluate a fitted random forest on new data.
#
# model    -- a fitted randomForest classification object
# new_data -- data.frame with the predictor columns and a 0/1 response `y`
#
# Uses the class-"1" probability (column 2 of the probability matrix) as
# the score, builds an ROC curve against the response, prints the AUC,
# and returns the roc object.
rf_function <- function(model,new_data){
  predictions = predict(model,newdata=new_data,type="prob")[,2]
  ys = data.frame("predictions"=predictions,"response"=factor(new_data$y,levels=c(0,1),labels=c(0,1)))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # Fixed copy-pasted message: this evaluates the random forest, not the
  # logistic model.
  print(paste0("The AUC for the random forest model is ",auc_area))
  return(roc_object)
}
# Fit a 200-tree random forest on the 3 selected features and evaluate.
ntree=200
rf = randomForest(factor(y) ~ ., data, importance=T, ntree=ntree, norm.votes=FALSE)
roc1_rf = rf_function(rf,test_data1) ### 0.97
roc2_rf = rf_function(rf,test_data2) ### 0.94
#########################
## neural network
#########################
# Evaluate a fitted neuralnet model on new data (3-feature variant).
#
# model    -- a fitted neuralnet object
# new_data -- data.frame whose first 3 columns are the predictors and
#             which carries a 0/1 response column `y`
#
# Predicts on the 3 feature columns, builds an ROC curve of prediction
# vs. response, prints the AUC, and returns the roc object.
nn_function <- function(model,new_data){
  predictions = compute(model,new_data[,1:3])$net.result
  ys = data.frame("predictions" = predictions,"response" = factor(new_data$y,levels=c(0,1),labels=c("0","1")))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # Fixed copy-pasted message: this function evaluates the neural
  # network, not the logistic model.
  print(paste0("The AUC for the neural network model is ",auc_area))
  return(roc_object)
}
# Train a 2-hidden-unit neural network on the 3 selected features and evaluate.
nn = neuralnet(y~H3K4ME3_1+H3K4ME3_2+H3K4ME1_1,data=data,hidden=2,linear.output=FALSE)
roc1_nn = nn_function(nn,test_data1) ### 0.86
roc2_nn = nn_function(nn,test_data2) ### 0.88
#########################
## SVM
#########################
#### read in SVM manullay
# Precomputed (fpr, tpr) SVM curves for this feature set; absolute paths
# only resolve on the original analysis machine.
roc1_svm = read.table("/home/qliu24/ML_project/SVM/3binbest/ROC_60smp.txt",header=T)
roc2_svm = read.table("/home/qliu24/ML_project/SVM/chr21/ROC_chr21_b2.txt",header=T)
colnames(roc1_svm) = c("fpr","tpr")
colnames(roc2_svm) = c("fpr","tpr")
#########################
## naiveBayes
#########################
# Fit naive Bayes on the 3 selected features.
nBayes = naiveBayes(y~H3K4ME3_1+H3K4ME3_2+H3K4ME1_1,data=data)
# Evaluate a fitted naive Bayes classifier on new data.
#
# model    -- a fitted naiveBayes object
# new_data -- data.frame with the predictor columns and a 0/1 response `y`
#
# Uses the posterior probability of the positive class (column 2 of the
# raw prediction matrix) as the score, builds an ROC curve against the
# response, prints the AUC, and returns the roc object.
nBayes_function <- function(model,new_data){
  predictions = predict(model,newdata=new_data,type="raw")[,2]
  ys = data.frame("predictions"=predictions,"response"=factor(new_data$y,levels=c(0,1),labels=c(0,1)))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # Fixed copy-pasted message: this evaluates the naive Bayes model,
  # not the logistic model.
  print(paste0("The AUC for the naive Bayes model is ",auc_area))
  return(roc_object)
}
# Evaluate naive Bayes on both test sets.
roc1_nB = nBayes_function(nBayes,test_data1) ### 0.79
roc2_nB = nBayes_function(nBayes,test_data2) ### 0.77
#########################
## BART model
#########################
# Fit a BART model on the training features (columns 1:3) and score the
# test set.
#
# train_data -- data.frame: columns 1:3 are predictors, `y` is the 0/1 response
# test_data  -- data.frame with the same layout
#
# The per-observation posterior-mean probability on the test set is
# compared against the response via ROC/AUC; prints the AUC and returns
# the roc object.
bart_function <- function(train_data,test_data){
  bart_model = bart(train_data[,c(1:3)],train_data$y,x.test = test_data[,c(1:3)],verbose=F)
  ### variable importance (computed but currently unused downstream)
  times = apply(bart_model$varcount,2,sum)
  prop = times/sum(times)
  # pnorm maps the latent scale to probabilities; averaging over posterior
  # draws (rows) yields one probability per observation.
  yhat_train_mean = apply(pnorm(bart_model$yhat.train),2,mean)
  yhat_test_mean = apply(pnorm(bart_model$yhat.test),2,mean)
  ys = data.frame("predictions"=yhat_test_mean,"response"=factor(test_data$y,levels=c(0,1),labels=c(0,1)))
  roc_object = roc(ys$predictions,ys$response)
  auc_area = auc(roc_object)
  # Fixed copy-pasted message: this evaluates the BART model, not the
  # logistic model.
  print(paste0("The AUC for the BART model is ",auc_area))
  return(roc_object)
}
# Fit/evaluate BART on both test sets.
roc1_bart = bart_function(data,test_data1) ### 0.92
roc2_bart = bart_function(data,test_data2) ### 0.91
#########################
## plot
#########################
library(ggplot2)
# Flatten a ROC curve into a (model, fpr, tpr) data frame; one row per
# ROC point, all rows carrying the supplied model label.
roc_plot <- function(model, roc_object) {
  labels <- rep(model, length(roc_object$fpr))
  data.frame(model = labels, fpr = roc_object$fpr, tpr = roc_object$tpr)
}
# Long-format ROC data frames for the 3-best-features set; AUC values in
# the legend labels were transcribed by hand from the printed output.
logit_plot1 = roc_plot("logistic(0.79)",roc1_logit)
rf_plot1 = roc_plot("random_forest(0.95)",roc1_rf)
nn_plot1 = roc_plot("neural_network(0.80)",roc1_nn)
svm_plot1 = roc_plot("SVM(0.89)",roc1_svm)
nB_plot1 = roc_plot("Naives_Bayes(0.76)",roc1_nB)
bart_plot1 = roc_plot("BART(0.83)",roc1_bart)
plot_df1 = rbind(logit_plot1,rf_plot1,nn_plot1,svm_plot1,nB_plot1,bart_plot1)
g5 = ggplot(plot_df1,aes(x=fpr,y=tpr,col=model))+
  geom_line(aes(y=tpr))+
  labs(title="Fig 2e. ROC curve for the test sample 1(Feature set3)",x="False Positive Rate",y="True Positive Rate") +
  theme(plot.title=element_text(size=7),axis.text.x=element_text(angle=45, hjust=1, size=5),axis.text.y=element_text(vjust=0, size=5),legend.text=element_text(size=5)) +
  theme(legend.title=element_text(size=5),axis.title.x=element_text(size=6),axis.title.y=element_text(size=6))
# Same figure for test set 2.
logit_plot2 = roc_plot("logistic(0.49)",roc2_logit)
rf_plot2 = roc_plot("random_forest(0.63)",roc2_rf)
nn_plot2 = roc_plot("neural_network(0.69)",roc2_nn)
svm_plot2 = roc_plot("SVM(0.58)",roc2_svm)
nB_plot2 = roc_plot("Naives_Bayes(0.52)",roc2_nB)
bart_plot2 = roc_plot("BART(0.72)",roc2_bart)
plot_df2 = rbind(logit_plot2,rf_plot2,nn_plot2,svm_plot2,nB_plot2,bart_plot2)
g6 = ggplot(plot_df2,aes(x=fpr,col=model))+
  geom_line(aes(y=tpr))+
  labs(title="Fig 2f. ROC curve for the test sample 2(Feature set3)",x="False Positive Rate",y="True Positive Rate") +
  theme(plot.title=element_text(size=7),axis.text.x=element_text(angle=45, hjust=1, size=5),axis.text.y=element_text(vjust=0, size=5),legend.text=element_text(size=5)) +
  theme(legend.title=element_text(size=5),axis.title.x=element_text(size=6),axis.title.y=element_text(size=6))
####### multiplot
# Arrange multiple ggplot objects on one page using grid viewports.
#
# ...      -- ggplot objects
# plotlist -- alternatively (or additionally), a list of ggplot objects
# file     -- accepted for interface compatibility; currently unused
# cols     -- number of columns when `layout` is not given
# layout   -- optional matrix; plot i is drawn in the cell(s) where layout == i
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Combine the ... arguments and plotlist into a single list of plots
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)
  # If layout is NULL, derive an n-row x `cols` grid that fits every plot,
  # filled column by column.
  if (is.null(layout)) {
    layout <- matrix(seq_len(cols * ceiling(numPlots / cols)),
                     ncol = cols, nrow = ceiling(numPlots / cols))
  }
  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Set up a fresh page with one viewport cell per layout slot
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Draw each plot in the cell(s) whose layout entry matches its index
    for (i in seq_len(numPlots)) {
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Write all six ROC panels to one PDF page. multiplot fills its layout
# column by column, so with cols=2 the left column holds g1, g3, g5 and
# the right column holds g2, g4, g6.
pdf("Fig 2.pdf")
multiplot(g1,g3,g5,g2,g4,g6,cols=2)
dev.off()
|
968bae22a591b5fead898ce070c59b9ce4108a7e | 8c36101c0716812a572b1dd91829aecede9d920b | /001_DNA/001_DNA.R | 2132b174e713d4f9132a805d79e59e79889c1c9d | [] | no_license | Humility-K/Rosalind | 3457a413b4a21cb40c5242f88e8ee940479b2590 | 0473510a2516d2c7a9c78dbb5e6e43bfe1894dc4 | refs/heads/master | 2021-05-28T21:59:40.151108 | 2015-03-27T14:28:36 | 2015-03-27T14:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 638 | r | 001_DNA.R | if(FALSE){
'''
My solution to Rosalind Bioinformatics Problem 001
Title: Counting DNA Nucleotides
Rosalind ID: DNA
Rosalind #: 001
URL: http://rosalind.info/problems/dna
Goal to count the number of each nucleotide in a string.
Note - To my knowledge, R does not possess a means
for multiline comments. As such, to avoid the hassle
of excessive # and computation of the useless string
the use of if(FALSE){} provides a workaround. Not
pretty but it works.
'''
}
# Load stringr for str_count; library() (rather than require()) fails
# loudly if the package is missing instead of silently returning FALSE.
library(stringr)
file_name = "data/rosalind_dna.txt"
# Read the entire file as one string (file.info gives its size in bytes).
seq = readChar(file_name, file.info(file_name)$size)
# Count occurrences of each nucleotide; results are in the order A C G T.
str_count(seq, c("A","C","G","T"))
0d9a336529f9198808fd26ce75447c3121f9c13f | dbaeb60398f6cc9420d2dbd3ade57bce56aca2d1 | /R/plot_align.peaks.R | 4b24e4a93b4d49cd421480fde1d0b63586b707f2 | [] | no_license | dstreble/TLdating | 6125a9323a8b7c814323454dad4c436b1b189797 | ff7cbf39a67db240808f9b59d4135325744a42c7 | refs/heads/master | 2020-05-22T04:39:03.900638 | 2017-09-06T14:37:28 | 2017-09-06T14:37:28 | 52,873,093 | 5 | 1 | null | 2016-03-17T10:58:42 | 2016-03-01T11:51:17 | R | UTF-8 | R | false | false | 6,615 | r | plot_align.peaks.R | #' Plots mod_alignPeaks results
#'
#' This function plots the results obtained by mod_alignPeaks.
#'
#' @param temperatures
#' \link{numeric}: Vector containing the temperature step
#' @param old.TL
#' \link{numeric}: Matrix containing the luminescence signal before the peak alignment.
#' @param new.TL
#' \link{numeric}: Matrix containing the luminescence signal after the peak alignment.
#' @param ref.TL
#' \link{numeric}: Matrix containing the luminescence signal used as reference to define the peak position.
#' @param pos.peak
#' \link{numeric}: Average peak position.
#' @param plotting.parameters
#' \link{list} (with default): list containing the plotting parameters. See details.
#'
#'@details
#' \bold{Plotting parameters} \cr
#' The plotting parameters are: \cr
#' \describe{
#' \item{\code{plot.Tmin}}{
#' \link{logical}: Minimum temperature which is plotted.}
#' \item{\code{plot.Tmax}}{
#' \link{logical}: Maximum temperature which is plotted.}
#' }
#'
#' @seealso
#' \link{mod_align.peaks}
#'
#' @author David Strebler
#'
#' @export plot_align.peaks
plot_align.peaks <- function(
  temperatures,
  old.TL,
  new.TL,
  ref.TL,
  pos.peak,
  plotting.parameters=list(plot.Tmin=0,
                           plot.Tmax=NA)
){
  # ------------------------------------------------------------------------------
  # Integrity Check
  # ------------------------------------------------------------------------------
  # Every mandatory input must be supplied and numeric; fail fast with a
  # function-tagged error message otherwise.
  if (missing(temperatures)){
    stop("[plot_align.peaks] Error: Input 'temperatures' is missing.")
  }else if (!is.numeric(temperatures)){
    stop("[plot_align.peaks] Error: Input 'temperatures' is not of type 'numeric'.")
  }
  if (missing(old.TL)){
    stop("[plot_align.peaks] Error: Input 'old.TL' is missing.")
  }else if (!is.numeric(old.TL)){
    stop("[plot_align.peaks] Error: Input 'old.TL' is not of type 'numeric'.")
  }
  if (missing(new.TL)){
    stop("[plot_align.peaks] Error: Input 'new.TL' is missing.")
  }else if (!is.numeric(new.TL)){
    stop("[plot_align.peaks] Error: Input 'new.TL' is not of type 'numeric'.")
  }
  if (missing(ref.TL)){
    stop("[plot_align.peaks] Error: Input 'ref.TL' is missing.")
  }else if (!is.numeric(ref.TL)){
    stop("[plot_align.peaks] Error: Input 'ref.TL' is not of type 'numeric'.")
  }
  if (missing(pos.peak)){
    stop("[plot_align.peaks] Error: Input 'pos.peak' is missing.")
  }else if (!is.numeric(pos.peak)){
    stop("[plot_align.peaks] Error: Input 'pos.peak' is not of type 'numeric'.")
  }
  if(!is.list(plotting.parameters)){
    stop("[plot_align.peaks] Error: Input 'plotting.parameters' is not of type 'list'.")
  }
  # ------------------------------------------------------------------------------
  Tmax <- max(temperatures)
  nPoints <- length(temperatures)
  plot.Tmin <- plotting.parameters$plot.Tmin
  plot.Tmax <- plotting.parameters$plot.Tmax
  # Check Values -------------------
  # Plotting parameters
  # Non-finite values (the NA default for plot.Tmax in particular) fall
  # back to the full temperature range.
  # NOTE(review): when the parameter is NULL, `!is.finite(NULL)` is a
  # zero-length logical; `||` on a zero-length operand errors in R >= 4.3,
  # so the is.null() branch should be tested first -- confirm intended
  # minimum R version.
  if(!is.numeric(plot.Tmin)){
    if(!is.finite(plot.Tmin) || is.null(plot.Tmin)){
      plot.Tmin <- 0
    }else{
      stop("[plot_align.peaks] Error: plot.Tmin is not numeric.")
    }
  }
  if(!is.numeric(plot.Tmax)){
    if(!is.finite(plot.Tmax) || is.null(plot.Tmax)){
      plot.Tmax <- Tmax
    }else{
      stop("[plot_align.peaks] Error: plot.Tmax is not numeric.")
    }
  }
  if(plot.Tmin > plot.Tmax){
    stop("[plot_align.peaks] Error: plot.Tmin > plot.Tmax")
  }
  # Clamp the plotting window to [0, Tmax].
  if(plot.Tmin < 0){
    plot.Tmin <- 0
  }
  if(plot.Tmax > Tmax){
    plot.Tmax <- Tmax
  }
  # -------------------------------
  # Convert the temperature window into channel (row) indices; these are
  # only used to y-scale the aligned-TL panel below.
  Tstep <- Tmax/nPoints
  plot.min <- ceiling(plot.Tmin/Tstep)
  plot.max <-floor(plot.Tmax/Tstep)
  #----------------------------------------------------------------------------------------------
  #Plot results
  #----------------------------------------------------------------------------------------------
  #Layout
  # Save graphics state and set a 2x2 layout where panel 3 (aligned TL)
  # spans the whole bottom row.
  # NOTE(review): old.par is restored at the end but not via on.exit(),
  # so an error mid-plot leaves the device state modified.
  old.par <- par( no.readonly = TRUE )
  par( oma = c(0.5, 0, 3, 0 ) )
  layout(matrix(c(1,2,3,3), 2, 2, byrow = TRUE))
  #Plot not aligned
  # Panel 1: one curve per column of old.TL, overlaid on shared axes.
  #Boundary
  plot.TL.max <- max(old.TL,na.rm = TRUE)
  #color
  colors <- 1:ncol(old.TL)
  for(i in 1 : ncol(old.TL)){
    temp.TL <- old.TL[,i]
    temp.color <- colors[i]
    if(i == 1) {
      plot(x=temperatures,
           y=temp.TL,
           xlim=c(0,Tmax),
           ylim=c(0,plot.TL.max),
           xlab="Temperature (\u00b0C)",
           ylab = "Luminescence signal",
           main="TL before peaks alignement",
           type="l",
           col=temp.color)
      par(new = TRUE)
    }else{
      lines(x=temperatures,
            y=temp.TL,
            col=temp.color,
            xlim=c(0,Tmax),
            ylim=c(0,plot.TL.max)
      )
    }
  }
  par(new = FALSE)
  #Plot Reference TL (testdose)
  # Panel 2: reference curves with the average peak position marked by a
  # dashed vertical line.
  #Boundary
  plot.TL.max <- max(ref.TL,na.rm = TRUE)
  #color
  colors <- 1:ncol(ref.TL)
  for(i in 1 : ncol(ref.TL)){
    temp.TL <- ref.TL[,i]
    temp.color <- colors[i]
    if(i == 1) {
      plot(x=temperatures,
           y=temp.TL,
           xlim=c(0,Tmax),
           ylim=c(0,plot.TL.max),
           xlab="Temperature (\u00b0C)",
           ylab = "Luminescence signal (Tx)",
           main="Peak position",
           type="l",
           col=temp.color)
      par(new = TRUE)
    }else{
      lines(x=temperatures,
            y=temp.TL,
            col=temp.color,
            xlim=c(0,Tmax),
            ylim=c(0,plot.TL.max)
      )
    }
  }
  abline(v=pos.peak,col=2,lty=3)
  par(new = FALSE)
  #Plot aligned
  # Panel 3: aligned curves, zoomed to the requested temperature window;
  # the y-scale is taken from the windowed rows only.
  #Boundary
  plot.TL.max <- max(new.TL[plot.min:plot.max,],na.rm = TRUE)
  #color
  colors <- 1:ncol(new.TL)
  for(i in 1 : ncol(new.TL)){
    temp.TL <- new.TL[,i]
    temp.color <- colors[i]
    if(i == 1) {
      plot(x=temperatures,
           y=temp.TL,
           xlim=c(plot.Tmin,plot.Tmax),
           ylim=c(0,plot.TL.max),
           xlab="Temperature (\u00b0C)",
           ylab = "Luminescence signal (peaks shifted)",
           main="TL after peak alignment",
           type="l",
           col=temp.color)
      par(new = TRUE)
    }else{
      lines(x=temperatures,
            y=temp.TL,
            col=temp.color,
            xlim=c(plot.Tmin,plot.Tmax),
            ylim=c(0,plot.TL.max)
      )
    }
  }
  par(new = FALSE)
  #Page title ---------------------------------------------------------
  page.title <- paste("Peak Alignment")
  mtext(page.title, outer=TRUE,font = 2)
  #clean layout...
  # Restore the single-panel layout and the saved graphics state.
  layout(1)
  par(old.par)
}
|
d5c0fac89c9299b2331ee0b0d938951fe580c93b | 50b7b8ec1cdd63aabc6dc3f436c59c1dc8e94b6f | /scr/preprocess.R | 5001cc5d6db8d1c8395acdb83ddcc812c6db2795 | [] | no_license | Maizi-22/MIMICIII-FB | bccde1f3ed9e76d4000984fcba71c1bdd57dbfa2 | 030378efbfc0c70f1e26a967b5d3792f37cf0a65 | refs/heads/master | 2021-10-09T11:26:47.909994 | 2018-03-06T07:25:22 | 2018-03-06T07:25:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,112 | r | preprocess.R | # data preprocess
# ------------------------------------------------------------------
# Data preprocessing for the MIMIC-III sepsis fluid-balance analysis.
# Reads cached CSV extracts, cleans/recodes variables, and writes the
# analysis-ready dataset to data/finaldata/data_raw_sepsis.csv.
# ------------------------------------------------------------------
library(readr)
# Cached extracts (absolute paths -- assumes the original author's
# project layout; TODO confirm before rerunning elsewhere).
data.raw.sepsis <- read_csv("~/Documents/R-projects/MIMICIII_FB/data/cache_data/sepsis_02012018.csv")
# Keep only columns 2 and 6 of the MV extract (id + duration; the
# duration column is renamed after the merge below).
mv.duration <- read_csv("~/Documents/R-projects/MIMICIII_FB/data/cache_data/mv_duration.csv")[, c(2,6)]
expire28 <- read_csv("~/Documents/R-projects/MIMICIII_FB/data/cache_data/expire28.csv")
# Remove three records that appear to have erroneous fluid-balance values.
data.raw.sepsis <- data.raw.sepsis[which(data.raw.sepsis$subject_id != '234959' & data.raw.sepsis$icustay_id != '252601' & data.raw.sepsis$icustay_id != '235479'), ]
# Study cohort: all sepsis patients in MIMIC.
# Ages above 300 are de-identification placeholders; recode them to 90.
data.raw.sepsis$age[data.raw.sepsis$age >300] <- 90
# Drop unused columns (selected by position in the raw extract).
data.raw.sepsis <- data.raw.sepsis[, -c(8, 10, 13:15, 17:19, 30, 37, 38, 44, 46, 48, 50, 52, 55, 57, 59, 61, 63, 65, 66, 68, 69)]
# Recode gender to 0/1: 'M' -> 0, 'F' -> 1.
# (The previous comment said the opposite of what this code does.)
data.raw.sepsis$gender[which(data.raw.sepsis$gender == 'M')] <- 0
data.raw.sepsis$gender[which(data.raw.sepsis$gender == 'F')] <- 1
# Recode the ICU expire flag to numeric: 'Y' -> 1, 'N' -> 0.
data.raw.sepsis$icu_expire_flag[which(data.raw.sepsis$icu_expire_flag == 'Y')] <- 1
data.raw.sepsis$icu_expire_flag[which(data.raw.sepsis$icu_expire_flag == 'N')] <- 0
# Convert binary/categorical columns (by position) to factors.
data.raw.sepsis[, c(5, 9, 14:22, 24:31, 37, 44)] <- lapply(data.raw.sepsis[, c(5, 9, 14:22, 24:31, 37, 44)], as.factor)
# Convert length-of-stay variables from hours to days.
data.raw.sepsis$los_hospital <- data.raw.sepsis$los_hospital/24
data.raw.sepsis$los_icu <- data.raw.sepsis$los_icu/24
# Add mechanical-ventilation duration (left join on hadm_id; column 11
# is dropped first); admissions without MV get a duration of 0.
data.raw.sepsis <- merge(data.raw.sepsis[, -11], mv.duration, by = 'hadm_id', all.x = TRUE)
colnames(data.raw.sepsis)[49] <- 'Mechanical_ventilation_duration'
data.raw.sepsis$Mechanical_ventilation_duration[is.na(data.raw.sepsis$Mechanical_ventilation_duration)] <- 0
# Add the 28-day expire flag; admissions missing from expire28 are coded 0.
data.raw.sepsis <- merge(data.raw.sepsis, expire28[, c(1,4)], by = 'hadm_id', all.x = TRUE)
data.raw.sepsis$expire_flag_28[is.na(data.raw.sepsis$expire_flag_28)] <- 0
data.raw.sepsis$expire_flag_28 <- as.factor(data.raw.sepsis$expire_flag_28)
# Drop three variables with many missing values.
data.raw.sepsis <- data.raw.sepsis[, names(data.raw.sepsis) %in% c('height', 'neutrophils', 'lactate') == FALSE]
# Save the analysis-ready dataset.
write.csv(data.raw.sepsis, '~/Documents/R-projects/MIMICIII_FB/data/finaldata/data_raw_sepsis.csv')
# ------------------------------------------------------------------
# Data update: rebuild the sepsis dataset with first-recorded chart
# events, admission location and source-of-infection data, then apply
# the same cleaning steps as the original preprocessing section above.
# Output: data/finaldata/data_sepsis_update.csv
# ------------------------------------------------------------------
library(readr)
library(dplyr)
data.sepsis.new <- read_csv("~/Documents/R-projects/MIMICIII_FB/data/cache_data/final_data_25Jan2018.csv")
chart.data <- read_csv("~/Documents/R-projects/MIMICIII_FB/data/merge/chart_events_first.csv")[, -c(1,2)]
# Left join first-recorded chart events on the ICU stay id.
# Fixed: `by.x <- '...'` assigned a stray variable and passed the value
# positionally (it only worked because it landed on `by`); use `by.x =`.
data.sepsis.new <- merge(data.sepsis.new, chart.data, by.x = 'icustay_id', by.y = 'ICUSTAY_ID', all.x = TRUE)
admloc <- read_csv("~/Documents/R-projects/MIMICIII_FB/data/merge/admit_loc.csv")[, -1]
data.sepsis.new <- merge(data.sepsis.new, admloc, by.x = 'hadm_id', by.y = 'HADM_ID', all.x = TRUE)
# Keep a single row per hospital admission.
data.sepsis.new <- distinct(data.sepsis.new, hadm_id, .keep_all = TRUE)
# Select and order the analysis columns.
data.sepsis.new <- data.sepsis.new[, c("subject_id", "hadm_id", "icustay_id", "age", "gender"
                                       , "weight", "height", "admission_type", "los_hospital"
                                       , "first_hosp_stay", "hospital_expire_flag", "los_icu"
                                       , "first_careunit", "last_careunit", "first_icu_stay"
                                       , "duration_hours", "gcseyes", "gcsmotor", "gcsverbal"
                                       , "mingcs", "sofa", "diabetes", "hypertension"
                                       , "congestive_heart_failure", "renal_failure", "liver_disease"
                                       , "cancer", "aids", "chronic_pulmonary", "weight_loss", "obesity"
                                       , "oasis", "infection", "explicit_sepsis", "organ_dysfunction"
                                       , "mech_vent", "angus", "ADMISSION_LOCATION", "dialysis"
                                       , "eskd2", "eskd1", "eskd", "lactate", "lactate_uom"
                                       , "hemoglobin", "hemoglobin_uom", "creatinine", "creatinine_uom"
                                       , "wbc", "wbc_uom", "neutrophils", "neutrophils_uom"
                                       , "icu_expire_flag", "temp_value", "temp_uom", "resp_rate"
                                       , "resp_rate_uom", "heart_rate", "heart_rate_uom", "sys_bp"
                                       , "sys_bp_uom", "dias_bp", "dias_bp_uom", "mean_bp", "mean_bp_uom"
                                       , "fb3hr", "fb12hr", "fb24hr", "fb48hr", "fb72hr", "expire_flag_28" )]
# Remove three records that appear to have erroneous fluid-balance values.
data.sepsis.new <- data.sepsis.new[which(data.sepsis.new$subject_id != '234959' & data.sepsis.new$icustay_id != '252601' & data.sepsis.new$icustay_id != '235479'), ]
# Study cohort: all sepsis patients in MIMIC.
# Ages above 300 are de-identification placeholders; recode them to 90.
data.sepsis.new$age[data.sepsis.new$age > 300] <- 90
# Drop unused columns (selected by position).
data.sepsis.new <- data.sepsis.new[, -c(8, 10, 13:15, 17:19, 30, 37, 38, 44, 46, 48, 50, 52, 55, 57, 59, 61, 63, 65)]
# Recode gender to 0/1: 'M' -> 0, 'F' -> 1.
data.sepsis.new$gender[which(data.sepsis.new$gender == 'M')] <- 0
data.sepsis.new$gender[which(data.sepsis.new$gender == 'F')] <- 1
# Recode the ICU expire flag to numeric: 'Y' -> 1, 'N' -> 0.
data.sepsis.new$icu_expire_flag[which(data.sepsis.new$icu_expire_flag == 'Y')] <- 1
data.sepsis.new$icu_expire_flag[which(data.sepsis.new$icu_expire_flag == 'N')] <- 0
# Convert binary/categorical columns (by position) to factors.
data.sepsis.new[, c(5, 9, 14:22, 24:31, 37, 44)] <- lapply(data.sepsis.new[, c(5, 9, 14:22, 24:31, 37, 44)], as.factor)
# Convert length-of-stay variables from hours to days.
data.sepsis.new$los_hospital <- data.sepsis.new$los_hospital/24
data.sepsis.new$los_icu <- data.sepsis.new$los_icu/24
# Add mechanical-ventilation duration; admissions without MV get 0.
mv.duration <- read_csv("~/Documents/R-projects/MIMICIII_FB/data/cache_data/mv_duration.csv")[, c(2,6)]
colnames(mv.duration)[2] <- 'Mechanical_ventilation_duration'
data.sepsis.new <- merge(data.sepsis.new, mv.duration, by = 'hadm_id', all.x = TRUE)
data.sepsis.new$duration_hours <- NULL
data.sepsis.new$Mechanical_ventilation_duration[is.na(data.sepsis.new$Mechanical_ventilation_duration)] <- 0
# Replace the 28-day expire flag; missing values are coded 0.
expire28 <- read_csv("~/Documents/R-projects/MIMICIII_FB/data/cache_data/expire28.csv")[, c(1,3)]
data.sepsis.new$expire_flag_28 <- NULL
data.sepsis.new <- merge(data.sepsis.new, expire28, by = 'hadm_id', all.x = TRUE)
data.sepsis.new$expire_flag_28[is.na(data.sepsis.new$expire_flag_28)] <- 0
# Add source-of-infection data.
pul.data <- read_csv("~/Documents/R-projects/MIMICIII_FB/data/merge/source_of_infection.csv")[, -1]
data.sepsis.new <- merge(data.sepsis.new, pul.data, by.x = 'hadm_id', by.y = 'HADM_ID', all.x = TRUE)
# Drop three variables with many missing values.
data.sepsis.new <- data.sepsis.new[, !(names(data.sepsis.new) %in% c('height', 'neutrophils', 'lactate'))]
# Save the updated analysis-ready dataset.
write.csv(data.sepsis.new, '~/Documents/R-projects/MIMICIII_FB/data/finaldata/data_sepsis_update.csv')
|
a40794164f4c5be92dba378c39713133ed3ce5b3 | 725cce758902d6d9db049e87dc211c40ff10921e | /R/select.siteWins.R | 09f3f04d3e6cc63bb6e9a3c049bd4d1f32c3ff3f | [] | no_license | yaomin/dptmethods | a589e21dbff91075cea72fbbed40626fa643acee | 846a42d01c05e1a27fec290498e011b0f05d6882 | refs/heads/master | 2021-01-17T10:33:38.755292 | 2013-07-11T23:54:16 | 2013-07-11T23:54:16 | 7,569,298 | 1 | 0 | null | 2014-10-13T15:59:10 | 2013-01-11T23:58:27 | R | UTF-8 | R | false | false | 248 | r | select.siteWins.R | select.siteWins <-
function(patt, wins, sites, chr) {
  # Sites associated with the requested pattern.
  pattern_sites <- sites[[patt]]
  # Keep only the windows that overlap a site for this pattern.
  overlapping <- wins[wins %over% pattern_sites]
  # Index of the site each overlapping window matches.
  site_id <- match(overlapping, pattern_sites)
  # Assemble one row per overlapping window: chromosome, pattern,
  # matched-site ID and window start position.
  out <- data.frame(chr, patt, cbind(site_id, start(overlapping)))
  names(out) <- c("chr", "pattern", "ID", "Win")
  out
}
|
f278f05afac5b742f1862eacb6b2b45853b82ecd | d98b3682b95b7842d9d5fff74802a4bf0a5a7ccc | /R/mf_symb_choro.R | 6b9ab8d7d28f8e9790f8f40664e31edbfbc90fe7 | [] | no_license | riatelab/mapsf | f162a5049205a52431813015275ecb357de1de1a | 7ec386db24b1d89f7771598f68a1699cb289c1a9 | refs/heads/master | 2023-08-27T19:57:48.700425 | 2023-07-27T13:06:11 | 2023-07-27T13:06:11 | 274,923,318 | 206 | 24 | null | 2023-03-29T14:30:14 | 2020-06-25T13:28:50 | R | UTF-8 | R | false | false | 5,337 | r | mf_symb_choro.R | #' @title Plot symbols using choropleth coloration
#' @description Plot symbols with colors based on a quantitative
#' data classification.
#' @eval my_params(c(
#' 'x',
#' 'var',
#' 'border',
#' 'lwd',
#' 'add' ,
#' 'col_na',
#' 'pal',
#' 'cexs',
#' 'pch',
#' 'pch_na',
#' 'cex_na',
#' 'val_order',
#' 'alpha',
#' 'breaks',
#' 'nbreaks',
#' 'leg_pos2',
#' 'leg_title',
#' 'leg_title_cex',
#' 'leg_val_cex',
#' 'leg_val_rnd',
#' 'leg_no_data',
#' 'leg_frame'))
#' @details
#' Breaks defined by a numeric vector or a classification method are
#' left-closed: breaks defined by \code{c(2, 5, 10, 15, 20)}
#' will be mapped as [2 - 5[, [5 - 10[, [10 - 15[, [15 - 20].
#' The "jenks" method is an exception and has to be right-closed.
#' Jenks breaks computed as \code{c(2, 5, 10, 15, 20)}
#' will be mapped as [2 - 5], ]5 - 10], ]10 - 15], ]15 - 20].
#' @importFrom graphics box
#' @keywords internal
#' @export
#' @return x is (invisibly) returned.
#' @examples
#' mtq <- mf_get_mtq()
#' mf_map(mtq)
#' mf_symb_choro(mtq, c("STATUS", "MED"))
#'
#' mf_map(mtq)
#' mtq$STATUS[30] <- NA
#' mtq$MED[5] <- NA
#' mf_symb_choro(mtq, c("STATUS", "MED"),
#' pal = "Reds 3", breaks = "quantile", nbreaks = 4,
#' pch = 21:23, cex = c(3, 2, 1),
#' pch_na = 25, cex_na = 1.5, col_na = "blue",
#' val_order = c(
#' "Prefecture",
#' "Sub-prefecture",
#' "Simple municipality"
#' )
#' )
mf_symb_choro <- function(x, var,
                          pal = "Mint",
                          alpha = 1,
                          breaks = "quantile",
                          nbreaks,
                          border,
                          pch,
                          cex = 1,
                          lwd = .7,
                          pch_na = 4,
                          cex_na = 1,
                          col_na = "white",
                          val_order,
                          leg_pos = mf_get_leg_pos(x, 2),
                          leg_title = var,
                          leg_title_cex = c(.8, .8),
                          leg_val_cex = c(.6, .6),
                          leg_val_rnd = 2,
                          leg_no_data = c("No data", "No data"),
                          leg_frame = c(FALSE, FALSE),
                          add = TRUE) {
  # Save graphical parameters and restore them on exit; margins and
  # background/foreground colors come from the mapsf package options.
  op <- par(mar = getOption("mapsf.mar"), no.readonly = TRUE)
  on.exit(par(op))
  bg <- getOption("mapsf.bg")
  fg <- getOption("mapsf.fg")
  if (missing(border)) border <- fg
  # Keep the untouched input so it can be returned invisibly at the end.
  xout <- x
  # var[1] drives the symbol shape (qualitative), var[2] the color (choro).
  var2 <- var[2]
  var1 <- var[1]
  # Plot symbols at the centroid of each feature's largest polygon.
  st_geometry(x) <- st_centroid(st_geometry(x), of_largest_polygon = TRUE)

  ################### COLORS ##########################
  # "jenks" breaks are right-closed (see @details); flag for get_col_vec.
  jen <- FALSE
  if (any(breaks %in% "jenks")) {
    jen <- TRUE
  }
  # Compute class breaks from the color variable.
  breaks <- mf_get_breaks(x = x[[var2]], nbreaks = nbreaks, breaks = breaks)
  nbreaks <- length(breaks) - 1
  # Build the palette (one color per class).
  pal <- get_the_pal(pal = pal, nbreaks = nbreaks, alpha = alpha)
  # Map each feature's value to its class color (NA where value is NA).
  mycols <- get_col_vec(x = x[[var2]], breaks = breaks, pal = pal, jen = jen)
  # no_data[1]: missing values in the symbol variable;
  # no_data[2]: missing values in the color variable.
  no_data <- c(FALSE, FALSE)
  if (max(is.na(mycols)) == 1) {
    no_data[2] <- TRUE
  }
  mycols[is.na(mycols)] <- col_na
  ###################################################################

  ################## SYMBOLS ######################################
  # Ordered modalities of the qualitative variable.
  val_order <- get_modalities(
    x = x[[var1]],
    val_order = val_order
  )
  # Default symbols: one pch per modality, skipping pch 26-31.
  if (missing(pch)) {
    pchs <- c(0:25, 32:127)
    pch <- pchs[seq_along(val_order)]
  }
  # Recycle cex to one value per modality (warn if a partial vector
  # was supplied; only its first element is used).
  if (length(cex) != length(val_order)) {
    if (length(cex) != 1) {
      # NOTE(review): the two concatenated strings lack a separating
      # space ("...number ofmodalities") -- left as-is here.
      message(paste0(
        "the length of cex does not match the number of",
        "modalities. The first cex is used for all modalities"
      ))
    }
    cex <- rep(cex[1], length(val_order))
  }
  # Per-feature symbol (pch) for the qualitative variable.
  mysym <- get_sym_typo(
    x = x[[var1]], pch = pch,
    val_order = val_order
  )
  # TO BE DONE pch_NA ##################################
  # Per-feature symbol size, via the same modality mapping.
  mycex <- get_sym_typo(
    x = x[[var1]], pch = cex,
    val_order = val_order
  )
  # TO BE DONE symbol cex ##############################
  if (max(is.na(mysym)) == 1) {
    no_data[1] <- TRUE
  }
  # Substitute the "no data" symbol/size where the modality was NA.
  mysym[is.na(mysym)] <- pch_na
  mycex[is.na(mycex)] <- cex_na
  # Filled symbols (pch 21-25) use `border` as the outline color and the
  # class color as fill (bg); other symbols use the class color directly.
  mycolspt <- mycols
  mycolspt[mysym %in% 21:25] <- border
  mycolsptbg <- mycols
  ##################################################################

  # Start a fresh map frame when not adding to an existing plot.
  if (add == FALSE) {
    mf_init(x)
    add <- TRUE
  }
  plot(st_geometry(x),
    col = mycolspt, bg = mycolsptbg, cex = mycex, pch = mysym,
    lwd = lwd, add = add
  )
  # First legend position is for symbols, second for the choro colors.
  leg_pos <- split_leg(leg_pos)
  # Choropleth (color) legend.
  mf_legend_c(
    pos = leg_pos[[2]], val = breaks, title = leg_title[2],
    title_cex = leg_title_cex[2], val_cex = leg_val_cex[2],
    val_rnd = leg_val_rnd,
    col_na = col_na, no_data = no_data[2], no_data_txt = leg_no_data[2],
    frame = leg_frame[2], pal = pal, bg = bg, fg = fg
  )
  # Symbol (typology) legend; symbols shown in grey.
  mf_legend_s(
    pos = leg_pos[[1]],
    val = val_order,
    title = leg_title[1],
    title_cex = leg_title_cex[1],
    val_cex = leg_val_cex[1],
    col_na = "grey",
    no_data = no_data[1],
    no_data_txt = leg_no_data[1],
    frame = leg_frame[1], border = border,
    pal = rep("grey", length(val_order)),
    pt_cex = cex, pt_pch = pch, pt_cex_na = cex_na,
    pt_pch_na = pch_na, bg = bg, fg = fg
  )
  # Return the (unmodified) input invisibly so the call pipes cleanly.
  return(invisible(xout))
}
fced1cbb15ab6ce6bf9369bcc6a199e337ffb8fa | 24f87379e0ac9cd2c8ac63df51c049302604fd9d | /R/logLik.grm.R | 304c37a5d076490938effd8b8cbb130137b3dd6b | [] | no_license | swnydick/catIrt | 9cab5b0e200d25623871446dca92a1a7163c02c0 | e2ed85ef698bce65adcd26e99f7918aea312bb2d | refs/heads/master | 2022-06-14T07:55:48.241525 | 2022-05-25T21:58:38 | 2022-05-25T22:16:42 | 34,589,723 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 954 | r | logLik.grm.R | logLik.grm <-
function( u, theta, params,
type = c("MLE", "BME"),
ddist = dnorm, ... ) # ddist and ... are prior distribution stuff
{
# u is the response, and x are the parameters.
type <- type[1]
# Then turn params into a matrix and determine stats:
params <- rbind(params)
## Calculating the loglikelihood without the Bayesian part: ##
p <- p.grm(theta, params)
logLik <- log( sel.prm(p, u, length(theta), nrow(params), ncol(params)) )
## Now, the Bayesian part: ##
if( type == "MLE" )
bme <- 1
if( type == "BME" )
bme <- ddist(x = theta, ... )
# if there is a silly prior, set it to something very small
bme <- ifelse(test = bme <= 0, yes = bme <- 1e-15 , no = bme)
## Returning Scalar or Vector of logLik's ##
if( length(theta) == 1 ){
return( sum(logLik) + log(bme) )
} else{
return( rowSums(logLik) + log(bme) )
} # END ifelse STATEMENT
} # END logLik.grm FUNCTION |
d09d8ac1b0a0998ed074f73c68c5b60279d56a15 | 1b5320354cc6157bf895c7422689da2c55d0466c | /R/utils.R | 693f5e5c7f5032330796d782707896dd8c49af9f | [] | no_license | afsc-gap-products/trawllight | c3c3e450792b47ee8d1774598f4197da1adc8a38 | 240ac9594c6817266159271cc63bf57dec219fa4 | refs/heads/main | 2023-04-14T01:15:24.227614 | 2022-12-14T15:37:35 | 2022-12-14T15:37:35 | 148,692,918 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,490 | r | utils.R | #' Open and bind rows of csv to data frame
#'
#' For use with AFSC/RACE/GAP data structure.
#'
#' @param directory Path to directory where csv files are stored.
#' @param string Unique pattern used in the csv files.
#' @export
csv_rbind <- function(directory, string) {
  # Open every csv in `directory` whose name matches `string` and bind
  # their rows into one data frame, recording the source file in `fname`.
  #
  # Fixes over the previous version:
  # * `for (i in 1:length(file.list))` failed on zero matches (1:0);
  #   now zero matches raises an informative error instead.
  # * The result was grown with rbind() inside the loop (quadratic);
  #   files are now read with lapply() and bound once.

  # Ensure the directory path ends with a separator ("/" or "\\").
  last_char <- substr(directory, nchar(directory), nchar(directory))
  if (!last_char %in% c("/", "\\")) {
    directory <- paste0(directory, "/")
  }

  # Files whose names match the pattern.
  matches <- grep(pattern = string, x = dir(directory), value = TRUE)
  if (length(matches) == 0) {
    stop("No files matching '", string, "' found in ", directory, call. = FALSE)
  }

  # Read one file and tag its rows with the source file name.
  read_one <- function(fname) {
    df <- read.csv(file = paste0(directory, fname), stringsAsFactors = FALSE)
    df$fname <- fname
    df
  }

  do.call(rbind, lapply(matches, read_one))
}
#' Correct tag time in cases where offsets were incorrect
#'
#' For use processing AOPs from AFSC/RACE/GAP data structure. Make adjustments to correct inconsistencies between tag time and survey time.
#'
#' @param light.data Data frame with light data
#' @param cast.data Data frame containing case data.
#' @export
time_adjustments <- function(light.data, cast.data) {
  # Apply known vessel/cruise-specific clock offsets to the tag
  # timestamps (`ctime`, seconds) so they line up with survey time.
  cruise <- cast.data$cruise[1]
  vessel <- cast.data$vessel[1]

  # All tags in cruise 201601 were one hour behind.
  if (cruise == 201601) {
    print("Correcting 2016")
    light.data$ctime <- light.data$ctime + 3600
  }
  # Vessel 94 tags ran one hour ahead in 2014 and 2015.
  if (vessel == 94 && cruise == 201501) {
    print("Correcting 94-201501")
    light.data$ctime <- light.data$ctime - 3600
  }
  if (vessel == 94 && cruise == 201401) {
    print("Correcting 94-201401")
    light.data$ctime <- light.data$ctime - 3600
  }
  # Vessel 162 in 2011 was set to the wrong time zone (eight hours off).
  if (vessel == 162 && cruise == 201101) {
    print("Correcting 162-201101")
    light.data$ctime <- light.data$ctime - 8 * 3600
  }
  # Vessel 134 in 2006: records after 8 July are shifted back 12 hours.
  if (vessel == 134 && cruise == 200601) {
    print("Correcting 134-200601")
    late <- lubridate::month(light.data$ctime) >= 7 & lubridate::day(light.data$ctime) > 8
    light.data$ctime[late] <- light.data$ctime[late] - 12 * 3600
  }

  return(light.data)
}
#' Convert radiometric energy to photon flux density
#'
#' Convert radiometric energy in watts to micromoles of photons per meter
#' squared per second, for a given wavelength.
#'
#' @param x Numeric vector. Energy for a wavelength in units of watts.
#' @param wavelength Numeric vector. Wavelength in nanometers.
#' @return Numeric vector of energy in photon flux density.
#' @export
energy_to_quanta <- function(x, wavelength) {
  # Photon energy E = h * nu = h * c / lambda, with lambda converted from
  # nanometers to meters. The previous conversion factor was written as
  # 10e-9 (= 1e-8), a typo for 1e-9 that inflated results tenfold.
  # Constants: 1e-6 (mol -> umol), 6.02214076e23 (Avogadro),
  # 3e8 m/s (speed of light), 6.63e-34 J*s (Planck).
  return(x/(1e-6*6.02214076*10^23*(3e8/(wavelength*1e-9))*6.63e-34))
}
#' Photon flux density to radiometric energy
#'
#' Convert quantum units of micromoles of photons per meter squared per
#' second to radiometric energy (watts per meter squared per second), for a
#' given wavelength.
#'
#' @param x Numeric vector. Photon flux density in umol photons m^-2 s^-1.
#' @param wavelength Numeric vector. Wavelength in nanometers.
#' @return Numeric vector of energy in radiometric energy in watts.
#' @export
quanta_to_energy <- function(x, wavelength) {
  # Inverse of energy_to_quanta(): multiply by the energy of one umol of
  # photons, E = h * c / lambda per photon, lambda converted nm -> m.
  # The previous factor 10e-9 (= 1e-8) was a typo for 1e-9.
  return(x*1e-6*6.02214076*10^23*(3e8/(wavelength*1e-9))*6.63e-34)
}
4efcc4f152c28d75ff313c467d56260c2875864c | 5da32dcfa6ea1faaa3695d535629ec2e47bd0515 | /man/get_contents.Rd | 8f12f82335d75ee8179887dcc1dbd82679f04118 | [] | no_license | zdk123/simulator | 8f1c08bd33e8434190568c66ef0ec82975aa18b9 | a2492c09f63bd5cc81db9afeff97f06996955faa | refs/heads/master | 2021-01-10T23:05:40.215258 | 2016-09-21T16:18:41 | 2016-09-21T16:18:41 | 70,423,951 | 0 | 0 | null | 2016-10-09T18:57:24 | 2016-10-09T18:57:23 | null | UTF-8 | R | false | true | 638 | rd | get_contents.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manage.R
\name{get_contents}
\alias{get_contents}
\title{Get the contents of a simulator directory}
\usage{
get_contents(dir = ".", out_loc = "out")
}
\arguments{
\item{dir}{name of the directory where directory named "files" exists}
\item{out_loc}{a length-1 character vector that gives the location
(relative to the model's path) where method outputs are stored. This can be
useful for staying organized when multiple simulations are based on
the same Model and Draws objects. Usually this is just "out"}
}
\description{
Get the contents of a simulator directory
}
|
e88806a564dc8429fd800884bc47f7e34c85edb6 | 964bdb6b6bd703408250848d5f1fcc422a2ce452 | /R/overlap-analysis.R | f589c7aae112b3649ee5c64c732ae9d6b740f90b | [] | no_license | jennasimit/BOAT | d0c654ebb8a0f86119d31c4c4d39ef30a0afb2b9 | ac1a724f5625675d2c3b7b67328a5d26bc7c1f53 | refs/heads/master | 2021-05-03T21:13:31.742174 | 2018-02-06T00:30:11 | 2018-02-06T00:30:11 | 120,379,415 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,872 | r | overlap-analysis.R | ftrait1=args[1] #trait1 output from summary-stat-overlap.R
ftrait2=args[2] # trait2 output from summary-stat-overlap.R
R=as.numeric(args[3]) # ratio of costs = type II error cost/ type I error cost
pi0=as.numeric(args[4]) # Pr(H_0); enter as a constant initially, but can also form it as a function
alpha=as.numeric(args[5]) # p-value sig threshold
fclump=args[6] #pruned SNPs using ABF or pv measure
fpv.out=args[7]
flist.abf=args[8]
flist.pv=args[9]
# Prepare per-SNP significance indicators for the overlap analysis.
#
# Fix: the ABF branch previously ended with an early `return()`, so the
# p-value results were never computed when ABF = TRUE (the commented-out
# combined return, and analysis.fn's use of sigpv1/sigpv2, show both sets
# were intended). Both measures are now computed and returned together.
#
# ftrait1, ftrait2: per-trait summary-statistic files (columns SNP, abf, P.value)
# fclump: plink clump output (column SNP) listing the pruned SNPs
# pi0: Pr(H_0); R: type II / type I error cost ratio; alpha: p-value threshold
# ABF, PV: which measures to compute (skipped measures are returned as NA)
analysis.prep.fn <- function(ftrait1,ftrait2,fclump,pi0,R,alpha,ABF=TRUE,PV=TRUE) {
  t1 <- read.table(ftrait1, header = TRUE) # output from summary-stat-overlap.R
  t2 <- read.table(ftrait2, header = TRUE) # output from summary-stat-overlap.R

  # Defaults for measures that are not requested.
  t1abf <- t2abf <- sigabf1 <- sigabf2 <- NA
  t1pv <- t2pv <- sigpv1 <- sigpv2 <- NA

  if (ABF) {
    CLabf <- read.table(fclump, header = TRUE) # plink clump output -> abf
    # Restrict each trait to the pruned SNPs.
    t1abf <- t1[match(CLabf$SNP, t1$SNP), ]
    t2abf <- t2[match(CLabf$SNP, t2$SNP), ]
    # Reject H0 of no association at a SNP if ABF > prior odds / R.
    PO <- pi0 / (1 - pi0) # prior odds of no association
    theta <- PO / R
    sigabf1 <- 1 * (t1abf$abf > theta)
    sigabf2 <- 1 * (t2abf$abf > theta)
  }

  if (PV) {
    CLpv <- read.table(fclump, header = TRUE) # plink clump output -> pv
    t1pv <- t1[match(CLpv$SNP, t1$SNP), ]
    t2pv <- t2[match(CLpv$SNP, t2$SNP), ]
    # Significant at the nominal alpha level.
    sigpv1 <- 1 * (t1pv$P.value < alpha)
    sigpv2 <- 1 * (t2pv$P.value < alpha)
  }

  return(list(t1abf = t1abf, t2abf = t2abf, t1pv = t1pv, t2pv = t2pv,
              sigabf1 = sigabf1, sigabf2 = sigabf2,
              sigpv1 = sigpv1, sigpv2 = sigpv2))
}
# McNemar exact mid p-value for paired binary outcomes.
#
# contable: a 2x2 contingency table/matrix; only the discordant cells
#   [1,2] and [2,1] are used.
# Returns the two-sided exact mid p-value.
# (A leftover debug `print(c(n, l))` has been removed.)
McNemarMidp.fn <- function(contable) {
  # Number of discordant pairs.
  n <- contable[1,2] + contable[2,1]
  # Smaller of the two discordant counts.
  l <- min(c(contable[1,2], contable[2,1]))
  # Exact one-sided McNemar p-value: P(X <= l) for X ~ Binomial(n, 1/2).
  onepv <- pbinom(l,size=n,prob=.5)
  # Mid p-value: two-sided exact p minus the point probability at l.
  pv <- 2*onepv - dbinom(l,size=n,prob=.5)
  return(pv)
}
# Overlap analysis between two traits using the prepared significance
# indicators from analysis.prep.fn.
#
# Fix: the input list was previously attach()ed/detach()ed, which mutates
# the search path and can silently mask or leak names; elements are now
# accessed explicitly via `$`.
#
# prep.out: list returned by analysis.prep.fn
# alpha, R, pi0: reported thresholds (alpha = p-value cutoff, R = cost
#   ratio, pi0 = Pr(H_0)); ABF/PV select which measures to test
analysis.fn <- function(prep.out, alpha, R, pi0, ABF = TRUE, PV = TRUE) {
  if (ABF) {
    # McNemar mid-p test of discordant ABF significance between traits.
    contable.abf <- table(prep.out$sigabf1, prep.out$sigabf2)
    pv.abf <- McNemarMidp.fn(contable.abf)
    # SNPs significant by ABF in both traits.
    ind <- which(prep.out$sigabf1 * prep.out$sigabf2 == 1)
    sigABF <- cbind(prep.out$t1abf[ind, ], prep.out$t2abf[ind, ])
  } else {
    sigABF <- NA
    pv.abf <- NA
  }
  if (PV) {
    # Same test on the p-value-based indicators.
    contable.pv <- table(prep.out$sigpv1, prep.out$sigpv2)
    pv.pv <- McNemarMidp.fn(contable.pv)
    ind <- which(prep.out$sigpv1 * prep.out$sigpv2 == 1)
    sigpv <- cbind(prep.out$t1pv[ind, ], prep.out$t2pv[ind, ])
  } else {
    sigpv <- NA
    pv.pv <- NA
  }
  # Thresholds used, reported alongside the overlap p-values.
  PO <- pi0 / (1 - pi0) # prior odds of no association
  Ltheta <- log10(PO / R)
  pv.out <- list(ABFpv = pv.abf, PVpv = pv.pv, LABFthreshold = Ltheta,
                 PVthreshold = alpha, R = R, pi0 = pi0)
  return(list(overlap.pv = pv.out, sigABF = sigABF, sigpv = sigpv))
}
|
57fffd3d825b1e8daa34e9c8b212df764df40c2e | 804f16080663d973c2c174cf312b7a6f012d48c4 | /projects/.Rproj.user/AE40687E/sources/s-6BDE1DB6/810E882D-contents | 35149d7f9de809c52ec6b3f160971d246c6625a2 | [] | no_license | farrukh-ahmed/ics | 90457de8cb18965ff911bcaa3ff6632785987cff | 1d4f5c1cba9ff748e62e2d77133236645e10e553 | refs/heads/master | 2023-08-19T18:51:17.927774 | 2021-10-20T22:28:36 | 2021-10-20T22:28:36 | 419,505,834 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,739 | 810E882D-contents | #TASK-1
# TASK 1 -- Frequency tables and univariate plots for the 2020 data.
# (`data2020` and ggplot2 are assumed to be loaded beforehand -- TODO confirm.)
transform(table(data2020$Region,dnn = "Region"))
transform(table(data2020$Subregion,dnn = "Sub-Region"))
# NOTE(review): breaks of 50-95 look like life-expectancy values, yet they
# are applied to Total.Fertility.Rate here -- confirm the intended variable.
fertilityBreaks <-seq(50.0,95.0,by=5);
transform(table(cut(data2020$Total.Fertility.Rate,fertilityBreaks),dnn = "Total Fertility Rate"))
# NOTE(review): conversely, breaks of 1-8 are applied to life expectancy
# below -- these two break vectors may have been swapped.
breaks <-seq(1.0,8.0,by=1);
transform(table(cut(data2020$Life.Expectancy.at.Birth..Males,breaks),dnn = "Life Expectancy Of Male Genders"));
transform(table(cut(data2020$Life.Expectancy.at.Birth..Females,breaks),dnn = "Life Expectancy Of Female Genders"));
transform(table(cut(data2020$Life.Expectancy.at.Birth..Both.Sexes,breaks),dnn = "Life Expectancy Of Both Genders"))
# Histograms of life expectancy (same 1-8 breaks as above).
hist(data2020$Life.Expectancy.at.Birth..Males,breaks = breaks,main = "Life Expectancy Of Males",xlab = "Life Expectancy Of Males")
hist(data2020$Life.Expectancy.at.Birth..Females,breaks = breaks,main = "Life Expectancy Of Females",xlab = "Life Expectancy Of Females")
hist(data2020$Life.Expectancy.at.Birth..Both.Sexes,breaks = breaks,main = "Life Expectancy Of Both Genders",xlab = "Life Expectancy Of Both Genders")
# Fertility histogram; fertilityBreaks is reassigned to 1-8 here.
fertilityBreaks <-seq(1,8,by=1);
hist(data2020$Total.Fertility.Rate,breaks = fertilityBreaks,main = "Total Fertility Rate",xlab = "Total Fertility Rate")
# Bar chart of region frequencies.
ggplot(data.frame(data2020$Region), aes(x=data2020$Region)) + geom_bar() +labs(y= "Frequency", x = "Regions")
# Base scatterplots: life expectancy vs fertility.
plot(data2020$Life.Expectancy.at.Birth..Males,data2020$Total.Fertility.Rate,xlab="Life Expectancy Of Males",ylab="Total Fertility Rate")
plot(data2020$Life.Expectancy.at.Birth..Females,data2020$Total.Fertility.Rate,xlab="Life Expectancy Of Females",ylab="Total Fertility Rate")
# TASK 2 -- Pearson correlations between life expectancy and fertility,
# with regression-line scatterplots (ggpubr::ggscatter).
library("ggpubr")
ggscatter(data2020, x = "Life.Expectancy.at.Birth..Males", y = "Total.Fertility.Rate",
add = "reg.line", conf.int = TRUE,
cor.coef = TRUE, cor.method = "pearson",
xlab = "Life Expectancy Of Males", ylab = "Total Fertility Rate")
ggscatter(data2020, x = "Life.Expectancy.at.Birth..Females", y = "Total.Fertility.Rate",
add = "reg.line", conf.int = TRUE,
cor.coef = TRUE, cor.method = "pearson",
xlab = "Life Expectancy Of Females", ylab = "Total Fertility Rate")
ggscatter(data2020, x = "Life.Expectancy.at.Birth..Both.Sexes", y = "Total.Fertility.Rate",
add = "reg.line", conf.int = TRUE,
cor.coef = TRUE, cor.method = "pearson",
xlab = "Life Expectancy Of Both Sexes", ylab = "Total Fertility Rate")
# Formal Pearson correlation tests for the same three pairs.
correlation_LFMale_And_Fertility <- cor.test(data2020$Life.Expectancy.at.Birth..Males, data2020$Total.Fertility.Rate,
method = "pearson")
correlation_LFMale_And_Fertility
correlation_LFFemale_And_Fertility <- cor.test(data2020$Life.Expectancy.at.Birth..Females, data2020$Total.Fertility.Rate,
method = "pearson")
correlation_LFFemale_And_Fertility
correlation_LFBothSexes_And_Fertility <- cor.test(data2020$Life.Expectancy.at.Birth..Both.Sexes, data2020$Total.Fertility.Rate,
method = "pearson")
correlation_LFBothSexes_And_Fertility
# TASK 3 -- Boxplots by sub-region (outliers highlighted in red).
ggplot(data2020, aes(x=Total.Fertility.Rate, y=Subregion)) +
geom_boxplot(outlier.colour="red", outlier.shape=1,outlier.size=2)+
labs(y= "Sub-Region", x = "Total Fertility Rate Per Woman")
ggplot(data2020, aes(x=Life.Expectancy.at.Birth..Both.Sexes, y=Subregion)) +
geom_boxplot(outlier.colour="red", outlier.shape=1,outlier.size=2)+
labs(y= "Sub-Region", x = "Life.Expectancy at Birth Of Both Sexes")
ggplot(data2020, aes(x=Life.Expectancy.at.Birth..Males, y=Subregion)) +
geom_boxplot(outlier.colour="red", outlier.shape=1,outlier.size=2)+
labs(y= "Sub-Region", x = "Life.Expectancy at Birth Of Males")
ggplot(data2020, aes(x=Life.Expectancy.at.Birth..Females, y=Subregion)) +
geom_boxplot(outlier.colour="red", outlier.shape=1,outlier.size=2)+
labs(y= "Sub-Region", x = "Life.Expectancy at Birth Of Females")
# TASK 4 -- Compare 2000 vs 2020 distributions. `my_data` presumably pools
# both years with a Year column -- created elsewhere; TODO confirm.
boxplot(my_data$Total.Fertility.Rate ~ my_data$Year,main = "Total Fertility Rate Comparision Between 2000 and 2020",xlab = "Year", ylab = "Total Fertility Rate")
boxplot(my_data$Life.Expectancy.at.Birth..Males ~ my_data$Year,main = "Life Expectancy Of Male Comparision Between 2000 and 2020",xlab = "Year", ylab = "Life Expectancy Of Male",ylim=c(40,90))
boxplot(my_data$Life.Expectancy.at.Birth..Females ~ my_data$Year,main = "Life Expectancy Of Female Comparision Between 2000 and 2020",xlab = "Year", ylab = "Life Expectancy Of Female",ylim=c(40,100))
boxplot(my_data$Life.Expectancy.at.Birth..Both.Sexes ~ my_data$Year,main = "Life Expectancy Of Both Genders Comparision Between 2000 and 2020",xlab = "Year", ylab = "Life Expectancy Of Both Genders",ylim=c(40,100))
f5909e9089a063dc31b50e420ed51e0b7d5769fc | 0e4548e316769ab5b300857e594e6aec439b2ef0 | /R/Urn.R | 243da145ea86781726318d14bfda769f3480bfc0 | [] | no_license | Euphrasiologist/GOL | cc366eeb1afdf5f1d54bfc35a406266c5beecbce | 33566734257a4cde9e3309b610b0535f84178636 | refs/heads/master | 2020-06-04T16:40:09.591860 | 2019-06-24T22:25:52 | 2019-06-24T22:25:52 | 192,107,816 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,015 | r | Urn.R | #' Urn function
#'
#' Implementation of the urn game.
#' @param red number of red balls
#' @param black number of black balls
#' @param trials number of trials in game
#' @keywords urn, gamesoflife, genetics
#' @export
#' @examples
#' urn(1,1,1000)
urn <- function(red, black, trials){
require(data.table); require(ggplot2)
dat <- data.frame(trial = 1:trials,
colour = vector(length = trials))
for(i in 1:trials){
dat$colour[i] <- sample(x = c(rep("red", red), rep("black", black)), size = 1, replace = T, prob = rep(1/(red+black), sum(red, black)) )
}
dat$outcome <- as.numeric(dat$colour == "red")
setDT(dat)
dat <- dat[, cum.sum := cumsum(outcome)/trial]
return(list(ggplot(dat, aes(x = trial, y = cum.sum))+
geom_hline(yintercept = 0.5, col = "red", size=0.3)+
geom_line()+
xlab(label = "Trial Number")+
ylab(label = "P(red)")+
theme_bw(base_line_size = 0),
dat))
}
|
ef19ace40e30c2de656565bab5bef1a4be2f9710 | 90713f63fef198d6525095d0b7569f5a1de31c75 | /Scripts/0_Data_Download/8_Download_SNVs_from_MC3_TCGA.R | 6f0c42c3a686e20bacffaa36f75e5eb0d35346d2 | [] | no_license | ALPH123/Age-associated_cancer_genome | 2c2fef2b7854b5032455accb8787a96da835def3 | 580877f9524adb6129a3ca1848dc7330a1248bab | refs/heads/master | 2023-04-11T00:20:40.474103 | 2021-04-22T08:40:17 | 2021-04-22T08:40:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 136 | r | 8_Download_SNVs_from_MC3_TCGA.R | ### Somatic mutation data was downloaded from https://gdc.cancer.gov/about-data/publications/mc3-2017
### mc3.v0.2.8.PUBLIC.maf.gz file
|
89439a383ff35d5126b375b0016e74ac646fe5f2 | 8dd039cef4960a7296559c4f7595335765ff1c1e | /Dog Breed Classifier Training/get_Dog_Prediction.R | e727065e610a7c705f8a8d53a4301fabe2478bcd | [] | no_license | Frank5547/Dog-Breed-Classifier-with-Shiny-App-Deployment | 8762a255b90083a2fff6f5eb4e1d95acacf40ec4 | a831eea88d7d50f400bb50d5b4580c9b242c59b5 | refs/heads/master | 2021-05-18T20:38:21.038581 | 2021-02-02T00:04:38 | 2021-02-02T00:04:38 | 251,409,046 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 695 | r | get_Dog_Prediction.R | #####################################
# get_Dog_Prediction.R
# Inputs
# - image: the path to an image file
# Outputs
# - prediction: a list containing the top 3 dog breed prediction and their likelihoods
# Creator: Francisco Javier Carrera Arias
# Date Created: 03/29/2020
####################################
library(reticulate)
get_Dog_Prediction <- function(image){
# Import the New_prediction Python file using reticulate's source Python
source_python("predict_breed_transfer.py")
# Get the LSTM's model prediction invoking the predict_new_review function
prediction <- predict_breed_transfer(image)
# Return the prediction
return(prediction)
} |
546a0621ea2ea542c3641a48c8be9aac40282572 | 79b935ef556d5b9748b69690275d929503a90cf6 | /man/quadrat.test.Rd | c1041546056699a1d8a2fddd1bb0992afa06e26e | [] | no_license | spatstat/spatstat.core | d0b94ed4f86a10fb0c9893b2d6d497183ece5708 | 6c80ceb9572d03f9046bc95c02d0ad53b6ff7f70 | refs/heads/master | 2022-06-26T21:58:46.194519 | 2022-05-24T05:37:16 | 2022-05-24T05:37:16 | 77,811,657 | 6 | 10 | null | 2022-03-09T02:53:21 | 2017-01-02T04:54:22 | R | UTF-8 | R | false | false | 11,478 | rd | quadrat.test.Rd | \name{quadrat.test}
\alias{quadrat.test}
\alias{quadrat.test.ppp}
\alias{quadrat.test.quadratcount}
\alias{quadrat.test.ppm}
\alias{quadrat.test.slrm}
\title{Dispersion Test for Spatial Point Pattern Based on
Quadrat Counts}
\description{
Performs a test of Complete Spatial Randomness
for a given point pattern, based on quadrat counts.
Alternatively performs a goodness-of-fit test of a fitted
inhomogeneous Poisson model.
By default performs chi-squared tests; can also perform
Monte Carlo based tests.
}
\usage{
quadrat.test(X, ...)
\method{quadrat.test}{ppp}(X, nx=5, ny=nx,
alternative=c("two.sided", "regular", "clustered"),
method=c("Chisq", "MonteCarlo"),
conditional=TRUE, CR=1,
lambda=NULL, df.est=NULL,
...,
xbreaks=NULL, ybreaks=NULL, tess=NULL,
nsim=1999)
\method{quadrat.test}{quadratcount}(X,
alternative=c("two.sided", "regular", "clustered"),
method=c("Chisq", "MonteCarlo"),
conditional=TRUE, CR=1,
lambda=NULL, df.est=NULL,
...,
nsim=1999)
\method{quadrat.test}{ppm}(X, nx=5, ny=nx,
alternative=c("two.sided", "regular", "clustered"),
method=c("Chisq", "MonteCarlo"),
conditional=TRUE, CR=1, df.est=NULL,
...,
xbreaks=NULL, ybreaks=NULL, tess=NULL,
nsim=1999)
\method{quadrat.test}{slrm}(X, nx=5, ny=nx,
alternative=c("two.sided", "regular", "clustered"),
method=c("Chisq", "MonteCarlo"),
conditional=TRUE, CR=1, df.est=NULL,
...,
xbreaks=NULL, ybreaks=NULL, tess=NULL,
nsim=1999)
}
\arguments{
\item{X}{
A point pattern (object of class \code{"ppp"})
to be subjected to the goodness-of-fit test.
Alternatively a fitted point process model (object of class
\code{"ppm"} or \code{"slrm"}) to be tested.
Alternatively \code{X} can be the result of applying
\code{\link{quadratcount}} to a point pattern.
}
\item{nx,ny}{
Numbers of quadrats in the \eqn{x} and \eqn{y} directions.
Incompatible with \code{xbreaks} and \code{ybreaks}.
}
\item{alternative}{
Character string (partially matched) specifying the alternative
hypothesis.
}
\item{method}{
Character string (partially matched) specifying the test to use:
either \code{method="Chisq"} for the chi-squared test (the default),
or \code{method="MonteCarlo"} for a Monte Carlo test.
}
\item{conditional}{
Logical. Should the Monte Carlo test be conducted
conditionally upon the observed number of points of the pattern?
Ignored if \code{method="Chisq"}.
}
\item{CR}{
Optional. Numerical value. The exponent
for the Cressie-Read test statistic. See Details.
}
\item{lambda}{
Optional. Pixel image (object of class \code{"im"})
or function (class \code{"funxy"}) giving the predicted
intensity of the point process.
}
\item{df.est}{
Optional. Advanced use only.
The number of fitted parameters, or the
number of degrees of freedom lost by estimation of
parameters.
}
\item{\dots}{Ignored.}
\item{xbreaks}{
Optional. Numeric vector giving the \eqn{x} coordinates of the
boundaries of the quadrats. Incompatible with \code{nx}.
}
\item{ybreaks}{
Optional. Numeric vector giving the \eqn{y} coordinates of the
boundaries of the quadrats. Incompatible with \code{ny}.
}
\item{tess}{
Tessellation (object of class \code{"tess"} or something acceptable
to \code{\link{as.tess}}) determining the
quadrats. Incompatible with \code{nx, ny, xbreaks, ybreaks}.
}
\item{nsim}{
The number of simulated samples to generate when
\code{method="MonteCarlo"}.
}
}
\details{
These functions perform \eqn{\chi^2}{chi^2} tests or Monte Carlo tests
of goodness-of-fit for a point process model, based on quadrat counts.
The function \code{quadrat.test} is generic, with methods for
point patterns (class \code{"ppp"}), split point patterns
(class \code{"splitppp"}), point process models
(class \code{"ppm"} or \code{"slrm"})
and quadrat count tables (class \code{"quadratcount"}).
\itemize{
\item
if \code{X} is a point pattern, we test the null hypothesis
that the data pattern is a realisation of Complete Spatial
Randomness (the uniform Poisson point process). Marks in the point
pattern are ignored. (If \code{lambda} is given then the null
hypothesis is the Poisson process with intensity \code{lambda}.)
\item
if \code{X} is a split point pattern, then for each of the
component point patterns (taken separately) we test
the null hypotheses of Complete Spatial Randomness.
See \code{\link{quadrat.test.splitppp}} for documentation.
\item
If \code{X} is a fitted point process model, then it should be
a Poisson point process model. The
data to which this model was fitted are extracted from the model
object, and are treated as the data point pattern for the test.
We test the null hypothesis
that the data pattern is a realisation of the (inhomogeneous) Poisson point
process specified by \code{X}.
}
In all cases, the window of observation is divided
into tiles, and the number of data points in each tile is
counted, as described in \code{\link{quadratcount}}.
The quadrats are rectangular by default, or may be regions of arbitrary shape
specified by the argument \code{tess}.
The expected number of points in each quadrat is also calculated,
as determined by CSR (in the first case) or by the fitted model
(in the second case).
Then the Pearson \eqn{X^2} statistic
  \deqn{
    X^2 = \sum_i \frac{(\mbox{observed}_i - \mbox{expected}_i)^2}{\mbox{expected}_i}
  }{
    X^2 = sum((observed - expected)^2/expected)
  }
is computed.
If \code{method="Chisq"} then a \eqn{\chi^2}{chi^2} test of
goodness-of-fit is performed by comparing the test statistic
to the \eqn{\chi^2}{chi^2} distribution
with \eqn{m-k} degrees of freedom, where \code{m} is the number of
quadrats and \eqn{k} is the number of fitted parameters
(equal to 1 for \code{quadrat.test.ppp}). The default is to
compute the \emph{two-sided} \eqn{p}-value, so that the test will
be declared significant if \eqn{X^2} is either very large or very
small. One-sided \eqn{p}-values can be obtained by specifying the
\code{alternative}. An important requirement of the
\eqn{\chi^2}{chi^2} test is that the expected counts in each quadrat
be greater than 5.
If \code{method="MonteCarlo"} then a Monte Carlo test is performed,
obviating the need for all expected counts to be at least 5. In the
Monte Carlo test, \code{nsim} random point patterns are generated
from the null hypothesis (either CSR or the fitted point process
model). The Pearson \eqn{X^2} statistic is computed as above.
The \eqn{p}-value is determined by comparing the \eqn{X^2}
statistic for the observed point pattern, with the values obtained
from the simulations. Again the default is to
compute the \emph{two-sided} \eqn{p}-value.
If \code{conditional} is \code{TRUE} then the simulated samples are
generated from the multinomial distribution with the number of \dQuote{trials}
equal to the number of observed points and the vector of probabilities
equal to the expected counts divided by the sum of the expected counts.
Otherwise the simulated samples are independent Poisson counts, with
means equal to the expected counts.
If the argument \code{CR} is given, then instead of the
Pearson \eqn{X^2} statistic, the Cressie-Read (1984) power divergence
test statistic
\deqn{
2nI = \frac{2}{CR(CR+1)}
\sum_i \left[ \left( \frac{X_i}{E_i} \right)^CR - 1 \right]
}{
2nI = (2/(CR * (CR+1))) * sum((X[i]/E[i])^CR - 1)
}
is computed, where \eqn{X_i}{X[i]} is the \eqn{i}th observed count
and \eqn{E_i}{E[i]} is the corresponding expected count.
The value \code{CR=1} gives the Pearson \eqn{X^2} statistic;
\code{CR=0} gives the likelihood ratio test statistic \eqn{G^2};
\code{CR=-1/2} gives the Freeman-Tukey statistic \eqn{T^2};
\code{CR=-1} gives the modified likelihood ratio test statistic \eqn{GM^2};
and \code{CR=-2} gives Neyman's modified statistic \eqn{NM^2}.
In all cases the asymptotic distribution of this test statistic is
the same \eqn{\chi^2}{chi^2} distribution as above.
The return value is an object of class \code{"htest"}.
Printing the object gives comprehensible output
about the outcome of the test.
The return value also belongs to
the special class \code{"quadrat.test"}. Plotting the object
will display the quadrats, annotated by their observed and expected
counts and the Pearson residuals. See the examples.
}
\seealso{
\code{\link{quadrat.test.splitppp}},
\code{\link{quadratcount}},
\code{\link{quadrats}},
\code{\link{quadratresample}},
\code{\link{chisq.test}},
\code{\link{cdf.test}}.
To test a Poisson point process model against a specific alternative,
use \code{\link{anova.ppm}}.
}
\value{
An object of class \code{"htest"}. See \code{\link{chisq.test}}
for explanation.
The return value is also an object of the special class
\code{"quadrattest"}, and there is a plot method for this class.
See the examples.
}
\references{
Cressie, N. and Read, T.R.C. (1984)
Multinomial goodness-of-fit tests.
\emph{Journal of the Royal Statistical Society, Series B}
\bold{46}, 440--464.
}
\examples{
quadrat.test(simdat)
quadrat.test(simdat, 4, 3)
quadrat.test(simdat, alternative="regular")
quadrat.test(simdat, alternative="clustered")
## Likelihood ratio test
quadrat.test(simdat, CR=0)
## Power divergence tests
quadrat.test(simdat, CR=-1)$p.value
quadrat.test(simdat, CR=-2)$p.value
# Using Monte Carlo p-values
quadrat.test(swedishpines) # Get warning, small expected values.
# quadrat.test(swedishpines, method="M", nsim=4999)
# quadrat.test(swedishpines, method="M", nsim=4999, conditional=FALSE)
\testonly{
quadrat.test(swedishpines, method="M", nsim=19)
quadrat.test(swedishpines, method="M", nsim=19, conditional=FALSE)
}
# quadrat counts
qS <- quadratcount(simdat, 4, 3)
quadrat.test(qS)
# fitted model: inhomogeneous Poisson
fitx <- ppm(simdat ~ x)
quadrat.test(fitx)
# an equivalent test (results differ due to discretisation effects):
quadrat.test(simdat, lambda=predict(fitx), df.est=length(coef(fitx)))
te <- quadrat.test(simdat, 4)
residuals(te) # Pearson residuals
plot(te)
plot(simdat, pch="+", cols="green", lwd=2)
plot(te, add=TRUE, col="red", cex=1.4, lty=2, lwd=3)
sublab <- eval(substitute(expression(p[chi^2]==z),
list(z=signif(te$p.value,3))))
title(sub=sublab, cex.sub=3)
# quadrats of irregular shape
B <- dirichlet(runifpoint(6, Window(simdat)))
qB <- quadrat.test(simdat, tess=B)
plot(simdat, main="quadrat.test(simdat, tess=B)", pch="+")
plot(qB, add=TRUE, col="red", lwd=2, cex=1.2)
}
\author{\adrian
and \rolf
}
\keyword{spatial}
\keyword{htest}
|
40d170801e1d2c8f8b87971933f6f9380d63763d | c7daf6d40483ed719ec1cfa9a6b6590df5bcdd5c | /products/Introduction_to_Computational_Science_Modules/02_System_Dynamics/R/R_Computational_Toolbox_Files/fire_R/applyExtended.R | 1603badc5a190a1145f0093111fc6ab267563880 | [] | no_license | wmmurrah/computationalScience | b28ab2837c157c9afcf432cc5c41caaff6fd96d1 | a4d7df6b50f2ead22878ff68bfe39c5adb88bbbb | refs/heads/main | 2023-06-01T17:40:29.525021 | 2021-06-20T01:42:54 | 2021-06-20T01:42:54 | 332,249,589 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 612 | r | applyExtended.R | applyExtended = function(latExt, probLightning, probImmune) {
# APPLYEXTENDED - Function to apply
# spread(site, N, E, S, W, probLightning, probImmune) to every interior
# site of square array latExt and to return the resulting array
n = nrow(latExt) - 2
newmat = matrix(c(rep(0,n*n)), nrow = n)
for (j in 2:(n + 1)) {
for (i in 2:(n + 1)) {
site = latExt[i, j]
N = latExt[i - 1, j]
E = latExt[i, j + 1]
S = latExt[i + 1, j]
W = latExt[i, j - 1]
newmat[i - 1, j - 1] = spread(site, N, E, S, W, probLightning, probImmune)
}
}
return(newmat)
}
|
af0b38623f4d67e4967c24a8f17e2be33a7e59e2 | 64a5e8d9a68fbe60d24ce9a78b5e027e5e9d6783 | /Assignment2/Assignment2.R | cd15bd8e0d2854efdf429e70dbe02a9e7435b02f | [] | no_license | DannyGsGit/UW_DS350 | bf30d6f93de95defa798a6058300265bffc12abd | 1b4bb55fa91cec25bf10c6f9f9827908c8667c18 | refs/heads/master | 2020-04-10T21:25:35.558421 | 2016-10-11T00:47:26 | 2016-10-11T00:47:26 | 62,099,162 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,197 | r | Assignment2.R | #### Assignment 2 ####
# Prepared By: Danny Godbout
# Goal:
# Write an R-script to compute the Monty Hall Probabilities with
# simulations (get probabilities AND variances for switching and
# not switching).
#### Monty Hall Simulation Function ####
# Setup: 3 doors; 2 = Goat, 1 = Car
# 1- Pick a door
# 2- Host opens one of the remaining 2 doors, revealing a goat
# 3- Either keep current choice, or switch doors
f_monty_hall_single_run <- function() {
  # Runs a single cycle of the Monty Hall problem, returning results for
  # the scenario where we keep the first-choice door and the scenario
  # where we switch.
  #
  # Returns: a single-row data frame with columns `switch` and `original`,
  #   each "car" or "goat". Exactly one of the two strategies wins.

  # Define all doors
  all_doors <- 1:3

  # Place the car behind a random door and make the initial pick.
  car_door <- sample(all_doors, 1)
  first_choice_door <- sample(all_doors, 1)

  # Host opens a goat door: neither the car nor the contestant's pick.
  remaining_goats <- all_doors[all_doors != car_door & all_doors != first_choice_door]
  # sample(x, 1) on a length-1 numeric x samples from 1:x, so pick the
  # single remaining goat door directly in that case.
  if (length(remaining_goats) == 1) {
    host_door <- remaining_goats
  } else {
    host_door <- sample(remaining_goats, 1)
  }

  # Switching takes the one door that is neither the original pick nor
  # the door the host opened.
  switched_door <- all_doors[all_doors != first_choice_door & all_doors != host_door]

  # Scalar conditions: plain if/else instead of vectorised ifelse().
  switch_win <- if (switched_door == car_door) "car" else "goat"
  original_win <- if (first_choice_door == car_door) "car" else "goat"

  data.frame(switch = switch_win, original = original_win)
}
#### Run multiple time ####
# Set number of runs
n <- 1000
# Run simulation: replicate() yields a matrix of the 1-row results;
# transpose so each simulation becomes one row.
simulation_result<- data.frame(t(replicate(n, f_monty_hall_single_run(), simplify = "matrix")))
# Unlist column formats for analysis (the transposed columns are lists).
simulation_result <- data.frame(apply(simulation_result, 2, unlist))
# Add a run index used as the id variable when reshaping for plots.
simulation_result$run <- 1:n
#### Plot results ####
# Melt simulation results to long format: one row per (run, strategy).
library(reshape2)
plot.data <- melt(simulation_result, id = "run")
# Bar chart of car/goat counts, one facet per strategy (switch/original).
library(ggplot2)
p.dist <- ggplot(plot.data, aes(value, fill = factor(value))) +
  geom_bar() +
  facet_grid(variable ~ .)
print(p.dist)
#### Calculate summary stats ####
## Probability function
# Empirical win probability: the share of "car" outcomes among n runs.
# NA entries (if any) are not counted as wins.
f_probability <- function(data, n) {
  wins <- sum(data == "car", na.rm = TRUE)
  wins / n
}
## Variance function
# Binomial variance for n trials with success probability p: n * p * (1 - p).
f_variance <- function(p, n) {
  n * p * (1 - p)
}
# Summary Statistics for Switch Strategy
prob.switch <- f_probability(data = simulation_result$switch, n = n)
# NOTE(review): n = 1 gives the single-trial Bernoulli variance p(1-p);
# confirm that per-trial (rather than per-n-runs) variance is intended.
var.switch <- f_variance(p = prob.switch, n = 1)
# Summary Statistics for Not-Switching Strategy:
prob.original <- f_probability(data = simulation_result$original, n = n)
var.original <- f_variance(p = prob.original, n = 1)
# Combine stats to display: one column per strategy,
# rows = win probability and its variance.
stats.df <- data.frame(Switch = c(prob.switch, var.switch),
                       Original = c(prob.original, var.original))
row.names(stats.df) <- c("Probability", "Variance")
print(stats.df)
|
e9ae9f5e2cf82a1087a1031f84028f80f9813592 | fde6257c1dd48fb58f74cdf84b91d656f00bf7f1 | /R/npn_geoserver.R | 1e4c8cffabba63ef323670e1ac34136b8e53141c | [
"MIT"
] | permissive | tufangxu/rnpn | c366fe385d738e5de0b48bc287198e5a7b168922 | b8c0271e9a55c865135fcea8a633b877afb8575f | refs/heads/master | 2020-03-29T04:10:13.770308 | 2018-05-14T17:49:36 | 2018-05-14T17:49:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,257 | r | npn_geoserver.R |
#' @export
# Download one time slice of a coverage from the NPN Geoserver WCS.
#
# Args:
#   coverage_id - WCS coverage identifier
#   date        - date of the time slice ("YYYY-MM-DD")
#   format      - output format requested from the server
#   output_path - destination file; when NULL the data is fetched to a
#                 temporary file and loaded as a raster instead
# Returns: a raster::raster object when output_path is NULL, otherwise
#   (invisibly) the path the file was written to. The original version
#   returned both branches invisibly (trailing assignment / download.file
#   status), which made the no-output_path call useless at top level.
npn_download_geospatial <- function(
  coverage_id,
  date,
  format = "geotiff",
  output_path = NULL
){
  url <- paste0(base_geoserver(), "coverageId=", coverage_id,
                "&SUBSET=time(\"", date, "T00:00:00.000Z\")&format=", format)
  print(url)
  if (is.null(output_path)) {
    z <- tempfile()
    download.file(url, z, method = "libcurl", mode = "wb")
    # Explicitly return the loaded raster.
    raster::raster(z)
  } else {
    download.file(url, destfile = output_path, method = "libcurl", mode = "wb")
    invisible(output_path)
  }
}
# Look up a previously requested data point in the session-level cache
# (the global data frame `point_values`). Returns the cached one-column
# data frame holding the value, or NULL when the cache does not exist or
# has no row matching this exact layer/lat/long/date combination.
npn_check_point_cached <- function(
  layer,lat,long,date
){
  if (!exists("point_values")) {
    return(NULL)
  }
  hit <- point_values$layer == layer &
    point_values$lat == lat &
    point_values$long == long &
    point_values$date == date
  cached <- point_values[hit, ]['value']
  if (!is.null(cached) && nrow(cached) == 0) {
    cached <- NULL
  }
  cached
}
# Fetch a single AGDD (accumulated growing degree days) point value from
# the NPN station time-series service. That service is more accurate for
# AGDD than the Geoserver WCS, so prefer this function for AGDD point
# values (see npn_get_point_data() for other layers).
#
# Args:
#   layer      - AGDD layer name understood by the service
#   lat, long  - point coordinates (decimal degrees)
#   date       - date of interest (string coercible via as.Date)
#   store_data - cache the result in the session-global `point_values`?
# Returns: the numeric point value, or -9999 when the request or the JSON
#   parse fails. NOTE(review): a cache hit returns the one-column data
#   frame from npn_check_point_cached() while a fresh fetch returns a bare
#   numeric -- confirm which shape callers expect.
#' @export
npn_get_agdd_point_data <- function(
  layer,
  lat,
  long,
  date,
  store_data=TRUE){
  # If we already have this value stored in global memory then
  # pull it from there.
  cached_value <- npn_check_point_cached(layer,lat,long,date)
  if(!is.null(cached_value)){
    return(cached_value)
  }
  # base() is a package-level helper providing the service root URL.
  # Request a two-day window ending on `date` so the target day is included.
  url <- paste0(base(), "stations/getTimeSeries.json?latitude=", lat, "&longitude=", long, "&start_date=", as.Date(date) - 1, "&end_date=", date, "&layer=", layer)
  data = httr::GET(url,
                   query = list(),
                   httr::progress())
  # -9999 doubles as the failure sentinel for both the HTTP/JSON step and
  # the value-extraction step below.
  json_data <- tryCatch({
    jsonlite::fromJSON(httr::content(data, as = "text"))
  },error=function(msg){
    print(paste("Failed:", url))
    return(-9999)
  })
  v <- tryCatch({
    as.numeric(json_data[json_data$date==date,"point_value"])
  },error=function(msg){
    print(paste("Failed:", url))
    return(-9999)
  })
  # Once the value is known, cache it in the global `point_values` data
  # frame so the same data point is never requested twice in a session.
  #
  # TODO: Break this into its own function
  if(store_data){
    if(!exists("point_values")){
      point_values <<- data.frame(layer=layer,lat=lat,long=long,date=date,value=v)
    }else{
      point_values <<- rbind(point_values, data.frame(layer=layer,lat=lat,long=long,date=date,value=v))
    }
  }
  return(v)
}
# Fetch a point value for an arbitrary NPN layer (not just AGDD) from the
# NPN Geoserver WCS. Values come from the gridded coverage, so they may be
# less precise than the station-based service used by
# npn_get_agdd_point_data().
#
# Args:
#   layer      - Geoserver coverage identifier
#   lat, long  - point coordinates (decimal degrees)
#   date       - date of interest ("YYYY-MM-DD")
#   store_data - cache the result in the session-global `point_values`?
# Returns: the numeric point value parsed from the GML response (a cache
#   hit returns the cached one-column data frame instead -- see the note
#   on npn_get_agdd_point_data()).
#' @export
npn_get_point_data <- function(
  layer,
  lat,
  long,
  date,
  store_data=TRUE){
  # Serve from the session cache when this exact point was fetched before.
  cached_value <- npn_check_point_cached(layer,lat,long,date)
  if(!is.null(cached_value)){
    return(cached_value)
  }
  # WCS GetCoverage request subset to a single Long/Lat/time point.
  url <- paste0(base_geoserver(), "coverageId=",layer,"&format=application/gml+xml&subset=http://www.opengis.net/def/axis/OGC/0/Long(",long,")&subset=http://www.opengis.net/def/axis/OGC/0/Lat(",lat,")&subset=http://www.opengis.net/def/axis/OGC/0/time(\"",date,"T00:00:00.000Z\")")
  data = httr::GET(url,
                   query = list(),
                   httr::progress())
  #Download the data as XML and store it as an XML doc
  xml_data <- httr::content(data, as = "text")
  doc <- XML::xmlInternalTreeParse(xml_data)
  # The coverage's data block is a whitespace-separated tuple list; take
  # the first tuple's first number as the point value.
  df <- XML::xmlToDataFrame(XML::xpathApply(doc, "//gml:RectifiedGridCoverage/gml:rangeSet/gml:DataBlock/tupleList"))
  v <- as.numeric(as.list(strsplit(gsub("\n","",df[1,"text"]),' ')[[1]])[1])
  # Cache the value in the global `point_values` data frame (same scheme
  # as npn_get_agdd_point_data()).
  if(store_data){
    if(!exists("point_values")){
      point_values <<- data.frame(layer=layer,lat=lat,long=long,date=date,value=v)
    }else{
      point_values <<- rbind(point_values, data.frame(layer=layer,lat=lat,long=long,date=date,value=v))
    }
  }
  return(v)
}
# Attach values sampled from `raster` at each row's longitude/latitude to
# the data frame `df`, under the column name `col_label`.
#
# Args:
#   raster    - a raster layer to sample from (passed to raster::extract)
#   col_label - name for the new column of extracted values
#   df        - data frame with `longitude` and `latitude` columns
# Returns: `df` with one extra column named `col_label`.
npn_merge_geo_data <- function(
  raster,
  col_label,
  df
){
  coords <- data.frame(lon=df[,"longitude"],lat=df[,"latitude"])
  # NOTE(review): the return value of sp::coordinates(coords) is discarded,
  # so this line appears to be a no-op; it was presumably meant to be the
  # setter form (sp::coordinates(coords) <- ...) that promotes `coords` to
  # a SpatialPoints object -- confirm against raster::extract requirements
  # before changing.
  sp::coordinates(coords)
  values <- raster::extract(x=raster,y=coords)
  df <- cbind(df,values)
  # cbind() named the new column "values"; rename it to the requested label.
  names(df)[names(df) == "values"] <- col_label
  return(df)
}
# Map a numeric AGDD base temperature to its Geoserver layer name:
# 32 -> "gdd:agdd", 50 -> "gdd:agdd_50f". Any other input (including an
# already-resolved layer name) is returned unchanged; NULL stays NULL.
resolve_agdd_raster <- function(
  agdd_layer
){
  if (!is.null(agdd_layer)) {
    if (agdd_layer == 32) {
      agdd_layer <- "gdd:agdd"
    } else if (agdd_layer == 50) {
      agdd_layer <- "gdd:agdd_50f"
    }
  }
  # Explicit return: the original fell off the end of the if/else chain,
  # so any unmapped input made the function return NULL (invisibly),
  # silently losing the caller's value.
  agdd_layer
}
# Pick the Spring Index (SI-x) raster layer for a given year and download it.
#
# Data-source selection: years before last year are served from PRISM
# (dated Jan 1); the current and previous year come from NCEP (dated
# Dec 29, or today's date for the current year).
#
# Args:
#   year       - four-digit year (numeric or string)
#   phenophase - "leaf" or "bloom"; anything else (including NULL) falls
#                back to "leaf"
#   sub_model  - SI-x sub-model name; defaults to "average" when NULL
# Returns: the result of npn_download_geospatial() for the chosen layer.
resolve_six_raster <- function(
  year,
  phenophase = "leaf",
  sub_model = NULL
){
  current_year <- as.numeric(format(Sys.Date(), '%Y'))
  num_year <- as.numeric(year)

  if (num_year < current_year - 1) {
    src <- "prism"
    date <- paste0(year, "-01-01")
  } else {
    src <- "ncep"
    date <- if (num_year != current_year) paste0(year, "-12-29") else Sys.Date()
  }

  if (is.null(sub_model)) {
    sub_model <- "average"
  }
  # Guard against bad phenophase input, including NULL.
  if (is.null(phenophase) || (phenophase != 'leaf' && phenophase != 'bloom')) {
    phenophase <- 'leaf'
  }

  layer_name <- paste0("si-x:", sub_model, "_", phenophase, "_", src)
  # Explicitly return the download result; the original ended with an
  # assignment, which returns its value invisibly.
  npn_download_geospatial(layer_name, date, "tiff")
}
|
565072a160b7d0df4ba727aec2af5b5707515850 | 1626f4e1e5644a9dce31401d02dff7b6aa1e3db2 | /exercise-2/exercise.R | 1fb111b1d80a19ffc91315e2b9e0adefe5f15d36 | [
"MIT"
] | permissive | Yubo-W/m7-vectors | cedd10d194de649e5233c086e065c6d4128b873b | b67daef13433fb26c8e58a069ffb2ae654f176ae | refs/heads/master | 2020-05-29T08:47:56.640795 | 2016-10-11T20:46:39 | 2016-10-11T20:46:39 | 70,196,403 | 0 | 0 | null | 2016-10-06T21:56:39 | 2016-10-06T21:56:38 | null | UTF-8 | R | false | false | 1,573 | r | exercise.R | # Exercise 2: Subsetting and Manipulating Vectors
# Create a vector `x` that has the numbers 5,2,6,2,1,7
x <- c(5, 2, 6, 2, 1, 7)
# Create a vector `y` that has the numbers 2,3
y <- c(2, 3)
# Create a vector `z` by adding (not combining, but adding) `x` and `y`.
# `y` (length 2) is recycled three times across `x` (length 6).
z <- x + y
# Create a vector `first.three` that has the first three elements of `z` in it
first.three <- z[1:3]
# Create a vector `small` that has the values of `z` that are less than 5
# Return the elements of `vector` that are strictly less than 5.
# Vectorised subsetting replaces the original grow-with-c() loop.
# Returns NULL (matching the original's empty accumulator) when no
# element qualifies.
CheckSmall <- function(vector) {
  small <- vector[vector < 5]
  if (length(small) == 0) {
    return(NULL)
  }
  small
}
small <- CheckSmall(z)
# Create a vector `large` that has the values of `z` that are greater than or equal to 5
# Return the elements of `vector` that are greater than or equal to 5.
# Bug fix: the original tested `value > 5`, excluding exact 5s even though
# the exercise asks for "greater than or equal to 5".
# Returns NULL (matching the original's empty accumulator) when no
# element qualifies.
CheckLarge <- function(vector) {
  large <- vector[vector >= 5]
  if (length(large) == 0) {
    return(NULL)
  }
  large
}
large <- CheckLarge(z)
### Bonus ###
# Replace the values in `z` that are larger than 5 with the number 5
# Replace the values in `vector` that are larger than 5 with the number 5
# (values <= 5 pass through unchanged). pmin() replaces the original
# element-by-element loop. Returns NULL for empty input, matching the
# original's empty accumulator.
ReplaceFive <- function(vector) {
  if (length(vector) == 0) {
    return(NULL)
  }
  pmin(vector, 5)
}
replace.z <- ReplaceFive(z)
# Replace every other value in `z` with the number 0
# Replace every other value in `vector` with 0: odd positions (1st, 3rd,
# ...) are kept, even positions are zeroed -- exactly what the original's
# length-parity loop produced. Vectorised logical-index assignment
# replaces the grow-with-c() loop. Returns NULL for empty input, matching
# the original's empty accumulator.
ReplaceZero <- function(vector) {
  if (length(vector) == 0) {
    return(NULL)
  }
  vector[seq_along(vector) %% 2 == 0] <- 0
  vector
}
new.replace.zero <- ReplaceZero(z) |
5019663b75dd713c2cca73fe12150d7c8a35f365 | 19ffb430a323bc8a207be7a08e1b716cd215b8fe | /RsNlme/R/estimates.r | 445c32054ba4e976d2e5ff15454cfcffe08e34b7 | [] | no_license | phxnlmedev/rpackages | 51b9bd9bf955e7da7a3dc4ca6f13b32adfd3f049 | 59dafc62c179d98407c4fbcbb4936786d71ee6a5 | refs/heads/master | 2020-05-07T20:12:28.132137 | 2019-07-23T11:58:39 | 2019-07-23T11:58:39 | 180,429,234 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,193 | r | estimates.r |
#' @import shiny
#' @import ggplot2
#' @import graphics
NULL
#'
#'@export
#'
# Write the ASCII parameter file consumed by the NLME7 plotting engine:
# theta count, then name/value pairs, then the sweep variable names, then
# the sweep time grid, terminated by the 999999 sentinel -- one token per
# line.
#
# Args:
#   fileName      - output path (overwritten)
#   thetas        - named numeric vector of fixed-effect estimates
#   variables     - character vector of variables to sweep/plot
#   sweepStart    - start of the time sweep
#   sweepLength   - total length of the sweep
#   numSweepSteps - number of steps (numSweepSteps + 1 grid points written)
generateInitialEstimatesInputAscii <- function(fileName, thetas, variables, sweepStart, sweepLength, numSweepSteps) {
  outFile <- fileName
  # The first cat() truncates any existing file; everything after appends.
  cat(length(thetas), file = outFile, append = FALSE, sep = "\n")
  for (n in names(thetas)) {
    cat(n, file = outFile, append = TRUE, sep = "\n")
    cat(thetas[n], file = outFile, append = TRUE, sep = "\n")
  }
  cat(as.integer(length(variables)), file = outFile, append = TRUE, sep = "\n")
  for (v in variables) {
    cat(v, file = outFile, append = TRUE, sep = "\n")
  }
  cat(as.integer(numSweepSteps + 1), file = outFile, append = TRUE, sep = "\n")
  sweepStep <- sweepLength / numSweepSteps
  for (inx in 0:numSweepSteps) {
    # (The original also evaluated class(...) here and discarded the
    # result -- dead code, removed.)
    cat(as.numeric(sweepStart + inx * sweepStep), file = outFile, append = TRUE, sep = "\n")
  }
  # End-of-file sentinel expected by the NLME7 reader.
  cat(as.integer(999999), file = outFile, append = TRUE, sep = "\n")
}
# Read back (and print, for debugging) the binary parameter file written
# by generateInitialEstimatesInput(): theta count, nul-terminated
# name/double-value pairs, variable names, the sweep grid, and the
# 999999 end marker.
#
# Fixes vs the original: the connection is now closed via on.exit (it was
# leaked), 1:numThetas no longer misbehaves when numThetas is 0, and the
# "numVariabels" typo in the debug output is corrected.
readInitialEstimatesParams <- function(fileName){
  inputFile <- file(fileName, "rb")
  on.exit(close(inputFile), add = TRUE)
  numThetas <- readBin(inputFile, integer(), n = 1)
  print(paste0("numThetas ", numThetas))
  for (n in seq_len(numThetas)) {
    name <- readBin(inputFile, character(), n = 1)
    value <- readBin(inputFile, numeric(), n = 1)
    print(paste0(name, " ", value))
  }
  numVars <- readBin(inputFile, integer(), n = 1)
  print(paste0("numVariables ", numVars))
  # seq_len() is empty when numVars == 0, so no explicit guard is needed.
  for (n in seq_len(numVars)) {
    name <- readBin(inputFile, character(), n = 1)
    print(name)
  }
  numSteps <- readBin(inputFile, integer(), n = 1)
  print(numSteps)
  steps <- readBin(inputFile, numeric(), n = numSteps)
  print(steps)
  end <- readBin(inputFile, integer(), n = 1)
  print(paste0("end marker", end))
}
#'
#'@export
#'
# Write the binary parameter file consumed by NLME7: theta count, then
# for each theta a one-byte name length, the name bytes (writeChar's
# default terminator follows), and the double value; then the variable
# names in the same length-prefixed form; then the sweep grid and the
# 999999 sentinel. The file is written relative to getwd(), matching the
# original behaviour.
#
# Fixes vs the original: the connection is closed via on.exit (so it is
# no longer leaked when any write fails mid-way), dead commented-out
# writes are removed, and `=` assignments become `<-`.
generateInitialEstimatesInput <- function(fileName, thetas, variables, sweepStart, sweepLength, numSweepSteps){
  outFile <- file(paste0(getwd(), "/", fileName), "wb")
  on.exit(close(outFile), add = TRUE)
  writeBin(as.integer(length(thetas)), outFile)
  for (n in names(thetas)) {
    l <- nchar(n)
    writeBin(l, outFile, size = 1)
    writeChar(n, outFile, nchars = l)
    writeBin(as.numeric(thetas[n]), outFile)
  }
  writeBin(as.integer(length(variables)), outFile)
  for (v in variables) {
    l <- nchar(v)
    writeBin(l, outFile, size = 1)
    writeChar(v, outFile, nchars = l)
  }
  num <- numSweepSteps + 1
  writeBin(as.integer(num), outFile)
  print(paste0("numSweepSteps ", num))
  sweepStep <- sweepLength / numSweepSteps
  for (inx in 0:numSweepSteps) {
    writeBin(as.numeric(sweepStart + inx * sweepStep), outFile)
  }
  # End-of-file sentinel expected by the NLME7 reader.
  writeBin(as.integer(999999), outFile)
}
#'
#'@export
#'
# Run the NLME7 plotting engine over `inputFile` inside `workingDir`,
# producing `outputFile`. The working directory is changed only for the
# duration of the external call and is always restored.
#
# NOTE(review): shell() is Windows-only; this helper presumably runs on
# Windows hosts only -- confirm before reusing elsewhere.
generateGraph <- function(workingDir, inputFile, outputFile){
  cwd <- getwd()
  # on.exit restores the caller's working directory on both normal return
  # and error; the original duplicated the restore in the body and in a
  # tryCatch error handler (and could still leak it on interrupt).
  on.exit(setwd(cwd), add = TRUE)
  setwd(workingDir)
  args <- paste0("/plotinascii ", inputFile, " /plotout ", outputFile,
                 " /m 3 ",
                 " /n 10 ",
                 " /e -1 ",
                 " /o 6",
                 " /csv ",
                 " /sort ",
                 " cols1.txt ",
                 " data1.txt ",
                 " out.txt")
  cmd <- paste0(" NLME7.exe ", args)
  print(cmd)
  print(getwd())
  shell(cmd, wait = TRUE)
  print("-----")
}
#'
#'@export
#'
# Parse the binary model-output file produced by the NLME7 plotting run
# (see generateGraph) into a nested list: header flags, theta name/value
# pairs, sweep variables, per-subject observation series and swept curves.
#
# Strings are stored as a one-byte length followed by that many
# characters; each subject record ends with a 9999 marker.
#
# NOTE(review): the connection is closed only on a fully successful
# parse -- the stopifnot() below leaks it on a malformed file. Also,
# `for (o in 1:nObs)` misbehaves if nObs is ever 0 (1:0 counts down);
# whether the writer can emit 0 observations should be confirmed before
# changing either. `sig` is read to advance past the file signature but
# is otherwise unused.
readModelData <-function(fileName) {
  out=list()
  inputFile=file(fileName,"rb")
  # --- header: signature, time-based flag, number of thetas ---
  sig=readBin(inputFile,integer(),n=1)
  tBased=readBin(inputFile,"integer",n=1)
  nThetas=readBin(inputFile,"integer",n=1)
  out$tBased = tBased
  out$nThetas = nThetas
  out$thetas= list()
  # Theta table: length-prefixed name followed by a double value.
  for ( n in 1:nThetas ){
    l=readBin(inputFile,"raw",size=1)
    theta1=readChar(inputFile,nchars=as.integer(l))
    value=readBin(inputFile,"double")
    theta=c(name=theta1,value=value)
    out$thetas[[n]] = theta
  }
  # Sweep variables: length-prefixed name plus an integer flag.
  nVars=readBin(inputFile,"integer",n=1)
  out$nVars = nVars
  out$vars=list()
  for ( n in 1:nVars ) {
    l=readBin(inputFile,"raw",size=1)
    var=readChar(inputFile,nchars=as.integer(l))
    flag=readBin(inputFile,"integer",n=1)
    out$vars=c(out$vars,list(name=var,flag=flag))
  }
  nSubjects=readBin(inputFile,"integer",n=1)
  out$nSubjects=nSubjects
  # Returned-variable table: name, availability flag, sweep flag.
  nVarsReturned=readBin(inputFile,"integer",n=1)
  out$nVarsReturned = nVarsReturned
  out$varsReturned = list()
  for ( n in 1:nVarsReturned ) {
    l=readBin(inputFile,"raw",size=1)
    varName=readChar(inputFile,nchars=as.integer(l))
    isAvail=readBin(inputFile,"integer",n=1)
    sweep=readBin(inputFile,"integer",n=1)
    out$varsReturned=c(out$varsReturned,list(varName=varName,isAvail=isAvail,sweep=sweep))
  }
  out$subjects=list()
  for ( n in 1:nSubjects ) {
    subject=list()
    # Five length-prefixed id fields are read per subject; only non-empty
    # ones overwrite subject$id, so the last non-empty field wins.
    # NOTE(review): presumably these are the five NLME sort-key columns --
    # confirm against the writer.
    for ( m in 1:5 ) {
      l=readBin(inputFile,"raw",size=1)
      if ( l > 0 ) {
        id=readChar(inputFile,nchars=as.integer(l))
        subject$id = id
      }
      else
        id=""
    }
    subject$observations = list()
    # One observation record per returned variable.
    for ( r in 1:out$nVarsReturned ){
      obsValues = list()
      # Observed (time, value) pairs.
      nObs = readBin(inputFile,"integer",n=1)
      obsValues$nObs = nObs
      obsValues$obsTimes = list()
      obsValues$obsValues= list()
      for ( o in 1:nObs){
        t=readBin(inputFile,"double",n=1)
        v=readBin(inputFile,"double",n=1)
        obsValues$obsTimes= c(obsValues$obsTimes,t)
        obsValues$obsValues= c(obsValues$obsValues,v)
      }
      # Swept curve: nVal interleaved (x, y) doubles, reshaped row-wise.
      nVal = readBin(inputFile,"integer",n=1)
      num = as.integer( 2 * nVal )
      vals=readBin(inputFile,"double",n= num )
      m=matrix(vals,ncol=2, byrow=TRUE)
      x=m[,1]
      y=m[,2]
      obsValues$nVal = nVal ;
      obsValues$x = x
      obsValues$y = y
      subject$observations[[r]] = obsValues
    }
    # Each subject record is terminated by a 9999 marker.
    end=readBin(inputFile,"integer",n=1)
    stopifnot ( end == 9999 )
    out$subjects[[n]] = subject
  }
  close(inputFile)
  return( out )
}
#'
#'@export
#'
# Compile the model's test.mdl in its working directory by invoking the
# execNlmeCmd.bat script from the NLME installation. On Windows hosts the
# working-directory path separators are flipped to backslashes first.
compileModel <- function(model, host) {
  install_dir <- host@installationDirectory
  model_dir <- model@modelInfo@workingDir
  Sys.setenv("INSTALLDIR" = install_dir)
  if (attr(host, "hostType") == "Windows") {
    model_dir <- gsub("/", "\\", model_dir, fixed = TRUE)
  }
  cmd_args <- paste0(" COMPILE test.mdl ", model_dir, " MPINO NO 1")
  script <- paste0(install_dir, "/", "execNlmeCmd.bat")
  shell(paste0(script, " ", cmd_args))
}
#'
#'estimatesUI
#'
#'User Interface to examine the model and evaluate estimates for fixed effects
#'
#'@param model PK/PD model
#'@param subjectIds Subject IDs
#'@param host Optional host parameter if model needs to be compiled
#'@param dataset Optional dataset parameter
#'
#'@examples
#' host = NlmeParallelHost(sharedDirectory=Sys.getenv("NLME_ROOT_DIRECTORY"),
#' parallelMethod=NlmeParallelMethod("LOCAL_MPI"),
#' hostName="MPI",
#' numCores=4)
#' input=read.csv("D:/SDCard/NlmeInstall_04_30_18/Pml/pk01cov3.csv")
#' model = pkmodel(numComp=1,
#' isPopulation=TRUE,
#' absorption = Intravenous,
#' parameterization = Clearance,
#' modelName="PK01Model",
#' isTlag = FALSE,
#' hasEliminationComp = FALSE,
#' isClosedForm = TRUE)
#'
#' dataset=NlmeDataset(model@modelInfo@workingDir)
#' initColMapping(model)=input
#' modelColumnMapping(model)=c(ID="xid", CObs="conc",A1="dose")
#' writeDefaultFiles(model,dataset)
#' estimatesUI(model,unique(input$id),host)
#'
#' estimates = getInitialEstimates(model)
#' print(estimates)
#' initFixedEffects(model) = estimates
#'
#'@export
#'
estimatesUI <-function(model,subjectIds,host=NULL,dataset=NULL) {
  # Launch a shiny app for interactively choosing initial fixed-effect
  # estimates (thetas).  One slider per theta (at most nine) drives a model
  # simulation whose curve is drawn over the observed data, either for a
  # single subject or for all subjects overlaid.  Every slider change writes
  # the current theta values to params.txt in the model's working directory,
  # from where getInitialEstimates() can read them back after the app closes.
  #
  # model      : RsNlme model object (slots used here: @dataset, @isTextual,
  #              @errorModel, @modelInfo@workingDir)
  # subjectIds : subject identifiers offered in the "Subject" drop-down
  # host       : optional NlmeParallelHost; when supplied the model is
  #              compiled on it before the app starts
  # dataset    : optional NlmeDataset; defaults to the model's own dataset
  #
  # NOTE(review): library(shiny) would be preferable; require() merely
  # returns FALSE when shiny is missing instead of stopping with an error.
  require(shiny)
  if ( is.null(dataset) )
    dataset=model@dataset
  if ( ! is.null(dataset) )
    writeDefaultFiles(model,dataset)
  # Compile the model up front when a host was supplied.
  if ( ! is.null(host ) ) {
    compileModel(model,host)
  }
  # thetas=initFixedEffects(model)
  thetas=getThetas(model)
  numThetas = length(thetas)
  numSubjects = length(subjectIds)
  # Collect observation names either from the error model (graphical models)
  # or from the textual model definition.
  observationNames=c()
  if ( model@isTextual == FALSE ) {
    numObservations = length(model@errorModel@effectsList)
    for ( n in 1:numObservations )
      observationNames = c(observationNames,model@errorModel@effectsList[[n]]@effectName)
  } else {
    observationNames = observationNames(model)
    numObservations = length(observationNames)
  }
  plotVariables=observationNames
  # NOTE(review): modelVar is created as reactiveValues() but is immediately
  # replaced by the plain model object on the next line, so no reactivity is
  # actually attached to it.
  modelVar = reactiveValues()
  modelVar = model
  shinyApp(
    ui = fluidPage(
      sidebarLayout(
        sidebarPanel(
          fluidRow(
            column(width = 6,
                   uiOutput("subject")
            ),
            column(width = 6,
                   uiOutput("observation")
            ),
            column(width = 6,
                   checkboxInput("overlay","Overlay",value=FALSE)
            ),
            column( width = 6,
                    checkboxInput("log","Log",value=FALSE)
            ),
            column( width = 12 ,
                    sliderInput(inputId="starttime",label="Start Time",min=0,max=100,value=0),
                    sliderInput(inputId="duration",label="Duration",min=1,max=100,value=25),
                    #Place holders for dynamically created elements
                    # (one slider per theta; the UI supports at most nine)
                    shiny::uiOutput("theta1"),
                    shiny::uiOutput("theta2"),
                    shiny::uiOutput("theta3"),
                    shiny::uiOutput("theta4"),
                    shiny::uiOutput("theta5"),
                    shiny::uiOutput("theta6"),
                    shiny::uiOutput("theta7"),
                    shiny::uiOutput("theta8"),
                    shiny::uiOutput("theta9")
            ))
        ),
        mainPanel(
          plotOutput("plot")
        )
      )
    )
    ,
    server = function(input, output,session) {
      # Current upper bound of each theta slider; extended on the fly when
      # the user pushes a slider close to its maximum.
      sliderMaxes <- reactiveValues()
      # Re-simulate whenever the selected observation, the sweep window or
      # any theta slider changes.
      modelData <- eventReactive(c(input$Observation,input$starttime,input$duration,input$theta1,input$theta2,input$theta3,input$theta4,input$theta5,input$theta6,input$theta7,input$theta8,input$theta9), {
        sweepStart=input$starttime
        sweepLength=input$duration
        numSweepSteps=100
        tnames=names(thetas)
        values=c()
        for ( t in 1:length(tnames ) ) {
          # Pick the t-th slider's current value.
          val = switch( t, input$theta1,input$theta2,input$theta3,input$theta4,input$theta5,input$theta6,input$theta7,input$theta8,input$theta9)
          max = 0
          if ( !is.null(sliderMaxes) )
            if ( length(sliderMaxes ) > 0 )
              max = isolate(sliderMaxes[[paste0("theta",t)]])
          # Grow the slider range by 50% once the value reaches the top.
          if ( ( val >= (max - 0.1 ) ) ){
            updateSliderInput(session,paste0("theta",t),
                              max=(val * 1.5 ))
            sliderMaxes[[paste0("theta",t)]] = val * 1.5
          }
          values=c(values, val)
        }
        names(values)=tnames
        ts=values
        plotVariables = c(input$Observation)
        workingDir = modelVar@modelInfo@workingDir
        print("-------------------")
        print(workingDir)
        # Write the current thetas, run the simulation, read back the result.
        generateInitialEstimatesInputAscii(paste0(workingDir,"/params.txt"),ts,plotVariables,sweepStart,sweepLength,numSweepSteps)
        generateGraph(workingDir,"params.txt","output.bin")
        out=readModelData(paste0(workingDir,"/output.bin"))
        out
      } )
      output$plot <- renderPlot({
        out = modelData()
        lastSubject =input$Subject
        # NOTE(review): writing into .GlobalEnv leaks app state into the
        # user's workspace and clobbers any existing 'lastSubject'.
        assign("lastSubject",lastSubject,envir=.GlobalEnv)
        sIndx=as.integer(input$Subject)
        obs=input$Observation
        # Map the selected observation name back to its index.
        oIndx = 1
        for ( indx in 1:length(observationNames)) {
          if ( obs == observationNames[[indx]])
            oIndx = indx
        }
        # Simulated curve for the selected subject ...
        x=out$subjects[[sIndx]]$observations[[1]]$x
        y=out$subjects[[sIndx]]$observations[[1]]$y
        # ... and for every subject (used in overlay mode).
        xs=list()
        ys=list()
        for ( indx in 1:out$nSubjects ) {
          xs[[indx]] = out$subjects[[indx]]$observations[[1]]$x
          ys[[indx]] = out$subjects[[indx]]$observations[[1]]$y
        }
        # Observed data points for the selected subject ...
        px=as.numeric(out$subjects[[sIndx]]$observations[[1]]$obsTimes)
        py=as.numeric(out$subjects[[sIndx]]$observations[[1]]$obsValues)
        # ... and pooled over all subjects.
        pxs=c()
        pys=c()
        for ( indx in 1:out$nSubjects ) {
          xxx=as.numeric(out$subjects[[indx]]$observations[[1]]$obsTimes)
          yyy=as.numeric(out$subjects[[indx]]$observations[[1]]$obsValues)
          pxs=c(pxs,xxx)
          pys=c(pys,yyy)
        }
        # Axis limits spanning curve and observations; the lower bounds are
        # clamped to >= 0.0001 so the log-scale plot stays valid.
        xlim=c(max(min(c(x,as.numeric(pxs))),0.0001),max(c(x,as.numeric(pxs))))
        ylim=c(max(min(c(y,as.numeric(pys))),0.0001),max(c(y,as.numeric(pys))))
        if ( input$overlay == FALSE ) {
          if ( input$log )
            plot(x,y, col="blue",type="l",log="y",xlim=xlim,ylim=ylim,ylab="",xlab="")
          else
            plot(x,y, col="blue",type="l",xlim=xlim,ylim=ylim,ylab="",xlab="")
        }
        else {
          # Overlay mode: one curve per subject on a shared axis.
          for( indx in 1:out$nSubjects ) {
            if ( input$log )
              plot(xs[[indx]],ys[[indx]], col="blue",type="l",log = "y",xlim=xlim,ylim=ylim,ylab="",xlab="")
            else {
              if ( indx == 1 )
                plot(xs[[indx]],ys[[indx]], col="blue",type="l",xlim=xlim,ylim=ylim,ylab="",xlab="")
              else
                lines(xs[[indx]],ys[[indx]],col="blue")
            }
          }
        }
        # Overlay the observed points and title the plot.
        if ( input$overlay == FALSE ) {
          points(px,py,col="red")
          title(main = paste0("ID ",sIndx))
        }
        else{
          points(pxs,pys,col="red")
          title(main = paste0("All"))
        }
      })
      # Render one slider per theta (up to nine), each initialised to the
      # model's current value, ranging from 0.01 up to 1.5x that value (but
      # at least 10).
      tnames=names(thetas)
      if ( length(thetas) > 0 ) {
        name1=tnames[1]
        val1=as.numeric(thetas[[1]])
        sliderMaxes$theta1 = max(as.numeric(val1) * 1.5,10.0)
        output$theta1 = shiny::renderUI({
          sliderInput(inputId="theta1",label=name1,min=0.01,max=max(as.numeric(val1) * 1.5,10.0),
                      value=val1,step=0.1,round=FALSE)
        })
      }
      if ( length(thetas) > 1 ) {
        name2=tnames[2]
        val2=as.numeric(thetas[[2]])
        sliderMaxes$theta2 = max(as.numeric(val2) * 1.5,10.0)
        output$theta2 = shiny::renderUI({
          sliderInput(inputId="theta2",label=name2,min=0.01,max=max(as.numeric(val2) * 1.5,10.0),
                      value=val2,step=0.1,round=FALSE)
        })
      }
      if ( length(thetas) > 2 ) {
        name3=tnames[3]
        val3=as.numeric(thetas[[3]])
        sliderMaxes$theta3 = max(as.numeric(val3) * 1.5,10.0)
        output$theta3 = shiny::renderUI({
          # NOTE(review): unlike the other sliders, 'max=' is not named here;
          # the max(...) expression is matched positionally to the 'max'
          # formal, which works but is fragile.
          sliderInput(inputId="theta3",label=name3,min=0.01,max(as.numeric(val3) * 1.5,10.0),
                      value=val3,step=0.1,round=FALSE)
        })
      }
      if ( length(thetas) > 3 ) {
        name4=tnames[4]
        val4=as.numeric(thetas[[4]])
        sliderMaxes$theta4 = max(as.numeric(val4) * 1.5,10.0)
        output$theta4 = shiny::renderUI({
          # NOTE(review): 'max=' positional here as well; see theta3.
          sliderInput(inputId="theta4",label=name4,min=0.01,max(as.numeric(val4) * 1.5,10.0),
                      value=val4,step=0.1,round=FALSE)
        })
      }
      if ( length(thetas) > 4 ) {
        name5=tnames[5]
        val5=as.numeric(thetas[[5]])
        sliderMaxes$theta5 = max(as.numeric(val5) * 1.5,10.0)
        output$theta5 = shiny::renderUI({
          sliderInput(inputId="theta5",label=name5,min=0.01,max=max(as.numeric(val5) * 1.5,10.0),
                      value=val5,step=0.1,round=FALSE)
        })
      }
      if ( length(thetas) > 5 ) {
        name6=tnames[6]
        val6=as.numeric(thetas[[6]])
        sliderMaxes$theta6 = max(as.numeric(val6) * 1.5,10.0)
        output$theta6 = shiny::renderUI({
          sliderInput(inputId="theta6",label=name6,min=0.01,max=max(as.numeric(val6) * 1.5,10.0),
                      value=val6,step=0.1,round=FALSE)
        })
      }
      if ( length(thetas) > 6 ) {
        name7=tnames[7]
        val7=as.numeric(thetas[[7]])
        sliderMaxes$theta7 = max(as.numeric(val7) * 1.5,10.0)
        output$theta7 = shiny::renderUI({
          sliderInput(inputId="theta7",label=name7,min=0.01,max=max(as.numeric(val7) * 1.5,10.0),
                      value=val7,step=0.1,round=FALSE)
        })
      }
      if ( length(thetas) > 7 ) {
        name8=tnames[8]
        val8=as.numeric(thetas[[8]])
        sliderMaxes$theta8 = max(as.numeric(val8) * 1.5,10.0)
        output$theta8 = shiny::renderUI({
          sliderInput(inputId="theta8",label=name8,min=0.01,max=max(as.numeric(val8) * 1.5,10.0),
                      value=val8,step=0.1,round=FALSE)
        })
      }
      if ( length(thetas) > 8 ) {
        name9=tnames[9]
        val9=as.numeric(thetas[[9]])
        sliderMaxes$theta9 = max(as.numeric(val9) * 1.5,10.0)
        output$theta9 = shiny::renderUI({
          sliderInput(inputId="theta9",label=name9,min=0.01,max=max(as.numeric(val9) * 1.5,10.0),
                      value=val9,step=0.1,round=FALSE)
        })
      }
      # Subject picker, populated from the subjectIds argument.
      output$subject = shiny::renderUI({
        #nSubjects = out$nSubjects
        nSubjects = numSubjects
        subjects = subjectIds
        selectInput("Subject", label ="Subject",
                    choices = subjects,
                    selected = 1)
      })
      # Observation picker, populated from the model's observation names.
      output$observation = shiny::renderUI({
        nObservations = numObservations
        observations = observationNames
        selectInput("Observation", label ="Observation",
                    choices = observations,
                    selected = 1)
      })
    }
  )
}
#' getInitialEstimates
#'
#' Returns values from initial estimates shiny App.
#' Returned value can be used to set initial estimates in RsNlme
#'
#'@examples
#' ...
#' estimatesUI(model,unique(input$ID),host)
#'
#' estimates = getInitialEstimates(model)
#'
#' initFixedEffects(model) = estimates
#'
#'@export
#'
getInitialEstimates <-function(model){
  # Read the latest parameter values written by the estimates shiny app
  # (see estimatesUI) and return them as a named list, suitable for
  # initFixedEffects(model) <- getInitialEstimates(model).
  #
  # model : RsNlme model object; only model@modelInfo@workingDir is used,
  #         to locate params.txt.
  #
  # File format of params.txt:
  #   line 1            : number of fixed effects, n
  #   lines 2, 4, 6, .. : effect names (whitespace-padded)
  #   lines 3, 5, 7, .. : effect values
  lines= readLines(paste0(model@modelInfo@workingDir,"/params.txt"))
  effects=list()
  numEffects=as.numeric(lines[[1]])
  # FIX: seq_len() instead of 1:numEffects — when the file declares zero
  # effects, 1:0 yields c(1, 0) and indexes past the end of 'lines'.
  for ( i in seq_len(numEffects) ) {
    name = trimws(lines[[ ( i -1 ) * 2 + 2]],"both")
    val = as.numeric(lines[[ ( i -1 ) * 2 + 3]])
    effects[[name]]=val
  }
  effects
}
# NOTE(review): assign()ing into .GlobalEnv is a package-unfriendly side
# effect that silently overwrites any user object of the same name; a normal
# roxygen @export (already present above) makes this line redundant.
assign("getInitialEstimates",getInitialEstimates,envir=.GlobalEnv)
|
56aefb29e5e1b5a59b819476a5c2d0d8f1a7cfcd | 626084f27dc732fe9e36bd78ef336a7ee8c91d8c | /Sesion 1/Retos Sesion 1/reto_1_s1.R | 108c4630d277adb66746ce675f2ade35382747d0 | [] | no_license | ejgonzalez17/BEDU-Data-Science | 299b02d05f96779c1af4433e708025e5bab37e60 | 35066c3ce45adde3c9962d6c70a996643315b3b8 | refs/heads/main | 2023-02-25T22:18:00.957070 | 2021-02-03T17:46:54 | 2021-02-03T17:46:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 244 | r | reto_1_s1.R | netflix <- read.csv("https://raw.githubusercontent.com/ecoronadoj/Sesion_1/main/Data/netflix_titles.csv")
# Quick inspection of the Netflix titles data set loaded above.
dim(netflix)
typeof(netflix)  # a data frame is stored as a list
str(netflix)
# Keep only titles released after 2015.
net.2015 <- netflix[netflix$release_year > 2015, ]
# NOTE(review): the output file name carries no ".csv" extension and row
# names are written by default — confirm both are intended.
write.csv(net.2015, "netflix_release2015")
|
0b02624d10017005c043c0a4c849a85751f3230d | 5f13c75db818d549fab5fa1d57464858b766ee4f | /predict_weather.R | 384bef0d7c3780e63ebdcbf6320b02d4f03d79a5 | [] | no_license | JengsHub/DataAnalytics | 614647f9c6db6fe7142e3bda56526639ef8ef811 | 4fab2c1d129ad08d52edcc35da9e795a801b84ba | refs/heads/main | 2023-04-20T14:40:21.768965 | 2021-05-25T16:10:00 | 2021-05-25T16:10:00 | 370,746,989 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,403 | r | predict_weather.R | rm(list = ls())
# set to working directory
# ---- Setup: load the raw cloud-observation data and subsample it ----
options(digits=4)
#Reading the data
WAUS <- read.csv("CloudPredict2021.csv")
L <- as.data.frame(c(1:49))
set.seed(29637996) # Your Student ID is the random seed
L <- L[sample(nrow(L), 10, replace = FALSE),] # sample 10 locations
WAUS <- WAUS[(WAUS$Location %in% L),]
WAUS <- WAUS[sample(nrow(WAUS), 2000, replace = FALSE),] # sample 2000 rows
# Checking number of NA values in each column
colSums(is.na(WAUS))
# Sunshine and Evaporation are mostly NA (per the counts above), so drop the
# columns entirely rather than losing all those rows to na.omit() below.
WAUS$Sunshine = NULL
WAUS$Evaporation = NULL
# show number of entries before and after removing rows
cat("Number of Entries before removal: ", nrow(WAUS),"\n")
# Omitting NAs
WAUS = na.omit(WAUS)
cat("Number of Entries after removal: ", nrow(WAUS), "\n")
# Data checking
# Count the NA entries in a vector (applied column-wise to WAUS below).
num_missing_values <- function(x) {
  sum(is.na(x))
}
# Count the empty-string entries in a vector (applied column-wise below).
num_empty_cells <- function(x) {
  sum(x == "")
}
# Per-column data-quality summary: counts of NAs and of empty strings.
num_rows = length(colnames(WAUS))
new_table = data.frame(num_missing_values = integer(num_rows))
new_table$num_missing_values = apply(WAUS,2,num_missing_values)
new_table$num_empty_cells = apply(WAUS,2,num_empty_cells)
rownames(new_table) = colnames(WAUS)
new_table
unique(WAUS$Location)
# Class balance of the target (1 = cloudy tomorrow, 0 = clear).
sum(WAUS$CloudTomorrow == 1)
sum(WAUS$CloudTomorrow == 0)
# Identify the proportion of 'Cloudy'
sum(WAUS$CloudTomorrow == 1) / nrow(WAUS) * 100
# Identify the proportion of 'clear'
sum(WAUS$CloudTomorrow == 0) / nrow(WAUS) * 100
# Summary statistics for the real-valued predictors.
WAUS_real_valued = WAUS[c("MinTemp", "MaxTemp", "Rainfall", "Pressure9am",
                          "Pressure3pm", "Temp9am", "Temp3pm")]
summary(WAUS_real_valued)
# Categorize direction character to number: each categorical column gets a
# small lookup table (level -> integer ID) that is merge()d back in.
# NOTE(review): the IDs follow unique()'s order of first appearance (so they
# depend on the sampled rows), and each merge() reorders the data-frame rows.
# For WindGustDir
wind_gust_label = as.data.frame(unique(WAUS$WindGustDir))
wind_gust_label$ID = 1:nrow(wind_gust_label)
colnames(wind_gust_label) <- c("WindGustDir","WindGustDirID")
wind_gust_label
WAUS = merge(x = WAUS, y= wind_gust_label, by="WindGustDir")
# For WindDir9am
wind_dir_9_label = as.data.frame(unique(WAUS$WindDir9am))
wind_dir_9_label$ID = 1:nrow(wind_dir_9_label)
colnames(wind_dir_9_label) <- c("WindDir9am","WindDir9amID")
wind_dir_9_label
WAUS = merge(x = WAUS, y= wind_dir_9_label, by="WindDir9am")
# For WindDir3pm
wind_dir_3_label = as.data.frame(unique(WAUS$WindDir3pm))
wind_dir_3_label$ID = 1:nrow(wind_dir_3_label)
colnames(wind_dir_3_label) <- c("WindDir3pm","WindDir3pmID")
wind_dir_3_label
WAUS = merge(x = WAUS, y= wind_dir_3_label, by="WindDir3pm")
# For RainToday
rain_tod_label = as.data.frame(unique(WAUS$RainToday))
rain_tod_label$ID = 1:nrow(rain_tod_label)
colnames(rain_tod_label) <- c("RainToday","RainTodayID")
rain_tod_label
WAUS = merge(x = WAUS, y= rain_tod_label, by="RainToday")
# Replace old columns with new ones, then drop the helper ID columns.
WAUS$WindGustDir = WAUS$WindGustDirID
WAUS$WindDir3pm = WAUS$WindDir3pmID
WAUS$WindDir9am = WAUS$WindDir9amID
WAUS$RainToday = WAUS$RainTodayID
WAUS$WindGustDirID = NULL
WAUS$WindDir9amID = NULL
WAUS$WindDir3pmID = NULL
WAUS$RainTodayID = NULL
# Quick linear-model screen to rank the predictors for the classifiers.
fit = lm(CloudTomorrow ~ ., data = WAUS)
fit
summary(fit)
# Keep the attributes that passed the (lenient) p-value < 0.45 screen.
WAUS = WAUS[c("Location", "Day", "Year", "MinTemp","MaxTemp", "WindSpeed3pm",
              "Humidity9am", "WindGustSpeed",
              "Humidity3pm", "Pressure3pm", "Temp9am", "Temp3pm", "Rainfall",
              "CloudTomorrow")]
# Convert Location to a factor (the wind-direction columns were dropped by
# the attribute screen above, so only Location needs converting).
WAUS$Location = factor(WAUS$Location)
# 70/30 train/test split, reproducible via the student-ID seed.
set.seed(29637996) #Student ID as random seed
train.row = sample(1:nrow(WAUS), 0.7*nrow(WAUS))
data.train = WAUS[train.row,]
data.test = WAUS[-train.row,]
# ---- Fit five classifiers on the training data ----
# Decision Tree Classification model
library(tree)
data.train$CloudTomorrow = as.factor(data.train$CloudTomorrow)
DT_Model = tree(CloudTomorrow ~ ., data = data.train)
# Naive Bayes
library(e1071)
NB_Model = naiveBayes(CloudTomorrow ~ ., data = data.train)
# Bagging (10 bootstrap trees)
library(adabag)
bag_Model = bagging(CloudTomorrow ~ ., data= data.train, mfinal=10)
# Boosting (10 rounds)
library(rpart)
boost_Model = boosting(CloudTomorrow ~ ., data = data.train, mfinal = 10)
# Random Forest (default settings)
library(randomForest)
rf_Model = randomForest(CloudTomorrow ~., data = data.train)
# ---- Evaluate each classifier on the held-out test set ----
# Use test data to classify test cases as "cloudy" or "not cloudy"
data.test$CloudTomorrow = as.factor(data.test$CloudTomorrow)
# obtain prediction for dt
dt.predict = predict(DT_Model, data.test, type = "class")
dt_table = table(predicted = dt.predict, actual = data.test$CloudTomorrow)
cat("Matrix for Decision Tree:")
dt_table
cat("Accuracy for Decision Tree:")
accu_dt = sum(diag(dt_table))/nrow(data.test)  # accuracy = trace / n
accu_dt
# obtain prediction for naive bayes
nb.predict = predict(NB_Model, data.test)
nb_table = table(predicted = nb.predict, actual = data.test$CloudTomorrow)
cat("Matrix for Naive Bayes:")
nb_table
cat("Accuracy for Naive Bayes:")
accu_nb = sum(diag(nb_table))/nrow(data.test)
accu_nb
# obtain prediction bagging (predict.bagging supplies its own confusion matrix)
bag.predict = predict.bagging(bag_Model,data.test)
accu_bag = sum(diag(bag.predict$confusion))/nrow(data.test)
cat("Accuracy for Bagging:")
accu_bag
# obtain prediction for boosting
boost.predict = predict.boosting(boost_Model,data.test)
accu_boost = sum(diag(boost.predict$confusion))/nrow(data.test)
cat("Accuracy for Boosting:")
accu_boost
# obtain prediction for random forest
rf.predict = predict(rf_Model,data.test)
rf_table = table(predicted = rf.predict, actual = data.test$CloudTomorrow)
cat("Matrix for Random Forest:")
rf_table
accu_rf = sum(diag((rf_table))/nrow(data.test))
cat("Accuracy for Random Forest:")
accu_rf
# ---- ROC curves: class-1 ("cloudy") confidence for every classifier ----
# Calculate confidence for predicting "cloudy tomorrow" for each class
library(ROCR)
# For decision tree (first plot opens the device; later ones use add=TRUE)
dt_predict_conf = predict(DT_Model, data.test,type = "vector")
dt_pred = prediction(dt_predict_conf[,2], data.test$CloudTomorrow)
dt_perf = performance(dt_pred, "tpr", "fpr")
plot(dt_perf,col = "blue")
# for naive bayes
nb_pred_conf = predict(NB_Model, data.test, type = "raw")
nb_pred = prediction(nb_pred_conf[,2], data.test$CloudTomorrow)
nb_perf = performance(nb_pred, "tpr", "fpr")
plot(nb_perf ,add = TRUE,col = "green")
# for bagging
bag_pred = prediction(bag.predict$prob[,2], data.test$CloudTomorrow)
bag_perf = performance(bag_pred, "tpr", "fpr")
plot(bag_perf ,add = TRUE,col = "yellow")
# for boosting
boost_pred = prediction(boost.predict$prob[,2], data.test$CloudTomorrow)
boost_perf = performance(boost_pred, "tpr", "fpr")
plot(boost_perf ,add = TRUE,col = "red")
# for random forest (probabilities wrapped in a data.frame first)
rf_pred_prob = predict(rf_Model, data.test, type = "prob")
predData = cbind(rf_pred_prob[,2],data.test$CloudTomorrow)
temp_df = data.frame(predData)
rf_pred_conf = prediction(temp_df[,1], temp_df[,2])
rf_pred = performance(rf_pred_conf,"tpr","fpr")
plot(rf_pred, add = TRUE,col = "orange")
# Label the combined ROC plot (five classifiers).
# FIX: legend() previously received plot_range[2] as its second (y-coordinate)
# argument, but 'plot_range' is never defined anywhere in this script, so the
# call aborted with "object 'plot_range' not found".  Positioning by the
# "bottomright" keyword alone is sufficient.
title("ROC Curve for Classification models")
legend("bottomright",
       legend = c("Decision Tree", "Naive Bayes", "Bagging", "Boosting",
                  "Random Forest"),
       col = c("blue", "green", "yellow", "red", "orange"),
       pch = 21:22, lty = 1:2)
# ---- Area under the ROC curve for each classifier ----
# AUC for each classifier
# Decision tree AUC
dt_auc = performance(dt_pred, "auc")
cat("Decision Tree AUC: ", (as.numeric(dt_auc@y.values)))
# Naive Bayes AUC
nb_auc = performance(nb_pred, "auc")
cat("Naive Bayes AUC: ", (as.numeric(nb_auc@y.values)))
# Bagging AUC
bag_auc = performance(bag_pred, "auc")
cat("Bagging AUC: ", (as.numeric(bag_auc@y.values)))
# Boosting AUC
boost_auc = performance(boost_pred, "auc")
cat("Boosting AUC: ", (as.numeric(boost_auc@y.values)))
# Random Forest AUC
rf_auc = performance(rf_pred_conf, "auc")
cat("Random Forest AUC: ", (as.numeric(rf_auc@y.values)))
# ---- Which attributes drive each model? ----
# Identifying attribute importance for classification models
cat("#Decision Tree Attribute Importance: \n")
print(summary(DT_Model))
plot(DT_Model)
text(DT_Model, pretty = 0)
# Naive Bayes assumes all attributes are of equal importance
cat("\n#Baging Attribute Importance: \n")
print(bag_Model$importance)
cat("\n#Boosting Attribute Importance: \n")
print(boost_Model$importance)
cat("\n#Random Forest Attribute Importance: \n")
print(rf_Model$importance)
# ---- Prune the decision tree, size chosen by cross-validation ----
test_dt_model = cv.tree(DT_Model, FUN = prune.misclass)
test_dt_model
# size 3 best (read off the cv.tree misclassification output above)
prune_dt_model = prune.misclass(DT_Model, best = 3)
summary(prune_dt_model)
plot(prune_dt_model)
text(prune_dt_model, pretty = 0)
# Testing performance of pruned Decision Tree
prune_dt.predict = predict(prune_dt_model, data.test, type = "class")
# NOTE(review): this prediction object is built from the UNPRUNED tree's
# scores (dt_predict_conf) and is overwritten below with the pruned tree's
# own scores, so this line has no lasting effect.
prune_dt_pred = prediction(dt_predict_conf[,2], data.test$CloudTomorrow)
prune_dt_table = table(predicted = prune_dt.predict, actual = data.test$CloudTomorrow)
cat("Matrix for Pruned Decision Tree:")
prune_dt_table
cat("Accuracy for Pruned Decision Tree:")
accu_prune_dt = sum(diag(prune_dt_table))/nrow(data.test)
accu_prune_dt
# ROC for Pruned Decision Tree
prune_dt_predict_conf = predict(prune_dt_model, data.test,type = "vector")
prune_dt_pred = prediction(prune_dt_predict_conf[,2], data.test$CloudTomorrow)
prune_dt_perf = performance(prune_dt_pred, "tpr", "fpr")
# Pruned Decision Tree AUC
pruned_dt_auc = performance(prune_dt_pred, "auc")
cat("Pruned Decision Tree AUC: ", (as.numeric(pruned_dt_auc@y.values)))
# Plotting the pruned decision tree ROC together with the earlier models
plot(prune_dt_perf,col = "purple")
# Re-plotting all the previous performance
plot(dt_perf, add = TRUE,col = "blue")
plot(nb_perf ,add = TRUE,col = "green")
plot(bag_perf ,add = TRUE,col = "yellow")
plot(boost_perf ,add = TRUE,col = "red")
plot(rf_pred, add = TRUE,col = "orange")
# Label the combined ROC plot (now including the pruned decision tree).
# FIX: legend() previously received plot_range[2] as its second (y-coordinate)
# argument, but 'plot_range' is never defined anywhere in this script, so the
# call aborted with "object 'plot_range' not found".  Positioning by the
# "bottomright" keyword alone is sufficient.
title("ROC Curve for Classification models")
legend("bottomright",
       legend = c("Decision Tree", "Naive Bayes", "Bagging", "Boosting",
                  "Random Forest", "Pruned Decision Tree"),
       col = c("blue", "green", "yellow", "red", "orange", "purple"),
       pch = 21:22, lty = 1:2)
# ---- Tune a random forest with caret (10-fold CV, grid search) ----
# Creating the best tree-based classifier by adjustment
library(pROC)
library(caret)
# Restrict to three predictors plus the target.
new_train = data.train[c("Location",
                         "Humidity3pm",
                         "Humidity9am",
                         "CloudTomorrow")]
new_test = data.test[c("Location",
                       "Humidity3pm",
                       "Humidity9am",
                       "CloudTomorrow")]
# Finding the best settings using Random Forest with k-fold cross
# validation for parameter settings
controls = trainControl(method = "cv", number = 10, search = "grid")
set.seed(288)
# Baseline: caret's default mtry search.
rf_default_settings = train(CloudTomorrow~.,
                            data = new_train,
                            method = "rf",
                            metric = "Accuracy",
                            trControl = controls)
set.seed(288)
# Step 1: search mtry in 1..10 with a fixed forest of 600 trees.
parameter_grid = expand.grid(.mtry = c(1: 10))
rf_mtry_adjusted = train(CloudTomorrow~.,
                         data = new_train,
                         method = "rf",
                         metric = "Accuracy",
                         tuneGrid = parameter_grid,
                         trControl = controls,
                         ntree = 600)
best_mtry = rf_mtry_adjusted$bestTune$mtry
best_mtry
# Performing test with the best 'mtry' parameter obtained previously
parameter_grid = expand.grid(.mtry = best_mtry)
list_maxnodes <- list()
# Step 2: iterate over different values of 'maxnodes'; best mtry fixed here.
for (maxnodes in c(100: 110)) {
  set.seed(288)
  rf_maxnode_settings = train(CloudTomorrow ~ .,
                              data = new_train,
                              method = "rf",
                              metric = "Accuracy",
                              tuneGrid = parameter_grid,
                              trControl = controls,
                              maxnodes = maxnodes,
                              ntree = 600)
  curr_maxnodes = toString(maxnodes)
  list_maxnodes[[curr_maxnodes]] = rf_maxnode_settings
}
results_mtry = resamples(list_maxnodes)
summary(results_mtry) # 106 for maxnodes
# Step 3: with mtry and maxnodes fixed, compare forest sizes.
store_maxtrees = list()
for (ntree in c(1000, 1500, 2000, 2500, 3000, 3500)) {
  set.seed(288)
  rf_maxtree_setting = train(CloudTomorrow ~ .,
                             data = new_train,
                             method = "rf",
                             metric = "Accuracy",
                             tuneGrid = parameter_grid,
                             trControl = controls,
                             maxnodes = 106,
                             ntree = ntree)
  curr_ntree = toString(ntree)
  store_maxtrees[[curr_ntree]] = rf_maxtree_setting
}
results_tree <- resamples(store_maxtrees)
summary(results_tree)
# 2500 trees is the best (read off the resamples summary above)
## Final model with parameter best settings
# NOTE(review): maxnodes = 106 and ntree = 2500 are hard-coded from the
# printed summaries above; re-check them if the data sample or seed changes.
set.seed(288)
rf_final <- train(CloudTomorrow ~ .,
                  new_train,
                  method = "rf",
                  metric = "Accuracy",
                  tuneGrid = parameter_grid,
                  trControl = controls,
                  ntree = 2500,
                  maxnodes = 106)
# Evaluate the default and the tuned random forest on the test data set
default_pred = predict(rf_default_settings, new_test)
final_pred = predict(rf_final, new_test)
# Confusion matrix and their accuracy
default_table = table(predicted = default_pred, actual = new_test$CloudTomorrow)
cat("Matrix for default Random Forest:")
default_table
accu_def_rf = sum(diag((default_table))/nrow(new_test))
cat("Accuracy for default Random Forest:")
accu_def_rf
# For final settings random forest
cv_table = table(predicted = final_pred, actual = new_test$CloudTomorrow)
cat("Matrix for Cross Validated Random Forest:")
cv_table
# Find accuracy
accu_cv_rf = sum(diag((cv_table))/nrow(new_test))
cat("Accuracy for Cross Validated Random Forest:")
accu_cv_rf
# Plotting the Cross validated Random Forest ROC curve
# NOTE(review): rf_pred_prob overwrites the earlier (untuned) random-forest
# probabilities stored under the same name.
rf_pred_prob = predict(rf_final, new_test, type = "prob")
cv_predData = cbind(rf_pred_prob[,2], new_test$CloudTomorrow)
cv_temp_df = data.frame(cv_predData)
cv_rf_pred_conf = prediction(cv_temp_df[,1], cv_temp_df[,2])
cv_rf_pred = performance(cv_rf_pred_conf,"tpr","fpr")
plot(cv_rf_pred,col = "black")
# Plotting the old ones
plot(prune_dt_perf,add = TRUE,col = "purple")
plot(dt_perf, add = TRUE,col = "blue")
plot(nb_perf ,add = TRUE,col = "green")
plot(bag_perf ,add = TRUE,col = "yellow")
plot(boost_perf ,add = TRUE,col = "red")
plot(rf_pred, add = TRUE,col = "orange")
# Label the combined ROC plot (all seven classifiers).
# FIX: legend() previously received plot_range[2] as its second (y-coordinate)
# argument, but 'plot_range' is never defined anywhere in this script, so the
# call aborted with "object 'plot_range' not found".  Positioning by the
# "bottomright" keyword alone is sufficient.
title("ROC Curve for Classification models")
legend("bottomright",
       legend = c("Decision Tree", "Naive Bayes", "Bagging", "Boosting",
                  "Random Forest", "Pruned Decision Tree", "CV Random Forest"),
       col = c("blue", "green", "yellow", "red", "orange", "purple", "black"),
       pch = 21:22, lty = 1:2)
# Cross-validated Random Forest AUC
cv_rf_auc = performance(cv_rf_pred_conf, "auc")
cat("CV Random Forest AUC: ", (as.numeric(cv_rf_auc@y.values)))
# ---- Artificial neural network on the same three predictors ----
library(neuralnet)
library(car)
# Remove variables here: keep the three predictors plus the target.
ann_train = data.train[c("Location",
                         "Humidity3pm",
                         "Humidity9am",
                         "CloudTomorrow")]
ann_test = data.test[c("Location",
                       "Humidity3pm",
                       "Humidity9am",
                       "CloudTomorrow")]
# Recombine so train and test are dummy-coded identically by model.matrix().
DATAmerge = rbind(ann_train, ann_test)
# DATAmerge$Location = as.factor(DATAmerge$Location)
# NOTE(review): attach() puts the data-frame columns on the search path for
# the model.matrix() call below; detach() is never called afterwards.
attach(DATAmerge)
Recodedata = model.matrix(~ Location+Humidity3pm+Humidity9am)
DATAmerge = cbind(DATAmerge,Recodedata)
# Keep only the recoded columns (6..16) plus the target (column 4).
DATAmerge = DATAmerge[, c(6,7,8,9,10,11,12,13,14,15,16,4)]
DATAmerge$CloudTomorrow = recode(DATAmerge$CloudTomorrow," '0' = 'FALSE' ; '1' = 'TRUE' ")
DATAmerge$CloudTomorrow = as.logical(DATAmerge$CloudTomorrow)
# NOTE(review): the 972/1389 split boundaries are hard-coded row counts from
# the earlier sampling/na.omit() steps — confirm they still equal
# nrow(ann_train) and nrow(DATAmerge) if the preprocessing changes.
ann_train = DATAmerge[1 : 972,]
ann_test = DATAmerge[973: 1389,]
# ANN
# pre-process and select required attributes
ann_model = neuralnet(CloudTomorrow ~ ., ann_train, hidden = 2)
plot(ann_model)
# Testing the ANN model: round the network output to a 0/1 class label.
ann_pred = compute(ann_model, ann_test)
ann_pred_final = round(ann_pred$net.result, 0)
ann_table = table(predicted = ann_pred_final, actual = ann_test$CloudTomorrow)
acc_ann = sum(diag(ann_table))/nrow(ann_test)
cat("Accuracy of ANN: ", acc_ann)
|
9c87f139c909fc2cb351bba78a5da010ba40595c | 55e042f05ee3da0db86ecfb806c0e695382a843d | /tests/testthat/test-type-gitlab.R | ba4b257ea5c0638af687c08eb7e81b803f2b68ee | [
"MIT"
] | permissive | r-lib/pkgdepends | f507dfe031e34c994311ca9a139dda9a6d7e016a | a0f5132320498780c8b87ce8eb4f66e754906376 | refs/heads/main | 2023-08-03T15:56:48.339228 | 2023-07-19T09:13:42 | 2023-07-19T09:13:42 | 102,942,545 | 86 | 23 | NOASSERTION | 2023-09-11T20:44:59 | 2017-09-09T09:17:38 | R | UTF-8 | R | false | false | 2,455 | r | test-type-gitlab.R |
test_that("resolve", {
  # Resolution of gitlab:: remote refs.  Every case talks to the live GitLab
  # API, so the whole test is skipped on CRAN; the resolved fields are pinned
  # with snapshot expectations.
  skip_on_cran()
  tmp <- tempfile()
  on.exit(unlink(tmp, recursive = TRUE), add = TRUE)
  # plain user/repo ref
  p <- suppressMessages(new_pkg_installation_proposal(
    "gitlab::gaborcsardi/cli",
    config = list(library = tmp)
  ))
  suppressMessages(p$resolve())
  res <- p$get_resolution()
  expect_snapshot({
    res$error
    res$package
    res$version
    res$metadata[[1]]
  })
  # group + branch
  p <- suppressMessages(new_pkg_installation_proposal(
    "gitlab::r-hub/filelock@cran-1-0-2",
    config = list(library = tmp)
  ))
  suppressMessages(p$resolve())
  res <- p$get_resolution()
  expect_snapshot({
    res$error
    res$package
    res$version
    res$metadata[[1]]
  })
  # tag
  p <- suppressMessages(new_pkg_installation_proposal(
    "gitlab::r-hub/filelock@v1.0.2",
    config = list(library = tmp)
  ))
  suppressMessages(p$resolve())
  res <- p$get_resolution()
  expect_snapshot({
    res$error
    res$package
    res$version
    res$metadata[[1]]
  })
  # subdirectory (package sources live under feather/R)
  p <- suppressMessages(new_pkg_installation_proposal(
    "gitlab::gaborcsardi/feather/R",
    config = list(library = tmp, dependencies = FALSE)
  ))
  suppressMessages(p$resolve())
  res <- p$get_resolution()
  expect_snapshot({
    res$error
    res$package
    res$version
    res$metadata[[1]]
  })
})
test_that("download", {
  # Downloading a gitlab:: ref that points at a package in a repository
  # subdirectory: the extracted tree must contain the sources under
  # feather/R.  Hits the live GitLab API, hence skip_on_cran().
  skip_on_cran()
  tmp <- tempfile()
  on.exit(unlink(tmp, recursive = TRUE), add = TRUE)
  # subdirectory
  p <- suppressMessages(new_pkg_installation_proposal(
    "gitlab::gaborcsardi/feather/R",
    config = list(library = tmp, dependencies = FALSE)
  ))
  suppressMessages(p$resolve())
  res <- p$get_resolution()
  suppressMessages(p$solve())
  suppressMessages(p$download())
  dl <- p$get_downloads()
  expect_true(dir.exists(dl$fulltarget_tree))
  expect_snapshot(
    dir(file.path(dl$fulltarget_tree, "feather", "R"))
  )
})
test_that("satisfy", {
  # A gitlab candidate satisfies a resolved gitlab ref when the package name
  # and the pinned commit SHA both match.
  resolution <- list(
    package = "foo",
    extra = list(list(remotesha = "badcafe"))
  )
  candidate <- list(
    type = "gitlab",
    package = "foo",
    extra = list(list(remotesha = "badcafe"))
  )
  expect_true(satisfy_remote_gitlab(resolution, candidate))
})
test_that("installedok", {
  # An installed copy is acceptable when its name, version and recorded
  # remote SHA all agree with the solution metadata.
  installed <- list(package = "foo", version = "1.0.0", remotesha = "badcafe")
  solution <- list(
    package = "foo",
    version = "1.0.0",
    metadata = list(c("RemoteSha" = "badcafe"))
  )
  expect_true(installedok_remote_gitlab(installed, solution))
})
|
ca69ec129a60f3465de0d5793f95c3a5451d7cf2 | e88bf4703d84c3eed28d78adcd7158ae0a51acdb | /demo.R | 810f86d034591badf02f2d3f77c9de8ac77a7adf | [] | no_license | TomKellyGenetics/TokyoR78 | bc85f18867cd6ba3c42bc7df93e15a86f1e675fc | 892761f7fc17b47454cb19dbaf19f509d90d0767 | refs/heads/master | 2020-05-26T21:21:37.845928 | 2019-05-24T07:46:15 | 2019-05-24T07:46:15 | 188,377,285 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,672 | r | demo.R | # input data
# Fetch the gapminder CSV and load it as a plain data frame.
# NOTE(review): system("wget ...") requires wget on the PATH and is not
# portable; download.file() would be the portable equivalent.
system("wget raw.githubusercontent.com/swcarpentry/r-novice-gapminder/master/data/gapminder_data.csv")
gapminder_data <- data.table::fread("gapminder_data.csv", data.table = FALSE)
dim(gapminder_data)
head(gapminder_data)
# let's take a basic task (that we want to do many times)
mean(gapminder_data[gapminder_data$continent == "Asia" & gapminder_data$year == "2002",]$lifeExp)
# now you want to do it again for each continent: why not copy-paste (do not do this)
mean(gapminder_data[gapminder_data$continent == "Asia" & gapminder_data$year == "2002",]$lifeExp)
mean(gapminder_data[gapminder_data$continent == "Africa" & gapminder_data$year == "2002",]$lifeExp)
mean(gapminder_data[gapminder_data$continent == "Europe" & gapminder_data$year == "2002",]$lifeExp)
# NOTE(review): "Australasia" presumably matches no rows (check the table()
# of levels below) — exactly the copy-paste mistake this demo warns about.
mean(gapminder_data[gapminder_data$continent == "Australasia" & gapminder_data$year == "2002",]$lifeExp)
mean(gapminder_data[gapminder_data$continent == "Americas" & gapminder_data$year == "2002",]$lifeExp)
table(gapminder_data$continent)
# doing simple operations (vectorised functions)
#scalar operation
1^2
2^2
3^2
#vector operation: ^ operates on the whole vector at once
c(1:3)^2
c(1:10)^2
# doing something exactly the same for many arguments: FOR Loop
print(paste0(1, "^2 = ", 1^2))
for(ii in 1:4){
  print(paste0(ii, "^2 = ", ii^2))
}
# same idea applied to the data: one iteration per continent
for(continent in unique(gapminder_data$continent)){
  print(
    mean(gapminder_data[gapminder_data$continent == continent & gapminder_data$year == "2002",]$lifeExp)
  )
}
# collect results in a pre-allocated named vector instead of printing
mean_by_cont <- rep(NA, length(unique(gapminder_data$continent)))
for(continent in unique(gapminder_data$continent)){
  ii <- match(continent, unique(gapminder_data$continent))
  mean_by_cont[ii] <- mean(gapminder_data[gapminder_data$continent == continent & gapminder_data$year == "2002",]$lifeExp)
}
names(mean_by_cont) <- unique(gapminder_data$continent)
mean_by_cont
# doing something for different arguments: nested FOR Loop (can get messy)
# rows = years, columns = continents
mean_by_cont <- matrix(NA, length(unique(gapminder_data$year)), length(unique(gapminder_data$continent)))
for(continent in unique(gapminder_data$continent)){
  jj <- match(continent, unique(gapminder_data$continent))
  for(year in unique(gapminder_data$year)){
    ii <- match(year, unique(gapminder_data$year))
    mean_by_cont[ii, jj] <- mean(gapminder_data[gapminder_data$continent == continent & gapminder_data$year == year,]$lifeExp)
  }
}
colnames(mean_by_cont) <- unique(gapminder_data$continent)
rownames(mean_by_cont) <- unique(gapminder_data$year)
mean_by_cont
# repeating tasks with functions: if you do something more than once, write a function
# Version 1: continent is the only argument; dataset and year are hard-coded.
continent_mean <- function(continent){
  mean(gapminder_data[gapminder_data$continent == continent & gapminder_data$year == "2002",]$lifeExp)
}
continent_mean("Africa")
continent_mean()   # errors: 'continent' has no default yet
# Version 2: give 'continent' a default value.
continent_mean <- function(continent = "Europe"){
  mean(gapminder_data[gapminder_data$continent == continent & gapminder_data$year == "2002",]$lifeExp)
}
continent_mean("Africa")
continent_mean() #Europe is default
# Version 3: pass the data in too, so the function is not tied to one dataset.
continent_mean <- function(data, continent = "Europe"){
  mean(data[data$continent == continent & data$year == "2002",]$lifeExp)
}
continent_mean(gapminder_data, "Africa") # compatible with other datasets (not "hardcoded")
continent_mean(gapminder_data) #Europe is default for "continent"
# Final version: data, continent and year are all parameters, so the helper
# is fully reusable ("Europe" in 2002 by default).
continent_mean <- function(data, continent = "Europe", year = 2002){
  keep <- data$continent == continent & data$year == year
  mean(data[keep, ]$lifeExp)
}
continent_mean(gapminder_data) #Europe 2002 is default
continent_mean(gapminder_data, year = "1972", continent = "Asia") # named arguments may come in any order
#Now we can run our function on different inputs instead of copying the code
continent_mean(gapminder_data, year = "1972", continent = "Asia")
continent_mean(gapminder_data, year = "1972", continent = "Africa")
continent_mean(gapminder_data, year = "1972", continent = "Americas")
#Let's do our LOOP again with this function
continent_mean(gapminder_data, year = "1972", continent = "Asia")
# the nested loop from before, with the function replacing the inline mean()
mean_by_cont <- matrix(NA, length(unique(gapminder_data$year)), length(unique(gapminder_data$continent)))
for(continent in unique(gapminder_data$continent)){
  jj <- match(continent, unique(gapminder_data$continent))
  for(year in unique(gapminder_data$year)){
    ii <- match(year, unique(gapminder_data$year))
    mean_by_cont[ii, jj] <- continent_mean(gapminder_data, continent, year)
  }
}
colnames(mean_by_cont) <- unique(gapminder_data$continent)
rownames(mean_by_cont) <- unique(gapminder_data$year)
mean_by_cont
# we can also "apply" functions to a list or matrix
#apply to a list: lapply returns a list, sapply simplifies to a vector
numbers <- list(1:10, c(0,4,6,2,8))
numbers
max(numbers[[1]])
lapply(numbers, max)
sapply(numbers, max)
#apply to a matrix/data.frame rows (e.g., for every country-year row)
# row-wise product of columns 6 and 3 — presumably gdpPercap * pop given the
# 'gdp' name; confirm the column order against head(gapminder_data)
gdp <- apply(gapminder_data, 1, function(x) as.numeric(x[[6]]) * as.numeric(x[[3]]))
cbind(gapminder_data[,1:4], gdp)
#apply to matrix columns: a named function, or an equivalent anonymous one
apply(gapminder_data[grep("2002", gapminder_data$year),c(3, 5, 6)], 2, sum, na.rm = TRUE)
apply(gapminder_data[grep("2002", gapminder_data$year),c(3, 5, 6)], 2, function(x) sum(x, na.rm = TRUE))
# our own function works with the apply family too
lapply(unique(gapminder_data$continent), function(continent){
  continent_mean(gapminder_data, continent, year = "2002")
})
sapply(unique(gapminder_data$continent), function(continent){
  continent_mean(gapminder_data, continent, year = "2002")
})
# we do not need to wait for each result to compute the next one: parallel computing
lapply(1:10, function(ii) ii^2)
library("snow") # simple network of workstations
cl <- makeCluster(3)   # 3 worker processes
cl
parLapply(cl, 1:10, function(ii) ii^2)
# note these operations do not occur in order (but results are returned in order of inputs)
# NOTE(review): rm/touch/cat shell out to Unix tools, so this part of the
# demo is not portable to Windows.
system("rm outs.txt")
system("touch outs.txt")
library("snow")
cl <- makeCluster(3)
cl
parLapply(cl, 1:10, function(ii){
  print(ii)
  write.table(ii^2, "outs.txt", append = TRUE)
  return(ii^2)
})
# the file records the order in which workers actually finished
system("cat outs.txt")
# note more cores is not always faster (Amdahl's law) due to set up time and core-to-core communication
# 1 worker
system.time({
  library("snow") # simple network of workstations
  cl <- makeCluster(1)
  cl
  parLapply(cl, 1:1000, function(ii) ii^2)
})
# 3 workers
system.time({
  library("snow") # simple network of workstations
  cl <- makeCluster(3)
  cl
  parLapply(cl, 1:1000, function(ii) ii^2)
})
# 10 workers
system.time({
  library("snow") # simple network of workstations
  cl <- makeCluster(10) # more than on machine
  cl
  parLapply(cl, 1:1000, function(ii) ii^2)
})
# NOTE(review): only the most recent cluster is stopped here; the clusters
# created earlier in this script leak their worker processes.
stopCluster(cl)
# setting up parallel computing is better for more complex tasks
# serial baseline
system.time({
  lapply(unique(gapminder_data$continent), function(continent){
    continent_mean(gapminder_data, continent, year = "2002")
  })
})
# parallel: the data and the function must be shipped to each worker first
system.time({
  library("snow") # simple network of workstations
  cl <- makeCluster(3) # more than on machine
  cl
  clusterExport(cl, list("gapminder_data", "continent_mean")) # export data to other cores (can be "ls()")
  parLapply(cl, unique(gapminder_data$continent), function(continent){
    continent_mean(gapminder_data, continent, year = "2002")
  })
})
#slower due to sending data to each core
# serial again, now with an artificial 0.5 s of "work" per task
system.time({
  lapply(unique(gapminder_data$continent), function(continent){
    Sys.sleep(0.5)
    continent_mean(gapminder_data, continent, year = "2002")
  })
})
system.time({
library("snow") # simple network of workstations
cl <- makeCluster(3) # more than on machine
cl
clusterExport(cl, list("gapminder_data", "continent_mean"))
parLapply(cl, unique(gapminder_data$continent), function(continent){
Sys.sleep(0.5) # computational intensice step runs separately on each core
continent_mean(gapminder_data, continent, year = "2002")
})
}) |
406f0995a3891aeed68414f8da2c3de547179b50 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /IntervalSurgeon/inst/testfiles/rcpp_depth/AFL_rcpp_depth/rcpp_depth_valgrind_files/1609857934-test.R | 44b5a8438151dedc0574f9c42d080be5878bac8c | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 644 | r | 1609857934-test.R | testlist <- list(pts = c(-1261966754L, -129171080L, -642760964L, 779827246L, 1878602521L, -612794743L, 31959320L, -1933440006L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), ends = NULL, starts = NULL, sorted_ends = c(-1712958485L, -1313178345L, 1470710473L, 2108708770L, -1965616612L, -533569700L), sorted_starts = c(0L, -1627389952L, 682962941L, 546746628L))
result <- do.call(IntervalSurgeon:::rcpp_depth,testlist)
str(result) |
3b963968b70426e92493b0cc66ac1475fd6ecff6 | 933c674278e2b3b8ebc0a90a70ac4fd629ac72e9 | /R/birthdeath.R | d07b7ff03528e60b0cd76c8dfbb9244f9ee8b51c | [] | no_license | dkahle/algstat | bc63249b8adca4005016939a6e7db11f5253ee01 | a705514d3a3c592361cd7ee222d1c743ed8808c9 | refs/heads/master | 2023-05-27T22:17:47.006699 | 2023-05-17T17:18:06 | 2023-05-17T17:18:06 | 27,615,285 | 14 | 11 | null | 2022-08-18T13:44:36 | 2014-12-05T23:50:12 | R | UTF-8 | R | false | false | 358 | r | birthdeath.R | #' Andrews/Herzberg Birthday and Deathday Dataset
#'
#' A two-way table cross-classifying birth and death days of 82 individuals.
#'
#' @name birthdeath
#' @docType data
#' @keywords datasets
#' @usage data(birthdeath)
#' @format A 12x12 (contingency) table
#' @references Andrews, D. and A. Herzberg (1985). \emph{Data}. Springer-Verlag,
#' New York.
NULL |
3b867d95e082131d571aa91da8de2606944b75f2 | cb56caaec0011c69ea7b4ccc3457dd3845193efc | /Marport/match.set.from.gpstrack.marport.r | e637b3cb94267148fdd64948e78861c0094b808c | [] | no_license | jgmunden/misc | 585706725df283b03d0e09dc9f2299e824771358 | 471906bbc77a245762345fe4788b688743e7b34a | refs/heads/master | 2020-12-30T09:26:17.665780 | 2015-03-06T18:14:25 | 2015-03-06T18:14:25 | 31,780,116 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,537 | r | match.set.from.gpstrack.marport.r | match.set.from.gpstrack.marport=function( DS="post.perley", netswd=netswd ) {
DS.saved = gsub(".redo$", "", DS)
fn = file.path(netswd, paste(DS.saved, "meta", "rdata", sep= "."))
meta= NULL
if ( !grepl (".redo$", DS) ) {
if(file.exists(fn)) load(fn)
return(meta)
}
# Incorporation of newer data, combining timestamp
pp=net_mensuration.db( DS=DS.saved, netswd=netswd )
# testing methods with one
ppt = pp[which(pp$id == "2013-Mar04-034230.SET.LOG"), ]
# retrieving marport
marport = net_mensuration.db( DS="marport", netswd=marportdatadirectory )
mp = marport
meta=data.frame(uniqueid=unique(ppt$id), stringsAsFactors=FALSE )
meta$timestamp=NA
meta$mission=NA
meta$longitude=NA
meta$latitude=NA
meta$min.distance = NA
for(i in 1:nrow(meta)){
k = meta$uniqueid[i]
print(k)
j = which(ppt$id== k)
if(length(j)>0)
ppc=ppt[j,]
m = ppc$timestamp[1]
meta$timestamp[i] =as.character(m)
dif = as.duration(ymd_hms(meta$timestamp[i]) - mp$timestamp)
u = which(abs(dif)< dhours (9) )
if(length(u)> 1) {
mps=mp[u,]
mps$min.distance.test=NA
for(v in 1:nrow (gfs)){
distance.test = geodist(ppc[,c("lon","lat")], gfs[v,c("lon","lat")], method="great.circle")
gfs$min.distance.test[v] = min(distance.test, na.rm=TRUE)
}
w = which.min(gfs$min.distance.test)
if(gfs$min.distance.test[w]< 1 ){
meta$id[i]=gfs$id[w] # exact match with very high confidence
meta$min.distance[i] = gfs$min.distance.test[w]
}
}
}
}
}
# fnn2 = "C:/cygwin64/home/mundenj/ecomod/groundfish/R/meta.rdata"
# save( meta, file=fnn2)
# load (fnn2)
# Check for duplicates as some are data errors .. needed to be checked manually and raw data files altered
# others are due to bad tows being redone ... so invoke a distance based rule as the correct one in gsinf (good tows only are recorded)
dupids = unique( meta$id[ which( duplicated( meta$id, incomparables=NA) ) ] )
for ( dups in dupids ) {
uu = which(meta$id %in% dups)
good = uu[ which.min( meta$min.distance[uu] ) ]
notsogood = setdiff( uu, good )
meta$id[notsogood] = NA
}
# redo the distance-based match to catch any that did not due to being duplicates above
# does not seem to do much but kept for posterity
unmatched = which( is.na(meta$id ) )
if (length (unmatched) > 0) {
for(i in unmatched ){
k = meta$uniqueid[i]
print(k)
j = which(pp$id == k)
if(length(j)>0) {
ppc=pp[j,]
m = which.min(ppc$timestamp)
meta$sdate[i] = as.character(ppc$timestamp[m])
dif = as.duration(ymd_hms(meta$sdate[i]) - gf$sdate)
u = which(abs(dif)< dhours (9))
## the next two lines are where things are a little different from above
## the catch all as yet unmatched id's only for further processing
current.meta.ids = unique( sort( meta$id) )
u = u[ which( ! (gf$id[u] %in% current.meta.ids ) )]
if(length(u)> 1) {
gfs=gf[u,]
gfs$min.distance.test=NA
for(v in 1:nrow (gfs)){
distance.test = geodist(ppc[,c("lon","lat")], gfs[v,c("lon","lat")], method="great.circle")
gfs$min.distance.test[v] = min(distance.test, na.rm=TRUE)
}
w = which.min(gfs$min.distance.test)
if(gfs$min.distance.test[w]< 1 ){
meta$id[i]=gfs$id[w] # exact match with very high confidence
meta$min.distance[i] = gfs$min.distance.test[w]
}
}
}
}
}
## now do a more fuzzy match based upon time stamps as there are no matches based upon distance alone
nomatches = which( is.na( meta$id) )
if (length(nomatches) > 1) {
for(i in nomatches){
k = meta$uniqueid[i]
print(k)
j = which(pp$id == k)
if(length(j)>0) {
ppc=pp[j,]
m = which.min(ppc$timestamp)
meta$sdate[i] = as.character(ppc$timestamp[m])
dif = as.duration(ymd_hms(meta$sdate[i]) - gf$sdate)
u = which( abs(dif)< dhours (1) )
if (length(u) == 1 ) {
current.meta.ids = unique( sort( meta$id) )
u = u[ which( ! (gf$id[u] %in% current.meta.ids ) )]
if (length(u) == 1 ) meta$id[i]= gfs$id[u]
}
}
}
}
save(meta, file= fn, compress= TRUE)
}
|
6d2d260c01299980ba6fa3dd627d94bcdbb24fe2 | 137c532a5e2a125a9e154c204b4c381e6e09123d | /Part 1/1_5.R | c36b392ff31ed339ec185d16898adefb0afcfee6 | [] | no_license | adrikayak/IP_network_traffic_measurement_and_analysis | bfa4d0d329b172fd739be1823df423fe3f1b38d4 | 9ff79a91f2a39a0ea41c37f4cab781a65e1f0673 | refs/heads/master | 2020-07-10T19:45:01.017669 | 2016-09-05T13:05:42 | 2016-09-05T13:05:42 | 67,352,910 | 0 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 2,869 | r | 1_5.R |
library(MASS)
setwd("C:/Users/Adrian/Dropbox/Universidad/1º de Master/2. Second semester/Network Traffic Measurement and Analysis/Assignments/Final Assignment/Part 2")
load(paste("C:/Users/Adrian/Documents/trace/data_bytes.RData", sep="", collapse = NULL))
bytes = data
rm(data)
gc()
#Histogram
jpeg("../Pictures/1_5_hist.jpg", 1266, 484)
hist(log(bytes$V8), freq = FALSE, breaks = 200, col = "blue", main = "Flow length distribution", xlab = "log( length [Bytes] )", ylab = "Frequency")
dev.off()
#Empirical CDF
P = ecdf(log(bytes$V8))
jpeg("../Pictures/1_5_ecdf.jpg", 1266, 484)
plot(P, pch=',', lwd = 2, col = "blue", main="ECDF of flows length distribution", xlab = "log( length [B] )", ylab = "Probability")
dev.off()
#Summary of flows length distribution
summary(bytes$V8)
#Fitting using log-normal distribution
parameters = fitdistr(log(bytes$V8),"lognormal")
jpeg("../Pictures/1_5_qq_lnorm.jpg", 1266, 484)
qqplot(qlnorm(ppoints(100), meanlog = parameters[[1]][1], sdlog = parameters[[1]][2]),
log(bytes$V8), main="Q-Q plot fitting log-normal distribution",ylab="Samples",col='blue',pch=3)
abline(0,1)
dev.off()
jpeg("../Pictures/1_5_hist_lnorm.jpg", 1266, 484)
hist(log(bytes$V8), freq = FALSE, breaks = 200, col = "blue", main = "Flow length distribution and fitted log-normal distribution", xlab = "log( length [Bytes] )", ylab = "Frequency")
curve(dlnorm(x, meanlog = parameters[[1]][1], sdlog = parameters[[1]][2]), col = "red", add = TRUE)
dev.off()
# Fitting using gamma distribution
parameters = fitdistr(log(bytes$V8),"gamma")
jpeg("../Pictures/1_5_qq_gamma.jpg", 1266, 484)
qqplot(qgamma(ppoints(100), shape = parameters[[1]][1], rate = parameters[[1]][2]),
log(bytes$V8), main="Q-Q plot fitting gamma distribution", ylab = "Samples",col = 'blue', pch=3)
abline(0,1)
dev.off()
jpeg("../Pictures/1_5_hist_gamma.jpg", 1266, 484)
hist(log(bytes$V8), breaks = 200, col = "blue", main = "Flow length distribution and fitted gamma distribution", xlab = "log( length [B] )", ylab = "Frequency")
curve(dgamma(x, lwd = 2, shape = parameters[[1]][1], rate = parameters[[1]][2]), col = "red", add = TRUE)
dev.off()
# Fitting using weibull distribution
parameters = fitdistr(log(bytes$V8),"weibull")
jpeg("../Pictures/1_5_qq_weibull.jpg", 1266, 484)
qqplot(qweibull(ppoints(100), shape = parameters[[1]][1], scale = parameters[[1]][2]),
log(bytes$V8), main="Q-Q plot fitting weibull distribution", ylab = "samples",col='blue',pch=3)
abline(0,1)
dev.off()
jpeg("../Pictures/1_5_hist_weibull.jpg", 1266, 484)
hist(log(bytes$V8), breaks = 200, col = "blue", main = "Flow length distribution and fitted weibull distribution", xlab = "log( length [B] )", ylab = "Frequency")
curve(dweibull(x, lwd = 2, shape = parameters[[1]][1], scale = parameters[[1]][2]), col = "red", add = TRUE)
dev.off()
|
e9774ce55bf7f6e2e3a1a86da946a2ac4dc2bec1 | d473a271deb529ed2199d2b7f1c4c07b8625a4aa | /NonLinearModels/PolynomialRegressionExample1.R | 910ee2d60ce5cd4d3ec1311162c079d4c8161fc2 | [] | no_license | yangboyubyron/DS_Recipes | e674820b9af45bc71852ac0acdeb5199b76c8533 | 5436e42597b26adc2ae2381e2180c9488627f94d | refs/heads/master | 2023-03-06T05:20:26.676369 | 2021-02-19T18:56:52 | 2021-02-19T18:56:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,305 | r | PolynomialRegressionExample1.R |
# regression polynomial
library(ISLR)
attach(Wage)
# poly argument makes it so you don't have to spell out: age+age^2+age^3+age^4
# poly function also produces regression in which each variable polynomial is
# orthogonal (ie: it reduces colinearity).
# It forms a matrix in which each column is a linear combination of the
# variables age, age, age^2, age^3, age^4
fit=lm(wage~poly(age,4),
data=Wage)
coef(summary(fit))
coef(fit)
summary(fit)
# if you don't want the orthoginal matrix columns (like from code above), use raw=T.
# this effects the coefficient values but does not affect the fitted values
fit2=lm(wage~poly(age, 4, raw=T),
data=Wage)
coef(summary(fit2))
# alternative method equivalent to fit2
fit2a=lm(wage~age+I(age^2)+I(age^3)+I(age^4),
data=Wage)
fit2b=lm(wage~cbind(age, age^2, age^3, age^4),
data=Wage)
# create min value and max value (range) of the age variable
# creates a sequence of numbers: min value (agelims[1]) to max value (agelims[2])
# create new var: predicted values based on the fit model.
# this uses new data to plug into the fit model output
# (age.grid sequence from above). also adds standard error
# create new var: Gives standard error bands:
# this adds 2 x standard error value to the predict fit value and
# subtracts 2 x standard error value from predicted fit value
agelims=range(age)
agelims
age.grid=seq(from=agelims[1],
to=agelims[2])
age.grid
preds=predict(fit,
newdata=list(age=age.grid),
se=TRUE)
se.bands=cbind(preds$fit + 2*preds$se.fit,
preds$fit-2*preds$se.fit)
par(mfrwo=c(1,2),
mar=c(4.5, 4.5, 1,1),
oma=c(0,0,4,0))
plot(age, wage, xlim=agelims, cex=0.5, col="darkgrey")
title("Degree-1 Polynomial", outer=T)
lines(age.grid, preds$fit, lwd=2, col="blue")
matlines(age.grid, se.bands, lwd=1, col="blue", lty=3)
# these two lines show that there is no meaningful difference between
# using the poly function (orthogonal column variables) described above,
# from the model that types out each varaible: age, age, age^2, age^3, age^4.
# We see the maximum absolute value of the difference between the two is very
# small (nearly zero)
preds2 =predict (fit2 ,
newdata =list(age=age.grid),
se=TRUE)
max(abs(preds$fit - preds2$fit ))
# In performing a polynomial regression we must decide on the degree of the
# polynomial to use. One way to do this is by using hypothesis tests. We now fit
# models ranging from linear to a degree-5 polynomial and seek to determine the
# simplest model which is sufficient to explain the relationship between wage
# and age. We use the anova() function, which performs an analysis of variance
# (ANOVA, using an F-test) in order to test the null hypothesis that a model M #
# variance 1 is sufficient to explain the data against the alternative hypothesis
# that a more complex modelM2 is required. In order to use the anova() function,
# 1 and M2 must be nested models: the predictors in M1 must be a subset of the
# predictors in M2. In this case, we fit five different models and sequentially
# compare the simpler model to the more complex model.
fit.1 = lm(wage~age, data=Wage)
fit.2 = lm(wage~poly(age,2), data=Wage)
fit.3 = lm(wage~poly(age,3), data=Wage)
fit.4 = lm(wage~poly(age,4), data=Wage)
fit.5 = lm(wage~poly(age,5), data=Wage)
anova(fit.1, fit.2, fit.3, fit.4, fit.5)
# Explanation of output:
# The p-value comparing the linear Model 1 to the quadratic Model 2 is essentially
# zero (<10−15), indicating that a linear fit is not sufficient. Similarly the p-value
# comparing the quadratic Model 2 to the cubic Model 3 is very low (0.0017), so the
# quadratic fit is also insufficient. The p-value comparing the cubic and degree-4
# polynomials, Model 3 and Model 4, is approximately 5% while the degree-5 polynomial
# Model 5 seems unnecessary because its p-value is 0.37. Hence, either a cubic or a
# quartic polynomial appear to provide a reasonable fit to the data, but lower- or
# higher-order models are not justified.
# NOTE: ANOVA can also work if there are other variables in the model as well.
# ex: fit .3= lm(wage∼education +poly(age ,3) ,data=Wage)
|
ba87fd338a8e83758ab3ac0b44137bc3c6e6df60 | b1111d0043c3ee6c936352914c005dcf0dfc36ff | /plot3.R | 433104dd533b8b193c4146680efeb6040747beb3 | [] | no_license | samadsajanlal/ExData_Plotting2 | 102025dbaaf3bfd3be08c5b108a19bb2fd5d7f68 | 5811394c8252164f46a65074597ce78402baa159 | refs/heads/master | 2020-12-31T00:18:36.222872 | 2015-11-19T23:27:28 | 2015-11-19T23:27:28 | 46,522,695 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,008 | r | plot3.R | ## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds") #we don't need this data for this specific plot
#subset the data for Baltimore only using the fips code
baltimoreNEI <- NEI[NEI$fips=="24510",]
#aggregate the totals for Baltimore so that we can use this for a bar chart
aggregateTotalsBaltimore <- aggregate(Emissions ~ year, baltimoreNEI ,sum)
#create the PNG device for the plot
png(filename='plot3.png', width=600, height=600)
#create the plot inside the PNG device using ggplot2
library(ggplot2)
ggp <- ggplot(baltimoreNEI,aes(factor(year),Emissions,fill=type)) +
geom_bar(stat="identity") +
theme_bw() + guides(fill=FALSE)+
facet_grid(.~type,scales = "free",space="free") +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (Tons)")) +
labs(title=expression("PM"[2.5]*" Emissions, Baltimore City 1999-2008 by Source Type"))
print(ggp)
#close the device to free up memory
dev.off() |
5fab91d82ae715338a0509ca33f2186dfab15424 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /sirt/R/lsem_fitsem_joint_estimation_partable.R | e1a2b4270aec2be4353c05194ab25461fd4240f5 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 575 | r | lsem_fitsem_joint_estimation_partable.R | ## File Name: lsem_fitsem_joint_estimation_partable.R
## File Version: 0.03
lsem_fitsem_joint_estimation_partable <- function(lavfit, G, par_invariant=NULL,
par_linear=NULL, par_quadratic=NULL)
{
partable <- sirt_import_lavaan_parameterTable(lavfit)
partable$start <- partable$est
partable_joint <- lsem_fitsem_joint_estimation_prepare_partable(partable=partable,
G=G, par_invariant=par_invariant, par_linear=par_linear,
par_quadratic=par_quadratic)
return(partable_joint)
}
|
ef9498ae034828e3405c04849c7e42ae68024dcd | 22057bf4f2eb001f739761e53a5578de578e6920 | /dam_paper_initial_version/seleceted.reazs/2_h/codes/parameters.R | b92d112db05abd4c38a92adf3a54064aed8a8182 | [] | no_license | mrubayet/archived_codes_for_sfa_modeling | 3e26d9732f75d9ea5e87d4d4a01974230e0d61da | f300fe8984d1f1366f32af865e7d8a5b62accb0d | refs/heads/master | 2020-07-01T08:16:17.425365 | 2019-08-02T21:47:18 | 2019-08-02T21:47:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,742 | r | parameters.R | rm(list=ls())
library("rhdf5")
start.time = as.POSIXct("2010-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
end.time = as.POSIXct("2015-12-31 23:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
time.ticks = c(
as.POSIXct("2010-01-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2010-07-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2011-01-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2011-07-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2012-01-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2012-07-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2013-01-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2013-07-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2014-01-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2014-07-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2015-01-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2015-07-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2015-01-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2015-07-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S"),
as.POSIXct("2016-01-01 12:01:01",tz="GMT",format="%Y-%m-%d %H:%M:%S")
)
output.time = seq(0,(365*5+366)*24,3)*3600+start.time
obs.list = paste("Well_",seq(1,322),seq="")
nobs = length(obs.list)
x = h5read("1/2duniform-000.h5","Coordinates/X [m]")
y = h5read("1/2duniform-000.h5","Coordinates/Y [m]")
z = h5read("1/2duniform-000.h5","Coordinates/Z [m]")
x = head(x,-1)+0.5*diff(x)
y = head(y,-1)+0.5*diff(y)
z = head(z,-1)+0.5*diff(z)
nx = length(x)
ny = length(y)
nz = length(z)
##material_ids = h5read(paste("../../reazs.hete/2/T3_Slice_material.h5",sep=''),"Materials/Material Ids")
material_ids = h5read(paste("1/T3_Slice_material.h5",sep=''),"Materials/Material Ids")
material_ids = array(material_ids,c(nx,nz))
alluvium.river=NULL
for (ix in 1:nx)
{
for (iz in 1:(nz-1))
{
if ((material_ids[ix,iz+1]-material_ids[ix,iz])==-5)
{
alluvium.river=rbind(alluvium.river,c(ix,iz))
break()
}
}
}
alluvium.river = alluvium.river[order(alluvium.river[,1],alluvium.river[,2]),]
## hanford.river=NULL
## for (ix in 1:nx)
## {
## for (iz in 1:(nz-1))
## {
## if ((material_ids[ix,iz+1]-material_ids[ix,iz])==-1)
## {
## hanford.river=rbind(hanford.river,c(ix,iz))
## break()
## }
## }
## }
## hanford.river = hanford.river[order(hanford.river[,1],hanford.river[,2]),]
##find alluvium and hanford boundary
alluvium.hanford=NULL
for (ix in 1:nx)
{
for (iz in 1:(nz-1))
{
if ((material_ids[ix,iz+1]-material_ids[ix,iz])==4)
{
alluvium.hanford=rbind(alluvium.hanford,c(ix,iz))
break()
}
}
}
alluvium.hanford = alluvium.hanford[order(alluvium.hanford[,1],alluvium.hanford[,2]),]
alluvium.ringold=NULL
for (ix in 1:nx)
{
for (iz in 1:(nz-1))
{
if ((material_ids[ix,iz+1]-material_ids[ix,iz])==1)
{
alluvium.ringold=rbind(alluvium.ringold,c(ix,iz))
break()
}
}
}
alluvium.ringold = alluvium.ringold[order(alluvium.ringold[,1],alluvium.ringold[,2]),]
hanford.ringold=NULL
for (ix in 1:nx)
{
for (iz in 1:(nz-1))
{
if ((material_ids[ix,iz+1]-material_ids[ix,iz])==-3)
{
hanford.ringold=rbind(hanford.ringold,c(ix,iz))
break()
}
}
}
hanford.ringold = hanford.ringold[order(hanford.ringold[,1],hanford.ringold[,2]),]
plot(x[hanford.ringold[,1]],z[hanford.ringold[,2]],type="l",lwd=3,xlim=range(x),ylim=range(z))
lines(x[alluvium.river[,1]],z[alluvium.river[,2]],lwd=2,col="blue")
lines(x[alluvium.ringold[,1]],z[alluvium.ringold[,2]],lwd=2,col="green")
lines(x[alluvium.hanford[,1]],z[alluvium.hanford[,2]],lwd=2,col="red")
list=c("hanford.ringold",
"alluvium.hanford",
## "hanford.river",
"alluvium.river",
"alluvium.ringold",
"x","y","z",
"nx","ny","nz",
"obs.list","nobs",
"material_ids",
"start.time",
"end.time",
"output.time",
"time.ticks"
)
save(list=list,file="statistics/parameters.r")
|
ee7e986fd7b036be9d172036c6d2f15db7f91dcf | 5c551b43a32451d14254c5ccffef277a246d2709 | /Week4/Code/StatsWithSparrows18.R | 7fb6383a2fa6ed499662741b5688855458e4f97b | [
"Apache-2.0"
] | permissive | ph-u/CMEE_ph-u | 8e20a2b1269dc21a860a5827dbd1d84e03340dd3 | 8d52d4dcc3a643da7d55874e350c18f3bf377138 | refs/heads/master | 2023-01-22T07:29:46.118897 | 2020-12-02T10:31:51 | 2020-12-02T10:31:51 | 212,303,395 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 858 | r | StatsWithSparrows18.R | #!/bin/env Rscript
# Author: ph-u
# Script: StatsWithSparrows18.R
# Desc: minimal R function with two in-script tests
# Input: none -- run in R console line-by-line
# Output: R terminal output
# Arguments: 0
# Date: Oct 2019
a<-read.table("../Data/ObserverRepeatability.txt", header = T)
library(dplyr)
a %>% group_by(StudentID) %>% summarise(count=length(StudentID))
a %>% group_by(StudentID) %>% summarise(count=length(StudentID)) %>% summarise(length(StudentID))
a %>% group_by(StudentID) %>% summarise(count=length(StudentID)) %>% summarise(sum(count))
length(a$StudentID)
a %>% group_by(StudentID) %>% summarise(count=length(StudentID)) %>% summarise(sum(count^2))
mod<-lm(a$Tarsus~a$StudentID)
mod<-lm(a$Tarsus~a$Leg+a$Handedness+a$StudentID)
anova(mod)
library(lme4)
lmm<-lmer(a$Tarsus~a$Leg+a$Handedness+(1|a$StudentID))
summary(lmm)
var(a$Tarsus)
|
148804e028039ebc3aa7bcb6a82111b7b184c45c | 21818aeceda73fc35827ef8e79a56bb715305eb6 | /Datasets/collect/collect_marques.R | 9f4a3fa5da8231bd5ed8e223fed34fa0ffb18bad | [
"MIT"
] | permissive | JiahuaQu/Cell_BLAST | 25ab0c5072a05faa49cd2fcc4b5c743ae5d3b125 | 45b14bbd3385b8a7be0b48ef5ab42bc946f3558f | refs/heads/master | 2023-07-17T02:21:18.868383 | 2021-09-01T03:08:36 | 2021-09-01T03:08:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 593 | r | collect_marques.R | #! /usr/bin/env Rscript
# by weil
# 5 Oct 2018
# 5:15 PM
# This script converts the RDS data downloaded from hemberg website
# into HDF5 format to be used by different methods
source("../../Utilities/data.R", chdir = TRUE)
message("Reading data...")
sce <- readRDS("../download/Hemberg/Marques/marques.rds")
expr_mat <- as.matrix(counts(sce))
meta_df <- as.data.frame(colData(sce))
meta_df <- meta_df[, c("Species", "cell_type1", "Source", "age", "WellID", "Strain", "State", "sex")]
colnames(meta_df)[8] <- "gender"
construct_dataset("../data/Marques", expr_mat, meta_df)
message("Done!")
|
0a19c05f688c60703c43bb518ced849d78497ad2 | c3cb442a8882b002e6a1b760677f6b1aea8a87b9 | /server.R | 6c462ed484f92a85e4f18c7dbef6c17da210f775 | [] | no_license | Brewstarke/NLoad_shiny | d04ff34d34c780c3e4b5ac54a811dbf58107737c | 6c27f609181fd17322f4c8614acf6a164b7f0a5e | refs/heads/master | 2020-05-15T22:48:57.413300 | 2014-11-20T15:22:22 | 2014-11-20T15:22:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,164 | r | server.R | #### Shiny Server
#
# NLM Model running on shiny
# Runs basic Nitrogen loading models from data read in by user
# Sensitivity analysis is possible by manipulating the many parameters
#
#
####
library(shiny)
library(reshape2)
library(rCharts)
library(dplyr)
library(ggplot2)
library(RColorBrewer)
library(rjson)
# Shiny Server ------------------------------------------------------------------
shinyServer( # this will be run each time a user changes something.
function(input, output) {
# Datafile load
filedata <- reactive({
infile <- input$datafile
if (is.null(infile)) {
# User has not uploaded a file yet - from http://bl.ocks.org/psychemedia/9690079
return(NULL)# Output message in html....to ui.R
}
csv <- data.frame(read.csv(infile$datapath, header = TRUE))
csv # run filedata() should return a dataframe of input areas that feed into NLMout()
})
# Column Names for mapping:
mappingNames <- reactive({
if(is.null(filedata())){return(NULL)}
items <- names(filedata())
items <- as.list(items)
return(items)
})
# Table of data loaded in by user
output$filetable <- renderDataTable({
fd1 <- filedata()
if(is.null(fd1)){
return("Load data to see summary")
}
fd1
})
# Table of NLM outputs
output$filetable2 <- renderDataTable({
fd2 <- filedata()
if(is.null(fd2)){
return("Your NLM outputs will appear here")
} # If no table loaded into memory then return message- if loaded run below...
NLMout()
})
# Data mapping -------------------------------------------------
# From NLM_OysterBay Spreadsheet\
# ---Parameters--
# [Rainfall nitrate]:
# [Rainfall ammonia]:
# [Rainfall dissolved organic N]:
# [TDN]:
# Ave Annual Rainfall:
# Wet to Total Deposition Factor:
# % atmos N transported from wetlands
# % atmos N transported from freshwater ponds
# % atmos N transported from Nat'l Veg Soils:
# % atmos N transported from Turf Soils:
# % atmos N transported from Agr. Soils:
# Median Home Size:
# No of stories/home:
# House footprint area:
# Average area of roof:
# Average area of driveway:
# % atmos N transported from Impervious Soils (roof/driveway):
# Fertilizer N applied to lawns:
# Fertilizer N applied to agriculture:
# Fertilizer N applied to rec/golf courses:
# Average lawn area:
# % of homes that use fertilizer:
# % of fertilizer N transported from Turf Soils:
# % of fertilizer N transported from Agri Soils:
# % of fertilizer N transported from Rec. Soils:
# Per capita human N excretion rate:
# People per house:
# % N transported from septic tank
# %N transported through leaching field
# % waste transported from septic plumes:
# % watershed N transported from vadose zone:
# % N transported from aquifer:
# # of houses in high density residential areas:
# # of houses in medium-high density residential areas:
# # of houses in medium density residential areas:
# # of houses in medium-low density residential areas:
# # of houses in low density residential areas:
# percent of onsite wastewater systems that are cesspools
# uiRender commands ####
# These functions generate a user input (a select input)
# Site
# input$Site
output$Sites <- renderUI({ # need to create a site input for plots and other analysis.
if (is.null(filedata())) return(NULL)
selectInput("Site", "Site:", mappingNames(), selected = mappingNames()[1])
})
# wetlands
# input$WetlandsArea
output$WetlandsAreas <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("WetlandsArea", "Wetlands Area (ha):", mappingNames(), selected = mappingNames()[3])
})
# ponds
# input$PondsArea
output$PondsAreas <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("PondsArea", "Ponds Area (ha):", mappingNames(), selected = mappingNames()[4]) # inputID links to the /scratchspace.R input list at top.
})
# natural vegetation
# input$NatVegArea
output$NatVegAreas <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("NatVegArea", "Natural Vegetation Area (ha):", mappingNames(), selected = mappingNames()[5]) # inputID links to the /scratchspace.R input list at top.
})
# turfArea
# input$TurfArea
output$TurfAreas <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("TurfArea", "Turf Area (ha):", mappingNames(), selected = mappingNames()[6]) # inputID links to the /scratchspace.R input list at top.
})
# agArea
# input$AgArea
output$AgAreas <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("AgArea", "Agricultural Area (ha):", mappingNames(), selected = mappingNames()[7]) # inputID links to the /scratchspace.R input list at top.
})
# impervArea
# input$ImpervArea
output$ImpervAreas <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("ImpervArea", "Impervious Surface Area (ha):", mappingNames(), selected = mappingNames()[8]) # inputID links to the /scratchspace.R input list at top.
})
# activeAgArea
# input$ActiveAgArea
output$ActiveAgAreas <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("ActiveAgArea", "Active Agricultral Area (ha):", mappingNames(), selected = mappingNames()[9]) # inputID links to the /scratchspace.R input list at top.
})
# recArea
# input$RecArea
output$RecAreas <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("RecArea", "Recreational Areas (ha):", mappingNames(), selected = mappingNames()[10]) # inputID links to the /scratchspace.R input list at top.
})
# lawnArea
# input$LawnArea
output$LawnAreas <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("LawnArea", "Lawn Area (ha):", mappingNames(), selected = mappingNames()[11]) # inputID links to the /scratchspace.R input list at top.
})
# parkArea
# input$ParkArea
output$ParkAreas <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("ParkArea", "Park Area (ha):", mappingNames(), selected = mappingNames()[12]) # inputID links to the /scratchspace.R input list at top.
})
# uiOutput("ResdGT200m"),
output$ResdGT200ms <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("ResdGT200m", "No. of Residences located greater than 200m from the water:", mappingNames(), selected = mappingNames()[13]) # inputID links to the /scratchspace.R input list at top.
})
# uiOutput("ResdLT200ms"),
output$ResdLT200ms <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("ResdLT200m", "No. of Residences located less than 200m from the water:", mappingNames(), selected = mappingNames()[14]) # inputID links to the /scratchspace.R input list at top.
})
# uiOutput("persperhomes")
output$persperhomes <- renderUI({
if (is.null(filedata())) return(NULL)
selectInput("perperhome", "Average number of people per home:", mappingNames(), selected = mappingNames()[15]) # inputID links to the /scratchspace.R input list at top.
})
# Data Table Outputs ------------------------------
# Output data table- first page
# Function definitions for NLM -----
# NLoad <- reactive({
# From NLM_OysterBay Spreadsheet\
#
# [Rainfall nitrate]:
# [Rainfall ammonia]:
# [Rainfall dissolved organic N]:
# [TDN]:
# Ave Annual Rainfall:
# Wet to Total Deposition Factor:
# % atmos N transported from wetlands
# % atmos N transported from freshwater ponds
# % atmos N transported from Nat'l Veg Soils:
# % atmos N transported from Turf Soils:
# % atmos N transported from Agr. Soils:
# Median Home Size:
# No of stories/home:
# House footprint area:
# Average area of roof:
# Average area of driveway:
# % atmos N transported from Impervious Soils (roof/driveway):
# Fertilizer N applied to lawns:
# Fertilizer N applied to agriculture:
# Fertilizer N applied to rec/golf courses:
# Average lawn area:
# % of homes that use fertilizer:
# % of fertilizer N transported from Turf Soils:
# % of fertilizer N transported from Agri Soils:
# % of fertilizer N transported from Rec. Soils:
# Per capita human N excretion rate:
# People per house:
# % N transported from septic tank
# %N transported through leaching field
# % waste transported from septic plumes:
# % watershed N transported from vadose zone:
# % N transported from aquifer:
# # of houses in high density residential areas:
# # of houses in medium-high density residential areas:
# # of houses in medium density residential areas:
# # of houses in medium-low density residential areas:
# # of houses in low density residential areas:
# percent of onsite wastewater systems that are cesspools *** Make this a user datasheet loading input ***
# ----
# User loaded spatial paramters (areas) and population paramters/estimates
#- Read in on first tab and mapped out with uiOutput-renderOutput functions.
# fd == dataframe that is loaded in by user
# input$xxx == the column name of dataframe that is mapped to parameter XX.
NLMout <- reactive({
if (is.null(filedata())) return(NULL)
fd <- data.frame(filedata())
# assigns vector from dataframe (column) to parameter which will coerce forumlas to vector outputs and dataframes
SiteNames <- fd[[input$Site]]
TArea <- fd[[input$TurfArea]]
NVArea <- fd[[input$NatVegArea]]
RArea <- fd[[input$RecArea]]
LArea <- fd[[input$LawnArea]]
PArea <- fd[[input$ParkArea]]
AArea <- fd[[input$AgArea]]
AAArea <- fd[[input$ActiveAgArea]]
IArea <- fd[[input$ImpervArea]]
WArea <- fd[[input$WetlandsArea]]
PondArea <- fd[[input$PondsArea]]
ResGT <- fd[[input$ResdGT200m]]
ResLT <- fd[[input$ResdLT200m]]
Persons <- fd[[input$perperhome]]
# Build the NLM modelusing user loaded spatial data (areas from .csv)
# Parameter mapping...
# UI controlled parameters
## Atmospheric Loading Parameters ##-- A22:A35 in spreadsheet- a few unused parameters dropped
TDN <- input$AtmDepRate
WTWet <- input$AtmNtransWetlands
TAP <- input$AtmNtransPonds
ANTNV <- input$AtmNtransNatVeg
TAT <- input$AtmNtransTurf
ATAg <- input$AtmNtransAg
ATImp <- input$AtmNtransImperv #### NOT USED IN NLM OYSTERBAY_COLDSPRING HARBOR ####
## Fertilizer Loading Parameters ##-- A36:A43- Dropped average lawn area paramter- using Lawn Area (total land coverage area) from user loaded data
FertL <- input$FertLawns
FertAg <- input$FertAg
FertG <- input$FertRec
FertPerc <- input$PercentHomes
TranT <- input$FertTransTurf
FAgTran <- input$FertTransAg
FRecTran <- input$FertTransRec
# Septic and Cesspools Loading Parameters ## -- A44:A50
HL <- input$HumanLoad
NTS <- input$NtransFromSpeticTank
NTL <- input$NTransLeach
NTP <- input$NtransPlume
NTV <- input$NtransVadose
# DNit <- input$DeNit # NOT SURE THIS IS USED...
NTA <- input$NtransAquifer
PercCess <- input$percentCesspools
# Create blank object to store output
NLM <- NULL
NLM$Sites <- (SiteNames)
# Atmospheric Loads =============================================================
# Per-land-use atmospheric N deposition (TDN) scaled by area and a
# land-use-specific transfer coefficient.
AtmWetlands <- (TDN * WArea * WTWet)
AtmFreshWater <- (TDN * PondArea * TAP)
AtmNatVeg <- (TDN * NVArea * ANTNV)
AtmTurfRec <- (TDN * (TArea + RArea + LArea + PArea) * TAT)
AtmAg <- (TDN * (AArea + AAArea) * ATAg)
AtmImperv <- (TDN * IArea) #Need help with this formula....# NLM Oysterbay spreadsheet does NOT use the input$AtmNtransImperv input in formula
## Total N load to estuary sourced from Atmospheric Deposition
# FIX: `%>%` binds tighter than `*`, so the previous
# `... * NTV * NTA %>% round()` rounded only NTA (a transfer coefficient)
# before multiplying. Parenthesize so the whole load is rounded, matching
# the fully parenthesized sibling expressions below.
NLM$TotalLoadAtmospheric <- ((AtmWetlands + AtmFreshWater + AtmNatVeg + AtmTurfRec + AtmAg + AtmImperv) * NTV * NTA) %>% round()
# Fertilizer Application Loads ===================================================
#g
FertTurf <- (FertL * LArea * FertPerc * TranT) %>% round()
#ActiveAg- new to the NLM_Oyster Bay Spreadsheet
#h Is this Active Ag only? Need to find out and/or add active ag.
FertActiveAg <- (FertAg * FAgTran * AAArea) %>% round()
#i
FertGolf <- (FertG * RArea * FRecTran) %>% round()
FertParks <- (FertG * FRecTran * PArea) %>% round()
# Total Fertilization Load
# FIX: same precedence issue as above -- round the full product, not just NTA.
NLM$TotalFertLoad <- ((FertTurf + FertActiveAg + FertGolf + FertParks) * NTV * NTA) %>% round()
# Surface Loads- Fertilizer and Deposition ------------------------------------
#j
NLM$SurfaceLoad <- ((NLM$TotalLoadAtmospheric + NLM$TotalFertLoad) * 0.39 * 0.65) %>% round()
#k
# FIX: `%>%` also binds tighter than `+`, so previously only the second
# addend was rounded. Round the summed load instead.
NLM$SepticLoad <- (((1 - PercCess) * HL * (ResGT * Persons) * NTS * NTL * NTP * NTA) + ((1 - PercCess) * HL * (ResLT * Persons) * NTS * NTL * NTP * NTA)) %>% round()
NLM$CesspoolLoad <- ((PercCess * HL * (ResGT * Persons) * NTS * NTP * NTA) + (PercCess * HL * (ResLT * Persons) * NTS * NTP * NTA)) %>% round()
# Total Nitrogen Loading to Estuary --------------------------------------------
# NLM$NLoadTotal <- (NLM$SurfaceLoad + NLM$SepticLoad + NLM$CesspoolLoad + NLM$WasteWaterLoad)
outNLM <- data.frame(NLM)
outNLM
})
# TEST #
# for interactivity-
# Preview table of the NLM results (site name + atmospheric load columns)
# for the interactive test panel.
output$NLMtest <- renderDataTable({
  # No uploaded data yet: show a hint instead of computing anything.
  # (Fixed user-facing typo: "ouputs" -> "outputs".)
  if (is.null(filedata())) {
    return("Load data to see outputs")
  }
  nlm_results <- NLMout()
  nlm_results[, 1:2]
})
# Fertilizer and wastewater load table for UI parameter page
# Fertilizer and wastewater load table (selected NLM columns) for the UI
# parameter page.
output$NLMwwfertloads <- renderDataTable({
  # No uploaded data yet: show a hint instead of computing anything.
  # (Fixed user-facing typo: "ouputs" -> "outputs".)
  if (is.null(filedata())) {
    return("Load data to see outputs")
  }
  nlm_results <- NLMout()
  nlm_results[, c(1, 3, 5, 6)]
})
# Start of NLoad outputs----
# Data output summaries ----
# Shiny Plots ----
# Reshape the NLM output into long format for plotting: the first column
# (site identifier) stays as the id variable, every load column becomes a
# variable/value pair.
NLM.plot.data <- reactive({
  nlm_wide <- NLMout()
  id_column <- names(nlm_wide)[1]
  nlm_wide %>% melt(id.vars = id_column)
})
# Stacked bar plot- absolute values- dimple plots
# Stacked horizontal bar chart of absolute N loads (dimple.js via rCharts):
# one bar per site, stacked by load source ("variable" from the melted data).
output$HStackBar <- renderChart2({
# Stacked horizontal plot Total loads descending
HSbar <- dPlot(y = names(NLM.plot.data())[1], x = "value", data= NLM.plot.data(), groups= "variable", type = "bar", height = 700, width= 700)
# Category (site) axis ordered by descending total load.
HSbar$yAxis(type= "addCategoryAxis", orderRule = 'rev(value)')
HSbar$xAxis(type= "addMeasureAxis")
HSbar$legend(
x = 0,
y = 0,
width = 500,
height = 1500,
horizontalAlign = "center")
# NOTE(review): brewer.pal(6, "Set1") caps the palette at 6 load categories --
# confirm the melted data never contains more than 6 variables.
HSbar$defaultColors(brewer.pal(6, "Set1"))
return(HSbar)
})
# Stacked horizontal percentage
# Same chart as HStackBar, but with a percentage x-axis so each bar shows the
# relative contribution of each load source per site.
output$HStackPct <- renderChart2({
HSbarPct <- dPlot(y = names(NLM.plot.data())[1], x = "value", data= NLM.plot.data(), groups= "variable", type = "bar", height = 700, width= 700)
HSbarPct$yAxis(type= "addCategoryAxis")
# addPctAxis normalises each stacked bar to 100%.
HSbarPct$xAxis(type= "addPctAxis")
HSbarPct$legend(
x = 0,
y = 0,
width = 700,
height = 700,
horizontalAlign = "right")
# NOTE(review): palette capped at 6 categories -- see HStackBar.
HSbarPct$defaultColors(brewer.pal(6, "Set1"))
return(HSbarPct)
})
# Download outputs to .csv file.
# Download handler: serves the full NLM results table as a CSV file.
output$downloadOutput <- downloadHandler(
filename = "NLM_shiny_Output.csv",
content = function(file){
write.csv(NLMout(), file)
}
)
# output$plot <- renderChart2({
#
# plot1 <- nPlot(value ~ subwatershed_code,
# group = "variable",
# data = NLoads.Melt,
# type = "multiBarHorizontalChart")
# plot1$params$height = 600
# plot1$params$dom <- "plot"
# return(plot1)
# })
# ----
}
)
|
ed6dbb9c390c98ca8c47202663b4d8584f85bcbd | 8dabe77b4fa5368e1a8124bf2bf3fcae02052769 | /R/main.R | a5b7b1013821454b25a9e4986a83ce5c0114e312 | [] | no_license | jamesdu0504/rtree-1 | 73e4c6bce7a1b89804181796c1e2822961f534fd | 8017a0b0499b263d2cfd2fdc6c14da0271bc37a8 | refs/heads/master | 2022-01-12T06:00:30.913873 | 2017-03-07T03:15:19 | 2017-03-07T03:15:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,818 | r | main.R | Rcpp::loadModule("rtreecpp", TRUE)
#' Create an RTree
#'
#' Organizes points in an R-tree.
#'
#' The R-tree is created using the quadratic splitting algorithm, with the maximum number of elements
#' per node set to 16. See \url{http://www.boost.org/doc/libs/1_63_0/libs/geometry/doc/html/geometry/spatial_indexes/introduction.html} for details.
#'
#' @param x A 2-column numeric matrix of point coordinates.
#'
#' @return An RTree S3 object.
#'
#' @export
RTree <- function(x) {
  if (!is.numeric(x)) {
    stop('x must be numeric.')
  }
  # Use short-circuiting `||` instead of elementwise `|`: for a plain vector
  # dim(x) is NULL, so `dim(x)[2] != 2` evaluates to logical(0) and the old
  # elementwise `|` made the whole condition zero-length, crashing `if` with
  # "argument is of length zero" instead of raising the intended error.
  if (length(dim(x)) != 2 || dim(x)[2] != 2) {
    stop('x must be a 2-column matrix.')
  }
  rTreeCpp <- new(RTreeCpp, x)
  # Wrap the C++ handle in a classed list so S3 methods can dispatch on it.
  me <- list(
    rTreeCpp = rTreeCpp
  )
  class(me) <- append(class(me), "RTree")
  return(me)
}
#' Get Points Within Distance
#'
#' For each point \eqn{y_i} in set \code{y}, returns the row-indices of the points indexed in \code{rTree}
#' that are within a given \code{distance} of \eqn{y_i}.
#'
#' @param rTree An \link{RTree} object.
#' @param y A 2-column numeric matrix of point coordinates.
#' @param distance A positive scalar.
#'
#' @return A list with one vector of row-indices per row of \code{y}.
#'
#' @export
withinDistance <- function(rTree, y, distance) {
  # S3 generic: dispatches on the class of `rTree` (see withinDistance.RTree).
  UseMethod("withinDistance", rTree)
}
# RTree method for withinDistance(): validates inputs, then delegates the
# spatial query to the C++ backend (one vector of matching row-indices per
# query point in `y`).
withinDistance.RTree <- function(rTree, y, distance) {
  if (!inherits(rTree, "RTree")) {
    stop('rTree must be of class RTree.')
  }
  if (!is.numeric(y)) {
    stop('y must be numeric.')
  }
  # `||` short-circuits so dim(y)[2] is never evaluated for dimensionless
  # input; the previous elementwise `|` produced a zero-length condition
  # ("argument is of length zero") for plain vectors instead of the intended
  # error message.
  if (length(dim(y)) != 2 || dim(y)[2] != 2) {
    stop('y must be a 2-column matrix.')
  }
  if (!is.numeric(distance)) {
    stop('distance must be numeric.')
  }
  if (length(distance) != 1) {
    stop('distance must be a scalar.')
  }
  if (distance <= 0) {
    stop('distance must be positive.')
  }
  index.ls <- rTree$rTreeCpp$within_distance_list(y, distance)
  return(index.ls)
}
#' Get Nearest Neighbors
#'
#' For each point \eqn{y_i} in set \code{y}, returns the row-indices of the \code{k} points indexed in \code{rTree}
#' that are closest to \eqn{y_i}.
#'
#' @param rTree An \link{RTree} object.
#' @param y A 2-column numeric matrix of point coordinates.
#' @param k A positive integer.
#'
#' @return A list with one vector of row-indices per row of \code{y}.
#'
#' @export
knn <- function(rTree, y, k) {
  # S3 generic: dispatches on the class of `rTree` (see knn.RTree).
  UseMethod("knn", rTree)
}
# RTree method for knn(): validates inputs, then delegates the k-nearest-
# neighbour query to the C++ backend (one vector of k row-indices per query
# point in `y`).
knn.RTree <- function(rTree, y, k) {
  if (!inherits(rTree, "RTree")) {
    stop('rTree must be of class RTree.')
  }
  if (!is.numeric(y)) {
    stop('y must be numeric.')
  }
  # `||` short-circuits so dim(y)[2] is never evaluated for dimensionless
  # input; the previous elementwise `|` produced a zero-length condition
  # ("argument is of length zero") for plain vectors instead of the intended
  # error message.
  if (length(dim(y)) != 2 || dim(y)[2] != 2) {
    stop('y must be a 2-column matrix.')
  }
  if (!is.numeric(k)) {
    stop('k must be numeric.')
  }
  # Preserve the original lenient behaviour: non-integer k is truncated with
  # a warning rather than rejected.
  if (!is.integer(k)) {
    k <- as.integer(k)
    warning('k was cast to integer, this may lead to unexpected results.')
  }
  if (length(k) != 1) {
    stop('k must be a scalar.')
  }
  if (k <= 0) {
    stop('k must be positive.')
  }
  index.ls <- rTree$rTreeCpp$knn_list(y, k)
  return(index.ls)
}
|
9aec42bfcd078d59361ad362bad5fa02fa0d418c | d0538e4b465d6104748e60b9ab35c35679d6d447 | /scWGS/CNVCalling/s00.splitSampleInfo.R | 787c517925213dfaaf841a2a0c83eb695a47f1ed | [] | no_license | WRui/Mutations-of-Normal-Cells | a6e02c9b34ffe6699b890e23a8c7a1da6c1bfa96 | 101ceb2e3716916621054d7aa2ae383c1286c9a6 | refs/heads/main | 2023-02-26T02:44:42.083848 | 2021-02-03T02:24:47 | 2021-02-03T02:24:47 | 335,482,811 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 330 | r | s00.splitSampleInfo.R | data <- read.table("Sample_Info_vivoPT.Epcam.txt",header=F,sep="\t")
# For each DNA library in column V2, write a tab-separated ".Info" file with
# three unnamed columns: sample name, a constant 0 barcode slot, and the
# barcode from column 3.
DNA_lib <- unique(data$V2)
for (lib_id in DNA_lib) {
  lib_rows <- data[data$V2 == lib_id, ]
  out_df <- data.frame(
    Sample = lib_rows[, 1],
    bar1 = rep(0, nrow(lib_rows)),
    bar2 = lib_rows[, 3]
  )
  write.table(out_df, file = paste0(lib_id, ".Info"),
              quote = FALSE, sep = "\t", row.names = FALSE, col.names = FALSE)
}
|
3c4867791dd0d5b7db6c943444dd8e802e911992 | e189d2945876e7b372d3081f4c3b4195cf443982 | /man/URLs_WIKITEXT.Rd | 6929c9e219fc4f445983c57e138eefee761e8265 | [
"Apache-2.0"
] | permissive | Cdk29/fastai | 1f7a50662ed6204846975395927fce750ff65198 | 974677ad9d63fd4fa642a62583a5ae8b1610947b | refs/heads/master | 2023-04-14T09:00:08.682659 | 2021-04-30T12:18:58 | 2021-04-30T12:18:58 | 324,944,638 | 0 | 1 | Apache-2.0 | 2021-04-21T08:59:47 | 2020-12-28T07:38:23 | null | UTF-8 | R | false | true | 388 | rd | URLs_WIKITEXT.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_tar.R
\name{URLs_WIKITEXT}
\alias{URLs_WIKITEXT}
\title{WIKITEXT dataset}
\usage{
URLs_WIKITEXT(filename = "WIKITEXT", untar = TRUE)
}
\arguments{
\item{filename}{the name of the file}
\item{untar}{logical, whether to untar the '.tgz' file}
}
\value{
None
}
\description{
download WIKITEXT dataset
}
|
6068df01eb689ccbfd3aeb104a0b72dceb690209 | f5428950323f84deb2339fff37cf3671b32ac8cd | /R/fuzdb.R | 082a0648d0965f883bb96bae39d7f250e0a6e170 | [] | no_license | Chebuu/foa | a867d13e3f15f731bb87a7f53f3dd8a8adf6294f | 2d4ff80cdab88d4fda4514d21828ec177ee63fbe | refs/heads/master | 2023-03-03T10:05:25.349753 | 2021-02-17T15:12:19 | 2021-02-17T15:12:19 | 339,762,324 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 31 | r | fuzdb.R | # http://protdyn-database.org/
|
39d25fdfc6456a30048f037fbdb97d05bc80e061 | 23b88db8d1113f2022f6071477e58702b3860751 | /EA_simulations_cluster.R | a4d125aa6bec4f97f08e655408b5f2defa805f9c | [
"MIT"
] | permissive | alantump/socialDDM_Evolutionary_Algorithm | c3628f7135871efcad3b858f12d741a69f8f7be0 | 7c45ea8297d3d44ede8e2eaae85af88578dea1ae | refs/heads/master | 2021-06-24T08:19:24.097526 | 2021-05-01T14:06:16 | 2021-05-01T14:06:16 | 221,718,619 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,787 | r | EA_simulations_cluster.R |
#########################
# NOTES
# this R script runs the EA simulations on the tardis cluster
# Tardis thereby runs many of this scrips in parallel and asigns a diferent 'clusterid' to each worker
# The simulation contains following steps:
# Load functions and packages
# Assign 'clusterid'
# Define important algorithm paramters (e.g. population size and generations)
# Run evolutionary algorithm
# Arrange results
# Saves everything for later use (e.g. making figures and diagnosis)
##########################
# Name of this analysis; used below as the output directory and file prefix.
name="simple_p_new"
# Load the evolutionary-algorithm helper functions (defines runEA, used below).
source("EA_functions4.R")
# Load required packages.
# NOTE(review): `require()` only warns when a package is missing; `library()`
# would fail fast here -- confirm before changing.
packages <- c('dplyr',"tidyr","foreach","parallel","doParallel")
lapply(packages, require, character.only=TRUE)
# Get the job ID assigned by the cluster scheduler (defaults to 1 locally).
clusterid <- as.integer(commandArgs(TRUE)[1])
if(is.na(clusterid)){clusterid<-1}
# Define algorithm parameters
pop_size = 1000 #Population size
number_of_generations=1000 #Number of Generations
gif=0 # Do you want a gif? (Needs special packages; keep 0 to play safe)
reps <- 8 #How many parallel populations
do_parallel=1 #Yes please
# Define the analysed parameter ranges (lower/upper bound per evolved trait)
bound_initial_bias=c(-0.5,2)#c(0,0.001)#
bound_theta=c(0.1,12)
bound_delta=c(0,2)#c(3,3.001)
bound_bino=c(0,1)#c(3,3.001)
dummy=rbind(bound_initial_bias,bound_theta,bound_delta,bound_bino,bound_bino)
lb<-dummy[,1]
ub<-dummy[,2]
# Define features of the environment
# Group sizes
gs_vec=c(1,5,10,20,50)
# Cost asymmetry
fnc_vec=c(1,2,4)# #symmetric is 1 (with one point for correct)
# Initial information
initial_variance_vec=0#seq(0,1.5,0.5)
initial_knowledge_vec=0#seq(0,0.5,0.5)
# Information gathered during the decision process
average_pdrift_vec = 0.3#seq(0.1,0.3,0.1)
variance_pdrift=0
# Time cost
time_cost_vec = c(0.05)
# Cooperative=1 or competitive=0
collective_vec=c(0,1)
# Define variables for the current run:
# Full factorial design: one row per environment condition, with m1..m7/sd1..sd7
# placeholder columns for the evolved-parameter summaries filled in later.
# This worker processes only the row indexed by `clusterid`.
final_data=expand.grid(Group_Size=gs_vec,FNC=fnc_vec,average_pdrift=average_pdrift_vec,initial_knowledge=initial_knowledge_vec,initial_variance=initial_variance_vec,time_cost=time_cost_vec,collective=collective_vec,m1=NA,sd1=NA,m2=NA,sd2=NA,m3=NA,sd3=NA,m4=NA,sd4=NA,m5=NA,sd5=NA,m6=NA,sd6=NA,m7=NA,sd7=NA)
# Number of conditions (auto-printed to the log when run via Rscript).
nrow(final_data)
final_data$FNC <- as.numeric(final_data$FNC)
# False-response costs for the two options: scaled so they sum to 2 with
# ratio FNC (FNC = 1 gives the symmetric case).
false_costs=c(2/(1+final_data$FNC[clusterid]),(2*final_data$FNC[clusterid])/(1+final_data$FNC[clusterid]))
# Pull the condition values for this worker's assigned row.
numAgents=final_data$Group_Size[clusterid]
initial_variance=final_data$initial_variance[clusterid]
initial_knowledge=final_data$initial_knowledge[clusterid]
average_pdrift =final_data$average_pdrift[clusterid]
time_cost=as.numeric(final_data$time_cost[clusterid])
collective = final_data$collective[clusterid]
# Set up a local parallel cluster: one worker per replicate population.
cl<-makeCluster(reps) # one worker per replicate population
registerDoParallel(cl)
# Export the whole global workspace by name so every worker sees the
# parameters and helper functions defined above.
clusterExport(cl, ls()) #export workspace into cluster
# Run the evolutionary algorithm: one independent population per worker.
xx=foreach(r=1:reps) %dopar% { #each population in parallel
runEA(numAgents,pop_size,number_of_generations,gif,false_costs,time_cost,initial_knowledge,initial_variance,average_pdrift,variance_pdrift,collective)
}
stopCluster(cl)#stop cluster
# Rearrange results: build one (generations x populations) matrix per evolved
# parameter, where column r holds that parameter's trajectory in population r.
n_params <- ncol(xx[[1]])
result <- vector("list", n_params)
for (param_idx in seq_len(n_params)) {
  param_mat <- matrix(NA, number_of_generations, reps)
  for (pop_idx in seq_len(reps)) {
    param_mat[, pop_idx] <- xx[[pop_idx]][, param_idx]
  }
  result[[param_idx]] <- param_mat
}
# Summarise the final generations (number_of_generations-10 ... end): for each
# of the 7 evolved parameters p, column m<p> gets the mean of the per-generation
# population means and column sd<p> the mean of the per-generation
# across-population standard deviations. The scalar is recycled across all rows
# of final_data, exactly as the previous 14 hand-written assignments did --
# this loop replaces that duplication.
last_gens <- (number_of_generations - 10):number_of_generations
for (p in 1:7) {
  final_data[[paste0("m", p)]] <- mean(apply(result[[p]], 1, mean)[last_gens])
  final_data[[paste0("sd", p)]] <- mean(apply(result[[p]], 1, sd)[last_gens])
}
# Log this worker's completed parameter row.
print(final_data[clusterid,])
# Ensure the output directory exists (silently no-op if it already does).
dir.create(name, showWarnings = F)
# Save results: full workspace image, one file per cluster job.
save.image(paste0(name,"/",clusterid,name,".RData"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.